diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index 371137e6bbe..00000000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,5 +0,0 @@ -The Changelog has moved! - -You can find it at the following URL: - -https://dl.influxdata.com/platform/nightlies/master/CHANGELOG.md diff --git a/CHANGELOG_OLD.md b/CHANGELOG_OLD.md deleted file mode 100644 index 1f26c3d77ac..00000000000 --- a/CHANGELOG_OLD.md +++ /dev/null @@ -1,1478 +0,0 @@ -## unreleased - -### Go Version - -This release upgrades the project to `go` version 1.17. - -#### Minimum macOS Version - -Because of the version bump to `go`, the macOS build for this release requires at least version 10.13 High Sierra to run. - -### `influx` CLI moved to separate repository - -The `influx` CLI has been moved to its [own GitHub repository](https://github.com/influxdata/influx-cli/). Release artifacts -produced by `influxdb` are impacted as follows: - -* Release archives (`.tar.gz` and `.zip`) no longer contain the `influx` binary. -* The `influxdb2` package (`.deb` and `.rpm`) no longer contains the `influx` binary. Instead, it declares a recommended - dependency on the new `influx-cli` package. -* The `quay.io/influxdb/influxdb` image no longer contains the `influx` binary. Users are recommended to migrate to the - `influxdb` image hosted in DockerHub. - -With this change, versions of the `influx` CLI and `influxd` server are not guaranteed to exactly match. Please use -`influxd version` or `curl /health` when checking the version of the installed/running server. - -### Notebooks and Annotations - -Support for Notebooks and Annotations is included with this release. - -### SQLite Metadata Store - -This release adds an embedded SQLite database for storing metadata required by the latest UI features like Notebooks and Annotations. - -### Features - -1. [19811](https://github.com/influxdata/influxdb/pull/19811): Add Geo graph type to be able to store in Dashboard cells. -1. [21218](https://github.com/influxdata/influxdb/pull/21218): Add the properties of a static legend for line graphs and band plots. -1. [21367](https://github.com/influxdata/influxdb/pull/21367): List users via the API now supports pagination -1. [21531](https://github.com/influxdata/influxdb/pull/21531): Remove feature flags for permanent UI features -1. [21543](https://github.com/influxdata/influxdb/pull/21543): Added `influxd` configuration flag `--sqlite-path` for specifying a user-defined path to the SQLite database file -1. [21543](https://github.com/influxdata/influxdb/pull/21543): Updated `influxd` configuration flag `--store` to work with string values `disk` or `memory`. Memory continues to store metadata in-memory for testing; `disk` will persist metadata to disk via bolt and SQLite -1. [21547](https://github.com/influxdata/influxdb/pull/21547): Allow hiding the tooltip independently of the static legend -1. [21584](https://github.com/influxdata/influxdb/pull/21584): Added the `api/v2/backup/metadata` endpoint for backing up both KV and SQL metadata, and the `api/v2/restore/sql` for restoring SQL metadata. -1. [21635](https://github.com/influxdata/influxdb/pull/21635): Port `influxd inspect verify-seriesfile` to 2.x -1. [21621](https://github.com/influxdata/influxdb/pull/21621): Add `storage-wal-max-concurrent-writes` config option to `influxd` to enable tuning memory pressure under heavy write load. -1. 
[21621](https://github.com/influxdata/influxdb/pull/21621): Add `storage-wal-max-write-delay` config option to `influxd` to prevent deadlocks when the WAL is overloaded with concurrent writes. -1. [21615](https://github.com/influxdata/influxdb/pull/21615): Ported the `influxd inspect verify-tsm` command from 1.x. -1. [21646](https://github.com/influxdata/influxdb/pull/21646): Ported the `influxd inspect verify-tombstone` command from 1.x. -1. [21761](https://github.com/influxdata/influxdb/pull/21761): Ported the `influxd inspect dump-tsm` command from 1.x. -1. [21788](https://github.com/influxdata/influxdb/pull/21788): Ported the `influxd inspect report-tsi` command from 1.x. -1. [21784](https://github.com/influxdata/influxdb/pull/21784): Ported the `influxd inspect dumptsi` command from 1.x. -1. [21786](https://github.com/influxdata/influxdb/pull/21786): Ported the `influxd inspect deletetsm` command from 1.x. -1. [21888](https://github.com/influxdata/influxdb/pull/21888): Ported the `influxd inspect dump-wal` command from 1.x. -1. [21828](https://github.com/influxdata/influxdb/pull/21828): Added the command `influxd inspect verify-wal`. -1. [21814](https://github.com/influxdata/influxdb/pull/21814): Ported the `influxd inspect report-tsm` command from 1.x. -1. [21936](https://github.com/influxdata/influxdb/pull/21936): Ported the `influxd inspect build-tsi` command from 1.x. -1. [21938](https://github.com/influxdata/influxdb/pull/21938): Added route to delete individual secret. -1. [21972](https://github.com/influxdata/influxdb/pull/21972): Added support for notebooks and annotations. -1. [22072](https://github.com/influxdata/influxdb/pull/22072): Added `--flux-log-enabled` option to `influxd` to show detailed logs for flux queries. -1. [22135](https://github.com/influxdata/influxdb/pull/22135): Added route to return known resources. -1. [22311](https://github.com/influxdata/influxdb/pull/22311): Add `storage-no-validate-field-size` config to `influxd` to disable enforcement of max field size. -1. [22316](https://github.com/influxdata/influxdb/pull/22316): Optimize series iteration for queries that can be answered without inspecting TSM data. -1. [22322](https://github.com/influxdata/influxdb/pull/22322): Add support for `merge_hll`, `sum_hll`, and `count_hll` in InfluxQL. - -### Bug Fixes - -1. [21648](https://github.com/influxdata/influxdb/pull/21648): Change static legend's `hide` to `show` to let users decide if they want it. -1. [22186](https://github.com/influxdata/influxdb/pull/22186): Preserve comments in flux queries when saving task definitions. -1. [22228](https://github.com/influxdata/influxdb/pull/22228): influxdb2 packages should depend on curl. -1. [22211](https://github.com/influxdata/influxdb/pull/22211): Prevent scheduling an inactivated task after updating it. -1. [22235](https://github.com/influxdata/influxdb/pull/22235): Avoid compaction queue stats flutter. -1. [22272](https://github.com/influxdata/influxdb/pull/22272): Requests to `/api/v2/authorizations` filter correctly on `org` and `user` parameters. -1. [22311](https://github.com/influxdata/influxdb/pull/22311): Enforce max field size while parsing line protocol. -1. [22334](https://github.com/influxdata/influxdb/pull/22334): Periodically compact old and large TSI files.
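Since the `influx` CLI now ships and versions separately from the server, the notes above recommend `influxd version` or a request to `/health` to confirm which server build is running. The snippet below is an illustrative sketch only, not part of any change in this release: it reads the version from the `/health` endpoint, assuming the server listens on the default `http-bind-address` of `localhost:8086` and that the health payload includes a `version` field, as recent 2.x builds report.

```go
// Illustrative sketch: read the running server's version from /health rather
// than relying on the separately versioned influx CLI.
// Assumes the default bind address (localhost:8086) and a "version" field in
// the health payload; adjust the URL for non-default deployments.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

type health struct {
	Name    string `json:"name"`
	Status  string `json:"status"`
	Version string `json:"version"`
}

func main() {
	resp, err := http.Get("http://localhost:8086/health")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var h health
	if err := json.NewDecoder(resp.Body).Decode(&h); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s reports status %q, version %s\n", h.Name, h.Status, h.Version)
}
```

On the host itself, `influxd version` reports the server version directly.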
- -## v2.0.8 [2021-08-13] - -### Go Version - -This release upgrades the project to `go` version 1.16. - -#### Minimum macOS Version - -Because of the version bump to `go`, the macOS build for this release requires at least version 10.12 Sierra to run. - -### Features - -1. [21910](https://github.com/influxdata/influxdb/pull/21910): Added `--ui-disabled` option to `influxd` to allow for running with the UI disabled. -1. [21958](https://github.com/influxdata/influxdb/pull/21958): Telemetry improvements: Do not record telemetry data for non-existent paths; replace invalid static asset paths with a slug. -1. [22023](https://github.com/influxdata/influxdb/pull/22023): Upgrade Flux to v0.124.0. - -### Bug Fixes - -1. [21610](https://github.com/influxdata/influxdb/pull/21610): Avoid rewriting `fields.idx` unnecessarily. -1. [21662](https://github.com/influxdata/influxdb/pull/21662): Do not close connection twice in DigestWithOptions. -1. [21691](https://github.com/influxdata/influxdb/pull/21691): Remove incorrect optimization for group-by. -1. [21747](https://github.com/influxdata/influxdb/pull/21747): Rename arm rpms with yum-compatible names. -1. [21800](https://github.com/influxdata/influxdb/pull/21800): Return an error instead of panicking when InfluxQL statement rewrites fail. -1. [21802](https://github.com/influxdata/influxdb/pull/21802): Removed unused `chronograf-migrator` package & chronograf API service, and updated various "chronograf" references. -1. [21839](https://github.com/influxdata/influxdb/pull/21839): Fix display and parsing of `influxd upgrade` CLI prompts in PowerShell. -1. [21840](https://github.com/influxdata/influxdb/pull/21840): Migrate restored KV snapshots to latest schema before using them. -1. [21844](https://github.com/influxdata/influxdb/pull/21844): Upgrade to latest version of `influxdata/cron` so that tasks can be created with an interval of `every: 1w`. -1. [21849](https://github.com/influxdata/influxdb/pull/21849): Specify which fields are missing when rejecting an incomplete onboarding request. -1. [21850](https://github.com/influxdata/influxdb/pull/21850): Systemd unit should block on startup until http endpoint is ready. -1. [21925](https://github.com/influxdata/influxdb/pull/21925): Upgrade to golang-jwt 3.2.1. -1. [21946](https://github.com/influxdata/influxdb/pull/21946): Prevent silently dropped writes when there are overlapping shards. -1. [21950](https://github.com/influxdata/influxdb/pull/21950): Invalid requests to /api/v2 subroutes now return 404 instead of a list of links. -1. [21962](https://github.com/influxdata/influxdb/pull/21962): Flux metaqueries for `_field` take fast path if `_measurement` is the only predicate. -1. [22059](https://github.com/influxdata/influxdb/pull/22059): Copy names from mmapped memory before closing iterator. -1. [22174](https://github.com/influxdata/influxdb/pull/22174): systemd service -- handle 40x and block indefinitely. - -## v2.0.7 [2021-06-04] - -### Features - -1. [21539](https://github.com/influxdata/influxdb/pull/21539): Upgrade Flux to v0.117.0. -1. [21519](https://github.com/influxdata/influxdb/pull/21519): Optimize `table.fill()` execution within Flux aggregate windows. - -### Bug Fixes - -1. 
[21318](https://github.com/influxdata/influxdb/pull/21318): Fix off-by-one error in query range calculation over partially compacted data. -1. [21345](https://github.com/influxdata/influxdb/pull/21345): Deprecate the unsupported `PostSetupUser` API. -1. [21356](https://github.com/influxdata/influxdb/pull/21356): Disable MergeFiltersRule until it is more stable. -1. [21369](https://github.com/influxdata/influxdb/pull/21369): Add limits to the `/api/v2/delete` endpoint for start and stop times with error messages. -1. [21375](https://github.com/influxdata/influxdb/pull/21375): Add logging to NATS streaming server to help debug startup failures. -1. [21477](https://github.com/influxdata/influxdb/pull/21477): Accept `--input` instead of a positional arg in `influx restore`. -1. [21477](https://github.com/influxdata/influxdb/pull/21477): Print error instead of panicking when `influx restore` fails to find backup manifests. -1. [21481](https://github.com/influxdata/influxdb/pull/21481): Set last-modified time of empty shard directory to the directory's mod time instead of Unix epoch. -1. [21486](https://github.com/influxdata/influxdb/pull/21486): Remove erroneous dependency on istio. -1. [21522](https://github.com/influxdata/influxdb/pull/21522): Replace telemetry file name with slug for `ttf`, `woff`, and `eot` files. -1. [21540](https://github.com/influxdata/influxdb/pull/21540): Enable use of absolute path for `--upgrade-log` when running `influxd upgrade` on Windows. -1. [21545](https://github.com/influxdata/influxdb/pull/21545): Make InfluxQL meta queries respect query timeouts. - -## v2.0.6 [2021-04-29] - -### Bug Fixes - -1. [21321](https://github.com/influxdata/influxdb/pull/21321): Ensure query config written by influxd upgrade is valid. -1. [21324](https://github.com/influxdata/influxdb/pull/21324): Revert to nonzero defaults for `query-concurrency` and `query-queue-size` to avoid validation failures for upgrading users. -1. [21324](https://github.com/influxdata/influxdb/pull/21324): Don't fail validation when `query-concurrency` is 0 and `query-queue-size` is > 0. - -## v2.0.5 [2021-04-27] - -### Windows Support - -This release includes our initial Windows preview build. - -### Breaking Changes - -#### /debug/vars removed - -Prior to this release, the `influxd` server would always expose profiling information over `/debug/vars`. -This endpoint was unauthenticated, and not used by InfluxDB systems to report diagnostics. For security and clarity, -the endpoint has been removed. Use the `/metrics` endpoint to collect system statistics. - -#### `influx transpile` removed - -The `transpile` command has been retired. Users can send InfluxQL directly to the server via the `/api/v2/query` -or `/query` HTTP endpoints. - -#### Default query concurrency changed - -The default setting for the max number of concurrent Flux queries has been changed from 10 to unlimited. Set the -`query-concurrency` config parameter to > 0 when running `influxd` to re-limit the maximum running query count, -and the `query-queue-size` config parameter to > 0 to set the max number of queries that can be queued before the -server starts rejecting requests. - -#### Prefix for query-controller metrics changed - -The prefix used for Prometheus metrics from the query controller has changed from `query_control_` to `qc_`. - -### Features - -1. 
[20621](https://github.com/influxdata/influxdb/pull/20621): Add Swift client library to the data loading section of the UI. -1. [20307](https://github.com/influxdata/influxdb/pull/20307): Add `influx task retry-failed` command to rerun failed runs. -1. [20759](https://github.com/influxdata/influxdb/pull/20759): Add additional properties for Mosaic Graph. -1. [20763](https://github.com/influxdata/influxdb/pull/20763): Add `--compression` option to `influx write` to support GZIP inputs. -1. [20827](https://github.com/influxdata/influxdb/pull/20827): Add `--pprof-disabled` option to `influxd` to disable exposing profiling information over HTTP. -1. [20827](https://github.com/influxdata/influxdb/pull/20827): Add `/debug/pprof/all` HTTP endpoint to gather all profiles at once. -1. [20827](https://github.com/influxdata/influxdb/pull/20827): Upgrade `http.pprof-enabled` config in `influxd upgrade`. -1. [20911](https://github.com/influxdata/influxdb/pull/20911): Add support for explicitly setting shard-group durations on buckets. Thanks @hinst! -1. [20882](https://github.com/influxdata/influxdb/pull/20882): Rewrite regex conditions in InfluxQL subqueries for performance. Thanks @yujiahaol68! -1. [20963](https://github.com/influxdata/influxdb/pull/20963): Add `--metrics-disabled` option to `influxd` to disable exposing Prometheus metrics over HTTP. -1. [20971](https://github.com/influxdata/influxdb/pull/20971): Add `--http-read-header-timeout`, `--http-read-timeout`, `--http-write-timeout`, and `--http-idle-timeout` options to `influxd`. -1. [20971](https://github.com/influxdata/influxdb/pull/20971): Set a default `--http-read-header-timeout` of 10s in `influxd`. -1. [20971](https://github.com/influxdata/influxdb/pull/20971): Set a default `--http-idle-timeout` of 3m in `influxd`. -1. [20861](https://github.com/influxdata/influxdb/pull/20861): Update Telegraf plugins in UI to include additions and changes in 1.18 release. -1. [20894](https://github.com/influxdata/influxdb/pull/20894): Display task IDs in the UI. -1. [21046](https://github.com/influxdata/influxdb/pull/21046): Write to standard out when `--output-path -` is passed to `influxd inspect export-lp`. -1. [21006](https://github.com/influxdata/influxdb/pull/21006): Add `-p, --profilers` flag to `influx query` command. -1. [21090](https://github.com/influxdata/influxdb/pull/21090): Update UI to match InfluxDB Cloud. -1. [21127](https://github.com/influxdata/influxdb/pull/21127): Allow for disabling concurrency-limits in Flux controller. -1. [21158](https://github.com/influxdata/influxdb/pull/21158): Replace unique resource IDs (UI assets, backup shards) with slugs to reduce cardinality of telemetry data. -1. [21235](https://github.com/influxdata/influxdb/pull/21235): HTTP server errors output logs following the standard format. -1. [21255](https://github.com/influxdata/influxdb/pull/21255): Upgrade Flux to v0.113.0. -1. [21364](https://github.com/influxdata/influxdb/pull/21364): Update Static Legend properties to allow disabling without nulling - -### Bug Fixes - -1. [20705](https://github.com/influxdata/influxdb/pull/20705): Repair swagger to match implementation of DBRPs type. -1. [19936](https://github.com/influxdata/influxdb/pull/19936): Fix use-after-free bug in series ID iterator. Thanks @foobar! -1. 
[20585](https://github.com/influxdata/influxdb/pull/20585): Fix TSM WAL segment size check. Thanks @foobar! -1. [20754](https://github.com/influxdata/influxdb/pull/20754): Update references to docs site to use current URLs. -1. [20773](https://github.com/influxdata/influxdb/pull/20773): Fix data race in TSM engine when inspecting tombstone stats. -1. [20797](https://github.com/influxdata/influxdb/pull/20797): Fix data race in TSM cache. Thanks @StoneYunZhao! -1. [20811](https://github.com/influxdata/influxdb/pull/20811): Fix TSM WAL segment size computing. Thanks @StoneYunZhao! -1. [20798](https://github.com/influxdata/influxdb/pull/20798): Deprecate misleading `retentionPeriodHrs` key in onboarding API. -1. [20819](https://github.com/influxdata/influxdb/pull/20819): Fix Single Stat graphs with thresholds crashing on negative values. -1. [20809](https://github.com/influxdata/influxdb/pull/20809): Fix InfluxDB port in Flux function UI examples. Thanks @sunjincheng121! -1. [20827](https://github.com/influxdata/influxdb/pull/20827): Remove unauthenticated, unsupported `/debug/vars` HTTP endpoint. -1. [20856](https://github.com/influxdata/influxdb/pull/20856): Respect 24-hour clock formats in the UI and allow more choices. -1. [20875](https://github.com/influxdata/influxdb/pull/20875): Prevent "do not have an execution context" error when parsing Flux options in tasks. -1. [20932](https://github.com/influxdata/influxdb/pull/20932): Prevent time field names from being formatted in the Table visualization. -1. [20929](https://github.com/influxdata/influxdb/pull/20929): Log error details when `influxd upgrade` fails to migrate databases. -1. [20921](https://github.com/influxdata/influxdb/pull/20921): Fix the cipher suite used when TLS strict ciphers are enabled in `influxd`. -1. [20925](https://github.com/influxdata/influxdb/pull/20925): Fix parse error in UI for tag filters containing regex meta characters. -1. [21042](https://github.com/influxdata/influxdb/pull/21042): Prevent concurrent access panic when gathering bolt metrics. -1. [21127](https://github.com/influxdata/influxdb/pull/21127): Fix race condition in Flux controller shutdown. -1. [21228](https://github.com/influxdata/influxdb/pull/21228): Reduce lock contention when adding new fields and measurements. -1. [21232](https://github.com/influxdata/influxdb/pull/21232): Escape dots in community templates hostname regex. -1. [21140](https://github.com/influxdata/influxdb/pull/21140): Use descending cursor when needed in pushed-down aggregate Flux queries. - -## v2.0.4 [2021-02-08] - -### Docker - -#### ARM64 - -This release extends the Docker builds hosted in `quay.io` to support the `linux/arm64` platform. - -#### 2.x nightly images - -Prior to this release, competing nightly builds caused the `nightly` Docker tag to contain outdated -binaries. This conflict has been fixed, and the image tagged with `nightly` will now contain `2.x` -binaries built from the `HEAD` of the `master` branch. - -### Breaking Changes - -#### inmem index option removed - -This release fully removes the `inmem` indexing option, along with the associated config options: - -- `max-series-per-database` -- `max-values-per-tag` - -Replacement `tsi1` indexes will be automatically generated on startup for shards that need it.
- -#### Artifact naming conventions - -The names of artifacts produced by our nightly & release builds have been updated according to the -[Google developer guidelines](https://developers.google.com/style/filenames). Underscores (`_`) have -been replaced by hyphens (`-`) in nearly all cases; the one exception is the use of `x86_64` in our -RPM packages, which has been left unchanged. - -### Features - -1. [20473](https://github.com/influxdata/influxdb/pull/20473): Add `--overwrite-existing-v2` flag to `influxd upgrade` to overwrite existing files at output paths (instead of aborting). -1. [20524](https://github.com/influxdata/influxdb/pull/20524): Add `influxd print-config` command to support automated config inspection. -1. [20561](https://github.com/influxdata/influxdb/pull/20561): Add `nats-port` config option for `influxd` server. -1. [20564](https://github.com/influxdata/influxdb/pull/20564): Add `nats-max-payload-bytes` config option for `influxd` server. -1. [20467](https://github.com/influxdata/influxdb/pull/20467): Add `influxd inspect export-lp` command to extract data in line-protocol format. -1. [20604](https://github.com/influxdata/influxdb/pull/20604): Update telegraf plugins list in UI to include Beat, Intel PowerStats, and Riemann. -1. [20634](https://github.com/influxdata/influxdb/pull/20634): Promote schema and fill query optimizations to default behavior. -1. [20678](https://github.com/influxdata/influxdb/pull/20678): Upgrade Flux to v0.104.0 -1. [20680](https://github.com/influxdata/influxdb/pull/20680): UI: Upgrade flux-lsp-browser to v0.5.31 - -### Bug Fixes - -1. [20339](https://github.com/influxdata/influxdb/pull/20339): Include upgrade helper script in goreleaser manifest. -1. [20348](https://github.com/influxdata/influxdb/pull/20348): Don't show the upgrade notice on fresh `influxdb2` installs. -1. [20348](https://github.com/influxdata/influxdb/pull/20348): Ensure `config.toml` is initialized on fresh `influxdb2` installs. -1. [20349](https://github.com/influxdata/influxdb/pull/20349): Ensure `influxdb` service sees default env variables when running under `init.d`. -1. [20317](https://github.com/influxdata/influxdb/pull/20317): Don't ignore failures to set password during initial user onboarding. -1. [20362](https://github.com/influxdata/influxdb/pull/20362): Don't overwrite stack name/description on `influx stack update`. -1. [20355](https://github.com/influxdata/influxdb/pull/20355): Fix timeout setup for `influxd` graceful shutdown. -1. [20387](https://github.com/influxdata/influxdb/pull/20387): Improve error message shown when `influx` CLI can't find an org by name. -1. [20380](https://github.com/influxdata/influxdb/pull/20380): Remove duplication from task error messages. -1. [20313](https://github.com/influxdata/influxdb/pull/20313): Automatically build `tsi1` indexes for shards that need it instead of falling back to `inmem`. -1. [20313](https://github.com/influxdata/influxdb/pull/20313): Fix logging initialization for storage engine. -1. [20442](https://github.com/influxdata/influxdb/pull/20442): Don't return 500 codes for partial write failures. -1. [20440](https://github.com/influxdata/influxdb/pull/20440): Add confirmation step w/ file sizes before copying data files in `influxd upgrade`. -1. 
[20409](https://github.com/influxdata/influxdb/pull/20409): Improve messages in DBRP API validation errors. -1. [20489](https://github.com/influxdata/influxdb/pull/20489): Improve error message when opening BoltDB with unsupported file system options. -1. [20490](https://github.com/influxdata/influxdb/pull/20490): Fix silent failure to register CLI args as required. -1. [20522](https://github.com/influxdata/influxdb/pull/20522): Fix loading config when INFLUXD_CONFIG_PATH points to a `.yml` file. -1. [20527](https://github.com/influxdata/influxdb/pull/20527): Don't leak .tmp files while backing up shards. -1. [20527](https://github.com/influxdata/influxdb/pull/20527): Allow backups to complete while a snapshot is in progress. -1. [20539](https://github.com/influxdata/influxdb/pull/20539): Prevent extra output row from GROUP BY crossing DST boundary. -1. [20548](https://github.com/influxdata/influxdb/pull/20548): Prevent panic in `influxd upgrade` when V1 users exist and no V1 config is given. -1. [20565](https://github.com/influxdata/influxdb/pull/20565): Set correct Content-Type on v1 query responses. -1. [20565](https://github.com/influxdata/influxdb/pull/20565): Update V1 API spec to document all valid Accept headers and matching Content-Types. -1. [20578](https://github.com/influxdata/influxdb/pull/20578): Respect the --skip-verify flag when running `influx query`. -1. [20495](https://github.com/influxdata/influxdb/pull/20495): Update Flux functions list in UI to reflect that `v1` package was renamed to `schema`. -1. [20669](https://github.com/influxdata/influxdb/pull/20669): Remove blank lines from payloads sent by `influx write`. -1. [20657](https://github.com/influxdata/influxdb/pull/20657): Allow for creating users without initial passwords in `influx user create`. -1. [20679](https://github.com/influxdata/influxdb/pull/20679): Fix incorrect "bucket not found" errors when passing `--bucket-id` to `influx write`. -1. [20702](https://github.com/influxdata/influxdb/pull/20702): Fix loading config when `INFLUXD_CONFIG_PATH` points to a directory with `.` in its name. -1. [20678](https://github.com/influxdata/influxdb/pull/20678): Fix infinite loop in Flux parser caused by invalid array expressions. -1. [20360](https://github.com/influxdata/influxdb/pull/20360): Update API spec to document Flux dictionary features. - -## v2.0.3 [2020-12-14] - -### ARM Support - -This release includes our initial ARM64 preview build. - -### Breaking Changes - -#### influxd upgrade - -Previously, `influxd upgrade` would attempt to write upgraded `config.toml` files into the same directory as the source -`influxdb.conf` file. If this failed, a warning would be logged and `config.toml` would be written into the `HOME` directory. - -This release breaks this behavior in two ways: - -1. By default, `config.toml` is now written into the same directory as the Bolt DB and engine files (`~/.influxdbv2/`) -2. If writing upgraded config fails, the `upgrade` process exits with an error instead of falling back to the `HOME` directory - -Users can use the new `--v2-config-path` option to override the output path for upgraded config if they can't or don't -want to use the default. - -#### v2 packaging - -Based on community feedback, the v2 deb and rpm packaging has been improved to avoid confusion between versions. 
The package -name is now influxdb2 and conflicts with any previous influxdb package (including initial 2.0.0, 2.0.1, and 2.0.2 packages). -Additionally, v2 specific path defaults are now defined and helper scripts are provided for `influxd upgrade` and cleanup cases. - -### Features - -1. [20123](https://github.com/influxdata/influxdb/pull/20123): Allow password to be specified as a CLI option in `influx v1 auth create`. -1. [20123](https://github.com/influxdata/influxdb/pull/20123): Allow password to be specified as a CLI option in `influx v1 auth set-password`. -1. [20110](https://github.com/influxdata/influxdb/pull/20110): Allow for users to specify where V2 config should be written in `influxd upgrade`. -1. [20204](https://github.com/influxdata/influxdb/pull/20204): Improve ID-related error messages for `influx v1 dbrp` commands. -1. [20236](https://github.com/influxdata/influxdb/pull/20236): Delete with predicate. -1. [20322](https://github.com/influxdata/influxdb/pull/20322): Upgrade Flux to v0.99.0. -1. [20327](https://github.com/influxdata/influxdb/pull/20327): Upgrade flux-lsp-browser to v0.5.26. - -### Bug Fixes - -1. [20110](https://github.com/influxdata/influxdb/pull/20110): Use V2 directory for default V2 config path in `influxd upgrade`. -1. [20137](https://github.com/influxdata/influxdb/pull/20137): Fix panic when writing a point with 100 tags. Thanks @foobar! -1. [20151](https://github.com/influxdata/influxdb/pull/20151): Don't log bodies of V1 write requests. -1. [20097](https://github.com/influxdata/influxdb/pull/20097): Ensure Index.Walk fetches matching foreign keys only. -1. [20149](https://github.com/influxdata/influxdb/pull/20149): Enforce max value of 2147483647 on query concurrency to avoid startup panic. -1. [20149](https://github.com/influxdata/influxdb/pull/20149): Enforce max value of 2147483647 on query queue size to avoid startup panic. -1. [20168](https://github.com/influxdata/influxdb/pull/20168): Auto-migrate existing DBRP mappings from old schema to avoid panic. -1. [20201](https://github.com/influxdata/influxdb/pull/20201): Optimize shard lookup in groups containing only one shard. Thanks @StoneYunZhao! -1. [20155](https://github.com/influxdata/influxdb/pull/20155): Respect the `--name` option in `influx setup` whether configs already exist or not. -1. [20155](https://github.com/influxdata/influxdb/pull/20155): Allow for 0 (infinite) values for `--retention` in `influx setup`. -1. [20305](https://github.com/influxdata/influxdb/pull/20305): Set v2 default paths and provide upgrade helper scripts in release packages - -## v2.0.2 [2020-11-19] - -### Features - -1. [19979](https://github.com/influxdata/influxdb/pull/19979): Added functionality to filter task runs by time. -1. [20036](https://github.com/influxdata/influxdb/pull/20036): Warn if V1 users are upgraded, but V1 auth wasn't enabled. -1. [20039](https://github.com/influxdata/influxdb/pull/20039): Export 1.x CQs as part of `influxd upgrade`. -1. [20053](https://github.com/influxdata/influxdb/pull/20053): Upgrade Flux to v0.95.0. -1. [20058](https://github.com/influxdata/influxdb/pull/20058): UI: Upgrade flux-lsp-browser to v0.5.23. -1. [20067](https://github.com/influxdata/influxdb/pull/20067): Add DBRP cli commands as `influxd v1 dbrp`. - -### Bug Fixes - -1. 
[19987](https://github.com/influxdata/influxdb/pull/19987): Fix various typos. Thanks @kumakichi! -1. [19991](https://github.com/influxdata/influxdb/pull/19991): Use --skip-verify flag for backup/restore CLI command. -1. [19995](https://github.com/influxdata/influxdb/pull/19995): Don't auto-print help on influxd errors -1. [20008](https://github.com/influxdata/influxdb/pull/20008): Add locking during TSI iterator creation. -1. [20012](https://github.com/influxdata/influxdb/pull/20012): Validate input paths to `influxd upgrade` up-front. -1. [20015](https://github.com/influxdata/influxdb/pull/20015): Add same site strict flag to session cookie. -1. [20017](https://github.com/influxdata/influxdb/pull/20017): Don't include duplicates for SHOW DATABASES -1. [20064](https://github.com/influxdata/influxdb/pull/20064): Ensure Flux reads across all shards. -1. [20047](https://github.com/influxdata/influxdb/pull/20047): Allow scraper to ignore insecure certificates on a target. Thanks @cmackenzie1! -1. [20076](https://github.com/influxdata/influxdb/pull/20076): Remove internal `influxd upgrade` subcommands from help text. -1. [20074](https://github.com/influxdata/influxdb/pull/20074): Use default DBRP mapping on V1 write when no RP is specified. -1. [20091](https://github.com/influxdata/influxdb/pull/20091): Make the DBRP http API match the swagger spec. - -## v2.0.1 [2020-11-10] - -### Bug Fixes - -1. [19918](https://github.com/influxdata/influxdb/pull/19918): Swagger: add operationId to /delete -1. [19967](https://github.com/influxdata/influxdb/pull/19967): Upgrade: add log-level option -1. [19969](https://github.com/influxdata/influxdb/pull/19969): Check for existing 2.x CLI configs file -1. [19971](https://github.com/influxdata/influxdb/pull/19971): Swagger: remove Invites from swagger -1. [19972](https://github.com/influxdata/influxdb/pull/19972): Remove obsolete unused option (influx-command-path) -1. [19980](https://github.com/influxdata/influxdb/pull/19980): check write permission in legacy write path - -## v2.0.0 [2020-11-09] - -### Features - -1. [19935](https://github.com/influxdata/influxdb/pull/19935): Improve the UI for the influx v1 auth commands -1. [19940](https://github.com/influxdata/influxdb/pull/19940): Update Flux to v0.94.0 -1. [19943](https://github.com/influxdata/influxdb/pull/19943): Upgrade flux-lsp-browser to v0.5.22 -1. [19946](https://github.com/influxdata/influxdb/pull/19946): Adding RAS telegraf input - -### Bug Fixes - -1. [19924](https://github.com/influxdata/influxdb/pull/19924): Remove unused 'security-script' option from upgrade command -1. [19925](https://github.com/influxdata/influxdb/pull/19937): Create CLI configs in `influxd upgrade` -1. [19928](https://github.com/influxdata/influxdb/pull/19928): Fix parsing of retention policy CLI args in `influx setup` and `influxd upgrade` -1. [19930](https://github.com/influxdata/influxdb/pull/19930): Replace 0 with MaxInt when upgrading query-concurrency -1. [19937](https://github.com/influxdata/influxdb/pull/19937): Create CLI configs -1. [19939](https://github.com/influxdata/influxdb/pull/19939): Make influxd help more specific -1. [19945](https://github.com/influxdata/influxdb/pull/19945): Allow write-only V1 tokens to find DBRPs -1. 
[19947](https://github.com/influxdata/influxdb/pull/19947): Updating v1 auth description -1. [19952](https://github.com/influxdata/influxdb/pull/19952): Use `db`/`rp` naming convention when migrating DBs to buckets -1. [19956](https://github.com/influxdata/influxdb/pull/19956): Improve help for --no-password switch -1. [19959](https://github.com/influxdata/influxdb/pull/19959): Use 10 instead of MaxInt when rewriting query-concurrency -1. [19960](https://github.com/influxdata/influxdb/pull/19960): Remove bucket and mapping auto-creation from v1 /write API -1. [19885](https://github.com/influxdata/influxdb/pull/19875): Misuse of reflect.SliceHeader - -## v2.0.0-rc.4 [2020-11-05] - -### Features - -1. [19854](https://github.com/influxdata/influxdb/pull/19854): Use v1 authorization for users upgrade -1. [19855](https://github.com/influxdata/influxdb/pull/19855): Enable window pushdowns -1. [19864](https://github.com/influxdata/influxdb/pull/19864): Implement backup/restore CLI subcommands -1. [19865](https://github.com/influxdata/influxdb/pull/19865): Implementation of v1 authorization -1. [19879](https://github.com/influxdata/influxdb/pull/19879): Make sure the query plan nodes have unique ids -1. [19881](https://github.com/influxdata/influxdb/pull/19881): Update Flux to v0.93.0 - -### Bug Fixes - -1. [19685](https://github.com/influxdata/influxdb/pull/19685): Cloning tasks makes actions shared in task list view -1. [19712](https://github.com/influxdata/influxdb/pull/19712): Reduce filesize of influx binary -1. [19819](https://github.com/influxdata/influxdb/pull/19819): Isolate telegraf config service and remove URM interactions -1. [19853](https://github.com/influxdata/influxdb/pull/19853): Use updated HTTP client for authorization service -1. [19856](https://github.com/influxdata/influxdb/pull/19856): Make tagKeys and tagValues work for edge cases involving fields -1. [19870](https://github.com/influxdata/influxdb/pull/19870): Correctly parse float as 64-bits -1. [19873](https://github.com/influxdata/influxdb/pull/19873): Add simple metrics related to installed templates -1. [19885](https://github.com/influxdata/influxdb/pull/19885): Remove extra multiplication of retention policies in onboarding -1. [19887](https://github.com/influxdata/influxdb/pull/19887): Use fluxinit package to init flux library instead of builtin -1. [19886](https://github.com/influxdata/influxdb/pull/19886): Add Logger to constructor function to ensure log field is initialized -1. [19894](https://github.com/influxdata/influxdb/pull/19894): Return empty iterator instead of null in tagValues -1. [19899](https://github.com/influxdata/influxdb/pull/19899): Docs: flux 0.92 functions -1. [19908](https://github.com/influxdata/influxdb/pull/19908): Fix /ready response content type - -## v2.0.0-rc.3 [2020-10-29] - -### Features - -1. [19807](https://github.com/influxdata/influxdb/pull/19807): Enable window agg mean pushdown -1. [19813](https://github.com/influxdata/influxdb/pull/19813): Aggregate array cursors -1. [19815](https://github.com/influxdata/influxdb/pull/19815): Create a v1 authorization service -1. [19826](https://github.com/influxdata/influxdb/pull/19826): Update Flux to v0.91.0 -1. 
[19829](https://github.com/influxdata/influxdb/pull/19829): Extend CLI with v1 authorization commands -1. [19839](https://github.com/influxdata/influxdb/pull/19839): Add tick generation properties and legendColorizeRows -1. [19840](https://github.com/influxdata/influxdb/pull/19840): Add bcrypt password support to v1 authorizations -1. [19850](https://github.com/influxdata/influxdb/pull/19850): Update generate ticks into an array of properties for each axis - -### Bug Fixes - -1. [19784](https://github.com/influxdata/influxdb/pull/19784): UI: bump papaparse from 4.6.3 to 5.2.0 -1. [19802](https://github.com/influxdata/influxdb/pull/19802): Docs: update PostDBRP docs to reflect mutual exclusive requirement of org vs orgID -1. [19804](https://github.com/influxdata/influxdb/pull/19804): Notifications: move rule service into own package -1. [19816](https://github.com/influxdata/influxdb/pull/19816): Type-convert fs.Bavail for portability -1. [19818](https://github.com/influxdata/influxdb/pull/19818): Notifications: isolate endpoint service -1. [19823](https://github.com/influxdata/influxdb/pull/19823): Clear Logout -1. [19825](https://github.com/influxdata/influxdb/pull/19825): Docs: Update FUZZ.md -1. [19828](https://github.com/influxdata/influxdb/pull/19828): Add 1.x compatible endpoints to swagger -1. [19833](https://github.com/influxdata/influxdb/pull/19833): allow newIndexSeriesCursor() to accept an influxql.Expr -1. [19834](https://github.com/influxdata/influxdb/pull/19834): Docs: Fix typos in http/swagger.yml -1. [19836](https://github.com/influxdata/influxdb/pull/19836): UI: import flux-lsp v0.5.21 -1. [19846](https://github.com/influxdata/influxdb/pull/19846): prune some unreferenced packages - -## v2.0.0-rc.2 [2020-10-21] - -### Features - -1. [19725](https://github.com/influxdata/influxdb/pull/19725): Add window agg result set -1. [19740](https://github.com/influxdata/influxdb/pull/19740): Provide means to remove stack without confirmation -1. [19750](https://github.com/influxdata/influxdb/pull/19750): Return error on failed resource addition -1. [19774](https://github.com/influxdata/influxdb/pull/19774): Update Flux to v0.90.0 - -### Bug Fixes - -1. [19465](https://github.com/influxdata/influxdb/pull/19465): Use valid flux in pkger test templates -1. [19773](https://github.com/influxdata/influxdb/pull/19773): Upgrade: fallback to user's home when saving upgraded config -1. [19775](https://github.com/influxdata/influxdb/pull/19775): Telegraf plugin updates (remove RAS for now) -1. [19776](https://github.com/influxdata/influxdb/pull/19776): TimeMachine: change derivative to 1s -1. [19789](https://github.com/influxdata/influxdb/pull/19789): Launcher: Switch to AuthorizationService from authorization package -1. [19780](https://github.com/influxdata/influxdb/pull/19780): Upgrade: proper default 2.x config filename -1. [19781](https://github.com/influxdata/influxdb/pull/19781): Upgrade: fixing typos and grammar errors - -## v2.0.0-rc.1 [2020-10-14] - -### Features - -1. [19641](https://github.com/influxdata/influxdb/pull/19641): Added `influx upgrade` command for upgrading from 1.x to 2.0 -1. [19746](https://github.com/influxdata/influxdb/pull/19746): Added Intel RDT and RAS Daemon telegraf plugins -1. 
[19731](https://github.com/influxdata/influxdb/pull/19731): Upgraded Flux to v0.89.0 - -### Bug Fixes - -1. [19708](https://github.com/influxdata/influxdb/pull/19708): Scrapers not working in RC0 -1. [19732](https://github.com/influxdata/influxdb/pull/19732): Update default value of list tasks influx CLI command to 100 -1. [19710](https://github.com/influxdata/influxdb/pull/19710): InfluxDB Templates: allow same duration unit identifiers that the tasks api allows -1. [19700](https://github.com/influxdata/influxdb/pull/19700): InfluxDB Templates: preserve cell colors on export/import -1. [19695](https://github.com/influxdata/influxdb/pull/19695): Influx CLI fix an issue where a single telegraf config was not being returned -1. [19593](https://github.com/influxdata/influxdb/pull/19593): Don't allow short passwords in `influx setup` - -## v2.0.0-rc.0 [2020-09-29] - -### Breaking Changes - -In the interests of simplifying the migration for existing users of InfluxDB 1.x, this -release includes significant breaking changes. - -**Upgrading from previous beta builds of `influxd` is not supported** - -In order to continue using `influxd` betas, users will be required to move all existing -data out of their `~/.influxdbv2` (or equivalent) path, including `influxd.bolt`. This -means all existing dashboards, tasks, integrations, alerts, users and tokens will need to -be recreated. The `influx export all` command may be used to export and re-import most -of this data. - -At this time, there is no tooling to convert existing time series data from previous -beta releases. If data from a prior beta release is found, `influxd` will refuse to start. - -We have also changed the default port of InfluxDB from 9999 back to 8086. If you still would like -to run on port 9999, you can start influxd with the `--http-bind-address` option. You will also -need to update any InfluxDB CLI config profiles with the new port number. - -1. [19446](https://github.com/influxdata/influxdb/pull/19446): Port TSM1 storage engine -1. [19494](https://github.com/influxdata/influxdb/pull/19494): Changing the default port from 9999 to 8086 -1. [19636](https://github.com/influxdata/influxdb/pull/19636): Disable unimplemented delete with predicate API - -### Features - -1. [18779](https://github.com/influxdata/influxdb/pull/18779): Add new processing options and enhancements to influx write. -1. [19246](https://github.com/influxdata/influxdb/pull/19246): Redesign load data page to increase discovery and ease of use -1. [19334](https://github.com/influxdata/influxdb/pull/19334): Add --active-config flag to influx to set config for single command -1. [19219](https://github.com/influxdata/influxdb/pull/19219): List buckets via the API now supports after (ID) parameter as an alternative to offset. -1. [19390](https://github.com/influxdata/influxdb/pull/19390): Record last success and failure run times in the Task -1. [19402](https://github.com/influxdata/influxdb/pull/19402): Inject Task's LatestSuccess Timestamp In Flux Extern -1. [19433](https://github.com/influxdata/influxdb/pull/19433): Add option to dump raw query results in CLI -1. [19506](https://github.com/influxdata/influxdb/pull/19506): Add TSM 1.x storage options as flags -1. [19508](https://github.com/influxdata/influxdb/pull/19508): Add subset of InfluxQL coordinator options as flags -1. 
[19457](https://github.com/influxdata/influxdb/pull/19457): Add ability to export resources by name via the CLI -1. [19640](https://github.com/influxdata/influxdb/pull/19640): Turn on Community Templates -1. [19663](https://github.com/influxdata/influxdb/pull/19663): Added InfluxDB v2 Listener, NSD, OPC-UA, and Windows Event Log to the sources page -1. [19662](https://github.com/influxdata/influxdb/pull/19662): Add `max-line-length` switch to `influx write` command to address `token too long` errors for large inputs -1. [19660](https://github.com/influxdata/influxdb/pull/19660): Add --rate-limit option to `influx write`. -1. [19740](https://github.com/influxdata/influxdb/pull/19740): Add `--force` option to `influx stack rm` to skip confirmation - -### Bug Fixes - -1. [19331](https://github.com/influxdata/influxdb/pull/19331): Add description to auth influx command outputs. -1. [19392](https://github.com/influxdata/influxdb/pull/19392): Include the edge of the boundary we are observing. -1. [19453](https://github.com/influxdata/influxdb/pull/19453): Warn about duplicate tag names during influx write csv. -1. [19466](https://github.com/influxdata/influxdb/pull/19466): Do not override existing line part in group annotation. -1. [19637](https://github.com/influxdata/influxdb/pull/19637): Added PATCH to the list of allowed methods - -## v2.0.0-beta.16 [2020-08-07] - -### Breaking - -1. [19066](https://github.com/influxdata/influxdb/pull/19066): Drop deprecated /packages route tree -1. [19116](https://github.com/influxdata/influxdb/pull/19116): Support more types for template envRef default value and require explicit default values -1. [19104](https://github.com/influxdata/influxdb/pull/19104): Remove orgs/labels nested routes from the API. -1. [19653](https://github.com/influxdata/influxdb/pull/19653): Remove PointBatcher from tsdb package API - -### Features - -1. [19075](https://github.com/influxdata/influxdb/pull/19075): Add resource links to a stack's resources from public HTTP API list/read calls -1. [19103](https://github.com/influxdata/influxdb/pull/19103): Enhance resource creation experience when limits are reached -1. [19223](https://github.com/influxdata/influxdb/pull/19223): Add dashboards command to influx CLI -1. [19225](https://github.com/influxdata/influxdb/pull/19225): Allow user onboarding to optionally set passwords -1. [18841](https://github.com/influxdata/influxdb/pull/18841): Limit query response sizes for queries built in QueryBuilder by requiring an aggregate window -1. [19135](https://github.com/influxdata/influxdb/pull/19135): Add telegram notification. - -### Bug Fixes - -1. [19043](https://github.com/influxdata/influxdb/pull/19043): Enforce all influx CLI flag args are valid -1. [19188](https://github.com/influxdata/influxdb/pull/19188): Dashboard cells correctly map results when multiple queries exist -1. [19146](https://github.com/influxdata/influxdb/pull/19146): Dashboard cells and overlay use UTC as query time when toggling to UTC timezone -1. [19222](https://github.com/influxdata/influxdb/pull/19222): Bucket names may not include quotation marks -1. [19317](https://github.com/influxdata/influxdb/pull/19317): Add validation to Variable name creation for valid Flux identifiers. - -### UI Improvements - -1. 
[19231](https://github.com/influxdata/influxdb/pull/19231): Alerts page filter inputs now have tab indices for keyboard navigation -1. [19364](https://github.com/influxdata/influxdb/pull/19364): Errors in OSS are now properly printed to the console - -## v2.0.0-beta.15 [2020-07-23] - -### Breaking - -1. [19004](https://github.com/influxdata/influxdb/pull/19004): Removed the `migrate` command from the `influxd` binary. -1. [18921](https://github.com/influxdata/influxdb/pull/18921): Restricted UI variable names to not clash with Flux reserved words - -### Features - -1. [18888](https://github.com/influxdata/influxdb/pull/18888): Add event source to influx stack operations -1. [18910](https://github.com/influxdata/influxdb/pull/18910): Add uninstall functionality for stacks -1. [18912](https://github.com/influxdata/influxdb/pull/18912): Drop deprecated influx pkg command tree -1. [18997](https://github.com/influxdata/influxdb/pull/18997): Add telegraf management commands to influx CLI -1. [19030](https://github.com/influxdata/influxdb/pull/19030): Enable dynamic destination for the influx CLI configs file -1. [19029](https://github.com/influxdata/influxdb/pull/19029): Navigating away from a dashboard cancels all pending queries -1. [19003](https://github.com/influxdata/influxdb/pull/19003): Upgrade to Flux v0.74.0 -1. [19040](https://github.com/influxdata/influxdb/pull/19040): Drop the REPL command from influx CLI -1. [19032](https://github.com/influxdata/influxdb/pull/19032): Redesign asset & rate limit alerts - -### Bug Fixes - -1. [18891](https://github.com/influxdata/influxdb/pull/18891): Allow 0 to be the custom set minimum value for Y Domain -1. [18969](https://github.com/influxdata/influxdb/pull/18969): Single Stat cells should render properly in Safari again -1. [18974](https://github.com/influxdata/influxdb/pull/18974): Limit variable querying when submitting queries to used variables -1. [19039](https://github.com/influxdata/influxdb/pull/19039): Fix an issue where switching orgs was not redirecting correctly -1. [18989](https://github.com/influxdata/influxdb/pull/18989): Stopped fetching tags in the advanced builder -1. [19044](https://github.com/influxdata/influxdb/pull/19044): Graph customization: X and Y axis properly accept values - -## v2.0.0-beta.14 [2020-07-08] - -### Features - -1. [18758](https://github.com/influxdata/influxdb/pull/18758): Extend influx stacks update cmd with ability to add resources without apply template -1. [18793](https://github.com/influxdata/influxdb/pull/18793): Normalize InfluxDB templates under new /api/v2/templates and /api/v2/stacks public API -1. [18818](https://github.com/influxdata/influxdb/pull/18818): Extend template Summary and Diff nested types with kind identifiers -1. [18857](https://github.com/influxdata/influxdb/pull/18857): Flux updated to v0.71.1 -1. [18805](https://github.com/influxdata/influxdb/pull/18805): Added static builds for Linux - -### Bug Fixes - -1. [18878](https://github.com/influxdata/influxdb/pull/18878): Don't overwrite build date set via ldflags -1. [18842](https://github.com/influxdata/influxdb/pull/18842): Fixed an issue where define query was unusable after importing a Check -1. 
[18845](https://github.com/influxdata/influxdb/pull/18845): Update documentation links - -## v2.0.0-beta.13 [2020-06-25] - -### Features - -1. [18387](https://github.com/influxdata/influxdb/pull/18387): Integrate query cancellation after queries have been submitted -1. [18515](https://github.com/influxdata/influxdb/pull/18515): Extend templates with the source file|url|reader. -1. [18539](https://github.com/influxdata/influxdb/pull/18539): Collect stats on installed influxdata community template usage. -1. [18541](https://github.com/influxdata/influxdb/pull/18541): Pkger allow raw github.com host URLs for yaml|json|jsonnet URLs -1. [18546](https://github.com/influxdata/influxdb/pull/18546): Influx allow for files to be remotes for all template commands -1. [18560](https://github.com/influxdata/influxdb/pull/18560): Extend stacks API with update capability -1. [18568](https://github.com/influxdata/influxdb/pull/18568): Add support for config files to influxd and any cli.NewCommand use case -1. [18573](https://github.com/influxdata/influxdb/pull/18573): Extend influx stacks cmd with new influx stacks update cmd -1. [18595](https://github.com/influxdata/influxdb/pull/18595): Add ability to skip resources in a template by kind or by metadata.name -1. [18600](https://github.com/influxdata/influxdb/pull/18600): Extend influx apply with resource filter capabilities -1. [18601](https://github.com/influxdata/influxdb/pull/18601): Provide active config running influx config without args -1. [18606](https://github.com/influxdata/influxdb/pull/18606): Enable influxd binary to look for a config file on startup -1. [18647](https://github.com/influxdata/influxdb/pull/18647): Add support for env ref default values to the template parser -1. [18655](https://github.com/influxdata/influxdb/pull/18655): Add support for platform variable selected field to templates - -### Bug Fixes - -1. [18602](https://github.com/influxdata/influxdb/pull/18602): Fix uint overflow during setup on 32bit systems -1. [18623](https://github.com/influxdata/influxdb/pull/18623): Drop support for --local flag within influx CLI -1. [18632](https://github.com/influxdata/influxdb/pull/18632): Prevents undefined queries in cells from erroring out in dashboards -1. [18649](https://github.com/influxdata/influxdb/pull/18649): Fixes bucket selection issue and query builder state -1. [18658](https://github.com/influxdata/influxdb/pull/18658): Add support for 'd' day and 'w' week time identifiers in the CLI for bucket and setup commands -1. [18581](https://github.com/influxdata/influxdb/pull/18581): Cache dashboard cell query results to use as a reference for cell configurations -1. [18707](https://github.com/influxdata/influxdb/pull/18707): Validate host-url for influx config create/set commands -1. [18713](https://github.com/influxdata/influxdb/pull/18713): Fix influx CLI flags to accurately depict flags for all commands - -## v2.0.0-beta.12 [2020-06-12] - -### Features - -1. [18279](https://github.com/influxdata/influxdb/pull/18279): Make all pkg applications stateful via stacks -1. [18322](https://github.com/influxdata/influxdb/pull/18322): Add ability to export a stack's existing (as they are in the platform) resource state as a pkg -1. 
[18334](https://github.com/influxdata/influxdb/pull/18334): Update influx pkg commands with improved usage and examples in long form. -1. [18344](https://github.com/influxdata/influxdb/pull/18344): Extend influx CLI with version and User-Agent. -1. [18355](https://github.com/influxdata/influxdb/pull/18355): Integrate RedirectTo functionality so CLOUD users now get navigated back to the originally linked page after login -1. [18392](https://github.com/influxdata/influxdb/pull/18392): Consolidate pkg influx commands under templates. This removes some nesting of the CLI commands as part of that. -1. [18400](https://github.com/influxdata/influxdb/pull/18400): Dashboards maintain sort order after navigating away -1. [18480](https://github.com/influxdata/influxdb/pull/18480): Allows tasks to open in new tabs -1. [18553](https://github.com/influxdata/influxdb/pull/18553): Update usage and soften comparisons for kind matching on 'influx export --resourceType' cmd - -### Bug Fixes - -1. [18331](https://github.com/influxdata/influxdb/pull/18331): Support organization name in addition to ID in DBRP operations -1. [18335](https://github.com/influxdata/influxdb/pull/18335): Disable failing when providing an unexpected error to influx CLI -1. [18345](https://github.com/influxdata/influxdb/pull/18345): Have influx delete cmd respect the config -1. [18385](https://github.com/influxdata/influxdb/pull/18385): Store initialization for pkger enforced on reads -1. [18434](https://github.com/influxdata/influxdb/pull/18434): Backfill missing fillColumns field for histograms in pkger -1. [18471](https://github.com/influxdata/influxdb/pull/18471): Notifies the user how to escape presentation mode when the feature is toggled - -### UI Improvements - -1. [18319](https://github.com/influxdata/influxdb/pull/18319): Display bucket ID in bucket list and enable 1 click copying -1. [18361](https://github.com/influxdata/influxdb/pull/18361): Tokens list is now consistent with the other resource lists -1. [18346](https://github.com/influxdata/influxdb/pull/18346): Reduce the number of variables being hydrated when toggling variables -1. [18447](https://github.com/influxdata/influxdb/pull/18447): Redesign dashboard cell loading indicator to be more obvious -1. [18593](https://github.com/influxdata/influxdb/pull/18593): Add copyable User and Organization Ids to About page - -## v2.0.0-beta.11 [2020-05-26] - -### Features - -1. [18011](https://github.com/influxdata/influxdb/pull/18011): Integrate UTC dropdown when making custom time range query -1. [18040](https://github.com/influxdata/influxdb/pull/18040): Allow for min OR max y-axis visualization settings rather than min AND max -1. [17764](https://github.com/influxdata/influxdb/pull/17764): Add CSV to line protocol conversion library -1. [18059](https://github.com/influxdata/influxdb/pull/18059): Make the dropdown width adjustable -1. [18173](https://github.com/influxdata/influxdb/pull/18173): Add version to /health response - -### Bug Fixes - -1. [18066](https://github.com/influxdata/influxdb/pull/18066): Fixed bug that wasn't persisting timeFormat for Graph + Single Stat selections -1. [17959](https://github.com/influxdata/influxdb/pull/17959): Authorizer now exposes full permission set -1. 
[18071](https://github.com/influxdata/influxdb/pull/18071): Fixed issue that was causing variable selections to hydrate all variable values -1. [18016](https://github.com/influxdata/influxdb/pull/18016): Remove the fancy scrollbars -1. [18171](https://github.com/influxdata/influxdb/pull/18171): Check status now displaying warning if loading a large amount - -## v2.0.0-beta.10 [2020-05-07] - -### Features - -1. [17934](https://github.com/influxdata/influxdb/pull/17934): Add ability to delete a stack and all the resources associated with it -1. [17941](https://github.com/influxdata/influxdb/pull/17941): Enforce DNS name compliance on all pkger resources' metadata.name field -1. [17989](https://github.com/influxdata/influxdb/pull/17989): Add stateful pkg management with stacks -1. [18007](https://github.com/influxdata/influxdb/pull/18007): Add remove and list pkger stack commands to influx CLI -1. [18017](https://github.com/influxdata/influxdb/pull/18017): Fixup display message for interactive influx setup cmd - -### Bug Fixes - -1. [17906](https://github.com/influxdata/influxdb/pull/17906): Ensure UpdateUser cleans up the index when updating names -1. [17933](https://github.com/influxdata/influxdb/pull/17933): Ensure Checks can be set for zero values - -### UI Improvements - -1. [17860](https://github.com/influxdata/influxdb/pull/17860): Allow bucket creation from the Data Explorer and Cell Editor - -## v2.0.0-beta.9 [2020-04-23] - -### Features - -1. [17851](https://github.com/influxdata/influxdb/pull/17851): Add feature flag package capability and flags endpoint - -### Bug Fixes - -1. [17618](https://github.com/influxdata/influxdb/pull/17618): Add index for URM by user ID to improve lookup performance -1. [17751](https://github.com/influxdata/influxdb/pull/17751): Existing session expiration time is respected on session renewal -1. [17817](https://github.com/influxdata/influxdb/pull/17817): Make CLI respect env vars and flags in addition to the configs and extend support for config orgs to all commands - -### UI Improvements - -1. [17714](https://github.com/influxdata/influxdb/pull/17714): Cloud environments no longer render markdown images, for security reasons. -1. [17321](https://github.com/influxdata/influxdb/pull/17321): Improve UI for sorting resources -1. [17740](https://github.com/influxdata/influxdb/pull/17740): Add single-color color schemes for visualizations -1. [17849](https://github.com/influxdata/influxdb/pull/17849): Move Organization navigation items to user menu. - -## v2.0.0-beta.8 [2020-04-10] - -### Features - -1. [17490](https://github.com/influxdata/influxdb/pull/17490): `influx config -`, to switch back to previous activated configuration -1. [17581](https://github.com/influxdata/influxdb/pull/17581): Introduce new navigation menu -1. [17595](https://github.com/influxdata/influxdb/pull/17595): Add -f (--file) option to `influx query` and `influx task` commands -1. [17498](https://github.com/influxdata/influxdb/pull/17498): Added support for command line options to limit memory for queries - -### Bug Fixes - -1. [17257](https://github.com/influxdata/influxdb/pull/17769): Fix retention policy after bucket is migrated -1. [17612](https://github.com/influxdata/influxdb/pull/17612): Fix card size and layout jank in dashboards index view -1. 
[17651](https://github.com/influxdata/influxdb/pull/17651): Fix check graph font and lines defaulting to black causing graph to be unreadable -1. [17660](https://github.com/influxdata/influxdb/pull/17660): Fix text wrapping display issue and popover sizing bug when adding labels to a resource -1. [17670](https://github.com/influxdata/influxdb/pull/17670): Respect the now-time of the compiled query if it's provided -1. [17692](https://github.com/influxdata/influxdb/pull/17692): Update giraffe to fix spacing between ticks -1. [17694](https://github.com/influxdata/influxdb/pull/17694): Fixed typos in the Flux functions list -1. [17701](https://github.com/influxdata/influxdb/pull/17701): Allow mouse cursor inside Script Editor for Safari -1. [17609](https://github.com/influxdata/influxdb/pull/17609): Fixed an issue where Variables could not use other Variables -1. [17754](https://github.com/influxdata/influxdb/pull/17754): Adds error messaging for Cells in Dashboard View - -### UI Improvements - -1. [17583](https://github.com/influxdata/influxdb/pull/17583): Update layout of Alerts page to work on all screen sizes -1. [17657](https://github.com/influxdata/influxdb/pull/17657): Sort dashboards on Getting Started page by recently modified - -## v2.0.0-beta.7 [2020-03-27] - -### Features - -1. [17232](https://github.com/influxdata/influxdb/pull/17232): Allow dashboards to optionally be displayed in light mode -1. [17273](https://github.com/influxdata/influxdb/pull/17273): Add shell completions command for the influx cli -1. [17353](https://github.com/influxdata/influxdb/pull/17353): Make all pkg resources unique by metadata.name field -1. [17363](https://github.com/influxdata/influxdb/pull/17363): Telegraf config tokens can no longer be retrieved after creation, but new tokens can be created after a telegraf has been setup -1. [17400](https://github.com/influxdata/influxdb/pull/17400): Be able to delete bucket by name via cli -1. [17396](https://github.com/influxdata/influxdb/pull/17396): Add module to write line data to specified url, org, and bucket -1. [17398](https://github.com/influxdata/influxdb/pull/17398): Extend influx cli write command with ability to process CSV data -1. [17448](https://github.com/influxdata/influxdb/pull/17448): Add foundation for pkger stacks, stateful package management -1. [17462](https://github.com/influxdata/influxdb/pull/17462): Flag to disable scheduling of tasks -1. [17470](https://github.com/influxdata/influxdb/pull/17470): Add ability to output cli output as json and hide table headers -1. [17472](https://github.com/influxdata/influxdb/pull/17472): Add an easy way to switch config via cli - -### Bug Fixes - -1. [17240](https://github.com/influxdata/influxdb/pull/17240): NodeJS logo displays properly in Firefox -1. [17363](https://github.com/influxdata/influxdb/pull/17363): Fixed telegraf configuration bugs where system buckets were appearing in the buckets dropdown -1. [17391](https://github.com/influxdata/influxdb/pull/17391): Fixed threshold check bug where checks could not be created when a field had a space in the name -1. [17384](https://github.com/influxdata/influxdb/pull/17384): Reuse slices built by iterator to reduce allocations -1. 
[17404](https://github.com/influxdata/influxdb/pull/17404): Updated duplicate check error message to be more explicit and actionable -1. [17515](https://github.com/influxdata/influxdb/pull/17515): Editing a table cell shows the proper values and respects changes -1. [17521](https://github.com/influxdata/influxdb/pull/17521): Table view scrolling should be slightly smoother -1. [17601](https://github.com/influxdata/influxdb/pull/17601): URL table values on single columns are being correctly parsed -1. [17552](https://github.com/influxdata/influxdb/pull/17552): Fixed a regression bug that inserted aggregate functions where the cursor is rather than on a new line - -### UI Improvements - -1. [17291](https://github.com/influxdata/influxdb/pull/17291): Redesign OSS Login page -1. [17297](https://github.com/influxdata/influxdb/pull/17297): Display graphic when a dashboard has no cells - -## v2.0.0-beta.6 [2020-03-12] - -### Features - -1. [17085](https://github.com/influxdata/influxdb/pull/17085): Clicking on bucket name takes user to Data Explorer with bucket selected -1. [17095](https://github.com/influxdata/influxdb/pull/17095): Extend pkger dashboards with table view support -1. [17114](https://github.com/influxdata/influxdb/pull/17114): Allow for retention to be provided to influx setup command as a duration -1. [17138](https://github.com/influxdata/influxdb/pull/17138): Extend pkger export all capabilities to support filtering by label name and resource type -1. [17049](https://github.com/influxdata/influxdb/pull/17049): Added new login and sign-up screen for cloud users that allows direct login from their region -1. [17170](https://github.com/influxdata/influxdb/pull/17170): Added new cli multiple profiles management tool -1. [17145](https://github.com/influxdata/influxdb/pull/17145): Update kv.Store to define schema changes via new kv.Migrator types - -### Bug Fixes - -1. [17039](https://github.com/influxdata/influxdb/pull/17039): Fixed issue where tasks are exported for notification rules -1. [17042](https://github.com/influxdata/influxdb/pull/17042): Fixed issue where tasks are not exported when exporting by org id -1. [17070](https://github.com/influxdata/influxdb/pull/17070): Fixed issue where tasks with imports in query break in pkger -1. [17028](https://github.com/influxdata/influxdb/pull/17028): Fixed issue where selecting an aggregate function in the script editor was not adding the function to a new line -1. [17072](https://github.com/influxdata/influxdb/pull/17072): Fixed issue where creating a variable of type map was piping the incorrect value when map variables were used in queries -1. [17050](https://github.com/influxdata/influxdb/pull/17050): Added missing user names to auth CLI commands -1. [17113](https://github.com/influxdata/influxdb/pull/17113): Disabled group functionality for check query builder -1. [17120](https://github.com/influxdata/influxdb/pull/17120): Fixed cell configuration error that was popping up when users created a dashboard and accessed the disk usage cell for the first time -1. [17097](https://github.com/influxdata/influxdb/pull/17097): Listing all the default variables in the VariableTab of the script editor -1. 
[17049](https://github.com/influxdata/influxdb/pull/17049): Fixed bug that was preventing the interval status on the dashboard header from refreshing on selections -1. [17161](https://github.com/influxdata/influxdb/pull/17161): Update table custom decimal feature for tables to update table onFocus -1. [17168](https://github.com/influxdata/influxdb/pull/17168): Fixed UI bug that was setting Telegraf config buttons off-center and was resizing config selections when filtering through the data -1. [17208](https://github.com/influxdata/influxdb/pull/17208): Fixed UI bug that was causing dashboard cells to error when a v.bucket was being used and configured for the first time -1. [17214](https://github.com/influxdata/influxdb/pull/17214): Fix appearance of client library logos in Safari -1. [17202](https://github.com/influxdata/influxdb/pull/17202): Fixed UI bug that was preventing checks created with the query builder from updating. Also fixed a bug that was preventing dashboard cell queries from working properly when creating group queries using the query builder - -## v2.0.0-beta.5 [2020-02-27] - -### Features - -1. [16991](https://github.com/influxdata/influxdb/pull/16991): Update Flux functions list for v0.61 -1. [16574](https://github.com/influxdata/influxdb/pull/16574): Add secure flag to session cookie - -### Bug Fixes - -1. [16919](https://github.com/influxdata/influxdb/pull/16919): Sort dashboards on homepage alphabetically -1. [16934](https://github.com/influxdata/influxdb/pull/16934): Tokens page now sorts by status -1. [16931](https://github.com/influxdata/influxdb/pull/16931): Set the default value of tags in a Check -1. [16935](https://github.com/influxdata/influxdb/pull/16935): Fix sort by variable type -1. [16973](https://github.com/influxdata/influxdb/pull/16973): Calculate correct stacked line cumulative when lines are different lengths -1. [17010](https://github.com/influxdata/influxdb/pull/17010): Fixed scrollbar issue where resource cards would overflow the parent container rather than be hidden and scrollable -1. [16992](https://github.com/influxdata/influxdb/pull/16992): Query Builder now groups on column values, not tag values -1. [17013](https://github.com/influxdata/influxdb/pull/17013): Scatterplots can once again render the tooltip correctly -1. [17027](https://github.com/influxdata/influxdb/pull/17027): Drop pkger gauge chart requirement for color threshold type -1. [17040](https://github.com/influxdata/influxdb/pull/17040): Fixed bug that was preventing the interval status on the dashboard header from refreshing on selections -1. [16961](https://github.com/influxdata/influxdb/pull/16961): Remove cli confirmation of secret, add an optional parameter of secret value - -## v2.0.0-beta.4 [2020-02-14] - -### Features - -1. [16855](https://github.com/influxdata/influxdb/pull/16855): Added labels to buckets in UI -1. [16842](https://github.com/influxdata/influxdb/pull/16842): Connect monaco editor to Flux LSP server -1. [16856](https://github.com/influxdata/influxdb/pull/16856): Update Flux to v0.59.6 - -### Bug Fixes - -1. [16852](https://github.com/influxdata/influxdb/pull/16852): Revert for bad indexing of UserResourceMappings and Authorizations -1. [15911](https://github.com/influxdata/influxdb/pull/15911): Gauge no longer allowed to become too small -1. 
[16878](https://github.com/influxdata/influxdb/pull/16878): Fix issue with INFLUX_TOKEN env vars being overridden by default token - -## v2.0.0-beta.3 [2020-02-11] - -### Features - -1. [16765](https://github.com/influxdata/influxdb/pull/16765): Extend influx cli pkg command with ability to take multiple files and directories -1. [16767](https://github.com/influxdata/influxdb/pull/16767): Extend influx cli pkg command with ability to take multiple urls, files, directories, and stdin at the same time -1. [16786](https://github.com/influxdata/influxdb/pull/16786): influx cli can manage secrets. - -### Bug Fixes - -1. [16733](https://github.com/influxdata/influxdb/pull/16733): Fix notification rule renaming panics from UI -1. [16769](https://github.com/influxdata/influxdb/pull/16769): Fix the tooltip for stacked line graphs -1. [16825](https://github.com/influxdata/influxdb/pull/16825): Fixed false success notification for read-only users creating dashboards -1. [16822](https://github.com/influxdata/influxdb/pull/16822): Fix issue with pkger/http stack crashing on dupe content type - -## v2.0.0-beta.2 [2020-01-24] - -### Features - -1. [16711](https://github.com/influxdata/influxdb/pull/16711): Query Builder supports group() function (change the dropdown from filter to group) -1. [16523](https://github.com/influxdata/influxdb/pull/16523): Change influx packages to be CRD compliant -1. [16547](https://github.com/influxdata/influxdb/pull/16547): Allow trailing newline in credentials file and CLI integration -1. [16545](https://github.com/influxdata/influxdb/pull/16545): Add support for prefixed cursor search to ForwardCursor types -1. [16504](https://github.com/influxdata/influxdb/pull/16504): Add backup and restore -1. [16522](https://github.com/influxdata/influxdb/pull/16522): Introduce resource logger to tasks, buckets and organizations - -### Bug Fixes - -1. [16656](https://github.com/influxdata/influxdb/pull/16656): Check engine closed before collecting index metrics -1. [16412](https://github.com/influxdata/influxdb/pull/16412): Reject writes which use any of the reserved tag keys -1. [16715](https://github.com/influxdata/influxdb/pull/16715): Fixed dashboard mapping for getDashboards to map correct prop -1. [16716](https://github.com/influxdata/influxdb/pull/16716): Improve the lacking error responses for unmarshal errors in org service - -### Bug Fixes - -1. [16527](https://github.com/influxdata/influxdb/pull/16527): fix /telegrafs panics when using org=org_name parameter - -### UI Improvements - -1. [16575](https://github.com/influxdata/influxdb/pull/16575): Swap billingURL with checkoutURL -1. [16203](https://github.com/influxdata/influxdb/pull/16203): Move cloud navigation to top of page instead of within left side navigation -1. [16536](https://github.com/influxdata/influxdb/pull/16536): Adjust aggregate window periods to be more "reasonable". Use duration input with validation. - -## v2.0.0-beta.1 [2020-01-08] - -### Features - -1. [16234](https://github.com/influxdata/influxdb/pull/16234): Add support for notification endpoints to influx templates/pkgs. -1. [16242](https://github.com/influxdata/influxdb/pull/16242): Drop id prefix for secret key requirement for notification endpoints -1. 
[16259](https://github.com/influxdata/influxdb/pull/16259): Add support for check resource to pkger parser -1. [16262](https://github.com/influxdata/influxdb/pull/16262): Add support for check resource pkger dry run functionality -1. [16275](https://github.com/influxdata/influxdb/pull/16275): Add support for check resource pkger apply functionality -1. [16283](https://github.com/influxdata/influxdb/pull/16283): Add support for check resource pkger export functionality -1. [16212](https://github.com/influxdata/influxdb/pull/16212): Add new kv.ForwardCursor interface -1. [16297](https://github.com/influxdata/influxdb/pull/16297): Add support for notification rule to pkger parser -1. [16298](https://github.com/influxdata/influxdb/pull/16298): Add support for notification rule pkger dry run functionality -1. [16305](https://github.com/influxdata/influxdb/pull/16305): Add support for notification rule pkger apply functionality -1. [16312](https://github.com/influxdata/influxdb/pull/16312): Add support for notification rule pkger export functionality -1. [16320](https://github.com/influxdata/influxdb/pull/16320): Add support for tasks to pkger parser -1. [16322](https://github.com/influxdata/influxdb/pull/16322): Add support for tasks to pkger dry run functionality -1. [16323](https://github.com/influxdata/influxdb/pull/16323): Add support for tasks to pkger apply functionality -1. [16324](https://github.com/influxdata/influxdb/pull/16324): Add support for tasks to pkger export functionality -1. [16226](https://github.com/influxdata/influxdb/pull/16226): Add group() to Query Builder -1. [16338](https://github.com/influxdata/influxdb/pull/16338): Add last run status to check and notification rules -1. [16340](https://github.com/influxdata/influxdb/pull/16340): Add last run status to tasks -1. [16341](https://github.com/influxdata/influxdb/pull/16341): Extend pkger apply functionality with ability to provide secrets outside of pkg -1. [16345](https://github.com/influxdata/influxdb/pull/16345): Add hide headers flag to influx cli task find cmd -1. [16336](https://github.com/influxdata/influxdb/pull/16336): Manual Overrides for Readiness Endpoint -1. [16347](https://github.com/influxdata/influxdb/pull/16347): Drop legacy inmem service implementation in favor of kv service with inmem dependency -1. [16348](https://github.com/influxdata/influxdb/pull/16348): Drop legacy bolt service implementation in favor of kv service with bolt dependency -1. [16014](https://github.com/influxdata/influxdb/pull/16014): While creating check, also display notification rules that would match check based on tag rules -1. [16389](https://github.com/influxdata/influxdb/pull/16389): Increase default bucket retention period to 30 days -1. [16430](https://github.com/influxdata/influxdb/pull/16430): Added toggle to table thresholds to allow users to choose between setting threshold colors to text or background -1. [16418](https://github.com/influxdata/influxdb/pull/16418): Add Developer Documentation -1. [16260](https://github.com/influxdata/influxdb/pull/16260): Capture User-Agent header as query source for logging purposes -1. [16469](https://github.com/influxdata/influxdb/pull/16469): Add support for configurable max batch size in points write handler -1. 
[16509](https://github.com/influxdata/influxdb/pull/16509): Add support for applying an influx package via a public facing URL -1. [16511](https://github.com/influxdata/influxdb/pull/16511): Add jsonnet support for influx packages -1. [14782](https://github.com/influxdata/influxdb/pull/16336): Add view page for Check -1. [16537](https://github.com/influxdata/influxdb/pull/16537): Add update password for CLI - -### Bug Fixes - -1. [16225](https://github.com/influxdata/influxdb/pull/16225): Ensures env vars are applied consistently across cmd, and fixes issue where INFLUX\_ env var prefix was not set globally. -1. [16235](https://github.com/influxdata/influxdb/pull/16235): Removed default frontend sorting when flux queries specify sorting -1. [16238](https://github.com/influxdata/influxdb/pull/16238): Store canceled task runs in the correct bucket -1. [16237](https://github.com/influxdata/influxdb/pull/16237): Updated Sortby functionality for table frontend sorts to sort numbers correctly -1. [16249](https://github.com/influxdata/influxdb/pull/16249): Prevent potential infinite loop when finding tasks by organization. -1. [16255](https://github.com/influxdata/influxdb/pull/16255): Retain user input when parsing invalid JSON during import -1. [16268](https://github.com/influxdata/influxdb/pull/16268): Fixed test flakiness that stemmed from multiple flush/signins being called in the same test suite -1. [16346](https://github.com/influxdata/influxdb/pull/16346): Update pkger task export to only trim out option task and not all vars provided -1. [16374](https://github.com/influxdata/influxdb/pull/16374): Update influx CLI, only show "see help" message, instead of the whole usage. -1. [16380](https://github.com/influxdata/influxdb/pull/16380): Fix notification tag matching rules and enable tests to verify -1. [16376](https://github.com/influxdata/influxdb/pull/16376): Extend the y-axis when stacked graph is selected -1. [16404](https://github.com/influxdata/influxdb/pull/16404): Fixed query reset bug that was resetting query in script editor whenever dates were changed -1. [16430](https://github.com/influxdata/influxdb/pull/16430): Fixed table threshold bug that was defaulting set colors to the background. -1. [16435](https://github.com/influxdata/influxdb/pull/16435): Time labels are no longer squished to the left -1. [16427](https://github.com/influxdata/influxdb/pull/16427): Fixed underlying issue with disappearing queries made in Advanced Mode -1. [16439](https://github.com/influxdata/influxdb/pull/16439): Prevent negative zero and allow zero to have decimal places -1. [16376](https://github.com/influxdata/influxdb/pull/16413): Limit data loader bucket selection to non system buckets -1. [16458](https://github.com/influxdata/influxdb/pull/16458): Fix EOF error when manually running tasks from the Task Page. -1. [16491](https://github.com/influxdata/influxdb/pull/16491): Add missing env vals to influx cli usage and fixes precedence of flag/env var priority - -### UI Improvements - -1. [16444](https://github.com/influxdata/influxdb/pull/16444): Add honeybadger reporting to create checks - -## v2.0.0-alpha.21 [2019-12-13] - -### Features - -1. [15836](https://github.com/influxdata/influxdb/pull/16077): Add stacked line layer option to graphs -1. 
[16094](https://github.com/influxdata/influxdb/pull/16094): Annotate log messages with trace ID, if available -1. [16187](https://github.com/influxdata/influxdb/pull/16187): Bucket create to accept an org name flag -1. [16158](https://github.com/influxdata/influxdb/pull/16158): Add trace ID response header to query endpoint - -### Bug Fixes - -1. [15655](https://github.com/influxdata/influxdb/pull/15655): Allow table columns to be draggable in table settings -1. [15757](https://github.com/influxdata/influxdb/pull/15757): Light up the home page icon when active -1. [15797](https://github.com/influxdata/influxdb/pull/15797): Make numeric inputs first class citizens -1. [15853](https://github.com/influxdata/influxdb/pull/15853): Prompt users to make a dashboard when dashboards are empty -1. [15884](https://github.com/influxdata/influxdb/pull/15884): Remove name editing from query definition during threshold check creation -1. [15975](https://github.com/influxdata/influxdb/pull/15975): Wait until user stops dragging and releases marker before zooming in after threshold changes -1. [16057](https://github.com/influxdata/influxdb/pull/16057): Adds `properties` to each cell on GET /dashboards/{dashboardID} -1. [16101](https://github.com/influxdata/influxdb/pull/16101): Gracefully handle invalid user-supplied JSON -1. [16105](https://github.com/influxdata/influxdb/pull/16105): Fix crash when loading queries built using Query Builder -1. [16112](https://github.com/influxdata/influxdb/pull/16112): Create cell view properties on dashboard creation -1. [16144](https://github.com/influxdata/influxdb/pull/16144): Scrollbars are dapper and proper -1. [16172](https://github.com/influxdata/influxdb/pull/16172): Fixed table ui threshold colorization issue where setting thresholds would not change table UI -1. [16194](https://github.com/influxdata/influxdb/pull/16194): Fixed windowPeriod issue that stemmed from webpack rules -1. [16175](https://github.com/influxdata/influxdb/pull/16175): Added delete functionality to note cells so that they can be deleted -1. [16204](https://github.com/influxdata/influxdb/pull/16204): Fix failure to create labels when creating telegraf configs -1. [16207](https://github.com/influxdata/influxdb/pull/16207): Fix crash when editing a Telegraf config -1. [16201](https://github.com/influxdata/influxdb/pull/16201): Updated start/endtime functionality so that custom script timeranges overwrite dropdown selections -1. [16217](https://github.com/influxdata/influxdb/pull/16217): Fix 12-hour time format to use consistent formatting and number of time ticks - -### UI Improvements - -## v2.0.0-alpha.20 [2019-11-20] - -### Features - -1. [15805](https://github.com/influxdata/influxdb/pull/15924): Add tls insecure skip verify to influx CLI. -1. [15981](https://github.com/influxdata/influxdb/pull/15981): Extend influx cli user create to allow for organization ID and user passwords to be set on user. -1. [15983](https://github.com/influxdata/influxdb/pull/15983): Autopopulate organization ids in the code samples -1. [15749](https://github.com/influxdata/influxdb/pull/15749): Expose bundle analysis tools for frontend resources -1. [15674](https://github.com/influxdata/influxdb/pull/15674): Allow users to view just the output section of a telegraf config -1. 
[15923](https://github.com/influxdata/influxdb/pull/15923): Allow the users to see string data in the single stat graph type - -### Bug Fixes - -1. [15777](https://github.com/influxdata/influxdb/pull/15777): Fix long startup when running 'influx help' -1. [15713](https://github.com/influxdata/influxdb/pull/15713): Mock missing Flux dependencies when creating tasks -1. [15731](https://github.com/influxdata/influxdb/pull/15731): Ensure array cursor iterator stats accumulate all cursor stats -1. [15866](https://github.com/influxdata/influxdb/pull/15866): Do not show Members section in Cloud environments -1. [15801](https://github.com/influxdata/influxdb/pull/15801): Change how cloud mode is enabled -1. [15820](https://github.com/influxdata/influxdb/pull/15820): Merge frontend development environments -1. [15944](https://github.com/influxdata/influxdb/pull/15944): Refactor table state logic on the frontend -1. [15920](https://github.com/influxdata/influxdb/pull/15920): Arrows in tables now show data in ascending and descending order -1. [15728](https://github.com/influxdata/influxdb/pull/15728): Sort by retention rules now sorts by seconds -1. [15628](https://github.com/influxdata/influxdb/pull/15628): Horizontal scrollbar no longer covering data - -### UI Improvements - -1. [15809](https://github.com/influxdata/influxdb/pull/15809): Redesign cards and animations on getting started page -1. [15787](https://github.com/influxdata/influxdb/pull/15787): Allow the users to filter with labels in telegraf input search - -## v2.0.0-alpha.19 [2019-10-30] - -### Features - -1. [15313](https://github.com/influxdata/influxdb/pull/15313): Add shortcut for toggling comments in script editor -1. [15650](https://github.com/influxdata/influxdb/pull/15650): Expose last run status and last run error in task API - -### UI Improvements - -1. [15503](https://github.com/influxdata/influxdb/pull/15503): Redesign page headers to be more space efficient -1. [15426](https://github.com/influxdata/influxdb/pull/15426): Add 403 handler that redirects back to the sign-in page on oats-generated routes. -1. [15710](https://github.com/influxdata/influxdb/pull/15710): Add button to nginx and redis configuration sections to make interaction more clear - -### Bug Fixes - -1. [15295](https://github.com/influxdata/influxdb/pull/15295): Ensures users are created with an active status -1. [15306](https://github.com/influxdata/influxdb/pull/15306): Added missing string values for CacheStatus type -1. [15348](https://github.com/influxdata/influxdb/pull/15348): Disable saving for threshold check if no threshold selected -1. [15354](https://github.com/influxdata/influxdb/pull/15354): Query variable selector shows variable keys, not values -1. [15246](https://github.com/influxdata/influxdb/pull/15427): UI/Telegraf filter functionality shows results based on input name -1. [13940](https://github.com/influxdata/influxdb/pull/15443): Create Label Overlay UI will disable the submit button and return a UI error if the name field is empty -1. [15452](https://github.com/influxdata/influxdb/pull/15452): Log error as info message on unauthorized API call attempts -1. [15504](https://github.com/influxdata/influxdb/pull/15504): Ensure members&owners eps 404 when /org resource does not exist -1. 
[15510](https://github.com/influxdata/influxdb/pull/15510): UI/Telegraf sort functionality fixed -1. [15549](https://github.com/influxdata/influxdb/pull/15549): UI/Task edit functionality fixed -1. [15559](https://github.com/influxdata/influxdb/pull/15559): Exiting a configuration of a dashboard cell now properly renders the cell content -1. [15556](https://github.com/influxdata/influxdb/pull/15556): Creating a check now displays on the checklist -1. [15592](https://github.com/influxdata/influxdb/pull/15592): Changed task runs success status code from 200 to 201 to match Swagger documentation. -1. [15634](https://github.com/influxdata/influxdb/pull/15634): TextAreas have the correct height -1. [15647](https://github.com/influxdata/influxdb/pull/15647): Ensures labels are unique by organization in the kv store -1. [15695](https://github.com/influxdata/influxdb/pull/15695): Ensures variable names are unique by organization - -## v2.0.0-alpha.18 [2019-09-26] - -### Features - -1. [15151](https://github.com/influxdata/influxdb/pull/15151): Add jsonweb package for future JWT support -1. [15168](https://github.com/influxdata/influxdb/pull/15168): Added the JMeter Template dashboard -1. [15152](https://github.com/influxdata/influxdb/pull/15152): Add JWT support to http auth middleware - -### UI Improvements - -1. [15211](https://github.com/influxdata/influxdb/pull/15211): Display dashboards index as a grid -1. [15099](https://github.com/influxdata/influxdb/pull/15099): Add viewport scaling to html meta for responsive mobile scaling -1. [15056](https://github.com/influxdata/influxdb/pull/15056): Remove rename and delete functionality from system buckets -1. [15056](https://github.com/influxdata/influxdb/pull/15056): Prevent new buckets from being named with the reserved "\_" prefix -1. [15056](https://github.com/influxdata/influxdb/pull/15056): Prevent user from selecting system buckets when creating Scrapers, Telegraf configurations, read/write tokens, and when saving as a task -1. [15056](https://github.com/influxdata/influxdb/pull/15056): Limit values from draggable threshold handles to 2 decimal places -1. [15040](https://github.com/influxdata/influxdb/pull/15040): Redesign check builder UI to fill the screen and make more room for composing message templates -1. [14990](https://github.com/influxdata/influxdb/pull/14990): Move Tokens tab from Settings to Load Data page -1. [14990](https://github.com/influxdata/influxdb/pull/14990): Expose all Settings tabs in navigation menu -1. [15289](https://github.com/influxdata/influxdb/pull/15289): Added Stream and table functions to query builder - -### Bug Fixes - -1. [14931](https://github.com/influxdata/influxdb/pull/14931): Remove scrollbars blocking onboarding UI step. - -## v2.0.0-alpha.17 [2019-08-14] - -### Features - -1. [14809](https://github.com/influxdata/influxdb/pull/14809): Add task middleware's for checks and notifications -1. [14495](https://github.com/influxdata/influxdb/pull/14495): optional gzip compression of the query CSV response. -1. [14567](https://github.com/influxdata/influxdb/pull/14567): Add task types. -1. [14604](https://github.com/influxdata/influxdb/pull/14604): When getting task runs from the API, runs will be returned in order of most recently scheduled first. -1. 
[14631](https://github.com/influxdata/influxdb/pull/14631): Added Github and Apache templates -1. [14631](https://github.com/influxdata/influxdb/pull/14631): Updated name of Local Metrics template -1. [14631](https://github.com/influxdata/influxdb/pull/14631): Dashboards for all Telegraf config bundles now created -1. [14694](https://github.com/influxdata/influxdb/pull/14694): Add ability to find tasks by name. -1. [14901](https://github.com/influxdata/influxdb/pull/14901): Add ability to Peek() on reads package StreamReader types. - -### UI Improvements - -1. [14917](https://github.com/influxdata/influxdb/pull/14917): Make first steps in Monitoring & Alerting more obvious -1. [14889](https://github.com/influxdata/influxdb/pull/14889): Make adding data to buckets more discoverable -1. [14709](https://github.com/influxdata/influxdb/pull/14709): Move Buckets, Telegrafs, and Scrapers pages into a tab called "Load Data" for ease of discovery -1. [14846](https://github.com/influxdata/influxdb/pull/14846): Standardize formatting of "updated at" timestamp in all resource cards -1. [14887](https://github.com/influxdata/influxdb/pull/14887): Move no buckets warning in telegraf tab above the search box - -### Bug Fixes - -1. [14480](https://github.com/influxdata/influxdb/pull/14480): Fix authentication when updating a task with invalid org or bucket. -1. [14497](https://github.com/influxdata/influxdb/pull/14497): Update the documentation link for Telegraf. -1. [14492](https://github.com/influxdata/influxdb/pull/14492): Fix to surface errors properly as task notifications on create. -1. [14569](https://github.com/influxdata/influxdb/pull/14569): Fix limiting of get runs for task. -1. [14779](https://github.com/influxdata/influxdb/pull/14779): Refactor tasks coordinator. -1. [14846](https://github.com/influxdata/influxdb/pull/14846): Ensure onboarding "advanced" button goes to correct location - -## v2.0.0-alpha.16 [2019-07-25] - -### Bug Fixes - -1. [14385](https://github.com/influxdata/influxdb/pull/14385): Add link to Documentation text in line protocol upload overlay -1. [14344](https://github.com/influxdata/influxdb/pull/14344): Fix issue in Authorization API, can't create auth for another user. -1. [14352](https://github.com/influxdata/influxdb/pull/14352): Fix Influx CLI ignored user flag for auth creation. -1. [14379](https://github.com/influxdata/influxdb/pull/14379): Fix the map example in the documentation -1. [14423](https://github.com/influxdata/influxdb/pull/14423): Ignore null/empty Flux rows which prevents a single stat/gauge crash. -1. [14434](https://github.com/influxdata/influxdb/pull/14434): Fixes an issue where clicking on a dashboard name caused an incorrect redirect. -1. [14441](https://github.com/influxdata/influxdb/pull/14441): Upgrade templates lib to 0.5.0 -1. [14453](https://github.com/influxdata/influxdb/pull/14453): Upgrade giraffe lib to 0.16.1 -1. [14412](https://github.com/influxdata/influxdb/pull/14412): Fix incorrect notification type for manually running a Task -1. [14356](https://github.com/influxdata/influxdb/pull/14356): Fix an issue where canceled tasks did not resume. - -## v2.0.0-alpha.15 [2019-07-11] - -### Features - -1. [14256](https://github.com/influxdata/influxdb/pull/14256): Add time zone support to UI -2. 
[14243](https://github.com/influxdata/influxdb/pull/14243): Added new storage inspection tool to verify tsm files -3. [14353](https://github.com/influxdata/influxdb/pull/14353): Require a token to be supplied for all task creation - -### Bug Fixes - -1. [14287](https://github.com/influxdata/influxdb/pull/14287): Fix incorrect reporting of task as successful when error occurs during result iteration -1. [14412](https://github.com/influxdata/influxdb/pull/14412): Fix incorrect notification type for manually running a Task - -### Known Issues - -1. [influxdata/flux#1492](https://github.com/influxdata/flux/issues/1492): Null support in Flux was introduced in Alpha 14. Several null issues were fixed in this release, but one known issue remains - Users may hit a panic if the first record processed by a map function has a null value. - -## v2.0.0-alpha.14 [2019-06-28] - -### Features - -1. [14221](https://github.com/influxdata/influxdb/pull/14221): Add influxd inspect verify-wal tool -1. [14218](https://github.com/influxdata/influxdb/commit/4faf2a24def4f351aef5b3c0f2907c385f82fdb9): Move to Flux 0.34.2 - which includes new string functions and initial multi-datasource support with Sql.from() -1. [14164](https://github.com/influxdata/influxdb/pull/14164): Only click save once to save cell -1. [14188](https://github.com/influxdata/influxdb/pull/14188): Enable selecting more columns for line visualizations - -### UI Improvements - -1. [14194](https://github.com/influxdata/influxdb/pull/14194): Draw gauges correctly on HiDPI displays -1. [14194](https://github.com/influxdata/influxdb/pull/14194): Clamp gauge position to gauge domain -1. [14168](https://github.com/influxdata/influxdb/pull/14168): Improve display of error messages -1. [14157](https://github.com/influxdata/influxdb/pull/14157): Remove rendering bottleneck when streaming Flux responses -1. [14165](https://github.com/influxdata/influxdb/pull/14165): Prevent variable dropdown from clipping - -## v2.0.0-alpha.13 [2019-06-13] - -### Features - -1. [14130](https://github.com/influxdata/influxdb/pull/14130): Add static templates for system, docker, redis, kubernetes -1. [14189](https://github.com/influxdata/influxdb/pull/14189): Add option to select a token when creating a task -1. [14200](https://github.com/influxdata/influxdb/pull/14200): Add the ability to update a token when updating a task - -## v2.0.0-alpha.12 [2019-06-13] - -### Features - -1. [14059](https://github.com/influxdata/influxdb/pull/14059): Enable formatting line graph y ticks with binary prefix -1. [14052](https://github.com/influxdata/influxdb/pull/14052): Add x and y column pickers to graph types -1. [14128](https://github.com/influxdata/influxdb/pull/14128): Add option to shade area below line graphs - -### Bug Fixes - -1. [14085](https://github.com/influxdata/influxdb/pull/14085): Fix performance regression in graph tooltips - -### UI Improvements - -## v2.0.0-alpha.11 [2019-05-31] - -1. [14031](https://github.com/influxdata/influxdb/pull/14031): Correctly check if columnKeys include xColumn in heatmap - -## v2.0.0-alpha.10 [2019-05-30] - -### Features - -1. [13945](https://github.com/influxdata/influxdb/pull/13945): Add heatmap visualization type -1. [13961](https://github.com/influxdata/influxdb/pull/13961): Add scatter graph visualization type -1. 
[13850](https://github.com/influxdata/influxdb/pull/13850): Add description field to Tasks -1. [13924](https://github.com/influxdata/influxdb/pull/13924): Add CLI arguments for configuring session length and renewal -1. [13961](https://github.com/influxdata/influxdb/pull/13961): Add smooth interpolation option to line graphs - -### Bug Fixes - -1. [13753](https://github.com/influxdata/influxdb/pull/13753): Removed hardcoded bucket for Getting Started with Flux dashboard -1. [13783](https://github.com/influxdata/influxdb/pull/13783): Ensure map type variables allow for selecting values -1. [13800](https://github.com/influxdata/influxdb/pull/13800): Generate more idiomatic Flux in query builder -1. [13797](https://github.com/influxdata/influxdb/pull/13797): Expand tab key presses to 2 spaces in the Flux editor -1. [13823](https://github.com/influxdata/influxdb/pull/13823): Prevent dragging of Variable Dropdowns when dragging a scrollbar inside the dropdown -1. [13853](https://github.com/influxdata/influxdb/pull/13853): Improve single stat computation -1. [13945](https://github.com/influxdata/influxdb/pull/13945): Fix crash when opening histogram settings with no data - -### UI Improvements - -1. [#13835](https://github.com/influxdata/influxdb/pull/13835): Render checkboxes in query builder tag selection lists -1. [#13856](https://github.com/influxdata/influxdb/pull/13856): Fix jumbled card text in Telegraf configuration wizard -1. [#13888](https://github.com/influxdata/influxdb/pull/13888): Change scrapers in scrapers list to be resource cards -1. [#13925](https://github.com/influxdata/influxdb/pull/13925): Export and download resource with formatted resource name with no spaces - -## v2.0.0-alpha.9 [2019-05-01] - -**NOTE: This will remove all tasks from your InfluxDB v2.0 instance.** - -### Features - -1. [13423](https://github.com/influxdata/influxdb/pull/13423): Set autorefresh of dashboard to pause if absolute time range is selected -1. [13473](https://github.com/influxdata/influxdb/pull/13473): Switch task back end to a more modular and flexible system -1. [13493](https://github.com/influxdata/influxdb/pull/13493): Add org profile tab with ability to edit organization name -1. [13510](https://github.com/influxdata/influxdb/pull/13510): Add org name to dashboard page title -1. [13520](https://github.com/influxdata/influxdb/pull/13520): Add cautioning to bucket renaming -1. [13560](https://github.com/influxdata/influxdb/pull/13560): Add option to generate an all-access token in tokens tab -1. [13601](https://github.com/influxdata/influxdb/pull/13601): Add option to generate read/write token in tokens tab -1. [13715](https://github.com/influxdata/influxdb/pull/13715): Added a new Local Metrics Dashboard template that is created during Quick Start - -### Bug Fixes - -1. [13584](https://github.com/influxdata/influxdb/pull/13584): Fixed scroll clipping found in label editing flow -1. [13585](https://github.com/influxdata/influxdb/pull/13585): Prevent overlapping text and dot in time range dropdown -1. [13602](https://github.com/influxdata/influxdb/pull/13602): Updated link in notes cell to a more useful site -1. [13618](https://github.com/influxdata/influxdb/pull/13618): Show error message when adding line protocol -1. 
[13657](https://github.com/influxdata/influxdb/pull/13657): Update UI Flux function documentation -1. [13718](https://github.com/influxdata/influxdb/pull/13718): Updated System template to support math with floats -1. [13732](https://github.com/influxdata/influxdb/pull/13732): Fixed the window function documentation -1. [13738](https://github.com/influxdata/influxdb/pull/13738): Fixed typo in the `range` Flux function example -1. [13742](https://github.com/influxdata/influxdb/pull/13742): Updated the `systemTime` function to use `system.time` - -### UI Improvements - -1. [13424](https://github.com/influxdata/influxdb/pull/13424): Add general polish and empty states to Create Dashboard from Template overlay - -## v2.0.0-alpha.8 [2019-04-12] - -### Features - -1. [13024](https://github.com/influxdata/influxdb/pull/13024): Add the ability to edit token's description -1. [13078](https://github.com/influxdata/influxdb/pull/13078): Add the option to create a Dashboard from a Template. -1. [13161](https://github.com/influxdata/influxdb/pull/13161): Add the ability to add labels on variables -1. [13171](https://github.com/influxdata/influxdb/pull/13171): Add switch organizations dropdown to home navigation menu item. -1. [13173](https://github.com/influxdata/influxdb/pull/13173): Add create org to side nav -1. [13345](https://github.com/influxdata/influxdb/pull/13345): Added a new Getting Started with Flux Template - -### Bug Fixes - -1. [13284](https://github.com/influxdata/influxdb/pull/13284): Update shift to timeShift in the flux functions side bar - -### UI Improvements - -1. [13287](https://github.com/influxdata/influxdb/pull/13287): Update cursor to grab when hovering draggable areas -1. [13311](https://github.com/influxdata/influxdb/pull/13311): Sync note editor text and preview scrolling -1. [13249](https://github.com/influxdata/influxdb/pull/13249): Add the ability to create a bucket when creating an organization - -## v2.0.0-alpha.7 [2019-03-28] - -### Features - -1. [12663](https://github.com/influxdata/influxdb/pull/12663): Insert flux function near cursor in flux editor -1. [12678](https://github.com/influxdata/influxdb/pull/12678): Enable the use of variables in the Data Explorer and Cell Editor Overlay -1. [12655](https://github.com/influxdata/influxdb/pull/12655): Add a variable control bar to dashboards to select values for variables. -1. [12706](https://github.com/influxdata/influxdb/pull/12706): Add ability to add variable to script from the side menu. -1. [12791](https://github.com/influxdata/influxdb/pull/12791): Use time range for metaqueries in Data Explorer and Cell Editor Overlay -1. [12827](https://github.com/influxdata/influxdb/pull/12827): Fix screen tearing bug in Raw Data View -1. [12843](https://github.com/influxdata/influxdb/pull/12843): Add copy to clipboard button to export overlays -1. [12826](https://github.com/influxdata/influxdb/pull/12826): Enable copying error messages to the clipboard from dashboard cells -1. [12876](https://github.com/influxdata/influxdb/pull/12876): Add the ability to update token's status in Token list -1. [12821](https://github.com/influxdata/influxdb/pull/12821): Allow variables to be re-ordered within control bar on a dashboard. -1. 
[12888](https://github.com/influxdata/influxdb/pull/12888): Add the ability to delete a template -1. [12901](https://github.com/influxdata/influxdb/pull/12901): Save user preference for variable control bar visibility and default to visible -1. [12910](https://github.com/influxdata/influxdb/pull/12910): Add the ability to clone a template -1. [12958](https://github.com/influxdata/influxdb/pull/12958): Add the ability to import a variable - -### Bug Fixes - -1. [12684](https://github.com/influxdata/influxdb/pull/12684): Fix mismatch in bucket row and header -1. [12703](https://github.com/influxdata/influxdb/pull/12703): Allows user to edit note on cell -1. [12764](https://github.com/influxdata/influxdb/pull/12764): Fix empty state styles in scrapers in org view -1. [12790](https://github.com/influxdata/influxdb/pull/12790): Fix bucket creation error when changing retention rule types. -1. [12793](https://github.com/influxdata/influxdb/pull/12793): Fix task creation error when switching schedule types. -1. [12805](https://github.com/influxdata/influxdb/pull/12805): Fix hidden horizontal scrollbars in flux raw data view -1. [12827](https://github.com/influxdata/influxdb/pull/12827): Fix screen tearing bug in Raw Data View -1. [12961](https://github.com/influxdata/influxdb/pull/12961): Fix scroll clipping in graph legends & dropdown menus -1. [12959](https://github.com/influxdata/influxdb/pull/12959): Fix routing loop - -### UI Improvements - -1. [12782](https://github.com/influxdata/influxdb/pull/12782): Move bucket selection in the query builder to the first card in the list -1. [12850](https://github.com/influxdata/influxdb/pull/12850): Ensure editor is automatically focused in note editor -1. [12915](https://github.com/influxdata/influxdb/pull/12915): Add ability to edit a template's name. - -## v2.0.0-alpha.6 [2019-03-15] - -### Release Notes - -We have updated the way we do predefined dashboards to [include Templates](https://github.com/influxdata/influxdb/pull/12532) in this release which will cause existing Organizations to not have a System dashboard created when they build a new Telegraf configuration. In order to get this functionality, remove your existing data and start from scratch. - -**NOTE: This will remove all data from your InfluxDB v2.0 instance including timeseries data.** - -On most `linux` systems and on `macOS`: - -```sh -$ rm -r ~/.influxdbv2 -``` - -Once completed, `v2.0.0-alpha.6` can be started. - -### Features - -1. [12496](https://github.com/influxdata/influxdb/pull/12496): Add ability to import a dashboard -1. [12524](https://github.com/influxdata/influxdb/pull/12524): Add ability to import a dashboard from org view -1. [12531](https://github.com/influxdata/influxdb/pull/12531): Add ability to export a dashboard and a task -1. [12615](https://github.com/influxdata/influxdb/pull/12615): Add `run` subcommand to influxd binary. This is also the default when no subcommand is specified. -1. [12523](https://github.com/influxdata/influxdb/pull/12523): Add ability to save a query as a variable from the Data Explorer. -1. [12532](https://github.com/influxdata/influxdb/pull/12532): Add System template on onboarding - -### Bug Fixes - -1. [12641](https://github.com/influxdata/influxdb/pull/12641): Stop scrollbars from covering text in flux editor - -### UI Improvements - -1. 
[12610](https://github.com/influxdata/influxdb/pull/12610): Fine tune keyboard interactions for managing labels from a resource card - -## v2.0.0-alpha.5 [2019-03-08] - -### Release Notes - -This release includes a [breaking change](https://github.com/influxdata/influxdb/pull/12391) to the format in which TSM and index data are stored on disk. -Any existing local data will not be queryable once InfluxDB is upgraded to this release. -Prior to installing this release we recommend all storage-engine data is removed from your local InfluxDB `2.x` installation; this can be achieved without losing any of your other InfluxDB `2.x` data (settings etc). -To remove only local storage data, run the following in a terminal. - -On most `linux` systems: - -```sh - -# Replace <username> with your actual username. - -$ rm -r /home/<username>/.influxdbv2/engine -``` - -On `macOS`: - -```sh -# Replace <username> with your actual username. - -$ rm -r /Users/<username>/.influxdbv2/engine -``` - -Once completed, `v2.0.0-alpha.5` can be started. - -### Features - -1. [12096](https://github.com/influxdata/influxdb/pull/12096): Add labels to cloned tasks -1. [12111](https://github.com/influxdata/influxdb/pull/12111): Add ability to filter resources by clicking a label -1. [12401](https://github.com/influxdata/influxdb/pull/12401): Add ability to add a member to org -1. [12391](https://github.com/influxdata/influxdb/pull/12391): Improve representation of TSM tagsets on disk -1. [12437](https://github.com/influxdata/influxdb/pull/12437): Add ability to remove a member from org - -### Bug Fixes - -1. [12302](https://github.com/influxdata/influxdb/pull/12302): Prevent clipping of code snippets in Firefox -1. [12379](https://github.com/influxdata/influxdb/pull/12379): Prevent clipping of cell edit menus in dashboards - -### UI Improvements - -1. [12302](https://github.com/influxdata/influxdb/pull/12302): Make code snippet copy functionality easier to use -1. [12304](https://github.com/influxdata/influxdb/pull/12304): Always show live preview in Note Cell editor -1. [12317](https://github.com/influxdata/influxdb/pull/12317): Redesign Create Scraper workflow -1. [12317](https://github.com/influxdata/influxdb/pull/12317): Show warning in Telegrafs and Scrapers lists when user has no buckets -1. [12384](https://github.com/influxdata/influxdb/pull/12384): Streamline label addition, removal, and creation from the dashboards list -1. [12464](https://github.com/influxdata/influxdb/pull/12464): Improve label color selection - -## v2.0.0-alpha.4 [2019-02-21] - -### Features - -1. [11954](https://github.com/influxdata/influxdb/pull/11954): Add the ability to run a task manually from tasks page -1. [11990](https://github.com/influxdata/influxdb/pull/11990): Add the ability to select a custom time range in explorer and dashboard -1. [12009](https://github.com/influxdata/influxdb/pull/12009): Display the version information on the login page -1. [12011](https://github.com/influxdata/influxdb/pull/12011): Add the ability to update a Variable's name and query. -1. [12026](https://github.com/influxdata/influxdb/pull/12026): Add labels to cloned dashboard -1. [12018](https://github.com/influxdata/influxdb/pull/12057): Add ability to filter resources by label name -1. 
[11973](https://github.com/influxdata/influxdb/pull/11973): Add ability to create or add labels to a resource from labels editor - -### Bug Fixes - -1. [11997](https://github.com/influxdata/influxdb/pull/11997): Update the bucket retention policy to update the time in seconds - -### UI Improvements - -1. [12016](https://github.com/influxdata/influxdb/pull/12016): Update the preview in the label overlays to be shorter -1. [12012](https://github.com/influxdata/influxdb/pull/12012): Add notifications to scrapers page for created/deleted/updated scrapers -1. [12023](https://github.com/influxdata/influxdb/pull/12023): Add notifications to buckets page for created/deleted/updated buckets -1. [12072](https://github.com/influxdata/influxdb/pull/12072): Update the admin page to display error for password length - -## v2.0.0-alpha.3 [2019-02-15] - -### Features - -1. [11809](https://github.com/influxdata/influxdb/pull/11809): Add the ability to name a scraper target -1. [11821](https://github.com/influxdata/influxdb/pull/11821): Display scraper name as the first and only updatable column in scrapers list -1. [11804](https://github.com/influxdata/influxdb/pull/11804): Add the ability to view runs for a task -1. [11824](https://github.com/influxdata/influxdb/pull/11824): Display last completed run for tasks list -1. [11836](https://github.com/influxdata/influxdb/pull/11836): Add the ability to view the logs for a specific task run - -### Bug Fixes - -1. [11819](https://github.com/influxdata/influxdb/pull/11819): Update the inline edit for resource names to guard for empty strings -1. [11852](https://github.com/influxdata/influxdb/pull/11852): Prevent a new template dashboard from being created on every telegraf config update -1. [11848](https://github.com/influxdata/influxdb/pull/11848): Fix overlapping buttons in the telegrafs verify data step - -### UI Improvements - -1. [11764](https://github.com/influxdata/influxdb/pull/11764): Move the download telegraf config button to view config overlay -1. [11879](https://github.com/influxdata/influxdb/pull/11879): Combine permissions for user by type -1. [11938](https://github.com/influxdata/influxdb/pull/11938): Add ordering to UI list items - -## v2.0.0-alpha.2 [2019-02-07] - -### Features - -1. [11677](https://github.com/influxdata/influxdb/pull/11677): Add instructions button to view `$INFLUX_TOKEN` setup for telegraf configs -1. [11693](https://github.com/influxdata/influxdb/pull/11693): Save the \$INFLUX_TOKEN environment variable in telegraf configs -1. [11700](https://github.com/influxdata/influxdb/pull/11700): Update Tasks tab on Org page to look like Tasks Page -1. [11740](https://github.com/influxdata/influxdb/pull/11740): Add view button to view the telegraf config toml -1. [11522](https://github.com/influxdata/influxdb/pull/11522): Add plugin information step to allow for config naming and configure one plugin at a time -1. [11758](https://github.com/influxdata/influxdb/pull/11758): Update Dashboards tab on Org page to look like Dashboards Page -1. [11810](https://github.com/influxdata/influxdb/pull/11810): Add tab for template variables under organizations page - -### Bug Fixes - -1. [11678](https://github.com/influxdata/influxdb/pull/11678): Update the System Telegraf Plugin bundle to include the swap plugin -1. 
[11722](https://github.com/influxdata/influxdb/pull/11722): Revert behavior allowing users to create authorizations on behalf of another user - -### UI Improvements - -1. [11683](https://github.com/influxdata/influxdb/pull/11683): Change the wording for the plugin config form button to Done -1. [11689](https://github.com/influxdata/influxdb/pull/11689): Change the wording for the Collectors configure step button to Create and Verify -1. [11697](https://github.com/influxdata/influxdb/pull/11697): Standardize page loading spinner styles -1. [11711](https://github.com/influxdata/influxdb/pull/11711): Show checkbox on Save As button in data explorer -1. [11705](https://github.com/influxdata/influxdb/pull/11705): Make collectors plugins side bar visible in only the configure step -1. [11745](https://github.com/influxdata/influxdb/pull/11745): Swap retention policies on Create bucket page - -## v2.0.0-alpha.1 [2019-01-23] - -### Release Notes - -This is the initial alpha release of InfluxDB 2.0. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 3a841b9ef3c..00000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,286 +0,0 @@ -# Contributing to InfluxDB v2 - -## How to report a bug - -Before you report an issue, please [search existing issues](https://github.com/influxdata/influxdb/issues) to check whether it's -already been reported, or perhaps even fixed. -If you choose to report an issue, please include the following in your report: - -- Full details of your operating system (or distribution)--for example, `64bit Ubuntu 18.04`. - To get your operating system details, run the following command in your terminal - and copy-paste the output into your report: - - ```sh - uname -srm - ``` -- How you installed InfluxDB. Did you use a pre-built package or did you build from source? -- The version of InfluxDB you're running. - If you installed InfluxDB using a pre-built package, run the following command in your terminal and then copy-paste the output into your report: - - ```sh - influxd version - ``` - - If you built and ran `influxd` from source, run the following command from your *influxdb* directory and then copy-paste the output into your report: - - ```sh - bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influxd version - ``` -- [Clear steps to reproduce the issue](#how-to-provide-steps-to-reproduce-an-issue) - -### How to provide steps for reproducing an issue - -The easier we can reproduce the problem, the easier we can fix it. -To learn how to write an effective bug report, we recommend reading [Simon Tatham's essay, "How to Report Bugs Effectively."](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html). - -When describing how to reproduce the issue, -please provide test cases in the form of `curl` commands--for example: - -```bash -# write data -curl -XPOST "http://localhost:8086/api/v2/write?org=YOUR_ORG&bucket=YOUR_BUCKET&precision=s" \ - --header "Authorization: Token YOURAUTHTOKEN" \ - --data-raw "mem,host=host1 used_percent=23.43234543 1556896326" - -# query data -# Bug: expected it to return no data, but data comes back. 
-curl http://localhost:8086/api/v2/query?org=my-org -XPOST -sS \ - -H 'Authorization: Token YOURAUTHTOKEN' \ - -H 'Accept: application/csv' \ - -H 'Content-type: application/vnd.flux' \ - -d 'from(bucket:"example-bucket") - |> range(start:-1000h) - |> group(columns:["_measurement"], mode:"by") - |> sum()' -``` - -Test cases with `influx` CLI commands are also helpful--for example: - -``` -# write data -influx write -o YOUR_ORG -b YOUR_BUCKET -p s -t YOURAUTHTOKEN \ - "mem,host=host1 used_percent=23.43234543 1556896326" - -# query data -# Bug: expected it to return no data, but data comes back. -influx query -o YOUR_ORG -t YOURAUTHTOKEN 'from(bucket:"example-bucket") - |> range(start:-1000h) - |> group(columns:["_measurement"], mode:"by") - |> sum()' -``` - -If you don't provide clear test cases like the examples above, then investigating your issue will be very difficult for us. -If you have trouble including data in your report, please zip up your data directory and include a link to it in your bug report. - -Note that issues are _not the place to file general support requests_ such as "How do I use `collectd` with InfluxDB?" -Please submit requests for help to the [InfluxData Community](https://community.influxdata.com/) - don't report them as issues in the repo. - -## How to request a feature - -We encourage you to submit feature requests as they help us prioritize our work. - -In your feature request, please include the following: -- Clear requirements and goals. -- What you would like to see added to InfluxDB. -- Examples. -- Why the feature is important to you. - -If you find your request already exists in a Github issue, -please indicate your support for the existing issue by using the "thumbs up" reaction. - -## How to submit a pull (change) request - -To submit a change for code or documentation in this repository, please [create a pull request](https://github.com/influxdata/influxdb/compare) and follow the instructions in the pull request template to help us review your PR. -After you complete the template steps and submit the PR, expect some deliberation as we review and finalize the change. -Once your PR is approved, you can merge it. - -## How to report security vulnerabilities - -InfluxData takes security and our users' trust very seriously. -If you believe you have found a security issue in any of our open source projects, please responsibly disclose it by contacting security@influxdata.com. -More details about security vulnerability reporting, including our GPG key, [can be found here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). - -## Signing the CLA - -Before you contribute to InfluxDB, please sign our [Individual Contributor License Agreement (CLA)](https://influxdata.com/community/cla/). - -## How to build InfluxDB from source - -### Install Go - -InfluxDB requires Go 1.20. - -At InfluxData we find `gvm`, a Go version manager, useful for installing Go. -For instructions on how to install it see [the gvm page on github](https://github.com/moovweb/gvm). - -After installing `gvm` you can install and set the default Go version by running the following: - -```bash -$ gvm install go1.20 -$ gvm use go1.20 --default -``` - -InfluxDB requires Go module support. Set `GO111MODULE=on` or build the project outside of your `GOPATH` for it to succeed. For information about modules, please refer to the [wiki](https://github.com/golang/go/wiki/Modules). 
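
If you want to confirm that module mode is picking up the project correctly, a minimal sanity check might look like this (the clone path below is illustrative; any directory outside your `GOPATH` works):

```bash
# Illustrative sanity check for module-mode builds; adjust the clone path to your setup.
$ export GO111MODULE=on   # explicit, though module mode is the default in recent Go releases
$ cd ~/src/influxdb       # assumed checkout location, outside GOPATH
$ go env GOMOD            # should print the absolute path to this repository's go.mod
$ go mod download         # fetches the dependencies declared in go.mod/go.sum
```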
- -### Install revision control systems - -Go has the ability to import remote packages via revision control systems with the `go get` command. -To ensure that you can retrieve any remote package, install `git` and `bzr` revision control software, following the instructions for your system: - -- [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git) -- [Install Bazaar](http://doc.bazaar.canonical.com/latest/en/user-guide/installing_bazaar.html) - -### Install additional dependencies - -In addition to `go`, `git`, and `bzr`, you will need the following prerequisites -installed on your system: - -- Rust (a recent stable version, 1.60 or higher). - To install Rust, we recommend using [rustup](https://rustup.rs/). -- `clang` -- `make` -- `pkg-config` -- `protobuf` -- Go protobuf plugin. To use Go to install the plugin, enter the following command in your terminal: - - `go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28` - -To install prerequisites, use the following example command for your system: - -- OSX: `brew install pkg-config protobuf` - - For OSX, you must have [HomeBrew](https://brew.sh) installed. - - You will also need the [Developer Tools](https://webkit.org/build-tools/), which includes `make`. -- Linux (Arch): `pacman -S clang make pkgconf protobuf` -- Linux (Ubuntu): `sudo apt install make clang pkg-config protobuf-compiler libprotobuf-dev build-essential` -- Linux (RHEL): see the [RedHat-specific instructions](#redhat-specific-instructions). - -#### RedHat-specific instructions - -For RedHat, you must enable the [EPEL](https://fedoraproject.org/wiki/EPEL) - -### Build influxd with make - -`influxd` is the InfluxDB service. - -For `influx`, the InfluxDB CLI tool, see the [influx-cli repository on Github](https://github.com/influxdata/influx-cli). - -Once you've installed the dependencies, -follow these steps to build `influxd` from source and start the service: - -1. Clone this repo (influxdb). -2. In your influxdb directory, run `make` to generate the influxd binary: - - ```sh - make - ``` - - If successful, `make` installs the binary to a platform-specific path for your system. - The output is the following: - - ```sh - env GO111MODULE=on go build -tags 'assets ' -o bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influxd ./cmd/influxd - ``` - -3. To start the `influxd` service that runs InfluxDB, enter the following command - to run the platform-specific binary: - - ``` - bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influxd - ``` - - `influxd` logs to `stdout` by default. - -**Troubleshooting** - -- If you've changed Go or Rust versions and have trouble building, try running `go clean -r -x -cache -testcache -modcache ./` to clear out old build artifacts that may be incompatible. - -### Run tests - -This project is built from various languages. -To run tests for all languages and components, enter the following command in -your terminal: - -```bash -make test -``` - -To run tests for only Go and Rust components, enter the following command in your terminal: - -```bash -make test-go -``` - -## Generated Google Protobuf code - -Most changes to the source don't require changes to the generated protocol buffer code. -If you need to modify the protocol buffer code, you'll first need to install the protocol buffers toolchain. - -First install the [protocol buffer compiler](https://developers.google.com/protocol-buffers/) 3.17.3 or later for your OS. - -Then run `go generate` after updating any `*.proto` file: - -```bash -go generate ./... 
-``` - -**How to troubleshoot protobuf** - -If generating the protobuf code is failing for you, check each of the following: - -- Ensure the protobuf library can be found. Make sure that `LD_LIBRARY_PATH` includes the directory in which the library `libprotoc.so` has been installed. -- Ensure the command `protoc-gen-go`, found in `GOPATH/bin`, is on your path. This can be done by adding `GOPATH/bin` to `PATH`. - -## Generated Go Templates - -The query engine requires optimized data structures for each data type so instead of writing each implementation several times we use templates. -_Do not change code that ends in a `.gen.go` extension!_ -Instead you must edit the `.gen.go.tmpl` file that was used to generate it. - -Once you've edited the template file, you'll need the [`tmpl`][tmpl] utility to generate the code: - -```sh -$ go get github.com/benbjohnson/tmpl -``` - -Then you can regenerate all templates in the project: - -```sh -$ go generate ./... -``` - -[tmpl]: https://github.com/benbjohnson/tmpl - -## Profiling - -When troubleshooting problems with CPU or memory the Go toolchain can be helpful. -You can start InfluxDB with CPU and memory profiling turned on. -For example: - -```bash -# start influx with profiling - -$ ./influxd -cpuprofile influxdcpu.prof -memprof influxdmem.prof - -# run queries, writes, whatever you're testing -# Quit out of influxd and influxd.prof will then be written. -# open up pprof to examine the profiling data. - -$ go tool pprof ./influxd influxd.prof - -# once inside run "web", opens up browser with the CPU graph -# can also run "web " to zoom in. Or "list " to see specific lines -``` - -Note that when you pass the binary to `go tool pprof` _you must specify the path to the binary_. - -If you are profiling benchmarks built with the `testing` package, you may wish -to use the [`github.com/pkg/profile`](github.com/pkg/profile) package to limit -the code being profiled: - -```go -func BenchmarkSomething(b *testing.B) { - // do something intensive like fill database with data... - defer profile.Start(profile.ProfilePath("/tmp"), profile.MemProfile).Stop() - // do something that you want to profile... -} -``` diff --git a/FUZZ.md b/FUZZ.md deleted file mode 100644 index e9380115797..00000000000 --- a/FUZZ.md +++ /dev/null @@ -1,20 +0,0 @@ -# Fuzzing InfluxDB - -## Local fuzzing - -For local fuzzing, install [go-fuzz](https://github.com/dvyukov/go-fuzz): - -``` -$ go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build -``` - -For writing fuzz tests, see the [go-fuzz README](https://github.com/dvyukov/go-fuzz). - -Below is an example of building and running a fuzz test. -In this case, the test is located at `./jsonweb/fuzz.go`. - -``` -$ cd go/src/github.com/influxdata/influxdb/jsonweb -$ go-fuzz-build github.com/influxdata/influxdb/v2/jsonweb -$ go-fuzz -bin jsonweb-fuzz.zip -``` diff --git a/GNUmakefile b/GNUmakefile deleted file mode 100644 index 01a138e7101..00000000000 --- a/GNUmakefile +++ /dev/null @@ -1,194 +0,0 @@ -# Top level Makefile for the entire project -# -# This Makefile encodes the "go generate" prerequisites ensuring that the proper tooling is installed and -# that the generate steps are executed when their prerequisite files change. -# -# This Makefile follows a few conventions: -# -# * All cmds must be added to this top level Makefile. -# * All binaries are placed in ./bin, its recommended to add this directory to your PATH. 
-# -export GOPATH=$(shell go env GOPATH) -export GOOS=$(shell go env GOOS) -export GOARCH=$(shell go env GOARCH) - -ifneq (,$(filter $(GOARCH),amd64 s390x)) - # Including the assets tag requires the UI to be built for compilation to succeed. - # Don't force it for running tests. - GO_TEST_TAGS := - GO_BUILD_TAGS := assets -else - # noasm needed to avoid a panic in Flux for non-amd64, non-s390x. - GO_TEST_TAGS := noasm - GO_BUILD_TAGS := assets,noasm -endif - -# Tags used for builds and tests on all architectures -COMMON_TAGS := sqlite_foreign_keys,sqlite_json - -GO_TEST_ARGS := -tags '$(COMMON_TAGS),$(GO_TEST_TAGS)' -GO_BUILD_ARGS := -tags '$(COMMON_TAGS),$(GO_BUILD_TAGS)' - -# Use default flags, but allow adding -gcflags "..." if desired. Eg, for debug -# builds, may want to use GCFLAGS="all=-N -l" in the build environment. -GCFLAGS ?= -ifneq ($(GCFLAGS),) -GO_BUILD_ARGS += -gcflags "$(GCFLAGS)" -endif - -ifeq ($(OS), Windows_NT) - VERSION := $(shell git describe --exact-match --tags 2>nil) -else - VERSION := $(shell git describe --exact-match --tags 2>/dev/null) -endif -COMMIT := $(shell git rev-parse --short HEAD) - -LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -ifdef VERSION - LDFLAGS += -X main.version=$(VERSION) -endif - -# Allow for `go test` to be swapped out by other tooling, i.e. `gotestsum` -GO_TEST_CMD=go test -# Allow for a subset of tests to be specified. -GO_TEST_PATHS=./... - -# Test vars can be used by all recursive Makefiles -export PKG_CONFIG:=$(PWD)/scripts/pkg-config.sh -export GO_BUILD=env GO111MODULE=on go build $(GO_BUILD_ARGS) -ldflags "$(LDFLAGS)" -export GO_INSTALL=env GO111MODULE=on go install $(GO_BUILD_ARGS) -ldflags "$(LDFLAGS)" -export GO_TEST=env GOTRACEBACK=all GO111MODULE=on $(GO_TEST_CMD) $(GO_TEST_ARGS) -# Do not add GO111MODULE=on to the call to go generate so it doesn't pollute the environment. -export GO_GENERATE=go generate $(GO_BUILD_ARGS) -export GO_VET=env GO111MODULE=on go vet $(GO_TEST_ARGS) -export GO_RUN=env GO111MODULE=on go run $(GO_BUILD_ARGS) -export PATH := $(PWD)/bin/$(GOOS):$(PATH) - - -# All go source files -SOURCES := $(shell find . -name '*.go' -not -name '*_test.go') go.mod go.sum - -# All go source files excluding the vendored sources. -SOURCES_NO_VENDOR := $(shell find . -path ./vendor -prune -o -name "*.go" -not -name '*_test.go' -print) - -# List of binary cmds to build -CMDS := \ - bin/$(GOOS)/influxd - -all: generate $(CMDS) - -# -# Define targets for commands -# -bin/$(GOOS)/influxd: $(SOURCES) - $(GO_BUILD) -o $@ ./cmd/$(shell basename "$@") - -influxd: bin/$(GOOS)/influxd - -static/data/build: scripts/fetch-ui-assets.sh - ./scripts/fetch-ui-assets.sh - -static/data/swagger.json: scripts/fetch-swagger.sh - ./scripts/fetch-swagger.sh - -# static/static_gen.go is the output of go-bindata, embedding all assets used by the UI. -static/static_gen.go: static/data/build static/data/swagger.json - $(GO_GENERATE) ./static - -# -# Define action only targets -# - -fmt: $(SOURCES_NO_VENDOR) - ./etc/fmt.sh - -checkfmt: - ./etc/checkfmt.sh - $(GO_RUN) github.com/editorconfig-checker/editorconfig-checker/cmd/editorconfig-checker - -tidy: - GO111MODULE=on go mod tidy - -checktidy: - ./etc/checktidy.sh - -checkgenerate: - ./etc/checkgenerate.sh - -checksqlmigrations: - ./etc/check-sql-migrations.sh - -# generate-web-assets outputs all the files needed to link the UI to the back-end. -# Currently, none of these files are tracked by git. 
-generate-web-assets: static/static_gen.go - -# generate-sources outputs all the Go files generated from protobufs, tmpls, and other tooling. -# These files are tracked by git; CI will enforce that they are up-to-date. -generate-sources: protoc tmpl stringer goimports - $(GO_GENERATE) ./influxql/... ./models/... ./pkg/... ./storage/... ./tsdb/... ./v1/... - -generate: generate-web-assets generate-sources - -protoc: - $(GO_INSTALL) google.golang.org/protobuf/cmd/protoc-gen-go@v1.27.1 - -tmpl: - $(GO_INSTALL) github.com/benbjohnson/tmpl - -stringer: - $(GO_INSTALL) golang.org/x/tools/cmd/stringer - -goimports: - $(GO_INSTALL) golang.org/x/tools/cmd/goimports - -test-go: - $(GO_TEST) $(GO_TEST_PATHS) - -test-flux: - @./etc/test-flux.sh - -test-tls: - @./etc/test-tls.sh - -test-integration: GO_TAGS=integration -test-integration: - $(GO_TEST) -count=1 $(GO_TEST_PATHS) - -test: test-go - -test-go-race: - $(GO_TEST) -v -race -count=1 $(GO_TEST_PATHS) - -vet: - $(GO_VET) -v ./... - -bench: - $(GO_TEST) -bench=. -run=^$$ ./... - -build: all - -pkg-config: - $(GO_INSTALL) github.com/influxdata/pkg-config - -clean: - $(RM) -r static/static_gen.go static/data - $(RM) -r bin - $(RM) -r dist - -# generate feature flags -flags: - $(GO_GENERATE) ./kit/feature - -docker-image-influx: - @cp .gitignore .dockerignore - @docker image build -t influxdb:dev --target influx . - -dshell-image: - @cp .gitignore .dockerignore - @docker image build --build-arg "USERID=$(shell id -u)" -t influxdb:dshell --target dshell . - -dshell: dshell-image - @docker container run --rm -p 8086:8086 -p 8080:8080 -u $(shell id -u) -it -v $(shell pwd):/code -w /code influxdb:dshell - -# .PHONY targets represent actions that do not create an actual file. -.PHONY: all $(SUBDIRS) run fmt checkfmt tidy checktidy checkgenerate test test-go test-go-race test-tls bench clean node_modules vet nightly dist protoc influxd libflux flags dshell dclean docker-image-flux docker-image-influx pkg-config diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 2517ee1da6e..00000000000 --- a/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2018 InfluxData - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/README.md b/README.md deleted file mode 100644 index da6a17c7f87..00000000000 --- a/README.md +++ /dev/null @@ -1,220 +0,0 @@ -# InfluxDB -
[InfluxDB logo and status badges: CircleCI, Slack, Docker Pulls]
Website • Documentation • InfluxDB University • Blog
- ---- - -InfluxDB is an open source time series platform. This includes APIs for storing and querying data, processing it in the background for ETL or monitoring and alerting purposes, user dashboards, and visualizing and exploring the data and more. The master branch on this repo now represents the latest InfluxDB, which now includes functionality for Kapacitor (background processing) and Chronograf (the UI) all in a single binary. - -The list of InfluxDB Client Libraries that are compatible with the latest version can be found in [our documentation](https://docs.influxdata.com/influxdb/latest/tools/client-libraries/). - -If you are looking for the 1.x line of releases, there are branches for each minor version as well as a `master-1.x` branch that will contain the code for the next 1.x release. The master-1.x [working branch is here](https://github.com/influxdata/influxdb/tree/master-1.x). The [InfluxDB 1.x Go Client can be found here](https://github.com/influxdata/influxdb1-client). - -| Try **InfluxDB Cloud** for free and get started fast with no local setup required. Click [**here**](https://cloud2.influxdata.com/signup) to start building your application on InfluxDB Cloud. | -|:------| - -## Install - -We have nightly and versioned Docker images, Debian packages, RPM packages, and tarballs of InfluxDB available at the [InfluxData downloads page](https://portal.influxdata.com/downloads/). We also provide the `influx` command line interface (CLI) client as a separate binary available at the same location. - -If you are interested in building from source, see the [building from source](CONTRIBUTING.md#building-from-source) guide for contributors. - - - - - -## Get Started - -For a complete getting started guide, please see our full [online documentation site](https://docs.influxdata.com/influxdb/latest/). - -To write and query data or use the API in any way, you'll need to first create a user, credentials, organization and bucket. -Everything in InfluxDB is organized under a concept of an organization. The API is designed to be multi-tenant. -Buckets represent where you store time series data. -They're synonymous with what was previously in InfluxDB 1.x a database and retention policy. - -The simplest way to get set up is to point your browser to [http://localhost:8086](http://localhost:8086) and go through the prompts. - -You can also get set up from the CLI using the command `influx setup`: - - -```bash -$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx setup -Welcome to InfluxDB 2.0! -Please type your primary username: marty - -Please type your password: - -Please type your password again: - -Please type your primary organization name.: InfluxData - -Please type your primary bucket name.: telegraf - -Please type your retention period in hours. -Or press ENTER for infinite.: 72 - - -You have entered: - Username: marty - Organization: InfluxData - Bucket: telegraf - Retention Period: 72 hrs -Confirm? (y/n): y - -UserID Username Organization Bucket -033a3f2c5ccaa000 marty InfluxData Telegraf -Your token has been stored in /Users/marty/.influxdbv2/credentials -``` - -You can run this command non-interactively using the `-f, --force` flag if you are automating the setup. 
-Some added flags can help: -```bash -$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx setup \ ---username marty \ ---password F1uxKapacit0r85 \ ---org InfluxData \ ---bucket telegraf \ ---retention 168 \ ---token where-were-going-we-dont-need-roads \ ---force -``` - -Once setup is complete, a configuration profile is created to allow you to interact with your local InfluxDB without passing in credentials each time. You can list and manage those profiles using the `influx config` command. -```bash -$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx config -Active Name URL Org -* default http://localhost:8086 InfluxData -``` - -## Write Data -Write to measurement `m`, with tag `v=2`, in bucket `telegraf`, which belongs to organization `InfluxData`: - -```bash -$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx write --bucket telegraf --precision s "m v=2 $(date +%s)" -``` - -Since you have a default profile set up, you can omit the Organization and Token from the command. - -Write the same point using `curl`: - -```bash -curl --header "Authorization: Token $(bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx auth list --json | jq -r '.[0].token')" \ ---data-raw "m v=2 $(date +%s)" \ -"http://localhost:8086/api/v2/write?org=InfluxData&bucket=telegraf&precision=s" -``` - -Read that back with a simple Flux query: - -```bash -$ bin/$(uname -s | tr '[:upper:]' '[:lower:]')/influx query 'from(bucket:"telegraf") |> range(start:-1h)' -Result: _result -Table: keys: [_start, _stop, _field, _measurement] - _start:time _stop:time _field:string _measurement:string _time:time _value:float ------------------------------- ------------------------------ ---------------------- ---------------------- ------------------------------ ---------------------------- -2019-12-30T22:19:39.043918000Z 2019-12-30T23:19:39.043918000Z v m 2019-12-30T23:17:02.000000000Z 2 -``` - -Use the `-r, --raw` option to return the raw flux response from the query. This is useful for moving data from one instance to another as the `influx write` command can accept the Flux response using the `--format csv` option. - -## Script with Flux - -Flux (previously named IFQL) is an open source functional data scripting language designed for querying, analyzing, and acting on data. Flux supports multiple data source types, including: - -- Time series databases (such as InfluxDB) -- Relational SQL databases (such as MySQL and PostgreSQL) -- CSV - -The source for Flux is [available on GitHub](https://github.com/influxdata/flux). -To learn more about Flux, see the latest [InfluxData Flux documentation](https://docs.influxdata.com/flux/) and [CTO Paul Dix's presentation](https://speakerdeck.com/pauldix/flux-number-fluxlang-a-new-time-series-data-scripting-language). - -## Contribute to the Project - -InfluxDB is an [MIT licensed](LICENSE) open source project and we love our community. The fastest way to get something fixed is to open a PR. Check out our [contributing](CONTRIBUTING.md) guide if you're interested in helping out. Also, join us on our [Community Slack Workspace](https://influxdata.com/slack) if you have questions or comments for our engineering teams. - -## CI and Static Analysis - -### CI - -All pull requests will run through CI, which is currently hosted by Circle. -Community contributors should be able to see the outcome of this process by looking at the checks on their PR. -Please fix any issues to ensure a prompt review from members of the team. 
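
One way to catch most of these failures before CI does is to run the project's own make targets locally. A possible pre-push sequence, using targets defined in the repository's `GNUmakefile` (the exact selection here is only a suggestion):

```bash
# Suggested local pre-flight before opening or updating a PR; target names come from the GNUmakefile.
$ make fmt         # format Go sources
$ make checkfmt    # verify formatting and editorconfig rules
$ make vet         # run go vet with the project's build tags
$ make test-go     # run the Go unit tests
```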
- -The InfluxDB project is used internally in a number of proprietary InfluxData products, and as such, PRs and changes need to be tested internally. -This can take some time, and is not really visible to community contributors. - -### Static Analysis - -This project uses the following static analysis tools. -Failure during the running of any of these tools results in a failed build. -Generally, code must be adjusted to satisfy these tools, though there are exceptions. - -- [go vet](https://golang.org/cmd/vet/) checks for Go code that should be considered incorrect. -- [go fmt](https://golang.org/cmd/gofmt/) checks that Go code is correctly formatted. -- [go mod tidy](https://tip.golang.org/cmd/go/#hdr-Add_missing_and_remove_unused_modules) ensures that the source code and go.mod agree. -- [staticcheck](https://staticcheck.io/docs/) checks for things like: unused code, code that can be simplified, code that is incorrect and code that will have performance issues. - -### staticcheck - -If your PR fails `staticcheck` it is easy to dig into why it failed, and also to fix the problem. -First, take a look at the error message in Circle under the `staticcheck` build section, e.g., - -``` -tsdb/tsm1/encoding.gen.go:1445:24: func BooleanValues.assertOrdered is unused (U1000) -tsdb/tsm1/encoding.go:172:7: receiver name should not be an underscore, omit the name if it is unused (ST1006) -``` - -Next, go and take a [look here](http://next.staticcheck.io/docs/checks) for some clarification on the error code that you have received, e.g., `U1000`. -The docs will tell you what's wrong, and often what you need to do to fix the issue. - -#### Generated Code - -Sometimes generated code will contain unused code or occasionally that will fail a different check. -`staticcheck` allows for [entire files](http://next.staticcheck.io/docs/#ignoring-problems) to be ignored, though it's not ideal. -A linter directive, in the form of a comment, must be placed within the generated file. -This is problematic because it will be erased if the file is re-generated. -Until a better solution comes about, below is the list of generated files that need an ignores comment. -If you re-generate a file and find that `staticcheck` has failed, please see this list below for what you need to put back: - -| File | Comment | -| :--------------------: | :--------------------------------------------------------------: | -| query/promql/promql.go | //lint:file-ignore SA6001 Ignore all unused code, it's generated | - -#### End-to-End Tests - -CI also runs end-to-end tests. These test the integration between the `influxd` server the UI. -Since the UI is used by interal repositories as well as the `influxdb` repository, the -end-to-end tests cannot be run on forked pull requests or run locally. The extent of end-to-end -testing required for forked pull requests will be determined as part of the review process. - -## Additional Resources -- [InfluxDB Tips and Tutorials](https://www.influxdata.com/blog/category/tech/influxdb/) -- [InfluxDB Essentials Course](https://university.influxdata.com/courses/influxdb-essentials-tutorial/) -- [Exploring InfluxDB Cloud Course](https://university.influxdata.com/courses/exploring-influxdb-cloud-tutorial/) \ No newline at end of file diff --git a/SECURITY.md b/SECURITY.md deleted file mode 100644 index 7894b690c7a..00000000000 --- a/SECURITY.md +++ /dev/null @@ -1,16 +0,0 @@ -# Security Policy - -## Supported Versions - -These versions of InfluxDB are currently being supported with security updates. 
- -| Version | Supported | -| ------- | ------------------ | -| 2.0.x | :white_check_mark: | -| 1.8.x | :white_check_mark: | -| 1.7.x | :white_check_mark: | -| < 1.7.x | :x: | - -## Reporting a Vulnerability - -InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about security vulnerability reporting, including our GPG key, can be found at https://www.influxdata.com/how-to-report-security-vulnerabilities/. diff --git a/annotation.go b/annotation.go deleted file mode 100644 index 7e969870580..00000000000 --- a/annotation.go +++ /dev/null @@ -1,483 +0,0 @@ -package influxdb - -import ( - "context" - "database/sql/driver" - "encoding/json" - "fmt" - "regexp" - "strings" - "time" - "unicode/utf8" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - errEmptySummary = &errors.Error{ - Code: errors.EInvalid, - Msg: "summary cannot be empty", - } - errSummaryTooLong = &errors.Error{ - Code: errors.EInvalid, - Msg: "summary must be less than 255 characters", - } - errStreamTagTooLong = &errors.Error{ - Code: errors.EInvalid, - Msg: "stream tag must be less than 255 characters", - } - errStreamNameTooLong = &errors.Error{ - Code: errors.EInvalid, - Msg: "stream name must be less than 255 characters", - } - errStreamDescTooLong = &errors.Error{ - Code: errors.EInvalid, - Msg: "stream description must be less than 1024 characters", - } - errStickerTooLong = &errors.Error{ - Code: errors.EInvalid, - Msg: "stickers must be less than 255 characters", - } - errMsgTooLong = &errors.Error{ - Code: errors.EInvalid, - Msg: "message must be less than 4096 characters", - } - errReversedTimes = &errors.Error{ - Code: errors.EInvalid, - Msg: "start time must come before end time", - } - errMissingStreamName = &errors.Error{ - Code: errors.EInvalid, - Msg: "stream name must be set", - } - errMissingStreamTagOrId = &errors.Error{ - Code: errors.EInvalid, - Msg: "stream tag or id must be set", - } - errMissingEndTime = &errors.Error{ - Code: errors.EInvalid, - Msg: "end time must be set", - } - errMissingStartTime = &errors.Error{ - Code: errors.EInvalid, - Msg: "start time must be set", - } -) - -func invalidStickerError(s string) error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("invalid sticker: %q", s), - } -} - -func stickerSliceToMap(stickers []string) (map[string]string, error) { - stickerMap := map[string]string{} - - for i := range stickers { - if stick0, stick1, found := strings.Cut(stickers[i], "="); found { - stickerMap[stick0] = stick1 - } else { - return nil, invalidStickerError(stickers[i]) - } - } - - return stickerMap, nil -} - -// AnnotationService is the service contract for Annotations -type AnnotationService interface { - // CreateAnnotations creates annotations. - CreateAnnotations(ctx context.Context, orgID platform.ID, create []AnnotationCreate) ([]AnnotationEvent, error) - // ListAnnotations lists all annotations matching the filter. - ListAnnotations(ctx context.Context, orgID platform.ID, filter AnnotationListFilter) ([]StoredAnnotation, error) - // GetAnnotation gets an annotation by id. - GetAnnotation(ctx context.Context, id platform.ID) (*StoredAnnotation, error) - // DeleteAnnotations deletes annotations matching the filter. 
- DeleteAnnotations(ctx context.Context, orgID platform.ID, delete AnnotationDeleteFilter) error - // DeleteAnnotation deletes an annotation by id. - DeleteAnnotation(ctx context.Context, id platform.ID) error - // UpdateAnnotation updates an annotation. - UpdateAnnotation(ctx context.Context, id platform.ID, update AnnotationCreate) (*AnnotationEvent, error) - - // ListStreams lists all streams matching the filter. - ListStreams(ctx context.Context, orgID platform.ID, filter StreamListFilter) ([]StoredStream, error) - // CreateOrUpdateStream creates or updates the matching stream by name. - CreateOrUpdateStream(ctx context.Context, orgID platform.ID, stream Stream) (*ReadStream, error) - // GetStream gets a stream by id. Currently this is only used for authorization, and there are no - // API routes for getting a single stream by ID. - GetStream(ctx context.Context, id platform.ID) (*StoredStream, error) - // UpdateStream updates the stream by the ID. - UpdateStream(ctx context.Context, id platform.ID, stream Stream) (*ReadStream, error) - // DeleteStreams deletes one or more streams by name. - DeleteStreams(ctx context.Context, orgID platform.ID, delete BasicStream) error - // DeleteStreamByID deletes the stream metadata by id. - DeleteStreamByID(ctx context.Context, id platform.ID) error -} - -// AnnotationEvent contains fields for annotating an event. -type AnnotationEvent struct { - ID platform.ID `json:"id,omitempty"` // ID is the annotation ID. - AnnotationCreate // AnnotationCreate defines the common input/output bits of an annotation. -} - -// AnnotationCreate contains user providable fields for annotating an event. -type AnnotationCreate struct { - StreamTag string `json:"stream,omitempty"` // StreamTag provides a means to logically group a set of annotated events. - Summary string `json:"summary"` // Summary is the only field required to annotate an event. - Message string `json:"message,omitempty"` // Message provides more details about the event being annotated. - Stickers AnnotationStickers `json:"stickers,omitempty"` // Stickers are like tags, but named something obscure to differentiate them from influx tags. They are there to differentiate an annotated event. - EndTime *time.Time `json:"endTime,omitempty"` // EndTime is the time of the event being annotated. Defaults to now if not set. - StartTime *time.Time `json:"startTime,omitempty"` // StartTime is the start time of the event being annotated. Defaults to EndTime if not set. -} - -// StoredAnnotation represents annotation data to be stored in the database. -type StoredAnnotation struct { - ID platform.ID `db:"id"` // ID is the annotation's id. - OrgID platform.ID `db:"org_id"` // OrgID is the annotations's owning organization. - StreamID platform.ID `db:"stream_id"` // StreamID is the id of a stream. - StreamTag string `db:"stream"` // StreamTag is the name of a stream (when selecting with join of streams). - Summary string `db:"summary"` // Summary is the summary of the annotated event. - Message string `db:"message"` // Message is a longer description of the annotated event. - Stickers AnnotationStickers `db:"stickers"` // Stickers are additional labels to group annotations by. - Duration string `db:"duration"` // Duration is the time range (with zone) of an annotated event. - Lower string `db:"lower"` // Lower is the time an annotated event begins. - Upper string `db:"upper"` // Upper is the time an annotated event ends. 
-} - -// ToCreate is a utility method for converting a StoredAnnotation to an AnnotationCreate type -func (s StoredAnnotation) ToCreate() (*AnnotationCreate, error) { - et, err := time.Parse(time.RFC3339Nano, s.Upper) - if err != nil { - return nil, err - } - - st, err := time.Parse(time.RFC3339Nano, s.Lower) - if err != nil { - return nil, err - } - - return &AnnotationCreate{ - StreamTag: s.StreamTag, - Summary: s.Summary, - Message: s.Message, - Stickers: s.Stickers, - EndTime: &et, - StartTime: &st, - }, nil -} - -// ToEvent is a utility method for converting a StoredAnnotation to an AnnotationEvent type -func (s StoredAnnotation) ToEvent() (*AnnotationEvent, error) { - c, err := s.ToCreate() - if err != nil { - return nil, err - } - - return &AnnotationEvent{ - ID: s.ID, - AnnotationCreate: *c, - }, nil -} - -type AnnotationStickers map[string]string - -// Value implements the database/sql Valuer interface for adding AnnotationStickers to the database -// Stickers are stored in the database as a slice of strings like "[key=val]" -// They are encoded into a JSON string for storing into the database, and the JSON sqlite extension is -// able to manipulate them like an object. -func (a AnnotationStickers) Value() (driver.Value, error) { - stickSlice := make([]string, 0, len(a)) - - for k, v := range a { - stickSlice = append(stickSlice, fmt.Sprintf("%s=%s", k, v)) - } - - sticks, err := json.Marshal(stickSlice) - if err != nil { - return nil, err - } - - return string(sticks), nil -} - -// Scan implements the database/sql Scanner interface for retrieving AnnotationStickers from the database -// The string is decoded into a slice of strings, which are then converted back into a map -func (a *AnnotationStickers) Scan(value interface{}) error { - vString, ok := value.(string) - if !ok { - return &errors.Error{ - Code: errors.EInternal, - Msg: "could not load stickers from sqlite", - } - } - - var stickSlice []string - if err := json.NewDecoder(strings.NewReader(vString)).Decode(&stickSlice); err != nil { - return err - } - - stickMap, err := stickerSliceToMap(stickSlice) - if err != nil { - return nil - } - - *a = stickMap - return nil -} - -// Validate validates the creation object. -func (a *AnnotationCreate) Validate(nowFunc func() time.Time) error { - switch s := utf8.RuneCountInString(a.Summary); { - case s <= 0: - return errEmptySummary - case s > 255: - return errSummaryTooLong - } - - switch t := utf8.RuneCountInString(a.StreamTag); { - case t == 0: - a.StreamTag = "default" - case t > 255: - return errStreamTagTooLong - } - - if utf8.RuneCountInString(a.Message) > 4096 { - return errMsgTooLong - } - - for k, v := range a.Stickers { - if utf8.RuneCountInString(k) > 255 || utf8.RuneCountInString(v) > 255 { - return errStickerTooLong - } - } - - now := nowFunc() - if a.EndTime == nil { - a.EndTime = &now - } - - if a.StartTime == nil { - a.StartTime = a.EndTime - } - - if a.EndTime.Before(*(a.StartTime)) { - return errReversedTimes - } - - return nil -} - -// AnnotationDeleteFilter contains fields for deleting an annotated event. -type AnnotationDeleteFilter struct { - StreamTag string `json:"stream,omitempty"` // StreamTag provides a means to logically group a set of annotated events. - StreamID platform.ID `json:"streamID,omitempty"` // StreamID provides a means to logically group a set of annotated events. - Stickers map[string]string `json:"stickers,omitempty"` // Stickers are like tags, but named something obscure to differentiate them from influx tags. 
They are there to differentiate an annotated event. - EndTime *time.Time `json:"endTime,omitempty"` // EndTime is the time of the event being annotated. Defaults to now if not set. - StartTime *time.Time `json:"startTime,omitempty"` // StartTime is the start time of the event being annotated. Defaults to EndTime if not set. -} - -// Validate validates the deletion object. -func (a *AnnotationDeleteFilter) Validate() error { - var errs []string - - if len(a.StreamTag) == 0 && !a.StreamID.Valid() { - errs = append(errs, errMissingStreamTagOrId.Error()) - } - - if a.EndTime == nil { - errs = append(errs, errMissingEndTime.Error()) - } - - if a.StartTime == nil { - errs = append(errs, errMissingStartTime.Error()) - } - - if len(errs) > 0 { - return &errors.Error{ - Code: errors.EInvalid, - Msg: strings.Join(errs, "; "), - } - } - - if a.EndTime.Before(*(a.StartTime)) { - return errReversedTimes - } - - return nil -} - -var dre = regexp.MustCompile(`stickers\[(.*)\]`) - -// SetStickers sets the stickers from the query parameters. -func (a *AnnotationDeleteFilter) SetStickers(vals map[string][]string) { - if a.Stickers == nil { - a.Stickers = map[string]string{} - } - - for k, v := range vals { - if ss := dre.FindStringSubmatch(k); len(ss) == 2 && len(v) > 0 { - a.Stickers[ss[1]] = v[0] - } - } -} - -// AnnotationList defines the structure of the response when listing annotations. -type AnnotationList struct { - StreamTag string `json:"stream"` - Annotations []ReadAnnotation `json:"annotations"` -} - -// ReadAnnotations allows annotations to be assigned to a stream. -type ReadAnnotations map[string][]ReadAnnotation - -// MarshalJSON allows us to marshal the annotations belonging to a stream properly. -func (s ReadAnnotations) MarshalJSON() ([]byte, error) { - annotationList := []AnnotationList{} - - for k, v := range s { - annotationList = append(annotationList, AnnotationList{ - StreamTag: k, - Annotations: v, - }) - } - - return json.Marshal(annotationList) -} - -// ReadAnnotation defines the simplest form of an annotation to be returned. Essentially, it's AnnotationEvent without stream info. -type ReadAnnotation struct { - ID platform.ID `json:"id"` // ID is the annotation's generated id. - Summary string `json:"summary"` // Summary is the only field required to annotate an event. - Message string `json:"message,omitempty"` // Message provides more details about the event being annotated. - Stickers map[string]string `json:"stickers,omitempty"` // Stickers are like tags, but named something obscure to differentiate them from influx tags. They are there to differentiate an annotated event. - EndTime string `json:"endTime"` // EndTime is the time of the event being annotated. - StartTime string `json:"startTime,omitempty"` // StartTime is the start time of the event being annotated. -} - -// AnnotationListFilter is a selection filter for listing annotations. -type AnnotationListFilter struct { - StickerIncludes AnnotationStickers `json:"stickerIncludes,omitempty"` // StickerIncludes allows the user to filter annotated events based on it's sticker. - StreamIncludes []string `json:"streamIncludes,omitempty"` // StreamIncludes allows the user to filter annotated events by stream. - BasicFilter -} - -// Validate validates the filter. -func (f *AnnotationListFilter) Validate(nowFunc func() time.Time) error { - return f.BasicFilter.Validate(nowFunc) -} - -var re = regexp.MustCompile(`stickerIncludes\[(.*)\]`) - -// SetStickerIncludes sets the stickerIncludes from the query parameters. 
-func (f *AnnotationListFilter) SetStickerIncludes(vals map[string][]string) { - if f.StickerIncludes == nil { - f.StickerIncludes = map[string]string{} - } - - for k, v := range vals { - if ss := re.FindStringSubmatch(k); len(ss) == 2 && len(v) > 0 { - f.StickerIncludes[ss[1]] = v[0] - } - } -} - -// StreamListFilter is a selection filter for listing streams. Streams are not considered first class resources, but depend on an annotation using them. -type StreamListFilter struct { - StreamIncludes []string `json:"streamIncludes,omitempty"` // StreamIncludes allows the user to filter streams returned. - BasicFilter -} - -// Validate validates the filter. -func (f *StreamListFilter) Validate(nowFunc func() time.Time) error { - return f.BasicFilter.Validate(nowFunc) -} - -// Stream defines the stream metadata. Used in create and update requests/responses. Delete requests will only require stream name. -type Stream struct { - Name string `json:"stream"` // Name is the name of a stream. - Description string `json:"description,omitempty"` // Description is more information about a stream. -} - -// ReadStream defines the returned stream. -type ReadStream struct { - ID platform.ID `json:"id" db:"id"` // ID is the id of a stream. - Name string `json:"stream" db:"name"` // Name is the name of a stream. - Description string `json:"description,omitempty" db:"description"` // Description is more information about a stream. - CreatedAt time.Time `json:"createdAt" db:"created_at"` // CreatedAt is a timestamp. - UpdatedAt time.Time `json:"updatedAt" db:"updated_at"` // UpdatedAt is a timestamp. -} - -// IsValid validates the stream. -func (s *Stream) Validate(strict bool) error { - switch nameChars := utf8.RuneCountInString(s.Name); { - case nameChars <= 0: - if strict { - return errMissingStreamName - } - s.Name = "default" - case nameChars > 255: - return errStreamNameTooLong - } - - if utf8.RuneCountInString(s.Description) > 1024 { - return errStreamDescTooLong - } - - return nil -} - -// StoredStream represents stream data to be stored in the metadata database. -type StoredStream struct { - ID platform.ID `db:"id"` // ID is the stream's id. - OrgID platform.ID `db:"org_id"` // OrgID is the stream's owning organization. - Name string `db:"name"` // Name is the name of a stream. - Description string `db:"description"` // Description is more information about a stream. - CreatedAt time.Time `db:"created_at"` // CreatedAt is a timestamp. - UpdatedAt time.Time `db:"updated_at"` // UpdatedAt is a timestamp. -} - -// BasicStream defines a stream by name. Used for stream deletes. -type BasicStream struct { - Names []string `json:"stream"` -} - -// IsValid validates the stream is not empty. -func (s BasicStream) IsValid() bool { - if len(s.Names) <= 0 { - return false - } - - for i := range s.Names { - if len(s.Names[i]) <= 0 { - return false - } - } - - return true -} - -// BasicFilter defines common filter options. -type BasicFilter struct { - StartTime *time.Time `json:"startTime,omitempty"` // StartTime is the time the event being annotated started. - EndTime *time.Time `json:"endTime,omitempty"` // EndTime is the time the event being annotated ended. -} - -// Validate validates the basic filter options, setting sane defaults where appropriate. 
-func (f *BasicFilter) Validate(nowFunc func() time.Time) error { - now := nowFunc().UTC().Truncate(time.Second) - if f.EndTime == nil || f.EndTime.IsZero() { - f.EndTime = &now - } - - if f.StartTime == nil { - f.StartTime = &time.Time{} - } - - if f.EndTime.Before(*(f.StartTime)) { - return errReversedTimes - } - - return nil -} diff --git a/annotation_test.go b/annotation_test.go deleted file mode 100644 index e8c48dafc22..00000000000 --- a/annotation_test.go +++ /dev/null @@ -1,592 +0,0 @@ -package influxdb - -import ( - "encoding/json" - "strings" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/stretchr/testify/require" -) - -var ( - testTime time.Time = time.Now() - testTime2 time.Time = testTime.Add(time.Minute) - - annID, _ = platform.IDFromString("2345678901234567") -) - -func nowFunc() time.Time { - return testTime -} - -func TestAnnotationCreate(t *testing.T) { - type tst struct { - name string - input AnnotationCreate - expected AnnotationCreate - err *errors.Error - } - - tests := []tst{ - { - name: "minimum valid create request", - input: AnnotationCreate{ - Summary: "this is a default annotation", - }, - expected: AnnotationCreate{ - StreamTag: "default", - Summary: "this is a default annotation", - EndTime: &testTime, - StartTime: &testTime, - }, - }, - { - name: "full valid create request", - input: AnnotationCreate{ - StreamTag: "other", - Summary: "this is another annotation", - Message: "This is a much longer description or message to add to the annotation summary", - Stickers: map[string]string{"product": "cloud"}, - EndTime: &testTime2, - StartTime: &testTime, - }, - expected: AnnotationCreate{ - StreamTag: "other", - Summary: "this is another annotation", - Message: "This is a much longer description or message to add to the annotation summary", - Stickers: map[string]string{"product": "cloud"}, - EndTime: &testTime2, - StartTime: &testTime, - }, - }, - { - name: "empty create request", - input: AnnotationCreate{}, - err: errEmptySummary, - }, - { - name: "end time before start create request", - input: AnnotationCreate{ - Summary: "this is a default annotation", - EndTime: &testTime, - StartTime: &testTime2, - }, - err: errReversedTimes, - }, - { - name: "default end time before start create request", - input: AnnotationCreate{ - Summary: "this is a default annotation", - StartTime: &testTime2, - }, - err: errReversedTimes, - }, - { - name: "summary too long", - input: AnnotationCreate{ - Summary: strings.Repeat("a", 256), - }, - err: errSummaryTooLong, - }, - { - name: "message too long", - input: AnnotationCreate{ - Summary: "longTom", - Message: strings.Repeat("a", 4097), - }, - err: errMsgTooLong, - }, - { - name: "stream tag too long", - input: AnnotationCreate{ - Summary: "longTom", - StreamTag: strings.Repeat("a", 256), - }, - err: errStreamTagTooLong, - }, - { - name: "sticker key too long", - input: AnnotationCreate{ - Summary: "longTom", - Stickers: map[string]string{strings.Repeat("1", 256): "val"}, - }, - err: errStickerTooLong, - }, - { - name: "sticker val too long", - input: AnnotationCreate{ - Summary: "longTom", - Stickers: map[string]string{"key": strings.Repeat("1", 256)}, - }, - err: errStickerTooLong, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - err := test.input.Validate(nowFunc) - if test.err != nil { - require.Equal(t, test.err, err) - return - } - - require.NoError(t, err) - 
require.Equal(t, test.expected, test.input) - }) - } -} - -func TestDeleteFilter(t *testing.T) { - type tst struct { - name string - input AnnotationDeleteFilter - expected AnnotationDeleteFilter - err *errors.Error - } - - tests := []tst{ - { - name: "minimum valid delete", - input: AnnotationDeleteFilter{ - StreamTag: "default", - EndTime: &testTime, - StartTime: &testTime, - }, - expected: AnnotationDeleteFilter{ - StreamTag: "default", - EndTime: &testTime, - StartTime: &testTime, - }, - }, - { - name: "full valid delete", - input: AnnotationDeleteFilter{ - StreamTag: "default", - Stickers: map[string]string{"product": "oss"}, - EndTime: &testTime, - StartTime: &testTime, - }, - expected: AnnotationDeleteFilter{ - StreamTag: "default", - Stickers: map[string]string{"product": "oss"}, - EndTime: &testTime, - StartTime: &testTime, - }, - }, - { - name: "missing stream tag", - input: AnnotationDeleteFilter{ - Stickers: map[string]string{"product": "oss"}, - EndTime: &testTime, - StartTime: &testTime, - }, - err: errMissingStreamTagOrId, - }, - { - name: "missing start time", - input: AnnotationDeleteFilter{ - StreamTag: "default", - Stickers: map[string]string{"product": "oss"}, - EndTime: &testTime, - }, - err: errMissingStartTime, - }, - { - name: "missing end time", - input: AnnotationDeleteFilter{ - StreamTag: "default", - Stickers: map[string]string{"product": "oss"}, - StartTime: &testTime, - }, - err: errMissingEndTime, - }, - { - name: "end time before start create request", - input: AnnotationDeleteFilter{ - StreamTag: "default", - Stickers: map[string]string{"product": "oss"}, - EndTime: &testTime, - StartTime: &testTime2, - }, - err: errReversedTimes, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - err := test.input.Validate() - if test.err != nil { - require.Equal(t, test.err, err) - return - } - - require.NoError(t, err) - require.Equal(t, test.expected, test.input) - }) - } -} - -func TestAnnotationListFilter(t *testing.T) { - type tst struct { - name string - input AnnotationListFilter - expected AnnotationListFilter - checkValue bool - err *errors.Error - } - - tests := []tst{ - { - name: "minimum valid", - input: AnnotationListFilter{ - BasicFilter: BasicFilter{ - EndTime: &testTime, - StartTime: &testTime, - }, - }, - expected: AnnotationListFilter{ - BasicFilter: BasicFilter{ - EndTime: &testTime, - StartTime: &testTime, - }, - }, - }, - { - name: "empty valid", - input: AnnotationListFilter{}, - expected: AnnotationListFilter{ - BasicFilter: BasicFilter{ - EndTime: &testTime, - StartTime: &testTime, - }, - }, - checkValue: true, - }, - { - name: "invalid due to reversed times", - input: AnnotationListFilter{ - BasicFilter: BasicFilter{ - EndTime: &testTime, - StartTime: &testTime2, - }, - }, - err: errReversedTimes, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - err := test.input.Validate(nowFunc) - if test.err != nil { - require.Equal(t, test.err, err) - return - } - - require.NoError(t, err) - if test.checkValue { - require.Equal(t, *test.expected.BasicFilter.StartTime, *test.expected.BasicFilter.EndTime) - } else { - require.Equal(t, test.expected, test.input) - } - }) - } -} - -func TestStreamListFilter(t *testing.T) { - type tst struct { - name string - input StreamListFilter - expected StreamListFilter - checkValue bool - err *errors.Error - } - - tests := []tst{ - { - name: "minimum valid", - input: StreamListFilter{ - BasicFilter: BasicFilter{ - EndTime: &testTime, - StartTime: &testTime, 
- }, - }, - expected: StreamListFilter{ - BasicFilter: BasicFilter{ - EndTime: &testTime, - StartTime: &testTime, - }, - }, - }, - { - name: "empty valid", - input: StreamListFilter{}, - expected: StreamListFilter{ - BasicFilter: BasicFilter{ - EndTime: &testTime, - StartTime: &testTime, - }, - }, - checkValue: true, - }, - { - name: "invalid due to reversed times", - input: StreamListFilter{ - BasicFilter: BasicFilter{ - EndTime: &testTime, - StartTime: &testTime2, - }, - }, - err: errReversedTimes, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - err := test.input.Validate(nowFunc) - if test.err != nil { - require.Equal(t, test.err, err) - return - } - - require.NoError(t, err) - if test.checkValue { - require.Equal(t, *test.expected.BasicFilter.StartTime, *test.expected.BasicFilter.EndTime) - } else { - require.Equal(t, test.expected, test.input) - } - }) - } -} - -func TestStreamIsValid(t *testing.T) { - type tst struct { - name string - input Stream - err *errors.Error - } - - tests := []tst{ - { - name: "minimum valid", - input: Stream{ - Name: "default", - }, - }, - { - name: "empty valid", - input: Stream{}, - }, - { - name: "invalid name too long", - input: Stream{ - Name: strings.Repeat("a", 512), - }, - err: errStreamNameTooLong, - }, - { - name: "invalid description too long", - input: Stream{ - Name: "longTom", - Description: strings.Repeat("a", 2048), - }, - err: errStreamDescTooLong, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if test.err != nil { - require.Equal(t, test.err, test.input.Validate(false)) - } else { - require.NoError(t, test.input.Validate(false)) - } - }) - } -} - -func TestBasicStreamIsValid(t *testing.T) { - type tst struct { - name string - input BasicStream - expected bool - } - - tests := []tst{ - { - name: "minimum valid", - input: BasicStream{ - Names: []string{"default"}, - }, - expected: true, - }, - { - name: "invalid", - input: BasicStream{}, - expected: false, - }, - { - name: "empty name", - input: BasicStream{Names: []string{""}}, - expected: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - require.Equal(t, test.expected, test.input.IsValid()) - }) - } -} - -func TestMashallReadAnnotations(t *testing.T) { - ra := ReadAnnotations{ - "default": []ReadAnnotation{ - { - ID: *annID, - Summary: "this is one annotation", - Stickers: map[string]string{"env": "testing"}, - StartTime: testTime.Format(time.RFC3339Nano), - EndTime: testTime2.Format(time.RFC3339Nano), - }, - { - ID: *annID, - Summary: "this is another annotation", - Stickers: map[string]string{"env": "testing"}, - StartTime: testTime.Format(time.RFC3339Nano), - EndTime: testTime.Format(time.RFC3339Nano), - }, - }, - "testing": []ReadAnnotation{ - { - ID: *annID, - Summary: "this is yet another annotation", - Stickers: map[string]string{"env": "testing"}, - StartTime: testTime.Format(time.RFC3339Nano), - EndTime: testTime.Format(time.RFC3339Nano), - }, - }, - } - - b, err := json.Marshal(ra) - require.NoError(t, err) - require.Greater(t, len(b), 0) -} - -func TestSetStickerIncludes(t *testing.T) { - type tst struct { - name string - input map[string][]string - expected AnnotationStickers - } - - tests := []tst{ - { - name: "with stickerIncludes", - input: map[string][]string{ - "stickerIncludes[product]": {"oss"}, - "stickerIncludes[author]": {"russ"}, - "streams": {"default", "blogs"}, - }, - expected: map[string]string{ - "product": "oss", - "author": "russ", - }, - }, - { - 
name: "no sticker includes", - input: map[string][]string{ - "startTime": {"2021-01-13T22%3A17%3A37.953Z"}, - "endTime": {"2021-01-13T22%3A17%3A37.953Z"}, - "streams": {"default", "blogs"}, - }, - expected: map[string]string{}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - f := AnnotationListFilter{} - f.SetStickerIncludes(test.input) - require.Equal(t, test.expected, f.StickerIncludes) - }) - } -} - -func TestSetStickers(t *testing.T) { - type tst struct { - name string - input map[string][]string - expected map[string]string - } - - tests := []tst{ - { - name: "with stickers", - input: map[string][]string{ - "stickers[product]": {"oss"}, - "stickers[author]": {"russ"}, - "streams": {"default", "blogs"}, - }, - expected: map[string]string{ - "product": "oss", - "author": "russ", - }, - }, - { - name: "no stickers", - input: map[string][]string{ - "startTime": {"2021-01-13T22%3A17%3A37.953Z"}, - "endTime": {"2021-01-13T22%3A17%3A37.953Z"}, - "streams": {"default", "blogs"}, - }, - expected: map[string]string{}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - f := AnnotationDeleteFilter{} - f.SetStickers(test.input) - require.Equal(t, test.expected, f.Stickers) - }) - } -} - -func TestStickerSliceToMap(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - stickers []string - want map[string]string - wantErr error - }{ - { - "good stickers", - []string{"good1=val1", "good2=val2"}, - map[string]string{"good1": "val1", "good2": "val2"}, - nil, - }, - { - "bad stickers", - []string{"this is an invalid sticker", "shouldbe=likethis"}, - nil, - invalidStickerError("this is an invalid sticker"), - }, - { - "no stickers", - []string{}, - map[string]string{}, - nil, - }, - } - - for _, tt := range tests { - got, err := stickerSliceToMap(tt.stickers) - require.Equal(t, tt.want, got) - require.Equal(t, tt.wantErr, err) - } -} diff --git a/annotations/README.md b/annotations/README.md deleted file mode 100644 index 9644528643c..00000000000 --- a/annotations/README.md +++ /dev/null @@ -1,33 +0,0 @@ -## Annotations - -This package provides an HTTP API for interacting with both annotations and -streams independently. The HTTP handlers are located in the `transport` folder. -The code for interacting with the sqlite datastore is located in the -`service.go` file. Definitions for the basic types & interfaces associated with -annotations and streams used throughout the platform are located in the -top-level `influxdb` package, in the `annotation.go` file. - -### Anatomy - -An annotation is, at its simplest, a textual note on a range of time. The start -and stop time of that range can be the same point in time, which represents an -annotation at a single instance. Annotations can also have "stickers". -"Stickers" allow users to "tag" the annotation with further granularity for -filtering in key-value pairs. Some examples of sticker key-value pairs are: -`"product: oss"`, `"product: cloud"`, or `"service: tasks"`, but keys and values -can be any string. - -Every annotation belongs to a single "stream". A "stream" represents a logical -grouping of annotated events. Some examples of stream names are: `"incidents"`, -`"deployments"`, or `"marketing"`, but can be any string. A stream can also have -a description to further clarify what annotated events may be expected in the -stream. - -### Use - -Requested annotations may be filtered by stream name, stickers, and/or time -range. 
Streams may also be retrieved, in order to view their description. If a -stream is deleted, all annotations associated with that stream are deleted as -well. Every annotation that is created must have a stream associated with it - -if a stream name is not provided when creating an annotation, it will be -assigned to the default stream. diff --git a/annotations/middleware_logging.go b/annotations/middleware_logging.go deleted file mode 100644 index b1471d7d7ab..00000000000 --- a/annotations/middleware_logging.go +++ /dev/null @@ -1,168 +0,0 @@ -package annotations - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "go.uber.org/zap" -) - -func NewLoggingService(logger *zap.Logger, underlying influxdb.AnnotationService) *loggingService { - return &loggingService{ - logger: logger, - underlying: underlying, - } -} - -type loggingService struct { - logger *zap.Logger - underlying influxdb.AnnotationService -} - -var _ influxdb.AnnotationService = (*loggingService)(nil) - -func (l loggingService) CreateAnnotations(ctx context.Context, orgID platform.ID, create []influxdb.AnnotationCreate) (an []influxdb.AnnotationEvent, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to create annotations", zap.Error(err), dur) - return - } - l.logger.Debug("annotations create", dur) - }(time.Now()) - return l.underlying.CreateAnnotations(ctx, orgID, create) -} - -func (l loggingService) ListAnnotations(ctx context.Context, orgID platform.ID, filter influxdb.AnnotationListFilter) (an []influxdb.StoredAnnotation, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find annotations", zap.Error(err), dur) - return - } - l.logger.Debug("annotations find", dur) - }(time.Now()) - return l.underlying.ListAnnotations(ctx, orgID, filter) -} - -func (l loggingService) GetAnnotation(ctx context.Context, id platform.ID) (an *influxdb.StoredAnnotation, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find annotation by ID", zap.Error(err), dur) - return - } - l.logger.Debug("annotation find by ID", dur) - }(time.Now()) - return l.underlying.GetAnnotation(ctx, id) -} - -func (l loggingService) DeleteAnnotations(ctx context.Context, orgID platform.ID, delete influxdb.AnnotationDeleteFilter) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to delete annotations", zap.Error(err), dur) - return - } - l.logger.Debug("annotations delete", dur) - }(time.Now()) - return l.underlying.DeleteAnnotations(ctx, orgID, delete) -} - -func (l loggingService) DeleteAnnotation(ctx context.Context, id platform.ID) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to delete annotation", zap.Error(err), dur) - return - } - l.logger.Debug("annotation delete", dur) - }(time.Now()) - return l.underlying.DeleteAnnotation(ctx, id) -} - -func (l loggingService) UpdateAnnotation(ctx context.Context, id platform.ID, update influxdb.AnnotationCreate) (an *influxdb.AnnotationEvent, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to update 
annotation", zap.Error(err), dur) - return - } - l.logger.Debug("annotation update", dur) - }(time.Now()) - return l.underlying.UpdateAnnotation(ctx, id, update) -} - -func (l loggingService) ListStreams(ctx context.Context, orgID platform.ID, filter influxdb.StreamListFilter) (stm []influxdb.StoredStream, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find streams", zap.Error(err), dur) - return - } - l.logger.Debug("streams find", dur) - }(time.Now()) - return l.underlying.ListStreams(ctx, orgID, filter) -} - -func (l loggingService) CreateOrUpdateStream(ctx context.Context, orgID platform.ID, stream influxdb.Stream) (stm *influxdb.ReadStream, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to create or update stream", zap.Error(err), dur) - return - } - l.logger.Debug("stream create or update", dur) - }(time.Now()) - return l.underlying.CreateOrUpdateStream(ctx, orgID, stream) -} - -func (l loggingService) UpdateStream(ctx context.Context, id platform.ID, stream influxdb.Stream) (stm *influxdb.ReadStream, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to update stream", zap.Error(err), dur) - return - } - l.logger.Debug("stream update", dur) - }(time.Now()) - return l.underlying.UpdateStream(ctx, id, stream) -} - -func (l loggingService) GetStream(ctx context.Context, id platform.ID) (stm *influxdb.StoredStream, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find stream by ID", zap.Error(err), dur) - return - } - l.logger.Debug("stream find by ID", dur) - }(time.Now()) - return l.underlying.GetStream(ctx, id) -} - -func (l loggingService) DeleteStreams(ctx context.Context, orgID platform.ID, delete influxdb.BasicStream) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to delete streams", zap.Error(err), dur) - return - } - l.logger.Debug("streams delete", dur) - }(time.Now()) - return l.underlying.DeleteStreams(ctx, orgID, delete) -} - -func (l loggingService) DeleteStreamByID(ctx context.Context, id platform.ID) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to delete stream", zap.Error(err), dur) - return - } - l.logger.Debug("stream delete", dur) - }(time.Now()) - return l.underlying.DeleteStreamByID(ctx, id) -} diff --git a/annotations/middleware_metrics.go b/annotations/middleware_metrics.go deleted file mode 100644 index d7609348772..00000000000 --- a/annotations/middleware_metrics.go +++ /dev/null @@ -1,94 +0,0 @@ -package annotations - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/metric" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/prometheus/client_golang/prometheus" -) - -func NewMetricCollectingService(reg prometheus.Registerer, underlying influxdb.AnnotationService, opts ...metric.ClientOptFn) *metricsService { - o := metric.ApplyMetricOpts(opts...) 
- return &metricsService{ - rec: metric.New(reg, o.ApplySuffix("annotation")), - underlying: underlying, - } -} - -type metricsService struct { - // RED metrics - rec *metric.REDClient - underlying influxdb.AnnotationService -} - -var _ influxdb.AnnotationService = (*metricsService)(nil) - -func (m metricsService) CreateAnnotations(ctx context.Context, orgID platform.ID, create []influxdb.AnnotationCreate) ([]influxdb.AnnotationEvent, error) { - rec := m.rec.Record("create_annotation") - ans, err := m.underlying.CreateAnnotations(ctx, orgID, create) - return ans, rec(err) -} - -func (m metricsService) ListAnnotations(ctx context.Context, orgID platform.ID, filter influxdb.AnnotationListFilter) ([]influxdb.StoredAnnotation, error) { - rec := m.rec.Record("find_annotations") - ans, err := m.underlying.ListAnnotations(ctx, orgID, filter) - return ans, rec(err) -} - -func (m metricsService) GetAnnotation(ctx context.Context, id platform.ID) (*influxdb.StoredAnnotation, error) { - rec := m.rec.Record("find_annotation_by_id") - an, err := m.underlying.GetAnnotation(ctx, id) - return an, rec(err) -} - -func (m metricsService) DeleteAnnotations(ctx context.Context, orgID platform.ID, delete influxdb.AnnotationDeleteFilter) error { - rec := m.rec.Record("delete_annotations") - return rec(m.underlying.DeleteAnnotations(ctx, orgID, delete)) -} - -func (m metricsService) DeleteAnnotation(ctx context.Context, id platform.ID) error { - rec := m.rec.Record("delete_annotation") - return rec(m.underlying.DeleteAnnotation(ctx, id)) -} - -func (m metricsService) UpdateAnnotation(ctx context.Context, id platform.ID, update influxdb.AnnotationCreate) (*influxdb.AnnotationEvent, error) { - rec := m.rec.Record("update_annotation") - an, err := m.underlying.UpdateAnnotation(ctx, id, update) - return an, rec(err) -} - -func (m metricsService) ListStreams(ctx context.Context, orgID platform.ID, filter influxdb.StreamListFilter) ([]influxdb.StoredStream, error) { - rec := m.rec.Record("find_streams") - stms, err := m.underlying.ListStreams(ctx, orgID, filter) - return stms, rec(err) -} - -func (m metricsService) CreateOrUpdateStream(ctx context.Context, orgID platform.ID, stream influxdb.Stream) (*influxdb.ReadStream, error) { - rec := m.rec.Record("create_or_update_stream") - stm, err := m.underlying.CreateOrUpdateStream(ctx, orgID, stream) - return stm, rec(err) -} - -func (m metricsService) GetStream(ctx context.Context, id platform.ID) (*influxdb.StoredStream, error) { - rec := m.rec.Record("find_stream_by_id") - stm, err := m.underlying.GetStream(ctx, id) - return stm, rec(err) -} - -func (m metricsService) UpdateStream(ctx context.Context, id platform.ID, stream influxdb.Stream) (*influxdb.ReadStream, error) { - rec := m.rec.Record("update_stream") - stm, err := m.underlying.UpdateStream(ctx, id, stream) - return stm, rec(err) -} - -func (m metricsService) DeleteStreams(ctx context.Context, orgID platform.ID, delete influxdb.BasicStream) error { - rec := m.rec.Record("delete_streams") - return rec(m.underlying.DeleteStreams(ctx, orgID, delete)) -} - -func (m metricsService) DeleteStreamByID(ctx context.Context, id platform.ID) error { - rec := m.rec.Record("delete_stream") - return rec(m.underlying.DeleteStreamByID(ctx, id)) -} diff --git a/annotations/service.go b/annotations/service.go deleted file mode 100644 index aef9c68306f..00000000000 --- a/annotations/service.go +++ /dev/null @@ -1,596 +0,0 @@ -package annotations - -import ( - "context" - "database/sql" - "errors" - "fmt" - "time" - - sq 
"github.com/Masterminds/squirrel" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - ierrors "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/sqlite" -) - -var ( - errAnnotationNotFound = &ierrors.Error{ - Code: ierrors.EInvalid, - Msg: "annotation not found", - } - errStreamNotFound = &ierrors.Error{ - Code: ierrors.EInvalid, - Msg: "stream not found", - } -) - -var _ influxdb.AnnotationService = (*Service)(nil) - -type Service struct { - store *sqlite.SqlStore - idGenerator platform.IDGenerator -} - -func NewService(store *sqlite.SqlStore) *Service { - return &Service{ - store: store, - idGenerator: snowflake.NewIDGenerator(), - } -} - -// CreateAnnotations creates annotations in the database for the provided orgID as defined by the provided list -// Streams corresponding to the StreamTag property of each annotation are created if they don't already exist -// as part of a transaction -func (s *Service) CreateAnnotations(ctx context.Context, orgID platform.ID, creates []influxdb.AnnotationCreate) ([]influxdb.AnnotationEvent, error) { - // Guard clause - an empty list was provided for some reason, immediately return an empty result - // set without doing the transaction - if len(creates) == 0 { - return []influxdb.AnnotationEvent{}, nil - } - - s.store.Mu.Lock() - defer s.store.Mu.Unlock() - - // store a unique list of stream names first. the invalid ID is a placeholder for the real id, - // which will be obtained separately - streamNamesIDs := make(map[string]platform.ID) - for _, c := range creates { - streamNamesIDs[c.StreamTag] = platform.InvalidID() - } - - // streamIDsNames is used for re-populating the resulting list of annotations with the stream names - // from the stream IDs returned from the database - streamIDsNames := make(map[platform.ID]string) - - tx, err := s.store.DB.BeginTxx(ctx, nil) - if err != nil { - tx.Rollback() - return nil, err - } - - // upsert each stream individually. a possible enhancement might be to do this as a single batched query - // it is unlikely that this would offer much benefit since there is currently no mechanism for creating large numbers - // of annotations simultaneously - now := time.Now() - for name := range streamNamesIDs { - query, args, err := newUpsertStreamQuery(orgID, s.idGenerator.ID(), now, influxdb.Stream{Name: name}) - if err != nil { - tx.Rollback() - return nil, err - } - - var streamID platform.ID - if err = tx.GetContext(ctx, &streamID, query, args...); err != nil { - tx.Rollback() - return nil, err - } - - streamNamesIDs[name] = streamID - streamIDsNames[streamID] = name - } - - // bulk insert for the creates. this also is unlikely to offer much performance benefit, but since the query - // is only used here it is easy enough to form to bulk query. - q := sq.Insert("annotations"). - Columns("id", "org_id", "stream_id", "summary", "message", "stickers", "duration", "lower", "upper"). - Suffix("RETURNING *") - - for _, create := range creates { - // double check that we have a valid name for this stream tag - error if we don't. this should never be an error. 
- streamID, ok := streamNamesIDs[create.StreamTag] - if !ok { - tx.Rollback() - return nil, &ierrors.Error{ - Code: ierrors.EInternal, - Msg: fmt.Sprintf("unable to find id for stream %q", create.StreamTag), - } - } - - // add the row to the query - newID := s.idGenerator.ID() - lower := create.StartTime.Format(time.RFC3339Nano) - upper := create.EndTime.Format(time.RFC3339Nano) - duration := timesToDuration(*create.StartTime, *create.EndTime) - q = q.Values(newID, orgID, streamID, create.Summary, create.Message, create.Stickers, duration, lower, upper) - } - - // get the query string and args list for the bulk insert - query, args, err := q.ToSql() - if err != nil { - tx.Rollback() - return nil, err - } - - // run the bulk insert and store the result - var res []*influxdb.StoredAnnotation - if err := tx.SelectContext(ctx, &res, query, args...); err != nil { - tx.Rollback() - return nil, err - } - - if err = tx.Commit(); err != nil { - return nil, err - } - - // add the stream names to the list of results - for _, a := range res { - a.StreamTag = streamIDsNames[a.StreamID] - } - - // convert the StoredAnnotation structs to AnnotationEvent structs before returning - return storedAnnotationsToEvents(res) -} - -// ListAnnotations returns a list of annotations from the database matching the filter -// For time range matching, sqlite is able to compare times with millisecond accuracy -func (s *Service) ListAnnotations(ctx context.Context, orgID platform.ID, filter influxdb.AnnotationListFilter) ([]influxdb.StoredAnnotation, error) { - // we need to explicitly format time strings here and elsewhere to ensure they are - // interpreted by the database consistently - sf := filter.StartTime.Format(time.RFC3339Nano) - ef := filter.EndTime.Format(time.RFC3339Nano) - - q := sq.Select("annotations.*", "streams.name AS stream"). - Distinct(). - InnerJoin("streams ON annotations.stream_id = streams.id"). - Where(sq.Eq{"annotations.org_id": orgID}). - Where(sq.GtOrEq{"lower": sf}). - Where(sq.LtOrEq{"upper": ef}) - - // If the filter contains stickers, use the json_each table value function to break out - // rows with the sticker array values. If the filter does not contain stickers, using - // the json_each TVF would exclude annotations with an empty array of stickers, so select - // from the annotations table only. This allows a filter with no sticker constraints to - // return annotations that don't have any stickers. - if len(filter.StickerIncludes) > 0 { - q = q.From("annotations, json_each(annotations.stickers) AS json") - - // Add sticker filters to the query - for k, v := range filter.StickerIncludes { - q = q.Where(sq.And{sq.Eq{"json.value": fmt.Sprintf("%s=%s", k, v)}}) - } - } else { - q = q.From("annotations") - } - - // Add stream name filters to the query - if len(filter.StreamIncludes) > 0 { - q = q.Where(sq.Eq{"stream": filter.StreamIncludes}) - } - - sql, args, err := q.ToSql() - if err != nil { - return nil, err - } - - ans := []influxdb.StoredAnnotation{} - if err := s.store.DB.SelectContext(ctx, &ans, sql, args...); err != nil { - return nil, err - } - - return ans, nil -} - -// GetAnnotation gets a single annotation by ID -func (s *Service) GetAnnotation(ctx context.Context, id platform.ID) (*influxdb.StoredAnnotation, error) { - q := sq.Select("annotations.*, streams.name AS stream"). - From("annotations"). - InnerJoin("streams ON annotations.stream_id = streams.id"). 
- Where(sq.Eq{"annotations.id": id}) - - query, args, err := q.ToSql() - if err != nil { - return nil, err - } - - var a influxdb.StoredAnnotation - if err := s.store.DB.GetContext(ctx, &a, query, args...); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, errAnnotationNotFound - } - - return nil, err - } - - return &a, nil -} - -// DeleteAnnotations deletes multiple annotations according to the provided filter -func (s *Service) DeleteAnnotations(ctx context.Context, orgID platform.ID, delete influxdb.AnnotationDeleteFilter) error { - s.store.Mu.Lock() - defer s.store.Mu.Unlock() - - sf := delete.StartTime.Format(time.RFC3339Nano) - ef := delete.EndTime.Format(time.RFC3339Nano) - - // This is a subquery that will be as part of a DELETE FROM ... WHERE id IN (subquery) - // A subquery is used because the json_each virtual table can only be used in a SELECT - subQ := sq.Select("annotations.id"). - Distinct(). - InnerJoin("streams ON annotations.stream_id = streams.id"). - Where(sq.Eq{"annotations.org_id": orgID}). - Where(sq.GtOrEq{"lower": sf}). - Where(sq.LtOrEq{"upper": ef}) - - // If the filter contains stickers, use the json_each table value function to break out - // rows with the sticker array values. If the filter does not contain stickers, using - // the json_each TVF would exclude annotations with an empty array of stickers, so select - // from the annotations table only. This allows a filter with no sticker constraints to - // delete annotations that don't have any stickers. - if len(delete.Stickers) > 0 { - subQ = subQ.From("annotations, json_each(annotations.stickers) AS json") - - // Add sticker filters to the subquery - for k, v := range delete.Stickers { - subQ = subQ.Where(sq.And{sq.Eq{"json.value": fmt.Sprintf("%s=%s", k, v)}}) - } - } else { - subQ = subQ.From("annotations") - } - - // Add the stream name filter to the subquery (if present) - if len(delete.StreamTag) > 0 { - subQ = subQ.Where(sq.Eq{"streams.name": delete.StreamTag}) - } - - // Add the stream ID filter to the subquery (if present) - if delete.StreamID.Valid() { - subQ = subQ.Where(sq.Eq{"stream_id": delete.StreamID}) - } - - // Parse the subquery into a string and list of args - subQuery, subArgs, err := subQ.ToSql() - if err != nil { - return err - } - - // Convert the subquery into a sq.Sqlizer so that it can be used in the actual DELETE - // operation. This is a bit of a hack since squirrel doesn't have great support for subqueries - // outside of SELECT statements - subExpr := sq.Expr("("+subQuery+")", subArgs...) - - q := sq. - Delete("annotations"). - Suffix("WHERE annotations.id IN"). - SuffixExpr(subExpr) - - query, args, err := q.ToSql() - - if err != nil { - return err - } - - if _, err := s.store.DB.ExecContext(ctx, query, args...); err != nil { - return err - } - - return nil -} - -// DeleteAnnotation deletes a single annotation by ID -func (s *Service) DeleteAnnotation(ctx context.Context, id platform.ID) error { - s.store.Mu.Lock() - defer s.store.Mu.Unlock() - - q := sq.Delete("annotations"). - Where(sq.Eq{"id": id}). 
- Suffix("RETURNING id") - - query, args, err := q.ToSql() - if err != nil { - return err - } - - var d platform.ID - if err := s.store.DB.GetContext(ctx, &d, query, args...); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return errAnnotationNotFound - } - - return err - } - - return nil -} - -// UpdateAnnotation updates a single annotation by ID -// In a similar fashion as CreateAnnotations, if the StreamTag in the update request does not exist, -// a stream will be created as part of a transaction with the update operation -func (s *Service) UpdateAnnotation(ctx context.Context, id platform.ID, update influxdb.AnnotationCreate) (*influxdb.AnnotationEvent, error) { - // get the full data for this annotation first so we can get its orgID - // this will ensure that the annotation already exists before starting the transaction - ann, err := s.GetAnnotation(ctx, id) - if err != nil { - return nil, err - } - - now := time.Now() - - // get a write lock on the database before starting the transaction to create/update the stream - // while simultaneously updating the annotation - s.store.Mu.Lock() - defer s.store.Mu.Unlock() - - tx, err := s.store.DB.BeginTxx(ctx, nil) - if err != nil { - tx.Rollback() - return nil, err - } - - query, args, err := newUpsertStreamQuery(ann.OrgID, s.idGenerator.ID(), now, influxdb.Stream{Name: update.StreamTag}) - if err != nil { - tx.Rollback() - return nil, err - } - - var streamID platform.ID - if err = tx.GetContext(ctx, &streamID, query, args...); err != nil { - tx.Rollback() - return nil, err - } - - q := sq.Update("annotations"). - SetMap(sq.Eq{ - "stream_id": streamID, - "summary": update.Summary, - "message": update.Message, - "stickers": update.Stickers, - "duration": timesToDuration(*update.StartTime, *update.EndTime), - "lower": update.StartTime.Format(time.RFC3339Nano), - "upper": update.EndTime.Format(time.RFC3339Nano), - }). - Where(sq.Eq{"id": id}). - Suffix("RETURNING *") - - query, args, err = q.ToSql() - if err != nil { - return nil, err - } - - var st influxdb.StoredAnnotation - err = tx.GetContext(ctx, &st, query, args...) - if err != nil { - tx.Rollback() - return nil, err - } - - if err = tx.Commit(); err != nil { - return nil, err - } - - // add the stream name to the result. we know that this StreamTag value was updated to the - // stream via the transaction having completed successfully. - st.StreamTag = update.StreamTag - - return st.ToEvent() -} - -// ListStreams returns a list of streams matching the filter for the provided orgID. -func (s *Service) ListStreams(ctx context.Context, orgID platform.ID, filter influxdb.StreamListFilter) ([]influxdb.StoredStream, error) { - q := sq.Select("id", "org_id", "name", "description", "created_at", "updated_at"). - From("streams"). - Where(sq.Eq{"org_id": orgID}) - - // Add stream name filters to the query - if len(filter.StreamIncludes) > 0 { - q = q.Where(sq.Eq{"name": filter.StreamIncludes}) - } - - sql, args, err := q.ToSql() - if err != nil { - return nil, err - } - - sts := []influxdb.StoredStream{} - err = s.store.DB.SelectContext(ctx, &sts, sql, args...) - if err != nil { - return nil, err - } - - return sts, nil -} - -// GetStream gets a single stream by ID -func (s *Service) GetStream(ctx context.Context, id platform.ID) (*influxdb.StoredStream, error) { - q := sq.Select("id", "org_id", "name", "description", "created_at", "updated_at"). - From("streams"). 
- Where(sq.Eq{"id": id}) - - query, args, err := q.ToSql() - if err != nil { - return nil, err - } - - var st influxdb.StoredStream - if err := s.store.DB.GetContext(ctx, &st, query, args...); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, errStreamNotFound - } - - return nil, err - } - - return &st, nil -} - -// CreateOrUpdateStream creates a new stream, or updates the description of an existing stream. -// Doesn't support updating a stream desctription to "". For that use the UpdateStream method. -func (s *Service) CreateOrUpdateStream(ctx context.Context, orgID platform.ID, stream influxdb.Stream) (*influxdb.ReadStream, error) { - s.store.Mu.Lock() - defer s.store.Mu.Unlock() - - newID := s.idGenerator.ID() - now := time.Now() - query, args, err := newUpsertStreamQuery(orgID, newID, now, stream) - if err != nil { - return nil, err - } - - var id platform.ID - if err = s.store.DB.GetContext(ctx, &id, query, args...); err != nil { - return nil, err - } - - // do a separate query to read the stream back from the database and return it. - // this is necessary because the sqlite driver does not support scanning time values from - // a RETURNING clause back into time.Time - return s.getReadStream(ctx, id) -} - -// UpdateStream updates a stream name and/or a description. It is strictly used for updating an existing stream. -func (s *Service) UpdateStream(ctx context.Context, id platform.ID, stream influxdb.Stream) (*influxdb.ReadStream, error) { - s.store.Mu.Lock() - defer s.store.Mu.Unlock() - - q := sq.Update("streams"). - SetMap(sq.Eq{ - "name": stream.Name, - "description": stream.Description, - "updated_at": sq.Expr(`datetime('now')`), - }). - Where(sq.Eq{"id": id}). - Suffix(`RETURNING id`) - - query, args, err := q.ToSql() - if err != nil { - return nil, err - } - - var newID platform.ID - err = s.store.DB.GetContext(ctx, &newID, query, args...) - if err != nil { - if err == sql.ErrNoRows { - return nil, errStreamNotFound - } - - return nil, err - } - - // do a separate query to read the stream back from the database and return it. - // this is necessary because the sqlite driver does not support scanning time values from - // a RETURNING clause back into time.Time - return s.getReadStream(ctx, newID) -} - -// DeleteStreams is used for deleting multiple streams by name -func (s *Service) DeleteStreams(ctx context.Context, orgID platform.ID, delete influxdb.BasicStream) error { - s.store.Mu.Lock() - defer s.store.Mu.Unlock() - - q := sq.Delete("streams"). - Where(sq.Eq{"org_id": orgID}). - Where(sq.Eq{"name": delete.Names}) - - query, args, err := q.ToSql() - if err != nil { - return err - } - - _, err = s.store.DB.ExecContext(ctx, query, args...) - if err != nil { - return err - } - - return nil -} - -// DeleteStreamByID deletes a single stream by ID. Returns an error if the ID could not be found. -func (s *Service) DeleteStreamByID(ctx context.Context, id platform.ID) error { - s.store.Mu.Lock() - defer s.store.Mu.Unlock() - - q := sq.Delete("streams"). - Where(sq.Eq{"id": id}). - Suffix("RETURNING id") - - query, args, err := q.ToSql() - if err != nil { - return err - } - - var d platform.ID - if err := s.store.DB.GetContext(ctx, &d, query, args...); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return errStreamNotFound - } - - return err - } - - return nil -} - -func newUpsertStreamQuery(orgID, newID platform.ID, t time.Time, stream influxdb.Stream) (string, []interface{}, error) { - q := sq.Insert("streams"). 
- Columns("id", "org_id", "name", "description", "created_at", "updated_at"). - Values(newID, orgID, stream.Name, stream.Description, t, t). - Suffix(`ON CONFLICT(org_id, name) DO UPDATE - SET - updated_at = excluded.updated_at, - description = IIF(length(excluded.description) = 0, description, excluded.description)`). - Suffix("RETURNING id") - - return q.ToSql() -} - -// getReadStream is a helper which should only be called when the stream has been verified to exist -// via an update or insert. -func (s *Service) getReadStream(ctx context.Context, id platform.ID) (*influxdb.ReadStream, error) { - q := sq.Select("id", "name", "description", "created_at", "updated_at"). - From("streams"). - Where(sq.Eq{"id": id}) - - query, args, err := q.ToSql() - if err != nil { - return nil, err - } - - r := &influxdb.ReadStream{} - if err := s.store.DB.GetContext(ctx, r, query, args...); err != nil { - return nil, err - } - - return r, nil -} - -func storedAnnotationsToEvents(stored []*influxdb.StoredAnnotation) ([]influxdb.AnnotationEvent, error) { - events := make([]influxdb.AnnotationEvent, 0, len(stored)) - for _, s := range stored { - c, err := s.ToCreate() - if err != nil { - return nil, err - } - - events = append(events, influxdb.AnnotationEvent{ - ID: s.ID, - AnnotationCreate: *c, - }) - } - - return events, nil -} - -func timesToDuration(l, u time.Time) string { - return fmt.Sprintf("[%s, %s]", l.Format(time.RFC3339Nano), u.Format(time.RFC3339Nano)) -} diff --git a/annotations/service_test.go b/annotations/service_test.go deleted file mode 100644 index ab9e0da80d3..00000000000 --- a/annotations/service_test.go +++ /dev/null @@ -1,1041 +0,0 @@ -//go:build sqlite_json && sqlite_foreign_keys - -package annotations - -import ( - "context" - "sort" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/sqlite" - "github.com/influxdata/influxdb/v2/sqlite/migrations" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -var ( - idGen = snowflake.NewIDGenerator() -) - -func TestAnnotationsCRUD(t *testing.T) { - t.Parallel() - - // intialize some variables that can be shared across tests - // the timeline for the 3 test annotations start & end times is visualized below. 
- // now - // v - // |---|---|---|---| - // ^ ^ ^ ^ ^ - // st1 et1 - // st2 et2 - // st3 et3 - // st4 et4 - - et1 := time.Now().UTC() - st1 := et1.Add(-10 * time.Minute) - - et2 := et1.Add(-5 * time.Minute) - st2 := et2.Add(-10 * time.Minute) - - et3 := et1.Add(-10 * time.Minute) - st3 := et2.Add(-15 * time.Minute) - - et4 := et3 - st4 := st3 - - // used for tests involving time filters - earlierEt1 := et1.Add(-1 * time.Millisecond) - laterSt3 := st3.Add(1 * time.Millisecond) - beforeAny := st3.Add(-1 * time.Millisecond) - afterAny := et1.Add(1 * time.Millisecond) - - orgID := *influxdbtesting.IDPtr(1) - otherOrgID := *influxdbtesting.IDPtr(2) - ctx := context.Background() - - s1 := influxdb.StoredAnnotation{ - OrgID: orgID, - StreamTag: "stream1", - Summary: "summary1", - Message: "message1", - Stickers: map[string]string{"stick1": "val1", "stick2": "val2"}, - Duration: timesToDuration(st1, et1), - Lower: st1.Format(time.RFC3339Nano), - Upper: et1.Format(time.RFC3339Nano), - } - - c1, err := s1.ToCreate() - require.NoError(t, err) - - s2 := influxdb.StoredAnnotation{ - OrgID: orgID, - StreamTag: "stream2", - Summary: "summary2", - Message: "message2", - Stickers: map[string]string{"stick2": "val2", "stick3": "val3", "stick4": "val4"}, - Duration: timesToDuration(st2, et2), - Lower: st2.Format(time.RFC3339Nano), - Upper: et2.Format(time.RFC3339Nano), - } - - c2, err := s2.ToCreate() - require.NoError(t, err) - - s3 := influxdb.StoredAnnotation{ - OrgID: orgID, - StreamTag: "stream2", - Summary: "summary3", - Message: "message3", - Stickers: map[string]string{"stick1": "val2"}, - Duration: timesToDuration(st3, et3), - Lower: st3.Format(time.RFC3339Nano), - Upper: et3.Format(time.RFC3339Nano), - } - - c3, err := s3.ToCreate() - require.NoError(t, err) - - // s4 is an annotation without any stickers, with the same start/end time as s3 - s4 := influxdb.StoredAnnotation{ - OrgID: orgID, - StreamTag: "stream4", - Summary: "summary4", - Message: "message4", - Stickers: map[string]string{}, - Duration: timesToDuration(st4, et4), - Lower: st3.Format(time.RFC3339Nano), - Upper: et3.Format(time.RFC3339Nano), - } - - c4, err := s4.ToCreate() - require.NoError(t, err) - - // helper function for setting up the database with data that can be used for tests - // that involve querying the database. uses the annotations objects initialized above - // via the closure. 
- populateAnnotationsData := func(t *testing.T, svc *Service) []influxdb.AnnotationEvent { - t.Helper() - - got, err := svc.CreateAnnotations(ctx, orgID, []influxdb.AnnotationCreate{*c1, *c2, *c3, *c4}) - require.NoError(t, err) - assertAnnotationEvents(t, got, []influxdb.AnnotationEvent{ - {AnnotationCreate: *c1}, - {AnnotationCreate: *c2}, - {AnnotationCreate: *c3}, - {AnnotationCreate: *c4}, - }) - - return got - } - - t.Run("create annotations", func(t *testing.T) { - svc := newTestService(t) - - tests := []struct { - name string - creates []influxdb.AnnotationCreate - want []influxdb.AnnotationEvent - }{ - { - "empty creates list returns empty events list", - []influxdb.AnnotationCreate{}, - []influxdb.AnnotationEvent{}, - }, - { - "creates annotations successfully", - []influxdb.AnnotationCreate{*c1, *c2, *c3, *c4}, - []influxdb.AnnotationEvent{ - {AnnotationCreate: *c1}, - {AnnotationCreate: *c2}, - {AnnotationCreate: *c3}, - {AnnotationCreate: *c4}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := svc.CreateAnnotations(ctx, orgID, tt.creates) - require.NoError(t, err) - assertAnnotationEvents(t, got, tt.want) - }) - } - }) - - t.Run("select with filters", func(t *testing.T) { - svc := newTestService(t) - populateAnnotationsData(t, svc) - - tests := []struct { - name string - orgID platform.ID - f influxdb.AnnotationListFilter - want []influxdb.StoredAnnotation - skip string // link to issue and/or reason - }{ - { - name: "time filter is inclusive - gets all", - orgID: orgID, - f: influxdb.AnnotationListFilter{ - BasicFilter: influxdb.BasicFilter{ - StartTime: &st3, - EndTime: &et1, - }, - }, - want: []influxdb.StoredAnnotation{s1, s2, s3, s4}, - skip: "", - }, - { - name: "doesn't get results for other org", - orgID: otherOrgID, - f: influxdb.AnnotationListFilter{ - BasicFilter: influxdb.BasicFilter{ - StartTime: &st3, - EndTime: &et1, - }, - }, - want: []influxdb.StoredAnnotation{}, - skip: "", - }, - { - name: "end time will filter out annotations", - orgID: orgID, - f: influxdb.AnnotationListFilter{ - BasicFilter: influxdb.BasicFilter{ - StartTime: &st3, - EndTime: &earlierEt1, - }, - }, - want: []influxdb.StoredAnnotation{s2, s3, s4}, - skip: "", - }, - { - name: "start time will filter out annotations", - orgID: orgID, - f: influxdb.AnnotationListFilter{ - BasicFilter: influxdb.BasicFilter{ - StartTime: &laterSt3, - EndTime: &et1, - }, - }, - want: []influxdb.StoredAnnotation{s1, s2}, - skip: "https://github.com/influxdata/influxdb/issues/23272", - }, - { - name: "time can filter out all annotations if it's too soon", - orgID: orgID, - f: influxdb.AnnotationListFilter{ - BasicFilter: influxdb.BasicFilter{ - StartTime: &beforeAny, - EndTime: &beforeAny, - }, - }, - want: []influxdb.StoredAnnotation{}, - skip: "https://github.com/influxdata/influxdb/issues/23272", - }, - { - name: "time can filter out all annotations if it's too late", - orgID: orgID, - f: influxdb.AnnotationListFilter{ - BasicFilter: influxdb.BasicFilter{ - StartTime: &afterAny, - EndTime: &afterAny, - }, - }, - want: []influxdb.StoredAnnotation{}, - skip: "", - }, - { - name: "time can filter out all annotations if it's too narrow", - orgID: orgID, - f: influxdb.AnnotationListFilter{ - BasicFilter: influxdb.BasicFilter{ - StartTime: &laterSt3, - EndTime: &et3, - }, - }, - want: []influxdb.StoredAnnotation{}, - skip: "", - }, - { - name: "can filter by stickers - one sticker matches one", - orgID: orgID, - f: influxdb.AnnotationListFilter{ - 
StickerIncludes: map[string]string{"stick1": "val2"}, - }, - want: []influxdb.StoredAnnotation{s3}, - skip: "", - }, - { - name: "can filter by stickers - one sticker matches multiple", - orgID: orgID, - f: influxdb.AnnotationListFilter{ - StickerIncludes: map[string]string{"stick2": "val2"}, - }, - want: []influxdb.StoredAnnotation{s1, s2}, - skip: "", - }, - { - name: "can filter by stickers - matching key but wrong value", - orgID: orgID, - f: influxdb.AnnotationListFilter{ - StickerIncludes: map[string]string{"stick2": "val3"}, - }, - want: []influxdb.StoredAnnotation{}, - skip: "", - }, - { - name: "can filter by stream - matches one", - orgID: orgID, - f: influxdb.AnnotationListFilter{ - StreamIncludes: []string{"stream1"}, - }, - want: []influxdb.StoredAnnotation{s1}, - skip: "", - }, - { - name: "can filter by stream - matches multiple", - orgID: orgID, - f: influxdb.AnnotationListFilter{ - StreamIncludes: []string{"stream2"}, - }, - want: []influxdb.StoredAnnotation{s2, s3}, - skip: "", - }, - { - name: "can filter by stream - no match", - orgID: orgID, - f: influxdb.AnnotationListFilter{ - StreamIncludes: []string{"badStream"}, - }, - want: []influxdb.StoredAnnotation{}, - skip: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if tt.skip != "" { - t.Skip(tt.skip) - } - tt.f.Validate(time.Now) - got, err := svc.ListAnnotations(ctx, tt.orgID, tt.f) - require.NoError(t, err) - assertStoredAnnotations(t, got, tt.want) - }) - } - }) - - t.Run("get by id", func(t *testing.T) { - svc := newTestService(t) - anns := populateAnnotationsData(t, svc) - - tests := []struct { - name string - id platform.ID - want *influxdb.AnnotationEvent - wantErr error - }{ - { - "gets the first one by id", - anns[0].ID, - &anns[0], - nil, - }, - { - "gets the second one by id", - anns[1].ID, - &anns[1], - nil, - }, - { - "has the correct error if not found", - idGen.ID(), - nil, - errAnnotationNotFound, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := svc.GetAnnotation(ctx, tt.id) - require.Equal(t, tt.wantErr, err) - - if tt.want == nil { - require.Nil(t, got) - } else { - e, err := got.ToEvent() - require.NoError(t, err) - require.Equal(t, tt.want, e) - } - }) - } - }) - - t.Run("delete multiple with a filter", func(t *testing.T) { - t.Run("delete by stream id", func(t *testing.T) { - svc := newTestService(t) - populateAnnotationsData(t, svc) - - ctx := context.Background() - - lf := influxdb.AnnotationListFilter{BasicFilter: influxdb.BasicFilter{}} - lf.Validate(time.Now) - ans, err := svc.ListAnnotations(ctx, orgID, lf) - require.NoError(t, err) - - annID1 := ans[0].ID - streamID1 := ans[0].StreamID - st1, err := time.Parse(time.RFC3339Nano, ans[0].Lower) - require.NoError(t, err) - et1, err := time.Parse(time.RFC3339Nano, ans[0].Upper) - require.NoError(t, err) - - streamID2 := ans[1].StreamID - st2, err := time.Parse(time.RFC3339Nano, ans[1].Lower) - require.NoError(t, err) - et2, err := time.Parse(time.RFC3339Nano, ans[1].Upper) - require.NoError(t, err) - - tests := []struct { - name string - deleteOrgID platform.ID - id platform.ID - filter influxdb.AnnotationDeleteFilter - shouldDelete bool - }{ - { - "matches stream id but not time range", - orgID, - annID1, - influxdb.AnnotationDeleteFilter{ - StreamID: streamID1, - StartTime: &st2, - EndTime: &et2, - }, - false, - }, - { - "matches time range but not stream id", - orgID, - annID1, - influxdb.AnnotationDeleteFilter{ - StreamID: streamID2, - StartTime: &st1, - EndTime: 
&et1, - }, - false, - }, - { - "doesn't delete for other org", - otherOrgID, - annID1, - influxdb.AnnotationDeleteFilter{ - StreamID: streamID1, - StartTime: &st1, - EndTime: &et1, - }, - false, - }, - { - "matches stream id and time range", - orgID, - annID1, - influxdb.AnnotationDeleteFilter{ - StreamID: streamID1, - StartTime: &st1, - EndTime: &et1, - }, - true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := svc.DeleteAnnotations(ctx, tt.deleteOrgID, tt.filter) - require.NoError(t, err) - - lf := influxdb.AnnotationListFilter{BasicFilter: influxdb.BasicFilter{}} - lf.Validate(time.Now) - list, err := svc.ListAnnotations(ctx, orgID, lf) - require.NoError(t, err) - get, getErr := svc.GetAnnotation(ctx, tt.id) - - if tt.shouldDelete { - require.Equal(t, 3, len(list)) - require.Nil(t, get) - require.Equal(t, errAnnotationNotFound, getErr) - } else { - require.Equal(t, 4, len(list)) - require.NoError(t, getErr) - require.Equal(t, *get, ans[0]) - } - }) - } - }) - - t.Run("delete with non-id filters", func(t *testing.T) { - svc := newTestService(t) - populateAnnotationsData(t, svc) - - tests := []struct { - name string - deleteOrgID platform.ID - filter influxdb.AnnotationDeleteFilter - wantList []influxdb.StoredAnnotation - }{ - { - "matches stream tag but not time range", - orgID, - influxdb.AnnotationDeleteFilter{ - StreamTag: "stream1", - StartTime: &st1, - EndTime: &earlierEt1, - }, - []influxdb.StoredAnnotation{s1, s2, s3, s4}, - }, - { - "matches stream tag and time range", - orgID, - influxdb.AnnotationDeleteFilter{ - StreamTag: "stream1", - StartTime: &st1, - EndTime: &et1, - }, - []influxdb.StoredAnnotation{s2, s3, s4}, - }, - { - "matches stream tag and time range for item with no stickers", - orgID, - influxdb.AnnotationDeleteFilter{ - StreamTag: "stream4", - StartTime: &st4, - EndTime: &et4, - }, - []influxdb.StoredAnnotation{s1, s2, s3}, - }, - { - "matches stream tag for multiple", - orgID, - influxdb.AnnotationDeleteFilter{ - StreamTag: "stream2", - StartTime: &st3, - EndTime: &et1, - }, - []influxdb.StoredAnnotation{s1, s4}, - }, - { - "matches stream tag but wrong org", - otherOrgID, - influxdb.AnnotationDeleteFilter{ - StreamTag: "stream1", - StartTime: &st1, - EndTime: &et1, - }, - []influxdb.StoredAnnotation{s1, s2, s3, s4}, - }, - - { - "matches stickers but not time range", - orgID, - influxdb.AnnotationDeleteFilter{ - Stickers: map[string]string{"stick1": "val1"}, - StartTime: &st1, - EndTime: &earlierEt1, - }, - []influxdb.StoredAnnotation{s1, s2, s3, s4}, - }, - { - "matches stickers and time range", - orgID, - influxdb.AnnotationDeleteFilter{ - Stickers: map[string]string{"stick1": "val1"}, - StartTime: &st1, - EndTime: &et1, - }, - []influxdb.StoredAnnotation{s2, s3, s4}, - }, - { - "matches stickers for multiple", - orgID, - influxdb.AnnotationDeleteFilter{ - Stickers: map[string]string{"stick2": "val2"}, - StartTime: &st2, - EndTime: &et1, - }, - []influxdb.StoredAnnotation{s3, s4}, - }, - { - "matches stickers but wrong org", - otherOrgID, - influxdb.AnnotationDeleteFilter{ - Stickers: map[string]string{"stick1": "val1"}, - StartTime: &st1, - EndTime: &et1, - }, - []influxdb.StoredAnnotation{s1, s2, s3, s4}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svc := newTestService(t) - populateAnnotationsData(t, svc) - - err := svc.DeleteAnnotations(ctx, tt.deleteOrgID, tt.filter) - require.NoError(t, err) - - f := influxdb.AnnotationListFilter{} - f.Validate(time.Now) - list, err := 
svc.ListAnnotations(ctx, orgID, f) - require.NoError(t, err) - assertStoredAnnotations(t, list, tt.wantList) - }) - } - }) - }) - - t.Run("delete a single annotation by id", func(t *testing.T) { - svc := newTestService(t) - ans := populateAnnotationsData(t, svc) - - tests := []struct { - name string - id platform.ID - shouldDelete bool - }{ - { - "has the correct error if not found", - idGen.ID(), - false, - }, - { - "deletes the first one by id", - ans[0].ID, - true, - }, - { - "deletes the second one by id", - ans[1].ID, - true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := svc.DeleteAnnotation(ctx, tt.id) - - if tt.shouldDelete { - require.NoError(t, err) - } else { - require.Equal(t, errAnnotationNotFound, err) - } - - got, err := svc.GetAnnotation(ctx, tt.id) - require.Equal(t, errAnnotationNotFound, err) - require.Nil(t, got) - }) - } - }) - - t.Run("update a single annotation by id", func(t *testing.T) { - svc := newTestService(t) - ans := populateAnnotationsData(t, svc) - - updatedTime := time.Time{}.Add(time.Minute) - - tests := []struct { - name string - id platform.ID - update influxdb.AnnotationCreate - wantErr error - }{ - { - "has the correct error if not found", - idGen.ID(), - influxdb.AnnotationCreate{ - StreamTag: "updated tag", - Summary: "updated summary", - Message: "updated message", - Stickers: map[string]string{"updated": "sticker"}, - EndTime: &updatedTime, - StartTime: &updatedTime, - }, - errAnnotationNotFound, - }, - { - "updates the first one by id", - ans[0].ID, - influxdb.AnnotationCreate{ - StreamTag: "updated tag", - Summary: "updated summary", - Message: "updated message", - Stickers: map[string]string{"updated": "sticker"}, - EndTime: &updatedTime, - StartTime: &updatedTime, - }, - nil, - }, - { - "updates the second one by id", - ans[1].ID, - influxdb.AnnotationCreate{ - StreamTag: "updated tag2", - Summary: "updated summary2", - Message: "updated message2", - Stickers: map[string]string{"updated2": "sticker2"}, - EndTime: &updatedTime, - StartTime: &updatedTime, - }, - nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - want := &influxdb.AnnotationEvent{ID: tt.id, AnnotationCreate: tt.update} - if tt.wantErr != nil { - want = nil - } - - got, err := svc.UpdateAnnotation(ctx, tt.id, tt.update) - require.Equal(t, tt.wantErr, err) - require.Equal(t, want, got) - - if tt.wantErr == nil { - new, err := svc.GetAnnotation(ctx, tt.id) - require.NoError(t, err) - e, err := new.ToEvent() - require.NoError(t, err) - require.Equal(t, got, e) - } - }) - } - }) - - t.Run("deleted streams cascade to deleted annotations", func(t *testing.T) { - svc := newTestService(t) - - ctx := context.Background() - ans := populateAnnotationsData(t, svc) - sort.Slice(ans, func(i, j int) bool { - return ans[i].StreamTag < ans[j].StreamTag - }) - - // annotations s2 and s3 have the stream tag of "stream2", so get the id of that stream - id := ans[1].ID - a, err := svc.GetAnnotation(ctx, id) - require.NoError(t, err) - streamID := a.StreamID - - // delete the stream - err = svc.DeleteStreamByID(ctx, streamID) - require.NoError(t, err) - - // s1 and s4 should still be there - s1, err := svc.GetAnnotation(ctx, ans[0].ID) - require.NoError(t, err) - s4, err := svc.GetAnnotation(ctx, ans[3].ID) - require.NoError(t, err) - - // both s2 and s3 should now be deleted - f := influxdb.AnnotationListFilter{} - f.Validate(time.Now) - remaining, err := svc.ListAnnotations(ctx, orgID, f) - require.NoError(t, err) - 
require.Equal(t, []influxdb.StoredAnnotation{*s1, *s4}, remaining) - }) - - t.Run("renamed streams are reflected in subsequent annotation queries", func(t *testing.T) { - svc := newTestService(t) - - ctx := context.Background() - populateAnnotationsData(t, svc) - - // get all the annotations with the tag "stream2" - f := influxdb.AnnotationListFilter{StreamIncludes: []string{"stream2"}} - f.Validate(time.Now) - originalList, err := svc.ListAnnotations(ctx, orgID, f) - require.NoError(t, err) - assertStoredAnnotations(t, []influxdb.StoredAnnotation{s2, s3}, originalList) - - // check that the original list has the right stream tag for all annotations - for _, a := range originalList { - require.Equal(t, "stream2", a.StreamTag) - } - - // update the name for stream2 - streamID := originalList[0].StreamID - _, err = svc.UpdateStream(ctx, streamID, influxdb.Stream{Name: "new name", Description: "new desc"}) - require.NoError(t, err) - - // get all the annotations with the new tag - f = influxdb.AnnotationListFilter{StreamIncludes: []string{"new name"}} - f.Validate(time.Now) - newList, err := svc.ListAnnotations(ctx, orgID, f) - require.NoError(t, err) - - // check that the new list has the right stream tag for all annotations - for _, a := range newList { - require.Equal(t, "new name", a.StreamTag) - } - - // verify that the new list of annotations is the same as the original except for the stream name change - require.Equal(t, len(originalList), len(newList)) - - sort.Slice(originalList, func(i, j int) bool { - return originalList[i].ID < originalList[j].ID - }) - - sort.Slice(newList, func(i, j int) bool { - return originalList[i].ID < originalList[j].ID - }) - - for i := range newList { - originalList[i].StreamTag = "new name" - require.Equal(t, originalList[i], newList[i]) - } - }) -} - -func TestStreamsCRUDSingle(t *testing.T) { - t.Parallel() - - svc := newTestService(t) - - ctx := context.Background() - orgID := *influxdbtesting.IDPtr(1) - - stream := influxdb.Stream{ - Name: "testName", - Description: "original description", - } - - var err error - var s1, s2, s3 *influxdb.ReadStream - - t.Run("create a single stream", func(t *testing.T) { - s1, err = svc.CreateOrUpdateStream(ctx, orgID, stream) - require.NoError(t, err) - require.Equal(t, stream.Name, s1.Name) - require.Equal(t, stream.Description, s1.Description) - }) - - t.Run("stream updates", func(t *testing.T) { - u1 := influxdb.Stream{ - Name: "testName", - Description: "updated description", - } - - u2 := influxdb.Stream{ - Name: "otherName", - Description: "other description", - } - - t.Run("updating an existing stream with CreateOrUpdateStream does not change id but does change description", func(t *testing.T) { - s2, err = svc.CreateOrUpdateStream(ctx, orgID, u1) - require.NoError(t, err) - require.Equal(t, stream.Name, s2.Name) - require.Equal(t, u1.Description, s2.Description) - require.Equal(t, s1.ID, s2.ID) - }) - - t.Run("updating a non-existant stream with UpdateStream returns not found error", func(t *testing.T) { - readGot, err := svc.UpdateStream(ctx, idGen.ID(), u2) - require.Nil(t, readGot) - require.Equal(t, errStreamNotFound, err) - }) - - t.Run("updating an existing stream with UpdateStream changes both name & description", func(t *testing.T) { - s3, err = svc.UpdateStream(ctx, s2.ID, u2) - require.NoError(t, err) - require.Equal(t, s2.ID, s3.ID) - require.Equal(t, u2.Name, s3.Name) - require.Equal(t, u2.Description, s3.Description) - }) - }) - - t.Run("getting a stream", func(t *testing.T) { - 
t.Run("non-existant stream returns a not found error", func(t *testing.T) { - storedGot, err := svc.GetStream(ctx, idGen.ID()) - require.Nil(t, storedGot) - require.Equal(t, errStreamNotFound, err) - }) - - t.Run("existing stream returns without error", func(t *testing.T) { - storedGot, err := svc.GetStream(ctx, s3.ID) - require.NoError(t, err) - require.Equal(t, s3.Name, storedGot.Name) - require.Equal(t, s3.Description, storedGot.Description) - }) - }) - - t.Run("deleting a stream", func(t *testing.T) { - t.Run("non-existant stream returns a not found error", func(t *testing.T) { - err := svc.DeleteStreamByID(ctx, idGen.ID()) - require.Equal(t, errStreamNotFound, err) - }) - - t.Run("deletes an existing stream without error", func(t *testing.T) { - err := svc.DeleteStreamByID(ctx, s1.ID) - require.NoError(t, err) - - storedGot, err := svc.GetStream(ctx, s1.ID) - require.Nil(t, storedGot) - require.Equal(t, err, errStreamNotFound) - }) - }) -} - -func TestStreamsCRUDMany(t *testing.T) { - t.Parallel() - - svc := newTestService(t) - - ctx := context.Background() - - orgID1 := influxdbtesting.IDPtr(1) - orgID2 := influxdbtesting.IDPtr(2) - orgID3 := influxdbtesting.IDPtr(3) - - // populate the database with some streams for testing delete and select many - combos := map[platform.ID][]string{ - *orgID1: {"org1_s1", "org1_s2", "org1_s3", "org1_s4"}, - *orgID2: {"org2_s1"}, - *orgID3: {"org3_s1", "org3_s2"}, - } - - for orgID, streams := range combos { - for _, s := range streams { - _, err := svc.CreateOrUpdateStream(ctx, orgID, influxdb.Stream{ - Name: s, - }) - require.NoError(t, err) - } - } - - t.Run("all streams can be listed for each org if passing an empty list", func(t *testing.T) { - for orgID, streams := range combos { - got, err := svc.ListStreams(ctx, orgID, influxdb.StreamListFilter{ - StreamIncludes: []string{}, - }) - require.NoError(t, err) - assertStreamNames(t, streams, got) - } - }) - - t.Run("can select specific streams and get only those for that org", func(t *testing.T) { - for orgID, streams := range combos { - got, err := svc.ListStreams(ctx, orgID, influxdb.StreamListFilter{ - StreamIncludes: streams, - }) - require.NoError(t, err) - assertStreamNames(t, streams, got) - } - }) - - t.Run("can delete a single stream with DeleteStreams, but does not delete streams for other org", func(t *testing.T) { - err := svc.DeleteStreams(ctx, *orgID1, influxdb.BasicStream{ - Names: []string{"org1_s1", "org2_s1"}, - }) - require.NoError(t, err) - - got, err := svc.ListStreams(ctx, *orgID1, influxdb.StreamListFilter{ - StreamIncludes: []string{}, - }) - require.NoError(t, err) - assertStreamNames(t, []string{"org1_s2", "org1_s3", "org1_s4"}, got) - - got, err = svc.ListStreams(ctx, *orgID2, influxdb.StreamListFilter{ - StreamIncludes: []string{}, - }) - require.NoError(t, err) - assertStreamNames(t, []string{"org2_s1"}, got) - }) - - t.Run("can delete all streams for all orgs", func(t *testing.T) { - for orgID, streams := range combos { - err := svc.DeleteStreams(ctx, orgID, influxdb.BasicStream{ - Names: streams, - }) - require.NoError(t, err) - - got, err := svc.ListStreams(ctx, orgID, influxdb.StreamListFilter{ - StreamIncludes: []string{}, - }) - require.NoError(t, err) - require.Equal(t, []influxdb.StoredStream{}, got) - } - }) -} - -func assertAnnotationEvents(t *testing.T, got, want []influxdb.AnnotationEvent) { - t.Helper() - - require.Equal(t, len(want), len(got)) - - sort.Slice(want, func(i, j int) bool { - return want[i].StreamTag < want[j].StreamTag - }) - - 
sort.Slice(got, func(i, j int) bool { - return got[i].StreamTag < got[j].StreamTag - }) - - for idx, w := range want { - w.ID = got[idx].ID - require.Equal(t, w, got[idx]) - } -} - -// should make these are lists similar -func assertStoredAnnotations(t *testing.T, got, want []influxdb.StoredAnnotation) { - t.Helper() - - require.Equal(t, len(want), len(got)) - - sort.Slice(want, func(i, j int) bool { - return want[i].ID < want[j].ID - }) - - sort.Slice(got, func(i, j int) bool { - return got[i].ID < got[j].ID - }) - - for idx, w := range want { - w.ID = got[idx].ID - w.StreamID = got[idx].StreamID - require.Equal(t, w, got[idx]) - } -} - -func assertStreamNames(t *testing.T, want []string, got []influxdb.StoredStream) { - t.Helper() - - storedNames := make([]string, len(got)) - for i, s := range got { - storedNames[i] = s.Name - } - - require.ElementsMatch(t, want, storedNames) -} - -func newTestService(t *testing.T) *Service { - t.Helper() - - store := sqlite.NewTestStore(t) - ctx := context.Background() - - sqliteMigrator := sqlite.NewMigrator(store, zap.NewNop()) - err := sqliteMigrator.Up(ctx, migrations.AllUp) - require.NoError(t, err) - - svc := NewService(store) - - return svc -} diff --git a/annotations/transport/annotations_router.go b/annotations/transport/annotations_router.go deleted file mode 100644 index e2f6b096040..00000000000 --- a/annotations/transport/annotations_router.go +++ /dev/null @@ -1,289 +0,0 @@ -package transport - -import ( - "encoding/json" - "net/http" - "time" - - "github.com/go-chi/chi" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -func (h *AnnotationHandler) annotationsRouter() http.Handler { - r := chi.NewRouter() - - r.Post("/", h.handleCreateAnnotations) - r.Get("/", h.handleGetAnnotations) - r.Delete("/", h.handleDeleteAnnotations) - - r.Route("/{id}", func(r chi.Router) { - r.Get("/", h.handleGetAnnotation) - r.Delete("/", h.handleDeleteAnnotation) - r.Put("/", h.handleUpdateAnnotation) - }) - - return r -} - -func (h *AnnotationHandler) handleCreateAnnotations(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - o, err := platform.IDFromString(r.URL.Query().Get("orgID")) - if err != nil { - h.api.Err(w, r, errBadOrg) - return - } - - c, err := decodeCreateAnnotationsRequest(r) - if err != nil { - h.api.Err(w, r, err) - return - } - - l, err := h.annotationService.CreateAnnotations(ctx, *o, c) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, l) -} - -func (h *AnnotationHandler) handleGetAnnotations(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - o, err := platform.IDFromString(r.URL.Query().Get("orgID")) - if err != nil { - h.api.Err(w, r, errBadOrg) - return - } - - f, err := decodeListAnnotationsRequest(r) - if err != nil { - h.api.Err(w, r, err) - return - } - - s, err := h.annotationService.ListAnnotations(ctx, *o, *f) - if err != nil { - h.api.Err(w, r, err) - return - } - - l, err := storedAnnotationsToReadAnnotations(s) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, l) -} - -func (h *AnnotationHandler) handleDeleteAnnotations(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - o, err := platform.IDFromString(r.URL.Query().Get("orgID")) - if err != nil { - h.api.Err(w, r, errBadOrg) - return - } - - f, err := decodeDeleteAnnotationsRequest(r) - if err != nil { - h.api.Err(w, r, err) - return - } - - if err = 
h.annotationService.DeleteAnnotations(ctx, *o, *f); err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusNoContent, nil) -} - -func (h *AnnotationHandler) handleGetAnnotation(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, errBadAnnotationId) - return - } - - s, err := h.annotationService.GetAnnotation(ctx, *id) - if err != nil { - h.api.Err(w, r, err) - return - } - - c, err := storedAnnotationToEvent(s) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, c) -} - -func (h *AnnotationHandler) handleDeleteAnnotation(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, errBadAnnotationId) - return - } - - if err := h.annotationService.DeleteAnnotation(ctx, *id); err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusNoContent, nil) -} - -func (h *AnnotationHandler) handleUpdateAnnotation(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, errBadAnnotationId) - return - } - - u, err := decodeUpdateAnnotationRequest(r) - if err != nil { - h.api.Err(w, r, err) - return - } - - a, err := h.annotationService.UpdateAnnotation(ctx, *id, *u) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, a) -} - -func decodeCreateAnnotationsRequest(r *http.Request) ([]influxdb.AnnotationCreate, error) { - cs := []influxdb.AnnotationCreate{} - if err := json.NewDecoder(r.Body).Decode(&cs); err != nil { - return nil, err - } - - for _, c := range cs { - if err := c.Validate(time.Now); err != nil { - return nil, err - } - } - - return cs, nil -} - -func decodeListAnnotationsRequest(r *http.Request) (*influxdb.AnnotationListFilter, error) { - startTime, endTime, err := tFromReq(r) - if err != nil { - return nil, err - } - - f := &influxdb.AnnotationListFilter{ - StreamIncludes: r.URL.Query()["streamIncludes"], - BasicFilter: influxdb.BasicFilter{ - EndTime: endTime, - StartTime: startTime, - }, - } - f.SetStickerIncludes(r.URL.Query()) - if err := f.Validate(time.Now); err != nil { - return nil, err - } - - return f, nil -} - -func decodeDeleteAnnotationsRequest(r *http.Request) (*influxdb.AnnotationDeleteFilter, error) { - // Try to get a stream ID from the query params. The stream ID is not required, - // so if one is not set we can leave streamID as the zero value. 
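For context on the create path handled above: handleCreateAnnotations decodes a JSON array of influxdb.AnnotationCreate values and validates each one with Validate(time.Now) before calling CreateAnnotations. A client-side sketch of building such a payload (package and helper names plus the tag, summary, and sticker values are placeholders; the field set matches the AnnotationCreate literals used in the tests later in this diff):

```go
package annotationsdemo

import (
	"encoding/json"
	"time"

	"github.com/influxdata/influxdb/v2"
)

// buildCreatePayload returns the JSON array body accepted by the
// POST .../annotations handler; every entry must pass Validate(time.Now).
func buildCreatePayload(start, end time.Time) ([]byte, error) {
	return json.Marshal([]influxdb.AnnotationCreate{{
		StreamTag: "deploys",                              // which stream the annotation belongs to
		Summary:   "v2.1.0 rollout",                       // short human-readable summary
		Message:   "rolled out by the deploy pipeline",    // longer free-form message
		Stickers:  map[string]string{"env": "production"}, // arbitrary key/value labels
		StartTime: &start,
		EndTime:   &end,
	}})
}
```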
- var streamID platform.ID - if qid := chi.URLParam(r, "streamID"); qid != "" { - id, err := platform.IDFromString(qid) - // if a streamID parameter was provided but is not valid, return an error - if err != nil { - return nil, errBadStreamId - } - streamID = *id - } - - startTime, endTime, err := tFromReq(r) - if err != nil { - return nil, err - } - - f := &influxdb.AnnotationDeleteFilter{ - StreamTag: r.URL.Query().Get("stream"), - StreamID: streamID, - EndTime: endTime, - StartTime: startTime, - } - f.SetStickers(r.URL.Query()) - if err := f.Validate(); err != nil { - return nil, err - } - - return f, nil -} - -func decodeUpdateAnnotationRequest(r *http.Request) (*influxdb.AnnotationCreate, error) { - u := &influxdb.AnnotationCreate{} - if err := json.NewDecoder(r.Body).Decode(u); err != nil { - return nil, err - } else if err := u.Validate(time.Now); err != nil { - return nil, err - } - - return u, nil -} - -func storedAnnotationsToReadAnnotations(s []influxdb.StoredAnnotation) (influxdb.ReadAnnotations, error) { - r := influxdb.ReadAnnotations{} - - for _, val := range s { - r[val.StreamTag] = append(r[val.StreamTag], influxdb.ReadAnnotation{ - ID: val.ID, - Summary: val.Summary, - Message: val.Message, - Stickers: val.Stickers, - StartTime: val.Lower, - EndTime: val.Upper, - }) - } - - return r, nil -} - -func storedAnnotationToEvent(s *influxdb.StoredAnnotation) (*influxdb.AnnotationEvent, error) { - st, err := tStringToPointer(s.Lower) - if err != nil { - return nil, err - } - - et, err := tStringToPointer(s.Upper) - if err != nil { - return nil, err - } - - return &influxdb.AnnotationEvent{ - ID: s.ID, - AnnotationCreate: influxdb.AnnotationCreate{ - StreamTag: s.StreamTag, - Summary: s.Summary, - Message: s.Message, - Stickers: s.Stickers, - EndTime: et, - StartTime: st, - }, - }, nil -} diff --git a/annotations/transport/annotations_router_test.go b/annotations/transport/annotations_router_test.go deleted file mode 100644 index 0b0d89a0421..00000000000 --- a/annotations/transport/annotations_router_test.go +++ /dev/null @@ -1,267 +0,0 @@ -package transport - -import ( - "encoding/json" - "net/http" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" -) - -var ( - testCreateAnnotation = influxdb.AnnotationCreate{ - StreamTag: "sometag", - Summary: "testing the api", - Message: "stored annotation message", - Stickers: map[string]string{"val1": "sticker1", "val2": "sticker2"}, - EndTime: &now, - StartTime: &now, - } - - testEvent = influxdb.AnnotationEvent{ - ID: *id, - AnnotationCreate: testCreateAnnotation, - } - - testReadAnnotation1 = influxdb.ReadAnnotation{ - ID: *influxdbtesting.IDPtr(1), - } - - testReadAnnotation2 = influxdb.ReadAnnotation{ - ID: *influxdbtesting.IDPtr(2), - } - - testStoredAnnotation = influxdb.StoredAnnotation{ - ID: *id, - OrgID: *orgID, - StreamID: *influxdbtesting.IDPtr(3), - StreamTag: "sometag", - Summary: "testing the api", - Message: "stored annotation message", - Stickers: map[string]string{"val1": "sticker1", "val2": "sticker2"}, - Lower: now.Format(time.RFC3339), - Upper: now.Format(time.RFC3339), - } - - testReadAnnotations = influxdb.ReadAnnotations{ - "sometag": []influxdb.ReadAnnotation{ - { - ID: testStoredAnnotation.ID, - Summary: testStoredAnnotation.Summary, - Message: testStoredAnnotation.Message, - Stickers: map[string]string{"val1": "sticker1", "val2": 
"sticker2"}, - EndTime: testStoredAnnotation.Lower, - StartTime: testStoredAnnotation.Upper, - }, - }, - } -) - -func TestAnnotationRouter(t *testing.T) { - t.Parallel() - - t.Run("get annotations happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "GET", ts.URL+"/annotations", nil) - - q := req.URL.Query() - q.Add("orgID", orgStr) - q.Add("endTime", now.Format(time.RFC3339)) - q.Add("stickerIncludes[product]", "oss") - q.Add("stickerIncludes[env]", "dev") - q.Add("streamIncludes", "stream1") - q.Add("streamIncludes", "stream2") - req.URL.RawQuery = q.Encode() - - want := []influxdb.AnnotationList{ - { - StreamTag: "stream1", - Annotations: []influxdb.ReadAnnotation{testReadAnnotation1}, - }, - { - StreamTag: "stream2", - Annotations: []influxdb.ReadAnnotation{testReadAnnotation2}, - }, - } - - svc.EXPECT(). - ListAnnotations(gomock.Any(), *orgID, influxdb.AnnotationListFilter{ - StickerIncludes: map[string]string{"product": "oss", "env": "dev"}, - StreamIncludes: []string{"stream1", "stream2"}, - BasicFilter: influxdb.BasicFilter{ - StartTime: &time.Time{}, - EndTime: &now, - }, - }). - Return([]influxdb.StoredAnnotation{ - { - ID: testReadAnnotation1.ID, - StreamTag: "stream1", - }, - { - ID: testReadAnnotation2.ID, - StreamTag: "stream2", - }, - }, nil) - - res := doTestRequest(t, req, http.StatusOK, true) - - got := []influxdb.AnnotationList{} - err := json.NewDecoder(res.Body).Decode(&got) - require.NoError(t, err) - require.ElementsMatch(t, want, got) - }) - - t.Run("create annotations happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - createAnnotations := []influxdb.AnnotationCreate{testCreateAnnotation} - - req := newTestRequest(t, "POST", ts.URL+"/annotations", createAnnotations) - - q := req.URL.Query() - q.Add("orgID", orgStr) - req.URL.RawQuery = q.Encode() - - want := []influxdb.AnnotationEvent{testEvent} - - svc.EXPECT(). - CreateAnnotations(gomock.Any(), *orgID, createAnnotations). - Return(want, nil) - - res := doTestRequest(t, req, http.StatusOK, true) - - got := []influxdb.AnnotationEvent{} - err := json.NewDecoder(res.Body).Decode(&got) - require.NoError(t, err) - require.Equal(t, want, got) - }) - - t.Run("delete annotations happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "DELETE", ts.URL+"/annotations", nil) - q := req.URL.Query() - q.Add("orgID", orgStr) - q.Add("stream", "someTag") - q.Add("startTime", now.Format(time.RFC3339)) - q.Add("endTime", later.Format(time.RFC3339)) - req.URL.RawQuery = q.Encode() - - svc.EXPECT(). - DeleteAnnotations(gomock.Any(), *orgID, influxdb.AnnotationDeleteFilter{ - StreamTag: "someTag", - StartTime: &now, - EndTime: &later, - Stickers: map[string]string{}, - }). - Return(nil) - - doTestRequest(t, req, http.StatusNoContent, false) - }) - - t.Run("get annotation happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "GET", ts.URL+"/annotations/"+idStr, nil) - - svc.EXPECT(). - GetAnnotation(gomock.Any(), *id). 
- Return(&testStoredAnnotation, nil) - - res := doTestRequest(t, req, http.StatusOK, true) - - got := &influxdb.AnnotationEvent{} - err := json.NewDecoder(res.Body).Decode(got) - require.NoError(t, err) - require.Equal(t, &testEvent, got) - }) - - t.Run("delete annotation happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "DELETE", ts.URL+"/annotations/"+idStr, nil) - - svc.EXPECT(). - DeleteAnnotation(gomock.Any(), *id). - Return(nil) - - doTestRequest(t, req, http.StatusNoContent, false) - }) - - t.Run("update annotation happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "PUT", ts.URL+"/annotations/"+idStr, testCreateAnnotation) - - svc.EXPECT(). - UpdateAnnotation(gomock.Any(), *id, testCreateAnnotation). - Return(&testEvent, nil) - - res := doTestRequest(t, req, http.StatusOK, true) - - got := &influxdb.AnnotationEvent{} - err := json.NewDecoder(res.Body).Decode(got) - require.NoError(t, err) - require.Equal(t, &testEvent, got) - }) - - t.Run("invalid org ids return 400 when required", func(t *testing.T) { - methods := []string{"POST", "GET", "DELETE"} - - for _, m := range methods { - t.Run(m, func(t *testing.T) { - ts, _ := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, m, ts.URL+"/annotations", nil) - q := req.URL.Query() - q.Add("orgID", "badid") - req.URL.RawQuery = q.Encode() - - doTestRequest(t, req, http.StatusBadRequest, false) - }) - } - }) - - t.Run("invalid annotation ids return 400 when required", func(t *testing.T) { - methods := []string{"GET", "DELETE", "PUT"} - - for _, m := range methods { - t.Run(m, func(t *testing.T) { - ts, _ := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, m, ts.URL+"/annotations/badID", nil) - doTestRequest(t, req, http.StatusBadRequest, false) - }) - } - }) -} - -func TestStoredAnnotationsToReadAnnotations(t *testing.T) { - t.Parallel() - - got, err := storedAnnotationsToReadAnnotations([]influxdb.StoredAnnotation{testStoredAnnotation}) - require.NoError(t, err) - require.Equal(t, got, testReadAnnotations) -} - -func TestStoredAnnotationToEvent(t *testing.T) { - t.Parallel() - - got, err := storedAnnotationToEvent(&testStoredAnnotation) - require.NoError(t, err) - require.Equal(t, got, &testEvent) -} diff --git a/annotations/transport/helpers_test.go b/annotations/transport/helpers_test.go deleted file mode 100644 index 6b0c501b95b..00000000000 --- a/annotations/transport/helpers_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package transport - -import ( - "bytes" - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -var ( - orgStr = "1234123412341234" - orgID, _ = platform.IDFromString(orgStr) - idStr = "4321432143214321" - id, _ = platform.IDFromString(idStr) - now = time.Now().UTC().Truncate(time.Second) - later = now.Add(5 * time.Minute) -) - -func newTestServer(t *testing.T) (*httptest.Server, *mock.MockAnnotationService) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockAnnotationService(ctrlr) - server := NewAnnotationHandler(zaptest.NewLogger(t), svc) - return httptest.NewServer(server), svc -} - -func newTestRequest(t *testing.T, method, path string, body interface{}) *http.Request { - dat, err := json.Marshal(body) - 
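The happy-path tests above drive the handler through real HTTP requests. For reference, a sketch of how a client could build the DELETE request that decodeDeleteAnnotationsRequest parses, assuming the handler is mounted at its /api/v2private prefix (base URL, helper name, and values are placeholders; times must be RFC3339):

```go
package annotationsdemo

import (
	"net/http"
	"time"
)

// newDeleteAnnotationsRequest builds a DELETE .../annotations request carrying the
// query parameters the handler decodes: orgID, a stream tag, and an RFC3339 time range.
func newDeleteAnnotationsRequest(baseURL, orgID, streamTag string, start, end time.Time) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodDelete, baseURL+"/api/v2private/annotations", nil)
	if err != nil {
		return nil, err
	}
	q := req.URL.Query()
	q.Add("orgID", orgID)
	q.Add("stream", streamTag)
	q.Add("startTime", start.Format(time.RFC3339))
	q.Add("endTime", end.Format(time.RFC3339))
	req.URL.RawQuery = q.Encode()
	return req, nil
}
```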
require.NoError(t, err) - - req, err := http.NewRequest(method, path, bytes.NewBuffer(dat)) - require.NoError(t, err) - - req.Header.Add("Content-Type", "application/json") - - return req -} - -func doTestRequest(t *testing.T, req *http.Request, wantCode int, needJSON bool) *http.Response { - res, err := http.DefaultClient.Do(req) - require.NoError(t, err) - require.Equal(t, wantCode, res.StatusCode) - if needJSON { - require.Equal(t, "application/json; charset=utf-8", res.Header.Get("Content-Type")) - } - return res -} diff --git a/annotations/transport/http.go b/annotations/transport/http.go deleted file mode 100644 index f603e9b7b42..00000000000 --- a/annotations/transport/http.go +++ /dev/null @@ -1,104 +0,0 @@ -package transport - -import ( - "net/http" - "time" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -const ( - // this is the base api prefix, since the annotations system mounts handlers at - // both the ../annotations and ../streams paths. - prefixAnnotations = "/api/v2private" -) - -var ( - errBadOrg = &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid or missing org id", - } - - errBadAnnotationId = &errors.Error{ - Code: errors.EInvalid, - Msg: "annotation id is invalid", - } - - errBadStreamId = &errors.Error{ - Code: errors.EInvalid, - Msg: "stream id is invalid", - } - - errBadStreamName = &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid stream name", - } -) - -// AnnotationsHandler is the handler for the annotation service -type AnnotationHandler struct { - chi.Router - - log *zap.Logger - api *kithttp.API - - annotationService influxdb.AnnotationService -} - -func NewAnnotationHandler(log *zap.Logger, annotationService influxdb.AnnotationService) *AnnotationHandler { - h := &AnnotationHandler{ - log: log, - api: kithttp.NewAPI(kithttp.WithLog(log)), - annotationService: annotationService, - } - - r := chi.NewRouter() - r.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - - r.Mount("/annotations", h.annotationsRouter()) - r.Mount("/streams", h.streamsRouter()) - h.Router = r - - return h -} - -func (h *AnnotationHandler) Prefix() string { - return prefixAnnotations -} - -// tFromReq and tStringToPointer are used in handlers to extract time values from query parameters. -// pointers to time.Time structs are used, since the JSON responses may omit empty (nil pointer) times. 
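To make the nil-pointer convention described above concrete, here is a small table-style check of tStringToPointer, which is defined immediately below. It would live in the same transport test package and reuse the testing, time, and require imports already shown; the test name is arbitrary:

```go
func TestTStringToPointerSketch(t *testing.T) {
	// An empty query parameter yields a nil pointer, so the field can be
	// omitted from JSON responses.
	got, err := tStringToPointer("")
	require.NoError(t, err)
	require.Nil(t, got)

	// A valid RFC3339 value yields a non-nil *time.Time.
	got, err = tStringToPointer("2021-01-01T00:00:00Z")
	require.NoError(t, err)
	require.Equal(t, "2021-01-01T00:00:00Z", got.Format(time.RFC3339))

	// Anything else is a parse error.
	_, err = tStringToPointer("not-a-time")
	require.Error(t, err)
}
```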
-func tFromReq(r *http.Request) (*time.Time, *time.Time, error) { - st, err := tStringToPointer(r.URL.Query().Get("startTime")) - if err != nil { - return nil, nil, err - } - - et, err := tStringToPointer(r.URL.Query().Get("endTime")) - if err != nil { - return nil, nil, err - } - - return st, et, nil -} - -func tStringToPointer(s string) (*time.Time, error) { - if s == "" { - return nil, nil - } - - t, err := time.Parse(time.RFC3339, s) - if err != nil { - return nil, err - } - return &t, nil -} diff --git a/annotations/transport/streams_router.go b/annotations/transport/streams_router.go deleted file mode 100644 index 2ff90a533f2..00000000000 --- a/annotations/transport/streams_router.go +++ /dev/null @@ -1,206 +0,0 @@ -package transport - -import ( - "encoding/json" - "net/http" - "time" - - "github.com/go-chi/chi" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -func (h *AnnotationHandler) streamsRouter() http.Handler { - r := chi.NewRouter() - - r.Put("/", h.handleCreateOrUpdateStream) - r.Get("/", h.handleGetStreams) - r.Delete("/", h.handleDeleteStreams) - - r.Route("/{id}", func(r chi.Router) { - r.Delete("/", h.handleDeleteStream) - r.Put("/", h.handleUpdateStreamByID) - }) - - return r -} - -func (h *AnnotationHandler) handleCreateOrUpdateStream(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - o, err := platform.IDFromString(r.URL.Query().Get("orgID")) - if err != nil { - h.api.Err(w, r, errBadOrg) - return - } - - u, err := decodeCreateOrUpdateStreamRequest(r) - if err != nil { - h.api.Err(w, r, err) - return - } - - s, err := h.annotationService.CreateOrUpdateStream(ctx, *o, *u) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, s) -} - -func (h *AnnotationHandler) handleGetStreams(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - o, err := platform.IDFromString(r.URL.Query().Get("orgID")) - if err != nil { - h.api.Err(w, r, errBadOrg) - return - } - - f, err := decodeListStreamsRequest(r) - if err != nil { - h.api.Err(w, r, err) - return - } - - s, err := h.annotationService.ListStreams(ctx, *o, *f) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, storedStreamsToReadStreams(s)) -} - -// Delete stream(s) by name, capable of handling a list of names -func (h *AnnotationHandler) handleDeleteStreams(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - o, err := platform.IDFromString(r.URL.Query().Get("orgID")) - if err != nil { - h.api.Err(w, r, errBadOrg) - return - } - - f, err := decodeDeleteStreamsRequest(r) - if err != nil { - h.api.Err(w, r, err) - return - } - - // delete all of the streams according to the filter. annotations associated with the stream - // will be deleted by the ON DELETE CASCADE relationship between streams and annotations. 
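The cascade behaviour relied on above comes from the SQLite schema created by the migrations, which are not part of this hunk. Purely as an illustration, with hypothetical table and column names, a foreign key declared with ON DELETE CASCADE makes SQLite remove a stream's annotations when the stream row is deleted (provided foreign-key enforcement is enabled):

```go
// sketchCascadeSchema is illustrative only; the real schema lives in the
// sqlite migrations package and its table/column names are not shown here.
const sketchCascadeSchema = `
CREATE TABLE streams (
    id     TEXT PRIMARY KEY,
    org_id TEXT NOT NULL,
    name   TEXT NOT NULL
);

CREATE TABLE annotations (
    id        TEXT PRIMARY KEY,
    stream_id TEXT NOT NULL,
    -- Deleting a streams row deletes its annotations rows, which is why the
    -- handlers above never clean up annotations explicitly.
    FOREIGN KEY (stream_id) REFERENCES streams (id) ON DELETE CASCADE
);
`
```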
- if err = h.annotationService.DeleteStreams(ctx, *o, *f); err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusNoContent, nil) -} - -// Delete a single stream by ID -func (h *AnnotationHandler) handleDeleteStream(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, errBadAnnotationId) - return - } - - // as in the handleDeleteStreams method above, deleting a stream will delete annotations - // associated with it due to the ON DELETE CASCADE relationship between the two - if err := h.annotationService.DeleteStreamByID(ctx, *id); err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusNoContent, nil) -} - -func (h *AnnotationHandler) handleUpdateStreamByID(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, errBadAnnotationId) - return - } - - u, err := decodeCreateOrUpdateStreamRequest(r) - if err != nil { - h.api.Err(w, r, err) - return - } - - s, err := h.annotationService.UpdateStream(ctx, *id, *u) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, s) -} - -func decodeCreateOrUpdateStreamRequest(r *http.Request) (*influxdb.Stream, error) { - s := influxdb.Stream{} - - if err := json.NewDecoder(r.Body).Decode(&s); err != nil { - return nil, err - } - - if err := s.Validate(false); err != nil { - return nil, err - } - - return &s, nil -} - -func decodeListStreamsRequest(r *http.Request) (*influxdb.StreamListFilter, error) { - startTime, endTime, err := tFromReq(r) - if err != nil { - return nil, err - } - - f := &influxdb.StreamListFilter{ - StreamIncludes: r.URL.Query()["streamIncludes"], - BasicFilter: influxdb.BasicFilter{ - EndTime: endTime, - StartTime: startTime, - }, - } - - if err := f.Validate(time.Now); err != nil { - return nil, err - } - return f, nil -} - -func decodeDeleteStreamsRequest(r *http.Request) (*influxdb.BasicStream, error) { - f := &influxdb.BasicStream{ - Names: r.URL.Query()["stream"], - } - - if !f.IsValid() { - return nil, errBadStreamName - } - - return f, nil -} - -func storedStreamsToReadStreams(stored []influxdb.StoredStream) []influxdb.ReadStream { - r := make([]influxdb.ReadStream, 0, len(stored)) - - for _, s := range stored { - r = append(r, influxdb.ReadStream{ - ID: s.ID, - Name: s.Name, - Description: s.Description, - CreatedAt: s.CreatedAt, - UpdatedAt: s.UpdatedAt, - }) - } - - return r -} diff --git a/annotations/transport/streams_router_test.go b/annotations/transport/streams_router_test.go deleted file mode 100644 index ece4806d483..00000000000 --- a/annotations/transport/streams_router_test.go +++ /dev/null @@ -1,198 +0,0 @@ -package transport - -import ( - "encoding/json" - "net/http" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" -) - -var ( - testCreateStream = influxdb.Stream{ - Name: "test stream", - } - - testReadStream1 = &influxdb.ReadStream{ - ID: *influxdbtesting.IDPtr(1), - Name: "test stream 1", - CreatedAt: now, - UpdatedAt: now, - } - - testReadStream2 = &influxdb.ReadStream{ - ID: *influxdbtesting.IDPtr(2), - Name: "test stream 2", - CreatedAt: now, - UpdatedAt: now, - } - - testStoredStream1 = influxdb.StoredStream{ - ID: testReadStream1.ID, 
- OrgID: *orgID, - Name: testReadStream1.Name, - Description: testReadStream1.Description, - CreatedAt: testReadStream1.CreatedAt, - UpdatedAt: testReadStream1.UpdatedAt, - } - - testStoredStream2 = influxdb.StoredStream{ - ID: testReadStream2.ID, - OrgID: *orgID, - Name: testReadStream2.Name, - Description: testReadStream2.Description, - CreatedAt: testReadStream2.CreatedAt, - UpdatedAt: testReadStream2.UpdatedAt, - } -) - -func TestStreamsRouter(t *testing.T) { - t.Parallel() - - t.Run("create or update stream happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "PUT", ts.URL+"/streams", testCreateStream) - - q := req.URL.Query() - q.Add("orgID", orgStr) - req.URL.RawQuery = q.Encode() - - svc.EXPECT(). - CreateOrUpdateStream(gomock.Any(), *orgID, testCreateStream). - Return(testReadStream1, nil) - - res := doTestRequest(t, req, http.StatusOK, true) - - got := &influxdb.ReadStream{} - err := json.NewDecoder(res.Body).Decode(got) - require.NoError(t, err) - require.Equal(t, testReadStream1, got) - }) - - t.Run("get streams happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "GET", ts.URL+"/streams", nil) - - q := req.URL.Query() - q.Add("orgID", orgStr) - q.Add("endTime", now.Format(time.RFC3339)) - q.Add("streamIncludes", "stream1") - q.Add("streamIncludes", "stream2") - req.URL.RawQuery = q.Encode() - - svc.EXPECT(). - ListStreams(gomock.Any(), *orgID, influxdb.StreamListFilter{ - StreamIncludes: []string{"stream1", "stream2"}, - BasicFilter: influxdb.BasicFilter{ - StartTime: &time.Time{}, - EndTime: &now, - }, - }). - Return([]influxdb.StoredStream{testStoredStream1, testStoredStream2}, nil) - - res := doTestRequest(t, req, http.StatusOK, true) - - got := []influxdb.ReadStream{} - err := json.NewDecoder(res.Body).Decode(&got) - require.NoError(t, err) - require.ElementsMatch(t, []influxdb.ReadStream{*testReadStream1, *testReadStream2}, got) - }) - - t.Run("delete streams (by name) happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "DELETE", ts.URL+"/streams", nil) - q := req.URL.Query() - q.Add("orgID", orgStr) - q.Add("stream", "stream1") - q.Add("stream", "stream2") - req.URL.RawQuery = q.Encode() - - svc.EXPECT(). - DeleteStreams(gomock.Any(), *orgID, influxdb.BasicStream{ - Names: []string{"stream1", "stream2"}, - }). - Return(nil) - - doTestRequest(t, req, http.StatusNoContent, false) - }) - - t.Run("delete stream happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "DELETE", ts.URL+"/streams/"+idStr, nil) - - svc.EXPECT(). - DeleteStreamByID(gomock.Any(), *id). - Return(nil) - - doTestRequest(t, req, http.StatusNoContent, false) - }) - - t.Run("update stream by id happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "PUT", ts.URL+"/streams/"+idStr, testCreateStream) - - svc.EXPECT(). - UpdateStream(gomock.Any(), *id, testCreateStream). 
- Return(testReadStream1, nil) - - res := doTestRequest(t, req, http.StatusOK, true) - - got := &influxdb.ReadStream{} - err := json.NewDecoder(res.Body).Decode(got) - require.NoError(t, err) - require.Equal(t, testReadStream1, got) - }) - - t.Run("invalid org ids return 400 when required", func(t *testing.T) { - methods := []string{"GET", "PUT", "DELETE"} - - for _, m := range methods { - t.Run(m, func(t *testing.T) { - ts, _ := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, m, ts.URL+"/streams", nil) - q := req.URL.Query() - q.Add("orgID", "badid") - req.URL.RawQuery = q.Encode() - - doTestRequest(t, req, http.StatusBadRequest, false) - }) - } - }) - - t.Run("invalid stream ids return 400 when required", func(t *testing.T) { - methods := []string{"DELETE", "PUT"} - - for _, m := range methods { - t.Run(m, func(t *testing.T) { - ts, _ := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, m, ts.URL+"/streams/badID", nil) - doTestRequest(t, req, http.StatusBadRequest, false) - }) - } - }) -} - -func TestStoredStreamsToReadStreams(t *testing.T) { - t.Parallel() - - got := storedStreamsToReadStreams([]influxdb.StoredStream{testStoredStream1, testStoredStream2}) - require.Equal(t, got, []influxdb.ReadStream{*testReadStream1, *testReadStream2}) -} diff --git a/api-compat.Jenkinsfile b/api-compat.Jenkinsfile deleted file mode 100644 index 09a37ae5b69..00000000000 --- a/api-compat.Jenkinsfile +++ /dev/null @@ -1,8 +0,0 @@ -properties([disableConcurrentBuilds()]) - -node("dind-1-12") { - container('dind') { - // This method is provided by the private api-compatibility library. - compat.test_build() - } -} diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 5b400bbd123..00000000000 --- a/appveyor.yml +++ /dev/null @@ -1,33 +0,0 @@ -version: 0.{build} -pull_requests: - do_not_increment_build_number: true - -# After a PR is opened, don't run separate appveyor builds for the branch - only builds for the PR. -skip_branch_with_pr: true - -os: Windows Server 2012 R2 - -# Custom clone folder (variables are not expanded here). -clone_folder: c:\influxdb - -# Environment variables -environment: - GOROOT: C:\go111 - GOFLAGS: "-mod=readonly" - -# Scripts that run after cloning repository -install: -# - choco install bzr # Needed to install go modules - - set PATH=%GOROOT%\bin;%GOPATH%\bin;C:\Program Files (x86)\Bazaar\;%PATH% - - echo %PATH% - - echo %GOPATH% - - cd C:\influxdb - - go version - - go env - -build: false -deploy: false - -test_script: - - echo "Appveyor needs to be re-enabled. See https://github.com/influxdata/influxdb/issues/10937" - # - go test -timeout 15m -v ./... diff --git a/assets/influxdb-logo.png b/assets/influxdb-logo.png deleted file mode 100644 index d831030273f..00000000000 Binary files a/assets/influxdb-logo.png and /dev/null differ diff --git a/assets/influxdbU-banner.png b/assets/influxdbU-banner.png deleted file mode 100644 index 301c77c226b..00000000000 Binary files a/assets/influxdbU-banner.png and /dev/null differ diff --git a/auth.go b/auth.go deleted file mode 100644 index e5910b266f9..00000000000 --- a/auth.go +++ /dev/null @@ -1,127 +0,0 @@ -package influxdb - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// AuthorizationKind is returned by (*Authorization).Kind(). 
-const AuthorizationKind = "authorization" - -// ErrUnableToCreateToken sanitized error message for all errors when a user cannot create a token -var ErrUnableToCreateToken = &errors.Error{ - Msg: "unable to create token", - Code: errors.EInvalid, -} - -// Authorization is an authorization. 🎉 -type Authorization struct { - ID platform.ID `json:"id"` - Token string `json:"token"` - Status Status `json:"status"` - Description string `json:"description"` - OrgID platform.ID `json:"orgID"` - UserID platform.ID `json:"userID,omitempty"` - Permissions []Permission `json:"permissions"` - CRUDLog -} - -// AuthorizationUpdate is the authorization update request. -type AuthorizationUpdate struct { - Status *Status `json:"status,omitempty"` - Description *string `json:"description,omitempty"` -} - -// Valid ensures that the authorization is valid. -func (a *Authorization) Valid() error { - for _, p := range a.Permissions { - if p.Resource.OrgID != nil && *p.Resource.OrgID != a.OrgID { - return &errors.Error{ - Msg: fmt.Sprintf("permission %s is not for org id %s", p, a.OrgID), - Code: errors.EInvalid, - } - } - } - - return nil -} - -// PermissionSet returns the set of permissions associated with the Authorization. -func (a *Authorization) PermissionSet() (PermissionSet, error) { - if !a.IsActive() { - return nil, &errors.Error{ - Code: errors.EUnauthorized, - Msg: "token is inactive", - } - } - - return a.Permissions, nil -} - -// IsActive is a stub for idpe. -func IsActive(a *Authorization) bool { - return a.IsActive() -} - -// IsActive returns true if the authorization active. -func (a *Authorization) IsActive() bool { - return a.Status == Active -} - -// GetUserID returns the user id. -func (a *Authorization) GetUserID() platform.ID { - return a.UserID -} - -// Kind returns session and is used for auditing. -func (a *Authorization) Kind() string { return AuthorizationKind } - -// Identifier returns the authorizations ID and is used for auditing. -func (a *Authorization) Identifier() platform.ID { return a.ID } - -// auth service op -const ( - OpFindAuthorizationByID = "FindAuthorizationByID" - OpFindAuthorizationByToken = "FindAuthorizationByToken" - OpFindAuthorizations = "FindAuthorizations" - OpCreateAuthorization = "CreateAuthorization" - OpUpdateAuthorization = "UpdateAuthorization" - OpDeleteAuthorization = "DeleteAuthorization" -) - -// AuthorizationService represents a service for managing authorization data. -type AuthorizationService interface { - // Returns a single authorization by ID. - FindAuthorizationByID(ctx context.Context, id platform.ID) (*Authorization, error) - - // Returns a single authorization by Token. - FindAuthorizationByToken(ctx context.Context, t string) (*Authorization, error) - - // Returns a list of authorizations that match filter and the total count of matching authorizations. - // Additional options provide pagination & sorting. - FindAuthorizations(ctx context.Context, filter AuthorizationFilter, opt ...FindOptions) ([]*Authorization, int, error) - - // Creates a new authorization and sets a.Token and a.UserID with the new identifier. - CreateAuthorization(ctx context.Context, a *Authorization) error - - // UpdateAuthorization updates the status and description if available. - UpdateAuthorization(ctx context.Context, id platform.ID, upd *AuthorizationUpdate) (*Authorization, error) - - // Removes a authorization by token. 
- DeleteAuthorization(ctx context.Context, id platform.ID) error -} - -// AuthorizationFilter represents a set of filter that restrict the returned results. -type AuthorizationFilter struct { - Token *string - ID *platform.ID - - UserID *platform.ID - User *string - - OrgID *platform.ID - Org *string -} diff --git a/authorization/error.go b/authorization/error.go deleted file mode 100644 index 8247169e03b..00000000000 --- a/authorization/error.go +++ /dev/null @@ -1,66 +0,0 @@ -package authorization - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - // ErrInvalidAuthID is used when the Authorization's ID cannot be encoded - ErrInvalidAuthID = &errors.Error{ - Code: errors.EInvalid, - Msg: "authorization ID is invalid", - } - - // ErrAuthNotFound is used when the specified auth cannot be found - ErrAuthNotFound = &errors.Error{ - Code: errors.ENotFound, - Msg: "authorization not found", - } - - // NotUniqueIDError occurs when attempting to create an Authorization with an ID that already belongs to another one - NotUniqueIDError = &errors.Error{ - Code: errors.EConflict, - Msg: "ID already exists", - } - - // ErrFailureGeneratingID occurs ony when the random number generator - // cannot generate an ID in MaxIDGenerationN times. - ErrFailureGeneratingID = &errors.Error{ - Code: errors.EInternal, - Msg: "unable to generate valid id", - } - - // ErrTokenAlreadyExistsError is used when attempting to create an authorization - // with a token that already exists - ErrTokenAlreadyExistsError = &errors.Error{ - Code: errors.EConflict, - Msg: "token already exists", - } -) - -// ErrInvalidAuthIDError is used when a service was provided an invalid ID. -func ErrInvalidAuthIDError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "auth id provided is invalid", - Err: err, - } -} - -// ErrInternalServiceError is used when the error comes from an internal system. -func ErrInternalServiceError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Err: err, - } -} - -// UnexpectedAuthIndexError is used when the error comes from an internal system. -func UnexpectedAuthIndexError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("unexpected error retrieving auth index; Err: %v", err), - } -} diff --git a/authorization/http_client.go b/authorization/http_client.go deleted file mode 100644 index d51d8366ef2..00000000000 --- a/authorization/http_client.go +++ /dev/null @@ -1,107 +0,0 @@ -package authorization - -import ( - "context" - "errors" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/pkg/httpc" -) - -var _ influxdb.AuthorizationService = (*AuthorizationClientService)(nil) - -// AuthorizationClientService connects to Influx via HTTP using tokens to manage authorizations -type AuthorizationClientService struct { - Client *httpc.Client -} - -// CreateAuthorization creates a new authorization and sets b.ID with the new identifier. -func (s *AuthorizationClientService) CreateAuthorization(ctx context.Context, a *influxdb.Authorization) error { - newAuth, err := newPostAuthorizationRequest(a) - if err != nil { - return err - } - - return s.Client. - PostJSON(newAuth, prefixAuthorization). - DecodeJSON(a). - Do(ctx) -} - -// FindAuthorizations returns a list of authorizations that match filter and the total count of matching authorizations. 
-// Additional options provide pagination & sorting. -func (s *AuthorizationClientService) FindAuthorizations(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - params := influxdb.FindOptionParams(opt...) - if filter.ID != nil { - params = append(params, [2]string{"id", filter.ID.String()}) - } - if filter.UserID != nil { - params = append(params, [2]string{"userID", filter.UserID.String()}) - } - if filter.User != nil { - params = append(params, [2]string{"user", *filter.User}) - } - if filter.OrgID != nil { - params = append(params, [2]string{"orgID", filter.OrgID.String()}) - } - if filter.Org != nil { - params = append(params, [2]string{"org", *filter.Org}) - } - - var as authsResponse - err := s.Client. - Get(prefixAuthorization). - QueryParams(params...). - DecodeJSON(&as). - Do(ctx) - if err != nil { - return nil, 0, err - } - - auths := make([]*influxdb.Authorization, 0, len(as.Auths)) - for _, a := range as.Auths { - auths = append(auths, a.toInfluxdb()) - } - - return auths, len(auths), nil -} - -// FindAuthorizationByToken is not supported by the HTTP authorization service. -func (s *AuthorizationClientService) FindAuthorizationByToken(ctx context.Context, token string) (*influxdb.Authorization, error) { - return nil, errors.New("not supported in HTTP authorization service") -} - -// FindAuthorizationByID finds a single Authorization by its ID against a remote influx server. -func (s *AuthorizationClientService) FindAuthorizationByID(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - var b influxdb.Authorization - err := s.Client. - Get(prefixAuthorization, id.String()). - DecodeJSON(&b). - Do(ctx) - if err != nil { - return nil, err - } - return &b, nil -} - -// UpdateAuthorization updates the status and description if available. -func (s *AuthorizationClientService) UpdateAuthorization(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { - var res authResponse - err := s.Client. - PatchJSON(upd, prefixAuthorization, id.String()). - DecodeJSON(&res). - Do(ctx) - if err != nil { - return nil, err - } - - return res.toInfluxdb(), nil -} - -// DeleteAuthorization removes a authorization by id. -func (s *AuthorizationClientService) DeleteAuthorization(ctx context.Context, id platform.ID) error { - return s.Client. - Delete(prefixAuthorization, id.String()). 
- Do(ctx) -} diff --git a/authorization/http_server.go b/authorization/http_server.go deleted file mode 100644 index b27b68f355c..00000000000 --- a/authorization/http_server.go +++ /dev/null @@ -1,603 +0,0 @@ -package authorization - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -// TenantService is used to look up the Organization and User for an Authorization -type TenantService interface { - FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) - FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) - FindUserByID(ctx context.Context, id platform.ID) (*influxdb.User, error) - FindUser(ctx context.Context, filter influxdb.UserFilter) (*influxdb.User, error) - FindBucketByID(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) -} - -type AuthHandler struct { - chi.Router - api *kithttp.API - log *zap.Logger - authSvc influxdb.AuthorizationService - tenantService TenantService -} - -// NewHTTPAuthHandler constructs a new http server. -func NewHTTPAuthHandler(log *zap.Logger, authService influxdb.AuthorizationService, tenantService TenantService) *AuthHandler { - h := &AuthHandler{ - api: kithttp.NewAPI(kithttp.WithLog(log)), - log: log, - authSvc: authService, - tenantService: tenantService, - } - - r := chi.NewRouter() - r.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - - r.Route("/", func(r chi.Router) { - r.Post("/", h.handlePostAuthorization) - r.Get("/", h.handleGetAuthorizations) - - r.Route("/{id}", func(r chi.Router) { - r.Get("/", h.handleGetAuthorization) - r.Patch("/", h.handleUpdateAuthorization) - r.Delete("/", h.handleDeleteAuthorization) - }) - }) - - h.Router = r - return h -} - -const prefixAuthorization = "/api/v2/authorizations" - -func (h *AuthHandler) Prefix() string { - return prefixAuthorization -} - -// handlePostAuthorization is the HTTP handler for the POST /api/v2/authorizations route. 
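Before the server-side handlers below, a quick sketch of the caller's view of the influxdb.AuthorizationService interface, for example through the HTTP-backed AuthorizationClientService above. The package and helper names, the description string, and the choice of permission are illustrative; the types and method signatures are the ones defined earlier in this diff:

```go
package authdemo

import (
	"context"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/platform"
)

// createAndListAuthorizations creates a bucket-read token scoped to one org and
// then lists that org's authorizations through the same service interface.
func createAndListAuthorizations(ctx context.Context, svc influxdb.AuthorizationService, orgID, userID platform.ID) ([]*influxdb.Authorization, error) {
	auth := &influxdb.Authorization{
		OrgID:       orgID,
		UserID:      userID,
		Description: "read-only bucket token",
		Permissions: []influxdb.Permission{{
			Action: influxdb.ReadAction,
			Resource: influxdb.Resource{
				Type:  influxdb.BucketsResourceType,
				OrgID: &orgID,
			},
		}},
	}

	// Per the interface contract, the service fills in the generated token
	// (and identifier) on success.
	if err := svc.CreateAuthorization(ctx, auth); err != nil {
		return nil, err
	}

	auths, _, err := svc.FindAuthorizations(ctx, influxdb.AuthorizationFilter{OrgID: &orgID})
	return auths, err
}
```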
-func (h *AuthHandler) handlePostAuthorization(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - a, err := decodePostAuthorizationRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - user, err := getAuthorizedUser(r, h.tenantService) - if err != nil { - h.api.Err(w, r, influxdb.ErrUnableToCreateToken) - return - } - - userID := user.ID - if a.UserID != nil && a.UserID.Valid() { - userID = *a.UserID - } - - auth := a.toInfluxdb(userID) - - if err := h.authSvc.CreateAuthorization(ctx, auth); err != nil { - h.api.Err(w, r, err) - return - } - - perms, err := h.newPermissionsResponse(ctx, auth.Permissions) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.log.Debug("Auth created ", zap.String("auth", fmt.Sprint(auth))) - - resp, err := h.newAuthResponse(ctx, auth, perms) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusCreated, resp) -} - -func getAuthorizedUser(r *http.Request, ts TenantService) (*influxdb.User, error) { - ctx := r.Context() - - a, err := icontext.GetAuthorizer(ctx) - if err != nil { - return nil, err - } - - return ts.FindUserByID(ctx, a.GetUserID()) -} - -type postAuthorizationRequest struct { - Status influxdb.Status `json:"status"` - OrgID platform.ID `json:"orgID"` - UserID *platform.ID `json:"userID,omitempty"` - Description string `json:"description"` - Permissions []influxdb.Permission `json:"permissions"` -} - -type authResponse struct { - ID platform.ID `json:"id"` - Token string `json:"token"` - Status influxdb.Status `json:"status"` - Description string `json:"description"` - OrgID platform.ID `json:"orgID"` - Org string `json:"org"` - UserID platform.ID `json:"userID"` - User string `json:"user"` - Permissions []permissionResponse `json:"permissions"` - Links map[string]string `json:"links"` - CreatedAt time.Time `json:"createdAt"` - UpdatedAt time.Time `json:"updatedAt"` -} - -// In the future, we would like only the service layer to look up the user and org to see if they are valid -// but for now we need to look up the User and Org here because the API expects the response -// to have the names of the Org and User -func (h *AuthHandler) newAuthResponse(ctx context.Context, a *influxdb.Authorization, ps []permissionResponse) (*authResponse, error) { - org, err := h.tenantService.FindOrganizationByID(ctx, a.OrgID) - if err != nil { - h.log.Info("Failed to get org", zap.String("handler", "getAuthorizations"), zap.String("orgID", a.OrgID.String()), zap.Error(err)) - return nil, err - } - user, err := h.tenantService.FindUserByID(ctx, a.UserID) - if err != nil { - h.log.Info("Failed to get user", zap.String("userID", a.UserID.String()), zap.Error(err)) - return nil, err - } - res := &authResponse{ - ID: a.ID, - Token: a.Token, - Status: a.Status, - Description: a.Description, - OrgID: a.OrgID, - UserID: a.UserID, - User: user.Name, - Org: org.Name, - Permissions: ps, - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/authorizations/%s", a.ID), - "user": fmt.Sprintf("/api/v2/users/%s", a.UserID), - }, - CreatedAt: a.CreatedAt, - UpdatedAt: a.UpdatedAt, - } - return res, nil -} - -func (p *postAuthorizationRequest) toInfluxdb(userID platform.ID) *influxdb.Authorization { - return &influxdb.Authorization{ - OrgID: p.OrgID, - Status: p.Status, - Description: p.Description, - Permissions: p.Permissions, - UserID: userID, - } -} - -func (a *authResponse) toInfluxdb() *influxdb.Authorization { - res := &influxdb.Authorization{ - ID: a.ID, - Token: a.Token, - Status: a.Status, - 
Description: a.Description, - OrgID: a.OrgID, - UserID: a.UserID, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: a.CreatedAt, - UpdatedAt: a.UpdatedAt, - }, - } - for _, p := range a.Permissions { - res.Permissions = append(res.Permissions, influxdb.Permission{Action: p.Action, Resource: p.Resource.Resource}) - } - return res -} - -type authsResponse struct { - Links map[string]string `json:"links"` - Auths []*authResponse `json:"authorizations"` -} - -func newAuthsResponse(as []*authResponse) *authsResponse { - return &authsResponse{ - // TODO(desa): update links to include paging and filter information - Links: map[string]string{ - "self": "/api/v2/authorizations", - }, - Auths: as, - } -} - -func newPostAuthorizationRequest(a *influxdb.Authorization) (*postAuthorizationRequest, error) { - res := &postAuthorizationRequest{ - OrgID: a.OrgID, - Description: a.Description, - Permissions: a.Permissions, - Status: a.Status, - } - - if a.UserID.Valid() { - res.UserID = &a.UserID - } - - res.SetDefaults() - - return res, res.Validate() -} - -func (p *postAuthorizationRequest) SetDefaults() { - if p.Status == "" { - p.Status = influxdb.Active - } -} - -func (p *postAuthorizationRequest) Validate() error { - if len(p.Permissions) == 0 { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "authorization must include permissions", - } - } - - for _, perm := range p.Permissions { - if err := perm.Valid(); err != nil { - return &errors.Error{ - Err: err, - } - } - } - - if !p.OrgID.Valid() { - return &errors.Error{ - Err: platform.ErrInvalidID, - Code: errors.EInvalid, - Msg: "org id required", - } - } - - if p.Status == "" { - p.Status = influxdb.Active - } - - err := p.Status.Valid() - if err != nil { - return err - } - - return nil -} - -type permissionResponse struct { - Action influxdb.Action `json:"action"` - Resource resourceResponse `json:"resource"` -} - -type resourceResponse struct { - influxdb.Resource - Name string `json:"name,omitempty"` - Organization string `json:"org,omitempty"` -} - -func (h *AuthHandler) newPermissionsResponse(ctx context.Context, ps []influxdb.Permission) ([]permissionResponse, error) { - res := make([]permissionResponse, len(ps)) - for i, p := range ps { - res[i] = permissionResponse{ - Action: p.Action, - Resource: resourceResponse{ - Resource: p.Resource, - }, - } - - if p.Resource.ID != nil { - name, err := h.getNameForResource(ctx, p.Resource.Type, *p.Resource.ID) - if errors.ErrorCode(err) == errors.ENotFound { - continue - } - if err != nil { - return nil, err - } - res[i].Resource.Name = name - } - - if p.Resource.OrgID != nil { - name, err := h.getNameForResource(ctx, influxdb.OrgsResourceType, *p.Resource.OrgID) - if errors.ErrorCode(err) == errors.ENotFound { - continue - } - if err != nil { - return nil, err - } - res[i].Resource.Organization = name - } - } - return res, nil -} - -func (h *AuthHandler) getNameForResource(ctx context.Context, resource influxdb.ResourceType, id platform.ID) (string, error) { - if err := resource.Valid(); err != nil { - return "", err - } - - if ok := id.Valid(); !ok { - return "", platform.ErrInvalidID - } - - switch resource { - case influxdb.BucketsResourceType: - r, err := h.tenantService.FindBucketByID(ctx, id) - if err != nil { - return "", err - } - return r.Name, nil - case influxdb.OrgsResourceType: - r, err := h.tenantService.FindOrganizationByID(ctx, id) - if err != nil { - return "", err - } - return r.Name, nil - case influxdb.UsersResourceType: - r, err := h.tenantService.FindUserByID(ctx, id) - if err != nil 
{ - return "", err - } - return r.Name, nil - } - - return "", nil -} - -func decodePostAuthorizationRequest(ctx context.Context, r *http.Request) (*postAuthorizationRequest, error) { - a := &postAuthorizationRequest{} - if err := json.NewDecoder(r.Body).Decode(a); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid json structure", - Err: err, - } - } - - a.SetDefaults() - - return a, a.Validate() -} - -// handleGetAuthorizations is the HTTP handler for the GET /api/v2/authorizations route. -func (h *AuthHandler) handleGetAuthorizations(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetAuthorizationsRequest(ctx, r) - if err != nil { - h.log.Info("Failed to decode request", zap.String("handler", "getAuthorizations"), zap.Error(err)) - h.api.Err(w, r, err) - return - } - - f := req.filter - // Look up user ID and org ID if they were not provided, but names were - if f.UserID == nil && f.User != nil { - u, err := h.tenantService.FindUser(ctx, influxdb.UserFilter{Name: f.User}) - if err != nil { - h.api.Err(w, r, err) - return - } - f.UserID = &u.ID - } - - if f.OrgID == nil && f.Org != nil { - o, err := h.tenantService.FindOrganization(ctx, influxdb.OrganizationFilter{Name: f.Org}) - if err != nil { - h.api.Err(w, r, err) - return - } - f.OrgID = &o.ID - } - - opts := influxdb.FindOptions{} - as, _, err := h.authSvc.FindAuthorizations(ctx, f, opts) - - if err != nil { - h.api.Err(w, r, err) - return - } - - auths := make([]*authResponse, 0, len(as)) - for _, a := range as { - ps, err := h.newPermissionsResponse(ctx, a.Permissions) - if err != nil { - h.api.Err(w, r, err) - return - } - - resp, err := h.newAuthResponse(ctx, a, ps) - if err != nil { - h.log.Info("Failed to create auth response", zap.String("handler", "getAuthorizations")) - continue - } - auths = append(auths, resp) - } - - h.log.Debug("Auths retrieved ", zap.String("auths", fmt.Sprint(auths))) - - h.api.Respond(w, r, http.StatusOK, newAuthsResponse(auths)) -} - -type getAuthorizationsRequest struct { - filter influxdb.AuthorizationFilter -} - -func decodeGetAuthorizationsRequest(ctx context.Context, r *http.Request) (*getAuthorizationsRequest, error) { - qp := r.URL.Query() - - req := &getAuthorizationsRequest{} - - userID := qp.Get("userID") - if userID != "" { - id, err := platform.IDFromString(userID) - if err != nil { - return nil, err - } - req.filter.UserID = id - } - - user := qp.Get("user") - if user != "" { - req.filter.User = &user - } - - orgID := qp.Get("orgID") - if orgID != "" { - id, err := platform.IDFromString(orgID) - if err != nil { - return nil, err - } - req.filter.OrgID = id - } - - org := qp.Get("org") - if org != "" { - req.filter.Org = &org - } - - authID := qp.Get("id") - if authID != "" { - id, err := platform.IDFromString(authID) - if err != nil { - return nil, err - } - req.filter.ID = id - } - - return req, nil -} - -func (h *AuthHandler) handleGetAuthorization(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.log.Info("Failed to decode request", zap.String("handler", "getAuthorization"), zap.Error(err)) - h.api.Err(w, r, err) - return - } - - a, err := h.authSvc.FindAuthorizationByID(ctx, *id) - if err != nil { - // Don't log here, it should already be handled by the service - h.api.Err(w, r, err) - return - } - - ps, err := h.newPermissionsResponse(ctx, a.Permissions) - if err != nil { - h.api.Err(w, r, err) - return - } - - 
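As handleGetAuthorizations above shows, GET /api/v2/authorizations accepts either IDs (userID, orgID, id) or names (user, org); names are resolved to IDs through the TenantService before the filter is applied. A client-side sketch of the name-based form (package and helper names and the example values are placeholders):

```go
package authdemo

import "net/http"

// newListAuthorizationsByNameRequest builds the name-filtered list request; the
// server resolves user/org names to IDs via TenantService.FindUser and FindOrganization.
func newListAuthorizationsByNameRequest(baseURL, user, org string) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodGet, baseURL+"/api/v2/authorizations", nil)
	if err != nil {
		return nil, err
	}
	q := req.URL.Query()
	q.Set("user", user)
	q.Set("org", org)
	req.URL.RawQuery = q.Encode()
	return req, nil
}
```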
h.log.Debug("Auth retrieved ", zap.String("auth", fmt.Sprint(a))) - - resp, err := h.newAuthResponse(ctx, a, ps) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, resp) -} - -// handleUpdateAuthorization is the HTTP handler for the PATCH /api/v2/authorizations/:id route that updates the authorization's status and desc. -func (h *AuthHandler) handleUpdateAuthorization(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeUpdateAuthorizationRequest(ctx, r) - if err != nil { - h.log.Info("Failed to decode request", zap.String("handler", "updateAuthorization"), zap.Error(err)) - h.api.Err(w, r, err) - return - } - - a, err := h.authSvc.FindAuthorizationByID(ctx, req.ID) - if err != nil { - h.api.Err(w, r, err) - return - } - - a, err = h.authSvc.UpdateAuthorization(ctx, a.ID, req.AuthorizationUpdate) - if err != nil { - h.api.Err(w, r, err) - return - } - - ps, err := h.newPermissionsResponse(ctx, a.Permissions) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Auth updated", zap.String("auth", fmt.Sprint(a))) - - resp, err := h.newAuthResponse(ctx, a, ps) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, resp) -} - -type updateAuthorizationRequest struct { - ID platform.ID - *influxdb.AuthorizationUpdate -} - -func decodeUpdateAuthorizationRequest(ctx context.Context, r *http.Request) (*updateAuthorizationRequest, error) { - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - return nil, err - } - - upd := &influxdb.AuthorizationUpdate{} - if err := json.NewDecoder(r.Body).Decode(upd); err != nil { - return nil, err - } - - return &updateAuthorizationRequest{ - ID: *id, - AuthorizationUpdate: upd, - }, nil -} - -// handleDeleteAuthorization is the HTTP handler for the DELETE /api/v2/authorizations/:id route. 
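The PATCH handler above only touches the status and description carried in an influxdb.AuthorizationUpdate. A sketch of building that request body; the package and helper names are hypothetical, and it assumes an Inactive status constant mirroring the Active constant used elsewhere in this diff:

```go
package authdemo

import (
	"encoding/json"

	"github.com/influxdata/influxdb/v2"
)

// deactivateBody builds the PATCH /api/v2/authorizations/{id} payload; only the
// fields being changed need to be present, thanks to the omitempty JSON tags.
func deactivateBody() ([]byte, error) {
	status := influxdb.Inactive // assumed counterpart of influxdb.Active; marshals to "inactive"
	desc := "rotated out of service"
	// Produces: {"status":"inactive","description":"rotated out of service"}
	return json.Marshal(influxdb.AuthorizationUpdate{
		Status:      &status,
		Description: &desc,
	})
}
```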
-func (h *AuthHandler) handleDeleteAuthorization(w http.ResponseWriter, r *http.Request) { - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.log.Info("Failed to decode request", zap.String("handler", "deleteAuthorization"), zap.Error(err)) - h.api.Err(w, r, err) - return - } - - if err := h.authSvc.DeleteAuthorization(r.Context(), *id); err != nil { - // Don't log here, it should already be handled by the service - h.api.Err(w, r, err) - return - } - - h.log.Debug("Auth deleted", zap.String("authID", fmt.Sprint(id))) - - w.WriteHeader(http.StatusNoContent) -} diff --git a/authorization/http_server_test.go b/authorization/http_server_test.go deleted file mode 100644 index c2cf01fe741..00000000000 --- a/authorization/http_server_test.go +++ /dev/null @@ -1,914 +0,0 @@ -package authorization - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "sort" - "testing" - - "github.com/go-chi/chi" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestService_handlePostAuthorization(t *testing.T) { - type fields struct { - AuthorizationService influxdb.AuthorizationService - TenantService TenantService - } - type args struct { - session *influxdb.Authorization - authorization *influxdb.Authorization - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "create a new authorization", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - CreateAuthorizationFn: func(ctx context.Context, c *influxdb.Authorization) error { - c.ID = itesting.MustIDBase16("020f755c3c082000") - return nil - }, - }, - TenantService: &tenantService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ - ID: id, - Name: "u1", - }, nil - }, - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: id, - Name: "o1", - }, nil - }, - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: id, - Name: "b1", - }, nil - }, - }, - }, - args: args{ - session: &influxdb.Authorization{ - Token: "session-token", - ID: itesting.MustIDBase16("020f755c3c082000"), - UserID: itesting.MustIDBase16("aaaaaaaaaaaaaaaa"), - OrgID: itesting.MustIDBase16("020f755c3c083000"), - Description: "can write to authorization resource", - Permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: itesting.IDPtr(itesting.MustIDBase16("020f755c3c083000")), - }, - }, - }, - }, - authorization: &influxdb.Authorization{ - ID: itesting.MustIDBase16("020f755c3c082000"), - OrgID: itesting.MustIDBase16("020f755c3c083000"), - Description: "only read dashboards sucka", - Permissions: []influxdb.Permission{ - { - Action: 
influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - OrgID: itesting.IDPtr(itesting.MustIDBase16("020f755c3c083000")), - }, - }, - }, - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "description": "only read dashboards sucka", - "id": "020f755c3c082000", - "links": { - "self": "/api/v2/authorizations/020f755c3c082000", - "user": "/api/v2/users/aaaaaaaaaaaaaaaa" - }, - "org": "o1", - "orgID": "020f755c3c083000", - "permissions": [ - { - "action": "read", - "resource": { - "type": "dashboards", - "orgID": "020f755c3c083000", - "org": "o1" - } - } - ], - "status": "active", - "token": "new-test-token", - "user": "u1", - "userID": "aaaaaaaaaaaaaaaa" -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Helper() - - s := itesting.NewTestInmemStore(t) - storage, err := NewStore(s) - if err != nil { - t.Fatal(err) - } - - svc := NewService(storage, tt.fields.TenantService) - - handler := NewHTTPAuthHandler(zaptest.NewLogger(t), svc, tt.fields.TenantService) - router := chi.NewRouter() - router.Mount(handler.Prefix(), handler) - - req, err := newPostAuthorizationRequest(tt.args.authorization) - if err != nil { - t.Fatalf("failed to create new authorization request: %v", err) - } - b, err := json.Marshal(req) - if err != nil { - t.Fatalf("failed to unmarshal authorization: %v", err) - } - - r := httptest.NewRequest("GET", "http://any.url", bytes.NewReader(b)) - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "userID", - Value: fmt.Sprintf("%d", tt.args.session.UserID), - }, - })) - - w := httptest.NewRecorder() - - ctx := icontext.SetAuthorizer(context.Background(), tt.args.session) - r = r.WithContext(ctx) - - handler.handlePostAuthorization(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Logf("headers: %v body: %s", res.Header, body) - t.Errorf("%q. handlePostAuthorization() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePostAuthorization() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if diff, err := jsonDiff(string(body), tt.wants.body); diff != "" { - t.Errorf("%q. 
handlePostAuthorization() = ***%s***", tt.name, diff) - } else if err != nil { - t.Errorf("%q, handlePostAuthorization() error: %v", tt.name, err) - } - }) - } -} - -func TestService_handleGetAuthorization(t *testing.T) { - type fields struct { - AuthorizationService influxdb.AuthorizationService - TenantService TenantService - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get a authorization by id", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - FindAuthorizationByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - if id == itesting.MustIDBase16("020f755c3c082000") { - return &influxdb.Authorization{ - ID: itesting.MustIDBase16("020f755c3c082000"), - UserID: itesting.MustIDBase16("020f755c3c082000"), - OrgID: itesting.MustIDBase16("020f755c3c083000"), - Permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: itesting.IDPtr(itesting.MustIDBase16("020f755c3c083000")), - ID: func() *platform.ID { - id := itesting.MustIDBase16("020f755c3c084000") - return &id - }(), - }, - }, - }, - Token: "hello", - }, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - TenantService: &tenantService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ - ID: id, - Name: "u1", - }, nil - }, - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: id, - Name: "o1", - }, nil - }, - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: id, - Name: "b1", - }, nil - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "description": "", - "id": "020f755c3c082000", - "links": { - "self": "/api/v2/authorizations/020f755c3c082000", - "user": "/api/v2/users/020f755c3c082000" - }, - "org": "o1", - "orgID": "020f755c3c083000", - "permissions": [ - { - "action": "read", - "resource": { - "type": "buckets", - "orgID": "020f755c3c083000", - "id": "020f755c3c084000", - "name": "b1", - "org": "o1" - } - } - ], - "status": "", - "token": "hello", - "user": "u1", - "userID": "020f755c3c082000" -} -`, - }, - }, - { - name: "not found", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - FindAuthorizationByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "authorization not found", - } - }, - }, - TenantService: &tenantService{}, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - body: `{"code":"not found","message":"authorization not found"}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Helper() - - handler := NewHTTPAuthHandler(zaptest.NewLogger(t), tt.fields.AuthorizationService, tt.fields.TenantService) - router := chi.NewRouter() - router.Mount(handler.Prefix(), handler) - - w := httptest.NewRecorder() - - r := httptest.NewRequest("GET", "http://any.url", nil) - rctx := chi.NewRouteContext() - 
rctx.URLParams.Add("id", tt.args.id) - r = r.WithContext(context.WithValue(r.Context(), chi.RouteCtxKey, rctx)) - - handler.handleGetAuthorization(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Logf("headers: %v body: %s", res.Header, body) - t.Errorf("%q. handleGetAuthorization() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetAuthorization() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if diff, err := jsonDiff(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetAuthorization. error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && diff != "" { - t.Errorf("%q. handleGetAuthorization() = -got/+want %s**", tt.name, diff) - } - }) - } -} - -func TestGetAuthorizationsWithNames(t *testing.T) { - t.Parallel() - - testUserName := "user" - testUserID := itesting.MustIDBase16("6c7574652c206f6e") - testOrgName := "org" - testOrgID := itesting.MustIDBase16("9d70616e656d2076") - - ts := &tenantService{ - FindUserFn: func(ctx context.Context, f influxdb.UserFilter) (*influxdb.User, error) { - require.Equal(t, &testUserName, f.Name) - - return &influxdb.User{ - ID: testUserID, - Name: testUserName, - }, nil - }, - - FindOrganizationF: func(ctx context.Context, f influxdb.OrganizationFilter) (*influxdb.Organization, error) { - require.Equal(t, &testOrgName, f.Name) - - return &influxdb.Organization{ - ID: testOrgID, - Name: testOrgName, - }, nil - }, - } - - as := &mock.AuthorizationService{ - FindAuthorizationsFn: func(ctx context.Context, f influxdb.AuthorizationFilter, opts ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - require.Equal(t, &testOrgID, f.OrgID) - require.Equal(t, &testUserID, f.UserID) - - return []*influxdb.Authorization{}, 0, nil - }, - } - - h := NewHTTPAuthHandler(zaptest.NewLogger(t), as, ts) - - w := httptest.NewRecorder() - r := httptest.NewRequest("get", "http://any.url", nil) - qp := r.URL.Query() - qp.Add("user", testUserName) - qp.Add("org", testOrgName) - r.URL.RawQuery = qp.Encode() - - h.handleGetAuthorizations(w, r) -} - -func TestService_handleGetAuthorizations(t *testing.T) { - type fields struct { - AuthorizationService influxdb.AuthorizationService - TenantService TenantService - } - - type args struct { - queryParams map[string][]string - } - - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get all authorizations", - fields: fields{ - &mock.AuthorizationService{ - FindAuthorizationsFn: func(ctx context.Context, filter influxdb.AuthorizationFilter, opts ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - return []*influxdb.Authorization{ - { - ID: itesting.MustIDBase16("0d0a657820696e74"), - Token: "hello", - UserID: itesting.MustIDBase16("2070616e656d2076"), - OrgID: itesting.MustIDBase16("3070616e656d2076"), - Description: "t1", - Permissions: influxdb.OperPermissions(), - }, - { - ID: itesting.MustIDBase16("6669646573207375"), - Token: "example", - UserID: itesting.MustIDBase16("6c7574652c206f6e"), - OrgID: itesting.MustIDBase16("9d70616e656d2076"), - Description: "t2", - Permissions: influxdb.OperPermissions(), - }, - }, 2, nil - }, - }, - &tenantService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) 
(*influxdb.User, error) { - return &influxdb.User{ - ID: id, - Name: id.String(), - }, nil - }, - - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: id, - Name: id.String(), - }, nil - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: fmt.Sprintf(` -{ - "links": { - "self": "/api/v2/authorizations" - }, - "authorizations": [ - { - "links": { - "user": "/api/v2/users/2070616e656d2076", - "self": "/api/v2/authorizations/0d0a657820696e74" - }, - "id": "0d0a657820696e74", - "userID": "2070616e656d2076", - "user": "2070616e656d2076", - "org": "3070616e656d2076", - "orgID": "3070616e656d2076", - "status": "", - "token": "hello", - "description": "t1", - "permissions": %s, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z" - }, - { - "links": { - "user": "/api/v2/users/6c7574652c206f6e", - "self": "/api/v2/authorizations/6669646573207375" - }, - "id": "6669646573207375", - "userID": "6c7574652c206f6e", - "user": "6c7574652c206f6e", - "org": "9d70616e656d2076", - "orgID": "9d70616e656d2076", - "status": "", - "token": "example", - "description": "t2", - "permissions": %s, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z" - } - ] -} -`, - MustMarshal(influxdb.OperPermissions()), - MustMarshal(influxdb.OperPermissions())), - }, - }, - { - name: "skip authorizations with no org", - fields: fields{ - &mock.AuthorizationService{ - FindAuthorizationsFn: func(ctx context.Context, filter influxdb.AuthorizationFilter, opts ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - return []*influxdb.Authorization{ - { - ID: itesting.MustIDBase16("0d0a657820696e74"), - Token: "hello", - UserID: itesting.MustIDBase16("2070616e656d2076"), - OrgID: itesting.MustIDBase16("3070616e656d2076"), - Description: "t1", - Permissions: influxdb.OperPermissions(), - }, - { - ID: itesting.MustIDBase16("6669646573207375"), - Token: "example", - UserID: itesting.MustIDBase16("6c7574652c206f6e"), - OrgID: itesting.MustIDBase16("9d70616e656d2076"), - Description: "t2", - Permissions: influxdb.OperPermissions(), - }, - }, 2, nil - }, - }, - &tenantService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - if id.String() == "2070616e656d2076" { - return &influxdb.User{ - ID: id, - Name: id.String(), - }, nil - } - return nil, &errors.Error{} - }, - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: id, - Name: id.String(), - }, nil - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: fmt.Sprintf(` -{ - "links": { - "self": "/api/v2/authorizations" - }, - "authorizations": [ - { - "links": { - "user": "/api/v2/users/2070616e656d2076", - "self": "/api/v2/authorizations/0d0a657820696e74" - }, - "id": "0d0a657820696e74", - "userID": "2070616e656d2076", - "user": "2070616e656d2076", - "org": "3070616e656d2076", - "orgID": "3070616e656d2076", - "status": "", - "token": "hello", - "description": "t1", - "permissions": %s, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z" - } - ] -} -`, - MustMarshal(influxdb.OperPermissions())), - }, - }, - { - name: "skip authorizations with no user", - fields: fields{ - &mock.AuthorizationService{ - FindAuthorizationsFn: func(ctx 
context.Context, filter influxdb.AuthorizationFilter, opts ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - return []*influxdb.Authorization{ - { - ID: itesting.MustIDBase16("0d0a657820696e74"), - Token: "hello", - UserID: itesting.MustIDBase16("2070616e656d2076"), - OrgID: itesting.MustIDBase16("3070616e656d2076"), - Description: "t1", - Permissions: influxdb.OperPermissions(), - }, - { - ID: itesting.MustIDBase16("6669646573207375"), - Token: "example", - UserID: itesting.MustIDBase16("6c7574652c206f6e"), - OrgID: itesting.MustIDBase16("9d70616e656d2076"), - Description: "t2", - Permissions: influxdb.OperPermissions(), - }, - }, 2, nil - }, - }, - &tenantService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ - ID: id, - Name: id.String(), - }, nil - }, - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - if id.String() == "3070616e656d2076" { - return &influxdb.Organization{ - ID: id, - Name: id.String(), - }, nil - } - return nil, &errors.Error{} - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: fmt.Sprintf(` -{ - "links": { - "self": "/api/v2/authorizations" - }, - "authorizations": [ - { - "links": { - "user": "/api/v2/users/2070616e656d2076", - "self": "/api/v2/authorizations/0d0a657820696e74" - }, - "id": "0d0a657820696e74", - "userID": "2070616e656d2076", - "user": "2070616e656d2076", - "org": "3070616e656d2076", - "orgID": "3070616e656d2076", - "status": "", - "token": "hello", - "description": "t1", - "permissions": %s, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z" - } - ] -} -`, - MustMarshal(influxdb.OperPermissions())), - }, - }, - { - name: "get all authorizations when there are none", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - FindAuthorizationsFn: func(ctx context.Context, filter influxdb.AuthorizationFilter, opts ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - return []*influxdb.Authorization{}, 0, nil - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/authorizations" - }, - "authorizations": [] -}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Helper() - - s := itesting.NewTestInmemStore(t) - storage, err := NewStore(s) - if err != nil { - t.Fatal(err) - } - - svc := NewService(storage, tt.fields.TenantService) - - handler := NewHTTPAuthHandler(zaptest.NewLogger(t), svc, tt.fields.TenantService) - router := chi.NewRouter() - router.Mount(handler.Prefix(), handler) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - qp := r.URL.Query() - for k, vs := range tt.args.queryParams { - for _, v := range vs { - qp.Add(k, v) - } - } - r.URL.RawQuery = qp.Encode() - - w := httptest.NewRecorder() - - handler.handleGetAuthorizations(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetAuthorizations() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. 
handleGetAuthorizations() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if diff, err := jsonDiff(string(body), tt.wants.body); diff != "" { - t.Errorf("%q. handleGetAuthorizations() = ***%s***", tt.name, diff) - } else if err != nil { - t.Errorf("%q, handleGetAuthorizations() error: %v", tt.name, err) - } - - }) - } -} - -func TestService_handleDeleteAuthorization(t *testing.T) { - type fields struct { - AuthorizationService influxdb.AuthorizationService - TenantService TenantService - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "remove a authorization by id", - fields: fields{ - &mock.AuthorizationService{ - DeleteAuthorizationFn: func(ctx context.Context, id platform.ID) error { - if id == itesting.MustIDBase16("020f755c3c082000") { - return nil - } - - return fmt.Errorf("wrong id") - }, - }, - &tenantService{}, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNoContent, - }, - }, - { - name: "authorization not found", - fields: fields{ - &mock.AuthorizationService{ - DeleteAuthorizationFn: func(ctx context.Context, id platform.ID) error { - return &errors.Error{ - Code: errors.ENotFound, - Msg: "authorization not found", - } - }, - }, - &tenantService{}, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - body: `{"code":"not found","message":"authorization not found"}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Helper() - - handler := NewHTTPAuthHandler(zaptest.NewLogger(t), tt.fields.AuthorizationService, tt.fields.TenantService) - router := chi.NewRouter() - router.Mount(handler.Prefix(), handler) - - w := httptest.NewRecorder() - - r := httptest.NewRequest("GET", "http://any.url", nil) - rctx := chi.NewRouteContext() - rctx.URLParams.Add("id", tt.args.id) - r = r.WithContext(context.WithValue(r.Context(), chi.RouteCtxKey, rctx)) - - handler.handleDeleteAuthorization(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleDeleteAuthorization() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleDeleteAuthorization() = %v, want %v", tt.name, content, tt.wants.contentType) - } - - if tt.wants.body != "" { - if diff, err := jsonDiff(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleDeleteAuthorization(). error unmarshalling json %v", tt.name, err) - } else if diff != "" { - t.Errorf("%q. 
handleDeleteAuthorization() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func jsonDiff(s1, s2 string) (diff string, err error) { - if s1 == s2 { - return "", nil - } - - if s1 == "" { - return s2, fmt.Errorf("s1 is empty") - } - - if s2 == "" { - return s1, fmt.Errorf("s2 is empty") - } - - var o1 influxdb.Authorization - if err = json.Unmarshal([]byte(s1), &o1); err != nil { - return - } - - var o2 influxdb.Authorization - if err = json.Unmarshal([]byte(s2), &o2); err != nil { - return - } - - return cmp.Diff(o1, o2, authorizationCmpOptions...), err -} - -var authorizationCmpOptions = cmp.Options{ - cmpopts.EquateEmpty(), - cmpopts.IgnoreFields(influxdb.Authorization{}, "ID", "Token", "CreatedAt", "UpdatedAt"), - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Authorization) []*influxdb.Authorization { - out := append([]*influxdb.Authorization(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func MustMarshal(o interface{}) []byte { - b, _ := json.Marshal(o) - return b -} diff --git a/authorization/middleware_auth.go b/authorization/middleware_auth.go deleted file mode 100644 index 91a87e6081e..00000000000 --- a/authorization/middleware_auth.go +++ /dev/null @@ -1,124 +0,0 @@ -package authorization - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -type AuthedAuthorizationService struct { - s influxdb.AuthorizationService - ts TenantService -} - -var _ influxdb.AuthorizationService = (*AuthedAuthorizationService)(nil) - -func NewAuthedAuthorizationService(s influxdb.AuthorizationService, ts TenantService) *AuthedAuthorizationService { - return &AuthedAuthorizationService{ - s: s, - ts: ts, - } -} - -func (s *AuthedAuthorizationService) CreateAuthorization(ctx context.Context, a *influxdb.Authorization) error { - if _, _, err := authorizer.AuthorizeCreate(ctx, influxdb.AuthorizationsResourceType, a.OrgID); err != nil { - return err - } - if _, _, err := authorizer.AuthorizeWriteResource(ctx, influxdb.UsersResourceType, a.UserID); err != nil { - return err - } - if err := authorizer.VerifyPermissions(ctx, a.Permissions); err != nil { - return err - } - for _, v := range a.Permissions { - if v.Resource.Type == influxdb.InstanceResourceType { - return fmt.Errorf("authorizations cannot be created with the instance type, it is only used during setup") - } - } - - return s.s.CreateAuthorization(ctx, a) -} - -func (s *AuthedAuthorizationService) FindAuthorizationByToken(ctx context.Context, t string) (*influxdb.Authorization, error) { - a, err := s.s.FindAuthorizationByToken(ctx, t) - if err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeRead(ctx, influxdb.AuthorizationsResourceType, a.ID, a.OrgID); err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeReadResource(ctx, influxdb.UsersResourceType, a.UserID); err != nil { - return nil, err - } - return a, nil -} - -func (s *AuthedAuthorizationService) FindAuthorizationByID(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - a, err := s.s.FindAuthorizationByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeRead(ctx, 
influxdb.AuthorizationsResourceType, a.ID, a.OrgID); err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeReadResource(ctx, influxdb.UsersResourceType, a.UserID); err != nil { - return nil, err - } - return a, nil -} - -func (s *AuthedAuthorizationService) FindAuthorizations(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - // TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data - // will likely be expensive. - as, _, err := s.s.FindAuthorizations(ctx, filter, opt...) - if err != nil { - return nil, 0, err - } - return authorizer.AuthorizeFindAuthorizations(ctx, as) -} - -func (s *AuthedAuthorizationService) UpdateAuthorization(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { - a, err := s.s.FindAuthorizationByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.AuthorizationsResourceType, a.ID, a.OrgID); err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeWriteResource(ctx, influxdb.UsersResourceType, a.UserID); err != nil { - return nil, err - } - return s.s.UpdateAuthorization(ctx, id, upd) -} - -func (s *AuthedAuthorizationService) DeleteAuthorization(ctx context.Context, id platform.ID) error { - a, err := s.s.FindAuthorizationByID(ctx, id) - if err != nil { - return err - } - if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.AuthorizationsResourceType, a.ID, a.OrgID); err != nil { - return err - } - if _, _, err := authorizer.AuthorizeWriteResource(ctx, influxdb.UsersResourceType, a.UserID); err != nil { - return err - } - return s.s.DeleteAuthorization(ctx, id) -} - -// VerifyPermissions ensures that an authorization is allowed all of the appropriate permissions. -func VerifyPermissions(ctx context.Context, ps []influxdb.Permission) error { - for _, p := range ps { - if err := authorizer.IsAllowed(ctx, p); err != nil { - return &errors.Error{ - Err: err, - Msg: fmt.Sprintf("permission %s is not allowed", p), - Code: errors.EForbidden, - } - } - } - return nil -} diff --git a/authorization/middleware_auth_test.go b/authorization/middleware_auth_test.go deleted file mode 100644 index 7ed49182a99..00000000000 --- a/authorization/middleware_auth_test.go +++ /dev/null @@ -1,467 +0,0 @@ -package authorization_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -var authorizationCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Authorization) []*influxdb.Authorization { - out := append([]*influxdb.Authorization(nil), in...) 
// Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func TestAuthorizationService_ReadAuthorization(t *testing.T) { - type args struct { - permissions []influxdb.Permission - } - type wants struct { - err error - authorizations []*influxdb.Authorization - } - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "authorized to access id", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - authorizations: []*influxdb.Authorization{ - { - ID: 10, - UserID: 1, - OrgID: 1, - }, - }, - }, - }, - { - name: "unauthorized to access id - wrong org", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(2), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/0000000000000001/authorizations/000000000000000a is unauthorized", - Code: errors.EUnauthorized, - }, - authorizations: []*influxdb.Authorization{}, - }, - }, - { - name: "unauthorized to access id - wrong user", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:users/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - authorizations: []*influxdb.Authorization{}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - m := &mock.AuthorizationService{} - m.FindAuthorizationByIDFn = func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - return &influxdb.Authorization{ - ID: id, - UserID: 1, - OrgID: 1, - }, nil - } - m.FindAuthorizationByTokenFn = func(ctx context.Context, t string) (*influxdb.Authorization, error) { - return &influxdb.Authorization{ - ID: 10, - UserID: 1, - OrgID: 1, - }, nil - } - m.FindAuthorizationsFn = func(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - return []*influxdb.Authorization{ - { - ID: 10, - UserID: 1, - OrgID: 1, - }, - }, 1, nil - } - // set up tenant service - ctx := context.Background() - st := inmem.NewKVStore() - if err := all.Up(ctx, zaptest.NewLogger(t), st); err != nil { - t.Fatal(err) - } - - store := tenant.NewStore(st) - ts := tenant.NewService(store) - s := authorization.NewAuthedAuthorizationService(m, ts) - - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - t.Run("find authorization by id", func(t *testing.T) { - _, err := s.FindAuthorizationByID(ctx, 10) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - t.Run("find authorization by token", func(t *testing.T) { - _, err := 
s.FindAuthorizationByToken(ctx, "10") - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - - t.Run("find authorizations", func(t *testing.T) { - as, _, err := s.FindAuthorizations(ctx, influxdb.AuthorizationFilter{}) - influxdbtesting.ErrorsEqual(t, err, nil) - - if diff := cmp.Diff(as, tt.wants.authorizations, authorizationCmpOptions...); diff != "" { - t.Errorf("authorizations are different -got/+want\ndiff %s", diff) - } - }) - }) - } -} - -func TestAuthorizationService_WriteAuthorization(t *testing.T) { - type args struct { - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "authorized to write authorization", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to write authorization - wrong org", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(2), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/0000000000000001/authorizations/000000000000000a is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - { - name: "unauthorized to write authorization - wrong user", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:users/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - m := &mock.AuthorizationService{} - m.FindAuthorizationByIDFn = func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - return &influxdb.Authorization{ - ID: id, - UserID: 1, - OrgID: 1, - }, nil - } - m.CreateAuthorizationFn = func(ctx context.Context, a *influxdb.Authorization) error { - return nil - } - m.DeleteAuthorizationFn = func(ctx context.Context, id platform.ID) error { - return nil - } - m.UpdateAuthorizationFn = func(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { - return nil, nil - } - // set up tenant service - ctx := context.Background() - st := inmem.NewKVStore() - if err := all.Up(ctx, zaptest.NewLogger(t), st); err != nil { - t.Fatal(err) - } - - store := tenant.NewStore(st) - ts := tenant.NewService(store) - s := authorization.NewAuthedAuthorizationService(m, ts) - - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - t.Run("update authorization", func(t *testing.T) { - _, err := s.UpdateAuthorization(ctx, 10, &influxdb.AuthorizationUpdate{Status: influxdb.Active.Ptr()}) - influxdbtesting.ErrorsEqual(t, err, 
tt.wants.err) - }) - - t.Run("delete authorization", func(t *testing.T) { - err := s.DeleteAuthorization(ctx, 10) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - - }) - } -} - -func TestAuthorizationService_CreateAuthorization(t *testing.T) { - type args struct { - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "authorized to write authorization", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to write authorization - wrong org", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(2), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/0000000000000001/authorizations is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - { - name: "unauthorized to write authorization - wrong user", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:users/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - m := &mock.AuthorizationService{} - m.FindAuthorizationByIDFn = func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - return &influxdb.Authorization{ - ID: id, - UserID: 1, - OrgID: 1, - }, nil - } - m.CreateAuthorizationFn = func(ctx context.Context, a *influxdb.Authorization) error { - return nil - } - m.DeleteAuthorizationFn = func(ctx context.Context, id platform.ID) error { - return nil - } - m.UpdateAuthorizationFn = func(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { - return nil, nil - } - // set up tenant service - st := inmem.NewKVStore() - ctx := context.Background() - if err := all.Up(ctx, zaptest.NewLogger(t), st); err != nil { - t.Fatal(err) - } - - store := tenant.NewStore(st) - ts := tenant.NewService(store) - s := authorization.NewAuthedAuthorizationService(m, ts) - - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.CreateAuthorization(ctx, &influxdb.Authorization{OrgID: 1, UserID: 1}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/authorization/middleware_logging.go b/authorization/middleware_logging.go deleted file mode 100644 index 42a4ac24988..00000000000 --- a/authorization/middleware_logging.go +++ /dev/null @@ -1,101 +0,0 @@ -package authorization - -import ( - "context" - "fmt" - "time" - - "github.com/influxdata/influxdb/v2" - 
"github.com/influxdata/influxdb/v2/kit/platform" - "go.uber.org/zap" -) - -type AuthLogger struct { - logger *zap.Logger - authService influxdb.AuthorizationService -} - -// NewAuthLogger returns a logging service middleware for the Authorization Service. -func NewAuthLogger(log *zap.Logger, s influxdb.AuthorizationService) *AuthLogger { - return &AuthLogger{ - logger: log, - authService: s, - } -} - -var _ influxdb.AuthorizationService = (*AuthLogger)(nil) - -func (l *AuthLogger) CreateAuthorization(ctx context.Context, a *influxdb.Authorization) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to create authorization", zap.Error(err), dur) - return - } - l.logger.Debug("authorization create", dur) - }(time.Now()) - return l.authService.CreateAuthorization(ctx, a) -} - -func (l *AuthLogger) FindAuthorizationByID(ctx context.Context, id platform.ID) (a *influxdb.Authorization, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to find authorization with ID %v", id) - l.logger.Debug(msg, zap.Error(err), dur) - return - } - l.logger.Debug("auth find by ID", dur) - }(time.Now()) - return l.authService.FindAuthorizationByID(ctx, id) -} - -func (l *AuthLogger) FindAuthorizationByToken(ctx context.Context, t string) (a *influxdb.Authorization, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find authorization with token", zap.Error(err), dur) - return - } - l.logger.Debug("auth find", dur) - - }(time.Now()) - return l.authService.FindAuthorizationByToken(ctx, t) -} - -func (l *AuthLogger) FindAuthorizations(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) (as []*influxdb.Authorization, count int, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find authorizations matching the given filter", zap.Error(err), dur) - return - } - l.logger.Debug("authorizations find", dur) - }(time.Now()) - return l.authService.FindAuthorizations(ctx, filter) -} - -func (l *AuthLogger) UpdateAuthorization(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (a *influxdb.Authorization, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to update authorization", zap.Error(err), dur) - return - } - l.logger.Debug("authorization update", dur) - }(time.Now()) - return l.authService.UpdateAuthorization(ctx, id, upd) -} - -func (l *AuthLogger) DeleteAuthorization(ctx context.Context, id platform.ID) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to delete authorization with ID %v", id) - l.logger.Debug(msg, zap.Error(err), dur) - return - } - l.logger.Debug("authorization delete", dur) - }(time.Now()) - return l.authService.DeleteAuthorization(ctx, id) -} diff --git a/authorization/middleware_metrics.go b/authorization/middleware_metrics.go deleted file mode 100644 index b541f672e0f..00000000000 --- a/authorization/middleware_metrics.go +++ /dev/null @@ -1,61 +0,0 @@ -package authorization - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/metric" - 
"github.com/influxdata/influxdb/v2/kit/platform" - "github.com/prometheus/client_golang/prometheus" -) - -type AuthMetrics struct { - // RED metrics - rec *metric.REDClient - - authService influxdb.AuthorizationService -} - -var _ influxdb.AuthorizationService = (*AuthMetrics)(nil) - -func NewAuthMetrics(reg prometheus.Registerer, s influxdb.AuthorizationService, opts ...metric.ClientOptFn) *AuthMetrics { - o := metric.ApplyMetricOpts(opts...) - return &AuthMetrics{ - rec: metric.New(reg, o.ApplySuffix("token")), - authService: s, - } -} - -func (m *AuthMetrics) CreateAuthorization(ctx context.Context, a *influxdb.Authorization) error { - rec := m.rec.Record("create_authorization") - err := m.authService.CreateAuthorization(ctx, a) - return rec(err) -} - -func (m *AuthMetrics) FindAuthorizationByID(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - rec := m.rec.Record("find_authorization_by_id") - a, err := m.authService.FindAuthorizationByID(ctx, id) - return a, rec(err) -} -func (m *AuthMetrics) FindAuthorizationByToken(ctx context.Context, t string) (*influxdb.Authorization, error) { - rec := m.rec.Record("find_authorization_by_token") - a, err := m.authService.FindAuthorizationByToken(ctx, t) - return a, rec(err) -} -func (m *AuthMetrics) FindAuthorizations(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - rec := m.rec.Record("find_authorization_by_token") - a, n, err := m.authService.FindAuthorizations(ctx, filter, opt...) - return a, n, rec(err) -} - -func (m *AuthMetrics) UpdateAuthorization(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { - rec := m.rec.Record("update_authorization") - a, err := m.authService.UpdateAuthorization(ctx, id, upd) - return a, rec(err) -} - -func (m *AuthMetrics) DeleteAuthorization(ctx context.Context, id platform.ID) error { - rec := m.rec.Record("delete_authorization") - err := m.authService.DeleteAuthorization(ctx, id) - return rec(err) -} diff --git a/authorization/mock_tenant.go b/authorization/mock_tenant.go deleted file mode 100644 index b8dd501091e..00000000000 --- a/authorization/mock_tenant.go +++ /dev/null @@ -1,41 +0,0 @@ -package authorization - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// tenantService is a mock implementation of an authorization.tenantService -type tenantService struct { - FindUserByIDFn func(context.Context, platform.ID) (*influxdb.User, error) - FindUserFn func(context.Context, influxdb.UserFilter) (*influxdb.User, error) - FindOrganizationByIDF func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) - FindOrganizationF func(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) - FindBucketByIDFn func(context.Context, platform.ID) (*influxdb.Bucket, error) -} - -// FindUserByID returns a single User by ID. -func (s *tenantService) FindUserByID(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return s.FindUserByIDFn(ctx, id) -} - -// FindUsers returns a list of Users that match filter and the total count of matching Users. -func (s *tenantService) FindUser(ctx context.Context, filter influxdb.UserFilter) (*influxdb.User, error) { - return s.FindUserFn(ctx, filter) -} - -// FindOrganizationByID calls FindOrganizationByIDF. 
-func (s *tenantService) FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return s.FindOrganizationByIDF(ctx, id) -} - -// FindOrganization calls FindOrganizationF. -func (s *tenantService) FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return s.FindOrganizationF(ctx, filter) -} - -func (s *tenantService) FindBucketByID(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return s.FindBucketByIDFn(ctx, id) -} diff --git a/authorization/service.go b/authorization/service.go deleted file mode 100644 index 196c4a7b9e4..00000000000 --- a/authorization/service.go +++ /dev/null @@ -1,218 +0,0 @@ -package authorization - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/rand" -) - -var _ influxdb.AuthorizationService = (*Service)(nil) - -type Service struct { - store *Store - tokenGenerator influxdb.TokenGenerator - tenantService TenantService -} - -func NewService(st *Store, ts TenantService) influxdb.AuthorizationService { - return &Service{ - store: st, - tokenGenerator: rand.NewTokenGenerator(64), - tenantService: ts, - } -} - -func (s *Service) CreateAuthorization(ctx context.Context, a *influxdb.Authorization) error { - if err := a.Valid(); err != nil { - return &errors.Error{ - Err: err, - } - } - - if _, err := s.tenantService.FindUserByID(ctx, a.UserID); err != nil { - return influxdb.ErrUnableToCreateToken - } - - if _, err := s.tenantService.FindOrganizationByID(ctx, a.OrgID); err != nil { - return influxdb.ErrUnableToCreateToken - } - - err := s.store.View(ctx, func(tx kv.Tx) error { - if err := s.store.uniqueAuthToken(ctx, tx, a); err != nil { - return err - } - return nil - }) - if err != nil { - return ErrTokenAlreadyExistsError - } - - if a.Token == "" { - token, err := s.tokenGenerator.Token() - if err != nil { - return &errors.Error{ - Err: err, - } - } - a.Token = token - } - - now := time.Now() - a.SetCreatedAt(now) - a.SetUpdatedAt(now) - - return s.store.Update(ctx, func(tx kv.Tx) error { - return s.store.CreateAuthorization(ctx, tx, a) - }) -} - -func (s *Service) FindAuthorizationByID(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - var a *influxdb.Authorization - err := s.store.View(ctx, func(tx kv.Tx) error { - auth, err := s.store.GetAuthorizationByID(ctx, tx, id) - if err != nil { - return err - } - - a = auth - return nil - }) - - if err != nil { - return nil, err - } - - return a, nil -} - -// FindAuthorizationByToken returns a authorization by token for a particular authorization. -func (s *Service) FindAuthorizationByToken(ctx context.Context, n string) (*influxdb.Authorization, error) { - var a *influxdb.Authorization - err := s.store.View(ctx, func(tx kv.Tx) error { - auth, err := s.store.GetAuthorizationByToken(ctx, tx, n) - if err != nil { - return err - } - - a = auth - - return nil - }) - - if err != nil { - return nil, err - } - - return a, nil -} - -// FindAuthorizations retrieves all authorizations that match an arbitrary authorization filter. -// Filters using ID, or Token should be efficient. -// Other filters will do a linear scan across all authorizations searching for a match. 
-func (s *Service) FindAuthorizations(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - if filter.ID != nil { - var auth *influxdb.Authorization - err := s.store.View(ctx, func(tx kv.Tx) error { - a, e := s.store.GetAuthorizationByID(ctx, tx, *filter.ID) - if e != nil { - return e - } - auth = a - return nil - }) - if err != nil { - return nil, 0, &errors.Error{ - Err: err, - } - } - - return []*influxdb.Authorization{auth}, 1, nil - } - - if filter.Token != nil { - var auth *influxdb.Authorization - err := s.store.View(ctx, func(tx kv.Tx) error { - a, e := s.store.GetAuthorizationByToken(ctx, tx, *filter.Token) - if e != nil { - return e - } - auth = a - return nil - }) - if err != nil { - return nil, 0, &errors.Error{ - Err: err, - } - } - - return []*influxdb.Authorization{auth}, 1, nil - } - - as := []*influxdb.Authorization{} - err := s.store.View(ctx, func(tx kv.Tx) error { - auths, err := s.store.ListAuthorizations(ctx, tx, filter) - if err != nil { - return err - } - as = auths - return nil - }) - - if err != nil { - return nil, 0, &errors.Error{ - Err: err, - } - } - - return as, len(as), nil -} - -// UpdateAuthorization updates the status and description if available. -func (s *Service) UpdateAuthorization(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { - var auth *influxdb.Authorization - err := s.store.View(ctx, func(tx kv.Tx) error { - a, e := s.store.GetAuthorizationByID(ctx, tx, id) - if e != nil { - return e - } - auth = a - return nil - }) - - if err != nil { - return nil, &errors.Error{ - Code: errors.ENotFound, - Err: err, - } - } - - if upd.Status != nil { - auth.Status = *upd.Status - } - if upd.Description != nil { - auth.Description = *upd.Description - } - - auth.SetUpdatedAt(time.Now()) - - err = s.store.Update(ctx, func(tx kv.Tx) error { - a, e := s.store.UpdateAuthorization(ctx, tx, id, auth) - if e != nil { - return e - } - auth = a - return nil - }) - return auth, err -} - -func (s *Service) DeleteAuthorization(ctx context.Context, id platform.ID) error { - return s.store.Update(ctx, func(tx kv.Tx) (err error) { - return s.store.DeleteAuthorization(ctx, tx, id) - }) -} diff --git a/authorization/service_test.go b/authorization/service_test.go deleted file mode 100644 index c729227dddd..00000000000 --- a/authorization/service_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package authorization_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -func initBoltAuthService(f influxdbtesting.AuthorizationFields, t *testing.T) (influxdb.AuthorizationService, string, func()) { - s, closeBolt := influxdbtesting.NewTestBoltStore(t) - svc, closeSvc := initAuthService(s, f, t) - return svc, "service_auth", func() { - closeSvc() - closeBolt() - } -} - -func initAuthService(s kv.Store, f influxdbtesting.AuthorizationFields, t *testing.T) (influxdb.AuthorizationService, func()) { - st := tenant.NewStore(s) - if f.OrgIDGenerator != nil { - st.OrgIDGen = f.OrgIDGenerator - } - - ts := tenant.NewService(st) - storage, err := authorization.NewStore(s) - if err != nil { - t.Fatal(err) - } - - svc := authorization.NewService(storage, ts) - - for _, u := range f.Users { - if err := 
ts.CreateUser(context.Background(), u); err != nil { - t.Fatalf("error populating users: %v", err) - } - } - - for _, o := range f.Orgs { - if err := ts.CreateOrganization(context.Background(), o); err != nil { - t.Fatalf("failed to populate organizations: %s", err) - } - } - - for _, m := range f.Authorizations { - if err := svc.CreateAuthorization(context.Background(), m); err != nil { - t.Fatalf("failed to populate authorizations: %v", err) - } - } - - return svc, func() { - for _, m := range f.Authorizations { - if err := svc.DeleteAuthorization(context.Background(), m.ID); err != nil { - t.Logf("failed to remove authorization token: %v", err) - } - } - } -} - -func TestBoltAuthService(t *testing.T) { - t.Parallel() - influxdbtesting.AuthorizationService(initBoltAuthService, t) -} diff --git a/authorization/storage.go b/authorization/storage.go deleted file mode 100644 index 4b8c8fdec3b..00000000000 --- a/authorization/storage.go +++ /dev/null @@ -1,107 +0,0 @@ -package authorization - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/snowflake" -) - -const MaxIDGenerationN = 100 -const ReservedIDs = 1000 - -var ( - authBucket = []byte("authorizationsv1") - authIndex = []byte("authorizationindexv1") -) - -type Store struct { - kvStore kv.Store - IDGen platform.IDGenerator -} - -func NewStore(kvStore kv.Store) (*Store, error) { - st := &Store{ - kvStore: kvStore, - IDGen: snowflake.NewDefaultIDGenerator(), - } - return st, st.setup() -} - -// View opens up a transaction that will not write to any data. Implementing interfaces -// should take care to ensure that all view transactions do not mutate any data. -func (s *Store) View(ctx context.Context, fn func(kv.Tx) error) error { - return s.kvStore.View(ctx, fn) -} - -// Update opens up a transaction that will mutate data. -func (s *Store) Update(ctx context.Context, fn func(kv.Tx) error) error { - return s.kvStore.Update(ctx, fn) -} - -func (s *Store) setup() error { - return s.Update(context.Background(), func(tx kv.Tx) error { - if _, err := tx.Bucket(authBucket); err != nil { - return err - } - if _, err := authIndexBucket(tx); err != nil { - return err - } - - return nil - }) -} - -// generateSafeID attempts to create ids for buckets -// and orgs that are without backslash, commas, and spaces, BUT ALSO do not already exist. -func (s *Store) generateSafeID(ctx context.Context, tx kv.Tx, bucket []byte) (platform.ID, error) { - for i := 0; i < MaxIDGenerationN; i++ { - id := s.IDGen.ID() - - // TODO: this is probably unnecessary but for testing we need to keep it in. - // After KV is cleaned out we can update the tests and remove this. 
- if id < ReservedIDs { - continue - } - - err := s.uniqueID(ctx, tx, bucket, id) - if err == nil { - return id, nil - } - - if err == NotUniqueIDError { - continue - } - - return platform.InvalidID(), err - } - return platform.InvalidID(), ErrFailureGeneratingID -} - -func (s *Store) uniqueID(ctx context.Context, tx kv.Tx, bucket []byte, id platform.ID) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - encodedID, err := id.Encode() - if err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - b, err := tx.Bucket(bucket) - if err != nil { - return err - } - - _, err = b.Get(encodedID) - if kv.IsNotFound(err) { - return nil - } - - return NotUniqueIDError -} diff --git a/authorization/storage_authorization.go b/authorization/storage_authorization.go deleted file mode 100644 index 3a25146655d..00000000000 --- a/authorization/storage_authorization.go +++ /dev/null @@ -1,448 +0,0 @@ -package authorization - -import ( - "context" - "encoding/json" - - "github.com/buger/jsonparser" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - jsonp "github.com/influxdata/influxdb/v2/pkg/jsonparser" -) - -func authIndexKey(n string) []byte { - return []byte(n) -} - -func authIndexBucket(tx kv.Tx) (kv.Bucket, error) { - b, err := tx.Bucket([]byte(authIndex)) - if err != nil { - return nil, UnexpectedAuthIndexError(err) - } - - return b, nil -} - -func encodeAuthorization(a *influxdb.Authorization) ([]byte, error) { - switch a.Status { - case influxdb.Active, influxdb.Inactive: - case "": - a.Status = influxdb.Active - default: - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unknown authorization status", - } - } - - return json.Marshal(a) -} - -func decodeAuthorization(b []byte, a *influxdb.Authorization) error { - if err := json.Unmarshal(b, a); err != nil { - return err - } - if a.Status == "" { - a.Status = influxdb.Active - } - return nil -} - -// CreateAuthorization takes an Authorization object and saves it in storage using its token -// using its token property as an index -func (s *Store) CreateAuthorization(ctx context.Context, tx kv.Tx, a *influxdb.Authorization) error { - // if the provided ID is invalid, or already maps to an existing Auth, then generate a new one - if !a.ID.Valid() { - id, err := s.generateSafeID(ctx, tx, authBucket) - if err != nil { - return nil - } - a.ID = id - } else if err := uniqueID(ctx, tx, a.ID); err != nil { - id, err := s.generateSafeID(ctx, tx, authBucket) - if err != nil { - return nil - } - a.ID = id - } - - if err := s.uniqueAuthToken(ctx, tx, a); err != nil { - return ErrTokenAlreadyExistsError - } - - v, err := encodeAuthorization(a) - if err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - encodedID, err := a.ID.Encode() - if err != nil { - return ErrInvalidAuthIDError(err) - } - - idx, err := authIndexBucket(tx) - if err != nil { - return err - } - - if err := idx.Put(authIndexKey(a.Token), encodedID); err != nil { - return &errors.Error{ - Code: errors.EInternal, - Err: err, - } - } - - b, err := tx.Bucket(authBucket) - if err != nil { - return err - } - - if err := b.Put(encodedID, v); err != nil { - return &errors.Error{ - Err: err, - } - } - - return nil -} - -// GetAuthorization gets an authorization by its ID from the auth bucket in kv -func (s *Store) 
GetAuthorizationByID(ctx context.Context, tx kv.Tx, id platform.ID) (*influxdb.Authorization, error) { - encodedID, err := id.Encode() - if err != nil { - return nil, ErrInvalidAuthID - } - - b, err := tx.Bucket(authBucket) - if err != nil { - return nil, ErrInternalServiceError(err) - } - - v, err := b.Get(encodedID) - if kv.IsNotFound(err) { - return nil, ErrAuthNotFound - } - - if err != nil { - return nil, ErrInternalServiceError(err) - } - - a := &influxdb.Authorization{} - if err := decodeAuthorization(v, a); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - return a, nil -} - -func (s *Store) GetAuthorizationByToken(ctx context.Context, tx kv.Tx, token string) (*influxdb.Authorization, error) { - idx, err := authIndexBucket(tx) - if err != nil { - return nil, err - } - - // use the token to look up the authorization's ID - idKey, err := idx.Get(authIndexKey(token)) - if kv.IsNotFound(err) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "authorization not found", - } - } - - var id platform.ID - if err := id.Decode(idKey); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - return s.GetAuthorizationByID(ctx, tx, id) -} - -// ListAuthorizations returns all the authorizations matching a set of FindOptions. This function is used for -// FindAuthorizationByID, FindAuthorizationByToken, and FindAuthorizations in the AuthorizationService implementation -func (s *Store) ListAuthorizations(ctx context.Context, tx kv.Tx, f influxdb.AuthorizationFilter) ([]*influxdb.Authorization, error) { - var as []*influxdb.Authorization - pred := authorizationsPredicateFn(f) - filterFn := filterAuthorizationsFn(f) - err := s.forEachAuthorization(ctx, tx, pred, func(a *influxdb.Authorization) bool { - if filterFn(a) { - as = append(as, a) - } - return true - }) - if err != nil { - return nil, err - } - - return as, nil -} - -// forEachAuthorization will iterate through all authorizations while fn returns true. 
-func (s *Store) forEachAuthorization(ctx context.Context, tx kv.Tx, pred kv.CursorPredicateFunc, fn func(*influxdb.Authorization) bool) error { - b, err := tx.Bucket(authBucket) - if err != nil { - return err - } - - var cur kv.Cursor - if pred != nil { - cur, err = b.Cursor(kv.WithCursorHintPredicate(pred)) - } else { - cur, err = b.Cursor() - } - if err != nil { - return err - } - - for k, v := cur.First(); k != nil; k, v = cur.Next() { - // preallocate Permissions to reduce multiple slice re-allocations - a := &influxdb.Authorization{ - Permissions: make([]influxdb.Permission, 64), - } - - if err := decodeAuthorization(v, a); err != nil { - return err - } - if !fn(a) { - break - } - } - - return nil -} - -// UpdateAuthorization updates the status and description only of an authorization -func (s *Store) UpdateAuthorization(ctx context.Context, tx kv.Tx, id platform.ID, a *influxdb.Authorization) (*influxdb.Authorization, error) { - v, err := encodeAuthorization(a) - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - encodedID, err := a.ID.Encode() - if err != nil { - return nil, &errors.Error{ - Code: errors.ENotFound, - Err: err, - } - } - - idx, err := authIndexBucket(tx) - if err != nil { - return nil, err - } - - if err := idx.Put(authIndexKey(a.Token), encodedID); err != nil { - return nil, &errors.Error{ - Code: errors.EInternal, - Err: err, - } - } - - b, err := tx.Bucket(authBucket) - if err != nil { - return nil, err - } - - if err := b.Put(encodedID, v); err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - return a, nil - -} - -// DeleteAuthorization removes an authorization from storage -func (s *Store) DeleteAuthorization(ctx context.Context, tx kv.Tx, id platform.ID) error { - a, err := s.GetAuthorizationByID(ctx, tx, id) - if err != nil { - return err - } - - encodedID, err := id.Encode() - if err != nil { - return ErrInvalidAuthID - } - - idx, err := authIndexBucket(tx) - if err != nil { - return err - } - - b, err := tx.Bucket(authBucket) - if err != nil { - return err - } - - if err := idx.Delete([]byte(a.Token)); err != nil { - return ErrInternalServiceError(err) - } - - if err := b.Delete(encodedID); err != nil { - return ErrInternalServiceError(err) - } - - return nil -} - -func (s *Store) uniqueAuthToken(ctx context.Context, tx kv.Tx, a *influxdb.Authorization) error { - err := unique(ctx, tx, authIndex, authIndexKey(a.Token)) - if err == kv.NotUniqueError { - // by returning a generic error we are trying to hide when - // a token is non-unique. - return influxdb.ErrUnableToCreateToken - } - // otherwise, this is some sort of internal server error and we - // should provide some debugging information. - return err -} - -func unique(ctx context.Context, tx kv.Tx, indexBucket, indexKey []byte) error { - bucket, err := tx.Bucket(indexBucket) - if err != nil { - return kv.UnexpectedIndexError(err) - } - - _, err = bucket.Get(indexKey) - // if not found then this token is unique. 
- if kv.IsNotFound(err) { - return nil - } - - // no error means this is not unique - if err == nil { - return kv.NotUniqueError - } - - // any other error is some sort of internal server error - return kv.UnexpectedIndexError(err) -} - -// uniqueID returns nil if the ID provided is unique, returns an error otherwise -func uniqueID(ctx context.Context, tx kv.Tx, id platform.ID) error { - encodedID, err := id.Encode() - if err != nil { - return ErrInvalidAuthID - } - - b, err := tx.Bucket(authBucket) - if err != nil { - return ErrInternalServiceError(err) - } - - _, err = b.Get(encodedID) - // if not found then the ID is unique - if kv.IsNotFound(err) { - return nil - } - // no error means this is not unique - if err == nil { - return kv.NotUniqueError - } - - // any other error is some sort of internal server error - return kv.UnexpectedIndexError(err) -} - -func authorizationsPredicateFn(f influxdb.AuthorizationFilter) kv.CursorPredicateFunc { - // if any errors occur reading the JSON data, the predicate will always return true - // to ensure the value is included and handled higher up. - - if f.ID != nil { - exp := *f.ID - return func(_, value []byte) bool { - got, err := jsonp.GetID(value, "id") - if err != nil { - return true - } - return got == exp - } - } - - if f.Token != nil { - exp := *f.Token - return func(_, value []byte) bool { - // it is assumed that token never has escaped string data - got, _, _, err := jsonparser.Get(value, "token") - if err != nil { - return true - } - return string(got) == exp - } - } - - var pred kv.CursorPredicateFunc - if f.OrgID != nil { - exp := *f.OrgID - pred = func(_, value []byte) bool { - got, err := jsonp.GetID(value, "orgID") - if err != nil { - return true - } - - return got == exp - } - } - - if f.UserID != nil { - exp := *f.UserID - prevFn := pred - pred = func(key, value []byte) bool { - prev := prevFn == nil || prevFn(key, value) - got, exists, err := jsonp.GetOptionalID(value, "userID") - return prev && ((exp == got && exists) || err != nil) - } - } - - return pred -} - -func filterAuthorizationsFn(filter influxdb.AuthorizationFilter) func(a *influxdb.Authorization) bool { - if filter.ID != nil { - return func(a *influxdb.Authorization) bool { - return a.ID == *filter.ID - } - } - - if filter.Token != nil { - return func(a *influxdb.Authorization) bool { - return a.Token == *filter.Token - } - } - - // Filter by org and user - if filter.OrgID != nil && filter.UserID != nil { - return func(a *influxdb.Authorization) bool { - return a.OrgID == *filter.OrgID && a.UserID == *filter.UserID - } - } - - if filter.OrgID != nil { - return func(a *influxdb.Authorization) bool { - return a.OrgID == *filter.OrgID - } - } - - if filter.UserID != nil { - return func(a *influxdb.Authorization) bool { - return a.UserID == *filter.UserID - } - } - - return func(a *influxdb.Authorization) bool { return true } -} diff --git a/authorization/storage_authorization_test.go b/authorization/storage_authorization_test.go deleted file mode 100644 index 79406a2d249..00000000000 --- a/authorization/storage_authorization_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package authorization_test - -import ( - "context" - "fmt" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration/all" 
- "go.uber.org/zap/zaptest" -) - -func TestAuth(t *testing.T) { - setup := func(t *testing.T, store *authorization.Store, tx kv.Tx) { - for i := 1; i <= 10; i++ { - err := store.CreateAuthorization(context.Background(), tx, &influxdb.Authorization{ - ID: platform.ID(i), - Token: fmt.Sprintf("randomtoken%d", i), - OrgID: platform.ID(i), - UserID: platform.ID(i), - Status: influxdb.Active, - }) - - if err != nil { - t.Fatal(err) - } - } - } - - tt := []struct { - name string - setup func(*testing.T, *authorization.Store, kv.Tx) - update func(*testing.T, *authorization.Store, kv.Tx) - results func(*testing.T, *authorization.Store, kv.Tx) - }{ - { - name: "create", - setup: setup, - results: func(t *testing.T, store *authorization.Store, tx kv.Tx) { - auths, err := store.ListAuthorizations(context.Background(), tx, influxdb.AuthorizationFilter{}) - if err != nil { - t.Fatal(err) - } - - if len(auths) != 10 { - t.Fatalf("expected 10 authorizations, got: %d", len(auths)) - } - - expected := []*influxdb.Authorization{} - for i := 1; i <= 10; i++ { - expected = append(expected, &influxdb.Authorization{ - ID: platform.ID(i), - Token: fmt.Sprintf("randomtoken%d", i), - OrgID: platform.ID(i), - UserID: platform.ID(i), - Status: "active", - }) - } - if !reflect.DeepEqual(auths, expected) { - t.Fatalf("expected identical authorizations: \n%+v\n%+v", auths, expected) - } - - // should not be able to create two authorizations with identical tokens - err = store.CreateAuthorization(context.Background(), tx, &influxdb.Authorization{ - ID: platform.ID(1), - Token: fmt.Sprintf("randomtoken%d", 1), - OrgID: platform.ID(1), - UserID: platform.ID(1), - }) - if err == nil { - t.Fatalf("expected to be unable to create authorizations with identical tokens") - } - }, - }, - { - name: "read", - setup: setup, - results: func(t *testing.T, store *authorization.Store, tx kv.Tx) { - for i := 1; i <= 10; i++ { - expectedAuth := &influxdb.Authorization{ - ID: platform.ID(i), - Token: fmt.Sprintf("randomtoken%d", i), - OrgID: platform.ID(i), - UserID: platform.ID(i), - Status: influxdb.Active, - } - - authByID, err := store.GetAuthorizationByID(context.Background(), tx, platform.ID(i)) - if err != nil { - t.Fatalf("Unexpectedly could not acquire Authorization by ID [Error]: %v", err) - } - - if !reflect.DeepEqual(authByID, expectedAuth) { - t.Fatalf("ID TEST: expected identical authorizations:\n[Expected]: %+#v\n[Got]: %+#v", expectedAuth, authByID) - } - - authByToken, err := store.GetAuthorizationByToken(context.Background(), tx, fmt.Sprintf("randomtoken%d", i)) - if err != nil { - t.Fatalf("cannot get authorization by Token [Error]: %v", err) - } - - if !reflect.DeepEqual(authByToken, expectedAuth) { - t.Fatalf("TOKEN TEST: expected identical authorizations:\n[Expected]: %+#v\n[Got]: %+#v", expectedAuth, authByToken) - } - } - - }, - }, - { - name: "update", - setup: setup, - update: func(t *testing.T, store *authorization.Store, tx kv.Tx) { - for i := 1; i <= 10; i++ { - auth, err := store.GetAuthorizationByID(context.Background(), tx, platform.ID(i)) - if err != nil { - t.Fatalf("Could not get authorization [Error]: %v", err) - } - - auth.Status = influxdb.Inactive - - _, err = store.UpdateAuthorization(context.Background(), tx, platform.ID(i), auth) - if err != nil { - t.Fatalf("Could not get updated authorization [Error]: %v", err) - } - } - }, - results: func(t *testing.T, store *authorization.Store, tx kv.Tx) { - - for i := 1; i <= 10; i++ { - auth, err := store.GetAuthorizationByID(context.Background(), tx, 
platform.ID(i)) - if err != nil { - t.Fatalf("Could not get authorization [Error]: %v", err) - } - - expectedAuth := &influxdb.Authorization{ - ID: platform.ID(i), - Token: fmt.Sprintf("randomtoken%d", i), - OrgID: platform.ID(i), - UserID: platform.ID(i), - Status: influxdb.Inactive, - } - - if !reflect.DeepEqual(auth, expectedAuth) { - t.Fatalf("expected identical authorizations:\n[Expected] %+#v\n[Got] %+#v", expectedAuth, auth) - } - } - }, - }, - { - name: "delete", - setup: setup, - update: func(t *testing.T, store *authorization.Store, tx kv.Tx) { - for i := 1; i <= 10; i++ { - err := store.DeleteAuthorization(context.Background(), tx, platform.ID(i)) - if err != nil { - t.Fatalf("Could not delete authorization [Error]: %v", err) - } - } - }, - results: func(t *testing.T, store *authorization.Store, tx kv.Tx) { - for i := 1; i <= 10; i++ { - _, err := store.GetAuthorizationByID(context.Background(), tx, platform.ID(i)) - if err == nil { - t.Fatal("Authorization was not deleted correctly") - } - } - }, - }, - } - - for _, testScenario := range tt { - t.Run(testScenario.name, func(t *testing.T) { - store := inmem.NewKVStore() - if err := all.Up(context.Background(), zaptest.NewLogger(t), store); err != nil { - t.Fatal(err) - } - - ts, err := authorization.NewStore(store) - if err != nil { - t.Fatal(err) - } - - // setup - if testScenario.setup != nil { - err := ts.Update(context.Background(), func(tx kv.Tx) error { - testScenario.setup(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - - // update - if testScenario.update != nil { - err := ts.Update(context.Background(), func(tx kv.Tx) error { - testScenario.update(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - - // results - if testScenario.results != nil { - err := ts.View(context.Background(), func(tx kv.Tx) error { - testScenario.results(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - }) - } -} diff --git a/authorizer/agent.go b/authorizer/agent.go deleted file mode 100644 index 62cf1b0c94f..00000000000 --- a/authorizer/agent.go +++ /dev/null @@ -1,46 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// AuthAgent provides a means to authenticate users with resource and their associate actions. It -// makes for a clear dependency, to an auth middleware for instance. -type AuthAgent struct{} - -// OrgPermissions identifies if a user has access to the org by the specified action. 
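-// It checks each requested action in turn and fails on the first one the authorizer on
-// context is not allowed to perform against the org. Illustrative usage (agent and orgID
-// are placeholder names):
-//
-//	if err := agent.OrgPermissions(ctx, orgID, influxdb.ReadAction, influxdb.WriteAction); err != nil {
-//		return err // missing read or write access to the org
-//	}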
-func (a *AuthAgent) OrgPermissions(ctx context.Context, orgID platform.ID, action influxdb.Action, rest ...influxdb.Action) error { - for _, action := range append(rest, action) { - var err error - switch action { - case influxdb.ReadAction: - _, _, err = AuthorizeReadOrg(ctx, orgID) - case influxdb.WriteAction: - _, _, err = AuthorizeWriteOrg(ctx, orgID) - default: - err = &errors.Error{Code: errors.EInvalid, Msg: "invalid action provided: " + string(action)} - } - if err != nil { - return err - } - } - return nil -} - -func (a *AuthAgent) IsWritable(ctx context.Context, orgID platform.ID, resType influxdb.ResourceType) error { - _, _, resTypeErr := AuthorizeOrgWriteResource(ctx, resType, orgID) - _, _, orgErr := AuthorizeWriteOrg(ctx, orgID) - - if resTypeErr != nil && orgErr != nil { - return &errors.Error{ - Code: errors.EUnauthorized, - Msg: "not authorized to create " + string(resType), - } - } - - return nil -} diff --git a/authorizer/agent_test.go b/authorizer/agent_test.go deleted file mode 100644 index 12fd914f73f..00000000000 --- a/authorizer/agent_test.go +++ /dev/null @@ -1,299 +0,0 @@ -package authorizer_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" -) - -func Test_Agent(t *testing.T) { - t.Run("OrgPermissions", func(t *testing.T) { - tests := []struct { - name string - action influxdb.Action - orgID platform.ID - permissions []influxdb.Permission - shouldErr bool - }{ - { - name: "read valid org is successful", - action: influxdb.ReadAction, - orgID: 3, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(3), - }, - }, - }, - }, - { - name: "write from valid org is successful", - action: influxdb.WriteAction, - orgID: 3, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(3), - }, - }, - }, - }, - { - name: "read from org with only both privileges is successful", - action: influxdb.ReadAction, - orgID: 3, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(3), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(3), - }, - }, - }, - }, - { - name: "write from org with only both privileges is successful", - action: influxdb.WriteAction, - orgID: 3, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(3), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(3), - }, - }, - }, - }, - { - name: "read from invalid org errors", - action: influxdb.ReadAction, - orgID: 3333, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(3), - }, - }, - }, - shouldErr: true, - 
}, - { - name: "write from invalid org errors", - action: influxdb.WriteAction, - orgID: 3333, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(3), - }, - }, - }, - shouldErr: true, - }, - { - name: "read from org with only write privileges should errors", - action: influxdb.ReadAction, - orgID: 3, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(3), - }, - }, - }, - shouldErr: true, - }, - { - name: "write from org with only read privileges should errors", - action: influxdb.WriteAction, - orgID: 3, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(3), - }, - }, - }, - shouldErr: true, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - ctx := icontext.SetAuthorizer(context.TODO(), mock.NewMockAuthorizer(false, tt.permissions)) - - agent := new(authorizer.AuthAgent) - - err := agent.OrgPermissions(ctx, tt.orgID, tt.action) - if tt.shouldErr { - require.Error(t, err) - } else { - require.NoError(t, err) - } - } - - t.Run(tt.name, fn) - } - }) - - t.Run("IsWritable", func(t *testing.T) { - tests := []struct { - name string - resourceType influxdb.ResourceType - orgID platform.ID - permissions []influxdb.Permission - shouldErr bool - }{ - { - name: "valid org write perms is always successful", - resourceType: influxdb.LabelsResourceType, - orgID: 3, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(3), - }, - }, - }, - }, - { - name: "valid resource write perm is always successful", - resourceType: influxdb.LabelsResourceType, - orgID: 3, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - }, - { - name: "valid org and resource write perm is always successful", - resourceType: influxdb.LabelsResourceType, - orgID: 3, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(3), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - }, - { - name: "read only org perm errors", - resourceType: influxdb.LabelsResourceType, - orgID: 3, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: idPtr(3), - }, - }, - }, - shouldErr: true, - }, - { - name: "read only resource perms errors", - resourceType: influxdb.LabelsResourceType, - orgID: 3, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - shouldErr: true, - }, - { - name: "read only org and resource resource perms errors", - resourceType: influxdb.LabelsResourceType, - orgID: 3, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: idPtr(3), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - shouldErr: true, - }, - 
} - - for _, tt := range tests { - fn := func(t *testing.T) { - ctx := icontext.SetAuthorizer(context.TODO(), mock.NewMockAuthorizer(false, tt.permissions)) - - agent := new(authorizer.AuthAgent) - - err := agent.IsWritable(ctx, tt.orgID, tt.resourceType) - if tt.shouldErr { - require.Error(t, err) - } else { - require.NoError(t, err) - } - } - - t.Run(tt.name, fn) - } - }) -} diff --git a/authorizer/annotation.go b/authorizer/annotation.go deleted file mode 100644 index ef4545a99d3..00000000000 --- a/authorizer/annotation.go +++ /dev/null @@ -1,189 +0,0 @@ -package authorizer - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var _ influxdb.AnnotationService = (*AnnotationService)(nil) - -// AnnotationService wraps an influxdb.AnnotationService and authorizes actions -// against it appropriately. -type AnnotationService struct { - s influxdb.AnnotationService -} - -// NewAnnotationService constructs an instance of an authorizing check service -func NewAnnotationService(s influxdb.AnnotationService) *AnnotationService { - return &AnnotationService{ - s: s, - } -} - -// CreateAnnotations checks to see if the authorizer on context has write access for annotations for the provided orgID -func (s *AnnotationService) CreateAnnotations(ctx context.Context, orgID platform.ID, create []influxdb.AnnotationCreate) ([]influxdb.AnnotationEvent, error) { - if _, _, err := AuthorizeCreate(ctx, influxdb.AnnotationsResourceType, orgID); err != nil { - return nil, err - } - - return s.s.CreateAnnotations(ctx, orgID, create) -} - -// ListAnnotations checks to see if the authorizer on context has read access for annotations for the provided orgID -// and then filters the list down to only the resources that are authorized -func (s *AnnotationService) ListAnnotations(ctx context.Context, orgID platform.ID, filter influxdb.AnnotationListFilter) ([]influxdb.StoredAnnotation, error) { - if _, _, err := AuthorizeOrgReadResource(ctx, influxdb.AnnotationsResourceType, orgID); err != nil { - return nil, err - } - - as, err := s.s.ListAnnotations(ctx, orgID, filter) - if err != nil { - return nil, err - } - - as, _, err = AuthorizeFindAnnotations(ctx, as) - return as, err -} - -// GetAnnotation checks to see if the authorizer on context has read access to the requested annotation -func (s *AnnotationService) GetAnnotation(ctx context.Context, id platform.ID) (*influxdb.StoredAnnotation, error) { - a, err := s.s.GetAnnotation(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, influxdb.AnnotationsResourceType, id, a.OrgID); err != nil { - return nil, err - } - return a, nil -} - -// DeleteAnnotations checks to see if the authorizer on context has write access to the provided orgID -func (s *AnnotationService) DeleteAnnotations(ctx context.Context, orgID platform.ID, delete influxdb.AnnotationDeleteFilter) error { - if _, _, err := AuthorizeOrgWriteResource(ctx, influxdb.AnnotationsResourceType, orgID); err != nil { - return err - } - return s.s.DeleteAnnotations(ctx, orgID, delete) -} - -// DeleteAnnotation checks to see if the authorizer on context has write access to the requested annotation -func (s *AnnotationService) DeleteAnnotation(ctx context.Context, id platform.ID) error { - a, err := s.s.GetAnnotation(ctx, id) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.AnnotationsResourceType, 
id, a.OrgID); err != nil { - return err - } - return s.s.DeleteAnnotation(ctx, id) -} - -// UpdateAnnotation checks to see if the authorizer on context has write access to the requested annotation -func (s *AnnotationService) UpdateAnnotation(ctx context.Context, id platform.ID, update influxdb.AnnotationCreate) (*influxdb.AnnotationEvent, error) { - a, err := s.s.GetAnnotation(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.AnnotationsResourceType, id, a.OrgID); err != nil { - return nil, err - } - return s.s.UpdateAnnotation(ctx, id, update) -} - -// ListStreams checks to see if the authorizer on context has read access for streams for the provided orgID -// and then filters the list down to only the resources that are authorized -func (s *AnnotationService) ListStreams(ctx context.Context, orgID platform.ID, filter influxdb.StreamListFilter) ([]influxdb.StoredStream, error) { - if _, _, err := AuthorizeOrgReadResource(ctx, influxdb.AnnotationsResourceType, orgID); err != nil { - return nil, err - } - - ss, err := s.s.ListStreams(ctx, orgID, filter) - if err != nil { - return nil, err - } - - ss, _, err = AuthorizeFindStreams(ctx, ss) - return ss, err -} - -// GetStream checks to see if the authorizer on context has read access to the requested stream -func (s *AnnotationService) GetStream(ctx context.Context, id platform.ID) (*influxdb.StoredStream, error) { - st, err := s.s.GetStream(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, influxdb.AnnotationsResourceType, id, st.OrgID); err != nil { - return nil, err - } - return st, nil -} - -func (s *AnnotationService) CreateOrUpdateStream(ctx context.Context, orgID platform.ID, stream influxdb.Stream) (*influxdb.ReadStream, error) { - // We need to know if the request is creating a new stream, or updating an existing stream to check - // permissions appropriately - - // Get the stream by name. An empty slice will be returned if the stream doesn't exist - // note: a given org can only have one stream by the same name. 
this constraint is enforced in the database schema - streams, err := s.s.ListStreams(ctx, orgID, influxdb.StreamListFilter{ - StreamIncludes: []string{stream.Name}, - }) - if err != nil { - return nil, err - } - - // update an already existing stream - if len(streams) == 1 { - return s.UpdateStream(ctx, streams[0].ID, stream) - } - - // create a new stream if one doesn't already exist - if len(streams) == 0 { - if _, _, err := AuthorizeCreate(ctx, influxdb.AnnotationsResourceType, orgID); err != nil { - return nil, err - } - - return s.s.CreateOrUpdateStream(ctx, orgID, stream) - } - - // if multiple streams were returned somehow, return an error - // this should never happen, so return a server error - return nil, &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("more than one stream named %q for org %q", streams[0].Name, orgID), - } -} - -// UpdateStream checks to see if the authorizer on context has write access to the requested stream -func (s *AnnotationService) UpdateStream(ctx context.Context, id platform.ID, stream influxdb.Stream) (*influxdb.ReadStream, error) { - st, err := s.s.GetStream(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.AnnotationsResourceType, id, st.OrgID); err != nil { - return nil, err - } - return s.s.UpdateStream(ctx, id, stream) -} - -// DeleteStreams checks to see if the authorizer on context has write access to the provided orgID -func (s *AnnotationService) DeleteStreams(ctx context.Context, orgID platform.ID, delete influxdb.BasicStream) error { - if _, _, err := AuthorizeOrgWriteResource(ctx, influxdb.AnnotationsResourceType, orgID); err != nil { - return err - } - return s.s.DeleteStreams(ctx, orgID, delete) -} - -// DeleteStreamByID checks to see if the authorizer on context has write access to the requested stream -func (s *AnnotationService) DeleteStreamByID(ctx context.Context, id platform.ID) error { - st, err := s.s.GetStream(ctx, id) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.AnnotationsResourceType, id, st.OrgID); err != nil { - return err - } - return s.s.DeleteStreamByID(ctx, id) -} diff --git a/authorizer/annotation_test.go b/authorizer/annotation_test.go deleted file mode 100644 index 803d5b54c6b..00000000000 --- a/authorizer/annotation_test.go +++ /dev/null @@ -1,725 +0,0 @@ -package authorizer_test - -import ( - "context" - "fmt" - "testing" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" -) - -var ( - annOrgID1 = influxdbtesting.IDPtr(1) - annOrgID2 = influxdbtesting.IDPtr(10) - rID = influxdbtesting.IDPtr(2) -) - -func Test_CreateAnnotations(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - wantRet []influxdb.AnnotationEvent - wantErr error - }{ - { - "authorized to create annotation(s) with the specified org", - []influxdb.AnnotationEvent{{ID: *rID}}, - nil, - }, - { - "not authorized to create annotation(s) with the specified org", - nil, - &errors.Error{ - Msg: fmt.Sprintf("write:orgs/%s/annotations is unauthorized", annOrgID1), - Code: errors.EUnauthorized, - }, 
- }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockAnnotationService(ctrlr) - s := authorizer.NewAnnotationService(svc) - - var perm influxdb.Permission - if tt.wantErr == nil { - perm = newTestAnnotationsPermission(influxdb.WriteAction, annOrgID1) - svc.EXPECT(). - CreateAnnotations(gomock.Any(), *annOrgID1, []influxdb.AnnotationCreate{{}}). - Return(tt.wantRet, nil) - } else { - perm = newTestAnnotationsPermission(influxdb.ReadAction, annOrgID1) - } - - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - got, err := s.CreateAnnotations(ctx, *annOrgID1, []influxdb.AnnotationCreate{{}}) - require.Equal(t, tt.wantErr, err) - require.Equal(t, tt.wantRet, got) - }) - } -} - -func Test_ListAnnotations(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - wantRet []influxdb.StoredAnnotation - wantErr error - }{ - { - "authorized to list annotations for the specified org", - []influxdb.StoredAnnotation{}, - nil, - }, - { - "not authorized to list annotations for the specified org", - nil, - &errors.Error{ - Msg: fmt.Sprintf("read:orgs/%s/annotations is unauthorized", annOrgID1), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockAnnotationService(ctrlr) - s := authorizer.NewAnnotationService(svc) - - var perm influxdb.Permission - if tt.wantErr == nil { - perm = newTestAnnotationsPermission(influxdb.ReadAction, annOrgID1) - svc.EXPECT(). - ListAnnotations(gomock.Any(), *annOrgID1, influxdb.AnnotationListFilter{}). - Return(tt.wantRet, nil) - } - - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - got, err := s.ListAnnotations(ctx, *annOrgID1, influxdb.AnnotationListFilter{}) - require.Equal(t, tt.wantErr, err) - require.Equal(t, tt.wantRet, got) - }) - } -} - -func Test_GetAnnotation(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - permissionOrg *platform.ID - wantRet *influxdb.StoredAnnotation - wantErr error - }{ - { - "authorized to access annotation by id", - annOrgID1, - &influxdb.StoredAnnotation{ - ID: *rID, - OrgID: *annOrgID1, - }, - nil, - }, - { - "not authorized to access annotation by id", - annOrgID2, - nil, - &errors.Error{ - Msg: fmt.Sprintf("read:orgs/%s/annotations/%s is unauthorized", annOrgID1, rID), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockAnnotationService(ctrlr) - s := authorizer.NewAnnotationService(svc) - - svc.EXPECT(). - GetAnnotation(gomock.Any(), *rID). 
- Return(&influxdb.StoredAnnotation{ - ID: *rID, - OrgID: *annOrgID1, - }, nil) - - perm := newTestAnnotationsPermission(influxdb.ReadAction, tt.permissionOrg) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - got, err := s.GetAnnotation(ctx, *rID) - require.Equal(t, tt.wantErr, err) - require.Equal(t, tt.wantRet, got) - }) - } -} - -func Test_DeleteAnnotations(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - wantErr error - }{ - { - "authorized to delete annotations with the specified org", - nil, - }, - { - "not authorized to delete annotations with the specified org", - &errors.Error{ - Msg: fmt.Sprintf("write:orgs/%s/annotations is unauthorized", annOrgID1), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockAnnotationService(ctrlr) - s := authorizer.NewAnnotationService(svc) - - var perm influxdb.Permission - if tt.wantErr == nil { - perm = newTestAnnotationsPermission(influxdb.WriteAction, annOrgID1) - svc.EXPECT(). - DeleteAnnotations(gomock.Any(), *annOrgID1, influxdb.AnnotationDeleteFilter{}). - Return(nil) - } else { - perm = newTestAnnotationsPermission(influxdb.ReadAction, annOrgID1) - } - - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - err := s.DeleteAnnotations(ctx, *annOrgID1, influxdb.AnnotationDeleteFilter{}) - require.Equal(t, tt.wantErr, err) - }) - } -} - -func Test_DeleteAnnotation(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - permissionOrg *platform.ID - wantErr error - }{ - { - "authorized to delete annotation by id", - annOrgID1, - nil, - }, - { - "not authorized to delete annotation by id", - annOrgID2, - &errors.Error{ - Msg: fmt.Sprintf("write:orgs/%s/annotations/%s is unauthorized", annOrgID1, rID), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockAnnotationService(ctrlr) - s := authorizer.NewAnnotationService(svc) - - svc.EXPECT(). - GetAnnotation(gomock.Any(), *rID). - Return(&influxdb.StoredAnnotation{ - ID: *rID, - OrgID: *annOrgID1, - }, nil) - - perm := newTestAnnotationsPermission(influxdb.WriteAction, tt.permissionOrg) - - if tt.wantErr == nil { - svc.EXPECT(). - DeleteAnnotation(gomock.Any(), *rID). - Return(nil) - } - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - err := s.DeleteAnnotation(ctx, *rID) - require.Equal(t, tt.wantErr, err) - }) - } -} - -func Test_UpdateAnnotation(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - permissionOrg *platform.ID - wantRet *influxdb.AnnotationEvent - wantErr error - }{ - { - "authorized to update annotation by id", - annOrgID1, - &influxdb.AnnotationEvent{}, - nil, - }, - { - "not authorized to update annotation by id", - annOrgID2, - nil, - &errors.Error{ - Msg: fmt.Sprintf("write:orgs/%s/annotations/%s is unauthorized", annOrgID1, rID), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockAnnotationService(ctrlr) - s := authorizer.NewAnnotationService(svc) - - svc.EXPECT(). - GetAnnotation(gomock.Any(), *rID). 
- Return(&influxdb.StoredAnnotation{ - ID: *rID, - OrgID: *annOrgID1, - }, nil) - - perm := newTestAnnotationsPermission(influxdb.WriteAction, tt.permissionOrg) - - if tt.wantErr == nil { - svc.EXPECT(). - UpdateAnnotation(gomock.Any(), *rID, influxdb.AnnotationCreate{}). - Return(tt.wantRet, nil) - } - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - got, err := s.UpdateAnnotation(ctx, *rID, influxdb.AnnotationCreate{}) - require.Equal(t, tt.wantErr, err) - require.Equal(t, tt.wantRet, got) - }) - } -} - -func Test_ListStreams(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - wantRet []influxdb.StoredStream - wantErr error - }{ - { - "authorized to list streams for the specified org", - []influxdb.StoredStream{}, - nil, - }, - { - "not authorized to list streams for the specified org", - nil, - &errors.Error{ - Msg: fmt.Sprintf("read:orgs/%s/annotations is unauthorized", annOrgID1), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockAnnotationService(ctrlr) - s := authorizer.NewAnnotationService(svc) - - var perm influxdb.Permission - if tt.wantErr == nil { - perm = newTestAnnotationsPermission(influxdb.ReadAction, annOrgID1) - svc.EXPECT(). - ListStreams(gomock.Any(), *annOrgID1, influxdb.StreamListFilter{}). - Return(tt.wantRet, nil) - } - - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - got, err := s.ListStreams(ctx, *annOrgID1, influxdb.StreamListFilter{}) - require.Equal(t, tt.wantErr, err) - require.Equal(t, tt.wantRet, got) - }) - } -} - -func Test_GetStream(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - permissionOrg *platform.ID - wantRet *influxdb.StoredStream - wantErr error - }{ - { - "authorized to access stream by id", - annOrgID1, - &influxdb.StoredStream{ - ID: *rID, - OrgID: *annOrgID1, - }, - nil, - }, - { - "not authorized to access stream by id", - annOrgID2, - nil, - &errors.Error{ - Msg: fmt.Sprintf("read:orgs/%s/annotations/%s is unauthorized", annOrgID1, rID), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockAnnotationService(ctrlr) - s := authorizer.NewAnnotationService(svc) - - svc.EXPECT(). - GetStream(gomock.Any(), *rID). 
- Return(&influxdb.StoredStream{ - ID: *rID, - OrgID: *annOrgID1, - }, nil) - - perm := newTestAnnotationsPermission(influxdb.ReadAction, tt.permissionOrg) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - got, err := s.GetStream(ctx, *rID) - require.Equal(t, tt.wantErr, err) - require.Equal(t, tt.wantRet, got) - }) - } -} - -func Test_CreateOrUpdateStream(t *testing.T) { - t.Parallel() - - var ( - testStreamName = "test stream" - testStream = influxdb.Stream{ - Name: testStreamName, - } - ) - - t.Run("updating a stream", func(t *testing.T) { - tests := []struct { - name string - permissionOrg *platform.ID - existingStreams []influxdb.StoredStream - getStreamRet *influxdb.StoredStream - wantRet *influxdb.ReadStream - wantErr error - }{ - { - "authorized to update an existing stream", - annOrgID1, - []influxdb.StoredStream{{ID: *rID, OrgID: *annOrgID1}}, - &influxdb.StoredStream{ID: *rID, OrgID: *annOrgID1}, - &influxdb.ReadStream{ID: *rID}, - nil, - }, - { - "not authorized to update an existing stream", - annOrgID2, - []influxdb.StoredStream{{ID: *rID, OrgID: *annOrgID1}}, - &influxdb.StoredStream{ID: *rID, OrgID: *annOrgID1}, - nil, - &errors.Error{ - Msg: fmt.Sprintf("write:orgs/%s/annotations/%s is unauthorized", annOrgID1, rID), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockAnnotationService(ctrlr) - s := authorizer.NewAnnotationService(svc) - - svc.EXPECT(). - ListStreams(gomock.Any(), *tt.permissionOrg, influxdb.StreamListFilter{ - StreamIncludes: []string{testStreamName}, - }). - Return(tt.existingStreams, nil) - - svc.EXPECT(). - GetStream(gomock.Any(), tt.existingStreams[0].ID). - Return(tt.getStreamRet, nil) - - if tt.wantErr == nil { - svc.EXPECT(). - UpdateStream(gomock.Any(), tt.existingStreams[0].ID, testStream). - Return(tt.wantRet, tt.wantErr) - } - - perm := newTestAnnotationsPermission(influxdb.WriteAction, tt.permissionOrg) - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - got, err := s.CreateOrUpdateStream(ctx, *tt.permissionOrg, testStream) - require.Equal(t, tt.wantErr, err) - require.Equal(t, tt.wantRet, got) - }) - } - }) - - t.Run("creating a stream", func(t *testing.T) { - tests := []struct { - name string - existingStreams []influxdb.StoredStream - wantRet *influxdb.ReadStream - wantErr error - }{ - { - "authorized to create a stream with the specified org", - []influxdb.StoredStream{}, - &influxdb.ReadStream{}, - nil, - }, - { - "not authorized to create a stream with the specified org", - []influxdb.StoredStream{}, - nil, - &errors.Error{ - Msg: fmt.Sprintf("write:orgs/%s/annotations is unauthorized", annOrgID1), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockAnnotationService(ctrlr) - s := authorizer.NewAnnotationService(svc) - - svc.EXPECT(). - ListStreams(gomock.Any(), *annOrgID1, influxdb.StreamListFilter{ - StreamIncludes: []string{testStreamName}, - }). - Return(tt.existingStreams, nil) - - var perm influxdb.Permission - if tt.wantErr == nil { - perm = newTestAnnotationsPermission(influxdb.WriteAction, annOrgID1) - svc.EXPECT(). - CreateOrUpdateStream(gomock.Any(), *annOrgID1, testStream). 
- Return(tt.wantRet, nil) - } else { - perm = newTestAnnotationsPermission(influxdb.ReadAction, annOrgID1) - } - - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - got, err := s.CreateOrUpdateStream(ctx, *annOrgID1, testStream) - require.Equal(t, tt.wantErr, err) - require.Equal(t, tt.wantRet, got) - }) - } - }) - - t.Run("stream list longer than 1 returns a server error", func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockAnnotationService(ctrlr) - s := authorizer.NewAnnotationService(svc) - - svc.EXPECT(). - ListStreams(gomock.Any(), *annOrgID1, influxdb.StreamListFilter{ - StreamIncludes: []string{testStreamName}, - }). - Return([]influxdb.StoredStream{{Name: testStreamName}, {Name: testStreamName}}, nil) - - wantErr := &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("more than one stream named %q for org %q", testStreamName, annOrgID1), - } - - got, err := s.CreateOrUpdateStream(context.Background(), *annOrgID1, testStream) - require.Nil(t, got) - require.Equal(t, err, wantErr) - }) -} - -func Test_UpdateStream(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - permissionOrg *platform.ID - wantRet *influxdb.ReadStream - wantErr error - }{ - { - "authorized to update stream by id", - annOrgID1, - &influxdb.ReadStream{}, - nil, - }, - { - "not authorized to update stream by id", - annOrgID2, - nil, - &errors.Error{ - Msg: fmt.Sprintf("write:orgs/%s/annotations/%s is unauthorized", annOrgID1, rID), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockAnnotationService(ctrlr) - s := authorizer.NewAnnotationService(svc) - - svc.EXPECT(). - GetStream(gomock.Any(), *rID). - Return(&influxdb.StoredStream{ - ID: *rID, - OrgID: *annOrgID1, - }, nil) - - perm := newTestAnnotationsPermission(influxdb.WriteAction, tt.permissionOrg) - - if tt.wantErr == nil { - svc.EXPECT(). - UpdateStream(gomock.Any(), *rID, influxdb.Stream{}). - Return(tt.wantRet, nil) - } - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - got, err := s.UpdateStream(ctx, *rID, influxdb.Stream{}) - require.Equal(t, tt.wantErr, err) - require.Equal(t, tt.wantRet, got) - }) - } -} - -func Test_DeleteStreams(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - wantErr error - }{ - { - "authorized to delete streams with the specified org", - nil, - }, - { - "not authorized to delete streams with the specified org", - &errors.Error{ - Msg: fmt.Sprintf("write:orgs/%s/annotations is unauthorized", annOrgID1), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockAnnotationService(ctrlr) - s := authorizer.NewAnnotationService(svc) - - var perm influxdb.Permission - if tt.wantErr == nil { - perm = newTestAnnotationsPermission(influxdb.WriteAction, annOrgID1) - svc.EXPECT(). - DeleteStreams(gomock.Any(), *annOrgID1, influxdb.BasicStream{}). 
- Return(nil) - } else { - perm = newTestAnnotationsPermission(influxdb.ReadAction, annOrgID1) - } - - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - err := s.DeleteStreams(ctx, *annOrgID1, influxdb.BasicStream{}) - require.Equal(t, tt.wantErr, err) - }) - } -} - -func Test_DeleteStreamByID(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - permissionOrg *platform.ID - wantErr error - }{ - { - "authorized to delete stream by id", - annOrgID1, - nil, - }, - { - "not authorized to delete stream by id", - annOrgID2, - &errors.Error{ - Msg: fmt.Sprintf("write:orgs/%s/annotations/%s is unauthorized", annOrgID1, rID), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockAnnotationService(ctrlr) - s := authorizer.NewAnnotationService(svc) - - svc.EXPECT(). - GetStream(gomock.Any(), *rID). - Return(&influxdb.StoredStream{ - ID: *rID, - OrgID: *annOrgID1, - }, nil) - - perm := newTestAnnotationsPermission(influxdb.WriteAction, tt.permissionOrg) - - if tt.wantErr == nil { - svc.EXPECT(). - DeleteStreamByID(gomock.Any(), *rID). - Return(nil) - } - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - err := s.DeleteStreamByID(ctx, *rID) - require.Equal(t, tt.wantErr, err) - }) - } -} - -func newTestAnnotationsPermission(action influxdb.Action, orgID *platform.ID) influxdb.Permission { - return influxdb.Permission{ - Action: action, - Resource: influxdb.Resource{ - Type: influxdb.AnnotationsResourceType, - OrgID: orgID, - }, - } -} diff --git a/authorizer/auth.go b/authorizer/auth.go deleted file mode 100644 index 3e70d0fbe5d..00000000000 --- a/authorizer/auth.go +++ /dev/null @@ -1,124 +0,0 @@ -package authorizer - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var _ influxdb.AuthorizationService = (*AuthorizationService)(nil) - -// AuthorizationService wraps a influxdb.AuthorizationService and authorizes actions -// against it appropriately. -type AuthorizationService struct { - s influxdb.AuthorizationService -} - -// NewAuthorizationService constructs an instance of an authorizing authorization service. -func NewAuthorizationService(s influxdb.AuthorizationService) *AuthorizationService { - return &AuthorizationService{ - s: s, - } -} - -// FindAuthorizationByID checks to see if the authorizer on context has read access to the id provided. -func (s *AuthorizationService) FindAuthorizationByID(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - a, err := s.s.FindAuthorizationByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, influxdb.AuthorizationsResourceType, a.ID, a.OrgID); err != nil { - return nil, err - } - if _, _, err := AuthorizeReadResource(ctx, influxdb.UsersResourceType, a.UserID); err != nil { - return nil, err - } - return a, nil -} - -// FindAuthorizationByToken retrieves the authorization and checks to see if the authorizer on context has read access to the authorization. 
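-// Read access is required both for the authorization itself (scoped to its org) and for the
-// user resource that owns it.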
-func (s *AuthorizationService) FindAuthorizationByToken(ctx context.Context, t string) (*influxdb.Authorization, error) { - a, err := s.s.FindAuthorizationByToken(ctx, t) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, influxdb.AuthorizationsResourceType, a.ID, a.OrgID); err != nil { - return nil, err - } - if _, _, err := AuthorizeReadResource(ctx, influxdb.UsersResourceType, a.UserID); err != nil { - return nil, err - } - return a, nil -} - -// FindAuthorizations retrieves all authorizations that match the provided filter and then filters the list down to only the resources that are authorized. -func (s *AuthorizationService) FindAuthorizations(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - // TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data - // will likely be expensive. - as, _, err := s.s.FindAuthorizations(ctx, filter, opt...) - if err != nil { - return nil, 0, err - } - return AuthorizeFindAuthorizations(ctx, as) -} - -// CreateAuthorization checks to see if the authorizer on context has write access to the global authorizations resource. -func (s *AuthorizationService) CreateAuthorization(ctx context.Context, a *influxdb.Authorization) error { - if _, _, err := AuthorizeCreate(ctx, influxdb.AuthorizationsResourceType, a.OrgID); err != nil { - return err - } - if _, _, err := AuthorizeWriteResource(ctx, influxdb.UsersResourceType, a.UserID); err != nil { - return err - } - if err := VerifyPermissions(ctx, a.Permissions); err != nil { - return err - } - return s.s.CreateAuthorization(ctx, a) -} - -// UpdateAuthorization checks to see if the authorizer on context has write access to the authorization provided. -func (s *AuthorizationService) UpdateAuthorization(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { - a, err := s.s.FindAuthorizationByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.AuthorizationsResourceType, a.ID, a.OrgID); err != nil { - return nil, err - } - if _, _, err := AuthorizeWriteResource(ctx, influxdb.UsersResourceType, a.UserID); err != nil { - return nil, err - } - return s.s.UpdateAuthorization(ctx, id, upd) -} - -// DeleteAuthorization checks to see if the authorizer on context has write access to the authorization provided. -func (s *AuthorizationService) DeleteAuthorization(ctx context.Context, id platform.ID) error { - a, err := s.s.FindAuthorizationByID(ctx, id) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.AuthorizationsResourceType, a.ID, a.OrgID); err != nil { - return err - } - if _, _, err := AuthorizeWriteResource(ctx, influxdb.UsersResourceType, a.UserID); err != nil { - return err - } - return s.s.DeleteAuthorization(ctx, id) -} - -// VerifyPermissions ensures that an authorization is allowed all of the appropriate permissions. 
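-// Every requested permission is checked against the authorizer on context, so a new token
-// cannot be granted permissions its creator does not hold; the first disallowed permission
-// yields an EForbidden error.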
-func VerifyPermissions(ctx context.Context, ps []influxdb.Permission) error { - for _, p := range ps { - if err := IsAllowed(ctx, p); err != nil { - return &errors.Error{ - Err: err, - Msg: fmt.Sprintf("permission %s is not allowed", p), - Code: errors.EForbidden, - } - } - } - return nil -} diff --git a/authorizer/auth_test.go b/authorizer/auth_test.go deleted file mode 100644 index 3f1538db230..00000000000 --- a/authorizer/auth_test.go +++ /dev/null @@ -1,440 +0,0 @@ -package authorizer_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var authorizationCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Authorization) []*influxdb.Authorization { - out := append([]*influxdb.Authorization(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func TestAuthorizationService_ReadAuthorization(t *testing.T) { - type args struct { - permissions []influxdb.Permission - } - type wants struct { - err error - authorizations []*influxdb.Authorization - } - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "authorized to access id", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - authorizations: []*influxdb.Authorization{ - { - ID: 10, - UserID: 1, - OrgID: 1, - }, - }, - }, - }, - { - name: "unauthorized to access id - wrong org", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(2), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/0000000000000001/authorizations/000000000000000a is unauthorized", - Code: errors.EUnauthorized, - }, - authorizations: []*influxdb.Authorization{}, - }, - }, - { - name: "unauthorized to access id - wrong user", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:users/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - authorizations: []*influxdb.Authorization{}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - m := 
&mock.AuthorizationService{} - m.FindAuthorizationByIDFn = func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - return &influxdb.Authorization{ - ID: id, - UserID: 1, - OrgID: 1, - }, nil - } - m.FindAuthorizationByTokenFn = func(ctx context.Context, t string) (*influxdb.Authorization, error) { - return &influxdb.Authorization{ - ID: 10, - UserID: 1, - OrgID: 1, - }, nil - } - m.FindAuthorizationsFn = func(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - return []*influxdb.Authorization{ - { - ID: 10, - UserID: 1, - OrgID: 1, - }, - }, 1, nil - } - s := authorizer.NewAuthorizationService(m) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - t.Run("find authorization by id", func(t *testing.T) { - _, err := s.FindAuthorizationByID(ctx, 10) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - t.Run("find authorization by token", func(t *testing.T) { - _, err := s.FindAuthorizationByToken(ctx, "10") - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - - t.Run("find authorizations", func(t *testing.T) { - as, _, err := s.FindAuthorizations(ctx, influxdb.AuthorizationFilter{}) - influxdbtesting.ErrorsEqual(t, err, nil) - - if diff := cmp.Diff(as, tt.wants.authorizations, authorizationCmpOptions...); diff != "" { - t.Errorf("authorizations are different -got/+want\ndiff %s", diff) - } - }) - - }) - } -} - -func TestAuthorizationService_WriteAuthorization(t *testing.T) { - type args struct { - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "authorized to write authorization", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to write authorization - wrong org", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(2), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/0000000000000001/authorizations/000000000000000a is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - { - name: "unauthorized to write authorization - wrong user", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:users/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - m := &mock.AuthorizationService{} - m.FindAuthorizationByIDFn = func(ctx context.Context, id platform.ID) 
(*influxdb.Authorization, error) { - return &influxdb.Authorization{ - ID: id, - UserID: 1, - OrgID: 1, - }, nil - } - m.CreateAuthorizationFn = func(ctx context.Context, a *influxdb.Authorization) error { - return nil - } - m.DeleteAuthorizationFn = func(ctx context.Context, id platform.ID) error { - return nil - } - m.UpdateAuthorizationFn = func(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { - return nil, nil - } - s := authorizer.NewAuthorizationService(m) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - t.Run("update authorization", func(t *testing.T) { - _, err := s.UpdateAuthorization(ctx, 10, &influxdb.AuthorizationUpdate{Status: influxdb.Active.Ptr()}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - - t.Run("delete authorization", func(t *testing.T) { - err := s.DeleteAuthorization(ctx, 10) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - - }) - } -} - -func TestAuthorizationService_CreateAuthorization(t *testing.T) { - type args struct { - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "authorized to write authorization", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to write authorization - wrong org", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(2), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/0000000000000001/authorizations is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - { - name: "unauthorized to write authorization - wrong user", - args: args{ - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:users/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - m := &mock.AuthorizationService{} - m.FindAuthorizationByIDFn = func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - return &influxdb.Authorization{ - ID: id, - UserID: 1, - OrgID: 1, - }, nil - } - m.CreateAuthorizationFn = func(ctx context.Context, a *influxdb.Authorization) error { - return nil - } - m.DeleteAuthorizationFn = func(ctx context.Context, id platform.ID) error { - return nil - } - m.UpdateAuthorizationFn = func(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { - return nil, nil - } - s := 
authorizer.NewAuthorizationService(m) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.CreateAuthorization(ctx, &influxdb.Authorization{OrgID: 1, UserID: 1}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/authorizer/authorize.go b/authorizer/authorize.go deleted file mode 100644 index 215f76ad4df..00000000000 --- a/authorizer/authorize.go +++ /dev/null @@ -1,176 +0,0 @@ -package authorizer - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -func isAllowedAll(a influxdb.Authorizer, permissions []influxdb.Permission) error { - pset, err := a.PermissionSet() - if err != nil { - return err - } - - for _, p := range permissions { - if !pset.Allowed(p) { - return &errors.Error{ - Code: errors.EUnauthorized, - Msg: fmt.Sprintf("%s is unauthorized", p), - } - } - } - return nil -} - -func isAllowed(a influxdb.Authorizer, p influxdb.Permission) error { - return isAllowedAll(a, []influxdb.Permission{p}) -} - -// IsAllowedAll checks to see if an action is authorized by ALL permissions. -// Also see IsAllowed. -func IsAllowedAll(ctx context.Context, permissions []influxdb.Permission) error { - a, err := icontext.GetAuthorizer(ctx) - if err != nil { - return err - } - return isAllowedAll(a, permissions) -} - -// IsAllowed checks to see if an action is authorized by retrieving the authorizer -// off of context and authorizing the action appropriately. -func IsAllowed(ctx context.Context, p influxdb.Permission) error { - return IsAllowedAll(ctx, []influxdb.Permission{p}) -} - -// IsAllowedAny checks to see if an action is authorized by ANY permissions. -// Also see IsAllowed. -func IsAllowedAny(ctx context.Context, permissions []influxdb.Permission) error { - a, err := icontext.GetAuthorizer(ctx) - if err != nil { - return err - } - pset, err := a.PermissionSet() - if err != nil { - return err - } - for _, p := range permissions { - if pset.Allowed(p) { - return nil - } - } - return &errors.Error{ - Code: errors.EUnauthorized, - Msg: fmt.Sprintf("none of %v is authorized", permissions), - } -} - -func authorize(ctx context.Context, a influxdb.Action, rt influxdb.ResourceType, rid, oid *platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { - var p *influxdb.Permission - var err error - if rid != nil && oid != nil { - p, err = influxdb.NewPermissionAtID(*rid, a, rt, *oid) - } else if rid != nil { - p, err = influxdb.NewResourcePermission(a, rt, *rid) - } else if oid != nil { - p, err = influxdb.NewPermission(a, rt, *oid) - } else { - p, err = influxdb.NewGlobalPermission(a, rt) - } - if err != nil { - return nil, influxdb.Permission{}, err - } - auth, err := icontext.GetAuthorizer(ctx) - if err != nil { - return nil, influxdb.Permission{}, err - } - return auth, *p, isAllowed(auth, *p) -} - -func authorizeReadSystemBucket(ctx context.Context, bid, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { - return AuthorizeReadOrg(ctx, oid) -} - -// AuthorizeReadBucket exists because buckets are a special case and should use this method. 
-// I.e., instead of: -// -// AuthorizeRead(ctx, influxdb.BucketsResourceType, b.ID, b.OrgID) -// -// use: -// -// AuthorizeReadBucket(ctx, b.Type, b.ID, b.OrgID) -func AuthorizeReadBucket(ctx context.Context, bt influxdb.BucketType, bid, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { - switch bt { - case influxdb.BucketTypeSystem: - return authorizeReadSystemBucket(ctx, bid, oid) - default: - return AuthorizeRead(ctx, influxdb.BucketsResourceType, bid, oid) - } -} - -// AuthorizeRead authorizes the user in the context to read the specified resource (identified by its type, ID, and orgID). -// NOTE: authorization will pass even if the user only has permissions for the resource type and organization ID only. -func AuthorizeRead(ctx context.Context, rt influxdb.ResourceType, rid, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { - return authorize(ctx, influxdb.ReadAction, rt, &rid, &oid) -} - -// AuthorizeWrite authorizes the user in the context to write the specified resource (identified by its type, ID, and orgID). -// NOTE: authorization will pass even if the user only has permissions for the resource type and organization ID only. -func AuthorizeWrite(ctx context.Context, rt influxdb.ResourceType, rid, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { - return authorize(ctx, influxdb.WriteAction, rt, &rid, &oid) -} - -// AuthorizeRead authorizes the user in the context to read the specified resource (identified by its type, ID). -// NOTE: authorization will pass only if the user has a specific permission for the given resource. -func AuthorizeReadResource(ctx context.Context, rt influxdb.ResourceType, rid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { - return authorize(ctx, influxdb.ReadAction, rt, &rid, nil) -} - -// AuthorizeWrite authorizes the user in the context to write the specified resource (identified by its type, ID). -// NOTE: authorization will pass only if the user has a specific permission for the given resource. -func AuthorizeWriteResource(ctx context.Context, rt influxdb.ResourceType, rid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { - return authorize(ctx, influxdb.WriteAction, rt, &rid, nil) -} - -// AuthorizeOrgReadResource authorizes the given org to read the resources of the given type. -// NOTE: this is pretty much the same as AuthorizeRead, in the case that the resource ID is ignored. -// Use it in the case that you do not know which resource in particular you want to give access to. -func AuthorizeOrgReadResource(ctx context.Context, rt influxdb.ResourceType, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { - return authorize(ctx, influxdb.ReadAction, rt, nil, &oid) -} - -// AuthorizeOrgWriteResource authorizes the given org to write the resources of the given type. -// NOTE: this is pretty much the same as AuthorizeWrite, in the case that the resource ID is ignored. -// Use it in the case that you do not know which resource in particular you want to give access to. -func AuthorizeOrgWriteResource(ctx context.Context, rt influxdb.ResourceType, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { - return authorize(ctx, influxdb.WriteAction, rt, nil, &oid) -} - -// AuthorizeCreate authorizes a user to create a resource of the given type for the given org. 
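
The helpers above resolve a permission whose scope depends on which IDs are supplied (resource-and-org, resource-only, org-only, or global) and then check it against the Authorizer stored on the context. As a rough illustration of how a caller outside this package might use them, here is a minimal sketch; the `canReadOrgBuckets` function and the `main` wrapper are hypothetical, while `influxdb.NewPermission` and `IsAllowed` are the helpers defined in this file (import paths as they appear in this diff).

```go
package main

import (
	"context"
	"fmt"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/authorizer"
	"github.com/influxdata/influxdb/v2/kit/platform"
)

// canReadOrgBuckets is a hypothetical caller: it builds an org-scoped
// permission (read access to every bucket in orgID) and asks whether the
// Authorizer on ctx grants it.
func canReadOrgBuckets(ctx context.Context, orgID platform.ID) error {
	p, err := influxdb.NewPermission(influxdb.ReadAction, influxdb.BucketsResourceType, orgID)
	if err != nil {
		return err
	}
	// IsAllowed retrieves the Authorizer from the context and returns an
	// unauthorized error if its permission set does not cover p.
	return authorizer.IsAllowed(ctx, *p)
}

func main() {
	// With no Authorizer attached to the context, the check fails with the
	// error returned while looking up the authorizer.
	if err := canReadOrgBuckets(context.Background(), 1); err != nil {
		fmt.Println("not allowed:", err)
	}
}
```

In practice callers sit behind middleware that has already placed an Authorizer on the context, as the backup service comments later in this diff note.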
-func AuthorizeCreate(ctx context.Context, rt influxdb.ResourceType, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { - return AuthorizeOrgWriteResource(ctx, rt, oid) -} - -// AuthorizeReadOrg authorizes the user to read the given org. -func AuthorizeReadOrg(ctx context.Context, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { - return authorize(ctx, influxdb.ReadAction, influxdb.OrgsResourceType, &oid, nil) -} - -// AuthorizeWriteOrg authorizes the user to write the given org. -func AuthorizeWriteOrg(ctx context.Context, oid platform.ID) (influxdb.Authorizer, influxdb.Permission, error) { - return authorize(ctx, influxdb.WriteAction, influxdb.OrgsResourceType, &oid, nil) -} - -// AuthorizeReadGlobal authorizes to read resources of the given type. -func AuthorizeReadGlobal(ctx context.Context, rt influxdb.ResourceType) (influxdb.Authorizer, influxdb.Permission, error) { - return authorize(ctx, influxdb.ReadAction, rt, nil, nil) -} - -// AuthorizeWriteGlobal authorizes to write resources of the given type. -func AuthorizeWriteGlobal(ctx context.Context, rt influxdb.ResourceType) (influxdb.Authorizer, influxdb.Permission, error) { - return authorize(ctx, influxdb.WriteAction, rt, nil, nil) -} diff --git a/authorizer/authorize_find.go b/authorizer/authorize_find.go deleted file mode 100644 index aa6c89fadd9..00000000000 --- a/authorizer/authorize_find.go +++ /dev/null @@ -1,358 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -// AuthorizeFindDBRPs takes the given items and returns only the ones that the user is authorized to access. -func AuthorizeFindDBRPs(ctx context.Context, rs []*influxdb.DBRPMapping) ([]*influxdb.DBRPMapping, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeRead(ctx, influxdb.BucketsResourceType, r.BucketID, r.OrganizationID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindAuthorizations takes the given items and returns only the ones that the user is authorized to read. -func AuthorizeFindAuthorizations(ctx context.Context, rs []*influxdb.Authorization) ([]*influxdb.Authorization, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeRead(ctx, influxdb.AuthorizationsResourceType, r.ID, r.OrgID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - _, _, err = AuthorizeReadResource(ctx, influxdb.UsersResourceType, r.UserID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindBuckets takes the given items and returns only the ones that the user is authorized to read. 
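
Every AuthorizeFind* helper in this file repeats the same "filtering without allocating" idiom from the linked SliceTricks page: reuse the input slice's backing array, skip items whose read check comes back `EUnauthorized`, and propagate any other error. A toy, self-contained sketch of just the slice idiom (the `filterEven` name and the integer data are made up for illustration) shows the mechanics, including the side effect that the input's backing array is overwritten:

```go
package main

import "fmt"

// filterEven keeps only even values, reusing the input slice's backing array
// ("filtering without allocating", as in the AuthorizeFind* helpers).
// The input's backing array is overwritten in the process.
func filterEven(in []int) []int {
	out := in[:0] // same backing array, length zero
	for _, v := range in {
		if v%2 != 0 {
			continue // analogous to skipping items that are EUnauthorized
		}
		out = append(out, v) // writes into in's backing array
	}
	return out
}

func main() {
	vals := []int{1, 2, 3, 4, 5, 6}
	fmt.Println(filterEven(vals)) // [2 4 6]
}
```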
-func AuthorizeFindBuckets(ctx context.Context, rs []*influxdb.Bucket) ([]*influxdb.Bucket, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeReadBucket(ctx, r.Type, r.ID, r.OrgID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindDashboards takes the given items and returns only the ones that the user is authorized to read. -func AuthorizeFindDashboards(ctx context.Context, rs []*influxdb.Dashboard) ([]*influxdb.Dashboard, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeRead(ctx, influxdb.DashboardsResourceType, r.ID, r.OrganizationID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindAnnotations takes the given items and returns only the ones that the user is authorized to read. -func AuthorizeFindAnnotations(ctx context.Context, rs []influxdb.StoredAnnotation) ([]influxdb.StoredAnnotation, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeRead(ctx, influxdb.AnnotationsResourceType, r.ID, r.OrgID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindStreams takes the given items and returns only the ones that the user is authorized to read. -func AuthorizeFindStreams(ctx context.Context, rs []influxdb.StoredStream) ([]influxdb.StoredStream, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeRead(ctx, influxdb.AnnotationsResourceType, r.ID, r.OrgID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindNotebooks takes the given items and returns only the ones that the user is authorized to read. -func AuthorizeFindNotebooks(ctx context.Context, rs []*influxdb.Notebook) ([]*influxdb.Notebook, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeRead(ctx, influxdb.NotebooksResourceType, r.ID, r.OrgID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindOrganizations takes the given items and returns only the ones that the user is authorized to read. 
-func AuthorizeFindOrganizations(ctx context.Context, rs []*influxdb.Organization) ([]*influxdb.Organization, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeReadOrg(ctx, r.ID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindSources takes the given items and returns only the ones that the user is authorized to read. -func AuthorizeFindSources(ctx context.Context, rs []*influxdb.Source) ([]*influxdb.Source, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeRead(ctx, influxdb.SourcesResourceType, r.ID, r.OrganizationID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindTasks takes the given items and returns only the ones that the user is authorized to read. -func AuthorizeFindTasks(ctx context.Context, rs []*taskmodel.Task) ([]*taskmodel.Task, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeRead(ctx, influxdb.TasksResourceType, r.ID, r.OrganizationID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindTelegrafs takes the given items and returns only the ones that the user is authorized to read. -func AuthorizeFindTelegrafs(ctx context.Context, rs []*influxdb.TelegrafConfig) ([]*influxdb.TelegrafConfig, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeRead(ctx, influxdb.TelegrafsResourceType, r.ID, r.OrgID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindUsers takes the given items and returns only the ones that the user is authorized to read. -func AuthorizeFindUsers(ctx context.Context, rs []*influxdb.User) ([]*influxdb.User, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeReadResource(ctx, influxdb.UsersResourceType, r.ID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindVariables takes the given items and returns only the ones that the user is authorized to read. 
-func AuthorizeFindVariables(ctx context.Context, rs []*influxdb.Variable) ([]*influxdb.Variable, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeRead(ctx, influxdb.VariablesResourceType, r.ID, r.OrganizationID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindScrapers takes the given items and returns only the ones that the user is authorize to read. -func AuthorizeFindScrapers(ctx context.Context, rs []influxdb.ScraperTarget) ([]influxdb.ScraperTarget, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeRead(ctx, influxdb.ScraperResourceType, r.ID, r.OrgID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindLabels takes the given items and returns only the ones that the user is authorized to read. -func AuthorizeFindLabels(ctx context.Context, rs []*influxdb.Label) ([]*influxdb.Label, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeRead(ctx, influxdb.LabelsResourceType, r.ID, r.OrgID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindNotificationRules takes the given items and returns only the ones that the user is authorized to read. -func AuthorizeFindNotificationRules(ctx context.Context, rs []influxdb.NotificationRule) ([]influxdb.NotificationRule, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeRead(ctx, influxdb.NotificationRuleResourceType, r.GetID(), r.GetOrgID()) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindNotificationEndpoints takes the given items and returns only the ones that the user is authorized to read. 
-func AuthorizeFindNotificationEndpoints(ctx context.Context, rs []influxdb.NotificationEndpoint) ([]influxdb.NotificationEndpoint, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeRead(ctx, influxdb.NotificationEndpointResourceType, r.GetID(), r.GetOrgID()) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindChecks takes the given items and returns only the ones that the user is authorized to read. -func AuthorizeFindChecks(ctx context.Context, rs []influxdb.Check) ([]influxdb.Check, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - _, _, err := AuthorizeRead(ctx, influxdb.ChecksResourceType, r.GetID(), r.GetOrgID()) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, 0, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} - -// AuthorizeFindUserResourceMappings takes the given items and returns only the ones that the user is authorized to read. -func AuthorizeFindUserResourceMappings(ctx context.Context, os OrgIDResolver, rs []*influxdb.UserResourceMapping) ([]*influxdb.UserResourceMapping, int, error) { - // This filters without allocating - // https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating - rrs := rs[:0] - for _, r := range rs { - orgID, err := os.FindResourceOrganizationID(ctx, r.ResourceType, r.ResourceID) - if err != nil { - return nil, 0, err - } - if _, _, err := AuthorizeRead(ctx, r.ResourceType, r.ResourceID, orgID); err != nil { - continue - } - rrs = append(rrs, r) - } - return rrs, len(rrs), nil -} diff --git a/authorizer/backup.go b/authorizer/backup.go deleted file mode 100644 index dbbe94c91df..00000000000 --- a/authorizer/backup.go +++ /dev/null @@ -1,58 +0,0 @@ -package authorizer - -import ( - "context" - "io" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/tracing" -) - -var _ influxdb.BackupService = (*BackupService)(nil) - -// BackupService wraps a influxdb.BackupService and authorizes actions -// against it appropriately. -type BackupService struct { - s influxdb.BackupService -} - -// NewBackupService constructs an instance of an authorizing backup service. 
-func NewBackupService(s influxdb.BackupService) *BackupService { - return &BackupService{ - s: s, - } -} - -func (b BackupService) BackupKVStore(ctx context.Context, w io.Writer) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if err := IsAllowedAll(ctx, influxdb.OperPermissions()); err != nil { - return err - } - return b.s.BackupKVStore(ctx, w) -} - -func (b BackupService) BackupShard(ctx context.Context, w io.Writer, shardID uint64, since time.Time) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if err := IsAllowedAll(ctx, influxdb.OperPermissions()); err != nil { - return err - } - return b.s.BackupShard(ctx, w, shardID, since) -} - -// The Lock and Unlock methods below do not have authorization checks and should only be used -// when appropriate authorization has already been confirmed, such as behind a middleware. They -// are intended to be used for coordinating the locking and unlocking of the kv and sql metadata -// databases during a backup. They are made available here to allow the calls to pass-through to the -// underlying service. -func (b BackupService) RLockKVStore() { - b.s.RLockKVStore() -} - -func (b BackupService) RUnlockKVStore() { - b.s.RUnlockKVStore() -} diff --git a/authorizer/bucket.go b/authorizer/bucket.go deleted file mode 100644 index 81dd4334154..00000000000 --- a/authorizer/bucket.go +++ /dev/null @@ -1,118 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/tracing" -) - -var _ influxdb.BucketService = (*BucketService)(nil) - -// BucketService wraps a influxdb.BucketService and authorizes actions -// against it appropriately. -type BucketService struct { - s influxdb.BucketService -} - -// NewBucketService constructs an instance of an authorizing bucket service. -func NewBucketService(s influxdb.BucketService) *BucketService { - return &BucketService{ - s: s, - } -} - -// FindBucketByID checks to see if the authorizer on context has read access to the id provided. -func (s *BucketService) FindBucketByID(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - b, err := s.s.FindBucketByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeReadBucket(ctx, b.Type, b.ID, b.OrgID); err != nil { - return nil, err - } - return b, nil -} - -// FindBucketByName returns a bucket by name for a particular organization. -func (s *BucketService) FindBucketByName(ctx context.Context, orgID platform.ID, n string) (*influxdb.Bucket, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - b, err := s.s.FindBucketByName(ctx, orgID, n) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeReadBucket(ctx, b.Type, b.ID, b.OrgID); err != nil { - return nil, err - } - return b, nil -} - -// FindBucket retrieves the bucket and checks to see if the authorizer on context has read access to the bucket. 
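
Putting the pieces together, a wrapped bucket service only returns data that the caller's Authorizer permits. The following sketch is hypothetical wiring based on the mock-driven tests later in this diff: a mock underlying service, a MockAuthorizer granting read access to bucket 1 only, and two lookups, the second of which is rejected as unauthorized.

```go
package main

import (
	"context"
	"fmt"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/authorizer"
	influxdbcontext "github.com/influxdata/influxdb/v2/context"
	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/mock"
	influxdbtesting "github.com/influxdata/influxdb/v2/testing"
)

func main() {
	// A mock underlying service that always returns a bucket belonging to org 10.
	m := &mock.BucketService{
		FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) {
			return &influxdb.Bucket{ID: id, OrgID: 10}, nil
		},
	}
	s := authorizer.NewBucketService(m)

	// Grant read access to bucket 1 only, and attach that authorizer to the context.
	perm := influxdb.Permission{
		Action: influxdb.ReadAction,
		Resource: influxdb.Resource{
			Type: influxdb.BucketsResourceType,
			ID:   influxdbtesting.IDPtr(1),
		},
	}
	ctx := influxdbcontext.SetAuthorizer(context.Background(),
		mock.NewMockAuthorizer(false, []influxdb.Permission{perm}))

	if _, err := s.FindBucketByID(ctx, 1); err != nil {
		fmt.Println("bucket 1:", err) // allowed: err is nil, so nothing is printed
	}
	if _, err := s.FindBucketByID(ctx, 2); err != nil {
		fmt.Println("bucket 2:", err) // unauthorized: read on bucket 2 was not granted
	}
}
```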
-func (s *BucketService) FindBucket(ctx context.Context, filter influxdb.BucketFilter) (*influxdb.Bucket, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - b, err := s.s.FindBucket(ctx, filter) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeReadBucket(ctx, b.Type, b.ID, b.OrgID); err != nil { - return nil, err - } - return b, nil -} - -// FindBuckets retrieves all buckets that match the provided filter and then filters the list down to only the resources that are authorized. -func (s *BucketService) FindBuckets(ctx context.Context, filter influxdb.BucketFilter, opt ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - // TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data - // will likely be expensive. - bs, _, err := s.s.FindBuckets(ctx, filter, opt...) - if err != nil { - return nil, 0, err - } - return AuthorizeFindBuckets(ctx, bs) -} - -// CreateBucket checks to see if the authorizer on context has write access to the global buckets resource. -func (s *BucketService) CreateBucket(ctx context.Context, b *influxdb.Bucket) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if _, _, err := AuthorizeCreate(ctx, influxdb.BucketsResourceType, b.OrgID); err != nil { - return err - } - return s.s.CreateBucket(ctx, b) -} - -// UpdateBucket checks to see if the authorizer on context has write access to the bucket provided. -func (s *BucketService) UpdateBucket(ctx context.Context, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { - b, err := s.s.FindBucketByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.BucketsResourceType, id, b.OrgID); err != nil { - return nil, err - } - return s.s.UpdateBucket(ctx, id, upd) -} - -// DeleteBucket checks to see if the authorizer on context has write access to the bucket provided. -func (s *BucketService) DeleteBucket(ctx context.Context, id platform.ID) error { - b, err := s.s.FindBucketByID(ctx, id) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.BucketsResourceType, id, b.OrgID); err != nil { - return err - } - return s.s.DeleteBucket(ctx, id) -} diff --git a/authorizer/bucket_test.go b/authorizer/bucket_test.go deleted file mode 100644 index 81faf2e01e4..00000000000 --- a/authorizer/bucket_test.go +++ /dev/null @@ -1,630 +0,0 @@ -package authorizer_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var bucketCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Bucket) []*influxdb.Bucket { - out := append([]*influxdb.Bucket(nil), in...) 
// Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func TestBucketService_FindBucketByID(t *testing.T) { - type fields struct { - BucketService influxdb.BucketService - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: id, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: id, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/buckets/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewBucketService(tt.fields.BucketService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindBucketByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestBucketService_FindBucket(t *testing.T) { - type fields struct { - BucketService influxdb.BucketService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access bucket", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketFn: func(ctx context.Context, filter influxdb.BucketFilter) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access bucket", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketFn: func(ctx context.Context, filter influxdb.BucketFilter) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/buckets/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewBucketService(tt.fields.BucketService) - - ctx := context.Background() - ctx = 
influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindBucket(ctx, influxdb.BucketFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestBucketService_FindBuckets(t *testing.T) { - type fields struct { - BucketService influxdb.BucketService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - buckets []*influxdb.Bucket - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all buckets", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketsFn: func(ctx context.Context, filter influxdb.BucketFilter, opt ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - return []*influxdb.Bucket{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - { - ID: 3, - OrgID: 11, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - }, - }, - }, - wants: wants{ - buckets: []*influxdb.Bucket{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - { - ID: 3, - OrgID: 11, - }, - }, - }, - }, - { - name: "authorized to access a single orgs buckets", - fields: fields{ - BucketService: &mock.BucketService{ - - FindBucketsFn: func(ctx context.Context, filter influxdb.BucketFilter, opt ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - return []*influxdb.Bucket{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - { - ID: 3, - OrgID: 11, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - buckets: []*influxdb.Bucket{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewBucketService(tt.fields.BucketService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - buckets, _, err := s.FindBuckets(ctx, influxdb.BucketFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(buckets, tt.wants.buckets, bucketCmpOptions...); diff != "" { - t.Errorf("buckets are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestBucketService_UpdateBucket(t *testing.T) { - type fields struct { - BucketService influxdb.BucketService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update bucket", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - UpdateBucketFn: func(ctx context.Context, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: 
influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update bucket", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - UpdateBucketFn: func(ctx context.Context, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/buckets/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewBucketService(tt.fields.BucketService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - _, err := s.UpdateBucket(ctx, tt.args.id, influxdb.BucketUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestBucketService_DeleteBucket(t *testing.T) { - type fields struct { - BucketService influxdb.BucketService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete bucket", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - DeleteBucketFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete bucket", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - DeleteBucketFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/buckets/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewBucketService(tt.fields.BucketService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.DeleteBucket(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestBucketService_CreateBucket(t *testing.T) { - type fields struct { - BucketService 
influxdb.BucketService - } - type args struct { - permission influxdb.Permission - orgID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create bucket", - fields: fields{ - BucketService: &mock.BucketService{ - CreateBucketFn: func(ctx context.Context, b *influxdb.Bucket) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create bucket", - fields: fields{ - BucketService: &mock.BucketService{ - CreateBucketFn: func(ctx context.Context, b *influxdb.Bucket) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/buckets is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewBucketService(tt.fields.BucketService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.CreateBucket(ctx, &influxdb.Bucket{OrgID: tt.args.orgID}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/authorizer/check.go b/authorizer/check.go deleted file mode 100644 index 0f0744e3948..00000000000 --- a/authorizer/check.go +++ /dev/null @@ -1,108 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -var _ influxdb.CheckService = (*CheckService)(nil) - -// CheckService wraps a influxdb.CheckService and authorizes actions -// against it appropriately. -type CheckService struct { - s influxdb.CheckService - influxdb.UserResourceMappingService - influxdb.OrganizationService - taskmodel.TaskService -} - -// NewCheckService constructs an instance of an authorizing check service. -func NewCheckService(s influxdb.CheckService, urm influxdb.UserResourceMappingService, org influxdb.OrganizationService) *CheckService { - return &CheckService{ - s: s, - UserResourceMappingService: urm, - OrganizationService: org, - } -} - -// FindCheckByID checks to see if the authorizer on context has read access to the id provided. -func (s *CheckService) FindCheckByID(ctx context.Context, id platform.ID) (influxdb.Check, error) { - chk, err := s.s.FindCheckByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, influxdb.ChecksResourceType, chk.GetID(), chk.GetOrgID()); err != nil { - return nil, err - } - return chk, nil -} - -// FindChecks retrieves all checks that match the provided filter and then filters the list down to only the resources that are authorized. -func (s *CheckService) FindChecks(ctx context.Context, filter influxdb.CheckFilter, opt ...influxdb.FindOptions) ([]influxdb.Check, int, error) { - // TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data - // will likely be expensive. 
- chks, _, err := s.s.FindChecks(ctx, filter, opt...) - if err != nil { - return nil, 0, err - } - return AuthorizeFindChecks(ctx, chks) -} - -// FindCheck will return the check. -func (s *CheckService) FindCheck(ctx context.Context, filter influxdb.CheckFilter) (influxdb.Check, error) { - chk, err := s.s.FindCheck(ctx, filter) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, influxdb.ChecksResourceType, chk.GetID(), chk.GetOrgID()); err != nil { - return nil, err - } - return chk, nil -} - -// CreateCheck checks to see if the authorizer on context has write access to the global check resource. -func (s *CheckService) CreateCheck(ctx context.Context, chk influxdb.CheckCreate, userID platform.ID) error { - if _, _, err := AuthorizeCreate(ctx, influxdb.ChecksResourceType, chk.GetOrgID()); err != nil { - return err - } - return s.s.CreateCheck(ctx, chk, userID) -} - -// UpdateCheck checks to see if the authorizer on context has write access to the check provided. -func (s *CheckService) UpdateCheck(ctx context.Context, id platform.ID, upd influxdb.CheckCreate) (influxdb.Check, error) { - chk, err := s.FindCheckByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.ChecksResourceType, chk.GetID(), chk.GetOrgID()); err != nil { - return nil, err - } - return s.s.UpdateCheck(ctx, id, upd) -} - -// PatchCheck checks to see if the authorizer on context has write access to the check provided. -func (s *CheckService) PatchCheck(ctx context.Context, id platform.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { - chk, err := s.FindCheckByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.ChecksResourceType, chk.GetID(), chk.GetOrgID()); err != nil { - return nil, err - } - return s.s.PatchCheck(ctx, id, upd) -} - -// DeleteCheck checks to see if the authorizer on context has write access to the check provided. -func (s *CheckService) DeleteCheck(ctx context.Context, id platform.ID) error { - chk, err := s.FindCheckByID(ctx, id) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.ChecksResourceType, chk.GetID(), chk.GetOrgID()); err != nil { - return err - } - return s.s.DeleteCheck(ctx, id) -} diff --git a/authorizer/check_test.go b/authorizer/check_test.go deleted file mode 100644 index f90c1a91d80..00000000000 --- a/authorizer/check_test.go +++ /dev/null @@ -1,717 +0,0 @@ -package authorizer_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification/check" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var checkCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []influxdb.Check) []influxdb.Check { - out := append([]influxdb.Check(nil), in...) 
// Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].GetID() > out[j].GetID() - }) - return out - }), -} - -func TestCheckService_FindCheckByID(t *testing.T) { - type fields struct { - CheckService influxdb.CheckService - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { - return &check.Deadman{ - Base: check.Base{ - ID: id, - OrgID: 10, - }, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { - return &check.Deadman{ - Base: check.Base{ - ID: id, - OrgID: 10, - }, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/checks/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewCheckService(tt.fields.CheckService, mock.NewUserResourceMappingService(), mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindCheckByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestCheckService_FindChecks(t *testing.T) { - type fields struct { - CheckService influxdb.CheckService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - checks []influxdb.Check - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all checks", - fields: fields{ - CheckService: &mock.CheckService{ - FindChecksFn: func(ctx context.Context, filter influxdb.CheckFilter, opt ...influxdb.FindOptions) ([]influxdb.Check, int, error) { - return []influxdb.Check{ - &check.Deadman{ - Base: check.Base{ - ID: 1, - OrgID: 10, - }, - }, - &check.Deadman{ - Base: check.Base{ - ID: 2, - OrgID: 10, - }, - }, - &check.Threshold{ - Base: check.Base{ - ID: 3, - OrgID: 11, - }, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - }, - }, - }, - wants: wants{ - checks: []influxdb.Check{ - &check.Deadman{ - Base: check.Base{ - ID: 1, - OrgID: 10, - }, - }, - &check.Deadman{ - Base: check.Base{ - ID: 2, - OrgID: 10, - }, - }, - &check.Threshold{ - Base: check.Base{ - ID: 3, - OrgID: 11, - }, - }, - }, - }, - }, - { - name: "authorized to access a single orgs checks", - fields: fields{ - CheckService: &mock.CheckService{ - FindChecksFn: func(ctx context.Context, filter influxdb.CheckFilter, opt ...influxdb.FindOptions) 
([]influxdb.Check, int, error) { - return []influxdb.Check{ - &check.Deadman{ - Base: check.Base{ - ID: 1, - OrgID: 10, - }, - }, - &check.Deadman{ - Base: check.Base{ - ID: 2, - OrgID: 10, - }, - }, - &check.Threshold{ - Base: check.Base{ - ID: 3, - OrgID: 11, - }, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - checks: []influxdb.Check{ - &check.Deadman{ - Base: check.Base{ - ID: 1, - OrgID: 10, - }, - }, - &check.Deadman{ - Base: check.Base{ - ID: 2, - OrgID: 10, - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewCheckService(tt.fields.CheckService, mock.NewUserResourceMappingService(), mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - ts, _, err := s.FindChecks(ctx, influxdb.CheckFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(ts, tt.wants.checks, checkCmpOptions...); diff != "" { - t.Errorf("checks are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestCheckService_UpdateCheck(t *testing.T) { - type fields struct { - CheckService influxdb.CheckService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update check", - fields: fields{ - CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { - return &check.Deadman{ - Base: check.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - UpdateCheckFn: func(ctx context.Context, id platform.ID, upd influxdb.CheckCreate) (influxdb.Check, error) { - return &check.Deadman{ - Base: check.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update check", - fields: fields{ - CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { - return &check.Deadman{ - Base: check.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - UpdateCheckFn: func(ctx context.Context, id platform.ID, upd influxdb.CheckCreate) (influxdb.Check, error) { - return &check.Deadman{ - Base: check.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/checks/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewCheckService(tt.fields.CheckService, mock.NewUserResourceMappingService(), 
mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - cc := influxdb.CheckCreate{ - Check: &check.Deadman{}, - Status: influxdb.Active, - } - - _, err := s.UpdateCheck(ctx, tt.args.id, cc) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestCheckService_PatchCheck(t *testing.T) { - type fields struct { - CheckService influxdb.CheckService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to patch check", - fields: fields{ - CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { - return &check.Deadman{ - Base: check.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - PatchCheckFn: func(ctx context.Context, id platform.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { - return &check.Deadman{ - Base: check.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to patch check", - fields: fields{ - CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { - return &check.Deadman{ - Base: check.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - PatchCheckFn: func(ctx context.Context, id platform.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { - return &check.Deadman{ - Base: check.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/checks/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewCheckService(tt.fields.CheckService, mock.NewUserResourceMappingService(), mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - _, err := s.PatchCheck(ctx, tt.args.id, influxdb.CheckUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestCheckService_DeleteCheck(t *testing.T) { - type fields struct { - CheckService influxdb.CheckService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete check", - fields: fields{ - CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { - return &check.Deadman{ - Base: check.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - DeleteCheckFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - 
}, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete check", - fields: fields{ - CheckService: &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { - return &check.Deadman{ - Base: check.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - DeleteCheckFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/checks/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewCheckService(tt.fields.CheckService, mock.NewUserResourceMappingService(), mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.DeleteCheck(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestCheckService_CreateCheck(t *testing.T) { - type fields struct { - CheckService influxdb.CheckService - } - type args struct { - permission influxdb.Permission - orgID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create check with org owner", - fields: fields{ - CheckService: &mock.CheckService{ - CreateCheckFn: func(ctx context.Context, chk influxdb.CheckCreate, userID platform.ID) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create check", - fields: fields{ - CheckService: &mock.CheckService{ - CreateCheckFn: func(ctx context.Context, chk influxdb.CheckCreate, userID platform.ID) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/checks is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewCheckService(tt.fields.CheckService, mock.NewUserResourceMappingService(), mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - c := &check.Deadman{ - Base: check.Base{ - OrgID: tt.args.orgID}, - } - - cc := influxdb.CheckCreate{ - Check: c, - Status: influxdb.Active, - } - - err := s.CreateCheck(ctx, cc, 3) - influxdbtesting.ErrorsEqual(t, err, 
tt.wants.err)
-		})
-	}
-}
diff --git a/authorizer/dashboard.go b/authorizer/dashboard.go
deleted file mode 100644
index 17ebb46fb63..00000000000
--- a/authorizer/dashboard.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package authorizer
-
-import (
-	"context"
-
-	"github.com/influxdata/influxdb/v2"
-	"github.com/influxdata/influxdb/v2/kit/platform"
-)
-
-var _ influxdb.DashboardService = (*DashboardService)(nil)
-
-// DashboardService wraps a influxdb.DashboardService and authorizes actions
-// against it appropriately.
-type DashboardService struct {
-	s influxdb.DashboardService
-}
-
-// NewDashboardService constructs an instance of an authorizing dashboard service.
-func NewDashboardService(s influxdb.DashboardService) *DashboardService {
-	return &DashboardService{
-		s: s,
-	}
-}
-
-// FindDashboardByID checks to see if the authorizer on context has read access to the id provided.
-func (s *DashboardService) FindDashboardByID(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) {
-	b, err := s.s.FindDashboardByID(ctx, id)
-	if err != nil {
-		return nil, err
-	}
-	if _, _, err := AuthorizeRead(ctx, influxdb.DashboardsResourceType, id, b.OrganizationID); err != nil {
-		return nil, err
-	}
-	return b, nil
-}
-
-// FindDashboards retrieves all dashboards that match the provided filter and then filters the list down to only the resources that are authorized.
-func (s *DashboardService) FindDashboards(ctx context.Context, filter influxdb.DashboardFilter, opt influxdb.FindOptions) ([]*influxdb.Dashboard, int, error) {
-	// TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data
-	// will likely be expensive.
-	ds, _, err := s.s.FindDashboards(ctx, filter, opt)
-	if err != nil {
-		return nil, 0, err
-	}
-	return AuthorizeFindDashboards(ctx, ds)
-}
-
-// CreateDashboard checks to see if the authorizer on context has write access to the global dashboards resource.
-func (s *DashboardService) CreateDashboard(ctx context.Context, b *influxdb.Dashboard) error {
-	if _, _, err := AuthorizeCreate(ctx, influxdb.DashboardsResourceType, b.OrganizationID); err != nil {
-		return err
-	}
-	return s.s.CreateDashboard(ctx, b)
-}
-
-// UpdateDashboard checks to see if the authorizer on context has write access to the dashboard provided.
-func (s *DashboardService) UpdateDashboard(ctx context.Context, id platform.ID, upd influxdb.DashboardUpdate) (*influxdb.Dashboard, error) {
-	b, err := s.s.FindDashboardByID(ctx, id)
-	if err != nil {
-		return nil, err
-	}
-	if _, _, err := AuthorizeWrite(ctx, influxdb.DashboardsResourceType, id, b.OrganizationID); err != nil {
-		return nil, err
-	}
-	return s.s.UpdateDashboard(ctx, id, upd)
-}
-
-// DeleteDashboard checks to see if the authorizer on context has write access to the dashboard provided.
-func (s *DashboardService) DeleteDashboard(ctx context.Context, id platform.ID) error {
-	b, err := s.s.FindDashboardByID(ctx, id)
-	if err != nil {
-		return err
-	}
-	if _, _, err := AuthorizeWrite(ctx, influxdb.DashboardsResourceType, id, b.OrganizationID); err != nil {
-		return err
-	}
-	return s.s.DeleteDashboard(ctx, id)
-}
-
-func (s *DashboardService) AddDashboardCell(ctx context.Context, id platform.ID, c *influxdb.Cell, opts influxdb.AddDashboardCellOptions) error {
-	b, err := s.s.FindDashboardByID(ctx, id)
-	if err != nil {
-		return err
-	}
-	if _, _, err := AuthorizeWrite(ctx, influxdb.DashboardsResourceType, id, b.OrganizationID); err != nil {
-		return err
-	}
-	return s.s.AddDashboardCell(ctx, id, c, opts)
-}
-
-func (s *DashboardService) RemoveDashboardCell(ctx context.Context, dashboardID platform.ID, cellID platform.ID) error {
-	b, err := s.s.FindDashboardByID(ctx, dashboardID)
-	if err != nil {
-		return err
-	}
-	if _, _, err := AuthorizeWrite(ctx, influxdb.DashboardsResourceType, dashboardID, b.OrganizationID); err != nil {
-		return err
-	}
-	return s.s.RemoveDashboardCell(ctx, dashboardID, cellID)
-}
-
-func (s *DashboardService) UpdateDashboardCell(ctx context.Context, dashboardID platform.ID, cellID platform.ID, upd influxdb.CellUpdate) (*influxdb.Cell, error) {
-	b, err := s.s.FindDashboardByID(ctx, dashboardID)
-	if err != nil {
-		return nil, err
-	}
-	if _, _, err := AuthorizeWrite(ctx, influxdb.DashboardsResourceType, dashboardID, b.OrganizationID); err != nil {
-		return nil, err
-	}
-	return s.s.UpdateDashboardCell(ctx, dashboardID, cellID, upd)
-}
-
-func (s *DashboardService) GetDashboardCellView(ctx context.Context, dashboardID platform.ID, cellID platform.ID) (*influxdb.View, error) {
-	b, err := s.s.FindDashboardByID(ctx, dashboardID)
-	if err != nil {
-		return nil, err
-	}
-	if _, _, err := AuthorizeRead(ctx, influxdb.DashboardsResourceType, dashboardID, b.OrganizationID); err != nil {
-		return nil, err
-	}
-	return s.s.GetDashboardCellView(ctx, dashboardID, cellID)
-}
-
-func (s *DashboardService) UpdateDashboardCellView(ctx context.Context, dashboardID platform.ID, cellID platform.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) {
-	b, err := s.s.FindDashboardByID(ctx, dashboardID)
-	if err != nil {
-		return nil, err
-	}
-	if _, _, err := AuthorizeWrite(ctx, influxdb.DashboardsResourceType, dashboardID, b.OrganizationID); err != nil {
-		return nil, err
-	}
-	return s.s.UpdateDashboardCellView(ctx, dashboardID, cellID, upd)
-}
-
-func (s *DashboardService) ReplaceDashboardCells(ctx context.Context, id platform.ID, c []*influxdb.Cell) error {
-	b, err := s.s.FindDashboardByID(ctx, id)
-	if err != nil {
-		return err
-	}
-	if _, _, err := AuthorizeWrite(ctx, influxdb.DashboardsResourceType, id, b.OrganizationID); err != nil {
-		return err
-	}
-	return s.s.ReplaceDashboardCells(ctx, id, c)
-}
diff --git a/authorizer/dashboard_test.go b/authorizer/dashboard_test.go
deleted file mode 100644
index f817b92c9dc..00000000000
--- a/authorizer/dashboard_test.go
+++ /dev/null
@@ -1,769 +0,0 @@
-package authorizer_test
-
-import (
-	"bytes"
-	"context"
-	"sort"
-	"testing"
-
-	"github.com/google/go-cmp/cmp"
-	"github.com/influxdata/influxdb/v2"
-	"github.com/influxdata/influxdb/v2/authorizer"
-	influxdbcontext "github.com/influxdata/influxdb/v2/context"
-	"github.com/influxdata/influxdb/v2/kit/platform"
-	"github.com/influxdata/influxdb/v2/kit/platform/errors"
-	"github.com/influxdata/influxdb/v2/mock"
-	
influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var dashboardCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Dashboard) []*influxdb.Dashboard { - out := append([]*influxdb.Dashboard(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func TestDashboardService_FindDashboardByID(t *testing.T) { - type fields struct { - DashboardService influxdb.DashboardService - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { - return &influxdb.Dashboard{ - ID: id, - OrganizationID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { - return &influxdb.Dashboard{ - ID: id, - OrganizationID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/dashboards/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewDashboardService(tt.fields.DashboardService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindDashboardByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestDashboardService_FindDashboards(t *testing.T) { - type fields struct { - DashboardService influxdb.DashboardService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - dashboards []*influxdb.Dashboard - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all dashboards", - fields: fields{ - DashboardService: &mock.DashboardService{ - FindDashboardsF: func(ctx context.Context, filter influxdb.DashboardFilter, opt influxdb.FindOptions) ([]*influxdb.Dashboard, int, error) { - return []*influxdb.Dashboard{ - { - ID: 1, - OrganizationID: 10, - }, - { - ID: 2, - OrganizationID: 10, - }, - { - ID: 3, - OrganizationID: 11, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - }, - }, - }, - wants: wants{ - dashboards: []*influxdb.Dashboard{ - { - ID: 1, - OrganizationID: 10, - }, - { - ID: 2, - OrganizationID: 10, - }, - { - ID: 3, - OrganizationID: 11, - }, - }, - }, - }, - { - name: "authorized to access a single orgs dashboards", - fields: 
fields{ - DashboardService: &mock.DashboardService{ - - FindDashboardsF: func(ctx context.Context, filter influxdb.DashboardFilter, opt influxdb.FindOptions) ([]*influxdb.Dashboard, int, error) { - return []*influxdb.Dashboard{ - { - ID: 1, - OrganizationID: 10, - }, - { - ID: 2, - OrganizationID: 10, - }, - { - ID: 3, - OrganizationID: 11, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - dashboards: []*influxdb.Dashboard{ - { - ID: 1, - OrganizationID: 10, - }, - { - ID: 2, - OrganizationID: 10, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewDashboardService(tt.fields.DashboardService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - dashboards, _, err := s.FindDashboards(ctx, influxdb.DashboardFilter{}, influxdb.FindOptions{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(dashboards, tt.wants.dashboards, dashboardCmpOptions...); diff != "" { - t.Errorf("dashboards are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestDashboardService_UpdateDashboard(t *testing.T) { - type fields struct { - DashboardService influxdb.DashboardService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update dashboard", - fields: fields{ - DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctc context.Context, id platform.ID) (*influxdb.Dashboard, error) { - return &influxdb.Dashboard{ - ID: 1, - OrganizationID: 10, - }, nil - }, - UpdateDashboardF: func(ctx context.Context, id platform.ID, upd influxdb.DashboardUpdate) (*influxdb.Dashboard, error) { - return &influxdb.Dashboard{ - ID: 1, - OrganizationID: 10, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update dashboard", - fields: fields{ - DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctc context.Context, id platform.ID) (*influxdb.Dashboard, error) { - return &influxdb.Dashboard{ - ID: 1, - OrganizationID: 10, - }, nil - }, - UpdateDashboardF: func(ctx context.Context, id platform.ID, upd influxdb.DashboardUpdate) (*influxdb.Dashboard, error) { - return &influxdb.Dashboard{ - ID: 1, - OrganizationID: 10, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/dashboards/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewDashboardService(tt.fields.DashboardService) - - ctx := 
context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - _, err := s.UpdateDashboard(ctx, tt.args.id, influxdb.DashboardUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestDashboardService_DeleteDashboard(t *testing.T) { - type fields struct { - DashboardService influxdb.DashboardService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete dashboard", - fields: fields{ - DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctc context.Context, id platform.ID) (*influxdb.Dashboard, error) { - return &influxdb.Dashboard{ - ID: 1, - OrganizationID: 10, - }, nil - }, - DeleteDashboardF: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete dashboard", - fields: fields{ - DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctc context.Context, id platform.ID) (*influxdb.Dashboard, error) { - return &influxdb.Dashboard{ - ID: 1, - OrganizationID: 10, - }, nil - }, - DeleteDashboardF: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/dashboards/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewDashboardService(tt.fields.DashboardService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.DeleteDashboard(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestDashboardService_CreateDashboard(t *testing.T) { - type fields struct { - DashboardService influxdb.DashboardService - } - type args struct { - permission influxdb.Permission - orgID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create dashboard", - fields: fields{ - DashboardService: &mock.DashboardService{ - CreateDashboardF: func(ctx context.Context, o *influxdb.Dashboard) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create dashboard", - fields: fields{ - DashboardService: &mock.DashboardService{ - CreateDashboardF: func(ctx context.Context, o *influxdb.Dashboard) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: 
"write", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/dashboards is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewDashboardService(tt.fields.DashboardService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.CreateDashboard(ctx, &influxdb.Dashboard{OrganizationID: tt.args.orgID}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestDashboardService_WriteDashboardCell(t *testing.T) { - type fields struct { - DashboardService influxdb.DashboardService - } - type args struct { - permission influxdb.Permission - orgID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to write dashboard cells/cell/view", - fields: fields{ - DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { - return &influxdb.Dashboard{ - ID: id, - OrganizationID: 10, - }, nil - }, - AddDashboardCellF: func(ctx context.Context, id platform.ID, c *influxdb.Cell, opts influxdb.AddDashboardCellOptions) error { - return nil - }, - RemoveDashboardCellF: func(ctx context.Context, id platform.ID, cid platform.ID) error { - return nil - }, - ReplaceDashboardCellsF: func(ctx context.Context, id platform.ID, cs []*influxdb.Cell) error { - return nil - }, - UpdateDashboardCellF: func(ctx context.Context, id platform.ID, cid platform.ID, upd influxdb.CellUpdate) (*influxdb.Cell, error) { - return &influxdb.Cell{}, nil - }, - UpdateDashboardCellViewF: func(ctx context.Context, id platform.ID, cid platform.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) { - return &influxdb.View{}, nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to write dashboard cells/cell/view", - fields: fields{ - DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { - return &influxdb.Dashboard{ - ID: id, - OrganizationID: 10, - }, nil - }, - AddDashboardCellF: func(ctx context.Context, id platform.ID, c *influxdb.Cell, opts influxdb.AddDashboardCellOptions) error { - return nil - }, - ReplaceDashboardCellsF: func(ctx context.Context, id platform.ID, cs []*influxdb.Cell) error { - return nil - }, - UpdateDashboardCellF: func(ctx context.Context, id platform.ID, cid platform.ID, upd influxdb.CellUpdate) (*influxdb.Cell, error) { - return &influxdb.Cell{}, nil - }, - RemoveDashboardCellF: func(ctx context.Context, id platform.ID, cid platform.ID) error { - return nil - }, - UpdateDashboardCellViewF: func(ctx context.Context, id platform.ID, cid platform.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) { - return &influxdb.View{}, nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - ID: influxdbtesting.IDPtr(100), - }, - }, - }, - wants: wants{ - 
err: &errors.Error{ - Msg: "write:orgs/000000000000000a/dashboards/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewDashboardService(tt.fields.DashboardService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.AddDashboardCell(ctx, 1, &influxdb.Cell{}, influxdb.AddDashboardCellOptions{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - err = s.RemoveDashboardCell(ctx, 1, 2) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - _, err = s.UpdateDashboardCellView(ctx, 1, 2, influxdb.ViewUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - _, err = s.UpdateDashboardCell(ctx, 1, 2, influxdb.CellUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - err = s.ReplaceDashboardCells(ctx, 1, []*influxdb.Cell{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestDashboardService_FindDashboardCellView(t *testing.T) { - type fields struct { - DashboardService influxdb.DashboardService - } - type args struct { - permission influxdb.Permission - orgID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to read dashboard cells/cell/view", - fields: fields{ - DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { - return &influxdb.Dashboard{ - ID: id, - OrganizationID: 10, - }, nil - }, - GetDashboardCellViewF: func(ctx context.Context, id platform.ID, cid platform.ID) (*influxdb.View, error) { - return &influxdb.View{}, nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to read dashboard cells/cell/view", - fields: fields{ - DashboardService: &mock.DashboardService{ - FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { - return &influxdb.Dashboard{ - ID: id, - OrganizationID: 10, - }, nil - }, - GetDashboardCellViewF: func(ctx context.Context, id platform.ID, cid platform.ID) (*influxdb.View, error) { - return &influxdb.View{}, nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - ID: influxdbtesting.IDPtr(100), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/dashboards/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewDashboardService(tt.fields.DashboardService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.GetDashboardCellView(ctx, 1, 1) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/authorizer/document.go b/authorizer/document.go deleted file mode 100644 index 4c027de1f4e..00000000000 --- a/authorizer/document.go +++ /dev/null @@ -1,99 +0,0 @@ -package authorizer - -import ( - "context" - "fmt" - - 
"github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.DocumentService = (*DocumentService)(nil) -var _ influxdb.DocumentStore = (*documentStore)(nil) - -type DocumentService struct { - s influxdb.DocumentService -} - -// NewDocumentService constructs an instance of an authorizing document service. -func NewDocumentService(s influxdb.DocumentService) influxdb.DocumentService { - return &DocumentService{ - s: s, - } -} - -func (s *DocumentService) CreateDocumentStore(ctx context.Context, name string) (influxdb.DocumentStore, error) { - ds, err := s.s.CreateDocumentStore(ctx, name) - if err != nil { - return nil, err - } - return &documentStore{s: ds}, nil -} - -func (s *DocumentService) FindDocumentStore(ctx context.Context, name string) (influxdb.DocumentStore, error) { - ds, err := s.s.FindDocumentStore(ctx, name) - if err != nil { - return nil, err - } - return &documentStore{s: ds}, nil -} - -type documentStore struct { - s influxdb.DocumentStore -} - -func newDocumentPermission(a influxdb.Action, orgID platform.ID, did *platform.ID) (*influxdb.Permission, error) { - if did != nil { - return influxdb.NewPermissionAtID(*did, a, influxdb.DocumentsResourceType, orgID) - } - return influxdb.NewPermission(a, influxdb.DocumentsResourceType, orgID) -} - -func toPerms(action influxdb.Action, orgs map[platform.ID]influxdb.UserType, did *platform.ID) ([]influxdb.Permission, error) { - ps := make([]influxdb.Permission, 0, len(orgs)) - for orgID := range orgs { - p, err := newDocumentPermission(action, orgID, did) - if err != nil { - return nil, err - } - ps = append(ps, *p) - } - return ps, nil -} - -func (s *documentStore) CreateDocument(ctx context.Context, d *influxdb.Document) error { - if len(d.Organizations) == 0 { - return fmt.Errorf("cannot authorize document creation without any orgID") - } - ps, err := toPerms(influxdb.WriteAction, d.Organizations, nil) - if err != nil { - return err - } - if err := IsAllowedAny(ctx, ps); err != nil { - return err - } - return s.s.CreateDocument(ctx, d) -} - -func (s *documentStore) FindDocument(ctx context.Context, id platform.ID) (*influxdb.Document, error) { - d, err := s.s.FindDocument(ctx, id) - if err != nil { - return nil, err - } - ps, err := toPerms(influxdb.ReadAction, d.Organizations, &id) - if err != nil { - return nil, err - } - if err := IsAllowedAny(ctx, ps); err != nil { - return nil, err - } - return d, nil -} - -func (s *documentStore) FindDocuments(ctx context.Context, oid platform.ID) ([]*influxdb.Document, error) { - if _, _, err := AuthorizeOrgReadResource(ctx, influxdb.DocumentsResourceType, oid); err != nil { - return nil, err - } - - return s.s.FindDocuments(ctx, oid) -} diff --git a/authorizer/label.go b/authorizer/label.go deleted file mode 100644 index d1fafd2f718..00000000000 --- a/authorizer/label.go +++ /dev/null @@ -1,135 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.LabelService = (*LabelService)(nil) - -// LabelService wraps a influxdb.LabelService and authorizes actions -// against it appropriately. -type LabelService struct { - s influxdb.LabelService - orgIDResolver OrgIDResolver -} - -// NewLabelServiceWithOrg constructs an instance of an authorizing label service. -// Replaces NewLabelService. 
-func NewLabelServiceWithOrg(s influxdb.LabelService, orgIDResolver OrgIDResolver) *LabelService { - return &LabelService{ - s: s, - orgIDResolver: orgIDResolver, - } -} - -// FindLabelByID checks to see if the authorizer on context has read access to the label id provided. -func (s *LabelService) FindLabelByID(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - l, err := s.s.FindLabelByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, influxdb.LabelsResourceType, id, l.OrgID); err != nil { - return nil, err - } - return l, nil -} - -// FindLabels retrieves all labels that match the provided filter and then filters the list down to only the resources that are authorized. -func (s *LabelService) FindLabels(ctx context.Context, filter influxdb.LabelFilter, opt ...influxdb.FindOptions) ([]*influxdb.Label, error) { - // TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data - // will likely be expensive. - ls, err := s.s.FindLabels(ctx, filter, opt...) - if err != nil { - return nil, err - } - ls, _, err = AuthorizeFindLabels(ctx, ls) - return ls, err -} - -// FindResourceLabels retrieves all labels belonging to the filtering resource if the authorizer on context has read access to it. -// Then it filters the list down to only the labels that are authorized. -func (s *LabelService) FindResourceLabels(ctx context.Context, filter influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - if err := filter.ResourceType.Valid(); err != nil { - return nil, err - } - - orgID, err := s.orgIDResolver.FindResourceOrganizationID(ctx, filter.ResourceType, filter.ResourceID) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, filter.ResourceType, filter.ResourceID, orgID); err != nil { - return nil, err - } - - ls, err := s.s.FindResourceLabels(ctx, filter) - if err != nil { - return nil, err - } - ls, _, err = AuthorizeFindLabels(ctx, ls) - return ls, err -} - -// CreateLabel checks to see if the authorizer on context has write access to the new label's org. -func (s *LabelService) CreateLabel(ctx context.Context, l *influxdb.Label) error { - if _, _, err := AuthorizeCreate(ctx, influxdb.LabelsResourceType, l.OrgID); err != nil { - return err - } - return s.s.CreateLabel(ctx, l) -} - -// CreateLabelMapping checks to see if the authorizer on context has write access to the label and the resource contained by the label mapping in creation. -func (s *LabelService) CreateLabelMapping(ctx context.Context, m *influxdb.LabelMapping) error { - l, err := s.s.FindLabelByID(ctx, m.LabelID) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.LabelsResourceType, m.LabelID, l.OrgID); err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, m.ResourceType, m.ResourceID, l.OrgID); err != nil { - return err - } - return s.s.CreateLabelMapping(ctx, m) -} - -// UpdateLabel checks to see if the authorizer on context has write access to the label provided. -func (s *LabelService) UpdateLabel(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { - l, err := s.s.FindLabelByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.LabelsResourceType, l.ID, l.OrgID); err != nil { - return nil, err - } - return s.s.UpdateLabel(ctx, id, upd) -} - -// DeleteLabel checks to see if the authorizer on context has write access to the label provided. 
-func (s *LabelService) DeleteLabel(ctx context.Context, id platform.ID) error { - l, err := s.s.FindLabelByID(ctx, id) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.LabelsResourceType, l.ID, l.OrgID); err != nil { - return err - } - return s.s.DeleteLabel(ctx, id) -} - -// DeleteLabelMapping checks to see if the authorizer on context has write access to the label and the resource of the label mapping to delete. -func (s *LabelService) DeleteLabelMapping(ctx context.Context, m *influxdb.LabelMapping) error { - l, err := s.s.FindLabelByID(ctx, m.LabelID) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.LabelsResourceType, m.LabelID, l.OrgID); err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, m.ResourceType, m.ResourceID, l.OrgID); err != nil { - return err - } - return s.s.DeleteLabelMapping(ctx, m) -} diff --git a/authorizer/label_test.go b/authorizer/label_test.go deleted file mode 100644 index a03173dd889..00000000000 --- a/authorizer/label_test.go +++ /dev/null @@ -1,1130 +0,0 @@ -package authorizer_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -const ( - orgOneID = "020f755c3c083000" -) - -var ( - orgOneInfluxID = influxdbtesting.MustIDBase16(orgOneID) - orgSvc = &mock.OrganizationService{ - FindResourceOrganizationIDF: func(_ context.Context, _ influxdb.ResourceType, _ platform.ID) (platform.ID, error) { - return orgOneInfluxID, nil - }, - } -) - -var labelCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Label) []*influxdb.Label { - out := append([]*influxdb.Label(nil), in...) 
// Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func TestLabelService_FindLabelByID(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: id, - OrgID: orgOneInfluxID, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: id, - OrgID: orgOneInfluxID, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewLabelServiceWithOrg(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindLabelByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestLabelService_FindLabels(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - labels []*influxdb.Label - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all labels", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelsFn: func(ctx context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, - }, - }, - { - name: "authorized to access a single label", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelsFn: func(ctx context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: 
influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - }, - }, - }, - { - name: "unable to access labels", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelsFn: func(ctx context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - // fixme(leodido) > should we return error in this case? - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewLabelServiceWithOrg(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - labels, err := s.FindLabels(ctx, influxdb.LabelFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(labels, tt.wants.labels, labelCmpOptions...); diff != "" { - t.Errorf("labels are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestLabelService_UpdateLabel(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update label", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - UpdateLabelFn: func(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update label", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - UpdateLabelFn: func(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewLabelServiceWithOrg(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, 
tt.args.permissions)) - - _, err := s.UpdateLabel(ctx, tt.args.id, influxdb.LabelUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestLabelService_DeleteLabel(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete label", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - DeleteLabelFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(1), - OrgID: influxdbtesting.IDPtr(orgOneInfluxID), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete label", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - DeleteLabelFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(1), - OrgID: influxdbtesting.IDPtr(orgOneInfluxID), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewLabelServiceWithOrg(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.DeleteLabel(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestLabelService_CreateLabel(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "unauthorized to create label with read only permission", - fields: fields{ - LabelService: &mock.LabelService{ - CreateLabelFn: func(ctx context.Context, l *influxdb.Label) error { - return nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - ID: influxdbtesting.IDPtr(orgOneInfluxID), - Type: influxdb.OrgsResourceType, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/020f755c3c083000/labels is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - { - name: "unauthorized to create label with wrong write permission", - fields: fields{ - LabelService: &mock.LabelService{ - CreateLabelFn: func(ctx context.Context, b *influxdb.Label) error { - return nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - }, - }, - }, - wants: 
wants{ - err: &errors.Error{ - Msg: "write:orgs/020f755c3c083000/labels is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - - { - name: "authorized to create label", - fields: fields{ - LabelService: &mock.LabelService{ - CreateLabelFn: func(ctx context.Context, l *influxdb.Label) error { - return nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - OrgID: influxdbtesting.IDPtr(orgOneInfluxID), - Type: influxdb.LabelsResourceType, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewLabelServiceWithOrg(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.CreateLabel(ctx, &influxdb.Label{Name: "name", OrgID: orgOneInfluxID}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestLabelService_FindResourceLabels(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - filter influxdb.LabelMappingFilter - permissions []influxdb.Permission - } - type wants struct { - err error - labels []*influxdb.Label - } - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all labels belonging to a resource", - fields: fields{ - LabelService: &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, nil - }, - }, - }, - args: args{ - filter: influxdb.LabelMappingFilter{ - ResourceID: 10, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: nil, - labels: []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, - }, - }, - { - name: "authorized to access a single label", - fields: fields{ - LabelService: &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, nil - }, - }, - }, - args: args{ - filter: influxdb.LabelMappingFilter{ - ResourceID: 10, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(3), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: nil, - labels: []*influxdb.Label{ - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, - }, - }, - { - name: "unable to access labels when missing read permission on labels", - fields: fields{ - LabelService: &mock.LabelService{ - 
FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, nil - }, - }, - }, - args: args{ - filter: influxdb.LabelMappingFilter{ - ResourceID: 10, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - // fixme(leodido) > should we return error in this case? - }, - }, - { - name: "unable to access labels when missing read permission on filtering resource", - fields: fields{ - LabelService: &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, nil - }, - }, - }, - args: args{ - filter: influxdb.LabelMappingFilter{ - ResourceID: 10, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/020f755c3c083000/buckets/000000000000000a is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewLabelServiceWithOrg(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - labels, err := s.FindResourceLabels(ctx, tt.args.filter) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(labels, tt.wants.labels, labelCmpOptions...); diff != "" { - t.Errorf("labels are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestLabelService_CreateLabelMapping(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - mapping influxdb.LabelMapping - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create label mapping", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - CreateLabelMappingFn: func(ctx context.Context, lm *influxdb.LabelMapping) error { - return nil - }, - }, - }, - args: args{ - mapping: influxdb.LabelMapping{ - LabelID: 1, - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create label mapping for resources on which the user does not have write access", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform.ID) 
(*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - CreateLabelMappingFn: func(ctx context.Context, lm *influxdb.LabelMapping) error { - return nil - }, - }, - }, - args: args{ - mapping: influxdb.LabelMapping{ - LabelID: 1, - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EUnauthorized, - Msg: "write:orgs/020f755c3c083000/buckets/0000000000000002 is unauthorized", - }, - }, - }, - { - name: "unauthorized to create label mapping", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - CreateLabelMappingFn: func(ctx context.Context, lm *influxdb.LabelMapping) error { - return nil - }, - }, - }, - args: args{ - mapping: influxdb.LabelMapping{ - LabelID: 1, - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewLabelServiceWithOrg(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.CreateLabelMapping(ctx, &tt.args.mapping) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestLabelService_DeleteLabelMapping(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - mapping influxdb.LabelMapping - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete label mapping", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - DeleteLabelMappingFn: func(ctx context.Context, m *influxdb.LabelMapping) error { - return nil - }, - }, - }, - args: args{ - mapping: influxdb.LabelMapping{ - LabelID: 1, - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete label mapping containing a resources on which the user does not have write access", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - DeleteLabelMappingFn: func(ctx context.Context, m *influxdb.LabelMapping) error { - return nil - }, - }, - }, - args: args{ - 
mapping: influxdb.LabelMapping{ - LabelID: 1, - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EUnauthorized, - Msg: "write:orgs/020f755c3c083000/buckets/0000000000000002 is unauthorized", - }, - }, - }, - { - name: "unauthorized to delete label mapping", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - DeleteLabelMappingFn: func(ctx context.Context, m *influxdb.LabelMapping) error { - return nil - }, - }, - }, - args: args{ - mapping: influxdb.LabelMapping{ - LabelID: 1, - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewLabelServiceWithOrg(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.DeleteLabelMapping(ctx, &tt.args.mapping) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/authorizer/notebook.go b/authorizer/notebook.go deleted file mode 100644 index 233c3f8f977..00000000000 --- a/authorizer/notebook.go +++ /dev/null @@ -1,83 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.NotebookService = (*NotebookService)(nil) - -// NotebookService wraps an influxdb.NotebookService and authorizes actions -// against it appropriately. -type NotebookService struct { - s influxdb.NotebookService -} - -// NewNotebookService constructs an instance of an authorizing check service. -func NewNotebookService(s influxdb.NotebookService) *NotebookService { - return &NotebookService{ - s: s, - } -} - -// GetNotebook checks to see if the authorizer on context has read access to the id provided. -func (s *NotebookService) GetNotebook(ctx context.Context, id platform.ID) (*influxdb.Notebook, error) { - nb, err := s.s.GetNotebook(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, influxdb.NotebooksResourceType, id, nb.OrgID); err != nil { - return nil, err - } - return nb, nil -} - -// CreateNotebook checks to see if the authorizer on context has write access for notebooks for organization id provided in the notebook body. -func (s *NotebookService) CreateNotebook(ctx context.Context, create *influxdb.NotebookReqBody) (*influxdb.Notebook, error) { - if _, _, err := AuthorizeCreate(ctx, influxdb.NotebooksResourceType, create.OrgID); err != nil { - return nil, err - } - - return s.s.CreateNotebook(ctx, create) -} - -// UpdateNotebook checks to see if the authorizer on context has write access to the notebook provided. 
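The notebook wrapper defined above follows the same fetch-then-authorize pattern as the label service: it only delegates to the underlying store after an Authorize* check against the notebook's org succeeds. As a rough sketch of how such a wrapper is composed and used by a caller (this is an illustrative example, not part of the deleted file; the package name, function name, and `backing` parameter are hypothetical stand-ins):

package example // hypothetical example package

import (
	"context"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/authorizer"
	"github.com/influxdata/influxdb/v2/kit/platform"
)

// useAuthorizedNotebooks wraps a backing notebook store with the authorizer
// decorator, so every call is permission-checked against the influxdb.Authorizer
// stored on ctx before it reaches the store.
func useAuthorizedNotebooks(ctx context.Context, backing influxdb.NotebookService, id platform.ID) (*influxdb.Notebook, error) {
	svc := authorizer.NewNotebookService(backing)
	// Requires read access to orgs/<orgID>/notebooks/<id>; otherwise an
	// unauthorized error is returned and the backing store is never queried.
	return svc.GetNotebook(ctx, id)
}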
-func (s *NotebookService) UpdateNotebook(ctx context.Context, id platform.ID, update *influxdb.NotebookReqBody) (*influxdb.Notebook, error) { - nb, err := s.s.GetNotebook(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.NotebooksResourceType, id, nb.OrgID); err != nil { - return nil, err - } - return s.s.UpdateNotebook(ctx, id, update) -} - -// DeleteNotebook checks to see if the authorizer on context has write access to the notebook provided. -func (s *NotebookService) DeleteNotebook(ctx context.Context, id platform.ID) error { - nb, err := s.s.GetNotebook(ctx, id) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.NotebooksResourceType, id, nb.OrgID); err != nil { - return err - } - return s.s.DeleteNotebook(ctx, id) -} - -// ListNotebooks checks to see if the requesting user has read access to the provided org and returns a list of notebooks for that org if so. -func (s *NotebookService) ListNotebooks(ctx context.Context, filter influxdb.NotebookListFilter) ([]*influxdb.Notebook, error) { - if _, _, err := AuthorizeOrgReadResource(ctx, influxdb.NotebooksResourceType, filter.OrgID); err != nil { - return nil, err - } - - ns, err := s.s.ListNotebooks(ctx, filter) - if err != nil { - return nil, err - } - - ns, _, err = AuthorizeFindNotebooks(ctx, ns) - return ns, err -} diff --git a/authorizer/notebook_test.go b/authorizer/notebook_test.go deleted file mode 100644 index 7842ec3e91d..00000000000 --- a/authorizer/notebook_test.go +++ /dev/null @@ -1,315 +0,0 @@ -package authorizer_test - -import ( - "context" - "fmt" - "testing" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" -) - -var ( - orgID1 = influxdbtesting.IDPtr(1) - orgID2 = influxdbtesting.IDPtr(10) - nbID = influxdbtesting.IDPtr(2) -) - -func Test_GetNotebook(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - notebookOrg *platform.ID - permissionOrg *platform.ID - wantRet *influxdb.Notebook - wantErr error - }{ - { - "authorized to access notebook by id", - orgID1, - orgID1, - newTestNotebook(*orgID1), - nil, - }, - { - "not authorized to access notebook by id", - orgID1, - orgID2, - nil, - &errors.Error{ - Msg: fmt.Sprintf("read:orgs/%s/notebooks/%s is unauthorized", orgID1, nbID), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockNotebookService(ctrlr) - s := authorizer.NewNotebookService(svc) - - svc.EXPECT(). - GetNotebook(gomock.Any(), *nbID). 
- Return(newTestNotebook(*orgID1), nil) - - perm := newTestNotebooksPermission(influxdb.ReadAction, tt.permissionOrg) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - got, err := s.GetNotebook(ctx, *nbID) - require.Equal(t, tt.wantErr, err) - require.Equal(t, tt.wantRet, got) - }) - } -} - -func Test_CreateNotebook(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - notebookOrg *platform.ID - permissionOrg *platform.ID - wantRet *influxdb.Notebook - wantErr error - }{ - { - "authorized to create a notebook with the given org", - orgID1, - orgID1, - newTestNotebook(*orgID1), - nil, - }, - { - "not authorized to create a notebook with the given org", - orgID1, - orgID2, - nil, - &errors.Error{ - Msg: fmt.Sprintf("write:orgs/%s/notebooks is unauthorized", orgID1), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockNotebookService(ctrlr) - s := authorizer.NewNotebookService(svc) - - perm := newTestNotebooksPermission(influxdb.WriteAction, tt.permissionOrg) - nb := newTestReqBody(*tt.notebookOrg) - - if tt.wantErr == nil { - svc.EXPECT(). - CreateNotebook(gomock.Any(), nb). - Return(tt.wantRet, nil) - } - - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - got, err := s.CreateNotebook(ctx, nb) - require.Equal(t, tt.wantErr, err) - require.Equal(t, tt.wantRet, got) - }) - } -} - -func Test_UpdateNotebook(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - notebookOrg *platform.ID - permissionOrg *platform.ID - wantRet *influxdb.Notebook - wantErr error - }{ - { - "authorized to update notebook by id", - orgID1, - orgID1, - newTestNotebook(*orgID1), - nil, - }, - { - "not authorized to update notebook by id", - orgID1, - orgID2, - nil, - &errors.Error{ - Msg: fmt.Sprintf("write:orgs/%s/notebooks/%s is unauthorized", orgID1, nbID), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockNotebookService(ctrlr) - s := authorizer.NewNotebookService(svc) - - svc.EXPECT(). - GetNotebook(gomock.Any(), *nbID). - Return(newTestNotebook(*tt.notebookOrg), nil) - - perm := newTestNotebooksPermission(influxdb.WriteAction, tt.permissionOrg) - nb := newTestReqBody(*tt.notebookOrg) - - if tt.wantErr == nil { - svc.EXPECT(). - UpdateNotebook(gomock.Any(), *nbID, nb). 
- Return(tt.wantRet, nil) - } - - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - got, err := s.UpdateNotebook(ctx, *nbID, nb) - require.Equal(t, tt.wantErr, err) - require.Equal(t, tt.wantRet, got) - }) - } -} - -func Test_DeleteNotebook(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - notebookOrg *platform.ID - permissionOrg *platform.ID - wantErr error - }{ - { - "authorized to delete notebook by id", - orgID1, - orgID1, - nil, - }, - { - "not authorized to delete notebook by id", - orgID1, - orgID2, - &errors.Error{ - Msg: fmt.Sprintf("write:orgs/%s/notebooks/%s is unauthorized", orgID1, nbID), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockNotebookService(ctrlr) - s := authorizer.NewNotebookService(svc) - - svc.EXPECT(). - GetNotebook(gomock.Any(), *nbID). - Return(newTestNotebook(*tt.notebookOrg), nil) - - perm := newTestNotebooksPermission(influxdb.WriteAction, tt.permissionOrg) - - if tt.wantErr == nil { - svc.EXPECT(). - DeleteNotebook(gomock.Any(), *nbID). - Return(nil) - } - - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - got := s.DeleteNotebook(ctx, *nbID) - require.Equal(t, tt.wantErr, got) - }) - } -} - -func Test_ListNotebooks(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - notebookOrg *platform.ID - permissionOrg *platform.ID - wantRet []*influxdb.Notebook - wantErr error - }{ - { - "authorized to list notebooks for the specified org", - orgID1, - orgID1, - []*influxdb.Notebook{}, - nil, - }, - { - "not authorized to list notebooks for the specified org", - orgID1, - orgID2, - nil, - &errors.Error{ - Msg: fmt.Sprintf("read:orgs/%s/notebooks is unauthorized", orgID1), - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockNotebookService(ctrlr) - s := authorizer.NewNotebookService(svc) - - perm := newTestNotebooksPermission(influxdb.ReadAction, tt.permissionOrg) - filter := influxdb.NotebookListFilter{OrgID: *tt.notebookOrg} - - if tt.wantErr == nil { - svc.EXPECT(). - ListNotebooks(gomock.Any(), filter). 
- Return(tt.wantRet, nil) - } - - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm})) - got, err := s.ListNotebooks(ctx, filter) - require.Equal(t, tt.wantErr, err) - require.Equal(t, tt.wantRet, got) - }) - } -} - -func newTestNotebook(orgID platform.ID) *influxdb.Notebook { - return &influxdb.Notebook{ - OrgID: orgID, - ID: *nbID, - Name: "test notebook", - Spec: influxdb.NotebookSpec{ - "hello": "goodbye", - }, - } -} - -func newTestReqBody(orgID platform.ID) *influxdb.NotebookReqBody { - return &influxdb.NotebookReqBody{ - OrgID: orgID, - Name: "testing", - Spec: influxdb.NotebookSpec{ - "hello": "goodbye", - }, - } -} - -func newTestNotebooksPermission(action influxdb.Action, orgID *platform.ID) influxdb.Permission { - return influxdb.Permission{ - Action: action, - Resource: influxdb.Resource{ - Type: influxdb.NotebooksResourceType, - OrgID: orgID, - }, - } -} diff --git a/authorizer/notification_endpoint.go b/authorizer/notification_endpoint.go deleted file mode 100644 index c316e7b2ff8..00000000000 --- a/authorizer/notification_endpoint.go +++ /dev/null @@ -1,107 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var _ influxdb.NotificationEndpointService = (*NotificationEndpointService)(nil) - -// NotificationEndpointService wraps a influxdb.NotificationEndpointService and authorizes actions -// against it appropriately. -type NotificationEndpointService struct { - s influxdb.NotificationEndpointService - influxdb.UserResourceMappingService - influxdb.OrganizationService -} - -// NewNotificationEndpointService constructs an instance of an authorizing notification endpoint service. -func NewNotificationEndpointService( - s influxdb.NotificationEndpointService, - urm influxdb.UserResourceMappingService, - org influxdb.OrganizationService, -) *NotificationEndpointService { - return &NotificationEndpointService{ - s: s, - UserResourceMappingService: urm, - OrganizationService: org, - } -} - -// FindNotificationEndpointByID checks to see if the authorizer on context has read access to the id provided. -func (s *NotificationEndpointService) FindNotificationEndpointByID(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - edp, err := s.s.FindNotificationEndpointByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, influxdb.NotificationEndpointResourceType, edp.GetID(), edp.GetOrgID()); err != nil { - return nil, err - } - return edp, nil -} - -// FindNotificationEndpoints retrieves all notification endpoints that match the provided filter and then filters the list down to only the resources that are authorized. -func (s *NotificationEndpointService) FindNotificationEndpoints(ctx context.Context, filter influxdb.NotificationEndpointFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) { - // TODO: This is a temporary fix as to not fetch the entire collection when no filter is provided. - if !filter.UserID.Valid() && filter.OrgID == nil { - return nil, 0, &errors.Error{ - Code: errors.EUnauthorized, - Msg: "cannot process a request without a org or user filter", - } - } - - // TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data - // will likely be expensive. 
- edps, _, err := s.s.FindNotificationEndpoints(ctx, filter, opt...) - if err != nil { - return nil, 0, err - } - return AuthorizeFindNotificationEndpoints(ctx, edps) -} - -// CreateNotificationEndpoint checks to see if the authorizer on context has write access to the global notification endpoint resource. -func (s *NotificationEndpointService) CreateNotificationEndpoint(ctx context.Context, edp influxdb.NotificationEndpoint, userID platform.ID) error { - if _, _, err := AuthorizeCreate(ctx, influxdb.NotificationEndpointResourceType, edp.GetOrgID()); err != nil { - return err - } - return s.s.CreateNotificationEndpoint(ctx, edp, userID) -} - -// UpdateNotificationEndpoint checks to see if the authorizer on context has write access to the notification endpoint provided. -func (s *NotificationEndpointService) UpdateNotificationEndpoint(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error) { - edp, err := s.FindNotificationEndpointByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.NotificationEndpointResourceType, edp.GetID(), edp.GetOrgID()); err != nil { - return nil, err - } - return s.s.UpdateNotificationEndpoint(ctx, id, upd, userID) -} - -// PatchNotificationEndpoint checks to see if the authorizer on context has write access to the notification endpoint provided. -func (s *NotificationEndpointService) PatchNotificationEndpoint(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) { - edp, err := s.FindNotificationEndpointByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.NotificationEndpointResourceType, edp.GetID(), edp.GetOrgID()); err != nil { - return nil, err - } - return s.s.PatchNotificationEndpoint(ctx, id, upd) -} - -// DeleteNotificationEndpoint checks to see if the authorizer on context has write access to the notification endpoint provided. 
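For callers, a failed check surfaces as an *errors.Error whose Code is errors.EUnauthorized and whose message looks like "write:orgs/<orgID>/notificationEndpoints/<id> is unauthorized", which is what the endpoint tests later in this diff assert against. A minimal sketch of detecting that case, reusing the kit/platform/errors import already shown in this file (the `isUnauthorized` helper name is hypothetical):

// isUnauthorized reports whether err is the unauthorized error shape these
// wrappers return, based on the error values the tests in this diff compare against.
func isUnauthorized(err error) bool {
	if e, ok := err.(*errors.Error); ok {
		return e.Code == errors.EUnauthorized
	}
	return false
}

// Example use after an update attempt:
//	if _, err := svc.UpdateNotificationEndpoint(ctx, id, upd, userID); isUnauthorized(err) {
//		// reject the request instead of retrying
//	}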
-func (s *NotificationEndpointService) DeleteNotificationEndpoint(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error) { - edp, err := s.FindNotificationEndpointByID(ctx, id) - if err != nil { - return nil, 0, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.NotificationEndpointResourceType, edp.GetID(), edp.GetOrgID()); err != nil { - return nil, 0, err - } - return s.s.DeleteNotificationEndpoint(ctx, id) -} diff --git a/authorizer/notification_endpoint_test.go b/authorizer/notification_endpoint_test.go deleted file mode 100644 index 04fb3b098c5..00000000000 --- a/authorizer/notification_endpoint_test.go +++ /dev/null @@ -1,679 +0,0 @@ -package authorizer_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification/endpoint" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var notificationEndpointCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []influxdb.NotificationEndpoint) []influxdb.NotificationEndpoint { - out := append([]influxdb.NotificationEndpoint(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].GetID().String() > out[j].GetID().String() - }) - return out - }), -} - -func TestNotificationEndpointService_FindNotificationEndpointByID(t *testing.T) { - type fields struct { - NotificationEndpointService influxdb.NotificationEndpointService - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id with org", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - orgID := platform.ID(10) - return &endpoint.Slack{ - Base: endpoint.Base{ - ID: &id, - OrgID: &orgID, - }, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationEndpointResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - orgID := platform.ID(10) - return &endpoint.Slack{ - Base: endpoint.Base{ - ID: &id, - OrgID: &orgID, - }, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationEndpointResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/notificationEndpoints/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range 
tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewNotificationEndpointService(tt.fields.NotificationEndpointService, mock.NewUserResourceMappingService(), mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindNotificationEndpointByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestNotificationEndpointService_FindNotificationEndpoints(t *testing.T) { - type fields struct { - NotificationEndpointService influxdb.NotificationEndpointService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - notificationEndpoints []influxdb.NotificationEndpoint - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access a single orgs notificationEndpoints", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointsF: func(ctx context.Context, filter influxdb.NotificationEndpointFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) { - return []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(1), - OrgID: idPtr(10), - }, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(1), - OrgID: idPtr(10), - }, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationEndpointResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(1), - OrgID: idPtr(10), - }, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(1), - OrgID: idPtr(10), - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewNotificationEndpointService(tt.fields.NotificationEndpointService, - mock.NewUserResourceMappingService(), - mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - oid := platform.ID(10) - edps, _, err := s.FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{OrgID: &oid}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(edps, tt.wants.notificationEndpoints, notificationEndpointCmpOptions...); diff != "" { - t.Errorf("notificationEndpoints are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestNotificationEndpointService_UpdateNotificationEndpoint(t *testing.T) { - type fields struct { - NotificationEndpointService influxdb.NotificationEndpointService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update notificationEndpoint with org owner", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - return &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(1), - OrgID: idPtr(10), - }, - }, nil - }, - UpdateNotificationEndpointF: func(ctx context.Context, id platform.ID, upd 
influxdb.NotificationEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error) { - return &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(1), - OrgID: idPtr(10), - }, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationEndpointResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationEndpointResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update notificationEndpoint", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - return &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(1), - OrgID: idPtr(10), - }, - }, nil - }, - UpdateNotificationEndpointF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error) { - return &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(1), - OrgID: idPtr(10), - }, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationEndpointResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/notificationEndpoints/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewNotificationEndpointService(tt.fields.NotificationEndpointService, - mock.NewUserResourceMappingService(), - mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - _, err := s.UpdateNotificationEndpoint(ctx, tt.args.id, &endpoint.Slack{}, platform.ID(1)) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestNotificationEndpointService_PatchNotificationEndpoint(t *testing.T) { - type fields struct { - NotificationEndpointService influxdb.NotificationEndpointService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to patch notificationEndpoint", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - return &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(1), - OrgID: idPtr(10), - }, - }, nil - }, - PatchNotificationEndpointF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) { - return &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(1), - OrgID: idPtr(10), - }, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationEndpointResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: 
influxdb.NotificationEndpointResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to patch notificationEndpoint", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - return &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(1), - OrgID: idPtr(10), - }, - }, nil - }, - PatchNotificationEndpointF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) { - return &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(1), - OrgID: idPtr(10), - }, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationEndpointResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/notificationEndpoints/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewNotificationEndpointService(tt.fields.NotificationEndpointService, mock.NewUserResourceMappingService(), - mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - _, err := s.PatchNotificationEndpoint(ctx, tt.args.id, influxdb.NotificationEndpointUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestNotificationEndpointService_DeleteNotificationEndpoint(t *testing.T) { - type fields struct { - NotificationEndpointService influxdb.NotificationEndpointService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete notificationEndpoint", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - return &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(1), - OrgID: idPtr(10), - }, - }, nil - }, - DeleteNotificationEndpointF: func(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error) { - return nil, 0, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationEndpointResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationEndpointResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete notificationEndpoint", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - return &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(1), - OrgID: idPtr(10), - }, - }, nil - }, - DeleteNotificationEndpointF: func(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error) { - return nil, 0, nil - }, - }, - }, - 
args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationEndpointResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/notificationEndpoints/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewNotificationEndpointService(tt.fields.NotificationEndpointService, mock.NewUserResourceMappingService(), - mock.NewOrganizationService(), - ) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - _, _, err := s.DeleteNotificationEndpoint(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestNotificationEndpointService_CreateNotificationEndpoint(t *testing.T) { - type fields struct { - NotificationEndpointService influxdb.NotificationEndpointService - } - type args struct { - permission influxdb.Permission - orgID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create notificationEndpoint", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - CreateNotificationEndpointF: func(ctx context.Context, tc influxdb.NotificationEndpoint, userID platform.ID) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationEndpointResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "authorized to create notificationEndpoint with org owner", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - CreateNotificationEndpointF: func(ctx context.Context, tc influxdb.NotificationEndpoint, userID platform.ID) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationEndpointResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create notificationEndpoint", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - CreateNotificationEndpointF: func(ctx context.Context, tc influxdb.NotificationEndpoint, userID platform.ID) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationEndpointResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/notificationEndpoints is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewNotificationEndpointService(tt.fields.NotificationEndpointService, - mock.NewUserResourceMappingService(), - mock.NewOrganizationService(), - ) - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.CreateNotificationEndpoint(ctx, &endpoint.Slack{ - Base: endpoint.Base{ - OrgID: 
idPtr(tt.args.orgID)}, - }, platform.ID(1)) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func idPtr(id platform.ID) *platform.ID { - return &id -} diff --git a/authorizer/notification_rule.go b/authorizer/notification_rule.go deleted file mode 100644 index 1a586d70d15..00000000000 --- a/authorizer/notification_rule.go +++ /dev/null @@ -1,94 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.NotificationRuleStore = (*NotificationRuleStore)(nil) - -// NotificationRuleStore wraps a influxdb.NotificationRuleStore and authorizes actions -// against it appropriately. -type NotificationRuleStore struct { - s influxdb.NotificationRuleStore - influxdb.UserResourceMappingService - influxdb.OrganizationService -} - -// NewNotificationRuleStore constructs an instance of an authorizing notification rule service. -func NewNotificationRuleStore(s influxdb.NotificationRuleStore, urm influxdb.UserResourceMappingService, org influxdb.OrganizationService) *NotificationRuleStore { - return &NotificationRuleStore{ - s: s, - UserResourceMappingService: urm, - OrganizationService: org, - } -} - -// FindNotificationRuleByID checks to see if the authorizer on context has read access to the id provided. -func (s *NotificationRuleStore) FindNotificationRuleByID(ctx context.Context, id platform.ID) (influxdb.NotificationRule, error) { - nr, err := s.s.FindNotificationRuleByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, influxdb.NotificationRuleResourceType, nr.GetID(), nr.GetOrgID()); err != nil { - return nil, err - } - return nr, nil -} - -// FindNotificationRules retrieves all notification rules that match the provided filter and then filters the list down to only the resources that are authorized. -func (s *NotificationRuleStore) FindNotificationRules(ctx context.Context, filter influxdb.NotificationRuleFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationRule, int, error) { - // TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data - // will likely be expensive. - nrs, _, err := s.s.FindNotificationRules(ctx, filter, opt...) - if err != nil { - return nil, 0, err - } - return AuthorizeFindNotificationRules(ctx, nrs) -} - -// CreateNotificationRule checks to see if the authorizer on context has write access to the global notification rule resource. -func (s *NotificationRuleStore) CreateNotificationRule(ctx context.Context, nr influxdb.NotificationRuleCreate, userID platform.ID) error { - if _, _, err := AuthorizeCreate(ctx, influxdb.NotificationRuleResourceType, nr.GetOrgID()); err != nil { - return err - } - return s.s.CreateNotificationRule(ctx, nr, userID) -} - -// UpdateNotificationRule checks to see if the authorizer on context has write access to the notification rule provided. 
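The notification rule tests later in this diff grant access the same way the endpoint tests above do: they build an influxdb.Permission scoped to a resource type (optionally narrowed to an org or resource ID) and attach it to the context through a mock authorizer. A condensed sketch of that setup, using only helpers that appear elsewhere in this diff and the same imports the test files use (the wrapping function name is hypothetical):

// exampleAuthorizedContext returns a context carrying an org-scoped write
// permission on notification rules; with it set, Create/Update/Delete calls for
// rules in org 10 pass the authorizer's write checks.
func exampleAuthorizedContext() context.Context {
	perm := influxdb.Permission{
		Action: influxdb.WriteAction,
		Resource: influxdb.Resource{
			Type:  influxdb.NotificationRuleResourceType,
			OrgID: influxdbtesting.IDPtr(10),
		},
	}
	// The boolean and permission list mirror the mock.NewMockAuthorizer calls in the tests.
	return influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{perm}))
}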
-func (s *NotificationRuleStore) UpdateNotificationRule(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleCreate, userID platform.ID) (influxdb.NotificationRule, error) { - nr, err := s.FindNotificationRuleByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.NotificationRuleResourceType, nr.GetID(), nr.GetOrgID()); err != nil { - return nil, err - } - return s.s.UpdateNotificationRule(ctx, id, upd, userID) -} - -// PatchNotificationRule checks to see if the authorizer on context has write access to the notification rule provided. -func (s *NotificationRuleStore) PatchNotificationRule(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) { - nr, err := s.s.FindNotificationRuleByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.NotificationRuleResourceType, nr.GetID(), nr.GetOrgID()); err != nil { - return nil, err - } - return s.s.PatchNotificationRule(ctx, id, upd) -} - -// DeleteNotificationRule checks to see if the authorizer on context has write access to the notification rule provided. -func (s *NotificationRuleStore) DeleteNotificationRule(ctx context.Context, id platform.ID) error { - nr, err := s.s.FindNotificationRuleByID(ctx, id) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.NotificationRuleResourceType, nr.GetID(), nr.GetOrgID()); err != nil { - return err - } - return s.s.DeleteNotificationRule(ctx, id) -} diff --git a/authorizer/notification_rule_test.go b/authorizer/notification_rule_test.go deleted file mode 100644 index adb1b2818b5..00000000000 --- a/authorizer/notification_rule_test.go +++ /dev/null @@ -1,717 +0,0 @@ -package authorizer_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification/rule" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var notificationRuleCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []influxdb.NotificationRule) []influxdb.NotificationRule { - out := append([]influxdb.NotificationRule(nil), in...) 
// Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].GetID().String() > out[j].GetID().String() - }) - return out - }), -} - -func TestNotificationRuleStore_FindNotificationRuleByID(t *testing.T) { - type fields struct { - NotificationRuleStore influxdb.NotificationRuleStore - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctx context.Context, id platform.ID) (influxdb.NotificationRule, error) { - return &rule.Slack{ - Base: rule.Base{ - ID: id, - OrgID: 10, - }, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationRuleResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctx context.Context, id platform.ID) (influxdb.NotificationRule, error) { - return &rule.Slack{ - Base: rule.Base{ - ID: id, - OrgID: 10, - }, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationRuleResourceType, - OrgID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/notificationRules/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewNotificationRuleStore(tt.fields.NotificationRuleStore, mock.NewUserResourceMappingService(), mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindNotificationRuleByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestNotificationRuleStore_FindNotificationRules(t *testing.T) { - type fields struct { - NotificationRuleStore influxdb.NotificationRuleStore - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - notificationRules []influxdb.NotificationRule - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all notificationRules", - fields: fields{ - NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRulesF: func(ctx context.Context, filter influxdb.NotificationRuleFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationRule, int, error) { - return []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: 1, - OrgID: 10, - }, - }, - &rule.Slack{ - Base: rule.Base{ - ID: 2, - OrgID: 10, - }, - }, - &rule.PagerDuty{ - Base: rule.Base{ - ID: 3, - OrgID: 11, - }, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationRuleResourceType, - }, - }, - }, - wants: wants{ - notificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: 1, - OrgID: 10, - }, - }, - &rule.Slack{ - Base: 
rule.Base{ - ID: 2, - OrgID: 10, - }, - }, - &rule.PagerDuty{ - Base: rule.Base{ - ID: 3, - OrgID: 11, - }, - }, - }, - }, - }, - { - name: "authorized to access a single orgs notificationRules", - fields: fields{ - NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRulesF: func(ctx context.Context, filter influxdb.NotificationRuleFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationRule, int, error) { - return []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: 1, - OrgID: 10, - }, - }, - &rule.Slack{ - Base: rule.Base{ - ID: 2, - OrgID: 10, - }, - }, - &rule.PagerDuty{ - Base: rule.Base{ - ID: 3, - OrgID: 11, - }, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationRuleResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - notificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: 1, - OrgID: 10, - }, - }, - &rule.Slack{ - Base: rule.Base{ - ID: 2, - OrgID: 10, - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewNotificationRuleStore(tt.fields.NotificationRuleStore, mock.NewUserResourceMappingService(), mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - ts, _, err := s.FindNotificationRules(ctx, influxdb.NotificationRuleFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(ts, tt.wants.notificationRules, notificationRuleCmpOptions...); diff != "" { - t.Errorf("notificationRules are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestNotificationRuleStore_UpdateNotificationRule(t *testing.T) { - type fields struct { - NotificationRuleStore influxdb.NotificationRuleStore - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update notificationRule", - fields: fields{ - NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationRule, error) { - return &rule.Slack{ - Base: rule.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - UpdateNotificationRuleF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleCreate, userID platform.ID) (influxdb.NotificationRule, error) { - return &rule.Slack{ - Base: rule.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationRuleResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationRuleResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update notificationRule", - fields: fields{ - NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationRule, error) { - return &rule.Slack{ - Base: rule.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - UpdateNotificationRuleF: func(ctx context.Context, id platform.ID, 
upd influxdb.NotificationRuleCreate, userID platform.ID) (influxdb.NotificationRule, error) { - return &rule.Slack{ - Base: rule.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationRuleResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/notificationRules/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewNotificationRuleStore(tt.fields.NotificationRuleStore, mock.NewUserResourceMappingService(), mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - nrc := influxdb.NotificationRuleCreate{ - NotificationRule: &rule.Slack{}, - Status: influxdb.Active, - } - - _, err := s.UpdateNotificationRule(ctx, tt.args.id, nrc, platform.ID(1)) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestNotificationRuleStore_PatchNotificationRule(t *testing.T) { - type fields struct { - NotificationRuleStore influxdb.NotificationRuleStore - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to patch notificationRule", - fields: fields{ - NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationRule, error) { - return &rule.Slack{ - Base: rule.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - PatchNotificationRuleF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) { - return &rule.Slack{ - Base: rule.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationRuleResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationRuleResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to patch notificationRule", - fields: fields{ - NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationRule, error) { - return &rule.Slack{ - Base: rule.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - PatchNotificationRuleF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) { - return &rule.Slack{ - Base: rule.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationRuleResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/notificationRules/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := 
authorizer.NewNotificationRuleStore(tt.fields.NotificationRuleStore, mock.NewUserResourceMappingService(), mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - _, err := s.PatchNotificationRule(ctx, tt.args.id, influxdb.NotificationRuleUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestNotificationRuleStore_DeleteNotificationRule(t *testing.T) { - type fields struct { - NotificationRuleStore influxdb.NotificationRuleStore - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete notificationRule", - fields: fields{ - NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationRule, error) { - return &rule.Slack{ - Base: rule.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - DeleteNotificationRuleF: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationRuleResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationRuleResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete notificationRule", - fields: fields{ - NotificationRuleStore: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(ctc context.Context, id platform.ID) (influxdb.NotificationRule, error) { - return &rule.Slack{ - Base: rule.Base{ - ID: 1, - OrgID: 10, - }, - }, nil - }, - DeleteNotificationRuleF: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationRuleResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/notificationRules/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewNotificationRuleStore(tt.fields.NotificationRuleStore, mock.NewUserResourceMappingService(), mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.DeleteNotificationRule(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestNotificationRuleStore_CreateNotificationRule(t *testing.T) { - type fields struct { - NotificationRuleStore influxdb.NotificationRuleStore - } - type args struct { - permission influxdb.Permission - orgID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create notificationRule", - fields: fields{ - NotificationRuleStore: &mock.NotificationRuleStore{ - CreateNotificationRuleF: func(ctx context.Context, tc influxdb.NotificationRuleCreate, userID platform.ID) error { - return nil - }, - }, - }, - args: args{ - 
orgID: 10, - permission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationRuleResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create notificationRule", - fields: fields{ - NotificationRuleStore: &mock.NotificationRuleStore{ - CreateNotificationRuleF: func(ctx context.Context, tc influxdb.NotificationRuleCreate, userID platform.ID) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.NotificationRuleResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/notificationRules is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewNotificationRuleStore(tt.fields.NotificationRuleStore, mock.NewUserResourceMappingService(), mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - nr := &rule.Slack{ - Base: rule.Base{ - OrgID: tt.args.orgID}, - } - - nrc := influxdb.NotificationRuleCreate{ - NotificationRule: nr, - Status: influxdb.Active, - } - - err := s.CreateNotificationRule(ctx, nrc, platform.ID(1)) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/authorizer/org.go b/authorizer/org.go deleted file mode 100644 index e57b9cfbcd3..00000000000 --- a/authorizer/org.go +++ /dev/null @@ -1,91 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.OrganizationService = (*OrgService)(nil) - -// OrgService wraps a influxdb.OrganizationService and authorizes actions -// against it appropriately. -type OrgService struct { - s influxdb.OrganizationService -} - -// NewOrgService constructs an instance of an authorizing org service. -func NewOrgService(s influxdb.OrganizationService) *OrgService { - return &OrgService{ - s: s, - } -} - -// FindOrganizationByID checks to see if the authorizer on context has read access to the id provided. -func (s *OrgService) FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - if _, _, err := AuthorizeReadOrg(ctx, id); err != nil { - return nil, err - } - return s.s.FindOrganizationByID(ctx, id) -} - -// FindOrganization retrieves the organization and checks to see if the authorizer on context has read access to the org. -func (s *OrgService) FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - o, err := s.s.FindOrganization(ctx, filter) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeReadOrg(ctx, o.ID); err != nil { - return nil, err - } - return o, nil -} - -// FindOrganizations retrieves all organizations that match the provided filter and then filters the list down to only the resources that are authorized. 
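The org wrapper above follows the package's common authorization pattern: hold the concrete `influxdb.OrganizationService`, pull the authorizer off the request context, check the permission for the specific resource, and only then delegate. As a minimal, illustrative wiring sketch (not part of the deleted file; it reuses only identifiers that appear elsewhere in this diff), this is how the table-driven tests below exercise that pattern:

```go
package authorizer_test

import (
	"context"
	"fmt"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/authorizer"
	influxdbcontext "github.com/influxdata/influxdb/v2/context"
	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/mock"
	influxdbtesting "github.com/influxdata/influxdb/v2/testing"
)

// illustrateOrgAuthorizer wires the wrapper the same way the tests in this
// diff do: the underlying service always succeeds, so any failure comes from
// the permission check rather than from storage.
func illustrateOrgAuthorizer() {
	svc := &mock.OrganizationService{
		FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) {
			return &influxdb.Organization{ID: id}, nil
		},
	}
	s := authorizer.NewOrgService(svc)

	// The context authorizer carries a single read permission scoped to org 1.
	perm := influxdb.Permission{
		Action: influxdb.ReadAction,
		Resource: influxdb.Resource{
			Type: influxdb.OrgsResourceType,
			ID:   influxdbtesting.IDPtr(1),
		},
	}
	ctx := influxdbcontext.SetAuthorizer(context.Background(),
		mock.NewMockAuthorizer(false, []influxdb.Permission{perm}))

	_, err := s.FindOrganizationByID(ctx, 1) // allowed by read:orgs/0000000000000001
	fmt.Println(err)
	_, err = s.FindOrganizationByID(ctx, 2) // denied: read:orgs/0000000000000002 is unauthorized
	fmt.Println(err)
}
```

The same construct-wrap-authorize shape repeats for every service in this package, which is why the deleted tests are all table-driven around a permission list and an expected error.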
-func (s *OrgService) FindOrganizations(ctx context.Context, filter influxdb.OrganizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) { - if filter.Name == nil && filter.ID == nil && filter.UserID == nil { - // if the user doesnt have permission to look up all orgs we need to add this users id to the filter to save lookup time - auth, err := icontext.GetAuthorizer(ctx) - if err != nil { - return nil, 0, err - } - if _, _, err := AuthorizeReadGlobal(ctx, influxdb.OrgsResourceType); err != nil { - userid := auth.GetUserID() - filter.UserID = &userid - } - } - - // TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data - // will likely be expensive. - os, _, err := s.s.FindOrganizations(ctx, filter, opt...) - if err != nil { - return nil, 0, err - } - return AuthorizeFindOrganizations(ctx, os) -} - -// CreateOrganization checks to see if the authorizer on context has write access to the global orgs resource. -func (s *OrgService) CreateOrganization(ctx context.Context, o *influxdb.Organization) error { - if _, _, err := AuthorizeWriteGlobal(ctx, influxdb.OrgsResourceType); err != nil { - return err - } - return s.s.CreateOrganization(ctx, o) -} - -// UpdateOrganization checks to see if the authorizer on context has write access to the organization provided. -func (s *OrgService) UpdateOrganization(ctx context.Context, id platform.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { - if _, _, err := AuthorizeWriteOrg(ctx, id); err != nil { - return nil, err - } - return s.s.UpdateOrganization(ctx, id, upd) -} - -// DeleteOrganization checks to see if the authorizer on context has write access to the organization provided. -func (s *OrgService) DeleteOrganization(ctx context.Context, id platform.ID) error { - if _, _, err := AuthorizeWriteOrg(ctx, id); err != nil { - return err - } - return s.s.DeleteOrganization(ctx, id) -} diff --git a/authorizer/org_test.go b/authorizer/org_test.go deleted file mode 100644 index e5ea7785f5c..00000000000 --- a/authorizer/org_test.go +++ /dev/null @@ -1,558 +0,0 @@ -package authorizer_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var orgCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Organization) []*influxdb.Organization { - out := append([]*influxdb.Organization(nil), in...) 
// Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func TestOrgService_FindOrganizationByID(t *testing.T) { - type fields struct { - OrgService influxdb.OrganizationService - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - OrgService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: id, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - OrgService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: id, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewOrgService(tt.fields.OrgService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - _, err := s.FindOrganizationByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestOrgService_FindOrganization(t *testing.T) { - type fields struct { - OrgService influxdb.OrganizationService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access org", - fields: fields{ - OrgService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: 1, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access org", - fields: fields{ - OrgService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: 1, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewOrgService(tt.fields.OrgService) - - ctx := context.Background() - ctx = 
influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindOrganization(ctx, influxdb.OrganizationFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestOrgService_FindOrganizations(t *testing.T) { - type fields struct { - OrgService influxdb.OrganizationService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - orgs []*influxdb.Organization - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all orgs", - fields: fields{ - OrgService: &mock.OrganizationService{ - FindOrganizationsF: func(ctx context.Context, filter influxdb.OrganizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) { - return []*influxdb.Organization{ - { - ID: 1, - }, - { - ID: 2, - }, - { - ID: 3, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - }, - }, - }, - wants: wants{ - orgs: []*influxdb.Organization{ - { - ID: 1, - }, - { - ID: 2, - }, - { - ID: 3, - }, - }, - }, - }, - { - name: "authorized to access a single org", - fields: fields{ - OrgService: &mock.OrganizationService{ - FindOrganizationsF: func(ctx context.Context, filter influxdb.OrganizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) { - return []*influxdb.Organization{ - { - ID: 1, - }, - { - ID: 2, - }, - { - ID: 3, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - wants: wants{ - orgs: []*influxdb.Organization{ - { - ID: 2, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewOrgService(tt.fields.OrgService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - orgs, _, err := s.FindOrganizations(ctx, influxdb.OrganizationFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(orgs, tt.wants.orgs, orgCmpOptions...); diff != "" { - t.Errorf("organizations are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestOrgService_UpdateOrganization(t *testing.T) { - type fields struct { - OrgService influxdb.OrganizationService - } - type args struct { - id platform.ID - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update org", - fields: fields{ - OrgService: &mock.OrganizationService{ - UpdateOrganizationF: func(ctx context.Context, id platform.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: 1, - }, nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update org", - fields: fields{ - OrgService: &mock.OrganizationService{ - UpdateOrganizationF: func(ctx context.Context, id platform.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: 1, - }, 
nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewOrgService(tt.fields.OrgService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.UpdateOrganization(ctx, tt.args.id, influxdb.OrganizationUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestOrgService_DeleteOrganization(t *testing.T) { - type fields struct { - OrgService influxdb.OrganizationService - } - type args struct { - id platform.ID - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete org", - fields: fields{ - OrgService: &mock.OrganizationService{ - DeleteOrganizationF: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete org", - fields: fields{ - OrgService: &mock.OrganizationService{ - DeleteOrganizationF: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewOrgService(tt.fields.OrgService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.DeleteOrganization(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestOrgService_CreateOrganization(t *testing.T) { - type fields struct { - OrgService influxdb.OrganizationService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create org", - fields: fields{ - OrgService: &mock.OrganizationService{ - CreateOrganizationF: func(ctx context.Context, o *influxdb.Organization) error { - return nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create org", - fields: fields{ - OrgService: &mock.OrganizationService{ - CreateOrganizationF: func(ctx context.Context, o *influxdb.Organization) error { - return nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - 
err: &errors.Error{ - Msg: "write:orgs is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewOrgService(tt.fields.OrgService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.CreateOrganization(ctx, &influxdb.Organization{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/authorizer/password.go b/authorizer/password.go deleted file mode 100644 index b0605d65dbd..00000000000 --- a/authorizer/password.go +++ /dev/null @@ -1,38 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// PasswordService is a new authorization middleware for a password service. -type PasswordService struct { - next influxdb.PasswordsService -} - -// NewPasswordService wraps an existing password service with auth middleware. -func NewPasswordService(svc influxdb.PasswordsService) *PasswordService { - return &PasswordService{next: svc} -} - -// SetPassword overrides the password of a known user. -func (s *PasswordService) SetPassword(ctx context.Context, userID platform.ID, password string) error { - if _, _, err := AuthorizeWriteResource(ctx, influxdb.UsersResourceType, userID); err != nil { - return err - } - return s.next.SetPassword(ctx, userID, password) -} - -// ComparePassword checks if the password matches the password recorded. -// Passwords that do not match return errors. -func (s *PasswordService) ComparePassword(ctx context.Context, userID platform.ID, password string) error { - panic("not implemented") -} - -// CompareAndSetPassword checks the password and if they match -// updates to the new password. 
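`SetPassword` above is authorized against the users resource keyed by the target user's ID, so a token that can write only its own user record can change only its own password. An illustrative sketch (not part of the deleted file), using the same mock helpers the password tests below use:

```go
package authorizer_test

import (
	"context"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/authorizer"
	icontext "github.com/influxdata/influxdb/v2/context"
	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/mock"
)

// illustratePasswordAuthorizer shows that only a write grant on the target
// user's ID lets SetPassword through the wrapper.
func illustratePasswordAuthorizer() error {
	userID := platform.ID(1)

	// Underlying service always succeeds; the wrapper supplies the policy.
	fakeSVC := mock.NewPasswordsService()
	fakeSVC.SetPasswordFn = func(_ context.Context, _ platform.ID, _ string) error { return nil }
	s := authorizer.NewPasswordService(fakeSVC)

	// Write permission on user 1 only.
	perm := influxdb.Permission{
		Action: influxdb.WriteAction,
		Resource: influxdb.Resource{
			Type: influxdb.UsersResourceType,
			ID:   &userID,
		},
	}
	ctx := icontext.SetAuthorizer(context.Background(),
		mock.NewMockAuthorizer(false, []influxdb.Permission{perm}))

	if err := s.SetPassword(ctx, userID, "password"); err != nil {
		return err // not expected: the permission matches user 1
	}
	return s.SetPassword(ctx, platform.ID(3), "password") // expected: unauthorized, different user
}
```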
-func (s *PasswordService) CompareAndSetPassword(ctx context.Context, userID platform.ID, old string, new string) error { - panic("not implemented") -} diff --git a/authorizer/password_test.go b/authorizer/password_test.go deleted file mode 100644 index b0b6df6729f..00000000000 --- a/authorizer/password_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package authorizer_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - "github.com/stretchr/testify/require" -) - -func TestPasswordService(t *testing.T) { - t.Run("SetPassword", func(t *testing.T) { - t.Run("user with permissions should proceed", func(t *testing.T) { - userID := platform.ID(1) - - permission := influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: &userID, - }, - } - - fakeSVC := mock.NewPasswordsService() - fakeSVC.SetPasswordFn = func(_ context.Context, _ platform.ID, _ string) error { - return nil - } - s := authorizer.NewPasswordService(fakeSVC) - - ctx := icontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{permission})) - - err := s.SetPassword(ctx, 1, "password") - require.NoError(t, err) - }) - - t.Run("user without permissions should proceed", func(t *testing.T) { - goodUserID := platform.ID(1) - badUserID := platform.ID(3) - - tests := []struct { - name string - badPermission influxdb.Permission - }{ - { - name: "has no access", - }, - { - name: "has read only access on correct resource", - badPermission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: &goodUserID, - }, - }, - }, - { - name: "has write access on incorrect resource", - badPermission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: &goodUserID, - }, - }, - }, - { - name: "user accessing user that is not self", - badPermission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: &badUserID, - }, - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - fakeSVC := &mock.PasswordsService{ - SetPasswordFn: func(_ context.Context, _ platform.ID, _ string) error { - return nil - }, - } - s := authorizer.NewPasswordService(fakeSVC) - - ctx := icontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{tt.badPermission})) - - err := s.SetPassword(ctx, goodUserID, "password") - require.Error(t, err) - } - - t.Run(tt.name, fn) - } - }) - }) -} diff --git a/authorizer/restore.go b/authorizer/restore.go deleted file mode 100644 index 51f5e9f5035..00000000000 --- a/authorizer/restore.go +++ /dev/null @@ -1,55 +0,0 @@ -package authorizer - -import ( - "context" - "io" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/tracing" -) - -var _ influxdb.RestoreService = (*RestoreService)(nil) - -// RestoreService wraps a influxdb.RestoreService and authorizes actions -// against it appropriately. 
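Unlike the per-resource wrappers elsewhere in this package, the restore wrapper defined just below gates every call on the full operator permission set via `IsAllowedAll(ctx, influxdb.OperPermissions())`. A short sketch of that guard (illustrative only; `adminOnlyOperation` is a hypothetical caller, not part of the deleted file):

```go
package authorizer

import (
	"context"

	"github.com/influxdata/influxdb/v2"
)

// adminOnlyOperation mirrors the check each restore entry point performs
// before delegating to the underlying service.
func adminOnlyOperation(ctx context.Context) error {
	// Fails with EUnauthorized unless the context authorizer holds every
	// operator permission.
	if err := IsAllowedAll(ctx, influxdb.OperPermissions()); err != nil {
		return err
	}
	// ...privileged restore work would happen only past this point...
	return nil
}
```

A passing context can be built the same way the tests in this diff build theirs, e.g. `icontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, influxdb.OperPermissions()))`, assuming `OperPermissions` returns the `[]influxdb.Permission` slice that `IsAllowedAll` consumes (the function body is not shown here).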
-type RestoreService struct { - s influxdb.RestoreService -} - -// NewRestoreService constructs an instance of an authorizing restore service. -func NewRestoreService(s influxdb.RestoreService) *RestoreService { - return &RestoreService{ - s: s, - } -} - -func (b RestoreService) RestoreKVStore(ctx context.Context, r io.Reader) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if err := IsAllowedAll(ctx, influxdb.OperPermissions()); err != nil { - return err - } - return b.s.RestoreKVStore(ctx, r) -} - -func (b RestoreService) RestoreBucket(ctx context.Context, id platform.ID, dbi []byte) (shardIDMap map[uint64]uint64, err error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if err := IsAllowedAll(ctx, influxdb.OperPermissions()); err != nil { - return nil, err - } - return b.s.RestoreBucket(ctx, id, dbi) -} - -func (b RestoreService) RestoreShard(ctx context.Context, shardID uint64, r io.Reader) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if err := IsAllowedAll(ctx, influxdb.OperPermissions()); err != nil { - return err - } - return b.s.RestoreShard(ctx, shardID, r) -} diff --git a/authorizer/scraper.go b/authorizer/scraper.go deleted file mode 100644 index c2c3c8c17c8..00000000000 --- a/authorizer/scraper.go +++ /dev/null @@ -1,91 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.ScraperTargetStoreService = (*ScraperTargetStoreService)(nil) - -// ScraperTargetStoreService wraps a influxdb.ScraperTargetStoreService and authorizes actions -// against it appropriately. -type ScraperTargetStoreService struct { - influxdb.UserResourceMappingService - influxdb.OrganizationService - s influxdb.ScraperTargetStoreService -} - -// NewScraperTargetStoreService constructs an instance of an authorizing scraper target store service. -func NewScraperTargetStoreService(s influxdb.ScraperTargetStoreService, - urm influxdb.UserResourceMappingService, - org influxdb.OrganizationService, -) *ScraperTargetStoreService { - return &ScraperTargetStoreService{ - UserResourceMappingService: urm, - s: s, - } -} - -// GetTargetByID checks to see if the authorizer on context has read access to the id provided. -func (s *ScraperTargetStoreService) GetTargetByID(ctx context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { - st, err := s.s.GetTargetByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, influxdb.ScraperResourceType, id, st.OrgID); err != nil { - return nil, err - } - return st, nil -} - -// ListTargets retrieves all scraper targets that match the provided filter and then filters the list down to only the resources that are authorized. -func (s *ScraperTargetStoreService) ListTargets(ctx context.Context, filter influxdb.ScraperTargetFilter) ([]influxdb.ScraperTarget, error) { - // TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data - // will likely be expensive. - ss, err := s.s.ListTargets(ctx, filter) - if err != nil { - return nil, err - } - ss, _, err = AuthorizeFindScrapers(ctx, ss) - return ss, err -} - -// AddTarget checks to see if the authorizer on context has write access to the global scraper target resource. 
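`AddTarget` (below), like `UpdateTarget` further down, checks two resources at once: create access to scrapers in the target's org and write access to the bucket the scraper will feed. The "unauthorized to write to bucket" test case later in this diff exercises exactly that second check. Illustrative only, here is the minimal permission set a token needs, using the same IDs as the test data:

```go
package authorizer_test

import (
	"github.com/influxdata/influxdb/v2"
	influxdbtesting "github.com/influxdata/influxdb/v2/testing"
)

// scraperAddTargetPermissions returns the two grants AddTarget requires for a
// target in org 10 that writes to bucket 100.
func scraperAddTargetPermissions() []influxdb.Permission {
	return []influxdb.Permission{
		{
			// Create scrapers within the owning org.
			Action: influxdb.WriteAction,
			Resource: influxdb.Resource{
				Type:  influxdb.ScraperResourceType,
				OrgID: influxdbtesting.IDPtr(10),
			},
		},
		{
			// Write into the bucket the scraper targets.
			Action: influxdb.WriteAction,
			Resource: influxdb.Resource{
				Type: influxdb.BucketsResourceType,
				ID:   influxdbtesting.IDPtr(100),
			},
		},
	}
}
```

Dropping either entry reproduces one of the two unauthorized test cases below: `write:orgs/000000000000000a/scrapers is unauthorized` without the first, and `write:orgs/000000000000000a/buckets/0000000000000064 is unauthorized` without the second.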
-func (s *ScraperTargetStoreService) AddTarget(ctx context.Context, st *influxdb.ScraperTarget, userID platform.ID) error { - if _, _, err := AuthorizeCreate(ctx, influxdb.ScraperResourceType, st.OrgID); err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.BucketsResourceType, st.BucketID, st.OrgID); err != nil { - return err - } - return s.s.AddTarget(ctx, st, userID) -} - -// UpdateTarget checks to see if the authorizer on context has write access to the scraper target provided. -func (s *ScraperTargetStoreService) UpdateTarget(ctx context.Context, upd *influxdb.ScraperTarget, userID platform.ID) (*influxdb.ScraperTarget, error) { - st, err := s.s.GetTargetByID(ctx, upd.ID) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.ScraperResourceType, upd.ID, st.OrgID); err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.BucketsResourceType, st.BucketID, st.OrgID); err != nil { - return nil, err - } - return s.s.UpdateTarget(ctx, upd, userID) -} - -// RemoveTarget checks to see if the authorizer on context has write access to the scraper target provided. -func (s *ScraperTargetStoreService) RemoveTarget(ctx context.Context, id platform.ID) error { - st, err := s.s.GetTargetByID(ctx, id) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.ScraperResourceType, st.ID, st.OrgID); err != nil { - return err - } - return s.s.RemoveTarget(ctx, id) -} diff --git a/authorizer/scraper_test.go b/authorizer/scraper_test.go deleted file mode 100644 index 18dca984b91..00000000000 --- a/authorizer/scraper_test.go +++ /dev/null @@ -1,659 +0,0 @@ -package authorizer_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var scraperCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []influxdb.ScraperTarget) []influxdb.ScraperTarget { - out := append([]influxdb.ScraperTarget(nil), in...) 
// Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func TestScraperTargetStoreService_GetTargetByID(t *testing.T) { - type fields struct { - ScraperTargetStoreService influxdb.ScraperTargetStoreService - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - GetTargetByIDF: func(ctx context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { - return &influxdb.ScraperTarget{ - ID: id, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ScraperResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - GetTargetByIDF: func(ctx context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { - return &influxdb.ScraperTarget{ - ID: id, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ScraperResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/scrapers/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewScraperTargetStoreService(tt.fields.ScraperTargetStoreService, mock.NewUserResourceMappingService(), mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.GetTargetByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestScraperTargetStoreService_ListTargets(t *testing.T) { - type fields struct { - ScraperTargetStoreService influxdb.ScraperTargetStoreService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - scrapers []influxdb.ScraperTarget - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all scrapers", - fields: fields{ - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - ListTargetsF: func(ctx context.Context, filter influxdb.ScraperTargetFilter) ([]influxdb.ScraperTarget, error) { - return []influxdb.ScraperTarget{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - { - ID: 3, - OrgID: 11, - }, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ScraperResourceType, - }, - }, - }, - wants: wants{ - scrapers: []influxdb.ScraperTarget{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - { - ID: 3, - OrgID: 11, - }, - }, - }, - }, - { - name: "authorized to access a single orgs scrapers", - fields: fields{ - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - ListTargetsF: func(ctx context.Context, filter influxdb.ScraperTargetFilter) ([]influxdb.ScraperTarget, error) { - return 
[]influxdb.ScraperTarget{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - { - ID: 3, - OrgID: 11, - }, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ScraperResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - scrapers: []influxdb.ScraperTarget{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewScraperTargetStoreService(tt.fields.ScraperTargetStoreService, mock.NewUserResourceMappingService(), - mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - ts, err := s.ListTargets(ctx, influxdb.ScraperTargetFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(ts, tt.wants.scrapers, scraperCmpOptions...); diff != "" { - t.Errorf("scrapers are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestScraperTargetStoreService_UpdateTarget(t *testing.T) { - type fields struct { - ScraperTargetStoreService influxdb.ScraperTargetStoreService - } - type args struct { - id platform.ID - bucketID platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update scraper", - fields: fields{ - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - GetTargetByIDF: func(ctc context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { - return &influxdb.ScraperTarget{ - ID: 1, - OrgID: 10, - BucketID: 100, - }, nil - }, - UpdateTargetF: func(ctx context.Context, upd *influxdb.ScraperTarget, userID platform.ID) (*influxdb.ScraperTarget, error) { - return &influxdb.ScraperTarget{ - ID: 1, - OrgID: 10, - BucketID: 100, - }, nil - }, - }, - }, - args: args{ - id: 1, - bucketID: 100, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ScraperResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ScraperResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(100), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update scraper", - fields: fields{ - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - GetTargetByIDF: func(ctc context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { - return &influxdb.ScraperTarget{ - ID: 1, - OrgID: 10, - BucketID: 100, - }, nil - }, - UpdateTargetF: func(ctx context.Context, upd *influxdb.ScraperTarget, userID platform.ID) (*influxdb.ScraperTarget, error) { - return &influxdb.ScraperTarget{ - ID: 1, - OrgID: 10, - BucketID: 100, - }, nil - }, - }, - }, - args: args{ - id: 1, - bucketID: 100, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ScraperResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/scrapers/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - { - name: 
"unauthorized to write to bucket", - fields: fields{ - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - GetTargetByIDF: func(ctc context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { - return &influxdb.ScraperTarget{ - ID: 1, - OrgID: 10, - BucketID: 100, - }, nil - }, - UpdateTargetF: func(ctx context.Context, upd *influxdb.ScraperTarget, userID platform.ID) (*influxdb.ScraperTarget, error) { - return &influxdb.ScraperTarget{ - ID: 1, - OrgID: 10, - BucketID: 100, - }, nil - }, - }, - }, - args: args{ - id: 1, - bucketID: 100, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ScraperResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/buckets/0000000000000064 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewScraperTargetStoreService(tt.fields.ScraperTargetStoreService, mock.NewUserResourceMappingService(), - mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - _, err := s.UpdateTarget(ctx, &influxdb.ScraperTarget{ID: tt.args.id, BucketID: tt.args.bucketID}, platform.ID(1)) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestScraperTargetStoreService_RemoveTarget(t *testing.T) { - type fields struct { - ScraperTargetStoreService influxdb.ScraperTargetStoreService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete scraper", - fields: fields{ - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - GetTargetByIDF: func(ctc context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { - return &influxdb.ScraperTarget{ - ID: 1, - OrgID: 10, - }, nil - }, - RemoveTargetF: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ScraperResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ScraperResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete scraper", - fields: fields{ - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - GetTargetByIDF: func(ctc context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { - return &influxdb.ScraperTarget{ - ID: 1, - OrgID: 10, - }, nil - }, - RemoveTargetF: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ScraperResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/scrapers/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewScraperTargetStoreService(tt.fields.ScraperTargetStoreService, 
mock.NewUserResourceMappingService(), - mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.RemoveTarget(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestScraperTargetStoreService_AddTarget(t *testing.T) { - type fields struct { - ScraperTargetStoreService influxdb.ScraperTargetStoreService - } - type args struct { - permissions []influxdb.Permission - orgID platform.ID - bucketID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create scraper", - fields: fields{ - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - AddTargetF: func(ctx context.Context, st *influxdb.ScraperTarget, userID platform.ID) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - bucketID: 100, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ScraperResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(100), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create scraper", - fields: fields{ - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - AddTargetF: func(ctx context.Context, st *influxdb.ScraperTarget, userID platform.ID) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - bucketID: 100, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ScraperResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(100), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/scrapers is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - { - name: "unauthorized to write to bucket", - fields: fields{ - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - AddTargetF: func(ctx context.Context, st *influxdb.ScraperTarget, userID platform.ID) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - bucketID: 100, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ScraperResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/buckets/0000000000000064 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewScraperTargetStoreService(tt.fields.ScraperTargetStoreService, mock.NewUserResourceMappingService(), - mock.NewOrganizationService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.AddTarget(ctx, &influxdb.ScraperTarget{OrgID: tt.args.orgID, BucketID: tt.args.bucketID}, platform.ID(1)) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/authorizer/secret.go b/authorizer/secret.go deleted 
file mode 100644 index bf816f96187..00000000000 --- a/authorizer/secret.go +++ /dev/null @@ -1,100 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.SecretService = (*SecretService)(nil) - -// SecretService wraps a influxdb.SecretService and authorizes actions -// against it appropriately. -type SecretService struct { - s influxdb.SecretService -} - -// NewSecretService constructs an instance of an authorizing secret service. -func NewSecretService(s influxdb.SecretService) *SecretService { - return &SecretService{ - s: s, - } -} - -// LoadSecret checks to see if the authorizer on context has read access to the secret key provided. -func (s *SecretService) LoadSecret(ctx context.Context, orgID platform.ID, key string) (string, error) { - if _, _, err := AuthorizeOrgReadResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { - return "", err - } - secret, err := s.s.LoadSecret(ctx, orgID, key) - if err != nil { - return "", err - } - return secret, nil -} - -// GetSecretKeys checks to see if the authorizer on context has read access to all the secrets belonging to orgID. -func (s *SecretService) GetSecretKeys(ctx context.Context, orgID platform.ID) ([]string, error) { - if _, _, err := AuthorizeOrgReadResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { - return []string{}, err - } - secrets, err := s.s.GetSecretKeys(ctx, orgID) - if err != nil { - return []string{}, err - } - return secrets, nil -} - -// PutSecret checks to see if the authorizer on context has write access to the secret key provided. -func (s *SecretService) PutSecret(ctx context.Context, orgID platform.ID, key string, val string) error { - if _, _, err := AuthorizeCreate(ctx, influxdb.SecretsResourceType, orgID); err != nil { - return err - } - err := s.s.PutSecret(ctx, orgID, key, val) - if err != nil { - return err - } - return nil -} - -// PutSecrets checks to see if the authorizer on context has read and write access to the secret keys provided. -func (s *SecretService) PutSecrets(ctx context.Context, orgID platform.ID, m map[string]string) error { - // PutSecrets operates on intersection between m and keys beloging to orgID. - // We need to have read access to those secrets since it deletes the secrets (within the intersection) that have not be overridden. - if _, _, err := AuthorizeOrgReadResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { - return err - } - if _, _, err := AuthorizeOrgWriteResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { - return err - } - err := s.s.PutSecrets(ctx, orgID, m) - if err != nil { - return err - } - return nil -} - -// PatchSecrets checks to see if the authorizer on context has write access to the secret keys provided. -func (s *SecretService) PatchSecrets(ctx context.Context, orgID platform.ID, m map[string]string) error { - if _, _, err := AuthorizeOrgWriteResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { - return err - } - err := s.s.PatchSecrets(ctx, orgID, m) - if err != nil { - return err - } - return nil -} - -// DeleteSecret checks to see if the authorizer on context has write access to the secret keys provided. -func (s *SecretService) DeleteSecret(ctx context.Context, orgID platform.ID, keys ...string) error { - if _, _, err := AuthorizeOrgWriteResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { - return err - } - err := s.s.DeleteSecret(ctx, orgID, keys...) 
- if err != nil { - return err - } - return nil -} diff --git a/authorizer/secret_test.go b/authorizer/secret_test.go deleted file mode 100644 index cca5f09093d..00000000000 --- a/authorizer/secret_test.go +++ /dev/null @@ -1,700 +0,0 @@ -package authorizer_test - -import ( - "bytes" - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var secretCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), -} - -func TestSecretService_LoadSecret(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - type args struct { - permission influxdb.Permission - org platform.ID - key string - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access secret within org", - fields: fields{ - SecretService: &mock.SecretService{ - LoadSecretFn: func(ctx context.Context, orgID platform.ID, k string) (string, error) { - if k == "key" { - return "val", nil - } - return "", &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrSecretNotFound, - } - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - key: "key", - org: platform.ID(10), - }, - wants: wants{ - err: nil, - }, - }, - { - name: "cannot access not existing secret", - fields: fields{ - SecretService: &mock.SecretService{ - LoadSecretFn: func(ctx context.Context, orgID platform.ID, k string) (string, error) { - if k == "key" { - return "val", nil - } - return "", &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrSecretNotFound, - } - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - key: "not existing", - org: platform.ID(10), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrSecretNotFound, - }, - }, - }, - { - name: "unauthorized to access secret within org", - fields: fields{ - SecretService: &mock.SecretService{ - LoadSecretFn: func(ctx context.Context, orgID platform.ID, k string) (string, error) { - if k == "key" { - return "val", nil - } - return "", &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrSecretNotFound, - } - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - ID: influxdbtesting.IDPtr(10), - }, - }, - org: platform.ID(2), - key: "key", - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/0000000000000002/secrets is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewSecretService(tt.fields.SecretService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, 
[]influxdb.Permission{tt.args.permission})) - - _, err := s.LoadSecret(ctx, tt.args.org, tt.args.key) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestSecretService_GetSecretKeys(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - type args struct { - permission influxdb.Permission - org platform.ID - } - type wants struct { - err error - secrets []string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all secrets within an org", - fields: fields{ - SecretService: &mock.SecretService{ - GetSecretKeysFn: func(ctx context.Context, orgID platform.ID) ([]string, error) { - return []string{ - "0000000000000001secret1", - "0000000000000001secret2", - "0000000000000001secret3", - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - org: platform.ID(1), - }, - wants: wants{ - secrets: []string{ - "0000000000000001secret1", - "0000000000000001secret2", - "0000000000000001secret3", - }, - }, - }, - { - name: "unauthorized to see all secrets within an org", - fields: fields{ - SecretService: &mock.SecretService{ - GetSecretKeysFn: func(ctx context.Context, orgID platform.ID) ([]string, error) { - return []string{ - "0000000000000002secret1", - "0000000000000002secret2", - "0000000000000002secret3", - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - org: platform.ID(2), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EUnauthorized, - Msg: "read:orgs/0000000000000002/secrets is unauthorized", - }, - secrets: []string{}, - }, - }, - { - name: "errors when there are not secret into an org", - fields: fields{ - SecretService: &mock.SecretService{ - GetSecretKeysFn: func(ctx context.Context, orgID platform.ID) ([]string, error) { - return []string(nil), &errors.Error{ - Code: errors.ENotFound, - Msg: "organization has no secret keys", - } - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - org: platform.ID(10), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "organization has no secret keys", - }, - secrets: []string{}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewSecretService(tt.fields.SecretService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - secrets, err := s.GetSecretKeys(ctx, tt.args.org) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(secrets, tt.wants.secrets, secretCmpOptions...); diff != "" { - t.Errorf("secrets are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestSecretService_PatchSecrets(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - type args struct { - org platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to patch secrets", - fields: fields{ - 
SecretService: &mock.SecretService{ - PatchSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { - return nil - }, - }, - }, - args: args{ - org: platform.ID(1), - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update secret", - fields: fields{ - SecretService: &mock.SecretService{ - PatchSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { - return nil - }, - }, - }, - args: args{ - org: platform.ID(1), - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/0000000000000001/secrets is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewSecretService(tt.fields.SecretService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - patches := make(map[string]string) - err := s.PatchSecrets(ctx, tt.args.org, patches) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestSecretService_DeleteSecret(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - type args struct { - org platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete secret", - fields: fields{ - SecretService: &mock.SecretService{ - DeleteSecretFn: func(ctx context.Context, orgID platform.ID, keys ...string) error { - return nil - }, - }, - }, - args: args{ - org: platform.ID(1), - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete secret", - fields: fields{ - SecretService: &mock.SecretService{ - DeleteSecretFn: func(ctx context.Context, orgID platform.ID, keys ...string) error { - return nil - }, - }, - }, - args: args{ - org: 10, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/secrets is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewSecretService(tt.fields.SecretService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.DeleteSecret(ctx, tt.args.org) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestSecretService_PutSecret(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - type args struct { - permission influxdb.Permission - orgID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to put 
a secret", - fields: fields{ - SecretService: &mock.SecretService{ - PutSecretFn: func(ctx context.Context, orgID platform.ID, key string, val string) error { - return nil - }, - }, - }, - args: args{ - orgID: platform.ID(10), - permission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to put a secret", - fields: fields{ - SecretService: &mock.SecretService{ - PutSecretFn: func(ctx context.Context, orgID platform.ID, key string, val string) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/secrets is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewSecretService(tt.fields.SecretService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.PutSecret(ctx, tt.args.orgID, "", "") - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestSecretService_PutSecrets(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - type args struct { - permissions []influxdb.Permission - orgID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to put secrets", - fields: fields{ - SecretService: &mock.SecretService{ - PutSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { - return nil - }, - }, - }, - args: args{ - orgID: platform.ID(10), - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to put secrets", - fields: fields{ - SecretService: &mock.SecretService{ - PutSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { - return nil - }, - }, - }, - args: args{ - orgID: platform.ID(2), - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(2), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/0000000000000002/secrets is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - { - name: "unauthorized to put secrets without read access to their org", - fields: fields{ - SecretService: &mock.SecretService{ - PutSecretFn: func(ctx context.Context, orgID platform.ID, key string, val string) error { - return nil - }, - PutSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permissions: 
[]influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/secrets is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - { - name: "unauthorized to put secrets without write access to their org", - fields: fields{ - SecretService: &mock.SecretService{ - PutSecretFn: func(ctx context.Context, orgID platform.ID, key string, val string) error { - return nil - }, - PutSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/secrets is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewSecretService(tt.fields.SecretService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - secrets := make(map[string]string) - err := s.PutSecrets(ctx, tt.args.orgID, secrets) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/authorizer/source.go b/authorizer/source.go deleted file mode 100644 index 90b6ac5879c..00000000000 --- a/authorizer/source.go +++ /dev/null @@ -1,89 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.SourceService = (*SourceService)(nil) - -// SourceService wraps a influxdb.SourceService and authorizes actions -// against it appropriately. -type SourceService struct { - s influxdb.SourceService -} - -// NewSourceService constructs an instance of an authorizing source service. -func NewSourceService(s influxdb.SourceService) *SourceService { - return &SourceService{ - s: s, - } -} - -// DefaultSource checks to see if the authorizer on context has read access to the default source. -func (s *SourceService) DefaultSource(ctx context.Context) (*influxdb.Source, error) { - src, err := s.s.DefaultSource(ctx) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, influxdb.SourcesResourceType, src.ID, src.OrganizationID); err != nil { - return nil, err - } - return src, nil -} - -// FindSourceByID checks to see if the authorizer on context has read access to the id provided. -func (s *SourceService) FindSourceByID(ctx context.Context, id platform.ID) (*influxdb.Source, error) { - src, err := s.s.FindSourceByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, influxdb.SourcesResourceType, src.ID, src.OrganizationID); err != nil { - return nil, err - } - return src, nil -} - -// FindSources retrieves all sources that match the provided options and then filters the list down to only the resources that are authorized. -func (s *SourceService) FindSources(ctx context.Context, opts influxdb.FindOptions) ([]*influxdb.Source, int, error) { - // TODO: we'll likely want to push this operation into the database since fetching the whole list of data will likely be expensive. 
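The TODO in `FindSources` above notes that authorization is applied in memory after fetching every source. As a rough, self-contained sketch of that fetch-then-filter shape (stand-in types only, not the real `AuthorizeFindSources` helper), using the same IDs as the `FindSources` cases in `source_test.go` further down:

```go
// Hypothetical stand-ins for influxdb.Source and the authorizer's read check.
package main

import "fmt"

type source struct {
	ID    uint64
	OrgID uint64
}

// filterReadable keeps only the sources the caller may read, mirroring the
// post-fetch filtering performed by the wrapped FindSources.
func filterReadable(all []source, canRead func(id, orgID uint64) bool) []source {
	out := make([]source, 0, len(all))
	for _, s := range all {
		if canRead(s.ID, s.OrgID) {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	all := []source{{ID: 1, OrgID: 10}, {ID: 2, OrgID: 10}, {ID: 3, OrgID: 11}}
	// A token scoped to org 10, as in the "authorized to access a single org
	// sources" test case: sources 1 and 2 survive, source 3 is filtered out.
	org10 := func(_, orgID uint64) bool { return orgID == 10 }
	fmt.Println(filterReadable(all, org10)) // [{1 10} {2 10}]
}
```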
- ss, _, err := s.s.FindSources(ctx, opts) - if err != nil { - return nil, 0, err - } - return AuthorizeFindSources(ctx, ss) -} - -// CreateSource checks to see if the authorizer on context has write access to the global source resource. -func (s *SourceService) CreateSource(ctx context.Context, src *influxdb.Source) error { - if _, _, err := AuthorizeCreate(ctx, influxdb.SourcesResourceType, src.OrganizationID); err != nil { - return err - } - return s.s.CreateSource(ctx, src) -} - -// UpdateSource checks to see if the authorizer on context has write access to the source provided. -func (s *SourceService) UpdateSource(ctx context.Context, id platform.ID, upd influxdb.SourceUpdate) (*influxdb.Source, error) { - src, err := s.s.FindSourceByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.SourcesResourceType, src.ID, src.OrganizationID); err != nil { - return nil, err - } - return s.s.UpdateSource(ctx, id, upd) -} - -// DeleteSource checks to see if the authorizer on context has write access to the source provided. -func (s *SourceService) DeleteSource(ctx context.Context, id platform.ID) error { - src, err := s.s.FindSourceByID(ctx, id) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.SourcesResourceType, src.ID, src.OrganizationID); err != nil { - return err - } - return s.s.DeleteSource(ctx, id) -} diff --git a/authorizer/source_test.go b/authorizer/source_test.go deleted file mode 100644 index 2a41608cf04..00000000000 --- a/authorizer/source_test.go +++ /dev/null @@ -1,629 +0,0 @@ -package authorizer_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var sourceCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Source) []*influxdb.Source { - out := append([]*influxdb.Source(nil), in...) 
// Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func TestSourceService_DefaultSource(t *testing.T) { - type fields struct { - SourceService influxdb.SourceService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - SourceService: &mock.SourceService{ - DefaultSourceFn: func(ctx context.Context) (*influxdb.Source, error) { - return &influxdb.Source{ - ID: 1, - OrganizationID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.SourcesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - SourceService: &mock.SourceService{ - DefaultSourceFn: func(ctx context.Context) (*influxdb.Source, error) { - return &influxdb.Source{ - ID: 1, - OrganizationID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.SourcesResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/sources/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewSourceService(tt.fields.SourceService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.DefaultSource(ctx) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestSourceService_FindSourceByID(t *testing.T) { - type fields struct { - SourceService influxdb.SourceService - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - SourceService: &mock.SourceService{ - FindSourceByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Source, error) { - return &influxdb.Source{ - ID: id, - OrganizationID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.SourcesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - SourceService: &mock.SourceService{ - FindSourceByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Source, error) { - return &influxdb.Source{ - ID: id, - OrganizationID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.SourcesResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/sources/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewSourceService(tt.fields.SourceService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, 
mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindSourceByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestSourceService_FindSources(t *testing.T) { - type fields struct { - SourceService influxdb.SourceService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - sources []*influxdb.Source - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all sources", - fields: fields{ - SourceService: &mock.SourceService{ - FindSourcesFn: func(ctx context.Context, opts influxdb.FindOptions) ([]*influxdb.Source, int, error) { - return []*influxdb.Source{ - { - ID: 1, - OrganizationID: 10, - }, - { - ID: 2, - OrganizationID: 10, - }, - { - ID: 3, - OrganizationID: 11, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.SourcesResourceType, - }, - }, - }, - wants: wants{ - sources: []*influxdb.Source{ - { - ID: 1, - OrganizationID: 10, - }, - { - ID: 2, - OrganizationID: 10, - }, - { - ID: 3, - OrganizationID: 11, - }, - }, - }, - }, - { - name: "authorized to access a single org sources", - fields: fields{ - SourceService: &mock.SourceService{ - FindSourcesFn: func(ctx context.Context, opts influxdb.FindOptions) ([]*influxdb.Source, int, error) { - return []*influxdb.Source{ - { - ID: 1, - OrganizationID: 10, - }, - { - ID: 2, - OrganizationID: 10, - }, - { - ID: 3, - OrganizationID: 11, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.SourcesResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - sources: []*influxdb.Source{ - { - ID: 1, - OrganizationID: 10, - }, - { - ID: 2, - OrganizationID: 10, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewSourceService(tt.fields.SourceService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - sources, _, err := s.FindSources(ctx, influxdb.DefaultSourceFindOptions) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(sources, tt.wants.sources, sourceCmpOptions...); diff != "" { - t.Errorf("sources are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestSourceService_UpdateSource(t *testing.T) { - type fields struct { - SourceService influxdb.SourceService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update source", - fields: fields{ - SourceService: &mock.SourceService{ - FindSourceByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Source, error) { - return &influxdb.Source{ - ID: 1, - OrganizationID: 10, - }, nil - }, - UpdateSourceFn: func(ctx context.Context, id platform.ID, upd influxdb.SourceUpdate) (*influxdb.Source, error) { - return &influxdb.Source{ - ID: 1, - OrganizationID: 10, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.SourcesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: "read", - Resource: 
influxdb.Resource{ - Type: influxdb.SourcesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update source", - fields: fields{ - SourceService: &mock.SourceService{ - FindSourceByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Source, error) { - return &influxdb.Source{ - ID: 1, - OrganizationID: 10, - }, nil - }, - UpdateSourceFn: func(ctx context.Context, id platform.ID, upd influxdb.SourceUpdate) (*influxdb.Source, error) { - return &influxdb.Source{ - ID: 1, - OrganizationID: 10, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.SourcesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/sources/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewSourceService(tt.fields.SourceService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - _, err := s.UpdateSource(ctx, tt.args.id, influxdb.SourceUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestSourceService_DeleteSource(t *testing.T) { - type fields struct { - SourceService influxdb.SourceService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete source", - fields: fields{ - SourceService: &mock.SourceService{ - FindSourceByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Source, error) { - return &influxdb.Source{ - ID: 1, - OrganizationID: 10, - }, nil - }, - DeleteSourceFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.SourcesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.SourcesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete source", - fields: fields{ - SourceService: &mock.SourceService{ - FindSourceByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Source, error) { - return &influxdb.Source{ - ID: 1, - OrganizationID: 10, - }, nil - }, - DeleteSourceFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.SourcesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/sources/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewSourceService(tt.fields.SourceService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.DeleteSource(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func 
TestSourceService_CreateSource(t *testing.T) { - type fields struct { - SourceService influxdb.SourceService - } - type args struct { - permission influxdb.Permission - orgID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create Source", - fields: fields{ - SourceService: &mock.SourceService{ - CreateSourceFn: func(ctx context.Context, o *influxdb.Source) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.SourcesResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create Source", - fields: fields{ - SourceService: &mock.SourceService{ - CreateSourceFn: func(ctx context.Context, o *influxdb.Source) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.SourcesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/sources is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewSourceService(tt.fields.SourceService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.CreateSource(ctx, &influxdb.Source{OrganizationID: tt.args.orgID}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/authorizer/sql_backup_restore.go b/authorizer/sql_backup_restore.go deleted file mode 100644 index 906b0b5740e..00000000000 --- a/authorizer/sql_backup_restore.go +++ /dev/null @@ -1,57 +0,0 @@ -package authorizer - -import ( - "context" - "io" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/tracing" -) - -var _ influxdb.SqlBackupRestoreService = (*SqlBackupRestoreService)(nil) - -// SqlBackupRestoreService wraps a influxdb.SqlBackupRestoreService and authorizes actions -// against it appropriately. -type SqlBackupRestoreService struct { - s influxdb.SqlBackupRestoreService -} - -// NewSqlBackupRestoreService constructs an instance of an authorizing backup service. -func NewSqlBackupRestoreService(s influxdb.SqlBackupRestoreService) *SqlBackupRestoreService { - return &SqlBackupRestoreService{ - s: s, - } -} - -func (s SqlBackupRestoreService) BackupSqlStore(ctx context.Context, w io.Writer) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if err := IsAllowedAll(ctx, influxdb.OperPermissions()); err != nil { - return err - } - return s.s.BackupSqlStore(ctx, w) -} - -func (s SqlBackupRestoreService) RestoreSqlStore(ctx context.Context, r io.Reader) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if err := IsAllowedAll(ctx, influxdb.OperPermissions()); err != nil { - return err - } - return s.s.RestoreSqlStore(ctx, r) -} - -// The Lock and Unlock methods below do not have authorization checks and should only be used -// when appropriate authorization has already been confirmed, such as behind a middleware. They -// are intended to be used for coordinating the locking and unlocking of the kv and sql metadata -// databases during a backup. 
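Both `BackupSqlStore` and `RestoreSqlStore` above gate on `IsAllowedAll` with the full operator permission set, so a token must hold every operator permission before the call is passed through to the underlying service. A minimal, self-contained sketch of that kind of gate, assuming simplified stand-in types rather than the real `influxdb.Permission` matching rules:

```go
// Hypothetical sketch of an "allowed all" gate: the caller must hold every
// permission in the required set, and the first missing one names the error.
package main

import (
	"errors"
	"fmt"
)

type permission struct {
	Action   string // "read" or "write"
	Resource string // e.g. "authorizations"
}

func isAllowedAll(granted, required []permission) error {
	have := make(map[permission]bool, len(granted))
	for _, p := range granted {
		have[p] = true
	}
	for _, p := range required {
		if !have[p] {
			return errors.New(p.Action + ":" + p.Resource + " is unauthorized")
		}
	}
	return nil
}

func main() {
	// A read-only token lacks the write permissions an operator holds, so the
	// gate fails, much like the "not authorized to do the backup" test below.
	granted := []permission{{"read", "authorizations"}, {"read", "buckets"}}
	required := []permission{{"write", "authorizations"}, {"read", "authorizations"}}
	fmt.Println(isAllowedAll(granted, required)) // write:authorizations is unauthorized
}
```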
They are made available here to allow the calls to pass-through to the -// underlying service. -func (s SqlBackupRestoreService) RLockSqlStore() { - s.s.RLockSqlStore() -} - -func (s SqlBackupRestoreService) RUnlockSqlStore() { - s.s.RUnlockSqlStore() -} diff --git a/authorizer/sql_backup_restore_test.go b/authorizer/sql_backup_restore_test.go deleted file mode 100644 index b7a5ed2dbc7..00000000000 --- a/authorizer/sql_backup_restore_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package authorizer_test - -import ( - "bytes" - "context" - "testing" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/stretchr/testify/require" -) - -func Test_BackupSqlStore(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - permList []influxdb.Permission - wantErr error - }{ - { - "authorized to do the backup", - influxdb.OperPermissions(), - nil, - }, - { - "not authorized to do the backup", - influxdb.ReadAllPermissions(), - &errors.Error{ - Msg: "write:authorizations is unauthorized", - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockSqlBackupRestoreService(ctrlr) - s := authorizer.NewSqlBackupRestoreService(svc) - - w := bytes.NewBuffer([]byte{}) - - if tt.wantErr == nil { - svc.EXPECT(). - BackupSqlStore(gomock.Any(), w). - Return(nil) - } - - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, tt.permList)) - err := s.BackupSqlStore(ctx, w) - require.Equal(t, tt.wantErr, err) - }) - } -} - -func Test_RestoreSqlStore(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - permList []influxdb.Permission - wantErr error - }{ - { - "authorized to do the restore", - influxdb.OperPermissions(), - nil, - }, - { - "not authorized to do the restore", - influxdb.ReadAllPermissions(), - &errors.Error{ - Msg: "write:authorizations is unauthorized", - Code: errors.EUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockSqlBackupRestoreService(ctrlr) - s := authorizer.NewSqlBackupRestoreService(svc) - - w := bytes.NewBuffer([]byte{}) - - if tt.wantErr == nil { - svc.EXPECT(). - RestoreSqlStore(gomock.Any(), w). 
- Return(nil) - } - - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, tt.permList)) - err := s.RestoreSqlStore(ctx, w) - require.Equal(t, tt.wantErr, err) - }) - } -} diff --git a/authorizer/task.go b/authorizer/task.go deleted file mode 100644 index 9538c4a0c1e..00000000000 --- a/authorizer/task.go +++ /dev/null @@ -1,255 +0,0 @@ -package authorizer - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "go.uber.org/zap" -) - -type authError struct { - error - perm influxdb.Permission - auth influxdb.Authorizer -} - -func (ae *authError) AuthzError() error { - return fmt.Errorf("permission failed for auth (%s): %s", ae.auth.Identifier().String(), ae.perm.String()) -} - -var ( - ErrInactiveTask = &errors.Error{ - Code: errors.EInvalid, - Msg: "inactive task", - } - - ErrFailedPermission = &errors.Error{ - Code: errors.EInvalid, - Msg: "unauthorized", - } -) - -type taskServiceValidator struct { - taskmodel.TaskService - log *zap.Logger -} - -// TaskService wraps ts and checks appropriate permissions before calling requested methods on ts. -// Authorization failures are logged to the logger. -func NewTaskService(log *zap.Logger, ts taskmodel.TaskService) taskmodel.TaskService { - return &taskServiceValidator{ - TaskService: ts, - log: log, - } -} - -func (ts *taskServiceValidator) processPermissionError(a influxdb.Authorizer, p influxdb.Permission, err error, loggerFields ...zap.Field) error { - if errors.ErrorCode(err) == errors.EUnauthorized { - ts.log.With(loggerFields...).Info("Authorization failed", - zap.String("user_id", a.GetUserID().String()), - zap.String("auth_kind", a.Kind()), - zap.String("auth_id", a.Identifier().String()), - zap.String("disallowed_permission", p.String()), - ) - return authError{error: ErrFailedPermission, perm: p, auth: a} - } - return err -} - -func (ts *taskServiceValidator) FindTaskByID(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - // Unauthenticated task lookup, to identify the task's organization. - task, err := ts.TaskService.FindTaskByID(ctx, id) - if err != nil { - return nil, err - } - - a, p, err := AuthorizeRead(ctx, influxdb.TasksResourceType, task.ID, task.OrganizationID) - loggerFields := []zap.Field{zap.String("method", "FindTaskByID"), zap.Stringer("task_id", id)} - if err := ts.processPermissionError(a, p, err, loggerFields...); err != nil { - return nil, err - } - return task, nil -} - -func (ts *taskServiceValidator) FindTasks(ctx context.Context, filter taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - // Get the tasks in the organization, without authentication. 
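`FindTaskByID` above, and most of the task methods that follow, share one shape: look the task up without authentication to learn which org owns it, authorize against that org, and only then return or delegate. A small, self-contained stand-in for that flow, not the real `AuthorizeRead`/`processPermissionError` machinery:

```go
// Hypothetical sketch of the lookup-then-authorize pattern used by FindTaskByID:
// fetch the task unauthenticated to identify its org, then check the caller's
// permission against that org before returning it.
package main

import (
	"errors"
	"fmt"
)

type task struct {
	ID    uint64
	OrgID uint64
}

var store = map[uint64]task{7: {ID: 7, OrgID: 10}}

// authorizeRead stands in for the real read check; here it only compares orgs.
func authorizeRead(callerOrg, resourceOrg uint64) error {
	if callerOrg != resourceOrg {
		return errors.New("unauthorized")
	}
	return nil
}

func findTaskByID(callerOrg, id uint64) (task, error) {
	t, ok := store[id] // unauthenticated lookup, to identify the task's organization
	if !ok {
		return task{}, errors.New("task not found")
	}
	if err := authorizeRead(callerOrg, t.OrgID); err != nil {
		return task{}, err
	}
	return t, nil
}

func main() {
	fmt.Println(findTaskByID(10, 7)) // {7 10} <nil>
	fmt.Println(findTaskByID(11, 7)) // {0 0} unauthorized
}
```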
- unauthenticatedTasks, _, err := ts.TaskService.FindTasks(ctx, filter) - if err != nil { - return nil, 0, err - } - return AuthorizeFindTasks(ctx, unauthenticatedTasks) -} - -func (ts *taskServiceValidator) CreateTask(ctx context.Context, t taskmodel.TaskCreate) (*taskmodel.Task, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if !t.OwnerID.Valid() { - return nil, taskmodel.ErrInvalidOwnerID - } - - a, p, err := AuthorizeCreate(ctx, influxdb.TasksResourceType, t.OrganizationID) - loggerFields := []zap.Field{zap.String("method", "CreateTask")} - if err := ts.processPermissionError(a, p, err, loggerFields...); err != nil { - return nil, err - } - return ts.TaskService.CreateTask(ctx, t) -} - -func (ts *taskServiceValidator) UpdateTask(ctx context.Context, id platform.ID, upd taskmodel.TaskUpdate) (*taskmodel.Task, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - // Unauthenticated task lookup, to identify the task's organization. - task, err := ts.TaskService.FindTaskByID(ctx, id) - if err != nil { - return nil, err - } - - a, p, err := AuthorizeWrite(ctx, influxdb.TasksResourceType, task.ID, task.OrganizationID) - loggerFields := []zap.Field{zap.String("method", "UpdateTask"), zap.Stringer("task_id", id)} - if err := ts.processPermissionError(a, p, err, loggerFields...); err != nil { - return nil, err - } - return ts.TaskService.UpdateTask(ctx, id, upd) -} - -func (ts *taskServiceValidator) DeleteTask(ctx context.Context, id platform.ID) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - // Unauthenticated task lookup, to identify the task's organization. - task, err := ts.TaskService.FindTaskByID(ctx, id) - if err != nil { - return err - } - - a, p, err := AuthorizeWrite(ctx, influxdb.TasksResourceType, task.ID, task.OrganizationID) - loggerFields := []zap.Field{zap.String("method", "DeleteTask"), zap.Stringer("task_id", id)} - if err := ts.processPermissionError(a, p, err, loggerFields...); err != nil { - return err - } - return ts.TaskService.DeleteTask(ctx, id) -} - -func (ts *taskServiceValidator) FindLogs(ctx context.Context, filter taskmodel.LogFilter) ([]*taskmodel.Log, int, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - // Look up the task first, through the validator, to ensure we have permission to view the task. - if _, err := ts.FindTaskByID(ctx, filter.Task); err != nil { - return nil, -1, err - } - - // If we can find the task, we can read its logs. - return ts.TaskService.FindLogs(ctx, filter) -} - -func (ts *taskServiceValidator) FindRuns(ctx context.Context, filter taskmodel.RunFilter) ([]*taskmodel.Run, int, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - // Look up the task first, through the validator, to ensure we have permission to view the task. - task, err := ts.FindTaskByID(ctx, filter.Task) - if err != nil { - return nil, -1, err - } - - a, p, err := AuthorizeRead(ctx, influxdb.TasksResourceType, task.ID, task.OrganizationID) - loggerFields := []zap.Field{zap.String("method", "FindRuns"), zap.Stringer("task_id", task.ID)} - if err := ts.processPermissionError(a, p, err, loggerFields...); err != nil { - return nil, -1, err - } - // TODO(lyon): If the user no longer has permission to the organization we might fail or filter here? 
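The read and write checks used by these methods accept either an org-scoped permission or a permission pinned to a specific task ID, which is exactly what the "with org auth" and "with task auth" cases in `task_test.go` below exercise. A hypothetical sketch of that matching rule, with stand-in types rather than the real `influxdb.Permission` logic:

```go
// Hypothetical permission matching: nil OrgID/ID fields act as wildcards.
package main

import "fmt"

type permission struct {
	Action string
	Type   string
	OrgID  *uint64 // nil means "any org"
	ID     *uint64 // nil means "any resource of this type"
}

func allows(p permission, action, typ string, id, orgID uint64) bool {
	if p.Action != action || p.Type != typ {
		return false
	}
	if p.OrgID != nil && *p.OrgID != orgID {
		return false
	}
	if p.ID != nil && *p.ID != id {
		return false
	}
	return true
}

func ptr(v uint64) *uint64 { return &v }

func main() {
	taskID, orgID := uint64(0x7456), uint64(1)
	orgWide := permission{Action: "write", Type: "tasks", OrgID: ptr(orgID)}
	oneTask := permission{Action: "write", Type: "tasks", OrgID: ptr(orgID), ID: ptr(taskID)}
	fmt.Println(allows(orgWide, "write", "tasks", taskID, orgID)) // true: org-scoped auth
	fmt.Println(allows(oneTask, "write", "tasks", taskID, orgID)) // true: task-scoped auth
	fmt.Println(allows(oneTask, "write", "tasks", 0x999, orgID))  // false: different task
}
```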
- return ts.TaskService.FindRuns(ctx, filter) -} - -func (ts *taskServiceValidator) FindRunByID(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - // Unauthenticated task lookup, to identify the task's organization. - task, err := ts.TaskService.FindTaskByID(ctx, taskID) - if err != nil { - return nil, err - } - - a, p, err := AuthorizeRead(ctx, influxdb.TasksResourceType, task.ID, task.OrganizationID) - loggerFields := []zap.Field{zap.String("method", "FindRunByID"), zap.Stringer("task_id", taskID), zap.Stringer("run_id", runID)} - if err := ts.processPermissionError(a, p, err, loggerFields...); err != nil { - return nil, err - } - return ts.TaskService.FindRunByID(ctx, taskID, runID) -} - -func (ts *taskServiceValidator) CancelRun(ctx context.Context, taskID, runID platform.ID) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - // Unauthenticated task lookup, to identify the task's organization. - task, err := ts.TaskService.FindTaskByID(ctx, taskID) - if err != nil { - return err - } - - a, p, err := AuthorizeWrite(ctx, influxdb.TasksResourceType, task.ID, task.OrganizationID) - loggerFields := []zap.Field{zap.String("method", "CancelRun"), zap.Stringer("task_id", taskID), zap.Stringer("run_id", runID)} - if err := ts.processPermissionError(a, p, err, loggerFields...); err != nil { - return err - } - return ts.TaskService.CancelRun(ctx, taskID, runID) -} - -func (ts *taskServiceValidator) RetryRun(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - // Unauthenticated task lookup, to identify the task's organization. - task, err := ts.TaskService.FindTaskByID(ctx, taskID) - if err != nil { - return nil, err - } - - if task.Status != string(taskmodel.TaskActive) { - return nil, ErrInactiveTask - } - - a, p, err := AuthorizeWrite(ctx, influxdb.TasksResourceType, task.ID, task.OrganizationID) - loggerFields := []zap.Field{zap.String("method", "RetryRun"), zap.Stringer("task_id", taskID), zap.Stringer("run_id", runID)} - if err := ts.processPermissionError(a, p, err, loggerFields...); err != nil { - return nil, err - } - return ts.TaskService.RetryRun(ctx, taskID, runID) -} - -func (ts *taskServiceValidator) ForceRun(ctx context.Context, taskID platform.ID, scheduledFor int64) (*taskmodel.Run, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - // Unauthenticated task lookup, to identify the task's organization. 
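`RetryRun` and `ForceRun` add one extra guard: an inactive task is rejected with `ErrInactiveTask` before the write permission is even consulted. A compact stand-in for that ordering (lookup, status guard, then write authorization), not the actual wrapper:

```go
// Minimal sketch of the guard order in RetryRun/ForceRun above.
package main

import (
	"errors"
	"fmt"
)

var errInactiveTask = errors.New("inactive task")

func forceRun(status string, canWrite bool) error {
	if status != "active" {
		return errInactiveTask // rejected before any permission check
	}
	if !canWrite {
		return errors.New("unauthorized")
	}
	return nil // at this point the call is delegated to the underlying TaskService
}

func main() {
	fmt.Println(forceRun("inactive", true)) // inactive task
	fmt.Println(forceRun("active", false))  // unauthorized
	fmt.Println(forceRun("active", true))   // <nil>
}
```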
- task, err := ts.TaskService.FindTaskByID(ctx, taskID) - if err != nil { - return nil, err - } - - if task.Status != string(taskmodel.TaskActive) { - return nil, ErrInactiveTask - } - - a, p, err := AuthorizeWrite(ctx, influxdb.TasksResourceType, task.ID, task.OrganizationID) - loggerFields := []zap.Field{zap.String("method", "ForceRun"), zap.Stringer("task_id", taskID)} - if err := ts.processPermissionError(a, p, err, loggerFields...); err != nil { - return nil, err - } - return ts.TaskService.ForceRun(ctx, taskID, scheduledFor) -} diff --git a/authorizer/task_test.go b/authorizer/task_test.go deleted file mode 100644 index 2b504bc5210..00000000000 --- a/authorizer/task_test.go +++ /dev/null @@ -1,609 +0,0 @@ -package authorizer_test - -import ( - "context" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - "github.com/influxdata/influxdb/v2/authorizer" - pctx "github.com/influxdata/influxdb/v2/context" - _ "github.com/influxdata/influxdb/v2/fluxinit/static" - "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/pkg/errors" - "go.uber.org/zap/zaptest" -) - -func TestOnboardingValidation(t *testing.T) { - _, onboard := setup(t) - - ts := authorizer.NewTaskService(zaptest.NewLogger(t), mockTaskService(3, 2, 1)) - - r, err := onboard.OnboardInitialUser(context.Background(), &influxdb.OnboardingRequest{ - User: "Setec Astronomy", - Password: "too many secrets", - Org: "thing", - Bucket: "holder", - RetentionPeriodSeconds: 1, - }) - if err != nil { - t.Fatal(err) - } - - ctx := pctx.SetAuthorizer(context.Background(), r.Auth) - - _, err = ts.CreateTask(ctx, taskmodel.TaskCreate{ - OrganizationID: r.Org.ID, - OwnerID: r.Auth.GetUserID(), - Flux: `option task = { - name: "my_task", - every: 1s, -} -from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")`, - }) - if err != nil { - t.Fatal(err) - } -} - -func mockTaskService(orgID, taskID, runID platform.ID) taskmodel.TaskService { - task := taskmodel.Task{ - ID: taskID, - OrganizationID: orgID, - Name: "cows", - Status: string(taskmodel.TaskActive), - Flux: `option task = { - name: "my_task", - every: 1s, -} -from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")`, - Every: "1s", - } - - log := taskmodel.Log{Message: "howdy partner"} - - run := taskmodel.Run{ - ID: runID, - TaskID: taskID, - Status: "completed", - ScheduledFor: time.Now().UTC(), - StartedAt: time.Now().UTC().Add(time.Second * 3), - FinishedAt: time.Now().UTC().Add(time.Second * 10), - Log: []taskmodel.Log{log}, - } - - return &mock.TaskService{ - FindTaskByIDFn: func(context.Context, platform.ID) (*taskmodel.Task, error) { - return &task, nil - }, - FindTasksFn: func(context.Context, taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - return []*taskmodel.Task{&task}, 1, nil - }, - CreateTaskFn: func(_ context.Context, tc taskmodel.TaskCreate) (*taskmodel.Task, error) { - taskCopy := task - return &taskCopy, nil - }, - UpdateTaskFn: func(context.Context, platform.ID, taskmodel.TaskUpdate) (*taskmodel.Task, error) { - return 
&task, nil - }, - DeleteTaskFn: func(context.Context, platform.ID) error { - return nil - }, - FindLogsFn: func(context.Context, taskmodel.LogFilter) ([]*taskmodel.Log, int, error) { - return []*taskmodel.Log{&log}, 1, nil - }, - FindRunsFn: func(context.Context, taskmodel.RunFilter) ([]*taskmodel.Run, int, error) { - return []*taskmodel.Run{&run}, 1, nil - }, - FindRunByIDFn: func(context.Context, platform.ID, platform.ID) (*taskmodel.Run, error) { - return &run, nil - }, - CancelRunFn: func(context.Context, platform.ID, platform.ID) error { - return nil - }, - RetryRunFn: func(context.Context, platform.ID, platform.ID) (*taskmodel.Run, error) { - return &run, nil - }, - ForceRunFn: func(context.Context, platform.ID, int64) (*taskmodel.Run, error) { - return &run, nil - }, - } -} - -func TestValidations(t *testing.T) { - var ( - taskID = platform.ID(0x7456) - runID = platform.ID(0x402) - otherOrg = &influxdb.Organization{Name: "other_org"} - ) - - svc, onboard := setup(t) - - r, err := onboard.OnboardInitialUser(context.Background(), &influxdb.OnboardingRequest{ - User: "Setec Astronomy", - Password: "too many secrets", - Org: "thing", - Bucket: "holder", - RetentionPeriodSeconds: 1, - }) - if err != nil { - t.Fatal(err) - } - - if err := svc.CreateOrganization(context.Background(), otherOrg); err != nil { - t.Fatal(err) - } - - otherBucket := &influxdb.Bucket{ - Name: "other_bucket", - OrgID: otherOrg.ID, - } - - if err = svc.CreateBucket(context.Background(), otherBucket); err != nil { - t.Fatal(err) - } - - var ( - orgID = r.Org.ID - validTaskService = authorizer.NewTaskService(zaptest.NewLogger(t), mockTaskService(orgID, taskID, runID)) - - // Read all tasks in org. - orgReadAllTaskPermissions = []influxdb.Permission{ - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.TasksResourceType, OrgID: &orgID}}, - } - - // Read all tasks in some other org. - wrongOrgReadAllTaskPermissions = []influxdb.Permission{ - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.TasksResourceType, OrgID: &taskID}}, - } - - // Write all tasks in org, no specific bucket permissions. - orgWriteAllTaskPermissions = []influxdb.Permission{ - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.TasksResourceType, OrgID: &orgID}}, - } - - // Write all tasks in org, and read/write the onboarding bucket. - orgWriteAllTaskBucketPermissions = []influxdb.Permission{ - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.TasksResourceType, OrgID: &orgID}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.BucketsResourceType, OrgID: &orgID, ID: &r.Bucket.ID}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.BucketsResourceType, OrgID: &orgID, ID: &r.Bucket.ID}}, - } - - // Write the specific task, and read/write the onboarding bucket. - orgWriteTaskBucketPermissions = []influxdb.Permission{ - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.TasksResourceType, OrgID: &orgID, ID: &taskID}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.BucketsResourceType, OrgID: &orgID, ID: &r.Bucket.ID}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.BucketsResourceType, OrgID: &orgID, ID: &r.Bucket.ID}}, - } - - // Permission only to specifically write the target task. 
- orgWriteTaskPermissions = []influxdb.Permission{ - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.TasksResourceType, OrgID: &orgID, ID: &taskID}}, - } - - // Permission only to specifically read the target task. - orgReadTaskPermissions = []influxdb.Permission{ - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.TasksResourceType, OrgID: &orgID, ID: &taskID}}, - } - ) - - tests := []struct { - name string - check func(context.Context, taskmodel.TaskService) error - auth *influxdb.Authorization - }{ - { - name: "create failure", - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, err := svc.CreateTask(ctx, taskmodel.TaskCreate{ - OrganizationID: r.Org.ID, - Flux: `option task = { - name: "my_task", - every: 1s, -} -from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")`, - }) - if err == nil { - return errors.New("failed to error without permission") - } - return nil - }, - auth: &influxdb.Authorization{}, - }, - { - name: "create success", - auth: r.Auth, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, err := svc.CreateTask(ctx, taskmodel.TaskCreate{ - OrganizationID: r.Org.ID, - OwnerID: r.Auth.GetUserID(), - Flux: `option task = { - name: "my_task", - every: 1s, -} -from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")`, - }) - return err - }, - }, - { - name: "FindTaskByID missing auth", - auth: &influxdb.Authorization{Permissions: []influxdb.Permission{}}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, err := svc.FindTaskByID(ctx, taskID) - if err == nil { - return errors.New("returned without error without permission") - } - return nil - }, - }, - { - name: "FindTaskByID with org auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, err := svc.FindTaskByID(ctx, taskID) - return err - }, - }, - { - name: "FindTaskByID with task auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgReadTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, err := svc.FindTaskByID(ctx, taskID) - return err - }, - }, - { - name: "FindTasks with bad auth", - auth: &influxdb.Authorization{Status: "active", Permissions: wrongOrgReadAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - ts, _, err := svc.FindTasks(ctx, taskmodel.TaskFilter{ - OrganizationID: &orgID, - }) - if err == nil && len(ts) > 0 { - return errors.New("returned no error with a invalid auth") - } - return nil - }, - }, - { - name: "FindTasks with org auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, _, err := svc.FindTasks(ctx, taskmodel.TaskFilter{ - OrganizationID: &orgID, - }) - return err - }, - }, - { - name: "FindTasks with task auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgReadTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, _, err := svc.FindTasks(ctx, taskmodel.TaskFilter{ - OrganizationID: &orgID, - }) - return err - }, - }, - { - name: "FindTasks without org filter", - auth: &influxdb.Authorization{Status: "active", Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, _, err := 
svc.FindTasks(ctx, taskmodel.TaskFilter{}) - return err - }, - }, - { - name: "UpdateTask with readonly auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - flux := `option task = { - name: "my_task", - every: 1s, -} -from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` - _, err := svc.UpdateTask(ctx, taskID, taskmodel.TaskUpdate{ - Flux: &flux, - }) - if err == nil { - return errors.New("returned no error with a invalid auth") - } - return nil - }, - }, - { - name: "UpdateTask with org auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteAllTaskBucketPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - flux := `option task = { - name: "my_task", - every: 1s, - } - from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` - _, err := svc.UpdateTask(ctx, taskID, taskmodel.TaskUpdate{ - Flux: &flux, - }) - return err - }, - }, - { - name: "UpdateTask with task auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteTaskBucketPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - flux := `option task = { - name: "my_task", - every: 1s, -} -from(bucket:"holder") |> range(start:-5m) |> to(bucket:"holder", org:"thing")` - _, err := svc.UpdateTask(ctx, taskID, taskmodel.TaskUpdate{ - Flux: &flux, - }) - return err - }, - }, - { - name: "DeleteTask missing auth", - auth: &influxdb.Authorization{Permissions: []influxdb.Permission{}}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - err := svc.DeleteTask(ctx, taskID) - if err == nil { - return errors.New("returned without error without permission") - } - return nil - }, - }, - { - name: "DeleteTask readonly auth", - auth: &influxdb.Authorization{Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - err := svc.DeleteTask(ctx, taskID) - if err == nil { - return errors.New("returned without error without permission") - } - return nil - }, - }, - { - name: "DeleteTask with org auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - err := svc.DeleteTask(ctx, taskID) - return err - }, - }, - { - name: "DeleteTask with task auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - err := svc.DeleteTask(ctx, taskID) - return err - }, - }, - { - name: "FindLogs with bad auth", - auth: &influxdb.Authorization{Status: "active", Permissions: wrongOrgReadAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, _, err := svc.FindLogs(ctx, taskmodel.LogFilter{ - Task: taskID, - }) - if err == nil { - return errors.New("returned no error with a invalid auth") - } - return nil - }, - }, - { - name: "FindLogs with org auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, _, err := svc.FindLogs(ctx, taskmodel.LogFilter{ - Task: taskID, - }) - return err - }, - }, - { - name: "FindLogs with task auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgReadTaskPermissions}, - check: func(ctx context.Context, svc 
taskmodel.TaskService) error { - _, _, err := svc.FindLogs(ctx, taskmodel.LogFilter{ - Task: taskID, - }) - return err - }, - }, - { - name: "FindRuns with bad auth", - auth: &influxdb.Authorization{Status: "active", Permissions: wrongOrgReadAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, _, err := svc.FindRuns(ctx, taskmodel.RunFilter{ - Task: taskID, - }) - if err == nil { - return errors.New("returned no error with a invalid auth") - } - return nil - }, - }, - { - name: "FindRuns with org auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, _, err := svc.FindRuns(ctx, taskmodel.RunFilter{ - Task: taskID, - }) - return err - }, - }, - { - name: "FindRuns with task auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgReadTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, _, err := svc.FindRuns(ctx, taskmodel.RunFilter{ - Task: taskID, - }) - return err - }, - }, - { - name: "FindRunByID missing auth", - auth: &influxdb.Authorization{Permissions: []influxdb.Permission{}}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, err := svc.FindRunByID(ctx, taskID, 10) - if err == nil { - return errors.New("returned without error without permission") - } - return nil - }, - }, - { - name: "FindRunByID with org auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgReadAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, err := svc.FindRunByID(ctx, taskID, 10) - return err - }, - }, - { - name: "FindRunByID with task auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgReadTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, err := svc.FindRunByID(ctx, taskID, 10) - return err - }, - }, - { - name: "CancelRun with bad auth", - auth: &influxdb.Authorization{Status: "active", Permissions: wrongOrgReadAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - err := svc.CancelRun(ctx, taskID, 10) - if err == nil { - return errors.New("returned no error with a invalid auth") - } - return nil - }, - }, - { - name: "CancelRun with org auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - err := svc.CancelRun(ctx, taskID, 10) - return err - }, - }, - { - name: "CancelRun with task auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - err := svc.CancelRun(ctx, taskID, 10) - return err - }, - }, - { - name: "RetryRun with bad auth", - auth: &influxdb.Authorization{Status: "active", Permissions: wrongOrgReadAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, err := svc.RetryRun(ctx, taskID, 10) - if err == nil { - return errors.New("returned no error with a invalid auth") - } - return nil - }, - }, - { - name: "RetryRun with org auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, err := svc.RetryRun(ctx, taskID, 10) - return err - }, - }, - { - name: "RetryRun with task auth", - auth: &influxdb.Authorization{Status: 
"active", Permissions: orgWriteTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, err := svc.RetryRun(ctx, taskID, 10) - return err - }, - }, - { - name: "ForceRun with bad auth", - auth: &influxdb.Authorization{Status: "active", Permissions: wrongOrgReadAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, err := svc.ForceRun(ctx, taskID, 10000) - if err == nil { - return errors.New("returned no error with a invalid auth") - } - return nil - }, - }, - { - name: "ForceRun with org auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteAllTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, err := svc.ForceRun(ctx, taskID, 10000) - return err - }, - }, - { - name: "ForceRun with task auth", - auth: &influxdb.Authorization{Status: "active", Permissions: orgWriteTaskPermissions}, - check: func(ctx context.Context, svc taskmodel.TaskService) error { - _, err := svc.ForceRun(ctx, taskID, 10000) - return err - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - ctx := pctx.SetAuthorizer(context.Background(), test.auth) - if err := test.check(ctx, validTaskService); err != nil { - if aerr, ok := err.(http.AuthzError); ok { - t.Error(aerr.AuthzError()) - } - t.Error(err) - } - }) - } -} - -func setup(t *testing.T) (*tenant.Service, influxdb.OnboardingService) { - t.Helper() - - store := newStore(t) - - svc := tenant.NewService(tenant.NewStore(store)) - - authStore, err := authorization.NewStore(store) - if err != nil { - t.Fatal(err) - } - - authSvc := authorization.NewService(authStore, svc) - - onboard := tenant.NewOnboardService(svc, authSvc) - - return svc, onboard -} - -func newStore(t *testing.T) kv.Store { - t.Helper() - - store := inmem.NewKVStore() - - if err := all.Up(context.Background(), zaptest.NewLogger(t), store); err != nil { - t.Fatal(err) - } - - return store -} diff --git a/authorizer/telegraf.go b/authorizer/telegraf.go deleted file mode 100644 index 614c7eabd95..00000000000 --- a/authorizer/telegraf.go +++ /dev/null @@ -1,80 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.TelegrafConfigStore = (*TelegrafConfigService)(nil) - -// TelegrafConfigService wraps a influxdb.TelegrafConfigStore and authorizes actions -// against it appropriately. -type TelegrafConfigService struct { - s influxdb.TelegrafConfigStore - influxdb.UserResourceMappingService -} - -// NewTelegrafConfigService constructs an instance of an authorizing telegraf service. -func NewTelegrafConfigService(s influxdb.TelegrafConfigStore, urm influxdb.UserResourceMappingService) *TelegrafConfigService { - return &TelegrafConfigService{ - s: s, - UserResourceMappingService: urm, - } -} - -// FindTelegrafConfigByID checks to see if the authorizer on context has read access to the id provided. -func (s *TelegrafConfigService) FindTelegrafConfigByID(ctx context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { - tc, err := s.s.FindTelegrafConfigByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, influxdb.TelegrafsResourceType, tc.ID, tc.OrgID); err != nil { - return nil, err - } - return tc, nil -} - -// FindTelegrafConfigs retrieves all telegraf configs that match the provided filter and then filters the list down to only the resources that are authorized. 
-func (s *TelegrafConfigService) FindTelegrafConfigs(ctx context.Context, filter influxdb.TelegrafConfigFilter, opt ...influxdb.FindOptions) ([]*influxdb.TelegrafConfig, int, error) { - // TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data - // will likely be expensive. - ts, _, err := s.s.FindTelegrafConfigs(ctx, filter, opt...) - if err != nil { - return nil, 0, err - } - return AuthorizeFindTelegrafs(ctx, ts) -} - -// CreateTelegrafConfig checks to see if the authorizer on context has write access to the global telegraf config resource. -func (s *TelegrafConfigService) CreateTelegrafConfig(ctx context.Context, tc *influxdb.TelegrafConfig, userID platform.ID) error { - if _, _, err := AuthorizeCreate(ctx, influxdb.TelegrafsResourceType, tc.OrgID); err != nil { - return err - } - return s.s.CreateTelegrafConfig(ctx, tc, userID) -} - -// UpdateTelegrafConfig checks to see if the authorizer on context has write access to the telegraf config provided. -func (s *TelegrafConfigService) UpdateTelegrafConfig(ctx context.Context, id platform.ID, upd *influxdb.TelegrafConfig, userID platform.ID) (*influxdb.TelegrafConfig, error) { - tc, err := s.FindTelegrafConfigByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.TelegrafsResourceType, tc.ID, tc.OrgID); err != nil { - return nil, err - } - return s.s.UpdateTelegrafConfig(ctx, id, upd, userID) -} - -// DeleteTelegrafConfig checks to see if the authorizer on context has write access to the telegraf config provided. -func (s *TelegrafConfigService) DeleteTelegrafConfig(ctx context.Context, id platform.ID) error { - tc, err := s.FindTelegrafConfigByID(ctx, id) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.TelegrafsResourceType, tc.ID, tc.OrgID); err != nil { - return err - } - return s.s.DeleteTelegrafConfig(ctx, id) -} diff --git a/authorizer/telegraf_test.go b/authorizer/telegraf_test.go deleted file mode 100644 index 85581e19d82..00000000000 --- a/authorizer/telegraf_test.go +++ /dev/null @@ -1,544 +0,0 @@ -package authorizer_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var telegrafCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.TelegrafConfig) []*influxdb.TelegrafConfig { - out := append([]*influxdb.TelegrafConfig(nil), in...) 
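The unauthorized errors expected throughout these tests (for example `read:orgs/000000000000000a/telegrafs/0000000000000001 is unauthorized`) embed IDs as 16-digit, zero-padded hex strings, so org 10 renders as `000000000000000a` and resource 1 as `0000000000000001`. A tiny illustration of that encoding, not the `platform.ID` implementation itself:

```go
// Hypothetical helper showing the 16-hex-digit ID encoding used in the
// expected error messages of these tests.
package main

import "fmt"

func idString(id uint64) string {
	return fmt.Sprintf("%016x", id)
}

func main() {
	msg := fmt.Sprintf("read:orgs/%s/telegrafs/%s is unauthorized", idString(10), idString(1))
	fmt.Println(msg) // read:orgs/000000000000000a/telegrafs/0000000000000001 is unauthorized
}
```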
// Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func TestTelegrafConfigStore_FindTelegrafConfigByID(t *testing.T) { - type fields struct { - TelegrafConfigStore influxdb.TelegrafConfigStore - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - TelegrafConfigStore: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctx context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { - return &influxdb.TelegrafConfig{ - ID: id, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.TelegrafsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - TelegrafConfigStore: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctx context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { - return &influxdb.TelegrafConfig{ - ID: id, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.TelegrafsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/telegrafs/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewTelegrafConfigService(tt.fields.TelegrafConfigStore, mock.NewUserResourceMappingService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindTelegrafConfigByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestTelegrafConfigStore_FindTelegrafConfigs(t *testing.T) { - type fields struct { - TelegrafConfigStore influxdb.TelegrafConfigStore - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - telegrafs []*influxdb.TelegrafConfig - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all telegrafs", - fields: fields{ - TelegrafConfigStore: &mock.TelegrafConfigStore{ - FindTelegrafConfigsF: func(ctx context.Context, filter influxdb.TelegrafConfigFilter, opt ...influxdb.FindOptions) ([]*influxdb.TelegrafConfig, int, error) { - return []*influxdb.TelegrafConfig{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - { - ID: 3, - OrgID: 11, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.TelegrafsResourceType, - }, - }, - }, - wants: wants{ - telegrafs: []*influxdb.TelegrafConfig{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - { - ID: 3, - OrgID: 11, - }, - }, - }, - }, - { - name: "authorized to access a single orgs telegrafs", - fields: fields{ - TelegrafConfigStore: &mock.TelegrafConfigStore{ - FindTelegrafConfigsF: func(ctx context.Context, filter influxdb.TelegrafConfigFilter, opt ...influxdb.FindOptions) ([]*influxdb.TelegrafConfig, int, error) { - return 
[]*influxdb.TelegrafConfig{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - { - ID: 3, - OrgID: 11, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.TelegrafsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - telegrafs: []*influxdb.TelegrafConfig{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewTelegrafConfigService(tt.fields.TelegrafConfigStore, mock.NewUserResourceMappingService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - ts, _, err := s.FindTelegrafConfigs(ctx, influxdb.TelegrafConfigFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(ts, tt.wants.telegrafs, telegrafCmpOptions...); diff != "" { - t.Errorf("telegrafs are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestTelegrafConfigStore_UpdateTelegrafConfig(t *testing.T) { - type fields struct { - TelegrafConfigStore influxdb.TelegrafConfigStore - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update telegraf", - fields: fields{ - TelegrafConfigStore: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctc context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { - return &influxdb.TelegrafConfig{ - ID: 1, - OrgID: 10, - }, nil - }, - UpdateTelegrafConfigF: func(ctx context.Context, id platform.ID, upd *influxdb.TelegrafConfig, userID platform.ID) (*influxdb.TelegrafConfig, error) { - return &influxdb.TelegrafConfig{ - ID: 1, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.TelegrafsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.TelegrafsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update telegraf", - fields: fields{ - TelegrafConfigStore: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctc context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { - return &influxdb.TelegrafConfig{ - ID: 1, - OrgID: 10, - }, nil - }, - UpdateTelegrafConfigF: func(ctx context.Context, id platform.ID, upd *influxdb.TelegrafConfig, userID platform.ID) (*influxdb.TelegrafConfig, error) { - return &influxdb.TelegrafConfig{ - ID: 1, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.TelegrafsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/telegrafs/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewTelegrafConfigService(tt.fields.TelegrafConfigStore, mock.NewUserResourceMappingService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, 
tt.args.permissions)) - - _, err := s.UpdateTelegrafConfig(ctx, tt.args.id, &influxdb.TelegrafConfig{}, platform.ID(1)) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestTelegrafConfigStore_DeleteTelegrafConfig(t *testing.T) { - type fields struct { - TelegrafConfigStore influxdb.TelegrafConfigStore - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete telegraf", - fields: fields{ - TelegrafConfigStore: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctc context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { - return &influxdb.TelegrafConfig{ - ID: 1, - OrgID: 10, - }, nil - }, - DeleteTelegrafConfigF: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.TelegrafsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.TelegrafsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete telegraf", - fields: fields{ - TelegrafConfigStore: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctc context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { - return &influxdb.TelegrafConfig{ - ID: 1, - OrgID: 10, - }, nil - }, - DeleteTelegrafConfigF: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.TelegrafsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/telegrafs/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewTelegrafConfigService(tt.fields.TelegrafConfigStore, mock.NewUserResourceMappingService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.DeleteTelegrafConfig(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestTelegrafConfigStore_CreateTelegrafConfig(t *testing.T) { - type fields struct { - TelegrafConfigStore influxdb.TelegrafConfigStore - } - type args struct { - permission influxdb.Permission - orgID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create telegraf", - fields: fields{ - TelegrafConfigStore: &mock.TelegrafConfigStore{ - CreateTelegrafConfigF: func(ctx context.Context, tc *influxdb.TelegrafConfig, userID platform.ID) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.TelegrafsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create telegraf", - fields: fields{ - TelegrafConfigStore: &mock.TelegrafConfigStore{ - CreateTelegrafConfigF: func(ctx context.Context, tc *influxdb.TelegrafConfig, userID platform.ID) error 
{ - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.TelegrafsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/telegrafs is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewTelegrafConfigService(tt.fields.TelegrafConfigStore, mock.NewUserResourceMappingService()) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.CreateTelegrafConfig(ctx, &influxdb.TelegrafConfig{OrgID: tt.args.orgID}, platform.ID(1)) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/authorizer/urm.go b/authorizer/urm.go deleted file mode 100644 index b59b0d75ad3..00000000000 --- a/authorizer/urm.go +++ /dev/null @@ -1,65 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -type OrgIDResolver interface { - FindResourceOrganizationID(ctx context.Context, rt influxdb.ResourceType, id platform.ID) (platform.ID, error) -} - -type URMService struct { - s influxdb.UserResourceMappingService - orgIDResolver OrgIDResolver -} - -func NewURMService(orgIDResolver OrgIDResolver, s influxdb.UserResourceMappingService) *URMService { - return &URMService{ - s: s, - orgIDResolver: orgIDResolver, - } -} - -func (s *URMService) FindUserResourceMappings(ctx context.Context, filter influxdb.UserResourceMappingFilter, opt ...influxdb.FindOptions) ([]*influxdb.UserResourceMapping, int, error) { - urms, _, err := s.s.FindUserResourceMappings(ctx, filter, opt...) 
- if err != nil { - return nil, 0, err - } - return AuthorizeFindUserResourceMappings(ctx, s.orgIDResolver, urms) -} - -func (s *URMService) CreateUserResourceMapping(ctx context.Context, m *influxdb.UserResourceMapping) error { - orgID, err := s.orgIDResolver.FindResourceOrganizationID(ctx, m.ResourceType, m.ResourceID) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, m.ResourceType, m.ResourceID, orgID); err != nil { - return err - } - return s.s.CreateUserResourceMapping(ctx, m) -} - -func (s *URMService) DeleteUserResourceMapping(ctx context.Context, resourceID platform.ID, userID platform.ID) error { - f := influxdb.UserResourceMappingFilter{ResourceID: resourceID, UserID: userID} - urms, _, err := s.s.FindUserResourceMappings(ctx, f) - if err != nil { - return err - } - - for _, urm := range urms { - orgID, err := s.orgIDResolver.FindResourceOrganizationID(ctx, urm.ResourceType, urm.ResourceID) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, urm.ResourceType, urm.ResourceID, orgID); err != nil { - return err - } - if err := s.s.DeleteUserResourceMapping(ctx, urm.ResourceID, urm.UserID); err != nil { - return err - } - } - return nil -} diff --git a/authorizer/urm_test.go b/authorizer/urm_test.go deleted file mode 100644 index 8d2152fde3e..00000000000 --- a/authorizer/urm_test.go +++ /dev/null @@ -1,260 +0,0 @@ -package authorizer_test - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -type OrgService struct { - OrgID platform.ID -} - -func (s *OrgService) FindResourceOrganizationID(ctx context.Context, rt influxdb.ResourceType, id platform.ID) (platform.ID, error) { - return s.OrgID, nil -} - -func TestURMService_FindUserResourceMappings(t *testing.T) { - type fields struct { - UserResourceMappingService influxdb.UserResourceMappingService - OrgService authorizer.OrgIDResolver - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - urms []*influxdb.UserResourceMapping - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all users", - fields: fields{ - OrgService: &OrgService{OrgID: 10}, - UserResourceMappingService: &mock.UserResourceMappingService{ - FindMappingsFn: func(ctx context.Context, filter influxdb.UserResourceMappingFilter) ([]*influxdb.UserResourceMapping, int, error) { - return []*influxdb.UserResourceMapping{ - { - ResourceID: 1, - ResourceType: influxdb.BucketsResourceType, - }, - { - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - { - ResourceID: 3, - ResourceType: influxdb.BucketsResourceType, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - urms: []*influxdb.UserResourceMapping{ - { - ResourceID: 1, - ResourceType: influxdb.BucketsResourceType, - }, - { - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - { - ResourceID: 3, - ResourceType: 
influxdb.BucketsResourceType, - }, - }, - }, - }, - { - name: "authorized to see all users", - fields: fields{ - OrgService: &OrgService{OrgID: 10}, - UserResourceMappingService: &mock.UserResourceMappingService{ - FindMappingsFn: func(ctx context.Context, filter influxdb.UserResourceMappingFilter) ([]*influxdb.UserResourceMapping, int, error) { - return []*influxdb.UserResourceMapping{ - { - ResourceID: 1, - ResourceType: influxdb.BucketsResourceType, - }, - { - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - { - ResourceID: 3, - ResourceType: influxdb.BucketsResourceType, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(11), - }, - }, - }, - wants: wants{ - urms: []*influxdb.UserResourceMapping{}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewURMService(tt.fields.OrgService, tt.fields.UserResourceMappingService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - urms, _, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(urms, tt.wants.urms); diff != "" { - t.Errorf("urms are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestURMService_WriteUserResourceMapping(t *testing.T) { - type fields struct { - UserResourceMappingService influxdb.UserResourceMappingService - OrgService authorizer.OrgIDResolver - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to write urm", - fields: fields{ - OrgService: &OrgService{OrgID: 10}, - UserResourceMappingService: &mock.UserResourceMappingService{ - CreateMappingFn: func(ctx context.Context, m *influxdb.UserResourceMapping) error { - return nil - }, - DeleteMappingFn: func(ctx context.Context, rid, uid platform.ID) error { - return nil - }, - FindMappingsFn: func(ctx context.Context, filter influxdb.UserResourceMappingFilter) ([]*influxdb.UserResourceMapping, int, error) { - return []*influxdb.UserResourceMapping{ - { - ResourceID: 1, - ResourceType: influxdb.BucketsResourceType, - UserID: 100, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to write urm", - fields: fields{ - OrgService: &OrgService{OrgID: 10}, - UserResourceMappingService: &mock.UserResourceMappingService{ - CreateMappingFn: func(ctx context.Context, m *influxdb.UserResourceMapping) error { - return nil - }, - DeleteMappingFn: func(ctx context.Context, rid, uid platform.ID) error { - return nil - }, - FindMappingsFn: func(ctx context.Context, filter influxdb.UserResourceMappingFilter) ([]*influxdb.UserResourceMapping, int, error) { - return []*influxdb.UserResourceMapping{ - { - ResourceID: 1, - ResourceType: influxdb.BucketsResourceType, - UserID: 100, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(11), - }, 
- }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/buckets/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewURMService(tt.fields.OrgService, tt.fields.UserResourceMappingService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - t.Run("create urm", func(t *testing.T) { - err := s.CreateUserResourceMapping(ctx, &influxdb.UserResourceMapping{ResourceType: influxdb.BucketsResourceType, ResourceID: 1}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - - t.Run("delete urm", func(t *testing.T) { - err := s.DeleteUserResourceMapping(ctx, 1, 100) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - - }) - } -} diff --git a/authorizer/user.go b/authorizer/user.go deleted file mode 100644 index 68702212551..00000000000 --- a/authorizer/user.go +++ /dev/null @@ -1,86 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var _ influxdb.UserService = (*UserService)(nil) - -// UserService wraps a influxdb.UserService and authorizes actions -// against it appropriately. -type UserService struct { - s influxdb.UserService -} - -// NewUserService constructs an instance of an authorizing user service. -func NewUserService(s influxdb.UserService) *UserService { - return &UserService{ - s: s, - } -} - -// FindUserByID checks to see if the authorizer on context has read access to the id provided. -func (s *UserService) FindUserByID(ctx context.Context, id platform.ID) (*influxdb.User, error) { - if _, _, err := AuthorizeReadResource(ctx, influxdb.UsersResourceType, id); err != nil { - return nil, err - } - return s.s.FindUserByID(ctx, id) -} - -// FindUser retrieves the user and checks to see if the authorizer on context has read access to the user. -func (s *UserService) FindUser(ctx context.Context, filter influxdb.UserFilter) (*influxdb.User, error) { - u, err := s.s.FindUser(ctx, filter) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeReadResource(ctx, influxdb.UsersResourceType, u.ID); err != nil { - return nil, err - } - return u, nil -} - -// FindUsers retrieves all users that match the provided filter and then filters the list down to only the resources that are authorized. -func (s *UserService) FindUsers(ctx context.Context, filter influxdb.UserFilter, opt ...influxdb.FindOptions) ([]*influxdb.User, int, error) { - // TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data - // will likely be expensive. - us, _, err := s.s.FindUsers(ctx, filter, opt...) - if err != nil { - return nil, 0, err - } - return AuthorizeFindUsers(ctx, us) -} - -// CreateUser checks to see if the authorizer on context has write access to the global users resource. -func (s *UserService) CreateUser(ctx context.Context, o *influxdb.User) error { - if _, _, err := AuthorizeWriteGlobal(ctx, influxdb.UsersResourceType); err != nil { - return err - } - return s.s.CreateUser(ctx, o) -} - -// UpdateUser checks to see if the authorizer on context has write access to the user provided. 
-func (s *UserService) UpdateUser(ctx context.Context, id platform.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { - if _, _, err := AuthorizeWriteResource(ctx, influxdb.UsersResourceType, id); err != nil { - return nil, err - } - return s.s.UpdateUser(ctx, id, upd) -} - -// DeleteUser checks to see if the authorizer on context has write access to the user provided. -func (s *UserService) DeleteUser(ctx context.Context, id platform.ID) error { - if _, _, err := AuthorizeWriteResource(ctx, influxdb.UsersResourceType, id); err != nil { - return err - } - return s.s.DeleteUser(ctx, id) -} - -func (s *UserService) FindPermissionForUser(ctx context.Context, uid platform.ID) (influxdb.PermissionSet, error) { - return nil, &errors.Error{ - Code: errors.EInternal, - Msg: "not implemented", - } -} diff --git a/authorizer/user_test.go b/authorizer/user_test.go deleted file mode 100644 index f960adfa916..00000000000 --- a/authorizer/user_test.go +++ /dev/null @@ -1,559 +0,0 @@ -package authorizer_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var userCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.User) []*influxdb.User { - out := append([]*influxdb.User(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func TestUserService_FindUserByID(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - UserService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ - ID: id, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - UserService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ - ID: id, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:users/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewUserService(tt.fields.UserService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := 
s.FindUserByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestUserService_FindUser(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access user", - fields: fields{ - UserService: &mock.UserService{ - FindUserFn: func(ctx context.Context, filter influxdb.UserFilter) (*influxdb.User, error) { - return &influxdb.User{ - ID: 1, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access user", - fields: fields{ - UserService: &mock.UserService{ - FindUserFn: func(ctx context.Context, filter influxdb.UserFilter) (*influxdb.User, error) { - return &influxdb.User{ - ID: 1, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:users/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewUserService(tt.fields.UserService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindUser(ctx, influxdb.UserFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestUserService_FindUsers(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - users []*influxdb.User - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all users", - fields: fields{ - UserService: &mock.UserService{ - FindUsersFn: func(ctx context.Context, filter influxdb.UserFilter, opt ...influxdb.FindOptions) ([]*influxdb.User, int, error) { - return []*influxdb.User{ - { - ID: 1, - }, - { - ID: 2, - }, - { - ID: 3, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - }, - }, - }, - wants: wants{ - users: []*influxdb.User{ - { - ID: 1, - }, - { - ID: 2, - }, - { - ID: 3, - }, - }, - }, - }, - { - name: "authorized to access a single user", - fields: fields{ - UserService: &mock.UserService{ - FindUsersFn: func(ctx context.Context, filter influxdb.UserFilter, opt ...influxdb.FindOptions) ([]*influxdb.User, int, error) { - return []*influxdb.User{ - { - ID: 1, - }, - { - ID: 2, - }, - { - ID: 3, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - wants: wants{ - users: []*influxdb.User{ - { - ID: 2, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewUserService(tt.fields.UserService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, 
mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - users, _, err := s.FindUsers(ctx, influxdb.UserFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(users, tt.wants.users, userCmpOptions...); diff != "" { - t.Errorf("users are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestUserService_UpdateUser(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - id platform.ID - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update user", - fields: fields{ - UserService: &mock.UserService{ - UpdateUserFn: func(ctx context.Context, id platform.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { - return &influxdb.User{ - ID: 1, - }, nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update user", - fields: fields{ - UserService: &mock.UserService{ - UpdateUserFn: func(ctx context.Context, id platform.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { - return &influxdb.User{ - ID: 1, - }, nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:users/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewUserService(tt.fields.UserService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.UpdateUser(ctx, tt.args.id, influxdb.UserUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestUserService_DeleteUser(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - id platform.ID - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete user", - fields: fields{ - UserService: &mock.UserService{ - DeleteUserFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete user", - fields: fields{ - UserService: &mock.UserService{ - DeleteUserFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:users/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewUserService(tt.fields.UserService) - - ctx := context.Background() - ctx = 
influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.DeleteUser(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestUserService_CreateUser(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create user", - fields: fields{ - UserService: &mock.UserService{ - CreateUserFn: func(ctx context.Context, o *influxdb.User) error { - return nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create user", - fields: fields{ - UserService: &mock.UserService{ - CreateUserFn: func(ctx context.Context, o *influxdb.User) error { - return nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:users is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewUserService(tt.fields.UserService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.CreateUser(ctx, &influxdb.User{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/authorizer/variable.go b/authorizer/variable.go deleted file mode 100644 index b8d2608b2b0..00000000000 --- a/authorizer/variable.go +++ /dev/null @@ -1,90 +0,0 @@ -package authorizer - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.VariableService = (*VariableService)(nil) - -// VariableService wraps a influxdb.VariableService and authorizes actions -// against it appropriately. -type VariableService struct { - s influxdb.VariableService -} - -// NewVariableService constructs an instance of an authorizing variable service. -func NewVariableService(s influxdb.VariableService) *VariableService { - return &VariableService{ - s: s, - } -} - -// FindVariableByID checks to see if the authorizer on context has read access to the id provided. -func (s *VariableService) FindVariableByID(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { - v, err := s.s.FindVariableByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeRead(ctx, influxdb.VariablesResourceType, v.ID, v.OrganizationID); err != nil { - return nil, err - } - return v, nil -} - -// FindVariables retrieves all variables that match the provided filter and then filters the list down to only the resources that are authorized. -func (s *VariableService) FindVariables(ctx context.Context, filter influxdb.VariableFilter, opt ...influxdb.FindOptions) ([]*influxdb.Variable, error) { - // TODO: we'll likely want to push this operation into the database since fetching the whole list of data will likely be expensive. - vs, err := s.s.FindVariables(ctx, filter, opt...) 
- if err != nil { - return nil, err - } - vs, _, err = AuthorizeFindVariables(ctx, vs) - return vs, err -} - -// CreateVariable checks to see if the authorizer on context has write access to the global variable resource. -func (s *VariableService) CreateVariable(ctx context.Context, v *influxdb.Variable) error { - if _, _, err := AuthorizeCreate(ctx, influxdb.VariablesResourceType, v.OrganizationID); err != nil { - return err - } - return s.s.CreateVariable(ctx, v) -} - -// UpdateVariable checks to see if the authorizer on context has write access to the variable provided. -func (s *VariableService) UpdateVariable(ctx context.Context, id platform.ID, upd *influxdb.VariableUpdate) (*influxdb.Variable, error) { - v, err := s.FindVariableByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.VariablesResourceType, v.ID, v.OrganizationID); err != nil { - return nil, err - } - return s.s.UpdateVariable(ctx, id, upd) -} - -// ReplaceVariable checks to see if the authorizer on context has write access to the variable provided. -func (s *VariableService) ReplaceVariable(ctx context.Context, m *influxdb.Variable) error { - v, err := s.FindVariableByID(ctx, m.ID) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.VariablesResourceType, v.ID, v.OrganizationID); err != nil { - return err - } - return s.s.ReplaceVariable(ctx, m) -} - -// DeleteVariable checks to see if the authorizer on context has write access to the variable provided. -func (s *VariableService) DeleteVariable(ctx context.Context, id platform.ID) error { - v, err := s.FindVariableByID(ctx, id) - if err != nil { - return err - } - if _, _, err := AuthorizeWrite(ctx, influxdb.VariablesResourceType, v.ID, v.OrganizationID); err != nil { - return err - } - return s.s.DeleteVariable(ctx, id) -} diff --git a/authorizer/variable_test.go b/authorizer/variable_test.go deleted file mode 100644 index dbde4ee8497..00000000000 --- a/authorizer/variable_test.go +++ /dev/null @@ -1,656 +0,0 @@ -package authorizer_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var variableCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Variable) []*influxdb.Variable { - out := append([]*influxdb.Variable(nil), in...) 
// Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func TestVariableService_FindVariableByID(t *testing.T) { - type fields struct { - VariableService influxdb.VariableService - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { - return &influxdb.Variable{ - ID: id, - OrganizationID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.VariablesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { - return &influxdb.Variable{ - ID: id, - OrganizationID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.VariablesResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/variables/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewVariableService(tt.fields.VariableService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindVariableByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestVariableService_FindVariables(t *testing.T) { - type fields struct { - VariableService influxdb.VariableService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - variables []*influxdb.Variable - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all variables", - fields: fields{ - VariableService: &mock.VariableService{ - FindVariablesF: func(ctx context.Context, filter influxdb.VariableFilter, opt ...influxdb.FindOptions) ([]*influxdb.Variable, error) { - return []*influxdb.Variable{ - { - ID: 1, - OrganizationID: 10, - }, - { - ID: 2, - OrganizationID: 10, - }, - { - ID: 3, - OrganizationID: 11, - }, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.VariablesResourceType, - }, - }, - }, - wants: wants{ - variables: []*influxdb.Variable{ - { - ID: 1, - OrganizationID: 10, - }, - { - ID: 2, - OrganizationID: 10, - }, - { - ID: 3, - OrganizationID: 11, - }, - }, - }, - }, - { - name: "authorized to access a single orgs variables", - fields: fields{ - VariableService: &mock.VariableService{ - FindVariablesF: func(ctx context.Context, filter influxdb.VariableFilter, opt ...influxdb.FindOptions) ([]*influxdb.Variable, error) { - return []*influxdb.Variable{ - { - ID: 1, - OrganizationID: 10, - }, - { - ID: 2, - OrganizationID: 10, - }, - { - ID: 3, - OrganizationID: 11, - }, - }, nil - }, - }, - 
}, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.VariablesResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - variables: []*influxdb.Variable{ - { - ID: 1, - OrganizationID: 10, - }, - { - ID: 2, - OrganizationID: 10, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewVariableService(tt.fields.VariableService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - variables, err := s.FindVariables(ctx, influxdb.VariableFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(variables, tt.wants.variables, variableCmpOptions...); diff != "" { - t.Errorf("variables are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestVariableService_UpdateVariable(t *testing.T) { - type fields struct { - VariableService influxdb.VariableService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update variable", - fields: fields{ - VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { - return &influxdb.Variable{ - ID: 1, - OrganizationID: 10, - }, nil - }, - UpdateVariableF: func(ctx context.Context, id platform.ID, upd *influxdb.VariableUpdate) (*influxdb.Variable, error) { - return &influxdb.Variable{ - ID: 1, - OrganizationID: 10, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.VariablesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.VariablesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update variable", - fields: fields{ - VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { - return &influxdb.Variable{ - ID: 1, - OrganizationID: 10, - }, nil - }, - UpdateVariableF: func(ctx context.Context, id platform.ID, upd *influxdb.VariableUpdate) (*influxdb.Variable, error) { - return &influxdb.Variable{ - ID: 1, - OrganizationID: 10, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.VariablesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/variables/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewVariableService(tt.fields.VariableService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - _, err := s.UpdateVariable(ctx, tt.args.id, &influxdb.VariableUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestVariableService_ReplaceVariable(t *testing.T) { - type fields struct { - VariableService influxdb.VariableService - } - type args struct { - variable influxdb.Variable - permissions 
[]influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to replace variable", - fields: fields{ - VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { - return &influxdb.Variable{ - ID: 1, - OrganizationID: 10, - }, nil - }, - ReplaceVariableF: func(ctx context.Context, m *influxdb.Variable) error { - return nil - }, - }, - }, - args: args{ - variable: influxdb.Variable{ - ID: 1, - OrganizationID: 10, - Name: "replace", - }, - permissions: []influxdb.Permission{ - { - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.VariablesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.VariablesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to replace variable", - fields: fields{ - VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { - return &influxdb.Variable{ - ID: 1, - OrganizationID: 10, - }, nil - }, - ReplaceVariableF: func(ctx context.Context, m *influxdb.Variable) error { - return nil - }, - }, - }, - args: args{ - variable: influxdb.Variable{ - ID: 1, - OrganizationID: 10, - }, - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.VariablesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/variables/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewVariableService(tt.fields.VariableService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.ReplaceVariable(ctx, &tt.args.variable) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestVariableService_DeleteVariable(t *testing.T) { - type fields struct { - VariableService influxdb.VariableService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete variable", - fields: fields{ - VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { - return &influxdb.Variable{ - ID: 1, - OrganizationID: 10, - }, nil - }, - DeleteVariableF: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.VariablesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.VariablesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete variable", - fields: fields{ - VariableService: &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { - return &influxdb.Variable{ - ID: 1, - OrganizationID: 10, - }, nil - }, - DeleteVariableF: func(ctx 
context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.VariablesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/variables/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewVariableService(tt.fields.VariableService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.DeleteVariable(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestVariableService_CreateVariable(t *testing.T) { - type fields struct { - VariableService influxdb.VariableService - } - type args struct { - permission influxdb.Permission - orgID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create variable", - fields: fields{ - VariableService: &mock.VariableService{ - CreateVariableF: func(ctx context.Context, o *influxdb.Variable) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.VariablesResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create variable", - fields: fields{ - VariableService: &mock.VariableService{ - CreateVariableF: func(ctx context.Context, o *influxdb.Variable) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.VariablesResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/variables is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := authorizer.NewVariableService(tt.fields.VariableService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.CreateVariable(ctx, &influxdb.Variable{OrganizationID: tt.args.orgID}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/authz.go b/authz.go deleted file mode 100644 index 53bd298cfbf..00000000000 --- a/authz.go +++ /dev/null @@ -1,455 +0,0 @@ -package influxdb - -import ( - "errors" - "fmt" - "path" - - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - // ErrAuthorizerNotSupported notes that the provided authorizer is not supported for the action you are trying to perform. - ErrAuthorizerNotSupported = errors.New("your authorizer is not supported, please use *platform.Authorization as authorizer") - // ErrInvalidResourceType notes that the provided resource is invalid - ErrInvalidResourceType = errors.New("unknown resource type for permission") - // ErrInvalidAction notes that the provided action is invalid - ErrInvalidAction = errors.New("unknown action for permission") -) - -// Authorizer will authorize a permission. 
-type Authorizer interface { - // PermissionSet returns the PermissionSet associated with the authorizer - PermissionSet() (PermissionSet, error) - - // Identifier returns an identifier used for auditing. - Identifier() platform.ID - - // GetUserID returns the user id. - GetUserID() platform.ID - - // Kind returns metadata for auditing. - Kind() string -} - -// PermissionAllowed determines if a permission is allowed. -func PermissionAllowed(perm Permission, ps []Permission) bool { - for _, p := range ps { - if p.Matches(perm) { - return true - } - } - return false -} - -// Action is an enum defining all possible resource operations -type Action string - -const ( - // ReadAction is the action for reading. - ReadAction Action = "read" // 1 - // WriteAction is the action for writing. - WriteAction Action = "write" // 2 -) - -var actions = []Action{ - ReadAction, // 1 - WriteAction, // 2 -} - -// Valid checks if the action is a member of the Action enum -func (a Action) Valid() (err error) { - switch a { - case ReadAction: // 1 - case WriteAction: // 2 - default: - err = ErrInvalidAction - } - - return err -} - -// ResourceType is an enum defining all resource types that have a permission model in platform -type ResourceType string - -// Resource is an authorizable resource. -type Resource struct { - Type ResourceType `json:"type"` - ID *platform.ID `json:"id,omitempty"` - OrgID *platform.ID `json:"orgID,omitempty"` -} - -// String stringifies a resource -func (r Resource) String() string { - if r.OrgID != nil && r.ID != nil { - return path.Join(string(OrgsResourceType), r.OrgID.String(), string(r.Type), r.ID.String()) - } - - if r.OrgID != nil { - return path.Join(string(OrgsResourceType), r.OrgID.String(), string(r.Type)) - } - - if r.ID != nil { - return path.Join(string(r.Type), r.ID.String()) - } - - return string(r.Type) -} - -const ( - // AuthorizationsResourceType gives permissions to one or more authorizations. - AuthorizationsResourceType = ResourceType("authorizations") // 0 - // BucketsResourceType gives permissions to one or more buckets. - BucketsResourceType = ResourceType("buckets") // 1 - // DashboardsResourceType gives permissions to one or more dashboards. - DashboardsResourceType = ResourceType("dashboards") // 2 - // OrgsResourceType gives permissions to one or more orgs. - OrgsResourceType = ResourceType("orgs") // 3 - // SourcesResourceType gives permissions to one or more sources. - SourcesResourceType = ResourceType("sources") // 4 - // TasksResourceType gives permissions to one or more tasks. - TasksResourceType = ResourceType("tasks") // 5 - // TelegrafsResourceType gives permissions to one or more telegrafs. - TelegrafsResourceType = ResourceType("telegrafs") // 6 - // UsersResourceType gives permissions to one or more users. - UsersResourceType = ResourceType("users") // 7 - // VariablesResourceType gives permission to one or more variables. - VariablesResourceType = ResourceType("variables") // 8 - // ScraperResourceType gives permission to one or more scrapers. - ScraperResourceType = ResourceType("scrapers") // 9 - // SecretsResourceType gives permission to one or more secrets. - SecretsResourceType = ResourceType("secrets") // 10 - // LabelsResourceType gives permission to one or more labels. - LabelsResourceType = ResourceType("labels") // 11 - // ViewsResourceType gives permission to one or more views. - ViewsResourceType = ResourceType("views") // 12 - // DocumentsResourceType gives permission to one or more documents.
- DocumentsResourceType = ResourceType("documents") // 13 - // NotificationRuleResourceType gives permission to one or more notificationRules. - NotificationRuleResourceType = ResourceType("notificationRules") // 14 - // NotificationEndpointResourceType gives permission to one or more notificationEndpoints. - NotificationEndpointResourceType = ResourceType("notificationEndpoints") // 15 - // ChecksResourceType gives permission to one or more Checks. - ChecksResourceType = ResourceType("checks") // 16 - // DBRPType gives permission to one or more DBRPs. - DBRPResourceType = ResourceType("dbrp") // 17 - // NotebooksResourceType gives permission to one or more notebooks. - NotebooksResourceType = ResourceType("notebooks") // 18 - // AnnotationsResourceType gives permission to one or more annotations. - AnnotationsResourceType = ResourceType("annotations") // 19 - // RemotesResourceType gives permission to one or more remote connections. - RemotesResourceType = ResourceType("remotes") // 20 - // ReplicationsResourceType gives permission to one or more replications. - ReplicationsResourceType = ResourceType("replications") // 21 - // InstanceResourceType is a special permission that allows ownership of the entire instance (creating orgs/operator tokens/etc) - InstanceResourceType = ResourceType("instance") // 22 -) - -// AllResourceTypes is the list of all known resource types. -var AllResourceTypes = []ResourceType{ - AuthorizationsResourceType, // 0 - BucketsResourceType, // 1 - DashboardsResourceType, // 2 - OrgsResourceType, // 3 - SourcesResourceType, // 4 - TasksResourceType, // 5 - TelegrafsResourceType, // 6 - UsersResourceType, // 7 - VariablesResourceType, // 8 - ScraperResourceType, // 9 - SecretsResourceType, // 10 - LabelsResourceType, // 11 - ViewsResourceType, // 12 - DocumentsResourceType, // 13 - NotificationRuleResourceType, // 14 - NotificationEndpointResourceType, // 15 - ChecksResourceType, // 16 - DBRPResourceType, // 17 - NotebooksResourceType, // 18 - AnnotationsResourceType, // 19 - RemotesResourceType, // 20 - ReplicationsResourceType, // 21 - InstanceResourceType, // 22 - // NOTE: when modifying this list, please update the swagger for components.schemas.Permission resource enum. -} - -// Valid checks if the resource type is a member of the ResourceType enum. -func (r Resource) Valid() (err error) { - return r.Type.Valid() -} - -// Valid checks if the resource type is a member of the ResourceType enum. 
-func (t ResourceType) Valid() (err error) { - switch t { - case AuthorizationsResourceType: // 0 - case BucketsResourceType: // 1 - case DashboardsResourceType: // 2 - case OrgsResourceType: // 3 - case SourcesResourceType: // 4 - case TasksResourceType: // 5 - case TelegrafsResourceType: // 6 - case UsersResourceType: // 7 - case VariablesResourceType: // 8 - case ScraperResourceType: // 9 - case SecretsResourceType: // 10 - case LabelsResourceType: // 11 - case ViewsResourceType: // 12 - case DocumentsResourceType: // 13 - case NotificationRuleResourceType: // 14 - case NotificationEndpointResourceType: // 15 - case ChecksResourceType: // 16 - case DBRPResourceType: // 17 - case NotebooksResourceType: // 18 - case AnnotationsResourceType: // 19 - case RemotesResourceType: // 20 - case ReplicationsResourceType: // 21 - case InstanceResourceType: // 22 - default: - err = ErrInvalidResourceType - } - - return err -} - -type PermissionSet []Permission - -func (ps PermissionSet) Allowed(p Permission) bool { - return PermissionAllowed(p, ps) -} - -// Permission defines an action and a resource. -type Permission struct { - Action Action `json:"action"` - Resource Resource `json:"resource"` -} - -// Matches returns whether or not one permission matches the other. -func (p Permission) Matches(perm Permission) bool { - return p.matchesV1(perm) -} - -// matchesV1 reports whether perm is satisfied by p: instance permissions and unscoped permissions of the same type match everything, org-scoped permissions match resources in the same org, and ID-scoped permissions match the same resource ID. -func (p Permission) matchesV1(perm Permission) bool { - if p.Action != perm.Action { - return false - } - - if p.Resource.Type == InstanceResourceType { - return true - } - - if p.Resource.Type != perm.Resource.Type { - return false - } - - if p.Resource.OrgID == nil && p.Resource.ID == nil { - return true - } - - if p.Resource.OrgID != nil && perm.Resource.OrgID != nil && p.Resource.ID != nil && perm.Resource.ID != nil { - if *p.Resource.OrgID != *perm.Resource.OrgID && *p.Resource.ID == *perm.Resource.ID { - fmt.Printf("v1: old match used: p.Resource.OrgID=%s perm.Resource.OrgID=%s p.Resource.ID=%s\n", - *p.Resource.OrgID, *perm.Resource.OrgID, *p.Resource.ID) - } - } - - if p.Resource.OrgID != nil && p.Resource.ID == nil { - pOrgID := *p.Resource.OrgID - if perm.Resource.OrgID != nil { - permOrgID := *perm.Resource.OrgID - if pOrgID == permOrgID { - return true - } - } - } - - if p.Resource.ID != nil { - pID := *p.Resource.ID - if perm.Resource.ID != nil { - permID := *perm.Resource.ID - if pID == permID { - return true - } - } - } - - return false -} - -func (p Permission) String() string { - return fmt.Sprintf("%s:%s", p.Action, p.Resource) -} - -// Valid checks that the resource and action provided are known. -func (p *Permission) Valid() error { - if err := p.Resource.Valid(); err != nil { - return &errors2.Error{ - Code: errors2.EInvalid, - Err: err, - Msg: "invalid resource type for permission", - } - } - - if err := p.Action.Valid(); err != nil { - return &errors2.Error{ - Code: errors2.EInvalid, - Err: err, - Msg: "invalid action type for permission", - } - } - - if p.Resource.OrgID != nil && !p.Resource.OrgID.Valid() { - return &errors2.Error{ - Code: errors2.EInvalid, - Err: platform.ErrInvalidID, - Msg: "invalid org id for permission", - } - } - - if p.Resource.ID != nil && !p.Resource.ID.Valid() { - return &errors2.Error{ - Code: errors2.EInvalid, - Err: platform.ErrInvalidID, - Msg: "invalid id for permission", - } - } - - return nil -} - -// NewPermission returns a permission with provided arguments.
-func NewPermission(a Action, rt ResourceType, orgID platform.ID) (*Permission, error) { - p := &Permission{ - Action: a, - Resource: Resource{ - Type: rt, - OrgID: &orgID, - }, - } - - return p, p.Valid() -} - -// NewResourcePermission returns a permission with provided arguments. -func NewResourcePermission(a Action, rt ResourceType, rid platform.ID) (*Permission, error) { - p := &Permission{ - Action: a, - Resource: Resource{ - Type: rt, - ID: &rid, - }, - } - - return p, p.Valid() -} - -// NewGlobalPermission constructs a global permission capable of accessing any resource of type rt. -func NewGlobalPermission(a Action, rt ResourceType) (*Permission, error) { - p := &Permission{ - Action: a, - Resource: Resource{ - Type: rt, - }, - } - return p, p.Valid() -} - -// NewPermissionAtID creates a permission with the provided arguments. -func NewPermissionAtID(id platform.ID, a Action, rt ResourceType, orgID platform.ID) (*Permission, error) { - p := &Permission{ - Action: a, - Resource: Resource{ - Type: rt, - OrgID: &orgID, - ID: &id, - }, - } - - return p, p.Valid() -} - -// OperPermissions are the default permissions for those who setup the application. -func OperPermissions() []Permission { - ps := []Permission{} - for _, r := range AllResourceTypes { - // For now, we are only allowing instance permissions when logged in through session auth - // That is handled in user resource mapping - if r == InstanceResourceType { - continue - } - for _, a := range actions { - ps = append(ps, Permission{Action: a, Resource: Resource{Type: r}}) - } - } - - return ps -} - -// ReadAllPermissions represents permission to read all data and metadata. -// Like OperPermissions, but allows read-only users. -func ReadAllPermissions() []Permission { - ps := make([]Permission, len(AllResourceTypes)) - for i, t := range AllResourceTypes { - // For now, we are only allowing instance permissions when logged in through session auth - // That is handled in user resource mapping - if t == InstanceResourceType { - continue - } - ps[i] = Permission{Action: ReadAction, Resource: Resource{Type: t}} - } - return ps -} - -// OwnerPermissions are the default permissions for those who own a resource. -func OwnerPermissions(orgID platform.ID) []Permission { - ps := []Permission{} - for _, r := range AllResourceTypes { - // For now, we are only allowing instance permissions when logged in through session auth - // That is handled in user resource mapping - if r == InstanceResourceType { - continue - } - for _, a := range actions { - if r == OrgsResourceType { - ps = append(ps, Permission{Action: a, Resource: Resource{Type: r, ID: &orgID}}) - continue - } - ps = append(ps, Permission{Action: a, Resource: Resource{Type: r, OrgID: &orgID}}) - } - } - return ps -} - -// MePermissions is the permission to read/write myself. -func MePermissions(userID platform.ID) []Permission { - ps := []Permission{} - for _, a := range actions { - ps = append(ps, Permission{Action: a, Resource: Resource{Type: UsersResourceType, ID: &userID}}) - } - - return ps -} - -// MemberPermissions are the default permissions for those who can see a resource. 
-func MemberPermissions(orgID platform.ID) []Permission { - ps := []Permission{} - for _, r := range AllResourceTypes { - // For now, we are only allowing instance permissions when logged in through session auth - // That is handled in user resource mapping - if r == InstanceResourceType { - continue - } - if r == OrgsResourceType { - ps = append(ps, Permission{Action: ReadAction, Resource: Resource{Type: r, ID: &orgID}}) - continue - } - ps = append(ps, Permission{Action: ReadAction, Resource: Resource{Type: r, OrgID: &orgID}}) - } - - return ps -} - -// MemberBucketPermission are the default permissions for those who can see a resource. -func MemberBucketPermission(bucketID platform.ID) Permission { - return Permission{Action: ReadAction, Resource: Resource{Type: BucketsResourceType, ID: &bucketID}} -} diff --git a/authz_test.go b/authz_test.go deleted file mode 100644 index f92a9ea37f4..00000000000 --- a/authz_test.go +++ /dev/null @@ -1,440 +0,0 @@ -package influxdb_test - -import ( - "testing" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestAuthorizer_PermissionAllowed(t *testing.T) { - tests := []struct { - name string - permission platform.Permission - permissions []platform.Permission - allowed bool - }{ - { - name: "global permission", - permission: platform.Permission{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(1), - }, - }, - permissions: []platform.Permission{ - { - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - }, - }, - }, - allowed: true, - }, - { - name: "bad org id in permission", - permission: platform.Permission{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(0), - ID: influxdbtesting.IDPtr(0), - }, - }, - permissions: []platform.Permission{ - { - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - allowed: false, - }, - { - name: "bad resource id in permission", - permission: platform.Permission{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(0), - }, - }, - permissions: []platform.Permission{ - { - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - allowed: false, - }, - { - name: "bad resource id in permissions", - permission: platform.Permission{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(1), - }, - }, - permissions: []platform.Permission{ - { - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(0), - }, - }, - }, - allowed: false, - }, - { - name: "matching action resource and ID", - permission: platform.Permission{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - 
ID: influxdbtesting.IDPtr(1), - }, - }, - permissions: []platform.Permission{ - { - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - allowed: true, - }, - { - name: "matching action resource with total", - permission: platform.Permission{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(1), - }, - }, - permissions: []platform.Permission{ - { - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - }, - allowed: true, - }, - { - name: "matching action resource no ID", - permission: platform.Permission{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - permissions: []platform.Permission{ - { - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - }, - allowed: true, - }, - { - name: "matching action resource differing ID", - permission: platform.Permission{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(1), - }, - }, - permissions: []platform.Permission{ - { - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - allowed: false, - }, - { - name: "differing action same resource", - permission: platform.Permission{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(1), - }, - }, - permissions: []platform.Permission{ - { - Action: platform.ReadAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - allowed: false, - }, - { - name: "same action differing resource", - permission: platform.Permission{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(1), - }, - }, - permissions: []platform.Permission{ - { - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.TasksResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - allowed: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - allowed := platform.PermissionAllowed(tt.permission, tt.permissions) - if allowed != tt.allowed { - t.Errorf("got allowed = %v, expected allowed = %v", allowed, tt.allowed) - } - }) - } -} - -func TestPermission_Valid(t *testing.T) { - type fields struct { - Action platform.Action - Resource platform.Resource - } - tests := []struct { - name string - fields fields - wantErr bool - }{ - { - name: "valid bucket permission with ID", - fields: fields{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - ID: validID(), - OrgID: influxdbtesting.IDPtr(1), - }, - }, - }, - { - name: "valid bucket permission with nil ID", - fields: fields{ - Action: platform.WriteAction, - Resource: 
platform.Resource{ - Type: platform.BucketsResourceType, - ID: nil, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - }, - { - name: "invalid bucket permission with an invalid ID", - fields: fields{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - ID: func() *platform2.ID { id := platform2.InvalidID(); return &id }(), - OrgID: influxdbtesting.IDPtr(1), - }, - }, - wantErr: true, - }, - { - name: "invalid permission without an action", - fields: fields{ - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - wantErr: true, - }, - { - name: "invalid permission without a resource", - fields: fields{ - Action: platform.WriteAction, - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p := &platform.Permission{ - Action: tt.fields.Action, - Resource: tt.fields.Resource, - } - if err := p.Valid(); (err != nil) != tt.wantErr { - t.Errorf("Permission.Valid() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestPermissionAllResources_Valid(t *testing.T) { - var resources = []platform.ResourceType{ - platform.UsersResourceType, - platform.OrgsResourceType, - platform.TasksResourceType, - platform.BucketsResourceType, - platform.DashboardsResourceType, - platform.SourcesResourceType, - platform.NotebooksResourceType, - platform.AnnotationsResourceType, - } - - for _, rt := range resources { - p := &platform.Permission{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: rt, - ID: influxdbtesting.IDPtr(1), - }, - } - - if err := p.Valid(); err != nil { - t.Errorf("PermissionAllResources.Valid() error = %v", err) - } - } -} - -func TestPermissionAllActions(t *testing.T) { - var actions = []platform.Action{ - platform.ReadAction, - platform.WriteAction, - } - - for _, a := range actions { - p := &platform.Permission{ - Action: a, - Resource: platform.Resource{ - Type: platform.TasksResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - } - - if err := p.Valid(); err != nil { - t.Errorf("PermissionAllActions.Valid() error = %v", err) - } - } -} - -func TestPermission_String(t *testing.T) { - type fields struct { - Action platform.Action - Resource platform.Resource - Name *string - } - tests := []struct { - name string - fields fields - want string - }{ - { - name: "valid permission with no id", - fields: fields{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - want: `write:orgs/0000000000000001/buckets`, - }, - { - name: "valid permission with an id", - fields: fields{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: validID(), - }, - }, - want: `write:orgs/0000000000000001/buckets/0000000000000064`, - }, - { - name: "valid permission with no id or org id", - fields: fields{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - }, - }, - want: `write:buckets`, - }, - { - name: "valid permission with no org id", - fields: fields{ - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - want: `write:buckets/0000000000000001`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p := platform.Permission{ - Action: tt.fields.Action, - Resource: tt.fields.Resource, - } 
- if got := p.String(); got != tt.want { - t.Errorf("Permission.String() = %v, want %v", got, tt.want) - } - }) - } -} - -func validID() *platform2.ID { - id := platform2.ID(100) - return &id -} diff --git a/backup.go b/backup.go deleted file mode 100644 index c5f4af6b708..00000000000 --- a/backup.go +++ /dev/null @@ -1,152 +0,0 @@ -package influxdb - -import ( - "context" - "io" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -const ( - BackupFilenamePattern = "20060102T150405Z" -) - -// BackupService represents the data backup functions of InfluxDB. -type BackupService interface { - // BackupKVStore creates a live backup copy of the metadata database. - BackupKVStore(ctx context.Context, w io.Writer) error - - // BackupShard downloads a backup file for a single shard. - BackupShard(ctx context.Context, w io.Writer, shardID uint64, since time.Time) error - - // RLockKVStore locks the database. - RLockKVStore() - - // RUnlockKVStore unlocks the database. - RUnlockKVStore() -} - -// SqlBackupRestoreService represents the backup and restore functions for the sqlite database. -type SqlBackupRestoreService interface { - // BackupSqlStore creates a live backup copy of the sqlite database. - BackupSqlStore(ctx context.Context, w io.Writer) error - - // RestoreSqlStore restores & replaces the sqlite database. - RestoreSqlStore(ctx context.Context, r io.Reader) error - - // RLockSqlStore takes a read lock on the database - RLockSqlStore() - - // RUnlockSqlStore releases a previously-taken read lock on the database. - RUnlockSqlStore() -} - -type BucketManifestWriter interface { - WriteManifest(ctx context.Context, w io.Writer) error -} - -// RestoreService represents the data restore functions of InfluxDB. -type RestoreService interface { - // RestoreKVStore restores & replaces metadata database. - RestoreKVStore(ctx context.Context, r io.Reader) error - - // RestoreBucket restores storage metadata for a bucket. - // TODO(danmoran): As far as I can tell, dbInfo is typed as a []byte because typing it as - // a meta.DatabaseInfo introduces a circular dependency between the root package and `meta`. - // We should refactor to make this signature easier to use. It might be easier to wait - // until we're ready to delete the 2.0.x restore APIs before refactoring. - RestoreBucket(ctx context.Context, id platform.ID, dbInfo []byte) (shardIDMap map[uint64]uint64, err error) - - // RestoreShard uploads a backup file for a single shard. - RestoreShard(ctx context.Context, shardID uint64, r io.Reader) error -} - -// BucketMetadataManifest contains the information about a bucket for backup purposes. -// It is composed of various nested structs below. 
-type BucketMetadataManifest struct { - OrganizationID platform.ID `json:"organizationID"` - OrganizationName string `json:"organizationName"` - BucketID platform.ID `json:"bucketID"` - BucketName string `json:"bucketName"` - Description *string `json:"description,omitempty"` - DefaultRetentionPolicy string `json:"defaultRetentionPolicy"` - RetentionPolicies []RetentionPolicyManifest `json:"retentionPolicies"` -} - -type RetentionPolicyManifest struct { - Name string `json:"name"` - ReplicaN int `json:"replicaN"` - Duration time.Duration `json:"duration"` - ShardGroupDuration time.Duration `json:"shardGroupDuration"` - ShardGroups []ShardGroupManifest `json:"shardGroups"` - Subscriptions []SubscriptionManifest `json:"subscriptions"` -} - -type ShardGroupManifest struct { - ID uint64 `json:"id"` - StartTime time.Time `json:"startTime"` - EndTime time.Time `json:"endTime"` - DeletedAt *time.Time `json:"deletedAt,omitempty"` // use pointer to time.Time so that omitempty works - TruncatedAt *time.Time `json:"truncatedAt,omitempty"` // use pointer to time.Time so that omitempty works - Shards []ShardManifest `json:"shards"` -} - -type ShardManifest struct { - ID uint64 `json:"id"` - ShardOwners []ShardOwner `json:"shardOwners"` -} - -type ShardOwner struct { - NodeID uint64 `json:"nodeID"` -} - -type SubscriptionManifest struct { - Name string `json:"name"` - Mode string `json:"mode"` - Destinations []string `json:"destinations"` -} - -// Manifest lists the KV and shard file information contained in the backup. -type Manifest struct { - KV ManifestKVEntry `json:"kv"` - Files []ManifestEntry `json:"files"` -} - -// ManifestEntry contains the data information for a backed up shard. -type ManifestEntry struct { - OrganizationID string `json:"organizationID"` - OrganizationName string `json:"organizationName"` - BucketID string `json:"bucketID"` - BucketName string `json:"bucketName"` - ShardID uint64 `json:"shardID"` - FileName string `json:"fileName"` - Size int64 `json:"size"` - LastModified time.Time `json:"lastModified"` -} - -// ManifestKVEntry contains the KV store information for a backup. -type ManifestKVEntry struct { - FileName string `json:"fileName"` - Size int64 `json:"size"` -} - -type RestoredBucketMappings struct { - ID platform.ID `json:"id"` - Name string `json:"name"` - ShardMappings []RestoredShardMapping `json:"shardMappings"` -} - -type RestoredShardMapping struct { - OldId uint64 `json:"oldId"` - NewId uint64 `json:"newId"` -} - -// Size returns the size of the manifest. -func (m *Manifest) Size() int64 { - n := m.KV.Size - for _, f := range m.Files { - n += f.Size - } - return n -} diff --git a/backup/backup.go b/backup/backup.go deleted file mode 100644 index 94cc4a9b58a..00000000000 --- a/backup/backup.go +++ /dev/null @@ -1,143 +0,0 @@ -package backup - -import ( - "context" - "encoding/json" - "io" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/influxdata/influxdb/v2/v1/services/meta" -) - -type BucketManifestWriter struct { - ts *tenant.Service - mc *meta.Client -} - -func NewBucketManifestWriter(ts *tenant.Service, mc *meta.Client) BucketManifestWriter { - return BucketManifestWriter{ - ts: ts, - mc: mc, - } -} - -// WriteManifest writes a bucket manifest describing all of the buckets that exist in the database. -// It is intended to be used to write to an HTTP response after appropriate measures have been taken -// to ensure that the request is authorized. 
-func (b BucketManifestWriter) WriteManifest(ctx context.Context, w io.Writer) error { - bkts, _, err := b.ts.FindBuckets(ctx, influxdb.BucketFilter{}) - if err != nil { - return err - } - - l := make([]influxdb.BucketMetadataManifest, 0, len(bkts)) - - for _, bkt := range bkts { - org, err := b.ts.OrganizationService.FindOrganizationByID(ctx, bkt.OrgID) - if err != nil { - return err - } - - dbInfo := b.mc.Database(bkt.ID.String()) - - var description *string - if bkt.Description != "" { - description = &bkt.Description - } - - l = append(l, influxdb.BucketMetadataManifest{ - OrganizationID: bkt.OrgID, - OrganizationName: org.Name, - BucketID: bkt.ID, - BucketName: bkt.Name, - Description: description, - DefaultRetentionPolicy: dbInfo.DefaultRetentionPolicy, - RetentionPolicies: retentionPolicyToManifest(dbInfo.RetentionPolicies), - }) - } - - return json.NewEncoder(w).Encode(&l) -} - -// retentionPolicyToManifest and the various similar functions that follow are for converting -// from the structs in the meta package to the manifest structs -func retentionPolicyToManifest(meta []meta.RetentionPolicyInfo) []influxdb.RetentionPolicyManifest { - r := make([]influxdb.RetentionPolicyManifest, 0, len(meta)) - - for _, m := range meta { - r = append(r, influxdb.RetentionPolicyManifest{ - Name: m.Name, - ReplicaN: m.ReplicaN, - Duration: m.Duration, - ShardGroupDuration: m.ShardGroupDuration, - ShardGroups: shardGroupToManifest(m.ShardGroups), - Subscriptions: subscriptionInfosToManifest(m.Subscriptions), - }) - } - - return r -} - -func subscriptionInfosToManifest(subInfos []meta.SubscriptionInfo) []influxdb.SubscriptionManifest { - r := make([]influxdb.SubscriptionManifest, 0, len(subInfos)) - - for _, s := range subInfos { - r = append(r, influxdb.SubscriptionManifest(s)) - } - - return r -} - -func shardGroupToManifest(shardGroups []meta.ShardGroupInfo) []influxdb.ShardGroupManifest { - r := make([]influxdb.ShardGroupManifest, 0, len(shardGroups)) - - for _, s := range shardGroups { - deletedAt := &s.DeletedAt - truncatedAt := &s.TruncatedAt - - // set deletedAt and truncatedAt to nil rather than their zero values so that the fields - // can be properly omitted from the JSON response if they are empty - if deletedAt.IsZero() { - deletedAt = nil - } - - if truncatedAt.IsZero() { - truncatedAt = nil - } - - r = append(r, influxdb.ShardGroupManifest{ - ID: s.ID, - StartTime: s.StartTime, - EndTime: s.EndTime, - DeletedAt: deletedAt, - TruncatedAt: truncatedAt, - Shards: shardInfosToManifest(s.Shards), - }) - } - - return r -} - -func shardInfosToManifest(shards []meta.ShardInfo) []influxdb.ShardManifest { - r := make([]influxdb.ShardManifest, 0, len(shards)) - - for _, s := range shards { - r = append(r, influxdb.ShardManifest{ - ID: s.ID, - ShardOwners: shardOwnersToManifest(s.Owners), - }) - } - - return r -} - -func shardOwnersToManifest(shardOwners []meta.ShardOwner) []influxdb.ShardOwner { - r := make([]influxdb.ShardOwner, 0, len(shardOwners)) - - for _, s := range shardOwners { - r = append(r, influxdb.ShardOwner(s)) - } - - return r -} diff --git a/bolt/bbolt.go b/bolt/bbolt.go deleted file mode 100644 index 9d6c8bdace3..00000000000 --- a/bolt/bbolt.go +++ /dev/null @@ -1,129 +0,0 @@ -package bolt - -import ( - "context" - "fmt" - "os" - "path/filepath" - "time" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/rand" - 
"github.com/influxdata/influxdb/v2/snowflake" - bolt "go.etcd.io/bbolt" - "go.uber.org/zap" -) - -const DefaultFilename = "influxd.bolt" - -// Client is a client for the boltDB data store. -type Client struct { - Path string - db *bolt.DB - log *zap.Logger - - IDGenerator platform2.IDGenerator - TokenGenerator platform.TokenGenerator - platform.TimeGenerator - - pluginsCollector *pluginMetricsCollector -} - -// NewClient returns an instance of a Client. -func NewClient(log *zap.Logger) *Client { - return &Client{ - log: log, - IDGenerator: snowflake.NewIDGenerator(), - TokenGenerator: rand.NewTokenGenerator(64), - TimeGenerator: platform.RealTimeGenerator{}, - // Refresh telegraf plugin metrics every hour. - pluginsCollector: NewPluginMetricsCollector(time.Minute * 59), - } -} - -// DB returns the clients DB. -func (c *Client) DB() *bolt.DB { - return c.db -} - -// Open / create boltDB file. -func (c *Client) Open(ctx context.Context) error { - // Ensure the required directory structure exists. - if err := os.MkdirAll(filepath.Dir(c.Path), 0700); err != nil { - return fmt.Errorf("unable to create directory %s: %v", c.Path, err) - } - - if _, err := os.Stat(c.Path); err != nil && !os.IsNotExist(err) { - return err - } - - // Open database file. - db, err := bolt.Open(c.Path, 0600, &bolt.Options{Timeout: 1 * time.Second}) - if err != nil { - // Hack to give a slightly nicer error message for a known failure mode when bolt calls - // mmap on a file system that doesn't support the MAP_SHARED option. - // - // See: https://github.com/boltdb/bolt/issues/272 - // See: https://stackoverflow.com/a/18421071 - if err.Error() == "invalid argument" { - return fmt.Errorf("unable to open boltdb: mmap of %q may not support the MAP_SHARED option", c.Path) - } - - return fmt.Errorf("unable to open boltdb: %w", err) - } - c.db = db - - if err := c.initialize(ctx); err != nil { - return err - } - - c.pluginsCollector.Open(c.db) - - c.log.Info("Resources opened", zap.String("path", c.Path)) - return nil -} - -// initialize creates Buckets that are missing -func (c *Client) initialize(ctx context.Context) error { - if err := c.db.Update(func(tx *bolt.Tx) error { - // Always create ID bucket. - // TODO: is this still needed? - if err := c.initializeID(tx); err != nil { - return err - } - - // TODO: make card to normalize everything under kv? 
- bkts := [][]byte{ - authorizationBucket, - bucketBucket, - dashboardBucket, - organizationBucket, - scraperBucket, - telegrafBucket, - telegrafPluginsBucket, - remoteBucket, - replicationBucket, - userBucket, - } - for _, bktName := range bkts { - if _, err := tx.CreateBucketIfNotExists(bktName); err != nil { - return err - } - } - return nil - }); err != nil { - return err - } - - return nil -} - -// Close the connection to the bolt database -func (c *Client) Close() error { - c.pluginsCollector.Close() - if c.db != nil { - return c.db.Close() - } - return nil -} diff --git a/bolt/bbolt_test.go b/bolt/bbolt_test.go deleted file mode 100644 index 951e8fed1c5..00000000000 --- a/bolt/bbolt_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package bolt_test - -import ( - "context" - "errors" - "os" - "path/filepath" - "testing" - - "github.com/influxdata/influxdb/v2/bolt" - "go.uber.org/zap/zaptest" -) - -func NewTestClient(t *testing.T) (*bolt.Client, func(), error) { - c, closeFn, err := newTestClient(t) - if err != nil { - return nil, nil, err - } - if err := c.Open(context.Background()); err != nil { - return nil, nil, err - } - - return c, closeFn, nil -} - -func newTestClient(t *testing.T) (*bolt.Client, func(), error) { - c := bolt.NewClient(zaptest.NewLogger(t)) - - f, err := os.CreateTemp("", "influxdata-platform-bolt-") - if err != nil { - return nil, nil, errors.New("unable to open temporary boltdb file") - } - f.Close() - - c.Path = f.Name() - - close := func() { - c.Close() - os.Remove(c.Path) - } - - return c, close, nil -} - -func TestClientOpen(t *testing.T) { - tempDir := t.TempDir() - - boltFile := filepath.Join(tempDir, "test", "bolt.db") - - c := bolt.NewClient(zaptest.NewLogger(t)) - c.Path = boltFile - - if err := c.Open(context.Background()); err != nil { - t.Fatalf("unable to create database %s: %v", boltFile, err) - } - - if err := c.Close(); err != nil { - t.Fatalf("unable to close database %s: %v", boltFile, err) - } -} - -func NewTestKVStore(t *testing.T) (*bolt.KVStore, func(), error) { - f, err := os.CreateTemp("", "influxdata-platform-bolt-") - if err != nil { - return nil, nil, errors.New("unable to open temporary boltdb file") - } - f.Close() - - path := f.Name() - s := bolt.NewKVStore(zaptest.NewLogger(t), path, bolt.WithNoSync) - if err := s.Open(context.TODO()); err != nil { - return nil, nil, err - } - - close := func() { - s.Close() - os.Remove(path) - } - - return s, close, nil -} diff --git a/bolt/id.go b/bolt/id.go deleted file mode 100644 index a6ad52f4548..00000000000 --- a/bolt/id.go +++ /dev/null @@ -1,90 +0,0 @@ -package bolt - -import ( - "errors" - "fmt" - "math/rand" - - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - bolt "go.etcd.io/bbolt" - "go.uber.org/zap" -) - -var ( - idsBucket = []byte("idsv1") - idKey = []byte("id") - errIDNotFound = errors.New("source not found") -) - -var _ platform2.IDGenerator = (*Client)(nil) - -func (c *Client) initializeID(tx *bolt.Tx) error { - if _, err := tx.CreateBucketIfNotExists(idsBucket); err != nil { - return err - } - - _, err := c.getID(tx) - if err != nil && err != errIDNotFound { - return err - } - - if err == errIDNotFound { - if err := c.generateID(tx); err != nil { - return err - } - } - - return nil -} - -// ID retrieves the unique ID for this influx instance. 
-func (c *Client) ID() platform2.ID { - // if any error occurs return a random number - id := platform2.ID(rand.Int63()) - err := c.db.View(func(tx *bolt.Tx) error { - val, err := c.getID(tx) - if err != nil { - return err - } - - id = val - return nil - }) - - if err != nil { - c.log.Error("Unable to load id", zap.Error(err)) - } - - return id -} - -func (c *Client) getID(tx *bolt.Tx) (platform2.ID, error) { - v := tx.Bucket(idsBucket).Get(idKey) - if len(v) == 0 { - return platform2.InvalidID(), errIDNotFound - } - return decodeID(v) -} - -func decodeID(val []byte) (platform2.ID, error) { - if len(val) < platform2.IDLength { - // This should not happen. - return platform2.InvalidID(), fmt.Errorf("provided value is too short to contain an ID. Please report this error") - } - - var id platform2.ID - if err := id.Decode(val[:platform2.IDLength]); err != nil { - return platform2.InvalidID(), err - } - return id, nil -} - -func (c *Client) generateID(tx *bolt.Tx) error { - id := c.IDGenerator.ID() - encodedID, err := id.Encode() - if err != nil { - return err - } - - return tx.Bucket(idsBucket).Put(idKey, encodedID) -} diff --git a/bolt/id_test.go b/bolt/id_test.go deleted file mode 100644 index 017993736b6..00000000000 --- a/bolt/id_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package bolt_test - -import ( - "context" - "testing" - - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" -) - -func TestID(t *testing.T) { - c, closeFn, err := newTestClient(t) - if err != nil { - t.Fatalf("failed to create new bolt client: %v", err) - } - defer closeFn() - - testID := platform2.ID(70000) - c.IDGenerator = mock.NewIDGenerator(testID.String(), t) - - if err := c.Open(context.Background()); err != nil { - t.Fatalf("failed to open bolt client: %v", err) - } - - if got, want := c.ID(), testID; got != want { - t.Errorf("Client.ID() = %v, want %v", got, want) - } -} diff --git a/bolt/kv.go b/bolt/kv.go deleted file mode 100644 index 85f99f08282..00000000000 --- a/bolt/kv.go +++ /dev/null @@ -1,531 +0,0 @@ -package bolt - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "sync" - "time" - - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/pkg/fs" - bolt "go.etcd.io/bbolt" - "go.uber.org/zap" -) - -// check that *KVStore implement kv.SchemaStore interface. -var _ kv.SchemaStore = (*KVStore)(nil) - -// KVStore is a kv.Store backed by boltdb. -type KVStore struct { - path string - mu sync.RWMutex - db *bolt.DB - log *zap.Logger - - noSync bool -} - -type KVOption func(*KVStore) - -// WithNoSync WARNING: this is useful for tests only -// this skips fsyncing on every commit to improve -// write performance in exchange for no guarantees -// that the db will persist. -func WithNoSync(s *KVStore) { - s.noSync = true -} - -// NewKVStore returns an instance of KVStore with the file at -// the provided path. -func NewKVStore(log *zap.Logger, path string, opts ...KVOption) *KVStore { - store := &KVStore{ - path: path, - log: log, - } - - for _, opt := range opts { - opt(store) - } - - return store -} - -// tempPath returns the path to the temporary file used by Restore(). 
-func (s *KVStore) tempPath() string { - return s.path + ".tmp" -} - -// Open creates boltDB file it doesn't exists and opens it otherwise. -func (s *KVStore) Open(ctx context.Context) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - // Ensure the required directory structure exists. - if err := os.MkdirAll(filepath.Dir(s.path), 0700); err != nil { - return fmt.Errorf("unable to create directory %s: %v", s.path, err) - } - - if _, err := os.Stat(s.path); err != nil && !os.IsNotExist(err) { - return err - } - - // Remove any temporary file created during a failed restore. - if err := os.Remove(s.tempPath()); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("unable to remove boltdb partial restore file: %w", err) - } - - // Open database file. - if err := s.openDB(); err != nil { - return fmt.Errorf("unable to open boltdb file %v", err) - } - - s.log.Info("Resources opened", zap.String("path", s.path)) - return nil -} - -func (s *KVStore) openDB() (err error) { - if s.db, err = bolt.Open(s.path, 0600, &bolt.Options{Timeout: 1 * time.Second}); err != nil { - return fmt.Errorf("unable to open boltdb file %v", err) - } - s.db.NoSync = s.noSync - return nil -} - -// Close the connection to the bolt database -func (s *KVStore) Close() error { - if db := s.DB(); db != nil { - return db.Close() - } - return nil -} - -func (s *KVStore) RLock() { - s.mu.RLock() -} - -func (s *KVStore) RUnlock() { - s.mu.RUnlock() -} - -// DB returns a reference to the current Bolt database. -func (s *KVStore) DB() *bolt.DB { - s.mu.RLock() - defer s.mu.RUnlock() - return s.db -} - -// Flush removes all bolt keys within each bucket. -func (s *KVStore) Flush(ctx context.Context) { - _ = s.DB().Update( - func(tx *bolt.Tx) error { - return tx.ForEach(func(name []byte, b *bolt.Bucket) error { - s.cleanBucket(tx, b) - return nil - }) - }, - ) -} - -func (s *KVStore) cleanBucket(tx *bolt.Tx, b *bolt.Bucket) { - // nested bucket recursion base case: - if b == nil { - return - } - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - _ = v - if err := c.Delete(); err != nil { - // clean out nexted buckets - s.cleanBucket(tx, b.Bucket(k)) - } - } -} - -// WithDB sets the boltdb on the store. -func (s *KVStore) WithDB(db *bolt.DB) { - s.mu.Lock() - defer s.mu.Unlock() - s.db = db -} - -// View opens up a view transaction against the store. -func (s *KVStore) View(ctx context.Context, fn func(tx kv.Tx) error) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - return s.DB().View(func(tx *bolt.Tx) error { - return fn(&Tx{ - tx: tx, - ctx: ctx, - }) - }) -} - -// Update opens up an update transaction against the store. 
-func (s *KVStore) Update(ctx context.Context, fn func(tx kv.Tx) error) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - return s.DB().Update(func(tx *bolt.Tx) error { - return fn(&Tx{ - tx: tx, - ctx: ctx, - }) - }) -} - -// CreateBucket creates a bucket in the underlying boltdb store if it -// does not already exist -func (s *KVStore) CreateBucket(ctx context.Context, name []byte) error { - return s.DB().Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(name) - return err - }) -} - -// DeleteBucket creates a bucket in the underlying boltdb store if it -// does not already exist -func (s *KVStore) DeleteBucket(ctx context.Context, name []byte) error { - return s.DB().Update(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket(name); err != nil && !errors.Is(err, bolt.ErrBucketNotFound) { - return err - } - - return nil - }) -} - -// Backup copies all K:Vs to a writer, in BoltDB format. -func (s *KVStore) Backup(ctx context.Context, w io.Writer) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - return s.DB().View(func(tx *bolt.Tx) error { - _, err := tx.WriteTo(w) - return err - }) -} - -// Restore replaces the underlying database with the data from r. -func (s *KVStore) Restore(ctx context.Context, r io.Reader) error { - if err := func() error { - f, err := os.Create(s.tempPath()) - if err != nil { - return err - } - defer f.Close() - - if _, err := io.Copy(f, r); err != nil { - return err - } else if err := f.Sync(); err != nil { - return err - } else if err := f.Close(); err != nil { - return err - } - - // Run the migrations on the restored database prior to swapping it in. - if err := s.migrateRestored(ctx); err != nil { - return err - } - - // Swap and reopen under lock. - s.mu.Lock() - defer s.mu.Unlock() - - if err := s.db.Close(); err != nil { - return err - } - - // Atomically swap temporary file with current DB file. - if err := fs.RenameFileWithReplacement(s.tempPath(), s.path); err != nil { - return err - } - - // Reopen with new database file. - return s.openDB() - }(); err != nil { - os.Remove(s.tempPath()) // clean up on error - return err - } - return nil -} - -// migrateRestored opens the database at the temporary path and applies the -// migrations to it. The database at the temporary path is closed after the -// migrations are complete. This should be used as part of the restore -// operation, prior to swapping the restored database with the active database. -func (s *KVStore) migrateRestored(ctx context.Context) error { - restoredClient := NewClient(s.log.With(zap.String("service", "restored bolt"))) - restoredClient.Path = s.tempPath() - if err := restoredClient.Open(ctx); err != nil { - return err - } - defer restoredClient.Close() - - restoredKV := NewKVStore(s.log.With(zap.String("service", "restored kvstore-bolt")), s.tempPath()) - restoredKV.WithDB(restoredClient.DB()) - - migrator, err := migration.NewMigrator( - s.log.With(zap.String("service", "bolt restore migrations")), - restoredKV, - all.Migrations[:]..., - ) - if err != nil { - return err - } - - return migrator.Up(ctx) -} - -// Tx is a light wrapper around a boltdb transaction. It implements kv.Tx. -type Tx struct { - tx *bolt.Tx - ctx context.Context -} - -// Context returns the context for the transaction. -func (tx *Tx) Context() context.Context { - return tx.ctx -} - -// WithContext sets the context for the transaction. 
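
A sketch of the write path through the store above (assuming imports of the `bolt` and `kv` packages from this diff plus `go.uber.org/zap`); the bucket name and path are hypothetical, since in practice migrations create the real buckets:

    ctx := context.Background()
    store := bolt.NewKVStore(zap.NewNop(), "/tmp/example.bolt") // WithNoSync is available for tests
    if err := store.Open(ctx); err != nil {
        panic(err)
    }
    defer store.Close()

    name := []byte("examplev1") // hypothetical bucket, created explicitly here
    if err := store.CreateBucket(ctx, name); err != nil {
        panic(err)
    }

    if err := store.Update(ctx, func(tx kv.Tx) error {
        b, err := tx.Bucket(name)
        if err != nil {
            return err
        }
        return b.Put([]byte("greeting"), []byte("hello"))
    }); err != nil {
        panic(err)
    }
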
-func (tx *Tx) WithContext(ctx context.Context) { - tx.ctx = ctx -} - -// Bucket retrieves the bucket named b. -func (tx *Tx) Bucket(b []byte) (kv.Bucket, error) { - bkt := tx.tx.Bucket(b) - if bkt == nil { - return nil, fmt.Errorf("bucket %q: %w", string(b), kv.ErrBucketNotFound) - } - return &Bucket{ - bucket: bkt, - }, nil -} - -// Bucket implements kv.Bucket. -type Bucket struct { - bucket *bolt.Bucket -} - -// Get retrieves the value at the provided key. -func (b *Bucket) Get(key []byte) ([]byte, error) { - val := b.bucket.Get(key) - if len(val) == 0 { - return nil, kv.ErrKeyNotFound - } - - return val, nil -} - -// GetBatch retrieves the values for the provided keys. -func (b *Bucket) GetBatch(keys ...[]byte) ([][]byte, error) { - values := make([][]byte, len(keys)) - for idx, key := range keys { - val := b.bucket.Get(key) - if len(val) == 0 { - continue - } - - values[idx] = val - } - - return values, nil -} - -// Put sets the value at the provided key. -func (b *Bucket) Put(key []byte, value []byte) error { - err := b.bucket.Put(key, value) - if err == bolt.ErrTxNotWritable { - return kv.ErrTxNotWritable - } - return err -} - -// Delete removes the provided key. -func (b *Bucket) Delete(key []byte) error { - err := b.bucket.Delete(key) - if err == bolt.ErrTxNotWritable { - return kv.ErrTxNotWritable - } - return err -} - -// ForwardCursor retrieves a cursor for iterating through the entries -// in the key value store in a given direction (ascending / descending). -func (b *Bucket) ForwardCursor(seek []byte, opts ...kv.CursorOption) (kv.ForwardCursor, error) { - var ( - cursor = b.bucket.Cursor() - config = kv.NewCursorConfig(opts...) - key, value []byte - ) - - if len(seek) == 0 && config.Direction == kv.CursorDescending { - seek, _ = cursor.Last() - } - - key, value = cursor.Seek(seek) - - if config.Prefix != nil && !bytes.HasPrefix(seek, config.Prefix) { - return nil, fmt.Errorf("seek bytes %q not prefixed with %q: %w", string(seek), string(config.Prefix), kv.ErrSeekMissingPrefix) - } - - c := &Cursor{ - cursor: cursor, - config: config, - } - - // only remember first seeked item if not skipped - if !config.SkipFirst { - c.key = key - c.value = value - } - - return c, nil -} - -// Cursor retrieves a cursor for iterating through the entries -// in the key value store. -func (b *Bucket) Cursor(opts ...kv.CursorHint) (kv.Cursor, error) { - return &Cursor{ - cursor: b.bucket.Cursor(), - }, nil -} - -// Cursor is a struct for iterating through the entries -// in the key value store. -type Cursor struct { - cursor *bolt.Cursor - - // previously seeked key/value - key, value []byte - - config kv.CursorConfig - closed bool - seen int -} - -// Close sets the closed to closed -func (c *Cursor) Close() error { - c.closed = true - - return nil -} - -// Seek seeks for the first key that matches the prefix provided. -func (c *Cursor) Seek(prefix []byte) ([]byte, []byte) { - if c.closed { - return nil, nil - } - k, v := c.cursor.Seek(prefix) - if len(k) == 0 && len(v) == 0 { - return nil, nil - } - return k, v -} - -// First retrieves the first key value pair in the bucket. -func (c *Cursor) First() ([]byte, []byte) { - if c.closed { - return nil, nil - } - k, v := c.cursor.First() - if len(k) == 0 && len(v) == 0 { - return nil, nil - } - return k, v -} - -// Last retrieves the last key value pair in the bucket. 
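
Reading back through a forward cursor, continuing the sketch above and using the iteration methods defined just below: a nil seek with no options walks the bucket in ascending key order.

    if err := store.View(ctx, func(tx kv.Tx) error {
        b, err := tx.Bucket(name)
        if err != nil {
            return err
        }
        cur, err := b.ForwardCursor(nil) // nil seek, default options: ascending scan from the first key
        if err != nil {
            return err
        }
        defer cur.Close()

        for k, v := cur.Next(); k != nil; k, v = cur.Next() {
            fmt.Printf("%s = %s\n", k, v)
        }
        return cur.Err()
    }); err != nil {
        panic(err)
    }
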
-func (c *Cursor) Last() ([]byte, []byte) { - if c.closed { - return nil, nil - } - k, v := c.cursor.Last() - if len(k) == 0 && len(v) == 0 { - return nil, nil - } - return k, v -} - -// Next retrieves the next key in the bucket. -func (c *Cursor) Next() (k []byte, v []byte) { - if c.closed || - c.atLimit() || - (c.key != nil && c.missingPrefix(c.key)) { - return nil, nil - } - - // get and unset previously seeked values if they exist - k, v, c.key, c.value = c.key, c.value, nil, nil - if len(k) > 0 || len(v) > 0 { - c.seen++ - return - } - - next := c.cursor.Next - if c.config.Direction == kv.CursorDescending { - next = c.cursor.Prev - } - - k, v = next() - if (len(k) == 0 && len(v) == 0) || c.missingPrefix(k) { - return nil, nil - } - - c.seen++ - - return k, v -} - -// Prev retrieves the previous key in the bucket. -func (c *Cursor) Prev() (k []byte, v []byte) { - if c.closed || - c.atLimit() || - (c.key != nil && c.missingPrefix(c.key)) { - return nil, nil - } - - // get and unset previously seeked values if they exist - k, v, c.key, c.value = c.key, c.value, nil, nil - if len(k) > 0 && len(v) > 0 { - c.seen++ - return - } - - prev := c.cursor.Prev - if c.config.Direction == kv.CursorDescending { - prev = c.cursor.Next - } - - k, v = prev() - if (len(k) == 0 && len(v) == 0) || c.missingPrefix(k) { - return nil, nil - } - - c.seen++ - - return k, v -} - -func (c *Cursor) missingPrefix(key []byte) bool { - return c.config.Prefix != nil && !bytes.HasPrefix(key, c.config.Prefix) -} - -func (c *Cursor) atLimit() bool { - return c.config.Limit != nil && c.seen >= *c.config.Limit -} - -// Err always returns nil as nothing can go wrong™ during iteration -func (c *Cursor) Err() error { - return nil -} diff --git a/bolt/kv_test.go b/bolt/kv_test.go deleted file mode 100644 index bc004d498d7..00000000000 --- a/bolt/kv_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package bolt_test - -import ( - "context" - "fmt" - "testing" - - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration" - platformtesting "github.com/influxdata/influxdb/v2/testing" -) - -func initKVStore(f platformtesting.KVStoreFields, t *testing.T) (kv.Store, func()) { - s, closeFn, err := NewTestKVStore(t) - if err != nil { - t.Fatalf("failed to create new kv store: %v", err) - } - - mustCreateBucket(t, s, f.Bucket) - - err = s.Update(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket(f.Bucket) - if err != nil { - return err - } - - for _, p := range f.Pairs { - if err := b.Put(p.Key, p.Value); err != nil { - return err - } - } - - return nil - }) - if err != nil { - t.Fatalf("failed to put keys: %v", err) - } - return s, func() { - closeFn() - } -} - -func TestKVStore(t *testing.T) { - platformtesting.KVStore(initKVStore, t) -} - -func mustCreateBucket(t testing.TB, store kv.SchemaStore, bucket []byte) { - t.Helper() - - migrationName := fmt.Sprintf("create bucket %q", string(bucket)) - - if err := migration.CreateBuckets(migrationName, bucket).Up(context.Background(), store); err != nil { - t.Fatal(err) - } -} diff --git a/bolt/metrics.go b/bolt/metrics.go deleted file mode 100644 index d47314cdd78..00000000000 --- a/bolt/metrics.go +++ /dev/null @@ -1,295 +0,0 @@ -package bolt - -import ( - "encoding/json" - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus" - bolt "go.etcd.io/bbolt" -) - -var _ prometheus.Collector = (*Client)(nil) - -// available buckets -// TODO: nuke this whole thing? 
-var ( - authorizationBucket = []byte("authorizationsv1") - bucketBucket = []byte("bucketsv1") - dashboardBucket = []byte("dashboardsv2") - organizationBucket = []byte("organizationsv1") - scraperBucket = []byte("scraperv2") - telegrafBucket = []byte("telegrafv1") - telegrafPluginsBucket = []byte("telegrafPluginsv1") - remoteBucket = []byte("remotesv2") - replicationBucket = []byte("replicationsv2") - userBucket = []byte("usersv1") -) - -var ( - orgsDesc = prometheus.NewDesc( - "influxdb_organizations_total", - "Number of total organizations on the server", - nil, nil) - - bucketsDesc = prometheus.NewDesc( - "influxdb_buckets_total", - "Number of total buckets on the server", - nil, nil) - - usersDesc = prometheus.NewDesc( - "influxdb_users_total", - "Number of total users on the server", - nil, nil) - - tokensDesc = prometheus.NewDesc( - "influxdb_tokens_total", - "Number of total tokens on the server", - nil, nil) - - dashboardsDesc = prometheus.NewDesc( - "influxdb_dashboards_total", - "Number of total dashboards on the server", - nil, nil) - - scrapersDesc = prometheus.NewDesc( - "influxdb_scrapers_total", - "Number of total scrapers on the server", - nil, nil) - - telegrafsDesc = prometheus.NewDesc( - "influxdb_telegrafs_total", - "Number of total telegraf configurations on the server", - nil, nil) - - telegrafPluginsDesc = prometheus.NewDesc( - "influxdb_telegraf_plugins_count", - "Number of individual telegraf plugins configured", - []string{"plugin"}, nil) - - remoteDesc = prometheus.NewDesc( - "influxdb_remotes_total", - "Number of total remote connections configured on the server", - nil, nil) - - replicationDesc = prometheus.NewDesc( - "influxdb_replications_total", - "Number of total replication configurations on the server", - nil, nil) - - boltWritesDesc = prometheus.NewDesc( - "boltdb_writes_total", - "Total number of boltdb writes", - nil, nil) - - boltReadsDesc = prometheus.NewDesc( - "boltdb_reads_total", - "Total number of boltdb reads", - nil, nil) -) - -// Describe returns all descriptions of the collector. -func (c *Client) Describe(ch chan<- *prometheus.Desc) { - ch <- orgsDesc - ch <- bucketsDesc - ch <- usersDesc - ch <- tokensDesc - ch <- dashboardsDesc - ch <- scrapersDesc - ch <- telegrafsDesc - ch <- remoteDesc - ch <- replicationDesc - ch <- boltWritesDesc - ch <- boltReadsDesc - - c.pluginsCollector.Describe(ch) -} - -type pluginMetricsCollector struct { - ticker *time.Ticker - tickerDone chan struct{} - - // cacheMu protects cache - cacheMu sync.RWMutex - cache map[string]float64 -} - -func (c *pluginMetricsCollector) Open(db *bolt.DB) { - go c.pollTelegrafStats(db) -} - -func (c *pluginMetricsCollector) pollTelegrafStats(db *bolt.DB) { - for { - select { - case <-c.tickerDone: - return - case <-c.ticker.C: - c.refreshTelegrafStats(db) - } - } -} - -func (c *pluginMetricsCollector) refreshTelegrafStats(db *bolt.DB) { - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - // Check if stats-polling got canceled between the point of receiving - // a tick and grabbing the lock. - select { - case <-c.tickerDone: - return - default: - } - - // Clear plugins from last check. - c.cache = map[string]float64{} - - // Loop through all registered plugins. 
- _ = db.View(func(tx *bolt.Tx) error { - rawPlugins := [][]byte{} - if err := tx.Bucket(telegrafPluginsBucket).ForEach(func(k, v []byte) error { - rawPlugins = append(rawPlugins, v) - return nil - }); err != nil { - return err - } - - for _, v := range rawPlugins { - pStats := map[string]float64{} - if err := json.Unmarshal(v, &pStats); err != nil { - return err - } - - for k, v := range pStats { - c.cache[k] += v - } - } - - return nil - }) -} - -func (c *pluginMetricsCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- telegrafPluginsDesc -} - -func (c *pluginMetricsCollector) Collect(ch chan<- prometheus.Metric) { - c.cacheMu.RLock() - defer c.cacheMu.RUnlock() - - for k, v := range c.cache { - ch <- prometheus.MustNewConstMetric( - telegrafPluginsDesc, - prometheus.GaugeValue, - v, - k, // Adds a label for plugin type.name. - ) - } -} - -func (c *pluginMetricsCollector) Close() { - // Wait for any already-running cache-refresh procedures to complete. - c.cacheMu.Lock() - defer c.cacheMu.Unlock() - - close(c.tickerDone) -} - -func NewPluginMetricsCollector(tickDuration time.Duration) *pluginMetricsCollector { - return &pluginMetricsCollector{ - ticker: time.NewTicker(tickDuration), - tickerDone: make(chan struct{}), - cache: make(map[string]float64), - } -} - -// Collect returns the current state of all metrics of the collector. -func (c *Client) Collect(ch chan<- prometheus.Metric) { - stats := c.db.Stats() - writes := stats.TxStats.Write - reads := stats.TxN - - ch <- prometheus.MustNewConstMetric( - boltReadsDesc, - prometheus.CounterValue, - float64(reads), - ) - - ch <- prometheus.MustNewConstMetric( - boltWritesDesc, - prometheus.CounterValue, - float64(writes), - ) - - orgs, buckets, users, tokens := 0, 0, 0, 0 - dashboards, scrapers, telegrafs := 0, 0, 0 - remotes, replications := 0, 0 - _ = c.db.View(func(tx *bolt.Tx) error { - buckets = tx.Bucket(bucketBucket).Stats().KeyN - dashboards = tx.Bucket(dashboardBucket).Stats().KeyN - orgs = tx.Bucket(organizationBucket).Stats().KeyN - scrapers = tx.Bucket(scraperBucket).Stats().KeyN - telegrafs = tx.Bucket(telegrafBucket).Stats().KeyN - remotes = tx.Bucket(remoteBucket).Stats().KeyN - replications = tx.Bucket(replicationBucket).Stats().KeyN - tokens = tx.Bucket(authorizationBucket).Stats().KeyN - users = tx.Bucket(userBucket).Stats().KeyN - return nil - }) - - ch <- prometheus.MustNewConstMetric( - orgsDesc, - prometheus.CounterValue, - float64(orgs), - ) - - ch <- prometheus.MustNewConstMetric( - bucketsDesc, - prometheus.CounterValue, - float64(buckets), - ) - - ch <- prometheus.MustNewConstMetric( - usersDesc, - prometheus.CounterValue, - float64(users), - ) - - ch <- prometheus.MustNewConstMetric( - tokensDesc, - prometheus.CounterValue, - float64(tokens), - ) - - ch <- prometheus.MustNewConstMetric( - dashboardsDesc, - prometheus.CounterValue, - float64(dashboards), - ) - - ch <- prometheus.MustNewConstMetric( - scrapersDesc, - prometheus.CounterValue, - float64(scrapers), - ) - - ch <- prometheus.MustNewConstMetric( - telegrafsDesc, - prometheus.CounterValue, - float64(telegrafs), - ) - - ch <- prometheus.MustNewConstMetric( - remoteDesc, - prometheus.CounterValue, - float64(remotes), - ) - - ch <- prometheus.MustNewConstMetric( - replicationDesc, - prometheus.CounterValue, - float64(replications), - ) - - c.pluginsCollector.Collect(ch) -} diff --git a/bolt/metrics_test.go b/bolt/metrics_test.go deleted file mode 100644 index fc834279f1d..00000000000 --- a/bolt/metrics_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package 
bolt_test - -import ( - "context" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/bolt" - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/kit/prom/promtest" - "github.com/influxdata/influxdb/v2/kv/migration/all" - telegrafservice "github.com/influxdata/influxdb/v2/telegraf/service" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestInitialMetrics(t *testing.T) { - t.Parallel() - - client, teardown, err := NewTestClient(t) - if err != nil { - t.Fatalf("unable to setup bolt client: %v", err) - } - defer teardown() - - reg := prom.NewRegistry(zaptest.NewLogger(t)) - reg.MustRegister(client) - - mfs, err := reg.Gather() - if err != nil { - t.Fatal(err) - } - - metrics := map[string]int{ - "influxdb_organizations_total": 0, - "influxdb_buckets_total": 0, - "influxdb_users_total": 0, - "influxdb_tokens_total": 0, - "influxdb_dashboards_total": 0, - "influxdb_remotes_total": 0, - "influxdb_replications_total": 0, - "boltdb_reads_total": 0, - } - for name, count := range metrics { - c := promtest.MustFindMetric(t, mfs, name, nil) - if got := c.GetCounter().GetValue(); int(got) != count { - t.Errorf("expected %s counter to be %d, got %v", name, count, got) - } - } -} - -func TestPluginMetrics(t *testing.T) { - t.Parallel() - - // Set up a BoltDB, and register a telegraf config. - client, teardown, err := NewTestClient(t) - require.NoError(t, err) - defer teardown() - - ctx := context.Background() - log := zaptest.NewLogger(t) - kvStore := bolt.NewKVStore(log, client.Path) - kvStore.WithDB(client.DB()) - require.NoError(t, all.Up(ctx, log, kvStore)) - - tsvc := telegrafservice.New(kvStore) - tconf := influxdb.TelegrafConfig{ - Name: "test", - Config: "[[inputs.cpu]]\n[[outputs.influxdb_v2]]", - OrgID: 1, - } - require.NoError(t, tsvc.CreateTelegrafConfig(ctx, &tconf, 1)) - - // Run a plugin metrics collector with a quicker tick interval than the default. - pluginCollector := bolt.NewPluginMetricsCollector(time.Millisecond) - pluginCollector.Open(client.DB()) - defer pluginCollector.Close() - - reg := prom.NewRegistry(zaptest.NewLogger(t)) - reg.MustRegister(pluginCollector) - - // Run a periodic gather in the background. - gatherTick := time.NewTicker(time.Millisecond) - doneCh := make(chan struct{}) - defer close(doneCh) - - go func() { - for { - select { - case <-doneCh: - return - case <-gatherTick.C: - _, err := reg.Gather() - require.NoError(t, err) - } - } - }() - - // Run a few gathers to see if any race conditions are flushed out. - time.Sleep(250 * time.Millisecond) - - // Gather plugin metrics and ensure they're correct. - metrics, err := reg.Gather() - require.NoError(t, err) - inCpu := promtest.MustFindMetric(t, metrics, "influxdb_telegraf_plugins_count", map[string]string{"plugin": "inputs.cpu"}) - outInfluxDb := promtest.MustFindMetric(t, metrics, "influxdb_telegraf_plugins_count", map[string]string{"plugin": "outputs.influxdb_v2"}) - require.Equal(t, 1, int(inCpu.GetGauge().GetValue())) - require.Equal(t, 1, int(outInfluxDb.GetGauge().GetValue())) - - // Register some more plugins. - tconf = influxdb.TelegrafConfig{ - Name: "test", - Config: "[[inputs.mem]]\n[[outputs.influxdb_v2]]", - OrgID: 1, - } - require.NoError(t, tsvc.CreateTelegrafConfig(ctx, &tconf, 2)) - - // Let a few more background gathers run. - time.Sleep(250 * time.Millisecond) - - // Gather again, and ensure plugin metrics have been updated. 
- metrics, err = reg.Gather() - require.NoError(t, err) - inCpu = promtest.MustFindMetric(t, metrics, "influxdb_telegraf_plugins_count", map[string]string{"plugin": "inputs.cpu"}) - inMem := promtest.MustFindMetric(t, metrics, "influxdb_telegraf_plugins_count", map[string]string{"plugin": "inputs.mem"}) - outInfluxDb = promtest.MustFindMetric(t, metrics, "influxdb_telegraf_plugins_count", map[string]string{"plugin": "outputs.influxdb_v2"}) - require.Equal(t, 1, int(inCpu.GetGauge().GetValue())) - require.Equal(t, 1, int(inMem.GetGauge().GetValue())) - require.Equal(t, 2, int(outInfluxDb.GetGauge().GetValue())) -} diff --git a/bucket.go b/bucket.go deleted file mode 100644 index 095a9034f8e..00000000000 --- a/bucket.go +++ /dev/null @@ -1,172 +0,0 @@ -package influxdb - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -const ( - // BucketTypeUser is a user created bucket - BucketTypeUser = BucketType(0) - // BucketTypeSystem is an internally created bucket that cannot be deleted/renamed. - BucketTypeSystem = BucketType(1) - // MonitoringSystemBucketRetention is the time we should retain monitoring system bucket information - MonitoringSystemBucketRetention = time.Hour * 24 * 7 - // TasksSystemBucketRetention is the time we should retain task system bucket information - TasksSystemBucketRetention = time.Hour * 24 * 3 -) - -// Bucket names constants -const ( - TasksSystemBucketName = "_tasks" - MonitoringSystemBucketName = "_monitoring" -) - -// InfiniteRetention is default infinite retention period. -const InfiniteRetention = 0 - -// Bucket is a bucket. 🎉 -type Bucket struct { - ID platform.ID `json:"id,omitempty"` - OrgID platform.ID `json:"orgID,omitempty"` - Type BucketType `json:"type"` - Name string `json:"name"` - Description string `json:"description"` - RetentionPolicyName string `json:"rp,omitempty"` // This to support v1 sources - RetentionPeriod time.Duration `json:"retentionPeriod"` - ShardGroupDuration time.Duration `json:"shardGroupDuration"` - CRUDLog -} - -// Clone returns a shallow copy of b. -func (b *Bucket) Clone() *Bucket { - other := *b - return &other -} - -// BucketType differentiates system buckets from user buckets. -type BucketType int - -// String converts a BucketType into a human-readable string. -func (bt BucketType) String() string { - if bt == BucketTypeSystem { - return "system" - } - return "user" -} - -// ParseBucketType parses a bucket type from a string -func ParseBucketType(s string) BucketType { - if s == "system" { - return BucketTypeSystem - } - return BucketTypeUser -} - -// ops for buckets error and buckets op logs. -var ( - OpFindBucketByID = "FindBucketByID" - OpFindBucket = "FindBucket" - OpFindBuckets = "FindBuckets" - OpCreateBucket = "CreateBucket" - OpPutBucket = "PutBucket" - OpUpdateBucket = "UpdateBucket" - OpDeleteBucket = "DeleteBucket" -) - -// BucketService represents a service for managing bucket data. -type BucketService interface { - // FindBucketByID returns a single bucket by ID. - FindBucketByID(ctx context.Context, id platform.ID) (*Bucket, error) - - // FindBucket returns the first bucket that matches filter. - FindBucket(ctx context.Context, filter BucketFilter) (*Bucket, error) - - // FindBuckets returns a list of buckets that match filter and the total count of matching buckets. - // Additional options provide pagination & sorting. 
- FindBuckets(ctx context.Context, filter BucketFilter, opt ...FindOptions) ([]*Bucket, int, error) - - // CreateBucket creates a new bucket and sets b.ID with the new identifier. - CreateBucket(ctx context.Context, b *Bucket) error - - // UpdateBucket updates a single bucket with changeset. - // Returns the new bucket state after update. - UpdateBucket(ctx context.Context, id platform.ID, upd BucketUpdate) (*Bucket, error) - - // DeleteBucket removes a bucket by ID. - DeleteBucket(ctx context.Context, id platform.ID) error - FindBucketByName(ctx context.Context, orgID platform.ID, name string) (*Bucket, error) -} - -// BucketUpdate represents updates to a bucket. -// Only fields which are set are updated. -type BucketUpdate struct { - Name *string - Description *string - RetentionPeriod *time.Duration - ShardGroupDuration *time.Duration -} - -// BucketFilter represents a set of filter that restrict the returned results. -type BucketFilter struct { - ID *platform.ID - Name *string - OrganizationID *platform.ID - Org *string -} - -// QueryParams Converts BucketFilter fields to url query params. -func (f BucketFilter) QueryParams() map[string][]string { - qp := map[string][]string{} - if f.ID != nil { - qp["id"] = []string{f.ID.String()} - } - - if f.Name != nil { - qp["bucket"] = []string{*f.Name} - } - - if f.OrganizationID != nil { - qp["orgID"] = []string{f.OrganizationID.String()} - } - - if f.Org != nil { - qp["org"] = []string{*f.Org} - } - - return qp -} - -// String returns a human-readable string of the BucketFilter, -// particularly useful for error messages. -func (f BucketFilter) String() string { - // There should always be exactly 2 fields set, but if it's somehow more, that's fine. - parts := make([]string, 0, 2) - if f.ID != nil { - parts = append(parts, "Bucket ID: "+f.ID.String()) - } - if f.Name != nil { - parts = append(parts, "Bucket Name: "+*f.Name) - } - if f.OrganizationID != nil { - parts = append(parts, "Org ID: "+f.OrganizationID.String()) - } - if f.Org != nil { - parts = append(parts, "Org Name: "+*f.Org) - } - return "[" + strings.Join(parts, ", ") + "]" -} - -func ErrInternalBucketServiceError(op string, err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("unexpected error in buckets; Err: %v", err), - Op: op, - Err: err, - } -} diff --git a/build.go b/build.go deleted file mode 100644 index 606e94960ba..00000000000 --- a/build.go +++ /dev/null @@ -1,22 +0,0 @@ -package influxdb - -// BuildInfo represents the information about InfluxDB build. -type BuildInfo struct { - Version string // Version is the current git tag with v prefix stripped - Commit string // Commit is the current git commit SHA - Date string // Date is the build date in RFC3339 -} - -var buildInfo BuildInfo - -// SetBuildInfo sets the build information for the binary. -func SetBuildInfo(version, commit, date string) { - buildInfo.Version = version - buildInfo.Commit = commit - buildInfo.Date = date -} - -// GetBuildInfo returns the current build information for the binary. -func GetBuildInfo() BuildInfo { - return buildInfo -} diff --git a/check.go b/check.go deleted file mode 100644 index 63c21aa9c09..00000000000 --- a/check.go +++ /dev/null @@ -1,148 +0,0 @@ -package influxdb - -import ( - "context" - "encoding/json" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/query/fluxlang" -) - -// consts for checks config. 
-const ( - CheckDefaultPageSize = 100 - CheckMaxPageSize = 500 -) - -// Check represents the information required to generate a periodic check task. -type Check interface { - Valid(lang fluxlang.FluxLanguageService) error - Type() string - ClearPrivateData() - SetTaskID(platform.ID) - GetTaskID() platform.ID - GetOwnerID() platform.ID - SetOwnerID(platform.ID) - GenerateFlux(lang fluxlang.FluxLanguageService) (string, error) - json.Marshaler - - CRUDLogSetter - SetID(id platform.ID) - SetOrgID(id platform.ID) - SetName(name string) - SetDescription(description string) - - GetID() platform.ID - GetCRUDLog() CRUDLog - GetOrgID() platform.ID - GetName() string - GetDescription() string -} - -// ops for checks error -var ( - OpFindCheckByID = "FindCheckByID" - OpFindCheck = "FindCheck" - OpFindChecks = "FindChecks" - OpCreateCheck = "CreateCheck" - OpUpdateCheck = "UpdateCheck" - OpDeleteCheck = "DeleteCheck" -) - -// CheckService represents a service for managing checks. -type CheckService interface { - // FindCheckByID returns a single check by ID. - FindCheckByID(ctx context.Context, id platform.ID) (Check, error) - - // FindCheck returns the first check that matches filter. - FindCheck(ctx context.Context, filter CheckFilter) (Check, error) - - // FindChecks returns a list of checks that match filter and the total count of matching checks. - // Additional options provide pagination & sorting. - FindChecks(ctx context.Context, filter CheckFilter, opt ...FindOptions) ([]Check, int, error) - - // CreateCheck creates a new check and sets b.ID with the new identifier. - CreateCheck(ctx context.Context, c CheckCreate, userID platform.ID) error - - // UpdateCheck updates the whole check. - // Returns the new check state after update. - UpdateCheck(ctx context.Context, id platform.ID, c CheckCreate) (Check, error) - - // PatchCheck updates a single bucket with changeset. - // Returns the new check state after update. - PatchCheck(ctx context.Context, id platform.ID, upd CheckUpdate) (Check, error) - - // DeleteCheck will delete the check by id. - DeleteCheck(ctx context.Context, id platform.ID) error -} - -// CheckUpdate are properties than can be updated on a check -type CheckUpdate struct { - Name *string `json:"name,omitempty"` - Status *Status `json:"status,omitempty"` - Description *string `json:"description,omitempty"` -} - -// CheckCreate represent data to create a new Check -type CheckCreate struct { - Check - Status Status `json:"status"` -} - -// Valid returns err is the update is invalid. -func (n *CheckUpdate) Valid() error { - if n.Name != nil && *n.Name == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Check Name can't be empty", - } - } - - if n.Description != nil && *n.Description == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Check Description can't be empty", - } - } - - if n.Status != nil { - if err := n.Status.Valid(); err != nil { - return err - } - } - - return nil -} - -// CheckFilter represents a set of filters that restrict the returned results. -type CheckFilter struct { - ID *platform.ID - Name *string - OrgID *platform.ID - Org *string - UserResourceMappingFilter -} - -// QueryParams Converts CheckFilter fields to url query params. 
-func (f CheckFilter) QueryParams() map[string][]string { - qp := map[string][]string{} - - if f.ID != nil { - qp["id"] = []string{f.ID.String()} - } - - if f.Name != nil { - qp["name"] = []string{*f.Name} - } - - if f.OrgID != nil { - qp["orgID"] = []string{f.OrgID.String()} - } - - if f.Org != nil { - qp["org"] = []string{*f.Org} - } - - return qp -} diff --git a/checks/service.go b/checks/service.go deleted file mode 100644 index 5277f911440..00000000000 --- a/checks/service.go +++ /dev/null @@ -1,569 +0,0 @@ -package checks - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "go.uber.org/zap" -) - -var _ influxdb.CheckService = (*Service)(nil) - -// Service is a check service -// It provides all the operations needed to manage checks -type Service struct { - kv kv.Store - - log *zap.Logger - - orgs influxdb.OrganizationService - tasks taskmodel.TaskService - - timeGenerator influxdb.TimeGenerator - idGenerator platform.IDGenerator - - checkStore *kv.IndexStore -} - -// NewService constructs and configures a new checks.Service -func NewService(logger *zap.Logger, store kv.Store, orgs influxdb.OrganizationService, tasks taskmodel.TaskService) *Service { - return &Service{ - kv: store, - log: logger, - orgs: orgs, - tasks: tasks, - - timeGenerator: influxdb.RealTimeGenerator{}, - idGenerator: snowflake.NewIDGenerator(), - checkStore: newCheckStore(), - } -} - -func newCheckStore() *kv.IndexStore { - const resource = "check" - - var decEndpointEntFn kv.DecodeBucketValFn = func(key, val []byte) ([]byte, interface{}, error) { - ch, err := check.UnmarshalJSON(val) - return key, ch, err - } - - var decValToEntFn kv.ConvertValToEntFn = func(_ []byte, v interface{}) (kv.Entity, error) { - ch, ok := v.(influxdb.Check) - if err := kv.IsErrUnexpectedDecodeVal(ok); err != nil { - return kv.Entity{}, err - } - return kv.Entity{ - PK: kv.EncID(ch.GetID()), - UniqueKey: kv.Encode( - kv.EncID(ch.GetOrgID()), - kv.EncString(ch.GetName()), - ), - Body: ch, - }, nil - } - - return &kv.IndexStore{ - Resource: resource, - EntStore: kv.NewStoreBase( - resource, - []byte("checksv1"), - kv.EncIDKey, - kv.EncBodyJSON, - decEndpointEntFn, - decValToEntFn, - ), - IndexStore: kv.NewOrgNameKeyStore(resource, []byte("checkindexv1"), false), - } -} - -// FindCheckByID retrieves a check by id. 
-func (s *Service) FindCheckByID(ctx context.Context, id platform.ID) (influxdb.Check, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var c influxdb.Check - err := s.kv.View(ctx, func(tx kv.Tx) error { - chkVal, err := s.findCheckByID(ctx, tx, id) - if err != nil { - return err - } - c = chkVal - return nil - }) - if err != nil { - return nil, err - } - - return c, nil -} - -func (s *Service) findCheckByID(ctx context.Context, tx kv.Tx, id platform.ID) (influxdb.Check, error) { - chkVal, err := s.checkStore.FindEnt(ctx, tx, kv.Entity{PK: kv.EncID(id)}) - if err != nil { - return nil, err - } - return chkVal.(influxdb.Check), nil -} - -func (s *Service) findCheckByName(ctx context.Context, tx kv.Tx, orgID platform.ID, name string) (influxdb.Check, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - chVal, err := s.checkStore.FindEnt(ctx, tx, kv.Entity{ - UniqueKey: kv.Encode(kv.EncID(orgID), kv.EncString(name)), - }) - if kv.IsNotFound(err) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Err: err, - } - } - if err != nil { - return nil, err - } - - return chVal.(influxdb.Check), nil -} - -// FindCheck retrieves a check using an arbitrary check filter. -// Filters using ID, or OrganizationID and check Name should be efficient. -// Other filters will do a linear scan across checks until it finds a match. -func (s *Service) FindCheck(ctx context.Context, filter influxdb.CheckFilter) (influxdb.Check, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if filter.ID != nil { - return s.FindCheckByID(ctx, *filter.ID) - } - - if filter.Org != nil { - o, err := s.orgs.FindOrganization(ctx, influxdb.OrganizationFilter{Name: filter.Org}) - if err != nil { - return nil, err - } - filter.OrgID = &o.ID - } - - var c influxdb.Check - err := s.kv.View(ctx, func(tx kv.Tx) error { - if filter.OrgID != nil && filter.Name != nil { - ch, err := s.findCheckByName(ctx, tx, *filter.OrgID, *filter.Name) - c = ch - return err - } - - var prefix []byte - if filter.OrgID != nil { - ent := kv.Entity{UniqueKey: kv.EncID(*filter.OrgID)} - prefix, _ = s.checkStore.IndexStore.EntKey(ctx, ent) - } - - filterFn := filterChecksFn(filter) - return s.checkStore.Find(ctx, tx, kv.FindOpts{ - Prefix: prefix, - Limit: 1, - FilterEntFn: func(k []byte, v interface{}) bool { - ch, ok := v.(influxdb.Check) - if err := kv.IsErrUnexpectedDecodeVal(ok); err != nil { - return false - } - return filterFn(ch) - }, - CaptureFn: func(key []byte, decodedVal interface{}) error { - c, _ = decodedVal.(influxdb.Check) - return nil - }, - }) - }) - if err != nil { - return nil, err - } - - if c == nil { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "check not found", - } - } - return c, nil -} - -func filterChecksFn(filter influxdb.CheckFilter) func(c influxdb.Check) bool { - return func(c influxdb.Check) bool { - if filter.ID != nil && c.GetID() != *filter.ID { - return false - } - if filter.OrgID != nil && c.GetOrgID() != *filter.OrgID { - return false - } - if filter.Name != nil && c.GetName() != *filter.Name { - return false - } - return true - } -} - -// FindChecks retrieves all checks that match an arbitrary check filter. -// Filters using ID, or OrganizationID and check Name should be efficient. -// Other filters will do a linear scan across all checks searching for a match. 
-func (s *Service) FindChecks(ctx context.Context, filter influxdb.CheckFilter, opts ...influxdb.FindOptions) ([]influxdb.Check, int, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if filter.ID != nil { - c, err := s.FindCheckByID(ctx, *filter.ID) - if err != nil { - return nil, 0, err - } - return []influxdb.Check{c}, 1, nil - } - - if filter.Org != nil { - o, err := s.orgs.FindOrganization(ctx, influxdb.OrganizationFilter{Name: filter.Org}) - if err != nil { - return nil, 0, &errors.Error{Err: err} - } - - filter.OrgID = &o.ID - } - - var checks []influxdb.Check - err := s.kv.View(ctx, func(tx kv.Tx) error { - var opt influxdb.FindOptions - if len(opts) > 0 { - opt = opts[0] - } - - filterFn := filterChecksFn(filter) - return s.checkStore.Find(ctx, tx, kv.FindOpts{ - Descending: opt.Descending, - Offset: opt.Offset, - Limit: opt.Limit, - FilterEntFn: func(k []byte, v interface{}) bool { - ch, ok := v.(influxdb.Check) - if err := kv.IsErrUnexpectedDecodeVal(ok); err != nil { - return false - } - return filterFn(ch) - }, - CaptureFn: func(key []byte, decodedVal interface{}) error { - c, ok := decodedVal.(influxdb.Check) - if err := kv.IsErrUnexpectedDecodeVal(ok); err != nil { - return err - } - checks = append(checks, c) - return nil - }, - }) - }) - if err != nil { - return nil, 0, err - } - - return checks, len(checks), nil -} - -// CreateCheck creates a influxdb check and sets ID. -func (s *Service) CreateCheck(ctx context.Context, c influxdb.CheckCreate, userID platform.ID) (err error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if err := c.Status.Valid(); err != nil { - return err - } - - if c.GetOrgID().Valid() { - if _, err := s.orgs.FindOrganizationByID(ctx, c.GetOrgID()); err != nil { - return &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpCreateCheck, - Err: err, - } - } - } - - c.SetID(s.idGenerator.ID()) - c.SetOwnerID(userID) - now := s.timeGenerator.Now() - c.SetCreatedAt(now) - c.SetUpdatedAt(now) - - if err := c.Valid(fluxlang.DefaultService); err != nil { - return err - } - - // create task initially in inactive state - t, err := s.createCheckTask(ctx, c) - if err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Could not create task from check", - Err: err, - } - } - - c.SetTaskID(t.ID) - - err = s.kv.Update(ctx, func(tx kv.Tx) error { - return s.putCheck(ctx, tx, c, kv.PutNew()) - }) - - // something went wrong persisting new check - // so remove associated task - if err != nil { - if derr := s.tasks.DeleteTask(ctx, t.ID); derr != nil { - msg := fmt.Sprintf("error removing task %q for check %q in org %q", t.ID, c.GetName(), c.GetOrgID()) - s.log.Error(msg, zap.Error(derr)) - } - - return err - } - - // update task to be in matching state to check - if influxdb.Status(t.Status) != c.Status { - _, err = s.tasks.UpdateTask(ctx, t.ID, taskmodel.TaskUpdate{ - Status: strPtr(string(c.Status)), - }) - } - - return err -} - -func (s *Service) createCheckTask(ctx context.Context, c influxdb.CheckCreate) (*taskmodel.Task, error) { - script, err := c.GenerateFlux(fluxlang.DefaultService) - if err != nil { - return nil, err - } - - tc := taskmodel.TaskCreate{ - Type: c.Type(), - Flux: script, - OwnerID: c.GetOwnerID(), - OrganizationID: c.GetOrgID(), - // task initially in inactive state to ensure it isn't - // scheduled until check is persisted and active - Status: string(influxdb.Inactive), - } - - t, err := s.tasks.CreateTask(ctx, tc) - if err != nil { - return nil, err - } - - 
return t, nil -} - -// PutCheck will put a check without setting an ID. -func (s *Service) PutCheck(ctx context.Context, c influxdb.Check) error { - if err := c.Valid(fluxlang.DefaultService); err != nil { - return err - } - return s.kv.Update(ctx, func(tx kv.Tx) error { - return s.putCheck(ctx, tx, c) - }) -} - -func (s *Service) putCheck(ctx context.Context, tx kv.Tx, c influxdb.Check, opts ...kv.PutOptionFn) error { - return s.checkStore.Put(ctx, tx, kv.Entity{ - PK: kv.EncID(c.GetID()), - UniqueKey: kv.Encode(kv.EncID(c.GetOrgID()), kv.EncString(c.GetName())), - Body: c, - }, opts...) -} - -// PatchCheck updates a check according the parameters set on upd. -func (s *Service) PatchCheck(ctx context.Context, id platform.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var check influxdb.Check - if err := s.kv.Update(ctx, func(tx kv.Tx) error { - c, err := s.findCheckByID(ctx, tx, id) - if err != nil { - return err - } - - c, err = s.patchCheck(ctx, tx, c, upd) - if err != nil { - return err - } - - check = c - return nil - }); err != nil { - return nil, err - } - - if err := s.patchCheckTask(ctx, check.GetTaskID(), upd); err != nil { - return nil, err - } - - return check, nil -} - -// UpdateCheck updates the check. -func (s *Service) UpdateCheck(ctx context.Context, id platform.ID, chk influxdb.CheckCreate) (influxdb.Check, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var check influxdb.Check - if err := s.kv.Update(ctx, func(tx kv.Tx) error { - c, err := s.updateCheck(ctx, tx, id, chk) - if err != nil { - return err - } - check = c - return nil - }); err != nil { - return nil, err - } - - if err := s.updateCheckTask(ctx, chk); err != nil { - return nil, err - } - - return check, nil -} - -func (s *Service) updateCheckTask(ctx context.Context, chk influxdb.CheckCreate) error { - flux, err := chk.GenerateFlux(fluxlang.DefaultService) - if err != nil { - return err - } - - tu := taskmodel.TaskUpdate{ - Flux: &flux, - Description: strPtr(chk.GetDescription()), - } - - if chk.Status != "" { - tu.Status = strPtr(string(chk.Status)) - } - - if _, err := s.tasks.UpdateTask(ctx, chk.GetTaskID(), tu); err != nil { - return err - } - - return err -} - -func (s *Service) patchCheckTask(ctx context.Context, taskID platform.ID, upd influxdb.CheckUpdate) error { - tu := taskmodel.TaskUpdate{ - Description: upd.Description, - } - - if upd.Status != nil { - tu.Status = strPtr(string(*upd.Status)) - } - - if _, err := s.tasks.UpdateTask(ctx, taskID, tu); err != nil { - return err - } - - return nil -} - -func (s *Service) updateCheck(ctx context.Context, tx kv.Tx, id platform.ID, chk influxdb.CheckCreate) (influxdb.Check, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - current, err := s.findCheckByID(ctx, tx, id) - if err != nil { - return nil, err - } - - chk.SetTaskID(current.GetTaskID()) - - if chk.GetName() != current.GetName() { - c0, err := s.findCheckByName(ctx, tx, current.GetOrgID(), chk.GetName()) - if err == nil && c0.GetID() != id { - return nil, &errors.Error{ - Code: errors.EConflict, - Msg: "check name is not unique", - } - } - - ent := kv.Entity{ - UniqueKey: kv.Encode(kv.EncID(current.GetOrgID()), kv.EncString(current.GetName())), - } - if err := s.checkStore.IndexStore.DeleteEnt(ctx, tx, ent); err != nil { - return nil, err - } - } - - // ID, OrganizationID, and OwnerID can not be updated. 
- chk.SetID(current.GetID()) - chk.SetOrgID(current.GetOrgID()) - chk.SetOwnerID(current.GetOwnerID()) - chk.SetCreatedAt(current.GetCRUDLog().CreatedAt) - chk.SetUpdatedAt(s.timeGenerator.Now()) - - if err := chk.Valid(fluxlang.DefaultService); err != nil { - return nil, err - } - - if err := chk.Status.Valid(); err != nil { - return nil, err - } - - if err := s.putCheck(ctx, tx, chk.Check); err != nil { - return nil, err - } - - return chk.Check, nil -} - -func (s *Service) patchCheck(ctx context.Context, tx kv.Tx, check influxdb.Check, upd influxdb.CheckUpdate) (influxdb.Check, error) { - if upd.Name != nil { - check.SetName(*upd.Name) - } - - if upd.Description != nil { - check.SetDescription(*upd.Description) - } - - check.SetUpdatedAt(s.timeGenerator.Now()) - - if err := check.Valid(fluxlang.DefaultService); err != nil { - return nil, err - } - - if err := s.putCheck(ctx, tx, check, kv.PutUpdate()); err != nil { - return nil, err - } - - return check, nil -} - -// DeleteCheck deletes a check and prunes it from the index. -func (s *Service) DeleteCheck(ctx context.Context, id platform.ID) error { - ch, err := s.FindCheckByID(ctx, id) - if err != nil { - return err - } - - if err := s.tasks.DeleteTask(ctx, ch.GetTaskID()); err != nil { - return err - } - - return s.kv.Update(ctx, func(tx kv.Tx) error { - return s.checkStore.DeleteEnt(ctx, tx, kv.Entity{ - PK: kv.EncID(id), - }) - }) -} - -func strPtr(s string) *string { - ss := new(string) - *ss = s - return ss -} diff --git a/checks/service_external_test.go b/checks/service_external_test.go deleted file mode 100644 index a6abc97845a..00000000000 --- a/checks/service_external_test.go +++ /dev/null @@ -1,1806 +0,0 @@ -package checks - -import ( - "bytes" - "context" - "sort" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/flux/ast" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/task/taskmodel" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - orgOneID = "020f755c3c083000" - orgTwoID = "020f755c3c083001" - - oneID = "020f755c3c082000" - twoID = "020f755c3c082001" - threeID = "020f755c3c082002" - fourID = "020f755c3c082003" - fiveID = "020f755c3c082004" - sixID = "020f755c3c082005" - - checkOneID = "020f755c3c082000" - checkTwoID = "020f755c3c082001" -) - -var script = `data = from(bucket: "telegraf") |> range(start: -1m) |> filter(fn: (r) => r._field == "usage_user")` - -var deadman1 = &check.Deadman{ - Base: check.Base{ - Name: "name1", - ID: MustIDBase16(checkOneID), - OrgID: MustIDBase16(orgOneID), - OwnerID: MustIDBase16(sixID), - Description: "desc1", - TaskID: 1, - Query: influxdb.DashboardQuery{ - Text: script, - BuilderConfig: influxdb.BuilderConfig{ - Buckets: []string{}, - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_field", - Values: []string{"usage_user"}, - AggregateFunctionType: "filter", - }, - }, - Functions: []struct { - Name string 
`json:"name"` - }{}, - }, - }, - Every: mustDuration("1m"), - StatusMessageTemplate: "msg1", - Tags: []influxdb.Tag{ - {Key: "k1", Value: "v1"}, - {Key: "k2", Value: "v2"}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - TimeSince: mustDuration("21s"), - StaleTime: mustDuration("1h"), - ReportZero: true, - Level: notification.Critical, -} - -var threshold1 = &check.Threshold{ - Base: check.Base{ - Name: "name2", - ID: MustIDBase16(checkTwoID), - OrgID: MustIDBase16(orgTwoID), - OwnerID: MustIDBase16(sixID), - TaskID: 1, - Description: "desc2", - StatusMessageTemplate: "msg2", - Every: mustDuration("1m"), - Query: influxdb.DashboardQuery{ - Text: script, - BuilderConfig: influxdb.BuilderConfig{ - Buckets: []string{}, - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{}, - Functions: []struct { - Name string `json:"name"` - }{}, - }, - }, - Tags: []influxdb.Tag{ - {Key: "k11", Value: "v11"}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - Thresholds: []check.ThresholdConfig{ - &check.Lesser{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Ok, - }, - Value: 1000, - }, - &check.Greater{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Warn, - }, - Value: 2000, - }, - &check.Range{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Info, - }, - Min: 1500, - Max: 1900, - Within: true, - }, - }, -} - -var checkCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmpopts.IgnoreFields(check.Base{}, "TaskID"), - cmp.Transformer("Sort", func(in []influxdb.Check) []influxdb.Check { - out := append([]influxdb.Check(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].GetID() > out[j].GetID() - }) - return out - }), -} - -var taskCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - // skip comparing permissions - cmpopts.IgnoreFields( - taskmodel.Task{}, - "LatestCompleted", - "LatestScheduled", - "CreatedAt", - "UpdatedAt", - ), - cmp.Transformer("Sort", func(in []*taskmodel.Task) []*taskmodel.Task { - out := append([]*taskmodel.Task{}, in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID > out[j].ID - }) - return out - }), - cmp.Transformer("FormatFlux", func(in taskmodel.Task) taskmodel.Task { - newTask := in - newTask.Flux = itesting.FormatFluxString(&testing.T{}, newTask.Flux) - return newTask - }), -} - -// CheckFields will include the IDGenerator, and checks -type CheckFields struct { - IDGenerator platform.IDGenerator - TimeGenerator influxdb.TimeGenerator - TaskService taskmodel.TaskService - Checks []influxdb.Check - Organizations []*influxdb.Organization - Tasks []taskmodel.TaskCreate -} - -type checkServiceFactory func(CheckFields, *testing.T) (influxdb.CheckService, taskmodel.TaskService, string, func()) - -type checkServiceF func( - init checkServiceFactory, - t *testing.T, -) - -// CheckService tests all the service functions. 
-func CheckService( - init checkServiceFactory, - t *testing.T, -) { - tests := []struct { - name string - fn checkServiceF - }{ - { - name: "CreateCheck", - fn: CreateCheck, - }, - { - name: "FindCheckByID", - fn: FindCheckByID, - }, - { - name: "FindChecks", - fn: FindChecks, - }, - { - name: "FindCheck", - fn: FindCheck, - }, - { - name: "PatchCheck", - fn: PatchCheck, - }, - { - name: "UpdateCheck", - fn: UpdateCheck, - }, - { - name: "DeleteCheck", - fn: DeleteCheck, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.fn(init, t) - }) - } -} - -// CreateCheck testing -func CreateCheck( - init checkServiceFactory, - t *testing.T, -) { - type args struct { - userID platform.ID - check influxdb.Check - } - type wants struct { - err *errors.Error - checks []influxdb.Check - tasks []*taskmodel.Task - } - - tests := []struct { - name string - fields CheckFields - args args - wants wants - }{ - { - name: "create checks with empty set", - fields: CheckFields{ - IDGenerator: mock.NewIDGenerator(checkOneID, t), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Checks: []influxdb.Check{}, - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - }, - }, - args: args{ - userID: MustIDBase16(twoID), - check: &check.Deadman{ - Base: check.Base{ - Name: "name1", - OrgID: MustIDBase16(orgOneID), - Description: "desc1", - Query: influxdb.DashboardQuery{ - Text: script, - BuilderConfig: influxdb.BuilderConfig{ - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_field", - Values: []string{"usage_user"}, - AggregateFunctionType: "filter", - }, - }, - }, - }, - Every: mustDuration("1m"), - StatusMessageTemplate: "msg1", - Tags: []influxdb.Tag{ - {Key: "k1", Value: "v1"}, - {Key: "k2", Value: "v2"}, - }, - }, - TimeSince: mustDuration("21s"), - StaleTime: mustDuration("1h"), - ReportZero: true, - Level: notification.Critical, - }, - }, - wants: wants{ - tasks: []*taskmodel.Task{ - { - ID: MustIDBase16("020f755c3c082000"), - Name: "name1", - Type: "deadman", - OrganizationID: MustIDBase16("020f755c3c083000"), - Organization: "theorg", - OwnerID: MustIDBase16("020f755c3c082001"), - Status: "active", - Flux: "import \"influxdata/influxdb/monitor\"\nimport \"experimental\"\nimport \"influxdata/influxdb/v1\"\n\ndata = from(bucket: \"telegraf\") |> range(start: -1h) |> filter(fn: (r) => r._field == \"usage_user\")\n\noption task = {name: \"name1\", every: 1m}\n\ncheck = {_check_id: \"020f755c3c082000\", _check_name: \"name1\", _type: \"deadman\", tags: {k1: \"v1\", k2: \"v2\"}}\ncrit = (r) => r[\"dead\"]\nmessageFn = (r) => \"msg1\"\n\ndata\n |> v1[\"fieldsAsCols\"]()\n |> monitor[\"deadman\"](t: experimental[\"subDuration\"](from: now(), d: 21s))\n |> monitor[\"check\"](data: check, messageFn: messageFn, crit: crit)\n", - Every: "1m", - }, - }, - checks: []influxdb.Check{ - &check.Deadman{ - Base: check.Base{ - Name: "name1", - ID: MustIDBase16(checkOneID), - OrgID: MustIDBase16(orgOneID), - OwnerID: MustIDBase16(twoID), - Query: influxdb.DashboardQuery{ - Text: script, - BuilderConfig: influxdb.BuilderConfig{ - Buckets: []string{}, - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_field", - Values: []string{"usage_user"}, - AggregateFunctionType: "filter", - }, - }, - Functions: 
[]struct { - Name string `json:"name"` - }{}, - }, - }, - Every: mustDuration("1m"), - Description: "desc1", - StatusMessageTemplate: "msg1", - Tags: []influxdb.Tag{ - {Key: "k1", Value: "v1"}, - {Key: "k2", Value: "v2"}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - TimeSince: mustDuration("21s"), - StaleTime: mustDuration("1h"), - ReportZero: true, - Level: notification.Critical, - }, - }, - }, - }, - { - name: "basic create check", - fields: CheckFields{ - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform.ID { - return MustIDBase16(checkTwoID) - }, - }, - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Checks: []influxdb.Check{ - deadman1, - }, - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - { - Name: "otherorg", - ID: MustIDBase16(orgTwoID), - }, - }, - }, - args: args{ - userID: MustIDBase16(sixID), - check: &check.Threshold{ - Base: check.Base{ - Name: "name2", - OrgID: MustIDBase16(orgTwoID), - OwnerID: MustIDBase16(twoID), - Description: "desc2", - StatusMessageTemplate: "msg2", - Every: mustDuration("1m"), - Query: influxdb.DashboardQuery{ - Text: script, - }, - Tags: []influxdb.Tag{ - {Key: "k11", Value: "v11"}, - }, - }, - Thresholds: []check.ThresholdConfig{ - &check.Lesser{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Ok, - }, - Value: 1000, - }, - &check.Greater{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Warn, - }, - Value: 2000, - }, - &check.Range{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Info, - }, - Min: 1500, - Max: 1900, - Within: true, - }, - }, - }, - }, - wants: wants{ - checks: []influxdb.Check{ - deadman1, - threshold1, - }, - tasks: []*taskmodel.Task{ - { - ID: MustIDBase16("020f755c3c082001"), - Name: "name2", - Type: "threshold", - OrganizationID: MustIDBase16("020f755c3c083001"), - Organization: "otherorg", - OwnerID: MustIDBase16("020f755c3c082005"), - Status: "active", - Every: "1m", - Flux: `import "influxdata/influxdb/monitor" -import "influxdata/influxdb/v1" - -data = from(bucket: "telegraf") |> range(start: -1m) |> filter(fn: (r) => r._field == "usage_user") - -option task = {name: "name2", every: 1m} - -check = {_check_id: "020f755c3c082001", _check_name: "name2", _type: "threshold", tags: {k11: "v11"}} -ok = (r) => r["usage_user"] < 1000.0 -warn = (r) => r["usage_user"] > 2000.0 -info = (r) => r["usage_user"] < 1900.0 and r["usage_user"] > 1500.0 -messageFn = (r) => "msg2" - -data - |> v1["fieldsAsCols"]() - |> monitor["check"]( - data: check, - messageFn: messageFn, - ok: ok, - warn: warn, - info: info, - ) -`, - }, - }, - }, - }, - { - name: "names should be unique within an organization", - fields: CheckFields{ - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform.ID { - return MustIDBase16(checkTwoID) - }, - }, - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Checks: []influxdb.Check{ - deadman1, - }, - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - { - Name: "otherorg", - ID: MustIDBase16(orgTwoID), - }, - }, - }, - args: args{ - userID: MustIDBase16(twoID), - check: &check.Threshold{ - Base: check.Base{ - Name: "name1", - OrgID: MustIDBase16(orgOneID), - Description: "desc1", - Every: mustDuration("1m"), - Query: influxdb.DashboardQuery{ - 
Text: script, - BuilderConfig: influxdb.BuilderConfig{ - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_field", - Values: []string{"usage_user"}, - AggregateFunctionType: "filter", - }, - }, - }, - }, - StatusMessageTemplate: "msg1", - Tags: []influxdb.Tag{ - {Key: "k1", Value: "v1"}, - {Key: "k2", Value: "v2"}, - }, - }, - }, - }, - wants: wants{ - checks: []influxdb.Check{ - deadman1, - }, - err: &errors.Error{ - Code: errors.EConflict, - Op: influxdb.OpCreateCheck, - Msg: "check is not unique", - }, - }, - }, - { - name: "names should not be unique across organizations", - fields: CheckFields{ - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform.ID { - return MustIDBase16(checkTwoID) - }, - }, - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - { - Name: "otherorg", - ID: MustIDBase16(orgTwoID), - }, - }, - Checks: []influxdb.Check{ - deadman1, - }, - }, - args: args{ - userID: MustIDBase16(twoID), - check: &check.Threshold{ - Base: check.Base{ - Name: "name1", - OrgID: MustIDBase16(orgTwoID), - Description: "desc2", - Every: mustDuration("1m"), - Query: influxdb.DashboardQuery{ - Text: script, - BuilderConfig: influxdb.BuilderConfig{ - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_field", - Values: []string{"usage_user"}, - AggregateFunctionType: "filter", - }, - }, - }, - }, - StatusMessageTemplate: "msg2", - Tags: []influxdb.Tag{ - {Key: "k11", Value: "v11"}, - {Key: "k22", Value: "v22"}, - }, - }, - }, - }, - wants: wants{ - tasks: []*taskmodel.Task{ - { - ID: MustIDBase16("020f755c3c082001"), - Name: "name1", - Type: "threshold", - OrganizationID: MustIDBase16("020f755c3c083001"), - Organization: "otherorg", - OwnerID: MustIDBase16("020f755c3c082001"), - Status: "active", - Every: "1m", - Flux: "import \"influxdata/influxdb/monitor\"\nimport \"influxdata/influxdb/v1\"\n\ndata = from(bucket: \"telegraf\") |> range(start: -1m) |> filter(fn: (r) => r._field == \"usage_user\")\n\noption task = {name: \"name1\", every: 1m}\n\ncheck = {_check_id: \"020f755c3c082001\", _check_name: \"name1\", _type: \"threshold\", tags: {k11: \"v11\", k22: \"v22\"}}\nmessageFn = (r) => \"msg2\"\n\ndata |> v1[\"fieldsAsCols\"]() |> monitor[\"check\"](data: check, messageFn: messageFn)\n", - }, - }, - checks: []influxdb.Check{ - deadman1, - &check.Threshold{ - Base: check.Base{ - ID: MustIDBase16(checkTwoID), - Name: "name1", - OrgID: MustIDBase16(orgTwoID), - Every: mustDuration("1m"), - Query: influxdb.DashboardQuery{ - Text: script, - BuilderConfig: influxdb.BuilderConfig{ - Buckets: []string{}, - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_field", - Values: []string{"usage_user"}, - AggregateFunctionType: "filter", - }, - }, - Functions: []struct { - Name string `json:"name"` - }{}, - }, - }, - OwnerID: MustIDBase16(twoID), - Description: "desc2", - StatusMessageTemplate: "msg2", - Tags: []influxdb.Tag{ - {Key: "k11", Value: "v11"}, - {Key: "k22", Value: "v22"}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - 
}, - }, - { - name: "create check with orgID not exist", - fields: CheckFields{ - IDGenerator: mock.NewIDGenerator(checkOneID, t), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Checks: []influxdb.Check{}, - Organizations: []*influxdb.Organization{}, - }, - args: args{ - userID: MustIDBase16(twoID), - check: &check.Threshold{ - Base: check.Base{ - Name: "name1", - OrgID: MustIDBase16(orgOneID), - Every: mustDuration("1m"), - Query: influxdb.DashboardQuery{ - Text: script, - BuilderConfig: influxdb.BuilderConfig{ - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_field", - Values: []string{"usage_user"}, - AggregateFunctionType: "filter", - }, - }, - }, - }, - Description: "desc2", - StatusMessageTemplate: "msg2", - Tags: []influxdb.Tag{ - {Key: "k11", Value: "v11"}, - {Key: "k22", Value: "v22"}, - }, - }, - }, - }, - wants: wants{ - checks: []influxdb.Check{}, - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "organization not found", - Op: influxdb.OpCreateCheck, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, tasks, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - createCheck := influxdb.CheckCreate{Check: tt.args.check, Status: influxdb.Active} - err := s.CreateCheck(ctx, createCheck, tt.args.userID) - influxErrsEqual(t, tt.wants.err, err) - - defer s.DeleteCheck(ctx, tt.args.check.GetID()) - - checks, _, err := s.FindChecks(ctx, influxdb.CheckFilter{}) - if err != nil { - t.Fatalf("failed to retrieve checks: %v", err) - } - if diff := cmp.Diff(checks, tt.wants.checks, checkCmpOptions...); diff != "" { - t.Errorf("checks are different -got/+want\ndiff %s", diff) - } - - foundTasks, _, err := tasks.FindTasks(ctx, taskmodel.TaskFilter{}) - if err != nil { - t.Fatal(err) - } - - if diff := cmp.Diff(foundTasks, tt.wants.tasks, taskCmpOptions); diff != "" { - t.Errorf("tasks are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindCheckByID testing -func FindCheckByID( - init checkServiceFactory, - t *testing.T, -) { - type args struct { - id platform.ID - } - type wants struct { - err *errors.Error - check influxdb.Check - } - - tests := []struct { - name string - fields CheckFields - args args - wants wants - }{ - { - name: "basic find check by id", - fields: CheckFields{ - Checks: []influxdb.Check{ - deadman1, - threshold1, - }, - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - }, - }, - args: args{ - id: MustIDBase16(checkTwoID), - }, - wants: wants{ - check: threshold1, - }, - }, - { - name: "find check by id not exist", - fields: CheckFields{ - Checks: []influxdb.Check{ - deadman1, - threshold1, - }, - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - }, - }, - args: args{ - id: MustIDBase16(threeID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindCheckByID, - Msg: "check not found", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - check, err := s.FindCheckByID(ctx, tt.args.id) - influxErrsEqual(t, tt.wants.err, err) - - if diff := cmp.Diff(check, tt.wants.check, checkCmpOptions...); diff != "" { - t.Errorf("check is different -got/+want\ndiff %s", diff) - } - }) - } -} - -// 
FindChecks testing -func FindChecks( - init checkServiceFactory, - t *testing.T, -) { - type args struct { - ID platform.ID - name string - organization string - OrgID platform.ID - userID platform.ID - findOptions influxdb.FindOptions - } - - type wants struct { - checks []influxdb.Check - err error - } - tests := []struct { - name string - fields CheckFields - args args - wants wants - }{ - { - name: "find all checks", - fields: CheckFields{ - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - { - Name: "otherorg", - ID: MustIDBase16(orgTwoID), - }, - }, - Checks: []influxdb.Check{ - deadman1, - threshold1, - }, - }, - args: args{ - userID: MustIDBase16(sixID), - }, - wants: wants{ - checks: []influxdb.Check{ - deadman1, - threshold1, - }, - }, - }, - { - name: "find all checks by offset and limit", - fields: CheckFields{ - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - }, - Checks: []influxdb.Check{ - deadman1, - threshold1, - }, - }, - args: args{ - findOptions: influxdb.FindOptions{ - Offset: 1, - Limit: 1, - }, - }, - wants: wants{ - checks: []influxdb.Check{ - threshold1, - }, - }, - }, - { - name: "find all checks by descending", - fields: CheckFields{ - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - }, - Checks: []influxdb.Check{ - deadman1, - threshold1, - }, - }, - args: args{ - userID: MustIDBase16(sixID), - findOptions: influxdb.FindOptions{ - Limit: 1, - Descending: true, - }, - }, - wants: wants{ - checks: []influxdb.Check{ - threshold1, - }, - }, - }, - { - name: "find checks by organization name", - fields: CheckFields{ - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - { - Name: "otherorg", - ID: MustIDBase16(orgTwoID), - }, - }, - Checks: []influxdb.Check{ - deadman1, - threshold1, - }, - }, - args: args{ - userID: MustIDBase16(sixID), - organization: "theorg", - }, - wants: wants{ - checks: []influxdb.Check{ - deadman1, - }, - }, - }, - { - name: "find checks by organization id", - fields: CheckFields{ - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - { - Name: "otherorg", - ID: MustIDBase16(orgTwoID), - }, - }, - Checks: []influxdb.Check{ - deadman1, - threshold1, - }, - }, - args: args{ - userID: MustIDBase16(sixID), - OrgID: MustIDBase16(orgTwoID), - }, - wants: wants{ - checks: []influxdb.Check{ - threshold1, - }, - }, - }, - { - name: "find check by name", - fields: CheckFields{ - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - }, - Checks: []influxdb.Check{ - deadman1, - threshold1, - }, - }, - args: args{ - userID: MustIDBase16(sixID), - name: "name2", - }, - wants: wants{ - checks: []influxdb.Check{ - threshold1, - }, - }, - }, - { - name: "missing check returns no checks", - fields: CheckFields{ - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - }, - Checks: []influxdb.Check{}, - }, - args: args{ - userID: MustIDBase16(sixID), - name: "xyz", - }, - wants: wants{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - filter := influxdb.CheckFilter{} - if tt.args.ID.Valid() { - filter.ID = &tt.args.ID - } - if tt.args.OrgID.Valid() { - filter.OrgID = &tt.args.OrgID - } - if 
tt.args.organization != "" { - filter.Org = &tt.args.organization - } - if tt.args.name != "" { - filter.Name = &tt.args.name - } - - checks, _, err := s.FindChecks(ctx, filter, tt.args.findOptions) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(checks, tt.wants.checks, checkCmpOptions...); diff != "" { - t.Errorf("checks are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// DeleteCheck testing -func DeleteCheck( - init checkServiceFactory, - t *testing.T, -) { - type args struct { - ID string - userID platform.ID - } - type wants struct { - err *errors.Error - checks []influxdb.Check - } - - tests := []struct { - name string - fields CheckFields - args args - wants wants - }{ - { - name: "delete checks using exist id", - fields: CheckFields{ - IDGenerator: mock.NewIDGenerator("0000000000000001", t), - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - }, - Tasks: []taskmodel.TaskCreate{ - { - Flux: `option task = { every: 10s, name: "foo" } -data = from(bucket: "telegraf") |> range(start: -1m)`, - OrganizationID: MustIDBase16(orgOneID), - OwnerID: MustIDBase16(sixID), - }, - }, - Checks: []influxdb.Check{ - deadman1, - threshold1, - }, - }, - args: args{ - ID: checkOneID, - userID: MustIDBase16(sixID), - }, - wants: wants{ - checks: []influxdb.Check{ - threshold1, - }, - }, - }, - { - name: "delete checks using id that does not exist", - fields: CheckFields{ - IDGenerator: mock.NewIDGenerator("0000000000000001", t), - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - }, - Tasks: []taskmodel.TaskCreate{ - { - Flux: `option task = { every: 10s, name: "foo" } - data = from(bucket: "telegraf") |> range(start: -1m)`, - OrganizationID: MustIDBase16(orgOneID), - OwnerID: MustIDBase16(sixID), - }, - }, - Checks: []influxdb.Check{ - deadman1, - threshold1, - }, - }, - args: args{ - ID: "1234567890654321", - userID: MustIDBase16(sixID), - }, - wants: wants{ - err: &errors.Error{ - Op: influxdb.OpDeleteCheck, - Msg: "check not found", - Code: errors.ENotFound, - }, - checks: []influxdb.Check{ - deadman1, - threshold1, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.DeleteCheck(ctx, MustIDBase16(tt.args.ID)) - influxErrsEqual(t, tt.wants.err, err) - - filter := influxdb.CheckFilter{} - checks, _, err := s.FindChecks(ctx, filter) - if err != nil { - t.Fatalf("failed to retrieve checks: %v", err) - } - if diff := cmp.Diff(checks, tt.wants.checks, checkCmpOptions...); diff != "" { - t.Errorf("checks are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindCheck testing -func FindCheck( - init checkServiceFactory, - t *testing.T, -) { - type args struct { - name string - OrgID platform.ID - } - - type wants struct { - check influxdb.Check - err *errors.Error - } - - tests := []struct { - name string - fields CheckFields - args args - wants wants - }{ - { - name: "find check by name", - fields: CheckFields{ - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - { - Name: "theorg2", - ID: MustIDBase16(orgTwoID), - }, - }, - Checks: []influxdb.Check{ - deadman1, - threshold1, - }, - }, - args: args{ - name: "name1", - OrgID: MustIDBase16(orgOneID), - }, - wants: wants{ - check: deadman1, - }, - }, - { - name: "mixed filter", - fields: CheckFields{ - Organizations: 
[]*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - }, - Checks: []influxdb.Check{}, - }, - args: args{ - name: "name2", - OrgID: MustIDBase16(orgOneID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindCheck, - Msg: "check not found", - }, - }, - }, - { - name: "missing check returns error", - fields: CheckFields{ - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - }, - Checks: []influxdb.Check{}, - }, - args: args{ - name: "xyz", - OrgID: MustIDBase16(orgOneID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindCheck, - Msg: "check not found", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, _, done := init(tt.fields, t) - defer done() - - var filter influxdb.CheckFilter - if tt.args.name != "" { - filter.Name = &tt.args.name - } - if tt.args.OrgID.Valid() { - filter.OrgID = &tt.args.OrgID - } - - check, err := s.FindCheck(context.Background(), filter) - influxErrsEqual(t, tt.wants.err, err) - - if diff := cmp.Diff(check, tt.wants.check, checkCmpOptions...); diff != "" { - t.Errorf("checks are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// UpdateCheck testing -func UpdateCheck( - init checkServiceFactory, - t *testing.T, -) { - type args struct { - id platform.ID - check influxdb.Check - } - type wants struct { - err error - check influxdb.Check - } - - tests := []struct { - name string - fields CheckFields - args args - wants wants - }{ - { - name: "mixed update", - fields: CheckFields{ - IDGenerator: mock.NewIDGenerator("0000000000000001", t), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2007, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - }, - Tasks: []taskmodel.TaskCreate{ - { - Flux: `option task = { every: 10s, name: "foo" } -data = from(bucket: "telegraf") |> range(start: -1m)`, - OrganizationID: MustIDBase16(orgOneID), - OwnerID: MustIDBase16(sixID), - }, - }, - Checks: []influxdb.Check{ - deadman1, - }, - }, - args: args{ - id: MustIDBase16(checkOneID), - check: &check.Threshold{ - Base: check.Base{ - ID: MustIDBase16(checkTwoID), - OrgID: MustIDBase16(orgOneID), - OwnerID: MustIDBase16(twoID), - Every: mustDuration("1m"), - Query: influxdb.DashboardQuery{ - Text: script, - BuilderConfig: influxdb.BuilderConfig{ - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_field", - Values: []string{"usage_user"}, - AggregateFunctionType: "filter", - }, - }, - }, - }, - Name: "changed", - Description: "desc changed", - StatusMessageTemplate: "msg2", - TaskID: 1, - Tags: []influxdb.Tag{ - {Key: "k11", Value: "v11"}, - {Key: "k22", Value: "v22"}, - {Key: "k33", Value: "v33"}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: time.Date(2001, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2002, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - Thresholds: []check.ThresholdConfig{ - &check.Lesser{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Ok, - }, - Value: 1000, - }, - &check.Greater{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Warn, - }, - Value: 2000, - }, - &check.Range{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Info, - }, - Min: 1500, - Max: 1900, - Within: true, - }, - }, - }, - }, - 
wants: wants{ - check: &check.Threshold{ - Base: check.Base{ - ID: MustIDBase16(checkOneID), - OrgID: MustIDBase16(orgOneID), - Name: "changed", - Every: mustDuration("1m"), - OwnerID: MustIDBase16(sixID), - Query: influxdb.DashboardQuery{ - Text: script, - BuilderConfig: influxdb.BuilderConfig{ - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_field", - Values: []string{"usage_user"}, - AggregateFunctionType: "filter", - }, - }, - }, - }, - Description: "desc changed", - StatusMessageTemplate: "msg2", - Tags: []influxdb.Tag{ - {Key: "k11", Value: "v11"}, - {Key: "k22", Value: "v22"}, - {Key: "k33", Value: "v33"}, - }, - TaskID: 1, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2007, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - Thresholds: []check.ThresholdConfig{ - &check.Lesser{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Ok, - }, - Value: 1000, - }, - &check.Greater{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Warn, - }, - Value: 2000, - }, - &check.Range{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Info, - }, - Min: 1500, - Max: 1900, - Within: true, - }, - }, - }, - }, - }, - { - name: "update name unique", - fields: CheckFields{ - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - }, - Checks: []influxdb.Check{ - deadman1, - &check.Deadman{ - Base: check.Base{ - ID: MustIDBase16(checkTwoID), - OrgID: MustIDBase16(orgOneID), - Every: mustDuration("1m"), - Query: influxdb.DashboardQuery{ - Text: script, - BuilderConfig: influxdb.BuilderConfig{ - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_field", - Values: []string{"usage_user"}, - AggregateFunctionType: "filter", - }, - }, - }, - }, - TaskID: 1, - Name: "check2", - OwnerID: MustIDBase16(twoID), - StatusMessageTemplate: "msg1", - }, - }, - }, - }, - args: args{ - id: MustIDBase16(checkOneID), - check: &check.Deadman{ - Base: check.Base{ - OrgID: MustIDBase16(orgOneID), - OwnerID: MustIDBase16(twoID), - Name: "check2", - Description: "desc changed", - TaskID: 1, - Every: mustDuration("1m"), - StatusMessageTemplate: "msg2", - Tags: []influxdb.Tag{ - {Key: "k11", Value: "v11"}, - {Key: "k22", Value: "v22"}, - {Key: "k33", Value: "v33"}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: time.Date(2001, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2002, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - TimeSince: mustDuration("12s"), - StaleTime: mustDuration("1h"), - ReportZero: false, - Level: notification.Warn, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EConflict, - Msg: "check name is not unique", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - checkCreate := influxdb.CheckCreate{Check: tt.args.check, Status: influxdb.Active} - - check, err := s.UpdateCheck(ctx, tt.args.id, checkCreate) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(check, tt.wants.check, checkCmpOptions...); diff != "" { - t.Errorf("check is different -got/+want\ndiff %s", diff) - } - }) 
- } -} - -// PatchCheck testing -func PatchCheck( - init checkServiceFactory, - t *testing.T, -) { - type args struct { - id platform.ID - upd influxdb.CheckUpdate - } - type wants struct { - err *errors.Error - check influxdb.Check - } - - inactive := influxdb.Inactive - - tests := []struct { - name string - fields CheckFields - args args - wants wants - }{ - { - name: "mixed patch", - fields: CheckFields{ - IDGenerator: mock.NewIDGenerator("0000000000000001", t), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2007, 5, 4, 1, 2, 3, 0, time.UTC)}, - Tasks: []taskmodel.TaskCreate{ - { - Flux: `option task = { every: 10s, name: "foo" } -data = from(bucket: "telegraf") |> range(start: -1m)`, - OrganizationID: MustIDBase16(orgOneID), - OwnerID: MustIDBase16(sixID), - }, - }, - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - }, - Checks: []influxdb.Check{ - deadman1, - }, - }, - args: args{ - id: MustIDBase16(checkOneID), - upd: influxdb.CheckUpdate{ - Name: strPtr("changed"), - Description: strPtr("desc changed"), - Status: &inactive, - }, - }, - wants: wants{ - check: &check.Deadman{ - Base: check.Base{ - ID: MustIDBase16(checkOneID), - OrgID: MustIDBase16(orgOneID), - Name: "changed", - OwnerID: MustIDBase16(sixID), - Every: mustDuration("1m"), - Description: "desc changed", - Query: influxdb.DashboardQuery{ - Text: script, - BuilderConfig: influxdb.BuilderConfig{ - Buckets: []string{}, - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_field", - Values: []string{"usage_user"}, - AggregateFunctionType: "filter", - }, - }, - Functions: []struct { - Name string `json:"name"` - }{}, - }, - }, - StatusMessageTemplate: "msg1", - Tags: []influxdb.Tag{ - {Key: "k1", Value: "v1"}, - {Key: "k2", Value: "v2"}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2007, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - TimeSince: mustDuration("21s"), - StaleTime: mustDuration("1h"), - ReportZero: true, - Level: notification.Critical, - }, - }, - }, - { - name: "update name unique", - fields: CheckFields{ - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - ID: MustIDBase16(orgOneID), - }, - }, - Checks: []influxdb.Check{ - deadman1, - &check.Deadman{ - Base: check.Base{ - ID: MustIDBase16(checkTwoID), - OrgID: MustIDBase16(orgOneID), - Every: mustDuration("1m"), - Name: "check2", - OwnerID: MustIDBase16(sixID), - Query: influxdb.DashboardQuery{ - Text: script, - BuilderConfig: influxdb.BuilderConfig{ - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_field", - Values: []string{"usage_user"}, - AggregateFunctionType: "filter", - }, - }, - }, - }, - StatusMessageTemplate: "msg1", - }, - }, - }, - }, - args: args{ - id: MustIDBase16(checkOneID), - upd: influxdb.CheckUpdate{ - Name: strPtr("check2"), - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EConflict, - Msg: "check entity update conflicts with an existing entity", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - check, err := s.PatchCheck(ctx, tt.args.id, tt.args.upd) - 
influxErrsEqual(t, tt.wants.err, err) - - if diff := cmp.Diff(check, tt.wants.check, checkCmpOptions...); diff != "" { - t.Errorf("check is different -got/+want\ndiff %s", diff) - } - }) - } -} - -// MustIDBase16 is a helper to ensure a correct ID is built during testing. -func MustIDBase16(s string) platform.ID { - id, err := platform.IDFromString(s) - if err != nil { - panic(err) - } - return *id -} - -func diffPlatformErrors(name string, actual, expected error, opPrefix string, t *testing.T) { - t.Helper() - ErrorsEqual(t, actual, expected) -} - -// ErrorsEqual checks whether the provided errors are equivalent. -func ErrorsEqual(t *testing.T, actual, expected error) { - t.Helper() - if expected == nil && actual == nil { - return - } - - if expected == nil && actual != nil { - t.Errorf("unexpected error %s", actual.Error()) - } - - if expected != nil && actual == nil { - t.Errorf("expected error %s but received nil", expected.Error()) - } - - if errors.ErrorCode(expected) != errors.ErrorCode(actual) { - t.Logf("\nexpected: %v\nactual: %v\n\n", expected, actual) - t.Errorf("expected error code %q but received %q", errors.ErrorCode(expected), errors.ErrorCode(actual)) - } - - if errors.ErrorMessage(expected) != errors.ErrorMessage(actual) { - t.Logf("\nexpected: %v\nactual: %v\n\n", expected, actual) - t.Errorf("expected error message %q but received %q", errors.ErrorMessage(expected), errors.ErrorMessage(actual)) - } -} - -func influxErrsEqual(t *testing.T, expected *errors.Error, actual error) { - t.Helper() - - if expected != nil { - require.Error(t, actual) - } - - if actual == nil { - return - } - - if expected == nil { - require.NoError(t, actual) - return - } - iErr, ok := actual.(*errors.Error) - require.True(t, ok) - assert.Equal(t, expected.Code, iErr.Code) - assert.Truef(t, strings.HasPrefix(iErr.Error(), expected.Error()), "expected: %s got err: %s", expected.Error(), actual.Error()) -} - -func mustDuration(d string) *notification.Duration { - dur, err := time.ParseDuration(d) - if err != nil { - panic(err) - } - - ndur, err := notification.FromTimeDuration(dur) - if err != nil { - panic(err) - } - - // Filter out the zero values from the duration.
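// The zero-magnitude components are dropped, presumably because converting a
// time.Duration such as "1h" can expand it into several units (1h 0m 0s),
// while the check fixtures above are written with single literals like "1h";
// stripping the zero parts keeps the two representations equal under cmp.Diff.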
- durs := make([]ast.Duration, 0, len(ndur.Values)) - for _, d := range ndur.Values { - if d.Magnitude != 0 { - durs = append(durs, d) - } - } - ndur.Values = durs - return &ndur -} diff --git a/checks/service_test.go b/checks/service_test.go deleted file mode 100644 index ffff6327e76..00000000000 --- a/checks/service_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package checks - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - _ "github.com/influxdata/influxdb/v2/fluxinit/static" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "github.com/influxdata/influxdb/v2/tenant" - "go.uber.org/zap/zaptest" -) - -func NewKVTestStore(t *testing.T) (kv.Store, func()) { - t.Helper() - - store := inmem.NewKVStore() - - if err := all.Up(context.Background(), zaptest.NewLogger(t), store); err != nil { - t.Fatal(err) - } - - return store, func() {} -} - -func TestCheckService(t *testing.T) { - CheckService(initCheckService, t) -} - -func initCheckService(f CheckFields, t *testing.T) (influxdb.CheckService, taskmodel.TaskService, string, func()) { - store, closeKVStore := NewKVTestStore(t) - logger := zaptest.NewLogger(t) - - tenantStore := tenant.NewStore(store) - tenantSvc := tenant.NewService(tenantStore) - - svc := kv.NewService(logger, store, tenantSvc, kv.ServiceConfig{ - FluxLanguageService: fluxlang.DefaultService, - }) - svc.IDGenerator = f.IDGenerator - svc.TimeGenerator = f.TimeGenerator - if f.TimeGenerator == nil { - svc.TimeGenerator = influxdb.RealTimeGenerator{} - } - - checkService := NewService(logger, store, tenantSvc, svc) - checkService.idGenerator = f.IDGenerator - if f.TimeGenerator != nil { - checkService.timeGenerator = f.TimeGenerator - } - - ctx := context.Background() - for _, o := range f.Organizations { - mock.SetIDForFunc(&tenantStore.OrgIDGen, o.ID, func() { - if err := tenantSvc.CreateOrganization(ctx, o); err != nil { - t.Fatalf("failed to populate organizations") - } - }) - } - for _, c := range f.Checks { - if err := checkService.PutCheck(ctx, c); err != nil { - t.Fatalf("failed to populate checks") - } - } - for _, tc := range f.Tasks { - if _, err := svc.CreateTask(ctx, tc); err != nil { - t.Fatalf("failed to populate tasks: %v", err) - } - } - return checkService, svc, kv.OpPrefix, func() { - for _, o := range f.Organizations { - if err := tenantSvc.DeleteOrganization(ctx, o.ID); err != nil { - t.Logf("failed to remove organization: %v", err) - } - } - for _, c := range f.Checks { - if err := checkService.DeleteCheck(ctx, c.GetID()); err != nil { - t.Logf("failed to remove check: %v", err) - } - } - - closeKVStore() - } -} diff --git a/cliff.toml b/cliff.toml deleted file mode 100644 index 8cc22a9eb87..00000000000 --- a/cliff.toml +++ /dev/null @@ -1,35 +0,0 @@ -[changelog] -body = """ -{%- if version %} -## {{ version }} [{{ timestamp | date(format="%Y-%m-%d") }}] -{%- else %} -## [unreleased] -{%- endif %} ----------------------- -{% set grouped_commits = commits | group_by(attribute="group") -%} -{%- set_global groups_arr = [] -%} -{%- for group, _commits in grouped_commits -%} - {%- set_global groups_arr = groups_arr | concat(with=group) -%} -{%- endfor -%} -{% for group in groups_arr | sort | reverse %} - {% set g_commits = 
grouped_commits[group] -%} - ### {{ group | upper_first }} - {% for commit in g_commits -%} - {%- set message = commit.message | split(pat="\n") | first | split(pat=": ") | slice(start=1) | join(sep=" ") | trim | capitalize -%} - {% set pr_num = message | split(pat=" ") | last | trim_start_matches(pat="(") | trim_end_matches(pat=")") | trim_start_matches(pat="#") %} - {%- set message = message | split(pat=" ") | slice(end=-1) | join(sep=" ") | trim %} - 1. [{{ pr_num }}](https://github.com/influxdata/influxdb/pull/{{ pr_num }}): {{ message }} - {%- endfor %} -{% endfor %} - -""" -trim = true - -[git] -conventional_commits = false -commit_parsers = [ - { message = "^feat*", group = "Features"}, - { message = "^fix*", group = "Bug Fixes"}, -] -filter_commits = true -tag_pattern = "v[12].[0-9].[0-9]*" diff --git a/cmd/influxd/downgrade/downgrade.go b/cmd/influxd/downgrade/downgrade.go deleted file mode 100644 index 4e343f7c85b..00000000000 --- a/cmd/influxd/downgrade/downgrade.go +++ /dev/null @@ -1,218 +0,0 @@ -package downgrade - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/bolt" - "github.com/influxdata/influxdb/v2/internal/fs" - "github.com/influxdata/influxdb/v2/kit/cli" - "github.com/influxdata/influxdb/v2/kv/migration" - "github.com/influxdata/influxdb/v2/kv/migration/all" - influxlogger "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/sqlite" - sqliteMigrations "github.com/influxdata/influxdb/v2/sqlite/migrations" - "github.com/spf13/cobra" - "github.com/spf13/viper" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -const ( - backupPathFormat = "%s.%s-pre-%s-downgrade.backup" -) - -type migrationTarget struct { - kvMigration, sqlMigration int -} - -// migrationTarget int values mean "migrate to this migration number". For example, kvMigration of 15 will result in the -// downgraded database including migration 15, but migration 15 will not be undone. -var downgradeMigrationTargets = map[string]migrationTarget{ - "2.0": {kvMigration: 15, sqlMigration: 0}, - "2.1": {kvMigration: 18, sqlMigration: 3}, - "2.3": {kvMigration: 20, sqlMigration: 5}, - "2.4": {kvMigration: 20, sqlMigration: 7}, -} - -func NewCommand(ctx context.Context, v *viper.Viper) (*cobra.Command, error) { - v2dir, err := fs.InfluxDir() - if err != nil { - return nil, fmt.Errorf("error fetching default InfluxDB 2.0 dir: %w", err) - } - - var validDowngradeTargets []string - for k := range downgradeMigrationTargets { - validDowngradeTargets = append(validDowngradeTargets, k) - } - var validTargetsHelp string - if len(validDowngradeTargets) == 1 { - validTargetsHelp = validDowngradeTargets[0] - } else { - validTargetsHelp = fmt.Sprintf("<%s>", strings.Join(validDowngradeTargets, "|")) - } - - var sqlitePath string - var boltPath string - var logLevel zapcore.Level - - cmd := &cobra.Command{ - Use: fmt.Sprintf("downgrade [flags] %s", validTargetsHelp), - Short: "Downgrade metadata schema used by influxd to match the expectations of an older release", - Long: `Run this command prior to downgrading the influxd binary. - -influxd does not guarantee backwards-compatibility with older releases in its embedded -metadata stores. Attempting to boot up an older influxd on a BoltDB/SQLite file that has -been migrated to a newer schema will result in a startup error. 
This command downgrades -those metadata schemas to match the expectations of an older release, allowing the older -influxd binary to boot successfully. - -The target version of the downgrade must be specified, i.e. "influxd downgrade 2.0". -`, - ValidArgs: validDowngradeTargets, - Args: cobra.ExactValidArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - logconf := &influxlogger.Config{ - Format: "auto", - Level: logLevel, - } - logger, err := logconf.New(os.Stdout) - if err != nil { - return err - } - - return downgrade(ctx, boltPath, sqlitePath, args[0], logger) - }, - } - - opts := []cli.Opt{ - { - DestP: &boltPath, - Flag: "bolt-path", - Default: filepath.Join(v2dir, bolt.DefaultFilename), - Desc: "path for boltdb database", - }, - { - DestP: &sqlitePath, - Flag: "sqlite-path", - Desc: fmt.Sprintf("path to sqlite database. if not set, the database is assumed to be in the bolt-path directory as %q", sqlite.DefaultFilename), - }, - { - DestP: &logLevel, - Flag: "log-level", - Default: zapcore.InfoLevel, - Desc: "supported log levels are debug, info, warn and error", - }, - } - if err := cli.BindOptions(v, cmd, opts); err != nil { - return nil, err - } - - if sqlitePath == "" { - sqlitePath = filepath.Join(filepath.Dir(boltPath), sqlite.DefaultFilename) - } - - return cmd, nil -} - -func downgrade(ctx context.Context, boltPath, sqlitePath, targetVersion string, log *zap.Logger) error { - info := influxdb.GetBuildInfo() - - n, err := compareVersionStrings(targetVersion, "2.4.0") - if n < 0 || err != nil { - errStr := "if the target version is less than 2.4.0, any replications using bucket names rather than ids will be deleted" - log.Warn("downgrade warning", zap.String("targetVersion", errStr)) - } - - // Files must exist at the specified paths for the downgrade to work properly. The bolt and sqlite "open" methods will - // create files if they do not exist, so their existence must be verified here. - if _, err := os.Stat(boltPath); err != nil { - return fmt.Errorf("invalid bolt path %q: %w", boltPath, err) - } - - if _, err := os.Stat(sqlitePath); err != nil { - return fmt.Errorf("invalid sqlite path %q: %w", sqlitePath, err) - } - - // Initialize both migrators prior to attempting any migrations so that we can error out prior to mutating either DB - // if there are errors initializing either migrator. - boltClient := bolt.NewClient(log.With(zap.String("service", "bolt"))) - boltClient.Path = boltPath - - if err := boltClient.Open(ctx); err != nil { - return fmt.Errorf("failed to open bolt DB: %w", err) - } - defer boltClient.Close() - - kvStore := bolt.NewKVStore(log.With(zap.String("service", "kvstore-bolt")), boltPath) - kvStore.WithDB(boltClient.DB()) - - kvMigrator, err := migration.NewMigrator(log.With(zap.String("service", "kv-migrator")), kvStore) - if err != nil { - return fmt.Errorf("failed to initialize KV migrator: %w", err) - } - - kvMigrator.SetBackupPath(fmt.Sprintf(backupPathFormat, boltPath, info.Version, targetVersion)) - kvMigrator.AddMigrations(all.Migrations[:]...) 
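// For a concrete picture of what follows (the "2.0" target is illustrative),
// the rollback resolves to:
//
//	target := downgradeMigrationTargets["2.0"] // {kvMigration: 15, sqlMigration: 0}
//	kvMigrator.Down(ctx, target.kvMigration)
//	sqlMigrator.Down(ctx, target.sqlMigration, sqliteMigrations.AllDown)
//
// Each migrator first writes a backup named via backupPathFormat, i.e.
// "<store path>.<current version>-pre-<target>-downgrade.backup". Note that
// compareVersionStrings (defined below) returns an error when one version is a
// dotted prefix of the other (e.g. "2.4" vs "2.4.0"), since the missing
// component is parsed as an empty string; in that case the replication warning
// above is emitted even though the versions are effectively equal.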
- - sqlStore, err := sqlite.NewSqlStore(sqlitePath, log.With(zap.String("service", "sqlstore-sqlite"))) - if err != nil { - return fmt.Errorf("failed to initialize SQL migrator: %w", err) - } - defer sqlStore.Close() - - sqlMigrator := sqlite.NewMigrator(sqlStore, log.With(zap.String("service", "sql-migrator"))) - sqlMigrator.SetBackupPath(fmt.Sprintf(backupPathFormat, sqlitePath, info.Version, targetVersion)) - - log.Info("Downgrading KV metadata to target version", zap.String("version", targetVersion)) - if err := kvMigrator.Down(ctx, downgradeMigrationTargets[targetVersion].kvMigration); err != nil { - return fmt.Errorf("failed to tear down KV migrations: %w", err) - } - - log.Info("Downgrading SQL metadata to target version", zap.String("version", targetVersion)) - if err := sqlMigrator.Down(ctx, downgradeMigrationTargets[targetVersion].sqlMigration, sqliteMigrations.AllDown); err != nil { - return fmt.Errorf("failed to tear down SQL migrations: %w", err) - } - - log.Info("Metadata successfully downgraded, you can now safely replace this `influxd` with the target older version", - zap.String("version", targetVersion)) - return nil -} - -func compareVersionStrings(left string, right string) (int, error) { - l := strings.Split(left, ".") - r := strings.Split(right, ".") - loop := len(r) - if len(l) > len(r) { - loop = len(l) - } - for i := 0; i < loop; i++ { - var x, y string - if len(l) > i { - x = l[i] - } - if len(r) > i { - y = r[i] - } - lefti, err := strconv.Atoi(x) - if err != nil { - return 0, err - } - righti, err := strconv.Atoi(y) - if err != nil { - return 0, err - } - - if lefti > righti { - return 1, nil - } else if lefti < righti { - return -1, nil - } - } - return 0, nil -} diff --git a/cmd/influxd/inspect/build_tsi/build_tsi.go b/cmd/influxd/inspect/build_tsi/build_tsi.go deleted file mode 100644 index e835b6770b1..00000000000 --- a/cmd/influxd/inspect/build_tsi/build_tsi.go +++ /dev/null @@ -1,610 +0,0 @@ -package build_tsi - -import ( - "errors" - "fmt" - "os" - "os/user" - "path/filepath" - "runtime" - "strconv" - "sync/atomic" - - "github.com/influxdata/influx-cli/v2/clients" - "github.com/influxdata/influx-cli/v2/pkg/stdio" - "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/file" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" - "github.com/spf13/cobra" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "golang.org/x/sync/errgroup" -) - -const defaultBatchSize = 10000 - -type buildTSI struct { - // Data path options - dataPath string // optional. Defaults to /engine/data - walPath string // optional. Defaults to /engine/wal - bucketID string // optional. Defaults to all buckets - shardID string // optional. Defaults to all shards - - batchSize int // optional. Defaults to 10000 - maxLogFileSize int64 // optional. Defaults to tsdb.DefaultMaxIndexLogFileSize - maxCacheSize uint64 // optional. Defaults to tsdb.DefaultCacheMaxMemorySize - compactSeriesFile bool // optional. Defaults to false - concurrency int // optional. Defaults to GOMAXPROCS(0) - - verbose bool // optional. Defaults to false - Logger *zap.Logger -} - -// NewBuildTSICommand returns a new instance of Command with default settings applied. 
-func NewBuildTSICommand() *cobra.Command { - var buildTSICmd buildTSI - - cmd := &cobra.Command{ - Use: "build-tsi", - Short: "Rebuilds the TSI index and (where necessary) the Series File.", - Long: `This command will rebuild the TSI index and, if needed, the Series File. - -The index is built by reading all of the TSM indexes in the TSM data -directory, and all of the WAL entries in the WAL data directory. If the -Series File directory is missing, then the series file will be rebuilt. -If the TSI index directory already exists, then this tool will fail. -Performance of the tool can be tweaked by adjusting the max log file size, -max cache size and the batch size. - -max-log-file-size determines how big in-memory parts of the index have to -get before they're compacted into memory-mappable index files. -Consider decreasing this from the default if you find the heap -requirements of your TSI index are too much. - -max-cache-size refers to the maximum cache size allowed. If there are WAL -files to index, then they need to be replayed into a tsm1.Cache first -by this tool. If the maximum cache size isn't large enough then there -will be an error and this tool will fail. Increase max-cache-size to -address this. - -batch-size refers to the size of the batches written into the index. -Increasing this can improve performance but can result in much more -memory usage. - `, - Args: cobra.MaximumNArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - if buildTSICmd.shardID != "" && buildTSICmd.bucketID == "" { - return errors.New("if shard-id is specified, bucket-id must also be specified") - } - - config := logger.NewConfig() - - // Set logger level based on verbose flag - if buildTSICmd.verbose { - config.Level = zapcore.DebugLevel - } else { - config.Level = zapcore.InfoLevel - } - - newLogger, err := config.New(cmd.OutOrStdout()) - if err != nil { - return err - } - buildTSICmd.Logger = newLogger - - return buildTSICmd.run() - }, - } - - defaultPath := filepath.Join(os.Getenv("HOME"), "/.influxdbv2/engine/") - defaultDataPath := filepath.Join(defaultPath, "data") - defaultWALPath := filepath.Join(defaultPath, "wal") - - cmd.Flags().StringVar(&buildTSICmd.dataPath, "data-path", defaultDataPath, "Path to the TSM data directory.") - cmd.Flags().StringVar(&buildTSICmd.walPath, "wal-path", defaultWALPath, "Path to the WAL data directory.") - cmd.Flags().StringVar(&buildTSICmd.bucketID, "bucket-id", "", "Bucket ID") - cmd.Flags().StringVar(&buildTSICmd.shardID, "shard-id", "", "Shard ID; if this is specified, a bucket-id must also be specified") - cmd.Flags().BoolVar(&buildTSICmd.compactSeriesFile, "compact-series-file", false, "Compact existing series file. Does not rebuild the index.") - cmd.Flags().IntVarP(&buildTSICmd.concurrency, "concurrency", "c", runtime.GOMAXPROCS(0), "Number of workers to dedicate to shard index building.") - cmd.Flags().Int64Var(&buildTSICmd.maxLogFileSize, "max-log-file-size", tsdb.DefaultMaxIndexLogFileSize, "Maximum log file size") - cmd.Flags().Uint64Var(&buildTSICmd.maxCacheSize, "max-cache-size", tsdb.DefaultCacheMaxMemorySize, "Maximum cache size") - cmd.Flags().BoolVarP(&buildTSICmd.verbose, "verbose", "v", false, "Verbose output, includes debug-level logs") - cmd.Flags().IntVar(&buildTSICmd.batchSize, "batch-size", defaultBatchSize, "Set the size of the batches we write to the index. Setting this can have adverse effects on performance and heap requirements") - - return cmd -} - -// Run executes the run command for BuildTSI.
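// A typical invocation (the paths and bucket ID below are illustrative):
//
//	influxd inspect build-tsi --data-path ~/.influxdbv2/engine/data \
//	    --wal-path ~/.influxdbv2/engine/wal --bucket-id 12345 --concurrency 4
//
// run walks each bucket directory under --data-path (optionally restricted by
// --bucket-id and --shard-id) and, per bucket, either compacts its series file
// (--compact-series-file) or rebuilds the TSI index for every retention policy
// and shard it finds.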
-func (buildTSICmd *buildTSI) run() error { - // Verify the user actually wants to run as root. - if isRoot() { - cli := clients.CLI{StdIO: stdio.TerminalStdio} - if cli.StdIO.IsInteractive() { - if confirmed := cli.StdIO.GetConfirm(` -You are currently running as root. This will build your -index files with root ownership and will be inaccessible -if you run influxd as a non-root user. You should run -build-tsi as the same user you are running influxd. -Are you sure you want to continue?`); !confirmed { - return errors.New("operation aborted") - } - } else { - buildTSICmd.Logger.Warn( - "You are current running as root. This will build your index files with root ownership and will be inaccessible if you run influxd as a non-root user.") - } - } - - if buildTSICmd.compactSeriesFile { - if buildTSICmd.shardID != "" { - return errors.New("cannot specify shard ID when compacting series file") - } - } - - fis, err := os.ReadDir(buildTSICmd.dataPath) - if err != nil { - return err - } - for _, fi := range fis { - name := fi.Name() - if !fi.IsDir() { - continue - } else if buildTSICmd.bucketID != "" && name != buildTSICmd.bucketID { - continue - } - - if buildTSICmd.compactSeriesFile { - if err := buildTSICmd.compactBucketSeriesFile(filepath.Join(buildTSICmd.dataPath, name)); err != nil { - return err - } - continue - } - - if err := buildTSICmd.processBucket(name, filepath.Join(buildTSICmd.dataPath, name), filepath.Join(buildTSICmd.walPath, name)); err != nil { - return err - } - } - - return nil - -} - -// compactBucketSeriesFile compacts the series file segments associated with -// the series file for the provided bucket. -func (buildTSICmd *buildTSI) compactBucketSeriesFile(path string) error { - sfilePath := filepath.Join(path, tsdb.SeriesFileDirectory) - paths, err := buildTSICmd.seriesFilePartitionPaths(sfilePath) - if err != nil { - return err - } - - // Build input channel. - pathCh := make(chan string, len(paths)) - for _, path := range paths { - pathCh <- path - } - close(pathCh) - - // Concurrently process each partition in the series file - var g errgroup.Group - for i := 0; i < buildTSICmd.concurrency; i++ { - g.Go(func() error { - for path := range pathCh { - if err := buildTSICmd.compactSeriesFilePartition(path); err != nil { - return err - } - } - return nil - }) - } - if err := g.Wait(); err != nil { - return err - } - - // Build new series file indexes - sfile := tsdb.NewSeriesFile(sfilePath) - err = sfile.Open() - defer sfile.Close() - - if err != nil { - return err - } - - compactor := tsdb.NewSeriesPartitionCompactor() - for _, partition := range sfile.Partitions() { - if err = compactor.Compact(partition); err != nil { - return err - } - buildTSICmd.Logger.Debug("Compacted", zap.String("path", partition.Path())) - } - return nil -} - -func (buildTSICmd *buildTSI) compactSeriesFilePartition(path string) error { - const tmpExt = ".tmp" - buildTSICmd.Logger.Info("Processing partition", zap.String("path", path)) - - // Open partition so index can recover from entries not in the snapshot. - partitionID, err := strconv.Atoi(filepath.Base(path)) - if err != nil { - return fmt.Errorf("cannot parse partition id from path: %s", path) - } - p := tsdb.NewSeriesPartition(partitionID, path, nil) - if err := p.Open(); err != nil { - return fmt.Errorf("cannot open partition: path=%s err=%w", path, err) - } - defer p.Close() - - // Loop over segments and compact. 
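// Each segment is compacted into a sibling "<segment>.tmp" file against the
// partition's index; after the partition is closed, the temporary files are
// renamed over the originals and the partition's index file is removed so it
// is rebuilt on the next open.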
- indexPath := p.IndexPath() - var segmentPaths []string - for _, segment := range p.Segments() { - buildTSICmd.Logger.Debug("Processing segment", zap.String("path", segment.Path()), zap.Uint16("segment-id", segment.ID())) - - if err := segment.CompactToPath(segment.Path()+tmpExt, p.Index()); err != nil { - return err - } - segmentPaths = append(segmentPaths, segment.Path()) - } - - // Close partition. - if err := p.Close(); err != nil { - return err - } - - // Remove the old segment files and replace with new ones. - for _, dst := range segmentPaths { - src := dst + tmpExt - - buildTSICmd.Logger.Debug("Renaming new segment", zap.String("prev", src), zap.String("new", dst)) - if err := file.RenameFile(src, dst); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("serious failure. Please rebuild index and series file: %w", err) - } - } - - // Remove index file so it will be rebuilt when reopened. - buildTSICmd.Logger.Debug("Removing index file", zap.String("path", indexPath)) - - if err := os.Remove(indexPath); err != nil && !os.IsNotExist(err) { // index won't exist for low cardinality - return err - } - - return nil -} - -// seriesFilePartitionPaths returns the paths to each partition in the series file. -func (buildTSICmd *buildTSI) seriesFilePartitionPaths(path string) ([]string, error) { - sfile := tsdb.NewSeriesFile(path) - sfile.Logger = buildTSICmd.Logger - if err := sfile.Open(); err != nil { - return nil, err - } - - var paths []string - for _, partition := range sfile.Partitions() { - paths = append(paths, partition.Path()) - } - if err := sfile.Close(); err != nil { - return nil, err - } - return paths, nil -} - -func (buildTSICmd *buildTSI) processBucket(bucketID, dataDir, walDir string) error { - buildTSICmd.Logger.Info("Rebuilding bucket", zap.String("name", bucketID)) - - sfile := tsdb.NewSeriesFile(filepath.Join(dataDir, tsdb.SeriesFileDirectory)) - sfile.Logger = buildTSICmd.Logger - if err := sfile.Open(); err != nil { - return err - } - defer sfile.Close() - - fis, err := os.ReadDir(dataDir) - if err != nil { - return err - } - - for _, fi := range fis { - rpName := fi.Name() - if !fi.IsDir() { - continue - } else if rpName == tsdb.SeriesFileDirectory { - continue - } - - if err := buildTSICmd.processRetentionPolicy(sfile, bucketID, rpName, filepath.Join(dataDir, rpName), filepath.Join(walDir, rpName)); err != nil { - return err - } - } - - return nil -} - -func (buildTSICmd *buildTSI) processRetentionPolicy(sfile *tsdb.SeriesFile, bucketID, rpName, dataDir, walDir string) error { - buildTSICmd.Logger.Info("Rebuilding retention policy", logger.Database(bucketID), logger.RetentionPolicy(rpName)) - - fis, err := os.ReadDir(dataDir) - if err != nil { - return err - } - - type shard struct { - ID uint64 - Path string - } - - var shards []shard - - for _, fi := range fis { - if !fi.IsDir() { - continue - } else if buildTSICmd.shardID != "" && fi.Name() != buildTSICmd.shardID { - continue - } - - shardID, err := strconv.ParseUint(fi.Name(), 10, 64) - if err != nil { - continue - } - - shards = append(shards, shard{shardID, fi.Name()}) - } - - errC := make(chan error, len(shards)) - var maxi uint32 // index of maximum shard being worked on. - for k := 0; k < buildTSICmd.concurrency; k++ { - go func() { - for { - i := int(atomic.AddUint32(&maxi, 1) - 1) // Get next partition to work on. - if i >= len(shards) { - return // No more work. 
- } - - id, name := shards[i].ID, shards[i].Path - log := buildTSICmd.Logger.With(logger.Database(bucketID), logger.RetentionPolicy(rpName), logger.Shard(id)) - errC <- IndexShard(sfile, filepath.Join(dataDir, name), filepath.Join(walDir, name), buildTSICmd.maxLogFileSize, buildTSICmd.maxCacheSize, buildTSICmd.batchSize, log) - } - }() - } - - // Check for error - for i := 0; i < cap(errC); i++ { - if err := <-errC; err != nil { - return err - } - } - return nil -} - -func IndexShard(sfile *tsdb.SeriesFile, dataDir, walDir string, maxLogFileSize int64, maxCacheSize uint64, batchSize int, log *zap.Logger) error { - log.Debug("Rebuilding shard") - - // Check if shard already has a TSI index. - indexPath := filepath.Join(dataDir, "index") - log.Debug("Checking index path", zap.String("path", indexPath)) - if _, err := os.Stat(indexPath); !os.IsNotExist(err) { - log.Warn("tsi1 index already exists, skipping", zap.String("path", indexPath)) - return nil - } - - log.Debug("Opening shard") - - // Remove temporary index files if this is being re-run. - tmpPath := filepath.Join(dataDir, ".index") - log.Debug("Cleaning up partial index from previous run, if any") - if err := os.RemoveAll(tmpPath); err != nil { - return err - } - - // Open TSI index in temporary path. - tsiIndex := tsi1.NewIndex(sfile, "", - tsi1.WithPath(tmpPath), - tsi1.WithMaximumLogFileSize(maxLogFileSize), - tsi1.DisableFsync(), - // Each new series entry in a log file is ~12 bytes so this should - // roughly equate to one flush to the file for every batch. - tsi1.WithLogFileBufferSize(12*batchSize), - ) - - tsiIndex.WithLogger(log) - - log.Debug("Opening tsi index in temporary location", zap.String("path", tmpPath)) - if err := tsiIndex.Open(); err != nil { - return err - } - defer tsiIndex.Close() - - // Write out tsm1 files. - // Find shard files. - tsmPaths, err := collectTSMFiles(dataDir) - if err != nil { - return err - } - - log.Debug("Iterating over tsm files") - for _, path := range tsmPaths { - log.Debug("Processing tsm file", zap.String("path", path)) - if err := IndexTSMFile(tsiIndex, path, batchSize, log); err != nil { - return err - } - } - - // Write out wal files. - walPaths, err := collectWALFiles(walDir) - - if err != nil { - if !os.IsNotExist(err) { - return err - } - } else { - log.Debug("Building cache from wal files") - cache := tsm1.NewCache(maxCacheSize, tsdb.EngineTags{}) // tags are for metrics only - loader := tsm1.NewCacheLoader(walPaths) - loader.WithLogger(log) - if err := loader.Load(cache); err != nil { - return err - } - - log.Debug("Iterating over cache") - keysBatch := make([][]byte, 0, batchSize) - namesBatch := make([][]byte, 0, batchSize) - tagsBatch := make([]models.Tags, 0, batchSize) - - for _, key := range cache.Keys() { - seriesKey, _ := tsm1.SeriesAndFieldFromCompositeKey(key) - name, tags := models.ParseKeyBytes(seriesKey) - - log.Debug("Series", zap.String("name", string(name)), zap.String("tags", tags.String())) - - keysBatch = append(keysBatch, seriesKey) - namesBatch = append(namesBatch, name) - tagsBatch = append(tagsBatch, tags) - - // Flush batch? 
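// Once keysBatch reaches batchSize, the accumulated series keys, measurement
// names and tag sets are written to the index with a single
// CreateSeriesListIfNotExists call and the slices are reset, bounding memory
// use to roughly one batch of series at a time.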
- if len(keysBatch) == batchSize { - if err := tsiIndex.CreateSeriesListIfNotExists(keysBatch, namesBatch, tagsBatch); err != nil { - return fmt.Errorf("problem creating series: %w", err) - } - keysBatch = keysBatch[:0] - namesBatch = namesBatch[:0] - tagsBatch = tagsBatch[:0] - } - } - - // Flush any remaining series in the batches - if len(keysBatch) > 0 { - if err := tsiIndex.CreateSeriesListIfNotExists(keysBatch, namesBatch, tagsBatch); err != nil { - return fmt.Errorf("problem creating series: %w", err) - } - keysBatch = nil - namesBatch = nil - tagsBatch = nil - } - } - - // Attempt to compact the index & wait for all compactions to complete. - log.Debug("compacting index") - tsiIndex.Compact() - tsiIndex.Wait() - - // Close TSI index. - log.Debug("Closing tsi index") - if err := tsiIndex.Close(); err != nil { - return err - } - - log.Debug("Reopening TSI index with max-index-log-file-size=1 to fully compact log files") - compactingIndex := tsi1.NewIndex(sfile, "", - tsi1.WithPath(tmpPath), - tsi1.WithMaximumLogFileSize(1), - ) - if err := compactingIndex.Open(); err != nil { - return err - } - compactingIndex.Compact() - compactingIndex.Wait() - log.Debug("re-closing tsi index") - if err := compactingIndex.Close(); err != nil { - return err - } - - // Rename TSI to standard path. - log.Debug("Moving tsi to permanent location") - return os.Rename(tmpPath, indexPath) -} - -func IndexTSMFile(index *tsi1.Index, path string, batchSize int, log *zap.Logger) error { - f, err := os.Open(path) - if err != nil { - return err - } - defer f.Close() - - r, err := tsm1.NewTSMReader(f) - if err != nil { - log.Warn("Unable to read, skipping", zap.String("path", path), zap.Error(err)) - return nil - } - defer r.Close() - - keysBatch := make([][]byte, 0, batchSize) - namesBatch := make([][]byte, 0, batchSize) - tagsBatch := make([]models.Tags, batchSize) - var ti int - for i := 0; i < r.KeyCount(); i++ { - key, _ := r.KeyAt(i) - seriesKey, _ := tsm1.SeriesAndFieldFromCompositeKey(key) - var name []byte - name, tagsBatch[ti] = models.ParseKeyBytesWithTags(seriesKey, tagsBatch[ti]) - - log.Debug("Series", zap.String("name", string(name)), zap.String("tags", tagsBatch[ti].String())) - - keysBatch = append(keysBatch, seriesKey) - namesBatch = append(namesBatch, name) - ti++ - - // Flush batch? - if len(keysBatch) == batchSize { - if err := index.CreateSeriesListIfNotExists(keysBatch, namesBatch, tagsBatch[:ti]); err != nil { - return fmt.Errorf("problem creating series: %w", err) - } - keysBatch = keysBatch[:0] - namesBatch = namesBatch[:0] - ti = 0 // Reset tags. 
- } - } - - // Flush any remaining series in the batches - if len(keysBatch) > 0 { - if err := index.CreateSeriesListIfNotExists(keysBatch, namesBatch, tagsBatch[:ti]); err != nil { - return fmt.Errorf("problem creating series: %w", err) - } - } - return nil -} - -func collectTSMFiles(path string) ([]string, error) { - fis, err := os.ReadDir(path) - if err != nil { - return nil, err - } - - var paths []string - for _, fi := range fis { - if filepath.Ext(fi.Name()) != "."+tsm1.TSMFileExtension { - continue - } - paths = append(paths, filepath.Join(path, fi.Name())) - } - return paths, nil -} - -func collectWALFiles(path string) ([]string, error) { - if path == "" { - return nil, os.ErrNotExist - } - if _, err := os.Stat(path); os.IsNotExist(err) { - return nil, err - } - fis, err := os.ReadDir(path) - if err != nil { - return nil, err - } - - var paths []string - for _, fi := range fis { - if filepath.Ext(fi.Name()) != "."+tsm1.WALFileExtension { - continue - } - paths = append(paths, filepath.Join(path, fi.Name())) - } - return paths, nil -} - -func isRoot() bool { - currUser, _ := user.Current() - return currUser != nil && currUser.Username == "root" -} diff --git a/cmd/influxd/inspect/build_tsi/build_tsi_test.go b/cmd/influxd/inspect/build_tsi/build_tsi_test.go deleted file mode 100644 index b488b64ef22..00000000000 --- a/cmd/influxd/inspect/build_tsi/build_tsi_test.go +++ /dev/null @@ -1,427 +0,0 @@ -package build_tsi - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strconv" - "testing" - - "github.com/golang/snappy" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" - "github.com/spf13/cobra" - "github.com/stretchr/testify/require" -) - -type cmdParams struct { - dataPath string - walPath string - bucketID string - shardID string - batchSize int - maxLogFileSize int64 - maxCacheSize uint64 - compactSeriesFile bool - concurrency int - verbose bool -} - -type cmdOuts struct { - expectedOut string - expectErr bool - expectBuiltIndex bool - expectCompactSeries bool - sfilePath string -} - -func Test_BuildTSI_ShardID_Without_BucketID(t *testing.T) { - params := cmdParams{ - shardID: "1", - concurrency: 1, - } - - outs := cmdOuts{ - expectErr: true, - expectedOut: "if shard-id is specified, bucket-id must also be specified", - } - - runCommand(t, params, outs) -} - -func Test_BuildTSI_Invalid_Index_Already_Exists(t *testing.T) { - tempDir := t.TempDir() - - os.MkdirAll(filepath.Join(tempDir, "data", "12345", "autogen", "1", "index"), 0777) - os.MkdirAll(filepath.Join(tempDir, "wal", "12345", "autogen", "1"), 0777) - - params := cmdParams{ - dataPath: filepath.Join(tempDir, "data"), - walPath: filepath.Join(tempDir, "wal"), - concurrency: 1, - } - - outs := cmdOuts{ - expectedOut: "tsi1 index already exists, skipping", - } - - runCommand(t, params, outs) -} - -func Test_BuildTSI_Valid(t *testing.T) { - tempDir := t.TempDir() - - os.MkdirAll(filepath.Join(tempDir, "data", "12345", "autogen", "1"), 0777) - os.MkdirAll(filepath.Join(tempDir, "wal", "12345", "autogen", "1"), 0777) - - // Create a temp .tsm file - tsmValues := []tsm1.Value{tsm1.NewValue(0, 1.0)} - newTempTsmFile(t, filepath.Join(tempDir, "data", "12345", "autogen", "1"), tsmValues) - - // Create a temp .wal file - p1 := tsm1.NewValue(10, 1.1) - p2 := tsm1.NewValue(1, int64(1)) - p3 := tsm1.NewValue(1, 
true) - p4 := tsm1.NewValue(1, "string") - p5 := tsm1.NewValue(5, uint64(10)) - - walValues := map[string][]tsm1.Value{ - "cpu,host=A#!~#float": {p1}, - "cpu,host=A#!~#int": {p2}, - "cpu,host=A#!~#bool": {p3}, - "cpu,host=A#!~#string": {p4}, - "cpu,host=A#!~#unsigned": {p5}, - } - - newTempWalFile(t, filepath.Join(tempDir, "wal", "12345", "autogen", "1"), walValues) - - params := cmdParams{ - dataPath: filepath.Join(tempDir, "data"), - walPath: filepath.Join(tempDir, "wal"), - concurrency: 1, - batchSize: defaultBatchSize, - maxLogFileSize: tsdb.DefaultMaxIndexLogFileSize, - maxCacheSize: tsdb.DefaultCacheMaxMemorySize, - } - - outs := cmdOuts{ - expectBuiltIndex: true, - } - - runCommand(t, params, outs) -} - -func Test_BuildTSI_Valid_Batch_Size_Exceeded(t *testing.T) { - tempDir := t.TempDir() - - os.MkdirAll(filepath.Join(tempDir, "data", "12345", "autogen", "1"), 0777) - os.MkdirAll(filepath.Join(tempDir, "wal", "12345", "autogen", "1"), 0777) - - // Create a temp .tsm file - tsmValues := []tsm1.Value{tsm1.NewValue(0, 1.0)} - newTempTsmFile(t, filepath.Join(tempDir, "data", "12345", "autogen", "1"), tsmValues) - - // Create a temp .wal file - p1 := tsm1.NewValue(10, 1.1) - p2 := tsm1.NewValue(1, int64(1)) - p3 := tsm1.NewValue(1, true) - p4 := tsm1.NewValue(1, "string") - p5 := tsm1.NewValue(5, uint64(10)) - - walValues := map[string][]tsm1.Value{ - "cpu,host=A#!~#float": {p1}, - "cpu,host=A#!~#int": {p2}, - "cpu,host=A#!~#bool": {p3}, - "cpu,host=A#!~#string": {p4}, - "cpu,host=A#!~#unsigned": {p5}, - } - - newTempWalFile(t, filepath.Join(tempDir, "wal", "12345", "autogen", "1"), walValues) - - params := cmdParams{ - dataPath: filepath.Join(tempDir, "data"), - walPath: filepath.Join(tempDir, "wal"), - concurrency: 1, - batchSize: 1, - maxLogFileSize: tsdb.DefaultMaxIndexLogFileSize, - maxCacheSize: tsdb.DefaultCacheMaxMemorySize, - } - - outs := cmdOuts{ - expectBuiltIndex: true, - } - - runCommand(t, params, outs) -} - -func Test_BuildTSI_Valid_Verbose(t *testing.T) { - // Set up temp directory structure - tempDir := t.TempDir() - - os.MkdirAll(filepath.Join(tempDir, "data", "12345", "autogen", "1"), 0777) - os.MkdirAll(filepath.Join(tempDir, "wal", "12345", "autogen", "1"), 0777) - - // Create a temp .tsm file - tsmValues := []tsm1.Value{tsm1.NewValue(0, 1.0)} - newTempTsmFile(t, filepath.Join(tempDir, "data", "12345", "autogen", "1"), tsmValues) - - // Create temp .wal file - p1 := tsm1.NewValue(10, 1.1) - p2 := tsm1.NewValue(1, int64(1)) - p3 := tsm1.NewValue(1, true) - p4 := tsm1.NewValue(1, "string") - p5 := tsm1.NewValue(5, uint64(10)) - - walValues := map[string][]tsm1.Value{ - "cpu,host=A#!~#float": {p1}, - "cpu,host=A#!~#int": {p2}, - "cpu,host=A#!~#bool": {p3}, - "cpu,host=A#!~#string": {p4}, - "cpu,host=A#!~#unsigned": {p5}, - } - - newTempWalFile(t, filepath.Join(tempDir, "wal", "12345", "autogen", "1"), walValues) - - // Run command with appropriate parameters and expected outputs - params := cmdParams{ - dataPath: filepath.Join(tempDir, "data"), - walPath: filepath.Join(tempDir, "wal"), - concurrency: 1, - batchSize: defaultBatchSize, - maxLogFileSize: tsdb.DefaultMaxIndexLogFileSize, - maxCacheSize: tsdb.DefaultCacheMaxMemorySize, - verbose: true, - } - - outs := cmdOuts{ - expectBuiltIndex: true, - expectedOut: "lvl=info", - } - - runCommand(t, params, outs) -} - -func Test_BuildTSI_Invalid_Compact_Series_Specific_Shard(t *testing.T) { - params := cmdParams{ - bucketID: "12345", - shardID: "1", - concurrency: 1, - compactSeriesFile: true, - } - - outs := cmdOuts{ - 
expectErr: true, - expectedOut: "cannot specify shard ID when compacting series file", - } - - runCommand(t, params, outs) -} - -func Test_BuildTSI_Valid_Compact_Series(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("mmap implementation on Windows prevents series-file from shrinking during compaction") - } - - tempDir := t.TempDir() - - os.MkdirAll(filepath.Join(tempDir, "data", "12345", "_series"), 0777) - - // Create new series file - sfile := tsdb.NewSeriesFile(filepath.Join(tempDir, "data", "12345", "_series")) - require.NoError(t, sfile.Open()) - defer sfile.Close() - - // Generate a bunch of keys. - var mms [][]byte - var tagSets []models.Tags - for i := 0; i < 1000; i++ { - mms = append(mms, []byte("cpu")) - tagSets = append(tagSets, models.NewTags(map[string]string{"region": fmt.Sprintf("r%d", i)})) - } - - // Add all to the series file. - _, err := sfile.CreateSeriesListIfNotExists(mms, tagSets) - require.NoError(t, err) - - params := cmdParams{ - dataPath: filepath.Join(tempDir, "data"), - walPath: filepath.Join(tempDir, "wal"), - concurrency: 1, - compactSeriesFile: true, - batchSize: defaultBatchSize, - maxLogFileSize: tsdb.DefaultMaxIndexLogFileSize, - maxCacheSize: tsdb.DefaultCacheMaxMemorySize, - } - - outs := cmdOuts{ - expectCompactSeries: true, - sfilePath: sfile.Path(), - } - - require.NoError(t, sfile.Close()) - runCommand(t, params, outs) -} - -func initCommand(t *testing.T, params cmdParams) *cobra.Command { - t.Helper() - - // Create new command - cmd := NewBuildTSICommand() - - // Set args - allArgs := make([]string, 0) - - if params.dataPath != filepath.Join(os.Getenv("HOME"), ".influxdbv2", "engine", "data") { - allArgs = append(allArgs, "--data-path", params.dataPath) - } - if params.walPath != filepath.Join(os.Getenv("HOME"), ".influxdbv2", "engine", "wal") { - allArgs = append(allArgs, "--wal-path", params.walPath) - } - if params.bucketID != "" { - allArgs = append(allArgs, "--bucket-id", params.bucketID) - } - if params.shardID != "" { - allArgs = append(allArgs, "--shard-id", params.shardID) - } - if params.batchSize != 10000 { - allArgs = append(allArgs, "--batch-size", strconv.Itoa(params.batchSize)) - } - if params.maxLogFileSize != tsdb.DefaultMaxIndexLogFileSize { - allArgs = append(allArgs, "--max-log-file-size", strconv.Itoa(int(params.maxLogFileSize))) - } - if params.maxCacheSize != tsdb.DefaultCacheMaxMemorySize { - allArgs = append(allArgs, "--max-cache-size", strconv.Itoa(int(params.maxCacheSize))) - } - if params.compactSeriesFile { - allArgs = append(allArgs, "--compact-series-file") - } - if params.verbose { - allArgs = append(allArgs, "-v") - } - if params.concurrency != runtime.GOMAXPROCS(0) { - allArgs = append(allArgs, "--concurrency", strconv.Itoa(params.concurrency)) - } - - cmd.SetArgs(allArgs) - - return cmd -} - -func getOutput(t *testing.T, cmd *cobra.Command) []byte { - t.Helper() - - b := &bytes.Buffer{} - cmd.SetOut(b) - cmd.SetErr(b) - require.NoError(t, cmd.Execute()) - - out, err := io.ReadAll(b) - require.NoError(t, err) - - return out -} - -func runCommand(t *testing.T, params cmdParams, outs cmdOuts) { - t.Helper() - - cmd := initCommand(t, params) - - if outs.expectErr { - require.EqualError(t, cmd.Execute(), outs.expectedOut) - return - } - - if outs.expectBuiltIndex { - require.NoDirExists(t, filepath.Join(params.dataPath, "12345", "autogen", "1", "index")) - require.NoError(t, cmd.Execute()) - - // Check that a valid index directory is present after executing the command - isIndex, err := 
tsi1.IsIndexDir(filepath.Join(params.dataPath, "12345", "autogen", "1", "index")) - require.NoError(t, err) - require.True(t, isIndex) - - // Check manifest files, at least one index file should be listed in each - for i := 0; i < 8; i++ { - currentPartition := strconv.Itoa(i) - manifest, _, err := tsi1.ReadManifestFile(filepath.Join(params.dataPath, "12345", "autogen", "1", "index", currentPartition, "MANIFEST")) - require.NoError(t, err) - require.NotZero(t, len(manifest.Files)) - } - } - - if outs.expectCompactSeries { - sfile := tsdb.NewSeriesFile(outs.sfilePath) - require.NoError(t, sfile.Open()) - defer sfile.Close() - - // Get size of all partitions before series compaction - beforeSize, err := sfile.FileSize() - require.NoError(t, err) - require.NoError(t, sfile.Close()) - - // Run command with series compaction option chosen - require.NoError(t, cmd.Execute()) - - // Check if series directory exists - require.DirExists(t, filepath.Join(params.dataPath, "12345", "_series")) - - // Get size of all partitions after series compaction - require.NoError(t, sfile.Open()) - afterSize, err := sfile.FileSize() - require.NoError(t, err) - - // Check that collective size of all series partitions has decreased after compaction - require.Greater(t, beforeSize, afterSize) - } - - if outs.expectedOut != "" { - // Get output - out := getOutput(t, cmd) - - // Check output - if outs.expectedOut != "" { - require.Contains(t, string(out), outs.expectedOut) - } - } -} - -func newTempTsmFile(t *testing.T, path string, values []tsm1.Value) { - t.Helper() - - tsmFile, err := os.CreateTemp(path, "buildtsitest*"+"."+tsm1.TSMFileExtension) - require.NoError(t, err) - - w, err := tsm1.NewTSMWriter(tsmFile) - require.NoError(t, err) - - require.NoError(t, w.Write([]byte("cpu"), values)) - require.NoError(t, w.WriteIndex()) - - w.Close() -} - -func newTempWalFile(t *testing.T, path string, values map[string][]tsm1.Value) { - t.Helper() - - walFile, err := os.CreateTemp(path, "buildtsitest*"+"."+tsm1.WALFileExtension) - require.NoError(t, err) - - e := &tsm1.WriteWALEntry{Values: values} - b, err := e.Encode(nil) - require.NoError(t, err) - - w := tsm1.NewWALSegmentWriter(walFile) - err = w.Write(e.Type(), snappy.Encode(nil, b)) - require.NoError(t, err) - - err = w.Flush() - require.NoError(t, err) - - err = walFile.Sync() - require.NoError(t, err) -} diff --git a/cmd/influxd/inspect/delete_tsm/delete_tsm.go b/cmd/influxd/inspect/delete_tsm/delete_tsm.go deleted file mode 100644 index 314dca6263a..00000000000 --- a/cmd/influxd/inspect/delete_tsm/delete_tsm.go +++ /dev/null @@ -1,159 +0,0 @@ -package delete_tsm - -import ( - "fmt" - "os" - "path/filepath" - "time" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/file" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/spf13/cobra" -) - -type args struct { - measurement string // measurement to delete - sanitize bool // remove all keys with non-printable unicode - verbose bool // verbose logging -} - -func NewDeleteTSMCommand() *cobra.Command { - var arguments args - cmd := &cobra.Command{ - Use: "delete-tsm", - Short: "Deletes a measurement from a raw tsm file.", - Args: cobra.MinimumNArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - // Validate measurement or sanitize flag. - if arguments.measurement == "" && !arguments.sanitize { - return fmt.Errorf("--measurement or --sanitize flag required") - } - - // Process each TSM file. 
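// Each positional argument is a path to a .tsm file that is rewritten in
// place. Illustrative invocation (the file path is hypothetical):
//
//	influxd inspect delete-tsm --measurement cpu --verbose /path/to/000000001-000000001.tsm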
- for _, path := range args { - if arguments.verbose { - cmd.Printf("processing: %s", path) - } - if err := arguments.process(cmd, path); err != nil { - return err - } - } - return nil - }, - } - - cmd.Flags().StringVar(&arguments.measurement, "measurement", "", - "The name of the measurement to remove") - cmd.Flags().BoolVar(&arguments.sanitize, "sanitize", false, - "Remove all keys with non-printable unicode characters") - cmd.Flags().BoolVarP(&arguments.verbose, "verbose", "v", false, - "Enable verbose logging") - - return cmd -} - -func (a *args) process(cmd *cobra.Command, path string) error { - // Open TSM reader. - input, err := os.Open(path) - if err != nil { - return fmt.Errorf("failed to open file %q: %w", path, err) - } - defer input.Close() - - // Check if path is a directory - fi, err := input.Stat() - if err != nil { - return fmt.Errorf("failed to read FileInfo of file %s: %w", path, err) - } - if fi.IsDir() { - return fmt.Errorf("%s is a directory", path) - } - - // Check if file is a TSM file - if filepath.Ext(path) != "."+tsm1.TSMFileExtension { - return fmt.Errorf("%s is not a TSM file", path) - } - - r, err := tsm1.NewTSMReader(input) - if err != nil { - return fmt.Errorf("unable to read TSM file %q: %w", path, err) - } - defer r.Close() - - // Remove previous temporary files. - outputPath := path + ".rewriting.tmp" - if err := os.RemoveAll(outputPath); err != nil { - return fmt.Errorf("failed to remove existing temp file at %q: %w", outputPath, err) - } else if err := os.RemoveAll(outputPath + ".idx.tmp"); err != nil { - return fmt.Errorf("failed to remove existing temp file at %q: %w", outputPath+".idx.tmp", err) - } - - // Create TSMWriter to temporary location. - output, err := os.Create(outputPath) - if err != nil { - return fmt.Errorf("failed to create temporary file at %q: %w", outputPath, err) - } - defer output.Close() - - w, err := tsm1.NewTSMWriter(output) - if err != nil { - return fmt.Errorf("failed to create TSM Reader for file %q: %w", output.Name(), err) - } - defer w.Close() - - // Iterate over the input blocks. - hasData := false - itr := r.BlockIterator() - for itr.Next() { - // Read key & time range. - key, minTime, maxTime, _, _, block, err := itr.Read() - if err != nil { - return fmt.Errorf("failed to read block: %w", err) - } - - // Skip block if this is the measurement and time range we are deleting. - series, _ := tsm1.SeriesAndFieldFromCompositeKey(key) - measurement, tags := models.ParseKey(series) - if measurement == a.measurement || (a.sanitize && !models.ValidKeyTokens(measurement, tags)) { - if a.verbose { - cmd.Printf("deleting block: %s (%s-%s) sz=%d", - key, - time.Unix(0, minTime).UTC().Format(time.RFC3339Nano), - time.Unix(0, maxTime).UTC().Format(time.RFC3339Nano), - len(block), - ) - } - continue - } - - if err := w.WriteBlock(key, minTime, maxTime, block); err != nil { - return fmt.Errorf("failed to write block %q: %w", block, err) - } - hasData = true - } - - // Write index & close. - if hasData { - if err := w.WriteIndex(); err != nil { - return fmt.Errorf("failed to write index to TSM file: %w", err) - } - } - if err := w.Close(); err != nil { - return fmt.Errorf("failed to close TSM Writer: %w", err) - } - if err := r.Close(); err != nil { - return fmt.Errorf("failed to close TSM Reader: %w", err) - } - - // Replace original file with new file. 
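// The rewritten temporary file is renamed over the original; if no blocks
// survived the filter, the now-empty replacement is removed so an empty TSM
// file is not left behind.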
- if err := file.RenameFile(outputPath, path); err != nil { - return fmt.Errorf("failed to update TSM file %q: %w", path, err) - } - if !hasData { - if err := os.Remove(path); err != nil { - return fmt.Errorf("failed to remove empty TSM file %q: %w", path, err) - } - } - return nil -} diff --git a/cmd/influxd/inspect/delete_tsm/delete_tsm_test.go b/cmd/influxd/inspect/delete_tsm/delete_tsm_test.go deleted file mode 100644 index 93ee5e425a9..00000000000 --- a/cmd/influxd/inspect/delete_tsm/delete_tsm_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package delete_tsm - -import ( - "bytes" - "encoding/binary" - "io" - "os" - "testing" - - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/stretchr/testify/require" -) - -func Test_DeleteTSM_EmptyFile(t *testing.T) { - _, file := createTSMFile(t, tsmParams{}) - - runCommand(t, testParams{ - file: file, - expectErr: true, - expectOut: "unable to read TSM file", - }) -} - -func Test_DeleteTSM_WrongExt(t *testing.T) { - _, file := createTSMFile(t, tsmParams{ - improperExt: true, - }) - - runCommand(t, testParams{ - file: file, - expectErr: true, - expectOut: "is not a TSM file", - }) -} - -func Test_DeleteTSM_NotFile(t *testing.T) { - dir, _ := createTSMFile(t, tsmParams{}) - - runCommand(t, testParams{ - file: dir, - expectErr: true, - expectOut: "is a directory", - }) -} - -func Test_DeleteTSM_SingleEntry_Valid(t *testing.T) { - _, file := createTSMFile(t, tsmParams{ - keys: []string{"cpu"}, - }) - - runCommand(t, testParams{ - file: file, - shouldBeDeleted: true, - expectOut: "deleting block: cpu", - }) -} - -func Test_DeleteTSM_SingleEntry_Invalid(t *testing.T) { - _, file := createTSMFile(t, tsmParams{ - invalid: true, - keys: []string{"cpu"}, - }) - - runCommand(t, testParams{ - file: file, - expectErr: true, - expectOut: "unable to read TSM file", - }) -} - -func Test_DeleteTSM_ManyEntries_Valid(t *testing.T) { - _, file := createTSMFile(t, tsmParams{ - keys: []string{"cpu", "foobar", "mem"}, - }) - - runCommand(t, testParams{ - file: file, - expectOut: "deleting block: cpu", - }) -} - -func Test_DeleteTSM_ManyEntries_Invalid(t *testing.T) { - _, file := createTSMFile(t, tsmParams{ - invalid: true, - keys: []string{"cpu", "foobar", "mem"}, - }) - - runCommand(t, testParams{ - file: file, - expectErr: true, - expectOut: "unable to read TSM file", - }) -} - -type testParams struct { - file string - sanitize bool // if true, run with --sanitize flag. 
Else run with --measurement flag - expectOut string - expectErr bool - shouldBeDeleted bool -} - -func runCommand(t *testing.T, params testParams) { - cmd := NewDeleteTSMCommand() - args := []string{params.file} - if params.sanitize { - args = append(args, "--sanitize") - } else { - args = append(args, "--measurement", "cpu") - } - args = append(args, "--verbose") - cmd.SetArgs(args) - - b := bytes.NewBufferString("") - cmd.SetOut(b) - cmd.SetErr(b) - - if params.expectErr { - require.Error(t, cmd.Execute()) - } else { - require.NoError(t, cmd.Execute()) - if params.shouldBeDeleted { - require.NoFileExists(t, params.file) - } else { - file, err := os.Open(params.file) - require.NoError(t, err) - - r, err := tsm1.NewTSMReader(file) - require.NoError(t, err) - - require.False(t, r.Contains([]byte("cpu"))) - - require.NoError(t, r.Close()) - } - } - - out, err := io.ReadAll(b) - require.NoError(t, err) - require.Contains(t, string(out), params.expectOut) -} - -type tsmParams struct { - invalid bool - improperExt bool - keys []string -} - -func createTSMFile(t *testing.T, params tsmParams) (string, string) { - t.Helper() - dir := t.TempDir() - - var file *os.File - var err error - if !params.improperExt { - file, err = os.CreateTemp(dir, "*."+tsm1.TSMFileExtension) - } else { - file, err = os.CreateTemp(dir, "*.txt") - } - require.NoError(t, err) - defer file.Close() - - w, err := tsm1.NewTSMWriter(file) - require.NoError(t, err) - defer w.Close() - - for _, key := range params.keys { - values := []tsm1.Value{tsm1.NewValue(0, 1.0)} - require.NoError(t, w.Write([]byte(key), values)) - } - - if len(params.keys) != 0 { - require.NoError(t, w.WriteIndex()) - } - - if params.invalid { - require.NoError(t, binary.Write(file, binary.BigEndian, []byte("foobar\n"))) - } - - return dir, file.Name() -} diff --git a/cmd/influxd/inspect/dump_tsi/dump_tsi.go b/cmd/influxd/inspect/dump_tsi/dump_tsi.go deleted file mode 100644 index ec61ce2da4a..00000000000 --- a/cmd/influxd/inspect/dump_tsi/dump_tsi.go +++ /dev/null @@ -1,489 +0,0 @@ -package dump_tsi - -import ( - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "text/tabwriter" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/errors" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" - "github.com/spf13/cobra" -) - -type args struct { - seriesFilePath string - paths []string - - showSeries bool - showMeasurements bool - showTagKeys bool - showTagValues bool - showTagValueSeries bool - - measurementFilter *regexp.Regexp - tagKeyFilter *regexp.Regexp - tagValueFilter *regexp.Regexp - - w io.Writer -} - -func NewDumpTSICommand() *cobra.Command { - var arguments args - var measurementFilter, tagKeyFilter, tagValueFilter string - cmd := &cobra.Command{ - Use: "dump-tsi", - Short: "Dumps low-level details about tsi1 files.", - Args: cobra.MinimumNArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - // Parse filters. 
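// The three --*-filter flags are compiled into regular expressions once, up
// front, and later restrict which measurements, tag keys and tag values are
// printed. Illustrative invocation (paths are hypothetical):
//
//	influxd inspect dump-tsi --series-file /path/to/_series \
//	    --measurements --measurement-filter '^cpu' /path/to/index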
- if measurementFilter != "" { - re, err := regexp.Compile(measurementFilter) - if err != nil { - return fmt.Errorf("failed to parse regex %q: %w", measurementFilter, err) - } - arguments.measurementFilter = re - } - if tagKeyFilter != "" { - re, err := regexp.Compile(tagKeyFilter) - if err != nil { - return fmt.Errorf("failed to parse regex %q: %w", tagKeyFilter, err) - } - arguments.tagKeyFilter = re - } - if tagValueFilter != "" { - re, err := regexp.Compile(tagValueFilter) - if err != nil { - return fmt.Errorf("failed to parse regex %q: %w", tagValueFilter, err) - } - arguments.tagValueFilter = re - } - - arguments.paths = args - if len(arguments.paths) == 0 { - return fmt.Errorf("at least one path required") - } - - // Some flags imply other flags. - if arguments.showTagValueSeries { - arguments.showTagValues = true - } - if arguments.showTagValues { - arguments.showTagKeys = true - } - if arguments.showTagKeys { - arguments.showMeasurements = true - } - - arguments.w = cmd.OutOrStdout() - return arguments.run() - }, - } - - cmd.Flags().StringVar(&arguments.seriesFilePath, "series-file", "", - "Path to series file") - cmd.Flags().BoolVar(&arguments.showSeries, "series", false, - "Show raw series data") - cmd.Flags().BoolVar(&arguments.showMeasurements, "measurements", false, - "Show raw measurement data") - cmd.Flags().BoolVar(&arguments.showTagKeys, "tag-keys", false, - "Show raw tag key data") - cmd.Flags().BoolVar(&arguments.showTagValues, "tag-values", false, - "Show raw tag value data") - cmd.Flags().BoolVar(&arguments.showTagValueSeries, "tag-value-series", false, - "Show raw series data for each value") - cmd.Flags().StringVar(&measurementFilter, "measurement-filter", "", - "Regex measurement filter") - cmd.Flags().StringVar(&tagKeyFilter, "tag-key-filter", "", - "Regex tag key filter") - cmd.Flags().StringVar(&tagValueFilter, "tag-value-filter", "", - "Regex tag value filter") - - cmd.MarkFlagRequired("series-file") - - return cmd -} - -func (a *args) run() (rErr error) { - sfile := tsdb.NewSeriesFile(a.seriesFilePath) - if err := sfile.Open(); err != nil { - return err - } - defer sfile.Close() - - // Build a file set from the paths on the command line. - idx, fs, err := a.readFileSet(sfile) - if err != nil { - return err - } - if fs != nil { - defer errors.Capture(&rErr, fs.Close)() - defer fs.Release() - } - if idx != nil { - defer errors.Capture(&rErr, idx.Close)() - } - - if a.showSeries { - if err := a.printSeries(sfile); err != nil { - return err - } - } - - // If this is an ad-hoc fileset then process it and close afterward. - if fs != nil { - if a.showSeries || a.showMeasurements { - return a.printMeasurements(sfile, fs) - } - return a.printFileSummaries(fs) - } - - // Otherwise iterate over each partition in the index. - for i := 0; i < int(idx.PartitionN); i++ { - if err := func() error { - fs, err := idx.PartitionAt(i).RetainFileSet() - if err != nil { - return err - } - defer fs.Release() - - if a.showSeries || a.showMeasurements { - return a.printMeasurements(sfile, fs) - } - return a.printFileSummaries(fs) - }(); err != nil { - return err - } - } - return nil -} - -func (a *args) readFileSet(sfile *tsdb.SeriesFile) (*tsi1.Index, *tsi1.FileSet, error) { - // If only one path exists and it's a directory then open as an index. - if len(a.paths) == 1 { - fi, err := os.Stat(a.paths[0]) - if err != nil { - return nil, nil, fmt.Errorf("failed to get FileInfo of %q: %w", a.paths[0], err) - } else if fi.IsDir() { - // Verify directory is an index before opening it. 
- if ok, err := tsi1.IsIndexDir(a.paths[0]); err != nil { - return nil, nil, err - } else if !ok { - return nil, nil, fmt.Errorf("not an index directory: %q", a.paths[0]) - } - - idx := tsi1.NewIndex(sfile, - "", - tsi1.WithPath(a.paths[0]), - tsi1.DisableCompactions(), - ) - if err := idx.Open(); err != nil { - return nil, nil, fmt.Errorf("failed to open TSI Index at %q: %w", idx.Path(), err) - } - return idx, nil, nil - } - } - - // Open each file and group into a fileset. - var files []tsi1.File - for _, path := range a.paths { - switch ext := filepath.Ext(path); ext { - case tsi1.LogFileExt: - f := tsi1.NewLogFile(sfile, path) - if err := f.Open(); err != nil { - return nil, nil, fmt.Errorf("failed to get TSI logfile at %q: %w", sfile.Path(), err) - } - files = append(files, f) - - case tsi1.IndexFileExt: - f := tsi1.NewIndexFile(sfile) - f.SetPath(path) - if err := f.Open(); err != nil { - return nil, nil, fmt.Errorf("failed to open index file at %q: %w", f.Path(), err) - } - files = append(files, f) - - default: - return nil, nil, fmt.Errorf("unexpected file extension: %s", ext) - } - } - - fs := tsi1.NewFileSet(files) - fs.Retain() - - return nil, fs, nil -} - -func (a *args) printSeries(sfile *tsdb.SeriesFile) error { - if !a.showSeries { - return nil - } - - // Print header. - tw := tabwriter.NewWriter(a.w, 8, 8, 1, '\t', 0) - fmt.Fprintln(tw, "Series\t") - - // Iterate over each series. - itr := sfile.SeriesIDIterator() - for { - e, err := itr.Next() - if err != nil { - return fmt.Errorf("failed to get next series ID in %q: %w", sfile.Path(), err) - } else if e.SeriesID == 0 { - break - } - name, tags := tsdb.ParseSeriesKey(sfile.SeriesKey(e.SeriesID)) - - if !a.matchSeries(name, tags) { - continue - } - - deleted := sfile.IsDeleted(e.SeriesID) - - fmt.Fprintf(tw, "%s%s\t%v\n", name, tags.HashKey(), deletedString(deleted)) - } - - // Flush & write footer spacing. - if err := tw.Flush(); err != nil { - return fmt.Errorf("failed to flush tabwriter: %w", err) - } - fmt.Fprint(a.w, "\n\n") - - return nil -} - -func (a *args) printMeasurements(sfile *tsdb.SeriesFile, fs *tsi1.FileSet) error { - if !a.showMeasurements { - return nil - } - - tw := tabwriter.NewWriter(a.w, 8, 8, 1, '\t', 0) - fmt.Fprintln(tw, "Measurement\t") - - // Iterate over each series. - if itr := fs.MeasurementIterator(); itr != nil { - for e := itr.Next(); e != nil; e = itr.Next() { - if a.measurementFilter != nil && !a.measurementFilter.Match(e.Name()) { - continue - } - - fmt.Fprintf(tw, "%s\t%v\n", e.Name(), deletedString(e.Deleted())) - if err := tw.Flush(); err != nil { - return fmt.Errorf("failed to flush tabwriter: %w", err) - } - - if err := a.printTagKeys(sfile, fs, e.Name()); err != nil { - return err - } - } - } - - fmt.Fprint(a.w, "\n\n") - - return nil -} - -func (a *args) printTagKeys(sfile *tsdb.SeriesFile, fs *tsi1.FileSet, name []byte) error { - if !a.showTagKeys { - return nil - } - - // Iterate over each key. 
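// Tag keys print indented beneath their measurement; --tag-values and
// --tag-value-series add further levels of nesting. Each of those flags implies
// the ones above it (see the flag handling in NewDumpTSICommand).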
- tw := tabwriter.NewWriter(a.w, 8, 8, 1, '\t', 0) - itr := fs.TagKeyIterator(name) - for e := itr.Next(); e != nil; e = itr.Next() { - if a.tagKeyFilter != nil && !a.tagKeyFilter.Match(e.Key()) { - continue - } - - fmt.Fprintf(tw, " %s\t%v\n", e.Key(), deletedString(e.Deleted())) - if err := tw.Flush(); err != nil { - return fmt.Errorf("failed to flush tabwriter: %w", err) - } - - if err := a.printTagValues(sfile, fs, name, e.Key()); err != nil { - return err - } - } - fmt.Fprint(a.w, "\n") - - return nil -} - -func (a *args) printTagValues(sfile *tsdb.SeriesFile, fs *tsi1.FileSet, name, key []byte) error { - if !a.showTagValues { - return nil - } - - // Iterate over each value. - tw := tabwriter.NewWriter(a.w, 8, 8, 1, '\t', 0) - itr := fs.TagValueIterator(name, key) - for e := itr.Next(); e != nil; e = itr.Next() { - if a.tagValueFilter != nil && !a.tagValueFilter.Match(e.Value()) { - continue - } - - fmt.Fprintf(tw, " %s\t%v\n", e.Value(), deletedString(e.Deleted())) - if err := tw.Flush(); err != nil { - return fmt.Errorf("failed to flush tabwriter: %w", err) - } - - if err := a.printTagValueSeries(sfile, fs, name, key, e.Value()); err != nil { - return err - } - } - fmt.Fprint(a.w, "\n") - - return nil -} - -func (a *args) printTagValueSeries(sfile *tsdb.SeriesFile, fs *tsi1.FileSet, name, key, value []byte) error { - if !a.showTagValueSeries { - return nil - } - - // Iterate over each series. - tw := tabwriter.NewWriter(a.w, 8, 8, 1, '\t', 0) - itr, err := fs.TagValueSeriesIDIterator(name, key, value) - if err != nil { - return fmt.Errorf("failed to get series ID iterator with name %q: %w", name, err) - } - for { - e, err := itr.Next() - if err != nil { - return fmt.Errorf("failed to print tag value series: %w", err) - } else if e.SeriesID == 0 { - break - } - - name, tags := tsdb.ParseSeriesKey(sfile.SeriesKey(e.SeriesID)) - - if !a.matchSeries(name, tags) { - continue - } - - fmt.Fprintf(tw, " %s%s\n", name, tags.HashKey()) - if err := tw.Flush(); err != nil { - return fmt.Errorf("failed to flush tabwriter: %w", err) - } - } - fmt.Fprint(a.w, "\n") - - return nil -} - -func (a *args) printFileSummaries(fs *tsi1.FileSet) error { - for _, f := range fs.Files() { - switch f := f.(type) { - case *tsi1.LogFile: - if err := a.printLogFileSummary(f); err != nil { - return err - } - case *tsi1.IndexFile: - if err := a.printIndexFileSummary(f); err != nil { - return err - } - default: - panic("unreachable") - } - fmt.Fprintln(a.w, "") - } - return nil -} - -func (a *args) printLogFileSummary(f *tsi1.LogFile) error { - fmt.Fprintf(a.w, "[LOG FILE] %s\n", filepath.Base(f.Path())) - tw := tabwriter.NewWriter(a.w, 8, 8, 1, '\t', 0) - fmt.Fprintf(tw, "Series:\t%d\n", f.SeriesN()) - fmt.Fprintf(tw, "Measurements:\t%d\n", f.MeasurementN()) - fmt.Fprintf(tw, "Tag Keys:\t%d\n", f.TagKeyN()) - fmt.Fprintf(tw, "Tag Values:\t%d\n", f.TagValueN()) - return tw.Flush() -} - -func (a *args) printIndexFileSummary(f *tsi1.IndexFile) error { - fmt.Fprintf(a.w, "[INDEX FILE] %s\n", filepath.Base(f.Path())) - - // Calculate summary stats. 
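// The nested iterators below walk measurement -> tag key -> tag value blocks,
// counting entries at each level and summing the size of the encoded series
// data so the summary can report bytes-per-series figures.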
- var measurementN, measurementSeriesN, measurementSeriesSize uint64 - var keyN uint64 - var valueN, valueSeriesN, valueSeriesSize uint64 - - if mitr := f.MeasurementIterator(); mitr != nil { - for me, _ := mitr.Next().(*tsi1.MeasurementBlockElem); me != nil; me, _ = mitr.Next().(*tsi1.MeasurementBlockElem) { - kitr := f.TagKeyIterator(me.Name()) - for ke, _ := kitr.Next().(*tsi1.TagBlockKeyElem); ke != nil; ke, _ = kitr.Next().(*tsi1.TagBlockKeyElem) { - vitr := f.TagValueIterator(me.Name(), ke.Key()) - for ve, _ := vitr.Next().(*tsi1.TagBlockValueElem); ve != nil; ve, _ = vitr.Next().(*tsi1.TagBlockValueElem) { - valueN++ - valueSeriesN += ve.SeriesN() - valueSeriesSize += uint64(len(ve.SeriesData())) - } - keyN++ - } - measurementN++ - measurementSeriesN += me.SeriesN() - measurementSeriesSize += uint64(len(me.SeriesData())) - } - } - - // Write stats. - tw := tabwriter.NewWriter(a.w, 8, 8, 1, '\t', 0) - fmt.Fprintf(tw, "Measurements:\t%d\n", measurementN) - fmt.Fprintf(tw, " Series data size:\t%d (%s)\n", measurementSeriesSize, formatSize(measurementSeriesSize)) - fmt.Fprintf(tw, " Bytes per series:\t%.01fb\n", float64(measurementSeriesSize)/float64(measurementSeriesN)) - fmt.Fprintf(tw, "Tag Keys:\t%d\n", keyN) - fmt.Fprintf(tw, "Tag Values:\t%d\n", valueN) - fmt.Fprintf(tw, " Series:\t%d\n", valueSeriesN) - fmt.Fprintf(tw, " Series data size:\t%d (%s)\n", valueSeriesSize, formatSize(valueSeriesSize)) - fmt.Fprintf(tw, " Bytes per series:\t%.01fb\n", float64(valueSeriesSize)/float64(valueSeriesN)) - return tw.Flush() -} - -// matchSeries returns true if the command filters matches the series. -func (a *args) matchSeries(name []byte, tags models.Tags) bool { - // Filter by measurement. - if a.measurementFilter != nil && !a.measurementFilter.Match(name) { - return false - } - - // Filter by tag key/value. - if a.tagKeyFilter != nil || a.tagValueFilter != nil { - var matched bool - for _, tag := range tags { - if (a.tagKeyFilter == nil || a.tagKeyFilter.Match(tag.Key)) && (a.tagValueFilter == nil || a.tagValueFilter.Match(tag.Value)) { - matched = true - break - } - } - if !matched { - return false - } - } - - return true -} - -// deletedString returns "(deleted)" if v is true. 
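// Otherwise it returns the empty string, so live entries print without a marker.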
-func deletedString(v bool) string { - if v { - return "(deleted)" - } - return "" -} - -func formatSize(v uint64) string { - denom := uint64(1) - var uom string - for _, uom = range []string{"b", "kb", "mb", "gb", "tb"} { - if denom*1024 > v { - break - } - denom *= 1024 - } - return fmt.Sprintf("%0.01f%s", float64(v)/float64(denom), uom) -} diff --git a/cmd/influxd/inspect/dump_tsi/dump_tsi_test.go b/cmd/influxd/inspect/dump_tsi/dump_tsi_test.go deleted file mode 100644 index 0f65976ac7e..00000000000 --- a/cmd/influxd/inspect/dump_tsi/dump_tsi_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package dump_tsi_test - -import ( - "bytes" - "os" - "path/filepath" - "testing" - - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/dump_tsi" - "github.com/influxdata/influxdb/v2/pkg/tar" - "github.com/stretchr/testify/require" -) - -func Test_DumpTSI_NoError(t *testing.T) { - - // Create the Command object - cmd := dump_tsi.NewDumpTSICommand() - b := bytes.NewBufferString("") - cmd.SetOut(b) - - // Create the temp-dir for our un-tared files to live in - dir := t.TempDir() - - // Untar the test data - file, err := os.Open("../tsi-test-data.tar.gz") - require.NoError(t, err) - require.NoError(t, tar.Untar(dir, file)) - require.NoError(t, file.Close()) - - // Run the test - cmd.SetArgs([]string{ - "--series-file", filepath.Join(dir, "test-db-low-cardinality", "_series"), - filepath.Join(dir, "test-db-low-cardinality", "autogen", "1", "index", "0", "L0-00000001.tsl"), - }) - require.NoError(t, cmd.Execute()) - - // Validate output is as-expected - out := b.String() - require.Contains(t, out, "[LOG FILE] L0-00000001.tsl") - require.Contains(t, out, "Series:\t\t1") - require.Contains(t, out, "Measurements:\t1") - require.Contains(t, out, "Tag Keys:\t6") - require.Contains(t, out, "Tag Values:\t6") -} diff --git a/cmd/influxd/inspect/dump_tsm/dump_tsm.go b/cmd/influxd/inspect/dump_tsm/dump_tsm.go deleted file mode 100644 index 55377d9c2bf..00000000000 --- a/cmd/influxd/inspect/dump_tsm/dump_tsm.go +++ /dev/null @@ -1,350 +0,0 @@ -package dump_tsm - -import ( - "encoding/binary" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - "text/tabwriter" - "time" - - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/spf13/cobra" - "golang.org/x/text/cases" - "golang.org/x/text/language" -) - -type args struct { - dumpIndex bool - dumpBlocks bool - dumpAll bool - filterKey string - path string -} - -func NewDumpTSMCommand() *cobra.Command { - var arguments args - cmd := &cobra.Command{ - Use: "dump-tsm", - Short: "Dumps low-level details about tsm1 files", - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - if arguments.path == "" { - cmd.PrintErrf("TSM File not specified\n") - return nil - } - arguments.dumpBlocks = arguments.dumpBlocks || arguments.dumpAll - arguments.dumpIndex = arguments.dumpIndex || arguments.dumpAll - return dumpTSM(cmd, arguments) - }, - } - - cmd.Flags().StringVar(&arguments.path, "file-path", "", - "Path to TSM file") - cmd.Flags().BoolVar(&arguments.dumpIndex, "index", false, - "Dump raw index data") - cmd.Flags().BoolVar(&arguments.dumpBlocks, "blocks", false, - "Dump raw block data") - cmd.Flags().BoolVar(&arguments.dumpAll, "all", false, - "Dump all data. 
Caution: This may print a lot of information") - cmd.Flags().StringVar(&arguments.filterKey, "filter-key", "", - "Only display data matching this key substring") - - return cmd -} - -func dumpTSM(cmd *cobra.Command, args args) error { - f, err := os.Open(args.path) - if err != nil { - return err - } - defer f.Close() - - // Get the file size - stat, err := f.Stat() - if err != nil { - return err - } - - if stat.IsDir() { - return fmt.Errorf("%s is a directory, must be a TSM file", args.path) - } - - if filepath.Ext(args.path) != "."+tsm1.TSMFileExtension { - return fmt.Errorf("%s is not a TSM file", args.path) - } - - r, err := tsm1.NewTSMReader(f) - if err != nil { - return fmt.Errorf("error opening TSM file: %w", err) - } - defer r.Close() - - minTime, maxTime := r.TimeRange() - keyCount := r.KeyCount() - - blockStats := &blockStats{} - - cmd.Println("Summary:") - cmd.Printf(" File: %s\n", args.path) - cmd.Printf(" Time Range: %s - %s\n", - time.Unix(0, minTime).UTC().Format(time.RFC3339Nano), - time.Unix(0, maxTime).UTC().Format(time.RFC3339Nano), - ) - cmd.Printf(" Duration: %s ", time.Unix(0, maxTime).Sub(time.Unix(0, minTime))) - cmd.Printf(" Series: %d ", keyCount) - cmd.Printf(" File Size: %d\n\n", stat.Size()) - - tw := tabwriter.NewWriter(cmd.OutOrStdout(), 8, 8, 1, '\t', 0) - - if args.dumpIndex { - dumpIndex(cmd, args, dumpIndexParams{ - tw: tw, - minTime: minTime, - maxTime: maxTime, - keyCount: keyCount, - r: r, - }) - } - - if args.dumpBlocks { - tw = tabwriter.NewWriter(cmd.OutOrStdout(), 8, 8, 1, '\t', 0) - fmt.Fprintln(tw, " "+strings.Join([]string{"Blk", "Chk", "Ofs", "Len", "Type", "Min Time", "Points", "Enc [T/V]", "Len [T/V]"}, "\t")) - } - - indexSize := r.IndexSize() - blockCount, pointCount, blockSize, err := dumpBlocks(cmd, dumpBlocksParams{ - tw: tw, - keyCount: keyCount, - filterKey: args.filterKey, - dumpBlocks: args.dumpBlocks, - blockStats: blockStats, - f: f, - r: r, - }) - if err != nil { - return fmt.Errorf("failed to decode block in tsm1 file: %w", err) - } - - // Flush the printer to display all block details - if args.dumpBlocks { - cmd.Println("Blocks:") - tw.Flush() - cmd.Println() - } - - // Always print summary statistics about the blocks - var blockSizeAvg int64 - if blockCount > 0 { - blockSizeAvg = blockSize / blockCount - } - cmd.Printf("Statistics\n") - cmd.Printf(" Blocks:\n") - cmd.Printf(" Total: %d Size: %d Min: %d Max: %d Avg: %d\n", - blockCount, blockSize, blockStats.min, blockStats.max, blockSizeAvg) - cmd.Printf(" Index:\n") - cmd.Printf(" Total: %d Size: %d\n", blockCount, indexSize) - cmd.Printf(" Points:\n") - cmd.Printf(" Total: %d\n", pointCount) - - cmd.Println(" Encoding:") - for i, counts := range blockStats.counts { - if len(counts) == 0 { - continue - } - cmd.Printf(" %s: ", cases.Title(language.Und).String(fieldType[i])) - for j, v := range counts { - cmd.Printf("\t%s: %d (%d%%) ", encDescs[i][j], v, int(float64(v)/float64(blockCount)*100)) - } - cmd.Println() - } - cmd.Printf(" Compression:\n") - cmd.Printf(" Per block: %0.2f bytes/point\n", float64(blockSize)/float64(pointCount)) - cmd.Printf(" Total: %0.2f bytes/point\n", float64(stat.Size())/float64(pointCount)) - - return nil -} - -type dumpIndexParams struct { - tw *tabwriter.Writer - minTime int64 - maxTime int64 - keyCount int - r *tsm1.TSMReader -} - -func dumpIndex(cmd *cobra.Command, args args, info dumpIndexParams) { - cmd.Println("Index:") - info.tw.Flush() - cmd.Println() - - fmt.Fprintln(info.tw, " "+strings.Join([]string{"Pos", "Min Time", "Max Time", "Ofs", 
"Size", "Key", "Field"}, "\t")) - var pos int - for i := 0; i < info.keyCount; i++ { - key, _ := info.r.KeyAt(i) - for _, e := range info.r.Entries(key) { - pos++ - measurement, field, _ := strings.Cut(string(key), "#!~#") - - if args.filterKey != "" && !strings.Contains(string(key), args.filterKey) { - continue - } - fmt.Fprintln(info.tw, " "+strings.Join([]string{ - strconv.FormatInt(int64(pos), 10), - time.Unix(0, e.MinTime).UTC().Format(time.RFC3339Nano), - time.Unix(0, e.MaxTime).UTC().Format(time.RFC3339Nano), - strconv.FormatInt(e.Offset, 10), - strconv.FormatInt(int64(e.Size), 10), - measurement, - field, - }, "\t")) - info.tw.Flush() - } - } -} - -type dumpBlocksParams struct { - tw *tabwriter.Writer - keyCount int - filterKey string - dumpBlocks bool - blockStats *blockStats - f *os.File - r *tsm1.TSMReader -} - -func dumpBlocks(cmd *cobra.Command, params dumpBlocksParams) (int64, int64, int64, error) { - // Starting at 5 because the magic number is 4 bytes + 1 byte version - i := int64(5) - b := make([]byte, 8) - var blockCount, pointCount, blockSize int64 - - // Start at the beginning and read every block - for j := 0; j < params.keyCount; j++ { - key, _ := params.r.KeyAt(j) - for _, e := range params.r.Entries(key) { - - params.f.Seek(e.Offset, 0) - params.f.Read(b[:4]) - - chksum := binary.BigEndian.Uint32(b[:4]) - - buf := make([]byte, e.Size-4) - params.f.Read(buf) - - blockSize += int64(e.Size) - - if params.filterKey != "" && !strings.Contains(string(key), params.filterKey) { - i += blockSize - blockCount++ - continue - } - - blockType := buf[0] - - encoded := buf[1:] - - var v []tsm1.Value - v, err := tsm1.DecodeBlock(buf, v) - if err != nil { - return 0, 0, 0, err - } - startTime := time.Unix(0, v[0].UnixNano()) - - pointCount += int64(len(v)) - - // Length of the timestamp block - tsLen, j := binary.Uvarint(encoded) - - // Unpack the timestamp bytes - ts := encoded[j : j+int(tsLen)] - - // Unpack the value bytes - values := encoded[j+int(tsLen):] - - tsEncoding := timeEnc[int(ts[0]>>4)] - vEncoding := encDescs[int(blockType+1)][values[0]>>4] - - typeDesc := blockTypes[blockType] - - params.blockStats.inc(0, ts[0]>>4) - params.blockStats.inc(int(blockType+1), values[0]>>4) - params.blockStats.size(len(buf)) - - // Add a row of block details to the printer. Doesn't actually print yet. 
- if params.dumpBlocks { - fmt.Fprintln(params.tw, " "+strings.Join([]string{ - strconv.FormatInt(blockCount, 10), - strconv.FormatUint(uint64(chksum), 10), - strconv.FormatInt(i, 10), - strconv.FormatInt(int64(len(buf)), 10), - typeDesc, - startTime.UTC().Format(time.RFC3339Nano), - strconv.FormatInt(int64(len(v)), 10), - fmt.Sprintf("%s/%s", tsEncoding, vEncoding), - fmt.Sprintf("%d/%d", len(ts), len(values)), - }, "\t")) - } - - i += blockSize - blockCount++ - } - } - return blockCount, pointCount, blockSize, nil -} - -var ( - fieldType = []string{ - "timestamp", "float", "int", "bool", "string", "unsigned", - } - blockTypes = []string{ - "float64", "int64", "bool", "string", "unsigned", - } - timeEnc = []string{ - "none", "s8b", "rle", - } - floatEnc = []string{ - "none", "gor", - } - intEnc = []string{ - "none", "s8b", "rle", - } - boolEnc = []string{ - "none", "bp", - } - stringEnc = []string{ - "none", "snpy", - } - unsignedEnc = []string{ - "none", "s8b", "rle", - } - encDescs = [][]string{ - timeEnc, floatEnc, intEnc, boolEnc, stringEnc, unsignedEnc, - } -) - -type blockStats struct { - min, max int - counts [][]int -} - -func (b *blockStats) inc(typ int, enc byte) { - for len(b.counts) <= typ { - b.counts = append(b.counts, []int{}) - } - for len(b.counts[typ]) <= int(enc) { - b.counts[typ] = append(b.counts[typ], 0) - } - b.counts[typ][enc]++ -} - -func (b *blockStats) size(sz int) { - if b.min == 0 || sz < b.min { - b.min = sz - } - if b.min == 0 || sz > b.max { - b.max = sz - } -} diff --git a/cmd/influxd/inspect/dump_tsm/dump_tsm_test.go b/cmd/influxd/inspect/dump_tsm/dump_tsm_test.go deleted file mode 100644 index 7a541ab347f..00000000000 --- a/cmd/influxd/inspect/dump_tsm/dump_tsm_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package dump_tsm - -import ( - "bytes" - "encoding/binary" - "io" - "os" - "testing" - - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/stretchr/testify/require" -) - -var argsKeys = []string{"index", "blocks", "all", "filter-key"} - -func Test_DumpTSM_NoFile(t *testing.T) { - runCommand(t, cmdParams{ - file: "", - expectOut: "TSM File not specified", - }) -} - -func Test_DumpTSM_EmptyFile(t *testing.T) { - _, file := makeTSMFile(t, tsmParams{}) - - runCommand(t, cmdParams{ - file: file, - expectErr: true, - expectOut: "error opening TSM file", - }) -} - -func Test_DumpTSM_WrongExt(t *testing.T) { - _, file := makeTSMFile(t, tsmParams{ - wrongExt: true, - }) - - runCommand(t, cmdParams{ - file: file, - expectErr: true, - expectOut: "is not a TSM file", - }) -} - -func Test_DumpTSM_NotFile(t *testing.T) { - dir, _ := makeTSMFile(t, tsmParams{}) - - runCommand(t, cmdParams{ - file: dir, - expectErr: true, - expectOut: "is a directory", - }) -} - -func Test_DumpTSM_Valid(t *testing.T) { - _, file := makeTSMFile(t, tsmParams{ - keys: []string{"cpu"}, - }) - - runCommand(t, cmdParams{ - file: file, - expectOuts: makeExpectOut( - []string{"Summary:", "Index:", "Total: 1", "Size: 34"}, - []string{"Summary:", "Blocks:", "float64", "9/19", "s8b/gor"}, - []string{"Summary:", "Index:", "Blocks:", "Points:", "Encoding:", "Compression:"}, - ), - }) -} - -func Test_DumpTSM_Invalid(t *testing.T) { - _, file := makeTSMFile(t, tsmParams{ - invalid: true, - keys: []string{"cpu"}, - }) - - runCommand(t, cmdParams{ - file: file, - expectErr: true, - expectOut: "error opening TSM file", - }) -} - -func Test_DumpTSM_ManyKeys(t *testing.T) { - _, file := makeTSMFile(t, tsmParams{ - keys: []string{"cpu", "foobar", "mem"}, - }) - - runCommand(t, 
cmdParams{ - file: file, - expectOuts: makeExpectOut( - []string{"Total: 3", "Size: 102"}, - []string{"Blocks:", "float64", "s8b/gor", "9/19"}, - // TODO https://github.com/influxdata/influxdb/issues/22145 - //[]string{"cpu", "foobar", "mem", "float64"}, - ), - }) -} - -func Test_DumpTSM_FilterKey(t *testing.T) { - _, file := makeTSMFile(t, tsmParams{ - keys: []string{"cpu", "foobar", "mem"}, - }) - - runCommand(t, cmdParams{ - file: file, - filter: "cpu", - expectOuts: makeExpectOut( - []string{"Total: 3", "Size: 102"}, - []string{"Blocks:", "float64", "s8b/gor", "9/19"}, - // TODO https://github.com/influxdata/influxdb/issues/22145 - //[]string{"cpu", "foobar", "mem", "float64"}, - //[]string{"Points:\n Total: 1", "s8b: 1 (33%)", "gor: 1 (33%)"}, - ), - }) -} - -func makeExpectOut(outs ...[]string) (m map[string][]string) { - m = make(map[string][]string) - for i, value := range outs { - m[argsKeys[i]] = value - } - return -} - -type cmdParams struct { - file string - expectErr bool - expectOuts map[string][]string - expectOut string - filter string -} - -func runCommand(t *testing.T, params cmdParams) { - cmd := NewDumpTSMCommand() - - b := bytes.NewBufferString("") - cmd.SetOut(b) - cmd.SetErr(b) - - m := makeArgs(params.file, params.filter) - - for argsKey, args := range m { - cmd.SetArgs(args) - if params.expectErr { - require.Error(t, cmd.Execute()) - } else { - require.NoError(t, cmd.Execute()) - } - - out, err := io.ReadAll(b) - require.NoError(t, err) - - if params.expectOut != "" { - require.Contains(t, string(out), params.expectOut) - } else { - for _, value := range params.expectOuts[argsKey] { - require.Contains(t, string(out), value) - } - } - } -} - -func makeArgs(path string, meas string) (args map[string][]string) { - args = make(map[string][]string) - args[argsKeys[0]] = []string{"--file-path", path, "--index"} - args[argsKeys[1]] = []string{"--file-path", path, "--blocks"} - args[argsKeys[2]] = []string{"--file-path", path, "--all"} - if meas != "" { - args[argsKeys[3]] = []string{"--file-path", path, "--filter-key", meas, "--all"} - } - return -} - -type tsmParams struct { - wrongExt bool - invalid bool - keys []string -} - -func makeTSMFile(t *testing.T, params tsmParams) (string, string) { - t.Helper() - - dir := t.TempDir() - - ext := tsm1.TSMFileExtension - if params.wrongExt { - ext = "txt" - } - file, err := os.CreateTemp(dir, "*."+ext) - require.NoError(t, err) - - w, err := tsm1.NewTSMWriter(file) - require.NoError(t, err) - - for _, key := range params.keys { - values := []tsm1.Value{tsm1.NewValue(0, 1.0)} - require.NoError(t, w.Write([]byte(key), values)) - require.NoError(t, w.Flush()) - } - if len(params.keys) != 0 { - require.NoError(t, w.WriteIndex()) - } - - if params.invalid { - require.NoError(t, binary.Write(file, binary.BigEndian, []byte("foobar\n"))) - } - require.NoError(t, w.Close()) - - return dir, file.Name() -} diff --git a/cmd/influxd/inspect/dump_wal/dump_wal.go b/cmd/influxd/inspect/dump_wal/dump_wal.go deleted file mode 100644 index cc2bce02133..00000000000 --- a/cmd/influxd/inspect/dump_wal/dump_wal.go +++ /dev/null @@ -1,164 +0,0 @@ -package dump_wal - -import ( - "fmt" - "os" - "path/filepath" - "sort" - - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/spf13/cobra" -) - -type dumpWALCommand struct { - findDuplicates bool -} - -func NewDumpWALCommand() *cobra.Command { - var dumpWAL dumpWALCommand - cmd := &cobra.Command{ - Use: "dump-wal", - Short: "Dumps TSM data from WAL files", - 
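// A minimal invocation (path illustrative, not taken from this code) is
//   influxd inspect dump-wal <engine>/wal/<bucket>/<rp>/<shard>/_00001.wal
// while --find-duplicates replaces the per-entry dump with the
// duplicate/out-of-order key report described in the Long help text below.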
Long: ` -This tool dumps data from WAL files for debugging purposes. Given at least one WAL file path as an argument, the tool will parse and print out the entries in each file. -It has two modes of operation, depending on the --find-duplicates flag. ---find-duplicates=false (default): for each file, the following is printed: - * The file name - * for each entry, - * The type of the entry (either [write] or [delete-bucket-range]); - * The formatted entry contents ---find-duplicates=true: for each file, the following is printed: - * The file name - * A list of keys in the file that have duplicate or out of order timestamps -`, - Args: cobra.MinimumNArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return dumpWAL.run(cmd, args) - }, - } - - cmd.Flags().BoolVarP(&dumpWAL.findDuplicates, "find-duplicates", "", false, - "ignore dumping entries; only report keys in the WAL files that are duplicates or out of order (default false)") - - return cmd -} - -func (dumpWAL *dumpWALCommand) run(cmd *cobra.Command, args []string) error { - - // Process each WAL file. - for _, path := range args { - if err := dumpWAL.processWALFile(cmd, path); err != nil { - return err - } - } - return nil -} - -func (dumpWAL *dumpWALCommand) processWALFile(cmd *cobra.Command, path string) error { - if filepath.Ext(path) != "."+tsm1.WALFileExtension { - cmd.Printf("invalid wal file path, skipping %s\n", path) - return nil - } - - // Track the earliest timestamp for each key and a set of keys with out-of-order points. - minTimestampByKey := make(map[string]int64) - duplicateKeys := make(map[string]struct{}) - - // Open WAL reader. - f, err := os.Open(path) - if err != nil { - return err - } - defer f.Close() - r := tsm1.NewWALSegmentReader(f) - defer r.Close() - - // Iterate over the WAL entries. - for r.Next() { - entry, err := r.Read() - if err != nil { - return fmt.Errorf("failed to read entry from %q: %w", path, err) - } - - switch entry := entry.(type) { - case *tsm1.WriteWALEntry: - if !dumpWAL.findDuplicates { - cmd.Printf("[write] sz=%d\n", entry.MarshalSize()) - } - - keys := make([]string, 0, len(entry.Values)) - for k := range entry.Values { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - for _, v := range entry.Values[k] { - t := v.UnixNano() - - if dumpWAL.findDuplicates { - // Check for duplicate/out of order keys. - if min, ok := minTimestampByKey[k]; ok && t <= min { - duplicateKeys[k] = struct{}{} - } - minTimestampByKey[k] = t - - // Skip printing if we are only showing duplicate keys. - continue - } - - switch v := v.(type) { - case tsm1.IntegerValue: - cmd.Printf("%s %vi %d\n", k, v.Value(), t) - case tsm1.UnsignedValue: - cmd.Printf("%s %vu %d\n", k, v.Value(), t) - case tsm1.FloatValue: - cmd.Printf("%s %v %d\n", k, v.Value(), t) - case tsm1.BooleanValue: - cmd.Printf("%s %v %d\n", k, v.Value(), t) - case tsm1.StringValue: - cmd.Printf("%s %q %d\n", k, v.Value(), t) - default: - cmd.Printf("%s EMPTY\n", k) - } - } - } - - case *tsm1.DeleteWALEntry: - cmd.Printf("[delete] sz=%d\n", entry.MarshalSize()) - for _, k := range entry.Keys { - cmd.Printf("%s\n", string(k)) - } - - case *tsm1.DeleteRangeWALEntry: - cmd.Printf("[delete-range] min=%d max=%d sz=%d\n", entry.Min, entry.Max, entry.MarshalSize()) - for _, k := range entry.Keys { - cmd.Printf("%s\n", string(k)) - } - - default: - return fmt.Errorf("invalid wal entry: %#v", entry) - } - } - - // Print keys with duplicate or out-of-order points, if requested. 
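// Keys are reported in sorted order; when nothing was flagged, a short
// "No duplicates or out of order timestamps found" message is printed instead.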
- if dumpWAL.findDuplicates { - keys := make([]string, 0, len(duplicateKeys)) - - if len(duplicateKeys) == 0 { - cmd.Println("No duplicates or out of order timestamps found") - return nil - } - - for k := range duplicateKeys { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - cmd.Println(k) - } - } - - return nil -} diff --git a/cmd/influxd/inspect/dump_wal/dump_wal_test.go b/cmd/influxd/inspect/dump_wal/dump_wal_test.go deleted file mode 100644 index a0c471be779..00000000000 --- a/cmd/influxd/inspect/dump_wal/dump_wal_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package dump_wal - -import ( - "bytes" - "fmt" - "io" - "os" - "testing" - - "github.com/golang/snappy" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/spf13/cobra" - "github.com/stretchr/testify/require" -) - -func Test_DumpWal_No_Args(t *testing.T) { - params := cmdParams{ - walPaths: []string{}, - expectErr: true, - expectedOut: "requires at least 1 arg(s), only received 0", - } - - runCommand(t, params) -} - -func Test_DumpWal_Bad_Path(t *testing.T) { - params := cmdParams{ - findDuplicates: false, - walPaths: []string{"badpath.wal"}, - expectErr: true, - expectedOut: "open badpath.wal", - } - - runCommand(t, params) -} - -func Test_DumpWal_Wrong_File_Type(t *testing.T) { - // Creates a temporary .txt file (wrong extension) - file := newTempWal(t, false, false) - - params := cmdParams{ - walPaths: []string{file}, - expectedOut: fmt.Sprintf("invalid wal file path, skipping %s", file), - expectErr: false, - } - runCommand(t, params) -} - -func Test_DumpWal_File_Valid(t *testing.T) { - file := newTempWal(t, true, false) - - params := cmdParams{ - walPaths: []string{file}, - expectedOuts: []string{ - "[write]", - "cpu,host=A#!~#float 1.1 1", - "cpu,host=A#!~#int 1i 1", - "cpu,host=A#!~#bool true 1", - "cpu,host=A#!~#string \"string\" 1", - "cpu,host=A#!~#unsigned 10u 5", - }, - } - - runCommand(t, params) -} - -func Test_DumpWal_Find_Duplicates_None(t *testing.T) { - file := newTempWal(t, true, false) - - params := cmdParams{ - findDuplicates: true, - walPaths: []string{file}, - expectedOut: "No duplicates or out of order timestamps found", - } - - runCommand(t, params) -} - -func Test_DumpWal_Find_Duplicates_Present(t *testing.T) { - file := newTempWal(t, true, true) - - params := cmdParams{ - findDuplicates: true, - walPaths: []string{file}, - expectedOut: "cpu,host=A#!~#unsigned", - } - - runCommand(t, params) -} - -func newTempWal(t *testing.T, validExt bool, withDuplicate bool) string { - t.Helper() - - dir := t.TempDir() - - if !validExt { - file, err := os.CreateTemp(dir, "dumpwaltest*.txt") - require.NoError(t, err) - t.Cleanup(func() { - file.Close() - }) - return file.Name() - } - - file, err := os.CreateTemp(dir, "dumpwaltest*"+"."+tsm1.WALFileExtension) - require.NoError(t, err) - t.Cleanup(func() { - file.Close() - }) - - p1 := tsm1.NewValue(10, 1.1) - p2 := tsm1.NewValue(1, int64(1)) - p3 := tsm1.NewValue(1, true) - p4 := tsm1.NewValue(1, "string") - p5 := tsm1.NewValue(5, uint64(10)) - - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#float": {p1}, - "cpu,host=A#!~#int": {p2}, - "cpu,host=A#!~#bool": {p3}, - "cpu,host=A#!~#string": {p4}, - "cpu,host=A#!~#unsigned": {p5}, - } - - if withDuplicate { - p6 := tsm1.NewValue(1, uint64(70)) - values = map[string][]tsm1.Value{ - "cpu,host=A#!~#unsigned": {p5, p6}, - } - } - - // Write to WAL File - writeWalFile(t, file, values) - - return file.Name() -} - -func writeWalFile(t *testing.T, 
file *os.File, vals map[string][]tsm1.Value) { - t.Helper() - - e := &tsm1.WriteWALEntry{Values: vals} - b, err := e.Encode(nil) - require.NoError(t, err) - - w := tsm1.NewWALSegmentWriter(file) - err = w.Write(e.Type(), snappy.Encode(nil, b)) - require.NoError(t, err) - - err = w.Flush() - require.NoError(t, err) - - err = file.Sync() - require.NoError(t, err) -} - -type cmdParams struct { - findDuplicates bool - walPaths []string - expectedOut string - expectedOuts []string - expectErr bool - expectExactEqual bool -} - -func initCommand(t *testing.T, params cmdParams) *cobra.Command { - t.Helper() - - // Creates new command and sets args - cmd := NewDumpWALCommand() - - allArgs := params.walPaths - if params.findDuplicates { - allArgs = append(allArgs, "--find-duplicates") - } - - cmd.SetArgs(allArgs) - - return cmd -} - -func getOutput(t *testing.T, cmd *cobra.Command) []byte { - t.Helper() - - b := bytes.NewBufferString("") - cmd.SetOut(b) - cmd.SetErr(b) - require.NoError(t, cmd.Execute()) - - out, err := io.ReadAll(b) - require.NoError(t, err) - - return out -} - -func runCommand(t *testing.T, params cmdParams) { - t.Helper() - - cmd := initCommand(t, params) - - if params.expectErr { - require.Contains(t, cmd.Execute().Error(), params.expectedOut) - return - } - - // Get output - out := getOutput(t, cmd) - - // Check output - if params.expectExactEqual { - require.Equal(t, string(out), params.expectedOut) - return - } - - if params.expectedOut != "" { - require.Contains(t, string(out), params.expectedOut) - } else { - for _, output := range params.expectedOuts { - require.Contains(t, string(out), output) - } - } -} diff --git a/cmd/influxd/inspect/export_index/export_index.go b/cmd/influxd/inspect/export_index/export_index.go deleted file mode 100644 index 3910c289803..00000000000 --- a/cmd/influxd/inspect/export_index/export_index.go +++ /dev/null @@ -1,57 +0,0 @@ -package export_index - -import ( - "bufio" - "os" - - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" - "github.com/spf13/cobra" -) - -func NewExportIndexCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: `export-index`, - Short: "Exports TSI index data", - Long: ` -This command will export all series in a TSI index to -SQL format for easier inspection and debugging.`, - Args: cobra.NoArgs, - } - - var seriesFilePath, dataPath string - cmd.Flags().StringVar(&seriesFilePath, "series-path", "", "Path to series file") - cmd.Flags().StringVar(&dataPath, "index-path", "", "Path to the index directory of the data engine") - _ = cmd.MarkFlagRequired("series-path") - _ = cmd.MarkFlagRequired("index-path") - - cmd.RunE = func(cmd *cobra.Command, args []string) error { - // Initialize series file. - sfile := tsdb.NewSeriesFile(seriesFilePath) - if err := sfile.Open(); err != nil { - return err - } - defer sfile.Close() - - // Open index. - idx := tsi1.NewIndex(sfile, "", tsi1.WithPath(dataPath), tsi1.DisableCompactions()) - if err := idx.Open(); err != nil { - return err - } - defer idx.Close() - - // Dump out index data. 
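// Output goes to stdout through a buffered writer; the exporter and the buffer
// are both closed/flushed before returning so no rows are left unwritten.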
- w := bufio.NewWriter(os.Stdout) - e := tsi1.NewSQLIndexExporter(w) - if err := e.ExportIndex(idx); err != nil { - return err - } else if err := e.Close(); err != nil { - return err - } else if err := w.Flush(); err != nil { - return err - } - return nil - } - - return cmd -} diff --git a/cmd/influxd/inspect/export_lp/export_lp.go b/cmd/influxd/inspect/export_lp/export_lp.go deleted file mode 100644 index 3a5549208e6..00000000000 --- a/cmd/influxd/inspect/export_lp/export_lp.go +++ /dev/null @@ -1,431 +0,0 @@ -package export_lp - -import ( - "bufio" - "compress/gzip" - "fmt" - "io" - "math" - "os" - "path/filepath" - "sort" - "strconv" - "sync" - "time" - - "github.com/influxdata/influxdb/v2/kit/cli" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/escape" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/spf13/cobra" - "github.com/spf13/viper" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// exportFlags contains CLI-compatible forms of export options. -type exportFlags struct { - enginePath string - bucketID platform.ID - measurements []string - startTime string - endTime string - - outputPath string - compress bool - - logLevel zapcore.Level -} - -// exportFilters contains storage-optimized forms of parameters used to restrict exports. -type exportFilters struct { - measurements map[string]struct{} - start int64 - end int64 -} - -func newFilters() *exportFilters { - return &exportFilters{ - measurements: make(map[string]struct{}), - start: math.MinInt64, - end: math.MaxInt64, - } -} - -// filters converts CLI-specified filters into storage-optimized forms. -func (f *exportFlags) filters() (*exportFilters, error) { - filters := newFilters() - - if f.startTime != "" { - s, err := time.Parse(time.RFC3339, f.startTime) - if err != nil { - return nil, err - } - filters.start = s.UnixNano() - } - - if f.endTime != "" { - e, err := time.Parse(time.RFC3339, f.endTime) - if err != nil { - return nil, err - } - filters.end = e.UnixNano() - } - - for _, m := range f.measurements { - filters.measurements[m] = struct{}{} - } - - return filters, nil -} - -func newFlags() *exportFlags { - return &exportFlags{ - logLevel: zapcore.InfoLevel, - compress: false, - } -} - -// NewExportLineProtocolCommand builds and registers the `export` subcommand of `influxd inspect`. 
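// The command is registered as `export-lp` (see the Use field below). A sketch of
// a typical invocation, with placeholder paths and IDs:
//
//   influxd inspect export-lp --engine-path <engine-dir> --bucket-id <bucket-id> \
//       --output-path - --compress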
-func NewExportLineProtocolCommand(v *viper.Viper) (*cobra.Command, error) { - flags := newFlags() - - cmd := &cobra.Command{ - Use: `export-lp`, - Short: "Export TSM data as line protocol", - Long: ` -This command will export all TSM data stored in a bucket -to line protocol for inspection and re-ingestion.`, - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, _ []string) error { - return exportRunE(cmd, flags) - }, - } - - opts := []cli.Opt{ - { - DestP: &flags.enginePath, - Flag: "engine-path", - Desc: "path to persistent engine files", - Required: true, - }, - { - DestP: &flags.bucketID, - Flag: "bucket-id", - Desc: "ID of bucket containing data to export", - Required: true, - }, - { - DestP: &flags.measurements, - Flag: "measurement", - Desc: "optional: name(s) of specific measurement to export", - }, - { - DestP: &flags.startTime, - Flag: "start", - Desc: "optional: the start time to export (RFC3339 format)", - }, - { - DestP: &flags.endTime, - Flag: "end", - Desc: "optional: the end time to export (RFC3339 format)", - }, - { - DestP: &flags.outputPath, - Flag: "output-path", - Desc: "path where exported line-protocol should be written. Use '-' to write to standard out", - Required: true, - }, - { - DestP: &flags.compress, - Flag: "compress", - Desc: "if true, compress output with GZIP", - }, - { - DestP: &flags.logLevel, - Flag: "log-level", - Default: flags.logLevel, - }, - } - - if err := cli.BindOptions(v, cmd, opts); err != nil { - return nil, err - } - return cmd, nil -} - -func exportRunE(cmd *cobra.Command, flags *exportFlags) error { - logconf := zap.NewProductionConfig() - logconf.Level = zap.NewAtomicLevelAt(flags.logLevel) - logger, err := logconf.Build() - if err != nil { - return err - } - - filters, err := flags.filters() - if err != nil { - return err - } - - var w io.Writer - if flags.outputPath == "-" { - w = cmd.OutOrStdout() - } else { - f, err := os.Create(flags.outputPath) - if err != nil { - return err - } - defer f.Close() - w = f - } - - // Because calling (*os.File).Write is relatively expensive, - // and we don't *need* to sync to disk on every written line of export, - // use a sized buffered writer so that we only sync the file every megabyte. - bw := bufio.NewWriterSize(w, 1024*1024) - defer bw.Flush() - w = bw - - if flags.compress { - gzw := gzip.NewWriter(w) - defer gzw.Close() - w = gzw - } - - if err := exportTSMs(flags.enginePath, flags.bucketID, filters, w, logger); err != nil { - return err - } - - if err := exportWALs(flags.enginePath, flags.bucketID, filters, w, logger); err != nil { - return err - } - - logger.Info("export complete") - return nil -} - -// exportTSMs finds, reads, and exports all data stored in TSM files for a bucket that matches a set of filters. -func exportTSMs(engineDir string, bucketID platform.ID, filters *exportFilters, out io.Writer, log *zap.Logger) error { - // TSM is stored under `/data////*.tsm` - tsmDir := filepath.Join(engineDir, "data", bucketID.String()) - tsmPattern := filepath.Join(tsmDir, "*", "*", fmt.Sprintf("*.%s", tsm1.TSMFileExtension)) - log.Debug("searching for TSM files", zap.String("file_pattern", tsmPattern)) - tsmFiles, err := filepath.Glob(tsmPattern) - if err != nil { - return err - } - - log.Info("exporting TSM files", zap.String("tsm_dir", tsmDir), zap.Int("file_count", len(tsmFiles))) - - // Ensure we export in the same order that the TSM file store would process the files. 
- // See FileStore.Open() in tsm1/file_store.go - sort.Strings(tsmFiles) - - for _, f := range tsmFiles { - if err := exportTSM(f, filters, out, log); err != nil { - return err - } - } - - return nil -} - -func exportTSM(tsmFile string, filters *exportFilters, out io.Writer, log *zap.Logger) error { - log.Debug("exporting TSM file", zap.String("file_path", tsmFile)) - f, err := os.Open(tsmFile) - if err != nil { - // TSM files can disappear if we're exporting from the engine dir of a live DB, - // and compactions run between our path-lookup and export steps. - if os.IsNotExist(err) { - log.Warn("skipping missing TSM file", zap.String("file_path", tsmFile)) - return nil - } - return err - } - defer f.Close() - - reader, err := tsm1.NewTSMReader(f) - if err != nil { - return err - } - defer reader.Close() - - if !reader.OverlapsTimeRange(filters.start, filters.end) { - return nil - } - filterMeasurement := len(filters.measurements) > 0 - - for i := 0; i < reader.KeyCount(); i++ { - key, _ := reader.KeyAt(i) - values, err := reader.ReadAll(key) - if err != nil { - log.Error( - "unable to read key, skipping point", - zap.ByteString("key", key), - zap.String("tsm_file", tsmFile), - zap.Error(err), - ) - continue - } - key, field := tsm1.SeriesAndFieldFromCompositeKey(key) - if filterMeasurement { - measurement, _ := models.ParseKey(key) - if _, ok := filters.measurements[measurement]; !ok { - continue - } - } - field = escape.Bytes(field) - - if err := writeValues(key, field, values, filters, out, log); err != nil { - return err - } - } - - return nil -} - -// exportTSMs finds, reads, and exports all data stored in WAL files for a bucket that matches a set of filters. -// -// N.B. exported lines can include some duplicates from a matching call to exportTSMs on the same engine/bucket. -// This is OK since writes are idempotent. -func exportWALs(engineDir string, bucketID platform.ID, filters *exportFilters, out io.Writer, log *zap.Logger) error { - // WAL is stored under `/wal////*.wal` - walDir := filepath.Join(engineDir, "wal", bucketID.String()) - walPattern := filepath.Join(walDir, "*", "*", fmt.Sprintf("*.%s", tsm1.WALFileExtension)) - log.Debug("searching for WAL files", zap.String("file_pattern", walPattern)) - walFiles, err := filepath.Glob(walPattern) - if err != nil { - return err - } - - // N.B. WAL files might contain tombstone markers that haven't been sync'd down into TSM yet. - // We can't really deal with them when working at this low level, so we warn the user if we encounter one. - var tombstoneWarnOnce sync.Once - warnTombstone := func() { - tombstoneWarnOnce.Do(func() { - log.Warn("detected deletes in WAL file, some deleted data may be brought back by replaying this export") - }) - } - - // Ensure we export in the same order that the TSM WAL would process the files. - // See segmentFileNames in tsm1/wal.go - sort.Strings(walFiles) - - log.Info("exporting WAL files", zap.String("wal_dir", walDir), zap.Int("file_count", len(walFiles))) - for _, f := range walFiles { - if err := exportWAL(f, filters, out, log, warnTombstone); err != nil { - return err - } - } - - return nil -} - -func exportWAL(walFile string, filters *exportFilters, out io.Writer, log *zap.Logger, onDelete func()) error { - log.Debug("exporting WAL file", zap.String("file_path", walFile)) - f, err := os.Open(walFile) - if err != nil { - // WAL files can disappear if we're exporting from the engine dir of a live DB, - // and a snapshot is written between our path-lookup and export steps. 
- if os.IsNotExist(err) { - log.Warn("skipping missing WAL file", zap.String("file_path", walFile)) - return nil - } - } - defer f.Close() - - reader := tsm1.NewWALSegmentReader(f) - defer reader.Close() - - filterMeasurement := len(filters.measurements) > 0 - - for reader.Next() { - entry, err := reader.Read() - if err != nil { - n := reader.Count() - log.Error( - "stopping at corrupt position in WAL file", - zap.String("file_path", walFile), - zap.Int64("position", n), - ) - break - } - - switch t := entry.(type) { - case *tsm1.DeleteWALEntry, *tsm1.DeleteRangeWALEntry: - onDelete() - continue - case *tsm1.WriteWALEntry: - for key, values := range t.Values { - key, field := tsm1.SeriesAndFieldFromCompositeKey([]byte(key)) - if filterMeasurement { - measurement, _ := models.ParseKey(key) - if _, ok := filters.measurements[measurement]; !ok { - continue - } - } - field = escape.Bytes(field) - if err := writeValues(key, field, values, filters, out, log); err != nil { - return err - } - } - } - } - - return nil -} - -func writeValues(key []byte, field []byte, values []tsm1.Value, filters *exportFilters, out io.Writer, log *zap.Logger) error { - buf := []byte(fmt.Sprintf("%s %s=", key, field)) - prefixLen := len(buf) - - for _, value := range values { - ts := value.UnixNano() - if ts < filters.start || ts > filters.end { - continue - } - - // Re-slice buf to be " =". - buf = buf[:prefixLen] - - // Append the correct representation of the value. - switch v := value.Value().(type) { - case float64: - buf = strconv.AppendFloat(buf, v, 'g', -1, 64) - case int64: - buf = strconv.AppendInt(buf, v, 10) - buf = append(buf, 'i') - case uint64: - buf = strconv.AppendUint(buf, v, 10) - buf = append(buf, 'u') - case bool: - buf = strconv.AppendBool(buf, v) - case string: - buf = append(buf, '"') - buf = append(buf, models.EscapeStringField(v)...) - buf = append(buf, '"') - default: - // This shouldn't be possible. - log.Error( - "ignoring value with unsupported type", - zap.ByteString("key", key), - zap.ByteString("field", field), - zap.String("value", value.String()), - ) - continue - } - - // Now buf has " =". - // Append the timestamp and a newline, then write it. - buf = append(buf, ' ') - buf = strconv.AppendInt(buf, ts, 10) - buf = append(buf, '\n') - if _, err := out.Write(buf); err != nil { - // Underlying IO error needs to be returned. 
- return err - } - } - - return nil -} diff --git a/cmd/influxd/inspect/export_lp/export_lp_test.go b/cmd/influxd/inspect/export_lp/export_lp_test.go deleted file mode 100644 index 726be700e7f..00000000000 --- a/cmd/influxd/inspect/export_lp/export_lp_test.go +++ /dev/null @@ -1,437 +0,0 @@ -package export_lp - -import ( - "bytes" - "fmt" - "math/rand" - "os" - "sort" - "strconv" - "strings" - "testing" - - "github.com/golang/snappy" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" -) - -type corpus map[string][]tsm1.Value - -var ( - floatCorpus = corpus{ - tsm1.SeriesFieldKey("floats,k=f", "f"): []tsm1.Value{ - tsm1.NewValue(1, float64(1.5)), - tsm1.NewValue(2, float64(3)), - }, - } - - floatLines = []string{ - "floats,k=f f=1.5 1", - "floats,k=f f=3 2", - } - - intCorpus = corpus{ - tsm1.SeriesFieldKey("ints,k=i", "i"): []tsm1.Value{ - tsm1.NewValue(10, int64(15)), - tsm1.NewValue(20, int64(30)), - }, - } - - intLines = []string{ - "ints,k=i i=15i 10", - "ints,k=i i=30i 20", - } - - boolCorpus = corpus{ - tsm1.SeriesFieldKey("bools,k=b", "b"): []tsm1.Value{ - tsm1.NewValue(100, true), - tsm1.NewValue(200, false), - }, - } - - boolLines = []string{ - "bools,k=b b=true 100", - "bools,k=b b=false 200", - } - - stringCorpus = corpus{ - tsm1.SeriesFieldKey("strings,k=s", "s"): []tsm1.Value{ - tsm1.NewValue(1000, "1k"), - tsm1.NewValue(2000, "2k"), - }, - } - - stringLines = []string{ - `strings,k=s s="1k" 1000`, - `strings,k=s s="2k" 2000`, - } - - uintCorpus = corpus{ - tsm1.SeriesFieldKey("uints,k=u", "u"): []tsm1.Value{ - tsm1.NewValue(3000, uint64(45)), - tsm1.NewValue(4000, uint64(60)), - }, - } - - uintLines = []string{ - `uints,k=u u=45u 3000`, - `uints,k=u u=60u 4000`, - } - - escapeStringCorpus = corpus{ - tsm1.SeriesFieldKey("t", "s"): []tsm1.Value{ - tsm1.NewValue(1, `1. "quotes"`), - tsm1.NewValue(2, `2. back\slash`), - tsm1.NewValue(3, `3. bs\q"`), - }, - } - - escCorpusExpLines = []string{ - `t s="1. \"quotes\"" 1`, - `t s="2. back\\slash" 2`, - `t s="3. bs\\q\"" 3`, - } - - basicCorpus = make(corpus) - basicCorpusExpLines []string - - numsOnlyFilter = newFilters() - numsOnlyExpLines []string - - earlyEntriesOnlyFilter = newFilters() - earlyEntriesOnlyExpLines []string - - lateEntriesOnlyFilter = newFilters() - lateEntriesOnlyExpLines []string -) - -func init() { - for _, c := range []corpus{floatCorpus, intCorpus, boolCorpus, stringCorpus, uintCorpus} { - for k, v := range c { - basicCorpus[k] = v - } - } - - for _, l := range [][]string{floatLines, intLines, boolLines, stringLines, uintLines} { - basicCorpusExpLines = append(basicCorpusExpLines, l...) - } - - for _, m := range []string{"floats", "ints", "uints"} { - numsOnlyFilter.measurements[m] = struct{}{} - } - for _, l := range [][]string{floatLines, intLines, uintLines} { - numsOnlyExpLines = append(numsOnlyExpLines, l...) - } - - earlyEntriesOnlyFilter.end = 150 - earlyEntriesOnlyExpLines = append(earlyEntriesOnlyExpLines, floatLines...) - earlyEntriesOnlyExpLines = append(earlyEntriesOnlyExpLines, intLines...) - earlyEntriesOnlyExpLines = append(earlyEntriesOnlyExpLines, boolLines[0]) - - lateEntriesOnlyFilter.start = 150 - lateEntriesOnlyExpLines = append(lateEntriesOnlyExpLines, boolLines[1]) - lateEntriesOnlyExpLines = append(lateEntriesOnlyExpLines, stringLines...) - lateEntriesOnlyExpLines = append(lateEntriesOnlyExpLines, uintLines...) 
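// Timestamp bounds are inclusive (see writeValues), so the "early" fixture keeps
// points with t <= 150 and the "late" fixture keeps points with t >= 150; the
// bool series (timestamps 100 and 200) is split between the two.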
-} - -func Test_exportWAL(t *testing.T) { - log := zaptest.NewLogger(t) - - for _, c := range []struct { - corpus corpus - filter *exportFilters - lines []string - }{ - {corpus: basicCorpus, filter: newFilters(), lines: basicCorpusExpLines}, - {corpus: escapeStringCorpus, filter: newFilters(), lines: escCorpusExpLines}, - {corpus: basicCorpus, filter: numsOnlyFilter, lines: numsOnlyExpLines}, - {corpus: basicCorpus, filter: earlyEntriesOnlyFilter, lines: earlyEntriesOnlyExpLines}, - {corpus: basicCorpus, filter: lateEntriesOnlyFilter, lines: lateEntriesOnlyExpLines}, - } { - walFile, err := writeCorpusToWALFile(c.corpus) - if err != nil { - t.Fatal(err) - } - defer os.Remove(walFile.Name()) - - var out bytes.Buffer - if err := exportWAL(walFile.Name(), c.filter, &out, log, func() {}); err != nil { - t.Fatal(err) - } - - lines := strings.Split(out.String(), "\n") - for _, exp := range c.lines { - found := false - for _, l := range lines { - if exp == l { - found = true - break - } - } - - if !found { - t.Fatalf("expected line %q to be in exported output:\n%s", exp, out.String()) - } - } - } - - // Missing .wal file should not cause a failure. - var out bytes.Buffer - if err := exportWAL("file-that-does-not-exist.wal", newFilters(), &out, log, func() {}); err != nil { - t.Fatal(err) - } -} - -func Test_exportTSM(t *testing.T) { - log := zaptest.NewLogger(t) - - for _, c := range []struct { - corpus corpus - filter *exportFilters - lines []string - }{ - {corpus: basicCorpus, filter: newFilters(), lines: basicCorpusExpLines}, - {corpus: escapeStringCorpus, filter: newFilters(), lines: escCorpusExpLines}, - {corpus: basicCorpus, filter: numsOnlyFilter, lines: numsOnlyExpLines}, - {corpus: basicCorpus, filter: earlyEntriesOnlyFilter, lines: earlyEntriesOnlyExpLines}, - {corpus: basicCorpus, filter: lateEntriesOnlyFilter, lines: lateEntriesOnlyExpLines}, - } { - tsmFile, err := writeCorpusToTSMFile(c.corpus) - if err != nil { - t.Fatal(err) - } - defer os.Remove(tsmFile.Name()) - - var out bytes.Buffer - if err := exportTSM(tsmFile.Name(), c.filter, &out, log); err != nil { - t.Fatal(err) - } - - lines := strings.Split(out.String(), "\n") - for _, exp := range c.lines { - found := false - for _, l := range lines { - if exp == l { - found = true - break - } - } - - if !found { - t.Fatalf("expected line %q to be in exported output:\n%s", exp, out.String()) - } - } - } - - // Missing .tsm file should not cause a failure. - var out bytes.Buffer - if err := exportTSM("file-that-does-not-exist.tsm", newFilters(), &out, log); err != nil { - t.Fatal(err) - } -} - -var sink interface{} - -func benchmarkExportTSM(c corpus, b *testing.B) { - log := zap.NewNop() - - // Garbage collection is relatively likely to happen during export, so track allocations. 
- b.ReportAllocs() - - f, err := writeCorpusToTSMFile(c) - if err != nil { - b.Fatal(err) - } - defer os.Remove(f.Name()) - - var out bytes.Buffer - b.ResetTimer() - b.StartTimer() - for i := 0; i < b.N; i++ { - if err := exportTSM(f.Name(), newFilters(), &out, log); err != nil { - b.Fatal(err) - } - - sink = out.Bytes() - out.Reset() - } -} - -func BenchmarkExportTSMFloats_100s_250vps(b *testing.B) { - benchmarkExportTSM(makeFloatsCorpus(100, 250), b) -} - -func BenchmarkExportTSMInts_100s_250vps(b *testing.B) { - benchmarkExportTSM(makeIntsCorpus(100, 250), b) -} - -func BenchmarkExportTSMBools_100s_250vps(b *testing.B) { - benchmarkExportTSM(makeBoolsCorpus(100, 250), b) -} - -func BenchmarkExportTSMStrings_100s_250vps(b *testing.B) { - benchmarkExportTSM(makeStringsCorpus(100, 250), b) -} - -func benchmarkExportWAL(c corpus, b *testing.B) { - log := zap.NewNop() - - // Garbage collection is relatively likely to happen during export, so track allocations. - b.ReportAllocs() - - f, err := writeCorpusToWALFile(c) - if err != nil { - b.Fatal(err) - } - defer os.Remove(f.Name()) - - var out bytes.Buffer - b.ResetTimer() - b.StartTimer() - for i := 0; i < b.N; i++ { - if err := exportWAL(f.Name(), newFilters(), &out, log, func() {}); err != nil { - b.Fatal(err) - } - - sink = out.Bytes() - out.Reset() - } -} - -func BenchmarkExportWALFloats_100s_250vps(b *testing.B) { - benchmarkExportWAL(makeFloatsCorpus(100, 250), b) -} - -func BenchmarkExportWALInts_100s_250vps(b *testing.B) { - benchmarkExportWAL(makeIntsCorpus(100, 250), b) -} - -func BenchmarkExportWALBools_100s_250vps(b *testing.B) { - benchmarkExportWAL(makeBoolsCorpus(100, 250), b) -} - -func BenchmarkExportWALStrings_100s_250vps(b *testing.B) { - benchmarkExportWAL(makeStringsCorpus(100, 250), b) -} - -// makeCorpus returns a new corpus filled with values generated by fn. -// The RNG passed to fn is seeded with numSeries * numValuesPerSeries, for predictable output. -func makeCorpus(numSeries, numValuesPerSeries int, fn func(*rand.Rand) interface{}) corpus { - rng := rand.New(rand.NewSource(int64(numSeries) * int64(numValuesPerSeries))) - var unixNano int64 - corpus := make(corpus, numSeries) - for i := 0; i < numSeries; i++ { - vals := make([]tsm1.Value, numValuesPerSeries) - for j := 0; j < numValuesPerSeries; j++ { - vals[j] = tsm1.NewValue(unixNano, fn(rng)) - unixNano++ - } - - k := fmt.Sprintf("m,t=%d", i) - corpus[tsm1.SeriesFieldKey(k, "x")] = vals - } - - return corpus -} - -func makeFloatsCorpus(numSeries, numFloatsPerSeries int) corpus { - return makeCorpus(numSeries, numFloatsPerSeries, func(rng *rand.Rand) interface{} { - return rng.Float64() - }) -} - -func makeIntsCorpus(numSeries, numIntsPerSeries int) corpus { - return makeCorpus(numSeries, numIntsPerSeries, func(rng *rand.Rand) interface{} { - // This will only return positive integers. That's probably okay. - return rng.Int63() - }) -} - -func makeBoolsCorpus(numSeries, numBoolsPerSeries int) corpus { - return makeCorpus(numSeries, numBoolsPerSeries, func(rng *rand.Rand) interface{} { - return rand.Int63n(2) == 1 - }) -} - -func makeStringsCorpus(numSeries, numStringsPerSeries int) corpus { - return makeCorpus(numSeries, numStringsPerSeries, func(rng *rand.Rand) interface{} { - // The string will randomly have 2-6 parts - parts := make([]string, rand.Intn(4)+2) - - for i := range parts { - // Each part is a random base36-encoded number - parts[i] = strconv.FormatInt(rand.Int63(), 36) - } - - // Join the individual parts with underscores. 
- return strings.Join(parts, "_") - }) -} - -// writeCorpusToWALFile writes the given corpus as a WAL file, and returns a handle to that file. -// It is the caller's responsibility to remove the returned temp file. -func writeCorpusToWALFile(c corpus) (*os.File, error) { - walFile, err := os.CreateTemp("", "export_test_corpus_wal") - if err != nil { - return nil, err - } - - e := &tsm1.WriteWALEntry{Values: c} - b, err := e.Encode(nil) - if err != nil { - return nil, err - } - - w := tsm1.NewWALSegmentWriter(walFile) - if err := w.Write(e.Type(), snappy.Encode(nil, b)); err != nil { - return nil, err - } - - if err := w.Flush(); err != nil { - return nil, err - } - // (*tsm1.WALSegmentWriter).sync isn't exported, but it only Syncs the file anyway. - if err := walFile.Sync(); err != nil { - return nil, err - } - - return walFile, nil -} - -// writeCorpusToTSMFile writes the given corpus as a TSM file, and returns a handle to that file. -// It is the caller's responsibility to remove the returned temp file. -func writeCorpusToTSMFile(c corpus) (*os.File, error) { - tsmFile, err := os.CreateTemp("", "export_test_corpus_tsm") - if err != nil { - return nil, err - } - - w, err := tsm1.NewTSMWriter(tsmFile) - if err != nil { - return nil, err - } - - // Write the series in alphabetical order so that each test run is comparable, - // given an identical corpus. - keys := make([]string, 0, len(c)) - for k := range c { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - if err := w.Write([]byte(k), c[k]); err != nil { - return nil, err - } - } - - if err := w.WriteIndex(); err != nil { - return nil, err - } - - if err := w.Close(); err != nil { - return nil, err - } - - return tsmFile, nil -} diff --git a/cmd/influxd/inspect/inspect.go b/cmd/influxd/inspect/inspect.go deleted file mode 100644 index 43398b8676d..00000000000 --- a/cmd/influxd/inspect/inspect.go +++ /dev/null @@ -1,72 +0,0 @@ -package inspect - -import ( - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/build_tsi" - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/delete_tsm" - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/dump_tsi" - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/dump_tsm" - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/dump_wal" - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/export_index" - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/export_lp" - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/report_db" - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/report_tsi" - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/report_tsm" - typecheck "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/type_conflicts" - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/verify_seriesfile" - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/verify_tombstone" - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/verify_tsm" - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/verify_wal" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -// NewCommand creates the new command. 
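// It attaches every `influxd inspect` subcommand defined under this package to a
// single parent `inspect` command. The subcommands that bind configuration options
// (export-lp, report-db and the two type-conflict schema commands) are built with
// the shared viper instance and may fail, which is why an error is returned.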
-func NewCommand(v *viper.Viper) (*cobra.Command, error) { - base := &cobra.Command{ - Use: "inspect", - Short: "Commands for inspecting on-disk database data", - Args: cobra.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - cmd.PrintErrf("See '%s -h' for help\n", cmd.CommandPath()) - }, - } - - exportLp, err := export_lp.NewExportLineProtocolCommand(v) - if err != nil { - return nil, err - } - - reportDB, err := report_db.NewReportDBCommand(v) - if err != nil { - return nil, err - } - - checkSchema, err := typecheck.NewCheckSchemaCommand(v) - if err != nil { - return nil, err - } - - mergeSchema, err := typecheck.NewMergeSchemaCommand(v) - if err != nil { - return nil, err - } - - base.AddCommand(exportLp) - base.AddCommand(report_tsi.NewReportTSICommand()) - base.AddCommand(export_index.NewExportIndexCommand()) - base.AddCommand(verify_tsm.NewTSMVerifyCommand()) - base.AddCommand(verify_seriesfile.NewVerifySeriesfileCommand()) - base.AddCommand(verify_tombstone.NewVerifyTombstoneCommand()) - base.AddCommand(dump_tsm.NewDumpTSMCommand()) - base.AddCommand(dump_tsi.NewDumpTSICommand()) - base.AddCommand(delete_tsm.NewDeleteTSMCommand()) - base.AddCommand(dump_wal.NewDumpWALCommand()) - base.AddCommand(verify_wal.NewVerifyWALCommand()) - base.AddCommand(report_tsm.NewReportTSMCommand()) - base.AddCommand(build_tsi.NewBuildTSICommand()) - base.AddCommand(reportDB) - base.AddCommand(checkSchema) - base.AddCommand(mergeSchema) - - return base, nil -} diff --git a/cmd/influxd/inspect/report_db/aggregators/aggregators.go b/cmd/influxd/inspect/report_db/aggregators/aggregators.go deleted file mode 100644 index 4040737d8be..00000000000 --- a/cmd/influxd/inspect/report_db/aggregators/aggregators.go +++ /dev/null @@ -1,242 +0,0 @@ -package aggregators - -import ( - "fmt" - "strings" - "sync" - "text/tabwriter" - - report "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/report_tsm" - "github.com/influxdata/influxdb/v2/models" -) - -type rollupNodeMap map[string]RollupNode - -type RollupNode interface { - sync.Locker - report.Counter - Children() rollupNodeMap - RecordSeries(bucket, rp, ms string, key, field []byte, tags models.Tags) - Print(tw *tabwriter.Writer, printTags bool, bucket, rp, ms string) error - isLeaf() bool - child(key string, isLeaf bool) NodeWrapper -} - -type NodeWrapper struct { - RollupNode -} - -var detailedHeader = []string{"bucket", "retention policy", "measurement", "series", "fields", "tag total", "tags"} -var simpleHeader = []string{"bucket", "retention policy", "measurement", "series"} - -type RollupNodeFactory struct { - header []string - EstTitle string - NewNode func(isLeaf bool) NodeWrapper - counter func() report.Counter -} - -var nodeFactory *RollupNodeFactory - -func CreateNodeFactory(detailed, exact bool) *RollupNodeFactory { - estTitle := " (est.)" - newCounterFn := report.NewHLLCounter - if exact { - newCounterFn = report.NewExactCounter - estTitle = "" - } - - if detailed { - nodeFactory = newDetailedNodeFactory(newCounterFn, estTitle) - } else { - nodeFactory = newSimpleNodeFactory(newCounterFn, estTitle) - } - return nodeFactory -} - -func (f *RollupNodeFactory) PrintHeader(tw *tabwriter.Writer) error { - _, err := fmt.Fprintln(tw, strings.Join(f.header, "\t")) - return err -} - -func (f *RollupNodeFactory) PrintDivider(tw *tabwriter.Writer) error { - divLine := f.makeTabDivider() - _, err := fmt.Fprintln(tw, divLine) - return err -} - -func (f *RollupNodeFactory) makeTabDivider() string { - div := make([]string, 0, len(f.header)) 
- for _, s := range f.header { - div = append(div, strings.Repeat("-", len(s))) - } - return strings.Join(div, "\t") -} - -func newSimpleNodeFactory(newCounterFn func() report.Counter, est string) *RollupNodeFactory { - return &RollupNodeFactory{ - header: simpleHeader, - EstTitle: est, - NewNode: func(isLeaf bool) NodeWrapper { return NodeWrapper{newSimpleNode(isLeaf, newCounterFn)} }, - counter: newCounterFn, - } -} - -func newDetailedNodeFactory(newCounterFn func() report.Counter, est string) *RollupNodeFactory { - return &RollupNodeFactory{ - header: detailedHeader, - EstTitle: est, - NewNode: func(isLeaf bool) NodeWrapper { return NodeWrapper{newDetailedNode(isLeaf, newCounterFn)} }, - counter: newCounterFn, - } -} - -type simpleNode struct { - sync.Mutex - report.Counter - rollupNodeMap -} - -func (s *simpleNode) Children() rollupNodeMap { - return s.rollupNodeMap -} - -func (s *simpleNode) child(key string, isLeaf bool) NodeWrapper { - if s.isLeaf() { - panic("Trying to get the child to a leaf node") - } - s.Lock() - defer s.Unlock() - c, ok := s.Children()[key] - if !ok { - c = nodeFactory.NewNode(isLeaf) - s.Children()[key] = c - } - return NodeWrapper{c} -} - -func (s *simpleNode) isLeaf() bool { - return s.Children() == nil -} - -func newSimpleNode(isLeaf bool, fn func() report.Counter) *simpleNode { - s := &simpleNode{Counter: fn()} - if !isLeaf { - s.rollupNodeMap = make(rollupNodeMap) - } else { - s.rollupNodeMap = nil - } - return s -} - -func (s *simpleNode) RecordSeries(bucket, rp, _ string, key, _ []byte, _ models.Tags) { - s.Lock() - defer s.Unlock() - s.recordSeriesNoLock(bucket, rp, key) -} - -func (s *simpleNode) recordSeriesNoLock(bucket, rp string, key []byte) { - s.Add([]byte(fmt.Sprintf("%s.%s.%s", bucket, rp, key))) -} - -func (s *simpleNode) Print(tw *tabwriter.Writer, _ bool, bucket, rp, ms string) error { - _, err := fmt.Fprintf(tw, "%s\t%s\t%s\t%d\n", - bucket, - rp, - ms, - s.Count()) - return err -} - -type detailedNode struct { - simpleNode - fields report.Counter - tags map[string]report.Counter -} - -func newDetailedNode(isLeaf bool, fn func() report.Counter) *detailedNode { - d := &detailedNode{ - simpleNode: simpleNode{ - Counter: fn(), - }, - fields: fn(), - tags: make(map[string]report.Counter), - } - if !isLeaf { - d.simpleNode.rollupNodeMap = make(rollupNodeMap) - } else { - d.simpleNode.rollupNodeMap = nil - } - return d -} - -func (d *detailedNode) RecordSeries(bucket, rp, ms string, key, field []byte, tags models.Tags) { - d.Lock() - defer d.Unlock() - d.simpleNode.recordSeriesNoLock(bucket, rp, key) - d.fields.Add([]byte(fmt.Sprintf("%s.%s.%s.%s", bucket, rp, ms, field))) - for _, t := range tags { - // Add database, retention policy, and measurement - // to correctly aggregate in inner (non-leaf) nodes - canonTag := fmt.Sprintf("%s.%s.%s.%s", bucket, rp, ms, t.Key) - tc, ok := d.tags[canonTag] - if !ok { - tc = nodeFactory.counter() - d.tags[canonTag] = tc - } - tc.Add(t.Value) - } -} - -func (d *detailedNode) Print(tw *tabwriter.Writer, printTags bool, bucket, rp, ms string) error { - seriesN := d.Count() - fieldsN := d.fields.Count() - var tagKeys []string - tagN := uint64(0) - - if printTags { - tagKeys = make([]string, 0, len(d.tags)) - } - for k, v := range d.tags { - c := v.Count() - tagN += c - if printTags { - tagKeys = append(tagKeys, fmt.Sprintf("%q: %d", k[strings.LastIndex(k, ".")+1:], c)) - } - } - _, err := fmt.Fprintf(tw, "%s\t%s\t%s\t%d\t%d\t%d\t%s\n", - bucket, - rp, - ms, - seriesN, - fieldsN, - tagN, - strings.Join(tagKeys, 
", ")) - return err -} - -func (r *NodeWrapper) Record(depth, totalDepth int, bucket, rp, measurement string, key []byte, field []byte, tags models.Tags) { - r.RecordSeries(bucket, rp, measurement, key, field, tags) - - switch depth { - case 2: - if depth < totalDepth { - // Create measurement level in tree - c := r.child(measurement, true) - c.RecordSeries(bucket, rp, measurement, key, field, tags) - } - case 1: - if depth < totalDepth { - // Create retention policy level in tree - c := r.child(rp, (depth+1) == totalDepth) - c.Record(depth+1, totalDepth, bucket, rp, measurement, key, field, tags) - } - case 0: - if depth < totalDepth { - // Create database level in tree - c := r.child(bucket, (depth+1) == totalDepth) - c.Record(depth+1, totalDepth, bucket, rp, measurement, key, field, tags) - } - default: - } -} diff --git a/cmd/influxd/inspect/report_db/aggregators/aggregators_test.go b/cmd/influxd/inspect/report_db/aggregators/aggregators_test.go deleted file mode 100644 index 0cb264228db..00000000000 --- a/cmd/influxd/inspect/report_db/aggregators/aggregators_test.go +++ /dev/null @@ -1,330 +0,0 @@ -package aggregators - -import ( - "bytes" - "sync" - "testing" - - "github.com/influxdata/influxdb/v2/models" - "github.com/stretchr/testify/require" -) - -type result struct { - fields uint64 - tags uint64 - series uint64 -} - -type test struct { - db string - rp string - key []byte -} - -// Ensure that tags and fields and series which differ only in database, retention policy, or measurement -// are correctly counted. -func Test_canonicalize(t *testing.T) { - totalDepth := 3 - - // measurement,tag1=tag1_value1,tag2=tag2_value1#!~#field1 - tests := []test{ - { - db: "db1", - rp: "rp1", - key: []byte("m1,t1=t1_v1,t2=t2_v1#!~#f1"), - }, - { - db: "db1", - rp: "rp1", - key: []byte("m1,t1=t1_v2,t2=t2_v1#!~#f1"), - }, - { - db: "db1", - rp: "rp1", - key: []byte("m1,t1=t1_v1,t2=t2_v2#!~#f1"), - }, - { - db: "db1", - rp: "rp1", - key: []byte("m1,t1=t1_v2,t2=t2_v2#!~#f1"), - }, - { - db: "db1", - rp: "rp1", - key: []byte("m1,t1=t1_v2,t2=t2_v2#!~#f2"), - }, - { - db: "db1", - rp: "rp2", - key: []byte("m1,t1=t1_v1,t2=t2_v1#!~#f1"), - }, - { - db: "db1", - rp: "rp2", - key: []byte("m1,t1=t1_v2,t2=t2_v1#!~#f1"), - }, - { - db: "db1", - rp: "rp2", - key: []byte("m1,t1=t1_v1,t2=t2_v2#!~#f1"), - }, - { - db: "db1", - rp: "rp2", - key: []byte("m1,t1=t1_v2,t2=t2_v2#!~#f3"), - }, - { - db: "db1", - rp: "rp2", - key: []byte("m1,t1=t1_v2,t2=t2_v2#!~#f2"), - }, - { - db: "db1", - rp: "rp1", - key: []byte("m2,t1=t1_v1,t2=t2_v1#!~#f1"), - }, - { - db: "db1", - rp: "rp1", - key: []byte("m2,t1=t1_v2,t2=t2_v1#!~#f1"), - }, - { - db: "db1", - rp: "rp1", - key: []byte("m2,t1=t1_v1,t2=t2_v2#!~#f1"), - }, - { - db: "db1", - rp: "rp1", - key: []byte("m2,t1=t1_v2,t2=t2_v2#!~#f1"), - }, - { - db: "db1", - rp: "rp1", - key: []byte("m2,t1=t1_v2,t2=t2_v2#!~#f2"), - }, - { - db: "db1", - rp: "rp2", - key: []byte("m2,t1=t1_v1,t2=t2_v1#!~#f1"), - }, - { - db: "db1", - rp: "rp2", - key: []byte("m2,t1=t1_v2,t2=t2_v1#!~#f1"), - }, - { - db: "db1", - rp: "rp2", - key: []byte("m2,t1=t1_v1,t2=t2_v2#!~#f1"), - }, - { - db: "db1", - rp: "rp2", - key: []byte("m2,t1=t1_v2,t2=t2_v2#!~#f1"), - }, - { - db: "db1", - rp: "rp2", - key: []byte("m2,t1=t1_v2,t2=t2_v2#!~#f2"), - }, - { - db: "db2", - rp: "rp1", - key: []byte("m1,t1=t1_v1,t2=t2_v1#!~#f1"), - }, - { - db: "db2", - rp: "rp1", - key: []byte("m1,t1=t1_v2,t2=t2_v1#!~#f1"), - }, - { - db: "db2", - rp: "rp1", - key: []byte("m1,t1=t1_v1,t2=t2_v2#!~#f1"), - }, - { - db: 
"db2", - rp: "rp1", - key: []byte("m1,t1=t1_v2,t2=t2_v2#!~#f1"), - }, - { - db: "db2", - rp: "rp1", - key: []byte("m1,t1=t1_v2,t2=t2_v2#!~#f2"), - }, - { - db: "db2", - rp: "rp2", - key: []byte("m1,t1=t1_v1,t2=t2_v1#!~#f1"), - }, - { - db: "db2", - rp: "rp2", - key: []byte("m1,t1=t1_v2,t2=t2_v1#!~#f1"), - }, - { - db: "db2", - rp: "rp2", - key: []byte("m1,t1=t1_v1,t2=t2_v2#!~#f1"), - }, - { - db: "db2", - rp: "rp2", - key: []byte("m1,t1=t1_v2,t2=t2_v2#!~#f1"), - }, - { - db: "db2", - rp: "rp2", - key: []byte("m1,t1=t1_v2,t2=t2_v2#!~#f2"), - }, - { - db: "db2", - rp: "rp1", - key: []byte("m2,t1=t1_v1,t2=t2_v1#!~#f1"), - }, - { - db: "db2", - rp: "rp1", - key: []byte("m2,t1=t1_v2,t2=t2_v1#!~#f1"), - }, - { - db: "db2", - rp: "rp1", - key: []byte("m2,t1=t1_v1,t2=t2_v2#!~#f1"), - }, - { - db: "db2", - rp: "rp1", - key: []byte("m2,t1=t1_v2,t2=t2_v2#!~#f1"), - }, - { - db: "db2", - rp: "rp1", - key: []byte("m2,t1=t1_v2,t2=t2_v2#!~#f2"), - }, - { - db: "db2", - rp: "rp2", - key: []byte("m2,t1=t1_v1,t2=t2_v1#!~#f1"), - }, - { - db: "db2", - rp: "rp2", - key: []byte("m2,t1=t1_v2,t2=t2_v1#!~#f1"), - }, - { - db: "db2", - rp: "rp2", - key: []byte("m2,t1=t1_v1,t2=t2_v2#!~#f1"), - }, - { - db: "db2", - rp: "rp2", - key: []byte("m2,t1=t1_v2,t2=t2_v2#!~#f1"), - }, - { - db: "db2", - rp: "rp2", - key: []byte("m2,t1=t1_v2,t2=t2_v2#!~#f2"), - }, - } - - results := map[string]map[string]map[string]*result{ - "db1": { - "rp1": { - "m1": {2, 4, 5}, - "m2": {2, 4, 5}, - "": {4, 8, 10}, - }, - "rp2": { - "m1": {3, 4, 5}, - "m2": {2, 4, 5}, - "": {5, 8, 10}, - }, - "": { - "": {9, 16, 20}, - }, - }, - "db2": { - "rp1": { - "m1": {2, 4, 5}, - "m2": {2, 4, 5}, - "": {4, 8, 10}, - }, - "rp2": { - "m1": {2, 4, 5}, - "m2": {2, 4, 5}, - "": {4, 8, 10}, - }, - "": { - "": {8, 16, 20}, - }, - }, - "": { - "": { - "": {17, 32, 40}, - }, - }, - } - - testLoop(t, false, true, totalDepth, tests, results) - testLoop(t, true, true, totalDepth, tests, results) - testLoop(t, false, false, totalDepth, tests, results) - testLoop(t, true, false, totalDepth, tests, results) - -} - -func testLoop(t *testing.T, detailed bool, exact bool, totalDepth int, tests []test, results map[string]map[string]map[string]*result) { - factory := CreateNodeFactory(detailed, exact) - tree := factory.NewNode(totalDepth == 0) - - wg := sync.WaitGroup{} - tf := func() { - for i := range tests { - seriesKey, field, _ := bytes.Cut(tests[i].key, []byte("#!~#")) - measurement, tags := models.ParseKey(seriesKey) - tree.Record(0, totalDepth, tests[i].db, tests[i].rp, measurement, tests[i].key, field, tags) - } - wg.Done() - } - const concurrency = 5 - wg.Add(concurrency) - for j := 0; j < concurrency; j++ { - go tf() - } - wg.Wait() - - for d, db := range tree.Children() { - for r, rp := range db.Children() { - for m, measure := range rp.Children() { - checkNode(t, measure, results[d][r][m], d, r, m) - } - checkNode(t, rp, results[d][r][""], d, r, "") - } - checkNode(t, db, results[d][""][""], d, "", "") - } - checkNode(t, tree, results[""][""][""], "", "", "") -} - -func checkNode(t *testing.T, measure RollupNode, results *result, d string, r string, m string) { - mr, ok := measure.(NodeWrapper) - if !ok { - t.Fatalf("internal error: expected a NodeWrapper type") - } - - switch node := mr.RollupNode.(type) { - case *detailedNode: - require.Equalf(t, results.series, node.Count(), "series count wrong. db: %q, rp: %q, ms: %q", d, r, m) - require.Equalf(t, results.fields, node.fields.Count(), "field count wrong. 
db: %q, rp: %q, ms: %q", d, r, m) - tagSum := uint64(0) - for _, t := range node.tags { - tagSum += t.Count() - } - require.Equalf(t, results.tags, tagSum, "tag value count wrong. db: %q, rp: %q, ms: %q", d, r, m) - case *simpleNode: - require.Equalf(t, results.series, node.Count(), "series count wrong. db: %q, rp: %q, ms: %q", d, r, m) - default: - t.Fatalf("internal error: unknown node type") - } -} diff --git a/cmd/influxd/inspect/report_db/report_db.go b/cmd/influxd/inspect/report_db/report_db.go deleted file mode 100644 index 30bdd42480c..00000000000 --- a/cmd/influxd/inspect/report_db/report_db.go +++ /dev/null @@ -1,189 +0,0 @@ -package report_db - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "os" - "text/tabwriter" - - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/report_db/aggregators" - "github.com/influxdata/influxdb/v2/kit/cli" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/reporthelper" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/spf13/cobra" - "github.com/spf13/viper" - "golang.org/x/sync/errgroup" -) - -// ReportDB represents the program execution for "influxd report-db". -type ReportDB struct { - // Standard input/output, overridden for testing. - Stderr io.Writer - Stdout io.Writer - - dbPath string - exact bool - detailed bool - // How many goroutines to dedicate to calculating cardinality. - concurrency int - // t, d, r, m for Total, Database, Retention Policy, Measurement - rollup string -} - -func NewReportDBCommand(v *viper.Viper) (*cobra.Command, error) { - flags := &ReportDB{ - Stderr: os.Stderr, - Stdout: os.Stdout, - } - - cmd := &cobra.Command{ - Use: "report-db", - Short: "Estimates cloud 2 cardinality for a database", - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, _ []string) error { - return reportDBRunE(cmd, flags) - }, - } - opts := []cli.Opt{ - { - DestP: &flags.dbPath, - Flag: "db-path", - Desc: "path to database", - Required: true, - }, - { - DestP: &flags.concurrency, - Flag: "c", - Desc: "set worker concurrency, defaults to one", - Default: 1, - }, - { - DestP: &flags.detailed, - Flag: "detailed", - Desc: "include counts for fields, tags", - Default: false, - }, - { - DestP: &flags.exact, - Flag: "exact", - Desc: "report exact counts", - Default: false, - }, - { - DestP: &flags.rollup, - Flag: "rollup", - Desc: "rollup level - t: total, b: bucket, r: retention policy, m: measurement", - Default: "m", - }, - } - if err := cli.BindOptions(v, cmd, opts); err != nil { - return nil, err - } - return cmd, nil -} - -func reportDBRunE(_ *cobra.Command, reportdb *ReportDB) error { - var legalRollups = map[string]int{"m": 3, "r": 2, "b": 1, "t": 0} - if reportdb.dbPath == "" { - return errors.New("path to database must be provided") - } - - totalDepth, ok := legalRollups[reportdb.rollup] - - if !ok { - return fmt.Errorf("invalid rollup specified: %q", reportdb.rollup) - } - - factory := aggregators.CreateNodeFactory(reportdb.detailed, reportdb.exact) - totalsTree := factory.NewNode(totalDepth == 0) - - g, ctx := errgroup.WithContext(context.Background()) - g.SetLimit(reportdb.concurrency) - processTSM := func(bucket, rp, id, path string) error { - file, err := os.OpenFile(path, os.O_RDONLY, 0600) - if err != nil { - _, _ = fmt.Fprintf(reportdb.Stderr, "error: %s: %v. 
Skipping.\n", path, err) - return nil - } - - reader, err := tsm1.NewTSMReader(file) - if err != nil { - _, _ = fmt.Fprintf(reportdb.Stderr, "error: %s: %v. Skipping.\n", file.Name(), err) - // NewTSMReader won't close the file handle on failure, so do it here. - _ = file.Close() - return nil - } - defer func() { - // The TSMReader will close the underlying file handle here. - if err := reader.Close(); err != nil { - _, _ = fmt.Fprintf(reportdb.Stderr, "error closing: %s: %v.\n", file.Name(), err) - } - }() - - seriesCount := reader.KeyCount() - for i := 0; i < seriesCount; i++ { - func() { - key, _ := reader.KeyAt(i) - seriesKey, field, _ := bytes.Cut(key, []byte("#!~#")) - measurement, tags := models.ParseKey(seriesKey) - totalsTree.Record(0, totalDepth, bucket, rp, measurement, key, field, tags) - }() - } - return nil - } - done := ctx.Done() - err := reporthelper.WalkShardDirs(reportdb.dbPath, func(bucket, rp, id, path string) error { - select { - case <-done: - return nil - default: - g.Go(func() error { - return processTSM(bucket, rp, id, path) - }) - return nil - } - }) - - if err != nil { - _, _ = fmt.Fprintf(reportdb.Stderr, "%s: %v\n", reportdb.dbPath, err) - return err - } - err = g.Wait() - if err != nil { - _, _ = fmt.Fprintf(reportdb.Stderr, "%s: %v\n", reportdb.dbPath, err) - return err - } - - tw := tabwriter.NewWriter(reportdb.Stdout, 8, 2, 1, ' ', 0) - - if err = factory.PrintHeader(tw); err != nil { - return err - } - if err = factory.PrintDivider(tw); err != nil { - return err - } - for d, bucket := range totalsTree.Children() { - for r, rp := range bucket.Children() { - for m, measure := range rp.Children() { - err = measure.Print(tw, true, fmt.Sprintf("%q", d), fmt.Sprintf("%q", r), fmt.Sprintf("%q", m)) - if err != nil { - return err - } - } - if err = rp.Print(tw, false, fmt.Sprintf("%q", d), fmt.Sprintf("%q", r), ""); err != nil { - return err - } - } - if err = bucket.Print(tw, false, fmt.Sprintf("%q", d), "", ""); err != nil { - return err - } - } - if err = totalsTree.Print(tw, false, "Total"+factory.EstTitle, "", ""); err != nil { - return err - } - return tw.Flush() -} diff --git a/cmd/influxd/inspect/report_tsi/report_tsi.go b/cmd/influxd/inspect/report_tsi/report_tsi.go deleted file mode 100644 index 68c2bff8b56..00000000000 --- a/cmd/influxd/inspect/report_tsi/report_tsi.go +++ /dev/null @@ -1,463 +0,0 @@ -// Package report_tsi provides a report about the series cardinality in one or more TSI indexes. -package report_tsi - -import ( - "errors" - "fmt" - "math" - "os" - "path/filepath" - "runtime" - "sort" - "strconv" - "sync/atomic" - "text/tabwriter" - - "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" - "github.com/spf13/cobra" -) - -const ( - // Number of series IDs to stored in slice before we convert to a roaring - // bitmap. Roaring bitmaps have a non-trivial initial cost to construct. - useBitmapN = 25 -) - -// reportTSI represents the program execution for "inspect report-tsi". -type reportTSI struct { - // Flags - bucketId string // required - dataPath string - topN int - concurrency int - - // Variables for calculating and storing cardinalities - sfile *tsdb.SeriesFile - shardPaths map[uint64]string - shardIdxs map[uint64]*tsi1.Index - cardinalities map[uint64]map[string]*cardinality -} - -// NewReportTSICommand returns a new instance of Command with default setting applied. 
-func NewReportTSICommand() *cobra.Command { - var arguments reportTSI - cmd := &cobra.Command{ - Use: "report-tsi", - Short: "Reports the cardinality of TSI files", - Long: `This command will analyze TSI files within a specified bucket, reporting the -cardinality of data within the files, segmented by shard and further by measurement.`, - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - - arguments.shardPaths = map[uint64]string{} - arguments.shardIdxs = map[uint64]*tsi1.Index{} - arguments.cardinalities = map[uint64]map[string]*cardinality{} - - return arguments.run(cmd) - }, - } - - cmd.Flags().StringVarP(&arguments.bucketId, "bucket-id", "b", "", "Required - specify which bucket to report on. A bucket id must be a base-16 string") - cmd.Flags().StringVar(&arguments.dataPath, "data-path", os.Getenv("HOME")+"/.influxdbv2/engine/data", "Path to data directory") - cmd.Flags().IntVarP(&arguments.topN, "top", "t", 0, "Limit results to top n") - cmd.Flags().IntVarP(&arguments.concurrency, "concurrency", "c", runtime.GOMAXPROCS(0), "How many concurrent workers to run") - cmd.MarkFlagRequired("bucket-id") - - return cmd -} - -// Run executes the command. -func (report *reportTSI) run(cmd *cobra.Command) error { - // Get all shards from specified bucket - dirEntries, err := os.ReadDir(filepath.Join(report.dataPath, report.bucketId, "autogen")) - - if err != nil { - return err - } - - for _, entry := range dirEntries { - - if !entry.IsDir() { - continue - } - - if entry.Name() == tsdb.SeriesFileDirectory || entry.Name() == "index" { - continue - } - - id, err := strconv.Atoi(entry.Name()) - if err != nil { - continue - } - - report.shardPaths[uint64(id)] = filepath.Join(report.dataPath, report.bucketId, "autogen", entry.Name()) - } - - if len(report.shardPaths) == 0 { - cmd.Printf("No shards under %s\n", filepath.Join(report.dataPath, report.bucketId, "autogen")) - return nil - } - - report.sfile = tsdb.NewSeriesFile(filepath.Join(report.dataPath, report.bucketId, tsdb.SeriesFileDirectory)) - - config := logger.NewConfig() - newLogger, err := config.New(os.Stderr) - if err != nil { - return err - } - report.sfile.Logger = newLogger - - if err := report.sfile.Open(); err != nil { - return err - } - defer report.sfile.Close() - - // Blocks until all work done. - if err = report.calculateCardinalities(report.cardinalityByMeasurement); err != nil { - return err - } - - allIDs := make([]uint64, 0, len(report.shardIdxs)) - - for id := range report.shardIdxs { - allIDs = append(allIDs, id) - } - - // Print summary. - if err = report.printSummaryByMeasurement(cmd); err != nil { - return err - } - - sort.Slice(allIDs, func(i int, j int) bool { return allIDs[i] < allIDs[j] }) - - for _, id := range allIDs { - if err := report.printShardByMeasurement(cmd, id); err != nil { - return err - } - } - return nil -} - -// calculateCardinalities calculates the cardinalities of the set of shard being -// worked on concurrently. The provided function determines how cardinality is -// calculated and broken down. -func (report *reportTSI) calculateCardinalities(fn func(id uint64) error) error { - // Get list of shards to work on. - shardIDs := make([]uint64, 0, len(report.shardPaths)) - for id := range report.shardPaths { - pth := filepath.Join(report.shardPaths[id], "index") - - // Verify directory is an index before opening it. 
- if ok, err := tsi1.IsIndexDir(pth); err != nil { - return err - } else if !ok { - return fmt.Errorf("not a TSI index directory: %s", pth) - } - - report.shardIdxs[id] = tsi1.NewIndex(report.sfile, - "", - tsi1.WithPath(pth), - tsi1.DisableCompactions(), - ) - - // Initialise cardinality set to store cardinalities for each shard - report.cardinalities[id] = map[string]*cardinality{} - - shardIDs = append(shardIDs, id) - } - - errC := make(chan error, len(shardIDs)) - var maxi uint32 // index of maximum shard being worked on. - for k := 0; k < report.concurrency; k++ { - go func() { - for { - i := int(atomic.AddUint32(&maxi, 1) - 1) // Get next shard to work on. - if i >= len(shardIDs) { - return // No more work. - } - errC <- fn(shardIDs[i]) - } - }() - } - - // Check for error - for i := 0; i < cap(errC); i++ { - if err := <-errC; err != nil { - return err - } - } - - return nil -} - -// Cardinality struct and methods -type cardinality struct { - name []byte - short []uint32 - set *tsdb.SeriesIDSet -} - -func (c *cardinality) add(x uint64) { - if c.set != nil { - c.set.AddNoLock(x) - return - } - - c.short = append(c.short, uint32(x)) // Series IDs never get beyond 2^32 - - // Cheaper to store in bitmap. - if len(c.short) > useBitmapN { - c.set = tsdb.NewSeriesIDSet() - for _, s := range c.short { - c.set.AddNoLock(uint64(s)) - } - c.short = nil - return - } -} - -func (c *cardinality) cardinality() int64 { - if c == nil || (c.short == nil && c.set == nil) { - return 0 - } - - if c.short != nil { - return int64(len(c.short)) - } - return int64(c.set.Cardinality()) -} - -type cardinalities []*cardinality - -func (a cardinalities) Len() int { return len(a) } -func (a cardinalities) Less(i, j int) bool { return a[i].cardinality() < a[j].cardinality() } -func (a cardinalities) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func (report *reportTSI) cardinalityByMeasurement(shardID uint64) error { - if err := report.shardIdxs[shardID].Open(); err != nil { - return err - } - - idx := report.shardIdxs[shardID] - itr, err := idx.MeasurementIterator() - if err != nil { - return err - } else if itr == nil { - return nil - } - defer itr.Close() - -OUTER: - for { - name, err := itr.Next() - if err != nil { - return err - } else if name == nil { - break OUTER - } - - // Get series ID set to track cardinality under measurement. - c, ok := report.cardinalities[shardID][string(name)] - if !ok { - c = &cardinality{name: name} - report.cardinalities[shardID][string(name)] = c - } - - sitr, err := idx.MeasurementSeriesIDIterator(name) - if err != nil { - return err - } else if sitr == nil { - continue - } - - var e tsdb.SeriesIDElem - for e, err = sitr.Next(); err == nil && e.SeriesID != 0; e, err = sitr.Next() { - if e.SeriesID > math.MaxUint32 { - return fmt.Errorf("series ID is too large: %d (max %d)", e.SeriesID, uint32(math.MaxUint32)) - } - c.add(e.SeriesID) - } - sitr.Close() - - if err != nil { - return err - } - } - return nil -} - -type result struct { - name []byte - count int64 - - // For low cardinality measurements just track series using map - lowCardinality map[uint32]struct{} - - // For higher cardinality measurements track using bitmap. - set *tsdb.SeriesIDSet -} - -func (r *result) addShort(ids []uint32) { - // There is already a bitset of this result. 
- if r.set != nil { - for _, id := range ids { - r.set.AddNoLock(uint64(id)) - } - return - } - - // Still tracking low cardinality sets - if r.lowCardinality == nil { - r.lowCardinality = map[uint32]struct{}{} - } - - for _, id := range ids { - r.lowCardinality[id] = struct{}{} - } - - // Cardinality is large enough that we will benefit from using a bitmap - if len(r.lowCardinality) > useBitmapN { - r.set = tsdb.NewSeriesIDSet() - for id := range r.lowCardinality { - r.set.AddNoLock(uint64(id)) - } - r.lowCardinality = nil - } -} - -func (r *result) merge(other *tsdb.SeriesIDSet) { - if r.set == nil { - r.set = tsdb.NewSeriesIDSet() - for id := range r.lowCardinality { - r.set.AddNoLock(uint64(id)) - } - r.lowCardinality = nil - } - r.set.Merge(other) -} - -type results []*result - -func (a results) Len() int { return len(a) } -func (a results) Less(i, j int) bool { return a[i].count < a[j].count } -func (a results) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func (report *reportTSI) printSummaryByMeasurement(cmd *cobra.Command) error { - // Get global set of measurement names across shards. - idxs := &tsdb.IndexSet{SeriesFile: report.sfile} - for _, idx := range report.shardIdxs { - idxs.Indexes = append(idxs.Indexes, idx) - } - - mitr, err := idxs.MeasurementIterator() - if err != nil { - return err - } else if mitr == nil { - return errors.New("got nil measurement iterator for index set") - } - defer mitr.Close() - - var name []byte - var totalCardinality int64 - measurements := results{} - for name, err = mitr.Next(); err == nil && name != nil; name, err = mitr.Next() { - res := &result{name: name} - for _, shardCards := range report.cardinalities { - other, ok := shardCards[string(name)] - if !ok { - continue // this shard doesn't have anything for this measurement. - } - - if other.short != nil { // low cardinality case - res.addShort(other.short) - } else if other.set != nil { // High cardinality case - res.merge(other.set) - } - } - - // Determine final cardinality and allow intermediate structures to be - // GCd. - if res.lowCardinality != nil { - res.count = int64(len(res.lowCardinality)) - } else { - res.count = int64(res.set.Cardinality()) - } - totalCardinality += res.count - res.set = nil - res.lowCardinality = nil - measurements = append(measurements, res) - } - - if err != nil { - return err - } - - // sort measurements by cardinality. - sort.Sort(sort.Reverse(measurements)) - - if report.topN > 0 { - // There may not be "topN" measurement cardinality to sub-slice. 
- n := int(math.Min(float64(report.topN), float64(len(measurements)))) - measurements = measurements[:n] - } - - tw := tabwriter.NewWriter(cmd.OutOrStdout(), 8, 8, 1, '\t', tabwriter.AlignRight) - - fmt.Fprintf(tw, "Summary\nDatabase Path: %s\nCardinality (exact): %d\n\n", filepath.Join(report.dataPath, report.bucketId), totalCardinality) - fmt.Fprint(tw, "Measurement\tCardinality (exact)\n\n") - for _, res := range measurements { - fmt.Fprintf(tw, "%q\t%d\t\n", res.name, res.count) - } - - if err := tw.Flush(); err != nil { - return err - } - fmt.Fprint(tw, "\n\n") - - return nil -} - -func (report *reportTSI) printShardByMeasurement(cmd *cobra.Command, id uint64) error { - defer report.shardIdxs[id].Close() - - allMap, ok := report.cardinalities[id] - if !ok { - return nil - } - - var totalCardinality int64 - all := make(cardinalities, 0, len(allMap)) - for _, card := range allMap { - n := card.cardinality() - if n == 0 { - continue - } - - totalCardinality += n - all = append(all, card) - } - - sort.Sort(sort.Reverse(all)) - - // Trim to top-n - if report.topN > 0 { - // There may not be "topN" measurement cardinality to sub-slice. - n := int(math.Min(float64(report.topN), float64(len(all)))) - all = all[:n] - } - - tw := tabwriter.NewWriter(cmd.OutOrStdout(), 8, 8, 1, '\t', 0) - fmt.Fprintf(tw, "===============\nShard ID: %d\nPath: %s\nCardinality (exact): %d\n\n", id, report.shardPaths[id], totalCardinality) - fmt.Fprint(tw, "Measurement\tCardinality (exact)\n\n") - for _, card := range all { - fmt.Fprintf(tw, "%q\t%d\t\n", card.name, card.cardinality()) - } - fmt.Fprint(tw, "===============\n\n") - if err := tw.Flush(); err != nil { - return err - } - - return nil -} diff --git a/cmd/influxd/inspect/report_tsi/report_tsi_test.go b/cmd/influxd/inspect/report_tsi/report_tsi_test.go deleted file mode 100644 index 89e0fe386e7..00000000000 --- a/cmd/influxd/inspect/report_tsi/report_tsi_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package report_tsi - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strconv" - "testing" - - "github.com/influxdata/influxdb/v2/pkg/tar" - "github.com/spf13/cobra" - "github.com/stretchr/testify/require" -) - -type cmdParams struct { - testName string - bucketId string - concurrency int - dataPath string - topN int - expectedOut string - expectedOuts []string - expectErr bool -} - -const ( - bucketID = "12345" - lowCard = "test-db-low-cardinality" - highCard = "test-db-high-cardinality" -) - -func Test_ReportTSI_GeneratedData(t *testing.T) { - shardlessPath := newTempDirectories(t, false) - shardPath := newTempDirectories(t, true) - - tests := []cmdParams{ - { - testName: "Bucket_Does_Not_Exist", - expectErr: true, - expectedOut: fmt.Sprintf("open %s", filepath.Join(bucketID, "autogen")), - }, - { - testName: "Bucket_Contains_No_Shards", - dataPath: shardlessPath, - expectedOut: fmt.Sprintf("No shards under %s", filepath.Join(shardlessPath, bucketID, "autogen")), - }, - { - testName: "Invalid_Index_Dir", - dataPath: shardPath, - expectErr: true, - expectedOut: fmt.Sprintf("not a TSI index directory: %s", filepath.Join(shardPath, bucketID, "autogen", "1", "index")), - }, - } - - for _, tc := range tests { - tc := tc - t.Run(tc.testName, func(t *testing.T) { - runCommand(t, tc) - }) - } -} - -func Test_ReportTSI_TestData(t *testing.T) { - - // Create temp directory for extracted test data - path := t.TempDir() - - // Extract test data - file, err := os.Open("../tsi-test-data.tar.gz") - require.NoError(t, err) - 
require.NoError(t, tar.Untar(path, file)) - require.NoError(t, file.Close()) - - tests := []cmdParams{ - { - testName: "Valid_No_Roaring_Bitmap", - bucketId: lowCard, - dataPath: path, - expectedOuts: []string{ - fmt.Sprintf("Summary\nDatabase Path: %s\nCardinality (exact): 5", filepath.Join(path, lowCard)), - fmt.Sprintf("Shard ID: 1\nPath: %s\nCardinality (exact): 5", filepath.Join(path, lowCard, "autogen", "1")), - "\"m0\"\t1\t\n\"m1\"\t1\t\n\"m2\"\t1\t\n\"m3\"\t1\t\n\"m4\"\t1\t", - }, - }, - { - testName: "Valid_Roaring_Bitmap", - bucketId: highCard, - dataPath: path, - expectedOuts: []string{ - fmt.Sprintf("Summary\nDatabase Path: %s\nCardinality (exact): 31", filepath.Join(path, highCard)), - fmt.Sprintf("Shard ID: 1\nPath: %s\nCardinality (exact): 31", filepath.Join(path, highCard, "autogen", "1")), - "\"m0\"\t27\t\n\"m1\"\t1\t\n\"m2\"\t1\t\n\"m3\"\t1\t\n\"m4\"\t1\t", - }, - }, - { - testName: "Valid_TopN", - bucketId: lowCard, - dataPath: path, - topN: 2, - expectedOuts: []string{ - fmt.Sprintf("Summary\nDatabase Path: %s\nCardinality (exact): 5", filepath.Join(path, lowCard)), - fmt.Sprintf("Shard ID: 1\nPath: %s\nCardinality (exact): 5", filepath.Join(path, lowCard, "autogen", "1")), - "\"m0\"\t1\t\n\"m1\"\t1\t\n\n\n", - }, - }, - } - - for _, tc := range tests { - tc := tc - t.Run(tc.testName, func(t *testing.T) { - tc.concurrency = 1 - runCommand(t, tc) - }) - } -} - -func newTempDirectories(t *testing.T, withShards bool) string { - t.Helper() - - dataDir := t.TempDir() - - err := os.MkdirAll(filepath.Join(dataDir, bucketID, "autogen"), 0777) - require.NoError(t, err) - - if withShards { - // Create shard and index directory within it, with one partition - err = os.MkdirAll(filepath.Join(dataDir, bucketID, "autogen", "1", "index", "0"), 0777) - require.NoError(t, err) - } - - return dataDir -} - -func initCommand(t *testing.T, params cmdParams) *cobra.Command { - t.Helper() - - // Creates new command and sets args - cmd := NewReportTSICommand() - - allArgs := make([]string, 0) - - if params.bucketId == "" { - allArgs = append(allArgs, "--bucket-id", bucketID) - } else { - allArgs = append(allArgs, "--bucket-id", params.bucketId) - } - - if params.concurrency != runtime.GOMAXPROCS(0) { - allArgs = append(allArgs, "--concurrency", strconv.Itoa(params.concurrency)) - } - - if params.dataPath != os.Getenv("HOME")+"/.influxdbv2/engine/data" { - allArgs = append(allArgs, "--data-path", params.dataPath) - } - - if params.topN != 0 { - allArgs = append(allArgs, "--top", strconv.Itoa(params.topN)) - } - - cmd.SetArgs(allArgs) - - return cmd -} - -func getOutput(t *testing.T, cmd *cobra.Command) []byte { - t.Helper() - - b := &bytes.Buffer{} - cmd.SetOut(b) - cmd.SetErr(b) - require.NoError(t, cmd.Execute()) - - out, err := io.ReadAll(b) - require.NoError(t, err) - - return out -} - -func runCommand(t *testing.T, params cmdParams) { - t.Helper() - - cmd := initCommand(t, params) - - if params.expectErr { - require.Contains(t, cmd.Execute().Error(), params.expectedOut) - return - } - - // Get output - out := getOutput(t, cmd) - - // Check output - if params.expectedOut != "" { - require.Contains(t, string(out), params.expectedOut) - } else { - for _, output := range params.expectedOuts { - require.Contains(t, string(out), output) - } - } -} diff --git a/cmd/influxd/inspect/report_tsm/report_tsm.go b/cmd/influxd/inspect/report_tsm/report_tsm.go deleted file mode 100644 index 5f86e56de78..00000000000 --- a/cmd/influxd/inspect/report_tsm/report_tsm.go +++ /dev/null @@ -1,366 +0,0 @@ -package 
report_tsm - -import ( - "bytes" - "errors" - "fmt" - "math" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "text/tabwriter" - "time" - - "github.com/influxdata/influxdb/v2/internal/fs" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/retailnext/hllpp" - "github.com/spf13/cobra" -) - -type args struct { - dir string - pattern string - detailed bool - exact bool -} - -func NewReportTSMCommand() *cobra.Command { - var arguments args - cmd := &cobra.Command{ - Use: "report-tsm", - Short: "Run TSM report", - Long: ` -This command will analyze TSM files within a storage engine directory, reporting -the cardinality within the files as well as the time range that the point data -covers. -This command only interrogates the index within each file, and does not read any -block data. To reduce heap requirements, by default report-tsm estimates the -overall cardinality in the file set by using the HLL++ algorithm. Exact -cardinalities can be determined by using the --exact flag. -For each file, the following is output: - * The full filename; - * The series cardinality within the file; - * The number of series first encountered within the file; - * The min and max timestamp associated with TSM data in the file; and - * The time taken to load the TSM index and apply any tombstones. -The summary section then outputs the total time range and series cardinality for -the fileset. Depending on the --detailed flag, series cardinality is segmented -in the following ways: - * Series cardinality for each organization; - * Series cardinality for each bucket; - * Series cardinality for each measurement; - * Number of field keys for each measurement; and - * Number of tag values for each tag key.`, - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - - // Verify if shard dir - err := arguments.isShardDir(arguments.dir) - if arguments.detailed && err != nil { - return errors.New("--detailed only supported for shard dirs") - } - - return arguments.Run(cmd) - }, - } - - cmd.Flags().StringVarP(&arguments.pattern, "pattern", "", "", "only process TSM files containing pattern") - cmd.Flags().BoolVarP(&arguments.exact, "exact", "", false, "calculate and exact cardinality count. Warning, may use significant memory...") - cmd.Flags().BoolVarP(&arguments.detailed, "detailed", "", false, "emit series cardinality segmented by measurements, tag keys and fields. 
Warning, may take a while.") - - dir, err := fs.InfluxDir() - if err != nil { - panic(err) - } - dir = filepath.Join(dir, "engine/data") - cmd.Flags().StringVarP(&arguments.dir, "data-path", "", dir, "use provided data directory") - - return cmd -} - -func (a *args) isShardDir(dir string) error { - name := filepath.Base(dir) - if id, err := strconv.Atoi(name); err != nil || id < 1 { - return fmt.Errorf("not a valid shard dir: %s", dir) - } - - return nil -} - -func (a *args) Run(cmd *cobra.Command) error { - // Create the cardinality Counter - newCounterFn := NewHLLCounter - estTitle := " (est)" - if a.exact { - estTitle = "" - newCounterFn = NewExactCounter - } - - totalSeries := newCounterFn() - tagCardinalities := map[string]Counter{} - measCardinalities := map[string]Counter{} - fieldCardinalities := map[string]Counter{} - - dbCardinalities := map[string]Counter{} - - start := time.Now() - - tw := tabwriter.NewWriter(cmd.OutOrStdout(), 8, 2, 1, ' ', 0) - _, _ = fmt.Fprintln(tw, strings.Join([]string{"DB", "RP", "Shard", "File", "Series", "New" + estTitle, "Min Time", "Max Time", "Load Time"}, "\t")) - - minTime, maxTime := int64(math.MaxInt64), int64(math.MinInt64) - var fileCount int - if err := a.walkShardDirs(a.dir, func(db, rp, id, path string) error { - if a.pattern != "" && !strings.Contains(path, a.pattern) { - return nil - } - - file, err := os.OpenFile(path, os.O_RDONLY, 0600) - if err != nil { - _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "error opening %q, skipping: %v\n", path, err) - return nil - } - - loadStart := time.Now() - reader, err := tsm1.NewTSMReader(file) - if err != nil { - _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "error reading %q, skipping: %v\n", file.Name(), err) - return nil - } - loadTime := time.Since(loadStart) - fileCount++ - - dbCount, ok := dbCardinalities[db] - if !ok { - dbCount = newCounterFn() - dbCardinalities[db] = dbCount - } - - oldCount := dbCount.Count() - - seriesCount := reader.KeyCount() - for i := 0; i < seriesCount; i++ { - key, _ := reader.KeyAt(i) - totalSeries.Add(key) - dbCount.Add(key) - - if a.detailed { - seriesKey, field, _ := bytes.Cut(key, []byte("#!~#")) - measurement, tags := models.ParseKey(seriesKey) - - measCount, ok := measCardinalities[measurement] - if !ok { - measCount = newCounterFn() - measCardinalities[measurement] = measCount - } - measCount.Add(key) - - fieldCount, ok := fieldCardinalities[measurement] - if !ok { - fieldCount = newCounterFn() - fieldCardinalities[measurement] = fieldCount - } - fieldCount.Add(field) - - for _, t := range tags { - tagCount, ok := tagCardinalities[string(t.Key)] - if !ok { - tagCount = newCounterFn() - tagCardinalities[string(t.Key)] = tagCount - } - tagCount.Add(t.Value) - } - } - } - minT, maxT := reader.TimeRange() - if minT < minTime { - minTime = minT - } - if maxT > maxTime { - maxTime = maxT - } - err = reader.Close() - if err != nil { - return fmt.Errorf("failed to close TSM Reader: %v", err) - } - - _, _ = fmt.Fprintln(tw, strings.Join([]string{ - db, rp, id, - filepath.Base(file.Name()), - strconv.FormatInt(int64(seriesCount), 10), - strconv.FormatInt(int64(dbCount.Count()-oldCount), 10), - time.Unix(0, minT).UTC().Format(time.RFC3339Nano), - time.Unix(0, maxT).UTC().Format(time.RFC3339Nano), - loadTime.String(), - }, "\t")) - if a.detailed { - err = tw.Flush() - if err != nil { - return fmt.Errorf("failed to flush tabwriter: %v", err) - } - } - return nil - }); err != nil { - return err - } - - err := tw.Flush() - if err != nil { - return fmt.Errorf("failed to flush tabwriter: 
%v", err) - } - - printSummary(cmd, printArgs{ - fileCount: fileCount, - minTime: minTime, - maxTime: maxTime, - estTitle: estTitle, - totalSeries: totalSeries, - detailed: a.detailed, - tagCardinalities: tagCardinalities, - measCardinalities: measCardinalities, - fieldCardinalities: fieldCardinalities, - dbCardinalities: dbCardinalities, - }) - - cmd.Printf("Completed in %s\n", time.Since(start)) - return nil -} - -type printArgs struct { - fileCount int - minTime, maxTime int64 - estTitle string - totalSeries Counter - detailed bool - - tagCardinalities map[string]Counter - measCardinalities map[string]Counter - fieldCardinalities map[string]Counter - dbCardinalities map[string]Counter -} - -func printSummary(cmd *cobra.Command, p printArgs) { - cmd.Printf("\nSummary:") - cmd.Printf(" Files: %d\n", p.fileCount) - cmd.Printf(" Time Range: %s - %s\n", - time.Unix(0, p.minTime).UTC().Format(time.RFC3339Nano), - time.Unix(0, p.maxTime).UTC().Format(time.RFC3339Nano), - ) - cmd.Printf(" Duration: %s \n\n", time.Unix(0, p.maxTime).Sub(time.Unix(0, p.minTime))) - - cmd.Printf("Statistics\n") - cmd.Printf(" Series:\n") - for db, counts := range p.dbCardinalities { - cmd.Printf(" - %s%s: %d (%d%%)\n", db, p.estTitle, counts.Count(), int(float64(counts.Count())/float64(p.totalSeries.Count())*100)) - } - cmd.Printf(" Total%s: %d\n", p.estTitle, p.totalSeries.Count()) - - if p.detailed { - cmd.Printf("\n Measurements (est):\n") - for _, t := range sortKeys(p.measCardinalities) { - cmd.Printf(" - %v: %d (%d%%)\n", t, p.measCardinalities[t].Count(), int((float64(p.measCardinalities[t].Count())/float64(p.totalSeries.Count()))*100)) - } - - cmd.Printf("\n Fields (est):\n") - for _, t := range sortKeys(p.fieldCardinalities) { - cmd.Printf(" - %v: %d\n", t, p.fieldCardinalities[t].Count()) - } - - cmd.Printf("\n Tags (est):\n") - for _, t := range sortKeys(p.tagCardinalities) { - cmd.Printf(" - %v: %d\n", t, p.tagCardinalities[t].Count()) - } - } -} - -// sortKeys is a quick helper to return the sorted set of a map's keys -func sortKeys(vals map[string]Counter) (keys []string) { - for k := range vals { - keys = append(keys, k) - } - sort.Strings(keys) - - return keys -} - -func (a *args) walkShardDirs(root string, fn func(db, rp, id, path string) error) error { - type location struct { - db, rp, id, path string - } - - var tsms []location - if err := filepath.WalkDir(root, func(path string, info os.DirEntry, err error) error { - if err != nil { - return err - } - - if info.IsDir() { - return nil - } - - if filepath.Ext(info.Name()) == "."+tsm1.TSMFileExtension { - shardDir := filepath.Dir(path) - - if err := a.isShardDir(shardDir); err != nil { - return err - } - absPath, err := filepath.Abs(path) - if err != nil { - return err - } - parts := strings.Split(absPath, string(filepath.Separator)) - db, rp, id := parts[len(parts)-4], parts[len(parts)-3], parts[len(parts)-2] - tsms = append(tsms, location{db: db, rp: rp, id: id, path: path}) - return nil - } - return nil - }); err != nil { - return err - } - - sort.Slice(tsms, func(i, j int) bool { - a, _ := strconv.Atoi(tsms[i].id) - b, _ := strconv.Atoi(tsms[j].id) - return a < b - }) - - for _, shard := range tsms { - if err := fn(shard.db, shard.rp, shard.id, shard.path); err != nil { - return err - } - } - return nil -} - -// Counter abstracts a method of counting keys. -type Counter interface { - Add(key []byte) - Count() uint64 -} - -// NewHLLCounter returns an approximate Counter using HyperLogLogs for cardinality estimation. 
-func NewHLLCounter() Counter { - return hllpp.New() -} - -// exactCounter returns an exact count for keys using counting all distinct items in a set. -type exactCounter struct { - m map[string]struct{} -} - -func (c *exactCounter) Add(key []byte) { - c.m[string(key)] = struct{}{} -} - -func (c *exactCounter) Count() uint64 { - return uint64(len(c.m)) -} - -func NewExactCounter() Counter { - return &exactCounter{ - m: make(map[string]struct{}), - } -} diff --git a/cmd/influxd/inspect/report_tsm/report_tsm_test.go b/cmd/influxd/inspect/report_tsm/report_tsm_test.go deleted file mode 100644 index 9e3b21539c9..00000000000 --- a/cmd/influxd/inspect/report_tsm/report_tsm_test.go +++ /dev/null @@ -1,246 +0,0 @@ -package report_tsm - -import ( - "bytes" - "encoding/binary" - "io" - "os" - "strconv" - "testing" - - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/stretchr/testify/require" -) - -func Test_Invalid_NotDir(t *testing.T) { - dir := t.TempDir() - file, err := os.CreateTemp(dir, "") - require.NoError(t, err) - - runCommand(t, testInfo{ - dir: file.Name(), - expectOut: []string{"Files: 0"}, - }) - require.NoError(t, file.Close()) -} - -func Test_Invalid_EmptyDir(t *testing.T) { - var info dirInfo - dir := makeTempDir(t, "", info) - defer os.RemoveAll(dir) - - runCommand(t, testInfo{ - dir: dir, - expectOut: []string{"Files: 0"}, - }) -} - -func Test_Invalid_NotTSMFile(t *testing.T) { - info := dirInfo{ - numFiles: 1, - tsm: tsmInfo{ - withFile: true, - emptyTSM: true, - }, - } - dir := makeTempDir(t, "", info) - defer os.RemoveAll(dir) - - runCommand(t, testInfo{ - dir: dir, - expectOut: []string{"Files: 0"}, - }) -} - -func Test_Invalid_EmptyFile(t *testing.T) { - info := dirInfo{ - numFiles: 1, - tsm: tsmInfo{ - withTSMFile: true, - emptyTSM: true, - }, - } - dir := makeTempDir(t, "", info) - defer os.RemoveAll(dir) - - runCommand(t, testInfo{ - dir: dir, - expectOut: []string{"error reading magic number of file"}, - }) -} - -func Test_Invalid_BadFile(t *testing.T) { - info := dirInfo{ - numFiles: 1, - tsm: tsmInfo{ - withTSMFile: true, - invalidTSM: true, - }, - } - dir := makeTempDir(t, "", info) - defer os.RemoveAll(dir) - - runCommand(t, testInfo{ - dir: dir, - expectOut: []string{"can only read from tsm file"}, - }) -} - -func Test_Invalid_BadFile_WithGoodFiles(t *testing.T) { - info := dirInfo{ - numFiles: 3, - tsm: tsmInfo{ - withTSMFile: true, - invalidTSM: true, - }, - } - dir := makeTempDir(t, "", info) - defer os.RemoveAll(dir) - - runCommand(t, testInfo{ - dir: dir, - expectOut: []string{ - "can only read from tsm file", // bad file - "Files: 2", // 2 other good files - }, - }) -} - -func Test_Valid_SingleFile(t *testing.T) { - info := dirInfo{ - numFiles: 1, - tsm: tsmInfo{ - withTSMFile: true, - }, - } - dir := makeTempDir(t, "", info) - - runCommand(t, testInfo{ - dir: dir, - expectOut: []string{"Files: 1"}, - }) -} - -func Test_Valid_MultipleFiles_SingleDir(t *testing.T) { - info := dirInfo{ - numFiles: 3, - tsm: tsmInfo{ - withTSMFile: true, - }, - } - dir := makeTempDir(t, "", info) - defer os.RemoveAll(dir) - - runCommand(t, testInfo{ - dir: dir, - expectOut: []string{"Files: 3"}, - }) -} - -func Test_Valid_MultipleFiles_MultipleDirs(t *testing.T) { - info := dirInfo{ - numFiles: 3, - subDirs: 3, - tsm: tsmInfo{ - withTSMFile: true, - }, - } - dir := makeTempDir(t, "", info) - defer os.RemoveAll(dir) - - runCommand(t, testInfo{ - dir: dir, - expectOut: []string{"Files: 12"}, - }) -} - -type dirInfo struct { - tsm tsmInfo - numFiles 
int - subDirs int - - subDirIndex int // Used for recursion only -} - -type tsmInfo struct { - withFile bool - withTSMFile bool - - emptyTSM bool - invalidTSM bool -} - -type testInfo struct { - dir string - expectOut []string -} - -func runCommand(t *testing.T, info testInfo) { - cmd := NewReportTSMCommand() - cmd.SetArgs([]string{"--data-path", info.dir}) - - b := bytes.NewBufferString("") - cmd.SetOut(b) - cmd.SetErr(b) - - require.NoError(t, cmd.Execute()) - - out, err := io.ReadAll(b) - require.NoError(t, err) - for _, entry := range info.expectOut { - require.Contains(t, string(out), entry) - } -} - -// makeTempDir returns the path to the root temporary directory -func makeTempDir(t *testing.T, startDir string, info dirInfo) string { - t.Helper() - - dir, err := os.MkdirTemp(startDir, strconv.Itoa(info.subDirIndex)) - require.NoError(t, err) - - // Make subdirectories - if info.subDirIndex == 0 { - for i := 0; i < info.subDirs; i++ { - info.subDirIndex += 1 - makeTempDir(t, dir, info) - } - } - - // Make TSM files - for i := 0; i < info.numFiles; i++ { - makeTempTSM(t, dir, info.tsm) - info.tsm.invalidTSM = false // only do 1 max invalid TSM file, as the tests desire - } - return dir -} - -func makeTempTSM(t *testing.T, dir string, info tsmInfo) { - t.Helper() - - if info.withFile || info.withTSMFile { - var ext string - if info.withTSMFile { - ext = tsm1.TSMFileExtension - } else { - ext = "txt" - } - file, err := os.CreateTemp(dir, "reporttsm*."+ext) - require.NoError(t, err) - - if !info.emptyTSM { - w, err := tsm1.NewTSMWriter(file) - require.NoError(t, err) - defer w.Close() - - values := []tsm1.Value{tsm1.NewValue(0, 1.0)} - require.NoError(t, w.Write([]byte("cpu"), values)) - - if info.invalidTSM { - require.NoError(t, binary.Write(file, binary.BigEndian, []byte("foobar\n"))) - } - - require.NoError(t, w.WriteIndex()) - } - } -} diff --git a/cmd/influxd/inspect/tsi-test-data.tar.gz b/cmd/influxd/inspect/tsi-test-data.tar.gz deleted file mode 100644 index 0b0cdb0f53c..00000000000 Binary files a/cmd/influxd/inspect/tsi-test-data.tar.gz and /dev/null differ diff --git a/cmd/influxd/inspect/type_conflicts/check_schema.go b/cmd/influxd/inspect/type_conflicts/check_schema.go deleted file mode 100644 index e91ee9569c8..00000000000 --- a/cmd/influxd/inspect/type_conflicts/check_schema.go +++ /dev/null @@ -1,155 +0,0 @@ -package typecheck - -import ( - "errors" - "fmt" - "io" - "io/fs" - "os" - "path" - "path/filepath" - "strings" - - "github.com/influxdata/influxdb/v2/kit/cli" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/spf13/cobra" - "github.com/spf13/viper" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -type TypeConflictChecker struct { - Path string - SchemaFile string - ConflictsFile string - Logger *zap.Logger - - logLevel zapcore.Level -} - -func NewCheckSchemaCommand(v *viper.Viper) (*cobra.Command, error) { - flags := TypeConflictChecker{} - - cmd := &cobra.Command{ - Use: "check-schema", - Short: "Check for conflicts in the types between shards", - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, _ []string) error { - return checkSchemaRunE(cmd, flags) - }, - } - opts := []cli.Opt{ - { - DestP: &flags.Path, - Flag: "path", - Desc: "Path under which fields.idx files are located", - Default: ".", - }, - { - DestP: &flags.SchemaFile, - Flag: "schema-file", - Desc: "Filename schema data should be written to", - Default: "schema.json", - }, - { - DestP: &flags.ConflictsFile, - Flag: "conflicts-file", - Desc: "Filename 
conflicts data should be written to", - Default: "conflicts.json", - }, - { - DestP: &flags.logLevel, - Flag: "log-level", - Desc: "The level of logging used througout the command", - Default: zap.InfoLevel, - }, - } - - if err := cli.BindOptions(v, cmd, opts); err != nil { - return nil, err - } - return cmd, nil -} - -func checkSchemaRunE(_ *cobra.Command, tc TypeConflictChecker) error { - logconf := zap.NewProductionConfig() - logconf.Level = zap.NewAtomicLevelAt(tc.logLevel) - logger, err := logconf.Build() - if err != nil { - return err - } - tc.Logger = logger - - // Get a set of every measurement/field/type tuple present. - var schema Schema - schema, err = tc.readFields() - if err != nil { - return err - } - - if err := schema.WriteSchemaFile(tc.SchemaFile); err != nil { - return err - } - if err := schema.WriteConflictsFile(tc.ConflictsFile); err != nil { - return err - } - - return nil -} - -func (tc *TypeConflictChecker) readFields() (Schema, error) { - schema := NewSchema() - var root string - fi, err := os.Stat(tc.Path) - if err != nil { - return nil, err - } - if fi.IsDir() { - root = tc.Path - } else { - root = path.Dir(tc.Path) - } - fileSystem := os.DirFS(".") - err = fs.WalkDir(fileSystem, root, func(path string, d fs.DirEntry, err error) error { - if err != nil { - return fmt.Errorf("error walking file: %w", err) - } - - if filepath.Base(path) == tsdb.FieldsChangeFile { - fmt.Printf("WARN: A %s file was encountered at %s. The database was not shutdown properly, results of this command may be incomplete\n", - tsdb.FieldsChangeFile, - path, - ) - return nil - } - - if filepath.Base(path) != "fields.idx" { - return nil - } - - dirs := strings.Split(path, string(os.PathSeparator)) - bucket := dirs[len(dirs)-4] - rp := dirs[len(dirs)-3] - fmt.Printf("Processing %s\n", path) - - mfs, err := tsdb.NewMeasurementFieldSet(path, tc.Logger) - if err != nil { - if errors.Is(err, io.EOF) { - return nil - } - return fmt.Errorf("unable to open file %q: %w", path, err) - } - defer mfs.Close() - - measurements := mfs.MeasurementNames() - for _, m := range measurements { - for f, typ := range mfs.FieldsByString(m).FieldSet() { - schema.AddField(bucket, rp, m, f, typ.String()) - } - } - - return nil - }) - - return schema, err -} diff --git a/cmd/influxd/inspect/type_conflicts/merge_schema.go b/cmd/influxd/inspect/type_conflicts/merge_schema.go deleted file mode 100644 index d6d982d21e2..00000000000 --- a/cmd/influxd/inspect/type_conflicts/merge_schema.go +++ /dev/null @@ -1,76 +0,0 @@ -package typecheck - -import ( - "errors" - - "github.com/influxdata/influxdb/v2/kit/cli" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -type MergeFilesCommand struct { - OutputFile string - ConflictsFile string -} - -func NewMergeSchemaCommand(v *viper.Viper) (*cobra.Command, error) { - flags := MergeFilesCommand{} - - cmd := &cobra.Command{ - Use: "merge-schema", - Short: "Merge a set of schema files from the check-schema command", - Args: cobra.MinimumNArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return mergeSchemaRunE(cmd, args, flags) - }, - } - - opts := []cli.Opt{ - { - DestP: &flags.OutputFile, - Flag: "schema-file", - Desc: "Filename for the output file", - Default: "schema.json", - }, - { - DestP: &flags.ConflictsFile, - Flag: "conflicts-file", - Desc: "Filename conflicts data should be written to", - Default: "conflicts.json", - }, - } - - if err := cli.BindOptions(v, cmd, opts); err != nil { - return nil, err - } - return cmd, nil -} - 
-func mergeSchemaRunE(_ *cobra.Command, args []string, mf MergeFilesCommand) error { - return mf.mergeFiles(args) -} - -func (rc *MergeFilesCommand) mergeFiles(filenames []string) error { - if len(filenames) < 1 { - return errors.New("at least 1 file must be specified") - } - - schema, err := SchemaFromFile(filenames[0]) - if err != nil { - return err - } - - for _, filename := range filenames[1:] { - other, err := SchemaFromFile(filename) - if err != nil { - return err - } - schema.Merge(other) - } - - if err := schema.WriteConflictsFile(rc.ConflictsFile); err != nil { - return err - } - - return schema.WriteSchemaFile(rc.OutputFile) -} diff --git a/cmd/influxd/inspect/type_conflicts/schema.go b/cmd/influxd/inspect/type_conflicts/schema.go deleted file mode 100644 index 7e6a33d137a..00000000000 --- a/cmd/influxd/inspect/type_conflicts/schema.go +++ /dev/null @@ -1,149 +0,0 @@ -package typecheck - -import ( - "encoding/json" - "fmt" - "io" - "os" - "strings" - - errors2 "github.com/influxdata/influxdb/v2/pkg/errors" -) - -type UniqueField struct { - Database string `json:"database"` - Retention string `json:"retention"` - Measurement string `json:"measurement"` - Field string `json:"field"` -} - -type FieldTypes map[string]struct{} -type Schema map[string]FieldTypes - -func (ft FieldTypes) MarshalText() (text []byte, err error) { - s := make([]string, 0, len(ft)) - for f := range ft { - s = append(s, f) - } - return []byte(strings.Join(s, ",")), nil -} - -func (ft *FieldTypes) UnmarshalText(text []byte) error { - if *ft == nil { - *ft = make(FieldTypes) - } - for _, ty := range strings.Split(string(text), ",") { - (*ft)[ty] = struct{}{} - } - return nil -} - -func NewSchema() Schema { - return make(Schema) -} - -func SchemaFromFile(filename string) (Schema, error) { - f, err := os.Open(filename) - if err != nil { - return nil, fmt.Errorf("unable to open schema file %q: %w", filename, err) - } - - s := NewSchema() - if err := s.Decode(f); err != nil { - return nil, fmt.Errorf("unable to decode schema file %q: %w", filename, err) - } - return s, nil -} - -func (uf *UniqueField) String() string { - return fmt.Sprintf("%q.%q.%q.%q", uf.Database, uf.Retention, uf.Measurement, uf.Field) -} - -func (s Schema) AddField(database, retention, measurement, field, dataType string) { - uf := UniqueField{ - Database: database, - Retention: retention, - Measurement: measurement, - Field: field, - } - s.AddFormattedField(uf.String(), dataType) -} - -func (s Schema) AddFormattedField(field string, dataType string) { - if _, ok := s[field]; !ok { - s[field] = make(map[string]struct{}) - } - s[field][dataType] = struct{}{} -} - -func (s Schema) Merge(schema Schema) { - for field, types := range schema { - for t := range types { - s.AddFormattedField(field, t) - } - } -} - -func (s Schema) Conflicts() Schema { - cs := NewSchema() - for field, t := range s { - if len(t) > 1 { - for ty := range t { - cs.AddFormattedField(field, ty) - } - } - } - return cs -} - -func (s Schema) WriteSchemaFile(filename string) error { - if len(s) == 0 { - fmt.Println("No schema file generated: no valid measurements/fields found") - return nil - } - - if err := s.encodeSchema(filename); err != nil { - return fmt.Errorf("unable to write schema file to %q: %w", filename, err) - } - fmt.Printf("Schema file written successfully to: %q\n", filename) - return nil -} - -func (s Schema) WriteConflictsFile(filename string) error { - conflicts := s.Conflicts() - if len(conflicts) == 0 { - fmt.Println("No conflicts file generated: no 
conflicts found") - return nil - } - - if err := conflicts.encodeSchema(filename); err != nil { - return fmt.Errorf("unable to write conflicts file to %q: %w", filename, err) - } - fmt.Printf("Conflicts file written successfully to: %q\n", filename) - return nil -} - -func (s Schema) encodeSchema(filename string) (rErr error) { - schemaFile, err := os.Create(filename) - defer errors2.Capture(&rErr, schemaFile.Close) - if err != nil { - return fmt.Errorf("unable to create schema file: %w", err) - } - return s.Encode(schemaFile) -} - -func (s Schema) Encode(w io.Writer) error { - enc := json.NewEncoder(w) - enc.SetIndent("", " ") - if err := enc.Encode(s); err != nil { - return fmt.Errorf("unable to encode schema: %w", err) - } - return nil -} - -func (s Schema) Decode(r io.Reader) error { - if err := json.NewDecoder(r).Decode(&s); err != nil { - return fmt.Errorf("unable to decode schema: %w", err) - } - return nil -} diff --git a/cmd/influxd/inspect/type_conflicts/schema_test.go b/cmd/influxd/inspect/type_conflicts/schema_test.go deleted file mode 100644 index b4b34764c56..00000000000 --- a/cmd/influxd/inspect/type_conflicts/schema_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package typecheck_test - -import ( - "bytes" - "testing" - - typecheck "github.com/influxdata/influxdb/v2/cmd/influxd/inspect/type_conflicts" - "github.com/stretchr/testify/assert" -) - -func TestSchema_Encoding(t *testing.T) { - s := typecheck.NewSchema() - - b := bytes.Buffer{} - - s.AddField("db1", "rp1", "foo", "v2", "float") - s.AddField("db1", "rp1", "foo", "v2", "bool") - s.AddField("db1", "rp1", "bZ", "v1", "int") - - err := s.Encode(&b) - assert.NoError(t, err, "encode failed unexpectedly") - s2 := typecheck.NewSchema() - err = s2.Decode(&b) - assert.NoError(t, err, "decode failed unexpectedly") - assert.Len(t, s2, 2, "wrong number of fields - expected %d, got %d", 2, len(s)) - for f1, fields1 := range s { - assert.Len(t, - s2[f1], - len(fields1), - "differing number of types for a conflicted field %s: expected %d, got %d", - f1, - len(fields1), - len(s2[f1])) - } -} - -type filler struct { - typecheck.UniqueField - typ string -} - -func TestSchema_Merge(t *testing.T) { - const expectedConflicts = 2 - s1Fill := []filler{ - {typecheck.UniqueField{"db1", "rp1", "m1", "f1"}, "integer"}, - {typecheck.UniqueField{"db2", "rp1", "m1", "f1"}, "float"}, - {typecheck.UniqueField{"db1", "rp2", "m1", "f1"}, "string"}, - {typecheck.UniqueField{"db1", "rp1", "m2", "f1"}, "string"}, - {typecheck.UniqueField{"db1", "rp1", "m1", "f2"}, "float"}, - {typecheck.UniqueField{"db2", "rp2", "m2", "f2"}, "integer"}, - } - - s2Fill := []filler{ - {typecheck.UniqueField{"db1", "rp1", "m1", "f1"}, "integer"}, - {typecheck.UniqueField{"db2", "rp1", "m1", "f1"}, "string"}, - {typecheck.UniqueField{"db2", "rp2", "m2", "f2"}, "float"}, - {typecheck.UniqueField{"db1", "rp2", "m1", "f1"}, "string"}, - {typecheck.UniqueField{"db1", "rp1", "m2", "f1"}, "string"}, - {typecheck.UniqueField{"db1", "rp1", "m1", "f2"}, "float"}, - {typecheck.UniqueField{"db2", "rp2", "m2", "f2"}, "integer"}, - } - - s1 := typecheck.NewSchema() - s2 := typecheck.NewSchema() - fillSchema(s1, s1Fill) - fillSchema(s2, s2Fill) - - s1.Merge(s2) - conflicts := s1.Conflicts() - - assert.Len(t, conflicts, expectedConflicts, "wrong number of type conflicts detected: expected %d, got %d", expectedConflicts, len(conflicts)) -} - -func fillSchema(s typecheck.Schema, fill []filler) { - for _, f := range fill { - s.AddFormattedField(f.String(), f.typ) - } -} diff --git 
a/cmd/influxd/inspect/verify_seriesfile/verify_seriesfile.go b/cmd/influxd/inspect/verify_seriesfile/verify_seriesfile.go deleted file mode 100644 index e419104ec92..00000000000 --- a/cmd/influxd/inspect/verify_seriesfile/verify_seriesfile.go +++ /dev/null @@ -1,497 +0,0 @@ -package verify_seriesfile - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sort" - "sync" - - "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/spf13/cobra" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -type args struct { - dir string - db string - seriesFile string - verbose bool - concurrent int -} - -func NewVerifySeriesfileCommand() *cobra.Command { - var arguments args - cmd := &cobra.Command{ - Use: "verify-seriesfile", - Short: "Verifies the integrity of series files.", - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - cmd.SetOut(os.Stdout) - - config := logger.NewConfig() - config.Level = zapcore.WarnLevel - if arguments.verbose { - config.Level = zapcore.InfoLevel - } - log, err := config.New(os.Stderr) - if err != nil { - return err - } - - v := newVerify() - v.Logger = log - v.Concurrent = arguments.concurrent - - var db string - if arguments.seriesFile != "" { - db = arguments.seriesFile - } else if arguments.db != "" { - db = filepath.Join(arguments.dir, arguments.db, "_series") - } - if db != "" { - _, err := v.verifySeriesFile(db) - return err - } - - dbs, err := os.ReadDir(arguments.dir) - if err != nil { - return err - } - - var hasError bool - for _, db := range dbs { - if !db.IsDir() { - continue - } - filePath := filepath.Join(arguments.dir, db.Name(), "_series") - if _, err := v.verifySeriesFile(filePath); err != nil { - v.Logger.Error("Failed to verify series file", - zap.String("filename", filePath), - zap.Error(err)) - hasError = true - } - } - if hasError { - return errors.New("some files failed verification, see logs for details") - } - return nil - }, - } - - cmd.Flags().StringVar(&arguments.dir, "data-path", filepath.Join(os.Getenv("HOME"), ".influxdbv2", "engine", "data"), - "Data Directory.") - cmd.Flags().StringVar(&arguments.db, "bucket-id", "", - "Only use this bucket inside of the data directory.") - cmd.Flags().StringVar(&arguments.seriesFile, "series-path", "", - "Path to a series file. This overrides --data-path and --bucket-id.") - cmd.Flags().BoolVarP(&arguments.verbose, "verbose", "v", false, - "Verbose output.") - cmd.Flags().IntVarP(&arguments.concurrent, "concurrency", "c", runtime.GOMAXPROCS(0), - "How many concurrent workers to run.") - - return cmd -} - -// verifyResult contains the result of a verify... call -type verifyResult struct { - valid bool - err error -} - -// verify contains configuration for running verification of series files. -type verify struct { - Concurrent int - Logger *zap.Logger - - done chan struct{} -} - -// newVerify constructs a verify with good defaults. -func newVerify() verify { - return verify{ - Concurrent: runtime.GOMAXPROCS(0), - Logger: zap.NewNop(), - } -} - -// verifySeriesFile performs verifications on a series file. The error is only returned -// if there was some fatal problem with operating, not if there was a problem with the series file. 
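// Illustrative caller sketch, not part of the original source: how the two return values
// are meant to be interpreted (this mirrors NewVerifySeriesfileCommand above):
//
//	valid, err := v.verifySeriesFile(filePath)
//	if err != nil {
//		// fatal problem running the verification itself
//	} else if !valid {
//		// the series file is corrupt; details were already logged
//	}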
-func (v verify) verifySeriesFile(filePath string) (valid bool, err error) { - v.Logger = v.Logger.With(zap.String("path", filePath)) - v.Logger.Info("Verifying series file") - - defer func() { - if rec := recover(); rec != nil { - v.Logger.Error("Panic verifying file", zap.String("recovered", fmt.Sprint(rec))) - valid = false - } - }() - - partitionInfos, err := os.ReadDir(filePath) - if os.IsNotExist(err) { - v.Logger.Error("Series file does not exist") - return false, nil - } - if err != nil { - return false, err - } - - // Check every partition concurrently. - concurrent := v.Concurrent - if concurrent <= 0 { - concurrent = 1 - } - in := make(chan string, len(partitionInfos)) - out := make(chan verifyResult, len(partitionInfos)) - - // Make sure all the workers are cleaned up when we return. - var wg sync.WaitGroup - defer wg.Wait() - - // Set up cancellation. Any return will cause the workers to be cancelled. - v.done = make(chan struct{}) - defer close(v.done) - - for i := 0; i < concurrent; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - for partitionPath := range in { - valid, err := v.verifyPartition(partitionPath) - select { - case out <- verifyResult{valid: valid, err: err}: - case <-v.done: - return - } - } - }() - } - - // send off the work and read the results. - for _, partitionInfo := range partitionInfos { - in <- filepath.Join(filePath, partitionInfo.Name()) - } - close(in) - - for range partitionInfos { - result := <-out - if result.err != nil { - return false, result.err - } else if !result.valid { - return false, nil - } - } - - return true, nil -} - -// verifyPartition performs verifications on a partition of a series file. The error is only returned -// if there was some fatal problem with operating, not if there was a problem with the partition. -func (v verify) verifyPartition(partitionPath string) (valid bool, err error) { - v.Logger = v.Logger.With(zap.String("partition", filepath.Base(partitionPath))) - v.Logger.Info("Verifying partition") - - defer func() { - if rec := recover(); rec != nil { - v.Logger.Error("Panic verifying partition", zap.String("recovered", fmt.Sprint(rec))) - valid = false - } - }() - - segmentInfos, err := os.ReadDir(partitionPath) - if err != nil { - return false, err - } - - segments := make([]*tsdb.SeriesSegment, 0, len(segmentInfos)) - ids := make(map[uint64]IDData) - - // check every segment - for _, segmentInfo := range segmentInfos { - select { - default: - case <-v.done: - return false, nil - } - - segmentPath := filepath.Join(partitionPath, segmentInfo.Name()) - segmentID, err := tsdb.ParseSeriesSegmentFilename(segmentInfo.Name()) - if err != nil { - continue - } - - if valid, err := v.verifySegment(segmentPath, ids); err != nil { - return false, err - } else if !valid { - return false, nil - } - - // open the segment for verifying the index. we want it to be open outside - the for loop as well, so the defer is ok. - segment := tsdb.NewSeriesSegment(segmentID, segmentPath) - if err := segment.Open(); err != nil { - return false, err - } - defer segment.Close() - - segments = append(segments, segment) - } - - // check the index - indexPath := filepath.Join(partitionPath, "index") - if valid, err := v.verifyIndex(indexPath, segments, ids); err != nil { - return false, err - } else if !valid { - return false, nil - } - - return true, nil -} - -// IDData keeps track of data about a series ID.
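// Illustrative note, not part of the original source: verifySegment below fills this map
// while scanning entries, and verifyIndex then cross-checks it against the on-disk index,
// roughly:
//
//	// insert entry:    ids[id] = IDData{Offset: tsdb.JoinSeriesOffset(segment.ID(), uint32(buf.offset)), Key: keyCopy}
//	// tombstone entry: the existing ids[id] is re-stored with Deleted set to true
//	// verifyIndex then requires index.IsDeleted, index.FindOffsetByID and
//	// index.FindIDBySeriesKey to agree with the recorded IDData for every id.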
-type IDData struct { - Offset int64 - Key []byte - Deleted bool -} - -// verifySegment performs verifications on a segment of a series file. The error is only returned -// if there was some fatal problem with operating, not if there was a problem with the partition. -// The ids map is populated with information about the ids stored in the segment. -func (v verify) verifySegment(segmentPath string, ids map[uint64]IDData) (valid bool, err error) { - segmentName := filepath.Base(segmentPath) - v.Logger = v.Logger.With(zap.String("segment", segmentName)) - v.Logger.Info("Verifying segment") - - // Open up the segment and grab it's data. - segmentID, err := tsdb.ParseSeriesSegmentFilename(segmentName) - if err != nil { - return false, err - } - segment := tsdb.NewSeriesSegment(segmentID, segmentPath) - if err := segment.Open(); err != nil { - v.Logger.Error("Error opening segment", zap.Error(err)) - return false, nil - } - defer segment.Close() - buf := newBuffer(segment.Data()) - - defer func() { - if rec := recover(); rec != nil { - v.Logger.Error("Panic verifying segment", zap.String("recovered", fmt.Sprint(rec)), - zap.Int64("offset", buf.offset)) - valid = false - } - }() - - // Skip the header: it has already been verified by the Open call. - if err := buf.advance(tsdb.SeriesSegmentHeaderSize); err != nil { - v.Logger.Error("Unable to advance buffer", - zap.Int64("offset", buf.offset), - zap.Error(err)) - return false, nil - } - - prevID, firstID := uint64(0), true - -entries: - for len(buf.data) > 0 { - select { - default: - case <-v.done: - return false, nil - } - - flag, id, key, sz := tsdb.ReadSeriesEntry(buf.data) - - // Check the flag is valid and for id monotonicity. - hasKey := true - switch flag { - case tsdb.SeriesEntryInsertFlag: - if !firstID && prevID > id { - v.Logger.Error("ID is not monotonically increasing", - zap.Uint64("prev_id", prevID), - zap.Uint64("id", id), - zap.Int64("offset", buf.offset)) - return false, nil - } - - firstID = false - prevID = id - - if ids != nil { - keyCopy := make([]byte, len(key)) - copy(keyCopy, key) - - ids[id] = IDData{ - Offset: tsdb.JoinSeriesOffset(segment.ID(), uint32(buf.offset)), - Key: keyCopy, - } - } - - case tsdb.SeriesEntryTombstoneFlag: - hasKey = false - if ids != nil { - data := ids[id] - data.Deleted = true - ids[id] = data - } - - case 0: // if zero, there are no more entries - if err := buf.advance(sz); err != nil { - v.Logger.Error("Unable to advance buffer", - zap.Int64("offset", buf.offset), - zap.Error(err)) - return false, nil - } - break entries - - default: - v.Logger.Error("Invalid flag", - zap.Uint8("flag", flag), - zap.Int64("offset", buf.offset)) - return false, nil - } - - // Ensure the key parses. This may panic, but our defer handler should - // make the error message more usable by providing the key. - if hasKey { - parsed := false - func() { - defer func() { - if rec := recover(); rec != nil { - v.Logger.Error("Panic parsing key", - zap.String("key", fmt.Sprintf("%x", key)), - zap.Int64("offset", buf.offset), - zap.String("recovered", fmt.Sprint(rec))) - } - }() - tsdb.ParseSeriesKey(key) - parsed = true - }() - if !parsed { - return false, nil - } - } - - // Advance past the entry. - if err := buf.advance(sz); err != nil { - v.Logger.Error("Unable to advance buffer", - zap.Int64("offset", buf.offset), - zap.Error(err)) - return false, nil - } - } - - return true, nil -} - -// verifyIndex performs verification on an index in a series file. 
The error is only returned -// if there was some fatal problem with operating, not if there was a problem with the partition. -// The ids map must be built from verifying the passed in segments. -func (v verify) verifyIndex(indexPath string, segments []*tsdb.SeriesSegment, - ids map[uint64]IDData) (valid bool, err error) { - v.Logger.Info("Verifying index") - - defer func() { - if rec := recover(); rec != nil { - v.Logger.Error("Panic verifying index", zap.String("recovered", fmt.Sprint(rec))) - valid = false - } - }() - - index := tsdb.NewSeriesIndex(indexPath) - if err := index.Open(); err != nil { - v.Logger.Error("Error opening index", zap.Error(err)) - return false, nil - } - defer index.Close() - - if err := index.Recover(segments); err != nil { - v.Logger.Error("Error recovering index", zap.Error(err)) - return false, nil - } - - // we check all the ids in a consistent order to get the same errors if - // there is a problem - idsList := make([]uint64, 0, len(ids)) - for id := range ids { - idsList = append(idsList, id) - } - sort.Slice(idsList, func(i, j int) bool { - return idsList[i] < idsList[j] - }) - - for _, id := range idsList { - select { - default: - case <-v.done: - return false, nil - } - - IDData := ids[id] - - if gotDeleted := index.IsDeleted(id); gotDeleted != IDData.Deleted { - v.Logger.Error("Index inconsistency", - zap.Uint64("id", id), - zap.Bool("got_deleted", gotDeleted), - zap.Bool("expected_deleted", IDData.Deleted)) - return false, nil - } - - // do not perform any other checks if the id is deleted. - if IDData.Deleted { - continue - } - - // otherwise, check both that the offset is right and that we get the right id for the key - if gotOffset := index.FindOffsetByID(id); gotOffset != IDData.Offset { - v.Logger.Error("Index inconsistency", - zap.Uint64("id", id), - zap.Int64("got_offset", gotOffset), - zap.Int64("expected_offset", IDData.Offset)) - return false, nil - } - - if gotID := index.FindIDBySeriesKey(segments, IDData.Key); gotID != id { - v.Logger.Error("Index inconsistency", - zap.Uint64("id", id), - zap.Uint64("got_id", gotID), - zap.Uint64("expected_id", id)) - return false, nil - } - } - - return true, nil -} - -// buffer allows one to safely advance a byte slice and keep track of how many bytes were advanced. -type buffer struct { - offset int64 - data []byte -} - -// newBuffer constructs a buffer with the provided data. -func newBuffer(data []byte) *buffer { - return &buffer{ - offset: 0, - data: data, - } -} - -// advance will consume n bytes from the data slice and return an error if there is not enough -// data to do so. 
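// Illustrative usage, not part of the original source (this is how verifySegment above
// walks a segment):
//
//	buf := newBuffer(segment.Data())
//	if err := buf.advance(tsdb.SeriesSegmentHeaderSize); err != nil { /* header truncated */ }
//	// ...decode one entry of size sz, then step past it; buf.offset is what the
//	// log messages above report as "offset":
//	if err := buf.advance(sz); err != nil { /* entry truncated */ }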
-func (b *buffer) advance(n int64) error { - if int64(len(b.data)) < n { - return fmt.Errorf("unable to advance %d bytes: %d remaining", n, len(b.data)) - } - b.data = b.data[n:] - b.offset += n - return nil -} diff --git a/cmd/influxd/inspect/verify_seriesfile/verify_seriesfile_test.go b/cmd/influxd/inspect/verify_seriesfile/verify_seriesfile_test.go deleted file mode 100644 index 8b6ae3b521e..00000000000 --- a/cmd/influxd/inspect/verify_seriesfile/verify_seriesfile_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package verify_seriesfile - -import ( - "fmt" - "io" - "os" - "path/filepath" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestVerifies_BasicCobra(t *testing.T) { - test := NewTest(t) - defer os.RemoveAll(test.Path) - - verify := NewVerifySeriesfileCommand() - verify.SetArgs([]string{"--data-path", test.Path}) - require.NoError(t, verify.Execute()) -} - -func TestVerifies_Valid(t *testing.T) { - test := NewTest(t) - defer os.RemoveAll(test.Path) - - verify := newVerify() - verify.Logger = zaptest.NewLogger(t) - - passed, err := verify.verifySeriesFile(test.Path) - require.NoError(t, err) - require.True(t, passed) -} - -func TestVerifies_Invalid(t *testing.T) { - test := NewTest(t) - defer os.RemoveAll(test.Path) - - require.NoError(t, filepath.WalkDir(test.Path, func(path string, entry os.DirEntry, err error) error { - require.NoError(t, err) - - if entry.IsDir() { - return nil - } - - test.Backup(path) - defer test.Restore(path) - - fh, err := os.OpenFile(path, os.O_RDWR, 0) - require.NoError(t, err) - defer fh.Close() - - _, err = fh.WriteAt([]byte("foobar"), 0) - require.NoError(t, err) - require.NoError(t, fh.Close()) - - verify := newVerify() - verify.Logger = zaptest.NewLogger(t) - - passed, err := verify.verifySeriesFile(test.Path) - require.NoError(t, err) - require.False(t, passed) - - return nil - })) -} - -type Test struct { - *testing.T - Path string -} - -func NewTest(t *testing.T) *Test { - t.Helper() - - dir := t.TempDir() - - // create a series file in the directory - err := func() error { - seriesFile := tsdb.NewSeriesFile(dir) - if err := seriesFile.Open(); err != nil { - return err - } - defer seriesFile.Close() - seriesFile.EnableCompactions() - - const ( - compactionThreshold = 100 - numSeries = 2 * tsdb.SeriesFilePartitionN * compactionThreshold - ) - - for _, partition := range seriesFile.Partitions() { - partition.CompactThreshold = compactionThreshold - } - - var names [][]byte - var tagsSlice []models.Tags - - for i := 0; i < numSeries; i++ { - names = append(names, []byte(fmt.Sprintf("series%d", i))) - tagsSlice = append(tagsSlice, nil) - } - - ids, err := seriesFile.CreateSeriesListIfNotExists(names, tagsSlice) - if err != nil { - return err - } - - // delete one series - if err := seriesFile.DeleteSeriesID(ids[0]); err != nil { - return err - } - - // wait for compaction to make sure we detect issues with the index - partitions := seriesFile.Partitions() - wait: - for _, partition := range partitions { - if partition.Compacting() { - time.Sleep(100 * time.Millisecond) - goto wait - } - } - - return seriesFile.Close() - }() - if err != nil { - t.Fatal(err) - } - - return &Test{ - T: t, - Path: dir, - } -} - -// Backup makes a copy of the path for a later Restore. 
-func (t *Test) Backup(path string) { - in, err := os.Open(path) - require.NoError(t.T, err) - defer in.Close() - - out, err := os.Create(path + ".backup") - require.NoError(t.T, err) - defer out.Close() - - _, err = io.Copy(out, in) - require.NoError(t.T, err) -} - -// Restore restores the file at the path to the time when Backup was called last. -func (t *Test) Restore(path string) { - require.NoError(t.T, os.Rename(path+".backup", path)) -} diff --git a/cmd/influxd/inspect/verify_tombstone/verify_tombstone.go b/cmd/influxd/inspect/verify_tombstone/verify_tombstone.go deleted file mode 100644 index 5aa920728b8..00000000000 --- a/cmd/influxd/inspect/verify_tombstone/verify_tombstone.go +++ /dev/null @@ -1,134 +0,0 @@ -package verify_tombstone - -import ( - "errors" - "os" - "path/filepath" - "time" - - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/spf13/cobra" -) - -type args struct { - dir string - v bool - vv bool -} - -type verifier struct { - path string - verbosity int - files []string - f string -} - -const ( - quiet = iota - verbose - veryVerbose -) - -func NewVerifyTombstoneCommand() *cobra.Command { - var arguments args - cmd := &cobra.Command{ - Use: "verify-tombstone", - Short: "Verify the integrity of tombstone files", - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - runner := verifier{path: arguments.dir} - if arguments.vv { - runner.verbosity = veryVerbose - } else if arguments.v { - runner.verbosity = verbose - } - return runner.run(cmd) - }, - } - - cmd.Flags().StringVar(&arguments.dir, "engine-path", filepath.Join(os.Getenv("HOME"), ".influxdbv2", "engine"), - "Path to find tombstone files.") - cmd.Flags().BoolVarP(&arguments.v, "verbose", "v", false, - "Verbose: Emit periodic progress.") - cmd.Flags().BoolVar(&arguments.vv, "vv", false, - "Very verbose: Emit every tombstone entry key and time range.") - cmd.Flags().Bool("vvv", false, - "Leftover from original command left for compatibility") - _ = cmd.Flags().MarkHidden("vvv") - return cmd -} - -func (v *verifier) loadFiles() error { - return filepath.WalkDir(v.path, func(path string, d os.DirEntry, err error) error { - if err != nil { - return err - } - if filepath.Ext(path) == "."+tsm1.TombstoneFileExtension { - v.files = append(v.files, path) - } - return nil - }) -} - -func (v *verifier) next() bool { - if len(v.files) == 0 { - return false - } - - v.f, v.files = v.files[0], v.files[1:] - return true -} - -func (v *verifier) run(cmd *cobra.Command) error { - if err := v.loadFiles(); err != nil { - return err - } - - var failed bool - var foundTombstoneFile bool - start := time.Now() - for v.next() { - foundTombstoneFile = true - if v.verbosity > quiet { - cmd.Printf("Verifying: %q\n", v.f) - } - - tombstoner := tsm1.NewTombstoner(v.f, nil) - if !tombstoner.HasTombstones() { - cmd.Printf("%s has no tombstone entries", v.f) - continue - } - - var totalEntries int64 - err := tombstoner.Walk(func(t tsm1.Tombstone) error { - totalEntries++ - if v.verbosity > quiet && totalEntries%(10*1e6) == 0 { - cmd.Printf("Verified %d tombstone entries\n", totalEntries) - } else if v.verbosity > verbose { - var min interface{} = t.Min - var max interface{} = t.Max - if v.verbosity > veryVerbose { - min = time.Unix(0, t.Min) - max = time.Unix(0, t.Max) - } - cmd.Printf("key: %q, min: %v, max: %v\n", t.Key, min, max) - } - return nil - }) - if err != nil { - cmd.Printf("%q failed to walk tombstone entries: %v. 
Last okay entry: %d\n", v.f, err, totalEntries) - failed = true - continue - } - - cmd.Printf("Completed verification for %q in %v.\nVerified %d entries\n\n", v.f, time.Since(start), totalEntries) - } - - if failed { - return errors.New("failed tombstone verification") - } - if !foundTombstoneFile { - cmd.Printf("No tombstone files found\n") - } - return nil -} diff --git a/cmd/influxd/inspect/verify_tombstone/verify_tombstone_test.go b/cmd/influxd/inspect/verify_tombstone/verify_tombstone_test.go deleted file mode 100644 index fc1cc025bd1..00000000000 --- a/cmd/influxd/inspect/verify_tombstone/verify_tombstone_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package verify_tombstone - -import ( - "bytes" - "encoding/binary" - "io" - "os" - "testing" - - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/stretchr/testify/require" -) - -// Tombstone file headers for different versions. -// Treated as v1 without a header. -const ( - v2header = 0x1502 - v3header = 0x1503 - v4header = 0x1504 -) - -// Run tests on a directory with no Tombstone files -func TestVerifies_InvalidFileType(t *testing.T) { - path := t.TempDir() - - f, err := os.CreateTemp(path, "verifytombstonetest*"+".txt") - require.NoError(t, err) - require.NoError(t, f.Close()) - - verify := NewVerifyTombstoneCommand() - verify.SetArgs([]string{"--engine-path", path}) - - b := bytes.NewBufferString("") - verify.SetOut(b) - require.NoError(t, verify.Execute()) - - out, err := io.ReadAll(b) - require.NoError(t, err) - require.Contains(t, string(out), "No tombstone files found") -} - -// Run tests on an empty Tombstone file (treated as v1) -func TestVerifies_InvalidEmptyFile(t *testing.T) { - path, _ := NewTempTombstone(t) - - verify := NewVerifyTombstoneCommand() - verify.SetArgs([]string{"--engine-path", path}) - - b := bytes.NewBufferString("") - verify.SetOut(b) - require.NoError(t, verify.Execute()) - - out, err := io.ReadAll(b) - require.NoError(t, err) - require.Contains(t, string(out), "has no tombstone entries") -} - -// Runs tests on an invalid V2 Tombstone File -func TestVerifies_InvalidV2(t *testing.T) { - path, file := NewTempTombstone(t) - - WriteTombstoneHeader(t, file, v2header) - WriteBadData(t, file) - - verify := NewVerifyTombstoneCommand() - verify.SetArgs([]string{"--engine-path", path}) - verify.SetOut(bytes.NewBufferString("")) - - require.Error(t, verify.Execute()) -} - -func TestVerifies_ValidTS(t *testing.T) { - path, file := NewTempTombstone(t) - - ts := tsm1.NewTombstoner(file.Name(), nil) - require.NoError(t, ts.Add([][]byte{[]byte("foobar")})) - require.NoError(t, ts.Flush()) - - verify := NewVerifyTombstoneCommand() - verify.SetArgs([]string{"--engine-path", path, "--vv"}) - verify.SetOut(bytes.NewBufferString("")) - - require.NoError(t, verify.Execute()) -} - -// Runs tests on an invalid V3 Tombstone File -func TestVerifies_InvalidV3(t *testing.T) { - path, file := NewTempTombstone(t) - - WriteTombstoneHeader(t, file, v3header) - WriteBadData(t, file) - - verify := NewVerifyTombstoneCommand() - verify.SetArgs([]string{"--engine-path", path}) - verify.SetOut(bytes.NewBufferString("")) - - require.Error(t, verify.Execute()) -} - -// Runs tests on an invalid V4 Tombstone File -func TestVerifies_InvalidV4(t *testing.T) { - path, file := NewTempTombstone(t) - - WriteTombstoneHeader(t, file, v4header) - WriteBadData(t, file) - - verify := NewVerifyTombstoneCommand() - verify.SetArgs([]string{"--engine-path", path}) - verify.SetOut(bytes.NewBufferString("")) - - require.Error(t, 
verify.Execute()) -} - -// Ensures "--vvv" flag will not error as it -// is not needed, but was part of old command. -func TestTombstone_VeryVeryVerbose(t *testing.T) { - path, file := NewTempTombstone(t) - - WriteTombstoneHeader(t, file, v4header) - WriteBadData(t, file) - - verify := NewVerifyTombstoneCommand() - verify.SetArgs([]string{"--engine-path", path, "--vvv"}) - verify.SetOut(bytes.NewBufferString("")) - - require.Error(t, verify.Execute()) -} - -func NewTempTombstone(t *testing.T) (string, *os.File) { - t.Helper() - - dir := t.TempDir() - - file, err := os.CreateTemp(dir, "verifytombstonetest*"+"."+tsm1.TombstoneFileExtension) - require.NoError(t, err) - defer file.Close() - - return dir, file -} - -func WriteTombstoneHeader(t *testing.T, file *os.File, header uint32) { - t.Helper() - - writer, err := os.OpenFile(file.Name(), os.O_RDWR, 0) - require.NoError(t, err) - defer writer.Close() - - var b [4]byte - binary.BigEndian.PutUint32(b[:], header) - _, err = writer.Write(b[:]) - require.NoError(t, err) -} - -func WriteBadData(t *testing.T, file *os.File) { - t.Helper() - - writer, err := os.OpenFile(file.Name(), os.O_APPEND|os.O_WRONLY, 0644) - require.NoError(t, err) - defer writer.Close() - - written, err := writer.Write([]byte("foobar")) - require.NoError(t, err) - require.Equal(t, 6, written) -} diff --git a/cmd/influxd/inspect/verify_tsm/verify_tsm.go b/cmd/influxd/inspect/verify_tsm/verify_tsm.go deleted file mode 100644 index 16f50b97cc7..00000000000 --- a/cmd/influxd/inspect/verify_tsm/verify_tsm.go +++ /dev/null @@ -1,207 +0,0 @@ -package verify_tsm - -import ( - "fmt" - "hash/crc32" - "os" - "path/filepath" - "time" - "unicode/utf8" - - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -type verifier interface { - run(cmd *cobra.Command, dataPath string, verbose bool) error -} - -type verifyTSM struct { - files []string - f string - startTime time.Time -} - -type verifyUTF8 struct { - verifyTSM - totalErrors int - total int -} - -type verifyChecksums struct { - verifyTSM - totalErrors int - total int -} - -func NewTSMVerifyCommand() *cobra.Command { - var checkUTF8 bool - var dir string - var verbose bool - - cmd := &cobra.Command{ - Use: `verify-tsm`, - Short: `Verifies the integrity of TSM files`, - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - var runner verifier - if checkUTF8 { - runner = &verifyUTF8{} - } else { - runner = &verifyChecksums{} - } - err := runner.run(cmd, dir, verbose) - return err - }, - } - cmd.Flags().StringVar(&dir, "engine-path", os.Getenv("HOME")+"/.influxdbv2"+"/engine", "Root storage path.") - cmd.Flags().BoolVar(&checkUTF8, "check-utf8", false, "Verify series keys are valid UTF-8. 
This check skips verification of block checksums.") - cmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "Enable verbose logging") - return cmd -} - -func (v *verifyUTF8) run(cmd *cobra.Command, dataPath string, verbose bool) error { - if err := v.loadFiles(dataPath); err != nil { - return err - } - - v.start() - - for v.next() { - reader, closer, err := v.tsmReader() - if closer != nil { - defer closer() - } - if err != nil { - return err - } - - n := reader.KeyCount() - fileErrors := 0 - v.total += n - for i := 0; i < n; i++ { - key, _ := reader.KeyAt(i) - if !utf8.Valid(key) { - v.totalErrors++ - fileErrors++ - if verbose { - cmd.PrintErrf("%s: key #%d is not valid UTF-8\n", v.f, i) - } - } - } - if fileErrors == 0 && verbose { - cmd.PrintErrf("%s: healthy\n", v.f) - } - } - - cmd.PrintErrf("Invalid Keys: %d / %d, in %vs\n", v.totalErrors, v.total, v.elapsed().Seconds()) - if v.totalErrors > 0 { - return errors.New("check-utf8: failed") - } - - return nil -} - -func (v *verifyChecksums) run(cmd *cobra.Command, dataPath string, verbose bool) error { - if err := v.loadFiles(dataPath); err != nil { - return err - } - - v.start() - - for v.next() { - reader, closer, err := v.tsmReader() - if closer != nil { - defer closer() - } - if err != nil { - return err - } - - blockItr := reader.BlockIterator() - fileErrors := 0 - count := 0 - for blockItr.Next() { - v.total++ - key, _, _, _, checksum, buf, err := blockItr.Read() - if err != nil { - v.totalErrors++ - fileErrors++ - if verbose { - cmd.PrintErrf("%s: could not get checksum for key %v block %d due to error: %q\n", v.f, key, count, err) - } - } else if expected := crc32.ChecksumIEEE(buf); checksum != expected { - v.totalErrors++ - fileErrors++ - if verbose { - cmd.PrintErrf("%s: got %d but expected %d for key %v, block %d\n", v.f, checksum, expected, key, count) - } - } - count++ - } - if fileErrors == 0 && verbose { - cmd.PrintErrf("%s: healthy\n", v.f) - } - } - - cmd.PrintErrf("Broken Blocks: %d / %d, in %vs\n", v.totalErrors, v.total, v.elapsed().Seconds()) - - return nil -} - -func (v *verifyTSM) loadFiles(dataPath string) error { - err := filepath.WalkDir(dataPath, func(path string, d os.DirEntry, err error) error { - if err != nil { - return err - } - if filepath.Ext(path) == "."+tsm1.TSMFileExtension { - v.files = append(v.files, path) - } - return nil - }) - - if err != nil { - return fmt.Errorf("could not load storage files (use -dir for custom storage root): %w", err) - } - - return nil -} - -func (v *verifyTSM) next() bool { - if len(v.files) == 0 { - return false - } - - v.f, v.files = v.files[0], v.files[1:] - return true -} - -func (v *verifyTSM) tsmReader() (*tsm1.TSMReader, func(), error) { - file, err := os.OpenFile(v.f, os.O_RDONLY, 0600) - if err != nil { - return nil, nil, err - } - - reader, err := tsm1.NewTSMReader(file) - if err != nil { - closer := func() { - file.Close() - } - return nil, closer, err - } - - closer := func() { - file.Close() - reader.Close() - } - return reader, closer, nil -} - -func (v *verifyTSM) start() { - v.startTime = time.Now() -} - -func (v *verifyTSM) elapsed() time.Duration { - return time.Since(v.startTime) -} diff --git a/cmd/influxd/inspect/verify_tsm/verify_tsm_test.go b/cmd/influxd/inspect/verify_tsm/verify_tsm_test.go deleted file mode 100644 index 6584e67b6f3..00000000000 --- a/cmd/influxd/inspect/verify_tsm/verify_tsm_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package verify_tsm - -import ( - "bytes" - "encoding/binary" - "io" - "os" - "testing" - - 
"github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/stretchr/testify/require" -) - -func TestInvalidChecksum(t *testing.T) { - path := newChecksumTest(t, true) - defer os.RemoveAll(path) - - verify := NewTSMVerifyCommand() - b := bytes.NewBufferString("") - verify.SetOut(b) - verify.SetArgs([]string{"--engine-path", path}) - require.NoError(t, verify.Execute()) - - out, err := io.ReadAll(b) - require.NoError(t, err) - require.Contains(t, string(out), "Broken Blocks: 1 / 1") -} - -func TestValidChecksum(t *testing.T) { - path := newChecksumTest(t, false) - defer os.RemoveAll(path) - - verify := NewTSMVerifyCommand() - b := bytes.NewBufferString("") - verify.SetOut(b) - verify.SetArgs([]string{"--engine-path", path}) - require.NoError(t, verify.Execute()) - - out, err := io.ReadAll(b) - require.NoError(t, err) - require.Contains(t, string(out), "Broken Blocks: 0 / 1") -} - -func TestInvalidUTF8(t *testing.T) { - path := newUTFTest(t, true) - defer os.RemoveAll(path) - - verify := NewTSMVerifyCommand() - verify.SetOut(bytes.NewBufferString("")) - verify.SetArgs([]string{"--engine-path", path, "--check-utf8"}) - require.Error(t, verify.Execute()) -} - -func TestValidUTF8(t *testing.T) { - path := newUTFTest(t, false) - defer os.RemoveAll(path) - - verify := NewTSMVerifyCommand() - b := bytes.NewBufferString("") - verify.SetOut(b) - verify.SetArgs([]string{"--engine-path", path, "--check-utf8"}) - require.NoError(t, verify.Execute()) - - out, err := io.ReadAll(b) - require.NoError(t, err) - require.Contains(t, string(out), "Invalid Keys: 0 / 1") -} - -func newUTFTest(t *testing.T, withError bool) string { - t.Helper() - - dir := t.TempDir() - - f, err := os.CreateTemp(dir, "verifytsmtest*"+"."+tsm1.TSMFileExtension) - require.NoError(t, err) - - w, err := tsm1.NewTSMWriter(f) - require.NoError(t, err) - defer w.Close() - - values := []tsm1.Value{tsm1.NewValue(0, 1.0)} - require.NoError(t, w.Write([]byte("cpu"), values)) - - if withError { - require.NoError(t, binary.Write(f, binary.BigEndian, []byte("foobar\n"))) - } - - require.NoError(t, w.WriteIndex()) - - return dir -} - -func newChecksumTest(t *testing.T, withError bool) string { - t.Helper() - - dir := t.TempDir() - - f, err := os.CreateTemp(dir, "verifytsmtest*"+"."+tsm1.TSMFileExtension) - require.NoError(t, err) - - w, err := tsm1.NewTSMWriter(f) - require.NoError(t, err) - - values := []tsm1.Value{tsm1.NewValue(0, "entry")} - require.NoError(t, w.Write([]byte("cpu"), values)) - - require.NoError(t, w.WriteIndex()) - w.Close() - - if withError { - fh, err := os.OpenFile(f.Name(), os.O_RDWR, 0) - require.NoError(t, err) - defer fh.Close() - - written, err := fh.WriteAt([]byte("foob"), 5) - require.Equal(t, 4, written) - require.NoError(t, err) - } - - return dir -} diff --git a/cmd/influxd/inspect/verify_wal/verify_wal.go b/cmd/influxd/inspect/verify_wal/verify_wal.go deleted file mode 100644 index d16d2b93f06..00000000000 --- a/cmd/influxd/inspect/verify_wal/verify_wal.go +++ /dev/null @@ -1,146 +0,0 @@ -package verify_wal - -import ( - "fmt" - "os" - "path/filepath" - "text/tabwriter" - "time" - - "github.com/influxdata/influxdb/v2/internal/fs" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/spf13/cobra" -) - -type args struct { - dir string - verbose bool -} - -func NewVerifyWALCommand() *cobra.Command { - var arguments args - cmd := &cobra.Command{ - Use: `verify-wal`, - Short: "Check for WAL corruption", - Long: ` -This command will analyze the WAL 
(Write-Ahead Log) in a storage directory to -check if there are any corrupt files. If any corrupt files are found, the names -of said corrupt files will be reported. The tool will also count the total number -of entries in the scanned WAL files, in case this is of interest. -For each file, the following is output: - * The file name; - * "clean" (if the file is clean) OR - The first position of any corruption that is found -In the summary section, the following is printed: - * The number of WAL files scanned; - * The number of WAL entries scanned; - * A list of files found to be corrupt`, - Args: cobra.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return arguments.Run(cmd) - }, - } - - dir, err := fs.InfluxDir() - if err != nil { - panic(err) - } - dir = filepath.Join(dir, "engine/wal") - cmd.Flags().StringVar(&arguments.dir, "wal-path", dir, "use provided WAL path.") - cmd.Flags().BoolVarP(&arguments.verbose, "verbose", "v", false, "enable verbose logging") - return cmd -} - -func (a args) Run(cmd *cobra.Command) error { - // Verify valid directory - fi, err := os.Stat(a.dir) - if err != nil { - return fmt.Errorf("failed to stat %q: %w", a.dir, err) - } else if !fi.IsDir() { - return fmt.Errorf("%q is not a directory", a.dir) - } - - // Find all WAL files in provided directory - files, err := loadFiles(a.dir) - if err != nil { - return fmt.Errorf("failed to search for WAL files in directory %s: %w", a.dir, err) - } - if len(files) == 0 { - return fmt.Errorf("no WAL files found in directory %s", a.dir) - } - - start := time.Now() - tw := tabwriter.NewWriter(cmd.OutOrStdout(), 8, 2, 1, ' ', 0) - - var corruptFiles []string - var totalEntriesScanned int - - // Scan each WAL file - for _, fpath := range files { - var entriesScanned int - f, err := os.OpenFile(fpath, os.O_RDONLY, 0600) - if err != nil { - return fmt.Errorf("error opening file %s: %w. 
Exiting", fpath, err) - } - - clean := true - reader := tsm1.NewWALSegmentReader(f) - - // Check for corrupted entries - for reader.Next() { - entriesScanned++ - _, err := reader.Read() - if err != nil { - clean = false - _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "%s: corrupt entry found at position %d\n", fpath, reader.Count()) - corruptFiles = append(corruptFiles, fpath) - break - } - } - - if a.verbose { - if entriesScanned == 0 { - // No data found in file - _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "%s: no WAL entries found\n", f.Name()) - } else if clean { - // No corrupted entry found - _, _ = fmt.Fprintf(cmd.ErrOrStderr(), "%s: clean\n", fpath) - } - } - totalEntriesScanned += entriesScanned - _ = tw.Flush() - - _ = reader.Close() - } - - // Print Summary - _, _ = fmt.Fprintf(tw, "Results:\n") - _, _ = fmt.Fprintf(tw, " Files checked: %d\n", len(files)) - _, _ = fmt.Fprintf(tw, " Total entries checked: %d\n", totalEntriesScanned) - _, _ = fmt.Fprintf(tw, " Corrupt files found: ") - if len(corruptFiles) == 0 { - _, _ = fmt.Fprintf(tw, "None") - } else { - for _, name := range corruptFiles { - _, _ = fmt.Fprintf(tw, "\n %s", name) - } - } - - _, _ = fmt.Fprintf(tw, "\nCompleted in %v\n", time.Since(start)) - _ = tw.Flush() - - return nil -} - -func loadFiles(dir string) (files []string, err error) { - err = filepath.WalkDir(dir, func(path string, d os.DirEntry, err error) error { - if err != nil { - return err - } - if filepath.Ext(path) == "."+tsm1.WALFileExtension { - files = append(files, path) - } - return nil - }) - return -} diff --git a/cmd/influxd/inspect/verify_wal/verify_wal_test.go b/cmd/influxd/inspect/verify_wal/verify_wal_test.go deleted file mode 100644 index b39c0e9285f..00000000000 --- a/cmd/influxd/inspect/verify_wal/verify_wal_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package verify_wal - -import ( - "bytes" - "context" - "io" - "os" - "testing" - - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/stretchr/testify/require" -) - -type testInfo struct { - t *testing.T - path string - expectedOut string - expectErr bool - withStdErr bool -} - -func TestVerifies_InvalidFileType(t *testing.T) { - path := t.TempDir() - - f, err := os.CreateTemp(path, "verifywaltest*"+".txt") - require.NoError(t, err) - require.NoError(t, f.Close()) - - runCommand(testInfo{ - t: t, - path: path, - expectedOut: "no WAL files found in directory", - expectErr: true, - }) -} - -func TestVerifies_InvalidNotDir(t *testing.T) { - _, file := newTempWALInvalid(t, true) - - runCommand(testInfo{ - t: t, - path: file.Name(), - expectedOut: "is not a directory", - expectErr: true, - }) -} - -func TestVerifies_InvalidEmptyFile(t *testing.T) { - path, _ := newTempWALInvalid(t, true) - - runCommand(testInfo{ - t: t, - path: path, - expectedOut: "no WAL entries found", - withStdErr: true, - }) -} - -func TestVerifies_Invalid(t *testing.T) { - path, _ := newTempWALInvalid(t, false) - - runCommand(testInfo{ - t: t, - path: path, - expectedOut: "corrupt entry found at position", - withStdErr: true, - }) -} - -func TestVerifies_Valid(t *testing.T) { - path := newTempWALValid(t) - - runCommand(testInfo{ - t: t, - path: path, - expectedOut: "clean", - withStdErr: true, - }) -} - -func runCommand(args testInfo) { - verify := NewVerifyWALCommand() - verify.SetArgs([]string{"--wal-path", args.path, "--verbose"}) - - b := bytes.NewBufferString("") - verify.SetOut(b) - if args.withStdErr { - verify.SetErr(b) - } - - if args.expectErr { - 
require.Error(args.t, verify.Execute()) - } else { - require.NoError(args.t, verify.Execute()) - } - - out, err := io.ReadAll(b) - require.NoError(args.t, err) - require.Contains(args.t, string(out), args.expectedOut) -} - -func newTempWALValid(t *testing.T) string { - t.Helper() - - dir := t.TempDir() - - w := tsm1.NewWAL(dir, 0, 0, tsdb.EngineTags{}) - require.NoError(t, w.Open()) - t.Cleanup(func() { - require.NoError(t, w.Close()) - }) - - p1 := tsm1.NewValue(1, 1.1) - p2 := tsm1.NewValue(1, int64(1)) - p3 := tsm1.NewValue(1, true) - p4 := tsm1.NewValue(1, "string") - p5 := tsm1.NewValue(1, ^uint64(0)) - - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#float": {p1}, - "cpu,host=A#!~#int": {p2}, - "cpu,host=A#!~#bool": {p3}, - "cpu,host=A#!~#string": {p4}, - "cpu,host=A#!~#unsigned": {p5}, - } - - _, err := w.WriteMulti(context.Background(), values) - require.NoError(t, err) - - return dir -} - -func newTempWALInvalid(t *testing.T, empty bool) (string, *os.File) { - t.Helper() - - dir := t.TempDir() - - file, err := os.CreateTemp(dir, "verifywaltest*."+tsm1.WALFileExtension) - require.NoError(t, err) - t.Cleanup(func() { file.Close() }) - - if !empty { - written, err := file.Write([]byte("foobar")) - require.NoError(t, err) - require.Equal(t, 6, written) - } - - return dir, file -} diff --git a/cmd/influxd/launcher/_tlstests/tls_test.go b/cmd/influxd/launcher/_tlstests/tls_test.go deleted file mode 100644 index 22b831db1aa..00000000000 --- a/cmd/influxd/launcher/_tlstests/tls_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package tlstests - -import ( - "context" - "os" - "testing" - - "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" - "github.com/influxdata/influxdb/v2/http" - "github.com/stretchr/testify/require" -) - -const ( - certPathVar = "INFLUXDB_TEST_SSL_CERT_PATH" - certKeyVar = "INFLUXDB_TEST_SSL_KEY_PATH" -) - -var ( - certPath string - keyPath string -) - -func init() { - certPath = os.Getenv(certPathVar) - keyPath = os.Getenv(certKeyVar) -} - -func TestTLS_NonStrict(t *testing.T) { - require.NotEmpty(t, certPath, "INFLUXDB_TEST_SSL_CERT_PATH must be set to run this test") - require.NotEmpty(t, keyPath, "INFLUXDB_TEST_SSL_KEY_PATH must be set to run this test") - ctx := context.Background() - - for _, tlsVersion := range []string{"1.0", "1.1", "1.2", "1.3"} { - tlsVersion := tlsVersion - t.Run(tlsVersion, func(t *testing.T) { - l := launcher.NewTestLauncher() - l.RunOrFail(t, ctx, func(o *launcher.InfluxdOpts) { - o.HttpTLSCert = certPath - o.HttpTLSKey = keyPath - o.HttpTLSMinVersion = tlsVersion - o.HttpTLSStrictCiphers = false - }) - defer l.ShutdownOrFail(t, ctx) - - req, err := l.NewHTTPRequest("GET", "/ping", "", "") - require.NoError(t, err) - require.Regexp(t, "https://.*", req.URL) - - client := http.NewClient("https", true) - _, err = client.Do(req) - require.NoError(t, err) - }) - } -} - -func TestTLS_Strict(t *testing.T) { - require.NotEmpty(t, certPath, "INFLUXDB_TEST_SSL_CERT_PATH must be set to run this test") - require.NotEmpty(t, keyPath, "INFLUXDB_TEST_SSL_KEY_PATH must be set to run this test") - ctx := context.Background() - - for _, tlsVersion := range []string{"1.0", "1.1", "1.2", "1.3"} { - tlsVersion := tlsVersion - t.Run(tlsVersion, func(t *testing.T) { - l := launcher.NewTestLauncher() - l.RunOrFail(t, ctx, func(o *launcher.InfluxdOpts) { - o.HttpTLSCert = certPath - o.HttpTLSKey = keyPath - o.HttpTLSMinVersion = tlsVersion - o.HttpTLSStrictCiphers = true - }) - defer l.ShutdownOrFail(t, ctx) - - req, err := 
l.NewHTTPRequest("GET", "/ping", "", "") - require.NoError(t, err) - require.Regexp(t, "https://.*", req.URL) - - client := http.NewClient("https", true) - _, err = client.Do(req) - require.NoError(t, err) - }) - } -} - -func TestTLS_UnsupportedVersion(t *testing.T) { - require.NotEmpty(t, certPath, "INFLUXDB_TEST_SSL_CERT_PATH must be set to run this test") - require.NotEmpty(t, keyPath, "INFLUXDB_TEST_SSL_KEY_PATH must be set to run this test") - ctx := context.Background() - - l := launcher.NewTestLauncher() - err := l.Run(t, ctx, func(o *launcher.InfluxdOpts) { - o.HttpTLSCert = certPath - o.HttpTLSKey = keyPath - o.HttpTLSMinVersion = "1.4" - o.HttpTLSStrictCiphers = true - }) - require.Error(t, err) -} diff --git a/cmd/influxd/launcher/backup_restore_test.go b/cmd/influxd/launcher/backup_restore_test.go deleted file mode 100644 index 868e75593e3..00000000000 --- a/cmd/influxd/launcher/backup_restore_test.go +++ /dev/null @@ -1,212 +0,0 @@ -package launcher_test - -import ( - "context" - "testing" - - "github.com/influxdata/influx-cli/v2/clients/backup" - "github.com/influxdata/influx-cli/v2/clients/restore" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -func TestBackupRestore_Full(t *testing.T) { - t.Parallel() - ctx := context.Background() - - backupDir := t.TempDir() - - // Boot a server, write some data, and take a backup. - l1 := launcher.RunAndSetupNewLauncherOrFail(ctx, t, func(o *launcher.InfluxdOpts) { - o.StoreType = "bolt" - o.Testing = false - o.LogLevel = zap.InfoLevel - }) - originalAuth := *l1.Auth - l1.WritePointsOrFail(t, "m,k=v1 f=100i 946684800000000000\nm,k=v2 f=200i 946684800000000001") - l1.BackupOrFail(t, ctx, backup.Params{Path: backupDir}) - - // Create a new bucket, write data into it (+ the old bucket), and take another backup. - b1 := influxdb.Bucket{OrgID: l1.Org.ID, Name: "bucket2"} - require.NoError(t, l1.BucketService(t).CreateBucket(ctx, &b1)) - l1.WriteOrFail(t, &influxdb.OnboardingResults{ - Org: l1.Org, - Bucket: &b1, - Auth: l1.Auth, - }, "m,k=v1 f=100i 946684800000000005\nm,k=v2 f=200i 946684800000000006") - l1.WritePointsOrFail(t, "m,k=v1 f=100i 946684800000000002\nm,k=v2 f=200i 946684800000000003") - l1.BackupOrFail(t, ctx, backup.Params{Path: backupDir}) - - // Shut down the server. - l1.ShutdownOrFail(t, ctx) - - // Boot up a second server, using a new auth token - l2 := launcher.NewTestLauncher() - l2.RunOrFail(t, ctx, func(o *launcher.InfluxdOpts) { - o.StoreType = "bolt" - o.Testing = false - o.LogLevel = zap.InfoLevel - }) - defer l2.ShutdownOrFail(t, ctx) - - onboardReq := influxdb.OnboardingRequest{ - User: "USER", - Password: "PASSWORD", - Org: "ORG", - Bucket: "BUCKET", - } - onboardRes := l2.OnBoardOrFail(t, &onboardReq) - l2.Org = onboardRes.Org - l2.Bucket = onboardRes.Bucket - l2.Auth = onboardRes.Auth - - // Create a second bucket, write data into it. - b2 := influxdb.Bucket{OrgID: onboardRes.Org.ID, Name: "2bucket"} - require.NoError(t, l2.BucketService(t).CreateBucket(ctx, &b2)) - l2.WriteOrFail(t, &influxdb.OnboardingResults{ - Org: onboardRes.Org, - Bucket: &b2, - Auth: onboardRes.Auth, - }, "m,k=v5 f=100i 946684800000000005\nm,k=v7 f=200i 946684800000000006") - - // Perform a full restore from the previous backups. 
- l2.RestoreOrFail(t, ctx, restore.Params{Path: backupDir, Full: true}) - - // A full restore also restores the original token - l2.Auth = &originalAuth - l2.ResetHTTPCLient() - - // Check that orgs and buckets were reset to match the original server's metadata. - _, err := l2.OrgService(t).FindOrganizationByID(ctx, l2.Org.ID) - require.Equal(t, errors.ENotFound, errors.ErrorCode(err)) - rbkt1, err := l2.BucketService(t).FindBucket(ctx, influxdb.BucketFilter{OrganizationID: &l1.Org.ID, ID: &l1.Bucket.ID}) - require.NoError(t, err) - require.Equal(t, l1.Bucket.Name, rbkt1.Name) - rbkt2, err := l2.BucketService(t).FindBucket(ctx, influxdb.BucketFilter{OrganizationID: &l1.Org.ID, ID: &b1.ID}) - require.NoError(t, err) - require.Equal(t, b1.Name, rbkt2.Name) - _, err = l2.BucketService(t).FindBucket(ctx, influxdb.BucketFilter{OrganizationID: &l2.Org.ID, ID: &b2.ID}) - require.Equal(t, errors.ENotFound, errors.ErrorCode(err)) - - // Check that data was restored to buckets. - q1 := `from(bucket:"BUCKET") |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z)` - exp1 := `,result,table,_start,_stop,_time,_value,_field,_measurement,k` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,100,f,m,v1` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00.000000002Z,100,f,m,v1` + "\r\n" + - `,_result,1,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00.000000001Z,200,f,m,v2` + "\r\n" + - `,_result,1,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00.000000003Z,200,f,m,v2` + "\r\n\r\n" - res1 := l2.FluxQueryOrFail(t, l2.Org, l2.Auth.Token, q1) - require.Equal(t, exp1, res1) - - q2 := `from(bucket:"bucket2") |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z)` - exp2 := `,result,table,_start,_stop,_time,_value,_field,_measurement,k` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00.000000005Z,100,f,m,v1` + "\r\n" + - `,_result,1,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00.000000006Z,200,f,m,v2` + "\r\n\r\n" - res2 := l2.FluxQueryOrFail(t, l2.Org, l2.Auth.Token, q2) - require.Equal(t, exp2, res2) -} - -func TestBackupRestore_Partial(t *testing.T) { - t.Parallel() - ctx := context.Background() - - backupDir := t.TempDir() - - // Boot a server, write some data, and take a backup. - l1 := launcher.RunAndSetupNewLauncherOrFail(ctx, t, func(o *launcher.InfluxdOpts) { - o.StoreType = "bolt" - o.Testing = false - o.LogLevel = zap.InfoLevel - }) - l1.WritePointsOrFail(t, "m,k=v1 f=100i 946684800000000000\nm,k=v2 f=200i 946684800000000001") - l1.BackupOrFail(t, ctx, backup.Params{Path: backupDir}) - - // Create a new bucket, write data into it (+ the old bucket), and take another backup. - b1 := influxdb.Bucket{OrgID: l1.Org.ID, Name: "bucket2"} - require.NoError(t, l1.BucketService(t).CreateBucket(ctx, &b1)) - l1.WriteOrFail(t, &influxdb.OnboardingResults{ - Org: l1.Org, - Bucket: &b1, - Auth: l1.Auth, - }, "m,k=v1 f=100i 946684800000000005\nm,k=v2 f=200i 946684800000000006") - l1.WritePointsOrFail(t, "m,k=v1 f=100i 946684800000000002\nm,k=v2 f=200i 946684800000000003") - l1.BackupOrFail(t, ctx, backup.Params{Path: backupDir}) - - // Shut down the server. - l1.ShutdownOrFail(t, ctx) - - // Boot up a second server. 
- l2 := launcher.NewTestLauncher() - l2.RunOrFail(t, ctx, func(o *launcher.InfluxdOpts) { - o.StoreType = "bolt" - o.Testing = false - o.LogLevel = zap.InfoLevel - }) - defer l2.ShutdownOrFail(t, ctx) - - onboardReq := influxdb.OnboardingRequest{ - User: "USER", - Password: "PASSWORD", - Org: "ORG2", - Bucket: "BUCKET", - } - onboardRes := l2.OnBoardOrFail(t, &onboardReq) - l2.Org = onboardRes.Org - l2.Bucket = onboardRes.Bucket - l2.Auth = onboardRes.Auth - - // Create a second bucket, write data into it. - b2 := influxdb.Bucket{OrgID: onboardRes.Org.ID, Name: "2bucket"} - require.NoError(t, l2.BucketService(t).CreateBucket(ctx, &b2)) - l2.WriteOrFail(t, &influxdb.OnboardingResults{ - Org: onboardRes.Org, - Bucket: &b2, - Auth: onboardRes.Auth, - }, "m,k=v5 f=100i 946684800000000005\nm,k=v7 f=200i 946684800000000006") - - // Perform a partial restore from the previous backups. - l2.RestoreOrFail(t, ctx, restore.Params{Path: backupDir}) - - // Check that buckets from the 1st launcher were restored to the new server. - rbkt1, err := l2.BucketService(t).FindBucket(ctx, influxdb.BucketFilter{Org: &l1.Org.Name, Name: &l1.Bucket.Name}) - require.NoError(t, err) - require.Equal(t, l1.Bucket.Name, rbkt1.Name) - rbkt2, err := l2.BucketService(t).FindBucket(ctx, influxdb.BucketFilter{Org: &l1.Org.Name, Name: &b1.Name}) - require.NoError(t, err) - require.Equal(t, b1.Name, rbkt2.Name) - - // Check that data was restored to buckets. - q1 := `from(bucket:"BUCKET") |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z)` - exp1 := `,result,table,_start,_stop,_time,_value,_field,_measurement,k` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,100,f,m,v1` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00.000000002Z,100,f,m,v1` + "\r\n" + - `,_result,1,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00.000000001Z,200,f,m,v2` + "\r\n" + - `,_result,1,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00.000000003Z,200,f,m,v2` + "\r\n\r\n" - res1 := l2.FluxQueryOrFail(t, l1.Org, l2.Auth.Token, q1) - require.Equal(t, exp1, res1) - - q2 := `from(bucket:"bucket2") |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z)` - exp2 := `,result,table,_start,_stop,_time,_value,_field,_measurement,k` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00.000000005Z,100,f,m,v1` + "\r\n" + - `,_result,1,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00.000000006Z,200,f,m,v2` + "\r\n\r\n" - res2 := l2.FluxQueryOrFail(t, l1.Org, l2.Auth.Token, q2) - require.Equal(t, exp2, res2) - - // Check that the 2nd launcher's buckets weren't touched. 
- newBucket1, err := l2.BucketService(t).FindBucket(ctx, influxdb.BucketFilter{OrganizationID: &l2.Org.ID, ID: &l2.Bucket.ID}) - require.NoError(t, err) - require.Equal(t, l2.Bucket.Name, newBucket1.Name) - newBucket2, err := l2.BucketService(t).FindBucket(ctx, influxdb.BucketFilter{OrganizationID: &l2.Org.ID, ID: &b2.ID}) - require.NoError(t, err) - require.Equal(t, b2.Name, newBucket2.Name) - - q3 := `from(bucket:"2bucket") |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z)` - exp3 := `,result,table,_start,_stop,_time,_value,_field,_measurement,k` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00.000000005Z,100,f,m,v5` + "\r\n" + - `,_result,1,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00.000000006Z,200,f,m,v7` + "\r\n\r\n" - res3 := l2.FluxQueryOrFail(t, l2.Org, l2.Auth.Token, q3) - require.Equal(t, exp3, res3) -} diff --git a/cmd/influxd/launcher/cmd.go b/cmd/influxd/launcher/cmd.go deleted file mode 100644 index 35a97601349..00000000000 --- a/cmd/influxd/launcher/cmd.go +++ /dev/null @@ -1,705 +0,0 @@ -package launcher - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - "time" - - "github.com/influxdata/influxdb/v2/bolt" - "github.com/influxdata/influxdb/v2/fluxinit" - "github.com/influxdata/influxdb/v2/internal/fs" - "github.com/influxdata/influxdb/v2/kit/cli" - "github.com/influxdata/influxdb/v2/kit/signals" - influxlogger "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/pprof" - "github.com/influxdata/influxdb/v2/sqlite" - "github.com/influxdata/influxdb/v2/storage" - "github.com/influxdata/influxdb/v2/v1/coordinator" - "github.com/influxdata/influxdb/v2/vault" - "github.com/spf13/cobra" - "github.com/spf13/viper" - "go.uber.org/zap/zapcore" -) - -func errInvalidFlags(flags []string, configFile string) error { - return fmt.Errorf( - "error: found flags from an InfluxDB 1.x configuration in config file at %s - see https://docs.influxdata.com/influxdb/latest/reference/config-options/ for flags supported on this version of InfluxDB: %s", - configFile, - strings.Join(flags, ","), - ) -} - -// NewInfluxdCommand constructs the root of the influxd CLI, along with a `run` subcommand. -// The `run` subcommand is set as the default to execute. -func NewInfluxdCommand(ctx context.Context, v *viper.Viper) (*cobra.Command, error) { - o := NewOpts(v) - cliOpts := o.BindCliOpts() - - prog := cli.Program{ - Name: "influxd", - Run: cmdRunE(ctx, o), - } - cmd, err := cli.NewCommand(o.Viper, &prog) - if err != nil { - return nil, err - } - - // Error out if invalid flags are found in the config file. This may indicate trying to launch 2.x using a 1.x config. 
- if invalidFlags := invalidFlags(v); len(invalidFlags) > 0 { - return nil, errInvalidFlags(invalidFlags, v.ConfigFileUsed()) - } - - runCmd := &cobra.Command{ - Use: "run", - RunE: cmd.RunE, - Args: cobra.NoArgs, - } - for _, c := range []*cobra.Command{cmd, runCmd} { - setCmdDescriptions(c) - if err := cli.BindOptions(o.Viper, c, cliOpts); err != nil { - return nil, err - } - } - cmd.AddCommand(runCmd) - printCmd, err := NewInfluxdPrintConfigCommand(v, cliOpts) - if err != nil { - return nil, err - } - cmd.AddCommand(printCmd) - - return cmd, nil -} - -func invalidFlags(v *viper.Viper) []string { - var invalid []string - for _, k := range v.AllKeys() { - if inOneDotExFlagsList(k) { - invalid = append(invalid, k) - } - } - - return invalid -} - -func setCmdDescriptions(cmd *cobra.Command) { - cmd.Short = "Start the influxd server" - cmd.Long = ` - Start up the daemon configured with flags/env vars/config file. - - The order of precedence for config options are as follows (1 highest, 3 lowest): - 1. flags - 2. env vars - 3. config file - - A config file can be provided via the INFLUXD_CONFIG_PATH env var. If a file is - not provided via an env var, influxd will look in the current directory for a - config.{json|toml|yaml|yml} file. If one does not exist, then it will continue unchanged. -` -} - -func cmdRunE(ctx context.Context, o *InfluxdOpts) func() error { - return func() error { - // Set this as early as possible, since it affects global profiling rates. - pprof.SetGlobalProfiling(!o.ProfilingDisabled) - - fluxinit.FluxInit() - - l := NewLauncher() - - // Create top level logger - logconf := &influxlogger.Config{ - Format: "auto", - Level: o.LogLevel, - } - logger, err := logconf.New(os.Stdout) - if err != nil { - return err - } - l.log = logger - - // Start the launcher and wait for it to exit on SIGINT or SIGTERM. - if err := l.run(signals.WithStandardSignals(ctx), o); err != nil { - return err - } - <-l.Done() - - // Tear down the launcher, allowing it a few seconds to finish any - // in-progress requests. - shutdownCtx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - return l.Shutdown(shutdownCtx) - } -} - -// InfluxdOpts captures all arguments for running the InfluxDB server. -type InfluxdOpts struct { - Testing bool - TestingAlwaysAllowSetup bool - - LogLevel zapcore.Level - FluxLogEnabled bool - TracingType string - ReportingDisabled bool - - AssetsPath string - BoltPath string - SqLitePath string - EnginePath string - - StoreType string - SecretStore string - VaultConfig vault.Config - - InstanceID string - - HttpBindAddress string - HttpReadHeaderTimeout time.Duration - HttpReadTimeout time.Duration - HttpWriteTimeout time.Duration - HttpIdleTimeout time.Duration - HttpTLSCert string - HttpTLSKey string - HttpTLSMinVersion string - HttpTLSStrictCiphers bool - SessionLength int // in minutes - SessionRenewDisabled bool - - ProfilingDisabled bool - MetricsDisabled bool - UIDisabled bool - - NatsPort int - NatsMaxPayloadBytes int - - NoTasks bool - FeatureFlags map[string]string - - // Query options. - ConcurrencyQuota int32 - InitialMemoryBytesQuotaPerQuery int64 - MemoryBytesQuotaPerQuery int64 - MaxMemoryBytes int64 - QueueSize int32 - CoordinatorConfig coordinator.Config - - // Storage options. - StorageConfig storage.Config - - Viper *viper.Viper - - HardeningEnabled bool -} - -// NewOpts constructs options with default values. 
-func NewOpts(viper *viper.Viper) *InfluxdOpts { - dir, err := fs.InfluxDir() - if err != nil { - panic(fmt.Errorf("failed to determine influx directory: %v", err)) - } - - return &InfluxdOpts{ - Viper: viper, - StorageConfig: storage.NewConfig(), - CoordinatorConfig: coordinator.NewConfig(), - - LogLevel: zapcore.InfoLevel, - FluxLogEnabled: false, - ReportingDisabled: false, - - BoltPath: filepath.Join(dir, bolt.DefaultFilename), - SqLitePath: filepath.Join(dir, sqlite.DefaultFilename), - EnginePath: filepath.Join(dir, "engine"), - - HttpBindAddress: ":8086", - HttpReadHeaderTimeout: 10 * time.Second, - HttpIdleTimeout: 3 * time.Minute, - HttpTLSMinVersion: "1.2", - HttpTLSStrictCiphers: false, - SessionLength: 60, // 60 minutes - SessionRenewDisabled: false, - - ProfilingDisabled: false, - MetricsDisabled: false, - UIDisabled: false, - - StoreType: DiskStore, - SecretStore: BoltStore, - - NatsPort: 0, - NatsMaxPayloadBytes: 0, - - NoTasks: false, - - ConcurrencyQuota: 1024, - InitialMemoryBytesQuotaPerQuery: 0, - MemoryBytesQuotaPerQuery: 0, - MaxMemoryBytes: 0, - QueueSize: 1024, - - Testing: false, - TestingAlwaysAllowSetup: false, - - HardeningEnabled: false, - } -} - -// BindCliOpts returns a list of options which can be added to a cobra command -// in order to set options over the CLI. -func (o *InfluxdOpts) BindCliOpts() []cli.Opt { - return []cli.Opt{ - { - DestP: &o.LogLevel, - Flag: "log-level", - Default: o.LogLevel, - Desc: "supported log levels are debug, info, and error", - }, - { - DestP: &o.FluxLogEnabled, - Flag: "flux-log-enabled", - Default: o.FluxLogEnabled, - Desc: "enables detailed logging for flux queries", - }, - { - DestP: &o.TracingType, - Flag: "tracing-type", - Desc: fmt.Sprintf("supported tracing types are %s, %s", LogTracing, JaegerTracing), - }, - { - DestP: &o.BoltPath, - Flag: "bolt-path", - Default: o.BoltPath, - Desc: "path to boltdb database", - }, - { - DestP: &o.SqLitePath, - Flag: "sqlite-path", - Desc: fmt.Sprintf("path to sqlite database. 
if not set, sqlite database will be stored in the bolt-path directory as %q.", sqlite.DefaultFilename), - }, - { - DestP: &o.AssetsPath, - Flag: "assets-path", - Desc: "override default assets by serving from a specific directory (developer mode)", - }, - { - DestP: &o.StoreType, - Flag: "store", - Default: o.StoreType, - Desc: "backing store for REST resources (disk or memory)", - }, - { - DestP: &o.Testing, - Flag: "e2e-testing", - Default: o.Testing, - Desc: "add /debug/flush endpoint to clear stores; used for end-to-end tests", - }, - { - DestP: &o.TestingAlwaysAllowSetup, - Flag: "testing-always-allow-setup", - Default: o.TestingAlwaysAllowSetup, - Desc: "ensures the /api/v2/setup endpoint always returns true to allow onboarding", - }, - { - DestP: &o.EnginePath, - Flag: "engine-path", - Default: o.EnginePath, - Desc: "path to persistent engine files", - }, - { - DestP: &o.SecretStore, - Flag: "secret-store", - Default: o.SecretStore, - Desc: "data store for secrets (bolt or vault)", - }, - { - DestP: &o.ReportingDisabled, - Flag: "reporting-disabled", - Default: o.ReportingDisabled, - Desc: "disable sending telemetry data to https://telemetry.influxdata.com every 8 hours", - }, - { - DestP: &o.SessionLength, - Flag: "session-length", - Default: o.SessionLength, - Desc: "ttl in minutes for newly created sessions", - }, - { - DestP: &o.SessionRenewDisabled, - Flag: "session-renew-disabled", - Default: o.SessionRenewDisabled, - Desc: "disables automatically extending session ttl on request", - }, - { - DestP: &o.VaultConfig.Address, - Flag: "vault-addr", - Desc: "address of the Vault server expressed as a URL and port, for example: https://127.0.0.1:8200/.", - }, - { - DestP: &o.VaultConfig.ClientTimeout, - Flag: "vault-client-timeout", - Desc: "timeout variable. The default value is 60s.", - }, - { - DestP: &o.VaultConfig.MaxRetries, - Flag: "vault-max-retries", - Desc: "maximum number of retries when a 5xx error code is encountered. The default is 2, for three total attempts. Set this to 0 or less to disable retrying.", - }, - { - DestP: &o.VaultConfig.CACert, - Flag: "vault-cacert", - Desc: "path to a PEM-encoded CA certificate file on the local disk. This file is used to verify the Vault server's SSL certificate. This environment variable takes precedence over VAULT_CAPATH.", - }, - { - DestP: &o.VaultConfig.CAPath, - Flag: "vault-capath", - Desc: "path to a directory of PEM-encoded CA certificate files on the local disk. These certificates are used to verify the Vault server's SSL certificate.", - }, - { - DestP: &o.VaultConfig.ClientCert, - Flag: "vault-client-cert", - Desc: "path to a PEM-encoded client certificate on the local disk. This file is used for TLS communication with the Vault server.", - }, - { - DestP: &o.VaultConfig.ClientKey, - Flag: "vault-client-key", - Desc: "path to an unencrypted, PEM-encoded private key on disk which corresponds to the matching client certificate.", - }, - { - DestP: &o.VaultConfig.InsecureSkipVerify, - Flag: "vault-skip-verify", - Desc: "do not verify Vault's presented certificate before communicating with it. 
Setting this variable is not recommended and voids Vault's security model.", - }, - { - DestP: &o.VaultConfig.TLSServerName, - Flag: "vault-tls-server-name", - Desc: "name to use as the SNI host when connecting via TLS.", - }, - { - DestP: &o.VaultConfig.Token, - Flag: "vault-token", - Desc: "vault authentication token", - }, - - // HTTP options - { - DestP: &o.HttpBindAddress, - Flag: "http-bind-address", - Default: o.HttpBindAddress, - Desc: "bind address for the REST HTTP API", - }, - { - DestP: &o.HttpReadHeaderTimeout, - Flag: "http-read-header-timeout", - Default: o.HttpReadHeaderTimeout, - Desc: "max duration the server should spend trying to read HTTP headers for new requests. Set to 0 for no timeout", - }, - { - DestP: &o.HttpReadTimeout, - Flag: "http-read-timeout", - Default: o.HttpReadTimeout, - Desc: "max duration the server should spend trying to read the entirety of new requests. Set to 0 for no timeout", - }, - { - DestP: &o.HttpWriteTimeout, - Flag: "http-write-timeout", - Default: o.HttpWriteTimeout, - Desc: "max duration the server should spend on processing+responding to requests. Set to 0 for no timeout", - }, - { - DestP: &o.HttpIdleTimeout, - Flag: "http-idle-timeout", - Default: o.HttpIdleTimeout, - Desc: "max duration the server should keep established connections alive while waiting for new requests. Set to 0 for no timeout", - }, - { - DestP: &o.HttpTLSCert, - Flag: "tls-cert", - Desc: "TLS certificate for HTTPs", - }, - { - DestP: &o.HttpTLSKey, - Flag: "tls-key", - Desc: "TLS key for HTTPs", - }, - { - DestP: &o.HttpTLSMinVersion, - Flag: "tls-min-version", - Default: o.HttpTLSMinVersion, - Desc: "Minimum accepted TLS version", - }, - { - DestP: &o.HttpTLSStrictCiphers, - Flag: "tls-strict-ciphers", - Default: o.HttpTLSStrictCiphers, - Desc: "Restrict accept ciphers to: ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, ECDHE_RSA_WITH_AES_128_GCM_SHA256, ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, ECDHE_RSA_WITH_AES_256_GCM_SHA384, ECDHE_ECDSA_WITH_CHACHA20_POLY1305, ECDHE_RSA_WITH_CHACHA20_POLY1305", - }, - - { - DestP: &o.NoTasks, - Flag: "no-tasks", - Default: o.NoTasks, - Desc: "disables the task scheduler", - }, - { - DestP: &o.ConcurrencyQuota, - Flag: "query-concurrency", - Default: o.ConcurrencyQuota, - Desc: "the number of queries that are allowed to execute concurrently. Set to 0 to allow an unlimited number of concurrent queries", - }, - { - DestP: &o.InitialMemoryBytesQuotaPerQuery, - Flag: "query-initial-memory-bytes", - Default: o.InitialMemoryBytesQuotaPerQuery, - Desc: "the initial number of bytes allocated for a query when it is started. If this is unset, then query-memory-bytes will be used", - }, - { - DestP: &o.MemoryBytesQuotaPerQuery, - Flag: "query-memory-bytes", - Default: o.MemoryBytesQuotaPerQuery, - Desc: "maximum number of bytes a query is allowed to use at any given time. This must be greater or equal to query-initial-memory-bytes", - }, - { - DestP: &o.MaxMemoryBytes, - Flag: "query-max-memory-bytes", - Default: o.MaxMemoryBytes, - Desc: "the maximum amount of memory used for queries. Can only be set when query-concurrency is limited. If this is unset, then this number is query-concurrency * query-memory-bytes", - }, - { - DestP: &o.QueueSize, - Flag: "query-queue-size", - Default: o.QueueSize, - Desc: "the number of queries that are allowed to be awaiting execution before new queries are rejected. 
Must be > 0 if query-concurrency is not unlimited", - }, - { - DestP: &o.FeatureFlags, - Flag: "feature-flags", - Desc: "feature flag overrides", - }, - { - DestP: &o.InstanceID, - Flag: "instance-id", - Default: "", - Desc: "add an instance id for replications to prevent collisions and allow querying by edge node", - }, - - // storage configuration - { - DestP: &o.StorageConfig.WriteTimeout, - Flag: "storage-write-timeout", - Default: o.StorageConfig.WriteTimeout, - Desc: "The max amount of time the engine will spend completing a write request before cancelling with a timeout.", - }, - { - DestP: &o.StorageConfig.Data.WALFsyncDelay, - Flag: "storage-wal-fsync-delay", - Desc: "The amount of time that a write will wait before fsyncing. A duration greater than 0 can be used to batch up multiple fsync calls. This is useful for slower disks or when WAL write contention is seen.", - }, - { - DestP: &o.StorageConfig.Data.WALMaxConcurrentWrites, - Flag: "storage-wal-max-concurrent-writes", - Desc: "The max number of writes that will attempt to write to the WAL at a time. (default * 2)", - }, - { - DestP: &o.StorageConfig.Data.WALMaxWriteDelay, - Flag: "storage-wal-max-write-delay", - Default: o.StorageConfig.Data.WALMaxWriteDelay, - Desc: "The max amount of time a write will wait when the WAL already has `storage-wal-max-concurrent-writes` active writes. Set to 0 to disable the timeout.", - }, - { - DestP: &o.StorageConfig.Data.ValidateKeys, - Flag: "storage-validate-keys", - Desc: "Validates incoming writes to ensure keys only have valid unicode characters.", - }, - { - DestP: &o.StorageConfig.Data.SkipFieldSizeValidation, - Flag: "storage-no-validate-field-size", - Desc: "Skip field-size validation on incoming writes.", - }, - { - DestP: &o.StorageConfig.Data.CacheMaxMemorySize, - Flag: "storage-cache-max-memory-size", - Desc: "The maximum size a shard's cache can reach before it starts rejecting writes.", - }, - { - DestP: &o.StorageConfig.Data.CacheSnapshotMemorySize, - Flag: "storage-cache-snapshot-memory-size", - Desc: "The size at which the engine will snapshot the cache and write it to a TSM file, freeing up memory.", - }, - { - DestP: &o.StorageConfig.Data.CacheSnapshotWriteColdDuration, - Flag: "storage-cache-snapshot-write-cold-duration", - Desc: "The length of time at which the engine will snapshot the cache and write it to a new TSM file if the shard hasn't received writes or deletes.", - }, - { - DestP: &o.StorageConfig.Data.CompactFullWriteColdDuration, - Flag: "storage-compact-full-write-cold-duration", - Desc: "The duration at which the engine will compact all TSM files in a shard if it hasn't received a write or delete.", - }, - { - DestP: &o.StorageConfig.Data.CompactThroughputBurst, - Flag: "storage-compact-throughput-burst", - Desc: "The rate limit in bytes per second that we will allow TSM compactions to write to disk.", - }, - // limits - { - DestP: &o.StorageConfig.Data.MaxConcurrentCompactions, - Flag: "storage-max-concurrent-compactions", - Desc: "The maximum number of concurrent full and level compactions that can run at one time. A value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime. Any number greater than 0 limits compactions to that value. This setting does not apply to cache snapshotting.", - }, - { - DestP: &o.StorageConfig.Data.MaxIndexLogFileSize, - Flag: "storage-max-index-log-file-size", - Desc: "The threshold, in bytes, when an index write-ahead log file will compact into an index file. 
Lower sizes will cause log files to be compacted more quickly and result in lower heap usage at the expense of write throughput.", - }, - { - DestP: &o.StorageConfig.Data.SeriesIDSetCacheSize, - Flag: "storage-series-id-set-cache-size", - Desc: "The size of the internal cache used in the TSI index to store previously calculated series results.", - }, - { - DestP: &o.StorageConfig.Data.SeriesFileMaxConcurrentSnapshotCompactions, - Flag: "storage-series-file-max-concurrent-snapshot-compactions", - Desc: "The maximum number of concurrent snapshot compactions that can be running at one time across all series partitions in a database.", - }, - { - DestP: &o.StorageConfig.Data.TSMWillNeed, - Flag: "storage-tsm-use-madv-willneed", - Desc: "Controls whether we hint to the kernel that we intend to page in mmap'd sections of TSM files.", - }, - { - DestP: &o.StorageConfig.RetentionService.CheckInterval, - Flag: "storage-retention-check-interval", - Desc: "The interval of time when retention policy enforcement checks run.", - }, - { - DestP: &o.StorageConfig.PrecreatorConfig.CheckInterval, - Flag: "storage-shard-precreator-check-interval", - Desc: "The interval of time when the check to pre-create new shards runs.", - }, - { - DestP: &o.StorageConfig.PrecreatorConfig.AdvancePeriod, - Flag: "storage-shard-precreator-advance-period", - Desc: "The default period ahead of the endtime of a shard group that its successor group is created.", - }, - - // InfluxQL Coordinator Config - { - DestP: &o.CoordinatorConfig.MaxSelectPointN, - Flag: "influxql-max-select-point", - Desc: "The maximum number of points a SELECT can process. A value of 0 will make the maximum point count unlimited. This will only be checked every second so queries will not be aborted immediately when hitting the limit.", - }, - { - DestP: &o.CoordinatorConfig.MaxSelectSeriesN, - Flag: "influxql-max-select-series", - Desc: "The maximum number of series a SELECT can run. A value of 0 will make the maximum series count unlimited.", - }, - { - DestP: &o.CoordinatorConfig.MaxSelectBucketsN, - Flag: "influxql-max-select-buckets", - Desc: "The maximum number of group by time bucket a SELECT can create. A value of zero will max the maximum number of buckets unlimited.", - }, - - // NATS config - { - DestP: &o.NatsPort, - Flag: "nats-port", - Desc: "deprecated: nats has been replaced", - Default: o.NatsPort, - Hidden: true, - }, - { - DestP: &o.NatsMaxPayloadBytes, - Flag: "nats-max-payload-bytes", - Desc: "deprecated: nats has been replaced", - Default: o.NatsMaxPayloadBytes, - Hidden: true, - }, - - // Pprof config - { - DestP: &o.ProfilingDisabled, - Flag: "pprof-disabled", - Desc: "Don't expose debugging information over HTTP at /debug/pprof", - Default: o.ProfilingDisabled, - }, - - // Metrics config - { - DestP: &o.MetricsDisabled, - Flag: "metrics-disabled", - Desc: "Don't expose metrics over HTTP at /metrics", - Default: o.MetricsDisabled, - }, - // UI Config - { - DestP: &o.UIDisabled, - Flag: "ui-disabled", - Default: o.UIDisabled, - Desc: "Disable the InfluxDB UI", - }, - - // hardening options - // --hardening-enabled is meant to enable all hardending - // options in one go. Today it enables the IP validator for - // flux and pkger templates HTTP requests. In the future, - // --hardening-enabled might be used to enable other security - // features, at which point we can add per-feature flags so - // that users can either opt into all features - // (--hardening-enabled) or to precisely the features they - // require. 
Since today there is but one feature, there is no - // need to introduce --hardening-ip-validation-enabled (or - // similar). - { - DestP: &o.HardeningEnabled, - Flag: "hardening-enabled", - Default: o.HardeningEnabled, - Desc: "enable hardening options (disallow private IPs within flux and templates HTTP requests)", - }, - } -} - -var ( - oneDotExFlagsList = []string{ - // "reporting-disabled" is valid in both 1x and 2x configs - "bind-address", // global setting is called "http-bind-address" on 2x - - // Remaining flags, when parsed from a 1.x config file, will be in sub-sections prefixed by these headers: - "collectd.", - "continuous_queries.", - "coordinator.", - "data.", - "graphite.", - "http.", - "logging.", - "meta.", - "monitor.", - "opentsdb.", - "retention.", - "shard-precreation.", - "subscriber.", - "tls.", - "udp.", - } -) - -// compareFlags checks if a given flag from the read configuration matches one from the list. If the value from the list -// ends in a ".", the given flag is check for that prefix. Otherwise, the flag is checked for equality. -func compareFlags(key, fromList string) bool { - if strings.HasSuffix(fromList, ".") { - return strings.HasPrefix(key, fromList) - } - - return strings.EqualFold(key, fromList) -} - -func inOneDotExFlagsList(key string) bool { - for _, f := range oneDotExFlagsList { - if compareFlags(key, f) { - return true - } - } - - return false -} diff --git a/cmd/influxd/launcher/cmd_test.go b/cmd/influxd/launcher/cmd_test.go deleted file mode 100644 index 167fa85f608..00000000000 --- a/cmd/influxd/launcher/cmd_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package launcher - -import ( - "strings" - "testing" - - "github.com/spf13/viper" - "github.com/stretchr/testify/require" -) - -func TestInvalidFlags(t *testing.T) { - t.Parallel() - - v2config := ` -bolt-path = "/db/.influxdbv2/influxd.bolt" -engine-path = "/db/.influxdbv2/engine" -http-bind-address = ":8086" -` - - v1config := ` -reporting-disabled = false - -# Bind address to use for the RPC service for backup and restore. 
-bind-address = "127.0.0.1:8088" - -[http] - flux-enabled = false - -[data] - index-version = "inmem"` - - tests := []struct { - name string - config string - want []string - }{ - { - name: "empty config", - config: "", - want: []string(nil), - }, - { - name: "v2 config", - config: v2config, - want: []string(nil), - }, - { - name: "v1 config", - config: v1config, - want: []string{"http.flux-enabled", "data.index-version", "bind-address"}, - }, - { - name: "mixed config", - config: v2config + v1config, - want: []string{"http.flux-enabled", "data.index-version", "bind-address"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := strings.NewReader(tt.config) - v := viper.GetViper() - v.SetConfigType("toml") - require.NoError(t, v.ReadConfig(r)) - got := invalidFlags(v) - require.ElementsMatch(t, tt.want, got) - }) - } -} diff --git a/cmd/influxd/launcher/engine.go b/cmd/influxd/launcher/engine.go deleted file mode 100644 index ab670262bb4..00000000000 --- a/cmd/influxd/launcher/engine.go +++ /dev/null @@ -1,201 +0,0 @@ -package launcher - -import ( - "context" - "io" - "os" - "sync" - "time" - - "github.com/influxdata/influxql" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/storage" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -var _ Engine = (*storage.Engine)(nil) - -// Engine defines the time-series storage engine. Wraps *storage.Engine -// to facilitate testing. -type Engine interface { - influxdb.DeleteService - storage.PointsWriter - storage.EngineSchema - prom.PrometheusCollector - influxdb.BackupService - influxdb.RestoreService - - SeriesCardinality(ctx context.Context, bucketID platform.ID) int64 - - TSDBStore() storage.TSDBStore - MetaClient() storage.MetaClient - - WithLogger(log *zap.Logger) - Open(context.Context) error - Close() error -} - -var _ Engine = (*TemporaryEngine)(nil) -var _ http.Flusher = (*TemporaryEngine)(nil) - -// TemporaryEngine creates a time-series storage engine backed -// by a temporary directory that is removed on Close. -type TemporaryEngine struct { - path string - config storage.Config - options []storage.Option - - mu sync.Mutex - opened bool - - engine *storage.Engine - tsdbStore temporaryTSDBStore - - log *zap.Logger -} - -// NewTemporaryEngine creates a new engine that places the storage engine files into -// a temporary directory; used for testing. -func NewTemporaryEngine(c storage.Config, options ...storage.Option) *TemporaryEngine { - return &TemporaryEngine{ - config: c, - options: options, - log: zap.NewNop(), - } -} - -// Open creates a temporary directory and opens the engine. -func (t *TemporaryEngine) Open(ctx context.Context) error { - t.mu.Lock() - defer t.mu.Unlock() - - if t.opened { - return nil - } - - path, err := os.MkdirTemp("", "e2e") - if err != nil { - return err - } - - t.path = path - t.engine = storage.NewEngine(path, t.config, t.options...) - t.engine.WithLogger(t.log) - - if err := t.engine.Open(ctx); err != nil { - _ = os.RemoveAll(path) - return err - } - - t.tsdbStore.TSDBStore = t.engine.TSDBStore() - - t.opened = true - return nil -} - -// Close will remove the directory containing the time-series files. 
-func (t *TemporaryEngine) Close() error {
-	t.mu.Lock()
-	defer t.mu.Unlock()
-
-	t.opened = false
-	err := t.engine.Close()
-	_ = os.RemoveAll(t.path)
-	return err
-}
-
-// WritePoints stores points into the storage engine.
-func (t *TemporaryEngine) WritePoints(ctx context.Context, orgID platform.ID, bucketID platform.ID, points []models.Point) error {
-	return t.engine.WritePoints(ctx, orgID, bucketID, points)
-}
-
-// SeriesCardinality returns the number of series in the engine.
-func (t *TemporaryEngine) SeriesCardinality(ctx context.Context, bucketID platform.ID) int64 {
-	return t.engine.SeriesCardinality(ctx, bucketID)
-}
-
-// DeleteBucketRangePredicate deletes data from a bucket that matches the given time range and predicate.
-func (t *TemporaryEngine) DeleteBucketRangePredicate(ctx context.Context, orgID, bucketID platform.ID, min, max int64, pred influxdb.Predicate, measurement influxql.Expr) error {
-	return t.engine.DeleteBucketRangePredicate(ctx, orgID, bucketID, min, max, pred, measurement)
-}
-
-func (t *TemporaryEngine) CreateBucket(ctx context.Context, b *influxdb.Bucket) error {
-	return t.engine.CreateBucket(ctx, b)
-}
-
-func (t *TemporaryEngine) UpdateBucketRetentionPolicy(ctx context.Context, bucketID platform.ID, upd *influxdb.BucketUpdate) error {
-	return t.engine.UpdateBucketRetentionPolicy(ctx, bucketID, upd)
-}
-
-// DeleteBucket deletes a bucket from the time-series data.
-func (t *TemporaryEngine) DeleteBucket(ctx context.Context, orgID, bucketID platform.ID) error {
-	return t.engine.DeleteBucket(ctx, orgID, bucketID)
-}
-
-// WithLogger sets the logger on the engine. It must be called before Open.
-func (t *TemporaryEngine) WithLogger(log *zap.Logger) {
-	t.log = log.With(zap.String("service", "temporary_engine"))
-}
-
-// PrometheusCollectors returns all the prometheus collectors associated with
-// the engine and its components.
-func (t *TemporaryEngine) PrometheusCollectors() []prometheus.Collector {
-	return t.engine.PrometheusCollectors()
-}
-
-// Flush will remove the time-series files and re-open the engine.
-func (t *TemporaryEngine) Flush(ctx context.Context) { - if err := t.Close(); err != nil { - t.log.Fatal("unable to close engine", zap.Error(err)) - } - - if err := t.Open(ctx); err != nil { - t.log.Fatal("unable to open engine", zap.Error(err)) - } -} - -func (t *TemporaryEngine) BackupKVStore(ctx context.Context, w io.Writer) error { - return t.engine.BackupKVStore(ctx, w) -} - -func (t *TemporaryEngine) RLockKVStore() { - t.engine.RLockKVStore() -} - -func (t *TemporaryEngine) RUnlockKVStore() { - t.engine.RUnlockKVStore() -} - -func (t *TemporaryEngine) RestoreKVStore(ctx context.Context, r io.Reader) error { - return t.engine.RestoreKVStore(ctx, r) -} - -func (t *TemporaryEngine) RestoreBucket(ctx context.Context, id platform.ID, dbi []byte) (map[uint64]uint64, error) { - return t.engine.RestoreBucket(ctx, id, dbi) -} - -func (t *TemporaryEngine) BackupShard(ctx context.Context, w io.Writer, shardID uint64, since time.Time) error { - return t.engine.BackupShard(ctx, w, shardID, since) -} - -func (t *TemporaryEngine) RestoreShard(ctx context.Context, shardID uint64, r io.Reader) error { - return t.engine.RestoreShard(ctx, shardID, r) -} - -func (t *TemporaryEngine) TSDBStore() storage.TSDBStore { - return &t.tsdbStore -} - -func (t *TemporaryEngine) MetaClient() storage.MetaClient { - return t.engine.MetaClient() -} - -type temporaryTSDBStore struct { - storage.TSDBStore -} diff --git a/cmd/influxd/launcher/flusher.go b/cmd/influxd/launcher/flusher.go deleted file mode 100644 index 890582626aa..00000000000 --- a/cmd/influxd/launcher/flusher.go +++ /dev/null @@ -1,15 +0,0 @@ -package launcher - -import ( - "context" - - "github.com/influxdata/influxdb/v2/http" -) - -type flushers []http.Flusher - -func (f flushers) Flush(ctx context.Context) { - for _, flusher := range []http.Flusher(f) { - flusher.Flush(ctx) - } -} diff --git a/cmd/influxd/launcher/launcher.go b/cmd/influxd/launcher/launcher.go deleted file mode 100644 index 5fd9a2987be..00000000000 --- a/cmd/influxd/launcher/launcher.go +++ /dev/null @@ -1,1290 +0,0 @@ -package launcher - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "net" - nethttp "net/http" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/dependencies/testing" - "github.com/influxdata/flux/dependencies/url" - "github.com/influxdata/flux/execute/executetest" - platform "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/annotations" - annotationTransport "github.com/influxdata/influxdb/v2/annotations/transport" - "github.com/influxdata/influxdb/v2/authorization" - "github.com/influxdata/influxdb/v2/authorizer" - "github.com/influxdata/influxdb/v2/backup" - "github.com/influxdata/influxdb/v2/bolt" - "github.com/influxdata/influxdb/v2/checks" - "github.com/influxdata/influxdb/v2/dashboards" - dashboardTransport "github.com/influxdata/influxdb/v2/dashboards/transport" - "github.com/influxdata/influxdb/v2/dbrp" - "github.com/influxdata/influxdb/v2/gather" - "github.com/influxdata/influxdb/v2/http" - iqlcontrol "github.com/influxdata/influxdb/v2/influxql/control" - iqlquery "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/internal/resource" - "github.com/influxdata/influxdb/v2/kit/feature" - overrideflagger 
"github.com/influxdata/influxdb/v2/kit/feature/override" - "github.com/influxdata/influxdb/v2/kit/metric" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/kit/tracing" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/label" - "github.com/influxdata/influxdb/v2/notebooks" - notebookTransport "github.com/influxdata/influxdb/v2/notebooks/transport" - endpointservice "github.com/influxdata/influxdb/v2/notification/endpoint/service" - ruleservice "github.com/influxdata/influxdb/v2/notification/rule/service" - "github.com/influxdata/influxdb/v2/pkger" - infprom "github.com/influxdata/influxdb/v2/prometheus" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/control" - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" - "github.com/influxdata/influxdb/v2/remotes" - remotesTransport "github.com/influxdata/influxdb/v2/remotes/transport" - "github.com/influxdata/influxdb/v2/replications" - replicationTransport "github.com/influxdata/influxdb/v2/replications/transport" - "github.com/influxdata/influxdb/v2/secret" - "github.com/influxdata/influxdb/v2/session" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/source" - "github.com/influxdata/influxdb/v2/sqlite" - sqliteMigrations "github.com/influxdata/influxdb/v2/sqlite/migrations" - "github.com/influxdata/influxdb/v2/storage" - storageflux "github.com/influxdata/influxdb/v2/storage/flux" - "github.com/influxdata/influxdb/v2/storage/readservice" - taskbackend "github.com/influxdata/influxdb/v2/task/backend" - "github.com/influxdata/influxdb/v2/task/backend/coordinator" - "github.com/influxdata/influxdb/v2/task/backend/executor" - "github.com/influxdata/influxdb/v2/task/backend/middleware" - "github.com/influxdata/influxdb/v2/task/backend/scheduler" - "github.com/influxdata/influxdb/v2/task/taskmodel" - telegrafservice "github.com/influxdata/influxdb/v2/telegraf/service" - "github.com/influxdata/influxdb/v2/telemetry" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/prometheus/client_golang/prometheus/collectors" - - // needed for tsm1 - _ "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - - // needed for tsi1 - _ "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" - authv1 "github.com/influxdata/influxdb/v2/v1/authorization" - iqlcoordinator "github.com/influxdata/influxdb/v2/v1/coordinator" - "github.com/influxdata/influxdb/v2/v1/services/meta" - storage2 "github.com/influxdata/influxdb/v2/v1/services/storage" - "github.com/influxdata/influxdb/v2/vault" - pzap "github.com/influxdata/influxdb/v2/zap" - "github.com/opentracing/opentracing-go" - jaegerconfig "github.com/uber/jaeger-client-go/config" - "go.uber.org/zap" -) - -const ( - // DiskStore stores all REST resources to disk in boltdb 
and sqlite. - DiskStore = "disk" - // BoltStore also stores all REST resources to disk in boltdb and sqlite. Kept for backwards-compatibility. - BoltStore = "bolt" - // MemoryStore stores all REST resources in memory (useful for testing). - MemoryStore = "memory" - - // LogTracing enables tracing via zap logs - LogTracing = "log" - // JaegerTracing enables tracing via the Jaeger client library - JaegerTracing = "jaeger" -) - -type labeledCloser struct { - label string - closer func(context.Context) error -} - -// Launcher represents the main program execution. -type Launcher struct { - wg sync.WaitGroup - cancel func() - doneChan <-chan struct{} - closers []labeledCloser - flushers flushers - - flagger feature.Flagger - - kvStore kv.Store - kvService *kv.Service - sqlStore *sqlite.SqlStore - - // storage engine - engine Engine - - // InfluxQL query engine - queryController *control.Controller - - httpPort int - tlsEnabled bool - - scheduler stoppingScheduler - executor *executor.Executor - - log *zap.Logger - reg *prom.Registry - - apibackend *http.APIBackend -} - -type stoppingScheduler interface { - scheduler.Scheduler - Stop() -} - -// NewLauncher returns a new instance of Launcher with a no-op logger. -func NewLauncher() *Launcher { - return &Launcher{ - log: zap.NewNop(), - } -} - -// Registry returns the prometheus metrics registry. -func (m *Launcher) Registry() *prom.Registry { - return m.reg -} - -// Engine returns a reference to the storage engine. It should only be called -// for end-to-end testing purposes. -func (m *Launcher) Engine() Engine { - return m.engine -} - -// Shutdown shuts down the HTTP server and waits for all services to clean up. -func (m *Launcher) Shutdown(ctx context.Context) error { - var errs []string - - // Shut down subsystems in the reverse order of their registration. - for i := len(m.closers); i > 0; i-- { - lc := m.closers[i-1] - m.log.Info("Stopping subsystem", zap.String("subsystem", lc.label)) - if err := lc.closer(ctx); err != nil { - m.log.Error("Failed to stop subsystem", zap.String("subsystem", lc.label), zap.Error(err)) - errs = append(errs, err.Error()) - } - } - - m.wg.Wait() - - // N.B. We ignore any errors here because Sync is known to fail with EINVAL - // when logging to Stdout on certain OS's. - // - // Uber made the same change within the core of the logger implementation. - // See: https://github.com/uber-go/zap/issues/328 - _ = m.log.Sync() - - if len(errs) > 0 { - return fmt.Errorf("failed to shut down server: [%s]", strings.Join(errs, ",")) - } - return nil -} - -func (m *Launcher) Done() <-chan struct{} { - return m.doneChan -} - -func (m *Launcher) run(ctx context.Context, opts *InfluxdOpts) (err error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - ctx, m.cancel = context.WithCancel(ctx) - m.doneChan = ctx.Done() - - info := platform.GetBuildInfo() - m.log.Info("Welcome to InfluxDB", - zap.String("version", info.Version), - zap.String("commit", info.Commit), - zap.String("build_date", info.Date), - zap.String("log_level", opts.LogLevel.String()), - ) - m.initTracing(opts) - - if p := opts.Viper.ConfigFileUsed(); p != "" { - m.log.Debug("loaded config file", zap.String("path", p)) - } - - if opts.NatsPort != 0 { - m.log.Warn("nats-port argument is deprecated and unused") - } - - if opts.NatsMaxPayloadBytes != 0 { - m.log.Warn("nats-max-payload-bytes argument is deprecated and unused") - } - - // Parse feature flags. 
- // These flags can be used to modify the remaining setup logic in this method. - // They will also be injected into the contexts of incoming HTTP requests at runtime, - // for use in modifying behavior there. - if m.flagger == nil { - m.flagger = feature.DefaultFlagger() - if len(opts.FeatureFlags) > 0 { - f, err := overrideflagger.Make(opts.FeatureFlags, feature.ByKey) - if err != nil { - m.log.Error("Failed to configure feature flag overrides", - zap.Error(err), zap.Any("overrides", opts.FeatureFlags)) - return err - } - m.log.Info("Running with feature flag overrides", zap.Any("overrides", opts.FeatureFlags)) - m.flagger = f - } - } - - m.reg = prom.NewRegistry(m.log.With(zap.String("service", "prom_registry"))) - m.reg.MustRegister(collectors.NewGoCollector()) - - // Open KV and SQL stores. - procID, err := m.openMetaStores(ctx, opts) - if err != nil { - return err - } - m.reg.MustRegister(infprom.NewInfluxCollector(procID, info)) - - tenantStore := tenant.NewStore(m.kvStore) - ts := tenant.NewSystem(tenantStore, m.log.With(zap.String("store", "new")), m.reg, metric.WithSuffix("new")) - - serviceConfig := kv.ServiceConfig{ - FluxLanguageService: fluxlang.DefaultService, - } - - m.kvService = kv.NewService(m.log.With(zap.String("store", "kv")), m.kvStore, ts, serviceConfig) - - var ( - opLogSvc = tenant.NewOpLogService(m.kvStore, m.kvService) - userLogSvc platform.UserOperationLogService = opLogSvc - bucketLogSvc platform.BucketOperationLogService = opLogSvc - orgLogSvc platform.OrganizationOperationLogService = opLogSvc - ) - var ( - variableSvc platform.VariableService = m.kvService - sourceSvc platform.SourceService = m.kvService - scraperTargetSvc platform.ScraperTargetStoreService = m.kvService - ) - - var authSvc platform.AuthorizationService - { - authStore, err := authorization.NewStore(m.kvStore) - if err != nil { - m.log.Error("Failed creating new authorization store", zap.Error(err)) - return err - } - authSvc = authorization.NewService(authStore, ts) - } - - secretStore, err := secret.NewStore(m.kvStore) - if err != nil { - m.log.Error("Failed creating new secret store", zap.Error(err)) - return err - } - - var secretSvc platform.SecretService = secret.NewMetricService(m.reg, secret.NewLogger(m.log.With(zap.String("service", "secret")), secret.NewService(secretStore))) - - switch opts.SecretStore { - case "bolt": - // If it is bolt, then we already set it above. - case "vault": - // The vault secret service is configured using the standard vault environment variables. 
- // https://www.vaultproject.io/docs/commands/index.html#environment-variables - svc, err := vault.NewSecretService(vault.WithConfig(opts.VaultConfig)) - if err != nil { - m.log.Error("Failed initializing vault secret service", zap.Error(err)) - return err - } - secretSvc = svc - default: - err := fmt.Errorf("unknown secret service %q, expected \"bolt\" or \"vault\"", opts.SecretStore) - m.log.Error("Failed setting secret service", zap.Error(err)) - return err - } - - metaClient := meta.NewClient(meta.NewConfig(), m.kvStore) - if err := metaClient.Open(); err != nil { - m.log.Error("Failed to open meta client", zap.Error(err)) - return err - } - - if opts.Testing { - // the testing engine will write/read into a temporary directory - engine := NewTemporaryEngine( - opts.StorageConfig, - storage.WithMetaClient(metaClient), - ) - m.flushers = append(m.flushers, engine) - m.engine = engine - } else { - // check for 2.x data / state from a prior 2.x - if err := checkForPriorVersion(ctx, m.log, opts.BoltPath, opts.EnginePath, ts.BucketService, metaClient); err != nil { - os.Exit(1) - } - - m.engine = storage.NewEngine( - opts.EnginePath, - opts.StorageConfig, - storage.WithMetricsDisabled(opts.MetricsDisabled), - storage.WithMetaClient(metaClient), - ) - } - m.engine.WithLogger(m.log) - if err := m.engine.Open(ctx); err != nil { - m.log.Error("Failed to open engine", zap.Error(err)) - return err - } - m.closers = append(m.closers, labeledCloser{ - label: "engine", - closer: func(context.Context) error { - return m.engine.Close() - }, - }) - // The Engine's metrics must be registered after it opens. - m.reg.MustRegister(m.engine.PrometheusCollectors()...) - - var ( - deleteService platform.DeleteService = m.engine - pointsWriter storage.PointsWriter = m.engine - backupService platform.BackupService = m.engine - restoreService platform.RestoreService = m.engine - ) - - remotesSvc := remotes.NewService(m.sqlStore) - remotesServer := remotesTransport.NewInstrumentedRemotesHandler( - m.log.With(zap.String("handler", "remotes")), m.reg, m.kvStore, remotesSvc) - - replicationSvc, replicationsMetrics := replications.NewService(m.sqlStore, ts, pointsWriter, m.log.With(zap.String("service", "replications")), opts.EnginePath, opts.InstanceID) - replicationServer := replicationTransport.NewInstrumentedReplicationHandler( - m.log.With(zap.String("handler", "replications")), m.reg, m.kvStore, replicationSvc) - ts.BucketService = replications.NewBucketService( - m.log.With(zap.String("service", "replication_buckets")), ts.BucketService, replicationSvc) - - m.reg.MustRegister(replicationsMetrics.PrometheusCollectors()...) - - if err = replicationSvc.Open(ctx); err != nil { - m.log.Error("Failed to open replications service", zap.Error(err)) - return err - } - - m.closers = append(m.closers, labeledCloser{ - label: "replications", - closer: func(context.Context) error { - return replicationSvc.Close() - }, - }) - - pointsWriter = replicationSvc - - // When --hardening-enabled, use an HTTP IP validator that restricts - // flux and pkger HTTP requests to private addressess. 
- var urlValidator url.Validator - if opts.HardeningEnabled { - urlValidator = url.PrivateIPValidator{} - } else { - urlValidator = url.PassValidator{} - } - - deps, err := influxdb.NewDependencies( - storageflux.NewReader(storage2.NewStore(m.engine.TSDBStore(), m.engine.MetaClient())), - pointsWriter, - authorizer.NewBucketService(ts.BucketService), - authorizer.NewOrgService(ts.OrganizationService), - authorizer.NewSecretService(secretSvc), - nil, - influxdb.WithURLValidator(urlValidator), - ) - if err != nil { - m.log.Error("Failed to get query controller dependencies", zap.Error(err)) - return err - } - - dependencyList := []flux.Dependency{deps} - if opts.Testing { - dependencyList = append(dependencyList, executetest.NewDefaultTestFlagger()) - dependencyList = append(dependencyList, testing.FrameworkConfig{}) - } - - m.queryController, err = control.New(control.Config{ - ConcurrencyQuota: opts.ConcurrencyQuota, - InitialMemoryBytesQuotaPerQuery: opts.InitialMemoryBytesQuotaPerQuery, - MemoryBytesQuotaPerQuery: opts.MemoryBytesQuotaPerQuery, - MaxMemoryBytes: opts.MaxMemoryBytes, - QueueSize: opts.QueueSize, - ExecutorDependencies: dependencyList, - FluxLogEnabled: opts.FluxLogEnabled, - }, m.log.With(zap.String("service", "storage-reads"))) - if err != nil { - m.log.Error("Failed to create query controller", zap.Error(err)) - return err - } - m.closers = append(m.closers, labeledCloser{ - label: "query", - closer: func(ctx context.Context) error { - return m.queryController.Shutdown(ctx) - }, - }) - - m.reg.MustRegister(m.queryController.PrometheusCollectors()...) - - var storageQueryService = readservice.NewProxyQueryService(m.queryController) - var taskSvc taskmodel.TaskService - { - // create the task stack - combinedTaskService := taskbackend.NewAnalyticalStorage( - m.log.With(zap.String("service", "task-analytical-store")), - m.kvService, - ts.BucketService, - m.kvService, - pointsWriter, - query.QueryServiceBridge{AsyncQueryService: m.queryController}, - ) - - executor, executorMetrics := executor.NewExecutor( - m.log.With(zap.String("service", "task-executor")), - query.QueryServiceBridge{AsyncQueryService: m.queryController}, - ts.UserService, - combinedTaskService, - combinedTaskService, - executor.WithFlagger(m.flagger), - ) - err = executor.LoadExistingScheduleRuns(ctx) - if err != nil { - m.log.Fatal("could not load existing scheduled runs", zap.Error(err)) - } - m.executor = executor - m.reg.MustRegister(executorMetrics.PrometheusCollectors()...) - schLogger := m.log.With(zap.String("service", "task-scheduler")) - - var sch stoppingScheduler = &scheduler.NoopScheduler{} - if !opts.NoTasks { - var ( - sm *scheduler.SchedulerMetrics - err error - ) - sch, sm, err = scheduler.NewScheduler( - executor, - taskbackend.NewSchedulableTaskService(m.kvService), - scheduler.WithOnErrorFn(func(ctx context.Context, taskID scheduler.ID, scheduledAt time.Time, err error) { - schLogger.Info( - "error in scheduler run", - zap.String("taskID", platform2.ID(taskID).String()), - zap.Time("scheduledAt", scheduledAt), - zap.Error(err)) - }), - ) - if err != nil { - m.log.Fatal("could not start task scheduler", zap.Error(err)) - } - m.closers = append(m.closers, labeledCloser{ - label: "task", - closer: func(context.Context) error { - sch.Stop() - return nil - }, - }) - m.reg.MustRegister(sm.PrometheusCollectors()...) 
- } - - m.scheduler = sch - - coordLogger := m.log.With(zap.String("service", "task-coordinator")) - taskCoord := coordinator.NewCoordinator( - coordLogger, - sch, - executor) - - taskSvc = middleware.New(combinedTaskService, taskCoord) - if err := taskbackend.TaskNotifyCoordinatorOfExisting( - ctx, - taskSvc, - combinedTaskService, - taskCoord, - func(ctx context.Context, taskID platform2.ID, runID platform2.ID) error { - _, err := executor.ResumeCurrentRun(ctx, taskID, runID) - return err - }, - coordLogger); err != nil { - m.log.Error("Failed to resume existing tasks", zap.Error(err)) - } - } - - dbrpSvc := dbrp.NewAuthorizedService(dbrp.NewService(ctx, authorizer.NewBucketService(ts.BucketService), m.kvStore)) - - cm := iqlcontrol.NewControllerMetrics([]string{}) - m.reg.MustRegister(cm.PrometheusCollectors()...) - - mapper := &iqlcoordinator.LocalShardMapper{ - MetaClient: metaClient, - TSDBStore: m.engine.TSDBStore(), - DBRP: dbrpSvc, - } - - m.log.Info("Configuring InfluxQL statement executor (zeros indicate unlimited).", - zap.Int("max_select_point", opts.CoordinatorConfig.MaxSelectPointN), - zap.Int("max_select_series", opts.CoordinatorConfig.MaxSelectSeriesN), - zap.Int("max_select_buckets", opts.CoordinatorConfig.MaxSelectBucketsN)) - - qe := iqlquery.NewExecutor(m.log, cm) - se := &iqlcoordinator.StatementExecutor{ - MetaClient: metaClient, - TSDBStore: m.engine.TSDBStore(), - ShardMapper: mapper, - DBRP: dbrpSvc, - MaxSelectPointN: opts.CoordinatorConfig.MaxSelectPointN, - MaxSelectSeriesN: opts.CoordinatorConfig.MaxSelectSeriesN, - MaxSelectBucketsN: opts.CoordinatorConfig.MaxSelectBucketsN, - } - qe.StatementExecutor = se - qe.StatementNormalizer = se - - var checkSvc platform.CheckService - { - coordinator := coordinator.NewCoordinator(m.log, m.scheduler, m.executor) - checkSvc = checks.NewService(m.log.With(zap.String("svc", "checks")), m.kvStore, ts.OrganizationService, m.kvService) - checkSvc = middleware.NewCheckService(checkSvc, m.kvService, coordinator) - } - - var notificationEndpointSvc platform.NotificationEndpointService - { - notificationEndpointSvc = endpointservice.New(endpointservice.NewStore(m.kvStore), secretSvc) - } - - var notificationRuleSvc platform.NotificationRuleStore - { - coordinator := coordinator.NewCoordinator(m.log, m.scheduler, m.executor) - notificationRuleSvc, err = ruleservice.New(m.log, m.kvStore, m.kvService, ts.OrganizationService, notificationEndpointSvc) - if err != nil { - return err - } - - // tasks service notification middleware which keeps task service up to date - // with persisted changes to notification rules. 
- notificationRuleSvc = middleware.NewNotificationRuleStore(notificationRuleSvc, m.kvService, coordinator) - } - - var telegrafSvc platform.TelegrafConfigStore - { - telegrafSvc = telegrafservice.New(m.kvStore) - } - - scraperScheduler, err := gather.NewScheduler(m.log.With(zap.String("service", "scraper")), 100, 10, scraperTargetSvc, pointsWriter, 10*time.Second) - if err != nil { - m.log.Error("Failed to create scraper subscriber", zap.Error(err)) - return err - } - m.closers = append(m.closers, labeledCloser{ - label: "scraper", - closer: func(ctx context.Context) error { - scraperScheduler.Close() - return nil - }, - }) - - var sessionSvc platform.SessionService - { - sessionSvc = session.NewService( - session.NewStorage(inmem.NewSessionStore()), - ts.UserService, - ts.UserResourceMappingService, - authSvc, - session.WithSessionLength(time.Duration(opts.SessionLength)*time.Minute), - ) - sessionSvc = session.NewSessionMetrics(m.reg, sessionSvc) - sessionSvc = session.NewSessionLogger(m.log.With(zap.String("service", "session")), sessionSvc) - } - - var labelSvc platform.LabelService - { - labelsStore, err := label.NewStore(m.kvStore) - if err != nil { - m.log.Error("Failed creating new labels store", zap.Error(err)) - return err - } - labelSvc = label.NewService(labelsStore) - } - - ts.BucketService = storage.NewBucketService(m.log, ts.BucketService, m.engine) - ts.BucketService = dbrp.NewBucketService(m.log, ts.BucketService, dbrpSvc) - - bucketManifestWriter := backup.NewBucketManifestWriter(ts, metaClient) - - onboardingLogger := m.log.With(zap.String("handler", "onboard")) - onboardOpts := []tenant.OnboardServiceOptionFn{tenant.WithOnboardingLogger(onboardingLogger)} - if opts.TestingAlwaysAllowSetup { - onboardOpts = append(onboardOpts, tenant.WithAlwaysAllowInitialUser()) - } - - onboardSvc := tenant.NewOnboardService(ts, authSvc, onboardOpts...) // basic service - onboardSvc = tenant.NewAuthedOnboardSvc(onboardSvc) // with auth - onboardSvc = tenant.NewOnboardingMetrics(m.reg, onboardSvc, metric.WithSuffix("new")) // with metrics - onboardSvc = tenant.NewOnboardingLogger(onboardingLogger, onboardSvc) // with logging - - var ( - passwordV1 platform.PasswordsService - authSvcV1 *authv1.Service - ) - { - authStore, err := authv1.NewStore(m.kvStore) - if err != nil { - m.log.Error("Failed creating new authorization store", zap.Error(err)) - return err - } - - authSvcV1 = authv1.NewService(authStore, ts) - passwordV1 = authv1.NewCachingPasswordsService(authSvcV1) - } - - var ( - dashboardSvc platform.DashboardService - dashboardLogSvc platform.DashboardOperationLogService - ) - { - dashboardService := dashboards.NewService(m.kvStore, m.kvService) - dashboardSvc = dashboardService - dashboardLogSvc = dashboardService - } - - // resourceResolver is a deprecated type which combines the lookups - // of multiple resources into one type, used to resolve the resources - // associated org ID or name . It is a stop-gap while we move this - // behaviour off of *kv.Service to aid in reducing the coupling on this type. 
- resourceResolver := &resource.Resolver{ - AuthorizationFinder: authSvc, - BucketFinder: ts.BucketService, - OrganizationFinder: ts.OrganizationService, - DashboardFinder: dashboardSvc, - SourceFinder: sourceSvc, - TaskFinder: taskSvc, - TelegrafConfigFinder: telegrafSvc, - VariableFinder: variableSvc, - TargetFinder: scraperTargetSvc, - CheckFinder: checkSvc, - NotificationEndpointFinder: notificationEndpointSvc, - NotificationRuleFinder: notificationRuleSvc, - } - - errorHandler := kithttp.NewErrorHandler(m.log.With(zap.String("handler", "error_logger"))) - m.apibackend = &http.APIBackend{ - AssetsPath: opts.AssetsPath, - UIDisabled: opts.UIDisabled, - HTTPErrorHandler: errorHandler, - Logger: m.log, - FluxLogEnabled: opts.FluxLogEnabled, - SessionRenewDisabled: opts.SessionRenewDisabled, - NewQueryService: source.NewQueryService, - PointsWriter: &storage.LoggingPointsWriter{ - Underlying: pointsWriter, - BucketFinder: ts.BucketService, - LogBucketName: platform.MonitoringSystemBucketName, - }, - DeleteService: deleteService, - BackupService: backupService, - SqlBackupRestoreService: m.sqlStore, - BucketManifestWriter: bucketManifestWriter, - RestoreService: restoreService, - AuthorizationService: authSvc, - AuthorizationV1Service: authSvcV1, - PasswordV1Service: passwordV1, - AuthorizerV1: &authv1.Authorizer{ - AuthV1: authSvcV1, - AuthV2: authSvc, - Comparer: passwordV1, - User: ts, - }, - AlgoWProxy: &http.NoopProxyHandler{}, - // Wrap the BucketService in a storage backed one that will ensure deleted buckets are removed from the storage engine. - BucketService: ts.BucketService, - SessionService: sessionSvc, - UserService: ts.UserService, - OnboardingService: onboardSvc, - DBRPService: dbrpSvc, - OrganizationService: ts.OrganizationService, - UserResourceMappingService: ts.UserResourceMappingService, - LabelService: labelSvc, - DashboardService: dashboardSvc, - DashboardOperationLogService: dashboardLogSvc, - BucketOperationLogService: bucketLogSvc, - UserOperationLogService: userLogSvc, - OrganizationOperationLogService: orgLogSvc, - SourceService: sourceSvc, - VariableService: variableSvc, - PasswordsService: ts.PasswordsService, - InfluxqldService: iqlquery.NewProxyExecutor(m.log, qe), - FluxService: storageQueryService, - FluxLanguageService: fluxlang.DefaultService, - TaskService: taskSvc, - TelegrafService: telegrafSvc, - NotificationRuleStore: notificationRuleSvc, - NotificationEndpointService: notificationEndpointSvc, - CheckService: checkSvc, - ScraperTargetStoreService: scraperTargetSvc, - SecretService: secretSvc, - LookupService: resourceResolver, - DocumentService: m.kvService, - OrgLookupService: resourceResolver, - WriteEventRecorder: infprom.NewEventRecorder("write"), - QueryEventRecorder: infprom.NewEventRecorder("query"), - Flagger: m.flagger, - FlagsHandler: feature.NewFlagsHandler(errorHandler, feature.ByKey), - } - - m.reg.MustRegister(m.apibackend.PrometheusCollectors()...) 
- - authAgent := new(authorizer.AuthAgent) - - var pkgSVC pkger.SVC - { - b := m.apibackend - authedOrgSVC := authorizer.NewOrgService(b.OrganizationService) - authedUrmSVC := authorizer.NewURMService(b.OrgLookupService, b.UserResourceMappingService) - pkgerLogger := m.log.With(zap.String("service", "pkger")) - pkgSVC = pkger.NewService( - pkger.WithHTTPClient(pkger.NewDefaultHTTPClient(urlValidator)), - pkger.WithLogger(pkgerLogger), - pkger.WithStore(pkger.NewStoreKV(m.kvStore)), - pkger.WithBucketSVC(authorizer.NewBucketService(b.BucketService)), - pkger.WithCheckSVC(authorizer.NewCheckService(b.CheckService, authedUrmSVC, authedOrgSVC)), - pkger.WithDashboardSVC(authorizer.NewDashboardService(b.DashboardService)), - pkger.WithLabelSVC(label.NewAuthedLabelService(labelSvc, b.OrgLookupService)), - pkger.WithNotificationEndpointSVC(authorizer.NewNotificationEndpointService(b.NotificationEndpointService, authedUrmSVC, authedOrgSVC)), - pkger.WithNotificationRuleSVC(authorizer.NewNotificationRuleStore(b.NotificationRuleStore, authedUrmSVC, authedOrgSVC)), - pkger.WithOrganizationService(authorizer.NewOrgService(b.OrganizationService)), - pkger.WithSecretSVC(authorizer.NewSecretService(b.SecretService)), - pkger.WithTaskSVC(authorizer.NewTaskService(pkgerLogger, b.TaskService)), - pkger.WithTelegrafSVC(authorizer.NewTelegrafConfigService(b.TelegrafService, b.UserResourceMappingService)), - pkger.WithVariableSVC(authorizer.NewVariableService(b.VariableService)), - ) - pkgSVC = pkger.MWTracing()(pkgSVC) - pkgSVC = pkger.MWMetrics(m.reg)(pkgSVC) - pkgSVC = pkger.MWLogging(pkgerLogger)(pkgSVC) - pkgSVC = pkger.MWAuth(authAgent)(pkgSVC) - } - - var stacksHTTPServer *pkger.HTTPServerStacks - { - tLogger := m.log.With(zap.String("handler", "stacks")) - stacksHTTPServer = pkger.NewHTTPServerStacks(tLogger, pkgSVC) - } - - var templatesHTTPServer *pkger.HTTPServerTemplates - { - tLogger := m.log.With(zap.String("handler", "templates")) - templatesHTTPServer = pkger.NewHTTPServerTemplates(tLogger, pkgSVC, pkger.NewDefaultHTTPClient(urlValidator)) - } - - userHTTPServer := ts.NewUserHTTPHandler(m.log) - meHTTPServer := ts.NewMeHTTPHandler(m.log) - onboardHTTPServer := tenant.NewHTTPOnboardHandler(m.log, onboardSvc) - - // feature flagging for new labels service - var labelHandler *label.LabelHandler - { - b := m.apibackend - - labelSvc = label.NewAuthedLabelService(labelSvc, b.OrgLookupService) - labelSvc = label.NewLabelLogger(m.log.With(zap.String("handler", "labels")), labelSvc) - labelSvc = label.NewLabelMetrics(m.reg, labelSvc) - labelHandler = label.NewHTTPLabelHandler(m.log, labelSvc) - } - - // feature flagging for new authorization service - var authHTTPServer *authorization.AuthHandler - { - authLogger := m.log.With(zap.String("handler", "authorization")) - - var authService platform.AuthorizationService - authService = authorization.NewAuthedAuthorizationService(authSvc, ts) - authService = authorization.NewAuthMetrics(m.reg, authService) - authService = authorization.NewAuthLogger(authLogger, authService) - - authHTTPServer = authorization.NewHTTPAuthHandler(m.log, authService, ts) - } - - var v1AuthHTTPServer *authv1.AuthHandler - { - authLogger := m.log.With(zap.String("handler", "v1_authorization")) - - var authService platform.AuthorizationService - authService = authorization.NewAuthedAuthorizationService(authSvcV1, ts) - authService = authorization.NewAuthLogger(authLogger, authService) - - passService := authv1.NewAuthedPasswordService(authv1.AuthFinder(authSvcV1), passwordV1) - 
v1AuthHTTPServer = authv1.NewHTTPAuthHandler(m.log, authService, passService, ts) - } - - var sessionHTTPServer *session.SessionHandler - { - sessionHTTPServer = session.NewSessionHandler(m.log.With(zap.String("handler", "session")), sessionSvc, ts.UserService, ts.PasswordsService) - } - - orgHTTPServer := ts.NewOrgHTTPHandler(m.log, secret.NewAuthedService(secretSvc)) - - bucketHTTPServer := ts.NewBucketHTTPHandler(m.log, labelSvc) - - var dashboardServer *dashboardTransport.DashboardHandler - { - urmHandler := tenant.NewURMHandler( - m.log.With(zap.String("handler", "urm")), - platform.DashboardsResourceType, - "id", - ts.UserService, - tenant.NewAuthedURMService(ts.OrganizationService, ts.UserResourceMappingService), - ) - - labelHandler := label.NewHTTPEmbeddedHandler( - m.log.With(zap.String("handler", "label")), - platform.DashboardsResourceType, - labelSvc, - ) - - dashboardServer = dashboardTransport.NewDashboardHandler( - m.log.With(zap.String("handler", "dashboards")), - authorizer.NewDashboardService(dashboardSvc), - labelSvc, - ts.UserService, - ts.OrganizationService, - urmHandler, - labelHandler, - ) - } - - notebookSvc := notebooks.NewService(m.sqlStore) - notebookServer := notebookTransport.NewNotebookHandler( - m.log.With(zap.String("handler", "notebooks")), - authorizer.NewNotebookService( - notebooks.NewLoggingService( - m.log.With(zap.String("service", "notebooks")), - notebooks.NewMetricCollectingService(m.reg, notebookSvc), - ), - ), - ) - - annotationSvc := annotations.NewService(m.sqlStore) - annotationServer := annotationTransport.NewAnnotationHandler( - m.log.With(zap.String("handler", "annotations")), - authorizer.NewAnnotationService( - annotations.NewLoggingService( - m.log.With(zap.String("service", "annotations")), - annotations.NewMetricCollectingService(m.reg, annotationSvc), - ), - ), - ) - - configHandler, err := http.NewConfigHandler(m.log.With(zap.String("handler", "config")), opts.BindCliOpts()) - if err != nil { - return err - } - - platformHandler := http.NewPlatformHandler( - m.apibackend, - http.WithResourceHandler(stacksHTTPServer), - http.WithResourceHandler(templatesHTTPServer), - http.WithResourceHandler(onboardHTTPServer), - http.WithResourceHandler(authHTTPServer), - http.WithResourceHandler(labelHandler), - http.WithResourceHandler(sessionHTTPServer.SignInResourceHandler()), - http.WithResourceHandler(sessionHTTPServer.SignOutResourceHandler()), - http.WithResourceHandler(userHTTPServer), - http.WithResourceHandler(meHTTPServer), - http.WithResourceHandler(orgHTTPServer), - http.WithResourceHandler(bucketHTTPServer), - http.WithResourceHandler(v1AuthHTTPServer), - http.WithResourceHandler(dashboardServer), - http.WithResourceHandler(notebookServer), - http.WithResourceHandler(annotationServer), - http.WithResourceHandler(remotesServer), - http.WithResourceHandler(replicationServer), - http.WithResourceHandler(configHandler), - ) - - httpLogger := m.log.With(zap.String("service", "http")) - var httpHandler nethttp.Handler = http.NewRootHandler( - "platform", - http.WithLog(httpLogger), - http.WithAPIHandler(platformHandler), - http.WithPprofEnabled(!opts.ProfilingDisabled), - http.WithMetrics(m.reg, !opts.MetricsDisabled), - ) - - if opts.LogLevel == zap.DebugLevel { - httpHandler = http.LoggingMW(httpLogger)(httpHandler) - } - // If we are in testing mode we allow all data to be flushed and removed. 
- if opts.Testing { - httpHandler = http.Debug(ctx, httpHandler, m.flushers, onboardSvc) - } - - if !opts.ReportingDisabled { - m.runReporter(ctx) - } - if err := m.runHTTP(opts, httpHandler, httpLogger); err != nil { - return err - } - - return nil -} - -// initTracing sets up the global tracer for the influxd process. -// Any errors encountered during setup are logged, but don't crash the process. -func (m *Launcher) initTracing(opts *InfluxdOpts) { - switch opts.TracingType { - case LogTracing: - m.log.Info("Tracing via zap logging") - opentracing.SetGlobalTracer(pzap.NewTracer(m.log, snowflake.NewIDGenerator())) - - case JaegerTracing: - m.log.Info("Tracing via Jaeger") - cfg, err := jaegerconfig.FromEnv() - if err != nil { - m.log.Error("Failed to get Jaeger client config from environment variables", zap.Error(err)) - return - } - tracer, closer, err := cfg.NewTracer() - if err != nil { - m.log.Error("Failed to instantiate Jaeger tracer", zap.Error(err)) - return - } - m.closers = append(m.closers, labeledCloser{ - label: "Jaeger tracer", - closer: func(context.Context) error { - return closer.Close() - }, - }) - opentracing.SetGlobalTracer(tracer) - } -} - -// openMetaStores opens the embedded DBs used to store metadata about influxd resources, migrating them to -// the latest schema expected by the server. -// On success, a unique ID is returned to be used as an identifier for the influxd instance in telemetry. -func (m *Launcher) openMetaStores(ctx context.Context, opts *InfluxdOpts) (string, error) { - type flushableKVStore interface { - kv.SchemaStore - http.Flusher - } - var kvStore flushableKVStore - var sqlStore *sqlite.SqlStore - - var procID string - var err error - switch opts.StoreType { - case BoltStore: - m.log.Warn("Using --store=bolt is deprecated. Use --store=disk instead.") - fallthrough - case DiskStore: - boltClient := bolt.NewClient(m.log.With(zap.String("service", "bolt"))) - boltClient.Path = opts.BoltPath - - if err := boltClient.Open(ctx); err != nil { - m.log.Error("Failed opening bolt", zap.Error(err)) - return "", err - } - m.closers = append(m.closers, labeledCloser{ - label: "bolt", - closer: func(context.Context) error { - return boltClient.Close() - }, - }) - m.reg.MustRegister(boltClient) - procID = boltClient.ID().String() - - boltKV := bolt.NewKVStore(m.log.With(zap.String("service", "kvstore-bolt")), opts.BoltPath) - boltKV.WithDB(boltClient.DB()) - kvStore = boltKV - - // If a sqlite-path is not specified, store sqlite db in the same directory as bolt with the default filename. 
- if opts.SqLitePath == "" { - opts.SqLitePath = filepath.Join(filepath.Dir(opts.BoltPath), sqlite.DefaultFilename) - } - sqlStore, err = sqlite.NewSqlStore(opts.SqLitePath, m.log.With(zap.String("service", "sqlite"))) - if err != nil { - m.log.Error("Failed opening sqlite store", zap.Error(err)) - return "", err - } - - case MemoryStore: - kvStore = inmem.NewKVStore() - sqlStore, err = sqlite.NewSqlStore(sqlite.InmemPath, m.log.With(zap.String("service", "sqlite"))) - if err != nil { - m.log.Error("Failed opening sqlite store", zap.Error(err)) - return "", err - } - - default: - err := fmt.Errorf("unknown store type %s; expected disk or memory", opts.StoreType) - m.log.Error("Failed opening metadata store", zap.Error(err)) - return "", err - } - - m.closers = append(m.closers, labeledCloser{ - label: "sqlite", - closer: func(context.Context) error { - return sqlStore.Close() - }, - }) - if opts.Testing { - m.flushers = append(m.flushers, kvStore, sqlStore) - } - - // Apply migrations to the KV and SQL metadata stores. - kvMigrator, err := migration.NewMigrator( - m.log.With(zap.String("service", "KV migrations")), - kvStore, - all.Migrations[:]..., - ) - if err != nil { - m.log.Error("Failed to initialize kv migrator", zap.Error(err)) - return "", err - } - sqlMigrator := sqlite.NewMigrator(sqlStore, m.log.With(zap.String("service", "SQL migrations"))) - - // If we're migrating a persistent data store, take a backup of the pre-migration state for rollback. - if opts.StoreType == DiskStore || opts.StoreType == BoltStore { - backupPattern := "%s.pre-%s-upgrade.backup" - info := platform.GetBuildInfo() - kvMigrator.SetBackupPath(fmt.Sprintf(backupPattern, opts.BoltPath, info.Version)) - sqlMigrator.SetBackupPath(fmt.Sprintf(backupPattern, opts.SqLitePath, info.Version)) - } - if err := kvMigrator.Up(ctx); err != nil { - m.log.Error("Failed to apply KV migrations", zap.Error(err)) - return "", err - } - if err := sqlMigrator.Up(ctx, sqliteMigrations.AllUp); err != nil { - m.log.Error("Failed to apply SQL migrations", zap.Error(err)) - return "", err - } - - m.kvStore = kvStore - m.sqlStore = sqlStore - return procID, nil -} - -// runHTTP configures and launches a listener for incoming HTTP(S) requests. -// The listener is run in a separate goroutine. If it fails to start up, it -// will cancel the launcher. 
-func (m *Launcher) runHTTP(opts *InfluxdOpts, handler nethttp.Handler, httpLogger *zap.Logger) error { - log := m.log.With(zap.String("service", "tcp-listener")) - - httpServer := &nethttp.Server{ - Addr: opts.HttpBindAddress, - Handler: handler, - ReadHeaderTimeout: opts.HttpReadHeaderTimeout, - ReadTimeout: opts.HttpReadTimeout, - WriteTimeout: opts.HttpWriteTimeout, - IdleTimeout: opts.HttpIdleTimeout, - ErrorLog: zap.NewStdLog(httpLogger), - } - m.closers = append(m.closers, labeledCloser{ - label: "HTTP server", - closer: httpServer.Shutdown, - }) - - ln, err := net.Listen("tcp", opts.HttpBindAddress) - if err != nil { - log.Error("Failed to set up TCP listener", zap.String("addr", opts.HttpBindAddress), zap.Error(err)) - return err - } - if addr, ok := ln.Addr().(*net.TCPAddr); ok { - m.httpPort = addr.Port - } - m.wg.Add(1) - - m.tlsEnabled = opts.HttpTLSCert != "" && opts.HttpTLSKey != "" - if !m.tlsEnabled { - if opts.HttpTLSCert != "" || opts.HttpTLSKey != "" { - log.Warn("TLS requires specifying both cert and key, falling back to HTTP") - } - - go func(log *zap.Logger) { - defer m.wg.Done() - log.Info("Listening", zap.String("transport", "http"), zap.String("addr", opts.HttpBindAddress), zap.Int("port", m.httpPort)) - - if err := httpServer.Serve(ln); err != nethttp.ErrServerClosed { - log.Error("Failed to serve HTTP", zap.Error(err)) - m.cancel() - } - log.Info("Stopping") - }(log) - - return nil - } - - if _, err = tls.LoadX509KeyPair(opts.HttpTLSCert, opts.HttpTLSKey); err != nil { - log.Error("Failed to load x509 key pair", zap.String("cert-path", opts.HttpTLSCert), zap.String("key-path", opts.HttpTLSKey)) - return err - } - - var tlsMinVersion uint16 - var useStrictCiphers = opts.HttpTLSStrictCiphers - switch opts.HttpTLSMinVersion { - case "1.0": - log.Warn("Setting the minimum version of TLS to 1.0 - this is discouraged. Please use 1.2 or 1.3") - tlsMinVersion = tls.VersionTLS10 - case "1.1": - log.Warn("Setting the minimum version of TLS to 1.1 - this is discouraged. 
Please use 1.2 or 1.3") - tlsMinVersion = tls.VersionTLS11 - case "1.2": - tlsMinVersion = tls.VersionTLS12 - case "1.3": - if useStrictCiphers { - log.Warn("TLS version 1.3 does not support configuring strict ciphers") - useStrictCiphers = false - } - tlsMinVersion = tls.VersionTLS13 - default: - return fmt.Errorf("unsupported TLS version: %s", opts.HttpTLSMinVersion) - } - - // nil uses the default cipher suite - var cipherConfig []uint16 = nil - if useStrictCiphers { - // See https://ssl-config.mozilla.org/#server=go&version=1.14.4&config=intermediate&guideline=5.6 - cipherConfig = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - } - } - - httpServer.TLSConfig = &tls.Config{ - CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, - PreferServerCipherSuites: !useStrictCiphers, - MinVersion: tlsMinVersion, - CipherSuites: cipherConfig, - } - - go func(log *zap.Logger) { - defer m.wg.Done() - log.Info("Listening", zap.String("transport", "https"), zap.String("addr", opts.HttpBindAddress), zap.Int("port", m.httpPort)) - - if err := httpServer.ServeTLS(ln, opts.HttpTLSCert, opts.HttpTLSKey); err != nethttp.ErrServerClosed { - log.Error("Failed to serve HTTPS", zap.Error(err)) - m.cancel() - } - log.Info("Stopping") - }(log) - - return nil -} - -// runReporter configures and launches a periodic telemetry report for the server. -func (m *Launcher) runReporter(ctx context.Context) { - reporter := telemetry.NewReporter(m.log, m.reg) - reporter.Interval = 8 * time.Hour - m.wg.Add(1) - go func() { - defer m.wg.Done() - reporter.Report(ctx) - }() -} - -func checkForPriorVersion(ctx context.Context, log *zap.Logger, boltPath string, enginePath string, bs platform.BucketService, metaClient *meta.Client) error { - buckets, _, err := bs.FindBuckets(ctx, platform.BucketFilter{}) - if err != nil { - log.Error("Failed to retrieve buckets", zap.Error(err)) - return err - } - - hasErrors := false - - // if there are no buckets, we will be fine - if len(buckets) > 0 { - log.Info("Checking InfluxDB metadata for prior version.", zap.String("bolt_path", boltPath)) - - for i := range buckets { - bucket := buckets[i] - if dbi := metaClient.Database(bucket.ID.String()); dbi == nil { - log.Error("Missing metadata for bucket.", zap.String("bucket", bucket.Name), zap.Stringer("bucket_id", bucket.ID)) - hasErrors = true - } - } - - if hasErrors { - log.Error("Incompatible InfluxDB 2.0 metadata found. File must be moved before influxd will start.", zap.String("path", boltPath)) - } - } - - // see if there are existing files which match the old directory structure - { - for _, name := range []string{"_series", "index"} { - dir := filepath.Join(enginePath, name) - if fi, err := os.Stat(dir); err == nil { - if fi.IsDir() { - log.Error("Found directory that is incompatible with this version of InfluxDB.", zap.String("path", dir)) - hasErrors = true - } - } - } - } - - if hasErrors { - log.Error("Incompatible InfluxDB 2.0 version found. Move all files outside of engine_path before influxd will start.", zap.String("engine_path", enginePath)) - return errors.New("incompatible InfluxDB version") - } - - return nil -} - -// OrganizationService returns the internal organization service. 
-func (m *Launcher) OrganizationService() platform.OrganizationService { - return m.apibackend.OrganizationService -} - -// QueryController returns the internal query service. -func (m *Launcher) QueryController() *control.Controller { - return m.queryController -} - -// BucketService returns the internal bucket service. -func (m *Launcher) BucketService() platform.BucketService { - return m.apibackend.BucketService -} - -// UserService returns the internal user service. -func (m *Launcher) UserService() platform.UserService { - return m.apibackend.UserService -} - -// AuthorizationService returns the internal authorization service. -func (m *Launcher) AuthorizationService() platform.AuthorizationService { - return m.apibackend.AuthorizationService -} - -func (m *Launcher) AuthorizationV1Service() platform.AuthorizationService { - return m.apibackend.AuthorizationV1Service -} - -// SecretService returns the internal secret service. -func (m *Launcher) SecretService() platform.SecretService { - return m.apibackend.SecretService -} - -// CheckService returns the internal check service. -func (m *Launcher) CheckService() platform.CheckService { - return m.apibackend.CheckService -} - -func (m *Launcher) DBRPMappingService() platform.DBRPMappingService { - return m.apibackend.DBRPService -} - -func (m *Launcher) SessionService() platform.SessionService { - return m.apibackend.SessionService -} diff --git a/cmd/influxd/launcher/launcher_helpers.go b/cmd/influxd/launcher/launcher_helpers.go deleted file mode 100644 index 3cb48ee8306..00000000000 --- a/cmd/influxd/launcher/launcher_helpers.go +++ /dev/null @@ -1,662 +0,0 @@ -package launcher - -import ( - "bytes" - "context" - "fmt" - "io" - nethttp "net/http" - "net/url" - "os" - "path/filepath" - "reflect" - "sort" - "strings" - "testing" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/lang" - "github.com/influxdata/influx-cli/v2/api" - "github.com/influxdata/influx-cli/v2/clients" - clibackup "github.com/influxdata/influx-cli/v2/clients/backup" - clirestore "github.com/influxdata/influx-cli/v2/clients/restore" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/bolt" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - dashboardTransport "github.com/influxdata/influxdb/v2/dashboards/transport" - "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/kit/feature" - "github.com/influxdata/influxdb/v2/label" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "github.com/influxdata/influxdb/v2/pkger" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/sqlite" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "github.com/influxdata/influxdb/v2/tenant" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" - "github.com/spf13/viper" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" -) - -// TestLauncher is a test wrapper for launcher.Launcher. -type TestLauncher struct { - *Launcher - - // Root temporary directory for all data. - Path string - - // Initialized after calling the Setup() helper. 
- User *influxdb.User - Org *influxdb.Organization - Bucket *influxdb.Bucket - Auth *influxdb.Authorization - - httpClient *httpc.Client - apiClient *api.APIClient - - // Flag to act as standard server: disk store, no-e2e testing flag - realServer bool -} - -// RunAndSetupNewLauncherOrFail shorcuts the most common pattern used in testing, -// building a new TestLauncher, running it, and setting it up with an initial user. -func RunAndSetupNewLauncherOrFail(ctx context.Context, tb testing.TB, setters ...OptSetter) *TestLauncher { - tb.Helper() - - l := NewTestLauncher() - l.RunOrFail(tb, ctx, setters...) - defer func() { - // If setup fails, shut down the launcher. - if tb.Failed() { - l.Shutdown(ctx) - } - }() - l.SetupOrFail(tb) - return l -} - -// NewTestLauncher returns a new instance of TestLauncher. -func NewTestLauncher() *TestLauncher { - l := &TestLauncher{Launcher: NewLauncher()} - - path, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } - l.Path = path - return l -} - -// NewTestLauncherServer returns a new instance of TestLauncher configured as real server (disk store, no e2e flag). -func NewTestLauncherServer() *TestLauncher { - l := NewTestLauncher() - l.realServer = true - return l -} - -// URL returns the URL to connect to the HTTP server. -func (tl *TestLauncher) URL() *url.URL { - u := url.URL{ - Host: fmt.Sprintf("127.0.0.1:%d", tl.Launcher.httpPort), - Scheme: "http", - } - if tl.Launcher.tlsEnabled { - u.Scheme = "https" - } - return &u -} - -type OptSetter = func(o *InfluxdOpts) - -func (tl *TestLauncher) SetFlagger(flagger feature.Flagger) { - tl.Launcher.flagger = flagger -} - -// Run executes the program, failing the test if the launcher fails to start. -func (tl *TestLauncher) RunOrFail(tb testing.TB, ctx context.Context, setters ...OptSetter) { - if err := tl.Run(tb, ctx, setters...); err != nil { - tb.Fatal(err) - } -} - -// Run executes the program with additional arguments to set paths and ports. -// Passed arguments will overwrite/add to the default ones. -func (tl *TestLauncher) Run(tb zaptest.TestingT, ctx context.Context, setters ...OptSetter) error { - opts := NewOpts(viper.New()) - if !tl.realServer { - opts.StoreType = "memory" - opts.Testing = true - } - opts.TestingAlwaysAllowSetup = true - opts.BoltPath = filepath.Join(tl.Path, bolt.DefaultFilename) - opts.SqLitePath = filepath.Join(tl.Path, sqlite.DefaultFilename) - opts.EnginePath = filepath.Join(tl.Path, "engine") - opts.HttpBindAddress = "127.0.0.1:0" - opts.LogLevel = zap.DebugLevel - opts.ReportingDisabled = true - opts.ConcurrencyQuota = 32 - opts.QueueSize = 16 - - for _, setter := range setters { - setter(opts) - } - - // Set up top-level logger to write into the test-case. - tl.Launcher.log = zaptest.NewLogger(tb, zaptest.Level(opts.LogLevel)).With(zap.String("test_name", tb.Name())) - return tl.Launcher.run(ctx, opts) -} - -// Shutdown stops the program and cleans up temporary paths. -func (tl *TestLauncher) Shutdown(ctx context.Context) error { - defer os.RemoveAll(tl.Path) - tl.cancel() - return tl.Launcher.Shutdown(ctx) -} - -// ShutdownOrFail stops the program and cleans up temporary paths. Fail on error. -func (tl *TestLauncher) ShutdownOrFail(tb testing.TB, ctx context.Context) { - tb.Helper() - if err := tl.Shutdown(ctx); err != nil { - tb.Fatal(err) - } -} - -// Setup creates a new user, bucket, org, and auth token. 
-func (tl *TestLauncher) Setup() error { - results, err := tl.OnBoard(&influxdb.OnboardingRequest{ - User: "USER", - Password: "PASSWORD", - Org: "ORG", - Bucket: "BUCKET", - }) - if err != nil { - return err - } - - tl.User = results.User - tl.Org = results.Org - tl.Bucket = results.Bucket - tl.Auth = results.Auth - return nil -} - -// SetupOrFail creates a new user, bucket, org, and auth token. Fail on error. -func (tl *TestLauncher) SetupOrFail(tb testing.TB) { - if err := tl.Setup(); err != nil { - tb.Fatal(err) - } -} - -// OnBoard attempts an on-boarding request. -// The on-boarding status is also reset to allow multiple user/org/buckets to be created. -func (tl *TestLauncher) OnBoard(req *influxdb.OnboardingRequest) (*influxdb.OnboardingResults, error) { - return tl.apibackend.OnboardingService.OnboardInitialUser(context.Background(), req) -} - -// OnBoardOrFail attempts an on-boarding request or fails on error. -// The on-boarding status is also reset to allow multiple user/org/buckets to be created. -func (tl *TestLauncher) OnBoardOrFail(tb testing.TB, req *influxdb.OnboardingRequest) *influxdb.OnboardingResults { - tb.Helper() - res, err := tl.OnBoard(req) - if err != nil { - tb.Fatal(err) - } - return res -} - -// WriteOrFail attempts a write to the organization and bucket identified by to or fails if there is an error. -func (tl *TestLauncher) WriteOrFail(tb testing.TB, to *influxdb.OnboardingResults, data string) { - tb.Helper() - resp, err := nethttp.DefaultClient.Do(tl.NewHTTPRequestOrFail(tb, "POST", fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", to.Org.ID, to.Bucket.ID), to.Auth.Token, data)) - if err != nil { - tb.Fatal(err) - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - tb.Fatal(err) - } - - if err := resp.Body.Close(); err != nil { - tb.Fatal(err) - } - - if resp.StatusCode != nethttp.StatusNoContent { - tb.Fatalf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header) - } -} - -// WritePoints attempts a write to the organization and bucket used during setup. -func (tl *TestLauncher) WritePoints(data string) error { - req, err := tl.NewHTTPRequest( - "POST", fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", tl.Org.ID, tl.Bucket.ID), - tl.Auth.Token, data) - if err != nil { - return err - } - resp, err := nethttp.DefaultClient.Do(req) - if err != nil { - return err - } - body, err := io.ReadAll(resp.Body) - if err != nil { - return err - } - if err := resp.Body.Close(); err != nil { - return err - } - if resp.StatusCode != nethttp.StatusNoContent { - return fmt.Errorf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header) - } - return nil -} - -// WritePointsOrFail attempts a write to the organization and bucket used during setup or fails if there is an error. -func (tl *TestLauncher) WritePointsOrFail(tb testing.TB, data string) { - tb.Helper() - if err := tl.WritePoints(data); err != nil { - tb.Fatal(err) - } -} - -// MustExecuteQuery executes the provided query panicking if an error is encountered. -// Callers of MustExecuteQuery must call Done on the returned QueryResults. -func (tl *TestLauncher) MustExecuteQuery(query string) *QueryResults { - results, err := tl.ExecuteQuery(query) - if err != nil { - panic(err) - } - return results -} - -// ExecuteQuery executes the provided query against the ith query node. -// Callers of ExecuteQuery must call Done on the returned QueryResults. 
-func (tl *TestLauncher) ExecuteQuery(q string) (*QueryResults, error) { - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(true, nil)) - ctx, _ = feature.Annotate(ctx, tl.flagger) - fq, err := tl.QueryController().Query(ctx, &query.Request{ - Authorization: tl.Auth, - OrganizationID: tl.Auth.OrgID, - Compiler: lang.FluxCompiler{ - Query: q, - }}) - if err != nil { - return nil, err - } - - results := make([]flux.Result, 0, 1) - for res := range fq.Results() { - results = append(results, res) - } - - if err := fq.Err(); err != nil { - fq.Done() - return nil, err - } - - return &QueryResults{ - Results: results, - Query: fq, - }, nil -} - -// QueryAndConsume queries InfluxDB using the request provided. It uses a function to consume the results obtained. -// It returns the first error encountered when requesting the query, consuming the results, or executing the query. -func (tl *TestLauncher) QueryAndConsume(ctx context.Context, req *query.Request, fn func(r flux.Result) error) error { - res, err := tl.FluxQueryService().Query(ctx, req) - if err != nil { - return err - } - // iterate over results to populate res.Err() - var gotErr error - for res.More() { - if err := fn(res.Next()); gotErr == nil { - gotErr = err - } - } - if gotErr != nil { - return gotErr - } - return res.Err() -} - -// QueryAndNopConsume does the same as QueryAndConsume but consumes results with a nop function. -func (tl *TestLauncher) QueryAndNopConsume(ctx context.Context, req *query.Request) error { - return tl.QueryAndConsume(ctx, req, func(r flux.Result) error { - return r.Tables().Do(func(table flux.Table) error { - return nil - }) - }) -} - -// FluxQueryOrFail performs a query to the specified organization and returns the results -// or fails if there is an error. -func (tl *TestLauncher) FluxQueryOrFail(tb testing.TB, org *influxdb.Organization, token string, query string) string { - tb.Helper() - - b, err := http.SimpleQuery(tl.URL(), query, org.Name, token) - if err != nil { - tb.Fatal(err) - } - - return string(b) -} - -// QueryFlux returns the csv response from a flux query. -// It also removes all the \r to make it easier to write tests. -func (tl *TestLauncher) QueryFlux(tb testing.TB, org *influxdb.Organization, token, query string) string { - tb.Helper() - - b, err := http.SimpleQuery(tl.URL(), query, org.Name, token) - if err != nil { - tb.Fatal(err) - } - - // remove all \r as well as the extra terminating \n - b = bytes.ReplaceAll(b, []byte("\r"), nil) - return string(b[:len(b)-1]) -} - -func (tl *TestLauncher) BackupOrFail(tb testing.TB, ctx context.Context, req clibackup.Params) { - tb.Helper() - require.NoError(tb, tl.Backup(tb, ctx, req)) -} - -func (tl *TestLauncher) Backup(tb testing.TB, ctx context.Context, req clibackup.Params) error { - tb.Helper() - return tl.BackupService(tb).Backup(ctx, &req) -} - -func (tl *TestLauncher) RestoreOrFail(tb testing.TB, ctx context.Context, req clirestore.Params) { - tb.Helper() - require.NoError(tb, tl.Restore(tb, ctx, req)) -} - -func (tl *TestLauncher) Restore(tb testing.TB, ctx context.Context, req clirestore.Params) error { - tb.Helper() - return tl.RestoreService(tb).Restore(ctx, &req) -} - -// MustNewHTTPRequest returns a new nethttp.Request with base URL and auth attached. Fail on error. 
-func (tl *TestLauncher) MustNewHTTPRequest(method, rawurl, body string) *nethttp.Request { - req, err := nethttp.NewRequest(method, tl.URL().String()+rawurl, strings.NewReader(body)) - if err != nil { - panic(err) - } - - req.Header.Set("Authorization", "Token "+tl.Auth.Token) - return req -} - -// NewHTTPRequest returns a new nethttp.Request with base URL and auth attached. -func (tl *TestLauncher) NewHTTPRequest(method, rawurl, token string, body string) (*nethttp.Request, error) { - req, err := nethttp.NewRequest(method, tl.URL().String()+rawurl, strings.NewReader(body)) - if err != nil { - return nil, err - } - req.Header.Set("Authorization", "Token "+token) - return req, nil -} - -// NewHTTPRequestOrFail returns a new nethttp.Request with base URL and auth attached. Fail on error. -func (tl *TestLauncher) NewHTTPRequestOrFail(tb testing.TB, method, rawurl, token string, body string) *nethttp.Request { - tb.Helper() - req, err := tl.NewHTTPRequest(method, rawurl, token, body) - if err != nil { - tb.Fatal(err) - } - return req -} - -// Services - -func (tl *TestLauncher) FluxService() *http.FluxService { - return &http.FluxService{Addr: tl.URL().String(), Token: tl.Auth.Token} -} - -func (tl *TestLauncher) FluxQueryService() *http.FluxQueryService { - return &http.FluxQueryService{Addr: tl.URL().String(), Token: tl.Auth.Token} -} - -func (tl *TestLauncher) BucketService(tb testing.TB) *tenant.BucketClientService { - tb.Helper() - return &tenant.BucketClientService{Client: tl.HTTPClient(tb)} -} - -func (tl *TestLauncher) DashboardService(tb testing.TB) influxdb.DashboardService { - tb.Helper() - return &dashboardTransport.DashboardService{Client: tl.HTTPClient(tb)} -} - -func (tl *TestLauncher) LabelService(tb testing.TB) influxdb.LabelService { - tb.Helper() - return &label.LabelClientService{Client: tl.HTTPClient(tb)} -} - -func (tl *TestLauncher) NotificationEndpointService(tb testing.TB) *http.NotificationEndpointService { - tb.Helper() - return http.NewNotificationEndpointService(tl.HTTPClient(tb)) -} - -func (tl *TestLauncher) NotificationRuleService(tb testing.TB) influxdb.NotificationRuleStore { - tb.Helper() - return http.NewNotificationRuleService(tl.HTTPClient(tb)) -} - -func (tl *TestLauncher) OrgService(tb testing.TB) influxdb.OrganizationService { - tb.Helper() - return &tenant.OrgClientService{Client: tl.HTTPClient(tb)} -} - -func (tl *TestLauncher) PkgerService(tb testing.TB) pkger.SVC { - return &pkger.HTTPRemoteService{Client: tl.HTTPClient(tb)} -} - -func (tl *TestLauncher) TaskServiceKV(tb testing.TB) taskmodel.TaskService { - return tl.kvService -} - -func (tl *TestLauncher) TelegrafService(tb testing.TB) *http.TelegrafService { - tb.Helper() - return http.NewTelegrafService(tl.HTTPClient(tb)) -} - -func (tl *TestLauncher) VariableService(tb testing.TB) *http.VariableService { - tb.Helper() - return &http.VariableService{Client: tl.HTTPClient(tb)} -} - -func (tl *TestLauncher) AuthorizationService(tb testing.TB) *http.AuthorizationService { - tb.Helper() - return &http.AuthorizationService{Client: tl.HTTPClient(tb)} -} - -func (tl *TestLauncher) TaskService(tb testing.TB) taskmodel.TaskService { - tb.Helper() - return &http.TaskService{Client: tl.HTTPClient(tb)} -} - -func (tl *TestLauncher) BackupService(tb testing.TB) *clibackup.Client { - tb.Helper() - client := tl.APIClient(tb) - return &clibackup.Client{ - CLI: clients.CLI{}, - BackupApi: client.BackupApi, - HealthApi: client.HealthApi, - } -} - -func (tl *TestLauncher) RestoreService(tb testing.TB) 
*clirestore.Client { - tb.Helper() - client := tl.APIClient(tb) - return &clirestore.Client{ - CLI: clients.CLI{}, - HealthApi: client.HealthApi, - RestoreApi: client.RestoreApi, - BucketsApi: client.BucketsApi, - OrganizationsApi: client.OrganizationsApi, - ApiConfig: client, - } -} - -func (tl *TestLauncher) ResetHTTPCLient() { - tl.httpClient = nil -} - -func (tl *TestLauncher) HTTPClient(tb testing.TB) *httpc.Client { - tb.Helper() - - if tl.httpClient == nil { - token := "" - if tl.Auth != nil { - token = tl.Auth.Token - } - client, err := http.NewHTTPClient(tl.URL().String(), token, false) - if err != nil { - tb.Fatal(err) - } - tl.httpClient = client - } - return tl.httpClient -} - -func (tl *TestLauncher) APIClient(tb testing.TB) *api.APIClient { - tb.Helper() - - if tl.apiClient == nil { - params := api.ConfigParams{ - Host: tl.URL(), - } - if tl.Auth != nil { - params.Token = &tl.Auth.Token - } - tl.apiClient = api.NewAPIClient(api.NewAPIConfig(params)) - } - - return tl.apiClient -} - -func (tl *TestLauncher) Metrics(tb testing.TB) (metrics map[string]*dto.MetricFamily) { - req := tl.HTTPClient(tb). - Get("/metrics"). - RespFn(func(resp *nethttp.Response) error { - if resp.StatusCode != nethttp.StatusOK { - return fmt.Errorf("unexpected status code: %d %s", resp.StatusCode, resp.Status) - } - defer func() { _ = resp.Body.Close() }() - - var parser expfmt.TextParser - metrics, _ = parser.TextToMetricFamilies(resp.Body) - return nil - }) - if err := req.Do(context.Background()); err != nil { - tb.Fatal(err) - } - return metrics -} - -func (tl *TestLauncher) NumReads(tb testing.TB, op string) uint64 { - const metricName = "query_influxdb_source_read_request_duration_seconds" - mf := tl.Metrics(tb)[metricName] - if mf != nil { - fmt.Printf("%v\n", mf) - for _, m := range mf.Metric { - for _, label := range m.Label { - if label.GetName() == "op" && label.GetValue() == op { - return m.Histogram.GetSampleCount() - } - } - } - } - return 0 -} - -// QueryResult wraps a single flux.Result with some helper methods. -type QueryResult struct { - t *testing.T - q flux.Result -} - -// HasTableWithCols checks if the desired number of tables and columns exist, -// ignoring any system columns. -// -// If the result is not as expected then the testing.T fails. -func (r *QueryResult) HasTablesWithCols(want []int) { - r.t.Helper() - - // _start, _stop, _time, _f - systemCols := 4 - got := []int{} - if err := r.q.Tables().Do(func(b flux.Table) error { - got = append(got, len(b.Cols())-systemCols) - b.Do(func(c flux.ColReader) error { return nil }) - return nil - }); err != nil { - r.t.Fatal(err) - } - - if !reflect.DeepEqual(got, want) { - r.t.Fatalf("got %v, expected %v", got, want) - } -} - -// TablesN returns the number of tables for the result. -func (r *QueryResult) TablesN() int { - var total int - r.q.Tables().Do(func(b flux.Table) error { - total++ - b.Do(func(c flux.ColReader) error { return nil }) - return nil - }) - return total -} - -// QueryResults wraps a set of query results with some helper methods. -type QueryResults struct { - Results []flux.Result - Query flux.Query -} - -func (r *QueryResults) Done() { - r.Query.Done() -} - -// First returns the first QueryResult. When there are not exactly 1 table First -// will fail. -func (r *QueryResults) First(t *testing.T) *QueryResult { - r.HasTableCount(t, 1) - for _, result := range r.Results { - return &QueryResult{t: t, q: result} - } - return nil -} - -// HasTableCount asserts that there are n tables in the result. 
-func (r *QueryResults) HasTableCount(t *testing.T, n int) { - if got, exp := len(r.Results), n; got != exp { - t.Fatalf("result has %d tables, expected %d. Tables: %s", got, exp, r.Names()) - } -} - -// Names returns the sorted set of result names for the query results. -func (r *QueryResults) Names() []string { - if len(r.Results) == 0 { - return nil - } - names := make([]string, len(r.Results), 0) - for _, r := range r.Results { - names = append(names, r.Name()) - } - return names -} - -// SortedNames returns the sorted set of table names for the query results. -func (r *QueryResults) SortedNames() []string { - names := r.Names() - sort.Strings(names) - return names -} diff --git a/cmd/influxd/launcher/launcher_test.go b/cmd/influxd/launcher/launcher_test.go deleted file mode 100644 index 1db0c0972d4..00000000000 --- a/cmd/influxd/launcher/launcher_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package launcher_test - -import ( - "context" - "encoding/json" - "io" - nethttp "net/http" - "testing" - "time" - - platform "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" - _ "github.com/influxdata/influxdb/v2/fluxinit/static" - "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/stretchr/testify/assert" -) - -// Default context. -var ctx = context.Background() - -func TestLauncher_Setup(t *testing.T) { - l := launcher.NewTestLauncher() - l.RunOrFail(t, ctx) - defer l.ShutdownOrFail(t, ctx) - - client, err := http.NewHTTPClient(l.URL().String(), "", false) - if err != nil { - t.Fatal(err) - } - - svc := &tenant.OnboardClientService{Client: client} - if results, err := svc.OnboardInitialUser(ctx, &platform.OnboardingRequest{ - User: "USER", - Password: "PASSWORD", - Org: "ORG", - Bucket: "BUCKET", - }); err != nil { - t.Fatal(err) - } else if results.User.ID == 0 { - t.Fatal("expected user id") - } else if results.Org.ID == 0 { - t.Fatal("expected org id") - } else if results.Bucket.ID == 0 { - t.Fatal("expected bucket id") - } else if results.Auth.Token == "" { - t.Fatal("expected auth token") - } -} - -// This is to mimic the UI using cookies as sessions -// rather than authorizations -func TestLauncher_SetupWithUsers(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - - r, err := nethttp.NewRequest("POST", l.URL().String()+"/api/v2/signin", nil) - if err != nil { - t.Fatal(err) - } - - r.SetBasicAuth("USER", "PASSWORD") - - resp, err := nethttp.DefaultClient.Do(r) - if err != nil { - t.Fatal(err) - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - if err := resp.Body.Close(); err != nil { - t.Fatal(err) - } - - if resp.StatusCode != nethttp.StatusNoContent { - t.Fatalf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header) - } - - cookies := resp.Cookies() - if len(cookies) != 1 { - t.Fatalf("expected 1 cookie but received %d", len(cookies)) - } - - user2 := &platform.User{ - Name: "USER2", - } - - b, _ := json.Marshal(user2) - r = l.NewHTTPRequestOrFail(t, "POST", "/api/v2/users", l.Auth.Token, string(b)) - - resp, err = nethttp.DefaultClient.Do(r) - if err != nil { - t.Fatal(err) - } - - body, err = io.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - if err := resp.Body.Close(); err != nil { - t.Fatal(err) - } - - if resp.StatusCode != nethttp.StatusCreated { - t.Fatalf("unexpected status code: %d, body: %s, headers: %v", 
resp.StatusCode, body, resp.Header) - } - - r, err = nethttp.NewRequest("GET", l.URL().String()+"/api/v2/users", nil) - if err != nil { - t.Fatal(err) - } - r.AddCookie(cookies[0]) - - resp, err = nethttp.DefaultClient.Do(r) - if err != nil { - t.Fatal(err) - } - - body, err = io.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - if err := resp.Body.Close(); err != nil { - t.Fatal(err) - } - - if resp.StatusCode != nethttp.StatusOK { - t.Fatalf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header) - } - - exp := struct { - Users []platform.User `json:"users"` - }{} - err = json.Unmarshal(body, &exp) - if err != nil { - t.Fatalf("unexpected error unmarshalling user: %v", err) - } - if len(exp.Users) != 2 { - t.Fatalf("unexpected 2 users: %#+v", exp) - } -} - -func TestLauncher_PingHeaders(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - - platform.SetBuildInfo("dev", "none", time.Now().UTC().Format(time.RFC3339)) - - r, err := nethttp.NewRequest("GET", l.URL().String()+"/ping", nil) - if err != nil { - t.Fatal(err) - } - - resp, err := nethttp.DefaultClient.Do(r) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, []string{"OSS"}, resp.Header.Values("X-Influxdb-Build")) - assert.Equal(t, []string{"dev"}, resp.Header.Values("X-Influxdb-Version")) -} diff --git a/cmd/influxd/launcher/pkger_test.go b/cmd/influxd/launcher/pkger_test.go deleted file mode 100644 index d1ff7deb2b5..00000000000 --- a/cmd/influxd/launcher/pkger_test.go +++ /dev/null @@ -1,5118 +0,0 @@ -package launcher - -import ( - "context" - "errors" - "fmt" - nethttp "net/http" - "net/http/httptest" - "os" - "runtime" - "sort" - "strings" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/rule" - "github.com/influxdata/influxdb/v2/pkger" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -var ctx = context.Background() - -func TestLauncher_Pkger(t *testing.T) { - l := RunAndSetupNewLauncherOrFail(ctx, t, func(o *InfluxdOpts) { - o.LogLevel = zap.ErrorLevel - }) - defer l.ShutdownOrFail(t, ctx) - - require.NoError(t, l.BucketService(t).DeleteBucket(ctx, l.Bucket.ID)) - - svc := l.PkgerService(t) - - resourceCheck := newResourceChecker(l) - - deleteStackFn := func(t *testing.T, stackID platform.ID) { - t.Helper() - err := svc.DeleteStack(ctx, struct{ OrgID, UserID, StackID platform.ID }{ - OrgID: l.Org.ID, - UserID: l.User.ID, - StackID: stackID, - }) - require.NoError(t, err, "failed to delete stack and its associated resources") - } - - newStackFn := func(t *testing.T, stack pkger.StackCreate) (pkger.Stack, func()) { - t.Helper() - - if stack.OrgID == 0 { - stack.OrgID = l.Org.ID - } - - newStack, err := svc.InitStack(ctx, l.User.ID, stack) - require.NoError(t, err) - - assert.Equal(t, l.Org.ID, newStack.OrgID) - ev := newStack.LatestEvent() - assert.Equal(t, stack.Name, ev.Name) - assert.Equal(t, stack.Description, ev.Description) - 
assert.NotNil(t, ev.Resources, "failed to match stack resources") - expectedURLs := stack.TemplateURLs - if expectedURLs == nil { - expectedURLs = []string{} - } - assert.Equal(t, expectedURLs, ev.TemplateURLs, "failed to match stack URLs") - assert.NotZero(t, newStack.CreatedAt) - assert.NotZero(t, ev.UpdatedAt) - - return newStack, func() { - // deletes are idempotent, so any error encountered here is not a not found error - // but rather an error to concern ourselves with. - deleteStackFn(t, newStack.ID) - } - } - - newTemplate := func(objects ...pkger.Object) *pkger.Template { - return &pkger.Template{Objects: objects} - } - - newBucketObject := func(pkgName, name, desc string) pkger.Object { - obj := pkger.BucketToObject("", influxdb.Bucket{ - Name: name, - Description: desc, - }) - obj.SetMetadataName(pkgName) - return obj - } - - newCheckBase := func(t *testing.T, name string, every time.Duration) check.Base { - t.Helper() - - d, err := notification.FromTimeDuration(every) - require.NoError(t, err) - - return check.Base{ - Name: name, - Every: &d, - Query: influxdb.DashboardQuery{ - Text: `from(bucket: "rucket_1") |> range(start: -1d)`, - }, - StatusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }", - } - } - - newCheckDeadmanObject := func(t *testing.T, pkgName, name string, every time.Duration) pkger.Object { - t.Helper() - - obj := pkger.CheckToObject("", &check.Deadman{ - Base: newCheckBase(t, name, every), - Level: notification.Critical, - }) - obj.SetMetadataName(pkgName) - return obj - } - - newCheckThresholdObject := func(t *testing.T, pkgName, name string, every time.Duration) pkger.Object { - t.Helper() - - obj := pkger.CheckToObject("", &check.Threshold{ - Base: newCheckBase(t, name, every), - Thresholds: []check.ThresholdConfig{ - check.Lesser{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Critical, - }, - Value: 0.5, - }, - }, - }) - obj.SetMetadataName(pkgName) - return obj - } - - newDashObject := func(pkgName, name, desc string) pkger.Object { - obj := pkger.DashboardToObject("", influxdb.Dashboard{ - Name: name, - Description: desc, - }) - obj.SetMetadataName(pkgName) - return obj - } - - newEndpointBase := func(name, desc string) endpoint.Base { - return endpoint.Base{ - Name: name, - Description: desc, - Status: influxdb.Inactive, - } - } - - newEndpointHTTP := func(pkgName, name, description string) pkger.Object { - obj := pkger.NotificationEndpointToObject("", &endpoint.HTTP{ - Base: newEndpointBase(name, description), - AuthMethod: "none", - URL: "http://example.com", - Method: "GET", - }) - obj.SetMetadataName(pkgName) - return obj - } - - newEndpointPagerDuty := func(pkgName, name, description string) pkger.Object { - obj := pkger.NotificationEndpointToObject("", &endpoint.PagerDuty{ - Base: newEndpointBase(name, description), - ClientURL: "http://example.com", - RoutingKey: influxdb.SecretField{ - Key: "routing-key", - Value: strPtr("threeve"), - }, - }) - obj.SetMetadataName(pkgName) - return obj - } - - newEndpointSlack := func(pkgName, name, description string) pkger.Object { - obj := pkger.NotificationEndpointToObject("", &endpoint.Slack{ - Base: newEndpointBase(name, description), - URL: "http://influxslack.com", - }) - obj.SetMetadataName(pkgName) - return obj - } - - newLabelObject := func(pkgName, name, desc, color string) pkger.Object { - obj := pkger.LabelToObject("", influxdb.Label{ - Name: name, - Properties: map[string]string{ - "color": color, - "description": desc, - }, - }) - obj.SetMetadataName(pkgName) 
- return obj - } - - newRuleObject := func(t *testing.T, pkgName, name, endpointPkgName, desc string) pkger.Object { - t.Helper() - - every, err := notification.FromTimeDuration(time.Hour) - require.NoError(t, err) - - obj := pkger.NotificationRuleToObject("", endpointPkgName, &rule.HTTP{ - Base: rule.Base{ - Name: name, - Description: desc, - Every: &every, - StatusRules: []notification.StatusRule{{CurrentLevel: notification.Critical}}, - }, - }) - obj.SetMetadataName(pkgName) - return obj - } - - newTaskObject := func(pkgName, name, description string) pkger.Object { - obj := pkger.TaskToObject("", taskmodel.Task{ - Name: name, - Description: description, - Flux: "buckets()", - Every: "1h", - }) - obj.SetMetadataName(pkgName) - return obj - } - - newTelegrafObject := func(pkgName, name, description string) pkger.Object { - obj := pkger.TelegrafToObject("", influxdb.TelegrafConfig{ - Name: name, - Description: description, - Config: telegrafCfg, - }) - obj.SetMetadataName(pkgName) - return obj - } - - newVariableObject := func(pkgName, name, description string, selected ...string) pkger.Object { - obj := pkger.VariableToObject("", influxdb.Variable{ - Name: name, - Description: description, - Selected: selected, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"a", "b"}, - }, - }) - obj.SetMetadataName(pkgName) - return obj - } - - validateAllResourcesRemoved := func(t *testing.T, summary pkger.Summary) { - t.Helper() - - for _, b := range summary.Buckets { - _, err := resourceCheck.getBucket(t, bySafeID(b.ID)) - assertErrorCode(t, errors2.ENotFound, err) - } - - for _, c := range summary.Checks { - _, err := resourceCheck.getCheck(t, byID(c.Check.GetID())) - assert.Error(t, err) - } - - for _, d := range summary.Dashboards { - _, err := resourceCheck.getDashboard(t, bySafeID(d.ID)) - assertErrorCode(t, errors2.ENotFound, err) - } - - for _, l := range summary.Labels { - _, err := resourceCheck.getLabel(t, bySafeID(l.ID)) - assertErrorCode(t, errors2.ENotFound, err) - } - - for _, e := range summary.NotificationEndpoints { - _, err := resourceCheck.getEndpoint(t, byID(e.NotificationEndpoint.GetID())) - assert.Error(t, err) - } - - for _, r := range summary.NotificationRules { - _, err := resourceCheck.getRule(t, bySafeID(r.ID)) - assert.Error(t, err) - } - - for _, ta := range summary.Tasks { - _, err := resourceCheck.getTask(t, bySafeID(ta.ID)) - assert.Error(t, err) - } - - for _, te := range summary.TelegrafConfigs { - _, err := resourceCheck.getTelegrafConfig(t, byID(te.TelegrafConfig.ID)) - assert.Error(t, err) - } - - for _, v := range summary.Variables { - _, err := resourceCheck.getVariable(t, bySafeID(v.ID)) - assertErrorCode(t, errors2.ENotFound, err) - } - } - - t.Run("managing pkg lifecycle with stacks", func(t *testing.T) { - t.Run("list stacks", func(t *testing.T) { - stacks, err := svc.ListStacks(ctx, l.Org.ID, pkger.ListFilter{}) - require.NoError(t, err) - require.Empty(t, stacks) - - newStack1, cleanup1 := newStackFn(t, pkger.StackCreate{ - Name: "first stack", - }) - defer cleanup1() - - newStack2, cleanup2 := newStackFn(t, pkger.StackCreate{ - Name: "second stack", - }) - defer cleanup2() - - containsStack := func(t *testing.T, haystack []pkger.Stack, needle pkger.Stack) { - t.Helper() - for _, hay := range haystack { - if hay.ID == needle.ID { - return - } - } - require.FailNowf(t, "did not find expected stack", "got: %+v", needle) - } - - t.Run("returns all stacks when no filter args provided", func(t *testing.T) 
{ - stacks, err := svc.ListStacks(ctx, l.Org.ID, pkger.ListFilter{}) - require.NoError(t, err) - - containsStack(t, stacks, newStack1) - containsStack(t, stacks, newStack2) - }) - - t.Run("filters stacks by ID filter", func(t *testing.T) { - stacks, err := svc.ListStacks(ctx, l.Org.ID, pkger.ListFilter{ - StackIDs: []platform.ID{newStack1.ID}, - }) - require.NoError(t, err) - require.Len(t, stacks, 1) - containsStack(t, stacks, newStack1) - }) - - t.Run("filter stacks by names", func(t *testing.T) { - stacks, err := svc.ListStacks(ctx, l.Org.ID, pkger.ListFilter{ - Names: []string{newStack2.LatestEvent().Name}, - }) - require.NoError(t, err) - require.Len(t, stacks, 1) - containsStack(t, stacks, newStack2) - }) - }) - - t.Run("creating a stack", func(t *testing.T) { - _, cleanup := newStackFn(t, pkger.StackCreate{ - OrgID: l.Org.ID, - Name: "first stack", - Description: "desc", - TemplateURLs: []string{"http://example.com"}, - }) - cleanup() - }) - - t.Run("uninstall a stack", func(t *testing.T) { - t.Run("should remove all resources associated with it", func(t *testing.T) { - newStack, cleanup := newStackFn(t, pkger.StackCreate{}) - defer cleanup() - - newEndpointPkgName := "non-existent-endpoint" - allResourcesPkg := newTemplate( - newBucketObject("non-existent-bucket", "", ""), - newCheckDeadmanObject(t, "non-existent-check", "", time.Minute), - newDashObject("non-existent-dash", "", ""), - newEndpointHTTP(newEndpointPkgName, "", ""), - newLabelObject("non-existent-label", "", "", ""), - newRuleObject(t, "non-existent-rule", "", newEndpointPkgName, ""), - newTaskObject("non-existent-task", "", ""), - newTelegrafObject("non-existent-tele", "", ""), - newVariableObject("non-existent-var", "", ""), - ) - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithTemplate(allResourcesPkg), - pkger.ApplyWithStackID(newStack.ID), - ) - require.NoError(t, err) - - sum := impact.Summary - - require.Len(t, sum.Buckets, 1) - assert.NotZero(t, sum.Buckets[0].ID) - require.Len(t, sum.Checks, 1) - assert.NotZero(t, sum.Checks[0].Check.GetID()) - require.Len(t, sum.Dashboards, 1) - assert.NotZero(t, sum.Dashboards[0].ID) - require.Len(t, sum.Labels, 1) - assert.NotZero(t, sum.Labels[0].ID) - require.Len(t, sum.NotificationEndpoints, 1) - assert.NotZero(t, sum.NotificationEndpoints[0].NotificationEndpoint.GetID()) - require.Len(t, sum.NotificationRules, 1) - assert.NotZero(t, sum.NotificationRules[0].ID) - require.Len(t, sum.Tasks, 1) - assert.NotZero(t, sum.Tasks[0].ID) - require.Len(t, sum.TelegrafConfigs, 1) - assert.NotZero(t, sum.TelegrafConfigs[0].TelegrafConfig.ID) - require.Len(t, sum.Variables, 1) - assert.NotZero(t, sum.Variables[0].ID) - - _, err = svc.UninstallStack(ctx, struct{ OrgID, UserID, StackID platform.ID }{ - OrgID: l.Org.ID, - UserID: l.User.ID, - StackID: newStack.ID, - }) - require.NoError(t, err) - - matchingStack, err := svc.ReadStack(ctx, newStack.ID) - require.NoError(t, err) - - ev := matchingStack.LatestEvent() - assert.Equal(t, pkger.StackEventUninstalled, ev.EventType) - assert.Empty(t, ev.Resources) - - validateAllResourcesRemoved(t, sum) - }) - }) - - t.Run("delete a stack", func(t *testing.T) { - t.Run("should delete the stack and all resources associated with it", func(t *testing.T) { - newStack, cleanup := newStackFn(t, pkger.StackCreate{}) - defer cleanup() - - newEndpointPkgName := "non-existent-endpoint" - allResourcesPkg := newTemplate( - newBucketObject("non-existent-bucket", "", ""), - newCheckDeadmanObject(t, "non-existent-check", "", 
time.Minute), - newDashObject("non-existent-dash", "", ""), - newEndpointHTTP(newEndpointPkgName, "", ""), - newLabelObject("non-existent-label", "", "", ""), - newRuleObject(t, "non-existent-rule", "", newEndpointPkgName, ""), - newTaskObject("non-existent-task", "", ""), - newTelegrafObject("non-existent-tele", "", ""), - newVariableObject("non-existent-var", "", ""), - ) - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithTemplate(allResourcesPkg), - pkger.ApplyWithStackID(newStack.ID), - ) - require.NoError(t, err) - - sum := impact.Summary - - require.Len(t, sum.Buckets, 1) - assert.NotZero(t, sum.Buckets[0].ID) - require.Len(t, sum.Checks, 1) - assert.NotZero(t, sum.Checks[0].Check.GetID()) - require.Len(t, sum.Dashboards, 1) - assert.NotZero(t, sum.Dashboards[0].ID) - require.Len(t, sum.Labels, 1) - assert.NotZero(t, sum.Labels[0].ID) - require.Len(t, sum.NotificationEndpoints, 1) - assert.NotZero(t, sum.NotificationEndpoints[0].NotificationEndpoint.GetID()) - require.Len(t, sum.NotificationRules, 1) - assert.NotZero(t, sum.NotificationRules[0].ID) - require.Len(t, sum.Tasks, 1) - assert.NotZero(t, sum.Tasks[0].ID) - require.Len(t, sum.TelegrafConfigs, 1) - assert.NotZero(t, sum.TelegrafConfigs[0].TelegrafConfig.ID) - require.Len(t, sum.Variables, 1) - assert.NotZero(t, sum.Variables[0].ID) - - err = svc.DeleteStack(ctx, struct{ OrgID, UserID, StackID platform.ID }{ - OrgID: l.Org.ID, - UserID: l.User.ID, - StackID: newStack.ID, - }) - require.NoError(t, err) - - validateAllResourcesRemoved(t, sum) - }) - - t.Run("that has already been deleted should be successful", func(t *testing.T) { - newStack, cleanup := newStackFn(t, pkger.StackCreate{}) - defer cleanup() - - err := svc.DeleteStack(ctx, struct{ OrgID, UserID, StackID platform.ID }{ - OrgID: l.Org.ID, - UserID: l.User.ID, - StackID: newStack.ID, - }) - require.NoError(t, err) - - // delete same stack - err = svc.DeleteStack(ctx, struct{ OrgID, UserID, StackID platform.ID }{ - OrgID: l.Org.ID, - UserID: l.User.ID, - StackID: newStack.ID, - }) - require.NoError(t, err) - }) - - t.Run("that doesn't exist should be successful", func(t *testing.T) { - // delete stack that doesn't exist - err := svc.DeleteStack(ctx, struct{ OrgID, UserID, StackID platform.ID }{ - OrgID: l.Org.ID, - UserID: l.User.ID, - StackID: 9000, - }) - require.NoError(t, err) - }) - }) - - t.Run("read a stack", func(t *testing.T) { - newStack1, cleanup1 := newStackFn(t, pkger.StackCreate{ - Name: "first stack", - }) - defer cleanup1() - - newStack2, cleanup2 := newStackFn(t, pkger.StackCreate{ - Name: "second stack", - }) - defer cleanup2() - - actual, err := svc.ReadStack(ctx, newStack1.ID) - require.NoError(t, err) - assert.Equal(t, newStack1, actual) - - actual, err = svc.ReadStack(ctx, newStack2.ID) - require.NoError(t, err) - assert.Equal(t, newStack2, actual) - - _, err = svc.ReadStack(ctx, platform.ID(9000)) - require.Equal(t, errors2.ENotFound, errors2.ErrorCode(err)) - }) - - t.Run("updating a stack", func(t *testing.T) { - t.Run("bootstrapped updates successfully", func(t *testing.T) { - stack, cleanup := newStackFn(t, pkger.StackCreate{ - OrgID: l.Org.ID, - Name: "first name", - Description: "first desc", - TemplateURLs: []string{}, - }) - defer cleanup() - - // Wait for time.Now() to move ahead of the stack's latest update time. - // We do this so that on Windows (where time.Now() is updated relatively slowly) - // it doesn't appear that the stack was updated at the same time it was created. 
- for now := time.Now(); now.Equal(stack.LatestEvent().UpdatedAt); now = time.Now() { - time.Sleep(time.Millisecond) - } - - assertStack := func(t *testing.T, st pkger.Stack) { - t.Helper() - assert.Equal(t, stack.ID, st.ID) - ev := st.LatestEvent() - assert.Equal(t, "2nd name", ev.Name) - assert.Equal(t, "2nd desc", ev.Description) - assert.Equal(t, []string{"http://example.com"}, ev.TemplateURLs) - resources := []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindBucket, - MetaName: "bucket-meta", - }, - } - assert.Equal(t, resources, ev.Resources) - assert.True(t, ev.UpdatedAt.After(stack.LatestEvent().UpdatedAt)) - } - - updStack, err := svc.UpdateStack(ctx, pkger.StackUpdate{ - ID: stack.ID, - Name: strPtr("2nd name"), - Description: strPtr("2nd desc"), - TemplateURLs: []string{"http://example.com"}, - AdditionalResources: []pkger.StackAdditionalResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindBucket, - MetaName: "bucket-meta", - }, - }, - }) - require.NoError(t, err) - assertStack(t, updStack) - - readStack, err := svc.ReadStack(ctx, stack.ID) - require.NoError(t, err) - assertStack(t, readStack) - }) - - t.Run("associated with installed template returns valid resources", func(t *testing.T) { - tmpl := newTemplate(newLabelObject("label-1", "", "", "")) - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(tmpl)) - require.NoError(t, err) - defer deleteStackFn(t, impact.StackID) - - stack, err := svc.ReadStack(ctx, impact.StackID) - require.NoError(t, err) - - assertStack := func(t *testing.T, st pkger.Stack) { - t.Helper() - assert.Equal(t, stack.ID, st.ID) - ev := st.LatestEvent() - assert.Equal(t, "2nd name", ev.Name) - resources := []pkger.StackResource{{ - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindBucket, - MetaName: "bucket-meta", - }} - resources = append(resources, stack.LatestEvent().Resources...) 
- assert.Equal(t, resources, ev.Resources) - } - - updStack, err := svc.UpdateStack(ctx, pkger.StackUpdate{ - ID: stack.ID, - Name: strPtr("2nd name"), - AdditionalResources: []pkger.StackAdditionalResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindBucket, - MetaName: "bucket-meta", - }, - }, - }) - require.NoError(t, err) - assertStack(t, updStack) - - readStack, err := svc.ReadStack(ctx, stack.ID) - require.NoError(t, err) - assertStack(t, readStack) - }) - }) - - t.Run("apply with only a stackID succeeds when stack has URLs", func(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("https://github.com/influxdata/influxdb/issues/22474") - } - - svr := httptest.NewServer(nethttp.HandlerFunc(func(w nethttp.ResponseWriter, r *nethttp.Request) { - pkg := newTemplate(newBucketObject("bucket-0", "", "")) - b, err := pkg.Encode(pkger.EncodingJSON) - if err != nil { - w.WriteHeader(nethttp.StatusInternalServerError) - return - } - w.Write(b) - })) - defer svr.Close() - - f, err := os.CreateTemp("", "pkg.yml") - require.NoError(t, err) - defer f.Close() - - pkg := newTemplate(newBucketObject("bucket-1", "", "")) - b, err := pkg.Encode(pkger.EncodingYAML) - require.NoError(t, err) - f.Write(b) - require.NoError(t, f.Close()) - - expectedURLs := []string{ - // URL for http call - svr.URL + "/pkg.json", - // URL for file - "file://" + f.Name(), - } - - newStack, cleanup := newStackFn(t, pkger.StackCreate{ - TemplateURLs: expectedURLs, - }) - defer cleanup() - - sumEquals := func(t *testing.T, impact pkger.ImpactSummary) { - t.Helper() - - assert.Equal(t, expectedURLs, impact.Sources) - - sum := impact.Summary - require.Len(t, sum.Buckets, 2) - assert.Equal(t, "bucket-0", sum.Buckets[0].MetaName) - assert.Equal(t, "bucket-0", sum.Buckets[0].Name) - assert.Equal(t, "bucket-1", sum.Buckets[1].MetaName) - assert.Equal(t, "bucket-1", sum.Buckets[1].Name) - } - - impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithStackID(newStack.ID)) - require.NoError(t, err) - sumEquals(t, impact) - - impact, err = svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithStackID(newStack.ID)) - require.NoError(t, err) - sumEquals(t, impact) - - stacks, err := svc.ListStacks(ctx, l.Org.ID, pkger.ListFilter{ - StackIDs: []platform.ID{newStack.ID}, - }) - require.NoError(t, err) - - require.Len(t, stacks, 1) - assert.Equal(t, expectedURLs, stacks[0].LatestEvent().Sources) - }) - - t.Run("apply a pkg with a stack and associations", func(t *testing.T) { - testLabelMappingFn := func(t *testing.T, stackID platform.ID, pkg *pkger.Template, assertAssociatedLabelsFn func(pkger.Summary, []*influxdb.Label, influxdb.ResourceType)) pkger.Summary { - t.Helper() - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithTemplate(pkg), - pkger.ApplyWithStackID(stackID), - ) - require.NoError(t, err) - - assert.Equal(t, []string{"byte stream"}, impact.Sources) - - sum := impact.Summary - - require.Len(t, sum.Buckets, 1) - assert.NotZero(t, sum.Buckets[0].ID) - assert.Equal(t, "bucket", sum.Buckets[0].Name) - - require.Len(t, sum.Checks, 1) - assert.NotZero(t, sum.Checks[0].Check.GetID()) - assert.Equal(t, "check-0", sum.Checks[0].Check.GetName()) - - require.Len(t, sum.Dashboards, 1) - assert.NotZero(t, sum.Dashboards[0].ID) - assert.Equal(t, "dash-0", sum.Dashboards[0].Name) - - require.Len(t, sum.NotificationEndpoints, 1) - assert.NotZero(t, sum.NotificationEndpoints[0].NotificationEndpoint.GetID()) - assert.Equal(t, "endpoint-0", 
sum.NotificationEndpoints[0].NotificationEndpoint.GetName()) - - require.Len(t, sum.NotificationRules, 1) - assert.NotZero(t, sum.NotificationRules[0].ID) - assert.Equal(t, "rule-0", sum.NotificationRules[0].Name) - - require.Len(t, sum.Labels, 1) - assert.NotZero(t, sum.Labels[0].ID) - assert.Equal(t, "label 1", sum.Labels[0].Name) - - require.Len(t, sum.Tasks, 1) - assert.NotZero(t, sum.Tasks[0].ID) - assert.Equal(t, "task-0", sum.Tasks[0].Name) - - require.Len(t, sum.TelegrafConfigs, 1) - assert.NotZero(t, sum.TelegrafConfigs[0].TelegrafConfig.ID) - assert.Equal(t, "tele-0", sum.TelegrafConfigs[0].TelegrafConfig.Name) - - resources := []struct { - resID platform.ID - resourceType influxdb.ResourceType - }{ - {resID: platform.ID(sum.Buckets[0].ID), resourceType: influxdb.BucketsResourceType}, - {resID: sum.Checks[0].Check.GetID(), resourceType: influxdb.ChecksResourceType}, - {resID: platform.ID(sum.Dashboards[0].ID), resourceType: influxdb.DashboardsResourceType}, - {resID: sum.NotificationEndpoints[0].NotificationEndpoint.GetID(), resourceType: influxdb.NotificationEndpointResourceType}, - {resID: platform.ID(sum.NotificationRules[0].ID), resourceType: influxdb.NotificationRuleResourceType}, - {resID: platform.ID(sum.Tasks[0].ID), resourceType: influxdb.TasksResourceType}, - {resID: sum.TelegrafConfigs[0].TelegrafConfig.ID, resourceType: influxdb.TelegrafsResourceType}, - {resID: platform.ID(sum.Variables[0].ID), resourceType: influxdb.VariablesResourceType}, - } - for _, res := range resources { - mappedLabels, err := l.LabelService(t).FindResourceLabels(ctx, influxdb.LabelMappingFilter{ - ResourceID: res.resID, - ResourceType: res.resourceType, - }) - require.NoError(t, err, "resource_type="+res.resourceType) - assertAssociatedLabelsFn(sum, mappedLabels, res.resourceType) - } - - return sum - } - - newObjectsFn := func() []pkger.Object { - return []pkger.Object{ - newBucketObject("bucket", "", ""), - newCheckDeadmanObject(t, "check-0", "", time.Hour), - newDashObject("dash-0", "", ""), - newEndpointHTTP("endpoint-0", "", ""), - newRuleObject(t, "rule-0", "", "endpoint-0", ""), - newTaskObject("task-0", "", ""), - newTelegrafObject("tele-0", "", ""), - newVariableObject("var-0", "", ""), - } - } - labelObj := newLabelObject("label-1", "label 1", "", "") - - testAssociationFn := func(t *testing.T) (pkger.Summary, pkger.Stack, func()) { - t.Helper() - - stack, cleanup := newStackFn(t, pkger.StackCreate{}) - defer func() { - if t.Failed() { - // if test fails in setup, then we attempt to clean it up - // so it doesn't pollute other tests - cleanup() - } - }() - - pkgObjects := newObjectsFn() - for _, obj := range pkgObjects { - obj.AddAssociations(pkger.ObjectAssociation{ - Kind: pkger.KindLabel, - MetaName: labelObj.Name(), - }) - } - pkgObjects = append(pkgObjects, labelObj) - - pkg := newTemplate(pkgObjects...) 
- - sum := testLabelMappingFn(t, stack.ID, pkg, func(sum pkger.Summary, mappedLabels []*influxdb.Label, resType influxdb.ResourceType) { - require.Len(t, mappedLabels, 1, "resource_type="+resType) - assert.Equal(t, sum.Labels[0].ID, pkger.SafeID(mappedLabels[0].ID)) - }) - - return sum, stack, cleanup - } - - t.Run("should associate resources with labels", func(t *testing.T) { - _, _, cleanup := testAssociationFn(t) - cleanup() - }) - - t.Run("should rollback to previous state when errors in creation", func(t *testing.T) { - sum, stack, cleanup := testAssociationFn(t) - defer cleanup() - - logger := l.log.With(zap.String("service", "pkger")) - var svc pkger.SVC = pkger.NewService( - pkger.WithLogger(logger), - pkger.WithBucketSVC(l.BucketService(t)), - pkger.WithDashboardSVC(l.DashboardService(t)), - pkger.WithCheckSVC(l.CheckService()), - pkger.WithLabelSVC(&fakeLabelSVC{ - // can't use the LabelService HTTP client b/c it doesn't cover the - // all the resources pkger supports... :sadpanda: - LabelService: l.LabelService(t), - createKillCount: -1, - deleteKillCount: 3, - }), - pkger.WithNotificationEndpointSVC(l.NotificationEndpointService(t)), - pkger.WithNotificationRuleSVC(l.NotificationRuleService(t)), - pkger.WithStore(pkger.NewStoreKV(l.Launcher.kvStore)), - pkger.WithTaskSVC(l.TaskServiceKV(t)), - pkger.WithTelegrafSVC(l.TelegrafService(t)), - pkger.WithVariableSVC(l.VariableService(t)), - ) - svc = pkger.MWLogging(logger)(svc) - - pkg := newTemplate(append(newObjectsFn(), labelObj)...) - _, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithTemplate(pkg), - pkger.ApplyWithStackID(stack.ID), - ) - require.Error(t, err) - - resources := []struct { - resID platform.ID - resourceType influxdb.ResourceType - }{ - {resID: platform.ID(sum.Buckets[0].ID), resourceType: influxdb.BucketsResourceType}, - {resID: sum.Checks[0].Check.GetID(), resourceType: influxdb.ChecksResourceType}, - {resID: platform.ID(sum.Dashboards[0].ID), resourceType: influxdb.DashboardsResourceType}, - {resID: sum.NotificationEndpoints[0].NotificationEndpoint.GetID(), resourceType: influxdb.NotificationEndpointResourceType}, - {resID: platform.ID(sum.NotificationRules[0].ID), resourceType: influxdb.NotificationRuleResourceType}, - {resID: platform.ID(sum.Tasks[0].ID), resourceType: influxdb.TasksResourceType}, - {resID: sum.TelegrafConfigs[0].TelegrafConfig.ID, resourceType: influxdb.TelegrafsResourceType}, - {resID: platform.ID(sum.Variables[0].ID), resourceType: influxdb.VariablesResourceType}, - } - for _, res := range resources { - mappedLabels, err := l.LabelService(t).FindResourceLabels(ctx, influxdb.LabelMappingFilter{ - ResourceID: res.resID, - ResourceType: res.resourceType, - }) - require.NoError(t, err) - - assert.Len(t, mappedLabels, 1, res.resourceType) - if len(mappedLabels) == 1 { - assert.Equal(t, sum.Labels[0].ID, pkger.SafeID(mappedLabels[0].ID)) - } - } - }) - - t.Run("should unassociate resources from label when removing association in pkg", func(t *testing.T) { - _, stack, cleanup := testAssociationFn(t) - defer cleanup() - - objects := newObjectsFn() - pkg := newTemplate(append(objects, labelObj)...) 
- - testLabelMappingFn(t, stack.ID, pkg, func(sum pkger.Summary, mappedLabels []*influxdb.Label, resType influxdb.ResourceType) { - assert.Empty(t, mappedLabels, "res_type="+resType) - }) - }) - }) - - t.Run("apply a pkg with a stack and all resources", func(t *testing.T) { - testStackApplyFn := func(t *testing.T) (pkger.Summary, pkger.Stack, func()) { - t.Helper() - - stack, cleanup := newStackFn(t, pkger.StackCreate{}) - defer func() { - if t.Failed() { - cleanup() - } - }() - - var ( - initialBucketPkgName = "rucketeer-1" - initialCheckPkgName = "checkers" - initialDashPkgName = "dash-of-salt" - initialEndpointPkgName = "endzo" - initialLabelPkgName = "labelino" - initialRulePkgName = "oh-doyle-rules" - initialTaskPkgName = "tap" - initialTelegrafPkgName = "teletype" - initialVariablePkgName = "laces-out-dan" - ) - initialPkg := newTemplate( - newBucketObject(initialBucketPkgName, "display name", "init desc"), - newCheckDeadmanObject(t, initialCheckPkgName, "check_0", time.Minute), - newDashObject(initialDashPkgName, "dash_0", "init desc"), - newEndpointHTTP(initialEndpointPkgName, "endpoint_0", "init desc"), - newLabelObject(initialLabelPkgName, "label 1", "init desc", "#222eee"), - newRuleObject(t, initialRulePkgName, "rule_0", initialEndpointPkgName, "init desc"), - newTaskObject(initialTaskPkgName, "task_0", "init desc"), - newTelegrafObject(initialTelegrafPkgName, "tele_0", "init desc"), - newVariableObject(initialVariablePkgName, "var char", "init desc"), - ) - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithTemplate(initialPkg), - pkger.ApplyWithStackID(stack.ID), - ) - require.NoError(t, err) - - summary := impact.Summary - - require.Len(t, summary.Buckets, 1) - assert.NotZero(t, summary.Buckets[0].ID) - assert.Equal(t, "display name", summary.Buckets[0].Name) - assert.Equal(t, "init desc", summary.Buckets[0].Description) - - require.Len(t, summary.Checks, 1) - assert.NotZero(t, summary.Checks[0].Check.GetID()) - assert.Equal(t, "check_0", summary.Checks[0].Check.GetName()) - - require.Len(t, summary.Dashboards, 1) - assert.NotZero(t, summary.Dashboards[0].ID) - assert.Equal(t, "dash_0", summary.Dashboards[0].Name) - - require.Len(t, summary.NotificationEndpoints, 1) - assert.NotZero(t, summary.NotificationEndpoints[0].NotificationEndpoint.GetID()) - assert.Equal(t, "endpoint_0", summary.NotificationEndpoints[0].NotificationEndpoint.GetName()) - - require.Len(t, summary.Labels, 1) - assert.NotZero(t, summary.Labels[0].ID) - assert.Equal(t, "label 1", summary.Labels[0].Name) - assert.Equal(t, "init desc", summary.Labels[0].Properties.Description) - assert.Equal(t, "#222eee", summary.Labels[0].Properties.Color) - - require.Len(t, summary.NotificationRules, 1) - assert.NotZero(t, summary.NotificationRules[0].ID) - assert.Equal(t, "rule_0", summary.NotificationRules[0].Name) - assert.Equal(t, initialEndpointPkgName, summary.NotificationRules[0].EndpointMetaName) - assert.Equal(t, "init desc", summary.NotificationRules[0].Description) - - require.Len(t, summary.Tasks, 1) - assert.NotZero(t, summary.Tasks[0].ID) - assert.Equal(t, "task_0", summary.Tasks[0].Name) - assert.Equal(t, "init desc", summary.Tasks[0].Description) - - require.Len(t, summary.TelegrafConfigs, 1) - assert.NotZero(t, summary.TelegrafConfigs[0].TelegrafConfig.ID) - assert.Equal(t, "tele_0", summary.TelegrafConfigs[0].TelegrafConfig.Name) - assert.Equal(t, "init desc", summary.TelegrafConfigs[0].TelegrafConfig.Description) - - require.Len(t, summary.Variables, 1) - assert.NotZero(t, 
summary.Variables[0].ID) - assert.Equal(t, "var char", summary.Variables[0].Name) - assert.Equal(t, "init desc", summary.Variables[0].Description) - - t.Log("\tverify changes reflected in platform") - { - actualBkt := resourceCheck.mustGetBucket(t, byName("display name")) - assert.Equal(t, summary.Buckets[0].ID, pkger.SafeID(actualBkt.ID)) - - actualCheck := resourceCheck.mustGetCheck(t, byName("check_0")) - assert.Equal(t, summary.Checks[0].Check.GetID(), actualCheck.GetID()) - - actualDash := resourceCheck.mustGetDashboard(t, byName("dash_0")) - assert.Equal(t, summary.Dashboards[0].ID, pkger.SafeID(actualDash.ID)) - - actualEndpint := resourceCheck.mustGetEndpoint(t, byName("endpoint_0")) - assert.Equal(t, summary.NotificationEndpoints[0].NotificationEndpoint.GetID(), actualEndpint.GetID()) - - actualLabel := resourceCheck.mustGetLabel(t, byName("label 1")) - assert.Equal(t, summary.Labels[0].ID, pkger.SafeID(actualLabel.ID)) - - actualRule := resourceCheck.mustGetRule(t, byName("rule_0")) - assert.Equal(t, summary.NotificationRules[0].ID, pkger.SafeID(actualRule.GetID())) - - actualTask := resourceCheck.mustGetTask(t, byName("task_0")) - assert.Equal(t, summary.Tasks[0].ID, pkger.SafeID(actualTask.ID)) - - actualTele := resourceCheck.mustGetTelegrafConfig(t, byName("tele_0")) - assert.Equal(t, summary.TelegrafConfigs[0].TelegrafConfig.ID, actualTele.ID) - - actualVar := resourceCheck.mustGetVariable(t, byName("var char")) - assert.Equal(t, summary.Variables[0].ID, pkger.SafeID(actualVar.ID)) - } - - return summary, stack, cleanup - } - - t.Run("apply pkg with stack id", func(t *testing.T) { - _, _, cleanup := testStackApplyFn(t) - cleanup() - }) - - t.Run("apply pkg with stack id where resources change", func(t *testing.T) { - initialSum, stack, cleanup := testStackApplyFn(t) - defer cleanup() - - var ( - updateBucketName = "new bucket" - updateCheckName = "new check" - updateDashName = "new dash" - updateEndpointName = "new endpoint" - updateLabelName = "new label" - updateRuleName = "new rule" - updateTaskName = "new task" - updateTelegrafName = "new telegraf" - updateVariableName = "new variable" - ) - updatedPkg := newTemplate( - newBucketObject(initialSum.Buckets[0].MetaName, updateBucketName, ""), - newCheckDeadmanObject(t, initialSum.Checks[0].MetaName, updateCheckName, time.Hour), - newDashObject(initialSum.Dashboards[0].MetaName, updateDashName, ""), - newEndpointHTTP(initialSum.NotificationEndpoints[0].MetaName, updateEndpointName, ""), - newLabelObject(initialSum.Labels[0].MetaName, updateLabelName, "", ""), - newRuleObject( - t, - initialSum.NotificationRules[0].MetaName, - updateRuleName, - initialSum.NotificationEndpoints[0].MetaName, - "", - ), - newTaskObject(initialSum.Tasks[0].MetaName, updateTaskName, ""), - newTelegrafObject(initialSum.TelegrafConfigs[0].MetaName, updateTelegrafName, ""), - newVariableObject(initialSum.Variables[0].MetaName, updateVariableName, ""), - ) - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithTemplate(updatedPkg), - pkger.ApplyWithStackID(stack.ID), - ) - require.NoError(t, err) - - sum := impact.Summary - - require.Len(t, sum.Buckets, 1) - assert.Equal(t, initialSum.Buckets[0].ID, sum.Buckets[0].ID) - assert.Equal(t, updateBucketName, sum.Buckets[0].Name) - - require.Len(t, sum.Checks, 1) - assert.Equal(t, initialSum.Checks[0].Check.GetID(), sum.Checks[0].Check.GetID()) - assert.Equal(t, updateCheckName, sum.Checks[0].Check.GetName()) - - require.Len(t, sum.Dashboards, 1) - assert.Equal(t, initialSum.Dashboards[0].ID, 
sum.Dashboards[0].ID) - assert.Equal(t, updateDashName, sum.Dashboards[0].Name) - - require.Len(t, sum.NotificationEndpoints, 1) - sumEndpoint := sum.NotificationEndpoints[0].NotificationEndpoint - assert.Equal(t, initialSum.NotificationEndpoints[0].NotificationEndpoint.GetID(), sumEndpoint.GetID()) - assert.Equal(t, updateEndpointName, sumEndpoint.GetName()) - - require.Len(t, sum.NotificationRules, 1) - sumRule := sum.NotificationRules[0] - assert.Equal(t, initialSum.NotificationRules[0].ID, sumRule.ID) - assert.Equal(t, updateRuleName, sumRule.Name) - - require.Len(t, sum.Labels, 1) - assert.Equal(t, initialSum.Labels[0].ID, sum.Labels[0].ID) - assert.Equal(t, updateLabelName, sum.Labels[0].Name) - - require.Len(t, sum.Tasks, 1) - assert.Equal(t, initialSum.Tasks[0].ID, sum.Tasks[0].ID) - assert.Equal(t, updateTaskName, sum.Tasks[0].Name) - - require.Len(t, sum.TelegrafConfigs, 1) - updatedTele := sum.TelegrafConfigs[0].TelegrafConfig - assert.Equal(t, initialSum.TelegrafConfigs[0].TelegrafConfig.ID, updatedTele.ID) - assert.Equal(t, updateTelegrafName, updatedTele.Name) - - require.Len(t, sum.Variables, 1) - assert.Equal(t, initialSum.Variables[0].ID, sum.Variables[0].ID) - assert.Equal(t, updateVariableName, sum.Variables[0].Name) - - t.Log("\tverify changes reflected in platform") - { - actualBkt := resourceCheck.mustGetBucket(t, byName(updateBucketName)) - require.Equal(t, initialSum.Buckets[0].ID, pkger.SafeID(actualBkt.ID)) - - actualCheck := resourceCheck.mustGetCheck(t, byName(updateCheckName)) - require.Equal(t, initialSum.Checks[0].Check.GetID(), actualCheck.GetID()) - - actualDash := resourceCheck.mustGetDashboard(t, byName(updateDashName)) - require.Equal(t, initialSum.Dashboards[0].ID, pkger.SafeID(actualDash.ID)) - - actualEndpoint := resourceCheck.mustGetEndpoint(t, byName(updateEndpointName)) - assert.Equal(t, sumEndpoint.GetID(), actualEndpoint.GetID()) - - actualLabel := resourceCheck.mustGetLabel(t, byName(updateLabelName)) - require.Equal(t, initialSum.Labels[0].ID, pkger.SafeID(actualLabel.ID)) - - actualRule := resourceCheck.mustGetRule(t, byName(updateRuleName)) - require.Equal(t, initialSum.NotificationRules[0].ID, pkger.SafeID(actualRule.GetID())) - - actualTask := resourceCheck.mustGetTask(t, byName(updateTaskName)) - require.Equal(t, initialSum.Tasks[0].ID, pkger.SafeID(actualTask.ID)) - - actualTelegraf := resourceCheck.mustGetTelegrafConfig(t, byName(updateTelegrafName)) - require.Equal(t, initialSum.TelegrafConfigs[0].TelegrafConfig.ID, actualTelegraf.ID) - - actualVar := resourceCheck.mustGetVariable(t, byName(updateVariableName)) - assert.Equal(t, sum.Variables[0].ID, pkger.SafeID(actualVar.ID)) - } - }) - - t.Run("an error during application rolls back resources to previous state", func(t *testing.T) { - initialSum, stack, cleanup := testStackApplyFn(t) - defer cleanup() - - logger := l.log.With(zap.String("service", "pkger")) - var svc pkger.SVC = pkger.NewService( - pkger.WithLogger(logger), - pkger.WithBucketSVC(l.BucketService(t)), - pkger.WithDashboardSVC(l.DashboardService(t)), - pkger.WithCheckSVC(l.CheckService()), - pkger.WithLabelSVC(l.LabelService(t)), - pkger.WithNotificationEndpointSVC(l.NotificationEndpointService(t)), - pkger.WithNotificationRuleSVC(&fakeRuleStore{ - NotificationRuleStore: l.NotificationRuleService(t), - createKillCount: 2, - }), - pkger.WithStore(pkger.NewStoreKV(l.Launcher.kvStore)), - pkger.WithTaskSVC(l.TaskServiceKV(t)), - pkger.WithTelegrafSVC(l.TelegrafService(t)), - pkger.WithVariableSVC(l.VariableService(t)), - )
- svc = pkger.MWLogging(logger)(svc) - - endpointPkgName := "z-endpoint-rolls-back" - - pkgWithDelete := newTemplate( - newBucketObject("z-roll-me-back", "", ""), - newBucketObject("z-rolls-back-too", "", ""), - newDashObject("z-rolls-dash", "", ""), - newLabelObject("z-label-roller", "", "", ""), - newCheckDeadmanObject(t, "z-check", "", time.Hour), - newEndpointHTTP(endpointPkgName, "", ""), - newRuleObject(t, "z-rules-back", "", endpointPkgName, ""), - newRuleObject(t, "z-rules-back-2", "", endpointPkgName, ""), - newRuleObject(t, "z-rules-back-3", "", endpointPkgName, ""), - newTaskObject("z-task-rolls-back", "", ""), - newTelegrafObject("z-telegraf-rolls-back", "", ""), - newVariableObject("z-var-rolls-back", "", ""), - ) - _, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithTemplate(pkgWithDelete), - pkger.ApplyWithStackID(stack.ID), - ) - require.Error(t, err) - - t.Log("validate all resources are rolled back") - { - actualBkt := resourceCheck.mustGetBucket(t, byName(initialSum.Buckets[0].Name)) - assert.NotEqual(t, initialSum.Buckets[0].ID, pkger.SafeID(actualBkt.ID)) - - actualCheck := resourceCheck.mustGetCheck(t, byName(initialSum.Checks[0].Check.GetName())) - assert.NotEqual(t, initialSum.Checks[0].Check.GetID(), actualCheck.GetID()) - - actualDash := resourceCheck.mustGetDashboard(t, byName(initialSum.Dashboards[0].Name)) - assert.NotEqual(t, initialSum.Dashboards[0].ID, pkger.SafeID(actualDash.ID)) - - actualEndpoint := resourceCheck.mustGetEndpoint(t, byName(initialSum.NotificationEndpoints[0].NotificationEndpoint.GetName())) - assert.NotEqual(t, initialSum.NotificationEndpoints[0].NotificationEndpoint.GetID(), actualEndpoint.GetID()) - - actualRule := resourceCheck.mustGetRule(t, byName(initialSum.NotificationRules[0].Name)) - assert.NotEqual(t, initialSum.NotificationRules[0].ID, pkger.SafeID(actualRule.GetID())) - - actualLabel := resourceCheck.mustGetLabel(t, byName(initialSum.Labels[0].Name)) - assert.NotEqual(t, initialSum.Labels[0].ID, pkger.SafeID(actualLabel.ID)) - - actualTask := resourceCheck.mustGetTask(t, byName(initialSum.Tasks[0].Name)) - assert.NotEqual(t, initialSum.Tasks[0].ID, pkger.SafeID(actualTask.ID)) - - actualTelegraf := resourceCheck.mustGetTelegrafConfig(t, byName(initialSum.TelegrafConfigs[0].TelegrafConfig.Name)) - assert.NotEqual(t, initialSum.TelegrafConfigs[0].TelegrafConfig.ID, actualTelegraf.ID) - - actualVariable := resourceCheck.mustGetVariable(t, byName(initialSum.Variables[0].Name)) - assert.NotEqual(t, initialSum.Variables[0].ID, pkger.SafeID(actualVariable.ID)) - } - - t.Log("validate all changes do not persist") - { - for _, name := range []string{"z-roll-me-back", "z-rolls-back-too"} { - _, err := resourceCheck.getBucket(t, byName(name)) - assert.Error(t, err) - } - - for _, name := range []string{"z-rules-back", "z-rules-back-2", "z-rules-back-3"} { - _, err = resourceCheck.getRule(t, byName(name)) - assert.Error(t, err) - } - - _, err := resourceCheck.getCheck(t, byName("z-check")) - assert.Error(t, err) - - _, err = resourceCheck.getDashboard(t, byName("z-rolls-dash")) - assert.Error(t, err) - - _, err = resourceCheck.getEndpoint(t, byName("z-endpoint-rolls-back")) - assert.Error(t, err) - - _, err = resourceCheck.getLabel(t, byName("z-label-roller")) - assert.Error(t, err) - - _, err = resourceCheck.getTelegrafConfig(t, byName("z-telegraf-rolls-back")) - assert.Error(t, err) - - _, err = resourceCheck.getVariable(t, byName("z-var-rolls-back")) - assert.Error(t, err) - } - }) - - t.Run("apply pkg with stack id where
resources have been removed since last run", func(t *testing.T) { - initialSum, stack, cleanup := testStackApplyFn(t) - defer cleanup() - - newEndpointPkgName := "non-existent-endpoint" - allNewResourcesPkg := newTemplate( - newBucketObject("non-existent-bucket", "", ""), - newCheckDeadmanObject(t, "non-existent-check", "", time.Minute), - newDashObject("non-existent-dash", "", ""), - newEndpointHTTP(newEndpointPkgName, "", ""), - newLabelObject("non-existent-label", "", "", ""), - newRuleObject(t, "non-existent-rule", "", newEndpointPkgName, ""), - newTaskObject("non-existent-task", "", ""), - newTelegrafObject("non-existent-tele", "", ""), - newVariableObject("non-existent-var", "", ""), - ) - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithTemplate(allNewResourcesPkg), - pkger.ApplyWithStackID(stack.ID), - ) - require.NoError(t, err) - - sum := impact.Summary - - require.Len(t, sum.Buckets, 1) - assert.NotEqual(t, initialSum.Buckets[0].ID, sum.Buckets[0].ID) - assert.NotZero(t, sum.Buckets[0].ID) - assert.Equal(t, "non-existent-bucket", sum.Buckets[0].Name) - - require.Len(t, sum.Checks, 1) - assert.NotEqual(t, initialSum.Checks[0].Check.GetID(), sum.Checks[0].Check.GetID()) - assert.NotZero(t, sum.Checks[0].Check.GetID()) - assert.Equal(t, "non-existent-check", sum.Checks[0].Check.GetName()) - - require.Len(t, sum.Dashboards, 1) - assert.NotEqual(t, initialSum.Dashboards[0].ID, sum.Dashboards[0].ID) - assert.NotZero(t, sum.Dashboards[0].ID) - assert.Equal(t, "non-existent-dash", sum.Dashboards[0].Name) - - require.Len(t, sum.NotificationEndpoints, 1) - sumEndpoint := sum.NotificationEndpoints[0].NotificationEndpoint - assert.NotEqual(t, initialSum.NotificationEndpoints[0].NotificationEndpoint.GetID(), sumEndpoint.GetID()) - assert.NotZero(t, sumEndpoint.GetID()) - assert.Equal(t, newEndpointPkgName, sumEndpoint.GetName()) - - require.Len(t, sum.NotificationRules, 1) - sumRule := sum.NotificationRules[0] - assert.NotEqual(t, initialSum.NotificationRules[0].ID, sumRule.ID) - assert.NotZero(t, sumRule.ID) - assert.Equal(t, "non-existent-rule", sumRule.Name) - - require.Len(t, sum.Labels, 1) - assert.NotEqual(t, initialSum.Labels[0].ID, sum.Labels[0].ID) - assert.NotZero(t, sum.Labels[0].ID) - assert.Equal(t, "non-existent-label", sum.Labels[0].Name) - - require.Len(t, sum.Tasks, 1) - assert.NotEqual(t, initialSum.Tasks[0].ID, sum.Tasks[0].ID) - assert.NotZero(t, sum.Tasks[0].ID) - assert.Equal(t, "non-existent-task", sum.Tasks[0].Name) - - require.Len(t, sum.TelegrafConfigs, 1) - newTele := sum.TelegrafConfigs[0].TelegrafConfig - assert.NotEqual(t, initialSum.TelegrafConfigs[0].TelegrafConfig.ID, newTele.ID) - assert.NotZero(t, newTele.ID) - assert.Equal(t, "non-existent-tele", newTele.Name) - - require.Len(t, sum.Variables, 1) - assert.NotEqual(t, initialSum.Variables[0].ID, sum.Variables[0].ID) - assert.NotZero(t, sum.Variables[0].ID) - assert.Equal(t, "non-existent-var", sum.Variables[0].Name) - - t.Log("\tvalidate all resources are created") - { - bkt := resourceCheck.mustGetBucket(t, byName("non-existent-bucket")) - assert.Equal(t, pkger.SafeID(bkt.ID), sum.Buckets[0].ID) - - chk := resourceCheck.mustGetCheck(t, byName("non-existent-check")) - assert.Equal(t, chk.GetID(), sum.Checks[0].Check.GetID()) - - endpoint := resourceCheck.mustGetEndpoint(t, byName(newEndpointPkgName)) - assert.Equal(t, endpoint.GetID(), sum.NotificationEndpoints[0].NotificationEndpoint.GetID()) - - label := resourceCheck.mustGetLabel(t, byName("non-existent-label")) - assert.Equal(t, 
pkger.SafeID(label.ID), sum.Labels[0].ID) - - actualRule := resourceCheck.mustGetRule(t, byName("non-existent-rule")) - assert.Equal(t, pkger.SafeID(actualRule.GetID()), sum.NotificationRules[0].ID) - - task := resourceCheck.mustGetTask(t, byName("non-existent-task")) - assert.Equal(t, pkger.SafeID(task.ID), sum.Tasks[0].ID) - - tele := resourceCheck.mustGetTelegrafConfig(t, byName("non-existent-tele")) - assert.Equal(t, tele.ID, sum.TelegrafConfigs[0].TelegrafConfig.ID) - - variable := resourceCheck.mustGetVariable(t, byName("non-existent-var")) - assert.Equal(t, pkger.SafeID(variable.ID), sum.Variables[0].ID) - } - - t.Log("\tvalidate all previous resources are removed") - { - _, err = resourceCheck.getBucket(t, byName(initialSum.Buckets[0].Name)) - require.Error(t, err) - - _, err = resourceCheck.getCheck(t, byName(initialSum.Checks[0].Check.GetName())) - require.Error(t, err) - - _, err = resourceCheck.getEndpoint(t, byName(initialSum.NotificationEndpoints[0].NotificationEndpoint.GetName())) - require.Error(t, err) - - _, err = resourceCheck.getLabel(t, byName(initialSum.Labels[0].Name)) - require.Error(t, err) - - _, err = resourceCheck.getTask(t, byName(initialSum.Tasks[0].Name)) - require.Error(t, err) - - _, err = resourceCheck.getTelegrafConfig(t, byName(initialSum.TelegrafConfigs[0].TelegrafConfig.Name)) - require.Error(t, err) - - _, err = resourceCheck.getVariable(t, byName(initialSum.Variables[0].Name)) - require.Error(t, err) - } - }) - }) - - t.Run("apply should handle cases where users have changed platform data", func(t *testing.T) { - initializeStackPkg := func(t *testing.T, pkg *pkger.Template) (platform.ID, func(), pkger.Summary) { - t.Helper() - - stack, cleanup := newStackFn(t, pkger.StackCreate{}) - defer func() { - if t.Failed() { - cleanup() - } - }() - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithTemplate(pkg), - pkger.ApplyWithStackID(stack.ID), - ) - require.NoError(t, err) - - return stack.ID, cleanup, impact.Summary - } - - testValidRemoval := func(t *testing.T, stackID platform.ID) { - t.Helper() - _, err := svc.Apply( - ctx, - l.Org.ID, - l.User.ID, - pkger.ApplyWithStackID(stackID), - ) - require.NoError(t, err) - } - - t.Run("when a user has deleted a variable that was previously created by a stack", func(t *testing.T) { - testUserDeletedVariable := func(t *testing.T, actionFn func(t *testing.T, stackID platform.ID, initialVarObj pkger.Object, initialSum pkger.Summary)) { - t.Helper() - - obj := newVariableObject("var-1", "", "") - stackID, cleanup, initialSum := initializeStackPkg(t, newTemplate(obj)) - defer cleanup() - - require.Len(t, initialSum.Variables, 1) - require.NotZero(t, initialSum.Variables[0].ID) - resourceCheck.mustDeleteVariable(t, platform.ID(initialSum.Variables[0].ID)) - - actionFn(t, stackID, obj, initialSum) - } - - t.Run("should create new resource when attempting to update", func(t *testing.T) { - testUserDeletedVariable(t, func(t *testing.T, stackID platform.ID, initialVarObj pkger.Object, initialSum pkger.Summary) { - pkg := newTemplate(initialVarObj) - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID)) - require.NoError(t, err) - - updateSum := impact.Summary - - require.Len(t, updateSum.Variables, 1) - initVar, updateVar := initialSum.Variables[0], updateSum.Variables[0] - assert.NotEqual(t, initVar.ID, updateVar.ID) - initVar.ID, updateVar.ID = 0, 0 - assert.Equal(t, initVar, updateVar) - }) - }) - - t.Run("should not error when attempting 
to remove", func(t *testing.T) { - testUserDeletedVariable(t, func(t *testing.T, stackID platform.ID, initialVarObj pkger.Object, initialSum pkger.Summary) { - testValidRemoval(t, stackID) - }) - }) - }) - - t.Run("when a user has deleted a bucket that was previously created by a stack", func(t *testing.T) { - testUserDeletedBucket := func(t *testing.T, actionFn func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary)) { - t.Helper() - - obj := newBucketObject("bucket-1", "", "") - stackID, cleanup, initialSum := initializeStackPkg(t, newTemplate(obj)) - defer cleanup() - - require.Len(t, initialSum.Buckets, 1) - require.NotZero(t, initialSum.Buckets[0].ID) - resourceCheck.mustDeleteBucket(t, platform.ID(initialSum.Buckets[0].ID)) - - actionFn(t, stackID, obj, initialSum) - } - - t.Run("should create new resource when attempting to update", func(t *testing.T) { - testUserDeletedBucket(t, func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary) { - pkg := newTemplate(initialObj) - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID)) - require.NoError(t, err) - - updateSum := impact.Summary - - require.Len(t, updateSum.Buckets, 1) - initial, updated := initialSum.Buckets[0], updateSum.Buckets[0] - assert.NotEqual(t, initial.ID, updated.ID) - initial.ID, updated.ID = 0, 0 - assert.Equal(t, initial, updated) - }) - }) - - t.Run("should not error when attempting to remove", func(t *testing.T) { - testUserDeletedBucket(t, func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary) { - testValidRemoval(t, stackID) - }) - }) - }) - - t.Run("when a user has deleted a check that was previously created by a stack", func(t *testing.T) { - testUserDeletedCheck := func(t *testing.T, actionFn func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary)) { - t.Helper() - - obj := newCheckDeadmanObject(t, "check-1", "", time.Hour) - stackID, cleanup, initialSum := initializeStackPkg(t, newTemplate(obj)) - defer cleanup() - - require.Len(t, initialSum.Checks, 1) - require.NotZero(t, initialSum.Checks[0].Check.GetID()) - resourceCheck.mustDeleteCheck(t, initialSum.Checks[0].Check.GetID()) - - actionFn(t, stackID, obj, initialSum) - } - - t.Run("should create new resource when attempting to update", func(t *testing.T) { - testUserDeletedCheck(t, func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary) { - pkg := newTemplate(initialObj) - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID)) - require.NoError(t, err) - - updateSum := impact.Summary - - require.Len(t, updateSum.Checks, 1) - initial, updated := initialSum.Checks[0].Check, updateSum.Checks[0].Check - assert.NotEqual(t, initial.GetID(), updated.GetID()) - initial.SetID(0) - updated.SetID(0) - assert.Equal(t, initial, updated) - }) - }) - - t.Run("should not error when attempting to remove", func(t *testing.T) { - testUserDeletedCheck(t, func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary) { - testValidRemoval(t, stackID) - }) - }) - }) - - t.Run("when a user has deleted a dashboard that was previously created by a stack", func(t *testing.T) { - testUserDeletedDashboard := func(t *testing.T, actionFn func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary)) { - t.Helper() - - obj := newDashObject("dash-1",
"", "") - stackID, cleanup, initialSum := initializeStackPkg(t, newTemplate(obj)) - defer cleanup() - - require.Len(t, initialSum.Dashboards, 1) - require.NotZero(t, initialSum.Dashboards[0].ID) - resourceCheck.mustDeleteDashboard(t, platform.ID(initialSum.Dashboards[0].ID)) - - actionFn(t, stackID, obj, initialSum) - } - - t.Run("should create new resource when attempting to update", func(t *testing.T) { - testUserDeletedDashboard(t, func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary) { - pkg := newTemplate(initialObj) - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID)) - require.NoError(t, err) - - updateSum := impact.Summary - - require.Len(t, updateSum.Dashboards, 1) - initial, updated := initialSum.Dashboards[0], updateSum.Dashboards[0] - assert.NotEqual(t, initial.ID, updated.ID) - initial.ID, updated.ID = 0, 0 - assert.Equal(t, initial, updated) - }) - }) - - t.Run("should not error when attempting to remove", func(t *testing.T) { - testUserDeletedDashboard(t, func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary) { - testValidRemoval(t, stackID) - }) - }) - }) - - t.Run("when a user has deleted a label that was previously created by a stack", func(t *testing.T) { - testUserDeletedLabel := func(t *testing.T, actionFn func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary)) { - t.Helper() - - obj := newLabelObject("label-1", "", "", "") - stackID, cleanup, initialSum := initializeStackPkg(t, newTemplate(obj)) - defer cleanup() - - require.Len(t, initialSum.Labels, 1) - require.NotZero(t, initialSum.Labels[0].ID) - resourceCheck.mustDeleteLabel(t, platform.ID(initialSum.Labels[0].ID)) - - actionFn(t, stackID, obj, initialSum) - } - - t.Run("should create new resource when attempting to update", func(t *testing.T) { - testUserDeletedLabel(t, func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary) { - pkg := newTemplate(initialObj) - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID)) - require.NoError(t, err) - - updateSum := impact.Summary - - require.Len(t, updateSum.Labels, 1) - initial, updated := initialSum.Labels[0], updateSum.Labels[0] - assert.NotEqual(t, initial.ID, updated.ID, "label ID should be different") - initial.ID, updated.ID = 0, 0 - assert.Equal(t, initial, updated) - }) - }) - - t.Run("should not error when attempting to remove", func(t *testing.T) { - testUserDeletedLabel(t, func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary) { - testValidRemoval(t, stackID) - }) - }) - }) - - t.Run("when a user has deleted a notification endpoint that was previously created by a stack", func(t *testing.T) { - testUserDeletedEndpoint := func(t *testing.T, actionFn func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary)) { - t.Helper() - - obj := newEndpointHTTP("endpoint-1", "", "") - stackID, cleanup, initialSum := initializeStackPkg(t, newTemplate(obj)) - defer cleanup() - - require.Len(t, initialSum.NotificationEndpoints, 1) - require.NotZero(t, initialSum.NotificationEndpoints[0].NotificationEndpoint.GetID()) - resourceCheck.mustDeleteEndpoint(t, initialSum.NotificationEndpoints[0].NotificationEndpoint.GetID()) - - actionFn(t, stackID, obj, initialSum) - } - - t.Run("should create new resource when attempting to update", func(t *testing.T) 
{ - testUserDeletedEndpoint(t, func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary) { - pkg := newTemplate(initialObj) - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID)) - require.NoError(t, err) - - updateSum := impact.Summary - - require.Len(t, updateSum.NotificationEndpoints, 1) - initial, updated := initialSum.NotificationEndpoints[0].NotificationEndpoint, updateSum.NotificationEndpoints[0].NotificationEndpoint - assert.NotEqual(t, initial.GetID(), updated.GetID()) - initial.SetID(0) - updated.SetID(0) - assert.Equal(t, initial, updated) - }) - }) - - t.Run("should not error when attempting to remove", func(t *testing.T) { - testUserDeletedEndpoint(t, func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary) { - testValidRemoval(t, stackID) - }) - }) - }) - - t.Run("when a user has deleted a notification rule that was previously created by a stack", func(t *testing.T) { - testUserDeletedRule := func(t *testing.T, actionFn func(t *testing.T, stackID platform.ID, initialObjects []pkger.Object, initialSum pkger.Summary)) { - t.Helper() - - endpointObj := newEndpointHTTP("endpoint-1", "", "") - ruleObj := newRuleObject(t, "rule-0", "", endpointObj.Name(), "") - stackID, cleanup, initialSum := initializeStackPkg(t, newTemplate(endpointObj, ruleObj)) - defer cleanup() - - require.Len(t, initialSum.NotificationEndpoints, 1) - require.NotZero(t, initialSum.NotificationEndpoints[0].NotificationEndpoint.GetID()) - require.Len(t, initialSum.NotificationRules, 1) - require.NotZero(t, initialSum.NotificationRules[0].ID) - resourceCheck.mustDeleteRule(t, platform.ID(initialSum.NotificationRules[0].ID)) - - actionFn(t, stackID, []pkger.Object{ruleObj, endpointObj}, initialSum) - } - - t.Run("should create new resource when attempting to update", func(t *testing.T) { - testUserDeletedRule(t, func(t *testing.T, stackID platform.ID, initialObjects []pkger.Object, initialSum pkger.Summary) { - pkg := newTemplate(initialObjects...) 
- impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID)) - require.NoError(t, err) - - updateSum := impact.Summary - - require.Len(t, updateSum.NotificationRules, 1) - initial, updated := initialSum.NotificationRules[0], updateSum.NotificationRules[0] - assert.NotEqual(t, initial.ID, updated.ID) - initial.ID, updated.ID = 0, 0 - assert.Equal(t, initial, updated) - }) - }) - - t.Run("should not error when attempting to remove", func(t *testing.T) { - testUserDeletedRule(t, func(t *testing.T, stackID platform.ID, _ []pkger.Object, _ pkger.Summary) { - testValidRemoval(t, stackID) - }) - }) - }) - - t.Run("when a user has deleted a task that was previously created by a stack", func(t *testing.T) { - testUserDeletedTask := func(t *testing.T, actionFn func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary)) { - t.Helper() - - obj := newTaskObject("task-1", "", "") - stackID, cleanup, initialSum := initializeStackPkg(t, newTemplate(obj)) - defer cleanup() - - require.Len(t, initialSum.Tasks, 1) - require.NotZero(t, initialSum.Tasks[0].ID) - resourceCheck.mustDeleteTask(t, platform.ID(initialSum.Tasks[0].ID)) - - actionFn(t, stackID, obj, initialSum) - } - - t.Run("should create new resource when attempting to update", func(t *testing.T) { - testUserDeletedTask(t, func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary) { - pkg := newTemplate(initialObj) - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID)) - require.NoError(t, err) - - updateSum := impact.Summary - - require.Len(t, updateSum.Tasks, 1) - initial, updated := initialSum.Tasks[0], updateSum.Tasks[0] - assert.NotEqual(t, initial.ID, updated.ID) - initial.ID, updated.ID = 0, 0 - assert.Equal(t, initial, updated) - }) - }) - - t.Run("should not error when attempting to remove", func(t *testing.T) { - testUserDeletedTask(t, func(t *testing.T, stackID platform.ID, _ pkger.Object, _ pkger.Summary) { - testValidRemoval(t, stackID) - }) - }) - }) - - t.Run("when a user has deleted a telegraf config that was previously created by a stack", func(t *testing.T) { - testUserDeletedTelegraf := func(t *testing.T, actionFn func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary)) { - t.Helper() - - obj := newTelegrafObject("tele-1", "", "") - stackID, cleanup, initialSum := initializeStackPkg(t, newTemplate(obj)) - defer cleanup() - - require.Len(t, initialSum.TelegrafConfigs, 1) - require.NotZero(t, initialSum.TelegrafConfigs[0].TelegrafConfig.ID) - resourceCheck.mustDeleteTelegrafConfig(t, initialSum.TelegrafConfigs[0].TelegrafConfig.ID) - - actionFn(t, stackID, obj, initialSum) - } - - t.Run("should create new resource when attempting to update", func(t *testing.T) { - testUserDeletedTelegraf(t, func(t *testing.T, stackID platform.ID, initialObj pkger.Object, initialSum pkger.Summary) { - pkg := newTemplate(initialObj) - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stackID)) - require.NoError(t, err) - - updateSum := impact.Summary - - require.Len(t, updateSum.TelegrafConfigs, 1) - initial, updated := initialSum.TelegrafConfigs[0].TelegrafConfig, updateSum.TelegrafConfigs[0].TelegrafConfig - assert.NotEqual(t, initial.ID, updated.ID) - initial.ID, updated.ID = 0, 0 - assert.Equal(t, initial, updated) - }) - }) - - t.Run("should not error when attempting to remove", 
func(t *testing.T) { - testUserDeletedTelegraf(t, func(t *testing.T, stackID platform.ID, _ pkger.Object, _ pkger.Summary) { - testValidRemoval(t, stackID) - }) - }) - }) - }) - - t.Run("applying updates to existing variable should be successful", func(t *testing.T) { - stack, cleanup := newStackFn(t, pkger.StackCreate{}) - defer cleanup() - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithStackID(stack.ID), - pkger.ApplyWithTemplate(newTemplate(newVariableObject("var", "", ""))), - ) - require.NoError(t, err) - - vars := impact.Summary.Variables - require.Len(t, vars, 1) - v := resourceCheck.mustGetVariable(t, byID(platform.ID(vars[0].ID))) - assert.Empty(t, v.Selected) - - impact, err = svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithStackID(stack.ID), - pkger.ApplyWithTemplate(newTemplate(newVariableObject("var", "", "", "selected"))), - ) - require.NoError(t, err) - - vars = impact.Summary.Variables - require.Len(t, vars, 1) - v = resourceCheck.mustGetVariable(t, byID(platform.ID(vars[0].ID))) - assert.Equal(t, []string{"selected"}, v.Selected) - }) - - t.Run("apply with actions", func(t *testing.T) { - var ( - bucketPkgName = "rucketeer-1" - checkPkgName = "checkers" - dashPkgName = "dash-of-salt" - endpointPkgName = "endzo" - labelPkgName = "labelino" - rulePkgName = "oh-doyle-rules" - taskPkgName = "tap" - telegrafPkgName = "teletype" - variablePkgName = "laces-out-dan" - ) - - defaultPkgFn := func(*testing.T) *pkger.Template { - return newTemplate( - newBucketObject(bucketPkgName, "", ""), - newCheckDeadmanObject(t, checkPkgName, "", time.Hour), - newDashObject(dashPkgName, "", ""), - newEndpointHTTP(endpointPkgName, "", ""), - newLabelObject(labelPkgName, "", "", ""), - newRuleObject(t, rulePkgName, "", endpointPkgName, ""), - newTaskObject(taskPkgName, "", ""), - newTelegrafObject(telegrafPkgName, "", ""), - newVariableObject(variablePkgName, "", ""), - ) - } - - tests := []struct { - name string - pkgFn func(t *testing.T) *pkger.Template - applyOpts []pkger.ApplyOptFn - assertFn func(t *testing.T, impact pkger.ImpactSummary) - }{ - { - name: "skip resource", - pkgFn: defaultPkgFn, - applyOpts: []pkger.ApplyOptFn{ - pkger.ApplyWithResourceSkip(pkger.ActionSkipResource{ - Kind: pkger.KindBucket, - MetaName: bucketPkgName, - }), - pkger.ApplyWithResourceSkip(pkger.ActionSkipResource{ - Kind: pkger.KindCheckDeadman, - MetaName: checkPkgName, - }), - pkger.ApplyWithResourceSkip(pkger.ActionSkipResource{ - Kind: pkger.KindDashboard, - MetaName: dashPkgName, - }), - pkger.ApplyWithResourceSkip(pkger.ActionSkipResource{ - Kind: pkger.KindNotificationEndpointHTTP, - MetaName: endpointPkgName, - }), - pkger.ApplyWithResourceSkip(pkger.ActionSkipResource{ - Kind: pkger.KindLabel, - MetaName: labelPkgName, - }), - pkger.ApplyWithResourceSkip(pkger.ActionSkipResource{ - Kind: pkger.KindNotificationRule, - MetaName: rulePkgName, - }), - pkger.ApplyWithResourceSkip(pkger.ActionSkipResource{ - Kind: pkger.KindTask, - MetaName: taskPkgName, - }), - pkger.ApplyWithResourceSkip(pkger.ActionSkipResource{ - Kind: pkger.KindTelegraf, - MetaName: telegrafPkgName, - }), - pkger.ApplyWithResourceSkip(pkger.ActionSkipResource{ - Kind: pkger.KindVariable, - MetaName: variablePkgName, - }), - }, - assertFn: func(t *testing.T, impact pkger.ImpactSummary) { - summary := impact.Summary - assert.Empty(t, summary.Buckets) - assert.Empty(t, summary.Checks) - assert.Empty(t, summary.Dashboards) - assert.Empty(t, summary.NotificationEndpoints) - assert.Empty(t, summary.Labels) - 
assert.Empty(t, summary.NotificationRules, 0) - assert.Empty(t, summary.Tasks) - assert.Empty(t, summary.TelegrafConfigs) - assert.Empty(t, summary.Variables) - }, - }, - { - name: "skip kind", - pkgFn: defaultPkgFn, - applyOpts: []pkger.ApplyOptFn{ - pkger.ApplyWithKindSkip(pkger.ActionSkipKind{ - Kind: pkger.KindBucket, - }), - pkger.ApplyWithKindSkip(pkger.ActionSkipKind{ - Kind: pkger.KindCheckDeadman, - }), - pkger.ApplyWithKindSkip(pkger.ActionSkipKind{ - Kind: pkger.KindDashboard, - }), - pkger.ApplyWithKindSkip(pkger.ActionSkipKind{ - Kind: pkger.KindNotificationEndpointHTTP, - }), - pkger.ApplyWithKindSkip(pkger.ActionSkipKind{ - Kind: pkger.KindLabel, - }), - pkger.ApplyWithKindSkip(pkger.ActionSkipKind{ - Kind: pkger.KindNotificationRule, - }), - pkger.ApplyWithKindSkip(pkger.ActionSkipKind{ - Kind: pkger.KindTask, - }), - pkger.ApplyWithKindSkip(pkger.ActionSkipKind{ - Kind: pkger.KindTelegraf, - }), - pkger.ApplyWithKindSkip(pkger.ActionSkipKind{ - Kind: pkger.KindVariable, - }), - }, - assertFn: func(t *testing.T, impact pkger.ImpactSummary) { - summary := impact.Summary - assert.Empty(t, summary.Buckets) - assert.Empty(t, summary.Checks) - assert.Empty(t, summary.Dashboards) - assert.Empty(t, summary.NotificationEndpoints) - assert.Empty(t, summary.Labels) - assert.Empty(t, summary.NotificationRules, 0) - assert.Empty(t, summary.Tasks) - assert.Empty(t, summary.TelegrafConfigs) - assert.Empty(t, summary.Variables) - }, - }, - { - name: "skip label and associations should be dropped", - pkgFn: func(t *testing.T) *pkger.Template { - objs := []pkger.Object{ - newBucketObject(bucketPkgName, "", ""), - newCheckDeadmanObject(t, checkPkgName, "", time.Hour), - newDashObject(dashPkgName, "", ""), - newEndpointHTTP(endpointPkgName, "", ""), - newRuleObject(t, rulePkgName, "", endpointPkgName, ""), - newTaskObject(taskPkgName, "", ""), - newTelegrafObject(telegrafPkgName, "", ""), - newVariableObject(variablePkgName, "", ""), - } - for _, obj := range objs { - obj.AddAssociations(pkger.ObjectAssociation{ - Kind: pkger.KindLabel, - MetaName: labelPkgName, - }) - } - - objs = append(objs, newLabelObject(labelPkgName, "", "", "")) - - return newTemplate(objs...)
- }, - applyOpts: []pkger.ApplyOptFn{ - pkger.ApplyWithKindSkip(pkger.ActionSkipKind{ - Kind: pkger.KindLabel, - }), - }, - assertFn: func(t *testing.T, impact pkger.ImpactSummary) { - summary := impact.Summary - assert.Empty(t, summary.Labels, 0) - assert.Empty(t, summary.LabelMappings) - - assert.Len(t, summary.Buckets, 1) - assert.Empty(t, summary.Buckets[0].LabelAssociations) - assert.Len(t, summary.Checks, 1) - assert.Empty(t, summary.Checks[0].LabelAssociations) - assert.Len(t, summary.Dashboards, 1) - assert.Empty(t, summary.Dashboards[0].LabelAssociations) - assert.Len(t, summary.NotificationEndpoints, 1) - assert.Empty(t, summary.NotificationEndpoints[0].LabelAssociations) - assert.Len(t, summary.NotificationRules, 1) - assert.Empty(t, summary.NotificationRules[0].LabelAssociations) - assert.Len(t, summary.Tasks, 1) - assert.Empty(t, summary.Tasks[0].LabelAssociations) - assert.Len(t, summary.TelegrafConfigs, 1) - assert.Empty(t, summary.TelegrafConfigs[0].LabelAssociations) - assert.Len(t, summary.Variables, 1) - assert.Empty(t, summary.Variables[0].LabelAssociations) - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - stack, cleanup := newStackFn(t, pkger.StackCreate{}) - defer cleanup() - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - append( - tt.applyOpts, - pkger.ApplyWithTemplate(tt.pkgFn(t)), - pkger.ApplyWithStackID(stack.ID), - )..., - ) - require.NoError(t, err) - - tt.assertFn(t, impact) - } - - t.Run(tt.name, fn) - } - }) - - t.Run("exporting the existing state of stack resources as template", func(t *testing.T) { - testStackApplyFn := func(t *testing.T) (pkger.Summary, pkger.Stack, func()) { - t.Helper() - - stack, cleanup := newStackFn(t, pkger.StackCreate{}) - defer func() { - if t.Failed() { - cleanup() - } - }() - - var ( - initialBucketPkgName = "rucketeer-1" - initialCheckPkgName = "checkers" - initialDashPkgName = "dash-of-salt" - initialEndpointPkgName = "endzo" - initialLabelPkgName = "labelino" - initialRulePkgName = "oh-doyle-rules" - initialTaskPkgName = "tap" - initialTelegrafPkgName = "teletype" - initialVariablePkgName = "laces-out-dan" - ) - - labelObj := newLabelObject(initialLabelPkgName, "label 1", "init desc", "#222eee") - setAssociation := func(o pkger.Object) pkger.Object { - o.AddAssociations(pkger.ObjectAssociation{ - Kind: pkger.KindLabel, - MetaName: labelObj.Name(), - }) - return o - } - - initialPkg := newTemplate( - setAssociation(newBucketObject(initialBucketPkgName, "display name", "init desc")), - setAssociation(newCheckDeadmanObject(t, initialCheckPkgName, "check_0", time.Minute)), - setAssociation(newDashObject(initialDashPkgName, "dash_0", "init desc")), - setAssociation(newEndpointHTTP(initialEndpointPkgName, "endpoint_0", "init desc")), - labelObj, - setAssociation(newRuleObject(t, initialRulePkgName, "rule_0", initialEndpointPkgName, "init desc")), - setAssociation(newTaskObject(initialTaskPkgName, "task_0", "init desc")), - setAssociation(newTelegrafObject(initialTelegrafPkgName, "tele_0", "init desc")), - setAssociation(newVariableObject(initialVariablePkgName, "var char", "init desc")), - ) - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithTemplate(initialPkg), - pkger.ApplyWithStackID(stack.ID), - ) - require.NoError(t, err) - - summary := impact.Summary - - hasAssociation := func(t *testing.T, actual []pkger.SummaryLabel) { - t.Helper() - require.Len(t, actual, 1, "unexpected number of label mappings") - assert.Equal(t, actual[0].MetaName, labelObj.Name()) - } - - 
require.Len(t, summary.Buckets, 1) - assert.NotZero(t, summary.Buckets[0].ID) - assert.Equal(t, "display name", summary.Buckets[0].Name) - assert.Equal(t, "init desc", summary.Buckets[0].Description) - hasAssociation(t, summary.Buckets[0].LabelAssociations) - - require.Len(t, summary.Checks, 1) - assert.NotZero(t, summary.Checks[0].Check.GetID()) - assert.Equal(t, "check_0", summary.Checks[0].Check.GetName()) - hasAssociation(t, summary.Checks[0].LabelAssociations) - - require.Len(t, summary.Dashboards, 1) - assert.NotZero(t, summary.Dashboards[0].ID) - assert.Equal(t, "dash_0", summary.Dashboards[0].Name) - hasAssociation(t, summary.Dashboards[0].LabelAssociations) - - require.Len(t, summary.NotificationEndpoints, 1) - assert.NotZero(t, summary.NotificationEndpoints[0].NotificationEndpoint.GetID()) - assert.Equal(t, "endpoint_0", summary.NotificationEndpoints[0].NotificationEndpoint.GetName()) - hasAssociation(t, summary.NotificationEndpoints[0].LabelAssociations) - - require.Len(t, summary.Labels, 1) - assert.NotZero(t, summary.Labels[0].ID) - assert.Equal(t, "label 1", summary.Labels[0].Name) - assert.Equal(t, "init desc", summary.Labels[0].Properties.Description) - assert.Equal(t, "#222eee", summary.Labels[0].Properties.Color) - - require.Len(t, summary.NotificationRules, 1) - assert.NotZero(t, summary.NotificationRules[0].ID) - assert.Equal(t, "rule_0", summary.NotificationRules[0].Name) - assert.Equal(t, initialEndpointPkgName, summary.NotificationRules[0].EndpointMetaName) - assert.Equal(t, "init desc", summary.NotificationRules[0].Description) - hasAssociation(t, summary.NotificationRules[0].LabelAssociations) - - require.Len(t, summary.Tasks, 1) - assert.NotZero(t, summary.Tasks[0].ID) - assert.Equal(t, "task_0", summary.Tasks[0].Name) - assert.Equal(t, "init desc", summary.Tasks[0].Description) - hasAssociation(t, summary.Tasks[0].LabelAssociations) - - require.Len(t, summary.TelegrafConfigs, 1) - assert.NotZero(t, summary.TelegrafConfigs[0].TelegrafConfig.ID) - assert.Equal(t, "tele_0", summary.TelegrafConfigs[0].TelegrafConfig.Name) - assert.Equal(t, "init desc", summary.TelegrafConfigs[0].TelegrafConfig.Description) - hasAssociation(t, summary.TelegrafConfigs[0].LabelAssociations) - - require.Len(t, summary.Variables, 1) - assert.NotZero(t, summary.Variables[0].ID) - assert.Equal(t, "var char", summary.Variables[0].Name) - assert.Equal(t, "init desc", summary.Variables[0].Description) - hasAssociation(t, summary.Variables[0].LabelAssociations) - - // verify changes reflected in platform - { - actualBkt := resourceCheck.mustGetBucket(t, byName("display name")) - assert.Equal(t, summary.Buckets[0].ID, pkger.SafeID(actualBkt.ID)) - - actualCheck := resourceCheck.mustGetCheck(t, byName("check_0")) - assert.Equal(t, summary.Checks[0].Check.GetID(), actualCheck.GetID()) - - actualDash := resourceCheck.mustGetDashboard(t, byName("dash_0")) - assert.Equal(t, summary.Dashboards[0].ID, pkger.SafeID(actualDash.ID)) - - actualEndpint := resourceCheck.mustGetEndpoint(t, byName("endpoint_0")) - assert.Equal(t, summary.NotificationEndpoints[0].NotificationEndpoint.GetID(), actualEndpint.GetID()) - - actualLabel := resourceCheck.mustGetLabel(t, byName("label 1")) - assert.Equal(t, summary.Labels[0].ID, pkger.SafeID(actualLabel.ID)) - - actualRule := resourceCheck.mustGetRule(t, byName("rule_0")) - assert.Equal(t, summary.NotificationRules[0].ID, pkger.SafeID(actualRule.GetID())) - - actualTask := resourceCheck.mustGetTask(t, byName("task_0")) - assert.Equal(t, summary.Tasks[0].ID, 
pkger.SafeID(actualTask.ID)) - - actualTele := resourceCheck.mustGetTelegrafConfig(t, byName("tele_0")) - assert.Equal(t, summary.TelegrafConfigs[0].TelegrafConfig.ID, actualTele.ID) - - actualVar := resourceCheck.mustGetVariable(t, byName("var char")) - assert.Equal(t, summary.Variables[0].ID, pkger.SafeID(actualVar.ID)) - } - - return summary, stack, cleanup - } - - t.Run("should return a pkg matching the source pkg when all resources are unchanged", func(t *testing.T) { - initialSum, stack, cleanup := testStackApplyFn(t) - defer cleanup() - - exportedTemplate, err := svc.Export(ctx, pkger.ExportWithStackID(stack.ID)) - require.NoError(t, err) - - hasAssociation := func(t *testing.T, actual []pkger.SummaryLabel) { - t.Helper() - assert.Len(t, actual, 1, "unexpected number of label mappings") - if len(actual) != 1 { - return - } - assert.Equal(t, actual[0].MetaName, initialSum.Labels[0].MetaName) - } - - sum := exportedTemplate.Summary() - - require.Len(t, sum.Buckets, 1, "missing required buckets") - assert.Equal(t, initialSum.Buckets[0].MetaName, sum.Buckets[0].MetaName) - assert.Equal(t, initialSum.Buckets[0].Name, sum.Buckets[0].Name) - hasAssociation(t, sum.Buckets[0].LabelAssociations) - - require.Len(t, sum.Checks, 1, "missing required checks") - assert.Equal(t, initialSum.Checks[0].MetaName, sum.Checks[0].MetaName) - assert.Equal(t, initialSum.Checks[0].Check.GetName(), sum.Checks[0].Check.GetName()) - hasAssociation(t, sum.Checks[0].LabelAssociations) - - require.Len(t, sum.Dashboards, 1, "missing required dashboards") - assert.Equal(t, initialSum.Dashboards[0].MetaName, sum.Dashboards[0].MetaName) - assert.Equal(t, initialSum.Dashboards[0].Name, sum.Dashboards[0].Name) - hasAssociation(t, sum.Dashboards[0].LabelAssociations) - - require.Len(t, sum.Labels, 1, "missing required labels") - assert.Equal(t, initialSum.Labels[0].MetaName, sum.Labels[0].MetaName) - assert.Equal(t, initialSum.Labels[0].Name, sum.Labels[0].Name) - - require.Len(t, sum.NotificationRules, 1, "missing required rules") - assert.Equal(t, initialSum.NotificationRules[0].MetaName, sum.NotificationRules[0].MetaName) - assert.Equal(t, initialSum.NotificationRules[0].Name, sum.NotificationRules[0].Name) - assert.Equal(t, initialSum.NotificationRules[0].EndpointMetaName, sum.NotificationRules[0].EndpointMetaName) - assert.Equal(t, initialSum.NotificationRules[0].EndpointType, sum.NotificationRules[0].EndpointType) - hasAssociation(t, sum.NotificationRules[0].LabelAssociations) - - require.Len(t, sum.Tasks, 1, "missing required tasks") - assert.Equal(t, initialSum.Tasks[0].MetaName, sum.Tasks[0].MetaName) - assert.Equal(t, initialSum.Tasks[0].Name, sum.Tasks[0].Name) - hasAssociation(t, sum.Tasks[0].LabelAssociations) - - require.Len(t, sum.TelegrafConfigs, 1, "missing required telegraf configs") - assert.Equal(t, initialSum.TelegrafConfigs[0].MetaName, sum.TelegrafConfigs[0].MetaName) - assert.Equal(t, initialSum.TelegrafConfigs[0].TelegrafConfig.Name, sum.TelegrafConfigs[0].TelegrafConfig.Name) - hasAssociation(t, sum.TelegrafConfigs[0].LabelAssociations) - - require.Len(t, sum.Variables, 1, "missing required variables") - assert.Equal(t, initialSum.Variables[0].MetaName, sum.Variables[0].MetaName) - assert.Equal(t, initialSum.Variables[0].Name, sum.Variables[0].Name) - hasAssociation(t, sum.Variables[0].LabelAssociations) - }) - - t.Run("should return all associated dashboards in full", func(t *testing.T) { - dash := influxdb.Dashboard{ - Name: "dasher", - Cells: []*influxdb.Cell{ - { - ID: 1, - CellProperty: 
influxdb.CellProperty{ - H: 1, - W: 2, - }, - View: &influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "name", - }, - Properties: influxdb.MarkdownViewProperties{ - Type: influxdb.ViewPropertyTypeMarkdown, - Note: "the markdown", - }, - }, - }, - }, - } - - pkg := newTemplate(pkger.DashboardToObject("", dash)) - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg)) - require.NoError(t, err) - - defer deleteStackFn(t, impact.StackID) - - require.Len(t, impact.Summary.Dashboards, 1) - - exportedTemplate, err := svc.Export(ctx, pkger.ExportWithStackID(impact.StackID)) - require.NoError(t, err) - - summary := exportedTemplate.Summary() - require.Len(t, summary.Dashboards, 1) - - exportedDash := summary.Dashboards[0] - require.Len(t, exportedDash.Charts, 1) - - expectedChartProps := dash.Cells[0].View.Properties - assert.Equal(t, expectedChartProps, exportedDash.Charts[0].Properties) - }) - - t.Run("when label associations have changed", func(t *testing.T) { - newLabelAssociationTestFn := func(t *testing.T) (pkger.Stack, pkger.Summary, func()) { - t.Helper() - - stack, cleanup := newStackFn(t, pkger.StackCreate{}) - defer func() { - if t.Failed() { - cleanup() - } - }() - - labelObj := newLabelObject("test-label", "", "", "") - bktObj := newBucketObject("test-bucket", "", "") - bktObj.AddAssociations(pkger.ObjectAssociation{ - Kind: pkger.KindLabel, - MetaName: labelObj.Name(), - }) - pkg := newTemplate(bktObj, labelObj) - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg), pkger.ApplyWithStackID(stack.ID)) - require.NoError(t, err) - - require.Len(t, impact.Summary.Labels, 1) - require.Len(t, impact.Summary.Buckets, 1) - require.Len(t, impact.Summary.Buckets[0].LabelAssociations, 1) - - return stack, impact.Summary, cleanup - } - - t.Run("should not export associations removed in platform", func(t *testing.T) { - stack, initialSummary, cleanup := newLabelAssociationTestFn(t) - defer cleanup() - - err := l.LabelService(t).DeleteLabelMapping(ctx, &influxdb.LabelMapping{ - LabelID: platform.ID(initialSummary.Labels[0].ID), - ResourceID: platform.ID(initialSummary.Buckets[0].ID), - ResourceType: influxdb.BucketsResourceType, - }) - require.NoError(t, err) - - exportedTemplate, err := svc.Export(ctx, pkger.ExportWithStackID(stack.ID)) - require.NoError(t, err) - - exportedSum := exportedTemplate.Summary() - require.Len(t, exportedSum.Labels, 1) - require.Len(t, exportedSum.Buckets, 1) - require.Empty(t, exportedSum.Buckets[0].LabelAssociations, "received unexpected label associations") - }) - - t.Run("should export associations platform resources not associated with stack", func(t *testing.T) { - stack, initialSummary, cleanup := newLabelAssociationTestFn(t) - defer cleanup() - - newLabel := &influxdb.Label{ - OrgID: l.Org.ID, - Name: "test-label-2", - } - require.NoError(t, l.LabelService(t).CreateLabel(ctx, newLabel)) - defer resourceCheck.mustDeleteLabel(t, newLabel.ID) - - err := l.LabelService(t).CreateLabelMapping(ctx, &influxdb.LabelMapping{ - LabelID: newLabel.ID, - ResourceID: platform.ID(initialSummary.Buckets[0].ID), - ResourceType: influxdb.BucketsResourceType, - }) - require.NoError(t, err) - - exportedTemplate, err := svc.Export(ctx, pkger.ExportWithStackID(stack.ID)) - require.NoError(t, err) - - exportedSum := exportedTemplate.Summary() - assert.Len(t, exportedSum.Labels, 2) - require.Len(t, exportedSum.Buckets, 1) - require.Len(t, exportedSum.Buckets[0].LabelAssociations, 2) - - expectedAssociation := 
initialSummary.Labels[0] - expectedAssociation.ID, expectedAssociation.OrgID = 0, 0 - assert.Contains(t, exportedSum.Buckets[0].LabelAssociations, expectedAssociation) - }) - }) - }) - }) - - t.Run("errors incurred during application of template rolls back to state before template", func(t *testing.T) { - stacks, err := svc.ListStacks(ctx, l.Org.ID, pkger.ListFilter{}) - require.NoError(t, err) - require.Empty(t, stacks) - - svc := pkger.NewService( - pkger.WithBucketSVC(l.BucketService(t)), - pkger.WithDashboardSVC(l.DashboardService(t)), - pkger.WithCheckSVC(l.CheckService()), - pkger.WithLabelSVC(&fakeLabelSVC{ - LabelService: l.LabelService(t), - createKillCount: 2, // hits error on 3rd attempt at creating a mapping - }), - pkger.WithNotificationEndpointSVC(l.NotificationEndpointService(t)), - pkger.WithNotificationRuleSVC(l.NotificationRuleService(t)), - pkger.WithOrganizationService(l.OrganizationService()), - pkger.WithStore(pkger.NewStoreKV(l.kvStore)), - pkger.WithTaskSVC(l.TaskServiceKV(t)), - pkger.WithTelegrafSVC(l.TelegrafService(t)), - pkger.WithVariableSVC(l.VariableService(t)), - ) - - _, err = svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(newCompletePkg(t))) - require.Error(t, err) - - bkts, _, err := l.BucketService(t).FindBuckets(ctx, influxdb.BucketFilter{OrganizationID: &l.Org.ID}) - require.NoError(t, err) - for _, b := range bkts { - if influxdb.BucketTypeSystem == b.Type { - continue - } - // verify system buckets and org bucket are the buckets available - assert.Equal(t, l.Bucket.Name, b.Name) - } - - labels, err := l.LabelService(t).FindLabels(ctx, influxdb.LabelFilter{OrgID: &l.Org.ID}) - require.NoError(t, err) - assert.Empty(t, labels) - - dashs, _, err := l.DashboardService(t).FindDashboards(ctx, influxdb.DashboardFilter{ - OrganizationID: &l.Org.ID, - }, influxdb.DefaultDashboardFindOptions) - require.NoError(t, err) - assert.Empty(t, dashs) - - endpoints, _, err := l.NotificationEndpointService(t).FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{ - OrgID: &l.Org.ID, - }) - require.NoError(t, err) - assert.Empty(t, endpoints) - - rules, _, err := l.NotificationRuleService(t).FindNotificationRules(ctx, influxdb.NotificationRuleFilter{ - OrgID: &l.Org.ID, - }) - require.NoError(t, err) - assert.Empty(t, rules) - - tasks, _, err := l.TaskServiceKV(t).FindTasks(ctx, taskmodel.TaskFilter{ - OrganizationID: &l.Org.ID, - }) - require.NoError(t, err) - assert.Empty(t, tasks) - - teles, _, err := l.TelegrafService(t).FindTelegrafConfigs(ctx, influxdb.TelegrafConfigFilter{ - OrgID: &l.Org.ID, - }) - require.NoError(t, err) - assert.Empty(t, teles) - - vars, err := l.VariableService(t).FindVariables(ctx, influxdb.VariableFilter{OrganizationID: &l.Org.ID}) - require.NoError(t, err) - assert.Empty(t, vars) - - afterStacks, err := svc.ListStacks(ctx, l.Org.ID, pkger.ListFilter{}) - require.NoError(t, err) - require.Empty(t, afterStacks) - }) - - hasLabelAssociations := func(t *testing.T, associations []pkger.SummaryLabel, numAss int, expectedNames ...string) { - t.Helper() - hasAss := func(t *testing.T, expected string) { - t.Helper() - for _, ass := range associations { - if ass.Name == expected { - return - } - } - require.FailNow(t, "did not find expected association: "+expected) - } - - require.Len(t, associations, numAss) - for _, expected := range expectedNames { - hasAss(t, expected) - } - } - - t.Run("dry run", func(t *testing.T) { - t.Run("template with no existing resources", func(t *testing.T) { - impact, err := svc.DryRun(ctx, 
l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(newCompletePkg(t))) - require.NoError(t, err) - - sum, diff := impact.Summary, impact.Diff - - require.Len(t, diff.Buckets, 1) - assert.True(t, diff.Buckets[0].IsNew()) - - require.Len(t, diff.Checks, 2) - for _, ch := range diff.Checks { - assert.True(t, ch.IsNew()) - } - - require.Len(t, diff.Labels, 2) - assert.True(t, diff.Labels[0].IsNew()) - assert.True(t, diff.Labels[1].IsNew()) - - require.Len(t, diff.Variables, 1) - assert.True(t, diff.Variables[0].IsNew()) - - require.Len(t, diff.NotificationRules, 1) - // the pkg being run here has a relationship with the rule and the endpoint within the pkg. - assert.Equal(t, "http", diff.NotificationRules[0].New.EndpointType) - - require.Len(t, diff.Dashboards, 1) - require.Len(t, diff.NotificationEndpoints, 1) - require.Len(t, diff.Tasks, 1) - require.Len(t, diff.Telegrafs, 1) - - labels := sum.Labels - require.Len(t, labels, 2) - assert.Equal(t, "label-1", labels[0].Name) - assert.Equal(t, "the 2nd label", labels[1].Name) - - bkts := sum.Buckets - require.Len(t, bkts, 1) - assert.Equal(t, "rucketeer", bkts[0].Name) - hasLabelAssociations(t, bkts[0].LabelAssociations, 2, "label-1", "the 2nd label") - - checks := sum.Checks - require.Len(t, checks, 2) - assert.Equal(t, "check 0 name", checks[0].Check.GetName()) - hasLabelAssociations(t, checks[0].LabelAssociations, 1, "label-1") - assert.Equal(t, "check-1", checks[1].Check.GetName()) - hasLabelAssociations(t, checks[1].LabelAssociations, 1, "label-1") - - dashs := sum.Dashboards - require.Len(t, dashs, 1) - assert.Equal(t, "dash_1", dashs[0].Name) - assert.Equal(t, "desc1", dashs[0].Description) - hasLabelAssociations(t, dashs[0].LabelAssociations, 2, "label-1", "the 2nd label") - - endpoints := sum.NotificationEndpoints - require.Len(t, endpoints, 1) - assert.Equal(t, "no auth endpoint", endpoints[0].NotificationEndpoint.GetName()) - assert.Equal(t, "http none auth desc", endpoints[0].NotificationEndpoint.GetDescription()) - hasLabelAssociations(t, endpoints[0].LabelAssociations, 1, "label-1") - - require.Len(t, sum.Tasks, 1) - task := sum.Tasks[0] - assert.Equal(t, "task_1", task.Name) - assert.Equal(t, "desc_1", task.Description) - assert.Equal(t, "15 * * * *", task.Cron) - hasLabelAssociations(t, task.LabelAssociations, 1, "label-1") - - teles := sum.TelegrafConfigs - require.Len(t, teles, 1) - assert.Equal(t, "first tele config", teles[0].TelegrafConfig.Name) - assert.Equal(t, "desc", teles[0].TelegrafConfig.Description) - hasLabelAssociations(t, teles[0].LabelAssociations, 1, "label-1") - - vars := sum.Variables - require.Len(t, vars, 1) - assert.Equal(t, "query var", vars[0].Name) - hasLabelAssociations(t, vars[0].LabelAssociations, 1, "label-1") - varArgs := vars[0].Arguments - require.NotNil(t, varArgs) - assert.Equal(t, "query", varArgs.Type) - assert.Equal(t, influxdb.VariableQueryValues{ - Query: "buckets() |> filter(fn: (r) => r.name !~ /^_/) |> rename(columns: {name: \"_value\"}) |> keep(columns: [\"_value\"])", - Language: "flux", - }, varArgs.Values) - }) - - t.Run("template with env ref", func(t *testing.T) { - pkgStr := fmt.Sprintf(` -apiVersion: %[1]s -kind: Label -metadata: - name: - envRef: - key: label-1-name-ref -spec: ---- -apiVersion: %[1]s -kind: Bucket -metadata: - name: - envRef: - key: bkt-1-name-ref -spec: - associations: - - kind: Label - name: - envRef: - key: label-1-name-ref -`, pkger.APIVersion) - - pkg, err := pkger.Parse(pkger.EncodingYAML, pkger.FromString(pkgStr)) - require.NoError(t, err) - - impact, err := 
svc.DryRun(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithTemplate(pkg), - pkger.ApplyWithEnvRefs(map[string]interface{}{ - "bkt-1-name-ref": "new-bkt-name", - "label-1-name-ref": "new-label-name", - }), - ) - require.NoError(t, err) - - sum := impact.Summary - - require.Len(t, sum.Buckets, 1) - assert.Equal(t, "new-bkt-name", sum.Buckets[0].Name) - - require.Len(t, sum.Labels, 1) - assert.Equal(t, "new-label-name", sum.Labels[0].Name) - }) - - t.Run("bucket", func(t *testing.T) { - template := newTemplate( - newBucketObject("foo", "", ""), - ) - - impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(template)) - require.NoError(t, err) - - require.Len(t, impact.Diff.Buckets, 1) - assert.Equal(t, pkger.KindBucket, impact.Diff.Buckets[0].Kind) - }) - - t.Run("check", func(t *testing.T) { - template := newTemplate( - newCheckDeadmanObject(t, "check1", "", time.Hour), - newCheckThresholdObject(t, "check2", "", time.Hour), - ) - - impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(template)) - require.NoError(t, err) - - require.Len(t, impact.Diff.Checks, 2) - assert.Equal(t, pkger.KindCheckDeadman, impact.Diff.Checks[0].Kind) - assert.Equal(t, pkger.KindCheckThreshold, impact.Diff.Checks[1].Kind) - }) - - t.Run("dashboards", func(t *testing.T) { - newQuery := func() influxdb.DashboardQuery { - return influxdb.DashboardQuery{ - BuilderConfig: influxdb.BuilderConfig{ - Buckets: []string{}, - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{}, - Functions: []struct { - Name string `json:"name"` - }{}, - }, - Text: "from(v.bucket) |> count()", - EditMode: "advanced", - } - } - - newAxes := func() map[string]influxdb.Axis { - return map[string]influxdb.Axis{ - "x": { - Bounds: []string{}, - Label: "labx", - Prefix: "pre", - Suffix: "suf", - Base: "base", - Scale: "linear", - }, - "y": { - Bounds: []string{}, - Label: "laby", - Prefix: "pre", - Suffix: "suf", - Base: "base", - Scale: "linear", - }, - } - } - - newColors := func(types ...string) []influxdb.ViewColor { - var out []influxdb.ViewColor - for _, t := range types { - out = append(out, influxdb.ViewColor{ - Type: t, - Hex: time.Now().Format(time.RFC3339), - Name: time.Now().Format(time.RFC3339), - Value: float64(time.Now().Unix()), - }) - } - return out - } - - tests := []struct { - name string - props influxdb.ViewProperties - }{ - { - name: "gauge", - props: influxdb.GaugeViewProperties{ - Type: influxdb.ViewPropertyTypeGauge, - DecimalPlaces: influxdb.DecimalPlaces{IsEnforced: true, Digits: 1}, - Note: "a note", - Prefix: "pre", - TickPrefix: "true", - Suffix: "suf", - TickSuffix: "false", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: newColors("min", "max", "threshold"), - }, - }, - { - name: "heatmap", - props: influxdb.HeatmapViewProperties{ - Type: influxdb.ViewPropertyTypeHeatMap, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"}, - XColumn: "x", - YColumn: "y", - XDomain: []float64{0, 10}, - YDomain: []float64{0, 100}, - XAxisLabel: "x_label", - XPrefix: "x_prefix", - XSuffix: "x_suffix", - YAxisLabel: "y_label", - YPrefix: "y_prefix", - YSuffix: "y_suffix", - BinSize: 10, - TimeFormat: "", - }, - }, - { - name: "histogram", - props: influxdb.HistogramViewProperties{ - Type: influxdb.ViewPropertyTypeHistogram, - Note: "a note", - Queries: 
[]influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: []influxdb.ViewColor{{Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}}, - FillColumns: []string{"a", "b"}, - XColumn: "_value", - XDomain: []float64{0, 10}, - XAxisLabel: "x_label", - BinCount: 30, - Position: "stacked", - }, - }, - { - name: "markdown", - props: influxdb.MarkdownViewProperties{ - Type: influxdb.ViewPropertyTypeMarkdown, - Note: "the note is here with **markdown**", - }, - }, - { - name: "scatter", - props: influxdb.ScatterViewProperties{ - Type: influxdb.ViewPropertyTypeScatter, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"}, - XColumn: "x", - YColumn: "y", - XDomain: []float64{0, 10}, - YDomain: []float64{0, 100}, - XAxisLabel: "x_label", - XPrefix: "x_prefix", - XSuffix: "x_suffix", - YAxisLabel: "y_label", - YPrefix: "y_prefix", - YSuffix: "y_suffix", - TimeFormat: "", - }, - }, - { - name: "single stat", - props: influxdb.SingleStatViewProperties{ - Type: influxdb.ViewPropertyTypeSingleStat, - DecimalPlaces: influxdb.DecimalPlaces{IsEnforced: true, Digits: 1}, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - Prefix: "pre", - TickPrefix: "false", - ShowNoteWhenEmpty: true, - Suffix: "suf", - TickSuffix: "true", - ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, - }, - }, - { - name: "single stat plus line", - props: influxdb.LinePlusSingleStatProperties{ - Type: influxdb.ViewPropertyTypeSingleStatPlusLine, - Axes: newAxes(), - DecimalPlaces: influxdb.DecimalPlaces{IsEnforced: true, Digits: 1}, - Note: "a note", - Prefix: "pre", - Suffix: "suf", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShadeBelow: true, - HoverDimension: "y", - ShowNoteWhenEmpty: true, - ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, - XColumn: "x", - YColumn: "y", - Position: "stacked", - }, - }, - { - name: "table", - props: influxdb.TableViewProperties{ - Type: influxdb.ViewPropertyTypeTable, - Note: "a note", - ShowNoteWhenEmpty: true, - Queries: []influxdb.DashboardQuery{newQuery()}, - ViewColors: []influxdb.ViewColor{{Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}}, - TableOptions: influxdb.TableOptions{ - VerticalTimeAxis: true, - SortBy: influxdb.RenamableField{ - InternalName: "_time", - }, - Wrapping: "truncate", - FixFirstColumn: true, - }, - FieldOptions: []influxdb.RenamableField{ - { - InternalName: "_time", - DisplayName: "time (ms)", - Visible: true, - }, - }, - TimeFormat: "YYYY:MM:DD", - DecimalPlaces: influxdb.DecimalPlaces{ - IsEnforced: true, - Digits: 1, - }, - }, - }, - { - name: "xy", - props: influxdb.XYViewProperties{ - Type: influxdb.ViewPropertyTypeXY, - Axes: newAxes(), - Geom: "step", - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShadeBelow: true, - HoverDimension: "y", - ShowNoteWhenEmpty: true, - ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, - XColumn: "x", - YColumn: "y", - Position: "overlaid", - TimeFormat: "", - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - obj := pkger.DashboardToObject("", influxdb.Dashboard{ - OrganizationID: l.Org.ID, - Name: tt.name, - Cells: []*influxdb.Cell{ - { - CellProperty: influxdb.CellProperty{ - X: 2, Y: 2, - H: 5, W: 5, - }, - View: &influxdb.View{Properties: tt.props}, - }, 
- }, - }) - template := newTemplate(obj) - - impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(template)) - require.NoError(t, err) - - diff := impact.Diff.Dashboards - require.Len(t, diff, 1) - - actual := diff[0] - assert.Equal(t, pkger.KindDashboard, actual.Kind) - assert.Equal(t, tt.name, actual.New.Name) - - charts := actual.New.Charts - require.Len(t, charts, 1) - require.NotNil(t, charts[0].Properties) - assert.Equal(t, tt.props, charts[0].Properties) - } - - t.Run(tt.name, fn) - } - }) - - t.Run("labels", func(t *testing.T) { - template := newTemplate( - newLabelObject("label", "", "", ""), - ) - - impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(template)) - require.NoError(t, err) - - require.Len(t, impact.Diff.Labels, 1) - assert.Equal(t, pkger.KindLabel, impact.Diff.Labels[0].Kind) - }) - - t.Run("endpoints", func(t *testing.T) { - template := newTemplate( - newEndpointHTTP("http", "", ""), - newEndpointPagerDuty("pager", "", ""), - newEndpointSlack("slack", "", ""), - ) - - impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(template)) - require.NoError(t, err) - - require.Len(t, impact.Diff.NotificationEndpoints, 3) - assert.Equal(t, pkger.KindNotificationEndpointHTTP, impact.Diff.NotificationEndpoints[0].Kind) - assert.Equal(t, pkger.KindNotificationEndpointPagerDuty, impact.Diff.NotificationEndpoints[1].Kind) - assert.Equal(t, pkger.KindNotificationEndpointSlack, impact.Diff.NotificationEndpoints[2].Kind) - }) - - t.Run("rules", func(t *testing.T) { - template := newTemplate( - newEndpointHTTP("http", "", ""), - newRuleObject(t, "rule", "", "http", ""), - ) - - impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(template)) - require.NoError(t, err) - - require.Len(t, impact.Diff.NotificationRules, 1) - assert.Equal(t, pkger.KindNotificationRule, impact.Diff.NotificationRules[0].Kind) - }) - - t.Run("tasks", func(t *testing.T) { - template := newTemplate( - newTaskObject("task", "", ""), - ) - - impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(template)) - require.NoError(t, err) - - require.Len(t, impact.Diff.Tasks, 1) - assert.Equal(t, pkger.KindTask, impact.Diff.Tasks[0].Kind) - }) - - t.Run("telegraf configs", func(t *testing.T) { - template := newTemplate( - newTelegrafObject("tele", "", ""), - ) - - impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(template)) - require.NoError(t, err) - - require.Len(t, impact.Diff.Telegrafs, 1) - assert.Equal(t, pkger.KindTelegraf, impact.Diff.Telegrafs[0].Kind) - }) - - t.Run("variables", func(t *testing.T) { - template := newTemplate( - newVariableObject("var", "", ""), - ) - - impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(template)) - require.NoError(t, err) - - require.Len(t, impact.Diff.Variables, 1) - assert.Equal(t, pkger.KindVariable, impact.Diff.Variables[0].Kind) - }) - }) - - t.Run("apply a template of all new resources", func(t *testing.T) { - // this initial test is also setup for the sub tests - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(newCompletePkg(t))) - require.NoError(t, err) - defer deleteStackFn(t, impact.StackID) - - assert.NotZero(t, impact.StackID) - - sum1 := impact.Summary - - labels := sum1.Labels - require.Len(t, labels, 2) - assert.Equal(t, pkger.KindLabel, labels[0].Kind) - assert.NotZero(t, labels[0].ID) - assert.Equal(t, "label-1", labels[0].Name) - assert.Equal(t, "the 2nd label", 
labels[1].Name) - - bkts := sum1.Buckets - require.Len(t, bkts, 1) - assert.Equal(t, pkger.KindBucket, bkts[0].Kind) - assert.NotZero(t, bkts[0].ID) - assert.NotEmpty(t, bkts[0].MetaName) - assert.Equal(t, "rucketeer", bkts[0].Name) - hasLabelAssociations(t, bkts[0].LabelAssociations, 2, "label-1", "the 2nd label") - - checks := sum1.Checks - require.Len(t, checks, 2) - assert.Equal(t, "check 0 name", checks[0].Check.GetName()) - assert.Equal(t, pkger.KindCheckThreshold, checks[0].Kind) - hasLabelAssociations(t, checks[0].LabelAssociations, 1, "label-1") - assert.Equal(t, "check-1", checks[1].Check.GetName()) - assert.Equal(t, pkger.KindCheckDeadman, checks[1].Kind) - hasLabelAssociations(t, checks[1].LabelAssociations, 1, "label-1") - for _, ch := range checks { - assert.NotZero(t, ch.Check.GetID()) - } - - dashs := sum1.Dashboards - require.Len(t, dashs, 1) - assert.Equal(t, pkger.KindDashboard, dashs[0].Kind) - assert.NotZero(t, dashs[0].ID) - assert.NotEmpty(t, dashs[0].Name) - assert.Equal(t, "dash_1", dashs[0].Name) - assert.Equal(t, "desc1", dashs[0].Description) - hasLabelAssociations(t, dashs[0].LabelAssociations, 2, "label-1", "the 2nd label") - require.Len(t, dashs[0].Charts, 1) - assert.Equal(t, influxdb.ViewPropertyTypeSingleStat, dashs[0].Charts[0].Properties.GetType()) - - endpoints := sum1.NotificationEndpoints - require.Len(t, endpoints, 1) - assert.Equal(t, pkger.KindNotificationEndpointHTTP, endpoints[0].Kind) - assert.NotZero(t, endpoints[0].NotificationEndpoint.GetID()) - assert.Equal(t, "no auth endpoint", endpoints[0].NotificationEndpoint.GetName()) - assert.Equal(t, "http none auth desc", endpoints[0].NotificationEndpoint.GetDescription()) - assert.Equal(t, taskmodel.TaskStatusInactive, string(endpoints[0].NotificationEndpoint.GetStatus())) - hasLabelAssociations(t, endpoints[0].LabelAssociations, 1, "label-1") - - require.Len(t, sum1.NotificationRules, 1) - rule := sum1.NotificationRules[0] - assert.Equal(t, pkger.KindNotificationRule, rule.Kind) - assert.NotZero(t, rule.ID) - assert.Equal(t, "rule_0", rule.Name) - assert.Equal(t, pkger.SafeID(endpoints[0].NotificationEndpoint.GetID()), rule.EndpointID) - assert.Equal(t, "http-none-auth-notification-endpoint", rule.EndpointMetaName) - assert.Equalf(t, "http", rule.EndpointType, "rule: %+v", rule) - - require.Len(t, sum1.Tasks, 1) - task := sum1.Tasks[0] - assert.Equal(t, pkger.KindTask, task.Kind) - assert.NotZero(t, task.ID) - assert.Equal(t, "task_1", task.Name) - assert.Equal(t, "desc_1", task.Description) - - teles := sum1.TelegrafConfigs - require.Len(t, teles, 1) - assert.Equal(t, pkger.KindTelegraf, teles[0].Kind) - assert.NotZero(t, teles[0].TelegrafConfig.ID) - assert.Equal(t, l.Org.ID, teles[0].TelegrafConfig.OrgID) - assert.Equal(t, "first tele config", teles[0].TelegrafConfig.Name) - assert.Equal(t, "desc", teles[0].TelegrafConfig.Description) - assert.Equal(t, telegrafCfg, teles[0].TelegrafConfig.Config) - - vars := sum1.Variables - require.Len(t, vars, 1) - assert.Equal(t, pkger.KindVariable, vars[0].Kind) - assert.NotZero(t, vars[0].ID) - assert.Equal(t, "query var", vars[0].Name) - assert.Equal(t, []string{"rucketeer"}, vars[0].Selected) - hasLabelAssociations(t, vars[0].LabelAssociations, 1, "label-1") - varArgs := vars[0].Arguments - require.NotNil(t, varArgs) - assert.Equal(t, "query", varArgs.Type) - assert.Equal(t, influxdb.VariableQueryValues{ - Query: "buckets() |> filter(fn: (r) => r.name !~ /^_/) |> rename(columns: {name: \"_value\"}) |> keep(columns: [\"_value\"])", - Language: "flux", - 
}, varArgs.Values) - platformVar := resourceCheck.mustGetVariable(t, byID(platform.ID(vars[0].ID))) - assert.Equal(t, []string{"rucketeer"}, platformVar.Selected) - - newSumMapping := func(id pkger.SafeID, pkgName, name string, rt influxdb.ResourceType) pkger.SummaryLabelMapping { - return pkger.SummaryLabelMapping{ - Status: pkger.StateStatusNew, - ResourceID: id, - ResourceType: rt, - ResourceMetaName: pkgName, - ResourceName: name, - LabelMetaName: labels[0].MetaName, - LabelName: labels[0].Name, - LabelID: labels[0].ID, - } - } - - mappings := sum1.LabelMappings - - mappingsContain := func(t *testing.T, id pkger.SafeID, pkgName, name string, resourceType influxdb.ResourceType) { - t.Helper() - assert.Contains(t, mappings, newSumMapping(id, pkgName, name, resourceType)) - } - - require.Len(t, mappings, 11) - mappingsContain(t, bkts[0].ID, bkts[0].MetaName, bkts[0].Name, influxdb.BucketsResourceType) - mappingsContain(t, pkger.SafeID(checks[0].Check.GetID()), checks[0].MetaName, checks[0].Check.GetName(), influxdb.ChecksResourceType) - mappingsContain(t, dashs[0].ID, dashs[0].MetaName, dashs[0].Name, influxdb.DashboardsResourceType) - mappingsContain(t, pkger.SafeID(endpoints[0].NotificationEndpoint.GetID()), endpoints[0].MetaName, endpoints[0].NotificationEndpoint.GetName(), influxdb.NotificationEndpointResourceType) - mappingsContain(t, rule.ID, rule.MetaName, rule.Name, influxdb.NotificationRuleResourceType) - mappingsContain(t, task.ID, task.MetaName, task.Name, influxdb.TasksResourceType) - mappingsContain(t, pkger.SafeID(teles[0].TelegrafConfig.ID), teles[0].MetaName, teles[0].TelegrafConfig.Name, influxdb.TelegrafsResourceType) - mappingsContain(t, vars[0].ID, vars[0].MetaName, vars[0].Name, influxdb.VariablesResourceType) - - var ( - // used in dependent subtests - sum1Bkts = sum1.Buckets - sum1Checks = sum1.Checks - sum1Dashs = sum1.Dashboards - sum1Endpoints = sum1.NotificationEndpoints - sum1Labels = sum1.Labels - sum1Rules = sum1.NotificationRules - sum1Tasks = sum1.Tasks - sum1Teles = sum1.TelegrafConfigs - sum1Vars = sum1.Variables - ) - - t.Run("exporting all resources for an org", func(t *testing.T) { - t.Run("getting everything", func(t *testing.T) { - newPkg, err := svc.Export(ctx, pkger.ExportWithAllOrgResources( - pkger.ExportByOrgIDOpt{ - OrgID: l.Org.ID, - }, - )) - require.NoError(t, err) - - sum := newPkg.Summary() - - labels := sum.Labels - require.Len(t, labels, 2) - sortLabels(labels) - assert.Equal(t, "label-1", labels[0].Name) - assert.Equal(t, "the 2nd label", labels[1].Name) - - bkts := sum.Buckets - require.Len(t, bkts, 1) - assert.NotEmpty(t, bkts[0].MetaName) - assert.Equal(t, "rucketeer", bkts[0].Name) - hasLabelAssociations(t, bkts[0].LabelAssociations, 2, "label-1", "the 2nd label") - - checks := sum.Checks - require.Len(t, checks, 2) - sortChecks(checks) - assert.Equal(t, "check 0 name", checks[0].Check.GetName()) - hasLabelAssociations(t, checks[0].LabelAssociations, 1, "label-1") - assert.Equal(t, "check-1", checks[1].Check.GetName()) - hasLabelAssociations(t, checks[1].LabelAssociations, 1, "label-1") - - dashs := sum.Dashboards - require.Len(t, dashs, 1) - assert.NotEmpty(t, dashs[0].Name) - assert.Equal(t, "dash_1", dashs[0].Name) - assert.Equal(t, "desc1", dashs[0].Description) - hasLabelAssociations(t, dashs[0].LabelAssociations, 2, "label-1", "the 2nd label") - require.Len(t, dashs[0].Charts, 1) - assert.Equal(t, influxdb.ViewPropertyTypeSingleStat, dashs[0].Charts[0].Properties.GetType()) - - endpoints := sum.NotificationEndpoints - 
require.Len(t, endpoints, 1) - assert.Equal(t, "no auth endpoint", endpoints[0].NotificationEndpoint.GetName()) - assert.Equal(t, "http none auth desc", endpoints[0].NotificationEndpoint.GetDescription()) - assert.Equal(t, taskmodel.TaskStatusInactive, string(endpoints[0].NotificationEndpoint.GetStatus())) - hasLabelAssociations(t, endpoints[0].LabelAssociations, 1, "label-1") - - require.Len(t, sum.NotificationRules, 1) - rule := sum.NotificationRules[0] - assert.Equal(t, "rule_0", rule.Name) - assert.Equal(t, pkger.SafeID(endpoints[0].NotificationEndpoint.GetID()), rule.EndpointID) - assert.NotEmpty(t, rule.EndpointMetaName) - - require.Len(t, sum.Tasks, 1) - task := sum.Tasks[0] - assert.Equal(t, "task_1", task.Name) - assert.Equal(t, "desc_1", task.Description) - - teles := sum.TelegrafConfigs - require.Len(t, teles, 1) - assert.Equal(t, "first tele config", teles[0].TelegrafConfig.Name) - assert.Equal(t, "desc", teles[0].TelegrafConfig.Description) - assert.Equal(t, telegrafCfg, teles[0].TelegrafConfig.Config) - - vars := sum.Variables - require.Len(t, vars, 1) - assert.Equal(t, "query var", vars[0].Name) - hasLabelAssociations(t, vars[0].LabelAssociations, 1, "label-1") - varArgs := vars[0].Arguments - require.NotNil(t, varArgs) - assert.Equal(t, "query", varArgs.Type) - assert.Equal(t, influxdb.VariableQueryValues{ - Query: "buckets() |> filter(fn: (r) => r.name !~ /^_/) |> rename(columns: {name: \"_value\"}) |> keep(columns: [\"_value\"])", - Language: "flux", - }, varArgs.Values) - - newSumMapping := func(id pkger.SafeID, pkgName, name string, rt influxdb.ResourceType) pkger.SummaryLabelMapping { - return pkger.SummaryLabelMapping{ - Status: pkger.StateStatusNew, - ResourceID: id, - ResourceType: rt, - ResourceMetaName: pkgName, - ResourceName: name, - LabelMetaName: labels[0].MetaName, - LabelName: labels[0].Name, - LabelID: labels[0].ID, - } - } - - mappings := sum.LabelMappings - require.Len(t, mappings, 11) - assert.Contains(t, mappings, newSumMapping(bkts[0].ID, bkts[0].MetaName, bkts[0].Name, influxdb.BucketsResourceType)) - - ch0 := checks[0] - assert.Contains(t, mappings, newSumMapping(pkger.SafeID(ch0.Check.GetID()), ch0.MetaName, ch0.Check.GetName(), influxdb.ChecksResourceType)) - - ch1 := checks[0] - assert.Contains(t, mappings, newSumMapping(pkger.SafeID(ch1.Check.GetID()), ch1.MetaName, ch1.Check.GetName(), influxdb.ChecksResourceType)) - - ne := endpoints[0] - assert.Contains(t, mappings, newSumMapping(pkger.SafeID(ne.NotificationEndpoint.GetID()), ne.MetaName, ne.NotificationEndpoint.GetName(), influxdb.NotificationEndpointResourceType)) - - assert.Contains(t, mappings, newSumMapping(dashs[0].ID, dashs[0].MetaName, dashs[0].Name, influxdb.DashboardsResourceType)) - assert.Contains(t, mappings, newSumMapping(rule.ID, rule.MetaName, rule.Name, influxdb.NotificationRuleResourceType)) - assert.Contains(t, mappings, newSumMapping(task.ID, task.MetaName, task.Name, influxdb.TasksResourceType)) - assert.Contains(t, mappings, newSumMapping(pkger.SafeID(teles[0].TelegrafConfig.ID), teles[0].MetaName, teles[0].TelegrafConfig.Name, influxdb.TelegrafsResourceType)) - assert.Contains(t, mappings, newSumMapping(vars[0].ID, vars[0].MetaName, vars[0].Name, influxdb.VariablesResourceType)) - }) - - t.Run("filtered by resource types", func(t *testing.T) { - newPkg, err := svc.Export(ctx, pkger.ExportWithAllOrgResources( - pkger.ExportByOrgIDOpt{ - OrgID: l.Org.ID, - ResourceKinds: []pkger.Kind{pkger.KindCheck, pkger.KindTask}, - }, - )) - require.NoError(t, err) - - newSum := 
newPkg.Summary() - assert.NotEmpty(t, newSum.Checks) - assert.NotEmpty(t, newSum.Labels) - assert.NotEmpty(t, newSum.Tasks) - assert.Empty(t, newSum.Buckets) - assert.Empty(t, newSum.Dashboards) - assert.Empty(t, newSum.NotificationEndpoints) - assert.Empty(t, newSum.NotificationRules) - assert.Empty(t, newSum.TelegrafConfigs) - assert.Empty(t, newSum.Variables) - }) - - t.Run("filtered by label resource type", func(t *testing.T) { - newPkg, err := svc.Export(ctx, pkger.ExportWithAllOrgResources( - pkger.ExportByOrgIDOpt{ - OrgID: l.Org.ID, - ResourceKinds: []pkger.Kind{pkger.KindLabel}, - }, - )) - require.NoError(t, err) - - newSum := newPkg.Summary() - assert.NotEmpty(t, newSum.Labels) - assert.Empty(t, newSum.Buckets) - assert.Empty(t, newSum.Checks) - assert.Empty(t, newSum.Dashboards) - assert.Empty(t, newSum.NotificationEndpoints) - assert.Empty(t, newSum.NotificationRules) - assert.Empty(t, newSum.Tasks) - assert.Empty(t, newSum.TelegrafConfigs) - assert.Empty(t, newSum.Variables) - }) - - t.Run("filtered by label name", func(t *testing.T) { - newPkg, err := svc.Export(ctx, pkger.ExportWithAllOrgResources( - pkger.ExportByOrgIDOpt{ - OrgID: l.Org.ID, - LabelNames: []string{"the 2nd label"}, - }, - )) - require.NoError(t, err) - - newSum := newPkg.Summary() - assert.NotEmpty(t, newSum.Buckets) - assert.NotEmpty(t, newSum.Dashboards) - assert.NotEmpty(t, newSum.Labels) - assert.Empty(t, newSum.Checks) - assert.Empty(t, newSum.NotificationEndpoints) - assert.Empty(t, newSum.NotificationRules) - assert.Empty(t, newSum.Tasks) - assert.Empty(t, newSum.Variables) - }) - - t.Run("filtered by label name and resource type", func(t *testing.T) { - newPkg, err := svc.Export(ctx, pkger.ExportWithAllOrgResources( - pkger.ExportByOrgIDOpt{ - OrgID: l.Org.ID, - LabelNames: []string{"the 2nd label"}, - ResourceKinds: []pkger.Kind{pkger.KindDashboard}, - }, - )) - require.NoError(t, err) - - newSum := newPkg.Summary() - assert.NotEmpty(t, newSum.Dashboards) - assert.NotEmpty(t, newSum.Labels) - assert.Empty(t, newSum.Buckets) - assert.Empty(t, newSum.Checks) - assert.Empty(t, newSum.NotificationEndpoints) - assert.Empty(t, newSum.NotificationRules) - assert.Empty(t, newSum.Tasks) - assert.Empty(t, newSum.Variables) - }) - }) - - t.Run("pkg with same bkt-var-label does not create new resources for them", func(t *testing.T) { - // validate the new template doesn't create new resources for bkts/labels/vars - // since names collide.
- impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(newCompletePkg(t))) - require.NoError(t, err) - - sum2 := impact.Summary - - require.Equal(t, sum1.Buckets, sum2.Buckets) - require.Equal(t, sum1.Labels, sum2.Labels) - require.Equal(t, sum1.NotificationEndpoints, sum2.NotificationEndpoints) - require.Equal(t, sum1.Variables, sum2.Variables) - - // dashboards should be new - require.NotEqual(t, sum1.Dashboards, sum2.Dashboards) - }) - - t.Run("referenced secret values provided do not create new secrets", func(t *testing.T) { - applyPkgStr := func(t *testing.T, pkgStr string) pkger.Summary { - t.Helper() - pkg, err := pkger.Parse(pkger.EncodingYAML, pkger.FromString(pkgStr)) - require.NoError(t, err) - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg)) - require.NoError(t, err) - return impact.Summary - } - - pkgWithSecretRaw := fmt.Sprintf(` -apiVersion: %[1]s -kind: NotificationEndpointPagerDuty -metadata: - name: pager-duty-notification-endpoint -spec: - url: http://localhost:8080/orgs/7167eb6719fa34e5/alert-history - routingKey: secret-sauce -`, pkger.APIVersion) - - secretSum := applyPkgStr(t, pkgWithSecretRaw) - require.Len(t, secretSum.NotificationEndpoints, 1) - - id := secretSum.NotificationEndpoints[0].NotificationEndpoint.GetID() - expected := influxdb.SecretField{ - Key: id.String() + "-routing-key", - } - secrets := secretSum.NotificationEndpoints[0].NotificationEndpoint.SecretFields() - require.Len(t, secrets, 1) - assert.Equal(t, expected, secrets[0]) - - const pkgWithSecretRef = ` -apiVersion: %[1]s -kind: NotificationEndpointPagerDuty -metadata: - name: pager-duty-notification-endpoint -spec: - url: http://localhost:8080/orgs/7167eb6719fa34e5/alert-history - routingKey: - secretRef: - key: %s-routing-key -` - secretSum = applyPkgStr(t, fmt.Sprintf(pkgWithSecretRef, pkger.APIVersion, id.String())) - require.Len(t, secretSum.NotificationEndpoints, 1) - - expected = influxdb.SecretField{ - Key: id.String() + "-routing-key", - } - secrets = secretSum.NotificationEndpoints[0].NotificationEndpoint.SecretFields() - require.Len(t, secrets, 1) - assert.Equal(t, expected, secrets[0]) - }) - - t.Run("exporting resources with existing ids should return a valid pkg", func(t *testing.T) { - resToClone := []pkger.ResourceToClone{ - { - Kind: pkger.KindBucket, - ID: platform.ID(sum1Bkts[0].ID), - }, - { - Kind: pkger.KindCheck, - ID: sum1Checks[0].Check.GetID(), - }, - { - Kind: pkger.KindCheck, - ID: sum1Checks[1].Check.GetID(), - }, - { - Kind: pkger.KindDashboard, - ID: platform.ID(sum1Dashs[0].ID), - }, - { - Kind: pkger.KindLabel, - ID: platform.ID(sum1Labels[0].ID), - }, - { - Kind: pkger.KindNotificationEndpoint, - ID: sum1Endpoints[0].NotificationEndpoint.GetID(), - }, - { - Kind: pkger.KindTask, - ID: platform.ID(sum1Tasks[0].ID), - }, - { - Kind: pkger.KindTelegraf, - ID: sum1Teles[0].TelegrafConfig.ID, - }, - } - - resWithNewName := []pkger.ResourceToClone{ - { - Kind: pkger.KindNotificationRule, - Name: "new rule name", - ID: platform.ID(sum1Rules[0].ID), - }, - { - Kind: pkger.KindVariable, - Name: "new name", - ID: platform.ID(sum1Vars[0].ID), - }, - } - - newPkg, err := svc.Export(ctx, - pkger.ExportWithExistingResources(append(resToClone, resWithNewName...)...), - ) - require.NoError(t, err) - - newSum := newPkg.Summary() - - labels := newSum.Labels - require.Len(t, labels, 2) - sortLabels(labels) - assert.Zero(t, labels[0].ID) - assert.Equal(t, "label-1", labels[0].Name) - assert.Zero(t, labels[1].ID) - 
assert.Equal(t, "the 2nd label", labels[1].Name) - - bkts := newSum.Buckets - require.Len(t, bkts, 1) - assert.Zero(t, bkts[0].ID) - assert.Equal(t, "rucketeer", bkts[0].Name) - hasLabelAssociations(t, bkts[0].LabelAssociations, 2, "label-1", "the 2nd label") - - checks := newSum.Checks - require.Len(t, checks, 2) - sortChecks(checks) - assert.Equal(t, "check 0 name", checks[0].Check.GetName()) - hasLabelAssociations(t, checks[0].LabelAssociations, 1, "label-1") - assert.Equal(t, "check-1", checks[1].Check.GetName()) - hasLabelAssociations(t, checks[1].LabelAssociations, 1, "label-1") - - dashs := newSum.Dashboards - require.Len(t, dashs, 1) - assert.Zero(t, dashs[0].ID) - assert.Equal(t, "dash_1", dashs[0].Name) - assert.Equal(t, "desc1", dashs[0].Description) - hasLabelAssociations(t, dashs[0].LabelAssociations, 2, "label-1", "the 2nd label") - require.Len(t, dashs[0].Charts, 1) - assert.Equal(t, influxdb.ViewPropertyTypeSingleStat, dashs[0].Charts[0].Properties.GetType()) - - newEndpoints := newSum.NotificationEndpoints - require.Len(t, newEndpoints, 1) - assert.Equal(t, sum1Endpoints[0].NotificationEndpoint.GetName(), newEndpoints[0].NotificationEndpoint.GetName()) - assert.Equal(t, sum1Endpoints[0].NotificationEndpoint.GetDescription(), newEndpoints[0].NotificationEndpoint.GetDescription()) - hasLabelAssociations(t, newEndpoints[0].LabelAssociations, 1, "label-1") - - require.Len(t, newSum.NotificationRules, 1) - newRule := newSum.NotificationRules[0] - assert.Equal(t, "new rule name", newRule.Name) - assert.Zero(t, newRule.EndpointID) - assert.NotEmpty(t, newRule.EndpointMetaName) - hasLabelAssociations(t, newRule.LabelAssociations, 1, "label-1") - - require.Len(t, newSum.Tasks, 1) - newTask := newSum.Tasks[0] - assert.Equal(t, sum1Tasks[0].Name, newTask.Name) - assert.Equal(t, sum1Tasks[0].Description, newTask.Description) - assert.Equal(t, sum1Tasks[0].Cron, newTask.Cron) - assert.Equal(t, sum1Tasks[0].Every, newTask.Every) - assert.Equal(t, sum1Tasks[0].Offset, newTask.Offset) - assert.Equal(t, sum1Tasks[0].Query, newTask.Query) - assert.Equal(t, sum1Tasks[0].Status, newTask.Status) - - require.Len(t, newSum.TelegrafConfigs, 1) - assert.Equal(t, sum1Teles[0].TelegrafConfig.Name, newSum.TelegrafConfigs[0].TelegrafConfig.Name) - assert.Equal(t, sum1Teles[0].TelegrafConfig.Description, newSum.TelegrafConfigs[0].TelegrafConfig.Description) - hasLabelAssociations(t, newSum.TelegrafConfigs[0].LabelAssociations, 1, "label-1") - - vars := newSum.Variables - require.Len(t, vars, 1) - assert.Zero(t, vars[0].ID) - assert.Equal(t, "new name", vars[0].Name) // new name - hasLabelAssociations(t, vars[0].LabelAssociations, 1, "label-1") - varArgs := vars[0].Arguments - require.NotNil(t, varArgs) - assert.Equal(t, "query", varArgs.Type) - assert.Equal(t, influxdb.VariableQueryValues{ - Query: "buckets() |> filter(fn: (r) => r.name !~ /^_/) |> rename(columns: {name: \"_value\"}) |> keep(columns: [\"_value\"])", - Language: "flux", - }, varArgs.Values) - }) - - t.Run("error incurred during template application when resources already exist rollsback to prev state", func(t *testing.T) { - updatePkg, err := pkger.Parse(pkger.EncodingYAML, pkger.FromString(updatePkgYMLStr)) - require.NoError(t, err) - - svc := pkger.NewService( - pkger.WithBucketSVC(&fakeBucketSVC{ - BucketService: l.BucketService(t), - updateKillCount: 0, // kill on first update for bucket - }), - pkger.WithCheckSVC(l.CheckService()), - pkger.WithDashboardSVC(l.DashboardService(t)), - pkger.WithLabelSVC(l.LabelService(t)), - 
pkger.WithNotificationEndpointSVC(l.NotificationEndpointService(t)), - pkger.WithNotificationRuleSVC(l.NotificationRuleService(t)), - pkger.WithOrganizationService(l.OrganizationService()), - pkger.WithStore(pkger.NewStoreKV(l.kvStore)), - pkger.WithTaskSVC(l.TaskServiceKV(t)), - pkger.WithTelegrafSVC(l.TelegrafService(t)), - pkger.WithVariableSVC(l.VariableService(t)), - ) - - _, err = svc.Apply(ctx, l.Org.ID, 0, pkger.ApplyWithTemplate(updatePkg)) - require.Error(t, err) - - bkt, err := l.BucketService(t).FindBucketByID(ctx, platform.ID(sum1Bkts[0].ID)) - require.NoError(t, err) - // make sure the desc change is not applied and is rolled back to the prev desc - assert.Equal(t, sum1Bkts[0].Description, bkt.Description) - - ch, err := l.CheckService().FindCheckByID(ctx, sum1Checks[0].Check.GetID()) - require.NoError(t, err) - ch.SetOwnerID(0) - deadman, ok := ch.(*check.Threshold) - require.True(t, ok) - // validate the change to the query did not persist and was rolled back to the previous state. - // not comparing the entire check, b/c we don't save userID and so forth, which makes a - // direct comparison very annoying... - assert.Equal(t, sum1Checks[0].Check.(*check.Threshold).Query.Text, deadman.Query.Text) - - label, err := l.LabelService(t).FindLabelByID(ctx, platform.ID(sum1Labels[0].ID)) - require.NoError(t, err) - assert.Equal(t, sum1Labels[0].Properties.Description, label.Properties["description"]) - - endpoint, err := l.NotificationEndpointService(t).FindNotificationEndpointByID(ctx, sum1Endpoints[0].NotificationEndpoint.GetID()) - require.NoError(t, err) - assert.Equal(t, sum1Endpoints[0].NotificationEndpoint.GetDescription(), endpoint.GetDescription()) - - v, err := l.VariableService(t).FindVariableByID(ctx, platform.ID(sum1Vars[0].ID)) - require.NoError(t, err) - assert.Equal(t, sum1Vars[0].Description, v.Description) - }) - }) - - t.Run("apply a task template with a complex query", func(t *testing.T) { - // validates bug: https://github.com/influxdata/influxdb/issues/17069 - - pkgStr := fmt.Sprintf(` -apiVersion: %[1]s -kind: Task -metadata: - name: http-post-synthetic -spec: - name: Http.POST Synthetic (POST) - every: 5m - query: |- - import "strings" - import "csv" - import "http" - import "system" - - timeDiff = (t1, t2) => { - return duration(v: uint(v: t2) - uint(v: t1)) - } - timeDiffNum = (t1, t2) => { - return uint(v: t2) - uint(v: t1) - } - urlToPost = "http://www.duckduckgo.com" - timeBeforeCall = system.time() - responseCode = http.post(url: urlToPost, data: bytes(v: "influxdata")) - timeAfterCall = system.time() - responseTime = timeDiff(t1: timeBeforeCall, t2: timeAfterCall) - responseTimeNum = timeDiffNum(t1: timeBeforeCall, t2: timeAfterCall) - data = "#group,false,false,true,true,true,true,true,true - #datatype,string,long,string,string,string,string,string,string - #default,mean,,,,,,, - ,result,table,service,response_code,time_before,time_after,response_time_duration,response_time_ns - ,,0,http_post_ping,${string(v: responseCode)},${string(v: timeBeforeCall)},${string(v: timeAfterCall)},${string(v: responseTime)},${string(v: responseTimeNum)}" - theTable = csv.from(csv: data) - - theTable - |> map(fn: (r) => - ({r with _time: now()})) - |> map(fn: (r) => - ({r with _measurement: "PingService", url: urlToPost, method: "POST"})) - |> drop(columns: ["time_before", "time_after", "response_time_duration"]) - |> to(bucket: "Pingpire", orgID: "039346c3777a1000", fieldFn: (r) => - ({"responseCode": r.response_code, "responseTime": int(v: r.response_time_ns)})) -`, pkger.APIVersion) - -
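As an aside, a raw template string like the one above can also be previewed before it is committed; a minimal sketch using the parse and dry-run helpers already used throughout this file (the dry run surfaces parse or validation problems without creating the task):

```go
// Sketch only: parse the YAML above and dry-run it instead of applying it.
tmpl, err := pkger.Parse(pkger.EncodingYAML, pkger.FromString(pkgStr))
require.NoError(t, err)
preview, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(tmpl))
require.NoError(t, err)
require.Len(t, preview.Summary.Tasks, 1) // the task parses, but nothing has been persisted yet
```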
pkg, err := pkger.Parse(pkger.EncodingYAML, pkger.FromString(pkgStr)) - require.NoError(t, err) - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg)) - require.NoError(t, err) - assert.NotZero(t, impact.StackID) - - require.Len(t, impact.Summary.Tasks, 1) - }) - - t.Run("applying a template with an invalid template returns parser errors", func(t *testing.T) { - stack, cleanup := newStackFn(t, pkger.StackCreate{}) - defer cleanup() - - template := newTemplate(newTelegrafObject("with_underscore-is-bad", "", "")) - - _, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithTemplate(template), - pkger.ApplyWithStackID(stack.ID), - ) - require.Error(t, err) - require.True(t, pkger.IsParseErr(err)) - - vErrs := err.(pkger.ParseError).ValidationErrs() - require.Len(t, vErrs, 1) - - // this check is to make sure we aren't creating duplicate error messages like - // was witnessed from CLI recently. - require.Equal(t, 1, strings.Count(err.Error(), "DNS-1123")) - }) - - t.Run("applying a pkg without a stack will have a stack created for it", func(t *testing.T) { - pkg := newTemplate(newBucketObject("room", "for", "more")) - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg)) - require.NoError(t, err) - - require.NotZero(t, impact.StackID) - defer deleteStackFn(t, impact.StackID) - - require.Len(t, impact.Summary.Buckets, 1) - require.NotZero(t, impact.Summary.Buckets[0].ID) - - stacks, err := svc.ListStacks(ctx, l.Org.ID, pkger.ListFilter{ - StackIDs: []platform.ID{impact.StackID}, - }) - require.NoError(t, err) - - require.Len(t, stacks, 1) - ev := stacks[0].LatestEvent() - require.Len(t, ev.Resources, 1) - assert.Equal(t, ev.Resources[0].MetaName, "room") - assert.Equal(t, platform.ID(impact.Summary.Buckets[0].ID), ev.Resources[0].ID) - }) - - t.Run("apply a template with env refs", func(t *testing.T) { - pkgStr := fmt.Sprintf(` -apiVersion: %[1]s -kind: Bucket -metadata: - name: - envRef: - key: "bkt-1-name-ref" -spec: - associations: - - kind: Label - name: - envRef: - key: label-1-name-ref ---- -apiVersion: %[1]s -kind: Label -metadata: - name: - envRef: - key: "label-1-name-ref" -spec: ---- -apiVersion: influxdata.com/v2alpha1 -kind: CheckDeadman -metadata: - name: - envRef: - key: check-1-name-ref -spec: - every: 5m - level: cRiT - query: > - from(bucket: "rucket_1") |> range(start: v.timeRangeStart, stop: v.timeRangeStop) - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" ---- -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: - envRef: - key: dash-1-name-ref -spec: ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointSlack -metadata: - name: - envRef: - key: endpoint-1-name-ref -spec: - url: https://hooks.slack.com/services/bip/piddy/boppidy ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: - envRef: - key: rule-1-name-ref -spec: - endpointName: - envRef: - key: endpoint-1-name-ref - every: 10m - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - statusRules: - - currentLevel: WARN ---- -apiVersion: influxdata.com/v2alpha1 -kind: Telegraf -metadata: - name: - envRef: - key: telegraf-1-name-ref -spec: - config: | - [agent] - interval = "10s" ---- -apiVersion: influxdata.com/v2alpha1 -kind: Task -metadata: - name: - envRef: - key: task-1-name-ref -spec: - cron: 15 * * * * - query: > - from(bucket: "rucket_1") ---- -apiVersion: influxdata.com/v2alpha1 -kind: 
Variable -metadata: - name: - envRef: - key: var-1-name-ref -spec: - type: constant - values: [first val] -`, pkger.APIVersion) - - pkg, err := pkger.Parse(pkger.EncodingYAML, pkger.FromString(pkgStr)) - require.NoError(t, err) - - impact, err := svc.DryRun(ctx, l.Org.ID, l.User.ID, pkger.ApplyWithTemplate(pkg)) - require.NoError(t, err) - assert.Zero(t, impact.StackID) - - sum := impact.Summary - - require.Len(t, sum.Buckets, 1) - assert.Equal(t, "env-bkt-1-name-ref", sum.Buckets[0].Name) - assert.Len(t, sum.Buckets[0].LabelAssociations, 1) - require.Len(t, sum.Checks, 1) - assert.Equal(t, "env-check-1-name-ref", sum.Checks[0].Check.GetName()) - require.Len(t, sum.Dashboards, 1) - assert.Equal(t, "env-dash-1-name-ref", sum.Dashboards[0].Name) - require.Len(t, sum.Labels, 1) - assert.Equal(t, "env-label-1-name-ref", sum.Labels[0].Name) - require.Len(t, sum.NotificationEndpoints, 1) - assert.Equal(t, "env-endpoint-1-name-ref", sum.NotificationEndpoints[0].NotificationEndpoint.GetName()) - require.Len(t, sum.NotificationRules, 1) - assert.Equal(t, "env-rule-1-name-ref", sum.NotificationRules[0].Name) - require.Len(t, sum.TelegrafConfigs, 1) - assert.Equal(t, "env-task-1-name-ref", sum.Tasks[0].Name) - require.Len(t, sum.TelegrafConfigs, 1) - assert.Equal(t, "env-telegraf-1-name-ref", sum.TelegrafConfigs[0].TelegrafConfig.Name) - require.Len(t, sum.Variables, 1) - assert.Equal(t, "env-var-1-name-ref", sum.Variables[0].Name) - - expectedMissingEnvs := []string{ - "bkt-1-name-ref", - "check-1-name-ref", - "dash-1-name-ref", - "endpoint-1-name-ref", - "label-1-name-ref", - "rule-1-name-ref", - "task-1-name-ref", - "telegraf-1-name-ref", - "var-1-name-ref", - } - assert.Equal(t, expectedMissingEnvs, sum.MissingEnvs) - - impact, err = svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithTemplate(pkg), - pkger.ApplyWithEnvRefs(map[string]interface{}{ - "bkt-1-name-ref": "rucket_threeve", - "check-1-name-ref": "check_threeve", - "dash-1-name-ref": "dash_threeve", - "endpoint-1-name-ref": "endpoint_threeve", - "label-1-name-ref": "label_threeve", - "rule-1-name-ref": "rule_threeve", - "telegraf-1-name-ref": "telegraf_threeve", - "task-1-name-ref": "task_threeve", - "var-1-name-ref": "var_threeve", - }), - ) - require.NoError(t, err) - assert.NotZero(t, impact.StackID) - - sum = impact.Summary - - assert.Equal(t, "rucket_threeve", sum.Buckets[0].Name) - assert.Equal(t, "check_threeve", sum.Checks[0].Check.GetName()) - assert.Equal(t, "dash_threeve", sum.Dashboards[0].Name) - assert.Equal(t, "endpoint_threeve", sum.NotificationEndpoints[0].NotificationEndpoint.GetName()) - assert.Equal(t, "label_threeve", sum.Labels[0].Name) - assert.Equal(t, "rule_threeve", sum.NotificationRules[0].Name) - assert.Equal(t, "endpoint_threeve", sum.NotificationRules[0].EndpointMetaName) - assert.Equal(t, "telegraf_threeve", sum.TelegrafConfigs[0].TelegrafConfig.Name) - assert.Equal(t, "task_threeve", sum.Tasks[0].Name) - assert.Equal(t, "var_threeve", sum.Variables[0].Name) - assert.Empty(t, sum.MissingEnvs) - }) - - t.Run("apply a template with query refs", func(t *testing.T) { - t.Run("dashboard", func(t *testing.T) { - dashName := "dash-1" - newDashTmpl := func(t *testing.T) *pkger.Template { - t.Helper() - - tmplStr := ` -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: %s -spec: - charts: - - kind: Single_Stat - name: single stat - xPos: 1 - yPos: 2 - width: 6 - height: 3 - queries: - - query: | - option params = { - bucket: "foo", - start: -1d, - stop: now(), - name: "max", - floatVal: 1.0, - 
minVal: 10 - } - - from(bucket: params.bucket) - |> range(start: params.start, end: params.stop) - |> filter(fn: (r) => r._measurement == "processes") - |> filter(fn: (r) => r.floater == params.floatVal) - |> filter(fn: (r) => r._value > params.minVal) - |> aggregateWindow(every: v.windowPeriod, fn: max) - |> yield(name: params.name) - - params: - - key: bucket - default: "bar" - type: string - - key: start - type: duration - - key: stop - type: time - - key: floatVal - default: 37.2 - type: float - - key: minVal - type: int - - key: name # infer type - colors: - - name: laser - type: text - hex: "#8F8AF4" - value: 3` - tmplStr = fmt.Sprintf(tmplStr, dashName) - - template, err := pkger.Parse(pkger.EncodingYAML, pkger.FromString(tmplStr)) - require.NoError(t, err) - return template - } - - isExpectedQuery := func(t *testing.T, actual pkger.SummaryDashboard, expectedParams string) { - t.Helper() - - require.Len(t, actual.Charts, 1) - - props, ok := actual.Charts[0].Properties.(influxdb.SingleStatViewProperties) - require.True(t, ok, "unexpected chart properties") - - require.Len(t, props.Queries, 1) - - expectedQuery := expectedParams + ` - -from(bucket: params.bucket) - |> range(start: params.start, end: params.stop) - |> filter(fn: (r) => r._measurement == "processes") - |> filter(fn: (r) => r.floater == params.floatVal) - |> filter(fn: (r) => r._value > params.minVal) - |> aggregateWindow(every: v.windowPeriod, fn: max) - |> yield(name: params.name) -` - - assert.Equal(t, expectedQuery, props.Queries[0].Text) - assert.Equal(t, "advanced", props.Queries[0].EditMode) - } - - envKey := func(paramKey string) string { - return fmt.Sprintf( - "dashboards[%s].spec.charts[0].queries[0].params.%s", - dashName, - paramKey, - ) - } - - t.Run("using default values", func(t *testing.T) { - stack, cleanup := newStackFn(t, pkger.StackCreate{}) - defer cleanup() - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithStackID(stack.ID), - pkger.ApplyWithTemplate(newDashTmpl(t)), - ) - require.NoError(t, err) - - require.Len(t, impact.Summary.Dashboards, 1) - - actual := impact.Summary.Dashboards[0] - - expectedParams := `option params = { - bucket: "bar", - start: -24h0m0s, - stop: now(), - name: "max", - floatVal: 37.2, - minVal: 10, -}` - isExpectedQuery(t, actual, expectedParams) - - require.Len(t, actual.EnvReferences, 6) - - expectedRefs := []pkger.SummaryReference{ - { - Field: "spec.charts[0].queries[0].params.bucket", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.bucket`, - ValType: "string", - DefaultValue: "bar", - }, - { - Field: "spec.charts[0].queries[0].params.floatVal", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.floatVal`, - ValType: "float", - DefaultValue: 37.2, - }, - } - assert.Equal(t, expectedRefs, actual.EnvReferences[:len(expectedRefs)]) - - // check necessary since json can flip int to float type and fail assertions - // in a flakey manner - expectedIntRef := pkger.SummaryReference{ - Field: "spec.charts[0].queries[0].params.minVal", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.minVal`, - ValType: "integer", - DefaultValue: int64(10), - } - actualIntRef := actual.EnvReferences[len(expectedRefs)] - if f, ok := actualIntRef.DefaultValue.(float64); ok { - actualIntRef.DefaultValue = int64(f) - } - assert.Equal(t, expectedIntRef, actualIntRef) - - expectedRefs = []pkger.SummaryReference{ - { - Field: "spec.charts[0].queries[0].params.name", - EnvRefKey: 
`dashboards[dash-1].spec.charts[0].queries[0].params.name`, - ValType: "string", - DefaultValue: "max", - }, - { - Field: "spec.charts[0].queries[0].params.start", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.start`, - ValType: "duration", - DefaultValue: "-24h0m0s", - }, - { - Field: "spec.charts[0].queries[0].params.stop", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.stop`, - ValType: "time", - DefaultValue: "now()", - }, - } - assert.Equal(t, expectedRefs, actual.EnvReferences[3:]) - }) - - t.Run("with user provided values", func(t *testing.T) { - stack, cleanup := newStackFn(t, pkger.StackCreate{}) - defer cleanup() - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithStackID(stack.ID), - pkger.ApplyWithTemplate(newDashTmpl(t)), - pkger.ApplyWithEnvRefs(map[string]interface{}{ - envKey("bucket"): "foobar", - envKey("name"): "min", - envKey("start"): "-5d", - envKey("floatVal"): 33.3, - envKey("minVal"): 3, - }), - ) - require.NoError(t, err) - - require.Len(t, impact.Summary.Dashboards, 1) - - actual := impact.Summary.Dashboards[0] - - expectedParams := `option params = { - bucket: "foobar", - start: -5d, - stop: now(), - name: "min", - floatVal: 33.3, - minVal: 3, -}` - isExpectedQuery(t, actual, expectedParams) - - require.Len(t, actual.EnvReferences, 6) - - expectedRefs := []pkger.SummaryReference{ - { - Field: "spec.charts[0].queries[0].params.bucket", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.bucket`, - ValType: "string", - Value: "foobar", - DefaultValue: "bar", - }, - { - Field: "spec.charts[0].queries[0].params.floatVal", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.floatVal`, - ValType: "float", - Value: 33.3, - DefaultValue: 37.2, - }, - } - assert.Equal(t, expectedRefs, actual.EnvReferences[:len(expectedRefs)]) - - // check necessary since json can flip int to float type and fail assertions - // in a flakey manner - expectedIntRef := pkger.SummaryReference{ - Field: "spec.charts[0].queries[0].params.minVal", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.minVal`, - ValType: "integer", - Value: int64(3), - DefaultValue: int64(10), - } - actualIntRef := actual.EnvReferences[len(expectedRefs)] - if f, ok := actualIntRef.DefaultValue.(float64); ok { - actualIntRef.DefaultValue = int64(f) - } - if f, ok := actualIntRef.Value.(float64); ok { - actualIntRef.Value = int64(f) - } - assert.Equal(t, expectedIntRef, actualIntRef) - - expectedRefs = []pkger.SummaryReference{ - { - Field: "spec.charts[0].queries[0].params.name", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.name`, - ValType: "string", - Value: "min", - DefaultValue: "max", - }, - { - Field: "spec.charts[0].queries[0].params.start", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.start`, - ValType: "duration", - Value: "-5d", - DefaultValue: "-24h0m0s", - }, - { - Field: "spec.charts[0].queries[0].params.stop", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.stop`, - ValType: "time", - DefaultValue: "now()", - }, - } - assert.Equal(t, expectedRefs, actual.EnvReferences[3:]) - }) - }) - - t.Run("task", func(t *testing.T) { - taskName := "task-1" - newDashTmpl := func(t *testing.T) *pkger.Template { - t.Helper() - - tmplStr := ` -apiVersion: influxdata.com/v2alpha1 -kind: Task -metadata: - name: %s -spec: - every: 10m - query: | - option params = { - bucket: "foo", - start: -1d, - stop: now(), - name: "max", - floatVal: 1.0, - minVal: 10 - } - - 
from(bucket: params.bucket) - |> range(start: params.start, stop: params.stop) - |> filter(fn: (r) => r._measurement == "processes") - |> filter(fn: (r) => r.floater == params.floatVal) - |> filter(fn: (r) => r._value > params.minVal) - |> aggregateWindow(every: 1m, fn: max) - |> yield(name: params.name) - - params: - - key: bucket - default: "bar" - type: string - - key: start - type: duration - - key: stop - type: time - - key: floatVal - default: 37.2 - type: float - - key: minVal - type: int - - key: name # infer type` - tmplStr = fmt.Sprintf(tmplStr, taskName) - - template, err := pkger.Parse(pkger.EncodingYAML, pkger.FromString(tmplStr)) - require.NoError(t, err) - return template - } - - isExpectedQuery := func(t *testing.T, actual pkger.SummaryTask, expectedParams string) { - t.Helper() - - expectedQuery := expectedParams + ` - -from(bucket: params.bucket) - |> range(start: params.start, stop: params.stop) - |> filter(fn: (r) => r._measurement == "processes") - |> filter(fn: (r) => r.floater == params.floatVal) - |> filter(fn: (r) => r._value > params.minVal) - |> aggregateWindow(every: 1m, fn: max) - |> yield(name: params.name) -` - - assert.Equal(t, expectedQuery, actual.Query) - } - - envKey := func(paramKey string) string { - return fmt.Sprintf( - "tasks[%s].spec.params.%s", - taskName, - paramKey, - ) - } - - t.Run("using default values", func(t *testing.T) { - stack, cleanup := newStackFn(t, pkger.StackCreate{}) - defer cleanup() - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - pkger.ApplyWithStackID(stack.ID), - pkger.ApplyWithTemplate(newDashTmpl(t)), - ) - require.NoError(t, err) - - require.Len(t, impact.Summary.Tasks, 1) - - actual := impact.Summary.Tasks[0] - - expectedParams := `option params = { - bucket: "bar", - start: -24h0m0s, - stop: now(), - name: "max", - floatVal: 37.2, - minVal: 10, -}` - isExpectedQuery(t, actual, expectedParams) - - require.Len(t, actual.EnvReferences, 6) - - expectedRefs := []pkger.SummaryReference{ - { - Field: "spec.params.bucket", - EnvRefKey: `tasks[task-1].spec.params.bucket`, - ValType: "string", - DefaultValue: "bar", - }, - { - Field: "spec.params.floatVal", - EnvRefKey: `tasks[task-1].spec.params.floatVal`, - ValType: "float", - DefaultValue: 37.2, - }, - } - assert.Equal(t, expectedRefs, actual.EnvReferences[:len(expectedRefs)]) - - // check necessary since json can flip int to float type and fail assertions - // in a flakey manner - expectedIntRef := pkger.SummaryReference{ - Field: "spec.params.minVal", - EnvRefKey: `tasks[task-1].spec.params.minVal`, - ValType: "integer", - DefaultValue: int64(10), - } - actualIntRef := actual.EnvReferences[len(expectedRefs)] - if f, ok := actualIntRef.DefaultValue.(float64); ok { - actualIntRef.DefaultValue = int64(f) - } - assert.Equal(t, expectedIntRef, actualIntRef) - - expectedRefs = []pkger.SummaryReference{ - { - Field: "spec.params.name", - EnvRefKey: `tasks[task-1].spec.params.name`, - ValType: "string", - DefaultValue: "max", - }, - { - Field: "spec.params.start", - EnvRefKey: `tasks[task-1].spec.params.start`, - ValType: "duration", - DefaultValue: "-24h0m0s", - }, - { - Field: "spec.params.stop", - EnvRefKey: `tasks[task-1].spec.params.stop`, - ValType: "time", - DefaultValue: "now()", - }, - } - assert.Equal(t, expectedRefs, actual.EnvReferences[3:]) - }) - - t.Run("with user provided values", func(t *testing.T) { - stack, cleanup := newStackFn(t, pkger.StackCreate{}) - defer cleanup() - - impact, err := svc.Apply(ctx, l.Org.ID, l.User.ID, - 
pkger.ApplyWithStackID(stack.ID), - pkger.ApplyWithTemplate(newDashTmpl(t)), - pkger.ApplyWithEnvRefs(map[string]interface{}{ - envKey("bucket"): "foobar", - envKey("name"): "min", - envKey("start"): "-5d", - envKey("floatVal"): 33.3, - envKey("minVal"): 3, - }), - ) - require.NoError(t, err) - - require.Len(t, impact.Summary.Tasks, 1) - - actual := impact.Summary.Tasks[0] - - expectedParams := `option params = { - bucket: "foobar", - start: -5d, - stop: now(), - name: "min", - floatVal: 33.3, - minVal: 3, -}` - isExpectedQuery(t, actual, expectedParams) - - require.Len(t, actual.EnvReferences, 6) - - expectedRefs := []pkger.SummaryReference{ - { - Field: "spec.params.bucket", - EnvRefKey: `tasks[task-1].spec.params.bucket`, - ValType: "string", - Value: "foobar", - DefaultValue: "bar", - }, - { - Field: "spec.params.floatVal", - EnvRefKey: `tasks[task-1].spec.params.floatVal`, - ValType: "float", - Value: 33.3, - DefaultValue: 37.2, - }, - } - assert.Equal(t, expectedRefs, actual.EnvReferences[:len(expectedRefs)]) - - // check necessary since json can flip int to float type and fail assertions - // in a flakey manner - expectedIntRef := pkger.SummaryReference{ - Field: "spec.params.minVal", - EnvRefKey: `tasks[task-1].spec.params.minVal`, - ValType: "integer", - Value: int64(3), - DefaultValue: int64(10), - } - actualIntRef := actual.EnvReferences[len(expectedRefs)] - if f, ok := actualIntRef.DefaultValue.(float64); ok { - actualIntRef.DefaultValue = int64(f) - } - if f, ok := actualIntRef.Value.(float64); ok { - actualIntRef.Value = int64(f) - } - assert.Equal(t, expectedIntRef, actualIntRef) - - expectedRefs = []pkger.SummaryReference{ - { - Field: "spec.params.name", - EnvRefKey: `tasks[task-1].spec.params.name`, - ValType: "string", - Value: "min", - DefaultValue: "max", - }, - { - Field: "spec.params.start", - EnvRefKey: `tasks[task-1].spec.params.start`, - ValType: "duration", - Value: "-5d", - DefaultValue: "-24h0m0s", - }, - { - Field: "spec.params.stop", - EnvRefKey: `tasks[task-1].spec.params.stop`, - ValType: "time", - DefaultValue: "now()", - }, - } - assert.Equal(t, expectedRefs, actual.EnvReferences[3:]) - }) - }) - }) -} - -func newCompletePkg(t *testing.T) *pkger.Template { - t.Helper() - - pkg, err := pkger.Parse(pkger.EncodingYAML, pkger.FromString(pkgYMLStr)) - require.NoError(t, err) - return pkg -} - -const telegrafCfg = `[agent] - interval = "10s" - metric_batch_size = 1000 - metric_buffer_limit = 10000 - collection_jitter = "0s" - flush_interval = "10s" -[[outputs.influxdb_v2]] - urls = ["http://localhost:8086"] - token = "$INFLUX_TOKEN" - organization = "rg" - bucket = "rucket_3" -[[inputs.cpu]] - percpu = true -` - -var pkgYMLStr = fmt.Sprintf(` -apiVersion: %[1]s -kind: Label -metadata: - name: label-1 ---- -apiVersion: %[1]s -kind: Label -metadata: - name: the-2nd-label -spec: - name: the 2nd label ---- -apiVersion: %[1]s -kind: Bucket -metadata: - name: rucket-1 -spec: - name: rucketeer - associations: - - kind: Label - name: label-1 - - kind: Label - name: the-2nd-label ---- -apiVersion: %[1]s -kind: Dashboard -metadata: - name: dash-uuid -spec: - name: dash_1 - description: desc1 - charts: - - kind: Single_Stat - name: single stat - suffix: days - width: 6 - height: 3 - shade: true - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "system") |> filter(fn: (r) => r._field == "uptime") |> last() |> map(fn: (r) => ({r with _value: r._value / 86400})) |> yield(name: "last") - colors: - - name: 
laser - type: text - hex: "#8F8AF4" - associations: - - kind: Label - name: label-1 - - kind: Label - name: the-2nd-label ---- -apiVersion: %[1]s -kind: Variable -metadata: - name: var-query-1 -spec: - name: query var - description: var_query_1 desc - type: query - language: flux - query: | - buckets() |> filter(fn: (r) => r.name !~ /^_/) |> rename(columns: {name: "_value"}) |> keep(columns: ["_value"]) - selected: - - rucketeer - associations: - - kind: Label - name: label-1 ---- -apiVersion: %[1]s -kind: Telegraf -metadata: - name: first-tele-config -spec: - name: first tele config - description: desc - associations: - - kind: Label - name: label-1 - config: %+q ---- -apiVersion: %[1]s -kind: NotificationEndpointHTTP -metadata: - name: http-none-auth-notification-endpoint # on export of resource created from this, will not be same name as this -spec: - name: no auth endpoint - type: none - description: http none auth desc - method: GET - url: https://www.example.com/endpoint/noneauth - status: inactive - associations: - - kind: Label - name: label-1 ---- -apiVersion: %[1]s -kind: CheckThreshold -metadata: - name: check-0 -spec: - name: check 0 name - every: 1m - query: > - from(bucket: "rucket_1") - |> range(start: v.timeRangeStart, stop: v.timeRangeStop) - |> filter(fn: (r) => r._measurement == "cpu") - |> filter(fn: (r) => r._field == "usage_idle") - |> aggregateWindow(every: 1m, fn: mean) - |> yield(name: "mean") - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - tags: - - key: tag_1 - value: val_1 - thresholds: - - type: inside_range - level: INfO - min: 30.0 - max: 45.0 - - type: outside_range - level: WARN - min: 60.0 - max: 70.0 - - type: greater - level: CRIT - val: 80 - - type: lesser - level: OK - val: 30 - associations: - - kind: Label - name: label-1 ---- -apiVersion: %[1]s -kind: CheckDeadman -metadata: - name: check-1 -spec: - description: desc_1 - every: 5m - level: cRiT - offset: 10s - query: > - from(bucket: "rucket_1") - |> range(start: v.timeRangeStart, stop: v.timeRangeStop) - |> filter(fn: (r) => r._measurement == "cpu") - |> filter(fn: (r) => r._field == "usage_idle") - |> aggregateWindow(every: 1m, fn: mean) - |> yield(name: "mean") - reportZero: true - staleTime: 10m - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - timeSince: 90s - associations: - - kind: Label - name: label-1 ---- -apiVersion: %[1]s -kind: NotificationRule -metadata: - name: rule-uuid -spec: - name: rule_0 - description: desc_0 - endpointName: http-none-auth-notification-endpoint - every: 10m - offset: 30s - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - status: active - statusRules: - - currentLevel: WARN - - currentLevel: CRIT - previousLevel: OK - tagRules: - - key: k1 - value: v2 - operator: eQuAl - - key: k1 - value: v1 - operator: eQuAl - associations: - - kind: Label - name: label-1 ---- -apiVersion: %[1]s -kind: Task -metadata: - name: task-uuid -spec: - name: task_1 - description: desc_1 - cron: 15 * * * * - query: > - from(bucket: "rucket_1") - |> yield() - associations: - - kind: Label - name: label-1 -`, pkger.APIVersion, telegrafCfg) - -var updatePkgYMLStr = fmt.Sprintf(` -apiVersion: %[1]s -kind: Label -metadata: - name: label-1 -spec: - descriptin: new desc ---- -apiVersion: %[1]s -kind: Bucket -metadata: - name: rucket-1 -spec: - descriptin: new desc - associations: - - kind: Label - name: label-1 ---- -apiVersion: %[1]s -kind: Variable -metadata: - name: 
var-query-1 -spec: - description: new desc - type: query - language: flux - query: | - buckets() |> filter(fn: (r) => r.name !~ /^_/) |> rename(columns: {name: "_value"}) |> keep(columns: ["_value"]) - associations: - - kind: Label - name: label-1 ---- -apiVersion: %[1]s -kind: NotificationEndpointHTTP -metadata: - name: http-none-auth-notification-endpoint -spec: - name: no auth endpoint - type: none - description: new desc - method: GET - url: https://www.example.com/endpoint/noneauth - status: active ---- -apiVersion: %[1]s -kind: CheckThreshold -metadata: - name: check-0 -spec: - every: 1m - query: > - from("rucket1") |> yield() - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - thresholds: - - type: inside_range - level: INfO - min: 30.0 - max: 45.0 -`, pkger.APIVersion) - -type fakeBucketSVC struct { - influxdb.BucketService - createCallCount mock.SafeCount - createKillCount int - updateCallCount mock.SafeCount - updateKillCount int -} - -func (f *fakeBucketSVC) CreateBucket(ctx context.Context, b *influxdb.Bucket) error { - defer f.createCallCount.IncrFn()() - if f.createCallCount.Count() == f.createKillCount { - return errors.New("reached kill count") - } - return f.BucketService.CreateBucket(ctx, b) -} - -func (f *fakeBucketSVC) UpdateBucket(ctx context.Context, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { - defer f.updateCallCount.IncrFn()() - if f.updateCallCount.Count() == f.updateKillCount { - return nil, errors.New("reached kill count") - } - return f.BucketService.UpdateBucket(ctx, id, upd) -} - -type fakeLabelSVC struct { - influxdb.LabelService - createCallCount mock.SafeCount - createKillCount int - - deleteCallCount mock.SafeCount - deleteKillCount int -} - -func (f *fakeLabelSVC) CreateLabelMapping(ctx context.Context, m *influxdb.LabelMapping) error { - defer f.createCallCount.IncrFn()() - if f.createCallCount.Count() == f.createKillCount { - return errors.New("reached kill count") - } - return f.LabelService.CreateLabelMapping(ctx, m) -} - -func (f *fakeLabelSVC) DeleteLabelMapping(ctx context.Context, m *influxdb.LabelMapping) error { - defer f.deleteCallCount.IncrFn()() - if f.deleteCallCount.Count() == f.deleteKillCount { - return errors.New("reached kill count") - } - return f.LabelService.DeleteLabelMapping(ctx, m) -} - -type fakeRuleStore struct { - influxdb.NotificationRuleStore - createCallCount mock.SafeCount - createKillCount int -} - -func (f *fakeRuleStore) CreateNotificationRule(ctx context.Context, nr influxdb.NotificationRuleCreate, userID platform.ID) error { - defer f.createCallCount.IncrFn()() - if f.createCallCount.Count() == f.createKillCount { - return errors.New("reached kill count") - } - return f.NotificationRuleStore.CreateNotificationRule(ctx, nr, userID) -} - -func assertErrorCode(t *testing.T, expected string, err error) { - t.Helper() - assert.Error(t, err) - assert.Equal(t, expected, errors2.ErrorCode(err)) -} - -type resourceChecker struct { - tl *TestLauncher -} - -func newResourceChecker(tl *TestLauncher) resourceChecker { - return resourceChecker{tl: tl} -} - -type ( - getResourceOpt struct { - id platform.ID - name string - } - - getResourceOptFn func() getResourceOpt -) - -func byID(id platform.ID) getResourceOptFn { - return func() getResourceOpt { - return getResourceOpt{id: id} - } -} - -func byName(name string) getResourceOptFn { - return func() getResourceOpt { - return getResourceOpt{name: name} - } -} - -func bySafeID(id pkger.SafeID) getResourceOptFn { - return func() 
getResourceOpt { - return getResourceOpt{id: platform.ID(id)} - } -} - -func (r resourceChecker) getBucket(t *testing.T, getOpt getResourceOptFn) (influxdb.Bucket, error) { - t.Helper() - - bktSVC := r.tl.BucketService(t) - - var ( - bkt *influxdb.Bucket - err error - ) - switch opt := getOpt(); { - case opt.name != "": - bkt, err = bktSVC.FindBucketByName(ctx, r.tl.Org.ID, opt.name) - case opt.id != 0: - bkt, err = bktSVC.FindBucketByID(ctx, opt.id) - default: - require.Fail(t, "did not provide any get option") - } - if err != nil { - return influxdb.Bucket{}, err - } - - return *bkt, nil -} - -func (r resourceChecker) mustGetBucket(t *testing.T, getOpt getResourceOptFn) influxdb.Bucket { - t.Helper() - - bkt, err := r.getBucket(t, getOpt) - require.NoError(t, err) - return bkt -} - -func (r resourceChecker) mustDeleteBucket(t *testing.T, id platform.ID) { - t.Helper() - require.NoError(t, r.tl.BucketService(t).DeleteBucket(ctx, id)) -} - -func (r resourceChecker) getCheck(t *testing.T, getOpt getResourceOptFn) (influxdb.Check, error) { - t.Helper() - - checkSVC := r.tl.CheckService() - - var ( - ch influxdb.Check - err error - ) - switch opt := getOpt(); { - case opt.name != "": - ch, err = checkSVC.FindCheck(ctx, influxdb.CheckFilter{ - Name: &opt.name, - OrgID: &r.tl.Org.ID, - }) - case opt.id != 0: - ch, err = checkSVC.FindCheckByID(ctx, opt.id) - default: - require.Fail(t, "did not provide any get option") - } - - return ch, err -} - -func (r resourceChecker) mustGetCheck(t *testing.T, getOpt getResourceOptFn) influxdb.Check { - t.Helper() - - c, err := r.getCheck(t, getOpt) - require.NoError(t, err) - return c -} - -func (r resourceChecker) mustDeleteCheck(t *testing.T, id platform.ID) { - t.Helper() - - require.NoError(t, r.tl.CheckService().DeleteCheck(ctx, id)) -} - -func (r resourceChecker) getDashboard(t *testing.T, getOpt getResourceOptFn) (influxdb.Dashboard, error) { - t.Helper() - - dashSVC := r.tl.DashboardService(t) - - var ( - dashboard *influxdb.Dashboard - err error - ) - opt := getOpt() - switch { - case opt.name != "": - dashs, _, err := dashSVC.FindDashboards(ctx, influxdb.DashboardFilter{}, influxdb.DefaultDashboardFindOptions) - if err != nil { - return influxdb.Dashboard{}, err - } - for _, d := range dashs { - if d.Name == opt.name { - dashboard = d - break - } - } - case opt.id != 0: - dashboard, err = dashSVC.FindDashboardByID(ctx, opt.id) - default: - require.Fail(t, "did not provide any get option") - } - if err != nil { - return influxdb.Dashboard{}, err - } - if dashboard == nil { - return influxdb.Dashboard{}, fmt.Errorf("failed to find desired dashboard with opts: %+v", opt) - } - - return *dashboard, nil -} - -func (r resourceChecker) mustGetDashboard(t *testing.T, getOpt getResourceOptFn) influxdb.Dashboard { - t.Helper() - - dash, err := r.getDashboard(t, getOpt) - require.NoError(t, err) - return dash -} - -func (r resourceChecker) mustDeleteDashboard(t *testing.T, id platform.ID) { - t.Helper() - - require.NoError(t, r.tl.DashboardService(t).DeleteDashboard(ctx, id)) -} - -func (r resourceChecker) getEndpoint(t *testing.T, getOpt getResourceOptFn) (influxdb.NotificationEndpoint, error) { - t.Helper() - - endpointSVC := r.tl.NotificationEndpointService(t) - - var ( - e influxdb.NotificationEndpoint - err error - ) - switch opt := getOpt(); { - case opt.name != "": - var endpoints []influxdb.NotificationEndpoint - endpoints, _, err = endpointSVC.FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{ - OrgID: &r.tl.Org.ID, - }) - for _, 
existing := range endpoints { - if existing.GetName() == opt.name { - e = existing - break - } - } - case opt.id != 0: - e, err = endpointSVC.FindNotificationEndpointByID(ctx, opt.id) - default: - require.Fail(t, "did not provide any get option") - } - - if e == nil { - return nil, errors.New("did not find endpoint") - } - - return e, err -} - -func (r resourceChecker) mustGetEndpoint(t *testing.T, getOpt getResourceOptFn) influxdb.NotificationEndpoint { - t.Helper() - - e, err := r.getEndpoint(t, getOpt) - require.NoError(t, err) - return e -} - -func (r resourceChecker) mustDeleteEndpoint(t *testing.T, id platform.ID) { - t.Helper() - - _, _, err := r.tl.NotificationEndpointService(t).DeleteNotificationEndpoint(ctx, id) - require.NoError(t, err) -} - -func (r resourceChecker) getLabel(t *testing.T, getOpt getResourceOptFn) (influxdb.Label, error) { - t.Helper() - - labelSVC := r.tl.LabelService(t) - - var ( - label *influxdb.Label - err error - ) - switch opt := getOpt(); { - case opt.name != "": - labels, err := labelSVC.FindLabels( - ctx, - influxdb.LabelFilter{ - Name: opt.name, - OrgID: &r.tl.Org.ID, - }, - influxdb.FindOptions{Limit: 1}, - ) - if err != nil { - return influxdb.Label{}, err - } - if len(labels) == 0 { - return influxdb.Label{}, errors.New("did not find label: " + opt.name) - } - label = labels[0] - case opt.id != 0: - label, err = labelSVC.FindLabelByID(ctx, opt.id) - default: - require.Fail(t, "did not provide any get option") - } - if err != nil { - return influxdb.Label{}, err - } - - return *label, nil -} - -func (r resourceChecker) mustGetLabel(t *testing.T, getOpt getResourceOptFn) influxdb.Label { - t.Helper() - - l, err := r.getLabel(t, getOpt) - require.NoError(t, err) - return l -} - -func (r resourceChecker) mustDeleteLabel(t *testing.T, id platform.ID) { - t.Helper() - require.NoError(t, r.tl.LabelService(t).DeleteLabel(ctx, id)) -} - -func (r resourceChecker) getRule(t *testing.T, getOpt getResourceOptFn) (influxdb.NotificationRule, error) { - t.Helper() - - ruleSVC := r.tl.NotificationRuleService(t) - - var ( - rule influxdb.NotificationRule - err error - ) - switch opt := getOpt(); { - case opt.name != "": - var rules []influxdb.NotificationRule - rules, _, err = ruleSVC.FindNotificationRules(ctx, influxdb.NotificationRuleFilter{ - OrgID: &r.tl.Org.ID, - }) - for _, existing := range rules { - if existing.GetName() == opt.name { - rule = existing - break - } - } - case opt.id != 0: - rule, err = ruleSVC.FindNotificationRuleByID(ctx, opt.id) - default: - require.Fail(t, "did not provide any get option") - } - - if rule == nil { - return nil, errors.New("did not find rule") - } - - return rule, err -} - -func (r resourceChecker) mustGetRule(t *testing.T, getOpt getResourceOptFn) influxdb.NotificationRule { - t.Helper() - - rule, err := r.getRule(t, getOpt) - require.NoError(t, err) - return rule -} - -func (r resourceChecker) mustDeleteRule(t *testing.T, id platform.ID) { - t.Helper() - - require.NoError(t, r.tl.NotificationRuleService(t).DeleteNotificationRule(ctx, id)) -} - -func (r resourceChecker) getTask(t *testing.T, getOpt getResourceOptFn) (taskmodel.Task, error) { - t.Helper() - - taskSVC := r.tl.TaskService(t) - - var ( - task *taskmodel.Task - err error - ) - switch opt := getOpt(); { - case opt.name != "": - tasks, _, err := taskSVC.FindTasks(ctx, taskmodel.TaskFilter{ - Name: &opt.name, - OrganizationID: &r.tl.Org.ID, - }) - if err != nil { - return taskmodel.Task{}, err - } - for _, tt := range tasks { - if tt.Name == opt.name { - task = 
tasks[0] - break - } - } - case opt.id != 0: - task, err = taskSVC.FindTaskByID(ctx, opt.id) - default: - require.Fail(t, "did not provide a valid get option") - } - if task == nil { - return taskmodel.Task{}, errors.New("did not find expected task by name") - } - - return *task, err -} - -func (r resourceChecker) mustGetTask(t *testing.T, getOpt getResourceOptFn) taskmodel.Task { - t.Helper() - - task, err := r.getTask(t, getOpt) - require.NoError(t, err) - return task -} - -func (r resourceChecker) mustDeleteTask(t *testing.T, id platform.ID) { - t.Helper() - - require.NoError(t, r.tl.TaskService(t).DeleteTask(ctx, id)) -} - -func (r resourceChecker) getTelegrafConfig(t *testing.T, getOpt getResourceOptFn) (influxdb.TelegrafConfig, error) { - t.Helper() - - teleSVC := r.tl.TelegrafService(t) - - var ( - config *influxdb.TelegrafConfig - err error - ) - switch opt := getOpt(); { - case opt.name != "": - teles, _, _ := teleSVC.FindTelegrafConfigs(ctx, influxdb.TelegrafConfigFilter{ - OrgID: &r.tl.Org.ID, - }) - for _, tt := range teles { - if opt.name != "" && tt.Name == opt.name { - config = teles[0] - break - } - } - case opt.id != 0: - config, err = teleSVC.FindTelegrafConfigByID(ctx, opt.id) - default: - require.Fail(t, "did not provide a valid get option") - } - if config == nil { - return influxdb.TelegrafConfig{}, errors.New("did not find expected telegraf by name") - } - - return *config, err -} - -func (r resourceChecker) mustGetTelegrafConfig(t *testing.T, getOpt getResourceOptFn) influxdb.TelegrafConfig { - t.Helper() - - tele, err := r.getTelegrafConfig(t, getOpt) - require.NoError(t, err) - return tele -} - -func (r resourceChecker) mustDeleteTelegrafConfig(t *testing.T, id platform.ID) { - t.Helper() - - require.NoError(t, r.tl.TelegrafService(t).DeleteTelegrafConfig(ctx, id)) -} - -func (r resourceChecker) getVariable(t *testing.T, getOpt getResourceOptFn) (influxdb.Variable, error) { - t.Helper() - - varSVC := r.tl.VariableService(t) - - var ( - variable *influxdb.Variable - err error - ) - switch opt := getOpt(); { - case opt.name != "": - vars, err := varSVC.FindVariables(ctx, influxdb.VariableFilter{ - OrganizationID: &r.tl.Org.ID, - }) - if err != nil { - return influxdb.Variable{}, err - } - for i := range vars { - v := vars[i] - if v.Name == opt.name { - variable = v - break - } - } - if variable == nil { - return influxdb.Variable{}, errors.New("did not find variable: " + opt.name) - } - case opt.id != 0: - variable, err = varSVC.FindVariableByID(ctx, opt.id) - default: - require.Fail(t, "did not provide any get option") - } - if err != nil { - return influxdb.Variable{}, err - } - return *variable, nil -} - -func (r resourceChecker) mustGetVariable(t *testing.T, getOpt getResourceOptFn) influxdb.Variable { - t.Helper() - - l, err := r.getVariable(t, getOpt) - require.NoError(t, err) - return l -} - -func (r resourceChecker) mustDeleteVariable(t *testing.T, id platform.ID) { - t.Helper() - - err := r.tl.VariableService(t).DeleteVariable(ctx, id) - require.NoError(t, err) -} - -func sortChecks(checks []pkger.SummaryCheck) { - sort.Slice(checks, func(i, j int) bool { - return checks[i].Check.GetName() < checks[j].Check.GetName() - }) -} - -func sortLabels(labels []pkger.SummaryLabel) { - sort.Slice(labels, func(i, j int) bool { - return labels[i].Name < labels[j].Name - }) -} - -func strPtr(s string) *string { - return &s -} diff --git a/cmd/influxd/launcher/print_config.go b/cmd/influxd/launcher/print_config.go deleted file mode 100644 index 1401a55e9a2..00000000000 
--- a/cmd/influxd/launcher/print_config.go +++ /dev/null @@ -1,91 +0,0 @@ -package launcher - -import ( - "fmt" - "io" - - "github.com/influxdata/influxdb/v2/kit/cli" - "github.com/spf13/cobra" - "github.com/spf13/viper" - "gopkg.in/yaml.v3" -) - -func NewInfluxdPrintConfigCommand(v *viper.Viper, influxdOpts []cli.Opt) (*cobra.Command, error) { - - var keyToPrint string - printOpts := make([]cli.Opt, len(influxdOpts)+1) - - printOpts[0] = cli.Opt{ - DestP: &keyToPrint, - Flag: "key-name", - Desc: "config key name; if set, only the resolved value of that key will be printed", - } - for i, opt := range influxdOpts { - printOpts[i+1] = cli.Opt{ - DestP: opt.DestP, - Flag: opt.Flag, - Hidden: true, - } - } - - cmd := &cobra.Command{ - Use: "print-config", - Short: "Print the full influxd config resolved from the current environment", - Deprecated: "use the influx-cli command server-config to display the configuration values from the running server", - Long: ` -Print config (in YAML) that the influxd server would use if run with the current flags/env vars/config file. - -The order of precedence for config options are as follows (1 highest, 3 lowest): - 1. flags - 2. env vars - 3. config file - -A config file can be provided via the INFLUXD_CONFIG_PATH env var. If a file is -not provided via an env var, influxd will look in the current directory for a -config.{json|toml|yaml|yml} file. If one does not exist, then it will continue unchanged. - -See 'influxd -h' for the full list of config options supported by the server. -`, - RunE: func(cmd *cobra.Command, _ []string) error { - var err error - if keyToPrint == "" { - err = printAllConfigRunE(printOpts, cmd.OutOrStdout()) - } else { - err = printOneConfigRunE(printOpts, keyToPrint, cmd.OutOrStdout()) - } - - if err != nil { - return fmt.Errorf("failed to print config: %w", err) - } - - return nil - }, - Args: cobra.NoArgs, - } - if err := cli.BindOptions(v, cmd, printOpts); err != nil { - return nil, err - } - - return cmd, nil -} - -func printAllConfigRunE(configOpts []cli.Opt, out io.Writer) error { - configMap := make(map[string]interface{}, len(configOpts)) - - for _, o := range configOpts { - configMap[o.Flag] = o.DestP - } - - return yaml.NewEncoder(out).Encode(configMap) -} - -func printOneConfigRunE(configOpts []cli.Opt, key string, out io.Writer) error { - for _, o := range configOpts { - if o.Flag != key { - continue - } - return yaml.NewEncoder(out).Encode(o.DestP) - } - - return fmt.Errorf("key %q not found in config", key) -} diff --git a/cmd/influxd/launcher/print_config_test.go b/cmd/influxd/launcher/print_config_test.go deleted file mode 100644 index 81c51875c14..00000000000 --- a/cmd/influxd/launcher/print_config_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package launcher - -import ( - "bytes" - "testing" - - "github.com/influxdata/influxdb/v2/kit/cli" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zapcore" -) - -// Pretend we've already used cobra/viper to write -// values into these vars. 
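-// Each variable below is handed to a cli.Opt as its DestP pointer; printAllConfigRunE
-// and printOneConfigRunE then YAML-encode those pointers keyed by flag name.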
-var stringVar = "string-value" -var intVar = 12344 -var boolVar = false -var floatVar = 987.654 -var sliceVar = []string{"hello", "world"} -var mapVar = map[string]string{"foo": "bar", "baz": "qux"} -var levelVar = zapcore.InfoLevel -var idVar, _ = platform.IDFromString("020f755c3c082000") - -var opts = []cli.Opt{ - { - DestP: &stringVar, - Flag: "string-var", - }, - { - DestP: &intVar, - Flag: "int-var", - }, - { - DestP: &boolVar, - Flag: "bool-var", - }, - { - DestP: &floatVar, - Flag: "float-var", - }, - { - DestP: &sliceVar, - Flag: "slice-var", - }, - { - DestP: &mapVar, - Flag: "map-var", - }, - { - DestP: &levelVar, - Flag: "level-var", - }, - { - DestP: &idVar, - Flag: "id-var", - }, -} - -func Test_printAllConfig(t *testing.T) { - var out bytes.Buffer - require.NoError(t, printAllConfigRunE(opts, &out)) - - expected := `bool-var: false -float-var: 987.654 -id-var: 020f755c3c082000 -int-var: 12344 -level-var: info -map-var: - baz: qux - foo: bar -slice-var: - - hello - - world -string-var: string-value -` - - require.Equal(t, expected, out.String()) -} - -func Test_printOneConfig(t *testing.T) { - testCases := []struct { - key string - expected string - }{ - { - key: "bool-var", - expected: "false", - }, - { - key: "float-var", - expected: "987.654", - }, - { - key: "id-var", - expected: "020f755c3c082000", - }, - { - key: "level-var", - expected: "info", - }, - { - key: "map-var", - expected: `baz: qux -foo: bar`, - }, - { - key: "slice-var", - expected: `- hello -- world`, - }, - { - key: "string-var", - expected: "string-value", - }, - } - - for _, tc := range testCases { - t.Run(tc.key, func(t *testing.T) { - var out bytes.Buffer - require.NoError(t, printOneConfigRunE(opts, tc.key, &out)) - require.Equal(t, tc.expected+"\n", out.String()) - }) - } - - t.Run("bad-key", func(t *testing.T) { - var out bytes.Buffer - require.Error(t, printOneConfigRunE(opts, "bad-key", &out)) - require.Empty(t, out.String()) - }) -} diff --git a/cmd/influxd/launcher/query_test.go b/cmd/influxd/launcher/query_test.go deleted file mode 100644 index fabdbe81f3d..00000000000 --- a/cmd/influxd/launcher/query_test.go +++ /dev/null @@ -1,2663 +0,0 @@ -package launcher_test - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "html/template" - "io" - "math/rand" - nethttp "net/http" - "strings" - "sync" - "testing" - "time" - - context2 "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/mock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/csv" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/execute/executetest" - "github.com/influxdata/flux/execute/table" - "github.com/influxdata/flux/lang" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/runtime" - "github.com/influxdata/flux/values" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" - phttp "github.com/influxdata/influxdb/v2/http" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/query" - "go.uber.org/zap" -) - -func TestLauncher_Write_Query_FieldKey(t *testing.T) { - be := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer be.ShutdownOrFail(t, ctx) - - resp, err := 
nethttp.DefaultClient.Do( - be.MustNewHTTPRequest( - "POST", - fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", be.Org.ID, be.Bucket.ID), - `cpu,region=west,server=a v0=1.2 -cpu,region=west,server=b v0=33.2 -cpu,region=east,server=b,area=z v1=100.0 -disk,regions=north,server=b v1=101.2 -mem,server=b value=45.2`)) - if err != nil { - t.Fatal(err) - } - defer func() { - if err := resp.Body.Close(); err != nil { - t.Error(err) - } - }() - if resp.StatusCode != 204 { - t.Fatal("failed call to write points") - } - - rawQ := fmt.Sprintf(`from(bucket:"%s") - |> range(start:-1m) - |> filter(fn: (r) => r._measurement == "cpu" and (r._field == "v1" or r._field == "v0")) - |> group(columns:["_time", "_value"], mode:"except") - `, be.Bucket.Name) - - // Expected keys: - // - // _measurement=cpu,region=west,server=a,_field=v0 - // _measurement=cpu,region=west,server=b,_field=v0 - // _measurement=cpu,region=east,server=b,area=z,_field=v1 - // - results := be.MustExecuteQuery(rawQ) - defer results.Done() - results.First(t).HasTablesWithCols([]int{4, 4, 5}) -} - -func mustDoRequest(t *testing.T, req *nethttp.Request, expectStatus int) []byte { - resp, err := nethttp.DefaultClient.Do(req) - require.NoError(t, err) - defer func() { - require.NoError(t, resp.Body.Close()) - }() - - body, err := io.ReadAll(resp.Body) - require.NoError(t, err) - require.Equal(t, expectStatus, resp.StatusCode, "body is: %v", string(body)) - return body -} - -// This test initialises a default launcher writes some data, -// and checks that the queried results contain the expected number of tables -// and expected number of columns. -func TestLauncher_WriteV2_Query(t *testing.T) { - be := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer be.ShutdownOrFail(t, ctx) - - now := time.Now().UTC() - - // The default gateway instance inserts some values directly such that ID lookups seem to break, - // so go the roundabout way to insert things correctly. 
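-	// Write a single point through /api/v2/write with the launcher's auth token, then
-	// create a DBRP mapping so the InfluxQL /query?db=mydb requests below resolve to the bucket.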
- req := be.MustNewHTTPRequest( - "POST", - fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", be.Org.ID, be.Bucket.ID), - fmt.Sprintf("ctr n=1i %d", now.UnixNano()), - ) - phttp.SetToken(be.Auth.Token, req) - - mustDoRequest(t, req, nethttp.StatusNoContent) - - res := be.MustExecuteQuery(fmt.Sprintf(`from(bucket:"%s") |> range(start:-5m)`, be.Bucket.Name)) - defer res.Done() - res.HasTableCount(t, 1) - - require.NoError(t, be.DBRPMappingService().Create(context2.SetAuthorizer(ctx, mock.NewMockAuthorizer(true, nil)), &influxdb.DBRPMapping{ - ID: 0, - Database: "mydb", - RetentionPolicy: "autogen", - Default: true, - OrganizationID: be.Org.ID, - BucketID: be.Bucket.ID, - })) - - tests := []struct { - name string - permissions string - expectStatus int - expectBody string - }{ - { - name: "only auth permission", - permissions: `[{"action": "read", "resource": {"type": "authorizations"}}]`, - expectStatus: 200, - expectBody: `{"results":[{"statement_id":0,"error":"database not found: mydb"}]}` + "\n", - }, { - name: "only write permission", - permissions: fmt.Sprintf(`[{"action": "write", "resource": {"type": "buckets", "name": %q}}]`, be.Bucket.Name), - expectStatus: 200, - expectBody: `{"results":[{"statement_id":0,"error":"database not found: mydb"}]}` + "\n", - }, { - name: "only read permission", - permissions: fmt.Sprintf(`[{"action": "read", "resource": {"type": "buckets", "name": %q}}]`, be.Bucket.Name), - expectStatus: 200, - expectBody: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"ctr","columns":["time","n"],"values":[["%v",1]]}]}]}`, now.Format("2006-01-02T15:04:05.999999999Z")) + "\n", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tokenReq := be.MustNewHTTPRequest( - "POST", "/api/v2/authorizations", - fmt.Sprintf(`{"status": "active", "orgID": %q, "permissions": %v}`, be.Org.ID.String(), tt.permissions), - ) - token := struct { - Token string `json:"token"` - }{} - require.NoError(t, json.Unmarshal(mustDoRequest(t, tokenReq, nethttp.StatusCreated), &token)) - queryReq := be.MustNewHTTPRequest("POST", "/query?db=mydb", "select * from /.*/") - phttp.SetToken(token.Token, queryReq) - queryReq.Header.Set("Content-Type", "application/vnd.influxql") - body := mustDoRequest(t, queryReq, tt.expectStatus) - assert.Equal(t, tt.expectBody, string(body)) - }) - } - -} - -func getMemoryUnused(t *testing.T, reg *prom.Registry) int64 { - t.Helper() - - ms, err := reg.Gather() - if err != nil { - t.Fatal(err) - } - for _, m := range ms { - if m.GetName() == "qc_memory_unused_bytes" { - return int64(*m.GetMetric()[0].Gauge.Value) - } - } - t.Errorf("query metric for unused memory not found") - return 0 -} - -//lint:ignore U1000 erroneously flagged by staticcheck since it is used in skipped tests -func checkMemoryUsed(t *testing.T, l *launcher.TestLauncher, concurrency, initial int) { - t.Helper() - - got := l.QueryController().GetUsedMemoryBytes() - // base memory used is equal to initial memory bytes * concurrency. 
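-	// e.g. the ExceedMemory and ContextCanceled tests below run with ConcurrencyQuota = 1 and
-	// InitialMemoryBytesQuotaPerQuery = 100, so they expect want = 1 * 100 = 100 bytes.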
- if want := int64(concurrency * initial); want != got { - t.Errorf("expected used memory %d, got %d", want, got) - } -} - -func writeBytes(t *testing.T, l *launcher.TestLauncher, tagValue string, bs int) int { - // When represented in Flux, every point is: - // 1 byte _measurement ("m") - // + 1 byte _field ("f") - // + 8 bytes _value - // + len(tagValue) bytes - // + 8 bytes _time - // + 8 bytes _start - // + 8 bytes _stop - // --------------------------- - // = 34 + len(tag) bytes - pointSize := 34 + len(tagValue) - if bs < pointSize { - bs = pointSize - } - n := bs / pointSize - if n*pointSize < bs { - n++ - } - sb := strings.Builder{} - for i := 0; i < n; i++ { - sb.WriteString(fmt.Sprintf(`m,t=%s f=%di %d`, tagValue, i*100, time.Now().UnixNano())) - sb.WriteRune('\n') - } - l.WritePointsOrFail(t, sb.String()) - return n * pointSize -} - -type data struct { - Bucket string - TagValue string - Sleep string - verbose bool -} - -type queryOption func(d *data) - -func withTagValue(tv string) queryOption { - return func(d *data) { - d.TagValue = tv - } -} - -func withSleep(s time.Duration) queryOption { - return func(d *data) { - d.Sleep = flux.ConvertDuration(s).String() - } -} - -func queryPoints(ctx context.Context, t *testing.T, l *launcher.TestLauncher, opts ...queryOption) error { - d := &data{ - Bucket: l.Bucket.Name, - } - for _, opt := range opts { - opt(d) - } - tmpls := `from(bucket: "{{ .Bucket }}") - |> range(start:-5m) - {{- if .TagValue }} - // this must be pushed down to avoid unnecessary memory allocations. - |> filter(fn: (r) => r.t == "{{ .TagValue }}") - {{- end}} - // ensure we load everything into memory. - |> sort(columns: ["_time"]) - {{- if .Sleep }} - // now that you have everything in memory, you can sleep. - |> sleep(duration: {{ .Sleep }}) - {{- end}}` - tmpl, err := template.New("test-query").Parse(tmpls) - if err != nil { - return err - } - bs := new(bytes.Buffer) - if err := tmpl.Execute(bs, d); err != nil { - return err - } - qs := bs.String() - if d.verbose { - t.Logf("query:\n%s", qs) - } - pkg, err := runtime.ParseToJSON(context.Background(), qs) - if err != nil { - t.Fatal(err) - } - req := &query.Request{ - Authorization: l.Auth, - OrganizationID: l.Org.ID, - Compiler: lang.ASTCompiler{ - AST: pkg, - }, - } - return l.QueryAndNopConsume(ctx, req) -} - -// This test: -// - initializes a default launcher and sets memory limits; -// - writes some data; -// - queries the data; -// - verifies that the query fails (or not) and that the memory was de-allocated. 
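-// As a sizing example: with tagValue "t0" each point costs 34 + 2 = 36 bytes in writeBytes,
-// so the first case below (querySizeBytes: 30000) writes ceil(30000/36) = 834 points, i.e. 30024 bytes.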
-func TestLauncher_QueryMemoryLimits(t *testing.T) { - tcs := []struct { - name string - setOpts launcher.OptSetter - err bool - querySizeBytes int - // max_memory - per_query_memory * concurrency - unusedMemoryBytes int - }{ - { - name: "ok - initial memory bytes, memory bytes, and max memory set", - setOpts: func(o *launcher.InfluxdOpts) { - o.ConcurrencyQuota = 1 - o.QueueSize = 1 - o.InitialMemoryBytesQuotaPerQuery = 100 - o.MaxMemoryBytes = 1048576 // 1MB - }, - querySizeBytes: 30000, - err: false, - unusedMemoryBytes: 1048476, - }, - { - name: "error - memory bytes and max memory set", - setOpts: func(o *launcher.InfluxdOpts) { - o.ConcurrencyQuota = 1 - o.QueueSize = 1 - o.MemoryBytesQuotaPerQuery = 1 - o.MaxMemoryBytes = 100 - }, - querySizeBytes: 2, - err: true, - unusedMemoryBytes: 99, - }, - { - name: "error - initial memory bytes and max memory set", - setOpts: func(o *launcher.InfluxdOpts) { - o.ConcurrencyQuota = 1 - o.QueueSize = 1 - o.InitialMemoryBytesQuotaPerQuery = 1 - o.MaxMemoryBytes = 100 - }, - querySizeBytes: 101, - err: true, - unusedMemoryBytes: 99, - }, - { - name: "error - initial memory bytes, memory bytes, and max memory set", - setOpts: func(o *launcher.InfluxdOpts) { - o.ConcurrencyQuota = 1 - o.QueueSize = 1 - o.InitialMemoryBytesQuotaPerQuery = 1 - o.MemoryBytesQuotaPerQuery = 50 - o.MaxMemoryBytes = 100 - }, - querySizeBytes: 51, - err: true, - unusedMemoryBytes: 99, - }, - } - - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t, tc.setOpts) - defer l.ShutdownOrFail(t, ctx) - - const tagValue = "t0" - writeBytes(t, l, tagValue, tc.querySizeBytes) - if err := queryPoints(context.Background(), t, l, withTagValue(tagValue)); err != nil { - if tc.err { - if !strings.Contains(err.Error(), "allocation limit reached") { - t.Errorf("query errored with unexpected error: %v", err) - } - } else { - t.Errorf("unexpected error: %v", err) - } - } else if tc.err { - t.Errorf("expected error, got successful query execution") - } - - reg := l.Registry() - got := getMemoryUnused(t, reg) - want := int64(tc.unusedMemoryBytes) - if want != got { - t.Errorf("expected unused memory %d, got %d", want, got) - } - }) - } -} - -// This test: -// - initializes a default launcher and sets memory limits; -// - writes some data; -// - launches a query that does not error; -// - launches a query that gets canceled while executing; -// - launches a query that does not error; -// - verifies after each query run the used memory. -func TestLauncher_QueryMemoryManager_ExceedMemory(t *testing.T) { - t.Skip("this test is flaky, occasionally get error: \"memory allocation limit reached\" on OK query") - - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t, func(o *launcher.InfluxdOpts) { - o.LogLevel = zap.ErrorLevel - o.ConcurrencyQuota = 1 - o.InitialMemoryBytesQuotaPerQuery = 100 - o.MemoryBytesQuotaPerQuery = 50000 - o.MaxMemoryBytes = 200000 - }) - defer l.ShutdownOrFail(t, ctx) - - // One tag does not exceed memory. - const tOK = "t0" - writeBytes(t, l, tOK, 10000) - // The other does. 
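-	// 50001 bytes is just over MemoryBytesQuotaPerQuery (50000), so sorting this
-	// series in memory should hit the allocation limit.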
- const tKO = "t1" - writeBytes(t, l, tKO, 50001) - - if err := queryPoints(context.Background(), t, l, withTagValue(tOK)); err != nil { - t.Errorf("unexpected error: %v", err) - } - checkMemoryUsed(t, l, 1, 100) - if err := queryPoints(context.Background(), t, l, withTagValue(tKO)); err != nil { - if !strings.Contains(err.Error(), "allocation limit reached") { - t.Errorf("query errored with unexpected error: %v", err) - } - } else { - t.Errorf("unexpected error: %v", err) - } - checkMemoryUsed(t, l, 1, 100) - if err := queryPoints(context.Background(), t, l, withTagValue(tOK)); err != nil { - t.Errorf("unexpected error: %v", err) - } - checkMemoryUsed(t, l, 1, 100) -} - -// This test: -// - initializes a default launcher and sets memory limits; -// - writes some data; -// - launches a query that does not error; -// - launches a query and cancels its context; -// - launches a query that does not error; -// - verifies after each query run the used memory. -func TestLauncher_QueryMemoryManager_ContextCanceled(t *testing.T) { - t.Skip("this test is flaky, occasionally get error: \"memory allocation limit reached\"") - - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t, func(o *launcher.InfluxdOpts) { - o.LogLevel = zap.ErrorLevel - o.ConcurrencyQuota = 1 - o.InitialMemoryBytesQuotaPerQuery = 100 - o.MemoryBytesQuotaPerQuery = 50000 - o.MaxMemoryBytes = 200000 - }) - defer l.ShutdownOrFail(t, ctx) - - const tag = "t0" - writeBytes(t, l, tag, 10000) - - if err := queryPoints(context.Background(), t, l, withTagValue(tag)); err != nil { - t.Errorf("unexpected error: %v", err) - } - checkMemoryUsed(t, l, 1, 100) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - if err := queryPoints(ctx, t, l, withSleep(4*time.Second)); err == nil { - t.Errorf("expected error got none") - } - checkMemoryUsed(t, l, 1, 100) - if err := queryPoints(context.Background(), t, l, withTagValue(tag)); err != nil { - t.Errorf("unexpected error: %v", err) - } - checkMemoryUsed(t, l, 1, 100) -} - -// This test: -// - initializes a default launcher and sets memory limits; -// - writes some data; -// - launches (concurrently) a mixture of -// - OK queries; -// - queries that exceed the memory limit; -// - queries that get canceled; -// - verifies the used memory. -// -// Concurrency limit is set to 1, so only 1 query runs at a time and the others are queued. -// OK queries do not overcome the soft limit, so that they can run concurrently with the ones that exceed limits. -// The aim of this test is to verify that memory tracking works properly in the controller, -// even in the case of concurrent/queued queries. -func TestLauncher_QueryMemoryManager_ConcurrentQueries(t *testing.T) { - t.Skip("this test is flaky, occasionally get error: \"dial tcp 127.0.0.1:59654: connect: connection reset by peer\"") - - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t, func(o *launcher.InfluxdOpts) { - o.LogLevel = zap.ErrorLevel - o.QueueSize = 1024 - o.ConcurrencyQuota = 1 - o.InitialMemoryBytesQuotaPerQuery = 10000 - o.MemoryBytesQuotaPerQuery = 50000 - o.MaxMemoryBytes = 200000 - }) - defer l.ShutdownOrFail(t, ctx) - - // One tag does not exceed memory. - // The size is below the soft limit, so that querying this bucket never fail. - const tSmall = "t0" - writeBytes(t, l, tSmall, 9000) - // The other exceeds memory per query. 
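-	// 100000 bytes is double MemoryBytesQuotaPerQuery (50000), so queries against this
-	// tag are expected to fail with "allocation limit reached".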
- const tBig = "t1" - writeBytes(t, l, tBig, 100000) - - const nOK = 100 - const nMemExceeded = 100 - const nContextCanceled = 100 - nTotalQueries := nOK + nMemExceeded + nContextCanceled - - // In order to increase the variety of the load, store and shuffle queries. - qs := make([]func(), 0, nTotalQueries) - // Flock of OK queries. - for i := 0; i < nOK; i++ { - qs = append(qs, func() { - if err := queryPoints(context.Background(), t, l, withTagValue(tSmall)); err != nil { - t.Errorf("unexpected error (ok-query %d): %v", i, err) - } - }) - } - // Flock of big queries. - for i := 0; i < nMemExceeded; i++ { - qs = append(qs, func() { - if err := queryPoints(context.Background(), t, l, withTagValue(tBig)); err == nil { - t.Errorf("expected error got none (high-memory-query %d)", i) - } else if !strings.Contains(err.Error(), "allocation limit reached") { - t.Errorf("got wrong error (high-memory-query %d): %v", i, err) - } - }) - } - // Flock of context canceled queries. - for i := 0; i < nContextCanceled; i++ { - qs = append(qs, func() { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - if err := queryPoints(ctx, t, l, withTagValue(tSmall), withSleep(4*time.Second)); err == nil { - t.Errorf("expected error got none (context-canceled-query %d)", i) - } else if !strings.Contains(err.Error(), "context") { - t.Errorf("got wrong error (context-canceled-query %d): %v", i, err) - } - }) - } - rand.Shuffle(len(qs), func(i, j int) { qs[i], qs[j] = qs[j], qs[i] }) - - wg := sync.WaitGroup{} - wg.Add(nTotalQueries) - for i, q := range qs { - qs[i] = func() { - defer wg.Done() - q() - } - } - for _, q := range qs { - go q() - } - wg.Wait() - checkMemoryUsed(t, l, 1, 10000) -} - -func TestLauncher_Query_LoadSecret_Success(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - - const key, value = "mytoken", "secrettoken" - if err := l.SecretService().PutSecret(ctx, l.Org.ID, key, value); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // write one point so we can use it - l.WritePointsOrFail(t, fmt.Sprintf(`m,k=v1 f=%di %d`, 0, time.Now().UnixNano())) - - // we expect this request to succeed - req := &query.Request{ - Authorization: l.Auth, - OrganizationID: l.Org.ID, - Compiler: lang.FluxCompiler{ - Query: fmt.Sprintf(` -import "influxdata/influxdb/secrets" - -token = secrets.get(key: "mytoken") -from(bucket: "%s") - |> range(start: -5m) - |> set(key: "token", value: token) -`, l.Bucket.Name), - }, - } - if err := l.QueryAndConsume(ctx, req, func(r flux.Result) error { - return r.Tables().Do(func(tbl flux.Table) error { - return tbl.Do(func(cr flux.ColReader) error { - j := execute.ColIdx("token", cr.Cols()) - if j == -1 { - return errors.New("cannot find table column \"token\"") - } - - for i := 0; i < cr.Len(); i++ { - v := execute.ValueForRow(cr, i, j) - if got, want := v, values.NewString("secrettoken"); !got.Equal(want) { - t.Errorf("unexpected value at row %d -want/+got:\n\t- %v\n\t+ %v", i, got, want) - } - } - return nil - }) - }) - }); err != nil { - t.Fatalf("unexpected error: %s", err) - } -} - -func TestLauncher_Query_LoadSecret_Forbidden(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - - const key, value = "mytoken", "secrettoken" - if err := l.SecretService().PutSecret(ctx, l.Org.ID, key, value); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // write one point so we can use it - l.WritePointsOrFail(t, 
fmt.Sprintf(`m,k=v1 f=%di %d`, 0, time.Now().UnixNano())) - - auth := &influxdb.Authorization{ - OrgID: l.Org.ID, - UserID: l.User.ID, - Permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: &l.Bucket.ID, - OrgID: &l.Org.ID, - }, - }, - }, - } - if err := l.AuthorizationService(t).CreateAuthorization(ctx, auth); err != nil { - t.Fatalf("unexpected error creating authorization: %s", err) - } - l.Auth = auth - - // we expect this request to succeed - req := &query.Request{ - Authorization: l.Auth, - OrganizationID: l.Org.ID, - Compiler: lang.FluxCompiler{ - Query: fmt.Sprintf(` -import "influxdata/influxdb/secrets" - -token = secrets.get(key: "mytoken") -from(bucket: "%s") - |> range(start: -5m) - |> set(key: "token", value: token) -`, l.Bucket.Name), - }, - } - if err := l.QueryAndNopConsume(ctx, req); err == nil { - t.Error("expected error") - } else if got, want := errors2.ErrorCode(err), errors2.EUnauthorized; got != want { - t.Errorf("unexpected error code -want/+got:\n\t- %v\n\t+ %v", got, want) - } -} - -// We need a separate test for dynamic queries because our Flux e2e tests cannot test them now. -// Indeed, tableFind would fail while initializing the data in the input bucket, because the data is not -// written, and tableFind would complain not finding the tables. -// This will change once we make side effects drive execution and remove from/to concurrency in our e2e tests. -// See https://github.com/influxdata/flux/issues/1799. -func TestLauncher_DynamicQuery(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - - l.WritePointsOrFail(t, ` -m0,k=k0 f=0i 0 -m0,k=k0 f=1i 1 -m0,k=k0 f=2i 2 -m0,k=k0 f=3i 3 -m0,k=k0 f=4i 4 -m0,k=k1 f=5i 5 -m0,k=k1 f=6i 6 -m1,k=k0 f=5i 7 -m1,k=k2 f=0i 8 -m1,k=k0 f=6i 9 -m1,k=k1 f=6i 10 -m1,k=k0 f=7i 11 -m1,k=k0 f=5i 12 -m1,k=k1 f=8i 13 -m1,k=k2 f=9i 14 -m1,k=k3 f=5i 15`) - - // How many points do we have in stream2 with the same values of the ones in the table with key k0 in stream1? - // The only point matching the description is `m1,k=k2 f=0i 8`, because its value is in the set [0, 1, 2, 3, 4]. - dq := fmt.Sprintf(` -stream1 = from(bucket: "%s") |> range(start: 0) |> filter(fn: (r) => r._measurement == "m0" and r._field == "f") -stream2 = from(bucket: "%s") |> range(start: 0) |> filter(fn: (r) => r._measurement == "m1" and r._field == "f") -col = stream1 |> tableFind(fn: (key) => key.k == "k0") |> getColumn(column: "_value") -// Here is where dynamicity kicks in. 
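-// col holds the _value column of stream1's "k0" table ([0, 1, 2, 3, 4]), so only points in
-// stream2 whose value is in that set survive the filter; that is the single point m1,k=k2 f=0i 8.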
-stream2 |> filter(fn: (r) => contains(value: r._value, set: col)) |> group() |> count() |> yield(name: "dynamic")`, - l.Bucket.Name, l.Bucket.Name) - req := &query.Request{ - Authorization: l.Auth, - OrganizationID: l.Org.ID, - Compiler: lang.FluxCompiler{Query: dq}, - } - noRes := 0 - if err := l.QueryAndConsume(ctx, req, func(r flux.Result) error { - noRes++ - if n := r.Name(); n != "dynamic" { - t.Fatalf("got unexpected result: %s", n) - } - noTables := 0 - if err := r.Tables().Do(func(tbl flux.Table) error { - return tbl.Do(func(cr flux.ColReader) error { - noTables++ - j := execute.ColIdx("_value", cr.Cols()) - if j == -1 { - return errors.New("cannot find table column \"_value\"") - } - if want := 1; cr.Len() != want { - t.Fatalf("wrong number of rows in table: -want/+got:\n\t- %d\n\t+ %d", want, cr.Len()) - } - v := execute.ValueForRow(cr, 0, j) - if got, want := v, values.NewInt(1); !got.Equal(want) { - t.Errorf("unexpected value at row %d -want/+got:\n\t- %v\n\t+ %v", 0, want, got) - } - return nil - }) - }); err != nil { - return err - } - if want := 1; noTables != want { - t.Fatalf("wrong number of tables in result: -want/+got:\n\t- %d\n\t+ %d", want, noRes) - } - return nil - }); err != nil { - t.Fatalf("unexpected error: %s", err) - } - if want := 1; noRes != want { - t.Fatalf("wrong number of results: -want/+got:\n\t- %d\n\t+ %d", want, noRes) - } -} - -func TestLauncher_Query_ExperimentalTo(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - - // Last row of data tests nil field value - data := ` -#datatype,string,long,dateTime:RFC3339,double,string,string,string,string -#group,false,false,false,false,true,true,true,true -#default,_result,,,,,,, -,result,table,_time,_value,_field,_measurement,cpu,host -,,0,2018-05-22T19:53:26Z,1.0,usage_guest,cpu,cpu-total,host.local -,,0,2018-05-22T19:53:36Z,1.1,usage_guest,cpu,cpu-total,host.local -,,1,2018-05-22T19:53:26Z,2.0,usage_guest_nice,cpu,cpu-total,host.local -,,1,2018-05-22T19:53:36Z,2.1,usage_guest_nice,cpu,cpu-total,host.local -,,2,2018-05-22T19:53:26Z,91.7364670583823,usage_idle,cpu,cpu-total,host.local -,,2,2018-05-22T19:53:36Z,89.51118889861233,usage_idle,cpu,cpu-total,host.local -,,3,2018-05-22T19:53:26Z,3.0,usage_iowait,cpu,cpu-total,host.local -,,3,2018-05-22T19:53:36Z,,usage_iowait,cpu,cpu-total,host.local -` - pivotQuery := fmt.Sprintf(` -import "csv" -import "experimental" -import "influxdata/influxdb/v1" -csv.from(csv: "%s") - |> range(start: 2018-05-21T00:00:00Z, stop: 2018-05-23T00:00:00Z) - |> v1.fieldsAsCols() -`, data) - res := l.MustExecuteQuery(pivotQuery) - defer res.Done() - pivotedResultIterator := flux.NewSliceResultIterator(res.Results) - - toQuery := pivotQuery + fmt.Sprintf(`|> experimental.to(bucket: "%s", org: "%s") |> yield(name: "_result")`, - l.Bucket.Name, l.Org.Name) - res = l.MustExecuteQuery(toQuery) - defer res.Done() - toOutputResultIterator := flux.NewSliceResultIterator(res.Results) - - // Make sure that experimental.to() echoes its input to its output - if err := executetest.EqualResultIterators(pivotedResultIterator, toOutputResultIterator); err != nil { - t.Fatal(err) - } - - csvQuery := fmt.Sprintf(` -import "csv" -csv.from(csv: "%s") - |> filter(fn: (r) => exists r._value) -`, - data) - res = l.MustExecuteQuery(csvQuery) - defer res.Done() - csvResultIterator := flux.NewSliceResultIterator(res.Results) - - fromQuery := fmt.Sprintf(` -from(bucket: "%s") - |> range(start: 2018-05-15T00:00:00Z, stop: 2018-06-01T00:00:00Z) - |> 
drop(columns: ["_start", "_stop"]) -`, - l.Bucket.Name) - res = l.MustExecuteQuery(fromQuery) - defer res.Done() - fromResultIterator := flux.NewSliceResultIterator(res.Results) - - // Make sure that the data we stored matches the CSV - if err := executetest.EqualResultIterators(csvResultIterator, fromResultIterator); err != nil { - t.Fatal(err) - } -} - -type TestQueryProfiler struct { - start int64 -} - -func (s TestQueryProfiler) Name() string { - return fmt.Sprintf("query%d", s.start) -} - -func (s TestQueryProfiler) GetSortedResult(q flux.Query, alloc memory.Allocator, desc bool, sortKeys ...string) (flux.Table, error) { - return nil, nil -} - -func (s TestQueryProfiler) GetResult(q flux.Query, alloc memory.Allocator) (flux.Table, error) { - groupKey := execute.NewGroupKey( - []flux.ColMeta{ - { - Label: "_measurement", - Type: flux.TString, - }, - }, - []values.Value{ - values.NewString(fmt.Sprintf("profiler/query%d", s.start)), - }, - ) - b := execute.NewColListTableBuilder(groupKey, alloc) - colMeta := []flux.ColMeta{ - { - Label: "_measurement", - Type: flux.TString, - }, - { - Label: "TotalDuration", - Type: flux.TInt, - }, - { - Label: "CompileDuration", - Type: flux.TInt, - }, - { - Label: "QueueDuration", - Type: flux.TInt, - }, - { - Label: "PlanDuration", - Type: flux.TInt, - }, - { - Label: "RequeueDuration", - Type: flux.TInt, - }, - { - Label: "ExecuteDuration", - Type: flux.TInt, - }, - { - Label: "Concurrency", - Type: flux.TInt, - }, - { - Label: "MaxAllocated", - Type: flux.TInt, - }, - { - Label: "TotalAllocated", - Type: flux.TInt, - }, - { - Label: "RuntimeErrors", - Type: flux.TString, - }, - { - Label: "influxdb/scanned-bytes", - Type: flux.TInt, - }, - { - Label: "influxdb/scanned-values", - Type: flux.TInt, - }, - { - Label: "flux/query-plan", - Type: flux.TString, - }, - } - colData := []interface{}{ - fmt.Sprintf("profiler/query%d", s.start), - s.start, - s.start + 1, - s.start + 2, - s.start + 3, - s.start + 4, - s.start + 5, - s.start + 6, - s.start + 7, - s.start + 8, - "error1\nerror2", - s.start + 9, - s.start + 10, - "query plan", - } - for _, col := range colMeta { - if _, err := b.AddCol(col); err != nil { - return nil, err - } - } - for i := 0; i < len(colData); i++ { - if intValue, ok := colData[i].(int64); ok { - b.AppendInt(i, intValue) - } else { - b.AppendString(i, colData[i].(string)) - } - } - tbl, err := b.Table() - if err != nil { - return nil, err - } - return tbl, nil -} - -func NewTestQueryProfiler0() execute.Profiler { - return &TestQueryProfiler{start: 0} -} - -func NewTestQueryProfiler100() execute.Profiler { - return &TestQueryProfiler{start: 100} -} - -func TestFluxProfiler(t *testing.T) { - testcases := []struct { - name string - data []string - query string - want string - }{ - { - name: "range last single point start time", - data: []string{ - "m,tag=a f=1i 1", - }, - query: ` -option profiler.enabledProfilers = ["query0", "query100", "query100", "NonExistentProfiler"] -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00.000000001Z, stop: 1970-01-01T01:00:00Z) - |> last() -`, - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,tag -,,0,1970-01-01T00:00:00.000000001Z,1970-01-01T01:00:00Z,1970-01-01T00:00:00.000000001Z,1,f,m,a - -#datatype,string,long,string,long,long,long,long,long,long,long,long,long,string,string,long,long 
-#group,false,false,true,false,false,false,false,false,false,false,false,false,false,false,false,false -#default,_profiler,,,,,,,,,,,,,,, -,result,table,_measurement,TotalDuration,CompileDuration,QueueDuration,PlanDuration,RequeueDuration,ExecuteDuration,Concurrency,MaxAllocated,TotalAllocated,RuntimeErrors,flux/query-plan,influxdb/scanned-bytes,influxdb/scanned-values -,,0,profiler/query0,0,1,2,3,4,5,6,7,8,"error1 -error2","query plan",9,10 -,,1,profiler/query100,100,101,102,103,104,105,106,107,108,"error1 -error2","query plan",109,110 -`, - }, - } - execute.RegisterProfilerFactories(NewTestQueryProfiler0, NewTestQueryProfiler100) - for _, tc := range testcases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - - l.WritePointsOrFail(t, strings.Join(tc.data, "\n")) - - queryStr := "import \"profiler\"\nv = {bucket: " + "\"" + l.Bucket.Name + "\"" + "}\n" + tc.query - req := &query.Request{ - Authorization: l.Auth, - OrganizationID: l.Org.ID, - Compiler: lang.FluxCompiler{ - Query: queryStr, - }, - } - if got, err := l.FluxQueryService().Query(ctx, req); err != nil { - t.Error(err) - } else { - dec := csv.NewMultiResultDecoder(csv.ResultDecoderConfig{}) - want, err := dec.Decode(io.NopCloser(strings.NewReader(tc.want))) - if err != nil { - t.Fatal(err) - } - defer want.Release() - - if err := executetest.EqualResultIterators(want, got); err != nil { - t.Fatal(err) - } - } - }) - } -} - -func TestQueryPushDowns(t *testing.T) { - testcases := []struct { - name string - data []string - query string - op string - want string - skip string - }{ - { - name: "range last single point start time", - data: []string{ - "m,tag=a f=1i 1", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00.000000001Z, stop: 1970-01-01T01:00:00Z) - |> last() -`, - op: "readWindow(last)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,tag -,,0,1970-01-01T00:00:00.000000001Z,1970-01-01T01:00:00Z,1970-01-01T00:00:00.000000001Z,1,f,m,a -`, - }, - { - name: "window last", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:05Z, stop: 1970-01-01T00:00:20Z) - |> window(every: 3s) - |> last() -`, - op: "readWindow(last)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:05Z,1970-01-01T00:00:06Z,1970-01-01T00:00:05Z,5,f,m0,k0 -,,1,1970-01-01T00:00:06Z,1970-01-01T00:00:09Z,1970-01-01T00:00:08Z,0,f,m0,k0 -,,2,1970-01-01T00:00:09Z,1970-01-01T00:00:12Z,1970-01-01T00:00:11Z,7,f,m0,k0 -,,3,1970-01-01T00:00:12Z,1970-01-01T00:00:15Z,1970-01-01T00:00:14Z,9,f,m0,k0 
-,,4,1970-01-01T00:00:15Z,1970-01-01T00:00:18Z,1970-01-01T00:00:15Z,5,f,m0,k0 -`, - }, - { - name: "window offset last", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:05Z, stop: 1970-01-01T00:00:20Z) - |> window(every: 3s, offset: 2s) - |> last() -`, - op: "readWindow(last)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:05Z,1970-01-01T00:00:08Z,1970-01-01T00:00:07Z,5,f,m0,k0 -,,1,1970-01-01T00:00:08Z,1970-01-01T00:00:11Z,1970-01-01T00:00:10Z,6,f,m0,k0 -,,2,1970-01-01T00:00:11Z,1970-01-01T00:00:14Z,1970-01-01T00:00:13Z,8,f,m0,k0 -,,3,1970-01-01T00:00:14Z,1970-01-01T00:00:17Z,1970-01-01T00:00:15Z,5,f,m0,k0 -`, - }, - { - name: "bare last", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:05Z, stop: 1970-01-01T00:00:20Z) - |> last() -`, - op: "readWindow(last)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:05Z,1970-01-01T00:00:20Z,1970-01-01T00:00:15Z,5,f,m0,k0 -`, - }, - { - name: "window empty last", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1969-12-31T23:00:00Z, stop: 1970-01-01T02:00:00Z) - |> window(every: 1h, createEmpty: true) - |> last() -`, - op: "readWindow(last)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,0,1969-12-31T23:00:00Z,1970-01-01T00:00:00Z,,,f,m0,k0 -,result,table,_start,_stop,_time,_value,_field,_measurement,k - -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, 
-,result,table,_start,_stop,_time,_value,_field,_measurement,k -,,1,1970-01-01T00:00:00Z,1970-01-01T01:00:00Z,1970-01-01T00:00:15Z,5,f,m0,k0 - -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,2,1970-01-01T01:00:00Z,1970-01-01T02:00:00Z,,,f,m0,k0 -,result,table,_start,_stop,_time,_value,_field,_measurement,k -`, - }, - { - name: "window empty offset last", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1969-12-31T23:00:00Z, stop: 1970-01-01T02:00:00Z) - |> window(every: 1h, offset: 1h, createEmpty: true) - |> last() -`, - op: "readWindow(last)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,0,1969-12-31T23:00:00Z,1970-01-01T00:00:00Z,,,f,m0,k0 -,result,table,_start,_stop,_time,_value,_field,_measurement,k - -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,k -,,1,1970-01-01T00:00:00Z,1970-01-01T01:00:00Z,1970-01-01T00:00:15Z,5,f,m0,k0 - -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,2,1970-01-01T01:00:00Z,1970-01-01T02:00:00Z,,,f,m0,k0 -,result,table,_start,_stop,_time,_value,_field,_measurement,k -`, - }, - { - name: "window aggregate last", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1969-12-31T23:59:59Z, stop: 1970-01-01T00:00:33Z) - |> aggregateWindow(every: 10s, fn: last, createEmpty: false) -`, - op: "readWindow(last)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,k -,,0,1969-12-31T23:59:59Z,1970-01-01T00:00:33Z,1970-01-01T00:00:10Z,6,f,m0,k0 -,,0,1969-12-31T23:59:59Z,1970-01-01T00:00:33Z,1970-01-01T00:00:20Z,5,f,m0,k0 -`, - }, - { - name: "window first", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 
9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:05Z, stop: 1970-01-01T00:00:20Z) - |> window(every: 3s) - |> first() -`, - op: "readWindow(first)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:05Z,1970-01-01T00:00:06Z,1970-01-01T00:00:05Z,5,f,m0,k0 -,,1,1970-01-01T00:00:06Z,1970-01-01T00:00:09Z,1970-01-01T00:00:06Z,6,f,m0,k0 -,,2,1970-01-01T00:00:09Z,1970-01-01T00:00:12Z,1970-01-01T00:00:09Z,6,f,m0,k0 -,,3,1970-01-01T00:00:12Z,1970-01-01T00:00:15Z,1970-01-01T00:00:12Z,5,f,m0,k0 -,,4,1970-01-01T00:00:15Z,1970-01-01T00:00:18Z,1970-01-01T00:00:15Z,5,f,m0,k0 -`, - }, - { - name: "window first string", - data: []string{ - "m,tag=a f=\"c\" 2000000000", - "m,tag=a f=\"d\" 3000000000", - "m,tag=a f=\"h\" 7000000000", - "m,tag=a f=\"i\" 8000000000", - "m,tag=a f=\"j\" 9000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:10Z) - |> window(every: 5s) - |> first() -`, - op: "readWindow(first)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,tag -,,0,1970-01-01T00:00:00Z,1970-01-01T00:00:05Z,1970-01-01T00:00:02Z,c,f,m,a -,,1,1970-01-01T00:00:05Z,1970-01-01T00:00:10Z,1970-01-01T00:00:07Z,h,f,m,a -`, - }, - { - name: "bare first", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:05Z, stop: 1970-01-01T00:00:20Z) - |> first() -`, - op: "readWindow(first)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:05Z,1970-01-01T00:00:20Z,1970-01-01T00:00:05Z,5,f,m0,k0 -`, - }, - { - name: "window empty first", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:02Z) - |> window(every: 500ms, createEmpty: true) - |> first() -`, - op: "readWindow(first)", - want: ` 
-#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,_result,table,_start,_stop,_time,_value,_field,_measurement,k -,_result,0,1970-01-01T00:00:00Z,1970-01-01T00:00:00.5Z,1970-01-01T00:00:00Z,0,f,m0,k0 - -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,1,1970-01-01T00:00:00.5Z,1970-01-01T00:00:01Z,,,f,m0,k0 -,_result,table,_start,_stop,_time,_value,_field,_measurement,k - -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,_result,table,_start,_stop,_time,_value,_field,_measurement,k -,_result,2,1970-01-01T00:00:01Z,1970-01-01T00:00:01.5Z,1970-01-01T00:00:01Z,1,f,m0,k0 - -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,3,1970-01-01T00:00:01.5Z,1970-01-01T00:00:02Z,,,f,m0,k0 -,_result,table,_start,_stop,_time,_value,_field,_measurement,k -`, - }, - { - name: "window aggregate first", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:02Z) - |> aggregateWindow(every: 500ms, fn: first, createEmpty: false) -`, - op: "readWindow(first)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:00Z,1970-01-01T00:00:02Z,1970-01-01T00:00:00.5Z,0,f,m0,k0 -,,0,1970-01-01T00:00:00Z,1970-01-01T00:00:02Z,1970-01-01T00:00:01.5Z,1,f,m0,k0 -`, - }, - { - name: "window min", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:05Z, stop: 1970-01-01T00:00:20Z) - |> window(every: 3s) - |> min() -`, - op: "readWindow(min)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:05Z,1970-01-01T00:00:06Z,1970-01-01T00:00:05Z,5,f,m0,k0 -,,1,1970-01-01T00:00:06Z,1970-01-01T00:00:09Z,1970-01-01T00:00:08Z,0,f,m0,k0 
-,,2,1970-01-01T00:00:09Z,1970-01-01T00:00:12Z,1970-01-01T00:00:09Z,6,f,m0,k0 -,,3,1970-01-01T00:00:12Z,1970-01-01T00:00:15Z,1970-01-01T00:00:12Z,5,f,m0,k0 -,,4,1970-01-01T00:00:15Z,1970-01-01T00:00:18Z,1970-01-01T00:00:15Z,5,f,m0,k0 -`, - }, - { - name: "bare min", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:05Z, stop: 1970-01-01T00:00:20Z) - |> min() -`, - op: "readWindow(min)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:05Z,1970-01-01T00:00:20Z,1970-01-01T00:00:08Z,0,f,m0,k0 -`, - }, - { - name: "window empty min", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:12Z) - |> window(every: 3s, createEmpty: true) - |> min() -`, - op: "readWindow(min)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,_result,table,_start,_stop,_time,_value,_field,_measurement,k -,_result,0,1970-01-01T00:00:00Z,1970-01-01T00:00:03Z,1970-01-01T00:00:00Z,0,f,m0,k0 - -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,1,1970-01-01T00:00:03Z,1970-01-01T00:00:06Z,,,f,m0,k0 -,_result,table,_start,_stop,_time,_value,_field,_measurement,k - -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,_result,table,_start,_stop,_time,_value,_field,_measurement,k -,_result,2,1970-01-01T00:00:06Z,1970-01-01T00:00:09Z,1970-01-01T00:00:08Z,0,f,m0,k0 - -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,3,1970-01-01T00:00:09Z,1970-01-01T00:00:12Z,,,f,m0,k0 -,_result,table,_start,_stop,_time,_value,_field,_measurement,k -`, - }, - { - name: "window aggregate min", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:12Z) - |> aggregateWindow(every: 3s, fn: min, createEmpty: false) -`, - op: "readWindow(min)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, 
-,result,table,_start,_stop,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:00Z,1970-01-01T00:00:12Z,1970-01-01T00:00:03Z,0,f,m0,k0 -,,0,1970-01-01T00:00:00Z,1970-01-01T00:00:12Z,1970-01-01T00:00:09Z,0,f,m0,k0 -`, - }, - { - name: "window max", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:05Z, stop: 1970-01-01T00:00:20Z) - |> window(every: 3s) - |> max() -`, - op: "readWindow(max)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:05Z,1970-01-01T00:00:06Z,1970-01-01T00:00:05Z,5,f,m0,k0 -,,1,1970-01-01T00:00:06Z,1970-01-01T00:00:09Z,1970-01-01T00:00:06Z,6,f,m0,k0 -,,2,1970-01-01T00:00:09Z,1970-01-01T00:00:12Z,1970-01-01T00:00:11Z,7,f,m0,k0 -,,3,1970-01-01T00:00:12Z,1970-01-01T00:00:15Z,1970-01-01T00:00:14Z,9,f,m0,k0 -,,4,1970-01-01T00:00:15Z,1970-01-01T00:00:18Z,1970-01-01T00:00:15Z,5,f,m0,k0 -`, - }, - { - name: "bare max", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:05Z, stop: 1970-01-01T00:00:20Z) - |> max() -`, - op: "readWindow(max)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:05Z,1970-01-01T00:00:20Z,1970-01-01T00:00:14Z,9,f,m0,k0 -`, - }, - { - name: "window empty max", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:12Z) - |> window(every: 3s, createEmpty: true) - |> max() -`, - op: "readWindow(max)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,_result,table,_start,_stop,_time,_value,_field,_measurement,k -,_result,0,1970-01-01T00:00:00Z,1970-01-01T00:00:03Z,1970-01-01T00:00:02Z,2,f,m0,k0 - -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,1,1970-01-01T00:00:03Z,1970-01-01T00:00:06Z,,,f,m0,k0 
-,_result,table,_start,_stop,_time,_value,_field,_measurement,k - -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,_result,table,_start,_stop,_time,_value,_field,_measurement,k -,_result,2,1970-01-01T00:00:06Z,1970-01-01T00:00:09Z,1970-01-01T00:00:06Z,6,f,m0,k0 - -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,3,1970-01-01T00:00:09Z,1970-01-01T00:00:12Z,,,f,m0,k0 -,_result,table,_start,_stop,_time,_value,_field,_measurement,k -`, - }, - { - name: "window aggregate max", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:12Z) - |> aggregateWindow(every: 3s, fn: max, createEmpty: false) -`, - op: "readWindow(max)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,false,true,true,true -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:00Z,1970-01-01T00:00:12Z,1970-01-01T00:00:03Z,2,f,m0,k0 -,,0,1970-01-01T00:00:00Z,1970-01-01T00:00:12Z,1970-01-01T00:00:09Z,6,f,m0,k0 -`, - }, - { - name: "window count removes empty series", - data: []string{ - "m,tag=a f=0i 1500000000", - "m,tag=b f=1i 2500000000", - "m,tag=c f=2i 3500000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:01Z, stop: 1970-01-01T00:00:02Z) - |> window(every: 500ms, createEmpty: true) - |> count() -`, - op: "readWindow(count)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,true,true,true -#default,_result,,,,,,, -,result,table,_start,_stop,_value,_field,_measurement,tag -,_result,0,1970-01-01T00:00:01Z,1970-01-01T00:00:01.5Z,0,f,m,a -,_result,1,1970-01-01T00:00:01.5Z,1970-01-01T00:00:02Z,1,f,m,a -`, - }, - { - name: "count", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:15Z) - |> aggregateWindow(every: 5s, fn: count) - |> drop(columns: ["_start", "_stop"]) -`, - op: "readWindow(count)", - want: ` -#datatype,string,long,dateTime:RFC3339,long,string,string,string -#group,false,false,false,false,true,true,true -#default,_result,,,,,, -,result,table,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:05Z,5,f,m0,k0 -,,0,1970-01-01T00:00:10Z,5,f,m0,k0 -,,0,1970-01-01T00:00:15Z,5,f,m0,k0 -`, - }, - { - name: "window offset count", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 
7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:15Z) - |> window(every: 5s, offset: 2s) - |> count() -`, - op: "readWindow(count)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,true,true,true -#default,_result,,,,,,, -,result,table,_start,_stop,_value,_field,_measurement,k -,,0,1970-01-01T00:00:00Z,1970-01-01T00:00:02Z,2,f,m0,k0 -,,1,1970-01-01T00:00:02Z,1970-01-01T00:00:07Z,5,f,m0,k0 -,,2,1970-01-01T00:00:07Z,1970-01-01T00:00:12Z,5,f,m0,k0 -,,3,1970-01-01T00:00:12Z,1970-01-01T00:00:15Z,3,f,m0,k0 -`, - }, - { - name: "count with nulls", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:15Z) - |> aggregateWindow(every: 5s, fn: count) - |> drop(columns: ["_start", "_stop"]) -`, - op: "readWindow(count)", - want: ` -#datatype,string,long,dateTime:RFC3339,long,string,string,string -#group,false,false,false,false,true,true,true -#default,_result,,,,,, -,result,table,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:05Z,5,f,m0,k0 -,,0,1970-01-01T00:00:10Z,0,f,m0,k0 -,,0,1970-01-01T00:00:15Z,5,f,m0,k0 -`, - }, - { - name: "bare count", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:15Z) - |> count() - |> drop(columns: ["_start", "_stop"]) -`, - op: "readWindow(count)", - want: ` -#group,false,false,false,true,true,true -#datatype,string,long,long,string,string,string -#default,_result,,,,, -,result,table,_value,_field,_measurement,k -,,0,15,f,m0,k0 -`, - }, - { - name: "window sum removes empty series", - data: []string{ - "m,tag=a f=1i 1500000000", - "m,tag=a f=2i 1600000000", - "m,tag=b f=3i 2500000000", - "m,tag=c f=4i 3500000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:01Z, stop: 1970-01-01T00:00:02Z) - |> window(every: 500ms, createEmpty: true) - |> sum() -`, - op: "readWindow(sum)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,true,true,true -#default,_result,,,,,,, -,result,table,_start,_stop,_value,_field,_measurement,tag -,_result,0,1970-01-01T00:00:01Z,1970-01-01T00:00:01.5Z,,f,m,a -,_result,1,1970-01-01T00:00:01.5Z,1970-01-01T00:00:02Z,3,f,m,a -`, - }, - { - name: "sum", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - 
"m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:15Z) - |> aggregateWindow(every: 5s, fn: sum) - |> drop(columns: ["_start", "_stop"]) -`, - op: "readWindow(sum)", - want: ` -#datatype,string,long,dateTime:RFC3339,long,string,string,string -#group,false,false,false,false,true,true,true -#default,_result,,,,,, -,result,table,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:05Z,10,f,m0,k0 -,,0,1970-01-01T00:00:10Z,22,f,m0,k0 -,,0,1970-01-01T00:00:15Z,35,f,m0,k0 -`, - }, - { - name: "window offset sum", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:15Z) - |> window(every: 5s, offset: 2s) - |> sum() -`, - op: "readWindow(sum)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,long,string,string,string -#group,false,false,true,true,false,true,true,true -#default,_result,,,,,,, -,result,table,_start,_stop,_value,_field,_measurement,k -,,0,1970-01-01T00:00:00Z,1970-01-01T00:00:02Z,1,f,m0,k0 -,,1,1970-01-01T00:00:02Z,1970-01-01T00:00:07Z,20,f,m0,k0 -,,2,1970-01-01T00:00:07Z,1970-01-01T00:00:12Z,24,f,m0,k0 -,,3,1970-01-01T00:00:12Z,1970-01-01T00:00:15Z,22,f,m0,k0 -`, - }, - { - name: "sum with nulls", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:15Z) - |> aggregateWindow(every: 5s, fn: sum) - |> drop(columns: ["_start", "_stop"]) -`, - op: "readWindow(sum)", - want: ` -#datatype,string,long,dateTime:RFC3339,long,string,string,string -#group,false,false,false,false,true,true,true -#default,_result,,,,,, -,result,table,_time,_value,_field,_measurement,k -,,0,1970-01-01T00:00:05Z,10,f,m0,k0 -,,0,1970-01-01T00:00:10Z,,f,m0,k0 -,,0,1970-01-01T00:00:15Z,35,f,m0,k0 -`, - }, - { - name: "bare sum", - data: []string{ - "m0,k=k0 f=0i 0", - "m0,k=k0 f=1i 1000000000", - "m0,k=k0 f=2i 2000000000", - "m0,k=k0 f=3i 3000000000", - "m0,k=k0 f=4i 4000000000", - "m0,k=k0 f=5i 5000000000", - "m0,k=k0 f=6i 6000000000", - "m0,k=k0 f=5i 7000000000", - "m0,k=k0 f=0i 8000000000", - "m0,k=k0 f=6i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=5i 12000000000", - "m0,k=k0 f=8i 13000000000", - "m0,k=k0 f=9i 14000000000", - "m0,k=k0 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 
1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:15Z) - |> sum() - |> drop(columns: ["_start", "_stop"]) -`, - op: "readWindow(sum)", - want: ` -#group,false,false,false,true,true,true -#datatype,string,long,long,string,string,string -#default,_result,,,,, -,result,table,_value,_field,_measurement,k -,,0,67,f,m0,k0 -`, - }, - { - name: "bare mean", - data: []string{ - "m0,k=k0,kk=kk0 f=5 0", - "m0,k=k0,kk=kk0 f=6 5000000000", - "m0,k=k0,kk=kk0 f=7 10000000000", - "m0,k=k0,kk=kk0 f=9 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 0) - |> mean() - |> keep(columns: ["_value"]) -`, - op: "readWindow(mean)", - want: ` -#datatype,string,long,double -#group,false,false,false -#default,_result,, -,result,table,_value -,,0,6.75 -`, - }, - { - name: "window mean", - data: []string{ - "m0,k=k0 f=1i 5000000000", - "m0,k=k0 f=2i 6000000000", - "m0,k=k0 f=3i 7000000000", - "m0,k=k0 f=4i 8000000000", - "m0,k=k0 f=5i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=8i 12000000000", - "m0,k=k0 f=9i 13000000000", - "m0,k=k0 f=10i 14000000000", - "m0,k=k0 f=11i 15000000000", - "m0,k=k0 f=12i 16000000000", - "m0,k=k0 f=13i 17000000000", - "m0,k=k0 f=14i 18000000000", - "m0,k=k0 f=16i 19000000000", - "m0,k=k0 f=17i 20000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:05Z, stop: 1970-01-01T00:00:20Z) - |> aggregateWindow(fn: mean, every: 5s) - |> keep(columns: ["_time", "_value"]) -`, - op: "readWindow(mean)", - want: ` -#datatype,string,long,dateTime:RFC3339,double -#group,false,false,false,false -#default,_result,,, -,result,table,_time,_value -,,0,1970-01-01T00:00:10Z,3 -,,0,1970-01-01T00:00:15Z,8 -,,0,1970-01-01T00:00:20Z,13.2 -`, - }, - { - name: "window mean offset", - data: []string{ - "m0,k=k0 f=1i 5000000000", - "m0,k=k0 f=2i 6000000000", - "m0,k=k0 f=3i 7000000000", - "m0,k=k0 f=4i 8000000000", - "m0,k=k0 f=5i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=8i 12000000000", - "m0,k=k0 f=9i 13000000000", - "m0,k=k0 f=10i 14000000000", - "m0,k=k0 f=11i 15000000000", - "m0,k=k0 f=12i 16000000000", - "m0,k=k0 f=13i 17000000000", - "m0,k=k0 f=14i 18000000000", - "m0,k=k0 f=16i 19000000000", - "m0,k=k0 f=17i 20000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:05Z, stop: 1970-01-01T00:00:20Z) - |> window(every: 5s, offset: 1s) - |> mean() -`, - op: "readWindow(mean)", - want: ` -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,double -#group,false,false,true,true,true,true,true,false -#default,_result,,,,,,, -,result,table,_start,_stop,_field,_measurement,k,_value -,,0,1970-01-01T00:00:05Z,1970-01-01T00:00:06Z,f,m0,k0,1 -,,1,1970-01-01T00:00:06Z,1970-01-01T00:00:11Z,f,m0,k0,4 -,,2,1970-01-01T00:00:11Z,1970-01-01T00:00:16Z,f,m0,k0,9 -,,3,1970-01-01T00:00:16Z,1970-01-01T00:00:20Z,f,m0,k0,13.75 -`, - }, - { - name: "window mean offset with duplicate and unwindow", - data: []string{ - "m0,k=k0 f=1i 5000000000", - "m0,k=k0 f=2i 6000000000", - "m0,k=k0 f=3i 7000000000", - "m0,k=k0 f=4i 8000000000", - "m0,k=k0 f=5i 9000000000", - "m0,k=k0 f=6i 10000000000", - "m0,k=k0 f=7i 11000000000", - "m0,k=k0 f=8i 12000000000", - "m0,k=k0 f=9i 13000000000", - "m0,k=k0 f=10i 14000000000", - "m0,k=k0 f=11i 15000000000", - "m0,k=k0 f=12i 16000000000", - "m0,k=k0 f=13i 17000000000", - "m0,k=k0 f=14i 18000000000", - "m0,k=k0 f=16i 19000000000", - "m0,k=k0 f=17i 20000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 
1970-01-01T00:00:05Z, stop: 1970-01-01T00:00:20Z) - |> window(every: 5s, offset: 1s) - |> mean() - |> duplicate(column: "_stop", as: "_time") - |> window(every: inf) - |> keep(columns: ["_time", "_value"]) -`, - op: "readWindow(mean)", - want: ` -#datatype,string,long,dateTime:RFC3339,double -#group,false,false,false,false -#default,_result,,, -,result,table,_time,_value -,,0,1970-01-01T00:00:06Z,1 -,,0,1970-01-01T00:00:11Z,4 -,,0,1970-01-01T00:00:16Z,9 -,,0,1970-01-01T00:00:20Z,13.75 -`, - }, - { - name: "group first", - data: []string{ - "m0,k=k0,kk=kk0 f=0i 0", - "m0,k=k0,kk=kk1 f=1i 1000000000", - "m0,k=k0,kk=kk0 f=2i 2000000000", - "m0,k=k0,kk=kk1 f=3i 3000000000", - "m0,k=k0,kk=kk0 f=4i 4000000000", - "m0,k=k0,kk=kk1 f=5i 5000000000", - "m0,k=k0,kk=kk0 f=6i 6000000000", - "m0,k=k0,kk=kk1 f=5i 7000000000", - "m0,k=k0,kk=kk0 f=0i 8000000000", - "m0,k=k0,kk=kk1 f=6i 9000000000", - "m0,k=k0,kk=kk0 f=6i 10000000000", - "m0,k=k0,kk=kk1 f=7i 11000000000", - "m0,k=k0,kk=kk0 f=5i 12000000000", - "m0,k=k0,kk=kk1 f=8i 13000000000", - "m0,k=k0,kk=kk0 f=9i 14000000000", - "m0,k=k0,kk=kk1 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 0) - |> group(columns: ["k"]) - |> first() - |> keep(columns: ["_time", "_value"]) -`, - op: "readGroup(first)", - want: ` -#datatype,string,long,dateTime:RFC3339,long -#group,false,false,false,false -#default,_result,,, -,result,table,_time,_value -,,0,1970-01-01T00:00:00.00Z,0 -`, - skip: "https://github.com/influxdata/idpe/issues/8828", - }, - { - name: "group none first", - data: []string{ - "m0,k=k0,kk=kk0 f=0i 0", - "m0,k=k0,kk=kk1 f=1i 1000000000", - "m0,k=k0,kk=kk0 f=2i 2000000000", - "m0,k=k0,kk=kk1 f=3i 3000000000", - "m0,k=k0,kk=kk0 f=4i 4000000000", - "m0,k=k0,kk=kk1 f=5i 5000000000", - "m0,k=k0,kk=kk0 f=6i 6000000000", - "m0,k=k0,kk=kk1 f=5i 7000000000", - "m0,k=k0,kk=kk0 f=0i 8000000000", - "m0,k=k0,kk=kk1 f=6i 9000000000", - "m0,k=k0,kk=kk0 f=6i 10000000000", - "m0,k=k0,kk=kk1 f=7i 11000000000", - "m0,k=k0,kk=kk0 f=5i 12000000000", - "m0,k=k0,kk=kk1 f=8i 13000000000", - "m0,k=k0,kk=kk0 f=9i 14000000000", - "m0,k=k0,kk=kk1 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 0) - |> group() - |> first() - |> keep(columns: ["_time", "_value"]) -`, - op: "readGroup(first)", - want: ` -#datatype,string,long,dateTime:RFC3339,long -#group,false,false,false,false -#default,_result,,, -,result,table,_time,_value -,,0,1970-01-01T00:00:00.00Z,0 -`, - skip: "https://github.com/influxdata/idpe/issues/8828", - }, - { - name: "group last", - data: []string{ - "m0,k=k0,kk=kk0 f=0i 0", - "m0,k=k0,kk=kk1 f=1i 1000000000", - "m0,k=k0,kk=kk0 f=2i 2000000000", - "m0,k=k0,kk=kk1 f=3i 3000000000", - "m0,k=k0,kk=kk0 f=4i 4000000000", - "m0,k=k0,kk=kk1 f=5i 5000000000", - "m0,k=k0,kk=kk0 f=6i 6000000000", - "m0,k=k0,kk=kk1 f=5i 7000000000", - "m0,k=k0,kk=kk0 f=0i 8000000000", - "m0,k=k0,kk=kk1 f=6i 9000000000", - "m0,k=k0,kk=kk0 f=6i 10000000000", - "m0,k=k0,kk=kk1 f=7i 11000000000", - "m0,k=k0,kk=kk0 f=5i 12000000000", - "m0,k=k0,kk=kk1 f=8i 13000000000", - "m0,k=k0,kk=kk0 f=9i 14000000000", - "m0,k=k0,kk=kk1 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 0) - |> group(columns: ["k"]) - |> last() - |> keep(columns: ["_time", "_value"]) -`, - op: "readGroup(last)", - want: ` -#datatype,string,long,dateTime:RFC3339,long -#group,false,false,false,false -#default,_result,,, -,result,table,_time,_value -,,0,1970-01-01T00:00:15.00Z,5 -`, - skip: 
"https://github.com/influxdata/idpe/issues/8828", - }, - { - name: "group none last", - data: []string{ - "m0,k=k0,kk=kk0 f=0i 0", - "m0,k=k0,kk=kk1 f=1i 1000000000", - "m0,k=k0,kk=kk0 f=2i 2000000000", - "m0,k=k0,kk=kk1 f=3i 3000000000", - "m0,k=k0,kk=kk0 f=4i 4000000000", - "m0,k=k0,kk=kk1 f=5i 5000000000", - "m0,k=k0,kk=kk0 f=6i 6000000000", - "m0,k=k0,kk=kk1 f=5i 7000000000", - "m0,k=k0,kk=kk0 f=0i 8000000000", - "m0,k=k0,kk=kk1 f=6i 9000000000", - "m0,k=k0,kk=kk0 f=6i 10000000000", - "m0,k=k0,kk=kk1 f=7i 11000000000", - "m0,k=k0,kk=kk0 f=5i 12000000000", - "m0,k=k0,kk=kk1 f=8i 13000000000", - "m0,k=k0,kk=kk0 f=9i 14000000000", - "m0,k=k0,kk=kk1 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 0) - |> group() - |> last() - |> keep(columns: ["_time", "_value"]) -`, - op: "readGroup(last)", - want: ` -#datatype,string,long,dateTime:RFC3339,long -#group,false,false,false,false -#default,_result,,, -,result,table,_time,_value -,,0,1970-01-01T00:00:15.00Z,5 -`, - skip: "https://github.com/influxdata/idpe/issues/8828", - }, - { - name: "count group none", - data: []string{ - "m0,k=k0,kk=kk0 f=0i 0", - "m0,k=k0,kk=kk1 f=1i 1000000000", - "m0,k=k0,kk=kk0 f=2i 2000000000", - "m0,k=k0,kk=kk1 f=3i 3000000000", - "m0,k=k0,kk=kk0 f=4i 4000000000", - "m0,k=k0,kk=kk1 f=5i 5000000000", - "m0,k=k0,kk=kk0 f=6i 6000000000", - "m0,k=k0,kk=kk1 f=5i 7000000000", - "m0,k=k0,kk=kk0 f=0i 8000000000", - "m0,k=k0,kk=kk1 f=6i 9000000000", - "m0,k=k0,kk=kk0 f=6i 10000000000", - "m0,k=k0,kk=kk1 f=7i 11000000000", - "m0,k=k0,kk=kk0 f=5i 12000000000", - "m0,k=k0,kk=kk1 f=8i 13000000000", - "m0,k=k0,kk=kk0 f=9i 14000000000", - "m0,k=k0,kk=kk1 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:15Z) - |> group() - |> count() - |> drop(columns: ["_start", "_stop"]) -`, - op: "readGroup(count)", - want: ` -#datatype,string,long,long -#group,false,false,false -#default,_result,, -,result,table,_value -,,0,15 -`, - skip: "https://github.com/influxdata/idpe/issues/8828", - }, - { - name: "count group", - data: []string{ - "m0,k=k0,kk=kk0 f=0i 0", - "m0,k=k0,kk=kk1 f=1i 1000000000", - "m0,k=k0,kk=kk0 f=2i 2000000000", - "m0,k=k0,kk=kk1 f=3i 3000000000", - "m0,k=k0,kk=kk0 f=4i 4000000000", - "m0,k=k0,kk=kk1 f=5i 5000000000", - "m0,k=k0,kk=kk0 f=6i 6000000000", - "m0,k=k0,kk=kk1 f=5i 7000000000", - "m0,k=k0,kk=kk0 f=0i 8000000000", - "m0,k=k0,kk=kk1 f=6i 9000000000", - "m0,k=k0,kk=kk0 f=6i 10000000000", - "m0,k=k0,kk=kk1 f=7i 11000000000", - "m0,k=k0,kk=kk0 f=5i 12000000000", - "m0,k=k0,kk=kk1 f=8i 13000000000", - "m0,k=k0,kk=kk0 f=9i 14000000000", - "m0,k=k0,kk=kk1 f=5i 15000000000", - }, - op: "readGroup(count)", - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:15Z) - |> group(columns: ["kk"]) - |> count() - |> drop(columns: ["_start", "_stop"]) -`, - want: ` -#datatype,string,long,string,long -#group,false,false,true,false -#default,_result,,, -,result,table,kk,_value -,,0,kk0,8 -,,1,kk1,7 -`, - skip: "https://github.com/influxdata/idpe/issues/8828", - }, - { - name: "sum group none", - data: []string{ - "m0,k=k0,kk=kk0 f=0i 0", - "m0,k=k0,kk=kk1 f=1i 1000000000", - "m0,k=k0,kk=kk0 f=2i 2000000000", - "m0,k=k0,kk=kk1 f=3i 3000000000", - "m0,k=k0,kk=kk0 f=4i 4000000000", - "m0,k=k0,kk=kk1 f=5i 5000000000", - "m0,k=k0,kk=kk0 f=6i 6000000000", - "m0,k=k0,kk=kk1 f=5i 7000000000", - "m0,k=k0,kk=kk0 f=0i 8000000000", - "m0,k=k0,kk=kk1 f=6i 9000000000", - 
"m0,k=k0,kk=kk0 f=6i 10000000000", - "m0,k=k0,kk=kk1 f=7i 11000000000", - "m0,k=k0,kk=kk0 f=5i 12000000000", - "m0,k=k0,kk=kk1 f=8i 13000000000", - "m0,k=k0,kk=kk0 f=9i 14000000000", - "m0,k=k0,kk=kk1 f=5i 15000000000", - }, - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:15Z) - |> group() - |> sum() - |> drop(columns: ["_start", "_stop"]) -`, - op: "readGroup(sum)", - want: ` -#datatype,string,long,long -#group,false,false,false -#default,_result,, -,result,table,_value -,,0,67 -`, - skip: "https://github.com/influxdata/idpe/issues/8828", - }, - { - name: "sum group", - data: []string{ - "m0,k=k0,kk=kk0 f=0i 0", - "m0,k=k0,kk=kk1 f=1i 1000000000", - "m0,k=k0,kk=kk0 f=2i 2000000000", - "m0,k=k0,kk=kk1 f=3i 3000000000", - "m0,k=k0,kk=kk0 f=4i 4000000000", - "m0,k=k0,kk=kk1 f=5i 5000000000", - "m0,k=k0,kk=kk0 f=6i 6000000000", - "m0,k=k0,kk=kk1 f=5i 7000000000", - "m0,k=k0,kk=kk0 f=0i 8000000000", - "m0,k=k0,kk=kk1 f=6i 9000000000", - "m0,k=k0,kk=kk0 f=6i 10000000000", - "m0,k=k0,kk=kk1 f=7i 11000000000", - "m0,k=k0,kk=kk0 f=5i 12000000000", - "m0,k=k0,kk=kk1 f=8i 13000000000", - "m0,k=k0,kk=kk0 f=9i 14000000000", - "m0,k=k0,kk=kk1 f=5i 15000000000", - }, - op: "readGroup(sum)", - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:15Z) - |> group(columns: ["kk"]) - |> sum() - |> drop(columns: ["_start", "_stop"]) -`, - want: ` -#datatype,string,long,string,long -#group,false,false,true,false -#default,_result,,, -,result,table,kk,_value -,,0,kk0,32 -,,1,kk1,35 -`, - skip: "https://github.com/influxdata/idpe/issues/8828", - }, - { - name: "min group", - data: []string{ - "m0,k=k0,kk=kk0 f=0i 0", - "m0,k=k0,kk=kk1 f=1i 1000000000", - "m0,k=k0,kk=kk0 f=2i 2000000000", - "m0,k=k0,kk=kk1 f=3i 3000000000", - "m0,k=k0,kk=kk0 f=4i 4000000000", - "m0,k=k0,kk=kk1 f=5i 5000000000", - "m0,k=k0,kk=kk0 f=6i 6000000000", - "m0,k=k0,kk=kk1 f=5i 7000000000", - "m0,k=k0,kk=kk0 f=0i 8000000000", - "m0,k=k0,kk=kk1 f=6i 9000000000", - "m0,k=k0,kk=kk0 f=6i 10000000000", - "m0,k=k0,kk=kk1 f=7i 11000000000", - "m0,k=k0,kk=kk0 f=5i 12000000000", - "m0,k=k0,kk=kk1 f=8i 13000000000", - "m0,k=k0,kk=kk0 f=9i 14000000000", - "m0,k=k0,kk=kk1 f=5i 15000000000", - }, - op: "readGroup(min)", - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:15Z) - |> group(columns: ["kk"]) - |> min() - |> keep(columns: ["kk", "_value"]) -`, - want: ` -#datatype,string,long,string,long -#group,false,false,true,false -#default,_result,,, -,result,table,kk,_value -,,0,kk0,0 -,,1,kk1,1 -`, - skip: "https://github.com/influxdata/idpe/issues/8828", - }, - { - name: "max group", - data: []string{ - "m0,k=k0,kk=kk0 f=0i 0", - "m0,k=k0,kk=kk1 f=1i 1000000000", - "m0,k=k0,kk=kk0 f=2i 2000000000", - "m0,k=k0,kk=kk1 f=3i 3000000000", - "m0,k=k0,kk=kk0 f=4i 4000000000", - "m0,k=k0,kk=kk1 f=5i 5000000000", - "m0,k=k0,kk=kk0 f=6i 6000000000", - "m0,k=k0,kk=kk1 f=5i 7000000000", - "m0,k=k0,kk=kk0 f=0i 8000000000", - "m0,k=k0,kk=kk1 f=6i 9000000000", - "m0,k=k0,kk=kk0 f=6i 10000000000", - "m0,k=k0,kk=kk1 f=7i 11000000000", - "m0,k=k0,kk=kk0 f=5i 12000000000", - "m0,k=k0,kk=kk1 f=8i 13000000000", - "m0,k=k0,kk=kk0 f=9i 14000000000", - "m0,k=k0,kk=kk1 f=5i 15000000000", - }, - op: "readGroup(max)", - query: ` -from(bucket: v.bucket) - |> range(start: 1970-01-01T00:00:00Z, stop: 1970-01-01T00:00:15Z) - |> group(columns: ["kk"]) - |> max() - |> keep(columns: ["kk", "_value"]) -`, - want: ` 
-#datatype,string,long,string,long -#group,false,false,true,false -#default,_result,,, -,result,table,kk,_value -,,0,kk0,9 -,,1,kk1,8 -`, - skip: "https://github.com/influxdata/idpe/issues/8828", - }, - } - for _, tc := range testcases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - if tc.skip != "" { - t.Skip(tc.skip) - } - - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - - l.WritePointsOrFail(t, strings.Join(tc.data, "\n")) - - queryStr := "v = {bucket: " + "\"" + l.Bucket.Name + "\"" + "}\n" + tc.query - - res := l.MustExecuteQuery(queryStr) - defer res.Done() - got := flux.NewSliceResultIterator(res.Results) - defer got.Release() - - dec := csv.NewMultiResultDecoder(csv.ResultDecoderConfig{}) - want, err := dec.Decode(io.NopCloser(strings.NewReader(tc.want))) - if err != nil { - t.Fatal(err) - } - defer want.Release() - - if err := executetest.EqualResultIterators(want, got); err != nil { - t.Fatal(err) - } - if want, got := uint64(1), l.NumReads(t, tc.op); want != got { - t.Fatalf("unexpected sample count -want/+got:\n\t- %d\n\t+ %d", want, got) - } - }) - } -} - -func TestLauncher_Query_Buckets_MultiplePages(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - - // Create a large number of buckets. This is above the default - // page size of 20. - for i := 0; i < 50; i++ { - b := &influxdb.Bucket{ - OrgID: l.Org.ID, - Name: fmt.Sprintf("b%02d", i), - } - if err := l.BucketService(t).CreateBucket(ctx, b); err != nil { - t.Fatal(err) - } - } - - var sb strings.Builder - sb.WriteString(` -#datatype,string,long,string -#group,false,false,false -#default,_result,, -,result,table,name -,,0,BUCKET -,,0,_monitoring -,,0,_tasks -`) - for i := 0; i < 50; i++ { - _, _ = fmt.Fprintf(&sb, ",,0,b%02d\n", i) - } - data := sb.String() - - bucketsQuery := ` -buckets() - |> keep(columns: ["name"]) - |> sort(columns: ["name"]) -` - res := l.MustExecuteQuery(bucketsQuery) - defer res.Done() - - firstResult := func(ri flux.ResultIterator) flux.Result { - ri.More() - return ri.Next() - } - got := firstResult(flux.NewSliceResultIterator(res.Results)) - - want, err := csv.NewResultDecoder(csv.ResultDecoderConfig{}).Decode(strings.NewReader(data)) - if err != nil { - t.Fatal(err) - } - - if diff := table.Diff(want.Tables(), got.Tables()); diff != "" { - t.Fatalf("unexpected output -want/+got:\n%s", diff) - } -} diff --git a/cmd/influxd/launcher/remote_to_test.go b/cmd/influxd/launcher/remote_to_test.go deleted file mode 100644 index 9ea54cddb1a..00000000000 --- a/cmd/influxd/launcher/remote_to_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package launcher_test - -import ( - "context" - "fmt" - "testing" - - "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" - "github.com/stretchr/testify/require" -) - -func TestRemoteTo(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - // Boot 2 servers. - l1 := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l1.ShutdownOrFail(t, ctx) - l2 := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l2.ShutdownOrFail(t, ctx) - - // Run a flux script in the 1st server, writing data to the 2nd. 
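- // The to() call below is pointed at the second launcher by passing its URL, token, and org explicitly, so the points land on l2 rather than on the local server.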
- q1 := fmt.Sprintf(`import "array" - -option now = () => (2030-01-01T00:01:00Z) - -rows = [ - {_time: now(), _measurement: "test", _field: "f", _value: 1.0}, - {_time: now(), _measurement: "test", _field: "v", _value: -123.0}, - {_time: now(), _measurement: "test2", _field: "f", _value: 0.03} -] - -array.from(rows) |> to(bucket: "%s", host: "%s", token: "%s", org: "%s") -`, l2.Bucket.Name, l2.URL().String(), l2.Auth.Token, l2.Org.Name) - _ = l1.FluxQueryOrFail(t, l1.Org, l1.Auth.Token, q1) - - // Query the 2nd server and check that the points landed. - q2 := fmt.Sprintf(`from(bucket:"%s") - |> range(start: 2030-01-01T00:00:00Z, stop: 2030-01-02T00:00:00Z) - |> keep(columns: ["_measurement", "_field", "_value"]) -`, l2.Bucket.Name) - exp := `,result,table,_value,_field,_measurement` + "\r\n" + - `,_result,0,1,f,test` + "\r\n" + - `,_result,1,0.03,f,test2` + "\r\n" + - `,_result,2,-123,v,test` + "\r\n\r\n" - res := l2.FluxQueryOrFail(t, l2.Org, l2.Auth.Token, q2) - require.Equal(t, exp, res) -} - -func TestRemoteTo_Experimental(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - // Boot 2 servers. - l1 := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l1.ShutdownOrFail(t, ctx) - l2 := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l2.ShutdownOrFail(t, ctx) - - // Run a flux script in the 1st server, writing data to the 2nd. - q1 := fmt.Sprintf(`import "array" -import "experimental" - -option now = () => (2030-01-01T00:01:00Z) - -testRows = [ - {_time: now(), _field: "f", _value: 1.0}, - {_time: now(), _field: "v", _value: -123.0}, -] - -test2Rows = [ - {_time: now(), _field: "f", _value: 0.03} -] - -testTable = array.from(rows: testRows) - |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value") - |> map(fn: (r) => ({r with _measurement: "test"})) - -test2Table = array.from(rows: test2Rows) - |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value") - |> map(fn: (r) => ({r with _measurement: "test2"})) - -union(tables: [testTable, test2Table]) |> group(columns: ["_measurement"]) - |> experimental.to(bucket: "%s", host: "%s", token: "%s", org: "%s") -`, l2.Bucket.Name, l2.URL().String(), l2.Auth.Token, l2.Org.Name) - _ = l1.FluxQueryOrFail(t, l1.Org, l1.Auth.Token, q1) - - // Query the 2nd server and check that the points landed. 
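- // keep() restricts the output to the measurement, field, and value columns, so the assertion below does not depend on timestamps or window bounds.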
- q2 := fmt.Sprintf(`from(bucket:"%s") - |> range(start: 2030-01-01T00:00:00Z, stop: 2030-01-02T00:00:00Z) - |> keep(columns: ["_measurement", "_field", "_value"]) -`, l2.Bucket.Name) - exp := `,result,table,_value,_field,_measurement` + "\r\n" + - `,_result,0,1,f,test` + "\r\n" + - `,_result,1,0.03,f,test2` + "\r\n" + - `,_result,2,-123,v,test` + "\r\n\r\n" - res := l2.FluxQueryOrFail(t, l2.Org, l2.Auth.Token, q2) - require.Equal(t, exp, res) -} diff --git a/cmd/influxd/launcher/replication_test.go b/cmd/influxd/launcher/replication_test.go deleted file mode 100644 index 2a46c82fa43..00000000000 --- a/cmd/influxd/launcher/replication_test.go +++ /dev/null @@ -1,503 +0,0 @@ -package launcher_test - -import ( - "bytes" - "compress/gzip" - "context" - "fmt" - "io" - "math/rand" - nethttp "net/http" - "net/http/httptest" - "net/http/httputil" - "strings" - "sync" - "testing" - "time" - - "github.com/influxdata/influx-cli/v2/api" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" - "github.com/stretchr/testify/require" -) - -func TestValidateReplication_Valid(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - client := l.APIClient(t) - - // Create a "remote" connection to the launcher from itself. - remote, err := client.RemoteConnectionsApi.PostRemoteConnection(ctx). - RemoteConnectionCreationRequest(api.RemoteConnectionCreationRequest{ - Name: "self", - OrgID: l.Org.ID.String(), - RemoteURL: l.URL().String(), - RemoteAPIToken: l.Auth.Token, - RemoteOrgID: api.PtrString(l.Org.ID.String()), - AllowInsecureTLS: false, - }).Execute() - require.NoError(t, err) - - // Validate the replication before creating it. - createReq := api.ReplicationCreationRequest{ - Name: "test", - OrgID: l.Org.ID.String(), - RemoteID: remote.Id, - LocalBucketID: l.Bucket.ID.String(), - RemoteBucketID: api.PtrString(l.Bucket.ID.String()), - MaxQueueSizeBytes: influxdb.DefaultReplicationMaxQueueSizeBytes, - } - _, err = client.ReplicationsApi.PostReplication(ctx).ReplicationCreationRequest(createReq).Validate(true).Execute() - require.NoError(t, err) - - // Create the replication. - replication, err := client.ReplicationsApi.PostReplication(ctx).ReplicationCreationRequest(createReq).Execute() - require.NoError(t, err) - - // Ensure the replication is marked as valid. - require.NoError(t, client.ReplicationsApi.PostValidateReplicationByID(ctx, replication.Id).Execute()) - - // Create a new auth token that can only write to the bucket. - auth := influxdb.Authorization{ - Status: "active", - OrgID: l.Org.ID, - UserID: l.User.ID, - Permissions: []influxdb.Permission{{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: &l.Bucket.ID, - OrgID: &l.Org.ID, - }, - }}, - CRUDLog: influxdb.CRUDLog{}, - } - require.NoError(t, l.AuthorizationService(t).CreateAuthorization(ctx, &auth)) - - // Update the remote to use the new token. - _, err = client.RemoteConnectionsApi.PatchRemoteConnectionByID(ctx, remote.Id). - RemoteConnenctionUpdateRequest(api.RemoteConnenctionUpdateRequest{RemoteAPIToken: &auth.Token}). - Execute() - require.NoError(t, err) - - // Ensure the replication is still valid. 
- require.NoError(t, client.ReplicationsApi.PostValidateReplicationByID(ctx, replication.Id).Execute()) -} - -func TestValidateReplication_Invalid(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - client := l.APIClient(t) - - // Create a "remote" connection to the launcher from itself, - // but with a bad auth token. - remote, err := client.RemoteConnectionsApi.PostRemoteConnection(ctx). - RemoteConnectionCreationRequest(api.RemoteConnectionCreationRequest{ - Name: "self", - OrgID: l.Org.ID.String(), - RemoteURL: l.URL().String(), - RemoteAPIToken: "foo", - RemoteOrgID: api.PtrString(l.Org.ID.String()), - AllowInsecureTLS: false, - }).Execute() - require.NoError(t, err) - - // Validate the replication before creating it. This should fail because of the bad - // auth token in the linked remote. - createReq := api.ReplicationCreationRequest{ - Name: "test", - OrgID: l.Org.ID.String(), - RemoteID: remote.Id, - LocalBucketID: l.Bucket.ID.String(), - RemoteBucketID: api.PtrString(l.Bucket.ID.String()), - MaxQueueSizeBytes: influxdb.DefaultReplicationMaxQueueSizeBytes, - } - _, err = client.ReplicationsApi.PostReplication(ctx).ReplicationCreationRequest(createReq).Validate(true).Execute() - require.Error(t, err) - - // Create the replication even though it failed validation. - replication, err := client.ReplicationsApi.PostReplication(ctx).ReplicationCreationRequest(createReq).Execute() - require.NoError(t, err) - - // Ensure the replication is marked as invalid. - require.Error(t, client.ReplicationsApi.PostValidateReplicationByID(ctx, replication.Id).Execute()) - - // Create a new auth token that can only write to the bucket. - auth := influxdb.Authorization{ - Status: "active", - OrgID: l.Org.ID, - UserID: l.User.ID, - Permissions: []influxdb.Permission{{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: &l.Bucket.ID, - OrgID: &l.Org.ID, - }, - }}, - CRUDLog: influxdb.CRUDLog{}, - } - require.NoError(t, l.AuthorizationService(t).CreateAuthorization(ctx, &auth)) - - // Update the remote to use the new token. - _, err = client.RemoteConnectionsApi.PatchRemoteConnectionByID(ctx, remote.Id). - RemoteConnenctionUpdateRequest(api.RemoteConnenctionUpdateRequest{RemoteAPIToken: &auth.Token}). - Execute() - require.NoError(t, err) - - // Ensure the replication is now valid. - require.NoError(t, client.ReplicationsApi.PostValidateReplicationByID(ctx, replication.Id).Execute()) - - // Create a new bucket. - bucket2 := influxdb.Bucket{ - OrgID: l.Org.ID, - Name: "bucket2", - RetentionPeriod: 0, - ShardGroupDuration: 0, - } - require.NoError(t, l.BucketService(t).CreateBucket(ctx, &bucket2)) - bucket2Id := bucket2.ID.String() - - // Updating the replication to point at the new bucket should fail validation. - _, err = client.ReplicationsApi.PatchReplicationByID(ctx, replication.Id). - ReplicationUpdateRequest(api.ReplicationUpdateRequest{RemoteBucketID: &bucket2Id}). - Validate(true). - Execute() - require.Error(t, err) -} - -func TestReplicationStreamEndToEnd(t *testing.T) { - // Points that will be written to the local bucket when only one replication is active. - testPoints1 := []string{ - `m,k=v1 f=100i 946684800000000000`, - `m,k=v2 f=200i 946684800000000000`, - } - - // Flux script to write points that to the local bucket when both replications are active. 
- testPoints2 := `import "csv" -csvData = "#datatype,string,long,dateTime:RFC3339,string,long,string,string -#group,false,false,false,true,false,true,true -#default,,,,,,, -,result,table,_time,k,_value,_measurement,_field -,,0,2000-01-01T00:00:00Z,v3,300,m,f -,,0,2000-01-01T00:00:00Z,v4,400,m,f -" -csv.from(csv: csvData) |> to(bucket: %q) -` - - // Format string to be used as a flux query to get data from a bucket. - qs := `from(bucket:%q) |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z)` - - // Data that should be in a bucket which received all the testPoints1. - exp1 := `,result,table,_start,_stop,_time,_value,_field,_measurement,k` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,100,f,m,v1` + "\r\n" + - `,_result,1,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,200,f,m,v2` + "\r\n\r\n" - - // Data that should be in a bucket which received all the points from testPoints1 and testPoints2. - exp2 := `,result,table,_start,_stop,_time,_value,_field,_measurement,k` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,100,f,m,v1` + "\r\n" + - `,_result,1,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,200,f,m,v2` + "\r\n" + - `,_result,2,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,300,f,m,v3` + "\r\n" + - `,_result,3,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,400,f,m,v4` + "\r\n\r\n" - - // Data that should be in a bucket which received points only from testPoints2. - exp3 := `,result,table,_start,_stop,_time,_value,_field,_measurement,k` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,300,f,m,v3` + "\r\n" + - `,_result,1,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,400,f,m,v4` + "\r\n\r\n" - - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - client := l.APIClient(t) - - localBucketName := l.Bucket.Name - remote1BucketName := "remote1" - remote2BucketName := "remote2" - - // Create a proxy for use in testing. This will proxy requests to the server, and also decrement the waitGroup to - // allow for synchronization. - var wg sync.WaitGroup - var mu sync.Mutex - proxyHandler := httputil.NewSingleHostReverseProxy(l.URL()) - proxy := httptest.NewServer(nethttp.HandlerFunc(func(w nethttp.ResponseWriter, r *nethttp.Request) { - mu.Lock() - defer mu.Unlock() - proxyHandler.ServeHTTP(w, r) - wg.Done() - })) - defer proxy.Close() - - // Create a "remote" connection to the launcher from itself via the test proxy. - remote, err := client.RemoteConnectionsApi.PostRemoteConnection(ctx). - RemoteConnectionCreationRequest(api.RemoteConnectionCreationRequest{ - Name: "self", - OrgID: l.Org.ID.String(), - RemoteURL: proxy.URL, - RemoteAPIToken: l.Auth.Token, - RemoteOrgID: api.PtrString(l.Org.ID.String()), - AllowInsecureTLS: false, - }).Execute() - require.NoError(t, err) - - // Create separate buckets to act as the target for remote writes - svc := l.BucketService(t) - remote1Bucket := &influxdb.Bucket{ - OrgID: l.Org.ID, - Name: remote1BucketName, - } - require.NoError(t, svc.CreateBucket(ctx, remote1Bucket)) - remote2Bucket := &influxdb.Bucket{ - OrgID: l.Org.ID, - Name: remote2BucketName, - } - require.NoError(t, svc.CreateBucket(ctx, remote2Bucket)) - - // Create a replication for the first remote bucket. 
- replicationCreateReq := api.ReplicationCreationRequest{ - Name: "test1", - OrgID: l.Org.ID.String(), - RemoteID: remote.Id, - LocalBucketID: l.Bucket.ID.String(), - RemoteBucketID: api.PtrString(remote1Bucket.ID.String()), - MaxQueueSizeBytes: influxdb.DefaultReplicationMaxQueueSizeBytes, - MaxAgeSeconds: influxdb.DefaultReplicationMaxAge, - } - - _, err = client.ReplicationsApi.PostReplication(ctx).ReplicationCreationRequest(replicationCreateReq).Execute() - require.NoError(t, err) - - // Write the first set of points to the launcher bucket. This is the local bucket in the replication. - for _, p := range testPoints1 { - wg.Add(1) - l.WritePointsOrFail(t, p) - } - wg.Wait() - - // Data should now be in the local bucket and in the replication remote bucket, but not in the bucket without the - // replication. - require.Equal(t, exp1, l.FluxQueryOrFail(t, l.Org, l.Auth.Token, fmt.Sprintf(qs, localBucketName))) - require.Equal(t, exp1, l.FluxQueryOrFail(t, l.Org, l.Auth.Token, fmt.Sprintf(qs, remote1BucketName))) - require.Equal(t, "\r\n", l.FluxQueryOrFail(t, l.Org, l.Auth.Token, fmt.Sprintf(qs, remote2BucketName))) - - // Create a replication for the second remote bucket. - replicationCreateReq = api.ReplicationCreationRequest{ - Name: "test2", - OrgID: l.Org.ID.String(), - RemoteID: remote.Id, - LocalBucketID: l.Bucket.ID.String(), - RemoteBucketID: api.PtrString(remote2Bucket.ID.String()), - MaxQueueSizeBytes: influxdb.DefaultReplicationMaxQueueSizeBytes, - MaxAgeSeconds: influxdb.DefaultReplicationMaxAge, - } - _, err = client.ReplicationsApi.PostReplication(ctx).ReplicationCreationRequest(replicationCreateReq).Execute() - require.NoError(t, err) - - // Write the second set of points to the launcher bucket via flux - wg.Add(2) // since there are two replications, the proxy server will handle 2 requests - l.FluxQueryOrFail(t, l.Org, l.Auth.Token, fmt.Sprintf(testPoints2, l.Bucket.Name)) - wg.Wait() - - // All the data should be in the local bucket and first replicated bucket. Only part of the data should be in the - // second replicated bucket. - require.Equal(t, exp2, l.FluxQueryOrFail(t, l.Org, l.Auth.Token, fmt.Sprintf(qs, localBucketName))) - require.Equal(t, exp2, l.FluxQueryOrFail(t, l.Org, l.Auth.Token, fmt.Sprintf(qs, remote1BucketName))) - require.Equal(t, exp3, l.FluxQueryOrFail(t, l.Org, l.Auth.Token, fmt.Sprintf(qs, remote2BucketName))) -} - -func TestReplicationStreamEndToEndRemoteFailures(t *testing.T) { - // Points that will be written to the local bucket. - testPoints := []string{ - `m,k=v0 f=100i 946684800000000000`, - `m,k=v1 f=200i 946684800000000000`, - `m,k=v2 f=300i 946684800000000000`, - `m,k=v3 f=400i 946684800000000000`, - } - - // Format string to be used as a flux query to get data from a bucket. - qs := `from(bucket:%q) |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z)` - - // Data that should be in a bucket which received all the testPoints. 
- exp := `,result,table,_start,_stop,_time,_value,_field,_measurement,k` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,100,f,m,v0` + "\r\n" + - `,_result,1,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,200,f,m,v1` + "\r\n" + - `,_result,2,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,300,f,m,v2` + "\r\n" + - `,_result,3,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,400,f,m,v3` + "\r\n\r\n" - - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - client := l.APIClient(t) - - localBucketName := l.Bucket.Name - remoteBucketName := "remote" - - random := rand.New(rand.NewSource(1000)) - pointsIndex := 0 - - // Create a proxy for use in testing. This will proxy requests to the server, and also decrement the waitGroup to - // allow for synchronization. - var wg sync.WaitGroup - var mu sync.Mutex - proxyHandler := httputil.NewSingleHostReverseProxy(l.URL()) - proxy := httptest.NewServer(nethttp.HandlerFunc(func(w nethttp.ResponseWriter, r *nethttp.Request) { - mu.Lock() - defer mu.Unlock() - - // Read the current point off of the request. - body, _ := io.ReadAll(r.Body) - reader, _ := gzip.NewReader(bytes.NewBuffer(body)) - unzipBody, _ := io.ReadAll(reader) - - // Fix the Body on the request. - r.Body.Close() - r.Body = io.NopCloser(bytes.NewBuffer(body)) - - // "Randomly" fail on remote write to test retries. - if random.Intn(2) == 0 { - // Increment the index if the proper point succeeds. - // This is needed since the replication queue will currently send the same - // data several times if a failure is reached, since there is not enough - // data to warrant a call to scan.Advance(). - if strings.Contains(string(unzipBody), fmt.Sprintf("v%d", pointsIndex)) { - pointsIndex++ - } - proxyHandler.ServeHTTP(w, r) - - // Decrement the waitGroup if all points have succeeded in writing. - if pointsIndex == len(testPoints) { - wg.Done() - } - } else { - w.WriteHeader(nethttp.StatusGatewayTimeout) - } - })) - defer proxy.Close() - - // Create a "remote" connection to the launcher from itself via the test proxy. - remote, err := client.RemoteConnectionsApi.PostRemoteConnection(ctx). - RemoteConnectionCreationRequest(api.RemoteConnectionCreationRequest{ - Name: "self", - OrgID: l.Org.ID.String(), - RemoteURL: proxy.URL, - RemoteAPIToken: l.Auth.Token, - RemoteOrgID: api.PtrString(l.Org.ID.String()), - AllowInsecureTLS: false, - }).Execute() - require.NoError(t, err) - - // Create separate buckets to act as the target for remote writes. - svc := l.BucketService(t) - remoteBucket := &influxdb.Bucket{ - OrgID: l.Org.ID, - Name: remoteBucketName, - } - require.NoError(t, svc.CreateBucket(ctx, remoteBucket)) - - // Create a replication for the remote bucket. - replicationCreateReq := api.ReplicationCreationRequest{ - Name: "test1", - OrgID: l.Org.ID.String(), - RemoteID: remote.Id, - LocalBucketID: l.Bucket.ID.String(), - RemoteBucketID: api.PtrString(remoteBucket.ID.String()), - MaxQueueSizeBytes: influxdb.DefaultReplicationMaxQueueSizeBytes, - MaxAgeSeconds: influxdb.DefaultReplicationMaxAge, - } - - _, err = client.ReplicationsApi.PostReplication(ctx).ReplicationCreationRequest(replicationCreateReq).Execute() - require.NoError(t, err) - - // Write the set of points to the launcher bucket. This is the local bucket in the replication. 
- wg.Add(1) - for _, p := range testPoints { - l.WritePointsOrFail(t, p) - } - wg.Wait() - - // Data should now be in the local bucket and in the replication remote bucket. - require.Equal(t, exp, l.FluxQueryOrFail(t, l.Org, l.Auth.Token, fmt.Sprintf(qs, localBucketName))) - require.Equal(t, exp, l.FluxQueryOrFail(t, l.Org, l.Auth.Token, fmt.Sprintf(qs, remoteBucketName))) -} - -func TestReplicationsLocalWriteAndShutdownBlocking(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - client := l.APIClient(t) - - // Server that only returns an error will cause the remote write to retry on loop. - svr := httptest.NewServer(nethttp.HandlerFunc(func(w nethttp.ResponseWriter, r *nethttp.Request) { - w.WriteHeader(nethttp.StatusInternalServerError) - })) - defer svr.Close() - - // Create a "remote" connection to the blocking server - remote, err := client.RemoteConnectionsApi.PostRemoteConnection(ctx). - RemoteConnectionCreationRequest(api.RemoteConnectionCreationRequest{ - Name: "self", - OrgID: l.Org.ID.String(), - RemoteURL: svr.URL, - RemoteAPIToken: "foo", - RemoteOrgID: api.PtrString(l.Org.ID.String()), - AllowInsecureTLS: false, - }).Execute() - require.NoError(t, err) - - createReq := api.ReplicationCreationRequest{ - Name: "test", - OrgID: l.Org.ID.String(), - RemoteID: remote.Id, - LocalBucketID: l.Bucket.ID.String(), - RemoteBucketID: api.PtrString(l.Bucket.ID.String()), - MaxQueueSizeBytes: influxdb.DefaultReplicationMaxQueueSizeBytes, - } - - // Create the replication - _, err = client.ReplicationsApi.PostReplication(ctx).ReplicationCreationRequest(createReq).Execute() - require.NoError(t, err) - - p := `m,k=v1 f=100i 946684800000000000` - - // Do a write; the remote writer will block forever - l.WritePointsOrFail(t, p) - - // Do some more writes; these should not be blocked locally, although the remote writer will be. - var wg sync.WaitGroup - for idx := 0; idx < 3; idx++ { - wg.Add(1) - go func() { - l.WritePointsOrFail(t, p) - wg.Done() - }() - } - - writesAreDone := make(chan struct{}) - - go func() { - wg.Wait() - // If remote writes don't block local writes, this will quickly send on the writesAreDone channel to prevent the - // test from timing out. - writesAreDone <- struct{}{} - }() - - // Test timeout for local writes - delay := 5 * time.Second - select { - case <-time.After(delay): - t.Fatalf("test timed out after %s - writing was blocked by remote writer", delay) - case <-writesAreDone: - } - - // Try to shut down the server - didShutdown := make(chan struct{}) - go func() { - // If remote writes don't block the server shutdown, the server should quickly shutdown and send on the didShutdown - // channel. 
- l.ShutdownOrFail(t, context.Background()) - didShutdown <- struct{}{} - }() - - // Test timeout for server shutdown - delay = 10 * time.Second - select { - case <-time.After(delay): - t.Fatalf("test timed out after %s - server shutdown was blocked by remote writer", delay) - case <-didShutdown: - } -} diff --git a/cmd/influxd/launcher/storage_test.go b/cmd/influxd/launcher/storage_test.go deleted file mode 100644 index 07aed8cf8bd..00000000000 --- a/cmd/influxd/launcher/storage_test.go +++ /dev/null @@ -1,656 +0,0 @@ -package launcher_test - -import ( - "context" - "fmt" - "io" - nethttp "net/http" - "strings" - "testing" - "time" - - "github.com/dustin/go-humanize" - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" - "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestStorage_WriteAndQuery(t *testing.T) { - l := launcher.NewTestLauncher() - l.RunOrFail(t, ctx) - defer l.ShutdownOrFail(t, ctx) - - org1 := l.OnBoardOrFail(t, &influxdb.OnboardingRequest{ - User: "USER-1", - Password: "PASSWORD-1", - Org: "ORG-01", - Bucket: "BUCKET", - }) - org2 := l.OnBoardOrFail(t, &influxdb.OnboardingRequest{ - User: "USER-2", - Password: "PASSWORD-1", - Org: "ORG-02", - Bucket: "BUCKET", - }) - - // Execute single write against the server. - l.WriteOrFail(t, org1, `m,k=v1 f=100i 946684800000000000`) - l.WriteOrFail(t, org2, `m,k=v2 f=200i 946684800000000000`) - - qs := `from(bucket:"BUCKET") |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z)` - - exp := `,result,table,_start,_stop,_time,_value,_field,_measurement,k` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,100,f,m,v1` + "\r\n\r\n" - if got := l.FluxQueryOrFail(t, org1.Org, org1.Auth.Token, qs); !cmp.Equal(got, exp) { - t.Errorf("unexpected query results -got/+exp\n%s", cmp.Diff(got, exp)) - } - - exp = `,result,table,_start,_stop,_time,_value,_field,_measurement,k` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,200,f,m,v2` + "\r\n\r\n" - if got := l.FluxQueryOrFail(t, org2.Org, org2.Auth.Token, qs); !cmp.Equal(got, exp) { - t.Errorf("unexpected query results -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -// Ensure the server will write all points possible with exception of -// - field type conflict -// - field too large -func TestStorage_PartialWrite(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - - // Initial write of integer. - l.WritePointsOrFail(t, `cpu value=1i 946684800000000000`) - - // Write mixed-field types. - err := l.WritePoints("cpu value=2i 946684800000000001\ncpu value=3 946684800000000002\ncpu value=4i 946684800000000003") - require.Error(t, err) - - // Write oversized field value. - err = l.WritePoints(fmt.Sprintf(`cpu str="%s" 946684800000000004`, strings.Repeat("a", tsdb.MaxFieldValueLength+1))) - require.Error(t, err) - - // Write biggest field value. - l.WritePointsOrFail(t, fmt.Sprintf(`cpu str="%s" 946684800000000005`, strings.Repeat("a", tsdb.MaxFieldValueLength))) - - // Ensure the valid points were written. 
- qs := `from(bucket:"BUCKET") |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z) |> keep(columns: ["_time","_field"])` - - exp := `,result,table,_time,_field` + "\r\n" + - `,_result,0,2000-01-01T00:00:00.000000005Z,str` + "\r\n" + // str=max-length string - `,_result,1,2000-01-01T00:00:00Z,value` + "\r\n" + // value=1 - `,_result,1,2000-01-01T00:00:00.000000001Z,value` + "\r\n" + // value=2 - `,_result,1,2000-01-01T00:00:00.000000003Z,value` + "\r\n\r\n" // value=4 - - buf, err := http.SimpleQuery(l.URL(), qs, l.Org.Name, l.Auth.Token) - require.NoError(t, err) - require.Equal(t, exp, string(buf)) -} - -func TestStorage_DisableMaxFieldValueSize(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t, func(o *launcher.InfluxdOpts) { - o.StorageConfig.Data.SkipFieldSizeValidation = true - }) - defer l.ShutdownOrFail(t, ctx) - - // Write a normally-oversized field value. - l.WritePointsOrFail(t, fmt.Sprintf(`cpu str="%s" 946684800000000000`, strings.Repeat("a", tsdb.MaxFieldValueLength+1))) - - // Check that the point can be queried. - qs := `from(bucket:"BUCKET") |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z) |> keep(columns: ["_value"])` - exp := `,result,table,_value` + "\r\n" + - fmt.Sprintf(`,_result,0,%s`, strings.Repeat("a", tsdb.MaxFieldValueLength+1)) + "\r\n\r\n" - - buf, err := http.SimpleQuery(l.URL(), qs, l.Org.Name, l.Auth.Token) - require.NoError(t, err) - require.Equal(t, exp, string(buf)) -} - -func TestLauncher_WriteAndQuery(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - - // Execute single write against the server. - resp, err := nethttp.DefaultClient.Do(l.MustNewHTTPRequest("POST", fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", l.Org.ID, l.Bucket.ID), `m,k=v f=100i 946684800000000000`)) - if err != nil { - t.Fatal(err) - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - if err := resp.Body.Close(); err != nil { - t.Fatal(err) - } - - if resp.StatusCode != nethttp.StatusNoContent { - t.Fatalf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header) - } - - // Query server to ensure write persists. - qs := `from(bucket:"BUCKET") |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z)` - exp := `,result,table,_start,_stop,_time,_value,_field,_measurement,k` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,100,f,m,v` + "\r\n\r\n" - - buf, err := http.SimpleQuery(l.URL(), qs, l.Org.Name, l.Auth.Token) - if err != nil { - t.Fatalf("unexpected error querying server: %v", err) - } - if diff := cmp.Diff(string(buf), exp); diff != "" { - t.Fatal(diff) - } -} - -func TestLauncher_BucketDelete(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - - // Execute single write against the server. - resp, err := nethttp.DefaultClient.Do(l.MustNewHTTPRequest("POST", fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", l.Org.ID, l.Bucket.ID), `m,k=v f=100i 946684800000000000`)) - if err != nil { - t.Fatal(err) - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - if err := resp.Body.Close(); err != nil { - t.Fatal(err) - } - - if resp.StatusCode != nethttp.StatusNoContent { - t.Fatalf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header) - } - - // Query server to ensure write persists. 
- qs := `from(bucket:"BUCKET") |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z)` - exp := `,result,table,_start,_stop,_time,_value,_field,_measurement,k` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,100,f,m,v` + "\r\n\r\n" - - buf, err := http.SimpleQuery(l.URL(), qs, l.Org.Name, l.Auth.Token) - if err != nil { - t.Fatalf("unexpected error querying server: %v", err) - } - if diff := cmp.Diff(string(buf), exp); diff != "" { - t.Fatal(diff) - } - - // Verify the cardinality in the engine. - engine := l.Launcher.Engine() - if got, exp := engine.SeriesCardinality(ctx, l.Bucket.ID), int64(1); got != exp { - t.Fatalf("got %d, exp %d", got, exp) - } - - // Delete the bucket. - if resp, err = nethttp.DefaultClient.Do(l.MustNewHTTPRequest("DELETE", fmt.Sprintf("/api/v2/buckets/%s", l.Bucket.ID), "")); err != nil { - t.Fatal(err) - } - - if body, err = io.ReadAll(resp.Body); err != nil { - t.Fatal(err) - } - - if err := resp.Body.Close(); err != nil { - t.Fatal(err) - } - - if resp.StatusCode != nethttp.StatusNoContent { - t.Fatalf("unexpected status code: %d, body: %s, headers: %v", resp.StatusCode, body, resp.Header) - } - - // Verify that the data has been removed from the storage engine. - if got, exp := engine.SeriesCardinality(ctx, l.Bucket.ID), int64(0); got != exp { - t.Fatalf("after bucket delete got %d, exp %d", got, exp) - } - - databaseInfo := engine.MetaClient().Database(l.Bucket.ID.String()) - assert.Nil(t, databaseInfo) -} - -func TestLauncher_DeleteWithPredicate(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - - // Write data to server. - if resp, err := nethttp.DefaultClient.Do(l.MustNewHTTPRequest("POST", fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", l.Org.ID, l.Bucket.ID), - "cpu,region=us-east-1 v=1 946684800000000000\n"+ - "cpu,region=us-west-1 v=1 946684800000000000\n"+ - "mem,region=us-west-1 v=1 946684800000000000\n", - )); err != nil { - t.Fatal(err) - } else if err := resp.Body.Close(); err != nil { - t.Fatal(err) - } - - // Execute single write against the server. - s := http.DeleteService{ - Addr: l.URL().String(), - Token: l.Auth.Token, - } - if err := s.DeleteBucketRangePredicate(context.Background(), http.DeleteRequest{ - OrgID: l.Org.ID.String(), - BucketID: l.Bucket.ID.String(), - Start: "2000-01-01T00:00:00Z", - Stop: "2000-01-02T00:00:00Z", - Predicate: `_measurement="cpu" AND region="us-west-1"`, - }); err != nil { - t.Fatal(err) - } - - // Query server to ensure write persists. - qs := `from(bucket:"BUCKET") |> range(start:2000-01-01T00:00:00Z,stop:2000-01-02T00:00:00Z)` - exp := `,result,table,_start,_stop,_time,_value,_field,_measurement,region` + "\r\n" + - `,_result,0,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,1,v,cpu,us-east-1` + "\r\n" + - `,_result,1,2000-01-01T00:00:00Z,2000-01-02T00:00:00Z,2000-01-01T00:00:00Z,1,v,mem,us-west-1` + "\r\n\r\n" - - buf, err := http.SimpleQuery(l.URL(), qs, l.Org.Name, l.Auth.Token) - if err != nil { - t.Fatalf("unexpected error querying server: %v", err) - } else if diff := cmp.Diff(string(buf), exp); diff != "" { - t.Fatal(diff) - } -} - -func TestLauncher_FluxCardinality(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - - // Run a query without any data on the server - should return 0 and not crash. 
- query := `import "influxdata/influxdb" - influxdb.cardinality( - bucket: "BUCKET", - start: 2000-01-01T00:00:00Z, - stop: 2000-01-02T00:00:00Z, - predicate: (r) => true - )` - - exp := `,result,table,_value` + "\r\n" + - `,_result,0,0` + "\r\n\r\n" - - body, err := http.SimpleQuery(l.URL(), query, l.Org.Name, l.Auth.Token) - require.NoError(t, err) - require.Equal(t, exp, string(body)) - - // Write data to server. - resp, err := nethttp.DefaultClient.Do(l.MustNewHTTPRequest("POST", fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", l.Org.ID, l.Bucket.ID), - "cpu,region=us-east-1 v=1 946684800000000000\n"+ - "cpu,region=us-west-1 v=1 946684800000000000\n"+ - "mem,region=us-west-1 v=1 946684800000000000\n"+ - "mem,region=us-south-1 v=2 996684800000000000\n", - )) - require.NoError(t, err) - require.NoError(t, resp.Body.Close()) - - // Specific time values for tests bracketing shards with time ranges - mc := l.Engine().MetaClient() - sgs, err := mc.ShardGroupsByTimeRange(l.Bucket.ID.String(), meta.DefaultRetentionPolicyName, time.Unix(0, 946684800000000000), time.Unix(0, 996684800000000000)) - require.NoError(t, err) - require.Equal(t, 2, len(sgs)) - - sg1Start := sgs[0].StartTime - sg2End := sgs[1].EndTime - sg2Start := sgs[1].StartTime - preSg1Start := sg1Start.Add(-1 * time.Minute) - - lastPoint := time.Unix(0, 996684800000000000) - // a point in the middle of the later shard group, after the data but before - // the end of the group - afterLastPoint := lastPoint.Add(1 * time.Minute) - require.True(t, afterLastPoint.Before(sg2End)) - require.True(t, afterLastPoint.After(sg2Start)) - - // similar, but before the data - beforeLastPoint := lastPoint.Add(-1 * time.Minute) - require.True(t, beforeLastPoint.Before(sg2End)) - require.True(t, beforeLastPoint.After(sg2Start)) - - tests := []struct { - name string - query string - exp string - }{ - { - name: "boolean literal predicate - true", - query: `import "influxdata/influxdb" - influxdb.cardinality( - bucket: "BUCKET", - start: 2000-01-01T00:00:00Z, - stop: 2000-01-02T00:00:00Z, - predicate: (r) => true - )`, - exp: `,result,table,_value` + "\r\n" + - `,_result,0,3` + "\r\n\r\n", - }, - { - name: "boolean literal predicate - false", - query: `import "influxdata/influxdb" - influxdb.cardinality( - bucket: "BUCKET", - start: 2000-01-01T00:00:00Z, - stop: 2000-01-02T00:00:00Z, - predicate: (r) => false - )`, - exp: `,result,table,_value` + "\r\n" + - `,_result,0,0` + "\r\n\r\n", - }, - { - name: "nil predicate", - query: `import "influxdata/influxdb" - influxdb.cardinality( - bucket: "BUCKET", - start: 2000-01-01T00:00:00Z, - stop: 2000-01-02T00:00:00Z, - )`, - exp: `,result,table,_value` + "\r\n" + - `,_result,0,3` + "\r\n\r\n", - }, - { - name: "nil predicate with large time range", - query: `import "influxdata/influxdb" - influxdb.cardinality( - bucket: "BUCKET", - start: 1990-01-01T00:00:00Z, - stop: 2010-01-01T00:00:00Z, - )`, - exp: `,result,table,_value` + "\r\n" + - `,_result,0,4` + "\r\n\r\n", - }, - { - name: "single measurement match", - query: `import "influxdata/influxdb" - influxdb.cardinality( - bucket: "BUCKET", - start: 2000-01-01T00:00:00Z, - stop: 2000-01-02T00:00:00Z, - predicate: (r) => r._measurement == "cpu" - )`, - exp: `,result,table,_value` + "\r\n" + - `,_result,0,2` + "\r\n\r\n", - }, - { - name: "multiple measurement match", - query: `import "influxdata/influxdb" - influxdb.cardinality( - bucket: "BUCKET", - start: 2000-01-01T00:00:00Z, - stop: 2000-01-02T00:00:00Z, - predicate: (r) => r._measurement == "cpu" or 
r._measurement == "mem" - )`, - exp: `,result,table,_value` + "\r\n" + - `,_result,0,3` + "\r\n\r\n", - }, - { - name: "predicate matches nothing", - query: `import "influxdata/influxdb" - influxdb.cardinality( - bucket: "BUCKET", - start: 2000-01-01T00:00:00Z, - stop: 2000-01-02T00:00:00Z, - predicate: (r) => r._measurement == "cpu" and r._measurement == "mem" - )`, - exp: `,result,table,_value` + "\r\n" + - `,_result,0,0` + "\r\n\r\n", - }, - { - name: "time range matches nothing", - query: `import "influxdata/influxdb" - influxdb.cardinality( - bucket: "BUCKET", - start: 2000-04-01T00:00:00Z, - stop: 2000-05-02T00:00:00Z, - )`, - exp: `,result,table,_value` + "\r\n" + - `,_result,0,0` + "\r\n\r\n", - }, - { - name: "large time range - all shards are within the window", - query: `import "influxdata/influxdb" - influxdb.cardinality( - bucket: "BUCKET", - start: 1990-01-01T00:00:00Z, - stop: 2010-01-01T00:00:00Z, - predicate: (r) => r._measurement == "cpu" - )`, - exp: `,result,table,_value` + "\r\n" + - `,_result,0,2` + "\r\n\r\n", - }, - { - name: "start range is inclusive", - query: fmt.Sprintf(`import "influxdata/influxdb" - influxdb.cardinality( - bucket: "BUCKET", - start: %s, - stop: 2010-01-01T00:00:00Z, - predicate: (r) => r._measurement == "mem" - )`, time.Unix(0, 946684800000000000).Format(time.RFC3339Nano), - ), - exp: `,result,table,_value` + "\r\n" + - `,_result,0,2` + "\r\n\r\n", - }, - { - name: "stop range is exclusive", - query: fmt.Sprintf(`import "influxdata/influxdb" - influxdb.cardinality( - bucket: "BUCKET", - start: 1990-01-01T00:00:00Z, - stop: %s, - predicate: (r) => r._measurement == "mem" - )`, lastPoint.Format(time.RFC3339Nano), - ), - exp: `,result,table,_value` + "\r\n" + - `,_result,0,1` + "\r\n\r\n", - }, - { - name: "one shard is entirely in the time range, other is partially, range includes data in partial shard", - query: fmt.Sprintf(`import "influxdata/influxdb" - influxdb.cardinality( - bucket: "BUCKET", - start: %s, - stop: %s, - predicate: (r) => r._measurement == "mem" - )`, preSg1Start.Format(time.RFC3339Nano), afterLastPoint.Format(time.RFC3339Nano), - ), - exp: `,result,table,_value` + "\r\n" + - `,_result,0,2` + "\r\n\r\n", - }, - { - name: "one shard is entirely in the time range, other is partially, range does not include data in partial shard", - query: fmt.Sprintf(`import "influxdata/influxdb" - influxdb.cardinality( - bucket: "BUCKET", - start: %s, - stop: %s, - predicate: (r) => r._measurement == "mem" - )`, preSg1Start.Format(time.RFC3339Nano), beforeLastPoint.Format(time.RFC3339Nano), - ), - exp: `,result,table,_value` + "\r\n" + - `,_result,0,1` + "\r\n\r\n", - }, - } - - for _, tt := range tests { - body, err := http.SimpleQuery(l.URL(), tt.query, l.Org.Name, l.Auth.Token) - require.NoError(t, err) - require.Equal(t, tt.exp, string(body)) - } -} - -func TestLauncher_UpdateRetentionPolicy(t *testing.T) { - durPtr := func(d time.Duration) *time.Duration { - return &d - } - - testCases := []struct { - name string - initRp time.Duration - initSgd time.Duration - derivedSgd *time.Duration - newRp *time.Duration - newSgd *time.Duration - expectInitErr bool - expectUpdateErr bool - }{ - { - name: "infinite to 1w", - derivedSgd: durPtr(humanize.Week), - newRp: durPtr(humanize.Week), - }, - { - name: "1w to 1d", - initRp: humanize.Week, - derivedSgd: durPtr(humanize.Day), - newRp: durPtr(humanize.Day), - }, - { - name: "1d to 1h", - initRp: humanize.Day, - derivedSgd: durPtr(time.Hour), - newRp: durPtr(time.Hour), - }, - { - name: "infinite, 
update shard duration", - initSgd: humanize.Month, - derivedSgd: durPtr(humanize.Month), - newSgd: durPtr(humanize.Week), - }, - { - name: "1w, update shard duration", - initRp: humanize.Week, - initSgd: humanize.Week, - newSgd: durPtr(time.Hour), - }, - { - name: "1d, update shard duration", - initRp: humanize.Day, - initSgd: 3 * time.Hour, - newSgd: durPtr(1*time.Hour + 30*time.Minute), - }, - { - name: "infinite, update both retention and shard duration", - derivedSgd: durPtr(humanize.Week), - newRp: durPtr(time.Hour), - newSgd: durPtr(time.Hour), - }, - { - name: "init shard duration larger than RP", - initRp: time.Hour, - initSgd: humanize.Day, - expectInitErr: true, - }, - { - name: "updated shard duration larger than RP", - initRp: humanize.Day, - initSgd: time.Hour, - newSgd: durPtr(humanize.Week), - expectUpdateErr: true, - }, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - bucketService := l.BucketService(t) - - bucket := &influxdb.Bucket{ - OrgID: l.Org.ID, - RetentionPeriod: tc.initRp, - ShardGroupDuration: tc.initSgd, - } - err := bucketService.CreateBucket(ctx, bucket) - if tc.expectInitErr { - require.Error(t, err) - return - } - require.NoError(t, err) - defer bucketService.DeleteBucket(ctx, bucket.ID) - - bucket, err = bucketService.FindBucketByID(ctx, bucket.ID) - require.NoError(t, err) - - expectedSgd := tc.initSgd - if tc.derivedSgd != nil { - expectedSgd = *tc.derivedSgd - } - require.Equal(t, tc.initRp, bucket.RetentionPeriod) - require.Equal(t, expectedSgd, bucket.ShardGroupDuration) - - bucket, err = bucketService.UpdateBucket(ctx, bucket.ID, influxdb.BucketUpdate{ - RetentionPeriod: tc.newRp, - ShardGroupDuration: tc.newSgd, - }) - if tc.expectUpdateErr { - require.Error(t, err) - return - } - require.NoError(t, err) - - bucket, err = bucketService.FindBucketByID(ctx, bucket.ID) - require.NoError(t, err) - - expectedRp := tc.initRp - if tc.newRp != nil { - expectedRp = *tc.newRp - } - if tc.newSgd != nil { - expectedSgd = *tc.newSgd - } - require.Equal(t, expectedRp, bucket.RetentionPeriod) - require.Equal(t, expectedSgd, bucket.ShardGroupDuration) - }) - } -} - -func TestLauncher_OverlappingShards(t *testing.T) { - l := launcher.RunAndSetupNewLauncherOrFail(ctx, t) - defer l.ShutdownOrFail(t, ctx) - - bkt := influxdb.Bucket{Name: "test", ShardGroupDuration: time.Hour, OrgID: l.Org.ID} - require.NoError(t, l.BucketService(t).CreateBucket(ctx, &bkt)) - - req := l.MustNewHTTPRequest("POST", fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", l.Org.ID, bkt.ID), - "m,s=0 n=0 1626416520000000000\nm,s=0 n=1 1626420120000000000\n") - resp, err := nethttp.DefaultClient.Do(req) - require.NoError(t, err) - require.NoError(t, resp.Body.Close()) - - newDur := humanize.Day - _, err = l.BucketService(t).UpdateBucket(ctx, bkt.ID, influxdb.BucketUpdate{ShardGroupDuration: &newDur}) - require.NoError(t, err) - - req = l.MustNewHTTPRequest("POST", fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", l.Org.ID, bkt.ID), - // NOTE: The 3rd point's timestamp is chronologically earlier than the other two points, but it - // must come after the others in the request to trigger the overlapping-shard bug. If it comes - // first in the request, the bug is avoided because: - // 1. The point-writer sees there is no shard for the earlier point, and creates a new 24h shard-group - // 2. 
The new 24 group covers the timestamps of the remaining 2 points, so the writer doesn't bother looking - // for existing shards that also cover the timestamp - // 3. With only 1 shard mapped to the 3 points, there is no overlap to trigger the bug - "m,s=0 n=0 1626416520000000000\nm,s=0 n=1 1626420120000000000\nm,s=1 n=1 1626412920000000000\n") - resp, err = nethttp.DefaultClient.Do(req) - require.NoError(t, err) - require.NoError(t, resp.Body.Close()) - - query := `from(bucket:"test") |> range(start:2000-01-01T00:00:00Z,stop:2050-01-01T00:00:00Z)` + - ` |> drop(columns:["_start","_stop"])` - exp := `,result,table,_time,_value,_field,_measurement,s` + "\r\n" + - `,_result,0,2021-07-16T06:22:00Z,0,n,m,0` + "\r\n" + - `,_result,0,2021-07-16T07:22:00Z,1,n,m,0` + "\r\n" + - `,_result,1,2021-07-16T05:22:00Z,1,n,m,1` + "\r\n\r\n" - - buf, err := http.SimpleQuery(l.URL(), query, l.Org.Name, l.Auth.Token) - require.NoError(t, err) - require.Equal(t, exp, string(buf)) -} diff --git a/cmd/influxd/main.go b/cmd/influxd/main.go deleted file mode 100644 index 200b76ef55d..00000000000 --- a/cmd/influxd/main.go +++ /dev/null @@ -1,79 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/influxd/downgrade" - "github.com/influxdata/influxdb/v2/cmd/influxd/inspect" - "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" - "github.com/influxdata/influxdb/v2/cmd/influxd/recovery" - "github.com/influxdata/influxdb/v2/cmd/influxd/upgrade" - _ "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - _ "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - version = "dev" - commit = "none" - date = "" -) - -func main() { - if len(date) == 0 { - date = time.Now().UTC().Format(time.RFC3339) - } - - influxdb.SetBuildInfo(version, commit, date) - - ctx := context.Background() - v := viper.New() - - rootCmd, err := launcher.NewInfluxdCommand(ctx, v) - if err != nil { - handleErr(err.Error()) - } - // upgrade binds options to env variables, so it must be added after rootCmd is initialized - upgradeCmd, err := upgrade.NewCommand(ctx, v) - if err != nil { - handleErr(err.Error()) - } - rootCmd.AddCommand(upgradeCmd) - inspectCmd, err := inspect.NewCommand(v) - if err != nil { - handleErr(err.Error()) - } - rootCmd.AddCommand(inspectCmd) - rootCmd.AddCommand(versionCmd()) - rootCmd.AddCommand(recovery.NewCommand()) - downgradeCmd, err := downgrade.NewCommand(ctx, v) - if err != nil { - handleErr(err.Error()) - } - rootCmd.AddCommand(downgradeCmd) - - rootCmd.SilenceUsage = true - if err := rootCmd.Execute(); err != nil { - handleErr(fmt.Sprintf("See '%s -h' for help", rootCmd.CommandPath())) - } -} - -func handleErr(err string) { - _, _ = fmt.Fprintln(os.Stderr, err) - os.Exit(1) -} - -func versionCmd() *cobra.Command { - return &cobra.Command{ - Use: "version", - Short: "Print the influxd server version", - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("InfluxDB %s (git: %s) build_date: %s\n", version, commit, date) - }, - } -} diff --git a/cmd/influxd/recovery/auth/auth.go b/cmd/influxd/recovery/auth/auth.go deleted file mode 100644 index f868303addd..00000000000 --- a/cmd/influxd/recovery/auth/auth.go +++ /dev/null @@ -1,222 +0,0 @@ -package auth - -import ( - "context" - "fmt" - "io" - "os" - "path/filepath" - - 
"github.com/influxdata/influx-cli/v2/pkg/tabwriter" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - "github.com/influxdata/influxdb/v2/bolt" - "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/spf13/cobra" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -func NewAuthCommand() *cobra.Command { - base := &cobra.Command{ - Use: "auth", - Short: "On-disk authorization management commands, for recovery", - Args: cobra.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - cmd.PrintErrf("See '%s -h' for help\n", cmd.CommandPath()) - }, - } - - base.AddCommand(NewAuthListCommand()) - base.AddCommand(NewAuthCreateCommand()) - - return base -} - -type authListCommand struct { - logger *zap.Logger - boltPath string - out io.Writer -} - -func NewAuthListCommand() *cobra.Command { - var authCmd authListCommand - cmd := &cobra.Command{ - Use: "list", - Short: "List authorizations", - RunE: func(cmd *cobra.Command, args []string) error { - config := logger.NewConfig() - config.Level = zapcore.InfoLevel - - newLogger, err := config.New(cmd.ErrOrStderr()) - if err != nil { - return err - } - authCmd.logger = newLogger - authCmd.out = cmd.OutOrStdout() - return authCmd.run() - }, - } - - defaultPath := filepath.Join(os.Getenv("HOME"), ".influxdbv2", "influxd.bolt") - - cmd.Flags().StringVar(&authCmd.boltPath, "bolt-path", defaultPath, "Path to the BoltDB file.") - - return cmd -} - -func (cmd *authListCommand) run() error { - ctx := context.Background() - store := bolt.NewKVStore(cmd.logger.With(zap.String("system", "bolt-kvstore")), cmd.boltPath) - if err := store.Open(ctx); err != nil { - return err - } - defer store.Close() - tenantStore := tenant.NewStore(store) - tenantService := tenant.NewService(tenantStore) - authStore, err := authorization.NewStore(store) - if err != nil { - return err - } - auth := authorization.NewService(authStore, tenantService) - filter := influxdb.AuthorizationFilter{} - auths, _, err := auth.FindAuthorizations(ctx, filter) - if err != nil { - return err - } - - return PrintAuth(ctx, cmd.out, auths, tenantService) -} - -type authCreateCommand struct { - logger *zap.Logger - boltPath string - out io.Writer - username string - org string -} - -func NewAuthCreateCommand() *cobra.Command { - var authCmd authCreateCommand - cmd := &cobra.Command{ - Use: "create-operator", - Short: "Create new operator token for a user", - RunE: func(cmd *cobra.Command, args []string) error { - config := logger.NewConfig() - config.Level = zapcore.InfoLevel - - newLogger, err := config.New(cmd.ErrOrStderr()) - if err != nil { - return err - } - authCmd.logger = newLogger - authCmd.out = cmd.OutOrStdout() - return authCmd.run() - }, - } - - defaultPath := filepath.Join(os.Getenv("HOME"), ".influxdbv2", "influxd.bolt") - cmd.Flags().StringVar(&authCmd.boltPath, "bolt-path", defaultPath, "Path to the BoltDB file") - cmd.Flags().StringVar(&authCmd.username, "username", "", "Name of the user") - cmd.Flags().StringVar(&authCmd.org, "org", "", "Name of the org") - - return cmd -} - -func (cmd *authCreateCommand) run() error { - ctx := context.Background() - store := bolt.NewKVStore(cmd.logger.With(zap.String("system", "bolt-kvstore")), cmd.boltPath) - if err := store.Open(ctx); err != nil { - return err - } - defer store.Close() - tenantStore := tenant.NewStore(store) - tenantService := tenant.NewService(tenantStore) - authStore, err := 
authorization.NewStore(store) - if err != nil { - return err - } - auth := authorization.NewService(authStore, tenantService) - - if cmd.username == "" { - return fmt.Errorf("must provide --username") - } - if cmd.org == "" { - return fmt.Errorf("must provide --org") - } - - // Find the user - user, err := tenantService.FindUser(ctx, influxdb.UserFilter{Name: &cmd.username}) - if err != nil { - return fmt.Errorf("could not find user %q: %w", cmd.username, err) - } - - orgs, _, err := tenantService.FindOrganizations(ctx, influxdb.OrganizationFilter{ - Name: &cmd.org, - }) - if err != nil { - return fmt.Errorf("could not find org %q: %w", cmd.org, err) - } - org := orgs[0] - - // Create operator token - authToCreate := &influxdb.Authorization{ - Description: fmt.Sprintf("%s's Recovery Token", cmd.username), - Permissions: influxdb.OperPermissions(), - UserID: user.ID, - OrgID: org.ID, - } - if err := auth.CreateAuthorization(ctx, authToCreate); err != nil { - return fmt.Errorf("could not create recovery token: %w", err) - } - - // Print all authorizations now that we have added one - filter := influxdb.AuthorizationFilter{} - auths, _, err := auth.FindAuthorizations(ctx, filter) - if err != nil { - return err - } - return PrintAuth(ctx, cmd.out, auths, tenantService) -} - -func PrintAuth(ctx context.Context, w io.Writer, v []*influxdb.Authorization, userSvc influxdb.UserService) error { - headers := []string{ - "ID", - "User Name", - "User ID", - "Description", - "Token", - "Permissions", - } - - var rows []map[string]interface{} - for _, t := range v { - user, err := userSvc.FindUserByID(ctx, t.UserID) - userName := "" - if err == nil && user != nil { - userName = user.Name - } - row := map[string]interface{}{ - "ID": t.ID, - "Description": t.Description, - "User Name": userName, - "User ID": t.UserID, - "Token": t.Token, - "Permissions": t.Permissions, - } - rows = append(rows, row) - } - - writer := tabwriter.NewTabWriter(w, false) - defer writer.Flush() - if err := writer.WriteHeaders(headers...); err != nil { - return err - } - for _, row := range rows { - if err := writer.Write(row); err != nil { - return err - } - } - return nil -} diff --git a/cmd/influxd/recovery/auth/auth_test.go b/cmd/influxd/recovery/auth/auth_test.go deleted file mode 100644 index 28d66477b88..00000000000 --- a/cmd/influxd/recovery/auth/auth_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package auth - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/cmd/influxd/recovery/testhelper" - "github.com/stretchr/testify/assert" -) - -func Test_Auth_Basic(t *testing.T) { - db := testhelper.NewTestBoltDb(t) - defer db.Close() - assert.Equal(t, ""+ - `ID User Name User ID Description Token Permissions`+"\n"+ - `08371db24dcc8000 testuser 08371db1dd8c8000 testuser's Token A9Ovdl8SmP-rfp8wQ2vJoPUsZoQQJ3EochD88SlJcgrcLw4HBwgUqpSHQxc9N9Drg0_aY6Lp1jutBRcKhbV7aQ== [read:authorizations write:authorizations read:buckets write:buckets read:dashboards write:dashboards read:orgs write:orgs read:sources write:sources read:tasks write:tasks read:telegrafs write:telegrafs read:users write:users read:variables write:variables read:scrapers write:scrapers read:secrets write:secrets read:labels write:labels read:views write:views read:documents write:documents read:notificationRules write:notificationRules read:notificationEndpoints write:notificationEndpoints read:checks write:checks read:dbrp write:dbrp read:notebooks write:notebooks read:annotations write:annotations]`+"\n"+ - `08371deae98c8000 testuser 
08371db1dd8c8000 testuser's read buckets token 4-pZrlm84u9uiMVrPBeITe46KxfdEnvTX5H2CZh38BtAsXX4O47b8QwZ9jHL_Cek2w-VbVfRxDpo0Mu8ORiqyQ== [read:orgs/dd7cd2292f6e974a/buckets]`+"\n", - testhelper.MustRunCommand(t, NewAuthCommand(), "list", "--bolt-path", db.Name())) - - // org name not created - assert.EqualError(t, testhelper.RunCommand(t, NewAuthCommand(), "create-operator", "--bolt-path", db.Name(), "--org", "not-exist", "--username", "testuser"), "could not find org \"not-exist\": organization name \"not-exist\" not found") - - // user not created - assert.EqualError(t, testhelper.RunCommand(t, NewAuthCommand(), "create-operator", "--bolt-path", db.Name(), "--org", "myorg", "--username", "testuser2"), "could not find user \"testuser2\": user not found") - - // existing user creates properly - assert.NoError(t, testhelper.RunCommand(t, NewAuthCommand(), "create-operator", "--bolt-path", db.Name(), "--username", "testuser", "--org", "myorg")) - - assert.Regexp(t, ""+ - `ID User Name User ID Description Token Permissions`+"\n"+ - `08371db24dcc8000 testuser 08371db1dd8c8000 testuser's Token A9Ovdl8SmP-rfp8wQ2vJoPUsZoQQJ3EochD88SlJcgrcLw4HBwgUqpSHQxc9N9Drg0_aY6Lp1jutBRcKhbV7aQ== \[read:authorizations write:authorizations read:buckets write:buckets read:dashboards write:dashboards read:orgs write:orgs read:sources write:sources read:tasks write:tasks read:telegrafs write:telegrafs read:users write:users read:variables write:variables read:scrapers write:scrapers read:secrets write:secrets read:labels write:labels read:views write:views read:documents write:documents read:notificationRules write:notificationRules read:notificationEndpoints write:notificationEndpoints read:checks write:checks read:dbrp write:dbrp read:notebooks write:notebooks read:annotations write:annotations\]`+"\n"+ - `08371deae98c8000 testuser 08371db1dd8c8000 testuser's read buckets token 4-pZrlm84u9uiMVrPBeITe46KxfdEnvTX5H2CZh38BtAsXX4O47b8QwZ9jHL_Cek2w-VbVfRxDpo0Mu8ORiqyQ== \[read:orgs/dd7cd2292f6e974a/buckets\]`+"\n"+ - `[^\t]* testuser [^\t]* testuser's Recovery Token [^\t]* \[read:authorizations write:authorizations read:buckets write:buckets read:dashboards write:dashboards read:orgs write:orgs read:sources write:sources read:tasks write:tasks read:telegrafs write:telegrafs read:users write:users read:variables write:variables read:scrapers write:scrapers read:secrets write:secrets read:labels write:labels read:views write:views read:documents write:documents read:notificationRules write:notificationRules read:notificationEndpoints write:notificationEndpoints read:checks write:checks read:dbrp write:dbrp read:notebooks write:notebooks read:annotations write:annotations read:remotes write:remotes read:replications write:replications\]`+"\n", - testhelper.MustRunCommand(t, NewAuthCommand(), "list", "--bolt-path", db.Name())) -} diff --git a/cmd/influxd/recovery/organization/org.go b/cmd/influxd/recovery/organization/org.go deleted file mode 100644 index 84410b0d8b5..00000000000 --- a/cmd/influxd/recovery/organization/org.go +++ /dev/null @@ -1,170 +0,0 @@ -package organization - -import ( - "context" - "fmt" - "io" - "os" - "path/filepath" - - "github.com/influxdata/influx-cli/v2/pkg/tabwriter" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/bolt" - "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/spf13/cobra" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -func NewOrgCommand() 
*cobra.Command { - base := &cobra.Command{ - Use: "org", - Short: "On-disk organization management commands, for recovery", - Args: cobra.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - cmd.PrintErrf("See '%s -h' for help\n", cmd.CommandPath()) - }, - } - - base.AddCommand(NewOrgListCommand()) - base.AddCommand(NewOrgCreateCommand()) - - return base -} - -type orgListCommand struct { - logger *zap.Logger - boltPath string - out io.Writer -} - -func NewOrgListCommand() *cobra.Command { - var orgCmd orgListCommand - cmd := &cobra.Command{ - Use: "list", - Short: "List organizations", - RunE: func(cmd *cobra.Command, args []string) error { - config := logger.NewConfig() - config.Level = zapcore.InfoLevel - - newLogger, err := config.New(cmd.ErrOrStderr()) - if err != nil { - return err - } - orgCmd.logger = newLogger - orgCmd.out = cmd.OutOrStdout() - return orgCmd.run() - }, - } - - defaultPath := filepath.Join(os.Getenv("HOME"), ".influxdbv2", "influxd.bolt") - - cmd.Flags().StringVar(&orgCmd.boltPath, "bolt-path", defaultPath, "Path to the BoltDB file") - - return cmd -} - -func (cmd *orgListCommand) run() error { - ctx := context.Background() - store := bolt.NewKVStore(cmd.logger.With(zap.String("system", "bolt-kvstore")), cmd.boltPath) - if err := store.Open(ctx); err != nil { - return err - } - defer store.Close() - tenantStore := tenant.NewStore(store) - tenantService := tenant.NewService(tenantStore) - orgs, _, err := tenantService.FindOrganizations(ctx, influxdb.OrganizationFilter{}) - if err != nil { - return err - } - - return PrintOrgs(ctx, cmd.out, orgs) -} - -type orgCreateCommand struct { - logger *zap.Logger - boltPath string - out io.Writer - org string -} - -func NewOrgCreateCommand() *cobra.Command { - var orgCmd orgCreateCommand - cmd := &cobra.Command{ - Use: "create", - Short: "Create new org", - RunE: func(cmd *cobra.Command, args []string) error { - config := logger.NewConfig() - config.Level = zapcore.InfoLevel - - newLogger, err := config.New(cmd.ErrOrStderr()) - if err != nil { - return err - } - orgCmd.logger = newLogger - orgCmd.out = cmd.OutOrStdout() - return orgCmd.run() - }, - } - - defaultPath := filepath.Join(os.Getenv("HOME"), ".influxdbv2", "influxd.bolt") - cmd.Flags().StringVar(&orgCmd.boltPath, "bolt-path", defaultPath, "Path to the BoltDB file") - cmd.Flags().StringVar(&orgCmd.org, "org", "", "Name of the org to create") - - return cmd -} - -func (cmd *orgCreateCommand) run() error { - ctx := context.Background() - store := bolt.NewKVStore(cmd.logger.With(zap.String("system", "bolt-kvstore")), cmd.boltPath) - if err := store.Open(ctx); err != nil { - return err - } - defer store.Close() - tenantStore := tenant.NewStore(store) - tenantService := tenant.NewService(tenantStore) - if cmd.org == "" { - return fmt.Errorf("must provide --org") - } - - if err := tenantService.CreateOrganization(ctx, &influxdb.Organization{ - Name: cmd.org, - }); err != nil { - return err - } - - orgs, _, err := tenantService.FindOrganizations(ctx, influxdb.OrganizationFilter{}) - if err != nil { - return err - } - return PrintOrgs(ctx, cmd.out, orgs) -} - -func PrintOrgs(ctx context.Context, w io.Writer, v []*influxdb.Organization) error { - headers := []string{ - "ID", - "Name", - } - - var rows []map[string]interface{} - for _, org := range v { - row := map[string]interface{}{ - "ID": org.ID, - "Name": org.Name, - } - rows = append(rows, row) - } - - writer := tabwriter.NewTabWriter(w, false) - defer writer.Flush() - if err := writer.WriteHeaders(headers...); err 
!= nil { - return err - } - for _, row := range rows { - if err := writer.Write(row); err != nil { - return err - } - } - return nil -} diff --git a/cmd/influxd/recovery/organization/org_test.go b/cmd/influxd/recovery/organization/org_test.go deleted file mode 100644 index ce99d587c9c..00000000000 --- a/cmd/influxd/recovery/organization/org_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package organization - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/cmd/influxd/recovery/testhelper" - "github.com/stretchr/testify/assert" -) - -func Test_Org_Basic(t *testing.T) { - db := testhelper.NewTestBoltDb(t) - defer db.Close() - assert.Equal(t, `ID Name -dd7cd2292f6e974a myorg -`, - testhelper.MustRunCommand(t, NewOrgCommand(), "list", "--bolt-path", db.Name())) - - // org creation only works for new names - assert.EqualError(t, testhelper.RunCommand(t, NewOrgCommand(), "create", "--bolt-path", db.Name(), "--org", "myorg"), "organization with name myorg already exists") - - // org creation works - assert.NoError(t, testhelper.RunCommand(t, NewOrgCommand(), "create", "--bolt-path", db.Name(), "--org", "neworg")) - - // neworg shows up in list of orgs - assert.Regexp(t, "\tneworg\n", testhelper.MustRunCommand(t, NewOrgCommand(), "list", "--bolt-path", db.Name())) -} diff --git a/cmd/influxd/recovery/recovery.go b/cmd/influxd/recovery/recovery.go deleted file mode 100644 index 40217aa8437..00000000000 --- a/cmd/influxd/recovery/recovery.go +++ /dev/null @@ -1,26 +0,0 @@ -package recovery - -import ( - "github.com/influxdata/influxdb/v2/cmd/influxd/recovery/auth" - "github.com/influxdata/influxdb/v2/cmd/influxd/recovery/organization" - "github.com/influxdata/influxdb/v2/cmd/influxd/recovery/user" - "github.com/spf13/cobra" -) - -// NewCommand creates the new command. 
-func NewCommand() *cobra.Command { - base := &cobra.Command{ - Use: "recovery", - Short: "Commands used to recover / regenerate operator access to the DB", - Args: cobra.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - cmd.PrintErrf("See '%s -h' for help\n", cmd.CommandPath()) - }, - } - - base.AddCommand(auth.NewAuthCommand()) - base.AddCommand(user.NewUserCommand()) - base.AddCommand(organization.NewOrgCommand()) - - return base -} diff --git a/cmd/influxd/recovery/testhelper/influxd.bolt.testdata b/cmd/influxd/recovery/testhelper/influxd.bolt.testdata deleted file mode 100644 index ea5ea17d3d5..00000000000 Binary files a/cmd/influxd/recovery/testhelper/influxd.bolt.testdata and /dev/null differ diff --git a/cmd/influxd/recovery/testhelper/testboltdb.go b/cmd/influxd/recovery/testhelper/testboltdb.go deleted file mode 100644 index f59ae7e6d10..00000000000 --- a/cmd/influxd/recovery/testhelper/testboltdb.go +++ /dev/null @@ -1,53 +0,0 @@ -package testhelper - -import ( - "bytes" - _ "embed" - "io" - "os" - "testing" - - "github.com/spf13/cobra" - "github.com/stretchr/testify/require" -) - -//go:embed influxd.bolt.testdata -var influxdBolt []byte - -type TestBoltDb struct { - f *os.File - t *testing.T -} - -func NewTestBoltDb(t *testing.T) *TestBoltDb { - f, err := os.CreateTemp("", "") - require.NoError(t, err) - - _, err = io.Copy(f, bytes.NewBuffer(influxdBolt)) - require.NoError(t, err) - return &TestBoltDb{f, t} -} - -func (t *TestBoltDb) Name() string { - return t.f.Name() -} - -func (t *TestBoltDb) Close() { - require.NoError(t.t, t.f.Close()) - require.NoError(t.t, os.Remove(t.f.Name())) -} - -func MustRunCommand(t *testing.T, cmd *cobra.Command, args ...string) string { - buf := &bytes.Buffer{} - cmd.SetArgs(args) - cmd.SetOut(buf) - require.NoError(t, cmd.Execute()) - return buf.String() -} - -func RunCommand(t *testing.T, cmd *cobra.Command, args ...string) error { - buf := &bytes.Buffer{} - cmd.SetArgs(args) - cmd.SetOut(buf) - return cmd.Execute() -} diff --git a/cmd/influxd/recovery/user/user.go b/cmd/influxd/recovery/user/user.go deleted file mode 100644 index 2fe4763228b..00000000000 --- a/cmd/influxd/recovery/user/user.go +++ /dev/null @@ -1,270 +0,0 @@ -package user - -import ( - "context" - "fmt" - "io" - "os" - "path/filepath" - - "github.com/influxdata/influx-cli/v2/pkg/tabwriter" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/bolt" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/spf13/cobra" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -func NewUserCommand() *cobra.Command { - base := &cobra.Command{ - Use: "user", - Short: "On-disk user management commands, for recovery", - Args: cobra.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - cmd.PrintErrf("See '%s -h' for help\n", cmd.CommandPath()) - }, - } - - base.AddCommand(NewUserListCommand()) - base.AddCommand(NewUserCreateCommand()) - base.AddCommand(NewUserUpdateCommand()) - - return base -} - -type userListCommand struct { - logger *zap.Logger - boltPath string - out io.Writer -} - -func NewUserListCommand() *cobra.Command { - var userCmd userListCommand - cmd := &cobra.Command{ - Use: "list", - Short: "List users", - RunE: func(cmd *cobra.Command, args []string) error { - config := logger.NewConfig() - config.Level = zapcore.InfoLevel - - newLogger, err := 
config.New(cmd.ErrOrStderr()) - if err != nil { - return err - } - userCmd.logger = newLogger - userCmd.out = cmd.OutOrStdout() - return userCmd.run() - }, - } - - defaultPath := filepath.Join(os.Getenv("HOME"), ".influxdbv2", "influxd.bolt") - - cmd.Flags().StringVar(&userCmd.boltPath, "bolt-path", defaultPath, "Path to the BoltDB file.") - - return cmd -} - -func (cmd *userListCommand) run() error { - ctx := context.Background() - store := bolt.NewKVStore(cmd.logger.With(zap.String("system", "bolt-kvstore")), cmd.boltPath) - if err := store.Open(ctx); err != nil { - return err - } - defer store.Close() - tenantStore := tenant.NewStore(store) - tenantService := tenant.NewService(tenantStore) - filter := influxdb.UserFilter{} - users, _, err := tenantService.FindUsers(ctx, filter) - if err != nil { - return err - } - - return PrintUsers(ctx, cmd.out, users) -} - -type userCreateCommand struct { - logger *zap.Logger - boltPath string - out io.Writer - username string - password string -} - -func NewUserCreateCommand() *cobra.Command { - var userCmd userCreateCommand - cmd := &cobra.Command{ - Use: "create", - Short: "Create new user", - RunE: func(cmd *cobra.Command, args []string) error { - config := logger.NewConfig() - config.Level = zapcore.InfoLevel - - newLogger, err := config.New(cmd.ErrOrStderr()) - if err != nil { - return err - } - userCmd.logger = newLogger - userCmd.out = cmd.OutOrStdout() - return userCmd.run() - }, - } - - defaultPath := filepath.Join(os.Getenv("HOME"), ".influxdbv2", "influxd.bolt") - cmd.Flags().StringVar(&userCmd.boltPath, "bolt-path", defaultPath, "Path to the BoltDB file") - cmd.Flags().StringVar(&userCmd.username, "username", "", "Name of the user") - cmd.Flags().StringVar(&userCmd.password, "password", "", "Password for new user") - - return cmd -} - -func (cmd *userCreateCommand) run() error { - ctx := context.Background() - store := bolt.NewKVStore(cmd.logger.With(zap.String("system", "bolt-kvstore")), cmd.boltPath) - if err := store.Open(ctx); err != nil { - return err - } - defer store.Close() - tenantStore := tenant.NewStore(store) - tenantService := tenant.NewService(tenantStore) - - if cmd.username == "" { - return fmt.Errorf("must provide --username") - } - if cmd.password == "" { - return fmt.Errorf("must provide --password") - } - - user := influxdb.User{ - Name: cmd.username, - } - - if err := tenantService.CreateUser(ctx, &user); err != nil { - return err - } - - if err := tenantService.SetPassword(ctx, user.ID, cmd.password); err != nil { - // attempt to delete new user because password failed - if delErr := tenantService.DeleteUser(ctx, user.ID); delErr != nil { - fmt.Fprintf(cmd.out, "Could not delete bad user %q after password set failed: %s", cmd.username, delErr) - } - return err - } - - // Print all users now that we have added one - filter := influxdb.UserFilter{} - users, _, err := tenantService.FindUsers(ctx, filter) - if err != nil { - return err - } - return PrintUsers(ctx, cmd.out, users) -} - -func PrintUsers(ctx context.Context, w io.Writer, v []*influxdb.User) error { - headers := []string{"ID", "Name"} - - var rows []map[string]interface{} - for _, u := range v { - row := map[string]interface{}{ - "ID": u.ID, - "Name": u.Name, - } - rows = append(rows, row) - } - - writer := tabwriter.NewTabWriter(w, false) - defer writer.Flush() - if err := writer.WriteHeaders(headers...); err != nil { - return err - } - for _, row := range rows { - if err := writer.Write(row); err != nil { - return err - } - } - return nil -} - -type 
userUpdateCommand struct { - logger *zap.Logger - boltPath string - out io.Writer - username string - id string - password string -} - -func NewUserUpdateCommand() *cobra.Command { - var userCmd userUpdateCommand - cmd := &cobra.Command{ - Use: "update", - Short: "Update user", - RunE: func(cmd *cobra.Command, args []string) error { - config := logger.NewConfig() - config.Level = zapcore.InfoLevel - - newLogger, err := config.New(cmd.ErrOrStderr()) - if err != nil { - return err - } - userCmd.logger = newLogger - userCmd.out = cmd.OutOrStdout() - return userCmd.run() - }, - } - - defaultPath := filepath.Join(os.Getenv("HOME"), ".influxdbv2", "influxd.bolt") - cmd.Flags().StringVar(&userCmd.boltPath, "bolt-path", defaultPath, "Path to the BoltDB file") - cmd.Flags().StringVar(&userCmd.username, "username", "", "Name of the user") - cmd.Flags().StringVar(&userCmd.id, "id", "", "ID of the user") - cmd.Flags().StringVar(&userCmd.password, "password", "", "New password for new user") - - return cmd -} - -func (cmd *userUpdateCommand) run() error { - ctx := context.Background() - store := bolt.NewKVStore(cmd.logger.With(zap.String("system", "bolt-kvstore")), cmd.boltPath) - if err := store.Open(ctx); err != nil { - return err - } - defer store.Close() - tenantStore := tenant.NewStore(store) - tenantService := tenant.NewService(tenantStore) - - if cmd.password == "" { - return fmt.Errorf("must provide a new password to set, with --password") - } - - filter := influxdb.UserFilter{} - if cmd.id != "" { - userID, err := platform.IDFromString(cmd.id) - if err != nil { - return fmt.Errorf("invalid id %q: %w", cmd.id, err) - } - filter.ID = userID - } else if cmd.username != "" { - filter.Name = &cmd.username - } - - users, _, err := tenantService.FindUsers(ctx, filter) - if err != nil { - return err - } - if len(users) != 1 { - return fmt.Errorf("expected 1 user, found %d", len(users)) - } - - if err := tenantService.SetPassword(ctx, users[0].ID, cmd.password); err != nil { - return err - } - - // Print all users now that we have added one - users, _, err = tenantService.FindUsers(ctx, filter) - if err != nil { - return err - } - return PrintUsers(ctx, cmd.out, users) -} diff --git a/cmd/influxd/recovery/user/user_test.go b/cmd/influxd/recovery/user/user_test.go deleted file mode 100644 index a74ccf06523..00000000000 --- a/cmd/influxd/recovery/user/user_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package user - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/cmd/influxd/recovery/testhelper" - "github.com/stretchr/testify/assert" -) - -func Test_User_Basic(t *testing.T) { - db := testhelper.NewTestBoltDb(t) - defer db.Close() - assert.Equal(t, `ID Name -08371db1dd8c8000 testuser -`, - testhelper.MustRunCommand(t, NewUserCommand(), "list", "--bolt-path", db.Name())) - - // existing user must not be created - assert.EqualError(t, testhelper.RunCommand(t, NewUserCommand(), "create", "--bolt-path", db.Name(), "--username", "testuser", "--password", "foo"), - "user with name testuser already exists") - - // user needs a long-ish password - assert.EqualError(t, testhelper.RunCommand(t, NewUserCommand(), "create", "--bolt-path", db.Name(), "--username", "testuser2", "--password", "foo"), "passwords must be at least 8 characters long") - assert.NoError(t, testhelper.RunCommand(t, NewUserCommand(), "create", "--bolt-path", db.Name(), "--username", "testuser2", "--password", "my_password"), "") - - // at least run the update code - assert.NoError(t, testhelper.RunCommand(t, 
NewUserCommand(), "update", "--bolt-path", db.Name(), "--username", "testuser2", "--password", "some_other_password"), "") - assert.Regexp(t, "\ttestuser2\n", - testhelper.MustRunCommand(t, NewUserCommand(), "list", "--bolt-path", db.Name())) -} diff --git a/cmd/influxd/upgrade/config.go b/cmd/influxd/upgrade/config.go deleted file mode 100644 index abedd66a7fa..00000000000 --- a/cmd/influxd/upgrade/config.go +++ /dev/null @@ -1,206 +0,0 @@ -package upgrade - -// Configuration file upgrade implementation. -// The strategy is to transform only those entries for which rule exists. - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/BurntSushi/toml" - "go.uber.org/zap" - "golang.org/x/text/encoding/unicode" - "golang.org/x/text/transform" -) - -// passthroughConfigRules maps v1 config key-names to corresponding v2 config key-names. -// The values of these configs will not be modified during the upgrade process. -var passthroughConfigRules = map[string]string{ - "reporting-disabled": "reporting-disabled", - "data.dir": "engine-path", - "data.wal-fsync-delay": "storage-wal-fsync-delay", - "data.validate-keys": "storage-validate-keys", - "data.cache-max-memory-size": "storage-cache-max-memory-size", - "data.cache-snapshot-memory-size": "storage-cache-snapshot-memory-size", - "data.cache-snapshot-write-cold-duration": "storage-cache-snapshot-write-cold-duration", - "data.compact-full-write-cold-duration": "storage-compact-full-write-cold-duration", - "data.compact-throughput-burst": "storage-compact-throughput-burst", - "data.max-concurrent-compactions": "storage-max-concurrent-compactions", - "data.max-index-log-file-size": "storage-max-index-log-file-size", - "data.series-id-set-cache-size": "storage-series-id-set-cache-size", - "data.series-file-max-concurrent-snapshot-compactions": "storage-series-file-max-concurrent-snapshot-compactions", - "data.tsm-use-madv-willneed": "storage-tsm-use-madv-willneed", - "retention.check-interval": "storage-retention-check-interval", - "shard-precreation.check-interval": "storage-shard-precreator-check-interval", - "shard-precreation.advance-period": "storage-shard-precreator-advance-period", - "coordinator.max-concurrent-queries": "query-concurrency", - "coordinator.max-select-point": "influxql-max-select-point", - "coordinator.max-select-series": "influxql-max-select-series", - "coordinator.max-select-buckets": "influxql-max-select-buckets", - "logging.level": "log-level", - "http.bind-address": "http-bind-address", - "http.https-certificate": "tls-cert", - "http.https-private-key": "tls-key", -} - -func loadV1Config(configFile string) (*configV1, *map[string]interface{}, error) { - _, err := os.Stat(configFile) - if err != nil { - return nil, nil, fmt.Errorf("1.x config file '%s' does not exist", configFile) - } - - // load 1.x config content into byte array - bs, err := load(configFile) - if err != nil { - return nil, nil, err - } - - // parse it into simplified v1 config used as return value - var configV1 configV1 - _, err = toml.Decode(string(bs), &configV1) - if err != nil { - return nil, nil, err - } - - // parse into a generic config map - var cAny map[string]interface{} - _, err = toml.Decode(string(bs), &cAny) - if err != nil { - return nil, nil, err - } - - return &configV1, &cAny, nil -} - -func load(path string) ([]byte, error) { - bs, err := os.ReadFile(path) - if err != nil { - return nil, err - } - - // From master-1.x/cmd/influxd/run/config.go: - // Handle any potential Byte-Order-Marks that may be in the 
config file. - // This is for Windows compatibility only. - // See https://github.com/influxdata/telegraf/issues/1378 and - // https://github.com/influxdata/influxdb/issues/8965. - bom := unicode.BOMOverride(transform.Nop) - bs, _, err = transform.Bytes(bom, bs) - - return bs, err -} - -// upgradeConfig upgrades existing 1.x configuration file to 2.x influxdb.toml file. -func upgradeConfig(v1Config map[string]interface{}, targetOptions optionsV2, log *zap.Logger) error { - // create and initialize helper - cu := &configUpgrader{ - rules: passthroughConfigRules, - log: log, - } - - // rewrite config options from V1 to V2 paths - cTransformed := cu.transform(v1Config) - - // update new config with upgrade command options - cu.updateV2Config(cTransformed, targetOptions) - - // write the upgraded config to disk - return cu.save(cTransformed, targetOptions.configPath) -} - -// configUpgrader is a helper used by `upgrade-config` command. -type configUpgrader struct { - rules map[string]string - log *zap.Logger -} - -func (cu *configUpgrader) updateV2Config(config map[string]interface{}, targetOptions optionsV2) { - if targetOptions.enginePath != "" { - config["engine-path"] = targetOptions.enginePath - } - if targetOptions.boltPath != "" { - config["bolt-path"] = targetOptions.boltPath - } -} - -func (cu *configUpgrader) save(config map[string]interface{}, path string) error { - // Open the target file, creating parent directories if needed. - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - outFile, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644) - if err != nil { - return err - } - defer outFile.Close() - - // Encode the config directly into the file as TOML. - return toml.NewEncoder(outFile).Encode(&config) -} - -// Credits: @rogpeppe (Roger Peppe) - -func (cu *configUpgrader) transform(v1Config map[string]interface{}) map[string]interface{} { - res := make(map[string]interface{}) - for old, new := range cu.rules { - if val, ok := cu.lookup(v1Config, old); ok { - res[new] = val - } - } - - // Special case: flip the value for pprof. - if val, ok := cu.lookup(v1Config, "http.pprof-enabled"); ok { - if b, ok := val.(bool); ok { - res["pprof-disabled"] = !b - } - } - - // Special case: ensure query settings are valid. - fixQueryLimits(res) - - return res -} - -// fixQueryLimits ensures that all query-related config settings are compatible -// with the upgraded value of the 'query-concurrency' setting. -func fixQueryLimits(v2Config map[string]interface{}) { - concurrencyVal, ok := v2Config["query-concurrency"] - if !ok { - return - } - var concurrency int64 - switch c := concurrencyVal.(type) { - case int: - concurrency = int64(c) - case int32: - concurrency = int64(c) - case int64: - concurrency = c - default: - concurrency = 0 - } - if concurrency == 0 { - // The upgrade process doesn't generate a value for query-queue-size, so if - // query-concurrency is 0 / unset then it's safe to leave query-queue-size unset. - return - } - - // When query-concurrency is > 0, query-queue-size must also be > 0.
- v2Config["query-queue-size"] = concurrency -} - -func (cu *configUpgrader) lookup(v1Config map[string]interface{}, path string) (interface{}, bool) { - for { - elem, rest, _ := strings.Cut(path, ".") - val, ok := v1Config[elem] - if rest == "" { - return val, ok - } - child, ok := val.(map[string]interface{}) - if !ok { - return nil, false - } - path, v1Config = rest, child - } -} diff --git a/cmd/influxd/upgrade/config_test.go b/cmd/influxd/upgrade/config_test.go deleted file mode 100644 index 8168db6286d..00000000000 --- a/cmd/influxd/upgrade/config_test.go +++ /dev/null @@ -1,472 +0,0 @@ -package upgrade - -import ( - "os" - "path/filepath" - "testing" - - "github.com/BurntSushi/toml" - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/pkg/testing/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestConfigUpgrade(t *testing.T) { - type testCase struct { - name string - config1x string - config2x string - } - - var testCases = []testCase{ - { - name: "minimal", - config1x: testConfigV1minimal, - config2x: testConfigV2minimal, - }, - { - name: "default", - config1x: testConfigV1default, - config2x: testConfigV2default, - }, - { - name: "empty", - config1x: testConfigV1empty, - config2x: testConfigV2empty, - }, - { - name: "obsolete / arrays", - config1x: testConfigV1obsoleteArrays, - config2x: testConfigV2obsoleteArrays, - }, - { - name: "query concurrency", - config1x: testConfigV1QueryConcurrency, - config2x: testConfigV2QueryConcurrency, - }, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - tmpdir := t.TempDir() - configFile := filepath.Join(tmpdir, "influxdb.conf") - configFileV2 := filepath.Join(filepath.Dir(configFile), "config.toml") - err := os.WriteFile(configFile, []byte(tc.config1x), 0444) - require.NoError(t, err) - - targetOtions := optionsV2{ - boltPath: "/db/.influxdbv2/influxd.bolt", - enginePath: "/db/.influxdbv2/engine", - configPath: configFileV2, - } - - var rawV1Config map[string]interface{} - if _, err = toml.Decode(tc.config1x, &rawV1Config); err != nil { - t.Fatal(err) - } - err = upgradeConfig(rawV1Config, targetOtions, zaptest.NewLogger(t)) - assert.NoError(t, err) - - var actual, expected map[string]interface{} - if _, err = toml.Decode(tc.config2x, &expected); err != nil { - t.Fatal(err) - } - if _, err = toml.DecodeFile(configFileV2, &actual); err != nil { - t.Fatal(err) - } - if diff := cmp.Diff(expected, actual); diff != "" { - t.Fatal(diff) - } - }) - } -} - -func TestConfigLoadFile(t *testing.T) { - var typicalRetval, emptyRetval configV1 - _, err := toml.Decode( - "[meta]\ndir=\"/var/lib/influxdb/meta\"\n[data]\ndir=\"/var/lib/influxdb/data\"\nwal-dir=\"/var/lib/influxdb/wal\"\n[http]\nbind-address=\":8086\"\nhttps-enabled=false", - &typicalRetval, - ) - require.NoError(t, err) - - type testCase struct { - name string - config1x string - retval *configV1 - } - - var testCases = []testCase{ - { - name: "minimal", - config1x: testConfigV1minimal, - retval: &typicalRetval, - }, - { - name: "default", - config1x: testConfigV1default, - retval: &typicalRetval, - }, - { - name: "empty", - config1x: testConfigV1empty, - retval: &emptyRetval, - }, - { - name: "obsolete / arrays", - config1x: testConfigV1obsoleteArrays, - retval: &typicalRetval, - }, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - tmpdir := t.TempDir() - configFile := filepath.Join(tmpdir, 
"influxdb.conf") - err := os.WriteFile(configFile, []byte(tc.config1x), 0444) - require.NoError(t, err) - retval, _, err := loadV1Config(configFile) - require.NoError(t, err) - - if diff := cmp.Diff(tc.retval, retval); diff != "" { - t.Fatal(diff) - } - }) - } -} - -func TestConfigLoadFileNotExists(t *testing.T) { - configFile := "/there/is/no/such/path/influxdb.conf" - - // try upgrade - - _, _, err := loadV1Config(configFile) - if err == nil { - t.Fatal("error expected") - } -} - -// 1.x test configs - -var testConfigV1minimal = `### Welcome to the InfluxDB configuration file. - -# Change this option to true to disable reporting. -reporting-disabled = false - -# Bind address to use for the RPC service for backup and restore. -bind-address = "127.0.0.1:8088" - -[meta] - dir = "/var/lib/influxdb/meta" - -[data] - dir = "/var/lib/influxdb/data" - wal-dir = "/var/lib/influxdb/wal" - wal-fsync-delay = "100s" - index-version = "inmem" - -[coordinator] - max-select-point = 0 - -[retention] - check-interval = "30m" - -[shard-precreation] - check-interval = "5m" - -[monitor] - store-enabled = true - -[http] - flux-enabled = false - bind-address = ":8086" - https-certificate = "/etc/ssl/influxdb.pem" - https-private-key = "/etc/ssl/influxdb-key.pem" - pprof-enabled = false - -[logging] - level = "debug" - -[subscriber] - -[[graphite]] - -[[collectd]] - -[[opentsdb]] - -[[udp]] - -[continuous_queries] - query-stats-enabled = true - -[tls] -` - -var testConfigV1default = `reporting-disabled = false -bind-address = "127.0.0.1:8088" - -[meta] - dir = "/var/lib/influxdb/meta" - retention-autocreate = true - logging-enabled = true - -[data] - dir = "/var/lib/influxdb/data" - wal-dir = "/var/lib/influxdb/wal" - wal-fsync-delay = "0s" - validate-keys = false - index-version = "tsi1" - query-log-enabled = true - cache-max-memory-size = 1073741824 - cache-snapshot-memory-size = 26214400 - cache-snapshot-write-cold-duration = "10m0s" - compact-full-write-cold-duration = "4h0m0s" - compact-throughput = 50331648 - compact-throughput-burst = 50331648 - max-concurrent-compactions = 0 - max-index-log-file-size = 1048576 - series-id-set-cache-size = 100 - series-file-max-concurrent-snapshot-compactions = 0 - trace-logging-enabled = false - tsm-use-madv-willneed = false - -[coordinator] - write-timeout = "10s" - max-concurrent-queries = 0 - query-timeout = "0s" - log-queries-after = "0s" - max-select-point = 0 - max-select-series = 0 - max-select-buckets = 0 - -[retention] - enabled = true - check-interval = "30m0s" - -[shard-precreation] - enabled = true - check-interval = "10m0s" - advance-period = "30m0s" - -[monitor] - store-enabled = true - store-database = "_internal" - store-interval = "10s" - -[subscriber] - enabled = true - http-timeout = "30s" - insecure-skip-verify = false - ca-certs = "" - write-concurrency = 40 - write-buffer-size = 1000 - -[http] - enabled = true - bind-address = ":8086" - auth-enabled = false - log-enabled = true - suppress-write-log = false - write-tracing = false - flux-enabled = false - flux-log-enabled = false - pprof-enabled = true - pprof-auth-enabled = false - debug-pprof-enabled = false - ping-auth-enabled = false - prom-read-auth-enabled = false - https-enabled = false - https-certificate = "/etc/ssl/influxdb.pem" - https-private-key = "" - max-row-limit = 0 - max-connection-limit = 0 - shared-secret = "" - realm = "InfluxDB" - unix-socket-enabled = false - unix-socket-permissions = "0777" - bind-socket = "/var/run/influxdb.sock" - max-body-size = 25000000 - access-log-path 
= "" - max-concurrent-write-limit = 0 - max-enqueued-write-limit = 0 - enqueued-write-timeout = 30000000000 - -[logging] - format = "auto" - level = "info" - suppress-logo = false - -[[graphite]] - enabled = false - bind-address = ":2003" - database = "graphite" - retention-policy = "" - protocol = "tcp" - batch-size = 5000 - batch-pending = 10 - batch-timeout = "1s" - consistency-level = "one" - separator = "." - udp-read-buffer = 0 - -[[collectd]] - enabled = false - bind-address = ":25826" - database = "collectd" - retention-policy = "" - batch-size = 5000 - batch-pending = 10 - batch-timeout = "10s" - read-buffer = 0 - typesdb = "/usr/share/collectd/types.db" - security-level = "none" - auth-file = "/etc/collectd/auth_file" - parse-multivalue-plugin = "split" - -[[opentsdb]] - enabled = false - bind-address = ":4242" - database = "opentsdb" - retention-policy = "" - consistency-level = "one" - tls-enabled = false - certificate = "/etc/ssl/influxdb.pem" - batch-size = 1000 - batch-pending = 5 - batch-timeout = "1s" - log-point-errors = true - -[[udp]] - enabled = false - bind-address = ":8089" - database = "udp" - retention-policy = "" - batch-size = 5000 - batch-pending = 10 - read-buffer = 0 - batch-timeout = "1s" - precision = "" - -[continuous_queries] - log-enabled = true - enabled = true - query-stats-enabled = false - run-interval = "1s" - -[tls] - min-version = "tls1.2" - max-version = "tls1.3" -` - -var testConfigV1obsoleteArrays = ` -reporting-disabled = true - -[meta] - dir = "/var/lib/influxdb/meta" - -[data] - dir = "/var/lib/influxdb/data" - wal-dir = "/var/lib/influxdb/wal" - -[http] - enabled = true - bind-address = ":8086" - -[[udp]] - enabled = false - bind-address = ":8089" - database = "udp" - retention-policy = "" - batch-size = 5000 - batch-pending = 10 - read-buffer = 0 - batch-timeout = "1s" - precision = "" - -[[udp]] - enabled = false - bind-address = ":8090" - database = "udp2" - retention-policy = "" - batch-size = 5000 - batch-pending = 10 - read-buffer = 0 - batch-timeout = "1s" - precision = "" -` - -var testConfigV1empty = ` -` - -var testConfigV1QueryConcurrency = ` -[coordinator] - max-concurrent-queries = 128 -` - -// 2.x test configs - -var testConfigV2minimal = `reporting-disabled = false -bolt-path = "/db/.influxdbv2/influxd.bolt" -engine-path = "/db/.influxdbv2/engine" -http-bind-address = ":8086" -influxql-max-select-point = 0 -log-level = "debug" -storage-retention-check-interval = "30m" -storage-shard-precreator-check-interval = "5m" -storage-wal-fsync-delay = "100s" -tls-cert = "/etc/ssl/influxdb.pem" -tls-key = "/etc/ssl/influxdb-key.pem" -pprof-disabled = true -` - -var testConfigV2default = `reporting-disabled = false -bolt-path = "/db/.influxdbv2/influxd.bolt" -engine-path = "/db/.influxdbv2/engine" -http-bind-address = ":8086" -influxql-max-select-buckets = 0 -influxql-max-select-point = 0 -influxql-max-select-series = 0 -log-level = "info" -query-concurrency = 0 -storage-cache-max-memory-size = 1073741824 -storage-cache-snapshot-memory-size = 26214400 -storage-cache-snapshot-write-cold-duration = "10m0s" -storage-compact-full-write-cold-duration = "4h0m0s" -storage-compact-throughput-burst = 50331648 -storage-max-concurrent-compactions = 0 -storage-max-index-log-file-size = 1048576 -storage-retention-check-interval = "30m0s" -storage-series-file-max-concurrent-snapshot-compactions = 0 -storage-series-id-set-cache-size = 100 -storage-shard-precreator-advance-period = "30m0s" -storage-shard-precreator-check-interval = "10m0s" 
-storage-tsm-use-madv-willneed = false -storage-validate-keys = false -storage-wal-fsync-delay = "0s" -tls-cert = "/etc/ssl/influxdb.pem" -tls-key = "" -pprof-disabled = false -` - -var testConfigV2obsoleteArrays = `reporting-disabled = true -bolt-path = "/db/.influxdbv2/influxd.bolt" -engine-path = "/db/.influxdbv2/engine" -http-bind-address = ":8086" -` - -var testConfigV2empty = ` -bolt-path = "/db/.influxdbv2/influxd.bolt" -engine-path = "/db/.influxdbv2/engine" -` - -var testConfigV2QueryConcurrency = ` -bolt-path = "/db/.influxdbv2/influxd.bolt" -engine-path = "/db/.influxdbv2/engine" -query-concurrency = 128 -query-queue-size = 128 -` diff --git a/cmd/influxd/upgrade/database.go b/cmd/influxd/upgrade/database.go deleted file mode 100644 index 0fa365bd35f..00000000000 --- a/cmd/influxd/upgrade/database.go +++ /dev/null @@ -1,237 +0,0 @@ -package upgrade - -import ( - "context" - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/dustin/go-humanize" - "github.com/influxdata/influx-cli/v2/clients" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/pkg/fs" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "go.uber.org/zap" -) - -// upgradeDatabases creates databases, buckets, retention policies and shard info according to 1.x meta and copies data -func upgradeDatabases(ctx context.Context, cli clients.CLI, v1 *influxDBv1, v2 *influxDBv2, opts *options, orgID platform.ID, log *zap.Logger) (map[string][]platform.ID, error) { - v1opts := opts.source - v2opts := opts.target - db2BucketIds := make(map[string][]platform.ID) - - targetDataPath := filepath.Join(v2opts.enginePath, "data") - targetWalPath := filepath.Join(v2opts.enginePath, "wal") - dirFilterFunc := func(path string) bool { - base := filepath.Base(path) - if base == "_series" || - (len(base) > 0 && base[0] == '_') || //skip internal databases - base == "index" { - return true - } - return false - } - if len(v1.meta.Databases()) == 0 { - log.Info("No database found in the 1.x meta") - return db2BucketIds, nil - } - if err := checkDiskSpace(cli, opts, log); err != nil { - return nil, err - } - - cqFile, err := os.OpenFile(v2opts.cqPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return nil, fmt.Errorf("error opening file for CQ export %s: %w", v2opts.cqPath, err) - } - defer cqFile.Close() - - log.Info("Upgrading databases") - // read each database / retention policy from v1.meta and create bucket db-name/rp-name - // create database in v2.meta - // copy shard info from v1.meta - // export any continuous queries - for _, db := range v1.meta.Databases() { - if db.Name == "_internal" { - log.Debug("Skipping _internal ") - continue - } - log.Debug("Upgrading database", zap.String("database", db.Name)) - - // db to buckets IDs mapping - db2BucketIds[db.Name] = make([]platform.ID, 0, len(db.RetentionPolicies)) - - for _, rp := range db.RetentionPolicies { - sourcePath := filepath.Join(v1opts.dataDir, db.Name, rp.Name) - - bucket := &influxdb.Bucket{ - OrgID: orgID, - Type: influxdb.BucketTypeUser, - Name: db.Name + "/" + rp.Name, - Description: fmt.Sprintf("Upgraded from v1 database %s with retention policy %s", db.Name, rp.Name), - RetentionPolicyName: rp.Name, - RetentionPeriod: rp.Duration, - ShardGroupDuration: rp.ShardGroupDuration, - } - log.Debug("Creating bucket", zap.String("Bucket", bucket.Name)) - err = v2.bucketSvc.CreateBucket(ctx, bucket) - if err != nil { 
- return nil, fmt.Errorf("error creating bucket %s: %w", bucket.Name, err) - - } - - db2BucketIds[db.Name] = append(db2BucketIds[db.Name], bucket.ID) - log.Debug("Creating database with retention policy", zap.String("database", bucket.ID.String())) - spec := rp.ToSpec() - spec.Name = meta.DefaultRetentionPolicyName - dbv2, err := v2.meta.CreateDatabaseWithRetentionPolicy(bucket.ID.String(), spec) - if err != nil { - return nil, fmt.Errorf("error creating database %s: %w", bucket.ID.String(), err) - } - - mapping := &influxdb.DBRPMapping{ - Database: db.Name, - RetentionPolicy: rp.Name, - Default: db.DefaultRetentionPolicy == rp.Name, - OrganizationID: orgID, - BucketID: bucket.ID, - } - log.Info( - "Creating mapping", - zap.String("database", mapping.Database), - zap.String("retention policy", mapping.RetentionPolicy), - zap.String("orgID", mapping.OrganizationID.String()), - zap.String("bucketID", mapping.BucketID.String()), - ) - err = v2.dbrpSvc.Create(ctx, mapping) - if err != nil { - return nil, fmt.Errorf("error creating mapping %s/%s -> Org %s, bucket %s: %w", mapping.Database, mapping.RetentionPolicy, mapping.OrganizationID.String(), mapping.BucketID.String(), err) - } - shardsNum := 0 - for _, sg := range rp.ShardGroups { - log.Debug( - "Creating shard group", - zap.String("database", dbv2.Name), - zap.String("retention policy", dbv2.DefaultRetentionPolicy), - zap.Time("time", sg.StartTime), - ) - shardsNum += len(sg.Shards) - _, err := v2.meta.CreateShardGroupWithShards(dbv2.Name, dbv2.DefaultRetentionPolicy, sg.StartTime, sg.Shards) - if err != nil { - return nil, fmt.Errorf("error creating database %s: %w", bucket.ID.String(), err) - } - } - //empty retention policy doesn't have data - if shardsNum > 0 { - targetPath := filepath.Join(targetDataPath, dbv2.Name, spec.Name) - log.Debug( - "Copying data", - zap.String("source", sourcePath), - zap.String("target", targetPath), - ) - err = CopyDir(sourcePath, - targetPath, - nil, - dirFilterFunc, - nil) - if err != nil { - return nil, fmt.Errorf("error copying v1 data from %s to %s: %w", sourcePath, targetPath, err) - } - sourcePath = filepath.Join(v1opts.walDir, db.Name, rp.Name) - targetPath = filepath.Join(targetWalPath, dbv2.Name, spec.Name) - log.Debug( - "Copying wal", - zap.String("source", sourcePath), - zap.String("target", targetPath), - ) - err = CopyDir(sourcePath, - targetPath, - nil, - dirFilterFunc, - nil) - if err != nil { - return nil, fmt.Errorf("error copying v1 data from %s to %s: %w", sourcePath, targetPath, err) - } - } else { - log.Warn("Empty retention policy, no shards found", zap.String("source", sourcePath)) - } - } - - // Output CQs in the same format as SHOW CONTINUOUS QUERIES - _, err := cqFile.WriteString(fmt.Sprintf("name: %s\n", db.Name)) - if err != nil { - return nil, err - } - maxNameLen := 4 // 4 == len("name"), the column header - for _, cq := range db.ContinuousQueries { - if len(cq.Name) > maxNameLen { - maxNameLen = len(cq.Name) - } - } - - headerPadding := maxNameLen - 4 + 1 - _, err = cqFile.WriteString(fmt.Sprintf("name%[1]squery\n----%[1]s-----\n", strings.Repeat(" ", headerPadding))) - if err != nil { - return nil, err - } - - for _, cq := range db.ContinuousQueries { - log.Debug("Exporting CQ", zap.String("db", db.Name), zap.String("cq_name", cq.Name)) - padding := maxNameLen - len(cq.Name) + 1 - - _, err := cqFile.WriteString(fmt.Sprintf("%s%s%s\n", cq.Name, strings.Repeat(" ", padding), cq.Query)) - if err != nil { - return nil, fmt.Errorf("error exporting continuous query %s from DB 
%s: %w", cq.Name, db.Name, err) - } - } - _, err = cqFile.WriteString("\n") - if err != nil { - return nil, err - } - } - - log.Info("Database upgrade complete", zap.Int("upgraded_count", len(db2BucketIds))) - return db2BucketIds, nil -} - -// checkDiskSpace ensures there is enough room at the target path to store -// a full copy of all V1 data. -func checkDiskSpace(cli clients.CLI, opts *options, log *zap.Logger) error { - log.Info("Checking available disk space") - - size, err := DirSize(opts.source.dataDir) - if err != nil { - return fmt.Errorf("error getting size of %s: %w", opts.source.dataDir, err) - } - - walSize, err := DirSize(opts.source.walDir) - if err != nil { - return fmt.Errorf("error getting size of %s: %w", opts.source.walDir, err) - } - size += walSize - - v2dir := filepath.Dir(opts.target.boltPath) - diskInfo, err := fs.DiskUsage(v2dir) - if err != nil { - return fmt.Errorf("error getting info of disk %s: %w", v2dir, err) - } - - freeBytes := humanize.Bytes(diskInfo.Free) - requiredBytes := humanize.Bytes(size) - log.Info("Computed disk space", zap.String("free", freeBytes), zap.String("required", requiredBytes)) - - if size > diskInfo.Free { - return fmt.Errorf("not enough space on target disk of %s: need %d, available %d", v2dir, size, diskInfo.Free) - } - if !opts.force { - if confirmed := cli.StdIO.GetConfirm(fmt.Sprintf(`Proceeding will copy all V1 data to %q - Space available: %s - Space required: %s -`, v2dir, freeBytes, requiredBytes)); !confirmed { - return errors.New("upgrade was canceled") - } - } - return nil -} diff --git a/cmd/influxd/upgrade/fs.go b/cmd/influxd/upgrade/fs.go deleted file mode 100644 index bca9448e6e4..00000000000 --- a/cmd/influxd/upgrade/fs.go +++ /dev/null @@ -1,138 +0,0 @@ -package upgrade - -import ( - "fmt" - "io" - "os" - "path/filepath" -) - -// DirSize returns total size in bytes of containing files -func DirSize(path string) (uint64, error) { - var size uint64 - err := filepath.WalkDir(path, func(_ string, entry os.DirEntry, err error) error { - if err != nil { - return err - } - if !entry.IsDir() { - info, err := entry.Info() - if err != nil { - return err - } - size += uint64(info.Size()) - } - return err - }) - return size, err -} - -// CopyFile copies the contents of the file named src to the file named -// by dst. The file will be created if it does not already exist. If the -// destination file exists, all it's contents will be replaced by the contents -// of the source file. The file mode will be copied from the source and -// the copied data is synced/flushed to stable storage. -func CopyFile(src, dst string) (err error) { - in, err := os.Open(src) - if err != nil { - return - } - defer in.Close() - - si, err := os.Stat(src) - if err != nil { - return - } - - out, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE, si.Mode()) - //out, err := os.Create(dst) - if err != nil { - return - } - defer func() { - if e := out.Close(); e != nil { - err = e - } - }() - - _, err = io.Copy(out, in) - if err != nil { - return - } - - err = out.Sync() - if err != nil { - return - } - - return -} - -// CopyDir recursively copies a directory tree, attempting to preserve permissions. -// Source directory must exist, destination directory must *not* exist. -// Symlinks are ignored and skipped. -// dirRenameFunc is a mapping function that transforms path to a new name. Returning the path specifies the directory should not be renamed. -// dirFilterFunc ignores all directories where dirFilterFunc(path) is true. 
Passing nil for dirFilterFunc includes all directories. -// fileFilterFunc ignores all files where fileFilterFunc(path) is true. Passing nil for fileFilterFunc includes all files. -func CopyDir(src string, dst string, dirRenameFunc func(path string) string, dirFilterFunc func(path string) bool, fileFilterFunc func(path string) bool) (err error) { - src = filepath.Clean(src) - dst = filepath.Clean(dst) - - if dirFilterFunc != nil && dirFilterFunc(src) { - return - } - si, err := os.Stat(src) - if err != nil { - return err - } - if !si.IsDir() { - return fmt.Errorf("source is not a directory") - } - - _, err = os.Stat(dst) - if err != nil && !os.IsNotExist(err) { - return - } - if err == nil { - return fmt.Errorf("destination '%s' already exists", dst) - } - - err = os.MkdirAll(dst, si.Mode()) - if err != nil { - return - } - - entries, err := os.ReadDir(src) - if err != nil { - return - } - - for _, entry := range entries { - srcPath := filepath.Join(src, entry.Name()) - entryName := entry.Name() - if dirRenameFunc != nil { - entryName = dirRenameFunc(entryName) - } - dstPath := filepath.Join(dst, entryName) - - if entry.IsDir() { - err = CopyDir(srcPath, dstPath, dirRenameFunc, dirFilterFunc, fileFilterFunc) - if err != nil { - return - } - } else { - // Skip symlinks. - if entry.Type().Perm()&os.ModeSymlink != 0 { - continue - } - if fileFilterFunc != nil && fileFilterFunc(src) { - continue - } - err = CopyFile(srcPath, dstPath) - if err != nil { - return - } - } - } - - return -} diff --git a/cmd/influxd/upgrade/fs_test.go b/cmd/influxd/upgrade/fs_test.go deleted file mode 100644 index c8ce336dd4f..00000000000 --- a/cmd/influxd/upgrade/fs_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package upgrade - -import ( - "bytes" - "math/rand" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestCopyDirAndDirSize(t *testing.T) { - tmpdir := t.TempDir() - - err := os.MkdirAll(filepath.Join(tmpdir, "1", "1", "1"), 0700) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(filepath.Join(tmpdir, "1", "2", "1"), 0770) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(filepath.Join(tmpdir, "1", "2", "skip"), 0770) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(filepath.Join(tmpdir, "2", "1", "1"), 0777) - if err != nil { - t.Fatal(err) - } - - bin11Mode := mustCreateFile(t, filepath.Join(tmpdir, "1", "1.bin"), 300, 0600) - bin1111Mode := mustCreateFile(t, filepath.Join(tmpdir, "1", "1", "1", "1.bin"), 250, 0600) - bin1112Mode := mustCreateFile(t, filepath.Join(tmpdir, "1", "1", "1", "2.bin"), 350, 0400) - bin1211Mode := mustCreateFile(t, filepath.Join(tmpdir, "1", "2", "1", "1.bin"), 200, 0640) - _ = mustCreateFile(t, filepath.Join(tmpdir, "1", "2", "skip", "1.bin"), 200, 0640) - bin2111Mode := mustCreateFile(t, filepath.Join(tmpdir, "2", "1", "1", "1.bin"), 200, 0644) - bin2112Mode := mustCreateFile(t, filepath.Join(tmpdir, "2", "1", "1", "2.bin"), 100, 0640) - - size, err := DirSize(tmpdir) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, uint64(1600), size) - - targetDir := t.TempDir() - targetDir = filepath.Join(targetDir, "x") - err = CopyDir(tmpdir, targetDir, nil, func(path string) bool { - base := filepath.Base(path) - return base == "skip" - }, - nil) - if err != nil { - t.Fatal(err) - } - assetFileExistHasSizeAndPerm(t, filepath.Join(targetDir, "1", "1.bin"), 300, bin11Mode) - assetFileExistHasSizeAndPerm(t, filepath.Join(targetDir, "1", "1", "1", "1.bin"), 250, bin1111Mode) - 
assetFileExistHasSizeAndPerm(t, filepath.Join(targetDir, "1", "1", "1", "2.bin"), 350, bin1112Mode) - assetFileExistHasSizeAndPerm(t, filepath.Join(targetDir, "1", "2", "1", "1.bin"), 200, bin1211Mode) - assert.NoFileExists(t, filepath.Join(targetDir, "1", "2", "skip", "1.bin")) - assetFileExistHasSizeAndPerm(t, filepath.Join(targetDir, "2", "1", "1", "1.bin"), 200, bin2111Mode) - assetFileExistHasSizeAndPerm(t, filepath.Join(targetDir, "2", "1", "1", "2.bin"), 100, bin2112Mode) -} - -func assetFileExistHasSizeAndPerm(t *testing.T, path string, size int, mode os.FileMode) { - t.Helper() - fi, err := os.Stat(path) - if err != nil { - t.Error(err) - } else { - assert.Equal(t, int64(size), fi.Size(), path) - assert.Equal(t, mode, fi.Mode()&0xFFF, path) - } -} - -func mustCreateFile(t *testing.T, path string, size int, mode os.FileMode) os.FileMode { - t.Helper() - var buff bytes.Buffer - - for i := 0; i < size; i++ { - b := byte(rand.Int31n(256)) - buff.Write([]byte{b}) - } - require.NoError(t, os.WriteFile(path, buff.Bytes(), mode)) - // Windows doesn't preserve the full FileMode, so we check the value that was - // actually persisted by the OS and return it here so we can assert that it - // remains unchanged later. - fi, err := os.Stat(path) - require.NoError(t, err) - return fi.Mode() -} diff --git a/cmd/influxd/upgrade/logging_unix.go b/cmd/influxd/upgrade/logging_unix.go deleted file mode 100644 index adafcd58c46..00000000000 --- a/cmd/influxd/upgrade/logging_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !windows - -package upgrade - -// Zap only has problems with Windows paths, so this is a no-op on Unix systems. -func (o *logOptions) zapSafeLogPath() (string, error) { - return o.logPath, nil -} diff --git a/cmd/influxd/upgrade/logging_windows.go b/cmd/influxd/upgrade/logging_windows.go deleted file mode 100644 index 69730c1795b..00000000000 --- a/cmd/influxd/upgrade/logging_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package upgrade - -import ( - "net/url" - "os" - "path/filepath" - - "go.uber.org/zap" -) - -// Work around a bug in zap's handling of absolute paths on Windows. -// See https://github.com/uber-go/zap/issues/621 - -const FakeWindowsScheme = "winfile" - -func init() { - newWinFileSink := func(u *url.URL) (zap.Sink, error) { - // Remove leading slash left by url.Parse() - return os.OpenFile(u.Path[1:], os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) - } - zap.RegisterSink(FakeWindowsScheme, newWinFileSink) -} - -func (o *logOptions) zapSafeLogPath() (string, error) { - logPath, err := filepath.Abs(o.logPath) - if err != nil { - return "", err - } - return FakeWindowsScheme + ":///" + logPath, nil -} diff --git a/cmd/influxd/upgrade/security.go b/cmd/influxd/upgrade/security.go deleted file mode 100644 index 6dfecc30269..00000000000 --- a/cmd/influxd/upgrade/security.go +++ /dev/null @@ -1,155 +0,0 @@ -package upgrade - -// Security upgrade implementation. -// Creates tokens representing v1 users. - -import ( - "context" - "errors" - "fmt" - "sort" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/influxdata/influxql" - "go.uber.org/zap" -) - -// upgradeUsers creates tokens representing v1 users. 
-func upgradeUsers( - ctx context.Context, - v1 *influxDBv1, - v2 *influxDBv2, - targetOptions *optionsV2, - dbBuckets map[string][]platform2.ID, - log *zap.Logger, -) (int, error) { - // check if there any 1.x users at all - v1meta := v1.meta - if len(v1meta.Users()) == 0 { - log.Info("There are no users in 1.x, nothing to upgrade") - return 0, nil - } - - // get helper instance - helper := newSecurityUpgradeHelper(log) - - // check if target buckets exists in 2.x - proceed := helper.checkDbBuckets(v1meta, dbBuckets) - if !proceed { - return 0, errors.New("upgrade: there were errors/warnings, please fix them and run the command again") - } - - // upgrade users - log.Info("Upgrading 1.x users") - numUpgraded := 0 - for _, row := range helper.sortUserInfo(v1meta.Users()) { - username := row.Name - if row.Admin { - log.Warn("User is admin and will not be upgraded", zap.String("username", username)) - } else if len(row.Privileges) == 0 { - log.Warn("User has no privileges and will not be upgraded", zap.String("username", username)) - } else { - var dbList []string - for database := range row.Privileges { - dbList = append(dbList, database) - } - var permissions []platform.Permission - for _, database := range dbList { - permission := row.Privileges[database] - for _, id := range dbBuckets[database] { - switch permission { - case influxql.ReadPrivilege: - p, err := platform.NewPermissionAtID(id, platform.ReadAction, platform.BucketsResourceType, targetOptions.orgID) - if err != nil { - return numUpgraded, err - } - permissions = append(permissions, *p) - case influxql.WritePrivilege: - p, err := platform.NewPermissionAtID(id, platform.WriteAction, platform.BucketsResourceType, targetOptions.orgID) - if err != nil { - return numUpgraded, err - } - permissions = append(permissions, *p) - case influxql.AllPrivileges: - p, err := platform.NewPermissionAtID(id, platform.ReadAction, platform.BucketsResourceType, targetOptions.orgID) - if err != nil { - return numUpgraded, err - } - permissions = append(permissions, *p) - p, err = platform.NewPermissionAtID(id, platform.WriteAction, platform.BucketsResourceType, targetOptions.orgID) - if err != nil { - return numUpgraded, err - } - permissions = append(permissions, *p) - } - } - } - if len(permissions) > 0 { - auth := &platform.Authorization{ - Description: username + "'s Legacy Token", - Permissions: permissions, - Token: username, - OrgID: targetOptions.orgID, - UserID: targetOptions.userID, - } - err := v2.authSvc.CreateAuthorization(ctx, auth) - if err != nil { - log.Error("Failed to create authorization", zap.String("user", username), zap.Error(err)) - continue - } - err = v2.authSvc.SetPasswordHash(ctx, auth.ID, row.Hash) - if err != nil { - log.Error("Failed to set user's password", zap.String("user", username), zap.Error(err)) - continue - } - log.Debug("User upgraded", zap.String("username", username)) - numUpgraded++ - } else { - log.Warn("User has no privileges and will not be upgraded", zap.String("username", username)) - } - } - } - - log.Info("User upgrade complete", zap.Int("upgraded_count", numUpgraded)) - return numUpgraded, nil -} - -// securityUpgradeHelper is a helper used by `upgrade` command. -type securityUpgradeHelper struct { - log *zap.Logger -} - -// newSecurityUpgradeHelper returns new security script helper instance for `upgrade` command. 
-func newSecurityUpgradeHelper(log *zap.Logger) *securityUpgradeHelper { - helper := &securityUpgradeHelper{ - log: log, - } - - return helper -} -func (h *securityUpgradeHelper) checkDbBuckets(meta *meta.Client, databases map[string][]platform2.ID) bool { - ok := true - for _, row := range meta.Users() { - for database := range row.Privileges { - if database == "_internal" { - continue - } - ids := databases[database] - if len(ids) == 0 { - h.log.Warn(fmt.Sprintf("No buckets for database [%s] exist in 2.x.", database)) - ok = false - } - } - } - - return ok -} - -func (h *securityUpgradeHelper) sortUserInfo(info []meta.UserInfo) []meta.UserInfo { - sort.Slice(info, func(i, j int) bool { - return info[i].Name < info[j].Name - }) - return info -} diff --git a/cmd/influxd/upgrade/security_test.go b/cmd/influxd/upgrade/security_test.go deleted file mode 100644 index 1c779e90c68..00000000000 --- a/cmd/influxd/upgrade/security_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package upgrade - -import ( - "context" - "errors" - "fmt" - "reflect" - "sort" - "testing" - "unsafe" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv/migration" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/pkg/testing/assert" - "github.com/influxdata/influxdb/v2/tenant" - authv1 "github.com/influxdata/influxdb/v2/v1/authorization" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/influxdata/influxql" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" - "golang.org/x/crypto/bcrypt" -) - -func TestUpgradeSecurity(t *testing.T) { - - type testCase struct { - name string - users []meta.UserInfo - db2ids map[string][]platform.ID - wantErr error - want []*influxdb.Authorization - } - - hash := func(password string) string { - hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) - require.NoError(t, err) - return string(hash) - } - - var testCases = []testCase{ - { - name: "ordinary", - users: []meta.UserInfo{ - { // not upgraded because admin - Name: "superman", - Admin: true, - Hash: hash("superman@123"), - }, - { // not upgraded because no privileges - Name: "loser", - Admin: false, - Hash: hash("loser@123"), - }, - { - Name: "weatherman", - Admin: false, - Hash: hash("weatherman@123"), - Privileges: map[string]influxql.Privilege{ - "water": influxql.AllPrivileges, - "air": influxql.AllPrivileges, - }, - }, - { - Name: "hitgirl", - Admin: false, - Hash: hash("hitgirl@123"), - Privileges: map[string]influxql.Privilege{ - "hits": influxql.WritePrivilege, - }, - }, - { - Name: "boss@hits.org", // special name - Admin: false, - Hash: hash("boss@123"), - Privileges: map[string]influxql.Privilege{ - "hits": influxql.AllPrivileges, - }, - }, - { - Name: "viewer", - Admin: false, - Hash: hash("viewer@123"), - Privileges: map[string]influxql.Privilege{ - "water": influxql.ReadPrivilege, - "air": influxql.ReadPrivilege, - }, - }, - }, - db2ids: map[string][]platform.ID{ - "water": {0x33f9d67bc9cbc5b7, 0x33f9d67bc9cbc5b8, 0x33f9d67bc9cbc5b9}, - "air": {0x43f9d67bc9cbc5b7, 0x43f9d67bc9cbc5b8, 0x43f9d67bc9cbc5b9}, - "hits": {0x53f9d67bc9cbc5b7}, - }, - want: []*influxdb.Authorization{ - { - Token: 
"boss@hits.org", - Status: "active", - Description: "boss@hits.org's Legacy Token", - }, - { - Token: "hitgirl", - Status: "active", - Description: "hitgirl's Legacy Token", - }, - { - Token: "viewer", - Status: "active", - Description: "viewer's Legacy Token", - }, - { - Token: "weatherman", - Status: "active", - Description: "weatherman's Legacy Token", - }, - }, - }, - { - name: "missing buckets", - users: []meta.UserInfo{ - { - Name: "weatherman", - Admin: false, - Hash: hash("weatherman@123"), - Privileges: map[string]influxql.Privilege{ - "water": influxql.AllPrivileges, - "air": influxql.AllPrivileges, - }, - }, - }, - db2ids: nil, - wantErr: errors.New("upgrade: there were errors/warnings, please fix them and run the command again"), - }, - { - name: "no users", - users: []meta.UserInfo{}, - }, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { // better do not run in parallel - ctx := context.Background() - log := zaptest.NewLogger(t) - - // mock v1 meta - v1 := &influxDBv1{ - meta: &meta.Client{}, - } - data := &meta.Data{ - Users: tc.users, - } - f := reflect.ValueOf(v1.meta).Elem().Field(4) - f = reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem() - f.Set(reflect.ValueOf(data)) - - // mock v2 meta - kvStore := inmem.NewKVStore() - migrator, err := migration.NewMigrator(zap.NewNop(), kvStore, all.Migrations[:]...) - require.NoError(t, err) - err = migrator.Up(ctx) - require.NoError(t, err) - - authStoreV1, err := authv1.NewStore(kvStore) - require.NoError(t, err) - - tenantStore := tenant.NewStore(kvStore) - tenantSvc := tenant.NewService(tenantStore) - - authStoreV2, err := authorization.NewStore(kvStore) - require.NoError(t, err) - - v2 := &influxDBv2{ - authSvc: authv1.NewService(authStoreV1, tenantSvc), - onboardSvc: tenant.NewOnboardService( - tenantSvc, - authorization.NewService(authStoreV2, tenantSvc), - ), - } - - // onboard admin - oReq := &influxdb.OnboardingRequest{ - User: "admin", - Password: "12345678", - Org: "testers", - Bucket: "def", - RetentionPeriodSeconds: influxdb.InfiniteRetention, - } - oResp, err := setupAdmin(ctx, v2, oReq) - require.NoError(t, err) - - // target options - targetOptions := optionsV2{ - userName: oReq.User, - orgName: oReq.Org, - token: oResp.Auth.Token, - orgID: oResp.Auth.OrgID, - userID: oResp.Auth.UserID, - } - - for k, v := range tc.db2ids { - for i, id := range v { - b := &influxdb.Bucket{ - ID: id, - Name: fmt.Sprintf("%s_%d", k, id), - OrgID: targetOptions.orgID, - } - err := tenantSvc.CreateBucket(context.Background(), b) - require.NoError(t, err) - tc.db2ids[k][i] = b.ID - } - } - - // fill in expected permissions now that we know IDs - for _, want := range tc.want { - for _, user := range tc.users { - if want.Token == user.Name { // v1 username is v2 token - var permissions []influxdb.Permission - for db, privilege := range user.Privileges { - ids, ok := tc.db2ids[db] - require.True(t, ok) - for _, id := range ids { - id := id - resource := influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: &targetOptions.orgID, - ID: &id, - } - switch privilege { - case influxql.ReadPrivilege: - permissions = append(permissions, influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: resource, - }) - case influxql.WritePrivilege: - permissions = append(permissions, influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: resource, - }) - case influxql.AllPrivileges: - permissions = append(permissions, influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: 
resource, - }) - permissions = append(permissions, influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: resource, - }) - } - } - } - want.Permissions = permissions - } - } - } - - // command execution - n, err := upgradeUsers(ctx, v1, v2, &targetOptions, tc.db2ids, log) - assert.Equal(t, len(tc.want), n, "Upgraded count must match") - if err != nil { - if tc.wantErr != nil { - if diff := cmp.Diff(tc.wantErr.Error(), err.Error()); diff != "" { - t.Fatal(diff) - } - } else { - t.Fatal(err) - } - } else if tc.wantErr != nil { - t.Fatalf("should have failed with %v", tc.wantErr) - } - for _, want := range tc.want { - actual, err := v2.authSvc.FindAuthorizationByToken(ctx, want.Token) - require.NoError(t, err) - if diff := cmp.Diff(targetOptions.orgID, actual.OrgID); diff != "" { - t.Fatal(diff) - } - if diff := cmp.Diff(targetOptions.userID, actual.UserID); diff != "" { - t.Fatal(diff) - } - if diff := cmp.Diff(want.Token, actual.Token); diff != "" { - t.Fatal(diff) - } - if diff := cmp.Diff(want.Description, actual.Description); diff != "" { - t.Fatal(diff) - } - if diff := cmp.Diff(want.Status, actual.Status); diff != "" { - t.Fatal(diff) - } - sort.Slice(want.Permissions, func(i, j int) bool { - return *(want.Permissions[i].Resource.ID) < *(want.Permissions[j].Resource.ID) - }) - sort.Slice(actual.Permissions, func(i, j int) bool { - return *(actual.Permissions[i].Resource.ID) < *(actual.Permissions[j].Resource.ID) - }) - if diff := cmp.Diff(want.Permissions, actual.Permissions); diff != "" { - t.Logf("permissions mismatch for user %s", want.Token) - t.Fatal(diff) - } - } - }) - } -} diff --git a/cmd/influxd/upgrade/setup.go b/cmd/influxd/upgrade/setup.go deleted file mode 100644 index 5eabcd0c3ba..00000000000 --- a/cmd/influxd/upgrade/setup.go +++ /dev/null @@ -1,76 +0,0 @@ -package upgrade - -import ( - "context" - "errors" - "fmt" - "path/filepath" - - "github.com/influxdata/influx-cli/v2/clients" - "github.com/influxdata/influx-cli/v2/clients/setup" - "github.com/influxdata/influx-cli/v2/config" - "github.com/influxdata/influxdb/v2" - "go.uber.org/zap" -) - -func setupAdmin(ctx context.Context, v2 *influxDBv2, req *influxdb.OnboardingRequest) (*influxdb.OnboardingResults, error) { - res, err := v2.onboardSvc.OnboardInitialUser(ctx, req) - - if err != nil { - return nil, fmt.Errorf("onboarding error: %w", err) - } - return res, nil -} - -func onboardingRequest(cli clients.CLI, options *options) (*influxdb.OnboardingRequest, error) { - setupClient := setup.Client{CLI: cli} - cliReq, err := setupClient.OnboardingRequest(&setup.Params{ - Username: options.target.userName, - Password: options.target.password, - AuthToken: options.target.token, - Org: options.target.orgName, - Bucket: options.target.bucket, - Retention: options.target.retention, - Force: options.force, - }) - if err != nil { - return nil, err - } - req := influxdb.OnboardingRequest{ - User: cliReq.Username, - Org: cliReq.Org, - Bucket: cliReq.Bucket, - } - if cliReq.Password != nil { - req.Password = *cliReq.Password - } - if cliReq.RetentionPeriodSeconds != nil { - req.RetentionPeriodSeconds = *cliReq.RetentionPeriodSeconds - } - if cliReq.Token != nil { - req.Token = *cliReq.Token - } - return &req, nil -} - -func saveLocalConfig(sourceOptions *optionsV1, targetOptions *optionsV2, log *zap.Logger) error { - dPath, dir := targetOptions.cliConfigsPath, filepath.Dir(targetOptions.cliConfigsPath) - if dPath == "" || dir == "" { - return errors.New("a valid configurations path must be 
provided") - } - - localConfigSVC := config.NewLocalConfigService(targetOptions.cliConfigsPath) - p := config.DefaultConfig - p.Token = targetOptions.token - p.Org = targetOptions.orgName - if sourceOptions.dbURL != "" { - p.Host = sourceOptions.dbURL - } - if _, err := localConfigSVC.CreateConfig(p); err != nil { - log.Error("failed to save CLI config", zap.String("path", dPath), zap.Error(err)) - return errors.New("failed to save CLI config") - } - log.Info("CLI config has been stored.", zap.String("path", dPath)) - - return nil -} diff --git a/cmd/influxd/upgrade/setup_test.go b/cmd/influxd/upgrade/setup_test.go deleted file mode 100644 index 1c919ea92b9..00000000000 --- a/cmd/influxd/upgrade/setup_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package upgrade - -import ( - "path/filepath" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influx-cli/v2/config" - "github.com/influxdata/influxdb/v2/bolt" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestLocalConfig(t *testing.T) { - - type testCase struct { - name string - sourceOpts *optionsV1 - targetOpts *optionsV2 - want config.Config - } - - var testCases = []testCase{ - { - name: "default", - sourceOpts: &optionsV1{}, - targetOpts: &optionsV2{ - orgName: "my-org", - token: "my-token", - }, - want: config.Config{ - Name: config.DefaultConfig.Name, - Host: config.DefaultConfig.Host, - Org: "my-org", - Token: "my-token", - Active: config.DefaultConfig.Active, - }, - }, - { - name: "v1 url", - sourceOpts: &optionsV1{ - dbURL: "https://10.0.0.1:8086", - }, - targetOpts: &optionsV2{ - orgName: "my-org", - token: "my-token", - }, - want: config.Config{ - Name: config.DefaultConfig.Name, - Host: "https://10.0.0.1:8086", - Org: "my-org", - Token: "my-token", - Active: config.DefaultConfig.Active, - }, - }, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - dir := t.TempDir() - log := zaptest.NewLogger(t) - // adjust paths to runtime values - opts := tc.targetOpts - opts.boltPath = filepath.Join(dir, bolt.DefaultFilename) - opts.cliConfigsPath = filepath.Join(dir, "configs") - opts.enginePath = filepath.Join(dir, "engine") - // execute op - err := saveLocalConfig(tc.sourceOpts, opts, log) - require.NoError(t, err) - // verify saved config - localConfigSVC := config.NewLocalConfigService(opts.cliConfigsPath) - actual, err := localConfigSVC.Active() - require.NoError(t, err) - if diff := cmp.Diff(tc.want, actual); diff != "" { - t.Fatal(diff) - } - }) - } -} diff --git a/cmd/influxd/upgrade/testdata/v1db.zip b/cmd/influxd/upgrade/testdata/v1db.zip deleted file mode 100644 index 7e0240a0706..00000000000 Binary files a/cmd/influxd/upgrade/testdata/v1db.zip and /dev/null differ diff --git a/cmd/influxd/upgrade/upgrade.go b/cmd/influxd/upgrade/upgrade.go deleted file mode 100644 index aa4526feee1..00000000000 --- a/cmd/influxd/upgrade/upgrade.go +++ /dev/null @@ -1,747 +0,0 @@ -package upgrade - -import ( - "context" - "errors" - "fmt" - "net/url" - "os" - "os/user" - "path/filepath" - "strings" - - "github.com/influxdata/influx-cli/v2/clients" - "github.com/influxdata/influx-cli/v2/pkg/stdio" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - "github.com/influxdata/influxdb/v2/bolt" - "github.com/influxdata/influxdb/v2/dbrp" - "github.com/influxdata/influxdb/v2/internal/fs" - 
"github.com/influxdata/influxdb/v2/kit/cli" - "github.com/influxdata/influxdb/v2/kit/metric" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/storage" - "github.com/influxdata/influxdb/v2/tenant" - authv1 "github.com/influxdata/influxdb/v2/v1/authorization" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/influxdata/influxdb/v2/v1/services/meta/filestore" - "github.com/spf13/cobra" - "github.com/spf13/viper" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// Simplified 1.x config. -type configV1 struct { - Meta struct { - Dir string `toml:"dir"` - } `toml:"meta"` - Data struct { - Dir string `toml:"dir"` - WALDir string `toml:"wal-dir"` - } `toml:"data"` - Http struct { - BindAddress string `toml:"bind-address"` - HttpsEnabled bool `toml:"https-enabled"` - AuthEnabled bool `toml:"auth-enabled"` - } `toml:"http"` -} - -func (c *configV1) dbURL() string { - address := c.Http.BindAddress - if address == "" { // fallback to default - address = ":8086" - } - var url url.URL - if c.Http.HttpsEnabled { - url.Scheme = "https" - } else { - url.Scheme = "http" - } - if strings.HasPrefix(address, ":") { // address is just :port - url.Host = "localhost" + address - } else { - url.Host = address - } - return url.String() -} - -type optionsV1 struct { - metaDir string - walDir string - dataDir string - dbURL string - // cmd option - dbDir string - configFile string -} - -// populateDirs sets values for expected sub-directories of o.dbDir -func (o *optionsV1) populateDirs() { - o.metaDir = filepath.Join(o.dbDir, "meta") - o.dataDir = filepath.Join(o.dbDir, "data") - o.walDir = filepath.Join(o.dbDir, "wal") -} - -type optionsV2 struct { - boltPath string - cliConfigsPath string - enginePath string - cqPath string - configPath string - rmConflicts bool - - userName string - password string - orgName string - bucket string - orgID platform.ID - userID platform.ID - token string - retention string -} - -type options struct { - // flags for source InfluxDB - source optionsV1 - - // flags for target InfluxDB - target optionsV2 - - force bool -} - -type logOptions struct { - logLevel zapcore.Level - logPath string -} - -func NewCommand(ctx context.Context, v *viper.Viper) (*cobra.Command, error) { - - // target flags - v2dir, err := fs.InfluxDir() - if err != nil { - return nil, fmt.Errorf("error fetching default InfluxDB 2.0 dir: %w", err) - } - - // DEPRECATED in favor of log-level=debug, but left for backwards-compatibility - verbose := false - logOptions := &logOptions{} - options := &options{} - - cmd := &cobra.Command{ - Use: "upgrade", - Short: "Upgrade a 1.x version of InfluxDB", - Long: ` - Upgrades a 1.x version of InfluxDB by performing the following actions: - 1. Reads the 1.x config file and creates a 2.x config file with matching options. Unsupported 1.x options are reported. - 2. Copies 1.x database files. - 3. Creates influx CLI configurations. - 4. Exports any 1.x continuous queries to disk. - - If --config-file is not passed, 1.x db folder (--v1-dir options) is taken as an input. If neither option is given, - the CLI will search for config under ${HOME}/.influxdb/ and /etc/influxdb/. 
If config can't be found, the CLI assumes - a standard V1 directory structure under ${HOME}/.influxdb/. - - Target 2.x database dir is specified by the --engine-path option. If changed, the bolt path should be changed as well. -`, - RunE: func(cmd *cobra.Command, _ []string) error { - logger, err := buildLogger(logOptions, verbose) - if err != nil { - return err - } - return runUpgradeE(ctx, clients.CLI{StdIO: stdio.TerminalStdio}, options, logger) - }, - Args: cobra.NoArgs, - } - - opts := []cli.Opt{ - { - DestP: &options.source.dbDir, - Flag: "v1-dir", - Desc: "path to source 1.x db directory containing meta, data and wal sub-folders", - }, - { - DestP: &verbose, - Flag: "verbose", - Default: false, - Desc: "DEPRECATED: use --log-level=debug instead", - Short: 'v', - Hidden: true, - }, - { - DestP: &options.target.boltPath, - Flag: "bolt-path", - Default: filepath.Join(v2dir, bolt.DefaultFilename), - Desc: "path for boltdb database", - Short: 'm', - }, - { - DestP: &options.target.cliConfigsPath, - Flag: "influx-configs-path", - Default: filepath.Join(v2dir, "configs"), - Desc: "path for 2.x CLI configurations file", - Short: 'c', - }, - { - DestP: &options.target.enginePath, - Flag: "engine-path", - Default: filepath.Join(v2dir, "engine"), - Desc: "path for persistent engine files", - Short: 'e', - }, - { - DestP: &options.target.cqPath, - Flag: "continuous-query-export-path", - Default: filepath.Join(homeOrAnyDir(), "continuous_queries.txt"), - Desc: "path for exported 1.x continuous queries", - }, - { - DestP: &options.target.userName, - Flag: "username", - Default: "", - Desc: "primary username", - Short: 'u', - }, - { - DestP: &options.target.password, - Flag: "password", - Default: "", - Desc: "password for username", - Short: 'p', - }, - { - DestP: &options.target.orgName, - Flag: "org", - Default: "", - Desc: "primary organization name", - Short: 'o', - }, - { - DestP: &options.target.bucket, - Flag: "bucket", - Default: "", - Desc: "primary bucket name", - Short: 'b', - }, - { - DestP: &options.target.retention, - Flag: "retention", - Default: "", - Desc: "optional: duration bucket will retain data (i.e '1w' or '72h'). 
Default is infinite.", - Short: 'r', - }, - { - DestP: &options.target.token, - Flag: "token", - Default: "", - Desc: "optional: token for username, else auto-generated", - Short: 't', - }, - { - DestP: &options.source.configFile, - Flag: "config-file", - Desc: "optional: Custom InfluxDB 1.x config file path, else the default config file", - }, - { - DestP: &options.target.configPath, - Flag: "v2-config-path", - Default: filepath.Join(v2dir, "config.toml"), - Desc: "optional: Custom path where upgraded 2.x config should be written", - }, - { - DestP: &logOptions.logLevel, - Flag: "log-level", - Default: zapcore.InfoLevel, - Desc: "supported log levels are debug, info, warn and error", - }, - { - DestP: &logOptions.logPath, - Flag: "log-path", - Default: filepath.Join(homeOrAnyDir(), "upgrade.log"), - Desc: "optional: custom log file path", - }, - { - DestP: &options.force, - Flag: "force", - Default: false, - Desc: "skip the confirmation prompt", - Short: 'f', - }, - { - DestP: &options.target.rmConflicts, - Flag: "overwrite-existing-v2", - Default: false, - Desc: "if files are present at an output path, overwrite them instead of aborting the upgrade process", - }, - } - - if err := cli.BindOptions(v, cmd, opts); err != nil { - return nil, err - } - // add sub commands - cmd.AddCommand(v1DumpMetaCommand) - cmd.AddCommand(v2DumpMetaCommand) - return cmd, nil -} - -type influxDBv1 struct { - meta *meta.Client -} - -type influxDBv2 struct { - log *zap.Logger - boltClient *bolt.Client - store *bolt.KVStore - kvStore kv.SchemaStore - tenantStore *tenant.Store - ts *tenant.Service - dbrpSvc influxdb.DBRPMappingService - bucketSvc influxdb.BucketService - onboardSvc influxdb.OnboardingService - authSvc *authv1.Service - authSvcV2 influxdb.AuthorizationService - meta *meta.Client -} - -func (i *influxDBv2) close() error { - err := i.meta.Close() - if err != nil { - return err - } - err = i.boltClient.Close() - if err != nil { - return err - } - err = i.store.Close() - if err != nil { - return err - } - return nil -} - -func buildLogger(options *logOptions, verbose bool) (*zap.Logger, error) { - config := zap.NewProductionConfig() - - config.Level = zap.NewAtomicLevelAt(options.logLevel) - if verbose { - config.Level.SetLevel(zap.DebugLevel) - } - logPath, err := options.zapSafeLogPath() - if err != nil { - return nil, err - } - - config.OutputPaths = append(config.OutputPaths, logPath) - config.ErrorOutputPaths = append(config.ErrorOutputPaths, logPath) - - log, err := config.Build() - if err != nil { - return nil, err - } - if verbose { - log.Warn("--verbose is deprecated, use --log-level=debug instead") - } - return log, nil -} - -func runUpgradeE(ctx context.Context, cli clients.CLI, options *options, log *zap.Logger) error { - if options.source.configFile != "" && options.source.dbDir != "" { - return errors.New("only one of --v1-dir or --config-file may be specified") - } - - if options.source.configFile == "" && options.source.dbDir == "" { - // Try finding config at usual paths - options.source.configFile = influxConfigPathV1() - // If not found, try loading a V1 dir under HOME. - if options.source.configFile == "" { - v1dir, err := influxDirV1() - if err != nil { - return fmt.Errorf("error fetching default InfluxDB 1.x dir: %w", err) - } - options.source.dbDir = v1dir - } - } - - v1Config := &configV1{} - var genericV1ops *map[string]interface{} - var err error - - if options.source.configFile != "" { - // If config is present, use it to set data paths. 
- v1Config, genericV1ops, err = loadV1Config(options.source.configFile) - if err != nil { - return err - } - options.source.metaDir = v1Config.Meta.Dir - options.source.dataDir = v1Config.Data.Dir - options.source.walDir = v1Config.Data.WALDir - } else { - // Otherwise, assume a standard directory layout - // and the default port on localhost. - options.source.populateDirs() - } - - options.source.dbURL = v1Config.dbURL() - if err := options.source.validatePaths(); err != nil { - return err - } - checkV2paths := options.target.validatePaths - if options.target.rmConflicts { - checkV2paths = options.target.clearPaths - } - if err := checkV2paths(); err != nil { - return err - } - - log.Info("Starting InfluxDB 1.x upgrade") - - if genericV1ops != nil { - log.Info("Upgrading config file", zap.String("file", options.source.configFile)) - if err := upgradeConfig(*genericV1ops, options.target, log); err != nil { - return err - } - log.Info( - "Config file upgraded.", - zap.String("1.x config", options.source.configFile), - zap.String("2.x config", options.target.configPath), - ) - - } else { - log.Info("No InfluxDB 1.x config file specified, skipping its upgrade") - } - - log.Info("Upgrade source paths", zap.String("meta", options.source.metaDir), zap.String("data", options.source.dataDir)) - log.Info("Upgrade target paths", zap.String("bolt", options.target.boltPath), zap.String("engine", options.target.enginePath)) - - v1, err := newInfluxDBv1(&options.source) - if err != nil { - return err - } - - v2, err := newInfluxDBv2(ctx, &options.target, log) - if err != nil { - return err - } - - defer func() { - if err := v2.close(); err != nil { - log.Error("Failed to close 2.0 services", zap.Error(err)) - } - }() - - canOnboard, err := v2.onboardSvc.IsOnboarding(ctx) - if err != nil { - return err - } - - if !canOnboard { - return errors.New("InfluxDB has been already set up") - } - - req, err := onboardingRequest(cli, options) - if err != nil { - return err - } - or, err := setupAdmin(ctx, v2, req) - if err != nil { - return err - } - - options.target.orgID = or.Org.ID - options.target.userID = or.User.ID - options.target.token = or.Auth.Token - - err = saveLocalConfig(&options.source, &options.target, log) - if err != nil { - return err - } - - db2BucketIds, err := upgradeDatabases(ctx, cli, v1, v2, options, or.Org.ID, log) - if err != nil { - // remove all files - log.Error("Database upgrade error, removing data", zap.Error(err)) - if e := os.Remove(options.target.boltPath); e != nil { - log.Error("Unable to remove bolt database", zap.Error(e)) - } - - if e := os.RemoveAll(options.target.enginePath); e != nil { - log.Error("Unable to remove time series data", zap.Error(e)) - } - return err - } - - usersUpgraded, err := upgradeUsers(ctx, v1, v2, &options.target, db2BucketIds, log) - if err != nil { - return err - } - if usersUpgraded > 0 && !v1Config.Http.AuthEnabled { - log.Warn( - "1.x users were upgraded, but 1.x auth was not enabled. Existing clients will fail authentication against 2.x if using invalid credentials", - ) - } - - log.Info( - "Upgrade successfully completed. Start the influxd service now, then log in", - zap.String("login_url", options.source.dbURL), - ) - - return nil -} - -// validatePaths ensures that all paths pointing to V1 inputs are usable by the upgrade command. 
-func (o *optionsV1) validatePaths() error { - if o.dbDir != "" { - fi, err := os.Stat(o.dbDir) - if err != nil { - return fmt.Errorf("1.x DB dir '%s' does not exist", o.dbDir) - } - if !fi.IsDir() { - return fmt.Errorf("1.x DB dir '%s' is not a directory", o.dbDir) - } - } - - metaDb := filepath.Join(o.metaDir, "meta.db") - _, err := os.Stat(metaDb) - if err != nil { - return fmt.Errorf("1.x meta.db '%s' does not exist: %w", metaDb, err) - } - - return nil -} - -// validatePaths ensures that none of the paths pointing to V2 outputs refer to existing files. -func (o *optionsV2) validatePaths() error { - if o.configPath != "" { - if _, err := os.Stat(o.configPath); err == nil { - return fmt.Errorf("file present at target path for upgraded 2.x config file %q", o.configPath) - } else if !os.IsNotExist(err) { - return fmt.Errorf("error checking for existing file at %q: %w", o.configPath, err) - } - } - - if _, err := os.Stat(o.boltPath); err == nil { - return fmt.Errorf("file present at target path for upgraded 2.x bolt DB: %q", o.boltPath) - } else if !os.IsNotExist(err) { - return fmt.Errorf("error checking for existing file at %q: %w", o.boltPath, err) - } - - if fi, err := os.Stat(o.enginePath); err == nil { - if !fi.IsDir() { - return fmt.Errorf("upgraded 2.x engine path %q is not a directory", o.enginePath) - } - entries, err := os.ReadDir(o.enginePath) - if err != nil { - return fmt.Errorf("error checking contents of existing engine directory %q: %w", o.enginePath, err) - } - if len(entries) > 0 { - return fmt.Errorf("upgraded 2.x engine directory %q must be empty", o.enginePath) - } - } else if !os.IsNotExist(err) { - return fmt.Errorf("error checking for existing file at %q: %w", o.enginePath, err) - } - - if _, err := os.Stat(o.cliConfigsPath); err == nil { - return fmt.Errorf("file present at target path for 2.x CLI configs %q", o.cliConfigsPath) - } else if !os.IsNotExist(err) { - return fmt.Errorf("error checking for existing file at %q: %w", o.cliConfigsPath, err) - } - - if _, err := os.Stat(o.cqPath); err == nil { - return fmt.Errorf("file present at target path for exported continuous queries %q", o.cqPath) - } else if !os.IsNotExist(err) { - return fmt.Errorf("error checking for existing file at %q: %w", o.cqPath, err) - } - - return nil -} - -// clearPaths deletes any files already present at the specified V2 output paths. 
-func (o *optionsV2) clearPaths() error { - if o.configPath != "" { - if err := os.RemoveAll(o.configPath); err != nil { - return fmt.Errorf("couldn't delete existing file at %q: %w", o.configPath, err) - } - } - - if err := os.RemoveAll(o.boltPath); err != nil { - return fmt.Errorf("couldn't delete existing file at %q: %w", o.boltPath, err) - } - - if err := os.RemoveAll(o.enginePath); err != nil { - return fmt.Errorf("couldn't delete existing file at %q: %w", o.enginePath, err) - } - - if err := os.RemoveAll(o.cliConfigsPath); err != nil { - return fmt.Errorf("couldn't delete existing file at %q: %w", o.cliConfigsPath, err) - } - - if err := os.RemoveAll(o.cqPath); err != nil { - return fmt.Errorf("couldn't delete existing file at %q: %w", o.cqPath, err) - } - - return nil -} - -func newInfluxDBv1(opts *optionsV1) (svc *influxDBv1, err error) { - svc = &influxDBv1{} - svc.meta, err = openV1Meta(opts.metaDir) - if err != nil { - return nil, fmt.Errorf("error opening 1.x meta.db: %w", err) - } - - return svc, nil -} - -func newInfluxDBv2(ctx context.Context, opts *optionsV2, log *zap.Logger) (svc *influxDBv2, err error) { - reg := prom.NewRegistry(log.With(zap.String("service", "prom_registry"))) - - svc = &influxDBv2{} - svc.log = log - - // Create BoltDB store and K/V service - svc.boltClient = bolt.NewClient(log.With(zap.String("service", "bolt"))) - svc.boltClient.Path = opts.boltPath - if err := svc.boltClient.Open(ctx); err != nil { - log.Error("Failed opening bolt", zap.Error(err)) - return nil, err - } - - svc.store = bolt.NewKVStore(log.With(zap.String("service", "kvstore-bolt")), opts.boltPath) - svc.store.WithDB(svc.boltClient.DB()) - svc.kvStore = svc.store - - // ensure migrator is run - migrator, err := migration.NewMigrator( - log.With(zap.String("service", "migrations")), - svc.kvStore, - all.Migrations[:]..., - ) - if err != nil { - log.Error("Failed to initialize kv migrator", zap.Error(err)) - return nil, err - } - - // apply migrations to metadata store - if err := migrator.Up(ctx); err != nil { - log.Error("Failed to apply migrations", zap.Error(err)) - return nil, err - } - - // Create Tenant service (orgs, buckets, ) - svc.tenantStore = tenant.NewStore(svc.kvStore) - svc.ts = tenant.NewSystem(svc.tenantStore, log.With(zap.String("store", "new")), reg, metric.WithSuffix("new")) - - svc.meta = meta.NewClient(meta.NewConfig(), svc.kvStore) - if err := svc.meta.Open(); err != nil { - return nil, err - } - - // DB/RP service - svc.dbrpSvc = dbrp.NewService(ctx, svc.ts.BucketService, svc.kvStore) - svc.bucketSvc = svc.ts.BucketService - - engine := storage.NewEngine( - opts.enginePath, - storage.NewConfig(), - storage.WithMetaClient(svc.meta), - ) - - svc.ts.BucketService = storage.NewBucketService(log, svc.ts.BucketService, engine) - - authStoreV2, err := authorization.NewStore(svc.store) - if err != nil { - return nil, err - } - - svc.authSvcV2 = authorization.NewService(authStoreV2, svc.ts) - - // on-boarding service (influx setup) - svc.onboardSvc = tenant.NewOnboardService(svc.ts, svc.authSvcV2) - - // v1 auth service - authStoreV1, err := authv1.NewStore(svc.kvStore) - if err != nil { - return nil, err - } - - svc.authSvc = authv1.NewService(authStoreV1, svc.ts) - - return svc, nil -} - -func openV1Meta(dir string) (*meta.Client, error) { - cfg := meta.NewConfig() - cfg.Dir = dir - store := filestore.New(cfg.Dir, string(meta.BucketName), "meta.db") - c := meta.NewClient(cfg, store) - if err := c.Open(); err != nil { - return nil, err - } - - return c, nil -} - -// 
influxDirV1 retrieves the influxdb directory. -func influxDirV1() (string, error) { - var dir string - // By default, store meta and data files in current users home directory - u, err := user.Current() - if err == nil { - dir = u.HomeDir - } else if home := os.Getenv("HOME"); home != "" { - dir = home - } else { - wd, err := os.Getwd() - if err != nil { - return "", err - } - dir = wd - } - dir = filepath.Join(dir, ".influxdb") - - return dir, nil -} - -// influxConfigPathV1 returns default 1.x config file path or empty path if not found. -func influxConfigPathV1() string { - if envVar := os.Getenv("INFLUXDB_CONFIG_PATH"); envVar != "" { - return envVar - } - for _, path := range []string{ - os.ExpandEnv("${HOME}/.influxdb/influxdb.conf"), - "/etc/influxdb/influxdb.conf", - } { - if _, err := os.Stat(path); err == nil { - return path - } - } - - return "" -} - -// homeOrAnyDir retrieves user's home directory, current working one or just none. -func homeOrAnyDir() string { - var dir string - u, err := user.Current() - if err == nil { - dir = u.HomeDir - } else if home := os.Getenv("HOME"); home != "" { - dir = home - } else if home := os.Getenv("USERPROFILE"); home != "" { - dir = home - } else { - wd, err := os.Getwd() - if err != nil { - dir = "" - } else { - dir = wd - } - } - - return dir -} diff --git a/cmd/influxd/upgrade/upgrade_test.go b/cmd/influxd/upgrade/upgrade_test.go deleted file mode 100644 index 4986c2ab88b..00000000000 --- a/cmd/influxd/upgrade/upgrade_test.go +++ /dev/null @@ -1,364 +0,0 @@ -package upgrade - -import ( - "context" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "runtime" - "strings" - "testing" - - "github.com/BurntSushi/toml" - "github.com/dustin/go-humanize" - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influx-cli/v2/clients" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/bolt" - "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" - "github.com/influxdata/influxdb/v2/internal/testutil" - "github.com/influxdata/influxdb/v2/kit/cli" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/spf13/cobra" - "github.com/spf13/viper" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" -) - -func TestPathValidations(t *testing.T) { - tmpdir := t.TempDir() - - v1Dir := filepath.Join(tmpdir, "v1db") - v2Dir := filepath.Join(tmpdir, "v2db") - - boltPath := filepath.Join(v2Dir, bolt.DefaultFilename) - configsPath := filepath.Join(v2Dir, "configs") - enginePath := filepath.Join(v2Dir, "engine") - - err := os.MkdirAll(filepath.Join(enginePath, "db"), 0777) - require.Nil(t, err) - - sourceOpts := &optionsV1{ - dbDir: v1Dir, - configFile: "", - } - sourceOpts.populateDirs() - - targetOpts := &optionsV2{ - boltPath: boltPath, - cliConfigsPath: configsPath, - enginePath: enginePath, - } - - err = sourceOpts.validatePaths() - require.NotNil(t, err, "Must fail") - require.Contains(t, err.Error(), "1.x DB dir") - - err = os.MkdirAll(filepath.Join(v1Dir, "meta"), 0777) - require.Nil(t, err) - - err = sourceOpts.validatePaths() - require.NotNil(t, err, "Must fail") - require.Contains(t, err.Error(), "1.x meta.db") - - err = os.WriteFile(filepath.Join(v1Dir, "meta", "meta.db"), []byte{1}, 0777) - require.Nil(t, err) - - err = sourceOpts.validatePaths() - require.Nil(t, err) - - err = targetOpts.validatePaths() - require.NotNil(t, err, "Must fail") - require.Contains(t, 
err.Error(), "2.x engine") - - err = os.Remove(filepath.Join(enginePath, "db")) - require.Nil(t, err) - - err = os.WriteFile(configsPath, []byte{1}, 0777) - require.Nil(t, err) - - err = targetOpts.validatePaths() - require.NotNil(t, err, "Must fail") - require.Contains(t, err.Error(), "2.x CLI configs") -} - -func TestClearTargetPaths(t *testing.T) { - tmpdir := t.TempDir() - - v2Dir := filepath.Join(tmpdir, "v2db") - boltPath := filepath.Join(v2Dir, bolt.DefaultFilename) - configsPath := filepath.Join(v2Dir, "configs") - enginePath := filepath.Join(v2Dir, "engine") - cqPath := filepath.Join(v2Dir, "cqs") - configPath := filepath.Join(v2Dir, "config") - - err := os.MkdirAll(filepath.Join(enginePath, "db"), 0777) - require.NoError(t, err) - err = os.WriteFile(boltPath, []byte{1}, 0777) - require.NoError(t, err) - err = os.WriteFile(configsPath, []byte{1}, 0777) - require.NoError(t, err) - err = os.WriteFile(cqPath, []byte{1}, 0777) - require.NoError(t, err) - err = os.WriteFile(configPath, []byte{1}, 0777) - require.NoError(t, err) - - targetOpts := &optionsV2{ - boltPath: boltPath, - cliConfigsPath: configsPath, - enginePath: enginePath, - configPath: configPath, - cqPath: cqPath, - } - - err = targetOpts.validatePaths() - require.Error(t, err) - err = targetOpts.clearPaths() - require.NoError(t, err) - err = targetOpts.validatePaths() - require.NoError(t, err) -} - -func TestDbURL(t *testing.T) { - - type testCase struct { - name string - conf string - want string - } - - var testCases = []testCase{ - { - name: "default", - conf: "[meta]\n[data]\n[http]\n", - want: "http://localhost:8086", - }, - { - name: "custom but same as default", - conf: "[meta]\n[data]\n[http]\nbind-address=\":8086\"\nhttps-enabled=false", - want: "http://localhost:8086", - }, - { - name: "custom no host", - conf: "[meta]\n[data]\n[http]\nbind-address=\":8186\"\nhttps-enabled=true", - want: "https://localhost:8186", - }, - { - name: "custom with host", - conf: "[meta]\n[data]\n[http]\nbind-address=\"10.0.0.1:8086\"\nhttps-enabled=true", - want: "https://10.0.0.1:8086", - }, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - var c configV1 - _, err := toml.Decode(tc.conf, &c) - require.NoError(t, err) - if diff := cmp.Diff(tc.want, c.dbURL()); diff != "" { - t.Fatal(diff) - } - }) - } -} - -func TestUpgradeRealDB(t *testing.T) { - ctx := context.Background() - - tmpdir := t.TempDir() - - err := testutil.Unzip(filepath.Join("testdata", "v1db.zip"), tmpdir) - require.NoError(t, err) - - v1ConfigPath := filepath.Join(tmpdir, "v1.conf") - v1Config, err := os.Create(v1ConfigPath) - require.NoError(t, err) - defer v1Config.Close() - - var pathsep = "/" - if runtime.GOOS == "windows" { - // Turn '\' into '\\' so it's escaped properly. 
- pathsep = "\\\\" - tmpdir = strings.ReplaceAll(tmpdir, "\\", "\\\\") - } - _, err = v1Config.WriteString(fmt.Sprintf(`reporting-disabled = true -[meta] - dir = "%[1]s%[2]sv1db%[2]smeta" - -[data] - dir = "%[1]s%[2]sv1db%[2]sdata" - wal-dir = "%[1]s%[2]sv1db%[2]swal" - -[coordinator] - max-concurrent-queries = 0 -`, - tmpdir, pathsep)) - require.NoError(t, err) - v1Config.Close() - - tl := launcher.NewTestLauncherServer() - boltPath := filepath.Join(tl.Path, bolt.DefaultFilename) - enginePath := filepath.Join(tl.Path, "engine") - cqPath := filepath.Join(tl.Path, "cq.txt") - cliConfigPath := filepath.Join(tl.Path, "influx-configs") - configPath := filepath.Join(tl.Path, "config.toml") - - v1opts := &optionsV1{configFile: v1ConfigPath} - v2opts := &optionsV2{ - boltPath: boltPath, - enginePath: enginePath, - cqPath: cqPath, - cliConfigsPath: cliConfigPath, - configPath: configPath, - userName: "my-user", - password: "my-password", - orgName: "my-org", - bucket: "my-bucket", - retention: "7d", - token: "my-token", - } - - opts := &options{source: *v1opts, target: *v2opts, force: true} - log := zaptest.NewLogger(t, zaptest.Level(zap.InfoLevel)) - err = runUpgradeE(ctx, clients.CLI{}, opts, log) - require.NoError(t, err) - - v := viper.New() - v.SetConfigFile(configPath) - require.NoError(t, v.ReadInConfig()) - lOpts := launcher.NewOpts(v) - cliOpts := lOpts.BindCliOpts() - - cmd := cobra.Command{ - Use: "test", - Run: func(*cobra.Command, []string) { - tl.RunOrFail(t, ctx, func(o *launcher.InfluxdOpts) { - *o = *lOpts - }) - defer tl.ShutdownOrFail(t, ctx) - - orgs, _, err := tl.OrganizationService().FindOrganizations(ctx, influxdb.OrganizationFilter{}) - require.NoError(t, err) - require.NotNil(t, orgs) - require.Len(t, orgs, 1) - require.Equal(t, "my-org", orgs[0].Name) - - users, _, err := tl.UserService().FindUsers(ctx, influxdb.UserFilter{}) - require.NoError(t, err) - require.NotNil(t, users) - require.Len(t, users, 1) - require.Equal(t, "my-user", users[0].Name) - - tokenNames := []string{"reader", "writer", "readerwriter"} - compatTokens, _, err := tl.Launcher.AuthorizationV1Service().FindAuthorizations(ctx, influxdb.AuthorizationFilter{}) - require.NoError(t, err) - require.NotNil(t, compatTokens) - require.Len(t, compatTokens, len(tokenNames)) - - buckets, _, err := tl.Launcher.BucketService().FindBuckets(ctx, influxdb.BucketFilter{}) - require.NoError(t, err) - - bucketNames := []string{"my-bucket", "_tasks", "_monitoring", "mydb/autogen", "mydb/1week", "test/autogen", "empty/autogen"} - myDbAutogenBucketId := "" - myDb1weekBucketId := "" - testBucketId := "" - emptyBucketId := "" - - require.NotNil(t, buckets) - require.Len(t, buckets, len(bucketNames)) - - for _, b := range buckets { - require.Contains(t, bucketNames, b.Name) - switch b.Name { - case bucketNames[0]: - tl.Bucket = b - case bucketNames[3]: - myDbAutogenBucketId = b.ID.String() - case bucketNames[4]: - myDb1weekBucketId = b.ID.String() - case bucketNames[5]: - testBucketId = b.ID.String() - case bucketNames[6]: - emptyBucketId = b.ID.String() - } - require.NotZero(t, b.ShardGroupDuration) - } - require.NoDirExists(t, filepath.Join(enginePath, "data", "_internal")) - - // Ensure retention policy from the setup request passed through to the bucket. 
- require.Equal(t, humanize.Week, tl.Bucket.RetentionPeriod) - - dbChecks := []struct { - dbname string - shardsNum int - }{ - {myDbAutogenBucketId, 3}, - {testBucketId, 5}, - {myDb1weekBucketId, 1}, - {emptyBucketId, 0}, - } - - for _, check := range dbChecks { - db := tl.Launcher.Engine().MetaClient().Database(check.dbname) - require.NotNil(t, db) - require.Len(t, db.ShardInfos(), check.shardsNum) - if check.shardsNum > 0 { - require.DirExists(t, filepath.Join(enginePath, "data", check.dbname, meta.DefaultRetentionPolicyName)) - } - } - - auths, _, err := tl.Launcher.AuthorizationService().FindAuthorizations(ctx, influxdb.AuthorizationFilter{}) - require.NoError(t, err) - require.Len(t, auths, 1) - - respBody := mustRunQuery(t, tl, "test", "select count(avg) from stat", auths[0].Token) - require.Contains(t, respBody, `["1970-01-01T00:00:00Z",5776]`) - - respBody = mustRunQuery(t, tl, "mydb", "select count(avg) from testv1", auths[0].Token) - require.Contains(t, respBody, `["1970-01-01T00:00:00Z",2882]`) - - respBody = mustRunQuery(t, tl, "mydb", "select count(i) from testv1", auths[0].Token) - require.Contains(t, respBody, `["1970-01-01T00:00:00Z",21]`) - - respBody = mustRunQuery(t, tl, "mydb", `select count(line) from mydb."1week".log`, auths[0].Token) - require.Contains(t, respBody, `["1970-01-01T00:00:00Z",1]`) - - cqBytes, err := os.ReadFile(cqPath) - require.NoError(t, err) - cqs := string(cqBytes) - - require.Contains(t, cqs, "CREATE CONTINUOUS QUERY other_cq ON test BEGIN SELECT mean(foo) INTO test.autogen.foo FROM empty.autogen.foo GROUP BY time(1h) END") - require.Contains(t, cqs, "CREATE CONTINUOUS QUERY cq_3 ON test BEGIN SELECT mean(bar) INTO test.autogen.bar FROM test.autogen.foo GROUP BY time(1m) END") - require.Contains(t, cqs, "CREATE CONTINUOUS QUERY cq ON empty BEGIN SELECT mean(example) INTO empty.autogen.mean FROM empty.autogen.raw GROUP BY time(1h) END") - }, - } - require.NoError(t, cli.BindOptions(v, &cmd, cliOpts)) - require.NoError(t, cmd.Execute()) -} - -func mustRunQuery(t *testing.T, tl *launcher.TestLauncher, db, rawQ, token string) string { - queryUrl := *tl.URL() - queryUrl.Path = "/query" - - params := queryUrl.Query() - params.Set("db", db) - params.Set("q", rawQ) - queryUrl.RawQuery = params.Encode() - - req, err := http.NewRequest(http.MethodGet, queryUrl.String(), nil) - require.Nil(t, err) - - req.Header.Set("Authorization", "Token "+token) - resp, err := http.DefaultClient.Do(req) - require.Nil(t, err) - - respBody, err := io.ReadAll(resp.Body) - require.Nil(t, err) - - return string(respBody) -} diff --git a/cmd/influxd/upgrade/v1_dump_meta.go b/cmd/influxd/upgrade/v1_dump_meta.go deleted file mode 100644 index 508badbf47d..00000000000 --- a/cmd/influxd/upgrade/v1_dump_meta.go +++ /dev/null @@ -1,105 +0,0 @@ -package upgrade - -import ( - "fmt" - "os" - "path/filepath" - "text/tabwriter" - - "github.com/influxdata/influxdb/v2/fluxinit" - "github.com/spf13/cobra" -) - -var v1DumpMetaCommand = &cobra.Command{ - Use: "v1-dump-meta", - Short: "Dump InfluxDB 1.x meta.db", - Args: cobra.NoArgs, - Hidden: true, - RunE: func(cmd *cobra.Command, args []string) error { - fluxinit.FluxInit() - svc, err := newInfluxDBv1(&v1DumpMetaOptions) - if err != nil { - return fmt.Errorf("error opening 1.x meta.db: %w", err) - } - meta := svc.meta - - tw := tabwriter.NewWriter(os.Stdout, 15, 4, 1, ' ', 0) - - showBool := func(b bool) string { - if b { - return "✓" - } - return "" - } - - fmt.Fprintln(os.Stdout, "Databases") - fmt.Fprintln(os.Stdout, 
"---------") - fmt.Fprintf(tw, "%s\t%s\t%s\n", "Name", "Default RP", "Shards") - for _, row := range meta.Databases() { - fmt.Fprintf(tw, "%s\t%s\t", row.Name, row.DefaultRetentionPolicy) - for i, si := range row.ShardInfos() { - if i > 0 { - fmt.Fprint(tw, ",") - } - fmt.Fprintf(tw, "%d", si.ID) - } - fmt.Fprintln(tw) - } - _ = tw.Flush() - fmt.Fprintln(os.Stdout) - - fmt.Fprintln(os.Stdout, "Retention policies") - fmt.Fprintln(os.Stdout, "---------") - fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", "Database", "Name", "Duration", "Shard Group duration") - for _, db := range meta.Databases() { - for _, rp := range db.RetentionPolicies { - fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", db.Name, rp.Name, rp.Duration.String(), rp.ShardGroupDuration.String()) - } - } - _ = tw.Flush() - fmt.Fprintln(os.Stdout) - - fmt.Fprintln(os.Stdout, "Shard groups") - fmt.Fprintln(os.Stdout, "---------") - fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", "Database/RP", "Start Time", "End Time", "Shards") - for _, db := range meta.Databases() { - for _, rp := range db.RetentionPolicies { - for _, sg := range rp.ShardGroups { - fmt.Fprintf(tw, "%s/%s\t%s\t%s\t", db.Name, rp.Name, sg.StartTime.String(), sg.EndTime.String()) - for i, si := range sg.Shards { - if i > 0 { - fmt.Fprint(tw, ",") - } - fmt.Fprintf(tw, "%d", si.ID) - } - fmt.Fprintln(tw) - } - } - } - _ = tw.Flush() - fmt.Fprintln(os.Stdout) - - fmt.Fprintln(os.Stdout, "Users") - fmt.Fprintln(os.Stdout, "-----") - fmt.Fprintf(tw, "%s\t%s\n", "Name", "Admin") - for _, row := range meta.Users() { - fmt.Fprintf(tw, "%s\t%s\n", row.Name, showBool(row.Admin)) - } - _ = tw.Flush() - fmt.Fprintln(os.Stdout) - return nil - }, -} - -var v1DumpMetaOptions = optionsV1{} - -func init() { - flags := v1DumpMetaCommand.Flags() - - v1dir, err := influxDirV1() - if err != nil { - panic("error fetching default InfluxDB 1.x dir: " + err.Error()) - } - - flags.StringVar(&v1DumpMetaOptions.metaDir, "v1-meta-dir", filepath.Join(v1dir, "meta"), "Path to meta.db directory") -} diff --git a/cmd/influxd/upgrade/v2_dump_meta.go b/cmd/influxd/upgrade/v2_dump_meta.go deleted file mode 100644 index 9ce34ba5d73..00000000000 --- a/cmd/influxd/upgrade/v2_dump_meta.go +++ /dev/null @@ -1,168 +0,0 @@ -package upgrade - -import ( - "context" - "fmt" - "os" - "path/filepath" - "text/tabwriter" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/fluxinit" - "github.com/influxdata/influxdb/v2/internal/fs" - "github.com/spf13/cobra" - "go.uber.org/zap" -) - -var v2DumpMetaCommand = &cobra.Command{ - Use: "v2-dump-meta", - Short: "Dump InfluxDB 2.x influxd.bolt", - Args: cobra.NoArgs, - Hidden: true, - RunE: func(cmd *cobra.Command, args []string) error { - fluxinit.FluxInit() - ctx := context.Background() - svc, err := newInfluxDBv2(ctx, &v2DumpMetaOptions, zap.NewNop()) - if err != nil { - return fmt.Errorf("error opening InfluxDB 2.0: %w", err) - } - - tw := tabwriter.NewWriter(os.Stdout, 15, 4, 1, ' ', 0) - - fmt.Fprintln(os.Stdout, "Orgs") - fmt.Fprintln(os.Stdout, "----") - fmt.Fprintf(tw, "%s\t%s\n", "ID", "Name") - orgs, _, err := svc.ts.FindOrganizations(ctx, influxdb.OrganizationFilter{}) - if err != nil { - return err - } - for _, row := range orgs { - fmt.Fprintf(tw, "%s\t%s\n", row.ID.String(), row.Name) - } - _ = tw.Flush() - fmt.Fprintln(os.Stdout) - - fmt.Fprintln(os.Stdout, "Users") - fmt.Fprintln(os.Stdout, "-----") - fmt.Fprintf(tw, "%s\t%s\n", "ID", "Name") - users, _, err := svc.ts.FindUsers(ctx, influxdb.UserFilter{}) - if err != nil { - return 
err - } - for _, row := range users { - fmt.Fprintf(tw, "%s\t%s\n", row.ID.String(), row.Name) - } - _ = tw.Flush() - fmt.Fprintln(os.Stdout) - - fmt.Fprintln(os.Stdout, "Buckets") - fmt.Fprintln(os.Stdout, "-------") - fmt.Fprintf(tw, "%s\t%s\n", "ID", "Name") - buckets, _, err := svc.ts.FindBuckets(ctx, influxdb.BucketFilter{}) - if err != nil { - return err - } - for _, row := range buckets { - fmt.Fprintf(tw, "%s\t%s\n", row.ID.String(), row.Name) - } - _ = tw.Flush() - fmt.Fprintln(os.Stdout) - - fmt.Fprintln(os.Stdout, "Databases") - fmt.Fprintln(os.Stdout, "---------") - fmt.Fprintf(tw, "%s\t%s\t%s\n", "Name", "Default RP", "Shards") - for _, row := range svc.meta.Databases() { - fmt.Fprintf(tw, "%s\t%s\t", row.Name, row.DefaultRetentionPolicy) - for i, si := range row.ShardInfos() { - if i > 0 { - fmt.Fprint(tw, ",") - } - fmt.Fprintf(tw, "%d", si.ID) - } - fmt.Fprintln(tw) - } - _ = tw.Flush() - fmt.Fprintln(os.Stdout) - - fmt.Fprintln(os.Stdout, "Retention policies") - fmt.Fprintln(os.Stdout, "---------") - fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", "Database", "Name", "Duration", "Shard Group duration") - for _, db := range svc.meta.Databases() { - for _, rp := range db.RetentionPolicies { - fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", db.Name, rp.Name, rp.Duration.String(), rp.ShardGroupDuration.String()) - } - } - _ = tw.Flush() - fmt.Fprintln(os.Stdout) - - fmt.Fprintln(os.Stdout, "Shard groups") - fmt.Fprintln(os.Stdout, "---------") - fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", "Database/RP", "Start Time", "End Time", "Shards") - for _, db := range svc.meta.Databases() { - for _, rp := range db.RetentionPolicies { - for _, sg := range rp.ShardGroups { - fmt.Fprintf(tw, "%s/%s\t%s\t%s\t", db.Name, rp.Name, sg.StartTime.String(), sg.EndTime.String()) - for i, si := range sg.Shards { - if i > 0 { - fmt.Fprint(tw, ",") - } - fmt.Fprintf(tw, "%d", si.ID) - } - fmt.Fprintln(tw) - } - } - } - _ = tw.Flush() - fmt.Fprintln(os.Stdout) - - fmt.Fprintln(os.Stdout, "Mappings") - fmt.Fprintln(os.Stdout, "---------") - fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\n", "Database", "RP", "Org", "Bucket", "Default") - mappings, _, err := svc.dbrpSvc.FindMany(ctx, influxdb.DBRPMappingFilter{}) - if err != nil { - return err - } - showBool := func(b bool) string { - if b { - return "yes" - } - return "no" - } - for _, row := range mappings { - fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\n", row.Database, row.RetentionPolicy, row.OrganizationID.String(), row.BucketID.String(), showBool(row.Default)) - } - _ = tw.Flush() - fmt.Fprintln(os.Stdout) - - showCheck := func(b bool) string { - if b { - return "✓" - } - return "" - } - - fmt.Fprintln(os.Stdout, "Users") - fmt.Fprintln(os.Stdout, "-----") - fmt.Fprintf(tw, "%s\t%s\n", "Name", "Admin") - for _, row := range svc.meta.Users() { - fmt.Fprintf(tw, "%s\t%s\n", row.Name, showCheck(row.Admin)) - } - _ = tw.Flush() - fmt.Fprintln(os.Stdout) - - return nil - }, -} - -var v2DumpMetaOptions = optionsV2{} - -func init() { - flags := v2DumpMetaCommand.Flags() - - v2dir, err := fs.InfluxDir() - if err != nil { - panic("error fetching default InfluxDB 2.0 dir: " + err.Error()) - } - - flags.StringVar(&v2DumpMetaOptions.boltPath, "v2-bolt-path", filepath.Join(v2dir, "influxd.bolt"), "Path to 2.0 metadata") -} diff --git a/cmd/telemetryd/README.md b/cmd/telemetryd/README.md deleted file mode 100644 index 2bdb6e8358a..00000000000 --- a/cmd/telemetryd/README.md +++ /dev/null @@ -1,8 +0,0 @@ -## Telemetry Server - -Telemetry server accepts pushed prometheus metrics where it -logs them to stdout. 
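As a rough sketch of the wiring (mirroring `cmd/telemetryd/main.go` below, with flag parsing and error handling stripped out), the server is just the push-gateway handler serving on the bind address:

```go
package main

import (
	"net/http"

	"github.com/influxdata/influxdb/v2/prometheus"
	"github.com/influxdata/influxdb/v2/telemetry"
	"go.uber.org/zap"
)

func main() {
	log := zap.NewExample()

	// Store that logs every pushed metric batch to stdout.
	store := telemetry.NewLogStore(log)

	// Push gateway that accepts pushed Prometheus metrics and hands them to the store.
	svc := telemetry.NewPushGateway(log, store)
	// Print the data as line protocol.
	svc.Encoder = &prometheus.LineProtocol{}

	// Serve the push handler on the default bind address (see --bind-addr below).
	_ = http.ListenAndServe(":8080", http.HandlerFunc(svc.Handler))
}
```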
- -Telemetry server is very similar to prometheus pushgateway, but, -has stores that are configurable rather than just a /metrics -endpoint. diff --git a/cmd/telemetryd/main.go b/cmd/telemetryd/main.go deleted file mode 100644 index 04d75432de9..00000000000 --- a/cmd/telemetryd/main.go +++ /dev/null @@ -1,81 +0,0 @@ -package main - -import ( - "fmt" - "net/http" - "os" - "time" - - "github.com/influxdata/influxdb/v2/kit/cli" - influxlogger "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/prometheus" - "github.com/influxdata/influxdb/v2/telemetry" - "github.com/spf13/viper" - "go.uber.org/zap" -) - -var ( - addr string -) - -func main() { - logconf := influxlogger.NewConfig() - log, err := logconf.New(os.Stdout) - if err != nil { - _, _ = fmt.Fprintf(os.Stderr, "Failed to configure logger: %v", err) - os.Exit(1) - } - - prog := &cli.Program{ - Run: func() error { - return run(log) - }, - Name: "telemetryd", - Opts: []cli.Opt{ - { - DestP: &addr, - Flag: "bind-addr", - Default: ":8080", - Desc: "binding address for telemetry server", - }, - }, - } - - v := viper.New() - cmd, err := cli.NewCommand(v, prog) - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err.Error()) - os.Exit(1) - } - - var exitCode int - if err := cmd.Execute(); err != nil { - exitCode = 1 - log.Error("Command returned error", zap.Error(err)) - } - - if err := log.Sync(); err != nil { - exitCode = 1 - fmt.Fprintf(os.Stderr, "Error syncing logs: %v\n", err) - } - time.Sleep(10 * time.Millisecond) - os.Exit(exitCode) -} - -func run(log *zap.Logger) error { - log = log.With(zap.String("service", "telemetryd")) - store := telemetry.NewLogStore(log) - svc := telemetry.NewPushGateway(log, store) - // Print data as line protocol - svc.Encoder = &prometheus.LineProtocol{} - - handler := http.HandlerFunc(svc.Handler) - log.Info("Starting telemetryd server", zap.String("addr", addr)) - - srv := http.Server{ - Addr: addr, - Handler: handler, - ErrorLog: zap.NewStdLog(log), - } - return srv.ListenAndServe() -} diff --git a/context/token.go b/context/token.go deleted file mode 100644 index 42e04cbe120..00000000000 --- a/context/token.go +++ /dev/null @@ -1,70 +0,0 @@ -package context - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -type contextKey string - -const ( - authorizerCtxKey contextKey = "influx/authorizer/v1" -) - -// SetAuthorizer sets an authorizer on context. -func SetAuthorizer(ctx context.Context, a influxdb.Authorizer) context.Context { - return context.WithValue(ctx, authorizerCtxKey, a) -} - -// GetAuthorizer retrieves an authorizer from context. -func GetAuthorizer(ctx context.Context) (influxdb.Authorizer, error) { - a, ok := ctx.Value(authorizerCtxKey).(influxdb.Authorizer) - if !ok { - return nil, &errors.Error{ - Msg: "authorizer not found on context", - Code: errors.EInternal, - } - } - if a == nil { - return nil, &errors.Error{ - Code: errors.EInternal, - Msg: "unexpected invalid authorizer", - } - } - - return a, nil -} - -// GetToken retrieves a token from the context; errors if no token. 
-func GetToken(ctx context.Context) (string, error) { - a, ok := ctx.Value(authorizerCtxKey).(influxdb.Authorizer) - if !ok { - return "", &errors.Error{ - Msg: "authorizer not found on context", - Code: errors.EInternal, - } - } - - auth, ok := a.(*influxdb.Authorization) - if !ok { - return "", &errors.Error{ - Msg: fmt.Sprintf("authorizer not an authorization but a %T", a), - Code: errors.EInternal, - } - } - - return auth.Token, nil -} - -// GetUserID retrieves the user ID from the authorizer on the context. -func GetUserID(ctx context.Context) (platform.ID, error) { - a, err := GetAuthorizer(ctx) - if err != nil { - return 0, err - } - return a.GetUserID(), nil -} diff --git a/context/token_test.go b/context/token_test.go deleted file mode 100644 index bc1eaa3ab37..00000000000 --- a/context/token_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package context_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -func TestGetAuthorizer(t *testing.T) { - ctx := context.Background() - ctx = icontext.SetAuthorizer(ctx, &influxdb.Authorization{ - ID: 1234, - }) - got, err := icontext.GetAuthorizer(ctx) - if err != nil { - t.Errorf("unexpected error while retrieving token: %v", err) - } - - if want := platform.ID(1234); got.Identifier() != want { - t.Errorf("GetToken() want %s, got %s", want, got) - } -} - -func TestGetToken(t *testing.T) { - ctx := context.Background() - ctx = icontext.SetAuthorizer(ctx, &influxdb.Authorization{ - Token: "howdy", - }) - got, err := icontext.GetToken(ctx) - if err != nil { - t.Errorf("unexpected error while retrieving token: %v", err) - } - - if want := "howdy"; got != want { - t.Errorf("GetToken() want %s, got %s", want, got) - } -} - -func TestGetUserID(t *testing.T) { - ctx := context.Background() - ctx = icontext.SetAuthorizer(ctx, &influxdb.Authorization{ - UserID: 5678, - }) - got, err := icontext.GetUserID(ctx) - if err != nil { - t.Errorf("unexpected error while retrieving user ID: %v", err) - } - - if want := platform.ID(5678); got != want { - t.Errorf("GetUserID() want %s, got %s", want, got) - } -} diff --git a/credentials.go b/credentials.go deleted file mode 100644 index e9fe541d2fe..00000000000 --- a/credentials.go +++ /dev/null @@ -1,41 +0,0 @@ -package influxdb - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - // ErrCredentialsUnauthorized is the error returned when CredentialsV1 cannot be - // authorized. - ErrCredentialsUnauthorized = &errors.Error{ - Code: errors.EUnauthorized, - Msg: "Unauthorized", - } -) - -// SchemeV1 is an enumeration of supported authorization types -type SchemeV1 string - -const ( - // SchemeV1Basic indicates the credentials came from an Authorization header using the BASIC scheme - SchemeV1Basic SchemeV1 = "basic" - - // SchemeV1Token indicates the credentials came from an Authorization header using the Token scheme - SchemeV1Token SchemeV1 = "token" - - // SchemeV1URL indicates the credentials came from the u and p query parameters - SchemeV1URL SchemeV1 = "url" -) - -// CredentialsV1 encapsulates the required credentials to authorize a v1 HTTP request. 
-type CredentialsV1 struct { - Scheme SchemeV1 - Username string - Token string -} - -type AuthorizerV1 interface { - Authorize(ctx context.Context, v1 CredentialsV1) (*Authorization, error) -} diff --git a/crud_log.go b/crud_log.go deleted file mode 100644 index 50992477856..00000000000 --- a/crud_log.go +++ /dev/null @@ -1,41 +0,0 @@ -package influxdb - -import ( - "time" -) - -// CRUDLogSetter is the interface to set the crudlog. -type CRUDLogSetter interface { - SetCreatedAt(now time.Time) - SetUpdatedAt(now time.Time) -} - -// CRUDLog is the struct to store crud related ops. -type CRUDLog struct { - CreatedAt time.Time `json:"createdAt"` - UpdatedAt time.Time `json:"updatedAt"` -} - -// SetCreatedAt set the created time. -func (log *CRUDLog) SetCreatedAt(now time.Time) { - log.CreatedAt = now -} - -// SetUpdatedAt set the updated time. -func (log *CRUDLog) SetUpdatedAt(now time.Time) { - log.UpdatedAt = now -} - -// TimeGenerator represents a generator for now. -type TimeGenerator interface { - // Now creates the generated time. - Now() time.Time -} - -// RealTimeGenerator will generate the real time. -type RealTimeGenerator struct{} - -// Now returns the current time. -func (g RealTimeGenerator) Now() time.Time { - return time.Now() -} diff --git a/dashboard.go b/dashboard.go deleted file mode 100644 index 77dd4fd5e4a..00000000000 --- a/dashboard.go +++ /dev/null @@ -1,1215 +0,0 @@ -package influxdb - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - "sort" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// ErrDashboardNotFound is the error msg for a missing dashboard. -const ErrDashboardNotFound = "dashboard not found" - -// ErrCellNotFound is the error msg for a missing cell. -const ErrCellNotFound = "cell not found" - -// ErrViewNotFound is the error msg for a missing View. -const ErrViewNotFound = "view not found" - -// ops for dashboard service. -const ( - OpFindDashboardByID = "FindDashboardByID" - OpFindDashboards = "FindDashboards" - OpCreateDashboard = "CreateDashboard" - OpUpdateDashboard = "UpdateDashboard" - OpAddDashboardCell = "AddDashboardCell" - OpRemoveDashboardCell = "RemoveDashboardCell" - OpUpdateDashboardCell = "UpdateDashboardCell" - OpGetDashboardCellView = "GetDashboardCellView" - OpUpdateDashboardCellView = "UpdateDashboardCellView" - OpDeleteDashboard = "DeleteDashboard" - OpReplaceDashboardCells = "ReplaceDashboardCells" -) - -// DashboardService represents a service for managing dashboard data. -type DashboardService interface { - // FindDashboardByID returns a single dashboard by ID. - FindDashboardByID(ctx context.Context, id platform.ID) (*Dashboard, error) - - // FindDashboards returns a list of dashboards that match filter and the total count of matching dashboards. - // Additional options provide pagination & sorting. - FindDashboards(ctx context.Context, filter DashboardFilter, opts FindOptions) ([]*Dashboard, int, error) - - // CreateDashboard creates a new dashboard and sets b.ID with the new identifier. - CreateDashboard(ctx context.Context, b *Dashboard) error - - // UpdateDashboard updates a single dashboard with changeset. - // Returns the new dashboard state after update. - UpdateDashboard(ctx context.Context, id platform.ID, upd DashboardUpdate) (*Dashboard, error) - - // AddDashboardCell adds a cell to a dashboard. 
- AddDashboardCell(ctx context.Context, id platform.ID, c *Cell, opts AddDashboardCellOptions) error - - // RemoveDashboardCell removes a dashboard. - RemoveDashboardCell(ctx context.Context, dashboardID, cellID platform.ID) error - - // UpdateDashboardCell replaces the dashboard cell with the provided ID. - UpdateDashboardCell(ctx context.Context, dashboardID, cellID platform.ID, upd CellUpdate) (*Cell, error) - - // GetDashboardCellView retrieves a dashboard cells view. - GetDashboardCellView(ctx context.Context, dashboardID, cellID platform.ID) (*View, error) - - // UpdateDashboardCellView retrieves a dashboard cells view. - UpdateDashboardCellView(ctx context.Context, dashboardID, cellID platform.ID, upd ViewUpdate) (*View, error) - - // DeleteDashboard removes a dashboard by ID. - DeleteDashboard(ctx context.Context, id platform.ID) error - - // ReplaceDashboardCells replaces all cells in a dashboard - ReplaceDashboardCells(ctx context.Context, id platform.ID, c []*Cell) error -} - -// Dashboard represents all visual and query data for a dashboard. -type Dashboard struct { - ID platform.ID `json:"id,omitempty"` - OrganizationID platform.ID `json:"orgID,omitempty"` - Name string `json:"name"` - Description string `json:"description"` - Cells []*Cell `json:"cells"` - Meta DashboardMeta `json:"meta"` - OwnerID *platform.ID `json:"owner,omitempty"` -} - -// DashboardMeta contains meta information about dashboards -type DashboardMeta struct { - CreatedAt time.Time `json:"createdAt"` - UpdatedAt time.Time `json:"updatedAt"` -} - -// DefaultDashboardFindOptions are the default find options for dashboards -var DefaultDashboardFindOptions = FindOptions{ - SortBy: "ID", -} - -// SortDashboards sorts a slice of dashboards by a field. -func SortDashboards(opts FindOptions, ds []*Dashboard) { - var sorter func(i, j int) bool - switch opts.SortBy { - case "CreatedAt": - sorter = func(i, j int) bool { - return ds[j].Meta.CreatedAt.After(ds[i].Meta.CreatedAt) - } - case "UpdatedAt": - sorter = func(i, j int) bool { - return ds[j].Meta.UpdatedAt.After(ds[i].Meta.UpdatedAt) - } - case "Name": - sorter = func(i, j int) bool { - return ds[i].Name < ds[j].Name - } - default: - sorter = func(i, j int) bool { - if opts.Descending { - return ds[i].ID > ds[j].ID - } - return ds[i].ID < ds[j].ID - } - } - - sort.Slice(ds, sorter) -} - -// Cell holds positional information about a cell on dashboard and a reference to a cell. 
-type Cell struct { - ID platform.ID `json:"id,omitempty"` - CellProperty - View *View `json:"-"` -} - -// Marshals the cell -func (c *Cell) MarshalJSON() ([]byte, error) { - type resp struct { - ID *platform.ID `json:"id,omitempty"` - Name string `json:"name,omitempty"` - ViewProperties json.RawMessage `json:"properties,omitempty"` - CellProperty - } - response := resp{ - CellProperty: c.CellProperty, - } - if c.ID != 0 { - response.ID = &c.ID - } - if c.View != nil { - response.Name = c.View.Name - rawJSON, err := MarshalViewPropertiesJSON(c.View.Properties) - if err != nil { - return nil, err - } - response.ViewProperties = rawJSON - } - return json.Marshal(response) -} - -func (c *Cell) UnmarshalJSON(b []byte) error { - var newCell struct { - ID platform.ID `json:"id,omitempty"` - Name string `json:"name,omitempty"` - ViewProperties json.RawMessage `json:"properties,omitempty"` - CellProperty - } - if err := json.Unmarshal(b, &newCell); err != nil { - return err - } - - c.ID = newCell.ID - c.CellProperty = newCell.CellProperty - - if newCell.Name != "" { - if c.View == nil { - c.View = new(View) - } - c.View.Name = newCell.Name - } - - props, err := UnmarshalViewPropertiesJSON(newCell.ViewProperties) - if err == nil { - if c.View == nil { - c.View = new(View) - } - c.View.Properties = props - } - - return nil -} - -// CellProperty contains the properties of a cell. -type CellProperty struct { - X int32 `json:"x"` - Y int32 `json:"y"` - W int32 `json:"w"` - H int32 `json:"h"` -} - -// DashboardFilter is a filter for dashboards. -type DashboardFilter struct { - IDs []*platform.ID - OrganizationID *platform.ID - Organization *string - OwnerID *platform.ID -} - -// QueryParams turns a dashboard filter into query params -// -// It implements PagingFilter. -func (f DashboardFilter) QueryParams() map[string][]string { - qp := url.Values{} - for _, id := range f.IDs { - if id != nil { - qp.Add("id", id.String()) - } - } - - if f.OrganizationID != nil { - qp.Add("orgID", f.OrganizationID.String()) - } - - if f.Organization != nil { - qp.Add("org", *f.Organization) - } - - if f.OwnerID != nil { - qp.Add("owner", f.OwnerID.String()) - } - - return qp -} - -// DashboardUpdate is the patch structure for a dashboard. -type DashboardUpdate struct { - Name *string `json:"name"` - Description *string `json:"description"` - Cells *[]*Cell `json:"cells"` -} - -// Apply applies an update to a dashboard. -func (u DashboardUpdate) Apply(d *Dashboard) error { - if u.Name != nil { - d.Name = *u.Name - } - - if u.Description != nil { - d.Description = *u.Description - } - - if u.Cells != nil { - d.Cells = *u.Cells - } - - return nil -} - -// Valid returns an error if the dashboard update is invalid. -func (u DashboardUpdate) Valid() *errors.Error { - if u.Name == nil && u.Description == nil { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "must update at least one attribute", - } - } - - return nil -} - -// AddDashboardCellOptions are options for adding a dashboard. -type AddDashboardCellOptions struct { - View *View -} - -// CellUpdate is the patch structure for a cell. -type CellUpdate struct { - X *int32 `json:"x"` - Y *int32 `json:"y"` - W *int32 `json:"w"` - H *int32 `json:"h"` -} - -// Apply applies an update to a Cell. -func (u CellUpdate) Apply(c *Cell) error { - if u.X != nil { - c.X = *u.X - } - - if u.Y != nil { - c.Y = *u.Y - } - - if u.W != nil { - c.W = *u.W - } - - if u.H != nil { - c.H = *u.H - } - - return nil -} - -// Valid returns an error if the cell update is invalid. 
-func (u CellUpdate) Valid() *errors.Error { - if u.H == nil && u.W == nil && u.Y == nil && u.X == nil { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "must update at least one attribute", - } - } - - return nil -} - -// ViewUpdate is a struct for updating Views. -type ViewUpdate struct { - ViewContentsUpdate - Properties ViewProperties -} - -// Valid validates the update struct. It expects minimal values to be set. -func (u ViewUpdate) Valid() *errors.Error { - _, ok := u.Properties.(EmptyViewProperties) - if u.Name == nil && ok { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "expected at least one attribute to be updated", - } - } - - return nil -} - -// Apply updates a view with the view updates properties. -func (u ViewUpdate) Apply(v *View) error { - if err := u.Valid(); err != nil { - return err - } - - if u.Name != nil { - v.Name = *u.Name - } - - if u.Properties != nil { - v.Properties = u.Properties - } - - return nil -} - -// ViewContentsUpdate is a struct for updating the non properties content of a View. -type ViewContentsUpdate struct { - Name *string `json:"name"` -} - -// ViewFilter represents a set of filter that restrict the returned results. -type ViewFilter struct { - ID *platform.ID - Types []string -} - -// View holds positional and visual information for a View. -type View struct { - ViewContents - Properties ViewProperties -} - -// ViewContents is the id and name of a specific view. -type ViewContents struct { - ID platform.ID `json:"id,omitempty"` - Name string `json:"name"` -} - -// Values for all supported view property types. -const ( - ViewPropertyTypeCheck = "check" - ViewPropertyTypeGauge = "gauge" - ViewPropertyTypeHeatMap = "heatmap" - ViewPropertyTypeHistogram = "histogram" - ViewPropertyTypeLogViewer = "log-viewer" - ViewPropertyTypeMarkdown = "markdown" - ViewPropertyTypeScatter = "scatter" - ViewPropertyTypeSingleStat = "single-stat" - ViewPropertyTypeSingleStatPlusLine = "line-plus-single-stat" - ViewPropertyTypeTable = "table" - ViewPropertyTypeXY = "xy" - ViewPropertyTypeMosaic = "mosaic" - ViewPropertyTypeBand = "band" - ViewPropertyTypeGeo = "geo" -) - -// ViewProperties is used to mark other structures as conforming to a View. -type ViewProperties interface { - viewProperties() - GetType() string -} - -// EmptyViewProperties is visualization that has no values -type EmptyViewProperties struct{} - -func (v EmptyViewProperties) viewProperties() {} - -func (v EmptyViewProperties) GetType() string { return "" } - -// UnmarshalViewPropertiesJSON unmarshals JSON bytes into a ViewProperties. 
-func UnmarshalViewPropertiesJSON(b []byte) (ViewProperties, error) { - var v struct { - B json.RawMessage `json:"properties"` - } - - if err := json.Unmarshal(b, &v); err != nil { - return nil, err - } - - if len(v.B) == 0 { - // Then there wasn't any visualization field, so there's no need unmarshal it - return EmptyViewProperties{}, nil - } - - var t struct { - Shape string `json:"shape"` - Type string `json:"type"` - } - - if err := json.Unmarshal(v.B, &t); err != nil { - return nil, err - } - - var vis ViewProperties - switch t.Shape { - case "chronograf-v2": - switch t.Type { - case ViewPropertyTypeCheck: - var cv CheckViewProperties - if err := json.Unmarshal(v.B, &cv); err != nil { - return nil, err - } - vis = cv - case ViewPropertyTypeXY: - var xyv XYViewProperties - if err := json.Unmarshal(v.B, &xyv); err != nil { - return nil, err - } - vis = xyv - case ViewPropertyTypeSingleStat: - var ssv SingleStatViewProperties - if err := json.Unmarshal(v.B, &ssv); err != nil { - return nil, err - } - vis = ssv - case ViewPropertyTypeGauge: - var gv GaugeViewProperties - if err := json.Unmarshal(v.B, &gv); err != nil { - return nil, err - } - vis = gv - case ViewPropertyTypeGeo: - var gvw GeoViewProperties - if err := json.Unmarshal(v.B, &gvw); err != nil { - return nil, err - } - vis = gvw - case ViewPropertyTypeTable: - var tv TableViewProperties - if err := json.Unmarshal(v.B, &tv); err != nil { - return nil, err - } - vis = tv - case ViewPropertyTypeMarkdown: - var mv MarkdownViewProperties - if err := json.Unmarshal(v.B, &mv); err != nil { - return nil, err - } - vis = mv - case ViewPropertyTypeLogViewer: // happens in log viewer stays in log viewer. - var lv LogViewProperties - if err := json.Unmarshal(v.B, &lv); err != nil { - return nil, err - } - vis = lv - case ViewPropertyTypeSingleStatPlusLine: - var lv LinePlusSingleStatProperties - if err := json.Unmarshal(v.B, &lv); err != nil { - return nil, err - } - vis = lv - case ViewPropertyTypeHistogram: - var hv HistogramViewProperties - if err := json.Unmarshal(v.B, &hv); err != nil { - return nil, err - } - vis = hv - case ViewPropertyTypeHeatMap: - var hv HeatmapViewProperties - if err := json.Unmarshal(v.B, &hv); err != nil { - return nil, err - } - vis = hv - case ViewPropertyTypeScatter: - var sv ScatterViewProperties - if err := json.Unmarshal(v.B, &sv); err != nil { - return nil, err - } - vis = sv - case ViewPropertyTypeMosaic: - var mv MosaicViewProperties - if err := json.Unmarshal(v.B, &mv); err != nil { - return nil, err - } - vis = mv - case ViewPropertyTypeBand: - var bv BandViewProperties - if err := json.Unmarshal(v.B, &bv); err != nil { - return nil, err - } - vis = bv - } - case "empty": - var ev EmptyViewProperties - if err := json.Unmarshal(v.B, &ev); err != nil { - return nil, err - } - vis = ev - default: - return nil, fmt.Errorf("unknown shape %v", t.Shape) - } - - return vis, nil -} - -// MarshalViewPropertiesJSON encodes a view into JSON bytes. 
-func MarshalViewPropertiesJSON(v ViewProperties) ([]byte, error) { - var s interface{} - switch vis := v.(type) { - case SingleStatViewProperties: - s = struct { - Shape string `json:"shape"` - SingleStatViewProperties - }{ - Shape: "chronograf-v2", - - SingleStatViewProperties: vis, - } - case TableViewProperties: - s = struct { - Shape string `json:"shape"` - TableViewProperties - }{ - Shape: "chronograf-v2", - - TableViewProperties: vis, - } - case GaugeViewProperties: - s = struct { - Shape string `json:"shape"` - GaugeViewProperties - }{ - Shape: "chronograf-v2", - - GaugeViewProperties: vis, - } - case GeoViewProperties: - s = struct { - Shape string `json:"shape"` - GeoViewProperties - }{ - Shape: "chronograf-v2", - GeoViewProperties: vis, - } - case XYViewProperties: - s = struct { - Shape string `json:"shape"` - XYViewProperties - }{ - Shape: "chronograf-v2", - - XYViewProperties: vis, - } - case BandViewProperties: - s = struct { - Shape string `json:"shape"` - BandViewProperties - }{ - Shape: "chronograf-v2", - - BandViewProperties: vis, - } - case LinePlusSingleStatProperties: - s = struct { - Shape string `json:"shape"` - LinePlusSingleStatProperties - }{ - Shape: "chronograf-v2", - - LinePlusSingleStatProperties: vis, - } - case HistogramViewProperties: - s = struct { - Shape string `json:"shape"` - HistogramViewProperties - }{ - Shape: "chronograf-v2", - - HistogramViewProperties: vis, - } - case HeatmapViewProperties: - s = struct { - Shape string `json:"shape"` - HeatmapViewProperties - }{ - Shape: "chronograf-v2", - - HeatmapViewProperties: vis, - } - case ScatterViewProperties: - s = struct { - Shape string `json:"shape"` - ScatterViewProperties - }{ - Shape: "chronograf-v2", - - ScatterViewProperties: vis, - } - case MosaicViewProperties: - s = struct { - Shape string `json:"shape"` - MosaicViewProperties - }{ - Shape: "chronograf-v2", - - MosaicViewProperties: vis, - } - case MarkdownViewProperties: - s = struct { - Shape string `json:"shape"` - MarkdownViewProperties - }{ - Shape: "chronograf-v2", - - MarkdownViewProperties: vis, - } - case LogViewProperties: - s = struct { - Shape string `json:"shape"` - LogViewProperties - }{ - Shape: "chronograf-v2", - LogViewProperties: vis, - } - case CheckViewProperties: - s = struct { - Shape string `json:"shape"` - CheckViewProperties - }{ - Shape: "chronograf-v2", - - CheckViewProperties: vis, - } - default: - s = struct { - Shape string `json:"shape"` - EmptyViewProperties - }{ - Shape: "empty", - EmptyViewProperties: EmptyViewProperties{}, - } - } - return json.Marshal(s) -} - -// MarshalJSON encodes a view to JSON bytes. -func (v View) MarshalJSON() ([]byte, error) { - viewProperties, err := MarshalViewPropertiesJSON(v.Properties) - if err != nil { - return nil, err - } - - return json.Marshal(struct { - ViewContents - ViewProperties json.RawMessage `json:"properties"` - }{ - ViewContents: v.ViewContents, - ViewProperties: viewProperties, - }) -} - -// UnmarshalJSON decodes JSON bytes into the corresponding view type (those that implement ViewProperties). -func (c *View) UnmarshalJSON(b []byte) error { - if err := json.Unmarshal(b, &c.ViewContents); err != nil { - return err - } - - v, err := UnmarshalViewPropertiesJSON(b) - if err != nil { - return err - } - c.Properties = v - return nil -} - -// UnmarshalJSON decodes JSON bytes into the corresponding view update type (those that implement ViewProperties). 
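For reference, a minimal sketch of how the helpers above fit together, assuming the influxdb package as defined in this file; the package and function names below are invented for illustration. View.MarshalJSON nests the concrete properties under a "properties" key together with the "shape"/"type" discriminator, and UnmarshalViewPropertiesJSON reads those fields back to pick the concrete ViewProperties type. The ViewUpdate unmarshaling referenced in the preceding comment continues right after this sketch.

package dashboardsexample

import (
	"encoding/json"
	"fmt"

	influxdb "github.com/influxdata/influxdb/v2"
)

// roundTripViewProperties demonstrates the envelope produced and consumed by the
// helpers above; the function itself is hypothetical and not part of influxdb.
func roundTripViewProperties() error {
	v := influxdb.View{
		ViewContents: influxdb.ViewContents{Name: "notes"},
		Properties: influxdb.MarkdownViewProperties{
			Type: influxdb.ViewPropertyTypeMarkdown,
			Note: "hello",
		},
	}

	// View.MarshalJSON wraps the properties as
	// {"name":"notes","properties":{"shape":"chronograf-v2","type":"markdown","note":"hello"}}.
	b, err := json.Marshal(v)
	if err != nil {
		return err
	}

	// UnmarshalViewPropertiesJSON reads the "properties" envelope and dispatches on
	// "shape" and then "type" to rebuild the concrete properties value.
	props, err := influxdb.UnmarshalViewPropertiesJSON(b)
	if err != nil {
		return err
	}
	fmt.Printf("%T\n", props) // influxdb.MarkdownViewProperties
	return nil
}

The same round trip applies to every concrete properties type handled by the shape/type switch above.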
-func (u *ViewUpdate) UnmarshalJSON(b []byte) error { - if err := json.Unmarshal(b, &u.ViewContentsUpdate); err != nil { - return err - } - - v, err := UnmarshalViewPropertiesJSON(b) - if err != nil { - return err - } - u.Properties = v - return nil -} - -// MarshalJSON encodes a view to JSON bytes. -func (u ViewUpdate) MarshalJSON() ([]byte, error) { - vis, err := MarshalViewPropertiesJSON(u.Properties) - if err != nil { - return nil, err - } - - return json.Marshal(struct { - ViewContentsUpdate - ViewProperties json.RawMessage `json:"properties,omitempty"` - }{ - ViewContentsUpdate: u.ViewContentsUpdate, - ViewProperties: vis, - }) -} - -// LinePlusSingleStatProperties represents options for line plus single stat view in Chronograf -type LinePlusSingleStatProperties struct { - Queries []DashboardQuery `json:"queries"` - Axes map[string]Axis `json:"axes"` - Type string `json:"type"` - StaticLegend StaticLegend `json:"staticLegend"` - ViewColors []ViewColor `json:"colors"` - Prefix string `json:"prefix"` - Suffix string `json:"suffix"` - DecimalPlaces DecimalPlaces `json:"decimalPlaces"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` - XColumn string `json:"xColumn"` - GenerateXAxisTicks []string `json:"generateXAxisTicks"` - XTotalTicks int `json:"xTotalTicks"` - XTickStart float64 `json:"xTickStart"` - XTickStep float64 `json:"xTickStep"` - YColumn string `json:"yColumn"` - GenerateYAxisTicks []string `json:"generateYAxisTicks"` - YTotalTicks int `json:"yTotalTicks"` - YTickStart float64 `json:"yTickStart"` - YTickStep float64 `json:"yTickStep"` - ShadeBelow bool `json:"shadeBelow"` - Position string `json:"position"` - TimeFormat string `json:"timeFormat"` - HoverDimension string `json:"hoverDimension"` - LegendColorizeRows bool `json:"legendColorizeRows"` - LegendHide bool `json:"legendHide"` - LegendOpacity float64 `json:"legendOpacity"` - LegendOrientationThreshold int `json:"legendOrientationThreshold"` -} - -// XYViewProperties represents options for line, bar, step, or stacked view in Chronograf -type XYViewProperties struct { - Queries []DashboardQuery `json:"queries"` - Axes map[string]Axis `json:"axes"` - Type string `json:"type"` - StaticLegend StaticLegend `json:"staticLegend"` - Geom string `json:"geom"` // Either "line", "step", "stacked", or "bar" - ViewColors []ViewColor `json:"colors"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` - XColumn string `json:"xColumn"` - GenerateXAxisTicks []string `json:"generateXAxisTicks"` - XTotalTicks int `json:"xTotalTicks"` - XTickStart float64 `json:"xTickStart"` - XTickStep float64 `json:"xTickStep"` - YColumn string `json:"yColumn"` - GenerateYAxisTicks []string `json:"generateYAxisTicks"` - YTotalTicks int `json:"yTotalTicks"` - YTickStart float64 `json:"yTickStart"` - YTickStep float64 `json:"yTickStep"` - ShadeBelow bool `json:"shadeBelow"` - Position string `json:"position"` - TimeFormat string `json:"timeFormat"` - HoverDimension string `json:"hoverDimension"` - LegendColorizeRows bool `json:"legendColorizeRows"` - LegendHide bool `json:"legendHide"` - LegendOpacity float64 `json:"legendOpacity"` - LegendOrientationThreshold int `json:"legendOrientationThreshold"` -} - -// BandViewProperties represents options for the band view -type BandViewProperties struct { - Queries []DashboardQuery `json:"queries"` - Axes map[string]Axis `json:"axes"` - Type string `json:"type"` - StaticLegend StaticLegend `json:"staticLegend"` - Geom string `json:"geom"` - ViewColors 
[]ViewColor `json:"colors"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` - TimeFormat string `json:"timeFormat"` - HoverDimension string `json:"hoverDimension"` - XColumn string `json:"xColumn"` - GenerateXAxisTicks []string `json:"generateXAxisTicks"` - XTotalTicks int `json:"xTotalTicks"` - XTickStart float64 `json:"xTickStart"` - XTickStep float64 `json:"xTickStep"` - YColumn string `json:"yColumn"` - GenerateYAxisTicks []string `json:"generateYAxisTicks"` - YTotalTicks int `json:"yTotalTicks"` - YTickStart float64 `json:"yTickStart"` - YTickStep float64 `json:"yTickStep"` - UpperColumn string `json:"upperColumn"` - MainColumn string `json:"mainColumn"` - LowerColumn string `json:"lowerColumn"` - LegendColorizeRows bool `json:"legendColorizeRows"` - LegendHide bool `json:"legendHide"` - LegendOpacity float64 `json:"legendOpacity"` - LegendOrientationThreshold int `json:"legendOrientationThreshold"` -} - -// CheckViewProperties represents options for a view representing a check -type CheckViewProperties struct { - Type string `json:"type"` - CheckID string `json:"checkID"` - Queries []DashboardQuery `json:"queries"` - ViewColors []string `json:"colors"` - LegendColorizeRows bool `json:"legendColorizeRows"` - LegendHide bool `json:"legendHide"` - LegendOpacity float64 `json:"legendOpacity"` - LegendOrientationThreshold int `json:"legendOrientationThreshold"` -} - -// SingleStatViewProperties represents options for single stat view in Chronograf -type SingleStatViewProperties struct { - Type string `json:"type"` - Queries []DashboardQuery `json:"queries"` - Prefix string `json:"prefix"` - TickPrefix string `json:"tickPrefix"` - Suffix string `json:"suffix"` - TickSuffix string `json:"tickSuffix"` - ViewColors []ViewColor `json:"colors"` - DecimalPlaces DecimalPlaces `json:"decimalPlaces"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` -} - -// HistogramViewProperties represents options for histogram view in Chronograf -type HistogramViewProperties struct { - Type string `json:"type"` - Queries []DashboardQuery `json:"queries"` - ViewColors []ViewColor `json:"colors"` - XColumn string `json:"xColumn"` - FillColumns []string `json:"fillColumns"` - XDomain []float64 `json:"xDomain,omitempty"` - XAxisLabel string `json:"xAxisLabel"` - Position string `json:"position"` - BinCount int `json:"binCount"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` - LegendColorizeRows bool `json:"legendColorizeRows"` - LegendHide bool `json:"legendHide"` - LegendOpacity float64 `json:"legendOpacity"` - LegendOrientationThreshold int `json:"legendOrientationThreshold"` -} - -// HeatmapViewProperties represents options for heatmap view in Chronograf -type HeatmapViewProperties struct { - Type string `json:"type"` - Queries []DashboardQuery `json:"queries"` - ViewColors []string `json:"colors"` - BinSize int32 `json:"binSize"` - XColumn string `json:"xColumn"` - GenerateXAxisTicks []string `json:"generateXAxisTicks"` - XTotalTicks int `json:"xTotalTicks"` - XTickStart float64 `json:"xTickStart"` - XTickStep float64 `json:"xTickStep"` - YColumn string `json:"yColumn"` - GenerateYAxisTicks []string `json:"generateYAxisTicks"` - YTotalTicks int `json:"yTotalTicks"` - YTickStart float64 `json:"yTickStart"` - YTickStep float64 `json:"yTickStep"` - XDomain []float64 `json:"xDomain,omitempty"` - YDomain []float64 `json:"yDomain,omitempty"` - XAxisLabel string `json:"xAxisLabel"` - YAxisLabel string `json:"yAxisLabel"` - 
XPrefix string `json:"xPrefix"` - XSuffix string `json:"xSuffix"` - YPrefix string `json:"yPrefix"` - YSuffix string `json:"ySuffix"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` - TimeFormat string `json:"timeFormat"` - LegendColorizeRows bool `json:"legendColorizeRows"` - LegendHide bool `json:"legendHide"` - LegendOpacity float64 `json:"legendOpacity"` - LegendOrientationThreshold int `json:"legendOrientationThreshold"` -} - -// ScatterViewProperties represents options for scatter view in Chronograf -type ScatterViewProperties struct { - Type string `json:"type"` - Queries []DashboardQuery `json:"queries"` - ViewColors []string `json:"colors"` - FillColumns []string `json:"fillColumns"` - SymbolColumns []string `json:"symbolColumns"` - XColumn string `json:"xColumn"` - GenerateXAxisTicks []string `json:"generateXAxisTicks"` - XTotalTicks int `json:"xTotalTicks"` - XTickStart float64 `json:"xTickStart"` - XTickStep float64 `json:"xTickStep"` - YColumn string `json:"yColumn"` - GenerateYAxisTicks []string `json:"generateYAxisTicks"` - YTotalTicks int `json:"yTotalTicks"` - YTickStart float64 `json:"yTickStart"` - YTickStep float64 `json:"yTickStep"` - XDomain []float64 `json:"xDomain,omitempty"` - YDomain []float64 `json:"yDomain,omitempty"` - XAxisLabel string `json:"xAxisLabel"` - YAxisLabel string `json:"yAxisLabel"` - XPrefix string `json:"xPrefix"` - XSuffix string `json:"xSuffix"` - YPrefix string `json:"yPrefix"` - YSuffix string `json:"ySuffix"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` - TimeFormat string `json:"timeFormat"` - LegendColorizeRows bool `json:"legendColorizeRows"` - LegendHide bool `json:"legendHide"` - LegendOpacity float64 `json:"legendOpacity"` - LegendOrientationThreshold int `json:"legendOrientationThreshold"` -} - -// MosaicViewProperties represents options for mosaic view in Chronograf -type MosaicViewProperties struct { - Type string `json:"type"` - Queries []DashboardQuery `json:"queries"` - ViewColors []string `json:"colors"` - FillColumns []string `json:"fillColumns"` - XColumn string `json:"xColumn"` - GenerateXAxisTicks []string `json:"generateXAxisTicks"` - XTotalTicks int `json:"xTotalTicks"` - XTickStart float64 `json:"xTickStart"` - XTickStep float64 `json:"xTickStep"` - YLabelColumnSeparator string `json:"yLabelColumnSeparator"` - YLabelColumns []string `json:"yLabelColumns"` - YSeriesColumns []string `json:"ySeriesColumns"` - XDomain []float64 `json:"xDomain,omitempty"` - YDomain []float64 `json:"yDomain,omitempty"` - XAxisLabel string `json:"xAxisLabel"` - YAxisLabel string `json:"yAxisLabel"` - XPrefix string `json:"xPrefix"` - XSuffix string `json:"xSuffix"` - YPrefix string `json:"yPrefix"` - YSuffix string `json:"ySuffix"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` - TimeFormat string `json:"timeFormat"` - HoverDimension string `json:"hoverDimension"` - LegendColorizeRows bool `json:"legendColorizeRows"` - LegendHide bool `json:"legendHide"` - LegendOpacity float64 `json:"legendOpacity"` - LegendOrientationThreshold int `json:"legendOrientationThreshold"` -} - -// GaugeViewProperties represents options for gauge view in Chronograf -type GaugeViewProperties struct { - Type string `json:"type"` - Queries []DashboardQuery `json:"queries"` - Prefix string `json:"prefix"` - TickPrefix string `json:"tickPrefix"` - Suffix string `json:"suffix"` - TickSuffix string `json:"tickSuffix"` - ViewColors []ViewColor `json:"colors"` - DecimalPlaces 
DecimalPlaces `json:"decimalPlaces"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` -} - -// Geographical coordinates -type Datum struct { - Lat float64 `json:"lat"` - Lon float64 `json:"lon"` -} - -// Single visualization layer properties of a chronograf map widget -type GeoLayer struct { - Type string `json:"type"` - RadiusField string `json:"radiusField"` - ColorField string `json:"colorField"` - IntensityField string `json:"intensityField"` - // circle layer properties - ViewColors []ViewColor `json:"colors"` - Radius int32 `json:"radius"` - Blur int32 `json:"blur"` - RadiusDimension Axis `json:"radiusDimension,omitempty"` - ColorDimension Axis `json:"colorDimension,omitempty"` - IntensityDimension Axis `json:"intensityDimension,omitempty"` - InterpolateColors bool `json:"interpolateColors"` - // track layer properties - TrackWidth int32 `json:"trackWidth"` - Speed int32 `json:"speed"` - RandomColors bool `json:"randomColors"` - // point layer properties - IsClustered bool `json:"isClustered"` -} - -// GeoViewProperties represents options for map view in Chronograf -type GeoViewProperties struct { - Type string `json:"type"` - Queries []DashboardQuery `json:"queries"` - Center Datum `json:"center"` - Zoom float64 `json:"zoom"` - MapStyle string `json:"mapStyle"` - AllowPanAndZoom bool `json:"allowPanAndZoom"` - DetectCoordinateFields bool `json:"detectCoordinateFields"` - ViewColor []ViewColor `json:"colors"` - GeoLayers []GeoLayer `json:"layers"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` -} - -// TableViewProperties represents options for table view in Chronograf -type TableViewProperties struct { - Type string `json:"type"` - Queries []DashboardQuery `json:"queries"` - ViewColors []ViewColor `json:"colors"` - TableOptions TableOptions `json:"tableOptions"` - FieldOptions []RenamableField `json:"fieldOptions"` - TimeFormat string `json:"timeFormat"` - DecimalPlaces DecimalPlaces `json:"decimalPlaces"` - Note string `json:"note"` - ShowNoteWhenEmpty bool `json:"showNoteWhenEmpty"` -} - -type MarkdownViewProperties struct { - Type string `json:"type"` - Note string `json:"note"` -} - -// LogViewProperties represents options for log viewer in Chronograf. -type LogViewProperties struct { - Type string `json:"type"` - Columns []LogViewerColumn `json:"columns"` -} - -// LogViewerColumn represents a specific column in a Log Viewer. -type LogViewerColumn struct { - Name string `json:"name"` - Position int32 `json:"position"` - Settings []LogColumnSetting `json:"settings"` -} - -// LogColumnSetting represent the settings for a specific column of a Log Viewer. 
-type LogColumnSetting struct { - Type string `json:"type"` - Value string `json:"value"` - Name string `json:"name,omitempty"` -} - -func (XYViewProperties) viewProperties() {} -func (BandViewProperties) viewProperties() {} -func (LinePlusSingleStatProperties) viewProperties() {} -func (SingleStatViewProperties) viewProperties() {} -func (HistogramViewProperties) viewProperties() {} -func (HeatmapViewProperties) viewProperties() {} -func (ScatterViewProperties) viewProperties() {} -func (MosaicViewProperties) viewProperties() {} -func (GaugeViewProperties) viewProperties() {} -func (GeoViewProperties) viewProperties() {} -func (TableViewProperties) viewProperties() {} -func (MarkdownViewProperties) viewProperties() {} -func (LogViewProperties) viewProperties() {} -func (CheckViewProperties) viewProperties() {} - -func (v XYViewProperties) GetType() string { return v.Type } -func (v BandViewProperties) GetType() string { return v.Type } -func (v LinePlusSingleStatProperties) GetType() string { return v.Type } -func (v SingleStatViewProperties) GetType() string { return v.Type } -func (v HistogramViewProperties) GetType() string { return v.Type } -func (v HeatmapViewProperties) GetType() string { return v.Type } -func (v ScatterViewProperties) GetType() string { return v.Type } -func (v MosaicViewProperties) GetType() string { return v.Type } -func (v GaugeViewProperties) GetType() string { return v.Type } -func (v GeoViewProperties) GetType() string { return v.Type } -func (v TableViewProperties) GetType() string { return v.Type } -func (v MarkdownViewProperties) GetType() string { return v.Type } -func (v LogViewProperties) GetType() string { return v.Type } -func (v CheckViewProperties) GetType() string { return v.Type } - -///////////////////////////// -// Old Chronograf Types -///////////////////////////// - -// DashboardQuery represents a query used in a dashboard cell -type DashboardQuery struct { - Text string `json:"text"` - EditMode string `json:"editMode"` // Either "builder" or "advanced" - Name string `json:"name"` // Term or phrase that refers to the query - BuilderConfig BuilderConfig `json:"builderConfig"` -} - -type BuilderConfig struct { - Buckets []string `json:"buckets"` - Tags []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - } `json:"tags"` - Functions []struct { - Name string `json:"name"` - } `json:"functions"` - AggregateWindow struct { - Period string `json:"period"` - FillValues bool `json:"fillValues"` - } `json:"aggregateWindow"` -} - -// MarshalJSON is necessary for the time being. UI keeps breaking -// b/c it relies on these slices being populated/not nil. Other -// consumers may have same issue. -func (b BuilderConfig) MarshalJSON() ([]byte, error) { - type alias BuilderConfig - copyCfg := alias(b) - if copyCfg.Buckets == nil { - copyCfg.Buckets = []string{} - } - if copyCfg.Tags == nil { - copyCfg.Tags = []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{} - } - if copyCfg.Functions == nil { - copyCfg.Functions = []struct { - Name string `json:"name"` - }{} - } - return json.Marshal(copyCfg) -} - -// NewBuilderTag is a constructor for the builder config types. This -// isn't technically required, but working with struct literals with embedded -// struct tags is really painful. This is to get around that bit. Would be nicer -// to have these as actual types maybe. 
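A small hypothetical illustration of the guarantee the MarshalJSON override above provides (the helper and package name below are invented): even a zero-value BuilderConfig serializes with empty arrays rather than null, which is what the UI relies on. The NewBuilderTag constructor described in the preceding comment follows immediately after this sketch.

package dashboardsexample

import (
	"encoding/json"
	"fmt"

	influxdb "github.com/influxdata/influxdb/v2"
)

// printEmptyBuilderConfig shows that a zero-value BuilderConfig marshals with
// empty arrays instead of null; the helper itself is hypothetical.
func printEmptyBuilderConfig() error {
	b, err := json.Marshal(influxdb.BuilderConfig{})
	if err != nil {
		return err
	}
	fmt.Println(string(b))
	// {"buckets":[],"tags":[],"functions":[],"aggregateWindow":{"period":"","fillValues":false}}
	return nil
}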
-func NewBuilderTag(key string, functionType string, values ...string) struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` -} { - return struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - Key: key, - Values: values, - AggregateFunctionType: functionType, - } -} - -// Axis represents the visible extents of a visualization -type Axis struct { - Bounds []string `json:"bounds"` // bounds are an arbitrary list of client-defined strings that specify the viewport for a View - LegacyBounds [2]int64 `json:"-"` // legacy bounds are for testing a migration from an earlier version of axis - Label string `json:"label"` // label is a description of this Axis - Prefix string `json:"prefix"` // Prefix represents a label prefix for formatting axis values - Suffix string `json:"suffix"` // Suffix represents a label suffix for formatting axis values - Base string `json:"base"` // Base represents the radix for formatting axis values - Scale string `json:"scale"` // Scale is the axis formatting scale. Supported: "log", "linear" -} - -// ViewColor represents the encoding of data into visualizations -type ViewColor struct { - ID string `json:"id"` // ID is the unique id of the View color - Type string `json:"type"` // Type is how the color is used. Accepted (min,max,threshold) - Hex string `json:"hex"` // Hex is the hex number of the color - Name string `json:"name"` // Name is the user-facing name of the hex color - Value float64 `json:"value"` // Value is the data value mapped to this color -} - -// StaticLegend represents the options specific to the static legend -type StaticLegend struct { - ColorizeRows bool `json:"colorizeRows,omitempty"` - HeightRatio float64 `json:"heightRatio,omitempty"` - Show bool `json:"show,omitempty"` - Opacity float64 `json:"opacity,omitempty"` - OrientationThreshold int `json:"orientationThreshold,omitempty"` - ValueAxis string `json:"valueAxis,omitempty"` - WidthRatio float64 `json:"widthRatio,omitempty"` -} - -// TableOptions is a type of options for a DashboardView with type Table -type TableOptions struct { - VerticalTimeAxis bool `json:"verticalTimeAxis"` - SortBy RenamableField `json:"sortBy"` - Wrapping string `json:"wrapping"` - FixFirstColumn bool `json:"fixFirstColumn"` -} - -// RenamableField is a column/row field in a DashboardView of type Table -type RenamableField struct { - InternalName string `json:"internalName"` - DisplayName string `json:"displayName"` - Visible bool `json:"visible"` -} - -// DecimalPlaces indicates whether decimal places should be enforced, and how many digits it should show. 
-type DecimalPlaces struct { - IsEnforced bool `json:"isEnforced"` - Digits int32 `json:"digits"` -} diff --git a/dashboard_test.go b/dashboard_test.go deleted file mode 100644 index 9004ccb1a48..00000000000 --- a/dashboard_test.go +++ /dev/null @@ -1,250 +0,0 @@ -package influxdb_test - -import ( - "encoding/json" - "testing" - - "github.com/google/go-cmp/cmp" - platform "github.com/influxdata/influxdb/v2" - platformtesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestView_MarshalJSON(t *testing.T) { - type args struct { - view platform.View - } - type wants struct { - json string - } - tests := []struct { - name string - args args - wants wants - }{ - { - name: "xy", - args: args{ - view: platform.View{ - ViewContents: platform.ViewContents{ - ID: platformtesting.MustIDBase16("f01dab1ef005ba11"), - Name: "XY widget", - }, - Properties: platform.XYViewProperties{ - Type: "xy", - }, - }, - }, - wants: wants{ - json: ` -{ - "id": "f01dab1ef005ba11", - "name": "XY widget", - "properties": { - "shape": "chronograf-v2", - "queries": null, - "axes": null, - "type": "xy", - "staticLegend": {}, - "geom": "", - "colors": null, - "note": "", - "showNoteWhenEmpty": false, - "xColumn": "", - "generateXAxisTicks": null, - "xTotalTicks": 0, - "xTickStart": 0, - "xTickStep": 0, - "yColumn": "", - "generateYAxisTicks": null, - "yTotalTicks": 0, - "yTickStart": 0, - "yTickStep": 0, - "shadeBelow": false, - "position": "", - "timeFormat": "", - "hoverDimension": "", - "legendColorizeRows": false, - "legendHide": false, - "legendOpacity": 0, - "legendOrientationThreshold": 0 - } -}`, - }, - }, - { - name: "geo", - args: args{ - view: platform.View{ - ViewContents: platform.ViewContents{ - ID: platformtesting.MustIDBase16("e21da111ef005b11"), - Name: "Circle Map", - }, - Properties: platform.GeoViewProperties{ - Type: platform.ViewPropertyTypeGeo, - Zoom: 2, - Center: platform.Datum{Lat: 50.4, Lon: 10.1}, - AllowPanAndZoom: true, - GeoLayers: []platform.GeoLayer{ - { - Type: "circleMap", - RadiusField: "radius", - ColorField: "color", - Radius: 12, - Blur: 20, - RadiusDimension: platform.Axis{ - Label: "Frequency", - Bounds: []string{"10", "20"}, - Suffix: "m", - }, - ColorDimension: platform.Axis{ - Label: "Severity", - Bounds: []string{"10.0", "40"}, - Suffix: "%", - }, - ViewColors: []platform.ViewColor{{ - Type: "min", - Hex: "#FF0000", - }, { - Hex: "#000000", - Value: 10, - }, { - Hex: "#FFFFFF", - Value: 40, - }}, - IntensityDimension: platform.Axis{ - Label: "Impact", - Prefix: "$", - }, - InterpolateColors: true, - TrackWidth: 2, - Speed: 1.0, - IsClustered: true, - }, - }, - Note: "Some more information", - }, - }, - }, - wants: wants{ - json: ` -{ - "id": "e21da111ef005b11", - "name": "Circle Map", - "properties": { - "shape": "chronograf-v2", - "type": "geo", - "queries": null, - "center": { - "lat": 50.4, - "lon": 10.1 - }, - "zoom": 2, - "mapStyle": "", - "allowPanAndZoom": true, - "detectCoordinateFields": false, - "colors": null, - "layers": [ - { - "type": "circleMap", - "radiusField": "radius", - "colorField": "color", - "intensityField": "", - "colors": [ - { - "id": "", - "type": "min", - "hex": "#FF0000", - "name": "", - "value": 0 - }, - { - "id": "", - "type": "", - "hex": "#000000", - "name": "", - "value": 10 - }, - { - "id": "", - "type": "", - "hex": "#FFFFFF", - "name": "", - "value": 40 - } - ], - "radius": 12, - "blur": 20, - "radiusDimension": { - "bounds": [ - "10", - "20" - ], - "label": "Frequency", - "prefix": "", - "suffix": "m", - 
"base": "", - "scale": "" - }, - "colorDimension": { - "bounds": [ - "10.0", - "40" - ], - "label": "Severity", - "prefix": "", - "suffix": "%", - "base": "", - "scale": "" - }, - "intensityDimension": { - "bounds": null, - "label": "Impact", - "prefix": "$", - "suffix": "", - "base": "", - "scale": "" - }, - "interpolateColors": true, - "trackWidth": 2, - "speed": 1, - "randomColors": false, - "isClustered": true - } - ], - "note": "Some more information", - "showNoteWhenEmpty": false - } -}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - b, err := json.MarshalIndent(tt.args.view, "", " ") - if err != nil { - t.Fatalf("error marshalling json") - } - - eq, err := jsonEqual(string(b), tt.wants.json) - if err != nil { - t.Fatalf("error marshalling json %v", err) - } - if !eq { - t.Errorf("JSON did not match\nexpected:%s\ngot:\n%s\n", tt.wants.json, string(b)) - } - }) - } -} - -func jsonEqual(s1, s2 string) (eq bool, err error) { - var o1, o2 interface{} - - if err = json.Unmarshal([]byte(s1), &o1); err != nil { - return - } - if err = json.Unmarshal([]byte(s2), &o2); err != nil { - return - } - return cmp.Equal(o1, o2), nil -} diff --git a/dashboards/service.go b/dashboards/service.go deleted file mode 100644 index e65c3e40c33..00000000000 --- a/dashboards/service.go +++ /dev/null @@ -1,1014 +0,0 @@ -package dashboards - -import ( - "bytes" - "context" - "encoding/json" - "time" - - influxdb "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/feature" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/snowflake" -) - -var ( - dashboardBucket = []byte("dashboardsv2") - orgDashboardIndex = []byte("orgsdashboardsv1") - dashboardCellViewBucket = []byte("dashboardcellviewsv1") -) - -// TODO(desa): what do we want these to be? -const ( - dashboardCreatedEvent = "Dashboard Created" - dashboardUpdatedEvent = "Dashboard Updated" - dashboardRemovedEvent = "Dashboard Removed" - - dashboardCellsReplacedEvent = "Dashboard Cells Replaced" - dashboardCellAddedEvent = "Dashboard Cell Added" - dashboardCellRemovedEvent = "Dashboard Cell Removed" - dashboardCellUpdatedEvent = "Dashboard Cell Updated" -) - -// OpLogStore is a type which persists and reports operation log entries on a backing -// kv store transaction. -type OpLogStore interface { - AddLogEntryTx(ctx context.Context, tx kv.Tx, k, v []byte, t time.Time) error - ForEachLogEntryTx(ctx context.Context, tx kv.Tx, k []byte, opts influxdb.FindOptions, fn func([]byte, time.Time) error) error -} - -var _ influxdb.DashboardService = (*Service)(nil) -var _ influxdb.DashboardOperationLogService = (*Service)(nil) - -type Service struct { - kv kv.Store - - opLog OpLogStore - - IDGenerator platform.IDGenerator - TimeGenerator influxdb.TimeGenerator -} - -// NewService constructs and configures a new dashboard service. -func NewService(store kv.Store, opLog OpLogStore) *Service { - return &Service{ - kv: store, - opLog: opLog, - IDGenerator: snowflake.NewIDGenerator(), - TimeGenerator: influxdb.RealTimeGenerator{}, - } -} - -// FindDashboardByID retrieves a dashboard by id. 
-func (s *Service) FindDashboardByID(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { - var d *influxdb.Dashboard - - err := s.kv.View(ctx, func(tx kv.Tx) error { - dash, err := s.findDashboardByID(ctx, tx, id) - if err != nil { - return err - } - d = dash - return nil - }) - - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - return d, nil -} - -func (s *Service) findDashboardByID(ctx context.Context, tx kv.Tx, id platform.ID) (*influxdb.Dashboard, error) { - encodedID, err := id.Encode() - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - b, err := tx.Bucket(dashboardBucket) - if err != nil { - return nil, err - } - - v, err := b.Get(encodedID) - if kv.IsNotFound(err) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrDashboardNotFound, - } - } - - if err != nil { - return nil, err - } - - var d influxdb.Dashboard - if err := json.Unmarshal(v, &d); err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - return &d, nil -} - -// FindDashboard retrieves a dashboard using an arbitrary dashboard filter. -func (s *Service) FindDashboard(ctx context.Context, filter influxdb.DashboardFilter, opts ...influxdb.FindOptions) (*influxdb.Dashboard, error) { - if len(filter.IDs) == 1 { - return s.FindDashboardByID(ctx, *filter.IDs[0]) - } - - var d *influxdb.Dashboard - err := s.kv.View(ctx, func(tx kv.Tx) error { - filterFn := filterDashboardsFn(filter) - return s.forEachDashboard(ctx, tx, opts[0].Descending, func(dash *influxdb.Dashboard) bool { - if filterFn(dash) { - d = dash - return false - } - return true - }) - }) - - if err != nil { - return nil, err - } - - if d == nil { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrDashboardNotFound, - } - } - - return d, nil -} - -func filterDashboardsFn(filter influxdb.DashboardFilter) func(d *influxdb.Dashboard) bool { - if len(filter.IDs) > 0 { - m := map[string]struct{}{} - for _, id := range filter.IDs { - m[id.String()] = struct{}{} - } - return func(d *influxdb.Dashboard) bool { - _, ok := m[d.ID.String()] - return ok - } - } - - return func(d *influxdb.Dashboard) bool { - return ((filter.OrganizationID == nil) || (*filter.OrganizationID == d.OrganizationID)) && - ((filter.OwnerID == nil) || (d.OwnerID != nil && *filter.OwnerID == *d.OwnerID)) - } -} - -// FindDashboards retrieves all dashboards that match an arbitrary dashboard filter. 
-func (s *Service) FindDashboards(ctx context.Context, filter influxdb.DashboardFilter, opts influxdb.FindOptions) ([]*influxdb.Dashboard, int, error) { - ds := []*influxdb.Dashboard{} - if len(filter.IDs) == 1 { - d, err := s.FindDashboardByID(ctx, *filter.IDs[0]) - if err != nil && errors.ErrorCode(err) != errors.ENotFound { - return ds, 0, &errors.Error{ - Err: err, - } - } - if d == nil { - return ds, 0, nil - } - return []*influxdb.Dashboard{d}, 1, nil - } - err := s.kv.View(ctx, func(tx kv.Tx) error { - dashs, err := s.findDashboards(ctx, tx, filter, opts) - if err != nil && errors.ErrorCode(err) != errors.ENotFound { - return err - } - ds = dashs - return nil - }) - - if err != nil { - return nil, 0, &errors.Error{ - Err: err, - } - } - - influxdb.SortDashboards(opts, ds) - - return ds, len(ds), nil -} - -func (s *Service) findOrganizationDashboards(ctx context.Context, tx kv.Tx, orgID platform.ID, filter influxdb.DashboardFilter) ([]*influxdb.Dashboard, error) { - idx, err := tx.Bucket(orgDashboardIndex) - if err != nil { - return nil, err - } - - prefix, err := orgID.Encode() - if err != nil { - return nil, err - } - - // TODO(desa): support find options. - cur, err := idx.ForwardCursor(prefix, kv.WithCursorPrefix(prefix)) - if err != nil { - return nil, err - } - - ds := []*influxdb.Dashboard{} - filterFn := filterDashboardsFn(filter) - for k, _ := cur.Next(); k != nil; k, _ = cur.Next() { - _, id, err := decodeOrgDashboardIndexKey(k) - if err != nil { - return nil, err - } - - d, err := s.findDashboardByID(ctx, tx, id) - if err != nil { - return nil, err - } - - if filterFn(d) { - ds = append(ds, d) - } - } - - return ds, nil -} - -func decodeOrgDashboardIndexKey(indexKey []byte) (orgID platform.ID, dashID platform.ID, err error) { - if len(indexKey) != 2*platform.IDLength { - return 0, 0, &errors.Error{Code: errors.EInternal, Msg: "malformed org dashboard index key (please report this error)"} - } - - if err := (&orgID).Decode(indexKey[:platform.IDLength]); err != nil { - return 0, 0, &errors.Error{Code: errors.EInternal, Msg: "bad org id", Err: platform.ErrInvalidID} - } - - if err := (&dashID).Decode(indexKey[platform.IDLength:]); err != nil { - return 0, 0, &errors.Error{Code: errors.EInternal, Msg: "bad dashboard id", Err: platform.ErrInvalidID} - } - - return orgID, dashID, nil -} - -func (s *Service) findDashboards(ctx context.Context, tx kv.Tx, filter influxdb.DashboardFilter, opts ...influxdb.FindOptions) ([]*influxdb.Dashboard, error) { - enforceOrgPagination := feature.EnforceOrganizationDashboardLimits().Enabled(ctx) - if !enforceOrgPagination { - if filter.OrganizationID != nil { - return s.findOrganizationDashboards(ctx, tx, *filter.OrganizationID, filter) - } - } - - var offset, limit, count int - var descending bool - if len(opts) > 0 { - offset = opts[0].Offset - limit = opts[0].Limit - descending = opts[0].Descending - } - - if enforceOrgPagination { - if filter.OrganizationID != nil { - orgDashboards, err := s.findOrganizationDashboards(ctx, tx, *filter.OrganizationID, filter) - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - if offset > 0 && offset < len(orgDashboards) { - orgDashboards = orgDashboards[offset:] - } - if limit > 0 && limit < len(orgDashboards) { - orgDashboards = orgDashboards[:limit] - } - if descending { - for i, j := 0, len(orgDashboards)-1; i < j; i, j = i+1, j-1 { - orgDashboards[i], orgDashboards[j] = orgDashboards[j], orgDashboards[i] - } - } - - return orgDashboards, nil - } - } - - ds := []*influxdb.Dashboard{} - 
filterFn := filterDashboardsFn(filter) - err := s.forEachDashboard(ctx, tx, descending, func(d *influxdb.Dashboard) bool { - if filterFn(d) { - if count >= offset { - ds = append(ds, d) - } - count++ - } - if limit > 0 && len(ds) >= limit { - return false - } - return true - }) - - if err != nil { - return nil, err - } - - return ds, nil -} - -// CreateDashboard creates a influxdb dashboard and sets d.ID. -func (s *Service) CreateDashboard(ctx context.Context, d *influxdb.Dashboard) error { - err := s.kv.Update(ctx, func(tx kv.Tx) error { - d.ID = s.IDGenerator.ID() - - for _, cell := range d.Cells { - cell.ID = s.IDGenerator.ID() - - if err := s.createCellView(ctx, tx, d.ID, cell.ID, cell.View); err != nil { - return err - } - } - - if err := s.appendDashboardEventToLog(ctx, tx, d.ID, dashboardCreatedEvent); err != nil { - return err - } - - if err := s.putOrganizationDashboardIndex(ctx, tx, d); err != nil { - return err - } - - d.Meta.CreatedAt = s.TimeGenerator.Now() - d.Meta.UpdatedAt = s.TimeGenerator.Now() - - if err := s.putDashboardWithMeta(ctx, tx, d); err != nil { - return err - } - - return nil - }) - if err != nil { - return &errors.Error{ - Err: err, - } - } - return nil -} - -func (s *Service) createCellView(ctx context.Context, tx kv.Tx, dashID, cellID platform.ID, view *influxdb.View) error { - if view == nil { - // If not view exists create the view - view = &influxdb.View{} - } - // TODO: this is temporary until we can fully remove the view service. - view.ID = cellID - return s.putDashboardCellView(ctx, tx, dashID, cellID, view) -} - -// ReplaceDashboardCells updates the positions of each cell in a dashboard concurrently. -func (s *Service) ReplaceDashboardCells(ctx context.Context, id platform.ID, cs []*influxdb.Cell) error { - err := s.kv.Update(ctx, func(tx kv.Tx) error { - d, err := s.findDashboardByID(ctx, tx, id) - if err != nil { - return err - } - - ids := map[string]*influxdb.Cell{} - for _, cell := range d.Cells { - ids[cell.ID.String()] = cell - } - - for _, cell := range cs { - if !cell.ID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "cannot provide empty cell id", - } - } - - if _, ok := ids[cell.ID.String()]; !ok { - return &errors.Error{ - Code: errors.EConflict, - Msg: "cannot replace cells that were not already present", - } - } - } - - d.Cells = cs - if err := s.appendDashboardEventToLog(ctx, tx, d.ID, dashboardCellsReplacedEvent); err != nil { - return err - } - - return s.putDashboardWithMeta(ctx, tx, d) - }) - if err != nil { - return &errors.Error{ - Err: err, - } - } - return nil -} - -func (s *Service) addDashboardCell(ctx context.Context, tx kv.Tx, id platform.ID, cell *influxdb.Cell, opts influxdb.AddDashboardCellOptions) error { - d, err := s.findDashboardByID(ctx, tx, id) - if err != nil { - return err - } - cell.ID = s.IDGenerator.ID() - if err := s.createCellView(ctx, tx, id, cell.ID, opts.View); err != nil { - return err - } - - d.Cells = append(d.Cells, cell) - - if err := s.appendDashboardEventToLog(ctx, tx, d.ID, dashboardCellAddedEvent); err != nil { - return err - } - - return s.putDashboardWithMeta(ctx, tx, d) -} - -// AddDashboardCell adds a cell to a dashboard and sets the cells ID. 
-func (s *Service) AddDashboardCell(ctx context.Context, id platform.ID, cell *influxdb.Cell, opts influxdb.AddDashboardCellOptions) error { - err := s.kv.Update(ctx, func(tx kv.Tx) error { - return s.addDashboardCell(ctx, tx, id, cell, opts) - }) - if err != nil { - return &errors.Error{ - Err: err, - } - } - return nil -} - -// RemoveDashboardCell removes a cell from a dashboard. -func (s *Service) RemoveDashboardCell(ctx context.Context, dashboardID, cellID platform.ID) error { - return s.kv.Update(ctx, func(tx kv.Tx) error { - d, err := s.findDashboardByID(ctx, tx, dashboardID) - if err != nil { - return &errors.Error{ - Err: err, - } - } - - idx := -1 - for i, cell := range d.Cells { - if cell.ID == cellID { - idx = i - break - } - } - if idx == -1 { - return &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrCellNotFound, - } - } - - if err := s.deleteDashboardCellView(ctx, tx, d.ID, d.Cells[idx].ID); err != nil { - return &errors.Error{ - Err: err, - } - } - - d.Cells = append(d.Cells[:idx], d.Cells[idx+1:]...) - - if err := s.appendDashboardEventToLog(ctx, tx, d.ID, dashboardCellRemovedEvent); err != nil { - return &errors.Error{ - Err: err, - } - } - - if err := s.putDashboardWithMeta(ctx, tx, d); err != nil { - return &errors.Error{ - Err: err, - } - } - return nil - }) -} - -// GetDashboardCellView retrieves the view for a dashboard cell. -func (s *Service) GetDashboardCellView(ctx context.Context, dashboardID, cellID platform.ID) (*influxdb.View, error) { - var v *influxdb.View - err := s.kv.View(ctx, func(tx kv.Tx) error { - view, err := s.findDashboardCellView(ctx, tx, dashboardID, cellID) - if err != nil { - return err - } - - v = view - return nil - }) - - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - return v, nil -} - -func (s *Service) findDashboardCellView(ctx context.Context, tx kv.Tx, dashboardID, cellID platform.ID) (*influxdb.View, error) { - k, err := encodeDashboardCellViewID(dashboardID, cellID) - if err != nil { - return nil, errors.NewError(errors.WithErrorErr(err)) - } - - vb, err := tx.Bucket(dashboardCellViewBucket) - if err != nil { - return nil, err - } - - v, err := vb.Get(k) - if kv.IsNotFound(err) { - return nil, errors.NewError(errors.WithErrorCode(errors.ENotFound), errors.WithErrorMsg(influxdb.ErrViewNotFound)) - } - - if err != nil { - return nil, err - } - - view := &influxdb.View{} - if err := json.Unmarshal(v, view); err != nil { - return nil, errors.NewError(errors.WithErrorErr(err)) - } - - return view, nil -} - -func (s *Service) deleteDashboardCellView(ctx context.Context, tx kv.Tx, dashboardID, cellID platform.ID) error { - k, err := encodeDashboardCellViewID(dashboardID, cellID) - if err != nil { - return errors.NewError(errors.WithErrorErr(err)) - } - - vb, err := tx.Bucket(dashboardCellViewBucket) - if err != nil { - return err - } - - if err := vb.Delete(k); err != nil { - return errors.NewError(errors.WithErrorErr(err)) - } - - return nil -} - -func (s *Service) putDashboardCellView(ctx context.Context, tx kv.Tx, dashboardID, cellID platform.ID, view *influxdb.View) error { - k, err := encodeDashboardCellViewID(dashboardID, cellID) - if err != nil { - return errors.NewError(errors.WithErrorErr(err)) - } - - v, err := json.Marshal(view) - if err != nil { - return errors.NewError(errors.WithErrorErr(err)) - } - - vb, err := tx.Bucket(dashboardCellViewBucket) - if err != nil { - return err - } - - if err := vb.Put(k, v); err != nil { - return errors.NewError(errors.WithErrorErr(err)) - } - - return nil -} - 
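A short hypothetical caller of the exported cell-view methods above (the wrapper function and package name are invented; it assumes the dashboard and cell already exist):

package dashboardsexample

import (
	"context"

	influxdb "github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/platform"
)

// renameCellView fetches the current view for a cell and then renames it
// through UpdateDashboardCellView; the helper itself is hypothetical.
func renameCellView(ctx context.Context, svc influxdb.DashboardService, dashboardID, cellID platform.ID, name string) (*influxdb.View, error) {
	old, err := svc.GetDashboardCellView(ctx, dashboardID, cellID)
	if err != nil {
		return nil, err
	}
	_ = old.Name // previous name, if the caller wants to log it

	// A ViewUpdate with only Name set passes ViewUpdate.Valid and leaves the
	// existing properties untouched.
	return svc.UpdateDashboardCellView(ctx, dashboardID, cellID, influxdb.ViewUpdate{
		ViewContentsUpdate: influxdb.ViewContentsUpdate{Name: &name},
	})
}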
-func encodeDashboardCellViewID(dashID, cellID platform.ID) ([]byte, error) { - did, err := dashID.Encode() - if err != nil { - return nil, err - } - - cid, err := cellID.Encode() - if err != nil { - return nil, err - } - - buf := bytes.NewBuffer(nil) - if _, err := buf.Write(did); err != nil { - return nil, err - } - - if _, err := buf.Write(cid); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// UpdateDashboardCellView updates the view for a dashboard cell. -func (s *Service) UpdateDashboardCellView(ctx context.Context, dashboardID, cellID platform.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) { - var v *influxdb.View - - err := s.kv.Update(ctx, func(tx kv.Tx) error { - view, err := s.findDashboardCellView(ctx, tx, dashboardID, cellID) - if err != nil { - return err - } - - if err := upd.Apply(view); err != nil { - return err - } - - if err := s.putDashboardCellView(ctx, tx, dashboardID, cellID, view); err != nil { - return err - } - - v = view - return nil - }) - - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - return v, nil -} - -// UpdateDashboardCell udpates a cell on a dashboard. -func (s *Service) UpdateDashboardCell(ctx context.Context, dashboardID, cellID platform.ID, upd influxdb.CellUpdate) (*influxdb.Cell, error) { - if err := upd.Valid(); err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - var cell *influxdb.Cell - err := s.kv.Update(ctx, func(tx kv.Tx) error { - d, err := s.findDashboardByID(ctx, tx, dashboardID) - if err != nil { - return err - } - - idx := -1 - for i, cell := range d.Cells { - if cell.ID == cellID { - idx = i - break - } - } - if idx == -1 { - return &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrCellNotFound, - } - } - - if err := upd.Apply(d.Cells[idx]); err != nil { - return err - } - - cell = d.Cells[idx] - - if err := s.appendDashboardEventToLog(ctx, tx, d.ID, dashboardCellUpdatedEvent); err != nil { - return err - } - - return s.putDashboardWithMeta(ctx, tx, d) - }) - - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - return cell, nil -} - -// PutDashboard will put a dashboard without setting an ID. -func (s *Service) PutDashboard(ctx context.Context, d *influxdb.Dashboard) error { - return s.kv.Update(ctx, func(tx kv.Tx) error { - for _, cell := range d.Cells { - if err := s.createCellView(ctx, tx, d.ID, cell.ID, cell.View); err != nil { - return err - } - } - - if err := s.putOrganizationDashboardIndex(ctx, tx, d); err != nil { - return err - } - - return s.putDashboard(ctx, tx, d) - }) -} - -func encodeOrgDashboardIndex(orgID platform.ID, dashID platform.ID) ([]byte, error) { - oid, err := orgID.Encode() - if err != nil { - return nil, err - } - - did, err := dashID.Encode() - if err != nil { - return nil, err - } - - key := make([]byte, 0, len(oid)+len(did)) - key = append(key, oid...) - key = append(key, did...) 
- - return key, nil -} - -func (s *Service) putOrganizationDashboardIndex(ctx context.Context, tx kv.Tx, d *influxdb.Dashboard) error { - k, err := encodeOrgDashboardIndex(d.OrganizationID, d.ID) - if err != nil { - return err - } - - idx, err := tx.Bucket(orgDashboardIndex) - if err != nil { - return err - } - - if err := idx.Put(k, nil); err != nil { - return err - } - - return nil -} - -func (s *Service) removeOrganizationDashboardIndex(ctx context.Context, tx kv.Tx, d *influxdb.Dashboard) error { - k, err := encodeOrgDashboardIndex(d.OrganizationID, d.ID) - if err != nil { - return err - } - - idx, err := tx.Bucket(orgDashboardIndex) - if err != nil { - return err - } - - if err := idx.Delete(k); err != nil { - return err - } - - return nil -} - -func (s *Service) putDashboard(ctx context.Context, tx kv.Tx, d *influxdb.Dashboard) error { - v, err := json.Marshal(d) - if err != nil { - return err - } - - encodedID, err := d.ID.Encode() - if err != nil { - return err - } - - b, err := tx.Bucket(dashboardBucket) - if err != nil { - return err - } - - if err := b.Put(encodedID, v); err != nil { - return err - } - - return nil -} - -func (s *Service) putDashboardWithMeta(ctx context.Context, tx kv.Tx, d *influxdb.Dashboard) error { - // TODO(desa): don't populate this here. use the first/last methods of the oplog to get meta fields. - d.Meta.UpdatedAt = s.TimeGenerator.Now() - return s.putDashboard(ctx, tx, d) -} - -// forEachDashboard will iterate through all dashboards while fn returns true. -func (s *Service) forEachDashboard(ctx context.Context, tx kv.Tx, descending bool, fn func(*influxdb.Dashboard) bool) error { - b, err := tx.Bucket(dashboardBucket) - if err != nil { - return err - } - - direction := kv.CursorAscending - if descending { - direction = kv.CursorDescending - } - - cur, err := b.ForwardCursor(nil, kv.WithCursorDirection(direction)) - if err != nil { - return err - } - - for k, v := cur.Next(); k != nil; k, v = cur.Next() { - d := &influxdb.Dashboard{} - if err := json.Unmarshal(v, d); err != nil { - return err - } - - if !fn(d) { - break - } - } - - return nil -} - -// UpdateDashboard updates a dashboard according the parameters set on upd. 
-func (s *Service) UpdateDashboard(ctx context.Context, id platform.ID, upd influxdb.DashboardUpdate) (*influxdb.Dashboard, error) { - if err := upd.Valid(); err != nil { - return nil, err - } - - var d *influxdb.Dashboard - err := s.kv.Update(ctx, func(tx kv.Tx) error { - dash, err := s.updateDashboard(ctx, tx, id, upd) - if err != nil { - return err - } - d = dash - - return nil - }) - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - return d, err -} - -func (s *Service) updateDashboard(ctx context.Context, tx kv.Tx, id platform.ID, upd influxdb.DashboardUpdate) (*influxdb.Dashboard, error) { - d, err := s.findDashboardByID(ctx, tx, id) - if err != nil { - return nil, err - } - - if upd.Cells != nil { - for _, c := range *upd.Cells { - if !c.ID.Valid() { - c.ID = s.IDGenerator.ID() - if c.View != nil { - c.View.ViewContents.ID = c.ID - } - } - } - for _, c := range d.Cells { - if err := s.deleteDashboardCellView(ctx, tx, d.ID, c.ID); err != nil { - return nil, err - } - } - } - - if err := upd.Apply(d); err != nil { - return nil, err - } - - if err := s.appendDashboardEventToLog(ctx, tx, d.ID, dashboardUpdatedEvent); err != nil { - return nil, err - } - - if err := s.putDashboardWithMeta(ctx, tx, d); err != nil { - return nil, err - } - - if upd.Cells != nil { - for _, c := range d.Cells { - if err := s.putDashboardCellView(ctx, tx, d.ID, c.ID, c.View); err != nil { - return nil, err - } - } - } - - return d, nil -} - -// DeleteDashboard deletes a dashboard and prunes it from the index. -func (s *Service) DeleteDashboard(ctx context.Context, id platform.ID) error { - return s.kv.Update(ctx, func(tx kv.Tx) error { - if pe := s.deleteDashboard(ctx, tx, id); pe != nil { - return &errors.Error{ - Err: pe, - } - } - return nil - }) -} - -func (s *Service) deleteDashboard(ctx context.Context, tx kv.Tx, id platform.ID) error { - d, err := s.findDashboardByID(ctx, tx, id) - if err != nil { - return err - } - - for _, cell := range d.Cells { - if err := s.deleteDashboardCellView(ctx, tx, d.ID, cell.ID); err != nil { - return &errors.Error{ - Err: err, - } - } - } - - encodedID, err := id.Encode() - if err != nil { - return &errors.Error{ - Err: err, - } - } - - if err := s.removeOrganizationDashboardIndex(ctx, tx, d); err != nil { - return errors.NewError(errors.WithErrorErr(err)) - } - - b, err := tx.Bucket(dashboardBucket) - if err != nil { - return err - } - - if err := b.Delete(encodedID); err != nil { - return &errors.Error{ - Err: err, - } - } - - if err := s.appendDashboardEventToLog(ctx, tx, d.ID, dashboardRemovedEvent); err != nil { - return &errors.Error{ - Err: err, - } - } - - return nil -} - -const dashboardOperationLogKeyPrefix = "dashboard" - -func encodeDashboardOperationLogKey(id platform.ID) ([]byte, error) { - buf, err := id.Encode() - if err != nil { - return nil, err - } - return append([]byte(dashboardOperationLogKeyPrefix), buf...), nil -} - -// GetDashboardOperationLog retrieves a dashboards operation log. 
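encodeDashboardOperationLogKey above namespaces the operation log with the constant "dashboard" prefix and then appends the encoded dashboard ID, giving each dashboard its own log stream while keeping all dashboard entries grouped under one prefix. A small sketch of that key shape, assuming a hex-encoded uint64 as a stand-in for the platform ID encoding:

```go
package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

const opLogPrefix = "dashboard"

// opLogKey builds "<prefix><encoded id>": one namespace per resource type,
// one log stream per dashboard.
func opLogKey(id uint64) []byte {
	raw := make([]byte, 8)
	binary.BigEndian.PutUint64(raw, id)
	enc := make([]byte, hex.EncodedLen(len(raw)))
	hex.Encode(enc, raw)
	return append([]byte(opLogPrefix), enc...)
}

func main() {
	fmt.Printf("%s\n", opLogKey(1)) // dashboard0000000000000001
	fmt.Printf("%s\n", opLogKey(2)) // dashboard0000000000000002
}
```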
-func (s *Service) GetDashboardOperationLog(ctx context.Context, id platform.ID, opts influxdb.FindOptions) ([]*influxdb.OperationLogEntry, int, error) { - // TODO(desa): might be worthwhile to allocate a slice of size opts.Limit - log := []*influxdb.OperationLogEntry{} - - err := s.kv.View(ctx, func(tx kv.Tx) error { - key, err := encodeDashboardOperationLogKey(id) - if err != nil { - return err - } - - return s.opLog.ForEachLogEntryTx(ctx, tx, key, opts, func(v []byte, t time.Time) error { - e := &influxdb.OperationLogEntry{} - if err := json.Unmarshal(v, e); err != nil { - return err - } - e.Time = t - - log = append(log, e) - - return nil - }) - }) - - if err != nil && err != kv.ErrKeyValueLogBoundsNotFound { - return nil, 0, err - } - - return log, len(log), nil -} - -func (s *Service) appendDashboardEventToLog(ctx context.Context, tx kv.Tx, id platform.ID, st string) error { - e := &influxdb.OperationLogEntry{ - Description: st, - } - // TODO(desa): this is fragile and non explicit since it requires an authorizer to be on context. It should be - // replaced with a higher level transaction so that adding to the log can take place in the http handler - // where the userID will exist explicitly. - - a, err := icontext.GetAuthorizer(ctx) - if err == nil { - // Add the user to the log if you can, but don't error if its not there. - e.UserID = a.GetUserID() - } - - v, err := json.Marshal(e) - if err != nil { - return err - } - - k, err := encodeDashboardOperationLogKey(id) - if err != nil { - return err - } - - return s.opLog.AddLogEntryTx(ctx, tx, k, v, s.TimeGenerator.Now()) -} diff --git a/dashboards/service_test.go b/dashboards/service_test.go deleted file mode 100644 index 21cb79eff08..00000000000 --- a/dashboards/service_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package dashboards - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - dashboardtesting "github.com/influxdata/influxdb/v2/dashboards/testing" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/mock" - itesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestBoltDashboardService(t *testing.T) { - dashboardtesting.DashboardService(initBoltDashboardService, t) -} - -func initBoltDashboardService(f dashboardtesting.DashboardFields, t *testing.T) (influxdb.DashboardService, string, func()) { - s, closeBolt := itesting.NewTestBoltStore(t) - svc, op, closeSvc := initDashboardService(s, f, t) - return svc, op, func() { - closeSvc() - closeBolt() - } -} - -func initDashboardService(s kv.SchemaStore, f dashboardtesting.DashboardFields, t *testing.T) (influxdb.DashboardService, string, func()) { - if f.TimeGenerator == nil { - f.TimeGenerator = influxdb.RealTimeGenerator{} - } - - ctx := context.Background() - kvSvc := kv.NewService(zaptest.NewLogger(t), s, &mock.OrganizationService{}) - kvSvc.IDGenerator = f.IDGenerator - kvSvc.TimeGenerator = f.TimeGenerator - - svc := NewService(s, kvSvc) - svc.IDGenerator = f.IDGenerator - svc.TimeGenerator = f.TimeGenerator - - for _, b := range f.Dashboards { - if err := svc.PutDashboard(ctx, b); err != nil { - t.Fatalf("failed to populate dashboards") - } - } - return svc, kv.OpPrefix, func() { - for _, b := range f.Dashboards { - if err := svc.DeleteDashboard(ctx, b.ID); err != nil { - t.Logf("failed to remove dashboard: %v", err) - } - } - } -} diff --git a/dashboards/testing/dashboards.go b/dashboards/testing/dashboards.go deleted file mode 100644 index 
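The test files that follow use a conformance-suite layout: each backend supplies an init function that seeds fixture fields into a fresh service and returns the service together with a cleanup closure, and a shared suite runs every case through that function (initBoltDashboardService above wraps initDashboardService this way). A condensed sketch of the shape, written as a _test.go file, with a hypothetical in-memory KeyService standing in for the dashboard service:

```go
package example

import "testing"

// Fields is the fixture data a backend seeds before each case runs.
type Fields struct {
	Keys []string
}

// KeyService is a hypothetical service under test.
type KeyService struct{ keys map[string]bool }

func (s *KeyService) Put(k string)      { s.keys[k] = true }
func (s *KeyService) Has(k string) bool { return s.keys[k] }

// initFn mirrors the init signature: build a service from fixtures and
// return it with a cleanup func that tears the backend down.
type initFn func(Fields, *testing.T) (*KeyService, func())

// ConformanceSuite runs the same cases against any backend's init.
func ConformanceSuite(init initFn, t *testing.T) {
	t.Run("seeded keys are visible", func(t *testing.T) {
		svc, done := init(Fields{Keys: []string{"a", "b"}}, t)
		defer done()
		if !svc.Has("a") || !svc.Has("b") {
			t.Fatal("expected seeded keys to be present")
		}
	})
}

// initInMemory is one backend's implementation of initFn.
func initInMemory(f Fields, t *testing.T) (*KeyService, func()) {
	svc := &KeyService{keys: map[string]bool{}}
	for _, k := range f.Keys {
		svc.Put(k)
	}
	return svc, func() { /* nothing to tear down for the in-memory backend */ }
}

func TestInMemoryKeyService(t *testing.T) {
	ConformanceSuite(initInMemory, t)
}
```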
5a4c5aae473..00000000000 --- a/dashboards/testing/dashboards.go +++ /dev/null @@ -1,2149 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - platform "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/feature" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" -) - -const ( - dashOneID = "020f755c3c082000" - dashTwoID = "020f755c3c082001" - dashThreeID = "020f755c3c082002" - dashFourID = "020f755c3c082003" - - ownerOneID = "020f755c3c0820a0" - ownerTwoID = "020f755c3c0820a1" -) - -func int32Ptr(i int32) *int32 { - return &i -} - -var dashboardCmpOptions = cmp.Options{ - cmpopts.EquateEmpty(), - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), -} - -// DashboardFields will include the IDGenerator, and dashboards -type DashboardFields struct { - IDGenerator platform2.IDGenerator - TimeGenerator platform.TimeGenerator - Dashboards []*platform.Dashboard - Views []*platform.View -} - -// DashboardService tests all the service functions. -func DashboardService( - init func(DashboardFields, *testing.T) (platform.DashboardService, string, func()), t *testing.T, -) { - tests := []struct { - name string - fn func(init func(DashboardFields, *testing.T) (platform.DashboardService, string, func()), - t *testing.T) - }{ - { - name: "FindDashboardByID", - fn: FindDashboardByID, - }, - { - name: "FindDashboards", - fn: FindDashboards, - }, - { - name: "CreateDashboard", - fn: CreateDashboard, - }, - { - name: "UpdateDashboard", - fn: UpdateDashboard, - }, - { - name: "DeleteDashboard", - fn: DeleteDashboard, - }, - { - name: "AddDashboardCell", - fn: AddDashboardCell, - }, - { - name: "RemoveDashboardCell", - fn: RemoveDashboardCell, - }, - { - name: "UpdateDashboardCell", - fn: UpdateDashboardCell, - }, - { - name: "ReplaceDashboardCells", - fn: ReplaceDashboardCells, - }, - { - name: "GetDashboardCellView", - fn: GetDashboardCellView, - }, - { - name: "UpdateDashboardCellView", - fn: UpdateDashboardCellView, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -// CreateDashboard testing -func CreateDashboard( - init func(DashboardFields, *testing.T) (platform.DashboardService, string, func()), - t *testing.T, -) { - type args struct { - dashboard *platform.Dashboard - } - type wants struct { - err error - dashboards []*platform.Dashboard - } - - tests := []struct { - name string - fields DashboardFields - args args - wants wants - }{ - { - name: "basic create dashboard", - fields: DashboardFields{ - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform2.ID { - return MustIDBase16(dashTwoID) - }, - }, - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - }, - }, - }, - args: args{ - dashboard: &platform.Dashboard{ - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "dashboard2", - OwnerID: MustIDBase16Ptr("00000000000000aa"), - }, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: 
"dashboard2", - OwnerID: MustIDBase16Ptr("00000000000000aa"), - Meta: platform.DashboardMeta{ - CreatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - UpdatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - }, - }, - }, - }, - { - name: "create dashboard with missing id", - fields: DashboardFields{ - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform2.ID { - return MustIDBase16(dashTwoID) - }, - }, - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - }, - }, - }, - args: args{ - dashboard: &platform.Dashboard{ - OrganizationID: 1, - Name: "dashboard2", - }, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "dashboard2", - Meta: platform.DashboardMeta{ - CreatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - UpdatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.CreateDashboard(ctx, tt.args.dashboard) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - defer s.DeleteDashboard(ctx, tt.args.dashboard.ID) - - dashboards, _, err := s.FindDashboards(ctx, platform.DashboardFilter{}, platform.DefaultDashboardFindOptions) - if err != nil { - t.Fatalf("failed to retrieve dashboards: %v", err) - } - if diff := cmp.Diff(dashboards, tt.wants.dashboards, dashboardCmpOptions...); diff != "" { - t.Errorf("dashboards are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// AddDashboardCell testing -func AddDashboardCell( - init func(DashboardFields, *testing.T) (platform.DashboardService, string, func()), - t *testing.T, -) { - type args struct { - dashboardID platform2.ID - cell *platform.Cell - } - type wants struct { - err error - dashboards []*platform.Dashboard - } - - tests := []struct { - name string - fields DashboardFields - args args - wants wants - }{ - { - name: "basic add cell", - fields: DashboardFields{ - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform2.ID { - return MustIDBase16(dashTwoID) - }, - }, - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - }, - }, - Views: []*platform.View{ - { - ViewContents: platform.ViewContents{ - ID: MustIDBase16(dashTwoID), - }, - }, - }, - }, - args: args{ - dashboardID: MustIDBase16(dashOneID), - cell: &platform.Cell{ - ID: MustIDBase16(dashTwoID), - }, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Meta: platform.DashboardMeta{ - UpdatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: MustIDBase16(dashTwoID), - }, - }, - }, - }, - }, - }, - { - name: "add cell with no id", - fields: DashboardFields{ - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform2.ID { - return MustIDBase16(dashTwoID) - }, - }, - Dashboards: 
[]*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - }, - }, - Views: []*platform.View{ - { - ViewContents: platform.ViewContents{ - ID: MustIDBase16(dashTwoID), - }, - }, - }, - }, - args: args{ - dashboardID: MustIDBase16(dashOneID), - cell: &platform.Cell{}, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Meta: platform.DashboardMeta{ - UpdatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: MustIDBase16(dashTwoID), - }, - }, - }, - }, - }, - }, - { - name: "add cell with id not exist", - fields: DashboardFields{ - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform2.ID { - return MustIDBase16(dashTwoID) - }, - }, - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - }, - }, - Views: []*platform.View{ - { - ViewContents: platform.ViewContents{ - ID: MustIDBase16(dashTwoID), - }, - }, - }, - }, - args: args{ - dashboardID: MustIDBase16(dashThreeID), - cell: &platform.Cell{}, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: platform.OpAddDashboardCell, - Msg: platform.ErrDashboardNotFound, - }, - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.AddDashboardCell(ctx, tt.args.dashboardID, tt.args.cell, platform.AddDashboardCellOptions{}) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - defer s.DeleteDashboard(ctx, tt.args.dashboardID) - - dashboards, _, err := s.FindDashboards(ctx, platform.DashboardFilter{}, platform.DefaultDashboardFindOptions) - if err != nil { - t.Fatalf("failed to retrieve dashboards: %v", err) - } - if diff := cmp.Diff(dashboards, tt.wants.dashboards, dashboardCmpOptions...); diff != "" { - t.Errorf("dashboards are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindDashboardByID testing -func FindDashboardByID( - init func(DashboardFields, *testing.T) (platform.DashboardService, string, func()), - t *testing.T, -) { - type args struct { - id platform2.ID - } - type wants struct { - err error - dashboard *platform.Dashboard - } - - tests := []struct { - name string - fields DashboardFields - args args - wants wants - }{ - { - name: "basic find dashboard by id", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "dashboard2", - }, - }, - }, - args: args{ - id: MustIDBase16(dashTwoID), - }, - wants: wants{ - dashboard: &platform.Dashboard{ - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "dashboard2", - }, - }, - }, - { - name: "find dashboard by id not exists", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "dashboard2", - }, - }, - }, - args: args{ - id: MustIDBase16(dashThreeID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: 
platform.OpFindDashboardByID, - Msg: platform.ErrDashboardNotFound, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - dashboard, err := s.FindDashboardByID(ctx, tt.args.id) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(dashboard, tt.wants.dashboard, dashboardCmpOptions...); diff != "" { - t.Errorf("dashboard is different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindDashboards testing -func FindDashboards( - init func(DashboardFields, *testing.T) (platform.DashboardService, string, func()), - t *testing.T, -) { - type args struct { - IDs []*platform2.ID - organizationID *platform2.ID - ownerID *platform2.ID - findOptions platform.FindOptions - } - - type wants struct { - dashboards []*platform.Dashboard - err error - } - - tests := []struct { - name string - fields DashboardFields - args args - wants wants - }{ - { - name: "find all dashboards", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - }, - }, - }, - args: args{ - findOptions: platform.DefaultDashboardFindOptions, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - }, - }, - }, - }, - { - name: "find all dashboards by offset and limit", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - }, - { - ID: MustIDBase16(dashThreeID), - OrganizationID: 1, - Name: "321", - }, - }, - }, - args: args{ - findOptions: platform.FindOptions{ - Limit: 1, - Offset: 1, - }, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - }, - }, - }, - }, - { - name: "find all dashboards with limit", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - }, - { - ID: MustIDBase16(dashThreeID), - OrganizationID: 2, - Name: "321", - }, - }, - }, - args: args{ - findOptions: platform.FindOptions{ - Limit: 1, - }, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - }, - }, - }, - }, - { - name: "find all dashboards by descending", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - }, - { - ID: MustIDBase16(dashThreeID), - OrganizationID: 1, - Name: "321", - }, - }, - }, - args: args{ - findOptions: platform.FindOptions{ - Descending: true, - Offset: 1, - }, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - }, - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - }, - }, - }, - }, - { - name: "find all dashboards by org 10", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - }, - { - ID: 2, - OrganizationID: 10, - Name: "hello", - }, - { - ID: 
MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - }, - { - ID: 3, - OrganizationID: 10, - Name: "world", - }, - }, - }, - args: args{ - findOptions: platform.DefaultDashboardFindOptions, - organizationID: idPtr(10), - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: 2, - OrganizationID: 10, - Name: "hello", - }, - { - ID: 3, - OrganizationID: 10, - Name: "world", - }, - }, - }, - }, - { - name: "find all dashboards by offset and limit and org 1", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - }, - { - ID: MustIDBase16(dashThreeID), - OrganizationID: 1, - Name: "321", - }, - }, - }, - args: args{ - findOptions: platform.FindOptions{ - Limit: 1, - Offset: 1, - }, - organizationID: idPtr(1), - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - }, - }, - }, - }, - { - name: "find all dashboards sorted by created at", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Meta: platform.DashboardMeta{ - CreatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "abc", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Meta: platform.DashboardMeta{ - CreatedAt: time.Date(2004, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "xyz", - }, - }, - }, - args: args{ - findOptions: platform.FindOptions{ - SortBy: "CreatedAt", - }, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - Meta: platform.DashboardMeta{ - CreatedAt: time.Date(2004, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - }, - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - Meta: platform.DashboardMeta{ - CreatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - }, - }, - }, - }, - { - name: "find all dashboards sorted by updated at", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Meta: platform.DashboardMeta{ - UpdatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "abc", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Meta: platform.DashboardMeta{ - UpdatedAt: time.Date(2010, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "xyz", - }, - }, - }, - args: args{ - findOptions: platform.FindOptions{ - SortBy: "UpdatedAt", - }, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Meta: platform.DashboardMeta{ - UpdatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "abc", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Meta: platform.DashboardMeta{ - UpdatedAt: time.Date(2010, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "xyz", - }, - }, - }, - }, - { - name: "find dashboard by id", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - }, - }, - }, - args: args{ - IDs: []*platform2.ID{ - idPtr(MustIDBase16(dashTwoID)), - }, - findOptions: platform.DefaultDashboardFindOptions, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: 
MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - }, - }, - }, - }, - { - name: "find multiple dashboards by id", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - }, - }, - }, - args: args{ - IDs: []*platform2.ID{ - idPtr(MustIDBase16(dashOneID)), - idPtr(MustIDBase16(dashTwoID)), - }, - findOptions: platform.DefaultDashboardFindOptions, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - }, - }, - }, - }, - { - name: "find multiple dashboards by owner", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - OwnerID: MustIDBase16Ptr(ownerOneID), - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - OwnerID: MustIDBase16Ptr(ownerTwoID), - }, - { - ID: MustIDBase16(dashThreeID), - OrganizationID: 1, - Name: "def", - OwnerID: MustIDBase16Ptr(ownerTwoID), - }, - { - ID: MustIDBase16(dashFourID), - OrganizationID: 1, - Name: "def", - // ownerless dashboard added to similar nil - // owner pointer scenario - }, - }, - }, - args: args{ - ownerID: MustIDBase16Ptr(ownerOneID), - findOptions: platform.DefaultDashboardFindOptions, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - OwnerID: MustIDBase16Ptr(ownerOneID), - }, - }, - }, - }, - { - name: "find multiple dashboards by id not exists", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "abc", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "xyz", - }, - }, - }, - args: args{ - IDs: []*platform2.ID{ - idPtr(MustIDBase16(dashThreeID)), - }, - findOptions: platform.DefaultDashboardFindOptions, - }, - wants: wants{ - dashboards: []*platform.Dashboard{}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx, err := feature.Annotate(context.Background(), mock.NewFlagger(map[feature.Flag]interface{}{ - feature.EnforceOrganizationDashboardLimits(): true, - })) - if err != nil { - t.Fatal(err) - } - - filter := platform.DashboardFilter{ - IDs: tt.args.IDs, - OrganizationID: tt.args.organizationID, - OwnerID: tt.args.ownerID, - } - - dashboards, _, err := s.FindDashboards(ctx, filter, tt.args.findOptions) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(dashboards, tt.wants.dashboards, dashboardCmpOptions...); diff != "" { - t.Errorf("dashboards are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// DeleteDashboard testing -func DeleteDashboard( - init func(DashboardFields, *testing.T) (platform.DashboardService, string, func()), - t *testing.T, -) { - type args struct { - ID platform2.ID - } - type wants struct { - err error - dashboards []*platform.Dashboard - } - - tests := []struct { - name string - fields DashboardFields - args args - wants wants - }{ - { - name: "delete dashboards using exist id", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - Name: "A", - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - }, - { - Name: "B", - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - }, - 
}, - }, - args: args{ - ID: MustIDBase16(dashOneID), - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - Name: "B", - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - }, - }, - }, - }, - { - name: "delete dashboards using id that does not exist", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - Name: "A", - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - }, - { - Name: "B", - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - }, - }, - }, - args: args{ - ID: MustIDBase16(dashThreeID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: platform.OpDeleteDashboard, - Msg: platform.ErrDashboardNotFound, - }, - dashboards: []*platform.Dashboard{ - { - Name: "A", - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - }, - { - Name: "B", - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.DeleteDashboard(ctx, tt.args.ID) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - filter := platform.DashboardFilter{} - dashboards, _, err := s.FindDashboards(ctx, filter, platform.DefaultDashboardFindOptions) - if err != nil { - t.Fatalf("failed to retrieve dashboards: %v", err) - } - if diff := cmp.Diff(dashboards, tt.wants.dashboards, dashboardCmpOptions...); diff != "" { - t.Errorf("dashboards are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// UpdateDashboard testing -func UpdateDashboard( - init func(DashboardFields, *testing.T) (platform.DashboardService, string, func()), - t *testing.T, -) { - type args struct { - name string - description string - id platform2.ID - cells []*platform.Cell - } - type wants struct { - err error - dashboard *platform.Dashboard - } - - tests := []struct { - name string - fields DashboardFields - args args - wants wants - }{ - { - name: "update name", - fields: DashboardFields{ - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "dashboard2", - }, - }, - }, - args: args{ - id: MustIDBase16(dashOneID), - name: "changed", - }, - wants: wants{ - dashboard: &platform.Dashboard{ - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Meta: platform.DashboardMeta{ - UpdatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "changed", - }, - }, - }, - { - name: "update description", - fields: DashboardFields{ - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "dashboard2", - }, - }, - }, - args: args{ - id: MustIDBase16(dashOneID), - description: "changed", - }, - wants: wants{ - dashboard: &platform.Dashboard{ - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - Description: "changed", - Meta: platform.DashboardMeta{ - UpdatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - }, - }, - }, - { - name: "update description and name", - fields: DashboardFields{ - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - 
Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "dashboard2", - }, - }, - }, - args: args{ - id: MustIDBase16(dashOneID), - description: "changed", - name: "changed", - }, - wants: wants{ - dashboard: &platform.Dashboard{ - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "changed", - Description: "changed", - Meta: platform.DashboardMeta{ - UpdatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - }, - }, - }, - { - name: "update description name and cells", - fields: DashboardFields{ - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - IDGenerator: mock.IDGenerator{IDFn: func() platform2.ID { - return 5 - }}, - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "dashboard2", - }, - }, - }, - args: args{ - id: MustIDBase16(dashOneID), - description: "changed", - name: "changed", - cells: []*platform.Cell{ - { - CellProperty: platform.CellProperty{X: 0, Y: 2}, - View: &platform.View{ - Properties: &platform.SingleStatViewProperties{ - Type: platform.ViewPropertyTypeSingleStat, - Queries: []platform.DashboardQuery{{Text: "buckets() |> count()"}}, - }, - }, - }, - }, - }, - wants: wants{ - dashboard: &platform.Dashboard{ - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "changed", - Description: "changed", - Meta: platform.DashboardMeta{ - UpdatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Cells: []*platform.Cell{ - { - ID: 5, - CellProperty: platform.CellProperty{X: 0, Y: 2}, - View: &platform.View{ - ViewContents: platform.ViewContents{ - ID: 5, - }, - Properties: &platform.SingleStatViewProperties{ - Type: platform.ViewPropertyTypeSingleStat, - Queries: []platform.DashboardQuery{{Text: "buckets() |> count()"}}, - }, - }, - }, - }, - }, - }, - }, - { - name: "update with id not exist", - fields: DashboardFields{ - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - }, - { - ID: MustIDBase16(dashTwoID), - OrganizationID: 1, - Name: "dashboard2", - }, - }, - }, - args: args{ - id: MustIDBase16(dashThreeID), - description: "changed", - name: "changed", - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: platform.OpUpdateDashboard, - Msg: platform.ErrDashboardNotFound, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - upd := platform.DashboardUpdate{} - if tt.args.name != "" { - upd.Name = &tt.args.name - } - if tt.args.description != "" { - upd.Description = &tt.args.description - } - if tt.args.cells != nil { - upd.Cells = &tt.args.cells - } - - dashboard, err := s.UpdateDashboard(ctx, tt.args.id, upd) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(dashboard, tt.wants.dashboard, dashboardCmpOptions...); diff != "" { - t.Errorf("dashboard is different -got/+want\ndiff %s", diff) - } - }) - } -} - -// RemoveDashboardCell testing -func RemoveDashboardCell( - init func(DashboardFields, *testing.T) (platform.DashboardService, string, func()), - t *testing.T, -) { - 
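Updates throughout these cases are structs of pointer fields, so a nil field means "leave unchanged"; that is why the runner takes addresses (upd.Name = &tt.args.name), why the fixtures use helpers like int32Ptr, and why an update with no fields set is rejected with "must update at least one attribute". A minimal sketch of the pattern, with hypothetical Name and X fields:

```go
package example

import "errors"

// Cell is the record being partially updated.
type Cell struct {
	Name string
	X    int32
}

// CellUpdate uses pointer fields: a nil pointer means "leave as-is".
type CellUpdate struct {
	Name *string
	X    *int32
}

// Valid rejects an update that would change nothing, mirroring the
// "must update at least one attribute" check exercised by the tests.
func (u CellUpdate) Valid() error {
	if u.Name == nil && u.X == nil {
		return errors.New("must update at least one attribute")
	}
	return nil
}

// Apply copies only the fields that were explicitly set.
func (u CellUpdate) Apply(c *Cell) {
	if u.Name != nil {
		c.Name = *u.Name
	}
	if u.X != nil {
		c.X = *u.X
	}
}

func int32Ptr(i int32) *int32 { return &i }

// moveCell shifts a cell without touching its name.
func moveCell(c *Cell) error {
	upd := CellUpdate{X: int32Ptr(10)}
	if err := upd.Valid(); err != nil {
		return err
	}
	upd.Apply(c)
	return nil
}
```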
type args struct { - dashboardID platform2.ID - cellID platform2.ID - } - type wants struct { - err error - dashboards []*platform.Dashboard - } - - tests := []struct { - name string - fields DashboardFields - args args - wants wants - }{ - { - name: "basic remove cell", - fields: DashboardFields{ - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform2.ID { - return MustIDBase16(dashTwoID) - }, - }, - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: MustIDBase16(dashTwoID), - }, - { - ID: MustIDBase16(dashOneID), - }, - }, - }, - }, - Views: []*platform.View{ - { - ViewContents: platform.ViewContents{ - ID: MustIDBase16(dashTwoID), - }, - }, - }, - }, - args: args{ - dashboardID: MustIDBase16(dashOneID), - cellID: MustIDBase16(dashTwoID), - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Meta: platform.DashboardMeta{ - UpdatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: MustIDBase16(dashOneID), - }, - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.RemoveDashboardCell(ctx, tt.args.dashboardID, tt.args.cellID) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - defer s.DeleteDashboard(ctx, tt.args.dashboardID) - - dashboards, _, err := s.FindDashboards(ctx, platform.DashboardFilter{}, platform.DefaultDashboardFindOptions) - if err != nil { - t.Fatalf("failed to retrieve dashboards: %v", err) - } - if diff := cmp.Diff(dashboards, tt.wants.dashboards, dashboardCmpOptions...); diff != "" { - t.Errorf("dashboards are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// UpdateDashboardCell testing -func UpdateDashboardCell( - init func(DashboardFields, *testing.T) (platform.DashboardService, string, func()), - t *testing.T, -) { - type args struct { - dashboardID platform2.ID - cellID platform2.ID - cellUpdate platform.CellUpdate - } - type wants struct { - err error - dashboards []*platform.Dashboard - } - - tests := []struct { - name string - fields DashboardFields - args args - wants wants - }{ - { - name: "basic update cell", - fields: DashboardFields{ - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform2.ID { - return MustIDBase16(dashTwoID) - }, - }, - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: MustIDBase16(dashTwoID), - }, - { - ID: MustIDBase16(dashOneID), - }, - }, - }, - }, - }, - args: args{ - dashboardID: MustIDBase16(dashOneID), - cellID: MustIDBase16(dashTwoID), - cellUpdate: platform.CellUpdate{ - X: func(i int32) *int32 { return &i }(int32(10)), - }, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Meta: platform.DashboardMeta{ - UpdatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: MustIDBase16(dashTwoID), - CellProperty: platform.CellProperty{ - X: 10, - }, - }, - { - ID: MustIDBase16(dashOneID), - }, - 
}, - }, - }, - }, - }, - { - name: "invalid cell update without attribute", - fields: DashboardFields{ - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform2.ID { - return MustIDBase16(dashTwoID) - }, - }, - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: MustIDBase16(dashTwoID), - }, - { - ID: MustIDBase16(dashOneID), - }, - }, - }, - }, - }, - args: args{ - dashboardID: MustIDBase16(dashOneID), - cellID: MustIDBase16(dashTwoID), - cellUpdate: platform.CellUpdate{}, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: MustIDBase16(dashTwoID), - }, - { - ID: MustIDBase16(dashOneID), - }, - }, - }, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Op: platform.OpUpdateDashboardCell, - Msg: "must update at least one attribute", - }, - }, - }, - { - name: "invalid cell update cell id not exist", - fields: DashboardFields{ - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform2.ID { - return MustIDBase16(dashTwoID) - }, - }, - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: MustIDBase16(dashTwoID), - }, - { - ID: MustIDBase16(dashOneID), - }, - }, - }, - }, - }, - args: args{ - dashboardID: MustIDBase16(dashOneID), - cellID: MustIDBase16(dashFourID), - cellUpdate: platform.CellUpdate{ - X: int32Ptr(1), - }, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: MustIDBase16(dashTwoID), - }, - { - ID: MustIDBase16(dashOneID), - }, - }, - }, - }, - err: &errors.Error{ - Code: errors.ENotFound, - Op: platform.OpUpdateDashboardCell, - Msg: platform.ErrCellNotFound, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - _, err := s.UpdateDashboardCell(ctx, tt.args.dashboardID, tt.args.cellID, tt.args.cellUpdate) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - defer s.DeleteDashboard(ctx, tt.args.dashboardID) - - dashboards, _, err := s.FindDashboards(ctx, platform.DashboardFilter{}, platform.DefaultDashboardFindOptions) - if err != nil { - t.Fatalf("failed to retrieve dashboards: %v", err) - } - if diff := cmp.Diff(dashboards, tt.wants.dashboards, dashboardCmpOptions...); diff != "" { - t.Errorf("dashboards are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// ReplaceDashboardCells testing -func ReplaceDashboardCells( - init func(DashboardFields, *testing.T) (platform.DashboardService, string, func()), - t *testing.T, -) { - type args struct { - dashboardID platform2.ID - cells []*platform.Cell - } - type wants struct { - err error - dashboards []*platform.Dashboard - } - - tests := []struct { - name string - fields DashboardFields - args args - wants wants - }{ - { - name: "basic replace cells", - fields: DashboardFields{ - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform2.ID { - 
return MustIDBase16(dashTwoID) - }, - }, - Views: []*platform.View{ - { - ViewContents: platform.ViewContents{ - ID: MustIDBase16(dashTwoID), - }, - }, - { - ViewContents: platform.ViewContents{ - ID: MustIDBase16(dashOneID), - }, - }, - }, - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: MustIDBase16(dashTwoID), - }, - { - ID: MustIDBase16(dashOneID), - }, - }, - }, - }, - }, - args: args{ - dashboardID: MustIDBase16(dashOneID), - cells: []*platform.Cell{ - { - ID: MustIDBase16(dashTwoID), - CellProperty: platform.CellProperty{ - X: 10, - }, - }, - { - ID: MustIDBase16(dashOneID), - CellProperty: platform.CellProperty{ - Y: 11, - }, - }, - }, - }, - wants: wants{ - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - Meta: platform.DashboardMeta{ - UpdatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Cells: []*platform.Cell{ - { - ID: MustIDBase16(dashTwoID), - CellProperty: platform.CellProperty{ - X: 10, - }, - }, - { - ID: MustIDBase16(dashOneID), - CellProperty: platform.CellProperty{ - Y: 11, - }, - }, - }, - }, - }, - }, - }, - { - name: "try to add a cell that didn't previously exist", - fields: DashboardFields{ - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC)}, - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform2.ID { - return MustIDBase16(dashTwoID) - }, - }, - Views: []*platform.View{ - { - ViewContents: platform.ViewContents{ - ID: MustIDBase16(dashTwoID), - }, - }, - }, - Dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: MustIDBase16(dashTwoID), - }, - }, - }, - }, - }, - args: args{ - dashboardID: MustIDBase16(dashOneID), - cells: []*platform.Cell{ - { - ID: MustIDBase16(dashTwoID), - CellProperty: platform.CellProperty{ - X: 10, - }, - }, - { - ID: MustIDBase16(dashOneID), - CellProperty: platform.CellProperty{ - Y: 11, - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EConflict, - Op: platform.OpReplaceDashboardCells, - Msg: "cannot replace cells that were not already present", - }, - dashboards: []*platform.Dashboard{ - { - ID: MustIDBase16(dashOneID), - OrganizationID: 1, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: MustIDBase16(dashTwoID), - }, - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.ReplaceDashboardCells(ctx, tt.args.dashboardID, tt.args.cells) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - defer s.DeleteDashboard(ctx, tt.args.dashboardID) - - dashboards, _, err := s.FindDashboards(ctx, platform.DashboardFilter{}, platform.DefaultDashboardFindOptions) - if err != nil { - t.Fatalf("failed to retrieve dashboards: %v", err) - } - if diff := cmp.Diff(dashboards, tt.wants.dashboards, dashboardCmpOptions...); diff != "" { - t.Errorf("dashboards are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// GetDashboardCellView is the conformance test for the retrieving a dashboard cell. 
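All of the suite's assertions go through cmp.Diff with the shared dashboardCmpOptions: cmpopts.EquateEmpty() lets nil and empty slices or maps compare equal, and the custom comparer compares byte slices by content. A short example of why those options matter, assuming the canonical go-cmp import path:

```go
package example

import (
	"bytes"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type dashboard struct {
	Name  string
	Cells []string
	Key   []byte
}

var cmpOptions = cmp.Options{
	// nil and empty slices compare as equal, so a freshly decoded value
	// with Cells == nil matches a fixture declared with Cells: []string{}.
	cmpopts.EquateEmpty(),
	// Compare byte slices by content rather than by identity.
	cmp.Comparer(func(x, y []byte) bool { return bytes.Equal(x, y) }),
}

func TestDiffWithOptions(t *testing.T) {
	got := dashboard{Name: "dashboard1", Cells: nil, Key: []byte{1, 2}}
	want := dashboard{Name: "dashboard1", Cells: []string{}, Key: []byte{1, 2}}

	if diff := cmp.Diff(got, want, cmpOptions...); diff != "" {
		t.Errorf("dashboards are different -got/+want\n%s", diff)
	}
}
```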
-func GetDashboardCellView( - init func(DashboardFields, *testing.T) (platform.DashboardService, string, func()), - t *testing.T, -) { - type args struct { - dashboardID platform2.ID - cellID platform2.ID - } - type wants struct { - err error - view *platform.View - } - - tests := []struct { - name string - fields DashboardFields - args args - wants wants - }{ - { - name: "get view for cell that exists", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: 1, - OrganizationID: 1, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: 100, - }, - }, - }, - }, - }, - args: args{ - dashboardID: 1, - cellID: 100, - }, - wants: wants{ - view: &platform.View{ - ViewContents: platform.ViewContents{ - ID: 100, - }, - Properties: platform.EmptyViewProperties{}, - }, - }, - }, - { - name: "get view for cell that does not exist", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: 1, - OrganizationID: 1, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: 100, - }, - }, - }, - }, - }, - args: args{ - dashboardID: 1, - cellID: 5, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: platform.OpGetDashboardCellView, - Msg: platform.ErrViewNotFound, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - view, err := s.GetDashboardCellView(ctx, tt.args.dashboardID, tt.args.cellID) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(view, tt.wants.view); diff != "" { - t.Errorf("dashboard cell views are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// UpdateDashboardCellView is the conformance test for the updating a dashboard cell. 
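Expected errors in these tables are compared by code and message through diffPlatformErrors rather than by identity, so an independently constructed &errors.Error{Code: ..., Msg: ...} still matches whatever the service returns. A minimal sketch of that style of comparison, with a hypothetical codedError type standing in for the platform error type:

```go
package example

import "testing"

// codedError stands in for an error type that carries a machine-readable
// code alongside the human-readable message.
type codedError struct {
	Code string
	Msg  string
}

func (e *codedError) Error() string { return e.Code + ": " + e.Msg }

// errorsEqual compares errors the way the suite's helper does: by code and
// message, not by pointer identity.
func errorsEqual(t *testing.T, actual, expected *codedError) {
	t.Helper()
	if expected == nil && actual == nil {
		return
	}
	if (expected == nil) != (actual == nil) {
		t.Fatalf("expected %v, got %v", expected, actual)
	}
	if expected.Code != actual.Code {
		t.Errorf("expected code %q, got %q", expected.Code, actual.Code)
	}
	if expected.Msg != actual.Msg {
		t.Errorf("expected message %q, got %q", expected.Msg, actual.Msg)
	}
}

func TestNotFoundErrorsMatch(t *testing.T) {
	got := &codedError{Code: "not found", Msg: "view not found"}
	want := &codedError{Code: "not found", Msg: "view not found"}
	errorsEqual(t, got, want)
}
```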
-func UpdateDashboardCellView( - init func(DashboardFields, *testing.T) (platform.DashboardService, string, func()), - t *testing.T, -) { - type args struct { - dashboardID platform2.ID - cellID platform2.ID - properties platform.ViewProperties - name string - } - type wants struct { - err error - view *platform.View - } - - tests := []struct { - name string - fields DashboardFields - args args - wants wants - }{ - { - name: "update view name", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: 1, - OrganizationID: 1, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: 100, - }, - }, - }, - }, - }, - args: args{ - dashboardID: 1, - cellID: 100, - name: "hello", - }, - wants: wants{ - view: &platform.View{ - ViewContents: platform.ViewContents{ - ID: 100, - Name: "hello", - }, - Properties: platform.EmptyViewProperties{}, - }, - }, - }, - { - name: "update view type", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: 1, - OrganizationID: 1, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: 100, - }, - }, - }, - }, - }, - args: args{ - dashboardID: 1, - cellID: 100, - properties: platform.TableViewProperties{ - Type: "table", - TimeFormat: "rfc3339", - }, - }, - wants: wants{ - view: &platform.View{ - ViewContents: platform.ViewContents{ - ID: 100, - }, - Properties: platform.TableViewProperties{ - Type: "table", - TimeFormat: "rfc3339", - }, - }, - }, - }, - { - name: "update view type and name", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: 1, - OrganizationID: 1, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: 100, - }, - }, - }, - }, - }, - args: args{ - dashboardID: 1, - cellID: 100, - name: "hello", - properties: platform.TableViewProperties{ - Type: "table", - TimeFormat: "rfc3339", - }, - }, - wants: wants{ - view: &platform.View{ - ViewContents: platform.ViewContents{ - ID: 100, - Name: "hello", - }, - Properties: platform.TableViewProperties{ - Type: "table", - TimeFormat: "rfc3339", - }, - }, - }, - }, - { - name: "update view for cell that does not exist", - fields: DashboardFields{ - Dashboards: []*platform.Dashboard{ - { - ID: 1, - OrganizationID: 1, - Name: "dashboard1", - Cells: []*platform.Cell{ - { - ID: 100, - }, - }, - }, - }, - }, - args: args{ - dashboardID: 1, - cellID: 5, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: platform.OpGetDashboardCellView, - Msg: platform.ErrViewNotFound, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - upd := platform.ViewUpdate{} - if tt.args.name != "" { - upd.Name = &tt.args.name - } - - if tt.args.properties != nil { - upd.Properties = tt.args.properties - } - - view, err := s.UpdateDashboardCellView(ctx, tt.args.dashboardID, tt.args.cellID, upd) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(view, tt.wants.view); diff != "" { - t.Errorf("dashboard cell views are different -got/+want\ndiff %s", diff) - } - }) - } -} diff --git a/dashboards/testing/util.go b/dashboards/testing/util.go deleted file mode 100644 index a084a6b7053..00000000000 --- a/dashboards/testing/util.go +++ /dev/null @@ -1,134 +0,0 @@ -package testing - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// 
TODO(goller): remove opPrefix argument -func diffPlatformErrors(name string, actual, expected error, opPrefix string, t *testing.T) { - t.Helper() - ErrorsEqual(t, actual, expected) -} - -// ErrorsEqual checks to see if the provided errors are equivalent. -func ErrorsEqual(t *testing.T, actual, expected error) { - t.Helper() - if expected == nil && actual == nil { - return - } - - if expected == nil && actual != nil { - t.Errorf("unexpected error %s", actual.Error()) - } - - if expected != nil && actual == nil { - t.Errorf("expected error %s but received nil", expected.Error()) - } - - if errors.ErrorCode(expected) != errors.ErrorCode(actual) { - t.Logf("\nexpected: %v\nactual: %v\n\n", expected, actual) - t.Errorf("expected error code %q but received %q", errors.ErrorCode(expected), errors.ErrorCode(actual)) - } - - if errors.ErrorMessage(expected) != errors.ErrorMessage(actual) { - t.Logf("\nexpected: %v\nactual: %v\n\n", expected, actual) - t.Errorf("expected error message %q but received %q", errors.ErrorMessage(expected), errors.ErrorMessage(actual)) - } -} - -// FloatPtr takes the ref of a float number. -func FloatPtr(f float64) *float64 { - p := new(float64) - *p = f - return p -} - -func idPtr(id platform.ID) *platform.ID { - return &id -} - -// MustIDBase16 is an helper to ensure a correct ID is built during testing. -func MustIDBase16(s string) platform.ID { - id, err := platform.IDFromString(s) - if err != nil { - panic(err) - } - return *id -} - -// MustIDBase16Ptr is an helper to ensure a correct ID ptr ref is built during testing. -func MustIDBase16Ptr(s string) *platform.ID { - id := MustIDBase16(s) - return &id -} - -func MustCreateOrgs(ctx context.Context, svc influxdb.OrganizationService, os ...*influxdb.Organization) { - for _, o := range os { - if err := svc.CreateOrganization(ctx, o); err != nil { - panic(err) - } - } -} - -func MustCreateLabels(ctx context.Context, svc influxdb.LabelService, labels ...*influxdb.Label) { - for _, l := range labels { - if err := svc.CreateLabel(ctx, l); err != nil { - panic(err) - } - } -} - -func MustCreateUsers(ctx context.Context, svc influxdb.UserService, us ...*influxdb.User) { - for _, u := range us { - if err := svc.CreateUser(ctx, u); err != nil { - panic(err) - } - } -} - -func MustCreateMappings(ctx context.Context, svc influxdb.UserResourceMappingService, ms ...*influxdb.UserResourceMapping) { - for _, m := range ms { - if err := svc.CreateUserResourceMapping(ctx, m); err != nil { - panic(err) - } - } -} - -func MustMakeUsersOrgOwner(ctx context.Context, svc influxdb.UserResourceMappingService, oid platform.ID, uids ...platform.ID) { - ms := make([]*influxdb.UserResourceMapping, len(uids)) - for i, uid := range uids { - ms[i] = &influxdb.UserResourceMapping{ - UserID: uid, - UserType: influxdb.Owner, - ResourceType: influxdb.OrgsResourceType, - ResourceID: oid, - } - } - MustCreateMappings(ctx, svc, ms...) -} - -func MustMakeUsersOrgMember(ctx context.Context, svc influxdb.UserResourceMappingService, oid platform.ID, uids ...platform.ID) { - ms := make([]*influxdb.UserResourceMapping, len(uids)) - for i, uid := range uids { - ms[i] = &influxdb.UserResourceMapping{ - UserID: uid, - UserType: influxdb.Member, - ResourceType: influxdb.OrgsResourceType, - ResourceID: oid, - } - } - MustCreateMappings(ctx, svc, ms...) 
-} - -func MustNewPermissionAtID(id platform.ID, a influxdb.Action, rt influxdb.ResourceType, orgID platform.ID) *influxdb.Permission { - perm, err := influxdb.NewPermissionAtID(id, a, rt, orgID) - if err != nil { - panic(err) - } - return perm -} diff --git a/dashboards/transport/http.go b/dashboards/transport/http.go deleted file mode 100644 index cd7a546104f..00000000000 --- a/dashboards/transport/http.go +++ /dev/null @@ -1,1098 +0,0 @@ -package transport - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "path" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "go.uber.org/zap" -) - -// DashboardHandler is the handler for the dashboard service -type DashboardHandler struct { - chi.Router - - api *kithttp.API - log *zap.Logger - - dashboardService influxdb.DashboardService - labelService influxdb.LabelService - userService influxdb.UserService - orgService influxdb.OrganizationService -} - -const ( - prefixDashboards = "/api/v2/dashboards" -) - -// NewDashboardHandler returns a new instance of DashboardHandler. -func NewDashboardHandler( - log *zap.Logger, - dashboardService influxdb.DashboardService, - labelService influxdb.LabelService, - userService influxdb.UserService, - orgService influxdb.OrganizationService, - urmHandler, labelHandler http.Handler, -) *DashboardHandler { - h := &DashboardHandler{ - log: log, - api: kithttp.NewAPI(kithttp.WithLog(log)), - dashboardService: dashboardService, - labelService: labelService, - userService: userService, - orgService: orgService, - } - - // setup routing - { - r := chi.NewRouter() - r.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - - r.Route("/", func(r chi.Router) { - r.Post("/", h.handlePostDashboard) - r.Get("/", h.handleGetDashboards) - - r.Route("/{id}", func(r chi.Router) { - r.Get("/", h.handleGetDashboard) - r.Patch("/", h.handlePatchDashboard) - r.Delete("/", h.handleDeleteDashboard) - - r.Route("/cells", func(r chi.Router) { - r.Put("/", h.handlePutDashboardCells) - r.Post("/", h.handlePostDashboardCell) - - r.Route("/{cellID}", func(r chi.Router) { - r.Delete("/", h.handleDeleteDashboardCell) - r.Patch("/", h.handlePatchDashboardCell) - - r.Route("/view", func(r chi.Router) { - r.Get("/", h.handleGetDashboardCellView) - r.Patch("/", h.handlePatchDashboardCellView) - }) - }) - }) - - // mount embedded resources - mountableRouter := r.With(kithttp.ValidResource(h.api, h.lookupOrgByDashboardID)) - mountableRouter.Mount("/members", urmHandler) - mountableRouter.Mount("/owners", urmHandler) - mountableRouter.Mount("/labels", labelHandler) - }) - }) - - h.Router = r - } - - return h -} - -// Prefix returns the mounting prefix for the handler -func (h *DashboardHandler) Prefix() string { - return prefixDashboards -} - -type dashboardLinks struct { - Self string `json:"self"` - Members string `json:"members"` - Owners string `json:"owners"` - Cells string `json:"cells"` - Labels string `json:"labels"` - Organization string `json:"org"` -} - -type dashboardResponse struct { - ID platform.ID `json:"id,omitempty"` - OrganizationID platform.ID `json:"orgID,omitempty"` - Name string `json:"name"` - Description string `json:"description"` - Meta 
influxdb.DashboardMeta `json:"meta"` - Cells []dashboardCellResponse `json:"cells"` - Labels []influxdb.Label `json:"labels"` - Links dashboardLinks `json:"links"` -} - -func (d dashboardResponse) toinfluxdb() *influxdb.Dashboard { - var cells []*influxdb.Cell - if len(d.Cells) > 0 { - cells = make([]*influxdb.Cell, len(d.Cells)) - } - for i := range d.Cells { - cells[i] = d.Cells[i].toinfluxdb() - } - return &influxdb.Dashboard{ - ID: d.ID, - OrganizationID: d.OrganizationID, - Name: d.Name, - Description: d.Description, - Meta: d.Meta, - Cells: cells, - } -} - -func newDashboardResponse(d *influxdb.Dashboard, labels []*influxdb.Label) dashboardResponse { - res := dashboardResponse{ - Links: dashboardLinks{ - Self: fmt.Sprintf("/api/v2/dashboards/%s", d.ID), - Members: fmt.Sprintf("/api/v2/dashboards/%s/members", d.ID), - Owners: fmt.Sprintf("/api/v2/dashboards/%s/owners", d.ID), - Cells: fmt.Sprintf("/api/v2/dashboards/%s/cells", d.ID), - Labels: fmt.Sprintf("/api/v2/dashboards/%s/labels", d.ID), - Organization: fmt.Sprintf("/api/v2/orgs/%s", d.OrganizationID), - }, - ID: d.ID, - OrganizationID: d.OrganizationID, - Name: d.Name, - Description: d.Description, - Meta: d.Meta, - Labels: []influxdb.Label{}, - Cells: []dashboardCellResponse{}, - } - - for _, l := range labels { - res.Labels = append(res.Labels, *l) - } - - for _, cell := range d.Cells { - res.Cells = append(res.Cells, newDashboardCellResponse(d.ID, cell)) - } - - return res -} - -type dashboardCellResponse struct { - influxdb.Cell - Properties influxdb.ViewProperties `json:"-"` - Name string `json:"name,omitempty"` - Links map[string]string `json:"links"` -} - -func (d *dashboardCellResponse) MarshalJSON() ([]byte, error) { - r := struct { - influxdb.Cell - Properties json.RawMessage `json:"properties,omitempty"` - Name string `json:"name,omitempty"` - Links map[string]string `json:"links"` - }{ - Cell: d.Cell, - Links: d.Links, - } - - if d.Cell.View != nil { - b, err := influxdb.MarshalViewPropertiesJSON(d.Cell.View.Properties) - if err != nil { - return nil, err - } - r.Properties = b - r.Name = d.Cell.View.Name - } - - return json.Marshal(r) -} - -func (c dashboardCellResponse) toinfluxdb() *influxdb.Cell { - return &c.Cell -} - -func newDashboardCellResponse(dashboardID platform.ID, c *influxdb.Cell) dashboardCellResponse { - resp := dashboardCellResponse{ - Cell: *c, - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/dashboards/%s/cells/%s", dashboardID, c.ID), - "view": fmt.Sprintf("/api/v2/dashboards/%s/cells/%s/view", dashboardID, c.ID), - }, - } - - if c.View != nil { - resp.Properties = c.View.Properties - resp.Name = c.View.Name - } - return resp -} - -type dashboardCellsResponse struct { - Cells []dashboardCellResponse `json:"cells"` - Links map[string]string `json:"links"` -} - -func newDashboardCellsResponse(dashboardID platform.ID, cs []*influxdb.Cell) dashboardCellsResponse { - res := dashboardCellsResponse{ - Cells: []dashboardCellResponse{}, - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/dashboards/%s/cells", dashboardID), - }, - } - - for _, cell := range cs { - res.Cells = append(res.Cells, newDashboardCellResponse(dashboardID, cell)) - } - - return res -} - -type viewLinks struct { - Self string `json:"self"` -} - -type dashboardCellViewResponse struct { - influxdb.View - Links viewLinks `json:"links"` -} - -func (r dashboardCellViewResponse) MarshalJSON() ([]byte, error) { - props, err := influxdb.MarshalViewPropertiesJSON(r.Properties) - if err != nil { - return nil, err - } - - 
return json.Marshal(struct { - influxdb.ViewContents - Links viewLinks `json:"links"` - Properties json.RawMessage `json:"properties"` - }{ - ViewContents: r.ViewContents, - Links: r.Links, - Properties: props, - }) -} - -func newDashboardCellViewResponse(dashID, cellID platform.ID, v *influxdb.View) dashboardCellViewResponse { - return dashboardCellViewResponse{ - Links: viewLinks{ - Self: fmt.Sprintf("/api/v2/dashboards/%s/cells/%s", dashID, cellID), - }, - View: *v, - } -} - -// handleGetDashboards returns all dashboards within the store. -func (h *DashboardHandler) handleGetDashboards(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetDashboardsRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - dashboardFilter := req.filter - - if dashboardFilter.Organization != nil { - orgNameFilter := influxdb.OrganizationFilter{Name: dashboardFilter.Organization} - o, err := h.orgService.FindOrganization(ctx, orgNameFilter) - if err != nil { - h.api.Err(w, r, err) - return - } - dashboardFilter.OrganizationID = &o.ID - } - - dashboards, _, err := h.dashboardService.FindDashboards(ctx, dashboardFilter, req.opts) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.log.Debug("List Dashboards", zap.String("dashboards", fmt.Sprint(dashboards))) - - h.api.Respond(w, r, http.StatusOK, newGetDashboardsResponse(ctx, dashboards, req.filter, req.opts, h.labelService)) -} - -type getDashboardsRequest struct { - filter influxdb.DashboardFilter - opts influxdb.FindOptions -} - -func decodeGetDashboardsRequest(ctx context.Context, r *http.Request) (*getDashboardsRequest, error) { - qp := r.URL.Query() - req := &getDashboardsRequest{} - - opts, err := influxdb.DecodeFindOptions(r) - if err != nil { - return nil, err - } - req.opts = *opts - - initialID := platform.InvalidID() - if ids, ok := qp["id"]; ok { - for _, id := range ids { - i := initialID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - req.filter.IDs = append(req.filter.IDs, &i) - } - } else if ownerID := qp.Get("ownerID"); ownerID != "" { - req.filter.OwnerID = &initialID - if err := req.filter.OwnerID.DecodeFromString(ownerID); err != nil { - return nil, err - } - } else if orgID := qp.Get("orgID"); orgID != "" { - id := platform.InvalidID() - if err := id.DecodeFromString(orgID); err != nil { - return nil, err - } - req.filter.OrganizationID = &id - } else if org := qp.Get("org"); org != "" { - req.filter.Organization = &org - } - - return req, nil -} - -type getDashboardsResponse struct { - Links *influxdb.PagingLinks `json:"links"` - Dashboards []dashboardResponse `json:"dashboards"` -} - -func (d getDashboardsResponse) toinfluxdb() []*influxdb.Dashboard { - res := make([]*influxdb.Dashboard, len(d.Dashboards)) - for i := range d.Dashboards { - res[i] = d.Dashboards[i].toinfluxdb() - } - return res -} - -func newGetDashboardsResponse(ctx context.Context, dashboards []*influxdb.Dashboard, filter influxdb.DashboardFilter, opts influxdb.FindOptions, labelService influxdb.LabelService) getDashboardsResponse { - res := getDashboardsResponse{ - Links: influxdb.NewPagingLinks(prefixDashboards, opts, filter, len(dashboards)), - Dashboards: make([]dashboardResponse, 0, len(dashboards)), - } - - for _, dashboard := range dashboards { - if dashboard != nil { - labels, _ := labelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: dashboard.ID, ResourceType: influxdb.DashboardsResourceType}) - res.Dashboards = append(res.Dashboards, 
newDashboardResponse(dashboard, labels)) - } - } - - return res -} - -// handlePostDashboard creates a new dashboard. -func (h *DashboardHandler) handlePostDashboard(w http.ResponseWriter, r *http.Request) { - var ( - ctx = r.Context() - d influxdb.Dashboard - ) - - if err := h.api.DecodeJSON(r.Body, &d); err != nil { - h.api.Err(w, r, err) - return - } - - if err := h.dashboardService.CreateDashboard(ctx, &d); err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusCreated, newDashboardResponse(&d, []*influxdb.Label{})) -} - -// handleGetDashboard retrieves a dashboard by ID. -func (h *DashboardHandler) handleGetDashboard(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetDashboardRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - dashboard, err := h.dashboardService.FindDashboardByID(ctx, req.DashboardID) - if err != nil { - h.api.Err(w, r, err) - return - } - - if r.URL.Query().Get("include") == "properties" { - for _, c := range dashboard.Cells { - view, err := h.dashboardService.GetDashboardCellView(ctx, dashboard.ID, c.ID) - if err != nil { - h.api.Err(w, r, err) - return - } - - if view != nil { - c.View = view - } - } - } - - labels, err := h.labelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: dashboard.ID, ResourceType: influxdb.DashboardsResourceType}) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.log.Debug("Get Dashboard", zap.String("dashboard", fmt.Sprint(dashboard))) - - h.api.Respond(w, r, http.StatusOK, newDashboardResponse(dashboard, labels)) -} - -type getDashboardRequest struct { - DashboardID platform.ID -} - -func decodeGetDashboardRequest(ctx context.Context, r *http.Request) (*getDashboardRequest, error) { - id := chi.URLParam(r, "id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - return &getDashboardRequest{ - DashboardID: i, - }, nil -} - -// handleDeleteDashboard removes a dashboard by ID. -func (h *DashboardHandler) handleDeleteDashboard(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeDeleteDashboardRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - if err := h.dashboardService.DeleteDashboard(ctx, req.DashboardID); err != nil { - h.api.Err(w, r, err) - return - } - - h.log.Debug("Dashboard deleted", zap.String("dashboardID", req.DashboardID.String())) - - w.WriteHeader(http.StatusNoContent) -} - -type deleteDashboardRequest struct { - DashboardID platform.ID -} - -func decodeDeleteDashboardRequest(ctx context.Context, r *http.Request) (*deleteDashboardRequest, error) { - id := chi.URLParam(r, "id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - return &deleteDashboardRequest{ - DashboardID: i, - }, nil -} - -// handlePatchDashboard updates a dashboard. 
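// Illustration (drawn from the handler tests in this change, not from separate
// API docs): the body is decoded into an influxdb.DashboardUpdate, so a
// request such as
//
//	PATCH /api/v2/dashboards/020f755c3c082000
//	{"name": "example"}
//
// renames the dashboard and leaves omitted fields unchanged; the handler
// responds 200 OK with the updated dashboard and its labels.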
-func (h *DashboardHandler) handlePatchDashboard(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePatchDashboardRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - dashboard, err := h.dashboardService.UpdateDashboard(ctx, req.DashboardID, req.Upd) - if err != nil { - h.api.Err(w, r, err) - return - } - - labels, err := h.labelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: dashboard.ID, ResourceType: influxdb.DashboardsResourceType}) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.log.Debug("Dashboard updated", zap.String("dashboard", fmt.Sprint(dashboard))) - - h.api.Respond(w, r, http.StatusOK, newDashboardResponse(dashboard, labels)) -} - -type patchDashboardRequest struct { - DashboardID platform.ID - Upd influxdb.DashboardUpdate -} - -func decodePatchDashboardRequest(ctx context.Context, r *http.Request) (*patchDashboardRequest, error) { - req := &patchDashboardRequest{} - upd := influxdb.DashboardUpdate{} - if err := json.NewDecoder(r.Body).Decode(&upd); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - req.Upd = upd - - id := chi.URLParam(r, "id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - req.DashboardID = i - - if err := req.Valid(); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - return req, nil -} - -// Valid validates that the dashboard ID is non zero valued and update has expected values set. -func (r *patchDashboardRequest) Valid() error { - if !r.DashboardID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "missing dashboard ID", - } - } - - if pe := r.Upd.Valid(); pe != nil { - return pe - } - return nil -} - -type postDashboardCellRequest struct { - dashboardID platform.ID - *influxdb.CellProperty - UsingView *platform.ID `json:"usingView"` - Name *string `json:"name"` -} - -func decodePostDashboardCellRequest(ctx context.Context, r *http.Request) (*postDashboardCellRequest, error) { - req := &postDashboardCellRequest{} - id := chi.URLParam(r, "id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - if err := json.NewDecoder(r.Body).Decode(req); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "bad request json body", - Err: err, - } - } - - if err := req.dashboardID.DecodeFromString(id); err != nil { - return nil, err - } - - return req, nil -} - -// handlePostDashboardCell creates a dashboard cell. 
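// Illustration (request shape taken from the handler tests in this change):
// the body may reference an existing view to copy, e.g.
//
//	POST /api/v2/dashboards/{id}/cells
//	{"x":10,"y":11,"name":"name1","usingView":"020f755c3c082001"}
//
// or supply the cell geometry directly through the embedded CellProperty
// fields. A body that provides neither is rejected with code "invalid" and
// the message "req body is empty".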
-func (h *DashboardHandler) handlePostDashboardCell(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePostDashboardCellRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - cell := new(influxdb.Cell) - - opts := new(influxdb.AddDashboardCellOptions) - if req.UsingView != nil || req.Name != nil { - opts.View = new(influxdb.View) - if req.UsingView != nil { - // load the view - opts.View, err = h.dashboardService.GetDashboardCellView(ctx, req.dashboardID, *req.UsingView) - if err != nil { - h.api.Err(w, r, err) - return - } - } - if req.Name != nil { - opts.View.Name = *req.Name - } - } else if req.CellProperty == nil { - h.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Msg: "req body is empty", - }) - return - } - - if req.CellProperty != nil { - cell.CellProperty = *req.CellProperty - } - - if err := h.dashboardService.AddDashboardCell(ctx, req.dashboardID, cell, *opts); err != nil { - h.api.Err(w, r, err) - return - } - - h.log.Debug("Dashboard cell created", zap.String("dashboardID", req.dashboardID.String()), zap.String("cell", fmt.Sprint(cell))) - - h.api.Respond(w, r, http.StatusCreated, newDashboardCellResponse(req.dashboardID, cell)) -} - -type putDashboardCellRequest struct { - dashboardID platform.ID - cells []*influxdb.Cell -} - -func decodePutDashboardCellRequest(ctx context.Context, r *http.Request) (*putDashboardCellRequest, error) { - req := &putDashboardCellRequest{} - - id := chi.URLParam(r, "id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - if err := req.dashboardID.DecodeFromString(id); err != nil { - return nil, err - } - - req.cells = []*influxdb.Cell{} - if err := json.NewDecoder(r.Body).Decode(&req.cells); err != nil { - return nil, err - } - - return req, nil -} - -// handlePutDashboardCells replaces a dashboards cells. 
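// Sketch of the exchange, inferred from the decoder and handler below: the
// request body is a JSON array of cells, e.g.
//
//	PUT /api/v2/dashboards/{id}/cells
//	[{"id":"da7aba5e5d81e550","x":1,"y":2,"w":3,"h":4}]
//
// and the handler replies 201 Created with the full replacement cell list.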
-func (h *DashboardHandler) handlePutDashboardCells(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePutDashboardCellRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - if err := h.dashboardService.ReplaceDashboardCells(ctx, req.dashboardID, req.cells); err != nil { - h.api.Err(w, r, err) - return - } - - h.log.Debug("Dashboard cell replaced", zap.String("dashboardID", req.dashboardID.String()), zap.String("cells", fmt.Sprint(req.cells))) - - h.api.Respond(w, r, http.StatusCreated, newDashboardCellsResponse(req.dashboardID, req.cells)) -} - -type deleteDashboardCellRequest struct { - dashboardID platform.ID - cellID platform.ID -} - -func decodeDeleteDashboardCellRequest(ctx context.Context, r *http.Request) (*deleteDashboardCellRequest, error) { - req := &deleteDashboardCellRequest{} - - id := chi.URLParam(r, "id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - if err := req.dashboardID.DecodeFromString(id); err != nil { - return nil, err - } - - cellID := chi.URLParam(r, "cellID") - if cellID == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing cellID", - } - } - if err := req.cellID.DecodeFromString(cellID); err != nil { - return nil, err - } - - return req, nil -} - -type getDashboardCellViewRequest struct { - dashboardID platform.ID - cellID platform.ID -} - -func decodeGetDashboardCellViewRequest(ctx context.Context, r *http.Request) (*getDashboardCellViewRequest, error) { - req := &getDashboardCellViewRequest{} - - id := chi.URLParam(r, "id") - if id == "" { - return nil, errors.NewError(errors.WithErrorMsg("url missing id"), errors.WithErrorCode(errors.EInvalid)) - } - if err := req.dashboardID.DecodeFromString(id); err != nil { - return nil, err - } - - cellID := chi.URLParam(r, "cellID") - if cellID == "" { - return nil, errors.NewError(errors.WithErrorMsg("url missing cellID"), errors.WithErrorCode(errors.EInvalid)) - } - if err := req.cellID.DecodeFromString(cellID); err != nil { - return nil, err - } - - return req, nil -} - -func (h *DashboardHandler) handleGetDashboardCellView(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetDashboardCellViewRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - view, err := h.dashboardService.GetDashboardCellView(ctx, req.dashboardID, req.cellID) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.log.Debug("Dashboard cell view retrieved", zap.String("dashboardID", req.dashboardID.String()), zap.String("cellID", req.cellID.String()), zap.String("view", fmt.Sprint(view))) - - h.api.Respond(w, r, http.StatusOK, newDashboardCellViewResponse(req.dashboardID, req.cellID, view)) -} - -type patchDashboardCellViewRequest struct { - dashboardID platform.ID - cellID platform.ID - upd influxdb.ViewUpdate -} - -func decodePatchDashboardCellViewRequest(ctx context.Context, r *http.Request) (*patchDashboardCellViewRequest, error) { - req := &patchDashboardCellViewRequest{} - - id := chi.URLParam(r, "id") - if id == "" { - return nil, errors.NewError(errors.WithErrorMsg("url missing id"), errors.WithErrorCode(errors.EInvalid)) - } - if err := req.dashboardID.DecodeFromString(id); err != nil { - return nil, err - } - - cellID := chi.URLParam(r, "cellID") - if cellID == "" { - return nil, errors.NewError(errors.WithErrorMsg("url missing cellID"), errors.WithErrorCode(errors.EInvalid)) - } - if err := req.cellID.DecodeFromString(cellID); err != nil { - 
return nil, err - } - - if err := json.NewDecoder(r.Body).Decode(&req.upd); err != nil { - return nil, err - } - - return req, nil -} - -func (h *DashboardHandler) handlePatchDashboardCellView(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePatchDashboardCellViewRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - view, err := h.dashboardService.UpdateDashboardCellView(ctx, req.dashboardID, req.cellID, req.upd) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Dashboard cell view updated", zap.String("dashboardID", req.dashboardID.String()), zap.String("cellID", req.cellID.String()), zap.String("view", fmt.Sprint(view))) - - h.api.Respond(w, r, http.StatusOK, newDashboardCellViewResponse(req.dashboardID, req.cellID, view)) -} - -// handleDeleteDashboardCell deletes a dashboard cell. -func (h *DashboardHandler) handleDeleteDashboardCell(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeDeleteDashboardCellRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - if err := h.dashboardService.RemoveDashboardCell(ctx, req.dashboardID, req.cellID); err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Dashboard cell deleted", zap.String("dashboardID", req.dashboardID.String()), zap.String("cellID", req.cellID.String())) - - w.WriteHeader(http.StatusNoContent) -} - -type patchDashboardCellRequest struct { - dashboardID platform.ID - cellID platform.ID - upd influxdb.CellUpdate -} - -func decodePatchDashboardCellRequest(ctx context.Context, r *http.Request) (*patchDashboardCellRequest, error) { - req := &patchDashboardCellRequest{} - - id := chi.URLParam(r, "id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - if err := req.dashboardID.DecodeFromString(id); err != nil { - return nil, err - } - - cellID := chi.URLParam(r, "cellID") - if cellID == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "cannot provide empty cell id", - } - } - if err := req.cellID.DecodeFromString(cellID); err != nil { - return nil, err - } - - if err := json.NewDecoder(r.Body).Decode(&req.upd); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - if pe := req.upd.Valid(); pe != nil { - return nil, pe - } - - return req, nil -} - -// handlePatchDashboardCell updates a dashboard cell. -func (h *DashboardHandler) handlePatchDashboardCell(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePatchDashboardCellRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - cell, err := h.dashboardService.UpdateDashboardCell(ctx, req.dashboardID, req.cellID, req.upd) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.log.Debug("Dashboard cell updated", zap.String("dashboardID", req.dashboardID.String()), zap.String("cell", fmt.Sprint(cell))) - - h.api.Respond(w, r, http.StatusOK, newDashboardCellResponse(req.dashboardID, cell)) -} - -func (h *DashboardHandler) lookupOrgByDashboardID(ctx context.Context, id platform.ID) (platform.ID, error) { - d, err := h.dashboardService.FindDashboardByID(ctx, id) - if err != nil { - return 0, err - } - return d.OrganizationID, nil -} - -// DashboardService is a dashboard service over HTTP to the influxdb server. -type DashboardService struct { - Client *httpc.Client -} - -// FindDashboardByID returns a single dashboard by ID. 
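// Minimal usage sketch (assumes an *httpc.Client that is already configured
// with the server address and credentials; client construction is not part of
// this file):
//
//	svc := &DashboardService{Client: client}
//	dash, err := svc.FindDashboardByID(ctx, dashboardID)
//	if err != nil {
//		// handle lookup failure
//	}
//	fmt.Println(dash.Name)
//
// The call issues GET /api/v2/dashboards/{id}?include=properties so that cell
// view properties are populated on the returned dashboard.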
-func (s *DashboardService) FindDashboardByID(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { - var dr dashboardResponse - err := s.Client. - Get(prefixDashboards, id.String()). - QueryParams([2]string{"include", "properties"}). - DecodeJSON(&dr). - Do(ctx) - if err != nil { - return nil, err - } - return dr.toinfluxdb(), nil -} - -// FindDashboards returns a list of dashboards that match filter and the total count of matching dashboards. -// Additional options provide pagination & sorting. -func (s *DashboardService) FindDashboards(ctx context.Context, filter influxdb.DashboardFilter, opts influxdb.FindOptions) ([]*influxdb.Dashboard, int, error) { - queryPairs := influxdb.FindOptionParams(opts) - for _, id := range filter.IDs { - queryPairs = append(queryPairs, [2]string{"id", id.String()}) - } - if filter.OrganizationID != nil { - queryPairs = append(queryPairs, [2]string{"orgID", filter.OrganizationID.String()}) - } - if filter.Organization != nil { - queryPairs = append(queryPairs, [2]string{"org", *filter.Organization}) - } - - var dr getDashboardsResponse - err := s.Client. - Get(prefixDashboards). - QueryParams(queryPairs...). - DecodeJSON(&dr). - Do(ctx) - if err != nil { - return nil, 0, err - } - - dashboards := dr.toinfluxdb() - return dashboards, len(dashboards), nil -} - -// CreateDashboard creates a new dashboard and sets b.ID with the new identifier. -func (s *DashboardService) CreateDashboard(ctx context.Context, d *influxdb.Dashboard) error { - return s.Client. - PostJSON(d, prefixDashboards). - DecodeJSON(d). - Do(ctx) -} - -// UpdateDashboard updates a single dashboard with changeset. -// Returns the new dashboard state after update. -func (s *DashboardService) UpdateDashboard(ctx context.Context, id platform.ID, upd influxdb.DashboardUpdate) (*influxdb.Dashboard, error) { - var d influxdb.Dashboard - err := s.Client. - PatchJSON(upd, prefixDashboards, id.String()). - DecodeJSON(&d). - Do(ctx) - if err != nil { - return nil, err - } - - if len(d.Cells) == 0 { - // TODO(@jsteenb2): decipher why this is doing this? - d.Cells = nil - } - - return &d, nil -} - -// DeleteDashboard removes a dashboard by ID. -func (s *DashboardService) DeleteDashboard(ctx context.Context, id platform.ID) error { - return s.Client. - Delete(dashboardIDPath(id)). - Do(ctx) -} - -// AddDashboardCell adds a cell to a dashboard. -func (s *DashboardService) AddDashboardCell(ctx context.Context, id platform.ID, c *influxdb.Cell, opts influxdb.AddDashboardCellOptions) error { - return s.Client. - PostJSON(c, cellPath(id)). - DecodeJSON(c). - Do(ctx) -} - -// RemoveDashboardCell removes a dashboard. -func (s *DashboardService) RemoveDashboardCell(ctx context.Context, dashboardID, cellID platform.ID) error { - return s.Client. - Delete(dashboardCellIDPath(dashboardID, cellID)). - Do(ctx) -} - -// UpdateDashboardCell replaces the dashboard cell with the provided ID. -func (s *DashboardService) UpdateDashboardCell(ctx context.Context, dashboardID, cellID platform.ID, upd influxdb.CellUpdate) (*influxdb.Cell, error) { - if err := upd.Valid(); err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - var c influxdb.Cell - err := s.Client. - PatchJSON(upd, dashboardCellIDPath(dashboardID, cellID)). - DecodeJSON(&c). - Do(ctx) - if err != nil { - return nil, err - } - - return &c, nil -} - -// GetDashboardCellView retrieves the view for a dashboard cell. 
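// Rough shape of the round trip (the path comes from the cellViewPath helper
// at the bottom of this file):
//
//	GET /api/v2/dashboards/{dashboardID}/cells/{cellID}/view
//
// The response is decoded into a dashboardCellViewResponse and the embedded
// influxdb.View is returned to the caller.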
-func (s *DashboardService) GetDashboardCellView(ctx context.Context, dashboardID, cellID platform.ID) (*influxdb.View, error) { - var dcv dashboardCellViewResponse - err := s.Client. - Get(cellViewPath(dashboardID, cellID)). - DecodeJSON(&dcv). - Do(ctx) - if err != nil { - return nil, err - } - - return &dcv.View, nil -} - -// UpdateDashboardCellView updates the view for a dashboard cell. -func (s *DashboardService) UpdateDashboardCellView(ctx context.Context, dashboardID, cellID platform.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) { - var dcv dashboardCellViewResponse - err := s.Client. - PatchJSON(upd, cellViewPath(dashboardID, cellID)). - DecodeJSON(&dcv). - Do(ctx) - if err != nil { - return nil, err - } - return &dcv.View, nil -} - -// ReplaceDashboardCells replaces all cells in a dashboard -func (s *DashboardService) ReplaceDashboardCells(ctx context.Context, id platform.ID, cs []*influxdb.Cell) error { - return s.Client. - PutJSON(cs, cellPath(id)). - // TODO: previous implementation did not do anything with the response except validate it is valid json. - // seems likely we should have to overwrite (:sadpanda:) the incoming cs... - DecodeJSON(&dashboardCellsResponse{}). - Do(ctx) -} - -func dashboardIDPath(id platform.ID) string { - return path.Join(prefixDashboards, id.String()) -} - -func cellPath(id platform.ID) string { - return path.Join(dashboardIDPath(id), "cells") -} - -func cellViewPath(dashboardID, cellID platform.ID) string { - return path.Join(dashboardCellIDPath(dashboardID, cellID), "view") -} - -func dashboardCellIDPath(id platform.ID, cellID platform.ID) string { - return path.Join(cellPath(id), cellID.String()) -} diff --git a/dashboards/transport/http_test.go b/dashboards/transport/http_test.go deleted file mode 100644 index 50eb7fce218..00000000000 --- a/dashboards/transport/http_test.go +++ /dev/null @@ -1,2000 +0,0 @@ -package transport - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "strconv" - "testing" - "time" - - "github.com/go-chi/chi" - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/dashboards" - dashboardstesting "github.com/influxdata/influxdb/v2/dashboards/testing" - ihttp "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/label" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/yudai/gojsondiff" - "github.com/yudai/gojsondiff/formatter" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" -) - -func newDashboardHandler(log *zap.Logger, opts ...option) *DashboardHandler { - deps := dashboardDependencies{ - dashboardService: mock.NewDashboardService(), - userService: mock.NewUserService(), - orgService: mock.NewOrganizationService(), - labelService: mock.NewLabelService(), - urmService: mock.NewUserResourceMappingService(), - } - - for _, opt := range opts { - opt(&deps) - } - - return NewDashboardHandler( - log, - deps.dashboardService, - deps.labelService, - deps.userService, - deps.orgService, - tenant.NewURMHandler( - log.With(zap.String("handler", "urm")), - influxdb.DashboardsResourceType, - "id", - deps.userService, - 
deps.urmService, - ), - label.NewHTTPEmbeddedHandler( - log.With(zap.String("handler", "label")), - influxdb.DashboardsResourceType, - deps.labelService, - ), - ) -} - -func TestService_handleGetDashboards(t *testing.T) { - type fields struct { - DashboardService influxdb.DashboardService - LabelService influxdb.LabelService - } - type args struct { - queryParams map[string][]string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get all dashboards", - fields: fields{ - &mock.DashboardService{ - FindDashboardsF: func(ctx context.Context, filter influxdb.DashboardFilter, opts influxdb.FindOptions) ([]*influxdb.Dashboard, int, error) { - return []*influxdb.Dashboard{ - { - ID: dashboardstesting.MustIDBase16("da7aba5e5d81e550"), - OrganizationID: 1, - Name: "hello", - Description: "oh hello there!", - Meta: influxdb.DashboardMeta{ - CreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - UpdatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Cells: []*influxdb.Cell{ - { - ID: dashboardstesting.MustIDBase16("da7aba5e5d81e550"), - CellProperty: influxdb.CellProperty{ - X: 1, - Y: 2, - W: 3, - H: 4, - }, - }, - }, - }, - { - ID: dashboardstesting.MustIDBase16("0ca2204eca2204e0"), - OrganizationID: 1, - Meta: influxdb.DashboardMeta{ - CreatedAt: time.Date(2012, time.November, 10, 23, 0, 0, 0, time.UTC), - UpdatedAt: time.Date(2012, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "example", - }, - }, 2, nil - }, - }, - &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - labels := []*influxdb.Label{ - { - ID: dashboardstesting.MustIDBase16("fc3dc670a4be9b9a"), - Name: "label", - Properties: map[string]string{ - "color": "fff000", - }, - }, - } - return labels, nil - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/dashboards?descending=false&limit=` + strconv.Itoa(influxdb.DefaultPageSize) + `&offset=0" - }, - "dashboards": [ - { - "id": "da7aba5e5d81e550", - "orgID": "0000000000000001", - "name": "hello", - "description": "oh hello there!", - "labels": [ - { - "id": "fc3dc670a4be9b9a", - "name": "label", - "properties": { - "color": "fff000" - } - } - ], - "meta": { - "createdAt": "2009-11-10T23:00:00Z", - "updatedAt": "2009-11-11T00:00:00Z" - }, - "cells": [ - { - "id": "da7aba5e5d81e550", - "x": 1, - "y": 2, - "w": 3, - "h": 4, - "links": { - "self": "/api/v2/dashboards/da7aba5e5d81e550/cells/da7aba5e5d81e550", - "view": "/api/v2/dashboards/da7aba5e5d81e550/cells/da7aba5e5d81e550/view" - } - } - ], - "links": { - "self": "/api/v2/dashboards/da7aba5e5d81e550", - "org": "/api/v2/orgs/0000000000000001", - "members": "/api/v2/dashboards/da7aba5e5d81e550/members", - "owners": "/api/v2/dashboards/da7aba5e5d81e550/owners", - "cells": "/api/v2/dashboards/da7aba5e5d81e550/cells", - "labels": "/api/v2/dashboards/da7aba5e5d81e550/labels" - } - }, - { - "id": "0ca2204eca2204e0", - "orgID": "0000000000000001", - "name": "example", - "description": "", - "labels": [ - { - "id": "fc3dc670a4be9b9a", - "name": "label", - "properties": { - "color": "fff000" - } - } - ], - "meta": { - "createdAt": "2012-11-10T23:00:00Z", - "updatedAt": "2012-11-11T00:00:00Z" - }, - "cells": [], - "links": { - "self": "/api/v2/dashboards/0ca2204eca2204e0", - 
"org": "/api/v2/orgs/0000000000000001", - "members": "/api/v2/dashboards/0ca2204eca2204e0/members", - "owners": "/api/v2/dashboards/0ca2204eca2204e0/owners", - "cells": "/api/v2/dashboards/0ca2204eca2204e0/cells", - "labels": "/api/v2/dashboards/0ca2204eca2204e0/labels" - } - } - ] -} -`, - }, - }, - { - name: "get all dashboards when there are none", - fields: fields{ - &mock.DashboardService{ - FindDashboardsF: func(ctx context.Context, filter influxdb.DashboardFilter, opts influxdb.FindOptions) ([]*influxdb.Dashboard, int, error) { - return []*influxdb.Dashboard{}, 0, nil - }, - }, - &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{}, nil - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/dashboards?descending=false&limit=` + strconv.Itoa(influxdb.DefaultPageSize) + `&offset=0" - }, - "dashboards": [] -}`, - }, - }, - { - name: "get all dashboards belonging to org 1", - fields: fields{ - &mock.DashboardService{ - FindDashboardsF: func(ctx context.Context, filter influxdb.DashboardFilter, opts influxdb.FindOptions) ([]*influxdb.Dashboard, int, error) { - return []*influxdb.Dashboard{ - { - ID: dashboardstesting.MustIDBase16("da7aba5e5d81e550"), - OrganizationID: 1, - Name: "hello", - Description: "oh hello there!", - Meta: influxdb.DashboardMeta{ - CreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - UpdatedAt: time.Date(2009, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Cells: []*influxdb.Cell{ - { - ID: dashboardstesting.MustIDBase16("da7aba5e5d81e550"), - CellProperty: influxdb.CellProperty{ - X: 1, - Y: 2, - W: 3, - H: 4, - }, - }, - }, - }, - }, 1, nil - }, - }, - &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - labels := []*influxdb.Label{ - { - ID: dashboardstesting.MustIDBase16("fc3dc670a4be9b9a"), - Name: "label", - Properties: map[string]string{ - "color": "fff000", - }, - }, - } - return labels, nil - }, - }, - }, - args: args{ - map[string][]string{ - "orgID": {"0000000000000001"}, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/dashboards?descending=false&limit=` + strconv.Itoa(influxdb.DefaultPageSize) + `&offset=0&orgID=0000000000000001" - }, - "dashboards": [ - { - "id": "da7aba5e5d81e550", - "orgID": "0000000000000001", - "name": "hello", - "description": "oh hello there!", - "meta": { - "createdAt": "2009-11-10T23:00:00Z", - "updatedAt": "2009-11-11T00:00:00Z" - }, - "labels": [ - { - "id": "fc3dc670a4be9b9a", - "name": "label", - "properties": { - "color": "fff000" - } - } - ], - "cells": [ - { - "id": "da7aba5e5d81e550", - "x": 1, - "y": 2, - "w": 3, - "h": 4, - "links": { - "self": "/api/v2/dashboards/da7aba5e5d81e550/cells/da7aba5e5d81e550", - "view": "/api/v2/dashboards/da7aba5e5d81e550/cells/da7aba5e5d81e550/view" - } - } - ], - "links": { - "self": "/api/v2/dashboards/da7aba5e5d81e550", - "org": "/api/v2/orgs/0000000000000001", - "members": "/api/v2/dashboards/da7aba5e5d81e550/members", - "owners": "/api/v2/dashboards/da7aba5e5d81e550/owners", - "cells": "/api/v2/dashboards/da7aba5e5d81e550/cells", - "labels": "/api/v2/dashboards/da7aba5e5d81e550/labels" - } - } - ] -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, 
func(t *testing.T) { - log := zaptest.NewLogger(t) - h := newDashboardHandler( - log, - withDashboardService(tt.fields.DashboardService), - withLabelService(tt.fields.LabelService), - ) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - qp := r.URL.Query() - for k, vs := range tt.args.queryParams { - for _, v := range vs { - qp.Add(k, v) - } - } - r.URL.RawQuery = qp.Encode() - - w := httptest.NewRecorder() - - h.handleGetDashboards(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetDashboards() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetDashboards() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetDashboards(). error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. handleGetDashboards() = ***%s***", tt.name, diff) - } - }) - } -} - -func TestService_handleGetDashboard(t *testing.T) { - type fields struct { - DashboardService influxdb.DashboardService - } - type args struct { - id string - queryString map[string]string - } - type wants struct { - statusCode int - contentType string - body string - } - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get a dashboard by id with view properties", - fields: fields{ - &mock.DashboardService{ - GetDashboardCellViewF: func(ctx context.Context, dashboardID platform.ID, cellID platform.ID) (*influxdb.View, error) { - return &influxdb.View{ViewContents: influxdb.ViewContents{Name: "the cell name"}, Properties: influxdb.XYViewProperties{Type: influxdb.ViewPropertyTypeXY}}, nil - }, - FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { - if id == dashboardstesting.MustIDBase16("020f755c3c082000") { - return &influxdb.Dashboard{ - ID: dashboardstesting.MustIDBase16("020f755c3c082000"), - OrganizationID: 1, - Meta: influxdb.DashboardMeta{ - CreatedAt: time.Date(2012, time.November, 10, 23, 0, 0, 0, time.UTC), - UpdatedAt: time.Date(2012, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "hello", - Cells: []*influxdb.Cell{ - { - ID: dashboardstesting.MustIDBase16("da7aba5e5d81e550"), - CellProperty: influxdb.CellProperty{ - X: 1, - Y: 2, - W: 3, - H: 4, - }, - View: &influxdb.View{ViewContents: influxdb.ViewContents{Name: "the cell name"}, Properties: influxdb.XYViewProperties{Type: influxdb.ViewPropertyTypeXY}}, - }, - }, - }, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - queryString: map[string]string{ - "include": "properties", - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "id": "020f755c3c082000", - "orgID": "0000000000000001", - "name": "hello", - "description": "", - "labels": [], - "meta": { - "createdAt": "2012-11-10T23:00:00Z", - "updatedAt": "2012-11-11T00:00:00Z" - }, - "cells": [ - { - "id": "da7aba5e5d81e550", - "x": 1, - "y": 2, - "w": 3, - "h": 4, - "name": "the cell name", - "properties": { - "shape": "chronograf-v2", - "axes": null, - "colors": null, - "geom": "", - "staticLegend": {}, - "position": "", - "note": "", - "queries": null, - "shadeBelow": false, - "hoverDimension": "", - "showNoteWhenEmpty": false, 
- "timeFormat": "", - "type": "xy", - "xColumn": "", - "generateXAxisTicks": null, - "xTotalTicks": 0, - "xTickStart": 0, - "xTickStep": 0, - "yColumn": "", - "generateYAxisTicks": null, - "yTotalTicks": 0, - "yTickStart": 0, - "yTickStep": 0, - "legendColorizeRows": false, - "legendHide": false, - "legendOpacity": 0, - "legendOrientationThreshold": 0 - }, - "links": { - "self": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550", - "view": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550/view" - } - } - ], - "links": { - "self": "/api/v2/dashboards/020f755c3c082000", - "org": "/api/v2/orgs/0000000000000001", - "members": "/api/v2/dashboards/020f755c3c082000/members", - "owners": "/api/v2/dashboards/020f755c3c082000/owners", - "cells": "/api/v2/dashboards/020f755c3c082000/cells", - "labels": "/api/v2/dashboards/020f755c3c082000/labels" - } -} -`, - }, - }, - { - name: "get a dashboard by id with view properties, but a cell doesnt exist", - fields: fields{ - &mock.DashboardService{ - GetDashboardCellViewF: func(ctx context.Context, dashboardID platform.ID, cellID platform.ID) (*influxdb.View, error) { - return nil, nil - }, - FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { - if id == dashboardstesting.MustIDBase16("020f755c3c082000") { - return &influxdb.Dashboard{ - ID: dashboardstesting.MustIDBase16("020f755c3c082000"), - OrganizationID: 1, - Meta: influxdb.DashboardMeta{ - CreatedAt: time.Date(2012, time.November, 10, 23, 0, 0, 0, time.UTC), - UpdatedAt: time.Date(2012, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "hello", - Cells: []*influxdb.Cell{ - { - ID: dashboardstesting.MustIDBase16("da7aba5e5d81e550"), - CellProperty: influxdb.CellProperty{ - X: 1, - Y: 2, - W: 3, - H: 4, - }, - }, - }, - }, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - queryString: map[string]string{ - "include": "properties", - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "id": "020f755c3c082000", - "orgID": "0000000000000001", - "name": "hello", - "description": "", - "labels": [], - "meta": { - "createdAt": "2012-11-10T23:00:00Z", - "updatedAt": "2012-11-11T00:00:00Z" - }, - "cells": [ - { - "id": "da7aba5e5d81e550", - "x": 1, - "y": 2, - "w": 3, - "h": 4, - "links": { - "self": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550", - "view": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550/view" - } - } - ], - "links": { - "self": "/api/v2/dashboards/020f755c3c082000", - "org": "/api/v2/orgs/0000000000000001", - "members": "/api/v2/dashboards/020f755c3c082000/members", - "owners": "/api/v2/dashboards/020f755c3c082000/owners", - "cells": "/api/v2/dashboards/020f755c3c082000/cells", - "labels": "/api/v2/dashboards/020f755c3c082000/labels" - } -} -`, - }, - }, - { - name: "get a dashboard by id doesnt return cell properties if they exist by default", - fields: fields{ - &mock.DashboardService{ - GetDashboardCellViewF: func(ctx context.Context, dashboardID platform.ID, cellID platform.ID) (*influxdb.View, error) { - return &influxdb.View{ViewContents: influxdb.ViewContents{Name: "the cell name"}, Properties: influxdb.XYViewProperties{Type: influxdb.ViewPropertyTypeXY}}, nil - }, - FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { - if id == dashboardstesting.MustIDBase16("020f755c3c082000") { - return &influxdb.Dashboard{ - ID: 
dashboardstesting.MustIDBase16("020f755c3c082000"), - OrganizationID: 1, - Meta: influxdb.DashboardMeta{ - CreatedAt: time.Date(2012, time.November, 10, 23, 0, 0, 0, time.UTC), - UpdatedAt: time.Date(2012, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "hello", - Cells: []*influxdb.Cell{ - { - ID: dashboardstesting.MustIDBase16("da7aba5e5d81e550"), - CellProperty: influxdb.CellProperty{ - X: 1, - Y: 2, - W: 3, - H: 4, - }, - }, - }, - }, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - queryString: map[string]string{}, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "id": "020f755c3c082000", - "orgID": "0000000000000001", - "name": "hello", - "description": "", - "labels": [], - "meta": { - "createdAt": "2012-11-10T23:00:00Z", - "updatedAt": "2012-11-11T00:00:00Z" - }, - "cells": [ - { - "id": "da7aba5e5d81e550", - "x": 1, - "y": 2, - "w": 3, - "h": 4, - "links": { - "self": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550", - "view": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550/view" - } - } - ], - "links": { - "self": "/api/v2/dashboards/020f755c3c082000", - "org": "/api/v2/orgs/0000000000000001", - "members": "/api/v2/dashboards/020f755c3c082000/members", - "owners": "/api/v2/dashboards/020f755c3c082000/owners", - "cells": "/api/v2/dashboards/020f755c3c082000/cells", - "labels": "/api/v2/dashboards/020f755c3c082000/labels" - } -} -`, - }, - }, - { - name: "get a dashboard by id", - fields: fields{ - &mock.DashboardService{ - FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { - if id == dashboardstesting.MustIDBase16("020f755c3c082000") { - return &influxdb.Dashboard{ - ID: dashboardstesting.MustIDBase16("020f755c3c082000"), - OrganizationID: 1, - Meta: influxdb.DashboardMeta{ - CreatedAt: time.Date(2012, time.November, 10, 23, 0, 0, 0, time.UTC), - UpdatedAt: time.Date(2012, time.November, 10, 24, 0, 0, 0, time.UTC), - }, - Name: "hello", - Cells: []*influxdb.Cell{ - { - ID: dashboardstesting.MustIDBase16("da7aba5e5d81e550"), - CellProperty: influxdb.CellProperty{ - X: 1, - Y: 2, - W: 3, - H: 4, - }, - }, - }, - }, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` - { - "id": "020f755c3c082000", - "orgID": "0000000000000001", - "name": "hello", - "description": "", - "labels": [], - "meta": { - "createdAt": "2012-11-10T23:00:00Z", - "updatedAt": "2012-11-11T00:00:00Z" - }, - "cells": [ - { - "id": "da7aba5e5d81e550", - "x": 1, - "y": 2, - "w": 3, - "h": 4, - "links": { - "self": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550", - "view": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550/view" - } - } - ], - "links": { - "self": "/api/v2/dashboards/020f755c3c082000", - "org": "/api/v2/orgs/0000000000000001", - "members": "/api/v2/dashboards/020f755c3c082000/members", - "owners": "/api/v2/dashboards/020f755c3c082000/owners", - "cells": "/api/v2/dashboards/020f755c3c082000/cells", - "labels": "/api/v2/dashboards/020f755c3c082000/labels" - } - } - `, - }, - }, - { - name: "not found", - fields: fields{ - &mock.DashboardService{ - FindDashboardByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Dashboard, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrDashboardNotFound, 
- } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := newDashboardHandler( - zaptest.NewLogger(t), - withDashboardService(tt.fields.DashboardService), - ) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - urlQuery := r.URL.Query() - - for k, v := range tt.args.queryString { - urlQuery.Add(k, v) - } - - r.URL.RawQuery = urlQuery.Encode() - - rctx := chi.NewRouteContext() - rctx.URLParams.Add("id", tt.args.id) - r = r.WithContext(context.WithValue( - context.Background(), - chi.RouteCtxKey, - rctx), - ) - - w := httptest.NewRecorder() - - h.handleGetDashboard(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetDashboard() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetDashboard() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && err != nil { - t.Errorf("%q, handleGetDashboard(). error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. handleGetDashboard() = ***%s***", tt.name, diff) - } - }) - } -} - -func TestService_handlePostDashboard(t *testing.T) { - type fields struct { - DashboardService influxdb.DashboardService - } - type args struct { - dashboard *influxdb.Dashboard - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "create a new dashboard", - fields: fields{ - &mock.DashboardService{ - CreateDashboardF: func(ctx context.Context, c *influxdb.Dashboard) error { - c.ID = dashboardstesting.MustIDBase16("020f755c3c082000") - c.Meta = influxdb.DashboardMeta{ - CreatedAt: time.Date(2012, time.November, 10, 23, 0, 0, 0, time.UTC), - UpdatedAt: time.Date(2012, time.November, 10, 24, 0, 0, 0, time.UTC), - } - return nil - }, - }, - }, - args: args{ - dashboard: &influxdb.Dashboard{ - ID: dashboardstesting.MustIDBase16("020f755c3c082000"), - OrganizationID: 1, - Name: "hello", - Description: "howdy there", - Cells: []*influxdb.Cell{ - { - ID: dashboardstesting.MustIDBase16("da7aba5e5d81e550"), - CellProperty: influxdb.CellProperty{ - X: 1, - Y: 2, - W: 3, - H: 4, - }, - }, - }, - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` - { - "id": "020f755c3c082000", - "orgID": "0000000000000001", - "name": "hello", - "description": "howdy there", - "labels": [], - "meta": { - "createdAt": "2012-11-10T23:00:00Z", - "updatedAt": "2012-11-11T00:00:00Z" - }, - "cells": [ - { - "id": "da7aba5e5d81e550", - "x": 1, - "y": 2, - "w": 3, - "h": 4, - "links": { - "self": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550", - "view": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550/view" - } - } - ], - "links": { - "self": "/api/v2/dashboards/020f755c3c082000", - "org": "/api/v2/orgs/0000000000000001", - "members": "/api/v2/dashboards/020f755c3c082000/members", - "owners": "/api/v2/dashboards/020f755c3c082000/owners", - "cells": "/api/v2/dashboards/020f755c3c082000/cells", - "labels": "/api/v2/dashboards/020f755c3c082000/labels" - } - }`, - }, - }, - { - name: "create 
a new dashboard with cell view properties", - fields: fields{ - &mock.DashboardService{ - CreateDashboardF: func(ctx context.Context, c *influxdb.Dashboard) error { - c.ID = dashboardstesting.MustIDBase16("020f755c3c082000") - c.Meta = influxdb.DashboardMeta{ - CreatedAt: time.Date(2012, time.November, 10, 23, 0, 0, 0, time.UTC), - UpdatedAt: time.Date(2012, time.November, 10, 24, 0, 0, 0, time.UTC), - } - c.Cells = []*influxdb.Cell{ - { - ID: dashboardstesting.MustIDBase16("da7aba5e5d81e550"), - CellProperty: influxdb.CellProperty{ - X: 1, - Y: 2, - W: 3, - H: 4, - }, - View: &influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "hello a view", - }, - Properties: influxdb.XYViewProperties{ - Type: influxdb.ViewPropertyTypeXY, - Note: "note", - }, - }, - }, - } - return nil - }, - }, - }, - args: args{ - dashboard: &influxdb.Dashboard{ - ID: dashboardstesting.MustIDBase16("020f755c3c082000"), - OrganizationID: 1, - Name: "hello", - Description: "howdy there", - Cells: []*influxdb.Cell{ - { - ID: dashboardstesting.MustIDBase16("da7aba5e5d81e550"), - CellProperty: influxdb.CellProperty{ - X: 1, - Y: 2, - W: 3, - H: 4, - }, - View: &influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "hello a view", - }, - Properties: struct { - influxdb.XYViewProperties - Shape string - }{ - XYViewProperties: influxdb.XYViewProperties{ - Note: "note", - Type: influxdb.ViewPropertyTypeXY, - }, - Shape: "chronograf-v2", - }, - }, - }, - }, - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` - { - "id": "020f755c3c082000", - "orgID": "0000000000000001", - "name": "hello", - "description": "howdy there", - "labels": [], - "meta": { - "createdAt": "2012-11-10T23:00:00Z", - "updatedAt": "2012-11-11T00:00:00Z" - }, - "cells": [ - { - "id": "da7aba5e5d81e550", - "x": 1, - "y": 2, - "w": 3, - "h": 4, - "name": "hello a view", - "properties": { - "shape": "chronograf-v2", - "axes": null, - "colors": null, - "geom": "", - "staticLegend": {}, - "note": "note", - "position": "", - "queries": null, - "shadeBelow": false, - "hoverDimension": "", - "showNoteWhenEmpty": false, - "timeFormat": "", - "type": "", - "xColumn": "", - "generateXAxisTicks": null, - "xTotalTicks": 0, - "xTickStart": 0, - "xTickStep": 0, - "yColumn": "", - "generateYAxisTicks": null, - "yTotalTicks": 0, - "yTickStart": 0, - "yTickStep": 0, - "type": "xy", - "legendColorizeRows": false, - "legendHide": false, - "legendOpacity": 0, - "legendOrientationThreshold": 0 - }, - "links": { - "self": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550", - "view": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550/view" - } - } - ], - "links": { - "self": "/api/v2/dashboards/020f755c3c082000", - "org": "/api/v2/orgs/0000000000000001", - "members": "/api/v2/dashboards/020f755c3c082000/members", - "owners": "/api/v2/dashboards/020f755c3c082000/owners", - "cells": "/api/v2/dashboards/020f755c3c082000/cells", - "labels": "/api/v2/dashboards/020f755c3c082000/labels" - } - }`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := newDashboardHandler( - zaptest.NewLogger(t), - withDashboardService(tt.fields.DashboardService), - ) - - b, err := json.Marshal(tt.args.dashboard) - if err != nil { - t.Fatalf("failed to unmarshal dashboard: %v", err) - } - - r := httptest.NewRequest("GET", "http://any.url", bytes.NewReader(b)) - w := httptest.NewRecorder() - - h.handlePostDashboard(w, r) - - res := w.Result() - content := 
res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePostDashboard() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePostDashboard() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePostDashboard(). error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. handlePostDashboard() = ***%s***", tt.name, diff) - } - }) - } -} - -func TestService_handleDeleteDashboard(t *testing.T) { - type fields struct { - DashboardService influxdb.DashboardService - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "remove a dashboard by id", - fields: fields{ - &mock.DashboardService{ - DeleteDashboardF: func(ctx context.Context, id platform.ID) error { - if id == dashboardstesting.MustIDBase16("020f755c3c082000") { - return nil - } - - return fmt.Errorf("wrong id") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNoContent, - }, - }, - { - name: "dashboard not found", - fields: fields{ - &mock.DashboardService{ - DeleteDashboardF: func(ctx context.Context, id platform.ID) error { - return &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrDashboardNotFound, - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := newDashboardHandler( - zaptest.NewLogger(t), - withDashboardService(tt.fields.DashboardService), - ) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - rctx := chi.NewRouteContext() - rctx.URLParams.Add("id", tt.args.id) - r = r.WithContext(context.WithValue( - context.Background(), - chi.RouteCtxKey, - rctx), - ) - w := httptest.NewRecorder() - - h.handleDeleteDashboard(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleDeleteDashboard() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleDeleteDashboard() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleDeleteDashboard(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handleDeleteDashboard() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handlePatchDashboard(t *testing.T) { - type fields struct { - DashboardService influxdb.DashboardService - } - type args struct { - id string - name string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "update a dashboard name", - fields: fields{ - &mock.DashboardService{ - UpdateDashboardF: func(ctx context.Context, id platform.ID, upd influxdb.DashboardUpdate) (*influxdb.Dashboard, error) { - if id == dashboardstesting.MustIDBase16("020f755c3c082000") { - d := &influxdb.Dashboard{ - ID: dashboardstesting.MustIDBase16("020f755c3c082000"), - OrganizationID: 1, - Name: "hello", - Meta: influxdb.DashboardMeta{ - CreatedAt: time.Date(2012, time.November, 10, 23, 0, 0, 0, time.UTC), - UpdatedAt: time.Date(2012, time.November, 10, 25, 0, 0, 0, time.UTC), - }, - Cells: []*influxdb.Cell{ - { - ID: dashboardstesting.MustIDBase16("da7aba5e5d81e550"), - CellProperty: influxdb.CellProperty{ - X: 1, - Y: 2, - W: 3, - H: 4, - }, - }, - }, - } - - if upd.Name != nil { - d.Name = *upd.Name - } - - return d, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - name: "example", - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` - { - "id": "020f755c3c082000", - "orgID": "0000000000000001", - "name": "example", - "description": "", - "labels": [], - "meta": { - "createdAt": "2012-11-10T23:00:00Z", - "updatedAt": "2012-11-11T01:00:00Z" - }, - "cells": [ - { - "id": "da7aba5e5d81e550", - "x": 1, - "y": 2, - "w": 3, - "h": 4, - "links": { - "self": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550", - "view": "/api/v2/dashboards/020f755c3c082000/cells/da7aba5e5d81e550/view" - } - } - ], - "links": { - "self": "/api/v2/dashboards/020f755c3c082000", - "org": "/api/v2/orgs/0000000000000001", - "members": "/api/v2/dashboards/020f755c3c082000/members", - "owners": "/api/v2/dashboards/020f755c3c082000/owners", - "cells": "/api/v2/dashboards/020f755c3c082000/cells", - "labels": "/api/v2/dashboards/020f755c3c082000/labels" - } - } - `, - }, - }, - { - name: "update a dashboard with empty request body", - fields: fields{ - &mock.DashboardService{ - UpdateDashboardF: func(ctx context.Context, id platform.ID, upd influxdb.DashboardUpdate) (*influxdb.Dashboard, error) { - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusBadRequest, - }, - }, - { - name: "dashboard not found", - fields: fields{ - &mock.DashboardService{ - UpdateDashboardF: func(ctx context.Context, id platform.ID, upd influxdb.DashboardUpdate) (*influxdb.Dashboard, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrDashboardNotFound, - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - name: "hello", - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := newDashboardHandler( - zaptest.NewLogger(t), - withDashboardService(tt.fields.DashboardService), - ) - - upd := influxdb.DashboardUpdate{} - if tt.args.name != "" { - upd.Name = &tt.args.name - } - - b, err := json.Marshal(upd) - if err != nil { - t.Fatalf("failed to unmarshal dashboard update: %v", err) - } - - r := httptest.NewRequest("GET", 
"http://any.url", bytes.NewReader(b)) - - rctx := chi.NewRouteContext() - rctx.URLParams.Add("id", tt.args.id) - r = r.WithContext(context.WithValue( - context.Background(), - chi.RouteCtxKey, - rctx), - ) - - w := httptest.NewRecorder() - - h.handlePatchDashboard(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePatchDashboard() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePatchDashboard() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePatchDashboard(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handlePatchDashboard() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handlePostDashboardCell(t *testing.T) { - type fields struct { - DashboardService influxdb.DashboardService - } - type args struct { - id string - body string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "empty body", - fields: fields{ - &mock.DashboardService{ - AddDashboardCellF: func(ctx context.Context, id platform.ID, c *influxdb.Cell, opt influxdb.AddDashboardCellOptions) error { - c.ID = dashboardstesting.MustIDBase16("020f755c3c082000") - return nil - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusBadRequest, - contentType: "application/json; charset=utf-8", - body: `{"code":"invalid","message":"bad request json body: EOF"}`, - }, - }, - { - name: "no properties", - fields: fields{ - &mock.DashboardService{ - AddDashboardCellF: func(ctx context.Context, id platform.ID, c *influxdb.Cell, opt influxdb.AddDashboardCellOptions) error { - c.ID = dashboardstesting.MustIDBase16("020f755c3c082000") - return nil - }, - }, - }, - args: args{ - id: "020f755c3c082000", - body: `{"bad":1}`, - }, - wants: wants{ - statusCode: http.StatusBadRequest, - contentType: "application/json; charset=utf-8", - body: ` - { - "code": "invalid", - "message": "req body is empty" - }`, - }, - }, - { - name: "bad dash id", - fields: fields{ - &mock.DashboardService{ - AddDashboardCellF: func(ctx context.Context, id platform.ID, c *influxdb.Cell, opt influxdb.AddDashboardCellOptions) error { - c.ID = dashboardstesting.MustIDBase16("020f755c3c082000") - return nil - }, - }, - }, - args: args{ - id: "fff", - body: `{}`, - }, - wants: wants{ - statusCode: http.StatusBadRequest, - contentType: "application/json; charset=utf-8", - body: ` - { - "code": "invalid", - "message": "id must have a length of 16 bytes" - }`, - }, - }, - { - name: "general create a dashboard cell", - fields: fields{ - &mock.DashboardService{ - AddDashboardCellF: func(ctx context.Context, id platform.ID, c *influxdb.Cell, opt influxdb.AddDashboardCellOptions) error { - c.ID = dashboardstesting.MustIDBase16("020f755c3c082000") - return nil - }, - GetDashboardCellViewF: func(ctx context.Context, id1, id2 platform.ID) (*influxdb.View, error) { - return &influxdb.View{ - ViewContents: influxdb.ViewContents{ - ID: dashboardstesting.MustIDBase16("020f755c3c082001"), - }}, nil - }, - }, - }, - args: args{ - id: "020f755c3c082000", - body: 
`{"x":10,"y":11,"name":"name1","usingView":"020f755c3c082001"}`, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "id": "020f755c3c082000", - "x": 10, - "y": 11, - "w": 0, - "h": 0, - "links": { - "self": "/api/v2/dashboards/020f755c3c082000/cells/020f755c3c082000", - "view": "/api/v2/dashboards/020f755c3c082000/cells/020f755c3c082000/view" - } -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := newDashboardHandler( - zaptest.NewLogger(t), - withDashboardService(tt.fields.DashboardService), - ) - - buf := new(bytes.Buffer) - _, _ = buf.WriteString(tt.args.body) - r := httptest.NewRequest("POST", "http://any.url", buf) - - rctx := chi.NewRouteContext() - rctx.URLParams.Add("id", tt.args.id) - r = r.WithContext(context.WithValue( - context.Background(), - chi.RouteCtxKey, - rctx), - ) - - w := httptest.NewRecorder() - - h.handlePostDashboardCell(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePostDashboardCell() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePostDashboardCell() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(tt.wants.body, string(body)); err != nil { - t.Errorf("%q, handlePostDashboardCell(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handlePostDashboardCell() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handleDeleteDashboardCell(t *testing.T) { - type fields struct { - DashboardService influxdb.DashboardService - } - type args struct { - id string - cellID string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "remove a dashboard cell", - fields: fields{ - &mock.DashboardService{ - RemoveDashboardCellF: func(ctx context.Context, id platform.ID, cellID platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: "020f755c3c082000", - cellID: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNoContent, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := newDashboardHandler( - zaptest.NewLogger(t), - withDashboardService(tt.fields.DashboardService), - ) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - rctx := chi.NewRouteContext() - rctx.URLParams.Add("id", tt.args.id) - rctx.URLParams.Add("cellID", tt.args.cellID) - r = r.WithContext(context.WithValue( - context.Background(), - chi.RouteCtxKey, - rctx), - ) - - w := httptest.NewRecorder() - - h.handleDeleteDashboardCell(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleDeleteDashboardCell() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleDeleteDashboardCell() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleDeleteDashboardCell(). 
error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handleDeleteDashboardCell() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handlePatchDashboardCell(t *testing.T) { - type fields struct { - DashboardService influxdb.DashboardService - } - type args struct { - id string - cellID string - x int32 - y int32 - w int32 - h int32 - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "update a dashboard cell", - fields: fields{ - &mock.DashboardService{ - UpdateDashboardCellF: func(ctx context.Context, id, cellID platform.ID, upd influxdb.CellUpdate) (*influxdb.Cell, error) { - cell := &influxdb.Cell{ - ID: dashboardstesting.MustIDBase16("020f755c3c082000"), - } - - if err := upd.Apply(cell); err != nil { - return nil, err - } - - return cell, nil - }, - }, - }, - args: args{ - id: "020f755c3c082000", - cellID: "020f755c3c082000", - x: 10, - y: 11, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "id": "020f755c3c082000", - "x": 10, - "y": 11, - "w": 0, - "h": 0, - "links": { - "self": "/api/v2/dashboards/020f755c3c082000/cells/020f755c3c082000", - "view": "/api/v2/dashboards/020f755c3c082000/cells/020f755c3c082000/view" - } -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := newDashboardHandler( - zaptest.NewLogger(t), - withDashboardService(tt.fields.DashboardService), - ) - - upd := influxdb.CellUpdate{} - if tt.args.x != 0 { - upd.X = &tt.args.x - } - if tt.args.y != 0 { - upd.Y = &tt.args.y - } - if tt.args.w != 0 { - upd.W = &tt.args.w - } - if tt.args.h != 0 { - upd.H = &tt.args.h - } - - b, err := json.Marshal(upd) - if err != nil { - t.Fatalf("failed to unmarshal cell: %v", err) - } - - r := httptest.NewRequest("GET", "http://any.url", bytes.NewReader(b)) - - rctx := chi.NewRouteContext() - rctx.URLParams.Add("id", tt.args.id) - rctx.URLParams.Add("cellID", tt.args.cellID) - r = r.WithContext(context.WithValue( - context.Background(), - chi.RouteCtxKey, - rctx), - ) - w := httptest.NewRecorder() - - h.handlePatchDashboardCell(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePatchDashboardCell() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePatchDashboardCell() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePatchDashboardCell(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handlePatchDashboardCell() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func Test_dashboardCellIDPath(t *testing.T) { - t.Parallel() - dashboard, err := platform.IDFromString("deadbeefdeadbeef") - if err != nil { - t.Fatal(err) - } - - cell, err := platform.IDFromString("cade9a7ecade9a7e") - if err != nil { - t.Fatal(err) - } - - want := "/api/v2/dashboards/deadbeefdeadbeef/cells/cade9a7ecade9a7e" - if got := dashboardCellIDPath(*dashboard, *cell); got != want { - t.Errorf("dashboardCellIDPath() = got: %s want: %s", got, want) - } -} - -func initDashboardService(f dashboardstesting.DashboardFields, t *testing.T) (influxdb.DashboardService, string, func()) { - t.Helper() - log := zaptest.NewLogger(t) - store := itesting.NewTestInmemStore(t) - - kvsvc := kv.NewService(log, store, &mock.OrganizationService{}) - kvsvc.IDGenerator = f.IDGenerator - - svc := dashboards.NewService( - store, - kvsvc, // operation log storage - ) - - svc.IDGenerator = f.IDGenerator - - ctx := context.Background() - - for _, d := range f.Dashboards { - if err := svc.PutDashboard(ctx, d); err != nil { - t.Fatalf("failed to populate dashboard") - } - } - - h := newDashboardHandler( - log, - withDashboardService(svc), - ) - - r := chi.NewRouter() - r.Mount(h.Prefix(), h) - server := httptest.NewServer(r) - - httpClient, err := ihttp.NewHTTPClient(server.URL, "", false) - if err != nil { - t.Fatal(err) - } - - client := DashboardService{Client: httpClient} - - return &client, "", server.Close -} - -func TestDashboardService(t *testing.T) { - t.Parallel() - dashboardstesting.DeleteDashboard(initDashboardService, t) -} - -func TestService_handlePostDashboardLabel(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - labelMapping *influxdb.LabelMapping - dashboardID platform.ID - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "add label to dashboard", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - Name: "label", - Properties: map[string]string{ - "color": "fff000", - }, - }, nil - }, - CreateLabelMappingFn: func(ctx context.Context, m *influxdb.LabelMapping) error { return nil }, - }, - }, - args: args{ - labelMapping: &influxdb.LabelMapping{ - ResourceID: 100, - LabelID: 1, - }, - dashboardID: 100, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "label": { - "id": "0000000000000001", - "name": "label", - "properties": { - "color": "fff000" - } - }, - "links": { - "self": "/api/v2/labels/0000000000000001" - } -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := newDashboardHandler( - zaptest.NewLogger(t), - withLabelService(tt.fields.LabelService), - withDashboardService(&mock.DashboardService{ - FindDashboardByIDF: func(_ context.Context, id platform.ID) (*influxdb.Dashboard, error) { - return &influxdb.Dashboard{ - ID: id, - OrganizationID: platform.ID(25), - }, nil - }, - }), - ) - - router := chi.NewRouter() - router.Mount(h.Prefix(), h) - - b, err := json.Marshal(tt.args.labelMapping) - if err != nil { - t.Fatalf("failed to unmarshal label mapping: %v", err) - } - - url := fmt.Sprintf("http://localhost:8086/api/v2/dashboards/%s/labels", tt.args.dashboardID) - r := httptest.NewRequest("POST", 
url, bytes.NewReader(b)) - w := httptest.NewRecorder() - - router.ServeHTTP(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("got %v, want %v", res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("got %v, want %v", content, tt.wants.contentType) - } - if eq, diff, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq { - t.Errorf("Diff\n%s", diff) - } - }) - } -} - -func jsonEqual(s1, s2 string) (eq bool, diff string, err error) { - if s1 == s2 { - return true, "", nil - } - - if s1 == "" { - return false, s2, fmt.Errorf("s1 is empty") - } - - if s2 == "" { - return false, s1, fmt.Errorf("s2 is empty") - } - - var o1 interface{} - if err = json.Unmarshal([]byte(s1), &o1); err != nil { - return - } - - var o2 interface{} - if err = json.Unmarshal([]byte(s2), &o2); err != nil { - return - } - - differ := gojsondiff.New() - d, err := differ.Compare([]byte(s1), []byte(s2)) - if err != nil { - return - } - - config := formatter.AsciiFormatterConfig{} - - formatter := formatter.NewAsciiFormatter(o1, config) - diff, err = formatter.Format(d) - - return cmp.Equal(o1, o2), diff, err -} - -type dashboardDependencies struct { - dashboardService influxdb.DashboardService - userService influxdb.UserService - orgService influxdb.OrganizationService - labelService influxdb.LabelService - urmService influxdb.UserResourceMappingService -} - -type option func(*dashboardDependencies) - -func withDashboardService(svc influxdb.DashboardService) option { - return func(d *dashboardDependencies) { - d.dashboardService = svc - } -} - -func withLabelService(svc influxdb.LabelService) option { - return func(d *dashboardDependencies) { - d.labelService = svc - } -} diff --git a/dbrp/bucket_service.go b/dbrp/bucket_service.go deleted file mode 100644 index 847f4edb779..00000000000 --- a/dbrp/bucket_service.go +++ /dev/null @@ -1,49 +0,0 @@ -package dbrp - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "go.uber.org/zap" -) - -type BucketService struct { - influxdb.BucketService - Logger *zap.Logger - DBRPMappingService influxdb.DBRPMappingService -} - -func NewBucketService(logger *zap.Logger, bucketService influxdb.BucketService, dbrpService influxdb.DBRPMappingService) *BucketService { - return &BucketService{ - Logger: logger, - BucketService: bucketService, - DBRPMappingService: dbrpService, - } -} - -func (s *BucketService) DeleteBucket(ctx context.Context, id platform.ID) error { - bucket, err := s.BucketService.FindBucketByID(ctx, id) - if err != nil { - return err - } - if err := s.BucketService.DeleteBucket(ctx, id); err != nil { - return err - } - - logger := s.Logger.With(zap.String("bucket_id", id.String())) - mappings, _, err := s.DBRPMappingService.FindMany(ctx, influxdb.DBRPMappingFilter{ - OrgID: &bucket.OrgID, - BucketID: &bucket.ID, - }) - if err != nil { - logger.Error("Failed to lookup DBRP mappings for Bucket.", zap.Error(err)) - return nil - } - for _, m := range mappings { - if err := s.DBRPMappingService.Delete(ctx, bucket.OrgID, m.ID); err != nil { - logger.Error("Failed to delete DBRP mapping for Bucket.", zap.Error(err)) - } - } - return nil -} diff --git a/dbrp/bucket_service_test.go b/dbrp/bucket_service_test.go deleted file mode 100644 index 99ab3f7d5a6..00000000000 --- a/dbrp/bucket_service_test.go +++ 
/dev/null @@ -1,65 +0,0 @@ -package dbrp - -import ( - "context" - "testing" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/dbrp/mocks" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -var generator = snowflake.NewDefaultIDGenerator() - -func TestBucketService(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - var ( - ctx = context.Background() - bucketID = generator.ID() - orgID = generator.ID() - mappingID = generator.ID() - - logger = zap.NewNop() - bucketServiceMock = mocks.NewMockBucketService(ctrl) - dbrpService = mocks.NewMockDBRPMappingService(ctrl) - - bucket = &influxdb.Bucket{ - ID: bucketID, - OrgID: orgID, - } - ) - - findBucket := bucketServiceMock.EXPECT(). - FindBucketByID(gomock.Any(), bucketID). - Return(bucket, nil) - deleteBucket := bucketServiceMock.EXPECT(). - DeleteBucket(gomock.Any(), bucketID). - Return(nil) - - findMapping := dbrpService.EXPECT(). - FindMany(gomock.Any(), influxdb.DBRPMappingFilter{ - BucketID: &bucketID, - OrgID: &orgID, - }).Return([]*influxdb.DBRPMapping{ - {ID: mappingID}, - }, 1, nil) - deleteMapping := dbrpService.EXPECT(). - Delete(gomock.Any(), orgID, mappingID). - Return(nil) - - gomock.InOrder( - findBucket, - deleteBucket, - findMapping, - deleteMapping, - ) - - bucketService := NewBucketService(logger, bucketServiceMock, dbrpService) - err := bucketService.DeleteBucket(ctx, bucketID) - require.NoError(t, err) -} diff --git a/dbrp/error.go b/dbrp/error.go deleted file mode 100644 index de42c84c523..00000000000 --- a/dbrp/error.go +++ /dev/null @@ -1,100 +0,0 @@ -package dbrp - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -var ( - // ErrDBRPNotFound is used when the specified DBRP cannot be found. - ErrDBRPNotFound = &errors.Error{ - Code: errors.ENotFound, - Msg: "unable to find DBRP", - } - - // ErrNotUniqueID is used when the ID of the DBRP is not unique. - ErrNotUniqueID = &errors.Error{ - Code: errors.EConflict, - Msg: "ID already exists", - } - - // ErrFailureGeneratingID occurs ony when the random number generator - // cannot generate an ID in MaxIDGenerationN times. - ErrFailureGeneratingID = &errors.Error{ - Code: errors.EInternal, - Msg: "unable to generate valid id", - } - - ErrNoOrgProvided = &errors.Error{ - Code: errors.EInvalid, - Msg: "either 'org' or 'orgID' must be provided", - } -) - -// ErrOrgNotFound returns a more informative error about a 404 on org name. -func ErrOrgNotFound(org string) error { - return &errors.Error{ - Code: errors.ENotFound, - Msg: fmt.Sprintf("invalid org %q", org), - Err: taskmodel.ErrOrgNotFound, - } -} - -// ErrInvalidOrgID returns a more informative error about a failure -// to decode an organization ID. -func ErrInvalidOrgID(id string, err error) error { - return &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("invalid org ID %q", id), - Err: err, - } -} - -// ErrInvalidBucketID returns a more informative error about a failure -// to decode a bucket ID. -func ErrInvalidBucketID(id string, err error) error { - return &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("invalid bucket ID %q", id), - Err: err, - } -} - -// ErrInvalidDBRPID is used when the ID of the DBRP cannot be encoded. 
-func ErrInvalidDBRPID(id string, err error) error { - return &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("invalid DBRP ID %q", id), - Err: err, - } -} - -// ErrInvalidDBRP is used when a service was provided an invalid DBRP. -func ErrInvalidDBRP(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "DBRP provided is invalid", - Err: err, - } -} - -// ErrInternalService is used when the error comes from an internal system. -func ErrInternalService(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Err: err, - } -} - -// ErrDBRPAlreadyExists is used when there is a conflict in creating a new DBRP. -func ErrDBRPAlreadyExists(msg string) *errors.Error { - if msg == "" { - msg = "DBRP already exists" - } - return &errors.Error{ - Code: errors.EConflict, - Msg: msg, - } -} diff --git a/dbrp/http_client_dbrp.go b/dbrp/http_client_dbrp.go deleted file mode 100644 index d2e996b2423..00000000000 --- a/dbrp/http_client_dbrp.go +++ /dev/null @@ -1,135 +0,0 @@ -package dbrp - -import ( - "context" - "fmt" - "path" - "strconv" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/pkg/httpc" -) - -var _ influxdb.DBRPMappingService = (*Client)(nil) - -// Client connects to Influx via HTTP using tokens to manage DBRPs. -type Client struct { - Client *httpc.Client - Prefix string -} - -func NewClient(client *httpc.Client) *Client { - return &Client{ - Client: client, - Prefix: PrefixDBRP, - } -} - -func (c *Client) dbrpURL(id platform.ID) string { - return path.Join(c.Prefix, id.String()) -} - -func (c *Client) FindByID(ctx context.Context, orgID, id platform.ID) (*influxdb.DBRPMapping, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var resp getDBRPResponse - if err := c.Client. - Get(c.dbrpURL(id)). - QueryParams([2]string{"orgID", orgID.String()}). - DecodeJSON(&resp). - Do(ctx); err != nil { - return nil, err - } - return resp.Content, nil -} - -func (c *Client) FindMany(ctx context.Context, filter influxdb.DBRPMappingFilter, opts ...influxdb.FindOptions) ([]*influxdb.DBRPMapping, int, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - params := influxdb.FindOptionParams(opts...) - if filter.OrgID != nil { - params = append(params, [2]string{"orgID", filter.OrgID.String()}) - } else { - return nil, 0, fmt.Errorf("please filter by orgID") - } - if filter.ID != nil { - params = append(params, [2]string{"id", filter.ID.String()}) - } - if filter.BucketID != nil { - params = append(params, [2]string{"bucketID", filter.BucketID.String()}) - } - if filter.Database != nil { - params = append(params, [2]string{"db", *filter.Database}) - } - if filter.RetentionPolicy != nil { - params = append(params, [2]string{"rp", *filter.RetentionPolicy}) - } - if filter.Default != nil { - params = append(params, [2]string{"default", strconv.FormatBool(*filter.Default)}) - } - - var resp getDBRPsResponse - if err := c.Client. - Get(c.Prefix). - QueryParams(params...). - DecodeJSON(&resp). - Do(ctx); err != nil { - return nil, 0, err - } - return resp.Content, len(resp.Content), nil -} - -func (c *Client) Create(ctx context.Context, dbrp *influxdb.DBRPMapping) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var newDBRP influxdb.DBRPMapping - if err := c.Client. 
- PostJSON(createDBRPRequest{ - Database: dbrp.Database, - RetentionPolicy: dbrp.RetentionPolicy, - Default: dbrp.Default, - OrganizationID: dbrp.OrganizationID.String(), - BucketID: dbrp.BucketID.String(), - }, c.Prefix). - DecodeJSON(&newDBRP). - Do(ctx); err != nil { - return err - } - dbrp.ID = newDBRP.ID - return nil -} - -func (c *Client) Update(ctx context.Context, dbrp *influxdb.DBRPMapping) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if err := dbrp.Validate(); err != nil { - return err - } - - var newDBRP influxdb.DBRPMapping - if err := c.Client. - PatchJSON(dbrp, c.dbrpURL(dbrp.ID)). - QueryParams([2]string{"orgID", dbrp.OrganizationID.String()}). - DecodeJSON(&newDBRP). - Do(ctx); err != nil { - return err - } - *dbrp = newDBRP - return nil -} - -func (c *Client) Delete(ctx context.Context, orgID, id platform.ID) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - return c.Client. - Delete(c.dbrpURL(id)). - QueryParams([2]string{"orgID", orgID.String()}). - Do(ctx) -} diff --git a/dbrp/http_client_dbrp_test.go b/dbrp/http_client_dbrp_test.go deleted file mode 100644 index 1b8eddf706a..00000000000 --- a/dbrp/http_client_dbrp_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package dbrp_test - -import ( - "context" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/dbrp" - "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "go.uber.org/zap/zaptest" -) - -func setup(t *testing.T) (*dbrp.Client, func()) { - t.Helper() - dbrpSvc := &mock.DBRPMappingService{ - CreateFn: func(ctx context.Context, dbrp *influxdb.DBRPMapping) error { - dbrp.ID = 1 - return nil - }, - FindByIDFn: func(ctx context.Context, orgID, id platform.ID) (*influxdb.DBRPMapping, error) { - return &influxdb.DBRPMapping{ - ID: id, - Database: "db", - RetentionPolicy: "rp", - Default: false, - OrganizationID: id, - BucketID: 1, - }, nil - }, - FindManyFn: func(ctx context.Context, dbrp influxdb.DBRPMappingFilter, opts ...influxdb.FindOptions) ([]*influxdb.DBRPMapping, int, error) { - return []*influxdb.DBRPMapping{}, 0, nil - }, - } - orgSvc := &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: *filter.ID, - Name: "org", - }, nil - }, - } - server := httptest.NewServer(dbrp.NewHTTPHandler(zaptest.NewLogger(t), dbrpSvc, orgSvc)) - client, err := httpc.New(httpc.WithAddr(server.URL), httpc.WithStatusFn(http.CheckError)) - if err != nil { - t.Fatal(err) - } - dbrpClient := dbrp.NewClient(client) - dbrpClient.Prefix = "" - return dbrpClient, func() { - server.Close() - } -} - -func TestClient(t *testing.T) { - t.Run("can create", func(t *testing.T) { - client, shutdown := setup(t) - defer shutdown() - - if err := client.Create(context.Background(), &influxdb.DBRPMapping{ - Database: "db", - RetentionPolicy: "rp", - Default: false, - OrganizationID: 1, - BucketID: 1, - }); err != nil { - t.Error(err) - } - }) - - t.Run("can read", func(t *testing.T) { - client, shutdown := setup(t) - defer shutdown() - - if _, err := client.FindByID(context.Background(), 1, 1); err != nil { - t.Error(err) - } - oid := platform.ID(1) - if _, _, err := client.FindMany(context.Background(), 
influxdb.DBRPMappingFilter{OrgID: &oid}); err != nil { - t.Error(err) - } - }) - - t.Run("can update", func(t *testing.T) { - client, shutdown := setup(t) - defer shutdown() - - if err := client.Update(context.Background(), &influxdb.DBRPMapping{ - ID: 1, - Database: "db", - RetentionPolicy: "rp", - Default: false, - OrganizationID: 1, - BucketID: 1, - }); err != nil { - t.Error(err) - } - }) - - t.Run("can delete", func(t *testing.T) { - client, shutdown := setup(t) - defer shutdown() - - if err := client.Delete(context.Background(), 1, 1); err != nil { - t.Error(err) - } - }) -} diff --git a/dbrp/http_server_dbrp.go b/dbrp/http_server_dbrp.go deleted file mode 100644 index 617b44ec50c..00000000000 --- a/dbrp/http_server_dbrp.go +++ /dev/null @@ -1,361 +0,0 @@ -package dbrp - -import ( - "encoding/json" - "net/http" - "strconv" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -const ( - PrefixDBRP = "/api/v2/dbrps" -) - -type Handler struct { - chi.Router - api *kithttp.API - log *zap.Logger - dbrpSvc influxdb.DBRPMappingService - orgSvc influxdb.OrganizationService -} - -// NewHTTPHandler constructs a new http server. -func NewHTTPHandler(log *zap.Logger, dbrpSvc influxdb.DBRPMappingService, orgSvc influxdb.OrganizationService) *Handler { - h := &Handler{ - api: kithttp.NewAPI(kithttp.WithLog(log)), - log: log, - dbrpSvc: dbrpSvc, - orgSvc: orgSvc, - } - - r := chi.NewRouter() - r.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - - r.Route("/", func(r chi.Router) { - r.Post("/", h.handlePostDBRP) - r.Get("/", h.handleGetDBRPs) - - r.Route("/{id}", func(r chi.Router) { - r.Get("/", h.handleGetDBRP) - r.Patch("/", h.handlePatchDBRP) - r.Delete("/", h.handleDeleteDBRP) - }) - }) - - h.Router = r - return h -} - -type createDBRPRequest struct { - Database string `json:"database"` - RetentionPolicy string `json:"retention_policy"` - Default bool `json:"default"` - Org string `json:"org"` - // N.B. These are purposefully typed as string instead of - // influxdb.ID so we can provide more specific error messages. - // If they have the ID type, our JSON decoder will just return - // a generic "invalid ID" error without stating which ID is - // the problem. - // - // Ideally we'd fix the decoder so we could get more useful - // errors everywhere, but I'm worried about the impact of a - // system-wide change to our "invalid ID" error format. 
- OrganizationID string `json:"orgID"` - BucketID string `json:"bucketID"` -} - -func (h *Handler) handlePostDBRP(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - var req createDBRPRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - h.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid json structure", - Err: err, - }) - return - } - - var orgID platform.ID - var bucketID platform.ID - - if req.OrganizationID == "" { - if req.Org == "" { - h.api.Err(w, r, ErrNoOrgProvided) - return - } - org, err := h.orgSvc.FindOrganization(r.Context(), influxdb.OrganizationFilter{ - Name: &req.Org, - }) - if err != nil { - h.api.Err(w, r, ErrOrgNotFound(req.Org)) - return - } - orgID = org.ID - } else if err := orgID.DecodeFromString(req.OrganizationID); err != nil { - h.api.Err(w, r, ErrInvalidOrgID(req.OrganizationID, err)) - return - } - - if err := bucketID.DecodeFromString(req.BucketID); err != nil { - h.api.Err(w, r, ErrInvalidBucketID(req.BucketID, err)) - return - } - - dbrp := &influxdb.DBRPMapping{ - Database: req.Database, - RetentionPolicy: req.RetentionPolicy, - Default: req.Default, - OrganizationID: orgID, - BucketID: bucketID, - } - if err := h.dbrpSvc.Create(ctx, dbrp); err != nil { - h.api.Err(w, r, err) - return - } - h.api.Respond(w, r, http.StatusCreated, dbrp) -} - -type getDBRPsResponse struct { - Content []*influxdb.DBRPMapping `json:"content"` -} - -func (h *Handler) handleGetDBRPs(w http.ResponseWriter, r *http.Request) { - filter, err := h.getFilterFromHTTPRequest(r) - if err != nil { - h.api.Err(w, r, err) - return - } - dbrps, _, err := h.dbrpSvc.FindMany(r.Context(), filter) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, getDBRPsResponse{ - Content: dbrps, - }) -} - -type getDBRPResponse struct { - Content *influxdb.DBRPMapping `json:"content"` -} - -func (h *Handler) handleGetDBRP(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id := chi.URLParam(r, "id") - if id == "" { - h.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - }) - return - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - h.api.Err(w, r, err) - return - } - - orgID, err := h.mustGetOrgIDFromHTTPRequest(r) - if err != nil { - h.api.Err(w, r, err) - return - } - - dbrp, err := h.dbrpSvc.FindByID(ctx, *orgID, i) - if err != nil { - h.api.Err(w, r, err) - return - } - h.api.Respond(w, r, http.StatusOK, getDBRPResponse{ - Content: dbrp, - }) -} - -func (h *Handler) handlePatchDBRP(w http.ResponseWriter, r *http.Request) { - bodyRequest := struct { - Default *bool `json:"default"` - RetentionPolicy *string `json:"retention_policy"` - }{} - - ctx := r.Context() - - id := chi.URLParam(r, "id") - if id == "" { - h.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - }) - return - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - h.api.Err(w, r, err) - return - } - - orgID, err := h.mustGetOrgIDFromHTTPRequest(r) - if err != nil { - h.api.Err(w, r, err) - return - } - - dbrp, err := h.dbrpSvc.FindByID(ctx, *orgID, i) - if err != nil { - h.api.Err(w, r, err) - return - } - - if err := json.NewDecoder(r.Body).Decode(&bodyRequest); err != nil { - h.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid json structure", - Err: err, - }) - return - } - - if bodyRequest.Default != nil && dbrp.Default != *bodyRequest.Default { - dbrp.Default = *bodyRequest.Default - } - - if 
bodyRequest.RetentionPolicy != nil && *bodyRequest.RetentionPolicy != dbrp.RetentionPolicy { - dbrp.RetentionPolicy = *bodyRequest.RetentionPolicy - } - - if err := h.dbrpSvc.Update(ctx, dbrp); err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, struct { - Content *influxdb.DBRPMapping `json:"content"` - }{ - Content: dbrp, - }) -} - -func (h *Handler) handleDeleteDBRP(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id := chi.URLParam(r, "id") - if id == "" { - h.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - }) - return - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - h.api.Err(w, r, err) - return - } - - orgID, err := h.mustGetOrgIDFromHTTPRequest(r) - if err != nil { - h.api.Err(w, r, err) - return - } - - if err := h.dbrpSvc.Delete(ctx, *orgID, i); err != nil { - h.api.Err(w, r, err) - return - } - - w.WriteHeader(http.StatusNoContent) -} - -func (h *Handler) getFilterFromHTTPRequest(r *http.Request) (f influxdb.DBRPMappingFilter, err error) { - // Always provide OrgID. - f.OrgID, err = h.mustGetOrgIDFromHTTPRequest(r) - if err != nil { - return f, err - } - f.ID, err = getDBRPIDFromHTTPRequest(r) - if err != nil { - return f, err - } - f.BucketID, err = getBucketIDFromHTTPRequest(r) - if err != nil { - return f, err - } - rawDB := r.URL.Query().Get("db") - if rawDB != "" { - f.Database = &rawDB - } - rawRP := r.URL.Query().Get("rp") - if rawRP != "" { - f.RetentionPolicy = &rawRP - } - rawDefault := r.URL.Query().Get("default") - if rawDefault != "" { - d, err := strconv.ParseBool(rawDefault) - if err != nil { - return f, &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid default parameter", - } - } - f.Default = &d - } - return f, nil -} - -func getIDFromHTTPRequest(r *http.Request, key string, onErr func(string, error) error) (*platform.ID, error) { - var id platform.ID - raw := r.URL.Query().Get(key) - if raw != "" { - if err := id.DecodeFromString(raw); err != nil { - return nil, onErr(raw, err) - } - } else { - return nil, nil - } - return &id, nil -} - -// mustGetOrgIDFromHTTPRequest returns the org ID parameter from the request, falling -// back to looking up the org ID by org name if the ID parameter is not present. 
-func (h *Handler) mustGetOrgIDFromHTTPRequest(r *http.Request) (*platform.ID, error) { - orgID, err := getIDFromHTTPRequest(r, "orgID", ErrInvalidOrgID) - if err != nil { - return nil, err - } - if orgID == nil { - name := r.URL.Query().Get("org") - if name == "" { - return nil, ErrNoOrgProvided - } - org, err := h.orgSvc.FindOrganization(r.Context(), influxdb.OrganizationFilter{ - Name: &name, - }) - if err != nil { - return nil, ErrOrgNotFound(name) - } - orgID = &org.ID - } - return orgID, nil -} - -func getDBRPIDFromHTTPRequest(r *http.Request) (*platform.ID, error) { - return getIDFromHTTPRequest(r, "id", ErrInvalidDBRPID) -} - -func getBucketIDFromHTTPRequest(r *http.Request) (*platform.ID, error) { - return getIDFromHTTPRequest(r, "bucketID", ErrInvalidBucketID) -} diff --git a/dbrp/http_server_dbrp_test.go b/dbrp/http_server_dbrp_test.go deleted file mode 100644 index 67d08316c09..00000000000 --- a/dbrp/http_server_dbrp_test.go +++ /dev/null @@ -1,542 +0,0 @@ -package dbrp_test - -import ( - "context" - "encoding/json" - "errors" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/dbrp" - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" -) - -func initHttpService(t *testing.T) (influxdb.DBRPMappingService, *httptest.Server, func()) { - t.Helper() - ctx := context.Background() - bucketSvc := mock.NewBucketService() - orgSvc := &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - if filter.Name == nil || *filter.Name != "org" { - return nil, errors.New("not found") - } - return &influxdb.Organization{ - Name: "org", - ID: influxdbtesting.MustIDBase16("059af7ed2a034000"), - }, nil - }, - } - - s, closeS := influxdbtesting.NewTestBoltStore(t) - svc := dbrp.NewService(ctx, bucketSvc, s) - - server := httptest.NewServer(dbrp.NewHTTPHandler(zaptest.NewLogger(t), svc, orgSvc)) - return svc, server, func() { - closeS() - server.Close() - } -} - -func Test_handlePostDBRP(t *testing.T) { - table := []struct { - Name string - ExpectedErr error - ExpectedDBRP *influxdb.DBRPMapping - Input io.Reader - }{ - { - Name: "Create valid dbrp", - Input: strings.NewReader(`{ - "bucketID": "5555f7ed2a035555", - "orgID": "059af7ed2a034000", - "database": "mydb", - "retention_policy": "autogen", - "default": false -}`), - ExpectedDBRP: &influxdb.DBRPMapping{ - OrganizationID: influxdbtesting.MustIDBase16("059af7ed2a034000"), - }, - }, - { - Name: "Create valid dbrp by org name", - Input: strings.NewReader(`{ - "bucketID": "5555f7ed2a035555", - "org": "org", - "database": "mydb", - "retention_policy": "autogen", - "default": false -}`), - ExpectedDBRP: &influxdb.DBRPMapping{ - OrganizationID: influxdbtesting.MustIDBase16("059af7ed2a034000"), - }, - }, - { - Name: "Create with no orgID or org", - Input: strings.NewReader(`{ - "bucketID": "5555f7ed2a035555", - "database": "mydb", - "retention_policy": "autogen", - "default": false -}`), - ExpectedErr: dbrp.ErrNoOrgProvided, - }, - { - Name: "Create with invalid orgID", - Input: strings.NewReader(`{ - "bucketID": "5555f7ed2a035555", - "orgID": 
"invalid", - "database": "mydb", - "retention_policy": "autogen", - "default": false -}`), - ExpectedErr: dbrp.ErrInvalidOrgID("invalid", platform.ErrInvalidIDLength), - }, - { - Name: "Create with invalid org name", - Input: strings.NewReader(`{ - "bucketID": "5555f7ed2a035555", - "org": "invalid", - "database": "mydb", - "retention_policy": "autogen", - "default": false -}`), - ExpectedErr: dbrp.ErrOrgNotFound("invalid"), - }, - { - Name: "Create with invalid bucket ID", - Input: strings.NewReader(`{ - "bucketID": "invalid", - "org": "org", - "database": "mydb", - "retention_policy": "autogen", - "default": false -}`), - ExpectedErr: dbrp.ErrInvalidBucketID("invalid", platform.ErrInvalidIDLength), - }, - } - - for _, tt := range table { - t.Run(tt.Name, func(t *testing.T) { - if tt.ExpectedErr != nil && tt.ExpectedDBRP != nil { - t.Fatal("one of `ExpectedErr` or `ExpectedDBRP` has to be set") - } - _, server, shutdown := initHttpService(t) - defer shutdown() - client := server.Client() - - resp, err := client.Post(server.URL+"/", "application/json", tt.Input) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - if tt.ExpectedErr != nil { - b, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - var actualErr errors2.Error - if err := json.Unmarshal(b, &actualErr); err != nil { - t.Fatal(err) - } - - assert.Equal(t, tt.ExpectedErr.Error(), actualErr.Error()) - return - } - dbrp := &influxdb.DBRPMapping{} - if err := json.NewDecoder(resp.Body).Decode(&dbrp); err != nil { - t.Fatal(err) - } - - if !dbrp.ID.Valid() { - t.Fatalf("expected valid id, got an invalid one %s", dbrp.ID.String()) - } - - if dbrp.OrganizationID != tt.ExpectedDBRP.OrganizationID { - t.Fatalf("expected orgid %s got %s", tt.ExpectedDBRP.OrganizationID, dbrp.OrganizationID) - } - - if !dbrp.Default { - t.Fatalf("expected dbrp to be marked as default") - } - }) - } -} - -func Test_handleGetDBRPs(t *testing.T) { - table := []struct { - Name string - QueryParams string - ExpectedErr error - ExpectedDBRPs []influxdb.DBRPMapping - }{ - { - Name: "ok", - QueryParams: "orgID=059af7ed2a034000", - ExpectedDBRPs: []influxdb.DBRPMapping{ - { - ID: influxdbtesting.MustIDBase16("1111111111111111"), - BucketID: influxdbtesting.MustIDBase16("5555f7ed2a035555"), - OrganizationID: influxdbtesting.MustIDBase16("059af7ed2a034000"), - Database: "mydb", - RetentionPolicy: "autogen", - Default: true, - }, - }, - }, - { - Name: "invalid org", - QueryParams: "orgID=invalid", - ExpectedErr: dbrp.ErrInvalidOrgID("invalid", platform.ErrInvalidIDLength), - }, - { - Name: "invalid bucket", - QueryParams: "orgID=059af7ed2a034000&bucketID=invalid", - ExpectedErr: dbrp.ErrInvalidBucketID("invalid", platform.ErrInvalidIDLength), - }, - { - Name: "invalid default", - QueryParams: "orgID=059af7ed2a034000&default=notabool", - ExpectedErr: &errors2.Error{ - Code: errors2.EInvalid, - Msg: "invalid default parameter", - }, - }, - { - Name: "no org", - QueryParams: "default=true&retention_police=lol", - ExpectedErr: dbrp.ErrNoOrgProvided, - }, - { - Name: "no match", - QueryParams: "orgID=059af7ed2a034000&default=false", - ExpectedDBRPs: []influxdb.DBRPMapping{}, - }, - { - Name: "all match", - QueryParams: "orgID=059af7ed2a034000&default=true&rp=autogen&db=mydb&bucketID=5555f7ed2a035555&id=1111111111111111", - ExpectedDBRPs: []influxdb.DBRPMapping{ - { - ID: influxdbtesting.MustIDBase16("1111111111111111"), - BucketID: influxdbtesting.MustIDBase16("5555f7ed2a035555"), - OrganizationID: 
influxdbtesting.MustIDBase16("059af7ed2a034000"), - Database: "mydb", - RetentionPolicy: "autogen", - Default: true, - }, - }, - }, - { - Name: "org name", - QueryParams: "org=org", - ExpectedDBRPs: []influxdb.DBRPMapping{ - { - ID: influxdbtesting.MustIDBase16("1111111111111111"), - BucketID: influxdbtesting.MustIDBase16("5555f7ed2a035555"), - OrganizationID: influxdbtesting.MustIDBase16("059af7ed2a034000"), - Database: "mydb", - RetentionPolicy: "autogen", - Default: true, - }, - }, - }, - } - - ctx := context.Background() - for _, tt := range table { - t.Run(tt.Name, func(t *testing.T) { - if tt.ExpectedErr != nil && len(tt.ExpectedDBRPs) != 0 { - t.Error("one of those has to be set") - } - svc, server, shutdown := initHttpService(t) - defer shutdown() - - if svc, ok := svc.(*dbrp.Service); ok { - svc.IDGen = mock.NewIDGenerator("1111111111111111", t) - } - db := &influxdb.DBRPMapping{ - BucketID: influxdbtesting.MustIDBase16("5555f7ed2a035555"), - OrganizationID: influxdbtesting.MustIDBase16("059af7ed2a034000"), - Database: "mydb", - RetentionPolicy: "autogen", - Default: true, - } - if err := svc.Create(ctx, db); err != nil { - t.Fatal(err) - } - - client := server.Client() - resp, err := client.Get(server.URL + "?" + tt.QueryParams) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - if tt.ExpectedErr != nil { - b, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - var actualErr errors2.Error - if err := json.Unmarshal(b, &actualErr); err != nil { - t.Fatal(err) - } - - assert.Equal(t, tt.ExpectedErr.Error(), actualErr.Error()) - return - } - dbrps := struct { - Content []influxdb.DBRPMapping `json:"content"` - }{} - if err := json.NewDecoder(resp.Body).Decode(&dbrps); err != nil { - t.Fatal(err) - } - - if len(dbrps.Content) != len(tt.ExpectedDBRPs) { - t.Fatalf("expected %d dbrps got %d", len(tt.ExpectedDBRPs), len(dbrps.Content)) - } - - if !cmp.Equal(tt.ExpectedDBRPs, dbrps.Content) { - t.Fatalf(cmp.Diff(tt.ExpectedDBRPs, dbrps.Content)) - } - - }) - } -} - -func Test_handlePatchDBRP(t *testing.T) { - table := []struct { - Name string - ExpectedErr error - ExpectedDBRP *influxdb.DBRPMapping - URLSuffix string - Input io.Reader - }{ - { - Name: "happy path update", - URLSuffix: "/1111111111111111?orgID=059af7ed2a034000", - Input: strings.NewReader(`{ - "retention_policy": "updaterp", - "database": "wont_change" -}`), - ExpectedDBRP: &influxdb.DBRPMapping{ - ID: influxdbtesting.MustIDBase16("1111111111111111"), - BucketID: influxdbtesting.MustIDBase16("5555f7ed2a035555"), - OrganizationID: influxdbtesting.MustIDBase16("059af7ed2a034000"), - Database: "mydb", - RetentionPolicy: "updaterp", - Default: true, - }, - }, - { - Name: "happy path update by org name", - URLSuffix: "/1111111111111111?org=org", - Input: strings.NewReader(`{ - "retention_policy": "updaterp", - "database": "wont_change" -}`), - ExpectedDBRP: &influxdb.DBRPMapping{ - ID: influxdbtesting.MustIDBase16("1111111111111111"), - BucketID: influxdbtesting.MustIDBase16("5555f7ed2a035555"), - OrganizationID: influxdbtesting.MustIDBase16("059af7ed2a034000"), - Database: "mydb", - RetentionPolicy: "updaterp", - Default: true, - }, - }, - { - Name: "invalid org", - URLSuffix: "/1111111111111111?orgID=invalid", - Input: strings.NewReader(`{ - "database": "updatedb" -}`), - ExpectedErr: dbrp.ErrInvalidOrgID("invalid", platform.ErrInvalidIDLength), - }, - { - Name: "no org", - URLSuffix: "/1111111111111111", - Input: strings.NewReader(`{ - "database": "updatedb" -}`), - ExpectedErr: 
dbrp.ErrNoOrgProvided, - }, - { - Name: "not found", - URLSuffix: "/1111111111111111?orgID=059af7ed2a034001", - ExpectedErr: dbrp.ErrDBRPNotFound, - }, - } - - ctx := context.Background() - - for _, tt := range table { - t.Run(tt.Name, func(t *testing.T) { - if tt.ExpectedErr != nil && tt.ExpectedDBRP != nil { - t.Error("one of those has to be set") - } - svc, server, shutdown := initHttpService(t) - defer shutdown() - client := server.Client() - - if svc, ok := svc.(*dbrp.Service); ok { - svc.IDGen = mock.NewIDGenerator("1111111111111111", t) - } - - dbrp := &influxdb.DBRPMapping{ - BucketID: influxdbtesting.MustIDBase16("5555f7ed2a035555"), - OrganizationID: influxdbtesting.MustIDBase16("059af7ed2a034000"), - Database: "mydb", - RetentionPolicy: "autogen", - Default: true, - } - if err := svc.Create(ctx, dbrp); err != nil { - t.Fatal(err) - } - - req, _ := http.NewRequest(http.MethodPatch, server.URL+tt.URLSuffix, tt.Input) - resp, err := client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - if tt.ExpectedErr != nil { - b, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - var actualErr errors2.Error - if err := json.Unmarshal(b, &actualErr); err != nil { - t.Fatal(err) - } - - assert.Equal(t, tt.ExpectedErr.Error(), actualErr.Error()) - return - } - dbrpResponse := struct { - Content *influxdb.DBRPMapping `json:"content"` - }{} - - if err := json.NewDecoder(resp.Body).Decode(&dbrpResponse); err != nil { - t.Fatal(err) - } - - if !cmp.Equal(tt.ExpectedDBRP, dbrpResponse.Content) { - t.Fatalf(cmp.Diff(tt.ExpectedDBRP, dbrpResponse.Content)) - } - }) - } -} - -func Test_handleDeleteDBRP(t *testing.T) { - table := []struct { - Name string - URLSuffix string - ExpectedErr error - ExpectStillExists bool - }{ - { - Name: "delete", - URLSuffix: "/1111111111111111?orgID=059af7ed2a034000", - }, - { - Name: "delete by org name", - URLSuffix: "/1111111111111111?org=org", - }, - { - Name: "invalid org", - URLSuffix: "/1111111111111111?orgID=invalid", - ExpectedErr: dbrp.ErrInvalidOrgID("invalid", platform.ErrInvalidIDLength), - }, - { - Name: "invalid org name", - URLSuffix: "/1111111111111111?org=invalid", - ExpectedErr: dbrp.ErrOrgNotFound("invalid"), - }, - { - Name: "no org", - URLSuffix: "/1111111111111111", - ExpectedErr: dbrp.ErrNoOrgProvided, - }, - { - // Not found is not an error for Delete. 
- Name: "not found", - URLSuffix: "/1111111111111111?orgID=059af7ed2a034001", - ExpectStillExists: true, - }, - } - - ctx := context.Background() - - for _, tt := range table { - t.Run(tt.Name, func(t *testing.T) { - svc, server, shutdown := initHttpService(t) - defer shutdown() - client := server.Client() - - d := &influxdb.DBRPMapping{ - ID: influxdbtesting.MustIDBase16("1111111111111111"), - BucketID: influxdbtesting.MustIDBase16("5555f7ed2a035555"), - OrganizationID: influxdbtesting.MustIDBase16("059af7ed2a034000"), - Database: "mydb", - RetentionPolicy: "testrp", - Default: true, - } - if err := svc.Create(ctx, d); err != nil { - t.Fatal(err) - } - - req, _ := http.NewRequest(http.MethodDelete, server.URL+tt.URLSuffix, nil) - resp, err := client.Do(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - if tt.ExpectedErr != nil { - b, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - var actualErr errors2.Error - if err := json.Unmarshal(b, &actualErr); err != nil { - t.Fatal(err) - } - - assert.Equal(t, tt.ExpectedErr.Error(), actualErr.Error()) - return - } - - if resp.StatusCode != http.StatusNoContent { - t.Fatalf("expected status code %d, got %d", http.StatusNoContent, resp.StatusCode) - } - - gotDBRP, err := svc.FindByID(ctx, influxdbtesting.MustIDBase16("059af7ed2a034000"), influxdbtesting.MustIDBase16("1111111111111111")) - if tt.ExpectStillExists { - if err != nil { - t.Fatal(err) - } - if diff := cmp.Diff(d, gotDBRP); diff != "" { - t.Fatal(diff) - } - } else { - if err == nil { - t.Fatal("expected error got none") - } - if !errors.Is(err, dbrp.ErrDBRPNotFound) { - t.Fatalf("expected err dbrp not found, got %s", err) - } - } - }) - } -} diff --git a/dbrp/index.go b/dbrp/index.go deleted file mode 100644 index 765d2248ce2..00000000000 --- a/dbrp/index.go +++ /dev/null @@ -1,20 +0,0 @@ -package dbrp - -import ( - "encoding/json" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kv" -) - -var ( - ByOrgIDIndexMapping = kv.NewIndexMapping(bucket, byOrgIDIndexBucket, func(v []byte) ([]byte, error) { - var dbrp influxdb.DBRPMapping - if err := json.Unmarshal(v, &dbrp); err != nil { - return nil, err - } - - id, _ := dbrp.OrganizationID.Encode() - return id, nil - }) -) diff --git a/dbrp/middleware_auth.go b/dbrp/middleware_auth.go deleted file mode 100644 index 37610a01acf..00000000000 --- a/dbrp/middleware_auth.go +++ /dev/null @@ -1,63 +0,0 @@ -package dbrp - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.DBRPMappingService = (*AuthorizedService)(nil) - -type AuthorizedService struct { - influxdb.DBRPMappingService -} - -func NewAuthorizedService(s influxdb.DBRPMappingService) *AuthorizedService { - return &AuthorizedService{DBRPMappingService: s} -} - -func (svc AuthorizedService) FindByID(ctx context.Context, orgID, id platform.ID) (*influxdb.DBRPMapping, error) { - mapping, err := svc.DBRPMappingService.FindByID(ctx, orgID, id) - if err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeRead(ctx, influxdb.BucketsResourceType, mapping.BucketID, orgID); err != nil { - return nil, err - } - return mapping, nil -} - -func (svc AuthorizedService) FindMany(ctx context.Context, filter influxdb.DBRPMappingFilter, opts ...influxdb.FindOptions) ([]*influxdb.DBRPMapping, int, error) { - dbrps, _, err := 
svc.DBRPMappingService.FindMany(ctx, filter, opts...) - if err != nil { - return nil, 0, err - } - return authorizer.AuthorizeFindDBRPs(ctx, dbrps) -} - -func (svc AuthorizedService) Create(ctx context.Context, t *influxdb.DBRPMapping) error { - if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.BucketsResourceType, t.BucketID, t.OrganizationID); err != nil { - return err - } - return svc.DBRPMappingService.Create(ctx, t) -} - -func (svc AuthorizedService) Update(ctx context.Context, u *influxdb.DBRPMapping) error { - if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.BucketsResourceType, u.BucketID, u.OrganizationID); err != nil { - return err - } - return svc.DBRPMappingService.Update(ctx, u) -} - -func (svc AuthorizedService) Delete(ctx context.Context, orgID, id platform.ID) error { - mapping, err := svc.DBRPMappingService.FindByID(ctx, orgID, id) - if err != nil { - return err - } - if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.BucketsResourceType, mapping.BucketID, orgID); err != nil { - return err - } - return svc.DBRPMappingService.Delete(ctx, orgID, id) -} diff --git a/dbrp/middleware_auth_test.go b/dbrp/middleware_auth_test.go deleted file mode 100644 index 7ba4464bdf3..00000000000 --- a/dbrp/middleware_auth_test.go +++ /dev/null @@ -1,639 +0,0 @@ -package dbrp_test - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/dbrp" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestAuth_FindByID(t *testing.T) { - type fields struct { - service influxdb.DBRPMappingService - } - type args struct { - orgID platform.ID - id platform.ID - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id by org id", - fields: fields{ - service: &mock.DBRPMappingService{ - FindByIDFn: func(_ context.Context, _, _ platform.ID) (*influxdb.DBRPMapping, error) { - return &influxdb.DBRPMapping{ - OrganizationID: platform.ID(1), - BucketID: platform.ID(1), - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - orgID: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "authorized to access id by id", - fields: fields{ - service: &mock.DBRPMappingService{ - FindByIDFn: func(_ context.Context, _, _ platform.ID) (*influxdb.DBRPMapping, error) { - return &influxdb.DBRPMapping{ - OrganizationID: platform.ID(1), - BucketID: platform.ID(1), - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - orgID: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id by org id", - fields: fields{ - service: &mock.DBRPMappingService{ - FindByIDFn: func(_ context.Context, _, _ platform.ID) (*influxdb.DBRPMapping, error) { - return &influxdb.DBRPMapping{ - OrganizationID: platform.ID(2), - BucketID: platform.ID(1), - }, nil - }, - }, - }, 
- args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - orgID: 2, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/0000000000000002/buckets/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - { - name: "unauthorized to access id by id", - fields: fields{ - service: &mock.DBRPMappingService{ - FindByIDFn: func(_ context.Context, _, _ platform.ID) (*influxdb.DBRPMapping, error) { - return &influxdb.DBRPMapping{ - OrganizationID: platform.ID(1), - BucketID: platform.ID(1), - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.DBRPResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - orgID: 2, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/0000000000000002/buckets/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := dbrp.NewAuthorizedService(tt.fields.service) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindByID(ctx, tt.args.orgID, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestAuth_FindMany(t *testing.T) { - type fields struct { - service influxdb.DBRPMappingService - } - type args struct { - filter influxdb.DBRPMappingFilter - permissions []influxdb.Permission - } - type wants struct { - err error - ms []*influxdb.DBRPMapping - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "no result", - fields: fields{ - service: &mock.DBRPMappingService{ - FindManyFn: func(ctx context.Context, dbrp influxdb.DBRPMappingFilter, opts ...influxdb.FindOptions) ([]*influxdb.DBRPMapping, int, error) { - return []*influxdb.DBRPMapping{ - { - ID: 1, - OrganizationID: 1, - BucketID: 1, - }, - { - ID: 2, - OrganizationID: 1, - BucketID: 2, - }, - { - ID: 3, - OrganizationID: 2, - BucketID: 3, - }, - { - ID: 4, - OrganizationID: 3, - BucketID: 4, - }, - }, 4, nil - }, - }, - }, - args: args{ - permissions: []influxdb.Permission{{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.DBRPResourceType, - OrgID: influxdbtesting.IDPtr(42), - }, - }}, - filter: influxdb.DBRPMappingFilter{}, - }, - wants: wants{ - err: nil, - ms: []*influxdb.DBRPMapping{}, - }, - }, - { - name: "partial", - fields: fields{ - service: &mock.DBRPMappingService{ - FindManyFn: func(ctx context.Context, dbrp influxdb.DBRPMappingFilter, opts ...influxdb.FindOptions) ([]*influxdb.DBRPMapping, int, error) { - return []*influxdb.DBRPMapping{ - { - ID: 1, - OrganizationID: 1, - BucketID: 1, - }, - { - ID: 2, - OrganizationID: 1, - BucketID: 2, - }, - { - ID: 3, - OrganizationID: 2, - BucketID: 3, - }, - { - ID: 4, - OrganizationID: 3, - BucketID: 4, - }, - }, 4, nil - }, - }, - }, - args: args{ - permissions: []influxdb.Permission{{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }}, - filter: influxdb.DBRPMappingFilter{}, - }, - wants: wants{ - err: nil, - ms: []*influxdb.DBRPMapping{ - { - ID: 1, - OrganizationID: 1, - BucketID: 1, - }, - { - ID: 2, - OrganizationID: 1, - BucketID: 2, - }, - }, - }, - }, - { - name: "all", - fields: fields{ - service: 
&mock.DBRPMappingService{ - FindManyFn: func(ctx context.Context, dbrp influxdb.DBRPMappingFilter, opts ...influxdb.FindOptions) ([]*influxdb.DBRPMapping, int, error) { - return []*influxdb.DBRPMapping{ - { - ID: 1, - OrganizationID: 1, - BucketID: 1, - }, - { - ID: 2, - OrganizationID: 1, - BucketID: 2, - }, - { - ID: 3, - OrganizationID: 2, - BucketID: 3, - }, - { - ID: 4, - OrganizationID: 3, - BucketID: 4, - }, - }, 4, nil - }, - }, - }, - args: args{ - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(2), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(3), - }, - }, - }, - filter: influxdb.DBRPMappingFilter{}, - }, - wants: wants{ - err: nil, - ms: []*influxdb.DBRPMapping{ - { - ID: 1, - OrganizationID: 1, - BucketID: 1, - }, - { - ID: 2, - OrganizationID: 1, - BucketID: 2, - }, - { - ID: 3, - OrganizationID: 2, - BucketID: 3, - }, - { - ID: 4, - OrganizationID: 3, - BucketID: 4, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := dbrp.NewAuthorizedService(tt.fields.service) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - gots, ngots, err := s.FindMany(ctx, tt.args.filter) - if ngots != len(gots) { - t.Errorf("got wrong number back") - } - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - if diff := cmp.Diff(tt.wants.ms, gots, influxdbtesting.DBRPMappingCmpOptions...); diff != "" { - t.Errorf("unexpected result -want/+got:\n\t%s", diff) - } - }) - } -} - -func TestAuth_Create(t *testing.T) { - type fields struct { - service influxdb.DBRPMappingService - } - type args struct { - m influxdb.DBRPMapping - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized", - fields: fields{ - service: &mock.DBRPMappingService{}, - }, - args: args{ - m: influxdb.DBRPMapping{ - ID: 1, - OrganizationID: 1, - BucketID: 2, - }, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized", - fields: fields{ - service: &mock.DBRPMappingService{}, - }, - args: args{ - m: influxdb.DBRPMapping{ - ID: 1, - OrganizationID: 1, - BucketID: 2, - }, - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/0000000000000001/buckets/0000000000000002 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := dbrp.NewAuthorizedService(tt.fields.service) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.Create(ctx, &tt.args.m) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestAuth_Update(t *testing.T) { - type fields 
struct { - service influxdb.DBRPMappingService - } - type args struct { - orgID platform.ID - id platform.ID - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized", - fields: fields{ - service: &mock.DBRPMappingService{}, - }, - args: args{ - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - orgID: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized", - fields: fields{ - service: &mock.DBRPMappingService{}, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - orgID: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/0000000000000001/buckets/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := dbrp.NewAuthorizedService(tt.fields.service) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - // Does not matter how we update, we only need to check auth. - err := s.Update(ctx, &influxdb.DBRPMapping{ID: tt.args.id, OrganizationID: tt.args.orgID, BucketID: 1}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestAuth_Delete(t *testing.T) { - type fields struct { - service influxdb.DBRPMappingService - } - type args struct { - orgID platform.ID - id platform.ID - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized", - fields: fields{ - service: &mock.DBRPMappingService{ - FindByIDFn: func(_ context.Context, _, _ platform.ID) (*influxdb.DBRPMapping, error) { - return &influxdb.DBRPMapping{ - OrganizationID: platform.ID(1), - BucketID: platform.ID(1), - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - orgID: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized", - fields: fields{ - service: &mock.DBRPMappingService{ - FindByIDFn: func(_ context.Context, _, _ platform.ID) (*influxdb.DBRPMapping, error) { - return &influxdb.DBRPMapping{ - OrganizationID: platform.ID(1), - BucketID: platform.ID(1), - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - orgID: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/0000000000000001/buckets/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := dbrp.NewAuthorizedService(tt.fields.service) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.Delete(ctx, tt.args.orgID, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/dbrp/mocks/bucket_service.go b/dbrp/mocks/bucket_service.go 
deleted file mode 100644 index 7fe87bf5efc..00000000000 --- a/dbrp/mocks/bucket_service.go +++ /dev/null @@ -1,146 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2 (interfaces: BucketService) - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockBucketService is a mock of BucketService interface -type MockBucketService struct { - ctrl *gomock.Controller - recorder *MockBucketServiceMockRecorder -} - -// MockBucketServiceMockRecorder is the mock recorder for MockBucketService -type MockBucketServiceMockRecorder struct { - mock *MockBucketService -} - -// NewMockBucketService creates a new mock instance -func NewMockBucketService(ctrl *gomock.Controller) *MockBucketService { - mock := &MockBucketService{ctrl: ctrl} - mock.recorder = &MockBucketServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockBucketService) EXPECT() *MockBucketServiceMockRecorder { - return m.recorder -} - -// CreateBucket mocks base method -func (m *MockBucketService) CreateBucket(arg0 context.Context, arg1 *influxdb.Bucket) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateBucket", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// CreateBucket indicates an expected call of CreateBucket -func (mr *MockBucketServiceMockRecorder) CreateBucket(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucket", reflect.TypeOf((*MockBucketService)(nil).CreateBucket), arg0, arg1) -} - -// DeleteBucket mocks base method -func (m *MockBucketService) DeleteBucket(arg0 context.Context, arg1 platform.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucket", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteBucket indicates an expected call of DeleteBucket -func (mr *MockBucketServiceMockRecorder) DeleteBucket(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucket", reflect.TypeOf((*MockBucketService)(nil).DeleteBucket), arg0, arg1) -} - -// FindBucket mocks base method -func (m *MockBucketService) FindBucket(arg0 context.Context, arg1 influxdb.BucketFilter) (*influxdb.Bucket, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FindBucket", arg0, arg1) - ret0, _ := ret[0].(*influxdb.Bucket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FindBucket indicates an expected call of FindBucket -func (mr *MockBucketServiceMockRecorder) FindBucket(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucket", reflect.TypeOf((*MockBucketService)(nil).FindBucket), arg0, arg1) -} - -// FindBucketByID mocks base method -func (m *MockBucketService) FindBucketByID(arg0 context.Context, arg1 platform.ID) (*influxdb.Bucket, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FindBucketByID", arg0, arg1) - ret0, _ := ret[0].(*influxdb.Bucket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FindBucketByID indicates an expected call of FindBucketByID -func (mr *MockBucketServiceMockRecorder) FindBucketByID(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucketByID", reflect.TypeOf((*MockBucketService)(nil).FindBucketByID), arg0, arg1) -} - -// FindBucketByName mocks base method -func (m *MockBucketService) FindBucketByName(arg0 context.Context, arg1 platform.ID, arg2 string) (*influxdb.Bucket, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FindBucketByName", arg0, arg1, arg2) - ret0, _ := ret[0].(*influxdb.Bucket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FindBucketByName indicates an expected call of FindBucketByName -func (mr *MockBucketServiceMockRecorder) FindBucketByName(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucketByName", reflect.TypeOf((*MockBucketService)(nil).FindBucketByName), arg0, arg1, arg2) -} - -// FindBuckets mocks base method -func (m *MockBucketService) FindBuckets(arg0 context.Context, arg1 influxdb.BucketFilter, arg2 ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "FindBuckets", varargs...) - ret0, _ := ret[0].([]*influxdb.Bucket) - ret1, _ := ret[1].(int) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// FindBuckets indicates an expected call of FindBuckets -func (mr *MockBucketServiceMockRecorder) FindBuckets(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBuckets", reflect.TypeOf((*MockBucketService)(nil).FindBuckets), varargs...) -} - -// UpdateBucket mocks base method -func (m *MockBucketService) UpdateBucket(arg0 context.Context, arg1 platform.ID, arg2 influxdb.BucketUpdate) (*influxdb.Bucket, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateBucket", arg0, arg1, arg2) - ret0, _ := ret[0].(*influxdb.Bucket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateBucket indicates an expected call of UpdateBucket -func (mr *MockBucketServiceMockRecorder) UpdateBucket(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateBucket", reflect.TypeOf((*MockBucketService)(nil).UpdateBucket), arg0, arg1, arg2) -} diff --git a/dbrp/mocks/dbrp_mapping_service.go b/dbrp/mocks/dbrp_mapping_service.go deleted file mode 100644 index 6f529e58e80..00000000000 --- a/dbrp/mocks/dbrp_mapping_service.go +++ /dev/null @@ -1,115 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2 (interfaces: DBRPMappingService) - -// Package mocks is a generated GoMock package. 
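Both MockGen-generated mocks in this package (the bucket service above and the DBRP mapping service that follows) are driven through the standard gomock workflow. The snippet below is a hypothetical illustration only, not part of the original change; the test name and stubbed values are invented, while the gomock calls (`NewController`, `EXPECT`, `Return`, `Any`) and the generated constructor are the ones shown in these files.

```go
package mocks_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/dbrp/mocks"
	"github.com/influxdata/influxdb/v2/kit/platform"
)

func TestMockBucketServiceExample(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	svc := mocks.NewMockBucketService(ctrl)
	// Expect exactly one FindBucketByID call and stub its return values.
	svc.EXPECT().
		FindBucketByID(gomock.Any(), platform.ID(1)).
		Return(&influxdb.Bucket{ID: 1, Name: "telegraf/autogen"}, nil)

	b, err := svc.FindBucketByID(context.Background(), platform.ID(1))
	if err != nil || b.Name != "telegraf/autogen" {
		t.Fatalf("unexpected result: %v, %v", b, err)
	}
}
```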
-package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockDBRPMappingService is a mock of DBRPMappingService interface -type MockDBRPMappingService struct { - ctrl *gomock.Controller - recorder *MockDBRPMappingServiceMockRecorder -} - -// MockDBRPMappingServiceMockRecorder is the mock recorder for MockDBRPMappingService -type MockDBRPMappingServiceMockRecorder struct { - mock *MockDBRPMappingService -} - -// NewMockDBRPMappingService creates a new mock instance -func NewMockDBRPMappingService(ctrl *gomock.Controller) *MockDBRPMappingService { - mock := &MockDBRPMappingService{ctrl: ctrl} - mock.recorder = &MockDBRPMappingServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockDBRPMappingService) EXPECT() *MockDBRPMappingServiceMockRecorder { - return m.recorder -} - -// Create mocks base method -func (m *MockDBRPMappingService) Create(arg0 context.Context, arg1 *influxdb.DBRPMapping) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Create", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Create indicates an expected call of Create -func (mr *MockDBRPMappingServiceMockRecorder) Create(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockDBRPMappingService)(nil).Create), arg0, arg1) -} - -// Delete mocks base method -func (m *MockDBRPMappingService) Delete(arg0 context.Context, arg1, arg2 platform.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// Delete indicates an expected call of Delete -func (mr *MockDBRPMappingServiceMockRecorder) Delete(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDBRPMappingService)(nil).Delete), arg0, arg1, arg2) -} - -// FindByID mocks base method -func (m *MockDBRPMappingService) FindByID(arg0 context.Context, arg1, arg2 platform.ID) (*influxdb.DBRPMapping, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FindByID", arg0, arg1, arg2) - ret0, _ := ret[0].(*influxdb.DBRPMapping) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FindByID indicates an expected call of FindByID -func (mr *MockDBRPMappingServiceMockRecorder) FindByID(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindByID", reflect.TypeOf((*MockDBRPMappingService)(nil).FindByID), arg0, arg1, arg2) -} - -// FindMany mocks base method -func (m *MockDBRPMappingService) FindMany(arg0 context.Context, arg1 influxdb.DBRPMappingFilter, arg2 ...influxdb.FindOptions) ([]*influxdb.DBRPMapping, int, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "FindMany", varargs...) - ret0, _ := ret[0].([]*influxdb.DBRPMapping) - ret1, _ := ret[1].(int) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// FindMany indicates an expected call of FindMany -func (mr *MockDBRPMappingServiceMockRecorder) FindMany(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindMany", reflect.TypeOf((*MockDBRPMappingService)(nil).FindMany), varargs...) -} - -// Update mocks base method -func (m *MockDBRPMappingService) Update(arg0 context.Context, arg1 *influxdb.DBRPMapping) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Update", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Update indicates an expected call of Update -func (mr *MockDBRPMappingServiceMockRecorder) Update(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockDBRPMappingService)(nil).Update), arg0, arg1) -} diff --git a/dbrp/service.go b/dbrp/service.go deleted file mode 100644 index 2ad700c69de..00000000000 --- a/dbrp/service.go +++ /dev/null @@ -1,610 +0,0 @@ -package dbrp - -// The DBRP Mapping `Service` maps database and retention policy pairs to buckets. -// Every `DBRPMapping` stored is scoped to an organization ID. -// The service must ensure the following invariants hold at all times: -// - each orgID, database, retention policy triple must be unique; -// - for each orgID and database there must exist one and only one default mapping (`mapping.Default` set to `true`). -// The service does so using four kv buckets: -// - one for storing mappings; -// - one for storing an index of mappings by orgID and database; -// - one for storing an index of mappings by orgID; -// - one for storing the current default mapping for an orgID and a database. -// -// On *create*, the service creates the mapping. -// If another mapping with the same orgID, database, and retention policy exists, it fails. -// If the mapping is the first one for the specified orgID-database pair, it becomes the default one. -// -// On *find*, the service finds mappings. -// The default kv bucket is consulted to set the `mapping.Default` field on every mapping returned. -// -// On *update*, the service updates the mapping. -// If the update would cause two mappings to have the same orgID, database, and retention policy, it fails. -// If the update unsets `mapping.Default`, the first other mapping found is set as default. -// -// On *delete*, the service deletes the mapping. -// If the deleted mapping was the default, the first remaining mapping found (if any) is set as default.
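To make these invariants concrete, here is a minimal, hypothetical usage sketch; it is not part of the original file. It assumes a `kv.Store` (`st`) that has already been migrated with the dbrp buckets (for example, the bolt store used by the service tests later in this change) and a mock `BucketService` that resolves any bucket ID. Only constructors and types that appear in this change are used (`dbrp.NewService`, `influxdb.DBRPMapping`, `mock.BucketService`); the helper name `demoDBRPInvariants` is invented.

```go
package dbrp_test

import (
	"context"
	"fmt"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/dbrp"
	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/kv"
	"github.com/influxdata/influxdb/v2/mock"
)

// demoDBRPInvariants is an illustrative sketch only. st is assumed to be a
// kv.Store whose dbrp buckets already exist (e.g. a migrated bolt store).
func demoDBRPInvariants(ctx context.Context, st kv.Store) {
	bucketSvc := &mock.BucketService{
		FindBucketByIDFn: func(_ context.Context, id platform.ID) (*influxdb.Bucket, error) {
			return &influxdb.Bucket{ID: id, Name: "telegraf/autogen"}, nil
		},
	}
	svc := dbrp.NewService(ctx, bucketSvc, st)

	orgID, bucketID := platform.ID(1), platform.ID(2)

	// First mapping for (orgID, "telegraf"): becomes the default automatically.
	first := &influxdb.DBRPMapping{
		Database:        "telegraf",
		RetentionPolicy: "autogen",
		OrganizationID:  orgID,
		BucketID:        bucketID,
	}
	if err := svc.Create(ctx, first); err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println(first.Default) // true

	// Same orgID, database, and retention policy: violates the uniqueness
	// invariant, so Create returns an error.
	dup := &influxdb.DBRPMapping{
		Database:        "telegraf",
		RetentionPolicy: "autogen",
		OrganizationID:  orgID,
		BucketID:        bucketID,
	}
	fmt.Println(svc.Create(ctx, dup) != nil) // true
}
```

The implementation that follows realizes this behaviour with the kv buckets listed above: the default bucket records which mapping ID is the default for each orgID/database composite key.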
- -import ( - "bytes" - "context" - "encoding/json" - "strings" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/snowflake" -) - -var ( - bucket = []byte("dbrpv1") - indexBucket = []byte("dbrpbyorganddbindexv1") - byOrgIDIndexBucket = []byte("dbrpbyorgv1") - defaultBucket = []byte("dbrpdefaultv1") -) - -var _ influxdb.DBRPMappingService = (*AuthorizedService)(nil) - -type Service struct { - store kv.Store - IDGen platform.IDGenerator - - bucketSvc influxdb.BucketService - byOrgAndDatabase *kv.Index - byOrg *kv.Index -} - -func indexForeignKey(dbrp influxdb.DBRPMapping) []byte { - return composeForeignKey(dbrp.OrganizationID, dbrp.Database) -} - -func composeForeignKey(orgID platform.ID, db string) []byte { - encID, _ := orgID.Encode() - key := make([]byte, len(encID)+len(db)) - copy(key, encID) - copy(key[len(encID):], db) - return key -} - -func NewService(ctx context.Context, bucketSvc influxdb.BucketService, st kv.Store) influxdb.DBRPMappingService { - return &Service{ - store: st, - IDGen: snowflake.NewDefaultIDGenerator(), - bucketSvc: bucketSvc, - byOrgAndDatabase: kv.NewIndex(kv.NewIndexMapping(bucket, indexBucket, func(v []byte) ([]byte, error) { - var dbrp influxdb.DBRPMapping - if err := json.Unmarshal(v, &dbrp); err != nil { - return nil, err - } - return indexForeignKey(dbrp), nil - }), kv.WithIndexReadPathEnabled), - byOrg: kv.NewIndex(ByOrgIDIndexMapping, kv.WithIndexReadPathEnabled), - } -} - -// getDefault gets the default mapping ID inside of a transaction. -func (s *Service) getDefault(tx kv.Tx, compKey []byte) ([]byte, error) { - b, err := tx.Bucket(defaultBucket) - if err != nil { - return nil, err - } - defID, err := b.Get(compKey) - if err != nil { - return nil, err - } - return defID, nil -} - -// getDefaultID returns the default mapping ID for the given orgID and db. -func (s *Service) getDefaultID(tx kv.Tx, compKey []byte) (platform.ID, error) { - defID, err := s.getDefault(tx, compKey) - if err != nil { - return 0, err - } - id := new(platform.ID) - if err := id.Decode(defID); err != nil { - return 0, err - } - return *id, nil -} - -// isDefault tells whether a mapping is the default one. -func (s *Service) isDefault(tx kv.Tx, compKey []byte, id []byte) (bool, error) { - defID, err := s.getDefault(tx, compKey) - if kv.IsNotFound(err) { - return false, nil - } - if err != nil { - return false, err - } - return bytes.Equal(id, defID), nil -} - -// isDefaultSet tells if there is a default mapping for the given composite key. -func (s *Service) isDefaultSet(tx kv.Tx, compKey []byte) (bool, error) { - b, err := tx.Bucket(defaultBucket) - if err != nil { - return false, ErrInternalService(err) - } - _, err = b.Get(compKey) - if kv.IsNotFound(err) { - return false, nil - } - if err != nil { - return false, ErrInternalService(err) - } - return true, nil -} - -// setAsDefault sets the given id as default for the given composite key. -func (s *Service) setAsDefault(tx kv.Tx, compKey []byte, id []byte) error { - b, err := tx.Bucket(defaultBucket) - if err != nil { - return ErrInternalService(err) - } - if err := b.Put(compKey, id); err != nil { - return ErrInternalService(err) - } - return nil -} - -// unsetDefault un-sets the default for the given composite key. -// Useful when a db/rp pair does not exist anymore. 
-func (s *Service) unsetDefault(tx kv.Tx, compKey []byte) error { - b, err := tx.Bucket(defaultBucket) - if err != nil { - return ErrInternalService(err) - } - if err = b.Delete(compKey); err != nil { - return ErrInternalService(err) - } - return nil -} - -// getFirstBut returns the first element in the db/rp index (not accounting for the `skipID`). -// If the length of the returned ID is 0, it means no element was found. -// The skip value is useful, for instance, if one wants to delete an element based on the result of this operation. -func (s *Service) getFirstBut(tx kv.Tx, compKey []byte, skipID []byte) (next []byte, err error) { - err = s.byOrgAndDatabase.Walk(context.Background(), tx, compKey, func(k, v []byte) (bool, error) { - if bytes.Equal(skipID, k) { - return true, nil - } - - next = k - - return false, nil - }) - return -} - -// isDBRPUnique verifies if the triple orgID-database-retention-policy is unique. -func (s *Service) isDBRPUnique(ctx context.Context, m influxdb.DBRPMapping) error { - return s.store.View(ctx, func(tx kv.Tx) error { - return s.byOrgAndDatabase.Walk(ctx, tx, composeForeignKey(m.OrganizationID, m.Database), func(k, v []byte) (bool, error) { - dbrp := &influxdb.DBRPMapping{} - if err := json.Unmarshal(v, dbrp); err != nil { - return false, ErrInternalService(err) - } - - if dbrp.ID == m.ID { - // Corner case. - // This is the very same DBRP, just skip it! - return true, nil - } - - if dbrp.RetentionPolicy == m.RetentionPolicy { - return false, ErrDBRPAlreadyExists("another DBRP mapping with same orgID, db, and rp exists") - } - - return true, nil - }) - }) -} - -// FindBy returns the mapping for the given ID. -func (s *Service) FindByID(ctx context.Context, orgID, id platform.ID) (*influxdb.DBRPMapping, error) { - encodedID, err := id.Encode() - if err != nil { - return nil, ErrInvalidDBRPID(id.String(), err) - } - - m := &influxdb.DBRPMapping{} - if err := s.store.View(ctx, func(tx kv.Tx) error { - bucket, err := tx.Bucket(bucket) - if err != nil { - return ErrInternalService(err) - } - b, err := bucket.Get(encodedID) - if err != nil { - return ErrDBRPNotFound - } - if err := json.Unmarshal(b, m); err != nil { - return ErrInternalService(err) - } - // If the given orgID is wrong, it is as if we did not found a mapping scoped to this org. - if m.OrganizationID != orgID { - return ErrDBRPNotFound - } - // Update the default value for this mapping. - m.Default, err = s.isDefault(tx, indexForeignKey(*m), encodedID) - if err != nil { - return ErrInternalService(err) - } - return nil - }); err != nil { - // if not found, fallback to virtual DBRP search - if err == ErrDBRPNotFound { - b, err := s.bucketSvc.FindBucketByID(ctx, id) - if err != nil || b == nil { - return nil, ErrDBRPNotFound - } - return bucketToMapping(b), nil - } - return nil, err - } - return m, nil -} - -// parseDBRP parses DB and RP strings out of a bucket name -func parseDBRP(bucketName string) (string, string) { - db, rp, isCut := strings.Cut(bucketName, "/") - if isCut { - return db, rp - } - return bucketName, "autogen" -} - -// FindMany returns a list of mappings that match filter and the total count of matching dbrp mappings. -// TODO(affo): find a smart way to apply FindOptions to a list of items. -func (s *Service) FindMany(ctx context.Context, filter influxdb.DBRPMappingFilter, opts ...influxdb.FindOptions) ([]*influxdb.DBRPMapping, int, error) { - // Memoize default IDs. 
- defs := make(map[string]*platform.ID) - get := func(tx kv.Tx, orgID platform.ID, db string) (*platform.ID, error) { - k := orgID.String() + db - if _, ok := defs[k]; !ok { - id, err := s.getDefaultID(tx, composeForeignKey(orgID, db)) - if kv.IsNotFound(err) { - // Still need to store a not-found result. - defs[k] = nil - } else if err != nil { - return nil, err - } else { - defs[k] = &id - } - } - return defs[k], nil - } - - ms := []*influxdb.DBRPMapping{} - add := func(tx kv.Tx) func(k, v []byte) (bool, error) { - return func(k, v []byte) (bool, error) { - m := influxdb.DBRPMapping{} - if err := json.Unmarshal(v, &m); err != nil { - return false, ErrInternalService(err) - } - // Updating the Default field must be done before filtering. - defID, err := get(tx, m.OrganizationID, m.Database) - if err != nil { - return false, ErrInternalService(err) - } - - m.Default = m.ID == *defID - if filterFunc(&m, filter) { - ms = append(ms, &m) - } - return true, nil - } - } - - err := s.store.View(ctx, func(tx kv.Tx) error { - // Optimized path, use index. - if orgID := filter.OrgID; orgID != nil { - var ( - db = "" - compKey []byte - index *kv.Index - ) - if filter.Database != nil && len(*filter.Database) > 0 { - db = *filter.Database - compKey = composeForeignKey(*orgID, db) - index = s.byOrgAndDatabase - - // Filtering by Org, Database and Default == true - if def := filter.Default; def != nil && *def { - defID, err := s.getDefault(tx, compKey) - if kv.IsNotFound(err) { - return nil - } - if err != nil { - return ErrInternalService(err) - } - bucket, err := tx.Bucket(bucket) - if err != nil { - return ErrInternalService(err) - } - v, err := bucket.Get(defID) - if err != nil { - return ErrInternalService(err) - } - _, err = add(tx)(defID, v) - return err - } - } else { - compKey, _ = orgID.Encode() - index = s.byOrg - } - - return index.Walk(ctx, tx, compKey, add(tx)) - } - bucket, err := tx.Bucket(bucket) - if err != nil { - return ErrInternalService(err) - } - cur, err := bucket.Cursor() - if err != nil { - return ErrInternalService(err) - } - - for k, v := cur.First(); k != nil; k, v = cur.Next() { - if _, err := add(tx)(k, v); err != nil { - return err - } - } - return nil - }) - if err != nil { - return ms, len(ms), err - } - - // a very general search, because if we search for database name of "hello", - // the bucket name could be "hello" (with autogen rp) or "hello/foo" which we wouldn't find - buckets, _, err := s.bucketSvc.FindBuckets(ctx, influxdb.BucketFilter{ - ID: filter.BucketID, - OrganizationID: filter.OrgID, - }, opts...) - if err != nil { - // we were unable to find any virtual mappings, so return what physical mappings we have - return ms, len(ms), nil - } -OUTER: - for _, bucket := range buckets { - if bucket == nil { - continue - } - newMapping := bucketToMapping(bucket) - // if any bucket already exists that is default for this database, - // this virtual mapping should not be the default - for _, m := range ms { - if m.Database == newMapping.Database { - if newMapping.Virtual && m.RetentionPolicy == newMapping.RetentionPolicy { - continue OUTER - } - if m.Default && newMapping.Default { - newMapping.Default = false - break - } - } - } - if filterFunc(newMapping, filter) { - ms = append(ms, newMapping) - } - } - - return ms, len(ms), nil -} - -// bucketToMapping converts a bucket to a DBRP mapping. 
-// Default if bucket name does not contain a slash (foo/bar) -func bucketToMapping(bucket *influxdb.Bucket) *influxdb.DBRPMapping { - if bucket == nil { - return nil - } - // for now, virtual DBRPs will use the same ID as their bucket to be able to find them by ID - dbrpID := bucket.ID - db, rp := parseDBRP(bucket.Name) - return &influxdb.DBRPMapping{ - ID: dbrpID, - Default: bucket.Name == db, - Database: db, - RetentionPolicy: rp, - OrganizationID: bucket.OrgID, - BucketID: bucket.ID, - Virtual: true, - } -} - -// Create creates a new mapping. -// If another mapping with same organization ID, database, and retention policy exists, an error is returned. -// If the mapping already contains a valid ID, that one is used for storing the mapping. -func (s *Service) Create(ctx context.Context, dbrp *influxdb.DBRPMapping) error { - if !dbrp.ID.Valid() { - dbrp.ID = s.IDGen.ID() - } - if err := dbrp.Validate(); err != nil { - return ErrInvalidDBRP(err) - } - - if _, err := s.bucketSvc.FindBucketByID(ctx, dbrp.BucketID); err != nil { - return err - } - - // If a dbrp with this particular ID already exists an error is returned. - if d, err := s.FindByID(ctx, dbrp.OrganizationID, dbrp.ID); err == nil && !d.Virtual { - return ErrDBRPAlreadyExists("dbrp already exist for this particular ID. If you are trying an update use the right function .Update") - } - // If a dbrp with this orgID, db, and rp exists an error is returned. - if err := s.isDBRPUnique(ctx, *dbrp); err != nil { - return err - } - - encodedID, err := dbrp.ID.Encode() - if err != nil { - return ErrInvalidDBRPID(dbrp.ID.String(), err) - } - - // OrganizationID has been validated by Validate - orgID, _ := dbrp.OrganizationID.Encode() - - return s.store.Update(ctx, func(tx kv.Tx) error { - bucket, err := tx.Bucket(bucket) - if err != nil { - return ErrInternalService(err) - } - - // populate indices - compKey := indexForeignKey(*dbrp) - if err := s.byOrgAndDatabase.Insert(tx, compKey, encodedID); err != nil { - return err - } - - if err := s.byOrg.Insert(tx, orgID, encodedID); err != nil { - return err - } - - defSet, err := s.isDefaultSet(tx, compKey) - if err != nil { - return err - } - if !defSet { - dbrp.Default = true - } - - b, err := json.Marshal(dbrp) - if err != nil { - return ErrInternalService(err) - } - if err := bucket.Put(encodedID, b); err != nil { - return ErrInternalService(err) - } - - if dbrp.Default { - if err := s.setAsDefault(tx, compKey, encodedID); err != nil { - return err - } - } - return nil - }) -} - -// Updates a mapping. -// If another mapping with same organization ID, database, and retention policy exists, an error is returned. -// Un-setting `Default` for a mapping will cause the first one to become the default. -func (s *Service) Update(ctx context.Context, dbrp *influxdb.DBRPMapping) error { - if err := dbrp.Validate(); err != nil { - return ErrInvalidDBRP(err) - } - oldDBRP, err := s.FindByID(ctx, dbrp.OrganizationID, dbrp.ID) - if err != nil { - return ErrDBRPNotFound - } - // Overwrite fields that cannot change. - dbrp.ID = oldDBRP.ID - dbrp.OrganizationID = oldDBRP.OrganizationID - dbrp.BucketID = oldDBRP.BucketID - dbrp.Database = oldDBRP.Database - - // If a dbrp with this orgID, db, and rp exists an error is returned. 
- if err := s.isDBRPUnique(ctx, *dbrp); err != nil { - return err - } - - encodedID, err := dbrp.ID.Encode() - if err != nil { - return ErrInternalService(err) - } - b, err := json.Marshal(dbrp) - if err != nil { - return ErrInternalService(err) - } - - return s.store.Update(ctx, func(tx kv.Tx) error { - bucket, err := tx.Bucket(bucket) - if err != nil { - return ErrInternalService(err) - } - if err := bucket.Put(encodedID, b); err != nil { - return err - } - compKey := indexForeignKey(*dbrp) - if dbrp.Default { - err = s.setAsDefault(tx, compKey, encodedID) - } else if oldDBRP.Default { - // This means default was unset. - // Need to find a new default. - first, ferr := s.getFirstBut(tx, compKey, encodedID) - if ferr != nil { - return ferr - } - if len(first) > 0 { - err = s.setAsDefault(tx, compKey, first) - } - // If no first was found, then this will remain the default. - } - return err - }) -} - -// Delete removes a mapping. -// Deleting a mapping that does not exists is not an error. -// Deleting the default mapping will cause the first one (if any) to become the default. -func (s *Service) Delete(ctx context.Context, orgID, id platform.ID) error { - dbrp, err := s.FindByID(ctx, orgID, id) - if err != nil { - return nil - } - encodedID, err := id.Encode() - if err != nil { - return ErrInternalService(err) - } - - encodedOrgID, err := orgID.Encode() - if err != nil { - return ErrInternalService(err) - } - - return s.store.Update(ctx, func(tx kv.Tx) error { - bucket, err := tx.Bucket(bucket) - if err != nil { - return ErrInternalService(err) - } - compKey := indexForeignKey(*dbrp) - if err := bucket.Delete(encodedID); err != nil { - return err - } - if err := s.byOrgAndDatabase.Delete(tx, compKey, encodedID); err != nil { - return ErrInternalService(err) - } - if err := s.byOrg.Delete(tx, encodedOrgID, encodedID); err != nil { - return ErrInternalService(err) - } - // If this was the default, we need to set a new default. - var derr error - if dbrp.Default { - first, err := s.getFirstBut(tx, compKey, encodedID) - if err != nil { - return err - } - if len(first) > 0 { - derr = s.setAsDefault(tx, compKey, first) - } else { - // This means no other mapping is in the index. - // Unset the default - derr = s.unsetDefault(tx, compKey) - } - } - return derr - }) -} - -// filterFunc is capable to validate if the dbrp is valid from a given filter. -// it runs true if the filtering data are contained in the dbrp. 
-func filterFunc(dbrp *influxdb.DBRPMapping, filter influxdb.DBRPMappingFilter) bool { - return (filter.ID == nil || (*filter.ID) == dbrp.ID) && - (filter.OrgID == nil || (*filter.OrgID) == dbrp.OrganizationID) && - (filter.BucketID == nil || (*filter.BucketID) == dbrp.BucketID) && - (filter.Database == nil || (*filter.Database) == dbrp.Database) && - (filter.RetentionPolicy == nil || (*filter.RetentionPolicy) == dbrp.RetentionPolicy) && - (filter.Default == nil || (*filter.Default) == dbrp.Default) && - (filter.Virtual == nil || (*filter.Virtual) == dbrp.Virtual) - -} diff --git a/dbrp/service_test.go b/dbrp/service_test.go deleted file mode 100644 index 0f69821c116..00000000000 --- a/dbrp/service_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package dbrp_test - -import ( - "context" - "fmt" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/dbrp" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - itesting "github.com/influxdata/influxdb/v2/testing" -) - -func initDBRPMappingService(f itesting.DBRPMappingFields, t *testing.T) (influxdb.DBRPMappingService, func()) { - s, closeStore := itesting.NewTestBoltStore(t) - if f.BucketSvc == nil { - f.BucketSvc = &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - // always find a bucket. - return &influxdb.Bucket{ - ID: id, - Name: fmt.Sprintf("bucket-%v", id), - }, nil - }, - FindBucketsFn: func(ctx context.Context, bf influxdb.BucketFilter, fo ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - return []*influxdb.Bucket{}, 0, nil - }, - } - } - - svc := dbrp.NewService(context.Background(), f.BucketSvc, s) - - if err := f.Populate(context.Background(), svc); err != nil { - t.Fatal(err) - } - return svc, func() { - if err := itesting.CleanupDBRPMappingsV2(context.Background(), svc); err != nil { - t.Error(err) - } - closeStore() - } -} - -func TestBoltDBRPMappingService(t *testing.T) { - t.Parallel() - itesting.DBRPMappingService(initDBRPMappingService, t) -} diff --git a/dbrp_mapping.go b/dbrp_mapping.go deleted file mode 100644 index 2bedb0a3236..00000000000 --- a/dbrp_mapping.go +++ /dev/null @@ -1,166 +0,0 @@ -package influxdb - -import ( - "context" - "strconv" - "strings" - "unicode" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// DBRPMappingService provides CRUD to DBRPMappingV2s. -type DBRPMappingService interface { - // FindBy returns the dbrp mapping for the specified ID. - // Requires orgID because every resource will be org-scoped. - FindByID(ctx context.Context, orgID, id platform.ID) (*DBRPMapping, error) - // FindMany returns a list of dbrp mappings that match filter and the total count of matching dbrp mappings. - FindMany(ctx context.Context, dbrp DBRPMappingFilter, opts ...FindOptions) ([]*DBRPMapping, int, error) - // Create creates a new dbrp mapping, if a different mapping exists an error is returned. - Create(ctx context.Context, dbrp *DBRPMapping) error - // Update a new dbrp mapping - Update(ctx context.Context, dbrp *DBRPMapping) error - // Delete removes a dbrp mapping. - // Deleting a mapping that does not exists is not an error. - // Requires orgID because every resource will be org-scoped. 
- Delete(ctx context.Context, orgID, id platform.ID) error -} - -// DBRPMapping represents a mapping of a database and retention policy to an organization ID and bucket ID. -type DBRPMapping struct { - ID platform.ID `json:"id"` - Database string `json:"database"` - RetentionPolicy string `json:"retention_policy"` - - // Default indicates if this mapping is the default for the cluster and database. - Default bool `json:"default"` - // Virtual indicates if this is a virtual mapping (tied to bucket name) or physical - Virtual bool `json:"virtual"` - - OrganizationID platform.ID `json:"orgID"` - BucketID platform.ID `json:"bucketID"` -} - -// Validate reports any validation errors for the mapping. -func (m DBRPMapping) Validate() error { - if !validName(m.Database) { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "database must contain at least one character and only be letters, numbers, '_', '-', and '.'", - } - } - if !validName(m.RetentionPolicy) { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "retentionPolicy must contain at least one character and only be letters, numbers, '_', '-', and '.'", - } - } - if !m.OrganizationID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "organizationID is required", - } - } - if !m.BucketID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "bucketID is required", - } - } - return nil -} - -// Equal checks if the two mappings are identical. -func (m *DBRPMapping) Equal(o *DBRPMapping) bool { - if m == o { - return true - } - if m == nil || o == nil { - return false - } - return m.Database == o.Database && - m.RetentionPolicy == o.RetentionPolicy && - m.Default == o.Default && - m.OrganizationID.Valid() && - o.OrganizationID.Valid() && - m.BucketID.Valid() && - o.BucketID.Valid() && - o.ID.Valid() && - m.ID == o.ID && - m.OrganizationID == o.OrganizationID && - m.BucketID == o.BucketID -} - -// DBRPMappingFilter represents a set of filters that restrict the returned results. -type DBRPMappingFilter struct { - ID *platform.ID - OrgID *platform.ID - BucketID *platform.ID - - Database *string - RetentionPolicy *string - Default *bool - Virtual *bool -} - -func (f DBRPMappingFilter) String() string { - var s strings.Builder - - s.WriteString("{ id:") - if f.ID != nil { - s.WriteString(f.ID.String()) - } else { - s.WriteString("<nil>") - } - - s.WriteString(" org_id:") - if f.OrgID != nil { - s.WriteString(f.OrgID.String()) - } else { - s.WriteString("<nil>") - } - - s.WriteString(" bucket_id:") - if f.BucketID != nil { - s.WriteString(f.BucketID.String()) - } else { - s.WriteString("<nil>") - } - - s.WriteString(" db:") - if f.Database != nil { - s.WriteString(*f.Database) - } else { - s.WriteString("<nil>") - } - - s.WriteString(" rp:") - if f.RetentionPolicy != nil { - s.WriteString(*f.RetentionPolicy) - } else { - s.WriteString("<nil>") - } - - s.WriteString(" default:") - if f.Default != nil { - s.WriteString(strconv.FormatBool(*f.Default)) - } else { - s.WriteString("<nil>") - } - s.WriteString("}") - return s.String() -} - -// validName checks whether the given name would be valid as a DB/RP name -func validName(name string) bool { - for _, r := range name { - if !unicode.IsPrint(r) { - return false - } - } - return name != "" && - name != "." && - name != ".."
&& - !strings.ContainsAny(name, `/\`) -} diff --git a/dbrp_mapping_internal_test.go b/dbrp_mapping_internal_test.go deleted file mode 100644 index 2fd0a3606eb..00000000000 --- a/dbrp_mapping_internal_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package influxdb - -import ( - "testing" -) - -func Test_validName(t *testing.T) { - tests := []struct { - arg string - name string - want bool - }{ - { - name: "names cannot have unprintable characters", - arg: string([]byte{0x0D}), - want: false, - }, - { - name: "names cannot have .", - arg: ".", - want: false, - }, - { - name: "names cannot have ..", - arg: "..", - want: false, - }, - { - name: "names cannot have /", - arg: "/", - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := validName(tt.arg); got != tt.want { - t.Errorf("validName() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/dbrp_mapping_test.go b/dbrp_mapping_test.go deleted file mode 100644 index 20c3dc9a914..00000000000 --- a/dbrp_mapping_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package influxdb_test - -import ( - "testing" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - platformtesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestDBRPMapping_Validate(t *testing.T) { - type fields struct { - Database string - RetentionPolicy string - Default bool - OrganizationID platform2.ID - BucketID platform2.ID - } - tests := []struct { - name string - fields fields - wantErr bool - }{ - { - name: "mapping requires a database", - fields: fields{ - Database: "", - }, - wantErr: true, - }, - { - name: "mapping requires an rp", - fields: fields{ - Database: "telegraf", - RetentionPolicy: "", - }, - wantErr: true, - }, - { - name: "mapping requires an orgid", - fields: fields{ - Database: "telegraf", - RetentionPolicy: "autogen", - }, - wantErr: true, - }, - { - name: "mapping requires a bucket id", - fields: fields{ - Database: "telegraf", - RetentionPolicy: "autogen", - OrganizationID: platformtesting.MustIDBase16("debac1e0deadbeef"), - }, - wantErr: true, - }, - { - name: "db cannot have non-letters/numbers/_/./-", - fields: fields{ - Database: string([]byte{0x0D}), - }, - wantErr: true, - }, - { - name: "rp cannot have non-printable characters", - fields: fields{ - Database: "telegraf", - RetentionPolicy: string([]byte{0x0D}), - }, - wantErr: true, - }, - { - name: "dash accepted as valid database", - fields: fields{ - Database: "howdy-doody", - RetentionPolicy: "autogen", - OrganizationID: platformtesting.MustIDBase16("debac1e0deadbeef"), - BucketID: platformtesting.MustIDBase16("5ca1ab1edeadbea7"), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - m := platform.DBRPMapping{ - Database: tt.fields.Database, - RetentionPolicy: tt.fields.RetentionPolicy, - Default: tt.fields.Default, - OrganizationID: tt.fields.OrganizationID, - BucketID: tt.fields.BucketID, - } - - if err := m.Validate(); (err != nil) != tt.wantErr { - t.Errorf("DBRPMapping.Validate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/delete.go b/delete.go deleted file mode 100644 index 2a40546bb3a..00000000000 --- a/delete.go +++ /dev/null @@ -1,21 +0,0 @@ -package influxdb - -import ( - "context" - - "github.com/influxdata/influxql" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// Predicate is something that can match on a series key. 
-type Predicate interface { - Clone() Predicate - Matches(key []byte) bool - Marshal() ([]byte, error) -} - -// DeleteService will delete a bucket from the range and predict. -type DeleteService interface { - DeleteBucketRangePredicate(ctx context.Context, orgID, bucketID platform.ID, min, max int64, pred Predicate, measurement influxql.Expr) error -} diff --git a/docker/flux/Dockerfile b/docker/flux/Dockerfile deleted file mode 100644 index 30ba11f8aa1..00000000000 --- a/docker/flux/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM debian:buster-slim -COPY fluxd /usr/bin/fluxd - -EXPOSE 8093 - -COPY entrypoint.sh /entrypoint.sh -ENTRYPOINT ["/entrypoint.sh"] -CMD ["fluxd"] diff --git a/docker/flux/entrypoint.sh b/docker/flux/entrypoint.sh deleted file mode 100755 index 2af4476b4a7..00000000000 --- a/docker/flux/entrypoint.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -set -e - -if [ "${1:0:1}" = '-' ]; then - set -- fluxd "$@" -fi - -exec "$@" diff --git a/docker/influxd/Dockerfile b/docker/influxd/Dockerfile deleted file mode 100644 index fb16306358d..00000000000 --- a/docker/influxd/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM debian:buster-slim AS dependency-base - -ENV DEBIAN_FRONTEND noninteractive - -RUN apt-get update \ - && apt-get install -y \ - ca-certificates \ - tzdata \ - && apt-get clean autoclean \ - && apt-get autoremove --yes \ - && rm -rf /var/lib/{apt,dpkg,cache,log} - -# NOTE: We separate these two stages so we can run the above -# quickly in CI, in case of flaky failure. -FROM dependency-base - -EXPOSE 8086 - -COPY influxd /usr/bin/ -COPY docker/influxd/entrypoint.sh /entrypoint.sh - -ENTRYPOINT ["/entrypoint.sh"] -CMD ["influxd"] diff --git a/docker/influxd/entrypoint.sh b/docker/influxd/entrypoint.sh deleted file mode 100755 index 5a4724fcc50..00000000000 --- a/docker/influxd/entrypoint.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -set -e - -if [ "${1:0:1}" = '-' ]; then - set -- influxd "$@" -fi - -exec "$@" diff --git a/document.go b/document.go deleted file mode 100644 index 989d8e682eb..00000000000 --- a/document.go +++ /dev/null @@ -1,50 +0,0 @@ -package influxdb - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// ErrDocumentNotFound is the error msg for a missing document. -const ErrDocumentNotFound = "document not found" - -// DocumentService is used to create/find instances of document stores. -type DocumentService interface { - CreateDocumentStore(ctx context.Context, name string) (DocumentStore, error) - FindDocumentStore(ctx context.Context, name string) (DocumentStore, error) -} - -// Document is a generic structure for stating data. -type Document struct { - ID platform.ID `json:"id"` - Meta DocumentMeta `json:"meta"` - Content interface{} `json:"content,omitempty"` // TODO(desa): maybe this needs to be json.Marshaller & json.Unmarshaler - Labels []*Label `json:"labels,omitempty"` // read only - - // This is needed for authorization. - // The service that passes documents around will take care of filling it - // via request parameters or others, as the kv store will take care of - // filling it once it returns a document. - // This is not stored in the kv store neither required in the API. - Organizations map[platform.ID]UserType `json:"-"` -} - -// DocumentMeta is information that is universal across documents. Ideally -// data in the meta should be indexed and queryable. 
-type DocumentMeta struct { - Name string `json:"name"` - Type string `json:"type,omitempty"` - Description string `json:"description,omitempty"` - Version string `json:"version,omitempty"` - CRUDLog -} - -// DocumentStore is used to perform CRUD operations on documents. It follows an options -// pattern that allows users to perform actions related to documents in a transactional way. -type DocumentStore interface { - CreateDocument(ctx context.Context, d *Document) error - FindDocument(ctx context.Context, id platform.ID) (*Document, error) - - FindDocuments(ctx context.Context, orgID platform.ID) ([]*Document, error) -} diff --git a/duration.go b/duration.go deleted file mode 100644 index de892e3f253..00000000000 --- a/duration.go +++ /dev/null @@ -1,39 +0,0 @@ -package influxdb - -import ( - "encoding/json" - "errors" - "time" -) - -// Duration is based on time.Duration to embed in any struct. -type Duration struct { - time.Duration -} - -// MarshalJSON implements json.Marshaler interface. -func (d Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(d.String()) -} - -// UnmarshalJSON implements json.Unmarshaler interface. -func (d *Duration) UnmarshalJSON(b []byte) error { - var v interface{} - if err := json.Unmarshal(b, &v); err != nil { - return err - } - switch value := v.(type) { - case float64: - d.Duration = time.Duration(value) - return nil - case string: - var err error - d.Duration, err = time.ParseDuration(value) - if err != nil { - return err - } - return nil - default: - return errors.New("invalid duration") - } -} diff --git a/env b/env deleted file mode 100755 index b11a285c661..00000000000 --- a/env +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -set -e - -# https://stackoverflow.com/a/246128 -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" - -verbose_flag="" -while getopts vx arg; do - case "$arg" in - v) verbose_flag="--verbose";; - x) set -x;; - esac -done -shift $((OPTIND-1)) - -export PKG_CONFIG="${DIR}/scripts/pkg-config.sh" - -# If this script is being executed, it will be executed under bash -# so the bash source variable should be present. If the variable -# matches the current script, then we are being executed and the -# default argument should be to print the environment. -if [ $# -eq 0 ] && [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then - set -- env -fi - -if [ $# -gt 0 ]; then exec "$@"; fi diff --git a/etc/check-sql-migrations.sh b/etc/check-sql-migrations.sh deleted file mode 100755 index 79accad367c..00000000000 --- a/etc/check-sql-migrations.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -# This script verifies that for all SQL migrations there is both an "up" and a "down", and that the file names match. - -upMigrations=$(find sqlite/migrations/*.up.sql | cut -f 1 -d '.') -downMigrations=$(find sqlite/migrations/*.down.sql | cut -f 1 -d '.') - -differences="$(diff -y --suppress-common-lines <(echo "$upMigrations" ) <(echo "$downMigrations"))" - -if [[ -n ${differences} ]] -then - echo '------------------------------------------------------------------------------------' - echo "Problem detected with SQL migration files: Up and Down migration names do not match!" - echo '------------------------------------------------------------------------------------' - echo "Diff: Up Migrations without Down Migrations vs. 
Down Migrations without Up Migrations:" - echo "$differences" - exit 1 -fi diff --git a/etc/checkfmt.sh b/etc/checkfmt.sh deleted file mode 100755 index ae68a02887d..00000000000 --- a/etc/checkfmt.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -go install golang.org/x/tools/cmd/goimports - -HAS_FMT_ERR=0 -# For every Go file in the project, excluding vendor... - -for file in $(go list -f '{{$dir := .Dir}}{{range .GoFiles}}{{printf "%s/%s\n" $dir .}}{{end}}{{range .TestGoFiles}}{{printf "%s/%s\n" $dir .}}{{end}}{{range .IgnoredGoFiles}}{{printf "%s/%s\n" $dir .}}{{end}}{{range .CgoFiles}}{{printf "%s/%s\n" $dir .}}{{end}}' ./... ); do - # ... if file does not contain standard generated code comment (https://golang.org/s/generatedcode)... - if ! grep -Exq '^// Code generated .* DO NOT EDIT\.$' $file; then - FMT_OUT="$(goimports -l -d $file)" - # ... and if goimports had any output... - if [[ -n "$FMT_OUT" ]]; then - if [ "$HAS_FMT_ERR" -eq "0" ]; then - # Only print this once. - HAS_FMT_ERR=1 - echo 'Commit includes files that are not gofmt-ed' && \ - echo 'run "make fmt"' && \ - echo '' - fi - echo "$FMT_OUT" # Print output and continue, so developers don't fix one file at a t - fi - fi -done - -## print at the end too... sometimes it is nice to see what to do at the end. -if [ "$HAS_FMT_ERR" -eq "1" ]; then - echo 'Commit includes files that are not gofmt-ed' && \ - echo 'run "make fmt"' && \ - echo '' -fi -exit "$HAS_FMT_ERR" diff --git a/etc/checkgenerate.sh b/etc/checkgenerate.sh deleted file mode 100755 index 53aba3943e3..00000000000 --- a/etc/checkgenerate.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -set -e - -function check_changes () { - changes="$(git status --porcelain=v1 2>/dev/null)" - if [ -n "$changes" ] ; then - echo $1 - echo "$changes" - exit 1 - fi -} - -check_changes "git is dirty before running 'make generate-sources!'" -make generate-sources -check_changes "git is dirty after running 'make generate-sources'!" diff --git a/etc/checktidy.sh b/etc/checktidy.sh deleted file mode 100755 index 395f9177aab..00000000000 --- a/etc/checktidy.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -set -e - -export GO111MODULE=on -go mod tidy - -if ! git --no-pager diff --exit-code -- go.mod go.sum; then - >&2 echo "modules are not tidy, please run 'go mod tidy'" - exit 1 -fi diff --git a/etc/circle-detect-committed-binaries.sh b/etc/circle-detect-committed-binaries.sh deleted file mode 100755 index e30fdcb55c1..00000000000 --- a/etc/circle-detect-committed-binaries.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# This script wraps detect-committed-binaries.sh in a way that is usable from the Makefile. - -# Why not put these changes into detect-committed-binaries.sh? -# Because we aren't using CircleCI for the enterprise code base, -# and I don't want the OSS copy to diverge from the enterprise copy. - -# Why not just do the conditional logic in the Makefile? -# Because I don't know Makefile syntax and practices well enough to do it in a reasonable amount of time. -# If you know how to do it, please refactor the logic into the Makefile. - -if [ -n "$CIRCLE_PR_NUMBER" ] || [ -n "$CIRCLE_PULL_REQUEST" ]; then - # We want the PR number, but sometimes it isn't set (bug on CircleCI's side). - # https://discuss.circleci.com/t/circle-pr-number-missing-from-environment-variables/3745 - CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}" - # Looks like we're running on CircleCI. 
- # You might think we could use CIRCLE_COMPARE_URL, but that compares commits with the previous push, - # not with the base branch. - # This is roughly how you're supposed to determine the base branch/sha according to Circle: - # https://circleci.com/blog/enforce-build-standards/ - PR_URL="https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/pulls/$CIRCLE_PR_NUMBER?access_token=$GITHUB_READONLY_TOKEN" - COMMIT_RANGE="$(curl -s "$PR_URL" | jq -r '.head.sha + "..." + .base.sha')" - echo "Calculated commit range: $COMMIT_RANGE" -else - # We're not running on circle. - # There's no reliable way to figure out the appropriate base commit, - # so just take a reasonable guess that we're comparing to master. - COMMIT_RANGE="HEAD...master" -fi - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -"$DIR/detect-committed-binaries.sh" "$COMMIT_RANGE" diff --git a/etc/detect-committed-binaries.sh b/etc/detect-committed-binaries.sh deleted file mode 100755 index 2607a127746..00000000000 --- a/etc/detect-committed-binaries.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -if [ $# -ne 1 ]; then - >&2 echo "Usage: $0 REVISION_RANGE - -$0 iterates through each commit in REVISION_RANGE and exits 1 if all of these conditions are met: - -1. The commit introduces a file that git considers to be a binary file. -2. The commit body does NOT contain a line of matching the pattern 'Adds-Binary: path/to/binary/file'. - The path must be relative to the repository root. - -REVISION_RANGE is typically given as mybranch...master. Note: 3 dots, not 2." - exit 1 -fi - -ORIG_IFS="$IFS" -IFS=$'\n' # Set internal field separator to newlines only, so the "for binFile" loop can handle filenames containing spaces. -BINARY_FILES_INTRODUCED=0 -for rev in $(git rev-list "$1"); do - # This loop is a bit complicated, so here is the explanation: - # First, use git log on the single revision. - # We can't use --numstat because that doesn't differentiate between binary files added and removed. - # Grep for lines indicating a Binary file went from zero to non-zero bytes. - # Then cut down to just the entire field before the pipe. This will break if we ever have a binary file whose name contains the pipe character. - # Finally, print just the first field without leading or trailing spaces. (https://unix.stackexchange.com/a/205854) - for binFile in $(git log -1 --format='' --stat=255 "$rev" | grep ' Bin 0 ->' | cut -d '|' -f 1 | awk '{$1=$1;print}'); do - # We have found a new binary file in $rev. - # Was it in the commit's whitelist? - # (GitHub seems to use \r\n on Squash&Merge commit messages, which doesn't play well with grep -x; hence the awk line.) - if git log -1 --format=%b "$rev" | awk '{ sub("\r$", ""); print }' | grep -q -F -x "Adds-Binary: $binFile"; then - # Yes it was. Skip this file. - echo "Revision $rev $(git log -1 --format='[%s]' "$rev") added whitelisted binary file: $binFile" - continue - fi - - echo "Revision $rev $(git log -1 --format='[%s]' "$rev") introduced binary file: $binFile" - BINARY_FILES_INTRODUCED=1 - done -done -IFS="$ORIG_IFS" - -if [ $BINARY_FILES_INTRODUCED -eq 1 ]; then - echo - echo '--------------------------------------------------' - echo "This changeset introduced unexpected binary files. - -If you meant to include them, amend the commit(s) that introduced the file(s), -to include a line that matches exactly 'Adds-Binary: path/to/binary/file'. 
- -If you did not mean to include the file, please add an appropriate line to .gitignore -so that other developers do not mistakenly commit that file, and please amend your commit -to remove the file." - exit 1 -fi diff --git a/etc/fmt.sh b/etc/fmt.sh deleted file mode 100755 index e75e3731236..00000000000 --- a/etc/fmt.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -go install golang.org/x/tools/cmd/goimports - -# For every Go file in the project, excluding vendor... -for file in $(go list -f '{{$dir := .Dir}}{{range .GoFiles}}{{printf "%s/%s\n" $dir .}}{{end}}{{range .TestGoFiles}}{{printf "%s/%s\n" $dir .}}{{end}}{{range .IgnoredGoFiles}}{{printf "%s/%s\n" $dir .}}{{end}}{{range .CgoFiles}}{{printf "%s/%s\n" $dir .}}{{end}}' ./... ); do - # ... if file does not contain standard generated code comment (https://golang.org/s/generatedcode)... - if ! grep -Exq '^// Code generated .* DO NOT EDIT\.$' $file; then - gofmt -w -s $file - goimports -w $file - fi -done diff --git a/etc/pinger.sh b/etc/pinger.sh deleted file mode 100755 index 20745c186ef..00000000000 --- a/etc/pinger.sh +++ /dev/null @@ -1,6 +0,0 @@ -ping_cancelled=false # Keep track of whether the loop was cancelled, or succeeded -until nc -z 127.0.0.1 8086; do :; done & -trap "kill $!; ping_cancelled=true" SIGINT -wait $! # Wait for the loop to exit, one way or another -trap - INT # Remove the trap, now we're done with it -echo "Done pinging, cancelled=$ping_cancelled" diff --git a/etc/test-flux.sh b/etc/test-flux.sh deleted file mode 100755 index 02b193cfc49..00000000000 --- a/etc/test-flux.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash -set -eu -o pipefail -readonly GO=${GO:-go} - -log() { - local now - now=$(date '+%Y/%m/%d %H:%M:%S') - echo "[${now}]" "$@" -} - -determine_flux_revision() { - local version revision - version=$("$GO" list -m -f '{{.Version}}' github.com/influxdata/flux) - revision=$(printf "%s" "${version}" | cut -d- -f 3) - if [[ ${revision} != "" ]]; then - printf "%s\n" "${revision}" - else - printf "%s\n" "${version}" - fi -} - -download_flux_archive() { - local revision - revision=$(determine_flux_revision) - log "Downloading flux archive (${revision})..." - curl -sLo flux.zip "https://github.com/influxdata/flux/archive/${revision}.zip" -} - -build_test_harness() { - log "Building test harness..." - "$GO" build -o fluxtest ./internal/cmd/fluxtest-harness-influxdb -} - -skipped_tests() { - doc=$(cat <&2 echo Running go tests... - INFLUXDB_TEST_SSL_CERT_PATH="${cert}" INFLUXDB_TEST_SSL_KEY_PATH="${key}" "${GO}" test -mod=readonly ./cmd/influxd/launcher/_tlstests -} - -main() { - local cert key - cert=$(pwd)/test.crt - key=$(pwd)/test.key - - trap "rm -f '${cert}' '${key}'" EXIT - generate_keypair "${cert}" "${key}" - run_tls_tests "${cert}" "${key}" -} -main diff --git a/flags.yml b/flags.yml deleted file mode 100644 index 324bd4164e2..00000000000 --- a/flags.yml +++ /dev/null @@ -1,99 +0,0 @@ -# This file defines feature flags. -# -# It is used for code generation in the ./kit/feature package. -# If you change this file, run `make flags` to regenerate. 
-# -# Format details: -# -# - name: Human-readable name -# description: Human-readable description -# key: Programmatic name -# default: Used when unable to reach server and to infer flag type -# contact: Contact for information or issues regarding the flag -# lifetime: Expected lifetime of the flag; temporary or permanent, default temporary -# expose: Boolean indicating whether the flag should be exposed to callers; default false - -- name: App Metrics - description: Send UI Telementry to Tools cluster - should always be false in OSS - key: appMetrics - default: false - expose: true - contact: Bucky, Monitoring Team - lifetime: permanent - -- name: Group Window Aggregate Transpose - description: Enables the GroupWindowAggregateTransposeRule for all enabled window aggregates - key: groupWindowAggregateTranspose - default: false - contact: Query Team - -- name: New Label Package - description: Enables the refactored labels api - key: newLabels - default: false - contact: Alirie Gray - lifetime: temporary - -- name: Memory Optimized Fill - description: Enable the memory optimized fill() - key: memoryOptimizedFill - default: false - contact: Query Team - lifetime: temporary - -- name: Memory Optimized Schema Mutation - description: Enable the memory optimized schema mutation functions - key: memoryOptimizedSchemaMutation - default: false - contact: Query Team - lifetime: temporary - -- name: Query Tracing - description: Turn on query tracing for queries that are sampled - key: queryTracing - default: false - contact: Query Team - lifetime: permanent - -- name: Inject Latest Success Time - description: Inject the latest successful task run timestamp into a Task query extern when executing. - key: injectLatestSuccessTime - default: false - contact: Compute Team - -- name: Enforce Organization Dashboard Limits - description: Enforces the default limit params for the dashboards api when orgs are set - key: enforceOrgDashboardLimits - default: false - contact: Compute Team - -- name: Time Filter Flags - description: Filter task run list based on before and after flags - key: timeFilterFlags - contact: Compute Team - default: false - expose: true - -- name: Default Monaco Selection to EOF - description: Positions the cursor at the end of the line(s) when using the monaco editor - key: cursorAtEOF - default: false - contact: Monitoring Team - expose: true - lifetime: temporary - -- name: Refresh Single Cell - description: Refresh a single cell on the dashboard rather than the entire dashboard - key: refreshSingleCell - default: true - contact: Monitoring Team - expose: true - lifetime: temporary - -- name: New Dashboard Autorefresh - description: Enables the new dashboard autorefresh controls in the UI - key: newAutoRefresh - default: true - contact: Monitoring Team - expose: true - lifetime: temporary diff --git a/flux/client.go b/flux/client.go deleted file mode 100644 index ab392ec0ff6..00000000000 --- a/flux/client.go +++ /dev/null @@ -1,86 +0,0 @@ -package flux - -import ( - "context" - "crypto/tls" - "fmt" - "io" - "net/http" - "net/url" - "time" -) - -// Shared transports for all clients to prevent leaking connections. -var ( - skipVerifyTransport = &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - defaultTransport = &http.Transport{} -) - -// Client is how we interact with Flux. -type Client struct { - URL *url.URL - InsecureSkipVerify bool - Timeout time.Duration -} - -// Ping checks the connection of a Flux. 
-func (c *Client) Ping(ctx context.Context) error { - t := 2 * time.Second - if c.Timeout > 0 { - t = c.Timeout - } - ctx, cancel := context.WithTimeout(ctx, t) - defer cancel() - err := c.pingTimeout(ctx) - return err -} - -func (c *Client) pingTimeout(ctx context.Context) error { - resps := make(chan (error)) - go func() { - resps <- c.ping(c.URL) - }() - - select { - case resp := <-resps: - return resp - case <-ctx.Done(): - return fmt.Errorf("request to backend timed out") - } -} - -func (c *Client) ping(u *url.URL) error { - u.Path = "ping" - - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return err - } - - hc := &http.Client{} - if c.InsecureSkipVerify { - hc.Transport = skipVerifyTransport - } else { - hc.Transport = defaultTransport - } - - resp, err := hc.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusNoContent { - var err = fmt.Errorf(string(body)) - return err - } - - return nil -} diff --git a/fluxinit/init.go b/fluxinit/init.go deleted file mode 100644 index ff11815dfbf..00000000000 --- a/fluxinit/init.go +++ /dev/null @@ -1,22 +0,0 @@ -// This package imports all the influxdb-specific query builtins. From influxdb -// we must use this package and not the init package provided by flux. -// -// This package is used for initializing with a function call. As a -// convenience, the fluxinit/static package can be imported for use cases where -// static initialization is okay, such as tests. -package fluxinit - -import ( - "github.com/influxdata/flux/runtime" - _ "github.com/influxdata/flux/stdlib" - - // Import the stdlib - _ "github.com/influxdata/influxdb/v2/query/stdlib" -) - -// The FluxInit() function prepares the runtime for compilation and execution -// of Flux. This is a costly step and should only be performed if the intention -// is to compile and execute flux code. -func FluxInit() { - runtime.FinalizeBuiltIns() -} diff --git a/fluxinit/static/static.go b/fluxinit/static/static.go deleted file mode 100644 index cfa7888c3a4..00000000000 --- a/fluxinit/static/static.go +++ /dev/null @@ -1,11 +0,0 @@ -// The fluxinit/static package can be imported in test cases and other uses -// cases where it is okay to always initialize flux. -package static - -import ( - "github.com/influxdata/influxdb/v2/fluxinit" -) - -func init() { - fluxinit.FluxInit() -} diff --git a/gather/README.md b/gather/README.md deleted file mode 100644 index 5d097746eee..00000000000 --- a/gather/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# How to use this package - -## Make sure nats is running. 
Both publisher and subscriber are open - -```go -// NATS streaming server -m.natsServer = nats.NewServer(nats.Config{FilestoreDir: m.natsPath}) -if err := m.natsServer.Open(); err != nil { - m.logger.Error("Failed to start nats streaming server", zap.Error(err)) - return err -} - -publisher := nats.NewAsyncPublisher("nats-publisher") -if err := publisher.Open(); err != nil { - m.logger.Error("Failed to connect to streaming server", zap.Error(err)) - return err -} - -subscriber := nats.NewQueueSubscriber("nats-subscriber") -if err := subscriber.Open(); err != nil { - m.logger.Error("Failed to connect to streaming server", zap.Error(err)) - return err -} -``` - -## Make sure the scraperTargetStorageService is accessible - -```go -scraperTargetSvc influxdb.ScraperTargetStoreService = m.boltClient -``` - -## Set up the recorder, and make sure the subscriber uses the correct recorder with the correct write service - -```go -recorder := gather.PlatformWriter{ - Timeout: time.Millisecond * 30, - Writer: writer, -} -subscriber.Subscribe(MetricsSubject, "", &RecorderHandler{ - Logger: logger, - Recorder: recorder, -}) -``` - -## Start the scheduler - -```go -scraperScheduler, err := gather.NewScheduler(10, m.logger, scraperTargetSvc, publisher, subscriber, 0, 0) -if err != nil { - m.logger.Error("Failed to create scraper subscriber", zap.Error(err)) - return err -} -``` \ No newline at end of file diff --git a/gather/metrics.go b/gather/metrics.go deleted file mode 100644 index 6e95c91c5e4..00000000000 --- a/gather/metrics.go +++ /dev/null @@ -1,69 +0,0 @@ -package gather - -import ( - "bytes" - "io" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/models" - dto "github.com/prometheus/client_model/go" -) - -// MetricsCollection is the struct including metrics and other requirements. -type MetricsCollection struct { - OrgID platform.ID `json:"orgID"` - BucketID platform.ID `json:"bucketID"` - MetricsSlice MetricsSlice `json:"metrics"` -} - -// Metrics is the default influx based metrics. -type Metrics struct { - Name string `json:"name"` - Tags map[string]string `json:"tags"` - Fields map[string]interface{} `json:"fields"` - Timestamp time.Time `json:"timestamp"` - Type dto.MetricType `json:"type"` -} - -// MetricsSlice is a slice of Metrics -type MetricsSlice []Metrics - -// Points converts the MetricsSlice to models.Points -func (ms MetricsSlice) Points() (models.Points, error) { - ps := make([]models.Point, len(ms)) - for mi, m := range ms { - point, err := models.NewPoint(m.Name, models.NewTags(m.Tags), m.Fields, m.Timestamp) - if err != nil { - return ps, err - } - - ps[mi] = point - } - return ps, nil -} - -// Reader returns an io.Reader that enumerates the metrics. -// All metrics are allocated into the underlying buffer.
-func (ms MetricsSlice) Reader() (io.Reader, error) { - buf := new(bytes.Buffer) - for mi, m := range ms { - point, err := models.NewPoint(m.Name, models.NewTags(m.Tags), m.Fields, m.Timestamp) - if err != nil { - return nil, err - } - - _, err = buf.WriteString(point.String()) - if err != nil { - return nil, err - } - - if mi < len(ms)-1 && len(ms) > 1 { - _, err = buf.WriteString("\n") - if err != nil { - return nil, err - } - } - } - return buf, nil -} diff --git a/gather/metrics_test.go b/gather/metrics_test.go deleted file mode 100644 index 8f94fa6ecec..00000000000 --- a/gather/metrics_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package gather - -import ( - "bytes" - "encoding/json" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - dto "github.com/prometheus/client_model/go" -) - -func TestMetricsReader(t *testing.T) { - cases := []struct { - name string - ms MetricsSlice - wants string - or string - }{ - { - name: "single value only", - ms: []Metrics{ - { - Name: "cpu_load_short", - Fields: map[string]interface{}{ - "value": 0.64, - }, - Type: -1, - Timestamp: time.Unix(0, 1422568543702900257), - }, - }, - wants: "cpu_load_short value=0.64 1422568543702900257", - }, - { - name: "gauge with type label produces lineprotocol with one type", - ms: []Metrics{ - { - Name: "error_metric", - Tags: map[string]string{ - "type": "gauge", - }, - Fields: map[string]interface{}{ - "value": "yes", - }, - Type: dto.MetricType_GAUGE, - Timestamp: time.Unix(0, 1422568543702900257), - }, - }, - wants: `error_metric,type=gauge value="yes" 1422568543702900257`, - }, - { - name: "single regular metrics", - ms: []Metrics{ - { - Name: "cpu_load_short", - Tags: map[string]string{ - "host": "server01", - "region": "us-west", - }, - Fields: map[string]interface{}{ - "value": 0.64, - }, - Type: dto.MetricType_GAUGE, - Timestamp: time.Unix(0, 1422568543702900257), - }, - }, - wants: "cpu_load_short,host=server01,region=us-west value=0.64 1422568543702900257", - }, - { - name: "multiple value only", - ms: []Metrics{ - { - Name: "cpu_load_short", - Fields: map[string]interface{}{ - "value": 0.64, - "region": "us-west", - }, - Type: -1, - Timestamp: time.Unix(0, 1522568543702900257), - }, - }, - wants: `cpu_load_short region="us-west",value=0.64 1522568543702900257`, - }, - { - name: "multiple metrics", - ms: []Metrics{ - { - Name: "cpu_load_short", - Tags: map[string]string{ - "region": "us-west", - }, - Fields: map[string]interface{}{ - "value": 0.64, - }, - Type: -1, - Timestamp: time.Unix(0, 1422568543702900257), - }, - { - Name: "cpu_load_short", - Tags: map[string]string{ - "region": "us-east", - }, - Fields: map[string]interface{}{ - "value": 0.34, - }, - Type: -1, - Timestamp: time.Unix(0, 1522568543702900257), - }, - }, - wants: "cpu_load_short,region=us-west value=0.64 1422568543702900257\ncpu_load_short,region=us-east value=0.34 1522568543702900257", - }, - } - for _, c := range cases { - r, err := c.ms.Reader() - if err != nil { - t.Fatalf("error in convert metrics to reader: %v", err) - } - buf := new(bytes.Buffer) - buf.ReadFrom(r) - - if diff1 := cmp.Diff(c.wants, buf.String(), nil); diff1 != "" { - if diff2 := cmp.Diff(c.or, buf.String(), nil); diff2 != "" { - t.Fatalf("convert metrics is incorrect, diff %s", diff1) - } - } - } -} - -func TestMetricsMarshal(t *testing.T) { - cases := []struct { - name string - ms []Metrics - }{ - { - name: "empty", - ms: make([]Metrics, 0), - }, - { - name: "single", - ms: []Metrics{ - { - Timestamp: time.Unix(12345, 0), - Tags: 
map[string]string{ - "b": "B", - "a": "A", - "c": "C", - }, - Fields: map[string]interface{}{ - "x": 12.3, - "y": "a long string", - }, - Type: dto.MetricType_SUMMARY, - }, - }, - }, - { - name: "multiple", - ms: []Metrics{ - { - Timestamp: time.Unix(12345, 0), - Tags: map[string]string{ - "b": "B", - "a": "A", - "c": "C", - }, - Fields: map[string]interface{}{ - "x": 12.3, - "y": "a long string", - }, - Type: dto.MetricType_SUMMARY, - }, - - { - Timestamp: time.Unix(12345, 0), - Tags: map[string]string{ - "b": "B2", - "a": "A2", - "c": "C2", - }, - Fields: map[string]interface{}{ - "x": 12.5, - "y": "a long string2", - }, - Type: dto.MetricType_GAUGE, - }, - }, - }, - } - for _, c := range cases { - b, err := json.Marshal(c.ms) - if err != nil { - t.Fatalf("error in marshaling metrics: %v", err) - } - result := make([]Metrics, 0) - err = json.Unmarshal(b, &result) - if err != nil { - t.Fatalf("error in unmarshalling metrics: b: %s, %v", string(b), err) - } - if diff := cmp.Diff(c.ms, result, nil); diff != "" { - t.Fatalf("unmarshalling metrics is incorrect, want %v, got %v", c.ms, result) - } - } -} diff --git a/gather/prometheus.go b/gather/prometheus.go deleted file mode 100644 index 568b293feb0..00000000000 --- a/gather/prometheus.go +++ /dev/null @@ -1,190 +0,0 @@ -package gather - -import ( - "crypto/tls" - "fmt" - "io" - "math" - "mime" - "net/http" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/matttproud/golang_protobuf_extensions/pbutil" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" -) - -// prometheusScraper handles parsing prometheus metrics. -// implements Scraper interfaces. -type prometheusScraper struct { - insecureHttp *http.Client -} - -// newPrometheusScraper create a new prometheusScraper. -func newPrometheusScraper() *prometheusScraper { - customTransport := http.DefaultTransport.(*http.Transport).Clone() - customTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - client := &http.Client{Transport: customTransport} - - return &prometheusScraper{insecureHttp: client} -} - -// Gather parse metrics from a scraper target url. 
-func (p *prometheusScraper) Gather(target influxdb.ScraperTarget) (collected MetricsCollection, err error) { - var ( - resp *http.Response - ) - - if target.AllowInsecure { - resp, err = p.insecureHttp.Get(target.URL) - } else { - resp, err = http.Get(target.URL) - } - - if err != nil { - return collected, err - } - defer resp.Body.Close() - - return p.parse(resp.Body, resp.Header, target) -} - -func (p *prometheusScraper) parse(r io.Reader, header http.Header, target influxdb.ScraperTarget) (collected MetricsCollection, err error) { - var parser expfmt.TextParser - now := time.Now() - - mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type")) - if err != nil && err.Error() == "mime: no media type" { - mediatype = "text/plain" - } else if err != nil { - return collected, err - } - // Prepare output - metricFamilies := make(map[string]*dto.MetricFamily) - if mediatype == "application/vnd.google.protobuf" && - params["encoding"] == "delimited" && - params["proto"] == "io.prometheus.client.MetricFamily" { - for { - mf := &dto.MetricFamily{} - if _, err := pbutil.ReadDelimited(r, mf); err != nil { - if err == io.EOF { - break - } - return collected, fmt.Errorf("reading metric family protocol buffer failed: %s", err) - } - metricFamilies[mf.GetName()] = mf - } - } else { - metricFamilies, err = parser.TextToMetricFamilies(r) - if err != nil { - return collected, fmt.Errorf("reading text format failed: %s", err) - } - } - ms := make([]Metrics, 0) - - // read metrics - for name, family := range metricFamilies { - for _, m := range family.Metric { - // reading tags - tags := makeLabels(m) - // reading fields - var fields map[string]interface{} - switch family.GetType() { - case dto.MetricType_SUMMARY: - // summary metric - fields = makeQuantiles(m) - fields["count"] = float64(m.GetSummary().GetSampleCount()) - - ss := float64(m.GetSummary().GetSampleSum()) - if !math.IsNaN(ss) { - fields["sum"] = ss - } - case dto.MetricType_HISTOGRAM: - // histogram metric - fields = makeBuckets(m) - fields["count"] = float64(m.GetHistogram().GetSampleCount()) - - ss := float64(m.GetHistogram().GetSampleSum()) - if !math.IsNaN(ss) { - fields["sum"] = ss - } - default: - // standard metric - fields = getNameAndValue(m) - } - if len(fields) == 0 { - continue - } - tm := now - if m.TimestampMs != nil && *m.TimestampMs > 0 { - tm = time.Unix(0, *m.TimestampMs*1000000) - } - me := Metrics{ - Timestamp: tm, - Tags: tags, - Fields: fields, - Name: name, - Type: family.GetType(), - } - ms = append(ms, me) - } - - } - - collected = MetricsCollection{ - MetricsSlice: ms, - OrgID: target.OrgID, - BucketID: target.BucketID, - } - - return collected, nil -} - -// Get labels from metric -func makeLabels(m *dto.Metric) map[string]string { - result := map[string]string{} - for _, lp := range m.Label { - result[lp.GetName()] = lp.GetValue() - } - return result -} - -// Get Buckets from histogram metric -func makeBuckets(m *dto.Metric) map[string]interface{} { - fields := make(map[string]interface{}) - for _, b := range m.GetHistogram().Bucket { - fields[fmt.Sprint(b.GetUpperBound())] = float64(b.GetCumulativeCount()) - } - return fields -} - -// Get name and value from metric -func getNameAndValue(m *dto.Metric) map[string]interface{} { - fields := make(map[string]interface{}) - if m.Gauge != nil { - if !math.IsNaN(m.GetGauge().GetValue()) { - fields["gauge"] = float64(m.GetGauge().GetValue()) - } - } else if m.Counter != nil { - if !math.IsNaN(m.GetCounter().GetValue()) { - fields["counter"] = 
float64(m.GetCounter().GetValue()) - } - } else if m.Untyped != nil { - if !math.IsNaN(m.GetUntyped().GetValue()) { - fields["value"] = float64(m.GetUntyped().GetValue()) - } - } - return fields -} - -// Get Quantiles from summary metric -func makeQuantiles(m *dto.Metric) map[string]interface{} { - fields := make(map[string]interface{}) - for _, q := range m.GetSummary().Quantile { - if !math.IsNaN(q.GetValue()) { - fields[fmt.Sprint(q.GetQuantile())] = float64(q.GetValue()) - } - } - return fields -} diff --git a/gather/scheduler.go b/gather/scheduler.go deleted file mode 100644 index 40ae6dc81b1..00000000000 --- a/gather/scheduler.go +++ /dev/null @@ -1,136 +0,0 @@ -package gather - -import ( - "context" - "sync" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/storage" - "go.uber.org/zap" -) - -// Scheduler is a struct to run scrape jobs. -type Scheduler struct { - Targets influxdb.ScraperTargetStoreService - // Interval is between each metrics gathering event. - Interval time.Duration - - log *zap.Logger - - scrapeRequest chan *influxdb.ScraperTarget - done chan struct{} - wg sync.WaitGroup - writer storage.PointsWriter -} - -// NewScheduler creates a new Scheduler and subscriptions for scraper jobs. -func NewScheduler( - log *zap.Logger, - scrapeQueueLength int, - scrapesInProgress int, - targets influxdb.ScraperTargetStoreService, - writer storage.PointsWriter, - interval time.Duration, -) (*Scheduler, error) { - if interval == 0 { - interval = 60 * time.Second - } - scheduler := &Scheduler{ - Targets: targets, - Interval: interval, - log: log, - scrapeRequest: make(chan *influxdb.ScraperTarget, scrapeQueueLength), - done: make(chan struct{}), - - writer: writer, - } - - scheduler.wg.Add(1) - scraperPool := make(chan *prometheusScraper, scrapesInProgress) - for i := 0; i < scrapesInProgress; i++ { - scraperPool <- newPrometheusScraper() - } - go func() { - defer scheduler.wg.Done() - for { - select { - case req := <-scheduler.scrapeRequest: - select { - // Each request must acquire a scraper from the (limited) pool to run the scrape, - // then return it to the pool - case scraper := <-scraperPool: - scheduler.doScrape(scraper, req, func(s *prometheusScraper) { - scraperPool <- s - }) - case <-scheduler.done: - return - } - case <-scheduler.done: - return - } - } - }() - - scheduler.wg.Add(1) - go func() { - defer scheduler.wg.Done() - ticker := time.NewTicker(scheduler.Interval) - defer ticker.Stop() - for { - select { - case <-scheduler.done: - return - case <-ticker.C: - scheduler.doGather() - } - } - }() - - return scheduler, nil -} - -func (s *Scheduler) doScrape(scraper *prometheusScraper, req *influxdb.ScraperTarget, releaseScraper func(s *prometheusScraper)) { - s.wg.Add(1) - go func() { - defer s.wg.Done() - defer releaseScraper(scraper) - if req == nil { - return - } - logger := s.log.With(zap.String("scraper-name", req.Name)) - ms, err := scraper.Gather(*req) - if err != nil { - logger.Error("Unable to gather", zap.Error(err)) - return - } - ps, err := ms.MetricsSlice.Points() - if err != nil { - logger.Error("Unable to gather list of points", zap.Error(err)) - } - err = s.writer.WritePoints(context.Background(), ms.OrgID, ms.BucketID, ps) - if err != nil { - logger.Error("Unable to write gathered points", zap.Error(err)) - } - }() -} - -func (s *Scheduler) doGather() { - targets, err := s.Targets.ListTargets(context.Background(), influxdb.ScraperTargetFilter{}) - if err != nil { - s.log.Error("Cannot list targets",
zap.Error(err)) - return - } - for i := range targets { - select { - case s.scrapeRequest <- &targets[i]: - default: - s.log.Warn("Skipping scrape due to scraper backlog", zap.String("target", targets[i].Name)) - } - } -} - -func (s *Scheduler) Close() { - close(s.done) - s.wg.Wait() -} diff --git a/gather/scheduler_test.go b/gather/scheduler_test.go deleted file mode 100644 index a3375235ab2..00000000000 --- a/gather/scheduler_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package gather - -import ( - "context" - "net/http/httptest" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/models" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestScheduler(t *testing.T) { - totalGatherJobs := 20 - - // Create top level logger - logger := zaptest.NewLogger(t) - ts := httptest.NewServer(&mockHTTPHandler{ - responseMap: map[string]string{ - "/metrics": sampleRespSmall, - }, - }) - defer ts.Close() - - storage := &mockStorage{ - Metrics: make(map[time.Time]Metrics), - Targets: []influxdb.ScraperTarget{ - { - ID: influxdbtesting.MustIDBase16("3a0d0a6365646120"), - Type: influxdb.PrometheusScraperType, - URL: ts.URL + "/metrics", - OrgID: *orgID, - BucketID: *bucketID, - }, - }, - } - - gatherJobs := make(chan []models.Point) - done := make(chan struct{}) - writer := &mock.PointsWriter{} - writer.WritePointsFn = func(ctx context.Context, orgID platform.ID, bucketID platform.ID, points []models.Point) error { - select { - case gatherJobs <- points: - case <-done: - } - return nil - } - - scheduler, err := NewScheduler(logger, 10, 2, storage, writer, 1*time.Millisecond) - require.NoError(t, err) - defer scheduler.Close() - defer close(done) //don't block the points writer forever - - // make sure all jobs are done - pointWrites := [][]models.Point{} - for i := 0; i < totalGatherJobs; i++ { - newWrite := <-gatherJobs - pointWrites = append(pointWrites, newWrite) - assert.Equal(t, 1, len(newWrite)) - newWrite[0].SetTime(time.Unix(0, 0)) // zero out the time so we don't have to compare it - assert.Equal(t, "go_goroutines gauge=36 0", newWrite[0].String()) - } - - if len(pointWrites) < totalGatherJobs { - t.Fatalf("metrics stored less than expected, got len %d", len(storage.Metrics)) - } -} - -const sampleRespSmall = ` -# HELP go_goroutines Number of goroutines that currently exist. -# TYPE go_goroutines gauge -go_goroutines 36 -` diff --git a/gather/scraper.go b/gather/scraper.go deleted file mode 100644 index 1d90008e9c9..00000000000 --- a/gather/scraper.go +++ /dev/null @@ -1,12 +0,0 @@ -package gather - -import ( - "context" - - "github.com/influxdata/influxdb/v2" -) - -// Scraper gathers metrics from a scraper target. 
-type Scraper interface { - Gather(ctx context.Context, target influxdb.ScraperTarget) (collected MetricsCollection, err error) -} diff --git a/gather/scraper_test.go b/gather/scraper_test.go deleted file mode 100644 index 7a9d2668a12..00000000000 --- a/gather/scraper_test.go +++ /dev/null @@ -1,300 +0,0 @@ -package gather - -import ( - "context" - "net/http" - "net/http/httptest" - "reflect" - "sync" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - dto "github.com/prometheus/client_model/go" -) - -var ( - orgID, _ = platform.IDFromString("020f755c3c082000") - bucketID, _ = platform.IDFromString("020f755c3c082001") -) - -func TestPrometheusScraper(t *testing.T) { - cases := []struct { - name string - ms []Metrics - handler *mockHTTPHandler - hasErr bool - }{ - { - name: "bad request", - hasErr: true, - }, - { - name: "empty request", - handler: &mockHTTPHandler{ - responseMap: map[string]string{ - "/metrics": "", - }, - }, - hasErr: true, - }, - { - name: "regular metrics", - handler: &mockHTTPHandler{ - responseMap: map[string]string{ - "/metrics": sampleResp, - }, - }, - ms: []Metrics{ - { - Name: "go_gc_duration_seconds", - Type: dto.MetricType_SUMMARY, - Fields: map[string]interface{}{ - "count": float64(326), - "sum": 0.07497837, - "0": 3.6257e-05, - "0.25": 0.0001434, - "0.5": 0.000194491, - "0.75": 0.000270339, - "1": 0.000789365, - }, - Tags: map[string]string{}, - }, - { - Name: "go_goroutines", - Type: dto.MetricType_GAUGE, - Tags: map[string]string{}, - Fields: map[string]interface{}{ - "gauge": float64(36), - }, - }, - { - Name: "go_info", - Type: dto.MetricType_GAUGE, - Tags: map[string]string{ - "version": "go1.10.3", - }, - Fields: map[string]interface{}{ - "gauge": float64(1), - }, - }, - { - Name: "go_memstats_alloc_bytes", - Type: dto.MetricType_GAUGE, - Tags: map[string]string{}, - Fields: map[string]interface{}{ - "gauge": 2.0091312e+07, - }, - }, - { - Name: "go_memstats_alloc_bytes_total", - Type: dto.MetricType_COUNTER, - Fields: map[string]interface{}{ - "counter": 4.183173328e+09, - }, - Tags: map[string]string{}, - }, - { - Name: "go_memstats_buck_hash_sys_bytes", - Type: dto.MetricType_GAUGE, - Tags: map[string]string{}, - Fields: map[string]interface{}{ - "gauge": 1.533852e+06, - }, - }, - { - Name: "go_memstats_frees_total", - Type: dto.MetricType_COUNTER, - Tags: map[string]string{}, - Fields: map[string]interface{}{ - "counter": 1.8944339e+07, - }, - }, - { - Name: "go_memstats_gc_cpu_fraction", - Type: dto.MetricType_GAUGE, - Tags: map[string]string{}, - Fields: map[string]interface{}{ - "gauge": 1.972734963012756e-05, - }, - }, - }, - hasErr: false, - }, - } - for _, c := range cases { - scraper := newPrometheusScraper() - var url string - if c.handler != nil { - ts := httptest.NewServer(c.handler) - defer ts.Close() - url = ts.URL - } - results, err := scraper.Gather(influxdb.ScraperTarget{ - URL: url + "/metrics", - OrgID: *orgID, - BucketID: *bucketID, - }) - if err != nil && !c.hasErr { - t.Fatalf("scraper parse err in testing %s: %v", c.name, err) - } - if len(c.ms) != len(results.MetricsSlice) { - t.Fatalf("scraper parse metrics incorrect length, want %d, got %d", - len(c.ms), len(results.MetricsSlice)) - } - for _, m := range results.MetricsSlice { - for _, cm := range c.ms { - if m.Name == cm.Name { - if diff := cmp.Diff(m, cm, metricsCmpOption); diff != "" { - t.Fatalf("scraper parse metrics want %v, got %v", cm, m) - } 
- } - } - - } - } -} - -const sampleResp = ` -# HELP go_gc_duration_seconds A summary of the GC invocation durations. -# TYPE go_gc_duration_seconds summary -go_gc_duration_seconds{quantile="0"} 3.6257e-05 -go_gc_duration_seconds{quantile="0.25"} 0.0001434 -go_gc_duration_seconds{quantile="0.5"} 0.000194491 -go_gc_duration_seconds{quantile="0.75"} 0.000270339 -go_gc_duration_seconds{quantile="1"} 0.000789365 -go_gc_duration_seconds_sum 0.07497837 -go_gc_duration_seconds_count 326 -# HELP go_goroutines Number of goroutines that currently exist. -# TYPE go_goroutines gauge -go_goroutines 36 -# HELP go_info Information about the Go environment. -# TYPE go_info gauge -go_info{version="go1.10.3"} 1 -# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. -# TYPE go_memstats_alloc_bytes gauge -go_memstats_alloc_bytes 2.0091312e+07 -# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. -# TYPE go_memstats_alloc_bytes_total counter -go_memstats_alloc_bytes_total 4.183173328e+09 -# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. -# TYPE go_memstats_buck_hash_sys_bytes gauge -go_memstats_buck_hash_sys_bytes 1.533852e+06 -# HELP go_memstats_frees_total Total number of frees. -# TYPE go_memstats_frees_total counter -go_memstats_frees_total 1.8944339e+07 -# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started. -# TYPE go_memstats_gc_cpu_fraction gauge -go_memstats_gc_cpu_fraction 1.972734963012756e-05 -` - -// mockStorage implement recorder interface -// and influxdb.ScraperTargetStoreService interface. -type mockStorage struct { - sync.RWMutex - influxdb.UserResourceMappingService - influxdb.OrganizationService - Metrics map[time.Time]Metrics - Targets []influxdb.ScraperTarget -} - -func (s *mockStorage) ListTargets(ctx context.Context, filter influxdb.ScraperTargetFilter) (targets []influxdb.ScraperTarget, err error) { - s.RLock() - defer s.RUnlock() - if s.Targets == nil { - s.Lock() - s.Targets = make([]influxdb.ScraperTarget, 0) - s.Unlock() - } - return s.Targets, nil -} - -func (s *mockStorage) AddTarget(ctx context.Context, t *influxdb.ScraperTarget, userID platform.ID) error { - s.Lock() - defer s.Unlock() - if s.Targets == nil { - s.Targets = make([]influxdb.ScraperTarget, 0) - } - s.Targets = append(s.Targets, *t) - return nil -} - -func (s *mockStorage) RemoveTarget(ctx context.Context, id platform.ID) error { - s.Lock() - defer s.Unlock() - - if s.Targets == nil { - return nil - } - for k, v := range s.Targets { - if v.ID == id { - s.Targets = append(s.Targets[:k], s.Targets[k+1:]...) 
- break - } - } - return nil -} - -func (s *mockStorage) GetTargetByID(ctx context.Context, id platform.ID) (target *influxdb.ScraperTarget, err error) { - s.RLock() - defer s.RUnlock() - - for k, v := range s.Targets { - if v.ID == id { - target = &s.Targets[k] - break - } - } - - return target, err - -} - -func (s *mockStorage) UpdateTarget(ctx context.Context, update *influxdb.ScraperTarget, userID platform.ID) (target *influxdb.ScraperTarget, err error) { - s.Lock() - defer s.Unlock() - - for k, v := range s.Targets { - if v.ID.String() == update.ID.String() { - s.Targets[k] = *update - break - } - } - - return update, err -} - -type mockHTTPHandler struct { - unauthorized bool - noContent bool - responseMap map[string]string -} - -func (h mockHTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if h.unauthorized { - w.WriteHeader(http.StatusUnauthorized) - return - } - if h.noContent { - w.WriteHeader(http.StatusNoContent) - return - } - s, ok := h.responseMap[r.URL.Path] - if !ok { - w.WriteHeader(http.StatusNotFound) - return - } - w.Header().Set("Content-Type", "text/plain; version=0.0.4; charset=utf-8") - w.Write([]byte(s)) -} - -var metricsCmpOption = cmp.Options{ - cmp.Comparer(func(x, y Metrics) bool { - return x.Name == y.Name && - x.Type == y.Type && - reflect.DeepEqual(x.Tags, y.Tags) && - reflect.DeepEqual(x.Fields, y.Fields) - }), -} diff --git a/go.mod b/go.mod deleted file mode 100644 index 02c9492423f..00000000000 --- a/go.mod +++ /dev/null @@ -1,225 +0,0 @@ -module github.com/influxdata/influxdb/v2 - -go 1.20 - -require ( - github.com/BurntSushi/toml v1.2.1 - github.com/Masterminds/squirrel v1.5.0 - github.com/NYTimes/gziphandler v1.0.1 - github.com/RoaringBitmap/roaring v0.4.16 - github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 - github.com/apache/arrow/go/v7 v7.0.1 - github.com/benbjohnson/clock v0.0.0-20161215174838-7dc76406b6d3 - github.com/benbjohnson/tmpl v1.0.0 - github.com/buger/jsonparser v1.1.1 - github.com/cespare/xxhash v1.1.0 - github.com/davecgh/go-spew v1.1.1 - github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 - github.com/dustin/go-humanize v1.0.0 - github.com/editorconfig-checker/editorconfig-checker v0.0.0-20190819115812-1474bdeaf2a2 - github.com/elazarl/go-bindata-assetfs v1.0.1 - github.com/go-chi/chi v4.1.0+incompatible - github.com/go-stack/stack v1.8.0 - github.com/golang-jwt/jwt v3.2.1+incompatible - github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021 - github.com/golang/mock v1.6.0 - github.com/golang/snappy v0.0.4 - github.com/google/btree v1.0.1 - github.com/google/go-cmp v0.5.9 - github.com/google/go-jsonnet v0.17.0 - github.com/hashicorp/vault/api v1.0.2 - github.com/influxdata/cron v0.0.0-20201006132531-4bb0a200dcbe - github.com/influxdata/flux v0.194.3 - github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69 - github.com/influxdata/influx-cli/v2 v2.2.1-0.20221028161653-3285a03e9e28 - github.com/influxdata/influxql v1.1.1-0.20211004132434-7e7d61973256 - github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 - github.com/influxdata/pkg-config v0.2.11 - github.com/jmoiron/sqlx v1.3.4 - github.com/jsternberg/zap-logfmt v1.2.0 - github.com/jwilder/encoding 
v0.0.0-20170811194829-b4e1701a28ef - github.com/kevinburke/go-bindata v3.22.0+incompatible - github.com/mattn/go-isatty v0.0.16 - github.com/mattn/go-sqlite3 v1.14.7 - github.com/matttproud/golang_protobuf_extensions v1.0.4 - github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5 - github.com/mna/pigeon v1.0.1-0.20180808201053-bb0192cfc2ae - github.com/opentracing/opentracing-go v1.2.0 - github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.11.1 - github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.30.0 - github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 - github.com/spf13/cast v1.3.0 - github.com/spf13/cobra v1.0.0 - github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.6.1 - github.com/stretchr/testify v1.8.1 - github.com/testcontainers/testcontainers-go v0.18.0 - github.com/tinylib/msgp v1.1.0 - github.com/uber/jaeger-client-go v2.28.0+incompatible - github.com/xlab/treeprint v1.0.0 - github.com/yudai/gojsondiff v1.0.0 - go.etcd.io/bbolt v1.3.6 - go.uber.org/multierr v1.6.0 - go.uber.org/zap v1.16.0 - golang.org/x/crypto v0.1.0 - golang.org/x/sync v0.1.0 - golang.org/x/sys v0.5.0 - golang.org/x/text v0.7.0 - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 - golang.org/x/tools v0.5.0 - google.golang.org/protobuf v1.28.1 - gopkg.in/yaml.v2 v2.4.0 - gopkg.in/yaml.v3 v3.0.1 - honnef.co/go/tools v0.4.0 -) - -require ( - cloud.google.com/go v0.82.0 // indirect - cloud.google.com/go/bigquery v1.8.0 // indirect - cloud.google.com/go/bigtable v1.10.1 // indirect - github.com/AlecAivazis/survey/v2 v2.3.4 // indirect - github.com/Azure/azure-pipeline-go v0.2.3 // indirect - github.com/Azure/azure-storage-blob-go v0.14.0 // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.9 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.5.3 // indirect - github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/DATA-DOG/go-sqlmock v1.4.1 // indirect - github.com/Masterminds/semver v1.4.2 // indirect - github.com/Masterminds/sprig v2.16.0+incompatible // indirect - github.com/Microsoft/go-winio v0.5.2 // indirect - github.com/SAP/go-hdb v0.14.1 // indirect - github.com/aokoli/goutils v1.0.1 // indirect - github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 // indirect - github.com/aws/aws-sdk-go v1.34.0 // indirect - github.com/aws/aws-sdk-go-v2 v1.11.0 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.6.1 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.7.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0 // indirect - 
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.9.0 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.19.0 // indirect - github.com/aws/smithy-go v1.9.0 // indirect - github.com/benbjohnson/immutable v0.3.0 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0 // indirect - github.com/cenkalti/backoff/v4 v4.2.0 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/containerd/containerd v1.6.18 // indirect - github.com/deepmap/oapi-codegen v1.6.0 // indirect - github.com/denisenkom/go-mssqldb v0.10.0 // indirect - github.com/dimchansky/utfbom v1.1.0 // indirect - github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/docker/docker v23.0.3+incompatible // indirect - github.com/docker/go-connections v0.4.0 // indirect - github.com/docker/go-units v0.5.0 // indirect - github.com/eclipse/paho.mqtt.golang v1.2.0 // indirect - github.com/editorconfig/editorconfig-core-go/v2 v2.1.1 // indirect - github.com/fatih/color v1.13.0 // indirect - github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect - github.com/gabriel-vasile/mimetype v1.4.0 // indirect - github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 // indirect - github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 // indirect - github.com/go-sql-driver/mysql v1.6.0 // indirect - github.com/goccy/go-json v0.9.6 // indirect - github.com/gofrs/uuid v3.3.0+incompatible // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect - github.com/golang/geo v0.0.0-20190916061304-5b978397cfec // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/flatbuffers v22.9.30-0.20221019131441-5792623df42e+incompatible // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/gax-go/v2 v2.0.5 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-cleanhttp v0.5.1 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-retryablehttp v0.6.4 // indirect - github.com/hashicorp/go-rootcerts v1.0.0 // indirect - github.com/hashicorp/go-sockaddr v1.0.2 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hashicorp/vault/sdk v0.1.8 // indirect - github.com/huandu/xstrings v1.0.0 // indirect - github.com/imdario/mergo v0.3.12 // indirect - github.com/inconshreveable/mousetrap v1.0.0 // indirect - github.com/influxdata/gosnowflake v1.6.9 // indirect - github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040 // indirect - github.com/influxdata/influxdb-iox-client-go v1.0.0-beta.1 // indirect - github.com/influxdata/line-protocol/v2 v2.2.1 // indirect - github.com/influxdata/tdigest 
v0.0.2-0.20210216194612-fc98d27c9e8b // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/jstemmer/go-junit-report v0.9.1 // indirect - github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/compress v1.14.2 // indirect - github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect - github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect - github.com/lib/pq v1.2.0 // indirect - github.com/magiconair/properties v1.8.7 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-ieproxy v0.0.1 // indirect - github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.1.2 // indirect - github.com/moby/patternmatcher v0.5.0 // indirect - github.com/moby/sys/sequential v0.5.0 // indirect - github.com/moby/term v0.0.0-20221128092401-c43b287e0e0f // indirect - github.com/morikuni/aec v1.0.0 // indirect - github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae // indirect - github.com/onsi/ginkgo v1.12.1 // indirect - github.com/onsi/gomega v1.10.3 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc2 // indirect - github.com/opencontainers/runc v1.1.5 // indirect - github.com/pelletier/go-toml v1.9.5 // indirect - github.com/philhofer/fwd v1.0.0 // indirect - github.com/pierrec/lz4 v2.0.5+incompatible // indirect - github.com/pierrec/lz4/v4 v4.1.12 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/procfs v0.7.3 // indirect - github.com/ryanuber/go-glob v1.0.0 // indirect - github.com/segmentio/kafka-go v0.2.0 // indirect - github.com/sergi/go-diff v1.1.0 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect - github.com/spf13/afero v1.2.2 // indirect - github.com/spf13/jwalterweatherman v1.0.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect - github.com/subosito/gotenv v1.2.0 // indirect - github.com/uber-go/tally v3.3.15+incompatible // indirect - github.com/uber/athenadriver v1.1.4 // indirect - github.com/uber/jaeger-lib v2.4.1+incompatible // indirect - github.com/vertica/vertica-sql-go v1.1.1 // indirect - github.com/willf/bitset v1.1.11 // indirect - github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect - github.com/yudai/pp v2.0.1+incompatible // indirect - go.opencensus.io v0.23.0 // indirect - go.uber.org/atomic v1.7.0 // indirect - golang.org/x/exp v0.0.0-20211216164055-b2b84827b756 // indirect - golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a // indirect - golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect - golang.org/x/mod v0.7.0 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect - golang.org/x/term v0.5.0 // indirect - golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect - gonum.org/v1/gonum v0.11.0 // indirect - google.golang.org/api v0.47.0 // indirect - google.golang.org/appengine v1.6.7 // 
indirect - google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad // indirect - google.golang.org/grpc v1.47.0 // indirect - gopkg.in/ini.v1 v1.51.0 // indirect - gopkg.in/square/go-jose.v2 v2.5.1 // indirect -) - -replace github.com/nats-io/nats-streaming-server v0.11.2 => github.com/influxdata/nats-streaming-server v0.11.3-0.20201112040610-c277f7560803 diff --git a/go.sum b/go.sum deleted file mode 100644 index 1c9ec44831c..00000000000 --- a/go.sum +++ /dev/null @@ -1,1514 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.82.0 h1:FZ4B2YAzCzkwzGEOp1dqG8sAa3zNIvro1fHRTrB81RU= -cloud.google.com/go v0.82.0/go.mod h1:vlKccHJGuFBFufnAnuB08dfEH9Y3H7dzDzRECFdC2TA= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigtable v1.10.1 h1:QKcRHeAsraxIlrdCZ3LLobXKBvITqcOEnSbHG2rzL9g= -cloud.google.com/go/bigtable v1.10.1/go.mod h1:cyHeKlx6dcZCO0oSQucYdauseD8kIENGuDOJPKMCVg8= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= 
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= -github.com/AlecAivazis/survey/v2 v2.3.4 h1:pchTU9rsLUSvWEl2Aq9Pv3k0IE2fkqtGxazskAMd9Ng= -github.com/AlecAivazis/survey/v2 v2.3.4/go.mod h1:hrV6Y/kQCLhIZXGcriDCUBtB3wnN7156gMXJ3+b23xM= -github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= -github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= -github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= -github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.9 h1:P0ZF0dEYoUPUVDQo3mA1CvH5b8mKev7DDcmTwauuNME= -github.com/Azure/go-autorest/autorest v0.11.9/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.3 h1:lZifaPRAk1bqg5vGqreL6F8uLC5V0fDpY8nFvc3boFc= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.3/go.mod h1:4bJZhUhcq8LB20TruwHbAQsmUs2Xh+QR7utuJpLXX3A= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= -github.com/Azure/go-autorest/autorest/mocks 
v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.4.1 h1:ThlnYciV1iM/V0OSF/dtkqWb6xo5qITT1TJBG1MRDJM= -github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/HdrHistogram/hdrhistogram-go v1.1.0 h1:6dpdDPTRoo78HxAJ6T1HfMiKSnqhgRRqzCuPshRkQ7I= -github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v2.16.0+incompatible h1:QZbMUPxRQ50EKAq3LFMnxddMu88/EUUG3qmxwtDmPsY= -github.com/Masterminds/sprig v2.16.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/squirrel v1.5.0 h1:JukIZisrUXadA9pl3rMkjhiamxiB0cXiu+HGp/Y8cY8= -github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY= -github.com/NYTimes/gziphandler v1.0.1 h1:iLrQrdwjDd52kHDA5op2UBJFjmOb9g+7scBan4RN8F0= -github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= -github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/RoaringBitmap/roaring v0.4.16 h1:NholfewybRLOwACgfqfzn/N5xa6keKNs4fP00t0cwLo= -github.com/RoaringBitmap/roaring v0.4.16/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w= -github.com/SAP/go-hdb v0.14.1 h1:hkw4ozGZ/i4eak7ZuGkY5e0hxiXFdNUBNhr4AvZVNFE= -github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= -github.com/Shopify/sarama v1.19.0/go.mod 
h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aokoli/goutils v1.0.1 h1:7fpzNGoJ3VA8qcrm++XEE1QUe0mIwNeLa02Nwq7RDkg= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= -github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 h1:q4dksr6ICHXqG5hm0ZW5IHyeEJXoIJSOZeBLmWPNeIQ= -github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs= -github.com/apache/arrow/go/v7 v7.0.1 h1:WpCfq+AQxvXaI6/KplHE27MPMFx5av0o5NbPCTAGfy4= -github.com/apache/arrow/go/v7 v7.0.1/go.mod h1:JxDpochJbCVxqbX4G8i1jRqMrnTCQdf8pTccAfLD8Es= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.15.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= -github.com/aws/aws-sdk-go v1.34.0 h1:brux2dRrlwCF5JhTL7MUT3WUwo9zfDHZZp3+g3Mvlmo= 
-github.com/aws/aws-sdk-go v1.34.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aws/aws-sdk-go-v2 v1.11.0 h1:HxyD62DyNhCfiFGUHqJ/xITD6rAjJ7Dm/2nLxLmO4Ag= -github.com/aws/aws-sdk-go-v2 v1.11.0/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 h1:yVUAwvJC/0WNPbyl0nA3j1L6CW1CN8wBubCRqtG7JLI= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0/go.mod h1:Xn6sxgRuIDflLRJFj5Ev7UxABIkNbccFPV/p8itDReM= -github.com/aws/aws-sdk-go-v2/config v1.10.1 h1:z/ViqIjW6ZeuLWgTWMTSyZzaVWo/1cWeVf1Uu+RF01E= -github.com/aws/aws-sdk-go-v2/config v1.10.1/go.mod h1:auIv5pIIn3jIBHNRcVQcsczn6Pfa6Dyv80Fai0ueoJU= -github.com/aws/aws-sdk-go-v2/credentials v1.6.1 h1:A39JYth2fFCx+omN/gib/jIppx3rRnt2r7UKPq7Mh5Y= -github.com/aws/aws-sdk-go-v2/credentials v1.6.1/go.mod h1:QyvQk1IYTqBWSi1T6UgT/W8DMxBVa5pVuLFSRLLhGf8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0 h1:OpZjuUy8Jt3CA1WgJgBC5Bz+uOjE5Ppx4NFTRaooUuA= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0/go.mod h1:5E1J3/TTYy6z909QNR0QnXGBpfESYGDqd3O0zqONghU= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.7.1 h1:p9Dys1g2YdaqMalnp6AwCA+tpMMdJNGw5YYKP/u3sUk= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.7.1/go.mod h1:wN/mvkow08GauDwJ70jnzJ1e+hE+Q3Q7TwpYLXOe9oI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0 h1:zY8cNmbBXt3pzjgWgdIbzpQ6qxoCwt+Nx9JbrAf2mbY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0/go.mod h1:NO3Q5ZTTQtO2xIg2+xTXYDiT7knSejfeDm7WGDaOo0U= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0 h1:Z3aR/OXBnkYK9zXkNkfitHX6SmUBzSsx8VMHbH4Lvhw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0/go.mod h1:anlUzBoEWglcUxUQwZA7HQOEVEnQALVZsizAapB2hq8= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0 h1:c10Z7fWxtJCoyc8rv06jdh9xrKnu7bAJiRaKWvTb2mU= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0/go.mod h1:6oXGy4GLpypD3uCh8wcqztigGgmhLToMfjavgh+VySg= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0 h1:lPLbw4Gn59uoKqvOfSnkJr54XWk5Ak1NK20ZEiSWb3U= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0/go.mod h1:80NaCIH9YU3rzTTs/J/ECATjXuRqzo/wB6ukO6MZ0XY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0 h1:qGZWS/WgiFY+Zgad2u0gwBHpJxz6Ne401JE7iQI1nKs= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0/go.mod h1:Mq6AEc+oEjCUlBuLiK5YwW4shSOAKCQ3tXN0sQeYoBA= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.9.0 h1:0BOlTqnNnrEO04oYKzDxMMe68t107pmIotn18HtVonY= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.9.0/go.mod h1:xKCZ4YFSF2s4Hnb/J0TLeOsKuGzICzcElaOKNGrVnx4= -github.com/aws/aws-sdk-go-v2/service/s3 v1.19.0 h1:5mRAms4TjSTOGYsqKYte5kHr1PzpMJSyLThjF3J+hw0= -github.com/aws/aws-sdk-go-v2/service/s3 v1.19.0/go.mod h1:Gwz3aVctJe6mUY9T//bcALArPUaFmNAy2rTB9qN4No8= -github.com/aws/aws-sdk-go-v2/service/sso v1.6.0 h1:JDgKIUZOmLFu/Rv6zXLrVTWCmzA0jcTdvsT8iFIKrAI= -github.com/aws/aws-sdk-go-v2/service/sso v1.6.0/go.mod h1:Q/l0ON1annSU+mc0JybDy1Gy6dnJxIcWjphO6qJPzvM= -github.com/aws/aws-sdk-go-v2/service/sts v1.10.0 h1:1jh8J+JjYRp+QWKOsaZt7rGUgoyrqiiVwIm+w0ymeUw= 
-github.com/aws/aws-sdk-go-v2/service/sts v1.10.0/go.mod h1:jLKCFqS+1T4i7HDqCP9GM4Uk75YW1cS0o82LdxpMyOE= -github.com/aws/smithy-go v1.9.0 h1:c7FUdEqrQA1/UVKKCNDFQPNKGp4FQg3YW4Ck5SLTG58= -github.com/aws/smithy-go v1.9.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/benbjohnson/clock v0.0.0-20161215174838-7dc76406b6d3 h1:wOysYcIdqv3WnvwqFFzrYCFALPED7qkUGaLXu359GSc= -github.com/benbjohnson/clock v0.0.0-20161215174838-7dc76406b6d3/go.mod h1:UMqtWQTnOe4byzwe7Zhwh8f8s+36uszN51sJrSIZlTE= -github.com/benbjohnson/immutable v0.3.0 h1:TVRhuZx2wG9SZ0LRdqlbs9S5BZ6Y24hJEHTCgWHZEIw= -github.com/benbjohnson/immutable v0.3.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= -github.com/benbjohnson/tmpl v1.0.0 h1:T5QPGJD0W6JJxyEEAlVnX3co/IkUrfHen1/42nlgAHo= -github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0 h1:MaVh0h9+KaMnJcoDvvIGp+O3fefdWm+8MBUX6ELTJTM= -github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ= -github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/c-bata/go-prompt v0.2.2 h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0= -github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= -github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= 
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.6.18 h1:qZbsLvmyu+Vlty0/Ex5xc0z2YtKpIsb5n45mAMI+2Ns= -github.com/containerd/containerd v1.6.18/go.mod h1:1RdCUu95+gc2v9t3IL+zIlpClSmew7/0YS8O5eQZrOw= -github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 
-github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= -github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deepmap/oapi-codegen v1.6.0 h1:w/d1ntwh91XI0b/8ja7+u5SvA4IFfM0UNNLmiDR1gg0= -github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= -github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8= -github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 h1:akOQj8IVgoeFfBTzGOEQakCYshWD6RNo1M5pivFXt70= -github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= -github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v23.0.3+incompatible h1:9GhVsShNWz1hO//9BNg/dpMnZW25KydO4wtVxWAIbho= -github.com/docker/docker v23.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/eclipse/paho.mqtt.golang 
v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0= -github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= -github.com/editorconfig-checker/editorconfig-checker v0.0.0-20190819115812-1474bdeaf2a2 h1:BoejGRtu+FygJB/0ZpkhTSmaM7QbsPxFgcspAbTElNI= -github.com/editorconfig-checker/editorconfig-checker v0.0.0-20190819115812-1474bdeaf2a2/go.mod h1:nnr6DXFepwb2+GC7evku5Mak3wGGRShiYy6fPkdIwVM= -github.com/editorconfig/editorconfig-core-go/v2 v2.1.1 h1:mhPg/0hGebcpiiQLqJD2PWWyoHRLEdZ3sXKaEvT1EQU= -github.com/editorconfig/editorconfig-core-go/v2 v2.1.1/go.mod h1:/LuhWJiQ9Gvo1DhVpa4ssm5qeg8rrztdtI7j/iCie2k= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/go-bindata-assetfs v1.0.1 h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE03qmvRTNfbw= -github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15 h1:nLPjjvpUAODOR6vY/7o0hBIk8iTr19Fvmf8aFx/kC7A= 
-github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= -github.com/frankban/quicktest v1.11.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= -github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/gabriel-vasile/mimetype v1.4.0 h1:Cn9dkdYsMIu56tGho+fqzh7XmvY2YyGU0FnbhiOsEro= -github.com/gabriel-vasile/mimetype v1.4.0/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= -github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493 h1:OTanQnFt0bi5iLFSdbEVA/idR6Q2WhCm+deb7ir2CcM= -github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/go-chi/chi v4.1.0+incompatible h1:ETj3cggsVIY2Xao5ExCu6YhEh5MD6JTfcBzS37R260w= -github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= -github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= -github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= -github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= -github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= -github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= 
-github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= -github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/goccy/go-json v0.7.10/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/goccy/go-json v0.9.6 h1:5/4CtRQdtsX0sal8fdVhTaiMN01Ri8BExZZ8iRmHQ6E= -github.com/goccy/go-json v0.9.6/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= -github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021 h1:HYV500jCgk+IC68L5sWrLFIWMpaUFfXXpJSAb7XOoBk= -github.com/golang/gddo v0.0.0-20181116215533-9bd4a3295021/go.mod 
h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4= -github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw= -github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/flatbuffers v22.9.30-0.20221019131441-5792623df42e+incompatible h1:Bqgl5d9t2UlT8pv9Oc/lkkI8yYk0jCwHkZKkHzbxEsc= -github.com/google/flatbuffers v22.9.30-0.20221019131441-5792623df42e+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-jsonnet v0.17.0 h1:/9NIEfhK1NQRKl3sP2536b2+x5HnZMdql7x3yK/l8JY= -github.com/google/go-jsonnet v0.17.0/go.mod h1:sOcuej3UW1vpPTZOr8L7RQimqai1a57bt5j22LzGZCw= -github.com/google/gofuzz v1.0.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= -github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod 
h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.0.0/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.6.4 h1:BbgctKO892xEyOXnGiaAwIoSq1QZ/SS4AhjoAh9DnfY= -github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod 
h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/vault/api v1.0.2 h1:/V9fULvLwt58vme/6Rkt/p/GtlresQv+Z9E6dgdANhs= -github.com/hashicorp/vault/api v1.0.2/go.mod h1:AV/+M5VPDpB90arloVX0rVDUIHkONiwz5Uza9HRtpUE= -github.com/hashicorp/vault/sdk v0.1.8 h1:pfF3KwA1yPlfpmcumNsFM4uo91WMasX5gTuIkItu9r0= -github.com/hashicorp/vault/sdk v0.1.8/go.mod h1:tHZfc6St71twLizWNHvnnbiGFo1aq0eD2jGPLtP8kAU= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= -github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.0.0 h1:pO2K/gKgKaat5LdpAhxhluX2GPQMaI3W5FUz/I/UnWk= -github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0 
h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/cron v0.0.0-20201006132531-4bb0a200dcbe h1:7j4SdN/BvQwN6WoUq7mv0kg5U9NhnFBxPGMafYRKym0= -github.com/influxdata/cron v0.0.0-20201006132531-4bb0a200dcbe/go.mod h1:XabtPPW2qsCg0tl+kjaPU+cFS+CjQXEXbT1VJvHT4og= -github.com/influxdata/flux v0.194.3 h1:3PKCi41NrUfFSz3Dp2Rt2Rs+bREP9VPRgrq8H14Ymag= -github.com/influxdata/flux v0.194.3/go.mod h1:hAo8pb/Rxp6afj8/roEzxANO5PNVObAdXtv2dBp1E6U= -github.com/influxdata/gosnowflake v1.6.9 h1:BhE39Mmh8bC+Rvd4QQsP2gHypfeYIH1wqW1AjGWxxrE= -github.com/influxdata/gosnowflake v1.6.9/go.mod h1:9W/BvCXOKx2gJtQ+jdi1Vudev9t9/UDOEHnlJZ/y1nU= -github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69 h1:WQsmW0fXO4ZE/lFGIE84G6rIV5SJN3P3sjIXAP1a8eU= -github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA= -github.com/influxdata/influx-cli/v2 v2.2.1-0.20221028161653-3285a03e9e28 h1:brESt4mwZknEUuwrYcGSG6JqjTKC5M+qVKgL73ondFg= -github.com/influxdata/influx-cli/v2 v2.2.1-0.20221028161653-3285a03e9e28/go.mod h1:rvb2oIMqPs+O9gL6r0kqJ2X0tbQ8WRRtteeWfCqdhZU= -github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040 h1:MBLCfcSsUyFPDJp6T7EoHp/Ph3Jkrm4EuUKLD2rUWHg= -github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= -github.com/influxdata/influxdb-iox-client-go v1.0.0-beta.1 h1:zDmAiE2o3Y/YZinI6CENzgQueJDuibUB9TWOZC5zCq0= -github.com/influxdata/influxdb-iox-client-go v1.0.0-beta.1/go.mod h1:Chl4pz0SRqoPmEavex4vZaQlunqXqrtEPWAN54THFfo= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/influxdata/influxql v1.1.1-0.20211004132434-7e7d61973256 h1:8io3jjCJ0j9NFvq3/m/rMrDiEILpsfOqWDPItUt/078= -github.com/influxdata/influxql v1.1.1-0.20211004132434-7e7d61973256/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= -github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= -github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= -github.com/influxdata/line-protocol-corpus v0.0.0-20210519164801-ca6fa5da0184/go.mod h1:03nmhxzZ7Xk2pdG+lmMd7mHDfeVOYFyhOgwO61qWU98= -github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937 h1:MHJNQ+p99hFATQm6ORoLmpUCF7ovjwEFshs/NHzAbig= -github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937/go.mod h1:BKR9c0uHSmRgM/se9JhFHtTT7JTO67X23MtKMHtZcpo= -github.com/influxdata/line-protocol/v2 v2.0.0-20210312151457-c52fdecb625a/go.mod h1:6+9Xt5Sq1rWx+glMgxhcg2c0DUaehK+5TDcPZ76GypY= -github.com/influxdata/line-protocol/v2 v2.1.0/go.mod h1:QKw43hdUBg3GTk2iC3iyCxksNj7PX9aUSeYOYE/ceHY= -github.com/influxdata/line-protocol/v2 v2.2.1 h1:EAPkqJ9Km4uAxtMRgUubJyqAr6zgWM0dznKMLRauQRE= -github.com/influxdata/line-protocol/v2 v2.2.1/go.mod h1:DmB3Cnh+3oxmG6LOBIxce4oaL4CPj3OmMPgvauXh+tM= -github.com/influxdata/pkg-config v0.2.11 h1:RDlWAvkTARzPRGChq34x179TYlRndq8OU5Ro80E9g3Q= -github.com/influxdata/pkg-config v0.2.11/go.mod 
h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= -github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b h1:i44CesU68ZBRvtCjBi3QSosCIKrjmMbYlQMFAwVLds4= -github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.3.4 h1:wv+0IJZfL5z0uZoUjlpKgHkgaFSYD+r9CfrXjEXsO7w= -github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jsternberg/zap-logfmt v1.2.0 h1:1v+PK4/B48cy8cfQbxL4FmmNZrjnIMr2BsnyEmXqv2o= -github.com/jsternberg/zap-logfmt v1.2.0/go.mod h1:kz+1CUmCutPWABnNkOu9hOHKdT2q3TDYCcsFy9hpqb0= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef h1:2jNeR4YUziVtswNP9sEFAI913cVrzH85T+8Q6LpYbT0= -github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= 
-github.com/kevinburke/go-bindata v3.22.0+incompatible h1:/JmqEhIWQ7GRScV0WjX/0tqBrC5D21ALg0H0U/KZ/ts= -github.com/kevinburke/go-bindata v3.22.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/asmfmt v1.3.1/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= -github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw= -github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= -github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= -github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= -github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= -github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= -github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= 
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= -github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= -github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.7 h1:fxWBnXkxfM6sRiuH3bqJ4CfzZojMOLVc0UTsTglEghA= -github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-tty v0.0.4 h1:NVikla9X8MN0SQAqCYzpGyXv0jY7MNl3HOWD2dkle7E= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b 
h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg= -github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5 h1:pXqZHmHOz6LN+zbbUgqyGgAWRnnZEI40IzG3tMsXcSI= -github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE= -github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= -github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mna/pigeon v1.0.1-0.20180808201053-bb0192cfc2ae h1:mQO+oxi0kpii/TX+ltfTCFuYkOjEn53JhaOObiMuvnk= -github.com/mna/pigeon v1.0.1-0.20180808201053-bb0192cfc2ae/go.mod h1:Iym28+kJVnC1hfQvv5MUtI6AiFFzvQjHcvI4RFTG/04= -github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= -github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= -github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= -github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/term v0.0.0-20221128092401-c43b287e0e0f h1:J/7hjLaHLD7epG0m6TBMGmp4NQ+ibBYLfeyJWdAIFLA= -github.com/moby/term v0.0.0-20221128092401-c43b287e0e0f/go.mod h1:15ce4BGCFxt7I5NQKT+HV0yEDxmf6fSysfEDiVo3zFM= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY= -github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= -github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= 
-github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= -github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= -github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= -github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= -github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= -github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.11/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= 
-github.com/pierrec/lz4/v4 v4.1.12 h1:44l88ehTZAUGW4VlO1QC4zkilL99M6Y9MXNwEs0uzP8= -github.com/pierrec/lz4/v4 v4.1.12/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/term v1.2.0-beta.2 h1:L3y/h2jkuBVFdWiJvNfYfKmzcCnILw7mJWm2JQuMppw= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= 
-github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= -github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0= -github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= -github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod 
h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY= -github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= -github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= 
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.6.1 h1:VPZzIkznI1YhVMRi6vNFLHSwhnhReBfgTxIPccpfdZk= -github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/testcontainers/testcontainers-go v0.18.0 h1:8RXrcIQv5xX/uBOSmZd297gzvA7F0yuRA37/918o7Yg= -github.com/testcontainers/testcontainers-go v0.18.0/go.mod h1:rLC7hR2SWRjJZZNrUYiTKvUXCziNxzZiYtz9icTWYNQ= -github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= -github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/uber-go/tally v3.3.15+incompatible h1:9hLSgNBP28CjIaDmAuRTq9qV+UZY+9PcvAkXO4nNMwg= -github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= -github.com/uber/athenadriver v1.1.4 h1:k6k0RBeXjR7oZ8NO557MsRw3eX1cc/9B0GNx+W9eHiQ= -github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixUabHkC1K/E= -github.com/uber/jaeger-client-go v2.28.0+incompatible h1:G4QSBfvPKvg5ZM2j9MrJFdfI5iSljY/WnJqOGFao6HI= -github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= -github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/vertica/vertica-sql-go v1.1.1 h1:sZYijzBbvdAbJcl4cYlKjR+Eh/X1hGKzukWuhh8PjvI= -github.com/vertica/vertica-sql-go v1.1.1/go.mod h1:fGr44VWdEvL+f+Qt5LkKLOT7GoxaWdoUCnPBU9h6t04= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE= -github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v1.0.0 h1:J0TkWtiuYgtdlrkkrDLISYBQ92M+X5m4LrIIMKrbDTs= -github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/zeebo/xxh3 v1.0.1/go.mod h1:8VHV24/3AZLn3b6Mlp/KuC33LWH687Wq6EnziEB+rsA= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap 
v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= 
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20211216164055-b2b84827b756 h1:/5Bs7sWi0i3rOVO5KnM55OwugpsD4bRW1zywKoZjbkI= -golang.org/x/exp v0.0.0-20211216164055-b2b84827b756/go.mod h1:b9TAUYHmRtqA6klRHApnXMnj+OyLce4yF5cZCUbk2ps= -golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a h1:Jw5wfR+h9mnIYH+OtGT2im5wV1YGGDora5vTv/aa5bE= -golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211118161319-6a13c67c3ce4/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210601080250-7ecdf8ef093b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= -golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= -gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= -gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= -gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod 
h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= -google.golang.org/api v0.47.0 h1:sQLWZQvP6jPGIP4JGPkJu4zHswrv81iobiyszr3b/0I= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod 
h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210601144548-a796c710e9b6/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad h1:kqrS+lhvaMHCxul6sKQvKJ8nAAhlVItmZV822hYFH/U= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.46.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod 
h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -honnef.co/go/tools v0.4.0 h1:lyXVV1c8wUBJRKqI8JgIpT8TW1VDagfYYaxbKa/HoL8= -honnef.co/go/tools v0.4.0/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA= -rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/http/README.md b/http/README.md deleted file mode 100644 index 7b99955d1e6..00000000000 --- a/http/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# HTTP Handler Style Guide - -### HTTP Handler -* Each handler should implement `http.Handler` - - This can be done by embedding a [`httprouter.Router`](https://github.com/julienschmidt/httprouter) - (a light weight HTTP router that supports variables in the routing pattern and matches against the request method) -* Required services should be exported on the struct - -```go -// ThingHandler represents an HTTP API 
handler for things. -type ThingHandler struct { - // embedded httprouter.Router as a lazy way to implement http.Handler - *httprouter.Router - - ThingService platform.ThingService - AuthorizationService platform.AuthorizationService - - Logger *zap.Logger -} -``` - -### HTTP Handler Constructor - -* Routes should be declared in the constructor - -```go -// NewThingHandler returns a new instance of ThingHandler. -func NewThingHandler() *ThingHandler { - h := &ThingHandler{ - Router: httprouter.New(), - Logger: zap.Nop(), - } - - h.HandlerFunc("POST", "/api/v2/things", h.handlePostThing) - h.HandlerFunc("GET", "/api/v2/things", h.handleGetThings) - - return h -} -``` - -### Route handlers (`http.HandlerFunc`s) - -* Each route handler should have an associated request struct and decode function -* The decode function should take a `context.Context` and an `*http.Request` and return the associated route request struct - -```go -type postThingRequest struct { - Thing *platform.Thing -} - -func decodePostThingRequest(ctx context.Context, r *http.Request) (*postThingRequest, error) { - t := &platform.Thing{} - if err := json.NewDecoder(r.Body).Decode(t); err != nil { - return nil, err - } - - return &postThingRequest{ - Thing: t, - }, nil -} -``` - -* Route `http.HandlerFuncs` should separate the decoding and encoding of HTTP requests/response from actual handler logic - -```go -// handlePostThing is the HTTP handler for the POST /api/v2/things route. -func (h *ThingHandler) handlePostThing(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - req, err := decodePostThingRequest(ctx, r) - if err != nil { - EncodeError(ctx, err, w) - return - } - - // Do stuff here - if err := h.ThingService.CreateThing(ctx, req.Thing); err != nil { - EncodeError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusCreated, req.Thing); err != nil { - h.Logger.Info("encoding response failed", zap.Error(err)) - return - } -} -``` - -* `http.HandlerFunc`'s that require particular encoding of http responses should implement an encode response function diff --git a/http/api_handler.go b/http/api_handler.go deleted file mode 100644 index 90e301edd28..00000000000 --- a/http/api_handler.go +++ /dev/null @@ -1,303 +0,0 @@ -package http - -import ( - "context" - "net/http" - - "github.com/go-chi/chi" - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - "github.com/influxdata/influxdb/v2/dbrp" - "github.com/influxdata/influxdb/v2/http/metric" - "github.com/influxdata/influxdb/v2/influxql" - "github.com/influxdata/influxdb/v2/kit/feature" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/prom" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/influxdata/influxdb/v2/static" - "github.com/influxdata/influxdb/v2/storage" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -// APIHandler is a collection of all the service handlers. -type APIHandler struct { - chi.Router -} - -// APIBackend is all services and associated parameters required to construct -// an APIHandler. 
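The style guide above closes by saying that handlers needing particular response encoding "should implement an encode response function", but unlike the other conventions it is not illustrated with code. As a rough sketch only: the handlers in this package call a helper with the shape `encodeResponse(ctx, w, statusCode, body)` (visible in `handlePostThing` above and in the handlers below), and a minimal JSON-based version of such a helper could look like the following. The body here is an assumption for illustration, not the package's actual implementation; it relies on `context`, `encoding/json`, and `net/http`.

```go
// encodeResponse writes res as JSON with the given status code.
// Minimal sketch only: the signature matches the encodeResponse calls shown in
// the surrounding handlers, but the implementation details are assumed.
func encodeResponse(ctx context.Context, w http.ResponseWriter, code int, res interface{}) error {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(code)
	if res == nil {
		return nil
	}
	return json.NewEncoder(w).Encode(res)
}
```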
-type APIBackend struct { - AssetsPath string // if empty then assets are served from bindata. - UIDisabled bool // if true requests for the UI will return 404 - Logger *zap.Logger - FluxLogEnabled bool - errors.HTTPErrorHandler - SessionRenewDisabled bool - // MaxBatchSizeBytes is the maximum number of bytes which can be written - // in a single points batch - MaxBatchSizeBytes int64 - - // WriteParserMaxBytes specifies the maximum number of bytes that may be allocated when processing a single - // write request. A value of zero specifies there is no limit. - WriteParserMaxBytes int - - // WriteParserMaxLines specifies the maximum number of lines that may be parsed when processing a single - // write request. A value of zero specifies there is no limit. - WriteParserMaxLines int - - // WriteParserMaxValues specifies the maximum number of values that may be parsed when processing a single - // write request. A value of zero specifies there is no limit. - WriteParserMaxValues int - - NewQueryService func(*influxdb.Source) (query.ProxyQueryService, error) - - WriteEventRecorder metric.EventRecorder - QueryEventRecorder metric.EventRecorder - - AlgoWProxy FeatureProxyHandler - - PointsWriter storage.PointsWriter - DeleteService influxdb.DeleteService - BackupService influxdb.BackupService - SqlBackupRestoreService influxdb.SqlBackupRestoreService - BucketManifestWriter influxdb.BucketManifestWriter - RestoreService influxdb.RestoreService - AuthorizationService influxdb.AuthorizationService - AuthorizationV1Service influxdb.AuthorizationService - PasswordV1Service influxdb.PasswordsService - AuthorizerV1 influxdb.AuthorizerV1 - OnboardingService influxdb.OnboardingService - DBRPService influxdb.DBRPMappingService - BucketService influxdb.BucketService - SessionService influxdb.SessionService - UserService influxdb.UserService - OrganizationService influxdb.OrganizationService - UserResourceMappingService influxdb.UserResourceMappingService - LabelService influxdb.LabelService - DashboardService influxdb.DashboardService - DashboardOperationLogService influxdb.DashboardOperationLogService - BucketOperationLogService influxdb.BucketOperationLogService - UserOperationLogService influxdb.UserOperationLogService - OrganizationOperationLogService influxdb.OrganizationOperationLogService - SourceService influxdb.SourceService - VariableService influxdb.VariableService - PasswordsService influxdb.PasswordsService - InfluxqldService influxql.ProxyQueryService - FluxService query.ProxyQueryService - FluxLanguageService fluxlang.FluxLanguageService - TaskService taskmodel.TaskService - CheckService influxdb.CheckService - TelegrafService influxdb.TelegrafConfigStore - ScraperTargetStoreService influxdb.ScraperTargetStoreService - SecretService influxdb.SecretService - LookupService influxdb.LookupService - OrgLookupService authorizer.OrgIDResolver - DocumentService influxdb.DocumentService - NotificationRuleStore influxdb.NotificationRuleStore - NotificationEndpointService influxdb.NotificationEndpointService - Flagger feature.Flagger - FlagsHandler http.Handler -} - -// PrometheusCollectors exposes the prometheus collectors associated with an APIBackend. -func (b *APIBackend) PrometheusCollectors() []prometheus.Collector { - var cs []prometheus.Collector - - if pc, ok := b.WriteEventRecorder.(prom.PrometheusCollector); ok { - cs = append(cs, pc.PrometheusCollectors()...) - } - - if pc, ok := b.QueryEventRecorder.(prom.PrometheusCollector); ok { - cs = append(cs, pc.PrometheusCollectors()...) 
- } - - return cs -} - -// APIHandlerOptFn is a functional input param to set parameters on -// the APIHandler. -type APIHandlerOptFn func(chi.Router) - -// WithResourceHandler registers a resource handler on the APIHandler. -func WithResourceHandler(resHandler kithttp.ResourceHandler) APIHandlerOptFn { - return func(h chi.Router) { - h.Mount(resHandler.Prefix(), resHandler) - } -} - -// NewAPIHandler constructs all api handlers beneath it and returns an APIHandler -func NewAPIHandler(b *APIBackend, opts ...APIHandlerOptFn) *APIHandler { - h := &APIHandler{ - Router: NewBaseChiRouter(kithttp.NewAPI(kithttp.WithLog(b.Logger))), - } - - b.UserResourceMappingService = authorizer.NewURMService(b.OrgLookupService, b.UserResourceMappingService) - - h.Handle("/api/v2", serveLinksHandler(b.HTTPErrorHandler)) - - checkBackend := NewCheckBackend(b.Logger.With(zap.String("handler", "check")), b) - checkBackend.CheckService = authorizer.NewCheckService(b.CheckService, - b.UserResourceMappingService, b.OrganizationService) - h.Mount(prefixChecks, NewCheckHandler(b.Logger, checkBackend)) - - deleteBackend := NewDeleteBackend(b.Logger.With(zap.String("handler", "delete")), b) - h.Mount(prefixDelete, NewDeleteHandler(b.Logger, deleteBackend)) - - documentBackend := NewDocumentBackend(b.Logger.With(zap.String("handler", "document")), b) - documentBackend.DocumentService = authorizer.NewDocumentService(b.DocumentService) - h.Mount(prefixDocuments, NewDocumentHandler(documentBackend)) - - fluxBackend := NewFluxBackend(b.Logger.With(zap.String("handler", "query")), b) - h.Mount(prefixQuery, NewFluxHandler(b.Logger, fluxBackend)) - - notificationEndpointBackend := NewNotificationEndpointBackend(b.Logger.With(zap.String("handler", "notificationEndpoint")), b) - notificationEndpointBackend.NotificationEndpointService = authorizer.NewNotificationEndpointService(b.NotificationEndpointService, - b.UserResourceMappingService, b.OrganizationService) - h.Mount(prefixNotificationEndpoints, NewNotificationEndpointHandler(notificationEndpointBackend.Logger(), notificationEndpointBackend)) - - notificationRuleBackend := NewNotificationRuleBackend(b.Logger.With(zap.String("handler", "notification_rule")), b) - notificationRuleBackend.NotificationRuleStore = authorizer.NewNotificationRuleStore(b.NotificationRuleStore, - b.UserResourceMappingService, b.OrganizationService) - h.Mount(prefixNotificationRules, NewNotificationRuleHandler(b.Logger, notificationRuleBackend)) - - scraperBackend := NewScraperBackend(b.Logger.With(zap.String("handler", "scraper")), b) - scraperBackend.ScraperStorageService = authorizer.NewScraperTargetStoreService(b.ScraperTargetStoreService, - b.UserResourceMappingService, - b.OrganizationService) - h.Mount(prefixTargets, NewScraperHandler(b.Logger, scraperBackend)) - - sourceBackend := NewSourceBackend(b.Logger.With(zap.String("handler", "source")), b) - sourceBackend.SourceService = authorizer.NewSourceService(b.SourceService) - sourceBackend.BucketService = authorizer.NewBucketService(b.BucketService) - h.Mount(prefixSources, NewSourceHandler(b.Logger, sourceBackend)) - - h.Mount("/api/v2/swagger.json", static.NewSwaggerHandler()) - - taskLogger := b.Logger.With(zap.String("handler", "bucket")) - taskBackend := NewTaskBackend(taskLogger, b) - taskBackend.TaskService = authorizer.NewTaskService(taskLogger, b.TaskService) - taskHandler := NewTaskHandler(b.Logger, taskBackend) - h.Mount(prefixTasks, taskHandler) - - telegrafBackend := NewTelegrafBackend(b.Logger.With(zap.String("handler", 
"telegraf")), b) - telegrafBackend.TelegrafService = authorizer.NewTelegrafConfigService(b.TelegrafService, b.UserResourceMappingService) - h.Mount(prefixTelegrafPlugins, NewTelegrafHandler(b.Logger, telegrafBackend)) - h.Mount(prefixTelegraf, NewTelegrafHandler(b.Logger, telegrafBackend)) - - h.Mount("/api/v2/flags", b.FlagsHandler) - - h.Mount(prefixResources, NewResourceListHandler()) - - variableBackend := NewVariableBackend(b.Logger.With(zap.String("handler", "variable")), b) - variableBackend.VariableService = authorizer.NewVariableService(b.VariableService) - h.Mount(prefixVariables, NewVariableHandler(b.Logger, variableBackend)) - - backupBackend := NewBackupBackend(b) - backupBackend.BackupService = authorizer.NewBackupService(backupBackend.BackupService) - backupBackend.SqlBackupRestoreService = authorizer.NewSqlBackupRestoreService(backupBackend.SqlBackupRestoreService) - h.Mount(prefixBackup, NewBackupHandler(backupBackend)) - - restoreBackend := NewRestoreBackend(b) - restoreBackend.RestoreService = authorizer.NewRestoreService(restoreBackend.RestoreService) - restoreBackend.SqlBackupRestoreService = authorizer.NewSqlBackupRestoreService(restoreBackend.SqlBackupRestoreService) - h.Mount(prefixRestore, NewRestoreHandler(restoreBackend)) - - h.Mount(dbrp.PrefixDBRP, dbrp.NewHTTPHandler(b.Logger, b.DBRPService, b.OrganizationService)) - - writeBackend := NewWriteBackend(b.Logger.With(zap.String("handler", "write")), b) - h.Mount(prefixWrite, NewWriteHandler(b.Logger, writeBackend, - WithMaxBatchSizeBytes(b.MaxBatchSizeBytes), - // WithParserOptions( - // models.WithParserMaxBytes(b.WriteParserMaxBytes), - // models.WithParserMaxLines(b.WriteParserMaxLines), - // models.WithParserMaxValues(b.WriteParserMaxValues), - // ), - )) - - for _, o := range opts { - o(h) - } - return h -} - -var apiLinks = map[string]interface{}{ - // when adding new links, please take care to keep this list alphabetical - // as this makes it easier to verify values against the swagger document. 
- "authorizations": "/api/v2/authorizations", - "backup": "/api/v2/backup", - "buckets": "/api/v2/buckets", - "dashboards": "/api/v2/dashboards", - "external": map[string]string{ - "statusFeed": "https://www.influxdata.com/feed/json", - }, - "flags": "/api/v2/flags", - "labels": "/api/v2/labels", - "variables": "/api/v2/variables", - "me": "/api/v2/me", - "notificationRules": "/api/v2/notificationRules", - "notificationEndpoints": "/api/v2/notificationEndpoints", - "orgs": "/api/v2/orgs", - "query": map[string]string{ - "self": "/api/v2/query", - "ast": "/api/v2/query/ast", - "analyze": "/api/v2/query/analyze", - "suggestions": "/api/v2/query/suggestions", - }, - "restore": "/api/v2/restore", - "setup": "/api/v2/setup", - "signin": "/api/v2/signin", - "signout": "/api/v2/signout", - "sources": "/api/v2/sources", - "scrapers": "/api/v2/scrapers", - "swagger": "/api/v2/swagger.json", - "system": map[string]string{ - "metrics": "/metrics", - "debug": "/debug/pprof", - "health": "/health", - }, - "tasks": "/api/v2/tasks", - "checks": "/api/v2/checks", - "telegrafs": "/api/v2/telegrafs", - "plugins": "/api/v2/telegraf/plugins", - "users": "/api/v2/users", - "write": "/api/v2/write", - "delete": "/api/v2/delete", -} - -func serveLinksHandler(errorHandler errors.HTTPErrorHandler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - if err := encodeResponse(ctx, w, http.StatusOK, apiLinks); err != nil { - errorHandler.HandleHTTPError(ctx, err, w) - } - } - return http.HandlerFunc(fn) -} - -func decodeIDFromCtx(ctx context.Context, name string) (platform.ID, error) { - params := httprouter.ParamsFromContext(ctx) - idStr := params.ByName(name) - - if idStr == "" { - return 0, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing " + name, - } - } - - var i platform.ID - if err := i.DecodeFromString(idStr); err != nil { - return 0, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - return i, nil -} diff --git a/http/api_handler_test.go b/http/api_handler_test.go deleted file mode 100644 index 2f13145a9c8..00000000000 --- a/http/api_handler_test.go +++ /dev/null @@ -1,186 +0,0 @@ -package http - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/google/go-cmp/cmp" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "github.com/stretchr/testify/require" - "github.com/yudai/gojsondiff" - "github.com/yudai/gojsondiff/formatter" - "go.uber.org/zap/zaptest" -) - -func TestAPIHandlerServeLinks(t *testing.T) { - tests := []struct { - name string - path string - method string - want int - }{ - { - name: "correct path - GET", - path: "/api/v2", - method: "GET", - want: http.StatusOK, - }, - { - name: "correct path with slash - GET", - path: "/api/v2/", - method: "GET", - want: http.StatusOK, - }, - { - name: "correct path - POST", - path: "/api/v2", - method: "POST", - want: http.StatusOK, - }, - { - name: "incorrect arbitrary path", - path: "/api/v2/asdf", - method: "GET", - want: http.StatusNotFound, - }, - { - // regression test for https://github.com/influxdata/influxdb/issues/21620 - name: "incorrect path at a subroute", - path: "/api/v2/query&foo=bar", - method: "GET", - want: http.StatusNotFound, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := httptest.NewRequest(tt.method, tt.path, nil) - w := httptest.NewRecorder() - h := 
NewAPIHandler(&APIBackend{Logger: zaptest.NewLogger(t)}) - - h.ServeHTTP(w, r) - - res := w.Result() - require.Equal(t, tt.want, res.StatusCode) - }) - } -} - -func TestAPIHandler_NotFound(t *testing.T) { - type args struct { - method string - path string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "path not found", - args: args{ - method: "GET", - path: "/404", - }, - wants: wants{ - statusCode: http.StatusNotFound, - contentType: "application/json; charset=utf-8", - body: ` -{ - "code": "not found", - "message": "path not found" -}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - r := httptest.NewRequest(tt.args.method, tt.args.path, nil) - w := httptest.NewRecorder() - - b := &APIBackend{ - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(t)), - Logger: zaptest.NewLogger(t), - } - - h := NewAPIHandler(b) - h.ServeHTTP(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. get %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. get %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. ***%s***", tt.name, diff) - } - }) - } -} - -func jsonEqual(s1, s2 string) (eq bool, diff string, err error) { - if s1 == s2 { - return true, "", nil - } - - if s1 == "" { - return false, s2, fmt.Errorf("s1 is empty") - } - - if s2 == "" { - return false, s1, fmt.Errorf("s2 is empty") - } - - var o1 interface{} - if err = json.Unmarshal([]byte(s1), &o1); err != nil { - return - } - - var o2 interface{} - if err = json.Unmarshal([]byte(s2), &o2); err != nil { - return - } - - differ := gojsondiff.New() - d, err := differ.Compare([]byte(s1), []byte(s2)) - if err != nil { - return - } - - config := formatter.AsciiFormatterConfig{} - - formatter := formatter.NewAsciiFormatter(o1, config) - diff, err = formatter.Format(d) - - return cmp.Equal(o1, o2), diff, err -} - -func mustNewHTTPClient(t *testing.T, addr, token string) *httpc.Client { - t.Helper() - - httpClient, err := NewHTTPClient(addr, token, false) - if err != nil { - t.Fatal(err) - } - return httpClient -} diff --git a/http/auth_service.go b/http/auth_service.go deleted file mode 100644 index a7ef7109991..00000000000 --- a/http/auth_service.go +++ /dev/null @@ -1,729 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "time" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - platcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "go.uber.org/zap" -) - -const prefixAuthorization = "/api/v2/authorizations" - -// AuthorizationBackend is all services and associated parameters required to construct -// the AuthorizationHandler. 
-type AuthorizationBackend struct { - errors2.HTTPErrorHandler - log *zap.Logger - - AuthorizationService influxdb.AuthorizationService - OrganizationService influxdb.OrganizationService - UserService influxdb.UserService - LookupService influxdb.LookupService -} - -// NewAuthorizationBackend returns a new instance of AuthorizationBackend. -func NewAuthorizationBackend(log *zap.Logger, b *APIBackend) *AuthorizationBackend { - return &AuthorizationBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - - AuthorizationService: b.AuthorizationService, - OrganizationService: b.OrganizationService, - UserService: b.UserService, - LookupService: b.LookupService, - } -} - -// AuthorizationHandler represents an HTTP API handler for authorizations. -type AuthorizationHandler struct { - *httprouter.Router - errors2.HTTPErrorHandler - log *zap.Logger - - OrganizationService influxdb.OrganizationService - UserService influxdb.UserService - AuthorizationService influxdb.AuthorizationService - LookupService influxdb.LookupService -} - -// NewAuthorizationHandler returns a new instance of AuthorizationHandler. -func NewAuthorizationHandler(log *zap.Logger, b *AuthorizationBackend) *AuthorizationHandler { - h := &AuthorizationHandler{ - Router: NewRouter(b.HTTPErrorHandler), - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - - AuthorizationService: b.AuthorizationService, - OrganizationService: b.OrganizationService, - UserService: b.UserService, - LookupService: b.LookupService, - } - - h.HandlerFunc("POST", "/api/v2/authorizations", h.handlePostAuthorization) - h.HandlerFunc("GET", "/api/v2/authorizations", h.handleGetAuthorizations) - h.HandlerFunc("GET", "/api/v2/authorizations/:id", h.handleGetAuthorization) - h.HandlerFunc("PATCH", "/api/v2/authorizations/:id", h.handleUpdateAuthorization) - h.HandlerFunc("DELETE", "/api/v2/authorizations/:id", h.handleDeleteAuthorization) - return h -} - -type authResponse struct { - ID platform.ID `json:"id"` - Token string `json:"token"` - Status influxdb.Status `json:"status"` - Description string `json:"description"` - OrgID platform.ID `json:"orgID"` - Org string `json:"org"` - UserID platform.ID `json:"userID"` - User string `json:"user"` - Permissions []permissionResponse `json:"permissions"` - Links map[string]string `json:"links"` - CreatedAt time.Time `json:"createdAt"` - UpdatedAt time.Time `json:"updatedAt"` -} - -func newAuthResponse(a *influxdb.Authorization, org *influxdb.Organization, user *influxdb.User, ps []permissionResponse) *authResponse { - res := &authResponse{ - ID: a.ID, - Token: a.Token, - Status: a.Status, - Description: a.Description, - OrgID: a.OrgID, - UserID: a.UserID, - User: user.Name, - Org: org.Name, - Permissions: ps, - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/authorizations/%s", a.ID), - "user": fmt.Sprintf("/api/v2/users/%s", a.UserID), - }, - CreatedAt: a.CreatedAt, - UpdatedAt: a.UpdatedAt, - } - return res -} - -func (a *authResponse) toPlatform() *influxdb.Authorization { - res := &influxdb.Authorization{ - ID: a.ID, - Token: a.Token, - Status: a.Status, - Description: a.Description, - OrgID: a.OrgID, - UserID: a.UserID, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: a.CreatedAt, - UpdatedAt: a.UpdatedAt, - }, - } - for _, p := range a.Permissions { - res.Permissions = append(res.Permissions, influxdb.Permission{Action: p.Action, Resource: p.Resource.Resource}) - } - return res -} - -type permissionResponse struct { - Action influxdb.Action `json:"action"` - Resource resourceResponse `json:"resource"` 
-} - -type resourceResponse struct { - influxdb.Resource - Name string `json:"name,omitempty"` - Organization string `json:"org,omitempty"` -} - -func newPermissionsResponse(ctx context.Context, ps []influxdb.Permission, svc influxdb.LookupService) ([]permissionResponse, error) { - res := make([]permissionResponse, len(ps)) - for i, p := range ps { - res[i] = permissionResponse{ - Action: p.Action, - Resource: resourceResponse{ - Resource: p.Resource, - }, - } - - if p.Resource.ID != nil { - name, err := svc.FindResourceName(ctx, p.Resource.Type, *p.Resource.ID) - if errors2.ErrorCode(err) == errors2.ENotFound { - continue - } - if err != nil { - return nil, err - } - res[i].Resource.Name = name - } - - if p.Resource.OrgID != nil { - name, err := svc.FindResourceName(ctx, influxdb.OrgsResourceType, *p.Resource.OrgID) - if errors2.ErrorCode(err) == errors2.ENotFound { - continue - } - if err != nil { - return nil, err - } - res[i].Resource.Organization = name - } - } - return res, nil -} - -type authsResponse struct { - Links map[string]string `json:"links"` - Auths []*authResponse `json:"authorizations"` -} - -func newAuthsResponse(as []*authResponse) *authsResponse { - return &authsResponse{ - // TODO(desa): update links to include paging and filter information - Links: map[string]string{ - "self": "/api/v2/authorizations", - }, - Auths: as, - } -} - -// handlePostAuthorization is the HTTP handler for the POST /api/v2/authorizations route. -func (h *AuthorizationHandler) handlePostAuthorization(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - req, err := decodePostAuthorizationRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - user, err := getAuthorizedUser(r, h.UserService) - if err != nil { - h.HandleHTTPError(ctx, influxdb.ErrUnableToCreateToken, w) - return - } - - userID := user.ID - if req.UserID != nil && req.UserID.Valid() { - userID = *req.UserID - } - - auth := req.toPlatform(userID) - - org, err := h.OrganizationService.FindOrganizationByID(ctx, auth.OrgID) - if err != nil { - h.HandleHTTPError(ctx, influxdb.ErrUnableToCreateToken, w) - return - } - - if err := h.AuthorizationService.CreateAuthorization(ctx, auth); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - perms, err := newPermissionsResponse(ctx, auth.Permissions, h.LookupService) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - h.log.Debug("Auth created ", zap.String("auth", fmt.Sprint(auth))) - - if err := encodeResponse(ctx, w, http.StatusCreated, newAuthResponse(auth, org, user, perms)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type postAuthorizationRequest struct { - Status influxdb.Status `json:"status"` - OrgID platform.ID `json:"orgID"` - UserID *platform.ID `json:"userID,omitempty"` - Description string `json:"description"` - Permissions []influxdb.Permission `json:"permissions"` -} - -func (p *postAuthorizationRequest) toPlatform(userID platform.ID) *influxdb.Authorization { - return &influxdb.Authorization{ - OrgID: p.OrgID, - Status: p.Status, - Description: p.Description, - Permissions: p.Permissions, - UserID: userID, - } -} - -func newPostAuthorizationRequest(a *influxdb.Authorization) (*postAuthorizationRequest, error) { - res := &postAuthorizationRequest{ - OrgID: a.OrgID, - Description: a.Description, - Permissions: a.Permissions, - Status: a.Status, - } - - if a.UserID.Valid() { - res.UserID = &a.UserID - } - - res.SetDefaults() - - return res, res.Validate() -} - -func (p 
*postAuthorizationRequest) SetDefaults() { - if p.Status == "" { - p.Status = influxdb.Active - } -} - -func (p *postAuthorizationRequest) Validate() error { - if len(p.Permissions) == 0 { - return &errors2.Error{ - Code: errors2.EInvalid, - Msg: "authorization must include permissions", - } - } - - for _, perm := range p.Permissions { - if err := perm.Valid(); err != nil { - return &errors2.Error{ - Err: err, - } - } - } - - if !p.OrgID.Valid() { - return &errors2.Error{ - Err: platform.ErrInvalidID, - Code: errors2.EInvalid, - Msg: "org id required", - } - } - - if p.Status == "" { - p.Status = influxdb.Active - } - - err := p.Status.Valid() - if err != nil { - return err - } - - return nil -} - -func decodePostAuthorizationRequest(ctx context.Context, r *http.Request) (*postAuthorizationRequest, error) { - a := &postAuthorizationRequest{} - if err := json.NewDecoder(r.Body).Decode(a); err != nil { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "invalid json structure", - Err: err, - } - } - - a.SetDefaults() - - return a, a.Validate() -} - -// handleGetAuthorizations is the HTTP handler for the GET /api/v2/authorizations route. -func (h *AuthorizationHandler) handleGetAuthorizations(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetAuthorizationsRequest(ctx, r) - if err != nil { - h.log.Info("Failed to decode request", zap.String("handler", "getAuthorizations"), zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - - opts := influxdb.FindOptions{} - as, _, err := h.AuthorizationService.FindAuthorizations(ctx, req.filter, opts) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - auths := make([]*authResponse, 0, len(as)) - for _, a := range as { - o, err := h.OrganizationService.FindOrganizationByID(ctx, a.OrgID) - if err != nil { - h.log.Info("Failed to get organization", zap.String("handler", "getAuthorizations"), zap.String("orgID", a.OrgID.String()), zap.Error(err)) - continue - } - - u, err := h.UserService.FindUserByID(ctx, a.UserID) - if err != nil { - h.log.Info("Failed to get user", zap.String("handler", "getAuthorizations"), zap.String("userID", a.UserID.String()), zap.Error(err)) - continue - } - - ps, err := newPermissionsResponse(ctx, a.Permissions, h.LookupService) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - auths = append(auths, newAuthResponse(a, o, u, ps)) - } - - h.log.Debug("Auths retrieved ", zap.String("auths", fmt.Sprint(auths))) - - if err := encodeResponse(ctx, w, http.StatusOK, newAuthsResponse(auths)); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } -} - -type getAuthorizationsRequest struct { - filter influxdb.AuthorizationFilter -} - -func decodeGetAuthorizationsRequest(ctx context.Context, r *http.Request) (*getAuthorizationsRequest, error) { - qp := r.URL.Query() - - req := &getAuthorizationsRequest{} - - userID := qp.Get("userID") - if userID != "" { - id, err := platform.IDFromString(userID) - if err != nil { - return nil, err - } - req.filter.UserID = id - } - - user := qp.Get("user") - if user != "" { - req.filter.User = &user - } - - orgID := qp.Get("orgID") - if orgID != "" { - id, err := platform.IDFromString(orgID) - if err != nil { - return nil, err - } - req.filter.OrgID = id - } - - org := qp.Get("org") - if org != "" { - req.filter.Org = &org - } - - authID := qp.Get("id") - if authID != "" { - id, err := platform.IDFromString(authID) - if err != nil { - return nil, err - } - req.filter.ID = id - } - - return req, nil -} - -// 
handleGetAuthorization is the HTTP handler for the GET /api/v2/authorizations/:id route. -func (h *AuthorizationHandler) handleGetAuthorization(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetAuthorizationRequest(ctx, r) - if err != nil { - h.log.Info("Failed to decode request", zap.String("handler", "getAuthorization"), zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - - a, err := h.AuthorizationService.FindAuthorizationByID(ctx, req.ID) - if err != nil { - // Don't log here, it should already be handled by the service - h.HandleHTTPError(ctx, err, w) - return - } - - o, err := h.OrganizationService.FindOrganizationByID(ctx, a.OrgID) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - u, err := h.UserService.FindUserByID(ctx, a.UserID) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - ps, err := newPermissionsResponse(ctx, a.Permissions, h.LookupService) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - h.log.Debug("Auth retrieved ", zap.String("auth", fmt.Sprint(a))) - - if err := encodeResponse(ctx, w, http.StatusOK, newAuthResponse(a, o, u, ps)); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } -} - -type getAuthorizationRequest struct { - ID platform.ID -} - -func decodeGetAuthorizationRequest(ctx context.Context, r *http.Request) (*getAuthorizationRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - return &getAuthorizationRequest{ - ID: i, - }, nil -} - -// handleUpdateAuthorization is the HTTP handler for the PATCH /api/v2/authorizations/:id route that updates the authorization's status and desc. 
-func (h *AuthorizationHandler) handleUpdateAuthorization(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeUpdateAuthorizationRequest(ctx, r) - if err != nil { - h.log.Info("Failed to decode request", zap.String("handler", "updateAuthorization"), zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - - a, err := h.AuthorizationService.FindAuthorizationByID(ctx, req.ID) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - a, err = h.AuthorizationService.UpdateAuthorization(ctx, a.ID, req.AuthorizationUpdate) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - o, err := h.OrganizationService.FindOrganizationByID(ctx, a.OrgID) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - u, err := h.UserService.FindUserByID(ctx, a.UserID) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - ps, err := newPermissionsResponse(ctx, a.Permissions, h.LookupService) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Auth updated", zap.String("auth", fmt.Sprint(a))) - - if err := encodeResponse(ctx, w, http.StatusOK, newAuthResponse(a, o, u, ps)); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } -} - -type updateAuthorizationRequest struct { - ID platform.ID - *influxdb.AuthorizationUpdate -} - -func decodeUpdateAuthorizationRequest(ctx context.Context, r *http.Request) (*updateAuthorizationRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - upd := &influxdb.AuthorizationUpdate{} - if err := json.NewDecoder(r.Body).Decode(upd); err != nil { - return nil, err - } - - return &updateAuthorizationRequest{ - ID: i, - AuthorizationUpdate: upd, - }, nil -} - -// handleDeleteAuthorization is the HTTP handler for the DELETE /api/v2/authorizations/:id route. 
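As a usage sketch, the two mutating endpoints above accept small JSON bodies: POST /api/v2/authorizations takes the postAuthorizationRequest fields (orgID and at least one permission required, status defaulting to "active"), while PATCH /api/v2/authorizations/:id decodes an AuthorizationUpdate (status and description). The following minimal client is illustrative only; the server address, token value, and IDs are placeholders, not values defined by this code.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	const base = "http://localhost:8086" // placeholder server address
	const token = "my-root-token"        // placeholder operator token

	// POST body decoded by decodePostAuthorizationRequest: orgID and at least one
	// permission are required by Validate(); status defaults to "active" when omitted.
	createBody := []byte(`{
		"orgID": "020f755c3c083000",
		"description": "read-only dashboards token",
		"permissions": [
			{"action": "read", "resource": {"type": "dashboards", "orgID": "020f755c3c083000"}}
		]
	}`)
	req, err := http.NewRequest("POST", base+"/api/v2/authorizations", bytes.NewReader(createBody))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Token "+token)
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	fmt.Println("create:", resp.Status)
	resp.Body.Close()

	// PATCH body decoded by decodeUpdateAuthorizationRequest: only status and
	// description are updatable. The authorization ID here is a placeholder.
	updateBody := []byte(`{"status": "inactive", "description": "rotated out"}`)
	req, err = http.NewRequest("PATCH", base+"/api/v2/authorizations/0d0a657820696e74", bytes.NewReader(updateBody))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Token "+token)
	req.Header.Set("Content-Type", "application/json")
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	fmt.Println("update:", resp.Status)
	resp.Body.Close()
}
```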
-func (h *AuthorizationHandler) handleDeleteAuthorization(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeDeleteAuthorizationRequest(ctx, r) - if err != nil { - h.log.Info("Failed to decode request", zap.String("handler", "deleteAuthorization"), zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - - if err := h.AuthorizationService.DeleteAuthorization(ctx, req.ID); err != nil { - // Don't log here, it should already be handled by the service - h.HandleHTTPError(ctx, err, w) - return - } - - h.log.Debug("Auth deleted", zap.String("authID", fmt.Sprint(req.ID))) - - w.WriteHeader(http.StatusNoContent) -} - -type deleteAuthorizationRequest struct { - ID platform.ID -} - -func decodeDeleteAuthorizationRequest(ctx context.Context, r *http.Request) (*deleteAuthorizationRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - return &deleteAuthorizationRequest{ - ID: i, - }, nil -} - -func getAuthorizedUser(r *http.Request, svc influxdb.UserService) (*influxdb.User, error) { - ctx := r.Context() - - a, err := platcontext.GetAuthorizer(ctx) - if err != nil { - return nil, err - } - - return svc.FindUserByID(ctx, a.GetUserID()) -} - -// AuthorizationService connects to Influx via HTTP using tokens to manage authorizations -type AuthorizationService struct { - Client *httpc.Client -} - -var _ influxdb.AuthorizationService = (*AuthorizationService)(nil) - -// FindAuthorizationByID finds the authorization against a remote influx server. -func (s *AuthorizationService) FindAuthorizationByID(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - var b influxdb.Authorization - err := s.Client. - Get(prefixAuthorization, id.String()). - DecodeJSON(&b). - Do(ctx) - if err != nil { - return nil, err - } - return &b, nil -} - -// FindAuthorizationByToken returns a single authorization by Token. -func (s *AuthorizationService) FindAuthorizationByToken(ctx context.Context, token string) (*influxdb.Authorization, error) { - return nil, errors.New("not supported in HTTP authorization service") -} - -// FindAuthorizations returns a list of authorizations that match filter and the total count of matching authorizations. -// Additional options provide pagination & sorting. -func (s *AuthorizationService) FindAuthorizations(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - params := influxdb.FindOptionParams(opt...) - if filter.ID != nil { - params = append(params, [2]string{"id", filter.ID.String()}) - } - if filter.UserID != nil { - params = append(params, [2]string{"userID", filter.UserID.String()}) - } - if filter.User != nil { - params = append(params, [2]string{"user", *filter.User}) - } - if filter.OrgID != nil { - params = append(params, [2]string{"orgID", filter.OrgID.String()}) - } - if filter.Org != nil { - params = append(params, [2]string{"org", *filter.Org}) - } - - var as authsResponse - err := s.Client. - Get(prefixAuthorization). - QueryParams(params...). - DecodeJSON(&as). 
- Do(ctx) - if err != nil { - return nil, 0, err - } - - auths := make([]*influxdb.Authorization, 0, len(as.Auths)) - for _, a := range as.Auths { - auths = append(auths, a.toPlatform()) - } - - return auths, len(auths), nil -} - -// CreateAuthorization creates a new authorization and sets b.ID with the new identifier. -func (s *AuthorizationService) CreateAuthorization(ctx context.Context, a *influxdb.Authorization) error { - newAuth, err := newPostAuthorizationRequest(a) - if err != nil { - return err - } - - return s.Client. - PostJSON(newAuth, prefixAuthorization). - DecodeJSON(a). - Do(ctx) -} - -// UpdateAuthorization updates the status and description if available. -func (s *AuthorizationService) UpdateAuthorization(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { - var res authResponse - err := s.Client. - PatchJSON(upd, prefixAuthorization, id.String()). - DecodeJSON(&res). - Do(ctx) - if err != nil { - return nil, err - } - - return res.toPlatform(), nil -} - -// DeleteAuthorization removes a authorization by id. -func (s *AuthorizationService) DeleteAuthorization(ctx context.Context, id platform.ID) error { - return s.Client. - Delete(prefixAuthorization, id.String()). - Do(ctx) -} diff --git a/http/auth_test.go b/http/auth_test.go deleted file mode 100644 index 316f85a10b0..00000000000 --- a/http/auth_test.go +++ /dev/null @@ -1,988 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/httprouter" - platform "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - pcontext "github.com/influxdata/influxdb/v2/context" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - platformtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -// NewMockAuthorizationBackend returns a AuthorizationBackend with mock services. 
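To illustrate the list endpoint that the HTTP client above calls, GET /api/v2/authorizations accepts the same filter parameters that decodeGetAuthorizationsRequest parses (id, user, userID, org, orgID) and returns the authsResponse envelope with a links object and an authorizations array. A minimal standard-library sketch follows; the server address, token, and org name are placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// authsEnvelope mirrors the authsResponse JSON shape produced by newAuthsResponse.
type authsEnvelope struct {
	Links          map[string]string `json:"links"`
	Authorizations []struct {
		ID          string `json:"id"`
		Token       string `json:"token"`
		Status      string `json:"status"`
		Description string `json:"description"`
		OrgID       string `json:"orgID"`
		UserID      string `json:"userID"`
	} `json:"authorizations"`
}

func main() {
	q := url.Values{}
	q.Set("org", "my-org") // any of id, user, userID, org, orgID may be supplied

	req, err := http.NewRequest("GET", "http://localhost:8086/api/v2/authorizations?"+q.Encode(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Token my-root-token") // placeholder token

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var env authsEnvelope
	if err := json.NewDecoder(resp.Body).Decode(&env); err != nil {
		panic(err)
	}
	for _, a := range env.Authorizations {
		fmt.Println(a.ID, a.Status, a.Description)
	}
}
```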
-func NewMockAuthorizationBackend(t *testing.T) *AuthorizationBackend { - return &AuthorizationBackend{ - log: zaptest.NewLogger(t), - - AuthorizationService: mock.NewAuthorizationService(), - OrganizationService: mock.NewOrganizationService(), - UserService: mock.NewUserService(), - LookupService: mock.NewLookupService(), - } -} - -func TestService_handleGetAuthorizations(t *testing.T) { - type fields struct { - AuthorizationService platform.AuthorizationService - UserService platform.UserService - OrganizationService platform.OrganizationService - } - - type args struct { - queryParams map[string][]string - } - - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get all authorizations", - fields: fields{ - &mock.AuthorizationService{ - FindAuthorizationsFn: func(ctx context.Context, filter platform.AuthorizationFilter, opts ...platform.FindOptions) ([]*platform.Authorization, int, error) { - return []*platform.Authorization{ - { - ID: platformtesting.MustIDBase16("0d0a657820696e74"), - Token: "hello", - UserID: platformtesting.MustIDBase16("2070616e656d2076"), - OrgID: platformtesting.MustIDBase16("3070616e656d2076"), - Description: "t1", - Permissions: platform.OperPermissions(), - }, - { - ID: platformtesting.MustIDBase16("6669646573207375"), - Token: "example", - UserID: platformtesting.MustIDBase16("6c7574652c206f6e"), - OrgID: platformtesting.MustIDBase16("9d70616e656d2076"), - Description: "t2", - Permissions: platform.OperPermissions(), - }, - }, 2, nil - }, - }, - &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform2.ID) (*platform.User, error) { - return &platform.User{ - ID: id, - Name: id.String(), - }, nil - }, - }, - &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform2.ID) (*platform.Organization, error) { - return &platform.Organization{ - ID: id, - Name: id.String(), - }, nil - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: fmt.Sprintf(` -{ - "links": { - "self": "/api/v2/authorizations" - }, - "authorizations": [ - { - "links": { - "user": "/api/v2/users/2070616e656d2076", - "self": "/api/v2/authorizations/0d0a657820696e74" - }, - "id": "0d0a657820696e74", - "userID": "2070616e656d2076", - "user": "2070616e656d2076", - "org": "3070616e656d2076", - "orgID": "3070616e656d2076", - "status": "", - "token": "hello", - "description": "t1", - "permissions": %s, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z" - }, - { - "links": { - "user": "/api/v2/users/6c7574652c206f6e", - "self": "/api/v2/authorizations/6669646573207375" - }, - "id": "6669646573207375", - "userID": "6c7574652c206f6e", - "user": "6c7574652c206f6e", - "org": "9d70616e656d2076", - "orgID": "9d70616e656d2076", - "status": "", - "token": "example", - "description": "t2", - "permissions": %s, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z" - } - ] -} -`, - MustMarshal(platform.OperPermissions()), - MustMarshal(platform.OperPermissions())), - }, - }, - { - name: "skip authorizations with no org", - fields: fields{ - &mock.AuthorizationService{ - FindAuthorizationsFn: func(ctx context.Context, filter platform.AuthorizationFilter, opts ...platform.FindOptions) ([]*platform.Authorization, int, error) { - return []*platform.Authorization{ - { - ID: platformtesting.MustIDBase16("0d0a657820696e74"), - 
Token: "hello", - UserID: platformtesting.MustIDBase16("2070616e656d2076"), - OrgID: platformtesting.MustIDBase16("3070616e656d2076"), - Description: "t1", - Permissions: platform.OperPermissions(), - }, - { - ID: platformtesting.MustIDBase16("6669646573207375"), - Token: "example", - UserID: platformtesting.MustIDBase16("6c7574652c206f6e"), - OrgID: platformtesting.MustIDBase16("9d70616e656d2076"), - Description: "t2", - Permissions: platform.OperPermissions(), - }, - }, 2, nil - }, - }, - &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform2.ID) (*platform.User, error) { - if id.String() == "2070616e656d2076" { - return &platform.User{ - ID: id, - Name: id.String(), - }, nil - } - return nil, &errors.Error{} - }, - }, - &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform2.ID) (*platform.Organization, error) { - return &platform.Organization{ - ID: id, - Name: id.String(), - }, nil - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: fmt.Sprintf(` -{ - "links": { - "self": "/api/v2/authorizations" - }, - "authorizations": [ - { - "links": { - "user": "/api/v2/users/2070616e656d2076", - "self": "/api/v2/authorizations/0d0a657820696e74" - }, - "id": "0d0a657820696e74", - "userID": "2070616e656d2076", - "user": "2070616e656d2076", - "org": "3070616e656d2076", - "orgID": "3070616e656d2076", - "status": "", - "token": "hello", - "description": "t1", - "permissions": %s, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z" - } - ] -} -`, - MustMarshal(platform.OperPermissions())), - }, - }, - { - name: "skip authorizations with no user", - fields: fields{ - &mock.AuthorizationService{ - FindAuthorizationsFn: func(ctx context.Context, filter platform.AuthorizationFilter, opts ...platform.FindOptions) ([]*platform.Authorization, int, error) { - return []*platform.Authorization{ - { - ID: platformtesting.MustIDBase16("0d0a657820696e74"), - Token: "hello", - UserID: platformtesting.MustIDBase16("2070616e656d2076"), - OrgID: platformtesting.MustIDBase16("3070616e656d2076"), - Description: "t1", - Permissions: platform.OperPermissions(), - }, - { - ID: platformtesting.MustIDBase16("6669646573207375"), - Token: "example", - UserID: platformtesting.MustIDBase16("6c7574652c206f6e"), - OrgID: platformtesting.MustIDBase16("9d70616e656d2076"), - Description: "t2", - Permissions: platform.OperPermissions(), - }, - }, 2, nil - }, - }, - &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform2.ID) (*platform.User, error) { - return &platform.User{ - ID: id, - Name: id.String(), - }, nil - }, - }, - &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform2.ID) (*platform.Organization, error) { - if id.String() == "3070616e656d2076" { - return &platform.Organization{ - ID: id, - Name: id.String(), - }, nil - } - return nil, &errors.Error{} - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: fmt.Sprintf(` -{ - "links": { - "self": "/api/v2/authorizations" - }, - "authorizations": [ - { - "links": { - "user": "/api/v2/users/2070616e656d2076", - "self": "/api/v2/authorizations/0d0a657820696e74" - }, - "id": "0d0a657820696e74", - "userID": "2070616e656d2076", - "user": "2070616e656d2076", - "org": "3070616e656d2076", - "orgID": "3070616e656d2076", - "status": "", - "token": "hello", - "description": "t1", - "permissions": 
%s, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z" - } - ] -} -`, - MustMarshal(platform.OperPermissions())), - }, - }, - { - name: "get all authorizations when there are none", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - FindAuthorizationsFn: func(ctx context.Context, filter platform.AuthorizationFilter, opts ...platform.FindOptions) ([]*platform.Authorization, int, error) { - return []*platform.Authorization{}, 0, nil - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/authorizations" - }, - "authorizations": [] -}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - authorizationBackend := NewMockAuthorizationBackend(t) - authorizationBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - authorizationBackend.AuthorizationService = tt.fields.AuthorizationService - authorizationBackend.UserService = tt.fields.UserService - authorizationBackend.OrganizationService = tt.fields.OrganizationService - h := NewAuthorizationHandler(zaptest.NewLogger(t), authorizationBackend) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - qp := r.URL.Query() - for k, vs := range tt.args.queryParams { - for _, v := range vs { - qp.Add(k, v) - } - } - r.URL.RawQuery = qp.Encode() - - w := httptest.NewRecorder() - - h.handleGetAuthorizations(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetAuthorizations() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetAuthorizations() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetAuthorizations(). error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. 
handleGetAuthorizations() = ***%s***", tt.name, diff) - } - - }) - } -} - -func TestService_handleGetAuthorization(t *testing.T) { - type fields struct { - AuthorizationService platform.AuthorizationService - UserService platform.UserService - OrganizationService platform.OrganizationService - LookupService platform.LookupService - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get a authorization by id", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - FindAuthorizationByIDFn: func(ctx context.Context, id platform2.ID) (*platform.Authorization, error) { - if id == platformtesting.MustIDBase16("020f755c3c082000") { - return &platform.Authorization{ - ID: platformtesting.MustIDBase16("020f755c3c082000"), - UserID: platformtesting.MustIDBase16("020f755c3c082000"), - OrgID: platformtesting.MustIDBase16("020f755c3c083000"), - Permissions: []platform.Permission{ - { - Action: platform.ReadAction, - Resource: platform.Resource{ - Type: platform.BucketsResourceType, - OrgID: platformtesting.IDPtr(platformtesting.MustIDBase16("020f755c3c083000")), - ID: func() *platform2.ID { - id := platformtesting.MustIDBase16("020f755c3c084000") - return &id - }(), - }, - }, - }, - Token: "hello", - }, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - UserService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform2.ID) (*platform.User, error) { - return &platform.User{ - ID: id, - Name: "u1", - }, nil - }, - }, - OrganizationService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform2.ID) (*platform.Organization, error) { - return &platform.Organization{ - ID: id, - Name: "o1", - }, nil - }, - }, - LookupService: &mock.LookupService{ - NameFn: func(ctx context.Context, resource platform.ResourceType, id platform2.ID) (string, error) { - switch resource { - case platform.BucketsResourceType: - return "b1", nil - case platform.OrgsResourceType: - return "o1", nil - } - return "", fmt.Errorf("bad resource type %s", resource) - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "description": "", - "id": "020f755c3c082000", - "links": { - "self": "/api/v2/authorizations/020f755c3c082000", - "user": "/api/v2/users/020f755c3c082000" - }, - "org": "o1", - "orgID": "020f755c3c083000", - "permissions": [ - { - "action": "read", - "resource": { - "type": "buckets", - "orgID": "020f755c3c083000", - "id": "020f755c3c084000", - "name": "b1", - "org": "o1" - } - } - ], - "status": "", - "token": "hello", - "user": "u1", - "userID": "020f755c3c082000" -} -`, - }, - }, - { - name: "not found", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - FindAuthorizationByIDFn: func(ctx context.Context, id platform2.ID) (*platform.Authorization, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "authorization not found", - } - }, - }, - UserService: &mock.UserService{}, - OrganizationService: &mock.OrganizationService{}, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - body: `{"code":"not found","message":"authorization not found"}`, - }, - }, - } - - for _, tt := range tests { - 
t.Run(tt.name, func(t *testing.T) { - authorizationBackend := NewMockAuthorizationBackend(t) - authorizationBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - authorizationBackend.AuthorizationService = tt.fields.AuthorizationService - authorizationBackend.UserService = tt.fields.UserService - authorizationBackend.OrganizationService = tt.fields.OrganizationService - authorizationBackend.LookupService = tt.fields.LookupService - h := NewAuthorizationHandler(zaptest.NewLogger(t), authorizationBackend) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - - w := httptest.NewRecorder() - - h.handleGetAuthorization(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Logf("headers: %v body: %s", res.Header, body) - t.Errorf("%q. handleGetAuthorization() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetAuthorization() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetAuthorization. error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. handleGetAuthorization() = -got/+want %s**", tt.name, diff) - } - }) - } -} - -func TestService_handlePostAuthorization(t *testing.T) { - type fields struct { - AuthorizationService platform.AuthorizationService - UserService platform.UserService - OrganizationService platform.OrganizationService - LookupService platform.LookupService - } - type args struct { - session *platform.Authorization - authorization *platform.Authorization - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "create a new authorization", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - CreateAuthorizationFn: func(ctx context.Context, c *platform.Authorization) error { - c.ID = platformtesting.MustIDBase16("020f755c3c082000") - c.Token = "new-test-token" - return nil - }, - }, - LookupService: &mock.LookupService{ - NameFn: func(ctx context.Context, resource platform.ResourceType, id platform2.ID) (string, error) { - switch resource { - case platform.BucketsResourceType: - return "b1", nil - case platform.OrgsResourceType: - return "o1", nil - } - return "", fmt.Errorf("bad resource type %s", resource) - }, - }, - UserService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform2.ID) (*platform.User, error) { - if !id.Valid() { - return nil, platform2.ErrInvalidID - } - return &platform.User{ - ID: id, - Name: "u1", - }, nil - }, - }, - OrganizationService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform2.ID) (*platform.Organization, error) { - if !id.Valid() { - return nil, platform2.ErrInvalidID - } - return &platform.Organization{ - ID: id, - Name: "o1", - }, nil - }, - }, - }, - args: args{ - session: &platform.Authorization{ - Token: "session-token", - ID: platformtesting.MustIDBase16("020f755c3c082000"), - UserID: platformtesting.MustIDBase16("aaaaaaaaaaaaaaaa"), - OrgID: platformtesting.MustIDBase16("020f755c3c083000"), - 
Description: "can write to authorization resource", - Permissions: []platform.Permission{ - { - Action: platform.WriteAction, - Resource: platform.Resource{ - Type: platform.AuthorizationsResourceType, - OrgID: platformtesting.IDPtr(platformtesting.MustIDBase16("020f755c3c083000")), - }, - }, - }, - }, - authorization: &platform.Authorization{ - ID: platformtesting.MustIDBase16("020f755c3c082000"), - OrgID: platformtesting.MustIDBase16("020f755c3c083000"), - Description: "only read dashboards sucka", - Permissions: []platform.Permission{ - { - Action: platform.ReadAction, - Resource: platform.Resource{ - Type: platform.DashboardsResourceType, - OrgID: platformtesting.IDPtr(platformtesting.MustIDBase16("020f755c3c083000")), - }, - }, - }, - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "description": "only read dashboards sucka", - "id": "020f755c3c082000", - "links": { - "self": "/api/v2/authorizations/020f755c3c082000", - "user": "/api/v2/users/aaaaaaaaaaaaaaaa" - }, - "org": "o1", - "orgID": "020f755c3c083000", - "permissions": [ - { - "action": "read", - "resource": { - "type": "dashboards", - "orgID": "020f755c3c083000", - "org": "o1" - } - } - ], - "status": "active", - "token": "new-test-token", - "user": "u1", - "userID": "aaaaaaaaaaaaaaaa" -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - authorizationBackend := NewMockAuthorizationBackend(t) - authorizationBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - authorizationBackend.AuthorizationService = tt.fields.AuthorizationService - authorizationBackend.UserService = tt.fields.UserService - authorizationBackend.OrganizationService = tt.fields.OrganizationService - authorizationBackend.LookupService = tt.fields.LookupService - h := NewAuthorizationHandler(zaptest.NewLogger(t), authorizationBackend) - - req, err := newPostAuthorizationRequest(tt.args.authorization) - if err != nil { - t.Fatalf("failed to create new authorization request: %v", err) - } - b, err := json.Marshal(req) - if err != nil { - t.Fatalf("failed to unmarshal authorization: %v", err) - } - - r := httptest.NewRequest("GET", "http://any.url", bytes.NewReader(b)) - w := httptest.NewRecorder() - - ctx := pcontext.SetAuthorizer(context.Background(), tt.args.session) - r = r.WithContext(ctx) - - h.handlePostAuthorization(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Logf("headers: %v body: %s", res.Header, body) - t.Errorf("%q. handlePostAuthorization() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePostAuthorization() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePostAuthorization(). error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. 
handlePostAuthorization() = ***%s***", tt.name, diff) - } - }) - } -} - -func TestService_handleDeleteAuthorization(t *testing.T) { - type fields struct { - AuthorizationService platform.AuthorizationService - UserService platform.UserService - OrganizationService platform.OrganizationService - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "remove a authorization by id", - fields: fields{ - &mock.AuthorizationService{ - DeleteAuthorizationFn: func(ctx context.Context, id platform2.ID) error { - if id == platformtesting.MustIDBase16("020f755c3c082000") { - return nil - } - - return fmt.Errorf("wrong id") - }, - }, - &mock.UserService{}, - &mock.OrganizationService{}, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNoContent, - }, - }, - { - name: "authorization not found", - fields: fields{ - &mock.AuthorizationService{ - DeleteAuthorizationFn: func(ctx context.Context, id platform2.ID) error { - return &errors.Error{ - Code: errors.ENotFound, - Msg: "authorization not found", - } - }, - }, - &mock.UserService{}, - &mock.OrganizationService{}, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - body: `{"code":"not found","message":"authorization not found"}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - authorizationBackend := NewMockAuthorizationBackend(t) - authorizationBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - authorizationBackend.AuthorizationService = tt.fields.AuthorizationService - authorizationBackend.UserService = tt.fields.UserService - authorizationBackend.OrganizationService = tt.fields.OrganizationService - h := NewAuthorizationHandler(zaptest.NewLogger(t), authorizationBackend) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - - w := httptest.NewRecorder() - - h.handleDeleteAuthorization(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleDeleteAuthorization() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleDeleteAuthorization() = %v, want %v", tt.name, content, tt.wants.contentType) - } - - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleDeleteAuthorization(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handleDeleteAuthorization() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func initAuthorizationService(f platformtesting.AuthorizationFields, t *testing.T) (platform.AuthorizationService, string, func()) { - t.Helper() - if t.Name() == "TestAuthorizationService_FindAuthorizations/find_authorization_by_token" { - /* - TODO(goller): need a secure way to communicate get - authorization by token string via headers or something - */ - t.Skip("TestAuthorizationService_FindAuthorizations/find_authorization_by_token skipped because user tokens cannot be queried") - } - - if t.Name() == "TestAuthorizationService_CreateAuthorization/providing_a_non_existing_user_is_invalid" { - t.Skip("HTTP authorization service does not required a user id on the authentication struct. We get the user from the session token.") - } - - store := platformtesting.NewTestInmemStore(t) - tenantStore := tenant.NewStore(store) - tenantStore.OrgIDGen = f.OrgIDGenerator - tenantService := tenant.NewService(tenantStore) - - authStore, err := authorization.NewStore(store) - if err != nil { - t.Fatal(err) - } - authService := authorization.NewService(authStore, tenantService) - - svc := kv.NewService(zaptest.NewLogger(t), store, tenantService) - svc.IDGenerator = f.IDGenerator - svc.TokenGenerator = f.TokenGenerator - svc.TimeGenerator = f.TimeGenerator - - ctx := context.Background() - - for _, u := range f.Users { - if err := tenantService.CreateUser(ctx, u); err != nil { - t.Fatalf("failed to populate users") - } - } - - for _, o := range f.Orgs { - if err := tenantService.CreateOrganization(ctx, o); err != nil { - t.Fatalf("failed to populate orgs") - } - } - - var token string - - for _, a := range f.Authorizations { - if err := authService.CreateAuthorization(ctx, a); err != nil { - t.Fatalf("failed to populate authorizations") - } - - token = a.Token - } - - mus := &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform2.ID) (*platform.User, error) { - return &platform.User{}, nil - }, - } - - authorizationBackend := NewMockAuthorizationBackend(t) - authorizationBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - authorizationBackend.AuthorizationService = authService - authorizationBackend.UserService = mus - authorizationBackend.OrganizationService = tenantService - authorizationBackend.LookupService = &mock.LookupService{ - NameFn: func(ctx context.Context, resource platform.ResourceType, id platform2.ID) (string, error) { - switch resource { - case platform.BucketsResourceType: - return "b1", nil - case platform.OrgsResourceType: - return "o1", nil - } - return "", fmt.Errorf("bad resource type %s", resource) - }, - } - - authZ := NewAuthorizationHandler(zaptest.NewLogger(t), authorizationBackend) - authN := NewAuthenticationHandler(zaptest.NewLogger(t), kithttp.NewErrorHandler(zaptest.NewLogger(t))) - authN.AuthorizationService = authService - authN.Handler = authZ - authN.UserService = mus - - server := httptest.NewServer(authN) - - httpClient, err := NewHTTPClient(server.URL, token, false) - if err != nil { - t.Fatal(err) - } - - done := server.Close - - return &AuthorizationService{Client: httpClient}, "", done -} - -func TestAuthorizationService_CreateAuthorization(t *testing.T) { - platformtesting.CreateAuthorization(initAuthorizationService, t) -} - -func TestAuthorizationService_FindAuthorizationByID(t *testing.T) { - platformtesting.FindAuthorizationByID(initAuthorizationService, t) -} - -func TestAuthorizationService_FindAuthorizationByToken(t *testing.T) { - 
/* - TODO(goller): need a secure way to communicate get - authorization by token string via headers or something - */ - t.Skip() - platformtesting.FindAuthorizationByToken(initAuthorizationService, t) -} - -func TestAuthorizationService_FindAuthorizations(t *testing.T) { - platformtesting.FindAuthorizations(initAuthorizationService, t) -} - -func TestAuthorizationService_DeleteAuthorization(t *testing.T) { - platformtesting.DeleteAuthorization(initAuthorizationService, t) -} - -func TestAuthorizationService_UpdateAuthorization(t *testing.T) { - platformtesting.UpdateAuthorization(initAuthorizationService, t) -} - -func MustMarshal(o interface{}) []byte { - b, _ := json.Marshal(o) - return b -} diff --git a/http/authentication_middleware.go b/http/authentication_middleware.go deleted file mode 100644 index 3b9bb4be526..00000000000 --- a/http/authentication_middleware.go +++ /dev/null @@ -1,185 +0,0 @@ -package http - -import ( - "context" - "errors" - "fmt" - "net/http" - "time" - - "github.com/influxdata/httprouter" - platform "github.com/influxdata/influxdb/v2" - platcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/jsonweb" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/session" - "github.com/opentracing/opentracing-go" - "go.uber.org/zap" -) - -// AuthenticationHandler is a middleware for authenticating incoming requests. -type AuthenticationHandler struct { - errors2.HTTPErrorHandler - log *zap.Logger - - AuthorizationService platform.AuthorizationService - SessionService platform.SessionService - UserService platform.UserService - TokenParser *jsonweb.TokenParser - SessionRenewDisabled bool - - // This is only really used for it's lookup method the specific http - // handler used to register routes does not matter. - noAuthRouter *httprouter.Router - - Handler http.Handler -} - -// NewAuthenticationHandler creates an authentication handler. -func NewAuthenticationHandler(log *zap.Logger, h errors2.HTTPErrorHandler) *AuthenticationHandler { - return &AuthenticationHandler{ - log: log, - HTTPErrorHandler: h, - Handler: http.NotFoundHandler(), - TokenParser: jsonweb.NewTokenParser(jsonweb.EmptyKeyStore), - noAuthRouter: httprouter.New(), - } -} - -// RegisterNoAuthRoute excludes routes from needing authentication. -func (h *AuthenticationHandler) RegisterNoAuthRoute(method, path string) { - // the handler specified here does not matter. - h.noAuthRouter.HandlerFunc(method, path, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) -} - -const ( - tokenAuthScheme = "token" - sessionAuthScheme = "session" -) - -// ProbeAuthScheme probes the http request for the requests for token or cookie session. -func ProbeAuthScheme(r *http.Request) (string, error) { - _, tokenErr := GetToken(r) - _, sessErr := session.DecodeCookieSession(r.Context(), r) - - if tokenErr != nil && sessErr != nil { - return "", fmt.Errorf("token required") - } - - if tokenErr == nil { - return tokenAuthScheme, nil - } - - return sessionAuthScheme, nil -} - -func (h *AuthenticationHandler) unauthorized(ctx context.Context, w http.ResponseWriter, err error) { - h.log.Info("Unauthorized", zap.Error(err)) - UnauthorizedError(ctx, h, w) -} - -// ServeHTTP extracts the session or token from the http request and places the resulting authorizer on the request context. 
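For context, the handler above wraps another http.Handler and authenticates each request by token or session before delegating to it. A minimal wiring sketch, mirroring the construction used in the tests later in this diff; the services are mocks purely to keep the example compilable, and the listen address and no-auth path are placeholders.

```go
package main

import (
	"log"
	"net/http"

	platformhttp "github.com/influxdata/influxdb/v2/http"
	kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
	"github.com/influxdata/influxdb/v2/mock"
	"go.uber.org/zap"
)

func main() {
	logger := zap.NewNop()

	// The protected handler that only authenticated requests should reach.
	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("authenticated\n"))
	})

	h := platformhttp.NewAuthenticationHandler(logger, kithttp.NewErrorHandler(logger))
	h.AuthorizationService = mock.NewAuthorizationService() // stand-ins; a real server injects its own services
	h.SessionService = mock.NewSessionService()
	h.UserService = mock.NewUserService()
	h.Handler = api

	// Paths registered here bypass authentication entirely ("/health" is a placeholder).
	h.RegisterNoAuthRoute("GET", "/health")

	log.Fatal(http.ListenAndServe(":8080", h)) // placeholder listen address
}
```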
-func (h *AuthenticationHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if handler, _, _ := h.noAuthRouter.Lookup(r.Method, r.URL.Path); handler != nil { - h.Handler.ServeHTTP(w, r) - return - } - - ctx := r.Context() - scheme, err := ProbeAuthScheme(r) - if err != nil { - h.unauthorized(ctx, w, err) - return - } - - var auth platform.Authorizer - switch scheme { - case tokenAuthScheme: - auth, err = h.extractAuthorization(ctx, r) - case sessionAuthScheme: - auth, err = h.extractSession(ctx, r) - default: - // TODO: this error will be nil if it gets here, this should be remedied with some - // sentinel error I'm thinking - err = errors.New("invalid auth scheme") - } - if err != nil { - h.unauthorized(ctx, w, err) - return - } - - // jwt based auth is permission based rather than identity based - // and therefor has no associated user. if the user ID is invalid - // disregard the user active check - if auth.GetUserID().Valid() { - if err = h.isUserActive(ctx, auth); err != nil { - InactiveUserError(ctx, h, w) - return - } - } - - ctx = platcontext.SetAuthorizer(ctx, auth) - - if span := opentracing.SpanFromContext(ctx); span != nil { - span.SetTag("user_id", auth.GetUserID().String()) - } - - h.Handler.ServeHTTP(w, r.WithContext(ctx)) -} - -func (h *AuthenticationHandler) isUserActive(ctx context.Context, auth platform.Authorizer) error { - u, err := h.UserService.FindUserByID(ctx, auth.GetUserID()) - if err != nil { - return err - } - - if u.Status != "inactive" { - return nil - } - - return &errors2.Error{Code: errors2.EForbidden, Msg: "User is inactive"} -} - -func (h *AuthenticationHandler) extractAuthorization(ctx context.Context, r *http.Request) (platform.Authorizer, error) { - t, err := GetToken(r) - if err != nil { - return nil, err - } - - token, err := h.TokenParser.Parse(t) - if err == nil { - return token, nil - } - - // if the error returned signifies ths token is - // not a well formed JWT then use it as a lookup - // key for its associated authorization - // otherwise return the error - if !jsonweb.IsMalformedError(err) { - return nil, err - } - - return h.AuthorizationService.FindAuthorizationByToken(ctx, t) -} - -func (h *AuthenticationHandler) extractSession(ctx context.Context, r *http.Request) (*platform.Session, error) { - k, err := session.DecodeCookieSession(ctx, r) - if err != nil { - return nil, err - } - - s, err := h.SessionService.FindSession(ctx, k) - if err != nil { - return nil, err - } - - if !h.SessionRenewDisabled { - // if the session is not expired, renew the session - err = h.SessionService.RenewSession(ctx, s, time.Now().Add(platform.RenewSessionTime)) - if err != nil { - return nil, err - } - } - - return s, err -} diff --git a/http/authentication_test.go b/http/authentication_test.go deleted file mode 100644 index f4f853c1bef..00000000000 --- a/http/authentication_test.go +++ /dev/null @@ -1,400 +0,0 @@ -package http_test - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - platformhttp "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/jsonweb" - "github.com/influxdata/influxdb/v2/kit/platform" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/session" - "go.uber.org/zap/zaptest" -) - -const token = 
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJjbG91ZDIuaW5mbHV4ZGF0YS5jb20iLCJhdWQiOiJnYXRld2F5LmluZmx1eGRhdGEuY29tIiwiaWF0IjoxNTY4NjI4OTgwLCJraWQiOiJzb21lLWtleSIsInBlcm1pc3Npb25zIjpbeyJhY3Rpb24iOiJ3cml0ZSIsInJlc291cmNlIjp7InR5cGUiOiJidWNrZXRzIiwiaWQiOiIwMDAwMDAwMDAwMDAwMDAxIiwib3JnSUQiOiIwMDAwMDAwMDAwMDAwMDAyIn19XX0.74vjbExiOd702VSIMmQWaDT_GFvUI0-_P-SfQ_OOHB0" - -var one = platform.ID(1) - -func TestAuthenticationHandler(t *testing.T) { - type fields struct { - AuthorizationService influxdb.AuthorizationService - SessionService influxdb.SessionService - UserService influxdb.UserService - TokenParser *jsonweb.TokenParser - } - type args struct { - token string - session string - } - type wants struct { - code int - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "session provided", - fields: fields{ - AuthorizationService: mock.NewAuthorizationService(), - SessionService: &mock.SessionService{ - FindSessionFn: func(ctx context.Context, key string) (*influxdb.Session, error) { - return &influxdb.Session{}, nil - }, - RenewSessionFn: func(ctx context.Context, session *influxdb.Session, expiredAt time.Time) error { - return nil - }, - }, - }, - args: args{ - session: "abc123", - }, - wants: wants{ - code: http.StatusOK, - }, - }, - { - name: "session does not exist", - fields: fields{ - AuthorizationService: mock.NewAuthorizationService(), - SessionService: &mock.SessionService{ - FindSessionFn: func(ctx context.Context, key string) (*influxdb.Session, error) { - return nil, fmt.Errorf("session not found") - }, - }, - }, - args: args{ - session: "abc123", - }, - wants: wants{ - code: http.StatusUnauthorized, - }, - }, - { - name: "token provided", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) { - return &influxdb.Authorization{}, nil - }, - }, - SessionService: mock.NewSessionService(), - }, - args: args{ - token: "abc123", - }, - wants: wants{ - code: http.StatusOK, - }, - }, - { - name: "token does not exist", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) { - return nil, fmt.Errorf("authorization not found") - }, - }, - SessionService: mock.NewSessionService(), - }, - args: args{ - token: "abc123", - }, - wants: wants{ - code: http.StatusUnauthorized, - }, - }, - { - name: "associated user is inactive", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) { - return &influxdb.Authorization{UserID: one}, nil - }, - }, - SessionService: mock.NewSessionService(), - UserService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - if !id.Valid() { - panic("user service should only be called with valid user ID") - } - - return &influxdb.User{Status: "inactive"}, nil - }, - }, - }, - args: args{ - token: "abc123", - }, - wants: wants{ - code: http.StatusForbidden, - }, - }, - { - name: "no auth provided", - fields: fields{ - AuthorizationService: mock.NewAuthorizationService(), - SessionService: mock.NewSessionService(), - }, - args: args{}, - wants: wants{ - code: http.StatusUnauthorized, - }, - }, - { - name: "jwt provided", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - 
FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) { - return nil, fmt.Errorf("authorization not found") - }, - }, - SessionService: mock.NewSessionService(), - UserService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - // ensure that this is not reached as jwt token authorizer produces - // invalid user id - if !id.Valid() { - panic("user service should only be called with valid user ID") - } - - return nil, errors.New("user not found") - }, - }, - TokenParser: jsonweb.NewTokenParser(jsonweb.KeyStoreFunc(func(string) ([]byte, error) { - return []byte("correct-key"), nil - })), - }, - args: args{ - token: token, - }, - wants: wants{ - code: http.StatusOK, - }, - }, - { - name: "jwt provided - bad signature", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) { - panic("token lookup attempted") - }, - }, - SessionService: mock.NewSessionService(), - TokenParser: jsonweb.NewTokenParser(jsonweb.KeyStoreFunc(func(string) ([]byte, error) { - return []byte("incorrect-key"), nil - })), - }, - args: args{ - token: token, - }, - wants: wants{ - code: http.StatusUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - }) - - h := platformhttp.NewAuthenticationHandler(zaptest.NewLogger(t), kithttp.NewErrorHandler(zaptest.NewLogger(t))) - h.AuthorizationService = tt.fields.AuthorizationService - h.SessionService = tt.fields.SessionService - h.UserService = &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{}, nil - }, - } - - if tt.fields.UserService != nil { - h.UserService = tt.fields.UserService - } - - if tt.fields.TokenParser != nil { - h.TokenParser = tt.fields.TokenParser - } - - h.Handler = handler - - w := httptest.NewRecorder() - r := httptest.NewRequest("POST", "http://any.url", nil) - - if tt.args.session != "" { - session.SetCookieSession(tt.args.session, r) - } - - if tt.args.token != "" { - platformhttp.SetToken(tt.args.token, r) - } - - h.ServeHTTP(w, r) - - if got, want := w.Code, tt.wants.code; got != want { - t.Errorf("expected status code to be %d got %d", want, got) - } - }) - } -} - -func TestProbeAuthScheme(t *testing.T) { - type args struct { - token string - session string - } - type wants struct { - scheme string - err error - } - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "session provided", - args: args{ - session: "abc123", - }, - wants: wants{ - scheme: "session", - }, - }, - { - name: "token provided", - args: args{ - token: "abc123", - }, - wants: wants{ - scheme: "token", - }, - }, - { - name: "no auth provided", - args: args{}, - wants: wants{ - err: fmt.Errorf("unknown authentication scheme"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := httptest.NewRequest("POST", "http://any.url", nil) - - if tt.args.session != "" { - session.SetCookieSession(tt.args.session, r) - } - - if tt.args.token != "" { - platformhttp.SetToken(tt.args.token, r) - } - - scheme, err := platformhttp.ProbeAuthScheme(r) - if (err != nil) != (tt.wants.err != nil) { - t.Errorf("unexpected error got %v want %v", err, tt.wants.err) - return - } - - if got, want := 
scheme, tt.wants.scheme; got != want { - t.Errorf("expected scheme to be %s got %s", want, got) - } - }) - } -} - -func TestAuthenticationHandler_NoAuthRoutes(t *testing.T) { - type route struct { - method string - path string - } - type fields struct { - excludedRoutes []route - } - type args struct { - method string - path string - } - type wants struct { - code int - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "route is no auth route", - fields: fields{ - excludedRoutes: []route{ - { - method: "GET", - path: "/debug/pprof", - }, - }, - }, - args: args{ - method: "GET", - path: "/debug/pprof", - }, - wants: wants{ - code: http.StatusOK, - }, - }, - { - name: "route is an auth route", - fields: fields{ - excludedRoutes: []route{}, - }, - args: args{ - method: "POST", - path: "/api/v2/write", - }, - wants: wants{ - code: http.StatusUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - }) - - h := platformhttp.NewAuthenticationHandler(zaptest.NewLogger(t), kithttp.NewErrorHandler(zaptest.NewLogger(t))) - h.AuthorizationService = mock.NewAuthorizationService() - h.SessionService = mock.NewSessionService() - h.Handler = handler - - for _, rte := range tt.fields.excludedRoutes { - h.RegisterNoAuthRoute(rte.method, rte.path) - } - - w := httptest.NewRecorder() - r := httptest.NewRequest(tt.args.method, tt.args.path, nil) - - h.ServeHTTP(w, r) - - if got, want := w.Code, tt.wants.code; got != want { - t.Errorf("expected status code to be %d got %d", want, got) - } - }) - } -} diff --git a/http/backup_service.go b/http/backup_service.go deleted file mode 100644 index 152dfd79bfb..00000000000 --- a/http/backup_service.go +++ /dev/null @@ -1,199 +0,0 @@ -package http - -import ( - "fmt" - "io" - "mime/multipart" - "net/http" - "strconv" - "time" - - "github.com/NYTimes/gziphandler" - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "go.uber.org/zap" -) - -// BackupBackend is all services and associated parameters required to construct the BackupHandler. -type BackupBackend struct { - Logger *zap.Logger - errors.HTTPErrorHandler - - BackupService influxdb.BackupService - SqlBackupRestoreService influxdb.SqlBackupRestoreService - BucketManifestWriter influxdb.BucketManifestWriter -} - -// NewBackupBackend returns a new instance of BackupBackend. -func NewBackupBackend(b *APIBackend) *BackupBackend { - return &BackupBackend{ - Logger: b.Logger.With(zap.String("handler", "backup")), - - HTTPErrorHandler: b.HTTPErrorHandler, - BackupService: b.BackupService, - SqlBackupRestoreService: b.SqlBackupRestoreService, - BucketManifestWriter: b.BucketManifestWriter, - } -} - -// BackupHandler is http handler for backup service. 
-type BackupHandler struct { - *httprouter.Router - errors.HTTPErrorHandler - Logger *zap.Logger - - BackupService influxdb.BackupService - SqlBackupRestoreService influxdb.SqlBackupRestoreService - BucketManifestWriter influxdb.BucketManifestWriter -} - -const ( - prefixBackup = "/api/v2/backup" - backupKVStorePath = prefixBackup + "/kv" - backupShardPath = prefixBackup + "/shards/:shardID" - backupMetadataPath = prefixBackup + "/metadata" -) - -// NewBackupHandler creates a new handler at /api/v2/backup to receive backup requests. -func NewBackupHandler(b *BackupBackend) *BackupHandler { - h := &BackupHandler{ - HTTPErrorHandler: b.HTTPErrorHandler, - Router: NewRouter(b.HTTPErrorHandler), - Logger: b.Logger, - BackupService: b.BackupService, - SqlBackupRestoreService: b.SqlBackupRestoreService, - BucketManifestWriter: b.BucketManifestWriter, - } - - h.HandlerFunc(http.MethodGet, backupKVStorePath, h.handleBackupKVStore) // Deprecated - - h.Handler(http.MethodGet, backupShardPath, gziphandler.GzipHandler(http.HandlerFunc(h.handleBackupShard))) - h.Handler(http.MethodGet, backupMetadataPath, gziphandler.GzipHandler(h.requireOperPermissions(http.HandlerFunc(h.handleBackupMetadata)))) - - return h -} - -// requireOperPermissions returns an "unauthorized" response for requests that do not have OperPermissions. -// This is needed for the handleBackupMetadata handler, which sets a header prior to -// accessing any methods on the BackupService which would also return an "authorized" response. -func (h *BackupHandler) requireOperPermissions(next http.Handler) http.HandlerFunc { - fn := func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - if err := authorizer.IsAllowedAll(ctx, influxdb.OperPermissions()); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) -} - -func (h *BackupHandler) handleBackupKVStore(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "BackupHandler.handleBackupKVStore") - defer span.Finish() - - ctx := r.Context() - - if err := h.BackupService.BackupKVStore(ctx, w); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } -} - -func (h *BackupHandler) handleBackupShard(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "BackupHandler.handleBackupShard") - defer span.Finish() - - ctx := r.Context() - - params := httprouter.ParamsFromContext(ctx) - shardID, err := strconv.ParseUint(params.ByName("shardID"), 10, 64) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - var since time.Time - if s := r.URL.Query().Get("since"); s != "" { - if since, err = time.ParseInLocation(time.RFC3339, s, time.UTC); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - } - - if err := h.BackupService.BackupShard(ctx, w, shardID, since); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } -} - -func (h *BackupHandler) handleBackupMetadata(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "BackupHandler.handleBackupMetadata") - defer span.Finish() - - ctx := r.Context() - - // Lock the sqlite and bolt databases prior to writing the response to prevent - // data inconsistencies. 
- h.BackupService.RLockKVStore() - defer h.BackupService.RUnlockKVStore() - - h.SqlBackupRestoreService.RLockSqlStore() - defer h.SqlBackupRestoreService.RUnlockSqlStore() - - dataWriter := multipart.NewWriter(w) - w.Header().Set("Content-Type", "multipart/mixed; boundary="+dataWriter.Boundary()) - - parts := []struct { - contentType string - contentDisposition string - writeFn func(io.Writer) error - }{ - { - "application/octet-stream", - fmt.Sprintf("attachment; name=%q", "kv"), - func(fw io.Writer) error { - return h.BackupService.BackupKVStore(ctx, fw) - }, - }, - { - "application/octet-stream", - fmt.Sprintf("attachment; name=%q", "sql"), - func(fw io.Writer) error { - return h.SqlBackupRestoreService.BackupSqlStore(ctx, fw) - }, - }, - { - "application/json; charset=utf-8", - fmt.Sprintf("attachment; name=%q", "buckets"), - func(fw io.Writer) error { - return h.BucketManifestWriter.WriteManifest(ctx, fw) - }, - }, - } - - for _, p := range parts { - pw, err := dataWriter.CreatePart(map[string][]string{ - "Content-Type": {p.contentType}, - "Content-Disposition": {p.contentDisposition}, - }) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := p.writeFn(pw); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - } - - if err := dataWriter.Close(); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } -} diff --git a/http/backup_service_test.go b/http/backup_service_test.go deleted file mode 100644 index 0da7e105d29..00000000000 --- a/http/backup_service_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package http - -import ( - "context" - "io" - "mime" - "mime/multipart" - "net/http" - "net/http/httptest" - "testing" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/mock" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestBackupMetaService(t *testing.T) { - ctrlr := gomock.NewController(t) - backupSvc := mock.NewMockBackupService(ctrlr) - sqlBackupSvc := mock.NewMockSqlBackupRestoreService(ctrlr) - bucketManifestWriter := mock.NewMockBucketManifestWriter(ctrlr) - - b := &BackupBackend{ - BackupService: backupSvc, - SqlBackupRestoreService: sqlBackupSvc, - BucketManifestWriter: bucketManifestWriter, - } - h := NewBackupHandler(b) - - rr := httptest.NewRecorder() - r, err := http.NewRequest(http.MethodGet, "/", nil) - require.NoError(t, err) - - backupSvc.EXPECT(). - BackupKVStore(gomock.Any(), gomock.Any()). - Return(nil) - - backupSvc.EXPECT().RLockKVStore() - backupSvc.EXPECT().UnlockKVStore() - - sqlBackupSvc.EXPECT(). - BackupSqlStore(gomock.Any(), gomock.Any()). - Return(nil) - - sqlBackupSvc.EXPECT().RLockSqlStore() - sqlBackupSvc.EXPECT().RUnlockSqlStore() - - bucketManifestWriter.EXPECT(). - WriteManifest(gomock.Any(), gomock.Any()). 
- Return(nil) - - h.handleBackupMetadata(rr, r) - rs := rr.Result() - require.Equal(t, rs.StatusCode, http.StatusOK) - - // Parse the multi-part response - // First get the boundary from the header - _, params, err := mime.ParseMediaType(rs.Header.Get("Content-Type")) - require.NoError(t, err) - mr := multipart.NewReader(rs.Body, params["boundary"]) - - // Go through the parts of the response and verify the part names appear in the correct order - wantContentTypes := []string{"application/octet-stream", "application/octet-stream", "application/json; charset=utf-8"} - wantPartNames := []string{"kv", "sql", "buckets"} - for i := 0; ; i++ { - p, err := mr.NextPart() - if err == io.EOF { - break - } - require.NoError(t, err) - require.Equal(t, wantContentTypes[i], p.Header.Get("Content-Type")) - - _, params, err := mime.ParseMediaType(p.Header.Get("Content-Disposition")) - require.NoError(t, err) - require.Equal(t, wantPartNames[i], params["name"]) - } -} - -func TestRequireOperPermissions(t *testing.T) { - tests := []struct { - name string - permList []influxdb.Permission - wantStatus int - wantContentType string - }{ - { - "authorized to do the backup", - influxdb.OperPermissions(), - http.StatusOK, - "text/plain; charset=utf-8", - }, - { - "not authorized to do the backup", - influxdb.ReadAllPermissions(), - http.StatusUnauthorized, - "application/json; charset=utf-8", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // next will only be called if the authorization allows it. this is a dummy function - // that will set the content-type header to something other than application/json - next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.Write([]byte("OK")) - }) - - rr := httptest.NewRecorder() - - r, err := http.NewRequest(http.MethodGet, "/", nil) - require.NoError(t, err) - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, tt.permList)) - r = r.WithContext(ctx) - - h := BackupHandler{ - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(t)), - } - h.requireOperPermissions(next).ServeHTTP(rr, r) - rs := rr.Result() - - require.Equal(t, tt.wantStatus, rs.StatusCode) - require.Equal(t, tt.wantContentType, rs.Header.Get("Content-Type")) - }) - } -} diff --git a/http/check_service.go b/http/check_service.go deleted file mode 100644 index 560b42cc020..00000000000 --- a/http/check_service.go +++ /dev/null @@ -1,915 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "path" - "time" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - pctx "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "go.uber.org/zap" -) - -// CheckBackend is all services and associated parameters required to construct -// the CheckBackendHandler. 
-type CheckBackend struct { - errors.HTTPErrorHandler - log *zap.Logger - - AlgoWProxy FeatureProxyHandler - TaskService taskmodel.TaskService - CheckService influxdb.CheckService - UserResourceMappingService influxdb.UserResourceMappingService - LabelService influxdb.LabelService - UserService influxdb.UserService - OrganizationService influxdb.OrganizationService - FluxLanguageService fluxlang.FluxLanguageService -} - -// NewCheckBackend returns a new instance of CheckBackend. -func NewCheckBackend(log *zap.Logger, b *APIBackend) *CheckBackend { - return &CheckBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - AlgoWProxy: b.AlgoWProxy, - TaskService: b.TaskService, - CheckService: b.CheckService, - UserResourceMappingService: b.UserResourceMappingService, - LabelService: b.LabelService, - UserService: b.UserService, - OrganizationService: b.OrganizationService, - FluxLanguageService: b.FluxLanguageService, - } -} - -// CheckHandler is the handler for the check service -type CheckHandler struct { - *httprouter.Router - errors.HTTPErrorHandler - log *zap.Logger - - TaskService taskmodel.TaskService - CheckService influxdb.CheckService - UserResourceMappingService influxdb.UserResourceMappingService - LabelService influxdb.LabelService - UserService influxdb.UserService - OrganizationService influxdb.OrganizationService - FluxLanguageService fluxlang.FluxLanguageService -} - -const ( - prefixChecks = "/api/v2/checks" - checksIDPath = "/api/v2/checks/:id" - checksIDQueryPath = "/api/v2/checks/:id/query" - checksIDMembersPath = "/api/v2/checks/:id/members" - checksIDMembersIDPath = "/api/v2/checks/:id/members/:userID" - checksIDOwnersPath = "/api/v2/checks/:id/owners" - checksIDOwnersIDPath = "/api/v2/checks/:id/owners/:userID" - checksIDLabelsPath = "/api/v2/checks/:id/labels" - checksIDLabelsIDPath = "/api/v2/checks/:id/labels/:lid" -) - -// NewCheckHandler returns a new instance of CheckHandler. 
-func NewCheckHandler(log *zap.Logger, b *CheckBackend) *CheckHandler { - h := &CheckHandler{ - Router: NewRouter(b.HTTPErrorHandler), - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - - CheckService: b.CheckService, - UserResourceMappingService: b.UserResourceMappingService, - LabelService: b.LabelService, - UserService: b.UserService, - TaskService: b.TaskService, - OrganizationService: b.OrganizationService, - FluxLanguageService: b.FluxLanguageService, - } - - h.Handler("POST", prefixChecks, withFeatureProxy(b.AlgoWProxy, http.HandlerFunc(h.handlePostCheck))) - h.HandlerFunc("GET", prefixChecks, h.handleGetChecks) - h.HandlerFunc("GET", checksIDPath, h.handleGetCheck) - h.HandlerFunc("GET", checksIDQueryPath, h.handleGetCheckQuery) - h.HandlerFunc("DELETE", checksIDPath, h.handleDeleteCheck) - h.Handler("PUT", checksIDPath, withFeatureProxy(b.AlgoWProxy, http.HandlerFunc(h.handlePutCheck))) - h.Handler("PATCH", checksIDPath, withFeatureProxy(b.AlgoWProxy, http.HandlerFunc(h.handlePatchCheck))) - - memberBackend := MemberBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "member")), - ResourceType: influxdb.ChecksResourceType, - UserType: influxdb.Member, - UserResourceMappingService: b.UserResourceMappingService, - UserService: b.UserService, - } - h.HandlerFunc("POST", checksIDMembersPath, newPostMemberHandler(memberBackend)) - h.HandlerFunc("GET", checksIDMembersPath, newGetMembersHandler(memberBackend)) - h.HandlerFunc("DELETE", checksIDMembersIDPath, newDeleteMemberHandler(memberBackend)) - - ownerBackend := MemberBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "member")), - ResourceType: influxdb.ChecksResourceType, - UserType: influxdb.Owner, - UserResourceMappingService: b.UserResourceMappingService, - UserService: b.UserService, - } - h.HandlerFunc("POST", checksIDOwnersPath, newPostMemberHandler(ownerBackend)) - h.HandlerFunc("GET", checksIDOwnersPath, newGetMembersHandler(ownerBackend)) - h.HandlerFunc("DELETE", checksIDOwnersIDPath, newDeleteMemberHandler(ownerBackend)) - - labelBackend := &LabelBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "label")), - LabelService: b.LabelService, - ResourceType: influxdb.ChecksResourceType, - } - h.HandlerFunc("GET", checksIDLabelsPath, newGetLabelsHandler(labelBackend)) - h.HandlerFunc("POST", checksIDLabelsPath, newPostLabelHandler(labelBackend)) - h.HandlerFunc("DELETE", checksIDLabelsIDPath, newDeleteLabelHandler(labelBackend)) - - return h -} - -type checkLinks struct { - Self string `json:"self"` - Labels string `json:"labels"` - Members string `json:"members"` - Owners string `json:"owners"` - Query string `json:"query"` -} - -type checkResponse struct { - influxdb.Check - Status string `json:"status"` - Labels []influxdb.Label `json:"labels"` - Links checkLinks `json:"links"` - LatestCompleted time.Time `json:"latestCompleted,omitempty"` - LatestScheduled time.Time `json:"latestScheduled,omitempty"` - LastRunStatus string `json:"LastRunStatus,omitempty"` - LastRunError string `json:"LastRunError,omitempty"` - TaskID platform.ID `json:"taskID,omitempty"` -} - -type postCheckRequest struct { - influxdb.CheckCreate - Labels []string `json:"labels"` -} - -type decodeLabels struct { - Labels []string `json:"labels"` -} - -func (resp checkResponse) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(resp.Check) - if err != nil { - return nil, err - } - - b2, err := json.Marshal(struct { - Labels 
[]influxdb.Label `json:"labels"` - Links checkLinks `json:"links"` - Status string `json:"status"` - LatestCompleted time.Time `json:"latestCompleted,omitempty"` - LatestScheduled time.Time `json:"latestScheduled,omitempty"` - LastRunStatus string `json:"lastRunStatus,omitempty"` - LastRunError string `json:"lastRunError,omitempty"` - TaskID platform.ID `json:"taskID,omitempty"` - }{ - Links: resp.Links, - Labels: resp.Labels, - Status: resp.Status, - LatestCompleted: resp.LatestCompleted, - LatestScheduled: resp.LatestScheduled, - LastRunStatus: resp.LastRunStatus, - LastRunError: resp.LastRunError, - TaskID: resp.Check.GetTaskID(), - }) - if err != nil { - return nil, err - } - - return []byte(string(b1[:len(b1)-1]) + ", " + string(b2[1:])), nil -} - -type checksResponse struct { - Checks []*checkResponse `json:"checks"` - Links *influxdb.PagingLinks `json:"links"` -} - -func (h *CheckHandler) newCheckResponse(ctx context.Context, chk influxdb.Check, labels []*influxdb.Label) (*checkResponse, error) { - // TODO(desa): this should be handled in the check and not exposed in http land, but is currently blocking the FE. https://github.com/influxdata/influxdb/issues/15259 - task, err := h.TaskService.FindTaskByID(ctx, chk.GetTaskID()) - if err != nil { - return nil, err - } - - res := &checkResponse{ - Check: chk, - Links: checkLinks{ - Self: fmt.Sprintf("/api/v2/checks/%s", chk.GetID()), - Labels: fmt.Sprintf("/api/v2/checks/%s/labels", chk.GetID()), - Members: fmt.Sprintf("/api/v2/checks/%s/members", chk.GetID()), - Owners: fmt.Sprintf("/api/v2/checks/%s/owners", chk.GetID()), - Query: fmt.Sprintf("/api/v2/checks/%s/query", chk.GetID()), - }, - Labels: []influxdb.Label{}, - LatestCompleted: task.LatestCompleted, - LatestScheduled: task.LatestScheduled, - LastRunStatus: task.LastRunStatus, - LastRunError: task.LastRunError, - TaskID: chk.GetTaskID(), - } - - for _, l := range labels { - res.Labels = append(res.Labels, *l) - } - - res.Status = task.Status - - return res, nil -} - -func (h *CheckHandler) newChecksResponse(ctx context.Context, chks []influxdb.Check, labelService influxdb.LabelService, f influxdb.PagingFilter, opts influxdb.FindOptions) *checksResponse { - resp := &checksResponse{ - Checks: []*checkResponse{}, - Links: influxdb.NewPagingLinks(prefixChecks, opts, f, len(chks)), - } - for _, chk := range chks { - labels, _ := labelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: chk.GetID(), ResourceType: influxdb.ChecksResourceType}) - cr, err := h.newCheckResponse(ctx, chk, labels) - if err != nil { - h.log.Info("Failed to retrieve task associated with check", zap.String("checkID", chk.GetID().String())) - continue - } - - resp.Checks = append(resp.Checks, cr) - } - return resp -} - -func decodeGetCheckRequest(ctx context.Context, r *http.Request) (i platform.ID, err error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return i, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - if err := i.DecodeFromString(id); err != nil { - return i, err - } - return i, nil -} - -func (h *CheckHandler) handleGetChecks(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - filter, opts, err := decodeCheckFilter(ctx, r) - if err != nil { - h.log.Debug("Failed to decode request", zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - chks, _, err := h.CheckService.FindChecks(ctx, *filter, *opts) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - 
h.log.Debug("Checks retrieved", zap.String("checks", fmt.Sprint(chks))) - - if err := encodeResponse(ctx, w, http.StatusOK, h.newChecksResponse(ctx, chks, h.LabelService, filter, *opts)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func (h *CheckHandler) handleGetCheckQuery(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := decodeGetCheckRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - chk, err := h.CheckService.FindCheckByID(ctx, id) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - flux, err := chk.GenerateFlux(h.FluxLanguageService) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Check query retrieved", zap.String("check query", flux)) - if err := encodeResponse(ctx, w, http.StatusOK, newFluxResponse(flux)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type fluxResp struct { - Flux string `json:"flux"` -} - -func newFluxResponse(flux string) fluxResp { - return fluxResp{ - Flux: flux, - } -} - -func (h *CheckHandler) handleGetCheck(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := decodeGetCheckRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - chk, err := h.CheckService.FindCheckByID(ctx, id) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Check retrieved", zap.String("check", fmt.Sprint(chk))) - - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: chk.GetID(), ResourceType: influxdb.ChecksResourceType}) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - cr, err := h.newCheckResponse(ctx, chk, labels) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, cr); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func decodeCheckFilter(ctx context.Context, r *http.Request) (*influxdb.CheckFilter, *influxdb.FindOptions, error) { - auth, err := pctx.GetAuthorizer(ctx) - if err != nil { - return nil, nil, err - } - f := &influxdb.CheckFilter{ - UserResourceMappingFilter: influxdb.UserResourceMappingFilter{ - UserID: auth.GetUserID(), - ResourceType: influxdb.ChecksResourceType, - }, - } - - opts, err := influxdb.DecodeFindOptions(r) - if err != nil { - return f, nil, err - } - - q := r.URL.Query() - if orgIDStr := q.Get("orgID"); orgIDStr != "" { - orgID, err := platform.IDFromString(orgIDStr) - if err != nil { - return f, opts, &errors.Error{ - Code: errors.EInvalid, - Msg: "orgID is invalid", - Err: err, - } - } - f.OrgID = orgID - } else if orgNameStr := q.Get("org"); orgNameStr != "" { - f.Org = &orgNameStr - } - return f, opts, err -} - -type decodeStatus struct { - Status influxdb.Status `json:"status"` -} - -func decodePostCheckRequest(r *http.Request) (postCheckRequest, error) { - b, err := io.ReadAll(r.Body) - if err != nil { - return postCheckRequest{}, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - defer r.Body.Close() - - chk, err := check.UnmarshalJSON(b) - if err != nil { - return postCheckRequest{}, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - var ds decodeStatus - if err := json.Unmarshal(b, &ds); err != nil { - return postCheckRequest{}, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - var dl decodeLabels - if err := json.Unmarshal(b, &dl); err != nil { - return postCheckRequest{}, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - return 
postCheckRequest{ - CheckCreate: influxdb.CheckCreate{ - Check: chk, - Status: ds.Status, - }, - Labels: dl.Labels, - }, nil -} - -func decodePutCheckRequest(ctx context.Context, lang fluxlang.FluxLanguageService, r *http.Request) (influxdb.CheckCreate, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return influxdb.CheckCreate{}, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - i := new(platform.ID) - if err := i.DecodeFromString(id); err != nil { - return influxdb.CheckCreate{}, &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid check id format", - } - } - - b, err := io.ReadAll(r.Body) - if err != nil { - return influxdb.CheckCreate{}, &errors.Error{ - Code: errors.EInvalid, - Msg: "unable to read HTTP body", - Err: err, - } - } - defer r.Body.Close() - - chk, err := check.UnmarshalJSON(b) - if err != nil { - return influxdb.CheckCreate{}, &errors.Error{ - Code: errors.EInvalid, - Msg: "malformed check body", - Err: err, - } - } - chk.SetID(*i) - - if err := chk.Valid(lang); err != nil { - return influxdb.CheckCreate{}, err - } - - var ds decodeStatus - err = json.Unmarshal(b, &ds) - if err != nil { - return influxdb.CheckCreate{}, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - return influxdb.CheckCreate{ - Check: chk, - Status: ds.Status, - }, nil -} - -type patchCheckRequest struct { - platform.ID - Update influxdb.CheckUpdate -} - -func decodePatchCheckRequest(ctx context.Context, r *http.Request) (*patchCheckRequest, error) { - id := httprouter.ParamsFromContext(ctx).ByName("id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - var upd influxdb.CheckUpdate - if err := json.NewDecoder(r.Body).Decode(&upd); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: err.Error(), - } - } - if err := upd.Valid(); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: err.Error(), - } - } - - return &patchCheckRequest{ - ID: i, - Update: upd, - }, nil -} - -// handlePostCheck is the HTTP handler for the POST /api/v2/checks route. 
-func (h *CheckHandler) handlePostCheck(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - chk, err := decodePostCheckRequest(r) - if err != nil { - h.log.Debug("Failed to decode request", zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - - auth, err := pctx.GetAuthorizer(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := h.CheckService.CreateCheck(ctx, chk.CheckCreate, auth.GetUserID()); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - labels := h.mapNewCheckLabels(ctx, chk.CheckCreate, chk.Labels) - - cr, err := h.newCheckResponse(ctx, chk, labels) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusCreated, cr); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -// mapNewCheckLabels takes label ids from create check and maps them to the newly created check -func (h *CheckHandler) mapNewCheckLabels(ctx context.Context, chk influxdb.CheckCreate, labels []string) []*influxdb.Label { - var ls []*influxdb.Label - for _, sid := range labels { - var lid platform.ID - err := lid.DecodeFromString(sid) - - if err != nil { - continue - } - - label, err := h.LabelService.FindLabelByID(ctx, lid) - if err != nil { - continue - } - - mapping := influxdb.LabelMapping{ - LabelID: label.ID, - ResourceID: chk.GetID(), - ResourceType: influxdb.ChecksResourceType, - } - - err = h.LabelService.CreateLabelMapping(ctx, &mapping) - if err != nil { - continue - } - - ls = append(ls, label) - } - return ls -} - -// handlePutCheck is the HTTP handler for the PUT /api/v2/checks route. -func (h *CheckHandler) handlePutCheck(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - chk, err := decodePutCheckRequest(ctx, h.FluxLanguageService, r) - if err != nil { - h.log.Debug("Failed to decode request", zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - - c, err := h.CheckService.UpdateCheck(ctx, chk.GetID(), chk) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: c.GetID(), ResourceType: influxdb.ChecksResourceType}) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Check replaced", zap.String("check", fmt.Sprint(c))) - - cr, err := h.newCheckResponse(ctx, c, labels) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, cr); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -// handlePatchCheck is the HTTP handler for the PATCH /api/v2/checks/:id route. 
-func (h *CheckHandler) handlePatchCheck(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePatchCheckRequest(ctx, r) - if err != nil { - h.log.Debug("Failed to decode request", zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - - chk, err := h.CheckService.PatchCheck(ctx, req.ID, req.Update) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: chk.GetID(), ResourceType: influxdb.ChecksResourceType}) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Check patch", zap.String("check", fmt.Sprint(chk))) - - cr, err := h.newCheckResponse(ctx, chk, labels) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, cr); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func (h *CheckHandler) handleDeleteCheck(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - i, err := decodeGetCheckRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err = h.CheckService.DeleteCheck(ctx, i); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Check deleted", zap.String("checkID", fmt.Sprint(i))) - - w.WriteHeader(http.StatusNoContent) -} - -func checkIDPath(id platform.ID) string { - return path.Join(prefixChecks, id.String()) -} - -// CheckService is a client to interact with the handlers in this package over HTTP. -// It does not implement influxdb.CheckService because it returns a concrete representation of the API response -// and influxdb.Check as returned by that interface is not appropriate for this use case. -type CheckService struct { - Client *httpc.Client -} - -// FindCheckByID returns the Check matching the ID. -func (s *CheckService) FindCheckByID(ctx context.Context, id platform.ID) (*Check, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var cr Check - err := s.Client. - Get(checkIDPath(id)). - DecodeJSON(&cr). - Do(ctx) - if err != nil { - return nil, err - } - - return &cr, nil -} - -// FindCheck returns the first check matching the filter. -func (s *CheckService) FindCheck(ctx context.Context, filter influxdb.CheckFilter) (*Check, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - cs, n, err := s.FindChecks(ctx, filter) - if err != nil { - return nil, err - } - - if n == 0 && filter.Name != nil { - return nil, &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindBucket, - Msg: fmt.Sprintf("check %q not found", *filter.Name), - } - } else if n == 0 { - return nil, &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindBucket, - Msg: "check not found", - } - } - - return cs[0], nil -} - -// FindChecks returns a list of checks that match filter and the total count of matching checks. -// Additional options provide pagination & sorting. -func (s *CheckService) FindChecks(ctx context.Context, filter influxdb.CheckFilter, opt ...influxdb.FindOptions) ([]*Check, int, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - params := influxdb.FindOptionParams(opt...) 
- if filter.OrgID != nil { - params = append(params, [2]string{"orgID", filter.OrgID.String()}) - } - if filter.Org != nil { - params = append(params, [2]string{"org", *filter.Org}) - } - if filter.ID != nil { - params = append(params, [2]string{"id", filter.ID.String()}) - } - if filter.Name != nil { - params = append(params, [2]string{"name", *filter.Name}) - } - - var cr Checks - err := s.Client. - Get(prefixChecks). - QueryParams(params...). - DecodeJSON(&cr). - Do(ctx) - if err != nil { - return nil, 0, err - } - - return cr.Checks, len(cr.Checks), nil -} - -// CreateCheck creates a new check. -func (s *CheckService) CreateCheck(ctx context.Context, c *Check) (*Check, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var r Check - err := s.Client. - PostJSON(c, prefixChecks). - DecodeJSON(&r). - Do(ctx) - if err != nil { - return nil, err - } - - return &r, nil -} - -// UpdateCheck updates a check. -func (s *CheckService) UpdateCheck(ctx context.Context, id platform.ID, u *Check) (*Check, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var r Check - err := s.Client. - PutJSON(u, checkIDPath(id)). - DecodeJSON(&r). - Do(ctx) - if err != nil { - return nil, err - } - - return &r, nil -} - -// PatchCheck changes the status, description or name of a check. -func (s *CheckService) PatchCheck(ctx context.Context, id platform.ID, u influxdb.CheckUpdate) (*Check, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var r Check - err := s.Client. - PatchJSON(u, checkIDPath(id)). - DecodeJSON(&r). - Do(ctx) - if err != nil { - return nil, err - } - - return &r, nil -} - -// DeleteCheck removes a check. -func (s *CheckService) DeleteCheck(ctx context.Context, id platform.ID) error { - return s.Client. - Delete(checkIDPath(id)). - Do(ctx) -} - -// TODO(gavincabbage): These structures should be in a common place, like other models, -// -// but the common influxdb.Check is an interface that is not appropriate for an API client. 
-type Checks struct { - Checks []*Check `json:"checks"` - Links *influxdb.PagingLinks `json:"links"` -} - -type Check struct { - ID platform.ID `json:"id,omitempty"` - Name string `json:"name"` - OrgID platform.ID `json:"orgID,omitempty"` - OwnerID platform.ID `json:"ownerID,omitempty"` - CreatedAt time.Time `json:"createdAt,omitempty"` - UpdatedAt time.Time `json:"updatedAt,omitempty"` - Query *CheckQuery `json:"query"` - Status influxdb.Status `json:"status"` - Description string `json:"description"` - LatestCompleted time.Time `json:"latestCompleted"` - LastRunStatus string `json:"lastRunStatus"` - LastRunError string `json:"lastRunError"` - Labels []*influxdb.Label `json:"labels"` - Links *CheckLinks `json:"links"` - Type string `json:"type"` - TimeSince string `json:"timeSince"` - StaleTime string `json:"staleTime"` - ReportZero bool `json:"reportZero"` - Level string `json:"level"` - Every string `json:"every"` - Offset string `json:"offset"` - Tags []*influxdb.Tag `json:"tags"` - StatusMessageTemplate string `json:"statusMessageTemplate"` - Thresholds []*CheckThreshold `json:"thresholds"` -} - -type CheckQuery struct { - Text string `json:"text"` - EditMode string `json:"editMode"` - Name string `json:"name"` - BuilderConfig *CheckBuilderConfig `json:"builderConfig"` -} - -type CheckBuilderConfig struct { - Buckets []string `json:"buckets"` - Tags []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - } `json:"tags"` - Functions []struct { - Name string `json:"name"` - } `json:"functions"` - AggregateWindow struct { - Period string `json:"period"` - } `json:"aggregateWindow"` -} - -type CheckLinks struct { - Self string `json:"self"` - Labels string `json:"labels"` - Members string `json:"members"` - Owners string `json:"owners"` - Query string `json:"query"` -} - -type CheckThreshold struct { - check.ThresholdConfigBase - Type string `json:"type"` - Value float64 `json:"value,omitempty"` - Min float64 `json:"min,omitempty"` - Max float64 `json:"max,omitempty"` - Within bool `json:"within"` -} diff --git a/http/check_test.go b/http/check_test.go deleted file mode 100644 index 8952a46c040..00000000000 --- a/http/check_test.go +++ /dev/null @@ -1,1485 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "path" - "testing" - - "github.com/influxdata/flux/parser" - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - pcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/pkg/testttp" - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/influxdata/influxdb/v2/task/taskmodel" - influxTesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -// NewMockCheckBackend returns a CheckBackend with mock services. 
-func NewMockCheckBackend(t *testing.T) *CheckBackend { - return &CheckBackend{ - log: zaptest.NewLogger(t), - - CheckService: mock.NewCheckService(), - UserResourceMappingService: mock.NewUserResourceMappingService(), - LabelService: mock.NewLabelService(), - UserService: mock.NewUserService(), - OrganizationService: mock.NewOrganizationService(), - FluxLanguageService: fluxlang.DefaultService, - } -} - -func TestService_handleGetChecks(t *testing.T) { - type fields struct { - CheckService influxdb.CheckService - LabelService influxdb.LabelService - } - type args struct { - queryParams map[string][]string - } - type wants struct { - statusCode int - contentType string - body string - } - - fl1 := 100.32 - fl2 := 200.64 - fl4 := 100.1 - fl5 := 3023.2 - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get all checks", - fields: fields{ - &mock.CheckService{ - FindChecksFn: func(ctx context.Context, filter influxdb.CheckFilter, opts ...influxdb.FindOptions) ([]influxdb.Check, int, error) { - return []influxdb.Check{ - &check.Deadman{ - Base: check.Base{ - ID: influxTesting.MustIDBase16("0b501e7e557ab1ed"), - Name: "hello", - OrgID: influxTesting.MustIDBase16("50f7ba1150f7ba11"), - TaskID: 3, - }, - Level: notification.Info, - }, - &check.Threshold{ - Base: check.Base{ - ID: influxTesting.MustIDBase16("c0175f0077a77005"), - Name: "example", - OrgID: influxTesting.MustIDBase16("7e55e118dbabb1ed"), - TaskID: 3, - }, - Thresholds: []check.ThresholdConfig{ - &check.Greater{ - Value: fl1, - ThresholdConfigBase: check.ThresholdConfigBase{Level: notification.Critical}, - }, - &check.Lesser{ - Value: fl2, - ThresholdConfigBase: check.ThresholdConfigBase{Level: notification.Info}, - }, - &check.Range{Min: fl4, Max: fl5, Within: true}, - }, - }, - }, 2, nil - }, - }, - &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - labels := []*influxdb.Label{ - { - ID: influxTesting.MustIDBase16("fc3dc670a4be9b9a"), - Name: "label", - Properties: map[string]string{ - "color": "fff000", - }, - }, - } - return labels, nil - }, - }, - }, - args: args{ - map[string][]string{ - "limit": {"1"}, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/checks?descending=false&limit=1&offset=0", - "next": "/api/v2/checks?descending=false&limit=1&offset=1" - }, - "checks": [ - { - "links": { - "self": "/api/v2/checks/0b501e7e557ab1ed", - "labels": "/api/v2/checks/0b501e7e557ab1ed/labels", - "query": "/api/v2/checks/0b501e7e557ab1ed/query", - "owners": "/api/v2/checks/0b501e7e557ab1ed/owners", - "members": "/api/v2/checks/0b501e7e557ab1ed/members" - }, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "id": "0b501e7e557ab1ed", - "orgID": "50f7ba1150f7ba11", - "name": "hello", - "level": "INFO", - "query": { - "builderConfig": { - "aggregateWindow": { - "fillValues": false, - "period": "" - }, - "buckets": [], - "functions": [], - "tags": [] - }, - "editMode": "", - "name": "", - "text": "" - }, - "reportZero": false, - "statusMessageTemplate": "", - "tags": null, - "taskID": "0000000000000003", - "type": "deadman", - "labels": [ - { - "id": "fc3dc670a4be9b9a", - "name": "label", - "properties": { - "color": "fff000" - } - } - ], - "status": "active", - "latestCompleted": "0001-01-01T00:00:00Z", - "latestScheduled": "0001-01-01T00:00:00Z" - }, - { - "links": { - "self": 
"/api/v2/checks/c0175f0077a77005", - "labels": "/api/v2/checks/c0175f0077a77005/labels", - "members": "/api/v2/checks/c0175f0077a77005/members", - "owners": "/api/v2/checks/c0175f0077a77005/owners", - "query": "/api/v2/checks/c0175f0077a77005/query" - }, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "id": "c0175f0077a77005", - "orgID": "7e55e118dbabb1ed", - "name": "example", - "query": { - "builderConfig": { - "aggregateWindow": { - "fillValues": false, - "period": "" - }, - "buckets": [], - "functions": [], - "tags": [] - }, - "editMode": "", - "name": "", - "text": "" - }, - "statusMessageTemplate": "", - "tags": null, - "taskID": "0000000000000003", - "thresholds": [ - { - "allValues": false, - "level": "CRIT", - "type": "greater", - "value": 100.32 - }, - { - "allValues": false, - "level": "INFO", - "type": "lesser", - "value": 200.64 - }, - { - "allValues": false, - "level": "UNKNOWN", - "max": 3023.2, - "min": 100.1, - "type": "range", - "within": true - } - ], - "type": "threshold", - "labels": [ - { - "id": "fc3dc670a4be9b9a", - "name": "label", - "properties": { - "color": "fff000" - } - } - ], - "status": "active", - "latestCompleted": "0001-01-01T00:00:00Z", - "latestScheduled": "0001-01-01T00:00:00Z" - } - ] -} -`, - }, - }, - { - name: "get all checks when there are none", - fields: fields{ - &mock.CheckService{ - FindChecksFn: func(ctx context.Context, filter influxdb.CheckFilter, opts ...influxdb.FindOptions) ([]influxdb.Check, int, error) { - return []influxdb.Check{}, 0, nil - }, - }, - &mock.LabelService{}, - }, - args: args{ - map[string][]string{ - "limit": {"1"}, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/checks?descending=false&limit=1&offset=0" - }, - "checks": [] -}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - checkBackend := NewMockCheckBackend(t) - checkBackend.CheckService = tt.fields.CheckService - checkBackend.LabelService = tt.fields.LabelService - checkBackend.TaskService = &mock.TaskService{ - FindTaskByIDFn: func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - return &taskmodel.Task{Status: "active"}, nil - }, - } - h := NewCheckHandler(zaptest.NewLogger(t), checkBackend) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - qp := r.URL.Query() - for k, vs := range tt.args.queryParams { - for _, v := range vs { - qp.Add(k, v) - } - } - r.URL.RawQuery = qp.Encode() - r = r.WithContext(pcontext.SetAuthorizer(r.Context(), &influxdb.Session{UserID: influxTesting.MustIDBase16("6f626f7274697321")})) - - w := httptest.NewRecorder() - - h.handleGetChecks(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetChecks() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetChecks() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil || tt.wants.body != "" && !eq { - t.Errorf("%v", err) - t.Errorf("%q. 
handleGetChecks() = ***%v***", tt.name, diff) - } - }) - } -} - -func mustDuration(d string) *notification.Duration { - dur, err := parser.ParseDuration(d) - if err != nil { - panic(err) - } - - return (*notification.Duration)(dur) -} - -func TestService_handleGetCheckQuery(t *testing.T) { - type fields struct { - CheckService influxdb.CheckService - } - var l float64 = 10 - var u float64 = 40 - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get a check query by id", - fields: fields{ - &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { - if id == influxTesting.MustIDBase16("020f755c3c082000") { - return &check.Threshold{ - Base: check.Base{ - ID: influxTesting.MustIDBase16("020f755c3c082000"), - OrgID: influxTesting.MustIDBase16("020f755c3c082000"), - Name: "hello", - TaskID: 3, - Tags: []influxdb.Tag{ - {Key: "aaa", Value: "vaaa"}, - {Key: "bbb", Value: "vbbb"}, - }, - Every: mustDuration("1h"), - StatusMessageTemplate: "whoa! {check.yeah}", - Query: influxdb.DashboardQuery{ - Text: `from(bucket: "foo") |> range(start: -1d, stop: now()) |> filter(fn: (r) => r._field == "usage_idle") |> aggregateWindow(every: 1m, fn: mean) |> yield()`, - }, - }, - Thresholds: []check.ThresholdConfig{ - check.Greater{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Ok, - }, - Value: l, - }, - check.Lesser{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Info, - }, - Value: u, - }, - check.Range{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Warn, - }, - Min: l, - Max: u, - Within: true, - }, - check.Range{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Critical, - }, - Min: l, - Max: u, - Within: true, - }, - }, - }, nil - } - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: "{\"flux\":" + formatFluxJson(t, `import "influxdata/influxdb/monitor" -import "influxdata/influxdb/v1" -data = - from(bucket: "foo") - |> range(start: -1h) - |> filter(fn: (r) => r._field == "usage_idle") - |> aggregateWindow(every: 1h, fn: mean, createEmpty: false) -option task = {name: "hello", every: 1h} -check = {_check_id: "020f755c3c082000", _check_name: "hello", _type: "threshold", tags: {aaa: "vaaa", bbb: "vbbb"}} -ok = (r) => r["usage_idle"] > 10.0 -info = (r) => r["usage_idle"] < 40.0 -warn = (r) => r["usage_idle"] < 40.0 and r["usage_idle"] > 10.0 -crit = (r) => r["usage_idle"] < 40.0 and r["usage_idle"] > 10.0 -messageFn = (r) => "whoa! {check.yeah}" -data - |> v1["fieldsAsCols"]() - |> monitor["check"]( - data: check, - messageFn: messageFn, - ok: ok, - info: info, - warn: warn, - crit: crit, - ) -`) + "}\n", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - checkBackend := NewMockCheckBackend(t) - checkBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - checkBackend.CheckService = tt.fields.CheckService - checkBackend.TaskService = &mock.TaskService{ - FindTaskByIDFn: func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - return &taskmodel.Task{}, nil - }, - } - - testttp. - Get(t, path.Join(prefixChecks, tt.args.id, "/query")). - Do(NewCheckHandler(zaptest.NewLogger(t), checkBackend)). 
- ExpectStatus(tt.wants.statusCode). - Expect(func(resp *testttp.Resp) { - content := resp.Rec.Header().Get("Content-Type") - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetCheckQuery() = %v, want %v", tt.name, content, tt.wants.contentType) - } - }). - ExpectBody(func(body *bytes.Buffer) { - if eq, diff, err := jsonEqual(body.String(), tt.wants.body); err != nil || tt.wants.body != "" && !eq { - if err != nil { - t.Errorf("jsonEqual error: %v", err) - } - t.Errorf("%q. handleGetChecks() = ***%v***", tt.name, diff) - } - }) - }) - } -} - -func formatFluxJson(t *testing.T, script string) string { - formatted := influxTesting.FormatFluxString(t, script) - - enc, err := json.Marshal(formatted) - if err != nil { - t.Fatalf("error marshalling flux: %v", err) - } - - var bb bytes.Buffer - json.HTMLEscape(&bb, enc) - std := bb.String() - return std -} - -func TestService_handleGetCheck(t *testing.T) { - type fields struct { - CheckService influxdb.CheckService - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get a check by id", - fields: fields{ - &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { - if id == influxTesting.MustIDBase16("020f755c3c082000") { - return &check.Deadman{ - Base: check.Base{ - ID: influxTesting.MustIDBase16("020f755c3c082000"), - OrgID: influxTesting.MustIDBase16("020f755c3c082000"), - Name: "hello", - Every: mustDuration("3h"), - TaskID: 3, - }, - Level: notification.Critical, - }, nil - } - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` - { - "links": { - "self": "/api/v2/checks/020f755c3c082000", - "labels": "/api/v2/checks/020f755c3c082000/labels", - "members": "/api/v2/checks/020f755c3c082000/members", - "owners": "/api/v2/checks/020f755c3c082000/owners", - "query": "/api/v2/checks/020f755c3c082000/query" - }, - "labels": [], - "level": "CRIT", - "every": "3h", - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "id": "020f755c3c082000", - "query": { - "builderConfig": { - "aggregateWindow": { - "fillValues": false, - "period": "" - }, - "buckets": [], - "functions": [], - "tags": [] - }, - "editMode": "", - "name": "", - "text": "" - }, - "reportZero": false, - "status": "active", - "statusMessageTemplate": "", - "tags": null, - "taskID": "0000000000000003", - "type": "deadman", - "orgID": "020f755c3c082000", - "name": "hello", - "status": "active", - "latestCompleted": "0001-01-01T00:00:00Z", - "latestScheduled": "0001-01-01T00:00:00Z" - } - `, - }, - }, - { - name: "not found", - fields: fields{ - &mock.CheckService{ - FindCheckByIDFn: func(ctx context.Context, id platform.ID) (influxdb.Check, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "check not found", - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - checkBackend := NewMockCheckBackend(t) - checkBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - checkBackend.CheckService = tt.fields.CheckService - checkBackend.TaskService = &mock.TaskService{ - FindTaskByIDFn: func(ctx 
context.Context, id platform.ID) (*taskmodel.Task, error) { - return &taskmodel.Task{Status: "active"}, nil - }, - } - h := NewCheckHandler(zaptest.NewLogger(t), checkBackend) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - - w := httptest.NewRecorder() - - h.handleGetCheck(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - t.Logf(res.Header.Get("X-Influx-Error")) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetCheck() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetCheck() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetCheck(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handleGetCheck() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handlePostCheck(t *testing.T) { - type fields struct { - CheckService influxdb.CheckService - OrganizationService influxdb.OrganizationService - } - type args struct { - userID platform.ID - check influxdb.Check - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "create a new check", - fields: fields{ - CheckService: &mock.CheckService{ - CreateCheckFn: func(ctx context.Context, c influxdb.CheckCreate, userID platform.ID) error { - c.SetID(influxTesting.MustIDBase16("020f755c3c082000")) - c.SetOwnerID(userID) - return nil - }, - }, - OrganizationService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, f influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ID: influxTesting.MustIDBase16("6f626f7274697320")}, nil - }, - }, - }, - args: args{ - userID: influxTesting.MustIDBase16("6f626f7274697321"), - check: &check.Deadman{ - Base: check.Base{ - Name: "hello", - OrgID: influxTesting.MustIDBase16("6f626f7274697320"), - OwnerID: influxTesting.MustIDBase16("6f626f7274697321"), - Description: "desc1", - StatusMessageTemplate: "msg1", - Every: mustDuration("5m"), - TaskID: 3, - Tags: []influxdb.Tag{ - {Key: "k1", Value: "v1"}, - {Key: "k2", Value: "v2"}, - }, - }, - TimeSince: mustDuration("13s"), - ReportZero: true, - Level: notification.Warn, - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/checks/020f755c3c082000", - "labels": "/api/v2/checks/020f755c3c082000/labels", - "members": "/api/v2/checks/020f755c3c082000/members", - "owners": "/api/v2/checks/020f755c3c082000/owners", - "query": "/api/v2/checks/020f755c3c082000/query" - }, - "reportZero": true, - "statusMessageTemplate": "msg1", - "tags": [ - { - "key": "k1", - "value": "v1" - }, - { - "key": "k2", - "value": "v2" - } - ], - "query": { - "builderConfig": { - "aggregateWindow": { - "fillValues": false, - "period": "" - }, - "buckets": [], - "functions": [], - "tags": [] - }, - "editMode": "", - "name": "", - "text": "" -}, - "taskID": "0000000000000003", - "type": "deadman", - "timeSince": "13s", - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": 
"0001-01-01T00:00:00Z", - "id": "020f755c3c082000", - "orgID": "6f626f7274697320", - "name": "hello", - "ownerID": "6f626f7274697321", - "description": "desc1", - "every": "5m", - "level": "WARN", - "labels": [], - "status": "active", - "latestCompleted": "0001-01-01T00:00:00Z", - "latestScheduled": "0001-01-01T00:00:00Z" -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - checkBackend := NewMockCheckBackend(t) - checkBackend.CheckService = tt.fields.CheckService - checkBackend.OrganizationService = tt.fields.OrganizationService - checkBackend.TaskService = &mock.TaskService{ - FindTaskByIDFn: func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - return &taskmodel.Task{Status: "active"}, nil - }, - } - h := NewCheckHandler(zaptest.NewLogger(t), checkBackend) - - b, err := json.Marshal(tt.args.check) - if err != nil { - t.Fatalf("failed to unmarshal check: %v", err) - } - r := httptest.NewRequest("GET", "http://any.url?org=30", bytes.NewReader(b)) - w := httptest.NewRecorder() - r = r.WithContext(pcontext.SetAuthorizer(r.Context(), &influxdb.Session{UserID: tt.args.userID})) - - h.handlePostCheck(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePostCheck() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePostCheck() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePostCheck(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handlePostCheck() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handleDeleteCheck(t *testing.T) { - type fields struct { - CheckService influxdb.CheckService - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "remove a check by id", - fields: fields{ - &mock.CheckService{ - DeleteCheckFn: func(ctx context.Context, id platform.ID) error { - if id == influxTesting.MustIDBase16("020f755c3c082000") { - return nil - } - - return fmt.Errorf("wrong id") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNoContent, - }, - }, - { - name: "check not found", - fields: fields{ - &mock.CheckService{ - DeleteCheckFn: func(ctx context.Context, id platform.ID) error { - return &errors.Error{ - Code: errors.ENotFound, - Msg: "check not found", - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - checkBackend := NewMockCheckBackend(t) - checkBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - checkBackend.CheckService = tt.fields.CheckService - checkBackend.TaskService = &mock.TaskService{ - FindTaskByIDFn: func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - return &taskmodel.Task{}, nil - }, - } - h := NewCheckHandler(zaptest.NewLogger(t), checkBackend) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - - w := httptest.NewRecorder() - - h.handleDeleteCheck(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleDeleteCheck() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleDeleteCheck() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleDeleteCheck(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handleDeleteCheck() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handlePatchCheck(t *testing.T) { - type fields struct { - CheckService influxdb.CheckService - } - type args struct { - id string - name string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "update a check name", - fields: fields{ - &mock.CheckService{ - PatchCheckFn: func(ctx context.Context, id platform.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { - if id == influxTesting.MustIDBase16("020f755c3c082000") { - d := &check.Deadman{ - Base: check.Base{ - ID: influxTesting.MustIDBase16("020f755c3c082000"), - Name: "hello", - OrgID: influxTesting.MustIDBase16("020f755c3c082000"), - TaskID: 3, - }, - Level: notification.Critical, - } - - if upd.Name != nil { - d.Name = *upd.Name - } - - return d, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - name: "example", - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` - { - "links": { - "self": "/api/v2/checks/020f755c3c082000", - "labels": "/api/v2/checks/020f755c3c082000/labels", - "members": "/api/v2/checks/020f755c3c082000/members", - "owners": "/api/v2/checks/020f755c3c082000/owners", - "query": "/api/v2/checks/020f755c3c082000/query" - }, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "id": "020f755c3c082000", - "orgID": "020f755c3c082000", - "level": "CRIT", - "name": "example", - "query": { - "builderConfig": { - "aggregateWindow": { - "fillValues": false, - "period": "" - }, - "buckets": [], - "functions": [], - "tags": [] - }, - "editMode": "", - "name": "", - "text": "" - }, - "reportZero": false, - "status": "active", - "statusMessageTemplate": "", - "tags": null, - "taskID": "0000000000000003", - "type": "deadman", - "labels": [], - "latestCompleted": "0001-01-01T00:00:00Z", - "latestScheduled": "0001-01-01T00:00:00Z" - } - `, - }, - }, - { - name: "check not found", - fields: fields{ - &mock.CheckService{ - PatchCheckFn: func(ctx context.Context, id platform.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "check not found", - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - name: "hello", - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - checkBackend := NewMockCheckBackend(t) - checkBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - checkBackend.CheckService = tt.fields.CheckService - checkBackend.TaskService = &mock.TaskService{ - FindTaskByIDFn: func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - return &taskmodel.Task{Status: "active"}, nil - }, - } - h := NewCheckHandler(zaptest.NewLogger(t), checkBackend) - - upd := influxdb.CheckUpdate{} - if tt.args.name != "" { - upd.Name = &tt.args.name - } - - b, err := json.Marshal(upd) - if err != nil { - t.Fatalf("failed to unmarshal check update: %v", err) - } - - r := httptest.NewRequest("GET", "http://any.url", bytes.NewReader(b)) - - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - - w := httptest.NewRecorder() - - h.handlePatchCheck(w, r) - - res := w.Result() - content := 
res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePatchCheck() = %v, want %v %v", tt.name, res.StatusCode, tt.wants.statusCode, w.Header()) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePatchCheck() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePatchCheck(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handlePatchCheck() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handleUpdateCheck(t *testing.T) { - type fields struct { - CheckService influxdb.CheckService - } - type args struct { - id string - chk influxdb.Check - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "update a check name", - fields: fields{ - CheckService: &mock.CheckService{ - UpdateCheckFn: func(ctx context.Context, id platform.ID, chk influxdb.CheckCreate) (influxdb.Check, error) { - if id == influxTesting.MustIDBase16("020f755c3c082000") { - d := &check.Deadman{ - Base: check.Base{ - ID: influxTesting.MustIDBase16("020f755c3c082000"), - Name: "hello", - OrgID: influxTesting.MustIDBase16("020f755c3c082000"), - TaskID: 3, - Every: mustDuration("1m"), - }, - } - - d = chk.Check.(*check.Deadman) - d.SetID(influxTesting.MustIDBase16("020f755c3c082000")) - d.SetOrgID(influxTesting.MustIDBase16("020f755c3c082000")) - - return d, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - chk: &check.Deadman{ - Base: check.Base{ - Name: "example", - TaskID: 3, - OwnerID: 42, - OrgID: influxTesting.MustIDBase16("020f755c3c082000"), - Every: mustDuration("1m"), - }, - Level: notification.Critical, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` - { - "links": { - "self": "/api/v2/checks/020f755c3c082000", - "labels": "/api/v2/checks/020f755c3c082000/labels", - "members": "/api/v2/checks/020f755c3c082000/members", - "owners": "/api/v2/checks/020f755c3c082000/owners", - "query": "/api/v2/checks/020f755c3c082000/query" - }, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "id": "020f755c3c082000", - "every": "1m", - "orgID": "020f755c3c082000", - "ownerID": "000000000000002a", - "level": "CRIT", - "name": "example", - "query": { - "builderConfig": { - "aggregateWindow": { - "fillValues": false, - "period": "" - }, - "buckets": [], - "functions": [], - "tags": [] - }, - "editMode": "", - "name": "", - "text": "" - }, - "reportZero": false, - "status": "active", - "statusMessageTemplate": "", - "tags": null, - "taskID": "0000000000000003", - "type": "deadman", - "labels": [], - "latestCompleted": "0001-01-01T00:00:00Z", - "latestScheduled": "0001-01-01T00:00:00Z" - } - `, - }, - }, - { - name: "check not found", - fields: fields{ - CheckService: &mock.CheckService{ - UpdateCheckFn: func(ctx context.Context, id platform.ID, chk influxdb.CheckCreate) (influxdb.Check, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "check not found", - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - chk: &check.Deadman{ - Base: check.Base{ - Name: "example", - OwnerID: 42, - OrgID: influxTesting.MustIDBase16("020f755c3c082000"), - 
Every: mustDuration("1m"), - }, - }, - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - checkBackend := NewMockCheckBackend(t) - checkBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - checkBackend.CheckService = tt.fields.CheckService - checkBackend.TaskService = &mock.TaskService{ - FindTaskByIDFn: func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - return &taskmodel.Task{Status: "active"}, nil - }, - } - h := NewCheckHandler(zaptest.NewLogger(t), checkBackend) - - b, err := json.Marshal(tt.args.chk) - if err != nil { - t.Fatalf("failed to unmarshal check update: %v", err) - } - - r := httptest.NewRequest("GET", "http://any.url", bytes.NewReader(b)) - - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - - w := httptest.NewRecorder() - - h.handlePutCheck(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePutCheck() = %v, want %v %v %v", tt.name, res.StatusCode, tt.wants.statusCode, w.Header(), string(body)) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePutCheck() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePutCheck(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handlePutCheck() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handlePostCheckMember(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - checkID string - user *influxdb.User - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "add a check member", - fields: fields{ - UserService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ - ID: id, - Name: "name", - Status: influxdb.Active, - }, nil - }, - }, - }, - args: args{ - checkID: "020f755c3c082000", - user: &influxdb.User{ - ID: influxTesting.MustIDBase16("6f626f7274697320"), - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "logs": "/api/v2/users/6f626f7274697320/logs", - "self": "/api/v2/users/6f626f7274697320" - }, - "role": "member", - "id": "6f626f7274697320", - "name": "name", - "status": "active" -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - checkBackend := NewMockCheckBackend(t) - checkBackend.UserService = tt.fields.UserService - checkBackend.TaskService = &mock.TaskService{ - FindTaskByIDFn: func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - return &taskmodel.Task{}, nil - }, - } - h := NewCheckHandler(zaptest.NewLogger(t), checkBackend) - - b, err := json.Marshal(tt.args.user) - if err != nil { - t.Fatalf("failed to marshal user: %v", err) - } - - path := fmt.Sprintf("/api/v2/checks/%s/members", tt.args.checkID) - r := httptest.NewRequest("POST", path, bytes.NewReader(b)) - w := httptest.NewRecorder() - - h.ServeHTTP(w, r) - - res := w.Result() - content := 
res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePostCheckMember() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePostCheckMember() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePostCheckMember(). error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. handlePostCheckMember() = ***%s***", tt.name, diff) - } - }) - } -} - -func TestService_handlePostCheckOwner(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - checkID string - user *influxdb.User - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "add a check owner", - fields: fields{ - UserService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ - ID: id, - Name: "name", - Status: influxdb.Active, - }, nil - }, - }, - }, - args: args{ - checkID: "020f755c3c082000", - user: &influxdb.User{ - ID: influxTesting.MustIDBase16("6f626f7274697320"), - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "logs": "/api/v2/users/6f626f7274697320/logs", - "self": "/api/v2/users/6f626f7274697320" - }, - "role": "owner", - "id": "6f626f7274697320", - "name": "name", - "status": "active" -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - checkBackend := NewMockCheckBackend(t) - checkBackend.UserService = tt.fields.UserService - h := NewCheckHandler(zaptest.NewLogger(t), checkBackend) - - b, err := json.Marshal(tt.args.user) - if err != nil { - t.Fatalf("failed to marshal user: %v", err) - } - - path := fmt.Sprintf("/api/v2/checks/%s/owners", tt.args.checkID) - r := httptest.NewRequest("POST", path, bytes.NewReader(b)) - w := httptest.NewRecorder() - - h.ServeHTTP(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePostCheckOwner() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePostCheckOwner() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePostCheckOwner(). error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. handlePostCheckOwner() = ***%s***", tt.name, diff) - } - }) - } -} diff --git a/http/client.go b/http/client.go deleted file mode 100644 index bf59135ce6d..00000000000 --- a/http/client.go +++ /dev/null @@ -1,148 +0,0 @@ -package http - -import ( - "crypto/tls" - "net" - "net/http" - "net/url" - "time" - - "github.com/influxdata/influxdb/v2/dbrp" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/pkg/httpc" -) - -// NewHTTPClient creates a new httpc.Client type. This call sets all -// the options that are important to the http pkg on the httpc client. 
-// The default status fn and so forth will all be set for the caller. -// In addition, some options can be specified. Those will be added to the defaults. -func NewHTTPClient(addr, token string, insecureSkipVerify bool, opts ...httpc.ClientOptFn) (*httpc.Client, error) { - u, err := url.Parse(addr) - if err != nil { - return nil, err - } - - defaultOpts := []httpc.ClientOptFn{ - httpc.WithAddr(addr), - httpc.WithContentType("application/json"), - httpc.WithHTTPClient(NewClient(u.Scheme, insecureSkipVerify)), - httpc.WithInsecureSkipVerify(insecureSkipVerify), - httpc.WithStatusFn(CheckError), - } - if token != "" { - defaultOpts = append(defaultOpts, httpc.WithAuthToken(token)) - } - opts = append(defaultOpts, opts...) - return httpc.New(opts...) -} - -// Service connects to an InfluxDB via HTTP. -type Service struct { - Addr string - Token string - InsecureSkipVerify bool - - *TaskService - *NotificationRuleService - *VariableService - *WriteService - *CheckService - *NotificationEndpointService - *TelegrafService - *LabelService - DBRPMappingService *dbrp.Client -} - -// NewService returns a service that is an HTTP client to a remote. -// Address and token are needed for those services that do not use httpc.Client, -// but use those for configuring. -// Usually one would do: -// -// ``` -// c := NewHTTPClient(addr, token, insecureSkipVerify) -// s := NewService(c, addr token) -// ``` -// -// So one should provide the same `addr` and `token` to both calls to ensure consistency -// in the behavior of the returned service. -func NewService(httpClient *httpc.Client, addr, token string) (*Service, error) { - return &Service{ - Addr: addr, - Token: token, - TaskService: &TaskService{Client: httpClient}, - NotificationRuleService: &NotificationRuleService{Client: httpClient}, - VariableService: &VariableService{Client: httpClient}, - WriteService: &WriteService{ - Addr: addr, - Token: token, - }, - CheckService: &CheckService{Client: httpClient}, - NotificationEndpointService: &NotificationEndpointService{Client: httpClient}, - TelegrafService: NewTelegrafService(httpClient), - LabelService: &LabelService{Client: httpClient}, - DBRPMappingService: dbrp.NewClient(httpClient), - }, nil -} - -// NewURL concats addr and path. -func NewURL(addr, path string) (*url.URL, error) { - u, err := url.Parse(addr) - if err != nil { - return nil, err - } - u.Path = path - return u, nil -} - -// NewClient returns an http.Client that pools connections and injects a span. -func NewClient(scheme string, insecure bool) *http.Client { - return httpClient(scheme, insecure) -} - -// SpanTransport injects the http.RoundTripper.RoundTrip() request -// with a span. -type SpanTransport struct { - base http.RoundTripper -} - -// RoundTrip implements the http.RoundTripper, intercepting the base -// round trippers call and injecting a span. -func (s *SpanTransport) RoundTrip(r *http.Request) (*http.Response, error) { - span, _ := tracing.StartSpanFromContext(r.Context()) - defer span.Finish() - tracing.InjectToHTTPRequest(span, r) - return s.base.RoundTrip(r) -} - -// DefaultTransport wraps http.DefaultTransport in SpanTransport to inject -// tracing headers into all outgoing requests. -var DefaultTransport http.RoundTripper = &SpanTransport{base: http.DefaultTransport} - -// DefaultTransportInsecure is identical to DefaultTransport, with -// the exception that tls.Config is configured with InsecureSkipVerify -// set to true. 
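// It is meant for talking to servers whose TLS certificates cannot be verified,
// for example self-signed certificates in local test setups. Note that httpClient
// below only selects this transport when the scheme is "https" and insecure is
// requested, so plain-HTTP requests always go through DefaultTransport.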
-var DefaultTransportInsecure http.RoundTripper = &SpanTransport{ - base: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - ForceAttemptHTTP2: true, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - }, -} - -func httpClient(scheme string, insecure bool) *http.Client { - if scheme == "https" && insecure { - return &http.Client{Transport: DefaultTransportInsecure} - } - return &http.Client{Transport: DefaultTransport} -} diff --git a/http/config.go b/http/config.go deleted file mode 100644 index def5beeda23..00000000000 --- a/http/config.go +++ /dev/null @@ -1,116 +0,0 @@ -package http - -import ( - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - "github.com/influxdata/influxdb/v2/kit/cli" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/spf13/pflag" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -const prefixConfig = "/api/v2/config" - -func errInvalidType(dest interface{}, flag string) error { - return &errors.Error{ - Code: errors.EInternal, - Err: fmt.Errorf("unknown destination type %T for %q", dest, flag), - } -} - -type parsedOpt map[string]optValue - -type optValue []byte - -func (o optValue) MarshalJSON() ([]byte, error) { return o, nil } - -type ConfigHandler struct { - chi.Router - - log *zap.Logger - api *kithttp.API - - config parsedOpt -} - -// NewConfigHandler creates a handler that will return a JSON object with key/value pairs for the configuration values -// used during the launcher startup. The opts slice provides a list of options names along with a pointer to their -// value. -func NewConfigHandler(log *zap.Logger, opts []cli.Opt) (*ConfigHandler, error) { - h := &ConfigHandler{ - log: log, - api: kithttp.NewAPI(kithttp.WithLog(log)), - } - - if err := h.parseOptions(opts); err != nil { - return nil, err - } - - r := chi.NewRouter() - r.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - h.mwAuthorize, - ) - - r.Get("/", h.handleGetConfig) - h.Router = r - return h, nil -} - -func (h *ConfigHandler) Prefix() string { - return prefixConfig -} - -func (h *ConfigHandler) parseOptions(opts []cli.Opt) error { - h.config = make(parsedOpt) - - for _, o := range opts { - var b []byte - switch o.DestP.(type) { - // Known types for configuration values. Currently, these can all be encoded directly with json.Marshal. - case *string, *int, *int32, *int64, *bool, *time.Duration, *[]string, *map[string]string, pflag.Value, *platform.ID, *zapcore.Level: - var err error - b, err = json.Marshal(o.DestP) - if err != nil { - return err - } - default: - // Return an error if we don't know how to marshal this type. 
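// Rejecting unknown destination types here makes the failure visible when the
// handler is constructed at startup rather than when /api/v2/config is queried;
// for example a *float64 destination falls through to this branch, which is what
// the "unknown type" case in config_test.go exercises.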
- return errInvalidType(o.DestP, o.Flag) - } - - h.config[o.Flag] = b - } - - return nil -} - -func (h *ConfigHandler) handleGetConfig(w http.ResponseWriter, r *http.Request) { - h.api.Respond(w, r, http.StatusOK, map[string]parsedOpt{"config": h.config}) -} - -func (h *ConfigHandler) mwAuthorize(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - if err := authorizer.IsAllowedAll(r.Context(), influxdb.OperPermissions()); err != nil { - h.api.Err(w, r, &errors.Error{ - Code: errors.EUnauthorized, - Msg: fmt.Sprintf("access to %s requires operator permissions", h.Prefix()), - }) - return - } - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) -} diff --git a/http/config_test.go b/http/config_test.go deleted file mode 100644 index 0b14beaf68c..00000000000 --- a/http/config_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/cli" - "github.com/influxdata/influxdb/v2/kit/platform" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/mock" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestConfigHandler(t *testing.T) { - t.Run("known types", func(t *testing.T) { - stringFlag := "some string" - boolFlag := true - idFlag := platform.ID(1) - - opts := []cli.Opt{ - { - DestP: &stringFlag, - Flag: "string-flag", - }, - { - DestP: &boolFlag, - Flag: "bool-flag", - }, - { - DestP: &idFlag, - Flag: "id-flag", - }, - } - - want := map[string]interface{}{ - "config": map[string]interface{}{ - "string-flag": stringFlag, - "bool-flag": boolFlag, - "id-flag": idFlag, - }, - } - wantJsonBytes, err := json.Marshal(want) - require.NoError(t, err) - var wantDecoded map[string]interface{} - require.NoError(t, json.NewDecoder(bytes.NewReader(wantJsonBytes)).Decode(&wantDecoded)) - - h, err := NewConfigHandler(zaptest.NewLogger(t), opts) - require.NoError(t, err) - - rr := httptest.NewRecorder() - - r, err := http.NewRequest(http.MethodGet, "/", nil) - require.NoError(t, err) - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, influxdb.OperPermissions())) - r = r.WithContext(ctx) - h.ServeHTTP(rr, r) - rs := rr.Result() - - var gotDecoded map[string]interface{} - require.NoError(t, json.NewDecoder(rs.Body).Decode(&gotDecoded)) - require.Equal(t, gotDecoded, wantDecoded) - }) - - t.Run("unknown type", func(t *testing.T) { - var floatFlag float64 - - opts := []cli.Opt{ - { - DestP: &floatFlag, - Flag: "float-flag", - }, - } - - h, err := NewConfigHandler(zaptest.NewLogger(t), opts) - require.Nil(t, h) - require.Equal(t, errInvalidType(&floatFlag, "float-flag"), err) - }) -} - -func TestConfigHandler_Authorization(t *testing.T) { - tests := []struct { - name string - permList []influxdb.Permission - wantStatus int - }{ - { - "authorized to see config", - influxdb.OperPermissions(), - http.StatusOK, - }, - { - "not authorized to see config", - influxdb.ReadAllPermissions(), - http.StatusUnauthorized, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - }) - - rr := httptest.NewRecorder() - - r, err := http.NewRequest(http.MethodGet, 
"/", nil) - require.NoError(t, err) - ctx := influxdbcontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, tt.permList)) - r = r.WithContext(ctx) - - h := ConfigHandler{ - api: kithttp.NewAPI(kithttp.WithLog(zaptest.NewLogger(t))), - } - h.mwAuthorize(next).ServeHTTP(rr, r) - rs := rr.Result() - - require.Equal(t, tt.wantStatus, rs.StatusCode) - }) - } -} diff --git a/http/debug.go b/http/debug.go deleted file mode 100644 index 1ce32267839..00000000000 --- a/http/debug.go +++ /dev/null @@ -1,51 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "net/http" - - "github.com/influxdata/influxdb/v2" -) - -// Flusher flushes data from a store to reset; used for testing. -type Flusher interface { - Flush(ctx context.Context) -} - -func Debug(ctx context.Context, next http.Handler, f Flusher, service influxdb.OnboardingService) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/debug/flush" { - // DebugFlush clears all services for testing. - f.Flush(ctx) - w.Header().Set("Content-Type", "text/html; charset=utf-8") - w.WriteHeader(http.StatusOK) - return - } - if r.URL.Path == "/debug/provision" { - data := &influxdb.OnboardingRequest{ - User: "dev_user", - Password: "password", - Org: "InfluxData", - Bucket: "project", - } - res, err := service.OnboardInitialUser(ctx, data) - if err != nil { - w.WriteHeader(http.StatusBadRequest) - w.Write([]byte(err.Error())) - return - } - body, err := json.Marshal(res) - if err != nil { - w.WriteHeader(http.StatusBadRequest) - w.Write([]byte(err.Error())) - return - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - w.Write(body) - return - } - next.ServeHTTP(w, r) - } -} diff --git a/http/delete_handler.go b/http/delete_handler.go deleted file mode 100644 index d575ebca8b1..00000000000 --- a/http/delete_handler.go +++ /dev/null @@ -1,361 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - http "net/http" - "time" - - "github.com/influxdata/influxql" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - pcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/predicate" - "go.uber.org/zap" -) - -// DeleteBackend is all services and associated parameters required to construct -// the DeleteHandler. -type DeleteBackend struct { - log *zap.Logger - errors.HTTPErrorHandler - - DeleteService influxdb.DeleteService - BucketService influxdb.BucketService - OrganizationService influxdb.OrganizationService -} - -// NewDeleteBackend returns a new instance of DeleteBackend -func NewDeleteBackend(log *zap.Logger, b *APIBackend) *DeleteBackend { - return &DeleteBackend{ - log: log, - - HTTPErrorHandler: b.HTTPErrorHandler, - DeleteService: b.DeleteService, - BucketService: b.BucketService, - OrganizationService: b.OrganizationService, - } -} - -// DeleteHandler receives a delete request with a predicate and sends it to storage. 
-type DeleteHandler struct { - errors.HTTPErrorHandler - *httprouter.Router - - log *zap.Logger - - DeleteService influxdb.DeleteService - BucketService influxdb.BucketService - OrganizationService influxdb.OrganizationService -} - -const ( - prefixDelete = "/api/v2/delete" -) - -var ( - msgStartTooSoon = fmt.Sprintf("invalid start time, start time must not be before %s", time.Unix(0, models.MinNanoTime).UTC().Format(time.RFC3339Nano)) - msgStopTooLate = fmt.Sprintf("invalid stop time, stop time must not be after %s", time.Unix(0, models.MaxNanoTime).UTC().Format(time.RFC3339Nano)) -) - -// NewDeleteHandler creates a new handler at /api/v2/delete to receive delete requests. -func NewDeleteHandler(log *zap.Logger, b *DeleteBackend) *DeleteHandler { - h := &DeleteHandler{ - HTTPErrorHandler: b.HTTPErrorHandler, - Router: NewRouter(b.HTTPErrorHandler), - log: log, - - BucketService: b.BucketService, - DeleteService: b.DeleteService, - OrganizationService: b.OrganizationService, - } - - h.HandlerFunc("POST", prefixDelete, h.handleDelete) - return h -} - -func (h *DeleteHandler) handleDelete(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "DeleteHandler") - defer span.Finish() - - ctx := r.Context() - defer r.Body.Close() - - a, err := pcontext.GetAuthorizer(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - dr, measurement, err := decodeDeleteRequest( - ctx, r, - h.OrganizationService, - h.BucketService, - ) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - p, err := influxdb.NewPermissionAtID(dr.Bucket.ID, influxdb.WriteAction, influxdb.BucketsResourceType, dr.Org.ID) - if err != nil { - h.HandleHTTPError(ctx, &errors.Error{ - Code: errors.EInternal, - Op: "http/handleDelete", - Msg: fmt.Sprintf("unable to create permission for bucket: %v", err), - Err: err, - }, w) - return - } - - if pset, err := a.PermissionSet(); err != nil || !pset.Allowed(*p) { - h.HandleHTTPError(ctx, &errors.Error{ - Code: errors.EForbidden, - Op: "http/handleDelete", - Msg: "insufficient permissions to delete", - }, w) - return - } - - if err := h.DeleteService.DeleteBucketRangePredicate(r.Context(), dr.Org.ID, dr.Bucket.ID, dr.Start, dr.Stop, dr.Predicate, measurement); err != nil { - h.HandleHTTPError(ctx, &errors.Error{ - Code: errors.EInternal, - Op: "http/handleDelete", - Msg: fmt.Sprintf("unable to delete: %v", err), - Err: err, - }, w) - return - } - - h.log.Debug("Deleted", - zap.String("orgID", fmt.Sprint(dr.Org.ID.String())), - zap.String("bucketID", fmt.Sprint(dr.Bucket.ID.String())), - ) - - w.WriteHeader(http.StatusNoContent) -} - -func decodeDeleteRequest(ctx context.Context, r *http.Request, orgSvc influxdb.OrganizationService, bucketSvc influxdb.BucketService) (*deleteRequest, influxql.Expr, error) { - dr := new(deleteRequest) - buf, err := io.ReadAll(r.Body) - if err != nil { - je := &errors.Error{ - Code: errors.EInvalid, - Msg: "error reading json body", - Err: err, - } - return nil, nil, je - } - buffer := bytes.NewBuffer(buf) - err = json.NewDecoder(buffer).Decode(dr) - if err != nil { - je := &errors.Error{ - Code: errors.EInvalid, - Msg: "error decoding json body", - Err: err, - } - return nil, nil, je - } - - var drd deleteRequestDecode - err = json.Unmarshal(buf, &drd) - if err != nil { - je := &errors.Error{ - Code: errors.EInvalid, - Msg: "error decoding json body for predicate", - Err: err, - } - return nil, nil, je - } - var measurementExpr influxql.Expr - if drd.Predicate != "" { - expr, err := 
influxql.ParseExpr(drd.Predicate) - if err != nil { - return nil, nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid request; error parsing predicate", - Err: err, - } - } - measurementExpr, _, err = influxql.PartitionExpr(influxql.CloneExpr(expr), func(e influxql.Expr) (bool, error) { - switch e := e.(type) { - case *influxql.BinaryExpr: - switch e.Op { - case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX: - tag, ok := e.LHS.(*influxql.VarRef) - if ok && tag.Val == "_measurement" { - return true, nil - } - } - } - return false, nil - }) - - var walkError error - influxql.WalkFunc(expr, func(e influxql.Node) { - if v, ok := e.(*influxql.BinaryExpr); ok { - if vv, ok := v.LHS.(*influxql.VarRef); ok && v.Op == influxql.EQ { - if vv.Val == "_field" { - walkError = &errors.Error{ - Code: errors.ENotImplemented, - Msg: "", - Err: fmt.Errorf("delete by field is not supported"), - } - } - } - } - }) - if walkError != nil { - return nil, nil, walkError - } - if err != nil { - return nil, nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid request; error partitioning predicate", - Err: err, - } - } - } - - if dr.Org, err = queryOrganization(ctx, r, orgSvc); err != nil { - return nil, nil, err - } - - if dr.Bucket, err = queryBucket(ctx, dr.Org.ID, r, bucketSvc); err != nil { - return nil, nil, err - } - return dr, measurementExpr, nil -} - -type deleteRequest struct { - Org *influxdb.Organization - Bucket *influxdb.Bucket - Start int64 - Stop int64 - Predicate influxdb.Predicate -} - -type deleteRequestDecode struct { - Start string `json:"start"` - Stop string `json:"stop"` - Predicate string `json:"predicate"` -} - -// DeleteRequest is the request send over http to delete points. -type DeleteRequest struct { - OrgID string `json:"-"` - Org string `json:"-"` // org name - BucketID string `json:"-"` - Bucket string `json:"-"` - Start string `json:"start"` - Stop string `json:"stop"` - Predicate string `json:"predicate"` -} - -func (dr *deleteRequest) UnmarshalJSON(b []byte) error { - var drd deleteRequestDecode - if err := json.Unmarshal(b, &drd); err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Invalid delete predicate node request", - Err: err, - } - } - *dr = deleteRequest{} - start, err := time.Parse(time.RFC3339Nano, drd.Start) - if err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Op: "http/Delete", - Msg: "invalid RFC3339Nano for field start, please format your time with RFC3339Nano format, example: 2009-01-02T23:00:00Z", - } - } - if err = models.CheckTime(start); err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Op: "http/Delete", - Msg: msgStartTooSoon, - } - } - dr.Start = start.UnixNano() - - stop, err := time.Parse(time.RFC3339Nano, drd.Stop) - if err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Op: "http/Delete", - Msg: "invalid RFC3339Nano for field stop, please format your time with RFC3339Nano format, example: 2009-01-01T23:00:00Z", - } - } - if err = models.CheckTime(stop); err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Op: "http/Delete", - Msg: msgStopTooLate, - } - } - dr.Stop = stop.UnixNano() - node, err := predicate.Parse(drd.Predicate) - if err != nil { - return err - } - dr.Predicate, err = predicate.New(node) - return err -} - -// DeleteService sends data over HTTP to delete points. -type DeleteService struct { - Addr string - Token string - InsecureSkipVerify bool -} - -// DeleteBucketRangePredicate send delete request over http to delete points. 
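// The request body carries the start and stop times and the optional predicate;
// the organization and bucket are sent as query parameters (orgID/org and
// bucketID/bucket), matching what the handler above expects. A minimal usage
// sketch, with placeholder address, token, and names (not defaults):
//
// ```
// svc := &DeleteService{Addr: "http://localhost:8086", Token: "my-token"}
// err := svc.DeleteBucketRangePredicate(ctx, DeleteRequest{
//     Org:       "org1",
//     Bucket:    "bucket1",
//     Start:     "2009-01-01T23:00:00Z",
//     Stop:      "2019-11-10T01:00:00Z",
//     Predicate: "tag1=\"v1\"",
// })
// ```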
-func (s *DeleteService) DeleteBucketRangePredicate(ctx context.Context, dr DeleteRequest) error { - u, err := NewURL(s.Addr, prefixDelete) - if err != nil { - return err - } - buf := new(bytes.Buffer) - if err := json.NewEncoder(buf).Encode(dr); err != nil { - return err - } - req, err := http.NewRequest("POST", u.String(), buf) - if err != nil { - return err - } - - req.Header.Set("Content-Type", "application/json; charset=utf-8") - SetToken(s.Token, req) - - params := req.URL.Query() - if dr.OrgID != "" { - params.Set("orgID", dr.OrgID) - } else if dr.Org != "" { - params.Set("org", dr.Org) - } - - if dr.BucketID != "" { - params.Set("bucketID", dr.BucketID) - } else if dr.Bucket != "" { - params.Set("bucket", dr.Bucket) - } - req.URL.RawQuery = params.Encode() - - hc := NewClient(u.Scheme, s.InsecureSkipVerify) - - resp, err := hc.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - return CheckError(resp) -} diff --git a/http/delete_test.go b/http/delete_test.go deleted file mode 100644 index 99d72b472af..00000000000 --- a/http/delete_test.go +++ /dev/null @@ -1,531 +0,0 @@ -package http - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - pcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/models" - influxtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -var user1ID = influxtesting.MustIDBase16("020f755c3c082001") - -// NewMockDeleteBackend returns a DeleteBackend with mock services. 
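// TestDelete below overrides DeleteService, OrganizationService, and
// BucketService with per-case mocks before constructing the handler.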
-func NewMockDeleteBackend(t *testing.T) *DeleteBackend { - return &DeleteBackend{ - log: zaptest.NewLogger(t), - - DeleteService: mock.NewDeleteService(), - BucketService: mock.NewBucketService(), - OrganizationService: mock.NewOrganizationService(), - } -} - -func TestDelete(t *testing.T) { - type fields struct { - DeleteService influxdb.DeleteService - OrganizationService influxdb.OrganizationService - BucketService influxdb.BucketService - } - - type args struct { - queryParams map[string][]string - body []byte - authorizer influxdb.Authorizer - } - - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "missing start time", - args: args{ - queryParams: map[string][]string{}, - body: []byte(`{}`), - authorizer: &influxdb.Authorization{UserID: user1ID}, - }, - fields: fields{}, - wants: wants{ - statusCode: http.StatusBadRequest, - contentType: "application/json; charset=utf-8", - body: `{ - "code": "invalid", - "message": "error decoding json body: invalid RFC3339Nano for field start, please format your time with RFC3339Nano format, example: 2009-01-02T23:00:00Z" - }`, - }, - }, - { - name: "missing stop time", - args: args{ - queryParams: map[string][]string{}, - body: []byte(`{"start":"2009-01-01T23:00:00Z"}`), - authorizer: &influxdb.Authorization{UserID: user1ID}, - }, - fields: fields{}, - wants: wants{ - statusCode: http.StatusBadRequest, - contentType: "application/json; charset=utf-8", - body: `{ - "code": "invalid", - "message": "error decoding json body: invalid RFC3339Nano for field stop, please format your time with RFC3339Nano format, example: 2009-01-01T23:00:00Z" - }`, - }, - }, - { - name: "start time too soon", - args: args{ - queryParams: map[string][]string{}, - body: []byte(fmt.Sprintf(`{"start":"%s"}`, time.Unix(0, models.MinNanoTime-1).UTC().Format(time.RFC3339Nano))), - authorizer: &influxdb.Authorization{UserID: user1ID}, - }, - fields: fields{}, - wants: wants{ - statusCode: http.StatusBadRequest, - contentType: "application/json; charset=utf-8", - body: fmt.Sprintf(`{ - "code": "invalid", - "message": "error decoding json body: %s" - }`, msgStartTooSoon), - }, - }, - { - name: "stop time too late", - args: args{ - queryParams: map[string][]string{}, - body: []byte(fmt.Sprintf(`{"start":"2020-01-01T01:01:01Z", "stop":"%s"}`, time.Unix(0, models.MaxNanoTime+1).UTC().Format(time.RFC3339Nano))), - authorizer: &influxdb.Authorization{UserID: user1ID}, - }, - fields: fields{}, - wants: wants{ - statusCode: http.StatusBadRequest, - contentType: "application/json; charset=utf-8", - body: fmt.Sprintf(`{ - "code": "invalid", - "message": "error decoding json body: %s" - }`, msgStopTooLate), - }, - }, - { - name: "missing org", - args: args{ - queryParams: map[string][]string{}, - body: []byte(`{"start":"2009-01-01T23:00:00Z","stop":"2009-11-10T01:00:00Z"}`), - authorizer: &influxdb.Authorization{UserID: user1ID}, - }, - fields: fields{ - OrganizationService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, f influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "Please provide either orgID or org", - } - }, - }, - }, - wants: wants{ - statusCode: http.StatusBadRequest, - contentType: "application/json; charset=utf-8", - body: `{ - "code": "invalid", - "message": "Please provide either orgID or org" - }`, - }, - }, - { - name: "missing bucket", - args: args{ - 
queryParams: map[string][]string{ - "org": {"org1"}, - }, - body: []byte(`{"start":"2009-01-01T23:00:00Z","stop":"2009-11-10T01:00:00Z"}`), - authorizer: &influxdb.Authorization{UserID: user1ID}, - }, - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketFn: func(ctx context.Context, f influxdb.BucketFilter) (*influxdb.Bucket, error) { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "Please provide either bucketID or bucket", - } - }, - }, - OrganizationService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, f influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: platform.ID(1), - }, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusBadRequest, - contentType: "application/json; charset=utf-8", - body: `{ - "code": "invalid", - "message": "Please provide either bucketID or bucket" - }`, - }, - }, - { - name: "insufficient permissions delete", - args: args{ - queryParams: map[string][]string{ - "org": {"org1"}, - "bucket": {"buck1"}, - }, - body: []byte(`{"start":"2009-01-01T23:00:00Z","stop":"2019-11-10T01:00:00Z"}`), - authorizer: &influxdb.Authorization{UserID: user1ID}, - }, - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketFn: func(ctx context.Context, f influxdb.BucketFilter) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: platform.ID(2), - Name: "bucket1", - }, nil - }, - }, - OrganizationService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, f influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: platform.ID(1), - }, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusForbidden, - contentType: "application/json; charset=utf-8", - body: `{ - "code": "forbidden", - "message": "insufficient permissions to delete" - }`, - }, - }, - { - name: "no predicate delete", - args: args{ - queryParams: map[string][]string{ - "org": {"org1"}, - "bucket": {"buck1"}, - }, - body: []byte(`{"start":"2009-01-01T23:00:00Z","stop":"2019-11-10T01:00:00Z"}`), - authorizer: &influxdb.Authorization{ - UserID: user1ID, - Status: influxdb.Active, - Permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxtesting.IDPtr(platform.ID(2)), - OrgID: influxtesting.IDPtr(platform.ID(1)), - }, - }, - }, - }, - }, - fields: fields{ - DeleteService: mock.NewDeleteService(), - BucketService: &mock.BucketService{ - FindBucketFn: func(ctx context.Context, f influxdb.BucketFilter) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: platform.ID(2), - Name: "bucket1", - }, nil - }, - }, - OrganizationService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, f influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: platform.ID(1), - Name: "org1", - }, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusNoContent, - body: ``, - }, - }, - { - name: "unsupported delete", - args: args{ - queryParams: map[string][]string{ - "org": {"org1"}, - "bucket": {"buck1"}, - }, - body: []byte(`{ - "start":"2009-01-01T23:00:00Z", - "stop":"2019-11-10T01:00:00Z", - "predicate": "tag1=\"v1\" and (tag2=\"v2\" or tag3=\"v3\")" - }`), - authorizer: &influxdb.Authorization{ - UserID: user1ID, - Status: influxdb.Active, - Permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: 
influxdb.BucketsResourceType, - ID: influxtesting.IDPtr(platform.ID(2)), - OrgID: influxtesting.IDPtr(platform.ID(1)), - }, - }, - }, - }, - }, - fields: fields{ - DeleteService: mock.NewDeleteService(), - BucketService: &mock.BucketService{ - FindBucketFn: func(ctx context.Context, f influxdb.BucketFilter) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: platform.ID(2), - Name: "bucket1", - }, nil - }, - }, - OrganizationService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, f influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: platform.ID(1), - Name: "org1", - }, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusBadRequest, - body: `{ - "code": "invalid", - "message": "error decoding json body: the logical operator OR is not supported yet at position 25" - }`, - }, - }, - { - name: "unsupported delete measurements", - args: args{ - queryParams: map[string][]string{ - "org": {"org1"}, - "bucket": {"buck1"}, - }, - body: []byte(`{ - "start":"2009-01-01T23:00:00Z", - "stop":"2019-11-10T01:00:00Z", - "predicate": "_measurement=\"cpu\" or _measurement=\"mem\"" - }`), - authorizer: &influxdb.Authorization{ - UserID: user1ID, - Status: influxdb.Active, - Permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxtesting.IDPtr(platform.ID(2)), - OrgID: influxtesting.IDPtr(platform.ID(1)), - }, - }, - }, - }, - }, - fields: fields{ - DeleteService: mock.NewDeleteService(), - BucketService: &mock.BucketService{ - FindBucketFn: func(ctx context.Context, f influxdb.BucketFilter) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: platform.ID(2), - Name: "bucket1", - }, nil - }, - }, - OrganizationService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, f influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: platform.ID(1), - Name: "org1", - }, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusBadRequest, - body: `{ - "code": "invalid", - "message": "error decoding json body: the logical operator OR is not supported yet at position 19" - }`, - }, - }, - { - name: "unsupported delete by field", - args: args{ - queryParams: map[string][]string{ - "org": {"org1"}, - "bucket": {"buck1"}, - }, - body: []byte(`{ - "start":"2009-01-01T23:00:00Z", - "stop":"2019-11-10T01:00:00Z", - "predicate": "_field=\"cpu\"" - }`), - authorizer: &influxdb.Authorization{ - UserID: user1ID, - Status: influxdb.Active, - Permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxtesting.IDPtr(platform.ID(2)), - OrgID: influxtesting.IDPtr(platform.ID(1)), - }, - }, - }, - }, - }, - fields: fields{ - DeleteService: mock.NewDeleteService(), - BucketService: &mock.BucketService{ - FindBucketFn: func(ctx context.Context, f influxdb.BucketFilter) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: platform.ID(2), - Name: "bucket1", - }, nil - }, - }, - OrganizationService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, f influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: platform.ID(1), - Name: "org1", - }, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusNotImplemented, - body: `{ - "code": "not implemented", - "message": "delete by field is not supported" - 
}`, - }, - }, - { - name: "complex delete", - args: args{ - queryParams: map[string][]string{ - "org": {"org1"}, - "bucket": {"buck1"}, - }, - body: []byte(`{ - "start":"2009-01-01T23:00:00Z", - "stop":"2019-11-10T01:00:00Z", - "predicate": "_measurement=\"testing\" and tag1=\"v1\" and (tag2=\"v2\" and tag3=\"v3\")" - }`), - authorizer: &influxdb.Authorization{ - UserID: user1ID, - Status: influxdb.Active, - Permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxtesting.IDPtr(platform.ID(2)), - OrgID: influxtesting.IDPtr(platform.ID(1)), - }, - }, - }, - }, - }, - fields: fields{ - DeleteService: mock.NewDeleteService(), - BucketService: &mock.BucketService{ - FindBucketFn: func(ctx context.Context, f influxdb.BucketFilter) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: platform.ID(2), - Name: "bucket1", - }, nil - }, - }, - OrganizationService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, f influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: platform.ID(1), - Name: "org1", - }, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusNoContent, - body: ``, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - deleteBackend := NewMockDeleteBackend(t) - deleteBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - deleteBackend.DeleteService = tt.fields.DeleteService - deleteBackend.OrganizationService = tt.fields.OrganizationService - deleteBackend.BucketService = tt.fields.BucketService - h := NewDeleteHandler(zaptest.NewLogger(t), deleteBackend) - - r := httptest.NewRequest("POST", "http://any.tld", bytes.NewReader(tt.args.body)) - - qp := r.URL.Query() - for k, vs := range tt.args.queryParams { - for _, v := range vs { - qp.Add(k, v) - } - } - r = r.WithContext(pcontext.SetAuthorizer(r.Context(), tt.args.authorizer)) - r.URL.RawQuery = qp.Encode() - - w := httptest.NewRecorder() - - h.handleDelete(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleDelete() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleDelete() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleDelete(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handleDelete() = ***%s***", tt.name, diff) - } - } - }) - } -} diff --git a/http/document_service.go b/http/document_service.go deleted file mode 100644 index f09a2a1cc65..00000000000 --- a/http/document_service.go +++ /dev/null @@ -1,204 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http" - "path" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "go.uber.org/zap" -) - -const prefixDocuments = "/api/v2/documents" - -// DocumentService is an interface HTTP-exposed portion of the document service. 
-type DocumentService interface { - GetDocuments(ctx context.Context, namespace string, orgID platform.ID) ([]*influxdb.Document, error) -} - -// DocumentBackend is all services and associated parameters required to construct -// the DocumentHandler. -type DocumentBackend struct { - log *zap.Logger - errors.HTTPErrorHandler - - DocumentService influxdb.DocumentService -} - -// NewDocumentBackend returns a new instance of DocumentBackend. -func NewDocumentBackend(log *zap.Logger, b *APIBackend) *DocumentBackend { - return &DocumentBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - DocumentService: b.DocumentService, - } -} - -// DocumentHandler represents an HTTP API handler for documents. -type DocumentHandler struct { - *httprouter.Router - - log *zap.Logger - errors.HTTPErrorHandler - - DocumentService influxdb.DocumentService - LabelService influxdb.LabelService -} - -const ( - documentsPath = "/api/v2/documents/:ns" -) - -// NewDocumentHandler returns a new instance of DocumentHandler. -// TODO(desa): this should probably take a namespace -func NewDocumentHandler(b *DocumentBackend) *DocumentHandler { - h := &DocumentHandler{ - Router: NewRouter(b.HTTPErrorHandler), - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log, - - DocumentService: b.DocumentService, - } - - h.HandlerFunc("GET", documentsPath, h.handleGetDocuments) - - return h -} - -type documentResponse struct { - Links map[string]string `json:"links"` - *influxdb.Document -} - -func newDocumentResponse(ns string, d *influxdb.Document) *documentResponse { - if d.Labels == nil { - d.Labels = []*influxdb.Label{} - } - return &documentResponse{ - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/documents/%s/%s", ns, d.ID), - }, - Document: d, - } -} - -type documentsResponse struct { - Documents []*documentResponse `json:"documents"` -} - -func newDocumentsResponse(ns string, docs []*influxdb.Document) *documentsResponse { - ds := make([]*documentResponse, 0, len(docs)) - for _, doc := range docs { - ds = append(ds, newDocumentResponse(ns, doc)) - } - - return &documentsResponse{ - Documents: ds, - } -} - -// handleGetDocuments is the HTTP handler for the GET /api/v2/documents/:ns route. 
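// The namespace comes from the :ns path parameter and the organization from
// the org or orgID query parameter; the response is a JSON object of the form
// {"documents": [...]}, where each entry carries the document, its meta block,
// and a self link (see docsResp in document_test.go for a full example).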
-func (h *DocumentHandler) handleGetDocuments(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetDocumentsRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - s, err := h.DocumentService.FindDocumentStore(ctx, req.Namespace) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - ds, err := s.FindDocuments(ctx, req.OrgID) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - h.log.Debug("Documents retrieved", zap.String("documents", fmt.Sprint(ds))) - - if err := encodeResponse(ctx, w, http.StatusOK, newDocumentsResponse(req.Namespace, ds)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type getDocumentsRequest struct { - Namespace string - Org string - OrgID platform.ID -} - -func decodeGetDocumentsRequest(ctx context.Context, r *http.Request) (*getDocumentsRequest, error) { - params := httprouter.ParamsFromContext(ctx) - ns := params.ByName("ns") - if ns == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing namespace", - } - } - - qp := r.URL.Query() - req := &getDocumentsRequest{ - Namespace: ns, - Org: qp.Get("org"), - } - - if oidStr := qp.Get("orgID"); oidStr != "" { - oid, err := platform.IDFromString(oidStr) - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "Invalid orgID", - } - } - req.OrgID = *oid - } - return req, nil -} - -type documentService struct { - Client *httpc.Client -} - -// NewDocumentService creates a client to connect to Influx via HTTP to manage documents. -func NewDocumentService(client *httpc.Client) DocumentService { - return &documentService{ - Client: client, - } -} - -func buildDocumentsPath(namespace string) string { - return path.Join(prefixDocuments, namespace) -} - -// GetDocuments returns the documents for a `namespace` and an `orgID`. -// Returned documents do not contain their content. -func (s *documentService) GetDocuments(ctx context.Context, namespace string, orgID platform.ID) ([]*influxdb.Document, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var resp documentsResponse - r := s.Client. - Get(buildDocumentsPath(namespace)). 
- DecodeJSON(&resp) - r = r.QueryParams([2]string{"orgID", orgID.String()}) - if err := r.Do(ctx); err != nil { - return nil, err - } - docs := make([]*influxdb.Document, len(resp.Documents)) - for i := 0; i < len(docs); i++ { - docs[i] = resp.Documents[i].Document - } - return docs, nil -} diff --git a/http/document_service_test.go b/http/document_service_test.go deleted file mode 100644 index 839646eb270..00000000000 --- a/http/document_service_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package http - -import ( - "context" - "net/http/httptest" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - icontext "github.com/influxdata/influxdb/v2/context" - httpmock "github.com/influxdata/influxdb/v2/http/mock" - "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -const namespace = "templates" - -type fixture struct { - Org *influxdb.Organization - Labels []*influxdb.Label - Document *influxdb.Document - AnotherDocument *influxdb.Document -} - -func setup(t *testing.T) (func(auth influxdb.Authorizer) *httptest.Server, func(serverUrl string) DocumentService, fixture) { - ctx := context.Background() - - store := itesting.NewTestInmemStore(t) - tenantStore := tenant.NewStore(store) - tenantService := tenant.NewService(tenantStore) - svc := kv.NewService(zaptest.NewLogger(t), store, tenantService) - - ds, err := svc.CreateDocumentStore(ctx, namespace) - if err != nil { - t.Fatalf("failed to create document store: %v", err) - } - - user := &influxdb.User{Name: "user"} - itesting.MustCreateUsers(ctx, tenantService, user) - - // Need this to make resource creation work. - // We are not testing authorization in the setup. - auth := mock.NewMockAuthorizer(true, nil) - auth.UserID = user.ID - ctx = icontext.SetAuthorizer(ctx, auth) - - org := &influxdb.Organization{Name: "org"} - itesting.MustCreateOrgs(ctx, tenantService, org) - doc := &influxdb.Document{ - Content: "I am a free document", - } - if err := ds.CreateDocument(ctx, doc); err != nil { - panic(err) - } - // Organizations are needed only for creation. - // Need to cleanup for comparison later. 
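// (Presumably CreateDocument fills this field in from the creation context;
// the cmp.Diff in GetDocuments below compares against the HTTP response, which
// is not expected to carry organizations, hence the reset.)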
- doc.Organizations = nil - adoc := &influxdb.Document{ - Content: "I am another document", - } - if err := ds.CreateDocument(ctx, adoc); err != nil { - panic(err) - } - - backend := NewMockDocumentBackend(t) - backend.HTTPErrorHandler = http.NewErrorHandler(zaptest.NewLogger(t)) - backend.DocumentService = authorizer.NewDocumentService(svc) - serverFn := func(auth influxdb.Authorizer) *httptest.Server { - handler := httpmock.NewAuthMiddlewareHandler(NewDocumentHandler(backend), auth) - return httptest.NewServer(handler) - } - clientFn := func(serverUrl string) DocumentService { - return NewDocumentService(mustNewHTTPClient(t, serverUrl, "")) - } - f := fixture{ - Org: org, - Document: doc, - AnotherDocument: adoc, - } - return serverFn, clientFn, f -} - -func (f fixture) auth(action influxdb.Action) *influxdb.Authorization { - a := &influxdb.Authorization{ - Permissions: []influxdb.Permission{ - { - Action: action, - Resource: influxdb.Resource{ - Type: influxdb.DocumentsResourceType, - OrgID: &f.Org.ID, - }, - }, - }, - Status: influxdb.Active, - } - if action == influxdb.WriteAction { - a.Permissions = append(a.Permissions, influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.DocumentsResourceType, - OrgID: &f.Org.ID, - }, - }) - } - return a -} - -func (f fixture) authKO() *influxdb.Authorization { - return &influxdb.Authorization{ - Status: influxdb.Active, - } -} - -// TestDocumentService tests all the service functions using the document HTTP client. -func TestDocumentService(t *testing.T) { - tests := []struct { - name string - fn func(t *testing.T) - }{ - { - name: "GetDocuments", - fn: GetDocuments, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.fn(t) - }) - } -} - -func GetDocuments(t *testing.T) { - t.Run("get existing documents", func(t *testing.T) { - serverFn, clientFn, fx := setup(t) - server := serverFn(fx.auth(influxdb.ReadAction)) - defer server.Close() - client := clientFn(server.URL) - got, err := client.GetDocuments(context.Background(), namespace, fx.Org.ID) - if err != nil { - t.Fatal(err) - } - want := []*influxdb.Document{fx.Document, fx.AnotherDocument} - want[0].Content = nil // response will not contain the content of documents - want[1].Content = nil // response will not contain the content of documents - if diff := cmp.Diff(got, want); diff != "" { - t.Errorf("got unexpected document:\n\t%s", diff) - } - }) - - t.Run("unauthorized", func(t *testing.T) { - serverFn, clientFn, fx := setup(t) - server := serverFn(fx.authKO()) - defer server.Close() - client := clientFn(server.URL) - docs, err := client.GetDocuments(context.Background(), namespace, fx.Org.ID) - require.Error(t, err, "call should be unauthorized") - assert.Empty(t, docs) - }) -} diff --git a/http/document_test.go b/http/document_test.go deleted file mode 100644 index a669f50f8f4..00000000000 --- a/http/document_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package http - -import ( - "context" - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - pcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/mock" - influxtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -var ( - doc1ID = 
influxtesting.MustIDBase16("020f755c3c082010") - doc2ID = influxtesting.MustIDBase16("020f755c3c082011") - doc1 = influxdb.Document{ - ID: doc1ID, - Meta: influxdb.DocumentMeta{ - Name: "doc1", - Type: "typ1", - Description: "desc1", - }, - Content: "content1", - } - doc2 = influxdb.Document{ - ID: doc2ID, - Meta: influxdb.DocumentMeta{ - Name: "doc2", - }, - Content: "content2", - } - - docs = []*influxdb.Document{ - &doc1, - &doc2, - } - docsResp = `{ - "documents":[ - { - "id": "020f755c3c082010", - "links": { - "self": "/api/v2/documents/template/020f755c3c082010" - }, - "content": "content1", - "meta": { - "name": "doc1", - "type": "typ1", - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "description": "desc1" - } - }, - { - "id": "020f755c3c082011", - "links": { - "self": "/api/v2/documents/template/020f755c3c082011" - }, - "content": "content2", - "meta": { - "name": "doc2", - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z" - } - } - ] - }` - findDocsServiceMock = &mock.DocumentService{ - FindDocumentStoreFn: func(context.Context, string) (influxdb.DocumentStore, error) { - return &mock.DocumentStore{ - FindDocumentsFn: func(ctx context.Context, _ platform.ID) ([]*influxdb.Document, error) { - return docs, nil - }, - }, nil - }, - } -) - -// NewMockDocumentBackend returns a DocumentBackend with mock services. -func NewMockDocumentBackend(t *testing.T) *DocumentBackend { - return &DocumentBackend{ - log: zaptest.NewLogger(t), - - DocumentService: mock.NewDocumentService(), - } -} - -func TestService_handleGetDocuments(t *testing.T) { - type fields struct { - DocumentService influxdb.DocumentService - } - type args struct { - authorizer influxdb.Authorizer - orgID platform.ID - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get all documents", - fields: fields{ - DocumentService: findDocsServiceMock, - }, - args: args{ - authorizer: mock.NewMockAuthorizer(true, nil), - orgID: platform.ID(2), - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: docsResp, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - documentBackend := NewMockDocumentBackend(t) - documentBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - documentBackend.DocumentService = tt.fields.DocumentService - h := NewDocumentHandler(documentBackend) - - r := httptest.NewRequest("GET", "http://any.url", nil) - qp := r.URL.Query() - qp.Add("orgID", tt.args.orgID.String()) - r.URL.RawQuery = qp.Encode() - - r = r.WithContext(pcontext.SetAuthorizer(r.Context(), tt.args.authorizer)) - r = r.WithContext(context.WithValue(r.Context(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "ns", - Value: "template", - }})) - w := httptest.NewRecorder() - h.handleGetDocuments(w, r) - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetDocuments() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetDocuments() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetDocuments(). 
error unmarshaling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handleGetDocuments() = ***%s***", tt.name, diff) - } - } - }) - } -} diff --git a/http/duration.go b/http/duration.go deleted file mode 100644 index ae56628b15e..00000000000 --- a/http/duration.go +++ /dev/null @@ -1,166 +0,0 @@ -package http - -import ( - "fmt" - "strconv" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// ErrInvalidDuration is returned when parsing a malformatted duration. -var ErrInvalidDuration = &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid duration", -} - -// ParseDuration parses a time duration from a string. -// This is needed instead of time.ParseDuration because this will support -// the full syntax that InfluxQL supports for specifying durations -// including weeks and days. -func ParseDuration(s string) (time.Duration, error) { - // Return an error if the string is blank or one character - if len(s) < 2 { - return 0, ErrInvalidDuration - } - - // Split string into individual runes. - a := split(s) - - // Start with a zero duration. - var d time.Duration - i := 0 - - // Check for a negative. - isNegative := false - if a[i] == '-' { - isNegative = true - i++ - } - - var measure int64 - var unit string - - // Parsing loop. - for i < len(a) { - // Find the number portion. - start := i - for ; i < len(a) && isDigit(a[i]); i++ { - // Scan for the digits. - } - - // Check if we reached the end of the string prematurely. - if i >= len(a) || i == start { - return 0, ErrInvalidDuration - } - - // Parse the numeric part. - n, err := strconv.ParseInt(string(a[start:i]), 10, 64) - if err != nil { - return 0, ErrInvalidDuration - } - measure = n - - // Extract the unit of measure. - unit = string(a[i]) - switch a[i] { - case 'n': - d += time.Duration(n) * time.Nanosecond - if i+1 < len(a) && a[i+1] == 's' { - unit = string(a[i : i+2]) - i += 2 - continue - } - case 'u', 'µ': - d += time.Duration(n) * time.Microsecond - if i+1 < len(a) && a[i+1] == 's' { - unit = string(a[i : i+2]) - i += 2 - continue - } - case 'm': - // Check for `mo` and `ms`; month and millisecond, respectively. - if i+1 < len(a) { - switch a[i+1] { - case 's': // ms == milliseconds - unit = string(a[i : i+2]) - d += time.Duration(n) * time.Millisecond - i += 2 - continue - case 'o': // mo == month - // TODO(goller): use real duration values: - // https://github.com/influxdata/platform/issues/657 - unit = string(a[i : i+2]) - d += time.Duration(n) * 30 * 24 * time.Hour - i += 2 - continue - } - } - d += time.Duration(n) * time.Minute - case 's': - d += time.Duration(n) * time.Second - case 'h': - d += time.Duration(n) * time.Hour - case 'd': - d += time.Duration(n) * 24 * time.Hour - case 'w': - // TODO(goller): use real duration values: - // https://github.com/influxdata/platform/issues/657 - d += time.Duration(n) * 7 * 24 * time.Hour - case 'y': - // TODO(goller): use real duration values: - // https://github.com/influxdata/platform/issues/657 - d += time.Duration(n) * 365 * 24 * time.Hour - default: - return 0, ErrInvalidDuration - } - i++ - } - - // Check to see if we overflowed a duration - if d < 0 && !isNegative { - return 0, fmt.Errorf("overflowed duration %d%s: choose a smaller duration or INF", measure, unit) - } - - if isNegative { - d = -d - } - return d, nil -} - -// FormatDuration formats a duration to a string. 
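FormatDuration (below) is the inverse of ParseDuration: it picks the largest unit that divides the duration evenly, using the same 30-day month and 365-day year approximations. A minimal round-trip sketch, with expected outputs taken from the duration tests later in this diff (the fragment assumes it runs inside this `http` package, with `fmt` and `time` imported):

```go
// Sketch only; the value/output pairs mirror cases in duration_test.go.
d, err := ParseDuration("2mo") // 2 * 30 * 24 * time.Hour under the month approximation
if err != nil {
	panic(err)
}
fmt.Println(FormatDuration(d))                       // "2mo"
fmt.Println(FormatDuration(2 * time.Hour))           // "2h"
fmt.Println(FormatDuration(1001 * time.Microsecond)) // "1001us" (not an even number of milliseconds)
```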
-func FormatDuration(d time.Duration) string { - if d == 0 { - return "0s" - } else if d%(365*24*time.Hour) == 0 { - return fmt.Sprintf("%dy", d/(365*24*time.Hour)) - } else if d%(30*24*time.Hour) == 0 { - return fmt.Sprintf("%dmo", d/(30*24*time.Hour)) - } else if d%(7*24*time.Hour) == 0 { - return fmt.Sprintf("%dw", d/(7*24*time.Hour)) - } else if d%(24*time.Hour) == 0 { - return fmt.Sprintf("%dd", d/(24*time.Hour)) - } else if d%time.Hour == 0 { - return fmt.Sprintf("%dh", d/time.Hour) - } else if d%time.Minute == 0 { - return fmt.Sprintf("%dm", d/time.Minute) - } else if d%time.Second == 0 { - return fmt.Sprintf("%ds", d/time.Second) - } else if d%time.Millisecond == 0 { - return fmt.Sprintf("%dms", d/time.Millisecond) - } else if d%time.Microsecond == 0 { - return fmt.Sprintf("%dus", d/time.Microsecond) - } - return fmt.Sprintf("%dns", d/time.Nanosecond) -} - -// split splits a string into a slice of runes. -func split(s string) (a []rune) { - for _, ch := range s { - a = append(a, ch) - } - return -} - -// isDigit returns true if the rune is a digit. -func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') } diff --git a/http/duration_test.go b/http/duration_test.go deleted file mode 100644 index 5e5cbf33f34..00000000000 --- a/http/duration_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package http - -import ( - "fmt" - "testing" - "time" -) - -// Ensure a time duration can be parsed. -func TestParseDuration(t *testing.T) { - var tests = []struct { - s string - want time.Duration - wantErr bool - }{ - {s: `10n`, want: 10 * time.Nanosecond}, - {s: `10ns`, want: 10 * time.Nanosecond}, - {s: `10u`, want: 10 * time.Microsecond}, - {s: `10µ`, want: 10 * time.Microsecond}, - {s: `10us`, want: 10 * time.Microsecond}, - {s: `10µs`, want: 10 * time.Microsecond}, - {s: `15ms`, want: 15 * time.Millisecond}, - {s: `100s`, want: 100 * time.Second}, - {s: `2m`, want: 2 * time.Minute}, - {s: `2mo`, want: 2 * 30 * 24 * time.Hour}, - {s: `2h`, want: 2 * time.Hour}, - {s: `2d`, want: 2 * 24 * time.Hour}, - {s: `2w`, want: 2 * 7 * 24 * time.Hour}, - {s: `2y`, want: 2 * 365 * 24 * time.Hour}, - {s: `1h30m`, want: time.Hour + 30*time.Minute}, - {s: `30ms3000u`, want: 30*time.Millisecond + 3000*time.Microsecond}, - {s: `-5s`, want: -5 * time.Second}, - {s: `-5m30s`, want: -5*time.Minute - 30*time.Second}, - {s: ``, wantErr: true}, - {s: `3`, wantErr: true}, - {s: `3mm`, wantErr: true}, - {s: `3nm`, wantErr: true}, - {s: `1000`, wantErr: true}, - {s: `w`, wantErr: true}, - {s: `ms`, wantErr: true}, - {s: `1.2w`, wantErr: true}, - {s: `10x`, wantErr: true}, - } - - for i, tt := range tests { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - got, err := ParseDuration(tt.s) - if (err != nil) != tt.wantErr { - t.Errorf("ParseDuration() error = %v, wantErr %v", err, tt.wantErr) - return - } - - if got != tt.want { - t.Errorf("ParseDuration() = %v, want %v", got, tt.want) - } - }) - } -} - -// Ensure a time duration can be formatted. 
-func TestFormatDuration(t *testing.T) { - var tests = []struct { - d time.Duration - want string - }{ - {d: 3 * time.Nanosecond, want: `3ns`}, - {d: 3 * time.Microsecond, want: `3us`}, - {d: 1001 * time.Microsecond, want: `1001us`}, - {d: 15 * time.Millisecond, want: `15ms`}, - {d: 100 * time.Second, want: `100s`}, - {d: 2 * time.Minute, want: `2m`}, - {d: 2 * time.Hour, want: `2h`}, - {d: 2 * 24 * time.Hour, want: `2d`}, - {d: 2 * 7 * 24 * time.Hour, want: `2w`}, - {d: 2 * 30 * 24 * time.Hour, want: `2mo`}, - {d: 2 * 365 * 24 * time.Hour, want: `2y`}, - } - - for i, tt := range tests { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - got := FormatDuration(tt.d) - if got != tt.want { - t.Errorf("FormatDuration() = %s, want %s", got, tt.want) - } - }) - } -} diff --git a/http/errors.go b/http/errors.go deleted file mode 100644 index 9824f116b2a..00000000000 --- a/http/errors.go +++ /dev/null @@ -1,121 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - stderrors "errors" - "fmt" - "io" - "mime" - "net/http" - "strings" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" - khttp "github.com/influxdata/influxdb/v2/kit/transport/http" -) - -// AuthzError is returned for authorization errors. When this error type is returned, -// the user can be presented with a generic "authorization failed" error, but -// the system can log the underlying AuthzError() so that operators have insight -// into what actually failed with authorization. -type AuthzError interface { - error - AuthzError() error -} - -// CheckErrorStatus for status and any error in the response. -func CheckErrorStatus(code int, res *http.Response) error { - err := CheckError(res) - if err != nil { - return err - } - - if res.StatusCode != code { - return fmt.Errorf("unexpected status code: %s", res.Status) - } - - return nil -} - -// CheckError reads the http.Response and returns an error if one exists. -// It will automatically recognize the errors returned by Influx services -// and decode the error into an internal error type. If the error cannot -// be determined in that way, it will create a generic error message. -// -// If there is no error, then this returns nil. -func CheckError(resp *http.Response) (err error) { - switch resp.StatusCode / 100 { - case 4, 5: - // We will attempt to parse this error outside of this block. - case 2: - return nil - default: - // TODO(jsternberg): Figure out what to do here? - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("unexpected status code: %d %s", resp.StatusCode, resp.Status), - } - } - - perr := &errors.Error{ - Code: khttp.StatusCodeToErrorCode(resp.StatusCode), - } - - if resp.StatusCode == http.StatusUnsupportedMediaType { - perr.Msg = fmt.Sprintf("invalid media type: %q", resp.Header.Get("Content-Type")) - return perr - } - - contentType := resp.Header.Get("Content-Type") - if contentType == "" { - // Assume JSON if there is no content-type. 
- contentType = "application/json" - } - mediatype, _, _ := mime.ParseMediaType(contentType) - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - perr.Msg = "failed to read error response" - perr.Err = err - return perr - } - - switch mediatype { - case "application/json": - if err := json.Unmarshal(buf.Bytes(), perr); err != nil { - perr.Msg = fmt.Sprintf("attempted to unmarshal error as JSON but failed: %q", err) - perr.Err = firstLineAsError(buf) - } - default: - perr.Err = firstLineAsError(buf) - } - - if perr.Code == "" { - // given it was unset during attempt to unmarshal as JSON - perr.Code = khttp.StatusCodeToErrorCode(resp.StatusCode) - } - - return perr -} - -func firstLineAsError(buf bytes.Buffer) error { - line, _ := buf.ReadString('\n') - return stderrors.New(strings.TrimSuffix(line, "\n")) -} - -// UnauthorizedError encodes a error message and status code for unauthorized access. -func UnauthorizedError(ctx context.Context, h errors.HTTPErrorHandler, w http.ResponseWriter) { - h.HandleHTTPError(ctx, &errors.Error{ - Code: errors.EUnauthorized, - Msg: "unauthorized access", - }, w) -} - -// InactiveUserError encode a error message and status code for inactive users. -func InactiveUserError(ctx context.Context, h errors.HTTPErrorHandler, w http.ResponseWriter) { - h.HandleHTTPError(ctx, &errors.Error{ - Code: errors.EForbidden, - Msg: "User is inactive", - }, w) -} diff --git a/http/errors_test.go b/http/errors_test.go deleted file mode 100644 index 69825b1a15c..00000000000 --- a/http/errors_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package http_test - -import ( - "context" - "encoding/json" - stderrors "errors" - "io" - "net/http/httptest" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap/zaptest" -) - -func TestCheckError(t *testing.T) { - for _, tt := range []struct { - name string - write func(w *httptest.ResponseRecorder) - want error - }{ - { - name: "platform error", - write: func(w *httptest.ResponseRecorder) { - h := kithttp.NewErrorHandler(zaptest.NewLogger(t)) - err := &errors.Error{ - Msg: "expected", - Code: errors.EInvalid, - } - h.HandleHTTPError(context.Background(), err, w) - }, - want: &errors.Error{ - Msg: "expected", - Code: errors.EInvalid, - }, - }, - { - name: "text error", - write: func(w *httptest.ResponseRecorder) { - w.Header().Set("Content-Type", "text/plain") - w.WriteHeader(500) - _, _ = io.WriteString(w, "upstream timeout\n") - }, - want: &errors.Error{ - Code: errors.EInternal, - Err: stderrors.New("upstream timeout"), - }, - }, - { - name: "error with bad json", - write: func(w *httptest.ResponseRecorder) { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(500) - _, _ = io.WriteString(w, "upstream timeout\n") - }, - want: &errors.Error{ - Code: errors.EInternal, - Msg: `attempted to unmarshal error as JSON but failed: "invalid character 'u' looking for beginning of value"`, - Err: stderrors.New("upstream timeout"), - }, - }, - { - name: "error with no content-type (encoded as json - with code)", - write: func(w *httptest.ResponseRecorder) { - w.WriteHeader(500) - _, _ = io.WriteString(w, `{"error": "service unavailable", "code": "unavailable"}`) - }, - want: &errors.Error{ - Code: errors.EUnavailable, - Err: stderrors.New("service unavailable"), - }, - }, - { - name: "error with no 
content-type (encoded as json - no code)", - write: func(w *httptest.ResponseRecorder) { - w.WriteHeader(503) - _, _ = io.WriteString(w, `{"error": "service unavailable"}`) - }, - want: &errors.Error{ - Code: errors.EUnavailable, - Err: stderrors.New("service unavailable"), - }, - }, - { - name: "error with no content-type (not json encoded)", - write: func(w *httptest.ResponseRecorder) { - w.WriteHeader(503) - }, - want: &errors.Error{ - Code: errors.EUnavailable, - Msg: `attempted to unmarshal error as JSON but failed: "unexpected end of JSON input"`, - Err: stderrors.New(""), - }, - }, - } { - t.Run(tt.name, func(t *testing.T) { - w := httptest.NewRecorder() - tt.write(w) - - resp := w.Result() - cmpopt := cmp.Transformer("error", func(e error) string { - if e, ok := e.(*errors.Error); ok { - out, _ := json.Marshal(e) - return string(out) - } - return e.Error() - }) - if got, want := http.CheckError(resp), tt.want; !cmp.Equal(want, got, cmpopt) { - t.Fatalf("unexpected error -want/+got:\n%s", cmp.Diff(want, got, cmpopt)) - } - }) - } -} diff --git a/http/handler.go b/http/handler.go deleted file mode 100644 index 494f6d63e73..00000000000 --- a/http/handler.go +++ /dev/null @@ -1,212 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "net/http" - - "github.com/go-chi/chi" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/prom" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/pprof" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -const ( - // MetricsPath exposes the prometheus metrics over /metrics. - MetricsPath = "/metrics" - // ReadyPath exposes the readiness of the service over /ready. - ReadyPath = "/ready" - // HealthPath exposes the health of the service over /health. - HealthPath = "/health" - // DebugPath exposes /debug/pprof for go debugging. - DebugPath = "/debug" -) - -// Handler provides basic handling of metrics, health and debug endpoints. -// All other requests are passed down to the sub handler. -type Handler struct { - name string - r chi.Router - - requests *prometheus.CounterVec - requestDur *prometheus.HistogramVec - - // log logs all HTTP requests as they are served - log *zap.Logger -} - -type ( - handlerOpts struct { - log *zap.Logger - apiHandler http.Handler - healthHandler http.Handler - readyHandler http.Handler - pprofEnabled bool - - // NOTE: Track the registry even if metricsExposed = false - // so we can report HTTP metrics via telemetry. 
- metricsRegistry *prom.Registry - metricsExposed bool - } - - HandlerOptFn func(opts *handlerOpts) -) - -func (o *handlerOpts) metricsHTTPHandler() http.Handler { - if o.metricsRegistry != nil && o.metricsExposed { - return o.metricsRegistry.HTTPHandler() - } - handlerFunc := func(rw http.ResponseWriter, r *http.Request) { - kithttp.WriteErrorResponse(r.Context(), rw, errors.EForbidden, "metrics disabled") - } - return http.HandlerFunc(handlerFunc) -} - -func WithLog(l *zap.Logger) HandlerOptFn { - return func(opts *handlerOpts) { - opts.log = l - } -} - -func WithAPIHandler(h http.Handler) HandlerOptFn { - return func(opts *handlerOpts) { - opts.apiHandler = h - } -} - -func WithPprofEnabled(enabled bool) HandlerOptFn { - return func(opts *handlerOpts) { - opts.pprofEnabled = enabled - } -} - -func WithMetrics(reg *prom.Registry, exposed bool) HandlerOptFn { - return func(opts *handlerOpts) { - opts.metricsRegistry = reg - opts.metricsExposed = exposed - } -} - -type AddHeader struct { - WriteHeader func(header http.Header) -} - -// Middleware is a middleware that mutates the header of all responses -func (h *AddHeader) Middleware(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - h.WriteHeader(w.Header()) - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) -} - -// NewRootHandler creates a new handler with the given name and registers any root-level -// (non-API) routes enabled by the caller. -func NewRootHandler(name string, opts ...HandlerOptFn) *Handler { - opt := handlerOpts{ - log: zap.NewNop(), - healthHandler: http.HandlerFunc(HealthHandler), - readyHandler: ReadyHandler(), - pprofEnabled: false, - metricsRegistry: nil, - metricsExposed: false, - } - for _, o := range opts { - o(&opt) - } - - h := &Handler{ - name: name, - log: opt.log, - } - h.initMetrics() - - r := chi.NewRouter() - buildHeader := &AddHeader{ - WriteHeader: func(header http.Header) { - header.Add("X-Influxdb-Build", "OSS") - header.Add("X-Influxdb-Version", influxdb.GetBuildInfo().Version) - }, - } - r.Use(buildHeader.Middleware) - // only gather metrics for system handlers - r.Group(func(r chi.Router) { - r.Use( - kithttp.Metrics(name, h.requests, h.requestDur), - ) - r.Mount(MetricsPath, opt.metricsHTTPHandler()) - r.Mount(ReadyPath, opt.readyHandler) - r.Mount(HealthPath, opt.healthHandler) - r.Mount(DebugPath, pprof.NewHTTPHandler(opt.pprofEnabled)) - }) - - // gather metrics and traces for everything else - r.Group(func(r chi.Router) { - r.Use( - kithttp.Trace(name), - kithttp.Metrics(name, h.requests, h.requestDur), - ) - r.Mount("/", opt.apiHandler) - }) - - h.r = r - - if opt.metricsRegistry != nil { - opt.metricsRegistry.MustRegister(h.PrometheusCollectors()...) - } - return h -} - -// ServeHTTP delegates a request to the appropriate subhandler. -func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - h.r.ServeHTTP(w, r) -} - -// PrometheusCollectors satisfies prom.PrometheusCollector. 
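The collectors returned below are what a caller registers by passing a prom.Registry through WithMetrics. Putting the options together, the root handler is built roughly as handler_test.go (further down in this diff) does; in this sketch the logger, API handler, and listen address are assumptions:

```go
// Sketch only: wiring mirrors the construction in handler_test.go.
reg := prom.NewRegistry(logger)
h := NewRootHandler("platform",
	WithLog(logger),
	WithAPIHandler(apiHandler), // the API router, assumed to exist
	WithPprofEnabled(false),
	WithMetrics(reg, true), // pass false to keep collecting HTTP metrics without exposing /metrics
)
_ = http.ListenAndServe(":8086", h) // address is an assumption, not taken from this file
```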
-func (h *Handler) PrometheusCollectors() []prometheus.Collector { - return []prometheus.Collector{ - h.requests, - h.requestDur, - } -} - -func (h *Handler) initMetrics() { - const namespace = "http" - const handlerSubsystem = "api" - - labelNames := []string{"handler", "method", "path", "status", "user_agent", "response_code"} - h.requests = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: handlerSubsystem, - Name: "requests_total", - Help: "Number of http requests received", - }, labelNames) - - h.requestDur = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: handlerSubsystem, - Name: "request_duration_seconds", - Help: "Time taken to respond to HTTP request", - }, labelNames) -} - -func encodeResponse(ctx context.Context, w http.ResponseWriter, code int, res interface{}) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.WriteHeader(code) - - return json.NewEncoder(w).Encode(res) -} - -func logEncodingError(log *zap.Logger, r *http.Request, err error) { - // If we encounter an error while encoding the response to an http request - // the best thing we can do is log that error, as we may have already written - // the headers for the http request in question. - log.Error("Error encoding response", - zap.String("path", r.URL.Path), - zap.String("method", r.Method), - zap.Error(err)) -} diff --git a/http/handler_test.go b/http/handler_test.go deleted file mode 100644 index 783e937b9db..00000000000 --- a/http/handler_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package http - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/kit/prom/promtest" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" -) - -func TestHandler_ServeHTTP(t *testing.T) { - type fields struct { - name string - handler http.Handler - handlerHidden bool - log *zap.Logger - } - tests := []struct { - name string - fields fields - }{ - { - name: "should record metrics when http handling", - fields: fields{ - name: "test", - handler: http.HandlerFunc(func(http.ResponseWriter, *http.Request) {}), - log: zaptest.NewLogger(t), - }, - }, - { - name: "should record metrics even when not exposed over HTTP", - fields: fields{ - name: "test", - handler: http.HandlerFunc(func(http.ResponseWriter, *http.Request) {}), - handlerHidden: true, - log: zaptest.NewLogger(t), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - reg := prom.NewRegistry(zaptest.NewLogger(t)) - h := NewRootHandler( - tt.fields.name, - WithLog(tt.fields.log), - WithAPIHandler(tt.fields.handler), - WithMetrics(reg, !tt.fields.handlerHidden), - ) - - req := httptest.NewRequest(http.MethodGet, "/", nil) - req.Header.Set("User-Agent", "ua1") - h.ServeHTTP(httptest.NewRecorder(), req) - - mfs, err := reg.Gather() - require.NoError(t, err) - - c := promtest.MustFindMetric(t, mfs, "http_api_requests_total", map[string]string{ - "handler": "test", - "method": "GET", - "path": "/", - "status": "2XX", - "user_agent": "ua1", - "response_code": "200", - }) - require.Equal(t, 1, int(c.GetCounter().GetValue())) - - g := promtest.MustFindMetric(t, mfs, "http_api_request_duration_seconds", map[string]string{ - "handler": "test", - "method": "GET", - "path": "/", - "status": "2XX", - "user_agent": "ua1", - "response_code": "200", - }) - require.Equal(t, 1, int(g.GetHistogram().GetSampleCount())) - - 
req = httptest.NewRequest(http.MethodGet, "/metrics", nil) - recorder := httptest.NewRecorder() - h.ServeHTTP(recorder, req) - - if tt.fields.handlerHidden { - require.Equal(t, http.StatusForbidden, recorder.Code) - } else { - require.Equal(t, http.StatusOK, recorder.Code) - } - }) - } -} diff --git a/http/health.go b/http/health.go deleted file mode 100644 index 9d7ff084244..00000000000 --- a/http/health.go +++ /dev/null @@ -1,16 +0,0 @@ -package http - -import ( - "fmt" - "net/http" - - platform "github.com/influxdata/influxdb/v2" -) - -// HealthHandler returns the status of the process. -func HealthHandler(w http.ResponseWriter, r *http.Request) { - msg := fmt.Sprintf(`{"name":"influxdb", "message":"ready for queries and writes", "status":"pass", "checks":[], "version": %q, "commit": %q}`, platform.GetBuildInfo().Version, platform.GetBuildInfo().Commit) - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, msg) -} diff --git a/http/health_test.go b/http/health_test.go deleted file mode 100644 index 3e5f754f52b..00000000000 --- a/http/health_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package http - -import ( - "encoding/json" - "io" - "net/http" - "net/http/httptest" - "testing" -) - -func TestHealthHandler(t *testing.T) { - type wants struct { - statusCode int - contentType string - status string - } - tests := []struct { - name string - w *httptest.ResponseRecorder - r *http.Request - wants wants - }{ - { - name: "health endpoint returns pass", - w: httptest.NewRecorder(), - r: httptest.NewRequest(http.MethodGet, "/health", nil), - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - status: "pass", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - HealthHandler(tt.w, tt.r) - res := tt.w.Result() - contentType := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. HealthHandler() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && contentType != tt.wants.contentType { - t.Errorf("%q. HealthHandler() = %v, want %v", tt.name, contentType, tt.wants.contentType) - } - var content map[string]interface{} - if err := json.Unmarshal(body, &content); err != nil { - t.Errorf("%q, HealthHandler(). error unmarshalling json %v", tt.name, err) - return - } - if _, found := content["name"]; !found { - t.Errorf("%q. HealthHandler() no name reported", tt.name) - } - if content["status"] != tt.wants.status { - t.Errorf("%q. HealthHandler() status= %v, want %v", tt.name, content["status"], tt.wants.status) - } - if _, found := content["message"]; !found { - t.Errorf("%q. HealthHandler() no message reported", tt.name) - } - if _, found := content["checks"]; !found { - t.Errorf("%q. HealthHandler() no checks reported", tt.name) - } - if _, found := content["version"]; !found { - t.Errorf("%q. HealthHandler() no version reported", tt.name) - } - if _, found := content["commit"]; !found { - t.Errorf("%q. 
HealthHandler() no commit reported", tt.name) - } - }) - } -} diff --git a/http/influxdb/client.go b/http/influxdb/client.go deleted file mode 100644 index bc4788b2fda..00000000000 --- a/http/influxdb/client.go +++ /dev/null @@ -1,52 +0,0 @@ -package influxdb - -import ( - "crypto/tls" - "net/http" - "net/url" - - "github.com/influxdata/influxdb/v2/kit/tracing" -) - -// Shared transports for all clients to prevent leaking connections -var ( - skipVerifyTransport = &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - defaultTransport = &http.Transport{} -) - -func newURL(addr, path string) (*url.URL, error) { - u, err := url.Parse(addr) - if err != nil { - return nil, err - } - u.Path = path - return u, nil -} - -func newTraceClient(scheme string, insecure bool) *traceClient { - hc := &traceClient{ - Client: http.Client{ - Transport: defaultTransport, - }, - } - if scheme == "https" && insecure { - hc.Transport = skipVerifyTransport - } - - return hc -} - -// traceClient always injects any opentracing trace into the client requests. -type traceClient struct { - http.Client -} - -// Do injects the trace and then performs the request. -func (c *traceClient) Do(r *http.Request) (*http.Response, error) { - span, _ := tracing.StartSpanFromContext(r.Context()) - defer span.Finish() - tracing.InjectToHTTPRequest(span, r) - return c.Client.Do(r) -} diff --git a/http/influxdb/source_proxy_query_service.go b/http/influxdb/source_proxy_query_service.go deleted file mode 100644 index c153b5cea22..00000000000 --- a/http/influxdb/source_proxy_query_service.go +++ /dev/null @@ -1,107 +0,0 @@ -package influxdb - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/csv" - "github.com/influxdata/flux/lang" - platform "github.com/influxdata/influxdb/v2" - platformhttp "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/kit/check" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/query" -) - -type SourceProxyQueryService struct { - InsecureSkipVerify bool - URL string - OrganizationID platform2.ID - platform.SourceFields - platform.V1SourceFields -} - -func (s *SourceProxyQueryService) Query(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) { - switch req.Request.Compiler.CompilerType() { - case lang.FluxCompilerType: - return s.fluxQuery(ctx, w, req) - } - - return flux.Statistics{}, fmt.Errorf("compiler type not supported") -} - -func (s *SourceProxyQueryService) fluxQuery(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - request := struct { - Query string `json:"query"` - Type string `json:"type"` - Dialect flux.Dialect `json:"dialect"` - }{} - - switch c := req.Request.Compiler.(type) { - case lang.FluxCompiler: - request.Query = c.Query - request.Type = lang.FluxCompilerType - default: - return flux.Statistics{}, tracing.LogError(span, fmt.Errorf("compiler type not supported: %s", c.CompilerType())) - } - - request.Dialect = req.Dialect - if request.Dialect == nil { - request.Dialect = &csv.Dialect{ - ResultEncoderConfig: csv.ResultEncoderConfig{ - Annotations: nil, - NoHeader: false, - Delimiter: ',', - }, - } - } - - u, err := newURL(s.URL, 
"/api/v2/query") - if err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - - qp := u.Query() - qp.Set("organizationID", req.Request.OrganizationID.String()) - u.RawQuery = qp.Encode() - - var body bytes.Buffer - if err := json.NewEncoder(&body).Encode(request); err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - - hreq, err := http.NewRequest("POST", u.String(), &body) - if err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - hreq.Header.Set("Authorization", s.Token) - hreq.Header.Set("Content-Type", "application/json") - hreq = hreq.WithContext(ctx) - - hc := newTraceClient(u.Scheme, s.InsecureSkipVerify) - resp, err := hc.Do(hreq) - if err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - defer resp.Body.Close() - if err := platformhttp.CheckError(resp); err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - - if _, err = io.Copy(w, resp.Body); err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - return flux.Statistics{}, nil -} - -func (s *SourceProxyQueryService) Check(context.Context) check.Response { - return platformhttp.QueryHealthCheck(s.URL, s.InsecureSkipVerify) -} diff --git a/http/label_service.go b/http/label_service.go deleted file mode 100644 index 08e3d3aac86..00000000000 --- a/http/label_service.go +++ /dev/null @@ -1,648 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "path" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "go.uber.org/zap" -) - -// LabelHandler represents an HTTP API handler for labels -type LabelHandler struct { - *httprouter.Router - errors.HTTPErrorHandler - log *zap.Logger - - LabelService influxdb.LabelService -} - -const ( - prefixLabels = "/api/v2/labels" - labelsIDPath = "/api/v2/labels/:id" -) - -// NewLabelHandler returns a new instance of LabelHandler -func NewLabelHandler(log *zap.Logger, s influxdb.LabelService, he errors.HTTPErrorHandler) *LabelHandler { - h := &LabelHandler{ - Router: NewRouter(he), - HTTPErrorHandler: he, - log: log, - LabelService: s, - } - - h.HandlerFunc("POST", prefixLabels, h.handlePostLabel) - h.HandlerFunc("GET", prefixLabels, h.handleGetLabels) - - h.HandlerFunc("GET", labelsIDPath, h.handleGetLabel) - h.HandlerFunc("PATCH", labelsIDPath, h.handlePatchLabel) - h.HandlerFunc("DELETE", labelsIDPath, h.handleDeleteLabel) - - return h -} - -func (h *LabelHandler) Prefix() string { - return prefixLabels -} - -// handlePostLabel is the HTTP handler for the POST /api/v2/labels route. 
-func (h *LabelHandler) handlePostLabel(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePostLabelRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := h.LabelService.CreateLabel(ctx, req.Label); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Label created", zap.String("label", fmt.Sprint(req.Label))) - if err := encodeResponse(ctx, w, http.StatusCreated, newLabelResponse(req.Label)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type postLabelRequest struct { - Label *influxdb.Label -} - -func (b postLabelRequest) Validate() error { - if b.Label.Name == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "label requires a name", - } - } - if !b.Label.OrgID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "label requires a valid orgID", - } - } - return nil -} - -// TODO(jm): ensure that the specified org actually exists -func decodePostLabelRequest(ctx context.Context, r *http.Request) (*postLabelRequest, error) { - l := &influxdb.Label{} - if err := json.NewDecoder(r.Body).Decode(l); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unable to decode label request", - Err: err, - } - } - - req := &postLabelRequest{ - Label: l, - } - - return req, req.Validate() -} - -// handleGetLabels is the HTTP handler for the GET /api/v2/labels route. -func (h *LabelHandler) handleGetLabels(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetLabelsRequest(r.URL.Query()) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - labels, err := h.LabelService.FindLabels(ctx, req.filter) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Labels retrieved", zap.String("labels", fmt.Sprint(labels))) - err = encodeResponse(ctx, w, http.StatusOK, newLabelsResponse(labels)) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } -} - -type getLabelsRequest struct { - filter influxdb.LabelFilter -} - -func decodeGetLabelsRequest(qp url.Values) (*getLabelsRequest, error) { - req := &getLabelsRequest{ - filter: influxdb.LabelFilter{ - Name: qp.Get("name"), - }, - } - - if orgID := qp.Get("orgID"); orgID != "" { - id, err := platform.IDFromString(orgID) - if err != nil { - return nil, err - } - req.filter.OrgID = id - } - - return req, nil -} - -// handleGetLabel is the HTTP handler for the GET /api/v2/labels/id route. 
-func (h *LabelHandler) handleGetLabel(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetLabelRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - l, err := h.LabelService.FindLabelByID(ctx, req.LabelID) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Label retrieved", zap.String("label", fmt.Sprint(l))) - if err := encodeResponse(ctx, w, http.StatusOK, newLabelResponse(l)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type getLabelRequest struct { - LabelID platform.ID -} - -func decodeGetLabelRequest(ctx context.Context, r *http.Request) (*getLabelRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "label id is not valid", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - req := &getLabelRequest{ - LabelID: i, - } - - return req, nil -} - -// handleDeleteLabel is the HTTP handler for the DELETE /api/v2/labels/:id route. -func (h *LabelHandler) handleDeleteLabel(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeDeleteLabelRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := h.LabelService.DeleteLabel(ctx, req.LabelID); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Label deleted", zap.String("labelID", fmt.Sprint(req.LabelID))) - w.WriteHeader(http.StatusNoContent) -} - -type deleteLabelRequest struct { - LabelID platform.ID -} - -func decodeDeleteLabelRequest(ctx context.Context, r *http.Request) (*deleteLabelRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - req := &deleteLabelRequest{ - LabelID: i, - } - - return req, nil -} - -// handlePatchLabel is the HTTP handler for the PATCH /api/v2/labels route. 
-func (h *LabelHandler) handlePatchLabel(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePatchLabelRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - l, err := h.LabelService.UpdateLabel(ctx, req.LabelID, req.Update) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Label updated", zap.String("label", fmt.Sprint(l))) - if err := encodeResponse(ctx, w, http.StatusOK, newLabelResponse(l)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type patchLabelRequest struct { - Update influxdb.LabelUpdate - LabelID platform.ID -} - -func decodePatchLabelRequest(ctx context.Context, r *http.Request) (*patchLabelRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - upd := &influxdb.LabelUpdate{} - if err := json.NewDecoder(r.Body).Decode(upd); err != nil { - return nil, err - } - - return &patchLabelRequest{ - Update: *upd, - LabelID: i, - }, nil -} - -type labelResponse struct { - Links map[string]string `json:"links"` - Label influxdb.Label `json:"label"` -} - -func newLabelResponse(l *influxdb.Label) *labelResponse { - return &labelResponse{ - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/labels/%s", l.ID), - }, - Label: *l, - } -} - -type labelsResponse struct { - Links map[string]string `json:"links"` - Labels []*influxdb.Label `json:"labels"` -} - -func newLabelsResponse(ls []*influxdb.Label) *labelsResponse { - return &labelsResponse{ - Links: map[string]string{ - "self": "/api/v2/labels", - }, - Labels: ls, - } -} - -// LabelBackend is all services and associated parameters required to construct -// label handlers. 
-type LabelBackend struct { - log *zap.Logger - errors.HTTPErrorHandler - LabelService influxdb.LabelService - ResourceType influxdb.ResourceType -} - -// newGetLabelsHandler returns a handler func for a GET to /labels endpoints -func newGetLabelsHandler(b *LabelBackend) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - req, err := decodeGetLabelMappingsRequest(ctx, b.ResourceType) - if err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - - labels, err := b.LabelService.FindResourceLabels(ctx, req.filter) - if err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, newLabelsResponse(labels)); err != nil { - logEncodingError(b.log, r, err) - return - } - } -} - -type getLabelMappingsRequest struct { - filter influxdb.LabelMappingFilter -} - -func decodeGetLabelMappingsRequest(ctx context.Context, rt influxdb.ResourceType) (*getLabelMappingsRequest, error) { - req := &getLabelMappingsRequest{} - - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - req.filter.ResourceID = i - req.filter.ResourceType = rt - - return req, nil -} - -// newPostLabelHandler returns a handler func for a POST to /labels endpoints -func newPostLabelHandler(b *LabelBackend) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - req, err := decodePostLabelMappingRequest(ctx, r, b.ResourceType) - if err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - - if err := req.Mapping.Validate(); err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - - if err := b.LabelService.CreateLabelMapping(ctx, &req.Mapping); err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - - label, err := b.LabelService.FindLabelByID(ctx, req.Mapping.LabelID) - if err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusCreated, newLabelResponse(label)); err != nil { - logEncodingError(b.log, r, err) - return - } - } -} - -type postLabelMappingRequest struct { - Mapping influxdb.LabelMapping -} - -func decodePostLabelMappingRequest(ctx context.Context, r *http.Request, rt influxdb.ResourceType) (*postLabelMappingRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var rid platform.ID - if err := rid.DecodeFromString(id); err != nil { - return nil, err - } - - mapping := &influxdb.LabelMapping{} - if err := json.NewDecoder(r.Body).Decode(mapping); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "Invalid post label map request", - } - } - - mapping.ResourceID = rid - mapping.ResourceType = rt - - if err := mapping.Validate(); err != nil { - return nil, err - } - - req := &postLabelMappingRequest{ - Mapping: *mapping, - } - - return req, nil -} - -// newDeleteLabelHandler returns a handler func for a DELETE to /labels endpoints -func newDeleteLabelHandler(b *LabelBackend) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - req, err := decodeDeleteLabelMappingRequest(ctx, r) - if err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - - mapping := &influxdb.LabelMapping{ - LabelID: req.LabelID, 
- ResourceID: req.ResourceID, - ResourceType: b.ResourceType, - } - - if err := b.LabelService.DeleteLabelMapping(ctx, mapping); err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - - w.WriteHeader(http.StatusNoContent) - } -} - -type deleteLabelMappingRequest struct { - ResourceID platform.ID - LabelID platform.ID -} - -func decodeDeleteLabelMappingRequest(ctx context.Context, r *http.Request) (*deleteLabelMappingRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing resource id", - } - } - - var rid platform.ID - if err := rid.DecodeFromString(id); err != nil { - return nil, err - } - - id = params.ByName("lid") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "label id is missing", - } - } - - var lid platform.ID - if err := lid.DecodeFromString(id); err != nil { - return nil, err - } - - return &deleteLabelMappingRequest{ - LabelID: lid, - ResourceID: rid, - }, nil -} - -func labelIDPath(id platform.ID) string { - return path.Join(prefixLabels, id.String()) -} - -// LabelService connects to Influx via HTTP using tokens to manage labels -type LabelService struct { - Client *httpc.Client - OpPrefix string -} - -// FindLabelByID returns a single label by ID. -func (s *LabelService) FindLabelByID(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - var lr labelResponse - err := s.Client. - Get(labelIDPath(id)). - DecodeJSON(&lr). - Do(ctx) - if err != nil { - return nil, err - } - return &lr.Label, nil -} - -// FindLabels is a client for the find labels response from the server. -func (s *LabelService) FindLabels(ctx context.Context, filter influxdb.LabelFilter, opt ...influxdb.FindOptions) ([]*influxdb.Label, error) { - params := influxdb.FindOptionParams(opt...) - if filter.OrgID != nil { - params = append(params, [2]string{"orgID", filter.OrgID.String()}) - } - if filter.Name != "" { - params = append(params, [2]string{"name", filter.Name}) - } - - var lr labelsResponse - err := s.Client. - Get(prefixLabels). - QueryParams(params...). - DecodeJSON(&lr). - Do(ctx) - if err != nil { - return nil, err - } - return lr.Labels, nil -} - -// FindResourceLabels returns a list of labels, derived from a label mapping filter. -func (s *LabelService) FindResourceLabels(ctx context.Context, filter influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - if err := filter.Valid(); err != nil { - return nil, err - } - - var r labelsResponse - err := s.Client. - Get(resourceIDPath(filter.ResourceType, filter.ResourceID, "labels")). - DecodeJSON(&r). - Do(ctx) - if err != nil { - return nil, err - } - return r.Labels, nil -} - -// CreateLabel creates a new label. -func (s *LabelService) CreateLabel(ctx context.Context, l *influxdb.Label) error { - var lr labelResponse - err := s.Client. - PostJSON(l, prefixLabels). - DecodeJSON(&lr). - Do(ctx) - if err != nil { - return err - } - - // this is super dirty >_< - *l = lr.Label - return nil -} - -// UpdateLabel updates a label and returns the updated label. -func (s *LabelService) UpdateLabel(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { - var lr labelResponse - err := s.Client. - PatchJSON(upd, labelIDPath(id)). - DecodeJSON(&lr). - Do(ctx) - if err != nil { - return nil, err - } - return &lr.Label, nil -} - -// DeleteLabel removes a label by ID. 
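FindLabelByID and FindLabels above build the same paths and query parameters that the handler side parses: the ID lookup hits labelIDPath, and the filter's OrgID and Name become the orgID and name parameters read by decodeGetLabelsRequest. A client-side sketch, reusing the svc, ctx, and orgID assumptions from the earlier sketch (labelID is also an assumption):

```go
// Sketch only.
byID, err := svc.FindLabelByID(ctx, labelID) // GET /api/v2/labels/:id
if err != nil {
	// handle error
}
matches, err := svc.FindLabels(ctx, influxdb.LabelFilter{OrgID: &orgID, Name: byID.Name}) // GET /api/v2/labels?orgID=...&name=...
if err != nil {
	// handle error
}
fmt.Println(len(matches))
```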
-func (s *LabelService) DeleteLabel(ctx context.Context, id platform.ID) error { - return s.Client. - Delete(labelIDPath(id)). - Do(ctx) -} - -// CreateLabelMapping will create a labbel mapping -func (s *LabelService) CreateLabelMapping(ctx context.Context, m *influxdb.LabelMapping) error { - if err := m.Validate(); err != nil { - return err - } - - urlPath := resourceIDPath(m.ResourceType, m.ResourceID, "labels") - return s.Client. - PostJSON(m, urlPath). - DecodeJSON(m). - Do(ctx) -} - -func (s *LabelService) DeleteLabelMapping(ctx context.Context, m *influxdb.LabelMapping) error { - if err := m.Validate(); err != nil { - return err - } - - return s.Client. - Delete(resourceIDMappingPath(m.ResourceType, m.ResourceID, "labels", m.LabelID)). - Do(ctx) -} - -func resourceIDMappingPath(resourceType influxdb.ResourceType, resourceID platform.ID, p string, labelID platform.ID) string { - return path.Join("/api/v2/", string(resourceType), resourceID.String(), p, labelID.String()) -} diff --git a/http/label_test.go b/http/label_test.go deleted file mode 100644 index 57fff91198e..00000000000 --- a/http/label_test.go +++ /dev/null @@ -1,671 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/httprouter" - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/label" - "github.com/influxdata/influxdb/v2/mock" - platformtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestService_handleGetLabels(t *testing.T) { - type fields struct { - LabelService platform.LabelService - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - wants wants - }{ - { - name: "get all labels", - fields: fields{ - &mock.LabelService{ - FindLabelsFn: func(ctx context.Context, filter platform.LabelFilter) ([]*platform.Label, error) { - return []*platform.Label{ - { - ID: platformtesting.MustIDBase16("0b501e7e557ab1ed"), - Name: "hello", - Properties: map[string]string{ - "color": "fff000", - }, - }, - { - ID: platformtesting.MustIDBase16("c0175f0077a77005"), - Name: "example", - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/labels" - }, - "labels": [ - { - "id": "0b501e7e557ab1ed", - "name": "hello", - "properties": { - "color": "fff000" - } - }, - { - "id": "c0175f0077a77005", - "name": "example", - "properties": { - "color": "fff000" - } - } - ] -} -`, - }, - }, - { - name: "get all labels when there are none", - fields: fields{ - &mock.LabelService{ - FindLabelsFn: func(ctx context.Context, filter platform.LabelFilter) ([]*platform.Label, error) { - return []*platform.Label{}, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/labels" - }, - "labels": [] -}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := NewLabelHandler(zaptest.NewLogger(t), tt.fields.LabelService, 
kithttp.NewErrorHandler(zaptest.NewLogger(t))) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - w := httptest.NewRecorder() - - h.handleGetLabels(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetLabels() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetLabels() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil || tt.wants.body != "" && !eq { - t.Errorf("%q. handleGetLabels() = ***%v***", tt.name, diff) - } - }) - } -} - -func TestService_handleGetLabel(t *testing.T) { - type fields struct { - LabelService platform.LabelService - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get a label by id", - fields: fields{ - &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform2.ID) (*platform.Label, error) { - if id == platformtesting.MustIDBase16("020f755c3c082000") { - return &platform.Label{ - ID: platformtesting.MustIDBase16("020f755c3c082000"), - Name: "mylabel", - Properties: map[string]string{ - "color": "fff000", - }, - }, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/labels/020f755c3c082000" - }, - "label": { - "id": "020f755c3c082000", - "name": "mylabel", - "properties": { - "color": "fff000" - } - } -} -`, - }, - }, - { - name: "not found", - fields: fields{ - &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform2.ID) (*platform.Label, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: platform.ErrLabelNotFound, - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := NewLabelHandler(zaptest.NewLogger(t), tt.fields.LabelService, kithttp.NewErrorHandler(zaptest.NewLogger(t))) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - - w := httptest.NewRecorder() - - h.handleGetLabel(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetLabel() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetLabel() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetLabel(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handleGetLabel() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handlePostLabel(t *testing.T) { - type fields struct { - LabelService platform.LabelService - } - type args struct { - label *platform.Label - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "create a new label", - fields: fields{ - &mock.LabelService{ - CreateLabelFn: func(ctx context.Context, l *platform.Label) error { - l.ID = platformtesting.MustIDBase16("020f755c3c082000") - return nil - }, - }, - }, - args: args{ - label: &platform.Label{ - Name: "mylabel", - OrgID: platformtesting.MustIDBase16("020f755c3c082008"), - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/labels/020f755c3c082000" - }, - "label": { - "id": "020f755c3c082000", - "name": "mylabel", - "orgID": "020f755c3c082008" - } -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := NewLabelHandler(zaptest.NewLogger(t), tt.fields.LabelService, kithttp.NewErrorHandler(zaptest.NewLogger(t))) - - l, err := json.Marshal(tt.args.label) - if err != nil { - t.Fatalf("failed to marshal label: %v", err) - } - - r := httptest.NewRequest("GET", "http://any.url", bytes.NewReader(l)) - w := httptest.NewRecorder() - - h.handlePostLabel(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePostLabel() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePostLabel() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil || tt.wants.body != "" && !eq { - t.Errorf("%q. 
handlePostLabel() = ***%v***", tt.name, diff) - } - }) - } -} - -func TestService_handleDeleteLabel(t *testing.T) { - type fields struct { - LabelService platform.LabelService - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "remove a label by id", - fields: fields{ - &mock.LabelService{ - DeleteLabelFn: func(ctx context.Context, id platform2.ID) error { - if id == platformtesting.MustIDBase16("020f755c3c082000") { - return nil - } - - return fmt.Errorf("wrong id") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNoContent, - }, - }, - { - name: "label not found", - fields: fields{ - &mock.LabelService{ - DeleteLabelFn: func(ctx context.Context, id platform2.ID) error { - return &errors.Error{ - Code: errors.ENotFound, - Msg: platform.ErrLabelNotFound, - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := NewLabelHandler(zaptest.NewLogger(t), tt.fields.LabelService, kithttp.NewErrorHandler(zaptest.NewLogger(t))) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - - w := httptest.NewRecorder() - - h.handleDeleteLabel(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePostLabel() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePostLabel() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePostLabel(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handlePostLabel() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handlePatchLabel(t *testing.T) { - type fields struct { - LabelService platform.LabelService - } - type args struct { - id string - properties map[string]string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "update label properties", - fields: fields{ - &mock.LabelService{ - UpdateLabelFn: func(ctx context.Context, id platform2.ID, upd platform.LabelUpdate) (*platform.Label, error) { - if id == platformtesting.MustIDBase16("020f755c3c082000") { - l := &platform.Label{ - ID: platformtesting.MustIDBase16("020f755c3c082000"), - Name: "mylabel", - Properties: map[string]string{ - "color": "fff000", - }, - } - - for k, v := range upd.Properties { - if v == "" { - delete(l.Properties, k) - } else { - l.Properties[k] = v - } - } - - return l, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - properties: map[string]string{ - "color": "aaabbb", - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/labels/020f755c3c082000" - }, - "label": { - "id": "020f755c3c082000", - "name": "mylabel", - "properties": { - "color": "aaabbb" - } - } -} -`, - }, - }, - { - name: "label not found", - fields: fields{ - &mock.LabelService{ - UpdateLabelFn: func(ctx context.Context, id platform2.ID, upd platform.LabelUpdate) (*platform.Label, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: platform.ErrLabelNotFound, - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - properties: map[string]string{ - "color": "aaabbb", - }, - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := NewLabelHandler(zaptest.NewLogger(t), tt.fields.LabelService, kithttp.NewErrorHandler(zaptest.NewLogger(t))) - - upd := platform.LabelUpdate{} - if len(tt.args.properties) > 0 { - upd.Properties = tt.args.properties - } - - l, err := json.Marshal(upd) - if err != nil { - t.Fatalf("failed to marshal label update: %v", err) - } - - r := httptest.NewRequest("GET", "http://any.url", bytes.NewReader(l)) - - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - - w := httptest.NewRecorder() - - h.handlePatchLabel(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePatchLabel() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePatchLabel() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePatchLabel(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handlePatchLabel() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func initLabelService(f platformtesting.LabelFields, t *testing.T) (platform.LabelService, string, func()) { - store := platformtesting.NewTestInmemStore(t) - labelStore, err := label.NewStore(store) - if err != nil { - t.Fatal(err) - } - - if f.IDGenerator != nil { - labelStore.IDGenerator = f.IDGenerator - } - - labelService := label.NewService(labelStore) - - ctx := context.Background() - for _, l := range f.Labels { - mock.SetIDForFunc(&labelStore.IDGenerator, l.ID, func() { - if err := labelService.CreateLabel(ctx, l); err != nil { - t.Fatalf("failed to populate labels: %v", err) - } - }) - } - - for _, m := range f.Mappings { - if err := labelService.CreateLabelMapping(ctx, m); err != nil { - t.Fatalf("failed to populate label mappings: %v", err) - } - } - - handler := NewLabelHandler(zaptest.NewLogger(t), labelService, kithttp.NewErrorHandler(zaptest.NewLogger(t))) - server := httptest.NewServer(handler) - client := LabelService{ - Client: mustNewHTTPClient(t, server.URL, ""), - } - done := server.Close - - return &client, "", done -} - -func TestLabelService(t *testing.T) { - tests := []struct { - name string - testFn func( - init func(platformtesting.LabelFields, *testing.T) (platform.LabelService, string, func()), - t *testing.T, - ) - }{ - { - name: "create label", - testFn: platformtesting.CreateLabel, - }, - { - name: "delete label", - testFn: platformtesting.DeleteLabel, - }, - { - name: "update label", - testFn: platformtesting.UpdateLabel, - }, - { - name: "find labels", - testFn: platformtesting.FindLabels, - }, - { - name: "find label by ID", - testFn: platformtesting.FindLabelByID, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.testFn(initLabelService, t) - }) - } -} diff --git a/http/legacy.go b/http/legacy.go deleted file mode 100644 index 64101a2acf9..00000000000 --- a/http/legacy.go +++ /dev/null @@ -1,38 +0,0 @@ -package http - -import ( - "github.com/influxdata/influxdb/v2/http/legacy" -) - -// newLegacyBackend constructs a legacy backend from an api backend. -func newLegacyBackend(b *APIBackend) *legacy.Backend { - return &legacy.Backend{ - HTTPErrorHandler: b.HTTPErrorHandler, - Logger: b.Logger, - // TODO(sgc): /write support - // MaxBatchSizeBytes: b.APIBackend.MaxBatchSizeBytes, - AuthorizationService: b.AuthorizationService, - OrganizationService: b.OrganizationService, - BucketService: b.BucketService, - PointsWriter: b.PointsWriter, - DBRPMappingService: b.DBRPService, - InfluxqldQueryService: b.InfluxqldService, - WriteEventRecorder: b.WriteEventRecorder, - } -} - -// newLegacyHandler constructs a legacy handler from a backend. 
-func newLegacyHandler(b *legacy.Backend, config legacy.HandlerConfig) *legacy.Handler { - h := &legacy.Handler{ - HTTPErrorHandler: b.HTTPErrorHandler, - } - - pointsWriterBackend := legacy.NewPointsWriterBackend(b) - h.PointsWriterHandler = legacy.NewWriterHandler(pointsWriterBackend, legacy.WithMaxBatchSizeBytes(b.MaxBatchSizeBytes)) - - influxqlBackend := legacy.NewInfluxQLBackend(b) - h.InfluxQLHandler = legacy.NewInfluxQLHandler(influxqlBackend, config) - - h.PingHandler = legacy.NewPingHandler() - return h -} diff --git a/http/legacy/backend.go b/http/legacy/backend.go deleted file mode 100644 index 5f157560168..00000000000 --- a/http/legacy/backend.go +++ /dev/null @@ -1,81 +0,0 @@ -package legacy - -import ( - http2 "net/http" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/http/metric" - "github.com/influxdata/influxdb/v2/influxql" - "github.com/influxdata/influxdb/v2/kit/cli" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/storage" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -// Handler is a collection of all the service handlers. -type Handler struct { - errors.HTTPErrorHandler - PointsWriterHandler *WriteHandler - PingHandler *PingHandler - InfluxQLHandler *InfluxqlHandler -} - -type Backend struct { - errors.HTTPErrorHandler - Logger *zap.Logger - MaxBatchSizeBytes int64 - - WriteEventRecorder metric.EventRecorder - AuthorizationService influxdb.AuthorizationService - OrganizationService influxdb.OrganizationService - BucketService influxdb.BucketService - PointsWriter storage.PointsWriter - DBRPMappingService influxdb.DBRPMappingService - InfluxqldQueryService influxql.ProxyQueryService -} - -// HandlerConfig provides configuration for the legacy handler. -type HandlerConfig struct { - DefaultRoutingKey string -} - -func NewHandlerConfig() *HandlerConfig { - return &HandlerConfig{} -} - -// Opts returns the CLI options for use with kit/cli. -// Currently set values on c are provided as the defaults. -func (c *HandlerConfig) Opts() []cli.Opt { - return []cli.Opt{ - { - DestP: &c.DefaultRoutingKey, - Flag: "influxql-default-routing-key", - Default: "defaultQueue", - Desc: "Default routing key for publishing new query requests", - }, - } -} - -func (h *Handler) ServeHTTP(w http2.ResponseWriter, r *http2.Request) { - if r.URL.Path == "/write" { - h.PointsWriterHandler.ServeHTTP(w, r) - return - } - - if r.URL.Path == "/ping" { - h.PingHandler.ServeHTTP(w, r) - return - } - - if r.URL.Path == "/query" { - h.InfluxQLHandler.ServeHTTP(w, r) - return - } - - w.WriteHeader(http2.StatusNotFound) -} - -func (h *Handler) PrometheusCollectors() []prometheus.Collector { - return h.InfluxQLHandler.PrometheusCollectors() -} diff --git a/http/legacy/common.go b/http/legacy/common.go deleted file mode 100644 index a0d7f6d8c70..00000000000 --- a/http/legacy/common.go +++ /dev/null @@ -1,28 +0,0 @@ -package legacy - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - pcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// getAuthorization extracts authorization information from a context.Context. -// It guards against non influxdb.Authorization values for authorization and -// InfluxQL feature flag not enabled. 
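// Editor's note: the following is an illustrative sketch, not part of the deleted file.
// The legacy Handler.ServeHTTP shown above dispatches on the exact request path
// (/write, /ping, /query) instead of using a router. A minimal stdlib-only version of
// that pattern; the type and handler names here are made up for illustration.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type legacyMux struct {
	write, ping, query http.Handler
}

func (m legacyMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	switch r.URL.Path {
	case "/write":
		m.write.ServeHTTP(w, r)
	case "/ping":
		m.ping.ServeHTTP(w, r)
	case "/query":
		m.query.ServeHTTP(w, r)
	default:
		w.WriteHeader(http.StatusNotFound) // anything else is a 404, as in the handler above
	}
}

func main() {
	ok := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprintln(w, "ok") })
	mux := legacyMux{write: ok, ping: ok, query: ok}

	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest("GET", "/ping", nil))
	fmt.Println(rec.Code) // 200
}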
-func getAuthorization(ctx context.Context) (*influxdb.Authorization, error) { - authorizer, err := pcontext.GetAuthorizer(ctx) - if err != nil { - return nil, err - } - - a, ok := authorizer.(*influxdb.Authorization) - if !ok { - return nil, &errors.Error{ - Code: errors.EForbidden, - Msg: "insufficient permissions; session not supported", - } - } - return a, nil -} diff --git a/http/legacy/influx1x_authentication_handler.go b/http/legacy/influx1x_authentication_handler.go deleted file mode 100644 index ff4a3f2be08..00000000000 --- a/http/legacy/influx1x_authentication_handler.go +++ /dev/null @@ -1,141 +0,0 @@ -package legacy - -import ( - "context" - "errors" - "fmt" - "net/http" - "strings" - - "github.com/influxdata/influxdb/v2" - platcontext "github.com/influxdata/influxdb/v2/context" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/opentracing/opentracing-go" -) - -type Authorizer interface { - Authorize(ctx context.Context, c influxdb.CredentialsV1) (*influxdb.Authorization, error) -} - -type Influx1xAuthenticationHandler struct { - errors2.HTTPErrorHandler - next http.Handler - auth Authorizer -} - -// NewInflux1xAuthenticationHandler creates an authentication handler to process -// InfluxDB 1.x authentication requests. -func NewInflux1xAuthenticationHandler(next http.Handler, auth Authorizer, h errors2.HTTPErrorHandler) *Influx1xAuthenticationHandler { - return &Influx1xAuthenticationHandler{ - HTTPErrorHandler: h, - next: next, - auth: auth, - } -} - -// ServeHTTP extracts the session or token from the http request and places the resulting authorizer on the request context. -func (h *Influx1xAuthenticationHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // The ping endpoint does not need authorization - if r.URL.Path == "/ping" { - h.next.ServeHTTP(w, r) - return - } - ctx := r.Context() - - creds, err := h.parseCredentials(r) - if err != nil { - unauthorizedError(ctx, h, w) - return - } - - auth, err := h.auth.Authorize(ctx, creds) - if err != nil { - var erri *errors2.Error - if errors.As(err, &erri) { - switch erri.Code { - case errors2.EForbidden, errors2.EUnauthorized: - h.HandleHTTPError(ctx, erri, w) - return - } - } - unauthorizedError(ctx, h, w) - return - } - - ctx = platcontext.SetAuthorizer(ctx, auth) - - if span := opentracing.SpanFromContext(ctx); span != nil { - span.SetTag("user_id", auth.GetUserID().String()) - } - - h.next.ServeHTTP(w, r.WithContext(ctx)) -} - -func parseToken(token string) (user, pass string, ok bool) { - s := strings.IndexByte(token, ':') - if s < 0 { - // Token - return "", token, true - } - - // Token : - return token[:s], token[s+1:], true -} - -// parseCredentials parses a request and returns the authentication credentials. -// The credentials may be present as URL query params, or as a Basic -// Authentication header. -// As params: http://127.0.0.1/query?u=username&p=token -// As basic auth: http://username:token@127.0.0.1 -// As Token in Authorization header: Token -func (h *Influx1xAuthenticationHandler) parseCredentials(r *http.Request) (influxdb.CredentialsV1, error) { - q := r.URL.Query() - - // Check for username and password in URL params. - if u, p := q.Get("u"), q.Get("p"); u != "" && p != "" { - return influxdb.CredentialsV1{ - Scheme: influxdb.SchemeV1URL, - Username: u, - Token: p, - }, nil - } - - // Check for the HTTP Authorization header. - if s := r.Header.Get("Authorization"); s != "" { - // Check for Bearer token. 
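// Editor's note: the following is an illustrative sketch, not part of the deleted file.
// parseCredentials (continued below) accepts v1 credentials in three forms: ?u=<user>&p=<token>
// query parameters, an "Authorization: Token <user>:<token>" header (a bare token is also
// accepted, with an empty username), and HTTP Basic auth. A runnable illustration of the
// user:token split performed by parseToken above; the helper and sample values are illustrative.
package main

import (
	"fmt"
	"strings"
)

func splitV1Token(token string) (user, pass string) {
	if i := strings.IndexByte(token, ':'); i >= 0 {
		return token[:i], token[i+1:] // Token <username>:<token>
	}
	return "", token // bare token, no username
}

func main() {
	fmt.Println(splitV1Token("sydney:my-token")) // sydney my-token
	fmt.Println(splitV1Token("my-token"))        //        my-token (empty user)
}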
- strs := strings.Split(s, " ") - if len(strs) == 2 { - switch strs[0] { - case "Token": - if u, p, ok := parseToken(strs[1]); ok { - return influxdb.CredentialsV1{ - Scheme: influxdb.SchemeV1Token, - Username: u, - Token: p, - }, nil - } - - // fallback to only a token - } - } - - // Check for basic auth. - if u, p, ok := r.BasicAuth(); ok { - return influxdb.CredentialsV1{ - Scheme: influxdb.SchemeV1Basic, - Username: u, - Token: p, - }, nil - } - } - - return influxdb.CredentialsV1{}, fmt.Errorf("unable to parse authentication credentials") -} - -// unauthorizedError encodes a error message and status code for unauthorized access. -func unauthorizedError(ctx context.Context, h errors2.HTTPErrorHandler, w http.ResponseWriter) { - h.HandleHTTPError(ctx, &errors2.Error{ - Code: errors2.EUnauthorized, - Msg: "unauthorized access", - }, w) -} diff --git a/http/legacy/influx1x_authentication_handler_test.go b/http/legacy/influx1x_authentication_handler_test.go deleted file mode 100644 index c9f373fc88b..00000000000 --- a/http/legacy/influx1x_authentication_handler_test.go +++ /dev/null @@ -1,193 +0,0 @@ -package legacy - -import ( - "context" - "fmt" - "net/http" - "net/http/httptest" - "testing" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/mock" - itesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -const tokenScheme = "Token " // TODO(goller): I'd like this to be Bearer - -func setToken(token string, req *http.Request) { - req.Header.Set("Authorization", fmt.Sprintf("%s%s", tokenScheme, token)) -} - -func TestInflux1xAuthenticationHandler(t *testing.T) { - var userID = itesting.MustIDBase16("0000000000001010") - - type fields struct { - AuthorizeFn func(ctx context.Context, c influxdb.CredentialsV1) (*influxdb.Authorization, error) - } - - type exp struct { - code int - } - - basic := func(u, p string) func(r *http.Request) { - return func(r *http.Request) { - r.SetBasicAuth(u, p) - } - } - - token := func(u, p string) func(r *http.Request) { - return func(r *http.Request) { - if u == "" { - setToken(p, r) - } else { - setToken(u+":"+p, r) - } - } - } - - query := func(u, p string) func(r *http.Request) { - return func(r *http.Request) { - v := r.URL.Query() - v.Add("u", u) - v.Add("p", p) - r.URL.RawQuery = v.Encode() - } - } - - const ( - User = "sydney" - Token = "my-token" - ) - - tests := []struct { - name string - fields fields - auth func(r *http.Request) - exp exp - }{ - // successful requests - { - name: "basic auth", - fields: fields{}, - auth: basic(User, Token), - exp: exp{ - code: http.StatusOK, - }, - }, - { - name: "query string", - fields: fields{}, - auth: query(User, Token), - exp: exp{ - code: http.StatusOK, - }, - }, - { - name: "Token as user:token", - fields: fields{}, - auth: token(User, Token), - exp: exp{ - code: http.StatusOK, - }, - }, - { - name: "Token as token", - fields: fields{}, - auth: token("", Token), - exp: exp{ - code: http.StatusOK, - }, - }, - { - name: "token does not exist", - fields: fields{ - AuthorizeFn: func(ctx context.Context, c influxdb.CredentialsV1) (*influxdb.Authorization, error) { - return nil, &errors.Error{Code: errors.EUnauthorized} - }, - }, - exp: exp{ - code: http.StatusUnauthorized, - }, - }, - { - name: "authorize returns error EForbidden", - fields: fields{ - 
AuthorizeFn: func(ctx context.Context, c influxdb.CredentialsV1) (*influxdb.Authorization, error) { - return nil, &errors.Error{Code: errors.EForbidden} - }, - }, - auth: basic(User, Token), - exp: exp{ - code: http.StatusForbidden, - }, - }, - { - name: "authorize returns error EUnauthorized", - fields: fields{ - AuthorizeFn: func(ctx context.Context, c influxdb.CredentialsV1) (*influxdb.Authorization, error) { - return nil, &errors.Error{Code: errors.EUnauthorized} - }, - }, - auth: basic(User, Token), - exp: exp{ - code: http.StatusUnauthorized, - }, - }, - { - name: "authorize returns error other", - fields: fields{ - AuthorizeFn: func(ctx context.Context, c influxdb.CredentialsV1) (*influxdb.Authorization, error) { - return nil, &errors.Error{Code: errors.EInvalid} - }, - }, - auth: basic(User, Token), - exp: exp{ - code: http.StatusUnauthorized, - }, - }, - { - name: "no auth provided", - fields: fields{}, - exp: exp{ - code: http.StatusUnauthorized, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - t.Cleanup(ctrl.Finish) - - var h *Influx1xAuthenticationHandler - { - auth := &mock.AuthorizerV1{AuthorizeFn: tt.fields.AuthorizeFn} - if auth.AuthorizeFn == nil { - auth.AuthorizeFn = func(ctx context.Context, c influxdb.CredentialsV1) (*influxdb.Authorization, error) { - return &influxdb.Authorization{UserID: userID}, nil - } - } - next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - }) - - h = NewInflux1xAuthenticationHandler(next, auth, kithttp.NewErrorHandler(zaptest.NewLogger(t))) - } - - w := httptest.NewRecorder() - r := httptest.NewRequest("POST", "http://any.url", nil) - if tt.auth != nil { - tt.auth(r) - } - h.ServeHTTP(w, r) - - if got, want := w.Code, tt.exp.code; got != want { - t.Errorf("expected status code to be %d got %d", want, got) - } - }) - } -} diff --git a/http/legacy/influxql_handler.go b/http/legacy/influxql_handler.go deleted file mode 100644 index fffd148692a..00000000000 --- a/http/legacy/influxql_handler.go +++ /dev/null @@ -1,57 +0,0 @@ -package legacy - -import ( - "net/http" - - platform "github.com/influxdata/influxdb/v2" - influxqld "github.com/influxdata/influxdb/v2/influxql" - "github.com/influxdata/influxdb/v2/influxql/control" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/query" - "go.uber.org/zap" -) - -// InfluxqlHandler mimics the /query handler from influxdb, but, enriches -// with org and forwards requests to the transpiler service. -type InfluxqlHandler struct { - *InfluxQLBackend - HandlerConfig - Metrics *control.ControllerMetrics -} - -type InfluxQLBackend struct { - errors.HTTPErrorHandler - Logger *zap.Logger - AuthorizationService platform.AuthorizationService - OrganizationService platform.OrganizationService - ProxyQueryService query.ProxyQueryService - InfluxqldQueryService influxqld.ProxyQueryService -} - -// NewInfluxQLBackend constructs an InfluxQLBackend from a LegacyBackend. 
-func NewInfluxQLBackend(b *Backend) *InfluxQLBackend { - return &InfluxQLBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - Logger: b.Logger.With(zap.String("handler", "influxql")), - AuthorizationService: b.AuthorizationService, - OrganizationService: b.OrganizationService, - InfluxqldQueryService: b.InfluxqldQueryService, - } -} - -// NewInfluxQLHandler returns a new instance of InfluxqlHandler to handle influxql v1 queries -func NewInfluxQLHandler(b *InfluxQLBackend, config HandlerConfig) *InfluxqlHandler { - return &InfluxqlHandler{ - InfluxQLBackend: b, - HandlerConfig: config, - Metrics: control.NewControllerMetrics([]string{}), - } -} - -func (h *InfluxqlHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - h.handleInfluxqldQuery(w, req) -} - -// DefaultChunkSize is the default number of points to write in -// one chunk. -const DefaultChunkSize = 10000 diff --git a/http/legacy/influxqld_handler.go b/http/legacy/influxqld_handler.go deleted file mode 100644 index 8f8da4b5006..00000000000 --- a/http/legacy/influxqld_handler.go +++ /dev/null @@ -1,182 +0,0 @@ -package legacy - -import ( - "encoding/json" - "io" - "mime" - "net/http" - "os" - "strconv" - "strings" - - "github.com/influxdata/flux/iocounter" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/influxql" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -const ( - traceIDHeader = "Trace-Id" -) - -func (h *InfluxqlHandler) PrometheusCollectors() []prometheus.Collector { - return []prometheus.Collector{ - h.Metrics.Requests, - h.Metrics.RequestsLatency, - } -} - -// HandleQuery mimics the influxdb 1.0 /query -func (h *InfluxqlHandler) handleInfluxqldQuery(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "handleInfluxqldQuery") - defer span.Finish() - - if id, _, found := tracing.InfoFromSpan(span); found { - w.Header().Set(traceIDHeader, id) - } - - ctx := r.Context() - defer r.Body.Close() - - auth, err := getAuthorization(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if !auth.IsActive() { - h.HandleHTTPError(ctx, &errors.Error{ - Code: errors.EForbidden, - Msg: "insufficient permissions", - }, w) - return - } - - o, err := h.OrganizationService.FindOrganization(ctx, influxdb.OrganizationFilter{ - ID: &auth.OrgID, - }) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - var query string - // Attempt to read the form value from the "q" form value. - if qp := strings.TrimSpace(r.FormValue("q")); qp != "" { - query = qp - } else if r.MultipartForm != nil && r.MultipartForm.File != nil { - // If we have a multipart/form-data, try to retrieve a file from 'q'. 
- if fhs := r.MultipartForm.File["q"]; len(fhs) > 0 { - d, err := os.ReadFile(fhs[0].Filename) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - query = string(d) - } - } else { - ct := r.Header.Get("Content-Type") - mt, _, err := mime.ParseMediaType(ct) - if err != nil { - h.HandleHTTPError(ctx, &errors.Error{ - Code: errors.EInvalid, - Err: err, - }, w) - return - } - - if mt == "application/vnd.influxql" { - if d, err := io.ReadAll(r.Body); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } else { - query = string(d) - } - } - } - - // parse the parameters - rawParams := r.FormValue("params") - var params map[string]interface{} - if rawParams != "" { - decoder := json.NewDecoder(strings.NewReader(rawParams)) - decoder.UseNumber() - if err := decoder.Decode(¶ms); err != nil { - h.HandleHTTPError(ctx, &errors.Error{ - Code: errors.EInvalid, - Msg: "error parsing query parameters", - Err: err, - }, w) - return - } - - // Convert json.Number into int64 and float64 values - for k, v := range params { - if v, ok := v.(json.Number); ok { - var err error - if strings.Contains(string(v), ".") { - params[k], err = v.Float64() - } else { - params[k], err = v.Int64() - } - - if err != nil { - h.HandleHTTPError(ctx, &errors.Error{ - Code: errors.EInvalid, - Msg: "error parsing json value", - Err: err, - }, w) - return - } - } - } - } - - // Parse chunk size. Use default if not provided or cannot be parsed - chunked := r.FormValue("chunked") == "true" - chunkSize := DefaultChunkSize - if chunked { - if n, err := strconv.ParseInt(r.FormValue("chunk_size"), 10, 64); err == nil && int(n) > 0 { - chunkSize = int(n) - } - } - - formatString := r.Header.Get("Accept") - encodingFormat := influxql.EncodingFormatFromMimeType(formatString) - w.Header().Set("Content-Type", encodingFormat.ContentType()) - - req := &influxql.QueryRequest{ - DB: r.FormValue("db"), - RP: r.FormValue("rp"), - Epoch: r.FormValue("epoch"), - EncodingFormat: encodingFormat, - OrganizationID: o.ID, - Query: query, - Params: params, - Source: r.Header.Get("User-Agent"), - Authorization: auth, - Chunked: chunked, - ChunkSize: chunkSize, - } - - var respSize int64 - cw := iocounter.Writer{Writer: w} - _, err = h.InfluxqldQueryService.Query(ctx, &cw, req) - respSize = cw.Count() - - if err != nil { - if respSize == 0 { - // Only record the error headers IFF nothing has been written to w. 
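// Editor's note: the following is an illustrative sketch, not part of the deleted file.
// The "params" handling above decodes the JSON with UseNumber() and then converts each
// json.Number to int64 or float64 depending on whether it contains a decimal point, so
// integer parameters are not silently widened to floats. A runnable stdlib-only
// illustration of that conversion; the sample payload is made up.
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	raw := `{"limit": 10, "threshold": 2.5, "region": "us-west"}`

	var params map[string]interface{}
	dec := json.NewDecoder(strings.NewReader(raw))
	dec.UseNumber() // keep numbers as json.Number instead of float64
	if err := dec.Decode(&params); err != nil {
		panic(err)
	}

	for k, v := range params {
		if n, ok := v.(json.Number); ok {
			if strings.Contains(string(n), ".") {
				params[k], _ = n.Float64()
			} else {
				params[k], _ = n.Int64()
			}
		}
	}
	fmt.Printf("%#v\n", params) // limit is int64, threshold is float64, region stays a string
}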
- h.HandleHTTPError(ctx, err, w) - return - } - h.Logger.Info("error writing response to client", - zap.String("org", o.Name), - zap.String("handler", "influxql"), - zap.Error(err), - ) - } -} diff --git a/http/legacy/influxqld_handler_test.go b/http/legacy/influxqld_handler_test.go deleted file mode 100644 index fe3f1da9d20..00000000000 --- a/http/legacy/influxqld_handler_test.go +++ /dev/null @@ -1,264 +0,0 @@ -//lint:file-ignore U1000 this error seems to be misreporting -package legacy - -import ( - "context" - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/google/go-cmp/cmp" - platform "github.com/influxdata/influxdb/v2" - pcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/influxql" - imock "github.com/influxdata/influxdb/v2/influxql/mock" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/mock" - "go.uber.org/zap/zaptest" -) - -var cmpOpts = []cmp.Option{ - // Ignore request ID when comparing headers. - cmp.Comparer(func(h1, h2 http.Header) bool { - for k, v1 := range h1 { - if k == "X-Request-Id" || k == "Request-Id" { - continue - } - if v2, ok := h2[k]; !ok || !cmp.Equal(v1, v2) { - return false - } - } - for k, v2 := range h2 { - if k == "X-Request-Id" || k == "Request-Id" { - continue - } - if v1, ok := h1[k]; !ok || !cmp.Equal(v2, v1) { - return false - } - } - return true - }), -} - -func TestInfluxQLdHandler_HandleQuery(t *testing.T) { - ctx := context.Background() - - type fields struct { - OrganizationService platform.OrganizationService - ProxyQueryService influxql.ProxyQueryService - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - } - tests := []struct { - name string - fields fields - args args - context context.Context - wantCode int - wantHeader http.Header - wantBody []byte - }{ - { - name: "no token causes http error", - args: args{ - r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx), - w: httptest.NewRecorder(), - }, - wantCode: http.StatusInternalServerError, - wantHeader: http.Header{ - "X-Platform-Error-Code": {"internal error"}, - "Content-Type": {"application/json; charset=utf-8"}, - }, - wantBody: []byte(`{"code":"internal error","message":"authorizer not found on context"}`), - }, - { - name: "inactive authorizer", - context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Inactive}), - args: args{ - r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx), - w: httptest.NewRecorder(), - }, - wantCode: http.StatusForbidden, - wantHeader: http.Header{ - "Content-Type": {"application/json; charset=utf-8"}, - "X-Platform-Error-Code": {"forbidden"}, - }, - wantBody: []byte(`{"code":"forbidden","message":"insufficient permissions"}`), - }, - { - name: "unknown organization", - context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}), - fields: fields{ - OrganizationService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - return nil, &errors.Error{ - Code: errors.EForbidden, - Msg: "nope", - } - }, - }, - }, - args: args{ - r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx), - w: httptest.NewRecorder(), - }, - wantCode: http.StatusForbidden, - wantHeader: http.Header{ - "Content-Type": {"application/json; charset=utf-8"}, - 
"X-Platform-Error-Code": {"forbidden"}, - }, - wantBody: []byte(`{"code":"forbidden","message":"nope"}`), - }, - { - name: "bad query", - context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}), - fields: fields{ - OrganizationService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - return &platform.Organization{}, nil - }, - }, - ProxyQueryService: &imock.ProxyQueryService{ - QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) { - return influxql.Statistics{}, &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "bad query", - } - }, - }, - }, - args: args{ - r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx), - w: httptest.NewRecorder(), - }, - wantCode: http.StatusUnprocessableEntity, - wantHeader: http.Header{ - "X-Platform-Error-Code": {"unprocessable entity"}, - "Content-Type": {"application/json; charset=utf-8"}, - }, - wantBody: []byte(`{"code":"unprocessable entity","message":"bad query"}`), - }, - { - name: "query fails during write", - context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}), - fields: fields{ - OrganizationService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - return &platform.Organization{}, nil - }, - }, - ProxyQueryService: &imock.ProxyQueryService{ - QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) { - _, _ = io.WriteString(w, "fail") - return influxql.Statistics{}, &errors.Error{ - Code: errors.EInternal, - Msg: "during query", - } - }, - }, - }, - args: args{ - r: httptest.NewRequest("POST", "/query", nil).WithContext(ctx), - w: httptest.NewRecorder(), - }, - wantBody: []byte("fail"), - wantCode: http.StatusOK, - wantHeader: http.Header{ - "Content-Type": {"application/json"}, - }, - }, - { - name: "good query unknown accept header", - context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}), - fields: fields{ - OrganizationService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - return &platform.Organization{}, nil - }, - }, - ProxyQueryService: &imock.ProxyQueryService{ - QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) { - _, err := io.WriteString(w, "good") - return influxql.Statistics{}, err - }, - }, - }, - args: args{ - r: WithHeader(httptest.NewRequest("POST", "/query", nil).WithContext(ctx), "Accept", "application/foo"), - w: httptest.NewRecorder(), - }, - wantBody: []byte("good"), - wantCode: http.StatusOK, - wantHeader: http.Header{ - "Content-Type": {"application/json"}, - }, - }, - { - name: "good query", - context: pcontext.SetAuthorizer(ctx, &platform.Authorization{Status: platform.Active}), - fields: fields{ - OrganizationService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - return &platform.Organization{}, nil - }, - }, - ProxyQueryService: &imock.ProxyQueryService{ - QueryF: func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) { - _, err := io.WriteString(w, "good") - return influxql.Statistics{}, err - }, - }, - }, - args: args{ - r: 
httptest.NewRequest("POST", "/query", nil).WithContext(ctx), - w: httptest.NewRecorder(), - }, - wantBody: []byte("good"), - wantCode: http.StatusOK, - wantHeader: http.Header{ - "Content-Type": {"application/json"}, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - b := &InfluxQLBackend{ - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(t)), - OrganizationService: tt.fields.OrganizationService, - InfluxqldQueryService: tt.fields.ProxyQueryService, - } - - h := NewInfluxQLHandler(b, *NewHandlerConfig()) - h.Logger = zaptest.NewLogger(t) - - if tt.context != nil { - tt.args.r = tt.args.r.WithContext(tt.context) - } - - tt.args.r.Header.Add("Content-Type", "application/vnd.influxql") - - h.handleInfluxqldQuery(tt.args.w, tt.args.r) - - if got, want := tt.args.w.Code, tt.wantCode; got != want { - t.Errorf("HandleQuery() status code = got %d / want %d", got, want) - } - - if got, want := tt.args.w.Result().Header, tt.wantHeader; !cmp.Equal(got, want, cmpOpts...) { - t.Errorf("HandleQuery() headers = got(-)/want(+) %s", cmp.Diff(got, want)) - } - - if got, want := tt.args.w.Body.Bytes(), tt.wantBody; !cmp.Equal(got, want) { - t.Errorf("HandleQuery() body = got(-)/want(+) %s", cmp.Diff(string(got), string(want))) - } - }) - } -} - -func WithHeader(r *http.Request, key, value string) *http.Request { - r.Header.Set(key, value) - return r -} diff --git a/http/legacy/ping_handle_test.go b/http/legacy/ping_handle_test.go deleted file mode 100644 index 500c4a0da48..00000000000 --- a/http/legacy/ping_handle_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package legacy - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -func TestPingHandler(t *testing.T) { - tests := []struct { - name string - w *httptest.ResponseRecorder - r *http.Request - }{ - { - name: "GET request", - w: httptest.NewRecorder(), - r: httptest.NewRequest(http.MethodGet, "/ping", nil), - }, - { - name: "HEAD request", - w: httptest.NewRecorder(), - r: httptest.NewRequest(http.MethodHead, "/ping", nil), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - NewPingHandler().pingHandler(tt.w, tt.r) - res := tt.w.Result() - build := res.Header.Get("X-Influxdb-Build") - version := res.Header.Get("X-Influxdb-Version") - - if res.StatusCode != http.StatusNoContent { - t.Errorf("%q. PingHandler() = %v, want %v", tt.name, res.StatusCode, http.StatusNoContent) - } - if build != "" { - t.Errorf("%q. PingHandler() = %v, want empty string", tt.name, build) - } - if version != "" { - t.Errorf("%q. PingHandler() = %v, want empty string", tt.name, version) - } - }) - } -} diff --git a/http/legacy/ping_handler.go b/http/legacy/ping_handler.go deleted file mode 100644 index 21d2beeaf5b..00000000000 --- a/http/legacy/ping_handler.go +++ /dev/null @@ -1,27 +0,0 @@ -package legacy - -import ( - "net/http" - - "github.com/influxdata/httprouter" -) - -type PingHandler struct { - *httprouter.Router -} - -func NewPingHandler() *PingHandler { - h := &PingHandler{ - Router: httprouter.New(), - } - - h.HandlerFunc("GET", "/ping", h.pingHandler) - h.HandlerFunc("HEAD", "/ping", h.pingHandler) - return h -} - -// handlePostLegacyWrite is the HTTP handler for the POST /write route. 
-func (h *PingHandler) pingHandler(w http.ResponseWriter, r *http.Request) { - // X-Influxdb-Version and X-Influxdb-Build header are sets by buildHeader in http/handler.go - w.WriteHeader(http.StatusNoContent) -} diff --git a/http/legacy/router.go b/http/legacy/router.go deleted file mode 100644 index 34f2fb1fb6c..00000000000 --- a/http/legacy/router.go +++ /dev/null @@ -1,88 +0,0 @@ -package legacy - -import ( - "fmt" - "net/http" - "os" - "runtime/debug" - "sync" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - influxlogger "github.com/influxdata/influxdb/v2/logger" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// NewRouter returns a new router with a 404 handler, a 405 handler, and a panic handler. -func NewRouter(h errors.HTTPErrorHandler) *httprouter.Router { - b := baseHandler{HTTPErrorHandler: h} - router := httprouter.New() - router.NotFound = http.HandlerFunc(b.notFound) - router.MethodNotAllowed = http.HandlerFunc(b.methodNotAllowed) - router.PanicHandler = b.panic - router.AddMatchedRouteToContext = true - return router -} - -type baseHandler struct { - errors.HTTPErrorHandler -} - -// notFound represents a 404 handler that return a JSON response. -func (h baseHandler) notFound(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - pe := &errors.Error{ - Code: errors.ENotFound, - Msg: "path not found", - } - - h.HandleHTTPError(ctx, pe, w) -} - -// methodNotAllowed represents a 405 handler that return a JSON response. -func (h baseHandler) methodNotAllowed(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - allow := w.Header().Get("Allow") - pe := &errors.Error{ - Code: errors.EMethodNotAllowed, - Msg: fmt.Sprintf("allow: %s", allow), - } - - h.HandleHTTPError(ctx, pe, w) -} - -// panic handles panics recovered from http handlers. -// It returns a json response with http status code 500 and the recovered error message. -func (h baseHandler) panic(w http.ResponseWriter, r *http.Request, rcv interface{}) { - ctx := r.Context() - pe := &errors.Error{ - Code: errors.EInternal, - Msg: "a panic has occurred", - Err: fmt.Errorf("%s: %v", r.URL.String(), rcv), - } - - l := getPanicLogger() - if entry := l.Check(zapcore.ErrorLevel, pe.Msg); entry != nil { - entry.Stack = string(debug.Stack()) - entry.Write(zap.Error(pe.Err)) - } - - h.HandleHTTPError(ctx, pe, w) -} - -var panicLogger = zap.NewNop() -var panicLoggerOnce sync.Once - -// getPanicLogger returns a logger for panicHandler. 
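// Editor's note: the following is an illustrative sketch, not part of the deleted file.
// getPanicLogger (below) starts from a no-op logger and swaps in a real one exactly once
// via sync.Once, so the panic handler always has a usable logger even if construction
// fails. The same lazy-singleton shape with only the standard library; the file name and
// helper names here are illustrative.
package main

import (
	"fmt"
	"log"
	"os"
	"sync"
)

var (
	panicLog     = log.New(os.Stderr, "", 0) // safe default, always usable
	panicLogOnce sync.Once
)

func getLogger() *log.Logger {
	panicLogOnce.Do(func() {
		// Construct the "real" logger once; on failure the default above stays in place.
		if f, err := os.OpenFile("panic.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644); err == nil {
			panicLog = log.New(f, "panic: ", log.LstdFlags)
		}
	})
	return panicLog
}

func main() {
	fmt.Println(getLogger() == getLogger()) // true: constructed at most once
}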
-func getPanicLogger() *zap.Logger { - panicLoggerOnce.Do(func() { - conf := influxlogger.NewConfig() - logger, err := conf.New(os.Stderr) - if err == nil { - panicLogger = logger.With(zap.String("handler", "panic")) - } - }) - - return panicLogger -} diff --git a/http/legacy/write_handler.go b/http/legacy/write_handler.go deleted file mode 100644 index b63be63cb71..00000000000 --- a/http/legacy/write_handler.go +++ /dev/null @@ -1,282 +0,0 @@ -package legacy - -import ( - "context" - "fmt" - "io" - "net/http" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/http/metric" - "github.com/influxdata/influxdb/v2/http/points" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/storage" - "github.com/influxdata/influxdb/v2/tsdb" - "go.uber.org/zap" -) - -var _ http.Handler = (*WriteHandler)(nil) - -const ( - opWriteHandler = "http/v1WriteHandler" -) - -// PointsWriterBackend contains all the services needed to run a PointsWriterHandler. -type PointsWriterBackend struct { - errors.HTTPErrorHandler - Logger *zap.Logger - - EventRecorder metric.EventRecorder - BucketService influxdb.BucketService - PointsWriter storage.PointsWriter - DBRPMappingService influxdb.DBRPMappingService -} - -// NewPointsWriterBackend creates a new backend for legacy work. -func NewPointsWriterBackend(b *Backend) *PointsWriterBackend { - return &PointsWriterBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - Logger: b.Logger.With(zap.String("handler", "points_writer")), - EventRecorder: b.WriteEventRecorder, - BucketService: b.BucketService, - PointsWriter: b.PointsWriter, - DBRPMappingService: b.DBRPMappingService, - } -} - -// PointsWriterHandler represents an HTTP API handler for writing points. -type WriteHandler struct { - errors.HTTPErrorHandler - EventRecorder metric.EventRecorder - BucketService influxdb.BucketService - PointsWriter storage.PointsWriter - DBRPMappingService influxdb.DBRPMappingService - - router *httprouter.Router - logger *zap.Logger - maxBatchSizeBytes int64 -} - -// NewWriterHandler returns a new instance of PointsWriterHandler. 
-func NewWriterHandler(b *PointsWriterBackend, opts ...WriteHandlerOption) *WriteHandler { - h := &WriteHandler{ - HTTPErrorHandler: b.HTTPErrorHandler, - EventRecorder: b.EventRecorder, - BucketService: b.BucketService, - PointsWriter: b.PointsWriter, - DBRPMappingService: b.DBRPMappingService, - - router: NewRouter(b.HTTPErrorHandler), - logger: b.Logger.With(zap.String("handler", "points_writer")), - } - - for _, opt := range opts { - opt(h) - } - - h.router.HandlerFunc(http.MethodPost, "/write", h.handleWrite) - - return h -} - -// WriteHandlerOption is a functional option for a *PointsWriterHandler -type WriteHandlerOption func(*WriteHandler) - -// WithMaxBatchSizeBytes configures the maximum size for a -// (decompressed) points batch allowed by the write handler -func WithMaxBatchSizeBytes(n int64) WriteHandlerOption { - return func(w *WriteHandler) { - w.maxBatchSizeBytes = n - } -} - -// ServeHTTP implements http.Handler -func (h *WriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - h.router.ServeHTTP(w, r) -} - -// handleWrite handles requests for the v1 write endpoint -func (h *WriteHandler) handleWrite(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "WriteHandler") - defer span.Finish() - - ctx := r.Context() - auth, err := getAuthorization(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - // The legacy write endpoint allows reading the DBRP mapping of buckets with only write permissions. - // Add the extra permissions we need here (rather than forcing clients to change). - extraPerms := []influxdb.Permission{} - for _, perm := range auth.Permissions { - if perm.Action == influxdb.WriteAction && perm.Resource.Type == influxdb.BucketsResourceType { - extraPerms = append(extraPerms, influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: perm.Resource, - }) - } - } - auth.Permissions = append(extraPerms, auth.Permissions...) - - sw := kithttp.NewStatusResponseWriter(w) - recorder := newWriteUsageRecorder(sw, h.EventRecorder) - var requestBytes int - defer func() { - // Close around the requestBytes variable to placate the linter. - recorder.Record(ctx, requestBytes, auth.OrgID, r.URL.Path) - }() - - req, err := decodeWriteRequest(ctx, r, h.maxBatchSizeBytes) - if err != nil { - h.HandleHTTPError(ctx, err, sw) - return - } - - bucket, err := h.findBucket(ctx, auth.OrgID, req.Database, req.RetentionPolicy) - if err != nil { - h.HandleHTTPError(ctx, err, sw) - return - } - span.LogKV("bucket_id", bucket.ID) - - if err := checkBucketWritePermissions(auth, bucket.OrgID, bucket.ID); err != nil { - h.HandleHTTPError(ctx, err, sw) - return - } - - parsed, err := points.NewParser(req.Precision).Parse(ctx, auth.OrgID, bucket.ID, req.Body) - if err != nil { - h.HandleHTTPError(ctx, err, sw) - return - } - - if err := h.PointsWriter.WritePoints(ctx, auth.OrgID, bucket.ID, parsed.Points); err != nil { - if partialErr, ok := err.(tsdb.PartialWriteError); ok { - h.HandleHTTPError(ctx, &errors.Error{ - Code: errors.EUnprocessableEntity, - Op: opWriteHandler, - Msg: "failure writing points to database", - Err: partialErr, - }, sw) - return - } - - h.HandleHTTPError(ctx, &errors.Error{ - Code: errors.EInternal, - Op: opWriteHandler, - Msg: "unexpected error writing points to database", - Err: err, - }, sw) - return - } - - w.WriteHeader(http.StatusNoContent) -} - -// findBucket finds a bucket for the specified database and -// retention policy combination. 
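// Editor's note: the following is an illustrative sketch, not part of the deleted file.
// handleWrite above widens the caller's authorization before the DBRP lookup: for every
// bucket *write* permission it prepends a matching *read* permission, so clients holding
// write-only tokens can still resolve db/rp to a bucket. The same shape with local
// placeholder types (the struct and field names here are illustrative, not influxdb's).
package main

import "fmt"

type permission struct{ action, resource string }

func widen(perms []permission) []permission {
	extra := []permission{}
	for _, p := range perms {
		if p.action == "write" && p.resource == "buckets" {
			extra = append(extra, permission{action: "read", resource: p.resource})
		}
	}
	return append(extra, perms...)
}

func main() {
	fmt.Println(widen([]permission{{action: "write", resource: "buckets"}}))
	// [{read buckets} {write buckets}]
}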
-func (h *WriteHandler) findBucket(ctx context.Context, orgID platform.ID, db, rp string) (*influxdb.Bucket, error) { - mapping, err := h.findMapping(ctx, orgID, db, rp) - if err != nil { - return nil, err - } - - return h.BucketService.FindBucketByID(ctx, mapping.BucketID) -} - -// checkBucketWritePermissions checks an Authorizer for write permissions to a -// specific Bucket. -func checkBucketWritePermissions(auth influxdb.Authorizer, orgID, bucketID platform.ID) error { - p, err := influxdb.NewPermissionAtID(bucketID, influxdb.WriteAction, influxdb.BucketsResourceType, orgID) - if err != nil { - return &errors.Error{ - Code: errors.EInternal, - Op: opWriteHandler, - Msg: fmt.Sprintf("unable to create permission for bucket: %v", err), - Err: err, - } - } - if pset, err := auth.PermissionSet(); err != nil || !pset.Allowed(*p) { - return &errors.Error{ - Code: errors.EForbidden, - Op: opWriteHandler, - Msg: "insufficient permissions for write", - Err: err, - } - } - return nil -} - -// findMapping finds a DBRPMapping for the database and retention policy -// combination. -func (h *WriteHandler) findMapping(ctx context.Context, orgID platform.ID, db, rp string) (*influxdb.DBRPMapping, error) { - filter := influxdb.DBRPMappingFilter{ - OrgID: &orgID, - Database: &db, - } - if rp != "" { - filter.RetentionPolicy = &rp - } else { - b := true // Can't get a direct pointer to `true`... - filter.Default = &b - } - - mappings, count, err := h.DBRPMappingService.FindMany(ctx, filter) - if err != nil { - return nil, err - } - if count == 0 { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "no dbrp mapping found", - } - } - return mappings[0], nil -} - -// writeRequest is a transport-agnostic write request. It holds all inputs for -// processing a v1 write request. -type writeRequest struct { - OrganizationName string - Database string - RetentionPolicy string - Precision string - Body io.ReadCloser -} - -// decodeWriteRequest extracts write request information from an inbound -// http.Request and returns a writeRequest. 
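// Editor's note: the following is an illustrative sketch, not part of the deleted file.
// decodeWriteRequest (below) pulls everything it needs from the query string: db is
// required, rp is optional (an empty rp means "fall back to the default DBRP mapping"),
// and precision defaults to "ns". A runnable stdlib-only illustration of that extraction
// for a typical v1 write URL; the URL and values are made up.
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, _ := url.Parse("http://localhost:8086/write?db=mydb&rp=autogen&precision=s")
	q := u.Query()

	db := q.Get("db")
	if db == "" {
		fmt.Println("error: missing db") // the handler rejects this case with an EInvalid error
		return
	}
	precision := q.Get("precision")
	if precision == "" {
		precision = "ns" // default used by the handler
	}
	fmt.Println(db, q.Get("rp"), precision) // mydb autogen s
}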
-func decodeWriteRequest(_ context.Context, r *http.Request, maxBatchSizeBytes int64) (*writeRequest, error) { - qp := r.URL.Query() - precision := qp.Get("precision") - if precision == "" { - precision = "ns" - } - db := qp.Get("db") - if db == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "missing db", - } - } - - encoding := r.Header.Get("Content-Encoding") - body, err := points.BatchReadCloser(r.Body, encoding, maxBatchSizeBytes) - if err != nil { - return nil, err - } - - return &writeRequest{ - OrganizationName: qp.Get("org"), - Database: db, - RetentionPolicy: qp.Get("rp"), - Precision: precision, - Body: body, - }, nil -} diff --git a/http/legacy/write_handler_test.go b/http/legacy/write_handler_test.go deleted file mode 100644 index f985b732270..00000000000 --- a/http/legacy/write_handler_test.go +++ /dev/null @@ -1,502 +0,0 @@ -package legacy - -import ( - "context" - "fmt" - "io" - "net/http" - "net/http/httptest" - "reflect" - "strings" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - pcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/dbrp" - "github.com/influxdata/influxdb/v2/http/mocks" - "github.com/influxdata/influxdb/v2/kit/platform" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" -) - -var generator = snowflake.NewDefaultIDGenerator() - -func TestWriteHandler_BucketAndMappingExistsDefaultRP(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - var ( - // Mocked Services - eventRecorder = mocks.NewMockEventRecorder(ctrl) - dbrpMappingSvc = mocks.NewMockDBRPMappingService(ctrl) - bucketService = mocks.NewMockBucketService(ctrl) - pointsWriter = mocks.NewMockPointsWriter(ctrl) - - // Found Resources - orgID = generator.ID() - bucket = &influxdb.Bucket{ - ID: generator.ID(), - OrgID: orgID, - Name: "mydb/autogen", - RetentionPolicyName: "autogen", - RetentionPeriod: 72 * time.Hour, - } - mapping = &influxdb.DBRPMapping{ - OrganizationID: orgID, - BucketID: bucket.ID, - Database: "mydb", - RetentionPolicy: "autogen", - Default: true, - } - - lineProtocolBody = "m,t1=v1 f1=2 100" - ) - - findAutogenMapping := dbrpMappingSvc. - EXPECT(). - FindMany(gomock.Any(), influxdb.DBRPMappingFilter{ - OrgID: &mapping.OrganizationID, - Database: &mapping.Database, - Default: &mapping.Default, - }).Return([]*influxdb.DBRPMapping{mapping}, 1, nil) - - findBucketByID := bucketService. - EXPECT(). - FindBucketByID(gomock.Any(), bucket.ID).Return(bucket, nil) - - points := parseLineProtocol(t, lineProtocolBody) - writePoints := pointsWriter. - EXPECT(). - WritePoints(gomock.Any(), orgID, bucket.ID, pointsMatcher{points}).Return(nil) - - recordWriteEvent := eventRecorder.EXPECT(). - Record(gomock.Any(), gomock.Any()) - - gomock.InOrder( - findAutogenMapping, - findBucketByID, - writePoints, - recordWriteEvent, - ) - - perms := newPermissions(influxdb.WriteAction, influxdb.BucketsResourceType, &orgID, nil) - auth := newAuthorization(orgID, perms...) 
- ctx := pcontext.SetAuthorizer(context.Background(), auth) - r := newWriteRequest(ctx, lineProtocolBody) - params := r.URL.Query() - params.Set("db", "mydb") - params.Set("rp", "") - r.URL.RawQuery = params.Encode() - - handler := NewWriterHandler(&PointsWriterBackend{ - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(t)), - Logger: zaptest.NewLogger(t), - BucketService: bucketService, - DBRPMappingService: dbrp.NewAuthorizedService(dbrpMappingSvc), - PointsWriter: pointsWriter, - EventRecorder: eventRecorder, - }) - w := httptest.NewRecorder() - handler.ServeHTTP(w, r) - assert.Equal(t, http.StatusNoContent, w.Code) - assert.Equal(t, "", w.Body.String()) -} - -func TestWriteHandler_BucketAndMappingExistsSpecificRP(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - var ( - // Mocked Services - eventRecorder = mocks.NewMockEventRecorder(ctrl) - dbrpMappingSvc = mocks.NewMockDBRPMappingService(ctrl) - bucketService = mocks.NewMockBucketService(ctrl) - pointsWriter = mocks.NewMockPointsWriter(ctrl) - - // Found Resources - orgID = generator.ID() - bucket = &influxdb.Bucket{ - ID: generator.ID(), - OrgID: orgID, - Name: "mydb/autogen", - RetentionPolicyName: "autogen", - RetentionPeriod: 72 * time.Hour, - } - mapping = &influxdb.DBRPMapping{ - OrganizationID: orgID, - BucketID: bucket.ID, - Database: "mydb", - RetentionPolicy: "autogen", - Default: true, - } - - lineProtocolBody = "m,t1=v1 f1=2 100" - ) - - findAutogenMapping := dbrpMappingSvc. - EXPECT(). - FindMany(gomock.Any(), influxdb.DBRPMappingFilter{ - OrgID: &mapping.OrganizationID, - Database: &mapping.Database, - RetentionPolicy: &mapping.RetentionPolicy, - }).Return([]*influxdb.DBRPMapping{mapping}, 1, nil) - - findBucketByID := bucketService. - EXPECT(). - FindBucketByID(gomock.Any(), bucket.ID).Return(bucket, nil) - - points := parseLineProtocol(t, lineProtocolBody) - writePoints := pointsWriter. - EXPECT(). - WritePoints(gomock.Any(), orgID, bucket.ID, pointsMatcher{points}).Return(nil) - - recordWriteEvent := eventRecorder.EXPECT(). - Record(gomock.Any(), gomock.Any()) - - gomock.InOrder( - findAutogenMapping, - findBucketByID, - writePoints, - recordWriteEvent, - ) - - perms := newPermissions(influxdb.WriteAction, influxdb.BucketsResourceType, &orgID, nil) - auth := newAuthorization(orgID, perms...) 
- ctx := pcontext.SetAuthorizer(context.Background(), auth) - r := newWriteRequest(ctx, lineProtocolBody) - params := r.URL.Query() - params.Set("db", "mydb") - params.Set("rp", "autogen") - r.URL.RawQuery = params.Encode() - - handler := NewWriterHandler(&PointsWriterBackend{ - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(t)), - Logger: zaptest.NewLogger(t), - BucketService: bucketService, - DBRPMappingService: dbrp.NewAuthorizedService(dbrpMappingSvc), - PointsWriter: pointsWriter, - EventRecorder: eventRecorder, - }) - w := httptest.NewRecorder() - handler.ServeHTTP(w, r) - assert.Equal(t, http.StatusNoContent, w.Code) - assert.Equal(t, "", w.Body.String()) -} - -func TestWriteHandler_PartialWrite(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - var ( - // Mocked Services - eventRecorder = mocks.NewMockEventRecorder(ctrl) - dbrpMappingSvc = mocks.NewMockDBRPMappingService(ctrl) - bucketService = mocks.NewMockBucketService(ctrl) - pointsWriter = mocks.NewMockPointsWriter(ctrl) - - // Found Resources - orgID = generator.ID() - bucket = &influxdb.Bucket{ - ID: generator.ID(), - OrgID: orgID, - Name: "mydb/autogen", - RetentionPolicyName: "autogen", - RetentionPeriod: 72 * time.Hour, - } - mapping = &influxdb.DBRPMapping{ - OrganizationID: orgID, - BucketID: bucket.ID, - Database: "mydb", - RetentionPolicy: "autogen", - Default: true, - } - - lineProtocolBody = "m,t1=v1 f1=2 100" - ) - - findAutogenMapping := dbrpMappingSvc. - EXPECT(). - FindMany(gomock.Any(), influxdb.DBRPMappingFilter{ - OrgID: &mapping.OrganizationID, - Database: &mapping.Database, - RetentionPolicy: &mapping.RetentionPolicy, - }).Return([]*influxdb.DBRPMapping{mapping}, 1, nil) - - findBucketByID := bucketService. - EXPECT(). - FindBucketByID(gomock.Any(), bucket.ID).Return(bucket, nil) - - points := parseLineProtocol(t, lineProtocolBody) - writePoints := pointsWriter. - EXPECT(). - WritePoints(gomock.Any(), orgID, bucket.ID, pointsMatcher{points}). - Return(tsdb.PartialWriteError{Reason: "bad points", Dropped: 1}) - - recordWriteEvent := eventRecorder.EXPECT(). - Record(gomock.Any(), gomock.Any()) - - gomock.InOrder( - findAutogenMapping, - findBucketByID, - writePoints, - recordWriteEvent, - ) - - perms := newPermissions(influxdb.WriteAction, influxdb.BucketsResourceType, &orgID, nil) - auth := newAuthorization(orgID, perms...) 
- ctx := pcontext.SetAuthorizer(context.Background(), auth) - r := newWriteRequest(ctx, lineProtocolBody) - params := r.URL.Query() - params.Set("db", "mydb") - params.Set("rp", "autogen") - r.URL.RawQuery = params.Encode() - - handler := NewWriterHandler(&PointsWriterBackend{ - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(t)), - Logger: zaptest.NewLogger(t), - BucketService: bucketService, - DBRPMappingService: dbrp.NewAuthorizedService(dbrpMappingSvc), - PointsWriter: pointsWriter, - EventRecorder: eventRecorder, - }) - w := httptest.NewRecorder() - handler.ServeHTTP(w, r) - assert.Equal(t, http.StatusUnprocessableEntity, w.Code) - assert.Equal(t, `{"code":"unprocessable entity","message":"failure writing points to database: partial write: bad points dropped=1"}`, w.Body.String()) -} - -func TestWriteHandler_BucketAndMappingExistsNoPermissions(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - var ( - // Mocked Services - eventRecorder = mocks.NewMockEventRecorder(ctrl) - dbrpMappingSvc = mocks.NewMockDBRPMappingService(ctrl) - bucketService = mocks.NewMockBucketService(ctrl) - pointsWriter = mocks.NewMockPointsWriter(ctrl) - - // Found Resources - orgID = generator.ID() - bucket = &influxdb.Bucket{ - ID: generator.ID(), - OrgID: orgID, - Name: "mydb/autogen", - RetentionPolicyName: "autogen", - RetentionPeriod: 72 * time.Hour, - } - mapping = &influxdb.DBRPMapping{ - OrganizationID: orgID, - BucketID: bucket.ID, - Database: "mydb", - RetentionPolicy: "autogen", - Default: true, - } - - lineProtocolBody = "m,t1=v1 f1=2 100" - ) - - findAutogenMapping := dbrpMappingSvc. - EXPECT(). - FindMany(gomock.Any(), influxdb.DBRPMappingFilter{ - OrgID: &mapping.OrganizationID, - Database: &mapping.Database, - Default: &mapping.Default, - }).Return([]*influxdb.DBRPMapping{mapping}, 1, nil) - - findBucketByID := bucketService. - EXPECT(). - FindBucketByID(gomock.Any(), bucket.ID).Return(bucket, nil) - - recordWriteEvent := eventRecorder.EXPECT(). - Record(gomock.Any(), gomock.Any()) - - gomock.InOrder( - findAutogenMapping, - findBucketByID, - recordWriteEvent, - ) - - perms := newPermissions(influxdb.ReadAction, influxdb.BucketsResourceType, &orgID, nil) - auth := newAuthorization(orgID, perms...) 
- ctx := pcontext.SetAuthorizer(context.Background(), auth) - r := newWriteRequest(ctx, lineProtocolBody) - params := r.URL.Query() - params.Set("db", "mydb") - params.Set("rp", "") - r.URL.RawQuery = params.Encode() - - handler := NewWriterHandler(&PointsWriterBackend{ - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(t)), - Logger: zaptest.NewLogger(t), - BucketService: bucketService, - DBRPMappingService: dbrp.NewAuthorizedService(dbrpMappingSvc), - PointsWriter: pointsWriter, - EventRecorder: eventRecorder, - }) - w := httptest.NewRecorder() - handler.ServeHTTP(w, r) - assert.Equal(t, http.StatusForbidden, w.Code) - assert.Equal(t, "{\"code\":\"forbidden\",\"message\":\"insufficient permissions for write\"}", w.Body.String()) -} - -func TestWriteHandler_MappingNotExists(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - var ( - // Mocked Services - eventRecorder = mocks.NewMockEventRecorder(ctrl) - dbrpMappingSvc = mocks.NewMockDBRPMappingService(ctrl) - bucketService = mocks.NewMockBucketService(ctrl) - pointsWriter = mocks.NewMockPointsWriter(ctrl) - - // Found Resources - orgID = generator.ID() - bucket = &influxdb.Bucket{ - ID: generator.ID(), - OrgID: orgID, - Name: "mydb/autogen", - RetentionPolicyName: "autogen", - RetentionPeriod: 72 * time.Hour, - } - mapping = &influxdb.DBRPMapping{ - OrganizationID: orgID, - BucketID: bucket.ID, - Database: "mydb", - RetentionPolicy: "autogen", - } - - lineProtocolBody = "m,t1=v1 f1=2 100" - badRp = "foo" - ) - - findAutogenMapping := dbrpMappingSvc. - EXPECT(). - FindMany(gomock.Any(), influxdb.DBRPMappingFilter{ - OrgID: &mapping.OrganizationID, - Database: &mapping.Database, - RetentionPolicy: &badRp, - }).Return(nil, 0, dbrp.ErrDBRPNotFound) - - recordWriteEvent := eventRecorder.EXPECT(). - Record(gomock.Any(), gomock.Any()) - - gomock.InOrder( - findAutogenMapping, - recordWriteEvent, - ) - - perms := newPermissions(influxdb.WriteAction, influxdb.BucketsResourceType, &orgID, nil) - auth := newAuthorization(orgID, perms...) 
- ctx := pcontext.SetAuthorizer(context.Background(), auth) - r := newWriteRequest(ctx, lineProtocolBody) - params := r.URL.Query() - params.Set("db", "mydb") - params.Set("rp", badRp) - r.URL.RawQuery = params.Encode() - - handler := NewWriterHandler(&PointsWriterBackend{ - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(t)), - Logger: zaptest.NewLogger(t), - BucketService: bucketService, - DBRPMappingService: dbrp.NewAuthorizedService(dbrpMappingSvc), - PointsWriter: pointsWriter, - EventRecorder: eventRecorder, - }) - w := httptest.NewRecorder() - handler.ServeHTTP(w, r) - assert.Equal(t, http.StatusNotFound, w.Code) - assert.Equal(t, `{"code":"not found","message":"unable to find DBRP"}`, w.Body.String()) -} - -func parseLineProtocol(t *testing.T, line string) []models.Point { - t.Helper() - points, err := models.ParsePoints([]byte(line)) - if err != nil { - t.Error(err) - } - return points -} - -type pointsMatcher struct { - points []models.Point -} - -func (m pointsMatcher) Matches(x interface{}) bool { - other, ok := x.([]models.Point) - if !ok { - return false - } - - if len(m.points) != len(other) { - return false - } - - for i := 0; i < len(m.points)-1; i++ { - p := m.points[i] - op := other[i] - - if !reflect.DeepEqual(p.Name(), op.Name()) { - return false - } - - if !reflect.DeepEqual(p.Tags(), op.Tags()) { - return false - } - - fields, err := p.Fields() - if err != nil { - return false - } - ofields, err := op.Fields() - if err != nil { - return false - } - if !reflect.DeepEqual(fields, ofields) { - return false - } - } - - return true -} - -func (m pointsMatcher) String() string { - return fmt.Sprintf("%#v", m.points) -} - -func newPermissions(action influxdb.Action, resourceType influxdb.ResourceType, orgID, id *platform.ID) []influxdb.Permission { - return []influxdb.Permission{ - { - Action: action, - Resource: influxdb.Resource{ - Type: resourceType, - OrgID: orgID, - ID: id, - }, - }, - } -} - -func newAuthorization(orgID platform.ID, permissions ...influxdb.Permission) *influxdb.Authorization { - return &influxdb.Authorization{ - ID: generator.ID(), - Status: influxdb.Active, - OrgID: orgID, - Permissions: permissions, - } -} - -func newWriteRequest(ctx context.Context, body string) *http.Request { - var r io.Reader - if body != "" { - r = strings.NewReader(body) - } - return httptest.NewRequest(http.MethodPost, "http://localhost:9999/write", r).WithContext(ctx) -} diff --git a/http/legacy/write_usage_recorder.go b/http/legacy/write_usage_recorder.go deleted file mode 100644 index 5b37f6cff14..00000000000 --- a/http/legacy/write_usage_recorder.go +++ /dev/null @@ -1,31 +0,0 @@ -package legacy - -import ( - "context" - - "github.com/influxdata/influxdb/v2/http/metric" - "github.com/influxdata/influxdb/v2/kit/platform" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" -) - -func newWriteUsageRecorder(w *kithttp.StatusResponseWriter, recorder metric.EventRecorder) *writeUsageRecorder { - return &writeUsageRecorder{ - Writer: w, - EventRecorder: recorder, - } -} - -type writeUsageRecorder struct { - Writer *kithttp.StatusResponseWriter - EventRecorder metric.EventRecorder -} - -func (w *writeUsageRecorder) Record(ctx context.Context, requestBytes int, orgID platform.ID, endpoint string) { - w.EventRecorder.Record(ctx, metric.Event{ - OrgID: orgID, - Endpoint: endpoint, - RequestBytes: requestBytes, - ResponseBytes: w.Writer.ResponseBytes(), - Status: w.Writer.Code(), - }) -} diff --git a/http/metric/recorder.go 
b/http/metric/recorder.go deleted file mode 100644 index 1e6ab2f8236..00000000000 --- a/http/metric/recorder.go +++ /dev/null @@ -1,27 +0,0 @@ -package metric - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// EventRecorder records meta-data associated with http requests. -type EventRecorder interface { - Record(ctx context.Context, e Event) -} - -// Event represents the meta data associated with an API request. -type Event struct { - OrgID platform.ID - Endpoint string - RequestBytes int - ResponseBytes int - Status int -} - -// NopEventRecorder never records events. -type NopEventRecorder struct{} - -// Record never records events. -func (n *NopEventRecorder) Record(ctx context.Context, e Event) {} diff --git a/http/middleware.go b/http/middleware.go deleted file mode 100644 index fe87060c34f..00000000000 --- a/http/middleware.go +++ /dev/null @@ -1,168 +0,0 @@ -package http - -import ( - "bytes" - "errors" - "io" - "net/http" - "path" - "strings" - "time" - - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -// LoggingMW middleware for logging inflight http requests. -func LoggingMW(log *zap.Logger) kithttp.Middleware { - return func(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - srw := kithttp.NewStatusResponseWriter(w) - - var buf bytes.Buffer - r.Body = &bodyEchoer{ - rc: r.Body, - teedR: io.TeeReader(r.Body, &buf), - } - - defer func(start time.Time) { - errField := zap.Skip() - if errStr := w.Header().Get(kithttp.PlatformErrorCodeHeader); errStr != "" { - errField = zap.Error(errors.New(errStr)) - } - - errReferenceField := zap.Skip() - if errReference := w.Header().Get(kithttp.PlatformErrorCodeHeader); errReference != "" { - errReferenceField = zap.String("error_code", errReference) - } - - fields := []zap.Field{ - zap.String("method", r.Method), - zap.String("host", r.Host), - zap.String("path", r.URL.Path), - zap.String("query", r.URL.Query().Encode()), - zap.String("proto", r.Proto), - zap.Int("status_code", srw.Code()), - zap.Int("response_size", srw.ResponseBytes()), - zap.Int64("content_length", r.ContentLength), - zap.String("referrer", r.Referer()), - zap.String("remote", r.RemoteAddr), - zap.String("user_agent", kithttp.UserAgent(r)), - zap.Duration("took", time.Since(start)), - errField, - errReferenceField, - } - - invalidMethodFn, ok := mapURLPath(r.URL.Path) - if !ok || !invalidMethodFn(r.Method) { - fields = append(fields, zap.ByteString("body", buf.Bytes())) - } - - log.Debug("Request", fields...) 
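// Editorial sketch (not part of the original middleware): whether the echoed
// request body is added to the log fields above depends on mapURLPath and the
// per-route isValidMethodFn, e.g.
//
//	fn, ok := mapURLPath("/api/v2/signin") // blacklisted via ignoreMethod()
//	// ok == true and fn("POST") == true, so the body field is omitted
//
//	fn, ok = mapURLPath("/api/v2/buckets") // any path outside blacklistEndpoints
//	// ok == false, so the body is appended to the log fields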
- }(time.Now()) - - next.ServeHTTP(srw, r) - } - return http.HandlerFunc(fn) - } -} - -type isValidMethodFn func(method string) bool - -func mapURLPath(rawPath string) (isValidMethodFn, bool) { - if fn, ok := blacklistEndpoints[rawPath]; ok { - return fn, true - } - - shiftPath := func(p string) (head, tail string) { - p = path.Clean("/" + p) - i := strings.Index(p[1:], "/") + 1 - if i <= 0 { - return p[1:], "/" - } - return p[1:i], p[i:] - } - - // ugh, should probably make this whole operation use a trie - partsMatch := func(raw, source string) bool { - return raw == source || (strings.HasPrefix(source, ":") && raw != "") - } - - compareRawSourceURLs := func(raw, source string) bool { - sourceHead, sourceTail := shiftPath(source) - for rawHead, rawTail := shiftPath(rawPath); rawHead != ""; { - if !partsMatch(rawHead, sourceHead) { - return false - } - rawHead, rawTail = shiftPath(rawTail) - sourceHead, sourceTail = shiftPath(sourceTail) - } - return sourceHead == "" - } - - for sourcePath, fn := range blacklistEndpoints { - match := compareRawSourceURLs(rawPath, sourcePath) - if match { - return fn, true - } - } - - return nil, false -} - -func ignoreMethod(ignoredMethods ...string) isValidMethodFn { - if len(ignoredMethods) == 0 { - return func(string) bool { return true } - } - - ignoreMap := make(map[string]bool) - for _, method := range ignoredMethods { - ignoreMap[method] = true - } - - return func(method string) bool { - return ignoreMap[method] - } -} - -const ( - prefixSetup = "/api/v2/setup" - organizationsIDSecretsPath = "/api/v2/orgs/:id/secrets" - organizationsIDSecretsDeletePath = "/api/v2/orgs/:id/secrets/delete" -) - -// TODO(@jsteenb2): make this a stronger type that handlers can register routes that should not be logged. -var blacklistEndpoints = map[string]isValidMethodFn{ - "/api/v2/signin": ignoreMethod(), - "/api/v2/signout": ignoreMethod(), - "/api/v2/me": ignoreMethod(), - "/api/v2/me/password": ignoreMethod(), - "/api/v2/users/:id/password": ignoreMethod(), - "/api/v2/packages/apply": ignoreMethod(), - prefixWrite: ignoreMethod("POST"), - "/write": ignoreMethod("POST"), - organizationsIDSecretsPath: ignoreMethod("PATCH"), - organizationsIDSecretsDeletePath: ignoreMethod("POST"), - prefixSetup: ignoreMethod("POST"), - prefixNotificationEndpoints: ignoreMethod("POST"), - notificationEndpointsIDPath: ignoreMethod("PUT"), - restoreKVPath: ignoreMethod(), - restoreSqlPath: ignoreMethod(), - restoreBucketPath: ignoreMethod(), - restoreShardPath: ignoreMethod(), - "/api/v2/remotes": ignoreMethod("POST"), - "/api/v2/remotes/:id": ignoreMethod("PATCH"), -} - -type bodyEchoer struct { - rc io.ReadCloser - teedR io.Reader -} - -func (b *bodyEchoer) Read(p []byte) (int, error) { - return b.teedR.Read(p) -} - -func (b *bodyEchoer) Close() error { - return b.rc.Close() -} diff --git a/http/middleware_test.go b/http/middleware_test.go deleted file mode 100644 index 4c59377088b..00000000000 --- a/http/middleware_test.go +++ /dev/null @@ -1,270 +0,0 @@ -package http - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "net/url" - "strconv" - "strings" - "testing" - - "github.com/influxdata/influxdb/v2/logger" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -func TestLoggingMW(t *testing.T) { - newDebugLogger := func(t *testing.T) (*zap.Logger, *bytes.Buffer) { - t.Helper() - - var buf bytes.Buffer - log, err := (&logger.Config{ - Format: "auto", - Level: zapcore.DebugLevel, - }).New(&buf) - if err != nil { - t.Fatal(err) - } - - 
return log, &buf - } - - urlWithQueries := func(path string, queryPairs ...string) url.URL { - u := url.URL{ - Path: path, - } - params := u.Query() - for i := 0; i < len(queryPairs)/2; i++ { - k, v := queryPairs[i*2], queryPairs[i*2+1] - params.Add(k, v) - } - return u - } - - encodeBody := func(t *testing.T, k, v string) *bytes.Buffer { - t.Helper() - - m := map[string]string{k: v} - - var buf bytes.Buffer - err := json.NewEncoder(&buf).Encode(m) - if err != nil { - t.Fatal(err) - } - - return &buf - } - - getKVPair := func(s string) (string, string) { - k, v, _ := strings.Cut(s, "=") - if v != "" { - v = strings.TrimSuffix(v, "\n") - } - return k, v - } - - echoHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var m map[string]string - err := json.NewDecoder(r.Body).Decode(&m) - if err != nil { - w.WriteHeader(422) - return - } - defer r.Body.Close() - - // set a non 200 status code here - w.WriteHeader(202) - - _, err = w.Write([]byte("ack")) - if err != nil { - w.WriteHeader(500) - return - } - }) - - teeReader := func(r *bytes.Buffer, w io.Writer) io.Reader { - if r == nil { - return nil - } - return io.TeeReader(r, w) - } - - type testRun struct { - name string - method string - path string - queryPairs []string - hasBody bool - hideBody bool - } - - testEndpoint := func(tt testRun) func(t *testing.T) { - fn := func(t *testing.T) { - t.Helper() - - log, buf := newDebugLogger(t) - - reqURL := urlWithQueries(tt.path, tt.queryPairs...) - var body *bytes.Buffer - if tt.hasBody { - body = encodeBody(t, "bin", "shake") - } - - var trackerBuf bytes.Buffer - req := httptest.NewRequest(tt.method, reqURL.String(), teeReader(body, &trackerBuf)) - rec := httptest.NewRecorder() - - LoggingMW(log)(echoHandler).ServeHTTP(rec, req) - - expected := map[string]string{ - "method": tt.method, - "host": "example.com", - "path": reqURL.Path, - "query": reqURL.RawQuery, - "proto": "HTTP/1.1", - "status_code": strconv.Itoa(rec.Code), - "response_size": strconv.Itoa(rec.Body.Len()), - "content_length": strconv.FormatInt(req.ContentLength, 10), - } - if tt.hasBody { - expected["body"] = fmt.Sprintf("%q", trackerBuf.String()) - } - - // skip first 4 pairs, is the base logger stuff - for _, pair := range strings.Split(buf.String(), " ")[4:] { - k, v := getKVPair(pair) - switch k { - case "took", "remote": - if v == "" { - t.Errorf("unexpected value(%q) for key(%q): expected=non empty string", v, k) - } - case "body": - if tt.hideBody && v != "" { - t.Errorf("expected body to be \"\" but got=%q", v) - continue - } - fallthrough - case "user_agent": - default: - if expectedV := expected[k]; expectedV != v { - t.Errorf("unexpected value(%q) for key(%q): expected=%q", v, k, expectedV) - } - } - } - } - - return fn - } - - t.Run("logs the http request", func(t *testing.T) { - tests := []testRun{ - { - name: "GET", - method: "GET", - path: "/foo", - queryPairs: []string{"dodgers", "are", "the", "terrible"}, - }, - { - name: "POST", - method: "POST", - path: "/foo", - queryPairs: []string{"bin", "shake"}, - hasBody: true, - }, - { - name: "PUT", - method: "PUT", - path: "/foo", - queryPairs: []string{"ninja", "turtles"}, - hasBody: true, - }, - { - name: "PATCH", - method: "PATCH", - path: "/foo", - queryPairs: []string{"peach", "daisy", "mario", "luigi"}, - hasBody: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, testEndpoint(tt)) - } - }) - - t.Run("does not log body for blacklisted routes", func(t *testing.T) { - tests := []testRun{ - { - name: "signin", - method: "POSTT", 
- path: "/api/v2/signin", - }, - { - name: "signout", - method: "POST", - path: "/api/v2/signout", - }, - { - name: "me path", - method: "POST", - path: "/api/v2/me", - }, - { - name: "me password path", - method: "POST", - path: "/api/v2/me/password", - }, - { - name: "user password path", - method: "POST", - path: "/api/v2/users/user-id/password", - }, - { - name: "write path", - method: "POST", - path: "/api/v2/write", - }, - { - name: "legacy write path", - method: "POST", - path: "/write", - }, - { - name: "orgs id secrets path", - method: "PATCH", - path: "/api/v2/orgs/org-id/secrets", - }, - { - name: "orgs id secrets delete path", - method: "POST", - path: "/api/v2/orgs/org-id/secrets/delete", - }, - { - name: "setup path", - method: "POST", - path: "/api/v2/setup", - }, - { - name: "notifications endpoints path", - method: "POST", - path: "/api/v2/notificationEndpoints", - }, - { - name: "notifications endpoints id path", - method: "PUT", - path: "/api/v2/notificationEndpoints/notification-id", - }, - } - - for _, tt := range tests { - tt.hasBody = true - tt.hideBody = true - t.Run(tt.name, testEndpoint(tt)) - } - }) - -} diff --git a/http/mock/middleware.go b/http/mock/middleware.go deleted file mode 100644 index 769f95c2cc8..00000000000 --- a/http/mock/middleware.go +++ /dev/null @@ -1,27 +0,0 @@ -package mock - -import ( - "net/http" - - "github.com/influxdata/influxdb/v2" - platcontext "github.com/influxdata/influxdb/v2/context" -) - -// NewAuthMiddlewareHandler create a mocked middleware handler. -func NewAuthMiddlewareHandler(handler http.Handler, auth influxdb.Authorizer) http.Handler { - return &authMiddlewareHandler{ - handler: handler, - auth: auth, - } -} - -type authMiddlewareHandler struct { - handler http.Handler - auth influxdb.Authorizer -} - -func (m *authMiddlewareHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - r = r.WithContext(platcontext.SetAuthorizer(ctx, m.auth)) - m.handler.ServeHTTP(w, r) -} diff --git a/http/mocks/bucket_service.go b/http/mocks/bucket_service.go deleted file mode 100644 index 7fe87bf5efc..00000000000 --- a/http/mocks/bucket_service.go +++ /dev/null @@ -1,146 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2 (interfaces: BucketService) - -// Package mocks is a generated GoMock package. 
-package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockBucketService is a mock of BucketService interface -type MockBucketService struct { - ctrl *gomock.Controller - recorder *MockBucketServiceMockRecorder -} - -// MockBucketServiceMockRecorder is the mock recorder for MockBucketService -type MockBucketServiceMockRecorder struct { - mock *MockBucketService -} - -// NewMockBucketService creates a new mock instance -func NewMockBucketService(ctrl *gomock.Controller) *MockBucketService { - mock := &MockBucketService{ctrl: ctrl} - mock.recorder = &MockBucketServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockBucketService) EXPECT() *MockBucketServiceMockRecorder { - return m.recorder -} - -// CreateBucket mocks base method -func (m *MockBucketService) CreateBucket(arg0 context.Context, arg1 *influxdb.Bucket) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateBucket", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// CreateBucket indicates an expected call of CreateBucket -func (mr *MockBucketServiceMockRecorder) CreateBucket(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucket", reflect.TypeOf((*MockBucketService)(nil).CreateBucket), arg0, arg1) -} - -// DeleteBucket mocks base method -func (m *MockBucketService) DeleteBucket(arg0 context.Context, arg1 platform.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucket", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteBucket indicates an expected call of DeleteBucket -func (mr *MockBucketServiceMockRecorder) DeleteBucket(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucket", reflect.TypeOf((*MockBucketService)(nil).DeleteBucket), arg0, arg1) -} - -// FindBucket mocks base method -func (m *MockBucketService) FindBucket(arg0 context.Context, arg1 influxdb.BucketFilter) (*influxdb.Bucket, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FindBucket", arg0, arg1) - ret0, _ := ret[0].(*influxdb.Bucket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FindBucket indicates an expected call of FindBucket -func (mr *MockBucketServiceMockRecorder) FindBucket(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucket", reflect.TypeOf((*MockBucketService)(nil).FindBucket), arg0, arg1) -} - -// FindBucketByID mocks base method -func (m *MockBucketService) FindBucketByID(arg0 context.Context, arg1 platform.ID) (*influxdb.Bucket, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FindBucketByID", arg0, arg1) - ret0, _ := ret[0].(*influxdb.Bucket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FindBucketByID indicates an expected call of FindBucketByID -func (mr *MockBucketServiceMockRecorder) FindBucketByID(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucketByID", reflect.TypeOf((*MockBucketService)(nil).FindBucketByID), arg0, arg1) -} - -// FindBucketByName mocks base method -func (m *MockBucketService) FindBucketByName(arg0 context.Context, arg1 platform.ID, arg2 string) (*influxdb.Bucket, error) 
{ - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FindBucketByName", arg0, arg1, arg2) - ret0, _ := ret[0].(*influxdb.Bucket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FindBucketByName indicates an expected call of FindBucketByName -func (mr *MockBucketServiceMockRecorder) FindBucketByName(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucketByName", reflect.TypeOf((*MockBucketService)(nil).FindBucketByName), arg0, arg1, arg2) -} - -// FindBuckets mocks base method -func (m *MockBucketService) FindBuckets(arg0 context.Context, arg1 influxdb.BucketFilter, arg2 ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "FindBuckets", varargs...) - ret0, _ := ret[0].([]*influxdb.Bucket) - ret1, _ := ret[1].(int) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// FindBuckets indicates an expected call of FindBuckets -func (mr *MockBucketServiceMockRecorder) FindBuckets(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBuckets", reflect.TypeOf((*MockBucketService)(nil).FindBuckets), varargs...) -} - -// UpdateBucket mocks base method -func (m *MockBucketService) UpdateBucket(arg0 context.Context, arg1 platform.ID, arg2 influxdb.BucketUpdate) (*influxdb.Bucket, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateBucket", arg0, arg1, arg2) - ret0, _ := ret[0].(*influxdb.Bucket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateBucket indicates an expected call of UpdateBucket -func (mr *MockBucketServiceMockRecorder) UpdateBucket(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateBucket", reflect.TypeOf((*MockBucketService)(nil).UpdateBucket), arg0, arg1, arg2) -} diff --git a/http/mocks/dbrp_mapping_service.go b/http/mocks/dbrp_mapping_service.go deleted file mode 100644 index 6f529e58e80..00000000000 --- a/http/mocks/dbrp_mapping_service.go +++ /dev/null @@ -1,115 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2 (interfaces: DBRPMappingService) - -// Package mocks is a generated GoMock package. 
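// Editorial note (not generated code): in the legacy write-handler tests that are
// also removed in this diff, expectations such as FindMany returning
// dbrp.ErrDBRPNotFound are declared directly on this mock, while the handler
// itself receives it wrapped in dbrp.NewAuthorizedService.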
-package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockDBRPMappingService is a mock of DBRPMappingService interface -type MockDBRPMappingService struct { - ctrl *gomock.Controller - recorder *MockDBRPMappingServiceMockRecorder -} - -// MockDBRPMappingServiceMockRecorder is the mock recorder for MockDBRPMappingService -type MockDBRPMappingServiceMockRecorder struct { - mock *MockDBRPMappingService -} - -// NewMockDBRPMappingService creates a new mock instance -func NewMockDBRPMappingService(ctrl *gomock.Controller) *MockDBRPMappingService { - mock := &MockDBRPMappingService{ctrl: ctrl} - mock.recorder = &MockDBRPMappingServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockDBRPMappingService) EXPECT() *MockDBRPMappingServiceMockRecorder { - return m.recorder -} - -// Create mocks base method -func (m *MockDBRPMappingService) Create(arg0 context.Context, arg1 *influxdb.DBRPMapping) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Create", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Create indicates an expected call of Create -func (mr *MockDBRPMappingServiceMockRecorder) Create(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockDBRPMappingService)(nil).Create), arg0, arg1) -} - -// Delete mocks base method -func (m *MockDBRPMappingService) Delete(arg0 context.Context, arg1, arg2 platform.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// Delete indicates an expected call of Delete -func (mr *MockDBRPMappingServiceMockRecorder) Delete(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDBRPMappingService)(nil).Delete), arg0, arg1, arg2) -} - -// FindByID mocks base method -func (m *MockDBRPMappingService) FindByID(arg0 context.Context, arg1, arg2 platform.ID) (*influxdb.DBRPMapping, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FindByID", arg0, arg1, arg2) - ret0, _ := ret[0].(*influxdb.DBRPMapping) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FindByID indicates an expected call of FindByID -func (mr *MockDBRPMappingServiceMockRecorder) FindByID(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindByID", reflect.TypeOf((*MockDBRPMappingService)(nil).FindByID), arg0, arg1, arg2) -} - -// FindMany mocks base method -func (m *MockDBRPMappingService) FindMany(arg0 context.Context, arg1 influxdb.DBRPMappingFilter, arg2 ...influxdb.FindOptions) ([]*influxdb.DBRPMapping, int, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "FindMany", varargs...) - ret0, _ := ret[0].([]*influxdb.DBRPMapping) - ret1, _ := ret[1].(int) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// FindMany indicates an expected call of FindMany -func (mr *MockDBRPMappingServiceMockRecorder) FindMany(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
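// Editorial comment: the fixed arguments (context and filter) and any variadic
// FindOptions matchers are flattened into one slice before being recorded, so
// gomock can match them positionally in the expected call.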
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindMany", reflect.TypeOf((*MockDBRPMappingService)(nil).FindMany), varargs...) -} - -// Update mocks base method -func (m *MockDBRPMappingService) Update(arg0 context.Context, arg1 *influxdb.DBRPMapping) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Update", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Update indicates an expected call of Update -func (mr *MockDBRPMappingServiceMockRecorder) Update(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockDBRPMappingService)(nil).Update), arg0, arg1) -} diff --git a/http/mocks/event_recorder.go b/http/mocks/event_recorder.go deleted file mode 100644 index 726c2f82d77..00000000000 --- a/http/mocks/event_recorder.go +++ /dev/null @@ -1,48 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/http/metric (interfaces: EventRecorder) - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - metric "github.com/influxdata/influxdb/v2/http/metric" -) - -// MockEventRecorder is a mock of EventRecorder interface -type MockEventRecorder struct { - ctrl *gomock.Controller - recorder *MockEventRecorderMockRecorder -} - -// MockEventRecorderMockRecorder is the mock recorder for MockEventRecorder -type MockEventRecorderMockRecorder struct { - mock *MockEventRecorder -} - -// NewMockEventRecorder creates a new mock instance -func NewMockEventRecorder(ctrl *gomock.Controller) *MockEventRecorder { - mock := &MockEventRecorder{ctrl: ctrl} - mock.recorder = &MockEventRecorderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockEventRecorder) EXPECT() *MockEventRecorderMockRecorder { - return m.recorder -} - -// Record mocks base method -func (m *MockEventRecorder) Record(arg0 context.Context, arg1 metric.Event) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Record", arg0, arg1) -} - -// Record indicates an expected call of Record -func (mr *MockEventRecorderMockRecorder) Record(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Record", reflect.TypeOf((*MockEventRecorder)(nil).Record), arg0, arg1) -} diff --git a/http/mocks/organization_service.go b/http/mocks/organization_service.go deleted file mode 100644 index f2028604f14..00000000000 --- a/http/mocks/organization_service.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2 (interfaces: OrganizationService) - -// Package mocks is a generated GoMock package. 
-package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockOrganizationService is a mock of OrganizationService interface -type MockOrganizationService struct { - ctrl *gomock.Controller - recorder *MockOrganizationServiceMockRecorder -} - -// MockOrganizationServiceMockRecorder is the mock recorder for MockOrganizationService -type MockOrganizationServiceMockRecorder struct { - mock *MockOrganizationService -} - -// NewMockOrganizationService creates a new mock instance -func NewMockOrganizationService(ctrl *gomock.Controller) *MockOrganizationService { - mock := &MockOrganizationService{ctrl: ctrl} - mock.recorder = &MockOrganizationServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockOrganizationService) EXPECT() *MockOrganizationServiceMockRecorder { - return m.recorder -} - -// CreateOrganization mocks base method -func (m *MockOrganizationService) CreateOrganization(arg0 context.Context, arg1 *influxdb.Organization) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateOrganization", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// CreateOrganization indicates an expected call of CreateOrganization -func (mr *MockOrganizationServiceMockRecorder) CreateOrganization(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrganization", reflect.TypeOf((*MockOrganizationService)(nil).CreateOrganization), arg0, arg1) -} - -// DeleteOrganization mocks base method -func (m *MockOrganizationService) DeleteOrganization(arg0 context.Context, arg1 platform.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteOrganization", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteOrganization indicates an expected call of DeleteOrganization -func (mr *MockOrganizationServiceMockRecorder) DeleteOrganization(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOrganization", reflect.TypeOf((*MockOrganizationService)(nil).DeleteOrganization), arg0, arg1) -} - -// FindOrganization mocks base method -func (m *MockOrganizationService) FindOrganization(arg0 context.Context, arg1 influxdb.OrganizationFilter) (*influxdb.Organization, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FindOrganization", arg0, arg1) - ret0, _ := ret[0].(*influxdb.Organization) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FindOrganization indicates an expected call of FindOrganization -func (mr *MockOrganizationServiceMockRecorder) FindOrganization(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOrganization", reflect.TypeOf((*MockOrganizationService)(nil).FindOrganization), arg0, arg1) -} - -// FindOrganizationByID mocks base method -func (m *MockOrganizationService) FindOrganizationByID(arg0 context.Context, arg1 platform.ID) (*influxdb.Organization, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FindOrganizationByID", arg0, arg1) - ret0, _ := ret[0].(*influxdb.Organization) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FindOrganizationByID indicates an expected call of FindOrganizationByID -func (mr *MockOrganizationServiceMockRecorder) FindOrganizationByID(arg0, 
arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOrganizationByID", reflect.TypeOf((*MockOrganizationService)(nil).FindOrganizationByID), arg0, arg1) -} - -// FindOrganizations mocks base method -func (m *MockOrganizationService) FindOrganizations(arg0 context.Context, arg1 influxdb.OrganizationFilter, arg2 ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "FindOrganizations", varargs...) - ret0, _ := ret[0].([]*influxdb.Organization) - ret1, _ := ret[1].(int) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// FindOrganizations indicates an expected call of FindOrganizations -func (mr *MockOrganizationServiceMockRecorder) FindOrganizations(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindOrganizations", reflect.TypeOf((*MockOrganizationService)(nil).FindOrganizations), varargs...) -} - -// UpdateOrganization mocks base method -func (m *MockOrganizationService) UpdateOrganization(arg0 context.Context, arg1 platform.ID, arg2 influxdb.OrganizationUpdate) (*influxdb.Organization, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateOrganization", arg0, arg1, arg2) - ret0, _ := ret[0].(*influxdb.Organization) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateOrganization indicates an expected call of UpdateOrganization -func (mr *MockOrganizationServiceMockRecorder) UpdateOrganization(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateOrganization", reflect.TypeOf((*MockOrganizationService)(nil).UpdateOrganization), arg0, arg1, arg2) -} diff --git a/http/mocks/points_writer.go b/http/mocks/points_writer.go deleted file mode 100644 index 0d16494f07b..00000000000 --- a/http/mocks/points_writer.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/storage (interfaces: PointsWriter) - -// Package mocks is a generated GoMock package. 
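// Editorial sketch (not generated code): the write-handler tests pair this mock
// with the pointsMatcher helper defined alongside them, roughly:
//
//	pw := mocks.NewMockPointsWriter(ctrl)
//	pw.EXPECT().
//		WritePoints(gomock.Any(), orgID, bucket.ID, pointsMatcher{
//			points: parseLineProtocol(t, "m,t1=v1 f1=2 100"),
//		}).
//		Return(nil)
//
// orgID and bucket refer to the fixtures set up in those tests; the exact
// expectation is an assumption, since the successful-write case is outside this
// hunk.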
-package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2/kit/platform" - models "github.com/influxdata/influxdb/v2/models" -) - -// MockPointsWriter is a mock of PointsWriter interface -type MockPointsWriter struct { - ctrl *gomock.Controller - recorder *MockPointsWriterMockRecorder -} - -// MockPointsWriterMockRecorder is the mock recorder for MockPointsWriter -type MockPointsWriterMockRecorder struct { - mock *MockPointsWriter -} - -// NewMockPointsWriter creates a new mock instance -func NewMockPointsWriter(ctrl *gomock.Controller) *MockPointsWriter { - mock := &MockPointsWriter{ctrl: ctrl} - mock.recorder = &MockPointsWriterMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockPointsWriter) EXPECT() *MockPointsWriterMockRecorder { - return m.recorder -} - -// WritePoints mocks base method -func (m *MockPointsWriter) WritePoints(arg0 context.Context, arg1, arg2 platform.ID, arg3 []models.Point) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WritePoints", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(error) - return ret0 -} - -// WritePoints indicates an expected call of WritePoints -func (mr *MockPointsWriterMockRecorder) WritePoints(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WritePoints", reflect.TypeOf((*MockPointsWriter)(nil).WritePoints), arg0, arg1, arg2, arg3) -} diff --git a/http/notification_endpoint.go b/http/notification_endpoint.go deleted file mode 100644 index 64c02427a5b..00000000000 --- a/http/notification_endpoint.go +++ /dev/null @@ -1,729 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - pctx "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "go.uber.org/zap" -) - -// NotificationEndpointBackend is all services and associated parameters required to construct -// the NotificationEndpointBackendHandler. -type NotificationEndpointBackend struct { - errors.HTTPErrorHandler - log *zap.Logger - - NotificationEndpointService influxdb.NotificationEndpointService - UserResourceMappingService influxdb.UserResourceMappingService - LabelService influxdb.LabelService - UserService influxdb.UserService -} - -// NewNotificationEndpointBackend returns a new instance of NotificationEndpointBackend. 
-func NewNotificationEndpointBackend(log *zap.Logger, b *APIBackend) *NotificationEndpointBackend { - return &NotificationEndpointBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - NotificationEndpointService: b.NotificationEndpointService, - UserResourceMappingService: b.UserResourceMappingService, - LabelService: b.LabelService, - UserService: b.UserService, - } -} - -func (b *NotificationEndpointBackend) Logger() *zap.Logger { - return b.log -} - -// NotificationEndpointHandler is the handler for the notificationEndpoint service -type NotificationEndpointHandler struct { - *httprouter.Router - errors.HTTPErrorHandler - log *zap.Logger - - NotificationEndpointService influxdb.NotificationEndpointService - UserResourceMappingService influxdb.UserResourceMappingService - LabelService influxdb.LabelService - UserService influxdb.UserService -} - -const ( - prefixNotificationEndpoints = "/api/v2/notificationEndpoints" - notificationEndpointsIDPath = "/api/v2/notificationEndpoints/:id" - notificationEndpointsIDMembersPath = "/api/v2/notificationEndpoints/:id/members" - notificationEndpointsIDMembersIDPath = "/api/v2/notificationEndpoints/:id/members/:userID" - notificationEndpointsIDOwnersPath = "/api/v2/notificationEndpoints/:id/owners" - notificationEndpointsIDOwnersIDPath = "/api/v2/notificationEndpoints/:id/owners/:userID" - notificationEndpointsIDLabelsPath = "/api/v2/notificationEndpoints/:id/labels" - notificationEndpointsIDLabelsIDPath = "/api/v2/notificationEndpoints/:id/labels/:lid" -) - -// NewNotificationEndpointHandler returns a new instance of NotificationEndpointHandler. -func NewNotificationEndpointHandler(log *zap.Logger, b *NotificationEndpointBackend) *NotificationEndpointHandler { - h := &NotificationEndpointHandler{ - Router: NewRouter(b.HTTPErrorHandler), - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - - NotificationEndpointService: b.NotificationEndpointService, - UserResourceMappingService: b.UserResourceMappingService, - LabelService: b.LabelService, - UserService: b.UserService, - } - h.HandlerFunc("POST", prefixNotificationEndpoints, h.handlePostNotificationEndpoint) - h.HandlerFunc("GET", prefixNotificationEndpoints, h.handleGetNotificationEndpoints) - h.HandlerFunc("GET", notificationEndpointsIDPath, h.handleGetNotificationEndpoint) - h.HandlerFunc("DELETE", notificationEndpointsIDPath, h.handleDeleteNotificationEndpoint) - h.HandlerFunc("PUT", notificationEndpointsIDPath, h.handlePutNotificationEndpoint) - h.HandlerFunc("PATCH", notificationEndpointsIDPath, h.handlePatchNotificationEndpoint) - - memberBackend := MemberBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "member")), - ResourceType: influxdb.NotificationEndpointResourceType, - UserType: influxdb.Member, - UserResourceMappingService: b.UserResourceMappingService, - UserService: b.UserService, - } - h.HandlerFunc("POST", notificationEndpointsIDMembersPath, newPostMemberHandler(memberBackend)) - h.HandlerFunc("GET", notificationEndpointsIDMembersPath, newGetMembersHandler(memberBackend)) - h.HandlerFunc("DELETE", notificationEndpointsIDMembersIDPath, newDeleteMemberHandler(memberBackend)) - - ownerBackend := MemberBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "member")), - ResourceType: influxdb.NotificationEndpointResourceType, - UserType: influxdb.Owner, - UserResourceMappingService: b.UserResourceMappingService, - UserService: b.UserService, - } - h.HandlerFunc("POST", notificationEndpointsIDOwnersPath, 
newPostMemberHandler(ownerBackend)) - h.HandlerFunc("GET", notificationEndpointsIDOwnersPath, newGetMembersHandler(ownerBackend)) - h.HandlerFunc("DELETE", notificationEndpointsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend)) - - labelBackend := &LabelBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "label")), - LabelService: b.LabelService, - ResourceType: influxdb.NotificationEndpointResourceType, - } - h.HandlerFunc("GET", notificationEndpointsIDLabelsPath, newGetLabelsHandler(labelBackend)) - h.HandlerFunc("POST", notificationEndpointsIDLabelsPath, newPostLabelHandler(labelBackend)) - h.HandlerFunc("DELETE", notificationEndpointsIDLabelsIDPath, newDeleteLabelHandler(labelBackend)) - - return h -} - -type notificationEndpointLinks struct { - Self string `json:"self"` - Labels string `json:"labels"` - Members string `json:"members"` - Owners string `json:"owners"` -} - -type postNotificationEndpointRequest struct { - influxdb.NotificationEndpoint - Labels []string `json:"labels"` -} - -type notificationEndpointResponse struct { - influxdb.NotificationEndpoint - Labels []influxdb.Label `json:"labels"` - Links notificationEndpointLinks `json:"links"` -} - -func (resp notificationEndpointResponse) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(resp.NotificationEndpoint) - if err != nil { - return nil, err - } - - b2, err := json.Marshal(struct { - Labels []influxdb.Label `json:"labels"` - Links notificationEndpointLinks `json:"links"` - }{ - Links: resp.Links, - Labels: resp.Labels, - }) - if err != nil { - return nil, err - } - - return []byte(string(b1[:len(b1)-1]) + ", " + string(b2[1:])), nil -} - -type notificationEndpointsResponse struct { - NotificationEndpoints []notificationEndpointResponse `json:"notificationEndpoints"` - Links *influxdb.PagingLinks `json:"links"` -} - -func newNotificationEndpointResponse(edp influxdb.NotificationEndpoint, labels []*influxdb.Label) notificationEndpointResponse { - res := notificationEndpointResponse{ - NotificationEndpoint: edp, - Links: notificationEndpointLinks{ - Self: fmt.Sprintf("/api/v2/notificationEndpoints/%s", edp.GetID()), - Labels: fmt.Sprintf("/api/v2/notificationEndpoints/%s/labels", edp.GetID()), - Members: fmt.Sprintf("/api/v2/notificationEndpoints/%s/members", edp.GetID()), - Owners: fmt.Sprintf("/api/v2/notificationEndpoints/%s/owners", edp.GetID()), - }, - Labels: []influxdb.Label{}, - } - - for _, l := range labels { - res.Labels = append(res.Labels, *l) - } - - return res -} - -func newNotificationEndpointsResponse(ctx context.Context, edps []influxdb.NotificationEndpoint, labelService influxdb.LabelService, f influxdb.PagingFilter, opts influxdb.FindOptions) *notificationEndpointsResponse { - resp := ¬ificationEndpointsResponse{ - NotificationEndpoints: make([]notificationEndpointResponse, len(edps)), - Links: influxdb.NewPagingLinks(prefixNotificationEndpoints, opts, f, len(edps)), - } - for i, edp := range edps { - labels, _ := labelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: edp.GetID(), ResourceType: influxdb.NotificationEndpointResourceType}) - resp.NotificationEndpoints[i] = newNotificationEndpointResponse(edp, labels) - } - return resp -} - -func decodeGetNotificationEndpointRequest(ctx context.Context) (i platform.ID, err error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return i, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - if err := 
i.DecodeFromString(id); err != nil { - return i, err - } - return i, nil -} - -func (h *NotificationEndpointHandler) handleGetNotificationEndpoints(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - filter, opts, err := decodeNotificationEndpointFilter(ctx, r) - if err != nil { - h.log.Debug("Failed to decode request", zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - edps, _, err := h.NotificationEndpointService.FindNotificationEndpoints(ctx, filter, opts) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("NotificationEndpoints retrieved", zap.String("notificationEndpoints", fmt.Sprint(edps))) - - if err := encodeResponse(ctx, w, http.StatusOK, newNotificationEndpointsResponse(ctx, edps, h.LabelService, filter, opts)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func (h *NotificationEndpointHandler) handleGetNotificationEndpoint(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := decodeGetNotificationEndpointRequest(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - edp, err := h.NotificationEndpointService.FindNotificationEndpointByID(ctx, id) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("NotificationEndpoint retrieved", zap.String("notificationEndpoint", fmt.Sprint(edp))) - - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: edp.GetID(), ResourceType: influxdb.NotificationEndpointResourceType}) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, newNotificationEndpointResponse(edp, labels)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func decodeNotificationEndpointFilter(ctx context.Context, r *http.Request) (influxdb.NotificationEndpointFilter, influxdb.FindOptions, error) { - f := influxdb.NotificationEndpointFilter{ - UserResourceMappingFilter: influxdb.UserResourceMappingFilter{ - ResourceType: influxdb.NotificationEndpointResourceType, - }, - } - - opts, err := influxdb.DecodeFindOptions(r) - if err != nil { - return influxdb.NotificationEndpointFilter{}, influxdb.FindOptions{}, err - } - - q := r.URL.Query() - if orgIDStr := q.Get("orgID"); orgIDStr != "" { - orgID, err := platform.IDFromString(orgIDStr) - if err != nil { - return influxdb.NotificationEndpointFilter{}, influxdb.FindOptions{}, &errors.Error{ - Code: errors.EInvalid, - Msg: "orgID is invalid", - Err: err, - } - } - f.OrgID = orgID - } else if orgNameStr := q.Get("org"); orgNameStr != "" { - *f.Org = orgNameStr - } - - if userID := q.Get("user"); userID != "" { - id, err := platform.IDFromString(userID) - if err != nil { - return influxdb.NotificationEndpointFilter{}, influxdb.FindOptions{}, err - } - f.UserID = *id - } - - return f, *opts, err -} - -func decodePostNotificationEndpointRequest(r *http.Request) (postNotificationEndpointRequest, error) { - b, err := io.ReadAll(r.Body) - if err != nil { - return postNotificationEndpointRequest{}, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - defer r.Body.Close() - edp, err := endpoint.UnmarshalJSON(b) - if err != nil { - return postNotificationEndpointRequest{}, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - var dl decodeLabels - if err := json.Unmarshal(b, &dl); err != nil { - return postNotificationEndpointRequest{}, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - return postNotificationEndpointRequest{ - NotificationEndpoint: edp, - 
Labels: dl.Labels, - }, nil -} - -func decodePutNotificationEndpointRequest(ctx context.Context, r *http.Request) (influxdb.NotificationEndpoint, error) { - buf := new(bytes.Buffer) - if _, err := buf.ReadFrom(r.Body); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - defer r.Body.Close() - - edp, err := endpoint.UnmarshalJSON(buf.Bytes()) - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - params := httprouter.ParamsFromContext(ctx) - i, err := platform.IDFromString(params.ByName("id")) - if err != nil { - return nil, err - } - edp.SetID(*i) - return edp, nil -} - -type patchNotificationEndpointRequest struct { - platform.ID - Update influxdb.NotificationEndpointUpdate -} - -func decodePatchNotificationEndpointRequest(ctx context.Context, r *http.Request) (patchNotificationEndpointRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id, err := platform.IDFromString(params.ByName("id")) - if err != nil { - return patchNotificationEndpointRequest{}, err - } - req := patchNotificationEndpointRequest{ - ID: *id, - } - - var upd influxdb.NotificationEndpointUpdate - if err := json.NewDecoder(r.Body).Decode(&upd); err != nil { - return patchNotificationEndpointRequest{}, &errors.Error{ - Code: errors.EInvalid, - Msg: err.Error(), - } - } - if err := upd.Valid(); err != nil { - return patchNotificationEndpointRequest{}, &errors.Error{ - Code: errors.EInvalid, - Msg: err.Error(), - } - } - - req.Update = upd - return req, nil -} - -// handlePostNotificationEndpoint is the HTTP handler for the POST /api/v2/notificationEndpoints route. -func (h *NotificationEndpointHandler) handlePostNotificationEndpoint(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - edp, err := decodePostNotificationEndpointRequest(r) - if err != nil { - h.log.Debug("Failed to decode request", zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - - auth, err := pctx.GetAuthorizer(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - err = h.NotificationEndpointService.CreateNotificationEndpoint(ctx, edp.NotificationEndpoint, auth.GetUserID()) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - labels := h.mapNewNotificationEndpointLabels(ctx, edp.NotificationEndpoint, edp.Labels) - - h.log.Debug("NotificationEndpoint created", zap.String("notificationEndpoint", fmt.Sprint(edp))) - - if err := encodeResponse(ctx, w, http.StatusCreated, newNotificationEndpointResponse(edp, labels)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func (h *NotificationEndpointHandler) mapNewNotificationEndpointLabels(ctx context.Context, nre influxdb.NotificationEndpoint, labels []string) []*influxdb.Label { - var ls []*influxdb.Label - for _, sid := range labels { - var lid platform.ID - err := lid.DecodeFromString(sid) - - if err != nil { - continue - } - - label, err := h.LabelService.FindLabelByID(ctx, lid) - if err != nil { - continue - } - - mapping := influxdb.LabelMapping{ - LabelID: label.ID, - ResourceID: nre.GetID(), - ResourceType: influxdb.NotificationEndpointResourceType, - } - - err = h.LabelService.CreateLabelMapping(ctx, &mapping) - if err != nil { - continue - } - - ls = append(ls, label) - } - return ls -} - -// handlePutNotificationEndpoint is the HTTP handler for the PUT /api/v2/notificationEndpoints route. 
-func (h *NotificationEndpointHandler) handlePutNotificationEndpoint(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - edp, err := decodePutNotificationEndpointRequest(ctx, r) - if err != nil { - h.log.Debug("Failed to decode request", zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - auth, err := pctx.GetAuthorizer(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - edp, err = h.NotificationEndpointService.UpdateNotificationEndpoint(ctx, edp.GetID(), edp, auth.GetUserID()) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: edp.GetID(), ResourceType: influxdb.NotificationEndpointResourceType}) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("NotificationEndpoint replaced", zap.String("notificationEndpoint", fmt.Sprint(edp))) - - if err := encodeResponse(ctx, w, http.StatusOK, newNotificationEndpointResponse(edp, labels)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -// handlePatchNotificationEndpoint is the HTTP handler for the PATCH /api/v2/notificationEndpoints/:id route. -func (h *NotificationEndpointHandler) handlePatchNotificationEndpoint(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePatchNotificationEndpointRequest(ctx, r) - if err != nil { - h.log.Debug("Failed to decode request", zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - - edp, err := h.NotificationEndpointService.PatchNotificationEndpoint(ctx, req.ID, req.Update) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: edp.GetID(), ResourceType: influxdb.NotificationEndpointResourceType}) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("NotificationEndpoint patch", zap.String("notificationEndpoint", fmt.Sprint(edp))) - - if err := encodeResponse(ctx, w, http.StatusOK, newNotificationEndpointResponse(edp, labels)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func (h *NotificationEndpointHandler) handleDeleteNotificationEndpoint(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - i, err := decodeGetNotificationEndpointRequest(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - flds, _, err := h.NotificationEndpointService.DeleteNotificationEndpoint(ctx, i) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - keys := make([]string, len(flds)) - for k, fld := range flds { - if fld.Key == "" { - h.HandleHTTPError(ctx, &errors.Error{ - Op: "http/handleDeleteNotificationEndpoint", - Msg: "Bad Secret Key in endpoint " + i.String(), - }, w) - return - } - keys[k] = fld.Key - } - h.log.Debug("NotificationEndpoint deleted", zap.String("notificationEndpointID", fmt.Sprint(i))) - - w.WriteHeader(http.StatusNoContent) -} - -// NotificationEndpointService is an http client for the influxdb.NotificationEndpointService server implementation. -type NotificationEndpointService struct { - Client *httpc.Client -} - -// NewNotificationEndpointService constructs a new http NotificationEndpointService. 
-func NewNotificationEndpointService(client *httpc.Client) *NotificationEndpointService { - return &NotificationEndpointService{ - Client: client, - } -} - -var _ influxdb.NotificationEndpointService = (*NotificationEndpointService)(nil) - -// FindNotificationEndpointByID returns a single notification endpoint by ID. -func (s *NotificationEndpointService) FindNotificationEndpointByID(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - if !id.Valid() { - return nil, fmt.Errorf("invalid ID: please provide a valid ID") - } - var resp notificationEndpointDecoder - err := s.Client. - Get(prefixNotificationEndpoints, id.String()). - DecodeJSON(&resp). - Do(ctx) - if err != nil { - return nil, err - } - return resp.endpoint, nil -} - -// FindNotificationEndpoints returns a list of notification endpoints that match filter and the total count of matching notification endpoints. -// Additional options provide pagination & sorting. -func (s *NotificationEndpointService) FindNotificationEndpoints(ctx context.Context, filter influxdb.NotificationEndpointFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) { - params := influxdb.FindOptionParams(opt...) - if filter.ID != nil { - params = append(params, [2]string{"id", filter.ID.String()}) - } - if filter.OrgID != nil { - params = append(params, [2]string{"orgID", filter.OrgID.String()}) - } - if filter.Org != nil { - params = append(params, [2]string{"org", *filter.Org}) - } - - var resp struct { - Endpoints []notificationEndpointDecoder `json:"notificationEndpoints"` - } - err := s.Client. - Get(prefixNotificationEndpoints). - QueryParams(params...). - DecodeJSON(&resp). - Do(ctx) - if err != nil { - return nil, 0, err - } - - var endpoints []influxdb.NotificationEndpoint - for _, e := range resp.Endpoints { - endpoints = append(endpoints, e.endpoint) - } - return endpoints, len(endpoints), nil -} - -// CreateNotificationEndpoint creates a new notification endpoint and sets b.ID with the new identifier. -// TODO(@jsteenb2): this is unsatisfactory, we have no way of grabbing the new notification endpoint without -// -// serious hacky hackertoning. Put it on the list... -func (s *NotificationEndpointService) CreateNotificationEndpoint(ctx context.Context, ne influxdb.NotificationEndpoint, userID platform.ID) error { - var resp notificationEndpointDecoder - err := s.Client. - PostJSON(¬ificationEndpointEncoder{ne: ne}, prefixNotificationEndpoints). - DecodeJSON(&resp). - Do(ctx) - if err != nil { - return err - } - // :sadpanda: - ne.SetID(resp.endpoint.GetID()) - ne.SetOrgID(resp.endpoint.GetOrgID()) - return nil -} - -// UpdateNotificationEndpoint updates a single notification endpoint. -// Returns the new notification endpoint after update. -func (s *NotificationEndpointService) UpdateNotificationEndpoint(ctx context.Context, id platform.ID, ne influxdb.NotificationEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error) { - if !id.Valid() { - return nil, fmt.Errorf("invalid ID: please provide a valid ID") - } - var resp notificationEndpointDecoder - err := s.Client. - PutJSON(¬ificationEndpointEncoder{ne: ne}, prefixNotificationEndpoints, id.String()). - DecodeJSON(&resp). - Do(ctx) - if err != nil { - return nil, err - } - return resp.endpoint, nil -} - -// PatchNotificationEndpoint updates a single notification endpoint with changeset. -// Returns the new notification endpoint state after update. 
-func (s *NotificationEndpointService) PatchNotificationEndpoint(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) { - if !id.Valid() { - return nil, fmt.Errorf("invalid ID: please provide a valid ID") - } - if err := upd.Valid(); err != nil { - return nil, err - } - - var resp notificationEndpointDecoder - err := s.Client. - PatchJSON(upd, prefixNotificationEndpoints, id.String()). - DecodeJSON(&resp). - Do(ctx) - if err != nil { - return nil, err - } - return resp.endpoint, nil -} - -// DeleteNotificationEndpoint removes a notification endpoint by ID, returns secret fields, orgID for further deletion. -// TODO: axe this delete design, makes little sense in how its currently being done. Right now, as an http client, -// -// I am forced to know how the store handles this and then figure out what the server does in between me and that store, -// then see what falls out :flushed... for now returning nothing for secrets, orgID, and only returning an error. This makes -// the code/design smell super obvious imo -func (s *NotificationEndpointService) DeleteNotificationEndpoint(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error) { - if !id.Valid() { - return nil, 0, fmt.Errorf("invalid ID: please provide a valid ID") - } - err := s.Client. - Delete(prefixNotificationEndpoints, id.String()). - Do(ctx) - return nil, 0, err -} - -type notificationEndpointEncoder struct { - ne influxdb.NotificationEndpoint -} - -func (n *notificationEndpointEncoder) MarshalJSON() ([]byte, error) { - b, err := json.Marshal(n.ne) - if err != nil { - return nil, err - } - - ughhh := make(map[string]interface{}) - if err := json.Unmarshal(b, &ughhh); err != nil { - return nil, err - } - n.ne.BackfillSecretKeys() - - // this makes me queezy and altogether sad - fieldMap := map[string]string{ - "-password": "password", - "-routing-key": "routingKey", - "-token": "token", - "-username": "username", - } - for _, sec := range n.ne.SecretFields() { - var v string - if sec.Value != nil { - v = *sec.Value - } - ughhh[fieldMap[sec.Key]] = v - } - return json.Marshal(ughhh) -} - -type notificationEndpointDecoder struct { - endpoint influxdb.NotificationEndpoint -} - -func (n *notificationEndpointDecoder) UnmarshalJSON(b []byte) error { - newEndpoint, err := endpoint.UnmarshalJSON(b) - if err != nil { - return err - } - n.endpoint = newEndpoint - return nil -} diff --git a/http/notification_endpoint_test.go b/http/notification_endpoint_test.go deleted file mode 100644 index 63bfe125cd5..00000000000 --- a/http/notification_endpoint_test.go +++ /dev/null @@ -1,1156 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "path" - "testing" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - pcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/endpoint/service" - endpointTesting "github.com/influxdata/influxdb/v2/notification/endpoint/service/testing" - 
"github.com/influxdata/influxdb/v2/pkg/testttp" - "github.com/influxdata/influxdb/v2/secret" - "github.com/influxdata/influxdb/v2/tenant" - influxTesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -// NewMockNotificationEndpointBackend returns a NotificationEndpointBackend with mock services. -func NewMockNotificationEndpointBackend(t *testing.T) *NotificationEndpointBackend { - return &NotificationEndpointBackend{ - log: zaptest.NewLogger(t), - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(t)), - NotificationEndpointService: &mock.NotificationEndpointService{}, - UserResourceMappingService: mock.NewUserResourceMappingService(), - LabelService: mock.NewLabelService(), - UserService: mock.NewUserService(), - } -} - -func TestService_handleGetNotificationEndpoints(t *testing.T) { - type fields struct { - NotificationEndpointService influxdb.NotificationEndpointService - LabelService influxdb.LabelService - } - type args struct { - queryParams map[string][]string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get all notification endpoints", - fields: fields{ - &mock.NotificationEndpointService{ - FindNotificationEndpointsF: func(ctx context.Context, filter influxdb.NotificationEndpointFilter, opts ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) { - return []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: influxTesting.MustIDBase16Ptr("0b501e7e557ab1ed"), - Name: "hello", - OrgID: influxTesting.MustIDBase16Ptr("50f7ba1150f7ba11"), - Status: influxdb.Active, - }, - URL: "http://example.com", - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: influxTesting.MustIDBase16Ptr("c0175f0077a77005"), - Name: "example", - OrgID: influxTesting.MustIDBase16Ptr("7e55e118dbabb1ed"), - Status: influxdb.Inactive, - }, - URL: "example.com", - Username: influxdb.SecretField{Key: "http-user-key"}, - Password: influxdb.SecretField{Key: "http-password-key"}, - AuthMethod: "basic", - Method: "POST", - ContentTemplate: "template", - Headers: map[string]string{ - "x-header-1": "header 1", - "x-header-2": "header 2", - }, - }, - }, 2, nil - }, - }, - &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - labels := []*influxdb.Label{ - { - ID: influxTesting.MustIDBase16("fc3dc670a4be9b9a"), - Name: "label", - Properties: map[string]string{ - "color": "fff000", - }, - }, - } - return labels, nil - }, - }, - }, - args: args{ - map[string][]string{ - "limit": {"1"}, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` - { - "links": { - "self": "/api/v2/notificationEndpoints?descending=false&limit=1&offset=0", - "next": "/api/v2/notificationEndpoints?descending=false&limit=1&offset=1" - }, - "notificationEndpoints": [ - { - "createdAt": "0001-01-01T00:00:00Z", - "id": "0b501e7e557ab1ed", - "labels": [ - { - "id": "fc3dc670a4be9b9a", - "name": "label", - "properties": { - "color": "fff000" - } - } - ], - "links": { - "labels": "/api/v2/notificationEndpoints/0b501e7e557ab1ed/labels", - "members": "/api/v2/notificationEndpoints/0b501e7e557ab1ed/members", - "owners": "/api/v2/notificationEndpoints/0b501e7e557ab1ed/owners", - "self": 
"/api/v2/notificationEndpoints/0b501e7e557ab1ed" - }, - "name": "hello", - "orgID": "50f7ba1150f7ba11", - "status": "active", - "type": "slack", - "token": "", - "updatedAt": "0001-01-01T00:00:00Z", - "url": "http://example.com" - }, - { - "createdAt": "0001-01-01T00:00:00Z", - "url": "example.com", - "id": "c0175f0077a77005", - "labels": [ - { - "id": "fc3dc670a4be9b9a", - "name": "label", - "properties": { - "color": "fff000" - } - } - ], - "links": { - "labels": "/api/v2/notificationEndpoints/c0175f0077a77005/labels", - "members": "/api/v2/notificationEndpoints/c0175f0077a77005/members", - "owners": "/api/v2/notificationEndpoints/c0175f0077a77005/owners", - "self": "/api/v2/notificationEndpoints/c0175f0077a77005" - }, - "name": "example", - "orgID": "7e55e118dbabb1ed", - "authMethod": "basic", - "contentTemplate": "template", - "password": "secret: http-password-key", - "token":"", - "method": "POST", - "status": "inactive", - "type": "http", - "headers": { - "x-header-1": "header 1", - "x-header-2": "header 2" - }, - "updatedAt": "0001-01-01T00:00:00Z", - "username": "secret: http-user-key" - } - ] - }`, - }, - }, - { - name: "get all notification endpoints when there are none", - fields: fields{ - &mock.NotificationEndpointService{ - FindNotificationEndpointsF: func(ctx context.Context, filter influxdb.NotificationEndpointFilter, opts ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) { - return []influxdb.NotificationEndpoint{}, 0, nil - }, - }, - &mock.LabelService{}, - }, - args: args{ - map[string][]string{ - "limit": {"1"}, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/notificationEndpoints?descending=false&limit=1&offset=0" - }, - "notificationEndpoints": [] -}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - notificationEndpointBackend := NewMockNotificationEndpointBackend(t) - notificationEndpointBackend.NotificationEndpointService = tt.fields.NotificationEndpointService - notificationEndpointBackend.LabelService = tt.fields.LabelService - h := NewNotificationEndpointHandler(zaptest.NewLogger(t), notificationEndpointBackend) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - qp := r.URL.Query() - for k, vs := range tt.args.queryParams { - for _, v := range vs { - qp.Add(k, v) - } - } - r.URL.RawQuery = qp.Encode() - r = r.WithContext(pcontext.SetAuthorizer(r.Context(), &influxdb.Session{UserID: user1ID})) - - w := httptest.NewRecorder() - - h.handleGetNotificationEndpoints(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetNotificationEndpoints() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetNotificationEndpoints() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil || tt.wants.body != "" && !eq { - t.Errorf("%q. 
handleGetNotificationEndpoints() = ***%v***", tt.name, diff) - } - }) - } -} - -func TestService_handleGetNotificationEndpoint(t *testing.T) { - type fields struct { - NotificationEndpointService influxdb.NotificationEndpointService - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get a notification endpoint by id", - fields: fields{ - &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - if id == influxTesting.MustIDBase16("020f755c3c082000") { - return &endpoint.HTTP{ - Base: endpoint.Base{ - ID: influxTesting.MustIDBase16Ptr("020f755c3c082000"), - OrgID: influxTesting.MustIDBase16Ptr("020f755c3c082000"), - Name: "hello", - Status: influxdb.Active, - }, - URL: "example.com", - Username: influxdb.SecretField{Key: "http-user-key"}, - Password: influxdb.SecretField{Key: "http-password-key"}, - AuthMethod: "basic", - Method: "POST", - ContentTemplate: "template", - }, nil - } - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` - { - "links": { - "self": "/api/v2/notificationEndpoints/020f755c3c082000", - "labels": "/api/v2/notificationEndpoints/020f755c3c082000/labels", - "members": "/api/v2/notificationEndpoints/020f755c3c082000/members", - "owners": "/api/v2/notificationEndpoints/020f755c3c082000/owners" - }, - "labels": [], - "authMethod": "basic", - "method": "POST", - "contentTemplate": "template", - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "id": "020f755c3c082000", - "url": "example.com", - "username": "secret: http-user-key", - "password": "secret: http-password-key", - "token":"", - "status": "active", - "type": "http", - "orgID": "020f755c3c082000", - "name": "hello" - } - `, - }, - }, - { - name: "not found", - fields: fields{ - &mock.NotificationEndpointService{ - FindNotificationEndpointByIDF: func(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "notification endpoint not found", - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - notificationEndpointBackend := NewMockNotificationEndpointBackend(t) - notificationEndpointBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - notificationEndpointBackend.NotificationEndpointService = tt.fields.NotificationEndpointService - h := NewNotificationEndpointHandler(zaptest.NewLogger(t), notificationEndpointBackend) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - - w := httptest.NewRecorder() - - h.handleGetNotificationEndpoint(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - t.Logf(res.Header.Get("X-Influx-Error")) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. 
handleGetNotificationEndpoint() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetNotificationEndpoint() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetNotificationEndpoint(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handleGetNotificationEndpoint() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handlePostNotificationEndpoint(t *testing.T) { - type fields struct { - Secrets map[string]string - SecretService influxdb.SecretService - NotificationEndpointService influxdb.NotificationEndpointService - OrganizationService influxdb.OrganizationService - } - type args struct { - endpoint interface{} - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "create a new notification endpoint", - fields: fields{ - Secrets: map[string]string{}, - NotificationEndpointService: &mock.NotificationEndpointService{ - CreateNotificationEndpointF: func(ctx context.Context, edp influxdb.NotificationEndpoint, userID platform.ID) error { - edp.SetID(influxTesting.MustIDBase16("020f755c3c082000")) - edp.BackfillSecretKeys() - return nil - }, - }, - OrganizationService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, f influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ID: influxTesting.MustIDBase16("6f626f7274697320")}, nil - }, - }, - }, - args: args{ - endpoint: map[string]interface{}{ - "name": "hello", - "type": "http", - "orgID": "6f626f7274697320", - "description": "desc1", - "status": "active", - "url": "example.com", - "username": "user1", - "password": "password1", - "authMethod": "basic", - "method": "POST", - "contentTemplate": "template", - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/notificationEndpoints/020f755c3c082000", - "labels": "/api/v2/notificationEndpoints/020f755c3c082000/labels", - "members": "/api/v2/notificationEndpoints/020f755c3c082000/members", - "owners": "/api/v2/notificationEndpoints/020f755c3c082000/owners" - }, - "url": "example.com", - "status": "active", - "username": "secret: 020f755c3c082000-username", - "password": "secret: 020f755c3c082000-password", - "token":"", - "authMethod": "basic", - "contentTemplate": "template", - "type": "http", - "method": "POST", - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "id": "020f755c3c082000", - "orgID": "6f626f7274697320", - "name": "hello", - "description": "desc1", - "labels": [] -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - notificationEndpointBackend := NewMockNotificationEndpointBackend(t) - notificationEndpointBackend.NotificationEndpointService = tt.fields.NotificationEndpointService - - testttp. - PostJSON(t, prefixNotificationEndpoints, tt.args.endpoint). - WrapCtx(authCtxFn(user1ID)). - Do(NewNotificationEndpointHandler(zaptest.NewLogger(t), notificationEndpointBackend)). - ExpectHeader("Content-Type", tt.wants.contentType). - ExpectStatus(tt.wants.statusCode). 
- ExpectBody(func(body *bytes.Buffer) { - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(body.String(), tt.wants.body); err != nil { - t.Errorf("%q, handlePostNotificationEndpoint(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handlePostNotificationEndpoint() = ***%s***", tt.name, diff) - } - } - }) - - }) - } -} - -func TestService_handleDeleteNotificationEndpoint(t *testing.T) { - type fields struct { - NotificationEndpointService influxdb.NotificationEndpointService - } - type args struct { - id string - } - type wants struct { - statusCode int - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "remove a notification endpoint by id", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - DeleteNotificationEndpointF: func(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error) { - if id == influxTesting.MustIDBase16("020f755c3c082000") { - return []influxdb.SecretField{ - {Key: "k1"}, - }, influxTesting.MustIDBase16("020f755c3c082001"), nil - } - - return nil, 0, fmt.Errorf("wrong id") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNoContent, - }, - }, - { - name: "notification endpoint not found", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - DeleteNotificationEndpointF: func(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error) { - return nil, 0, &errors.Error{ - Code: errors.ENotFound, - Msg: "notification endpoint not found", - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - notificationEndpointBackend := NewMockNotificationEndpointBackend(t) - notificationEndpointBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - notificationEndpointBackend.NotificationEndpointService = tt.fields.NotificationEndpointService - - testttp. - Delete(t, path.Join(prefixNotificationEndpoints, tt.args.id)). - Do(NewNotificationEndpointHandler(zaptest.NewLogger(t), notificationEndpointBackend)). - ExpectStatus(tt.wants.statusCode). - ExpectBody(func(buf *bytes.Buffer) { - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(buf.String(), tt.wants.body); err != nil { - t.Errorf("%q, handleDeleteNotificationEndpoint(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handleDeleteNotificationEndpoint() = ***%s***", tt.name, diff) - } - } - }) - }) - } -} - -func TestService_handlePatchNotificationEndpoint(t *testing.T) { - type fields struct { - NotificationEndpointService influxdb.NotificationEndpointService - } - type args struct { - id string - name string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "update a notification endpoint name", - fields: fields{ - &mock.NotificationEndpointService{ - PatchNotificationEndpointF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) { - if id == influxTesting.MustIDBase16("020f755c3c082000") { - d := &endpoint.Slack{ - Base: endpoint.Base{ - ID: influxTesting.MustIDBase16Ptr("020f755c3c082000"), - Name: "hello", - OrgID: influxTesting.MustIDBase16Ptr("020f755c3c082000"), - Status: influxdb.Active, - }, - URL: "http://example.com", - } - - if upd.Name != nil { - d.Name = *upd.Name - } - - return d, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - name: "example", - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` - { - "links": { - "self": "/api/v2/notificationEndpoints/020f755c3c082000", - "labels": "/api/v2/notificationEndpoints/020f755c3c082000/labels", - "members": "/api/v2/notificationEndpoints/020f755c3c082000/members", - "owners": "/api/v2/notificationEndpoints/020f755c3c082000/owners" - }, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "id": "020f755c3c082000", - "orgID": "020f755c3c082000", - "url": "http://example.com", - "name": "example", - "status": "active", - "type": "slack", - "token": "", - "labels": [] - } - `, - }, - }, - { - name: "notification endpoint not found", - fields: fields{ - &mock.NotificationEndpointService{ - PatchNotificationEndpointF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "notification endpoint not found", - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - name: "hello", - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - notificationEndpointBackend := NewMockNotificationEndpointBackend(t) - notificationEndpointBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - notificationEndpointBackend.NotificationEndpointService = tt.fields.NotificationEndpointService - h := NewNotificationEndpointHandler(zaptest.NewLogger(t), notificationEndpointBackend) - - upd := influxdb.NotificationEndpointUpdate{} - if tt.args.name != "" { - upd.Name = &tt.args.name - } - - b, err := json.Marshal(upd) - if err != nil { - t.Fatalf("failed to unmarshal notification endpoint update: %v", err) - } - - r := httptest.NewRequest("GET", "http://any.url", bytes.NewReader(b)) - r = r.WithContext(pcontext.SetAuthorizer(r.Context(), &influxdb.Session{UserID: user1ID})) - - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - - w := httptest.NewRecorder() - - h.handlePatchNotificationEndpoint(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - 
if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePatchNotificationEndpoint() = %v, want %v %v", tt.name, res.StatusCode, tt.wants.statusCode, w.Header()) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePatchNotificationEndpoint() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePatchNotificationEndpoint(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handlePatchNotificationEndpoint() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handleUpdateNotificationEndpoint(t *testing.T) { - type fields struct { - NotificationEndpointService influxdb.NotificationEndpointService - } - type args struct { - id string - edp map[string]interface{} - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "update a notification endpoint name", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - UpdateNotificationEndpointF: func(ctx context.Context, id platform.ID, edp influxdb.NotificationEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error) { - if id == influxTesting.MustIDBase16("020f755c3c082000") { - edp.SetID(id) - edp.BackfillSecretKeys() - return edp, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - edp: map[string]interface{}{ - "name": "example", - "status": "active", - "orgID": "020f755c3c082001", - "type": "slack", - "token": "", - "url": "example.com", - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` - { - "links": { - "self": "/api/v2/notificationEndpoints/020f755c3c082000", - "labels": "/api/v2/notificationEndpoints/020f755c3c082000/labels", - "members": "/api/v2/notificationEndpoints/020f755c3c082000/members", - "owners": "/api/v2/notificationEndpoints/020f755c3c082000/owners" - }, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "id": "020f755c3c082000", - "orgID": "020f755c3c082001", - "name": "example", - "url": "example.com", - "type": "slack", - "status": "active", - "token": "", - "labels": [] - } - `, - }, - }, - { - name: "notification endpoint not found", - fields: fields{ - NotificationEndpointService: &mock.NotificationEndpointService{ - UpdateNotificationEndpointF: func(ctx context.Context, id platform.ID, edp influxdb.NotificationEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "notification endpoint not found", - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - edp: map[string]interface{}{ - "type": "slack", - "name": "example", - }, - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - notificationEndpointBackend := NewMockNotificationEndpointBackend(t) - notificationEndpointBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - notificationEndpointBackend.NotificationEndpointService = tt.fields.NotificationEndpointService - - resp := testttp. - PutJSON(t, path.Join(prefixNotificationEndpoints, tt.args.id), tt.args.edp). - WrapCtx(authCtxFn(user1ID)). 
- Do(NewNotificationEndpointHandler(zaptest.NewLogger(t), notificationEndpointBackend)). - ExpectStatus(tt.wants.statusCode). - ExpectBody(func(body *bytes.Buffer) { - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(body.String(), tt.wants.body); err != nil { - t.Errorf("%q, handlePutNotificationEndpoint(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handlePutNotificationEndpoint() = ***%s***", tt.name, diff) - } - } - }) - if tt.wants.contentType != "" { - resp.ExpectHeader("Content-Type", tt.wants.contentType) - } - }) - } -} - -func TestService_handlePostNotificationEndpointMember(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - notificationEndpointID string - user *influxdb.User - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "add a notification endpoint member", - fields: fields{ - UserService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ - ID: id, - Name: "name", - Status: influxdb.Active, - }, nil - }, - }, - }, - args: args{ - notificationEndpointID: "020f755c3c082000", - user: &influxdb.User{ - ID: influxTesting.MustIDBase16("6f626f7274697320"), - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "logs": "/api/v2/users/6f626f7274697320/logs", - "self": "/api/v2/users/6f626f7274697320" - }, - "role": "member", - "id": "6f626f7274697320", - "name": "name", - "status": "active" -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - notificationEndpointBackend := NewMockNotificationEndpointBackend(t) - notificationEndpointBackend.UserService = tt.fields.UserService - h := NewNotificationEndpointHandler(zaptest.NewLogger(t), notificationEndpointBackend) - - b, err := json.Marshal(tt.args.user) - if err != nil { - t.Fatalf("failed to marshal user: %v", err) - } - - path := fmt.Sprintf("/api/v2/notificationEndpoints/%s/members", tt.args.notificationEndpointID) - r := httptest.NewRequest("POST", path, bytes.NewReader(b)) - r = r.WithContext(pcontext.SetAuthorizer(r.Context(), &influxdb.Session{UserID: user1ID})) - w := httptest.NewRecorder() - - h.ServeHTTP(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePostNotificationEndpointMember() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePostNotificationEndpointMember() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePostNotificationEndpointMember(). error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. 
handlePostNotificationEndpointMember() = ***%s***", tt.name, diff) - } - }) - } -} - -func TestService_handlePostNotificationEndpointOwner(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - notificationEndpointID string - user *influxdb.User - } - type wants struct { - statusCode int - contentType string - body string - } - - cases := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "add a notification endpoint owner", - fields: fields{ - UserService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ - ID: id, - Name: "name", - Status: influxdb.Active, - }, nil - }, - }, - }, - args: args{ - notificationEndpointID: "020f755c3c082000", - user: &influxdb.User{ - ID: influxTesting.MustIDBase16("6f626f7274697320"), - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "logs": "/api/v2/users/6f626f7274697320/logs", - "self": "/api/v2/users/6f626f7274697320" - }, - "role": "owner", - "id": "6f626f7274697320", - "name": "name", - "status": "active" -} -`, - }, - }, - } - - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - notificationEndpointBackend := NewMockNotificationEndpointBackend(t) - notificationEndpointBackend.UserService = tt.fields.UserService - h := NewNotificationEndpointHandler(zaptest.NewLogger(t), notificationEndpointBackend) - - b, err := json.Marshal(tt.args.user) - if err != nil { - t.Fatalf("failed to marshal user: %v", err) - } - - path := fmt.Sprintf("/api/v2/notificationEndpoints/%s/owners", tt.args.notificationEndpointID) - r := httptest.NewRequest("POST", path, bytes.NewReader(b)) - w := httptest.NewRecorder() - - h.ServeHTTP(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePostNotificationEndpointOwner() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePostNotificationEndpointOwner() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePostNotificationEndpointOwner(). error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. 
handlePostNotificationEndpointOwner() = ***%s***", tt.name, diff) - } - }) - } -} - -func initNotificationEndpointService(f endpointTesting.NotificationEndpointFields, t *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()) { - ctx := context.Background() - store := influxTesting.NewTestInmemStore(t) - logger := zaptest.NewLogger(t) - - tenantStore := tenant.NewStore(store) - tenantService := tenant.NewService(tenantStore) - - kvSvc := kv.NewService(logger, store, tenantService) - kvSvc.IDGenerator = f.IDGenerator - kvSvc.TimeGenerator = f.TimeGenerator - - secretStore, err := secret.NewStore(store) - require.NoError(t, err) - secretSvc := secret.NewService(secretStore) - - endpointStore := service.NewStore(store) - endpointStore.IDGenerator = f.IDGenerator - endpointStore.TimeGenerator = f.TimeGenerator - endpointService := service.New(endpointStore, secretSvc) - - for _, o := range f.Orgs { - withOrgID(tenantStore, o.ID, func() { - if err := tenantService.CreateOrganization(ctx, o); err != nil { - t.Fatalf("failed to populate org: %v", err) - } - }) - } - - for _, v := range f.NotificationEndpoints { - if err := endpointStore.PutNotificationEndpoint(ctx, v); err != nil { - t.Fatalf("failed to update endpoint: %v", err) - } - } - - fakeBackend := NewMockNotificationEndpointBackend(t) - fakeBackend.NotificationEndpointService = endpointService - fakeBackend.UserResourceMappingService = tenantService - fakeBackend.UserService = tenantService - - handler := NewNotificationEndpointHandler(zaptest.NewLogger(t), fakeBackend) - auth := func(next http.Handler) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - r = r.WithContext(pcontext.SetAuthorizer(r.Context(), &influxdb.Session{UserID: user1ID})) - next.ServeHTTP(w, r) - } - } - server := httptest.NewServer(auth(handler)) - done := server.Close - - client := mustNewHTTPClient(t, server.URL, "") - - return NewNotificationEndpointService(client), secretSvc, done -} - -func TestNotificationEndpointService(t *testing.T) { - t.Skip("wonky") - - tests := []struct { - name string - testFn func(init func(endpointTesting.NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), t *testing.T) - }{ - { - name: "CreateNotificationEndpoint", - testFn: endpointTesting.CreateNotificationEndpoint, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.testFn(initNotificationEndpointService, t) - }) - } -} - -func authCtxFn(userID platform.ID) func(context.Context) context.Context { - return func(ctx context.Context) context.Context { - return pcontext.SetAuthorizer(ctx, &influxdb.Session{UserID: userID}) - } -} - -func withOrgID(store *tenant.Store, orgID platform.ID, fn func()) { - backup := store.OrgIDGen - defer func() { store.OrgIDGen = backup }() - - store.OrgIDGen = mock.NewStaticIDGenerator(orgID) - - fn() -} diff --git a/http/notification_rule.go b/http/notification_rule.go deleted file mode 100644 index 67a8c3426cc..00000000000 --- a/http/notification_rule.go +++ /dev/null @@ -1,889 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "net/http" - "path" - "time" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - pctx "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - 
"github.com/influxdata/influxdb/v2/notification/rule" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "go.uber.org/zap" -) - -var _ influxdb.NotificationRuleStore = (*NotificationRuleService)(nil) - -type statusDecode struct { - Status *influxdb.Status `json:"status"` -} - -// NotificationRuleBackend is all services and associated parameters required to construct -// the NotificationRuleBackendHandler. -type NotificationRuleBackend struct { - errors.HTTPErrorHandler - log *zap.Logger - - AlgoWProxy FeatureProxyHandler - NotificationRuleStore influxdb.NotificationRuleStore - NotificationEndpointService influxdb.NotificationEndpointService - UserResourceMappingService influxdb.UserResourceMappingService - LabelService influxdb.LabelService - UserService influxdb.UserService - OrganizationService influxdb.OrganizationService - TaskService taskmodel.TaskService -} - -// NewNotificationRuleBackend returns a new instance of NotificationRuleBackend. -func NewNotificationRuleBackend(log *zap.Logger, b *APIBackend) *NotificationRuleBackend { - return &NotificationRuleBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - AlgoWProxy: b.AlgoWProxy, - - NotificationRuleStore: b.NotificationRuleStore, - NotificationEndpointService: b.NotificationEndpointService, - UserResourceMappingService: b.UserResourceMappingService, - LabelService: b.LabelService, - UserService: b.UserService, - OrganizationService: b.OrganizationService, - TaskService: b.TaskService, - } -} - -// NotificationRuleHandler is the handler for the notification rule service -type NotificationRuleHandler struct { - *httprouter.Router - errors.HTTPErrorHandler - log *zap.Logger - - NotificationRuleStore influxdb.NotificationRuleStore - NotificationEndpointService influxdb.NotificationEndpointService - UserResourceMappingService influxdb.UserResourceMappingService - LabelService influxdb.LabelService - UserService influxdb.UserService - OrganizationService influxdb.OrganizationService - TaskService taskmodel.TaskService -} - -const ( - prefixNotificationRules = "/api/v2/notificationRules" - notificationRulesIDPath = "/api/v2/notificationRules/:id" - notificationRulesIDQueryPath = "/api/v2/notificationRules/:id/query" - notificationRulesIDMembersPath = "/api/v2/notificationRules/:id/members" - notificationRulesIDMembersIDPath = "/api/v2/notificationRules/:id/members/:userID" - notificationRulesIDOwnersPath = "/api/v2/notificationRules/:id/owners" - notificationRulesIDOwnersIDPath = "/api/v2/notificationRules/:id/owners/:userID" - notificationRulesIDLabelsPath = "/api/v2/notificationRules/:id/labels" - notificationRulesIDLabelsIDPath = "/api/v2/notificationRules/:id/labels/:lid" -) - -// NewNotificationRuleHandler returns a new instance of NotificationRuleHandler. 
-func NewNotificationRuleHandler(log *zap.Logger, b *NotificationRuleBackend) *NotificationRuleHandler { - h := &NotificationRuleHandler{ - Router: NewRouter(b.HTTPErrorHandler), - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - - NotificationRuleStore: b.NotificationRuleStore, - NotificationEndpointService: b.NotificationEndpointService, - UserResourceMappingService: b.UserResourceMappingService, - LabelService: b.LabelService, - UserService: b.UserService, - OrganizationService: b.OrganizationService, - TaskService: b.TaskService, - } - - h.Handler("POST", prefixNotificationRules, withFeatureProxy(b.AlgoWProxy, http.HandlerFunc(h.handlePostNotificationRule))) - h.HandlerFunc("GET", prefixNotificationRules, h.handleGetNotificationRules) - h.HandlerFunc("GET", notificationRulesIDPath, h.handleGetNotificationRule) - h.HandlerFunc("GET", notificationRulesIDQueryPath, h.handleGetNotificationRuleQuery) - h.HandlerFunc("DELETE", notificationRulesIDPath, h.handleDeleteNotificationRule) - h.Handler("PUT", notificationRulesIDPath, withFeatureProxy(b.AlgoWProxy, http.HandlerFunc(h.handlePutNotificationRule))) - h.Handler("PATCH", notificationRulesIDPath, withFeatureProxy(b.AlgoWProxy, http.HandlerFunc(h.handlePatchNotificationRule))) - - memberBackend := MemberBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "member")), - ResourceType: influxdb.NotificationRuleResourceType, - UserType: influxdb.Member, - UserResourceMappingService: b.UserResourceMappingService, - UserService: b.UserService, - } - h.HandlerFunc("POST", notificationRulesIDMembersPath, newPostMemberHandler(memberBackend)) - h.HandlerFunc("GET", notificationRulesIDMembersPath, newGetMembersHandler(memberBackend)) - h.HandlerFunc("DELETE", notificationRulesIDMembersIDPath, newDeleteMemberHandler(memberBackend)) - - ownerBackend := MemberBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "member")), - ResourceType: influxdb.NotificationRuleResourceType, - UserType: influxdb.Owner, - UserResourceMappingService: b.UserResourceMappingService, - UserService: b.UserService, - } - h.HandlerFunc("POST", notificationRulesIDOwnersPath, newPostMemberHandler(ownerBackend)) - h.HandlerFunc("GET", notificationRulesIDOwnersPath, newGetMembersHandler(ownerBackend)) - h.HandlerFunc("DELETE", notificationRulesIDOwnersIDPath, newDeleteMemberHandler(ownerBackend)) - - labelBackend := &LabelBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "label")), - LabelService: b.LabelService, - ResourceType: influxdb.NotificationRuleResourceType, - } - h.HandlerFunc("GET", notificationRulesIDLabelsPath, newGetLabelsHandler(labelBackend)) - h.HandlerFunc("POST", notificationRulesIDLabelsPath, newPostLabelHandler(labelBackend)) - h.HandlerFunc("DELETE", notificationRulesIDLabelsIDPath, newDeleteLabelHandler(labelBackend)) - - return h -} - -type notificationRuleLinks struct { - Self string `json:"self"` - Labels string `json:"labels"` - Members string `json:"members"` - Owners string `json:"owners"` - Query string `json:"query"` -} - -type notificationRuleResponse struct { - influxdb.NotificationRule - Labels []influxdb.Label `json:"labels"` - Links notificationRuleLinks `json:"links"` - Status string `json:"status"` - LatestCompleted time.Time `json:"latestCompleted,omitempty"` - LatestScheduled time.Time `json:"latestScheduled,omitempty"` - LastRunStatus string `json:"LastRunStatus,omitempty"` - LastRunError string `json:"LastRunError,omitempty"` - 
TaskID platform.ID `json:"taskID,omitempty"` -} - -type ruleResponseMeta struct { - Labels []influxdb.Label `json:"labels"` - Links notificationRuleLinks `json:"links"` - Status string `json:"status"` - LatestCompleted time.Time `json:"latestCompleted,omitempty"` - LatestScheduled time.Time `json:"latestScheduled,omitempty"` - LastRunStatus string `json:"lastRunStatus,omitempty"` - LastRunError string `json:"lastRunError,omitempty"` - TaskID platform.ID `json:"taskID,omitempty"` -} - -func (resp *notificationRuleResponse) UnmarshalJSON(v []byte) (err error) { - var responseMeta ruleResponseMeta - if err = json.Unmarshal(v, &responseMeta); err != nil { - return - } - - resp.Labels = responseMeta.Labels - resp.Links = responseMeta.Links - resp.Status = responseMeta.Status - resp.LatestCompleted = responseMeta.LatestCompleted - resp.LatestScheduled = responseMeta.LatestScheduled - resp.LastRunStatus = responseMeta.LastRunStatus - resp.LastRunError = responseMeta.LastRunError - - resp.NotificationRule, err = rule.UnmarshalJSON(v) - return -} - -func (resp notificationRuleResponse) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(resp.NotificationRule) - if err != nil { - return nil, err - } - - b2, err := json.Marshal(ruleResponseMeta{ - Links: resp.Links, - Labels: resp.Labels, - Status: resp.Status, - LatestCompleted: resp.LatestCompleted, - LatestScheduled: resp.LatestScheduled, - LastRunStatus: resp.LastRunStatus, - LastRunError: resp.LastRunError, - TaskID: resp.TaskID, - }) - if err != nil { - return nil, err - } - - return []byte(string(b1[:len(b1)-1]) + ", " + string(b2[1:])), nil -} - -type notificationRulesResponse struct { - NotificationRules []*notificationRuleResponse `json:"notificationRules"` - Links *influxdb.PagingLinks `json:"links"` -} - -func (h *NotificationRuleHandler) newNotificationRuleResponse(ctx context.Context, nr influxdb.NotificationRule, labels []*influxdb.Label) (*notificationRuleResponse, error) { - // TODO(desa): this should be handled in the rule service and not exposed in http land, but is currently blocking the FE. 
https://github.com/influxdata/influxdb/issues/15259 - t, err := h.TaskService.FindTaskByID(ctx, nr.GetTaskID()) - if err != nil { - return nil, err - } - - res := &notificationRuleResponse{ - NotificationRule: nr, - Links: notificationRuleLinks{ - Self: fmt.Sprintf("/api/v2/notificationRules/%s", nr.GetID()), - Labels: fmt.Sprintf("/api/v2/notificationRules/%s/labels", nr.GetID()), - Members: fmt.Sprintf("/api/v2/notificationRules/%s/members", nr.GetID()), - Owners: fmt.Sprintf("/api/v2/notificationRules/%s/owners", nr.GetID()), - Query: fmt.Sprintf("/api/v2/notificationRules/%s/query", nr.GetID()), - }, - Labels: []influxdb.Label{}, - Status: t.Status, - LatestCompleted: t.LatestCompleted, - LatestScheduled: t.LatestScheduled, - LastRunStatus: t.LastRunStatus, - LastRunError: t.LastRunError, - TaskID: t.ID, - } - - for _, l := range labels { - res.Labels = append(res.Labels, *l) - } - - return res, nil -} - -func (h *NotificationRuleHandler) newNotificationRulesResponse(ctx context.Context, nrs []influxdb.NotificationRule, labelService influxdb.LabelService, f influxdb.PagingFilter, opts influxdb.FindOptions) (*notificationRulesResponse, error) { - resp := &notificationRulesResponse{ - NotificationRules: []*notificationRuleResponse{}, - Links: influxdb.NewPagingLinks(prefixNotificationRules, opts, f, len(nrs)), - } - for _, nr := range nrs { - labels, _ := labelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: nr.GetID(), ResourceType: influxdb.NotificationRuleResourceType}) - res, err := h.newNotificationRuleResponse(ctx, nr, labels) - if err != nil { - continue - } - resp.NotificationRules = append(resp.NotificationRules, res) - } - return resp, nil -} - -func decodeGetNotificationRuleRequest(ctx context.Context, r *http.Request) (i platform.ID, err error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return i, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - if err := i.DecodeFromString(id); err != nil { - return i, err - } - return i, nil -} - -func (h *NotificationRuleHandler) handleGetNotificationRules(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - filter, opts, err := decodeNotificationRuleFilter(ctx, r) - if err != nil { - h.log.Debug("Failed to decode request", zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - nrs, _, err := h.NotificationRuleStore.FindNotificationRules(ctx, *filter, *opts) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Notification rules retrieved", zap.String("notificationRules", fmt.Sprint(nrs))) - - res, err := h.newNotificationRulesResponse(ctx, nrs, h.LabelService, filter, *opts) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, res); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func (h *NotificationRuleHandler) handleGetNotificationRuleQuery(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := decodeGetNotificationRuleRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - nr, err := h.NotificationRuleStore.FindNotificationRuleByID(ctx, id) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - edp, err := h.NotificationEndpointService.FindNotificationEndpointByID(ctx, nr.GetEndpointID()) - if err != nil { - h.HandleHTTPError(ctx, &errors.Error{ - Code: errors.EInternal, - Op: "http/handleGetNotificationRuleQuery", - Err: err, - }, w) - return - } - 
flux, err := nr.GenerateFlux(edp) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Notification rule query retrieved", zap.String("notificationRuleQuery", fmt.Sprint(flux))) - if err := encodeResponse(ctx, w, http.StatusOK, newFluxResponse(flux)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func (h *NotificationRuleHandler) handleGetNotificationRule(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := decodeGetNotificationRuleRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - nr, err := h.NotificationRuleStore.FindNotificationRuleByID(ctx, id) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Notification rule retrieved", zap.String("notificationRule", fmt.Sprint(nr))) - - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: nr.GetID(), ResourceType: influxdb.NotificationRuleResourceType}) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - res, err := h.newNotificationRuleResponse(ctx, nr, labels) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, res); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func decodeNotificationRuleFilter(ctx context.Context, r *http.Request) (*influxdb.NotificationRuleFilter, *influxdb.FindOptions, error) { - f := &influxdb.NotificationRuleFilter{} - urm, err := decodeUserResourceMappingFilter(ctx, r, influxdb.NotificationRuleResourceType) - if err == nil { - f.UserResourceMappingFilter = *urm - } - - opts, err := influxdb.DecodeFindOptions(r) - if err != nil { - return f, nil, err - } - - q := r.URL.Query() - if orgIDStr := q.Get("orgID"); orgIDStr != "" { - orgID, err := platform.IDFromString(orgIDStr) - if err != nil { - return f, opts, &errors.Error{ - Code: errors.EInvalid, - Msg: "orgID is invalid", - Err: err, - } - } - f.OrgID = orgID - } else if orgNameStr := q.Get("org"); orgNameStr != "" { - f.Organization = &orgNameStr - } - - for _, tag := range q["tag"] { - tp, err := influxdb.NewTag(tag) - // ignore malformed tag pairs - if err == nil { - f.Tags = append(f.Tags, tp) - } - } - - return f, opts, err -} - -func decodeUserResourceMappingFilter(ctx context.Context, r *http.Request, typ influxdb.ResourceType) (*influxdb.UserResourceMappingFilter, error) { - q := r.URL.Query() - f := &influxdb.UserResourceMappingFilter{ - ResourceType: typ, - } - if idStr := q.Get("resourceID"); idStr != "" { - id, err := platform.IDFromString(idStr) - if err != nil { - return nil, err - } - f.ResourceID = *id - } - - if idStr := q.Get("userID"); idStr != "" { - id, err := platform.IDFromString(idStr) - if err != nil { - return nil, err - } - f.UserID = *id - } - return f, nil -} - -type postNotificationRuleRequest struct { - influxdb.NotificationRuleCreate - Labels []string `json:"labels"` -} - -func decodePostNotificationRuleRequest(ctx context.Context, r *http.Request) (postNotificationRuleRequest, error) { - var pnrr postNotificationRuleRequest - var sts statusDecode - var dl decodeLabels - - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(r.Body) - if err != nil { - return pnrr, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - defer r.Body.Close() - - nr, err := rule.UnmarshalJSON(buf.Bytes()) - if err != nil { - return pnrr, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - if err := json.Unmarshal(buf.Bytes(), &sts); err != nil { - return pnrr, err - } - - if err 
:= json.Unmarshal(buf.Bytes(), &dl); err != nil { - return pnrr, err - } - - pnrr = postNotificationRuleRequest{ - NotificationRuleCreate: influxdb.NotificationRuleCreate{ - NotificationRule: nr, - Status: *sts.Status, - }, - Labels: dl.Labels, - } - - return pnrr, nil -} - -func decodePutNotificationRuleRequest(ctx context.Context, r *http.Request) (influxdb.NotificationRuleCreate, error) { - var nrc influxdb.NotificationRuleCreate - var sts statusDecode - - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(r.Body) - if err != nil { - return nrc, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - defer r.Body.Close() - nr, err := rule.UnmarshalJSON(buf.Bytes()) - if err != nil { - return nrc, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nrc, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - i := new(platform.ID) - if err := i.DecodeFromString(id); err != nil { - return nrc, err - } - nr.SetID(*i) - - err = json.Unmarshal(buf.Bytes(), &sts) - if err != nil { - return nrc, err - } - - nrc = influxdb.NotificationRuleCreate{ - NotificationRule: nr, - Status: *sts.Status, - } - - return nrc, nil -} - -type patchNotificationRuleRequest struct { - platform.ID - Update influxdb.NotificationRuleUpdate -} - -func decodePatchNotificationRuleRequest(ctx context.Context, r *http.Request) (*patchNotificationRuleRequest, error) { - req := &patchNotificationRuleRequest{} - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - req.ID = i - - upd := &influxdb.NotificationRuleUpdate{} - if err := json.NewDecoder(r.Body).Decode(upd); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: err.Error(), - } - } - if err := upd.Valid(); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: err.Error(), - } - } - - req.Update = *upd - return req, nil -} - -// handlePostNotificationRule is the HTTP handler for the POST /api/v2/notificationRules route. 
-func (h *NotificationRuleHandler) handlePostNotificationRule(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - nr, err := decodePostNotificationRuleRequest(ctx, r) - if err != nil { - h.log.Debug("Failed to decode request", zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - - auth, err := pctx.GetAuthorizer(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := h.NotificationRuleStore.CreateNotificationRule(ctx, nr.NotificationRuleCreate, auth.GetUserID()); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Notification rule created", zap.String("notificationRule", fmt.Sprint(nr))) - - labels := h.mapNewNotificationRuleLabels(ctx, nr.NotificationRuleCreate, nr.Labels) - - res, err := h.newNotificationRuleResponse(ctx, nr, labels) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusCreated, res); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func (h *NotificationRuleHandler) mapNewNotificationRuleLabels(ctx context.Context, nrc influxdb.NotificationRuleCreate, labels []string) []*influxdb.Label { - var ls []*influxdb.Label - for _, sid := range labels { - var lid platform.ID - err := lid.DecodeFromString(sid) - - if err != nil { - continue - } - - label, err := h.LabelService.FindLabelByID(ctx, lid) - if err != nil { - continue - } - - mapping := influxdb.LabelMapping{ - LabelID: label.ID, - ResourceID: nrc.GetID(), - ResourceType: influxdb.NotificationRuleResourceType, - } - - err = h.LabelService.CreateLabelMapping(ctx, &mapping) - if err != nil { - continue - } - - ls = append(ls, label) - } - return ls -} - -// handlePutNotificationRule is the HTTP handler for the PUT /api/v2/notificationRule route. -func (h *NotificationRuleHandler) handlePutNotificationRule(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - nrc, err := decodePutNotificationRuleRequest(ctx, r) - if err != nil { - h.log.Debug("Failed to decode request", zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - auth, err := pctx.GetAuthorizer(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - nr, err := h.NotificationRuleStore.UpdateNotificationRule(ctx, nrc.GetID(), nrc, auth.GetUserID()) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: nr.GetID(), ResourceType: influxdb.NotificationRuleResourceType}) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Notification rule updated", zap.String("notificationRule", fmt.Sprint(nr))) - - res, err := h.newNotificationRuleResponse(ctx, nr, labels) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, res); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -// handlePatchNotificationRule is the HTTP handler for the PATCH /api/v2/notificationRule/:id route. 
-func (h *NotificationRuleHandler) handlePatchNotificationRule(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePatchNotificationRuleRequest(ctx, r) - if err != nil { - h.log.Debug("Failed to decode request", zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - - nr, err := h.NotificationRuleStore.PatchNotificationRule(ctx, req.ID, req.Update) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: nr.GetID(), ResourceType: influxdb.NotificationRuleResourceType}) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Notification rule patch", zap.String("notificationRule", fmt.Sprint(nr))) - - res, err := h.newNotificationRuleResponse(ctx, nr, labels) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, res); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func (h *NotificationRuleHandler) handleDeleteNotificationRule(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - i, err := decodeGetNotificationRuleRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err = h.NotificationRuleStore.DeleteNotificationRule(ctx, i); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Notification rule deleted", zap.String("notificationRuleID", fmt.Sprint(i))) - - w.WriteHeader(http.StatusNoContent) -} - -// NotificationRuleService is an http client that implements the NotificationRuleStore interface -type NotificationRuleService struct { - Client *httpc.Client -} - -// NewNotificationRuleService wraps an httpc.Client in a NotificationRuleService -func NewNotificationRuleService(client *httpc.Client) *NotificationRuleService { - return &NotificationRuleService{ - Client: client, - } -} - -type notificationRuleCreateEncoder struct { - nrc influxdb.NotificationRuleCreate -} - -func (n notificationRuleCreateEncoder) MarshalJSON() ([]byte, error) { - b, err := n.nrc.NotificationRule.MarshalJSON() - if err != nil { - return nil, err - } - var v map[string]interface{} - err = json.Unmarshal(b, &v) - if err != nil { - return nil, err - } - v["status"] = n.nrc.Status - return json.Marshal(v) -} - -type notificationRuleDecoder struct { - rule influxdb.NotificationRule -} - -func (n *notificationRuleDecoder) UnmarshalJSON(b []byte) error { - newRule, err := rule.UnmarshalJSON(b) - if err != nil { - return err - } - n.rule = newRule - return nil -} - -// CreateNotificationRule creates a new NotificationRule from a NotificationRuleCreate -// the Status on the NotificationRuleCreate is used to determine the status (active/inactive) of the associated Task -func (s *NotificationRuleService) CreateNotificationRule(ctx context.Context, nr influxdb.NotificationRuleCreate, userID platform.ID) error { - var resp notificationRuleDecoder - err := s.Client. - PostJSON(notificationRuleCreateEncoder{nrc: nr}, prefixNotificationRules). - DecodeJSON(&resp). - Do(ctx) - - if err != nil { - return err - } - - nr.NotificationRule.SetID(resp.rule.GetID()) - nr.NotificationRule.SetOrgID(resp.rule.GetOrgID()) - - return nil -} - -// FindNotificationRuleByID finds and returns one Notification Rule with a matching ID -func (s *NotificationRuleService) FindNotificationRuleByID(ctx context.Context, id platform.ID) (influxdb.NotificationRule, error) { - var resp notificationRuleResponse - err := s.Client. 
- Get(getNotificationRulesIDPath(id)). - DecodeJSON(&resp). - Do(ctx) - - return resp.NotificationRule, err -} - -// FindNotificationRules returns a list of notification rules that match filter and the total count of matching notification rules. -// Additional options provide pagination & sorting. -func (s *NotificationRuleService) FindNotificationRules(ctx context.Context, filter influxdb.NotificationRuleFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationRule, int, error) { - var params = influxdb.FindOptionParams(opt...) - if filter.OrgID != nil { - params = append(params, [2]string{"orgID", filter.OrgID.String()}) - } - - if filter.Organization != nil { - params = append(params, [2]string{"org", *filter.Organization}) - } - - if len(filter.Tags) != 0 { - // loop over tags and append a string of format key:value for each - for _, tag := range filter.Tags { - keyvalue := fmt.Sprintf("%s:%s", tag.Key, tag.Value) - params = append(params, [2]string{"tag", keyvalue}) - } - } - - var resp struct { - NotificationRules []notificationRuleDecoder - } - err := s.Client. - Get(prefixNotificationRules). - QueryParams(params...). - DecodeJSON(&resp). - Do(ctx) - if err != nil { - return nil, 0, err - } - - var rules []influxdb.NotificationRule - for _, r := range resp.NotificationRules { - rules = append(rules, r.rule) - } - - return rules, len(rules), nil -} - -// UpdateNotificationRule updates a single notification rule. -// Returns the new notification rule after update. -func (s *NotificationRuleService) UpdateNotificationRule(ctx context.Context, id platform.ID, nr influxdb.NotificationRuleCreate, userID platform.ID) (influxdb.NotificationRule, error) { - var resp notificationRuleDecoder - err := s.Client. - PutJSON(notificationRuleCreateEncoder{nrc: nr}, getNotificationRulesIDPath(id)). - DecodeJSON(&resp). - Do(ctx) - if err != nil { - return nil, err - } - - return resp.rule, nil -} - -// PatchNotificationRule updates a single notification rule with changeset. -// Returns the new notification rule state after update. -func (s *NotificationRuleService) PatchNotificationRule(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) { - var resp notificationRuleDecoder - err := s.Client. - PatchJSON(&upd, getNotificationRulesIDPath(id)). - DecodeJSON(&resp). - Do(ctx) - if err != nil { - return nil, err - } - - return resp.rule, nil -} - -// DeleteNotificationRule removes a notification rule by ID. -func (s *NotificationRuleService) DeleteNotificationRule(ctx context.Context, id platform.ID) error { - return s.Client. - Delete(getNotificationRulesIDPath(id)). 
- Do(ctx) -} - -func getNotificationRulesIDPath(id platform.ID) string { - return path.Join(prefixNotificationRules, id.String()) -} diff --git a/http/notification_rule_test.go b/http/notification_rule_test.go deleted file mode 100644 index 1066cc01475..00000000000 --- a/http/notification_rule_test.go +++ /dev/null @@ -1,319 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/rule" - "github.com/influxdata/influxdb/v2/task/taskmodel" - influxTesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func NewMockNotificationRuleBackend(t *testing.T) *NotificationRuleBackend { - return &NotificationRuleBackend{ - log: zaptest.NewLogger(t), - - AlgoWProxy: &NoopProxyHandler{}, - UserResourceMappingService: mock.NewUserResourceMappingService(), - LabelService: mock.NewLabelService(), - UserService: mock.NewUserService(), - OrganizationService: mock.NewOrganizationService(), - TaskService: &mock.TaskService{ - FindTaskByIDFn: func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - return &taskmodel.Task{Status: "active"}, nil - }, - }, - } -} - -func Test_newNotificationRuleResponses(t *testing.T) { - type args struct { - opt influxdb.FindOptions - filter influxdb.NotificationRuleFilter - nrs []influxdb.NotificationRule - } - tests := []struct { - name string - args args - want string - }{ - { - args: args{ - opt: influxdb.FindOptions{ - Limit: 50, - Offset: 0, - Descending: true, - }, - filter: influxdb.NotificationRuleFilter{ - OrgID: influxTesting.IDPtr(platform.ID(2)), - }, - nrs: []influxdb.NotificationRule{ - &rule.Slack{ - Channel: "ch1", - MessageTemplate: "message 1{var1}", - Base: rule.Base{ - ID: platform.ID(1), - OrgID: platform.ID(2), - OwnerID: platform.ID(3), - EndpointID: 4, - Name: "name1", - Description: "desc1", - Every: mustDuration("5m"), - Offset: mustDuration("15s"), - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{Key: "k1", Value: "v1"}, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{Key: "k2", Value: "v2"}, - Operator: influxdb.NotRegexEqual, - }, - }, - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - { - CurrentLevel: notification.Warn, - }, - }, - }, - }, - &rule.PagerDuty{ - MessageTemplate: "body 2{var2}", - Base: rule.Base{ - ID: platform.ID(11), - OrgID: platform.ID(2), - OwnerID: platform.ID(33), - EndpointID: 44, - Name: "name2", - Description: "desc2", - }, - }, - }, - }, - want: `{ - "links": { - "self": "/api/v2/notificationRules?descending=true&limit=50&offset=0&orgID=0000000000000002" - }, - "notificationRules": [ - { - "channel": "ch1", - "createdAt": "0001-01-01T00:00:00Z", - "description": "desc1", - "endpointID": "0000000000000004", - "every": "5m", - "id": "0000000000000001", - "labels": [ - ], - "links": { - "labels": "/api/v2/notificationRules/0000000000000001/labels", - "members": "/api/v2/notificationRules/0000000000000001/members", - "owners": "/api/v2/notificationRules/0000000000000001/owners", - "query": "/api/v2/notificationRules/0000000000000001/query", - "self": "/api/v2/notificationRules/0000000000000001" - }, - "messageTemplate": "message 1{var1}", - "name": "name1", - "offset": "15s", - "orgID": "0000000000000002", - 
"ownerID": "0000000000000003", - "runbookLink": "", - "statusRules": [ - { - "currentLevel": "CRIT", - "previousLevel": null - }, - { - "currentLevel": "WARN", - "previousLevel": null - } - ], - "tagRules": [ - { - "key": "k1", - "operator": "equal", - "value": "v1" - }, - { - "key": "k2", - "operator": "notequalregex", - "value": "v2" - } - ], - "type": "slack", - "updatedAt": "0001-01-01T00:00:00Z", - "status": "active", - "latestCompleted": "0001-01-01T00:00:00Z", - "latestScheduled": "0001-01-01T00:00:00Z" - }, - { - "createdAt": "0001-01-01T00:00:00Z", - "description": "desc2", - "endpointID": "000000000000002c", - "id": "000000000000000b", - "labels": [ - ], - "links": { - "labels": "/api/v2/notificationRules/000000000000000b/labels", - "members": "/api/v2/notificationRules/000000000000000b/members", - "owners": "/api/v2/notificationRules/000000000000000b/owners", - "query": "/api/v2/notificationRules/000000000000000b/query", - "self": "/api/v2/notificationRules/000000000000000b" - }, - "messageTemplate": "body 2{var2}", - "name": "name2", - "orgID": "0000000000000002", - "ownerID": "0000000000000021", - "runbookLink": "", - "type": "pagerduty", - "updatedAt": "0001-01-01T00:00:00Z", - "status": "active", - "latestCompleted": "0001-01-01T00:00:00Z", - "latestScheduled": "0001-01-01T00:00:00Z" - } - ] -}`, - }, - } - handler := NewNotificationRuleHandler(zaptest.NewLogger(t), NewMockNotificationRuleBackend(t)) - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - - res, err := handler.newNotificationRulesResponse(ctx, tt.args.nrs, mock.NewLabelService(), tt.args.filter, tt.args.opt) - if err != nil { - t.Fatalf("newNotificationRulesResponse() build response %v", err) - } - - got, err := json.Marshal(res) - if err != nil { - t.Fatalf("newNotificationRulesResponse() JSON marshal %v", err) - } - if eq, diff, _ := jsonEqual(string(got), tt.want); tt.want != "" && !eq { - t.Errorf("%q. 
newNotificationRulesResponse() = ***%s***", tt.name, diff) - } - }) - } -} - -func Test_newNotificationRuleResponse(t *testing.T) { - type args struct { - nr influxdb.NotificationRule - } - tests := []struct { - name string - args args - want string - }{ - { - args: args{ - nr: &rule.Slack{ - Channel: "ch1", - MessageTemplate: "message 1{var1}", - Base: rule.Base{ - ID: platform.ID(1), - OrgID: platform.ID(2), - OwnerID: platform.ID(3), - EndpointID: 4, - Name: "name1", - Description: "desc1", - Every: mustDuration("5m"), - Offset: mustDuration("15s"), - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{Key: "k1", Value: "v1"}, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{Key: "k2", Value: "v2"}, - Operator: influxdb.NotRegexEqual, - }, - }, - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - { - CurrentLevel: notification.Warn, - }, - }, - }, - }, - }, - want: `{ - "channel": "ch1", - "createdAt": "0001-01-01T00:00:00Z", - "description": "desc1", - "endpointID": "0000000000000004", - "every": "5m", - "id": "0000000000000001", - "labels": [ - ], - "links": { - "labels": "/api/v2/notificationRules/0000000000000001/labels", - "members": "/api/v2/notificationRules/0000000000000001/members", - "owners": "/api/v2/notificationRules/0000000000000001/owners", - "query": "/api/v2/notificationRules/0000000000000001/query", - "self": "/api/v2/notificationRules/0000000000000001" - }, - "messageTemplate": "message 1{var1}", - "name": "name1", - "offset": "15s", - "orgID": "0000000000000002", - "ownerID": "0000000000000003", - "runbookLink": "", - "status": "active", - "statusRules": [ - { - "currentLevel": "CRIT", - "previousLevel": null - }, - { - "currentLevel": "WARN", - "previousLevel": null - } - ], - "tagRules": [ - { - "key": "k1", - "operator": "equal", - "value": "v1" - }, - { - "key": "k2", - "operator": "notequalregex", - "value": "v2" - } - ], - "type": "slack", - "updatedAt": "0001-01-01T00:00:00Z", - "latestCompleted": "0001-01-01T00:00:00Z", - "latestScheduled": "0001-01-01T00:00:00Z" -}`, - }, - } - handler := NewNotificationRuleHandler(zaptest.NewLogger(t), NewMockNotificationRuleBackend(t)) - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - res, err := handler.newNotificationRuleResponse(context.Background(), tt.args.nr, []*influxdb.Label{}) - if err != nil { - t.Fatalf("newNotificationRuleResponse() building response %v", err) - } - got, err := json.Marshal(res) - if err != nil { - t.Fatalf("newNotificationRuleResponse() JSON marshal %v", err) - } - if eq, diff, _ := jsonEqual(string(got), tt.want); tt.want != "" && !eq { - t.Errorf("%q. 
newNotificationRuleResponse() = ***%s***", tt.name, diff) - } - }) - } -} diff --git a/http/paging_test.go b/http/paging_test.go deleted file mode 100644 index 4dea52ead15..00000000000 --- a/http/paging_test.go +++ /dev/null @@ -1,195 +0,0 @@ -package http - -import ( - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/mock" -) - -func TestPaging_DecodeFindOptions(t *testing.T) { - type args struct { - queryParams map[string]string - } - type wants struct { - opts influxdb.FindOptions - } - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "decode FindOptions", - args: args{ - map[string]string{ - "offset": "10", - "limit": "10", - "sortBy": "updateTime", - "descending": "true", - }, - }, - wants: wants{ - opts: influxdb.FindOptions{ - Offset: 10, - Limit: 10, - SortBy: "updateTime", - Descending: true, - }, - }, - }, - { - name: "decode FindOptions with default values", - args: args{ - map[string]string{ - "limit": "10", - }, - }, - wants: wants{ - opts: influxdb.FindOptions{ - Offset: 0, - Limit: 10, - SortBy: "", - Descending: false, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := httptest.NewRequest("GET", "http://any.url", nil) - qp := r.URL.Query() - for k, v := range tt.args.queryParams { - qp.Set(k, v) - } - r.URL.RawQuery = qp.Encode() - - opts, err := influxdb.DecodeFindOptions(r) - if err != nil { - t.Errorf("%q failed, err: %s", tt.name, err.Error()) - } - - if opts.Offset != tt.wants.opts.Offset { - t.Errorf("%q. influxdb.DecodeFindOptions() = %v, want %v", tt.name, opts.Offset, tt.wants.opts.Offset) - } - if opts.Limit != tt.wants.opts.Limit { - t.Errorf("%q. influxdb.DecodeFindOptions() = %v, want %v", tt.name, opts.Limit, tt.wants.opts.Limit) - } - if opts.SortBy != tt.wants.opts.SortBy { - t.Errorf("%q. influxdb.DecodeFindOptions() = %v, want %v", tt.name, opts.SortBy, tt.wants.opts.SortBy) - } - if opts.Descending != tt.wants.opts.Descending { - t.Errorf("%q. 
influxdb.DecodeFindOptions() = %v, want %v", tt.name, opts.Descending, tt.wants.opts.Descending) - } - }) - } -} - -func TestPaging_NewPagingLinks(t *testing.T) { - type args struct { - basePath string - num int - opts influxdb.FindOptions - filter mock.PagingFilter - } - type wants struct { - links influxdb.PagingLinks - } - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "new PagingLinks", - args: args{ - basePath: "/api/v2/buckets", - num: 50, - opts: influxdb.FindOptions{ - Offset: 10, - Limit: 10, - Descending: true, - }, - filter: mock.PagingFilter{ - Name: "name", - Type: []string{"type1", "type2"}, - }, - }, - wants: wants{ - links: influxdb.PagingLinks{ - Prev: "/api/v2/buckets?descending=true&limit=10&name=name&offset=0&type=type1&type=type2", - Self: "/api/v2/buckets?descending=true&limit=10&name=name&offset=10&type=type1&type=type2", - Next: "/api/v2/buckets?descending=true&limit=10&name=name&offset=20&type=type1&type=type2", - }, - }, - }, - { - name: "new PagingLinks with empty prev link", - args: args{ - basePath: "/api/v2/buckets", - num: 50, - opts: influxdb.FindOptions{ - Offset: 0, - Limit: 10, - Descending: true, - }, - filter: mock.PagingFilter{ - Name: "name", - Type: []string{"type1", "type2"}, - }, - }, - wants: wants{ - links: influxdb.PagingLinks{ - Prev: "", - Self: "/api/v2/buckets?descending=true&limit=10&name=name&offset=0&type=type1&type=type2", - Next: "/api/v2/buckets?descending=true&limit=10&name=name&offset=10&type=type1&type=type2", - }, - }, - }, - { - name: "new PagingLinks with empty next link", - args: args{ - basePath: "/api/v2/buckets", - num: 5, - opts: influxdb.FindOptions{ - Offset: 10, - Limit: 10, - Descending: true, - }, - filter: mock.PagingFilter{ - Name: "name", - Type: []string{"type1", "type2"}, - }, - }, - wants: wants{ - links: influxdb.PagingLinks{ - Prev: "/api/v2/buckets?descending=true&limit=10&name=name&offset=0&type=type1&type=type2", - Self: "/api/v2/buckets?descending=true&limit=10&name=name&offset=10&type=type1&type=type2", - Next: "", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - links := influxdb.NewPagingLinks(tt.args.basePath, tt.args.opts, tt.args.filter, tt.args.num) - - if links.Prev != tt.wants.links.Prev { - t.Errorf("%q. influxdb.NewPagingLinks() = %v, want %v", tt.name, links.Prev, tt.wants.links.Prev) - } - - if links.Self != tt.wants.links.Self { - t.Errorf("%q. influxdb.NewPagingLinks() = %v, want %v", tt.name, links.Self, tt.wants.links.Self) - } - - if links.Next != tt.wants.links.Next { - t.Errorf("%q. influxdb.NewPagingLinks() = %v, want %v", tt.name, links.Next, tt.wants.links.Next) - } - }) - } -} diff --git a/http/platform_handler.go b/http/platform_handler.go deleted file mode 100644 index 12fa2774de6..00000000000 --- a/http/platform_handler.go +++ /dev/null @@ -1,85 +0,0 @@ -package http - -import ( - "net/http" - "strings" - - "github.com/NYTimes/gziphandler" - "github.com/influxdata/influxdb/v2/http/legacy" - "github.com/influxdata/influxdb/v2/kit/feature" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/static" -) - -// PlatformHandler is a collection of all the service handlers. -type PlatformHandler struct { - AssetHandler http.Handler - DocsHandler http.HandlerFunc - APIHandler http.Handler - LegacyHandler http.Handler -} - -// NewPlatformHandler returns a platform handler that serves the API and associated assets. 
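As a rough, hedged illustration of how the paging helpers exercised by the tests above fit together, here is an editorial sketch (not part of the deleted sources). It assumes `influxdb.DecodeFindOptions` returns a pointer that is dereferenced before the `NewPagingLinks` call, borrows `mock.PagingFilter` from the test, and uses a made-up request URL and result count; usual imports (`fmt`, `log`, `net/http/httptest`, and the influxdb/mock packages) are assumed.

```go
// Editorial sketch: decode paging options from a request, then build the
// prev/self/next links that go into the response body.
r := httptest.NewRequest("GET", "/api/v2/buckets?limit=10&offset=20&descending=true", nil)

opts, err := influxdb.DecodeFindOptions(r) // offset, limit, sortBy, descending
if err != nil {
	log.Fatal(err)
}

filter := mock.PagingFilter{Name: "name"} // filter params are echoed back into the links
resultsOnThisPage := 10                   // hypothetical: how many items this page returned

// Per the tests above: Prev is empty on the first page, Next is empty when
// the page came back with fewer results than the limit.
links := influxdb.NewPagingLinks("/api/v2/buckets", *opts, filter, resultsOnThisPage)
fmt.Println(links.Prev, links.Self, links.Next)
```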
-func NewPlatformHandler(b *APIBackend, opts ...APIHandlerOptFn) *PlatformHandler { - h := NewAuthenticationHandler(b.Logger, b.HTTPErrorHandler) - h.Handler = feature.NewHandler(b.Logger, b.Flagger, feature.Flags(), NewAPIHandler(b, opts...)) - h.AuthorizationService = b.AuthorizationService - h.SessionService = b.SessionService - h.SessionRenewDisabled = b.SessionRenewDisabled - h.UserService = b.UserService - - h.RegisterNoAuthRoute("GET", "/api/v2") - h.RegisterNoAuthRoute("POST", "/api/v2/signin") - h.RegisterNoAuthRoute("POST", "/api/v2/signout") - h.RegisterNoAuthRoute("POST", "/api/v2/setup") - h.RegisterNoAuthRoute("GET", "/api/v2/setup") - h.RegisterNoAuthRoute("GET", "/api/v2/swagger.json") - - assetHandler := static.NewAssetHandler(b.AssetsPath) - if b.UIDisabled { - b.Logger.Debug("http server running with UI disabled") - assetHandler = http.NotFoundHandler() - } - - wrappedHandler := kithttp.SetCORS(h) - wrappedHandler = kithttp.SkipOptions(wrappedHandler) - - legacyBackend := newLegacyBackend(b) - lh := newLegacyHandler(legacyBackend, *legacy.NewHandlerConfig()) - // legacy reponses can optionally be gzip encoded - gh := gziphandler.GzipHandler(lh) - - return &PlatformHandler{ - AssetHandler: assetHandler, - DocsHandler: Redoc("/api/v2/swagger.json"), - APIHandler: wrappedHandler, - LegacyHandler: legacy.NewInflux1xAuthenticationHandler(gh, b.AuthorizerV1, b.HTTPErrorHandler), - } -} - -// ServeHTTP delegates a request to the appropriate subhandler. -func (h *PlatformHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // TODO(affo): change this to be mounted prefixes: https://github.com/influxdata/idpe/issues/6689. - if r.URL.Path == "/write" || - r.URL.Path == "/query" || - r.URL.Path == "/ping" { - h.LegacyHandler.ServeHTTP(w, r) - return - } - - if strings.HasPrefix(r.URL.Path, "/docs") { - h.DocsHandler.ServeHTTP(w, r) - return - } - - // Serve the static UI assets for any basepath that does not start with - // addressable parts of the platform API. - if !strings.HasPrefix(r.URL.Path, "/v1") && - !strings.HasPrefix(r.URL.Path, "/api/v2") && - !strings.HasPrefix(r.URL.Path, "/private/") { - h.AssetHandler.ServeHTTP(w, r) - return - } - - h.APIHandler.ServeHTTP(w, r) -} diff --git a/http/points/batch_reader.go b/http/points/batch_reader.go deleted file mode 100644 index 1ec6e8775d3..00000000000 --- a/http/points/batch_reader.go +++ /dev/null @@ -1,25 +0,0 @@ -package points - -import ( - "compress/gzip" - "io" - - io2 "github.com/influxdata/influxdb/v2/kit/io" -) - -// BatchReadCloser (potentially) wraps an io.ReadCloser in Gzip -// decompression and limits the reading to a specific number of bytes. 
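Ahead of the definition that follows, a hedged editorial sketch of how a write path might use this helper from inside a handler (so `r *http.Request` is in scope). The 25 MB limit and the use of the `Content-Encoding` header are illustrative only, and, per `readAll` in `points_parser.go` below, the size limit is surfaced by `Close`, not by the reads themselves.

```go
// Editorial sketch: wrap the request body in optional gzip decompression plus
// a byte limit before parsing line protocol.
rc, err := points.BatchReadCloser(r.Body, r.Header.Get("Content-Encoding"), 25*1024*1024)
if err != nil {
	return err // e.g. a bad gzip header when the client claimed gzip encoding
}
data, readErr := io.ReadAll(rc)
if cerr := rc.Close(); cerr != nil && readErr == nil {
	readErr = cerr // io2.ErrReadLimitExceeded shows up here when the batch was too large
}
// data now holds the decompressed, size-checked payload.
```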
-func BatchReadCloser(rc io.ReadCloser, encoding string, maxBatchSizeBytes int64) (io.ReadCloser, error) { - switch encoding { - case "gzip", "x-gzip": - var err error - rc, err = gzip.NewReader(rc) - if err != nil { - return nil, err - } - } - if maxBatchSizeBytes > 0 { - rc = io2.NewLimitedReadCloser(rc, maxBatchSizeBytes) - } - return rc, nil -} diff --git a/http/points/points_parser.go b/http/points/points_parser.go deleted file mode 100644 index bc1757528e2..00000000000 --- a/http/points/points_parser.go +++ /dev/null @@ -1,129 +0,0 @@ -package points - -import ( - "compress/gzip" - "context" - "errors" - "fmt" - "io" - "time" - - io2 "github.com/influxdata/influxdb/v2/kit/io" - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/models" - "github.com/opentracing/opentracing-go" -) - -var ( - // ErrMaxBatchSizeExceeded is returned when a points batch exceeds - // the defined upper limit in bytes. This pertains to the size of the - // batch after inflation from any compression (i.e. ungzipped). - ErrMaxBatchSizeExceeded = errors.New("points batch is too large") -) - -const ( - opPointsWriter = "http/pointsWriter" - msgUnableToReadData = "unable to read data" -) - -// ParsedPoints contains the points parsed as well as the total number of bytes -// after decompression. -type ParsedPoints struct { - Points models.Points - RawSize int -} - -// Parser parses batches of Points. -type Parser struct { - Precision string - //ParserOptions []models.ParserOption -} - -// Parse parses the points from an io.ReadCloser for a specific Bucket. -func (pw *Parser) Parse(ctx context.Context, orgID, bucketID platform.ID, rc io.ReadCloser) (*ParsedPoints, error) { - span, ctx := opentracing.StartSpanFromContext(ctx, "write points") - defer span.Finish() - return pw.parsePoints(ctx, orgID, bucketID, rc) -} - -func (pw *Parser) parsePoints(ctx context.Context, orgID, bucketID platform.ID, rc io.ReadCloser) (*ParsedPoints, error) { - data, err := readAll(ctx, rc) - if err != nil { - code := errors2.EInternal - if errors.Is(err, ErrMaxBatchSizeExceeded) { - code = errors2.ETooLarge - } else if errors.Is(err, gzip.ErrHeader) || errors.Is(err, gzip.ErrChecksum) { - code = errors2.EInvalid - } - return nil, &errors2.Error{ - Code: code, - Op: opPointsWriter, - Msg: msgUnableToReadData, - Err: err, - } - } - - span, _ := tracing.StartSpanFromContextWithOperationName(ctx, "encoding and parsing") - - points, err := models.ParsePointsWithPrecision(data, time.Now().UTC(), pw.Precision) - span.LogKV("values_total", len(points)) - span.Finish() - if err != nil { - tracing.LogError(span, fmt.Errorf("error parsing points: %v", err)) - - code := errors2.EInvalid - // TODO - backport these - // if errors.Is(err, models.ErrLimitMaxBytesExceeded) || - // errors.Is(err, models.ErrLimitMaxLinesExceeded) || - // errors.Is(err, models.ErrLimitMaxValuesExceeded) { - // code = influxdb.ETooLarge - // } - - return nil, &errors2.Error{ - Code: code, - Op: opPointsWriter, - Msg: "", - Err: err, - } - } - - return &ParsedPoints{ - Points: points, - RawSize: len(data), - }, nil -} - -func readAll(ctx context.Context, rc io.ReadCloser) (data []byte, err error) { - defer func() { - if cerr := rc.Close(); cerr != nil && err == nil { - if errors.Is(cerr, io2.ErrReadLimitExceeded) { - cerr = ErrMaxBatchSizeExceeded - } - err = cerr - } - }() - - 
span, _ := tracing.StartSpanFromContextWithOperationName(ctx, "read request body") - - defer func() { - span.LogKV("request_bytes", len(data)) - span.Finish() - }() - - data, err = io.ReadAll(rc) - if err != nil { - return nil, err - - } - return data, nil -} - -// NewParser returns a new Parser -func NewParser(precision string /*parserOptions ...models.ParserOption*/) *Parser { - return &Parser{ - Precision: precision, - //ParserOptions: parserOptions, - } -} diff --git a/http/proxy_handler.go b/http/proxy_handler.go deleted file mode 100644 index a765dca6da9..00000000000 --- a/http/proxy_handler.go +++ /dev/null @@ -1,49 +0,0 @@ -package http - -import ( - "net/http" -) - -var _ http.Handler = &proxyHandler{} - -// withFeatureProxy wraps an HTTP handler in a proxyHandler -func withFeatureProxy(proxy FeatureProxyHandler, h http.Handler) *proxyHandler { - if proxy == nil { - proxy = &NoopProxyHandler{} - } - return &proxyHandler{ - proxy: proxy, - handler: h, - } -} - -// proxyHandler is a wrapper around an http.Handler that conditionally forwards -// a request to another HTTP backend using a proxy. If the proxy doesn't decide -// to forward the request, we fall-back to our normal http.Handler behavior. -type proxyHandler struct { - proxy FeatureProxyHandler - handler http.Handler -} - -// ServeHTTP implements http.Handler interface. It first -func (h *proxyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if h.proxy.Do(w, r) { - return - } - h.handler.ServeHTTP(w, r) -} - -// FeatureProxyHandler is an HTTP proxy that conditionally forwards requests to -// another backend. -type FeatureProxyHandler interface { - Do(w http.ResponseWriter, r *http.Request) bool -} - -// NoopProxyHandler is a no-op FeatureProxyHandler. It should be used if -// no feature-flag driven proxying is necessary. -type NoopProxyHandler struct{} - -// Do implements FeatureProxyHandler. -func (h *NoopProxyHandler) Do(http.ResponseWriter, *http.Request) bool { - return false -} diff --git a/http/query.go b/http/query.go deleted file mode 100644 index 8cafc0dfe95..00000000000 --- a/http/query.go +++ /dev/null @@ -1,362 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "mime" - "net/http" - "time" - "unicode/utf8" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/csv" - "github.com/influxdata/flux/lang" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/jsonweb" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/fluxlang" -) - -// QueryRequest is a flux query request. -type QueryRequest struct { - Type string `json:"type"` - Query string `json:"query"` - - // Flux fields - Extern json.RawMessage `json:"extern,omitempty"` - AST json.RawMessage `json:"ast,omitempty"` - Dialect QueryDialect `json:"dialect"` - Now time.Time `json:"now"` - - Org *influxdb.Organization `json:"-"` - - // PreferNoContent specifies if the Response to this request should - // contain any result. This is done for avoiding unnecessary - // bandwidth consumption in certain cases. For example, when the - // query produces side effects and the results do not matter. E.g.: - // from(...) |> ... |> to() - // For example, tasks do not use the results of queries, but only - // care about their side effects. - // To obtain a QueryRequest with no result, add the header - // `Prefer: return-no-content` to the HTTP request. 
- PreferNoContent bool - // PreferNoContentWithError is the same as above, but it forces the - // Response to contain an error if that is a Flux runtime error encoded - // in the response body. - // To obtain a QueryRequest with no result but runtime errors, - // add the header `Prefer: return-no-content-with-error` to the HTTP request. - PreferNoContentWithError bool -} - -// QueryDialect is the formatting options for the query response. -type QueryDialect struct { - Header *bool `json:"header"` - Delimiter string `json:"delimiter"` - CommentPrefix string `json:"commentPrefix"` - DateTimeFormat string `json:"dateTimeFormat"` - Annotations []string `json:"annotations"` -} - -// WithDefaults adds default values to the request. -func (r QueryRequest) WithDefaults() QueryRequest { - if r.Type == "" { - r.Type = "flux" - } - if r.Dialect.Delimiter == "" { - r.Dialect.Delimiter = "," - } - if r.Dialect.DateTimeFormat == "" { - r.Dialect.DateTimeFormat = "RFC3339" - } - if r.Dialect.Header == nil { - header := true - r.Dialect.Header = &header - } - return r -} - -// Validate checks the query request and returns an error if the request is invalid. -func (r QueryRequest) Validate() error { - if r.Query == "" && r.AST == nil { - return errors.New(`request body requires either query or AST`) - } - - if r.Type != "flux" { - return fmt.Errorf(`unknown query type: %s`, r.Type) - } - - if len(r.Dialect.CommentPrefix) > 1 { - return fmt.Errorf("invalid dialect comment prefix: must be length 0 or 1") - } - - if len(r.Dialect.Delimiter) != 1 { - return fmt.Errorf("invalid dialect delimeter: must be length 1") - } - - rune, size := utf8.DecodeRuneInString(r.Dialect.Delimiter) - if rune == utf8.RuneError && size == 1 { - return fmt.Errorf("invalid dialect delimeter character") - } - - for _, a := range r.Dialect.Annotations { - switch a { - case "group", "datatype", "default": - default: - return fmt.Errorf(`unknown dialect annotation type: %s`, a) - } - } - - switch r.Dialect.DateTimeFormat { - case "RFC3339", "RFC3339Nano": - default: - return fmt.Errorf(`unknown dialect date time format: %s`, r.Dialect.DateTimeFormat) - } - - return nil -} - -// QueryAnalysis is a structured response of errors. -type QueryAnalysis struct { - Errors []queryParseError `json:"errors"` -} - -type queryParseError struct { - Line int `json:"line"` - Column int `json:"column"` - Character int `json:"character"` - Message string `json:"message"` -} - -// Analyze attempts to parse the query request and returns any errors -// encountered in a structured way. -func (r QueryRequest) Analyze(l fluxlang.FluxLanguageService) (*QueryAnalysis, error) { - switch r.Type { - case "flux": - return r.analyzeFluxQuery(l) - } - - return nil, fmt.Errorf("unknown query request type %s", r.Type) -} - -func (r QueryRequest) analyzeFluxQuery(l fluxlang.FluxLanguageService) (*QueryAnalysis, error) { - a := &QueryAnalysis{} - pkg, err := query.Parse(l, r.Query) - if pkg == nil { - return nil, err - } - errCount := ast.Check(pkg) - if errCount == 0 { - a.Errors = []queryParseError{} - return a, nil - } - a.Errors = make([]queryParseError, 0, errCount) - ast.Walk(ast.CreateVisitor(func(node ast.Node) { - loc := node.Location() - for _, err := range node.Errs() { - a.Errors = append(a.Errors, queryParseError{ - Line: loc.Start.Line, - Column: loc.Start.Column, - Message: err.Msg, - }) - } - }), pkg) - return a, nil -} - -// ProxyRequest returns a request to proxy from the flux. 
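A quick, hedged illustration of the `WithDefaults` and `Validate` methods above, written as if from inside this package; the Flux script is arbitrary and the use of `log.Fatal` is just for the sketch.

```go
// Editorial sketch: defaults and validation for a minimal Flux query request.
req := QueryRequest{Query: `from(bucket: "example") |> range(start: -1h)`}
req = req.WithDefaults() // Type "flux", comma delimiter, RFC3339 timestamps, CSV header row on
if err := req.Validate(); err != nil {
	// Rejected cases include an empty query/AST, a delimiter that is not exactly
	// one character, unknown annotations, and unknown date-time formats.
	log.Fatal(err)
}
```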
-func (r QueryRequest) ProxyRequest() (*query.ProxyRequest, error) { - return r.proxyRequest(time.Now) -} - -func (r QueryRequest) proxyRequest(now func() time.Time) (*query.ProxyRequest, error) { - if err := r.Validate(); err != nil { - return nil, err - } - - n := r.Now - if n.IsZero() { - n = now() - } - - // Query is preferred over AST - var compiler flux.Compiler - if r.Query != "" { - switch r.Type { - case "flux": - fallthrough - default: - compiler = lang.FluxCompiler{ - Now: n, - Extern: r.Extern, - Query: r.Query, - } - } - } else if len(r.AST) > 0 { - c := lang.ASTCompiler{ - Extern: r.Extern, - AST: r.AST, - Now: n, - } - compiler = c - } - - delimiter, _ := utf8.DecodeRuneInString(r.Dialect.Delimiter) - - noHeader := false - if r.Dialect.Header != nil { - noHeader = !*r.Dialect.Header - } - - var dialect flux.Dialect - if r.PreferNoContent { - dialect = &query.NoContentDialect{} - } else { - // TODO(nathanielc): Use commentPrefix and dateTimeFormat - // once they are supported. - encConfig := csv.ResultEncoderConfig{ - NoHeader: noHeader, - Delimiter: delimiter, - Annotations: r.Dialect.Annotations, - } - if r.PreferNoContentWithError { - dialect = &query.NoContentWithErrorDialect{ - ResultEncoderConfig: encConfig, - } - } else { - dialect = &csv.Dialect{ - ResultEncoderConfig: encConfig, - } - } - } - - return &query.ProxyRequest{ - Request: query.Request{ - OrganizationID: r.Org.ID, - Compiler: compiler, - }, - Dialect: dialect, - }, nil -} - -// QueryRequestFromProxyRequest converts a query.ProxyRequest into a QueryRequest. -// The ProxyRequest must contain supported compilers and dialects otherwise an error occurs. -func QueryRequestFromProxyRequest(req *query.ProxyRequest) (*QueryRequest, error) { - qr := new(QueryRequest) - switch c := req.Request.Compiler.(type) { - case lang.FluxCompiler: - qr.Type = "flux" - qr.Query = c.Query - qr.Extern = c.Extern - qr.Now = c.Now - case lang.ASTCompiler: - qr.Type = "flux" - qr.AST = c.AST - qr.Now = c.Now - default: - return nil, fmt.Errorf("unsupported compiler %T", c) - } - switch d := req.Dialect.(type) { - case *csv.Dialect: - var header = !d.ResultEncoderConfig.NoHeader - qr.Dialect.Header = &header - qr.Dialect.Delimiter = string(d.ResultEncoderConfig.Delimiter) - qr.Dialect.CommentPrefix = "#" - qr.Dialect.DateTimeFormat = "RFC3339" - qr.Dialect.Annotations = d.ResultEncoderConfig.Annotations - case *query.NoContentDialect: - qr.PreferNoContent = true - case *query.NoContentWithErrorDialect: - qr.PreferNoContentWithError = true - default: - return nil, fmt.Errorf("unsupported dialect %T", d) - } - return qr, nil -} - -const fluxContentType = "application/vnd.flux" - -func decodeQueryRequest(ctx context.Context, r *http.Request, svc influxdb.OrganizationService) (*QueryRequest, int, error) { - var req QueryRequest - body := &countReader{Reader: r.Body} - - var contentType = "application/json" - if ct := r.Header.Get("Content-Type"); ct != "" { - contentType = ct - } - mt, _, err := mime.ParseMediaType(contentType) - if err != nil { - return nil, body.bytesRead, err - } - switch mt { - case fluxContentType: - octets, err := io.ReadAll(body) - if err != nil { - return nil, body.bytesRead, err - } - req.Query = string(octets) - case "application/json": - fallthrough - default: - if err := json.NewDecoder(body).Decode(&req); err != nil { - return nil, body.bytesRead, - fmt.Errorf("failed parsing request body as JSON; if sending a raw Flux script, set 'Content-Type: %s' in your request headers: %w", fluxContentType, err) - } - } 
- - switch hv := r.Header.Get(query.PreferHeaderKey); hv { - case query.PreferNoContentHeaderValue: - req.PreferNoContent = true - case query.PreferNoContentWErrHeaderValue: - req.PreferNoContentWithError = true - } - - req = req.WithDefaults() - if err := req.Validate(); err != nil { - return nil, body.bytesRead, err - } - - req.Org, err = queryOrganization(ctx, r, svc) - return &req, body.bytesRead, err -} - -type countReader struct { - bytesRead int - io.Reader -} - -func (r *countReader) Read(p []byte) (n int, err error) { - n, err = r.Reader.Read(p) - r.bytesRead += n - return n, err -} - -func decodeProxyQueryRequest(ctx context.Context, r *http.Request, auth influxdb.Authorizer, svc influxdb.OrganizationService) (*query.ProxyRequest, int, error) { - req, n, err := decodeQueryRequest(ctx, r, svc) - if err != nil { - return nil, n, err - } - - pr, err := req.ProxyRequest() - if err != nil { - return nil, n, err - } - - var token *influxdb.Authorization - switch a := auth.(type) { - case *influxdb.Authorization: - token = a - case *influxdb.Session: - token = a.EphemeralAuth(req.Org.ID) - case *jsonweb.Token: - token = a.EphemeralAuth(req.Org.ID) - default: - return pr, n, influxdb.ErrAuthorizerNotSupported - } - - pr.Request.Authorization = token - return pr, n, nil -} diff --git a/http/query_handler.go b/http/query_handler.go deleted file mode 100644 index 0718721ed2a..00000000000 --- a/http/query_handler.go +++ /dev/null @@ -1,654 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "time" - - "github.com/NYTimes/gziphandler" - "github.com/influxdata/flux" - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/csv" - "github.com/influxdata/flux/iocounter" - "github.com/influxdata/flux/lang" - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - pcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/http/metric" - "github.com/influxdata/influxdb/v2/kit/check" - "github.com/influxdata/influxdb/v2/kit/feature" - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/pkg/errors" - prom "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -const ( - prefixQuery = "/api/v2/query" - traceIDHeader = "Trace-Id" -) - -// FluxBackend is all services and associated parameters required to construct -// the FluxHandler. -type FluxBackend struct { - errors2.HTTPErrorHandler - log *zap.Logger - FluxLogEnabled bool - QueryEventRecorder metric.EventRecorder - - AlgoWProxy FeatureProxyHandler - OrganizationService influxdb.OrganizationService - ProxyQueryService query.ProxyQueryService - FluxLanguageService fluxlang.FluxLanguageService - Flagger feature.Flagger -} - -// NewFluxBackend returns a new instance of FluxBackend. 
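For orientation, a hedged editorial sketch of the two request bodies that `decodeQueryRequest` (earlier in this file) accepts. It is written as if from inside this package using the exported `QueryRequest` type; `base`, `orgID`, and `token` are placeholder strings, and the `Authorization: Token ...` header scheme is assumed.

```go
// Editorial sketch: the two body formats accepted by POST /api/v2/query.

// 1. A raw Flux script, flagged by the application/vnd.flux content type.
raw, _ := http.NewRequest("POST", base+"/api/v2/query?orgID="+orgID,
	strings.NewReader(`from(bucket:"b") |> range(start:-5m)`))
raw.Header.Set("Content-Type", "application/vnd.flux")
raw.Header.Set("Authorization", "Token "+token)

// 2. A JSON-encoded QueryRequest (the default for application/json or an unset type).
body, _ := json.Marshal(QueryRequest{Type: "flux", Query: `from(bucket:"b") |> range(start:-5m)`})
jsonReq, _ := http.NewRequest("POST", base+"/api/v2/query?orgID="+orgID, bytes.NewReader(body))
jsonReq.Header.Set("Content-Type", "application/json")
jsonReq.Header.Set("Authorization", "Token "+token)
// Side-effect-only queries can skip the result payload entirely:
jsonReq.Header.Set("Prefer", "return-no-content")
```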
-func NewFluxBackend(log *zap.Logger, b *APIBackend) *FluxBackend { - return &FluxBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - FluxLogEnabled: b.FluxLogEnabled, - QueryEventRecorder: b.QueryEventRecorder, - AlgoWProxy: b.AlgoWProxy, - ProxyQueryService: b.FluxService, - OrganizationService: b.OrganizationService, - FluxLanguageService: b.FluxLanguageService, - Flagger: b.Flagger, - } -} - -// HTTPDialect is an encoding dialect that can write metadata to HTTP headers -type HTTPDialect interface { - SetHeaders(w http.ResponseWriter) -} - -// FluxHandler implements handling flux queries. -type FluxHandler struct { - *httprouter.Router - errors2.HTTPErrorHandler - log *zap.Logger - FluxLogEnabled bool - - Now func() time.Time - OrganizationService influxdb.OrganizationService - ProxyQueryService query.ProxyQueryService - FluxLanguageService fluxlang.FluxLanguageService - - EventRecorder metric.EventRecorder - - Flagger feature.Flagger -} - -// Prefix provides the route prefix. -func (*FluxHandler) Prefix() string { - return prefixQuery -} - -// NewFluxHandler returns a new handler at /api/v2/query for flux queries. -func NewFluxHandler(log *zap.Logger, b *FluxBackend) *FluxHandler { - h := &FluxHandler{ - Router: NewRouter(b.HTTPErrorHandler), - Now: time.Now, - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - FluxLogEnabled: b.FluxLogEnabled, - - ProxyQueryService: b.ProxyQueryService, - OrganizationService: b.OrganizationService, - EventRecorder: b.QueryEventRecorder, - FluxLanguageService: b.FluxLanguageService, - Flagger: b.Flagger, - } - - // query reponses can optionally be gzip encoded - qh := gziphandler.GzipHandler(http.HandlerFunc(h.handleQuery)) - h.Handler("POST", prefixQuery, withFeatureProxy(b.AlgoWProxy, qh)) - h.Handler("POST", "/api/v2/query/ast", withFeatureProxy(b.AlgoWProxy, http.HandlerFunc(h.postFluxAST))) - h.Handler("POST", "/api/v2/query/analyze", withFeatureProxy(b.AlgoWProxy, http.HandlerFunc(h.postQueryAnalyze))) - h.Handler("GET", "/api/v2/query/suggestions", withFeatureProxy(b.AlgoWProxy, http.HandlerFunc(h.getFluxSuggestions))) - h.Handler("GET", "/api/v2/query/suggestions/:name", withFeatureProxy(b.AlgoWProxy, http.HandlerFunc(h.getFluxSuggestion))) - return h -} - -func (h *FluxHandler) handleQuery(w http.ResponseWriter, r *http.Request) { - const op = "http/handlePostQuery" - span, r := tracing.ExtractFromHTTPRequest(r, "FluxHandler") - defer span.Finish() - - ctx := r.Context() - log := h.log.With(logger.TraceFields(ctx)...) - if id, _, found := tracing.InfoFromContext(ctx); found { - w.Header().Set(traceIDHeader, id) - } - - // TODO(desa): I really don't like how we're recording the usage metrics here - // Ideally this will be moved when we solve https://github.com/influxdata/influxdb/issues/13403 - var orgID platform.ID - var requestBytes int - sw := kithttp.NewStatusResponseWriter(w) - w = sw - defer func() { - h.EventRecorder.Record(ctx, metric.Event{ - OrgID: orgID, - Endpoint: r.URL.Path, // This should be sufficient for the time being as it should only be single endpoint. 
- RequestBytes: requestBytes, - ResponseBytes: sw.ResponseBytes(), - Status: sw.Code(), - }) - }() - - a, err := pcontext.GetAuthorizer(ctx) - if err != nil { - err := &errors2.Error{ - Code: errors2.EUnauthorized, - Msg: "authorization is invalid or missing in the query request", - Op: op, - Err: err, - } - h.HandleHTTPError(ctx, err, w) - return - } - - req, n, err := decodeProxyQueryRequest(ctx, r, a, h.OrganizationService) - if err != nil && err != influxdb.ErrAuthorizerNotSupported { - err := &errors2.Error{ - Code: errors2.EInvalid, - Msg: "failed to decode request body", - Op: op, - Err: err, - } - h.HandleHTTPError(ctx, err, w) - return - } - req.Request.Source = r.Header.Get("User-Agent") - orgID = req.Request.OrganizationID - requestBytes = n - - // Transform the context into one with the request's authorization. - ctx = pcontext.SetAuthorizer(ctx, req.Request.Authorization) - if h.Flagger != nil { - ctx, _ = feature.Annotate(ctx, h.Flagger) - } - - hd, ok := req.Dialect.(HTTPDialect) - if !ok { - err := &errors2.Error{ - Code: errors2.EInvalid, - Msg: fmt.Sprintf("unsupported dialect over HTTP: %T", req.Dialect), - Op: op, - } - h.HandleHTTPError(ctx, err, w) - return - } - hd.SetHeaders(w) - - cw := iocounter.Writer{Writer: w} - stats, err := h.ProxyQueryService.Query(ctx, &cw, req) - if err != nil { - if cw.Count() == 0 { - // Only record the error headers IFF nothing has been written to w. - h.HandleHTTPError(ctx, err, w) - return - } - _ = tracing.LogError(span, err) - log.Info("Error writing response to client", - zap.String("handler", "flux"), - zap.Error(err), - ) - } - - // Detailed logging for flux queries if enabled - if h.FluxLogEnabled { - h.logFluxQuery(cw.Count(), stats, req.Request.Compiler, err) - } - -} - -func (h *FluxHandler) logFluxQuery(n int64, stats flux.Statistics, compiler flux.Compiler, err error) { - var q string - c, ok := compiler.(lang.FluxCompiler) - if !ok { - q = "unknown" - } - q = c.Query - - h.log.Info("Executed Flux query", - zap.String("compiler_type", string(compiler.CompilerType())), - zap.Int64("response_size", n), - zap.String("query", q), - zap.Error(err), - zap.Duration("stat_total_duration", stats.TotalDuration), - zap.Duration("stat_compile_duration", stats.CompileDuration), - zap.Duration("stat_execute_duration", stats.ExecuteDuration), - zap.Int64("stat_max_allocated", stats.MaxAllocated), - zap.Int64("stat_total_allocated", stats.TotalAllocated), - ) -} - -type langRequest struct { - Query string `json:"query"` -} - -type postFluxASTResponse struct { - AST *ast.Package `json:"ast"` -} - -// postFluxAST returns a flux AST for provided flux string -func (h *FluxHandler) postFluxAST(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "FluxHandler") - defer span.Finish() - - var request langRequest - ctx := r.Context() - - err := json.NewDecoder(r.Body).Decode(&request) - if err != nil { - h.HandleHTTPError(ctx, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "invalid json", - Err: err, - }, w) - return - } - - pkg, err := query.Parse(h.FluxLanguageService, request.Query) - if err != nil { - h.HandleHTTPError(ctx, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "invalid AST", - Err: err, - }, w) - return - } - - res := postFluxASTResponse{ - AST: pkg, - } - - if err := encodeResponse(ctx, w, http.StatusOK, res); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -// postQueryAnalyze parses a query and returns any query errors. 
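A hedged editorial sketch of hitting the AST route registered above, mirroring the request shape used by `TestFluxHandler_postFluxAST` later in this diff; `h` is assumed to be a `*FluxHandler` and the usual `httptest`/`strings` imports apply.

```go
// Editorial sketch: ask the handler for the parsed AST of a Flux script.
w := httptest.NewRecorder()
r := httptest.NewRequest("POST", "/api/v2/query/ast", strings.NewReader(`{"query": "from()"}`))
h.postFluxAST(w, r)
// 200 with {"ast": {...}} on success; 400 with a structured error for bad JSON
// or a script that fails to parse. POST /api/v2/query/analyze behaves similarly
// but returns {"errors": [...]} with line/column positions instead of an AST.
```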
-func (h *FluxHandler) postQueryAnalyze(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "FluxHandler") - defer span.Finish() - - ctx := r.Context() - - var req QueryRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - h.HandleHTTPError(ctx, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "invalid json", - Err: err, - }, w) - return - } - - a, err := req.Analyze(h.FluxLanguageService) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - if err := encodeResponse(ctx, w, http.StatusOK, a); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -// fluxParams contain flux funciton parameters as defined by the semantic graph -type fluxParams map[string]string - -// suggestionResponse provides the parameters available for a given Flux function -type suggestionResponse struct { - Name string `json:"name"` - Params fluxParams `json:"params"` -} - -// suggestionsResponse provides a list of available Flux functions -type suggestionsResponse struct { - Functions []suggestionResponse `json:"funcs"` -} - -// getFluxSuggestions returns a list of available Flux functions for the Flux Builder -func (h *FluxHandler) getFluxSuggestions(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "FluxHandler") - defer span.Finish() - - ctx := r.Context() - completer := h.FluxLanguageService.Completer() - names := completer.FunctionNames() - var functions []suggestionResponse - for _, name := range names { - suggestion, err := completer.FunctionSuggestion(name) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - filteredParams := make(fluxParams) - for key, value := range suggestion.Params { - if key == "table" { - continue - } - - filteredParams[key] = value - } - - functions = append(functions, suggestionResponse{ - Name: name, - Params: filteredParams, - }) - } - res := suggestionsResponse{Functions: functions} - - if err := encodeResponse(ctx, w, http.StatusOK, res); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -// getFluxSuggestion returns the function parameters for the requested function -func (h *FluxHandler) getFluxSuggestion(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "FluxHandler") - defer span.Finish() - - ctx := r.Context() - name := httprouter.ParamsFromContext(ctx).ByName("name") - completer := h.FluxLanguageService.Completer() - - suggestion, err := completer.FunctionSuggestion(name) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - res := suggestionResponse{Name: name, Params: suggestion.Params} - if err := encodeResponse(ctx, w, http.StatusOK, res); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -// PrometheusCollectors satisifies the prom.PrometheusCollector interface. -func (h *FluxHandler) PrometheusCollectors() []prom.Collector { - // TODO: gather and return relevant metrics. - return nil -} - -var _ query.ProxyQueryService = (*FluxService)(nil) - -// FluxService connects to Influx via HTTP using tokens to run queries. -type FluxService struct { - Addr string - Token string - Name string - InsecureSkipVerify bool -} - -// Query runs a flux query against a influx server and sends the results to the io.Writer. -// Will use the token from the context over the token within the service struct. 
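Before the implementation that follows, a hedged editorial sketch of driving this client, patterned on `TestFluxService_Query` further down in this diff; the address, token, org ID, and context value are placeholders.

```go
// Editorial sketch: proxy a Flux query to a remote influxd and stream the CSV
// result into a buffer.
svc := &FluxService{Addr: "http://localhost:8086", Token: "my-token"}

req := &query.ProxyRequest{
	Request: query.Request{
		OrganizationID: orgID, // a platform.ID identifying the target org
		Compiler:       lang.FluxCompiler{Query: `from(bucket:"b") |> range(start:-1h)`},
	},
	Dialect: csv.DefaultDialect(),
}

var buf bytes.Buffer
stats, err := svc.Query(ctx, &buf, req)
// stats is currently always the zero flux.Statistics value for this HTTP client;
// buf holds the annotated CSV written by the server.
_ = stats
```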
-func (s *FluxService) Query(ctx context.Context, w io.Writer, r *query.ProxyRequest) (flux.Statistics, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - u, err := NewURL(s.Addr, prefixQuery) - if err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - params := url.Values{} - params.Set(OrgID, r.Request.OrganizationID.String()) - u.RawQuery = params.Encode() - - qreq, err := QueryRequestFromProxyRequest(r) - if err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - var body bytes.Buffer - if err := json.NewEncoder(&body).Encode(qreq); err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - - hreq, err := http.NewRequest("POST", u.String(), &body) - if err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - - SetToken(s.Token, hreq) - - hreq.Header.Set("Content-Type", "application/json") - hreq.Header.Set("Accept", "text/csv") - if r.Request.Source != "" { - hreq.Header.Add("User-Agent", r.Request.Source) - } else if s.Name != "" { - hreq.Header.Add("User-Agent", s.Name) - } - - // Now that the request is all set, we can apply header mutators. - if err := r.Request.ApplyOptions(hreq.Header); err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - - hreq = hreq.WithContext(ctx) - hc := NewClient(u.Scheme, s.InsecureSkipVerify) - resp, err := hc.Do(hreq) - if err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - defer resp.Body.Close() - - if err := CheckError(resp); err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - - if _, err := io.Copy(w, resp.Body); err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - return flux.Statistics{}, nil -} - -func (s FluxService) Check(ctx context.Context) check.Response { - return QueryHealthCheck(s.Addr, s.InsecureSkipVerify) -} - -var _ query.QueryService = (*FluxQueryService)(nil) - -// FluxQueryService implements query.QueryService by making HTTP requests to the /api/v2/query API endpoint. -type FluxQueryService struct { - Addr string - Token string - Name string - InsecureSkipVerify bool -} - -// Query runs a flux query against a influx server and decodes the result -func (s *FluxQueryService) Query(ctx context.Context, r *query.Request) (flux.ResultIterator, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - u, err := NewURL(s.Addr, prefixQuery) - if err != nil { - return nil, tracing.LogError(span, err) - } - params := url.Values{} - params.Set(OrgID, r.OrganizationID.String()) - u.RawQuery = params.Encode() - - preq := &query.ProxyRequest{ - Request: *r, - Dialect: csv.DefaultDialect(), - } - qreq, err := QueryRequestFromProxyRequest(preq) - if err != nil { - return nil, tracing.LogError(span, err) - } - var body bytes.Buffer - if err := json.NewEncoder(&body).Encode(qreq); err != nil { - return nil, tracing.LogError(span, err) - } - - hreq, err := http.NewRequest("POST", u.String(), &body) - if err != nil { - return nil, tracing.LogError(span, err) - } - - SetToken(s.Token, hreq) - - hreq.Header.Set("Content-Type", "application/json") - hreq.Header.Set("Accept", "text/csv") - if r.Source != "" { - hreq.Header.Add("User-Agent", r.Source) - } else if s.Name != "" { - hreq.Header.Add("User-Agent", s.Name) - } - hreq = hreq.WithContext(ctx) - - // Now that the request is all set, we can apply header mutators. 
- if err := r.ApplyOptions(hreq.Header); err != nil { - return nil, tracing.LogError(span, err) - } - - hc := NewClient(u.Scheme, s.InsecureSkipVerify) - resp, err := hc.Do(hreq) - if err != nil { - return nil, tracing.LogError(span, err) - } - // Can't defer resp.Body.Close here because the CSV decoder depends on reading from resp.Body after this function returns. - - if err := CheckError(resp); err != nil { - return nil, tracing.LogError(span, err) - } - - decoder := csv.NewMultiResultDecoder(csv.ResultDecoderConfig{}) - itr, err := decoder.Decode(resp.Body) - if err != nil { - return nil, tracing.LogError(span, err) - } - - return itr, nil -} - -func (s FluxQueryService) Check(ctx context.Context) check.Response { - return QueryHealthCheck(s.Addr, s.InsecureSkipVerify) -} - -// GetQueryResponse runs a flux query with common parameters and returns the response from the query service. -func GetQueryResponse(qr *QueryRequest, addr *url.URL, org, token string, headers ...string) (*http.Response, error) { - if len(headers)%2 != 0 { - return nil, fmt.Errorf("headers must be key value pairs") - } - u := *addr - u.Path = prefixQuery - params := url.Values{} - params.Set(Org, org) - u.RawQuery = params.Encode() - - var body bytes.Buffer - if err := json.NewEncoder(&body).Encode(qr); err != nil { - return nil, err - } - - req, err := http.NewRequest("POST", u.String(), &body) - if err != nil { - return nil, err - } - - SetToken(token, req) - - // Default headers. - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Accept", "text/csv") - // Apply custom headers. - for i := 0; i < len(headers); i += 2 { - req.Header.Set(headers[i], headers[i+1]) - } - - insecureSkipVerify := false - hc := NewClient(u.Scheme, insecureSkipVerify) - return hc.Do(req) -} - -// GetQueryResponseBody reads the body of a response from some query service. -// It also checks for errors in the response. -func GetQueryResponseBody(res *http.Response) ([]byte, error) { - if err := CheckError(res); err != nil { - return nil, err - } - defer res.Body.Close() - return io.ReadAll(res.Body) -} - -// SimpleQuery runs a flux query with common parameters and returns CSV results. -func SimpleQuery(addr *url.URL, flux, org, token string, headers ...string) ([]byte, error) { - header := true - qr := &QueryRequest{ - Type: "flux", - Query: flux, - Dialect: QueryDialect{ - Header: &header, - Delimiter: ",", - CommentPrefix: "#", - DateTimeFormat: "RFC3339", - }, - } - res, err := GetQueryResponse(qr, addr, org, token, headers...) 
- if err != nil { - return nil, err - } - return GetQueryResponseBody(res) -} - -func QueryHealthCheck(url string, insecureSkipVerify bool) check.Response { - u, err := NewURL(url, "/health") - if err != nil { - return check.Response{ - Name: "query health", - Status: check.StatusFail, - Message: errors.Wrap(err, "could not form URL").Error(), - } - } - - hc := NewClient(u.Scheme, insecureSkipVerify) - resp, err := hc.Get(u.String()) - if err != nil { - return check.Response{ - Name: "query health", - Status: check.StatusFail, - Message: errors.Wrap(err, "error getting response").Error(), - } - } - defer resp.Body.Close() - - if resp.StatusCode/100 != 2 { - return check.Response{ - Name: "query health", - Status: check.StatusFail, - Message: fmt.Sprintf("http error %v", resp.StatusCode), - } - } - - var healthResponse check.Response - if err = json.NewDecoder(resp.Body).Decode(&healthResponse); err != nil { - return check.Response{ - Name: "query health", - Status: check.StatusFail, - Message: errors.Wrap(err, "error decoding JSON response").Error(), - } - } - - return healthResponse -} diff --git a/http/query_handler_test.go b/http/query_handler_test.go deleted file mode 100644 index 6997002a98c..00000000000 --- a/http/query_handler_test.go +++ /dev/null @@ -1,699 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "reflect" - "regexp" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/flux" - "github.com/influxdata/flux/csv" - "github.com/influxdata/flux/lang" - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/http/metric" - "github.com/influxdata/influxdb/v2/kit/check" - "github.com/influxdata/influxdb/v2/kit/feature" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - tracetesting "github.com/influxdata/influxdb/v2/kit/tracing/testing" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - influxmock "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/influxdata/influxdb/v2/query/mock" - "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestFluxService_Query(t *testing.T) { - orgID, err := platform.IDFromString("abcdabcdabcdabcd") - if err != nil { - t.Fatal(err) - } - tests := []struct { - name string - token string - ctx context.Context - r *query.ProxyRequest - status int - want flux.Statistics - wantW string - wantErr bool - }{ - { - name: "query", - ctx: context.Background(), - token: "mytoken", - r: &query.ProxyRequest{ - Request: query.Request{ - OrganizationID: *orgID, - Compiler: lang.FluxCompiler{ - Query: "from()", - }, - }, - Dialect: csv.DefaultDialect(), - }, - status: http.StatusOK, - want: flux.Statistics{}, - wantW: "howdy\n", - }, - { - name: "missing org id", - ctx: context.Background(), - token: "mytoken", - r: &query.ProxyRequest{ - Request: query.Request{ - Compiler: lang.FluxCompiler{ - Query: "from()", - }, - }, - Dialect: csv.DefaultDialect(), - }, - wantErr: true, - }, - { - name: "error status", - token: "mytoken", - ctx: context.Background(), - r: 
&query.ProxyRequest{ - Request: query.Request{ - OrganizationID: *orgID, - Compiler: lang.FluxCompiler{ - Query: "from()", - }, - }, - Dialect: csv.DefaultDialect(), - }, - status: http.StatusUnauthorized, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if reqID := r.URL.Query().Get(OrgID); reqID == "" { - if name := r.URL.Query().Get(Org); name == "" { - // Request must have org or orgID. - kithttp.NewErrorHandler(zaptest.NewLogger(t)).HandleHTTPError(context.TODO(), influxdb.ErrInvalidOrgFilter, w) - return - } - } - w.WriteHeader(tt.status) - _, _ = fmt.Fprintln(w, "howdy") - })) - defer ts.Close() - s := &FluxService{ - Addr: ts.URL, - Token: tt.token, - } - - w := &bytes.Buffer{} - got, err := s.Query(tt.ctx, w, tt.r) - if (err != nil) != tt.wantErr { - t.Errorf("FluxService.Query() error = %v, wantErr %v", err, tt.wantErr) - return - } - if diff := cmp.Diff(tt.want, got); diff != "" { - t.Errorf("FluxService.Query() = -want/+got: %v", diff) - } - if gotW := w.String(); gotW != tt.wantW { - t.Errorf("FluxService.Query() = %v, want %v", gotW, tt.wantW) - } - }) - } -} - -func TestFluxQueryService_Query(t *testing.T) { - var orgID platform.ID - orgID.DecodeFromString("aaaaaaaaaaaaaaaa") - tests := []struct { - name string - token string - ctx context.Context - r *query.Request - csv string - status int - want string - wantErr bool - }{ - { - name: "error status", - token: "mytoken", - ctx: context.Background(), - r: &query.Request{ - OrganizationID: orgID, - Compiler: lang.FluxCompiler{ - Query: "from()", - }, - }, - status: http.StatusUnauthorized, - wantErr: true, - }, - { - name: "returns csv", - token: "mytoken", - ctx: context.Background(), - r: &query.Request{ - OrganizationID: orgID, - Compiler: lang.FluxCompiler{ - Query: "from()", - }, - }, - status: http.StatusOK, - csv: `#datatype,string,long,dateTime:RFC3339,double,long,string,boolean,string,string,string -#group,false,false,false,false,false,false,false,true,true,true -#default,_result,,,,,,,,, -,result,table,_time,usage_user,test,mystr,this,cpu,host,_measurement -,,0,2018-08-29T13:08:47Z,10.2,10,yay,true,cpu-total,a,cpui -`, - want: toCRLF(`,_result,0,2018-08-29T13:08:47Z,10.2,10,yay,true,cpu-total,a,cpui - -`), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var orgIDStr string - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - orgIDStr = r.URL.Query().Get(OrgID) - w.WriteHeader(tt.status) - fmt.Fprintln(w, tt.csv) - })) - s := &FluxQueryService{ - Addr: ts.URL, - Token: tt.token, - } - res, err := s.Query(tt.ctx, tt.r) - if (err != nil) != tt.wantErr { - t.Errorf("FluxQueryService.Query() error = %v, wantErr %v", err, tt.wantErr) - return - } - if res != nil && res.Err() != nil { - t.Errorf("FluxQueryService.Query() result error = %v", res.Err()) - return - } - if tt.wantErr { - return - } - defer res.Release() - - enc := csv.NewMultiResultEncoder(csv.ResultEncoderConfig{ - NoHeader: true, - Delimiter: ',', - }) - b := bytes.Buffer{} - n, err := enc.Encode(&b, res) - if err != nil { - t.Errorf("FluxQueryService.Query() encode error = %v", err) - return - } - if n != int64(len(tt.want)) { - t.Errorf("FluxQueryService.Query() encode result = %d, want %d", n, len(tt.want)) - } - if orgIDStr == "" { - t.Error("FluxQueryService.Query() encoded orgID is empty") - } - if got, want := orgIDStr, 
tt.r.OrganizationID.String(); got != want { - t.Errorf("FluxQueryService.Query() encoded orgID = %s, want %s", got, want) - } - - got := b.String() - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("FluxQueryService.Query() =\n%s\n%s", got, tt.want) - } - }) - } -} - -func TestFluxHandler_postFluxAST(t *testing.T) { - tests := []struct { - name string - w *httptest.ResponseRecorder - r *http.Request - want string - status int - }{ - { - name: "get ast from()", - w: httptest.NewRecorder(), - r: httptest.NewRequest("POST", "/api/v2/query/ast", bytes.NewBufferString(`{"query": "from()"}`)), - want: `{"ast":{"type":"Package","package":"main","files":[{"type":"File","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"metadata":"parser-type=rust","package":null,"imports":null,"body":[{"type":"ExpressionStatement","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"expression":{"type":"CallExpression","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":7},"source":"from()"},"callee":{"type":"Identifier","location":{"start":{"line":1,"column":1},"end":{"line":1,"column":5},"source":"from"},"name":"from"}}}]}]}} -`, - status: http.StatusOK, - }, - { - name: "error from bad json", - w: httptest.NewRecorder(), - r: httptest.NewRequest("POST", "/api/v2/query/ast", bytes.NewBufferString(`error!`)), - want: `{"code":"invalid","message":"invalid json: invalid character 'e' looking for beginning of value"}`, - status: http.StatusBadRequest, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := &FluxHandler{ - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(t)), - FluxLanguageService: fluxlang.DefaultService, - } - h.postFluxAST(tt.w, tt.r) - if got := tt.w.Body.String(); got != tt.want { - t.Errorf("http.postFluxAST = got\n%vwant\n%v", got, tt.want) - } - if got := tt.w.Code; got != tt.status { - t.Errorf("http.postFluxAST = got %d\nwant %d", got, tt.status) - } - }) - } -} - -func TestFluxService_Check(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(HealthHandler)) - defer ts.Close() - s := &FluxService{ - Addr: ts.URL, - } - got := s.Check(context.Background()) - want := check.Response{ - Name: "influxdb", - Status: "pass", - Message: "ready for queries and writes", - Checks: check.Responses{}, - } - if !cmp.Equal(want, got) { - t.Errorf("unexpected response -want/+got: " + cmp.Diff(want, got)) - } -} - -func TestFluxQueryService_Check(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(HealthHandler)) - defer ts.Close() - s := &FluxQueryService{ - Addr: ts.URL, - } - got := s.Check(context.Background()) - want := check.Response{ - Name: "influxdb", - Status: "pass", - Message: "ready for queries and writes", - Checks: check.Responses{}, - } - if !cmp.Equal(want, got) { - t.Errorf("unexpected response -want/+got: " + cmp.Diff(want, got)) - } -} - -var crlfPattern = regexp.MustCompile(`\r?\n`) - -func toCRLF(data string) string { - return crlfPattern.ReplaceAllString(data, "\r\n") -} - -type noopEventRecorder struct{} - -func (noopEventRecorder) Record(context.Context, metric.Event) {} - -var _ metric.EventRecorder = noopEventRecorder{} - -// Certain error cases must be encoded as influxdb.Error so they can be properly decoded clientside. 
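// Editor's note: an illustrative sketch, not part of the deleted file. It spells out the
// client-side half of the contract stated in the comment above: because the handler
// encodes failures as an influxdb errors.Error, callers can decode the response body back
// into a structured error, exactly as the tests below do. decodeErrorBody is an invented
// name; errors is kit/platform/errors and json is encoding/json, both already imported here.
func decodeErrorBody(body []byte) (*errors.Error, error) {
	var ierr errors.Error
	if err := json.Unmarshal(body, &ierr); err != nil {
		// The body was not a structured influxdb error payload.
		return nil, err
	}
	return &ierr, nil
}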
-func TestFluxHandler_PostQuery_Errors(t *testing.T) { - defer tracetesting.SetupInMemoryTracing(t.Name())() - - store := itesting.NewTestInmemStore(t) - orgSVC := tenant.NewService(tenant.NewStore(store)) - b := &FluxBackend{ - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(t)), - log: zaptest.NewLogger(t), - QueryEventRecorder: noopEventRecorder{}, - OrganizationService: orgSVC, - ProxyQueryService: &mock.ProxyQueryService{ - QueryF: func(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) { - return flux.Statistics{}, &errors.Error{ - Code: errors.EInvalid, - Msg: "some query error", - } - }, - }, - FluxLanguageService: fluxlang.DefaultService, - Flagger: feature.DefaultFlagger(), - } - h := NewFluxHandler(zaptest.NewLogger(t), b) - - t.Run("missing authorizer", func(t *testing.T) { - ts := httptest.NewServer(h) - defer ts.Close() - - resp, err := http.Post(ts.URL+"/api/v2/query", "application/json", strings.NewReader("{}")) - if err != nil { - t.Fatal(err) - } - - defer resp.Body.Close() - - if actual := resp.Header.Get("Trace-Id"); actual == "" { - t.Error("expected trace ID header") - } - - if resp.StatusCode != http.StatusUnauthorized { - t.Errorf("expected unauthorized status, got %d", resp.StatusCode) - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - var ierr errors.Error - if err := json.Unmarshal(body, &ierr); err != nil { - t.Logf("failed to json unmarshal into influxdb.error: %q", body) - t.Fatal(err) - } - - if !strings.Contains(ierr.Msg, "authorization is") { - t.Fatalf("expected error to mention authorization, got %s", ierr.Msg) - } - }) - - t.Run("authorizer but syntactically invalid JSON request", func(t *testing.T) { - w := httptest.NewRecorder() - req, err := http.NewRequest("POST", "/api/v2/query", strings.NewReader("oops")) - if err != nil { - t.Fatal(err) - } - authz := &influxdb.Authorization{} - req = req.WithContext(icontext.SetAuthorizer(req.Context(), authz)) - - h.handleQuery(w, req) - - if actual := w.Header().Get("Trace-Id"); actual == "" { - t.Error("expected trace ID header") - } - - if w.Code != http.StatusBadRequest { - t.Errorf("expected bad request status, got %d", w.Code) - } - - body := w.Body.Bytes() - var ierr errors.Error - if err := json.Unmarshal(body, &ierr); err != nil { - t.Logf("failed to json unmarshal into influxdb.error: %q", body) - t.Fatal(err) - } - - if !strings.Contains(ierr.Msg, "decode request body") { - t.Fatalf("expected error to mention decoding, got %s", ierr.Msg) - } - }) - - t.Run("valid request but executing query results in client error", func(t *testing.T) { - org := influxdb.Organization{Name: t.Name()} - if err := orgSVC.CreateOrganization(context.Background(), &org); err != nil { - t.Fatal(err) - } - - req, err := http.NewRequest("POST", "/api/v2/query?orgID="+org.ID.String(), bytes.NewReader([]byte("buckets()"))) - if err != nil { - t.Fatal(err) - } - authz := &influxdb.Authorization{} - req = req.WithContext(icontext.SetAuthorizer(req.Context(), authz)) - req.Header.Set("Content-Type", "application/vnd.flux") - - w := httptest.NewRecorder() - h.handleQuery(w, req) - - if actual := w.Header().Get("Trace-Id"); actual == "" { - t.Error("expected trace ID header") - } - - if w.Code != http.StatusBadRequest { - t.Errorf("expected bad request status, got %d", w.Code) - } - - body := w.Body.Bytes() - t.Logf("%s", body) - var ierr errors.Error - if err := json.Unmarshal(body, &ierr); err != nil { - t.Logf("failed to json unmarshal into 
influxdb.error: %q", body) - t.Fatal(err) - } - - if got, want := ierr.Code, errors.EInvalid; got != want { - t.Fatalf("unexpected error code -want/+got:\n\t- %v\n\t+ %v", want, got) - } - if ierr.Msg != "some query error" { - t.Fatalf("expected error message to mention 'some query error', got %s", ierr.Err.Error()) - } - }) -} - -func TestFluxService_Query_gzip(t *testing.T) { - // orgService is just to mock out orgs by returning - // the same org every time. - orgService := &influxmock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: id, - Name: id.String(), - }, nil - }, - - FindOrganizationF: func(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: platform.ID(1), - Name: platform.ID(1).String(), - }, nil - }, - } - - // queryService is test setup that returns the same CSV for all queries. - queryService := &mock.ProxyQueryService{ - QueryF: func(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) { - _, _ = w.Write([]byte(`#datatype,string,long,dateTime:RFC3339,double,long,string,boolean,string,string,string -#group,false,false,false,false,false,false,false,true,true,true -#default,_result,,,,,,,,, -,result,table,_time,usage_user,test,mystr,this,cpu,host,_measurement -,,0,2018-08-29T13:08:47Z,10.2,10,yay,true,cpu-total,a,cpui`)) - return flux.Statistics{}, nil - }, - } - - // authService is yet more test setup that returns an operator auth for any token. - authService := &influxmock.AuthorizationService{ - FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) { - return &influxdb.Authorization{ - ID: platform.ID(1), - OrgID: platform.ID(1), - Permissions: influxdb.OperPermissions(), - }, nil - }, - } - - fluxBackend := &FluxBackend{ - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(t)), - log: zaptest.NewLogger(t), - QueryEventRecorder: noopEventRecorder{}, - OrganizationService: orgService, - ProxyQueryService: queryService, - FluxLanguageService: fluxlang.DefaultService, - Flagger: feature.DefaultFlagger(), - } - - fluxHandler := NewFluxHandler(zaptest.NewLogger(t), fluxBackend) - - // fluxHandling expects authorization to be on the request context. - // AuthenticationHandler extracts the token from headers and places - // the auth on context. 
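// Editor's note: an illustrative sketch, not part of the deleted file. It restates the
// wiring described in the comment above: the flux handler only works once an authorizer
// has been placed on the request context, which AuthenticationHandler does after checking
// the token header. withAuthorizer is an invented helper name; icontext and influxdb refer
// to packages already imported by this test file.
func withAuthorizer(next http.Handler, a influxdb.Authorizer) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Attach the authorizer to the context so downstream handlers can read it back.
		r = r.WithContext(icontext.SetAuthorizer(r.Context(), a))
		next.ServeHTTP(w, r)
	})
}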
- auth := NewAuthenticationHandler(zaptest.NewLogger(t), kithttp.NewErrorHandler(zaptest.NewLogger(t))) - auth.AuthorizationService = authService - auth.Handler = fluxHandler - auth.UserService = &influxmock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{}, nil - }, - } - - ts := httptest.NewServer(auth) - defer ts.Close() - - newFakeRequest := func() *http.Request { - req, err := http.NewRequest("POST", ts.URL+"/api/v2/query?orgID=0000000000000001", bytes.NewReader([]byte("buckets()"))) - if err != nil { - t.Fatal(err) - } - - req.Header.Set("Content-Type", "application/vnd.flux") - SetToken("not important hard coded test response", req) - return req - } - - // disable any gzip compression - client := &http.Client{ - Transport: &http.Transport{ - MaxIdleConns: 10, - IdleConnTimeout: 30 * time.Second, - DisableCompression: true, - }, - } - - req := newFakeRequest() - res, err := client.Do(req) - if err != nil { - t.Fatalf("unable to POST to server: %v", err) - } - - if res.StatusCode != http.StatusOK { - t.Errorf("unexpected status code %s", res.Status) - } - - identityBody, _ := io.ReadAll(res.Body) - _ = res.Body.Close() - - // now, we try to use gzip - req = newFakeRequest() - // If we enable compression, we should get the same response. - client = &http.Client{ - Transport: &http.Transport{ - MaxIdleConns: 10, - IdleConnTimeout: 30 * time.Second, - DisableCompression: false, - }, - } - - res, err = client.Do(req) - if err != nil { - t.Fatalf("unable to POST to server: %v", err) - } - - gzippedBody, _ := io.ReadAll(res.Body) - _ = res.Body.Close() - - if res.StatusCode != http.StatusOK { - t.Errorf("unexpected status code %s", res.Status) - } - - if string(identityBody) != string(gzippedBody) { - t.Errorf("unexpected difference in identity and compressed bodies:\n%s\n%s", string(identityBody), string(gzippedBody)) - } -} - -func Benchmark_Query_no_gzip(b *testing.B) { - benchmarkQuery(b, true) -} - -func Benchmark_Query_gzip(b *testing.B) { - benchmarkQuery(b, false) -} - -func benchmarkQuery(b *testing.B, disableCompression bool) { - // orgService is just to mock out orgs by returning - // the same org every time. - orgService := &influxmock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: id, - Name: id.String(), - }, nil - }, - - FindOrganizationF: func(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: platform.ID(1), - Name: platform.ID(1).String(), - }, nil - }, - } - - // queryService is test setup that returns the same CSV for all queries. - queryService := &mock.ProxyQueryService{ - QueryF: func(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) { - _, _ = w.Write([]byte(`#datatype,string,long,dateTime:RFC3339,double,long,string,boolean,string,string,string -#group,false,false,false,false,false,false,false,true,true,true -#default,_result,,,,,,,,, -,result,table,_time,usage_user,test,mystr,this,cpu,host,_measurement -,,0,2018-08-29T13:08:47Z,10.2,10,yay,true,cpu-total,a,cpui`)) - return flux.Statistics{}, nil - }, - } - - // authService is yet more test setup that returns an operator auth for any token. 
- authService := &influxmock.AuthorizationService{ - FindAuthorizationByTokenFn: func(ctx context.Context, token string) (*influxdb.Authorization, error) { - return &influxdb.Authorization{ - ID: platform.ID(1), - OrgID: platform.ID(1), - Permissions: influxdb.OperPermissions(), - }, nil - }, - } - - fluxBackend := &FluxBackend{ - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(b)), - log: zaptest.NewLogger(b), - QueryEventRecorder: noopEventRecorder{}, - OrganizationService: orgService, - ProxyQueryService: queryService, - FluxLanguageService: fluxlang.DefaultService, - Flagger: feature.DefaultFlagger(), - } - - fluxHandler := NewFluxHandler(zaptest.NewLogger(b), fluxBackend) - - // fluxHandling expects authorization to be on the request context. - // AuthenticationHandler extracts the token from headers and places - // the auth on context. - auth := NewAuthenticationHandler(zaptest.NewLogger(b), kithttp.NewErrorHandler(zaptest.NewLogger(b))) - auth.AuthorizationService = authService - auth.Handler = fluxHandler - - ts := httptest.NewServer(auth) - defer ts.Close() - - newFakeRequest := func() *http.Request { - req, err := http.NewRequest("POST", ts.URL+"/api/v2/query?orgID=0000000000000001", bytes.NewReader([]byte("buckets()"))) - if err != nil { - b.Fatal(err) - } - - req.Header.Set("Content-Type", "application/vnd.flux") - SetToken("not important hard coded test response", req) - return req - } - - client := &http.Client{ - Transport: &http.Transport{ - MaxIdleConns: 10, - IdleConnTimeout: 30 * time.Second, - DisableCompression: disableCompression, - }, - } - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - req := newFakeRequest() - - res, err := client.Do(req) - if err != nil { - b.Fatalf("unable to POST to server: %v", err) - } - - if res.StatusCode != http.StatusOK { - b.Errorf("unexpected status code %s", res.Status) - } - - _, _ = io.ReadAll(res.Body) - _ = res.Body.Close() - - } -} diff --git a/http/query_test.go b/http/query_test.go deleted file mode 100644 index 99b21977c80..00000000000 --- a/http/query_test.go +++ /dev/null @@ -1,665 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "net/http" - "net/http/httptest" - "reflect" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/csv" - "github.com/influxdata/flux/lang" - platform "github.com/influxdata/influxdb/v2" - _ "github.com/influxdata/influxdb/v2/fluxinit/static" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/query" -) - -var cmpOptions = cmp.Options{ - cmpopts.IgnoreTypes(ast.BaseNode{}), - cmpopts.IgnoreUnexported(query.ProxyRequest{}), - cmpopts.IgnoreUnexported(query.Request{}), - cmpopts.EquateEmpty(), -} - -func TestQueryRequest_WithDefaults(t *testing.T) { - type fields struct { - AST json.RawMessage - Query string - Type string - Dialect QueryDialect - org *platform.Organization - } - tests := []struct { - name string - fields fields - want QueryRequest - }{ - { - name: "empty query has defaults set", - want: QueryRequest{ - Type: "flux", - Dialect: QueryDialect{ - Delimiter: ",", - DateTimeFormat: "RFC3339", - Header: func(x bool) *bool { return &x }(true), - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := QueryRequest{ - AST: tt.fields.AST, 
- Query: tt.fields.Query, - Type: tt.fields.Type, - Dialect: tt.fields.Dialect, - Org: tt.fields.org, - } - if got := r.WithDefaults(); !reflect.DeepEqual(got, tt.want) { - t.Errorf("QueryRequest.WithDefaults() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestQueryRequest_Validate(t *testing.T) { - type fields struct { - Extern json.RawMessage - AST json.RawMessage - Query string - Type string - Dialect QueryDialect - org *platform.Organization - } - tests := []struct { - name string - fields fields - wantErr bool - }{ - { - name: "requires query, spec, or ast", - fields: fields{ - Type: "flux", - }, - wantErr: true, - }, - { - name: "requires flux type", - fields: fields{ - Query: "howdy", - Type: "doody", - }, - wantErr: true, - }, - { - name: "comment must be a single character", - fields: fields{ - Query: "from()", - Type: "flux", - Dialect: QueryDialect{ - CommentPrefix: "error!", - }, - }, - wantErr: true, - }, - { - name: "delimiter must be a single character", - fields: fields{ - Query: "from()", - Type: "flux", - Dialect: QueryDialect{ - Delimiter: "", - }, - }, - wantErr: true, - }, - { - name: "characters must be unicode runes", - fields: fields{ - Query: "from()", - Type: "flux", - Dialect: QueryDialect{ - Delimiter: string([]byte{0x80}), - }, - }, - wantErr: true, - }, - { - name: "unknown annotations", - fields: fields{ - Query: "from()", - Type: "flux", - Dialect: QueryDialect{ - Delimiter: ",", - Annotations: []string{"error"}, - }, - }, - wantErr: true, - }, - { - name: "unknown date time format", - fields: fields{ - Query: "from()", - Type: "flux", - Dialect: QueryDialect{ - Delimiter: ",", - DateTimeFormat: "error", - }, - }, - wantErr: true, - }, - { - name: "valid query", - fields: fields{ - Query: "from()", - Type: "flux", - Dialect: QueryDialect{ - Delimiter: ",", - DateTimeFormat: "RFC3339", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := QueryRequest{ - Extern: tt.fields.Extern, - AST: tt.fields.AST, - Query: tt.fields.Query, - Type: tt.fields.Type, - Dialect: tt.fields.Dialect, - Org: tt.fields.org, - } - if err := r.Validate(); (err != nil) != tt.wantErr { - t.Errorf("QueryRequest.Validate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestQueryRequest_proxyRequest(t *testing.T) { - type fields struct { - Extern json.RawMessage - AST json.RawMessage - Query string - Type string - Dialect QueryDialect - Now time.Time - org *platform.Organization - } - tests := []struct { - name string - fields fields - now func() time.Time - want *query.ProxyRequest - wantErr bool - }{ - { - name: "requires query, spec, or ast", - fields: fields{ - Type: "flux", - }, - wantErr: true, - }, - { - name: "valid query", - fields: fields{ - Query: "howdy", - Type: "flux", - Dialect: QueryDialect{ - Delimiter: ",", - DateTimeFormat: "RFC3339", - }, - org: &platform.Organization{}, - }, - now: func() time.Time { return time.Unix(1, 1) }, - want: &query.ProxyRequest{ - Request: query.Request{ - Compiler: lang.FluxCompiler{ - Now: time.Unix(1, 1), - Query: `howdy`, - }, - }, - Dialect: &csv.Dialect{ - ResultEncoderConfig: csv.ResultEncoderConfig{ - NoHeader: false, - Delimiter: ',', - }, - }, - }, - }, - { - name: "valid AST", - fields: fields{ - AST: mustMarshal(&ast.Package{}), - Type: "flux", - Dialect: QueryDialect{ - Delimiter: ",", - DateTimeFormat: "RFC3339", - }, - Now: time.Unix(1, 1), - org: &platform.Organization{}, - }, - now: func() time.Time { return time.Unix(2, 2) }, - want: &query.ProxyRequest{ - 
Request: query.Request{ - Compiler: lang.ASTCompiler{ - AST: mustMarshal(&ast.Package{}), - Now: time.Unix(1, 1), - }, - }, - Dialect: &csv.Dialect{ - ResultEncoderConfig: csv.ResultEncoderConfig{ - NoHeader: false, - Delimiter: ',', - }, - }, - }, - }, - { - name: "valid AST with calculated now", - fields: fields{ - AST: mustMarshal(&ast.Package{}), - Type: "flux", - Dialect: QueryDialect{ - Delimiter: ",", - DateTimeFormat: "RFC3339", - }, - org: &platform.Organization{}, - }, - now: func() time.Time { return time.Unix(2, 2) }, - want: &query.ProxyRequest{ - Request: query.Request{ - Compiler: lang.ASTCompiler{ - AST: mustMarshal(&ast.Package{}), - Now: time.Unix(2, 2), - }, - }, - Dialect: &csv.Dialect{ - ResultEncoderConfig: csv.ResultEncoderConfig{ - NoHeader: false, - Delimiter: ',', - }, - }, - }, - }, - { - name: "valid AST with extern", - fields: fields{ - Extern: mustMarshal(&ast.File{ - Body: []ast.Statement{ - &ast.OptionStatement{ - Assignment: &ast.VariableAssignment{ - ID: &ast.Identifier{Name: "x"}, - Init: &ast.IntegerLiteral{Value: 0}, - }, - }, - }, - }), - AST: mustMarshal(&ast.Package{}), - Type: "flux", - Dialect: QueryDialect{ - Delimiter: ",", - DateTimeFormat: "RFC3339", - }, - org: &platform.Organization{}, - }, - now: func() time.Time { return time.Unix(1, 1) }, - want: &query.ProxyRequest{ - Request: query.Request{ - Compiler: lang.ASTCompiler{ - Extern: mustMarshal(&ast.File{ - Body: []ast.Statement{ - &ast.OptionStatement{ - Assignment: &ast.VariableAssignment{ - ID: &ast.Identifier{Name: "x"}, - Init: &ast.IntegerLiteral{Value: 0}, - }, - }, - }, - }), - AST: mustMarshal(&ast.Package{}), - Now: time.Unix(1, 1), - }, - }, - Dialect: &csv.Dialect{ - ResultEncoderConfig: csv.ResultEncoderConfig{ - NoHeader: false, - Delimiter: ',', - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := QueryRequest{ - Extern: tt.fields.Extern, - AST: tt.fields.AST, - Query: tt.fields.Query, - Type: tt.fields.Type, - Dialect: tt.fields.Dialect, - Now: tt.fields.Now, - Org: tt.fields.org, - } - got, err := r.proxyRequest(tt.now) - if (err != nil) != tt.wantErr { - t.Errorf("QueryRequest.ProxyRequest() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !cmp.Equal(got, tt.want, cmpOptions...) 
{ - t.Errorf("QueryRequest.ProxyRequest() -want/+got\n%s", cmp.Diff(tt.want, got, cmpOptions...)) - } - }) - } -} - -func mustMarshal(p ast.Node) []byte { - bs, err := json.Marshal(p) - if err != nil { - panic(err) - } - return bs -} - -func Test_decodeQueryRequest(t *testing.T) { - type args struct { - ctx context.Context - r *http.Request - svc platform.OrganizationService - } - tests := []struct { - name string - args args - want *QueryRequest - wantErr bool - }{ - { - name: "valid query request", - args: args{ - r: httptest.NewRequest("POST", "/", bytes.NewBufferString(`{"query": "from()"}`)), - svc: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - return &platform.Organization{ - ID: func() platform2.ID { s, _ := platform2.IDFromString("deadbeefdeadbeef"); return *s }(), - }, nil - }, - }, - }, - want: &QueryRequest{ - Query: "from()", - Type: "flux", - Dialect: QueryDialect{ - Delimiter: ",", - DateTimeFormat: "RFC3339", - Header: func(x bool) *bool { return &x }(true), - }, - Org: &platform.Organization{ - ID: func() platform2.ID { s, _ := platform2.IDFromString("deadbeefdeadbeef"); return *s }(), - }, - }, - }, - { - name: "valid query request with explicit content-type", - args: args{ - r: func() *http.Request { - r := httptest.NewRequest("POST", "/", bytes.NewBufferString(`{"query": "from()"}`)) - r.Header.Set("Content-Type", "application/json") - return r - }(), - svc: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - return &platform.Organization{ - ID: func() platform2.ID { s, _ := platform2.IDFromString("deadbeefdeadbeef"); return *s }(), - }, nil - }, - }, - }, - want: &QueryRequest{ - Query: "from()", - Type: "flux", - Dialect: QueryDialect{ - Delimiter: ",", - DateTimeFormat: "RFC3339", - Header: func(x bool) *bool { return &x }(true), - }, - Org: &platform.Organization{ - ID: func() platform2.ID { s, _ := platform2.IDFromString("deadbeefdeadbeef"); return *s }(), - }, - }, - }, - { - name: "error decoding json", - args: args{ - r: httptest.NewRequest("POST", "/", bytes.NewBufferString(`error`)), - }, - wantErr: true, - }, - { - name: "error validating query", - args: args{ - r: httptest.NewRequest("POST", "/", bytes.NewBufferString(`{}`)), - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, _, err := decodeQueryRequest(tt.args.ctx, tt.args.r, tt.args.svc) - if (err != nil) != tt.wantErr { - t.Errorf("decodeQueryRequest() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("decodeQueryRequest() = %v, want %v", got, tt.want) - } - }) - } -} - -func Test_decodeProxyQueryRequest(t *testing.T) { - externJSON := `{ - "type": "File", - "body": [ - { - "type": "OptionStatement", - "assignment": { - "type": "VariableAssignment", - "id": { - "type": "Identifier", - "name": "x" - }, - "init": { - "type": "IntegerLiteral", - "value": "0" - } - } - } - ] - }` - type args struct { - ctx context.Context - r *http.Request - auth *platform.Authorization - svc platform.OrganizationService - } - tests := []struct { - name string - args args - want *query.ProxyRequest - wantErr bool - }{ - { - name: "valid post query request", - args: args{ - r: httptest.NewRequest("POST", "/", bytes.NewBufferString(`{"query": "from()"}`)), - svc: &mock.OrganizationService{ - FindOrganizationF: func(ctx 
context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - return &platform.Organization{ - ID: func() platform2.ID { s, _ := platform2.IDFromString("deadbeefdeadbeef"); return *s }(), - }, nil - }, - }, - }, - want: &query.ProxyRequest{ - Request: query.Request{ - OrganizationID: func() platform2.ID { s, _ := platform2.IDFromString("deadbeefdeadbeef"); return *s }(), - Compiler: lang.FluxCompiler{ - Query: "from()", - }, - }, - Dialect: &csv.Dialect{ - ResultEncoderConfig: csv.ResultEncoderConfig{ - NoHeader: false, - Delimiter: ',', - }, - }, - }, - }, - { - name: "valid query including extern definition", - args: args{ - r: httptest.NewRequest("POST", "/", bytes.NewBufferString(` -{ - "extern": `+externJSON+`, - "query": "from(bucket: \"mybucket\")" -} -`)), - svc: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - return &platform.Organization{ - ID: func() platform2.ID { s, _ := platform2.IDFromString("deadbeefdeadbeef"); return *s }(), - }, nil - }, - }, - }, - want: &query.ProxyRequest{ - Request: query.Request{ - OrganizationID: func() platform2.ID { s, _ := platform2.IDFromString("deadbeefdeadbeef"); return *s }(), - Compiler: lang.FluxCompiler{ - Extern: []byte(externJSON), - Query: `from(bucket: "mybucket")`, - }, - }, - Dialect: &csv.Dialect{ - ResultEncoderConfig: csv.ResultEncoderConfig{ - NoHeader: false, - Delimiter: ',', - }, - }, - }, - }, - { - name: "valid post vnd.flux query request", - args: args{ - r: func() *http.Request { - r := httptest.NewRequest("POST", "/api/v2/query?org=myorg", strings.NewReader(`from(bucket: "mybucket")`)) - r.Header.Set("Content-Type", "application/vnd.flux") - return r - }(), - svc: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - return &platform.Organization{ - ID: func() platform2.ID { s, _ := platform2.IDFromString("deadbeefdeadbeef"); return *s }(), - }, nil - }, - }, - }, - want: &query.ProxyRequest{ - Request: query.Request{ - OrganizationID: func() platform2.ID { s, _ := platform2.IDFromString("deadbeefdeadbeef"); return *s }(), - Compiler: lang.FluxCompiler{ - Query: `from(bucket: "mybucket")`, - }, - }, - Dialect: &csv.Dialect{ - ResultEncoderConfig: csv.ResultEncoderConfig{ - NoHeader: false, - Delimiter: ',', - }, - }, - }, - }, - } - cmpOptions := append(cmpOptions, - cmpopts.IgnoreFields(lang.ASTCompiler{}, "Now"), - cmpopts.IgnoreFields(lang.FluxCompiler{}, "Now"), - ) - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, _, err := decodeProxyQueryRequest(tt.args.ctx, tt.args.r, tt.args.auth, tt.args.svc) - if (err != nil) != tt.wantErr { - t.Errorf("decodeProxyQueryRequest() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !cmp.Equal(tt.want, got, cmpOptions...) 
{ - t.Errorf("decodeProxyQueryRequest() -want/+got\n%s", cmp.Diff(tt.want, got, cmpOptions...)) - } - }) - } -} - -func TestProxyRequestToQueryRequest_Compilers(t *testing.T) { - tests := []struct { - name string - pr query.ProxyRequest - want QueryRequest - }{ - { - name: "flux compiler copied", - pr: query.ProxyRequest{ - Dialect: &query.NoContentDialect{}, - Request: query.Request{ - Compiler: lang.FluxCompiler{ - Query: `howdy`, - Now: time.Unix(45, 45), - }, - }, - }, - want: QueryRequest{ - Type: "flux", - Query: `howdy`, - PreferNoContent: true, - Now: time.Unix(45, 45), - }, - }, - { - name: "AST compiler copied", - pr: query.ProxyRequest{ - Dialect: &query.NoContentDialect{}, - Request: query.Request{ - Compiler: lang.ASTCompiler{ - Now: time.Unix(45, 45), - AST: mustMarshal(&ast.Package{}), - }, - }, - }, - want: QueryRequest{ - Type: "flux", - PreferNoContent: true, - AST: mustMarshal(&ast.Package{}), - Now: time.Unix(45, 45), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - got, err := QueryRequestFromProxyRequest(&tt.pr) - if err != nil { - t.Error(err) - } else if !reflect.DeepEqual(*got, tt.want) { - t.Errorf("QueryRequestFromProxyRequest = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/http/ready.go b/http/ready.go deleted file mode 100644 index a70c32404eb..00000000000 --- a/http/ready.go +++ /dev/null @@ -1,37 +0,0 @@ -package http - -import ( - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/influxdata/influxdb/v2/toml" -) - -// ReadyHandler is a default readiness handler. The default behaviour is always ready. -func ReadyHandler() http.Handler { - up := time.Now() - fn := func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.WriteHeader(http.StatusOK) - - var status = struct { - Status string `json:"status"` - Start time.Time `json:"started"` - // TODO(jsteenb2): learn why and leave comment for this being a toml.Duration - Up toml.Duration `json:"up"` - }{ - Status: "ready", - Start: up, - Up: toml.Duration(time.Since(up)), - } - - enc := json.NewEncoder(w) - enc.SetIndent("", " ") - if err := enc.Encode(status); err != nil { - fmt.Fprintf(w, "Error encoding status data: %v\n", err) - } - } - return http.HandlerFunc(fn) -} diff --git a/http/ready_test.go b/http/ready_test.go deleted file mode 100644 index a7825ea5a5b..00000000000 --- a/http/ready_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package http - -import ( - "encoding/json" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestReadyHandler(t *testing.T) { - w := httptest.NewRecorder() - r := httptest.NewRequest(http.MethodGet, "/health", nil) - ReadyHandler().ServeHTTP(w, r) - res := w.Result() - contentType := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != 200 { - t.Errorf("TestReadyHandler. ReadyHandler() StatusCode = %v, want 200", res.StatusCode) - } - if !strings.HasPrefix(contentType, "application/json") { - t.Errorf("TestReadyHandler. ReadyHandler() Content-Type = %v, want application/json", contentType) - } - var content map[string]interface{} - if err := json.Unmarshal(body, &content); err != nil { - t.Errorf("TestReadyHandler. ReadyHandler() error unmarshaling json body %v", err) - return - } - if val := content["status"]; val != "ready" { - t.Errorf("TestReadyHandler. ReadyHandler() .status = %v, want 'ready'", val) - } - if val := content["started"]; val == nil { - t.Errorf("TestReadyHandler. 
ReadyHandler() .started is not returned") - } - if val := content["up"]; val == nil { - t.Errorf("TestReadyHandler. ReadyHandler() .up is not returned") - } -} diff --git a/http/redoc.go b/http/redoc.go deleted file mode 100644 index dc491d4d1ef..00000000000 --- a/http/redoc.go +++ /dev/null @@ -1,39 +0,0 @@ -package http - -import ( - "fmt" - "net/http" -) - -const index = ` - - - InfluxDB 2 API - - - - - - - - - - -` - -// Redoc servers the swagger JSON using the redoc package. -func Redoc(swagger string) http.HandlerFunc { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/html; charset=utf-8") - w.WriteHeader(http.StatusOK) - - _, _ = w.Write([]byte(fmt.Sprintf(index, swagger))) - }) -} diff --git a/http/requests.go b/http/requests.go deleted file mode 100644 index b1d3e374d88..00000000000 --- a/http/requests.go +++ /dev/null @@ -1,78 +0,0 @@ -package http - -import ( - "context" - "net/http" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -const ( - // OrgID is the http query parameter to specify an organization by ID. - OrgID = "orgID" - // Org is the http query parameter that take either the ID or Name interchangeably - Org = "org" - // BucketID is the http query parameter to specify an bucket by ID. - BucketID = "bucketID" - // Bucket is the http query parameter take either the ID or Name interchangably - Bucket = "bucket" -) - -// queryOrganization returns the organization for any http request. -// -// It checks the org= and then orgID= parameter of the request. -// -// This will try to find the organization using an ID string or -// the name. It interprets the &org= parameter as either the name -// or the ID. -func queryOrganization(ctx context.Context, r *http.Request, svc platform.OrganizationService) (o *platform.Organization, err error) { - filter := platform.OrganizationFilter{} - if organization := r.URL.Query().Get(Org); organization != "" { - if id, err := platform2.IDFromString(organization); err == nil { - filter.ID = id - } else { - filter.Name = &organization - } - } - - if reqID := r.URL.Query().Get(OrgID); reqID != "" { - filter.ID, err = platform2.IDFromString(reqID) - if err != nil { - return nil, err - } - } - return svc.FindOrganization(ctx, filter) -} - -// queryBucket returns the bucket for any http request. -// -// It checks the bucket= and then bucketID= parameter of the request. -// -// This will try to find the bucket using an ID string or -// the name. It interprets the &bucket= parameter as either the name -// or the ID. 
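// Editor's note: an illustrative sketch, not part of the deleted file, of one plausible
// way a handler could combine queryOrganization above with queryBucket defined just
// below: resolve the organization first, then look the bucket up within it. resolveTarget
// is an invented name; platform is the alias this file already uses for the root influxdb
// package.
func resolveTarget(ctx context.Context, r *http.Request, orgSvc platform.OrganizationService, bucketSvc platform.BucketService) (*platform.Organization, *platform.Bucket, error) {
	o, err := queryOrganization(ctx, r, orgSvc)
	if err != nil {
		return nil, nil, err
	}
	b, err := queryBucket(ctx, o.ID, r, bucketSvc)
	if err != nil {
		return nil, nil, err
	}
	return o, b, nil
}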
-func queryBucket(ctx context.Context, orgID platform2.ID, r *http.Request, svc platform.BucketService) (b *platform.Bucket, err error) { - filter := platform.BucketFilter{OrganizationID: &orgID} - if bucket := r.URL.Query().Get(Bucket); bucket != "" { - if id, err := platform2.IDFromString(bucket); err == nil { - filter.ID = id - } else { - filter.Name = &bucket - } - } - if reqID := r.URL.Query().Get(BucketID); reqID != "" { - filter.ID, err = platform2.IDFromString(reqID) - if err != nil { - return nil, err - } - } - if filter.ID == nil && filter.Name == nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "Please provide either bucketID or bucket", - } - } - return svc.FindBucket(ctx, filter) -} diff --git a/http/requests_test.go b/http/requests_test.go deleted file mode 100644 index 98177e0205e..00000000000 --- a/http/requests_test.go +++ /dev/null @@ -1,248 +0,0 @@ -package http - -import ( - "context" - "net/http" - "net/http/httptest" - "reflect" - "testing" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" -) - -func Test_queryOrganization(t *testing.T) { - type args struct { - ctx context.Context - r *http.Request - svc platform.OrganizationService - } - tests := []struct { - name string - args args - want *platform.Organization - wantErr bool - }{ - { - name: "org id finds organization", - want: &platform.Organization{ - ID: platform2.ID(1), - }, - args: args{ - ctx: context.Background(), - r: httptest.NewRequest(http.MethodPost, "/api/v2/query?orgID=0000000000000001", nil), - svc: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - if *filter.ID == platform2.ID(1) { - return &platform.Organization{ - ID: platform2.ID(1), - }, nil - } - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unknown org name", - } - }, - }, - }, - }, - { - name: "bad id returns error", - wantErr: true, - args: args{ - ctx: context.Background(), - r: httptest.NewRequest(http.MethodPost, "/api/v2/query?orgID=howdy", nil), - }, - }, - { - name: "org name finds organization", - want: &platform.Organization{ - ID: platform2.ID(1), - Name: "org1", - }, - args: args{ - ctx: context.Background(), - r: httptest.NewRequest(http.MethodPost, "/api/v2/query?org=org1", nil), - svc: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - if *filter.Name == "org1" { - return &platform.Organization{ - ID: platform2.ID(1), - Name: "org1", - }, nil - } - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unknown org name", - } - }, - }, - }, - }, - { - name: "org id as org finds organization", - want: &platform.Organization{ - ID: platform2.ID(1), - }, - args: args{ - ctx: context.Background(), - r: httptest.NewRequest(http.MethodPost, "/api/v2/query?org=0000000000000001", nil), - svc: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - if *filter.ID == platform2.ID(1) { - return &platform.Organization{ - ID: platform2.ID(1), - }, nil - } - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unknown org name", - } - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := 
queryOrganization(tt.args.ctx, tt.args.r, tt.args.svc) - if (err != nil) != tt.wantErr { - t.Errorf("queryOrganization() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("queryOrganization() = %v, want %v", got, tt.want) - } - }) - } -} - -func Test_queryBucket(t *testing.T) { - type args struct { - ctx context.Context - r *http.Request - svc platform.BucketService - } - tests := []struct { - name string - args args - want *platform.Bucket - wantErr bool - }{ - { - name: "bucket id finds bucket", - want: &platform.Bucket{ - ID: platform2.ID(1), - }, - args: args{ - ctx: context.Background(), - r: httptest.NewRequest(http.MethodPost, "/api/v2/query?bucketID=0000000000000001", nil), - svc: &mock.BucketService{ - FindBucketFn: func(ctx context.Context, filter platform.BucketFilter) (*platform.Bucket, error) { - if *filter.ID == platform2.ID(1) { - return &platform.Bucket{ - ID: platform2.ID(1), - }, nil - } - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unknown org name", - } - }, - }, - }, - }, - { - name: "bad id returns error", - wantErr: true, - args: args{ - ctx: context.Background(), - r: httptest.NewRequest(http.MethodPost, "/api/v2/query?bucketID=howdy", nil), - }, - }, - { - name: "bucket name finds bucket", - want: &platform.Bucket{ - ID: platform2.ID(1), - Name: "bucket1", - }, - args: args{ - ctx: context.Background(), - r: httptest.NewRequest(http.MethodPost, "/api/v2/query?bucket=bucket1", nil), - svc: &mock.BucketService{ - FindBucketFn: func(ctx context.Context, filter platform.BucketFilter) (*platform.Bucket, error) { - if *filter.Name == "bucket1" { - return &platform.Bucket{ - ID: platform2.ID(1), - Name: "bucket1", - }, nil - } - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unknown org name", - } - }, - }, - }, - }, - { - name: "bucket id as bucket finds bucket", - want: &platform.Bucket{ - ID: platform2.ID(1), - }, - args: args{ - ctx: context.Background(), - r: httptest.NewRequest(http.MethodPost, "/api/v2/query?bucket=0000000000000001", nil), - svc: &mock.BucketService{ - FindBucketFn: func(ctx context.Context, filter platform.BucketFilter) (*platform.Bucket, error) { - if *filter.ID == platform2.ID(1) { - return &platform.Bucket{ - ID: platform2.ID(1), - }, nil - } - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unknown bucket name", - } - }, - }, - }, - }, - { - name: "invalid orgID fails to return bucket", - want: &platform.Bucket{ - ID: platform2.ID(1), - }, - args: args{ - ctx: context.Background(), - r: httptest.NewRequest(http.MethodPost, "/api/v2/query?bucket=0000000000000001", nil), - svc: &mock.BucketService{ - FindBucketFn: func(ctx context.Context, filter platform.BucketFilter) (*platform.Bucket, error) { - if *filter.OrganizationID == platform2.ID(1) { - return &platform.Bucket{ - ID: platform2.ID(1), - }, nil - } - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unknown bucket", - } - }, - }, - }, - }} - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := queryBucket(tt.args.ctx, platform2.ID(1), tt.args.r, tt.args.svc) - if (err != nil) != tt.wantErr { - t.Errorf("queryBucket() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("queryBucket() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/http/resources.go b/http/resources.go deleted file mode 100644 index 1bf09ce8672..00000000000 --- a/http/resources.go +++ /dev/null @@ -1,19 +0,0 @@ -package http - 
-import ( - "encoding/json" - "net/http" - - "github.com/influxdata/influxdb/v2" -) - -const prefixResources = "/api/v2/resources" - -// NewResourceListHandler is the HTTP handler for the GET /api/v2/resources route. -func NewResourceListHandler() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(influxdb.AllResourceTypes) - }) -} diff --git a/http/resources_test.go b/http/resources_test.go deleted file mode 100644 index 2808dddad03..00000000000 --- a/http/resources_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package http - -import ( - "encoding/json" - "io" - "net/http" - "net/http/httptest" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2" -) - -func TestResourceListHandler(t *testing.T) { - w := httptest.NewRecorder() - - NewResourceListHandler().ServeHTTP(w, - httptest.NewRequest( - "GET", - "http://howdy.tld/api/v2/resources", - nil, - ), - ) - - expectedResponse := []string{ - string(influxdb.AuthorizationsResourceType), - string(influxdb.BucketsResourceType), - string(influxdb.DashboardsResourceType), - string(influxdb.OrgsResourceType), - string(influxdb.SourcesResourceType), - string(influxdb.TasksResourceType), - string(influxdb.TelegrafsResourceType), - string(influxdb.UsersResourceType), - string(influxdb.VariablesResourceType), - string(influxdb.ScraperResourceType), - string(influxdb.SecretsResourceType), - string(influxdb.LabelsResourceType), - string(influxdb.ViewsResourceType), - string(influxdb.DocumentsResourceType), - string(influxdb.NotificationRuleResourceType), - string(influxdb.NotificationEndpointResourceType), - string(influxdb.ChecksResourceType), - string(influxdb.DBRPResourceType), - string(influxdb.NotebooksResourceType), - string(influxdb.AnnotationsResourceType), - string(influxdb.RemotesResourceType), - string(influxdb.ReplicationsResourceType), - string(influxdb.InstanceResourceType), - } - - resp := w.Result() - body, _ := io.ReadAll(resp.Body) - if resp.StatusCode != http.StatusOK { - t.Logf(string(body)) - t.Errorf("unexpected status: %s", resp.Status) - } - - var actualReponse []string - if err := json.Unmarshal(body, &actualReponse); err != nil { - t.Errorf("unexpected response format: %v, error: %v", string(body), err) - } - - if !reflect.DeepEqual(actualReponse, expectedResponse) { - t.Errorf("expected response to equal %+#v, but was %+#v", expectedResponse, actualReponse) - } -} diff --git a/http/restore_service.go b/http/restore_service.go deleted file mode 100644 index a08a74a0e0c..00000000000 --- a/http/restore_service.go +++ /dev/null @@ -1,395 +0,0 @@ -package http - -import ( - "compress/gzip" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "strconv" - "time" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - context2 "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "go.uber.org/zap" -) - -// RestoreBackend is all services and associated parameters required to construct the RestoreHandler. 
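// Editor's note: an illustrative sketch, not part of the deleted file, of the wiring the
// declarations below provide: an APIBackend is narrowed into a RestoreBackend, which in
// turn builds the HTTP handler for the /api/v2/restore routes. newRestoreRoutes is an
// invented name.
func newRestoreRoutes(b *APIBackend) http.Handler {
	return NewRestoreHandler(NewRestoreBackend(b))
}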
-type RestoreBackend struct { - Logger *zap.Logger - errors.HTTPErrorHandler - - RestoreService influxdb.RestoreService - SqlBackupRestoreService influxdb.SqlBackupRestoreService - BucketService influxdb.BucketService - AuthorizationService influxdb.AuthorizationService -} - -// NewRestoreBackend returns a new instance of RestoreBackend. -func NewRestoreBackend(b *APIBackend) *RestoreBackend { - return &RestoreBackend{ - Logger: b.Logger.With(zap.String("handler", "restore")), - - HTTPErrorHandler: b.HTTPErrorHandler, - RestoreService: b.RestoreService, - SqlBackupRestoreService: b.SqlBackupRestoreService, - BucketService: b.BucketService, - AuthorizationService: b.AuthorizationService, - } -} - -// RestoreHandler is http handler for restore service. -type RestoreHandler struct { - *httprouter.Router - api *kithttp.API - errors.HTTPErrorHandler - Logger *zap.Logger - - RestoreService influxdb.RestoreService - SqlBackupRestoreService influxdb.SqlBackupRestoreService - BucketService influxdb.BucketService - AuthorizationService influxdb.AuthorizationService -} - -const ( - prefixRestore = "/api/v2/restore" - restoreKVPath = prefixRestore + "/kv" - restoreSqlPath = prefixRestore + "/sql" - restoreShardPath = prefixRestore + "/shards/:shardID" - - restoreBucketPath = prefixRestore + "/buckets/:bucketID" // Deprecated. Used by 2.0.x clients. - restoreBucketMetadataDeprecatedPath = prefixRestore + "/bucket-metadata" // Deprecated. Used by 2.1.0 of the CLI - restoreBucketMetadataPath = prefixRestore + "/bucketMetadata" -) - -// NewRestoreHandler creates a new handler at /api/v2/restore to receive restore requests. -func NewRestoreHandler(b *RestoreBackend) *RestoreHandler { - h := &RestoreHandler{ - HTTPErrorHandler: b.HTTPErrorHandler, - Router: NewRouter(b.HTTPErrorHandler), - Logger: b.Logger, - RestoreService: b.RestoreService, - SqlBackupRestoreService: b.SqlBackupRestoreService, - BucketService: b.BucketService, - AuthorizationService: b.AuthorizationService, - api: kithttp.NewAPI(kithttp.WithLog(b.Logger)), - } - - h.HandlerFunc(http.MethodPost, restoreKVPath, h.handleRestoreKVStore) - h.HandlerFunc(http.MethodPost, restoreSqlPath, h.handleRestoreSqlStore) - h.HandlerFunc(http.MethodPost, restoreBucketPath, h.handleRestoreBucket) - h.HandlerFunc(http.MethodPost, restoreBucketMetadataDeprecatedPath, h.handleRestoreBucketMetadata) - h.HandlerFunc(http.MethodPost, restoreBucketMetadataPath, h.handleRestoreBucketMetadata) - h.HandlerFunc(http.MethodPost, restoreShardPath, h.handleRestoreShard) - - return h -} - -func (h *RestoreHandler) getOperatorToken(ctx context.Context) (influxdb.Authorization, error) { - // Get the token post-restore - auths, _, err := h.AuthorizationService.FindAuthorizations(ctx, influxdb.AuthorizationFilter{}) - if err != nil { - return influxdb.Authorization{}, err - } - - var operToken *influxdb.Authorization - for _, a := range auths { - authCtx := context.Background() - authCtx = context2.SetAuthorizer(authCtx, a) - if authorizer.IsAllowedAll(authCtx, influxdb.OperPermissions()) == nil { - operToken = a - break - } - } - - if operToken == nil { - return influxdb.Authorization{}, fmt.Errorf("invalid backup without an operator token, consider editing the BoltDB in the backup with 'influxd recovery'") - } - - return *operToken, nil -} - -func (h *RestoreHandler) handleRestoreKVStore(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "RestoreHandler.handleRestoreKVStore") - defer span.Finish() - - ctx := r.Context() - - var kvBytes 
io.Reader = r.Body - if r.Header.Get("Content-Encoding") == "gzip" { - gzr, err := gzip.NewReader(kvBytes) - if err != nil { - err = &errors.Error{ - Code: errors.EInvalid, - Msg: "failed to decode gzip request body", - Err: err, - } - h.HandleHTTPError(ctx, err, w) - } - defer gzr.Close() - kvBytes = gzr - } - - if err := h.RestoreService.RestoreKVStore(ctx, kvBytes); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - // Get the token post-restore - operatorToken, err := h.getOperatorToken(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - // Return the new token to the caller so it can continue the restore - response := make(map[string]string) - response["token"] = operatorToken.Token - - h.api.Respond(w, r, http.StatusOK, response) -} - -func (h *RestoreHandler) handleRestoreSqlStore(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "RestoreHandler.handleRestoreSqlStore") - defer span.Finish() - - ctx := r.Context() - - var sqlBytes io.Reader = r.Body - if r.Header.Get("Content-Encoding") == "gzip" { - gzr, err := gzip.NewReader(sqlBytes) - if err != nil { - err = &errors.Error{ - Code: errors.EInvalid, - Msg: "failed to decode gzip request body", - Err: err, - } - h.HandleHTTPError(ctx, err, w) - } - defer gzr.Close() - sqlBytes = gzr - } - - if err := h.SqlBackupRestoreService.RestoreSqlStore(ctx, sqlBytes); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } -} - -func (h *RestoreHandler) handleRestoreBucket(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "RestoreHandler.handleRestoreBucket") - defer span.Finish() - - ctx := r.Context() - - // Read bucket ID. - bucketID, err := decodeIDFromCtx(r.Context(), "bucketID") - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - // Read serialized DBI data. - buf, err := io.ReadAll(r.Body) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - shardIDMap, err := h.RestoreService.RestoreBucket(ctx, bucketID, buf) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := json.NewEncoder(w).Encode(shardIDMap); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } -} - -func (h *RestoreHandler) handleRestoreBucketMetadata(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "RestoreHandler.handleRestoreBucketMetadata") - defer span.Finish() - ctx := r.Context() - - var b influxdb.BucketMetadataManifest - if err := h.api.DecodeJSON(r.Body, &b); err != nil { - h.api.Err(w, r, err) - return - } - - // Create the bucket - This will fail if the bucket already exists. - // TODO: Could we support restoring to an existing bucket? - var description string - if b.Description != nil { - description = *b.Description - } - var rp, sgd time.Duration - if len(b.RetentionPolicies) > 0 { - policy := b.RetentionPolicies[0] - rp = policy.Duration - sgd = policy.ShardGroupDuration - } - - bkt := influxdb.Bucket{ - OrgID: b.OrganizationID, - Name: b.BucketName, - Description: description, - RetentionPeriod: rp, - ShardGroupDuration: sgd, - } - if err := h.BucketService.CreateBucket(ctx, &bkt); err != nil { - h.api.Err(w, r, err) - return - } - - // Restore shard-level metadata for the new bucket. - // TODO: It's silly to marshal the DBI into binary here only to unmarshal it again within - // the RestoreService, but it's the easiest way to share code with the 2.0.x restore API - // and avoid introducing a circular dependency on the `meta` package. 
- // When we reach a point where we feel comfortable deleting the 2.0.x endpoints, consider - // refactoring this to pass a struct directly instead of the marshalled bytes. - dbi := manifestToDbInfo(b) - rawDbi, err := dbi.MarshalBinary() - if err != nil { - h.api.Err(w, r, err) - return - } - shardIDMap, err := h.RestoreService.RestoreBucket(ctx, bkt.ID, rawDbi) - if err != nil { - h.Logger.Warn("Cleaning up after failed bucket-restore", zap.String("bucket_id", bkt.ID.String())) - if err2 := h.BucketService.DeleteBucket(ctx, bkt.ID); err2 != nil { - h.Logger.Error("Failed to clean up bucket after failed restore", - zap.String("bucket_id", bkt.ID.String()), zap.Error(err2)) - } - h.api.Err(w, r, err) - return - } - - res := influxdb.RestoredBucketMappings{ - ID: bkt.ID, - Name: bkt.Name, - ShardMappings: make([]influxdb.RestoredShardMapping, 0, len(shardIDMap)), - } - - for old, new := range shardIDMap { - res.ShardMappings = append(res.ShardMappings, influxdb.RestoredShardMapping{OldId: old, NewId: new}) - } - - h.api.Respond(w, r, http.StatusCreated, res) -} - -func manifestToDbInfo(m influxdb.BucketMetadataManifest) meta.DatabaseInfo { - dbi := meta.DatabaseInfo{ - Name: m.BucketName, - DefaultRetentionPolicy: m.DefaultRetentionPolicy, - RetentionPolicies: make([]meta.RetentionPolicyInfo, len(m.RetentionPolicies)), - } - for i, rp := range m.RetentionPolicies { - dbi.RetentionPolicies[i] = manifestToRpInfo(rp) - } - - return dbi -} - -func manifestToRpInfo(m influxdb.RetentionPolicyManifest) meta.RetentionPolicyInfo { - rpi := meta.RetentionPolicyInfo{ - Name: m.Name, - ReplicaN: m.ReplicaN, - Duration: m.Duration, - ShardGroupDuration: m.ShardGroupDuration, - ShardGroups: make([]meta.ShardGroupInfo, len(m.ShardGroups)), - Subscriptions: make([]meta.SubscriptionInfo, len(m.Subscriptions)), - } - - for i, sg := range m.ShardGroups { - rpi.ShardGroups[i] = manifestToSgInfo(sg) - } - for i, s := range m.Subscriptions { - rpi.Subscriptions[i] = meta.SubscriptionInfo{ - Name: s.Name, - Mode: s.Mode, - Destinations: s.Destinations, - } - } - - return rpi -} - -func manifestToSgInfo(m influxdb.ShardGroupManifest) meta.ShardGroupInfo { - var delAt, truncAt time.Time - if m.DeletedAt != nil { - delAt = *m.DeletedAt - } - if m.TruncatedAt != nil { - truncAt = *m.TruncatedAt - } - sgi := meta.ShardGroupInfo{ - ID: m.ID, - StartTime: m.StartTime, - EndTime: m.EndTime, - DeletedAt: delAt, - TruncatedAt: truncAt, - Shards: make([]meta.ShardInfo, len(m.Shards)), - } - - for i, sh := range m.Shards { - sgi.Shards[i] = manifestToShardInfo(sh) - } - - return sgi -} - -func manifestToShardInfo(m influxdb.ShardManifest) meta.ShardInfo { - si := meta.ShardInfo{ - ID: m.ID, - Owners: make([]meta.ShardOwner, len(m.ShardOwners)), - } - for i, so := range m.ShardOwners { - si.Owners[i] = meta.ShardOwner{NodeID: so.NodeID} - } - - return si -} - -func (h *RestoreHandler) handleRestoreShard(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "RestoreHandler.handleRestoreShard") - defer span.Finish() - - ctx := r.Context() - - params := httprouter.ParamsFromContext(ctx) - shardID, err := strconv.ParseUint(params.ByName("shardID"), 10, 64) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - var tsmBytes io.Reader = r.Body - if r.Header.Get("Content-Encoding") == "gzip" { - gzr, err := gzip.NewReader(tsmBytes) - if err != nil { - err = &errors.Error{ - Code: errors.EInvalid, - Msg: "failed to decode gzip request body", - Err: err, - } - h.HandleHTTPError(ctx, err, 
w) - } - defer gzr.Close() - tsmBytes = gzr - } - - if err := h.RestoreService.RestoreShard(ctx, shardID, tsmBytes); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } -} diff --git a/http/router.go b/http/router.go deleted file mode 100644 index 303c8478bc6..00000000000 --- a/http/router.go +++ /dev/null @@ -1,146 +0,0 @@ -package http - -import ( - "fmt" - "net/http" - "os" - "runtime/debug" - "sync" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/go-stack/stack" - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - influxlogger "github.com/influxdata/influxdb/v2/logger" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// NewRouter returns a new router with a 404 handler, a 405 handler, and a panic handler. -func NewRouter(h errors.HTTPErrorHandler) *httprouter.Router { - b := baseHandler{HTTPErrorHandler: h} - router := httprouter.New() - router.NotFound = http.HandlerFunc(b.notFound) - router.MethodNotAllowed = http.HandlerFunc(b.methodNotAllowed) - router.PanicHandler = b.panic - router.AddMatchedRouteToContext = true - return router -} - -// NewBaseChiRouter returns a new chi router with a 404 handler, a 405 handler, and a panic handler. -func NewBaseChiRouter(api *kithttp.API) chi.Router { - router := chi.NewRouter() - router.NotFound(func(w http.ResponseWriter, r *http.Request) { - api.Err(w, r, &errors.Error{ - Code: errors.ENotFound, - Msg: "path not found", - }) - }) - router.MethodNotAllowed(func(w http.ResponseWriter, r *http.Request) { - api.Err(w, r, &errors.Error{ - Code: errors.EMethodNotAllowed, - Msg: fmt.Sprintf("allow: %s", w.Header().Get("Allow")), - }) - - }) - router.Use( - panicMW(api), - kithttp.SkipOptions, - middleware.StripSlashes, - kithttp.SetCORS, - ) - return router -} - -type baseHandler struct { - errors.HTTPErrorHandler -} - -// notFound represents a 404 handler that return a JSON response. -func (h baseHandler) notFound(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - pe := &errors.Error{ - Code: errors.ENotFound, - Msg: "path not found", - } - - h.HandleHTTPError(ctx, pe, w) -} - -// methodNotAllowed represents a 405 handler that return a JSON response. -func (h baseHandler) methodNotAllowed(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - allow := w.Header().Get("Allow") - pe := &errors.Error{ - Code: errors.EMethodNotAllowed, - Msg: fmt.Sprintf("allow: %s", allow), - } - - h.HandleHTTPError(ctx, pe, w) -} - -// panic handles panics recovered from http handlers. -// It returns a json response with http status code 500 and the recovered error message. 
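// Editor's note: an illustrative usage sketch, not part of the deleted file, for the
// NewBaseChiRouter constructor above: routes mounted on the returned router inherit the
// JSON 404/405 responses and the panic-recovery middleware. newExamplePingRouter is an
// invented name, and zap.NewNop is used only to keep the sketch self-contained.
func newExamplePingRouter() chi.Router {
	api := kithttp.NewAPI(kithttp.WithLog(zap.NewNop()))
	router := NewBaseChiRouter(api)
	router.Get("/ping", func(w http.ResponseWriter, r *http.Request) {
		api.Respond(w, r, http.StatusOK, map[string]string{"message": "pong"})
	})
	return router
}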
-func (h baseHandler) panic(w http.ResponseWriter, r *http.Request, rcv interface{}) { - ctx := r.Context() - pe := &errors.Error{ - Code: errors.EInternal, - Msg: "a panic has occurred", - Err: fmt.Errorf("%s: %v", r.URL.String(), rcv), - } - - l := getPanicLogger() - if entry := l.Check(zapcore.ErrorLevel, pe.Msg); entry != nil { - entry.Stack = string(debug.Stack()) - entry.Write(zap.Error(pe.Err)) - } - - h.HandleHTTPError(ctx, pe, w) -} - -func panicMW(api *kithttp.API) func(http.Handler) http.Handler { - return func(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - defer func() { - panicErr := recover() - if panicErr == nil { - return - } - - pe := &errors.Error{ - Code: errors.EInternal, - Msg: "a panic has occurred", - Err: fmt.Errorf("%s: %v", r.URL.String(), panicErr), - } - - l := getPanicLogger() - if entry := l.Check(zapcore.ErrorLevel, pe.Msg); entry != nil { - entry.Stack = fmt.Sprintf("%+v", stack.Trace()) - entry.Write(zap.Error(pe.Err)) - } - - api.Err(w, r, pe) - }() - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) - } -} - -var panicLogger = zap.NewNop() -var panicLoggerOnce sync.Once - -// getPanicLogger returns a logger for panicHandler. -func getPanicLogger() *zap.Logger { - panicLoggerOnce.Do(func() { - conf := influxlogger.NewConfig() - logger, err := conf.New(os.Stderr) - if err == nil { - panicLogger = logger.With(zap.String("handler", "panic")) - } - }) - - return panicLogger -} diff --git a/http/router_test.go b/http/router_test.go deleted file mode 100644 index ccfcf34bd11..00000000000 --- a/http/router_test.go +++ /dev/null @@ -1,335 +0,0 @@ -package http - -import ( - "fmt" - "io" - "net/http" - "net/http/httptest" - "testing" - - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap/zaptest" -) - -func TestRouter_NotFound(t *testing.T) { - type fields struct { - method string - path string - handlerFn http.HandlerFunc - } - type args struct { - method string - path string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "path not found", - fields: fields{ - method: "GET", - path: "/ping", - handlerFn: func(w http.ResponseWriter, r *http.Request) { - encodeResponse(r.Context(), w, http.StatusOK, map[string]string{"message": "pong"}) - }, - }, - args: args{ - method: "GET", - path: "/404", - }, - wants: wants{ - statusCode: http.StatusNotFound, - contentType: "application/json; charset=utf-8", - body: ` -{ - "code": "not found", - "message": "path not found" -}`, - }, - }, - { - name: "path found", - fields: fields{ - method: "GET", - path: "/ping", - handlerFn: func(w http.ResponseWriter, r *http.Request) { - encodeResponse(r.Context(), w, http.StatusOK, map[string]string{"message": "pong"}) - }, - }, - args: args{ - method: "GET", - path: "/ping", - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "message": "pong" -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - router := NewRouter(kithttp.NewErrorHandler(zaptest.NewLogger(t))) - router.HandlerFunc(tt.fields.method, tt.fields.path, tt.fields.handlerFn) - - r := httptest.NewRequest(tt.args.method, tt.args.path, nil) - w := httptest.NewRecorder() - router.ServeHTTP(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode 
!= tt.wants.statusCode { - t.Errorf("%q. get %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. get %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq { - t.Errorf("%q. get ***%s***", tt.name, diff) - } - }) - } -} - -func TestRouter_Panic(t *testing.T) { - type fields struct { - method string - path string - handlerFn http.HandlerFunc - } - type args struct { - method string - path string - } - type wants struct { - statusCode int - contentType string - body string - logged bool - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "no panic", - fields: fields{ - method: "GET", - path: "/ping", - handlerFn: func(w http.ResponseWriter, r *http.Request) { - encodeResponse(r.Context(), w, http.StatusOK, map[string]string{"message": "pong"}) - }, - }, - args: args{ - method: "GET", - path: "/ping", - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - logged: false, - body: ` -{ - "message": "pong" -} -`, - }, - }, - { - name: "panic", - fields: fields{ - method: "GET", - path: "/ping", - handlerFn: func(w http.ResponseWriter, r *http.Request) { - panic("not implemented") - }, - }, - args: args{ - method: "GET", - path: "/ping", - }, - wants: wants{ - statusCode: http.StatusInternalServerError, - contentType: "application/json; charset=utf-8", - logged: true, - body: ` -{ - "code": "internal error", - "message": "a panic has occurred: /ping: not implemented" -}`, - }, - }, - } - - for _, tt := range tests[1:] { - t.Run(tt.name, func(t *testing.T) { - logger := getPanicLogger() - defer func() { - panicLogger = logger - }() - - tw := newTestLogWriter(t) - panicLogger = zaptest.NewLogger(tw) - - router := NewRouter(kithttp.NewErrorHandler(zaptest.NewLogger(t))) - router.HandlerFunc(tt.fields.method, tt.fields.path, tt.fields.handlerFn) - - r := httptest.NewRequest(tt.args.method, tt.args.path, nil) - w := httptest.NewRecorder() - router.ServeHTTP(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. get %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. get %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, _ := jsonEqual(tt.wants.body, string(body)); tt.wants.body != "" && !eq { - t.Errorf("%q. get ***%s***", tt.name, diff) - } - if tt.wants.logged != tw.Logged() { - t.Errorf("%q. 
get %v, want %v", tt.name, tt.wants.logged, tw.Logged()) - } - }) - } -} - -func TestRouter_MethodNotAllowed(t *testing.T) { - type fields struct { - method string - path string - handlerFn http.HandlerFunc - } - type args struct { - method string - path string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "method not allowed", - fields: fields{ - method: "GET", - path: "/ping", - handlerFn: func(w http.ResponseWriter, r *http.Request) { - encodeResponse(r.Context(), w, http.StatusOK, map[string]string{"message": "pong"}) - }, - }, - args: args{ - method: "POST", - path: "/ping", - }, - wants: wants{ - statusCode: http.StatusMethodNotAllowed, - contentType: "application/json; charset=utf-8", - body: ` -{ - "code": "method not allowed", - "message": "allow: GET, OPTIONS" -}`, - }, - }, - { - name: "method allowed", - fields: fields{ - method: "GET", - path: "/ping", - handlerFn: func(w http.ResponseWriter, r *http.Request) { - encodeResponse(r.Context(), w, http.StatusOK, map[string]string{"message": "pong"}) - }, - }, - args: args{ - method: "GET", - path: "/ping", - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "message": "pong" -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - router := NewRouter(kithttp.NewErrorHandler(zaptest.NewLogger(t))) - router.HandlerFunc(tt.fields.method, tt.fields.path, tt.fields.handlerFn) - - r := httptest.NewRequest(tt.args.method, tt.args.path, nil) - w := httptest.NewRecorder() - router.ServeHTTP(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. get %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. get %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq { - t.Errorf("%q. get ***%s***", tt.name, diff) - } - - }) - } -} - -// testLogWriter is a zaptest.TestingT that captures logged messages. -type testLogWriter struct { - *testing.T - Messages []string -} - -func newTestLogWriter(t *testing.T) *testLogWriter { - return &testLogWriter{T: t} -} - -func (t *testLogWriter) Logf(format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - t.Messages = append(t.Messages, msg) - t.T.Log(msg) -} - -func (t *testLogWriter) Logged() bool { - return len(t.Messages) > 0 -} diff --git a/http/scraper_service.go b/http/scraper_service.go deleted file mode 100644 index 08272ecf824..00000000000 --- a/http/scraper_service.go +++ /dev/null @@ -1,645 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "net/http" - "path" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - pctx "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "go.uber.org/zap" -) - -const ( - prefixOrganizations = "/api/v2/orgs" - prefixBuckets = "/api/v2/buckets" -) - -// ScraperBackend is all services and associated parameters required to construct -// the ScraperHandler. 
-type ScraperBackend struct { - errors.HTTPErrorHandler - log *zap.Logger - - ScraperStorageService influxdb.ScraperTargetStoreService - BucketService influxdb.BucketService - OrganizationService influxdb.OrganizationService - UserService influxdb.UserService - UserResourceMappingService influxdb.UserResourceMappingService - LabelService influxdb.LabelService -} - -// NewScraperBackend returns a new instance of ScraperBackend. -func NewScraperBackend(log *zap.Logger, b *APIBackend) *ScraperBackend { - return &ScraperBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - - ScraperStorageService: b.ScraperTargetStoreService, - BucketService: b.BucketService, - OrganizationService: b.OrganizationService, - UserService: b.UserService, - UserResourceMappingService: b.UserResourceMappingService, - LabelService: b.LabelService, - } -} - -// ScraperHandler represents an HTTP API handler for scraper targets. -type ScraperHandler struct { - *httprouter.Router - errors.HTTPErrorHandler - log *zap.Logger - UserService influxdb.UserService - UserResourceMappingService influxdb.UserResourceMappingService - LabelService influxdb.LabelService - ScraperStorageService influxdb.ScraperTargetStoreService - BucketService influxdb.BucketService - OrganizationService influxdb.OrganizationService -} - -const ( - prefixTargets = "/api/v2/scrapers" - targetsIDMembersPath = prefixTargets + "/:id/members" - targetsIDMembersIDPath = prefixTargets + "/:id/members/:userID" - targetsIDOwnersPath = prefixTargets + "/:id/owners" - targetsIDOwnersIDPath = prefixTargets + "/:id/owners/:userID" - targetsIDLabelsPath = prefixTargets + "/:id/labels" - targetsIDLabelsIDPath = prefixTargets + "/:id/labels/:lid" -) - -// NewScraperHandler returns a new instance of ScraperHandler. -func NewScraperHandler(log *zap.Logger, b *ScraperBackend) *ScraperHandler { - h := &ScraperHandler{ - Router: NewRouter(b.HTTPErrorHandler), - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - UserService: b.UserService, - UserResourceMappingService: b.UserResourceMappingService, - LabelService: b.LabelService, - ScraperStorageService: b.ScraperStorageService, - BucketService: b.BucketService, - OrganizationService: b.OrganizationService, - } - h.HandlerFunc("POST", prefixTargets, h.handlePostScraperTarget) - h.HandlerFunc("GET", prefixTargets, h.handleGetScraperTargets) - h.HandlerFunc("GET", prefixTargets+"/:id", h.handleGetScraperTarget) - h.HandlerFunc("PATCH", prefixTargets+"/:id", h.handlePatchScraperTarget) - h.HandlerFunc("DELETE", prefixTargets+"/:id", h.handleDeleteScraperTarget) - - memberBackend := MemberBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "member")), - ResourceType: influxdb.ScraperResourceType, - UserType: influxdb.Member, - UserResourceMappingService: b.UserResourceMappingService, - UserService: b.UserService, - } - h.HandlerFunc("POST", targetsIDMembersPath, newPostMemberHandler(memberBackend)) - h.HandlerFunc("GET", targetsIDMembersPath, newGetMembersHandler(memberBackend)) - h.HandlerFunc("DELETE", targetsIDMembersIDPath, newDeleteMemberHandler(memberBackend)) - - ownerBackend := MemberBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "member")), - ResourceType: influxdb.ScraperResourceType, - UserType: influxdb.Owner, - UserResourceMappingService: b.UserResourceMappingService, - UserService: b.UserService, - } - h.HandlerFunc("POST", targetsIDOwnersPath, newPostMemberHandler(ownerBackend)) - h.HandlerFunc("GET", targetsIDOwnersPath, 
newGetMembersHandler(ownerBackend)) - h.HandlerFunc("DELETE", targetsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend)) - - labelBackend := &LabelBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "label")), - LabelService: b.LabelService, - ResourceType: influxdb.ScraperResourceType, - } - h.HandlerFunc("GET", targetsIDLabelsPath, newGetLabelsHandler(labelBackend)) - h.HandlerFunc("POST", targetsIDLabelsPath, newPostLabelHandler(labelBackend)) - h.HandlerFunc("DELETE", targetsIDLabelsIDPath, newDeleteLabelHandler(labelBackend)) - - return h -} - -// handlePostScraperTarget is HTTP handler for the POST /api/v2/scrapers route. -func (h *ScraperHandler) handlePostScraperTarget(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeScraperTargetAddRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - auth, err := pctx.GetAuthorizer(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := h.ScraperStorageService.AddTarget(ctx, req, auth.GetUserID()); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Scraper created", zap.String("scraper", fmt.Sprint(req))) - - resp, err := h.newTargetResponse(ctx, *req) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - if err := encodeResponse(ctx, w, http.StatusCreated, resp); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -// handleDeleteScraperTarget is the HTTP handler for the DELETE /api/v2/scrapers/:id route. -func (h *ScraperHandler) handleDeleteScraperTarget(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := decodeScraperTargetIDRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := h.ScraperStorageService.RemoveTarget(ctx, *id); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Scraper deleted", zap.String("scraperTargetID", fmt.Sprint(id))) - - w.WriteHeader(http.StatusNoContent) -} - -// handlePatchScraperTarget is the HTTP handler for the PATCH /api/v2/scrapers/:id route. 
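Both handlePostScraperTarget above and handlePatchScraperTarget below decode the request body into an influxdb.ScraperTarget. As a rough sketch (not taken from this file), the JSON a client might send looks like the following; the name, URL, and IDs are placeholder values borrowed from the tests further down.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Field names mirror the JSON tags seen in the scraper responses below;
	// the concrete values here are placeholders only.
	body := map[string]string{
		"name":     "hello",
		"type":     "prometheus",
		"url":      "www.some.url",
		"orgID":    "0000000000000211",
		"bucketID": "0000000000000212",
	}
	b, _ := json.Marshal(body)
	fmt.Println(string(b)) // body for POST /api/v2/scrapers or PATCH /api/v2/scrapers/:id
}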
-func (h *ScraperHandler) handlePatchScraperTarget(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - update, err := decodeScraperTargetUpdateRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - auth, err := pctx.GetAuthorizer(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - target, err := h.ScraperStorageService.UpdateTarget(ctx, update, auth.GetUserID()) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Scraper updated", zap.String("scraper", fmt.Sprint(target))) - - resp, err := h.newTargetResponse(ctx, *target) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, resp); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func (h *ScraperHandler) handleGetScraperTarget(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := decodeScraperTargetIDRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - target, err := h.ScraperStorageService.GetTargetByID(ctx, *id) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Scraper retrieved", zap.String("scraper", fmt.Sprint(target))) - - resp, err := h.newTargetResponse(ctx, *target) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, resp); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type getScraperTargetsRequest struct { - filter influxdb.ScraperTargetFilter -} - -func decodeScraperTargetsRequest(ctx context.Context, r *http.Request) (*getScraperTargetsRequest, error) { - qp := r.URL.Query() - req := &getScraperTargetsRequest{} - - initialID := platform.InvalidID() - if ids, ok := qp["id"]; ok { - req.filter.IDs = make(map[platform.ID]bool) - for _, id := range ids { - i := initialID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - req.filter.IDs[i] = false - } - } - if name := qp.Get("name"); name != "" { - req.filter.Name = &name - } - if orgID := qp.Get("orgID"); orgID != "" { - id := platform.InvalidID() - if err := id.DecodeFromString(orgID); err != nil { - return nil, err - } - req.filter.OrgID = &id - } else if org := qp.Get("org"); org != "" { - req.filter.Org = &org - } - - return req, nil -} - -// handleGetScraperTargets is the HTTP handler for the GET /api/v2/scrapers route. 
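decodeScraperTargetsRequest above accepts repeated id parameters plus name, orgID, and org filters, with orgID taking precedence over org. A small sketch of how a client might assemble such a URL; the IDs and organization name are placeholders taken from the tests below.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Query parameters understood by GET /api/v2/scrapers.
	q := url.Values{}
	q.Add("id", "0000000000000111") // "id" may be repeated to select several targets
	q.Add("id", "0000000000000222")
	q.Set("org", "org1") // ignored if orgID is also supplied
	fmt.Println("/api/v2/scrapers?" + q.Encode())
}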
-func (h *ScraperHandler) handleGetScraperTargets(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeScraperTargetsRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - targets, err := h.ScraperStorageService.ListTargets(ctx, req.filter) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Scrapers retrieved", zap.String("scrapers", fmt.Sprint(targets))) - - resp, err := h.newListTargetsResponse(ctx, targets) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, resp); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func decodeScraperTargetUpdateRequest(ctx context.Context, r *http.Request) (*influxdb.ScraperTarget, error) { - update := &influxdb.ScraperTarget{} - if err := json.NewDecoder(r.Body).Decode(update); err != nil { - return nil, err - } - id, err := decodeScraperTargetIDRequest(ctx, r) - if err != nil { - return nil, err - } - update.ID = *id - return update, nil -} - -func decodeScraperTargetAddRequest(ctx context.Context, r *http.Request) (*influxdb.ScraperTarget, error) { - req := &influxdb.ScraperTarget{} - if err := json.NewDecoder(r.Body).Decode(req); err != nil { - return nil, err - } - return req, nil -} - -func decodeScraperTargetIDRequest(ctx context.Context, r *http.Request) (*platform.ID, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - return &i, nil -} - -// ScraperService connects to Influx via HTTP using tokens to manage scraper targets. -type ScraperService struct { - Addr string - Token string - InsecureSkipVerify bool - // OpPrefix is for update invalid ops - OpPrefix string -} - -// ListTargets returns a list of all scraper targets. -func (s *ScraperService) ListTargets(ctx context.Context, filter influxdb.ScraperTargetFilter) ([]influxdb.ScraperTarget, error) { - url, err := NewURL(s.Addr, prefixTargets) - if err != nil { - return nil, err - } - - query := url.Query() - if filter.IDs != nil { - for id := range filter.IDs { - query.Add("id", id.String()) - } - } - if filter.Name != nil { - query.Set("name", *filter.Name) - } - if filter.OrgID != nil { - query.Set("orgID", filter.OrgID.String()) - } - if filter.Org != nil { - query.Set("org", *filter.Org) - } - - req, err := http.NewRequest("GET", url.String(), nil) - if err != nil { - return nil, err - } - - req.URL.RawQuery = query.Encode() - SetToken(s.Token, req) - - hc := NewClient(url.Scheme, s.InsecureSkipVerify) - resp, err := hc.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if err := CheckError(resp); err != nil { - return nil, err - } - - var targetsResp getTargetsResponse - if err := json.NewDecoder(resp.Body).Decode(&targetsResp); err != nil { - return nil, err - } - - targets := make([]influxdb.ScraperTarget, len(targetsResp.Targets)) - for k, v := range targetsResp.Targets { - targets[k] = v.ScraperTarget - } - - return targets, nil -} - -// UpdateTarget updates a single scraper target with changeset. -// Returns the new target state after update. 
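The ScraperService client above drives these same routes over HTTP. A hedged sketch of listing the targets of one organization, assuming the import paths used elsewhere in this file; the address, token, and organization name are placeholders.

package main

import (
	"context"
	"fmt"

	influxdb "github.com/influxdata/influxdb/v2"
	influxhttp "github.com/influxdata/influxdb/v2/http"
)

func main() {
	svc := &influxhttp.ScraperService{
		Addr:  "http://localhost:8086", // placeholder server address
		Token: "my-token",              // placeholder API token
	}

	org := "org1" // placeholder organization name
	targets, err := svc.ListTargets(context.Background(), influxdb.ScraperTargetFilter{Org: &org})
	if err != nil {
		panic(err)
	}
	for _, t := range targets {
		fmt.Println(t.ID, t.Name, t.URL)
	}
}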
-func (s *ScraperService) UpdateTarget(ctx context.Context, update *influxdb.ScraperTarget, userID platform.ID) (*influxdb.ScraperTarget, error) { - if !update.ID.Valid() { - return nil, &errors.Error{ - Code: errors.EInvalid, - Op: s.OpPrefix + influxdb.OpUpdateTarget, - Msg: "provided scraper target ID has invalid format", - } - } - url, err := NewURL(s.Addr, targetIDPath(update.ID)) - if err != nil { - return nil, err - } - - octets, err := json.Marshal(update) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("PATCH", url.String(), bytes.NewReader(octets)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - SetToken(s.Token, req) - hc := NewClient(url.Scheme, s.InsecureSkipVerify) - - resp, err := hc.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := CheckError(resp); err != nil { - return nil, err - } - var targetResp targetResponse - if err := json.NewDecoder(resp.Body).Decode(&targetResp); err != nil { - return nil, err - } - - return &targetResp.ScraperTarget, nil -} - -// AddTarget creates a new scraper target and sets target.ID with the new identifier. -func (s *ScraperService) AddTarget(ctx context.Context, target *influxdb.ScraperTarget, userID platform.ID) error { - url, err := NewURL(s.Addr, prefixTargets) - if err != nil { - return err - } - - if !target.OrgID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "provided organization ID has invalid format", - Op: s.OpPrefix + influxdb.OpAddTarget, - } - } - if !target.BucketID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "provided bucket ID has invalid format", - Op: s.OpPrefix + influxdb.OpAddTarget, - } - } - - octets, err := json.Marshal(target) - if err != nil { - return err - } - - req, err := http.NewRequest("POST", url.String(), bytes.NewReader(octets)) - if err != nil { - return err - } - - req.Header.Set("Content-Type", "application/json") - SetToken(s.Token, req) - - hc := NewClient(url.Scheme, s.InsecureSkipVerify) - - resp, err := hc.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - // TODO(jsternberg): Should this check for a 201 explicitly? - if err := CheckError(resp); err != nil { - return err - } - - targetResp := new(targetResponse) - if err := json.NewDecoder(resp.Body).Decode(targetResp); err != nil { - return err - } - - return nil -} - -// RemoveTarget removes a scraper target by ID. -func (s *ScraperService) RemoveTarget(ctx context.Context, id platform.ID) error { - url, err := NewURL(s.Addr, targetIDPath(id)) - if err != nil { - return err - } - - req, err := http.NewRequest("DELETE", url.String(), nil) - if err != nil { - return err - } - SetToken(s.Token, req) - - hc := NewClient(url.Scheme, s.InsecureSkipVerify) - resp, err := hc.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - return CheckErrorStatus(http.StatusNoContent, resp) -} - -// GetTargetByID returns a single target by ID. 
-func (s *ScraperService) GetTargetByID(ctx context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { - url, err := NewURL(s.Addr, targetIDPath(id)) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", url.String(), nil) - if err != nil { - return nil, err - } - SetToken(s.Token, req) - - hc := NewClient(url.Scheme, s.InsecureSkipVerify) - resp, err := hc.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := CheckError(resp); err != nil { - return nil, err - } - - var targetResp targetResponse - if err := json.NewDecoder(resp.Body).Decode(&targetResp); err != nil { - return nil, err - } - - return &targetResp.ScraperTarget, nil -} - -func targetIDPath(id platform.ID) string { - return path.Join(prefixTargets, id.String()) -} - -type getTargetsLinks struct { - Self string `json:"self"` -} - -type getTargetsResponse struct { - Links getTargetsLinks `json:"links"` - Targets []targetResponse `json:"configurations"` -} - -type targetLinks struct { - Self string `json:"self"` - Bucket string `json:"bucket,omitempty"` - Organization string `json:"organization,omitempty"` - Members string `json:"members"` - Owners string `json:"owners"` -} - -type targetResponse struct { - influxdb.ScraperTarget - Org string `json:"org,omitempty"` - Bucket string `json:"bucket,omitempty"` - Links targetLinks `json:"links"` -} - -func (h *ScraperHandler) newListTargetsResponse(ctx context.Context, targets []influxdb.ScraperTarget) (getTargetsResponse, error) { - res := getTargetsResponse{ - Links: getTargetsLinks{ - Self: prefixTargets, - }, - Targets: make([]targetResponse, 0, len(targets)), - } - - for _, target := range targets { - resp, err := h.newTargetResponse(ctx, target) - if err != nil { - return res, err - } - res.Targets = append(res.Targets, resp) - } - - return res, nil -} - -func (h *ScraperHandler) newTargetResponse(ctx context.Context, target influxdb.ScraperTarget) (targetResponse, error) { - res := targetResponse{ - Links: targetLinks{ - Self: targetIDPath(target.ID), - Members: fmt.Sprintf("/api/v2/scrapers/%s/members", target.ID), - Owners: fmt.Sprintf("/api/v2/scrapers/%s/owners", target.ID), - }, - ScraperTarget: target, - } - bucket, err := h.BucketService.FindBucketByID(ctx, target.BucketID) - if err == nil { - res.Bucket = bucket.Name - res.BucketID = bucket.ID - res.Links.Bucket = bucketIDPath(bucket.ID) - } else { - res.BucketID = platform.InvalidID() - } - - org, err := h.OrganizationService.FindOrganizationByID(ctx, target.OrgID) - if err == nil { - res.Org = org.Name - res.OrgID = org.ID - res.Links.Organization = organizationIDPath(org.ID) - } else { - res.OrgID = platform.InvalidID() - } - - return res, nil -} - -func organizationIDPath(id platform.ID) string { - return path.Join(prefixOrganizations, id.String()) -} - -func bucketIDPath(id platform.ID) string { - return path.Join(prefixBuckets, id.String()) -} diff --git a/http/scraper_service_test.go b/http/scraper_service_test.go deleted file mode 100644 index 693c0bedb90..00000000000 --- a/http/scraper_service_test.go +++ /dev/null @@ -1,884 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - platcontext "github.com/influxdata/influxdb/v2/context" - httpMock "github.com/influxdata/influxdb/v2/http/mock" - "github.com/influxdata/influxdb/v2/kit/platform" - 
"github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - platformtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -const ( - targetOneIDString = "0000000000000111" - targetTwoIDString = "0000000000000222" -) - -var ( - targetOneID = platformtesting.MustIDBase16(targetOneIDString) - targetTwoID = platformtesting.MustIDBase16(targetTwoIDString) -) - -// NewMockScraperBackend returns a ScraperBackend with mock services. -func NewMockScraperBackend(t *testing.T) *ScraperBackend { - return &ScraperBackend{ - log: zaptest.NewLogger(t), - - ScraperStorageService: &mock.ScraperTargetStoreService{}, - BucketService: mock.NewBucketService(), - OrganizationService: mock.NewOrganizationService(), - UserService: mock.NewUserService(), - UserResourceMappingService: &mock.UserResourceMappingService{}, - LabelService: mock.NewLabelService(), - } -} - -func TestService_handleGetScraperTargets(t *testing.T) { - type fields struct { - ScraperTargetStoreService influxdb.ScraperTargetStoreService - OrganizationService influxdb.OrganizationService - BucketService influxdb.BucketService - } - - type args struct { - queryParams map[string][]string - } - - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get all scraper targets", - fields: fields{ - OrganizationService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: platformtesting.MustIDBase16("0000000000000211"), - Name: "org1", - }, nil - }, - }, - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: platformtesting.MustIDBase16("0000000000000212"), - Name: "bucket1", - }, nil - }, - }, - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - ListTargetsF: func(ctx context.Context, filter influxdb.ScraperTargetFilter) ([]influxdb.ScraperTarget, error) { - return []influxdb.ScraperTarget{ - { - ID: targetOneID, - Name: "target-1", - Type: influxdb.PrometheusScraperType, - URL: "www.one.url", - OrgID: platformtesting.MustIDBase16("0000000000000211"), - BucketID: platformtesting.MustIDBase16("0000000000000212"), - }, - { - ID: targetTwoID, - Name: "target-2", - Type: influxdb.PrometheusScraperType, - URL: "www.two.url", - OrgID: platformtesting.MustIDBase16("0000000000000211"), - BucketID: platformtesting.MustIDBase16("0000000000000212"), - }, - }, nil - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: fmt.Sprintf( - ` - { - "links": { - "self": "/api/v2/scrapers" - }, - "configurations": [ - { - "id": "%s", - "name": "target-1", - "bucket": "bucket1", - "bucketID": "0000000000000212", - "org": "org1", - "orgID": "0000000000000211", - "type": "prometheus", - "url": "www.one.url", - "links": { - "bucket": "/api/v2/buckets/0000000000000212", - "organization": "/api/v2/orgs/0000000000000211", - "self": "/api/v2/scrapers/0000000000000111", - "members": "/api/v2/scrapers/0000000000000111/members", - "owners": "/api/v2/scrapers/0000000000000111/owners" - } - }, - 
{ - "id": "%s", - "name": "target-2", - "bucket": "bucket1", - "bucketID": "0000000000000212", - "orgID": "0000000000000211", - "org": "org1", - "type": "prometheus", - "url": "www.two.url", - "links": { - "bucket": "/api/v2/buckets/0000000000000212", - "organization": "/api/v2/orgs/0000000000000211", - "self": "/api/v2/scrapers/0000000000000222", - "members": "/api/v2/scrapers/0000000000000222/members", - "owners": "/api/v2/scrapers/0000000000000222/owners" - } - } - ] - } - `, - targetOneIDString, - targetTwoIDString, - ), - }, - }, - { - name: "get all scraper targets when there are none", - fields: fields{ - OrganizationService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: platformtesting.MustIDBase16("0000000000000211"), - Name: "org1", - }, nil - }, - }, - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: platformtesting.MustIDBase16("0000000000000212"), - Name: "bucket1", - }, nil - }, - }, - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - ListTargetsF: func(ctx context.Context, filter influxdb.ScraperTargetFilter) ([]influxdb.ScraperTarget, error) { - return []influxdb.ScraperTarget{}, nil - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` - { - "links": { - "self": "/api/v2/scrapers" - }, - "configurations": [] - } - `, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - scraperBackend := NewMockScraperBackend(t) - scraperBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - scraperBackend.ScraperStorageService = tt.fields.ScraperTargetStoreService - scraperBackend.OrganizationService = tt.fields.OrganizationService - scraperBackend.BucketService = tt.fields.BucketService - h := NewScraperHandler(zaptest.NewLogger(t), scraperBackend) - - r := httptest.NewRequest("GET", "http://any.tld", nil) - - qp := r.URL.Query() - for k, vs := range tt.args.queryParams { - for _, v := range vs { - qp.Add(k, v) - } - } - r.URL.RawQuery = qp.Encode() - - w := httptest.NewRecorder() - - h.handleGetScraperTargets(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetScraperTargets() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetScraperTargets() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetScraperTargets(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handleGetScraperTargets() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handleGetScraperTarget(t *testing.T) { - type fields struct { - OrganizationService influxdb.OrganizationService - BucketService influxdb.BucketService - ScraperTargetStoreService influxdb.ScraperTargetStoreService - } - - type args struct { - id string - } - - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get a scraper target by id", - fields: fields{ - OrganizationService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: platformtesting.MustIDBase16("0000000000000211"), - Name: "org1", - }, nil - }, - }, - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: platformtesting.MustIDBase16("0000000000000212"), - Name: "bucket1", - }, nil - }, - }, - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - GetTargetByIDF: func(ctx context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { - if id == targetOneID { - return &influxdb.ScraperTarget{ - ID: targetOneID, - Name: "target-1", - Type: influxdb.PrometheusScraperType, - URL: "www.some.url", - OrgID: platformtesting.MustIDBase16("0000000000000211"), - BucketID: platformtesting.MustIDBase16("0000000000000212"), - }, nil - } - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "scraper target is not found", - } - }, - }, - }, - args: args{ - id: targetOneIDString, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: fmt.Sprintf( - ` - { - "id": "%s", - "name": "target-1", - "type": "prometheus", - "url": "www.some.url", - "bucket": "bucket1", - "bucketID": "0000000000000212", - "orgID": "0000000000000211", - "org": "org1", - "links": { - "bucket": "/api/v2/buckets/0000000000000212", - "organization": "/api/v2/orgs/0000000000000211", - "self": "/api/v2/scrapers/%s", - "members": "/api/v2/scrapers/%s/members", - "owners": "/api/v2/scrapers/%s/owners" - } - } - `, - targetOneIDString, targetOneIDString, targetOneIDString, targetOneIDString, - ), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - scraperBackend := NewMockScraperBackend(t) - scraperBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - scraperBackend.ScraperStorageService = tt.fields.ScraperTargetStoreService - scraperBackend.OrganizationService = tt.fields.OrganizationService - scraperBackend.BucketService = tt.fields.BucketService - h := NewScraperHandler(zaptest.NewLogger(t), scraperBackend) - - r := httptest.NewRequest("GET", "http://any.tld", nil) - - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - - w := httptest.NewRecorder() - - h.handleGetScraperTarget(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetScraperTarget() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. 
handleGetScraperTarget() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetScraperTarget(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handleGetScraperTarget() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handleDeleteScraperTarget(t *testing.T) { - type fields struct { - Service influxdb.ScraperTargetStoreService - } - - type args struct { - id string - } - - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "delete a scraper target by id", - fields: fields{ - Service: &mock.ScraperTargetStoreService{ - RemoveTargetF: func(ctx context.Context, id platform.ID) error { - if id == targetOneID { - return nil - } - - return fmt.Errorf("wrong id") - }, - }, - }, - args: args{ - id: targetOneIDString, - }, - wants: wants{ - statusCode: http.StatusNoContent, - }, - }, - { - name: "scraper target not found", - fields: fields{ - Service: &mock.ScraperTargetStoreService{ - RemoveTargetF: func(ctx context.Context, id platform.ID) error { - return &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrScraperTargetNotFound, - } - }, - }, - }, - args: args{ - id: targetTwoIDString, - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - scraperBackend := NewMockScraperBackend(t) - scraperBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - scraperBackend.ScraperStorageService = tt.fields.Service - h := NewScraperHandler(zaptest.NewLogger(t), scraperBackend) - - r := httptest.NewRequest("GET", "http://any.tld", nil) - - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - - w := httptest.NewRecorder() - - h.handleDeleteScraperTarget(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleDeleteScraperTarget() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleDeleteScraperTarget() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleDeleteScraperTarget(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handleDeleteScraperTarget() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handlePostScraperTarget(t *testing.T) { - type fields struct { - OrganizationService influxdb.OrganizationService - BucketService influxdb.BucketService - ScraperTargetStoreService influxdb.ScraperTargetStoreService - } - - type args struct { - target *influxdb.ScraperTarget - } - - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "create a new scraper target", - fields: fields{ - OrganizationService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: platformtesting.MustIDBase16("0000000000000211"), - Name: "org1", - }, nil - }, - }, - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: platformtesting.MustIDBase16("0000000000000212"), - Name: "bucket1", - }, nil - }, - }, - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - AddTargetF: func(ctx context.Context, st *influxdb.ScraperTarget, userID platform.ID) error { - st.ID = targetOneID - return nil - }, - }, - }, - args: args{ - target: &influxdb.ScraperTarget{ - Name: "hello", - Type: influxdb.PrometheusScraperType, - BucketID: platformtesting.MustIDBase16("0000000000000212"), - OrgID: platformtesting.MustIDBase16("0000000000000211"), - URL: "www.some.url", - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: fmt.Sprintf( - ` - { - "id": "%s", - "name": "hello", - "type": "prometheus", - "url": "www.some.url", - "orgID": "0000000000000211", - "org": "org1", - "bucket": "bucket1", - "bucketID": "0000000000000212", - "links": { - "bucket": "/api/v2/buckets/0000000000000212", - "organization": "/api/v2/orgs/0000000000000211", - "self": "/api/v2/scrapers/%s", - "members": "/api/v2/scrapers/%s/members", - "owners": "/api/v2/scrapers/%s/owners" - } - } - `, - targetOneIDString, targetOneIDString, targetOneIDString, targetOneIDString, - ), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - scraperBackend := NewMockScraperBackend(t) - scraperBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - scraperBackend.ScraperStorageService = tt.fields.ScraperTargetStoreService - scraperBackend.OrganizationService = tt.fields.OrganizationService - scraperBackend.BucketService = tt.fields.BucketService - h := NewScraperHandler(zaptest.NewLogger(t), scraperBackend) - - st, err := json.Marshal(tt.args.target) - if err != nil { - t.Fatalf("failed to unmarshal scraper target: %v", err) - } - - r := httptest.NewRequest("GET", "http://any.tld", bytes.NewReader(st)) - r = r.WithContext(platcontext.SetAuthorizer(r.Context(), &influxdb.Authorization{})) - w := httptest.NewRecorder() - - h.handlePostScraperTarget(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePostScraperTarget() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. 
handlePostScraperTarget() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePostScraperTarget(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handlePostScraperTarget() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handlePatchScraperTarget(t *testing.T) { - type fields struct { - BucketService influxdb.BucketService - OrganizationService influxdb.OrganizationService - ScraperTargetStoreService influxdb.ScraperTargetStoreService - } - - type args struct { - id string - update *influxdb.ScraperTarget - } - - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "update a scraper target", - fields: fields{ - OrganizationService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: platformtesting.MustIDBase16("0000000000000211"), - Name: "org1", - }, nil - }, - }, - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: platformtesting.MustIDBase16("0000000000000212"), - Name: "bucket1", - }, nil - }, - }, - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - UpdateTargetF: func(ctx context.Context, t *influxdb.ScraperTarget, userID platform.ID) (*influxdb.ScraperTarget, error) { - if t.ID == targetOneID { - return t, nil - } - - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "scraper target is not found", - } - }, - }, - }, - args: args{ - id: targetOneIDString, - update: &influxdb.ScraperTarget{ - ID: targetOneID, - Name: "name", - BucketID: platformtesting.MustIDBase16("0000000000000212"), - Type: influxdb.PrometheusScraperType, - URL: "www.example.url", - OrgID: platformtesting.MustIDBase16("0000000000000211"), - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: fmt.Sprintf( - `{ - "id":"%s", - "name":"name", - "type":"prometheus", - "url":"www.example.url", - "org": "org1", - "orgID":"0000000000000211", - "bucket": "bucket1", - "bucketID":"0000000000000212", - "links":{ - "bucket": "/api/v2/buckets/0000000000000212", - "organization": "/api/v2/orgs/0000000000000211", - "self":"/api/v2/scrapers/%s", - "members":"/api/v2/scrapers/%s/members", - "owners":"/api/v2/scrapers/%s/owners" - } - }`, - targetOneIDString, targetOneIDString, targetOneIDString, targetOneIDString, - ), - }, - }, - { - name: "scraper target not found", - fields: fields{ - OrganizationService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: platformtesting.MustIDBase16("0000000000000211"), - Name: "org1", - }, nil - }, - }, - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: platformtesting.MustIDBase16("0000000000000212"), - Name: "bucket1", - }, nil - }, - }, - ScraperTargetStoreService: &mock.ScraperTargetStoreService{ - UpdateTargetF: func(ctx context.Context, upd *influxdb.ScraperTarget, userID platform.ID) (*influxdb.ScraperTarget, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, 
- Msg: influxdb.ErrScraperTargetNotFound, - } - }, - }, - }, - args: args{ - id: targetOneIDString, - update: &influxdb.ScraperTarget{ - ID: targetOneID, - Name: "name", - BucketID: platformtesting.MustIDBase16("0000000000000212"), - Type: influxdb.PrometheusScraperType, - URL: "www.example.url", - OrgID: platformtesting.MustIDBase16("0000000000000211"), - }, - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - scraperBackend := NewMockScraperBackend(t) - scraperBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - scraperBackend.ScraperStorageService = tt.fields.ScraperTargetStoreService - scraperBackend.OrganizationService = tt.fields.OrganizationService - scraperBackend.BucketService = tt.fields.BucketService - h := NewScraperHandler(zaptest.NewLogger(t), scraperBackend) - - var err error - st := make([]byte, 0) - if tt.args.update != nil { - st, err = json.Marshal(*tt.args.update) - if err != nil { - t.Fatalf("failed to unmarshal scraper target: %v", err) - } - } - - r := httptest.NewRequest("GET", "http://any.tld", bytes.NewReader(st)) - - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - r = r.WithContext(platcontext.SetAuthorizer(r.Context(), &influxdb.Authorization{})) - w := httptest.NewRecorder() - - h.handlePatchScraperTarget(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePatchScraperTarget() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePatchScraperTarget() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePatchScraperTarget(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handlePatchScraperTarget() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func initScraperService(f platformtesting.TargetFields, t *testing.T) (influxdb.ScraperTargetStoreService, string, func()) { - t.Helper() - - store := platformtesting.NewTestInmemStore(t) - tenantStore := tenant.NewStore(store) - tenantService := tenant.NewService(tenantStore) - - svc := kv.NewService(zaptest.NewLogger(t), store, tenantService) - svc.IDGenerator = f.IDGenerator - - ctx := context.Background() - for _, target := range f.Targets { - if err := svc.PutTarget(ctx, target); err != nil { - t.Fatalf("failed to populate scraper targets") - } - } - - for _, o := range f.Organizations { - mock.SetIDForFunc(&tenantStore.OrgIDGen, o.ID, func() { - if err := tenantService.CreateOrganization(ctx, o); err != nil { - t.Fatalf("failed to populate orgs") - } - }) - } - - scraperBackend := NewMockScraperBackend(t) - scraperBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - scraperBackend.ScraperStorageService = svc - scraperBackend.OrganizationService = tenantService - scraperBackend.BucketService = &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: id, - Name: "bucket1", - }, nil - }, - } - - handler := NewScraperHandler(zaptest.NewLogger(t), scraperBackend) - server := httptest.NewServer(httpMock.NewAuthMiddlewareHandler( - handler, - &influxdb.Authorization{ - UserID: platformtesting.MustIDBase16("020f755c3c082002"), - Token: "tok", - }, - )) - client := struct { - influxdb.UserResourceMappingService - influxdb.OrganizationService - ScraperService - }{ - UserResourceMappingService: tenantService, - OrganizationService: tenantService, - ScraperService: ScraperService{ - Token: "tok", - Addr: server.URL, - }, - } - done := server.Close - - return &client, "", done -} - -func TestScraperService(t *testing.T) { - platformtesting.ScraperService(initScraperService, t) -} diff --git a/http/source_proxy_service.go b/http/source_proxy_service.go deleted file mode 100644 index 3df406378db..00000000000 --- a/http/source_proxy_service.go +++ /dev/null @@ -1,72 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/lang" - platform "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/check" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/query" -) - -type SourceProxyQueryService struct { - Addr string - InsecureSkipVerify bool - platform.SourceFields -} - -func (s *SourceProxyQueryService) Query(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) { - switch req.Request.Compiler.CompilerType() { - case lang.FluxCompilerType: - return s.queryFlux(ctx, w, req) - } - return flux.Statistics{}, fmt.Errorf("compiler type not supported") -} - -func (s *SourceProxyQueryService) queryFlux(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - u, err := NewURL(s.Addr, "/api/v2/query") - if err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - var body bytes.Buffer - if err := json.NewEncoder(&body).Encode(req); err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - - hreq, err := http.NewRequest("POST", u.String(), &body) - if err 
!= nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - hreq.Header.Set("Authorization", fmt.Sprintf("Token %s", s.Token)) - hreq.Header.Set("Content-Type", "application/json") - hreq = hreq.WithContext(ctx) - - hc := NewClient(u.Scheme, s.InsecureSkipVerify) - resp, err := hc.Do(hreq) - if err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - defer resp.Body.Close() - if err := CheckError(resp); err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - - if _, err = io.Copy(w, resp.Body); err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - - return flux.Statistics{}, nil -} - -func (s *SourceProxyQueryService) Check(context.Context) check.Response { - return QueryHealthCheck(s.Addr, s.InsecureSkipVerify) -} diff --git a/http/source_service.go b/http/source_service.go deleted file mode 100644 index 6806f0199cf..00000000000 --- a/http/source_service.go +++ /dev/null @@ -1,693 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/influxdata/flux/csv" - "github.com/influxdata/flux/lang" - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "github.com/influxdata/influxdb/v2/query" - "go.uber.org/zap" -) - -const ( - prefixSources = "/api/v2/sources" -) - -type sourceResponse struct { - *influxdb.Source - Links map[string]interface{} `json:"links"` -} - -func newSourceResponse(s *influxdb.Source) *sourceResponse { - s.Password = "" - s.SharedSecret = "" - - if s.Type == influxdb.SelfSourceType { - return &sourceResponse{ - Source: s, - Links: map[string]interface{}{ - "self": fmt.Sprintf("%s/%s", prefixSources, s.ID.String()), - "query": fmt.Sprintf("%s/%s/query", prefixSources, s.ID.String()), - "buckets": fmt.Sprintf("%s/%s/buckets", prefixSources, s.ID.String()), - "health": fmt.Sprintf("%s/%s/health", prefixSources, s.ID.String()), - }, - } - } - - return &sourceResponse{ - Source: s, - Links: map[string]interface{}{ - "self": fmt.Sprintf("%s/%s", prefixSources, s.ID.String()), - "query": fmt.Sprintf("%s/%s/query", prefixSources, s.ID.String()), - "buckets": fmt.Sprintf("%s/%s/buckets", prefixSources, s.ID.String()), - "health": fmt.Sprintf("%s/%s/health", prefixSources, s.ID.String()), - }, - } -} - -type sourcesResponse struct { - Sources []*sourceResponse `json:"sources"` - Links map[string]interface{} `json:"links"` -} - -func newSourcesResponse(srcs []*influxdb.Source) *sourcesResponse { - res := &sourcesResponse{ - Links: map[string]interface{}{ - "self": prefixSources, - }, - } - - res.Sources = make([]*sourceResponse, 0, len(srcs)) - for _, src := range srcs { - res.Sources = append(res.Sources, newSourceResponse(src)) - } - - return res -} - -// SourceBackend is all services and associated parameters required to construct -// the SourceHandler. -type SourceBackend struct { - errors.HTTPErrorHandler - log *zap.Logger - - SourceService influxdb.SourceService - LabelService influxdb.LabelService - BucketService influxdb.BucketService - NewQueryService func(s *influxdb.Source) (query.ProxyQueryService, error) -} - -// NewSourceBackend returns a new instance of SourceBackend. 
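A rough sketch of driving the SourceProxyQueryService defined above; the address and Flux script are placeholders, and the authorization token is assumed to be carried by the embedded platform.SourceFields, since queryFlux reads s.Token.

package main

import (
	"context"
	"os"

	"github.com/influxdata/flux/lang"
	influxhttp "github.com/influxdata/influxdb/v2/http"
	"github.com/influxdata/influxdb/v2/query"
)

func main() {
	// Addr is a placeholder; the token travels on the embedded SourceFields (left empty here).
	svc := &influxhttp.SourceProxyQueryService{Addr: "http://localhost:8086"}

	// The same shape of request that queryFlux serializes and forwards to /api/v2/query.
	req := &query.ProxyRequest{}
	req.Request.Compiler = lang.FluxCompiler{
		Query: `from(bucket: "bucket1") |> range(start: -1h)`, // placeholder Flux script
	}

	if _, err := svc.Query(context.Background(), os.Stdout, req); err != nil {
		panic(err)
	}
}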
-func NewSourceBackend(log *zap.Logger, b *APIBackend) *SourceBackend { - return &SourceBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - - SourceService: b.SourceService, - LabelService: b.LabelService, - BucketService: b.BucketService, - NewQueryService: b.NewQueryService, - } -} - -// SourceHandler is a handler for sources -type SourceHandler struct { - *httprouter.Router - errors.HTTPErrorHandler - log *zap.Logger - SourceService influxdb.SourceService - LabelService influxdb.LabelService - BucketService influxdb.BucketService - - // TODO(desa): this was done so in order to remove an import cycle and to allow - // for http mocking. - NewQueryService func(s *influxdb.Source) (query.ProxyQueryService, error) -} - -// NewSourceHandler returns a new instance of SourceHandler. -func NewSourceHandler(log *zap.Logger, b *SourceBackend) *SourceHandler { - h := &SourceHandler{ - Router: NewRouter(b.HTTPErrorHandler), - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - - SourceService: b.SourceService, - LabelService: b.LabelService, - BucketService: b.BucketService, - NewQueryService: b.NewQueryService, - } - - h.HandlerFunc("POST", prefixSources, h.handlePostSource) - h.HandlerFunc("GET", "/api/v2/sources", h.handleGetSources) - h.HandlerFunc("GET", "/api/v2/sources/:id", h.handleGetSource) - h.HandlerFunc("PATCH", "/api/v2/sources/:id", h.handlePatchSource) - h.HandlerFunc("DELETE", "/api/v2/sources/:id", h.handleDeleteSource) - - h.HandlerFunc("GET", "/api/v2/sources/:id/buckets", h.handleGetSourcesBuckets) - h.HandlerFunc("POST", "/api/v2/sources/:id/query", h.handlePostSourceQuery) - h.HandlerFunc("GET", "/api/v2/sources/:id/health", h.handleGetSourceHealth) - - return h -} - -func decodeSourceQueryRequest(r *http.Request) (*query.ProxyRequest, error) { - // starts here - request := struct { - Query string `json:"query"` - Type string `json:"type"` - DB string `json:"db"` - RP string `json:"rp"` - Cluster string `json:"cluster"` - OrganizationID platform.ID `json:"organizationID"` - // TODO(desa): support influxql dialect - Dialect csv.Dialect `json:"dialect"` - }{} - - err := json.NewDecoder(r.Body).Decode(&request) - if err != nil { - return nil, err - } - - req := &query.ProxyRequest{} - req.Dialect = request.Dialect - - req.Request.OrganizationID = request.OrganizationID - - switch request.Type { - case lang.FluxCompilerType: - req.Request.Compiler = lang.FluxCompiler{ - Query: request.Query, - } - default: - return nil, fmt.Errorf("compiler type not supported") - } - - return req, nil -} - -// handlePostSourceQuery is the HTTP handler for POST /api/v2/sources/:id/query -func (h *SourceHandler) handlePostSourceQuery(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - gsr, err := decodeGetSourceRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - req, err := decodeSourceQueryRequest(r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - s, err := h.SourceService.FindSourceByID(ctx, gsr.SourceID) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - querySvc, err := h.NewQueryService(s) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - _, err = querySvc.Query(ctx, w, req) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } -} - -// handleGetSourcesBuckets is the HTTP handler for the GET /api/v2/sources/:id/buckets route. 
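For reference, a minimal sketch of the JSON body accepted by the POST /api/v2/sources/:id/query handler above, mirroring the fields read by decodeSourceQueryRequest; only the flux compiler type is accepted, and the query text is a placeholder.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// decodeSourceQueryRequest also reads db, rp, cluster, organizationID and dialect;
	// type selects the compiler, and anything other than the flux type is rejected.
	body := map[string]string{
		"type":  "flux",
		"query": `from(bucket: "bucket1") |> range(start: -1h)`,
	}
	b, _ := json.Marshal(body)
	fmt.Println(string(b)) // body for POST /api/v2/sources/:id/query
}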
-func (h *SourceHandler) handleGetSourcesBuckets(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - req, err := decodeGetSourceBucketsRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - _, err = h.SourceService.FindSourceByID(ctx, req.getSourceRequest.SourceID) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - bs, _, err := h.BucketService.FindBuckets(ctx, req.getBucketsRequest.filter) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, newBucketsResponse(ctx, req.opts, req.filter, bs, h.LabelService)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type getSourceBucketsRequest struct { - *getSourceRequest - *getBucketsRequest -} - -func decodeGetSourceBucketsRequest(ctx context.Context, r *http.Request) (*getSourceBucketsRequest, error) { - getSrcReq, err := decodeGetSourceRequest(ctx, r) - if err != nil { - return nil, err - } - getBucketsReq, err := decodeGetBucketsRequest(r) - if err != nil { - return nil, err - } - return &getSourceBucketsRequest{ - getBucketsRequest: getBucketsReq, - getSourceRequest: getSrcReq, - }, nil -} - -type getBucketsRequest struct { - filter influxdb.BucketFilter - opts influxdb.FindOptions -} - -func decodeGetBucketsRequest(r *http.Request) (*getBucketsRequest, error) { - qp := r.URL.Query() - req := &getBucketsRequest{} - - opts, err := influxdb.DecodeFindOptions(r) - if err != nil { - return nil, err - } - - req.opts = *opts - - if orgID := qp.Get("orgID"); orgID != "" { - id, err := platform.IDFromString(orgID) - if err != nil { - return nil, err - } - req.filter.OrganizationID = id - } - - if org := qp.Get("org"); org != "" { - req.filter.Org = &org - } - - if name := qp.Get("name"); name != "" { - req.filter.Name = &name - } - - if bucketID := qp.Get("id"); bucketID != "" { - id, err := platform.IDFromString(bucketID) - if err != nil { - return nil, err - } - req.filter.ID = id - } - - return req, nil -} - -type bucketResponse struct { - bucket - Links map[string]string `json:"links"` - Labels []influxdb.Label `json:"labels"` -} - -type bucket struct { - ID platform.ID `json:"id,omitempty"` - OrgID platform.ID `json:"orgID,omitempty"` - Type string `json:"type"` - Description string `json:"description,omitempty"` - Name string `json:"name"` - RetentionPolicyName string `json:"rp,omitempty"` // This to support v1 sources - RetentionRules []retentionRule `json:"retentionRules"` - influxdb.CRUDLog -} - -func newBucket(pb *influxdb.Bucket) *bucket { - if pb == nil { - return nil - } - - rules := []retentionRule{ - { - Type: "expire", - EverySeconds: int64(pb.RetentionPeriod.Round(time.Second) / time.Second), - ShardGroupDurationSeconds: int64(pb.ShardGroupDuration.Round(time.Second) / time.Second), - }, - } - - return &bucket{ - ID: pb.ID, - OrgID: pb.OrgID, - Type: pb.Type.String(), - Name: pb.Name, - Description: pb.Description, - RetentionPolicyName: pb.RetentionPolicyName, - RetentionRules: rules, - CRUDLog: pb.CRUDLog, - } -} - -// retentionRule is the retention rule action for a bucket. 
-type retentionRule struct { - Type string `json:"type"` - EverySeconds int64 `json:"everySeconds"` - ShardGroupDurationSeconds int64 `json:"shardGroupDurationSeconds"` -} - -func NewBucketResponse(b *influxdb.Bucket, labels []*influxdb.Label) *bucketResponse { - res := &bucketResponse{ - Links: map[string]string{ - "labels": fmt.Sprintf("/api/v2/buckets/%s/labels", b.ID), - "logs": fmt.Sprintf("/api/v2/buckets/%s/logs", b.ID), - "members": fmt.Sprintf("/api/v2/buckets/%s/members", b.ID), - "org": fmt.Sprintf("/api/v2/orgs/%s", b.OrgID), - "owners": fmt.Sprintf("/api/v2/buckets/%s/owners", b.ID), - "self": fmt.Sprintf("/api/v2/buckets/%s", b.ID), - "write": fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", b.OrgID, b.ID), - }, - bucket: *newBucket(b), - Labels: []influxdb.Label{}, - } - - for _, l := range labels { - res.Labels = append(res.Labels, *l) - } - - return res -} - -type bucketsResponse struct { - Links *influxdb.PagingLinks `json:"links"` - Buckets []*bucketResponse `json:"buckets"` -} - -func newBucketsResponse(ctx context.Context, opts influxdb.FindOptions, f influxdb.BucketFilter, bs []*influxdb.Bucket, labelService influxdb.LabelService) *bucketsResponse { - rs := make([]*bucketResponse, 0, len(bs)) - for _, b := range bs { - labels, _ := labelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: b.ID, ResourceType: influxdb.BucketsResourceType}) - rs = append(rs, NewBucketResponse(b, labels)) - } - return &bucketsResponse{ - Links: influxdb.NewPagingLinks(prefixBuckets, opts, f, len(bs)), - Buckets: rs, - } -} - -// handlePostSource is the HTTP handler for the POST /api/v2/sources route. -func (h *SourceHandler) handlePostSource(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePostSourceRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := h.SourceService.CreateSource(ctx, req.Source); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - res := newSourceResponse(req.Source) - h.log.Debug("Source created", zap.String("source", fmt.Sprint(res))) - if err := encodeResponse(ctx, w, http.StatusCreated, res); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type postSourceRequest struct { - Source *influxdb.Source -} - -func decodePostSourceRequest(ctx context.Context, r *http.Request) (*postSourceRequest, error) { - b := &influxdb.Source{} - if err := json.NewDecoder(r.Body).Decode(b); err != nil { - return nil, err - } - - return &postSourceRequest{ - Source: b, - }, nil -} - -// handleGetSource is the HTTP handler for the GET /api/v2/sources/:id route. -func (h *SourceHandler) handleGetSource(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetSourceRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - s, err := h.SourceService.FindSourceByID(ctx, req.SourceID) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - res := newSourceResponse(s) - h.log.Debug("Source retrieved", zap.String("source", fmt.Sprint(res))) - - if err := encodeResponse(ctx, w, http.StatusOK, res); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -// handleGetSourceHealth is the HTTP handler for the GET /api/v2/sources/:id/health route.
-func (h *SourceHandler) handleGetSourceHealth(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - msg := `{"name":"sources","message":"source is %shealthy","status":"%s","checks":[]}` - w.Header().Set("Content-Type", "application/json; charset=utf-8") - - req, err := decodeGetSourceRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - if _, err := h.SourceService.FindSourceByID(ctx, req.SourceID); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - // todo(leodido) > check source is actually healthy and reply with 503 if not - // w.WriteHeader(http.StatusServiceUnavailable) - // fmt.Fprintln(w, fmt.Sprintf(msg, "not ", "fail")) - - w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, fmt.Sprintf(msg, "", "pass")) -} - -type getSourceRequest struct { - SourceID platform.ID -} - -func decodeGetSourceRequest(ctx context.Context, r *http.Request) (*getSourceRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - req := &getSourceRequest{ - SourceID: i, - } - - return req, nil -} - -// handleDeleteSource is the HTTP handler for the DELETE /api/v2/sources/:id route. -func (h *SourceHandler) handleDeleteSource(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeDeleteSourceRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := h.SourceService.DeleteSource(ctx, req.SourceID); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Source deleted", zap.String("sourceID", fmt.Sprint(req.SourceID))) - - w.WriteHeader(http.StatusNoContent) -} - -type deleteSourceRequest struct { - SourceID platform.ID -} - -func decodeDeleteSourceRequest(ctx context.Context, r *http.Request) (*deleteSourceRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - req := &deleteSourceRequest{ - SourceID: i, - } - - return req, nil -} - -// handleGetSources is the HTTP handler for the GET /api/v2/sources route. -func (h *SourceHandler) handleGetSources(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetSourcesRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - srcs, _, err := h.SourceService.FindSources(ctx, req.findOptions) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - res := newSourcesResponse(srcs) - h.log.Debug("Sources retrieved", zap.String("sources", fmt.Sprint(res))) - - if err := encodeResponse(ctx, w, http.StatusOK, res); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type getSourcesRequest struct { - findOptions influxdb.FindOptions -} - -func decodeGetSourcesRequest(ctx context.Context, r *http.Request) (*getSourcesRequest, error) { - req := &getSourcesRequest{} - return req, nil -} - -// handlePatchSource is the HTTP handler for the PATCH /api/v2/sources/:id route.
-func (h *SourceHandler) handlePatchSource(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePatchSourceRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - b, err := h.SourceService.UpdateSource(ctx, req.SourceID, req.Update) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Source updated", zap.String("source", fmt.Sprint(b))) - - if err := encodeResponse(ctx, w, http.StatusOK, b); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type patchSourceRequest struct { - Update influxdb.SourceUpdate - SourceID platform.ID -} - -func decodePatchSourceRequest(ctx context.Context, r *http.Request) (*patchSourceRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - var upd influxdb.SourceUpdate - if err := json.NewDecoder(r.Body).Decode(&upd); err != nil { - return nil, err - } - - return &patchSourceRequest{ - Update: upd, - SourceID: i, - }, nil -} - -// SourceService connects to Influx via HTTP using tokens to manage sources -type SourceService struct { - Client *httpc.Client -} - -// FindSourceByID returns a single source by ID. -func (s *SourceService) FindSourceByID(ctx context.Context, id platform.ID) (*influxdb.Source, error) { - var b influxdb.Source - err := s.Client. - Get(prefixSources, id.String()). - DecodeJSON(&b). - Do(ctx) - if err != nil { - return nil, err - } - return &b, nil -} - -// FindSources returns a list of sources that match filter and the total count of matching sources. -// Additional options provide pagination & sorting. -func (s *SourceService) FindSources(ctx context.Context, opt influxdb.FindOptions) ([]*influxdb.Source, int, error) { - var bs []*influxdb.Source - err := s.Client. - Get(prefixSources). - DecodeJSON(&bs). - Do(ctx) - if err != nil { - return nil, 0, err - } - - return bs, len(bs), nil -} - -// CreateSource creates a new source and sets b.ID with the new identifier. -func (s *SourceService) CreateSource(ctx context.Context, b *influxdb.Source) error { - return s.Client. - PostJSON(b, prefixSources). - DecodeJSON(b). - Do(ctx) -} - -// UpdateSource updates a single source with changeset. -// Returns the new source state after update. -func (s *SourceService) UpdateSource(ctx context.Context, id platform.ID, upd influxdb.SourceUpdate) (*influxdb.Source, error) { - var b influxdb.Source - err := s.Client. - PatchJSON(upd, prefixSources, id.String()). - DecodeJSON(&b). - Do(ctx) - if err != nil { - return nil, err - } - return &b, nil -} - -// DeleteSource removes a source by ID. -func (s *SourceService) DeleteSource(ctx context.Context, id platform.ID) error { - return s.Client. - Delete(prefixSources, id.String()). - StatusFn(func(resp *http.Response) error { - return CheckErrorStatus(http.StatusNoContent, resp) - }). 
- Do(ctx) - -} diff --git a/http/source_service_test.go b/http/source_service_test.go deleted file mode 100644 index 235491cb932..00000000000 --- a/http/source_service_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package http - -import ( - "reflect" - "testing" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -func Test_newSourceResponse(t *testing.T) { - tests := []struct { - name string - s *platform.Source - want *sourceResponse - }{ - { - name: "self source returns links to this instance", - s: &platform.Source{ - ID: platform2.ID(1), - OrganizationID: platform2.ID(1), - Name: "Hi", - Type: platform.SelfSourceType, - URL: "/", - }, - want: &sourceResponse{ - Source: &platform.Source{ - ID: platform2.ID(1), - OrganizationID: platform2.ID(1), - Name: "Hi", - Type: platform.SelfSourceType, - URL: "/", - }, - Links: map[string]interface{}{ - "self": "/api/v2/sources/0000000000000001", - "query": "/api/v2/sources/0000000000000001/query", - "buckets": "/api/v2/sources/0000000000000001/buckets", - "health": "/api/v2/sources/0000000000000001/health", - }, - }, - }, - { - name: "v1 sources have proxied links", - s: &platform.Source{ - ID: platform2.ID(1), - OrganizationID: platform2.ID(1), - Name: "Hi", - Type: platform.V1SourceType, - URL: "/", - }, - want: &sourceResponse{ - Source: &platform.Source{ - ID: platform2.ID(1), - OrganizationID: platform2.ID(1), - Name: "Hi", - Type: platform.V1SourceType, - URL: "/", - }, - Links: map[string]interface{}{ - "self": "/api/v2/sources/0000000000000001", - "query": "/api/v2/sources/0000000000000001/query", - "buckets": "/api/v2/sources/0000000000000001/buckets", - "health": "/api/v2/sources/0000000000000001/health", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := newSourceResponse(tt.s); !reflect.DeepEqual(got, tt.want) { - t.Errorf("newSourceResponse() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/http/swagger.yml b/http/swagger.yml deleted file mode 100644 index 99b1307d9e3..00000000000 --- a/http/swagger.yml +++ /dev/null @@ -1,2 +0,0 @@ -# this file has been moved! consume file at https://github.com/influxdata/openapi/blob/master/contracts/oss.yml -# continue to maintain and develop the definition at https://github.com/influxdata/openapi/blob/master/src/oss.yml diff --git a/http/swaggerV1Compat.yml b/http/swaggerV1Compat.yml deleted file mode 100644 index 9721e6493ad..00000000000 --- a/http/swaggerV1Compat.yml +++ /dev/null @@ -1,329 +0,0 @@ -openapi: "3.0.0" -info: - title: Influx API Service (V1 compatible endpoints) - version: 0.1.0 -servers: - - url: / - description: V1 compatible api endpoints. -paths: - /write: - post: # technically this functions with other methods as well - operationId: PostWriteV1 - tags: - - Write - summary: Write time series data into InfluxDB in a V1 compatible format - requestBody: - description: Line protocol body - required: true - content: - text/plain: - schema: - type: string - parameters: - - $ref: "#/components/parameters/TraceSpan" - - $ref: "#/components/parameters/AuthUserV1" - - $ref: "#/components/parameters/AuthPassV1" - - in: query - name: db - schema: - type: string - required: true - description: The bucket to write to. If none exist a bucket will be created with a default 3 day retention policy. - - in: query - name: rp - schema: - type: string - description: The retention policy name. 
- - in: query - name: precision - schema: - type: string - description: Write precision. - - in: header - name: Content-Encoding - description: When present, its value indicates to the database that compression is applied to the line-protocol body. - schema: - type: string - description: Specifies that the line protocol in the body is encoded with gzip or not encoded with identity. - default: identity - enum: - - gzip - - identity - responses: - "204": - description: Write data is correctly formatted and accepted for writing to the bucket. - "400": - description: Line protocol poorly formed and no points were written. Response can be used to determine the first malformed line in the body line-protocol. All data in body was rejected and not written. - content: - application/json: - schema: - $ref: "#/components/schemas/LineProtocolError" - "401": - description: Token does not have sufficient permissions to write to this organization and bucket or the organization and bucket do not exist. - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - "403": - description: No token was sent and they are required. - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - "413": - description: Write has been rejected because the payload is too large. Error message returns max size supported. All data in body was rejected and not written. - content: - application/json: - schema: - $ref: "#/components/schemas/LineProtocolLengthError" - "429": - description: Token is temporarily over quota. The Retry-After header describes when to try the write again. - headers: - Retry-After: - description: A non-negative decimal integer indicating the seconds to delay after the response is received. - schema: - type: integer - format: int32 - "503": - description: Server is temporarily unavailable to accept writes. The Retry-After header describes when to try the write again. - headers: - Retry-After: - description: A non-negative decimal integer indicating the seconds to delay after the response is received. - schema: - type: integer - format: int32 - default: - description: Internal server error - content: - application/json: - schema: - $ref: "#/components/schemas/Error" - /query: - post: # technically this functions with other methods as well - operationId: PostQueryV1 - tags: - - Query - summary: Query InfluxDB in a V1 compatible format - requestBody: - description: InfluxQL query to execute. - content: - text/plain: # although this should be `application/vnd.influxql`, oats breaks so we define the content-type header parameter - schema: - type: string - parameters: - - $ref: "#/components/parameters/TraceSpan" - - $ref: "#/components/parameters/AuthUserV1" - - $ref: "#/components/parameters/AuthPassV1" - - in: header - name: Accept - schema: - type: string - description: Specifies how query results should be encoded in the response. - default: application/json - enum: - - application/json - - application/csv - - text/csv - - application/x-msgpack - - in: header - name: Accept-Encoding - description: The Accept-Encoding request HTTP header advertises which content encoding, usually a compression algorithm, the client is able to understand. - schema: - type: string - description: Specifies that the query response in the body should be encoded with gzip or not encoded with identity. 
- default: identity - enum: - - gzip - - identity - - in: header - name: Content-Type - schema: - type: string - enum: - - application/vnd.influxql - - in: query - name: q - description: Defines the influxql query to run. - schema: - type: string - responses: - "200": - description: Query results - headers: - Content-Encoding: - description: The Content-Encoding entity header is used to compress the media-type. When present, its value indicates which encodings were applied to the entity-body - schema: - type: string - description: Specifies that the response in the body is encoded with gzip or not encoded with identity. - default: identity - enum: - - gzip - - identity - Trace-Id: - description: The Trace-Id header reports the request's trace ID, if one was generated. - schema: - type: string - description: Specifies the request's trace ID. - content: - application/csv: - schema: - $ref: "#/components/schemas/InfluxQLCSVResponse" - text/csv: - schema: - $ref: "#/components/schemas/InfluxQLCSVResponse" - application/json: - schema: - $ref: "#/components/schemas/InfluxQLResponse" - application/x-msgpack: - schema: - type: string - format: binary - "429": - description: Token is temporarily over quota. The Retry-After header describes when to try the read again. - headers: - Retry-After: - description: A non-negative decimal integer indicating the seconds to delay after the response is received. - schema: - type: integer - format: int32 - default: - description: Error processing query - content: - application/json: - schema: - $ref: "#/components/schemas/Error" -components: - parameters: - TraceSpan: - in: header - name: Zap-Trace-Span - description: OpenTracing span context - example: - trace_id: "1" - span_id: "1" - baggage: - key: value - required: false - schema: - type: string - AuthUserV1: - in: query - name: u - required: false - schema: - type: string - description: Username. - AuthPassV1: - in: query - name: p - required: false - schema: - type: string - description: User token. - schemas: - InfluxQLResponse: - properties: - results: - type: array - items: - type: object - properties: - statement_id: - type: integer - series: - type: array - items: - type: object - properties: - name: - type: string - columns: - type: array - items: - type: integer - values: - type: array - items: - type: array - items: {} - InfluxQLCSVResponse: - type: string - example: > - name,tags,time,test_field,test_tag - test_measurement,,1603740794286107366,1,tag_value - test_measurement,,1603740870053205649,2,tag_value - test_measurement,,1603741221085428881,3,tag_value - - Error: - properties: - code: - description: Code is the machine-readable error code. - readOnly: true - type: string - # This set of enumerations must remain in sync with the constants defined in errors.go - enum: - - internal error - - not found - - conflict - - invalid - - unprocessable entity - - empty value - - unavailable - - forbidden - - too many requests - - unauthorized - - method not allowed - message: - readOnly: true - description: Message is a human-readable message. - type: string - required: [code, message] - LineProtocolError: - properties: - code: - description: Code is the machine-readable error code. - readOnly: true - type: string - enum: - - internal error - - not found - - conflict - - invalid - - empty value - - unavailable - message: - readOnly: true - description: Message is a human-readable message. - type: string - op: - readOnly: true - description: Op describes the logical code operation during error. 
Useful for debugging. - type: string - err: - readOnly: true - description: Err is a stack of errors that occurred during processing of the request. Useful for debugging. - type: string - line: - readOnly: true - description: First line within sent body containing malformed data - type: integer - format: int32 - required: [code, message, op, err] - LineProtocolLengthError: - properties: - code: - description: Code is the machine-readable error code. - readOnly: true - type: string - enum: - - invalid - message: - readOnly: true - description: Message is a human-readable message. - type: string - maxLength: - readOnly: true - description: Max length in bytes for a body of line-protocol. - type: integer - format: int32 - required: [code, message, maxLength] diff --git a/http/task_service.go b/http/task_service.go deleted file mode 100644 index 9df6f3693a4..00000000000 --- a/http/task_service.go +++ /dev/null @@ -1,1767 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "path" - "strconv" - "time" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - pcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "github.com/influxdata/influxdb/v2/task/options" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "go.uber.org/zap" -) - -// TaskBackend is all services and associated parameters required to construct -// the TaskHandler. -type TaskBackend struct { - errors2.HTTPErrorHandler - log *zap.Logger - - AlgoWProxy FeatureProxyHandler - TaskService taskmodel.TaskService - AuthorizationService influxdb.AuthorizationService - OrganizationService influxdb.OrganizationService - UserResourceMappingService influxdb.UserResourceMappingService - LabelService influxdb.LabelService - UserService influxdb.UserService - BucketService influxdb.BucketService -} - -// NewTaskBackend returns a new instance of TaskBackend. -func NewTaskBackend(log *zap.Logger, b *APIBackend) *TaskBackend { - return &TaskBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - AlgoWProxy: b.AlgoWProxy, - TaskService: b.TaskService, - AuthorizationService: b.AuthorizationService, - OrganizationService: b.OrganizationService, - UserResourceMappingService: b.UserResourceMappingService, - LabelService: b.LabelService, - UserService: b.UserService, - BucketService: b.BucketService, - } -} - -// TaskHandler represents an HTTP API handler for tasks. 
-type TaskHandler struct { - *httprouter.Router - errors2.HTTPErrorHandler - log *zap.Logger - - TaskService taskmodel.TaskService - AuthorizationService influxdb.AuthorizationService - OrganizationService influxdb.OrganizationService - UserResourceMappingService influxdb.UserResourceMappingService - LabelService influxdb.LabelService - UserService influxdb.UserService - BucketService influxdb.BucketService -} - -const ( - prefixTasks = "/api/v2/tasks" - tasksIDPath = "/api/v2/tasks/:id" - tasksIDLogsPath = "/api/v2/tasks/:id/logs" - tasksIDMembersPath = "/api/v2/tasks/:id/members" - tasksIDMembersIDPath = "/api/v2/tasks/:id/members/:userID" - tasksIDOwnersPath = "/api/v2/tasks/:id/owners" - tasksIDOwnersIDPath = "/api/v2/tasks/:id/owners/:userID" - tasksIDRunsPath = "/api/v2/tasks/:id/runs" - tasksIDRunsIDPath = "/api/v2/tasks/:id/runs/:rid" - tasksIDRunsIDLogsPath = "/api/v2/tasks/:id/runs/:rid/logs" - tasksIDRunsIDRetryPath = "/api/v2/tasks/:id/runs/:rid/retry" - tasksIDLabelsPath = "/api/v2/tasks/:id/labels" - tasksIDLabelsIDPath = "/api/v2/tasks/:id/labels/:lid" -) - -// NewTaskHandler returns a new instance of TaskHandler. -func NewTaskHandler(log *zap.Logger, b *TaskBackend) *TaskHandler { - h := &TaskHandler{ - Router: NewRouter(b.HTTPErrorHandler), - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - - TaskService: b.TaskService, - AuthorizationService: b.AuthorizationService, - OrganizationService: b.OrganizationService, - UserResourceMappingService: b.UserResourceMappingService, - LabelService: b.LabelService, - UserService: b.UserService, - BucketService: b.BucketService, - } - - h.HandlerFunc("GET", prefixTasks, h.handleGetTasks) - h.Handler("POST", prefixTasks, withFeatureProxy(b.AlgoWProxy, http.HandlerFunc(h.handlePostTask))) - - h.HandlerFunc("GET", tasksIDPath, h.handleGetTask) - h.Handler("PATCH", tasksIDPath, withFeatureProxy(b.AlgoWProxy, http.HandlerFunc(h.handleUpdateTask))) - h.HandlerFunc("DELETE", tasksIDPath, h.handleDeleteTask) - - h.HandlerFunc("GET", tasksIDLogsPath, h.handleGetLogs) - h.HandlerFunc("GET", tasksIDRunsIDLogsPath, h.handleGetLogs) - - memberBackend := MemberBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "member")), - ResourceType: influxdb.TasksResourceType, - UserType: influxdb.Member, - UserResourceMappingService: b.UserResourceMappingService, - UserService: b.UserService, - } - h.HandlerFunc("POST", tasksIDMembersPath, newPostMemberHandler(memberBackend)) - h.HandlerFunc("GET", tasksIDMembersPath, newGetMembersHandler(memberBackend)) - h.HandlerFunc("DELETE", tasksIDMembersIDPath, newDeleteMemberHandler(memberBackend)) - - ownerBackend := MemberBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "member")), - ResourceType: influxdb.TasksResourceType, - UserType: influxdb.Owner, - UserResourceMappingService: b.UserResourceMappingService, - UserService: b.UserService, - } - h.HandlerFunc("POST", tasksIDOwnersPath, newPostMemberHandler(ownerBackend)) - h.HandlerFunc("GET", tasksIDOwnersPath, newGetMembersHandler(ownerBackend)) - h.HandlerFunc("DELETE", tasksIDOwnersIDPath, newDeleteMemberHandler(ownerBackend)) - - h.HandlerFunc("GET", tasksIDRunsPath, h.handleGetRuns) - h.HandlerFunc("POST", tasksIDRunsPath, h.handleForceRun) - h.HandlerFunc("GET", tasksIDRunsIDPath, h.handleGetRun) - h.HandlerFunc("POST", tasksIDRunsIDRetryPath, h.handleRetryRun) - h.HandlerFunc("DELETE", tasksIDRunsIDPath, h.handleCancelRun) - - labelBackend := &LabelBackend{ - HTTPErrorHandler: 
b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "label")), - LabelService: b.LabelService, - ResourceType: influxdb.TasksResourceType, - } - h.HandlerFunc("GET", tasksIDLabelsPath, newGetLabelsHandler(labelBackend)) - h.HandlerFunc("POST", tasksIDLabelsPath, newPostLabelHandler(labelBackend)) - h.HandlerFunc("DELETE", tasksIDLabelsIDPath, newDeleteLabelHandler(labelBackend)) - - return h -} - -// Task is a package-specific Task format that preserves the expected format for the API, -// where time values are represented as strings -type Task struct { - ID platform.ID `json:"id"` - OrganizationID platform.ID `json:"orgID"` - Organization string `json:"org"` - OwnerID platform.ID `json:"ownerID"` - Name string `json:"name"` - Description string `json:"description,omitempty"` - Status string `json:"status"` - Flux string `json:"flux"` - Every string `json:"every,omitempty"` - Cron string `json:"cron,omitempty"` - Offset string `json:"offset,omitempty"` - LatestCompleted string `json:"latestCompleted,omitempty"` - LastRunStatus string `json:"lastRunStatus,omitempty"` - LastRunError string `json:"lastRunError,omitempty"` - CreatedAt string `json:"createdAt,omitempty"` - UpdatedAt string `json:"updatedAt,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -type taskResponse struct { - Links map[string]string `json:"links"` - Labels []influxdb.Label `json:"labels"` - Task -} - -// NewFrontEndTask converts a internal task type to a task that we want to display to users -func NewFrontEndTask(t taskmodel.Task) Task { - latestCompleted := "" - if !t.LatestCompleted.IsZero() { - latestCompleted = t.LatestCompleted.Format(time.RFC3339) - } - createdAt := "" - if !t.CreatedAt.IsZero() { - createdAt = t.CreatedAt.Format(time.RFC3339) - } - updatedAt := "" - if !t.UpdatedAt.IsZero() { - updatedAt = t.UpdatedAt.Format(time.RFC3339) - } - offset := "" - if t.Offset != 0*time.Second { - offset = customParseDuration(t.Offset) - } - - return Task{ - ID: t.ID, - OrganizationID: t.OrganizationID, - Organization: t.Organization, - OwnerID: t.OwnerID, - Name: t.Name, - Description: t.Description, - Status: t.Status, - Flux: t.Flux, - Every: t.Every, - Cron: t.Cron, - Offset: offset, - LatestCompleted: latestCompleted, - LastRunStatus: t.LastRunStatus, - LastRunError: t.LastRunError, - CreatedAt: createdAt, - UpdatedAt: updatedAt, - Metadata: t.Metadata, - } -} - -func convertTask(t Task) *taskmodel.Task { - var ( - latestCompleted time.Time - createdAt time.Time - updatedAt time.Time - offset time.Duration - ) - - if t.LatestCompleted != "" { - latestCompleted, _ = time.Parse(time.RFC3339, t.LatestCompleted) - } - - if t.CreatedAt != "" { - createdAt, _ = time.Parse(time.RFC3339, t.CreatedAt) - } - - if t.UpdatedAt != "" { - updatedAt, _ = time.Parse(time.RFC3339, t.UpdatedAt) - } - - if t.Offset != "" { - var duration options.Duration - if err := duration.Parse(t.Offset); err == nil { - offset, _ = duration.DurationFrom(time.Now()) - } - } - - return &taskmodel.Task{ - ID: t.ID, - OrganizationID: t.OrganizationID, - Organization: t.Organization, - OwnerID: t.OwnerID, - Name: t.Name, - Description: t.Description, - Status: t.Status, - Flux: t.Flux, - Every: t.Every, - Cron: t.Cron, - Offset: offset, - LatestCompleted: latestCompleted, - LastRunStatus: t.LastRunStatus, - LastRunError: t.LastRunError, - CreatedAt: createdAt, - UpdatedAt: updatedAt, - Metadata: t.Metadata, - } -} - -func customParseDuration(d time.Duration) string { - str := "" - if d < 0 { - str = "-" - d = d * 
-1 - } - - // parse hours - hours := d / time.Hour - if hours != 0 { - str = fmt.Sprintf("%s%dh", str, hours) - } - if d%time.Hour == 0 { - return str - } - // parse minutes - d = d - (time.Duration(hours) * time.Hour) - - min := d / time.Minute - if min != 0 { - str = fmt.Sprintf("%s%dm", str, min) - } - if d%time.Minute == 0 { - return str - } - - // parse seconds - d = d - time.Duration(min)*time.Minute - sec := d / time.Second - - if sec != 0 { - str = fmt.Sprintf("%s%ds", str, sec) - } - return str -} - -func newTaskResponse(t taskmodel.Task, labels []*influxdb.Label) taskResponse { - response := taskResponse{ - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/tasks/%s", t.ID), - "members": fmt.Sprintf("/api/v2/tasks/%s/members", t.ID), - "owners": fmt.Sprintf("/api/v2/tasks/%s/owners", t.ID), - "labels": fmt.Sprintf("/api/v2/tasks/%s/labels", t.ID), - "runs": fmt.Sprintf("/api/v2/tasks/%s/runs", t.ID), - "logs": fmt.Sprintf("/api/v2/tasks/%s/logs", t.ID), - }, - Task: NewFrontEndTask(t), - Labels: []influxdb.Label{}, - } - - for _, l := range labels { - response.Labels = append(response.Labels, *l) - } - - return response -} - -func newTasksPagingLinks(basePath string, ts []*taskmodel.Task, f taskmodel.TaskFilter) *influxdb.PagingLinks { - var self, next string - u := url.URL{ - Path: basePath, - } - - values := url.Values{} - for k, vs := range f.QueryParams() { - for _, v := range vs { - if v != "" { - values.Add(k, v) - } - } - } - - u.RawQuery = values.Encode() - self = u.String() - - if len(ts) >= f.Limit { - values.Set("after", ts[f.Limit-1].ID.String()) - u.RawQuery = values.Encode() - next = u.String() - } - - links := &influxdb.PagingLinks{ - Self: self, - Next: next, - } - - return links -} - -type tasksResponse struct { - Links *influxdb.PagingLinks `json:"links"` - Tasks []taskResponse `json:"tasks"` -} - -func newTasksResponse(ctx context.Context, ts []*taskmodel.Task, f taskmodel.TaskFilter, labelService influxdb.LabelService) tasksResponse { - rs := tasksResponse{ - Links: newTasksPagingLinks(prefixTasks, ts, f), - Tasks: make([]taskResponse, len(ts)), - } - - for i := range ts { - labels, _ := labelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: ts[i].ID, ResourceType: influxdb.TasksResourceType}) - rs.Tasks[i] = newTaskResponse(*ts[i], labels) - } - return rs -} - -type runResponse struct { - Links map[string]string `json:"links,omitempty"` - httpRun -} - -// httpRun is a version of the Run object used to communicate over the API -// it uses a pointer to a time.Time instead of a time.Time so that we can pass a nil -// value for empty time values -type httpRun struct { - ID platform.ID `json:"id,omitempty"` - TaskID platform.ID `json:"taskID"` - Status string `json:"status"` - ScheduledFor *time.Time `json:"scheduledFor"` - StartedAt *time.Time `json:"startedAt,omitempty"` - FinishedAt *time.Time `json:"finishedAt,omitempty"` - RequestedAt *time.Time `json:"requestedAt,omitempty"` - Log []taskmodel.Log `json:"log,omitempty"` -} - -func newRunResponse(r taskmodel.Run) runResponse { - run := httpRun{ - ID: r.ID, - TaskID: r.TaskID, - Status: r.Status, - Log: r.Log, - ScheduledFor: &r.ScheduledFor, - } - - if !r.StartedAt.IsZero() { - run.StartedAt = &r.StartedAt - } - if !r.FinishedAt.IsZero() { - run.FinishedAt = &r.FinishedAt - } - if !r.RequestedAt.IsZero() { - run.RequestedAt = &r.RequestedAt - } - - return runResponse{ - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/tasks/%s/runs/%s", r.TaskID, r.ID), - "task": 
fmt.Sprintf("/api/v2/tasks/%s", r.TaskID), - "logs": fmt.Sprintf("/api/v2/tasks/%s/runs/%s/logs", r.TaskID, r.ID), - "retry": fmt.Sprintf("/api/v2/tasks/%s/runs/%s/retry", r.TaskID, r.ID), - }, - httpRun: run, - } -} - -func convertRun(r httpRun) *taskmodel.Run { - run := &taskmodel.Run{ - ID: r.ID, - TaskID: r.TaskID, - Status: r.Status, - Log: r.Log, - } - - if r.StartedAt != nil { - run.StartedAt = *r.StartedAt - } - - if r.FinishedAt != nil { - run.FinishedAt = *r.FinishedAt - } - - if r.RequestedAt != nil { - run.RequestedAt = *r.RequestedAt - } - - if r.ScheduledFor != nil { - run.ScheduledFor = *r.ScheduledFor - } - - return run -} - -type runsResponse struct { - Links map[string]string `json:"links"` - Runs []*runResponse `json:"runs"` -} - -func newRunsResponse(rs []*taskmodel.Run, taskID platform.ID) runsResponse { - r := runsResponse{ - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/tasks/%s/runs", taskID), - "task": fmt.Sprintf("/api/v2/tasks/%s", taskID), - }, - Runs: make([]*runResponse, len(rs)), - } - - for i := range rs { - rs := newRunResponse(*rs[i]) - r.Runs[i] = &rs - } - return r -} - -func (h *TaskHandler) handleGetTasks(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetTasksRequest(ctx, r, h.OrganizationService) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.EInvalid, - Msg: "failed to decode request", - } - h.HandleHTTPError(ctx, err, w) - return - } - - tasks, _, err := h.TaskService.FindTasks(ctx, req.filter) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Tasks retrieved", zap.String("tasks", fmt.Sprint(tasks))) - if err := encodeResponse(ctx, w, http.StatusOK, newTasksResponse(ctx, tasks, req.filter, h.LabelService)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type getTasksRequest struct { - filter taskmodel.TaskFilter -} - -func decodeGetTasksRequest(ctx context.Context, r *http.Request, orgs influxdb.OrganizationService) (*getTasksRequest, error) { - qp := r.URL.Query() - req := &getTasksRequest{} - - if after := qp.Get("after"); after != "" { - id, err := platform.IDFromString(after) - if err != nil { - return nil, err - } - req.filter.After = id - } - - if orgName := qp.Get("org"); orgName != "" { - o, err := orgs.FindOrganization(ctx, influxdb.OrganizationFilter{Name: &orgName}) - if err != nil { - if pErr, ok := err.(*errors2.Error); ok && pErr != nil { - if kv.IsNotFound(err) || pErr.Code == errors2.EUnauthorized { - return nil, &errors2.Error{ - Err: errors.New("org not found or unauthorized"), - Msg: "org " + orgName + " not found or unauthorized", - } - } - } - return nil, err - } - req.filter.Organization = o.Name - req.filter.OrganizationID = &o.ID - } - if oid := qp.Get("orgID"); oid != "" { - orgID, err := platform.IDFromString(oid) - if err != nil { - return nil, err - } - req.filter.OrganizationID = orgID - } - - if userID := qp.Get("user"); userID != "" { - id, err := platform.IDFromString(userID) - if err != nil { - return nil, err - } - req.filter.User = id - } - - if limit := qp.Get("limit"); limit != "" { - lim, err := strconv.Atoi(limit) - if err != nil { - return nil, err - } - if lim < 1 || lim > taskmodel.TaskMaxPageSize { - return nil, &errors2.Error{ - Code: errors2.EUnprocessableEntity, - Msg: fmt.Sprintf("limit must be between 1 and %d", taskmodel.TaskMaxPageSize), - } - } - req.filter.Limit = lim - } else { - req.filter.Limit = taskmodel.TaskDefaultPageSize - } - - if status := qp.Get("status"); status == 
"active" { - req.filter.Status = &status - } else if status := qp.Get("status"); status == "inactive" { - req.filter.Status = &status - } - - switch typ := qp.Get("type"); typ { - case "basic": - req.filter.Type = &taskmodel.TaskBasicType - case "system": - fallthrough - case "": - req.filter.Type = &taskmodel.TaskSystemType - default: - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: fmt.Sprintf("%q is not a valid task type", typ), - } - } - - if name := qp.Get("name"); name != "" { - req.filter.Name = &name - } - - return req, nil -} - -func (h *TaskHandler) handlePostTask(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePostTaskRequest(ctx, r) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.EInvalid, - Msg: "failed to decode request", - } - h.HandleHTTPError(ctx, err, w) - return - } - - if err := h.populateTaskCreateOrg(ctx, &req.TaskCreate); err != nil { - err = &errors2.Error{ - Err: err, - Msg: "could not identify organization", - } - h.HandleHTTPError(ctx, err, w) - return - } - - if !req.TaskCreate.OrganizationID.Valid() { - err := &errors2.Error{ - Code: errors2.EInvalid, - Msg: "invalid organization id", - } - h.HandleHTTPError(ctx, err, w) - return - } - - task, err := h.TaskService.CreateTask(ctx, req.TaskCreate) - if err != nil { - if e, ok := err.(AuthzError); ok { - h.log.Error("Failed authentication", zap.Errors("error messages", []error{err, e.AuthzError()})) - } - - // if the error is not already a influxdb.error then make it into one - if _, ok := err.(*errors2.Error); !ok { - err = &errors2.Error{ - Err: err, - Code: errors2.EInternal, - Msg: "failed to create task", - } - } - - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusCreated, newTaskResponse(*task, []*influxdb.Label{})); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type postTaskRequest struct { - TaskCreate taskmodel.TaskCreate -} - -func decodePostTaskRequest(ctx context.Context, r *http.Request) (*postTaskRequest, error) { - var tc taskmodel.TaskCreate - if err := json.NewDecoder(r.Body).Decode(&tc); err != nil { - return nil, err - } - - // pull auth from ctx, populate OwnerID - auth, err := pcontext.GetAuthorizer(ctx) - if err != nil { - return nil, err - } - tc.OwnerID = auth.GetUserID() - // when creating a task we set the type so we can filter later. 
- tc.Type = taskmodel.TaskSystemType - - if err := tc.Validate(); err != nil { - return nil, err - } - - return &postTaskRequest{ - TaskCreate: tc, - }, nil -} - -func (h *TaskHandler) handleGetTask(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetTaskRequest(ctx, r) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.EInvalid, - Msg: "failed to decode request", - } - h.HandleHTTPError(ctx, err, w) - return - } - - task, err := h.TaskService.FindTaskByID(ctx, req.TaskID) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.ENotFound, - Msg: "failed to find task", - } - h.HandleHTTPError(ctx, err, w) - return - } - - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: task.ID, ResourceType: influxdb.TasksResourceType}) - if err != nil { - err = &errors2.Error{ - Err: err, - Msg: "failed to find resource labels", - } - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Task retrieved", zap.String("tasks", fmt.Sprint(task))) - if err := encodeResponse(ctx, w, http.StatusOK, newTaskResponse(*task, labels)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type getTaskRequest struct { - TaskID platform.ID -} - -func decodeGetTaskRequest(ctx context.Context, r *http.Request) (*getTaskRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - req := &getTaskRequest{ - TaskID: i, - } - - return req, nil -} - -func (h *TaskHandler) handleUpdateTask(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeUpdateTaskRequest(ctx, r) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.EInvalid, - Msg: "failed to decode request", - } - h.HandleHTTPError(ctx, err, w) - return - } - task, err := h.TaskService.UpdateTask(ctx, req.TaskID, req.Update) - if err != nil { - err := &errors2.Error{ - Err: err, - Msg: "failed to update task", - } - if err.Err == taskmodel.ErrTaskNotFound { - err.Code = errors2.ENotFound - } - h.HandleHTTPError(ctx, err, w) - return - } - - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: task.ID, ResourceType: influxdb.TasksResourceType}) - if err != nil { - err = &errors2.Error{ - Err: err, - Msg: "failed to find resource labels", - } - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Tasks updated", zap.String("task", fmt.Sprint(task))) - if err := encodeResponse(ctx, w, http.StatusOK, newTaskResponse(*task, labels)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type updateTaskRequest struct { - Update taskmodel.TaskUpdate - TaskID platform.ID -} - -func decodeUpdateTaskRequest(ctx context.Context, r *http.Request) (*updateTaskRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "you must provide a task ID", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - var upd taskmodel.TaskUpdate - if err := json.NewDecoder(r.Body).Decode(&upd); err != nil { - return nil, err - } - - if err := upd.Validate(); err != nil { - return nil, err - } - - return &updateTaskRequest{ - Update: upd, - TaskID: i, - }, nil -} - -func (h 
*TaskHandler) handleDeleteTask(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeDeleteTaskRequest(ctx, r) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.EInvalid, - Msg: "failed to decode request", - } - h.HandleHTTPError(ctx, err, w) - return - } - - if err := h.TaskService.DeleteTask(ctx, req.TaskID); err != nil { - err := &errors2.Error{ - Err: err, - Msg: "failed to delete task", - } - if err.Err == taskmodel.ErrTaskNotFound { - err.Code = errors2.ENotFound - } - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Tasks deleted", zap.String("taskID", fmt.Sprint(req.TaskID))) - w.WriteHeader(http.StatusNoContent) -} - -type deleteTaskRequest struct { - TaskID platform.ID -} - -func decodeDeleteTaskRequest(ctx context.Context, r *http.Request) (*deleteTaskRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "you must provide a task ID", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - return &deleteTaskRequest{ - TaskID: i, - }, nil -} - -func (h *TaskHandler) handleGetLogs(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - req, err := decodeGetLogsRequest(ctx, r) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.EInvalid, - Msg: "failed to decode request", - } - h.HandleHTTPError(ctx, err, w) - return - } - - auth, err := pcontext.GetAuthorizer(ctx) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.EUnauthorized, - Msg: "failed to get authorizer", - } - h.HandleHTTPError(ctx, err, w) - return - } - - if k := auth.Kind(); k != influxdb.AuthorizationKind { - // Get the authorization for the task, if allowed. - authz, err := h.getAuthorizationForTask(ctx, auth, req.filter.Task) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - // We were able to access the authorizer for the task, so reassign that on the context for the rest of this call. 
- ctx = pcontext.SetAuthorizer(ctx, authz) - } - - logs, _, err := h.TaskService.FindLogs(ctx, req.filter) - if err != nil { - err := &errors2.Error{ - Err: err, - Msg: "failed to find task logs", - } - if err.Err == taskmodel.ErrTaskNotFound || err.Err == taskmodel.ErrNoRunsFound { - err.Code = errors2.ENotFound - } - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, &getLogsResponse{Events: logs}); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type getLogsRequest struct { - filter taskmodel.LogFilter -} - -type getLogsResponse struct { - Events []*taskmodel.Log `json:"events"` -} - -func decodeGetLogsRequest(ctx context.Context, r *http.Request) (*getLogsRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "you must provide a task ID", - } - } - - req := &getLogsRequest{} - taskID, err := platform.IDFromString(id) - if err != nil { - return nil, err - } - req.filter.Task = *taskID - - if runID := params.ByName("rid"); runID != "" { - id, err := platform.IDFromString(runID) - if err != nil { - return nil, err - } - req.filter.Run = id - } - - return req, nil -} - -func (h *TaskHandler) handleGetRuns(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - req, err := decodeGetRunsRequest(ctx, r) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.EInvalid, - Msg: "failed to decode request", - } - h.HandleHTTPError(ctx, err, w) - return - } - - auth, err := pcontext.GetAuthorizer(ctx) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.EUnauthorized, - Msg: "failed to get authorizer", - } - h.HandleHTTPError(ctx, err, w) - return - } - - if k := auth.Kind(); k != influxdb.AuthorizationKind { - // Get the authorization for the task, if allowed. - authz, err := h.getAuthorizationForTask(ctx, auth, req.filter.Task) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - // We were able to access the authorizer for the task, so reassign that on the context for the rest of this call. 
- ctx = pcontext.SetAuthorizer(ctx, authz) - } - - runs, _, err := h.TaskService.FindRuns(ctx, req.filter) - if err != nil { - err := &errors2.Error{ - Err: err, - Msg: "failed to find runs", - } - if err.Err == taskmodel.ErrTaskNotFound || err.Err == taskmodel.ErrNoRunsFound { - err.Code = errors2.ENotFound - } - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, newRunsResponse(runs, req.filter.Task)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type getRunsRequest struct { - filter taskmodel.RunFilter -} - -func decodeGetRunsRequest(ctx context.Context, r *http.Request) (*getRunsRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "you must provide a task ID", - } - } - - req := &getRunsRequest{} - taskID, err := platform.IDFromString(id) - if err != nil { - return nil, err - } - req.filter.Task = *taskID - - qp := r.URL.Query() - - if id := qp.Get("after"); id != "" { - afterID, err := platform.IDFromString(id) - if err != nil { - return nil, err - } - req.filter.After = afterID - } - - if limit := qp.Get("limit"); limit != "" { - i, err := strconv.Atoi(limit) - if err != nil { - return nil, err - } - - if i < 1 || i > taskmodel.TaskMaxPageSize { - return nil, taskmodel.ErrOutOfBoundsLimit - } - req.filter.Limit = i - } - - var at, bt string - var afterTime, beforeTime time.Time - if at = qp.Get("afterTime"); at != "" { - afterTime, err = time.Parse(time.RFC3339, at) - if err != nil { - return nil, err - } - req.filter.AfterTime = at - } - - if bt = qp.Get("beforeTime"); bt != "" { - beforeTime, err = time.Parse(time.RFC3339, bt) - if err != nil { - return nil, err - } - req.filter.BeforeTime = bt - } - - if at != "" && bt != "" && !beforeTime.After(afterTime) { - return nil, &errors2.Error{ - Code: errors2.EUnprocessableEntity, - Msg: "beforeTime must be later than afterTime", - } - } - - return req, nil -} - -func (h *TaskHandler) handleForceRun(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - req, err := decodeForceRunRequest(ctx, r) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.EInvalid, - Msg: "failed to decode request", - } - h.HandleHTTPError(ctx, err, w) - return - } - - run, err := h.TaskService.ForceRun(ctx, req.TaskID, req.Timestamp) - if err != nil { - err := &errors2.Error{ - Err: err, - Msg: "failed to force run", - } - if err.Err == taskmodel.ErrTaskNotFound { - err.Code = errors2.ENotFound - } - h.HandleHTTPError(ctx, err, w) - return - } - if err := encodeResponse(ctx, w, http.StatusCreated, newRunResponse(*run)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type forceRunRequest struct { - TaskID platform.ID - Timestamp int64 -} - -func decodeForceRunRequest(ctx context.Context, r *http.Request) (forceRunRequest, error) { - params := httprouter.ParamsFromContext(ctx) - tid := params.ByName("id") - if tid == "" { - return forceRunRequest{}, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "you must provide a task ID", - } - } - - var ti platform.ID - if err := ti.DecodeFromString(tid); err != nil { - return forceRunRequest{}, err - } - - var req struct { - ScheduledFor string `json:"scheduledFor"` - } - - if r.ContentLength != 0 && r.ContentLength < 1000 { // prevent attempts to use up memory since r.Body should include at most one item (RunManually) - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return 
forceRunRequest{}, err - } - } - - var t time.Time - if req.ScheduledFor == "" { - t = time.Now() - } else { - var err error - t, err = time.Parse(time.RFC3339, req.ScheduledFor) - if err != nil { - return forceRunRequest{}, err - } - } - - return forceRunRequest{ - TaskID: ti, - Timestamp: t.Unix(), - }, nil -} - -func (h *TaskHandler) handleGetRun(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - req, err := decodeGetRunRequest(ctx, r) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.EInvalid, - Msg: "failed to decode request", - } - h.HandleHTTPError(ctx, err, w) - return - } - - auth, err := pcontext.GetAuthorizer(ctx) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.EUnauthorized, - Msg: "failed to get authorizer", - } - h.HandleHTTPError(ctx, err, w) - return - } - - if k := auth.Kind(); k != influxdb.AuthorizationKind { - // Get the authorization for the task, if allowed. - authz, err := h.getAuthorizationForTask(ctx, auth, req.TaskID) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - // We were able to access the authorizer for the task, so reassign that on the context for the rest of this call. - ctx = pcontext.SetAuthorizer(ctx, authz) - } - - run, err := h.TaskService.FindRunByID(ctx, req.TaskID, req.RunID) - if err != nil { - err := &errors2.Error{ - Err: err, - Msg: "failed to find run", - } - if err.Err == taskmodel.ErrTaskNotFound || err.Err == taskmodel.ErrRunNotFound { - err.Code = errors2.ENotFound - } - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, newRunResponse(*run)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type getRunRequest struct { - TaskID platform.ID - RunID platform.ID -} - -func decodeGetRunRequest(ctx context.Context, r *http.Request) (*getRunRequest, error) { - params := httprouter.ParamsFromContext(ctx) - tid := params.ByName("id") - if tid == "" { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "you must provide a task ID", - } - } - rid := params.ByName("rid") - if rid == "" { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "you must provide a run ID", - } - } - - var ti, ri platform.ID - if err := ti.DecodeFromString(tid); err != nil { - return nil, err - } - if err := ri.DecodeFromString(rid); err != nil { - return nil, err - } - - return &getRunRequest{ - RunID: ri, - TaskID: ti, - }, nil -} - -type cancelRunRequest struct { - RunID platform.ID - TaskID platform.ID -} - -func decodeCancelRunRequest(ctx context.Context, r *http.Request) (*cancelRunRequest, error) { - params := httprouter.ParamsFromContext(ctx) - rid := params.ByName("rid") - if rid == "" { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "you must provide a run ID", - } - } - tid := params.ByName("id") - if tid == "" { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "you must provide a task ID", - } - } - - var i platform.ID - if err := i.DecodeFromString(rid); err != nil { - return nil, err - } - var t platform.ID - if err := t.DecodeFromString(tid); err != nil { - return nil, err - } - - return &cancelRunRequest{ - RunID: i, - TaskID: t, - }, nil -} - -func (h *TaskHandler) handleCancelRun(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - req, err := decodeCancelRunRequest(ctx, r) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.EInvalid, - Msg: "failed to decode request", - } - h.HandleHTTPError(ctx, err, w) - return - } - - err = 
h.TaskService.CancelRun(ctx, req.TaskID, req.RunID) - if err != nil { - err := &errors2.Error{ - Err: err, - Msg: "failed to cancel run", - } - if err.Err == taskmodel.ErrTaskNotFound || err.Err == taskmodel.ErrRunNotFound { - err.Code = errors2.ENotFound - } - h.HandleHTTPError(ctx, err, w) - return - } -} - -func (h *TaskHandler) handleRetryRun(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - req, err := decodeRetryRunRequest(ctx, r) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.EInvalid, - Msg: "failed to decode request", - } - h.HandleHTTPError(ctx, err, w) - return - } - - auth, err := pcontext.GetAuthorizer(ctx) - if err != nil { - err = &errors2.Error{ - Err: err, - Code: errors2.EUnauthorized, - Msg: "failed to get authorizer", - } - h.HandleHTTPError(ctx, err, w) - return - } - - if k := auth.Kind(); k != influxdb.AuthorizationKind { - // Get the authorization for the task, if allowed. - authz, err := h.getAuthorizationForTask(ctx, auth, req.TaskID) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - // We were able to access the authorizer for the task, so reassign that on the context for the rest of this call. - ctx = pcontext.SetAuthorizer(ctx, authz) - } - - run, err := h.TaskService.RetryRun(ctx, req.TaskID, req.RunID) - if err != nil { - err := &errors2.Error{ - Err: err, - Msg: "failed to retry run", - } - if err.Err == taskmodel.ErrTaskNotFound || err.Err == taskmodel.ErrRunNotFound { - err.Code = errors2.ENotFound - } - h.HandleHTTPError(ctx, err, w) - return - } - if err := encodeResponse(ctx, w, http.StatusOK, newRunResponse(*run)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type retryRunRequest struct { - RunID, TaskID platform.ID -} - -func decodeRetryRunRequest(ctx context.Context, r *http.Request) (*retryRunRequest, error) { - params := httprouter.ParamsFromContext(ctx) - tid := params.ByName("id") - if tid == "" { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "you must provide a task ID", - } - } - rid := params.ByName("rid") - if rid == "" { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "you must provide a run ID", - } - } - - var ti, ri platform.ID - if err := ti.DecodeFromString(tid); err != nil { - return nil, err - } - if err := ri.DecodeFromString(rid); err != nil { - return nil, err - } - - return &retryRunRequest{ - RunID: ri, - TaskID: ti, - }, nil -} - -func (h *TaskHandler) populateTaskCreateOrg(ctx context.Context, tc *taskmodel.TaskCreate) error { - if tc.OrganizationID.Valid() && tc.Organization != "" { - return nil - } - - if !tc.OrganizationID.Valid() && tc.Organization == "" { - return errors.New("missing orgID and organization name") - } - - if tc.OrganizationID.Valid() { - o, err := h.OrganizationService.FindOrganizationByID(ctx, tc.OrganizationID) - if err != nil { - return err - } - tc.Organization = o.Name - } else { - o, err := h.OrganizationService.FindOrganization(ctx, influxdb.OrganizationFilter{Name: &tc.Organization}) - if err != nil { - return err - } - tc.OrganizationID = o.ID - } - return nil -} - -// getAuthorizationForTask looks up the authorization associated with taskID, -// ensuring that the authorizer on ctx is allowed to view the task and the authorization. -// -// This method returns a *influxdb.Error, suitable for directly passing to h.HandleHTTPError. 
-func (h *TaskHandler) getAuthorizationForTask(ctx context.Context, auth influxdb.Authorizer, taskID platform.ID) (*influxdb.Authorization, *errors2.Error) { - sess, ok := auth.(*influxdb.Session) - if !ok { - return nil, &errors2.Error{ - Code: errors2.EUnauthorized, - Msg: "unable to authorize session", - } - } - // First look up the task, if we're allowed. - // This assumes h.TaskService validates access. - t, err := h.TaskService.FindTaskByID(ctx, taskID) - if err != nil { - return nil, &errors2.Error{ - Err: err, - Code: errors2.EUnauthorized, - Msg: "task ID unknown or unauthorized", - } - } - - return sess.EphemeralAuth(t.OrganizationID), nil -} - -// TaskService connects to Influx via HTTP using tokens to manage tasks. -type TaskService struct { - Client *httpc.Client -} - -// FindTaskByID returns a single task -func (t TaskService) FindTaskByID(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var tr taskResponse - err := t.Client.Get(taskIDPath(id)).DecodeJSON(&tr).Do(ctx) - if err != nil { - return nil, err - } - - return convertTask(tr.Task), nil -} - -// FindTasks returns a list of tasks that match a filter (limit 100) and the total count -// of matching tasks. -func (t TaskService) FindTasks(ctx context.Context, filter taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - // slice of 2-capacity string slices for storing parameter key-value pairs - var params [][2]string - - if filter.After != nil { - params = append(params, [2]string{"after", filter.After.String()}) - } - if filter.OrganizationID != nil { - params = append(params, [2]string{"orgID", filter.OrganizationID.String()}) - } - if filter.Organization != "" { - params = append(params, [2]string{"org", filter.Organization}) - } - if filter.User != nil { - params = append(params, [2]string{"user", filter.User.String()}) - } - if filter.Limit != 0 { - params = append(params, [2]string{"limit", strconv.Itoa(filter.Limit)}) - } - - if filter.Status != nil { - params = append(params, [2]string{"status", *filter.Status}) - } - - if filter.Type != nil { - params = append(params, [2]string{"type", *filter.Type}) - } - - var tr tasksResponse - err := t.Client. - Get(prefixTasks). - QueryParams(params...). - DecodeJSON(&tr). - Do(ctx) - if err != nil { - return nil, 0, err - } - - tasks := make([]*taskmodel.Task, len(tr.Tasks)) - for i := range tr.Tasks { - tasks[i] = convertTask(tr.Tasks[i].Task) - } - return tasks, len(tasks), nil -} - -// CreateTask creates a new task. -func (t TaskService) CreateTask(ctx context.Context, tc taskmodel.TaskCreate) (*taskmodel.Task, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - var tr taskResponse - - err := t.Client. - PostJSON(tc, prefixTasks). - DecodeJSON(&tr). - Do(ctx) - if err != nil { - return nil, err - } - - return convertTask(tr.Task), nil -} - -// UpdateTask updates a single task with changeset. -func (t TaskService) UpdateTask(ctx context.Context, id platform.ID, upd taskmodel.TaskUpdate) (*taskmodel.Task, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var tr taskResponse - err := t.Client. - PatchJSON(&upd, taskIDPath(id)). - Do(ctx) - if err != nil { - return nil, err - } - - return convertTask(tr.Task), nil -} - -// DeleteTask removes a task by ID and purges all associated data and scheduled runs. 
-func (t TaskService) DeleteTask(ctx context.Context, id platform.ID) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - return t.Client. - Delete(taskIDPath(id)). - Do(ctx) -} - -// FindLogs returns logs for a run. -func (t TaskService) FindLogs(ctx context.Context, filter taskmodel.LogFilter) ([]*taskmodel.Log, int, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if !filter.Task.Valid() { - return nil, 0, errors.New("task ID required") - } - - var urlPath string - if filter.Run == nil { - urlPath = path.Join(taskIDPath(filter.Task), "logs") - } else { - urlPath = path.Join(taskIDRunIDPath(filter.Task, *filter.Run), "logs") - } - - var logs getLogsResponse - err := t.Client. - Get(urlPath). - DecodeJSON(&logs). - Do(ctx) - - if err != nil { - return nil, 0, err - } - - return logs.Events, len(logs.Events), nil -} - -// FindRuns returns a list of runs that match a filter and the total count of returned runs. -func (t TaskService) FindRuns(ctx context.Context, filter taskmodel.RunFilter) ([]*taskmodel.Run, int, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var params [][2]string - - if !filter.Task.Valid() { - return nil, 0, errors.New("task ID required") - } - - if filter.After != nil { - params = append(params, [2]string{"after", filter.After.String()}) - } - - if filter.Limit < 0 || filter.Limit > taskmodel.TaskMaxPageSize { - return nil, 0, taskmodel.ErrOutOfBoundsLimit - } - - params = append(params, [2]string{"limit", strconv.Itoa(filter.Limit)}) - - var rs runsResponse - err := t.Client. - Get(taskIDRunsPath(filter.Task)). - QueryParams(params...). - DecodeJSON(&rs). - Do(ctx) - if err != nil { - return nil, 0, err - } - - runs := make([]*taskmodel.Run, len(rs.Runs)) - for i := range rs.Runs { - runs[i] = convertRun(rs.Runs[i].httpRun) - } - - return runs, len(runs), nil -} - -// FindRunByID returns a single run of a specific task. -func (t TaskService) FindRunByID(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var rs = &runResponse{} - err := t.Client. - Get(taskIDRunIDPath(taskID, runID)). - DecodeJSON(rs). - Do(ctx) - - if err != nil { - if errors2.ErrorCode(err) == errors2.ENotFound { - // ErrRunNotFound is expected as part of the FindRunByID contract, - // so return that actual error instead of a different error that looks like it. - // TODO cleanup backend error implementation - return nil, taskmodel.ErrRunNotFound - } - - return nil, err - } - - return convertRun(rs.httpRun), nil -} - -// RetryRun creates and returns a new run (which is a retry of another run). -func (t TaskService) RetryRun(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var rs runResponse - err := t.Client. - Post(nil, path.Join(taskIDRunIDPath(taskID, runID), "retry")). - DecodeJSON(&rs). - Do(ctx) - - if err != nil { - if errors2.ErrorCode(err) == errors2.ENotFound { - // ErrRunNotFound is expected as part of the RetryRun contract, - // so return that actual error instead of a different error that looks like it. - // TODO cleanup backend task error implementation - return nil, taskmodel.ErrRunNotFound - } - // RequestStillQueuedError is also part of the contract. 
- if e := taskmodel.ParseRequestStillQueuedError(err.Error()); e != nil { - return nil, *e - } - - return nil, err - } - - return convertRun(rs.httpRun), nil -} - -// ForceRun starts a run manually right now. -func (t TaskService) ForceRun(ctx context.Context, taskID platform.ID, scheduledFor int64) (*taskmodel.Run, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - type body struct { - scheduledFor string - } - b := body{scheduledFor: time.Unix(scheduledFor, 0).UTC().Format(time.RFC3339)} - - rs := &runResponse{} - err := t.Client. - PostJSON(b, taskIDRunsPath(taskID)). - DecodeJSON(&rs). - Do(ctx) - - if err != nil { - if errors2.ErrorCode(err) == errors2.ENotFound { - // ErrRunNotFound is expected as part of the RetryRun contract, - // so return that actual error instead of a different error that looks like it. - return nil, taskmodel.ErrRunNotFound - } - - // RequestStillQueuedError is also part of the contract. - if e := taskmodel.ParseRequestStillQueuedError(err.Error()); e != nil { - return nil, *e - } - - return nil, err - } - - return convertRun(rs.httpRun), nil -} - -func cancelPath(taskID, runID platform.ID) string { - return path.Join(taskID.String(), runID.String()) -} - -// CancelRun stops a longer running run. -func (t TaskService) CancelRun(ctx context.Context, taskID, runID platform.ID) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - err := t.Client. - Delete(cancelPath(taskID, runID)). - Do(ctx) - - if err != nil { - return err - } - - return nil -} - -func taskIDPath(id platform.ID) string { - return path.Join(prefixTasks, id.String()) -} - -func taskIDRunsPath(id platform.ID) string { - return path.Join(prefixTasks, id.String(), "runs") -} - -func taskIDRunIDPath(taskID, runID platform.ID) string { - return path.Join(prefixTasks, taskID.String(), "runs", runID.String()) -} diff --git a/http/task_service_test.go b/http/task_service_test.go deleted file mode 100644 index e896d60f5ba..00000000000 --- a/http/task_service_test.go +++ /dev/null @@ -1,1764 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - pcontext "github.com/influxdata/influxdb/v2/context" - _ "github.com/influxdata/influxdb/v2/fluxinit/static" - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/label" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" -) - -// NewMockTaskBackend returns a TaskBackend with mock services. 
-func NewMockTaskBackend(t *testing.T) *TaskBackend { - t.Helper() - store := influxdbtesting.NewTestInmemStore(t) - tenantService := tenant.NewService(tenant.NewStore(store)) - - return &TaskBackend{ - log: zaptest.NewLogger(t).With(zap.String("handler", "task")), - - AlgoWProxy: &NoopProxyHandler{}, - AuthorizationService: mock.NewAuthorizationService(), - TaskService: &mock.TaskService{}, - OrganizationService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ID: id, Name: "test"}, nil - }, - FindOrganizationF: func(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - org := &influxdb.Organization{} - if filter.Name != nil { - if *filter.Name == "non-existent-org" { - return nil, &errors2.Error{ - Err: errors.New("org not found or unauthorized"), - Msg: "org " + *filter.Name + " not found or unauthorized", - Code: errors2.ENotFound, - } - } - org.Name = *filter.Name - } - if filter.ID != nil { - org.ID = *filter.ID - } - - return org, nil - }, - }, - UserResourceMappingService: tenantService, - LabelService: mock.NewLabelService(), - UserService: mock.NewUserService(), - } -} - -func TestTaskHandler_handleGetTasks(t *testing.T) { - type fields struct { - taskService taskmodel.TaskService - labelService influxdb.LabelService - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - getParams string - fields fields - wants wants - }{ - { - name: "get tasks", - fields: fields{ - taskService: &mock.TaskService{ - FindTasksFn: func(ctx context.Context, f taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - tasks := []*taskmodel.Task{ - { - ID: 1, - Name: "task1", - Description: "A little Task", - OrganizationID: 1, - OwnerID: 1, - Organization: "test", - }, - { - ID: 2, - Name: "task2", - OrganizationID: 2, - OwnerID: 2, - Organization: "test", - }, - } - return tasks, len(tasks), nil - }, - }, - labelService: &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - labels := []*influxdb.Label{ - { - ID: influxdbtesting.MustIDBase16("fc3dc670a4be9b9a"), - Name: "label", - Properties: map[string]string{ - "color": "fff000", - }, - }, - } - return labels, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/tasks?limit=100" - }, - "tasks": [ - { - "links": { - "self": "/api/v2/tasks/0000000000000001", - "owners": "/api/v2/tasks/0000000000000001/owners", - "members": "/api/v2/tasks/0000000000000001/members", - "labels": "/api/v2/tasks/0000000000000001/labels", - "runs": "/api/v2/tasks/0000000000000001/runs", - "logs": "/api/v2/tasks/0000000000000001/logs" - }, - "id": "0000000000000001", - "name": "task1", - "description": "A little Task", - "labels": [ - { - "id": "fc3dc670a4be9b9a", - "name": "label", - "properties": { - "color": "fff000" - } - } - ], - "orgID": "0000000000000001", - "ownerID": "0000000000000001", - "org": "test", - "status": "", - "flux": "" - }, - { - "links": { - "self": "/api/v2/tasks/0000000000000002", - "owners": "/api/v2/tasks/0000000000000002/owners", - "members": "/api/v2/tasks/0000000000000002/members", - "labels": "/api/v2/tasks/0000000000000002/labels", - "runs": "/api/v2/tasks/0000000000000002/runs", - "logs": "/api/v2/tasks/0000000000000002/logs" - }, - "id": 
"0000000000000002", - "name": "task2", - "labels": [ - { - "id": "fc3dc670a4be9b9a", - "name": "label", - "properties": { - "color": "fff000" - } - } - ], - "orgID": "0000000000000002", - "ownerID": "0000000000000002", - "org": "test", - "status": "", - "flux": "" - } - ] -}`, - }, - }, - { - name: "get tasks by after and limit", - getParams: "after=0000000000000001&limit=1", - fields: fields{ - taskService: &mock.TaskService{ - FindTasksFn: func(ctx context.Context, f taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - tasks := []*taskmodel.Task{ - { - ID: 2, - Name: "task2", - OrganizationID: 2, - OwnerID: 2, - Organization: "test", - }, - } - return tasks, len(tasks), nil - }, - }, - labelService: &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - labels := []*influxdb.Label{ - { - ID: influxdbtesting.MustIDBase16("fc3dc670a4be9b9a"), - Name: "label", - Properties: map[string]string{ - "color": "fff000", - }, - }, - } - return labels, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/tasks?after=0000000000000001&limit=1", - "next": "/api/v2/tasks?after=0000000000000002&limit=1" - }, - "tasks": [ - { - "links": { - "self": "/api/v2/tasks/0000000000000002", - "owners": "/api/v2/tasks/0000000000000002/owners", - "members": "/api/v2/tasks/0000000000000002/members", - "labels": "/api/v2/tasks/0000000000000002/labels", - "runs": "/api/v2/tasks/0000000000000002/runs", - "logs": "/api/v2/tasks/0000000000000002/logs" - }, - "id": "0000000000000002", - "name": "task2", - "labels": [ - { - "id": "fc3dc670a4be9b9a", - "name": "label", - "properties": { - "color": "fff000" - } - } - ], - "orgID": "0000000000000002", - "ownerID": "0000000000000002", - "org": "test", - "status": "", - "flux": "" - } - ] -}`, - }, - }, - { - name: "get tasks by org name", - getParams: "org=test2", - fields: fields{ - taskService: &mock.TaskService{ - FindTasksFn: func(ctx context.Context, f taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - tasks := []*taskmodel.Task{ - { - ID: 2, - Name: "task2", - OrganizationID: 2, - OwnerID: 2, - Organization: "test2", - }, - } - return tasks, len(tasks), nil - }, - }, - labelService: &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - labels := []*influxdb.Label{ - { - ID: influxdbtesting.MustIDBase16("fc3dc670a4be9b9a"), - Name: "label", - Properties: map[string]string{ - "color": "fff000", - }, - }, - } - return labels, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/tasks?limit=100&org=test2" - }, - "tasks": [ - { - "links": { - "self": "/api/v2/tasks/0000000000000002", - "owners": "/api/v2/tasks/0000000000000002/owners", - "members": "/api/v2/tasks/0000000000000002/members", - "labels": "/api/v2/tasks/0000000000000002/labels", - "runs": "/api/v2/tasks/0000000000000002/runs", - "logs": "/api/v2/tasks/0000000000000002/logs" - }, - "id": "0000000000000002", - "name": "task2", - "labels": [ - { - "id": "fc3dc670a4be9b9a", - "name": "label", - "properties": { - "color": "fff000" - } - } - ], - "orgID": "0000000000000002", - "ownerID": "0000000000000002", - "org": "test2", - "status": "", - "flux": "" - } - ] -}`, - }, - }, - { - name: "get tasks by org name bad", - getParams: 
"org=non-existent-org", - fields: fields{ - taskService: &mock.TaskService{ - FindTasksFn: func(ctx context.Context, f taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - tasks := []*taskmodel.Task{ - { - ID: 1, - Name: "task1", - OrganizationID: 1, - OwnerID: 1, - Organization: "test2", - }, - { - ID: 2, - Name: "task2", - OrganizationID: 2, - OwnerID: 2, - Organization: "test2", - }, - } - return tasks, len(tasks), nil - }, - }, - labelService: &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - labels := []*influxdb.Label{ - { - ID: influxdbtesting.MustIDBase16("fc3dc670a4be9b9a"), - Name: "label", - Properties: map[string]string{ - "color": "fff000", - }, - }, - } - return labels, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusBadRequest, - contentType: "application/json; charset=utf-8", - body: `{ -"code": "invalid", -"message": "failed to decode request: org non-existent-org not found or unauthorized: org not found or unauthorized" -}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := httptest.NewRequest("GET", "http://any.url?"+tt.getParams, nil) - w := httptest.NewRecorder() - - taskBackend := NewMockTaskBackend(t) - taskBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - taskBackend.TaskService = tt.fields.taskService - taskBackend.LabelService = tt.fields.labelService - h := NewTaskHandler(zaptest.NewLogger(t), taskBackend) - h.handleGetTasks(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetTasks() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetTasks() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(tt.wants.body, string(body)); err != nil { - t.Errorf("%q, handleGetTasks(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handleGetTasks() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestTaskHandler_handlePostTasks(t *testing.T) { - type args struct { - taskCreate taskmodel.TaskCreate - } - type fields struct { - taskService taskmodel.TaskService - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - args args - fields fields - wants wants - }{ - { - name: "create task", - args: args{ - taskCreate: taskmodel.TaskCreate{ - OrganizationID: 1, - Flux: "abc", - }, - }, - fields: fields{ - taskService: &mock.TaskService{ - CreateTaskFn: func(ctx context.Context, tc taskmodel.TaskCreate) (*taskmodel.Task, error) { - return &taskmodel.Task{ - ID: 1, - Name: "task1", - Description: "Brand New Task", - OrganizationID: 1, - OwnerID: 1, - Organization: "test", - Flux: "abc", - }, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/tasks/0000000000000001", - "owners": "/api/v2/tasks/0000000000000001/owners", - "members": "/api/v2/tasks/0000000000000001/members", - "labels": "/api/v2/tasks/0000000000000001/labels", - "runs": "/api/v2/tasks/0000000000000001/runs", - "logs": "/api/v2/tasks/0000000000000001/logs" - }, - "id": "0000000000000001", - "name": "task1", - "description": "Brand New Task", - "labels": [], - "orgID": "0000000000000001", - "ownerID": "0000000000000001", - "org": "test", - "status": "", - "flux": "abc" -} -`, - }, - }, - { - name: "create task - influxdb error creating task", - args: args{ - taskCreate: taskmodel.TaskCreate{ - OrganizationID: 1, - Flux: "abc", - }, - }, - fields: fields{ - taskService: &mock.TaskService{ - CreateTaskFn: func(ctx context.Context, tc taskmodel.TaskCreate) (*taskmodel.Task, error) { - return nil, errors2.NewError( - errors2.WithErrorErr(errors.New("something went wrong")), - errors2.WithErrorMsg("something really went wrong"), - errors2.WithErrorCode(errors2.EInvalid), - ) - }, - }, - }, - wants: wants{ - statusCode: http.StatusBadRequest, - contentType: "application/json; charset=utf-8", - body: ` -{ - "code": "invalid", - "message": "something really went wrong: something went wrong" -} -`, - }, - }, - { - name: "create task - error creating task", - args: args{ - taskCreate: taskmodel.TaskCreate{ - OrganizationID: 1, - Flux: "abc", - }, - }, - fields: fields{ - taskService: &mock.TaskService{ - CreateTaskFn: func(ctx context.Context, tc taskmodel.TaskCreate) (*taskmodel.Task, error) { - return nil, errors.New("something bad happened") - }, - }, - }, - wants: wants{ - statusCode: http.StatusInternalServerError, - contentType: "application/json; charset=utf-8", - body: ` -{ - "code": "internal error", - "message": "failed to create task: something bad happened" -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - b, err := json.Marshal(tt.args.taskCreate) - if err != nil { - t.Fatalf("failed to unmarshal task: %v", err) - } - - r := httptest.NewRequest("POST", "http://any.url", bytes.NewReader(b)) - ctx := pcontext.SetAuthorizer(context.TODO(), new(influxdb.Authorization)) - r = r.WithContext(ctx) - - w := httptest.NewRecorder() - - taskBackend := NewMockTaskBackend(t) - taskBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - taskBackend.TaskService = tt.fields.taskService - h := NewTaskHandler(zaptest.NewLogger(t), taskBackend) - h.handlePostTask(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - 
body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePostTask() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePostTask() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(tt.wants.body, string(body)); err != nil { - t.Errorf("%q, handlePostTask(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handlePostTask() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestTaskHandler_handleGetRun(t *testing.T) { - type fields struct { - taskService taskmodel.TaskService - } - type args struct { - taskID platform.ID - runID platform.ID - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get a run by id", - fields: fields{ - taskService: &mock.TaskService{ - FindRunByIDFn: func(ctx context.Context, taskID platform.ID, runID platform.ID) (*taskmodel.Run, error) { - scheduledFor, _ := time.Parse(time.RFC3339, "2018-12-01T17:00:13Z") - startedAt, _ := time.Parse(time.RFC3339Nano, "2018-12-01T17:00:03.155645Z") - finishedAt, _ := time.Parse(time.RFC3339Nano, "2018-12-01T17:00:13.155645Z") - requestedAt, _ := time.Parse(time.RFC3339, "2018-12-01T17:00:13Z") - run := taskmodel.Run{ - ID: runID, - TaskID: taskID, - Status: "success", - ScheduledFor: scheduledFor, - StartedAt: startedAt, - FinishedAt: finishedAt, - RequestedAt: requestedAt, - } - return &run, nil - }, - }, - }, - args: args{ - taskID: 1, - runID: 2, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/tasks/0000000000000001/runs/0000000000000002", - "task": "/api/v2/tasks/0000000000000001", - "retry": "/api/v2/tasks/0000000000000001/runs/0000000000000002/retry", - "logs": "/api/v2/tasks/0000000000000001/runs/0000000000000002/logs" - }, - "id": "0000000000000002", - "taskID": "0000000000000001", - "status": "success", - "scheduledFor": "2018-12-01T17:00:13Z", - "startedAt": "2018-12-01T17:00:03.155645Z", - "finishedAt": "2018-12-01T17:00:13.155645Z", - "requestedAt": "2018-12-01T17:00:13Z" -}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := httptest.NewRequest("GET", "http://any.url", nil) - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.taskID.String(), - }, - { - Key: "rid", - Value: tt.args.runID.String(), - }, - })) - r = r.WithContext(pcontext.SetAuthorizer(r.Context(), &influxdb.Authorization{Permissions: influxdb.OperPermissions()})) - w := httptest.NewRecorder() - taskBackend := NewMockTaskBackend(t) - taskBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - taskBackend.TaskService = tt.fields.taskService - h := NewTaskHandler(zaptest.NewLogger(t), taskBackend) - h.handleGetRun(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetRun() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. 
handleGetRun() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetRun(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handleGetRun() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestTaskHandler_handleGetRuns(t *testing.T) { - type fields struct { - taskService taskmodel.TaskService - } - type args struct { - taskID platform.ID - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get runs by task id", - fields: fields{ - taskService: &mock.TaskService{ - FindRunsFn: func(ctx context.Context, f taskmodel.RunFilter) ([]*taskmodel.Run, int, error) { - scheduledFor, _ := time.Parse(time.RFC3339, "2018-12-01T17:00:13Z") - startedAt, _ := time.Parse(time.RFC3339Nano, "2018-12-01T17:00:03.155645Z") - finishedAt, _ := time.Parse(time.RFC3339Nano, "2018-12-01T17:00:13.155645Z") - requestedAt, _ := time.Parse(time.RFC3339, "2018-12-01T17:00:13Z") - runs := []*taskmodel.Run{ - { - ID: platform.ID(2), - TaskID: f.Task, - Status: "success", - ScheduledFor: scheduledFor, - StartedAt: startedAt, - FinishedAt: finishedAt, - RequestedAt: requestedAt, - }, - } - return runs, len(runs), nil - }, - }, - }, - args: args{ - taskID: 1, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/tasks/0000000000000001/runs", - "task": "/api/v2/tasks/0000000000000001" - }, - "runs": [ - { - "links": { - "self": "/api/v2/tasks/0000000000000001/runs/0000000000000002", - "task": "/api/v2/tasks/0000000000000001", - "retry": "/api/v2/tasks/0000000000000001/runs/0000000000000002/retry", - "logs": "/api/v2/tasks/0000000000000001/runs/0000000000000002/logs" - }, - "id": "0000000000000002", - "taskID": "0000000000000001", - "status": "success", - "scheduledFor": "2018-12-01T17:00:13Z", - "startedAt": "2018-12-01T17:00:03.155645Z", - "finishedAt": "2018-12-01T17:00:13.155645Z", - "requestedAt": "2018-12-01T17:00:13Z" - } - ] -}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := httptest.NewRequest("GET", "http://any.url", nil) - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.taskID.String(), - }, - })) - r = r.WithContext(pcontext.SetAuthorizer(r.Context(), &influxdb.Authorization{Permissions: influxdb.OperPermissions()})) - w := httptest.NewRecorder() - taskBackend := NewMockTaskBackend(t) - taskBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - taskBackend.TaskService = tt.fields.taskService - h := NewTaskHandler(zaptest.NewLogger(t), taskBackend) - h.handleGetRuns(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetRuns() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetRuns() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetRuns(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handleGetRuns() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestTaskHandler_NotFoundStatus(t *testing.T) { - // Ensure that the HTTP handlers return 404s for missing resources, and OKs for matching. - - store := influxdbtesting.NewTestInmemStore(t) - tenantService := tenant.NewService(tenant.NewStore(store)) - - labelStore, _ := label.NewStore(store) - labelService := label.NewService(labelStore) - - taskBackend := NewMockTaskBackend(t) - taskBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - h := NewTaskHandler(zaptest.NewLogger(t), taskBackend) - h.UserResourceMappingService = tenantService - h.LabelService = labelService - h.UserService = tenantService - h.OrganizationService = tenantService - - o := influxdb.Organization{Name: "o"} - ctx := context.Background() - if err := h.OrganizationService.CreateOrganization(ctx, &o); err != nil { - t.Fatal(err) - } - - // Create a session to associate with the contexts, so authorization checks pass. - authz := &influxdb.Authorization{Permissions: influxdb.OperPermissions()} - - const taskID, runID = platform.ID(0xCCCCCC), platform.ID(0xAAAAAA) - - var ( - okTask = []interface{}{taskID} - okTaskRun = []interface{}{taskID, runID} - - notFoundTask = [][]interface{}{ - {taskID + 1}, - } - notFoundTaskRun = [][]interface{}{ - {taskID, runID + 1}, - {taskID + 1, runID}, - {taskID + 1, runID + 1}, - } - ) - - tcs := []struct { - name string - svc *mock.TaskService - method string - body string - pathFmt string - okPathArgs []interface{} - notFoundPathArgs [][]interface{} - }{ - { - name: "get task", - svc: &mock.TaskService{ - FindTaskByIDFn: func(_ context.Context, id platform.ID) (*taskmodel.Task, error) { - if id == taskID { - return &taskmodel.Task{ID: taskID, Organization: "o"}, nil - } - - return nil, taskmodel.ErrTaskNotFound - }, - }, - method: http.MethodGet, - pathFmt: "/tasks/%s", - okPathArgs: okTask, - notFoundPathArgs: notFoundTask, - }, - { - name: "update task", - svc: &mock.TaskService{ - UpdateTaskFn: func(_ context.Context, id platform.ID, _ taskmodel.TaskUpdate) (*taskmodel.Task, error) { - if id == taskID { - return &taskmodel.Task{ID: taskID, Organization: "o"}, nil - } - - return nil, taskmodel.ErrTaskNotFound - }, - }, - method: http.MethodPatch, - body: `{"status": "active"}`, - pathFmt: "/tasks/%s", - okPathArgs: okTask, - notFoundPathArgs: notFoundTask, - }, - { - name: "delete task", - svc: &mock.TaskService{ - DeleteTaskFn: func(_ context.Context, id platform.ID) error { - if id == taskID { - return nil - } - - return taskmodel.ErrTaskNotFound - }, - }, - method: http.MethodDelete, - pathFmt: "/tasks/%s", - okPathArgs: okTask, - notFoundPathArgs: notFoundTask, - }, - { - name: "get task logs", - svc: &mock.TaskService{ - FindLogsFn: func(_ context.Context, f taskmodel.LogFilter) ([]*taskmodel.Log, int, error) { - if f.Task == taskID { - return nil, 0, nil - } - - return nil, 0, taskmodel.ErrTaskNotFound - }, - }, - method: http.MethodGet, - pathFmt: "/tasks/%s/logs", - okPathArgs: okTask, - notFoundPathArgs: notFoundTask, - }, - { - name: "get run logs", - svc: &mock.TaskService{ - FindLogsFn: func(_ context.Context, f taskmodel.LogFilter) ([]*taskmodel.Log, int, error) { - if f.Task != taskID { - return nil, 0, taskmodel.ErrTaskNotFound - } - if *f.Run != runID { - return nil, 0, taskmodel.ErrNoRunsFound - } - - return nil, 0, nil - }, - }, - method: http.MethodGet, - pathFmt: "/tasks/%s/runs/%s/logs", - okPathArgs: okTaskRun, - notFoundPathArgs: notFoundTaskRun, - }, - { - name: "get 
runs: task not found", - svc: &mock.TaskService{ - FindRunsFn: func(_ context.Context, f taskmodel.RunFilter) ([]*taskmodel.Run, int, error) { - if f.Task != taskID { - return nil, 0, taskmodel.ErrTaskNotFound - } - - return nil, 0, nil - }, - }, - method: http.MethodGet, - pathFmt: "/tasks/%s/runs", - okPathArgs: okTask, - notFoundPathArgs: notFoundTask, - }, - { - name: "get runs: task found but no runs found", - svc: &mock.TaskService{ - FindRunsFn: func(_ context.Context, f taskmodel.RunFilter) ([]*taskmodel.Run, int, error) { - if f.Task != taskID { - return nil, 0, taskmodel.ErrNoRunsFound - } - - return nil, 0, nil - }, - }, - method: http.MethodGet, - pathFmt: "/tasks/%s/runs", - okPathArgs: okTask, - notFoundPathArgs: notFoundTask, - }, - { - name: "force run", - svc: &mock.TaskService{ - ForceRunFn: func(_ context.Context, tid platform.ID, _ int64) (*taskmodel.Run, error) { - if tid != taskID { - return nil, taskmodel.ErrTaskNotFound - } - - return &taskmodel.Run{ID: runID, TaskID: taskID, Status: taskmodel.RunScheduled.String()}, nil - }, - }, - method: http.MethodPost, - body: "{}", - pathFmt: "/tasks/%s/runs", - okPathArgs: okTask, - notFoundPathArgs: notFoundTask, - }, - { - name: "get run", - svc: &mock.TaskService{ - FindRunByIDFn: func(_ context.Context, tid, rid platform.ID) (*taskmodel.Run, error) { - if tid != taskID { - return nil, taskmodel.ErrTaskNotFound - } - if rid != runID { - return nil, taskmodel.ErrRunNotFound - } - - return &taskmodel.Run{ID: runID, TaskID: taskID, Status: taskmodel.RunScheduled.String()}, nil - }, - }, - method: http.MethodGet, - pathFmt: "/tasks/%s/runs/%s", - okPathArgs: okTaskRun, - notFoundPathArgs: notFoundTaskRun, - }, - { - name: "retry run", - svc: &mock.TaskService{ - RetryRunFn: func(_ context.Context, tid, rid platform.ID) (*taskmodel.Run, error) { - if tid != taskID { - return nil, taskmodel.ErrTaskNotFound - } - if rid != runID { - return nil, taskmodel.ErrRunNotFound - } - - return &taskmodel.Run{ID: runID, TaskID: taskID, Status: taskmodel.RunScheduled.String()}, nil - }, - }, - method: http.MethodPost, - pathFmt: "/tasks/%s/runs/%s/retry", - okPathArgs: okTaskRun, - notFoundPathArgs: notFoundTaskRun, - }, - { - name: "cancel run", - svc: &mock.TaskService{ - CancelRunFn: func(_ context.Context, tid, rid platform.ID) error { - if tid != taskID { - return taskmodel.ErrTaskNotFound - } - if rid != runID { - return taskmodel.ErrRunNotFound - } - - return nil - }, - }, - method: http.MethodDelete, - pathFmt: "/tasks/%s/runs/%s", - okPathArgs: okTaskRun, - notFoundPathArgs: notFoundTaskRun, - }, - } - - for _, tc := range tcs { - tc := tc - t.Run(tc.name, func(t *testing.T) { - h.TaskService = tc.svc - - okPath := fmt.Sprintf(tc.pathFmt, tc.okPathArgs...) - t.Run("matching ID: "+tc.method+" "+okPath, func(t *testing.T) { - w := httptest.NewRecorder() - r := httptest.NewRequest(tc.method, "http://task.example/api/v2"+okPath, strings.NewReader(tc.body)).WithContext( - pcontext.SetAuthorizer(context.Background(), authz), - ) - - h.ServeHTTP(w, r) - - res := w.Result() - defer res.Body.Close() - - if res.StatusCode < 200 || res.StatusCode > 299 { - t.Errorf("expected OK, got %d", res.StatusCode) - b, _ := io.ReadAll(res.Body) - t.Fatalf("body: %s", string(b)) - } - }) - - t.Run("mismatched ID", func(t *testing.T) { - for _, nfa := range tc.notFoundPathArgs { - path := fmt.Sprintf(tc.pathFmt, nfa...) 
- t.Run(tc.method+" "+path, func(t *testing.T) { - w := httptest.NewRecorder() - r := httptest.NewRequest(tc.method, "http://task.example/api/v2"+path, strings.NewReader(tc.body)).WithContext( - pcontext.SetAuthorizer(context.Background(), authz), - ) - - h.ServeHTTP(w, r) - - res := w.Result() - defer res.Body.Close() - - if res.StatusCode != http.StatusNotFound { - t.Errorf("expected Not Found, got %d", res.StatusCode) - b, _ := io.ReadAll(res.Body) - t.Fatalf("body: %s", string(b)) - } - }) - } - }) - }) - } -} - -func TestService_handlePostTaskLabel(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - labelMapping *influxdb.LabelMapping - taskID platform.ID - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "add label to task", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - Name: "label", - Properties: map[string]string{ - "color": "fff000", - }, - }, nil - }, - CreateLabelMappingFn: func(ctx context.Context, m *influxdb.LabelMapping) error { return nil }, - }, - }, - args: args{ - labelMapping: &influxdb.LabelMapping{ - ResourceID: 100, - LabelID: 1, - }, - taskID: 100, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "label": { - "id": "0000000000000001", - "name": "label", - "properties": { - "color": "fff000" - } - }, - "links": { - "self": "/api/v2/labels/0000000000000001" - } -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - taskBE := NewMockTaskBackend(t) - taskBE.LabelService = tt.fields.LabelService - h := NewTaskHandler(zaptest.NewLogger(t), taskBE) - - b, err := json.Marshal(tt.args.labelMapping) - if err != nil { - t.Fatalf("failed to unmarshal label mapping: %v", err) - } - - url := fmt.Sprintf("http://localhost:8086/api/v2/tasks/%s/labels", tt.args.taskID) - r := httptest.NewRequest("POST", url, bytes.NewReader(b)) - w := httptest.NewRecorder() - - h.ServeHTTP(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("got %v, want %v", res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("got %v, want %v", content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePostTaskLabel(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handlePostTaskLabel() = ***%s***", tt.name, diff) - } - } - }) - } -} - -// Test that org name to org ID translation happens properly in the HTTP layer. -// Regression test for https://github.com/influxdata/influxdb/issues/12089. -func TestTaskHandler_CreateTaskWithOrgName(t *testing.T) { - i := influxdbtesting.NewTestInmemStore(t) - - ts := tenant.NewService(tenant.NewStore(i)) - aStore, _ := authorization.NewStore(i) - as := authorization.NewService(aStore, ts) - ctx := context.Background() - - // Set up user and org. 
- u := &influxdb.User{Name: "u"} - if err := ts.CreateUser(ctx, u); err != nil { - t.Fatal(err) - } - o := &influxdb.Organization{Name: "o"} - if err := ts.CreateOrganization(ctx, o); err != nil { - t.Fatal(err) - } - - // Source and destination buckets for use in task. - bSrc := influxdb.Bucket{OrgID: o.ID, Name: "b-src"} - if err := ts.CreateBucket(ctx, &bSrc); err != nil { - t.Fatal(err) - } - bDst := influxdb.Bucket{OrgID: o.ID, Name: "b-dst"} - if err := ts.CreateBucket(ctx, &bDst); err != nil { - t.Fatal(err) - } - - authz := influxdb.Authorization{OrgID: o.ID, UserID: u.ID, Permissions: influxdb.OperPermissions()} - if err := as.CreateAuthorization(ctx, &authz); err != nil { - t.Fatal(err) - } - - taskSvc := &mock.TaskService{ - CreateTaskFn: func(_ context.Context, tc taskmodel.TaskCreate) (*taskmodel.Task, error) { - if tc.OrganizationID != o.ID { - t.Fatalf("expected task to be created with org ID %s, got %s", o.ID, tc.OrganizationID) - } - - return &taskmodel.Task{ID: 9, OrganizationID: o.ID, OwnerID: o.ID, Name: "x", Flux: tc.Flux}, nil - }, - } - - lStore, _ := label.NewStore(i) - h := NewTaskHandler(zaptest.NewLogger(t), &TaskBackend{ - log: zaptest.NewLogger(t), - - TaskService: taskSvc, - AuthorizationService: as, - OrganizationService: ts, - UserResourceMappingService: ts, - LabelService: label.NewService(lStore), - UserService: ts, - BucketService: ts, - }) - - const script = `option task = {name:"x", every:1m} from(bucket:"b-src") |> range(start:-1m) |> to(bucket:"b-dst", org:"o")` - - url := "http://localhost:8086/api/v2/tasks" - - b, err := json.Marshal(taskmodel.TaskCreate{ - Flux: script, - Organization: o.Name, - }) - if err != nil { - t.Fatal(err) - } - - r := httptest.NewRequest("POST", url, bytes.NewReader(b)).WithContext( - pcontext.SetAuthorizer(ctx, &authz), - ) - w := httptest.NewRecorder() - - h.handlePostTask(w, r) - - res := w.Result() - defer res.Body.Close() - body, err := io.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if res.StatusCode != http.StatusCreated { - t.Logf("response body: %s", body) - t.Fatalf("expected status created, got %v", res.StatusCode) - } - - // The task should have been created with a valid token. - var createdTask taskmodel.Task - if err := json.Unmarshal([]byte(body), &createdTask); err != nil { - t.Fatal(err) - } - if createdTask.Flux != script { - t.Fatalf("Unexpected script returned:\n got: %s\nwant: %s", createdTask.Flux, script) - } -} - -func TestTaskHandler_Sessions(t *testing.T) { - t.Skip("rework these") - // Common setup to get a working base for using tasks. - st := influxdbtesting.NewTestInmemStore(t) - - tStore := tenant.NewStore(st) - tSvc := tenant.NewService(tStore) - - labelStore, err := label.NewStore(st) - if err != nil { - t.Fatal(err) - } - labelService := label.NewService(labelStore) - - authStore, err := authorization.NewStore(st) - if err != nil { - t.Fatal(err) - } - authService := authorization.NewService(authStore, tSvc) - - ctx := context.Background() - - // Set up user and org. - u := &influxdb.User{Name: "u"} - if err := tSvc.CreateUser(ctx, u); err != nil { - t.Fatal(err) - } - o := &influxdb.Organization{Name: "o"} - if err := tSvc.CreateOrganization(ctx, o); err != nil { - t.Fatal(err) - } - - // Map user to org. 
- if err := tSvc.CreateUserResourceMapping(ctx, &influxdb.UserResourceMapping{ - ResourceType: influxdb.OrgsResourceType, - ResourceID: o.ID, - UserID: u.ID, - UserType: influxdb.Owner, - }); err != nil { - t.Fatal(err) - } - - // Source and destination buckets for use in task. - bSrc := influxdb.Bucket{OrgID: o.ID, Name: "b-src"} - if err := tSvc.CreateBucket(ctx, &bSrc); err != nil { - t.Fatal(err) - } - bDst := influxdb.Bucket{OrgID: o.ID, Name: "b-dst"} - if err := tSvc.CreateBucket(ctx, &bDst); err != nil { - t.Fatal(err) - } - - sessionAllPermsCtx := pcontext.SetAuthorizer(context.Background(), &influxdb.Session{ - UserID: u.ID, - Permissions: influxdb.OperPermissions(), - ExpiresAt: time.Now().Add(24 * time.Hour), - }) - - newHandler := func(t *testing.T, ts *mock.TaskService) *TaskHandler { - return NewTaskHandler(zaptest.NewLogger(t), &TaskBackend{ - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(t)), - log: zaptest.NewLogger(t), - - TaskService: ts, - AuthorizationService: authService, - OrganizationService: tSvc, - UserResourceMappingService: tSvc, - LabelService: labelService, - UserService: tSvc, - BucketService: tSvc, - }) - } - - t.Run("get runs for a task", func(t *testing.T) { - // Unique authorization to associate with our fake task. - taskAuth := &influxdb.Authorization{OrgID: o.ID, UserID: u.ID} - if err := authService.CreateAuthorization(ctx, taskAuth); err != nil { - t.Fatal(err) - } - - const taskID = platform.ID(12345) - const runID = platform.ID(9876) - - var findRunsCtx context.Context - ts := &mock.TaskService{ - FindRunsFn: func(ctx context.Context, f taskmodel.RunFilter) ([]*taskmodel.Run, int, error) { - findRunsCtx = ctx - if f.Task != taskID { - t.Fatalf("expected task ID %v, got %v", taskID, f.Task) - } - - return []*taskmodel.Run{ - {ID: runID, TaskID: taskID}, - }, 1, nil - }, - - FindTaskByIDFn: func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - if id != taskID { - return nil, taskmodel.ErrTaskNotFound - } - - return &taskmodel.Task{ - ID: taskID, - OrganizationID: o.ID, - }, nil - }, - } - - h := newHandler(t, ts) - url := fmt.Sprintf("http://localhost:8086/api/v2/tasks/%s/runs", taskID) - valCtx := context.WithValue(sessionAllPermsCtx, httprouter.ParamsKey, httprouter.Params{{Key: "id", Value: taskID.String()}}) - r := httptest.NewRequest("GET", url, nil).WithContext(valCtx) - w := httptest.NewRecorder() - h.handleGetRuns(w, r) - - res := w.Result() - body, err := io.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if res.StatusCode != http.StatusOK { - t.Logf("response body: %s", body) - t.Fatalf("expected status OK, got %v", res.StatusCode) - } - - authr, err := pcontext.GetAuthorizer(findRunsCtx) - if err != nil { - t.Fatal(err) - } - if authr.Kind() != influxdb.AuthorizationKind { - t.Fatalf("expected context's authorizer to be of kind %q, got %q", influxdb.AuthorizationKind, authr.Kind()) - } - - orgID := authr.(*influxdb.Authorization).OrgID - - if orgID != o.ID { - t.Fatalf("expected context's authorizer org ID to be %v, got %v", o.ID, orgID) - } - - // Other user without permissions on the task or authorization should be disallowed. 
- otherUser := &influxdb.User{Name: "other-" + t.Name()} - if err := tSvc.CreateUser(ctx, otherUser); err != nil { - t.Fatal(err) - } - - valCtx = pcontext.SetAuthorizer(valCtx, &influxdb.Session{ - UserID: otherUser.ID, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) - - r = httptest.NewRequest("GET", url, nil).WithContext(valCtx) - w = httptest.NewRecorder() - h.handleGetRuns(w, r) - - res = w.Result() - body, err = io.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if res.StatusCode != http.StatusUnauthorized { - t.Logf("response body: %s", body) - t.Fatalf("expected status unauthorized, got %v", res.StatusCode) - } - }) - - t.Run("get single run for a task", func(t *testing.T) { - // Unique authorization to associate with our fake task. - taskAuth := &influxdb.Authorization{OrgID: o.ID, UserID: u.ID} - if err := authService.CreateAuthorization(ctx, taskAuth); err != nil { - t.Fatal(err) - } - - const taskID = platform.ID(12345) - const runID = platform.ID(9876) - - var findRunByIDCtx context.Context - ts := &mock.TaskService{ - FindRunByIDFn: func(ctx context.Context, tid, rid platform.ID) (*taskmodel.Run, error) { - findRunByIDCtx = ctx - if tid != taskID { - t.Fatalf("expected task ID %v, got %v", taskID, tid) - } - if rid != runID { - t.Fatalf("expected run ID %v, got %v", runID, rid) - } - - return &taskmodel.Run{ID: runID, TaskID: taskID}, nil - }, - - FindTaskByIDFn: func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - if id != taskID { - return nil, taskmodel.ErrTaskNotFound - } - - return &taskmodel.Task{ - ID: taskID, - OrganizationID: o.ID, - }, nil - }, - } - - h := newHandler(t, ts) - url := fmt.Sprintf("http://localhost:8086/api/v2/tasks/%s/runs/%s", taskID, runID) - valCtx := context.WithValue(sessionAllPermsCtx, httprouter.ParamsKey, httprouter.Params{ - {Key: "id", Value: taskID.String()}, - {Key: "rid", Value: runID.String()}, - }) - r := httptest.NewRequest("GET", url, nil).WithContext(valCtx) - w := httptest.NewRecorder() - h.handleGetRun(w, r) - - res := w.Result() - body, err := io.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if res.StatusCode != http.StatusOK { - t.Logf("response body: %s", body) - t.Fatalf("expected status OK, got %v", res.StatusCode) - } - - // The context passed to TaskService.FindRunByID must be a valid authorization (not a session). - authr, err := pcontext.GetAuthorizer(findRunByIDCtx) - if err != nil { - t.Fatal(err) - } - if authr.Kind() != influxdb.AuthorizationKind { - t.Fatalf("expected context's authorizer to be of kind %q, got %q", influxdb.AuthorizationKind, authr.Kind()) - } - if authr.Identifier() != taskAuth.ID { - t.Fatalf("expected context's authorizer ID to be %v, got %v", taskAuth.ID, authr.Identifier()) - } - - // Other user without permissions on the task or authorization should be disallowed. 
- otherUser := &influxdb.User{Name: "other-" + t.Name()} - if err := tSvc.CreateUser(ctx, otherUser); err != nil { - t.Fatal(err) - } - - valCtx = pcontext.SetAuthorizer(valCtx, &influxdb.Session{ - UserID: otherUser.ID, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) - - r = httptest.NewRequest("GET", url, nil).WithContext(valCtx) - w = httptest.NewRecorder() - h.handleGetRuns(w, r) - - res = w.Result() - body, err = io.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if res.StatusCode != http.StatusUnauthorized { - t.Logf("response body: %s", body) - t.Fatalf("expected status unauthorized, got %v", res.StatusCode) - } - }) - - t.Run("get logs for a run", func(t *testing.T) { - // Unique authorization to associate with our fake task. - taskAuth := &influxdb.Authorization{OrgID: o.ID, UserID: u.ID} - if err := authService.CreateAuthorization(ctx, taskAuth); err != nil { - t.Fatal(err) - } - - const taskID = platform.ID(12345) - const runID = platform.ID(9876) - - var findLogsCtx context.Context - ts := &mock.TaskService{ - FindLogsFn: func(ctx context.Context, f taskmodel.LogFilter) ([]*taskmodel.Log, int, error) { - findLogsCtx = ctx - if f.Task != taskID { - t.Fatalf("expected task ID %v, got %v", taskID, f.Task) - } - if *f.Run != runID { - t.Fatalf("expected run ID %v, got %v", runID, *f.Run) - } - - line := taskmodel.Log{Time: "time", Message: "a log line"} - return []*taskmodel.Log{&line}, 1, nil - }, - - FindTaskByIDFn: func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - if id != taskID { - return nil, taskmodel.ErrTaskNotFound - } - - return &taskmodel.Task{ - ID: taskID, - OrganizationID: o.ID, - }, nil - }, - } - - h := newHandler(t, ts) - url := fmt.Sprintf("http://localhost:8086/api/v2/tasks/%s/runs/%s/logs", taskID, runID) - valCtx := context.WithValue(sessionAllPermsCtx, httprouter.ParamsKey, httprouter.Params{ - {Key: "id", Value: taskID.String()}, - {Key: "rid", Value: runID.String()}, - }) - r := httptest.NewRequest("GET", url, nil).WithContext(valCtx) - w := httptest.NewRecorder() - h.handleGetLogs(w, r) - - res := w.Result() - body, err := io.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if res.StatusCode != http.StatusOK { - t.Logf("response body: %s", body) - t.Fatalf("expected status OK, got %v", res.StatusCode) - } - - // The context passed to TaskService.FindLogs must be a valid authorization (not a session). - authr, err := pcontext.GetAuthorizer(findLogsCtx) - if err != nil { - t.Fatal(err) - } - if authr.Kind() != influxdb.AuthorizationKind { - t.Fatalf("expected context's authorizer to be of kind %q, got %q", influxdb.AuthorizationKind, authr.Kind()) - } - if authr.Identifier() != taskAuth.ID { - t.Fatalf("expected context's authorizer ID to be %v, got %v", taskAuth.ID, authr.Identifier()) - } - - // Other user without permissions on the task or authorization should be disallowed. 
- otherUser := &influxdb.User{Name: "other-" + t.Name()} - if err := tSvc.CreateUser(ctx, otherUser); err != nil { - t.Fatal(err) - } - - valCtx = pcontext.SetAuthorizer(valCtx, &influxdb.Session{ - UserID: otherUser.ID, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) - - r = httptest.NewRequest("GET", url, nil).WithContext(valCtx) - w = httptest.NewRecorder() - h.handleGetRuns(w, r) - - res = w.Result() - body, err = io.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if res.StatusCode != http.StatusUnauthorized { - t.Logf("response body: %s", body) - t.Fatalf("expected status unauthorized, got %v", res.StatusCode) - } - }) - - t.Run("retry a run", func(t *testing.T) { - // Unique authorization to associate with our fake task. - taskAuth := &influxdb.Authorization{OrgID: o.ID, UserID: u.ID} - if err := authService.CreateAuthorization(ctx, taskAuth); err != nil { - t.Fatal(err) - } - - const taskID = platform.ID(12345) - const runID = platform.ID(9876) - - var retryRunCtx context.Context - ts := &mock.TaskService{ - RetryRunFn: func(ctx context.Context, tid, rid platform.ID) (*taskmodel.Run, error) { - retryRunCtx = ctx - if tid != taskID { - t.Fatalf("expected task ID %v, got %v", taskID, tid) - } - if rid != runID { - t.Fatalf("expected run ID %v, got %v", runID, rid) - } - - return &taskmodel.Run{ID: 10 * runID, TaskID: taskID}, nil - }, - - FindTaskByIDFn: func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - if id != taskID { - return nil, taskmodel.ErrTaskNotFound - } - - return &taskmodel.Task{ - ID: taskID, - OrganizationID: o.ID, - }, nil - }, - } - - h := newHandler(t, ts) - url := fmt.Sprintf("http://localhost:8086/api/v2/tasks/%s/runs/%s/retry", taskID, runID) - valCtx := context.WithValue(sessionAllPermsCtx, httprouter.ParamsKey, httprouter.Params{ - {Key: "id", Value: taskID.String()}, - {Key: "rid", Value: runID.String()}, - }) - r := httptest.NewRequest("POST", url, nil).WithContext(valCtx) - w := httptest.NewRecorder() - h.handleRetryRun(w, r) - - res := w.Result() - body, err := io.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if res.StatusCode != http.StatusOK { - t.Logf("response body: %s", body) - t.Fatalf("expected status OK, got %v", res.StatusCode) - } - - // The context passed to TaskService.RetryRun must be a valid authorization (not a session). - authr, err := pcontext.GetAuthorizer(retryRunCtx) - if err != nil { - t.Fatal(err) - } - if authr.Kind() != influxdb.AuthorizationKind { - t.Fatalf("expected context's authorizer to be of kind %q, got %q", influxdb.AuthorizationKind, authr.Kind()) - } - if authr.Identifier() != taskAuth.ID { - t.Fatalf("expected context's authorizer ID to be %v, got %v", taskAuth.ID, authr.Identifier()) - } - - // Other user without permissions on the task or authorization should be disallowed. 
- otherUser := &influxdb.User{Name: "other-" + t.Name()} - if err := tSvc.CreateUser(ctx, otherUser); err != nil { - t.Fatal(err) - } - - valCtx = pcontext.SetAuthorizer(valCtx, &influxdb.Session{ - UserID: otherUser.ID, - ExpiresAt: time.Now().Add(24 * time.Hour), - }) - - r = httptest.NewRequest("POST", url, nil).WithContext(valCtx) - w = httptest.NewRecorder() - h.handleGetRuns(w, r) - - res = w.Result() - body, err = io.ReadAll(res.Body) - if err != nil { - t.Fatal(err) - } - if res.StatusCode != http.StatusUnauthorized { - t.Logf("response body: %s", body) - t.Fatalf("expected status unauthorized, got %v", res.StatusCode) - } - }) -} diff --git a/http/telegraf.go b/http/telegraf.go deleted file mode 100644 index ab0243dcc9f..00000000000 --- a/http/telegraf.go +++ /dev/null @@ -1,496 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "strings" - - "github.com/golang/gddo/httputil" - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - pctx "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "github.com/influxdata/influxdb/v2/telegraf/plugins" - "go.uber.org/zap" -) - -// TelegrafBackend is all services and associated parameters required to construct -// the TelegrafHandler. -type TelegrafBackend struct { - errors.HTTPErrorHandler - log *zap.Logger - - TelegrafService influxdb.TelegrafConfigStore - UserResourceMappingService influxdb.UserResourceMappingService - LabelService influxdb.LabelService - UserService influxdb.UserService - OrganizationService influxdb.OrganizationService -} - -// NewTelegrafBackend returns a new instance of TelegrafBackend. -func NewTelegrafBackend(log *zap.Logger, b *APIBackend) *TelegrafBackend { - return &TelegrafBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - - TelegrafService: b.TelegrafService, - UserResourceMappingService: b.UserResourceMappingService, - LabelService: b.LabelService, - UserService: b.UserService, - OrganizationService: b.OrganizationService, - } -} - -// TelegrafHandler is the handler for the telegraf service -type TelegrafHandler struct { - *httprouter.Router - errors.HTTPErrorHandler - log *zap.Logger - - TelegrafService influxdb.TelegrafConfigStore - UserResourceMappingService influxdb.UserResourceMappingService - LabelService influxdb.LabelService - UserService influxdb.UserService - OrganizationService influxdb.OrganizationService -} - -const ( - prefixTelegraf = "/api/v2/telegrafs" - telegrafsIDPath = "/api/v2/telegrafs/:id" - telegrafsIDMembersPath = "/api/v2/telegrafs/:id/members" - telegrafsIDMembersIDPath = "/api/v2/telegrafs/:id/members/:userID" - telegrafsIDOwnersPath = "/api/v2/telegrafs/:id/owners" - telegrafsIDOwnersIDPath = "/api/v2/telegrafs/:id/owners/:userID" - telegrafsIDLabelsPath = "/api/v2/telegrafs/:id/labels" - telegrafsIDLabelsIDPath = "/api/v2/telegrafs/:id/labels/:lid" - - prefixTelegrafPlugins = "/api/v2/telegraf" - telegrafPluginsPath = "/api/v2/telegraf/plugins" -) - -// NewTelegrafHandler returns a new instance of TelegrafHandler. 
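The route constants above are ordinary REST paths. As a minimal, illustrative client-side sketch (assuming a server listening on localhost:8086 and a placeholder API token; neither value comes from this file), listing telegraf configurations with only the standard library might look like the following. The Accept header selects the JSON representation; single configurations can also be served as TOML or as an octet-stream attachment, as the handler code below shows.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// GET /api/v2/telegrafs lists telegraf configurations; the filter decoding
	// further below also accepts orgID/org query parameters.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8086/api/v2/telegrafs", nil)
	if err != nil {
		panic(err)
	}
	// Placeholder token; the "Token " scheme matches the helpers in tokens.go below.
	req.Header.Set("Authorization", "Token my-token")
	// Ask for the JSON form rather than raw TOML.
	req.Header.Set("Accept", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}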
-func NewTelegrafHandler(log *zap.Logger, b *TelegrafBackend) *TelegrafHandler { - h := &TelegrafHandler{ - Router: NewRouter(b.HTTPErrorHandler), - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - - TelegrafService: b.TelegrafService, - UserResourceMappingService: b.UserResourceMappingService, - LabelService: b.LabelService, - UserService: b.UserService, - OrganizationService: b.OrganizationService, - } - h.HandlerFunc("POST", prefixTelegraf, h.handlePostTelegraf) - h.HandlerFunc("GET", prefixTelegraf, h.handleGetTelegrafs) - h.HandlerFunc("GET", telegrafsIDPath, h.handleGetTelegraf) - h.HandlerFunc("DELETE", telegrafsIDPath, h.handleDeleteTelegraf) - h.HandlerFunc("PUT", telegrafsIDPath, h.handlePutTelegraf) - - h.HandlerFunc("GET", telegrafPluginsPath, h.handleGetTelegrafPlugins) - - memberBackend := MemberBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "member")), - ResourceType: influxdb.TelegrafsResourceType, - UserType: influxdb.Member, - UserResourceMappingService: b.UserResourceMappingService, - UserService: b.UserService, - } - h.HandlerFunc("POST", telegrafsIDMembersPath, newPostMemberHandler(memberBackend)) - h.HandlerFunc("GET", telegrafsIDMembersPath, newGetMembersHandler(memberBackend)) - h.HandlerFunc("DELETE", telegrafsIDMembersIDPath, newDeleteMemberHandler(memberBackend)) - - ownerBackend := MemberBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "member")), - ResourceType: influxdb.TelegrafsResourceType, - UserType: influxdb.Owner, - UserResourceMappingService: b.UserResourceMappingService, - UserService: b.UserService, - } - h.HandlerFunc("POST", telegrafsIDOwnersPath, newPostMemberHandler(ownerBackend)) - h.HandlerFunc("GET", telegrafsIDOwnersPath, newGetMembersHandler(ownerBackend)) - h.HandlerFunc("DELETE", telegrafsIDOwnersIDPath, newDeleteMemberHandler(ownerBackend)) - - labelBackend := &LabelBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "label")), - LabelService: b.LabelService, - ResourceType: influxdb.TelegrafsResourceType, - } - h.HandlerFunc("GET", telegrafsIDLabelsPath, newGetLabelsHandler(labelBackend)) - h.HandlerFunc("POST", telegrafsIDLabelsPath, newPostLabelHandler(labelBackend)) - h.HandlerFunc("DELETE", telegrafsIDLabelsIDPath, newDeleteLabelHandler(labelBackend)) - - return h -} - -type telegrafLinks struct { - Self string `json:"self"` - Labels string `json:"labels"` - Members string `json:"members"` - Owners string `json:"owners"` -} - -type telegrafResponse struct { - *influxdb.TelegrafConfig - Labels []influxdb.Label `json:"labels"` - Links telegrafLinks `json:"links"` -} - -type telegrafResponses struct { - TelegrafConfigs []*telegrafResponse `json:"configurations"` -} - -func getTelegrafPlugins(t string) (*plugins.TelegrafPlugins, error) { - if len(t) == 0 { - return plugins.AvailablePlugins() - } - - return plugins.ListAvailablePlugins(t) -} - -func (h *TelegrafHandler) handleGetTelegrafPlugins(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - telPlugins, err := getTelegrafPlugins(r.URL.Query().Get("type")) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, telPlugins); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func newTelegrafResponse(tc *influxdb.TelegrafConfig, labels []*influxdb.Label) *telegrafResponse { - res := &telegrafResponse{ - TelegrafConfig: tc, - Links: telegrafLinks{ - Self: 
fmt.Sprintf("/api/v2/telegrafs/%s", tc.ID), - Labels: fmt.Sprintf("/api/v2/telegrafs/%s/labels", tc.ID), - Members: fmt.Sprintf("/api/v2/telegrafs/%s/members", tc.ID), - Owners: fmt.Sprintf("/api/v2/telegrafs/%s/owners", tc.ID), - }, - Labels: []influxdb.Label{}, - } - - for _, l := range labels { - res.Labels = append(res.Labels, *l) - } - - return res -} - -func newTelegrafResponses(ctx context.Context, tcs []*influxdb.TelegrafConfig, labelService influxdb.LabelService) *telegrafResponses { - resp := &telegrafResponses{ - TelegrafConfigs: make([]*telegrafResponse, len(tcs)), - } - for i, c := range tcs { - labels, _ := labelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: c.ID, ResourceType: influxdb.TelegrafsResourceType}) - resp.TelegrafConfigs[i] = newTelegrafResponse(c, labels) - } - return resp -} - -func decodeGetTelegrafRequest(ctx context.Context) (i platform.ID, err error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return i, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - if err := i.DecodeFromString(id); err != nil { - return i, err - } - return i, nil -} - -func (h *TelegrafHandler) handleGetTelegrafs(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - filter, err := decodeTelegrafConfigFilter(ctx, r) - if err != nil { - h.log.Debug("Failed to decode request", zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - tcs, _, err := h.TelegrafService.FindTelegrafConfigs(ctx, *filter) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Telegrafs retrieved", zap.String("telegrafs", fmt.Sprint(tcs))) - - if err := encodeResponse(ctx, w, http.StatusOK, newTelegrafResponses(ctx, tcs, h.LabelService)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func (h *TelegrafHandler) handleGetTelegraf(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := decodeGetTelegrafRequest(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - tc, err := h.TelegrafService.FindTelegrafConfigByID(ctx, id) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Telegraf retrieved", zap.String("telegraf", fmt.Sprint(tc))) - - offers := []string{"application/toml", "application/json", "application/octet-stream"} - defaultOffer := "application/toml" - mimeType := httputil.NegotiateContentType(r, offers, defaultOffer) - switch mimeType { - case "application/octet-stream": - w.Header().Set("Content-Type", "application/octet-stream") - w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s.toml\"", strings.Replace(strings.TrimSpace(tc.Name), " ", "_", -1))) - w.WriteHeader(http.StatusOK) - w.Write([]byte(tc.Config)) - case "application/json": - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: tc.ID, ResourceType: influxdb.TelegrafsResourceType}) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := encodeResponse(ctx, w, http.StatusOK, newTelegrafResponse(tc, labels)); err != nil { - logEncodingError(h.log, r, err) - return - } - case "application/toml": - w.Header().Set("Content-Type", "application/toml; charset=utf-8") - w.WriteHeader(http.StatusOK) - w.Write([]byte(tc.Config)) - } -} - -func decodeTelegrafConfigFilter(ctx context.Context, r *http.Request) (*influxdb.TelegrafConfigFilter, error) { - f := &influxdb.TelegrafConfigFilter{} - q := r.URL.Query() - - if orgIDStr := q.Get("orgID"); 
orgIDStr != "" { - orgID, err := platform.IDFromString(orgIDStr) - if err != nil { - return f, &errors.Error{ - Code: errors.EInvalid, - Msg: "orgID is invalid", - Err: err, - } - } - f.OrgID = orgID - } else if orgNameStr := q.Get("org"); orgNameStr != "" { - f.Organization = &orgNameStr - } - - return f, nil -} - -// handlePostTelegraf is the HTTP handler for the POST /api/v2/telegrafs route. -func (h *TelegrafHandler) handlePostTelegraf(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - tc := new(influxdb.TelegrafConfig) - if err := json.NewDecoder(r.Body).Decode(tc); err != nil { - h.log.Debug("Failed to decode request", zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - - auth, err := pctx.GetAuthorizer(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err := h.TelegrafService.CreateTelegrafConfig(ctx, tc, auth.GetUserID()); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Telegraf created", zap.String("telegraf", fmt.Sprint(tc))) - - if err := encodeResponse(ctx, w, http.StatusCreated, newTelegrafResponse(tc, []*influxdb.Label{})); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func decodePutTelegrafRequest(ctx context.Context, r *http.Request) (*influxdb.TelegrafConfig, error) { - tc := new(influxdb.TelegrafConfig) - if err := json.NewDecoder(r.Body).Decode(tc); err != nil { - return nil, err - } - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - i := new(platform.ID) - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - tc.ID = *i - return tc, nil -} - -// handlePutTelegraf is the HTTP handler for the POST /api/v2/telegrafs route. -func (h *TelegrafHandler) handlePutTelegraf(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - tc, err := decodePutTelegrafRequest(ctx, r) - if err != nil { - h.log.Debug("Failed to decode request", zap.Error(err)) - h.HandleHTTPError(ctx, err, w) - return - } - - auth, err := pctx.GetAuthorizer(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - tc, err = h.TelegrafService.UpdateTelegrafConfig(ctx, tc.ID, tc, auth.GetUserID()) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: tc.ID, ResourceType: influxdb.TelegrafsResourceType}) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Telegraf updated", zap.String("telegraf", fmt.Sprint(tc))) - - if err := encodeResponse(ctx, w, http.StatusOK, newTelegrafResponse(tc, labels)); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func (h *TelegrafHandler) handleDeleteTelegraf(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - i, err := decodeGetTelegrafRequest(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - if err = h.TelegrafService.DeleteTelegrafConfig(ctx, i); err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Telegraf deleted", zap.String("telegrafID", fmt.Sprint(i))) - - w.WriteHeader(http.StatusNoContent) -} - -// TelegrafService is an http client that speaks to the telegraf service via HTTP. -type TelegrafService struct { - client *httpc.Client - *UserResourceMappingService -} - -// NewTelegrafService is a constructor for a telegraf service. 
-func NewTelegrafService(httpClient *httpc.Client) *TelegrafService { - return &TelegrafService{ - client: httpClient, - UserResourceMappingService: &UserResourceMappingService{ - Client: httpClient, - }, - } -} - -var _ influxdb.TelegrafConfigStore = (*TelegrafService)(nil) - -// FindTelegrafConfigByID returns a single telegraf config by ID. -func (s *TelegrafService) FindTelegrafConfigByID(ctx context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { - var cfg influxdb.TelegrafConfig - err := s.client. - Get(prefixTelegraf, id.String()). - Header("Accept", "application/json"). - DecodeJSON(&cfg). - Do(ctx) - if err != nil { - return nil, err - } - return &cfg, nil -} - -// FindTelegrafConfigs returns a list of telegraf configs that match filter and the total count of matching telegraf configs. -// Additional options provide pagination & sorting. -func (s *TelegrafService) FindTelegrafConfigs(ctx context.Context, f influxdb.TelegrafConfigFilter, opt ...influxdb.FindOptions) ([]*influxdb.TelegrafConfig, int, error) { - params := influxdb.FindOptionParams(opt...) - if f.OrgID != nil { - params = append(params, [2]string{"orgID", f.OrgID.String()}) - } - if f.Organization != nil { - params = append(params, [2]string{"organization", *f.Organization}) - } - - var resp struct { - Configs []*influxdb.TelegrafConfig `json:"configurations"` - } - err := s.client. - Get(prefixTelegraf). - QueryParams(params...). - DecodeJSON(&resp). - Do(ctx) - if err != nil { - return nil, 0, err - } - - return resp.Configs, len(resp.Configs), nil -} - -// CreateTelegrafConfig creates a new telegraf config and sets b.ID with the new identifier. -func (s *TelegrafService) CreateTelegrafConfig(ctx context.Context, tc *influxdb.TelegrafConfig, userID platform.ID) error { - var teleResp influxdb.TelegrafConfig - err := s.client. - PostJSON(tc, prefixTelegraf). - DecodeJSON(&teleResp). - Do(ctx) - if err != nil { - return err - } - *tc = teleResp - return nil -} - -// UpdateTelegrafConfig updates a single telegraf config. -// Returns the new telegraf config after update. -func (s *TelegrafService) UpdateTelegrafConfig(ctx context.Context, id platform.ID, tc *influxdb.TelegrafConfig, userID platform.ID) (*influxdb.TelegrafConfig, error) { - var teleResp influxdb.TelegrafConfig - err := s.client. - PutJSON(tc, prefixTelegraf, id.String()). - DecodeJSON(&teleResp). - Do(ctx) - if err != nil { - return nil, err - } - return &teleResp, nil -} - -// DeleteTelegrafConfig removes a telegraf config by ID. -func (s *TelegrafService) DeleteTelegrafConfig(ctx context.Context, id platform.ID) error { - return s.client. - Delete(prefixTelegraf, id.String()). - Do(ctx) -} diff --git a/http/telegraf_test.go b/http/telegraf_test.go deleted file mode 100644 index c00874bbb59..00000000000 --- a/http/telegraf_test.go +++ /dev/null @@ -1,947 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - "go.uber.org/zap/zaptest" -) - -// NewMockTelegrafBackend returns a TelegrafBackend with mock services. 
-func NewMockTelegrafBackend(t *testing.T) *TelegrafBackend { - return &TelegrafBackend{ - log: zaptest.NewLogger(t), - - TelegrafService: &mock.TelegrafConfigStore{}, - UserResourceMappingService: mock.NewUserResourceMappingService(), - LabelService: mock.NewLabelService(), - UserService: mock.NewUserService(), - OrganizationService: mock.NewOrganizationService(), - } -} - -func TestTelegrafHandler_handleGetTelegrafs(t *testing.T) { - type wants struct { - statusCode int - contentType string - body string - } - tests := []struct { - name string - svc *mock.TelegrafConfigStore - r *http.Request - wants wants - }{ - { - name: "get telegraf configs by organization id", - r: httptest.NewRequest("GET", "http://any.url/api/v2/telegrafs?orgID=0000000000000002", nil), - svc: &mock.TelegrafConfigStore{ - FindTelegrafConfigsF: func(ctx context.Context, filter platform.TelegrafConfigFilter, opt ...platform.FindOptions) ([]*platform.TelegrafConfig, int, error) { - if filter.OrgID != nil && *filter.OrgID == platform2.ID(2) { - return []*platform.TelegrafConfig{ - { - ID: platform2.ID(1), - OrgID: platform2.ID(2), - Name: "tc1", - Description: "", - Config: "[[inputs.cpu]]\n", - }, - }, 1, nil - } - - return []*platform.TelegrafConfig{}, 0, fmt.Errorf("not found") - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: `{ - "configurations": [ - { - "id": "0000000000000001", - "orgID": "0000000000000002", - "name": "tc1", - "config": "[[inputs.cpu]]\n", - "labels": [], - "links": { - "self": "/api/v2/telegrafs/0000000000000001", - "labels": "/api/v2/telegrafs/0000000000000001/labels", - "members": "/api/v2/telegrafs/0000000000000001/members", - "owners": "/api/v2/telegrafs/0000000000000001/owners" - } - } - ] - }`, - }, - }, - { - name: "get telegraf configs by organization name", - r: httptest.NewRequest("GET", "http://any.url/api/v2/telegrafs?org=tc1", nil), - svc: &mock.TelegrafConfigStore{ - FindTelegrafConfigsF: func(ctx context.Context, filter platform.TelegrafConfigFilter, opt ...platform.FindOptions) ([]*platform.TelegrafConfig, int, error) { - if filter.Organization != nil && *filter.Organization == "tc1" { - return []*platform.TelegrafConfig{ - { - ID: platform2.ID(1), - OrgID: platform2.ID(2), - Name: "tc1", - Description: "", - Config: "[[inputs.cpu]]\n", - }, - }, 1, nil - } - - return []*platform.TelegrafConfig{}, 0, fmt.Errorf("not found") - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: `{ - "configurations": [ - { - "id": "0000000000000001", - "orgID": "0000000000000002", - "name": "tc1", - "config": "[[inputs.cpu]]\n", - "labels": [], - "links": { - "self": "/api/v2/telegrafs/0000000000000001", - "labels": "/api/v2/telegrafs/0000000000000001/labels", - "members": "/api/v2/telegrafs/0000000000000001/members", - "owners": "/api/v2/telegrafs/0000000000000001/owners" - } - } - ] - }`, - }, - }, - { - name: "return CPU plugin for telegraf", - r: httptest.NewRequest("GET", "http://any.url/api/v2/telegrafs", nil), - svc: &mock.TelegrafConfigStore{ - FindTelegrafConfigsF: func(ctx context.Context, filter platform.TelegrafConfigFilter, opt ...platform.FindOptions) ([]*platform.TelegrafConfig, int, error) { - return []*platform.TelegrafConfig{ - { - ID: platform2.ID(1), - OrgID: platform2.ID(2), - Name: "my config", - Description: "my description", - Config: "[[inputs.cpu]]\n[[outputs.influxdb_v2]]\n", - }, - }, 1, nil - }, - }, - wants: wants{ - statusCode: http.StatusOK, - 
contentType: "application/json; charset=utf-8", - body: `{ - "configurations": [ - { - "id": "0000000000000001", - "orgID": "0000000000000002", - "name": "my config", - "description": "my description", - "config": "[[inputs.cpu]]\n[[outputs.influxdb_v2]]\n", - "labels": [], - "links": { - "self": "/api/v2/telegrafs/0000000000000001", - "labels": "/api/v2/telegrafs/0000000000000001/labels", - "members": "/api/v2/telegrafs/0000000000000001/members", - "owners": "/api/v2/telegrafs/0000000000000001/owners" - } - } - ] - }`, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - w := httptest.NewRecorder() - telegrafBackend := NewMockTelegrafBackend(t) - telegrafBackend.TelegrafService = tt.svc - h := NewTelegrafHandler(zaptest.NewLogger(t), telegrafBackend) - h.ServeHTTP(w, tt.r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetTelegrafs() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - t.Logf("headers: %v", res.Header) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetTelegrafs() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetTelegrafs(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. handleGetTelegrafs() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestTelegrafHandler_handleGetTelegraf(t *testing.T) { - type wants struct { - statusCode int - contentType string - body string - } - tests := []struct { - name string - svc *mock.TelegrafConfigStore - r *http.Request - acceptHeader string - wants wants - }{ - { - name: "return JSON telegraf config", - r: httptest.NewRequest("GET", "http://any.url/api/v2/telegrafs/0000000000000001", nil), - acceptHeader: "application/json", - svc: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctx context.Context, id platform2.ID) (*platform.TelegrafConfig, error) { - return &platform.TelegrafConfig{ - ID: platform2.ID(1), - OrgID: platform2.ID(2), - Name: "my config", - Description: "", - Config: "[[inputs.cpu]]\n[[outputs.influxdb_v2]]\n", - }, nil - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: `{ - "id": "0000000000000001", - "orgID": "0000000000000002", - "name": "my config", - "config": "[[inputs.cpu]]\n[[outputs.influxdb_v2]]\n", - "labels": [], - "links": { - "self": "/api/v2/telegrafs/0000000000000001", - "labels": "/api/v2/telegrafs/0000000000000001/labels", - "members": "/api/v2/telegrafs/0000000000000001/members", - "owners": "/api/v2/telegrafs/0000000000000001/owners" - } - }`, - }, - }, - { - name: "return JSON telegraf config using fuzzy accept header matching", - r: httptest.NewRequest("GET", "http://any.url/api/v2/telegrafs/0000000000000001", nil), - acceptHeader: "application/json, text/plain, */*", - svc: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctx context.Context, id platform2.ID) (*platform.TelegrafConfig, error) { - return &platform.TelegrafConfig{ - ID: platform2.ID(1), - OrgID: platform2.ID(2), - Name: "my config", - Description: "", - Config: "[[inputs.cpu]]\n[[outputs.influxdb_v2]]\n", - }, nil - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: `{ - "id": "0000000000000001", - "orgID": 
"0000000000000002", - "name": "my config", - "config": "[[inputs.cpu]]\n[[outputs.influxdb_v2]]\n", - "labels": [], - "links": { - "self": "/api/v2/telegrafs/0000000000000001", - "labels": "/api/v2/telegrafs/0000000000000001/labels", - "members": "/api/v2/telegrafs/0000000000000001/members", - "owners": "/api/v2/telegrafs/0000000000000001/owners" - } - }`, - }, - }, - { - name: "return TOML telegraf config with accept header application/toml", - r: httptest.NewRequest("GET", "http://any.url/api/v2/telegrafs/0000000000000001", nil), - acceptHeader: "application/toml", - svc: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctx context.Context, id platform2.ID) (*platform.TelegrafConfig, error) { - return &platform.TelegrafConfig{ - ID: platform2.ID(1), - OrgID: platform2.ID(2), - Name: "my config", - Config: `# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - precision = "" - - ## Logging configuration: - ## Run telegraf with debug log messages. - debug = false - ## Run telegraf in quiet mode (error log messages only). - quiet = false - ## Specify the log file name. The empty string means to log to stderr. - logfile = "" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. - omit_hostname = false -[[inputs.cpu]] - ## Whether to report per-cpu stats or not - percpu = true - ## Whether to report total system cpu stats or not - totalcpu = true - ## If true, collect raw CPU time metrics. - collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. 
- report_active = false -[[outputs.influxdb_v2]] - ## The URLs of the InfluxDB cluster nodes. - ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:8086 - urls = ["http://127.0.0.1:8086"] - - ## Token for authentication. - token = "no_more_secrets" - - ## Organization is the name of the organization you wish to write to; must exist. - organization = "my_org" - - ## Destination bucket to write into. - bucket = "my_bucket" -`, - }, nil - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/toml; charset=utf-8", - body: `# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - precision = "" - - ## Logging configuration: - ## Run telegraf with debug log messages. - debug = false - ## Run telegraf in quiet mode (error log messages only). - quiet = false - ## Specify the log file name. The empty string means to log to stderr. - logfile = "" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. - omit_hostname = false -[[inputs.cpu]] - ## Whether to report per-cpu stats or not - percpu = true - ## Whether to report total system cpu stats or not - totalcpu = true - ## If true, collect raw CPU time metrics. - collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. - report_active = false -[[outputs.influxdb_v2]] - ## The URLs of the InfluxDB cluster nodes. - ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. 
- ## urls exp: http://127.0.0.1:8086 - urls = ["http://127.0.0.1:8086"] - - ## Token for authentication. - token = "no_more_secrets" - - ## Organization is the name of the organization you wish to write to; must exist. - organization = "my_org" - - ## Destination bucket to write into. - bucket = "my_bucket" -`, - }, - }, - { - name: "return TOML telegraf config with no accept header", - r: httptest.NewRequest("GET", "http://any.url/api/v2/telegrafs/0000000000000001", nil), - svc: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctx context.Context, id platform2.ID) (*platform.TelegrafConfig, error) { - return &platform.TelegrafConfig{ - ID: platform2.ID(1), - OrgID: platform2.ID(2), - Config: `# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - precision = "" - - ## Logging configuration: - ## Run telegraf with debug log messages. - debug = false - ## Run telegraf in quiet mode (error log messages only). - quiet = false - ## Specify the log file name. The empty string means to log to stderr. - logfile = "" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. - omit_hostname = false -[[inputs.cpu]] - ## Whether to report per-cpu stats or not - percpu = true - ## Whether to report total system cpu stats or not - totalcpu = true - ## If true, collect raw CPU time metrics. - collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. - report_active = false -[[outputs.influxdb_v2]] - ## The URLs of the InfluxDB cluster nodes. 
- ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:8086 - urls = ["http://127.0.0.1:8086"] - - ## Token for authentication. - token = "no_more_secrets" - - ## Organization is the name of the organization you wish to write to; must exist. - organization = "my_org" - - ## Destination bucket to write into. - bucket = "my_bucket" -`, - }, nil - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/toml; charset=utf-8", - body: `# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - precision = "" - - ## Logging configuration: - ## Run telegraf with debug log messages. - debug = false - ## Run telegraf in quiet mode (error log messages only). - quiet = false - ## Specify the log file name. The empty string means to log to stderr. - logfile = "" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. - omit_hostname = false -[[inputs.cpu]] - ## Whether to report per-cpu stats or not - percpu = true - ## Whether to report total system cpu stats or not - totalcpu = true - ## If true, collect raw CPU time metrics. - collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. - report_active = false -[[outputs.influxdb_v2]] - ## The URLs of the InfluxDB cluster nodes. - ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. 
- ## urls exp: http://127.0.0.1:8086 - urls = ["http://127.0.0.1:8086"] - - ## Token for authentication. - token = "no_more_secrets" - - ## Organization is the name of the organization you wish to write to; must exist. - organization = "my_org" - - ## Destination bucket to write into. - bucket = "my_bucket" -`, - }, - }, - { - name: "return TOML telegraf config with application/octet-stream", - r: httptest.NewRequest("GET", "http://any.url/api/v2/telegrafs/0000000000000001", nil), - acceptHeader: "application/octet-stream", - svc: &mock.TelegrafConfigStore{ - FindTelegrafConfigByIDF: func(ctx context.Context, id platform2.ID) (*platform.TelegrafConfig, error) { - return &platform.TelegrafConfig{ - ID: platform2.ID(1), - OrgID: platform2.ID(2), - Name: "my config", - Config: `# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - precision = "" - - ## Logging configuration: - ## Run telegraf with debug log messages. - debug = false - ## Run telegraf in quiet mode (error log messages only). - quiet = false - ## Specify the log file name. The empty string means to log to stderr. - logfile = "" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. - omit_hostname = false -[[inputs.cpu]] - ## Whether to report per-cpu stats or not - percpu = true - ## Whether to report total system cpu stats or not - totalcpu = true - ## If true, collect raw CPU time metrics. - collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. 
- report_active = false -[[outputs.influxdb_v2]] - ## The URLs of the InfluxDB cluster nodes. - ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:8086 - urls = ["http://127.0.0.1:8086"] - - ## Token for authentication. - token = "no_more_secrets" - - ## Organization is the name of the organization you wish to write to; must exist. - organization = "my_org" - - ## Destination bucket to write into. - bucket = "my_bucket" -`, - }, nil - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/octet-stream", - body: `# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - precision = "" - - ## Logging configuration: - ## Run telegraf with debug log messages. - debug = false - ## Run telegraf in quiet mode (error log messages only). - quiet = false - ## Specify the log file name. The empty string means to log to stderr. - logfile = "" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. - omit_hostname = false -[[inputs.cpu]] - ## Whether to report per-cpu stats or not - percpu = true - ## Whether to report total system cpu stats or not - totalcpu = true - ## If true, collect raw CPU time metrics. - collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. - report_active = false -[[outputs.influxdb_v2]] - ## The URLs of the InfluxDB cluster nodes. - ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. 
- ## urls exp: http://127.0.0.1:8086 - urls = ["http://127.0.0.1:8086"] - - ## Token for authentication. - token = "no_more_secrets" - - ## Organization is the name of the organization you wish to write to; must exist. - organization = "my_org" - - ## Destination bucket to write into. - bucket = "my_bucket" -`, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.r.Header.Set("Accept", tt.acceptHeader) - w := httptest.NewRecorder() - telegrafBackend := NewMockTelegrafBackend(t) - telegrafBackend.TelegrafService = tt.svc - h := NewTelegrafHandler(zaptest.NewLogger(t), telegrafBackend) - - h.ServeHTTP(w, tt.r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetTelegraf() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - t.Logf("headers: %v", res.Header) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetTelegraf() = %v, want %v", tt.name, content, tt.wants.contentType) - return - } - - if strings.Contains(tt.wants.contentType, "application/json") { - if eq, diff, _ := jsonEqual(string(body), tt.wants.body); tt.wants.body != "" && !eq { - t.Errorf("%q. handleGetTelegraf() = ***%s***", tt.name, diff) - } - } else if string(body) != tt.wants.body { - t.Errorf("%q. handleGetTelegraf() = \n***%v***\nwant\n***%v***", tt.name, string(body), tt.wants.body) - } - }) - } -} - -func Test_newTelegrafResponses(t *testing.T) { - type args struct { - tcs []*platform.TelegrafConfig - } - tests := []struct { - name string - args args - want string - }{ - { - args: args{ - tcs: []*platform.TelegrafConfig{ - { - ID: platform2.ID(1), - OrgID: platform2.ID(2), - Name: "my config", - Description: "", - Config: "[[inputs.cpu]]\n[[outputs.influxdb_v2]]\n", - }, - }, - }, - want: `{ - "configurations": [ - { - "id": "0000000000000001", - "orgID": "0000000000000002", - "name": "my config", - "config": "[[inputs.cpu]]\n[[outputs.influxdb_v2]]\n", - "labels": [], - "links": { - "self": "/api/v2/telegrafs/0000000000000001", - "labels": "/api/v2/telegrafs/0000000000000001/labels", - "members": "/api/v2/telegrafs/0000000000000001/members", - "owners": "/api/v2/telegrafs/0000000000000001/owners" - } - } - ] - }`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - res := newTelegrafResponses(ctx, tt.args.tcs, mock.NewLabelService()) - got, err := json.Marshal(res) - if err != nil { - t.Fatalf("newTelegrafResponses() JSON marshal %v", err) - } - if eq, diff, _ := jsonEqual(string(got), tt.want); tt.want != "" && !eq { - t.Errorf("%q. 
newTelegrafResponses() = ***%s***", tt.name, diff) - } - }) - } -} - -func Test_newTelegrafResponse(t *testing.T) { - type args struct { - tc *platform.TelegrafConfig - } - tests := []struct { - name string - args args - want string - }{ - { - args: args{ - tc: &platform.TelegrafConfig{ - ID: platform2.ID(1), - OrgID: platform2.ID(2), - Name: "my config", - Description: "my description", - Config: "[[inputs.cpu]]\n[[outputs.influxdb_v2]]\n", - }, - }, - want: `{ - "id": "0000000000000001", - "orgID": "0000000000000002", - "name": "my config", - "description": "my description", - "config": "[[inputs.cpu]]\n[[outputs.influxdb_v2]]\n", - "labels": [], - "links": { - "self": "/api/v2/telegrafs/0000000000000001", - "labels": "/api/v2/telegrafs/0000000000000001/labels", - "members": "/api/v2/telegrafs/0000000000000001/members", - "owners": "/api/v2/telegrafs/0000000000000001/owners" - } - }`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - res := newTelegrafResponse(tt.args.tc, []*platform.Label{}) - got, err := json.Marshal(res) - if err != nil { - t.Fatalf("newTelegrafResponse() JSON marshal %v", err) - } - if eq, diff, _ := jsonEqual(string(got), tt.want); tt.want != "" && !eq { - fmt.Println(string(got)) - t.Errorf("%q. newTelegrafResponse() = ***%s***", tt.name, diff) - } - }) - } -} diff --git a/http/tokens.go b/http/tokens.go deleted file mode 100644 index 010481debc4..00000000000 --- a/http/tokens.go +++ /dev/null @@ -1,42 +0,0 @@ -package http - -import ( - "errors" - "fmt" - "net/http" - "strings" -) - -const ( - tokenScheme = "Token " - bearerScheme = "Bearer " -) - -// errors -var ( - ErrAuthHeaderMissing = errors.New("authorization Header is missing") - ErrAuthBadScheme = errors.New("authorization Header Scheme is invalid") -) - -// GetToken will parse the token from http Authorization Header. -func GetToken(r *http.Request) (string, error) { - header := r.Header.Get("Authorization") - if header == "" { - return "", ErrAuthHeaderMissing - } - - if len(header) >= len(tokenScheme) && - strings.EqualFold(header[:len(tokenScheme)], tokenScheme) { - return header[len(tokenScheme):], nil - } else if len(header) > len(bearerScheme) && - strings.EqualFold(header[:len(bearerScheme)], bearerScheme) { - return header[len(bearerScheme):], nil - } - - return "", ErrAuthBadScheme -} - -// SetToken adds the token to the request. 
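A tiny round-trip sketch of the two token helpers, written as a Go example that would sit in a _test.go file of this package (the token value is a placeholder). SetToken, defined just below, writes the "Token " scheme into the Authorization header, and GetToken parses either the Token or Bearer scheme back out.

package http

import (
	"fmt"
	"net/http/httptest"
)

func Example_tokenRoundTrip() {
	req := httptest.NewRequest("GET", "http://localhost:8086/api/v2/me", nil)

	// Writes "Authorization: Token my-secret" onto the request.
	SetToken("my-secret", req)

	// Parses the token back out of the Authorization header.
	tok, err := GetToken(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(tok)
	// Output: my-secret
}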
-func SetToken(token string, req *http.Request) { - req.Header.Set("Authorization", fmt.Sprintf("%s%s", tokenScheme, token)) -} diff --git a/http/tokens_test.go b/http/tokens_test.go deleted file mode 100644 index 5b326f2bbc9..00000000000 --- a/http/tokens_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package http - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -func TestGetToken(t *testing.T) { - type args struct { - header string - } - type wants struct { - err error - result string - } - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "empty header", - args: args{ - header: "", - }, - wants: wants{ - err: ErrAuthHeaderMissing, - }, - }, - { - name: "bad none empty header", - args: args{ - header: "a bad header", - }, - wants: wants{ - err: ErrAuthBadScheme, - }, - }, - { - name: "bad basic header", - args: args{ - header: "Basic header", - }, - wants: wants{ - err: ErrAuthBadScheme, - }, - }, - { - name: "good token", - args: args{ - header: "Token tok2", - }, - wants: wants{ - result: "tok2", - }, - }, - { - name: "bearer token", - args: args{ - header: "Bearer tok2", - }, - wants: wants{ - result: "tok2", - }, - }, - { - name: "short header", - args: args{ - header: "a", - }, - wants: wants{ - err: ErrAuthBadScheme, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - req := &http.Request{ - Header: make(http.Header), - } - req.Header.Set("Authorization", tt.args.header) - result, err := GetToken(req) - if err != tt.wants.err { - t.Errorf("err incorrect want %v, got %v", tt.wants.err, err) - return - } - if result != tt.wants.result { - t.Errorf("result incorrect want %s, got %s", tt.wants.result, result) - } - }) - } - -} - -func TestSetToken(t *testing.T) { - tests := []struct { - name string - token string - req *http.Request - want string - }{ - { - name: "adding token to Authorization header", - token: "1234", - req: httptest.NewRequest("GET", "/", nil), - want: "Token 1234", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - SetToken(tt.token, tt.req) - if got := tt.req.Header.Get("Authorization"); got != tt.want { - t.Errorf("SetToken() want %s, got %s", tt.want, got) - } - }) - } -} diff --git a/http/ua.go b/http/ua.go deleted file mode 100644 index 52179b1b9a1..00000000000 --- a/http/ua.go +++ /dev/null @@ -1,17 +0,0 @@ -package http - -import ( - "net/http" - - useragent "github.com/mileusna/useragent" -) - -func userAgent(r *http.Request) string { - header := r.Header.Get("User-Agent") - if header == "" { - return "unknown" - } - - ua := useragent.Parse(header) - return ua.Name -} diff --git a/http/ua_test.go b/http/ua_test.go deleted file mode 100644 index 29e65723c66..00000000000 --- a/http/ua_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package http - -import ( - nethttp "net/http" - "testing" -) - -func Test_userAgent(t *testing.T) { - tests := []struct { - name string - ua string - want string - }{ - { - name: "linux chrome", - ua: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36", - want: "Chrome", - }, - { - name: "telegraf", - ua: "Telegraf/1.12.6", - want: "Telegraf", - }, - { - name: "curl", - ua: "curl/7.67.0", - want: "curl", - }, - { - name: "go", - ua: "Go-http-client/1.1", - want: "Go-http-client", - }, - { - name: "influx client", - ua: "InfluxDBClient/0.0.1 (golang; windows; amd64)", - want: "InfluxDBClient", - }, - { - name: "iphone", - ua: "Mozilla/5.0 (iPhone; CPU iPhone OS 13_3 like Mac OS X) 
AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.4 Mobile/15E148 Safari/604.1", - want: "Safari", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := &nethttp.Request{ - Header: nethttp.Header{ - "User-Agent": []string{tt.ua}, - }, - } - - got := userAgent(r) - if got != tt.want { - t.Fatalf("userAgent %v want %v", got, tt.want) - } - }) - } -} diff --git a/http/user_resource_mapping_service.go b/http/user_resource_mapping_service.go deleted file mode 100644 index e2b478b433b..00000000000 --- a/http/user_resource_mapping_service.go +++ /dev/null @@ -1,394 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "path" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "go.uber.org/zap" -) - -type resourceUserResponse struct { - Role influxdb.UserType `json:"role"` - *influxdb.UserResponse -} - -func newUserResponse(u *influxdb.User) *influxdb.UserResponse { - return &influxdb.UserResponse{ - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/users/%s", u.ID), - "logs": fmt.Sprintf("/api/v2/users/%s/logs", u.ID), - }, - User: *u, - } -} - -func newResourceUserResponse(u *influxdb.User, userType influxdb.UserType) *resourceUserResponse { - return &resourceUserResponse{ - Role: userType, - UserResponse: newUserResponse(u), - } -} - -type resourceUsersResponse struct { - Links map[string]string `json:"links"` - Users []*resourceUserResponse `json:"users"` -} - -func newResourceUsersResponse(opts influxdb.FindOptions, f influxdb.UserResourceMappingFilter, users []*influxdb.User) *resourceUsersResponse { - rs := resourceUsersResponse{ - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/%s/%s/%ss", f.ResourceType, f.ResourceID, f.UserType), - }, - Users: make([]*resourceUserResponse, 0, len(users)), - } - - for _, user := range users { - rs.Users = append(rs.Users, newResourceUserResponse(user, f.UserType)) - } - return &rs -} - -// MemberBackend is all services and associated parameters required to construct -// member handler. 
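To make the request/response shapes these helpers produce concrete, here is a hedged client-side sketch of adding a member to a telegraf configuration; the server address, token, and IDs are placeholders. The decoder below only requires a user object with a valid "id", and a successful request returns 201 Created with the user annotated by its role.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// The body is a user object; only a valid "id" is required by the decoder.
	payload := []byte(`{"id": "0000000000000003"}`)

	// POST /api/v2/telegrafs/:id/members adds the user as a member of the config.
	url := "http://localhost:8086/api/v2/telegrafs/0000000000000001/members"
	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Token my-token")
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	// On success: 201 Created with the user plus its role ("member").
	fmt.Println(resp.Status, string(body))
}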
-type MemberBackend struct { - errors.HTTPErrorHandler - log *zap.Logger - - ResourceType influxdb.ResourceType - UserType influxdb.UserType - - UserResourceMappingService influxdb.UserResourceMappingService - UserService influxdb.UserService -} - -// newPostMemberHandler returns a handler func for a POST to /members or /owners endpoints -func newPostMemberHandler(b MemberBackend) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePostMemberRequest(ctx, r) - if err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - - user, err := b.UserService.FindUserByID(ctx, req.MemberID) - if err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - - mapping := &influxdb.UserResourceMapping{ - ResourceID: req.ResourceID, - ResourceType: b.ResourceType, - UserID: req.MemberID, - UserType: b.UserType, - } - - if err := b.UserResourceMappingService.CreateUserResourceMapping(ctx, mapping); err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - b.log.Debug("Member/owner created", zap.String("mapping", fmt.Sprint(mapping))) - - if err := encodeResponse(ctx, w, http.StatusCreated, newResourceUserResponse(user, b.UserType)); err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - } -} - -type postMemberRequest struct { - MemberID platform.ID - ResourceID platform.ID -} - -func decodePostMemberRequest(ctx context.Context, r *http.Request) (*postMemberRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var rid platform.ID - if err := rid.DecodeFromString(id); err != nil { - return nil, err - } - - u := &influxdb.User{} - if err := json.NewDecoder(r.Body).Decode(u); err != nil { - return nil, err - } - - if !u.ID.Valid() { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "user id missing or invalid", - } - } - - return &postMemberRequest{ - MemberID: u.ID, - ResourceID: rid, - }, nil -} - -// newGetMembersHandler returns a handler func for a GET to /members or /owners endpoints -func newGetMembersHandler(b MemberBackend) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetMembersRequest(ctx, r) - if err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - - filter := influxdb.UserResourceMappingFilter{ - ResourceID: req.ResourceID, - ResourceType: b.ResourceType, - UserType: b.UserType, - } - - opts := influxdb.FindOptions{} - mappings, _, err := b.UserResourceMappingService.FindUserResourceMappings(ctx, filter) - if err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - - users := make([]*influxdb.User, 0, len(mappings)) - for _, m := range mappings { - if m.MappingType == influxdb.OrgMappingType { - continue - } - user, err := b.UserService.FindUserByID(ctx, m.UserID) - if err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - - users = append(users, user) - } - b.log.Debug("Members/owners retrieved", zap.String("users", fmt.Sprint(users))) - - if err := encodeResponse(ctx, w, http.StatusOK, newResourceUsersResponse(opts, filter, users)); err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - } -} - -type getMembersRequest struct { - MemberID platform.ID - ResourceID platform.ID -} - -func decodeGetMembersRequest(ctx context.Context, r *http.Request) (*getMembersRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, 
&errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - req := &getMembersRequest{ - ResourceID: i, - } - - return req, nil -} - -// newDeleteMemberHandler returns a handler func for a DELETE to /members or /owners endpoints -func newDeleteMemberHandler(b MemberBackend) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeDeleteMemberRequest(ctx, r) - if err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - - if err := b.UserResourceMappingService.DeleteUserResourceMapping(ctx, req.ResourceID, req.MemberID); err != nil { - b.HandleHTTPError(ctx, err, w) - return - } - b.log.Debug("Member deleted", zap.String("resourceID", req.ResourceID.String()), zap.String("memberID", req.MemberID.String())) - - w.WriteHeader(http.StatusNoContent) - } -} - -type deleteMemberRequest struct { - MemberID platform.ID - ResourceID platform.ID -} - -func decodeDeleteMemberRequest(ctx context.Context, r *http.Request) (*deleteMemberRequest, error) { - params := httprouter.ParamsFromContext(ctx) - id := params.ByName("id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing resource id", - } - } - - var rid platform.ID - if err := rid.DecodeFromString(id); err != nil { - return nil, err - } - - id = params.ByName("userID") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing member id", - } - } - - var mid platform.ID - if err := mid.DecodeFromString(id); err != nil { - return nil, err - } - - return &deleteMemberRequest{ - MemberID: mid, - ResourceID: rid, - }, nil -} - -// UserResourceMappingService is the struct of urm service -type UserResourceMappingService struct { - Client *httpc.Client -} - -// FindUserResourceMappings returns the user resource mappings -func (s *UserResourceMappingService) FindUserResourceMappings(ctx context.Context, f influxdb.UserResourceMappingFilter, opt ...influxdb.FindOptions) ([]*influxdb.UserResourceMapping, int, error) { - var results resourceUsersResponse - err := s.Client. - Get(resourceIDPath(f.ResourceType, f.ResourceID, string(f.UserType)+"s")). - DecodeJSON(&results). - Do(ctx) - if err != nil { - return nil, 0, err - } - - urs := make([]*influxdb.UserResourceMapping, len(results.Users)) - for k, item := range results.Users { - urs[k] = &influxdb.UserResourceMapping{ - ResourceID: f.ResourceID, - ResourceType: f.ResourceType, - UserID: item.User.ID, - UserType: item.Role, - } - } - return urs, len(urs), nil -} - -// CreateUserResourceMapping will create a user resource mapping -func (s *UserResourceMappingService) CreateUserResourceMapping(ctx context.Context, m *influxdb.UserResourceMapping) error { - if err := m.Validate(); err != nil { - return err - } - - urlPath := resourceIDPath(m.ResourceType, m.ResourceID, string(m.UserType)+"s") - return s.Client. - PostJSON(influxdb.User{ID: m.UserID}, urlPath). - DecodeJSON(m). - Do(ctx) -} - -// DeleteUserResourceMapping will delete user resource mapping based in criteria. -func (s *UserResourceMappingService) DeleteUserResourceMapping(ctx context.Context, resourceID platform.ID, userID platform.ID) error { - urlPath := resourceIDUserPath(influxdb.OrgsResourceType, resourceID, influxdb.Member, userID) - return s.Client. - Delete(urlPath). - Do(ctx) -} - -// SpecificURMSvc returns a urm service with specific resource and user types. 
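// For example, a bucket handler can use SpecificURMSvc(influxdb.BucketsResourceType, influxdb.Member)
// so that a delete is issued against /api/v2/buckets/:id/members/:userID instead of the
// orgs/member path hard-coded in DeleteUserResourceMapping above.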
-// this will help us stay compatible with the existing service contract but also allow for urm deletes to go through the correct -// api -func (s *UserResourceMappingService) SpecificURMSvc(rt influxdb.ResourceType, ut influxdb.UserType) *SpecificURMSvc { - return &SpecificURMSvc{ - Client: s.Client, - rt: rt, - ut: ut, - } -} - -// SpecificURMSvc is a URM client that speaks to a specific resource with a specified user type -type SpecificURMSvc struct { - Client *httpc.Client - rt influxdb.ResourceType - ut influxdb.UserType -} - -// FindUserResourceMappings returns the user resource mappings -func (s *SpecificURMSvc) FindUserResourceMappings(ctx context.Context, f influxdb.UserResourceMappingFilter, opt ...influxdb.FindOptions) ([]*influxdb.UserResourceMapping, int, error) { - var results resourceUsersResponse - err := s.Client. - Get(resourceIDPath(s.rt, f.ResourceID, string(s.ut)+"s")). - DecodeJSON(&results). - Do(ctx) - if err != nil { - return nil, 0, err - } - - urs := make([]*influxdb.UserResourceMapping, len(results.Users)) - for k, item := range results.Users { - urs[k] = &influxdb.UserResourceMapping{ - ResourceID: f.ResourceID, - ResourceType: f.ResourceType, - UserID: item.User.ID, - UserType: item.Role, - } - } - return urs, len(urs), nil -} - -// CreateUserResourceMapping will create a user resource mapping -func (s *SpecificURMSvc) CreateUserResourceMapping(ctx context.Context, m *influxdb.UserResourceMapping) error { - if err := m.Validate(); err != nil { - return err - } - - urlPath := resourceIDPath(s.rt, m.ResourceID, string(s.ut)+"s") - return s.Client. - PostJSON(influxdb.User{ID: m.UserID}, urlPath). - DecodeJSON(m). - Do(ctx) -} - -// DeleteUserResourceMapping will delete user resource mapping based in criteria. -func (s *SpecificURMSvc) DeleteUserResourceMapping(ctx context.Context, resourceID platform.ID, userID platform.ID) error { - urlPath := resourceIDUserPath(s.rt, resourceID, s.ut, userID) - return s.Client. - Delete(urlPath). 
- Do(ctx) -} - -func resourceIDPath(resourceType influxdb.ResourceType, resourceID platform.ID, p string) string { - return path.Join("/api/v2/", string(resourceType), resourceID.String(), p) -} - -func resourceIDUserPath(resourceType influxdb.ResourceType, resourceID platform.ID, userType influxdb.UserType, userID platform.ID) string { - return path.Join("/api/v2/", string(resourceType), resourceID.String(), string(userType)+"s", userID.String()) -} diff --git a/http/user_resource_mapping_test.go b/http/user_resource_mapping_test.go deleted file mode 100644 index d66a1e55d62..00000000000 --- a/http/user_resource_mapping_test.go +++ /dev/null @@ -1,388 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/httprouter" - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - "go.uber.org/zap/zaptest" -) - -func TestUserResourceMappingService_GetMembersHandler(t *testing.T) { - type fields struct { - userService platform.UserService - userResourceMappingService platform.UserResourceMappingService - } - type args struct { - resourceID string - userType platform.UserType - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get members", - fields: fields{ - userService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform2.ID) (*platform.User, error) { - return &platform.User{ID: id, Name: fmt.Sprintf("user%s", id), Status: platform.Active}, nil - }, - }, - userResourceMappingService: &mock.UserResourceMappingService{ - FindMappingsFn: func(ctx context.Context, filter platform.UserResourceMappingFilter) ([]*platform.UserResourceMapping, int, error) { - ms := []*platform.UserResourceMapping{ - { - ResourceID: filter.ResourceID, - ResourceType: filter.ResourceType, - UserType: filter.UserType, - UserID: 1, - }, - { - ResourceID: filter.ResourceID, - ResourceType: filter.ResourceType, - UserType: filter.UserType, - UserID: 2, - }, - } - return ms, len(ms), nil - }, - }, - }, - args: args{ - resourceID: "0000000000000099", - userType: platform.Member, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/%s/0000000000000099/members" - }, - "users": [ - { - "links": { - "logs": "/api/v2/users/0000000000000001/logs", - "self": "/api/v2/users/0000000000000001" - }, - "id": "0000000000000001", - "name": "user0000000000000001", - "role": "member", - "status": "active" - }, - { - "links": { - "logs": "/api/v2/users/0000000000000002/logs", - "self": "/api/v2/users/0000000000000002" - }, - "id": "0000000000000002", - "name": "user0000000000000002", - "role": "member", - "status": "active" - } - ] -}`, - }, - }, - - { - name: "get owners", - fields: fields{ - userService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform2.ID) (*platform.User, error) { - return &platform.User{ID: id, Name: fmt.Sprintf("user%s", id), Status: platform.Active}, nil - }, - }, - userResourceMappingService: &mock.UserResourceMappingService{ - FindMappingsFn: func(ctx context.Context, filter platform.UserResourceMappingFilter) ([]*platform.UserResourceMapping, int, error) { - ms := []*platform.UserResourceMapping{ - { - ResourceID: 
filter.ResourceID, - ResourceType: filter.ResourceType, - UserType: filter.UserType, - UserID: 1, - }, - { - ResourceID: filter.ResourceID, - ResourceType: filter.ResourceType, - UserType: filter.UserType, - UserID: 2, - }, - } - return ms, len(ms), nil - }, - }, - }, - args: args{ - resourceID: "0000000000000099", - userType: platform.Owner, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/%s/0000000000000099/owners" - }, - "users": [ - { - "links": { - "logs": "/api/v2/users/0000000000000001/logs", - "self": "/api/v2/users/0000000000000001" - }, - "id": "0000000000000001", - "name": "user0000000000000001", - "role": "owner", - "status": "active" - }, - { - "links": { - "logs": "/api/v2/users/0000000000000002/logs", - "self": "/api/v2/users/0000000000000002" - }, - "id": "0000000000000002", - "name": "user0000000000000002", - "role": "owner", - "status": "active" - } - ] -}`, - }, - }, - } - - for _, tt := range tests { - resourceTypes := []platform.ResourceType{ - platform.BucketsResourceType, - platform.DashboardsResourceType, - platform.OrgsResourceType, - platform.SourcesResourceType, - platform.TasksResourceType, - platform.TelegrafsResourceType, - platform.UsersResourceType, - } - - for _, resourceType := range resourceTypes { - t.Run(tt.name+"_"+string(resourceType), func(t *testing.T) { - r := httptest.NewRequest("GET", "http://any.url", nil) - r = r.WithContext(context.WithValue( - context.TODO(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.resourceID, - }, - })) - - w := httptest.NewRecorder() - memberBackend := MemberBackend{ - log: zaptest.NewLogger(t), - ResourceType: resourceType, - UserType: tt.args.userType, - UserResourceMappingService: tt.fields.userResourceMappingService, - UserService: tt.fields.userService, - } - h := newGetMembersHandler(memberBackend) - h.ServeHTTP(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. GetMembersHandler() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. GetMembersHandler() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, _ := jsonEqual(string(body), fmt.Sprintf(tt.wants.body, resourceType)); tt.wants.body != "" && !eq { - t.Errorf("%q. 
GetMembersHandler() = ***%s***", tt.name, diff) - } - }) - } - } -} - -func TestUserResourceMappingService_PostMembersHandler(t *testing.T) { - type fields struct { - userService platform.UserService - userResourceMappingService platform.UserResourceMappingService - } - type args struct { - resourceID string - userType platform.UserType - user platform.User - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "post members", - fields: fields{ - userService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform2.ID) (*platform.User, error) { - return &platform.User{ID: id, Name: fmt.Sprintf("user%s", id), Status: platform.Active}, nil - }, - }, - userResourceMappingService: &mock.UserResourceMappingService{ - CreateMappingFn: func(ctx context.Context, m *platform.UserResourceMapping) error { - return nil - }, - }, - }, - args: args{ - resourceID: "0000000000000099", - user: platform.User{ - ID: 1, - Name: "user0000000000000001", - Status: platform.Active, - }, - userType: platform.Member, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "logs": "/api/v2/users/0000000000000001/logs", - "self": "/api/v2/users/0000000000000001" - }, - "id": "0000000000000001", - "name": "user0000000000000001", - "role": "member", - "status": "active" -}`, - }, - }, - - { - name: "post owners", - fields: fields{ - userService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform2.ID) (*platform.User, error) { - return &platform.User{ID: id, Name: fmt.Sprintf("user%s", id), Status: platform.Active}, nil - }, - }, - userResourceMappingService: &mock.UserResourceMappingService{ - CreateMappingFn: func(ctx context.Context, m *platform.UserResourceMapping) error { - return nil - }, - }, - }, - args: args{ - resourceID: "0000000000000099", - user: platform.User{ - ID: 2, - Name: "user0000000000000002", - Status: platform.Active, - }, - userType: platform.Owner, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "logs": "/api/v2/users/0000000000000002/logs", - "self": "/api/v2/users/0000000000000002" - }, - "id": "0000000000000002", - "name": "user0000000000000002", - "role": "owner", - "status": "active" -}`, - }, - }, - } - - for _, tt := range tests { - resourceTypes := []platform.ResourceType{ - platform.BucketsResourceType, - platform.DashboardsResourceType, - platform.OrgsResourceType, - platform.SourcesResourceType, - platform.TasksResourceType, - platform.TelegrafsResourceType, - platform.UsersResourceType, - } - - for _, resourceType := range resourceTypes { - t.Run(tt.name+"_"+string(resourceType), func(t *testing.T) { - b, err := json.Marshal(tt.args.user) - if err != nil { - t.Fatalf("failed to unmarshal user: %v", err) - } - - r := httptest.NewRequest("POST", "http://any.url", bytes.NewReader(b)) - r = r.WithContext(context.WithValue( - context.TODO(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.resourceID, - }, - })) - - w := httptest.NewRecorder() - memberBackend := MemberBackend{ - log: zaptest.NewLogger(t), - ResourceType: resourceType, - UserType: tt.args.userType, - UserResourceMappingService: tt.fields.userResourceMappingService, - UserService: tt.fields.userService, - } - h := newPostMemberHandler(memberBackend) - h.ServeHTTP(w, r) - 
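// The handler writes through the httptest recorder; the checks below compare the
// status code, the Content-Type header, and the body (compared as parsed JSON via
// jsonEqual) against the table expectations for each resource type.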
- res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. PostMembersHandler() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. PostMembersHandler() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, PostMembersHandler(). error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. PostMembersHandler() = ***%s***", tt.name, diff) - } - }) - } - } -} diff --git a/http/variable_service.go b/http/variable_service.go deleted file mode 100644 index c39cece6b44..00000000000 --- a/http/variable_service.go +++ /dev/null @@ -1,540 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "strings" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "go.uber.org/zap" -) - -const ( - prefixVariables = "/api/v2/variables" -) - -// VariableBackend is all services and associated parameters required to construct -// the VariableHandler. -type VariableBackend struct { - errors.HTTPErrorHandler - log *zap.Logger - VariableService influxdb.VariableService - LabelService influxdb.LabelService -} - -// NewVariableBackend creates a backend used by the variable handler. -func NewVariableBackend(log *zap.Logger, b *APIBackend) *VariableBackend { - return &VariableBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - VariableService: b.VariableService, - LabelService: b.LabelService, - } -} - -// VariableHandler is the handler for the variable service -type VariableHandler struct { - *httprouter.Router - - errors.HTTPErrorHandler - log *zap.Logger - - VariableService influxdb.VariableService - LabelService influxdb.LabelService -} - -// NewVariableHandler creates a new VariableHandler -func NewVariableHandler(log *zap.Logger, b *VariableBackend) *VariableHandler { - h := &VariableHandler{ - Router: NewRouter(b.HTTPErrorHandler), - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - - VariableService: b.VariableService, - LabelService: b.LabelService, - } - - entityPath := fmt.Sprintf("%s/:id", prefixVariables) - entityLabelsPath := fmt.Sprintf("%s/labels", entityPath) - entityLabelsIDPath := fmt.Sprintf("%s/:lid", entityLabelsPath) - - h.HandlerFunc("GET", prefixVariables, h.handleGetVariables) - h.HandlerFunc("POST", prefixVariables, h.handlePostVariable) - h.HandlerFunc("GET", entityPath, h.handleGetVariable) - h.HandlerFunc("PATCH", entityPath, h.handlePatchVariable) - h.HandlerFunc("PUT", entityPath, h.handlePutVariable) - h.HandlerFunc("DELETE", entityPath, h.handleDeleteVariable) - - labelBackend := &LabelBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: b.log.With(zap.String("handler", "label")), - LabelService: b.LabelService, - ResourceType: influxdb.VariablesResourceType, - } - h.HandlerFunc("GET", entityLabelsPath, newGetLabelsHandler(labelBackend)) - h.HandlerFunc("POST", entityLabelsPath, newPostLabelHandler(labelBackend)) - h.HandlerFunc("DELETE", entityLabelsIDPath, newDeleteLabelHandler(labelBackend)) - - return h -} - -type getVariablesResponse struct { - Variables []variableResponse 
`json:"variables"` - Links *influxdb.PagingLinks `json:"links"` -} - -func (r getVariablesResponse) Toinfluxdb() []*influxdb.Variable { - variables := make([]*influxdb.Variable, len(r.Variables)) - for i := range r.Variables { - variables[i] = r.Variables[i].Variable - } - return variables -} - -func newGetVariablesResponse(ctx context.Context, variables []*influxdb.Variable, f influxdb.VariableFilter, opts influxdb.FindOptions, labelService influxdb.LabelService) getVariablesResponse { - num := len(variables) - resp := getVariablesResponse{ - Variables: make([]variableResponse, 0, num), - Links: influxdb.NewPagingLinks(prefixVariables, opts, f, num), - } - - for _, variable := range variables { - labels, _ := labelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: variable.ID, ResourceType: influxdb.VariablesResourceType}) - resp.Variables = append(resp.Variables, newVariableResponse(variable, labels)) - } - - return resp -} - -type getVariablesRequest struct { - filter influxdb.VariableFilter - opts influxdb.FindOptions -} - -func decodeGetVariablesRequest(ctx context.Context, r *http.Request) (*getVariablesRequest, error) { - opts, err := influxdb.DecodeFindOptions(r) - if err != nil { - return nil, err - } - - req := &getVariablesRequest{ - opts: *opts, - } - qp := r.URL.Query() - if orgID := qp.Get("orgID"); orgID != "" { - id, err := platform.IDFromString(orgID) - if err != nil { - return nil, err - } - req.filter.OrganizationID = id - } - - if org := qp.Get("org"); org != "" { - req.filter.Organization = &org - } - - return req, nil -} - -func (h *VariableHandler) handleGetVariables(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetVariablesRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - variables, err := h.VariableService.FindVariables(ctx, req.filter, req.opts) - if err != nil { - h.HandleHTTPError(ctx, &errors.Error{ - Code: errors.EInternal, - Msg: "could not read variables", - Err: err, - }, w) - return - } - h.log.Debug("Variables retrieved", zap.String("vars", fmt.Sprint(variables))) - err = encodeResponse(ctx, w, http.StatusOK, newGetVariablesResponse(ctx, variables, req.filter, req.opts, h.LabelService)) - if err != nil { - logEncodingError(h.log, r, err) - return - } -} - -func requestVariableID(ctx context.Context) (platform.ID, error) { - params := httprouter.ParamsFromContext(ctx) - urlID := params.ByName("id") - if urlID == "" { - return platform.InvalidID(), &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - id, err := platform.IDFromString(urlID) - if err != nil { - return platform.InvalidID(), err - } - - return *id, nil -} - -func (h *VariableHandler) handleGetVariable(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := requestVariableID(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - variable, err := h.VariableService.FindVariableByID(ctx, id) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: variable.ID, ResourceType: influxdb.VariablesResourceType}) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Variable retrieved", zap.String("var", fmt.Sprint(variable))) - err = encodeResponse(ctx, w, http.StatusOK, newVariableResponse(variable, labels)) - if err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type variableLinks struct { - Self string 
`json:"self"` - Labels string `json:"labels"` - Org string `json:"org"` -} - -type variableResponse struct { - *influxdb.Variable - Labels []influxdb.Label `json:"labels"` - Links variableLinks `json:"links"` -} - -func newVariableResponse(m *influxdb.Variable, labels []*influxdb.Label) variableResponse { - res := variableResponse{ - Variable: m, - Labels: []influxdb.Label{}, - Links: variableLinks{ - Self: fmt.Sprintf("/api/v2/variables/%s", m.ID), - Labels: fmt.Sprintf("/api/v2/variables/%s/labels", m.ID), - Org: fmt.Sprintf("/api/v2/orgs/%s", m.OrganizationID), - }, - } - - for _, l := range labels { - res.Labels = append(res.Labels, *l) - } - return res -} - -func (h *VariableHandler) handlePostVariable(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePostVariableRequest(r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - err = h.VariableService.CreateVariable(ctx, req.variable) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Variable created", zap.String("var", fmt.Sprint(req.variable))) - if err := encodeResponse(ctx, w, http.StatusCreated, newVariableResponse(req.variable, []*influxdb.Label{})); err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type postVariableRequest struct { - variable *influxdb.Variable -} - -func (r *postVariableRequest) Valid() error { - return r.variable.Valid() -} - -func decodePostVariableRequest(r *http.Request) (*postVariableRequest, error) { - m := &influxdb.Variable{} - - err := json.NewDecoder(r.Body).Decode(m) - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: err.Error(), - } - } - - req := &postVariableRequest{ - variable: m, - } - - if err := req.Valid(); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: err.Error(), - } - } - - return req, nil -} - -func (h *VariableHandler) handlePatchVariable(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePatchVariableRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - variable, err := h.VariableService.UpdateVariable(ctx, req.id, req.variableUpdate) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: variable.ID, ResourceType: influxdb.VariablesResourceType}) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Variable updated", zap.String("var", fmt.Sprint(variable))) - err = encodeResponse(ctx, w, http.StatusOK, newVariableResponse(variable, labels)) - if err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type patchVariableRequest struct { - id platform.ID - variableUpdate *influxdb.VariableUpdate -} - -func (r *patchVariableRequest) Valid() error { - return r.variableUpdate.Valid() -} - -func decodePatchVariableRequest(ctx context.Context, r *http.Request) (*patchVariableRequest, error) { - u := &influxdb.VariableUpdate{} - - err := json.NewDecoder(r.Body).Decode(u) - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: err.Error(), - } - } - - id, err := requestVariableID(ctx) - if err != nil { - return nil, err - } - - req := &patchVariableRequest{ - id: id, - variableUpdate: u, - } - - if err := req.Valid(); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: err.Error(), - } - } - - return req, nil -} - -func (h *VariableHandler) handlePutVariable(w http.ResponseWriter, r *http.Request) { - ctx 
:= r.Context() - req, err := decodePutVariableRequest(ctx, r) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - v := req.variable - v.ID = req.id - - err = h.VariableService.ReplaceVariable(ctx, v) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - labels, err := h.LabelService.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: v.ID, ResourceType: influxdb.VariablesResourceType}) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Variable replaced", zap.String("var", fmt.Sprint(req.variable))) - err = encodeResponse(ctx, w, http.StatusOK, newVariableResponse(req.variable, labels)) - if err != nil { - logEncodingError(h.log, r, err) - return - } -} - -type putVariableRequest struct { - id platform.ID - variable *influxdb.Variable -} - -func (r *putVariableRequest) Valid() error { - return r.variable.Valid() -} - -func decodePutVariableRequest(ctx context.Context, r *http.Request) (*putVariableRequest, error) { - m := &influxdb.Variable{} - - err := json.NewDecoder(r.Body).Decode(m) - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - id, err := requestVariableID(ctx) - if err != nil { - return nil, err - } - - req := &putVariableRequest{ - variable: m, - id: id, - } - - if err := req.Valid(); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - return req, nil -} - -func (h *VariableHandler) handleDeleteVariable(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := requestVariableID(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - err = h.VariableService.DeleteVariable(ctx, id) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - h.log.Debug("Variable deleted", zap.String("variableID", fmt.Sprint(id))) - w.WriteHeader(http.StatusNoContent) -} - -// VariableService is a variable service over HTTP to the influxdb server -type VariableService struct { - Client *httpc.Client -} - -// FindVariableByID finds a single variable from the store by its ID -func (s *VariableService) FindVariableByID(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { - var mr variableResponse - err := s.Client. - Get(prefixVariables, id.String()). - DecodeJSON(&mr). - Do(ctx) - if err != nil { - return nil, err - } - - return mr.Variable, nil -} - -// FindVariables returns a list of variables that match filter. -// Additional options provide pagination & sorting. -func (s *VariableService) FindVariables(ctx context.Context, filter influxdb.VariableFilter, opts ...influxdb.FindOptions) ([]*influxdb.Variable, error) { - params := influxdb.FindOptionParams(opts...) - if filter.OrganizationID != nil { - params = append(params, [2]string{"orgID", filter.OrganizationID.String()}) - } - if filter.Organization != nil { - params = append(params, [2]string{"org", *filter.Organization}) - } - if filter.ID != nil { - params = append(params, [2]string{"id", filter.ID.String()}) - } - - var ms getVariablesResponse - err := s.Client. - Get(prefixVariables). - QueryParams(params...). - DecodeJSON(&ms). 
- Do(ctx) - if err != nil { - return nil, err - } - - return ms.Toinfluxdb(), nil -} - -// CreateVariable creates a new variable and assigns it an influxdb.ID -func (s *VariableService) CreateVariable(ctx context.Context, m *influxdb.Variable) error { - m.Name = strings.TrimSpace(m.Name) - if err := m.Valid(); err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - return s.Client. - PostJSON(m, prefixVariables). - DecodeJSON(m). - Do(ctx) -} - -// UpdateVariable updates a single variable with a changeset -func (s *VariableService) UpdateVariable(ctx context.Context, id platform.ID, update *influxdb.VariableUpdate) (*influxdb.Variable, error) { - var m influxdb.Variable - err := s.Client. - PatchJSON(update, prefixVariables, id.String()). - DecodeJSON(&m). - Do(ctx) - if err != nil { - return nil, err - } - - return &m, nil -} - -// ReplaceVariable replaces a single variable -func (s *VariableService) ReplaceVariable(ctx context.Context, variable *influxdb.Variable) error { - return s.Client. - PutJSON(variable, prefixVariables, variable.ID.String()). - DecodeJSON(variable). - Do(ctx) -} - -// DeleteVariable removes a variable from the store -func (s *VariableService) DeleteVariable(ctx context.Context, id platform.ID) error { - return s.Client. - Delete(prefixVariables, id.String()). - Do(ctx) -} diff --git a/http/variable_test.go b/http/variable_test.go deleted file mode 100644 index 78bf6b3dde9..00000000000 --- a/http/variable_test.go +++ /dev/null @@ -1,1030 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "strconv" - "testing" - "time" - - "github.com/influxdata/httprouter" - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -var faketime = time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC) - -// NewMockVariableBackend returns a VariableBackend with mock services. 
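// A minimal sketch, not part of the original file, of how the HTTP
// VariableService client defined in variable_service.go is driven; the
// function and variable names here are placeholders.
func exampleVariableClientUsage(ctx context.Context, svc *VariableService, orgID platform2.ID) error {
	v := &platform.Variable{
		Name:           "example-var",
		OrganizationID: orgID,
		Arguments: &platform.VariableArguments{
			Type:   "constant",
			Values: platform.VariableConstantValues{"a", "b"},
		},
	}
	// POST /api/v2/variables; the server assigns v.ID and echoes the variable back.
	if err := svc.CreateVariable(ctx, v); err != nil {
		return err
	}
	// GET /api/v2/variables?orgID=<orgID>
	_, err := svc.FindVariables(ctx, platform.VariableFilter{OrganizationID: &orgID})
	return err
}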
-func NewMockVariableBackend(t *testing.T) *VariableBackend { - return &VariableBackend{ - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(t)), - log: zaptest.NewLogger(t), - VariableService: mock.NewVariableService(), - LabelService: mock.NewLabelService(), - } -} - -func TestVariableService_handleGetVariables(t *testing.T) { - type fields struct { - VariableService platform.VariableService - LabelService platform.LabelService - } - type args struct { - queryParams map[string][]string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get all variables", - fields: fields{ - &mock.VariableService{ - FindVariablesF: func(ctx context.Context, filter platform.VariableFilter, opts ...platform.FindOptions) ([]*platform.Variable, error) { - return []*platform.Variable{ - { - ID: itesting.MustIDBase16("6162207574726f71"), - OrganizationID: platform2.ID(1), - Name: "variable-a", - Selected: []string{"b"}, - Arguments: &platform.VariableArguments{ - Type: "constant", - Values: platform.VariableConstantValues{"a", "b"}, - }, - CRUDLog: platform.CRUDLog{ - CreatedAt: faketime, - UpdatedAt: faketime, - }, - }, - { - ID: itesting.MustIDBase16("61726920617a696f"), - OrganizationID: platform2.ID(1), - Name: "variable-b", - Selected: []string{"c"}, - Arguments: &platform.VariableArguments{ - Type: "map", - Values: platform.VariableMapValues{"a": "b", "c": "d"}, - }, - CRUDLog: platform.CRUDLog{ - CreatedAt: faketime, - UpdatedAt: faketime, - }, - }, - }, nil - }, - }, - &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f platform.LabelMappingFilter) ([]*platform.Label, error) { - labels := []*platform.Label{ - { - ID: itesting.MustIDBase16("fc3dc670a4be9b9a"), - Name: "label", - Properties: map[string]string{ - "color": "fff000", - }, - }, - } - return labels, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: `{ - "links":{ - "self":"/api/v2/variables?descending=false&limit=` + strconv.Itoa(platform.DefaultPageSize) + `&offset=0" - }, - "variables":[ - { - "arguments":{ - "type":"constant", - "values":[ - "a", - "b" - ] - }, - "createdAt": "2006-05-04T01:02:03Z", - "updatedAt": "2006-05-04T01:02:03Z", - "description":"", - "id":"6162207574726f71", - "labels":[ - { - "id":"fc3dc670a4be9b9a", - "name":"label", - "properties":{ - "color":"fff000" - } - } - ], - "links":{ - "labels":"/api/v2/variables/6162207574726f71/labels", - "org":"/api/v2/orgs/0000000000000001", - "self":"/api/v2/variables/6162207574726f71" - }, - "name":"variable-a", - "orgID":"0000000000000001", - "selected":[ - "b" - ] - }, - { - "arguments":{ - "type":"map", - "values":{ - "a":"b", - "c":"d" - } - }, - "createdAt": "2006-05-04T01:02:03Z", - "updatedAt": "2006-05-04T01:02:03Z", - "description":"", - "id":"61726920617a696f", - "labels":[ - { - "id":"fc3dc670a4be9b9a", - "name":"label", - "properties":{ - "color":"fff000" - } - } - ], - "links":{ - "labels":"/api/v2/variables/61726920617a696f/labels", - "org":"/api/v2/orgs/0000000000000001", - "self":"/api/v2/variables/61726920617a696f" - }, - "name":"variable-b", - "orgID":"0000000000000001", - "selected":[ - "c" - ] - } - ] - }`, - }, - }, - { - name: "get all variables when there are none", - fields: fields{ - &mock.VariableService{ - FindVariablesF: func(ctx context.Context, filter platform.VariableFilter, opts ...platform.FindOptions) 
([]*platform.Variable, error) { - return []*platform.Variable{}, nil - }, - }, - &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f platform.LabelMappingFilter) ([]*platform.Label, error) { - return []*platform.Label{}, nil - }, - }, - }, - args: args{ - map[string][]string{ - "limit": {"1"}, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: `{"links":{"self":"/api/v2/variables?descending=false&limit=1&offset=0"},"variables":[]}`, - }, - }, - { - name: "get all variables belonging to an org", - fields: fields{ - &mock.VariableService{ - FindVariablesF: func(ctx context.Context, filter platform.VariableFilter, opts ...platform.FindOptions) ([]*platform.Variable, error) { - return []*platform.Variable{ - { - ID: itesting.MustIDBase16("6162207574726f71"), - OrganizationID: itesting.MustIDBase16("0000000000000001"), - Name: "variable-a", - Selected: []string{"b"}, - Arguments: &platform.VariableArguments{ - Type: "constant", - Values: platform.VariableConstantValues{"a", "b"}, - }, - CRUDLog: platform.CRUDLog{ - CreatedAt: faketime, - UpdatedAt: faketime, - }, - }, - }, nil - }, - }, - &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f platform.LabelMappingFilter) ([]*platform.Label, error) { - labels := []*platform.Label{ - { - ID: itesting.MustIDBase16("fc3dc670a4be9b9a"), - Name: "label", - Properties: map[string]string{ - "color": "fff000", - }, - }, - } - return labels, nil - }, - }, - }, - args: args{ - map[string][]string{ - "orgID": {"0000000000000001"}, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: `{ - "links": { - "self": "/api/v2/variables?descending=false&limit=` + strconv.Itoa(platform.DefaultPageSize) + `&offset=0&orgID=0000000000000001" - }, - "variables": [ - { - "arguments": { - "type": "constant", - "values": [ - "a", - "b" - ] - }, - "description": "", - "id": "6162207574726f71", - "labels": [ - { - "id": "fc3dc670a4be9b9a", - "name": "label", - "properties": { - "color": "fff000" - } - } - ], - "links": { - "labels": "/api/v2/variables/6162207574726f71/labels", - "org": "/api/v2/orgs/0000000000000001", - "self": "/api/v2/variables/6162207574726f71" - }, - "name": "variable-a", - "orgID": "0000000000000001", - "selected": [ - "b" - ], - "createdAt": "2006-05-04T01:02:03Z", - "updatedAt": "2006-05-04T01:02:03Z" - } - ] - }`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - variableBackend := NewMockVariableBackend(t) - variableBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - variableBackend.LabelService = tt.fields.LabelService - variableBackend.VariableService = tt.fields.VariableService - h := NewVariableHandler(zaptest.NewLogger(t), variableBackend) - - r := httptest.NewRequest("GET", "http://howdy.tld", nil) - - qp := r.URL.Query() - for k, vs := range tt.args.queryParams { - for _, v := range vs { - qp.Add(k, v) - } - } - r.URL.RawQuery = qp.Encode() - - w := httptest.NewRecorder() - - h.handleGetVariables(w, r) - - res := w.Result() - contentType := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetVariables() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if contentType != tt.wants.contentType { - t.Errorf("%q. 
handleGetVariables() = %v, want %v", tt.name, contentType, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetDashboards(). error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. handleGetDashboards() = ***%s***", tt.name, diff) - } - }) - } -} - -func TestVariableService_handleGetVariable(t *testing.T) { - type fields struct { - VariableService platform.VariableService - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - args args - fields fields - wants wants - }{ - { - name: "get a single variable by id", - args: args{ - id: "75650d0a636f6d70", - }, - fields: fields{ - &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id platform2.ID) (*platform.Variable, error) { - return &platform.Variable{ - ID: itesting.MustIDBase16("75650d0a636f6d70"), - OrganizationID: platform2.ID(1), - Name: "variable-a", - Selected: []string{"b"}, - Arguments: &platform.VariableArguments{ - Type: "constant", - Values: platform.VariableConstantValues{"a", "b"}, - }, - CRUDLog: platform.CRUDLog{ - CreatedAt: faketime, - UpdatedAt: faketime, - }, - }, nil - }, - }, - }, - wants: wants{ - statusCode: 200, - contentType: "application/json; charset=utf-8", - body: `{"id":"75650d0a636f6d70","orgID":"0000000000000001","name":"variable-a","description":"","selected":["b"],"arguments":{"type":"constant","values":["a","b"]},"createdAt":"2006-05-04T01:02:03Z","updatedAt":"2006-05-04T01:02:03Z","labels":[],"links":{"self":"/api/v2/variables/75650d0a636f6d70","labels":"/api/v2/variables/75650d0a636f6d70/labels","org":"/api/v2/orgs/0000000000000001"}}`, - }, - }, - { - name: "get a non-existent variable", - args: args{ - id: "75650d0a636f6d70", - }, - fields: fields{ - &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id platform2.ID) (*platform.Variable, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: fmt.Sprintf("variable with ID %v not found", id), - } - }, - }, - }, - wants: wants{ - statusCode: 404, - contentType: "application/json; charset=utf-8", - body: `{"code":"not found","message":"variable with ID 75650d0a636f6d70 not found"}`, - }, - }, - { - name: "request an invalid variable ID", - args: args{ - id: "baz", - }, - fields: fields{ - &mock.VariableService{ - FindVariableByIDF: func(ctx context.Context, id platform2.ID) (*platform.Variable, error) { - return nil, nil - }, - }, - }, - wants: wants{ - statusCode: 400, - contentType: "application/json; charset=utf-8", - body: `{"code":"invalid","message":"id must have a length of 16 bytes"}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - variableBackend := NewMockVariableBackend(t) - variableBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - variableBackend.VariableService = tt.fields.VariableService - h := NewVariableHandler(zaptest.NewLogger(t), variableBackend) - r := httptest.NewRequest("GET", "http://howdy.tld", nil) - r = r.WithContext(context.WithValue( - context.TODO(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - w := httptest.NewRecorder() - - h.handleGetVariable(w, r) - - res := w.Result() - contentType := res.Header.Get("Content-Type") - bodyBytes, _ := io.ReadAll(res.Body) - body := string(bodyBytes[:]) - - if res.StatusCode != tt.wants.statusCode { - 
t.Errorf("got = %v, want %v", res.StatusCode, tt.wants.statusCode) - } - if contentType != tt.wants.contentType { - t.Errorf("got = %v, want %v", contentType, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. ***%s***", tt.name, diff) - } - }) - } -} - -func TestVariableService_handlePostVariable(t *testing.T) { - type fields struct { - VariableService platform.VariableService - } - type args struct { - variable string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "create a new variable", - fields: fields{ - &mock.VariableService{ - CreateVariableF: func(ctx context.Context, m *platform.Variable) error { - m.ID = itesting.MustIDBase16("75650d0a636f6d70") - m.OrganizationID = platform2.ID(1) - m.UpdatedAt = faketime - m.CreatedAt = faketime - return nil - }, - }, - }, - args: args{ - variable: ` -{ - "name": "my-great-variable", - "orgID": "0000000000000001", - "arguments": { - "type": "constant", - "values": [ - "bar", - "foo" - ] - }, - "selected": [ - "'foo'" - ], - "createdAt": "2006-05-04T01:02:03Z", - "updatedAt": "2006-05-04T01:02:03Z" -} -`, - }, - wants: wants{ - statusCode: 201, - contentType: "application/json; charset=utf-8", - body: `{"id":"75650d0a636f6d70","orgID":"0000000000000001","name":"my-great-variable","description":"","selected":["'foo'"],"arguments":{"type":"constant","values":["bar","foo"]},"createdAt":"2006-05-04T01:02:03Z","updatedAt":"2006-05-04T01:02:03Z","labels":[],"links":{"self":"/api/v2/variables/75650d0a636f6d70","labels":"/api/v2/variables/75650d0a636f6d70/labels","org":"/api/v2/orgs/0000000000000001"}} -`, - }, - }, - { - name: "create a variable with invalid fields", - fields: fields{ - &mock.VariableService{ - CreateVariableF: func(ctx context.Context, m *platform.Variable) error { - m.ID = itesting.MustIDBase16("0") - return nil - }, - }, - }, - args: args{ - variable: `{"data": "nonsense"}`, - }, - wants: wants{ - statusCode: 400, - contentType: "application/json; charset=utf-8", - body: `{"code":"invalid","message":"missing variable name"}`, - }, - }, - { - name: "create a variable with invalid json", - fields: fields{ - &mock.VariableService{ - CreateVariableF: func(ctx context.Context, m *platform.Variable) error { - m.ID = itesting.MustIDBase16("0") - return nil - }, - }, - }, - args: args{ - variable: `howdy`, - }, - wants: wants{ - statusCode: 400, - contentType: "application/json; charset=utf-8", - body: `{"code":"invalid","message":"invalid character 'h' looking for beginning of value"}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - variableBackend := NewMockVariableBackend(t) - variableBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - variableBackend.VariableService = tt.fields.VariableService - h := NewVariableHandler(zaptest.NewLogger(t), variableBackend) - r := httptest.NewRequest("GET", "http://howdy.tld", bytes.NewReader([]byte(tt.args.variable))) - w := httptest.NewRecorder() - - h.handlePostVariable(w, r) - - res := w.Result() - contentType := res.Header.Get("Content-Type") - bodyBytes, _ := io.ReadAll(res.Body) - body := string(bodyBytes[:]) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("got = %v, want %v", res.StatusCode, tt.wants.statusCode) - } - if 
contentType != tt.wants.contentType { - t.Errorf("got = %v, want %v", contentType, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. ***%s***", tt.name, diff) - } - }) - } -} - -func TestVariableService_handlePutVariable(t *testing.T) { - type fields struct { - VariableService platform.VariableService - } - type args struct { - id string - variable string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "PUT a variable", - fields: fields{ - &mock.VariableService{ - ReplaceVariableF: func(ctx context.Context, m *platform.Variable) error { - m.ID = itesting.MustIDBase16("75650d0a636f6d70") - m.OrganizationID = platform2.ID(1) - m.UpdatedAt = faketime - m.CreatedAt = faketime - return nil - }, - }, - }, - args: args{ - id: "75650d0a636f6d70", - variable: ` -{ - "name": "my-great-variable", - "orgID": "0000000000000001", - "arguments": { - "type": "constant", - "values": [ - "bar", - "foo" - ] - }, - "selected": [ - "'foo'" - ], - "createdAt": "2006-05-04T01:02:03Z", - "updatedAt": "2006-05-04T01:02:03Z" -} -`, - }, - wants: wants{ - statusCode: 200, - contentType: "application/json; charset=utf-8", - body: `{"id":"75650d0a636f6d70","orgID":"0000000000000001","name":"my-great-variable","description":"","selected":["'foo'"],"arguments":{"type":"constant","values":["bar","foo"]},"createdAt":"2006-05-04T01:02:03Z","updatedAt":"2006-05-04T01:02:03Z","labels":[],"links":{"self":"/api/v2/variables/75650d0a636f6d70","labels":"/api/v2/variables/75650d0a636f6d70/labels","org":"/api/v2/orgs/0000000000000001"}} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - variableBackend := NewMockVariableBackend(t) - variableBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - variableBackend.VariableService = tt.fields.VariableService - h := NewVariableHandler(zaptest.NewLogger(t), variableBackend) - r := httptest.NewRequest("GET", "http://howdy.tld", bytes.NewReader([]byte(tt.args.variable))) - r = r.WithContext(context.WithValue( - context.TODO(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - w := httptest.NewRecorder() - - h.handlePutVariable(w, r) - - res := w.Result() - contentType := res.Header.Get("Content-Type") - bodyBytes, _ := io.ReadAll(res.Body) - body := string(bodyBytes[:]) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("got = %v, want %v", res.StatusCode, tt.wants.statusCode) - } - if contentType != tt.wants.contentType { - t.Errorf("got = %v, want %v", contentType, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. 
***%s***", tt.name, diff) - } - }) - } -} - -func TestVariableService_handlePatchVariable(t *testing.T) { - type fields struct { - VariableService platform.VariableService - } - type args struct { - id string - update string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "update a variable name", - fields: fields{ - &mock.VariableService{ - UpdateVariableF: func(ctx context.Context, id platform2.ID, u *platform.VariableUpdate) (*platform.Variable, error) { - return &platform.Variable{ - ID: itesting.MustIDBase16("75650d0a636f6d70"), - OrganizationID: platform2.ID(2), - Name: "new-name", - Arguments: &platform.VariableArguments{ - Type: "constant", - Values: platform.VariableConstantValues{}, - }, - Selected: []string{}, - CRUDLog: platform.CRUDLog{ - CreatedAt: faketime, - UpdatedAt: faketime, - }, - }, nil - }, - }, - }, - args: args{ - id: "75650d0a636f6d70", - update: `{"name": "new-name"}`, - }, - wants: wants{ - statusCode: 200, - contentType: "application/json; charset=utf-8", - body: `{"id":"75650d0a636f6d70","orgID":"0000000000000002","name":"new-name","description":"","selected":[],"arguments":{"type":"constant","values":[]},"createdAt":"2006-05-04T01:02:03Z","updatedAt": "2006-05-04T01:02:03Z","labels":[],"links":{"self":"/api/v2/variables/75650d0a636f6d70","labels":"/api/v2/variables/75650d0a636f6d70/labels","org":"/api/v2/orgs/0000000000000002"}}`, - }, - }, - { - name: "with an empty json body", - fields: fields{ - &mock.VariableService{}, - }, - args: args{ - id: "75650d0a636f6d70", - update: `{}`, - }, - wants: wants{ - statusCode: 400, - contentType: "application/json; charset=utf-8", - body: `{"code":"invalid","message":"no fields supplied in update"}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - variableBackend := NewMockVariableBackend(t) - variableBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - variableBackend.VariableService = tt.fields.VariableService - h := NewVariableHandler(zaptest.NewLogger(t), variableBackend) - r := httptest.NewRequest("GET", "http://howdy.tld", bytes.NewReader([]byte(tt.args.update))) - r = r.WithContext(context.WithValue( - context.TODO(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - w := httptest.NewRecorder() - - h.handlePatchVariable(w, r) - - res := w.Result() - contentType := res.Header.Get("Content-Type") - bodyBytes, _ := io.ReadAll(res.Body) - body := string(bodyBytes[:]) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("got = %v, want %v", res.StatusCode, tt.wants.statusCode) - } - if contentType != tt.wants.contentType { - t.Errorf("got = %v, want %v", contentType, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. 
***%s***", tt.name, diff) - } - }) - } -} - -func TestVariableService_handleDeleteVariable(t *testing.T) { - type fields struct { - VariableService platform.VariableService - } - type args struct { - id string - } - type wants struct { - statusCode int - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "delete a variable", - fields: fields{ - &mock.VariableService{ - DeleteVariableF: func(ctx context.Context, id platform2.ID) error { - return nil - }, - }, - }, - args: args{ - id: "75650d0a636f6d70", - }, - wants: wants{ - statusCode: 204, - }, - }, - { - name: "delete a non-existent variable", - fields: fields{ - &mock.VariableService{ - DeleteVariableF: func(ctx context.Context, id platform2.ID) error { - return &errors.Error{ - Code: errors.ENotFound, - Msg: fmt.Sprintf("variable with ID %v not found", id), - } - }, - }, - }, - args: args{ - id: "75650d0a636f6d70", - }, - wants: wants{ - statusCode: 404, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - variableBackend := NewMockVariableBackend(t) - variableBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - variableBackend.VariableService = tt.fields.VariableService - h := NewVariableHandler(zaptest.NewLogger(t), variableBackend) - r := httptest.NewRequest("GET", "http://howdy.tld", nil) - r = r.WithContext(context.WithValue( - context.TODO(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "id", - Value: tt.args.id, - }, - })) - w := httptest.NewRecorder() - - h.handleDeleteVariable(w, r) - - statusCode := w.Result().StatusCode - - if statusCode != tt.wants.statusCode { - t.Errorf("got = %v, want %v", statusCode, tt.wants.statusCode) - } - }) - } -} - -func TestService_handlePostVariableLabel(t *testing.T) { - type fields struct { - LabelService platform.LabelService - } - type args struct { - labelMapping *platform.LabelMapping - variableID platform2.ID - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "add label to variable", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform2.ID) (*platform.Label, error) { - return &platform.Label{ - ID: 1, - Name: "label", - Properties: map[string]string{ - "color": "fff000", - }, - }, nil - }, - CreateLabelMappingFn: func(ctx context.Context, m *platform.LabelMapping) error { return nil }, - }, - }, - args: args{ - labelMapping: &platform.LabelMapping{ - ResourceID: 100, - LabelID: 1, - }, - variableID: 100, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "label": { - "id": "0000000000000001", - "name": "label", - "properties": { - "color": "fff000" - } - }, - "links": { - "self": "/api/v2/labels/0000000000000001" - } -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - variableBackend := NewMockVariableBackend(t) - variableBackend.HTTPErrorHandler = kithttp.NewErrorHandler(zaptest.NewLogger(t)) - variableBackend.LabelService = tt.fields.LabelService - h := NewVariableHandler(zaptest.NewLogger(t), variableBackend) - - b, err := json.Marshal(tt.args.labelMapping) - if err != nil { - t.Fatalf("failed to unmarshal label mapping: %v", err) - } - - url := fmt.Sprintf("http://localhost:8086/api/v2/variables/%s/labels", tt.args.variableID) - r := httptest.NewRequest("POST", url, 
bytes.NewReader(b)) - w := httptest.NewRecorder() - - h.ServeHTTP(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("got %v, want %v", res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("got %v, want %v", content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, error unmarshalling json %v", tt.name, err) - } else if tt.wants.body != "" && !eq { - t.Errorf("%q. ***%s***", tt.name, diff) - } - }) - } -} - -func initVariableService(f itesting.VariableFields, t *testing.T) (platform.VariableService, string, func()) { - store := itesting.NewTestInmemStore(t) - tenantService := tenant.NewService(tenant.NewStore(store)) - - svc := kv.NewService(zaptest.NewLogger(t), store, tenantService) - svc.IDGenerator = f.IDGenerator - svc.TimeGenerator = f.TimeGenerator - - ctx := context.Background() - - for _, v := range f.Variables { - if err := svc.ReplaceVariable(ctx, v); err != nil { - t.Fatalf("failed to replace variable: %v", err) - } - } - - fakeBackend := NewMockVariableBackend(t) - fakeBackend.VariableService = svc - - handler := NewVariableHandler(zaptest.NewLogger(t), fakeBackend) - server := httptest.NewServer(handler) - client := VariableService{ - Client: mustNewHTTPClient(t, server.URL, ""), - } - done := server.Close - - return &client, "", done -} - -func TestVariableService(t *testing.T) { - itesting.VariableService(initVariableService, t, itesting.WithHTTPValidation()) -} diff --git a/http/write_handler.go b/http/write_handler.go deleted file mode 100644 index 0b5e84a4062..00000000000 --- a/http/write_handler.go +++ /dev/null @@ -1,372 +0,0 @@ -package http - -import ( - "compress/gzip" - "context" - "fmt" - "io" - "net/http" - - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - pcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/http/metric" - "github.com/influxdata/influxdb/v2/http/points" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/storage" - "github.com/influxdata/influxdb/v2/tsdb" - "go.uber.org/zap" -) - -// WriteBackend is all services and associated parameters required to construct -// the WriteHandler. -type WriteBackend struct { - errors.HTTPErrorHandler - log *zap.Logger - WriteEventRecorder metric.EventRecorder - - PointsWriter storage.PointsWriter - BucketService influxdb.BucketService - OrganizationService influxdb.OrganizationService -} - -// NewWriteBackend returns a new instance of WriteBackend. -func NewWriteBackend(log *zap.Logger, b *APIBackend) *WriteBackend { - return &WriteBackend{ - HTTPErrorHandler: b.HTTPErrorHandler, - log: log, - WriteEventRecorder: b.WriteEventRecorder, - - PointsWriter: b.PointsWriter, - BucketService: b.BucketService, - OrganizationService: b.OrganizationService, - } -} - -// WriteHandler receives line protocol and sends to a publish function. 
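// A minimal, illustrative sketch (not part of the original file) of the request
// shape the write handler below accepts. serverURL and token are placeholders,
// body carries line protocol, and the "Token" authorization scheme is assumed.
func exampleWriteRequest(ctx context.Context, serverURL, token string, body io.Reader) (*http.Response, error) {
	// bucket may be a name or an ID (see findBucket); precision defaults to "ns"
	// and must be one of ns, us, ms, or s (see decodeWriteRequest).
	req, err := http.NewRequestWithContext(ctx, http.MethodPost,
		serverURL+prefixWrite+"?org=my-org&bucket=my-bucket&precision=s", body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Token "+token)
	return http.DefaultClient.Do(req)
}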
-type WriteHandler struct { - errors.HTTPErrorHandler - BucketService influxdb.BucketService - OrganizationService influxdb.OrganizationService - PointsWriter storage.PointsWriter - EventRecorder metric.EventRecorder - - router *httprouter.Router - log *zap.Logger - maxBatchSizeBytes int64 - // parserOptions []models.ParserOption -} - -// WriteHandlerOption is a functional option for a *WriteHandler -type WriteHandlerOption func(*WriteHandler) - -// WithMaxBatchSizeBytes configures the maximum size for a -// (decompressed) points batch allowed by the write handler -func WithMaxBatchSizeBytes(n int64) WriteHandlerOption { - return func(w *WriteHandler) { - w.maxBatchSizeBytes = n - } -} - -//func WithParserOptions(opts ...models.ParserOption) WriteHandlerOption { -// return func(w *WriteHandler) { -// w.parserOptions = opts -// } -//} - -// Prefix provides the route prefix. -func (*WriteHandler) Prefix() string { - return prefixWrite -} - -const ( - prefixWrite = "/api/v2/write" - msgInvalidGzipHeader = "gzipped HTTP body contains an invalid header" - msgInvalidPrecision = "invalid precision; valid precision units are ns, us, ms, and s" - - opWriteHandler = "http/writeHandler" -) - -// NewWriteHandler creates a new handler at /api/v2/write to receive line protocol. -func NewWriteHandler(log *zap.Logger, b *WriteBackend, opts ...WriteHandlerOption) *WriteHandler { - h := &WriteHandler{ - HTTPErrorHandler: b.HTTPErrorHandler, - PointsWriter: b.PointsWriter, - BucketService: b.BucketService, - OrganizationService: b.OrganizationService, - EventRecorder: b.WriteEventRecorder, - - router: NewRouter(b.HTTPErrorHandler), - log: log, - } - - for _, opt := range opts { - opt(h) - } - - h.router.HandlerFunc(http.MethodPost, prefixWrite, h.handleWrite) - return h -} - -func (h *WriteHandler) findBucket(ctx context.Context, orgID platform.ID, bucket string) (*influxdb.Bucket, error) { - if id, err := platform.IDFromString(bucket); err == nil { - b, err := h.BucketService.FindBucket(ctx, influxdb.BucketFilter{ - OrganizationID: &orgID, - ID: id, - }) - if err != nil && errors.ErrorCode(err) != errors.ENotFound { - return nil, err - } else if err == nil { - return b, err - } - } - - return h.BucketService.FindBucket(ctx, influxdb.BucketFilter{ - OrganizationID: &orgID, - Name: &bucket, - }) -} - -func (h *WriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - h.router.ServeHTTP(w, r) -} - -func (h *WriteHandler) handleWrite(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, "WriteHandler") - defer span.Finish() - - ctx := r.Context() - auth, err := pcontext.GetAuthorizer(ctx) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - req, err := decodeWriteRequest(ctx, r, h.maxBatchSizeBytes) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - - org, err := queryOrganization(ctx, r, h.OrganizationService) - if err != nil { - h.HandleHTTPError(ctx, err, w) - return - } - span.LogKV("org_id", org.ID) - - sw := kithttp.NewStatusResponseWriter(w) - recorder := NewWriteUsageRecorder(sw, h.EventRecorder) - var requestBytes int - defer func() { - // Close around the requestBytes variable to placate the linter. 
- recorder.Record(ctx, requestBytes, org.ID, r.URL.Path) - }() - - bucket, err := h.findBucket(ctx, org.ID, req.Bucket) - if err != nil { - h.HandleHTTPError(ctx, err, sw) - return - } - span.LogKV("bucket_id", bucket.ID) - - if err := checkBucketWritePermissions(auth, org.ID, bucket.ID); err != nil { - h.HandleHTTPError(ctx, err, sw) - return - } - - // TODO: Backport? - //opts := append([]models.ParserOption{}, h.parserOptions...) - //opts = append(opts, models.WithParserPrecision(req.Precision)) - parsed, err := points.NewParser(req.Precision).Parse(ctx, org.ID, bucket.ID, req.Body) - if err != nil { - h.HandleHTTPError(ctx, err, sw) - return - } - requestBytes = parsed.RawSize - - if err := h.PointsWriter.WritePoints(ctx, org.ID, bucket.ID, parsed.Points); err != nil { - if partialErr, ok := err.(tsdb.PartialWriteError); ok { - h.HandleHTTPError(ctx, &errors.Error{ - Code: errors.EUnprocessableEntity, - Op: opWriteHandler, - Msg: "failure writing points to database", - Err: partialErr, - }, sw) - return - } - - h.HandleHTTPError(ctx, &errors.Error{ - Code: errors.EInternal, - Op: opWriteHandler, - Msg: "unexpected error writing points to database", - Err: err, - }, sw) - return - } - - sw.WriteHeader(http.StatusNoContent) -} - -// checkBucketWritePermissions checks an Authorizer for write permissions to a -// specific Bucket. -func checkBucketWritePermissions(auth influxdb.Authorizer, orgID, bucketID platform.ID) error { - p, err := influxdb.NewPermissionAtID(bucketID, influxdb.WriteAction, influxdb.BucketsResourceType, orgID) - if err != nil { - return &errors.Error{ - Code: errors.EInternal, - Op: opWriteHandler, - Msg: fmt.Sprintf("unable to create permission for bucket: %v", err), - Err: err, - } - } - if pset, err := auth.PermissionSet(); err != nil || !pset.Allowed(*p) { - return &errors.Error{ - Code: errors.EForbidden, - Op: opWriteHandler, - Msg: "insufficient permissions for write", - Err: err, - } - } - return nil -} - -// writeRequest is a request object holding information about a batch of points -// to be written to a Bucket. -type writeRequest struct { - Org string - Bucket string - Precision string - Body io.ReadCloser -} - -// decodeWriteRequest extracts information from an http.Request object to -// produce a writeRequest. -func decodeWriteRequest(ctx context.Context, r *http.Request, maxBatchSizeBytes int64) (*writeRequest, error) { - qp := r.URL.Query() - precision := qp.Get("precision") - if precision == "" { - precision = "ns" - } - - if !models.ValidPrecision(precision) { - return nil, &errors.Error{ - Code: errors.EInvalid, - Op: "http/newWriteRequest", - Msg: msgInvalidPrecision, - } - } - - bucket := qp.Get("bucket") - if bucket == "" { - return nil, &errors.Error{ - Code: errors.ENotFound, - Op: "http/newWriteRequest", - Msg: "bucket not found", - } - } - - encoding := r.Header.Get("Content-Encoding") - body, err := points.BatchReadCloser(r.Body, encoding, maxBatchSizeBytes) - if err != nil { - return nil, err - } - - return &writeRequest{ - Bucket: qp.Get("bucket"), - Org: qp.Get("org"), - Precision: precision, - Body: body, - }, nil -} - -// WriteService sends data over HTTP to influxdb via line protocol. 
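// ---- Editor's note: illustrative sketch, not part of the original file ----
// decodeWriteRequest above reads the "org", "bucket", and optional "precision"
// query parameters and hands the Content-Encoding header value to
// points.BatchReadCloser; the WriteService client below gzips its payload
// accordingly. A minimal client-side sketch of a matching request follows; the
// host, org/bucket names, and token value are assumptions for illustration only.
//
//	package main
//
//	import (
//		"bytes"
//		"compress/gzip"
//		"fmt"
//		"net/http"
//	)
//
//	func main() {
//		// Gzip a single line-protocol point before sending it.
//		var buf bytes.Buffer
//		zw := gzip.NewWriter(&buf)
//		zw.Write([]byte("m,t1=v1 f1=2"))
//		zw.Close()
//
//		url := "http://localhost:8086/api/v2/write?org=my-org&bucket=my-bucket&precision=ns"
//		req, err := http.NewRequest(http.MethodPost, url, &buf)
//		if err != nil {
//			panic(err)
//		}
//		req.Header.Set("Content-Type", "text/plain; charset=utf-8")
//		req.Header.Set("Content-Encoding", "gzip")
//		req.Header.Set("Authorization", "Token my-token") // token scheme assumed
//
//		resp, err := http.DefaultClient.Do(req)
//		if err != nil {
//			panic(err)
//		}
//		defer resp.Body.Close()
//		fmt.Println(resp.Status) // expect "204 No Content" on success
//	}
// ---------------------------------------------------------------------------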
-type WriteService struct { - Addr string - Token string - Precision string - InsecureSkipVerify bool -} - -var _ influxdb.WriteService = (*WriteService)(nil) - -func compressWithGzip(data io.Reader) (io.Reader, error) { - pr, pw := io.Pipe() - gw := gzip.NewWriter(pw) - var err error - - go func() { - _, err = io.Copy(gw, data) - gw.Close() - pw.Close() - }() - - return pr, err -} - -// WriteTo writes to the bucket matching the filter. -func (s *WriteService) WriteTo(ctx context.Context, filter influxdb.BucketFilter, r io.Reader) error { - precision := s.Precision - if precision == "" { - precision = "ns" - } - - if !models.ValidPrecision(precision) { - return &errors.Error{ - Code: errors.EInvalid, - Op: "http/Write", - Msg: msgInvalidPrecision, - } - } - - u, err := NewURL(s.Addr, prefixWrite) - if err != nil { - return err - } - - r, err = compressWithGzip(r) - if err != nil { - return err - } - - req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), r) - if err != nil { - return err - } - - req.Header.Set("Content-Type", "text/plain; charset=utf-8") - req.Header.Set("Content-Encoding", "gzip") - SetToken(s.Token, req) - - params := req.URL.Query() - - // In other CLI commands that take either an ID or a name as input, the ID - // is prioritized and used to short-circuit looking up the name. We simulate - // the same behavior here for a consistent experience. - if filter.OrganizationID != nil && filter.OrganizationID.Valid() { - params.Set("org", filter.OrganizationID.String()) - } else if filter.Org != nil && *filter.Org != "" { - params.Set("org", *filter.Org) - } - if filter.ID != nil && filter.ID.Valid() { - params.Set("bucket", filter.ID.String()) - } else if filter.Name != nil && *filter.Name != "" { - params.Set("bucket", *filter.Name) - } - params.Set("precision", precision) - req.URL.RawQuery = params.Encode() - - hc := NewClient(u.Scheme, s.InsecureSkipVerify) - - resp, err := hc.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - return CheckError(resp) -} diff --git a/http/write_handler_test.go b/http/write_handler_test.go deleted file mode 100644 index 41445a4e5bd..00000000000 --- a/http/write_handler_test.go +++ /dev/null @@ -1,457 +0,0 @@ -package http - -import ( - "compress/gzip" - "context" - "fmt" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/http/metric" - httpmock "github.com/influxdata/influxdb/v2/http/mock" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/mock" - influxtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestWriteService_WriteTo(t *testing.T) { - type args struct { - org string - orgId platform.ID - bucket string - bucketId platform.ID - r io.Reader - } - - orgId := platform.ID(1) - org := "org" - bucketId := platform.ID(2) - bucket := "bucket" - - tests := []struct { - name string - args args - status int - want string - wantFilters influxdb.BucketFilter - wantErr bool - }{ - { - name: "write with org and bucket IDs", - args: args{ - orgId: orgId, - bucketId: bucketId, - r: strings.NewReader("m,t1=v1 f1=2"), - }, - status: http.StatusNoContent, - want: 
"m,t1=v1 f1=2", - wantFilters: influxdb.BucketFilter{ - ID: &bucketId, - OrganizationID: &orgId, - }, - }, - { - name: "write with org ID and bucket name", - args: args{ - orgId: orgId, - bucket: bucket, - r: strings.NewReader("m,t1=v1 f1=2"), - }, - status: http.StatusNoContent, - want: "m,t1=v1 f1=2", - wantFilters: influxdb.BucketFilter{ - Name: &bucket, - OrganizationID: &orgId, - }, - }, - { - name: "write with org name and bucket ID", - args: args{ - org: org, - bucketId: bucketId, - r: strings.NewReader("m,t1=v1 f1=2"), - }, - status: http.StatusNoContent, - want: "m,t1=v1 f1=2", - wantFilters: influxdb.BucketFilter{ - ID: &bucketId, - Org: &org, - }, - }, - { - name: "write with org and bucket names", - args: args{ - org: org, - bucket: bucket, - r: strings.NewReader("m,t1=v1 f1=2"), - }, - status: http.StatusNoContent, - want: "m,t1=v1 f1=2", - wantFilters: influxdb.BucketFilter{ - Name: &bucket, - Org: &org, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var org, bucket *string - var orgId, bucketId *platform.ID - var lp []byte - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - orgStr := r.URL.Query().Get("org") - bucketStr := r.URL.Query().Get("bucket") - var err error - if orgId, err = platform.IDFromString(orgStr); err != nil { - org = &orgStr - } - if bucketId, err = platform.IDFromString(bucketStr); err != nil { - bucket = &bucketStr - } - defer r.Body.Close() - in, _ := gzip.NewReader(r.Body) - defer in.Close() - lp, _ = io.ReadAll(in) - w.WriteHeader(tt.status) - })) - s := &WriteService{ - Addr: ts.URL, - } - err := s.WriteTo( - context.Background(), - influxdb.BucketFilter{ID: &tt.args.bucketId, Name: &tt.args.bucket, OrganizationID: &tt.args.orgId, Org: &tt.args.org}, - tt.args.r, - ) - require.Equalf(t, err != nil, tt.wantErr, "error didn't match expectations: %v", err) - require.Equal(t, tt.wantFilters.OrganizationID, orgId) - require.Equal(t, tt.wantFilters.Org, org) - require.Equal(t, tt.wantFilters.ID, bucketId) - require.Equal(t, tt.wantFilters.Name, bucket) - require.Equal(t, tt.want, string(lp)) - }) - } -} - -func TestWriteHandler_handleWrite(t *testing.T) { - // state is the internal state of org and bucket services - type state struct { - org *influxdb.Organization // org to return in org service - orgErr error // err to return in org service - bucket *influxdb.Bucket // bucket to return in bucket service - bucketErr error // err to return in bucket service - writeErr error // err to return from the points writer - opts []WriteHandlerOption // write handle configured options - } - - // want is the expected output of the HTTP endpoint - type wants struct { - body string - code int - } - - // request is sent to the HTTP endpoint - type request struct { - auth influxdb.Authorizer - org string - bucket string - body string - } - - tests := []struct { - name string - request request - state state - wants wants - }{ - { - name: "simple body is accepted", - request: request{ - org: "043e0780ee2b1000", - bucket: "04504b356e23b000", - body: "m1,t1=v1 f1=1", - auth: bucketWritePermission("043e0780ee2b1000", "04504b356e23b000"), - }, - state: state{ - org: testOrg("043e0780ee2b1000"), - bucket: testBucket("043e0780ee2b1000", "04504b356e23b000"), - }, - wants: wants{ - code: 204, - }, - }, - { - name: "partial write error is unprocessable", - request: request{ - org: "043e0780ee2b1000", - bucket: "04504b356e23b000", - body: "m1,t1=v1 f1=1", - auth: bucketWritePermission("043e0780ee2b1000", 
"04504b356e23b000"), - }, - state: state{ - org: testOrg("043e0780ee2b1000"), - bucket: testBucket("043e0780ee2b1000", "04504b356e23b000"), - writeErr: tsdb.PartialWriteError{Reason: "bad points", Dropped: 1}, - }, - wants: wants{ - code: 422, - body: `{"code":"unprocessable entity","message":"failure writing points to database: partial write: bad points dropped=1"}`, - }, - }, - { - name: "points writer error is an internal error", - request: request{ - org: "043e0780ee2b1000", - bucket: "04504b356e23b000", - body: "m1,t1=v1 f1=1", - auth: bucketWritePermission("043e0780ee2b1000", "04504b356e23b000"), - }, - state: state{ - org: testOrg("043e0780ee2b1000"), - bucket: testBucket("043e0780ee2b1000", "04504b356e23b000"), - writeErr: fmt.Errorf("error"), - }, - wants: wants{ - code: 500, - body: `{"code":"internal error","message":"unexpected error writing points to database: error"}`, - }, - }, - { - name: "empty request body is ok", - request: request{ - org: "043e0780ee2b1000", - bucket: "04504b356e23b000", - auth: bucketWritePermission("043e0780ee2b1000", "04504b356e23b000"), - }, - state: state{ - org: testOrg("043e0780ee2b1000"), - bucket: testBucket("043e0780ee2b1000", "04504b356e23b000"), - }, - wants: wants{ - code: 204, - }, - }, - { - name: "org error returns 404 error", - request: request{ - org: "043e0780ee2b1000", - bucket: "04504b356e23b000", - body: "m1,t1=v1 f1=1", - auth: bucketWritePermission("043e0780ee2b1000", "04504b356e23b000"), - }, - state: state{ - orgErr: &errors.Error{Code: errors.ENotFound, Msg: "not found"}, - }, - wants: wants{ - code: 404, - body: `{"code":"not found","message":"not found"}`, - }, - }, - { - name: "missing bucket returns 404", - request: request{ - org: "043e0780ee2b1000", - bucket: "", - body: "m1,t1=v1 f1=1", - auth: bucketWritePermission("043e0780ee2b1000", "04504b356e23b000"), - }, - state: state{ - org: testOrg("043e0780ee2b1000"), - bucket: testBucket("043e0780ee2b1000", "04504b356e23b000"), - }, - wants: wants{ - code: 404, - body: `{"code":"not found","message":"bucket not found"}`, - }, - }, - { - name: "bucket error returns 404 error", - request: request{ - org: "043e0780ee2b1000", - bucket: "04504b356e23b000", - body: "m1,t1=v1 f1=1", - auth: bucketWritePermission("043e0780ee2b1000", "04504b356e23b000"), - }, - state: state{ - org: testOrg("043e0780ee2b1000"), - bucketErr: &errors.Error{Code: errors.ENotFound, Msg: "not found"}, - }, - wants: wants{ - code: 404, - body: `{"code":"not found","message":"not found"}`, - }, - }, - { - name: "500 when bucket service returns internal error", - request: request{ - org: "043e0780ee2b1000", - bucket: "04504b356e23b000", - auth: bucketWritePermission("043e0780ee2b1000", "04504b356e23b000"), - }, - state: state{ - org: testOrg("043e0780ee2b1000"), - bucketErr: &errors.Error{Code: errors.EInternal, Msg: "internal error"}, - }, - wants: wants{ - code: 500, - body: `{"code":"internal error","message":"internal error"}`, - }, - }, - { - name: "invalid line protocol returns 400", - request: request{ - org: "043e0780ee2b1000", - bucket: "04504b356e23b000", - auth: bucketWritePermission("043e0780ee2b1000", "04504b356e23b000"), - body: "invalid", - }, - state: state{ - org: testOrg("043e0780ee2b1000"), - bucket: testBucket("043e0780ee2b1000", "04504b356e23b000"), - }, - wants: wants{ - code: 400, - body: `{"code":"invalid","message":"unable to parse 'invalid': missing fields"}`, - }, - }, - { - name: "forbidden to write with insufficient permission", - request: request{ - org: "043e0780ee2b1000", - 
bucket: "04504b356e23b000", - body: "m1,t1=v1 f1=1", - auth: bucketWritePermission("043e0780ee2b1000", "000000000000000a"), - }, - state: state{ - org: testOrg("043e0780ee2b1000"), - bucket: testBucket("043e0780ee2b1000", "04504b356e23b000"), - }, - wants: wants{ - code: 403, - body: `{"code":"forbidden","message":"insufficient permissions for write"}`, - }, - }, - { - // authorization extraction happens in a different middleware. - name: "no authorizer is an internal error", - request: request{ - org: "043e0780ee2b1000", - bucket: "04504b356e23b000", - }, - state: state{ - org: testOrg("043e0780ee2b1000"), - bucket: testBucket("043e0780ee2b1000", "04504b356e23b000"), - }, - wants: wants{ - code: 500, - body: `{"code":"internal error","message":"authorizer not found on context"}`, - }, - }, - { - name: "large requests rejected", - request: request{ - org: "043e0780ee2b1000", - bucket: "04504b356e23b000", - body: "m1,t1=v1 f1=1", - auth: bucketWritePermission("043e0780ee2b1000", "04504b356e23b000"), - }, - state: state{ - org: testOrg("043e0780ee2b1000"), - bucket: testBucket("043e0780ee2b1000", "04504b356e23b000"), - opts: []WriteHandlerOption{WithMaxBatchSizeBytes(5)}, - }, - wants: wants{ - code: 413, - body: `{"code":"request too large","message":"unable to read data: points batch is too large"}`, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - orgs := mock.NewOrganizationService() - orgs.FindOrganizationF = func(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return tt.state.org, tt.state.orgErr - } - buckets := mock.NewBucketService() - buckets.FindBucketFn = func(context.Context, influxdb.BucketFilter) (*influxdb.Bucket, error) { - return tt.state.bucket, tt.state.bucketErr - } - - b := &APIBackend{ - HTTPErrorHandler: kithttp.NewErrorHandler(zaptest.NewLogger(t)), - Logger: zaptest.NewLogger(t), - OrganizationService: orgs, - BucketService: buckets, - PointsWriter: &mock.PointsWriter{Err: tt.state.writeErr}, - WriteEventRecorder: &metric.NopEventRecorder{}, - } - writeHandler := NewWriteHandler(zaptest.NewLogger(t), NewWriteBackend(zaptest.NewLogger(t), b), tt.state.opts...) 
- handler := httpmock.NewAuthMiddlewareHandler(writeHandler, tt.request.auth) - - r := httptest.NewRequest( - "POST", - "http://localhost:8086/api/v2/write", - strings.NewReader(tt.request.body), - ) - - params := r.URL.Query() - params.Set("org", tt.request.org) - params.Set("bucket", tt.request.bucket) - r.URL.RawQuery = params.Encode() - - w := httptest.NewRecorder() - handler.ServeHTTP(w, r) - if got, want := w.Code, tt.wants.code; got != want { - t.Errorf("unexpected status code: got %d want %d", got, want) - } - - if got, want := w.Body.String(), tt.wants.body; got != want { - t.Errorf("unexpected body: got %s want %s", got, want) - } - }) - } -} - -func bucketWritePermission(org, bucket string) *influxdb.Authorization { - oid := influxtesting.MustIDBase16(org) - bid := influxtesting.MustIDBase16(bucket) - return &influxdb.Authorization{ - OrgID: oid, - Status: influxdb.Active, - Permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: &oid, - ID: &bid, - }, - }, - }, - } -} - -func testOrg(org string) *influxdb.Organization { - oid := influxtesting.MustIDBase16(org) - return &influxdb.Organization{ - ID: oid, - } -} - -func testBucket(org, bucket string) *influxdb.Bucket { - oid := influxtesting.MustIDBase16(org) - bid := influxtesting.MustIDBase16(bucket) - - return &influxdb.Bucket{ - ID: bid, - OrgID: oid, - } -} diff --git a/http/write_usage_recorder.go b/http/write_usage_recorder.go deleted file mode 100644 index c1fec2f329f..00000000000 --- a/http/write_usage_recorder.go +++ /dev/null @@ -1,35 +0,0 @@ -package http - -import ( - "context" - - "github.com/influxdata/influxdb/v2/http/metric" - "github.com/influxdata/influxdb/v2/kit/platform" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" -) - -func NewWriteUsageRecorder(w *kithttp.StatusResponseWriter, recorder metric.EventRecorder) *WriteUsageRecorder { - return &WriteUsageRecorder{ - Writer: w, - EventRecorder: recorder, - } -} - -type WriteUsageRecorder struct { - Writer *kithttp.StatusResponseWriter - EventRecorder metric.EventRecorder -} - -func (w *WriteUsageRecorder) Write(b []byte) (int, error) { - return w.Writer.Write(b) -} - -func (w *WriteUsageRecorder) Record(ctx context.Context, requestBytes int, orgID platform.ID, endpoint string) { - w.EventRecorder.Record(ctx, metric.Event{ - OrgID: orgID, - Endpoint: endpoint, - RequestBytes: requestBytes, - ResponseBytes: w.Writer.ResponseBytes(), - Status: w.Writer.Code(), - }) -} diff --git a/influxql/control/prometheus.go b/influxql/control/prometheus.go deleted file mode 100644 index f970f5dedd0..00000000000 --- a/influxql/control/prometheus.go +++ /dev/null @@ -1,70 +0,0 @@ -package control - -import ( - "github.com/prometheus/client_golang/prometheus" -) - -// controllerMetrics holds metrics related to the query controller. 
-type ControllerMetrics struct { - Requests *prometheus.CounterVec - NotImplemented *prometheus.CounterVec - RequestsLatency *prometheus.HistogramVec - ExecutingDuration *prometheus.HistogramVec -} - -const ( - LabelSuccess = "success" - LabelGenericError = "generic_err" - LabelParseErr = "parse_err" - LabelInterruptedErr = "interrupt_err" - LabelRuntimeError = "runtime_error" - LabelNotImplError = "not_implemented" - LabelNotExecuted = "not_executed" -) - -func NewControllerMetrics(labels []string) *ControllerMetrics { - const ( - namespace = "influxql" - subsystem = "service" - ) - - return &ControllerMetrics{ - Requests: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "requests_total", - Help: "Count of the query requests", - }, append(labels, "result")), - - NotImplemented: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "not_implemented_total", - Help: "Count of the query requests executing unimplemented operations", - }, []string{"operation"}), - - RequestsLatency: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "requests_latency_seconds", - Help: "Histogram of times spent for end-to-end latency (from issuing query request, to receiving the first byte of the response)", - Buckets: prometheus.ExponentialBuckets(1e-3, 5, 7), - }, append(labels, "result")), - - ExecutingDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "executing_duration_seconds", - Help: "Histogram of times spent executing queries", - Buckets: prometheus.ExponentialBuckets(1e-3, 5, 7), - }, append(labels, "result")), - } -} - -func (cm *ControllerMetrics) PrometheusCollectors() []prometheus.Collector { - return []prometheus.Collector{ - cm.Requests, - cm.NotImplemented, - cm.ExecutingDuration, - } -} diff --git a/influxql/errors.go b/influxql/errors.go deleted file mode 100644 index 2362ce71d0a..00000000000 --- a/influxql/errors.go +++ /dev/null @@ -1,15 +0,0 @@ -package influxql - -// NotImplementedError is returned when a specific operation is unavailable. -type NotImplementedError struct { - Op string // Op is the name of the unimplemented operation -} - -func (e *NotImplementedError) Error() string { - return "not implemented: " + e.Op -} - -// ErrNotImplemented creates a NotImplementedError specifying op is unavailable. -func ErrNotImplemented(op string) error { - return &NotImplementedError{Op: op} -} diff --git a/influxql/mock/proxy_query_service.go b/influxql/mock/proxy_query_service.go deleted file mode 100644 index ca240279556..00000000000 --- a/influxql/mock/proxy_query_service.go +++ /dev/null @@ -1,24 +0,0 @@ -package mock - -import ( - "context" - "io" - - "github.com/influxdata/influxdb/v2/influxql" - "github.com/influxdata/influxdb/v2/kit/check" -) - -var _ influxql.ProxyQueryService = (*ProxyQueryService)(nil) - -// ProxyQueryService mocks the InfluxQL QueryService for testing. 
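// ---- Editor's note: illustrative sketch, not part of the original file ----
// NewControllerMetrics and PrometheusCollectors above leave registration to the
// caller. A minimal sketch of wiring the collectors into a registry follows,
// assuming the import paths as they appear in this diff and an "org" label
// chosen by the caller; note that RequestsLatency is not included in the slice
// returned by PrometheusCollectors.
//
//	package main
//
//	import (
//		"github.com/influxdata/influxdb/v2/influxql/control"
//		"github.com/prometheus/client_golang/prometheus"
//	)
//
//	func main() {
//		reg := prometheus.NewRegistry()
//		cm := control.NewControllerMetrics([]string{"org"})
//		reg.MustRegister(cm.PrometheusCollectors()...)
//
//		// Counters are then incremented with the configured labels plus the
//		// "result" label, e.g. one successful request for an org value:
//		cm.Requests.WithLabelValues("my-org", control.LabelSuccess).Inc()
//	}
// ---------------------------------------------------------------------------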
-type ProxyQueryService struct { - QueryF func(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) -} - -func (s *ProxyQueryService) Query(ctx context.Context, w io.Writer, req *influxql.QueryRequest) (influxql.Statistics, error) { - return s.QueryF(ctx, w, req) -} - -func (s *ProxyQueryService) Check(ctx context.Context) check.Response { - return check.Response{Name: "Mock InfluxQL Proxy Query Service", Status: check.StatusPass} -} diff --git a/influxql/query/call_iterator.go b/influxql/query/call_iterator.go deleted file mode 100644 index 936d0886dd7..00000000000 --- a/influxql/query/call_iterator.go +++ /dev/null @@ -1,1600 +0,0 @@ -package query - -import ( - "fmt" - "math" - "sort" - "time" - - "github.com/influxdata/influxdb/v2/influxql/query/internal/gota" - "github.com/influxdata/influxql" -) - -/* -This file contains iterator implementations for each function call available -in InfluxQL. Call iterators are separated into two groups: - -1. Map/reduce-style iterators - these are passed to IteratorCreator so that - processing can be at the low-level storage and aggregates are returned. - -2. Raw aggregate iterators - these require the full set of data for a window. - These are handled by the select() function and raw points are streamed in - from the low-level storage. - -There are helpers to aid in building aggregate iterators. For simple map/reduce -iterators, you can use the reduceIterator types and pass a reduce function. This -reduce function is passed a previous and current value and the new timestamp, -value, and auxiliary fields are returned from it. - -For raw aggregate iterators, you can use the reduceSliceIterators which pass -in a slice of all points to the function and return a point. For more complex -iterator types, you may need to create your own iterators by hand. - -Once your iterator is complete, you'll need to add it to the NewCallIterator() -function if it is to be available to IteratorCreators and add it to the select() -function to allow it to be included during planning. -*/ - -// NewCallIterator returns a new iterator for a Call. -func NewCallIterator(input Iterator, opt IteratorOptions) (Iterator, error) { - name := opt.Expr.(*influxql.Call).Name - switch name { - case "count": - return newCountIterator(input, opt) - case "min": - return newMinIterator(input, opt) - case "max": - return newMaxIterator(input, opt) - case "sum": - return newSumIterator(input, opt) - case "first": - return newFirstIterator(input, opt) - case "last": - return newLastIterator(input, opt) - case "mean": - return newMeanIterator(input, opt) - case "sum_hll": - return NewSumHllIterator(input, opt) - case "merge_hll": - return NewMergeHllIterator(input, opt) - default: - return nil, fmt.Errorf("unsupported function call: %s", name) - } -} - -// newCountIterator returns an iterator for operating on a count() call. -func newCountIterator(input Iterator, opt IteratorOptions) (Iterator, error) { - // FIXME: Wrap iterator in int-type iterator and always output int value. 
- - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, IntegerPointEmitter) { - fn := NewFloatFuncIntegerReducer(FloatCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) - return fn, fn - } - return newFloatReduceIntegerIterator(input, opt, createFn), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerFuncReducer(IntegerCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) - return fn, fn - } - return newIntegerReduceIntegerIterator(input, opt, createFn), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, IntegerPointEmitter) { - fn := NewUnsignedFuncIntegerReducer(UnsignedCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) - return fn, fn - } - return newUnsignedReduceIntegerIterator(input, opt, createFn), nil - case StringIterator: - createFn := func() (StringPointAggregator, IntegerPointEmitter) { - fn := NewStringFuncIntegerReducer(StringCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) - return fn, fn - } - return newStringReduceIntegerIterator(input, opt, createFn), nil - case BooleanIterator: - createFn := func() (BooleanPointAggregator, IntegerPointEmitter) { - fn := NewBooleanFuncIntegerReducer(BooleanCountReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) - return fn, fn - } - return newBooleanReduceIntegerIterator(input, opt, createFn), nil - default: - return nil, fmt.Errorf("unsupported count iterator type: %T", input) - } -} - -// FloatCountReduce returns the count of points. -func FloatCountReduce(prev *IntegerPoint, curr *FloatPoint) (int64, int64, []interface{}) { - if prev == nil { - return ZeroTime, 1, nil - } - return ZeroTime, prev.Value + 1, nil -} - -// IntegerCountReduce returns the count of points. -func IntegerCountReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { - if prev == nil { - return ZeroTime, 1, nil - } - return ZeroTime, prev.Value + 1, nil -} - -// UnsignedCountReduce returns the count of points. -func UnsignedCountReduce(prev *IntegerPoint, curr *UnsignedPoint) (int64, int64, []interface{}) { - if prev == nil { - return ZeroTime, 1, nil - } - return ZeroTime, prev.Value + 1, nil -} - -// StringCountReduce returns the count of points. -func StringCountReduce(prev *IntegerPoint, curr *StringPoint) (int64, int64, []interface{}) { - if prev == nil { - return ZeroTime, 1, nil - } - return ZeroTime, prev.Value + 1, nil -} - -// BooleanCountReduce returns the count of points. -func BooleanCountReduce(prev *IntegerPoint, curr *BooleanPoint) (int64, int64, []interface{}) { - if prev == nil { - return ZeroTime, 1, nil - } - return ZeroTime, prev.Value + 1, nil -} - -// newMinIterator returns an iterator for operating on a min() call. 
-func newMinIterator(input Iterator, opt IteratorOptions) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatFuncReducer(FloatMinReduce, nil) - return fn, fn - } - return newFloatReduceFloatIterator(input, opt, createFn), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerFuncReducer(IntegerMinReduce, nil) - return fn, fn - } - return newIntegerReduceIntegerIterator(input, opt, createFn), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { - fn := NewUnsignedFuncReducer(UnsignedMinReduce, nil) - return fn, fn - } - return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil - case BooleanIterator: - createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { - fn := NewBooleanFuncReducer(BooleanMinReduce, nil) - return fn, fn - } - return newBooleanReduceBooleanIterator(input, opt, createFn), nil - default: - return nil, fmt.Errorf("unsupported min iterator type: %T", input) - } -} - -// FloatMinReduce returns the minimum value between prev & curr. -func FloatMinReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { - if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// IntegerMinReduce returns the minimum value between prev & curr. -func IntegerMinReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { - if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// UnsignedMinReduce returns the minimum value between prev & curr. -func UnsignedMinReduce(prev, curr *UnsignedPoint) (int64, uint64, []interface{}) { - if prev == nil || curr.Value < prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// BooleanMinReduce returns the minimum value between prev & curr. -func BooleanMinReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { - if prev == nil || (curr.Value != prev.Value && !curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// newMaxIterator returns an iterator for operating on a max() call. 
-func newMaxIterator(input Iterator, opt IteratorOptions) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatFuncReducer(FloatMaxReduce, nil) - return fn, fn - } - return newFloatReduceFloatIterator(input, opt, createFn), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerFuncReducer(IntegerMaxReduce, nil) - return fn, fn - } - return newIntegerReduceIntegerIterator(input, opt, createFn), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { - fn := NewUnsignedFuncReducer(UnsignedMaxReduce, nil) - return fn, fn - } - return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil - case BooleanIterator: - createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { - fn := NewBooleanFuncReducer(BooleanMaxReduce, nil) - return fn, fn - } - return newBooleanReduceBooleanIterator(input, opt, createFn), nil - default: - return nil, fmt.Errorf("unsupported max iterator type: %T", input) - } -} - -// FloatMaxReduce returns the maximum value between prev & curr. -func FloatMaxReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { - if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// IntegerMaxReduce returns the maximum value between prev & curr. -func IntegerMaxReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { - if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// UnsignedMaxReduce returns the maximum value between prev & curr. -func UnsignedMaxReduce(prev, curr *UnsignedPoint) (int64, uint64, []interface{}) { - if prev == nil || curr.Value > prev.Value || (curr.Value == prev.Value && curr.Time < prev.Time) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// BooleanMaxReduce returns the minimum value between prev & curr. -func BooleanMaxReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { - if prev == nil || (curr.Value != prev.Value && curr.Value) || (curr.Value == prev.Value && curr.Time < prev.Time) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// newSumIterator returns an iterator for operating on a sum() call. 
-func newSumIterator(input Iterator, opt IteratorOptions) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatFuncReducer(FloatSumReduce, &FloatPoint{Value: 0, Time: ZeroTime}) - return fn, fn - } - return newFloatReduceFloatIterator(input, opt, createFn), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerFuncReducer(IntegerSumReduce, &IntegerPoint{Value: 0, Time: ZeroTime}) - return fn, fn - } - return newIntegerReduceIntegerIterator(input, opt, createFn), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { - fn := NewUnsignedFuncReducer(UnsignedSumReduce, &UnsignedPoint{Value: 0, Time: ZeroTime}) - return fn, fn - } - return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil - default: - return nil, fmt.Errorf("unsupported sum iterator type: %T", input) - } -} - -// FloatSumReduce returns the sum prev value & curr value. -func FloatSumReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { - if prev == nil { - return ZeroTime, curr.Value, nil - } - return prev.Time, prev.Value + curr.Value, nil -} - -// IntegerSumReduce returns the sum prev value & curr value. -func IntegerSumReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { - if prev == nil { - return ZeroTime, curr.Value, nil - } - return prev.Time, prev.Value + curr.Value, nil -} - -// UnsignedSumReduce returns the sum prev value & curr value. -func UnsignedSumReduce(prev, curr *UnsignedPoint) (int64, uint64, []interface{}) { - if prev == nil { - return ZeroTime, curr.Value, nil - } - return prev.Time, prev.Value + curr.Value, nil -} - -// newFirstIterator returns an iterator for operating on a first() call. -func newFirstIterator(input Iterator, opt IteratorOptions) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatFuncReducer(FloatFirstReduce, nil) - return fn, fn - } - return newFloatReduceFloatIterator(input, opt, createFn), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerFuncReducer(IntegerFirstReduce, nil) - return fn, fn - } - return newIntegerReduceIntegerIterator(input, opt, createFn), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { - fn := NewUnsignedFuncReducer(UnsignedFirstReduce, nil) - return fn, fn - } - return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil - case StringIterator: - createFn := func() (StringPointAggregator, StringPointEmitter) { - fn := NewStringFuncReducer(StringFirstReduce, nil) - return fn, fn - } - return newStringReduceStringIterator(input, opt, createFn), nil - case BooleanIterator: - createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { - fn := NewBooleanFuncReducer(BooleanFirstReduce, nil) - return fn, fn - } - return newBooleanReduceBooleanIterator(input, opt, createFn), nil - default: - return nil, fmt.Errorf("unsupported first iterator type: %T", input) - } -} - -// FloatFirstReduce returns the first point sorted by time. 
-func FloatFirstReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { - if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// IntegerFirstReduce returns the first point sorted by time. -func IntegerFirstReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { - if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// UnsignedFirstReduce returns the first point sorted by time. -func UnsignedFirstReduce(prev, curr *UnsignedPoint) (int64, uint64, []interface{}) { - if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// StringFirstReduce returns the first point sorted by time. -func StringFirstReduce(prev, curr *StringPoint) (int64, string, []interface{}) { - if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// BooleanFirstReduce returns the first point sorted by time. -func BooleanFirstReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { - if prev == nil || curr.Time < prev.Time || (curr.Time == prev.Time && !curr.Value && prev.Value) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// newLastIterator returns an iterator for operating on a last() call. -func newLastIterator(input Iterator, opt IteratorOptions) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatFuncReducer(FloatLastReduce, nil) - return fn, fn - } - return newFloatReduceFloatIterator(input, opt, createFn), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerFuncReducer(IntegerLastReduce, nil) - return fn, fn - } - return newIntegerReduceIntegerIterator(input, opt, createFn), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { - fn := NewUnsignedFuncReducer(UnsignedLastReduce, nil) - return fn, fn - } - return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil - case StringIterator: - createFn := func() (StringPointAggregator, StringPointEmitter) { - fn := NewStringFuncReducer(StringLastReduce, nil) - return fn, fn - } - return newStringReduceStringIterator(input, opt, createFn), nil - case BooleanIterator: - createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { - fn := NewBooleanFuncReducer(BooleanLastReduce, nil) - return fn, fn - } - return newBooleanReduceBooleanIterator(input, opt, createFn), nil - default: - return nil, fmt.Errorf("unsupported last iterator type: %T", input) - } -} - -// FloatLastReduce returns the last point sorted by time. -func FloatLastReduce(prev, curr *FloatPoint) (int64, float64, []interface{}) { - if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// IntegerLastReduce returns the last point sorted by time. 
-func IntegerLastReduce(prev, curr *IntegerPoint) (int64, int64, []interface{}) { - if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// UnsignedLastReduce returns the last point sorted by time. -func UnsignedLastReduce(prev, curr *UnsignedPoint) (int64, uint64, []interface{}) { - if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// StringLastReduce returns the first point sorted by time. -func StringLastReduce(prev, curr *StringPoint) (int64, string, []interface{}) { - if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value > prev.Value) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// BooleanLastReduce returns the first point sorted by time. -func BooleanLastReduce(prev, curr *BooleanPoint) (int64, bool, []interface{}) { - if prev == nil || curr.Time > prev.Time || (curr.Time == prev.Time && curr.Value && !prev.Value) { - return curr.Time, curr.Value, cloneAux(curr.Aux) - } - return prev.Time, prev.Value, prev.Aux -} - -// NewDistinctIterator returns an iterator for operating on a distinct() call. -func NewDistinctIterator(input Iterator, opt IteratorOptions) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatDistinctReducer() - return fn, fn - } - return newFloatReduceFloatIterator(input, opt, createFn), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerDistinctReducer() - return fn, fn - } - return newIntegerReduceIntegerIterator(input, opt, createFn), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { - fn := NewUnsignedDistinctReducer() - return fn, fn - } - return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil - case StringIterator: - createFn := func() (StringPointAggregator, StringPointEmitter) { - fn := NewStringDistinctReducer() - return fn, fn - } - return newStringReduceStringIterator(input, opt, createFn), nil - case BooleanIterator: - createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { - fn := NewBooleanDistinctReducer() - return fn, fn - } - return newBooleanReduceBooleanIterator(input, opt, createFn), nil - default: - return nil, fmt.Errorf("unsupported distinct iterator type: %T", input) - } -} - -// newMeanIterator returns an iterator for operating on a mean() call. 
-func newMeanIterator(input Iterator, opt IteratorOptions) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatMeanReducer() - return fn, fn - } - return newFloatReduceFloatIterator(input, opt, createFn), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, FloatPointEmitter) { - fn := NewIntegerMeanReducer() - return fn, fn - } - return newIntegerReduceFloatIterator(input, opt, createFn), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { - fn := NewUnsignedMeanReducer() - return fn, fn - } - return newUnsignedReduceFloatIterator(input, opt, createFn), nil - default: - return nil, fmt.Errorf("unsupported mean iterator type: %T", input) - } -} - -// NewMedianIterator returns an iterator for operating on a median() call. -func NewMedianIterator(input Iterator, opt IteratorOptions) (Iterator, error) { - return newMedianIterator(input, opt) -} - -// newMedianIterator returns an iterator for operating on a median() call. -func newMedianIterator(input Iterator, opt IteratorOptions) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatSliceFuncReducer(FloatMedianReduceSlice) - return fn, fn - } - return newFloatReduceFloatIterator(input, opt, createFn), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, FloatPointEmitter) { - fn := NewIntegerSliceFuncFloatReducer(IntegerMedianReduceSlice) - return fn, fn - } - return newIntegerReduceFloatIterator(input, opt, createFn), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { - fn := NewUnsignedSliceFuncFloatReducer(UnsignedMedianReduceSlice) - return fn, fn - } - return newUnsignedReduceFloatIterator(input, opt, createFn), nil - default: - return nil, fmt.Errorf("unsupported median iterator type: %T", input) - } -} - -// FloatMedianReduceSlice returns the median value within a window. -func FloatMedianReduceSlice(a []FloatPoint) []FloatPoint { - if len(a) == 1 { - return a - } - - // OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1. - - // Return the middle value from the points. - // If there are an even number of points then return the mean of the two middle points. - sort.Sort(floatPointsByValue(a)) - if len(a)%2 == 0 { - lo, hi := a[len(a)/2-1], a[(len(a)/2)] - return []FloatPoint{{Time: ZeroTime, Value: lo.Value + (hi.Value-lo.Value)/2}} - } - return []FloatPoint{{Time: ZeroTime, Value: a[len(a)/2].Value}} -} - -// IntegerMedianReduceSlice returns the median value within a window. -func IntegerMedianReduceSlice(a []IntegerPoint) []FloatPoint { - if len(a) == 1 { - return []FloatPoint{{Time: ZeroTime, Value: float64(a[0].Value)}} - } - - // OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1. - - // Return the middle value from the points. - // If there are an even number of points then return the mean of the two middle points. - sort.Sort(integerPointsByValue(a)) - if len(a)%2 == 0 { - lo, hi := a[len(a)/2-1], a[(len(a)/2)] - return []FloatPoint{{Time: ZeroTime, Value: float64(lo.Value) + float64(hi.Value-lo.Value)/2}} - } - return []FloatPoint{{Time: ZeroTime, Value: float64(a[len(a)/2].Value)}} -} - -// UnsignedMedianReduceSlice returns the median value within a window. 
-func UnsignedMedianReduceSlice(a []UnsignedPoint) []FloatPoint { - if len(a) == 1 { - return []FloatPoint{{Time: ZeroTime, Value: float64(a[0].Value)}} - } - - // OPTIMIZE(benbjohnson): Use getSortedRange() from v0.9.5.1. - - // Return the middle value from the points. - // If there are an even number of points then return the mean of the two middle points. - sort.Sort(unsignedPointsByValue(a)) - if len(a)%2 == 0 { - lo, hi := a[len(a)/2-1], a[(len(a)/2)] - return []FloatPoint{{Time: ZeroTime, Value: float64(lo.Value) + float64(hi.Value-lo.Value)/2}} - } - return []FloatPoint{{Time: ZeroTime, Value: float64(a[len(a)/2].Value)}} -} - -// newModeIterator returns an iterator for operating on a mode() call. -func NewModeIterator(input Iterator, opt IteratorOptions) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatSliceFuncReducer(FloatModeReduceSlice) - return fn, fn - } - return newFloatReduceFloatIterator(input, opt, createFn), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerSliceFuncReducer(IntegerModeReduceSlice) - return fn, fn - } - return newIntegerReduceIntegerIterator(input, opt, createFn), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { - fn := NewUnsignedSliceFuncReducer(UnsignedModeReduceSlice) - return fn, fn - } - return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil - case StringIterator: - createFn := func() (StringPointAggregator, StringPointEmitter) { - fn := NewStringSliceFuncReducer(StringModeReduceSlice) - return fn, fn - } - return newStringReduceStringIterator(input, opt, createFn), nil - case BooleanIterator: - createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { - fn := NewBooleanSliceFuncReducer(BooleanModeReduceSlice) - return fn, fn - } - return newBooleanReduceBooleanIterator(input, opt, createFn), nil - default: - return nil, fmt.Errorf("unsupported median iterator type: %T", input) - } -} - -// FloatModeReduceSlice returns the mode value within a window. -func FloatModeReduceSlice(a []FloatPoint) []FloatPoint { - if len(a) == 1 { - return a - } - - sort.Sort(floatPointsByValue(a)) - - mostFreq := 0 - currFreq := 0 - currMode := a[0].Value - mostMode := a[0].Value - mostTime := a[0].Time - currTime := a[0].Time - - for _, p := range a { - if p.Value != currMode { - currFreq = 1 - currMode = p.Value - currTime = p.Time - continue - } - currFreq++ - if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) { - continue - } - mostFreq = currFreq - mostMode = p.Value - mostTime = p.Time - } - - return []FloatPoint{{Time: ZeroTime, Value: mostMode}} -} - -// IntegerModeReduceSlice returns the mode value within a window. -func IntegerModeReduceSlice(a []IntegerPoint) []IntegerPoint { - if len(a) == 1 { - return a - } - sort.Sort(integerPointsByValue(a)) - - mostFreq := 0 - currFreq := 0 - currMode := a[0].Value - mostMode := a[0].Value - mostTime := a[0].Time - currTime := a[0].Time - - for _, p := range a { - if p.Value != currMode { - currFreq = 1 - currMode = p.Value - currTime = p.Time - continue - } - currFreq++ - if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) { - continue - } - mostFreq = currFreq - mostMode = p.Value - mostTime = p.Time - } - - return []IntegerPoint{{Time: ZeroTime, Value: mostMode}} -} - -// UnsignedModeReduceSlice returns the mode value within a window. 
-func UnsignedModeReduceSlice(a []UnsignedPoint) []UnsignedPoint { - if len(a) == 1 { - return a - } - sort.Sort(unsignedPointsByValue(a)) - - mostFreq := 0 - currFreq := 0 - currMode := a[0].Value - mostMode := a[0].Value - mostTime := a[0].Time - currTime := a[0].Time - - for _, p := range a { - if p.Value != currMode { - currFreq = 1 - currMode = p.Value - currTime = p.Time - continue - } - currFreq++ - if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) { - continue - } - mostFreq = currFreq - mostMode = p.Value - mostTime = p.Time - } - - return []UnsignedPoint{{Time: ZeroTime, Value: mostMode}} -} - -// StringModeReduceSlice returns the mode value within a window. -func StringModeReduceSlice(a []StringPoint) []StringPoint { - if len(a) == 1 { - return a - } - - sort.Sort(stringPointsByValue(a)) - - mostFreq := 0 - currFreq := 0 - currMode := a[0].Value - mostMode := a[0].Value - mostTime := a[0].Time - currTime := a[0].Time - - for _, p := range a { - if p.Value != currMode { - currFreq = 1 - currMode = p.Value - currTime = p.Time - continue - } - currFreq++ - if mostFreq > currFreq || (mostFreq == currFreq && currTime > mostTime) { - continue - } - mostFreq = currFreq - mostMode = p.Value - mostTime = p.Time - } - - return []StringPoint{{Time: ZeroTime, Value: mostMode}} -} - -// BooleanModeReduceSlice returns the mode value within a window. -func BooleanModeReduceSlice(a []BooleanPoint) []BooleanPoint { - if len(a) == 1 { - return a - } - - trueFreq := 0 - falsFreq := 0 - mostMode := false - - for _, p := range a { - if p.Value { - trueFreq++ - } else { - falsFreq++ - } - } - // In case either of true or false are mode then retuned mode value wont be - // of metric with oldest timestamp - if trueFreq >= falsFreq { - mostMode = true - } - - return []BooleanPoint{{Time: ZeroTime, Value: mostMode}} -} - -// newStddevIterator returns an iterator for operating on a stddev() call. -func newStddevIterator(input Iterator, opt IteratorOptions) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatSliceFuncReducer(FloatStddevReduceSlice) - return fn, fn - } - return newFloatReduceFloatIterator(input, opt, createFn), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, FloatPointEmitter) { - fn := NewIntegerSliceFuncFloatReducer(IntegerStddevReduceSlice) - return fn, fn - } - return newIntegerReduceFloatIterator(input, opt, createFn), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { - fn := NewUnsignedSliceFuncFloatReducer(UnsignedStddevReduceSlice) - return fn, fn - } - return newUnsignedReduceFloatIterator(input, opt, createFn), nil - default: - return nil, fmt.Errorf("unsupported stddev iterator type: %T", input) - } -} - -// FloatStddevReduceSlice returns the stddev value within a window. -func FloatStddevReduceSlice(a []FloatPoint) []FloatPoint { - // If there is only one point then return NaN. - if len(a) < 2 { - return []FloatPoint{{Time: ZeroTime, Value: math.NaN()}} - } - - // Calculate the mean. - var mean float64 - var count int - for _, p := range a { - if math.IsNaN(p.Value) { - continue - } - count++ - mean += (p.Value - mean) / float64(count) - } - - // Calculate the variance. 
- var variance float64 - for _, p := range a { - if math.IsNaN(p.Value) { - continue - } - variance += math.Pow(p.Value-mean, 2) - } - return []FloatPoint{{ - Time: ZeroTime, - Value: math.Sqrt(variance / float64(count-1)), - }} -} - -// IntegerStddevReduceSlice returns the stddev value within a window. -func IntegerStddevReduceSlice(a []IntegerPoint) []FloatPoint { - // If there is only one point then return NaN. - if len(a) < 2 { - return []FloatPoint{{Time: ZeroTime, Value: math.NaN()}} - } - - // Calculate the mean. - var mean float64 - var count int - for _, p := range a { - count++ - mean += (float64(p.Value) - mean) / float64(count) - } - - // Calculate the variance. - var variance float64 - for _, p := range a { - variance += math.Pow(float64(p.Value)-mean, 2) - } - return []FloatPoint{{ - Time: ZeroTime, - Value: math.Sqrt(variance / float64(count-1)), - }} -} - -// UnsignedStddevReduceSlice returns the stddev value within a window. -func UnsignedStddevReduceSlice(a []UnsignedPoint) []FloatPoint { - // If there is only one point then return NaN. - if len(a) < 2 { - return []FloatPoint{{Time: ZeroTime, Value: math.NaN()}} - } - - // Calculate the mean. - var mean float64 - var count int - for _, p := range a { - count++ - mean += (float64(p.Value) - mean) / float64(count) - } - - // Calculate the variance. - var variance float64 - for _, p := range a { - variance += math.Pow(float64(p.Value)-mean, 2) - } - return []FloatPoint{{ - Time: ZeroTime, - Value: math.Sqrt(variance / float64(count-1)), - }} -} - -// newSpreadIterator returns an iterator for operating on a spread() call. -func newSpreadIterator(input Iterator, opt IteratorOptions) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatSpreadReducer() - return fn, fn - } - return newFloatReduceFloatIterator(input, opt, createFn), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerSpreadReducer() - return fn, fn - } - return newIntegerReduceIntegerIterator(input, opt, createFn), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { - fn := NewUnsignedSpreadReducer() - return fn, fn - } - return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil - default: - return nil, fmt.Errorf("unsupported spread iterator type: %T", input) - } -} - -func newTopIterator(input Iterator, opt IteratorOptions, n int, keepTags bool) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatTopReducer(n) - return fn, fn - } - itr := newFloatReduceFloatIterator(input, opt, createFn) - itr.keepTags = keepTags - return itr, nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerTopReducer(n) - return fn, fn - } - itr := newIntegerReduceIntegerIterator(input, opt, createFn) - itr.keepTags = keepTags - return itr, nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { - fn := NewUnsignedTopReducer(n) - return fn, fn - } - itr := newUnsignedReduceUnsignedIterator(input, opt, createFn) - itr.keepTags = keepTags - return itr, nil - default: - return nil, fmt.Errorf("unsupported top iterator type: %T", input) - } -} - -func newBottomIterator(input Iterator, opt IteratorOptions, n int, keepTags bool) (Iterator, error) { - switch input := 
input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatBottomReducer(n) - return fn, fn - } - itr := newFloatReduceFloatIterator(input, opt, createFn) - itr.keepTags = keepTags - return itr, nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerBottomReducer(n) - return fn, fn - } - itr := newIntegerReduceIntegerIterator(input, opt, createFn) - itr.keepTags = keepTags - return itr, nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { - fn := NewUnsignedBottomReducer(n) - return fn, fn - } - itr := newUnsignedReduceUnsignedIterator(input, opt, createFn) - itr.keepTags = keepTags - return itr, nil - default: - return nil, fmt.Errorf("unsupported bottom iterator type: %T", input) - } -} - -// newPercentileIterator returns an iterator for operating on a percentile() call. -func newPercentileIterator(input Iterator, opt IteratorOptions, percentile float64) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - floatPercentileReduceSlice := NewFloatPercentileReduceSliceFunc(percentile) - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatSliceFuncReducer(floatPercentileReduceSlice) - return fn, fn - } - return newFloatReduceFloatIterator(input, opt, createFn), nil - case IntegerIterator: - integerPercentileReduceSlice := NewIntegerPercentileReduceSliceFunc(percentile) - createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerSliceFuncReducer(integerPercentileReduceSlice) - return fn, fn - } - return newIntegerReduceIntegerIterator(input, opt, createFn), nil - case UnsignedIterator: - unsignedPercentileReduceSlice := NewUnsignedPercentileReduceSliceFunc(percentile) - createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { - fn := NewUnsignedSliceFuncReducer(unsignedPercentileReduceSlice) - return fn, fn - } - return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil - default: - return nil, fmt.Errorf("unsupported percentile iterator type: %T", input) - } -} - -// NewFloatPercentileReduceSliceFunc returns the percentile value within a window. -func NewFloatPercentileReduceSliceFunc(percentile float64) FloatReduceSliceFunc { - return func(a []FloatPoint) []FloatPoint { - length := len(a) - i := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1 - - if i < 0 || i >= length { - return nil - } - - sort.Sort(floatPointsByValue(a)) - return []FloatPoint{{Time: a[i].Time, Value: a[i].Value, Aux: cloneAux(a[i].Aux)}} - } -} - -// NewIntegerPercentileReduceSliceFunc returns the percentile value within a window. -func NewIntegerPercentileReduceSliceFunc(percentile float64) IntegerReduceSliceFunc { - return func(a []IntegerPoint) []IntegerPoint { - length := len(a) - i := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1 - - if i < 0 || i >= length { - return nil - } - - sort.Sort(integerPointsByValue(a)) - return []IntegerPoint{{Time: a[i].Time, Value: a[i].Value, Aux: cloneAux(a[i].Aux)}} - } -} - -// NewUnsignedPercentileReduceSliceFunc returns the percentile value within a window. 
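All three percentile reduce functions — the float and integer variants above and the unsigned variant that follows — use the same nearest-rank selection: sort the window by value and return the element at index floor(len*percentile/100 + 0.5) - 1, or nothing when that index falls outside the window. A small standalone sketch of that index arithmetic (the helper name is illustrative, not part of this file):

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

// nearestRankIndex mirrors the index arithmetic used by the percentile
// reducers: round len*percentile/100 to the nearest integer, then convert
// to a zero-based index. A negative or out-of-range result means the
// reducer emits nothing.
func nearestRankIndex(length int, percentile float64) int {
	return int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1
}

func main() {
	values := []float64{12, 3, 7, 25, 9}
	sort.Float64s(values) // [3 7 9 12 25]

	fmt.Println(values[nearestRankIndex(len(values), 90)]) // 25
	fmt.Println(values[nearestRankIndex(len(values), 50)]) // 9
}
```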
-func NewUnsignedPercentileReduceSliceFunc(percentile float64) UnsignedReduceSliceFunc { - return func(a []UnsignedPoint) []UnsignedPoint { - length := len(a) - i := int(math.Floor(float64(length)*percentile/100.0+0.5)) - 1 - - if i < 0 || i >= length { - return nil - } - - sort.Sort(unsignedPointsByValue(a)) - return []UnsignedPoint{{Time: a[i].Time, Value: a[i].Value, Aux: cloneAux(a[i].Aux)}} - } -} - -// newDerivativeIterator returns an iterator for operating on a derivative() call. -func newDerivativeIterator(input Iterator, opt IteratorOptions, interval Interval, isNonNegative bool) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatDerivativeReducer(interval, isNonNegative, opt.Ascending) - return fn, fn - } - return newFloatStreamFloatIterator(input, createFn, opt), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, FloatPointEmitter) { - fn := NewIntegerDerivativeReducer(interval, isNonNegative, opt.Ascending) - return fn, fn - } - return newIntegerStreamFloatIterator(input, createFn, opt), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { - fn := NewUnsignedDerivativeReducer(interval, isNonNegative, opt.Ascending) - return fn, fn - } - return newUnsignedStreamFloatIterator(input, createFn, opt), nil - default: - return nil, fmt.Errorf("unsupported derivative iterator type: %T", input) - } -} - -// newDifferenceIterator returns an iterator for operating on a difference() call. -func newDifferenceIterator(input Iterator, opt IteratorOptions, isNonNegative bool) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatDifferenceReducer(isNonNegative) - return fn, fn - } - return newFloatStreamFloatIterator(input, createFn, opt), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerDifferenceReducer(isNonNegative) - return fn, fn - } - return newIntegerStreamIntegerIterator(input, createFn, opt), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) { - fn := NewUnsignedDifferenceReducer(isNonNegative) - return fn, fn - } - return newUnsignedStreamUnsignedIterator(input, createFn, opt), nil - default: - return nil, fmt.Errorf("unsupported difference iterator type: %T", input) - } -} - -// newElapsedIterator returns an iterator for operating on a elapsed() call. 
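The elapsed reducers constructed below are stream reducers: for each point after the first they emit an integer point whose value is the gap to the previous timestamp, expressed in units of the interval passed to elapsed(). A minimal sketch of that arithmetic (hypothetical helper; the real reducers also carry tags and cover every point type):

```go
package main

import (
	"fmt"
	"time"
)

// elapsedBetween shows the arithmetic behind an elapsed() result: the raw
// nanosecond delta between two consecutive timestamps, divided by the
// requested unit. This is an illustrative helper, not the actual reducer.
func elapsedBetween(prev, curr int64, unit time.Duration) int64 {
	return (curr - prev) / int64(unit)
}

func main() {
	prev := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC).UnixNano()
	curr := prev + int64(1500*time.Millisecond)
	fmt.Println(elapsedBetween(prev, curr, time.Second))      // 1
	fmt.Println(elapsedBetween(prev, curr, time.Millisecond)) // 1500
}
```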
-func newElapsedIterator(input Iterator, opt IteratorOptions, interval Interval) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, IntegerPointEmitter) { - fn := NewFloatElapsedReducer(interval) - return fn, fn - } - return newFloatStreamIntegerIterator(input, createFn, opt), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerElapsedReducer(interval) - return fn, fn - } - return newIntegerStreamIntegerIterator(input, createFn, opt), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, IntegerPointEmitter) { - fn := NewUnsignedElapsedReducer(interval) - return fn, fn - } - return newUnsignedStreamIntegerIterator(input, createFn, opt), nil - case BooleanIterator: - createFn := func() (BooleanPointAggregator, IntegerPointEmitter) { - fn := NewBooleanElapsedReducer(interval) - return fn, fn - } - return newBooleanStreamIntegerIterator(input, createFn, opt), nil - case StringIterator: - createFn := func() (StringPointAggregator, IntegerPointEmitter) { - fn := NewStringElapsedReducer(interval) - return fn, fn - } - return newStringStreamIntegerIterator(input, createFn, opt), nil - default: - return nil, fmt.Errorf("unsupported elapsed iterator type: %T", input) - } -} - -// newMovingAverageIterator returns an iterator for operating on a moving_average() call. -func newMovingAverageIterator(input Iterator, n int, opt IteratorOptions) (Iterator, error) { - switch input := input.(type) { - case FloatIterator: - createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatMovingAverageReducer(n) - return fn, fn - } - return newFloatStreamFloatIterator(input, createFn, opt), nil - case IntegerIterator: - createFn := func() (IntegerPointAggregator, FloatPointEmitter) { - fn := NewIntegerMovingAverageReducer(n) - return fn, fn - } - return newIntegerStreamFloatIterator(input, createFn, opt), nil - case UnsignedIterator: - createFn := func() (UnsignedPointAggregator, FloatPointEmitter) { - fn := NewUnsignedMovingAverageReducer(n) - return fn, fn - } - return newUnsignedStreamFloatIterator(input, createFn, opt), nil - default: - return nil, fmt.Errorf("unsupported moving average iterator type: %T", input) - } -} - -// newExponentialMovingAverageIterator returns an iterator for operating on an exponential_moving_average() call. 
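The exponential family below follows the same constructor shape as newMovingAverageIterator above: each case builds a createFn whose reducer acts as both aggregator and emitter, with the math delegated to reducers backed by the gota package. As a rough sketch of the underlying smoothing (assuming the conventional factor alpha = 2/(n+1); the real reducers also take nHold and a warm-up type, which this sketch ignores):

```go
package main

import "fmt"

// ema is a minimal sketch of exponential smoothing with alpha = 2/(n+1).
// The reducers used below delegate to the gota package and additionally
// handle warm-up behavior, which is omitted here.
type ema struct {
	n      int
	value  float64
	primed bool
}

func (e *ema) push(v float64) float64 {
	alpha := 2.0 / (float64(e.n) + 1.0)
	if !e.primed {
		e.value, e.primed = v, true
	} else {
		e.value = alpha*v + (1-alpha)*e.value
	}
	return e.value
}

func main() {
	e := &ema{n: 3} // alpha = 0.5
	for _, v := range []float64{10, 20, 30} {
		fmt.Println(e.push(v)) // 10, 15, 22.5
	}
}
```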
-func newExponentialMovingAverageIterator(input Iterator, n, nHold int, warmupType gota.WarmupType, opt IteratorOptions) (Iterator, error) {
-	switch input := input.(type) {
-	case FloatIterator:
-		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
-			fn := NewExponentialMovingAverageReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newFloatStreamFloatIterator(input, createFn, opt), nil
-	case IntegerIterator:
-		createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
-			fn := NewExponentialMovingAverageReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newIntegerStreamFloatIterator(input, createFn, opt), nil
-	case UnsignedIterator:
-		createFn := func() (UnsignedPointAggregator, FloatPointEmitter) {
-			fn := NewExponentialMovingAverageReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newUnsignedStreamFloatIterator(input, createFn, opt), nil
-	default:
-		return nil, fmt.Errorf("unsupported exponential moving average iterator type: %T", input)
-	}
-}
-
-// newDoubleExponentialMovingAverageIterator returns an iterator for operating on a double_exponential_moving_average() call.
-func newDoubleExponentialMovingAverageIterator(input Iterator, n int, nHold int, warmupType gota.WarmupType, opt IteratorOptions) (Iterator, error) {
-	switch input := input.(type) {
-	case FloatIterator:
-		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
-			fn := NewDoubleExponentialMovingAverageReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newFloatStreamFloatIterator(input, createFn, opt), nil
-	case IntegerIterator:
-		createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
-			fn := NewDoubleExponentialMovingAverageReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newIntegerStreamFloatIterator(input, createFn, opt), nil
-	case UnsignedIterator:
-		createFn := func() (UnsignedPointAggregator, FloatPointEmitter) {
-			fn := NewDoubleExponentialMovingAverageReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newUnsignedStreamFloatIterator(input, createFn, opt), nil
-	default:
-		return nil, fmt.Errorf("unsupported double exponential moving average iterator type: %T", input)
-	}
-}
-
-// newTripleExponentialMovingAverageIterator returns an iterator for operating on a triple_exponential_moving_average() call.
-func newTripleExponentialMovingAverageIterator(input Iterator, n int, nHold int, warmupType gota.WarmupType, opt IteratorOptions) (Iterator, error) {
-	switch input := input.(type) {
-	case FloatIterator:
-		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
-			fn := NewTripleExponentialMovingAverageReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newFloatStreamFloatIterator(input, createFn, opt), nil
-	case IntegerIterator:
-		createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
-			fn := NewTripleExponentialMovingAverageReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newIntegerStreamFloatIterator(input, createFn, opt), nil
-	case UnsignedIterator:
-		createFn := func() (UnsignedPointAggregator, FloatPointEmitter) {
-			fn := NewTripleExponentialMovingAverageReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newUnsignedStreamFloatIterator(input, createFn, opt), nil
-	default:
-		return nil, fmt.Errorf("unsupported triple exponential moving average iterator type: %T", input)
-	}
-}
-
-// newRelativeStrengthIndexIterator returns an iterator for operating on a relative_strength_index() call.
-func newRelativeStrengthIndexIterator(input Iterator, n int, nHold int, warmupType gota.WarmupType, opt IteratorOptions) (Iterator, error) {
-	switch input := input.(type) {
-	case FloatIterator:
-		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
-			fn := NewRelativeStrengthIndexReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newFloatStreamFloatIterator(input, createFn, opt), nil
-	case IntegerIterator:
-		createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
-			fn := NewRelativeStrengthIndexReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newIntegerStreamFloatIterator(input, createFn, opt), nil
-	case UnsignedIterator:
-		createFn := func() (UnsignedPointAggregator, FloatPointEmitter) {
-			fn := NewRelativeStrengthIndexReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newUnsignedStreamFloatIterator(input, createFn, opt), nil
-	default:
-		return nil, fmt.Errorf("unsupported relative strength index iterator type: %T", input)
-	}
-}
-
-// newTripleExponentialDerivativeIterator returns an iterator for operating on a triple_exponential_derivative() call.
-func newTripleExponentialDerivativeIterator(input Iterator, n int, nHold int, warmupType gota.WarmupType, opt IteratorOptions) (Iterator, error) {
-	switch input := input.(type) {
-	case FloatIterator:
-		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
-			fn := NewTripleExponentialDerivativeReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newFloatStreamFloatIterator(input, createFn, opt), nil
-	case IntegerIterator:
-		createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
-			fn := NewTripleExponentialDerivativeReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newIntegerStreamFloatIterator(input, createFn, opt), nil
-	case UnsignedIterator:
-		createFn := func() (UnsignedPointAggregator, FloatPointEmitter) {
-			fn := NewTripleExponentialDerivativeReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newUnsignedStreamFloatIterator(input, createFn, opt), nil
-	default:
-		return nil, fmt.Errorf("unsupported triple exponential derivative iterator type: %T", input)
-	}
-}
-
-// newKaufmansEfficiencyRatioIterator returns an iterator for operating on a kaufmans_efficiency_ratio() call.
-func newKaufmansEfficiencyRatioIterator(input Iterator, n int, nHold int, opt IteratorOptions) (Iterator, error) {
-	switch input := input.(type) {
-	case FloatIterator:
-		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
-			fn := NewKaufmansEfficiencyRatioReducer(n, nHold)
-			return fn, fn
-		}
-		return newFloatStreamFloatIterator(input, createFn, opt), nil
-	case IntegerIterator:
-		createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
-			fn := NewKaufmansEfficiencyRatioReducer(n, nHold)
-			return fn, fn
-		}
-		return newIntegerStreamFloatIterator(input, createFn, opt), nil
-	case UnsignedIterator:
-		createFn := func() (UnsignedPointAggregator, FloatPointEmitter) {
-			fn := NewKaufmansEfficiencyRatioReducer(n, nHold)
-			return fn, fn
-		}
-		return newUnsignedStreamFloatIterator(input, createFn, opt), nil
-	default:
-		return nil, fmt.Errorf("unsupported kaufmans efficiency ratio iterator type: %T", input)
-	}
-}
-
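newKaufmansEfficiencyRatioIterator above wires NewKaufmansEfficiencyRatioReducer into the same per-type stream iterators. Under the conventional definition of Kaufman's efficiency ratio — net change across the window divided by the sum of absolute point-to-point changes — the computation looks roughly like this (a sketch of the standard formula, not the reducer's exact implementation):

```go
package main

import (
	"fmt"
	"math"
)

// efficiencyRatio sketches the conventional Kaufman efficiency ratio:
// |net change across the window| / sum of |step-to-step changes|.
// A perfectly trending window yields 1; a choppy one approaches 0.
func efficiencyRatio(window []float64) float64 {
	if len(window) < 2 {
		return 0
	}
	var noise float64
	for i := 1; i < len(window); i++ {
		noise += math.Abs(window[i] - window[i-1])
	}
	if noise == 0 {
		return 0
	}
	signal := math.Abs(window[len(window)-1] - window[0])
	return signal / noise
}

func main() {
	fmt.Println(efficiencyRatio([]float64{1, 2, 3, 4})) // 1
	fmt.Println(efficiencyRatio([]float64{1, 2, 1, 2})) // 0.333...
}
```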
-// newKaufmansAdaptiveMovingAverageIterator returns an iterator for operating on a kaufmans_adaptive_moving_average() call.
-func newKaufmansAdaptiveMovingAverageIterator(input Iterator, n int, nHold int, opt IteratorOptions) (Iterator, error) {
-	switch input := input.(type) {
-	case FloatIterator:
-		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
-			fn := NewKaufmansAdaptiveMovingAverageReducer(n, nHold)
-			return fn, fn
-		}
-		return newFloatStreamFloatIterator(input, createFn, opt), nil
-	case IntegerIterator:
-		createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
-			fn := NewKaufmansAdaptiveMovingAverageReducer(n, nHold)
-			return fn, fn
-		}
-		return newIntegerStreamFloatIterator(input, createFn, opt), nil
-	case UnsignedIterator:
-		createFn := func() (UnsignedPointAggregator, FloatPointEmitter) {
-			fn := NewKaufmansAdaptiveMovingAverageReducer(n, nHold)
-			return fn, fn
-		}
-		return newUnsignedStreamFloatIterator(input, createFn, opt), nil
-	default:
-		return nil, fmt.Errorf("unsupported kaufmans adaptive moving average iterator type: %T", input)
-	}
-}
-
-// newChandeMomentumOscillatorIterator returns an iterator for operating on a chande_momentum_oscillator() call.
-func newChandeMomentumOscillatorIterator(input Iterator, n int, nHold int, warmupType gota.WarmupType, opt IteratorOptions) (Iterator, error) {
-	switch input := input.(type) {
-	case FloatIterator:
-		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
-			fn := NewChandeMomentumOscillatorReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newFloatStreamFloatIterator(input, createFn, opt), nil
-	case IntegerIterator:
-		createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
-			fn := NewChandeMomentumOscillatorReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newIntegerStreamFloatIterator(input, createFn, opt), nil
-	case UnsignedIterator:
-		createFn := func() (UnsignedPointAggregator, FloatPointEmitter) {
-			fn := NewChandeMomentumOscillatorReducer(n, nHold, warmupType)
-			return fn, fn
-		}
-		return newUnsignedStreamFloatIterator(input, createFn, opt), nil
-	default:
-		return nil, fmt.Errorf("unsupported chande momentum oscillator iterator type: %T", input)
-	}
-}
-
-// newCumulativeSumIterator returns an iterator for operating on a cumulative_sum() call.
-func newCumulativeSumIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
-	switch input := input.(type) {
-	case FloatIterator:
-		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
-			fn := NewFloatCumulativeSumReducer()
-			return fn, fn
-		}
-		return newFloatStreamFloatIterator(input, createFn, opt), nil
-	case IntegerIterator:
-		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
-			fn := NewIntegerCumulativeSumReducer()
-			return fn, fn
-		}
-		return newIntegerStreamIntegerIterator(input, createFn, opt), nil
-	case UnsignedIterator:
-		createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) {
-			fn := NewUnsignedCumulativeSumReducer()
-			return fn, fn
-		}
-		return newUnsignedStreamUnsignedIterator(input, createFn, opt), nil
-	default:
-		return nil, fmt.Errorf("unsupported cumulative sum iterator type: %T", input)
-	}
-}
-
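newCumulativeSumIterator above is one of the simpler stream transforms: every input point is re-emitted with the running total of the values seen so far, preserving the input's point type (float, integer, or unsigned). A minimal sketch of that behavior (hypothetical helper):

```go
package main

import "fmt"

// runningSum sketches what a cumulative_sum() reducer emits: one output per
// input point, carrying the total of everything seen so far in the series.
func runningSum(in []float64) []float64 {
	out := make([]float64, len(in))
	var total float64
	for i, v := range in {
		total += v
		out[i] = total
	}
	return out
}

func main() {
	fmt.Println(runningSum([]float64{2, 3, 5, 7})) // [2 5 10 17]
}
```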
-// newHoltWintersIterator returns an iterator for operating on a holt_winters() call.
-func newHoltWintersIterator(input Iterator, opt IteratorOptions, h, m int, includeFitData bool, interval time.Duration) (Iterator, error) {
-	switch input := input.(type) {
-	case FloatIterator:
-		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
-			fn := NewFloatHoltWintersReducer(h, m, includeFitData, interval)
-			return fn, fn
-		}
-		return newFloatReduceFloatIterator(input, opt, createFn), nil
-	case IntegerIterator:
-		createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
-			fn := NewFloatHoltWintersReducer(h, m, includeFitData, interval)
-			return fn, fn
-		}
-		return newIntegerReduceFloatIterator(input, opt, createFn), nil
-	default:
-		return nil, fmt.Errorf("unsupported holt winters iterator type: %T", input)
-	}
-}
-
-// NewSampleIterator returns an iterator for operating on a sample() call (exported for use in test).
-func NewSampleIterator(input Iterator, opt IteratorOptions, size int) (Iterator, error) {
-	return newSampleIterator(input, opt, size)
-}
-
-// newSampleIterator returns an iterator for operating on a sample() call.
-func newSampleIterator(input Iterator, opt IteratorOptions, size int) (Iterator, error) {
-	switch input := input.(type) {
-	case FloatIterator:
-		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
-			fn := NewFloatSampleReducer(size)
-			return fn, fn
-		}
-		return newFloatReduceFloatIterator(input, opt, createFn), nil
-	case IntegerIterator:
-		createFn := func() (IntegerPointAggregator, IntegerPointEmitter) {
-			fn := NewIntegerSampleReducer(size)
-			return fn, fn
-		}
-		return newIntegerReduceIntegerIterator(input, opt, createFn), nil
-	case UnsignedIterator:
-		createFn := func() (UnsignedPointAggregator, UnsignedPointEmitter) {
-			fn := NewUnsignedSampleReducer(size)
-			return fn, fn
-		}
-		return newUnsignedReduceUnsignedIterator(input, opt, createFn), nil
-	case StringIterator:
-		createFn := func() (StringPointAggregator, StringPointEmitter) {
-			fn := NewStringSampleReducer(size)
-			return fn, fn
-		}
-		return newStringReduceStringIterator(input, opt, createFn), nil
-	case BooleanIterator:
-		createFn := func() (BooleanPointAggregator, BooleanPointEmitter) {
-			fn := NewBooleanSampleReducer(size)
-			return fn, fn
-		}
-		return newBooleanReduceBooleanIterator(input, opt, createFn), nil
-	default:
-		return nil, fmt.Errorf("unsupported sample iterator type: %T", input)
-	}
-}
-
-// newIntegralIterator returns an iterator for operating on an integral() call.
-func newIntegralIterator(input Iterator, opt IteratorOptions, interval Interval) (Iterator, error) {
-	switch input := input.(type) {
-	case FloatIterator:
-		createFn := func() (FloatPointAggregator, FloatPointEmitter) {
-			fn := NewFloatIntegralReducer(interval, opt)
-			return fn, fn
-		}
-		return newFloatStreamFloatIterator(input, createFn, opt), nil
-	case IntegerIterator:
-		createFn := func() (IntegerPointAggregator, FloatPointEmitter) {
-			fn := NewIntegerIntegralReducer(interval, opt)
-			return fn, fn
-		}
-		return newIntegerStreamFloatIterator(input, createFn, opt), nil
-	case UnsignedIterator:
-		createFn := func() (UnsignedPointAggregator, FloatPointEmitter) {
-			fn := NewUnsignedIntegralReducer(interval, opt)
-			return fn, fn
-		}
-		return newUnsignedStreamFloatIterator(input, createFn, opt), nil
-	default:
-		return nil, fmt.Errorf("unsupported integral iterator type: %T", input)
-	}
-}
-
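The three HyperLogLog constructors that follow fit together as a pipeline, which can be read off their emitter types: the sum_hll reducers fold raw values of any point type into a serialized sketch emitted as a string point, merge_hll combines those string sketches, and count_hll converts a sketch into an unsigned cardinality estimate. One plausible way they compose, mirroring the merge_hll(sum_hll(...)) shape added to InfluxQL in this release (the helper itself is illustrative and not part of the original file):

```go
// countDistinctSketch is an illustrative composition of the three HLL
// constructors defined below, inferred from their input/output point types:
// raw values -> string sketches -> merged sketch -> unsigned estimate.
func countDistinctSketch(input Iterator, opt IteratorOptions) (Iterator, error) {
	sketches, err := NewSumHllIterator(input, opt)
	if err != nil {
		return nil, err
	}
	merged, err := NewMergeHllIterator(sketches, opt)
	if err != nil {
		return nil, err
	}
	return NewCountHllIterator(merged, opt)
}
```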
-// NewSumHllIterator returns an iterator for operating on a sum_hll() call.
-func NewSumHllIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
-	switch input := input.(type) {
-	case FloatIterator:
-		createFn := func() (FloatPointAggregator, StringPointEmitter) {
-			fn := NewFloatSumHllReducer()
-			return fn, fn
-		}
-		return newFloatReduceStringIterator(input, opt, createFn), nil
-	case IntegerIterator:
-		createFn := func() (IntegerPointAggregator, StringPointEmitter) {
-			fn := NewIntegerSumHllReducer()
-			return fn, fn
-		}
-		return newIntegerReduceStringIterator(input, opt, createFn), nil
-	case UnsignedIterator:
-		createFn := func() (UnsignedPointAggregator, StringPointEmitter) {
-			fn := NewUnsignedSumHllReducer()
-			return fn, fn
-		}
-		return newUnsignedReduceStringIterator(input, opt, createFn), nil
-	case StringIterator:
-		createFn := func() (StringPointAggregator, StringPointEmitter) {
-			fn := NewStringSumHllReducer()
-			return fn, fn
-		}
-		return newStringReduceStringIterator(input, opt, createFn), nil
-	case BooleanIterator:
-		createFn := func() (BooleanPointAggregator, StringPointEmitter) {
-			fn := NewBooleanSumHllReducer()
-			return fn, fn
-		}
-		return newBooleanReduceStringIterator(input, opt, createFn), nil
-	default:
-		return nil, fmt.Errorf("unsupported sum_hll iterator type: %T", input)
-	}
-}
-
-// NewMergeHllIterator returns an iterator for operating on a merge_hll() call.
-func NewMergeHllIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
-	switch input := input.(type) {
-	case StringIterator:
-		createFn := func() (StringPointAggregator, StringPointEmitter) {
-			fn := NewStringMergeHllReducer()
-			return fn, fn
-		}
-		return newStringReduceStringIterator(input, opt, createFn), nil
-	default:
-		return nil, fmt.Errorf("unsupported merge_hll iterator type: %T", input)
-	}
-}
-
-// NewCountHllIterator returns an iterator for operating on a count_hll() call.
-func NewCountHllIterator(input Iterator, opt IteratorOptions) (Iterator, error) {
-	switch input := input.(type) {
-	case StringIterator:
-		createFn := func() (StringPointAggregator, UnsignedPointEmitter) {
-			fn := NewCountHllReducer()
-			return fn, fn
-		}
-		return newStringStreamUnsignedIterator(input, createFn, opt), nil
-	default:
-		return nil, fmt.Errorf("unsupported count_hll iterator type: %T", input)
-	}
-}
diff --git a/influxql/query/call_iterator_test.go b/influxql/query/call_iterator_test.go
deleted file mode 100644
index 58e46182d86..00000000000
--- a/influxql/query/call_iterator_test.go
+++ /dev/null
@@ -1,1213 +0,0 @@
-package query_test
-
-import (
-	"testing"
-	"time"
-
-	"github.com/google/go-cmp/cmp"
-	"github.com/influxdata/influxdb/v2/influxql/query"
-	"github.com/influxdata/influxql"
-)
-
-// Ensure that a float iterator can be created for a count() call.
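The expectations in the count tests that follow come straight from the IteratorOptions: points are grouped by the host dimension, bucketed into 5-nanosecond windows, and counted per (measurement, host, window). The emitted Time is the window start and Aggregated records how many raw points fed the bucket, so hostA's cpu points at times 0, 1, and 2 collapse into window 0 with value 3, the point at time 5 lands in window 5, and hostB's point at time 23 lands in window 20. A tiny sketch of the bucketing arithmetic (hypothetical helper; non-negative timestamps only):

```go
package main

import "fmt"

// windowStart buckets a nanosecond timestamp into the start of its
// fixed-size window, matching how the 5ns Interval in these tests maps
// time 23 to window 20 and times 0, 1, 2 to window 0.
func windowStart(t, interval int64) int64 {
	return t - t%interval
}

func main() {
	for _, t := range []int64{0, 2, 1, 5, 23} {
		fmt.Println(t, "->", windowStart(t, 5)) // 0, 0, 0, 5, 20
	}
}
```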
-func TestCallIterator_Count_Float(t *testing.T) { - itr, _ := query.NewCallIterator( - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Name: "cpu", Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Name: "cpu", Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Name: "cpu", Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Name: "cpu", Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Name: "cpu", Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - {Name: "mem", Time: 23, Value: 10, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`count("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that an integer iterator can be created for a count() call. -func TestCallIterator_Count_Integer(t *testing.T) { - itr, _ := query.NewCallIterator( - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Name: "cpu", Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Name: "cpu", Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Name: "cpu", Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Name: "cpu", Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Name: "cpu", Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - {Name: "mem", Time: 23, Value: 10, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`count("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that an unsigned iterator can be created for a count() call. 
-func TestCallIterator_Count_Unsigned(t *testing.T) { - itr, _ := query.NewCallIterator( - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Name: "cpu", Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Name: "cpu", Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Name: "cpu", Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Name: "cpu", Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Name: "cpu", Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - {Name: "mem", Time: 23, Value: 10, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`count("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a string iterator can be created for a count() call. -func TestCallIterator_Count_String(t *testing.T) { - itr, _ := query.NewCallIterator( - &StringIterator{Points: []query.StringPoint{ - {Name: "cpu", Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, - {Name: "cpu", Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, - {Name: "cpu", Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, - {Name: "cpu", Time: 5, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, - - {Name: "cpu", Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, - {Name: "cpu", Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, - {Name: "mem", Time: 23, Value: "b", Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`count("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a boolean iterator can be created for a count() call. 
-func TestCallIterator_Count_Boolean(t *testing.T) { - itr, _ := query.NewCallIterator( - &BooleanIterator{Points: []query.BooleanPoint{ - {Name: "cpu", Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, - {Name: "cpu", Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, - {Name: "cpu", Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, - {Name: "cpu", Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, - - {Name: "cpu", Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, - {Name: "cpu", Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, - {Name: "mem", Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`count("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 3, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.IntegerPoint{Name: "cpu", Time: 5, Value: 1, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.IntegerPoint{Name: "cpu", Time: 0, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.IntegerPoint{Name: "cpu", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.IntegerPoint{Name: "mem", Time: 20, Value: 1, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a float iterator can be created for a min() call. -func TestCallIterator_Min_Float(t *testing.T) { - itr, _ := query.NewCallIterator( - &FloatIterator{Points: []query.FloatPoint{ - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 4, Value: 12, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`min("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.FloatPoint{Time: 1, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 4}}, - {&query.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a integer iterator can be created for a min() call. 
-func TestCallIterator_Min_Integer(t *testing.T) { - itr, _ := query.NewCallIterator( - &IntegerIterator{Points: []query.IntegerPoint{ - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 4, Value: 12, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`min("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.IntegerPoint{Time: 1, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 4}}, - {&query.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a unsigned iterator can be created for a min() call. -func TestCallIterator_Min_Unsigned(t *testing.T) { - itr, _ := query.NewCallIterator( - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 4, Value: 12, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`min("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.UnsignedPoint{Time: 1, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 4}}, - {&query.UnsignedPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.UnsignedPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.UnsignedPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a boolean iterator can be created for a min() call. 
-func TestCallIterator_Min_Boolean(t *testing.T) { - itr, _ := query.NewCallIterator( - &BooleanIterator{Points: []query.BooleanPoint{ - {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`min("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.BooleanPoint{Time: 2, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.BooleanPoint{Time: 23, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a float iterator can be created for a max() call. -func TestCallIterator_Max_Float(t *testing.T) { - itr, _ := query.NewCallIterator( - &FloatIterator{Points: []query.FloatPoint{ - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`max("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.FloatPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a integer iterator can be created for a max() call. 
-func TestCallIterator_Max_Integer(t *testing.T) { - itr, _ := query.NewCallIterator( - &IntegerIterator{Points: []query.IntegerPoint{ - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`max("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.IntegerPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a unsigned iterator can be created for a max() call. -func TestCallIterator_Max_Unsigned(t *testing.T) { - itr, _ := query.NewCallIterator( - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`max("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.UnsignedPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.UnsignedPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.UnsignedPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.UnsignedPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a boolean iterator can be created for a max() call. 
-func TestCallIterator_Max_Boolean(t *testing.T) { - itr, _ := query.NewCallIterator( - &BooleanIterator{Points: []query.BooleanPoint{ - {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 5, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`max("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.BooleanPoint{Time: 23, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a float iterator can be created for a sum() call. -func TestCallIterator_Sum_Float(t *testing.T) { - itr, _ := query.NewCallIterator( - &FloatIterator{Points: []query.FloatPoint{ - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`sum("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.FloatPoint{Time: 0, Value: 35, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.FloatPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.FloatPoint{Time: 0, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.FloatPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that an integer iterator can be created for a sum() call. 
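One contrast worth noting between the sum() expectations here and the min()/max() expectations above: aggregates report the window start as the output Time (0, 5, and 20 in these cases), while selectors keep the timestamp of the chosen point (for example, hostA's first-window minimum of 10 is reported at Time 1, and when two points tie for the minimum the expectation keeps the earlier timestamp). The values themselves follow directly from the inputs: hostA's first window sums 15 + 10 + 10 = 35 at Time 0, and hostB's lone point at time 23 is reported as 8 at Time 20.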
-func TestCallIterator_Sum_Integer(t *testing.T) { - itr, _ := query.NewCallIterator( - &IntegerIterator{Points: []query.IntegerPoint{ - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`sum("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.IntegerPoint{Time: 0, Value: 35, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.IntegerPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.IntegerPoint{Time: 0, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.IntegerPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that an unsigned iterator can be created for a sum() call. -func TestCallIterator_Sum_Unsigned(t *testing.T) { - itr, _ := query.NewCallIterator( - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 5, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`sum("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.UnsignedPoint{Time: 0, Value: 35, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.UnsignedPoint{Time: 5, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.UnsignedPoint{Time: 0, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.UnsignedPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a float iterator can be created for a first() call. 
-func TestCallIterator_First_Float(t *testing.T) { - itr, _ := query.NewCallIterator( - &FloatIterator{Points: []query.FloatPoint{ - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`first("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.FloatPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.FloatPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that an integer iterator can be created for a first() call. -func TestCallIterator_First_Integer(t *testing.T) { - itr, _ := query.NewCallIterator( - &IntegerIterator{Points: []query.IntegerPoint{ - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`first("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.IntegerPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.IntegerPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that an unsigned iterator can be created for a first() call. 
-func TestCallIterator_First_Unsigned(t *testing.T) { - itr, _ := query.NewCallIterator( - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`first("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.UnsignedPoint{Time: 0, Value: 15, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.UnsignedPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.UnsignedPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.UnsignedPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a string iterator can be created for a first() call. -func TestCallIterator_First_String(t *testing.T) { - itr, _ := query.NewCallIterator( - &StringIterator{Points: []query.StringPoint{ - {Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 6, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`first("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.StringPoint{Time: 0, Value: "d", Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.StringPoint{Time: 6, Value: "e", Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.StringPoint{Time: 1, Value: "c", Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.StringPoint{Time: 23, Value: "a", Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a boolean iterator can be created for a first() call. 
-func TestCallIterator_First_Boolean(t *testing.T) { - itr, _ := query.NewCallIterator( - &BooleanIterator{Points: []query.BooleanPoint{ - {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`first("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.BooleanPoint{Time: 6, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.BooleanPoint{Time: 1, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.BooleanPoint{Time: 23, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a float iterator can be created for a last() call. -func TestCallIterator_Last_Float(t *testing.T) { - itr, _ := query.NewCallIterator( - &FloatIterator{Points: []query.FloatPoint{ - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`last("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.FloatPoint{Time: 2, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.FloatPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.FloatPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that an integer iterator can be created for a last() call. 
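The first() and last() tests in this file make the selection rule explicit: within each window, first() keeps the point with the earliest timestamp and last() the point with the latest, regardless of the order the points arrive in (the inputs deliberately list the time-2 point before the time-0 point). A small sketch of that selection (illustrative type and helper, not from the package):

```go
package main

import "fmt"

type point struct {
	Time  int64
	Value float64
}

// pickFirstLast shows the selection the first()/last() tests assert: the
// earliest-timestamped point wins for first(), the latest for last(),
// independent of input order.
func pickFirstLast(window []point) (first, last point) {
	first, last = window[0], window[0]
	for _, p := range window[1:] {
		if p.Time < first.Time {
			first = p
		}
		if p.Time > last.Time {
			last = p
		}
	}
	return first, last
}

func main() {
	w := []point{{2, 10}, {0, 15}, {1, 10}} // arrival order from the tests
	f, l := pickFirstLast(w)
	fmt.Println(f, l) // {0 15} {2 10}
}
```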
-func TestCallIterator_Last_Integer(t *testing.T) { - itr, _ := query.NewCallIterator( - &IntegerIterator{Points: []query.IntegerPoint{ - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`last("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.IntegerPoint{Time: 2, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.IntegerPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.IntegerPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that an unsigned iterator can be created for a last() call. -func TestCallIterator_Last_Unsigned(t *testing.T) { - itr, _ := query.NewCallIterator( - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`last("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.UnsignedPoint{Time: 2, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.UnsignedPoint{Time: 6, Value: 20, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.UnsignedPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.UnsignedPoint{Time: 23, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a string iterator can be created for a last() call. 
-func TestCallIterator_Last_String(t *testing.T) { - itr, _ := query.NewCallIterator( - &StringIterator{Points: []query.StringPoint{ - {Time: 2, Value: "b", Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 0, Value: "d", Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: "b", Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 6, Value: "e", Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: "c", Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: "a", Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`last("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.StringPoint{Time: 2, Value: "b", Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.StringPoint{Time: 6, Value: "e", Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.StringPoint{Time: 1, Value: "c", Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.StringPoint{Time: 23, Value: "a", Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a boolean iterator can be created for a last() call. -func TestCallIterator_Last_Boolean(t *testing.T) { - itr, _ := query.NewCallIterator( - &BooleanIterator{Points: []query.BooleanPoint{ - {Time: 2, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`last("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.BooleanPoint{Time: 2, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 3}}, - {&query.BooleanPoint{Time: 6, Value: false, Tags: ParseTags("host=hostA"), Aggregated: 1}}, - {&query.BooleanPoint{Time: 1, Value: true, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - {&query.BooleanPoint{Time: 23, Value: false, Tags: ParseTags("host=hostB"), Aggregated: 1}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a float iterator can be created for a mode() call. 
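// The mode() tests below use the same 5ns windows: hostA's first window [0,5)
// holds the values 15, 10, 10, 10, 10, so mode() emits the most frequent
// value (10), and its second window holds 20, 21, 21 and emits 21. Unlike
// first()/last(), the expected points leave Aggregated at its zero value.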
-func TestCallIterator_Mode_Float(t *testing.T) { - itr, _ := query.NewModeIterator(&FloatIterator{Points: []query.FloatPoint{ - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 3, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 4, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 7, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 8, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 22, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 24, Value: 25, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`mode("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.FloatPoint{Time: 0, Value: 10, Tags: ParseTags("host=hostA"), Aggregated: 0}}, - {&query.FloatPoint{Time: 5, Value: 21, Tags: ParseTags("host=hostA"), Aggregated: 0}}, - {&query.FloatPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB"), Aggregated: 0}}, - {&query.FloatPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB"), Aggregated: 0}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a integer iterator can be created for a mode() call. 
-func TestCallIterator_Mode_Integer(t *testing.T) { - itr, _ := query.NewModeIterator(&IntegerIterator{Points: []query.IntegerPoint{ - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 3, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 4, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 7, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 8, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 22, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 24, Value: 25, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`mode("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.IntegerPoint{Time: 0, Value: 10, Tags: ParseTags("host=hostA")}}, - {&query.IntegerPoint{Time: 5, Value: 21, Tags: ParseTags("host=hostA")}}, - {&query.IntegerPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB")}}, - {&query.IntegerPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB")}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a unsigned iterator can be created for a mode() call. 
-func TestCallIterator_Mode_Unsigned(t *testing.T) { - itr, _ := query.NewModeIterator(&UnsignedIterator{Points: []query.UnsignedPoint{ - {Time: 0, Value: 15, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 10, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 2, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 3, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 4, Value: 10, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 6, Value: 20, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 7, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 8, Value: 21, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: 11, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 22, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: 8, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 24, Value: 25, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`mode("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.UnsignedPoint{Time: 0, Value: 10, Tags: ParseTags("host=hostA")}}, - {&query.UnsignedPoint{Time: 5, Value: 21, Tags: ParseTags("host=hostA")}}, - {&query.UnsignedPoint{Time: 1, Value: 11, Tags: ParseTags("host=hostB")}}, - {&query.UnsignedPoint{Time: 20, Value: 8, Tags: ParseTags("host=hostB")}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a string iterator can be created for a mode() call. 
-func TestCallIterator_Mode_String(t *testing.T) { - itr, _ := query.NewModeIterator(&StringIterator{Points: []query.StringPoint{ - {Time: 0, Value: "15", Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: "10", Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 2, Value: "10", Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 3, Value: "10", Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 4, Value: "10", Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 6, Value: "20", Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 7, Value: "21", Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 7, Value: "21", Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: "11", Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 22, Value: "8", Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: "8", Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 24, Value: "25", Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`mode("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.StringPoint{Time: 0, Value: "10", Tags: ParseTags("host=hostA")}}, - {&query.StringPoint{Time: 5, Value: "21", Tags: ParseTags("host=hostA")}}, - {&query.StringPoint{Time: 1, Value: "11", Tags: ParseTags("host=hostB")}}, - {&query.StringPoint{Time: 20, Value: "8", Tags: ParseTags("host=hostB")}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -// Ensure that a boolean iterator can be created for a mode() call.
-func TestCallIterator_Mode_Boolean(t *testing.T) { - itr, _ := query.NewModeIterator(&BooleanIterator{Points: []query.BooleanPoint{ - {Time: 0, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 1, Value: true, Tags: ParseTags("region=us-west,host=hostA")}, - {Time: 2, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 3, Value: true, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 4, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 6, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 7, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, - {Time: 8, Value: false, Tags: ParseTags("region=us-east,host=hostA")}, - - {Time: 1, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 22, Value: false, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 23, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, - {Time: 24, Value: true, Tags: ParseTags("region=us-west,host=hostB")}, - }}, - query.IteratorOptions{ - Expr: MustParseExpr(`mode("value")`), - Dimensions: []string{"host"}, - Interval: query.Interval{Duration: 5 * time.Nanosecond}, - Ordered: true, - Ascending: true, - }, - ) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(a, [][]query.Point{ - {&query.BooleanPoint{Time: 0, Value: true, Tags: ParseTags("host=hostA")}}, - {&query.BooleanPoint{Time: 5, Value: false, Tags: ParseTags("host=hostA")}}, - {&query.BooleanPoint{Time: 1, Value: false, Tags: ParseTags("host=hostB")}}, - {&query.BooleanPoint{Time: 20, Value: true, Tags: ParseTags("host=hostB")}}, - }); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } -} - -func TestNewCallIterator_UnsupportedExprName(t *testing.T) { - _, err := query.NewCallIterator( - &FloatIterator{}, - query.IteratorOptions{ - Expr: MustParseExpr(`foobar("value")`), - }, - ) - - if err == nil || err.Error() != "unsupported function call: foobar" { - t.Errorf("unexpected error: %s", err) - } -} - -func BenchmarkCountIterator_1K(b *testing.B) { benchmarkCountIterator(b, 1000) } -func BenchmarkCountIterator_100K(b *testing.B) { benchmarkCountIterator(b, 100000) } -func BenchmarkCountIterator_1M(b *testing.B) { benchmarkCountIterator(b, 1000000) } - -func benchmarkCountIterator(b *testing.B, pointN int) { - benchmarkCallIterator(b, query.IteratorOptions{ - Expr: MustParseExpr("count(value)"), - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - }, pointN) -} - -func benchmarkCallIterator(b *testing.B, opt query.IteratorOptions, pointN int) { - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - // Create a lightweight point generator. - p := query.FloatPoint{Name: "cpu", Value: 100} - input := FloatPointGenerator{ - N: pointN, - Fn: func(i int) *query.FloatPoint { return &p }, - } - - // Execute call against input. - itr, err := query.NewCallIterator(&input, opt) - if err != nil { - b.Fatal(err) - } - query.DrainIterator(itr) - } -} - -func BenchmarkSampleIterator_1k(b *testing.B) { benchmarkSampleIterator(b, 1000) } -func BenchmarkSampleIterator_100k(b *testing.B) { benchmarkSampleIterator(b, 100000) } -func BenchmarkSampleIterator_1M(b *testing.B) { benchmarkSampleIterator(b, 1000000) } - -func benchmarkSampleIterator(b *testing.B, pointN int) { - b.ReportAllocs() - - // Create a lightweight point generator. 
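// The generator below hands back the same *query.FloatPoint on every call,
// only rewriting its Value, so the benchmark measures iterator overhead
// rather than per-point allocation; N caps how many points Next yields before
// it signals exhaustion with a nil point.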
- p := query.FloatPoint{Name: "cpu"} - input := FloatPointGenerator{ - N: pointN, - Fn: func(i int) *query.FloatPoint { - p.Value = float64(i) - return &p - }, - } - - for i := 0; i < b.N; i++ { - // Execute call against input. - itr, err := query.NewSampleIterator(&input, query.IteratorOptions{}, 100) - if err != nil { - b.Fatal(err) - } - query.DrainIterator(itr) - } -} - -func BenchmarkDistinctIterator_1K(b *testing.B) { benchmarkDistinctIterator(b, 1000) } -func BenchmarkDistinctIterator_100K(b *testing.B) { benchmarkDistinctIterator(b, 100000) } -func BenchmarkDistinctIterator_1M(b *testing.B) { benchmarkDistinctIterator(b, 1000000) } - -func benchmarkDistinctIterator(b *testing.B, pointN int) { - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - // Create a lightweight point generator. - p := query.FloatPoint{Name: "cpu"} - input := FloatPointGenerator{ - N: pointN, - Fn: func(i int) *query.FloatPoint { - p.Value = float64(i % 10) - return &p - }, - } - - // Execute call against input. - itr, err := query.NewDistinctIterator(&input, query.IteratorOptions{}) - if err != nil { - b.Fatal(err) - } - query.DrainIterator(itr) - } -} - -func BenchmarkModeIterator_1K(b *testing.B) { benchmarkModeIterator(b, 1000) } -func BenchmarkModeIterator_100K(b *testing.B) { benchmarkModeIterator(b, 100000) } -func BenchmarkModeIterator_1M(b *testing.B) { benchmarkModeIterator(b, 1000000) } - -func benchmarkModeIterator(b *testing.B, pointN int) { - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - // Create a lightweight point generator. - p := query.FloatPoint{Name: "cpu"} - input := FloatPointGenerator{ - N: pointN, - Fn: func(i int) *query.FloatPoint { - p.Value = float64(10) - return &p - }, - } - - // Execute call against input. - itr, err := query.NewModeIterator(&input, query.IteratorOptions{}) - if err != nil { - b.Fatal(err) - } - query.DrainIterator(itr) - } -} - -type FloatPointGenerator struct { - i int - N int - Fn func(i int) *query.FloatPoint -} - -func (g *FloatPointGenerator) Close() error { return nil } -func (g *FloatPointGenerator) Stats() query.IteratorStats { return query.IteratorStats{} } - -func (g *FloatPointGenerator) Next() (*query.FloatPoint, error) { - if g.i == g.N { - return nil, nil - } - p := g.Fn(g.i) - g.i++ - return p, nil -} diff --git a/influxql/query/cast.go b/influxql/query/cast.go deleted file mode 100644 index 8c02f4a3f4c..00000000000 --- a/influxql/query/cast.go +++ /dev/null @@ -1,88 +0,0 @@ -package query - -import "github.com/influxdata/influxql" - -// castToType will coerce the underlying interface type to another -// interface depending on the type. 
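// A hypothetical illustration of the coercion rules implemented below (the
// values are examples, not taken from the original file):
//
//	castToType(int64(3), influxql.Float)       // float64(3)
//	castToType(float64(2.7), influxql.Integer) // int64(2), fraction truncated
//	castToType("3", influxql.Integer)          // "3" unchanged; strings never coerce to numbers
//	castToType(uint64(7), influxql.String)     // uint64(7) unchanged; only strings map to String/Tag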
-func castToType(v interface{}, typ influxql.DataType) interface{} { - switch typ { - case influxql.Float: - if val, ok := castToFloat(v); ok { - v = val - } - case influxql.Integer: - if val, ok := castToInteger(v); ok { - v = val - } - case influxql.Unsigned: - if val, ok := castToUnsigned(v); ok { - v = val - } - case influxql.String, influxql.Tag: - if val, ok := castToString(v); ok { - v = val - } - case influxql.Boolean: - if val, ok := castToBoolean(v); ok { - v = val - } - } - return v -} - -func castToFloat(v interface{}) (float64, bool) { - switch v := v.(type) { - case float64: - return v, true - case int64: - return float64(v), true - case uint64: - return float64(v), true - default: - return float64(0), false - } -} - -func castToInteger(v interface{}) (int64, bool) { - switch v := v.(type) { - case float64: - return int64(v), true - case int64: - return v, true - case uint64: - return int64(v), true - default: - return int64(0), false - } -} - -func castToUnsigned(v interface{}) (uint64, bool) { - switch v := v.(type) { - case float64: - return uint64(v), true - case uint64: - return v, true - case int64: - return uint64(v), true - default: - return uint64(0), false - } -} - -func castToString(v interface{}) (string, bool) { - switch v := v.(type) { - case string: - return v, true - default: - return "", false - } -} - -func castToBoolean(v interface{}) (bool, bool) { - switch v := v.(type) { - case bool: - return v, true - default: - return false, false - } -} diff --git a/influxql/query/compile.go b/influxql/query/compile.go deleted file mode 100644 index 7dd120545dc..00000000000 --- a/influxql/query/compile.go +++ /dev/null @@ -1,1233 +0,0 @@ -package query - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxql" -) - -// CompileOptions are the customization options for the compiler. -type CompileOptions struct { - Now time.Time -} - -// Statement is a compiled query statement. -type Statement interface { - // Prepare prepares the statement by mapping shards and finishing the creation - // of the query plan. - Prepare(ctx context.Context, shardMapper ShardMapper, opt SelectOptions) (PreparedStatement, error) -} - -// compiledStatement represents a select statement that has undergone some initial processing to -// determine if it is valid and to have some initial modifications done on the AST. -type compiledStatement struct { - // Condition is the condition used for accessing data. - Condition influxql.Expr - - // TimeRange is the TimeRange for selecting data. - TimeRange influxql.TimeRange - - // Interval holds the time grouping interval. - Interval Interval - - // InheritedInterval marks if the interval was inherited by a parent. - // If this is set, then an interval that was inherited will not cause - // a query that shouldn't have an interval to fail. - InheritedInterval bool - - // ExtraIntervals is the number of extra intervals that will be read in addition - // to the TimeRange. It is a multiple of Interval and only applies to queries that - // have an Interval. It is used to extend the TimeRange of the mapped shards to - // include additional non-emitted intervals used by derivative and other functions. - // It will be set to the highest number of extra intervals that need to be read even - // if it doesn't apply to all functions. The number will always be positive. 
- // This value may be set to a non-zero value even if there is no interval for the - // compiled query. - ExtraIntervals int - - // Ascending is true if the time ordering is ascending. - Ascending bool - - // FunctionCalls holds a reference to the call expression of every function - // call that has been encountered. - FunctionCalls []*influxql.Call - - // OnlySelectors is set to true when there are no aggregate functions. - OnlySelectors bool - - // HasDistinct is set when the distinct() function is encountered. - HasDistinct bool - - // FillOption contains the fill option for aggregates. - FillOption influxql.FillOption - - // TopBottomFunction is set to top or bottom when one of those functions are - // used in the statement. - TopBottomFunction string - - // HasAuxiliaryFields is true when the function requires auxiliary fields. - HasAuxiliaryFields bool - - // Fields holds all of the fields that will be used. - Fields []*compiledField - - // TimeFieldName stores the name of the time field's column. - // The column names generated by the compiler will not conflict with - // this name. - TimeFieldName string - - // Limit is the number of rows per series this query should be limited to. - Limit int - - // HasTarget is true if this query is being written into a target. - HasTarget bool - - // Options holds the configured compiler options. - Options CompileOptions - - stmt *influxql.SelectStatement -} - -func newCompiler(opt CompileOptions) *compiledStatement { - if opt.Now.IsZero() { - opt.Now = time.Now().UTC() - } - return &compiledStatement{ - OnlySelectors: true, - TimeFieldName: "time", - Options: opt, - } -} - -func Compile(stmt *influxql.SelectStatement, opt CompileOptions) (_ Statement, err error) { - c := newCompiler(opt) - c.stmt = stmt.Clone() - if err := c.preprocess(c.stmt); err != nil { - return nil, err - } - if err := c.compile(c.stmt); err != nil { - return nil, err - } - c.stmt.TimeAlias = c.TimeFieldName - c.stmt.Condition = c.Condition - - defer func() { - if e := recover(); e != nil && err == nil { - var ok bool - err, ok = e.(error) - if !ok { - err = fmt.Errorf("panic: %v", e) - } - err = fmt.Errorf("likely malformed statement, unable to rewrite: %w", err) - } - }() - - // Convert DISTINCT into a call. - c.stmt.RewriteDistinct() - - // Remove "time" from fields list. - c.stmt.RewriteTimeFields() - - // Rewrite any regex conditions that could make use of the index. - c.stmt.RewriteRegexConditions() - return c, nil -} - -// preprocess retrieves and records the global attributes of the current statement. -func (c *compiledStatement) preprocess(stmt *influxql.SelectStatement) error { - c.Ascending = stmt.TimeAscending() - c.Limit = stmt.Limit - c.HasTarget = stmt.Target != nil - - valuer := influxql.NowValuer{Now: c.Options.Now, Location: stmt.Location} - cond, t, err := influxql.ConditionExpr(stmt.Condition, &valuer) - if err != nil { - return err - } - // Verify that the condition is actually ok to use. - if err := c.validateCondition(cond); err != nil { - return err - } - c.Condition = cond - c.TimeRange = t - - // Read the dimensions of the query, validate them, and retrieve the interval - // if it exists. - if err := c.compileDimensions(stmt); err != nil { - return err - } - - // Retrieve the fill option for the statement. - c.FillOption = stmt.Fill - - // Resolve the min and max times now that we know if there is an interval or not. 
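// If the WHERE clause supplied no lower bound, Min falls back to
// influxql.MinTime; if it supplied no upper bound, Max becomes now() for
// queries with a GROUP BY time interval and influxql.MaxTime otherwise. For
// example, `SELECT mean(value) FROM cpu GROUP BY time(10m)` resolves to
// [MinTime, now()], while a raw `SELECT value FROM cpu` resolves to
// [MinTime, MaxTime].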
- if c.TimeRange.Min.IsZero() { - c.TimeRange.Min = time.Unix(0, influxql.MinTime).UTC() - } - if c.TimeRange.Max.IsZero() { - // If the interval is non-zero, then we have an aggregate query and - // need to limit the maximum time to now() for backwards compatibility - // and usability. - if !c.Interval.IsZero() { - c.TimeRange.Max = c.Options.Now - } else { - c.TimeRange.Max = time.Unix(0, influxql.MaxTime).UTC() - } - } - return nil -} - -func (c *compiledStatement) compile(stmt *influxql.SelectStatement) error { - if err := c.compileFields(stmt); err != nil { - return err - } - if err := c.validateFields(); err != nil { - return err - } - - // Look through the sources and compile each of the subqueries (if they exist). - // We do this after compiling the outside because subqueries may require - // inherited state. - for _, source := range stmt.Sources { - switch source := source.(type) { - case *influxql.SubQuery: - source.Statement.OmitTime = true - if err := c.subquery(source.Statement); err != nil { - return err - } - source.Statement.RewriteRegexConditions() - } - } - return nil -} - -func (c *compiledStatement) compileFields(stmt *influxql.SelectStatement) error { - valuer := MathValuer{} - - c.Fields = make([]*compiledField, 0, len(stmt.Fields)) - for _, f := range stmt.Fields { - // Remove any time selection (it is automatically selected by default) - // and set the time column name to the alias of the time field if it exists. - // Such as SELECT time, max(value) FROM cpu will be SELECT max(value) FROM cpu - // and SELECT time AS timestamp, max(value) FROM cpu will return "timestamp" - // as the column name for the time. - if ref, ok := f.Expr.(*influxql.VarRef); ok && ref.Val == "time" { - if f.Alias != "" { - c.TimeFieldName = f.Alias - } - continue - } - - // Append this field to the list of processed fields and compile it. - f.Expr = influxql.Reduce(f.Expr, &valuer) - field := &compiledField{ - global: c, - Field: f, - AllowWildcard: true, - } - c.Fields = append(c.Fields, field) - if err := field.compileExpr(field.Field.Expr); err != nil { - return err - } - } - return nil -} - -type compiledField struct { - // This holds the global state from the compiled statement. - global *compiledStatement - - // Field is the top level field that is being compiled. - Field *influxql.Field - - // AllowWildcard is set to true if a wildcard or regular expression is allowed. - AllowWildcard bool -} - -// compileExpr creates the node that executes the expression and connects that -// node to the WriteEdge as the output. -func (c *compiledField) compileExpr(expr influxql.Expr) error { - switch expr := expr.(type) { - case *influxql.VarRef: - // A bare variable reference will require auxiliary fields. - c.global.HasAuxiliaryFields = true - return nil - case *influxql.Wildcard: - // Wildcards use auxiliary fields. We assume there will be at least one - // expansion. - c.global.HasAuxiliaryFields = true - if !c.AllowWildcard { - return errors.New("unable to use wildcard in a binary expression") - } - return nil - case *influxql.RegexLiteral: - if !c.AllowWildcard { - return errors.New("unable to use regex in a binary expression") - } - c.global.HasAuxiliaryFields = true - return nil - case *influxql.Call: - if isMathFunction(expr) { - return c.compileMathFunction(expr) - } - - // Register the function call in the list of function calls. 
- c.global.FunctionCalls = append(c.global.FunctionCalls, expr) - - switch expr.Name { - case "percentile": - return c.compilePercentile(expr.Args) - case "sample": - return c.compileSample(expr.Args) - case "distinct": - return c.compileDistinct(expr.Args, false) - case "top", "bottom": - return c.compileTopBottom(expr) - case "derivative", "non_negative_derivative": - isNonNegative := expr.Name == "non_negative_derivative" - return c.compileDerivative(expr.Args, isNonNegative) - case "difference", "non_negative_difference": - isNonNegative := expr.Name == "non_negative_difference" - return c.compileDifference(expr.Args, isNonNegative) - case "cumulative_sum": - return c.compileCumulativeSum(expr.Args) - case "moving_average": - return c.compileMovingAverage(expr.Args) - case "exponential_moving_average", "double_exponential_moving_average", "triple_exponential_moving_average", "relative_strength_index", "triple_exponential_derivative": - return c.compileExponentialMovingAverage(expr.Name, expr.Args) - case "kaufmans_efficiency_ratio", "kaufmans_adaptive_moving_average": - return c.compileKaufmans(expr.Name, expr.Args) - case "chande_momentum_oscillator": - return c.compileChandeMomentumOscillator(expr.Args) - case "elapsed": - return c.compileElapsed(expr.Args) - case "integral": - return c.compileIntegral(expr.Args) - case "count_hll": - return c.compileCountHll(expr.Args) - case "holt_winters", "holt_winters_with_fit": - withFit := expr.Name == "holt_winters_with_fit" - return c.compileHoltWinters(expr.Args, withFit) - default: - return c.compileFunction(expr) - } - case *influxql.Distinct: - call := expr.NewCall() - c.global.FunctionCalls = append(c.global.FunctionCalls, call) - return c.compileDistinct(call.Args, false) - case *influxql.BinaryExpr: - // Disallow wildcards in binary expressions. RewriteFields, which expands - // wildcards, is too complicated if we allow wildcards inside of expressions. - c.AllowWildcard = false - - // Check if either side is a literal so we only compile one side if it is. - if _, ok := expr.LHS.(influxql.Literal); ok { - if _, ok := expr.RHS.(influxql.Literal); ok { - return errors.New("cannot perform a binary expression on two literals") - } - return c.compileExpr(expr.RHS) - } else if _, ok := expr.RHS.(influxql.Literal); ok { - return c.compileExpr(expr.LHS) - } else { - // Validate both sides of the expression. - if err := c.compileExpr(expr.LHS); err != nil { - return err - } - if err := c.compileExpr(expr.RHS); err != nil { - return err - } - return nil - } - case *influxql.ParenExpr: - return c.compileExpr(expr.Expr) - case influxql.Literal: - return errors.New("field must contain at least one variable") - } - return errors.New("unimplemented") -} - -// compileNestedExpr ensures that the expression is compiled as if it were -// a nested expression. -func (c *compiledField) compileNestedExpr(expr influxql.Expr) error { - // Intercept the distinct call so we can pass nested as true. - switch expr := expr.(type) { - case *influxql.Call: - if expr.Name == "distinct" { - return c.compileDistinct(expr.Args, true) - } - case *influxql.Distinct: - call := expr.NewCall() - return c.compileDistinct(call.Args, true) - } - return c.compileExpr(expr) -} - -func (c *compiledField) compileSymbol(name string, field influxql.Expr) error { - // Must be a variable reference, wildcard, or regexp. 
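// For example, `max(value)` passes because its argument is a field reference,
// and `max(*)` or `max(/val/)` pass wherever wildcards are still allowed; a
// literal argument such as `max(2)` is rejected with "expected field argument
// in max()".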
- switch field.(type) { - case *influxql.VarRef: - return nil - case *influxql.Wildcard: - if !c.AllowWildcard { - return fmt.Errorf("unsupported expression with wildcard: %s()", name) - } - c.global.OnlySelectors = false - return nil - case *influxql.RegexLiteral: - if !c.AllowWildcard { - return fmt.Errorf("unsupported expression with regex field: %s()", name) - } - c.global.OnlySelectors = false - return nil - default: - return fmt.Errorf("expected field argument in %s()", name) - } -} - -func (c *compiledField) compileFunction(expr *influxql.Call) error { - // Validate the function call and mark down some meta properties - // related to the function for query validation. - switch expr.Name { - case "max", "min", "first", "last": - // top/bottom are not included here since they are not typical functions. - case "count", "sum", "mean", "median", "mode", "stddev", "spread", "sum_hll": - // These functions are not considered selectors. - c.global.OnlySelectors = false - default: - return fmt.Errorf("undefined function %s()", expr.Name) - } - - if exp, got := 1, len(expr.Args); exp != got { - return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) - } - - // If this is a call to count(), allow distinct() to be used as the function argument. - if expr.Name == "count" { - // If we have count(), the argument may be a distinct() call. - if arg0, ok := expr.Args[0].(*influxql.Call); ok && arg0.Name == "distinct" { - return c.compileDistinct(arg0.Args, true) - } else if arg0, ok := expr.Args[0].(*influxql.Distinct); ok { - call := arg0.NewCall() - return c.compileDistinct(call.Args, true) - } - } - return c.compileSymbol(expr.Name, expr.Args[0]) -} - -func (c *compiledField) compilePercentile(args []influxql.Expr) error { - if exp, got := 2, len(args); got != exp { - return fmt.Errorf("invalid number of arguments for percentile, expected %d, got %d", exp, got) - } - - switch args[1].(type) { - case *influxql.IntegerLiteral: - case *influxql.NumberLiteral: - default: - return fmt.Errorf("expected float argument in percentile()") - } - return c.compileSymbol("percentile", args[0]) -} - -func (c *compiledField) compileSample(args []influxql.Expr) error { - if exp, got := 2, len(args); got != exp { - return fmt.Errorf("invalid number of arguments for sample, expected %d, got %d", exp, got) - } - - switch arg1 := args[1].(type) { - case *influxql.IntegerLiteral: - if arg1.Val <= 0 { - return fmt.Errorf("sample window must be greater than 1, got %d", arg1.Val) - } - default: - return fmt.Errorf("expected integer argument in sample()") - } - return c.compileSymbol("sample", args[0]) -} - -func (c *compiledField) compileDerivative(args []influxql.Expr, isNonNegative bool) error { - name := "derivative" - if isNonNegative { - name = "non_negative_derivative" - } - - if min, max, got := 1, 2, len(args); got > max || got < min { - return fmt.Errorf("invalid number of arguments for %s, expected at least %d but no more than %d, got %d", name, min, max, got) - } - - // Retrieve the duration from the derivative() call, if specified. 
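// For example, `derivative(mean(value), 10s)` supplies an explicit unit,
// which must be a positive duration literal. A call argument such as
// mean(value) additionally requires a GROUP BY time interval, while a bare
// field argument is rejected when the statement declares its own GROUP BY
// time interval. ExtraIntervals is raised to at least 1 so the mapped time
// range includes one earlier interval to seed the first derivative value.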
- if len(args) == 2 { - switch arg1 := args[1].(type) { - case *influxql.DurationLiteral: - if arg1.Val <= 0 { - return fmt.Errorf("duration argument must be positive, got %s", influxql.FormatDuration(arg1.Val)) - } - default: - return fmt.Errorf("second argument to %s must be a duration, got %T", name, args[1]) - } - } - c.global.OnlySelectors = false - if c.global.ExtraIntervals < 1 { - c.global.ExtraIntervals = 1 - } - - // Must be a variable reference, function, wildcard, or regexp. - switch arg0 := args[0].(type) { - case *influxql.Call: - if c.global.Interval.IsZero() { - return fmt.Errorf("%s aggregate requires a GROUP BY interval", name) - } - return c.compileNestedExpr(arg0) - default: - if !c.global.Interval.IsZero() && !c.global.InheritedInterval { - return fmt.Errorf("aggregate function required inside the call to %s", name) - } - return c.compileSymbol(name, arg0) - } -} - -func (c *compiledField) compileElapsed(args []influxql.Expr) error { - if min, max, got := 1, 2, len(args); got > max || got < min { - return fmt.Errorf("invalid number of arguments for elapsed, expected at least %d but no more than %d, got %d", min, max, got) - } - - // Retrieve the duration from the elapsed() call, if specified. - if len(args) == 2 { - switch arg1 := args[1].(type) { - case *influxql.DurationLiteral: - if arg1.Val <= 0 { - return fmt.Errorf("duration argument must be positive, got %s", influxql.FormatDuration(arg1.Val)) - } - default: - return fmt.Errorf("second argument to elapsed must be a duration, got %T", args[1]) - } - } - c.global.OnlySelectors = false - if c.global.ExtraIntervals < 1 { - c.global.ExtraIntervals = 1 - } - - // Must be a variable reference, function, wildcard, or regexp. - switch arg0 := args[0].(type) { - case *influxql.Call: - if c.global.Interval.IsZero() { - return fmt.Errorf("elapsed aggregate requires a GROUP BY interval") - } - return c.compileNestedExpr(arg0) - default: - if !c.global.Interval.IsZero() && !c.global.InheritedInterval { - return fmt.Errorf("aggregate function required inside the call to elapsed") - } - return c.compileSymbol("elapsed", arg0) - } -} - -func (c *compiledField) compileDifference(args []influxql.Expr, isNonNegative bool) error { - name := "difference" - if isNonNegative { - name = "non_negative_difference" - } - - if got := len(args); got != 1 { - return fmt.Errorf("invalid number of arguments for %s, expected 1, got %d", name, got) - } - c.global.OnlySelectors = false - if c.global.ExtraIntervals < 1 { - c.global.ExtraIntervals = 1 - } - - // Must be a variable reference, function, wildcard, or regexp. - switch arg0 := args[0].(type) { - case *influxql.Call: - if c.global.Interval.IsZero() { - return fmt.Errorf("%s aggregate requires a GROUP BY interval", name) - } - return c.compileNestedExpr(arg0) - default: - if !c.global.Interval.IsZero() && !c.global.InheritedInterval { - return fmt.Errorf("aggregate function required inside the call to %s", name) - } - return c.compileSymbol(name, arg0) - } -} - -func (c *compiledField) compileCumulativeSum(args []influxql.Expr) error { - if got := len(args); got != 1 { - return fmt.Errorf("invalid number of arguments for cumulative_sum, expected 1, got %d", got) - } - c.global.OnlySelectors = false - if c.global.ExtraIntervals < 1 { - c.global.ExtraIntervals = 1 - } - - // Must be a variable reference, function, wildcard, or regexp. 
- switch arg0 := args[0].(type) { - case *influxql.Call: - if c.global.Interval.IsZero() { - return fmt.Errorf("cumulative_sum aggregate requires a GROUP BY interval") - } - return c.compileNestedExpr(arg0) - default: - if !c.global.Interval.IsZero() && !c.global.InheritedInterval { - return fmt.Errorf("aggregate function required inside the call to cumulative_sum") - } - return c.compileSymbol("cumulative_sum", arg0) - } -} - -func (c *compiledField) compileMovingAverage(args []influxql.Expr) error { - if got := len(args); got != 2 { - return fmt.Errorf("invalid number of arguments for moving_average, expected 2, got %d", got) - } - - arg1, ok := args[1].(*influxql.IntegerLiteral) - if !ok { - return fmt.Errorf("second argument for moving_average must be an integer, got %T", args[1]) - } else if arg1.Val <= 1 { - return fmt.Errorf("moving_average window must be greater than 1, got %d", arg1.Val) - } - c.global.OnlySelectors = false - if c.global.ExtraIntervals < int(arg1.Val) { - c.global.ExtraIntervals = int(arg1.Val) - } - - // Must be a variable reference, function, wildcard, or regexp. - switch arg0 := args[0].(type) { - case *influxql.Call: - if c.global.Interval.IsZero() { - return fmt.Errorf("moving_average aggregate requires a GROUP BY interval") - } - return c.compileNestedExpr(arg0) - default: - if !c.global.Interval.IsZero() && !c.global.InheritedInterval { - return fmt.Errorf("aggregate function required inside the call to moving_average") - } - return c.compileSymbol("moving_average", arg0) - } -} - -func (c *compiledField) compileExponentialMovingAverage(name string, args []influxql.Expr) error { - if got := len(args); got < 2 || got > 4 { - return fmt.Errorf("invalid number of arguments for %s, expected at least 2 but no more than 4, got %d", name, got) - } - - arg1, ok := args[1].(*influxql.IntegerLiteral) - if !ok { - return fmt.Errorf("%s period must be an integer", name) - } else if arg1.Val < 1 { - return fmt.Errorf("%s period must be greater than or equal to 1", name) - } - - if len(args) >= 3 { - switch arg2 := args[2].(type) { - case *influxql.IntegerLiteral: - if name == "triple_exponential_derivative" && arg2.Val < 1 && arg2.Val != -1 { - return fmt.Errorf("%s hold period must be greater than or equal to 1", name) - } - if arg2.Val < 0 && arg2.Val != -1 { - return fmt.Errorf("%s hold period must be greater than or equal to 0", name) - } - default: - return fmt.Errorf("%s hold period must be an integer", name) - } - } - - if len(args) >= 4 { - switch arg3 := args[3].(type) { - case *influxql.StringLiteral: - switch arg3.Val { - case "exponential", "simple": - default: - return fmt.Errorf("%s warmup type must be one of: 'exponential' 'simple'", name) - } - default: - return fmt.Errorf("%s warmup type must be a string", name) - } - } - - c.global.OnlySelectors = false - if c.global.ExtraIntervals < int(arg1.Val) { - c.global.ExtraIntervals = int(arg1.Val) - } - - switch arg0 := args[0].(type) { - case *influxql.Call: - if c.global.Interval.IsZero() { - return fmt.Errorf("%s aggregate requires a GROUP BY interval", name) - } - return c.compileExpr(arg0) - default: - if !c.global.Interval.IsZero() && !c.global.InheritedInterval { - return fmt.Errorf("aggregate function required inside the call to %s", name) - } - return c.compileSymbol(name, arg0) - } -} - -func (c *compiledField) compileKaufmans(name string, args []influxql.Expr) error { - if got := len(args); got < 2 || got > 3 { - return fmt.Errorf("invalid number of arguments for %s, expected at least 2 but no more 
than 3, got %d", name, got) - } - - arg1, ok := args[1].(*influxql.IntegerLiteral) - if !ok { - return fmt.Errorf("%s period must be an integer", name) - } else if arg1.Val < 1 { - return fmt.Errorf("%s period must be greater than or equal to 1", name) - } - - if len(args) >= 3 { - switch arg2 := args[2].(type) { - case *influxql.IntegerLiteral: - if arg2.Val < 0 && arg2.Val != -1 { - return fmt.Errorf("%s hold period must be greater than or equal to 0", name) - } - default: - return fmt.Errorf("%s hold period must be an integer", name) - } - } - - c.global.OnlySelectors = false - if c.global.ExtraIntervals < int(arg1.Val) { - c.global.ExtraIntervals = int(arg1.Val) - } - - switch arg0 := args[0].(type) { - case *influxql.Call: - if c.global.Interval.IsZero() { - return fmt.Errorf("%s aggregate requires a GROUP BY interval", name) - } - return c.compileExpr(arg0) - default: - if !c.global.Interval.IsZero() && !c.global.InheritedInterval { - return fmt.Errorf("aggregate function required inside the call to %s", name) - } - return c.compileSymbol(name, arg0) - } -} - -func (c *compiledField) compileChandeMomentumOscillator(args []influxql.Expr) error { - if got := len(args); got < 2 || got > 4 { - return fmt.Errorf("invalid number of arguments for chande_momentum_oscillator, expected at least 2 but no more than 4, got %d", got) - } - - arg1, ok := args[1].(*influxql.IntegerLiteral) - if !ok { - return fmt.Errorf("chande_momentum_oscillator period must be an integer") - } else if arg1.Val < 1 { - return fmt.Errorf("chande_momentum_oscillator period must be greater than or equal to 1") - } - - if len(args) >= 3 { - switch arg2 := args[2].(type) { - case *influxql.IntegerLiteral: - if arg2.Val < 0 && arg2.Val != -1 { - return fmt.Errorf("chande_momentum_oscillator hold period must be greater than or equal to 0") - } - default: - return fmt.Errorf("chande_momentum_oscillator hold period must be an integer") - } - } - - c.global.OnlySelectors = false - if c.global.ExtraIntervals < int(arg1.Val) { - c.global.ExtraIntervals = int(arg1.Val) - } - - if len(args) >= 4 { - switch arg3 := args[3].(type) { - case *influxql.StringLiteral: - switch arg3.Val { - case "none", "exponential", "simple": - default: - return fmt.Errorf("chande_momentum_oscillator warmup type must be one of: 'none' 'exponential' 'simple'") - } - default: - return fmt.Errorf("chande_momentum_oscillator warmup type must be a string") - } - } - - switch arg0 := args[0].(type) { - case *influxql.Call: - if c.global.Interval.IsZero() { - return fmt.Errorf("chande_momentum_oscillator aggregate requires a GROUP BY interval") - } - return c.compileExpr(arg0) - default: - if !c.global.Interval.IsZero() && !c.global.InheritedInterval { - return fmt.Errorf("aggregate function required inside the call to chande_momentum_oscillator") - } - return c.compileSymbol("chande_momentum_oscillator", arg0) - } -} - -func (c *compiledField) compileIntegral(args []influxql.Expr) error { - if min, max, got := 1, 2, len(args); got > max || got < min { - return fmt.Errorf("invalid number of arguments for integral, expected at least %d but no more than %d, got %d", min, max, got) - } - - if len(args) == 2 { - switch arg1 := args[1].(type) { - case *influxql.DurationLiteral: - if arg1.Val <= 0 { - return fmt.Errorf("duration argument must be positive, got %s", influxql.FormatDuration(arg1.Val)) - } - default: - return errors.New("second argument must be a duration") - } - } - c.global.OnlySelectors = false - - // Must be a variable reference, wildcard, or 
regexp. - return c.compileSymbol("integral", args[0]) -} - -func (c *compiledField) compileCountHll(args []influxql.Expr) error { - if exp, got := 1, len(args); exp != got { - return fmt.Errorf("invalid number of arguments for count_hll, expected %d, got %d", exp, got) - } - c.global.OnlySelectors = false - switch arg0 := args[0].(type) { - case *influxql.Call: - return c.compileExpr(arg0) - default: - return c.compileSymbol("count_hll", arg0) - } -} - -func (c *compiledField) compileHoltWinters(args []influxql.Expr, withFit bool) error { - name := "holt_winters" - if withFit { - name = "holt_winters_with_fit" - } - - if exp, got := 3, len(args); got != exp { - return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", name, exp, got) - } - - n, ok := args[1].(*influxql.IntegerLiteral) - if !ok { - return fmt.Errorf("expected integer argument as second arg in %s", name) - } else if n.Val <= 0 { - return fmt.Errorf("second arg to %s must be greater than 0, got %d", name, n.Val) - } - - s, ok := args[2].(*influxql.IntegerLiteral) - if !ok { - return fmt.Errorf("expected integer argument as third arg in %s", name) - } else if s.Val < 0 { - return fmt.Errorf("third arg to %s cannot be negative, got %d", name, s.Val) - } - c.global.OnlySelectors = false - - call, ok := args[0].(*influxql.Call) - if !ok { - return fmt.Errorf("must use aggregate function with %s", name) - } else if c.global.Interval.IsZero() { - return fmt.Errorf("%s aggregate requires a GROUP BY interval", name) - } - return c.compileNestedExpr(call) -} - -func (c *compiledField) compileDistinct(args []influxql.Expr, nested bool) error { - if len(args) == 0 { - return errors.New("distinct function requires at least one argument") - } else if len(args) != 1 { - return errors.New("distinct function can only have one argument") - } - - if _, ok := args[0].(*influxql.VarRef); !ok { - return errors.New("expected field argument in distinct()") - } - if !nested { - c.global.HasDistinct = true - } - c.global.OnlySelectors = false - return nil -} - -func (c *compiledField) compileTopBottom(call *influxql.Call) error { - if c.global.TopBottomFunction != "" { - return fmt.Errorf("selector function %s() cannot be combined with other functions", c.global.TopBottomFunction) - } - - if exp, got := 2, len(call.Args); got < exp { - return fmt.Errorf("invalid number of arguments for %s, expected at least %d, got %d", call.Name, exp, got) - } - - limit, ok := call.Args[len(call.Args)-1].(*influxql.IntegerLiteral) - if !ok { - return fmt.Errorf("expected integer as last argument in %s(), found %s", call.Name, call.Args[len(call.Args)-1]) - } else if limit.Val <= 0 { - return fmt.Errorf("limit (%d) in %s function must be at least 1", limit.Val, call.Name) - } else if c.global.Limit > 0 && int(limit.Val) > c.global.Limit { - return fmt.Errorf("limit (%d) in %s function can not be larger than the LIMIT (%d) in the select statement", limit.Val, call.Name, c.global.Limit) - } - - if _, ok := call.Args[0].(*influxql.VarRef); !ok { - return fmt.Errorf("expected first argument to be a field in %s(), found %s", call.Name, call.Args[0]) - } - - if len(call.Args) > 2 { - for _, v := range call.Args[1 : len(call.Args)-1] { - ref, ok := v.(*influxql.VarRef) - if !ok { - return fmt.Errorf("only fields or tags are allowed in %s(), found %s", call.Name, v) - } - - // Add a field for each of the listed dimensions when not writing the results. 
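// For example, in `SELECT top(value, host, 3) FROM cpu` the middle argument
// `host` names an extra dimension; each such reference is compiled as an
// additional output field, but only when the statement is not writing its
// results INTO a target measurement.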
- if !c.global.HasTarget { - field := &compiledField{ - global: c.global, - Field: &influxql.Field{Expr: ref}, - } - c.global.Fields = append(c.global.Fields, field) - if err := field.compileExpr(ref); err != nil { - return err - } - } - } - } - c.global.TopBottomFunction = call.Name - return nil -} - -func (c *compiledField) compileMathFunction(expr *influxql.Call) error { - // How many arguments are we expecting? - nargs := 1 - switch expr.Name { - case "atan2", "pow", "log": - nargs = 2 - } - - // Did we get the expected number of args? - if got := len(expr.Args); got != nargs { - return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, nargs, got) - } - - // Compile all the argument expressions that are not just literals. - for _, arg := range expr.Args { - if _, ok := arg.(influxql.Literal); ok { - continue - } - if err := c.compileExpr(arg); err != nil { - return err - } - } - return nil -} - -func (c *compiledStatement) compileDimensions(stmt *influxql.SelectStatement) error { - for _, d := range stmt.Dimensions { - // Reduce the expression before attempting anything. Do not evaluate the call. - expr := influxql.Reduce(d.Expr, nil) - - switch expr := expr.(type) { - case *influxql.VarRef: - if strings.EqualFold(expr.Val, "time") { - return errors.New("time() is a function and expects at least one argument") - } - case *influxql.Call: - // Ensure the call is time() and it has one or two duration arguments. - // If we already have a duration - if expr.Name != "time" { - return errors.New("only time() calls allowed in dimensions") - } else if got := len(expr.Args); got < 1 || got > 2 { - return errors.New("time dimension expected 1 or 2 arguments") - } else if lit, ok := expr.Args[0].(*influxql.DurationLiteral); !ok { - return errors.New("time dimension must have duration argument") - } else if c.Interval.Duration != 0 { - return errors.New("multiple time dimensions not allowed") - } else { - c.Interval.Duration = lit.Val - if len(expr.Args) == 2 { - switch lit := expr.Args[1].(type) { - case *influxql.DurationLiteral: - c.Interval.Offset = lit.Val % c.Interval.Duration - case *influxql.TimeLiteral: - c.Interval.Offset = lit.Val.Sub(lit.Val.Truncate(c.Interval.Duration)) - case *influxql.Call: - if lit.Name != "now" { - return errors.New("time dimension offset function must be now()") - } else if len(lit.Args) != 0 { - return errors.New("time dimension offset now() function requires no arguments") - } - now := c.Options.Now - c.Interval.Offset = now.Sub(now.Truncate(c.Interval.Duration)) - - // Use the evaluated offset to replace the argument. Ideally, we would - // use the interval assigned above, but the query engine hasn't been changed - // to use the compiler information yet. - expr.Args[1] = &influxql.DurationLiteral{Val: c.Interval.Offset} - case *influxql.StringLiteral: - // If literal looks like a date time then parse it as a time literal. - if lit.IsTimeLiteral() { - t, err := lit.ToTimeLiteral(stmt.Location) - if err != nil { - return err - } - c.Interval.Offset = t.Val.Sub(t.Val.Truncate(c.Interval.Duration)) - } else { - return errors.New("time dimension offset must be duration or now()") - } - default: - return errors.New("time dimension offset must be duration or now()") - } - } - } - case *influxql.Wildcard: - case *influxql.RegexLiteral: - default: - return errors.New("only time and tag dimensions allowed") - } - - // Assign the reduced/changed expression to the dimension. 
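// To recap the time() handling above: `GROUP BY time(10m)` sets
// Interval.Duration to 10m; `GROUP BY time(10m, 1m)` stores the offset modulo
// the duration; `GROUP BY time(10m, now())` derives the offset from now()
// truncated to the duration and rewrites the now() argument into the
// equivalent duration literal.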
- d.Expr = expr - } - return nil -} - -// validateFields validates that the fields are mutually compatible with each other. -// This runs at the end of compilation but before linking. -func (c *compiledStatement) validateFields() error { - // Validate that at least one field has been selected. - if len(c.Fields) == 0 { - return errors.New("at least 1 non-time field must be queried") - } - // Ensure there are not multiple calls if top/bottom is present. - if len(c.FunctionCalls) > 1 && c.TopBottomFunction != "" { - return fmt.Errorf("selector function %s() cannot be combined with other functions", c.TopBottomFunction) - } else if len(c.FunctionCalls) == 0 { - switch c.FillOption { - case influxql.NoFill: - return errors.New("fill(none) must be used with a function") - case influxql.LinearFill: - return errors.New("fill(linear) must be used with a function") - } - if !c.Interval.IsZero() && !c.InheritedInterval { - return errors.New("GROUP BY requires at least one aggregate function") - } - } - // If a distinct() call is present, ensure there is exactly one function. - if c.HasDistinct && (len(c.FunctionCalls) != 1 || c.HasAuxiliaryFields) { - return errors.New("aggregate function distinct() cannot be combined with other functions or fields") - } - // Validate we are using a selector or raw query if auxiliary fields are required. - if c.HasAuxiliaryFields { - if !c.OnlySelectors { - return fmt.Errorf("mixing aggregate and non-aggregate queries is not supported") - } else if len(c.FunctionCalls) > 1 { - return fmt.Errorf("mixing multiple selector functions with tags or fields is not supported") - } - } - return nil -} - -// validateCondition verifies that all elements in the condition are appropriate. -// For example, aggregate calls don't work in the condition and should throw an -// error as an invalid expression. -func (c *compiledStatement) validateCondition(expr influxql.Expr) error { - switch expr := expr.(type) { - case *influxql.BinaryExpr: - // Verify each side of the binary expression. We do not need to - // verify the binary expression itself since that should have been - // done by influxql.ConditionExpr. - if err := c.validateCondition(expr.LHS); err != nil { - return err - } - if err := c.validateCondition(expr.RHS); err != nil { - return err - } - return nil - case *influxql.Call: - if !isMathFunction(expr) { - return fmt.Errorf("invalid function call in condition: %s", expr) - } - - // How many arguments are we expecting? - nargs := 1 - switch expr.Name { - case "atan2", "pow": - nargs = 2 - } - - // Did we get the expected number of args? - if got := len(expr.Args); got != nargs { - return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, nargs, got) - } - - // Are all the args valid? - for _, arg := range expr.Args { - if err := c.validateCondition(arg); err != nil { - return err - } - } - return nil - default: - return nil - } -} - -// subquery compiles and validates a compiled statement for the subquery using -// this compiledStatement as the parent. -func (c *compiledStatement) subquery(stmt *influxql.SelectStatement) error { - subquery := newCompiler(c.Options) - if err := subquery.preprocess(stmt); err != nil { - return err - } - - // Substitute now() into the subquery condition. Then use ConditionExpr to - // validate the expression. Do not store the results. We have no way to store - // and read those results at the moment. 
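// The subquery otherwise inherits context from its parent: an explicit ORDER
// BY must match the outer direction (otherwise the outer direction is simply
// inherited), its time range is intersected with, and if unbounded taken
// from, the parent's, fill(null) is downgraded to fill(none) for interval
// subqueries to skip a redundant fill iterator, and a missing GROUP BY time
// interval is inherited from the outer statement with InheritedInterval set.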
- valuer := influxql.MultiValuer( - &influxql.NowValuer{Now: c.Options.Now, Location: stmt.Location}, - &MathValuer{}, - ) - stmt.Condition = influxql.Reduce(stmt.Condition, valuer) - - // If the ordering is different and the sort field was specified for the subquery, - // throw an error. - if len(stmt.SortFields) != 0 && subquery.Ascending != c.Ascending { - return errors.New("subqueries must be ordered in the same direction as the query itself") - } - subquery.Ascending = c.Ascending - - // Find the intersection between this time range and the parent. - // If the subquery doesn't have a time range, this causes it to - // inherit the parent's time range. - subquery.TimeRange = subquery.TimeRange.Intersect(c.TimeRange) - - // If the fill option is null, set it to none so we don't waste time on - // null values with a redundant fill iterator. - if !subquery.Interval.IsZero() && subquery.FillOption == influxql.NullFill { - subquery.FillOption = influxql.NoFill - } - - // Inherit the grouping interval if the subquery has none. - if !c.Interval.IsZero() && subquery.Interval.IsZero() { - subquery.Interval = c.Interval - subquery.InheritedInterval = true - } - return subquery.compile(stmt) -} - -func (c *compiledStatement) Prepare(ctx context.Context, shardMapper ShardMapper, sopt SelectOptions) (PreparedStatement, error) { - // If this is a query with a grouping, there is a bucket limit, and the minimum time has not been specified, - // we need to limit the possible time range that can be used when mapping shards but not when actually executing - // the select statement. Determine the shard time range here. - timeRange := c.TimeRange - if sopt.MaxBucketsN > 0 && !c.stmt.IsRawQuery && timeRange.MinTimeNano() == influxql.MinTime { - interval, err := c.stmt.GroupByInterval() - if err != nil { - return nil, err - } - - offset, err := c.stmt.GroupByOffset() - if err != nil { - return nil, err - } - - if interval > 0 { - // Determine the last bucket using the end time. - opt := IteratorOptions{ - Interval: Interval{ - Duration: interval, - Offset: offset, - }, - } - last, _ := opt.Window(c.TimeRange.MaxTimeNano() - 1) - - // Determine the time difference using the number of buckets. - // Determine the maximum difference between the buckets based on the end time. - maxDiff := last - models.MinNanoTime - if maxDiff/int64(interval) > int64(sopt.MaxBucketsN) { - timeRange.Min = time.Unix(0, models.MinNanoTime) - } else { - timeRange.Min = time.Unix(0, last-int64(interval)*int64(sopt.MaxBucketsN-1)) - } - } - } - - // Modify the time range if there are extra intervals and an interval. - if !c.Interval.IsZero() && c.ExtraIntervals > 0 { - if c.Ascending { - newTime := timeRange.Min.Add(time.Duration(-c.ExtraIntervals) * c.Interval.Duration) - if !newTime.Before(time.Unix(0, influxql.MinTime).UTC()) { - timeRange.Min = newTime - } else { - timeRange.Min = time.Unix(0, influxql.MinTime).UTC() - } - } else { - newTime := timeRange.Max.Add(time.Duration(c.ExtraIntervals) * c.Interval.Duration) - if !newTime.After(time.Unix(0, influxql.MaxTime).UTC()) { - timeRange.Max = newTime - } else { - timeRange.Max = time.Unix(0, influxql.MaxTime).UTC() - } - } - } - - // Create an iterator creator based on the shards in the cluster. - shards, err := shardMapper.MapShards(ctx, c.stmt.Sources, timeRange, sopt) - if err != nil { - return nil, err - } - - // Rewrite wildcards, if any exist. 
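// For example, `SELECT mean(*) FROM cpu` is expanded here against the fields
// reported by the mapped shards, e.g. into mean(usage_user) and
// mean(usage_system) (hypothetical field names); type validation then runs on
// the rewritten statement.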
- mapper := queryFieldMapper{FieldMapper: newFieldMapperAdapter(shards, ctx)} - stmt, err := c.stmt.RewriteFields(mapper) - if err != nil { - shards.Close() - return nil, err - } - - // Validate if the types are correct now that they have been assigned. - if err := validateTypes(stmt); err != nil { - shards.Close() - return nil, err - } - - // Determine base options for iterators. - opt, err := newIteratorOptionsStmt(stmt, sopt) - if err != nil { - shards.Close() - return nil, err - } - opt.StartTime, opt.EndTime = c.TimeRange.MinTimeNano(), c.TimeRange.MaxTimeNano() - opt.Ascending = c.Ascending - - if sopt.MaxBucketsN > 0 && !stmt.IsRawQuery && c.TimeRange.MinTimeNano() > influxql.MinTime { - interval, err := stmt.GroupByInterval() - if err != nil { - shards.Close() - return nil, err - } - - if interval > 0 { - // Determine the start and end time matched to the interval (may not match the actual times). - first, _ := opt.Window(opt.StartTime) - last, _ := opt.Window(opt.EndTime - 1) - - // Determine the number of buckets by finding the time span and dividing by the interval. - buckets := (last - first + int64(interval)) / int64(interval) - if int(buckets) > sopt.MaxBucketsN { - shards.Close() - return nil, fmt.Errorf("max-select-buckets limit exceeded: (%d/%d)", buckets, sopt.MaxBucketsN) - } - } - } - - columns := stmt.ColumnNames() - return &preparedStatement{ - stmt: stmt, - opt: opt, - ic: shards, - columns: columns, - maxPointN: sopt.MaxPointN, - now: c.Options.Now, - }, nil -} diff --git a/influxql/query/compile_internal_test.go b/influxql/query/compile_internal_test.go deleted file mode 100644 index 379b3c56c9a..00000000000 --- a/influxql/query/compile_internal_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package query - -import ( - "testing" - - "github.com/influxdata/influxql" - "github.com/stretchr/testify/require" -) - -func TestCompile_RewriteSubqueryRegex(t *testing.T) { - q := `SELECT top(mean, 10), host FROM (SELECT mean(value) FROM cpu WHERE id =~ /^(server-1|server-2|server-3)$/ GROUP BY host)` - stmt, err := influxql.ParseStatement(q) - require.NoError(t, err) - s := stmt.(*influxql.SelectStatement) - - compiled, err := Compile(s, CompileOptions{}) - require.NoError(t, err) - - c := compiled.(*compiledStatement) - require.Len(t, c.stmt.Sources, 1) - - subquery := c.stmt.Sources[0] - require.Equal(t, `(SELECT mean(value) FROM cpu WHERE id = 'server-1' OR id = 'server-2' OR id = 'server-3' GROUP BY host)`, subquery.String()) -} diff --git a/influxql/query/compile_test.go b/influxql/query/compile_test.go deleted file mode 100644 index ab2b64f8c29..00000000000 --- a/influxql/query/compile_test.go +++ /dev/null @@ -1,441 +0,0 @@ -package query_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxql" -) - -func TestCompile_Success(t *testing.T) { - for _, tt := range []string{ - `SELECT time, value FROM cpu`, - `SELECT value FROM cpu`, - `SELECT value, host FROM cpu`, - `SELECT * FROM cpu`, - `SELECT time, * FROM cpu`, - `SELECT value, * FROM cpu`, - `SELECT max(value) FROM cpu`, - `SELECT max(value), host FROM cpu`, - `SELECT max(value), * FROM cpu`, - `SELECT max(*) FROM cpu`, - `SELECT max(/val/) FROM cpu`, - `SELECT min(value) FROM cpu`, - `SELECT min(value), host FROM cpu`, - `SELECT min(value), * FROM cpu`, - `SELECT min(*) FROM cpu`, - `SELECT min(/val/) FROM cpu`, - `SELECT first(value) FROM cpu`, - `SELECT first(value), host FROM cpu`, - `SELECT first(value), * FROM 
cpu`, - `SELECT first(*) FROM cpu`, - `SELECT first(/val/) FROM cpu`, - `SELECT last(value) FROM cpu`, - `SELECT last(value), host FROM cpu`, - `SELECT last(value), * FROM cpu`, - `SELECT last(*) FROM cpu`, - `SELECT last(/val/) FROM cpu`, - `SELECT count(value) FROM cpu`, - `SELECT count(distinct(value)) FROM cpu`, - `SELECT count(distinct value) FROM cpu`, - `SELECT count(*) FROM cpu`, - `SELECT count(/val/) FROM cpu`, - `SELECT mean(value) FROM cpu`, - `SELECT mean(*) FROM cpu`, - `SELECT mean(/val/) FROM cpu`, - `SELECT min(value), max(value) FROM cpu`, - `SELECT min(*), max(*) FROM cpu`, - `SELECT min(/val/), max(/val/) FROM cpu`, - `SELECT first(value), last(value) FROM cpu`, - `SELECT first(*), last(*) FROM cpu`, - `SELECT first(/val/), last(/val/) FROM cpu`, - `SELECT count(value) FROM cpu WHERE time >= now() - 1h GROUP BY time(10m)`, - `SELECT distinct value FROM cpu`, - `SELECT distinct(value) FROM cpu`, - `SELECT value / total FROM cpu`, - `SELECT min(value) / total FROM cpu`, - `SELECT max(value) / total FROM cpu`, - `SELECT top(value, 1) FROM cpu`, - `SELECT top(value, host, 1) FROM cpu`, - `SELECT top(value, 1), host FROM cpu`, - `SELECT min(top) FROM (SELECT top(value, host, 1) FROM cpu) GROUP BY region`, - `SELECT bottom(value, 1) FROM cpu`, - `SELECT bottom(value, host, 1) FROM cpu`, - `SELECT bottom(value, 1), host FROM cpu`, - `SELECT max(bottom) FROM (SELECT bottom(value, host, 1) FROM cpu) GROUP BY region`, - `SELECT percentile(value, 75) FROM cpu`, - `SELECT percentile(value, 75.0) FROM cpu`, - `SELECT sample(value, 2) FROM cpu`, - `SELECT sample(*, 2) FROM cpu`, - `SELECT sample(/val/, 2) FROM cpu`, - `SELECT elapsed(value) FROM cpu`, - `SELECT elapsed(value, 10s) FROM cpu`, - `SELECT integral(value) FROM cpu`, - `SELECT integral(value, 10s) FROM cpu`, - `SELECT max(value) FROM cpu WHERE time >= now() - 1m GROUP BY time(10s, 5s)`, - `SELECT max(value) FROM cpu WHERE time >= now() - 1m GROUP BY time(10s, '2000-01-01T00:00:05Z')`, - `SELECT max(value) FROM cpu WHERE time >= now() - 1m GROUP BY time(10s, now())`, - `SELECT max(mean) FROM (SELECT mean(value) FROM cpu GROUP BY host)`, - `SELECT top(mean, 10), host FROM (SELECT mean(value) FROM cpu WHERE id =~ /^(server-1|server-2|server-3)$/ GROUP BY host)`, - `SELECT max(derivative) FROM (SELECT derivative(mean(value)) FROM cpu) WHERE time >= now() - 1m GROUP BY time(10s)`, - `SELECT max(value) FROM (SELECT value + total FROM cpu) WHERE time >= now() - 1m GROUP BY time(10s)`, - `SELECT value FROM cpu WHERE time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T01:00:00Z'`, - `SELECT value FROM (SELECT value FROM cpu) ORDER BY time DESC`, - `SELECT count(distinct(value)), max(value) FROM cpu`, - `SELECT derivative(distinct(value)), difference(distinct(value)) FROM cpu WHERE time >= now() - 1m GROUP BY time(5s)`, - `SELECT moving_average(distinct(value), 3) FROM cpu WHERE time >= now() - 5m GROUP BY time(1m)`, - `SELECT elapsed(distinct(value)) FROM cpu WHERE time >= now() - 5m GROUP BY time(1m)`, - `SELECT cumulative_sum(distinct(value)) FROM cpu WHERE time >= now() - 5m GROUP BY time(1m)`, - `SELECT last(value) / (1 - 0) FROM cpu`, - `SELECT abs(value) FROM cpu`, - `SELECT sin(value) FROM cpu`, - `SELECT cos(value) FROM cpu`, - `SELECT tan(value) FROM cpu`, - `SELECT asin(value) FROM cpu`, - `SELECT acos(value) FROM cpu`, - `SELECT atan(value) FROM cpu`, - `SELECT sqrt(value) FROM cpu`, - `SELECT pow(value, 2) FROM cpu`, - `SELECT pow(value, 3.14) FROM cpu`, - `SELECT pow(2, value) FROM cpu`, - `SELECT pow(3.14, value) 
FROM cpu`, - `SELECT exp(value) FROM cpu`, - `SELECT atan2(value, 0.1) FROM cpu`, - `SELECT atan2(0.2, value) FROM cpu`, - `SELECT atan2(value, 1) FROM cpu`, - `SELECT atan2(2, value) FROM cpu`, - `SELECT ln(value) FROM cpu`, - `SELECT log(value, 2) FROM cpu`, - `SELECT log2(value) FROM cpu`, - `SELECT log10(value) FROM cpu`, - `SELECT sin(value) - sin(1.3) FROM cpu`, - `SELECT value FROM cpu WHERE sin(value) > 0.5`, - `SELECT sum("out")/sum("in") FROM (SELECT derivative("out") AS "out", derivative("in") AS "in" FROM "m0" WHERE time >= now() - 5m GROUP BY "index") GROUP BY time(1m) fill(none)`, - } { - t.Run(tt, func(t *testing.T) { - stmt, err := influxql.ParseStatement(tt) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - s := stmt.(*influxql.SelectStatement) - - opt := query.CompileOptions{} - if _, err := query.Compile(s, opt); err != nil { - t.Errorf("unexpected error: %s", err) - } - }) - } -} - -func TestCompile_Failures(t *testing.T) { - for _, tt := range []struct { - s string - err string - }{ - {s: `SELECT time FROM cpu`, err: `at least 1 non-time field must be queried`}, - {s: `SELECT value, mean(value) FROM cpu`, err: `mixing aggregate and non-aggregate queries is not supported`}, - {s: `SELECT value, max(value), min(value) FROM cpu`, err: `mixing multiple selector functions with tags or fields is not supported`}, - {s: `SELECT top(value, 10), max(value) FROM cpu`, err: `selector function top() cannot be combined with other functions`}, - {s: `SELECT bottom(value, 10), max(value) FROM cpu`, err: `selector function bottom() cannot be combined with other functions`}, - {s: `SELECT count() FROM cpu`, err: `invalid number of arguments for count, expected 1, got 0`}, - {s: `SELECT count(value, host) FROM cpu`, err: `invalid number of arguments for count, expected 1, got 2`}, - {s: `SELECT min() FROM cpu`, err: `invalid number of arguments for min, expected 1, got 0`}, - {s: `SELECT min(value, host) FROM cpu`, err: `invalid number of arguments for min, expected 1, got 2`}, - {s: `SELECT max() FROM cpu`, err: `invalid number of arguments for max, expected 1, got 0`}, - {s: `SELECT max(value, host) FROM cpu`, err: `invalid number of arguments for max, expected 1, got 2`}, - {s: `SELECT sum() FROM cpu`, err: `invalid number of arguments for sum, expected 1, got 0`}, - {s: `SELECT sum(value, host) FROM cpu`, err: `invalid number of arguments for sum, expected 1, got 2`}, - {s: `SELECT first() FROM cpu`, err: `invalid number of arguments for first, expected 1, got 0`}, - {s: `SELECT first(value, host) FROM cpu`, err: `invalid number of arguments for first, expected 1, got 2`}, - {s: `SELECT last() FROM cpu`, err: `invalid number of arguments for last, expected 1, got 0`}, - {s: `SELECT last(value, host) FROM cpu`, err: `invalid number of arguments for last, expected 1, got 2`}, - {s: `SELECT mean() FROM cpu`, err: `invalid number of arguments for mean, expected 1, got 0`}, - {s: `SELECT mean(value, host) FROM cpu`, err: `invalid number of arguments for mean, expected 1, got 2`}, - {s: `SELECT distinct(value), max(value) FROM cpu`, err: `aggregate function distinct() cannot be combined with other functions or fields`}, - {s: `SELECT count(distinct()) FROM cpu`, err: `distinct function requires at least one argument`}, - {s: `SELECT count(distinct(value, host)) FROM cpu`, err: `distinct function can only have one argument`}, - {s: `SELECT count(distinct(2)) FROM cpu`, err: `expected field argument in distinct()`}, - {s: `SELECT value FROM cpu GROUP BY now()`, err: `only 
time() calls allowed in dimensions`}, - {s: `SELECT value FROM cpu GROUP BY time()`, err: `time dimension expected 1 or 2 arguments`}, - {s: `SELECT value FROM cpu GROUP BY time(5m, 30s, 1ms)`, err: `time dimension expected 1 or 2 arguments`}, - {s: `SELECT value FROM cpu GROUP BY time('unexpected')`, err: `time dimension must have duration argument`}, - {s: `SELECT value FROM cpu GROUP BY time(5m), time(1m)`, err: `multiple time dimensions not allowed`}, - {s: `SELECT value FROM cpu GROUP BY time(5m, unexpected())`, err: `time dimension offset function must be now()`}, - {s: `SELECT value FROM cpu GROUP BY time(5m, now(1m))`, err: `time dimension offset now() function requires no arguments`}, - {s: `SELECT value FROM cpu GROUP BY time(5m, 'unexpected')`, err: `time dimension offset must be duration or now()`}, - {s: `SELECT value FROM cpu GROUP BY 'unexpected'`, err: `only time and tag dimensions allowed`}, - {s: `SELECT top(value) FROM cpu`, err: `invalid number of arguments for top, expected at least 2, got 1`}, - {s: `SELECT top('unexpected', 5) FROM cpu`, err: `expected first argument to be a field in top(), found 'unexpected'`}, - {s: `SELECT top(value, 'unexpected', 5) FROM cpu`, err: `only fields or tags are allowed in top(), found 'unexpected'`}, - {s: `SELECT top(value, 2.5) FROM cpu`, err: `expected integer as last argument in top(), found 2.500`}, - {s: `SELECT top(value, -1) FROM cpu`, err: `limit (-1) in top function must be at least 1`}, - {s: `SELECT top(value, 3) FROM cpu LIMIT 2`, err: `limit (3) in top function can not be larger than the LIMIT (2) in the select statement`}, - {s: `SELECT bottom(value) FROM cpu`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, - {s: `SELECT bottom('unexpected', 5) FROM cpu`, err: `expected first argument to be a field in bottom(), found 'unexpected'`}, - {s: `SELECT bottom(value, 'unexpected', 5) FROM cpu`, err: `only fields or tags are allowed in bottom(), found 'unexpected'`}, - {s: `SELECT bottom(value, 2.5) FROM cpu`, err: `expected integer as last argument in bottom(), found 2.500`}, - {s: `SELECT bottom(value, -1) FROM cpu`, err: `limit (-1) in bottom function must be at least 1`}, - {s: `SELECT bottom(value, 3) FROM cpu LIMIT 2`, err: `limit (3) in bottom function can not be larger than the LIMIT (2) in the select statement`}, - // TODO(jsternberg): This query is wrong, but we cannot enforce this because of previous behavior: https://github.com/influxdata/influxdb/pull/8771 - //{s: `SELECT value FROM cpu WHERE time >= now() - 10m OR time < now() - 5m`, err: `cannot use OR with time conditions`}, - {s: `SELECT value FROM cpu WHERE value`, err: `invalid condition expression: value`}, - {s: `SELECT count(value), * FROM cpu`, err: `mixing aggregate and non-aggregate queries is not supported`}, - {s: `SELECT max(*), host FROM cpu`, err: `mixing aggregate and non-aggregate queries is not supported`}, - {s: `SELECT count(value), /ho/ FROM cpu`, err: `mixing aggregate and non-aggregate queries is not supported`}, - {s: `SELECT max(/val/), * FROM cpu`, err: `mixing aggregate and non-aggregate queries is not supported`}, - {s: `SELECT a(value) FROM cpu`, err: `undefined function a()`}, - {s: `SELECT count(max(value)) FROM myseries`, err: `expected field argument in count()`}, - {s: `SELECT count(distinct('value')) FROM myseries`, err: `expected field argument in distinct()`}, - {s: `SELECT distinct('value') FROM myseries`, err: `expected field argument in distinct()`}, - {s: `SELECT min(max(value)) FROM 
myseries`, err: `expected field argument in min()`}, - {s: `SELECT min(distinct(value)) FROM myseries`, err: `expected field argument in min()`}, - {s: `SELECT max(max(value)) FROM myseries`, err: `expected field argument in max()`}, - {s: `SELECT sum(max(value)) FROM myseries`, err: `expected field argument in sum()`}, - {s: `SELECT first(max(value)) FROM myseries`, err: `expected field argument in first()`}, - {s: `SELECT last(max(value)) FROM myseries`, err: `expected field argument in last()`}, - {s: `SELECT mean(max(value)) FROM myseries`, err: `expected field argument in mean()`}, - {s: `SELECT median(max(value)) FROM myseries`, err: `expected field argument in median()`}, - {s: `SELECT mode(max(value)) FROM myseries`, err: `expected field argument in mode()`}, - {s: `SELECT stddev(max(value)) FROM myseries`, err: `expected field argument in stddev()`}, - {s: `SELECT spread(max(value)) FROM myseries`, err: `expected field argument in spread()`}, - {s: `SELECT top() FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 0`}, - {s: `SELECT top(field1) FROM myseries`, err: `invalid number of arguments for top, expected at least 2, got 1`}, - {s: `SELECT top(field1,foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`}, - {s: `SELECT top(field1,host,'server',foo) FROM myseries`, err: `expected integer as last argument in top(), found foo`}, - {s: `SELECT top(field1,5,'server',2) FROM myseries`, err: `only fields or tags are allowed in top(), found 5`}, - {s: `SELECT top(field1,max(foo),'server',2) FROM myseries`, err: `only fields or tags are allowed in top(), found max(foo)`}, - {s: `SELECT top(value, 10) + count(value) FROM myseries`, err: `selector function top() cannot be combined with other functions`}, - {s: `SELECT top(max(value), 10) FROM myseries`, err: `expected first argument to be a field in top(), found max(value)`}, - {s: `SELECT bottom() FROM myseries`, err: `invalid number of arguments for bottom, expected at least 2, got 0`}, - {s: `SELECT bottom(field1) FROM myseries`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, - {s: `SELECT bottom(field1,foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`}, - {s: `SELECT bottom(field1,host,'server',foo) FROM myseries`, err: `expected integer as last argument in bottom(), found foo`}, - {s: `SELECT bottom(field1,5,'server',2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found 5`}, - {s: `SELECT bottom(field1,max(foo),'server',2) FROM myseries`, err: `only fields or tags are allowed in bottom(), found max(foo)`}, - {s: `SELECT bottom(value, 10) + count(value) FROM myseries`, err: `selector function bottom() cannot be combined with other functions`}, - {s: `SELECT bottom(max(value), 10) FROM myseries`, err: `expected first argument to be a field in bottom(), found max(value)`}, - {s: `SELECT top(value, 10), bottom(value, 10) FROM cpu`, err: `selector function top() cannot be combined with other functions`}, - {s: `SELECT bottom(value, 10), top(value, 10) FROM cpu`, err: `selector function bottom() cannot be combined with other functions`}, - {s: `SELECT sample(value) FROM myseries`, err: `invalid number of arguments for sample, expected 2, got 1`}, - {s: `SELECT sample(value, 2, 3) FROM myseries`, err: `invalid number of arguments for sample, expected 2, got 3`}, - {s: `SELECT sample(value, 0) FROM myseries`, err: `sample window must be greater than 1, got 0`}, - {s: `SELECT sample(value, 2.5) 
FROM myseries`, err: `expected integer argument in sample()`}, - {s: `SELECT percentile() FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 0`}, - {s: `SELECT percentile(field1) FROM myseries`, err: `invalid number of arguments for percentile, expected 2, got 1`}, - {s: `SELECT percentile(field1, foo) FROM myseries`, err: `expected float argument in percentile()`}, - {s: `SELECT percentile(max(field1), 75) FROM myseries`, err: `expected field argument in percentile()`}, - {s: `SELECT field1 FROM foo group by time(1s)`, err: `GROUP BY requires at least one aggregate function`}, - {s: `SELECT field1 FROM foo fill(none)`, err: `fill(none) must be used with a function`}, - {s: `SELECT field1 FROM foo fill(linear)`, err: `fill(linear) must be used with a function`}, - {s: `SELECT count(value), value FROM foo`, err: `mixing aggregate and non-aggregate queries is not supported`}, - {s: `SELECT count(value) FROM foo group by time`, err: `time() is a function and expects at least one argument`}, - {s: `SELECT count(value) FROM foo group by 'time'`, err: `only time and tag dimensions allowed`}, - {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time()`, err: `time dimension expected 1 or 2 arguments`}, - {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(b)`, err: `time dimension must have duration argument`}, - {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(1s), time(2s)`, err: `multiple time dimensions not allowed`}, - {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(1s, b)`, err: `time dimension offset must be duration or now()`}, - {s: `SELECT count(value) FROM foo where time > now() and time < now() group by time(1s, '5s')`, err: `time dimension offset must be duration or now()`}, - {s: `SELECT distinct(field1), sum(field1) FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`}, - {s: `SELECT distinct(field1), field2 FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`}, - {s: `SELECT distinct(field1, field2) FROM myseries`, err: `distinct function can only have one argument`}, - {s: `SELECT distinct() FROM myseries`, err: `distinct function requires at least one argument`}, - {s: `SELECT distinct field1, field2 FROM myseries`, err: `aggregate function distinct() cannot be combined with other functions or fields`}, - {s: `SELECT count(distinct field1, field2) FROM myseries`, err: `invalid number of arguments for count, expected 1, got 2`}, - {s: `select count(distinct(too, many, arguments)) from myseries`, err: `distinct function can only have one argument`}, - {s: `select count() from myseries`, err: `invalid number of arguments for count, expected 1, got 0`}, - {s: `SELECT derivative(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, - {s: `select derivative() from myseries`, err: `invalid number of arguments for derivative, expected at least 1 but no more than 2, got 0`}, - {s: `select derivative(mean(value), 1h, 3) from myseries`, err: `invalid number of arguments for derivative, expected at least 1 but no more than 2, got 3`}, - {s: `SELECT derivative(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to derivative`}, - {s: `SELECT derivative(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: 
`invalid number of arguments for top, expected at least 2, got 1`}, - {s: `SELECT derivative(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, - {s: `SELECT derivative(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, - {s: `SELECT derivative(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, - {s: `SELECT derivative(mean(value), 1h) FROM myseries where time < now() and time > now() - 1d`, err: `derivative aggregate requires a GROUP BY interval`}, - {s: `SELECT derivative(value, -2h) FROM myseries`, err: `duration argument must be positive, got -2h`}, - {s: `SELECT derivative(value, 10) FROM myseries`, err: `second argument to derivative must be a duration, got *influxql.IntegerLiteral`}, - {s: `SELECT derivative(f, true) FROM myseries`, err: `second argument to derivative must be a duration, got *influxql.BooleanLiteral`}, - {s: `SELECT non_negative_derivative(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, - {s: `select non_negative_derivative() from myseries`, err: `invalid number of arguments for non_negative_derivative, expected at least 1 but no more than 2, got 0`}, - {s: `select non_negative_derivative(mean(value), 1h, 3) from myseries`, err: `invalid number of arguments for non_negative_derivative, expected at least 1 but no more than 2, got 3`}, - {s: `SELECT non_negative_derivative(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to non_negative_derivative`}, - {s: `SELECT non_negative_derivative(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, - {s: `SELECT non_negative_derivative(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, - {s: `SELECT non_negative_derivative(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, - {s: `SELECT non_negative_derivative(mean(value), 1h) FROM myseries where time < now() and time > now() - 1d`, err: `non_negative_derivative aggregate requires a GROUP BY interval`}, - {s: `SELECT non_negative_derivative(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, - {s: `SELECT non_negative_derivative(value, -2h) FROM myseries`, err: `duration argument must be positive, got -2h`}, - {s: `SELECT non_negative_derivative(value, 10) FROM myseries`, err: `second argument to non_negative_derivative must be a duration, got *influxql.IntegerLiteral`}, - {s: `SELECT difference(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, - {s: `SELECT difference() from myseries`, err: `invalid number of arguments for difference, expected 1, got 0`}, - {s: `SELECT difference(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to difference`}, - {s: `SELECT difference(top(value)) FROM myseries where time < now() and time > now() - 1d group by 
time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, - {s: `SELECT difference(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, - {s: `SELECT difference(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, - {s: `SELECT difference(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, - {s: `SELECT difference(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `difference aggregate requires a GROUP BY interval`}, - {s: `SELECT non_negative_difference(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, - {s: `SELECT non_negative_difference() from myseries`, err: `invalid number of arguments for non_negative_difference, expected 1, got 0`}, - {s: `SELECT non_negative_difference(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to non_negative_difference`}, - {s: `SELECT non_negative_difference(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, - {s: `SELECT non_negative_difference(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, - {s: `SELECT non_negative_difference(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, - {s: `SELECT non_negative_difference(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, - {s: `SELECT non_negative_difference(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `non_negative_difference aggregate requires a GROUP BY interval`}, - {s: `SELECT elapsed() FROM myseries`, err: `invalid number of arguments for elapsed, expected at least 1 but no more than 2, got 0`}, - {s: `SELECT elapsed(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to elapsed`}, - {s: `SELECT elapsed(value, 1s, host) FROM myseries`, err: `invalid number of arguments for elapsed, expected at least 1 but no more than 2, got 3`}, - {s: `SELECT elapsed(value, 0s) FROM myseries`, err: `duration argument must be positive, got 0s`}, - {s: `SELECT elapsed(value, -10s) FROM myseries`, err: `duration argument must be positive, got -10s`}, - {s: `SELECT elapsed(value, 10) FROM myseries`, err: `second argument to elapsed must be a duration, got *influxql.IntegerLiteral`}, - {s: `SELECT elapsed(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, - {s: `SELECT elapsed(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, - {s: `SELECT elapsed(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, - {s: `SELECT elapsed(percentile(value)) FROM myseries where time 
< now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, - {s: `SELECT elapsed(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `elapsed aggregate requires a GROUP BY interval`}, - {s: `SELECT moving_average(field1, 2), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, - {s: `SELECT moving_average(field1, 1), field1 FROM myseries`, err: `moving_average window must be greater than 1, got 1`}, - {s: `SELECT moving_average(field1, 0), field1 FROM myseries`, err: `moving_average window must be greater than 1, got 0`}, - {s: `SELECT moving_average(field1, -1), field1 FROM myseries`, err: `moving_average window must be greater than 1, got -1`}, - {s: `SELECT moving_average(field1, 2.0), field1 FROM myseries`, err: `second argument for moving_average must be an integer, got *influxql.NumberLiteral`}, - {s: `SELECT moving_average() from myseries`, err: `invalid number of arguments for moving_average, expected 2, got 0`}, - {s: `SELECT moving_average(value) FROM myseries`, err: `invalid number of arguments for moving_average, expected 2, got 1`}, - {s: `SELECT moving_average(value, 2) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to moving_average`}, - {s: `SELECT moving_average(top(value), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, - {s: `SELECT moving_average(bottom(value), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, - {s: `SELECT moving_average(max(), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, - {s: `SELECT moving_average(percentile(value), 2) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, - {s: `SELECT moving_average(mean(value), 2) FROM myseries where time < now() and time > now() - 1d`, err: `moving_average aggregate requires a GROUP BY interval`}, - {s: `SELECT cumulative_sum(field1), field1 FROM myseries`, err: `mixing aggregate and non-aggregate queries is not supported`}, - {s: `SELECT cumulative_sum() from myseries`, err: `invalid number of arguments for cumulative_sum, expected 1, got 0`}, - {s: `SELECT cumulative_sum(value) FROM myseries group by time(1h)`, err: `aggregate function required inside the call to cumulative_sum`}, - {s: `SELECT cumulative_sum(top(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for top, expected at least 2, got 1`}, - {s: `SELECT cumulative_sum(bottom(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for bottom, expected at least 2, got 1`}, - {s: `SELECT cumulative_sum(max()) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for max, expected 1, got 0`}, - {s: `SELECT cumulative_sum(percentile(value)) FROM myseries where time < now() and time > now() - 1d group by time(1h)`, err: `invalid number of arguments for percentile, expected 2, got 1`}, - {s: `SELECT cumulative_sum(mean(value)) FROM myseries where time < now() and time > now() - 1d`, err: `cumulative_sum aggregate requires a GROUP 
BY interval`}, - {s: `SELECT integral() FROM myseries`, err: `invalid number of arguments for integral, expected at least 1 but no more than 2, got 0`}, - {s: `SELECT integral(value, 10s, host) FROM myseries`, err: `invalid number of arguments for integral, expected at least 1 but no more than 2, got 3`}, - {s: `SELECT integral(value, -10s) FROM myseries`, err: `duration argument must be positive, got -10s`}, - {s: `SELECT integral(value, 10) FROM myseries`, err: `second argument must be a duration`}, - {s: `SELECT holt_winters(value) FROM myseries where time < now() and time > now() - 1d`, err: `invalid number of arguments for holt_winters, expected 3, got 1`}, - {s: `SELECT holt_winters(value, 10, 2) FROM myseries where time < now() and time > now() - 1d`, err: `must use aggregate function with holt_winters`}, - {s: `SELECT holt_winters(min(value), 10, 2) FROM myseries where time < now() and time > now() - 1d`, err: `holt_winters aggregate requires a GROUP BY interval`}, - {s: `SELECT holt_winters(min(value), 0, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `second arg to holt_winters must be greater than 0, got 0`}, - {s: `SELECT holt_winters(min(value), false, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as second arg in holt_winters`}, - {s: `SELECT holt_winters(min(value), 10, 'string') FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as third arg in holt_winters`}, - {s: `SELECT holt_winters(min(value), 10, -1) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `third arg to holt_winters cannot be negative, got -1`}, - {s: `SELECT holt_winters_with_fit(value) FROM myseries where time < now() and time > now() - 1d`, err: `invalid number of arguments for holt_winters_with_fit, expected 3, got 1`}, - {s: `SELECT holt_winters_with_fit(value, 10, 2) FROM myseries where time < now() and time > now() - 1d`, err: `must use aggregate function with holt_winters_with_fit`}, - {s: `SELECT holt_winters_with_fit(min(value), 10, 2) FROM myseries where time < now() and time > now() - 1d`, err: `holt_winters_with_fit aggregate requires a GROUP BY interval`}, - {s: `SELECT holt_winters_with_fit(min(value), 0, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `second arg to holt_winters_with_fit must be greater than 0, got 0`}, - {s: `SELECT holt_winters_with_fit(min(value), false, 2) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as second arg in holt_winters_with_fit`}, - {s: `SELECT holt_winters_with_fit(min(value), 10, 'string') FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `expected integer argument as third arg in holt_winters_with_fit`}, - {s: `SELECT holt_winters_with_fit(min(value), 10, -1) FROM myseries where time < now() and time > now() - 1d GROUP BY time(1d)`, err: `third arg to holt_winters_with_fit cannot be negative, got -1`}, - {s: `SELECT mean(value) + value FROM cpu WHERE time < now() and time > now() - 1h GROUP BY time(10m)`, err: `mixing aggregate and non-aggregate queries is not supported`}, - // TODO: Remove this restriction in the future: https://github.com/influxdata/influxdb/issues/5968 - {s: `SELECT mean(cpu_total - cpu_idle) FROM cpu`, err: `expected field argument in mean()`}, - {s: `SELECT derivative(mean(cpu_total - cpu_idle), 1s) FROM cpu WHERE time < 
now() AND time > now() - 1d GROUP BY time(1h)`, err: `expected field argument in mean()`}, - // TODO: The error message will change when math is allowed inside an aggregate: https://github.com/influxdata/influxdb/pull/5990#issuecomment-195565870 - {s: `SELECT count(foo + sum(bar)) FROM cpu`, err: `expected field argument in count()`}, - {s: `SELECT (count(foo + sum(bar))) FROM cpu`, err: `expected field argument in count()`}, - {s: `SELECT sum(value) + count(foo + sum(bar)) FROM cpu`, err: `expected field argument in count()`}, - {s: `SELECT top(value, 2), max(value) FROM cpu`, err: `selector function top() cannot be combined with other functions`}, - {s: `SELECT bottom(value, 2), max(value) FROM cpu`, err: `selector function bottom() cannot be combined with other functions`}, - {s: `SELECT min(derivative) FROM (SELECT derivative(mean(value), 1h) FROM myseries) where time < now() and time > now() - 1d`, err: `derivative aggregate requires a GROUP BY interval`}, - {s: `SELECT min(mean) FROM (SELECT mean(value) FROM myseries GROUP BY time)`, err: `time() is a function and expects at least one argument`}, - {s: `SELECT value FROM myseries WHERE value OR time >= now() - 1m`, err: `invalid condition expression: value`}, - {s: `SELECT value FROM myseries WHERE time >= now() - 1m OR value`, err: `invalid condition expression: value`}, - {s: `SELECT value FROM (SELECT value FROM cpu ORDER BY time DESC) ORDER BY time ASC`, err: `subqueries must be ordered in the same direction as the query itself`}, - {s: `SELECT sin(value, 3) FROM cpu`, err: `invalid number of arguments for sin, expected 1, got 2`}, - {s: `SELECT cos(2.3, value, 3) FROM cpu`, err: `invalid number of arguments for cos, expected 1, got 3`}, - {s: `SELECT tan(value, 3) FROM cpu`, err: `invalid number of arguments for tan, expected 1, got 2`}, - {s: `SELECT asin(value, 3) FROM cpu`, err: `invalid number of arguments for asin, expected 1, got 2`}, - {s: `SELECT acos(value, 3.2) FROM cpu`, err: `invalid number of arguments for acos, expected 1, got 2`}, - {s: `SELECT atan() FROM cpu`, err: `invalid number of arguments for atan, expected 1, got 0`}, - {s: `SELECT sqrt(42, 3, 4) FROM cpu`, err: `invalid number of arguments for sqrt, expected 1, got 3`}, - {s: `SELECT abs(value, 3) FROM cpu`, err: `invalid number of arguments for abs, expected 1, got 2`}, - {s: `SELECT ln(value, 3) FROM cpu`, err: `invalid number of arguments for ln, expected 1, got 2`}, - {s: `SELECT log2(value, 3) FROM cpu`, err: `invalid number of arguments for log2, expected 1, got 2`}, - {s: `SELECT log10(value, 3) FROM cpu`, err: `invalid number of arguments for log10, expected 1, got 2`}, - {s: `SELECT pow(value, 3, 3) FROM cpu`, err: `invalid number of arguments for pow, expected 2, got 3`}, - {s: `SELECT atan2(value, 3, 3) FROM cpu`, err: `invalid number of arguments for atan2, expected 2, got 3`}, - {s: `SELECT sin(1.3) FROM cpu`, err: `field must contain at least one variable`}, - {s: `SELECT nofunc(1.3) FROM cpu`, err: `undefined function nofunc()`}, - {s: `SELECT * FROM cpu WHERE ( host =~ /foo/ ^ other AND env =~ /bar/ ) and time >= now()-15m`, err: `likely malformed statement, unable to rewrite: interface conversion: influxql.Expr is *influxql.BinaryExpr, not *influxql.RegexLiteral`}, - } { - t.Run(tt.s, func(t *testing.T) { - stmt, err := influxql.ParseStatement(tt.s) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - s := stmt.(*influxql.SelectStatement) - - opt := query.CompileOptions{} - if _, err := query.Compile(s, opt); err == nil 
{ - t.Error("expected error") - } else if have, want := err.Error(), tt.err; have != want { - t.Errorf("unexpected error: %s != %s", have, want) - } - }) - } -} - -func TestPrepare_MapShardsTimeRange(t *testing.T) { - for _, tt := range []struct { - s string - start, end string - }{ - { - s: `SELECT max(value) FROM cpu WHERE time >= '2018-09-03T15:00:00Z' AND time <= '2018-09-03T16:00:00Z' GROUP BY time(10m)`, - start: "2018-09-03T15:00:00Z", - end: "2018-09-03T16:00:00Z", - }, - { - s: `SELECT derivative(mean(value)) FROM cpu WHERE time >= '2018-09-03T15:00:00Z' AND time <= '2018-09-03T16:00:00Z' GROUP BY time(10m)`, - start: "2018-09-03T14:50:00Z", - end: "2018-09-03T16:00:00Z", - }, - { - s: `SELECT moving_average(mean(value), 3) FROM cpu WHERE time >= '2018-09-03T15:00:00Z' AND time <= '2018-09-03T16:00:00Z' GROUP BY time(10m)`, - start: "2018-09-03T14:30:00Z", - end: "2018-09-03T16:00:00Z", - }, - { - s: `SELECT moving_average(mean(value), 3) FROM cpu WHERE time <= '2018-09-03T16:00:00Z' GROUP BY time(10m)`, - start: "1677-09-21T00:12:43.145224194Z", - end: "2018-09-03T16:00:00Z", - }, - } { - t.Run(tt.s, func(t *testing.T) { - stmt, err := influxql.ParseStatement(tt.s) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - s := stmt.(*influxql.SelectStatement) - - opt := query.CompileOptions{} - c, err := query.Compile(s, opt) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - shardMapper := ShardMapper{ - MapShardsFn: func(_ context.Context, _ influxql.Sources, tr influxql.TimeRange) query.ShardGroup { - if got, want := tr.Min, mustParseTime(tt.start); !got.Equal(want) { - t.Errorf("unexpected start time: got=%s want=%s", got, want) - } - if got, want := tr.Max, mustParseTime(tt.end); !got.Equal(want) { - t.Errorf("unexpected end time: got=%s want=%s", got, want) - } - return &ShardGroup{} - }, - } - - if _, err := c.Prepare(context.Background(), &shardMapper, query.SelectOptions{}); err != nil { - t.Fatalf("unexpected error: %s", err) - } - }) - } -} diff --git a/influxql/query/cursor.go b/influxql/query/cursor.go deleted file mode 100644 index 03ff56d267f..00000000000 --- a/influxql/query/cursor.go +++ /dev/null @@ -1,447 +0,0 @@ -package query - -import ( - "math" - "time" - - "github.com/influxdata/influxql" -) - -var NullFloat interface{} = (*float64)(nil) - -// Series represents the metadata about a series. -type Series struct { - // Name is the measurement name. - Name string - - // Tags for the series. - Tags Tags - - // This is an internal id used to easily compare if a series is the - // same as another series. Whenever the internal cursor changes - // to a new series, this id gets incremented. It is not exposed to - // the user so we can implement this in whatever way we want. - // If a series is not generated by a cursor, this id is zero and - // it will instead attempt to compare the name and tags. - id uint64 -} - -// SameSeries checks if this is the same series as another one. -// It does not necessarily check for equality so this is different from -// checking to see if the name and tags are the same. It checks whether -// the two are part of the same series in the response. -func (s Series) SameSeries(other Series) bool { - if s.id != 0 && other.id != 0 { - return s.id == other.id - } - return s.Name == other.Name && s.Tags.ID() == other.Tags.ID() -} - -// Equal checks to see if the Series are identical. 
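// Unlike SameSeries, which trusts the internal ids outright when both are
// non-zero, Equal still falls back to comparing the measurement name and
// Tags.ID() when the ids differ, since two different cursors can emit the
// same logical series.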
-func (s Series) Equal(other Series) bool { - if s.id != 0 && other.id != 0 { - // If the ids are the same, then we can short-circuit and assume they - // are the same. If they are not the same, do the long check since - // they may still be identical, but not necessarily generated from - // the same cursor. - if s.id == other.id { - return true - } - } - return s.Name == other.Name && s.Tags.ID() == other.Tags.ID() -} - -// Row represents a single row returned by the query engine. -type Row struct { - // Time returns the time for this row. If the cursor was created to - // return time as one of the values, the time will also be included as - // a time.Time in the appropriate column within Values. - // This ensures that time is always present in the Row structure - // even if it hasn't been requested in the output. - Time int64 - - // Series contains the series metadata for this row. - Series Series - - // Values contains the values within the current row. - Values []interface{} -} - -type Cursor interface { - // Scan will retrieve the next row and assign the result to - // the passed in Row. If the Row has not been initialized, the Cursor - // will initialize the Row. - // To increase speed and memory usage, the same Row can be used and - // the previous values will be overwritten while using the same memory. - Scan(row *Row) bool - - // Stats returns the IteratorStats from the underlying iterators. - Stats() IteratorStats - - // Err returns any errors that were encountered from scanning the rows. - Err() error - - // Columns returns the column names and types. - Columns() []influxql.VarRef - - // Close closes the underlying resources that the cursor is using. - Close() error -} - -// RowCursor returns a Cursor that iterates over Rows. -func RowCursor(rows []Row, columns []influxql.VarRef) Cursor { - return &rowCursor{ - rows: rows, - columns: columns, - } -} - -type rowCursor struct { - rows []Row - columns []influxql.VarRef - - series Series -} - -func (cur *rowCursor) Scan(row *Row) bool { - if len(cur.rows) == 0 { - return false - } - - *row = cur.rows[0] - if row.Series.Name != cur.series.Name || !row.Series.Tags.Equals(&cur.series.Tags) { - cur.series.Name = row.Series.Name - cur.series.Tags = row.Series.Tags - cur.series.id++ - } - cur.rows = cur.rows[1:] - return true -} - -func (cur *rowCursor) Stats() IteratorStats { - return IteratorStats{} -} - -func (cur *rowCursor) Err() error { - return nil -} - -func (cur *rowCursor) Columns() []influxql.VarRef { - return cur.columns -} - -func (cur *rowCursor) Close() error { - return nil -} - -type scannerFunc func(m map[string]interface{}) (int64, string, Tags) - -type scannerCursorBase struct { - fields []influxql.Expr - m map[string]interface{} - - series Series - columns []influxql.VarRef - loc *time.Location - - scan scannerFunc - valuer influxql.ValuerEval -} - -func newScannerCursorBase(scan scannerFunc, fields []*influxql.Field, loc *time.Location) scannerCursorBase { - typmap := FunctionTypeMapper{} - exprs := make([]influxql.Expr, len(fields)) - columns := make([]influxql.VarRef, len(fields)) - for i, f := range fields { - exprs[i] = f.Expr - columns[i] = influxql.VarRef{ - Val: f.Name(), - Type: influxql.EvalType(f.Expr, nil, typmap), - } - } - if loc == nil { - loc = time.UTC - } - - m := make(map[string]interface{}) - return scannerCursorBase{ - fields: exprs, - m: m, - columns: columns, - loc: loc, - scan: scan, - valuer: influxql.ValuerEval{ - Valuer: influxql.MultiValuer( - MathValuer{}, - influxql.MapValuer(m), - ), - 
IntegerFloatDivision: true, - }, - } -} - -func (cur *scannerCursorBase) Scan(row *Row) bool { - ts, name, tags := cur.scan(cur.m) - if ts == ZeroTime { - return false - } - - row.Time = ts - if name != cur.series.Name || tags.ID() != cur.series.Tags.ID() { - cur.series.Name = name - cur.series.Tags = tags - cur.series.id++ - } - row.Series = cur.series - - if len(cur.columns) > len(row.Values) { - row.Values = make([]interface{}, len(cur.columns)) - } - - for i, expr := range cur.fields { - // A special case if the field is time to reduce memory allocations. - if ref, ok := expr.(*influxql.VarRef); ok && ref.Val == "time" { - row.Values[i] = time.Unix(0, row.Time).In(cur.loc) - continue - } - v := cur.valuer.Eval(expr) - if fv, ok := v.(float64); ok && math.IsNaN(fv) { - // If the float value is NaN, convert it to a null float - // so this can be serialized correctly, but not mistaken for - // a null value that needs to be filled. - v = NullFloat - } - row.Values[i] = v - } - return true -} - -func (cur *scannerCursorBase) Columns() []influxql.VarRef { - return cur.columns -} - -func (cur *scannerCursorBase) clear(m map[string]interface{}) { - for k := range m { - delete(m, k) - } -} - -var _ Cursor = (*scannerCursor)(nil) - -type scannerCursor struct { - scanner IteratorScanner - scannerCursorBase -} - -func newScannerCursor(s IteratorScanner, fields []*influxql.Field, opt IteratorOptions) *scannerCursor { - cur := &scannerCursor{scanner: s} - cur.scannerCursorBase = newScannerCursorBase(cur.scan, fields, opt.Location) - return cur -} - -func (s *scannerCursor) scan(m map[string]interface{}) (int64, string, Tags) { - ts, name, tags := s.scanner.Peek() - // if a new series, clear the map of previous values - if name != s.series.Name || tags.ID() != s.series.Tags.ID() { - s.clear(m) - } - if ts == ZeroTime { - return ts, name, tags - } - s.scanner.ScanAt(ts, name, tags, m) - return ts, name, tags -} - -func (cur *scannerCursor) Stats() IteratorStats { - return cur.scanner.Stats() -} - -func (cur *scannerCursor) Err() error { - return cur.scanner.Err() -} - -func (cur *scannerCursor) Close() error { - return cur.scanner.Close() -} - -var _ Cursor = (*multiScannerCursor)(nil) - -type multiScannerCursor struct { - scanners []IteratorScanner - err error - ascending bool - scannerCursorBase -} - -func newMultiScannerCursor(scanners []IteratorScanner, fields []*influxql.Field, opt IteratorOptions) *multiScannerCursor { - cur := &multiScannerCursor{ - scanners: scanners, - ascending: opt.Ascending, - } - cur.scannerCursorBase = newScannerCursorBase(cur.scan, fields, opt.Location) - return cur -} - -func (cur *multiScannerCursor) scan(m map[string]interface{}) (ts int64, name string, tags Tags) { - ts = ZeroTime - for _, s := range cur.scanners { - curTime, curName, curTags := s.Peek() - if curTime == ZeroTime { - if err := s.Err(); err != nil { - cur.err = err - return ZeroTime, "", Tags{} - } - continue - } - - if ts == ZeroTime { - ts, name, tags = curTime, curName, curTags - continue - } - - if cur.ascending { - if (curName < name) || (curName == name && curTags.ID() < tags.ID()) || (curName == name && curTags.ID() == tags.ID() && curTime < ts) { - ts, name, tags = curTime, curName, curTags - } - continue - } - - if (curName > name) || (curName == name && curTags.ID() > tags.ID()) || (curName == name && curTags.ID() == tags.ID() && curTime > ts) { - ts, name, tags = curTime, curName, curTags - } - } - - if ts == ZeroTime { - return ts, name, tags - } - // if a new series, clear the map of 
previous values - if name != cur.series.Name || tags.ID() != cur.series.Tags.ID() { - cur.clear(m) - } - for _, s := range cur.scanners { - s.ScanAt(ts, name, tags, m) - } - return ts, name, tags -} - -func (cur *multiScannerCursor) Stats() IteratorStats { - var stats IteratorStats - for _, s := range cur.scanners { - stats.Add(s.Stats()) - } - return stats -} - -func (cur *multiScannerCursor) Err() error { - return cur.err -} - -func (cur *multiScannerCursor) Close() error { - var err error - for _, s := range cur.scanners { - if e := s.Close(); e != nil && err == nil { - err = e - } - } - return err -} - -type filterCursor struct { - Cursor - // fields holds the mapping of field names to the index in the row - // based off of the column metadata. This only contains the fields - // we need and will exclude the ones we do not. - fields map[string]IteratorMap - filter influxql.Expr - m map[string]interface{} - valuer influxql.ValuerEval -} - -func newFilterCursor(cur Cursor, filter influxql.Expr) *filterCursor { - fields := make(map[string]IteratorMap) - for _, name := range influxql.ExprNames(filter) { - for i, col := range cur.Columns() { - if name.Val == col.Val { - fields[name.Val] = FieldMap{ - Index: i, - Type: name.Type, - } - break - } - } - - // If the field is not a column, assume it is a tag value. - // We do not know what the tag values will be, but there really - // isn't any different between NullMap and a TagMap that's pointed - // at the wrong location for the purposes described here. - if _, ok := fields[name.Val]; !ok { - fields[name.Val] = TagMap(name.Val) - } - } - m := make(map[string]interface{}) - return &filterCursor{ - Cursor: cur, - fields: fields, - filter: filter, - m: m, - valuer: influxql.ValuerEval{Valuer: influxql.MapValuer(m)}, - } -} - -func (cur *filterCursor) Scan(row *Row) bool { - for cur.Cursor.Scan(row) { - // Use the field mappings to prepare the map for the valuer. - for name, f := range cur.fields { - cur.m[name] = f.Value(row) - } - - if cur.valuer.EvalBool(cur.filter) { - // Passes the filter! Return true. We no longer need to - // search for a suitable value. - return true - } - } - return false -} - -type nullCursor struct { - columns []influxql.VarRef -} - -func newNullCursor(fields []*influxql.Field) *nullCursor { - columns := make([]influxql.VarRef, len(fields)) - for i, f := range fields { - columns[i].Val = f.Name() - } - return &nullCursor{columns: columns} -} - -func (cur *nullCursor) Scan(row *Row) bool { - return false -} - -func (cur *nullCursor) Stats() IteratorStats { - return IteratorStats{} -} - -func (cur *nullCursor) Err() error { - return nil -} - -func (cur *nullCursor) Columns() []influxql.VarRef { - return cur.columns -} - -func (cur *nullCursor) Close() error { - return nil -} - -// DrainCursor will read and discard all values from a Cursor and return the error -// if one happens. -func DrainCursor(cur Cursor) error { - var row Row - for cur.Scan(&row) { - // Do nothing with the result. - } - return cur.Err() -} diff --git a/influxql/query/emitter.go b/influxql/query/emitter.go deleted file mode 100644 index d07c4794f34..00000000000 --- a/influxql/query/emitter.go +++ /dev/null @@ -1,81 +0,0 @@ -package query - -import ( - "github.com/influxdata/influxdb/v2/models" -) - -// Emitter reads from a cursor into rows. -type Emitter struct { - cur Cursor - chunkSize int - - series Series - row *models.Row - columns []string -} - -// NewEmitter returns a new instance of Emitter that pulls from itrs. 
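// A minimal usage sketch (illustrative only; cur is any Cursor and the chunk
// size of 1000 is arbitrary):
//
//    em := NewEmitter(cur, 1000)
//    defer em.Close()
//    for {
//        row, more, err := em.Emit()
//        if err != nil || row == nil {
//            break // row == nil means the cursor is exhausted
//        }
//        _ = more // false when this is the last row for the query
//    }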
-func NewEmitter(cur Cursor, chunkSize int) *Emitter { - columns := make([]string, len(cur.Columns())) - for i, col := range cur.Columns() { - columns[i] = col.Val - } - return &Emitter{ - cur: cur, - chunkSize: chunkSize, - columns: columns, - } -} - -// Close closes the underlying iterators. -func (e *Emitter) Close() error { - return e.cur.Close() -} - -// Emit returns the next row from the iterators. -func (e *Emitter) Emit() (*models.Row, bool, error) { - // Continually read from the cursor until it is exhausted. - for { - // Scan the next row. If there are no rows left, return the current row. - var row Row - if !e.cur.Scan(&row) { - if err := e.cur.Err(); err != nil { - return nil, false, err - } - r := e.row - e.row = nil - return r, false, nil - } - - // If there's no row yet then create one. - // If the name and tags match the existing row, append to that row if - // the number of values doesn't exceed the chunk size. - // Otherwise return existing row and add values to next emitted row. - if e.row == nil { - e.createRow(row.Series, row.Values) - } else if e.series.SameSeries(row.Series) { - if e.chunkSize > 0 && len(e.row.Values) >= e.chunkSize { - r := e.row - r.Partial = true - e.createRow(row.Series, row.Values) - return r, true, nil - } - e.row.Values = append(e.row.Values, row.Values) - } else { - r := e.row - e.createRow(row.Series, row.Values) - return r, true, nil - } - } -} - -// createRow creates a new row attached to the emitter. -func (e *Emitter) createRow(series Series, values []interface{}) { - e.series = series - e.row = &models.Row{ - Name: series.Name, - Tags: series.Tags.KeyValues(), - Columns: e.columns, - Values: [][]interface{}{values}, - } -} diff --git a/influxql/query/execution_context.go b/influxql/query/execution_context.go deleted file mode 100644 index 9359ebf6483..00000000000 --- a/influxql/query/execution_context.go +++ /dev/null @@ -1,34 +0,0 @@ -package query - -import ( - "context" - - iql "github.com/influxdata/influxdb/v2/influxql" -) - -// ExecutionContext contains state that the query is currently executing with. -type ExecutionContext struct { - // The statement ID of the executing query. - statementID int - - // Output channel where results and errors should be sent. - Results chan *Result - - // StatisticsGatherer gathers metrics about the execution of a query. - StatisticsGatherer *iql.StatisticsGatherer - - // Options used to start this query. - ExecutionOptions -} - -// Send sends a Result to the Results channel and will exit if the query has -// been interrupted or aborted. 
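// Callers in the executor below stop processing the statement once Send
// reports that the context was canceled, e.g.:
//
//    if err := ectx.Send(ctx, &Result{Err: err}); err != nil {
//        return
//    }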
-func (ectx *ExecutionContext) Send(ctx context.Context, result *Result) error { - result.StatementID = ectx.statementID - select { - case <-ctx.Done(): - return ctx.Err() - case ectx.Results <- result: - } - return nil -} diff --git a/influxql/query/executor.go b/influxql/query/executor.go deleted file mode 100644 index 1ddd5b04aaf..00000000000 --- a/influxql/query/executor.go +++ /dev/null @@ -1,357 +0,0 @@ -package query - -import ( - "context" - "errors" - "fmt" - "os" - "runtime/debug" - "strconv" - "time" - - iql "github.com/influxdata/influxdb/v2/influxql" - "github.com/influxdata/influxdb/v2/influxql/control" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxql" - "github.com/opentracing/opentracing-go/log" - "go.uber.org/zap" -) - -var ( - // ErrInvalidQuery is returned when executing an unknown query type. - ErrInvalidQuery = errors.New("invalid query") - - // ErrNotExecuted is returned when a statement is not executed in a query. - // This can occur when a previous statement in the same query has errored. - ErrNotExecuted = errors.New("not executed") - - // ErrQueryInterrupted is an error returned when the query is interrupted. - ErrQueryInterrupted = errors.New("query interrupted") -) - -const ( - // PanicCrashEnv is the environment variable that, when set, will prevent - // the handler from recovering any panics. - PanicCrashEnv = "INFLUXDB_PANIC_CRASH" -) - -// ErrDatabaseNotFound returns a database not found error for the given database name. -func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) } - -// ErrMaxSelectPointsLimitExceeded is an error when a query hits the maximum number of points. -func ErrMaxSelectPointsLimitExceeded(n, limit int) error { - return fmt.Errorf("max-select-point limit exceeed: (%d/%d)", n, limit) -} - -// ErrMaxConcurrentQueriesLimitExceeded is an error when a query cannot be run -// because the maximum number of queries has been reached. -func ErrMaxConcurrentQueriesLimitExceeded(n, limit int) error { - return fmt.Errorf("max-concurrent-queries limit exceeded(%d, %d)", n, limit) -} - -// Authorizer determines if certain operations are authorized. -type Authorizer interface { - // AuthorizeDatabase indicates whether the given Privilege is authorized on the database with the given name. - AuthorizeDatabase(p influxql.Privilege, name string) bool - - // AuthorizeQuery returns an error if the query cannot be executed - AuthorizeQuery(database string, query *influxql.Query) error - - // AuthorizeSeriesRead determines if a series is authorized for reading - AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool - - // AuthorizeSeriesWrite determines if a series is authorized for writing - AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool -} - -// OpenAuthorizer is the Authorizer used when authorization is disabled. -// It allows all operations. -type openAuthorizer struct{} - -// OpenAuthorizer can be shared by all goroutines. -var OpenAuthorizer = openAuthorizer{} - -// AuthorizeDatabase returns true to allow any operation on a database. -func (a openAuthorizer) AuthorizeDatabase(influxql.Privilege, string) bool { return true } - -// AuthorizeSeriesRead allows access to any series. 
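// As with the other openAuthorizer methods, the two series checks below return
// true unconditionally; AuthorizerIsOpen reports true for OpenAuthorizer as
// well as for a nil Authorizer, and should be preferred over a nil comparison.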
-func (a openAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool { - return true -} - -// AuthorizeSeriesWrite allows access to any series. -func (a openAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool { - return true -} - -// AuthorizeSeriesRead allows any query to execute. -func (a openAuthorizer) AuthorizeQuery(_ string, _ *influxql.Query) error { return nil } - -// AuthorizerIsOpen returns true if the provided Authorizer is guaranteed to -// authorize anything. A nil Authorizer returns true for this function, and this -// function should be preferred over directly checking if an Authorizer is nil -// or not. -func AuthorizerIsOpen(a Authorizer) bool { - if u, ok := a.(interface{ AuthorizeUnrestricted() bool }); ok { - return u.AuthorizeUnrestricted() - } - return a == nil || a == OpenAuthorizer -} - -// ExecutionOptions contains the options for executing a query. -type ExecutionOptions struct { - // OrgID is the organization for which this query is being executed. - OrgID platform.ID - - // The database the query is running against. - Database string - - // The retention policy the query is running against. - RetentionPolicy string - - // How to determine whether the query is allowed to execute, - // what resources can be returned in SHOW queries, etc. - Authorizer Authorizer - - // The requested maximum number of points to return in each result. - ChunkSize int - - // If this query is being executed in a read-only context. - ReadOnly bool - - // Node to execute on. - NodeID uint64 - - // Quiet suppresses non-essential output from the query executor. - Quiet bool -} - -type ( - iteratorsContextKey struct{} -) - -// NewContextWithIterators returns a new context.Context with the *Iterators slice added. -// The query planner will add instances of AuxIterator to the Iterators slice. -func NewContextWithIterators(ctx context.Context, itr *Iterators) context.Context { - return context.WithValue(ctx, iteratorsContextKey{}, itr) -} - -// StatementExecutor executes a statement within the Executor. -type StatementExecutor interface { - // ExecuteStatement executes a statement. Results should be sent to the - // results channel in the ExecutionContext. - ExecuteStatement(ctx context.Context, stmt influxql.Statement, ectx *ExecutionContext) error -} - -// StatementNormalizer normalizes a statement before it is executed. -type StatementNormalizer interface { - // NormalizeStatement adds a default database and policy to the - // measurements in the statement. - NormalizeStatement(ctx context.Context, stmt influxql.Statement, database, retentionPolicy string, ectx *ExecutionContext) error -} - -var ( - nullNormalizer StatementNormalizer = &nullNormalizerImpl{} -) - -type nullNormalizerImpl struct{} - -func (n *nullNormalizerImpl) NormalizeStatement(ctx context.Context, stmt influxql.Statement, database, retentionPolicy string, ectx *ExecutionContext) error { - return nil -} - -// Executor executes every statement in an Query. -type Executor struct { - // Used for executing a statement in the query. - StatementExecutor StatementExecutor - - // StatementNormalizer normalizes a statement before it is executed. - StatementNormalizer StatementNormalizer - - Metrics *control.ControllerMetrics - - log *zap.Logger -} - -// NewExecutor returns a new instance of Executor. 
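A sketch of how ExecuteQuery is typically consumed, assuming an Executor already built with NewExecutor and given a StatementExecutor; runQuery is an illustrative name:

// runQuery drains the results channel (closed once every statement has run)
// before inspecting the per-query statistics. Draining fully, even after an
// error, lets the executor goroutine finish instead of blocking on a send.
func runQuery(ctx context.Context, e *query.Executor, q *influxql.Query) error {
	results, stats := e.ExecuteQuery(ctx, q, query.ExecutionOptions{ChunkSize: 1000})
	var firstErr error
	for r := range results {
		if r.Err != nil && firstErr == nil {
			firstErr = r.Err
		}
	}
	_ = stats // *iql.Statistics; safe to read once the channel is closed
	return firstErr
}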
-func NewExecutor(logger *zap.Logger, cm *control.ControllerMetrics) *Executor { - return &Executor{ - StatementNormalizer: nullNormalizer, - Metrics: cm, - log: logger.With(zap.String("service", "query")), - } -} - -// Close kills all running queries and prevents new queries from being attached. -func (e *Executor) Close() error { - return nil -} - -// ExecuteQuery executes each statement within a query. -func (e *Executor) ExecuteQuery(ctx context.Context, query *influxql.Query, opt ExecutionOptions) (<-chan *Result, *iql.Statistics) { - results := make(chan *Result) - statistics := new(iql.Statistics) - go e.executeQuery(ctx, query, opt, results, statistics) - return results, statistics -} - -func (e *Executor) executeQuery(ctx context.Context, query *influxql.Query, opt ExecutionOptions, results chan *Result, statistics *iql.Statistics) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer func() { - close(results) - span.Finish() - }() - - defer e.recover(query, results) - - gatherer := new(iql.StatisticsGatherer) - - statusLabel := control.LabelSuccess - defer func(start time.Time) { - dur := time.Since(start) - e.Metrics.ExecutingDuration.WithLabelValues(statusLabel).Observe(dur.Seconds()) - }(time.Now()) - - ectx := &ExecutionContext{StatisticsGatherer: gatherer, ExecutionOptions: opt} - - // Setup the execution context that will be used when executing statements. - ectx.Results = results - - var i int -LOOP: - for ; i < len(query.Statements); i++ { - ectx.statementID = i - stmt := query.Statements[i] - - // If a default database wasn't passed in by the caller, check the statement. - defaultDB := opt.Database - if defaultDB == "" { - if s, ok := stmt.(influxql.HasDefaultDatabase); ok { - defaultDB = s.DefaultDatabase() - } - } - - // Do not let queries manually use the system measurements. If we find - // one, return an error. This prevents a person from using the - // measurement incorrectly and causing a panic. - if stmt, ok := stmt.(*influxql.SelectStatement); ok { - for _, s := range stmt.Sources { - switch s := s.(type) { - case *influxql.Measurement: - if influxql.IsSystemName(s.Name) { - command := "the appropriate meta command" - switch s.Name { - case "_fieldKeys": - command = "SHOW FIELD KEYS" - case "_measurements": - command = "SHOW MEASUREMENTS" - case "_series": - command = "SHOW SERIES" - case "_tagKeys": - command = "SHOW TAG KEYS" - case "_tags": - command = "SHOW TAG VALUES" - } - _ = ectx.Send(ctx, &Result{ - Err: fmt.Errorf("unable to use system source '%s': use %s instead", s.Name, command), - }) - break LOOP - } - } - } - } - - // Rewrite statements, if necessary. - // This can occur on meta read statements which convert to SELECT statements. - newStmt, err := RewriteStatement(stmt) - if err != nil { - _ = ectx.Send(ctx, &Result{Err: err}) - break - } - stmt = newStmt - - if err := e.StatementNormalizer.NormalizeStatement(ctx, stmt, defaultDB, opt.RetentionPolicy, ectx); err != nil { - if err := ectx.Send(ctx, &Result{Err: err}); err != nil { - return - } - break - } - - statistics.StatementCount += 1 - - // Log each normalized statement. - if !ectx.Quiet { - e.log.Info("Executing query", zap.Stringer("query", stmt)) - span.LogFields(log.String("normalized_query", stmt.String())) - } - - gatherer.Reset() - stmtStart := time.Now() - // Send any other statements to the underlying statement executor. 
- err = tracing.LogError(span, e.StatementExecutor.ExecuteStatement(ctx, stmt, ectx)) - stmtDur := time.Since(stmtStart) - stmtStats := gatherer.Statistics() - stmtStats.ExecuteDuration = stmtDur - stmtStats.PlanDuration - statistics.Add(stmtStats) - - // Send an error for this result if it failed for some reason. - if err != nil { - statusLabel = control.LabelNotExecuted - e.Metrics.Requests.WithLabelValues(statusLabel).Inc() - _ = ectx.Send(ctx, &Result{ - StatementID: i, - Err: err, - }) - // Stop after the first error. - break - } - - e.Metrics.Requests.WithLabelValues(statusLabel).Inc() - - // Check if the query was interrupted during an uninterruptible statement. - if err := ctx.Err(); err != nil { - statusLabel = control.LabelInterruptedErr - e.Metrics.Requests.WithLabelValues(statusLabel).Inc() - break - } - } - - // Send error results for any statements which were not executed. - for ; i < len(query.Statements)-1; i++ { - if err := ectx.Send(ctx, &Result{ - StatementID: i, - Err: ErrNotExecuted, - }); err != nil { - break - } - } -} - -// Determines if the Executor will recover any panics or let them crash -// the server. -var willCrash bool - -func init() { - var err error - if willCrash, err = strconv.ParseBool(os.Getenv(PanicCrashEnv)); err != nil { - willCrash = false - } -} - -func (e *Executor) recover(query *influxql.Query, results chan *Result) { - if err := recover(); err != nil { - e.log.Error(fmt.Sprintf("%s [panic:%s] %s", query.String(), err, debug.Stack())) - results <- &Result{ - StatementID: -1, - Err: fmt.Errorf("%s [panic:%s]", query.String(), err), - } - - if willCrash { - e.log.Error("\n\n=====\nAll goroutines now follow:") - e.log.Error(string(debug.Stack())) - os.Exit(1) - } - } -} diff --git a/influxql/query/executor_test.go b/influxql/query/executor_test.go deleted file mode 100644 index c8b91d2d21f..00000000000 --- a/influxql/query/executor_test.go +++ /dev/null @@ -1,199 +0,0 @@ -package query_test - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/golang/mock/gomock" - iql "github.com/influxdata/influxdb/v2/influxql" - "github.com/influxdata/influxdb/v2/influxql/control" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/influxql/query/mocks" - "github.com/influxdata/influxql" - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zaptest" -) - -var errUnexpected = errors.New("unexpected error") - -type StatementExecutor struct { - ExecuteStatementFn func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error -} - -func (e *StatementExecutor) ExecuteStatement(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error { - return e.ExecuteStatementFn(ctx, stmt, ectx) -} - -func NewQueryExecutor(t *testing.T) *query.Executor { - return query.NewExecutor(zaptest.NewLogger(t), control.NewControllerMetrics([]string{})) -} - -func TestQueryExecutor_Interrupt(t *testing.T) { - q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) - if err != nil { - t.Fatal(err) - } - - e := NewQueryExecutor(t) - e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error { - select { - case <-ctx.Done(): - return nil - case <-time.After(100 * time.Millisecond): - t.Error("killing the query did not close the channel after 100 milliseconds") - return errUnexpected - } - }, - } - - ctx, cancel := 
context.WithCancel(context.Background()) - results, _ := e.ExecuteQuery(ctx, q, query.ExecutionOptions{}) - cancel() - - result := <-results - if result != nil && result.Err != query.ErrQueryInterrupted { - t.Errorf("unexpected error: %s", result.Err) - } -} - -func TestQueryExecutor_Abort(t *testing.T) { - q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) - if err != nil { - t.Fatal(err) - } - - ch1 := make(chan struct{}) - ch2 := make(chan struct{}) - - e := NewQueryExecutor(t) - e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error { - <-ch1 - if err := ectx.Send(ctx, &query.Result{Err: errUnexpected}); err == nil { - t.Errorf("expected error") - } - close(ch2) - return nil - }, - } - - ctx, cancel := context.WithCancel(context.Background()) - cancel() - - results, _ := e.ExecuteQuery(ctx, q, query.ExecutionOptions{}) - close(ch1) - - <-ch2 - discardOutput(results) -} - -func TestQueryExecutor_Panic(t *testing.T) { - q, err := influxql.ParseQuery(`SELECT count(value) FROM cpu`) - if err != nil { - t.Fatal(err) - } - - e := NewQueryExecutor(t) - e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error { - panic("test error") - }, - } - - results, _ := e.ExecuteQuery(context.Background(), q, query.ExecutionOptions{}) - result := <-results - if len(result.Series) != 0 { - t.Errorf("expected %d rows, got %d", 0, len(result.Series)) - } - if result.Err == nil || result.Err.Error() != "SELECT count(value) FROM cpu [panic:test error]" { - t.Errorf("unexpected error: %s", result.Err) - } -} - -func TestQueryExecutor_InvalidSource(t *testing.T) { - e := NewQueryExecutor(t) - e.StatementExecutor = &StatementExecutor{ - ExecuteStatementFn: func(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error { - return errors.New("statement executed unexpectedly") - }, - } - - for i, tt := range []struct { - q string - err string - }{ - { - q: `SELECT fieldKey, fieldType FROM _fieldKeys`, - err: `unable to use system source '_fieldKeys': use SHOW FIELD KEYS instead`, - }, - { - q: `SELECT "name" FROM _measurements`, - err: `unable to use system source '_measurements': use SHOW MEASUREMENTS instead`, - }, - { - q: `SELECT "key" FROM _series`, - err: `unable to use system source '_series': use SHOW SERIES instead`, - }, - { - q: `SELECT tagKey FROM _tagKeys`, - err: `unable to use system source '_tagKeys': use SHOW TAG KEYS instead`, - }, - { - q: `SELECT "key", value FROM _tags`, - err: `unable to use system source '_tags': use SHOW TAG VALUES instead`, - }, - } { - q, err := influxql.ParseQuery(tt.q) - if err != nil { - t.Errorf("%d. unable to parse: %s", i, tt.q) - continue - } - - results, _ := e.ExecuteQuery(context.Background(), q, query.ExecutionOptions{}) - result := <-results - if len(result.Series) != 0 { - t.Errorf("%d. expected %d rows, got %d", 0, i, len(result.Series)) - } - if result.Err == nil || result.Err.Error() != tt.err { - t.Errorf("%d. 
unexpected error: %s", i, result.Err) - } - } -} - -// This test verifies Statistics are gathered -// and that ExecuteDuration accounts for PlanDuration -func TestExecutor_ExecuteQuery_Statistics(t *testing.T) { - ctl := gomock.NewController(t) - defer ctl.Finish() - - stmt := influxql.MustParseStatement("SELECT f0 FROM m0") - q := &influxql.Query{Statements: influxql.Statements{stmt, stmt}} - - se := mocks.NewMockStatementExecutor(ctl) - se.EXPECT().ExecuteStatement(gomock.Any(), stmt, gomock.Any()). - Times(2). - DoAndReturn(func(ctx context.Context, statement influxql.Statement, ectx *query.ExecutionContext) error { - time.Sleep(10 * time.Millisecond) - ectx.StatisticsGatherer.Append(iql.NewImmutableCollector(iql.Statistics{PlanDuration: 5 * time.Millisecond})) - return nil - }) - - e := NewQueryExecutor(t) - e.StatementExecutor = se - - ctx := context.Background() - results, stats := e.ExecuteQuery(ctx, q, query.ExecutionOptions{Quiet: true}) - <-results - assert.GreaterOrEqual(t, int64(stats.ExecuteDuration), int64(10*time.Millisecond)) - assert.Equal(t, 10*time.Millisecond, stats.PlanDuration) - assert.Equal(t, 2, stats.StatementCount) -} - -func discardOutput(results <-chan *query.Result) { - for range results { - // Read all results and discard. - } -} diff --git a/influxql/query/explain.go b/influxql/query/explain.go deleted file mode 100644 index a486237d20f..00000000000 --- a/influxql/query/explain.go +++ /dev/null @@ -1,86 +0,0 @@ -package query - -import ( - "bytes" - "context" - "fmt" - "io" - "strings" - - "github.com/influxdata/influxql" -) - -func (p *preparedStatement) Explain(ctx context.Context) (string, error) { - // Determine the cost of all iterators created as part of this plan. - ic := &explainIteratorCreator{ic: p.ic} - p.ic = ic - cur, err := p.Select(ctx) - p.ic = ic.ic - - if err != nil { - return "", err - } - cur.Close() - - var buf bytes.Buffer - for i, node := range ic.nodes { - if i > 0 { - buf.WriteString("\n") - } - - expr := "" - if node.Expr != nil { - expr = node.Expr.String() - } - fmt.Fprintf(&buf, "EXPRESSION: %s\n", expr) - if len(node.Aux) != 0 { - refs := make([]string, len(node.Aux)) - for i, ref := range node.Aux { - refs[i] = ref.String() - } - fmt.Fprintf(&buf, "AUXILIARY FIELDS: %s\n", strings.Join(refs, ", ")) - } - fmt.Fprintf(&buf, "NUMBER OF SHARDS: %d\n", node.Cost.NumShards) - fmt.Fprintf(&buf, "NUMBER OF SERIES: %d\n", node.Cost.NumSeries) - fmt.Fprintf(&buf, "CACHED VALUES: %d\n", node.Cost.CachedValues) - fmt.Fprintf(&buf, "NUMBER OF FILES: %d\n", node.Cost.NumFiles) - fmt.Fprintf(&buf, "NUMBER OF BLOCKS: %d\n", node.Cost.BlocksRead) - fmt.Fprintf(&buf, "SIZE OF BLOCKS: %d\n", node.Cost.BlockSize) - } - return buf.String(), nil -} - -type planNode struct { - Expr influxql.Expr - Aux []influxql.VarRef - Cost IteratorCost -} - -type explainIteratorCreator struct { - ic interface { - IteratorCreator - io.Closer - } - nodes []planNode -} - -func (e *explainIteratorCreator) CreateIterator(ctx context.Context, m *influxql.Measurement, opt IteratorOptions) (Iterator, error) { - cost, err := e.ic.IteratorCost(ctx, m, opt) - if err != nil { - return nil, err - } - e.nodes = append(e.nodes, planNode{ - Expr: opt.Expr, - Aux: opt.Aux, - Cost: cost, - }) - return &nilFloatIterator{}, nil -} - -func (e *explainIteratorCreator) IteratorCost(ctx context.Context, m *influxql.Measurement, opt IteratorOptions) (IteratorCost, error) { - return e.ic.IteratorCost(ctx, m, opt) -} - -func (e *explainIteratorCreator) Close() error { - return 
e.ic.Close() -} diff --git a/influxql/query/functions.gen.go b/influxql/query/functions.gen.go deleted file mode 100644 index 07bad78d304..00000000000 --- a/influxql/query/functions.gen.go +++ /dev/null @@ -1,2500 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! -// Source: functions.gen.go.tmpl - -package query - -import ( - "bytes" - "encoding/binary" - "math/rand" - "sort" - "time" - - "github.com/influxdata/influxdb/v2/pkg/estimator/hll" -) - -// FloatPointAggregator aggregates points to produce a single point. -type FloatPointAggregator interface { - AggregateFloat(p *FloatPoint) -} - -// FloatBulkPointAggregator aggregates multiple points at a time. -type FloatBulkPointAggregator interface { - AggregateFloatBulk(points []FloatPoint) -} - -// FloatPointEmitter produces a single point from an aggregate. -type FloatPointEmitter interface { - Emit() []FloatPoint -} - -// FloatReduceFunc is the function called by a FloatPoint reducer. -type FloatReduceFunc func(prev *FloatPoint, curr *FloatPoint) (t int64, v float64, aux []interface{}) - -// FloatFuncReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type FloatFuncReducer struct { - prev *FloatPoint - fn FloatReduceFunc -} - -// NewFloatFuncReducer creates a new FloatFuncFloatReducer. -func NewFloatFuncReducer(fn FloatReduceFunc, prev *FloatPoint) *FloatFuncReducer { - return &FloatFuncReducer{fn: fn, prev: prev} -} - -// AggregateFloat takes a FloatPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *FloatFuncReducer) AggregateFloat(p *FloatPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &FloatPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. -func (r *FloatFuncReducer) Emit() []FloatPoint { - return []FloatPoint{*r.prev} -} - -// FloatReduceSliceFunc is the function called by a FloatPoint reducer. -type FloatReduceSliceFunc func(a []FloatPoint) []FloatPoint - -// FloatSliceFuncReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type FloatSliceFuncReducer struct { - points []FloatPoint - fn FloatReduceSliceFunc -} - -// NewFloatSliceFuncReducer creates a new FloatSliceFuncReducer. -func NewFloatSliceFuncReducer(fn FloatReduceSliceFunc) *FloatSliceFuncReducer { - return &FloatSliceFuncReducer{fn: fn} -} - -// AggregateFloat copies the FloatPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *FloatSliceFuncReducer) AggregateFloat(p *FloatPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. -// This is a more efficient version of calling AggregateFloat on each point. -func (r *FloatSliceFuncReducer) AggregateFloatBulk(points []FloatPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *FloatSliceFuncReducer) Emit() []FloatPoint { - return r.fn(r.points) -} - -// FloatReduceIntegerFunc is the function called by a FloatPoint reducer. 
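The Func/SliceFunc reducer pairs generated above are the building blocks for InfluxQL aggregates; a small standalone sketch of a max-style reduce function fed through FloatFuncReducer, with made-up values:

func exampleMaxReducer() {
	// Keep the point with the largest value; prev is nil until the first call.
	maxReduce := func(prev, curr *query.FloatPoint) (int64, float64, []interface{}) {
		if prev == nil || curr.Value > prev.Value {
			return curr.Time, curr.Value, curr.Aux
		}
		return prev.Time, prev.Value, prev.Aux
	}
	r := query.NewFloatFuncReducer(maxReduce, nil)
	r.AggregateFloat(&query.FloatPoint{Time: 1, Value: 2.5})
	r.AggregateFloat(&query.FloatPoint{Time: 2, Value: 7.0})
	_ = r.Emit() // a single FloatPoint with Time 2 and Value 7.0
}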
-type FloatReduceIntegerFunc func(prev *IntegerPoint, curr *FloatPoint) (t int64, v int64, aux []interface{}) - -// FloatFuncIntegerReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type FloatFuncIntegerReducer struct { - prev *IntegerPoint - fn FloatReduceIntegerFunc -} - -// NewFloatFuncIntegerReducer creates a new FloatFuncIntegerReducer. -func NewFloatFuncIntegerReducer(fn FloatReduceIntegerFunc, prev *IntegerPoint) *FloatFuncIntegerReducer { - return &FloatFuncIntegerReducer{fn: fn, prev: prev} -} - -// AggregateFloat takes a FloatPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *FloatFuncIntegerReducer) AggregateFloat(p *FloatPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &IntegerPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. -func (r *FloatFuncIntegerReducer) Emit() []IntegerPoint { - return []IntegerPoint{*r.prev} -} - -// FloatReduceIntegerSliceFunc is the function called by a FloatPoint reducer. -type FloatReduceIntegerSliceFunc func(a []FloatPoint) []IntegerPoint - -// FloatSliceFuncIntegerReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type FloatSliceFuncIntegerReducer struct { - points []FloatPoint - fn FloatReduceIntegerSliceFunc -} - -// NewFloatSliceFuncIntegerReducer creates a new FloatSliceFuncIntegerReducer. -func NewFloatSliceFuncIntegerReducer(fn FloatReduceIntegerSliceFunc) *FloatSliceFuncIntegerReducer { - return &FloatSliceFuncIntegerReducer{fn: fn} -} - -// AggregateFloat copies the FloatPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *FloatSliceFuncIntegerReducer) AggregateFloat(p *FloatPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. -// This is a more efficient version of calling AggregateFloat on each point. -func (r *FloatSliceFuncIntegerReducer) AggregateFloatBulk(points []FloatPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *FloatSliceFuncIntegerReducer) Emit() []IntegerPoint { - return r.fn(r.points) -} - -// FloatReduceUnsignedFunc is the function called by a FloatPoint reducer. -type FloatReduceUnsignedFunc func(prev *UnsignedPoint, curr *FloatPoint) (t int64, v uint64, aux []interface{}) - -// FloatFuncUnsignedReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type FloatFuncUnsignedReducer struct { - prev *UnsignedPoint - fn FloatReduceUnsignedFunc -} - -// NewFloatFuncUnsignedReducer creates a new FloatFuncUnsignedReducer. -func NewFloatFuncUnsignedReducer(fn FloatReduceUnsignedFunc, prev *UnsignedPoint) *FloatFuncUnsignedReducer { - return &FloatFuncUnsignedReducer{fn: fn, prev: prev} -} - -// AggregateFloat takes a FloatPoint and invokes the reduce function with the -// current and new point to modify the current point. 
-func (r *FloatFuncUnsignedReducer) AggregateFloat(p *FloatPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &UnsignedPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. -func (r *FloatFuncUnsignedReducer) Emit() []UnsignedPoint { - return []UnsignedPoint{*r.prev} -} - -// FloatReduceUnsignedSliceFunc is the function called by a FloatPoint reducer. -type FloatReduceUnsignedSliceFunc func(a []FloatPoint) []UnsignedPoint - -// FloatSliceFuncUnsignedReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type FloatSliceFuncUnsignedReducer struct { - points []FloatPoint - fn FloatReduceUnsignedSliceFunc -} - -// NewFloatSliceFuncUnsignedReducer creates a new FloatSliceFuncUnsignedReducer. -func NewFloatSliceFuncUnsignedReducer(fn FloatReduceUnsignedSliceFunc) *FloatSliceFuncUnsignedReducer { - return &FloatSliceFuncUnsignedReducer{fn: fn} -} - -// AggregateFloat copies the FloatPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *FloatSliceFuncUnsignedReducer) AggregateFloat(p *FloatPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. -// This is a more efficient version of calling AggregateFloat on each point. -func (r *FloatSliceFuncUnsignedReducer) AggregateFloatBulk(points []FloatPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *FloatSliceFuncUnsignedReducer) Emit() []UnsignedPoint { - return r.fn(r.points) -} - -// FloatReduceStringFunc is the function called by a FloatPoint reducer. -type FloatReduceStringFunc func(prev *StringPoint, curr *FloatPoint) (t int64, v string, aux []interface{}) - -// FloatFuncStringReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type FloatFuncStringReducer struct { - prev *StringPoint - fn FloatReduceStringFunc -} - -// NewFloatFuncStringReducer creates a new FloatFuncStringReducer. -func NewFloatFuncStringReducer(fn FloatReduceStringFunc, prev *StringPoint) *FloatFuncStringReducer { - return &FloatFuncStringReducer{fn: fn, prev: prev} -} - -// AggregateFloat takes a FloatPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *FloatFuncStringReducer) AggregateFloat(p *FloatPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &StringPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. -func (r *FloatFuncStringReducer) Emit() []StringPoint { - return []StringPoint{*r.prev} -} - -// FloatReduceStringSliceFunc is the function called by a FloatPoint reducer. -type FloatReduceStringSliceFunc func(a []FloatPoint) []StringPoint - -// FloatSliceFuncStringReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. 
-type FloatSliceFuncStringReducer struct { - points []FloatPoint - fn FloatReduceStringSliceFunc -} - -// NewFloatSliceFuncStringReducer creates a new FloatSliceFuncStringReducer. -func NewFloatSliceFuncStringReducer(fn FloatReduceStringSliceFunc) *FloatSliceFuncStringReducer { - return &FloatSliceFuncStringReducer{fn: fn} -} - -// AggregateFloat copies the FloatPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *FloatSliceFuncStringReducer) AggregateFloat(p *FloatPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. -// This is a more efficient version of calling AggregateFloat on each point. -func (r *FloatSliceFuncStringReducer) AggregateFloatBulk(points []FloatPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *FloatSliceFuncStringReducer) Emit() []StringPoint { - return r.fn(r.points) -} - -// FloatReduceBooleanFunc is the function called by a FloatPoint reducer. -type FloatReduceBooleanFunc func(prev *BooleanPoint, curr *FloatPoint) (t int64, v bool, aux []interface{}) - -// FloatFuncBooleanReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type FloatFuncBooleanReducer struct { - prev *BooleanPoint - fn FloatReduceBooleanFunc -} - -// NewFloatFuncBooleanReducer creates a new FloatFuncBooleanReducer. -func NewFloatFuncBooleanReducer(fn FloatReduceBooleanFunc, prev *BooleanPoint) *FloatFuncBooleanReducer { - return &FloatFuncBooleanReducer{fn: fn, prev: prev} -} - -// AggregateFloat takes a FloatPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *FloatFuncBooleanReducer) AggregateFloat(p *FloatPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &BooleanPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateFloat. -func (r *FloatFuncBooleanReducer) Emit() []BooleanPoint { - return []BooleanPoint{*r.prev} -} - -// FloatReduceBooleanSliceFunc is the function called by a FloatPoint reducer. -type FloatReduceBooleanSliceFunc func(a []FloatPoint) []BooleanPoint - -// FloatSliceFuncBooleanReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type FloatSliceFuncBooleanReducer struct { - points []FloatPoint - fn FloatReduceBooleanSliceFunc -} - -// NewFloatSliceFuncBooleanReducer creates a new FloatSliceFuncBooleanReducer. -func NewFloatSliceFuncBooleanReducer(fn FloatReduceBooleanSliceFunc) *FloatSliceFuncBooleanReducer { - return &FloatSliceFuncBooleanReducer{fn: fn} -} - -// AggregateFloat copies the FloatPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *FloatSliceFuncBooleanReducer) AggregateFloat(p *FloatPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateFloatBulk performs a bulk copy of FloatPoints into the internal slice. -// This is a more efficient version of calling AggregateFloat on each point. 
-func (r *FloatSliceFuncBooleanReducer) AggregateFloatBulk(points []FloatPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *FloatSliceFuncBooleanReducer) Emit() []BooleanPoint { - return r.fn(r.points) -} - -// FloatSumHllReducer returns the HLL sketch for a series, in string form -type FloatSumHllReducer struct { - plus *hll.Plus -} - -// func NewFloatSumHllReducer creates a new FloatSumHllReducer -func NewFloatSumHllReducer() *FloatSumHllReducer { - return &FloatSumHllReducer{plus: hll.NewDefaultPlus()} -} - -// AggregateFloat aggregates a point into the reducer. -func (r *FloatSumHllReducer) AggregateFloat(p *FloatPoint) { - - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, p.Value) - b := buf.Bytes() - - r.plus.Add(b) -} - -// Emit emits the distinct points that have been aggregated into the reducer. -func (r *FloatSumHllReducer) Emit() []StringPoint { - return []StringPoint{ - marshalPlus(r.plus, nil), - } -} - -// FloatDistinctReducer returns the distinct points in a series. -type FloatDistinctReducer struct { - m map[float64]FloatPoint -} - -// NewFloatDistinctReducer creates a new FloatDistinctReducer. -func NewFloatDistinctReducer() *FloatDistinctReducer { - return &FloatDistinctReducer{m: make(map[float64]FloatPoint)} -} - -// AggregateFloat aggregates a point into the reducer. -func (r *FloatDistinctReducer) AggregateFloat(p *FloatPoint) { - if _, ok := r.m[p.Value]; !ok { - r.m[p.Value] = *p - } -} - -// Emit emits the distinct points that have been aggregated into the reducer. -func (r *FloatDistinctReducer) Emit() []FloatPoint { - points := make([]FloatPoint, 0, len(r.m)) - for _, p := range r.m { - points = append(points, FloatPoint{Time: p.Time, Value: p.Value}) - } - sort.Sort(floatPoints(points)) - return points -} - -// FloatElapsedReducer calculates the elapsed of the aggregated points. -type FloatElapsedReducer struct { - unitConversion int64 - prev FloatPoint - curr FloatPoint -} - -// NewFloatElapsedReducer creates a new FloatElapsedReducer. -func NewFloatElapsedReducer(interval Interval) *FloatElapsedReducer { - return &FloatElapsedReducer{ - unitConversion: int64(interval.Duration), - prev: FloatPoint{Nil: true}, - curr: FloatPoint{Nil: true}, - } -} - -// AggregateFloat aggregates a point into the reducer and updates the current window. -func (r *FloatElapsedReducer) AggregateFloat(p *FloatPoint) { - r.prev = r.curr - r.curr = *p -} - -// Emit emits the elapsed of the reducer at the current point. -func (r *FloatElapsedReducer) Emit() []IntegerPoint { - if !r.prev.Nil { - elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion - return []IntegerPoint{ - {Time: r.curr.Time, Value: elapsed}, - } - } - return nil -} - -// FloatSampleReducer implements a reservoir sampling to calculate a random subset of points -type FloatSampleReducer struct { - count int // how many points we've iterated over - rng *rand.Rand // random number generator for each reducer - - points floatPoints // the reservoir -} - -// NewFloatSampleReducer creates a new FloatSampleReducer -func NewFloatSampleReducer(size int) *FloatSampleReducer { - return &FloatSampleReducer{ - rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ - points: make(floatPoints, size), - } -} - -// AggregateFloat aggregates a point into the reducer. 
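A worked example for the FloatElapsedReducer above, assuming query.Interval exposes the Duration field the constructor reads; with a one-second interval, two points three seconds apart emit the value 3:

func exampleElapsed() {
	r := query.NewFloatElapsedReducer(query.Interval{Duration: time.Second})
	r.AggregateFloat(&query.FloatPoint{Time: 2 * int64(time.Second), Value: 10})
	r.AggregateFloat(&query.FloatPoint{Time: 5 * int64(time.Second), Value: 20})
	_ = r.Emit() // one IntegerPoint with Time 5s and Value 3: (5s - 2s) / 1s
}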
-func (r *FloatSampleReducer) AggregateFloat(p *FloatPoint) { - r.count++ - // Fill the reservoir with the first n points - if r.count-1 < len(r.points) { - p.CopyTo(&r.points[r.count-1]) - return - } - - // Generate a random integer between 1 and the count and - // if that number is less than the length of the slice - // replace the point at that index rnd with p. - rnd := r.rng.Intn(r.count) - if rnd < len(r.points) { - p.CopyTo(&r.points[rnd]) - } -} - -// Emit emits the reservoir sample as many points. -func (r *FloatSampleReducer) Emit() []FloatPoint { - min := len(r.points) - if r.count < min { - min = r.count - } - pts := r.points[:min] - sort.Sort(pts) - return pts -} - -// IntegerPointAggregator aggregates points to produce a single point. -type IntegerPointAggregator interface { - AggregateInteger(p *IntegerPoint) -} - -// IntegerBulkPointAggregator aggregates multiple points at a time. -type IntegerBulkPointAggregator interface { - AggregateIntegerBulk(points []IntegerPoint) -} - -// IntegerPointEmitter produces a single point from an aggregate. -type IntegerPointEmitter interface { - Emit() []IntegerPoint -} - -// IntegerReduceFloatFunc is the function called by a IntegerPoint reducer. -type IntegerReduceFloatFunc func(prev *FloatPoint, curr *IntegerPoint) (t int64, v float64, aux []interface{}) - -// IntegerFuncFloatReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type IntegerFuncFloatReducer struct { - prev *FloatPoint - fn IntegerReduceFloatFunc -} - -// NewIntegerFuncFloatReducer creates a new IntegerFuncFloatReducer. -func NewIntegerFuncFloatReducer(fn IntegerReduceFloatFunc, prev *FloatPoint) *IntegerFuncFloatReducer { - return &IntegerFuncFloatReducer{fn: fn, prev: prev} -} - -// AggregateInteger takes a IntegerPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *IntegerFuncFloatReducer) AggregateInteger(p *IntegerPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &FloatPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. -func (r *IntegerFuncFloatReducer) Emit() []FloatPoint { - return []FloatPoint{*r.prev} -} - -// IntegerReduceFloatSliceFunc is the function called by a IntegerPoint reducer. -type IntegerReduceFloatSliceFunc func(a []IntegerPoint) []FloatPoint - -// IntegerSliceFuncFloatReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type IntegerSliceFuncFloatReducer struct { - points []IntegerPoint - fn IntegerReduceFloatSliceFunc -} - -// NewIntegerSliceFuncFloatReducer creates a new IntegerSliceFuncFloatReducer. -func NewIntegerSliceFuncFloatReducer(fn IntegerReduceFloatSliceFunc) *IntegerSliceFuncFloatReducer { - return &IntegerSliceFuncFloatReducer{fn: fn} -} - -// AggregateInteger copies the IntegerPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *IntegerSliceFuncFloatReducer) AggregateInteger(p *IntegerPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. -// This is a more efficient version of calling AggregateInteger on each point. 
-func (r *IntegerSliceFuncFloatReducer) AggregateIntegerBulk(points []IntegerPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *IntegerSliceFuncFloatReducer) Emit() []FloatPoint { - return r.fn(r.points) -} - -// IntegerReduceFunc is the function called by a IntegerPoint reducer. -type IntegerReduceFunc func(prev *IntegerPoint, curr *IntegerPoint) (t int64, v int64, aux []interface{}) - -// IntegerFuncReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type IntegerFuncReducer struct { - prev *IntegerPoint - fn IntegerReduceFunc -} - -// NewIntegerFuncReducer creates a new IntegerFuncIntegerReducer. -func NewIntegerFuncReducer(fn IntegerReduceFunc, prev *IntegerPoint) *IntegerFuncReducer { - return &IntegerFuncReducer{fn: fn, prev: prev} -} - -// AggregateInteger takes a IntegerPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *IntegerFuncReducer) AggregateInteger(p *IntegerPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &IntegerPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. -func (r *IntegerFuncReducer) Emit() []IntegerPoint { - return []IntegerPoint{*r.prev} -} - -// IntegerReduceSliceFunc is the function called by a IntegerPoint reducer. -type IntegerReduceSliceFunc func(a []IntegerPoint) []IntegerPoint - -// IntegerSliceFuncReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type IntegerSliceFuncReducer struct { - points []IntegerPoint - fn IntegerReduceSliceFunc -} - -// NewIntegerSliceFuncReducer creates a new IntegerSliceFuncReducer. -func NewIntegerSliceFuncReducer(fn IntegerReduceSliceFunc) *IntegerSliceFuncReducer { - return &IntegerSliceFuncReducer{fn: fn} -} - -// AggregateInteger copies the IntegerPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *IntegerSliceFuncReducer) AggregateInteger(p *IntegerPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. -// This is a more efficient version of calling AggregateInteger on each point. -func (r *IntegerSliceFuncReducer) AggregateIntegerBulk(points []IntegerPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *IntegerSliceFuncReducer) Emit() []IntegerPoint { - return r.fn(r.points) -} - -// IntegerReduceUnsignedFunc is the function called by a IntegerPoint reducer. -type IntegerReduceUnsignedFunc func(prev *UnsignedPoint, curr *IntegerPoint) (t int64, v uint64, aux []interface{}) - -// IntegerFuncUnsignedReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type IntegerFuncUnsignedReducer struct { - prev *UnsignedPoint - fn IntegerReduceUnsignedFunc -} - -// NewIntegerFuncUnsignedReducer creates a new IntegerFuncUnsignedReducer. 
-func NewIntegerFuncUnsignedReducer(fn IntegerReduceUnsignedFunc, prev *UnsignedPoint) *IntegerFuncUnsignedReducer { - return &IntegerFuncUnsignedReducer{fn: fn, prev: prev} -} - -// AggregateInteger takes a IntegerPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *IntegerFuncUnsignedReducer) AggregateInteger(p *IntegerPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &UnsignedPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. -func (r *IntegerFuncUnsignedReducer) Emit() []UnsignedPoint { - return []UnsignedPoint{*r.prev} -} - -// IntegerReduceUnsignedSliceFunc is the function called by a IntegerPoint reducer. -type IntegerReduceUnsignedSliceFunc func(a []IntegerPoint) []UnsignedPoint - -// IntegerSliceFuncUnsignedReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type IntegerSliceFuncUnsignedReducer struct { - points []IntegerPoint - fn IntegerReduceUnsignedSliceFunc -} - -// NewIntegerSliceFuncUnsignedReducer creates a new IntegerSliceFuncUnsignedReducer. -func NewIntegerSliceFuncUnsignedReducer(fn IntegerReduceUnsignedSliceFunc) *IntegerSliceFuncUnsignedReducer { - return &IntegerSliceFuncUnsignedReducer{fn: fn} -} - -// AggregateInteger copies the IntegerPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *IntegerSliceFuncUnsignedReducer) AggregateInteger(p *IntegerPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. -// This is a more efficient version of calling AggregateInteger on each point. -func (r *IntegerSliceFuncUnsignedReducer) AggregateIntegerBulk(points []IntegerPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *IntegerSliceFuncUnsignedReducer) Emit() []UnsignedPoint { - return r.fn(r.points) -} - -// IntegerReduceStringFunc is the function called by a IntegerPoint reducer. -type IntegerReduceStringFunc func(prev *StringPoint, curr *IntegerPoint) (t int64, v string, aux []interface{}) - -// IntegerFuncStringReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type IntegerFuncStringReducer struct { - prev *StringPoint - fn IntegerReduceStringFunc -} - -// NewIntegerFuncStringReducer creates a new IntegerFuncStringReducer. -func NewIntegerFuncStringReducer(fn IntegerReduceStringFunc, prev *StringPoint) *IntegerFuncStringReducer { - return &IntegerFuncStringReducer{fn: fn, prev: prev} -} - -// AggregateInteger takes a IntegerPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *IntegerFuncStringReducer) AggregateInteger(p *IntegerPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &StringPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. 
-func (r *IntegerFuncStringReducer) Emit() []StringPoint { - return []StringPoint{*r.prev} -} - -// IntegerReduceStringSliceFunc is the function called by a IntegerPoint reducer. -type IntegerReduceStringSliceFunc func(a []IntegerPoint) []StringPoint - -// IntegerSliceFuncStringReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type IntegerSliceFuncStringReducer struct { - points []IntegerPoint - fn IntegerReduceStringSliceFunc -} - -// NewIntegerSliceFuncStringReducer creates a new IntegerSliceFuncStringReducer. -func NewIntegerSliceFuncStringReducer(fn IntegerReduceStringSliceFunc) *IntegerSliceFuncStringReducer { - return &IntegerSliceFuncStringReducer{fn: fn} -} - -// AggregateInteger copies the IntegerPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *IntegerSliceFuncStringReducer) AggregateInteger(p *IntegerPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. -// This is a more efficient version of calling AggregateInteger on each point. -func (r *IntegerSliceFuncStringReducer) AggregateIntegerBulk(points []IntegerPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *IntegerSliceFuncStringReducer) Emit() []StringPoint { - return r.fn(r.points) -} - -// IntegerReduceBooleanFunc is the function called by a IntegerPoint reducer. -type IntegerReduceBooleanFunc func(prev *BooleanPoint, curr *IntegerPoint) (t int64, v bool, aux []interface{}) - -// IntegerFuncBooleanReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type IntegerFuncBooleanReducer struct { - prev *BooleanPoint - fn IntegerReduceBooleanFunc -} - -// NewIntegerFuncBooleanReducer creates a new IntegerFuncBooleanReducer. -func NewIntegerFuncBooleanReducer(fn IntegerReduceBooleanFunc, prev *BooleanPoint) *IntegerFuncBooleanReducer { - return &IntegerFuncBooleanReducer{fn: fn, prev: prev} -} - -// AggregateInteger takes a IntegerPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *IntegerFuncBooleanReducer) AggregateInteger(p *IntegerPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &BooleanPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateInteger. -func (r *IntegerFuncBooleanReducer) Emit() []BooleanPoint { - return []BooleanPoint{*r.prev} -} - -// IntegerReduceBooleanSliceFunc is the function called by a IntegerPoint reducer. -type IntegerReduceBooleanSliceFunc func(a []IntegerPoint) []BooleanPoint - -// IntegerSliceFuncBooleanReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type IntegerSliceFuncBooleanReducer struct { - points []IntegerPoint - fn IntegerReduceBooleanSliceFunc -} - -// NewIntegerSliceFuncBooleanReducer creates a new IntegerSliceFuncBooleanReducer. 
-func NewIntegerSliceFuncBooleanReducer(fn IntegerReduceBooleanSliceFunc) *IntegerSliceFuncBooleanReducer { - return &IntegerSliceFuncBooleanReducer{fn: fn} -} - -// AggregateInteger copies the IntegerPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *IntegerSliceFuncBooleanReducer) AggregateInteger(p *IntegerPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateIntegerBulk performs a bulk copy of IntegerPoints into the internal slice. -// This is a more efficient version of calling AggregateInteger on each point. -func (r *IntegerSliceFuncBooleanReducer) AggregateIntegerBulk(points []IntegerPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *IntegerSliceFuncBooleanReducer) Emit() []BooleanPoint { - return r.fn(r.points) -} - -// IntegerSumHllReducer returns the HLL sketch for a series, in string form -type IntegerSumHllReducer struct { - plus *hll.Plus -} - -// func NewIntegerSumHllReducer creates a new IntegerSumHllReducer -func NewIntegerSumHllReducer() *IntegerSumHllReducer { - return &IntegerSumHllReducer{plus: hll.NewDefaultPlus()} -} - -// AggregateInteger aggregates a point into the reducer. -func (r *IntegerSumHllReducer) AggregateInteger(p *IntegerPoint) { - - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, p.Value) - b := buf.Bytes() - - r.plus.Add(b) -} - -// Emit emits the distinct points that have been aggregated into the reducer. -func (r *IntegerSumHllReducer) Emit() []StringPoint { - return []StringPoint{ - marshalPlus(r.plus, nil), - } -} - -// IntegerDistinctReducer returns the distinct points in a series. -type IntegerDistinctReducer struct { - m map[int64]IntegerPoint -} - -// NewIntegerDistinctReducer creates a new IntegerDistinctReducer. -func NewIntegerDistinctReducer() *IntegerDistinctReducer { - return &IntegerDistinctReducer{m: make(map[int64]IntegerPoint)} -} - -// AggregateInteger aggregates a point into the reducer. -func (r *IntegerDistinctReducer) AggregateInteger(p *IntegerPoint) { - if _, ok := r.m[p.Value]; !ok { - r.m[p.Value] = *p - } -} - -// Emit emits the distinct points that have been aggregated into the reducer. -func (r *IntegerDistinctReducer) Emit() []IntegerPoint { - points := make([]IntegerPoint, 0, len(r.m)) - for _, p := range r.m { - points = append(points, IntegerPoint{Time: p.Time, Value: p.Value}) - } - sort.Sort(integerPoints(points)) - return points -} - -// IntegerElapsedReducer calculates the elapsed of the aggregated points. -type IntegerElapsedReducer struct { - unitConversion int64 - prev IntegerPoint - curr IntegerPoint -} - -// NewIntegerElapsedReducer creates a new IntegerElapsedReducer. -func NewIntegerElapsedReducer(interval Interval) *IntegerElapsedReducer { - return &IntegerElapsedReducer{ - unitConversion: int64(interval.Duration), - prev: IntegerPoint{Nil: true}, - curr: IntegerPoint{Nil: true}, - } -} - -// AggregateInteger aggregates a point into the reducer and updates the current window. -func (r *IntegerElapsedReducer) AggregateInteger(p *IntegerPoint) { - r.prev = r.curr - r.curr = *p -} - -// Emit emits the elapsed of the reducer at the current point. 
-func (r *IntegerElapsedReducer) Emit() []IntegerPoint { - if !r.prev.Nil { - elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion - return []IntegerPoint{ - {Time: r.curr.Time, Value: elapsed}, - } - } - return nil -} - -// IntegerSampleReducer implements a reservoir sampling to calculate a random subset of points -type IntegerSampleReducer struct { - count int // how many points we've iterated over - rng *rand.Rand // random number generator for each reducer - - points integerPoints // the reservoir -} - -// NewIntegerSampleReducer creates a new IntegerSampleReducer -func NewIntegerSampleReducer(size int) *IntegerSampleReducer { - return &IntegerSampleReducer{ - rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ - points: make(integerPoints, size), - } -} - -// AggregateInteger aggregates a point into the reducer. -func (r *IntegerSampleReducer) AggregateInteger(p *IntegerPoint) { - r.count++ - // Fill the reservoir with the first n points - if r.count-1 < len(r.points) { - p.CopyTo(&r.points[r.count-1]) - return - } - - // Generate a random integer between 1 and the count and - // if that number is less than the length of the slice - // replace the point at that index rnd with p. - rnd := r.rng.Intn(r.count) - if rnd < len(r.points) { - p.CopyTo(&r.points[rnd]) - } -} - -// Emit emits the reservoir sample as many points. -func (r *IntegerSampleReducer) Emit() []IntegerPoint { - min := len(r.points) - if r.count < min { - min = r.count - } - pts := r.points[:min] - sort.Sort(pts) - return pts -} - -// UnsignedPointAggregator aggregates points to produce a single point. -type UnsignedPointAggregator interface { - AggregateUnsigned(p *UnsignedPoint) -} - -// UnsignedBulkPointAggregator aggregates multiple points at a time. -type UnsignedBulkPointAggregator interface { - AggregateUnsignedBulk(points []UnsignedPoint) -} - -// UnsignedPointEmitter produces a single point from an aggregate. -type UnsignedPointEmitter interface { - Emit() []UnsignedPoint -} - -// UnsignedReduceFloatFunc is the function called by a UnsignedPoint reducer. -type UnsignedReduceFloatFunc func(prev *FloatPoint, curr *UnsignedPoint) (t int64, v float64, aux []interface{}) - -// UnsignedFuncFloatReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type UnsignedFuncFloatReducer struct { - prev *FloatPoint - fn UnsignedReduceFloatFunc -} - -// NewUnsignedFuncFloatReducer creates a new UnsignedFuncFloatReducer. -func NewUnsignedFuncFloatReducer(fn UnsignedReduceFloatFunc, prev *FloatPoint) *UnsignedFuncFloatReducer { - return &UnsignedFuncFloatReducer{fn: fn, prev: prev} -} - -// AggregateUnsigned takes a UnsignedPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *UnsignedFuncFloatReducer) AggregateUnsigned(p *UnsignedPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &FloatPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateUnsigned. -func (r *UnsignedFuncFloatReducer) Emit() []FloatPoint { - return []FloatPoint{*r.prev} -} - -// UnsignedReduceFloatSliceFunc is the function called by a UnsignedPoint reducer. 
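The sample reducers above implement classic reservoir sampling: the first size points fill the reservoir, and each later point replaces a random slot with probability size/count, so every point is retained with equal probability. A brief usage sketch with invented values:

func exampleSample() {
	r := query.NewIntegerSampleReducer(2) // reservoir of two points
	for t := int64(0); t < 10; t++ {
		r.AggregateInteger(&query.IntegerPoint{Time: t, Value: t * t})
	}
	_ = r.Emit() // two points chosen uniformly at random, returned sorted
}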
-type UnsignedReduceFloatSliceFunc func(a []UnsignedPoint) []FloatPoint - -// UnsignedSliceFuncFloatReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type UnsignedSliceFuncFloatReducer struct { - points []UnsignedPoint - fn UnsignedReduceFloatSliceFunc -} - -// NewUnsignedSliceFuncFloatReducer creates a new UnsignedSliceFuncFloatReducer. -func NewUnsignedSliceFuncFloatReducer(fn UnsignedReduceFloatSliceFunc) *UnsignedSliceFuncFloatReducer { - return &UnsignedSliceFuncFloatReducer{fn: fn} -} - -// AggregateUnsigned copies the UnsignedPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *UnsignedSliceFuncFloatReducer) AggregateUnsigned(p *UnsignedPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateUnsignedBulk performs a bulk copy of UnsignedPoints into the internal slice. -// This is a more efficient version of calling AggregateUnsigned on each point. -func (r *UnsignedSliceFuncFloatReducer) AggregateUnsignedBulk(points []UnsignedPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *UnsignedSliceFuncFloatReducer) Emit() []FloatPoint { - return r.fn(r.points) -} - -// UnsignedReduceIntegerFunc is the function called by a UnsignedPoint reducer. -type UnsignedReduceIntegerFunc func(prev *IntegerPoint, curr *UnsignedPoint) (t int64, v int64, aux []interface{}) - -// UnsignedFuncIntegerReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type UnsignedFuncIntegerReducer struct { - prev *IntegerPoint - fn UnsignedReduceIntegerFunc -} - -// NewUnsignedFuncIntegerReducer creates a new UnsignedFuncIntegerReducer. -func NewUnsignedFuncIntegerReducer(fn UnsignedReduceIntegerFunc, prev *IntegerPoint) *UnsignedFuncIntegerReducer { - return &UnsignedFuncIntegerReducer{fn: fn, prev: prev} -} - -// AggregateUnsigned takes a UnsignedPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *UnsignedFuncIntegerReducer) AggregateUnsigned(p *UnsignedPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &IntegerPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateUnsigned. -func (r *UnsignedFuncIntegerReducer) Emit() []IntegerPoint { - return []IntegerPoint{*r.prev} -} - -// UnsignedReduceIntegerSliceFunc is the function called by a UnsignedPoint reducer. -type UnsignedReduceIntegerSliceFunc func(a []UnsignedPoint) []IntegerPoint - -// UnsignedSliceFuncIntegerReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type UnsignedSliceFuncIntegerReducer struct { - points []UnsignedPoint - fn UnsignedReduceIntegerSliceFunc -} - -// NewUnsignedSliceFuncIntegerReducer creates a new UnsignedSliceFuncIntegerReducer. 
-func NewUnsignedSliceFuncIntegerReducer(fn UnsignedReduceIntegerSliceFunc) *UnsignedSliceFuncIntegerReducer { - return &UnsignedSliceFuncIntegerReducer{fn: fn} -} - -// AggregateUnsigned copies the UnsignedPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *UnsignedSliceFuncIntegerReducer) AggregateUnsigned(p *UnsignedPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateUnsignedBulk performs a bulk copy of UnsignedPoints into the internal slice. -// This is a more efficient version of calling AggregateUnsigned on each point. -func (r *UnsignedSliceFuncIntegerReducer) AggregateUnsignedBulk(points []UnsignedPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *UnsignedSliceFuncIntegerReducer) Emit() []IntegerPoint { - return r.fn(r.points) -} - -// UnsignedReduceFunc is the function called by a UnsignedPoint reducer. -type UnsignedReduceFunc func(prev *UnsignedPoint, curr *UnsignedPoint) (t int64, v uint64, aux []interface{}) - -// UnsignedFuncReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type UnsignedFuncReducer struct { - prev *UnsignedPoint - fn UnsignedReduceFunc -} - -// NewUnsignedFuncReducer creates a new UnsignedFuncUnsignedReducer. -func NewUnsignedFuncReducer(fn UnsignedReduceFunc, prev *UnsignedPoint) *UnsignedFuncReducer { - return &UnsignedFuncReducer{fn: fn, prev: prev} -} - -// AggregateUnsigned takes a UnsignedPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *UnsignedFuncReducer) AggregateUnsigned(p *UnsignedPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &UnsignedPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateUnsigned. -func (r *UnsignedFuncReducer) Emit() []UnsignedPoint { - return []UnsignedPoint{*r.prev} -} - -// UnsignedReduceSliceFunc is the function called by a UnsignedPoint reducer. -type UnsignedReduceSliceFunc func(a []UnsignedPoint) []UnsignedPoint - -// UnsignedSliceFuncReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type UnsignedSliceFuncReducer struct { - points []UnsignedPoint - fn UnsignedReduceSliceFunc -} - -// NewUnsignedSliceFuncReducer creates a new UnsignedSliceFuncReducer. -func NewUnsignedSliceFuncReducer(fn UnsignedReduceSliceFunc) *UnsignedSliceFuncReducer { - return &UnsignedSliceFuncReducer{fn: fn} -} - -// AggregateUnsigned copies the UnsignedPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *UnsignedSliceFuncReducer) AggregateUnsigned(p *UnsignedPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateUnsignedBulk performs a bulk copy of UnsignedPoints into the internal slice. -// This is a more efficient version of calling AggregateUnsigned on each point. -func (r *UnsignedSliceFuncReducer) AggregateUnsignedBulk(points []UnsignedPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. 
-// This method does not clear the points from the internal slice. -func (r *UnsignedSliceFuncReducer) Emit() []UnsignedPoint { - return r.fn(r.points) -} - -// UnsignedReduceStringFunc is the function called by a UnsignedPoint reducer. -type UnsignedReduceStringFunc func(prev *StringPoint, curr *UnsignedPoint) (t int64, v string, aux []interface{}) - -// UnsignedFuncStringReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type UnsignedFuncStringReducer struct { - prev *StringPoint - fn UnsignedReduceStringFunc -} - -// NewUnsignedFuncStringReducer creates a new UnsignedFuncStringReducer. -func NewUnsignedFuncStringReducer(fn UnsignedReduceStringFunc, prev *StringPoint) *UnsignedFuncStringReducer { - return &UnsignedFuncStringReducer{fn: fn, prev: prev} -} - -// AggregateUnsigned takes a UnsignedPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *UnsignedFuncStringReducer) AggregateUnsigned(p *UnsignedPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &StringPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateUnsigned. -func (r *UnsignedFuncStringReducer) Emit() []StringPoint { - return []StringPoint{*r.prev} -} - -// UnsignedReduceStringSliceFunc is the function called by a UnsignedPoint reducer. -type UnsignedReduceStringSliceFunc func(a []UnsignedPoint) []StringPoint - -// UnsignedSliceFuncStringReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type UnsignedSliceFuncStringReducer struct { - points []UnsignedPoint - fn UnsignedReduceStringSliceFunc -} - -// NewUnsignedSliceFuncStringReducer creates a new UnsignedSliceFuncStringReducer. -func NewUnsignedSliceFuncStringReducer(fn UnsignedReduceStringSliceFunc) *UnsignedSliceFuncStringReducer { - return &UnsignedSliceFuncStringReducer{fn: fn} -} - -// AggregateUnsigned copies the UnsignedPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *UnsignedSliceFuncStringReducer) AggregateUnsigned(p *UnsignedPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateUnsignedBulk performs a bulk copy of UnsignedPoints into the internal slice. -// This is a more efficient version of calling AggregateUnsigned on each point. -func (r *UnsignedSliceFuncStringReducer) AggregateUnsignedBulk(points []UnsignedPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *UnsignedSliceFuncStringReducer) Emit() []StringPoint { - return r.fn(r.points) -} - -// UnsignedReduceBooleanFunc is the function called by a UnsignedPoint reducer. -type UnsignedReduceBooleanFunc func(prev *BooleanPoint, curr *UnsignedPoint) (t int64, v bool, aux []interface{}) - -// UnsignedFuncBooleanReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type UnsignedFuncBooleanReducer struct { - prev *BooleanPoint - fn UnsignedReduceBooleanFunc -} - -// NewUnsignedFuncBooleanReducer creates a new UnsignedFuncBooleanReducer. 
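// A hypothetical reduce function such a reducer could wrap, not taken from the
// original source: it flags whether any point exceeded an arbitrary threshold of
// 100. The function must tolerate a nil prev, because the reducer calls it before
// allocating the previous point:
//
//	exceeds := func(prev *BooleanPoint, curr *UnsignedPoint) (int64, bool, []interface{}) {
//		if prev == nil {
//			return curr.Time, curr.Value > 100, nil
//		}
//		return prev.Time, prev.Value || curr.Value > 100, prev.Aux
//	}
//	r := NewUnsignedFuncBooleanReducer(exceeds, nil)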
-func NewUnsignedFuncBooleanReducer(fn UnsignedReduceBooleanFunc, prev *BooleanPoint) *UnsignedFuncBooleanReducer { - return &UnsignedFuncBooleanReducer{fn: fn, prev: prev} -} - -// AggregateUnsigned takes a UnsignedPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *UnsignedFuncBooleanReducer) AggregateUnsigned(p *UnsignedPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &BooleanPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateUnsigned. -func (r *UnsignedFuncBooleanReducer) Emit() []BooleanPoint { - return []BooleanPoint{*r.prev} -} - -// UnsignedReduceBooleanSliceFunc is the function called by a UnsignedPoint reducer. -type UnsignedReduceBooleanSliceFunc func(a []UnsignedPoint) []BooleanPoint - -// UnsignedSliceFuncBooleanReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type UnsignedSliceFuncBooleanReducer struct { - points []UnsignedPoint - fn UnsignedReduceBooleanSliceFunc -} - -// NewUnsignedSliceFuncBooleanReducer creates a new UnsignedSliceFuncBooleanReducer. -func NewUnsignedSliceFuncBooleanReducer(fn UnsignedReduceBooleanSliceFunc) *UnsignedSliceFuncBooleanReducer { - return &UnsignedSliceFuncBooleanReducer{fn: fn} -} - -// AggregateUnsigned copies the UnsignedPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *UnsignedSliceFuncBooleanReducer) AggregateUnsigned(p *UnsignedPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateUnsignedBulk performs a bulk copy of UnsignedPoints into the internal slice. -// This is a more efficient version of calling AggregateUnsigned on each point. -func (r *UnsignedSliceFuncBooleanReducer) AggregateUnsignedBulk(points []UnsignedPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *UnsignedSliceFuncBooleanReducer) Emit() []BooleanPoint { - return r.fn(r.points) -} - -// UnsignedSumHllReducer returns the HLL sketch for a series, in string form -type UnsignedSumHllReducer struct { - plus *hll.Plus -} - -// func NewUnsignedSumHllReducer creates a new UnsignedSumHllReducer -func NewUnsignedSumHllReducer() *UnsignedSumHllReducer { - return &UnsignedSumHllReducer{plus: hll.NewDefaultPlus()} -} - -// AggregateUnsigned aggregates a point into the reducer. -func (r *UnsignedSumHllReducer) AggregateUnsigned(p *UnsignedPoint) { - - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, p.Value) - b := buf.Bytes() - - r.plus.Add(b) -} - -// Emit emits the distinct points that have been aggregated into the reducer. -func (r *UnsignedSumHllReducer) Emit() []StringPoint { - return []StringPoint{ - marshalPlus(r.plus, nil), - } -} - -// UnsignedDistinctReducer returns the distinct points in a series. -type UnsignedDistinctReducer struct { - m map[uint64]UnsignedPoint -} - -// NewUnsignedDistinctReducer creates a new UnsignedDistinctReducer. -func NewUnsignedDistinctReducer() *UnsignedDistinctReducer { - return &UnsignedDistinctReducer{m: make(map[uint64]UnsignedPoint)} -} - -// AggregateUnsigned aggregates a point into the reducer. 
-func (r *UnsignedDistinctReducer) AggregateUnsigned(p *UnsignedPoint) { - if _, ok := r.m[p.Value]; !ok { - r.m[p.Value] = *p - } -} - -// Emit emits the distinct points that have been aggregated into the reducer. -func (r *UnsignedDistinctReducer) Emit() []UnsignedPoint { - points := make([]UnsignedPoint, 0, len(r.m)) - for _, p := range r.m { - points = append(points, UnsignedPoint{Time: p.Time, Value: p.Value}) - } - sort.Sort(unsignedPoints(points)) - return points -} - -// UnsignedElapsedReducer calculates the elapsed of the aggregated points. -type UnsignedElapsedReducer struct { - unitConversion int64 - prev UnsignedPoint - curr UnsignedPoint -} - -// NewUnsignedElapsedReducer creates a new UnsignedElapsedReducer. -func NewUnsignedElapsedReducer(interval Interval) *UnsignedElapsedReducer { - return &UnsignedElapsedReducer{ - unitConversion: int64(interval.Duration), - prev: UnsignedPoint{Nil: true}, - curr: UnsignedPoint{Nil: true}, - } -} - -// AggregateUnsigned aggregates a point into the reducer and updates the current window. -func (r *UnsignedElapsedReducer) AggregateUnsigned(p *UnsignedPoint) { - r.prev = r.curr - r.curr = *p -} - -// Emit emits the elapsed of the reducer at the current point. -func (r *UnsignedElapsedReducer) Emit() []IntegerPoint { - if !r.prev.Nil { - elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion - return []IntegerPoint{ - {Time: r.curr.Time, Value: elapsed}, - } - } - return nil -} - -// UnsignedSampleReducer implements a reservoir sampling to calculate a random subset of points -type UnsignedSampleReducer struct { - count int // how many points we've iterated over - rng *rand.Rand // random number generator for each reducer - - points unsignedPoints // the reservoir -} - -// NewUnsignedSampleReducer creates a new UnsignedSampleReducer -func NewUnsignedSampleReducer(size int) *UnsignedSampleReducer { - return &UnsignedSampleReducer{ - rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ - points: make(unsignedPoints, size), - } -} - -// AggregateUnsigned aggregates a point into the reducer. -func (r *UnsignedSampleReducer) AggregateUnsigned(p *UnsignedPoint) { - r.count++ - // Fill the reservoir with the first n points - if r.count-1 < len(r.points) { - p.CopyTo(&r.points[r.count-1]) - return - } - - // Generate a random integer between 1 and the count and - // if that number is less than the length of the slice - // replace the point at that index rnd with p. - rnd := r.rng.Intn(r.count) - if rnd < len(r.points) { - p.CopyTo(&r.points[rnd]) - } -} - -// Emit emits the reservoir sample as many points. -func (r *UnsignedSampleReducer) Emit() []UnsignedPoint { - min := len(r.points) - if r.count < min { - min = r.count - } - pts := r.points[:min] - sort.Sort(pts) - return pts -} - -// StringPointAggregator aggregates points to produce a single point. -type StringPointAggregator interface { - AggregateString(p *StringPoint) -} - -// StringBulkPointAggregator aggregates multiple points at a time. -type StringBulkPointAggregator interface { - AggregateStringBulk(points []StringPoint) -} - -// StringPointEmitter produces a single point from an aggregate. -type StringPointEmitter interface { - Emit() []StringPoint -} - -// StringReduceFloatFunc is the function called by a StringPoint reducer. 
-type StringReduceFloatFunc func(prev *FloatPoint, curr *StringPoint) (t int64, v float64, aux []interface{}) - -// StringFuncFloatReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type StringFuncFloatReducer struct { - prev *FloatPoint - fn StringReduceFloatFunc -} - -// NewStringFuncFloatReducer creates a new StringFuncFloatReducer. -func NewStringFuncFloatReducer(fn StringReduceFloatFunc, prev *FloatPoint) *StringFuncFloatReducer { - return &StringFuncFloatReducer{fn: fn, prev: prev} -} - -// AggregateString takes a StringPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *StringFuncFloatReducer) AggregateString(p *StringPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &FloatPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateString. -func (r *StringFuncFloatReducer) Emit() []FloatPoint { - return []FloatPoint{*r.prev} -} - -// StringReduceFloatSliceFunc is the function called by a StringPoint reducer. -type StringReduceFloatSliceFunc func(a []StringPoint) []FloatPoint - -// StringSliceFuncFloatReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type StringSliceFuncFloatReducer struct { - points []StringPoint - fn StringReduceFloatSliceFunc -} - -// NewStringSliceFuncFloatReducer creates a new StringSliceFuncFloatReducer. -func NewStringSliceFuncFloatReducer(fn StringReduceFloatSliceFunc) *StringSliceFuncFloatReducer { - return &StringSliceFuncFloatReducer{fn: fn} -} - -// AggregateString copies the StringPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *StringSliceFuncFloatReducer) AggregateString(p *StringPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. -// This is a more efficient version of calling AggregateString on each point. -func (r *StringSliceFuncFloatReducer) AggregateStringBulk(points []StringPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *StringSliceFuncFloatReducer) Emit() []FloatPoint { - return r.fn(r.points) -} - -// StringReduceIntegerFunc is the function called by a StringPoint reducer. -type StringReduceIntegerFunc func(prev *IntegerPoint, curr *StringPoint) (t int64, v int64, aux []interface{}) - -// StringFuncIntegerReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type StringFuncIntegerReducer struct { - prev *IntegerPoint - fn StringReduceIntegerFunc -} - -// NewStringFuncIntegerReducer creates a new StringFuncIntegerReducer. -func NewStringFuncIntegerReducer(fn StringReduceIntegerFunc, prev *IntegerPoint) *StringFuncIntegerReducer { - return &StringFuncIntegerReducer{fn: fn, prev: prev} -} - -// AggregateString takes a StringPoint and invokes the reduce function with the -// current and new point to modify the current point. 
-func (r *StringFuncIntegerReducer) AggregateString(p *StringPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &IntegerPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateString. -func (r *StringFuncIntegerReducer) Emit() []IntegerPoint { - return []IntegerPoint{*r.prev} -} - -// StringReduceIntegerSliceFunc is the function called by a StringPoint reducer. -type StringReduceIntegerSliceFunc func(a []StringPoint) []IntegerPoint - -// StringSliceFuncIntegerReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type StringSliceFuncIntegerReducer struct { - points []StringPoint - fn StringReduceIntegerSliceFunc -} - -// NewStringSliceFuncIntegerReducer creates a new StringSliceFuncIntegerReducer. -func NewStringSliceFuncIntegerReducer(fn StringReduceIntegerSliceFunc) *StringSliceFuncIntegerReducer { - return &StringSliceFuncIntegerReducer{fn: fn} -} - -// AggregateString copies the StringPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *StringSliceFuncIntegerReducer) AggregateString(p *StringPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. -// This is a more efficient version of calling AggregateString on each point. -func (r *StringSliceFuncIntegerReducer) AggregateStringBulk(points []StringPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *StringSliceFuncIntegerReducer) Emit() []IntegerPoint { - return r.fn(r.points) -} - -// StringReduceUnsignedFunc is the function called by a StringPoint reducer. -type StringReduceUnsignedFunc func(prev *UnsignedPoint, curr *StringPoint) (t int64, v uint64, aux []interface{}) - -// StringFuncUnsignedReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type StringFuncUnsignedReducer struct { - prev *UnsignedPoint - fn StringReduceUnsignedFunc -} - -// NewStringFuncUnsignedReducer creates a new StringFuncUnsignedReducer. -func NewStringFuncUnsignedReducer(fn StringReduceUnsignedFunc, prev *UnsignedPoint) *StringFuncUnsignedReducer { - return &StringFuncUnsignedReducer{fn: fn, prev: prev} -} - -// AggregateString takes a StringPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *StringFuncUnsignedReducer) AggregateString(p *StringPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &UnsignedPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateString. -func (r *StringFuncUnsignedReducer) Emit() []UnsignedPoint { - return []UnsignedPoint{*r.prev} -} - -// StringReduceUnsignedSliceFunc is the function called by a StringPoint reducer. 
-type StringReduceUnsignedSliceFunc func(a []StringPoint) []UnsignedPoint - -// StringSliceFuncUnsignedReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type StringSliceFuncUnsignedReducer struct { - points []StringPoint - fn StringReduceUnsignedSliceFunc -} - -// NewStringSliceFuncUnsignedReducer creates a new StringSliceFuncUnsignedReducer. -func NewStringSliceFuncUnsignedReducer(fn StringReduceUnsignedSliceFunc) *StringSliceFuncUnsignedReducer { - return &StringSliceFuncUnsignedReducer{fn: fn} -} - -// AggregateString copies the StringPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *StringSliceFuncUnsignedReducer) AggregateString(p *StringPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. -// This is a more efficient version of calling AggregateString on each point. -func (r *StringSliceFuncUnsignedReducer) AggregateStringBulk(points []StringPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *StringSliceFuncUnsignedReducer) Emit() []UnsignedPoint { - return r.fn(r.points) -} - -// StringReduceFunc is the function called by a StringPoint reducer. -type StringReduceFunc func(prev *StringPoint, curr *StringPoint) (t int64, v string, aux []interface{}) - -// StringFuncReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type StringFuncReducer struct { - prev *StringPoint - fn StringReduceFunc -} - -// NewStringFuncReducer creates a new StringFuncStringReducer. -func NewStringFuncReducer(fn StringReduceFunc, prev *StringPoint) *StringFuncReducer { - return &StringFuncReducer{fn: fn, prev: prev} -} - -// AggregateString takes a StringPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *StringFuncReducer) AggregateString(p *StringPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &StringPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateString. -func (r *StringFuncReducer) Emit() []StringPoint { - return []StringPoint{*r.prev} -} - -// StringReduceSliceFunc is the function called by a StringPoint reducer. -type StringReduceSliceFunc func(a []StringPoint) []StringPoint - -// StringSliceFuncReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type StringSliceFuncReducer struct { - points []StringPoint - fn StringReduceSliceFunc -} - -// NewStringSliceFuncReducer creates a new StringSliceFuncReducer. -func NewStringSliceFuncReducer(fn StringReduceSliceFunc) *StringSliceFuncReducer { - return &StringSliceFuncReducer{fn: fn} -} - -// AggregateString copies the StringPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *StringSliceFuncReducer) AggregateString(p *StringPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. 
-// This is a more efficient version of calling AggregateString on each point. -func (r *StringSliceFuncReducer) AggregateStringBulk(points []StringPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *StringSliceFuncReducer) Emit() []StringPoint { - return r.fn(r.points) -} - -// StringReduceBooleanFunc is the function called by a StringPoint reducer. -type StringReduceBooleanFunc func(prev *BooleanPoint, curr *StringPoint) (t int64, v bool, aux []interface{}) - -// StringFuncBooleanReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type StringFuncBooleanReducer struct { - prev *BooleanPoint - fn StringReduceBooleanFunc -} - -// NewStringFuncBooleanReducer creates a new StringFuncBooleanReducer. -func NewStringFuncBooleanReducer(fn StringReduceBooleanFunc, prev *BooleanPoint) *StringFuncBooleanReducer { - return &StringFuncBooleanReducer{fn: fn, prev: prev} -} - -// AggregateString takes a StringPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *StringFuncBooleanReducer) AggregateString(p *StringPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &BooleanPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateString. -func (r *StringFuncBooleanReducer) Emit() []BooleanPoint { - return []BooleanPoint{*r.prev} -} - -// StringReduceBooleanSliceFunc is the function called by a StringPoint reducer. -type StringReduceBooleanSliceFunc func(a []StringPoint) []BooleanPoint - -// StringSliceFuncBooleanReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type StringSliceFuncBooleanReducer struct { - points []StringPoint - fn StringReduceBooleanSliceFunc -} - -// NewStringSliceFuncBooleanReducer creates a new StringSliceFuncBooleanReducer. -func NewStringSliceFuncBooleanReducer(fn StringReduceBooleanSliceFunc) *StringSliceFuncBooleanReducer { - return &StringSliceFuncBooleanReducer{fn: fn} -} - -// AggregateString copies the StringPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *StringSliceFuncBooleanReducer) AggregateString(p *StringPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateStringBulk performs a bulk copy of StringPoints into the internal slice. -// This is a more efficient version of calling AggregateString on each point. -func (r *StringSliceFuncBooleanReducer) AggregateStringBulk(points []StringPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. 
-func (r *StringSliceFuncBooleanReducer) Emit() []BooleanPoint { - return r.fn(r.points) -} - -// StringSumHllReducer returns the HLL sketch for a series, in string form -type StringSumHllReducer struct { - plus *hll.Plus -} - -// func NewStringSumHllReducer creates a new StringSumHllReducer -func NewStringSumHllReducer() *StringSumHllReducer { - return &StringSumHllReducer{plus: hll.NewDefaultPlus()} -} - -// AggregateString aggregates a point into the reducer. -func (r *StringSumHllReducer) AggregateString(p *StringPoint) { - - b := []byte(p.Value) - - r.plus.Add(b) -} - -// Emit emits the distinct points that have been aggregated into the reducer. -func (r *StringSumHllReducer) Emit() []StringPoint { - return []StringPoint{ - marshalPlus(r.plus, nil), - } -} - -// StringDistinctReducer returns the distinct points in a series. -type StringDistinctReducer struct { - m map[string]StringPoint -} - -// NewStringDistinctReducer creates a new StringDistinctReducer. -func NewStringDistinctReducer() *StringDistinctReducer { - return &StringDistinctReducer{m: make(map[string]StringPoint)} -} - -// AggregateString aggregates a point into the reducer. -func (r *StringDistinctReducer) AggregateString(p *StringPoint) { - if _, ok := r.m[p.Value]; !ok { - r.m[p.Value] = *p - } -} - -// Emit emits the distinct points that have been aggregated into the reducer. -func (r *StringDistinctReducer) Emit() []StringPoint { - points := make([]StringPoint, 0, len(r.m)) - for _, p := range r.m { - points = append(points, StringPoint{Time: p.Time, Value: p.Value}) - } - sort.Sort(stringPoints(points)) - return points -} - -// StringElapsedReducer calculates the elapsed of the aggregated points. -type StringElapsedReducer struct { - unitConversion int64 - prev StringPoint - curr StringPoint -} - -// NewStringElapsedReducer creates a new StringElapsedReducer. -func NewStringElapsedReducer(interval Interval) *StringElapsedReducer { - return &StringElapsedReducer{ - unitConversion: int64(interval.Duration), - prev: StringPoint{Nil: true}, - curr: StringPoint{Nil: true}, - } -} - -// AggregateString aggregates a point into the reducer and updates the current window. -func (r *StringElapsedReducer) AggregateString(p *StringPoint) { - r.prev = r.curr - r.curr = *p -} - -// Emit emits the elapsed of the reducer at the current point. -func (r *StringElapsedReducer) Emit() []IntegerPoint { - if !r.prev.Nil { - elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion - return []IntegerPoint{ - {Time: r.curr.Time, Value: elapsed}, - } - } - return nil -} - -// StringSampleReducer implements a reservoir sampling to calculate a random subset of points -type StringSampleReducer struct { - count int // how many points we've iterated over - rng *rand.Rand // random number generator for each reducer - - points stringPoints // the reservoir -} - -// NewStringSampleReducer creates a new StringSampleReducer -func NewStringSampleReducer(size int) *StringSampleReducer { - return &StringSampleReducer{ - rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ - points: make(stringPoints, size), - } -} - -// AggregateString aggregates a point into the reducer. 
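// The sample reducers implement Algorithm R style reservoir sampling: the first
// len(points) values fill the reservoir directly, after which the n-th point
// replaces a uniformly chosen slot with probability len(points)/n (rand.Intn(n)
// returns an index in [0, n)), leaving every point seen so far in the sample with
// equal probability. A minimal usage sketch, not taken from the original file:
//
//	r := NewStringSampleReducer(2)
//	for i := range pts { // pts is some []StringPoint
//		r.AggregateString(&pts[i])
//	}
//	sample := r.Emit() // at most 2 points, sorted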
-func (r *StringSampleReducer) AggregateString(p *StringPoint) { - r.count++ - // Fill the reservoir with the first n points - if r.count-1 < len(r.points) { - p.CopyTo(&r.points[r.count-1]) - return - } - - // Generate a random integer between 1 and the count and - // if that number is less than the length of the slice - // replace the point at that index rnd with p. - rnd := r.rng.Intn(r.count) - if rnd < len(r.points) { - p.CopyTo(&r.points[rnd]) - } -} - -// Emit emits the reservoir sample as many points. -func (r *StringSampleReducer) Emit() []StringPoint { - min := len(r.points) - if r.count < min { - min = r.count - } - pts := r.points[:min] - sort.Sort(pts) - return pts -} - -// BooleanPointAggregator aggregates points to produce a single point. -type BooleanPointAggregator interface { - AggregateBoolean(p *BooleanPoint) -} - -// BooleanBulkPointAggregator aggregates multiple points at a time. -type BooleanBulkPointAggregator interface { - AggregateBooleanBulk(points []BooleanPoint) -} - -// BooleanPointEmitter produces a single point from an aggregate. -type BooleanPointEmitter interface { - Emit() []BooleanPoint -} - -// BooleanReduceFloatFunc is the function called by a BooleanPoint reducer. -type BooleanReduceFloatFunc func(prev *FloatPoint, curr *BooleanPoint) (t int64, v float64, aux []interface{}) - -// BooleanFuncFloatReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type BooleanFuncFloatReducer struct { - prev *FloatPoint - fn BooleanReduceFloatFunc -} - -// NewBooleanFuncFloatReducer creates a new BooleanFuncFloatReducer. -func NewBooleanFuncFloatReducer(fn BooleanReduceFloatFunc, prev *FloatPoint) *BooleanFuncFloatReducer { - return &BooleanFuncFloatReducer{fn: fn, prev: prev} -} - -// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *BooleanFuncFloatReducer) AggregateBoolean(p *BooleanPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &FloatPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. -func (r *BooleanFuncFloatReducer) Emit() []FloatPoint { - return []FloatPoint{*r.prev} -} - -// BooleanReduceFloatSliceFunc is the function called by a BooleanPoint reducer. -type BooleanReduceFloatSliceFunc func(a []BooleanPoint) []FloatPoint - -// BooleanSliceFuncFloatReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type BooleanSliceFuncFloatReducer struct { - points []BooleanPoint - fn BooleanReduceFloatSliceFunc -} - -// NewBooleanSliceFuncFloatReducer creates a new BooleanSliceFuncFloatReducer. -func NewBooleanSliceFuncFloatReducer(fn BooleanReduceFloatSliceFunc) *BooleanSliceFuncFloatReducer { - return &BooleanSliceFuncFloatReducer{fn: fn} -} - -// AggregateBoolean copies the BooleanPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *BooleanSliceFuncFloatReducer) AggregateBoolean(p *BooleanPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. -// This is a more efficient version of calling AggregateBoolean on each point. 
-func (r *BooleanSliceFuncFloatReducer) AggregateBooleanBulk(points []BooleanPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *BooleanSliceFuncFloatReducer) Emit() []FloatPoint { - return r.fn(r.points) -} - -// BooleanReduceIntegerFunc is the function called by a BooleanPoint reducer. -type BooleanReduceIntegerFunc func(prev *IntegerPoint, curr *BooleanPoint) (t int64, v int64, aux []interface{}) - -// BooleanFuncIntegerReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type BooleanFuncIntegerReducer struct { - prev *IntegerPoint - fn BooleanReduceIntegerFunc -} - -// NewBooleanFuncIntegerReducer creates a new BooleanFuncIntegerReducer. -func NewBooleanFuncIntegerReducer(fn BooleanReduceIntegerFunc, prev *IntegerPoint) *BooleanFuncIntegerReducer { - return &BooleanFuncIntegerReducer{fn: fn, prev: prev} -} - -// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *BooleanFuncIntegerReducer) AggregateBoolean(p *BooleanPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &IntegerPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. -func (r *BooleanFuncIntegerReducer) Emit() []IntegerPoint { - return []IntegerPoint{*r.prev} -} - -// BooleanReduceIntegerSliceFunc is the function called by a BooleanPoint reducer. -type BooleanReduceIntegerSliceFunc func(a []BooleanPoint) []IntegerPoint - -// BooleanSliceFuncIntegerReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type BooleanSliceFuncIntegerReducer struct { - points []BooleanPoint - fn BooleanReduceIntegerSliceFunc -} - -// NewBooleanSliceFuncIntegerReducer creates a new BooleanSliceFuncIntegerReducer. -func NewBooleanSliceFuncIntegerReducer(fn BooleanReduceIntegerSliceFunc) *BooleanSliceFuncIntegerReducer { - return &BooleanSliceFuncIntegerReducer{fn: fn} -} - -// AggregateBoolean copies the BooleanPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *BooleanSliceFuncIntegerReducer) AggregateBoolean(p *BooleanPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. -// This is a more efficient version of calling AggregateBoolean on each point. -func (r *BooleanSliceFuncIntegerReducer) AggregateBooleanBulk(points []BooleanPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *BooleanSliceFuncIntegerReducer) Emit() []IntegerPoint { - return r.fn(r.points) -} - -// BooleanReduceUnsignedFunc is the function called by a BooleanPoint reducer. -type BooleanReduceUnsignedFunc func(prev *UnsignedPoint, curr *BooleanPoint) (t int64, v uint64, aux []interface{}) - -// BooleanFuncUnsignedReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. 
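// A hypothetical reduce function for this reducer, not taken from the original
// source, counting how many boolean points were true:
//
//	countTrue := func(prev *UnsignedPoint, curr *BooleanPoint) (int64, uint64, []interface{}) {
//		var n uint64
//		if prev != nil {
//			n = prev.Value
//		}
//		if curr.Value {
//			n++
//		}
//		return curr.Time, n, nil
//	}
//	r := NewBooleanFuncUnsignedReducer(countTrue, nil)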
-type BooleanFuncUnsignedReducer struct { - prev *UnsignedPoint - fn BooleanReduceUnsignedFunc -} - -// NewBooleanFuncUnsignedReducer creates a new BooleanFuncUnsignedReducer. -func NewBooleanFuncUnsignedReducer(fn BooleanReduceUnsignedFunc, prev *UnsignedPoint) *BooleanFuncUnsignedReducer { - return &BooleanFuncUnsignedReducer{fn: fn, prev: prev} -} - -// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *BooleanFuncUnsignedReducer) AggregateBoolean(p *BooleanPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &UnsignedPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. -func (r *BooleanFuncUnsignedReducer) Emit() []UnsignedPoint { - return []UnsignedPoint{*r.prev} -} - -// BooleanReduceUnsignedSliceFunc is the function called by a BooleanPoint reducer. -type BooleanReduceUnsignedSliceFunc func(a []BooleanPoint) []UnsignedPoint - -// BooleanSliceFuncUnsignedReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type BooleanSliceFuncUnsignedReducer struct { - points []BooleanPoint - fn BooleanReduceUnsignedSliceFunc -} - -// NewBooleanSliceFuncUnsignedReducer creates a new BooleanSliceFuncUnsignedReducer. -func NewBooleanSliceFuncUnsignedReducer(fn BooleanReduceUnsignedSliceFunc) *BooleanSliceFuncUnsignedReducer { - return &BooleanSliceFuncUnsignedReducer{fn: fn} -} - -// AggregateBoolean copies the BooleanPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *BooleanSliceFuncUnsignedReducer) AggregateBoolean(p *BooleanPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. -// This is a more efficient version of calling AggregateBoolean on each point. -func (r *BooleanSliceFuncUnsignedReducer) AggregateBooleanBulk(points []BooleanPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *BooleanSliceFuncUnsignedReducer) Emit() []UnsignedPoint { - return r.fn(r.points) -} - -// BooleanReduceStringFunc is the function called by a BooleanPoint reducer. -type BooleanReduceStringFunc func(prev *StringPoint, curr *BooleanPoint) (t int64, v string, aux []interface{}) - -// BooleanFuncStringReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type BooleanFuncStringReducer struct { - prev *StringPoint - fn BooleanReduceStringFunc -} - -// NewBooleanFuncStringReducer creates a new BooleanFuncStringReducer. -func NewBooleanFuncStringReducer(fn BooleanReduceStringFunc, prev *StringPoint) *BooleanFuncStringReducer { - return &BooleanFuncStringReducer{fn: fn, prev: prev} -} - -// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the -// current and new point to modify the current point. 
-func (r *BooleanFuncStringReducer) AggregateBoolean(p *BooleanPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &StringPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. -func (r *BooleanFuncStringReducer) Emit() []StringPoint { - return []StringPoint{*r.prev} -} - -// BooleanReduceStringSliceFunc is the function called by a BooleanPoint reducer. -type BooleanReduceStringSliceFunc func(a []BooleanPoint) []StringPoint - -// BooleanSliceFuncStringReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type BooleanSliceFuncStringReducer struct { - points []BooleanPoint - fn BooleanReduceStringSliceFunc -} - -// NewBooleanSliceFuncStringReducer creates a new BooleanSliceFuncStringReducer. -func NewBooleanSliceFuncStringReducer(fn BooleanReduceStringSliceFunc) *BooleanSliceFuncStringReducer { - return &BooleanSliceFuncStringReducer{fn: fn} -} - -// AggregateBoolean copies the BooleanPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *BooleanSliceFuncStringReducer) AggregateBoolean(p *BooleanPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. -// This is a more efficient version of calling AggregateBoolean on each point. -func (r *BooleanSliceFuncStringReducer) AggregateBooleanBulk(points []BooleanPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *BooleanSliceFuncStringReducer) Emit() []StringPoint { - return r.fn(r.points) -} - -// BooleanReduceFunc is the function called by a BooleanPoint reducer. -type BooleanReduceFunc func(prev *BooleanPoint, curr *BooleanPoint) (t int64, v bool, aux []interface{}) - -// BooleanFuncReducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type BooleanFuncReducer struct { - prev *BooleanPoint - fn BooleanReduceFunc -} - -// NewBooleanFuncReducer creates a new BooleanFuncBooleanReducer. -func NewBooleanFuncReducer(fn BooleanReduceFunc, prev *BooleanPoint) *BooleanFuncReducer { - return &BooleanFuncReducer{fn: fn, prev: prev} -} - -// AggregateBoolean takes a BooleanPoint and invokes the reduce function with the -// current and new point to modify the current point. -func (r *BooleanFuncReducer) AggregateBoolean(p *BooleanPoint) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &BooleanPoint{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with AggregateBoolean. -func (r *BooleanFuncReducer) Emit() []BooleanPoint { - return []BooleanPoint{*r.prev} -} - -// BooleanReduceSliceFunc is the function called by a BooleanPoint reducer. -type BooleanReduceSliceFunc func(a []BooleanPoint) []BooleanPoint - -// BooleanSliceFuncReducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. 
-type BooleanSliceFuncReducer struct { - points []BooleanPoint - fn BooleanReduceSliceFunc -} - -// NewBooleanSliceFuncReducer creates a new BooleanSliceFuncReducer. -func NewBooleanSliceFuncReducer(fn BooleanReduceSliceFunc) *BooleanSliceFuncReducer { - return &BooleanSliceFuncReducer{fn: fn} -} - -// AggregateBoolean copies the BooleanPoint into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *BooleanSliceFuncReducer) AggregateBoolean(p *BooleanPoint) { - r.points = append(r.points, *p.Clone()) -} - -// AggregateBooleanBulk performs a bulk copy of BooleanPoints into the internal slice. -// This is a more efficient version of calling AggregateBoolean on each point. -func (r *BooleanSliceFuncReducer) AggregateBooleanBulk(points []BooleanPoint) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. -func (r *BooleanSliceFuncReducer) Emit() []BooleanPoint { - return r.fn(r.points) -} - -// BooleanSumHllReducer returns the HLL sketch for a series, in string form -type BooleanSumHllReducer struct { - plus *hll.Plus -} - -// func NewBooleanSumHllReducer creates a new BooleanSumHllReducer -func NewBooleanSumHllReducer() *BooleanSumHllReducer { - return &BooleanSumHllReducer{plus: hll.NewDefaultPlus()} -} - -// AggregateBoolean aggregates a point into the reducer. -func (r *BooleanSumHllReducer) AggregateBoolean(p *BooleanPoint) { - - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, p.Value) - b := buf.Bytes() - - r.plus.Add(b) -} - -// Emit emits the distinct points that have been aggregated into the reducer. -func (r *BooleanSumHllReducer) Emit() []StringPoint { - return []StringPoint{ - marshalPlus(r.plus, nil), - } -} - -// BooleanDistinctReducer returns the distinct points in a series. -type BooleanDistinctReducer struct { - m map[bool]BooleanPoint -} - -// NewBooleanDistinctReducer creates a new BooleanDistinctReducer. -func NewBooleanDistinctReducer() *BooleanDistinctReducer { - return &BooleanDistinctReducer{m: make(map[bool]BooleanPoint)} -} - -// AggregateBoolean aggregates a point into the reducer. -func (r *BooleanDistinctReducer) AggregateBoolean(p *BooleanPoint) { - if _, ok := r.m[p.Value]; !ok { - r.m[p.Value] = *p - } -} - -// Emit emits the distinct points that have been aggregated into the reducer. -func (r *BooleanDistinctReducer) Emit() []BooleanPoint { - points := make([]BooleanPoint, 0, len(r.m)) - for _, p := range r.m { - points = append(points, BooleanPoint{Time: p.Time, Value: p.Value}) - } - sort.Sort(booleanPoints(points)) - return points -} - -// BooleanElapsedReducer calculates the elapsed of the aggregated points. -type BooleanElapsedReducer struct { - unitConversion int64 - prev BooleanPoint - curr BooleanPoint -} - -// NewBooleanElapsedReducer creates a new BooleanElapsedReducer. -func NewBooleanElapsedReducer(interval Interval) *BooleanElapsedReducer { - return &BooleanElapsedReducer{ - unitConversion: int64(interval.Duration), - prev: BooleanPoint{Nil: true}, - curr: BooleanPoint{Nil: true}, - } -} - -// AggregateBoolean aggregates a point into the reducer and updates the current window. -func (r *BooleanElapsedReducer) AggregateBoolean(p *BooleanPoint) { - r.prev = r.curr - r.curr = *p -} - -// Emit emits the elapsed of the reducer at the current point. 
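// The elapsed value is the gap between the two most recent points, expressed in
// units of the configured Interval. As a worked example (not from the original
// source): with an interval of one second and consecutive points at nanosecond
// timestamps 0 and 3e9, Emit returns a single IntegerPoint with Value
// (3e9 - 0) / 1e9 = 3.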
-func (r *BooleanElapsedReducer) Emit() []IntegerPoint { - if !r.prev.Nil { - elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion - return []IntegerPoint{ - {Time: r.curr.Time, Value: elapsed}, - } - } - return nil -} - -// BooleanSampleReducer implements a reservoir sampling to calculate a random subset of points -type BooleanSampleReducer struct { - count int // how many points we've iterated over - rng *rand.Rand // random number generator for each reducer - - points booleanPoints // the reservoir -} - -// NewBooleanSampleReducer creates a new BooleanSampleReducer -func NewBooleanSampleReducer(size int) *BooleanSampleReducer { - return &BooleanSampleReducer{ - rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ - points: make(booleanPoints, size), - } -} - -// AggregateBoolean aggregates a point into the reducer. -func (r *BooleanSampleReducer) AggregateBoolean(p *BooleanPoint) { - r.count++ - // Fill the reservoir with the first n points - if r.count-1 < len(r.points) { - p.CopyTo(&r.points[r.count-1]) - return - } - - // Generate a random integer between 1 and the count and - // if that number is less than the length of the slice - // replace the point at that index rnd with p. - rnd := r.rng.Intn(r.count) - if rnd < len(r.points) { - p.CopyTo(&r.points[rnd]) - } -} - -// Emit emits the reservoir sample as many points. -func (r *BooleanSampleReducer) Emit() []BooleanPoint { - min := len(r.points) - if r.count < min { - min = r.count - } - pts := r.points[:min] - sort.Sort(pts) - return pts -} diff --git a/influxql/query/functions.gen.go.tmpl b/influxql/query/functions.gen.go.tmpl deleted file mode 100644 index 4910bbda772..00000000000 --- a/influxql/query/functions.gen.go.tmpl +++ /dev/null @@ -1,238 +0,0 @@ -package query - -import ( -"encoding/binary" -"bytes" -"sort" -"time" -"math/rand" - -"github.com/influxdata/influxdb/v2/pkg/estimator/hll" -) - -{{with $types := .}}{{range $k := $types}} - -// {{$k.Name}}PointAggregator aggregates points to produce a single point. -type {{$k.Name}}PointAggregator interface { - Aggregate{{$k.Name}}(p *{{$k.Name}}Point) -} - -// {{$k.Name}}BulkPointAggregator aggregates multiple points at a time. -type {{$k.Name}}BulkPointAggregator interface { - Aggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point) -} - -// {{$k.Name}}PointEmitter produces a single point from an aggregate. -type {{$k.Name}}PointEmitter interface { - Emit() []{{$k.Name}}Point -} - -{{range $v := $types}} - -// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func is the function called by a {{$k.Name}}Point reducer. -type {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func func(prev *{{$v.Name}}Point, curr *{{$k.Name}}Point) (t int64, v {{$v.Type}}, aux []interface{}) - -// {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer is a reducer that reduces -// the passed in points to a single point using a reduce function. -type {{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct { - prev *{{$v.Name}}Point - fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func -} - -// New{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer creates a new {{$k.Name}}Func{{$v.Name}}Reducer. 
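// A sketch, not part of the original template or its generator, of how a template
// like this can be rendered over a list of point types with text/template; the
// field names and example values below are assumptions, since the real
// type-definition data is not shown in this diff:
//
//	types := []map[string]string{
//		{"Name": "Float", "name": "float", "Type": "float64"},
//		{"Name": "Integer", "name": "integer", "Type": "int64"},
//	}
//	tmpl := template.Must(template.ParseFiles("functions.gen.go.tmpl"))
//	var buf bytes.Buffer
//	if err := tmpl.Execute(&buf, types); err != nil {
//		log.Fatal(err)
//	}
//	// buf now holds generated Go source: the outer range emits one reducer
//	// family per type, and the inner range produces the cross-product of
//	// input and output point types.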
-func New{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Func, prev *{{$v.Name}}Point) *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer { - return &{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn, prev: prev} -} - -// Aggregate{{$k.Name}} takes a {{$k.Name}}Point and invokes the reduce function with the -// current and new point to modify the current point. -func (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { - t, v, aux := r.fn(r.prev, p) - if r.prev == nil { - r.prev = &{{$v.Name}}Point{} - } - r.prev.Time = t - r.prev.Value = v - r.prev.Aux = aux - if p.Aggregated > 1 { - r.prev.Aggregated += p.Aggregated - } else { - r.prev.Aggregated++ - } -} - -// Emit emits the point that was generated when reducing the points fed in with Aggregate{{$k.Name}}. -func (r *{{$k.Name}}Func{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point { - return []{{$v.Name}}Point{*r.prev} -} - -// {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc is the function called by a {{$k.Name}}Point reducer. -type {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc func(a []{{$k.Name}}Point) []{{$v.Name}}Point - -// {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer is a reducer that aggregates -// the passed in points and then invokes the function to reduce the points when they are emitted. -type {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer struct { - points []{{$k.Name}}Point - fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc -} - -// New{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer creates a new {{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer. -func New{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer(fn {{$k.Name}}Reduce{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}SliceFunc) *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer { - return &{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer{fn: fn} -} - -// Aggregate{{$k.Name}} copies the {{$k.Name}}Point into the internal slice to be passed -// to the reduce function when Emit is called. -func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { - r.points = append(r.points, *p.Clone()) -} - -// Aggregate{{$k.Name}}Bulk performs a bulk copy of {{$k.Name}}Points into the internal slice. -// This is a more efficient version of calling Aggregate{{$k.Name}} on each point. -func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Aggregate{{$k.Name}}Bulk(points []{{$k.Name}}Point) { - r.points = append(r.points, points...) -} - -// Emit invokes the reduce function on the aggregated points to generate the aggregated points. -// This method does not clear the points from the internal slice. 
-func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point { - return r.fn(r.points) -} -{{end}} - -// {{$k.Name}}SumHllReducer returns the HLL sketch for a series, in string form -type {{$k.Name}}SumHllReducer struct { - plus *hll.Plus -} - -// func New{{$k.Name}}SumHllReducer creates a new {{$k.Name}}SumHllReducer -func New{{$k.Name}}SumHllReducer() *{{$k.Name}}SumHllReducer { - return &{{$k.Name}}SumHllReducer{plus:hll.NewDefaultPlus()} -} - -// Aggregate{{$k.Name}} aggregates a point into the reducer. -func (r *{{$k.Name}}SumHllReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { - {{if eq $k.Type "string"}} - b := []byte(p.Value) - {{else}} - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, p.Value) - b := buf.Bytes() - {{end}} - r.plus.Add(b) -} - -// Emit emits the distinct points that have been aggregated into the reducer. -func (r *{{$k.Name}}SumHllReducer) Emit() []StringPoint { - return []StringPoint{ - marshalPlus(r.plus, nil), - } -} - -// {{$k.Name}}DistinctReducer returns the distinct points in a series. -type {{$k.Name}}DistinctReducer struct { - m map[{{$k.Type}}]{{$k.Name}}Point -} - -// New{{$k.Name}}DistinctReducer creates a new {{$k.Name}}DistinctReducer. -func New{{$k.Name}}DistinctReducer() *{{$k.Name}}DistinctReducer { - return &{{$k.Name}}DistinctReducer{m: make(map[{{$k.Type}}]{{$k.Name}}Point)} -} - -// Aggregate{{$k.Name}} aggregates a point into the reducer. -func (r *{{$k.Name}}DistinctReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { - if _, ok := r.m[p.Value]; !ok { - r.m[p.Value] = *p - } -} - -// Emit emits the distinct points that have been aggregated into the reducer. -func (r *{{$k.Name}}DistinctReducer) Emit() []{{$k.Name}}Point { - points := make([]{{$k.Name}}Point, 0, len(r.m)) - for _, p := range r.m { - points = append(points, {{$k.Name}}Point{Time: p.Time, Value: p.Value}) - } - sort.Sort({{$k.name}}Points(points)) - return points -} - -// {{$k.Name}}ElapsedReducer calculates the elapsed of the aggregated points. -type {{$k.Name}}ElapsedReducer struct { - unitConversion int64 - prev {{$k.Name}}Point - curr {{$k.Name}}Point -} - -// New{{$k.Name}}ElapsedReducer creates a new {{$k.Name}}ElapsedReducer. -func New{{$k.Name}}ElapsedReducer(interval Interval) *{{$k.Name}}ElapsedReducer { - return &{{$k.Name}}ElapsedReducer{ - unitConversion: int64(interval.Duration), - prev: {{$k.Name}}Point{Nil: true}, - curr: {{$k.Name}}Point{Nil: true}, - } -} - -// Aggregate{{$k.Name}} aggregates a point into the reducer and updates the current window. -func (r *{{$k.Name}}ElapsedReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { - r.prev = r.curr - r.curr = *p -} - -// Emit emits the elapsed of the reducer at the current point. 
-func (r *{{$k.Name}}ElapsedReducer) Emit() []IntegerPoint { - if !r.prev.Nil { - elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion - return []IntegerPoint{ - {Time: r.curr.Time, Value: elapsed}, - } - } - return nil -} - -// {{$k.Name}}SampleReducer implements a reservoir sampling to calculate a random subset of points -type {{$k.Name}}SampleReducer struct { - count int // how many points we've iterated over - rng *rand.Rand // random number generator for each reducer - - points {{$k.name}}Points // the reservoir -} - -// New{{$k.Name}}SampleReducer creates a new {{$k.Name}}SampleReducer -func New{{$k.Name}}SampleReducer(size int) *{{$k.Name}}SampleReducer { - return &{{$k.Name}}SampleReducer{ - rng: rand.New(rand.NewSource(time.Now().UnixNano())), // seed with current time as suggested by https://golang.org/pkg/math/rand/ - points: make({{$k.name}}Points, size), - } -} - -// Aggregate{{$k.Name}} aggregates a point into the reducer. -func (r *{{$k.Name}}SampleReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { - r.count++ - // Fill the reservoir with the first n points - if r.count-1 < len(r.points) { - p.CopyTo(&r.points[r.count-1]) - return - } - - // Generate a random integer between 1 and the count and - // if that number is less than the length of the slice - // replace the point at that index rnd with p. - rnd := r.rng.Intn(r.count) - if rnd < len(r.points) { - p.CopyTo(&r.points[rnd]) - } -} - -// Emit emits the reservoir sample as many points. -func (r *{{$k.Name}}SampleReducer) Emit() []{{$k.Name}}Point { - min := len(r.points) - if r.count < min { - min = r.count - } - pts := r.points[:min] - sort.Sort(pts) - return pts -} - - -{{end}}{{end}} diff --git a/influxql/query/functions.go b/influxql/query/functions.go deleted file mode 100644 index 1fc7ebc7821..00000000000 --- a/influxql/query/functions.go +++ /dev/null @@ -1,2271 +0,0 @@ -package query - -import ( - "container/heap" - "encoding/base64" - "fmt" - "math" - "sort" - "time" - - "github.com/influxdata/influxdb/v2/influxql/query/internal/gota" - "github.com/influxdata/influxdb/v2/influxql/query/neldermead" - "github.com/influxdata/influxdb/v2/pkg/estimator/hll" - "github.com/influxdata/influxql" -) - -var hllPrefix = []byte("HLL_") -var hllErrorPrefix = []byte("HLLERROR ") - -// FieldMapper is a FieldMapper that wraps another FieldMapper and exposes -// the functions implemented by the query engine. -type queryFieldMapper struct { - influxql.FieldMapper -} - -func (m queryFieldMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) { - if mapper, ok := m.FieldMapper.(influxql.CallTypeMapper); ok { - typ, err := mapper.CallType(name, args) - if err != nil { - return influxql.Unknown, err - } else if typ != influxql.Unknown { - return typ, nil - } - } - - // Use the default FunctionTypeMapper for the query engine. - typmap := FunctionTypeMapper{} - return typmap.CallType(name, args) -} - -// CallTypeMapper returns the types for call iterator functions. -// Call iterator functions are commonly implemented within the storage engine -// so this mapper is limited to only the return values of those functions. 
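The `SampleReducer` generated from this template is a textbook reservoir sample (Algorithm R): the first `size` points fill the reservoir, after which the i-th point replaces a random slot with probability `size/i`. A standalone sketch of the same idea over plain `float64` values; the type names and fixed seed are illustrative only.

```go
package main

import (
	"fmt"
	"math/rand"
)

// reservoir keeps a uniform random sample of up to `size` values.
type reservoir struct {
	count  int // values seen so far
	rng    *rand.Rand
	sample []float64 // the reservoir itself
}

func newReservoir(size int, seed int64) *reservoir {
	return &reservoir{
		rng:    rand.New(rand.NewSource(seed)),
		sample: make([]float64, 0, size),
	}
}

func (r *reservoir) Add(v float64) {
	r.count++
	// Fill the reservoir with the first `size` values.
	if len(r.sample) < cap(r.sample) {
		r.sample = append(r.sample, v)
		return
	}
	// Replace a slot with probability size/count: draw an index in [0, count)
	// and keep the new value only if the index lands inside the reservoir.
	if idx := r.rng.Intn(r.count); idx < len(r.sample) {
		r.sample[idx] = v
	}
}

func main() {
	r := newReservoir(3, 42)
	for i := 1; i <= 10; i++ {
		r.Add(float64(i))
	}
	fmt.Println(r.sample) // a uniform 3-element sample of 1..10
}
```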
-type CallTypeMapper struct{} - -func (CallTypeMapper) MapType(measurement *influxql.Measurement, field string) influxql.DataType { - return influxql.Unknown -} - -func (CallTypeMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) { - // If the function is not implemented by the embedded field mapper, then - // see if we implement the function and return the type here. - switch name { - case "mean": - return influxql.Float, nil - case "count": - return influxql.Integer, nil - case "min", "max", "sum", "first", "last": - // TODO(jsternberg): Verify the input type. - return args[0], nil - } - return influxql.Unknown, nil -} - -// FunctionTypeMapper handles the type mapping for all functions implemented by the -// query engine. -type FunctionTypeMapper struct { - CallTypeMapper -} - -func (FunctionTypeMapper) MapType(measurement *influxql.Measurement, field string) influxql.DataType { - return influxql.Unknown -} - -func (m FunctionTypeMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) { - if typ, err := m.CallTypeMapper.CallType(name, args); typ != influxql.Unknown || err != nil { - return typ, err - } - - // Handle functions implemented by the query engine. - switch name { - case "median", "integral", "stddev", - "derivative", "non_negative_derivative", - "moving_average", - "exponential_moving_average", - "double_exponential_moving_average", - "triple_exponential_moving_average", - "relative_strength_index", - "triple_exponential_derivative", - "kaufmans_efficiency_ratio", - "kaufmans_adaptive_moving_average", - "chande_momentum_oscillator", - "holt_winters", "holt_winters_with_fit": - return influxql.Float, nil - case "elapsed": - return influxql.Integer, nil - default: - // TODO(jsternberg): Do not use default for this. - return args[0], nil - } -} - -// FloatMeanReducer calculates the mean of the aggregated points. -type FloatMeanReducer struct { - sum float64 - count uint32 -} - -// NewFloatMeanReducer creates a new FloatMeanReducer. -func NewFloatMeanReducer() *FloatMeanReducer { - return &FloatMeanReducer{} -} - -// AggregateFloat aggregates a point into the reducer. -func (r *FloatMeanReducer) AggregateFloat(p *FloatPoint) { - if p.Aggregated >= 2 { - r.sum += p.Value * float64(p.Aggregated) - r.count += p.Aggregated - } else { - r.sum += p.Value - r.count++ - } -} - -// Emit emits the mean of the aggregated points as a single point. -func (r *FloatMeanReducer) Emit() []FloatPoint { - return []FloatPoint{{ - Time: ZeroTime, - Value: r.sum / float64(r.count), - Aggregated: r.count, - }} -} - -// IntegerMeanReducer calculates the mean of the aggregated points. -type IntegerMeanReducer struct { - sum int64 - count uint32 -} - -// NewIntegerMeanReducer creates a new IntegerMeanReducer. -func NewIntegerMeanReducer() *IntegerMeanReducer { - return &IntegerMeanReducer{} -} - -// AggregateInteger aggregates a point into the reducer. -func (r *IntegerMeanReducer) AggregateInteger(p *IntegerPoint) { - if p.Aggregated >= 2 { - r.sum += p.Value * int64(p.Aggregated) - r.count += p.Aggregated - } else { - r.sum += p.Value - r.count++ - } -} - -// Emit emits the mean of the aggregated points as a single point. -func (r *IntegerMeanReducer) Emit() []FloatPoint { - return []FloatPoint{{ - Time: ZeroTime, - Value: float64(r.sum) / float64(r.count), - Aggregated: r.count, - }} -} - -// UnsignedMeanReducer calculates the mean of the aggregated points. 
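The mean reducers above weight a point by its `Aggregated` count when it already summarizes several raw points, so pre-aggregated inputs are not undercounted. A small self-contained sketch of that accumulation, using hypothetical names rather than the engine's types:

```go
package main

import "fmt"

// meanReducer accumulates a running sum and count, weighting points that
// already represent several raw values by their aggregate count.
type meanReducer struct {
	sum   float64
	count uint32
}

// add folds in a value that stands for n raw points (n of 0 or 1 means a
// single raw point).
func (m *meanReducer) add(v float64, n uint32) {
	if n >= 2 {
		m.sum += v * float64(n)
		m.count += n
	} else {
		m.sum += v
		m.count++
	}
}

func (m *meanReducer) mean() float64 { return m.sum / float64(m.count) }

func main() {
	var m meanReducer
	m.add(10, 1) // one raw point with value 10
	m.add(4, 3)  // a pre-averaged point standing for three raw points with mean 4
	fmt.Println(m.mean()) // (10 + 4*3) / 4 = 5.5
}
```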
-type UnsignedMeanReducer struct { - sum uint64 - count uint32 -} - -// NewUnsignedMeanReducer creates a new UnsignedMeanReducer. -func NewUnsignedMeanReducer() *UnsignedMeanReducer { - return &UnsignedMeanReducer{} -} - -// AggregateUnsigned aggregates a point into the reducer. -func (r *UnsignedMeanReducer) AggregateUnsigned(p *UnsignedPoint) { - if p.Aggregated >= 2 { - r.sum += p.Value * uint64(p.Aggregated) - r.count += p.Aggregated - } else { - r.sum += p.Value - r.count++ - } -} - -// Emit emits the mean of the aggregated points as a single point. -func (r *UnsignedMeanReducer) Emit() []FloatPoint { - return []FloatPoint{{ - Time: ZeroTime, - Value: float64(r.sum) / float64(r.count), - Aggregated: r.count, - }} -} - -type FloatSpreadReducer struct { - min, max float64 - count uint32 -} - -func NewFloatSpreadReducer() *FloatSpreadReducer { - return &FloatSpreadReducer{ - min: math.Inf(1), - max: math.Inf(-1), - } -} - -func (r *FloatSpreadReducer) AggregateFloat(p *FloatPoint) { - r.min = math.Min(r.min, p.Value) - r.max = math.Max(r.max, p.Value) - r.count++ -} - -func (r *FloatSpreadReducer) Emit() []FloatPoint { - return []FloatPoint{{ - Time: ZeroTime, - Value: r.max - r.min, - Aggregated: r.count, - }} -} - -type IntegerSpreadReducer struct { - min, max int64 - count uint32 -} - -func NewIntegerSpreadReducer() *IntegerSpreadReducer { - return &IntegerSpreadReducer{ - min: math.MaxInt64, - max: math.MinInt64, - } -} - -func (r *IntegerSpreadReducer) AggregateInteger(p *IntegerPoint) { - if p.Value < r.min { - r.min = p.Value - } - if p.Value > r.max { - r.max = p.Value - } - r.count++ -} - -func (r *IntegerSpreadReducer) Emit() []IntegerPoint { - return []IntegerPoint{{ - Time: ZeroTime, - Value: r.max - r.min, - Aggregated: r.count, - }} -} - -type UnsignedSpreadReducer struct { - min, max uint64 - count uint32 -} - -func NewUnsignedSpreadReducer() *UnsignedSpreadReducer { - return &UnsignedSpreadReducer{ - min: math.MaxUint64, - max: 0, - } -} - -func (r *UnsignedSpreadReducer) AggregateUnsigned(p *UnsignedPoint) { - if p.Value < r.min { - r.min = p.Value - } - if p.Value > r.max { - r.max = p.Value - } - r.count++ -} - -func (r *UnsignedSpreadReducer) Emit() []UnsignedPoint { - return []UnsignedPoint{{ - Time: ZeroTime, - Value: r.max - r.min, - Aggregated: r.count, - }} -} - -// FloatDerivativeReducer calculates the derivative of the aggregated points. -type FloatDerivativeReducer struct { - interval Interval - prev FloatPoint - curr FloatPoint - isNonNegative bool - ascending bool -} - -// NewFloatDerivativeReducer creates a new FloatDerivativeReducer. -func NewFloatDerivativeReducer(interval Interval, isNonNegative, ascending bool) *FloatDerivativeReducer { - return &FloatDerivativeReducer{ - interval: interval, - isNonNegative: isNonNegative, - ascending: ascending, - prev: FloatPoint{Nil: true}, - curr: FloatPoint{Nil: true}, - } -} - -// AggregateFloat aggregates a point into the reducer and updates the current window. -func (r *FloatDerivativeReducer) AggregateFloat(p *FloatPoint) { - // Skip past a point when it does not advance the stream. A joined series - // may have multiple points at the same time so we will discard anything - // except the first point we encounter. - if !r.curr.Nil && r.curr.Time == p.Time { - return - } - - r.prev = r.curr - r.curr = *p -} - -// Emit emits the derivative of the reducer at the current point. 
-func (r *FloatDerivativeReducer) Emit() []FloatPoint { - if r.prev.Nil { - return nil - } - // Calculate the derivative of successive points by dividing the - // difference of each value by the elapsed time normalized to the interval. - diff := r.curr.Value - r.prev.Value - elapsed := r.curr.Time - r.prev.Time - if !r.ascending { - elapsed = -elapsed - } - value := diff / (float64(elapsed) / float64(r.interval.Duration)) - - // Mark this point as read by changing the previous point to nil. - r.prev.Nil = true - - // Drop negative values for non-negative derivatives. - if r.isNonNegative && diff < 0 { - return nil - } - return []FloatPoint{{Time: r.curr.Time, Value: value}} -} - -// IntegerDerivativeReducer calculates the derivative of the aggregated points. -type IntegerDerivativeReducer struct { - interval Interval - prev IntegerPoint - curr IntegerPoint - isNonNegative bool - ascending bool -} - -// NewIntegerDerivativeReducer creates a new IntegerDerivativeReducer. -func NewIntegerDerivativeReducer(interval Interval, isNonNegative, ascending bool) *IntegerDerivativeReducer { - return &IntegerDerivativeReducer{ - interval: interval, - isNonNegative: isNonNegative, - ascending: ascending, - prev: IntegerPoint{Nil: true}, - curr: IntegerPoint{Nil: true}, - } -} - -// AggregateInteger aggregates a point into the reducer and updates the current window. -func (r *IntegerDerivativeReducer) AggregateInteger(p *IntegerPoint) { - // Skip past a point when it does not advance the stream. A joined series - // may have multiple points at the same time so we will discard anything - // except the first point we encounter. - if !r.curr.Nil && r.curr.Time == p.Time { - return - } - - r.prev = r.curr - r.curr = *p -} - -// Emit emits the derivative of the reducer at the current point. -func (r *IntegerDerivativeReducer) Emit() []FloatPoint { - if r.prev.Nil { - return nil - } - - // Calculate the derivative of successive points by dividing the - // difference of each value by the elapsed time normalized to the interval. - diff := float64(r.curr.Value - r.prev.Value) - elapsed := r.curr.Time - r.prev.Time - if !r.ascending { - elapsed = -elapsed - } - value := diff / (float64(elapsed) / float64(r.interval.Duration)) - - // Mark this point as read by changing the previous point to nil. - r.prev.Nil = true - - // Drop negative values for non-negative derivatives. - if r.isNonNegative && diff < 0 { - return nil - } - return []FloatPoint{{Time: r.curr.Time, Value: value}} -} - -// UnsignedDerivativeReducer calculates the derivative of the aggregated points. -type UnsignedDerivativeReducer struct { - interval Interval - prev UnsignedPoint - curr UnsignedPoint - isNonNegative bool - ascending bool -} - -// NewUnsignedDerivativeReducer creates a new UnsignedDerivativeReducer. -func NewUnsignedDerivativeReducer(interval Interval, isNonNegative, ascending bool) *UnsignedDerivativeReducer { - return &UnsignedDerivativeReducer{ - interval: interval, - isNonNegative: isNonNegative, - ascending: ascending, - prev: UnsignedPoint{Nil: true}, - curr: UnsignedPoint{Nil: true}, - } -} - -// AggregateUnsigned aggregates a point into the reducer and updates the current window. -func (r *UnsignedDerivativeReducer) AggregateUnsigned(p *UnsignedPoint) { - // Skip past a point when it does not advance the stream. A joined series - // may have multiple points at the same time so we will discard anything - // except the first point we encounter. 
- if !r.curr.Nil && r.curr.Time == p.Time { - return - } - - r.prev = r.curr - r.curr = *p -} - -// Emit emits the derivative of the reducer at the current point. -func (r *UnsignedDerivativeReducer) Emit() []FloatPoint { - if r.prev.Nil { - return nil - } - // Calculate the derivative of successive points by dividing the - // difference of each value by the elapsed time normalized to the interval. - var diff float64 - if r.curr.Value > r.prev.Value { - diff = float64(r.curr.Value - r.prev.Value) - } else { - diff = -float64(r.prev.Value - r.curr.Value) - } - elapsed := r.curr.Time - r.prev.Time - if !r.ascending { - elapsed = -elapsed - } - value := diff / (float64(elapsed) / float64(r.interval.Duration)) - - // Mark this point as read by changing the previous point to nil. - r.prev.Nil = true - - // Drop negative values for non-negative derivatives. - if r.isNonNegative && diff < 0 { - return nil - } - return []FloatPoint{{Time: r.curr.Time, Value: value}} -} - -// FloatDifferenceReducer calculates the derivative of the aggregated points. -type FloatDifferenceReducer struct { - isNonNegative bool - prev FloatPoint - curr FloatPoint -} - -// NewFloatDifferenceReducer creates a new FloatDifferenceReducer. -func NewFloatDifferenceReducer(isNonNegative bool) *FloatDifferenceReducer { - return &FloatDifferenceReducer{ - isNonNegative: isNonNegative, - prev: FloatPoint{Nil: true}, - curr: FloatPoint{Nil: true}, - } -} - -// AggregateFloat aggregates a point into the reducer and updates the current window. -func (r *FloatDifferenceReducer) AggregateFloat(p *FloatPoint) { - // Skip past a point when it does not advance the stream. A joined series - // may have multiple points at the same time so we will discard anything - // except the first point we encounter. - if !r.curr.Nil && r.curr.Time == p.Time { - return - } - - r.prev = r.curr - r.curr = *p -} - -// Emit emits the difference of the reducer at the current point. -func (r *FloatDifferenceReducer) Emit() []FloatPoint { - if r.prev.Nil { - return nil - } - - // Calculate the difference of successive points. - value := r.curr.Value - r.prev.Value - - // If it is non_negative_difference discard any negative value. Since - // prev is still marked as unread. The correctness can be ensured. - if r.isNonNegative && value < 0 { - return nil - } - - // Mark this point as read by changing the previous point to nil. - r.prev.Nil = true - return []FloatPoint{{Time: r.curr.Time, Value: value}} -} - -// IntegerDifferenceReducer calculates the derivative of the aggregated points. -type IntegerDifferenceReducer struct { - isNonNegative bool - prev IntegerPoint - curr IntegerPoint -} - -// NewIntegerDifferenceReducer creates a new IntegerDifferenceReducer. -func NewIntegerDifferenceReducer(isNonNegative bool) *IntegerDifferenceReducer { - return &IntegerDifferenceReducer{ - isNonNegative: isNonNegative, - prev: IntegerPoint{Nil: true}, - curr: IntegerPoint{Nil: true}, - } -} - -// AggregateInteger aggregates a point into the reducer and updates the current window. -func (r *IntegerDifferenceReducer) AggregateInteger(p *IntegerPoint) { - // Skip past a point when it does not advance the stream. A joined series - // may have multiple points at the same time so we will discard anything - // except the first point we encounter. - if !r.curr.Nil && r.curr.Time == p.Time { - return - } - - r.prev = r.curr - r.curr = *p -} - -// Emit emits the difference of the reducer at the current point. 
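The derivative reducers above all reduce to the same arithmetic: the value difference between consecutive points divided by the elapsed time, normalized to the requested unit interval, with negative results dropped for the non-negative variants. A simplified sketch of just that calculation; the same-timestamp skipping and descending-order handling from the full code are omitted.

```go
package main

import (
	"fmt"
	"time"
)

// derivative returns the rate of change between two samples, normalized to
// the given unit interval (e.g. per second):
//   value = (v1 - v0) / (elapsed / interval)
// If nonNegative is set and the change is negative, ok is false and the
// point would be dropped.
func derivative(t0, t1 int64, v0, v1 float64, interval time.Duration, nonNegative bool) (value float64, ok bool) {
	diff := v1 - v0
	if nonNegative && diff < 0 {
		return 0, false
	}
	elapsed := float64(t1 - t0)
	return diff / (elapsed / float64(interval)), true
}

func main() {
	// 20 units of increase over 10 seconds, reported per second.
	t0 := int64(0)
	t1 := int64(10 * time.Second)
	v, ok := derivative(t0, t1, 100, 120, time.Second, false)
	fmt.Println(v, ok) // 2 true
}
```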
-func (r *IntegerDifferenceReducer) Emit() []IntegerPoint { - if r.prev.Nil { - return nil - } - - // Calculate the difference of successive points. - value := r.curr.Value - r.prev.Value - - // If it is non_negative_difference discard any negative value. Since - // prev is still marked as unread. The correctness can be ensured. - if r.isNonNegative && value < 0 { - return nil - } - - // Mark this point as read by changing the previous point to nil. - r.prev.Nil = true - - return []IntegerPoint{{Time: r.curr.Time, Value: value}} -} - -// UnsignedDifferenceReducer calculates the derivative of the aggregated points. -type UnsignedDifferenceReducer struct { - isNonNegative bool - prev UnsignedPoint - curr UnsignedPoint -} - -// NewUnsignedDifferenceReducer creates a new UnsignedDifferenceReducer. -func NewUnsignedDifferenceReducer(isNonNegative bool) *UnsignedDifferenceReducer { - return &UnsignedDifferenceReducer{ - isNonNegative: isNonNegative, - prev: UnsignedPoint{Nil: true}, - curr: UnsignedPoint{Nil: true}, - } -} - -// AggregateUnsigned aggregates a point into the reducer and updates the current window. -func (r *UnsignedDifferenceReducer) AggregateUnsigned(p *UnsignedPoint) { - // Skip past a point when it does not advance the stream. A joined series - // may have multiple points at the same time so we will discard anything - // except the first point we encounter. - if !r.curr.Nil && r.curr.Time == p.Time { - return - } - - r.prev = r.curr - r.curr = *p -} - -// Emit emits the difference of the reducer at the current point. -func (r *UnsignedDifferenceReducer) Emit() []UnsignedPoint { - if r.prev.Nil { - return nil - } - - // If it is non_negative_difference discard any negative value. Since - // prev is still marked as unread. The correctness can be ensured. - if r.isNonNegative && r.curr.Value < r.prev.Value { - return nil - } - - // Calculate the difference of successive points. - value := r.curr.Value - r.prev.Value - - // Mark this point as read by changing the previous point to nil. - r.prev.Nil = true - - return []UnsignedPoint{{Time: r.curr.Time, Value: value}} -} - -// FloatMovingAverageReducer calculates the moving average of the aggregated points. -type FloatMovingAverageReducer struct { - pos int - sum float64 - time int64 - buf []float64 -} - -// NewFloatMovingAverageReducer creates a new FloatMovingAverageReducer. -func NewFloatMovingAverageReducer(n int) *FloatMovingAverageReducer { - return &FloatMovingAverageReducer{ - buf: make([]float64, 0, n), - } -} - -// AggregateFloat aggregates a point into the reducer and updates the current window. -func (r *FloatMovingAverageReducer) AggregateFloat(p *FloatPoint) { - if len(r.buf) != cap(r.buf) { - r.buf = append(r.buf, p.Value) - } else { - r.sum -= r.buf[r.pos] - r.buf[r.pos] = p.Value - } - r.sum += p.Value - r.time = p.Time - r.pos++ - if r.pos >= cap(r.buf) { - r.pos = 0 - } -} - -// Emit emits the moving average of the current window. Emit should be called -// after every call to AggregateFloat and it will produce one point if there -// is enough data to fill a window, otherwise it will produce zero points. -func (r *FloatMovingAverageReducer) Emit() []FloatPoint { - if len(r.buf) != cap(r.buf) { - return []FloatPoint{} - } - return []FloatPoint{ - { - Value: r.sum / float64(len(r.buf)), - Time: r.time, - Aggregated: uint32(len(r.buf)), - }, - } -} - -// IntegerMovingAverageReducer calculates the moving average of the aggregated points. 
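`FloatMovingAverageReducer` above keeps a fixed-size circular buffer plus a running sum, so each update and each emitted average costs O(1), and nothing is emitted until the window is full. A standalone sketch of that ring-buffer technique with illustrative names:

```go
package main

import "fmt"

// movingAverage keeps a fixed-size circular buffer and a running sum.
type movingAverage struct {
	buf []float64
	pos int
	sum float64
}

func newMovingAverage(n int) *movingAverage {
	return &movingAverage{buf: make([]float64, 0, n)}
}

// add pushes a value into the window, evicting the oldest value once the
// window is full.
func (m *movingAverage) add(v float64) {
	if len(m.buf) < cap(m.buf) {
		m.buf = append(m.buf, v)
	} else {
		m.sum -= m.buf[m.pos]
		m.buf[m.pos] = v
	}
	m.sum += v
	m.pos = (m.pos + 1) % cap(m.buf)
}

// value reports the window average and whether the window is full yet;
// the reducers above emit nothing until it is.
func (m *movingAverage) value() (float64, bool) {
	if len(m.buf) < cap(m.buf) {
		return 0, false
	}
	return m.sum / float64(len(m.buf)), true
}

func main() {
	ma := newMovingAverage(3)
	for _, v := range []float64{1, 2, 3, 4} {
		ma.add(v)
		if avg, ok := ma.value(); ok {
			fmt.Println(avg) // 2, then 3
		}
	}
}
```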
-type IntegerMovingAverageReducer struct { - pos int - sum int64 - time int64 - buf []int64 -} - -// NewIntegerMovingAverageReducer creates a new IntegerMovingAverageReducer. -func NewIntegerMovingAverageReducer(n int) *IntegerMovingAverageReducer { - return &IntegerMovingAverageReducer{ - buf: make([]int64, 0, n), - } -} - -// AggregateInteger aggregates a point into the reducer and updates the current window. -func (r *IntegerMovingAverageReducer) AggregateInteger(p *IntegerPoint) { - if len(r.buf) != cap(r.buf) { - r.buf = append(r.buf, p.Value) - } else { - r.sum -= r.buf[r.pos] - r.buf[r.pos] = p.Value - } - r.sum += p.Value - r.time = p.Time - r.pos++ - if r.pos >= cap(r.buf) { - r.pos = 0 - } -} - -// Emit emits the moving average of the current window. Emit should be called -// after every call to AggregateInteger and it will produce one point if there -// is enough data to fill a window, otherwise it will produce zero points. -func (r *IntegerMovingAverageReducer) Emit() []FloatPoint { - if len(r.buf) != cap(r.buf) { - return []FloatPoint{} - } - return []FloatPoint{ - { - Value: float64(r.sum) / float64(len(r.buf)), - Time: r.time, - Aggregated: uint32(len(r.buf)), - }, - } -} - -// UnsignedMovingAverageReducer calculates the moving average of the aggregated points. -type UnsignedMovingAverageReducer struct { - pos int - sum uint64 - time int64 - buf []uint64 -} - -// NewUnsignedMovingAverageReducer creates a new UnsignedMovingAverageReducer. -func NewUnsignedMovingAverageReducer(n int) *UnsignedMovingAverageReducer { - return &UnsignedMovingAverageReducer{ - buf: make([]uint64, 0, n), - } -} - -// AggregateUnsigned aggregates a point into the reducer and updates the current window. -func (r *UnsignedMovingAverageReducer) AggregateUnsigned(p *UnsignedPoint) { - if len(r.buf) != cap(r.buf) { - r.buf = append(r.buf, p.Value) - } else { - r.sum -= r.buf[r.pos] - r.buf[r.pos] = p.Value - } - r.sum += p.Value - r.time = p.Time - r.pos++ - if r.pos >= cap(r.buf) { - r.pos = 0 - } -} - -// Emit emits the moving average of the current window. Emit should be called -// after every call to AggregateUnsigned and it will produce one point if there -// is enough data to fill a window, otherwise it will produce zero points. 
-func (r *UnsignedMovingAverageReducer) Emit() []FloatPoint { - if len(r.buf) != cap(r.buf) { - return []FloatPoint{} - } - return []FloatPoint{ - { - Value: float64(r.sum) / float64(len(r.buf)), - Time: r.time, - Aggregated: uint32(len(r.buf)), - }, - } -} - -type ExponentialMovingAverageReducer struct { - ema gota.EMA - holdPeriod uint32 - count uint32 - v float64 - t int64 -} - -func NewExponentialMovingAverageReducer(period int, holdPeriod int, warmupType gota.WarmupType) *ExponentialMovingAverageReducer { - ema := gota.NewEMA(period, warmupType) - if holdPeriod == -1 { - holdPeriod = ema.WarmCount() - } - return &ExponentialMovingAverageReducer{ - ema: *ema, - holdPeriod: uint32(holdPeriod), - } -} - -func (r *ExponentialMovingAverageReducer) AggregateFloat(p *FloatPoint) { - r.aggregate(p.Value, p.Time) -} -func (r *ExponentialMovingAverageReducer) AggregateInteger(p *IntegerPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *ExponentialMovingAverageReducer) AggregateUnsigned(p *UnsignedPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *ExponentialMovingAverageReducer) aggregate(v float64, t int64) { - r.v = r.ema.Add(v) - r.t = t - r.count++ -} -func (r *ExponentialMovingAverageReducer) Emit() []FloatPoint { - if r.count <= r.holdPeriod { - return nil - } - - return []FloatPoint{ - { - Value: r.v, - Time: r.t, - Aggregated: r.count, - }, - } -} - -type DoubleExponentialMovingAverageReducer struct { - dema gota.DEMA - holdPeriod uint32 - count uint32 - v float64 - t int64 -} - -func NewDoubleExponentialMovingAverageReducer(period int, holdPeriod int, warmupType gota.WarmupType) *DoubleExponentialMovingAverageReducer { - dema := gota.NewDEMA(period, warmupType) - if holdPeriod == -1 { - holdPeriod = dema.WarmCount() - } - return &DoubleExponentialMovingAverageReducer{ - dema: *dema, - holdPeriod: uint32(holdPeriod), - } -} - -func (r *DoubleExponentialMovingAverageReducer) AggregateFloat(p *FloatPoint) { - r.aggregate(p.Value, p.Time) -} -func (r *DoubleExponentialMovingAverageReducer) AggregateInteger(p *IntegerPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *DoubleExponentialMovingAverageReducer) AggregateUnsigned(p *UnsignedPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *DoubleExponentialMovingAverageReducer) aggregate(v float64, t int64) { - r.v = r.dema.Add(v) - r.t = t - r.count++ -} -func (r *DoubleExponentialMovingAverageReducer) Emit() []FloatPoint { - if r.count <= r.holdPeriod { - return nil - } - - return []FloatPoint{ - { - Value: r.v, - Time: r.t, - Aggregated: r.count, - }, - } -} - -type TripleExponentialMovingAverageReducer struct { - tema gota.TEMA - holdPeriod uint32 - count uint32 - v float64 - t int64 -} - -func NewTripleExponentialMovingAverageReducer(period int, holdPeriod int, warmupType gota.WarmupType) *TripleExponentialMovingAverageReducer { - tema := gota.NewTEMA(period, warmupType) - if holdPeriod == -1 { - holdPeriod = tema.WarmCount() - } - return &TripleExponentialMovingAverageReducer{ - tema: *tema, - holdPeriod: uint32(holdPeriod), - } -} - -func (r *TripleExponentialMovingAverageReducer) AggregateFloat(p *FloatPoint) { - r.aggregate(p.Value, p.Time) -} -func (r *TripleExponentialMovingAverageReducer) AggregateInteger(p *IntegerPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *TripleExponentialMovingAverageReducer) AggregateUnsigned(p *UnsignedPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *TripleExponentialMovingAverageReducer) aggregate(v float64, t int64) { - r.v = 
r.tema.Add(v) - r.t = t - r.count++ -} -func (r *TripleExponentialMovingAverageReducer) Emit() []FloatPoint { - if r.count <= r.holdPeriod { - return nil - } - - return []FloatPoint{ - { - Value: r.v, - Time: r.t, - Aggregated: r.count, - }, - } -} - -type RelativeStrengthIndexReducer struct { - rsi gota.RSI - holdPeriod uint32 - count uint32 - v float64 - t int64 -} - -func NewRelativeStrengthIndexReducer(period int, holdPeriod int, warmupType gota.WarmupType) *RelativeStrengthIndexReducer { - rsi := gota.NewRSI(period, warmupType) - if holdPeriod == -1 { - holdPeriod = rsi.WarmCount() - } - return &RelativeStrengthIndexReducer{ - rsi: *rsi, - holdPeriod: uint32(holdPeriod), - } -} -func (r *RelativeStrengthIndexReducer) AggregateFloat(p *FloatPoint) { - r.aggregate(p.Value, p.Time) -} -func (r *RelativeStrengthIndexReducer) AggregateInteger(p *IntegerPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *RelativeStrengthIndexReducer) AggregateUnsigned(p *UnsignedPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *RelativeStrengthIndexReducer) aggregate(v float64, t int64) { - r.v = r.rsi.Add(v) - r.t = t - r.count++ -} -func (r *RelativeStrengthIndexReducer) Emit() []FloatPoint { - if r.count <= r.holdPeriod { - return nil - } - - return []FloatPoint{ - { - Value: r.v, - Time: r.t, - Aggregated: r.count, - }, - } -} - -type TripleExponentialDerivativeReducer struct { - trix gota.TRIX - holdPeriod uint32 - count uint32 - v float64 - t int64 -} - -func NewTripleExponentialDerivativeReducer(period int, holdPeriod int, warmupType gota.WarmupType) *TripleExponentialDerivativeReducer { - trix := gota.NewTRIX(period, warmupType) - if holdPeriod == -1 { - holdPeriod = trix.WarmCount() - } - return &TripleExponentialDerivativeReducer{ - trix: *trix, - holdPeriod: uint32(holdPeriod), - } -} -func (r *TripleExponentialDerivativeReducer) AggregateFloat(p *FloatPoint) { - r.aggregate(p.Value, p.Time) -} -func (r *TripleExponentialDerivativeReducer) AggregateInteger(p *IntegerPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *TripleExponentialDerivativeReducer) AggregateUnsigned(p *UnsignedPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *TripleExponentialDerivativeReducer) aggregate(v float64, t int64) { - r.v = r.trix.Add(v) - r.t = t - r.count++ -} -func (r *TripleExponentialDerivativeReducer) Emit() []FloatPoint { - if r.count <= r.holdPeriod { - return nil - } - if math.IsInf(r.v, 0) { - return nil - } - - return []FloatPoint{ - { - Value: r.v, - Time: r.t, - Aggregated: r.count, - }, - } -} - -type KaufmansEfficiencyRatioReducer struct { - ker gota.KER - holdPeriod uint32 - count uint32 - v float64 - t int64 -} - -func NewKaufmansEfficiencyRatioReducer(period int, holdPeriod int) *KaufmansEfficiencyRatioReducer { - ker := gota.NewKER(period) - if holdPeriod == -1 { - holdPeriod = ker.WarmCount() - } - return &KaufmansEfficiencyRatioReducer{ - ker: *ker, - holdPeriod: uint32(holdPeriod), - } -} -func (r *KaufmansEfficiencyRatioReducer) AggregateFloat(p *FloatPoint) { - r.aggregate(p.Value, p.Time) -} -func (r *KaufmansEfficiencyRatioReducer) AggregateInteger(p *IntegerPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *KaufmansEfficiencyRatioReducer) AggregateUnsigned(p *UnsignedPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *KaufmansEfficiencyRatioReducer) aggregate(v float64, t int64) { - r.v = r.ker.Add(v) - r.t = t - r.count++ -} -func (r *KaufmansEfficiencyRatioReducer) Emit() []FloatPoint { - if r.count <= r.holdPeriod { - 
return nil - } - if math.IsInf(r.v, 0) { - return nil - } - - return []FloatPoint{ - { - Value: r.v, - Time: r.t, - Aggregated: r.count, - }, - } -} - -type KaufmansAdaptiveMovingAverageReducer struct { - kama gota.KAMA - holdPeriod uint32 - count uint32 - v float64 - t int64 -} - -func NewKaufmansAdaptiveMovingAverageReducer(period int, holdPeriod int) *KaufmansAdaptiveMovingAverageReducer { - kama := gota.NewKAMA(period) - if holdPeriod == -1 { - holdPeriod = kama.WarmCount() - } - return &KaufmansAdaptiveMovingAverageReducer{ - kama: *kama, - holdPeriod: uint32(holdPeriod), - } -} -func (r *KaufmansAdaptiveMovingAverageReducer) AggregateFloat(p *FloatPoint) { - r.aggregate(p.Value, p.Time) -} -func (r *KaufmansAdaptiveMovingAverageReducer) AggregateInteger(p *IntegerPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *KaufmansAdaptiveMovingAverageReducer) AggregateUnsigned(p *UnsignedPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *KaufmansAdaptiveMovingAverageReducer) aggregate(v float64, t int64) { - r.v = r.kama.Add(v) - r.t = t - r.count++ -} -func (r *KaufmansAdaptiveMovingAverageReducer) Emit() []FloatPoint { - if r.count <= r.holdPeriod { - return nil - } - if math.IsInf(r.v, 0) { - return nil - } - - return []FloatPoint{ - { - Value: r.v, - Time: r.t, - Aggregated: r.count, - }, - } -} - -type ChandeMomentumOscillatorReducer struct { - cmo gota.AlgSimple - holdPeriod uint32 - count uint32 - v float64 - t int64 -} - -func NewChandeMomentumOscillatorReducer(period int, holdPeriod int, warmupType gota.WarmupType) *ChandeMomentumOscillatorReducer { - var cmo gota.AlgSimple - if warmupType == gota.WarmupType(-1) { - cmo = gota.NewCMO(period) - } else { - cmo = gota.NewCMOS(period, warmupType) - } - - if holdPeriod == -1 { - holdPeriod = cmo.WarmCount() - } - return &ChandeMomentumOscillatorReducer{ - cmo: cmo, - holdPeriod: uint32(holdPeriod), - } -} -func (r *ChandeMomentumOscillatorReducer) AggregateFloat(p *FloatPoint) { - r.aggregate(p.Value, p.Time) -} -func (r *ChandeMomentumOscillatorReducer) AggregateInteger(p *IntegerPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *ChandeMomentumOscillatorReducer) AggregateUnsigned(p *UnsignedPoint) { - r.aggregate(float64(p.Value), p.Time) -} -func (r *ChandeMomentumOscillatorReducer) aggregate(v float64, t int64) { - r.v = r.cmo.Add(v) - r.t = t - r.count++ -} -func (r *ChandeMomentumOscillatorReducer) Emit() []FloatPoint { - if r.count <= r.holdPeriod { - return nil - } - - return []FloatPoint{ - { - Value: r.v, - Time: r.t, - Aggregated: r.count, - }, - } -} - -// FloatCumulativeSumReducer cumulates the values from each point. -type FloatCumulativeSumReducer struct { - curr FloatPoint -} - -// NewFloatCumulativeSumReducer creates a new FloatCumulativeSumReducer. -func NewFloatCumulativeSumReducer() *FloatCumulativeSumReducer { - return &FloatCumulativeSumReducer{ - curr: FloatPoint{Nil: true}, - } -} - -func (r *FloatCumulativeSumReducer) AggregateFloat(p *FloatPoint) { - r.curr.Value += p.Value - r.curr.Time = p.Time - r.curr.Nil = false -} - -func (r *FloatCumulativeSumReducer) Emit() []FloatPoint { - var pts []FloatPoint - if !r.curr.Nil { - pts = []FloatPoint{r.curr} - } - return pts -} - -// IntegerCumulativeSumReducer cumulates the values from each point. -type IntegerCumulativeSumReducer struct { - curr IntegerPoint -} - -// NewIntegerCumulativeSumReducer creates a new IntegerCumulativeSumReducer. 
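The indicator reducers above (EMA, DEMA, TEMA, RSI, TRIX, KER, KAMA, CMO) share one pattern: every point is fed to the indicator, the latest value and timestamp are remembered, and nothing is emitted until more than `holdPeriod` points have been seen. The actual math lives in the internal `gota` package, which is not shown here, so the sketch below uses the standard EMA recurrence as a stand-in to illustrate the warm-up hold; it is not a copy of that implementation.

```go
package main

import "fmt"

// emaReducer is a plain exponential moving average with a warm-up hold:
// nothing is reported until more than `hold` points have been seen,
// mirroring the holdPeriod guard used by the indicator reducers above.
type emaReducer struct {
	alpha float64
	value float64
	count int
	hold  int
}

func newEMAReducer(period, hold int) *emaReducer {
	return &emaReducer{alpha: 2 / (float64(period) + 1), hold: hold}
}

func (r *emaReducer) add(v float64) {
	if r.count == 0 {
		r.value = v // seed with the first observation
	} else {
		r.value = r.alpha*v + (1-r.alpha)*r.value
	}
	r.count++
}

// emit returns the current EMA, or ok=false while still warming up.
func (r *emaReducer) emit() (float64, bool) {
	if r.count <= r.hold {
		return 0, false
	}
	return r.value, true
}

func main() {
	r := newEMAReducer(3, 3)
	for _, v := range []float64{1, 2, 3, 4, 5} {
		r.add(v)
		if ema, ok := r.emit(); ok {
			fmt.Printf("%.4f\n", ema) // prints the EMA once the hold has passed
		}
	}
}
```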
-func NewIntegerCumulativeSumReducer() *IntegerCumulativeSumReducer { - return &IntegerCumulativeSumReducer{ - curr: IntegerPoint{Nil: true}, - } -} - -func (r *IntegerCumulativeSumReducer) AggregateInteger(p *IntegerPoint) { - r.curr.Value += p.Value - r.curr.Time = p.Time - r.curr.Nil = false -} - -func (r *IntegerCumulativeSumReducer) Emit() []IntegerPoint { - var pts []IntegerPoint - if !r.curr.Nil { - pts = []IntegerPoint{r.curr} - } - return pts -} - -// UnsignedCumulativeSumReducer cumulates the values from each point. -type UnsignedCumulativeSumReducer struct { - curr UnsignedPoint -} - -// NewUnsignedCumulativeSumReducer creates a new UnsignedCumulativeSumReducer. -func NewUnsignedCumulativeSumReducer() *UnsignedCumulativeSumReducer { - return &UnsignedCumulativeSumReducer{ - curr: UnsignedPoint{Nil: true}, - } -} - -func (r *UnsignedCumulativeSumReducer) AggregateUnsigned(p *UnsignedPoint) { - r.curr.Value += p.Value - r.curr.Time = p.Time - r.curr.Nil = false -} - -func (r *UnsignedCumulativeSumReducer) Emit() []UnsignedPoint { - var pts []UnsignedPoint - if !r.curr.Nil { - pts = []UnsignedPoint{r.curr} - } - return pts -} - -// FloatHoltWintersReducer forecasts a series into the future. -// This is done using the Holt-Winters damped method. -// 1. Using the series the initial values are calculated using a SSE. -// 2. The series is forecasted into the future using the iterative relations. -type FloatHoltWintersReducer struct { - // Season period - m int - seasonal bool - - // Horizon - h int - - // Interval between points - interval int64 - // interval / 2 -- used to perform rounding - halfInterval int64 - - // Whether to include all data or only future values - includeFitData bool - - // NelderMead optimizer - optim *neldermead.Optimizer - // Small difference bound for the optimizer - epsilon float64 - - y []float64 - points []FloatPoint -} - -const ( - // Arbitrary weight for initializing some initial guesses. - // This should be in the range [0,1] - hwWeight = 0.5 - // Epsilon value for the minimization process - hwDefaultEpsilon = 1.0e-4 - // Define a grid of initial guesses for the parameters: alpha, beta, gamma, and phi. - // Keep in mind that this grid is N^4 so we should keep N small - // The starting lower guess - hwGuessLower = 0.3 - // The upper bound on the grid - hwGuessUpper = 1.0 - // The step between guesses - hwGuessStep = 0.4 -) - -// NewFloatHoltWintersReducer creates a new FloatHoltWintersReducer. -func NewFloatHoltWintersReducer(h, m int, includeFitData bool, interval time.Duration) *FloatHoltWintersReducer { - seasonal := true - if m < 2 { - seasonal = false - } - return &FloatHoltWintersReducer{ - h: h, - m: m, - seasonal: seasonal, - includeFitData: includeFitData, - interval: int64(interval), - halfInterval: int64(interval) / 2, - optim: neldermead.New(), - epsilon: hwDefaultEpsilon, - } -} - -func (r *FloatHoltWintersReducer) aggregate(time int64, value float64) { - r.points = append(r.points, FloatPoint{ - Time: time, - Value: value, - }) -} - -// AggregateFloat aggregates a point into the reducer and updates the current window. -func (r *FloatHoltWintersReducer) AggregateFloat(p *FloatPoint) { - r.aggregate(p.Time, p.Value) -} - -// AggregateInteger aggregates a point into the reducer and updates the current window. 
-func (r *FloatHoltWintersReducer) AggregateInteger(p *IntegerPoint) { - r.aggregate(p.Time, float64(p.Value)) -} - -func (r *FloatHoltWintersReducer) roundTime(t int64) int64 { - // Overflow safe round function - remainder := t % r.interval - if remainder > r.halfInterval { - // Round up - return (t/r.interval + 1) * r.interval - } - // Round down - return (t / r.interval) * r.interval -} - -// Emit returns the points generated by the HoltWinters algorithm. -func (r *FloatHoltWintersReducer) Emit() []FloatPoint { - if l := len(r.points); l < 2 || r.seasonal && l < r.m || r.h <= 0 { - return nil - } - // First fill in r.y with values and NaNs for missing values - start, stop := r.roundTime(r.points[0].Time), r.roundTime(r.points[len(r.points)-1].Time) - count := (stop - start) / r.interval - if count <= 0 { - return nil - } - r.y = make([]float64, 1, count) - r.y[0] = r.points[0].Value - t := r.roundTime(r.points[0].Time) - for _, p := range r.points[1:] { - rounded := r.roundTime(p.Time) - if rounded <= t { - // Drop values that occur for the same time bucket - continue - } - t += r.interval - // Add any missing values before the next point - for rounded != t { - // Add in a NaN so we can skip it later. - r.y = append(r.y, math.NaN()) - t += r.interval - } - r.y = append(r.y, p.Value) - } - - // Seasonality - m := r.m - - // Starting guesses - // NOTE: Since these values are guesses - // in the cases where we were missing data, - // we can just skip the value and call it good. - - l0 := 0.0 - if r.seasonal { - for i := 0; i < m; i++ { - if !math.IsNaN(r.y[i]) { - l0 += (1 / float64(m)) * r.y[i] - } - } - } else { - l0 += hwWeight * r.y[0] - } - - b0 := 0.0 - if r.seasonal { - for i := 0; i < m && m+i < len(r.y); i++ { - if !math.IsNaN(r.y[i]) && !math.IsNaN(r.y[m+i]) { - b0 += 1 / float64(m*m) * (r.y[m+i] - r.y[i]) - } - } - } else { - if !math.IsNaN(r.y[1]) { - b0 = hwWeight * (r.y[1] - r.y[0]) - } - } - - var s []float64 - if r.seasonal { - s = make([]float64, m) - for i := 0; i < m; i++ { - if !math.IsNaN(r.y[i]) { - s[i] = r.y[i] / l0 - } else { - s[i] = 0 - } - } - } - - parameters := make([]float64, 6+len(s)) - parameters[4] = l0 - parameters[5] = b0 - o := len(parameters) - len(s) - for i := range s { - parameters[i+o] = s[i] - } - - // Determine best fit for the various parameters - minSSE := math.Inf(1) - var bestParams []float64 - for alpha := hwGuessLower; alpha < hwGuessUpper; alpha += hwGuessStep { - for beta := hwGuessLower; beta < hwGuessUpper; beta += hwGuessStep { - for gamma := hwGuessLower; gamma < hwGuessUpper; gamma += hwGuessStep { - for phi := hwGuessLower; phi < hwGuessUpper; phi += hwGuessStep { - parameters[0] = alpha - parameters[1] = beta - parameters[2] = gamma - parameters[3] = phi - sse, params := r.optim.Optimize(r.sse, parameters, r.epsilon, 1) - if sse < minSSE || bestParams == nil { - minSSE = sse - bestParams = params - } - } - } - } - } - - // Forecast - forecasted := r.forecast(r.h, bestParams) - var points []FloatPoint - if r.includeFitData { - start := r.points[0].Time - points = make([]FloatPoint, 0, len(forecasted)) - for i, v := range forecasted { - if !math.IsNaN(v) { - t := start + r.interval*(int64(i)) - points = append(points, FloatPoint{ - Value: v, - Time: t, - }) - } - } - } else { - stop := r.points[len(r.points)-1].Time - points = make([]FloatPoint, 0, r.h) - for i, v := range forecasted[len(r.y):] { - if !math.IsNaN(v) { - t := stop + r.interval*(int64(i)+1) - points = append(points, FloatPoint{ - Value: v, - Time: t, - }) - } - } - } - 
// Clear data set - r.y = r.y[0:0] - return points -} - -// Using the recursive relations compute the next values -func (r *FloatHoltWintersReducer) next(alpha, beta, gamma, phi, phiH, yT, lTp, bTp, sTm, sTmh float64) (yTh, lT, bT, sT float64) { - lT = alpha*(yT/sTm) + (1-alpha)*(lTp+phi*bTp) - bT = beta*(lT-lTp) + (1-beta)*phi*bTp - sT = gamma*(yT/(lTp+phi*bTp)) + (1-gamma)*sTm - yTh = (lT + phiH*bT) * sTmh - return -} - -// Forecast the data h points into the future. -func (r *FloatHoltWintersReducer) forecast(h int, params []float64) []float64 { - // Constrain parameters - r.constrain(params) - - yT := r.y[0] - - phi := params[3] - phiH := phi - - lT := params[4] - bT := params[5] - - // seasonals is a ring buffer of past sT values - var seasonals []float64 - var m, so int - if r.seasonal { - seasonals = params[6:] - m = len(params[6:]) - if m == 1 { - seasonals[0] = 1 - } - // Season index offset - so = m - 1 - } - - forecasted := make([]float64, len(r.y)+h) - forecasted[0] = yT - l := len(r.y) - var hm int - stm, stmh := 1.0, 1.0 - for t := 1; t < l+h; t++ { - if r.seasonal { - hm = t % m - stm = seasonals[(t-m+so)%m] - stmh = seasonals[(t-m+hm+so)%m] - } - var sT float64 - yT, lT, bT, sT = r.next( - params[0], // alpha - params[1], // beta - params[2], // gamma - phi, - phiH, - yT, - lT, - bT, - stm, - stmh, - ) - phiH += math.Pow(phi, float64(t)) - - if r.seasonal { - seasonals[(t+so)%m] = sT - so++ - } - - forecasted[t] = yT - } - return forecasted -} - -// Compute sum squared error for the given parameters. -func (r *FloatHoltWintersReducer) sse(params []float64) float64 { - sse := 0.0 - forecasted := r.forecast(0, params) - for i := range forecasted { - // Skip missing values since we cannot use them to compute an error. - if !math.IsNaN(r.y[i]) { - // Compute error - if math.IsNaN(forecasted[i]) { - // Penalize forecasted NaNs - return math.Inf(1) - } - diff := forecasted[i] - r.y[i] - sse += diff * diff - } - } - return sse -} - -// Constrain alpha, beta, gamma, phi in the range [0, 1] -func (r *FloatHoltWintersReducer) constrain(x []float64) { - // alpha - if x[0] > 1 { - x[0] = 1 - } - if x[0] < 0 { - x[0] = 0 - } - // beta - if x[1] > 1 { - x[1] = 1 - } - if x[1] < 0 { - x[1] = 0 - } - // gamma - if x[2] > 1 { - x[2] = 1 - } - if x[2] < 0 { - x[2] = 0 - } - // phi - if x[3] > 1 { - x[3] = 1 - } - if x[3] < 0 { - x[3] = 0 - } -} - -// FloatIntegralReducer calculates the time-integral of the aggregated points. -type FloatIntegralReducer struct { - interval Interval - sum float64 - prev FloatPoint - window struct { - start int64 - end int64 - } - ch chan FloatPoint - opt IteratorOptions -} - -// NewFloatIntegralReducer creates a new FloatIntegralReducer. -func NewFloatIntegralReducer(interval Interval, opt IteratorOptions) *FloatIntegralReducer { - return &FloatIntegralReducer{ - interval: interval, - prev: FloatPoint{Nil: true}, - ch: make(chan FloatPoint, 1), - opt: opt, - } -} - -// AggregateFloat aggregates a point into the reducer. -func (r *FloatIntegralReducer) AggregateFloat(p *FloatPoint) { - // If this is the first point, just save it - if r.prev.Nil { - r.prev = *p - if !r.opt.Interval.IsZero() { - // Record the end of the time interval. - // We do not care for whether the last number is inclusive or exclusive - // because we treat both the same for the involved math. 
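Before fitting, the Holt-Winters reducer regularizes its input by rounding every timestamp to the nearest multiple of the group-by interval (the `roundTime` helper above) and padding missing buckets with NaN. A small sketch of that integer rounding; the tie handling shown (exact halves round down) matches the code above, but the helper name is illustrative.

```go
package main

import "fmt"

// roundToInterval rounds a timestamp to the nearest multiple of interval
// using only integer arithmetic, in the same spirit as roundTime above.
func roundToInterval(t, interval int64) int64 {
	remainder := t % interval
	if remainder > interval/2 {
		return (t/interval + 1) * interval // round up
	}
	return (t / interval) * interval // round down (including exact halves)
}

func main() {
	const interval = 10
	for _, t := range []int64{12, 17, 25, 26} {
		fmt.Println(t, "->", roundToInterval(t, interval))
	}
	// 12 -> 10, 17 -> 20, 25 -> 20, 26 -> 30
}
```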
- if r.opt.Ascending { - r.window.start, r.window.end = r.opt.Window(p.Time) - } else { - r.window.end, r.window.start = r.opt.Window(p.Time) - } - } - return - } - - // If this point has the same timestamp as the previous one, - // skip the point. Points sent into this reducer are expected - // to be fed in order. - if r.prev.Time == p.Time { - r.prev = *p - return - } else if !r.opt.Interval.IsZero() && ((r.opt.Ascending && p.Time >= r.window.end) || (!r.opt.Ascending && p.Time <= r.window.end)) { - // If our previous time is not equal to the window, we need to - // interpolate the area at the end of this interval. - if r.prev.Time != r.window.end { - value := linearFloat(r.window.end, r.prev.Time, p.Time, r.prev.Value, p.Value) - elapsed := float64(r.window.end-r.prev.Time) / float64(r.interval.Duration) - r.sum += 0.5 * (value + r.prev.Value) * elapsed - - r.prev.Value = value - r.prev.Time = r.window.end - } - - // Emit the current point through the channel and then clear it. - r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} - if r.opt.Ascending { - r.window.start, r.window.end = r.opt.Window(p.Time) - } else { - r.window.end, r.window.start = r.opt.Window(p.Time) - } - r.sum = 0.0 - } - - // Normal operation: update the sum using the trapezium rule - elapsed := float64(p.Time-r.prev.Time) / float64(r.interval.Duration) - r.sum += 0.5 * (p.Value + r.prev.Value) * elapsed - r.prev = *p -} - -// Emit emits the time-integral of the aggregated points as a single point. -// InfluxQL convention dictates that outside a group-by-time clause we return -// a timestamp of zero. Within a group-by-time, we can set the time to ZeroTime -// and a higher level will change it to the start of the time group. -func (r *FloatIntegralReducer) Emit() []FloatPoint { - select { - case pt, ok := <-r.ch: - if !ok { - return nil - } - return []FloatPoint{pt} - default: - return nil - } -} - -// Close flushes any in progress points to ensure any remaining points are -// emitted. -func (r *FloatIntegralReducer) Close() error { - // If our last point is at the start time, then discard this point since - // there is no area within this bucket. Otherwise, send off what we - // currently have as the final point. - if !r.prev.Nil && r.prev.Time != r.window.start { - r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} - } - close(r.ch) - return nil -} - -// IntegerIntegralReducer calculates the time-integral of the aggregated points. -type IntegerIntegralReducer struct { - interval Interval - sum float64 - prev IntegerPoint - window struct { - start int64 - end int64 - } - ch chan FloatPoint - opt IteratorOptions -} - -// NewIntegerIntegralReducer creates a new IntegerIntegralReducer. -func NewIntegerIntegralReducer(interval Interval, opt IteratorOptions) *IntegerIntegralReducer { - return &IntegerIntegralReducer{ - interval: interval, - prev: IntegerPoint{Nil: true}, - ch: make(chan FloatPoint, 1), - opt: opt, - } -} - -// AggregateInteger aggregates a point into the reducer. -func (r *IntegerIntegralReducer) AggregateInteger(p *IntegerPoint) { - // If this is the first point, just save it - if r.prev.Nil { - r.prev = *p - - // Record the end of the time interval. - // We do not care for whether the last number is inclusive or exclusive - // because we treat both the same for the involved math. 
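Between window boundaries, the integral reducers accumulate area with the trapezium rule: each consecutive pair of points contributes the mean of the two values times the elapsed time, normalized to the integral's unit. The sketch below shows only that core accumulation; windowing and the boundary interpolation via `linearFloat` are left out.

```go
package main

import (
	"fmt"
	"time"
)

// point is a minimal timestamped sample (nanosecond time, float value).
type point struct {
	t int64
	v float64
}

// integral applies the trapezium rule used by the integral reducers above:
// each pair of consecutive points contributes the average of the two values
// times the elapsed time, normalized to the given unit (e.g. seconds).
func integral(points []point, unit time.Duration) float64 {
	var sum float64
	for i := 1; i < len(points); i++ {
		elapsed := float64(points[i].t-points[i-1].t) / float64(unit)
		sum += 0.5 * (points[i].v + points[i-1].v) * elapsed
	}
	return sum
}

func main() {
	s := int64(time.Second)
	pts := []point{{0, 0}, {10 * s, 10}, {20 * s, 10}}
	// 0..10s ramps 0->10 (area 50), 10..20s holds at 10 (area 100).
	fmt.Println(integral(pts, time.Second)) // 150
}
```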
- if r.opt.Ascending { - r.window.start, r.window.end = r.opt.Window(p.Time) - } else { - r.window.end, r.window.start = r.opt.Window(p.Time) - } - - // If we see the minimum allowable time, set the time to zero so we don't - // break the default returned time for aggregate queries without times. - if r.window.start == influxql.MinTime { - r.window.start = 0 - } - return - } - - // If this point has the same timestamp as the previous one, - // skip the point. Points sent into this reducer are expected - // to be fed in order. - value := float64(p.Value) - if r.prev.Time == p.Time { - r.prev = *p - return - } else if (r.opt.Ascending && p.Time >= r.window.end) || (!r.opt.Ascending && p.Time <= r.window.end) { - // If our previous time is not equal to the window, we need to - // interpolate the area at the end of this interval. - if r.prev.Time != r.window.end { - value = linearFloat(r.window.end, r.prev.Time, p.Time, float64(r.prev.Value), value) - elapsed := float64(r.window.end-r.prev.Time) / float64(r.interval.Duration) - r.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed - - r.prev.Time = r.window.end - } - - // Emit the current point through the channel and then clear it. - r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} - if r.opt.Ascending { - r.window.start, r.window.end = r.opt.Window(p.Time) - } else { - r.window.end, r.window.start = r.opt.Window(p.Time) - } - r.sum = 0.0 - } - - // Normal operation: update the sum using the trapezium rule - elapsed := float64(p.Time-r.prev.Time) / float64(r.interval.Duration) - r.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed - r.prev = *p -} - -// Emit emits the time-integral of the aggregated points as a single FLOAT point -// InfluxQL convention dictates that outside a group-by-time clause we return -// a timestamp of zero. Within a group-by-time, we can set the time to ZeroTime -// and a higher level will change it to the start of the time group. -func (r *IntegerIntegralReducer) Emit() []FloatPoint { - select { - case pt, ok := <-r.ch: - if !ok { - return nil - } - return []FloatPoint{pt} - default: - return nil - } -} - -// Close flushes any in progress points to ensure any remaining points are -// emitted. -func (r *IntegerIntegralReducer) Close() error { - // If our last point is at the start time, then discard this point since - // there is no area within this bucket. Otherwise, send off what we - // currently have as the final point. - if !r.prev.Nil && r.prev.Time != r.window.start { - r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} - } - close(r.ch) - return nil -} - -// IntegerIntegralReducer calculates the time-integral of the aggregated points. -type UnsignedIntegralReducer struct { - interval Interval - sum float64 - prev UnsignedPoint - window struct { - start int64 - end int64 - } - ch chan FloatPoint - opt IteratorOptions -} - -// NewUnsignedIntegralReducer creates a new UnsignedIntegralReducer. -func NewUnsignedIntegralReducer(interval Interval, opt IteratorOptions) *UnsignedIntegralReducer { - return &UnsignedIntegralReducer{ - interval: interval, - prev: UnsignedPoint{Nil: true}, - ch: make(chan FloatPoint, 1), - opt: opt, - } -} - -// AggregateUnsigned aggregates a point into the reducer. -func (r *UnsignedIntegralReducer) AggregateUnsigned(p *UnsignedPoint) { - // If this is the first point, just save it - if r.prev.Nil { - r.prev = *p - - // Record the end of the time interval. 
- // We do not care for whether the last number is inclusive or exclusive - // because we treat both the same for the involved math. - if r.opt.Ascending { - r.window.start, r.window.end = r.opt.Window(p.Time) - } else { - r.window.end, r.window.start = r.opt.Window(p.Time) - } - - // If we see the minimum allowable time, set the time to zero so we don't - // break the default returned time for aggregate queries without times. - if r.window.start == influxql.MinTime { - r.window.start = 0 - } - return - } - - // If this point has the same timestamp as the previous one, - // skip the point. Points sent into this reducer are expected - // to be fed in order. - value := float64(p.Value) - if r.prev.Time == p.Time { - r.prev = *p - return - } else if (r.opt.Ascending && p.Time >= r.window.end) || (!r.opt.Ascending && p.Time <= r.window.end) { - // If our previous time is not equal to the window, we need to - // interpolate the area at the end of this interval. - if r.prev.Time != r.window.end { - value = linearFloat(r.window.end, r.prev.Time, p.Time, float64(r.prev.Value), value) - elapsed := float64(r.window.end-r.prev.Time) / float64(r.interval.Duration) - r.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed - - r.prev.Time = r.window.end - } - - // Emit the current point through the channel and then clear it. - r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} - if r.opt.Ascending { - r.window.start, r.window.end = r.opt.Window(p.Time) - } else { - r.window.end, r.window.start = r.opt.Window(p.Time) - } - r.sum = 0.0 - } - - // Normal operation: update the sum using the trapezium rule - elapsed := float64(p.Time-r.prev.Time) / float64(r.interval.Duration) - r.sum += 0.5 * (value + float64(r.prev.Value)) * elapsed - r.prev = *p -} - -// Emit emits the time-integral of the aggregated points as a single FLOAT point -// InfluxQL convention dictates that outside a group-by-time clause we return -// a timestamp of zero. Within a group-by-time, we can set the time to ZeroTime -// and a higher level will change it to the start of the time group. -func (r *UnsignedIntegralReducer) Emit() []FloatPoint { - select { - case pt, ok := <-r.ch: - if !ok { - return nil - } - return []FloatPoint{pt} - default: - return nil - } -} - -// Close flushes any in progress points to ensure any remaining points are -// emitted. -func (r *UnsignedIntegralReducer) Close() error { - // If our last point is at the start time, then discard this point since - // there is no area within this bucket. Otherwise, send off what we - // currently have as the final point. - if !r.prev.Nil && r.prev.Time != r.window.start { - r.ch <- FloatPoint{Time: r.window.start, Value: r.sum} - } - close(r.ch) - return nil -} - -type FloatTopReducer struct { - h *floatPointsByFunc -} - -func NewFloatTopReducer(n int) *FloatTopReducer { - return &FloatTopReducer{ - h: floatPointsSortBy(make([]FloatPoint, 0, n), func(a, b *FloatPoint) bool { - if a.Value != b.Value { - return a.Value < b.Value - } - return a.Time > b.Time - }), - } -} - -func (r *FloatTopReducer) AggregateFloat(p *FloatPoint) { - if r.h.Len() == cap(r.h.points) { - // Compare the minimum point and the aggregated point. If our value is - // larger, replace the current min value. - if !r.h.cmp(&r.h.points[0], p) { - return - } - p.CopyTo(&r.h.points[0]) - heap.Fix(r.h, 0) - return - } - - var clone FloatPoint - p.CopyTo(&clone) - heap.Push(r.h, clone) -} - -func (r *FloatTopReducer) Emit() []FloatPoint { - // Ensure the points are sorted with the maximum value last. 
While the - // first point may be the minimum value, the rest is not guaranteed to be - // in any particular order while it is a heap. - points := make([]FloatPoint, len(r.h.points)) - for i, p := range r.h.points { - p.Aggregated = 0 - points[i] = p - } - h := floatPointsByFunc{points: points, cmp: r.h.cmp} - sort.Sort(sort.Reverse(&h)) - return points -} - -type IntegerTopReducer struct { - h *integerPointsByFunc -} - -func NewIntegerTopReducer(n int) *IntegerTopReducer { - return &IntegerTopReducer{ - h: integerPointsSortBy(make([]IntegerPoint, 0, n), func(a, b *IntegerPoint) bool { - if a.Value != b.Value { - return a.Value < b.Value - } - return a.Time > b.Time - }), - } -} - -func (r *IntegerTopReducer) AggregateInteger(p *IntegerPoint) { - if r.h.Len() == cap(r.h.points) { - // Compare the minimum point and the aggregated point. If our value is - // larger, replace the current min value. - if !r.h.cmp(&r.h.points[0], p) { - return - } - p.CopyTo(&r.h.points[0]) - heap.Fix(r.h, 0) - return - } - - var clone IntegerPoint - p.CopyTo(&clone) - heap.Push(r.h, clone) -} - -func (r *IntegerTopReducer) Emit() []IntegerPoint { - // Ensure the points are sorted with the maximum value last. While the - // first point may be the minimum value, the rest is not guaranteed to be - // in any particular order while it is a heap. - points := make([]IntegerPoint, len(r.h.points)) - for i, p := range r.h.points { - p.Aggregated = 0 - points[i] = p - } - h := integerPointsByFunc{points: points, cmp: r.h.cmp} - sort.Sort(sort.Reverse(&h)) - return points -} - -type UnsignedTopReducer struct { - h *unsignedPointsByFunc -} - -func NewUnsignedTopReducer(n int) *UnsignedTopReducer { - return &UnsignedTopReducer{ - h: unsignedPointsSortBy(make([]UnsignedPoint, 0, n), func(a, b *UnsignedPoint) bool { - if a.Value != b.Value { - return a.Value < b.Value - } - return a.Time > b.Time - }), - } -} - -func (r *UnsignedTopReducer) AggregateUnsigned(p *UnsignedPoint) { - if r.h.Len() == cap(r.h.points) { - // Compare the minimum point and the aggregated point. If our value is - // larger, replace the current min value. - if !r.h.cmp(&r.h.points[0], p) { - return - } - p.CopyTo(&r.h.points[0]) - heap.Fix(r.h, 0) - return - } - - var clone UnsignedPoint - p.CopyTo(&clone) - heap.Push(r.h, clone) -} - -func (r *UnsignedTopReducer) Emit() []UnsignedPoint { - // Ensure the points are sorted with the maximum value last. While the - // first point may be the minimum value, the rest is not guaranteed to be - // in any particular order while it is a heap. - points := make([]UnsignedPoint, len(r.h.points)) - for i, p := range r.h.points { - p.Aggregated = 0 - points[i] = p - } - h := unsignedPointsByFunc{points: points, cmp: r.h.cmp} - sort.Sort(sort.Reverse(&h)) - return points -} - -type FloatBottomReducer struct { - h *floatPointsByFunc -} - -func NewFloatBottomReducer(n int) *FloatBottomReducer { - return &FloatBottomReducer{ - h: floatPointsSortBy(make([]FloatPoint, 0, n), func(a, b *FloatPoint) bool { - if a.Value != b.Value { - return a.Value > b.Value - } - return a.Time > b.Time - }), - } -} - -func (r *FloatBottomReducer) AggregateFloat(p *FloatPoint) { - if r.h.Len() == cap(r.h.points) { - // Compare the minimum point and the aggregated point. If our value is - // larger, replace the current min value. 
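The top and bottom reducers above keep at most `n` points in a small heap ordered so the weakest retained candidate sits at the root; a new point only displaces the root if it compares better, and `Emit` then sorts the survivors. A standalone top-N sketch over plain `float64` values using `container/heap`; the real reducers operate on point structs and break ties on time.

```go
package main

import (
	"container/heap"
	"fmt"
	"sort"
)

// minHeap is a plain min-heap of float64 values.
type minHeap []float64

func (h minHeap) Len() int            { return len(h) }
func (h minHeap) Less(i, j int) bool  { return h[i] < h[j] }
func (h minHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *minHeap) Push(x interface{}) { *h = append(*h, x.(float64)) }
func (h *minHeap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

// topN keeps the n largest values seen so far: the heap root is always the
// smallest retained value, so a new value only enters if it beats the root.
func topN(values []float64, n int) []float64 {
	h := make(minHeap, 0, n)
	for _, v := range values {
		if h.Len() < n {
			heap.Push(&h, v)
			continue
		}
		if v > h[0] {
			h[0] = v
			heap.Fix(&h, 0)
		}
	}
	out := append([]float64(nil), h...)
	sort.Sort(sort.Reverse(sort.Float64Slice(out)))
	return out
}

func main() {
	fmt.Println(topN([]float64{5, 1, 9, 3, 7, 2}, 3)) // [9 7 5]
}
```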
- if !r.h.cmp(&r.h.points[0], p) { - return - } - p.CopyTo(&r.h.points[0]) - heap.Fix(r.h, 0) - return - } - - var clone FloatPoint - p.CopyTo(&clone) - heap.Push(r.h, clone) -} - -func (r *FloatBottomReducer) Emit() []FloatPoint { - // Ensure the points are sorted with the maximum value last. While the - // first point may be the minimum value, the rest is not guaranteed to be - // in any particular order while it is a heap. - points := make([]FloatPoint, len(r.h.points)) - for i, p := range r.h.points { - p.Aggregated = 0 - points[i] = p - } - h := floatPointsByFunc{points: points, cmp: r.h.cmp} - sort.Sort(sort.Reverse(&h)) - return points -} - -type IntegerBottomReducer struct { - h *integerPointsByFunc -} - -func NewIntegerBottomReducer(n int) *IntegerBottomReducer { - return &IntegerBottomReducer{ - h: integerPointsSortBy(make([]IntegerPoint, 0, n), func(a, b *IntegerPoint) bool { - if a.Value != b.Value { - return a.Value > b.Value - } - return a.Time > b.Time - }), - } -} - -func (r *IntegerBottomReducer) AggregateInteger(p *IntegerPoint) { - if r.h.Len() == cap(r.h.points) { - // Compare the minimum point and the aggregated point. If our value is - // larger, replace the current min value. - if !r.h.cmp(&r.h.points[0], p) { - return - } - p.CopyTo(&r.h.points[0]) - heap.Fix(r.h, 0) - return - } - - var clone IntegerPoint - p.CopyTo(&clone) - heap.Push(r.h, clone) -} - -func (r *IntegerBottomReducer) Emit() []IntegerPoint { - // Ensure the points are sorted with the maximum value last. While the - // first point may be the minimum value, the rest is not guaranteed to be - // in any particular order while it is a heap. - points := make([]IntegerPoint, len(r.h.points)) - for i, p := range r.h.points { - p.Aggregated = 0 - points[i] = p - } - h := integerPointsByFunc{points: points, cmp: r.h.cmp} - sort.Sort(sort.Reverse(&h)) - return points -} - -type UnsignedBottomReducer struct { - h *unsignedPointsByFunc -} - -func NewUnsignedBottomReducer(n int) *UnsignedBottomReducer { - return &UnsignedBottomReducer{ - h: unsignedPointsSortBy(make([]UnsignedPoint, 0, n), func(a, b *UnsignedPoint) bool { - if a.Value != b.Value { - return a.Value > b.Value - } - return a.Time > b.Time - }), - } -} - -func (r *UnsignedBottomReducer) AggregateUnsigned(p *UnsignedPoint) { - if r.h.Len() == cap(r.h.points) { - // Compare the minimum point and the aggregated point. If our value is - // larger, replace the current min value. - if !r.h.cmp(&r.h.points[0], p) { - return - } - p.CopyTo(&r.h.points[0]) - heap.Fix(r.h, 0) - return - } - - var clone UnsignedPoint - p.CopyTo(&clone) - heap.Push(r.h, clone) -} - -func (r *UnsignedBottomReducer) Emit() []UnsignedPoint { - // Ensure the points are sorted with the maximum value last. While the - // first point may be the minimum value, the rest is not guaranteed to be - // in any particular order while it is a heap. - points := make([]UnsignedPoint, len(r.h.points)) - for i, p := range r.h.points { - p.Aggregated = 0 - points[i] = p - } - h := unsignedPointsByFunc{points: points, cmp: r.h.cmp} - sort.Sort(sort.Reverse(&h)) - return points -} - -type StringMergeHllReducer struct { - plus *hll.Plus - err error -} - -func NewStringMergeHllReducer() *StringMergeHllReducer { - return &StringMergeHllReducer{plus: nil} -} - -func unmarshalPlus(s string) (*hll.Plus, error) { - if string(hllPrefix) != s[:len(hllPrefix)] { - if string(hllErrorPrefix) == s[:len(hllErrorPrefix)] { - // parse a special error out of the string. 
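The unmarshalPlus and marshalPlus helpers here carry HLL sketches through the string-typed point pipeline by prefixing a marker (the tests later in this diff show values beginning with "HLL_" and errors beginning with "HLLERROR ") and base64-encoding the binary sketch. A minimal sketch of just that prefix-plus-base64 framing, with made-up constant names and an arbitrary payload:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// Illustrative prefixes; the real ones live in the query package.
const (
	prefix      = "HLL_"
	errorPrefix = "HLLERROR "
)

func encode(payload []byte) string {
	return prefix + base64.StdEncoding.EncodeToString(payload)
}

func decode(s string) ([]byte, error) {
	if strings.HasPrefix(s, errorPrefix) {
		// An error from an earlier stage was smuggled through as a string value.
		return nil, fmt.Errorf("%s", s[len(errorPrefix):])
	}
	if !strings.HasPrefix(s, prefix) {
		return nil, fmt.Errorf("bad prefix")
	}
	return base64.StdEncoding.DecodeString(s[len(prefix):])
}

func main() {
	s := encode([]byte{0x01, 0x02, 0x03})
	fmt.Println(s) // HLL_AQID
	b, err := decode(s)
	fmt.Println(b, err) // [1 2 3] <nil>
}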
- return nil, fmt.Errorf("%v", s[len(hllErrorPrefix):]) - } - return nil, fmt.Errorf("bad prefix for hll.Plus") - } - data := []byte(s[len(hllPrefix):]) - if len(data) == 0 { - // explicitly treat as empty no-op - return nil, nil - } - b := make([]byte, base64.StdEncoding.DecodedLen(len(data))) - _, _ = base64.StdEncoding.Decode(b, data) - h := new(hll.Plus) - if err := h.UnmarshalBinary(b); err != nil { - return nil, err - } - return h, nil -} - -func (r *StringMergeHllReducer) AggregateString(p *StringPoint) { - // we cannot return an error because returning an error slows all aggregation - // functions by ~1%. So we hack around it by marshalling the error as a string. - if r.err != nil { - return - } - h, err := unmarshalPlus(p.Value) - if err != nil { - r.err = err - return - } - if r.plus == nil { - r.plus = h - return - } - err = r.plus.Merge(h) - if err != nil { - r.err = err - return - } -} - -func marshalPlus(p *hll.Plus, err error) StringPoint { - if err != nil { - return StringPoint{ - Time: ZeroTime, - Value: string(hllErrorPrefix) + err.Error(), - } - } - if p == nil { - return StringPoint{ - Time: ZeroTime, - Value: string(hllPrefix), - } - } - b, err := p.MarshalBinary() - if err != nil { - return StringPoint{ - Time: ZeroTime, - Value: string(hllErrorPrefix) + err.Error(), - } - } - hllValue := make([]byte, len(hllPrefix)+base64.StdEncoding.EncodedLen(len(b))) - copy(hllValue, hllPrefix) - base64.StdEncoding.Encode(hllValue[len(hllPrefix):], b) - return StringPoint{ - Time: ZeroTime, - Value: string(hllValue), - } -} - -func (r *StringMergeHllReducer) Emit() []StringPoint { - return []StringPoint{ - marshalPlus(r.plus, r.err), - } -} - -type CountHllReducer struct { - next UnsignedPoint -} - -func NewCountHllReducer() *CountHllReducer { - return &CountHllReducer{} -} - -func (r *CountHllReducer) AggregateString(p *StringPoint) { - r.next.Name = p.Name - r.next.Time = p.Time - h, err := unmarshalPlus(p.Value) - if err != nil { - r.next.Value = 0 - return - } - r.next.Value = h.Count() -} - -func (r *CountHllReducer) Emit() []UnsignedPoint { - return []UnsignedPoint{ - r.next, - } -} diff --git a/influxql/query/functions_test.go b/influxql/query/functions_test.go deleted file mode 100644 index f31bbe7bbf4..00000000000 --- a/influxql/query/functions_test.go +++ /dev/null @@ -1,606 +0,0 @@ -package query_test - -import ( - "crypto/sha1" - "fmt" - "math" - "math/rand" - "runtime" - "strconv" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/pkg/deep" - "github.com/influxdata/influxql" - tassert "github.com/stretchr/testify/assert" - trequire "github.com/stretchr/testify/require" -) - -func almostEqual(got, exp float64) bool { - return math.Abs(got-exp) < 1e-5 && !math.IsNaN(got) -} - -func TestHoltWinters_AusTourists(t *testing.T) { - if runtime.GOARCH != "amd64" { - t.Skip("Expected HoltWinters outputs only valid when GOARCH = amd64") - } - - hw := query.NewFloatHoltWintersReducer(10, 4, false, 1) - // Dataset from http://www.inside-r.org/packages/cran/fpp/docs/austourists - austourists := []query.FloatPoint{ - {Time: 1, Value: 30.052513}, - {Time: 2, Value: 19.148496}, - {Time: 3, Value: 25.317692}, - {Time: 4, Value: 27.591437}, - {Time: 5, Value: 32.076456}, - {Time: 6, Value: 23.487961}, - {Time: 7, Value: 28.47594}, - {Time: 8, Value: 35.123753}, - {Time: 9, Value: 36.838485}, - {Time: 10, Value: 25.007017}, - {Time: 11, Value: 
30.72223}, - {Time: 12, Value: 28.693759}, - {Time: 13, Value: 36.640986}, - {Time: 14, Value: 23.824609}, - {Time: 15, Value: 29.311683}, - {Time: 16, Value: 31.770309}, - {Time: 17, Value: 35.177877}, - {Time: 18, Value: 19.775244}, - {Time: 19, Value: 29.60175}, - {Time: 20, Value: 34.538842}, - {Time: 21, Value: 41.273599}, - {Time: 22, Value: 26.655862}, - {Time: 23, Value: 28.279859}, - {Time: 24, Value: 35.191153}, - {Time: 25, Value: 41.727458}, - {Time: 26, Value: 24.04185}, - {Time: 27, Value: 32.328103}, - {Time: 28, Value: 37.328708}, - {Time: 29, Value: 46.213153}, - {Time: 30, Value: 29.346326}, - {Time: 31, Value: 36.48291}, - {Time: 32, Value: 42.977719}, - {Time: 33, Value: 48.901525}, - {Time: 34, Value: 31.180221}, - {Time: 35, Value: 37.717881}, - {Time: 36, Value: 40.420211}, - {Time: 37, Value: 51.206863}, - {Time: 38, Value: 31.887228}, - {Time: 39, Value: 40.978263}, - {Time: 40, Value: 43.772491}, - {Time: 41, Value: 55.558567}, - {Time: 42, Value: 33.850915}, - {Time: 43, Value: 42.076383}, - {Time: 44, Value: 45.642292}, - {Time: 45, Value: 59.76678}, - {Time: 46, Value: 35.191877}, - {Time: 47, Value: 44.319737}, - {Time: 48, Value: 47.913736}, - } - - for _, p := range austourists { - hw.AggregateFloat(&p) - } - points := hw.Emit() - - forecasted := []query.FloatPoint{ - {Time: 49, Value: 51.85064132137853}, - {Time: 50, Value: 43.26055282315273}, - {Time: 51, Value: 41.827258044814464}, - {Time: 52, Value: 54.3990354591749}, - {Time: 53, Value: 54.62334472770803}, - {Time: 54, Value: 45.57155693625209}, - {Time: 55, Value: 44.06051240252263}, - {Time: 56, Value: 57.30029870759433}, - {Time: 57, Value: 57.53591513519172}, - {Time: 58, Value: 47.999008139396096}, - } - - if exp, got := len(forecasted), len(points); exp != got { - t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) - } - - for i := range forecasted { - if exp, got := forecasted[i].Time, points[i].Time; got != exp { - t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) - } - if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { - t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) - } - } -} - -func TestHoltWinters_AusTourists_Missing(t *testing.T) { - if runtime.GOARCH != "amd64" { - t.Skip("Expected HoltWinters outputs only valid when GOARCH = amd64") - } - - hw := query.NewFloatHoltWintersReducer(10, 4, false, 1) - // Dataset from http://www.inside-r.org/packages/cran/fpp/docs/austourists - austourists := []query.FloatPoint{ - {Time: 1, Value: 30.052513}, - {Time: 3, Value: 25.317692}, - {Time: 4, Value: 27.591437}, - {Time: 5, Value: 32.076456}, - {Time: 6, Value: 23.487961}, - {Time: 7, Value: 28.47594}, - {Time: 9, Value: 36.838485}, - {Time: 10, Value: 25.007017}, - {Time: 11, Value: 30.72223}, - {Time: 12, Value: 28.693759}, - {Time: 13, Value: 36.640986}, - {Time: 14, Value: 23.824609}, - {Time: 15, Value: 29.311683}, - {Time: 16, Value: 31.770309}, - {Time: 17, Value: 35.177877}, - {Time: 19, Value: 29.60175}, - {Time: 20, Value: 34.538842}, - {Time: 21, Value: 41.273599}, - {Time: 22, Value: 26.655862}, - {Time: 23, Value: 28.279859}, - {Time: 24, Value: 35.191153}, - {Time: 25, Value: 41.727458}, - {Time: 26, Value: 24.04185}, - {Time: 27, Value: 32.328103}, - {Time: 28, Value: 37.328708}, - {Time: 30, Value: 29.346326}, - {Time: 31, Value: 36.48291}, - {Time: 32, Value: 42.977719}, - {Time: 34, Value: 31.180221}, - {Time: 35, Value: 37.717881}, - {Time: 36, Value: 40.420211}, - {Time: 37, Value: 
51.206863}, - {Time: 38, Value: 31.887228}, - {Time: 41, Value: 55.558567}, - {Time: 42, Value: 33.850915}, - {Time: 43, Value: 42.076383}, - {Time: 44, Value: 45.642292}, - {Time: 45, Value: 59.76678}, - {Time: 46, Value: 35.191877}, - {Time: 47, Value: 44.319737}, - {Time: 48, Value: 47.913736}, - } - - for _, p := range austourists { - hw.AggregateFloat(&p) - } - points := hw.Emit() - - forecasted := []query.FloatPoint{ - {Time: 49, Value: 54.84533610387743}, - {Time: 50, Value: 41.19329421863249}, - {Time: 51, Value: 45.71673175112451}, - {Time: 52, Value: 56.05759298805955}, - {Time: 53, Value: 59.32337460282217}, - {Time: 54, Value: 44.75280096850461}, - {Time: 55, Value: 49.98865098113751}, - {Time: 56, Value: 61.86084934967605}, - {Time: 57, Value: 65.95805633454883}, - {Time: 58, Value: 50.1502170480547}, - } - - if exp, got := len(forecasted), len(points); exp != got { - t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) - } - - for i := range forecasted { - if exp, got := forecasted[i].Time, points[i].Time; got != exp { - t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) - } - if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { - t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) - } - } -} - -func TestHoltWinters_USPopulation(t *testing.T) { - if runtime.GOARCH != "amd64" { - t.Skip("Expected HoltWinters outputs only valid when GOARCH = amd64") - } - - series := []query.FloatPoint{ - {Time: 1, Value: 3.93}, - {Time: 2, Value: 5.31}, - {Time: 3, Value: 7.24}, - {Time: 4, Value: 9.64}, - {Time: 5, Value: 12.90}, - {Time: 6, Value: 17.10}, - {Time: 7, Value: 23.20}, - {Time: 8, Value: 31.40}, - {Time: 9, Value: 39.80}, - {Time: 10, Value: 50.20}, - {Time: 11, Value: 62.90}, - {Time: 12, Value: 76.00}, - {Time: 13, Value: 92.00}, - {Time: 14, Value: 105.70}, - {Time: 15, Value: 122.80}, - {Time: 16, Value: 131.70}, - {Time: 17, Value: 151.30}, - {Time: 18, Value: 179.30}, - {Time: 19, Value: 203.20}, - } - hw := query.NewFloatHoltWintersReducer(10, 0, true, 1) - for _, p := range series { - hw.AggregateFloat(&p) - } - points := hw.Emit() - - forecasted := []query.FloatPoint{ - {Time: 1, Value: 3.93}, - {Time: 2, Value: 4.957405463559748}, - {Time: 3, Value: 7.012210102535647}, - {Time: 4, Value: 10.099589257439924}, - {Time: 5, Value: 14.229926188104242}, - {Time: 6, Value: 19.418878968703797}, - {Time: 7, Value: 25.68749172281409}, - {Time: 8, Value: 33.062351305731305}, - {Time: 9, Value: 41.575791076125206}, - {Time: 10, Value: 51.26614395589263}, - {Time: 11, Value: 62.178047564264595}, - {Time: 12, Value: 74.36280483872488}, - {Time: 13, Value: 87.87880423073163}, - {Time: 14, Value: 102.79200429905801}, - {Time: 15, Value: 119.17648832929542}, - {Time: 16, Value: 137.11509549747296}, - {Time: 17, Value: 156.70013608313175}, - {Time: 18, Value: 178.03419933863566}, - {Time: 19, Value: 201.23106385518594}, - {Time: 20, Value: 226.4167216525905}, - {Time: 21, Value: 253.73052878285205}, - {Time: 22, Value: 283.32649700397553}, - {Time: 23, Value: 315.37474308085984}, - {Time: 24, Value: 350.06311454009256}, - {Time: 25, Value: 387.59901328556873}, - {Time: 26, Value: 428.21144141893404}, - {Time: 27, Value: 472.1532969569147}, - {Time: 28, Value: 519.7039509590035}, - {Time: 29, Value: 571.1721419458248}, - } - - if exp, got := len(forecasted), len(points); exp != got { - t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) - } - for i := range forecasted { - if exp, got := 
forecasted[i].Time, points[i].Time; got != exp { - t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) - } - if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { - t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) - } - } -} - -func TestHoltWinters_USPopulation_Missing(t *testing.T) { - if runtime.GOARCH != "amd64" { - t.Skip("Expected HoltWinters outputs only valid when GOARCH = amd64") - } - - series := []query.FloatPoint{ - {Time: 1, Value: 3.93}, - {Time: 2, Value: 5.31}, - {Time: 3, Value: 7.24}, - {Time: 4, Value: 9.64}, - {Time: 5, Value: 12.90}, - {Time: 6, Value: 17.10}, - {Time: 7, Value: 23.20}, - {Time: 8, Value: 31.40}, - {Time: 10, Value: 50.20}, - {Time: 11, Value: 62.90}, - {Time: 12, Value: 76.00}, - {Time: 13, Value: 92.00}, - {Time: 15, Value: 122.80}, - {Time: 16, Value: 131.70}, - {Time: 17, Value: 151.30}, - {Time: 19, Value: 203.20}, - } - hw := query.NewFloatHoltWintersReducer(10, 0, true, 1) - for _, p := range series { - hw.AggregateFloat(&p) - } - points := hw.Emit() - - forecasted := []query.FloatPoint{ - {Time: 1, Value: 3.93}, - {Time: 2, Value: 4.8931364428135105}, - {Time: 3, Value: 6.962653629047061}, - {Time: 4, Value: 10.056207765903274}, - {Time: 5, Value: 14.18435088129532}, - {Time: 6, Value: 19.362939306110846}, - {Time: 7, Value: 25.613247940326584}, - {Time: 8, Value: 32.96213087008264}, - {Time: 9, Value: 41.442230043017204}, - {Time: 10, Value: 51.09223428526052}, - {Time: 11, Value: 61.95719155158485}, - {Time: 12, Value: 74.08887794968567}, - {Time: 13, Value: 87.54622778052787}, - {Time: 14, Value: 102.39582960014131}, - {Time: 15, Value: 118.7124941463221}, - {Time: 16, Value: 136.57990089987464}, - {Time: 17, Value: 156.09133107941278}, - {Time: 18, Value: 177.35049601833734}, - {Time: 19, Value: 200.472471161683}, - {Time: 20, Value: 225.58474737097785}, - {Time: 21, Value: 252.82841286206823}, - {Time: 22, Value: 282.35948095261017}, - {Time: 23, Value: 314.3503808953992}, - {Time: 24, Value: 348.99163145856954}, - {Time: 25, Value: 386.49371962730555}, - {Time: 26, Value: 427.08920989407727}, - {Time: 27, Value: 471.0351131332573}, - {Time: 28, Value: 518.615548088049}, - {Time: 29, Value: 570.1447331101863}, - } - - if exp, got := len(forecasted), len(points); exp != got { - t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) - } - for i := range forecasted { - if exp, got := forecasted[i].Time, points[i].Time; got != exp { - t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) - } - if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { - t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) - } - } -} -func TestHoltWinters_RoundTime(t *testing.T) { - if runtime.GOARCH != "amd64" { - t.Skip("Expected HoltWinters outputs only valid when GOARCH = amd64") - } - - maxTime := time.Unix(0, influxql.MaxTime).Round(time.Second).UnixNano() - data := []query.FloatPoint{ - {Time: maxTime - int64(5*time.Second), Value: 1}, - {Time: maxTime - int64(4*time.Second+103*time.Millisecond), Value: 10}, - {Time: maxTime - int64(3*time.Second+223*time.Millisecond), Value: 2}, - {Time: maxTime - int64(2*time.Second+481*time.Millisecond), Value: 11}, - } - hw := query.NewFloatHoltWintersReducer(2, 2, true, time.Second) - for _, p := range data { - hw.AggregateFloat(&p) - } - points := hw.Emit() - - forecasted := []query.FloatPoint{ - {Time: maxTime - int64(5*time.Second), Value: 1}, - {Time: maxTime - int64(4*time.Second), 
Value: 10.006729104838234}, - {Time: maxTime - int64(3*time.Second), Value: 1.998341814469269}, - {Time: maxTime - int64(2*time.Second), Value: 10.997858830631172}, - {Time: maxTime - int64(1*time.Second), Value: 4.085860238030013}, - {Time: maxTime - int64(0*time.Second), Value: 11.35713604403339}, - } - - if exp, got := len(forecasted), len(points); exp != got { - t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) - } - for i := range forecasted { - if exp, got := forecasted[i].Time, points[i].Time; got != exp { - t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) - } - if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { - t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) - } - } -} - -func TestHoltWinters_MaxTime(t *testing.T) { - if runtime.GOARCH != "amd64" { - t.Skip("Expected HoltWinters outputs only valid when GOARCH = amd64") - } - - data := []query.FloatPoint{ - {Time: influxql.MaxTime - 1, Value: 1}, - {Time: influxql.MaxTime, Value: 2}, - } - hw := query.NewFloatHoltWintersReducer(1, 0, true, 1) - for _, p := range data { - hw.AggregateFloat(&p) - } - points := hw.Emit() - - forecasted := []query.FloatPoint{ - {Time: influxql.MaxTime - 1, Value: 1}, - {Time: influxql.MaxTime, Value: 2.001516944066403}, - {Time: influxql.MaxTime + 1, Value: 2.5365248972488343}, - } - - if exp, got := len(forecasted), len(points); exp != got { - t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) - } - for i := range forecasted { - if exp, got := forecasted[i].Time, points[i].Time; got != exp { - t.Errorf("unexpected time on points[%d] got %v exp %v", i, got, exp) - } - if exp, got := forecasted[i].Value, points[i].Value; !almostEqual(got, exp) { - t.Errorf("unexpected value on points[%d] got %v exp %v", i, got, exp) - } - } -} - -// TestSample_AllSamplesSeen attempts to verify that it is possible -// to get every subsample in a reasonable number of iterations. -// -// The idea here is that 30 iterations should be enough to hit every possible -// sequence at least once. -func TestSample_AllSamplesSeen(t *testing.T) { - ps := []query.FloatPoint{ - {Time: 1, Value: 1}, - {Time: 2, Value: 2}, - {Time: 3, Value: 3}, - } - - // List of all the possible subsamples - samples := [][]query.FloatPoint{ - { - {Time: 1, Value: 1}, - {Time: 2, Value: 2}, - }, - { - {Time: 1, Value: 1}, - {Time: 3, Value: 3}, - }, - { - {Time: 2, Value: 2}, - {Time: 3, Value: 3}, - }, - } - - // 30 iterations should be sufficient to guarantee that - // we hit every possible subsample. - for i := 0; i < 30; i++ { - s := query.NewFloatSampleReducer(2) - for _, p := range ps { - s.AggregateFloat(&p) - } - - points := s.Emit() - - for i, sample := range samples { - // if we find a sample that it matches, remove it from - // this list of possible samples - if deep.Equal(sample, points) { - samples = append(samples[:i], samples[i+1:]...) - break - } - } - - // if samples is empty we've seen every sample, so we're done - if len(samples) == 0 { - return - } - - // The FloatSampleReducer is seeded with time.Now().UnixNano(), and without this sleep, - // this test will fail on machines where UnixNano doesn't return full resolution. - // Specifically, some Windows machines will only return timestamps accurate to 100ns. - // While iterating through this test without an explicit sleep, - // we would only see one or two unique seeds across all the calls to NewFloatSampleReducer. 
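The sample tests here exercise FloatSampleReducer, which draws a fixed-size random subsample of the points it is fed. One standard single-pass way to draw such a sample is reservoir sampling (Algorithm R); the sketch below is illustrative only and is not necessarily how the reducer itself is implemented:

package main

import (
	"fmt"
	"math/rand"
)

// reservoirSample returns k items drawn uniformly at random from items in a
// single pass (Algorithm R). With a fixed seed the draw is repeatable.
func reservoirSample(items []float64, k int, rng *rand.Rand) []float64 {
	out := make([]float64, 0, k)
	for i, v := range items {
		if i < k {
			out = append(out, v)
			continue
		}
		// Replace an existing slot with probability k/(i+1).
		if j := rng.Intn(i + 1); j < k {
			out[j] = v
		}
	}
	return out
}

func main() {
	rng := rand.New(rand.NewSource(1))
	fmt.Println(reservoirSample([]float64{1, 2, 3, 4, 5}, 2, rng))
}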
- time.Sleep(time.Millisecond) - } - - // If we missed a sample, report the error - if len(samples) != 0 { - t.Fatalf("expected all samples to be seen; unseen samples: %#v", samples) - } -} - -func TestSample_SampleSizeLessThanNumPoints(t *testing.T) { - s := query.NewFloatSampleReducer(2) - - ps := []query.FloatPoint{ - {Time: 1, Value: 1}, - {Time: 2, Value: 2}, - {Time: 3, Value: 3}, - } - - for _, p := range ps { - s.AggregateFloat(&p) - } - - points := s.Emit() - - if exp, got := 2, len(points); exp != got { - t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) - } -} - -func TestSample_SampleSizeGreaterThanNumPoints(t *testing.T) { - s := query.NewFloatSampleReducer(4) - - ps := []query.FloatPoint{ - {Time: 1, Value: 1}, - {Time: 2, Value: 2}, - {Time: 3, Value: 3}, - } - - for _, p := range ps { - s.AggregateFloat(&p) - } - - points := s.Emit() - - if exp, got := len(ps), len(points); exp != got { - t.Fatalf("unexpected number of points emitted: got %d exp %d", got, exp) - } - - if !deep.Equal(ps, points) { - t.Fatalf("unexpected points: %s", spew.Sdump(points)) - } -} - -func TestHll_SumAndMergeHll(t *testing.T) { - assert := tassert.New(t) - require := trequire.New(t) - - // Make 3000 random strings - r := rand.New(rand.NewSource(42)) - input := make([]*query.StringPoint, 0, 3000) - for i := 0; i < 3000; i++ { - input = append(input, &query.StringPoint{Value: strconv.FormatUint(r.Uint64(), 10)}) - } - - // Insert overlapping sections of the same points array to different reducers - s1 := query.NewStringSumHllReducer() - for _, p := range input[:2000] { - s1.AggregateString(p) - } - point1 := s1.Emit() - s2 := query.NewStringSumHllReducer() - for _, p := range input[1000:] { - s2.AggregateString(p) - } - point2 := s2.Emit() - // Demonstration of the input: repeatably seeded pseudorandom - // stringified integers (so we are testing the counting of unique strings, - // not unique integers). 
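This test feeds input[:2000] and input[1000:] (3000 distinct values with a 1000-value overlap) into two HLL sum reducers, merges the resulting sketches, and below expects a count of 2994, roughly 0.2% away from the exact answer. For contrast, an exact distinct count of a union needs memory proportional to the number of distinct values, as in this small sketch with made-up data:

package main

import "fmt"

// exactDistinct counts unique strings across both slices with a set; an HLL
// sketch trades this O(distinct) memory for a small fixed-size structure and
// a few tenths of a percent of counting error.
func exactDistinct(a, b []string) int {
	seen := make(map[string]struct{})
	for _, s := range a {
		seen[s] = struct{}{}
	}
	for _, s := range b {
		seen[s] = struct{}{}
	}
	return len(seen)
}

func main() {
	a := []string{"x", "y", "z"}
	b := []string{"y", "z", "w"}
	fmt.Println(exactDistinct(a, b)) // 4
}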
- require.Equal("17190211103962133664", input[2999].Value) - - checkStringFingerprint := func(prefix string, length int, hash string, check string) { - assert.Equal(length, len(check)) - assert.Equal(prefix, check[:len(prefix)]) - h := sha1.New() - h.Write([]byte(check)) - assert.Equal(hash, fmt.Sprintf("%x", h.Sum(nil))) - } - - require.Equal(len(point1), 1) - require.Equal(len(point2), 1) - checkStringFingerprint("HLL_AhABAAAAAAAAB9BIDQAJAAAUUaKsA4K/AtARkuMBsJwEyp8O", - 6964, "c59fa799fe8e78ab5347de385bf2a7c5b8085882", point1[0].Value) - checkStringFingerprint("HLL_AhABAAAAAAAAB9Db0QAHAAAUaP6aAaSRAoK/Ap70B/xSysEE", - 6996, "5f1696dfb455baab7fdb56ffd2197d27b09d6dcf", point2[0].Value) - - m := query.NewStringMergeHllReducer() - m.AggregateString(&point1[0]) - m.AggregateString(&point2[0]) - merged := m.Emit() - require.Equal(1, len(merged)) - checkStringFingerprint("HLL_AhAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAAA", - 87396, "e5320860aa322efe9af268e171df916d2186c75f", merged[0].Value) - - m.AggregateString(&query.StringPoint{ - Time: query.ZeroTime, - Value: "some random string", - }) - mergedError := m.Emit() - // mid-level errors are: - require.Equal(1, len(mergedError)) - assert.Equal("HLLERROR bad prefix for hll.Plus", mergedError[0].Value) - - c := query.NewCountHllReducer() - c.AggregateString(&merged[0]) - counted := c.Emit() - require.Equal(1, len(counted)) - // Counted 4000 points, 3000 distinct points, answer is 2994 ≈ 3000 - assert.Equal(uint64(2994), counted[0].Value) - - c.AggregateString(&query.StringPoint{ - Time: query.ZeroTime, - Value: "HLLERROR bad prefix for hll.Plus", - }) - counted = c.Emit() - require.Equal(1, len(counted)) - // When we hit marshal/unmarshal errors - assert.Equal(uint64(0), counted[0].Value) -} diff --git a/influxql/query/internal/gota/README.md b/influxql/query/internal/gota/README.md deleted file mode 100644 index 457c58ec902..00000000000 --- a/influxql/query/internal/gota/README.md +++ /dev/null @@ -1,3 +0,0 @@ -This is a port of [gota](https://github.com/phemmer/gota) to be adapted inside of InfluxDB. - -This port was made with the permission of the author, Patrick Hemmer, and has been modified to remove dependencies that are not part of InfluxDB. diff --git a/influxql/query/internal/gota/cmo.go b/influxql/query/internal/gota/cmo.go deleted file mode 100644 index 772644f1899..00000000000 --- a/influxql/query/internal/gota/cmo.go +++ /dev/null @@ -1,127 +0,0 @@ -package gota - -// CMO - Chande Momentum Oscillator (https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/cmo) -type CMO struct { - points []cmoPoint - sumUp float64 - sumDown float64 - count int - idx int // index of newest point -} - -type cmoPoint struct { - price float64 - diff float64 -} - -// NewCMO constructs a new CMO. -func NewCMO(inTimePeriod int) *CMO { - return &CMO{ - points: make([]cmoPoint, inTimePeriod-1), - } -} - -// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". -func (cmo *CMO) WarmCount() int { - return len(cmo.points) -} - -// Add adds a new sample value to the algorithm and returns the computed value. 
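The CMO type above maintains running sums of positive and negative price changes over a ring buffer and reports 100 * (sumUp - sumDown) / (sumUp + sumDown). A naive whole-window version of the same formula, for reference (illustrative helper, not the streaming implementation below):

package main

import "fmt"

// cmo computes the Chande Momentum Oscillator over the last `period` changes
// of prices, recomputing the up/down sums from scratch each call.
func cmo(prices []float64, period int) float64 {
	if len(prices) < period+1 {
		return 0 // not enough data to be "warmed"
	}
	var sumUp, sumDown float64
	start := len(prices) - period
	for i := start; i < len(prices); i++ {
		diff := prices[i] - prices[i-1]
		if diff > 0 {
			sumUp += diff
		} else {
			sumDown -= diff
		}
	}
	if sumUp+sumDown == 0 {
		return 0
	}
	return 100 * (sumUp - sumDown) / (sumUp + sumDown)
}

func main() {
	prices := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
	fmt.Println(cmo(prices, 10)) // every change is upward, so the result is 100
}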
-func (cmo *CMO) Add(v float64) float64 { - idxOldest := cmo.idx + 1 - if idxOldest == len(cmo.points) { - idxOldest = 0 - } - - var diff float64 - if cmo.count != 0 { - prev := cmo.points[cmo.idx] - diff = v - prev.price - if diff > 0 { - cmo.sumUp += diff - } else if diff < 0 { - cmo.sumDown -= diff - } - } - - var outV float64 - if cmo.sumUp != 0 || cmo.sumDown != 0 { - outV = 100.0 * ((cmo.sumUp - cmo.sumDown) / (cmo.sumUp + cmo.sumDown)) - } - - oldest := cmo.points[idxOldest] - //NOTE: because we're just adding and subtracting the difference, and not recalculating sumUp/sumDown using cmo.points[].price, it's possible for imprecision to creep in over time. Not sure how significant this is going to be, but if we want to fix it, we could recalculate it from scratch every N points. - if oldest.diff > 0 { - cmo.sumUp -= oldest.diff - } else if oldest.diff < 0 { - cmo.sumDown += oldest.diff - } - - p := cmoPoint{ - price: v, - diff: diff, - } - cmo.points[idxOldest] = p - cmo.idx = idxOldest - - if !cmo.Warmed() { - cmo.count++ - } - - return outV -} - -// Warmed indicates whether the algorithm has enough data to generate accurate results. -func (cmo *CMO) Warmed() bool { - return cmo.count == len(cmo.points)+2 -} - -// CMOS is a smoothed version of the Chande Momentum Oscillator. -// This is the version of CMO utilized by ta-lib. -type CMOS struct { - emaUp EMA - emaDown EMA - lastV float64 -} - -// NewCMOS constructs a new CMOS. -func NewCMOS(inTimePeriod int, warmType WarmupType) *CMOS { - ema := NewEMA(inTimePeriod+1, warmType) - ema.alpha = float64(1) / float64(inTimePeriod) - return &CMOS{ - emaUp: *ema, - emaDown: *ema, - } -} - -// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". -func (cmos CMOS) WarmCount() int { - return cmos.emaUp.WarmCount() -} - -// Warmed indicates whether the algorithm has enough data to generate accurate results. -func (cmos CMOS) Warmed() bool { - return cmos.emaUp.Warmed() -} - -// Last returns the last output value. -func (cmos CMOS) Last() float64 { - up := cmos.emaUp.Last() - down := cmos.emaDown.Last() - return 100.0 * ((up - down) / (up + down)) -} - -// Add adds a new sample value to the algorithm and returns the computed value. 
-func (cmos *CMOS) Add(v float64) float64 { - var up float64 - var down float64 - if v > cmos.lastV { - up = v - cmos.lastV - } else if v < cmos.lastV { - down = cmos.lastV - v - } - cmos.emaUp.Add(up) - cmos.emaDown.Add(down) - cmos.lastV = v - return cmos.Last() -} diff --git a/influxql/query/internal/gota/cmo_test.go b/influxql/query/internal/gota/cmo_test.go deleted file mode 100644 index 2a8dffeaf5b..00000000000 --- a/influxql/query/internal/gota/cmo_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package gota - -import "testing" - -func TestCMO(t *testing.T) { - list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} - - expList := []float64{100, 100, 100, 100, 100, 80, 60, 40, 20, 0, -20, -40, -60, -80, -100, -100, -100, -100, -100} - - cmo := NewCMO(10) - var actList []float64 - for _, v := range list { - if vOut := cmo.Add(v); cmo.Warmed() { - actList = append(actList, vOut) - } - } - - if diff := diffFloats(expList, actList, 1e-7); diff != "" { - t.Errorf("unexpected floats:\n%s", diff) - } -} - -func TestCMOS(t *testing.T) { - list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} - - // expList is generated by the following code: - // expList, _ := talib.Cmo(list, 10, nil) - expList := []float64{100, 100, 100, 100, 100, 80, 61.999999999999986, 45.79999999999999, 31.22, 18.097999999999992, 6.288199999999988, -4.340620000000012, -13.906558000000008, -22.515902200000014, -30.264311980000013, -37.23788078200001, -43.51409270380002, -49.16268343342002, -54.24641509007802} - - cmo := NewCMOS(10, WarmSMA) - var actList []float64 - for _, v := range list { - if vOut := cmo.Add(v); cmo.Warmed() { - actList = append(actList, vOut) - } - } - - if diff := diffFloats(expList, actList, 1e-7); diff != "" { - t.Errorf("unexpected floats:\n%s", diff) - } -} diff --git a/influxql/query/internal/gota/ema.go b/influxql/query/internal/gota/ema.go deleted file mode 100644 index 69681443cc9..00000000000 --- a/influxql/query/internal/gota/ema.go +++ /dev/null @@ -1,188 +0,0 @@ -package gota - -import ( - "fmt" -) - -type AlgSimple interface { - Add(float64) float64 - Warmed() bool - WarmCount() int -} - -type WarmupType int8 - -const ( - WarmEMA WarmupType = iota // Exponential Moving Average - WarmSMA // Simple Moving Average -) - -func ParseWarmupType(wt string) (WarmupType, error) { - switch wt { - case "exponential": - return WarmEMA, nil - case "simple": - return WarmSMA, nil - default: - return 0, fmt.Errorf("invalid warmup type '%s'", wt) - } -} - -// EMA - Exponential Moving Average (http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:moving_averages#exponential_moving_average_calculation) -type EMA struct { - inTimePeriod int - last float64 - count int - alpha float64 - warmType WarmupType -} - -// NewEMA constructs a new EMA. -// -// When warmed with WarmSMA the first inTimePeriod samples will result in a simple average, switching to exponential moving average after warmup is complete. -// -// When warmed with WarmEMA the algorithm immediately starts using an exponential moving average for the output values. During the warmup period the alpha value is scaled to prevent unbalanced weighting on initial values. 
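NewEMA below uses the conventional smoothing factor alpha = 2/(inTimePeriod+1), and once warmed the update is last + alpha*(v - last); the two warmup modes only differ in how the first samples are blended. A minimal batch version of the warmed update, ignoring warmup handling (illustrative only):

package main

import "fmt"

// ema applies the standard exponential moving average update
// avg = avg + alpha*(v-avg) with alpha = 2/(n+1), seeding with the first value.
func ema(values []float64, n int) []float64 {
	alpha := 2 / float64(n+1)
	out := make([]float64, 0, len(values))
	var avg float64
	for i, v := range values {
		if i == 0 {
			avg = v
		} else {
			avg += alpha * (v - avg)
		}
		out = append(out, avg)
	}
	return out
}

func main() {
	fmt.Println(ema([]float64{1, 2, 3, 4, 5}, 3)) // smoothed series with alpha = 0.5
}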
-func NewEMA(inTimePeriod int, warmType WarmupType) *EMA { - return &EMA{ - inTimePeriod: inTimePeriod, - alpha: 2 / float64(inTimePeriod+1), - warmType: warmType, - } -} - -// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". -func (ema *EMA) WarmCount() int { - return ema.inTimePeriod - 1 -} - -// Warmed indicates whether the algorithm has enough data to generate accurate results. -func (ema *EMA) Warmed() bool { - return ema.count == ema.inTimePeriod -} - -// Last returns the last output value. -func (ema *EMA) Last() float64 { - return ema.last -} - -// Add adds a new sample value to the algorithm and returns the computed value. -func (ema *EMA) Add(v float64) float64 { - var avg float64 - if ema.count == 0 { - avg = v - } else { - lastAvg := ema.Last() - if !ema.Warmed() { - if ema.warmType == WarmSMA { - avg = (lastAvg*float64(ema.count) + v) / float64(ema.count+1) - } else { // ema.warmType == WarmEMA - // scale the alpha so that we don't excessively weight the result towards the first value - alpha := 2 / float64(ema.count+2) - avg = (v-lastAvg)*alpha + lastAvg - } - } else { - avg = (v-lastAvg)*ema.alpha + lastAvg - } - } - - ema.last = avg - if ema.count < ema.inTimePeriod { - // don't just keep incrementing to prevent potential overflow - ema.count++ - } - return avg -} - -// DEMA - Double Exponential Moving Average (https://en.wikipedia.org/wiki/Double_exponential_moving_average) -type DEMA struct { - ema1 EMA - ema2 EMA -} - -// NewDEMA constructs a new DEMA. -// -// When warmed with WarmSMA the first inTimePeriod samples will result in a simple average, switching to exponential moving average after warmup is complete. -// -// When warmed with WarmEMA the algorithm immediately starts using an exponential moving average for the output values. During the warmup period the alpha value is scaled to prevent unbalanced weighting on initial values. -func NewDEMA(inTimePeriod int, warmType WarmupType) *DEMA { - return &DEMA{ - ema1: *NewEMA(inTimePeriod, warmType), - ema2: *NewEMA(inTimePeriod, warmType), - } -} - -// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". -func (dema *DEMA) WarmCount() int { - if dema.ema1.warmType == WarmEMA { - return dema.ema1.WarmCount() - } - return dema.ema1.WarmCount() + dema.ema2.WarmCount() -} - -// Add adds a new sample value to the algorithm and returns the computed value. -func (dema *DEMA) Add(v float64) float64 { - avg1 := dema.ema1.Add(v) - var avg2 float64 - if dema.ema1.Warmed() || dema.ema1.warmType == WarmEMA { - avg2 = dema.ema2.Add(avg1) - } else { - avg2 = avg1 - } - return 2*avg1 - avg2 -} - -// Warmed indicates whether the algorithm has enough data to generate accurate results. -func (dema *DEMA) Warmed() bool { - return dema.ema2.Warmed() -} - -// TEMA - Triple Exponential Moving Average (https://en.wikipedia.org/wiki/Triple_exponential_moving_average) -type TEMA struct { - ema1 EMA - ema2 EMA - ema3 EMA -} - -// NewTEMA constructs a new TEMA. -// -// When warmed with WarmSMA the first inTimePeriod samples will result in a simple average, switching to exponential moving average after warmup is complete. -// -// When warmed with WarmEMA the algorithm immediately starts using an exponential moving average for the output values. During the warmup period the alpha value is scaled to prevent unbalanced weighting on initial values. 
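DEMA above combines a chained pair of EMAs as 2*avg1 - avg2, and TEMA below combines three as 3*avg1 - 3*avg2 + avg3, which cancels most of the extra lag introduced by repeated smoothing. A small numeric illustration of the combination step only (the values are made up):

package main

import "fmt"

func main() {
	// Suppose at some step the chained averages are:
	ema1, ema2, ema3 := 10.0, 9.0, 8.5 // EMA(v), EMA(EMA(v)), EMA(EMA(EMA(v)))

	dema := 2*ema1 - ema2          // 11.0
	tema := 3*ema1 - 3*ema2 + ema3 // 11.5

	fmt.Println(dema, tema)
}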
-func NewTEMA(inTimePeriod int, warmType WarmupType) *TEMA { - return &TEMA{ - ema1: *NewEMA(inTimePeriod, warmType), - ema2: *NewEMA(inTimePeriod, warmType), - ema3: *NewEMA(inTimePeriod, warmType), - } -} - -// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". -func (tema *TEMA) WarmCount() int { - if tema.ema1.warmType == WarmEMA { - return tema.ema1.WarmCount() - } - return tema.ema1.WarmCount() + tema.ema2.WarmCount() + tema.ema3.WarmCount() -} - -// Add adds a new sample value to the algorithm and returns the computed value. -func (tema *TEMA) Add(v float64) float64 { - avg1 := tema.ema1.Add(v) - var avg2 float64 - if tema.ema1.Warmed() || tema.ema1.warmType == WarmEMA { - avg2 = tema.ema2.Add(avg1) - } else { - avg2 = avg1 - } - var avg3 float64 - if tema.ema2.Warmed() || tema.ema2.warmType == WarmEMA { - avg3 = tema.ema3.Add(avg2) - } else { - avg3 = avg2 - } - return 3*avg1 - 3*avg2 + avg3 -} - -// Warmed indicates whether the algorithm has enough data to generate accurate results. -func (tema *TEMA) Warmed() bool { - return tema.ema3.Warmed() -} diff --git a/influxql/query/internal/gota/ema_test.go b/influxql/query/internal/gota/ema_test.go deleted file mode 100644 index 3114506783b..00000000000 --- a/influxql/query/internal/gota/ema_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package gota - -import "testing" - -func TestEMA(t *testing.T) { - list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} - - // expList is generated by the following code: - // expList, _ := talib.Ema(list, 10, nil) - expList := []float64{5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.136363636363637, 11.475206611570249, 11.570623591284749, 11.466873847414794, 11.200169511521196, 10.800138691244614, 10.291022565563775, 9.692654826370362, 9.021263039757569, 8.290124305256192, 7.510101704300521, 6.690083212609517, 5.837340810316878, 4.957824299350173} - - ema := NewEMA(10, WarmSMA) - var actList []float64 - for _, v := range list { - if vOut := ema.Add(v); ema.Warmed() { - actList = append(actList, vOut) - } - } - - if diff := diffFloats(expList, actList, 0.0000001); diff != "" { - t.Errorf("unexpected floats:\n%s", diff) - } -} - -func TestDEMA(t *testing.T) { - list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} - - // expList is generated by the following code: - // expList, _ := talib.Dema(list, 10, nil) - expList := []float64{13.568840926166246, 12.701748119313985, 11.701405062848783, 10.611872766773773, 9.465595022565749, 8.28616628396151, 7.090477085921927, 5.8903718513360275, 4.693925476073202, 3.5064225149113692, 2.331104912318361} - - dema := NewDEMA(10, WarmSMA) - var actList []float64 - for _, v := range list { - if vOut := dema.Add(v); dema.Warmed() { - actList = append(actList, vOut) - } - } - - if diff := diffFloats(expList, actList, 0.0000001); diff != "" { - t.Errorf("unexpected floats:\n%s", diff) - } -} - -func TestTEMA(t *testing.T) { - list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} - - // expList is generated by the following code: - // expList, _ := talib.Tema(list, 4, nil) - expList := []float64{10, 11, 12, 13, 14, 15, 14.431999999999995, 13.345600000000001, 12.155520000000001, 11, 9.906687999999997, 8.86563072, 7.8589122560000035, 6.871005491200005, 5.891160883200005, 4.912928706560004, 3.932955104051203, 2.9498469349785603, 1.9633255712030717, 
0.9736696408637435} - - tema := NewTEMA(4, WarmSMA) - var actList []float64 - for _, v := range list { - if vOut := tema.Add(v); tema.Warmed() { - actList = append(actList, vOut) - } - } - - if diff := diffFloats(expList, actList, 0.0000001); diff != "" { - t.Errorf("unexpected floats:\n%s", diff) - } -} - -func TestEmaWarmCount(t *testing.T) { - period := 9 - ema := NewEMA(period, WarmSMA) - - var i int - for i = 0; i < period*10; i++ { - ema.Add(float64(i)) - if ema.Warmed() { - break - } - } - - if got, want := i, ema.WarmCount(); got != want { - t.Errorf("unexpected warm count: got=%d want=%d", got, want) - } -} - -func TestDemaWarmCount(t *testing.T) { - period := 9 - dema := NewDEMA(period, WarmSMA) - - var i int - for i = 0; i < period*10; i++ { - dema.Add(float64(i)) - if dema.Warmed() { - break - } - } - - if got, want := i, dema.WarmCount(); got != want { - t.Errorf("unexpected warm count: got=%d want=%d", got, want) - } -} - -func TestTemaWarmCount(t *testing.T) { - period := 9 - tema := NewTEMA(period, WarmSMA) - - var i int - for i = 0; i < period*10; i++ { - tema.Add(float64(i)) - if tema.Warmed() { - break - } - } - - if got, want := i, tema.WarmCount(); got != want { - t.Errorf("unexpected warm count: got=%d want=%d", got, want) - } -} diff --git a/influxql/query/internal/gota/kama.go b/influxql/query/internal/gota/kama.go deleted file mode 100644 index a43f96d8e4f..00000000000 --- a/influxql/query/internal/gota/kama.go +++ /dev/null @@ -1,113 +0,0 @@ -package gota - -import ( - "math" -) - -// KER - Kaufman's Efficiency Ratio (http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:kaufman_s_adaptive_moving_average#efficiency_ratio_er) -type KER struct { - points []kerPoint - noise float64 - count int - idx int // index of newest point -} - -type kerPoint struct { - price float64 - diff float64 -} - -// NewKER constructs a new KER. -func NewKER(inTimePeriod int) *KER { - return &KER{ - points: make([]kerPoint, inTimePeriod), - } -} - -// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". -func (ker *KER) WarmCount() int { - return len(ker.points) -} - -// Add adds a new sample value to the algorithm and returns the computed value. -func (ker *KER) Add(v float64) float64 { - //TODO this does not return a sensible value if not warmed. - n := len(ker.points) - idxOldest := ker.idx + 1 - if idxOldest >= n { - idxOldest = 0 - } - - signal := math.Abs(v - ker.points[idxOldest].price) - - kp := kerPoint{ - price: v, - diff: math.Abs(v - ker.points[ker.idx].price), - } - ker.noise -= ker.points[idxOldest].diff - ker.noise += kp.diff - noise := ker.noise - - ker.idx = idxOldest - ker.points[ker.idx] = kp - - if !ker.Warmed() { - ker.count++ - } - - if signal == 0 || noise == 0 { - return 0 - } - return signal / noise -} - -// Warmed indicates whether the algorithm has enough data to generate accurate results. -func (ker *KER) Warmed() bool { - return ker.count == len(ker.points)+1 -} - -// KAMA - Kaufman's Adaptive Moving Average (http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:kaufman_s_adaptive_moving_average) -type KAMA struct { - ker KER - last float64 -} - -// NewKAMA constructs a new KAMA. -func NewKAMA(inTimePeriod int) *KAMA { - ker := NewKER(inTimePeriod) - return &KAMA{ - ker: *ker, - } -} - -// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". 
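KAMA adapts its step size to Kaufman's efficiency ratio computed by KER above: er = |change over the period| / sum of |bar-to-bar changes|, and Add below applies sc = (er*(2/(2+1) - 2/(30+1)) + 2/(30+1))^2 before moving the average by sc*(v - last). A naive batch sketch of one such step over a window (illustrative only):

package main

import (
	"fmt"
	"math"
)

// kamaStep computes one adaptive-moving-average step from a window of prices
// and the previous KAMA value, using the classic fast=2, slow=30 constants.
func kamaStep(window []float64, last float64) float64 {
	signal := math.Abs(window[len(window)-1] - window[0])
	var noise float64
	for i := 1; i < len(window); i++ {
		noise += math.Abs(window[i] - window[i-1])
	}
	er := 0.0
	if noise != 0 {
		er = signal / noise
	}
	fast, slow := 2.0/(2.0+1.0), 2.0/(30.0+1.0)
	sc := math.Pow(er*(fast-slow)+slow, 2)
	return last + sc*(window[len(window)-1]-last)
}

func main() {
	window := []float64{10, 11, 12, 13, 14} // strongly trending, so er = 1
	fmt.Println(kamaStep(window, 10))       // ≈ 11.78: with er = 1 the step uses the fast constant
}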
-func (kama *KAMA) WarmCount() int { - return kama.ker.WarmCount() -} - -// Add adds a new sample value to the algorithm and returns the computed value. -func (kama *KAMA) Add(v float64) float64 { - if !kama.Warmed() { - /* - // initialize with a simple moving average - kama.last = 0 - for _, v := range kama.ker.points[:kama.ker.count] { - kama.last += v - } - kama.last /= float64(kama.ker.count + 1) - */ - // initialize with the last value - kama.last = kama.ker.points[kama.ker.idx].price - } - - er := kama.ker.Add(v) - sc := math.Pow(er*(2.0/(2.0+1.0)-2.0/(30.0+1.0))+2.0/(30.0+1.0), 2) - - kama.last = kama.last + sc*(v-kama.last) - return kama.last -} - -// Warmed indicates whether the algorithm has enough data to generate accurate results. -func (kama *KAMA) Warmed() bool { - return kama.ker.Warmed() -} diff --git a/influxql/query/internal/gota/kama_test.go b/influxql/query/internal/gota/kama_test.go deleted file mode 100644 index d9a2f65815a..00000000000 --- a/influxql/query/internal/gota/kama_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package gota - -import "testing" - -func TestKER(t *testing.T) { - list := []float64{20, 21, 22, 23, 22, 21} - - expList := []float64{1, 1.0 / 3, 1.0 / 3} - - ker := NewKER(3) - var actList []float64 - for _, v := range list { - if vOut := ker.Add(v); ker.Warmed() { - actList = append(actList, vOut) - } - } - - if diff := diffFloats(expList, actList, 0.0000001); diff != "" { - t.Errorf("unexpected floats:\n%s", diff) - } -} - -func TestKAMA(t *testing.T) { - list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} - - // expList is generated by the following code: - // expList, _ := talib.Cmo(list, 10, nil) - expList := []float64{10.444444444444445, 11.135802469135802, 11.964334705075446, 12.869074836153025, 13.81615268675168, 13.871008014588556, 13.71308456353558, 13.553331356741122, 13.46599437575161, 13.4515677602438, 13.29930139347417, 12.805116570729284, 11.752584300922967, 10.036160535131103, 7.797866963961725, 6.109926091089847, 4.727736717272138, 3.5154092873734104, 2.3974496040963396} - - kama := NewKAMA(10) - var actList []float64 - for _, v := range list { - if vOut := kama.Add(v); kama.Warmed() { - actList = append(actList, vOut) - } - } - - if diff := diffFloats(expList, actList, 0.0000001); diff != "" { - t.Errorf("unexpected floats:\n%s", diff) - } -} - -func TestKAMAWarmCount(t *testing.T) { - period := 9 - kama := NewKAMA(period) - - var i int - for i = 0; i < period*10; i++ { - kama.Add(float64(i)) - if kama.Warmed() { - break - } - } - - if got, want := i, kama.WarmCount(); got != want { - t.Errorf("unexpected warm count: got=%d want=%d", got, want) - } -} - -var BenchmarkKAMAVal float64 - -func BenchmarkKAMA(b *testing.B) { - list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} - for n := 0; n < b.N; n++ { - kama := NewKAMA(5) - for _, v := range list { - BenchmarkKAMAVal = kama.Add(v) - } - } -} diff --git a/influxql/query/internal/gota/rsi.go b/influxql/query/internal/gota/rsi.go deleted file mode 100644 index 82811c3546a..00000000000 --- a/influxql/query/internal/gota/rsi.go +++ /dev/null @@ -1,48 +0,0 @@ -package gota - -// RSI - Relative Strength Index (http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:relative_strength_index_rsi) -type RSI struct { - emaUp EMA - emaDown EMA - lastV float64 -} - -// NewRSI constructs a new RSI. 
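NewRSI below reuses the EMA type with its alpha overridden to 1/inTimePeriod (Wilder smoothing of the up and down moves), and Last reports 100 - 100/(1 + avgUp/avgDown). A compact batch sketch using the same smoothing (illustrative, not the streaming type):

package main

import "fmt"

// rsi computes the Relative Strength Index with Wilder smoothing
// (alpha = 1/period), seeding the averages with a simple average of the
// first `period` moves.
func rsi(prices []float64, period int) float64 {
	var avgUp, avgDown float64
	for i := 1; i < len(prices); i++ {
		up, down := 0.0, 0.0
		if d := prices[i] - prices[i-1]; d > 0 {
			up = d
		} else {
			down = -d
		}
		if i <= period {
			// simple-average warmup over the first `period` moves
			avgUp += (up - avgUp) / float64(i)
			avgDown += (down - avgDown) / float64(i)
		} else {
			alpha := 1 / float64(period)
			avgUp += alpha * (up - avgUp)
			avgDown += alpha * (down - avgDown)
		}
	}
	return 100 - 100/(1+avgUp/avgDown)
}

func main() {
	prices := []float64{44, 45, 46, 45, 47, 48, 47, 49, 50, 49, 51}
	fmt.Println(rsi(prices, 10))
}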
-func NewRSI(inTimePeriod int, warmType WarmupType) *RSI { - ema := NewEMA(inTimePeriod+1, warmType) - ema.alpha = float64(1) / float64(inTimePeriod) - return &RSI{ - emaUp: *ema, - emaDown: *ema, - } -} - -// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". -func (rsi RSI) WarmCount() int { - return rsi.emaUp.WarmCount() -} - -// Warmed indicates whether the algorithm has enough data to generate accurate results. -func (rsi RSI) Warmed() bool { - return rsi.emaUp.Warmed() -} - -// Last returns the last output value. -func (rsi RSI) Last() float64 { - return 100 - (100 / (1 + rsi.emaUp.Last()/rsi.emaDown.Last())) -} - -// Add adds a new sample value to the algorithm and returns the computed value. -func (rsi *RSI) Add(v float64) float64 { - var up float64 - var down float64 - if v > rsi.lastV { - up = v - rsi.lastV - } else if v < rsi.lastV { - down = rsi.lastV - v - } - rsi.emaUp.Add(up) - rsi.emaDown.Add(down) - rsi.lastV = v - return rsi.Last() -} diff --git a/influxql/query/internal/gota/rsi_test.go b/influxql/query/internal/gota/rsi_test.go deleted file mode 100644 index 66675c3b6f8..00000000000 --- a/influxql/query/internal/gota/rsi_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package gota - -import "testing" - -func TestRSI(t *testing.T) { - list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} - - // expList is generated by the following code: - // expList, _ := talib.Rsi(list, 10, nil) - expList := []float64{100, 100, 100, 100, 100, 90, 81, 72.89999999999999, 65.61, 59.04899999999999, 53.144099999999995, 47.82969, 43.04672099999999, 38.74204889999999, 34.86784400999999, 31.381059608999994, 28.242953648099995, 25.418658283289997, 22.876792454961} - - rsi := NewRSI(10, WarmSMA) - var actList []float64 - for _, v := range list { - if vOut := rsi.Add(v); rsi.Warmed() { - actList = append(actList, vOut) - } - } - - if diff := diffFloats(expList, actList, 0.0000001); diff != "" { - t.Errorf("unexpected floats:\n%s", diff) - } -} diff --git a/influxql/query/internal/gota/trix.go b/influxql/query/internal/gota/trix.go deleted file mode 100644 index 0619e2122dd..00000000000 --- a/influxql/query/internal/gota/trix.go +++ /dev/null @@ -1,53 +0,0 @@ -package gota - -// Trix - TRIple Exponential average (http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:trix) -type TRIX struct { - ema1 EMA - ema2 EMA - ema3 EMA - last float64 - count int -} - -// NewTRIX constructs a new TRIX. -func NewTRIX(inTimePeriod int, warmType WarmupType) *TRIX { - ema1 := NewEMA(inTimePeriod, warmType) - ema2 := NewEMA(inTimePeriod, warmType) - ema3 := NewEMA(inTimePeriod, warmType) - return &TRIX{ - ema1: *ema1, - ema2: *ema2, - ema3: *ema3, - } -} - -// Add adds a new sample value to the algorithm and returns the computed value. -func (trix *TRIX) Add(v float64) float64 { - cur := trix.ema1.Add(v) - if trix.ema1.Warmed() || trix.ema1.warmType == WarmEMA { - cur = trix.ema2.Add(cur) - if trix.ema2.Warmed() || trix.ema2.warmType == WarmEMA { - cur = trix.ema3.Add(cur) - } - } - - rate := ((cur / trix.last) - 1) * 100 - trix.last = cur - if !trix.Warmed() && trix.ema3.Warmed() { - trix.count++ - } - return rate -} - -// WarmCount returns the number of samples that must be provided for the algorithm to be fully "warmed". 
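TRIX.Add above chains three EMAs and reports the one-step percentage rate of change of the triple-smoothed value, ((cur / last) - 1) * 100. A naive batch sketch of the same idea, with simplified warmup handling (illustrative only):

package main

import "fmt"

// emaSeries returns the running EMA of values with alpha = 2/(n+1).
func emaSeries(values []float64, n int) []float64 {
	alpha := 2 / float64(n+1)
	out := make([]float64, len(values))
	for i, v := range values {
		if i == 0 {
			out[i] = v
			continue
		}
		out[i] = out[i-1] + alpha*(v-out[i-1])
	}
	return out
}

// trix triple-smooths the series and returns the percent change of the
// smoothed value between the last two samples.
func trix(values []float64, n int) float64 {
	s := emaSeries(emaSeries(emaSeries(values, n), n), n)
	prev, cur := s[len(s)-2], s[len(s)-1]
	return (cur/prev - 1) * 100
}

func main() {
	fmt.Println(trix([]float64{1, 2, 3, 4, 5, 6, 7, 8}, 4)) // positive: the smoothed series is rising
}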
-func (trix *TRIX) WarmCount() int { - if trix.ema1.warmType == WarmEMA { - return trix.ema1.WarmCount() + 1 - } - return trix.ema1.WarmCount()*3 + 1 -} - -// Warmed indicates whether the algorithm has enough data to generate accurate results. -func (trix *TRIX) Warmed() bool { - return trix.count == 2 -} diff --git a/influxql/query/internal/gota/trix_test.go b/influxql/query/internal/gota/trix_test.go deleted file mode 100644 index e7b3933ee07..00000000000 --- a/influxql/query/internal/gota/trix_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package gota - -import "testing" - -func TestTRIX(t *testing.T) { - list := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1} - - // expList is generated by the following code: - // expList, _ := talib.Trix(list, 4, nil) - expList := []float64{18.181818181818187, 15.384615384615374, 13.33333333333333, 11.764705882352944, 10.526315789473696, 8.304761904761904, 5.641927541329594, 3.0392222148232007, 0.7160675740302658, -1.2848911076603242, -2.9999661985600667, -4.493448741755901, -5.836238000516913, -7.099092024379772, -8.352897627933453, -9.673028502435233, -11.147601363985949, -12.891818138458877, -15.074463280730022} - - trix := NewTRIX(4, WarmSMA) - var actList []float64 - for _, v := range list { - if vOut := trix.Add(v); trix.Warmed() { - actList = append(actList, vOut) - } - } - - if diff := diffFloats(expList, actList, 1e-7); diff != "" { - t.Errorf("unexpected floats:\n%s", diff) - } -} diff --git a/influxql/query/internal/gota/utils_test.go b/influxql/query/internal/gota/utils_test.go deleted file mode 100644 index a0b73607908..00000000000 --- a/influxql/query/internal/gota/utils_test.go +++ /dev/null @@ -1,10 +0,0 @@ -package gota - -import ( - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" -) - -func diffFloats(exp, act []float64, delta float64) string { - return cmp.Diff(exp, act, cmpopts.EquateApprox(0, delta)) -} diff --git a/influxql/query/internal/internal.pb.go b/influxql/query/internal/internal.pb.go deleted file mode 100644 index 80fcd2f87ce..00000000000 --- a/influxql/query/internal/internal.pb.go +++ /dev/null @@ -1,1038 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.27.1 -// protoc v3.17.3 -// source: internal/internal.proto - -package query - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Point struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` - Tags *string `protobuf:"bytes,2,req,name=Tags" json:"Tags,omitempty"` - Time *int64 `protobuf:"varint,3,req,name=Time" json:"Time,omitempty"` - Nil *bool `protobuf:"varint,4,req,name=Nil" json:"Nil,omitempty"` - Aux []*Aux `protobuf:"bytes,5,rep,name=Aux" json:"Aux,omitempty"` - Aggregated *uint32 `protobuf:"varint,6,opt,name=Aggregated" json:"Aggregated,omitempty"` - FloatValue *float64 `protobuf:"fixed64,7,opt,name=FloatValue" json:"FloatValue,omitempty"` - IntegerValue *int64 `protobuf:"varint,8,opt,name=IntegerValue" json:"IntegerValue,omitempty"` - StringValue *string `protobuf:"bytes,9,opt,name=StringValue" json:"StringValue,omitempty"` - BooleanValue *bool `protobuf:"varint,10,opt,name=BooleanValue" json:"BooleanValue,omitempty"` - UnsignedValue *uint64 `protobuf:"varint,12,opt,name=UnsignedValue" json:"UnsignedValue,omitempty"` - Stats *IteratorStats `protobuf:"bytes,11,opt,name=Stats" json:"Stats,omitempty"` - Trace []byte `protobuf:"bytes,13,opt,name=Trace" json:"Trace,omitempty"` -} - -func (x *Point) Reset() { - *x = Point{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_internal_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Point) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Point) ProtoMessage() {} - -func (x *Point) ProtoReflect() protoreflect.Message { - mi := &file_internal_internal_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Point.ProtoReflect.Descriptor instead. 
-func (*Point) Descriptor() ([]byte, []int) { - return file_internal_internal_proto_rawDescGZIP(), []int{0} -} - -func (x *Point) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *Point) GetTags() string { - if x != nil && x.Tags != nil { - return *x.Tags - } - return "" -} - -func (x *Point) GetTime() int64 { - if x != nil && x.Time != nil { - return *x.Time - } - return 0 -} - -func (x *Point) GetNil() bool { - if x != nil && x.Nil != nil { - return *x.Nil - } - return false -} - -func (x *Point) GetAux() []*Aux { - if x != nil { - return x.Aux - } - return nil -} - -func (x *Point) GetAggregated() uint32 { - if x != nil && x.Aggregated != nil { - return *x.Aggregated - } - return 0 -} - -func (x *Point) GetFloatValue() float64 { - if x != nil && x.FloatValue != nil { - return *x.FloatValue - } - return 0 -} - -func (x *Point) GetIntegerValue() int64 { - if x != nil && x.IntegerValue != nil { - return *x.IntegerValue - } - return 0 -} - -func (x *Point) GetStringValue() string { - if x != nil && x.StringValue != nil { - return *x.StringValue - } - return "" -} - -func (x *Point) GetBooleanValue() bool { - if x != nil && x.BooleanValue != nil { - return *x.BooleanValue - } - return false -} - -func (x *Point) GetUnsignedValue() uint64 { - if x != nil && x.UnsignedValue != nil { - return *x.UnsignedValue - } - return 0 -} - -func (x *Point) GetStats() *IteratorStats { - if x != nil { - return x.Stats - } - return nil -} - -func (x *Point) GetTrace() []byte { - if x != nil { - return x.Trace - } - return nil -} - -type Aux struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - DataType *int32 `protobuf:"varint,1,req,name=DataType" json:"DataType,omitempty"` - FloatValue *float64 `protobuf:"fixed64,2,opt,name=FloatValue" json:"FloatValue,omitempty"` - IntegerValue *int64 `protobuf:"varint,3,opt,name=IntegerValue" json:"IntegerValue,omitempty"` - StringValue *string `protobuf:"bytes,4,opt,name=StringValue" json:"StringValue,omitempty"` - BooleanValue *bool `protobuf:"varint,5,opt,name=BooleanValue" json:"BooleanValue,omitempty"` - UnsignedValue *uint64 `protobuf:"varint,6,opt,name=UnsignedValue" json:"UnsignedValue,omitempty"` -} - -func (x *Aux) Reset() { - *x = Aux{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_internal_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Aux) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Aux) ProtoMessage() {} - -func (x *Aux) ProtoReflect() protoreflect.Message { - mi := &file_internal_internal_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Aux.ProtoReflect.Descriptor instead. 
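The generated getters in this file follow the usual proto2 pattern: optional and required scalars are pointers so that field presence can be represented, and every getter nil-checks the message and the field before dereferencing, returning the zero value otherwise. The same pattern by hand, on a hypothetical struct:

package main

import "fmt"

// msg mimics a proto2-generated message: scalar fields are pointers so that
// "unset" is distinguishable from the zero value.
type msg struct {
	Name *string
	Time *int64
}

// GetName returns the field value, or "" when the message or field is nil,
// mirroring the generated getters in this file.
func (m *msg) GetName() string {
	if m != nil && m.Name != nil {
		return *m.Name
	}
	return ""
}

func main() {
	var m *msg
	fmt.Println(m.GetName() == "") // true: safe even on a nil receiver

	name := "cpu"
	m = &msg{Name: &name}
	fmt.Println(m.GetName()) // cpu
}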
-func (*Aux) Descriptor() ([]byte, []int) { - return file_internal_internal_proto_rawDescGZIP(), []int{1} -} - -func (x *Aux) GetDataType() int32 { - if x != nil && x.DataType != nil { - return *x.DataType - } - return 0 -} - -func (x *Aux) GetFloatValue() float64 { - if x != nil && x.FloatValue != nil { - return *x.FloatValue - } - return 0 -} - -func (x *Aux) GetIntegerValue() int64 { - if x != nil && x.IntegerValue != nil { - return *x.IntegerValue - } - return 0 -} - -func (x *Aux) GetStringValue() string { - if x != nil && x.StringValue != nil { - return *x.StringValue - } - return "" -} - -func (x *Aux) GetBooleanValue() bool { - if x != nil && x.BooleanValue != nil { - return *x.BooleanValue - } - return false -} - -func (x *Aux) GetUnsignedValue() uint64 { - if x != nil && x.UnsignedValue != nil { - return *x.UnsignedValue - } - return 0 -} - -type IteratorOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Expr *string `protobuf:"bytes,1,opt,name=Expr" json:"Expr,omitempty"` - Aux []string `protobuf:"bytes,2,rep,name=Aux" json:"Aux,omitempty"` - Fields []*VarRef `protobuf:"bytes,17,rep,name=Fields" json:"Fields,omitempty"` - Sources []*Measurement `protobuf:"bytes,3,rep,name=Sources" json:"Sources,omitempty"` - Interval *Interval `protobuf:"bytes,4,opt,name=Interval" json:"Interval,omitempty"` - Dimensions []string `protobuf:"bytes,5,rep,name=Dimensions" json:"Dimensions,omitempty"` - GroupBy []string `protobuf:"bytes,19,rep,name=GroupBy" json:"GroupBy,omitempty"` - Fill *int32 `protobuf:"varint,6,opt,name=Fill" json:"Fill,omitempty"` - FillValue *float64 `protobuf:"fixed64,7,opt,name=FillValue" json:"FillValue,omitempty"` - Condition *string `protobuf:"bytes,8,opt,name=Condition" json:"Condition,omitempty"` - StartTime *int64 `protobuf:"varint,9,opt,name=StartTime" json:"StartTime,omitempty"` - EndTime *int64 `protobuf:"varint,10,opt,name=EndTime" json:"EndTime,omitempty"` - Location *string `protobuf:"bytes,21,opt,name=Location" json:"Location,omitempty"` - Ascending *bool `protobuf:"varint,11,opt,name=Ascending" json:"Ascending,omitempty"` - Limit *int64 `protobuf:"varint,12,opt,name=Limit" json:"Limit,omitempty"` - Offset *int64 `protobuf:"varint,13,opt,name=Offset" json:"Offset,omitempty"` - SLimit *int64 `protobuf:"varint,14,opt,name=SLimit" json:"SLimit,omitempty"` - SOffset *int64 `protobuf:"varint,15,opt,name=SOffset" json:"SOffset,omitempty"` - StripName *bool `protobuf:"varint,22,opt,name=StripName" json:"StripName,omitempty"` - Dedupe *bool `protobuf:"varint,16,opt,name=Dedupe" json:"Dedupe,omitempty"` - MaxSeriesN *int64 `protobuf:"varint,18,opt,name=MaxSeriesN" json:"MaxSeriesN,omitempty"` - Ordered *bool `protobuf:"varint,20,opt,name=Ordered" json:"Ordered,omitempty"` -} - -func (x *IteratorOptions) Reset() { - *x = IteratorOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_internal_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *IteratorOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*IteratorOptions) ProtoMessage() {} - -func (x *IteratorOptions) ProtoReflect() protoreflect.Message { - mi := &file_internal_internal_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use 
IteratorOptions.ProtoReflect.Descriptor instead. -func (*IteratorOptions) Descriptor() ([]byte, []int) { - return file_internal_internal_proto_rawDescGZIP(), []int{2} -} - -func (x *IteratorOptions) GetExpr() string { - if x != nil && x.Expr != nil { - return *x.Expr - } - return "" -} - -func (x *IteratorOptions) GetAux() []string { - if x != nil { - return x.Aux - } - return nil -} - -func (x *IteratorOptions) GetFields() []*VarRef { - if x != nil { - return x.Fields - } - return nil -} - -func (x *IteratorOptions) GetSources() []*Measurement { - if x != nil { - return x.Sources - } - return nil -} - -func (x *IteratorOptions) GetInterval() *Interval { - if x != nil { - return x.Interval - } - return nil -} - -func (x *IteratorOptions) GetDimensions() []string { - if x != nil { - return x.Dimensions - } - return nil -} - -func (x *IteratorOptions) GetGroupBy() []string { - if x != nil { - return x.GroupBy - } - return nil -} - -func (x *IteratorOptions) GetFill() int32 { - if x != nil && x.Fill != nil { - return *x.Fill - } - return 0 -} - -func (x *IteratorOptions) GetFillValue() float64 { - if x != nil && x.FillValue != nil { - return *x.FillValue - } - return 0 -} - -func (x *IteratorOptions) GetCondition() string { - if x != nil && x.Condition != nil { - return *x.Condition - } - return "" -} - -func (x *IteratorOptions) GetStartTime() int64 { - if x != nil && x.StartTime != nil { - return *x.StartTime - } - return 0 -} - -func (x *IteratorOptions) GetEndTime() int64 { - if x != nil && x.EndTime != nil { - return *x.EndTime - } - return 0 -} - -func (x *IteratorOptions) GetLocation() string { - if x != nil && x.Location != nil { - return *x.Location - } - return "" -} - -func (x *IteratorOptions) GetAscending() bool { - if x != nil && x.Ascending != nil { - return *x.Ascending - } - return false -} - -func (x *IteratorOptions) GetLimit() int64 { - if x != nil && x.Limit != nil { - return *x.Limit - } - return 0 -} - -func (x *IteratorOptions) GetOffset() int64 { - if x != nil && x.Offset != nil { - return *x.Offset - } - return 0 -} - -func (x *IteratorOptions) GetSLimit() int64 { - if x != nil && x.SLimit != nil { - return *x.SLimit - } - return 0 -} - -func (x *IteratorOptions) GetSOffset() int64 { - if x != nil && x.SOffset != nil { - return *x.SOffset - } - return 0 -} - -func (x *IteratorOptions) GetStripName() bool { - if x != nil && x.StripName != nil { - return *x.StripName - } - return false -} - -func (x *IteratorOptions) GetDedupe() bool { - if x != nil && x.Dedupe != nil { - return *x.Dedupe - } - return false -} - -func (x *IteratorOptions) GetMaxSeriesN() int64 { - if x != nil && x.MaxSeriesN != nil { - return *x.MaxSeriesN - } - return 0 -} - -func (x *IteratorOptions) GetOrdered() bool { - if x != nil && x.Ordered != nil { - return *x.Ordered - } - return false -} - -type Measurements struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Items []*Measurement `protobuf:"bytes,1,rep,name=Items" json:"Items,omitempty"` -} - -func (x *Measurements) Reset() { - *x = Measurements{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_internal_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Measurements) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Measurements) ProtoMessage() {} - -func (x *Measurements) ProtoReflect() protoreflect.Message { - mi := &file_internal_internal_proto_msgTypes[3] - if 
protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Measurements.ProtoReflect.Descriptor instead. -func (*Measurements) Descriptor() ([]byte, []int) { - return file_internal_internal_proto_rawDescGZIP(), []int{3} -} - -func (x *Measurements) GetItems() []*Measurement { - if x != nil { - return x.Items - } - return nil -} - -type Measurement struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Database *string `protobuf:"bytes,1,opt,name=Database" json:"Database,omitempty"` - RetentionPolicy *string `protobuf:"bytes,2,opt,name=RetentionPolicy" json:"RetentionPolicy,omitempty"` - Name *string `protobuf:"bytes,3,opt,name=Name" json:"Name,omitempty"` - Regex *string `protobuf:"bytes,4,opt,name=Regex" json:"Regex,omitempty"` - IsTarget *bool `protobuf:"varint,5,opt,name=IsTarget" json:"IsTarget,omitempty"` - SystemIterator *string `protobuf:"bytes,6,opt,name=SystemIterator" json:"SystemIterator,omitempty"` -} - -func (x *Measurement) Reset() { - *x = Measurement{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_internal_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Measurement) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Measurement) ProtoMessage() {} - -func (x *Measurement) ProtoReflect() protoreflect.Message { - mi := &file_internal_internal_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Measurement.ProtoReflect.Descriptor instead. 
-func (*Measurement) Descriptor() ([]byte, []int) { - return file_internal_internal_proto_rawDescGZIP(), []int{4} -} - -func (x *Measurement) GetDatabase() string { - if x != nil && x.Database != nil { - return *x.Database - } - return "" -} - -func (x *Measurement) GetRetentionPolicy() string { - if x != nil && x.RetentionPolicy != nil { - return *x.RetentionPolicy - } - return "" -} - -func (x *Measurement) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *Measurement) GetRegex() string { - if x != nil && x.Regex != nil { - return *x.Regex - } - return "" -} - -func (x *Measurement) GetIsTarget() bool { - if x != nil && x.IsTarget != nil { - return *x.IsTarget - } - return false -} - -func (x *Measurement) GetSystemIterator() string { - if x != nil && x.SystemIterator != nil { - return *x.SystemIterator - } - return "" -} - -type Interval struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Duration *int64 `protobuf:"varint,1,opt,name=Duration" json:"Duration,omitempty"` - Offset *int64 `protobuf:"varint,2,opt,name=Offset" json:"Offset,omitempty"` -} - -func (x *Interval) Reset() { - *x = Interval{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_internal_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Interval) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Interval) ProtoMessage() {} - -func (x *Interval) ProtoReflect() protoreflect.Message { - mi := &file_internal_internal_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Interval.ProtoReflect.Descriptor instead. -func (*Interval) Descriptor() ([]byte, []int) { - return file_internal_internal_proto_rawDescGZIP(), []int{5} -} - -func (x *Interval) GetDuration() int64 { - if x != nil && x.Duration != nil { - return *x.Duration - } - return 0 -} - -func (x *Interval) GetOffset() int64 { - if x != nil && x.Offset != nil { - return *x.Offset - } - return 0 -} - -type IteratorStats struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SeriesN *int64 `protobuf:"varint,1,opt,name=SeriesN" json:"SeriesN,omitempty"` - PointN *int64 `protobuf:"varint,2,opt,name=PointN" json:"PointN,omitempty"` -} - -func (x *IteratorStats) Reset() { - *x = IteratorStats{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_internal_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *IteratorStats) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*IteratorStats) ProtoMessage() {} - -func (x *IteratorStats) ProtoReflect() protoreflect.Message { - mi := &file_internal_internal_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use IteratorStats.ProtoReflect.Descriptor instead. 
-func (*IteratorStats) Descriptor() ([]byte, []int) { - return file_internal_internal_proto_rawDescGZIP(), []int{6} -} - -func (x *IteratorStats) GetSeriesN() int64 { - if x != nil && x.SeriesN != nil { - return *x.SeriesN - } - return 0 -} - -func (x *IteratorStats) GetPointN() int64 { - if x != nil && x.PointN != nil { - return *x.PointN - } - return 0 -} - -type VarRef struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Val *string `protobuf:"bytes,1,req,name=Val" json:"Val,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=Type" json:"Type,omitempty"` -} - -func (x *VarRef) Reset() { - *x = VarRef{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_internal_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *VarRef) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VarRef) ProtoMessage() {} - -func (x *VarRef) ProtoReflect() protoreflect.Message { - mi := &file_internal_internal_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VarRef.ProtoReflect.Descriptor instead. -func (*VarRef) Descriptor() ([]byte, []int) { - return file_internal_internal_proto_rawDescGZIP(), []int{7} -} - -func (x *VarRef) GetVal() string { - if x != nil && x.Val != nil { - return *x.Val - } - return "" -} - -func (x *VarRef) GetType() int32 { - if x != nil && x.Type != nil { - return *x.Type - } - return 0 -} - -var File_internal_internal_proto protoreflect.FileDescriptor - -var file_internal_internal_proto_rawDesc = []byte{ - 0x0a, 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x22, 0x85, 0x03, 0x0a, 0x05, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x54, 0x61, - 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x02, 0x28, 0x03, - 0x52, 0x04, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x4e, 0x69, 0x6c, 0x18, 0x04, 0x20, - 0x02, 0x28, 0x08, 0x52, 0x03, 0x4e, 0x69, 0x6c, 0x12, 0x1c, 0x0a, 0x03, 0x41, 0x75, 0x78, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x41, 0x75, - 0x78, 0x52, 0x03, 0x41, 0x75, 0x78, 0x12, 0x1e, 0x0a, 0x0a, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x41, 0x67, 0x67, 0x72, - 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x46, 0x6c, 0x6f, 0x61, - 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x65, - 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x49, 0x6e, - 0x74, 0x65, 0x67, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x22, 0x0a, 0x0c, - 
0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0c, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x24, 0x0a, 0x0d, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, - 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x49, 0x74, - 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x53, 0x74, 0x61, - 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x72, 0x61, 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x05, 0x54, 0x72, 0x61, 0x63, 0x65, 0x22, 0xd1, 0x01, 0x0a, 0x03, 0x41, 0x75, 0x78, - 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x02, - 0x28, 0x05, 0x52, 0x08, 0x44, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, - 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x22, 0x0a, 0x0c, - 0x49, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0c, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x20, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, - 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, - 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x55, - 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x85, 0x05, 0x0a, - 0x0f, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x12, 0x0a, 0x04, 0x45, 0x78, 0x70, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x45, 0x78, 0x70, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x41, 0x75, 0x78, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x03, 0x41, 0x75, 0x78, 0x12, 0x25, 0x0a, 0x06, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, - 0x18, 0x11, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, - 0x61, 0x72, 0x52, 0x65, 0x66, 0x52, 0x06, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x2c, 0x0a, - 0x07, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x52, 0x07, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x08, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x52, 0x08, - 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x69, 0x6d, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x69, - 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x47, 0x72, 0x6f, 0x75, - 0x70, 0x42, 0x79, 0x18, 
0x13, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x47, 0x72, 0x6f, 0x75, 0x70, - 0x42, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x04, 0x46, 0x69, 0x6c, 0x6c, 0x12, 0x1c, 0x0a, 0x09, 0x46, 0x69, 0x6c, 0x6c, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x46, 0x69, 0x6c, 0x6c, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x45, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x07, 0x45, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x4c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x4c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x41, 0x73, 0x63, 0x65, 0x6e, - 0x64, 0x69, 0x6e, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x05, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x4f, 0x66, - 0x66, 0x73, 0x65, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4f, 0x66, 0x66, 0x73, - 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x0e, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x06, 0x53, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x4f, - 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x53, 0x4f, 0x66, - 0x66, 0x73, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x74, 0x72, 0x69, 0x70, 0x4e, 0x61, 0x6d, - 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x53, 0x74, 0x72, 0x69, 0x70, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x65, 0x64, 0x75, 0x70, 0x65, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x06, 0x44, 0x65, 0x64, 0x75, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x4d, 0x61, - 0x78, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, - 0x4d, 0x61, 0x78, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4e, 0x12, 0x18, 0x0a, 0x07, 0x4f, 0x72, - 0x64, 0x65, 0x72, 0x65, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x4f, 0x72, 0x64, - 0x65, 0x72, 0x65, 0x64, 0x22, 0x38, 0x0a, 0x0c, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x28, 0x0a, 0x05, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x61, 0x73, - 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0xc1, - 0x01, 0x0a, 0x0b, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1a, - 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x52, 0x65, - 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 
0x12, 0x14, 0x0a, 0x05, 0x52, 0x65, 0x67, 0x65, - 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x1a, - 0x0a, 0x08, 0x49, 0x73, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x08, 0x49, 0x73, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x53, 0x79, - 0x73, 0x74, 0x65, 0x6d, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, - 0x6f, 0x72, 0x22, 0x3e, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x1a, - 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x4f, 0x66, - 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4f, 0x66, 0x66, 0x73, - 0x65, 0x74, 0x22, 0x41, 0x0a, 0x0d, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4e, 0x12, 0x16, 0x0a, - 0x06, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x4e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x50, - 0x6f, 0x69, 0x6e, 0x74, 0x4e, 0x22, 0x2e, 0x0a, 0x06, 0x56, 0x61, 0x72, 0x52, 0x65, 0x66, 0x12, - 0x10, 0x0a, 0x03, 0x56, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x03, 0x56, 0x61, - 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x04, 0x54, 0x79, 0x70, 0x65, 0x42, 0x09, 0x5a, 0x07, 0x2e, 0x3b, 0x71, 0x75, 0x65, 0x72, 0x79, -} - -var ( - file_internal_internal_proto_rawDescOnce sync.Once - file_internal_internal_proto_rawDescData = file_internal_internal_proto_rawDesc -) - -func file_internal_internal_proto_rawDescGZIP() []byte { - file_internal_internal_proto_rawDescOnce.Do(func() { - file_internal_internal_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_internal_proto_rawDescData) - }) - return file_internal_internal_proto_rawDescData -} - -var file_internal_internal_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_internal_internal_proto_goTypes = []interface{}{ - (*Point)(nil), // 0: query.Point - (*Aux)(nil), // 1: query.Aux - (*IteratorOptions)(nil), // 2: query.IteratorOptions - (*Measurements)(nil), // 3: query.Measurements - (*Measurement)(nil), // 4: query.Measurement - (*Interval)(nil), // 5: query.Interval - (*IteratorStats)(nil), // 6: query.IteratorStats - (*VarRef)(nil), // 7: query.VarRef -} -var file_internal_internal_proto_depIdxs = []int32{ - 1, // 0: query.Point.Aux:type_name -> query.Aux - 6, // 1: query.Point.Stats:type_name -> query.IteratorStats - 7, // 2: query.IteratorOptions.Fields:type_name -> query.VarRef - 4, // 3: query.IteratorOptions.Sources:type_name -> query.Measurement - 5, // 4: query.IteratorOptions.Interval:type_name -> query.Interval - 4, // 5: query.Measurements.Items:type_name -> query.Measurement - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name -} - -func init() { file_internal_internal_proto_init() } -func file_internal_internal_proto_init() { - if File_internal_internal_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - 
file_internal_internal_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Point); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_internal_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Aux); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_internal_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IteratorOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_internal_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Measurements); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_internal_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Measurement); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_internal_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Interval); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_internal_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IteratorStats); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_internal_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VarRef); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_internal_proto_rawDesc, - NumEnums: 0, - NumMessages: 8, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_internal_internal_proto_goTypes, - DependencyIndexes: file_internal_internal_proto_depIdxs, - MessageInfos: file_internal_internal_proto_msgTypes, - }.Build() - File_internal_internal_proto = out.File - file_internal_internal_proto_rawDesc = nil - file_internal_internal_proto_goTypes = nil - file_internal_internal_proto_depIdxs = nil -} diff --git a/influxql/query/internal/internal.proto b/influxql/query/internal/internal.proto deleted file mode 100644 index 0a501550384..00000000000 --- a/influxql/query/internal/internal.proto +++ /dev/null @@ -1,83 +0,0 @@ -syntax = "proto2"; -package query; -option go_package = ".;query"; - -message Point { - required string Name = 1; - required string Tags = 2; - required int64 Time = 3; - required bool Nil = 4; - repeated Aux Aux = 5; - optional uint32 Aggregated = 6; - - optional double FloatValue = 7; - optional int64 IntegerValue = 8; - optional string StringValue = 9; - optional bool BooleanValue = 10; - optional uint64 UnsignedValue = 12; - - optional IteratorStats Stats = 11; - optional bytes Trace = 13; -} - -message Aux { - required int32 DataType = 1; - optional double FloatValue = 2; - optional int64 IntegerValue = 3; - optional string StringValue = 4; 
- optional bool BooleanValue = 5; - optional uint64 UnsignedValue = 6; -} - -message IteratorOptions { - optional string Expr = 1; - repeated string Aux = 2; - repeated VarRef Fields = 17; - repeated Measurement Sources = 3; - optional Interval Interval = 4; - repeated string Dimensions = 5; - repeated string GroupBy = 19; - optional int32 Fill = 6; - optional double FillValue = 7; - optional string Condition = 8; - optional int64 StartTime = 9; - optional int64 EndTime = 10; - optional string Location = 21; - optional bool Ascending = 11; - optional int64 Limit = 12; - optional int64 Offset = 13; - optional int64 SLimit = 14; - optional int64 SOffset = 15; - optional bool StripName = 22; - optional bool Dedupe = 16; - optional int64 MaxSeriesN = 18; - optional bool Ordered = 20; -} - -message Measurements { - repeated Measurement Items = 1; -} - -message Measurement { - optional string Database = 1; - optional string RetentionPolicy = 2; - optional string Name = 3; - optional string Regex = 4; - optional bool IsTarget = 5; - optional string SystemIterator = 6; -} - -message Interval { - optional int64 Duration = 1; - optional int64 Offset = 2; -} - -message IteratorStats { - optional int64 SeriesN = 1; - optional int64 PointN = 2; -} - -message VarRef { - required string Val = 1; - optional int32 Type = 2; -} diff --git a/influxql/query/iterator.gen.go b/influxql/query/iterator.gen.go deleted file mode 100644 index d3dcafd571d..00000000000 --- a/influxql/query/iterator.gen.go +++ /dev/null @@ -1,13525 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! -// Source: iterator.gen.go.tmpl - -//lint:file-ignore U1000 this is generated code -package query - -import ( - "container/heap" - "context" - "io" - "sort" - "sync" - "time" - - "github.com/influxdata/influxql" - "google.golang.org/protobuf/proto" -) - -// DefaultStatsInterval is the default value for IteratorEncoder.StatsInterval. -const DefaultStatsInterval = time.Second - -// FloatIterator represents a stream of float points. -type FloatIterator interface { - Iterator - Next() (*FloatPoint, error) -} - -// newFloatIterators converts a slice of Iterator to a slice of FloatIterator. -// Drop and closes any iterator in itrs that is not a FloatIterator and cannot -// be cast to a FloatIterator. -func newFloatIterators(itrs []Iterator) []FloatIterator { - a := make([]FloatIterator, 0, len(itrs)) - for _, itr := range itrs { - switch itr := itr.(type) { - case FloatIterator: - a = append(a, itr) - default: - itr.Close() - } - } - return a -} - -// bufFloatIterator represents a buffered FloatIterator. -type bufFloatIterator struct { - itr FloatIterator - buf *FloatPoint -} - -// newBufFloatIterator returns a buffered FloatIterator. -func newBufFloatIterator(itr FloatIterator) *bufFloatIterator { - return &bufFloatIterator{itr: itr} -} - -// Stats returns statistics from the input iterator. -func (itr *bufFloatIterator) Stats() IteratorStats { return itr.itr.Stats() } - -// Close closes the underlying iterator. -func (itr *bufFloatIterator) Close() error { return itr.itr.Close() } - -// peek returns the next point without removing it from the iterator. -func (itr *bufFloatIterator) peek() (*FloatPoint, error) { - p, err := itr.Next() - if err != nil { - return nil, err - } - itr.unread(p) - return p, nil -} - -// peekTime returns the time of the next point. -// Returns zero time if no more points available. 
-func (itr *bufFloatIterator) peekTime() (int64, error) { - p, err := itr.peek() - if p == nil || err != nil { - return ZeroTime, err - } - return p.Time, nil -} - -// Next returns the current buffer, if exists, or calls the underlying iterator. -func (itr *bufFloatIterator) Next() (*FloatPoint, error) { - buf := itr.buf - if buf != nil { - itr.buf = nil - return buf, nil - } - return itr.itr.Next() -} - -// NextInWindow returns the next value if it is between [startTime, endTime). -// If the next value is outside the range then it is moved to the buffer. -func (itr *bufFloatIterator) NextInWindow(startTime, endTime int64) (*FloatPoint, error) { - v, err := itr.Next() - if v == nil || err != nil { - return nil, err - } else if t := v.Time; t >= endTime || t < startTime { - itr.unread(v) - return nil, nil - } - return v, nil -} - -// unread sets v to the buffer. It is read on the next call to Next(). -func (itr *bufFloatIterator) unread(v *FloatPoint) { itr.buf = v } - -// floatMergeIterator represents an iterator that combines multiple float iterators. -type floatMergeIterator struct { - inputs []FloatIterator - heap *floatMergeHeap - init bool - - closed bool - mu sync.RWMutex - - // Current iterator and window. - curr *floatMergeHeapItem - window struct { - name string - tags string - startTime int64 - endTime int64 - } -} - -// newFloatMergeIterator returns a new instance of floatMergeIterator. -func newFloatMergeIterator(inputs []FloatIterator, opt IteratorOptions) *floatMergeIterator { - itr := &floatMergeIterator{ - inputs: inputs, - heap: &floatMergeHeap{ - items: make([]*floatMergeHeapItem, 0, len(inputs)), - opt: opt, - }, - } - - // Initialize heap items. - for _, input := range inputs { - // Wrap in buffer, ignore any inputs without anymore points. - bufInput := newBufFloatIterator(input) - - // Append to the heap. - itr.heap.items = append(itr.heap.items, &floatMergeHeapItem{itr: bufInput}) - } - - return itr -} - -// Stats returns an aggregation of stats from the underlying iterators. -func (itr *floatMergeIterator) Stats() IteratorStats { - var stats IteratorStats - for _, input := range itr.inputs { - stats.Add(input.Stats()) - } - return stats -} - -// Close closes the underlying iterators. -func (itr *floatMergeIterator) Close() error { - itr.mu.Lock() - defer itr.mu.Unlock() - - for _, input := range itr.inputs { - input.Close() - } - itr.curr = nil - itr.inputs = nil - itr.heap.items = nil - itr.closed = true - return nil -} - -// Next returns the next point from the iterator. -func (itr *floatMergeIterator) Next() (*FloatPoint, error) { - itr.mu.RLock() - defer itr.mu.RUnlock() - if itr.closed { - return nil, nil - } - - // Initialize the heap. This needs to be done lazily on the first call to this iterator - // so that iterator initialization done through the Select() call returns quickly. - // Queries can only be interrupted after the Select() call completes so any operations - // done during iterator creation cannot be interrupted, which is why we do it here - // instead so an interrupt can happen while initializing the heap. - if !itr.init { - items := itr.heap.items - itr.heap.items = make([]*floatMergeHeapItem, 0, len(items)) - for _, item := range items { - if p, err := item.itr.peek(); err != nil { - return nil, err - } else if p == nil { - continue - } - itr.heap.items = append(itr.heap.items, item) - } - heap.Init(itr.heap) - itr.init = true - } - - for { - // Retrieve the next iterator if we don't have one. 
- if itr.curr == nil { - if len(itr.heap.items) == 0 { - return nil, nil - } - itr.curr = heap.Pop(itr.heap).(*floatMergeHeapItem) - - // Read point and set current window. - p, err := itr.curr.itr.Next() - if err != nil { - return nil, err - } - tags := p.Tags.Subset(itr.heap.opt.Dimensions) - itr.window.name, itr.window.tags = p.Name, tags.ID() - itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) - return p, nil - } - - // Read the next point from the current iterator. - p, err := itr.curr.itr.Next() - if err != nil { - return nil, err - } - - // If there are no more points then remove iterator from heap and find next. - if p == nil { - itr.curr = nil - continue - } - - // Check if the point is inside of our current window. - inWindow := true - if window := itr.window; window.name != p.Name { - inWindow = false - } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { - inWindow = false - } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { - inWindow = false - } else if !opt.Ascending && p.Time < window.startTime { - inWindow = false - } - - // If it's outside our window then push iterator back on the heap and find new iterator. - if !inWindow { - itr.curr.itr.unread(p) - heap.Push(itr.heap, itr.curr) - itr.curr = nil - continue - } - - return p, nil - } -} - -// floatMergeHeap represents a heap of floatMergeHeapItems. -// Items are sorted by their next window and then by name/tags. -type floatMergeHeap struct { - opt IteratorOptions - items []*floatMergeHeapItem -} - -func (h *floatMergeHeap) Len() int { return len(h.items) } -func (h *floatMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h *floatMergeHeap) Less(i, j int) bool { - x, err := h.items[i].itr.peek() - if err != nil { - return true - } - y, err := h.items[j].itr.peek() - if err != nil { - return false - } - - if h.opt.Ascending { - if x.Name != y.Name { - return x.Name < y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { - return xTags.ID() < yTags.ID() - } - } else { - if x.Name != y.Name { - return x.Name > y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { - return xTags.ID() > yTags.ID() - } - } - - xt, _ := h.opt.Window(x.Time) - yt, _ := h.opt.Window(y.Time) - - if h.opt.Ascending { - return xt < yt - } - return xt > yt -} - -func (h *floatMergeHeap) Push(x interface{}) { - h.items = append(h.items, x.(*floatMergeHeapItem)) -} - -func (h *floatMergeHeap) Pop() interface{} { - old := h.items - n := len(old) - item := old[n-1] - h.items = old[0 : n-1] - return item -} - -type floatMergeHeapItem struct { - itr *bufFloatIterator -} - -// floatSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. -type floatSortedMergeIterator struct { - inputs []FloatIterator - heap *floatSortedMergeHeap - init bool -} - -// newFloatSortedMergeIterator returns an instance of floatSortedMergeIterator. -func newFloatSortedMergeIterator(inputs []FloatIterator, opt IteratorOptions) Iterator { - itr := &floatSortedMergeIterator{ - inputs: inputs, - heap: &floatSortedMergeHeap{ - items: make([]*floatSortedMergeHeapItem, 0, len(inputs)), - opt: opt, - }, - } - - // Initialize heap items. - for _, input := range inputs { - // Append to the heap. 
- itr.heap.items = append(itr.heap.items, &floatSortedMergeHeapItem{itr: input}) - } - - return itr -} - -// Stats returns an aggregation of stats from the underlying iterators. -func (itr *floatSortedMergeIterator) Stats() IteratorStats { - var stats IteratorStats - for _, input := range itr.inputs { - stats.Add(input.Stats()) - } - return stats -} - -// Close closes the underlying iterators. -func (itr *floatSortedMergeIterator) Close() error { - for _, input := range itr.inputs { - input.Close() - } - return nil -} - -// Next returns the next points from the iterator. -func (itr *floatSortedMergeIterator) Next() (*FloatPoint, error) { return itr.pop() } - -// pop returns the next point from the heap. -// Reads the next point from item's cursor and puts it back on the heap. -func (itr *floatSortedMergeIterator) pop() (*FloatPoint, error) { - // Initialize the heap. See the MergeIterator to see why this has to be done lazily. - if !itr.init { - items := itr.heap.items - itr.heap.items = make([]*floatSortedMergeHeapItem, 0, len(items)) - for _, item := range items { - var err error - if item.point, err = item.itr.Next(); err != nil { - return nil, err - } else if item.point == nil { - continue - } - itr.heap.items = append(itr.heap.items, item) - } - heap.Init(itr.heap) - itr.init = true - } - - if len(itr.heap.items) == 0 { - return nil, nil - } - - // Read the next item from the heap. - item := heap.Pop(itr.heap).(*floatSortedMergeHeapItem) - if item.err != nil { - return nil, item.err - } else if item.point == nil { - return nil, nil - } - - // Copy the point for return. - p := item.point.Clone() - - // Read the next item from the cursor. Push back to heap if one exists. - if item.point, item.err = item.itr.Next(); item.point != nil { - heap.Push(itr.heap, item) - } - - return p, nil -} - -// floatSortedMergeHeap represents a heap of floatSortedMergeHeapItems. -// Items are sorted with the following priority: -// - By their measurement name; -// - By their tag keys/values; -// - By time; or -// - By their Aux field values. -type floatSortedMergeHeap struct { - opt IteratorOptions - items []*floatSortedMergeHeapItem -} - -func (h *floatSortedMergeHeap) Len() int { return len(h.items) } -func (h *floatSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h *floatSortedMergeHeap) Less(i, j int) bool { - x, y := h.items[i].point, h.items[j].point - - if h.opt.Ascending { - if x.Name != y.Name { - return x.Name < y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { - return xTags.ID() < yTags.ID() - } - - if x.Time != y.Time { - return x.Time < y.Time - } - - if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { - for i := 0; i < len(x.Aux); i++ { - v1, ok1 := x.Aux[i].(string) - v2, ok2 := y.Aux[i].(string) - if !ok1 || !ok2 { - // Unsupported types used in Aux fields. Maybe they - // need to be added here? - return false - } else if v1 == v2 { - continue - } - return v1 < v2 - } - } - return false // Times and/or Aux fields are equal. 
- } - - if x.Name != y.Name { - return x.Name > y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { - return xTags.ID() > yTags.ID() - } - - if x.Time != y.Time { - return x.Time > y.Time - } - - if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { - for i := 0; i < len(x.Aux); i++ { - v1, ok1 := x.Aux[i].(string) - v2, ok2 := y.Aux[i].(string) - if !ok1 || !ok2 { - // Unsupported types used in Aux fields. Maybe they - // need to be added here? - return false - } else if v1 == v2 { - continue - } - return v1 > v2 - } - } - return false // Times and/or Aux fields are equal. -} - -func (h *floatSortedMergeHeap) Push(x interface{}) { - h.items = append(h.items, x.(*floatSortedMergeHeapItem)) -} - -func (h *floatSortedMergeHeap) Pop() interface{} { - old := h.items - n := len(old) - item := old[n-1] - h.items = old[0 : n-1] - return item -} - -type floatSortedMergeHeapItem struct { - point *FloatPoint - err error - itr FloatIterator -} - -// floatIteratorScanner scans the results of a FloatIterator into a map. -type floatIteratorScanner struct { - input *bufFloatIterator - err error - keys []influxql.VarRef - defaultValue interface{} -} - -// newFloatIteratorScanner creates a new IteratorScanner. -func newFloatIteratorScanner(input FloatIterator, keys []influxql.VarRef, defaultValue interface{}) *floatIteratorScanner { - return &floatIteratorScanner{ - input: newBufFloatIterator(input), - keys: keys, - defaultValue: defaultValue, - } -} - -func (s *floatIteratorScanner) Peek() (int64, string, Tags) { - if s.err != nil { - return ZeroTime, "", Tags{} - } - - p, err := s.input.peek() - if err != nil { - s.err = err - return ZeroTime, "", Tags{} - } else if p == nil { - return ZeroTime, "", Tags{} - } - return p.Time, p.Name, p.Tags -} - -func (s *floatIteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) { - if s.err != nil { - return - } - - p, err := s.input.Next() - if err != nil { - s.err = err - return - } else if p == nil { - s.useDefaults(m) - return - } else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) { - s.useDefaults(m) - s.input.unread(p) - return - } - - if k := s.keys[0]; k.Val != "" { - if p.Nil { - if s.defaultValue != SkipDefault { - m[k.Val] = castToType(s.defaultValue, k.Type) - } - } else { - m[k.Val] = p.Value - } - } - for i, v := range p.Aux { - k := s.keys[i+1] - switch v.(type) { - case float64, int64, uint64, string, bool: - m[k.Val] = v - default: - // Insert the fill value if one was specified. - if s.defaultValue != SkipDefault { - m[k.Val] = castToType(s.defaultValue, k.Type) - } - } - } -} - -func (s *floatIteratorScanner) useDefaults(m map[string]interface{}) { - if s.defaultValue == SkipDefault { - return - } - for _, k := range s.keys { - if k.Val == "" { - continue - } - m[k.Val] = castToType(s.defaultValue, k.Type) - } -} - -func (s *floatIteratorScanner) Stats() IteratorStats { return s.input.Stats() } -func (s *floatIteratorScanner) Err() error { return s.err } -func (s *floatIteratorScanner) Close() error { return s.input.Close() } - -// floatParallelIterator represents an iterator that pulls data in a separate goroutine. -type floatParallelIterator struct { - input FloatIterator - ch chan floatPointError - - once sync.Once - closing chan struct{} - wg sync.WaitGroup -} - -// newFloatParallelIterator returns a new instance of floatParallelIterator. 
-func newFloatParallelIterator(input FloatIterator) *floatParallelIterator { - itr := &floatParallelIterator{ - input: input, - ch: make(chan floatPointError, 256), - closing: make(chan struct{}), - } - itr.wg.Add(1) - go itr.monitor() - return itr -} - -// Stats returns stats from the underlying iterator. -func (itr *floatParallelIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the underlying iterators. -func (itr *floatParallelIterator) Close() error { - itr.once.Do(func() { close(itr.closing) }) - itr.wg.Wait() - return itr.input.Close() -} - -// Next returns the next point from the iterator. -func (itr *floatParallelIterator) Next() (*FloatPoint, error) { - v, ok := <-itr.ch - if !ok { - return nil, io.EOF - } - return v.point, v.err -} - -// monitor runs in a separate goroutine and actively pulls the next point. -func (itr *floatParallelIterator) monitor() { - defer close(itr.ch) - defer itr.wg.Done() - - for { - // Read next point. - p, err := itr.input.Next() - if p != nil { - p = p.Clone() - } - - select { - case <-itr.closing: - return - case itr.ch <- floatPointError{point: p, err: err}: - } - } -} - -type floatPointError struct { - point *FloatPoint - err error -} - -// floatLimitIterator represents an iterator that limits points per group. -type floatLimitIterator struct { - input FloatIterator - opt IteratorOptions - n int - - prev struct { - name string - tags Tags - } -} - -// newFloatLimitIterator returns a new instance of floatLimitIterator. -func newFloatLimitIterator(input FloatIterator, opt IteratorOptions) *floatLimitIterator { - return &floatLimitIterator{ - input: input, - opt: opt, - } -} - -// Stats returns stats from the underlying iterator. -func (itr *floatLimitIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the underlying iterators. -func (itr *floatLimitIterator) Close() error { return itr.input.Close() } - -// Next returns the next point from the iterator. -func (itr *floatLimitIterator) Next() (*FloatPoint, error) { - for { - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Reset window and counter if a new window is encountered. - if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { - itr.prev.name = p.Name - itr.prev.tags = p.Tags - itr.n = 0 - } - - // Increment counter. - itr.n++ - - // Read next point if not beyond the offset. - if itr.n <= itr.opt.Offset { - continue - } - - // Read next point if we're beyond the limit. 
- if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { - continue - } - - return p, nil - } -} - -type floatFillIterator struct { - input *bufFloatIterator - prev FloatPoint - startTime int64 - endTime int64 - auxFields []interface{} - init bool - opt IteratorOptions - - window struct { - name string - tags Tags - time int64 - offset int64 - } -} - -func newFloatFillIterator(input FloatIterator, expr influxql.Expr, opt IteratorOptions) *floatFillIterator { - if opt.Fill == influxql.NullFill { - if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" { - opt.Fill = influxql.NumberFill - opt.FillValue = float64(0) - } - } - - var startTime, endTime int64 - if opt.Ascending { - startTime, _ = opt.Window(opt.StartTime) - endTime, _ = opt.Window(opt.EndTime) - } else { - startTime, _ = opt.Window(opt.EndTime) - endTime, _ = opt.Window(opt.StartTime) - } - - var auxFields []interface{} - if len(opt.Aux) > 0 { - auxFields = make([]interface{}, len(opt.Aux)) - } - - return &floatFillIterator{ - input: newBufFloatIterator(input), - prev: FloatPoint{Nil: true}, - startTime: startTime, - endTime: endTime, - auxFields: auxFields, - opt: opt, - } -} - -func (itr *floatFillIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *floatFillIterator) Close() error { return itr.input.Close() } - -func (itr *floatFillIterator) Next() (*FloatPoint, error) { - if !itr.init { - p, err := itr.input.peek() - if p == nil || err != nil { - return nil, err - } - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - if itr.startTime == influxql.MinTime { - itr.window.time, _ = itr.opt.Window(p.Time) - } - if itr.opt.Location != nil { - _, itr.window.offset = itr.opt.Zone(itr.window.time) - } - itr.init = true - } - - p, err := itr.input.Next() - if err != nil { - return nil, err - } - - // Check if the next point is outside of our window or is nil. - if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { - // If we are inside of an interval, unread the point and continue below to - // constructing a new point. - if itr.opt.Ascending && itr.window.time <= itr.endTime { - itr.input.unread(p) - p = nil - goto CONSTRUCT - } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { - itr.input.unread(p) - p = nil - goto CONSTRUCT - } - - // We are *not* in a current interval. If there is no next point, - // we are at the end of all intervals. - if p == nil { - return nil, nil - } - - // Set the new interval. - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - if itr.window.time == influxql.MinTime { - itr.window.time, _ = itr.opt.Window(p.Time) - } - if itr.opt.Location != nil { - _, itr.window.offset = itr.opt.Zone(itr.window.time) - } - itr.prev = FloatPoint{Nil: true} - } - - // Check if the point is our next expected point. 
-CONSTRUCT: - if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { - if p != nil { - itr.input.unread(p) - } - - p = &FloatPoint{ - Name: itr.window.name, - Tags: itr.window.tags, - Time: itr.window.time, - Aux: itr.auxFields, - } - - switch itr.opt.Fill { - case influxql.LinearFill: - if !itr.prev.Nil { - next, err := itr.input.peek() - if err != nil { - return nil, err - } else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() { - interval := int64(itr.opt.Interval.Duration) - start := itr.window.time / interval - p.Value = linearFloat(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value) - } else { - p.Nil = true - } - } else { - p.Nil = true - } - - case influxql.NullFill: - p.Nil = true - case influxql.NumberFill: - p.Value, _ = castToFloat(itr.opt.FillValue) - case influxql.PreviousFill: - if !itr.prev.Nil { - p.Value = itr.prev.Value - p.Nil = itr.prev.Nil - } else { - p.Nil = true - } - } - } else { - itr.prev = *p - } - - // Advance the expected time. Do not advance to a new window here - // as there may be lingering points with the same timestamp in the previous - // window. - if itr.opt.Ascending { - itr.window.time += int64(itr.opt.Interval.Duration) - } else { - itr.window.time -= int64(itr.opt.Interval.Duration) - } - - // Check to see if we have passed over an offset change and adjust the time - // to account for this new offset. - if itr.opt.Location != nil { - if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { - diff := itr.window.offset - offset - if abs(diff) < int64(itr.opt.Interval.Duration) { - itr.window.time += diff - } - itr.window.offset = offset - } - } - return p, nil -} - -// floatIntervalIterator represents a float implementation of IntervalIterator. -type floatIntervalIterator struct { - input FloatIterator - opt IteratorOptions -} - -func newFloatIntervalIterator(input FloatIterator, opt IteratorOptions) *floatIntervalIterator { - return &floatIntervalIterator{input: input, opt: opt} -} - -func (itr *floatIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *floatIntervalIterator) Close() error { return itr.input.Close() } - -func (itr *floatIntervalIterator) Next() (*FloatPoint, error) { - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - p.Time, _ = itr.opt.Window(p.Time) - // If we see the minimum allowable time, set the time to zero so we don't - // break the default returned time for aggregate queries without times. - if p.Time == influxql.MinTime { - p.Time = 0 - } - return p, nil -} - -// floatInterruptIterator represents a float implementation of InterruptIterator. -type floatInterruptIterator struct { - input FloatIterator - closing <-chan struct{} - count int -} - -func newFloatInterruptIterator(input FloatIterator, closing <-chan struct{}) *floatInterruptIterator { - return &floatInterruptIterator{input: input, closing: closing} -} - -func (itr *floatInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *floatInterruptIterator) Close() error { return itr.input.Close() } - -func (itr *floatInterruptIterator) Next() (*FloatPoint, error) { - // Only check if the channel is closed every N points. This - // intentionally checks on both 0 and N so that if the iterator - // has been interrupted before the first point is emitted it will - // not emit any points. 
- if itr.count&0xFF == 0xFF { - select { - case <-itr.closing: - return nil, itr.Close() - default: - // Reset iterator count to zero and fall through to emit the next point. - itr.count = 0 - } - } - - // Increment the counter for every point read. - itr.count++ - return itr.input.Next() -} - -// floatCloseInterruptIterator represents a float implementation of CloseInterruptIterator. -type floatCloseInterruptIterator struct { - input FloatIterator - closing <-chan struct{} - done chan struct{} - once sync.Once -} - -func newFloatCloseInterruptIterator(input FloatIterator, closing <-chan struct{}) *floatCloseInterruptIterator { - itr := &floatCloseInterruptIterator{ - input: input, - closing: closing, - done: make(chan struct{}), - } - go itr.monitor() - return itr -} - -func (itr *floatCloseInterruptIterator) monitor() { - select { - case <-itr.closing: - itr.Close() - case <-itr.done: - } -} - -func (itr *floatCloseInterruptIterator) Stats() IteratorStats { - return itr.input.Stats() -} - -func (itr *floatCloseInterruptIterator) Close() error { - itr.once.Do(func() { - close(itr.done) - itr.input.Close() - }) - return nil -} - -func (itr *floatCloseInterruptIterator) Next() (*FloatPoint, error) { - p, err := itr.input.Next() - if err != nil { - // Check if the iterator was closed. - select { - case <-itr.done: - return nil, nil - default: - return nil, err - } - } - return p, nil -} - -// floatReduceFloatIterator executes a reducer for every interval and buffers the result. -type floatReduceFloatIterator struct { - input *bufFloatIterator - create func() (FloatPointAggregator, FloatPointEmitter) - dims []string - opt IteratorOptions - points []FloatPoint - keepTags bool -} - -func newFloatReduceFloatIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, FloatPointEmitter)) *floatReduceFloatIterator { - return &floatReduceFloatIterator{ - input: newBufFloatIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *floatReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *floatReduceFloatIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *floatReduceFloatIterator) Next() (*FloatPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// floatReduceFloatPoint stores the reduced data for a name/tag combination. -type floatReduceFloatPoint struct { - Name string - Tags Tags - Aggregator FloatPointAggregator - Emitter FloatPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *floatReduceFloatIterator) reduce() ([]FloatPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. 
- itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*floatReduceFloatPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &floatReduceFloatPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateFloat(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]FloatPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = floatPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// floatStreamFloatIterator streams inputs into the iterator and emits points gradually. -type floatStreamFloatIterator struct { - input *bufFloatIterator - create func() (FloatPointAggregator, FloatPointEmitter) - dims []string - opt IteratorOptions - m map[string]*floatReduceFloatPoint - points []FloatPoint -} - -// newFloatStreamFloatIterator returns a new instance of floatStreamFloatIterator. -func newFloatStreamFloatIterator(input FloatIterator, createFn func() (FloatPointAggregator, FloatPointEmitter), opt IteratorOptions) *floatStreamFloatIterator { - return &floatStreamFloatIterator{ - input: newBufFloatIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*floatReduceFloatPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *floatStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *floatStreamFloatIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. 
-func (itr *floatStreamFloatIterator) Next() (*FloatPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *floatStreamFloatIterator) reduce() ([]FloatPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []FloatPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &floatReduceFloatPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateFloat(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// floatReduceIntegerIterator executes a reducer for every interval and buffers the result. -type floatReduceIntegerIterator struct { - input *bufFloatIterator - create func() (FloatPointAggregator, IntegerPointEmitter) - dims []string - opt IteratorOptions - points []IntegerPoint - keepTags bool -} - -func newFloatReduceIntegerIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, IntegerPointEmitter)) *floatReduceIntegerIterator { - return &floatReduceIntegerIterator{ - input: newBufFloatIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *floatReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *floatReduceIntegerIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *floatReduceIntegerIterator) Next() (*IntegerPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// floatReduceIntegerPoint stores the reduced data for a name/tag combination. 
-type floatReduceIntegerPoint struct { - Name string - Tags Tags - Aggregator FloatPointAggregator - Emitter IntegerPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *floatReduceIntegerIterator) reduce() ([]IntegerPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*floatReduceIntegerPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &floatReduceIntegerPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateFloat(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]IntegerPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = integerPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// floatStreamIntegerIterator streams inputs into the iterator and emits points gradually. -type floatStreamIntegerIterator struct { - input *bufFloatIterator - create func() (FloatPointAggregator, IntegerPointEmitter) - dims []string - opt IteratorOptions - m map[string]*floatReduceIntegerPoint - points []IntegerPoint -} - -// newFloatStreamIntegerIterator returns a new instance of floatStreamIntegerIterator. 
-func newFloatStreamIntegerIterator(input FloatIterator, createFn func() (FloatPointAggregator, IntegerPointEmitter), opt IteratorOptions) *floatStreamIntegerIterator { - return &floatStreamIntegerIterator{ - input: newBufFloatIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*floatReduceIntegerPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *floatStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *floatStreamIntegerIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *floatStreamIntegerIterator) Next() (*IntegerPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *floatStreamIntegerIterator) reduce() ([]IntegerPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []IntegerPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &floatReduceIntegerPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateFloat(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// floatReduceUnsignedIterator executes a reducer for every interval and buffers the result. -type floatReduceUnsignedIterator struct { - input *bufFloatIterator - create func() (FloatPointAggregator, UnsignedPointEmitter) - dims []string - opt IteratorOptions - points []UnsignedPoint - keepTags bool -} - -func newFloatReduceUnsignedIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, UnsignedPointEmitter)) *floatReduceUnsignedIterator { - return &floatReduceUnsignedIterator{ - input: newBufFloatIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. 
-func (itr *floatReduceUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *floatReduceUnsignedIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *floatReduceUnsignedIterator) Next() (*UnsignedPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// floatReduceUnsignedPoint stores the reduced data for a name/tag combination. -type floatReduceUnsignedPoint struct { - Name string - Tags Tags - Aggregator FloatPointAggregator - Emitter UnsignedPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *floatReduceUnsignedIterator) reduce() ([]UnsignedPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*floatReduceUnsignedPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &floatReduceUnsignedPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateFloat(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]UnsignedPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. 
- if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = unsignedPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// floatStreamUnsignedIterator streams inputs into the iterator and emits points gradually. -type floatStreamUnsignedIterator struct { - input *bufFloatIterator - create func() (FloatPointAggregator, UnsignedPointEmitter) - dims []string - opt IteratorOptions - m map[string]*floatReduceUnsignedPoint - points []UnsignedPoint -} - -// newFloatStreamUnsignedIterator returns a new instance of floatStreamUnsignedIterator. -func newFloatStreamUnsignedIterator(input FloatIterator, createFn func() (FloatPointAggregator, UnsignedPointEmitter), opt IteratorOptions) *floatStreamUnsignedIterator { - return &floatStreamUnsignedIterator{ - input: newBufFloatIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*floatReduceUnsignedPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *floatStreamUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *floatStreamUnsignedIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *floatStreamUnsignedIterator) Next() (*UnsignedPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *floatStreamUnsignedIterator) reduce() ([]UnsignedPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []UnsignedPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &floatReduceUnsignedPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateFloat(curr) - - // Attempt to emit points from the aggregator. 
- points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// floatReduceStringIterator executes a reducer for every interval and buffers the result. -type floatReduceStringIterator struct { - input *bufFloatIterator - create func() (FloatPointAggregator, StringPointEmitter) - dims []string - opt IteratorOptions - points []StringPoint - keepTags bool -} - -func newFloatReduceStringIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, StringPointEmitter)) *floatReduceStringIterator { - return &floatReduceStringIterator{ - input: newBufFloatIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *floatReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *floatReduceStringIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *floatReduceStringIterator) Next() (*StringPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// floatReduceStringPoint stores the reduced data for a name/tag combination. -type floatReduceStringPoint struct { - Name string - Tags Tags - Aggregator FloatPointAggregator - Emitter StringPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *floatReduceStringIterator) reduce() ([]StringPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*floatReduceStringPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. 
- rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &floatReduceStringPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateFloat(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]StringPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = stringPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// floatStreamStringIterator streams inputs into the iterator and emits points gradually. -type floatStreamStringIterator struct { - input *bufFloatIterator - create func() (FloatPointAggregator, StringPointEmitter) - dims []string - opt IteratorOptions - m map[string]*floatReduceStringPoint - points []StringPoint -} - -// newFloatStreamStringIterator returns a new instance of floatStreamStringIterator. -func newFloatStreamStringIterator(input FloatIterator, createFn func() (FloatPointAggregator, StringPointEmitter), opt IteratorOptions) *floatStreamStringIterator { - return &floatStreamStringIterator{ - input: newBufFloatIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*floatReduceStringPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *floatStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *floatStreamStringIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *floatStreamStringIterator) Next() (*StringPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *floatStreamStringIterator) reduce() ([]StringPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. 
- var points []StringPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &floatReduceStringPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateFloat(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// floatReduceBooleanIterator executes a reducer for every interval and buffers the result. -type floatReduceBooleanIterator struct { - input *bufFloatIterator - create func() (FloatPointAggregator, BooleanPointEmitter) - dims []string - opt IteratorOptions - points []BooleanPoint - keepTags bool -} - -func newFloatReduceBooleanIterator(input FloatIterator, opt IteratorOptions, createFn func() (FloatPointAggregator, BooleanPointEmitter)) *floatReduceBooleanIterator { - return &floatReduceBooleanIterator{ - input: newBufFloatIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *floatReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *floatReduceBooleanIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *floatReduceBooleanIterator) Next() (*BooleanPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// floatReduceBooleanPoint stores the reduced data for a name/tag combination. -type floatReduceBooleanPoint struct { - Name string - Tags Tags - Aggregator FloatPointAggregator - Emitter BooleanPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *floatReduceBooleanIterator) reduce() ([]BooleanPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*floatReduceBooleanPoint) - for { - // Read next point. 
- curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &floatReduceBooleanPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateFloat(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]BooleanPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = booleanPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// floatStreamBooleanIterator streams inputs into the iterator and emits points gradually. -type floatStreamBooleanIterator struct { - input *bufFloatIterator - create func() (FloatPointAggregator, BooleanPointEmitter) - dims []string - opt IteratorOptions - m map[string]*floatReduceBooleanPoint - points []BooleanPoint -} - -// newFloatStreamBooleanIterator returns a new instance of floatStreamBooleanIterator. -func newFloatStreamBooleanIterator(input FloatIterator, createFn func() (FloatPointAggregator, BooleanPointEmitter), opt IteratorOptions) *floatStreamBooleanIterator { - return &floatStreamBooleanIterator{ - input: newBufFloatIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*floatReduceBooleanPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *floatStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *floatStreamBooleanIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *floatStreamBooleanIterator) Next() (*BooleanPoint, error) { - // Calculate next window if we have no more points. 
- if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *floatStreamBooleanIterator) reduce() ([]BooleanPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []BooleanPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &floatReduceBooleanPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateFloat(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// floatDedupeIterator only outputs unique points. -// This differs from the DistinctIterator in that it compares all aux fields too. -// This iterator is relatively inefficient and should only be used on small -// datasets such as meta query results. 
-type floatDedupeIterator struct { - input FloatIterator - m map[string]struct{} // lookup of points already sent -} - -type floatIteratorMapper struct { - cur Cursor - row Row - driver IteratorMap // which iterator to use for the primary value, can be nil - fields []IteratorMap // which iterator to use for an aux field - point FloatPoint -} - -func newFloatIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *floatIteratorMapper { - return &floatIteratorMapper{ - cur: cur, - driver: driver, - fields: fields, - point: FloatPoint{ - Aux: make([]interface{}, len(fields)), - }, - } -} - -func (itr *floatIteratorMapper) Next() (*FloatPoint, error) { - if !itr.cur.Scan(&itr.row) { - if err := itr.cur.Err(); err != nil { - return nil, err - } - return nil, nil - } - - itr.point.Time = itr.row.Time - itr.point.Name = itr.row.Series.Name - itr.point.Tags = itr.row.Series.Tags - - if itr.driver != nil { - if v := itr.driver.Value(&itr.row); v != nil { - if v, ok := castToFloat(v); ok { - itr.point.Value = v - itr.point.Nil = false - } else { - itr.point.Value = 0 - itr.point.Nil = true - } - } else { - itr.point.Value = 0 - itr.point.Nil = true - } - } - for i, f := range itr.fields { - itr.point.Aux[i] = f.Value(&itr.row) - } - return &itr.point, nil -} - -func (itr *floatIteratorMapper) Stats() IteratorStats { - return itr.cur.Stats() -} - -func (itr *floatIteratorMapper) Close() error { - return itr.cur.Close() -} - -type floatFilterIterator struct { - input FloatIterator - cond influxql.Expr - opt IteratorOptions - m map[string]interface{} -} - -func newFloatFilterIterator(input FloatIterator, cond influxql.Expr, opt IteratorOptions) FloatIterator { - // Strip out time conditions from the WHERE clause. - // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. 
- n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node { - switch n := n.(type) { - case *influxql.BinaryExpr: - if n.LHS.String() == "time" { - return &influxql.BooleanLiteral{Val: true} - } - } - return n - }) - - cond, _ = n.(influxql.Expr) - if cond == nil { - return input - } else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val { - return input - } - - return &floatFilterIterator{ - input: input, - cond: cond, - opt: opt, - m: make(map[string]interface{}), - } -} - -func (itr *floatFilterIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *floatFilterIterator) Close() error { return itr.input.Close() } - -func (itr *floatFilterIterator) Next() (*FloatPoint, error) { - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } - - for i, ref := range itr.opt.Aux { - itr.m[ref.Val] = p.Aux[i] - } - for k, v := range p.Tags.KeyValues() { - itr.m[k] = v - } - - if !influxql.EvalBool(itr.cond, itr.m) { - continue - } - return p, nil - } -} - -type floatTagSubsetIterator struct { - input FloatIterator - point FloatPoint - lastTags Tags - dimensions []string -} - -func newFloatTagSubsetIterator(input FloatIterator, opt IteratorOptions) *floatTagSubsetIterator { - return &floatTagSubsetIterator{ - input: input, - dimensions: opt.GetDimensions(), - } -} - -func (itr *floatTagSubsetIterator) Next() (*FloatPoint, error) { - p, err := itr.input.Next() - if err != nil { - return nil, err - } else if p == nil { - return nil, nil - } - - itr.point.Name = p.Name - if !p.Tags.Equal(itr.lastTags) { - itr.point.Tags = p.Tags.Subset(itr.dimensions) - itr.lastTags = p.Tags - } - itr.point.Time = p.Time - itr.point.Value = p.Value - itr.point.Aux = p.Aux - itr.point.Aggregated = p.Aggregated - itr.point.Nil = p.Nil - return &itr.point, nil -} - -func (itr *floatTagSubsetIterator) Stats() IteratorStats { - return itr.input.Stats() -} - -func (itr *floatTagSubsetIterator) Close() error { - return itr.input.Close() -} - -// newFloatDedupeIterator returns a new instance of floatDedupeIterator. -func newFloatDedupeIterator(input FloatIterator) *floatDedupeIterator { - return &floatDedupeIterator{ - input: input, - m: make(map[string]struct{}), - } -} - -// Stats returns stats from the input iterator. -func (itr *floatDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *floatDedupeIterator) Close() error { return itr.input.Close() } - -// Next returns the next unique point from the input iterator. -func (itr *floatDedupeIterator) Next() (*FloatPoint, error) { - for { - // Read next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Serialize to bytes to store in lookup. - buf, err := proto.Marshal(encodeFloatPoint(p)) - if err != nil { - return nil, err - } - - // If the point has already been output then move to the next point. - if _, ok := itr.m[string(buf)]; ok { - continue - } - - // Otherwise mark it as emitted and return point. - itr.m[string(buf)] = struct{}{} - return p, nil - } -} - -// floatReaderIterator represents an iterator that streams from a reader. -type floatReaderIterator struct { - r io.Reader - dec *FloatPointDecoder -} - -// newFloatReaderIterator returns a new instance of floatReaderIterator. 
-func newFloatReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *floatReaderIterator { - dec := NewFloatPointDecoder(ctx, r) - dec.stats = stats - - return &floatReaderIterator{ - r: r, - dec: dec, - } -} - -// Stats returns stats about points processed. -func (itr *floatReaderIterator) Stats() IteratorStats { return itr.dec.stats } - -// Close closes the underlying reader, if applicable. -func (itr *floatReaderIterator) Close() error { - if r, ok := itr.r.(io.ReadCloser); ok { - return r.Close() - } - return nil -} - -// Next returns the next point from the iterator. -func (itr *floatReaderIterator) Next() (*FloatPoint, error) { - // OPTIMIZE(benbjohnson): Reuse point on iterator. - - // Unmarshal next point. - p := &FloatPoint{} - if err := itr.dec.DecodeFloatPoint(p); err == io.EOF { - return nil, nil - } else if err != nil { - return nil, err - } - return p, nil -} - -// IntegerIterator represents a stream of integer points. -type IntegerIterator interface { - Iterator - Next() (*IntegerPoint, error) -} - -// newIntegerIterators converts a slice of Iterator to a slice of IntegerIterator. -// Drop and closes any iterator in itrs that is not a IntegerIterator and cannot -// be cast to a IntegerIterator. -func newIntegerIterators(itrs []Iterator) []IntegerIterator { - a := make([]IntegerIterator, 0, len(itrs)) - for _, itr := range itrs { - switch itr := itr.(type) { - case IntegerIterator: - a = append(a, itr) - default: - itr.Close() - } - } - return a -} - -// bufIntegerIterator represents a buffered IntegerIterator. -type bufIntegerIterator struct { - itr IntegerIterator - buf *IntegerPoint -} - -// newBufIntegerIterator returns a buffered IntegerIterator. -func newBufIntegerIterator(itr IntegerIterator) *bufIntegerIterator { - return &bufIntegerIterator{itr: itr} -} - -// Stats returns statistics from the input iterator. -func (itr *bufIntegerIterator) Stats() IteratorStats { return itr.itr.Stats() } - -// Close closes the underlying iterator. -func (itr *bufIntegerIterator) Close() error { return itr.itr.Close() } - -// peek returns the next point without removing it from the iterator. -func (itr *bufIntegerIterator) peek() (*IntegerPoint, error) { - p, err := itr.Next() - if err != nil { - return nil, err - } - itr.unread(p) - return p, nil -} - -// peekTime returns the time of the next point. -// Returns zero time if no more points available. -func (itr *bufIntegerIterator) peekTime() (int64, error) { - p, err := itr.peek() - if p == nil || err != nil { - return ZeroTime, err - } - return p.Time, nil -} - -// Next returns the current buffer, if exists, or calls the underlying iterator. -func (itr *bufIntegerIterator) Next() (*IntegerPoint, error) { - buf := itr.buf - if buf != nil { - itr.buf = nil - return buf, nil - } - return itr.itr.Next() -} - -// NextInWindow returns the next value if it is between [startTime, endTime). -// If the next value is outside the range then it is moved to the buffer. -func (itr *bufIntegerIterator) NextInWindow(startTime, endTime int64) (*IntegerPoint, error) { - v, err := itr.Next() - if v == nil || err != nil { - return nil, err - } else if t := v.Time; t >= endTime || t < startTime { - itr.unread(v) - return nil, nil - } - return v, nil -} - -// unread sets v to the buffer. It is read on the next call to Next(). -func (itr *bufIntegerIterator) unread(v *IntegerPoint) { itr.buf = v } - -// integerMergeIterator represents an iterator that combines multiple integer iterators. 
-type integerMergeIterator struct { - inputs []IntegerIterator - heap *integerMergeHeap - init bool - - closed bool - mu sync.RWMutex - - // Current iterator and window. - curr *integerMergeHeapItem - window struct { - name string - tags string - startTime int64 - endTime int64 - } -} - -// newIntegerMergeIterator returns a new instance of integerMergeIterator. -func newIntegerMergeIterator(inputs []IntegerIterator, opt IteratorOptions) *integerMergeIterator { - itr := &integerMergeIterator{ - inputs: inputs, - heap: &integerMergeHeap{ - items: make([]*integerMergeHeapItem, 0, len(inputs)), - opt: opt, - }, - } - - // Initialize heap items. - for _, input := range inputs { - // Wrap in buffer, ignore any inputs without anymore points. - bufInput := newBufIntegerIterator(input) - - // Append to the heap. - itr.heap.items = append(itr.heap.items, &integerMergeHeapItem{itr: bufInput}) - } - - return itr -} - -// Stats returns an aggregation of stats from the underlying iterators. -func (itr *integerMergeIterator) Stats() IteratorStats { - var stats IteratorStats - for _, input := range itr.inputs { - stats.Add(input.Stats()) - } - return stats -} - -// Close closes the underlying iterators. -func (itr *integerMergeIterator) Close() error { - itr.mu.Lock() - defer itr.mu.Unlock() - - for _, input := range itr.inputs { - input.Close() - } - itr.curr = nil - itr.inputs = nil - itr.heap.items = nil - itr.closed = true - return nil -} - -// Next returns the next point from the iterator. -func (itr *integerMergeIterator) Next() (*IntegerPoint, error) { - itr.mu.RLock() - defer itr.mu.RUnlock() - if itr.closed { - return nil, nil - } - - // Initialize the heap. This needs to be done lazily on the first call to this iterator - // so that iterator initialization done through the Select() call returns quickly. - // Queries can only be interrupted after the Select() call completes so any operations - // done during iterator creation cannot be interrupted, which is why we do it here - // instead so an interrupt can happen while initializing the heap. - if !itr.init { - items := itr.heap.items - itr.heap.items = make([]*integerMergeHeapItem, 0, len(items)) - for _, item := range items { - if p, err := item.itr.peek(); err != nil { - return nil, err - } else if p == nil { - continue - } - itr.heap.items = append(itr.heap.items, item) - } - heap.Init(itr.heap) - itr.init = true - } - - for { - // Retrieve the next iterator if we don't have one. - if itr.curr == nil { - if len(itr.heap.items) == 0 { - return nil, nil - } - itr.curr = heap.Pop(itr.heap).(*integerMergeHeapItem) - - // Read point and set current window. - p, err := itr.curr.itr.Next() - if err != nil { - return nil, err - } - tags := p.Tags.Subset(itr.heap.opt.Dimensions) - itr.window.name, itr.window.tags = p.Name, tags.ID() - itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) - return p, nil - } - - // Read the next point from the current iterator. - p, err := itr.curr.itr.Next() - if err != nil { - return nil, err - } - - // If there are no more points then remove iterator from heap and find next. - if p == nil { - itr.curr = nil - continue - } - - // Check if the point is inside of our current window. 
- inWindow := true - if window := itr.window; window.name != p.Name { - inWindow = false - } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { - inWindow = false - } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { - inWindow = false - } else if !opt.Ascending && p.Time < window.startTime { - inWindow = false - } - - // If it's outside our window then push iterator back on the heap and find new iterator. - if !inWindow { - itr.curr.itr.unread(p) - heap.Push(itr.heap, itr.curr) - itr.curr = nil - continue - } - - return p, nil - } -} - -// integerMergeHeap represents a heap of integerMergeHeapItems. -// Items are sorted by their next window and then by name/tags. -type integerMergeHeap struct { - opt IteratorOptions - items []*integerMergeHeapItem -} - -func (h *integerMergeHeap) Len() int { return len(h.items) } -func (h *integerMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h *integerMergeHeap) Less(i, j int) bool { - x, err := h.items[i].itr.peek() - if err != nil { - return true - } - y, err := h.items[j].itr.peek() - if err != nil { - return false - } - - if h.opt.Ascending { - if x.Name != y.Name { - return x.Name < y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { - return xTags.ID() < yTags.ID() - } - } else { - if x.Name != y.Name { - return x.Name > y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { - return xTags.ID() > yTags.ID() - } - } - - xt, _ := h.opt.Window(x.Time) - yt, _ := h.opt.Window(y.Time) - - if h.opt.Ascending { - return xt < yt - } - return xt > yt -} - -func (h *integerMergeHeap) Push(x interface{}) { - h.items = append(h.items, x.(*integerMergeHeapItem)) -} - -func (h *integerMergeHeap) Pop() interface{} { - old := h.items - n := len(old) - item := old[n-1] - h.items = old[0 : n-1] - return item -} - -type integerMergeHeapItem struct { - itr *bufIntegerIterator -} - -// integerSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. -type integerSortedMergeIterator struct { - inputs []IntegerIterator - heap *integerSortedMergeHeap - init bool -} - -// newIntegerSortedMergeIterator returns an instance of integerSortedMergeIterator. -func newIntegerSortedMergeIterator(inputs []IntegerIterator, opt IteratorOptions) Iterator { - itr := &integerSortedMergeIterator{ - inputs: inputs, - heap: &integerSortedMergeHeap{ - items: make([]*integerSortedMergeHeapItem, 0, len(inputs)), - opt: opt, - }, - } - - // Initialize heap items. - for _, input := range inputs { - // Append to the heap. - itr.heap.items = append(itr.heap.items, &integerSortedMergeHeapItem{itr: input}) - } - - return itr -} - -// Stats returns an aggregation of stats from the underlying iterators. -func (itr *integerSortedMergeIterator) Stats() IteratorStats { - var stats IteratorStats - for _, input := range itr.inputs { - stats.Add(input.Stats()) - } - return stats -} - -// Close closes the underlying iterators. -func (itr *integerSortedMergeIterator) Close() error { - for _, input := range itr.inputs { - input.Close() - } - return nil -} - -// Next returns the next points from the iterator. -func (itr *integerSortedMergeIterator) Next() (*IntegerPoint, error) { return itr.pop() } - -// pop returns the next point from the heap. -// Reads the next point from item's cursor and puts it back on the heap. 
-func (itr *integerSortedMergeIterator) pop() (*IntegerPoint, error) { - // Initialize the heap. See the MergeIterator to see why this has to be done lazily. - if !itr.init { - items := itr.heap.items - itr.heap.items = make([]*integerSortedMergeHeapItem, 0, len(items)) - for _, item := range items { - var err error - if item.point, err = item.itr.Next(); err != nil { - return nil, err - } else if item.point == nil { - continue - } - itr.heap.items = append(itr.heap.items, item) - } - heap.Init(itr.heap) - itr.init = true - } - - if len(itr.heap.items) == 0 { - return nil, nil - } - - // Read the next item from the heap. - item := heap.Pop(itr.heap).(*integerSortedMergeHeapItem) - if item.err != nil { - return nil, item.err - } else if item.point == nil { - return nil, nil - } - - // Copy the point for return. - p := item.point.Clone() - - // Read the next item from the cursor. Push back to heap if one exists. - if item.point, item.err = item.itr.Next(); item.point != nil { - heap.Push(itr.heap, item) - } - - return p, nil -} - -// integerSortedMergeHeap represents a heap of integerSortedMergeHeapItems. -// Items are sorted with the following priority: -// - By their measurement name; -// - By their tag keys/values; -// - By time; or -// - By their Aux field values. -type integerSortedMergeHeap struct { - opt IteratorOptions - items []*integerSortedMergeHeapItem -} - -func (h *integerSortedMergeHeap) Len() int { return len(h.items) } -func (h *integerSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h *integerSortedMergeHeap) Less(i, j int) bool { - x, y := h.items[i].point, h.items[j].point - - if h.opt.Ascending { - if x.Name != y.Name { - return x.Name < y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { - return xTags.ID() < yTags.ID() - } - - if x.Time != y.Time { - return x.Time < y.Time - } - - if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { - for i := 0; i < len(x.Aux); i++ { - v1, ok1 := x.Aux[i].(string) - v2, ok2 := y.Aux[i].(string) - if !ok1 || !ok2 { - // Unsupported types used in Aux fields. Maybe they - // need to be added here? - return false - } else if v1 == v2 { - continue - } - return v1 < v2 - } - } - return false // Times and/or Aux fields are equal. - } - - if x.Name != y.Name { - return x.Name > y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { - return xTags.ID() > yTags.ID() - } - - if x.Time != y.Time { - return x.Time > y.Time - } - - if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { - for i := 0; i < len(x.Aux); i++ { - v1, ok1 := x.Aux[i].(string) - v2, ok2 := y.Aux[i].(string) - if !ok1 || !ok2 { - // Unsupported types used in Aux fields. Maybe they - // need to be added here? - return false - } else if v1 == v2 { - continue - } - return v1 > v2 - } - } - return false // Times and/or Aux fields are equal. -} - -func (h *integerSortedMergeHeap) Push(x interface{}) { - h.items = append(h.items, x.(*integerSortedMergeHeapItem)) -} - -func (h *integerSortedMergeHeap) Pop() interface{} { - old := h.items - n := len(old) - item := old[n-1] - h.items = old[0 : n-1] - return item -} - -type integerSortedMergeHeapItem struct { - point *IntegerPoint - err error - itr IntegerIterator -} - -// integerIteratorScanner scans the results of a IntegerIterator into a map. 
-type integerIteratorScanner struct { - input *bufIntegerIterator - err error - keys []influxql.VarRef - defaultValue interface{} -} - -// newIntegerIteratorScanner creates a new IteratorScanner. -func newIntegerIteratorScanner(input IntegerIterator, keys []influxql.VarRef, defaultValue interface{}) *integerIteratorScanner { - return &integerIteratorScanner{ - input: newBufIntegerIterator(input), - keys: keys, - defaultValue: defaultValue, - } -} - -func (s *integerIteratorScanner) Peek() (int64, string, Tags) { - if s.err != nil { - return ZeroTime, "", Tags{} - } - - p, err := s.input.peek() - if err != nil { - s.err = err - return ZeroTime, "", Tags{} - } else if p == nil { - return ZeroTime, "", Tags{} - } - return p.Time, p.Name, p.Tags -} - -func (s *integerIteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) { - if s.err != nil { - return - } - - p, err := s.input.Next() - if err != nil { - s.err = err - return - } else if p == nil { - s.useDefaults(m) - return - } else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) { - s.useDefaults(m) - s.input.unread(p) - return - } - - if k := s.keys[0]; k.Val != "" { - if p.Nil { - if s.defaultValue != SkipDefault { - m[k.Val] = castToType(s.defaultValue, k.Type) - } - } else { - m[k.Val] = p.Value - } - } - for i, v := range p.Aux { - k := s.keys[i+1] - switch v.(type) { - case float64, int64, uint64, string, bool: - m[k.Val] = v - default: - // Insert the fill value if one was specified. - if s.defaultValue != SkipDefault { - m[k.Val] = castToType(s.defaultValue, k.Type) - } - } - } -} - -func (s *integerIteratorScanner) useDefaults(m map[string]interface{}) { - if s.defaultValue == SkipDefault { - return - } - for _, k := range s.keys { - if k.Val == "" { - continue - } - m[k.Val] = castToType(s.defaultValue, k.Type) - } -} - -func (s *integerIteratorScanner) Stats() IteratorStats { return s.input.Stats() } -func (s *integerIteratorScanner) Err() error { return s.err } -func (s *integerIteratorScanner) Close() error { return s.input.Close() } - -// integerParallelIterator represents an iterator that pulls data in a separate goroutine. -type integerParallelIterator struct { - input IntegerIterator - ch chan integerPointError - - once sync.Once - closing chan struct{} - wg sync.WaitGroup -} - -// newIntegerParallelIterator returns a new instance of integerParallelIterator. -func newIntegerParallelIterator(input IntegerIterator) *integerParallelIterator { - itr := &integerParallelIterator{ - input: input, - ch: make(chan integerPointError, 256), - closing: make(chan struct{}), - } - itr.wg.Add(1) - go itr.monitor() - return itr -} - -// Stats returns stats from the underlying iterator. -func (itr *integerParallelIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the underlying iterators. -func (itr *integerParallelIterator) Close() error { - itr.once.Do(func() { close(itr.closing) }) - itr.wg.Wait() - return itr.input.Close() -} - -// Next returns the next point from the iterator. -func (itr *integerParallelIterator) Next() (*IntegerPoint, error) { - v, ok := <-itr.ch - if !ok { - return nil, io.EOF - } - return v.point, v.err -} - -// monitor runs in a separate goroutine and actively pulls the next point. -func (itr *integerParallelIterator) monitor() { - defer close(itr.ch) - defer itr.wg.Done() - - for { - // Read next point. 
- p, err := itr.input.Next() - if p != nil { - p = p.Clone() - } - - select { - case <-itr.closing: - return - case itr.ch <- integerPointError{point: p, err: err}: - } - } -} - -type integerPointError struct { - point *IntegerPoint - err error -} - -// integerLimitIterator represents an iterator that limits points per group. -type integerLimitIterator struct { - input IntegerIterator - opt IteratorOptions - n int - - prev struct { - name string - tags Tags - } -} - -// newIntegerLimitIterator returns a new instance of integerLimitIterator. -func newIntegerLimitIterator(input IntegerIterator, opt IteratorOptions) *integerLimitIterator { - return &integerLimitIterator{ - input: input, - opt: opt, - } -} - -// Stats returns stats from the underlying iterator. -func (itr *integerLimitIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the underlying iterators. -func (itr *integerLimitIterator) Close() error { return itr.input.Close() } - -// Next returns the next point from the iterator. -func (itr *integerLimitIterator) Next() (*IntegerPoint, error) { - for { - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Reset window and counter if a new window is encountered. - if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { - itr.prev.name = p.Name - itr.prev.tags = p.Tags - itr.n = 0 - } - - // Increment counter. - itr.n++ - - // Read next point if not beyond the offset. - if itr.n <= itr.opt.Offset { - continue - } - - // Read next point if we're beyond the limit. - if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { - continue - } - - return p, nil - } -} - -type integerFillIterator struct { - input *bufIntegerIterator - prev IntegerPoint - startTime int64 - endTime int64 - auxFields []interface{} - init bool - opt IteratorOptions - - window struct { - name string - tags Tags - time int64 - offset int64 - } -} - -func newIntegerFillIterator(input IntegerIterator, expr influxql.Expr, opt IteratorOptions) *integerFillIterator { - if opt.Fill == influxql.NullFill { - if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" { - opt.Fill = influxql.NumberFill - opt.FillValue = int64(0) - } - } - - var startTime, endTime int64 - if opt.Ascending { - startTime, _ = opt.Window(opt.StartTime) - endTime, _ = opt.Window(opt.EndTime) - } else { - startTime, _ = opt.Window(opt.EndTime) - endTime, _ = opt.Window(opt.StartTime) - } - - var auxFields []interface{} - if len(opt.Aux) > 0 { - auxFields = make([]interface{}, len(opt.Aux)) - } - - return &integerFillIterator{ - input: newBufIntegerIterator(input), - prev: IntegerPoint{Nil: true}, - startTime: startTime, - endTime: endTime, - auxFields: auxFields, - opt: opt, - } -} - -func (itr *integerFillIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *integerFillIterator) Close() error { return itr.input.Close() } - -func (itr *integerFillIterator) Next() (*IntegerPoint, error) { - if !itr.init { - p, err := itr.input.peek() - if p == nil || err != nil { - return nil, err - } - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - if itr.startTime == influxql.MinTime { - itr.window.time, _ = itr.opt.Window(p.Time) - } - if itr.opt.Location != nil { - _, itr.window.offset = itr.opt.Zone(itr.window.time) - } - itr.init = true - } - - p, err := itr.input.Next() - if err != nil { - return nil, err - } - - // Check if the next point is outside of our window or is nil. 
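-	// A nil point, or a point from a different series, means the current series
-	// has no more raw data; any remaining intervals up to the end time are
-	// filled below before moving on to the next series (or returning nil).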
- if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { - // If we are inside of an interval, unread the point and continue below to - // constructing a new point. - if itr.opt.Ascending && itr.window.time <= itr.endTime { - itr.input.unread(p) - p = nil - goto CONSTRUCT - } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { - itr.input.unread(p) - p = nil - goto CONSTRUCT - } - - // We are *not* in a current interval. If there is no next point, - // we are at the end of all intervals. - if p == nil { - return nil, nil - } - - // Set the new interval. - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - if itr.window.time == influxql.MinTime { - itr.window.time, _ = itr.opt.Window(p.Time) - } - if itr.opt.Location != nil { - _, itr.window.offset = itr.opt.Zone(itr.window.time) - } - itr.prev = IntegerPoint{Nil: true} - } - - // Check if the point is our next expected point. -CONSTRUCT: - if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { - if p != nil { - itr.input.unread(p) - } - - p = &IntegerPoint{ - Name: itr.window.name, - Tags: itr.window.tags, - Time: itr.window.time, - Aux: itr.auxFields, - } - - switch itr.opt.Fill { - case influxql.LinearFill: - if !itr.prev.Nil { - next, err := itr.input.peek() - if err != nil { - return nil, err - } else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() { - interval := int64(itr.opt.Interval.Duration) - start := itr.window.time / interval - p.Value = linearInteger(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value) - } else { - p.Nil = true - } - } else { - p.Nil = true - } - - case influxql.NullFill: - p.Nil = true - case influxql.NumberFill: - p.Value, _ = castToInteger(itr.opt.FillValue) - case influxql.PreviousFill: - if !itr.prev.Nil { - p.Value = itr.prev.Value - p.Nil = itr.prev.Nil - } else { - p.Nil = true - } - } - } else { - itr.prev = *p - } - - // Advance the expected time. Do not advance to a new window here - // as there may be lingering points with the same timestamp in the previous - // window. - if itr.opt.Ascending { - itr.window.time += int64(itr.opt.Interval.Duration) - } else { - itr.window.time -= int64(itr.opt.Interval.Duration) - } - - // Check to see if we have passed over an offset change and adjust the time - // to account for this new offset. - if itr.opt.Location != nil { - if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { - diff := itr.window.offset - offset - if abs(diff) < int64(itr.opt.Interval.Duration) { - itr.window.time += diff - } - itr.window.offset = offset - } - } - return p, nil -} - -// integerIntervalIterator represents a integer implementation of IntervalIterator. 
-type integerIntervalIterator struct {
-	input IntegerIterator
-	opt   IteratorOptions
-}
-
-func newIntegerIntervalIterator(input IntegerIterator, opt IteratorOptions) *integerIntervalIterator {
-	return &integerIntervalIterator{input: input, opt: opt}
-}
-
-func (itr *integerIntervalIterator) Stats() IteratorStats { return itr.input.Stats() }
-func (itr *integerIntervalIterator) Close() error         { return itr.input.Close() }
-
-func (itr *integerIntervalIterator) Next() (*IntegerPoint, error) {
-	p, err := itr.input.Next()
-	if p == nil || err != nil {
-		return nil, err
-	}
-	p.Time, _ = itr.opt.Window(p.Time)
-	// If we see the minimum allowable time, set the time to zero so we don't
-	// break the default returned time for aggregate queries without times.
-	if p.Time == influxql.MinTime {
-		p.Time = 0
-	}
-	return p, nil
-}
-
-// integerInterruptIterator represents an integer implementation of InterruptIterator.
-type integerInterruptIterator struct {
-	input   IntegerIterator
-	closing <-chan struct{}
-	count   int
-}
-
-func newIntegerInterruptIterator(input IntegerIterator, closing <-chan struct{}) *integerInterruptIterator {
-	return &integerInterruptIterator{input: input, closing: closing}
-}
-
-func (itr *integerInterruptIterator) Stats() IteratorStats { return itr.input.Stats() }
-func (itr *integerInterruptIterator) Close() error         { return itr.input.Close() }
-
-func (itr *integerInterruptIterator) Next() (*IntegerPoint, error) {
-	// Only check if the channel is closed every N points. This
-	// intentionally checks on both 0 and N so that if the iterator
-	// has been interrupted before the first point is emitted it will
-	// not emit any points.
-	if itr.count&0xFF == 0xFF {
-		select {
-		case <-itr.closing:
-			return nil, itr.Close()
-		default:
-			// Reset iterator count to zero and fall through to emit the next point.
-			itr.count = 0
-		}
-	}
-
-	// Increment the counter for every point read.
-	itr.count++
-	return itr.input.Next()
-}
-
-// integerCloseInterruptIterator represents an integer implementation of CloseInterruptIterator.
-type integerCloseInterruptIterator struct {
-	input   IntegerIterator
-	closing <-chan struct{}
-	done    chan struct{}
-	once    sync.Once
-}
-
-func newIntegerCloseInterruptIterator(input IntegerIterator, closing <-chan struct{}) *integerCloseInterruptIterator {
-	itr := &integerCloseInterruptIterator{
-		input:   input,
-		closing: closing,
-		done:    make(chan struct{}),
-	}
-	go itr.monitor()
-	return itr
-}
-
-func (itr *integerCloseInterruptIterator) monitor() {
-	select {
-	case <-itr.closing:
-		itr.Close()
-	case <-itr.done:
-	}
-}
-
-func (itr *integerCloseInterruptIterator) Stats() IteratorStats {
-	return itr.input.Stats()
-}
-
-func (itr *integerCloseInterruptIterator) Close() error {
-	itr.once.Do(func() {
-		close(itr.done)
-		itr.input.Close()
-	})
-	return nil
-}
-
-func (itr *integerCloseInterruptIterator) Next() (*IntegerPoint, error) {
-	p, err := itr.input.Next()
-	if err != nil {
-		// Check if the iterator was closed.
-		select {
-		case <-itr.done:
-			return nil, nil
-		default:
-			return nil, err
-		}
-	}
-	return p, nil
-}
-
-// integerReduceFloatIterator executes a reducer for every interval and buffers the result.
-type integerReduceFloatIterator struct { - input *bufIntegerIterator - create func() (IntegerPointAggregator, FloatPointEmitter) - dims []string - opt IteratorOptions - points []FloatPoint - keepTags bool -} - -func newIntegerReduceFloatIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, FloatPointEmitter)) *integerReduceFloatIterator { - return &integerReduceFloatIterator{ - input: newBufIntegerIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *integerReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *integerReduceFloatIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *integerReduceFloatIterator) Next() (*FloatPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// integerReduceFloatPoint stores the reduced data for a name/tag combination. -type integerReduceFloatPoint struct { - Name string - Tags Tags - Aggregator IntegerPointAggregator - Emitter FloatPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *integerReduceFloatIterator) reduce() ([]FloatPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*integerReduceFloatPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &integerReduceFloatPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateInteger(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. 
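-	// (Next() pops points off the end of the buffered slice, so building the
-	// slice in reverse key order emits points in ascending name/tag order for
-	// ascending queries, and vice versa for descending ones.)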
- if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]FloatPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = floatPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// integerStreamFloatIterator streams inputs into the iterator and emits points gradually. -type integerStreamFloatIterator struct { - input *bufIntegerIterator - create func() (IntegerPointAggregator, FloatPointEmitter) - dims []string - opt IteratorOptions - m map[string]*integerReduceFloatPoint - points []FloatPoint -} - -// newIntegerStreamFloatIterator returns a new instance of integerStreamFloatIterator. -func newIntegerStreamFloatIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, FloatPointEmitter), opt IteratorOptions) *integerStreamFloatIterator { - return &integerStreamFloatIterator{ - input: newBufIntegerIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*integerReduceFloatPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *integerStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *integerStreamFloatIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *integerStreamFloatIterator) Next() (*FloatPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *integerStreamFloatIterator) reduce() ([]FloatPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []FloatPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. 
- itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &integerReduceFloatPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateInteger(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// integerReduceIntegerIterator executes a reducer for every interval and buffers the result. -type integerReduceIntegerIterator struct { - input *bufIntegerIterator - create func() (IntegerPointAggregator, IntegerPointEmitter) - dims []string - opt IteratorOptions - points []IntegerPoint - keepTags bool -} - -func newIntegerReduceIntegerIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, IntegerPointEmitter)) *integerReduceIntegerIterator { - return &integerReduceIntegerIterator{ - input: newBufIntegerIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *integerReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *integerReduceIntegerIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *integerReduceIntegerIterator) Next() (*IntegerPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// integerReduceIntegerPoint stores the reduced data for a name/tag combination. -type integerReduceIntegerPoint struct { - Name string - Tags Tags - Aggregator IntegerPointAggregator - Emitter IntegerPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *integerReduceIntegerIterator) reduce() ([]IntegerPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*integerReduceIntegerPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. 
- if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &integerReduceIntegerPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateInteger(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]IntegerPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = integerPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// integerStreamIntegerIterator streams inputs into the iterator and emits points gradually. -type integerStreamIntegerIterator struct { - input *bufIntegerIterator - create func() (IntegerPointAggregator, IntegerPointEmitter) - dims []string - opt IteratorOptions - m map[string]*integerReduceIntegerPoint - points []IntegerPoint -} - -// newIntegerStreamIntegerIterator returns a new instance of integerStreamIntegerIterator. -func newIntegerStreamIntegerIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, IntegerPointEmitter), opt IteratorOptions) *integerStreamIntegerIterator { - return &integerStreamIntegerIterator{ - input: newBufIntegerIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*integerReduceIntegerPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *integerStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *integerStreamIntegerIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *integerStreamIntegerIterator) Next() (*IntegerPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. 
-// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *integerStreamIntegerIterator) reduce() ([]IntegerPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []IntegerPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &integerReduceIntegerPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateInteger(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// integerReduceUnsignedIterator executes a reducer for every interval and buffers the result. -type integerReduceUnsignedIterator struct { - input *bufIntegerIterator - create func() (IntegerPointAggregator, UnsignedPointEmitter) - dims []string - opt IteratorOptions - points []UnsignedPoint - keepTags bool -} - -func newIntegerReduceUnsignedIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, UnsignedPointEmitter)) *integerReduceUnsignedIterator { - return &integerReduceUnsignedIterator{ - input: newBufIntegerIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *integerReduceUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *integerReduceUnsignedIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *integerReduceUnsignedIterator) Next() (*UnsignedPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// integerReduceUnsignedPoint stores the reduced data for a name/tag combination. -type integerReduceUnsignedPoint struct { - Name string - Tags Tags - Aggregator IntegerPointAggregator - Emitter UnsignedPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *integerReduceUnsignedIterator) reduce() ([]UnsignedPoint, error) { - // Calculate next window. 
- var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*integerReduceUnsignedPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &integerReduceUnsignedPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateInteger(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]UnsignedPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = unsignedPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// integerStreamUnsignedIterator streams inputs into the iterator and emits points gradually. -type integerStreamUnsignedIterator struct { - input *bufIntegerIterator - create func() (IntegerPointAggregator, UnsignedPointEmitter) - dims []string - opt IteratorOptions - m map[string]*integerReduceUnsignedPoint - points []UnsignedPoint -} - -// newIntegerStreamUnsignedIterator returns a new instance of integerStreamUnsignedIterator. 
-func newIntegerStreamUnsignedIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, UnsignedPointEmitter), opt IteratorOptions) *integerStreamUnsignedIterator { - return &integerStreamUnsignedIterator{ - input: newBufIntegerIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*integerReduceUnsignedPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *integerStreamUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *integerStreamUnsignedIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *integerStreamUnsignedIterator) Next() (*UnsignedPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *integerStreamUnsignedIterator) reduce() ([]UnsignedPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []UnsignedPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &integerReduceUnsignedPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateInteger(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// integerReduceStringIterator executes a reducer for every interval and buffers the result. -type integerReduceStringIterator struct { - input *bufIntegerIterator - create func() (IntegerPointAggregator, StringPointEmitter) - dims []string - opt IteratorOptions - points []StringPoint - keepTags bool -} - -func newIntegerReduceStringIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, StringPointEmitter)) *integerReduceStringIterator { - return &integerReduceStringIterator{ - input: newBufIntegerIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. 
-func (itr *integerReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *integerReduceStringIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *integerReduceStringIterator) Next() (*StringPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// integerReduceStringPoint stores the reduced data for a name/tag combination. -type integerReduceStringPoint struct { - Name string - Tags Tags - Aggregator IntegerPointAggregator - Emitter StringPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *integerReduceStringIterator) reduce() ([]StringPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*integerReduceStringPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &integerReduceStringPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateInteger(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]StringPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. 
- if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = stringPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// integerStreamStringIterator streams inputs into the iterator and emits points gradually. -type integerStreamStringIterator struct { - input *bufIntegerIterator - create func() (IntegerPointAggregator, StringPointEmitter) - dims []string - opt IteratorOptions - m map[string]*integerReduceStringPoint - points []StringPoint -} - -// newIntegerStreamStringIterator returns a new instance of integerStreamStringIterator. -func newIntegerStreamStringIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, StringPointEmitter), opt IteratorOptions) *integerStreamStringIterator { - return &integerStreamStringIterator{ - input: newBufIntegerIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*integerReduceStringPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *integerStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *integerStreamStringIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *integerStreamStringIterator) Next() (*StringPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *integerStreamStringIterator) reduce() ([]StringPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []StringPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &integerReduceStringPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateInteger(curr) - - // Attempt to emit points from the aggregator. 
- points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// integerReduceBooleanIterator executes a reducer for every interval and buffers the result. -type integerReduceBooleanIterator struct { - input *bufIntegerIterator - create func() (IntegerPointAggregator, BooleanPointEmitter) - dims []string - opt IteratorOptions - points []BooleanPoint - keepTags bool -} - -func newIntegerReduceBooleanIterator(input IntegerIterator, opt IteratorOptions, createFn func() (IntegerPointAggregator, BooleanPointEmitter)) *integerReduceBooleanIterator { - return &integerReduceBooleanIterator{ - input: newBufIntegerIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *integerReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *integerReduceBooleanIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *integerReduceBooleanIterator) Next() (*BooleanPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// integerReduceBooleanPoint stores the reduced data for a name/tag combination. -type integerReduceBooleanPoint struct { - Name string - Tags Tags - Aggregator IntegerPointAggregator - Emitter BooleanPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *integerReduceBooleanIterator) reduce() ([]BooleanPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*integerReduceBooleanPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. 
- rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &integerReduceBooleanPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateInteger(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]BooleanPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = booleanPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// integerStreamBooleanIterator streams inputs into the iterator and emits points gradually. -type integerStreamBooleanIterator struct { - input *bufIntegerIterator - create func() (IntegerPointAggregator, BooleanPointEmitter) - dims []string - opt IteratorOptions - m map[string]*integerReduceBooleanPoint - points []BooleanPoint -} - -// newIntegerStreamBooleanIterator returns a new instance of integerStreamBooleanIterator. -func newIntegerStreamBooleanIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, BooleanPointEmitter), opt IteratorOptions) *integerStreamBooleanIterator { - return &integerStreamBooleanIterator{ - input: newBufIntegerIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*integerReduceBooleanPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *integerStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *integerStreamBooleanIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *integerStreamBooleanIterator) Next() (*BooleanPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *integerStreamBooleanIterator) reduce() ([]BooleanPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. 
- var points []BooleanPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &integerReduceBooleanPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateInteger(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// integerDedupeIterator only outputs unique points. -// This differs from the DistinctIterator in that it compares all aux fields too. -// This iterator is relatively inefficient and should only be used on small -// datasets such as meta query results. -type integerDedupeIterator struct { - input IntegerIterator - m map[string]struct{} // lookup of points already sent -} - -type integerIteratorMapper struct { - cur Cursor - row Row - driver IteratorMap // which iterator to use for the primary value, can be nil - fields []IteratorMap // which iterator to use for an aux field - point IntegerPoint -} - -func newIntegerIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *integerIteratorMapper { - return &integerIteratorMapper{ - cur: cur, - driver: driver, - fields: fields, - point: IntegerPoint{ - Aux: make([]interface{}, len(fields)), - }, - } -} - -func (itr *integerIteratorMapper) Next() (*IntegerPoint, error) { - if !itr.cur.Scan(&itr.row) { - if err := itr.cur.Err(); err != nil { - return nil, err - } - return nil, nil - } - - itr.point.Time = itr.row.Time - itr.point.Name = itr.row.Series.Name - itr.point.Tags = itr.row.Series.Tags - - if itr.driver != nil { - if v := itr.driver.Value(&itr.row); v != nil { - if v, ok := castToInteger(v); ok { - itr.point.Value = v - itr.point.Nil = false - } else { - itr.point.Value = 0 - itr.point.Nil = true - } - } else { - itr.point.Value = 0 - itr.point.Nil = true - } - } - for i, f := range itr.fields { - itr.point.Aux[i] = f.Value(&itr.row) - } - return &itr.point, nil -} - -func (itr *integerIteratorMapper) Stats() IteratorStats { - return itr.cur.Stats() -} - -func (itr *integerIteratorMapper) Close() error { - return itr.cur.Close() -} - -type integerFilterIterator struct { - input IntegerIterator - cond influxql.Expr - opt IteratorOptions - m map[string]interface{} -} - -func newIntegerFilterIterator(input IntegerIterator, cond influxql.Expr, opt IteratorOptions) IntegerIterator { - // Strip out time conditions from the WHERE clause. - // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. 
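-	// For example (illustrative values), a condition such as
-	//     time >= now() - 1h AND host = 'server01'
-	// becomes
-	//     true AND host = 'server01'
-	// so only the non-time predicate is evaluated per point below; the time
-	// bounds are presumably applied through the iterator's time range instead.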
- n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node { - switch n := n.(type) { - case *influxql.BinaryExpr: - if n.LHS.String() == "time" { - return &influxql.BooleanLiteral{Val: true} - } - } - return n - }) - - cond, _ = n.(influxql.Expr) - if cond == nil { - return input - } else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val { - return input - } - - return &integerFilterIterator{ - input: input, - cond: cond, - opt: opt, - m: make(map[string]interface{}), - } -} - -func (itr *integerFilterIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *integerFilterIterator) Close() error { return itr.input.Close() } - -func (itr *integerFilterIterator) Next() (*IntegerPoint, error) { - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } - - for i, ref := range itr.opt.Aux { - itr.m[ref.Val] = p.Aux[i] - } - for k, v := range p.Tags.KeyValues() { - itr.m[k] = v - } - - if !influxql.EvalBool(itr.cond, itr.m) { - continue - } - return p, nil - } -} - -type integerTagSubsetIterator struct { - input IntegerIterator - point IntegerPoint - lastTags Tags - dimensions []string -} - -func newIntegerTagSubsetIterator(input IntegerIterator, opt IteratorOptions) *integerTagSubsetIterator { - return &integerTagSubsetIterator{ - input: input, - dimensions: opt.GetDimensions(), - } -} - -func (itr *integerTagSubsetIterator) Next() (*IntegerPoint, error) { - p, err := itr.input.Next() - if err != nil { - return nil, err - } else if p == nil { - return nil, nil - } - - itr.point.Name = p.Name - if !p.Tags.Equal(itr.lastTags) { - itr.point.Tags = p.Tags.Subset(itr.dimensions) - itr.lastTags = p.Tags - } - itr.point.Time = p.Time - itr.point.Value = p.Value - itr.point.Aux = p.Aux - itr.point.Aggregated = p.Aggregated - itr.point.Nil = p.Nil - return &itr.point, nil -} - -func (itr *integerTagSubsetIterator) Stats() IteratorStats { - return itr.input.Stats() -} - -func (itr *integerTagSubsetIterator) Close() error { - return itr.input.Close() -} - -// newIntegerDedupeIterator returns a new instance of integerDedupeIterator. -func newIntegerDedupeIterator(input IntegerIterator) *integerDedupeIterator { - return &integerDedupeIterator{ - input: input, - m: make(map[string]struct{}), - } -} - -// Stats returns stats from the input iterator. -func (itr *integerDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *integerDedupeIterator) Close() error { return itr.input.Close() } - -// Next returns the next unique point from the input iterator. -func (itr *integerDedupeIterator) Next() (*IntegerPoint, error) { - for { - // Read next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Serialize to bytes to store in lookup. - buf, err := proto.Marshal(encodeIntegerPoint(p)) - if err != nil { - return nil, err - } - - // If the point has already been output then move to the next point. - if _, ok := itr.m[string(buf)]; ok { - continue - } - - // Otherwise mark it as emitted and return point. - itr.m[string(buf)] = struct{}{} - return p, nil - } -} - -// integerReaderIterator represents an iterator that streams from a reader. -type integerReaderIterator struct { - r io.Reader - dec *IntegerPointDecoder -} - -// newIntegerReaderIterator returns a new instance of integerReaderIterator. 
-func newIntegerReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *integerReaderIterator {
-	dec := NewIntegerPointDecoder(ctx, r)
-	dec.stats = stats
-
-	return &integerReaderIterator{
-		r:   r,
-		dec: dec,
-	}
-}
-
-// Stats returns stats about points processed.
-func (itr *integerReaderIterator) Stats() IteratorStats { return itr.dec.stats }
-
-// Close closes the underlying reader, if applicable.
-func (itr *integerReaderIterator) Close() error {
-	if r, ok := itr.r.(io.ReadCloser); ok {
-		return r.Close()
-	}
-	return nil
-}
-
-// Next returns the next point from the iterator.
-func (itr *integerReaderIterator) Next() (*IntegerPoint, error) {
-	// OPTIMIZE(benbjohnson): Reuse point on iterator.
-
-	// Unmarshal next point.
-	p := &IntegerPoint{}
-	if err := itr.dec.DecodeIntegerPoint(p); err == io.EOF {
-		return nil, nil
-	} else if err != nil {
-		return nil, err
-	}
-	return p, nil
-}
-
-// UnsignedIterator represents a stream of unsigned points.
-type UnsignedIterator interface {
-	Iterator
-	Next() (*UnsignedPoint, error)
-}
-
-// newUnsignedIterators converts a slice of Iterator to a slice of UnsignedIterator.
-// Drops and closes any iterator in itrs that is not an UnsignedIterator and cannot
-// be cast to an UnsignedIterator.
-func newUnsignedIterators(itrs []Iterator) []UnsignedIterator {
-	a := make([]UnsignedIterator, 0, len(itrs))
-	for _, itr := range itrs {
-		switch itr := itr.(type) {
-		case UnsignedIterator:
-			a = append(a, itr)
-		default:
-			itr.Close()
-		}
-	}
-	return a
-}
-
-// bufUnsignedIterator represents a buffered UnsignedIterator.
-type bufUnsignedIterator struct {
-	itr UnsignedIterator
-	buf *UnsignedPoint
-}
-
-// newBufUnsignedIterator returns a buffered UnsignedIterator.
-func newBufUnsignedIterator(itr UnsignedIterator) *bufUnsignedIterator {
-	return &bufUnsignedIterator{itr: itr}
-}
-
-// Stats returns statistics from the input iterator.
-func (itr *bufUnsignedIterator) Stats() IteratorStats { return itr.itr.Stats() }
-
-// Close closes the underlying iterator.
-func (itr *bufUnsignedIterator) Close() error { return itr.itr.Close() }
-
-// peek returns the next point without removing it from the iterator.
-func (itr *bufUnsignedIterator) peek() (*UnsignedPoint, error) {
-	p, err := itr.Next()
-	if err != nil {
-		return nil, err
-	}
-	itr.unread(p)
-	return p, nil
-}
-
-// peekTime returns the time of the next point.
-// Returns zero time if no more points are available.
-func (itr *bufUnsignedIterator) peekTime() (int64, error) {
-	p, err := itr.peek()
-	if p == nil || err != nil {
-		return ZeroTime, err
-	}
-	return p.Time, nil
-}
-
-// Next returns the current buffer, if it exists, or calls the underlying iterator.
-func (itr *bufUnsignedIterator) Next() (*UnsignedPoint, error) {
-	buf := itr.buf
-	if buf != nil {
-		itr.buf = nil
-		return buf, nil
-	}
-	return itr.itr.Next()
-}
-
-// NextInWindow returns the next value if it is between [startTime, endTime).
-// If the next value is outside the range then it is moved to the buffer.
-func (itr *bufUnsignedIterator) NextInWindow(startTime, endTime int64) (*UnsignedPoint, error) {
-	v, err := itr.Next()
-	if v == nil || err != nil {
-		return nil, err
-	} else if t := v.Time; t >= endTime || t < startTime {
-		itr.unread(v)
-		return nil, nil
-	}
-	return v, nil
-}
-
-// unread sets v to the buffer. It is read on the next call to Next().
-func (itr *bufUnsignedIterator) unread(v *UnsignedPoint) { itr.buf = v } - -// unsignedMergeIterator represents an iterator that combines multiple unsigned iterators. -type unsignedMergeIterator struct { - inputs []UnsignedIterator - heap *unsignedMergeHeap - init bool - - closed bool - mu sync.RWMutex - - // Current iterator and window. - curr *unsignedMergeHeapItem - window struct { - name string - tags string - startTime int64 - endTime int64 - } -} - -// newUnsignedMergeIterator returns a new instance of unsignedMergeIterator. -func newUnsignedMergeIterator(inputs []UnsignedIterator, opt IteratorOptions) *unsignedMergeIterator { - itr := &unsignedMergeIterator{ - inputs: inputs, - heap: &unsignedMergeHeap{ - items: make([]*unsignedMergeHeapItem, 0, len(inputs)), - opt: opt, - }, - } - - // Initialize heap items. - for _, input := range inputs { - // Wrap in buffer, ignore any inputs without anymore points. - bufInput := newBufUnsignedIterator(input) - - // Append to the heap. - itr.heap.items = append(itr.heap.items, &unsignedMergeHeapItem{itr: bufInput}) - } - - return itr -} - -// Stats returns an aggregation of stats from the underlying iterators. -func (itr *unsignedMergeIterator) Stats() IteratorStats { - var stats IteratorStats - for _, input := range itr.inputs { - stats.Add(input.Stats()) - } - return stats -} - -// Close closes the underlying iterators. -func (itr *unsignedMergeIterator) Close() error { - itr.mu.Lock() - defer itr.mu.Unlock() - - for _, input := range itr.inputs { - input.Close() - } - itr.curr = nil - itr.inputs = nil - itr.heap.items = nil - itr.closed = true - return nil -} - -// Next returns the next point from the iterator. -func (itr *unsignedMergeIterator) Next() (*UnsignedPoint, error) { - itr.mu.RLock() - defer itr.mu.RUnlock() - if itr.closed { - return nil, nil - } - - // Initialize the heap. This needs to be done lazily on the first call to this iterator - // so that iterator initialization done through the Select() call returns quickly. - // Queries can only be interrupted after the Select() call completes so any operations - // done during iterator creation cannot be interrupted, which is why we do it here - // instead so an interrupt can happen while initializing the heap. - if !itr.init { - items := itr.heap.items - itr.heap.items = make([]*unsignedMergeHeapItem, 0, len(items)) - for _, item := range items { - if p, err := item.itr.peek(); err != nil { - return nil, err - } else if p == nil { - continue - } - itr.heap.items = append(itr.heap.items, item) - } - heap.Init(itr.heap) - itr.init = true - } - - for { - // Retrieve the next iterator if we don't have one. - if itr.curr == nil { - if len(itr.heap.items) == 0 { - return nil, nil - } - itr.curr = heap.Pop(itr.heap).(*unsignedMergeHeapItem) - - // Read point and set current window. - p, err := itr.curr.itr.Next() - if err != nil { - return nil, err - } - tags := p.Tags.Subset(itr.heap.opt.Dimensions) - itr.window.name, itr.window.tags = p.Name, tags.ID() - itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) - return p, nil - } - - // Read the next point from the current iterator. - p, err := itr.curr.itr.Next() - if err != nil { - return nil, err - } - - // If there are no more points then remove iterator from heap and find next. - if p == nil { - itr.curr = nil - continue - } - - // Check if the point is inside of our current window. 
- inWindow := true - if window := itr.window; window.name != p.Name { - inWindow = false - } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { - inWindow = false - } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { - inWindow = false - } else if !opt.Ascending && p.Time < window.startTime { - inWindow = false - } - - // If it's outside our window then push iterator back on the heap and find new iterator. - if !inWindow { - itr.curr.itr.unread(p) - heap.Push(itr.heap, itr.curr) - itr.curr = nil - continue - } - - return p, nil - } -} - -// unsignedMergeHeap represents a heap of unsignedMergeHeapItems. -// Items are sorted by their next window and then by name/tags. -type unsignedMergeHeap struct { - opt IteratorOptions - items []*unsignedMergeHeapItem -} - -func (h *unsignedMergeHeap) Len() int { return len(h.items) } -func (h *unsignedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h *unsignedMergeHeap) Less(i, j int) bool { - x, err := h.items[i].itr.peek() - if err != nil { - return true - } - y, err := h.items[j].itr.peek() - if err != nil { - return false - } - - if h.opt.Ascending { - if x.Name != y.Name { - return x.Name < y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { - return xTags.ID() < yTags.ID() - } - } else { - if x.Name != y.Name { - return x.Name > y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { - return xTags.ID() > yTags.ID() - } - } - - xt, _ := h.opt.Window(x.Time) - yt, _ := h.opt.Window(y.Time) - - if h.opt.Ascending { - return xt < yt - } - return xt > yt -} - -func (h *unsignedMergeHeap) Push(x interface{}) { - h.items = append(h.items, x.(*unsignedMergeHeapItem)) -} - -func (h *unsignedMergeHeap) Pop() interface{} { - old := h.items - n := len(old) - item := old[n-1] - h.items = old[0 : n-1] - return item -} - -type unsignedMergeHeapItem struct { - itr *bufUnsignedIterator -} - -// unsignedSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. -type unsignedSortedMergeIterator struct { - inputs []UnsignedIterator - heap *unsignedSortedMergeHeap - init bool -} - -// newUnsignedSortedMergeIterator returns an instance of unsignedSortedMergeIterator. -func newUnsignedSortedMergeIterator(inputs []UnsignedIterator, opt IteratorOptions) Iterator { - itr := &unsignedSortedMergeIterator{ - inputs: inputs, - heap: &unsignedSortedMergeHeap{ - items: make([]*unsignedSortedMergeHeapItem, 0, len(inputs)), - opt: opt, - }, - } - - // Initialize heap items. - for _, input := range inputs { - // Append to the heap. - itr.heap.items = append(itr.heap.items, &unsignedSortedMergeHeapItem{itr: input}) - } - - return itr -} - -// Stats returns an aggregation of stats from the underlying iterators. -func (itr *unsignedSortedMergeIterator) Stats() IteratorStats { - var stats IteratorStats - for _, input := range itr.inputs { - stats.Add(input.Stats()) - } - return stats -} - -// Close closes the underlying iterators. -func (itr *unsignedSortedMergeIterator) Close() error { - for _, input := range itr.inputs { - input.Close() - } - return nil -} - -// Next returns the next points from the iterator. -func (itr *unsignedSortedMergeIterator) Next() (*UnsignedPoint, error) { return itr.pop() } - -// pop returns the next point from the heap. -// Reads the next point from item's cursor and puts it back on the heap. 
-func (itr *unsignedSortedMergeIterator) pop() (*UnsignedPoint, error) { - // Initialize the heap. See the MergeIterator to see why this has to be done lazily. - if !itr.init { - items := itr.heap.items - itr.heap.items = make([]*unsignedSortedMergeHeapItem, 0, len(items)) - for _, item := range items { - var err error - if item.point, err = item.itr.Next(); err != nil { - return nil, err - } else if item.point == nil { - continue - } - itr.heap.items = append(itr.heap.items, item) - } - heap.Init(itr.heap) - itr.init = true - } - - if len(itr.heap.items) == 0 { - return nil, nil - } - - // Read the next item from the heap. - item := heap.Pop(itr.heap).(*unsignedSortedMergeHeapItem) - if item.err != nil { - return nil, item.err - } else if item.point == nil { - return nil, nil - } - - // Copy the point for return. - p := item.point.Clone() - - // Read the next item from the cursor. Push back to heap if one exists. - if item.point, item.err = item.itr.Next(); item.point != nil { - heap.Push(itr.heap, item) - } - - return p, nil -} - -// unsignedSortedMergeHeap represents a heap of unsignedSortedMergeHeapItems. -// Items are sorted with the following priority: -// - By their measurement name; -// - By their tag keys/values; -// - By time; or -// - By their Aux field values. -type unsignedSortedMergeHeap struct { - opt IteratorOptions - items []*unsignedSortedMergeHeapItem -} - -func (h *unsignedSortedMergeHeap) Len() int { return len(h.items) } -func (h *unsignedSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h *unsignedSortedMergeHeap) Less(i, j int) bool { - x, y := h.items[i].point, h.items[j].point - - if h.opt.Ascending { - if x.Name != y.Name { - return x.Name < y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { - return xTags.ID() < yTags.ID() - } - - if x.Time != y.Time { - return x.Time < y.Time - } - - if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { - for i := 0; i < len(x.Aux); i++ { - v1, ok1 := x.Aux[i].(string) - v2, ok2 := y.Aux[i].(string) - if !ok1 || !ok2 { - // Unsupported types used in Aux fields. Maybe they - // need to be added here? - return false - } else if v1 == v2 { - continue - } - return v1 < v2 - } - } - return false // Times and/or Aux fields are equal. - } - - if x.Name != y.Name { - return x.Name > y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { - return xTags.ID() > yTags.ID() - } - - if x.Time != y.Time { - return x.Time > y.Time - } - - if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { - for i := 0; i < len(x.Aux); i++ { - v1, ok1 := x.Aux[i].(string) - v2, ok2 := y.Aux[i].(string) - if !ok1 || !ok2 { - // Unsupported types used in Aux fields. Maybe they - // need to be added here? - return false - } else if v1 == v2 { - continue - } - return v1 > v2 - } - } - return false // Times and/or Aux fields are equal. -} - -func (h *unsignedSortedMergeHeap) Push(x interface{}) { - h.items = append(h.items, x.(*unsignedSortedMergeHeapItem)) -} - -func (h *unsignedSortedMergeHeap) Pop() interface{} { - old := h.items - n := len(old) - item := old[n-1] - h.items = old[0 : n-1] - return item -} - -type unsignedSortedMergeHeapItem struct { - point *UnsignedPoint - err error - itr UnsignedIterator -} - -// unsignedIteratorScanner scans the results of a UnsignedIterator into a map. 
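Both merge iterators lean on `container/heap` with a custom `Less` so that whichever input currently holds the "smallest" next point sits at the top of the heap; `pop` then reads from that input and pushes it back while points remain. A compact, hypothetical k-way merge over sorted int slices is sketched below to show the same pop/advance/push loop; `item`, `mergeHeap`, and `merge` are stand-ins, and the real heaps order by name, tag set, window, and time rather than a single value.

```go
package main

import (
	"container/heap"
	"fmt"
)

// item tracks one sorted source and the index of its next value, loosely
// mirroring unsignedSortedMergeHeapItem (which carries a point and iterator).
type item struct {
	vals []int
	pos  int
}

type mergeHeap []*item

func (h mergeHeap) Len() int            { return len(h) }
func (h mergeHeap) Less(i, j int) bool  { return h[i].vals[h[i].pos] < h[j].vals[h[j].pos] }
func (h mergeHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *mergeHeap) Push(x interface{}) { *h = append(*h, x.(*item)) }
func (h *mergeHeap) Pop() interface{} {
	old := *h
	n := len(old)
	it := old[n-1]
	*h = old[:n-1]
	return it
}

// merge repeatedly pops the source with the smallest head, emits that value,
// advances the source, and pushes it back while values remain.
func merge(inputs ...[]int) []int {
	h := &mergeHeap{}
	for _, in := range inputs {
		if len(in) > 0 {
			*h = append(*h, &item{vals: in})
		}
	}
	heap.Init(h)

	var out []int
	for h.Len() > 0 {
		it := heap.Pop(h).(*item)
		out = append(out, it.vals[it.pos])
		it.pos++
		if it.pos < len(it.vals) {
			heap.Push(h, it)
		}
	}
	return out
}

func main() {
	fmt.Println(merge([]int{1, 4, 9}, []int{2, 3, 10}, []int{5})) // [1 2 3 4 5 9 10]
}
```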
-type unsignedIteratorScanner struct { - input *bufUnsignedIterator - err error - keys []influxql.VarRef - defaultValue interface{} -} - -// newUnsignedIteratorScanner creates a new IteratorScanner. -func newUnsignedIteratorScanner(input UnsignedIterator, keys []influxql.VarRef, defaultValue interface{}) *unsignedIteratorScanner { - return &unsignedIteratorScanner{ - input: newBufUnsignedIterator(input), - keys: keys, - defaultValue: defaultValue, - } -} - -func (s *unsignedIteratorScanner) Peek() (int64, string, Tags) { - if s.err != nil { - return ZeroTime, "", Tags{} - } - - p, err := s.input.peek() - if err != nil { - s.err = err - return ZeroTime, "", Tags{} - } else if p == nil { - return ZeroTime, "", Tags{} - } - return p.Time, p.Name, p.Tags -} - -func (s *unsignedIteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) { - if s.err != nil { - return - } - - p, err := s.input.Next() - if err != nil { - s.err = err - return - } else if p == nil { - s.useDefaults(m) - return - } else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) { - s.useDefaults(m) - s.input.unread(p) - return - } - - if k := s.keys[0]; k.Val != "" { - if p.Nil { - if s.defaultValue != SkipDefault { - m[k.Val] = castToType(s.defaultValue, k.Type) - } - } else { - m[k.Val] = p.Value - } - } - for i, v := range p.Aux { - k := s.keys[i+1] - switch v.(type) { - case float64, int64, uint64, string, bool: - m[k.Val] = v - default: - // Insert the fill value if one was specified. - if s.defaultValue != SkipDefault { - m[k.Val] = castToType(s.defaultValue, k.Type) - } - } - } -} - -func (s *unsignedIteratorScanner) useDefaults(m map[string]interface{}) { - if s.defaultValue == SkipDefault { - return - } - for _, k := range s.keys { - if k.Val == "" { - continue - } - m[k.Val] = castToType(s.defaultValue, k.Type) - } -} - -func (s *unsignedIteratorScanner) Stats() IteratorStats { return s.input.Stats() } -func (s *unsignedIteratorScanner) Err() error { return s.err } -func (s *unsignedIteratorScanner) Close() error { return s.input.Close() } - -// unsignedParallelIterator represents an iterator that pulls data in a separate goroutine. -type unsignedParallelIterator struct { - input UnsignedIterator - ch chan unsignedPointError - - once sync.Once - closing chan struct{} - wg sync.WaitGroup -} - -// newUnsignedParallelIterator returns a new instance of unsignedParallelIterator. -func newUnsignedParallelIterator(input UnsignedIterator) *unsignedParallelIterator { - itr := &unsignedParallelIterator{ - input: input, - ch: make(chan unsignedPointError, 256), - closing: make(chan struct{}), - } - itr.wg.Add(1) - go itr.monitor() - return itr -} - -// Stats returns stats from the underlying iterator. -func (itr *unsignedParallelIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the underlying iterators. -func (itr *unsignedParallelIterator) Close() error { - itr.once.Do(func() { close(itr.closing) }) - itr.wg.Wait() - return itr.input.Close() -} - -// Next returns the next point from the iterator. -func (itr *unsignedParallelIterator) Next() (*UnsignedPoint, error) { - v, ok := <-itr.ch - if !ok { - return nil, io.EOF - } - return v.point, v.err -} - -// monitor runs in a separate goroutine and actively pulls the next point. -func (itr *unsignedParallelIterator) monitor() { - defer close(itr.ch) - defer itr.wg.Done() - - for { - // Read next point. 
- p, err := itr.input.Next() - if p != nil { - p = p.Clone() - } - - select { - case <-itr.closing: - return - case itr.ch <- unsignedPointError{point: p, err: err}: - } - } -} - -type unsignedPointError struct { - point *UnsignedPoint - err error -} - -// unsignedLimitIterator represents an iterator that limits points per group. -type unsignedLimitIterator struct { - input UnsignedIterator - opt IteratorOptions - n int - - prev struct { - name string - tags Tags - } -} - -// newUnsignedLimitIterator returns a new instance of unsignedLimitIterator. -func newUnsignedLimitIterator(input UnsignedIterator, opt IteratorOptions) *unsignedLimitIterator { - return &unsignedLimitIterator{ - input: input, - opt: opt, - } -} - -// Stats returns stats from the underlying iterator. -func (itr *unsignedLimitIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the underlying iterators. -func (itr *unsignedLimitIterator) Close() error { return itr.input.Close() } - -// Next returns the next point from the iterator. -func (itr *unsignedLimitIterator) Next() (*UnsignedPoint, error) { - for { - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Reset window and counter if a new window is encountered. - if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { - itr.prev.name = p.Name - itr.prev.tags = p.Tags - itr.n = 0 - } - - // Increment counter. - itr.n++ - - // Read next point if not beyond the offset. - if itr.n <= itr.opt.Offset { - continue - } - - // Read next point if we're beyond the limit. - if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { - continue - } - - return p, nil - } -} - -type unsignedFillIterator struct { - input *bufUnsignedIterator - prev UnsignedPoint - startTime int64 - endTime int64 - auxFields []interface{} - init bool - opt IteratorOptions - - window struct { - name string - tags Tags - time int64 - offset int64 - } -} - -func newUnsignedFillIterator(input UnsignedIterator, expr influxql.Expr, opt IteratorOptions) *unsignedFillIterator { - if opt.Fill == influxql.NullFill { - if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" { - opt.Fill = influxql.NumberFill - opt.FillValue = uint64(0) - } - } - - var startTime, endTime int64 - if opt.Ascending { - startTime, _ = opt.Window(opt.StartTime) - endTime, _ = opt.Window(opt.EndTime) - } else { - startTime, _ = opt.Window(opt.EndTime) - endTime, _ = opt.Window(opt.StartTime) - } - - var auxFields []interface{} - if len(opt.Aux) > 0 { - auxFields = make([]interface{}, len(opt.Aux)) - } - - return &unsignedFillIterator{ - input: newBufUnsignedIterator(input), - prev: UnsignedPoint{Nil: true}, - startTime: startTime, - endTime: endTime, - auxFields: auxFields, - opt: opt, - } -} - -func (itr *unsignedFillIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *unsignedFillIterator) Close() error { return itr.input.Close() } - -func (itr *unsignedFillIterator) Next() (*UnsignedPoint, error) { - if !itr.init { - p, err := itr.input.peek() - if p == nil || err != nil { - return nil, err - } - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - if itr.startTime == influxql.MinTime { - itr.window.time, _ = itr.opt.Window(p.Time) - } - if itr.opt.Location != nil { - _, itr.window.offset = itr.opt.Zone(itr.window.time) - } - itr.init = true - } - - p, err := itr.input.Next() - if err != nil { - return nil, err - } - - // Check if the next point is outside of our window or is nil. 
- if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { - // If we are inside of an interval, unread the point and continue below to - // constructing a new point. - if itr.opt.Ascending && itr.window.time <= itr.endTime { - itr.input.unread(p) - p = nil - goto CONSTRUCT - } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { - itr.input.unread(p) - p = nil - goto CONSTRUCT - } - - // We are *not* in a current interval. If there is no next point, - // we are at the end of all intervals. - if p == nil { - return nil, nil - } - - // Set the new interval. - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - if itr.window.time == influxql.MinTime { - itr.window.time, _ = itr.opt.Window(p.Time) - } - if itr.opt.Location != nil { - _, itr.window.offset = itr.opt.Zone(itr.window.time) - } - itr.prev = UnsignedPoint{Nil: true} - } - - // Check if the point is our next expected point. -CONSTRUCT: - if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { - if p != nil { - itr.input.unread(p) - } - - p = &UnsignedPoint{ - Name: itr.window.name, - Tags: itr.window.tags, - Time: itr.window.time, - Aux: itr.auxFields, - } - - switch itr.opt.Fill { - case influxql.LinearFill: - if !itr.prev.Nil { - next, err := itr.input.peek() - if err != nil { - return nil, err - } else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() { - interval := int64(itr.opt.Interval.Duration) - start := itr.window.time / interval - p.Value = linearUnsigned(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value) - } else { - p.Nil = true - } - } else { - p.Nil = true - } - - case influxql.NullFill: - p.Nil = true - case influxql.NumberFill: - p.Value, _ = castToUnsigned(itr.opt.FillValue) - case influxql.PreviousFill: - if !itr.prev.Nil { - p.Value = itr.prev.Value - p.Nil = itr.prev.Nil - } else { - p.Nil = true - } - } - } else { - itr.prev = *p - } - - // Advance the expected time. Do not advance to a new window here - // as there may be lingering points with the same timestamp in the previous - // window. - if itr.opt.Ascending { - itr.window.time += int64(itr.opt.Interval.Duration) - } else { - itr.window.time -= int64(itr.opt.Interval.Duration) - } - - // Check to see if we have passed over an offset change and adjust the time - // to account for this new offset. - if itr.opt.Location != nil { - if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { - diff := itr.window.offset - offset - if abs(diff) < int64(itr.opt.Interval.Duration) { - itr.window.time += diff - } - itr.window.offset = offset - } - } - return p, nil -} - -// unsignedIntervalIterator represents a unsigned implementation of IntervalIterator. 
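For `influxql.LinearFill`, the fill iterator above interpolates a missing window from the previous and next real points via `linearUnsigned`. A rough float-valued sketch of that interpolation is shown below; `linearFill` is a hypothetical helper, and the real code first divides timestamps by the interval duration to turn them into window indexes.

```go
package main

import "fmt"

// linearFill interpolates a value for the missing window at index t, given
// the surrounding known windows (t0, v0) and (t1, v1).
func linearFill(t, t0, t1 int64, v0, v1 float64) float64 {
	if t1 == t0 {
		return v0
	}
	slope := (v1 - v0) / float64(t1-t0)
	return v0 + slope*float64(t-t0)
}

func main() {
	// Windows 0 and 3 have values 10 and 40; windows 1 and 2 are missing.
	for t := int64(1); t <= 2; t++ {
		fmt.Println(linearFill(t, 0, 3, 10, 40)) // 20, then 30
	}
}
```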
-type unsignedIntervalIterator struct { - input UnsignedIterator - opt IteratorOptions -} - -func newUnsignedIntervalIterator(input UnsignedIterator, opt IteratorOptions) *unsignedIntervalIterator { - return &unsignedIntervalIterator{input: input, opt: opt} -} - -func (itr *unsignedIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *unsignedIntervalIterator) Close() error { return itr.input.Close() } - -func (itr *unsignedIntervalIterator) Next() (*UnsignedPoint, error) { - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - p.Time, _ = itr.opt.Window(p.Time) - // If we see the minimum allowable time, set the time to zero so we don't - // break the default returned time for aggregate queries without times. - if p.Time == influxql.MinTime { - p.Time = 0 - } - return p, nil -} - -// unsignedInterruptIterator represents a unsigned implementation of InterruptIterator. -type unsignedInterruptIterator struct { - input UnsignedIterator - closing <-chan struct{} - count int -} - -func newUnsignedInterruptIterator(input UnsignedIterator, closing <-chan struct{}) *unsignedInterruptIterator { - return &unsignedInterruptIterator{input: input, closing: closing} -} - -func (itr *unsignedInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *unsignedInterruptIterator) Close() error { return itr.input.Close() } - -func (itr *unsignedInterruptIterator) Next() (*UnsignedPoint, error) { - // Only check if the channel is closed every N points. This - // intentionally checks on both 0 and N so that if the iterator - // has been interrupted before the first point is emitted it will - // not emit any points. - if itr.count&0xFF == 0xFF { - select { - case <-itr.closing: - return nil, itr.Close() - default: - // Reset iterator count to zero and fall through to emit the next point. - itr.count = 0 - } - } - - // Increment the counter for every point read. - itr.count++ - return itr.input.Next() -} - -// unsignedCloseInterruptIterator represents a unsigned implementation of CloseInterruptIterator. -type unsignedCloseInterruptIterator struct { - input UnsignedIterator - closing <-chan struct{} - done chan struct{} - once sync.Once -} - -func newUnsignedCloseInterruptIterator(input UnsignedIterator, closing <-chan struct{}) *unsignedCloseInterruptIterator { - itr := &unsignedCloseInterruptIterator{ - input: input, - closing: closing, - done: make(chan struct{}), - } - go itr.monitor() - return itr -} - -func (itr *unsignedCloseInterruptIterator) monitor() { - select { - case <-itr.closing: - itr.Close() - case <-itr.done: - } -} - -func (itr *unsignedCloseInterruptIterator) Stats() IteratorStats { - return itr.input.Stats() -} - -func (itr *unsignedCloseInterruptIterator) Close() error { - itr.once.Do(func() { - close(itr.done) - itr.input.Close() - }) - return nil -} - -func (itr *unsignedCloseInterruptIterator) Next() (*UnsignedPoint, error) { - p, err := itr.input.Next() - if err != nil { - // Check if the iterator was closed. - select { - case <-itr.done: - return nil, nil - default: - return nil, err - } - } - return p, nil -} - -// unsignedReduceFloatIterator executes a reducer for every interval and buffers the result. 
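The interrupt iterator above keeps its hot path cheap by selecting on the closing channel only once every 256 points, via the `itr.count&0xFF == 0xFF` mask. The toy program below simply counts how often that mask fires; `shouldCheck` is a made-up name, and the real iterator also resets its counter to zero after each poll.

```go
package main

import "fmt"

// shouldCheck reports whether the interrupt channel should be polled for the
// given running point count, matching the mask used by the interrupt iterator.
func shouldCheck(count int) bool {
	return count&0xFF == 0xFF
}

func main() {
	checks := 0
	for count := 0; count < 1024; count++ {
		if shouldCheck(count) {
			checks++
		}
	}
	fmt.Println(checks) // 4: one poll per 256 points read
}
```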
-type unsignedReduceFloatIterator struct { - input *bufUnsignedIterator - create func() (UnsignedPointAggregator, FloatPointEmitter) - dims []string - opt IteratorOptions - points []FloatPoint - keepTags bool -} - -func newUnsignedReduceFloatIterator(input UnsignedIterator, opt IteratorOptions, createFn func() (UnsignedPointAggregator, FloatPointEmitter)) *unsignedReduceFloatIterator { - return &unsignedReduceFloatIterator{ - input: newBufUnsignedIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *unsignedReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *unsignedReduceFloatIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *unsignedReduceFloatIterator) Next() (*FloatPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// unsignedReduceFloatPoint stores the reduced data for a name/tag combination. -type unsignedReduceFloatPoint struct { - Name string - Tags Tags - Aggregator UnsignedPointAggregator - Emitter FloatPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *unsignedReduceFloatIterator) reduce() ([]FloatPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*unsignedReduceFloatPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &unsignedReduceFloatPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateUnsigned(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. 
- if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]FloatPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = floatPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// unsignedStreamFloatIterator streams inputs into the iterator and emits points gradually. -type unsignedStreamFloatIterator struct { - input *bufUnsignedIterator - create func() (UnsignedPointAggregator, FloatPointEmitter) - dims []string - opt IteratorOptions - m map[string]*unsignedReduceFloatPoint - points []FloatPoint -} - -// newUnsignedStreamFloatIterator returns a new instance of unsignedStreamFloatIterator. -func newUnsignedStreamFloatIterator(input UnsignedIterator, createFn func() (UnsignedPointAggregator, FloatPointEmitter), opt IteratorOptions) *unsignedStreamFloatIterator { - return &unsignedStreamFloatIterator{ - input: newBufUnsignedIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*unsignedReduceFloatPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *unsignedStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *unsignedStreamFloatIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *unsignedStreamFloatIterator) Next() (*FloatPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *unsignedStreamFloatIterator) reduce() ([]FloatPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []FloatPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. 
- itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &unsignedReduceFloatPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateUnsigned(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// unsignedReduceIntegerIterator executes a reducer for every interval and buffers the result. -type unsignedReduceIntegerIterator struct { - input *bufUnsignedIterator - create func() (UnsignedPointAggregator, IntegerPointEmitter) - dims []string - opt IteratorOptions - points []IntegerPoint - keepTags bool -} - -func newUnsignedReduceIntegerIterator(input UnsignedIterator, opt IteratorOptions, createFn func() (UnsignedPointAggregator, IntegerPointEmitter)) *unsignedReduceIntegerIterator { - return &unsignedReduceIntegerIterator{ - input: newBufUnsignedIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *unsignedReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *unsignedReduceIntegerIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *unsignedReduceIntegerIterator) Next() (*IntegerPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// unsignedReduceIntegerPoint stores the reduced data for a name/tag combination. -type unsignedReduceIntegerPoint struct { - Name string - Tags Tags - Aggregator UnsignedPointAggregator - Emitter IntegerPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *unsignedReduceIntegerIterator) reduce() ([]IntegerPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*unsignedReduceIntegerPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. 
- if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &unsignedReduceIntegerPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateUnsigned(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]IntegerPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = integerPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// unsignedStreamIntegerIterator streams inputs into the iterator and emits points gradually. -type unsignedStreamIntegerIterator struct { - input *bufUnsignedIterator - create func() (UnsignedPointAggregator, IntegerPointEmitter) - dims []string - opt IteratorOptions - m map[string]*unsignedReduceIntegerPoint - points []IntegerPoint -} - -// newUnsignedStreamIntegerIterator returns a new instance of unsignedStreamIntegerIterator. -func newUnsignedStreamIntegerIterator(input UnsignedIterator, createFn func() (UnsignedPointAggregator, IntegerPointEmitter), opt IteratorOptions) *unsignedStreamIntegerIterator { - return &unsignedStreamIntegerIterator{ - input: newBufUnsignedIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*unsignedReduceIntegerPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *unsignedStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *unsignedStreamIntegerIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *unsignedStreamIntegerIterator) Next() (*IntegerPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. 
-// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *unsignedStreamIntegerIterator) reduce() ([]IntegerPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []IntegerPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &unsignedReduceIntegerPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateUnsigned(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// unsignedReduceUnsignedIterator executes a reducer for every interval and buffers the result. -type unsignedReduceUnsignedIterator struct { - input *bufUnsignedIterator - create func() (UnsignedPointAggregator, UnsignedPointEmitter) - dims []string - opt IteratorOptions - points []UnsignedPoint - keepTags bool -} - -func newUnsignedReduceUnsignedIterator(input UnsignedIterator, opt IteratorOptions, createFn func() (UnsignedPointAggregator, UnsignedPointEmitter)) *unsignedReduceUnsignedIterator { - return &unsignedReduceUnsignedIterator{ - input: newBufUnsignedIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *unsignedReduceUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *unsignedReduceUnsignedIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *unsignedReduceUnsignedIterator) Next() (*UnsignedPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// unsignedReduceUnsignedPoint stores the reduced data for a name/tag combination. -type unsignedReduceUnsignedPoint struct { - Name string - Tags Tags - Aggregator UnsignedPointAggregator - Emitter UnsignedPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *unsignedReduceUnsignedIterator) reduce() ([]UnsignedPoint, error) { - // Calculate next window. 
- var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*unsignedReduceUnsignedPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &unsignedReduceUnsignedPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateUnsigned(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]UnsignedPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = unsignedPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// unsignedStreamUnsignedIterator streams inputs into the iterator and emits points gradually. -type unsignedStreamUnsignedIterator struct { - input *bufUnsignedIterator - create func() (UnsignedPointAggregator, UnsignedPointEmitter) - dims []string - opt IteratorOptions - m map[string]*unsignedReduceUnsignedPoint - points []UnsignedPoint -} - -// newUnsignedStreamUnsignedIterator returns a new instance of unsignedStreamUnsignedIterator. 
-func newUnsignedStreamUnsignedIterator(input UnsignedIterator, createFn func() (UnsignedPointAggregator, UnsignedPointEmitter), opt IteratorOptions) *unsignedStreamUnsignedIterator { - return &unsignedStreamUnsignedIterator{ - input: newBufUnsignedIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*unsignedReduceUnsignedPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *unsignedStreamUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *unsignedStreamUnsignedIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *unsignedStreamUnsignedIterator) Next() (*UnsignedPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *unsignedStreamUnsignedIterator) reduce() ([]UnsignedPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []UnsignedPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &unsignedReduceUnsignedPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateUnsigned(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// unsignedReduceStringIterator executes a reducer for every interval and buffers the result. -type unsignedReduceStringIterator struct { - input *bufUnsignedIterator - create func() (UnsignedPointAggregator, StringPointEmitter) - dims []string - opt IteratorOptions - points []StringPoint - keepTags bool -} - -func newUnsignedReduceStringIterator(input UnsignedIterator, opt IteratorOptions, createFn func() (UnsignedPointAggregator, StringPointEmitter)) *unsignedReduceStringIterator { - return &unsignedReduceStringIterator{ - input: newBufUnsignedIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. 
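The stream iterators group their aggregators by a key built from the measurement name plus, when group-by tags are present, a NUL separator and the tag ID, so the name and tag portions of the key cannot run together. A small sketch of that keying and of the per-series aggregator map follows; `seriesKey` and the running-sum map are illustrative assumptions, not code from this file.

```go
package main

import "fmt"

// seriesKey builds the grouping key used by the stream iterators: the
// measurement name, and, when group-by tags exist, a NUL byte plus the tag ID.
func seriesKey(name, tagID string) string {
	if tagID == "" {
		return name
	}
	return name + "\x00" + tagID
}

func main() {
	// A tiny stand-in for the aggregator map: one running sum per series.
	sums := make(map[string]uint64)
	type pt struct {
		name, tagID string
		value       uint64
	}
	for _, p := range []pt{
		{"cpu", "host=a", 1},
		{"cpu", "host=b", 2},
		{"cpu", "host=a", 3},
	} {
		sums[seriesKey(p.name, p.tagID)] += p.value
	}
	fmt.Println(sums["cpu\x00host=a"], sums["cpu\x00host=b"]) // 4 2
}
```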
-func (itr *unsignedReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *unsignedReduceStringIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *unsignedReduceStringIterator) Next() (*StringPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// unsignedReduceStringPoint stores the reduced data for a name/tag combination. -type unsignedReduceStringPoint struct { - Name string - Tags Tags - Aggregator UnsignedPointAggregator - Emitter StringPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *unsignedReduceStringIterator) reduce() ([]StringPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*unsignedReduceStringPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &unsignedReduceStringPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateUnsigned(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]StringPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. 
- if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = stringPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// unsignedStreamStringIterator streams inputs into the iterator and emits points gradually. -type unsignedStreamStringIterator struct { - input *bufUnsignedIterator - create func() (UnsignedPointAggregator, StringPointEmitter) - dims []string - opt IteratorOptions - m map[string]*unsignedReduceStringPoint - points []StringPoint -} - -// newUnsignedStreamStringIterator returns a new instance of unsignedStreamStringIterator. -func newUnsignedStreamStringIterator(input UnsignedIterator, createFn func() (UnsignedPointAggregator, StringPointEmitter), opt IteratorOptions) *unsignedStreamStringIterator { - return &unsignedStreamStringIterator{ - input: newBufUnsignedIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*unsignedReduceStringPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *unsignedStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *unsignedStreamStringIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *unsignedStreamStringIterator) Next() (*StringPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *unsignedStreamStringIterator) reduce() ([]StringPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []StringPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &unsignedReduceStringPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateUnsigned(curr) - - // Attempt to emit points from the aggregator. 
- points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// unsignedReduceBooleanIterator executes a reducer for every interval and buffers the result. -type unsignedReduceBooleanIterator struct { - input *bufUnsignedIterator - create func() (UnsignedPointAggregator, BooleanPointEmitter) - dims []string - opt IteratorOptions - points []BooleanPoint - keepTags bool -} - -func newUnsignedReduceBooleanIterator(input UnsignedIterator, opt IteratorOptions, createFn func() (UnsignedPointAggregator, BooleanPointEmitter)) *unsignedReduceBooleanIterator { - return &unsignedReduceBooleanIterator{ - input: newBufUnsignedIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *unsignedReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *unsignedReduceBooleanIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *unsignedReduceBooleanIterator) Next() (*BooleanPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// unsignedReduceBooleanPoint stores the reduced data for a name/tag combination. -type unsignedReduceBooleanPoint struct { - Name string - Tags Tags - Aggregator UnsignedPointAggregator - Emitter BooleanPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *unsignedReduceBooleanIterator) reduce() ([]BooleanPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*unsignedReduceBooleanPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. 
- rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &unsignedReduceBooleanPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateUnsigned(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]BooleanPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = booleanPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// unsignedStreamBooleanIterator streams inputs into the iterator and emits points gradually. -type unsignedStreamBooleanIterator struct { - input *bufUnsignedIterator - create func() (UnsignedPointAggregator, BooleanPointEmitter) - dims []string - opt IteratorOptions - m map[string]*unsignedReduceBooleanPoint - points []BooleanPoint -} - -// newUnsignedStreamBooleanIterator returns a new instance of unsignedStreamBooleanIterator. -func newUnsignedStreamBooleanIterator(input UnsignedIterator, createFn func() (UnsignedPointAggregator, BooleanPointEmitter), opt IteratorOptions) *unsignedStreamBooleanIterator { - return &unsignedStreamBooleanIterator{ - input: newBufUnsignedIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*unsignedReduceBooleanPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *unsignedStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *unsignedStreamBooleanIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *unsignedStreamBooleanIterator) Next() (*BooleanPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *unsignedStreamBooleanIterator) reduce() ([]BooleanPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. 
- var points []BooleanPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &unsignedReduceBooleanPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateUnsigned(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// unsignedDedupeIterator only outputs unique points. -// This differs from the DistinctIterator in that it compares all aux fields too. -// This iterator is relatively inefficient and should only be used on small -// datasets such as meta query results. -type unsignedDedupeIterator struct { - input UnsignedIterator - m map[string]struct{} // lookup of points already sent -} - -type unsignedIteratorMapper struct { - cur Cursor - row Row - driver IteratorMap // which iterator to use for the primary value, can be nil - fields []IteratorMap // which iterator to use for an aux field - point UnsignedPoint -} - -func newUnsignedIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *unsignedIteratorMapper { - return &unsignedIteratorMapper{ - cur: cur, - driver: driver, - fields: fields, - point: UnsignedPoint{ - Aux: make([]interface{}, len(fields)), - }, - } -} - -func (itr *unsignedIteratorMapper) Next() (*UnsignedPoint, error) { - if !itr.cur.Scan(&itr.row) { - if err := itr.cur.Err(); err != nil { - return nil, err - } - return nil, nil - } - - itr.point.Time = itr.row.Time - itr.point.Name = itr.row.Series.Name - itr.point.Tags = itr.row.Series.Tags - - if itr.driver != nil { - if v := itr.driver.Value(&itr.row); v != nil { - if v, ok := castToUnsigned(v); ok { - itr.point.Value = v - itr.point.Nil = false - } else { - itr.point.Value = 0 - itr.point.Nil = true - } - } else { - itr.point.Value = 0 - itr.point.Nil = true - } - } - for i, f := range itr.fields { - itr.point.Aux[i] = f.Value(&itr.row) - } - return &itr.point, nil -} - -func (itr *unsignedIteratorMapper) Stats() IteratorStats { - return itr.cur.Stats() -} - -func (itr *unsignedIteratorMapper) Close() error { - return itr.cur.Close() -} - -type unsignedFilterIterator struct { - input UnsignedIterator - cond influxql.Expr - opt IteratorOptions - m map[string]interface{} -} - -func newUnsignedFilterIterator(input UnsignedIterator, cond influxql.Expr, opt IteratorOptions) UnsignedIterator { - // Strip out time conditions from the WHERE clause. - // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. 
- n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node { - switch n := n.(type) { - case *influxql.BinaryExpr: - if n.LHS.String() == "time" { - return &influxql.BooleanLiteral{Val: true} - } - } - return n - }) - - cond, _ = n.(influxql.Expr) - if cond == nil { - return input - } else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val { - return input - } - - return &unsignedFilterIterator{ - input: input, - cond: cond, - opt: opt, - m: make(map[string]interface{}), - } -} - -func (itr *unsignedFilterIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *unsignedFilterIterator) Close() error { return itr.input.Close() } - -func (itr *unsignedFilterIterator) Next() (*UnsignedPoint, error) { - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } - - for i, ref := range itr.opt.Aux { - itr.m[ref.Val] = p.Aux[i] - } - for k, v := range p.Tags.KeyValues() { - itr.m[k] = v - } - - if !influxql.EvalBool(itr.cond, itr.m) { - continue - } - return p, nil - } -} - -type unsignedTagSubsetIterator struct { - input UnsignedIterator - point UnsignedPoint - lastTags Tags - dimensions []string -} - -func newUnsignedTagSubsetIterator(input UnsignedIterator, opt IteratorOptions) *unsignedTagSubsetIterator { - return &unsignedTagSubsetIterator{ - input: input, - dimensions: opt.GetDimensions(), - } -} - -func (itr *unsignedTagSubsetIterator) Next() (*UnsignedPoint, error) { - p, err := itr.input.Next() - if err != nil { - return nil, err - } else if p == nil { - return nil, nil - } - - itr.point.Name = p.Name - if !p.Tags.Equal(itr.lastTags) { - itr.point.Tags = p.Tags.Subset(itr.dimensions) - itr.lastTags = p.Tags - } - itr.point.Time = p.Time - itr.point.Value = p.Value - itr.point.Aux = p.Aux - itr.point.Aggregated = p.Aggregated - itr.point.Nil = p.Nil - return &itr.point, nil -} - -func (itr *unsignedTagSubsetIterator) Stats() IteratorStats { - return itr.input.Stats() -} - -func (itr *unsignedTagSubsetIterator) Close() error { - return itr.input.Close() -} - -// newUnsignedDedupeIterator returns a new instance of unsignedDedupeIterator. -func newUnsignedDedupeIterator(input UnsignedIterator) *unsignedDedupeIterator { - return &unsignedDedupeIterator{ - input: input, - m: make(map[string]struct{}), - } -} - -// Stats returns stats from the input iterator. -func (itr *unsignedDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *unsignedDedupeIterator) Close() error { return itr.input.Close() } - -// Next returns the next unique point from the input iterator. -func (itr *unsignedDedupeIterator) Next() (*UnsignedPoint, error) { - for { - // Read next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Serialize to bytes to store in lookup. - buf, err := proto.Marshal(encodeUnsignedPoint(p)) - if err != nil { - return nil, err - } - - // If the point has already been output then move to the next point. - if _, ok := itr.m[string(buf)]; ok { - continue - } - - // Otherwise mark it as emitted and return point. - itr.m[string(buf)] = struct{}{} - return p, nil - } -} - -// unsignedReaderIterator represents an iterator that streams from a reader. -type unsignedReaderIterator struct { - r io.Reader - dec *UnsignedPointDecoder -} - -// newUnsignedReaderIterator returns a new instance of unsignedReaderIterator. 
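The dedupe iterator above keys each emitted point on its serialized form, so two points that differ only in their aux fields are still treated as distinct. A minimal standalone sketch of the same serialize-to-key technique, using encoding/json purely for illustration (the iterator itself marshals the point's protobuf encoding); the record type and field names here are hypothetical:

```go
// Sketch: de-duplicate a stream of records by using their serialized form
// as a map key. encoding/json stands in for the protobuf encoding used above.
package main

import (
	"encoding/json"
	"fmt"
)

type record struct {
	Name  string        `json:"name"`
	Value uint64        `json:"value"`
	Aux   []interface{} `json:"aux"`
}

func dedupe(in []record) ([]record, error) {
	seen := make(map[string]struct{})
	var out []record
	for _, r := range in {
		buf, err := json.Marshal(r) // serialize the whole record, aux fields included
		if err != nil {
			return nil, err
		}
		if _, ok := seen[string(buf)]; ok {
			continue // an identical record was already emitted
		}
		seen[string(buf)] = struct{}{}
		out = append(out, r)
	}
	return out, nil
}

func main() {
	in := []record{
		{Name: "cpu", Value: 1},
		{Name: "cpu", Value: 1}, // duplicate, dropped
		{Name: "cpu", Value: 2},
	}
	out, _ := dedupe(in)
	fmt.Println(out) // [{cpu 1 []} {cpu 2 []}]
}
```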
-func newUnsignedReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *unsignedReaderIterator { - dec := NewUnsignedPointDecoder(ctx, r) - dec.stats = stats - - return &unsignedReaderIterator{ - r: r, - dec: dec, - } -} - -// Stats returns stats about points processed. -func (itr *unsignedReaderIterator) Stats() IteratorStats { return itr.dec.stats } - -// Close closes the underlying reader, if applicable. -func (itr *unsignedReaderIterator) Close() error { - if r, ok := itr.r.(io.ReadCloser); ok { - return r.Close() - } - return nil -} - -// Next returns the next point from the iterator. -func (itr *unsignedReaderIterator) Next() (*UnsignedPoint, error) { - // OPTIMIZE(benbjohnson): Reuse point on iterator. - - // Unmarshal next point. - p := &UnsignedPoint{} - if err := itr.dec.DecodeUnsignedPoint(p); err == io.EOF { - return nil, nil - } else if err != nil { - return nil, err - } - return p, nil -} - -// StringIterator represents a stream of string points. -type StringIterator interface { - Iterator - Next() (*StringPoint, error) -} - -// newStringIterators converts a slice of Iterator to a slice of StringIterator. -// Drop and closes any iterator in itrs that is not a StringIterator and cannot -// be cast to a StringIterator. -func newStringIterators(itrs []Iterator) []StringIterator { - a := make([]StringIterator, 0, len(itrs)) - for _, itr := range itrs { - switch itr := itr.(type) { - case StringIterator: - a = append(a, itr) - default: - itr.Close() - } - } - return a -} - -// bufStringIterator represents a buffered StringIterator. -type bufStringIterator struct { - itr StringIterator - buf *StringPoint -} - -// newBufStringIterator returns a buffered StringIterator. -func newBufStringIterator(itr StringIterator) *bufStringIterator { - return &bufStringIterator{itr: itr} -} - -// Stats returns statistics from the input iterator. -func (itr *bufStringIterator) Stats() IteratorStats { return itr.itr.Stats() } - -// Close closes the underlying iterator. -func (itr *bufStringIterator) Close() error { return itr.itr.Close() } - -// peek returns the next point without removing it from the iterator. -func (itr *bufStringIterator) peek() (*StringPoint, error) { - p, err := itr.Next() - if err != nil { - return nil, err - } - itr.unread(p) - return p, nil -} - -// peekTime returns the time of the next point. -// Returns zero time if no more points available. -func (itr *bufStringIterator) peekTime() (int64, error) { - p, err := itr.peek() - if p == nil || err != nil { - return ZeroTime, err - } - return p.Time, nil -} - -// Next returns the current buffer, if exists, or calls the underlying iterator. -func (itr *bufStringIterator) Next() (*StringPoint, error) { - buf := itr.buf - if buf != nil { - itr.buf = nil - return buf, nil - } - return itr.itr.Next() -} - -// NextInWindow returns the next value if it is between [startTime, endTime). -// If the next value is outside the range then it is moved to the buffer. -func (itr *bufStringIterator) NextInWindow(startTime, endTime int64) (*StringPoint, error) { - v, err := itr.Next() - if v == nil || err != nil { - return nil, err - } else if t := v.Time; t >= endTime || t < startTime { - itr.unread(v) - return nil, nil - } - return v, nil -} - -// unread sets v to the buffer. It is read on the next call to Next(). -func (itr *bufStringIterator) unread(v *StringPoint) { itr.buf = v } - -// stringMergeIterator represents an iterator that combines multiple string iterators. 
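The buffered iterators above wrap a source in a single-element pushback buffer so that peek, unread, and NextInWindow can inspect a point without consuming it. A minimal sketch of that idea over a slice-backed source, with purely illustrative names:

```go
// Sketch of a single-point pushback buffer: Next drains the buffer first,
// unread refills it, and peek is simply Next followed by unread.
package main

import "fmt"

type sliceIterator struct {
	vals []int
	i    int
}

func (it *sliceIterator) Next() (int, bool) {
	if it.i >= len(it.vals) {
		return 0, false
	}
	v := it.vals[it.i]
	it.i++
	return v, true
}

type bufIterator struct {
	itr *sliceIterator
	buf *int // holds at most one pushed-back value
}

func (b *bufIterator) Next() (int, bool) {
	if b.buf != nil {
		v := *b.buf
		b.buf = nil
		return v, true
	}
	return b.itr.Next()
}

func (b *bufIterator) unread(v int) { b.buf = &v }

func (b *bufIterator) peek() (int, bool) {
	v, ok := b.Next()
	if ok {
		b.unread(v)
	}
	return v, ok
}

func main() {
	b := &bufIterator{itr: &sliceIterator{vals: []int{1, 2, 3}}}
	p, _ := b.peek()
	fmt.Println(p) // 1 (still available to Next)
	v, _ := b.Next()
	fmt.Println(v) // 1
}
```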
-type stringMergeIterator struct { - inputs []StringIterator - heap *stringMergeHeap - init bool - - closed bool - mu sync.RWMutex - - // Current iterator and window. - curr *stringMergeHeapItem - window struct { - name string - tags string - startTime int64 - endTime int64 - } -} - -// newStringMergeIterator returns a new instance of stringMergeIterator. -func newStringMergeIterator(inputs []StringIterator, opt IteratorOptions) *stringMergeIterator { - itr := &stringMergeIterator{ - inputs: inputs, - heap: &stringMergeHeap{ - items: make([]*stringMergeHeapItem, 0, len(inputs)), - opt: opt, - }, - } - - // Initialize heap items. - for _, input := range inputs { - // Wrap in buffer, ignore any inputs without anymore points. - bufInput := newBufStringIterator(input) - - // Append to the heap. - itr.heap.items = append(itr.heap.items, &stringMergeHeapItem{itr: bufInput}) - } - - return itr -} - -// Stats returns an aggregation of stats from the underlying iterators. -func (itr *stringMergeIterator) Stats() IteratorStats { - var stats IteratorStats - for _, input := range itr.inputs { - stats.Add(input.Stats()) - } - return stats -} - -// Close closes the underlying iterators. -func (itr *stringMergeIterator) Close() error { - itr.mu.Lock() - defer itr.mu.Unlock() - - for _, input := range itr.inputs { - input.Close() - } - itr.curr = nil - itr.inputs = nil - itr.heap.items = nil - itr.closed = true - return nil -} - -// Next returns the next point from the iterator. -func (itr *stringMergeIterator) Next() (*StringPoint, error) { - itr.mu.RLock() - defer itr.mu.RUnlock() - if itr.closed { - return nil, nil - } - - // Initialize the heap. This needs to be done lazily on the first call to this iterator - // so that iterator initialization done through the Select() call returns quickly. - // Queries can only be interrupted after the Select() call completes so any operations - // done during iterator creation cannot be interrupted, which is why we do it here - // instead so an interrupt can happen while initializing the heap. - if !itr.init { - items := itr.heap.items - itr.heap.items = make([]*stringMergeHeapItem, 0, len(items)) - for _, item := range items { - if p, err := item.itr.peek(); err != nil { - return nil, err - } else if p == nil { - continue - } - itr.heap.items = append(itr.heap.items, item) - } - heap.Init(itr.heap) - itr.init = true - } - - for { - // Retrieve the next iterator if we don't have one. - if itr.curr == nil { - if len(itr.heap.items) == 0 { - return nil, nil - } - itr.curr = heap.Pop(itr.heap).(*stringMergeHeapItem) - - // Read point and set current window. - p, err := itr.curr.itr.Next() - if err != nil { - return nil, err - } - tags := p.Tags.Subset(itr.heap.opt.Dimensions) - itr.window.name, itr.window.tags = p.Name, tags.ID() - itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) - return p, nil - } - - // Read the next point from the current iterator. - p, err := itr.curr.itr.Next() - if err != nil { - return nil, err - } - - // If there are no more points then remove iterator from heap and find next. - if p == nil { - itr.curr = nil - continue - } - - // Check if the point is inside of our current window. 
- inWindow := true - if window := itr.window; window.name != p.Name { - inWindow = false - } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { - inWindow = false - } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { - inWindow = false - } else if !opt.Ascending && p.Time < window.startTime { - inWindow = false - } - - // If it's outside our window then push iterator back on the heap and find new iterator. - if !inWindow { - itr.curr.itr.unread(p) - heap.Push(itr.heap, itr.curr) - itr.curr = nil - continue - } - - return p, nil - } -} - -// stringMergeHeap represents a heap of stringMergeHeapItems. -// Items are sorted by their next window and then by name/tags. -type stringMergeHeap struct { - opt IteratorOptions - items []*stringMergeHeapItem -} - -func (h *stringMergeHeap) Len() int { return len(h.items) } -func (h *stringMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h *stringMergeHeap) Less(i, j int) bool { - x, err := h.items[i].itr.peek() - if err != nil { - return true - } - y, err := h.items[j].itr.peek() - if err != nil { - return false - } - - if h.opt.Ascending { - if x.Name != y.Name { - return x.Name < y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { - return xTags.ID() < yTags.ID() - } - } else { - if x.Name != y.Name { - return x.Name > y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { - return xTags.ID() > yTags.ID() - } - } - - xt, _ := h.opt.Window(x.Time) - yt, _ := h.opt.Window(y.Time) - - if h.opt.Ascending { - return xt < yt - } - return xt > yt -} - -func (h *stringMergeHeap) Push(x interface{}) { - h.items = append(h.items, x.(*stringMergeHeapItem)) -} - -func (h *stringMergeHeap) Pop() interface{} { - old := h.items - n := len(old) - item := old[n-1] - h.items = old[0 : n-1] - return item -} - -type stringMergeHeapItem struct { - itr *bufStringIterator -} - -// stringSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. -type stringSortedMergeIterator struct { - inputs []StringIterator - heap *stringSortedMergeHeap - init bool -} - -// newStringSortedMergeIterator returns an instance of stringSortedMergeIterator. -func newStringSortedMergeIterator(inputs []StringIterator, opt IteratorOptions) Iterator { - itr := &stringSortedMergeIterator{ - inputs: inputs, - heap: &stringSortedMergeHeap{ - items: make([]*stringSortedMergeHeapItem, 0, len(inputs)), - opt: opt, - }, - } - - // Initialize heap items. - for _, input := range inputs { - // Append to the heap. - itr.heap.items = append(itr.heap.items, &stringSortedMergeHeapItem{itr: input}) - } - - return itr -} - -// Stats returns an aggregation of stats from the underlying iterators. -func (itr *stringSortedMergeIterator) Stats() IteratorStats { - var stats IteratorStats - for _, input := range itr.inputs { - stats.Add(input.Stats()) - } - return stats -} - -// Close closes the underlying iterators. -func (itr *stringSortedMergeIterator) Close() error { - for _, input := range itr.inputs { - input.Close() - } - return nil -} - -// Next returns the next points from the iterator. -func (itr *stringSortedMergeIterator) Next() (*StringPoint, error) { return itr.pop() } - -// pop returns the next point from the heap. -// Reads the next point from item's cursor and puts it back on the heap. 
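The merge iterator above keeps one buffered input per heap item and relies on container/heap to surface whichever input's next window sorts first under the Less method. A standalone sketch of the heap plumbing involved (Len/Less/Swap/Push/Pop plus heap.Init), with a deliberately simplified item type:

```go
// Sketch of the container/heap plumbing used by the merge iterators:
// implement heap.Interface, then let heap.Init/Push/Pop maintain the order
// defined by Less.
package main

import (
	"container/heap"
	"fmt"
)

type item struct{ key int }

type itemHeap []*item

func (h itemHeap) Len() int            { return len(h) }
func (h itemHeap) Less(i, j int) bool  { return h[i].key < h[j].key }
func (h itemHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *itemHeap) Push(x interface{}) { *h = append(*h, x.(*item)) }
func (h *itemHeap) Pop() interface{} {
	old := *h
	n := len(old)
	it := old[n-1]
	*h = old[:n-1]
	return it
}

func main() {
	h := &itemHeap{{3}, {1}, {2}}
	heap.Init(h) // establish heap order over the existing items
	heap.Push(h, &item{0})
	for h.Len() > 0 {
		fmt.Println(heap.Pop(h).(*item).key) // 0 1 2 3
	}
}
```

As in the iterators above, Pop removes the last slice element because container/heap has already swapped the smallest item to the end before calling it.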
-func (itr *stringSortedMergeIterator) pop() (*StringPoint, error) { - // Initialize the heap. See the MergeIterator to see why this has to be done lazily. - if !itr.init { - items := itr.heap.items - itr.heap.items = make([]*stringSortedMergeHeapItem, 0, len(items)) - for _, item := range items { - var err error - if item.point, err = item.itr.Next(); err != nil { - return nil, err - } else if item.point == nil { - continue - } - itr.heap.items = append(itr.heap.items, item) - } - heap.Init(itr.heap) - itr.init = true - } - - if len(itr.heap.items) == 0 { - return nil, nil - } - - // Read the next item from the heap. - item := heap.Pop(itr.heap).(*stringSortedMergeHeapItem) - if item.err != nil { - return nil, item.err - } else if item.point == nil { - return nil, nil - } - - // Copy the point for return. - p := item.point.Clone() - - // Read the next item from the cursor. Push back to heap if one exists. - if item.point, item.err = item.itr.Next(); item.point != nil { - heap.Push(itr.heap, item) - } - - return p, nil -} - -// stringSortedMergeHeap represents a heap of stringSortedMergeHeapItems. -// Items are sorted with the following priority: -// - By their measurement name; -// - By their tag keys/values; -// - By time; or -// - By their Aux field values. -type stringSortedMergeHeap struct { - opt IteratorOptions - items []*stringSortedMergeHeapItem -} - -func (h *stringSortedMergeHeap) Len() int { return len(h.items) } -func (h *stringSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h *stringSortedMergeHeap) Less(i, j int) bool { - x, y := h.items[i].point, h.items[j].point - - if h.opt.Ascending { - if x.Name != y.Name { - return x.Name < y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { - return xTags.ID() < yTags.ID() - } - - if x.Time != y.Time { - return x.Time < y.Time - } - - if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { - for i := 0; i < len(x.Aux); i++ { - v1, ok1 := x.Aux[i].(string) - v2, ok2 := y.Aux[i].(string) - if !ok1 || !ok2 { - // Unsupported types used in Aux fields. Maybe they - // need to be added here? - return false - } else if v1 == v2 { - continue - } - return v1 < v2 - } - } - return false // Times and/or Aux fields are equal. - } - - if x.Name != y.Name { - return x.Name > y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { - return xTags.ID() > yTags.ID() - } - - if x.Time != y.Time { - return x.Time > y.Time - } - - if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { - for i := 0; i < len(x.Aux); i++ { - v1, ok1 := x.Aux[i].(string) - v2, ok2 := y.Aux[i].(string) - if !ok1 || !ok2 { - // Unsupported types used in Aux fields. Maybe they - // need to be added here? - return false - } else if v1 == v2 { - continue - } - return v1 > v2 - } - } - return false // Times and/or Aux fields are equal. -} - -func (h *stringSortedMergeHeap) Push(x interface{}) { - h.items = append(h.items, x.(*stringSortedMergeHeapItem)) -} - -func (h *stringSortedMergeHeap) Pop() interface{} { - old := h.items - n := len(old) - item := old[n-1] - h.items = old[0 : n-1] - return item -} - -type stringSortedMergeHeapItem struct { - point *StringPoint - err error - itr StringIterator -} - -// stringIteratorScanner scans the results of a StringIterator into a map. 
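The sorted-merge heap above orders points by measurement name, then tag ID, then time, with aux values as a final tiebreaker, and flips every comparison when the query is descending. A compact sketch of the same multi-key comparison using sort.Slice; the point type here is illustrative:

```go
// Sketch of a multi-key ordering: compare by name, then tag key, then time,
// swapping the operands to reverse the order for descending queries.
package main

import (
	"fmt"
	"sort"
)

type point struct {
	Name string
	Tags string
	Time int64
}

func sortPoints(pts []point, ascending bool) {
	sort.Slice(pts, func(i, j int) bool {
		x, y := pts[i], pts[j]
		if !ascending {
			x, y = y, x // flip operands to reverse the order
		}
		switch {
		case x.Name != y.Name:
			return x.Name < y.Name
		case x.Tags != y.Tags:
			return x.Tags < y.Tags
		default:
			return x.Time < y.Time
		}
	})
}

func main() {
	pts := []point{{"mem", "host=b", 2}, {"cpu", "host=a", 5}, {"cpu", "host=a", 1}}
	sortPoints(pts, true)
	fmt.Println(pts) // [{cpu host=a 1} {cpu host=a 5} {mem host=b 2}]
}
```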
-type stringIteratorScanner struct { - input *bufStringIterator - err error - keys []influxql.VarRef - defaultValue interface{} -} - -// newStringIteratorScanner creates a new IteratorScanner. -func newStringIteratorScanner(input StringIterator, keys []influxql.VarRef, defaultValue interface{}) *stringIteratorScanner { - return &stringIteratorScanner{ - input: newBufStringIterator(input), - keys: keys, - defaultValue: defaultValue, - } -} - -func (s *stringIteratorScanner) Peek() (int64, string, Tags) { - if s.err != nil { - return ZeroTime, "", Tags{} - } - - p, err := s.input.peek() - if err != nil { - s.err = err - return ZeroTime, "", Tags{} - } else if p == nil { - return ZeroTime, "", Tags{} - } - return p.Time, p.Name, p.Tags -} - -func (s *stringIteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) { - if s.err != nil { - return - } - - p, err := s.input.Next() - if err != nil { - s.err = err - return - } else if p == nil { - s.useDefaults(m) - return - } else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) { - s.useDefaults(m) - s.input.unread(p) - return - } - - if k := s.keys[0]; k.Val != "" { - if p.Nil { - if s.defaultValue != SkipDefault { - m[k.Val] = castToType(s.defaultValue, k.Type) - } - } else { - m[k.Val] = p.Value - } - } - for i, v := range p.Aux { - k := s.keys[i+1] - switch v.(type) { - case float64, int64, uint64, string, bool: - m[k.Val] = v - default: - // Insert the fill value if one was specified. - if s.defaultValue != SkipDefault { - m[k.Val] = castToType(s.defaultValue, k.Type) - } - } - } -} - -func (s *stringIteratorScanner) useDefaults(m map[string]interface{}) { - if s.defaultValue == SkipDefault { - return - } - for _, k := range s.keys { - if k.Val == "" { - continue - } - m[k.Val] = castToType(s.defaultValue, k.Type) - } -} - -func (s *stringIteratorScanner) Stats() IteratorStats { return s.input.Stats() } -func (s *stringIteratorScanner) Err() error { return s.err } -func (s *stringIteratorScanner) Close() error { return s.input.Close() } - -// stringParallelIterator represents an iterator that pulls data in a separate goroutine. -type stringParallelIterator struct { - input StringIterator - ch chan stringPointError - - once sync.Once - closing chan struct{} - wg sync.WaitGroup -} - -// newStringParallelIterator returns a new instance of stringParallelIterator. -func newStringParallelIterator(input StringIterator) *stringParallelIterator { - itr := &stringParallelIterator{ - input: input, - ch: make(chan stringPointError, 256), - closing: make(chan struct{}), - } - itr.wg.Add(1) - go itr.monitor() - return itr -} - -// Stats returns stats from the underlying iterator. -func (itr *stringParallelIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the underlying iterators. -func (itr *stringParallelIterator) Close() error { - itr.once.Do(func() { close(itr.closing) }) - itr.wg.Wait() - return itr.input.Close() -} - -// Next returns the next point from the iterator. -func (itr *stringParallelIterator) Next() (*StringPoint, error) { - v, ok := <-itr.ch - if !ok { - return nil, io.EOF - } - return v.point, v.err -} - -// monitor runs in a separate goroutine and actively pulls the next point. -func (itr *stringParallelIterator) monitor() { - defer close(itr.ch) - defer itr.wg.Done() - - for { - // Read next point. 
- p, err := itr.input.Next() - if p != nil { - p = p.Clone() - } - - select { - case <-itr.closing: - return - case itr.ch <- stringPointError{point: p, err: err}: - } - } -} - -type stringPointError struct { - point *StringPoint - err error -} - -// stringLimitIterator represents an iterator that limits points per group. -type stringLimitIterator struct { - input StringIterator - opt IteratorOptions - n int - - prev struct { - name string - tags Tags - } -} - -// newStringLimitIterator returns a new instance of stringLimitIterator. -func newStringLimitIterator(input StringIterator, opt IteratorOptions) *stringLimitIterator { - return &stringLimitIterator{ - input: input, - opt: opt, - } -} - -// Stats returns stats from the underlying iterator. -func (itr *stringLimitIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the underlying iterators. -func (itr *stringLimitIterator) Close() error { return itr.input.Close() } - -// Next returns the next point from the iterator. -func (itr *stringLimitIterator) Next() (*StringPoint, error) { - for { - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Reset window and counter if a new window is encountered. - if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { - itr.prev.name = p.Name - itr.prev.tags = p.Tags - itr.n = 0 - } - - // Increment counter. - itr.n++ - - // Read next point if not beyond the offset. - if itr.n <= itr.opt.Offset { - continue - } - - // Read next point if we're beyond the limit. - if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { - continue - } - - return p, nil - } -} - -type stringFillIterator struct { - input *bufStringIterator - prev StringPoint - startTime int64 - endTime int64 - auxFields []interface{} - init bool - opt IteratorOptions - - window struct { - name string - tags Tags - time int64 - offset int64 - } -} - -func newStringFillIterator(input StringIterator, expr influxql.Expr, opt IteratorOptions) *stringFillIterator { - if opt.Fill == influxql.NullFill { - if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" { - opt.Fill = influxql.NumberFill - opt.FillValue = "" - } - } - - var startTime, endTime int64 - if opt.Ascending { - startTime, _ = opt.Window(opt.StartTime) - endTime, _ = opt.Window(opt.EndTime) - } else { - startTime, _ = opt.Window(opt.EndTime) - endTime, _ = opt.Window(opt.StartTime) - } - - var auxFields []interface{} - if len(opt.Aux) > 0 { - auxFields = make([]interface{}, len(opt.Aux)) - } - - return &stringFillIterator{ - input: newBufStringIterator(input), - prev: StringPoint{Nil: true}, - startTime: startTime, - endTime: endTime, - auxFields: auxFields, - opt: opt, - } -} - -func (itr *stringFillIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *stringFillIterator) Close() error { return itr.input.Close() } - -func (itr *stringFillIterator) Next() (*StringPoint, error) { - if !itr.init { - p, err := itr.input.peek() - if p == nil || err != nil { - return nil, err - } - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - if itr.startTime == influxql.MinTime { - itr.window.time, _ = itr.opt.Window(p.Time) - } - if itr.opt.Location != nil { - _, itr.window.offset = itr.opt.Zone(itr.window.time) - } - itr.init = true - } - - p, err := itr.input.Next() - if err != nil { - return nil, err - } - - // Check if the next point is outside of our window or is nil. 
- if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { - // If we are inside of an interval, unread the point and continue below to - // constructing a new point. - if itr.opt.Ascending && itr.window.time <= itr.endTime { - itr.input.unread(p) - p = nil - goto CONSTRUCT - } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { - itr.input.unread(p) - p = nil - goto CONSTRUCT - } - - // We are *not* in a current interval. If there is no next point, - // we are at the end of all intervals. - if p == nil { - return nil, nil - } - - // Set the new interval. - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - if itr.window.time == influxql.MinTime { - itr.window.time, _ = itr.opt.Window(p.Time) - } - if itr.opt.Location != nil { - _, itr.window.offset = itr.opt.Zone(itr.window.time) - } - itr.prev = StringPoint{Nil: true} - } - - // Check if the point is our next expected point. -CONSTRUCT: - if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { - if p != nil { - itr.input.unread(p) - } - - p = &StringPoint{ - Name: itr.window.name, - Tags: itr.window.tags, - Time: itr.window.time, - Aux: itr.auxFields, - } - - switch itr.opt.Fill { - case influxql.LinearFill: - fallthrough - case influxql.NullFill: - p.Nil = true - case influxql.NumberFill: - p.Value, _ = castToString(itr.opt.FillValue) - case influxql.PreviousFill: - if !itr.prev.Nil { - p.Value = itr.prev.Value - p.Nil = itr.prev.Nil - } else { - p.Nil = true - } - } - } else { - itr.prev = *p - } - - // Advance the expected time. Do not advance to a new window here - // as there may be lingering points with the same timestamp in the previous - // window. - if itr.opt.Ascending { - itr.window.time += int64(itr.opt.Interval.Duration) - } else { - itr.window.time -= int64(itr.opt.Interval.Duration) - } - - // Check to see if we have passed over an offset change and adjust the time - // to account for this new offset. - if itr.opt.Location != nil { - if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { - diff := itr.window.offset - offset - if abs(diff) < int64(itr.opt.Interval.Duration) { - itr.window.time += diff - } - itr.window.offset = offset - } - } - return p, nil -} - -// stringIntervalIterator represents a string implementation of IntervalIterator. -type stringIntervalIterator struct { - input StringIterator - opt IteratorOptions -} - -func newStringIntervalIterator(input StringIterator, opt IteratorOptions) *stringIntervalIterator { - return &stringIntervalIterator{input: input, opt: opt} -} - -func (itr *stringIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *stringIntervalIterator) Close() error { return itr.input.Close() } - -func (itr *stringIntervalIterator) Next() (*StringPoint, error) { - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - p.Time, _ = itr.opt.Window(p.Time) - // If we see the minimum allowable time, set the time to zero so we don't - // break the default returned time for aggregate queries without times. - if p.Time == influxql.MinTime { - p.Time = 0 - } - return p, nil -} - -// stringInterruptIterator represents a string implementation of InterruptIterator. 
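The fill iterator above advances the window time by the interval on every emission and substitutes a value for windows the input skipped, depending on the fill mode. A standalone sketch of the previous-fill case only, with timezone offsets and the other fill modes left out; names are illustrative:

```go
// Sketch of previous-fill over fixed-width windows: emit the recorded value
// when one exists, otherwise repeat the last value seen.
package main

import "fmt"

func fillPrevious(values map[int64]string, start, end, interval int64) []string {
	var out []string
	prev := ""
	for t := start; t < end; t += interval { // advance one window per step
		if v, ok := values[t]; ok {
			prev = v
			out = append(out, v)
			continue
		}
		out = append(out, prev) // no point in this window: repeat the previous value
	}
	return out
}

func main() {
	values := map[int64]string{0: "a", 20: "b"}
	fmt.Println(fillPrevious(values, 0, 50, 10)) // [a a b b b]
}
```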
-type stringInterruptIterator struct { - input StringIterator - closing <-chan struct{} - count int -} - -func newStringInterruptIterator(input StringIterator, closing <-chan struct{}) *stringInterruptIterator { - return &stringInterruptIterator{input: input, closing: closing} -} - -func (itr *stringInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *stringInterruptIterator) Close() error { return itr.input.Close() } - -func (itr *stringInterruptIterator) Next() (*StringPoint, error) { - // Only check if the channel is closed every N points. This - // intentionally checks on both 0 and N so that if the iterator - // has been interrupted before the first point is emitted it will - // not emit any points. - if itr.count&0xFF == 0xFF { - select { - case <-itr.closing: - return nil, itr.Close() - default: - // Reset iterator count to zero and fall through to emit the next point. - itr.count = 0 - } - } - - // Increment the counter for every point read. - itr.count++ - return itr.input.Next() -} - -// stringCloseInterruptIterator represents a string implementation of CloseInterruptIterator. -type stringCloseInterruptIterator struct { - input StringIterator - closing <-chan struct{} - done chan struct{} - once sync.Once -} - -func newStringCloseInterruptIterator(input StringIterator, closing <-chan struct{}) *stringCloseInterruptIterator { - itr := &stringCloseInterruptIterator{ - input: input, - closing: closing, - done: make(chan struct{}), - } - go itr.monitor() - return itr -} - -func (itr *stringCloseInterruptIterator) monitor() { - select { - case <-itr.closing: - itr.Close() - case <-itr.done: - } -} - -func (itr *stringCloseInterruptIterator) Stats() IteratorStats { - return itr.input.Stats() -} - -func (itr *stringCloseInterruptIterator) Close() error { - itr.once.Do(func() { - close(itr.done) - itr.input.Close() - }) - return nil -} - -func (itr *stringCloseInterruptIterator) Next() (*StringPoint, error) { - p, err := itr.input.Next() - if err != nil { - // Check if the iterator was closed. - select { - case <-itr.done: - return nil, nil - default: - return nil, err - } - } - return p, nil -} - -// stringReduceFloatIterator executes a reducer for every interval and buffers the result. -type stringReduceFloatIterator struct { - input *bufStringIterator - create func() (StringPointAggregator, FloatPointEmitter) - dims []string - opt IteratorOptions - points []FloatPoint - keepTags bool -} - -func newStringReduceFloatIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, FloatPointEmitter)) *stringReduceFloatIterator { - return &stringReduceFloatIterator{ - input: newBufStringIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *stringReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *stringReduceFloatIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *stringReduceFloatIterator) Next() (*FloatPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. 
- p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// stringReduceFloatPoint stores the reduced data for a name/tag combination. -type stringReduceFloatPoint struct { - Name string - Tags Tags - Aggregator StringPointAggregator - Emitter FloatPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *stringReduceFloatIterator) reduce() ([]FloatPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*stringReduceFloatPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &stringReduceFloatPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateString(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]FloatPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = floatPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// stringStreamFloatIterator streams inputs into the iterator and emits points gradually. 
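The reduce iterators above bucket a window's points by tag ID, feed each bucket to an aggregator, and then emit in a consistent key order; the keys are reverse-sorted because Next pops results off the end of the slice. A standalone sketch of the same group-aggregate-emit shape, using a hypothetical running-max reducer:

```go
// Sketch of the reduce pattern: group samples by key, aggregate each group,
// then emit the results in a stable key order.
package main

import (
	"fmt"
	"sort"
)

type sample struct {
	Key   string
	Value float64
}

func reduceMax(samples []sample) []sample {
	maxByKey := make(map[string]float64)
	for _, s := range samples {
		if v, ok := maxByKey[s.Key]; !ok || s.Value > v {
			maxByKey[s.Key] = s.Value
		}
	}

	keys := make([]string, 0, len(maxByKey))
	for k := range maxByKey {
		keys = append(keys, k)
	}
	sort.Strings(keys) // consistent output order across runs

	out := make([]sample, 0, len(keys))
	for _, k := range keys {
		out = append(out, sample{Key: k, Value: maxByKey[k]})
	}
	return out
}

func main() {
	in := []sample{{"cpu", 1}, {"mem", 7}, {"cpu", 3}}
	fmt.Println(reduceMax(in)) // [{cpu 3} {mem 7}]
}
```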
-type stringStreamFloatIterator struct { - input *bufStringIterator - create func() (StringPointAggregator, FloatPointEmitter) - dims []string - opt IteratorOptions - m map[string]*stringReduceFloatPoint - points []FloatPoint -} - -// newStringStreamFloatIterator returns a new instance of stringStreamFloatIterator. -func newStringStreamFloatIterator(input StringIterator, createFn func() (StringPointAggregator, FloatPointEmitter), opt IteratorOptions) *stringStreamFloatIterator { - return &stringStreamFloatIterator{ - input: newBufStringIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*stringReduceFloatPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *stringStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *stringStreamFloatIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *stringStreamFloatIterator) Next() (*FloatPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *stringStreamFloatIterator) reduce() ([]FloatPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []FloatPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &stringReduceFloatPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateString(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// stringReduceIntegerIterator executes a reducer for every interval and buffers the result. 
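Unlike the windowed reducers, the stream iterators above feed every incoming point into a per-key aggregator and immediately attempt an emission, flushing whatever remains only once the input is exhausted. A simplified sketch of that per-point emission pattern, with an illustrative running-count reducer:

```go
// Sketch of stream-reduce: aggregate each input as it arrives and emit right
// away, rather than waiting for a window boundary.
package main

import "fmt"

type runningCount struct{ n int }

func (r *runningCount) aggregate()  { r.n++ }
func (r *runningCount) emit() []int { return []int{r.n} } // always has a value to emit

func main() {
	counts := make(map[string]*runningCount)
	for _, key := range []string{"cpu", "cpu", "mem"} {
		rc := counts[key]
		if rc == nil {
			rc = &runningCount{}
			counts[key] = rc
		}
		rc.aggregate()
		fmt.Println(key, rc.emit()) // emit after every aggregated point
	}
}
```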
-type stringReduceIntegerIterator struct { - input *bufStringIterator - create func() (StringPointAggregator, IntegerPointEmitter) - dims []string - opt IteratorOptions - points []IntegerPoint - keepTags bool -} - -func newStringReduceIntegerIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, IntegerPointEmitter)) *stringReduceIntegerIterator { - return &stringReduceIntegerIterator{ - input: newBufStringIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *stringReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *stringReduceIntegerIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *stringReduceIntegerIterator) Next() (*IntegerPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// stringReduceIntegerPoint stores the reduced data for a name/tag combination. -type stringReduceIntegerPoint struct { - Name string - Tags Tags - Aggregator StringPointAggregator - Emitter IntegerPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *stringReduceIntegerIterator) reduce() ([]IntegerPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*stringReduceIntegerPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &stringReduceIntegerPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateString(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. 
- if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]IntegerPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = integerPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// stringStreamIntegerIterator streams inputs into the iterator and emits points gradually. -type stringStreamIntegerIterator struct { - input *bufStringIterator - create func() (StringPointAggregator, IntegerPointEmitter) - dims []string - opt IteratorOptions - m map[string]*stringReduceIntegerPoint - points []IntegerPoint -} - -// newStringStreamIntegerIterator returns a new instance of stringStreamIntegerIterator. -func newStringStreamIntegerIterator(input StringIterator, createFn func() (StringPointAggregator, IntegerPointEmitter), opt IteratorOptions) *stringStreamIntegerIterator { - return &stringStreamIntegerIterator{ - input: newBufStringIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*stringReduceIntegerPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *stringStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *stringStreamIntegerIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *stringStreamIntegerIterator) Next() (*IntegerPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *stringStreamIntegerIterator) reduce() ([]IntegerPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []IntegerPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. 
- itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &stringReduceIntegerPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateString(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// stringReduceUnsignedIterator executes a reducer for every interval and buffers the result. -type stringReduceUnsignedIterator struct { - input *bufStringIterator - create func() (StringPointAggregator, UnsignedPointEmitter) - dims []string - opt IteratorOptions - points []UnsignedPoint - keepTags bool -} - -func newStringReduceUnsignedIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, UnsignedPointEmitter)) *stringReduceUnsignedIterator { - return &stringReduceUnsignedIterator{ - input: newBufStringIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *stringReduceUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *stringReduceUnsignedIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *stringReduceUnsignedIterator) Next() (*UnsignedPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// stringReduceUnsignedPoint stores the reduced data for a name/tag combination. -type stringReduceUnsignedPoint struct { - Name string - Tags Tags - Aggregator StringPointAggregator - Emitter UnsignedPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *stringReduceUnsignedIterator) reduce() ([]UnsignedPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*stringReduceUnsignedPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. 
- if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &stringReduceUnsignedPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateString(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]UnsignedPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = unsignedPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// stringStreamUnsignedIterator streams inputs into the iterator and emits points gradually. -type stringStreamUnsignedIterator struct { - input *bufStringIterator - create func() (StringPointAggregator, UnsignedPointEmitter) - dims []string - opt IteratorOptions - m map[string]*stringReduceUnsignedPoint - points []UnsignedPoint -} - -// newStringStreamUnsignedIterator returns a new instance of stringStreamUnsignedIterator. -func newStringStreamUnsignedIterator(input StringIterator, createFn func() (StringPointAggregator, UnsignedPointEmitter), opt IteratorOptions) *stringStreamUnsignedIterator { - return &stringStreamUnsignedIterator{ - input: newBufStringIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*stringReduceUnsignedPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *stringStreamUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *stringStreamUnsignedIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *stringStreamUnsignedIterator) Next() (*UnsignedPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. 
-// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *stringStreamUnsignedIterator) reduce() ([]UnsignedPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []UnsignedPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &stringReduceUnsignedPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateString(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// stringReduceStringIterator executes a reducer for every interval and buffers the result. -type stringReduceStringIterator struct { - input *bufStringIterator - create func() (StringPointAggregator, StringPointEmitter) - dims []string - opt IteratorOptions - points []StringPoint - keepTags bool -} - -func newStringReduceStringIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, StringPointEmitter)) *stringReduceStringIterator { - return &stringReduceStringIterator{ - input: newBufStringIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *stringReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *stringReduceStringIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *stringReduceStringIterator) Next() (*StringPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// stringReduceStringPoint stores the reduced data for a name/tag combination. -type stringReduceStringPoint struct { - Name string - Tags Tags - Aggregator StringPointAggregator - Emitter StringPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *stringReduceStringIterator) reduce() ([]StringPoint, error) { - // Calculate next window. 
- var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*stringReduceStringPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &stringReduceStringPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateString(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]StringPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = stringPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// stringStreamStringIterator streams inputs into the iterator and emits points gradually. -type stringStreamStringIterator struct { - input *bufStringIterator - create func() (StringPointAggregator, StringPointEmitter) - dims []string - opt IteratorOptions - m map[string]*stringReduceStringPoint - points []StringPoint -} - -// newStringStreamStringIterator returns a new instance of stringStreamStringIterator. -func newStringStreamStringIterator(input StringIterator, createFn func() (StringPointAggregator, StringPointEmitter), opt IteratorOptions) *stringStreamStringIterator { - return &stringStreamStringIterator{ - input: newBufStringIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*stringReduceStringPoint), - } -} - -// Stats returns stats from the input iterator. 
-func (itr *stringStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *stringStreamStringIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *stringStreamStringIterator) Next() (*StringPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *stringStreamStringIterator) reduce() ([]StringPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []StringPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &stringReduceStringPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateString(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// stringReduceBooleanIterator executes a reducer for every interval and buffers the result. -type stringReduceBooleanIterator struct { - input *bufStringIterator - create func() (StringPointAggregator, BooleanPointEmitter) - dims []string - opt IteratorOptions - points []BooleanPoint - keepTags bool -} - -func newStringReduceBooleanIterator(input StringIterator, opt IteratorOptions, createFn func() (StringPointAggregator, BooleanPointEmitter)) *stringReduceBooleanIterator { - return &stringReduceBooleanIterator{ - input: newBufStringIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *stringReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *stringReduceBooleanIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *stringReduceBooleanIterator) Next() (*BooleanPoint, error) { - // Calculate next window if we have no more points. 
- if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// stringReduceBooleanPoint stores the reduced data for a name/tag combination. -type stringReduceBooleanPoint struct { - Name string - Tags Tags - Aggregator StringPointAggregator - Emitter BooleanPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *stringReduceBooleanIterator) reduce() ([]BooleanPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*stringReduceBooleanPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &stringReduceBooleanPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateString(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]BooleanPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = booleanPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// stringStreamBooleanIterator streams inputs into the iterator and emits points gradually. 
-type stringStreamBooleanIterator struct { - input *bufStringIterator - create func() (StringPointAggregator, BooleanPointEmitter) - dims []string - opt IteratorOptions - m map[string]*stringReduceBooleanPoint - points []BooleanPoint -} - -// newStringStreamBooleanIterator returns a new instance of stringStreamBooleanIterator. -func newStringStreamBooleanIterator(input StringIterator, createFn func() (StringPointAggregator, BooleanPointEmitter), opt IteratorOptions) *stringStreamBooleanIterator { - return &stringStreamBooleanIterator{ - input: newBufStringIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*stringReduceBooleanPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *stringStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *stringStreamBooleanIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *stringStreamBooleanIterator) Next() (*BooleanPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *stringStreamBooleanIterator) reduce() ([]BooleanPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []BooleanPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &stringReduceBooleanPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateString(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// stringDedupeIterator only outputs unique points. -// This differs from the DistinctIterator in that it compares all aux fields too. -// This iterator is relatively inefficient and should only be used on small -// datasets such as meta query results. 
-type stringDedupeIterator struct { - input StringIterator - m map[string]struct{} // lookup of points already sent -} - -type stringIteratorMapper struct { - cur Cursor - row Row - driver IteratorMap // which iterator to use for the primary value, can be nil - fields []IteratorMap // which iterator to use for an aux field - point StringPoint -} - -func newStringIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *stringIteratorMapper { - return &stringIteratorMapper{ - cur: cur, - driver: driver, - fields: fields, - point: StringPoint{ - Aux: make([]interface{}, len(fields)), - }, - } -} - -func (itr *stringIteratorMapper) Next() (*StringPoint, error) { - if !itr.cur.Scan(&itr.row) { - if err := itr.cur.Err(); err != nil { - return nil, err - } - return nil, nil - } - - itr.point.Time = itr.row.Time - itr.point.Name = itr.row.Series.Name - itr.point.Tags = itr.row.Series.Tags - - if itr.driver != nil { - if v := itr.driver.Value(&itr.row); v != nil { - if v, ok := castToString(v); ok { - itr.point.Value = v - itr.point.Nil = false - } else { - itr.point.Value = "" - itr.point.Nil = true - } - } else { - itr.point.Value = "" - itr.point.Nil = true - } - } - for i, f := range itr.fields { - itr.point.Aux[i] = f.Value(&itr.row) - } - return &itr.point, nil -} - -func (itr *stringIteratorMapper) Stats() IteratorStats { - return itr.cur.Stats() -} - -func (itr *stringIteratorMapper) Close() error { - return itr.cur.Close() -} - -type stringFilterIterator struct { - input StringIterator - cond influxql.Expr - opt IteratorOptions - m map[string]interface{} -} - -func newStringFilterIterator(input StringIterator, cond influxql.Expr, opt IteratorOptions) StringIterator { - // Strip out time conditions from the WHERE clause. - // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. 
- n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node { - switch n := n.(type) { - case *influxql.BinaryExpr: - if n.LHS.String() == "time" { - return &influxql.BooleanLiteral{Val: true} - } - } - return n - }) - - cond, _ = n.(influxql.Expr) - if cond == nil { - return input - } else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val { - return input - } - - return &stringFilterIterator{ - input: input, - cond: cond, - opt: opt, - m: make(map[string]interface{}), - } -} - -func (itr *stringFilterIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *stringFilterIterator) Close() error { return itr.input.Close() } - -func (itr *stringFilterIterator) Next() (*StringPoint, error) { - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } - - for i, ref := range itr.opt.Aux { - itr.m[ref.Val] = p.Aux[i] - } - for k, v := range p.Tags.KeyValues() { - itr.m[k] = v - } - - if !influxql.EvalBool(itr.cond, itr.m) { - continue - } - return p, nil - } -} - -type stringTagSubsetIterator struct { - input StringIterator - point StringPoint - lastTags Tags - dimensions []string -} - -func newStringTagSubsetIterator(input StringIterator, opt IteratorOptions) *stringTagSubsetIterator { - return &stringTagSubsetIterator{ - input: input, - dimensions: opt.GetDimensions(), - } -} - -func (itr *stringTagSubsetIterator) Next() (*StringPoint, error) { - p, err := itr.input.Next() - if err != nil { - return nil, err - } else if p == nil { - return nil, nil - } - - itr.point.Name = p.Name - if !p.Tags.Equal(itr.lastTags) { - itr.point.Tags = p.Tags.Subset(itr.dimensions) - itr.lastTags = p.Tags - } - itr.point.Time = p.Time - itr.point.Value = p.Value - itr.point.Aux = p.Aux - itr.point.Aggregated = p.Aggregated - itr.point.Nil = p.Nil - return &itr.point, nil -} - -func (itr *stringTagSubsetIterator) Stats() IteratorStats { - return itr.input.Stats() -} - -func (itr *stringTagSubsetIterator) Close() error { - return itr.input.Close() -} - -// newStringDedupeIterator returns a new instance of stringDedupeIterator. -func newStringDedupeIterator(input StringIterator) *stringDedupeIterator { - return &stringDedupeIterator{ - input: input, - m: make(map[string]struct{}), - } -} - -// Stats returns stats from the input iterator. -func (itr *stringDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *stringDedupeIterator) Close() error { return itr.input.Close() } - -// Next returns the next unique point from the input iterator. -func (itr *stringDedupeIterator) Next() (*StringPoint, error) { - for { - // Read next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Serialize to bytes to store in lookup. - buf, err := proto.Marshal(encodeStringPoint(p)) - if err != nil { - return nil, err - } - - // If the point has already been output then move to the next point. - if _, ok := itr.m[string(buf)]; ok { - continue - } - - // Otherwise mark it as emitted and return point. - itr.m[string(buf)] = struct{}{} - return p, nil - } -} - -// stringReaderIterator represents an iterator that streams from a reader. -type stringReaderIterator struct { - r io.Reader - dec *StringPointDecoder -} - -// newStringReaderIterator returns a new instance of stringReaderIterator. 
-func newStringReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *stringReaderIterator { - dec := NewStringPointDecoder(ctx, r) - dec.stats = stats - - return &stringReaderIterator{ - r: r, - dec: dec, - } -} - -// Stats returns stats about points processed. -func (itr *stringReaderIterator) Stats() IteratorStats { return itr.dec.stats } - -// Close closes the underlying reader, if applicable. -func (itr *stringReaderIterator) Close() error { - if r, ok := itr.r.(io.ReadCloser); ok { - return r.Close() - } - return nil -} - -// Next returns the next point from the iterator. -func (itr *stringReaderIterator) Next() (*StringPoint, error) { - // OPTIMIZE(benbjohnson): Reuse point on iterator. - - // Unmarshal next point. - p := &StringPoint{} - if err := itr.dec.DecodeStringPoint(p); err == io.EOF { - return nil, nil - } else if err != nil { - return nil, err - } - return p, nil -} - -// BooleanIterator represents a stream of boolean points. -type BooleanIterator interface { - Iterator - Next() (*BooleanPoint, error) -} - -// newBooleanIterators converts a slice of Iterator to a slice of BooleanIterator. -// Drop and closes any iterator in itrs that is not a BooleanIterator and cannot -// be cast to a BooleanIterator. -func newBooleanIterators(itrs []Iterator) []BooleanIterator { - a := make([]BooleanIterator, 0, len(itrs)) - for _, itr := range itrs { - switch itr := itr.(type) { - case BooleanIterator: - a = append(a, itr) - default: - itr.Close() - } - } - return a -} - -// bufBooleanIterator represents a buffered BooleanIterator. -type bufBooleanIterator struct { - itr BooleanIterator - buf *BooleanPoint -} - -// newBufBooleanIterator returns a buffered BooleanIterator. -func newBufBooleanIterator(itr BooleanIterator) *bufBooleanIterator { - return &bufBooleanIterator{itr: itr} -} - -// Stats returns statistics from the input iterator. -func (itr *bufBooleanIterator) Stats() IteratorStats { return itr.itr.Stats() } - -// Close closes the underlying iterator. -func (itr *bufBooleanIterator) Close() error { return itr.itr.Close() } - -// peek returns the next point without removing it from the iterator. -func (itr *bufBooleanIterator) peek() (*BooleanPoint, error) { - p, err := itr.Next() - if err != nil { - return nil, err - } - itr.unread(p) - return p, nil -} - -// peekTime returns the time of the next point. -// Returns zero time if no more points available. -func (itr *bufBooleanIterator) peekTime() (int64, error) { - p, err := itr.peek() - if p == nil || err != nil { - return ZeroTime, err - } - return p.Time, nil -} - -// Next returns the current buffer, if exists, or calls the underlying iterator. -func (itr *bufBooleanIterator) Next() (*BooleanPoint, error) { - buf := itr.buf - if buf != nil { - itr.buf = nil - return buf, nil - } - return itr.itr.Next() -} - -// NextInWindow returns the next value if it is between [startTime, endTime). -// If the next value is outside the range then it is moved to the buffer. -func (itr *bufBooleanIterator) NextInWindow(startTime, endTime int64) (*BooleanPoint, error) { - v, err := itr.Next() - if v == nil || err != nil { - return nil, err - } else if t := v.Time; t >= endTime || t < startTime { - itr.unread(v) - return nil, nil - } - return v, nil -} - -// unread sets v to the buffer. It is read on the next call to Next(). -func (itr *bufBooleanIterator) unread(v *BooleanPoint) { itr.buf = v } - -// booleanMergeIterator represents an iterator that combines multiple boolean iterators. 
-type booleanMergeIterator struct { - inputs []BooleanIterator - heap *booleanMergeHeap - init bool - - closed bool - mu sync.RWMutex - - // Current iterator and window. - curr *booleanMergeHeapItem - window struct { - name string - tags string - startTime int64 - endTime int64 - } -} - -// newBooleanMergeIterator returns a new instance of booleanMergeIterator. -func newBooleanMergeIterator(inputs []BooleanIterator, opt IteratorOptions) *booleanMergeIterator { - itr := &booleanMergeIterator{ - inputs: inputs, - heap: &booleanMergeHeap{ - items: make([]*booleanMergeHeapItem, 0, len(inputs)), - opt: opt, - }, - } - - // Initialize heap items. - for _, input := range inputs { - // Wrap in buffer, ignore any inputs without anymore points. - bufInput := newBufBooleanIterator(input) - - // Append to the heap. - itr.heap.items = append(itr.heap.items, &booleanMergeHeapItem{itr: bufInput}) - } - - return itr -} - -// Stats returns an aggregation of stats from the underlying iterators. -func (itr *booleanMergeIterator) Stats() IteratorStats { - var stats IteratorStats - for _, input := range itr.inputs { - stats.Add(input.Stats()) - } - return stats -} - -// Close closes the underlying iterators. -func (itr *booleanMergeIterator) Close() error { - itr.mu.Lock() - defer itr.mu.Unlock() - - for _, input := range itr.inputs { - input.Close() - } - itr.curr = nil - itr.inputs = nil - itr.heap.items = nil - itr.closed = true - return nil -} - -// Next returns the next point from the iterator. -func (itr *booleanMergeIterator) Next() (*BooleanPoint, error) { - itr.mu.RLock() - defer itr.mu.RUnlock() - if itr.closed { - return nil, nil - } - - // Initialize the heap. This needs to be done lazily on the first call to this iterator - // so that iterator initialization done through the Select() call returns quickly. - // Queries can only be interrupted after the Select() call completes so any operations - // done during iterator creation cannot be interrupted, which is why we do it here - // instead so an interrupt can happen while initializing the heap. - if !itr.init { - items := itr.heap.items - itr.heap.items = make([]*booleanMergeHeapItem, 0, len(items)) - for _, item := range items { - if p, err := item.itr.peek(); err != nil { - return nil, err - } else if p == nil { - continue - } - itr.heap.items = append(itr.heap.items, item) - } - heap.Init(itr.heap) - itr.init = true - } - - for { - // Retrieve the next iterator if we don't have one. - if itr.curr == nil { - if len(itr.heap.items) == 0 { - return nil, nil - } - itr.curr = heap.Pop(itr.heap).(*booleanMergeHeapItem) - - // Read point and set current window. - p, err := itr.curr.itr.Next() - if err != nil { - return nil, err - } - tags := p.Tags.Subset(itr.heap.opt.Dimensions) - itr.window.name, itr.window.tags = p.Name, tags.ID() - itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) - return p, nil - } - - // Read the next point from the current iterator. - p, err := itr.curr.itr.Next() - if err != nil { - return nil, err - } - - // If there are no more points then remove iterator from heap and find next. - if p == nil { - itr.curr = nil - continue - } - - // Check if the point is inside of our current window. 
- inWindow := true - if window := itr.window; window.name != p.Name { - inWindow = false - } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { - inWindow = false - } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { - inWindow = false - } else if !opt.Ascending && p.Time < window.startTime { - inWindow = false - } - - // If it's outside our window then push iterator back on the heap and find new iterator. - if !inWindow { - itr.curr.itr.unread(p) - heap.Push(itr.heap, itr.curr) - itr.curr = nil - continue - } - - return p, nil - } -} - -// booleanMergeHeap represents a heap of booleanMergeHeapItems. -// Items are sorted by their next window and then by name/tags. -type booleanMergeHeap struct { - opt IteratorOptions - items []*booleanMergeHeapItem -} - -func (h *booleanMergeHeap) Len() int { return len(h.items) } -func (h *booleanMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h *booleanMergeHeap) Less(i, j int) bool { - x, err := h.items[i].itr.peek() - if err != nil { - return true - } - y, err := h.items[j].itr.peek() - if err != nil { - return false - } - - if h.opt.Ascending { - if x.Name != y.Name { - return x.Name < y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { - return xTags.ID() < yTags.ID() - } - } else { - if x.Name != y.Name { - return x.Name > y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { - return xTags.ID() > yTags.ID() - } - } - - xt, _ := h.opt.Window(x.Time) - yt, _ := h.opt.Window(y.Time) - - if h.opt.Ascending { - return xt < yt - } - return xt > yt -} - -func (h *booleanMergeHeap) Push(x interface{}) { - h.items = append(h.items, x.(*booleanMergeHeapItem)) -} - -func (h *booleanMergeHeap) Pop() interface{} { - old := h.items - n := len(old) - item := old[n-1] - h.items = old[0 : n-1] - return item -} - -type booleanMergeHeapItem struct { - itr *bufBooleanIterator -} - -// booleanSortedMergeIterator is an iterator that sorts and merges multiple iterators into one. -type booleanSortedMergeIterator struct { - inputs []BooleanIterator - heap *booleanSortedMergeHeap - init bool -} - -// newBooleanSortedMergeIterator returns an instance of booleanSortedMergeIterator. -func newBooleanSortedMergeIterator(inputs []BooleanIterator, opt IteratorOptions) Iterator { - itr := &booleanSortedMergeIterator{ - inputs: inputs, - heap: &booleanSortedMergeHeap{ - items: make([]*booleanSortedMergeHeapItem, 0, len(inputs)), - opt: opt, - }, - } - - // Initialize heap items. - for _, input := range inputs { - // Append to the heap. - itr.heap.items = append(itr.heap.items, &booleanSortedMergeHeapItem{itr: input}) - } - - return itr -} - -// Stats returns an aggregation of stats from the underlying iterators. -func (itr *booleanSortedMergeIterator) Stats() IteratorStats { - var stats IteratorStats - for _, input := range itr.inputs { - stats.Add(input.Stats()) - } - return stats -} - -// Close closes the underlying iterators. -func (itr *booleanSortedMergeIterator) Close() error { - for _, input := range itr.inputs { - input.Close() - } - return nil -} - -// Next returns the next points from the iterator. -func (itr *booleanSortedMergeIterator) Next() (*BooleanPoint, error) { return itr.pop() } - -// pop returns the next point from the heap. -// Reads the next point from item's cursor and puts it back on the heap. 
-func (itr *booleanSortedMergeIterator) pop() (*BooleanPoint, error) { - // Initialize the heap. See the MergeIterator to see why this has to be done lazily. - if !itr.init { - items := itr.heap.items - itr.heap.items = make([]*booleanSortedMergeHeapItem, 0, len(items)) - for _, item := range items { - var err error - if item.point, err = item.itr.Next(); err != nil { - return nil, err - } else if item.point == nil { - continue - } - itr.heap.items = append(itr.heap.items, item) - } - heap.Init(itr.heap) - itr.init = true - } - - if len(itr.heap.items) == 0 { - return nil, nil - } - - // Read the next item from the heap. - item := heap.Pop(itr.heap).(*booleanSortedMergeHeapItem) - if item.err != nil { - return nil, item.err - } else if item.point == nil { - return nil, nil - } - - // Copy the point for return. - p := item.point.Clone() - - // Read the next item from the cursor. Push back to heap if one exists. - if item.point, item.err = item.itr.Next(); item.point != nil { - heap.Push(itr.heap, item) - } - - return p, nil -} - -// booleanSortedMergeHeap represents a heap of booleanSortedMergeHeapItems. -// Items are sorted with the following priority: -// - By their measurement name; -// - By their tag keys/values; -// - By time; or -// - By their Aux field values. -type booleanSortedMergeHeap struct { - opt IteratorOptions - items []*booleanSortedMergeHeapItem -} - -func (h *booleanSortedMergeHeap) Len() int { return len(h.items) } -func (h *booleanSortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h *booleanSortedMergeHeap) Less(i, j int) bool { - x, y := h.items[i].point, h.items[j].point - - if h.opt.Ascending { - if x.Name != y.Name { - return x.Name < y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { - return xTags.ID() < yTags.ID() - } - - if x.Time != y.Time { - return x.Time < y.Time - } - - if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { - for i := 0; i < len(x.Aux); i++ { - v1, ok1 := x.Aux[i].(string) - v2, ok2 := y.Aux[i].(string) - if !ok1 || !ok2 { - // Unsupported types used in Aux fields. Maybe they - // need to be added here? - return false - } else if v1 == v2 { - continue - } - return v1 < v2 - } - } - return false // Times and/or Aux fields are equal. - } - - if x.Name != y.Name { - return x.Name > y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { - return xTags.ID() > yTags.ID() - } - - if x.Time != y.Time { - return x.Time > y.Time - } - - if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { - for i := 0; i < len(x.Aux); i++ { - v1, ok1 := x.Aux[i].(string) - v2, ok2 := y.Aux[i].(string) - if !ok1 || !ok2 { - // Unsupported types used in Aux fields. Maybe they - // need to be added here? - return false - } else if v1 == v2 { - continue - } - return v1 > v2 - } - } - return false // Times and/or Aux fields are equal. -} - -func (h *booleanSortedMergeHeap) Push(x interface{}) { - h.items = append(h.items, x.(*booleanSortedMergeHeapItem)) -} - -func (h *booleanSortedMergeHeap) Pop() interface{} { - old := h.items - n := len(old) - item := old[n-1] - h.items = old[0 : n-1] - return item -} - -type booleanSortedMergeHeapItem struct { - point *BooleanPoint - err error - itr BooleanIterator -} - -// booleanIteratorScanner scans the results of a BooleanIterator into a map. 
-type booleanIteratorScanner struct { - input *bufBooleanIterator - err error - keys []influxql.VarRef - defaultValue interface{} -} - -// newBooleanIteratorScanner creates a new IteratorScanner. -func newBooleanIteratorScanner(input BooleanIterator, keys []influxql.VarRef, defaultValue interface{}) *booleanIteratorScanner { - return &booleanIteratorScanner{ - input: newBufBooleanIterator(input), - keys: keys, - defaultValue: defaultValue, - } -} - -func (s *booleanIteratorScanner) Peek() (int64, string, Tags) { - if s.err != nil { - return ZeroTime, "", Tags{} - } - - p, err := s.input.peek() - if err != nil { - s.err = err - return ZeroTime, "", Tags{} - } else if p == nil { - return ZeroTime, "", Tags{} - } - return p.Time, p.Name, p.Tags -} - -func (s *booleanIteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) { - if s.err != nil { - return - } - - p, err := s.input.Next() - if err != nil { - s.err = err - return - } else if p == nil { - s.useDefaults(m) - return - } else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) { - s.useDefaults(m) - s.input.unread(p) - return - } - - if k := s.keys[0]; k.Val != "" { - if p.Nil { - if s.defaultValue != SkipDefault { - m[k.Val] = castToType(s.defaultValue, k.Type) - } - } else { - m[k.Val] = p.Value - } - } - for i, v := range p.Aux { - k := s.keys[i+1] - switch v.(type) { - case float64, int64, uint64, string, bool: - m[k.Val] = v - default: - // Insert the fill value if one was specified. - if s.defaultValue != SkipDefault { - m[k.Val] = castToType(s.defaultValue, k.Type) - } - } - } -} - -func (s *booleanIteratorScanner) useDefaults(m map[string]interface{}) { - if s.defaultValue == SkipDefault { - return - } - for _, k := range s.keys { - if k.Val == "" { - continue - } - m[k.Val] = castToType(s.defaultValue, k.Type) - } -} - -func (s *booleanIteratorScanner) Stats() IteratorStats { return s.input.Stats() } -func (s *booleanIteratorScanner) Err() error { return s.err } -func (s *booleanIteratorScanner) Close() error { return s.input.Close() } - -// booleanParallelIterator represents an iterator that pulls data in a separate goroutine. -type booleanParallelIterator struct { - input BooleanIterator - ch chan booleanPointError - - once sync.Once - closing chan struct{} - wg sync.WaitGroup -} - -// newBooleanParallelIterator returns a new instance of booleanParallelIterator. -func newBooleanParallelIterator(input BooleanIterator) *booleanParallelIterator { - itr := &booleanParallelIterator{ - input: input, - ch: make(chan booleanPointError, 256), - closing: make(chan struct{}), - } - itr.wg.Add(1) - go itr.monitor() - return itr -} - -// Stats returns stats from the underlying iterator. -func (itr *booleanParallelIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the underlying iterators. -func (itr *booleanParallelIterator) Close() error { - itr.once.Do(func() { close(itr.closing) }) - itr.wg.Wait() - return itr.input.Close() -} - -// Next returns the next point from the iterator. -func (itr *booleanParallelIterator) Next() (*BooleanPoint, error) { - v, ok := <-itr.ch - if !ok { - return nil, io.EOF - } - return v.point, v.err -} - -// monitor runs in a separate goroutine and actively pulls the next point. -func (itr *booleanParallelIterator) monitor() { - defer close(itr.ch) - defer itr.wg.Done() - - for { - // Read next point. 
- p, err := itr.input.Next() - if p != nil { - p = p.Clone() - } - - select { - case <-itr.closing: - return - case itr.ch <- booleanPointError{point: p, err: err}: - } - } -} - -type booleanPointError struct { - point *BooleanPoint - err error -} - -// booleanLimitIterator represents an iterator that limits points per group. -type booleanLimitIterator struct { - input BooleanIterator - opt IteratorOptions - n int - - prev struct { - name string - tags Tags - } -} - -// newBooleanLimitIterator returns a new instance of booleanLimitIterator. -func newBooleanLimitIterator(input BooleanIterator, opt IteratorOptions) *booleanLimitIterator { - return &booleanLimitIterator{ - input: input, - opt: opt, - } -} - -// Stats returns stats from the underlying iterator. -func (itr *booleanLimitIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the underlying iterators. -func (itr *booleanLimitIterator) Close() error { return itr.input.Close() } - -// Next returns the next point from the iterator. -func (itr *booleanLimitIterator) Next() (*BooleanPoint, error) { - for { - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Reset window and counter if a new window is encountered. - if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { - itr.prev.name = p.Name - itr.prev.tags = p.Tags - itr.n = 0 - } - - // Increment counter. - itr.n++ - - // Read next point if not beyond the offset. - if itr.n <= itr.opt.Offset { - continue - } - - // Read next point if we're beyond the limit. - if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { - continue - } - - return p, nil - } -} - -type booleanFillIterator struct { - input *bufBooleanIterator - prev BooleanPoint - startTime int64 - endTime int64 - auxFields []interface{} - init bool - opt IteratorOptions - - window struct { - name string - tags Tags - time int64 - offset int64 - } -} - -func newBooleanFillIterator(input BooleanIterator, expr influxql.Expr, opt IteratorOptions) *booleanFillIterator { - if opt.Fill == influxql.NullFill { - if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" { - opt.Fill = influxql.NumberFill - opt.FillValue = false - } - } - - var startTime, endTime int64 - if opt.Ascending { - startTime, _ = opt.Window(opt.StartTime) - endTime, _ = opt.Window(opt.EndTime) - } else { - startTime, _ = opt.Window(opt.EndTime) - endTime, _ = opt.Window(opt.StartTime) - } - - var auxFields []interface{} - if len(opt.Aux) > 0 { - auxFields = make([]interface{}, len(opt.Aux)) - } - - return &booleanFillIterator{ - input: newBufBooleanIterator(input), - prev: BooleanPoint{Nil: true}, - startTime: startTime, - endTime: endTime, - auxFields: auxFields, - opt: opt, - } -} - -func (itr *booleanFillIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *booleanFillIterator) Close() error { return itr.input.Close() } - -func (itr *booleanFillIterator) Next() (*BooleanPoint, error) { - if !itr.init { - p, err := itr.input.peek() - if p == nil || err != nil { - return nil, err - } - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - if itr.startTime == influxql.MinTime { - itr.window.time, _ = itr.opt.Window(p.Time) - } - if itr.opt.Location != nil { - _, itr.window.offset = itr.opt.Zone(itr.window.time) - } - itr.init = true - } - - p, err := itr.input.Next() - if err != nil { - return nil, err - } - - // Check if the next point is outside of our window or is nil. 
- if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { - // If we are inside of an interval, unread the point and continue below to - // constructing a new point. - if itr.opt.Ascending && itr.window.time <= itr.endTime { - itr.input.unread(p) - p = nil - goto CONSTRUCT - } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { - itr.input.unread(p) - p = nil - goto CONSTRUCT - } - - // We are *not* in a current interval. If there is no next point, - // we are at the end of all intervals. - if p == nil { - return nil, nil - } - - // Set the new interval. - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - if itr.window.time == influxql.MinTime { - itr.window.time, _ = itr.opt.Window(p.Time) - } - if itr.opt.Location != nil { - _, itr.window.offset = itr.opt.Zone(itr.window.time) - } - itr.prev = BooleanPoint{Nil: true} - } - - // Check if the point is our next expected point. -CONSTRUCT: - if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { - if p != nil { - itr.input.unread(p) - } - - p = &BooleanPoint{ - Name: itr.window.name, - Tags: itr.window.tags, - Time: itr.window.time, - Aux: itr.auxFields, - } - - switch itr.opt.Fill { - case influxql.LinearFill: - fallthrough - case influxql.NullFill: - p.Nil = true - case influxql.NumberFill: - p.Value, _ = castToBoolean(itr.opt.FillValue) - case influxql.PreviousFill: - if !itr.prev.Nil { - p.Value = itr.prev.Value - p.Nil = itr.prev.Nil - } else { - p.Nil = true - } - } - } else { - itr.prev = *p - } - - // Advance the expected time. Do not advance to a new window here - // as there may be lingering points with the same timestamp in the previous - // window. - if itr.opt.Ascending { - itr.window.time += int64(itr.opt.Interval.Duration) - } else { - itr.window.time -= int64(itr.opt.Interval.Duration) - } - - // Check to see if we have passed over an offset change and adjust the time - // to account for this new offset. - if itr.opt.Location != nil { - if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { - diff := itr.window.offset - offset - if abs(diff) < int64(itr.opt.Interval.Duration) { - itr.window.time += diff - } - itr.window.offset = offset - } - } - return p, nil -} - -// booleanIntervalIterator represents a boolean implementation of IntervalIterator. -type booleanIntervalIterator struct { - input BooleanIterator - opt IteratorOptions -} - -func newBooleanIntervalIterator(input BooleanIterator, opt IteratorOptions) *booleanIntervalIterator { - return &booleanIntervalIterator{input: input, opt: opt} -} - -func (itr *booleanIntervalIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *booleanIntervalIterator) Close() error { return itr.input.Close() } - -func (itr *booleanIntervalIterator) Next() (*BooleanPoint, error) { - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - p.Time, _ = itr.opt.Window(p.Time) - // If we see the minimum allowable time, set the time to zero so we don't - // break the default returned time for aggregate queries without times. - if p.Time == influxql.MinTime { - p.Time = 0 - } - return p, nil -} - -// booleanInterruptIterator represents a boolean implementation of InterruptIterator. 
-type booleanInterruptIterator struct { - input BooleanIterator - closing <-chan struct{} - count int -} - -func newBooleanInterruptIterator(input BooleanIterator, closing <-chan struct{}) *booleanInterruptIterator { - return &booleanInterruptIterator{input: input, closing: closing} -} - -func (itr *booleanInterruptIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *booleanInterruptIterator) Close() error { return itr.input.Close() } - -func (itr *booleanInterruptIterator) Next() (*BooleanPoint, error) { - // Only check if the channel is closed every N points. This - // intentionally checks on both 0 and N so that if the iterator - // has been interrupted before the first point is emitted it will - // not emit any points. - if itr.count&0xFF == 0xFF { - select { - case <-itr.closing: - return nil, itr.Close() - default: - // Reset iterator count to zero and fall through to emit the next point. - itr.count = 0 - } - } - - // Increment the counter for every point read. - itr.count++ - return itr.input.Next() -} - -// booleanCloseInterruptIterator represents a boolean implementation of CloseInterruptIterator. -type booleanCloseInterruptIterator struct { - input BooleanIterator - closing <-chan struct{} - done chan struct{} - once sync.Once -} - -func newBooleanCloseInterruptIterator(input BooleanIterator, closing <-chan struct{}) *booleanCloseInterruptIterator { - itr := &booleanCloseInterruptIterator{ - input: input, - closing: closing, - done: make(chan struct{}), - } - go itr.monitor() - return itr -} - -func (itr *booleanCloseInterruptIterator) monitor() { - select { - case <-itr.closing: - itr.Close() - case <-itr.done: - } -} - -func (itr *booleanCloseInterruptIterator) Stats() IteratorStats { - return itr.input.Stats() -} - -func (itr *booleanCloseInterruptIterator) Close() error { - itr.once.Do(func() { - close(itr.done) - itr.input.Close() - }) - return nil -} - -func (itr *booleanCloseInterruptIterator) Next() (*BooleanPoint, error) { - p, err := itr.input.Next() - if err != nil { - // Check if the iterator was closed. - select { - case <-itr.done: - return nil, nil - default: - return nil, err - } - } - return p, nil -} - -// booleanReduceFloatIterator executes a reducer for every interval and buffers the result. -type booleanReduceFloatIterator struct { - input *bufBooleanIterator - create func() (BooleanPointAggregator, FloatPointEmitter) - dims []string - opt IteratorOptions - points []FloatPoint - keepTags bool -} - -func newBooleanReduceFloatIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, FloatPointEmitter)) *booleanReduceFloatIterator { - return &booleanReduceFloatIterator{ - input: newBufBooleanIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *booleanReduceFloatIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *booleanReduceFloatIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *booleanReduceFloatIterator) Next() (*FloatPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. 
- p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// booleanReduceFloatPoint stores the reduced data for a name/tag combination. -type booleanReduceFloatPoint struct { - Name string - Tags Tags - Aggregator BooleanPointAggregator - Emitter FloatPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *booleanReduceFloatIterator) reduce() ([]FloatPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*booleanReduceFloatPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &booleanReduceFloatPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateBoolean(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]FloatPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = floatPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// booleanStreamFloatIterator streams inputs into the iterator and emits points gradually. 
-type booleanStreamFloatIterator struct { - input *bufBooleanIterator - create func() (BooleanPointAggregator, FloatPointEmitter) - dims []string - opt IteratorOptions - m map[string]*booleanReduceFloatPoint - points []FloatPoint -} - -// newBooleanStreamFloatIterator returns a new instance of booleanStreamFloatIterator. -func newBooleanStreamFloatIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, FloatPointEmitter), opt IteratorOptions) *booleanStreamFloatIterator { - return &booleanStreamFloatIterator{ - input: newBufBooleanIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*booleanReduceFloatPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *booleanStreamFloatIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *booleanStreamFloatIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *booleanStreamFloatIterator) Next() (*FloatPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *booleanStreamFloatIterator) reduce() ([]FloatPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []FloatPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &booleanReduceFloatPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateBoolean(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// booleanReduceIntegerIterator executes a reducer for every interval and buffers the result. 
-type booleanReduceIntegerIterator struct { - input *bufBooleanIterator - create func() (BooleanPointAggregator, IntegerPointEmitter) - dims []string - opt IteratorOptions - points []IntegerPoint - keepTags bool -} - -func newBooleanReduceIntegerIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, IntegerPointEmitter)) *booleanReduceIntegerIterator { - return &booleanReduceIntegerIterator{ - input: newBufBooleanIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *booleanReduceIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *booleanReduceIntegerIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *booleanReduceIntegerIterator) Next() (*IntegerPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// booleanReduceIntegerPoint stores the reduced data for a name/tag combination. -type booleanReduceIntegerPoint struct { - Name string - Tags Tags - Aggregator BooleanPointAggregator - Emitter IntegerPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *booleanReduceIntegerIterator) reduce() ([]IntegerPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*booleanReduceIntegerPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &booleanReduceIntegerPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateBoolean(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. 
- if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]IntegerPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = integerPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// booleanStreamIntegerIterator streams inputs into the iterator and emits points gradually. -type booleanStreamIntegerIterator struct { - input *bufBooleanIterator - create func() (BooleanPointAggregator, IntegerPointEmitter) - dims []string - opt IteratorOptions - m map[string]*booleanReduceIntegerPoint - points []IntegerPoint -} - -// newBooleanStreamIntegerIterator returns a new instance of booleanStreamIntegerIterator. -func newBooleanStreamIntegerIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, IntegerPointEmitter), opt IteratorOptions) *booleanStreamIntegerIterator { - return &booleanStreamIntegerIterator{ - input: newBufBooleanIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*booleanReduceIntegerPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *booleanStreamIntegerIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *booleanStreamIntegerIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *booleanStreamIntegerIterator) Next() (*IntegerPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *booleanStreamIntegerIterator) reduce() ([]IntegerPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []IntegerPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. 
- itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &booleanReduceIntegerPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateBoolean(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// booleanReduceUnsignedIterator executes a reducer for every interval and buffers the result. -type booleanReduceUnsignedIterator struct { - input *bufBooleanIterator - create func() (BooleanPointAggregator, UnsignedPointEmitter) - dims []string - opt IteratorOptions - points []UnsignedPoint - keepTags bool -} - -func newBooleanReduceUnsignedIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, UnsignedPointEmitter)) *booleanReduceUnsignedIterator { - return &booleanReduceUnsignedIterator{ - input: newBufBooleanIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *booleanReduceUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *booleanReduceUnsignedIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *booleanReduceUnsignedIterator) Next() (*UnsignedPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// booleanReduceUnsignedPoint stores the reduced data for a name/tag combination. -type booleanReduceUnsignedPoint struct { - Name string - Tags Tags - Aggregator BooleanPointAggregator - Emitter UnsignedPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *booleanReduceUnsignedIterator) reduce() ([]UnsignedPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*booleanReduceUnsignedPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. 
- if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &booleanReduceUnsignedPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateBoolean(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]UnsignedPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = unsignedPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// booleanStreamUnsignedIterator streams inputs into the iterator and emits points gradually. -type booleanStreamUnsignedIterator struct { - input *bufBooleanIterator - create func() (BooleanPointAggregator, UnsignedPointEmitter) - dims []string - opt IteratorOptions - m map[string]*booleanReduceUnsignedPoint - points []UnsignedPoint -} - -// newBooleanStreamUnsignedIterator returns a new instance of booleanStreamUnsignedIterator. -func newBooleanStreamUnsignedIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, UnsignedPointEmitter), opt IteratorOptions) *booleanStreamUnsignedIterator { - return &booleanStreamUnsignedIterator{ - input: newBufBooleanIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*booleanReduceUnsignedPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *booleanStreamUnsignedIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *booleanStreamUnsignedIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *booleanStreamUnsignedIterator) Next() (*UnsignedPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. 
-// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *booleanStreamUnsignedIterator) reduce() ([]UnsignedPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []UnsignedPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &booleanReduceUnsignedPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateBoolean(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// booleanReduceStringIterator executes a reducer for every interval and buffers the result. -type booleanReduceStringIterator struct { - input *bufBooleanIterator - create func() (BooleanPointAggregator, StringPointEmitter) - dims []string - opt IteratorOptions - points []StringPoint - keepTags bool -} - -func newBooleanReduceStringIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, StringPointEmitter)) *booleanReduceStringIterator { - return &booleanReduceStringIterator{ - input: newBufBooleanIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *booleanReduceStringIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *booleanReduceStringIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *booleanReduceStringIterator) Next() (*StringPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// booleanReduceStringPoint stores the reduced data for a name/tag combination. -type booleanReduceStringPoint struct { - Name string - Tags Tags - Aggregator BooleanPointAggregator - Emitter StringPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *booleanReduceStringIterator) reduce() ([]StringPoint, error) { - // Calculate next window. 
- var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*booleanReduceStringPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &booleanReduceStringPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateBoolean(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]StringPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = stringPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// booleanStreamStringIterator streams inputs into the iterator and emits points gradually. -type booleanStreamStringIterator struct { - input *bufBooleanIterator - create func() (BooleanPointAggregator, StringPointEmitter) - dims []string - opt IteratorOptions - m map[string]*booleanReduceStringPoint - points []StringPoint -} - -// newBooleanStreamStringIterator returns a new instance of booleanStreamStringIterator. -func newBooleanStreamStringIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, StringPointEmitter), opt IteratorOptions) *booleanStreamStringIterator { - return &booleanStreamStringIterator{ - input: newBufBooleanIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*booleanReduceStringPoint), - } -} - -// Stats returns stats from the input iterator. 
-func (itr *booleanStreamStringIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *booleanStreamStringIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *booleanStreamStringIterator) Next() (*StringPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *booleanStreamStringIterator) reduce() ([]StringPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []StringPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &booleanReduceStringPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateBoolean(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// booleanReduceBooleanIterator executes a reducer for every interval and buffers the result. -type booleanReduceBooleanIterator struct { - input *bufBooleanIterator - create func() (BooleanPointAggregator, BooleanPointEmitter) - dims []string - opt IteratorOptions - points []BooleanPoint - keepTags bool -} - -func newBooleanReduceBooleanIterator(input BooleanIterator, opt IteratorOptions, createFn func() (BooleanPointAggregator, BooleanPointEmitter)) *booleanReduceBooleanIterator { - return &booleanReduceBooleanIterator{ - input: newBufBooleanIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *booleanReduceBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *booleanReduceBooleanIterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *booleanReduceBooleanIterator) Next() (*BooleanPoint, error) { - // Calculate next window if we have no more points. 
- if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// booleanReduceBooleanPoint stores the reduced data for a name/tag combination. -type booleanReduceBooleanPoint struct { - Name string - Tags Tags - Aggregator BooleanPointAggregator - Emitter BooleanPointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *booleanReduceBooleanIterator) reduce() ([]BooleanPoint, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*booleanReduceBooleanPoint) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &booleanReduceBooleanPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.AggregateBoolean(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. - // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]BooleanPoint, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points) - 1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = booleanPointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// booleanStreamBooleanIterator streams inputs into the iterator and emits points gradually. 
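// Note on the ordering logic in the reduce iterators above: reduce() sorts the
// group keys in the *reverse* of the requested direction and appends each
// group's emitted points back to front, while Next() pops points off the end
// of the returned slice. The two reversals cancel out, so the caller still
// sees groups and points in the requested order. For example, with ascending
// output and two groups "host=a" and "host=b":
//
//	keys after sorting:     [host=b, host=a]
//	slice built by reduce:  [b's points (reversed), a's points (reversed)]
//	order popped by Next(): a's points (in emit order), then b's points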
-type booleanStreamBooleanIterator struct { - input *bufBooleanIterator - create func() (BooleanPointAggregator, BooleanPointEmitter) - dims []string - opt IteratorOptions - m map[string]*booleanReduceBooleanPoint - points []BooleanPoint -} - -// newBooleanStreamBooleanIterator returns a new instance of booleanStreamBooleanIterator. -func newBooleanStreamBooleanIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, BooleanPointEmitter), opt IteratorOptions) *booleanStreamBooleanIterator { - return &booleanStreamBooleanIterator{ - input: newBufBooleanIterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*booleanReduceBooleanPoint), - } -} - -// Stats returns stats from the input iterator. -func (itr *booleanStreamBooleanIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *booleanStreamBooleanIterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *booleanStreamBooleanIterator) Next() (*BooleanPoint, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *booleanStreamBooleanIterator) reduce() ([]BooleanPoint, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. - var points []BooleanPoint - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &booleanReduceBooleanPoint{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.AggregateBoolean(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} - -// booleanDedupeIterator only outputs unique points. -// This differs from the DistinctIterator in that it compares all aux fields too. -// This iterator is relatively inefficient and should only be used on small -// datasets such as meta query results. 
-type booleanDedupeIterator struct { - input BooleanIterator - m map[string]struct{} // lookup of points already sent -} - -type booleanIteratorMapper struct { - cur Cursor - row Row - driver IteratorMap // which iterator to use for the primary value, can be nil - fields []IteratorMap // which iterator to use for an aux field - point BooleanPoint -} - -func newBooleanIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *booleanIteratorMapper { - return &booleanIteratorMapper{ - cur: cur, - driver: driver, - fields: fields, - point: BooleanPoint{ - Aux: make([]interface{}, len(fields)), - }, - } -} - -func (itr *booleanIteratorMapper) Next() (*BooleanPoint, error) { - if !itr.cur.Scan(&itr.row) { - if err := itr.cur.Err(); err != nil { - return nil, err - } - return nil, nil - } - - itr.point.Time = itr.row.Time - itr.point.Name = itr.row.Series.Name - itr.point.Tags = itr.row.Series.Tags - - if itr.driver != nil { - if v := itr.driver.Value(&itr.row); v != nil { - if v, ok := castToBoolean(v); ok { - itr.point.Value = v - itr.point.Nil = false - } else { - itr.point.Value = false - itr.point.Nil = true - } - } else { - itr.point.Value = false - itr.point.Nil = true - } - } - for i, f := range itr.fields { - itr.point.Aux[i] = f.Value(&itr.row) - } - return &itr.point, nil -} - -func (itr *booleanIteratorMapper) Stats() IteratorStats { - return itr.cur.Stats() -} - -func (itr *booleanIteratorMapper) Close() error { - return itr.cur.Close() -} - -type booleanFilterIterator struct { - input BooleanIterator - cond influxql.Expr - opt IteratorOptions - m map[string]interface{} -} - -func newBooleanFilterIterator(input BooleanIterator, cond influxql.Expr, opt IteratorOptions) BooleanIterator { - // Strip out time conditions from the WHERE clause. - // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. 
- n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node { - switch n := n.(type) { - case *influxql.BinaryExpr: - if n.LHS.String() == "time" { - return &influxql.BooleanLiteral{Val: true} - } - } - return n - }) - - cond, _ = n.(influxql.Expr) - if cond == nil { - return input - } else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val { - return input - } - - return &booleanFilterIterator{ - input: input, - cond: cond, - opt: opt, - m: make(map[string]interface{}), - } -} - -func (itr *booleanFilterIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *booleanFilterIterator) Close() error { return itr.input.Close() } - -func (itr *booleanFilterIterator) Next() (*BooleanPoint, error) { - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } - - for i, ref := range itr.opt.Aux { - itr.m[ref.Val] = p.Aux[i] - } - for k, v := range p.Tags.KeyValues() { - itr.m[k] = v - } - - if !influxql.EvalBool(itr.cond, itr.m) { - continue - } - return p, nil - } -} - -type booleanTagSubsetIterator struct { - input BooleanIterator - point BooleanPoint - lastTags Tags - dimensions []string -} - -func newBooleanTagSubsetIterator(input BooleanIterator, opt IteratorOptions) *booleanTagSubsetIterator { - return &booleanTagSubsetIterator{ - input: input, - dimensions: opt.GetDimensions(), - } -} - -func (itr *booleanTagSubsetIterator) Next() (*BooleanPoint, error) { - p, err := itr.input.Next() - if err != nil { - return nil, err - } else if p == nil { - return nil, nil - } - - itr.point.Name = p.Name - if !p.Tags.Equal(itr.lastTags) { - itr.point.Tags = p.Tags.Subset(itr.dimensions) - itr.lastTags = p.Tags - } - itr.point.Time = p.Time - itr.point.Value = p.Value - itr.point.Aux = p.Aux - itr.point.Aggregated = p.Aggregated - itr.point.Nil = p.Nil - return &itr.point, nil -} - -func (itr *booleanTagSubsetIterator) Stats() IteratorStats { - return itr.input.Stats() -} - -func (itr *booleanTagSubsetIterator) Close() error { - return itr.input.Close() -} - -// newBooleanDedupeIterator returns a new instance of booleanDedupeIterator. -func newBooleanDedupeIterator(input BooleanIterator) *booleanDedupeIterator { - return &booleanDedupeIterator{ - input: input, - m: make(map[string]struct{}), - } -} - -// Stats returns stats from the input iterator. -func (itr *booleanDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *booleanDedupeIterator) Close() error { return itr.input.Close() } - -// Next returns the next unique point from the input iterator. -func (itr *booleanDedupeIterator) Next() (*BooleanPoint, error) { - for { - // Read next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Serialize to bytes to store in lookup. - buf, err := proto.Marshal(encodeBooleanPoint(p)) - if err != nil { - return nil, err - } - - // If the point has already been output then move to the next point. - if _, ok := itr.m[string(buf)]; ok { - continue - } - - // Otherwise mark it as emitted and return point. - itr.m[string(buf)] = struct{}{} - return p, nil - } -} - -// booleanReaderIterator represents an iterator that streams from a reader. -type booleanReaderIterator struct { - r io.Reader - dec *BooleanPointDecoder -} - -// newBooleanReaderIterator returns a new instance of booleanReaderIterator. 
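// For each point, the boolean filter iterator above flattens tag values and aux
// fields into a map and evaluates the time-stripped condition with
// influxql.EvalBool. A small self-contained sketch of that evaluation step;
// exampleFilterEvaluation is hypothetical and the expression and field names
// are illustrative only.
func exampleFilterEvaluation() (bool, error) {
	cond, err := influxql.ParseExpr(`host = 'server01' AND enabled = true`)
	if err != nil {
		return false, err
	}
	values := map[string]interface{}{
		"host":    "server01", // would come from p.Tags.KeyValues()
		"enabled": true,       // would come from p.Aux via itr.opt.Aux
	}
	return influxql.EvalBool(cond, values), nil
}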
-func newBooleanReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *booleanReaderIterator { - dec := NewBooleanPointDecoder(ctx, r) - dec.stats = stats - - return &booleanReaderIterator{ - r: r, - dec: dec, - } -} - -// Stats returns stats about points processed. -func (itr *booleanReaderIterator) Stats() IteratorStats { return itr.dec.stats } - -// Close closes the underlying reader, if applicable. -func (itr *booleanReaderIterator) Close() error { - if r, ok := itr.r.(io.ReadCloser); ok { - return r.Close() - } - return nil -} - -// Next returns the next point from the iterator. -func (itr *booleanReaderIterator) Next() (*BooleanPoint, error) { - // OPTIMIZE(benbjohnson): Reuse point on iterator. - - // Unmarshal next point. - p := &BooleanPoint{} - if err := itr.dec.DecodeBooleanPoint(p); err == io.EOF { - return nil, nil - } else if err != nil { - return nil, err - } - return p, nil -} - -// encodeFloatIterator encodes all points from itr to the underlying writer. -func (enc *IteratorEncoder) encodeFloatIterator(itr FloatIterator) error { - ticker := time.NewTicker(enc.StatsInterval) - defer ticker.Stop() - - // Emit initial stats. - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - - // Continually stream points from the iterator into the encoder. - penc := NewFloatPointEncoder(enc.w) - for { - // Emit stats periodically. - select { - case <-ticker.C: - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - default: - } - - // Retrieve the next point from the iterator. - p, err := itr.Next() - if err != nil { - return err - } else if p == nil { - break - } - - // Write the point to the point encoder. - if err := penc.EncodeFloatPoint(p); err != nil { - return err - } - } - - // Emit final stats. - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - return nil -} - -// encodeIntegerIterator encodes all points from itr to the underlying writer. -func (enc *IteratorEncoder) encodeIntegerIterator(itr IntegerIterator) error { - ticker := time.NewTicker(enc.StatsInterval) - defer ticker.Stop() - - // Emit initial stats. - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - - // Continually stream points from the iterator into the encoder. - penc := NewIntegerPointEncoder(enc.w) - for { - // Emit stats periodically. - select { - case <-ticker.C: - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - default: - } - - // Retrieve the next point from the iterator. - p, err := itr.Next() - if err != nil { - return err - } else if p == nil { - break - } - - // Write the point to the point encoder. - if err := penc.EncodeIntegerPoint(p); err != nil { - return err - } - } - - // Emit final stats. - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - return nil -} - -// encodeUnsignedIterator encodes all points from itr to the underlying writer. -func (enc *IteratorEncoder) encodeUnsignedIterator(itr UnsignedIterator) error { - ticker := time.NewTicker(enc.StatsInterval) - defer ticker.Stop() - - // Emit initial stats. - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - - // Continually stream points from the iterator into the encoder. - penc := NewUnsignedPointEncoder(enc.w) - for { - // Emit stats periodically. - select { - case <-ticker.C: - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - default: - } - - // Retrieve the next point from the iterator. 
- p, err := itr.Next() - if err != nil { - return err - } else if p == nil { - break - } - - // Write the point to the point encoder. - if err := penc.EncodeUnsignedPoint(p); err != nil { - return err - } - } - - // Emit final stats. - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - return nil -} - -// encodeStringIterator encodes all points from itr to the underlying writer. -func (enc *IteratorEncoder) encodeStringIterator(itr StringIterator) error { - ticker := time.NewTicker(enc.StatsInterval) - defer ticker.Stop() - - // Emit initial stats. - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - - // Continually stream points from the iterator into the encoder. - penc := NewStringPointEncoder(enc.w) - for { - // Emit stats periodically. - select { - case <-ticker.C: - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - default: - } - - // Retrieve the next point from the iterator. - p, err := itr.Next() - if err != nil { - return err - } else if p == nil { - break - } - - // Write the point to the point encoder. - if err := penc.EncodeStringPoint(p); err != nil { - return err - } - } - - // Emit final stats. - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - return nil -} - -// encodeBooleanIterator encodes all points from itr to the underlying writer. -func (enc *IteratorEncoder) encodeBooleanIterator(itr BooleanIterator) error { - ticker := time.NewTicker(enc.StatsInterval) - defer ticker.Stop() - - // Emit initial stats. - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - - // Continually stream points from the iterator into the encoder. - penc := NewBooleanPointEncoder(enc.w) - for { - // Emit stats periodically. - select { - case <-ticker.C: - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - default: - } - - // Retrieve the next point from the iterator. - p, err := itr.Next() - if err != nil { - return err - } else if p == nil { - break - } - - // Write the point to the point encoder. - if err := penc.EncodeBooleanPoint(p); err != nil { - return err - } - } - - // Emit final stats. - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - return nil -} diff --git a/influxql/query/iterator.gen.go.tmpl b/influxql/query/iterator.gen.go.tmpl deleted file mode 100644 index 1c3e7c6f270..00000000000 --- a/influxql/query/iterator.gen.go.tmpl +++ /dev/null @@ -1,1588 +0,0 @@ -//lint:file-ignore U1000 this is generated code -package query - -import ( - "context" - "container/heap" - "io" - "sort" - "sync" - "time" - - "github.com/influxdata/influxql" - "google.golang.org/protobuf/proto" -) - -// DefaultStatsInterval is the default value for IteratorEncoder.StatsInterval. -const DefaultStatsInterval = time.Second - -{{with $types := .}}{{range $k := $types}} - -// {{$k.Name}}Iterator represents a stream of {{$k.name}} points. -type {{$k.Name}}Iterator interface { - Iterator - Next() (*{{$k.Name}}Point, error) -} - -// new{{$k.Name}}Iterators converts a slice of Iterator to a slice of {{$k.Name}}Iterator. -// Drop and closes any iterator in itrs that is not a {{$k.Name}}Iterator and cannot -// be cast to a {{$k.Name}}Iterator. 
-func new{{$k.Name}}Iterators(itrs []Iterator) []{{$k.Name}}Iterator { - a := make([]{{$k.Name}}Iterator, 0, len(itrs)) - for _, itr := range itrs { - switch itr := itr.(type) { - case {{$k.Name}}Iterator: - a = append(a, itr) - default: - itr.Close() - } - } - return a -} - - -// buf{{$k.Name}}Iterator represents a buffered {{$k.Name}}Iterator. -type buf{{$k.Name}}Iterator struct { - itr {{$k.Name}}Iterator - buf *{{$k.Name}}Point -} - -// newBuf{{$k.Name}}Iterator returns a buffered {{$k.Name}}Iterator. -func newBuf{{$k.Name}}Iterator(itr {{$k.Name}}Iterator) *buf{{$k.Name}}Iterator { - return &buf{{$k.Name}}Iterator{itr: itr} -} - -// Stats returns statistics from the input iterator. -func (itr *buf{{$k.Name}}Iterator) Stats() IteratorStats { return itr.itr.Stats() } - -// Close closes the underlying iterator. -func (itr *buf{{$k.Name}}Iterator) Close() error { return itr.itr.Close() } - -// peek returns the next point without removing it from the iterator. -func (itr *buf{{$k.Name}}Iterator) peek() (*{{$k.Name}}Point, error) { - p, err := itr.Next() - if err != nil { - return nil, err - } - itr.unread(p) - return p, nil -} - -// peekTime returns the time of the next point. -// Returns zero time if no more points available. -func (itr *buf{{$k.Name}}Iterator) peekTime() (int64, error) { - p, err := itr.peek() - if p == nil || err != nil { - return ZeroTime, err - } - return p.Time, nil -} - -// Next returns the current buffer, if exists, or calls the underlying iterator. -func (itr *buf{{$k.Name}}Iterator) Next() (*{{$k.Name}}Point, error) { - buf := itr.buf - if buf != nil { - itr.buf = nil - return buf, nil - } - return itr.itr.Next() -} - -// NextInWindow returns the next value if it is between [startTime, endTime). -// If the next value is outside the range then it is moved to the buffer. -func (itr *buf{{$k.Name}}Iterator) NextInWindow(startTime, endTime int64) (*{{$k.Name}}Point, error) { - v, err := itr.Next() - if v == nil || err != nil { - return nil, err - } else if t := v.Time; t >= endTime || t < startTime { - itr.unread(v) - return nil, nil - } - return v, nil -} - -// unread sets v to the buffer. It is read on the next call to Next(). -func (itr *buf{{$k.Name}}Iterator) unread(v *{{$k.Name}}Point) { itr.buf = v } - -// {{$k.name}}MergeIterator represents an iterator that combines multiple {{$k.name}} iterators. -type {{$k.name}}MergeIterator struct { - inputs []{{$k.Name}}Iterator - heap *{{$k.name}}MergeHeap - init bool - - closed bool - mu sync.RWMutex - - // Current iterator and window. - curr *{{$k.name}}MergeHeapItem - window struct { - name string - tags string - startTime int64 - endTime int64 - } -} - -// new{{$k.Name}}MergeIterator returns a new instance of {{$k.name}}MergeIterator. -func new{{$k.Name}}MergeIterator(inputs []{{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}MergeIterator { - itr := &{{$k.name}}MergeIterator{ - inputs: inputs, - heap: &{{$k.name}}MergeHeap{ - items: make([]*{{$k.name}}MergeHeapItem, 0, len(inputs)), - opt: opt, - }, - } - - // Initialize heap items. - for _, input := range inputs { - // Wrap in buffer, ignore any inputs without anymore points. - bufInput := newBuf{{$k.Name}}Iterator(input) - - // Append to the heap. - itr.heap.items = append(itr.heap.items, &{{$k.name}}MergeHeapItem{itr: bufInput}) - } - - return itr -} - -// Stats returns an aggregation of stats from the underlying iterators. 
-func (itr *{{$k.name}}MergeIterator) Stats() IteratorStats { - var stats IteratorStats - for _, input := range itr.inputs { - stats.Add(input.Stats()) - } - return stats -} - -// Close closes the underlying iterators. -func (itr *{{$k.name}}MergeIterator) Close() error { - itr.mu.Lock() - defer itr.mu.Unlock() - - for _, input := range itr.inputs { - input.Close() - } - itr.curr = nil - itr.inputs = nil - itr.heap.items = nil - itr.closed = true - return nil -} - -// Next returns the next point from the iterator. -func (itr *{{$k.name}}MergeIterator) Next() (*{{$k.Name}}Point, error) { - itr.mu.RLock() - defer itr.mu.RUnlock() - if itr.closed { - return nil, nil - } - - // Initialize the heap. This needs to be done lazily on the first call to this iterator - // so that iterator initialization done through the Select() call returns quickly. - // Queries can only be interrupted after the Select() call completes so any operations - // done during iterator creation cannot be interrupted, which is why we do it here - // instead so an interrupt can happen while initializing the heap. - if !itr.init { - items := itr.heap.items - itr.heap.items = make([]*{{$k.name}}MergeHeapItem, 0, len(items)) - for _, item := range items { - if p, err := item.itr.peek(); err != nil { - return nil, err - } else if p == nil { - continue - } - itr.heap.items = append(itr.heap.items, item) - } - heap.Init(itr.heap) - itr.init = true - } - - for { - // Retrieve the next iterator if we don't have one. - if itr.curr == nil { - if len(itr.heap.items) == 0 { - return nil, nil - } - itr.curr = heap.Pop(itr.heap).(*{{$k.name}}MergeHeapItem) - - // Read point and set current window. - p, err := itr.curr.itr.Next() - if err != nil { - return nil, err - } - tags := p.Tags.Subset(itr.heap.opt.Dimensions) - itr.window.name, itr.window.tags = p.Name, tags.ID() - itr.window.startTime, itr.window.endTime = itr.heap.opt.Window(p.Time) - return p, nil - } - - // Read the next point from the current iterator. - p, err := itr.curr.itr.Next() - if err != nil { - return nil, err - } - - // If there are no more points then remove iterator from heap and find next. - if p == nil { - itr.curr = nil - continue - } - - // Check if the point is inside of our current window. - inWindow := true - if window := itr.window; window.name != p.Name { - inWindow = false - } else if tags := p.Tags.Subset(itr.heap.opt.Dimensions); window.tags != tags.ID() { - inWindow = false - } else if opt := itr.heap.opt; opt.Ascending && p.Time >= window.endTime { - inWindow = false - } else if !opt.Ascending && p.Time < window.startTime { - inWindow = false - } - - // If it's outside our window then push iterator back on the heap and find new iterator. - if !inWindow { - itr.curr.itr.unread(p) - heap.Push(itr.heap, itr.curr) - itr.curr = nil - continue - } - - return p, nil - } -} - -// {{$k.name}}MergeHeap represents a heap of {{$k.name}}MergeHeapItems. -// Items are sorted by their next window and then by name/tags. 
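// The merge iterator above is instantiated for every point type; the sketch
// below assumes the Float expansion (newFloatMergeIterator) to show how
// several per-shard inputs might be combined. sliceFloatIterator and
// mergeShards are hypothetical helpers, present only to satisfy the
// FloatIterator surface (Next, Stats, Close) that the merge heap relies on.
type sliceFloatIterator struct {
	points []FloatPoint
}

func (itr *sliceFloatIterator) Stats() IteratorStats { return IteratorStats{} }
func (itr *sliceFloatIterator) Close() error         { return nil }

func (itr *sliceFloatIterator) Next() (*FloatPoint, error) {
	if len(itr.points) == 0 {
		return nil, nil
	}
	p := &itr.points[0]
	itr.points = itr.points[1:]
	return p, nil
}

// mergeShards returns an iterator whose output is grouped window by window and
// ordered by name and tag set within each window, per the heap's Less method.
func mergeShards(shardA, shardB []FloatPoint, opt IteratorOptions) FloatIterator {
	return newFloatMergeIterator([]FloatIterator{
		&sliceFloatIterator{points: shardA},
		&sliceFloatIterator{points: shardB},
	}, opt)
}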
-type {{$k.name}}MergeHeap struct { - opt IteratorOptions - items []*{{$k.name}}MergeHeapItem -} - -func (h *{{$k.name}}MergeHeap) Len() int { return len(h.items) } -func (h *{{$k.name}}MergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h *{{$k.name}}MergeHeap) Less(i, j int) bool { - x, err := h.items[i].itr.peek() - if err != nil { - return true - } - y, err := h.items[j].itr.peek() - if err != nil { - return false - } - - if h.opt.Ascending { - if x.Name != y.Name { - return x.Name < y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { - return xTags.ID() < yTags.ID() - } - } else { - if x.Name != y.Name { - return x.Name > y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); xTags.ID() != yTags.ID() { - return xTags.ID() > yTags.ID() - } - } - - xt, _ := h.opt.Window(x.Time) - yt, _ := h.opt.Window(y.Time) - - if h.opt.Ascending { - return xt < yt - } - return xt > yt -} - - -func (h *{{$k.name}}MergeHeap) Push(x interface{}) { - h.items = append(h.items, x.(*{{$k.name}}MergeHeapItem)) -} - -func (h *{{$k.name}}MergeHeap) Pop() interface{} { - old := h.items - n := len(old) - item := old[n-1] - h.items = old[0 : n-1] - return item -} - -type {{$k.name}}MergeHeapItem struct { - itr *buf{{$k.Name}}Iterator -} - -// {{$k.name}}SortedMergeIterator is an iterator that sorts and merges multiple iterators into one. -type {{$k.name}}SortedMergeIterator struct { - inputs []{{$k.Name}}Iterator - heap *{{$k.name}}SortedMergeHeap - init bool -} - -// new{{$k.Name}}SortedMergeIterator returns an instance of {{$k.name}}SortedMergeIterator. -func new{{$k.Name}}SortedMergeIterator(inputs []{{$k.Name}}Iterator, opt IteratorOptions) Iterator { - itr := &{{$k.name}}SortedMergeIterator{ - inputs: inputs, - heap: &{{$k.name}}SortedMergeHeap{ - items: make([]*{{$k.name}}SortedMergeHeapItem, 0, len(inputs)), - opt: opt, - }, - } - - // Initialize heap items. - for _, input := range inputs { - // Append to the heap. - itr.heap.items = append(itr.heap.items, &{{$k.name}}SortedMergeHeapItem{itr: input}) - } - - return itr -} - -// Stats returns an aggregation of stats from the underlying iterators. -func (itr *{{$k.name}}SortedMergeIterator) Stats() IteratorStats { - var stats IteratorStats - for _, input := range itr.inputs { - stats.Add(input.Stats()) - } - return stats -} - -// Close closes the underlying iterators. -func (itr *{{$k.name}}SortedMergeIterator) Close() error { - for _, input := range itr.inputs { - input.Close() - } - return nil -} - -// Next returns the next points from the iterator. -func (itr *{{$k.name}}SortedMergeIterator) Next() (*{{$k.Name}}Point, error) { return itr.pop() } - -// pop returns the next point from the heap. -// Reads the next point from item's cursor and puts it back on the heap. -func (itr *{{$k.name}}SortedMergeIterator) pop() (*{{$k.Name}}Point, error) { - // Initialize the heap. See the MergeIterator to see why this has to be done lazily. - if !itr.init { - items := itr.heap.items - itr.heap.items = make([]*{{$k.name}}SortedMergeHeapItem, 0, len(items)) - for _, item := range items { - var err error - if item.point, err = item.itr.Next(); err != nil { - return nil, err - } else if item.point == nil { - continue - } - itr.heap.items = append(itr.heap.items, item) - } - heap.Init(itr.heap) - itr.init = true - } - - if len(itr.heap.items) == 0 { - return nil, nil - } - - // Read the next item from the heap. 
- item := heap.Pop(itr.heap).(*{{$k.name}}SortedMergeHeapItem) - if item.err != nil { - return nil, item.err - } else if item.point == nil { - return nil, nil - } - - // Copy the point for return. - p := item.point.Clone() - - // Read the next item from the cursor. Push back to heap if one exists. - if item.point, item.err = item.itr.Next(); item.point != nil { - heap.Push(itr.heap, item) - } - - return p, nil -} - -// {{$k.name}}SortedMergeHeap represents a heap of {{$k.name}}SortedMergeHeapItems. -// Items are sorted with the following priority: -// - By their measurement name; -// - By their tag keys/values; -// - By time; or -// - By their Aux field values. -// -type {{$k.name}}SortedMergeHeap struct { - opt IteratorOptions - items []*{{$k.name}}SortedMergeHeapItem -} - -func (h *{{$k.name}}SortedMergeHeap) Len() int { return len(h.items) } -func (h *{{$k.name}}SortedMergeHeap) Swap(i, j int) { h.items[i], h.items[j] = h.items[j], h.items[i] } -func (h *{{$k.name}}SortedMergeHeap) Less(i, j int) bool { - x, y := h.items[i].point, h.items[j].point - - if h.opt.Ascending { - if x.Name != y.Name { - return x.Name < y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { - return xTags.ID() < yTags.ID() - } - - if x.Time != y.Time{ - return x.Time < y.Time - } - - if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { - for i := 0; i < len(x.Aux); i++ { - v1, ok1 := x.Aux[i].(string) - v2, ok2 := y.Aux[i].(string) - if !ok1 || !ok2 { - // Unsupported types used in Aux fields. Maybe they - // need to be added here? - return false - } else if v1 == v2 { - continue - } - return v1 < v2 - } - } - return false // Times and/or Aux fields are equal. - } - - if x.Name != y.Name { - return x.Name > y.Name - } else if xTags, yTags := x.Tags.Subset(h.opt.Dimensions), y.Tags.Subset(h.opt.Dimensions); !xTags.Equals(&yTags) { - return xTags.ID() > yTags.ID() - } - - if x.Time != y.Time{ - return x.Time > y.Time - } - - if len(x.Aux) > 0 && len(x.Aux) == len(y.Aux) { - for i := 0; i < len(x.Aux); i++ { - v1, ok1 := x.Aux[i].(string) - v2, ok2 := y.Aux[i].(string) - if !ok1 || !ok2 { - // Unsupported types used in Aux fields. Maybe they - // need to be added here? - return false - } else if v1 == v2 { - continue - } - return v1 > v2 - } - } - return false // Times and/or Aux fields are equal. -} - -func (h *{{$k.name}}SortedMergeHeap) Push(x interface{}) { - h.items = append(h.items, x.(*{{$k.name}}SortedMergeHeapItem)) -} - -func (h *{{$k.name}}SortedMergeHeap) Pop() interface{} { - old := h.items - n := len(old) - item := old[n-1] - h.items = old[0 : n-1] - return item -} - -type {{$k.name}}SortedMergeHeapItem struct { - point *{{$k.Name}}Point - err error - itr {{$k.Name}}Iterator -} - -// {{$k.name}}IteratorScanner scans the results of a {{$k.Name}}Iterator into a map. -type {{$k.name}}IteratorScanner struct { - input *buf{{$k.Name}}Iterator - err error - keys []influxql.VarRef - defaultValue interface{} -} - -// new{{$k.Name}}IteratorScanner creates a new IteratorScanner. 
-func new{{$k.Name}}IteratorScanner(input {{$k.Name}}Iterator, keys []influxql.VarRef, defaultValue interface{}) *{{$k.name}}IteratorScanner { - return &{{$k.name}}IteratorScanner{ - input: newBuf{{$k.Name}}Iterator(input), - keys: keys, - defaultValue: defaultValue, - } -} - -func (s *{{$k.name}}IteratorScanner) Peek() (int64, string, Tags) { - if s.err != nil { - return ZeroTime, "", Tags{} - } - - p, err := s.input.peek() - if err != nil { - s.err = err - return ZeroTime, "", Tags{} - } else if p == nil { - return ZeroTime, "", Tags{} - } - return p.Time, p.Name, p.Tags -} - -func (s *{{$k.name}}IteratorScanner) ScanAt(ts int64, name string, tags Tags, m map[string]interface{}) { - if s.err != nil { - return - } - - p, err := s.input.Next() - if err != nil { - s.err = err - return - } else if p == nil { - s.useDefaults(m) - return - } else if p.Time != ts || p.Name != name || !p.Tags.Equals(&tags) { - s.useDefaults(m) - s.input.unread(p) - return - } - - if k := s.keys[0]; k.Val != "" { - if p.Nil { - if s.defaultValue != SkipDefault { - m[k.Val] = castToType(s.defaultValue, k.Type) - } - } else { - m[k.Val] = p.Value - } - } - for i, v := range p.Aux { - k := s.keys[i+1] - switch v.(type) { - case float64, int64, uint64, string, bool: - m[k.Val] = v - default: - // Insert the fill value if one was specified. - if s.defaultValue != SkipDefault { - m[k.Val] = castToType(s.defaultValue, k.Type) - } - } - } -} - -func (s *{{$k.name}}IteratorScanner) useDefaults(m map[string]interface{}) { - if s.defaultValue == SkipDefault { - return - } - for _, k := range s.keys { - if k.Val == "" { - continue - } - m[k.Val] = castToType(s.defaultValue, k.Type) - } -} - -func (s *{{$k.name}}IteratorScanner) Stats() IteratorStats { return s.input.Stats() } -func (s *{{$k.name}}IteratorScanner) Err() error { return s.err } -func (s *{{$k.name}}IteratorScanner) Close() error { return s.input.Close() } - -// {{$k.name}}ParallelIterator represents an iterator that pulls data in a separate goroutine. -type {{$k.name}}ParallelIterator struct { - input {{$k.Name}}Iterator - ch chan {{$k.name}}PointError - - once sync.Once - closing chan struct{} - wg sync.WaitGroup -} - -// new{{$k.Name}}ParallelIterator returns a new instance of {{$k.name}}ParallelIterator. -func new{{$k.Name}}ParallelIterator(input {{$k.Name}}Iterator) *{{$k.name}}ParallelIterator { - itr := &{{$k.name}}ParallelIterator{ - input: input, - ch: make(chan {{$k.name}}PointError, 256), - closing: make(chan struct{}), - } - itr.wg.Add(1) - go itr.monitor() - return itr -} - -// Stats returns stats from the underlying iterator. -func (itr *{{$k.name}}ParallelIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the underlying iterators. -func (itr *{{$k.name}}ParallelIterator) Close() error { - itr.once.Do(func() { close(itr.closing) }) - itr.wg.Wait() - return itr.input.Close() -} - -// Next returns the next point from the iterator. -func (itr *{{$k.name}}ParallelIterator) Next() (*{{$k.Name}}Point, error) { - v, ok := <-itr.ch - if !ok { - return nil, io.EOF - } - return v.point, v.err -} - -// monitor runs in a separate goroutine and actively pulls the next point. -func (itr *{{$k.name}}ParallelIterator) monitor() { - defer close(itr.ch) - defer itr.wg.Done() - - for { - // Read next point. 
- p, err := itr.input.Next() - if p != nil { - p = p.Clone() - } - - select { - case <-itr.closing: - return - case itr.ch <- {{$k.name}}PointError{point: p, err: err}: - } - } -} - -type {{$k.name}}PointError struct { - point *{{$k.Name}}Point - err error -} - -// {{$k.name}}LimitIterator represents an iterator that limits points per group. -type {{$k.name}}LimitIterator struct { - input {{$k.Name}}Iterator - opt IteratorOptions - n int - - prev struct { - name string - tags Tags - } -} - -// new{{$k.Name}}LimitIterator returns a new instance of {{$k.name}}LimitIterator. -func new{{$k.Name}}LimitIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}LimitIterator { - return &{{$k.name}}LimitIterator{ - input: input, - opt: opt, - } -} - -// Stats returns stats from the underlying iterator. -func (itr *{{$k.name}}LimitIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the underlying iterators. -func (itr *{{$k.name}}LimitIterator) Close() error { return itr.input.Close() } - -// Next returns the next point from the iterator. -func (itr *{{$k.name}}LimitIterator) Next() (*{{$k.Name}}Point, error) { - for { - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Reset window and counter if a new window is encountered. - if p.Name != itr.prev.name || !p.Tags.Equals(&itr.prev.tags) { - itr.prev.name = p.Name - itr.prev.tags = p.Tags - itr.n = 0 - } - - // Increment counter. - itr.n++ - - // Read next point if not beyond the offset. - if itr.n <= itr.opt.Offset { - continue - } - - // Read next point if we're beyond the limit. - if itr.opt.Limit > 0 && (itr.n-itr.opt.Offset) > itr.opt.Limit { - continue - } - - return p, nil - } -} - -type {{$k.name}}FillIterator struct { - input *buf{{$k.Name}}Iterator - prev {{$k.Name}}Point - startTime int64 - endTime int64 - auxFields []interface{} - init bool - opt IteratorOptions - - window struct { - name string - tags Tags - time int64 - offset int64 - } -} - -func new{{$k.Name}}FillIterator(input {{$k.Name}}Iterator, expr influxql.Expr, opt IteratorOptions) *{{$k.name}}FillIterator { - if opt.Fill == influxql.NullFill { - if expr, ok := expr.(*influxql.Call); ok && expr.Name == "count" { - opt.Fill = influxql.NumberFill - opt.FillValue = {{$k.Zero}} - } - } - - var startTime, endTime int64 - if opt.Ascending { - startTime, _ = opt.Window(opt.StartTime) - endTime, _ = opt.Window(opt.EndTime) - } else { - startTime, _ = opt.Window(opt.EndTime) - endTime, _ = opt.Window(opt.StartTime) - } - - var auxFields []interface{} - if len(opt.Aux) > 0 { - auxFields = make([]interface{}, len(opt.Aux)) - } - - return &{{$k.name}}FillIterator{ - input: newBuf{{$k.Name}}Iterator(input), - prev: {{$k.Name}}Point{Nil: true}, - startTime: startTime, - endTime: endTime, - auxFields: auxFields, - opt: opt, - } -} - -func (itr *{{$k.name}}FillIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *{{$k.name}}FillIterator) Close() error { return itr.input.Close() } - -func (itr *{{$k.name}}FillIterator) Next() (*{{$k.Name}}Point, error) { - if !itr.init { - p, err := itr.input.peek() - if p == nil || err != nil { - return nil, err - } - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - if itr.startTime == influxql.MinTime { - itr.window.time, _ = itr.opt.Window(p.Time) - } - if itr.opt.Location != nil { - _, itr.window.offset = itr.opt.Zone(itr.window.time) - } - itr.init = true - } - - p, err := itr.input.Next() - if err != nil { - return nil, err - 
} - - // Check if the next point is outside of our window or is nil. - if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { - // If we are inside of an interval, unread the point and continue below to - // constructing a new point. - if itr.opt.Ascending && itr.window.time <= itr.endTime { - itr.input.unread(p) - p = nil - goto CONSTRUCT - } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { - itr.input.unread(p) - p = nil - goto CONSTRUCT - } - - // We are *not* in a current interval. If there is no next point, - // we are at the end of all intervals. - if p == nil { - return nil, nil - } - - // Set the new interval. - itr.window.name, itr.window.tags = p.Name, p.Tags - itr.window.time = itr.startTime - if itr.window.time == influxql.MinTime { - itr.window.time, _ = itr.opt.Window(p.Time) - } - if itr.opt.Location != nil { - _, itr.window.offset = itr.opt.Zone(itr.window.time) - } - itr.prev = {{$k.Name}}Point{Nil: true} - } - - // Check if the point is our next expected point. -CONSTRUCT: - if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { - if p != nil { - itr.input.unread(p) - } - - p = &{{$k.Name}}Point{ - Name: itr.window.name, - Tags: itr.window.tags, - Time: itr.window.time, - Aux: itr.auxFields, - } - - switch itr.opt.Fill { - case influxql.LinearFill: - {{- if or (eq $k.Name "Float") (eq $k.Name "Integer") (eq $k.Name "Unsigned")}} - if !itr.prev.Nil { - next, err := itr.input.peek() - if err != nil { - return nil, err - } else if next != nil && next.Name == itr.window.name && next.Tags.ID() == itr.window.tags.ID() { - interval := int64(itr.opt.Interval.Duration) - start := itr.window.time / interval - p.Value = linear{{$k.Name}}(start, itr.prev.Time/interval, next.Time/interval, itr.prev.Value, next.Value) - } else { - p.Nil = true - } - } else { - p.Nil = true - } - {{else}} - fallthrough - {{- end}} - case influxql.NullFill: - p.Nil = true - case influxql.NumberFill: - p.Value, _ = castTo{{$k.Name}}(itr.opt.FillValue) - case influxql.PreviousFill: - if !itr.prev.Nil { - p.Value = itr.prev.Value - p.Nil = itr.prev.Nil - } else { - p.Nil = true - } - } - } else { - itr.prev = *p - } - - // Advance the expected time. Do not advance to a new window here - // as there may be lingering points with the same timestamp in the previous - // window. - if itr.opt.Ascending { - itr.window.time += int64(itr.opt.Interval.Duration) - } else { - itr.window.time -= int64(itr.opt.Interval.Duration) - } - - // Check to see if we have passed over an offset change and adjust the time - // to account for this new offset. - if itr.opt.Location != nil { - if _, offset := itr.opt.Zone(itr.window.time - 1); offset != itr.window.offset { - diff := itr.window.offset - offset - if abs(diff) < int64(itr.opt.Interval.Duration) { - itr.window.time += diff - } - itr.window.offset = offset - } - } - return p, nil -} - -// {{$k.name}}IntervalIterator represents a {{$k.name}} implementation of IntervalIterator. 
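// A minimal sketch of the interpolation behind the LinearFill case above.
// The real code first divides timestamps by the group-by interval so the
// interpolation runs in window units; lerpWindow is an illustrative name,
// not the template's helper.
func lerpWindow(window, prevWindow, nextWindow int64, prevValue, nextValue float64) float64 {
	// Fraction of the gap between the surrounding real points that has
	// elapsed at the window being filled.
	frac := float64(window-prevWindow) / float64(nextWindow-prevWindow)
	return prevValue + frac*(nextValue-prevValue)
}

// Example: real points at windows 0 (value 10) and 4 (value 30); filling
// window 1 yields 10 + 0.25*(30-10) = 15.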
-type {{$k.name}}IntervalIterator struct { - input {{$k.Name}}Iterator - opt IteratorOptions -} - -func new{{$k.Name}}IntervalIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}IntervalIterator { - return &{{$k.name}}IntervalIterator{input: input, opt: opt} -} - -func (itr *{{$k.name}}IntervalIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *{{$k.name}}IntervalIterator) Close() error { return itr.input.Close() } - -func (itr *{{$k.name}}IntervalIterator) Next() (*{{$k.Name}}Point, error) { - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - p.Time, _ = itr.opt.Window(p.Time) - // If we see the minimum allowable time, set the time to zero so we don't - // break the default returned time for aggregate queries without times. - if p.Time == influxql.MinTime { - p.Time = 0 - } - return p, nil -} - -// {{$k.name}}InterruptIterator represents a {{$k.name}} implementation of InterruptIterator. -type {{$k.name}}InterruptIterator struct { - input {{$k.Name}}Iterator - closing <-chan struct{} - count int -} - -func new{{$k.Name}}InterruptIterator(input {{$k.Name}}Iterator, closing <-chan struct{}) *{{$k.name}}InterruptIterator { - return &{{$k.name}}InterruptIterator{input: input, closing: closing} -} - -func (itr *{{$k.name}}InterruptIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *{{$k.name}}InterruptIterator) Close() error { return itr.input.Close() } - -func (itr *{{$k.name}}InterruptIterator) Next() (*{{$k.Name}}Point, error) { - // Only check if the channel is closed every N points. This - // intentionally checks on both 0 and N so that if the iterator - // has been interrupted before the first point is emitted it will - // not emit any points. - if itr.count & 0xFF == 0xFF { - select { - case <-itr.closing: - return nil, itr.Close() - default: - // Reset iterator count to zero and fall through to emit the next point. - itr.count = 0 - } - } - - // Increment the counter for every point read. - itr.count++ - return itr.input.Next() -} - -// {{$k.name}}CloseInterruptIterator represents a {{$k.name}} implementation of CloseInterruptIterator. -type {{$k.name}}CloseInterruptIterator struct { - input {{$k.Name}}Iterator - closing <-chan struct{} - done chan struct{} - once sync.Once -} - -func new{{$k.Name}}CloseInterruptIterator(input {{$k.Name}}Iterator, closing <-chan struct{}) *{{$k.name}}CloseInterruptIterator { - itr := &{{$k.name}}CloseInterruptIterator{ - input: input, - closing: closing, - done: make(chan struct{}), - } - go itr.monitor() - return itr -} - -func (itr *{{$k.name}}CloseInterruptIterator) monitor() { - select { - case <-itr.closing: - itr.Close() - case <-itr.done: - } -} - -func (itr *{{$k.name}}CloseInterruptIterator) Stats() IteratorStats { - return itr.input.Stats() -} - -func (itr *{{$k.name}}CloseInterruptIterator) Close() error { - itr.once.Do(func() { - close(itr.done) - itr.input.Close() - }) - return nil -} - -func (itr *{{$k.name}}CloseInterruptIterator) Next() (*{{$k.Name}}Point, error) { - p, err := itr.input.Next() - if err != nil { - // Check if the iterator was closed. - select { - case <-itr.done: - return nil, nil - default: - return nil, err - } - } - return p, nil -} - -{{range $v := $types}} - -// {{$k.name}}Reduce{{$v.Name}}Iterator executes a reducer for every interval and buffers the result. 
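// A standalone sketch of the polling cadence used by the interrupt iterator
// above: the closing channel is only checked when the low byte of the counter
// is 0xFF, i.e. at most once every 256 points, which keeps the select off the
// hot path. shouldPoll is an illustrative name.
func shouldPoll(count int) bool {
	return count&0xFF == 0xFF
}

// shouldPoll(254) == false, shouldPoll(255) == true, shouldPoll(256) == false.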
-type {{$k.name}}Reduce{{$v.Name}}Iterator struct { - input *buf{{$k.Name}}Iterator - create func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter) - dims []string - opt IteratorOptions - points []{{$v.Name}}Point - keepTags bool -} - -func new{{$k.Name}}Reduce{{$v.Name}}Iterator(input {{$k.Name}}Iterator, opt IteratorOptions, createFn func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter)) *{{$k.name}}Reduce{{$v.Name}}Iterator { - return &{{$k.name}}Reduce{{$v.Name}}Iterator{ - input: newBuf{{$k.Name}}Iterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - } -} - -// Stats returns stats from the input iterator. -func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Close() error { return itr.input.Close() } - -// Next returns the minimum value for the next available interval. -func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) Next() (*{{$v.Name}}Point, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// {{$k.name}}Reduce{{$v.Name}}Point stores the reduced data for a name/tag combination. -type {{$k.name}}Reduce{{$v.Name}}Point struct { - Name string - Tags Tags - Aggregator {{$k.Name}}PointAggregator - Emitter {{$v.Name}}PointEmitter -} - -// reduce executes fn once for every point in the next window. -// The previous value for the dimension is passed to fn. -func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) { - // Calculate next window. - var ( - startTime, endTime int64 - window struct { - name string - tags string - } - ) - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } else if p.Nil { - continue - } - - // Unread the point so it can be processed. - itr.input.unread(p) - startTime, endTime = itr.opt.Window(p.Time) - window.name, window.tags = p.Name, p.Tags.Subset(itr.opt.Dimensions).ID() - break - } - - // Create points by tags. - m := make(map[string]*{{$k.name}}Reduce{{$v.Name}}Point) - for { - // Read next point. - curr, err := itr.input.NextInWindow(startTime, endTime) - if err != nil { - return nil, err - } else if curr == nil { - break - } else if curr.Nil { - continue - } else if curr.Name != window.name { - itr.input.unread(curr) - break - } - - // Ensure this point is within the same final window. - if curr.Name != window.name { - itr.input.unread(curr) - break - } else if tags := curr.Tags.Subset(itr.opt.Dimensions); tags.ID() != window.tags { - itr.input.unread(curr) - break - } - - // Retrieve the tags on this point for this level of the query. - // This may be different than the bucket dimensions. - tags := curr.Tags.Subset(itr.dims) - id := tags.ID() - - // Retrieve the aggregator for this name/tag combination or create one. - rp := m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &{{$k.name}}Reduce{{$v.Name}}Point{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - m[id] = rp - } - rp.Aggregator.Aggregate{{$k.Name}}(curr) - } - - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - - // Reverse sort points by name & tag. 
- // This ensures a consistent order of output. - if len(keys) > 0 { - var sorted sort.Interface = sort.StringSlice(keys) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Sort(sorted) - } - - // Assume the points are already sorted until proven otherwise. - sortedByTime := true - // Emit the points for each name & tag combination. - a := make([]{{$v.Name}}Point, 0, len(m)) - for _, k := range keys { - rp := m[k] - points := rp.Emitter.Emit() - for i := len(points)-1; i >= 0; i-- { - points[i].Name = rp.Name - if !itr.keepTags { - points[i].Tags = rp.Tags - } - // Set the points time to the interval time if the reducer didn't provide one. - if points[i].Time == ZeroTime { - points[i].Time = startTime - } else { - sortedByTime = false - } - a = append(a, points[i]) - } - } - // Points may be out of order. Perform a stable sort by time if requested. - if !sortedByTime && itr.opt.Ordered { - var sorted sort.Interface = {{$v.name}}PointsByTime(a) - if itr.opt.Ascending { - sorted = sort.Reverse(sorted) - } - sort.Stable(sorted) - } - return a, nil -} - -// {{$k.name}}Stream{{$v.Name}}Iterator streams inputs into the iterator and emits points gradually. -type {{$k.name}}Stream{{$v.Name}}Iterator struct { - input *buf{{$k.Name}}Iterator - create func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter) - dims []string - opt IteratorOptions - m map[string]*{{$k.name}}Reduce{{$v.Name}}Point - points []{{$v.Name}}Point -} - -// new{{$k.Name}}Stream{{$v.Name}}Iterator returns a new instance of {{$k.name}}Stream{{$v.Name}}Iterator. -func new{{$k.Name}}Stream{{$v.Name}}Iterator(input {{$k.Name}}Iterator, createFn func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter), opt IteratorOptions) *{{$k.name}}Stream{{$v.Name}}Iterator { - return &{{$k.name}}Stream{{$v.Name}}Iterator{ - input: newBuf{{$k.Name}}Iterator(input), - create: createFn, - dims: opt.GetDimensions(), - opt: opt, - m: make(map[string]*{{$k.name}}Reduce{{$v.Name}}Point), - } -} - -// Stats returns stats from the input iterator. -func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Close() error { return itr.input.Close() } - -// Next returns the next value for the stream iterator. -func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) Next() (*{{$v.Name}}Point, error) { - // Calculate next window if we have no more points. - if len(itr.points) == 0 { - var err error - itr.points, err = itr.reduce() - if len(itr.points) == 0 { - return nil, err - } - } - - // Pop next point off the stack. - p := &itr.points[len(itr.points)-1] - itr.points = itr.points[:len(itr.points)-1] - return p, nil -} - -// reduce creates and manages aggregators for every point from the input. -// After aggregating a point, it always tries to emit a value using the emitter. -func (itr *{{$k.name}}Stream{{$v.Name}}Iterator) reduce() ([]{{$v.Name}}Point, error) { - // We have already read all of the input points. - if itr.m == nil { - return nil, nil - } - - for { - // Read next point. - curr, err := itr.input.Next() - if err != nil { - return nil, err - } else if curr == nil { - // Close all of the aggregators to flush any remaining points to emit. 
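// A standalone sketch of why the reduce iterator above reverse-sorts its group
// keys for ascending queries: Next() pops buffered points off the end of the
// slice, so groups appended in descending key order come back out ascending.
// Assumes the surrounding file's "sort" import; popOrder is illustrative only.
func popOrder(keys []string) []string {
	sort.Sort(sort.Reverse(sort.StringSlice(keys))) // e.g. [a b c] -> [c b a]
	out := make([]string, 0, len(keys))
	for len(keys) > 0 {
		k := keys[len(keys)-1] // pop from the back, mirroring Next()
		keys = keys[:len(keys)-1]
		out = append(out, k)
	}
	return out // [a b c]: original ascending order restored
}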
- var points []{{$v.Name}}Point - for _, rp := range itr.m { - if aggregator, ok := rp.Aggregator.(io.Closer); ok { - if err := aggregator.Close(); err != nil { - return nil, err - } - - pts := rp.Emitter.Emit() - if len(pts) == 0 { - continue - } - - for i := range pts { - pts[i].Name = rp.Name - pts[i].Tags = rp.Tags - } - points = append(points, pts...) - } - } - - // Eliminate the aggregators and emitters. - itr.m = nil - return points, nil - } else if curr.Nil { - continue - } - tags := curr.Tags.Subset(itr.dims) - - id := curr.Name - if len(tags.m) > 0 { - id += "\x00" + tags.ID() - } - - // Retrieve the aggregator for this name/tag combination or create one. - rp := itr.m[id] - if rp == nil { - aggregator, emitter := itr.create() - rp = &{{$k.name}}Reduce{{.Name}}Point{ - Name: curr.Name, - Tags: tags, - Aggregator: aggregator, - Emitter: emitter, - } - itr.m[id] = rp - } - rp.Aggregator.Aggregate{{$k.Name}}(curr) - - // Attempt to emit points from the aggregator. - points := rp.Emitter.Emit() - if len(points) == 0 { - continue - } - - for i := range points { - points[i].Name = rp.Name - points[i].Tags = rp.Tags - } - return points, nil - } -} -{{end}} - -// {{$k.name}}DedupeIterator only outputs unique points. -// This differs from the DistinctIterator in that it compares all aux fields too. -// This iterator is relatively inefficient and should only be used on small -// datasets such as meta query results. -type {{$k.name}}DedupeIterator struct { - input {{$k.Name}}Iterator - m map[string]struct{} // lookup of points already sent -} - -type {{$k.name}}IteratorMapper struct { - cur Cursor - row Row - driver IteratorMap // which iterator to use for the primary value, can be nil - fields []IteratorMap // which iterator to use for an aux field - point {{$k.Name}}Point -} - -func new{{$k.Name}}IteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) *{{$k.name}}IteratorMapper { - return &{{$k.name}}IteratorMapper{ - cur: cur, - driver: driver, - fields: fields, - point: {{$k.Name}}Point{ - Aux: make([]interface{}, len(fields)), - }, - } -} - -func (itr *{{$k.name}}IteratorMapper) Next() (*{{$k.Name}}Point, error) { - if !itr.cur.Scan(&itr.row) { - if err := itr.cur.Err(); err != nil { - return nil, err - } - return nil, nil - } - - itr.point.Time = itr.row.Time - itr.point.Name = itr.row.Series.Name - itr.point.Tags = itr.row.Series.Tags - - if itr.driver != nil { - if v := itr.driver.Value(&itr.row); v != nil { - if v, ok := castTo{{$k.Name}}(v); ok { - itr.point.Value = v - itr.point.Nil = false - } else { - itr.point.Value = {{$k.Nil}} - itr.point.Nil = true - } - } else { - itr.point.Value = {{$k.Nil}} - itr.point.Nil = true - } - } - for i, f := range itr.fields { - itr.point.Aux[i] = f.Value(&itr.row) - } - return &itr.point, nil -} - -func (itr *{{$k.name}}IteratorMapper) Stats() IteratorStats { - return itr.cur.Stats() -} - -func (itr *{{$k.name}}IteratorMapper) Close() error { - return itr.cur.Close() -} - -type {{$k.name}}FilterIterator struct { - input {{$k.Name}}Iterator - cond influxql.Expr - opt IteratorOptions - m map[string]interface{} -} - -func new{{$k.Name}}FilterIterator(input {{$k.Name}}Iterator, cond influxql.Expr, opt IteratorOptions) {{$k.Name}}Iterator { - // Strip out time conditions from the WHERE clause. - // TODO(jsternberg): This should really be done for us when creating the IteratorOptions struct. 
- n := influxql.RewriteFunc(influxql.CloneExpr(cond), func(n influxql.Node) influxql.Node { - switch n := n.(type) { - case *influxql.BinaryExpr: - if n.LHS.String() == "time" { - return &influxql.BooleanLiteral{Val: true} - } - } - return n - }) - - cond, _ = n.(influxql.Expr) - if cond == nil { - return input - } else if n, ok := cond.(*influxql.BooleanLiteral); ok && n.Val { - return input - } - - return &{{$k.name}}FilterIterator{ - input: input, - cond: cond, - opt: opt, - m: make(map[string]interface{}), - } -} - -func (itr *{{$k.name}}FilterIterator) Stats() IteratorStats { return itr.input.Stats() } -func (itr *{{$k.name}}FilterIterator) Close() error { return itr.input.Close() } - -func (itr *{{$k.name}}FilterIterator) Next() (*{{$k.Name}}Point, error) { - for { - p, err := itr.input.Next() - if err != nil || p == nil { - return nil, err - } - - for i, ref := range itr.opt.Aux { - itr.m[ref.Val] = p.Aux[i] - } - for k, v := range p.Tags.KeyValues() { - itr.m[k] = v - } - - if !influxql.EvalBool(itr.cond, itr.m) { - continue - } - return p, nil - } -} - -type {{$k.name}}TagSubsetIterator struct { - input {{$k.Name}}Iterator - point {{$k.Name}}Point - lastTags Tags - dimensions []string -} - -func new{{$k.Name}}TagSubsetIterator(input {{$k.Name}}Iterator, opt IteratorOptions) *{{$k.name}}TagSubsetIterator { - return &{{$k.name}}TagSubsetIterator{ - input: input, - dimensions: opt.GetDimensions(), - } -} - -func (itr *{{$k.name}}TagSubsetIterator) Next() (*{{$k.Name}}Point, error) { - p, err := itr.input.Next() - if err != nil { - return nil, err - } else if p == nil { - return nil, nil - } - - itr.point.Name = p.Name - if !p.Tags.Equal(itr.lastTags) { - itr.point.Tags = p.Tags.Subset(itr.dimensions) - itr.lastTags = p.Tags - } - itr.point.Time = p.Time - itr.point.Value = p.Value - itr.point.Aux = p.Aux - itr.point.Aggregated = p.Aggregated - itr.point.Nil = p.Nil - return &itr.point, nil -} - -func (itr *{{$k.name}}TagSubsetIterator) Stats() IteratorStats { - return itr.input.Stats() -} - -func (itr *{{$k.name}}TagSubsetIterator) Close() error { - return itr.input.Close() -} - -// new{{$k.Name}}DedupeIterator returns a new instance of {{$k.name}}DedupeIterator. -func new{{$k.Name}}DedupeIterator(input {{$k.Name}}Iterator) *{{$k.name}}DedupeIterator { - return &{{$k.name}}DedupeIterator{ - input: input, - m: make(map[string]struct{}), - } -} - -// Stats returns stats from the input iterator. -func (itr *{{$k.name}}DedupeIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *{{$k.name}}DedupeIterator) Close() error { return itr.input.Close() } - -// Next returns the next unique point from the input iterator. -func (itr *{{$k.name}}DedupeIterator) Next() (*{{$k.Name}}Point, error) { - for { - // Read next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Serialize to bytes to store in lookup. - buf, err := proto.Marshal(encode{{$k.Name}}Point(p)) - if err != nil { - return nil, err - } - - // If the point has already been output then move to the next point. - if _, ok := itr.m[string(buf)]; ok { - continue - } - - // Otherwise mark it as emitted and return point. - itr.m[string(buf)] = struct{}{} - return p, nil - } -} - -// {{$k.name}}ReaderIterator represents an iterator that streams from a reader. 
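// A minimal, standalone sketch of the evaluation step in the filter iterator
// above: the point's tag and aux values are copied into a map and the
// rewritten condition is checked with influxql.EvalBool. The literal
// condition and values here are illustrative.
func matchesFilter() bool {
	cond, err := influxql.ParseExpr(`host = 'server01' AND value > 10.0`)
	if err != nil {
		panic(err)
	}
	values := map[string]interface{}{
		"host":  "server01",
		"value": float64(42),
	}
	return influxql.EvalBool(cond, values) // true for this point
}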
-type {{$k.name}}ReaderIterator struct { - r io.Reader - dec *{{$k.Name}}PointDecoder -} - -// new{{$k.Name}}ReaderIterator returns a new instance of {{$k.name}}ReaderIterator. -func new{{$k.Name}}ReaderIterator(ctx context.Context, r io.Reader, stats IteratorStats) *{{$k.name}}ReaderIterator { - dec := New{{$k.Name}}PointDecoder(ctx, r) - dec.stats = stats - - return &{{$k.name}}ReaderIterator{ - r: r, - dec: dec, - } -} - -// Stats returns stats about points processed. -func (itr *{{$k.name}}ReaderIterator) Stats() IteratorStats { return itr.dec.stats } - -// Close closes the underlying reader, if applicable. -func (itr *{{$k.name}}ReaderIterator) Close() error { - if r, ok := itr.r.(io.ReadCloser); ok { - return r.Close() - } - return nil -} - -// Next returns the next point from the iterator. -func (itr *{{$k.name}}ReaderIterator) Next() (*{{$k.Name}}Point, error) { - // OPTIMIZE(benbjohnson): Reuse point on iterator. - - // Unmarshal next point. - p := &{{$k.Name}}Point{} - if err := itr.dec.Decode{{$k.Name}}Point(p); err == io.EOF { - return nil, nil - } else if err != nil { - return nil, err - } - return p, nil -} -{{end}} - -{{range .}} -// encode{{.Name}}Iterator encodes all points from itr to the underlying writer. -func (enc *IteratorEncoder) encode{{.Name}}Iterator(itr {{.Name}}Iterator) error { - ticker := time.NewTicker(enc.StatsInterval) - defer ticker.Stop() - - // Emit initial stats. - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - - // Continually stream points from the iterator into the encoder. - penc := New{{.Name}}PointEncoder(enc.w) - for { - // Emit stats periodically. - select { - case <-ticker.C: - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - default: - } - - // Retrieve the next point from the iterator. - p, err := itr.Next() - if err != nil { - return err - } else if p == nil { - break - } - - // Write the point to the point encoder. - if err := penc.Encode{{.Name}}Point(p); err != nil { - return err - } - } - - // Emit final stats. - if err := enc.encodeStats(itr.Stats()); err != nil { - return err - } - return nil -} - -{{end}} - -{{end}} diff --git a/influxql/query/iterator.go b/influxql/query/iterator.go deleted file mode 100644 index 80e7e88042a..00000000000 --- a/influxql/query/iterator.go +++ /dev/null @@ -1,1376 +0,0 @@ -package query - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "regexp" - "time" - - internal "github.com/influxdata/influxdb/v2/influxql/query/internal" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxql" - "google.golang.org/protobuf/proto" -) - -// ErrUnknownCall is returned when operating on an unknown function call. -var ErrUnknownCall = errors.New("unknown call") - -const ( - // secToNs is the number of nanoseconds in a second. - secToNs = int64(time.Second) -) - -// Iterator represents a generic interface for all Iterators. -// Most iterator operations are done on the typed sub-interfaces. -type Iterator interface { - Stats() IteratorStats - Close() error -} - -// Iterators represents a list of iterators. -type Iterators []Iterator - -// Stats returns the aggregation of all iterator stats. -func (a Iterators) Stats() IteratorStats { - var stats IteratorStats - for _, itr := range a { - stats.Add(itr.Stats()) - } - return stats -} - -// Close closes all iterators. 
-// We are seeing an occasional panic in this function -// which looks like a nil reference from one -// itr.Close() call, thus we check for nil elements -// in the slice a. This is often called as error -// clean-up, so the state of the iterators may be -// unhappy. -func (a Iterators) Close() (err error) { - err = nil - for _, itr := range a { - if itr != nil { - if e := itr.Close(); e != nil && err == nil { - err = e - } - } - } - return err -} - -// filterNonNil returns a slice of iterators that removes all nil iterators. -func (a Iterators) filterNonNil() []Iterator { - other := make([]Iterator, 0, len(a)) - for _, itr := range a { - if itr == nil { - continue - } - other = append(other, itr) - } - return other -} - -// dataType determines what slice type this set of iterators should be. -// An iterator type is chosen by looking at the first element in the slice -// and then returning the data type for that iterator. -func (a Iterators) dataType() influxql.DataType { - if len(a) == 0 { - return influxql.Unknown - } - - switch a[0].(type) { - case FloatIterator: - return influxql.Float - case IntegerIterator: - return influxql.Integer - case UnsignedIterator: - return influxql.Unsigned - case StringIterator: - return influxql.String - case BooleanIterator: - return influxql.Boolean - default: - return influxql.Unknown - } -} - -// coerce forces an array of iterators to be a single type. -// Iterators that are not of the same type as the first element in the slice -// will be closed and dropped. -func (a Iterators) coerce() interface{} { - typ := a.dataType() - switch typ { - case influxql.Float: - return newFloatIterators(a) - case influxql.Integer: - return newIntegerIterators(a) - case influxql.Unsigned: - return newUnsignedIterators(a) - case influxql.String: - return newStringIterators(a) - case influxql.Boolean: - return newBooleanIterators(a) - } - return a -} - -// Merge combines all iterators into a single iterator. -// A sorted merge iterator or a merge iterator can be used based on opt. -func (a Iterators) Merge(opt IteratorOptions) (Iterator, error) { - // Check if this is a call expression. - call, ok := opt.Expr.(*influxql.Call) - - // Merge into a single iterator. - if !ok && opt.MergeSorted() { - itr := NewSortedMergeIterator(a, opt) - if itr != nil && opt.InterruptCh != nil { - itr = NewInterruptIterator(itr, opt.InterruptCh) - } - return itr, nil - } - - // We do not need an ordered output so use a merge iterator. - itr := NewMergeIterator(a, opt) - if itr == nil { - return nil, nil - } - - if opt.InterruptCh != nil { - itr = NewInterruptIterator(itr, opt.InterruptCh) - } - - if !ok { - // This is not a call expression so do not use a call iterator. - return itr, nil - } - - // When merging the count() function, use sum() to sum the counted points. - if call.Name == "count" { - opt.Expr = &influxql.Call{ - Name: "sum", - Args: call.Args, - } - } - // When merging the sum_hll() function, use merge_hll() to sum the counted points. - if call.Name == "sum_hll" { - opt.Expr = &influxql.Call{ - Name: "merge_hll", - Args: call.Args, - } - } - return NewCallIterator(itr, opt) -} - -// NewMergeIterator returns an iterator to merge itrs into one. -// Inputs must either be merge iterators or only contain a single name/tag in -// sorted order. The iterator will output all points by window, name/tag, then -// time. This iterator is useful when you need all of the points for an -// interval. 
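// A standalone sketch of the call rewrite Merge performs above: when partial
// count() results from several iterators are merged, the outer aggregate must
// sum the partial counts rather than count them again, and sum_hll() partials
// are combined with merge_hll(). mergeCall is an illustrative name.
func mergeCall(call *influxql.Call) *influxql.Call {
	switch call.Name {
	case "count":
		return &influxql.Call{Name: "sum", Args: call.Args}
	case "sum_hll":
		return &influxql.Call{Name: "merge_hll", Args: call.Args}
	default:
		return call
	}
}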
-func NewMergeIterator(inputs []Iterator, opt IteratorOptions) Iterator { - inputs = Iterators(inputs).filterNonNil() - if n := len(inputs); n == 0 { - return nil - } else if n == 1 { - return inputs[0] - } - - // Aggregate functions can use a more relaxed sorting so that points - // within a window are grouped. This is much more efficient. - switch inputs := Iterators(inputs).coerce().(type) { - case []FloatIterator: - return newFloatMergeIterator(inputs, opt) - case []IntegerIterator: - return newIntegerMergeIterator(inputs, opt) - case []UnsignedIterator: - return newUnsignedMergeIterator(inputs, opt) - case []StringIterator: - return newStringMergeIterator(inputs, opt) - case []BooleanIterator: - return newBooleanMergeIterator(inputs, opt) - default: - panic(fmt.Sprintf("unsupported merge iterator type: %T", inputs)) - } -} - -// NewParallelMergeIterator returns an iterator that breaks input iterators -// into groups and processes them in parallel. -func NewParallelMergeIterator(inputs []Iterator, opt IteratorOptions, parallelism int) Iterator { - inputs = Iterators(inputs).filterNonNil() - if len(inputs) == 0 { - return nil - } else if len(inputs) == 1 { - return inputs[0] - } - - // Limit parallelism to the number of inputs. - if len(inputs) < parallelism { - parallelism = len(inputs) - } - - // Determine the number of inputs per output iterator. - n := len(inputs) / parallelism - - // Group iterators together. - outputs := make([]Iterator, parallelism) - for i := range outputs { - var slice []Iterator - if i < len(outputs)-1 { - slice = inputs[i*n : (i+1)*n] - } else { - slice = inputs[i*n:] - } - - outputs[i] = newParallelIterator(NewMergeIterator(slice, opt)) - } - - // Merge all groups together. - return NewMergeIterator(outputs, opt) -} - -// NewSortedMergeIterator returns an iterator to merge itrs into one. -// Inputs must either be sorted merge iterators or only contain a single -// name/tag in sorted order. The iterator will output all points by name/tag, -// then time. This iterator is useful when you need all points for a name/tag -// to be in order. -func NewSortedMergeIterator(inputs []Iterator, opt IteratorOptions) Iterator { - inputs = Iterators(inputs).filterNonNil() - if len(inputs) == 0 { - return nil - } else if len(inputs) == 1 { - return inputs[0] - } - - switch inputs := Iterators(inputs).coerce().(type) { - case []FloatIterator: - return newFloatSortedMergeIterator(inputs, opt) - case []IntegerIterator: - return newIntegerSortedMergeIterator(inputs, opt) - case []UnsignedIterator: - return newUnsignedSortedMergeIterator(inputs, opt) - case []StringIterator: - return newStringSortedMergeIterator(inputs, opt) - case []BooleanIterator: - return newBooleanSortedMergeIterator(inputs, opt) - default: - panic(fmt.Sprintf("unsupported sorted merge iterator type: %T", inputs)) - } -} - -// newParallelIterator returns an iterator that runs in a separate goroutine. -func newParallelIterator(input Iterator) Iterator { - if input == nil { - return nil - } - - switch itr := input.(type) { - case FloatIterator: - return newFloatParallelIterator(itr) - case IntegerIterator: - return newIntegerParallelIterator(itr) - case UnsignedIterator: - return newUnsignedParallelIterator(itr) - case StringIterator: - return newStringParallelIterator(itr) - case BooleanIterator: - return newBooleanParallelIterator(itr) - default: - panic(fmt.Sprintf("unsupported parallel iterator type: %T", itr)) - } -} - -// NewLimitIterator returns an iterator that limits the number of points per grouping. 
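// A standalone sketch of how NewParallelMergeIterator above partitions its
// inputs (assumed non-empty, as the caller guarantees): parallelism is capped
// at the number of inputs, each group gets an equal share, and the last group
// absorbs the remainder. splitGroups is an illustrative name.
func splitGroups(inputs []int, parallelism int) [][]int {
	if len(inputs) < parallelism {
		parallelism = len(inputs)
	}
	n := len(inputs) / parallelism
	groups := make([][]int, parallelism)
	for i := range groups {
		if i < parallelism-1 {
			groups[i] = inputs[i*n : (i+1)*n]
		} else {
			groups[i] = inputs[i*n:] // last group takes the remainder
		}
	}
	return groups
}

// splitGroups([]int{1, 2, 3, 4, 5, 6, 7}, 3) -> [[1 2] [3 4] [5 6 7]]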
-func NewLimitIterator(input Iterator, opt IteratorOptions) Iterator { - switch input := input.(type) { - case FloatIterator: - return newFloatLimitIterator(input, opt) - case IntegerIterator: - return newIntegerLimitIterator(input, opt) - case UnsignedIterator: - return newUnsignedLimitIterator(input, opt) - case StringIterator: - return newStringLimitIterator(input, opt) - case BooleanIterator: - return newBooleanLimitIterator(input, opt) - default: - panic(fmt.Sprintf("unsupported limit iterator type: %T", input)) - } -} - -// NewFilterIterator returns an iterator that filters the points based on the -// condition. This iterator is not nearly as efficient as filtering points -// within the query engine and is only used when filtering subqueries. -func NewFilterIterator(input Iterator, cond influxql.Expr, opt IteratorOptions) Iterator { - if input == nil { - return nil - } - - switch input := input.(type) { - case FloatIterator: - return newFloatFilterIterator(input, cond, opt) - case IntegerIterator: - return newIntegerFilterIterator(input, cond, opt) - case UnsignedIterator: - return newUnsignedFilterIterator(input, cond, opt) - case StringIterator: - return newStringFilterIterator(input, cond, opt) - case BooleanIterator: - return newBooleanFilterIterator(input, cond, opt) - default: - panic(fmt.Sprintf("unsupported filter iterator type: %T", input)) - } -} - -// NewTagSubsetIterator will strip each of the points to a subset of the tag key values -// for each point it processes. -func NewTagSubsetIterator(input Iterator, opt IteratorOptions) Iterator { - if input == nil { - return nil - } - - switch input := input.(type) { - case FloatIterator: - return newFloatTagSubsetIterator(input, opt) - case IntegerIterator: - return newIntegerTagSubsetIterator(input, opt) - case UnsignedIterator: - return newUnsignedTagSubsetIterator(input, opt) - case StringIterator: - return newStringTagSubsetIterator(input, opt) - case BooleanIterator: - return newBooleanTagSubsetIterator(input, opt) - default: - panic(fmt.Sprintf("unsupported tag subset iterator type: %T", input)) - } -} - -// NewDedupeIterator returns an iterator that only outputs unique points. -// This iterator maintains a serialized copy of each row so it is inefficient -// to use on large datasets. It is intended for small datasets such as meta queries. -func NewDedupeIterator(input Iterator) Iterator { - if input == nil { - return nil - } - - switch input := input.(type) { - case FloatIterator: - return newFloatDedupeIterator(input) - case IntegerIterator: - return newIntegerDedupeIterator(input) - case UnsignedIterator: - return newUnsignedDedupeIterator(input) - case StringIterator: - return newStringDedupeIterator(input) - case BooleanIterator: - return newBooleanDedupeIterator(input) - default: - panic(fmt.Sprintf("unsupported dedupe iterator type: %T", input)) - } -} - -// NewFillIterator returns an iterator that fills in missing points in an aggregate. 
-func NewFillIterator(input Iterator, expr influxql.Expr, opt IteratorOptions) Iterator { - switch input := input.(type) { - case FloatIterator: - return newFloatFillIterator(input, expr, opt) - case IntegerIterator: - return newIntegerFillIterator(input, expr, opt) - case UnsignedIterator: - return newUnsignedFillIterator(input, expr, opt) - case StringIterator: - return newStringFillIterator(input, expr, opt) - case BooleanIterator: - return newBooleanFillIterator(input, expr, opt) - default: - panic(fmt.Sprintf("unsupported fill iterator type: %T", input)) - } -} - -// NewIntervalIterator returns an iterator that sets the time on each point to the interval. -func NewIntervalIterator(input Iterator, opt IteratorOptions) Iterator { - switch input := input.(type) { - case FloatIterator: - return newFloatIntervalIterator(input, opt) - case IntegerIterator: - return newIntegerIntervalIterator(input, opt) - case UnsignedIterator: - return newUnsignedIntervalIterator(input, opt) - case StringIterator: - return newStringIntervalIterator(input, opt) - case BooleanIterator: - return newBooleanIntervalIterator(input, opt) - default: - panic(fmt.Sprintf("unsupported interval iterator type: %T", input)) - } -} - -// NewInterruptIterator returns an iterator that will stop producing output -// when the passed-in channel is closed. -func NewInterruptIterator(input Iterator, closing <-chan struct{}) Iterator { - switch input := input.(type) { - case FloatIterator: - return newFloatInterruptIterator(input, closing) - case IntegerIterator: - return newIntegerInterruptIterator(input, closing) - case UnsignedIterator: - return newUnsignedInterruptIterator(input, closing) - case StringIterator: - return newStringInterruptIterator(input, closing) - case BooleanIterator: - return newBooleanInterruptIterator(input, closing) - default: - panic(fmt.Sprintf("unsupported interrupt iterator type: %T", input)) - } -} - -// IteratorScanner is used to scan the results of an iterator into a map. -type IteratorScanner interface { - // Peek retrieves information about the next point. It returns a timestamp, the name, and the tags. - Peek() (int64, string, Tags) - - // ScanAt will take a time, name, and tags and scan the point that matches those into the map. - ScanAt(ts int64, name string, tags Tags, values map[string]interface{}) - - // Stats returns the IteratorStats from the Iterator. - Stats() IteratorStats - - // Err returns an error that was encountered while scanning. - Err() error - - io.Closer -} - -// SkipDefault is a sentinel value to tell the IteratorScanner to skip setting the -// default value if none was present. This causes the map to use the previous value -// if it was previously set. -var SkipDefault = interface{}(0) - -// NewIteratorScanner produces an IteratorScanner for the Iterator. -func NewIteratorScanner(input Iterator, keys []influxql.VarRef, defaultValue interface{}) IteratorScanner { - switch input := input.(type) { - case FloatIterator: - return newFloatIteratorScanner(input, keys, defaultValue) - case IntegerIterator: - return newIntegerIteratorScanner(input, keys, defaultValue) - case UnsignedIterator: - return newUnsignedIteratorScanner(input, keys, defaultValue) - case StringIterator: - return newStringIteratorScanner(input, keys, defaultValue) - case BooleanIterator: - return newBooleanIteratorScanner(input, keys, defaultValue) - default: - panic(fmt.Sprintf("unsupported type for iterator scanner: %T", input)) - } -} - -// DrainIterator reads and discards all points from itr. 
-func DrainIterator(itr Iterator) { - defer itr.Close() - switch itr := itr.(type) { - case FloatIterator: - for p, _ := itr.Next(); p != nil; p, _ = itr.Next() { - } - case IntegerIterator: - for p, _ := itr.Next(); p != nil; p, _ = itr.Next() { - } - case UnsignedIterator: - for p, _ := itr.Next(); p != nil; p, _ = itr.Next() { - } - case StringIterator: - for p, _ := itr.Next(); p != nil; p, _ = itr.Next() { - } - case BooleanIterator: - for p, _ := itr.Next(); p != nil; p, _ = itr.Next() { - } - default: - panic(fmt.Sprintf("unsupported iterator type for draining: %T", itr)) - } -} - -// DrainIterators reads and discards all points from itrs. -func DrainIterators(itrs []Iterator) { - defer Iterators(itrs).Close() - for { - var hasData bool - - for _, itr := range itrs { - switch itr := itr.(type) { - case FloatIterator: - if p, _ := itr.Next(); p != nil { - hasData = true - } - case IntegerIterator: - if p, _ := itr.Next(); p != nil { - hasData = true - } - case UnsignedIterator: - if p, _ := itr.Next(); p != nil { - hasData = true - } - case StringIterator: - if p, _ := itr.Next(); p != nil { - hasData = true - } - case BooleanIterator: - if p, _ := itr.Next(); p != nil { - hasData = true - } - default: - panic(fmt.Sprintf("unsupported iterator type for draining: %T", itr)) - } - } - - // Exit once all iterators return a nil point. - if !hasData { - break - } - } -} - -// NewReaderIterator returns an iterator that streams from a reader. -func NewReaderIterator(ctx context.Context, r io.Reader, typ influxql.DataType, stats IteratorStats) Iterator { - switch typ { - case influxql.Float: - return newFloatReaderIterator(ctx, r, stats) - case influxql.Integer: - return newIntegerReaderIterator(ctx, r, stats) - case influxql.Unsigned: - return newUnsignedReaderIterator(ctx, r, stats) - case influxql.String: - return newStringReaderIterator(ctx, r, stats) - case influxql.Boolean: - return newBooleanReaderIterator(ctx, r, stats) - default: - return &nilFloatReaderIterator{r: r} - } -} - -// IteratorCreator is an interface to create Iterators. -type IteratorCreator interface { - // Creates a simple iterator for use in an InfluxQL query. - CreateIterator(ctx context.Context, source *influxql.Measurement, opt IteratorOptions) (Iterator, error) - - // Determines the potential cost for creating an iterator. - IteratorCost(ctx context.Context, source *influxql.Measurement, opt IteratorOptions) (IteratorCost, error) -} - -// IteratorOptions is an object passed to CreateIterator to specify creation options. -type IteratorOptions struct { - // OrgID is the organization for which this query is being executed. - OrgID platform.ID - - // Expression to iterate for. - // This can be VarRef or a Call. - Expr influxql.Expr - - // Auxiliary tags or values to also retrieve for the point. - Aux []influxql.VarRef - - // Data sources from which to receive data. This is only used for encoding - // measurements over RPC and is no longer used in the open source version. - Sources []influxql.Source - - // Group by interval and tags. - Interval Interval - Dimensions []string // The final dimensions of the query (stays the same even in subqueries). - GroupBy map[string]struct{} // Dimensions to group points by in intermediate iterators. - Location *time.Location - - // Fill options. - Fill influxql.FillOption - FillValue interface{} - - // Condition to filter by. - Condition influxql.Expr - - // Time range for the iterator. - StartTime int64 - EndTime int64 - - // Sorted in time ascending order if true. 
- Ascending bool - - // Limits the number of points per series. - Limit, Offset int - - // Limits the number of series. - SLimit, SOffset int - - // Removes the measurement name. Useful for meta queries. - StripName bool - - // Removes duplicate rows from raw queries. - Dedupe bool - - // Determines if this is a query for raw data or an aggregate/selector. - Ordered bool - - // Limits on the creation of iterators. - MaxSeriesN int - - // If this channel is set and is closed, the iterator should try to exit - // and close as soon as possible. - InterruptCh <-chan struct{} - - // Authorizer can limit access to data - Authorizer Authorizer -} - -// newIteratorOptionsStmt creates the iterator options from stmt. -func newIteratorOptionsStmt(stmt *influxql.SelectStatement, sopt SelectOptions) (opt IteratorOptions, err error) { - // Determine time range from the condition. - valuer := &influxql.NowValuer{Location: stmt.Location} - condition, timeRange, err := influxql.ConditionExpr(stmt.Condition, valuer) - if err != nil { - return IteratorOptions{}, err - } - - if !timeRange.Min.IsZero() { - opt.StartTime = timeRange.Min.UnixNano() - } else { - opt.StartTime = influxql.MinTime - } - if !timeRange.Max.IsZero() { - opt.EndTime = timeRange.Max.UnixNano() - } else { - opt.EndTime = influxql.MaxTime - } - opt.Location = stmt.Location - - // Determine group by interval. - interval, err := stmt.GroupByInterval() - if err != nil { - return opt, err - } - // Set duration to zero if a negative interval has been used. - if interval < 0 { - interval = 0 - } else if interval > 0 { - opt.Interval.Offset, err = stmt.GroupByOffset() - if err != nil { - return opt, err - } - } - opt.Interval.Duration = interval - - // Always request an ordered output for the top level iterators. - // The emitter will always emit points as ordered. - opt.Ordered = true - - // Determine dimensions. - opt.GroupBy = make(map[string]struct{}, len(opt.Dimensions)) - for _, d := range stmt.Dimensions { - if d, ok := d.Expr.(*influxql.VarRef); ok { - opt.Dimensions = append(opt.Dimensions, d.Val) - opt.GroupBy[d.Val] = struct{}{} - } - } - - opt.Condition = condition - opt.Ascending = stmt.TimeAscending() - opt.Dedupe = stmt.Dedupe - opt.StripName = stmt.StripName - - opt.Fill, opt.FillValue = stmt.Fill, stmt.FillValue - if opt.Fill == influxql.NullFill && stmt.Target != nil { - // Set the fill option to none if a target has been given. - // Null values will get ignored when being written to the target - // so fill(null) wouldn't write any null values to begin with. - opt.Fill = influxql.NoFill - } - opt.Limit, opt.Offset = stmt.Limit, stmt.Offset - opt.SLimit, opt.SOffset = stmt.SLimit, stmt.SOffset - opt.MaxSeriesN = sopt.MaxSeriesN - opt.OrgID = sopt.OrgID - - return opt, nil -} - -func newIteratorOptionsSubstatement(ctx context.Context, stmt *influxql.SelectStatement, opt IteratorOptions) (IteratorOptions, error) { - subOpt, err := newIteratorOptionsStmt(stmt, SelectOptions{ - OrgID: opt.OrgID, - MaxSeriesN: opt.MaxSeriesN, - }) - if err != nil { - return IteratorOptions{}, err - } - - if subOpt.StartTime < opt.StartTime { - subOpt.StartTime = opt.StartTime - } - if subOpt.EndTime > opt.EndTime { - subOpt.EndTime = opt.EndTime - } - if !subOpt.Interval.IsZero() && subOpt.EndTime == influxql.MaxTime { - if now := ctx.Value(nowKey); now != nil { - subOpt.EndTime = now.(time.Time).UnixNano() - } - } - // Propagate the dimensions to the inner subquery. 
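// A standalone sketch of the time-range intersection applied to subqueries
// above: an inner query may only narrow the window established by the outer
// query, never widen it. clampRange is an illustrative helper; times are in
// nanoseconds.
func clampRange(outerStart, outerEnd, subStart, subEnd int64) (int64, int64) {
	if subStart < outerStart {
		subStart = outerStart // cannot begin before the outer range
	}
	if subEnd > outerEnd {
		subEnd = outerEnd // cannot extend past the outer range
	}
	return subStart, subEnd
}

// clampRange(100, 200, 50, 500) -> (100, 200); clampRange(100, 200, 120, 180) -> (120, 180).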
- subOpt.Dimensions = opt.Dimensions - for d := range opt.GroupBy { - subOpt.GroupBy[d] = struct{}{} - } - subOpt.InterruptCh = opt.InterruptCh - - // Extract the time range and condition from the condition. - valuer := &influxql.NowValuer{Location: stmt.Location} - cond, t, err := influxql.ConditionExpr(stmt.Condition, valuer) - if err != nil { - return IteratorOptions{}, err - } - subOpt.Condition = cond - // If the time range is more constrained, use it instead. A less constrained time - // range should be ignored. - if !t.Min.IsZero() && t.MinTimeNano() > opt.StartTime { - subOpt.StartTime = t.MinTimeNano() - } - if !t.Max.IsZero() && t.MaxTimeNano() < opt.EndTime { - subOpt.EndTime = t.MaxTimeNano() - } - - // Propagate the SLIMIT and SOFFSET from the outer query. - subOpt.SLimit += opt.SLimit - subOpt.SOffset += opt.SOffset - - // Propagate the ordering from the parent query. - subOpt.Ascending = opt.Ascending - - // If the inner query uses a null fill option and is not a raw query, - // switch it to none so we don't hit an unnecessary penalty from the - // fill iterator. Null values will end up getting stripped by an outer - // query anyway so there's no point in having them here. We still need - // all other types of fill iterators because they can affect the result - // of the outer query. We also do not do this for raw queries because - // there is no fill iterator for them and fill(none) doesn't work with - // raw queries. - if !stmt.IsRawQuery && subOpt.Fill == influxql.NullFill { - subOpt.Fill = influxql.NoFill - } - - // Inherit the ordering method from the outer query. - subOpt.Ordered = opt.Ordered - - // If there is no interval for this subquery, but the outer query has an - // interval, inherit the parent interval. - interval, err := stmt.GroupByInterval() - if err != nil { - return IteratorOptions{}, err - } else if interval == 0 { - subOpt.Interval = opt.Interval - } - return subOpt, nil -} - -// MergeSorted returns true if the options require a sorted merge. -func (opt IteratorOptions) MergeSorted() bool { - return opt.Ordered -} - -// SeekTime returns the time the iterator should start from. -// For ascending iterators this is the start time, for descending iterators it's the end time. -func (opt IteratorOptions) SeekTime() int64 { - if opt.Ascending { - return opt.StartTime - } - return opt.EndTime -} - -// StopTime returns the time the iterator should end at. -// For ascending iterators this is the end time, for descending iterators it's the start time. -func (opt IteratorOptions) StopTime() int64 { - if opt.Ascending { - return opt.EndTime - } - return opt.StartTime -} - -// Window returns the time window [start,end) that t falls within. -func (opt IteratorOptions) Window(t int64) (start, end int64) { - if opt.Interval.IsZero() { - return opt.StartTime, opt.EndTime + 1 - } - - // Subtract the offset to the time so we calculate the correct base interval. - t -= int64(opt.Interval.Offset) - - // Retrieve the zone offset for the start time. - var zone int64 - if opt.Location != nil { - _, zone = opt.Zone(t) - } - - // Truncate time by duration. - dt := (t + zone) % int64(opt.Interval.Duration) - if dt < 0 { - // Negative modulo rounds up instead of down, so offset - // with the duration. - dt += int64(opt.Interval.Duration) - } - - // Find the start time. 
- if influxql.MinTime+dt >= t { - start = influxql.MinTime - } else { - start = t - dt - } - - start += int64(opt.Interval.Offset) - - // Look for the start offset again because the first time may have been - // after the offset switch. Now that we are at midnight in UTC, we can - // lookup the zone offset again to get the real starting offset. - if opt.Location != nil { - _, startOffset := opt.Zone(start) - // Do not adjust the offset if the offset change is greater than or - // equal to the duration. - if o := zone - startOffset; o != 0 && abs(o) < int64(opt.Interval.Duration) { - start += o - } - } - - // Find the end time. - if dt := int64(opt.Interval.Duration) - dt; influxql.MaxTime-dt <= t { - end = influxql.MaxTime - } else { - end = t + dt - } - - // Retrieve the zone offset for the end time. - if opt.Location != nil { - _, endOffset := opt.Zone(end) - // Adjust the end time if the offset is different from the start offset. - // Only apply the offset if it is smaller than the duration. - // This prevents going back in time and creating time windows - // that don't make any sense. - if o := zone - endOffset; o != 0 && abs(o) < int64(opt.Interval.Duration) { - // If the offset is greater than 0, that means we are adding time. - // Added time goes into the previous interval because the clocks - // move backwards. If the offset is less than 0, then we are skipping - // time. Skipped time comes after the switch so if we have a time - // interval that lands on the switch, it comes from the next - // interval and not the current one. For this reason, we need to know - // when the actual switch happens by seeing if the time switch is within - // the current interval. We calculate the zone offset with the offset - // and see if the value is the same. If it is, we apply the - // offset. - if o > 0 { - end += o - } else if _, z := opt.Zone(end + o); z == endOffset { - end += o - } - } - } - end += int64(opt.Interval.Offset) - return -} - -// DerivativeInterval returns the time interval for the derivative function. -func (opt IteratorOptions) DerivativeInterval() Interval { - // Use the interval on the derivative() call, if specified. - if expr, ok := opt.Expr.(*influxql.Call); ok && len(expr.Args) == 2 { - return Interval{Duration: expr.Args[1].(*influxql.DurationLiteral).Val} - } - - // Otherwise use the group by interval, if specified. - if opt.Interval.Duration > 0 { - return Interval{Duration: opt.Interval.Duration} - } - - return Interval{Duration: time.Second} -} - -// ElapsedInterval returns the time interval for the elapsed function. -func (opt IteratorOptions) ElapsedInterval() Interval { - // Use the interval on the elapsed() call, if specified. - if expr, ok := opt.Expr.(*influxql.Call); ok && len(expr.Args) == 2 { - return Interval{Duration: expr.Args[1].(*influxql.DurationLiteral).Val} - } - - return Interval{Duration: time.Nanosecond} -} - -// IntegralInterval returns the time interval for the integral function. -func (opt IteratorOptions) IntegralInterval() Interval { - // Use the interval on the integral() call, if specified. - if expr, ok := opt.Expr.(*influxql.Call); ok && len(expr.Args) == 2 { - return Interval{Duration: expr.Args[1].(*influxql.DurationLiteral).Val} - } - - return Interval{Duration: time.Second} -} - -// GetDimensions retrieves the dimensions for this query. 
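// A minimal sketch of the core truncation in Window above, leaving out the
// MinTime/MaxTime clamps and the time-zone adjustments: the timestamp is
// shifted by the group-by offset, truncated down to a multiple of the
// interval (negative remainders are rounded down, not toward zero), and
// shifted back. windowBounds is an illustrative name; values are nanoseconds.
func windowBounds(t, interval, offset int64) (start, end int64) {
	t -= offset
	dt := t % interval
	if dt < 0 {
		dt += interval // Go's % truncates toward zero; push negative remainders down a window
	}
	start = t - dt + offset
	end = start + interval
	return start, end
}

// With a one-hour interval (3600e9 ns) and zero offset, a point at 01:30
// falls in the half-open window [01:00, 02:00).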
-func (opt IteratorOptions) GetDimensions() []string { - if len(opt.GroupBy) > 0 { - dimensions := make([]string, 0, len(opt.GroupBy)) - for dim := range opt.GroupBy { - dimensions = append(dimensions, dim) - } - return dimensions - } - return opt.Dimensions -} - -// Zone returns the zone information for the given time. The offset is in nanoseconds. -func (opt *IteratorOptions) Zone(ns int64) (string, int64) { - if opt.Location == nil { - return "", 0 - } - - t := time.Unix(0, ns).In(opt.Location) - name, offset := t.Zone() - return name, secToNs * int64(offset) -} - -// MarshalBinary encodes opt into a binary format. -func (opt *IteratorOptions) MarshalBinary() ([]byte, error) { - return proto.Marshal(encodeIteratorOptions(opt)) -} - -// UnmarshalBinary decodes from a binary format in to opt. -func (opt *IteratorOptions) UnmarshalBinary(buf []byte) error { - var pb internal.IteratorOptions - if err := proto.Unmarshal(buf, &pb); err != nil { - return err - } - - other, err := decodeIteratorOptions(&pb) - if err != nil { - return err - } - *opt = *other - - return nil -} - -func encodeIteratorOptions(opt *IteratorOptions) *internal.IteratorOptions { - pb := &internal.IteratorOptions{ - Interval: encodeInterval(opt.Interval), - Dimensions: opt.Dimensions, - Fill: proto.Int32(int32(opt.Fill)), - StartTime: proto.Int64(opt.StartTime), - EndTime: proto.Int64(opt.EndTime), - Ascending: proto.Bool(opt.Ascending), - Limit: proto.Int64(int64(opt.Limit)), - Offset: proto.Int64(int64(opt.Offset)), - SLimit: proto.Int64(int64(opt.SLimit)), - SOffset: proto.Int64(int64(opt.SOffset)), - StripName: proto.Bool(opt.StripName), - Dedupe: proto.Bool(opt.Dedupe), - MaxSeriesN: proto.Int64(int64(opt.MaxSeriesN)), - Ordered: proto.Bool(opt.Ordered), - } - - // Set expression, if set. - if opt.Expr != nil { - pb.Expr = proto.String(opt.Expr.String()) - } - - // Set the location, if set. - if opt.Location != nil { - pb.Location = proto.String(opt.Location.String()) - } - - // Convert and encode aux fields as variable references. - if opt.Aux != nil { - pb.Fields = make([]*internal.VarRef, len(opt.Aux)) - pb.Aux = make([]string, len(opt.Aux)) - for i, ref := range opt.Aux { - pb.Fields[i] = encodeVarRef(ref) - pb.Aux[i] = ref.Val - } - } - - // Encode group by dimensions from a map. - if opt.GroupBy != nil { - dimensions := make([]string, 0, len(opt.GroupBy)) - for dim := range opt.GroupBy { - dimensions = append(dimensions, dim) - } - pb.GroupBy = dimensions - } - - // Convert and encode sources to measurements. - if opt.Sources != nil { - sources := make([]*internal.Measurement, len(opt.Sources)) - for i, source := range opt.Sources { - mm := source.(*influxql.Measurement) - sources[i] = encodeMeasurement(mm) - } - pb.Sources = sources - } - - // Fill value can only be a number. Set it if available. - if v, ok := opt.FillValue.(float64); ok { - pb.FillValue = proto.Float64(v) - } - - // Set condition, if set. 
- if opt.Condition != nil { - pb.Condition = proto.String(opt.Condition.String()) - } - - return pb -} - -func decodeIteratorOptions(pb *internal.IteratorOptions) (*IteratorOptions, error) { - opt := &IteratorOptions{ - Interval: decodeInterval(pb.GetInterval()), - Dimensions: pb.GetDimensions(), - Fill: influxql.FillOption(pb.GetFill()), - StartTime: pb.GetStartTime(), - EndTime: pb.GetEndTime(), - Ascending: pb.GetAscending(), - Limit: int(pb.GetLimit()), - Offset: int(pb.GetOffset()), - SLimit: int(pb.GetSLimit()), - SOffset: int(pb.GetSOffset()), - StripName: pb.GetStripName(), - Dedupe: pb.GetDedupe(), - MaxSeriesN: int(pb.GetMaxSeriesN()), - Ordered: pb.GetOrdered(), - } - - // Set expression, if set. - if pb.Expr != nil { - expr, err := influxql.ParseExpr(pb.GetExpr()) - if err != nil { - return nil, err - } - opt.Expr = expr - } - - if pb.Location != nil { - loc, err := time.LoadLocation(pb.GetLocation()) - if err != nil { - return nil, err - } - opt.Location = loc - } - - // Convert and decode variable references. - if fields := pb.GetFields(); fields != nil { - opt.Aux = make([]influxql.VarRef, len(fields)) - for i, ref := range fields { - opt.Aux[i] = decodeVarRef(ref) - } - } else if aux := pb.GetAux(); aux != nil { - opt.Aux = make([]influxql.VarRef, len(aux)) - for i, name := range aux { - opt.Aux[i] = influxql.VarRef{Val: name} - } - } - - // Convert and decode sources to measurements. - if pb.Sources != nil { - sources := make([]influxql.Source, len(pb.GetSources())) - for i, source := range pb.GetSources() { - mm, err := decodeMeasurement(source) - if err != nil { - return nil, err - } - sources[i] = mm - } - opt.Sources = sources - } - - // Convert group by dimensions to a map. - if pb.GroupBy != nil { - dimensions := make(map[string]struct{}, len(pb.GroupBy)) - for _, dim := range pb.GetGroupBy() { - dimensions[dim] = struct{}{} - } - opt.GroupBy = dimensions - } - - // Set the fill value, if set. - if pb.FillValue != nil { - opt.FillValue = pb.GetFillValue() - } - - // Set condition, if set. - if pb.Condition != nil { - expr, err := influxql.ParseExpr(pb.GetCondition()) - if err != nil { - return nil, err - } - opt.Condition = expr - } - - return opt, nil -} - -func encodeMeasurement(mm *influxql.Measurement) *internal.Measurement { - pb := &internal.Measurement{ - Database: proto.String(mm.Database), - RetentionPolicy: proto.String(mm.RetentionPolicy), - Name: proto.String(mm.Name), - SystemIterator: proto.String(mm.SystemIterator), - IsTarget: proto.Bool(mm.IsTarget), - } - if mm.Regex != nil { - pb.Regex = proto.String(mm.Regex.Val.String()) - } - return pb -} - -func decodeMeasurement(pb *internal.Measurement) (*influxql.Measurement, error) { - mm := &influxql.Measurement{ - Database: pb.GetDatabase(), - RetentionPolicy: pb.GetRetentionPolicy(), - Name: pb.GetName(), - SystemIterator: pb.GetSystemIterator(), - IsTarget: pb.GetIsTarget(), - } - - if pb.Regex != nil { - regex, err := regexp.Compile(pb.GetRegex()) - if err != nil { - return nil, fmt.Errorf("invalid binary measurement regex: value=%q, err=%s", pb.GetRegex(), err) - } - mm.Regex = &influxql.RegexLiteral{Val: regex} - } - - return mm, nil -} - -// Interval represents a repeating interval for a query. -type Interval struct { - Duration time.Duration - Offset time.Duration -} - -// IsZero returns true if the interval has no duration. 
-func (i Interval) IsZero() bool { return i.Duration == 0 } - -func encodeInterval(i Interval) *internal.Interval { - return &internal.Interval{ - Duration: proto.Int64(i.Duration.Nanoseconds()), - Offset: proto.Int64(i.Offset.Nanoseconds()), - } -} - -func decodeInterval(pb *internal.Interval) Interval { - return Interval{ - Duration: time.Duration(pb.GetDuration()), - Offset: time.Duration(pb.GetOffset()), - } -} - -func encodeVarRef(ref influxql.VarRef) *internal.VarRef { - return &internal.VarRef{ - Val: proto.String(ref.Val), - Type: proto.Int32(int32(ref.Type)), - } -} - -func decodeVarRef(pb *internal.VarRef) influxql.VarRef { - return influxql.VarRef{ - Val: pb.GetVal(), - Type: influxql.DataType(pb.GetType()), - } -} - -type nilFloatIterator struct{} - -func (*nilFloatIterator) Stats() IteratorStats { return IteratorStats{} } -func (*nilFloatIterator) Close() error { return nil } -func (*nilFloatIterator) Next() (*FloatPoint, error) { return nil, nil } - -type nilFloatReaderIterator struct { - r io.Reader -} - -func (*nilFloatReaderIterator) Stats() IteratorStats { return IteratorStats{} } -func (itr *nilFloatReaderIterator) Close() error { - if r, ok := itr.r.(io.ReadCloser); ok { - itr.r = nil - return r.Close() - } - return nil -} -func (*nilFloatReaderIterator) Next() (*FloatPoint, error) { return nil, nil } - -// IteratorStats represents statistics about an iterator. -// Some statistics are available immediately upon iterator creation while -// some are derived as the iterator processes data. -type IteratorStats struct { - SeriesN int // series represented - PointN int // points returned -} - -// Add aggregates fields from s and other together. Overwrites s. -func (s *IteratorStats) Add(other IteratorStats) { - s.SeriesN += other.SeriesN - s.PointN += other.PointN -} - -func encodeIteratorStats(stats *IteratorStats) *internal.IteratorStats { - return &internal.IteratorStats{ - SeriesN: proto.Int64(int64(stats.SeriesN)), - PointN: proto.Int64(int64(stats.PointN)), - } -} - -func decodeIteratorStats(pb *internal.IteratorStats) IteratorStats { - return IteratorStats{ - SeriesN: int(pb.GetSeriesN()), - PointN: int(pb.GetPointN()), - } -} - -// IteratorCost contains statistics retrieved for explaining what potential -// cost may be incurred by instantiating an iterator. -type IteratorCost struct { - // The total number of shards that are touched by this query. - NumShards int64 - - // The total number of non-unique series that are accessed by this query. - // This number matches the number of cursors created by the query since - // one cursor is created for every series. - NumSeries int64 - - // CachedValues returns the number of cached values that may be read by this - // query. - CachedValues int64 - - // The total number of non-unique files that may be accessed by this query. - // This will count the number of files accessed by each series so files - // will likely be double counted. - NumFiles int64 - - // The number of blocks that had the potential to be accessed. - BlocksRead int64 - - // The amount of data that can be potentially read. - BlockSize int64 -} - -// Combine combines the results of two IteratorCost structures into one. 
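Note (illustrative aside): `IteratorCost` values are presumably accumulated across the shards a query touches via `Combine`, defined next. A hypothetical usage sketch — the struct and the per-shard numbers here are made up for illustration — of folding several per-shard costs into a single estimate:

```go
package main

import "fmt"

// cost mirrors a few of the IteratorCost fields above; the values are made up.
type cost struct {
	NumShards, NumSeries, NumFiles, BlocksRead, BlockSize int64
}

func (c cost) combine(other cost) cost {
	return cost{
		NumShards:  c.NumShards + other.NumShards,
		NumSeries:  c.NumSeries + other.NumSeries,
		NumFiles:   c.NumFiles + other.NumFiles,
		BlocksRead: c.BlocksRead + other.BlocksRead,
		BlockSize:  c.BlockSize + other.BlockSize,
	}
}

func main() {
	perShard := []cost{
		{NumShards: 1, NumSeries: 120, NumFiles: 4, BlocksRead: 300, BlockSize: 1 << 20},
		{NumShards: 1, NumSeries: 80, NumFiles: 3, BlocksRead: 150, BlockSize: 1 << 19},
	}
	var total cost
	for _, c := range perShard {
		total = total.combine(c)
	}
	fmt.Printf("%+v\n", total)
	// {NumShards:2 NumSeries:200 NumFiles:7 BlocksRead:450 BlockSize:1572864}
}
```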
-func (c IteratorCost) Combine(other IteratorCost) IteratorCost { - return IteratorCost{ - NumShards: c.NumShards + other.NumShards, - NumSeries: c.NumSeries + other.NumSeries, - CachedValues: c.CachedValues + other.CachedValues, - NumFiles: c.NumFiles + other.NumFiles, - BlocksRead: c.BlocksRead + other.BlocksRead, - BlockSize: c.BlockSize + other.BlockSize, - } -} - -// floatFastDedupeIterator outputs unique points where the point has a single aux field. -type floatFastDedupeIterator struct { - input FloatIterator - m map[fastDedupeKey]struct{} // lookup of points already sent -} - -// newFloatFastDedupeIterator returns a new instance of floatFastDedupeIterator. -func newFloatFastDedupeIterator(input FloatIterator) *floatFastDedupeIterator { - return &floatFastDedupeIterator{ - input: input, - m: make(map[fastDedupeKey]struct{}), - } -} - -// Stats returns stats from the input iterator. -func (itr *floatFastDedupeIterator) Stats() IteratorStats { return itr.input.Stats() } - -// Close closes the iterator and all child iterators. -func (itr *floatFastDedupeIterator) Close() error { return itr.input.Close() } - -// Next returns the next unique point from the input iterator. -func (itr *floatFastDedupeIterator) Next() (*FloatPoint, error) { - for { - // Read next point. - // Skip if there are not any aux fields. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } else if len(p.Aux) == 0 { - continue - } - - // If the point has already been output then move to the next point. - key := fastDedupeKey{name: p.Name} - key.values[0] = p.Aux[0] - if len(p.Aux) > 1 { - key.values[1] = p.Aux[1] - } - if _, ok := itr.m[key]; ok { - continue - } - - // Otherwise mark it as emitted and return point. - itr.m[key] = struct{}{} - return p, nil - } -} - -type fastDedupeKey struct { - name string - values [2]interface{} -} - -func abs(v int64) int64 { - sign := v >> 63 - return (v ^ sign) - sign -} - -// IteratorEncoder is an encoder for encoding an iterator's points to w. -type IteratorEncoder struct { - w io.Writer - - // Frequency with which stats are emitted. - StatsInterval time.Duration -} - -// NewIteratorEncoder encodes an iterator's points to w. -func NewIteratorEncoder(w io.Writer) *IteratorEncoder { - return &IteratorEncoder{ - w: w, - - StatsInterval: DefaultStatsInterval, - } -} - -// EncodeIterator encodes and writes all of itr's points to the underlying writer. -func (enc *IteratorEncoder) EncodeIterator(itr Iterator) error { - switch itr := itr.(type) { - case FloatIterator: - return enc.encodeFloatIterator(itr) - case IntegerIterator: - return enc.encodeIntegerIterator(itr) - case StringIterator: - return enc.encodeStringIterator(itr) - case BooleanIterator: - return enc.encodeBooleanIterator(itr) - default: - panic(fmt.Sprintf("unsupported iterator for encoder: %T", itr)) - } -} - -// encode a stats object in the point stream. 
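Note (illustrative aside): `encodeStats`, next, frames the stats-only point as a big-endian `uint32` length prefix followed by the marshaled protobuf bytes. A standalone sketch of writing and reading one such frame, with a placeholder byte slice standing in for the protobuf payload:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

func writeFrame(w io.Writer, payload []byte) error {
	// Length prefix first (4 bytes, big endian), then the payload itself.
	if err := binary.Write(w, binary.BigEndian, uint32(len(payload))); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

func readFrame(r io.Reader) ([]byte, error) {
	var n uint32
	if err := binary.Read(r, binary.BigEndian, &n); err != nil {
		return nil, err
	}
	payload := make([]byte, n)
	_, err := io.ReadFull(r, payload)
	return payload, err
}

func main() {
	var buf bytes.Buffer
	if err := writeFrame(&buf, []byte("stats-point-bytes")); err != nil {
		panic(err)
	}
	payload, err := readFrame(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", payload) // "stats-point-bytes"
}
```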
-func (enc *IteratorEncoder) encodeStats(stats IteratorStats) error { - buf, err := proto.Marshal(&internal.Point{ - Name: proto.String(""), - Tags: proto.String(""), - Time: proto.Int64(0), - Nil: proto.Bool(false), - - Stats: encodeIteratorStats(&stats), - }) - if err != nil { - return err - } - - if err = binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { - return err - } - if _, err = enc.w.Write(buf); err != nil { - return err - } - return nil -} diff --git a/influxql/query/iterator_mapper.go b/influxql/query/iterator_mapper.go deleted file mode 100644 index 79675fa2c76..00000000000 --- a/influxql/query/iterator_mapper.go +++ /dev/null @@ -1,67 +0,0 @@ -package query - -import ( - "fmt" - "math" - - "github.com/influxdata/influxql" -) - -type IteratorMap interface { - Value(row *Row) interface{} -} - -type FieldMap struct { - Index int - Type influxql.DataType -} - -func (f FieldMap) Value(row *Row) interface{} { - v := castToType(row.Values[f.Index], f.Type) - if v == NullFloat { - // If the value is a null float, then convert it back to NaN - // so it is treated as a float for eval. - v = math.NaN() - } - return v -} - -type TagMap string - -func (s TagMap) Value(row *Row) interface{} { return row.Series.Tags.Value(string(s)) } - -type NullMap struct{} - -func (NullMap) Value(row *Row) interface{} { return nil } - -func NewIteratorMapper(cur Cursor, driver IteratorMap, fields []IteratorMap, opt IteratorOptions) Iterator { - if driver != nil { - switch driver := driver.(type) { - case FieldMap: - switch driver.Type { - case influxql.Float: - return newFloatIteratorMapper(cur, driver, fields, opt) - case influxql.Integer: - return newIntegerIteratorMapper(cur, driver, fields, opt) - case influxql.Unsigned: - return newUnsignedIteratorMapper(cur, driver, fields, opt) - case influxql.String, influxql.Tag: - return newStringIteratorMapper(cur, driver, fields, opt) - case influxql.Boolean: - return newBooleanIteratorMapper(cur, driver, fields, opt) - default: - // The driver doesn't appear to to have a valid driver type. - // We should close the cursor and return a blank iterator. - // We close the cursor because we own it and have a responsibility - // to close it once it is passed into this function. 
- cur.Close() - return &nilFloatIterator{} - } - case TagMap: - return newStringIteratorMapper(cur, driver, fields, opt) - default: - panic(fmt.Sprintf("unable to create iterator mapper with driver expression type: %T", driver)) - } - } - return newFloatIteratorMapper(cur, nil, fields, opt) -} diff --git a/influxql/query/iterator_mapper_test.go b/influxql/query/iterator_mapper_test.go deleted file mode 100644 index fe5a26c112e..00000000000 --- a/influxql/query/iterator_mapper_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package query_test - -import ( - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/pkg/deep" - "github.com/influxdata/influxql" -) - -func TestIteratorMapper(t *testing.T) { - cur := query.RowCursor([]query.Row{ - { - Time: 0, - Series: query.Series{ - Name: "cpu", - Tags: ParseTags("host=A"), - }, - Values: []interface{}{float64(1), "a"}, - }, - { - Time: 5, - Series: query.Series{ - Name: "cpu", - Tags: ParseTags("host=A"), - }, - Values: []interface{}{float64(3), "c"}, - }, - { - Time: 2, - Series: query.Series{ - Name: "cpu", - Tags: ParseTags("host=B"), - }, - Values: []interface{}{float64(2), "b"}, - }, - { - Time: 8, - Series: query.Series{ - Name: "cpu", - Tags: ParseTags("host=B"), - }, - Values: []interface{}{float64(8), "h"}, - }, - }, []influxql.VarRef{ - {Val: "val1", Type: influxql.Float}, - {Val: "val2", Type: influxql.String}, - }) - - opt := query.IteratorOptions{ - Ascending: true, - Aux: []influxql.VarRef{ - {Val: "val1", Type: influxql.Float}, - {Val: "val2", Type: influxql.String}, - }, - Dimensions: []string{"host"}, - } - itr := query.NewIteratorMapper(cur, nil, []query.IteratorMap{ - query.FieldMap{Index: 0}, - query.FieldMap{Index: 1}, - query.TagMap("host"), - }, opt) - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Aux: []interface{}{float64(1), "a", "A"}}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 5, Aux: []interface{}{float64(3), "c", "A"}}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 2, Aux: []interface{}{float64(2), "b", "B"}}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 8, Aux: []interface{}{float64(8), "h", "B"}}}, - }) { - t.Errorf("unexpected points: %s", spew.Sdump(a)) - } -} diff --git a/influxql/query/iterator_test.go b/influxql/query/iterator_test.go deleted file mode 100644 index 23f2adf69d8..00000000000 --- a/influxql/query/iterator_test.go +++ /dev/null @@ -1,1897 +0,0 @@ -package query_test - -import ( - "bytes" - "context" - "fmt" - "reflect" - "strings" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/pkg/deep" - "github.com/influxdata/influxql" -) - -// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
-func TestMergeIterator_Float(t *testing.T) { - inputs := []*FloatIterator{ - {Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, - {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, - }}, - {Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, - {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, - }}, - {Points: []query.FloatPoint{}}, - {Points: []query.FloatPoint{}}, - } - - itr := query.NewMergeIterator(FloatIterators(inputs), query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10 * time.Nanosecond, - }, - Dimensions: []string{"host"}, - Ascending: true, - }) - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, - {&query.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, - {&query.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}}, - }) { - t.Errorf("unexpected points: %s", spew.Sdump(a)) - } - - for i, input := range inputs { - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - } -} - -// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
-func TestMergeIterator_Integer(t *testing.T) { - inputs := []*IntegerIterator{ - {Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, - {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, - }}, - {Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, - {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, - }}, - {Points: []query.IntegerPoint{}}, - } - itr := query.NewMergeIterator(IntegerIterators(inputs), query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10 * time.Nanosecond, - }, - Dimensions: []string{"host"}, - Ascending: true, - }) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, - {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, - {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, - {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, - {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, - {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, - {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, - {&query.IntegerPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, - {&query.IntegerPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}}, - }) { - t.Errorf("unexpected points: %s", spew.Sdump(a)) - } - - for i, input := range inputs { - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - } -} - -// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
-func TestMergeIterator_Unsigned(t *testing.T) { - inputs := []*UnsignedIterator{ - {Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, - {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, - }}, - {Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, - {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, - }}, - {Points: []query.UnsignedPoint{}}, - } - itr := query.NewMergeIterator(UnsignedIterators(inputs), query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10 * time.Nanosecond, - }, - Dimensions: []string{"host"}, - Ascending: true, - }) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, - {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, - {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, - {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, - {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, - {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, - {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, - {&query.UnsignedPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, - {&query.UnsignedPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}}, - }) { - t.Errorf("unexpected points: %s", spew.Sdump(a)) - } - - for i, input := range inputs { - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - } -} - -// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
-func TestMergeIterator_String(t *testing.T) { - inputs := []*StringIterator{ - {Points: []query.StringPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}, - {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: "h"}, - }}, - {Points: []query.StringPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}, - {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}, - }}, - {Points: []query.StringPoint{}}, - } - itr := query.NewMergeIterator(StringIterators(inputs), query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10 * time.Nanosecond, - }, - Dimensions: []string{"host"}, - Ascending: true, - }) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}}, - {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}}, - {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}}, - {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}}, - {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}}, - {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}}, - {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}}, - {&query.StringPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}}, - {&query.StringPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: "h"}}, - }) { - t.Errorf("unexpected points: %s", spew.Sdump(a)) - } - - for i, input := range inputs { - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - } -} - -// Ensure that a set of iterators can be merged together, sorted by window and name/tag. 
-func TestMergeIterator_Boolean(t *testing.T) { - inputs := []*BooleanIterator{ - {Points: []query.BooleanPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}, - {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: true}, - }}, - {Points: []query.BooleanPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}, - {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: false}, - }}, - {Points: []query.BooleanPoint{}}, - } - itr := query.NewMergeIterator(BooleanIterators(inputs), query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10 * time.Nanosecond, - }, - Dimensions: []string{"host"}, - Ascending: true, - }) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}}, - {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}}, - {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}}, - {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}}, - {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}}, - {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}}, - {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}}, - {&query.BooleanPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: false}}, - {&query.BooleanPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: true}}, - }) { - t.Errorf("unexpected points: %s", spew.Sdump(a)) - } - - for i, input := range inputs { - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - } -} - -func TestMergeIterator_Nil(t *testing.T) { - itr := query.NewMergeIterator([]query.Iterator{nil}, query.IteratorOptions{}) - if itr != nil { - t.Fatalf("unexpected iterator: %#v", itr) - } -} - -// Verifies that coercing will drop values that aren't the primary type. -// It's the responsibility of the engine to return the correct type. If they don't, -// we drop iterators that don't match. 
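Note (illustrative aside): the coercion behaviour checked in the `Coerce` tests that follow amounts to keeping only points of the primary type and discarding the rest, while still closing the dropped inputs. A stripped-down sketch of that filtering idea — not the iterator's actual implementation:

```go
package main

import "fmt"

func main() {
	// Mixed-type inputs are reduced to the primary type (float64 here);
	// everything else is simply dropped from the output.
	values := []interface{}{float64(7), int64(1), float64(5), int64(2), float64(6)}
	kept := make([]float64, 0, len(values))
	for _, v := range values {
		if f, ok := v.(float64); ok {
			kept = append(kept, f)
		}
	}
	fmt.Println(kept) // [7 5 6]
}
```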
-func TestMergeIterator_Coerce_Float(t *testing.T) { - inputs := []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, - {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, - {Name: "mem", Tags: ParseTags("host=B"), Time: 11, Value: 8}, - }}, - } - - itr := query.NewMergeIterator(inputs, query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10 * time.Nanosecond, - }, - Dimensions: []string{"host"}, - Ascending: true, - }) - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, - {&query.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, - }) { - t.Errorf("unexpected points: %s", spew.Sdump(a)) - } - - for i, input := range inputs { - switch input := input.(type) { - case *FloatIterator: - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - case *IntegerIterator: - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - case *UnsignedIterator: - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - } - } -} - -// Ensure that a set of iterators can be merged together, sorted by name/tag. 
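Note (illustrative aside): for the inputs used in the sorted-merge tests that follow, the expected output is ordered by measurement name, then tag set, then timestamp. A small standalone sketch of that ordering — the sort key is inferred from the test expectations, not taken from the iterator's implementation:

```go
package main

import (
	"fmt"
	"sort"
)

type point struct {
	name, tags string
	time       int64
}

func main() {
	pts := []point{
		{"mem", "host=B", 4},
		{"cpu", "host=A", 12},
		{"cpu", "host=B", 1},
		{"cpu", "host=A", 0},
		{"mem", "host=A", 25},
	}
	// Order by name, then tag set, then time, matching the expected output
	// of TestSortedMergeIterator_Float below.
	sort.Slice(pts, func(i, j int) bool {
		if pts[i].name != pts[j].name {
			return pts[i].name < pts[j].name
		}
		if pts[i].tags != pts[j].tags {
			return pts[i].tags < pts[j].tags
		}
		return pts[i].time < pts[j].time
	})
	fmt.Println(pts)
	// [{cpu host=A 0} {cpu host=A 12} {cpu host=B 1} {mem host=A 25} {mem host=B 4}]
}
```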
-func TestSortedMergeIterator_Float(t *testing.T) { - inputs := []*FloatIterator{ - {Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, - {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, - }}, - {Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, - {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, - }}, - {Points: []query.FloatPoint{}}, - } - itr := query.NewSortedMergeIterator(FloatIterators(inputs), query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10 * time.Nanosecond, - }, - Dimensions: []string{"host"}, - Ascending: true, - }) - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, - {&query.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, - {&query.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, - }) { - t.Errorf("unexpected points: %s", spew.Sdump(a)) - } - - for i, input := range inputs { - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - } -} - -// Ensure that a set of iterators can be merged together, sorted by name/tag. 
-func TestSortedMergeIterator_Integer(t *testing.T) { - inputs := []*IntegerIterator{ - {Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, - {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, - }}, - {Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, - {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, - }}, - {Points: []query.IntegerPoint{}}, - } - itr := query.NewSortedMergeIterator(IntegerIterators(inputs), query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10 * time.Nanosecond, - }, - Dimensions: []string{"host"}, - Ascending: true, - }) - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, - {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, - {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, - {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, - {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, - {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, - {&query.IntegerPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, - {&query.IntegerPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, - {&query.IntegerPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, - }) { - t.Errorf("unexpected points: %s", spew.Sdump(a)) - } - - for i, input := range inputs { - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - } -} - -// Ensure that a set of iterators can be merged together, sorted by name/tag. 
-func TestSortedMergeIterator_Unsigned(t *testing.T) { - inputs := []*UnsignedIterator{ - {Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, - {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, - }}, - {Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, - {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, - }}, - {Points: []query.UnsignedPoint{}}, - } - itr := query.NewSortedMergeIterator(UnsignedIterators(inputs), query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10 * time.Nanosecond, - }, - Dimensions: []string{"host"}, - Ascending: true, - }) - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}}, - {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}}, - {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, - {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}}, - {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}}, - {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, - {&query.UnsignedPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, - {&query.UnsignedPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, - {&query.UnsignedPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}}, - }) { - t.Errorf("unexpected points: %s", spew.Sdump(a)) - } - - for i, input := range inputs { - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - } -} - -// Ensure that a set of iterators can be merged together, sorted by name/tag. 
-func TestSortedMergeIterator_String(t *testing.T) { - inputs := []*StringIterator{ - {Points: []query.StringPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}, - {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: "h"}, - }}, - {Points: []query.StringPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}, - {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}, - }}, - {Points: []query.StringPoint{}}, - } - itr := query.NewSortedMergeIterator(StringIterators(inputs), query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10 * time.Nanosecond, - }, - Dimensions: []string{"host"}, - Ascending: true, - }) - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: "a"}}, - {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: "c"}}, - {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: "g"}}, - {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: "d"}}, - {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: "b"}}, - {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: "e"}}, - {&query.StringPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: "f"}}, - {&query.StringPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: "i"}}, - {&query.StringPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: "h"}}, - }) { - t.Errorf("unexpected points: %s", spew.Sdump(a)) - } - - for i, input := range inputs { - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - } -} - -// Ensure that a set of iterators can be merged together, sorted by name/tag. 
-func TestSortedMergeIterator_Boolean(t *testing.T) { - inputs := []*BooleanIterator{ - {Points: []query.BooleanPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}, - {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: true}, - }}, - {Points: []query.BooleanPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}, - {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: true}, - }}, - {Points: []query.BooleanPoint{}}, - } - itr := query.NewSortedMergeIterator(BooleanIterators(inputs), query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10 * time.Nanosecond, - }, - Dimensions: []string{"host"}, - Ascending: true, - }) - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: true}}, - {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: true}}, - {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: true}}, - {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: false}}, - {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: false}}, - {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: true}}, - {&query.BooleanPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: false}}, - {&query.BooleanPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: true}}, - {&query.BooleanPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: true}}, - }) { - t.Errorf("unexpected points: %s", spew.Sdump(a)) - } - - for i, input := range inputs { - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - } -} - -func TestSortedMergeIterator_Nil(t *testing.T) { - itr := query.NewSortedMergeIterator([]query.Iterator{nil}, query.IteratorOptions{}) - if itr != nil { - t.Fatalf("unexpected iterator: %#v", itr) - } -} - -func TestSortedMergeIterator_Coerce_Float(t *testing.T) { - inputs := []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}, - {Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 1}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12, Value: 3}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 30, Value: 4}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 1, Value: 2}, - {Name: "mem", Tags: ParseTags("host=B"), Time: 4, Value: 8}, - }}, - } - - itr := query.NewSortedMergeIterator(inputs, query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10 * time.Nanosecond, - }, - Dimensions: []string{"host"}, - Ascending: true, - }) - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 20, Value: 7}}, - 
{&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 11, Value: 5}}, - {&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=B"), Time: 13, Value: 6}}, - {&query.FloatPoint{Name: "mem", Tags: ParseTags("host=A"), Time: 25, Value: 9}}, - }) { - t.Errorf("unexpected points: %s", spew.Sdump(a)) - } - - for i, input := range inputs { - switch input := input.(type) { - case *FloatIterator: - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - case *IntegerIterator: - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - case *UnsignedIterator: - if !input.Closed { - t.Errorf("iterator %d not closed", i) - } - } - } -} - -// Ensure limit iterators work with limit and offset. -func TestLimitIterator_Float(t *testing.T) { - input := &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0, Value: 1}, - {Name: "cpu", Time: 5, Value: 3}, - {Name: "cpu", Time: 10, Value: 5}, - {Name: "mem", Time: 5, Value: 3}, - {Name: "mem", Time: 7, Value: 8}, - }} - - itr := query.NewLimitIterator(input, query.IteratorOptions{ - Limit: 1, - Offset: 1, - }) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.FloatPoint{Name: "cpu", Time: 5, Value: 3}}, - {&query.FloatPoint{Name: "mem", Time: 7, Value: 8}}, - }) { - t.Fatalf("unexpected points: %s", spew.Sdump(a)) - } - - if !input.Closed { - t.Error("iterator not closed") - } -} - -// Ensure limit iterators work with limit and offset. -func TestLimitIterator_Integer(t *testing.T) { - input := &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0, Value: 1}, - {Name: "cpu", Time: 5, Value: 3}, - {Name: "cpu", Time: 10, Value: 5}, - {Name: "mem", Time: 5, Value: 3}, - {Name: "mem", Time: 7, Value: 8}, - }} - - itr := query.NewLimitIterator(input, query.IteratorOptions{ - Limit: 1, - Offset: 1, - }) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.IntegerPoint{Name: "cpu", Time: 5, Value: 3}}, - {&query.IntegerPoint{Name: "mem", Time: 7, Value: 8}}, - }) { - t.Fatalf("unexpected points: %s", spew.Sdump(a)) - } - - if !input.Closed { - t.Error("iterator not closed") - } -} - -// Ensure limit iterators work with limit and offset. -func TestLimitIterator_Unsigned(t *testing.T) { - input := &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0, Value: 1}, - {Name: "cpu", Time: 5, Value: 3}, - {Name: "cpu", Time: 10, Value: 5}, - {Name: "mem", Time: 5, Value: 3}, - {Name: "mem", Time: 7, Value: 8}, - }} - - itr := query.NewLimitIterator(input, query.IteratorOptions{ - Limit: 1, - Offset: 1, - }) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.UnsignedPoint{Name: "cpu", Time: 5, Value: 3}}, - {&query.UnsignedPoint{Name: "mem", Time: 7, Value: 8}}, - }) { - t.Fatalf("unexpected points: %s", spew.Sdump(a)) - } - - if !input.Closed { - t.Error("iterator not closed") - } -} - -// Ensure limit iterators work with limit and offset. 
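Note (illustrative aside): as the limit-iterator tests above and below show, `Limit` and `Offset` are applied per series — for each series the first `Offset` points are skipped and at most `Limit` points are returned. A standalone sketch of that behaviour; the `limitPerSeries` helper and its types are hypothetical, not the package's API:

```go
package main

import "fmt"

// limitPerSeries skips `offset` points and then keeps at most `limit` points
// for every series independently.
func limitPerSeries(series map[string][]int64, limit, offset int) map[string][]int64 {
	out := make(map[string][]int64)
	for name, times := range series {
		if offset >= len(times) {
			continue
		}
		times = times[offset:]
		if len(times) > limit {
			times = times[:limit]
		}
		out[name] = times
	}
	return out
}

func main() {
	in := map[string][]int64{
		"cpu": {0, 5, 10},
		"mem": {5, 7},
	}
	fmt.Println(limitPerSeries(in, 1, 1)) // map[cpu:[5] mem:[7]]
}
```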
-func TestLimitIterator_String(t *testing.T) { - input := &StringIterator{Points: []query.StringPoint{ - {Name: "cpu", Time: 0, Value: "a"}, - {Name: "cpu", Time: 5, Value: "b"}, - {Name: "cpu", Time: 10, Value: "c"}, - {Name: "mem", Time: 5, Value: "d"}, - {Name: "mem", Time: 7, Value: "e"}, - }} - - itr := query.NewLimitIterator(input, query.IteratorOptions{ - Limit: 1, - Offset: 1, - }) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.StringPoint{Name: "cpu", Time: 5, Value: "b"}}, - {&query.StringPoint{Name: "mem", Time: 7, Value: "e"}}, - }) { - t.Fatalf("unexpected points: %s", spew.Sdump(a)) - } - - if !input.Closed { - t.Error("iterator not closed") - } -} - -// Ensure limit iterators work with limit and offset. -func TestLimitIterator_Boolean(t *testing.T) { - input := &BooleanIterator{Points: []query.BooleanPoint{ - {Name: "cpu", Time: 0, Value: true}, - {Name: "cpu", Time: 5, Value: false}, - {Name: "cpu", Time: 10, Value: true}, - {Name: "mem", Time: 5, Value: false}, - {Name: "mem", Time: 7, Value: true}, - }} - - itr := query.NewLimitIterator(input, query.IteratorOptions{ - Limit: 1, - Offset: 1, - }) - - if a, err := Iterators([]query.Iterator{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.BooleanPoint{Name: "cpu", Time: 5, Value: false}}, - {&query.BooleanPoint{Name: "mem", Time: 7, Value: true}}, - }) { - t.Fatalf("unexpected points: %s", spew.Sdump(a)) - } - - if !input.Closed { - t.Error("iterator not closed") - } -} - -// Ensure limit iterator returns a subset of points. -func TestLimitIterator(t *testing.T) { - itr := query.NewLimitIterator( - &FloatIterator{Points: []query.FloatPoint{ - {Time: 0, Value: 0}, - {Time: 1, Value: 1}, - {Time: 2, Value: 2}, - {Time: 3, Value: 3}, - }}, - query.IteratorOptions{ - Limit: 2, - Offset: 1, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - }, - ) - - if a, err := (Iterators{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.FloatPoint{Time: 1, Value: 1}}, - {&query.FloatPoint{Time: 2, Value: 2}}, - }) { - t.Fatalf("unexpected points: %s", spew.Sdump(a)) - } -} - -func TestFillIterator_ImplicitStartTime(t *testing.T) { - opt := query.IteratorOptions{ - StartTime: influxql.MinTime, - EndTime: mustParseTime("2000-01-01T01:00:00Z").UnixNano() - 1, - Interval: query.Interval{ - Duration: 20 * time.Minute, - }, - Ascending: true, - } - start := mustParseTime("2000-01-01T00:00:00Z").UnixNano() - itr := query.NewFillIterator( - &FloatIterator{Points: []query.FloatPoint{ - {Time: start, Value: 0}, - }}, - nil, - opt, - ) - - if a, err := (Iterators{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.FloatPoint{Time: start, Value: 0}}, - {&query.FloatPoint{Time: start + int64(20*time.Minute), Nil: true}}, - {&query.FloatPoint{Time: start + int64(40*time.Minute), Nil: true}}, - }) { - t.Fatalf("unexpected points: %s", spew.Sdump(a)) - } -} - -// A count() GROUP BY query with an offset that caused an interval -// to cross a daylight savings change inserted an extra output row -// off by one hour in a grouped count() expression. 
-// https://github.com/influxdata/influxdb/issues/20238 - -func TestGroupByIterator_DST(t *testing.T) { - inputIter := &IntegerIterator{ - Points: []query.IntegerPoint{ - {Name: "a", Tags: ParseTags("t=A"), Time: 1584345600000000000, Value: 1}, - {Name: "a", Tags: ParseTags("t=A"), Time: 1584432000000000000, Value: 2}, - {Name: "a", Tags: ParseTags("t=A"), Time: 1584518400000000000, Value: 3}, - {Name: "a", Tags: ParseTags("t=A"), Time: 1585555200000000000, Value: 4}, - }, - } - const location = "Europe/Rome" - loc, err := time.LoadLocation(location) - if err != nil { - t.Fatalf("Cannot find timezone for %s: %s", location, err) - } - opt := query.IteratorOptions{ - StartTime: mustParseTime("2020-03-15T00:00:00Z").UnixNano(), - EndTime: mustParseTime("2020-04-01T00:00:00Z").UnixNano(), - Ascending: true, - Ordered: true, - StripName: false, - Fill: influxql.NullFill, - FillValue: nil, - Dedupe: false, - Interval: query.Interval{ - Duration: 7 * 24 * time.Hour, - Offset: 4 * 24 * time.Hour, - }, - Expr: MustParseExpr("count(Value)"), - Location: loc, - } - - groupByIter, err := query.NewCallIterator(inputIter, opt) - if err != nil { - t.Fatalf("Cannot create Count and Group By iterator: %s", err) - } else { - groupByIter = query.NewFillIterator(groupByIter, MustParseExpr("count(Value)"), opt) - } - - if a, err := (Iterators{groupByIter}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, [][]query.Point{ - {&query.IntegerPoint{Name: "a", Aggregated: 0, Time: mustParseTime("2020-03-09T00:00:00+01:00").UnixNano(), Value: 0}}, - {&query.IntegerPoint{Name: "a", Aggregated: 3, Time: mustParseTime("2020-03-16T00:00:00+01:00").UnixNano(), Value: 3}}, - {&query.IntegerPoint{Name: "a", Aggregated: 0, Time: mustParseTime("2020-03-23T00:00:00+01:00").UnixNano(), Value: 0}}, - {&query.IntegerPoint{Name: "a", Aggregated: 1, Time: mustParseTime("2020-03-30T00:00:00+02:00").UnixNano(), Value: 1}}, - }) { - t.Fatalf("unexpected points: %s", spew.Sdump(a)) - } -} - -func TestFillIterator_DST(t *testing.T) { - for _, tt := range []struct { - name string - start, end time.Time - points []time.Duration - opt query.IteratorOptions - }{ - { - name: "Start_GroupByDay_Ascending", - start: mustParseTime("2000-04-01T00:00:00-08:00"), - end: mustParseTime("2000-04-05T00:00:00-07:00"), - points: []time.Duration{ - 24 * time.Hour, - 47 * time.Hour, - 71 * time.Hour, - }, - opt: query.IteratorOptions{ - Interval: query.Interval{ - Duration: 24 * time.Hour, - }, - Location: LosAngeles, - Ascending: true, - }, - }, - { - name: "Start_GroupByDay_Descending", - start: mustParseTime("2000-04-01T00:00:00-08:00"), - end: mustParseTime("2000-04-05T00:00:00-07:00"), - points: []time.Duration{ - 71 * time.Hour, - 47 * time.Hour, - 24 * time.Hour, - }, - opt: query.IteratorOptions{ - Interval: query.Interval{ - Duration: 24 * time.Hour, - }, - Location: LosAngeles, - Ascending: false, - }, - }, - { - name: "Start_GroupByHour_Ascending", - start: mustParseTime("2000-04-02T00:00:00-08:00"), - end: mustParseTime("2000-04-02T05:00:00-07:00"), - points: []time.Duration{ - 1 * time.Hour, - 2 * time.Hour, - 3 * time.Hour, - }, - opt: query.IteratorOptions{ - Interval: query.Interval{ - Duration: 1 * time.Hour, - }, - Location: LosAngeles, - Ascending: true, - }, - }, - { - name: "Start_GroupByHour_Descending", - start: mustParseTime("2000-04-02T00:00:00-08:00"), - end: mustParseTime("2000-04-02T05:00:00-07:00"), - points: []time.Duration{ - 3 * time.Hour, - 2 * time.Hour, - 1 * time.Hour, - 
}, - opt: query.IteratorOptions{ - Interval: query.Interval{ - Duration: 1 * time.Hour, - }, - Location: LosAngeles, - Ascending: false, - }, - }, - { - name: "Start_GroupBy2Hour_Ascending", - start: mustParseTime("2000-04-02T00:00:00-08:00"), - end: mustParseTime("2000-04-02T07:00:00-07:00"), - points: []time.Duration{ - 2 * time.Hour, - 3 * time.Hour, - 5 * time.Hour, - }, - opt: query.IteratorOptions{ - Interval: query.Interval{ - Duration: 2 * time.Hour, - }, - Location: LosAngeles, - Ascending: true, - }, - }, - { - name: "Start_GroupBy2Hour_Descending", - start: mustParseTime("2000-04-02T00:00:00-08:00"), - end: mustParseTime("2000-04-02T07:00:00-07:00"), - points: []time.Duration{ - 5 * time.Hour, - 3 * time.Hour, - 2 * time.Hour, - }, - opt: query.IteratorOptions{ - Interval: query.Interval{ - Duration: 2 * time.Hour, - }, - Location: LosAngeles, - Ascending: false, - }, - }, - { - name: "End_GroupByDay_Ascending", - start: mustParseTime("2000-10-28T00:00:00-07:00"), - end: mustParseTime("2000-11-01T00:00:00-08:00"), - points: []time.Duration{ - 24 * time.Hour, - 49 * time.Hour, - 73 * time.Hour, - }, - opt: query.IteratorOptions{ - Interval: query.Interval{ - Duration: 24 * time.Hour, - }, - Location: LosAngeles, - Ascending: true, - }, - }, - { - name: "End_GroupByDay_Descending", - start: mustParseTime("2000-10-28T00:00:00-07:00"), - end: mustParseTime("2000-11-01T00:00:00-08:00"), - points: []time.Duration{ - 73 * time.Hour, - 49 * time.Hour, - 24 * time.Hour, - }, - opt: query.IteratorOptions{ - Interval: query.Interval{ - Duration: 24 * time.Hour, - }, - Location: LosAngeles, - Ascending: false, - }, - }, - { - name: "End_GroupByHour_Ascending", - start: mustParseTime("2000-10-29T00:00:00-07:00"), - end: mustParseTime("2000-10-29T03:00:00-08:00"), - points: []time.Duration{ - 1 * time.Hour, - 2 * time.Hour, - 3 * time.Hour, - }, - opt: query.IteratorOptions{ - Interval: query.Interval{ - Duration: 1 * time.Hour, - }, - Location: LosAngeles, - Ascending: true, - }, - }, - { - name: "End_GroupByHour_Descending", - start: mustParseTime("2000-10-29T00:00:00-07:00"), - end: mustParseTime("2000-10-29T03:00:00-08:00"), - points: []time.Duration{ - 3 * time.Hour, - 2 * time.Hour, - 1 * time.Hour, - }, - opt: query.IteratorOptions{ - Interval: query.Interval{ - Duration: 1 * time.Hour, - }, - Location: LosAngeles, - Ascending: false, - }, - }, - } { - t.Run(tt.name, func(t *testing.T) { - opt := tt.opt - opt.StartTime = tt.start.UnixNano() - opt.EndTime = tt.end.UnixNano() - 1 - - points := make([][]query.Point, 0, len(tt.points)+1) - if opt.Ascending { - points = append(points, []query.Point{ - &query.FloatPoint{ - Time: tt.start.UnixNano(), - }, - }) - } - for _, d := range tt.points { - points = append(points, []query.Point{ - &query.FloatPoint{ - Time: tt.start.Add(d).UnixNano(), - Nil: true, - }, - }) - } - if !opt.Ascending { - points = append(points, []query.Point{ - &query.FloatPoint{ - Time: tt.start.UnixNano(), - }, - }) - } - itr := query.NewFillIterator( - &FloatIterator{Points: []query.FloatPoint{{Time: tt.start.UnixNano(), Value: 0}}}, - nil, - opt, - ) - - if a, err := (Iterators{itr}).ReadAll(); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if !deep.Equal(a, points) { - t.Fatalf("unexpected points: %s", spew.Sdump(a)) - } - }) - } -} - -// Iterators is a test wrapper for iterators. -type Iterators []query.Iterator - -// Next returns the next value from each iterator. -// Returns nil if any iterator returns a nil. 
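Note (illustrative aside): the `*_DST` fill cases above hinge on the fact that, across a daylight-saving transition, consecutive local midnights are not 24 hours apart. A standalone check of the offsets used in the `Start_GroupByDay_*` cases (24h, 47h, 71h), assuming a standard tz database:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	loc, err := time.LoadLocation("America/Los_Angeles")
	if err != nil {
		panic(err)
	}
	// 2000-04-02 is the spring-forward date in this zone, so that local day is
	// only 23 hours long and the later midnights land at 47h and 71h.
	start := time.Date(2000, 4, 1, 0, 0, 0, 0, loc)
	for day := 2; day <= 4; day++ {
		next := time.Date(2000, 4, day, 0, 0, 0, 0, loc)
		fmt.Println(next.Sub(start)) // 24h0m0s, then 47h0m0s, then 71h0m0s
	}
}
```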
-func (itrs Iterators) Next() ([]query.Point, error) { - a := make([]query.Point, len(itrs)) - for i, itr := range itrs { - switch itr := itr.(type) { - case query.FloatIterator: - fp, err := itr.Next() - if fp == nil || err != nil { - return nil, err - } - a[i] = fp - case query.IntegerIterator: - ip, err := itr.Next() - if ip == nil || err != nil { - return nil, err - } - a[i] = ip - case query.UnsignedIterator: - up, err := itr.Next() - if up == nil || err != nil { - return nil, err - } - a[i] = up - case query.StringIterator: - sp, err := itr.Next() - if sp == nil || err != nil { - return nil, err - } - a[i] = sp - case query.BooleanIterator: - bp, err := itr.Next() - if bp == nil || err != nil { - return nil, err - } - a[i] = bp - default: - panic(fmt.Sprintf("iterator type not supported: %T", itr)) - } - } - return a, nil -} - -// ReadAll reads all points from all iterators. -func (itrs Iterators) ReadAll() ([][]query.Point, error) { - var a [][]query.Point - - // Read from every iterator until a nil is encountered. - for { - points, err := itrs.Next() - if err != nil { - return nil, err - } else if points == nil { - break - } - a = append(a, query.Points(points).Clone()) - } - - // Close all iterators. - query.Iterators(itrs).Close() - - return a, nil -} - -func TestIteratorOptions_Window_Interval(t *testing.T) { - opt := query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10, - }, - } - - start, end := opt.Window(4) - if start != 0 { - t.Errorf("expected start to be 0, got %d", start) - } - if end != 10 { - t.Errorf("expected end to be 10, got %d", end) - } -} - -func TestIteratorOptions_Window_Offset(t *testing.T) { - opt := query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10, - Offset: 8, - }, - } - - start, end := opt.Window(14) - if start != 8 { - t.Errorf("expected start to be 8, got %d", start) - } - if end != 18 { - t.Errorf("expected end to be 18, got %d", end) - } -} - -func TestIteratorOptions_Window_Default(t *testing.T) { - opt := query.IteratorOptions{ - StartTime: 0, - EndTime: 60, - } - - start, end := opt.Window(34) - if start != 0 { - t.Errorf("expected start to be 0, got %d", start) - } - if end != 61 { - t.Errorf("expected end to be 61, got %d", end) - } -} - -func TestIteratorOptions_Window_Location(t *testing.T) { - for _, tt := range []struct { - now time.Time - start, end time.Time - interval time.Duration - }{ - { - now: mustParseTime("2000-04-02T12:14:15-07:00"), - start: mustParseTime("2000-04-02T00:00:00-08:00"), - end: mustParseTime("2000-04-03T00:00:00-07:00"), - interval: 24 * time.Hour, - }, - { - now: mustParseTime("2000-04-02T01:17:12-08:00"), - start: mustParseTime("2000-04-02T00:00:00-08:00"), - end: mustParseTime("2000-04-03T00:00:00-07:00"), - interval: 24 * time.Hour, - }, - { - now: mustParseTime("2000-04-02T01:14:15-08:00"), - start: mustParseTime("2000-04-02T00:00:00-08:00"), - end: mustParseTime("2000-04-02T03:00:00-07:00"), - interval: 2 * time.Hour, - }, - { - now: mustParseTime("2000-04-02T03:17:12-07:00"), - start: mustParseTime("2000-04-02T03:00:00-07:00"), - end: mustParseTime("2000-04-02T04:00:00-07:00"), - interval: 2 * time.Hour, - }, - { - now: mustParseTime("2000-04-02T01:14:15-08:00"), - start: mustParseTime("2000-04-02T01:00:00-08:00"), - end: mustParseTime("2000-04-02T03:00:00-07:00"), - interval: 1 * time.Hour, - }, - { - now: mustParseTime("2000-04-02T03:17:12-07:00"), - start: mustParseTime("2000-04-02T03:00:00-07:00"), - end: mustParseTime("2000-04-02T04:00:00-07:00"), - interval: 1 * time.Hour, - 
}, - { - now: mustParseTime("2000-10-29T12:14:15-08:00"), - start: mustParseTime("2000-10-29T00:00:00-07:00"), - end: mustParseTime("2000-10-30T00:00:00-08:00"), - interval: 24 * time.Hour, - }, - { - now: mustParseTime("2000-10-29T01:17:12-07:00"), - start: mustParseTime("2000-10-29T00:00:00-07:00"), - end: mustParseTime("2000-10-30T00:00:00-08:00"), - interval: 24 * time.Hour, - }, - { - now: mustParseTime("2000-10-29T01:14:15-07:00"), - start: mustParseTime("2000-10-29T00:00:00-07:00"), - end: mustParseTime("2000-10-29T02:00:00-08:00"), - interval: 2 * time.Hour, - }, - { - now: mustParseTime("2000-10-29T03:17:12-08:00"), - start: mustParseTime("2000-10-29T02:00:00-08:00"), - end: mustParseTime("2000-10-29T04:00:00-08:00"), - interval: 2 * time.Hour, - }, - { - now: mustParseTime("2000-10-29T01:14:15-07:00"), - start: mustParseTime("2000-10-29T01:00:00-07:00"), - end: mustParseTime("2000-10-29T01:00:00-08:00"), - interval: 1 * time.Hour, - }, - { - now: mustParseTime("2000-10-29T02:17:12-07:00"), - start: mustParseTime("2000-10-29T02:00:00-07:00"), - end: mustParseTime("2000-10-29T03:00:00-07:00"), - interval: 1 * time.Hour, - }, - } { - t.Run(fmt.Sprintf("%s/%s", tt.now, tt.interval), func(t *testing.T) { - opt := query.IteratorOptions{ - Location: LosAngeles, - Interval: query.Interval{ - Duration: tt.interval, - }, - } - start, end := opt.Window(tt.now.UnixNano()) - if have, want := time.Unix(0, start).In(LosAngeles), tt.start; !have.Equal(want) { - t.Errorf("unexpected start time: %s != %s", have, want) - } - if have, want := time.Unix(0, end).In(LosAngeles), tt.end; !have.Equal(want) { - t.Errorf("unexpected end time: %s != %s", have, want) - } - }) - } -} - -func TestIteratorOptions_Window_MinTime(t *testing.T) { - opt := query.IteratorOptions{ - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - Interval: query.Interval{ - Duration: time.Hour, - }, - } - expected := time.Unix(0, influxql.MinTime).Add(time.Hour).Truncate(time.Hour) - - start, end := opt.Window(influxql.MinTime) - if start != influxql.MinTime { - t.Errorf("expected start to be %d, got %d", influxql.MinTime, start) - } - if have, want := end, expected.UnixNano(); have != want { - t.Errorf("expected end to be %d, got %d", want, have) - } -} - -func TestIteratorOptions_Window_MaxTime(t *testing.T) { - opt := query.IteratorOptions{ - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - Interval: query.Interval{ - Duration: time.Hour, - }, - } - expected := time.Unix(0, influxql.MaxTime).Truncate(time.Hour) - - start, end := opt.Window(influxql.MaxTime) - if have, want := start, expected.UnixNano(); have != want { - t.Errorf("expected start to be %d, got %d", want, have) - } - if end != influxql.MaxTime { - t.Errorf("expected end to be %d, got %d", influxql.MaxTime, end) - } -} - -func TestIteratorOptions_SeekTime_Ascending(t *testing.T) { - opt := query.IteratorOptions{ - StartTime: 30, - EndTime: 60, - Ascending: true, - } - - time := opt.SeekTime() - if time != 30 { - t.Errorf("expected time to be 30, got %d", time) - } -} - -func TestIteratorOptions_SeekTime_Descending(t *testing.T) { - opt := query.IteratorOptions{ - StartTime: 30, - EndTime: 60, - Ascending: false, - } - - time := opt.SeekTime() - if time != 60 { - t.Errorf("expected time to be 60, got %d", time) - } -} - -func TestIteratorOptions_DerivativeInterval_Default(t *testing.T) { - opt := query.IteratorOptions{} - expected := query.Interval{Duration: time.Second} - actual := opt.DerivativeInterval() - if actual != expected { - 
t.Errorf("expected derivative interval to be %v, got %v", expected, actual) - } -} - -func TestIteratorOptions_DerivativeInterval_GroupBy(t *testing.T) { - opt := query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10, - Offset: 2, - }, - } - expected := query.Interval{Duration: 10} - actual := opt.DerivativeInterval() - if actual != expected { - t.Errorf("expected derivative interval to be %v, got %v", expected, actual) - } -} - -func TestIteratorOptions_DerivativeInterval_Call(t *testing.T) { - opt := query.IteratorOptions{ - Expr: &influxql.Call{ - Name: "mean", - Args: []influxql.Expr{ - &influxql.VarRef{Val: "value"}, - &influxql.DurationLiteral{Val: 2 * time.Second}, - }, - }, - Interval: query.Interval{ - Duration: 10, - Offset: 2, - }, - } - expected := query.Interval{Duration: 2 * time.Second} - actual := opt.DerivativeInterval() - if actual != expected { - t.Errorf("expected derivative interval to be %v, got %v", expected, actual) - } -} - -func TestIteratorOptions_ElapsedInterval_Default(t *testing.T) { - opt := query.IteratorOptions{} - expected := query.Interval{Duration: time.Nanosecond} - actual := opt.ElapsedInterval() - if actual != expected { - t.Errorf("expected elapsed interval to be %v, got %v", expected, actual) - } -} - -func TestIteratorOptions_ElapsedInterval_GroupBy(t *testing.T) { - opt := query.IteratorOptions{ - Interval: query.Interval{ - Duration: 10, - Offset: 2, - }, - } - expected := query.Interval{Duration: time.Nanosecond} - actual := opt.ElapsedInterval() - if actual != expected { - t.Errorf("expected elapsed interval to be %v, got %v", expected, actual) - } -} - -func TestIteratorOptions_ElapsedInterval_Call(t *testing.T) { - opt := query.IteratorOptions{ - Expr: &influxql.Call{ - Name: "mean", - Args: []influxql.Expr{ - &influxql.VarRef{Val: "value"}, - &influxql.DurationLiteral{Val: 2 * time.Second}, - }, - }, - Interval: query.Interval{ - Duration: 10, - Offset: 2, - }, - } - expected := query.Interval{Duration: 2 * time.Second} - actual := opt.ElapsedInterval() - if actual != expected { - t.Errorf("expected elapsed interval to be %v, got %v", expected, actual) - } -} - -func TestIteratorOptions_IntegralInterval_Default(t *testing.T) { - opt := query.IteratorOptions{} - expected := query.Interval{Duration: time.Second} - actual := opt.IntegralInterval() - if actual != expected { - t.Errorf("expected default integral interval to be %v, got %v", expected, actual) - } -} - -// Ensure iterator options can be marshaled to and from a binary format. -func TestIteratorOptions_MarshalBinary(t *testing.T) { - opt := &query.IteratorOptions{ - Expr: MustParseExpr("count(value)"), - Aux: []influxql.VarRef{{Val: "a"}, {Val: "b"}, {Val: "c"}}, - Interval: query.Interval{ - Duration: 1 * time.Hour, - Offset: 20 * time.Minute, - }, - Dimensions: []string{"region", "host"}, - GroupBy: map[string]struct{}{ - "region": {}, - "host": {}, - "cluster": {}, - }, - Fill: influxql.NumberFill, - FillValue: float64(100), - Condition: MustParseExpr(`foo = 'bar'`), - StartTime: 1000, - EndTime: 2000, - Ascending: true, - Limit: 100, - Offset: 200, - SLimit: 300, - SOffset: 400, - StripName: true, - Dedupe: true, - } - - // Marshal to binary. - buf, err := opt.MarshalBinary() - if err != nil { - t.Fatal(err) - } - - // Unmarshal back to an object. 
- var other query.IteratorOptions - if err := other.UnmarshalBinary(buf); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(&other, opt) { - t.Fatalf("unexpected options: %s", spew.Sdump(other)) - } -} - -// Ensure iterator can be encoded and decoded over a byte stream. -func TestIterator_EncodeDecode(t *testing.T) { - var buf bytes.Buffer - - // Create an iterator with several points & stats. - itr := &FloatIterator{ - Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 0}, - {Name: "mem", Tags: ParseTags("host=B"), Time: 1, Value: 10}, - }, - stats: query.IteratorStats{ - SeriesN: 2, - PointN: 0, - }, - } - - // Encode to the buffer. - enc := query.NewIteratorEncoder(&buf) - enc.StatsInterval = 100 * time.Millisecond - if err := enc.EncodeIterator(itr); err != nil { - t.Fatal(err) - } - - // Decode from the buffer. - dec := query.NewReaderIterator(context.Background(), &buf, influxql.Float, itr.Stats()) - - // Initial stats should exist immediately. - fdec := dec.(query.FloatIterator) - if stats := fdec.Stats(); !reflect.DeepEqual(stats, query.IteratorStats{SeriesN: 2, PointN: 0}) { - t.Fatalf("unexpected stats(initial): %#v", stats) - } - - // Read both points. - if p, err := fdec.Next(); err != nil { - t.Fatalf("unexpected error(0): %#v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 0, Value: 0}) { - t.Fatalf("unexpected point(0); %#v", p) - } - if p, err := fdec.Next(); err != nil { - t.Fatalf("unexpected error(1): %#v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "mem", Tags: ParseTags("host=B"), Time: 1, Value: 10}) { - t.Fatalf("unexpected point(1); %#v", p) - } - if p, err := fdec.Next(); err != nil { - t.Fatalf("unexpected error(eof): %#v", err) - } else if p != nil { - t.Fatalf("unexpected point(eof); %#v", p) - } -} - -// Test implementation of query.IntegerIterator -type IntegerConstIterator struct { - numPoints int - Closed bool - stats query.IteratorStats - point query.IntegerPoint -} - -func BenchmarkIterator_Aggregator(b *testing.B) { - input := &IntegerConstIterator{ - numPoints: b.N, - Closed: false, - stats: query.IteratorStats{}, - point: query.IntegerPoint{ - Name: "constPoint", - Value: 1, - }, - } - opt := query.IteratorOptions{ - Interval: query.Interval{ - Duration: 100 * time.Minute, - }, - Expr: &influxql.Call{ - Name: "count", - }, - } - - counter, err := query.NewCallIterator(input, opt) - if err != nil { - b.Fatalf("Bad counter: %v", err) - } - - b.ResetTimer() - point, err := counter.(query.IntegerIterator).Next() - if err != nil { - b.Fatalf("Unexpected error %v", err) - } - if point == nil { - b.Fatal("Expected point not to be nil") - } - if point.Value != int64(b.N) { - b.Fatalf("Expected %v != %v points", b.N, point.Value) - } -} - -func (itr *IntegerConstIterator) Stats() query.IteratorStats { return itr.stats } -func (itr *IntegerConstIterator) Close() error { itr.Closed = true; return nil } - -// Next returns the next value and shifts it off the beginning of the points slice. 
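// The encode/decode test above doubles as a recipe for shipping an iterator across a
// byte stream. A condensed sketch (error handling elided; src stands for any
// query.FloatIterator, and bytes, context, influxql, query are imported as in this
// file):
//
//	var buf bytes.Buffer
//	enc := query.NewIteratorEncoder(&buf)
//	_ = enc.EncodeIterator(src) // writes the points plus periodic stats frames
//
//	itr := query.NewReaderIterator(context.Background(), &buf, influxql.Float, src.Stats())
//	fitr := itr.(query.FloatIterator)
//	for p, err := fitr.Next(); p != nil && err == nil; p, err = fitr.Next() {
//		// p carries the same Name/Tags/Time/Value that src produced.
//	}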
-func (itr *IntegerConstIterator) Next() (*query.IntegerPoint, error) { - if itr.numPoints == 0 || itr.Closed { - return nil, nil - } - itr.numPoints-- - itr.point.Time++ - return &itr.point, nil -} - -// Test implementation of influxql.FloatIterator -type FloatIterator struct { - Context context.Context - Points []query.FloatPoint - Closed bool - Delay time.Duration - stats query.IteratorStats - point query.FloatPoint -} - -func (itr *FloatIterator) Stats() query.IteratorStats { return itr.stats } -func (itr *FloatIterator) Close() error { itr.Closed = true; return nil } - -// Next returns the next value and shifts it off the beginning of the points slice. -func (itr *FloatIterator) Next() (*query.FloatPoint, error) { - if len(itr.Points) == 0 || itr.Closed { - return nil, nil - } - - // If we have asked for a delay, then delay the returning of the point - // until either an (optional) context is done or the time has passed. - if itr.Delay > 0 { - var done <-chan struct{} - if itr.Context != nil { - done = itr.Context.Done() - } - - timer := time.NewTimer(itr.Delay) - select { - case <-timer.C: - case <-done: - timer.Stop() - return nil, itr.Context.Err() - } - } - v := &itr.Points[0] - itr.Points = itr.Points[1:] - - // Copy the returned point into a static point that we return. - // This actual storage engine returns a point from the same memory location - // so we need to test that the query engine does not misuse this memory. - itr.point.Name = v.Name - itr.point.Tags = v.Tags - itr.point.Time = v.Time - itr.point.Value = v.Value - itr.point.Nil = v.Nil - if len(itr.point.Aux) != len(v.Aux) { - itr.point.Aux = make([]interface{}, len(v.Aux)) - } - copy(itr.point.Aux, v.Aux) - return &itr.point, nil -} - -func FloatIterators(inputs []*FloatIterator) []query.Iterator { - itrs := make([]query.Iterator, len(inputs)) - for i := range itrs { - itrs[i] = query.Iterator(inputs[i]) - } - return itrs -} - -// Test implementation of query.IntegerIterator -type IntegerIterator struct { - Points []query.IntegerPoint - Closed bool - stats query.IteratorStats - point query.IntegerPoint -} - -func (itr *IntegerIterator) Stats() query.IteratorStats { return itr.stats } -func (itr *IntegerIterator) Close() error { itr.Closed = true; return nil } - -// Next returns the next value and shifts it off the beginning of the points slice. -func (itr *IntegerIterator) Next() (*query.IntegerPoint, error) { - if len(itr.Points) == 0 || itr.Closed { - return nil, nil - } - - v := &itr.Points[0] - itr.Points = itr.Points[1:] - - // Copy the returned point into a static point that we return. - // This actual storage engine returns a point from the same memory location - // so we need to test that the query engine does not misuse this memory. 
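// Because each call to Next returns a pointer to this same itr.point, callers that
// retain points must copy them first, exactly as Iterators.ReadAll does above via
// query.Points(points).Clone(). A minimal sketch of safe consumption (assuming some
// query.IntegerIterator named itr):
//
//	var kept []query.IntegerPoint
//	for {
//		p, err := itr.Next()
//		if err != nil || p == nil {
//			break
//		}
//		kept = append(kept, *p.Clone()) // clone before the next call overwrites p
//	}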
- itr.point.Name = v.Name - itr.point.Tags = v.Tags - itr.point.Time = v.Time - itr.point.Value = v.Value - itr.point.Nil = v.Nil - if len(itr.point.Aux) != len(v.Aux) { - itr.point.Aux = make([]interface{}, len(v.Aux)) - } - copy(itr.point.Aux, v.Aux) - return &itr.point, nil -} - -func IntegerIterators(inputs []*IntegerIterator) []query.Iterator { - itrs := make([]query.Iterator, len(inputs)) - for i := range itrs { - itrs[i] = query.Iterator(inputs[i]) - } - return itrs -} - -// Test implementation of query.UnsignedIterator -type UnsignedIterator struct { - Points []query.UnsignedPoint - Closed bool - stats query.IteratorStats - point query.UnsignedPoint -} - -func (itr *UnsignedIterator) Stats() query.IteratorStats { return itr.stats } -func (itr *UnsignedIterator) Close() error { itr.Closed = true; return nil } - -// Next returns the next value and shifts it off the beginning of the points slice. -func (itr *UnsignedIterator) Next() (*query.UnsignedPoint, error) { - if len(itr.Points) == 0 || itr.Closed { - return nil, nil - } - - v := &itr.Points[0] - itr.Points = itr.Points[1:] - - // Copy the returned point into a static point that we return. - // This actual storage engine returns a point from the same memory location - // so we need to test that the query engine does not misuse this memory. - itr.point.Name = v.Name - itr.point.Tags = v.Tags - itr.point.Time = v.Time - itr.point.Value = v.Value - itr.point.Nil = v.Nil - if len(itr.point.Aux) != len(v.Aux) { - itr.point.Aux = make([]interface{}, len(v.Aux)) - } - copy(itr.point.Aux, v.Aux) - return &itr.point, nil -} - -func UnsignedIterators(inputs []*UnsignedIterator) []query.Iterator { - itrs := make([]query.Iterator, len(inputs)) - for i := range itrs { - itrs[i] = query.Iterator(inputs[i]) - } - return itrs -} - -// Test implementation of query.StringIterator -type StringIterator struct { - Points []query.StringPoint - Closed bool - stats query.IteratorStats - point query.StringPoint -} - -func (itr *StringIterator) Stats() query.IteratorStats { return itr.stats } -func (itr *StringIterator) Close() error { itr.Closed = true; return nil } - -// Next returns the next value and shifts it off the beginning of the points slice. -func (itr *StringIterator) Next() (*query.StringPoint, error) { - if len(itr.Points) == 0 || itr.Closed { - return nil, nil - } - - v := &itr.Points[0] - itr.Points = itr.Points[1:] - - // Copy the returned point into a static point that we return. - // This actual storage engine returns a point from the same memory location - // so we need to test that the query engine does not misuse this memory. - itr.point.Name = v.Name - itr.point.Tags = v.Tags - itr.point.Time = v.Time - itr.point.Value = v.Value - itr.point.Nil = v.Nil - if len(itr.point.Aux) != len(v.Aux) { - itr.point.Aux = make([]interface{}, len(v.Aux)) - } - copy(itr.point.Aux, v.Aux) - return &itr.point, nil -} - -func StringIterators(inputs []*StringIterator) []query.Iterator { - itrs := make([]query.Iterator, len(inputs)) - for i := range itrs { - itrs[i] = query.Iterator(inputs[i]) - } - return itrs -} - -// Test implementation of query.BooleanIterator -type BooleanIterator struct { - Points []query.BooleanPoint - Closed bool - stats query.IteratorStats - point query.BooleanPoint -} - -func (itr *BooleanIterator) Stats() query.IteratorStats { return itr.stats } -func (itr *BooleanIterator) Close() error { itr.Closed = true; return nil } - -// Next returns the next value and shifts it off the beginning of the points slice. 
-func (itr *BooleanIterator) Next() (*query.BooleanPoint, error) { - if len(itr.Points) == 0 || itr.Closed { - return nil, nil - } - - v := &itr.Points[0] - itr.Points = itr.Points[1:] - - // Copy the returned point into a static point that we return. - // This actual storage engine returns a point from the same memory location - // so we need to test that the query engine does not misuse this memory. - itr.point.Name = v.Name - itr.point.Tags = v.Tags - itr.point.Time = v.Time - itr.point.Value = v.Value - itr.point.Nil = v.Nil - if len(itr.point.Aux) != len(v.Aux) { - itr.point.Aux = make([]interface{}, len(v.Aux)) - } - copy(itr.point.Aux, v.Aux) - return &itr.point, nil -} - -func BooleanIterators(inputs []*BooleanIterator) []query.Iterator { - itrs := make([]query.Iterator, len(inputs)) - for i := range itrs { - itrs[i] = query.Iterator(inputs[i]) - } - return itrs -} - -// MustParseSelectStatement parses a select statement. Panic on error. -func MustParseSelectStatement(s string) *influxql.SelectStatement { - stmt, err := influxql.NewParser(strings.NewReader(s)).ParseStatement() - if err != nil { - panic(err) - } - return stmt.(*influxql.SelectStatement) -} - -// MustParseExpr parses an expression. Panic on error. -func MustParseExpr(s string) influxql.Expr { - expr, err := influxql.NewParser(strings.NewReader(s)).ParseExpr() - if err != nil { - panic(err) - } - return expr -} - -// mustParseTime parses an ISO-8601 string. Panic on error. -func mustParseTime(s string) time.Time { - t, err := time.Parse(time.RFC3339, s) - if err != nil { - panic(err.Error()) - } - return t -} - -func mustLoadLocation(s string) *time.Location { - l, err := time.LoadLocation(s) - if err != nil { - panic(err) - } - return l -} - -var LosAngeles = mustLoadLocation("America/Los_Angeles") diff --git a/influxql/query/linear.go b/influxql/query/linear.go deleted file mode 100644 index 7dc50dac995..00000000000 --- a/influxql/query/linear.go +++ /dev/null @@ -1,31 +0,0 @@ -package query - -// linearFloat computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue) -// and returns the value of the point on the line with time windowTime -// y = mx + b -func linearFloat(windowTime, previousTime, nextTime int64, previousValue, nextValue float64) float64 { - m := (nextValue - previousValue) / float64(nextTime-previousTime) // the slope of the line - x := float64(windowTime - previousTime) // how far into the interval we are - b := previousValue - return m*x + b -} - -// linearInteger computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue) -// and returns the value of the point on the line with time windowTime -// y = mx + b -func linearInteger(windowTime, previousTime, nextTime int64, previousValue, nextValue int64) int64 { - m := float64(nextValue-previousValue) / float64(nextTime-previousTime) // the slope of the line - x := float64(windowTime - previousTime) // how far into the interval we are - b := float64(previousValue) - return int64(m*x + b) -} - -// linearUnsigned computes the slope of the line between the points (previousTime, previousValue) and (nextTime, nextValue) -// and returns the value of the point on the line with time windowTime -// y = mx + b -func linearUnsigned(windowTime, previousTime, nextTime int64, previousValue, nextValue uint64) uint64 { - m := float64(nextValue-previousValue) / float64(nextTime-previousTime) // the slope of the line - x := float64(windowTime - previousTime) // how far into the
interval we are - b := float64(previousValue) - return uint64(m*x + b) -} diff --git a/influxql/query/math.go b/influxql/query/math.go deleted file mode 100644 index 0670d8e2774..00000000000 --- a/influxql/query/math.go +++ /dev/null @@ -1,246 +0,0 @@ -package query - -import ( - "fmt" - "math" - - "github.com/influxdata/influxql" -) - -func isMathFunction(call *influxql.Call) bool { - switch call.Name { - case "abs", "sin", "cos", "tan", "asin", "acos", "atan", "atan2", "exp", "log", "ln", "log2", "log10", "sqrt", "pow", "floor", "ceil", "round": - return true - } - return false -} - -type MathTypeMapper struct{} - -func (MathTypeMapper) MapType(measurement *influxql.Measurement, field string) influxql.DataType { - return influxql.Unknown -} - -func (MathTypeMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) { - switch name { - case "sin", "cos", "tan", "atan", "exp", "log", "ln", "log2", "log10", "sqrt": - var arg0 influxql.DataType - if len(args) > 0 { - arg0 = args[0] - } - switch arg0 { - case influxql.Float, influxql.Integer, influxql.Unsigned, influxql.Unknown: - return influxql.Float, nil - default: - return influxql.Unknown, fmt.Errorf("invalid argument type for the first argument in %s(): %s", name, arg0) - } - case "asin", "acos": - var arg0 influxql.DataType - if len(args) > 0 { - arg0 = args[0] - } - switch arg0 { - case influxql.Float, influxql.Unknown: - return influxql.Float, nil - default: - return influxql.Unknown, fmt.Errorf("invalid argument type for the first argument in %s(): %s", name, arg0) - } - case "atan2", "pow": - var arg0, arg1 influxql.DataType - if len(args) > 0 { - arg0 = args[0] - } - if len(args) > 1 { - arg1 = args[1] - } - - switch arg0 { - case influxql.Float, influxql.Integer, influxql.Unsigned, influxql.Unknown: - // Pass through to verify the second argument. 
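// Stepping back to the linear.go helpers above: they evaluate y = m*x + b, where m is
// the slope between the two surrounding points and x is the distance from the previous
// point to the window time. A worked example (values chosen for illustration):
// interpolating the window at t=15 between (t=10, v=1.0) and (t=20, v=3.0) gives
// m = (3.0-1.0)/(20-10) = 0.2, x = 15-10 = 5, b = 1.0, so y = 0.2*5 + 1.0 = 2.0:
//
//	v := linearFloat(15, 10, 20, 1.0, 3.0) // == 2.0
//	i := linearInteger(15, 10, 20, 1, 3)   // == 2 (the float result truncated toward zero)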
- default: - return influxql.Unknown, fmt.Errorf("invalid argument type for the first argument in %s(): %s", name, arg0) - } - - switch arg1 { - case influxql.Float, influxql.Integer, influxql.Unsigned, influxql.Unknown: - return influxql.Float, nil - default: - return influxql.Unknown, fmt.Errorf("invalid argument type for the second argument in %s(): %s", name, arg1) - } - case "abs", "floor", "ceil", "round": - var arg0 influxql.DataType - if len(args) > 0 { - arg0 = args[0] - } - switch arg0 { - case influxql.Float, influxql.Integer, influxql.Unsigned, influxql.Unknown: - return args[0], nil - default: - return influxql.Unknown, fmt.Errorf("invalid argument type for the first argument in %s(): %s", name, arg0) - } - } - return influxql.Unknown, nil -} - -type MathValuer struct{} - -var _ influxql.CallValuer = MathValuer{} - -func (MathValuer) Value(key string) (interface{}, bool) { - return nil, false -} - -func (v MathValuer) Call(name string, args []interface{}) (interface{}, bool) { - if len(args) == 1 { - arg0 := args[0] - switch name { - case "abs": - switch arg0 := arg0.(type) { - case float64: - return math.Abs(arg0), true - case int64: - sign := arg0 >> 63 - return (arg0 ^ sign) - sign, true - case uint64: - return arg0, true - default: - return nil, true - } - case "sin": - if arg0, ok := asFloat(arg0); ok { - return math.Sin(arg0), true - } - return nil, true - case "cos": - if arg0, ok := asFloat(arg0); ok { - return math.Cos(arg0), true - } - return nil, true - case "tan": - if arg0, ok := asFloat(arg0); ok { - return math.Tan(arg0), true - } - return nil, true - case "floor": - switch arg0 := arg0.(type) { - case float64: - return math.Floor(arg0), true - case int64, uint64: - return arg0, true - default: - return nil, true - } - case "ceil": - switch arg0 := arg0.(type) { - case float64: - return math.Ceil(arg0), true - case int64, uint64: - return arg0, true - default: - return nil, true - } - case "round": - switch arg0 := arg0.(type) { - case float64: - return round(arg0), true - case int64, uint64: - return arg0, true - default: - return nil, true - } - case "asin": - if arg0, ok := asFloat(arg0); ok { - return math.Asin(arg0), true - } - return nil, true - case "acos": - if arg0, ok := asFloat(arg0); ok { - return math.Acos(arg0), true - } - return nil, true - case "atan": - if arg0, ok := asFloat(arg0); ok { - return math.Atan(arg0), true - } - return nil, true - case "exp": - if arg0, ok := asFloat(arg0); ok { - return math.Exp(arg0), true - } - return nil, true - case "ln": - if arg0, ok := asFloat(arg0); ok { - return math.Log(arg0), true - } - return nil, true - case "log2": - if arg0, ok := asFloat(arg0); ok { - return math.Log2(arg0), true - } - return nil, true - case "log10": - if arg0, ok := asFloat(arg0); ok { - return math.Log10(arg0), true - } - return nil, true - case "sqrt": - if arg0, ok := asFloat(arg0); ok { - return math.Sqrt(arg0), true - } - return nil, true - } - } else if len(args) == 2 { - arg0, arg1 := args[0], args[1] - switch name { - case "atan2": - if arg0, arg1, ok := asFloats(arg0, arg1); ok { - return math.Atan2(arg0, arg1), true - } - return nil, true - case "log": - if arg0, arg1, ok := asFloats(arg0, arg1); ok { - return math.Log(arg0) / math.Log(arg1), true - } - return nil, true - case "pow": - if arg0, arg1, ok := asFloats(arg0, arg1); ok { - return math.Pow(arg0, arg1), true - } - return nil, true - } - } - return nil, false -} - -func asFloat(x interface{}) (float64, bool) { - switch arg0 := x.(type) { - case float64: - return 
arg0, true - case int64: - return float64(arg0), true - case uint64: - return float64(arg0), true - default: - return 0, false - } -} - -func asFloats(x, y interface{}) (float64, float64, bool) { - arg0, ok := asFloat(x) - if !ok { - return 0, 0, false - } - arg1, ok := asFloat(y) - if !ok { - return 0, 0, false - } - return arg0, arg1, true -} - -func round(x float64) float64 { - t := math.Trunc(x) - if math.Abs(x-t) >= 0.5 { - return t + math.Copysign(1, x) - } - return t -} diff --git a/influxql/query/math_test.go b/influxql/query/math_test.go deleted file mode 100644 index 9e6c5e4b677..00000000000 --- a/influxql/query/math_test.go +++ /dev/null @@ -1,212 +0,0 @@ -package query_test - -import ( - "math" - "testing" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxql" -) - -func TestMath_TypeMapper(t *testing.T) { - for _, tt := range []struct { - s string - typ influxql.DataType - err bool - }{ - {s: `abs(f::float)`, typ: influxql.Float}, - {s: `abs(i::integer)`, typ: influxql.Integer}, - {s: `abs(u::unsigned)`, typ: influxql.Unsigned}, - {s: `abs(s::string)`, err: true}, - {s: `abs(b::boolean)`, err: true}, - {s: `sin(f::float)`, typ: influxql.Float}, - {s: `sin(i::integer)`, typ: influxql.Float}, - {s: `sin(u::unsigned)`, typ: influxql.Float}, - {s: `sin(s::string)`, err: true}, - {s: `sin(b::boolean)`, err: true}, - {s: `cos(f::float)`, typ: influxql.Float}, - {s: `cos(i::integer)`, typ: influxql.Float}, - {s: `cos(u::unsigned)`, typ: influxql.Float}, - {s: `cos(s::string)`, err: true}, - {s: `cos(b::boolean)`, err: true}, - {s: `tan(f::float)`, typ: influxql.Float}, - {s: `tan(i::integer)`, typ: influxql.Float}, - {s: `tan(u::unsigned)`, typ: influxql.Float}, - {s: `tan(s::string)`, err: true}, - {s: `tan(b::boolean)`, err: true}, - {s: `asin(f::float)`, typ: influxql.Float}, - {s: `asin(i::integer)`, err: true}, - {s: `asin(u::unsigned)`, err: true}, - {s: `asin(s::string)`, err: true}, - {s: `asin(b::boolean)`, err: true}, - {s: `acos(f::float)`, typ: influxql.Float}, - {s: `acos(i::integer)`, err: true}, - {s: `acos(u::unsigned)`, err: true}, - {s: `acos(s::string)`, err: true}, - {s: `acos(b::boolean)`, err: true}, - {s: `atan(f::float)`, typ: influxql.Float}, - {s: `atan(i::integer)`, typ: influxql.Float}, - {s: `atan(u::unsigned)`, typ: influxql.Float}, - {s: `atan(s::string)`, err: true}, - {s: `atan(b::boolean)`, err: true}, - {s: `atan2(y::float, x::float)`, typ: influxql.Float}, - {s: `atan2(y::integer, x::float)`, typ: influxql.Float}, - {s: `atan2(y::unsigned, x::float)`, typ: influxql.Float}, - {s: `atan2(y::string, x::float)`, err: true}, - {s: `atan2(y::boolean, x::float)`, err: true}, - {s: `atan2(y::float, x::float)`, typ: influxql.Float}, - {s: `atan2(y::float, x::integer)`, typ: influxql.Float}, - {s: `atan2(y::float, x::unsigned)`, typ: influxql.Float}, - {s: `atan2(y::float, x::string)`, err: true}, - {s: `atan2(y::float, x::boolean)`, err: true}, - {s: `exp(f::float)`, typ: influxql.Float}, - {s: `exp(i::integer)`, typ: influxql.Float}, - {s: `exp(u::unsigned)`, typ: influxql.Float}, - {s: `exp(s::string)`, err: true}, - {s: `exp(b::boolean)`, err: true}, - {s: `log(f::float)`, typ: influxql.Float}, - {s: `log(i::integer)`, typ: influxql.Float}, - {s: `log(u::unsigned)`, typ: influxql.Float}, - {s: `log(s::string)`, err: true}, - {s: `log(b::boolean)`, err: true}, - {s: `ln(f::float)`, typ: influxql.Float}, - {s: `ln(i::integer)`, typ: influxql.Float}, - {s: `ln(u::unsigned)`, typ: influxql.Float}, - {s: 
`ln(s::string)`, err: true}, - {s: `ln(b::boolean)`, err: true}, - {s: `log2(f::float)`, typ: influxql.Float}, - {s: `log2(i::integer)`, typ: influxql.Float}, - {s: `log2(u::unsigned)`, typ: influxql.Float}, - {s: `log2(s::string)`, err: true}, - {s: `log2(b::boolean)`, err: true}, - {s: `log10(f::float)`, typ: influxql.Float}, - {s: `log10(i::integer)`, typ: influxql.Float}, - {s: `log10(u::unsigned)`, typ: influxql.Float}, - {s: `log10(s::string)`, err: true}, - {s: `log10(b::boolean)`, err: true}, - {s: `sqrt(f::float)`, typ: influxql.Float}, - {s: `sqrt(i::integer)`, typ: influxql.Float}, - {s: `sqrt(u::unsigned)`, typ: influxql.Float}, - {s: `sqrt(s::string)`, err: true}, - {s: `sqrt(b::boolean)`, err: true}, - {s: `pow(y::float, x::float)`, typ: influxql.Float}, - {s: `pow(y::integer, x::float)`, typ: influxql.Float}, - {s: `pow(y::unsigned, x::float)`, typ: influxql.Float}, - {s: `pow(y::string, x::string)`, err: true}, - {s: `pow(y::boolean, x::boolean)`, err: true}, - {s: `pow(y::float, x::float)`, typ: influxql.Float}, - {s: `pow(y::float, x::integer)`, typ: influxql.Float}, - {s: `pow(y::float, x::unsigned)`, typ: influxql.Float}, - {s: `pow(y::float, x::string)`, err: true}, - {s: `pow(y::float, x::boolean)`, err: true}, - {s: `floor(f::float)`, typ: influxql.Float}, - {s: `floor(i::integer)`, typ: influxql.Integer}, - {s: `floor(u::unsigned)`, typ: influxql.Unsigned}, - {s: `floor(s::string)`, err: true}, - {s: `floor(b::boolean)`, err: true}, - {s: `ceil(f::float)`, typ: influxql.Float}, - {s: `ceil(i::integer)`, typ: influxql.Integer}, - {s: `ceil(u::unsigned)`, typ: influxql.Unsigned}, - {s: `ceil(s::string)`, err: true}, - {s: `ceil(b::boolean)`, err: true}, - {s: `round(f::float)`, typ: influxql.Float}, - {s: `round(i::integer)`, typ: influxql.Integer}, - {s: `round(u::unsigned)`, typ: influxql.Unsigned}, - {s: `round(s::string)`, err: true}, - {s: `round(b::boolean)`, err: true}, - } { - t.Run(tt.s, func(t *testing.T) { - expr := MustParseExpr(tt.s) - - typmap := influxql.TypeValuerEval{ - TypeMapper: query.MathTypeMapper{}, - } - if got, err := typmap.EvalType(expr); err != nil { - if !tt.err { - t.Errorf("unexpected error: %s", err) - } - } else if tt.err { - t.Error("expected error") - } else if want := tt.typ; got != want { - t.Errorf("unexpected type:\n\t-: \"%s\"\n\t+: \"%s\"", want, got) - } - }) - } -} - -func TestMathValuer_Call(t *testing.T) { - type values map[string]interface{} - for _, tt := range []struct { - s string - values values - exp interface{} - }{ - {s: `abs(f)`, values: values{"f": float64(2)}, exp: float64(2)}, - {s: `abs(f)`, values: values{"f": float64(-2)}, exp: float64(2)}, - {s: `abs(i)`, values: values{"i": int64(2)}, exp: int64(2)}, - {s: `abs(i)`, values: values{"i": int64(-2)}, exp: int64(2)}, - {s: `abs(u)`, values: values{"u": uint64(2)}, exp: uint64(2)}, - {s: `sin(f)`, values: values{"f": math.Pi / 2}, exp: math.Sin(math.Pi / 2)}, - {s: `sin(i)`, values: values{"i": int64(2)}, exp: math.Sin(2)}, - {s: `sin(u)`, values: values{"u": uint64(2)}, exp: math.Sin(2)}, - {s: `asin(f)`, values: values{"f": float64(0.5)}, exp: math.Asin(0.5)}, - {s: `cos(f)`, values: values{"f": math.Pi / 2}, exp: math.Cos(math.Pi / 2)}, - {s: `cos(i)`, values: values{"i": int64(2)}, exp: math.Cos(2)}, - {s: `cos(u)`, values: values{"u": uint64(2)}, exp: math.Cos(2)}, - {s: `acos(f)`, values: values{"f": float64(0.5)}, exp: math.Acos(0.5)}, - {s: `tan(f)`, values: values{"f": math.Pi / 2}, exp: math.Tan(math.Pi / 2)}, - {s: `tan(i)`, values: values{"i": 
int64(2)}, exp: math.Tan(2)}, - {s: `tan(u)`, values: values{"u": uint64(2)}, exp: math.Tan(2)}, - {s: `atan(f)`, values: values{"f": float64(2)}, exp: math.Atan(2)}, - {s: `atan(i)`, values: values{"i": int64(2)}, exp: math.Atan(2)}, - {s: `atan(u)`, values: values{"u": uint64(2)}, exp: math.Atan(2)}, - {s: `atan2(y, x)`, values: values{"y": float64(2), "x": float64(3)}, exp: math.Atan2(2, 3)}, - {s: `atan2(y, x)`, values: values{"y": int64(2), "x": int64(3)}, exp: math.Atan2(2, 3)}, - {s: `atan2(y, x)`, values: values{"y": uint64(2), "x": uint64(3)}, exp: math.Atan2(2, 3)}, - {s: `floor(f)`, values: values{"f": float64(2.5)}, exp: float64(2)}, - {s: `floor(i)`, values: values{"i": int64(2)}, exp: int64(2)}, - {s: `floor(u)`, values: values{"u": uint64(2)}, exp: uint64(2)}, - {s: `ceil(f)`, values: values{"f": float64(2.5)}, exp: float64(3)}, - {s: `ceil(i)`, values: values{"i": int64(2)}, exp: int64(2)}, - {s: `ceil(u)`, values: values{"u": uint64(2)}, exp: uint64(2)}, - {s: `round(f)`, values: values{"f": float64(2.4)}, exp: float64(2)}, - {s: `round(f)`, values: values{"f": float64(2.6)}, exp: float64(3)}, - {s: `round(i)`, values: values{"i": int64(2)}, exp: int64(2)}, - {s: `round(u)`, values: values{"u": uint64(2)}, exp: uint64(2)}, - {s: `exp(f)`, values: values{"f": float64(3)}, exp: math.Exp(3)}, - {s: `exp(i)`, values: values{"i": int64(3)}, exp: math.Exp(3)}, - {s: `exp(u)`, values: values{"u": uint64(3)}, exp: math.Exp(3)}, - {s: `log(f, 8)`, values: values{"f": float64(3)}, exp: math.Log(3) / math.Log(8)}, - {s: `log(i, 8)`, values: values{"i": int64(3)}, exp: math.Log(3) / math.Log(8)}, - {s: `log(u, 8)`, values: values{"u": uint64(3)}, exp: math.Log(3) / math.Log(8)}, - {s: `ln(f)`, values: values{"f": float64(3)}, exp: math.Log(3)}, - {s: `ln(i)`, values: values{"i": int64(3)}, exp: math.Log(3)}, - {s: `ln(u)`, values: values{"u": uint64(3)}, exp: math.Log(3)}, - {s: `log2(f)`, values: values{"f": float64(3)}, exp: math.Log2(3)}, - {s: `log2(i)`, values: values{"i": int64(3)}, exp: math.Log2(3)}, - {s: `log2(u)`, values: values{"u": uint64(3)}, exp: math.Log2(3)}, - {s: `log10(f)`, values: values{"f": float64(3)}, exp: math.Log10(3)}, - {s: `log10(i)`, values: values{"i": int64(3)}, exp: math.Log10(3)}, - {s: `log10(u)`, values: values{"u": uint64(3)}, exp: math.Log10(3)}, - {s: `sqrt(f)`, values: values{"f": float64(3)}, exp: math.Sqrt(3)}, - {s: `sqrt(i)`, values: values{"i": int64(3)}, exp: math.Sqrt(3)}, - {s: `sqrt(u)`, values: values{"u": uint64(3)}, exp: math.Sqrt(3)}, - {s: `pow(f, 2)`, values: values{"f": float64(4)}, exp: math.Pow(4, 2)}, - {s: `pow(i, 2)`, values: values{"i": int64(4)}, exp: math.Pow(4, 2)}, - {s: `pow(u, 2)`, values: values{"u": uint64(4)}, exp: math.Pow(4, 2)}, - } { - t.Run(tt.s, func(t *testing.T) { - expr := MustParseExpr(tt.s) - - valuer := influxql.ValuerEval{ - Valuer: influxql.MultiValuer( - influxql.MapValuer(tt.values), - query.MathValuer{}, - ), - } - if got, want := valuer.Eval(expr), tt.exp; got != want { - t.Errorf("unexpected value: %v != %v", want, got) - } - }) - } -} diff --git a/influxql/query/mocks/ShardGroup.go b/influxql/query/mocks/ShardGroup.go deleted file mode 100644 index 05bb7b7db70..00000000000 --- a/influxql/query/mocks/ShardGroup.go +++ /dev/null @@ -1,111 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/idpe/influxql/query (interfaces: ShardGroup) - -// Package mocks is a generated GoMock package. 
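// The valuer test above also documents the standard wiring for evaluating these math
// functions outside of a SELECT: layer query.MathValuer behind a MapValuer via
// influxql.MultiValuer. A condensed sketch (hypothetical values, same imports as
// math_test.go):
//
//	valuer := influxql.ValuerEval{
//		Valuer: influxql.MultiValuer(
//			influxql.MapValuer(map[string]interface{}{"f": 2.0}),
//			query.MathValuer{},
//		),
//	}
//	v := valuer.Eval(MustParseExpr(`sqrt(f)`)) // float64 equal to math.Sqrt(2)
//
// Note that the two-argument form log(x, b) evaluates a logarithm in base b, i.e.
// math.Log(x)/math.Log(b), as exercised by the `log(f, 8)` cases above.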
-package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - query "github.com/influxdata/influxdb/v2/influxql/query" - influxql "github.com/influxdata/influxql" -) - -// MockShardGroup is a mock of ShardGroup interface -type MockShardGroup struct { - ctrl *gomock.Controller - recorder *MockShardGroupMockRecorder -} - -// MockShardGroupMockRecorder is the mock recorder for MockShardGroup -type MockShardGroupMockRecorder struct { - mock *MockShardGroup -} - -// NewMockShardGroup creates a new mock instance -func NewMockShardGroup(ctrl *gomock.Controller) *MockShardGroup { - mock := &MockShardGroup{ctrl: ctrl} - mock.recorder = &MockShardGroupMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockShardGroup) EXPECT() *MockShardGroupMockRecorder { - return m.recorder -} - -// Close mocks base method -func (m *MockShardGroup) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close -func (mr *MockShardGroupMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockShardGroup)(nil).Close)) -} - -// CreateIterator mocks base method -func (m *MockShardGroup) CreateIterator(arg0 context.Context, arg1 *influxql.Measurement, arg2 query.IteratorOptions) (query.Iterator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateIterator", arg0, arg1, arg2) - ret0, _ := ret[0].(query.Iterator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateIterator indicates an expected call of CreateIterator -func (mr *MockShardGroupMockRecorder) CreateIterator(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateIterator", reflect.TypeOf((*MockShardGroup)(nil).CreateIterator), arg0, arg1, arg2) -} - -// FieldDimensions mocks base method -func (m *MockShardGroup) FieldDimensions(arg0 context.Context, arg1 *influxql.Measurement) (map[string]influxql.DataType, map[string]struct{}, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FieldDimensions", arg0, arg1) - ret0, _ := ret[0].(map[string]influxql.DataType) - ret1, _ := ret[1].(map[string]struct{}) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// FieldDimensions indicates an expected call of FieldDimensions -func (mr *MockShardGroupMockRecorder) FieldDimensions(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FieldDimensions", reflect.TypeOf((*MockShardGroup)(nil).FieldDimensions), arg0, arg1) -} - -// IteratorCost mocks base method -func (m *MockShardGroup) IteratorCost(arg0 context.Context, arg1 *influxql.Measurement, arg2 query.IteratorOptions) (query.IteratorCost, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IteratorCost", arg0, arg1, arg2) - ret0, _ := ret[0].(query.IteratorCost) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// IteratorCost indicates an expected call of IteratorCost -func (mr *MockShardGroupMockRecorder) IteratorCost(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IteratorCost", reflect.TypeOf((*MockShardGroup)(nil).IteratorCost), arg0, arg1, arg2) -} - -// MapType mocks base method -func (m *MockShardGroup) MapType(arg0 
context.Context, arg1 *influxql.Measurement, arg2 string) influxql.DataType { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MapType", arg0, arg1, arg2) - ret0, _ := ret[0].(influxql.DataType) - return ret0 -} - -// MapType indicates an expected call of MapType -func (mr *MockShardGroupMockRecorder) MapType(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MapType", reflect.TypeOf((*MockShardGroup)(nil).MapType), arg0, arg1, arg2) -} diff --git a/influxql/query/mocks/ShardMapper.go b/influxql/query/mocks/ShardMapper.go deleted file mode 100644 index d05757eae84..00000000000 --- a/influxql/query/mocks/ShardMapper.go +++ /dev/null @@ -1,52 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/idpe/influxql/query (interfaces: ShardMapper) - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - query "github.com/influxdata/influxdb/v2/influxql/query" - influxql "github.com/influxdata/influxql" -) - -// MockShardMapper is a mock of ShardMapper interface -type MockShardMapper struct { - ctrl *gomock.Controller - recorder *MockShardMapperMockRecorder -} - -// MockShardMapperMockRecorder is the mock recorder for MockShardMapper -type MockShardMapperMockRecorder struct { - mock *MockShardMapper -} - -// NewMockShardMapper creates a new mock instance -func NewMockShardMapper(ctrl *gomock.Controller) *MockShardMapper { - mock := &MockShardMapper{ctrl: ctrl} - mock.recorder = &MockShardMapperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockShardMapper) EXPECT() *MockShardMapperMockRecorder { - return m.recorder -} - -// MapShards mocks base method -func (m *MockShardMapper) MapShards(arg0 context.Context, arg1 influxql.Sources, arg2 influxql.TimeRange, arg3 query.SelectOptions) (query.ShardGroup, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MapShards", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(query.ShardGroup) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// MapShards indicates an expected call of MapShards -func (mr *MockShardMapperMockRecorder) MapShards(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MapShards", reflect.TypeOf((*MockShardMapper)(nil).MapShards), arg0, arg1, arg2, arg3) -} diff --git a/influxql/query/mocks/StatementExecutor.go b/influxql/query/mocks/StatementExecutor.go deleted file mode 100644 index ad6b77bcc4f..00000000000 --- a/influxql/query/mocks/StatementExecutor.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/idpe/influxql/query (interfaces: StatementExecutor) - -// Package mocks is a generated GoMock package. 
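// These generated mocks are typically wired into query-engine tests roughly as follows
// (a sketch, not part of the generated files; assumes a *testing.T named t and the
// gomock import used above):
//
//	ctrl := gomock.NewController(t)
//	defer ctrl.Finish()
//
//	shards := mocks.NewMockShardGroup(ctrl)
//	mapper := mocks.NewMockShardMapper(ctrl)
//	mapper.EXPECT().
//		MapShards(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
//		Return(shards, nil).
//		AnyTimes()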
-package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - query "github.com/influxdata/influxdb/v2/influxql/query" - influxql "github.com/influxdata/influxql" -) - -// MockStatementExecutor is a mock of StatementExecutor interface -type MockStatementExecutor struct { - ctrl *gomock.Controller - recorder *MockStatementExecutorMockRecorder -} - -// MockStatementExecutorMockRecorder is the mock recorder for MockStatementExecutor -type MockStatementExecutorMockRecorder struct { - mock *MockStatementExecutor -} - -// NewMockStatementExecutor creates a new mock instance -func NewMockStatementExecutor(ctrl *gomock.Controller) *MockStatementExecutor { - mock := &MockStatementExecutor{ctrl: ctrl} - mock.recorder = &MockStatementExecutorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockStatementExecutor) EXPECT() *MockStatementExecutorMockRecorder { - return m.recorder -} - -// ExecuteStatement mocks base method -func (m *MockStatementExecutor) ExecuteStatement(arg0 context.Context, arg1 influxql.Statement, arg2 *query.ExecutionContext) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExecuteStatement", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ExecuteStatement indicates an expected call of ExecuteStatement -func (mr *MockStatementExecutorMockRecorder) ExecuteStatement(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteStatement", reflect.TypeOf((*MockStatementExecutor)(nil).ExecuteStatement), arg0, arg1, arg2) -} diff --git a/influxql/query/neldermead/neldermead.go b/influxql/query/neldermead/neldermead.go deleted file mode 100644 index f2e628d7c80..00000000000 --- a/influxql/query/neldermead/neldermead.go +++ /dev/null @@ -1,239 +0,0 @@ -// Package neldermead is an implementation of the Nelder-Mead optimization method. -// Based on work by Michael F. Hutt: http://www.mikehutt.com/neldermead.html -package neldermead - -import "math" - -const ( - defaultMaxIterations = 1000 - // reflection coefficient - defaultAlpha = 1.0 - // contraction coefficient - defaultBeta = 0.5 - // expansion coefficient - defaultGamma = 2.0 -) - -// Optimizer represents the parameters to the Nelder-Mead simplex method. -type Optimizer struct { - // Maximum number of iterations. - MaxIterations int - // Reflection coefficient. - Alpha, - // Contraction coefficient. - Beta, - // Expansion coefficient. - Gamma float64 -} - -// New returns a new instance of Optimizer with all values set to the defaults. -func New() *Optimizer { - return &Optimizer{ - MaxIterations: defaultMaxIterations, - Alpha: defaultAlpha, - Beta: defaultBeta, - Gamma: defaultGamma, - } -} - -// Optimize applies the Nelder-Mead simplex method with the Optimizer's settings. 
-func (o *Optimizer) Optimize( - objfunc func([]float64) float64, - start []float64, - epsilon, - scale float64, -) (float64, []float64) { - n := len(start) - - //holds vertices of simplex - v := make([][]float64, n+1) - for i := range v { - v[i] = make([]float64, n) - } - - //value of function at each vertex - f := make([]float64, n+1) - - //reflection - coordinates - vr := make([]float64, n) - - //expansion - coordinates - ve := make([]float64, n) - - //contraction - coordinates - vc := make([]float64, n) - - //centroid - coordinates - vm := make([]float64, n) - - // create the initial simplex - // assume one of the vertices is 0,0 - - pn := scale * (math.Sqrt(float64(n+1)) - 1 + float64(n)) / (float64(n) * math.Sqrt(2)) - qn := scale * (math.Sqrt(float64(n+1)) - 1) / (float64(n) * math.Sqrt(2)) - - for i := 0; i < n; i++ { - v[0][i] = start[i] - } - - for i := 1; i <= n; i++ { - for j := 0; j < n; j++ { - if i-1 == j { - v[i][j] = pn + start[j] - } else { - v[i][j] = qn + start[j] - } - } - } - - // find the initial function values - for j := 0; j <= n; j++ { - f[j] = objfunc(v[j]) - } - - // begin the main loop of the minimization - for itr := 1; itr <= o.MaxIterations; itr++ { - - // find the indexes of the largest and smallest values - vg := 0 - vs := 0 - for i := 0; i <= n; i++ { - if f[i] > f[vg] { - vg = i - } - if f[i] < f[vs] { - vs = i - } - } - // find the index of the second largest value - vh := vs - for i := 0; i <= n; i++ { - if f[i] > f[vh] && f[i] < f[vg] { - vh = i - } - } - - // calculate the centroid - for i := 0; i <= n-1; i++ { - cent := 0.0 - for m := 0; m <= n; m++ { - if m != vg { - cent += v[m][i] - } - } - vm[i] = cent / float64(n) - } - - // reflect vg to new vertex vr - for i := 0; i <= n-1; i++ { - vr[i] = vm[i] + o.Alpha*(vm[i]-v[vg][i]) - } - - // value of function at reflection point - fr := objfunc(vr) - - if fr < f[vh] && fr >= f[vs] { - for i := 0; i <= n-1; i++ { - v[vg][i] = vr[i] - } - f[vg] = fr - } - - // investigate a step further in this direction - if fr < f[vs] { - for i := 0; i <= n-1; i++ { - ve[i] = vm[i] + o.Gamma*(vr[i]-vm[i]) - } - - // value of function at expansion point - fe := objfunc(ve) - - // by making fe < fr as opposed to fe < f[vs], - // Rosenbrocks function takes 63 iterations as opposed - // to 64 when using double variables. - - if fe < fr { - for i := 0; i <= n-1; i++ { - v[vg][i] = ve[i] - } - f[vg] = fe - } else { - for i := 0; i <= n-1; i++ { - v[vg][i] = vr[i] - } - f[vg] = fr - } - } - - // check to see if a contraction is necessary - if fr >= f[vh] { - if fr < f[vg] && fr >= f[vh] { - // perform outside contraction - for i := 0; i <= n-1; i++ { - vc[i] = vm[i] + o.Beta*(vr[i]-vm[i]) - } - } else { - // perform inside contraction - for i := 0; i <= n-1; i++ { - vc[i] = vm[i] - o.Beta*(vm[i]-v[vg][i]) - } - } - - // value of function at contraction point - fc := objfunc(vc) - - if fc < f[vg] { - for i := 0; i <= n-1; i++ { - v[vg][i] = vc[i] - } - f[vg] = fc - } else { - // at this point the contraction is not successful, - // we must halve the distance from vs to all the - // vertices of the simplex and then continue. 
- - for row := 0; row <= n; row++ { - if row != vs { - for i := 0; i <= n-1; i++ { - v[row][i] = v[vs][i] + (v[row][i]-v[vs][i])/2.0 - } - } - } - f[vg] = objfunc(v[vg]) - f[vh] = objfunc(v[vh]) - } - } - - // test for convergence - fsum := 0.0 - for i := 0; i <= n; i++ { - fsum += f[i] - } - favg := fsum / float64(n+1) - s := 0.0 - for i := 0; i <= n; i++ { - s += math.Pow((f[i]-favg), 2.0) / float64(n) - } - s = math.Sqrt(s) - if s < epsilon { - break - } - } - - // find the index of the smallest value - vs := 0 - for i := 0; i <= n; i++ { - if f[i] < f[vs] { - vs = i - } - } - - parameters := make([]float64, n) - for i := 0; i < n; i++ { - parameters[i] = v[vs][i] - } - - min := objfunc(v[vs]) - - return min, parameters -} diff --git a/influxql/query/neldermead/neldermead_test.go b/influxql/query/neldermead/neldermead_test.go deleted file mode 100644 index bccda220312..00000000000 --- a/influxql/query/neldermead/neldermead_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package neldermead_test - -import ( - "math" - "testing" - - "github.com/influxdata/influxdb/v2/influxql/query/neldermead" -) - -func round(num float64, precision float64) float64 { - rnum := num * math.Pow(10, precision) - var tnum float64 - if rnum < 0 { - tnum = math.Floor(rnum - 0.5) - } else { - tnum = math.Floor(rnum + 0.5) - } - rnum = tnum / math.Pow(10, precision) - return rnum -} - -func almostEqual(a, b, e float64) bool { - return math.Abs(a-b) < e -} - -func Test_Optimize(t *testing.T) { - - constraints := func(x []float64) { - for i := range x { - x[i] = round(x[i], 5) - } - } - // 100*(b-a^2)^2 + (1-a)^2 - // - // Obvious global minimum at (a,b) = (1,1) - // - // Useful visualization: - // https://www.wolframalpha.com/input/?i=minimize(100*(b-a%5E2)%5E2+%2B+(1-a)%5E2) - f := func(x []float64) float64 { - constraints(x) - // a = x[0] - // b = x[1] - return 100*(x[1]-x[0]*x[0])*(x[1]-x[0]*x[0]) + (1.0-x[0])*(1.0-x[0]) - } - - start := []float64{-1.2, 1.0} - - opt := neldermead.New() - epsilon := 1e-5 - min, parameters := opt.Optimize(f, start, epsilon, 1) - - if !almostEqual(min, 0, epsilon) { - t.Errorf("unexpected min: got %f exp 0", min) - } - - if !almostEqual(parameters[0], 1, 1e-2) { - t.Errorf("unexpected parameters[0]: got %f exp 1", parameters[0]) - } - - if !almostEqual(parameters[1], 1, 1e-2) { - t.Errorf("unexpected parameters[1]: got %f exp 1", parameters[1]) - } - -} diff --git a/influxql/query/point.gen.go b/influxql/query/point.gen.go deleted file mode 100644 index 27e511454b4..00000000000 --- a/influxql/query/point.gen.go +++ /dev/null @@ -1,1096 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! -// Source: point.gen.go.tmpl - -package query - -//lint:file-ignore U1000 Ignore all unused code, it's generated - -import ( - "context" - "encoding/binary" - "io" - - internal "github.com/influxdata/influxdb/v2/influxql/query/internal" - "google.golang.org/protobuf/proto" -) - -// FloatPoint represents a point with a float64 value. -// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. -// See TestPoint_Fields in influxql/point_test.go for more details. -type FloatPoint struct { - Name string - Tags Tags - - Time int64 - Value float64 - Aux []interface{} - - // Total number of points that were combined into this point from an aggregate. - // If this is zero, the point is not the result of an aggregate function. 
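// Returning to the Nelder-Mead optimizer above: beyond the Rosenbrock case in
// neldermead_test.go, any objective can be minimized the same way. A minimal sketch for
// the one-dimensional quadratic (x-3)^2 (hypothetical values):
//
//	opt := neldermead.New()
//	min, params := opt.Optimize(
//		func(x []float64) float64 { return (x[0] - 3) * (x[0] - 3) },
//		[]float64{10}, // starting point
//		1e-9,          // epsilon: stop once the simplex values have converged
//		1,             // scale of the initial simplex
//	)
//	// min is roughly 0 and params[0] is roughly 3.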
- Aggregated uint32 - Nil bool -} - -func (v *FloatPoint) name() string { return v.Name } -func (v *FloatPoint) tags() Tags { return v.Tags } -func (v *FloatPoint) time() int64 { return v.Time } -func (v *FloatPoint) nil() bool { return v.Nil } -func (v *FloatPoint) value() interface{} { - if v.Nil { - return nil - } - return v.Value -} -func (v *FloatPoint) aux() []interface{} { return v.Aux } - -// Clone returns a copy of v. -func (v *FloatPoint) Clone() *FloatPoint { - if v == nil { - return nil - } - - other := *v - if v.Aux != nil { - other.Aux = make([]interface{}, len(v.Aux)) - copy(other.Aux, v.Aux) - } - - return &other -} - -// CopyTo makes a deep copy into the point. -func (v *FloatPoint) CopyTo(other *FloatPoint) { - other.Name, other.Tags = v.Name, v.Tags - other.Time = v.Time - other.Value, other.Nil = v.Value, v.Nil - if v.Aux != nil { - if len(other.Aux) != len(v.Aux) { - other.Aux = make([]interface{}, len(v.Aux)) - } - copy(other.Aux, v.Aux) - } -} - -func encodeFloatPoint(p *FloatPoint) *internal.Point { - return &internal.Point{ - Name: proto.String(p.Name), - Tags: proto.String(p.Tags.ID()), - Time: proto.Int64(p.Time), - Nil: proto.Bool(p.Nil), - Aux: encodeAux(p.Aux), - Aggregated: proto.Uint32(p.Aggregated), - - FloatValue: proto.Float64(p.Value), - } -} - -func decodeFloatPoint(pb *internal.Point) *FloatPoint { - return &FloatPoint{ - Name: pb.GetName(), - Tags: newTagsID(pb.GetTags()), - Time: pb.GetTime(), - Nil: pb.GetNil(), - Aux: decodeAux(pb.Aux), - Aggregated: pb.GetAggregated(), - Value: pb.GetFloatValue(), - } -} - -// floatPoints represents a slice of points sortable by value. -type floatPoints []FloatPoint - -func (a floatPoints) Len() int { return len(a) } -func (a floatPoints) Less(i, j int) bool { - if a[i].Time != a[j].Time { - return a[i].Time < a[j].Time - } - return a[i].Value < a[j].Value -} -func (a floatPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// floatPointsByValue represents a slice of points sortable by value. -type floatPointsByValue []FloatPoint - -func (a floatPointsByValue) Len() int { return len(a) } - -func (a floatPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -func (a floatPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// floatPointsByTime represents a slice of points sortable by value. -type floatPointsByTime []FloatPoint - -func (a floatPointsByTime) Len() int { return len(a) } -func (a floatPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } -func (a floatPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// floatPointByFunc represents a slice of points sortable by a function. -type floatPointsByFunc struct { - points []FloatPoint - cmp func(a, b *FloatPoint) bool -} - -func (a *floatPointsByFunc) Len() int { return len(a.points) } -func (a *floatPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } -func (a *floatPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } - -func (a *floatPointsByFunc) Push(x interface{}) { - a.points = append(a.points, x.(FloatPoint)) -} - -func (a *floatPointsByFunc) Pop() interface{} { - p := a.points[len(a.points)-1] - a.points = a.points[:len(a.points)-1] - return p -} - -func floatPointsSortBy(points []FloatPoint, cmp func(a, b *FloatPoint) bool) *floatPointsByFunc { - return &floatPointsByFunc{ - points: points, - cmp: cmp, - } -} - -// FloatPointEncoder encodes FloatPoint points to a writer. 
-type FloatPointEncoder struct { - w io.Writer -} - -// NewFloatPointEncoder returns a new instance of FloatPointEncoder that writes to w. -func NewFloatPointEncoder(w io.Writer) *FloatPointEncoder { - return &FloatPointEncoder{w: w} -} - -// EncodeFloatPoint marshals and writes p to the underlying writer. -func (enc *FloatPointEncoder) EncodeFloatPoint(p *FloatPoint) error { - // Marshal to bytes. - buf, err := proto.Marshal(encodeFloatPoint(p)) - if err != nil { - return err - } - - // Write the length. - if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { - return err - } - - // Write the encoded point. - if _, err := enc.w.Write(buf); err != nil { - return err - } - return nil -} - -// FloatPointDecoder decodes FloatPoint points from a reader. -type FloatPointDecoder struct { - r io.Reader - stats IteratorStats - ctx context.Context -} - -// NewFloatPointDecoder returns a new instance of FloatPointDecoder that reads from r. -func NewFloatPointDecoder(ctx context.Context, r io.Reader) *FloatPointDecoder { - return &FloatPointDecoder{r: r, ctx: ctx} -} - -// Stats returns iterator stats embedded within the stream. -func (dec *FloatPointDecoder) Stats() IteratorStats { return dec.stats } - -// DecodeFloatPoint reads from the underlying reader and unmarshals into p. -func (dec *FloatPointDecoder) DecodeFloatPoint(p *FloatPoint) error { - for { - // Read length. - var sz uint32 - if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { - return err - } - - // Read point data. - buf := make([]byte, sz) - if _, err := io.ReadFull(dec.r, buf); err != nil { - return err - } - - // Unmarshal into point. - var pb internal.Point - if err := proto.Unmarshal(buf, &pb); err != nil { - return err - } - - // If the point contains stats then read stats and retry. - if pb.Stats != nil { - dec.stats = decodeIteratorStats(pb.Stats) - continue - } - - // Decode into point object. - *p = *decodeFloatPoint(&pb) - - return nil - } -} - -// IntegerPoint represents a point with a int64 value. -// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. -// See TestPoint_Fields in influxql/point_test.go for more details. -type IntegerPoint struct { - Name string - Tags Tags - - Time int64 - Value int64 - Aux []interface{} - - // Total number of points that were combined into this point from an aggregate. - // If this is zero, the point is not the result of an aggregate function. - Aggregated uint32 - Nil bool -} - -func (v *IntegerPoint) name() string { return v.Name } -func (v *IntegerPoint) tags() Tags { return v.Tags } -func (v *IntegerPoint) time() int64 { return v.Time } -func (v *IntegerPoint) nil() bool { return v.Nil } -func (v *IntegerPoint) value() interface{} { - if v.Nil { - return nil - } - return v.Value -} -func (v *IntegerPoint) aux() []interface{} { return v.Aux } - -// Clone returns a copy of v. -func (v *IntegerPoint) Clone() *IntegerPoint { - if v == nil { - return nil - } - - other := *v - if v.Aux != nil { - other.Aux = make([]interface{}, len(v.Aux)) - copy(other.Aux, v.Aux) - } - - return &other -} - -// CopyTo makes a deep copy into the point. 
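// The generated per-type encoders and decoders above all share one wire format: a
// big-endian uint32 length prefix followed by the protobuf-encoded point, with any
// embedded stats message captured by the decoder before the next point is returned.
// A round-trip sketch for the float variant (error handling elided; bytes and context
// imported as elsewhere in this package):
//
//	var buf bytes.Buffer
//	enc := NewFloatPointEncoder(&buf)
//	_ = enc.EncodeFloatPoint(&FloatPoint{Name: "cpu", Time: 1, Value: 2.5})
//
//	var p FloatPoint
//	dec := NewFloatPointDecoder(context.Background(), &buf)
//	_ = dec.DecodeFloatPoint(&p) // p now holds the encoded point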
-func (v *IntegerPoint) CopyTo(other *IntegerPoint) { - other.Name, other.Tags = v.Name, v.Tags - other.Time = v.Time - other.Value, other.Nil = v.Value, v.Nil - if v.Aux != nil { - if len(other.Aux) != len(v.Aux) { - other.Aux = make([]interface{}, len(v.Aux)) - } - copy(other.Aux, v.Aux) - } -} - -func encodeIntegerPoint(p *IntegerPoint) *internal.Point { - return &internal.Point{ - Name: proto.String(p.Name), - Tags: proto.String(p.Tags.ID()), - Time: proto.Int64(p.Time), - Nil: proto.Bool(p.Nil), - Aux: encodeAux(p.Aux), - Aggregated: proto.Uint32(p.Aggregated), - - IntegerValue: proto.Int64(p.Value), - } -} - -func decodeIntegerPoint(pb *internal.Point) *IntegerPoint { - return &IntegerPoint{ - Name: pb.GetName(), - Tags: newTagsID(pb.GetTags()), - Time: pb.GetTime(), - Nil: pb.GetNil(), - Aux: decodeAux(pb.Aux), - Aggregated: pb.GetAggregated(), - Value: pb.GetIntegerValue(), - } -} - -// integerPoints represents a slice of points sortable by value. -type integerPoints []IntegerPoint - -func (a integerPoints) Len() int { return len(a) } -func (a integerPoints) Less(i, j int) bool { - if a[i].Time != a[j].Time { - return a[i].Time < a[j].Time - } - return a[i].Value < a[j].Value -} -func (a integerPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// integerPointsByValue represents a slice of points sortable by value. -type integerPointsByValue []IntegerPoint - -func (a integerPointsByValue) Len() int { return len(a) } - -func (a integerPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -func (a integerPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// integerPointsByTime represents a slice of points sortable by value. -type integerPointsByTime []IntegerPoint - -func (a integerPointsByTime) Len() int { return len(a) } -func (a integerPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } -func (a integerPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// integerPointByFunc represents a slice of points sortable by a function. -type integerPointsByFunc struct { - points []IntegerPoint - cmp func(a, b *IntegerPoint) bool -} - -func (a *integerPointsByFunc) Len() int { return len(a.points) } -func (a *integerPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } -func (a *integerPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } - -func (a *integerPointsByFunc) Push(x interface{}) { - a.points = append(a.points, x.(IntegerPoint)) -} - -func (a *integerPointsByFunc) Pop() interface{} { - p := a.points[len(a.points)-1] - a.points = a.points[:len(a.points)-1] - return p -} - -func integerPointsSortBy(points []IntegerPoint, cmp func(a, b *IntegerPoint) bool) *integerPointsByFunc { - return &integerPointsByFunc{ - points: points, - cmp: cmp, - } -} - -// IntegerPointEncoder encodes IntegerPoint points to a writer. -type IntegerPointEncoder struct { - w io.Writer -} - -// NewIntegerPointEncoder returns a new instance of IntegerPointEncoder that writes to w. -func NewIntegerPointEncoder(w io.Writer) *IntegerPointEncoder { - return &IntegerPointEncoder{w: w} -} - -// EncodeIntegerPoint marshals and writes p to the underlying writer. -func (enc *IntegerPointEncoder) EncodeIntegerPoint(p *IntegerPoint) error { - // Marshal to bytes. - buf, err := proto.Marshal(encodeIntegerPoint(p)) - if err != nil { - return err - } - - // Write the length. - if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { - return err - } - - // Write the encoded point. 
- if _, err := enc.w.Write(buf); err != nil { - return err - } - return nil -} - -// IntegerPointDecoder decodes IntegerPoint points from a reader. -type IntegerPointDecoder struct { - r io.Reader - stats IteratorStats - ctx context.Context -} - -// NewIntegerPointDecoder returns a new instance of IntegerPointDecoder that reads from r. -func NewIntegerPointDecoder(ctx context.Context, r io.Reader) *IntegerPointDecoder { - return &IntegerPointDecoder{r: r, ctx: ctx} -} - -// Stats returns iterator stats embedded within the stream. -func (dec *IntegerPointDecoder) Stats() IteratorStats { return dec.stats } - -// DecodeIntegerPoint reads from the underlying reader and unmarshals into p. -func (dec *IntegerPointDecoder) DecodeIntegerPoint(p *IntegerPoint) error { - for { - // Read length. - var sz uint32 - if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { - return err - } - - // Read point data. - buf := make([]byte, sz) - if _, err := io.ReadFull(dec.r, buf); err != nil { - return err - } - - // Unmarshal into point. - var pb internal.Point - if err := proto.Unmarshal(buf, &pb); err != nil { - return err - } - - // If the point contains stats then read stats and retry. - if pb.Stats != nil { - dec.stats = decodeIteratorStats(pb.Stats) - continue - } - - // Decode into point object. - *p = *decodeIntegerPoint(&pb) - - return nil - } -} - -// UnsignedPoint represents a point with a uint64 value. -// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. -// See TestPoint_Fields in influxql/point_test.go for more details. -type UnsignedPoint struct { - Name string - Tags Tags - - Time int64 - Value uint64 - Aux []interface{} - - // Total number of points that were combined into this point from an aggregate. - // If this is zero, the point is not the result of an aggregate function. - Aggregated uint32 - Nil bool -} - -func (v *UnsignedPoint) name() string { return v.Name } -func (v *UnsignedPoint) tags() Tags { return v.Tags } -func (v *UnsignedPoint) time() int64 { return v.Time } -func (v *UnsignedPoint) nil() bool { return v.Nil } -func (v *UnsignedPoint) value() interface{} { - if v.Nil { - return nil - } - return v.Value -} -func (v *UnsignedPoint) aux() []interface{} { return v.Aux } - -// Clone returns a copy of v. -func (v *UnsignedPoint) Clone() *UnsignedPoint { - if v == nil { - return nil - } - - other := *v - if v.Aux != nil { - other.Aux = make([]interface{}, len(v.Aux)) - copy(other.Aux, v.Aux) - } - - return &other -} - -// CopyTo makes a deep copy into the point. -func (v *UnsignedPoint) CopyTo(other *UnsignedPoint) { - other.Name, other.Tags = v.Name, v.Tags - other.Time = v.Time - other.Value, other.Nil = v.Value, v.Nil - if v.Aux != nil { - if len(other.Aux) != len(v.Aux) { - other.Aux = make([]interface{}, len(v.Aux)) - } - copy(other.Aux, v.Aux) - } -} - -func encodeUnsignedPoint(p *UnsignedPoint) *internal.Point { - return &internal.Point{ - Name: proto.String(p.Name), - Tags: proto.String(p.Tags.ID()), - Time: proto.Int64(p.Time), - Nil: proto.Bool(p.Nil), - Aux: encodeAux(p.Aux), - Aggregated: proto.Uint32(p.Aggregated), - } -} - -func decodeUnsignedPoint(pb *internal.Point) *UnsignedPoint { - return &UnsignedPoint{ - Name: pb.GetName(), - Tags: newTagsID(pb.GetTags()), - Time: pb.GetTime(), - Nil: pb.GetNil(), - Aux: decodeAux(pb.Aux), - Aggregated: pb.GetAggregated(), - Value: pb.GetUnsignedValue(), - } -} - -// unsignedPoints represents a slice of points sortable by value. 
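The comparator-based helpers above (integerPointsSortBy and the integerPointsByFunc container) are unexported, so they are only usable from inside the query package. A package-internal sketch of sorting with a custom comparator (the helper function name is hypothetical):

```go
package query

import "sort"

// sortIntegerPointsByValueDesc orders points by descending value using the
// generated comparator-driven sorter. sort.Sort is not stable, so points the
// comparator considers equal may be reordered.
func sortIntegerPointsByValueDesc(points []IntegerPoint) {
	sort.Sort(integerPointsSortBy(points, func(a, b *IntegerPoint) bool {
		return a.Value > b.Value
	}))
}
```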
-type unsignedPoints []UnsignedPoint - -func (a unsignedPoints) Len() int { return len(a) } -func (a unsignedPoints) Less(i, j int) bool { - if a[i].Time != a[j].Time { - return a[i].Time < a[j].Time - } - return a[i].Value < a[j].Value -} -func (a unsignedPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// unsignedPointsByValue represents a slice of points sortable by value. -type unsignedPointsByValue []UnsignedPoint - -func (a unsignedPointsByValue) Len() int { return len(a) } - -func (a unsignedPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -func (a unsignedPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// unsignedPointsByTime represents a slice of points sortable by value. -type unsignedPointsByTime []UnsignedPoint - -func (a unsignedPointsByTime) Len() int { return len(a) } -func (a unsignedPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } -func (a unsignedPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// unsignedPointByFunc represents a slice of points sortable by a function. -type unsignedPointsByFunc struct { - points []UnsignedPoint - cmp func(a, b *UnsignedPoint) bool -} - -func (a *unsignedPointsByFunc) Len() int { return len(a.points) } -func (a *unsignedPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } -func (a *unsignedPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } - -func (a *unsignedPointsByFunc) Push(x interface{}) { - a.points = append(a.points, x.(UnsignedPoint)) -} - -func (a *unsignedPointsByFunc) Pop() interface{} { - p := a.points[len(a.points)-1] - a.points = a.points[:len(a.points)-1] - return p -} - -func unsignedPointsSortBy(points []UnsignedPoint, cmp func(a, b *UnsignedPoint) bool) *unsignedPointsByFunc { - return &unsignedPointsByFunc{ - points: points, - cmp: cmp, - } -} - -// UnsignedPointEncoder encodes UnsignedPoint points to a writer. -type UnsignedPointEncoder struct { - w io.Writer -} - -// NewUnsignedPointEncoder returns a new instance of UnsignedPointEncoder that writes to w. -func NewUnsignedPointEncoder(w io.Writer) *UnsignedPointEncoder { - return &UnsignedPointEncoder{w: w} -} - -// EncodeUnsignedPoint marshals and writes p to the underlying writer. -func (enc *UnsignedPointEncoder) EncodeUnsignedPoint(p *UnsignedPoint) error { - // Marshal to bytes. - buf, err := proto.Marshal(encodeUnsignedPoint(p)) - if err != nil { - return err - } - - // Write the length. - if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { - return err - } - - // Write the encoded point. - if _, err := enc.w.Write(buf); err != nil { - return err - } - return nil -} - -// UnsignedPointDecoder decodes UnsignedPoint points from a reader. -type UnsignedPointDecoder struct { - r io.Reader - stats IteratorStats - ctx context.Context -} - -// NewUnsignedPointDecoder returns a new instance of UnsignedPointDecoder that reads from r. -func NewUnsignedPointDecoder(ctx context.Context, r io.Reader) *UnsignedPointDecoder { - return &UnsignedPointDecoder{r: r, ctx: ctx} -} - -// Stats returns iterator stats embedded within the stream. -func (dec *UnsignedPointDecoder) Stats() IteratorStats { return dec.stats } - -// DecodeUnsignedPoint reads from the underlying reader and unmarshals into p. -func (dec *UnsignedPointDecoder) DecodeUnsignedPoint(p *UnsignedPoint) error { - for { - // Read length. - var sz uint32 - if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { - return err - } - - // Read point data. 
- buf := make([]byte, sz) - if _, err := io.ReadFull(dec.r, buf); err != nil { - return err - } - - // Unmarshal into point. - var pb internal.Point - if err := proto.Unmarshal(buf, &pb); err != nil { - return err - } - - // If the point contains stats then read stats and retry. - if pb.Stats != nil { - dec.stats = decodeIteratorStats(pb.Stats) - continue - } - - // Decode into point object. - *p = *decodeUnsignedPoint(&pb) - - return nil - } -} - -// StringPoint represents a point with a string value. -// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. -// See TestPoint_Fields in influxql/point_test.go for more details. -type StringPoint struct { - Name string - Tags Tags - - Time int64 - Value string - Aux []interface{} - - // Total number of points that were combined into this point from an aggregate. - // If this is zero, the point is not the result of an aggregate function. - Aggregated uint32 - Nil bool -} - -func (v *StringPoint) name() string { return v.Name } -func (v *StringPoint) tags() Tags { return v.Tags } -func (v *StringPoint) time() int64 { return v.Time } -func (v *StringPoint) nil() bool { return v.Nil } -func (v *StringPoint) value() interface{} { - if v.Nil { - return nil - } - return v.Value -} -func (v *StringPoint) aux() []interface{} { return v.Aux } - -// Clone returns a copy of v. -func (v *StringPoint) Clone() *StringPoint { - if v == nil { - return nil - } - - other := *v - if v.Aux != nil { - other.Aux = make([]interface{}, len(v.Aux)) - copy(other.Aux, v.Aux) - } - - return &other -} - -// CopyTo makes a deep copy into the point. -func (v *StringPoint) CopyTo(other *StringPoint) { - other.Name, other.Tags = v.Name, v.Tags - other.Time = v.Time - other.Value, other.Nil = v.Value, v.Nil - if v.Aux != nil { - if len(other.Aux) != len(v.Aux) { - other.Aux = make([]interface{}, len(v.Aux)) - } - copy(other.Aux, v.Aux) - } -} - -func encodeStringPoint(p *StringPoint) *internal.Point { - return &internal.Point{ - Name: proto.String(p.Name), - Tags: proto.String(p.Tags.ID()), - Time: proto.Int64(p.Time), - Nil: proto.Bool(p.Nil), - Aux: encodeAux(p.Aux), - Aggregated: proto.Uint32(p.Aggregated), - - StringValue: proto.String(p.Value), - } -} - -func decodeStringPoint(pb *internal.Point) *StringPoint { - return &StringPoint{ - Name: pb.GetName(), - Tags: newTagsID(pb.GetTags()), - Time: pb.GetTime(), - Nil: pb.GetNil(), - Aux: decodeAux(pb.Aux), - Aggregated: pb.GetAggregated(), - Value: pb.GetStringValue(), - } -} - -// stringPoints represents a slice of points sortable by value. -type stringPoints []StringPoint - -func (a stringPoints) Len() int { return len(a) } -func (a stringPoints) Less(i, j int) bool { - if a[i].Time != a[j].Time { - return a[i].Time < a[j].Time - } - return a[i].Value < a[j].Value -} -func (a stringPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// stringPointsByValue represents a slice of points sortable by value. -type stringPointsByValue []StringPoint - -func (a stringPointsByValue) Len() int { return len(a) } - -func (a stringPointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -func (a stringPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// stringPointsByTime represents a slice of points sortable by value. 
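Each point type carries a Nil flag next to its Value, and the unexported value() accessor is what lets the engine distinguish "no value" from the type's zero value. A small package-internal sketch (hypothetical helper name):

```go
package query

// stringPointValue returns nil when the point's Nil flag is set, and the
// concrete string otherwise, mirroring how value() is read by the engine.
func stringPointValue(p *StringPoint) interface{} {
	return p.value() // nil if p.Nil, otherwise p.Value
}
```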
-type stringPointsByTime []StringPoint - -func (a stringPointsByTime) Len() int { return len(a) } -func (a stringPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } -func (a stringPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// stringPointByFunc represents a slice of points sortable by a function. -type stringPointsByFunc struct { - points []StringPoint - cmp func(a, b *StringPoint) bool -} - -func (a *stringPointsByFunc) Len() int { return len(a.points) } -func (a *stringPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } -func (a *stringPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } - -func (a *stringPointsByFunc) Push(x interface{}) { - a.points = append(a.points, x.(StringPoint)) -} - -func (a *stringPointsByFunc) Pop() interface{} { - p := a.points[len(a.points)-1] - a.points = a.points[:len(a.points)-1] - return p -} - -func stringPointsSortBy(points []StringPoint, cmp func(a, b *StringPoint) bool) *stringPointsByFunc { - return &stringPointsByFunc{ - points: points, - cmp: cmp, - } -} - -// StringPointEncoder encodes StringPoint points to a writer. -type StringPointEncoder struct { - w io.Writer -} - -// NewStringPointEncoder returns a new instance of StringPointEncoder that writes to w. -func NewStringPointEncoder(w io.Writer) *StringPointEncoder { - return &StringPointEncoder{w: w} -} - -// EncodeStringPoint marshals and writes p to the underlying writer. -func (enc *StringPointEncoder) EncodeStringPoint(p *StringPoint) error { - // Marshal to bytes. - buf, err := proto.Marshal(encodeStringPoint(p)) - if err != nil { - return err - } - - // Write the length. - if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { - return err - } - - // Write the encoded point. - if _, err := enc.w.Write(buf); err != nil { - return err - } - return nil -} - -// StringPointDecoder decodes StringPoint points from a reader. -type StringPointDecoder struct { - r io.Reader - stats IteratorStats - ctx context.Context -} - -// NewStringPointDecoder returns a new instance of StringPointDecoder that reads from r. -func NewStringPointDecoder(ctx context.Context, r io.Reader) *StringPointDecoder { - return &StringPointDecoder{r: r, ctx: ctx} -} - -// Stats returns iterator stats embedded within the stream. -func (dec *StringPointDecoder) Stats() IteratorStats { return dec.stats } - -// DecodeStringPoint reads from the underlying reader and unmarshals into p. -func (dec *StringPointDecoder) DecodeStringPoint(p *StringPoint) error { - for { - // Read length. - var sz uint32 - if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { - return err - } - - // Read point data. - buf := make([]byte, sz) - if _, err := io.ReadFull(dec.r, buf); err != nil { - return err - } - - // Unmarshal into point. - var pb internal.Point - if err := proto.Unmarshal(buf, &pb); err != nil { - return err - } - - // If the point contains stats then read stats and retry. - if pb.Stats != nil { - dec.stats = decodeIteratorStats(pb.Stats) - continue - } - - // Decode into point object. - *p = *decodeStringPoint(&pb) - - return nil - } -} - -// BooleanPoint represents a point with a bool value. -// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. -// See TestPoint_Fields in influxql/point_test.go for more details. -type BooleanPoint struct { - Name string - Tags Tags - - Time int64 - Value bool - Aux []interface{} - - // Total number of points that were combined into this point from an aggregate. 
- // If this is zero, the point is not the result of an aggregate function. - Aggregated uint32 - Nil bool -} - -func (v *BooleanPoint) name() string { return v.Name } -func (v *BooleanPoint) tags() Tags { return v.Tags } -func (v *BooleanPoint) time() int64 { return v.Time } -func (v *BooleanPoint) nil() bool { return v.Nil } -func (v *BooleanPoint) value() interface{} { - if v.Nil { - return nil - } - return v.Value -} -func (v *BooleanPoint) aux() []interface{} { return v.Aux } - -// Clone returns a copy of v. -func (v *BooleanPoint) Clone() *BooleanPoint { - if v == nil { - return nil - } - - other := *v - if v.Aux != nil { - other.Aux = make([]interface{}, len(v.Aux)) - copy(other.Aux, v.Aux) - } - - return &other -} - -// CopyTo makes a deep copy into the point. -func (v *BooleanPoint) CopyTo(other *BooleanPoint) { - other.Name, other.Tags = v.Name, v.Tags - other.Time = v.Time - other.Value, other.Nil = v.Value, v.Nil - if v.Aux != nil { - if len(other.Aux) != len(v.Aux) { - other.Aux = make([]interface{}, len(v.Aux)) - } - copy(other.Aux, v.Aux) - } -} - -func encodeBooleanPoint(p *BooleanPoint) *internal.Point { - return &internal.Point{ - Name: proto.String(p.Name), - Tags: proto.String(p.Tags.ID()), - Time: proto.Int64(p.Time), - Nil: proto.Bool(p.Nil), - Aux: encodeAux(p.Aux), - Aggregated: proto.Uint32(p.Aggregated), - - BooleanValue: proto.Bool(p.Value), - } -} - -func decodeBooleanPoint(pb *internal.Point) *BooleanPoint { - return &BooleanPoint{ - Name: pb.GetName(), - Tags: newTagsID(pb.GetTags()), - Time: pb.GetTime(), - Nil: pb.GetNil(), - Aux: decodeAux(pb.Aux), - Aggregated: pb.GetAggregated(), - Value: pb.GetBooleanValue(), - } -} - -// booleanPoints represents a slice of points sortable by value. -type booleanPoints []BooleanPoint - -func (a booleanPoints) Len() int { return len(a) } -func (a booleanPoints) Less(i, j int) bool { - if a[i].Time != a[j].Time { - return a[i].Time < a[j].Time - } - return !a[i].Value -} -func (a booleanPoints) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// booleanPointsByValue represents a slice of points sortable by value. -type booleanPointsByValue []BooleanPoint - -func (a booleanPointsByValue) Len() int { return len(a) } - -func (a booleanPointsByValue) Less(i, j int) bool { return !a[i].Value } - -func (a booleanPointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// booleanPointsByTime represents a slice of points sortable by value. -type booleanPointsByTime []BooleanPoint - -func (a booleanPointsByTime) Len() int { return len(a) } -func (a booleanPointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } -func (a booleanPointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// booleanPointByFunc represents a slice of points sortable by a function. 
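Because bool has no natural ordering, the boolean comparators above differ from the numeric and string ones: Less inspects only the left operand, which is enough to group false values before true values. A package-internal sketch (hypothetical helper name):

```go
package query

import "sort"

// sortBooleanPointsFalseFirst groups false values ahead of true values using
// the generated booleanPointsByValue ordering.
func sortBooleanPointsFalseFirst(points []BooleanPoint) {
	sort.Sort(booleanPointsByValue(points))
}
```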
-type booleanPointsByFunc struct { - points []BooleanPoint - cmp func(a, b *BooleanPoint) bool -} - -func (a *booleanPointsByFunc) Len() int { return len(a.points) } -func (a *booleanPointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } -func (a *booleanPointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } - -func (a *booleanPointsByFunc) Push(x interface{}) { - a.points = append(a.points, x.(BooleanPoint)) -} - -func (a *booleanPointsByFunc) Pop() interface{} { - p := a.points[len(a.points)-1] - a.points = a.points[:len(a.points)-1] - return p -} - -func booleanPointsSortBy(points []BooleanPoint, cmp func(a, b *BooleanPoint) bool) *booleanPointsByFunc { - return &booleanPointsByFunc{ - points: points, - cmp: cmp, - } -} - -// BooleanPointEncoder encodes BooleanPoint points to a writer. -type BooleanPointEncoder struct { - w io.Writer -} - -// NewBooleanPointEncoder returns a new instance of BooleanPointEncoder that writes to w. -func NewBooleanPointEncoder(w io.Writer) *BooleanPointEncoder { - return &BooleanPointEncoder{w: w} -} - -// EncodeBooleanPoint marshals and writes p to the underlying writer. -func (enc *BooleanPointEncoder) EncodeBooleanPoint(p *BooleanPoint) error { - // Marshal to bytes. - buf, err := proto.Marshal(encodeBooleanPoint(p)) - if err != nil { - return err - } - - // Write the length. - if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { - return err - } - - // Write the encoded point. - if _, err := enc.w.Write(buf); err != nil { - return err - } - return nil -} - -// BooleanPointDecoder decodes BooleanPoint points from a reader. -type BooleanPointDecoder struct { - r io.Reader - stats IteratorStats - ctx context.Context -} - -// NewBooleanPointDecoder returns a new instance of BooleanPointDecoder that reads from r. -func NewBooleanPointDecoder(ctx context.Context, r io.Reader) *BooleanPointDecoder { - return &BooleanPointDecoder{r: r, ctx: ctx} -} - -// Stats returns iterator stats embedded within the stream. -func (dec *BooleanPointDecoder) Stats() IteratorStats { return dec.stats } - -// DecodeBooleanPoint reads from the underlying reader and unmarshals into p. -func (dec *BooleanPointDecoder) DecodeBooleanPoint(p *BooleanPoint) error { - for { - // Read length. - var sz uint32 - if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { - return err - } - - // Read point data. - buf := make([]byte, sz) - if _, err := io.ReadFull(dec.r, buf); err != nil { - return err - } - - // Unmarshal into point. - var pb internal.Point - if err := proto.Unmarshal(buf, &pb); err != nil { - return err - } - - // If the point contains stats then read stats and retry. - if pb.Stats != nil { - dec.stats = decodeIteratorStats(pb.Stats) - continue - } - - // Decode into point object. - *p = *decodeBooleanPoint(&pb) - - return nil - } -} diff --git a/influxql/query/point.gen.go.tmpl b/influxql/query/point.gen.go.tmpl deleted file mode 100644 index 352bd679b83..00000000000 --- a/influxql/query/point.gen.go.tmpl +++ /dev/null @@ -1,243 +0,0 @@ -package query - -//lint:file-ignore U1000 Ignore all unused code, it's generated - -import ( - "context" - "encoding/binary" - "io" - - internal "github.com/influxdata/influxdb/v2/influxql/query/internal" - "google.golang.org/protobuf/proto" -) - -{{range .}} - -// {{.Name}}Point represents a point with a {{.Type}} value. -// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. -// See TestPoint_Fields in influxql/point_test.go for more details. 
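The *PointsByFunc containers above implement Push and Pop in addition to Len/Less/Swap, so they satisfy container/heap's heap.Interface as well as sort.Interface. A package-internal sketch using the Float variant generated from the same template (hypothetical helper name; it assumes a non-empty slice):

```go
package query

import "container/heap"

// popSmallestFloatPoint treats the comparator-driven container as a min-heap
// and removes the point with the smallest value. Panics on an empty slice.
func popSmallestFloatPoint(points []FloatPoint) FloatPoint {
	h := floatPointsSortBy(points, func(a, b *FloatPoint) bool {
		return a.Value < b.Value
	})
	heap.Init(h)
	return heap.Pop(h).(FloatPoint)
}
```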
-type {{.Name}}Point struct { - Name string - Tags Tags - - Time int64 - Value {{.Type}} - Aux []interface{} - - // Total number of points that were combined into this point from an aggregate. - // If this is zero, the point is not the result of an aggregate function. - Aggregated uint32 - Nil bool -} - -func (v *{{.Name}}Point) name() string { return v.Name } -func (v *{{.Name}}Point) tags() Tags { return v.Tags } -func (v *{{.Name}}Point) time() int64 { return v.Time } -func (v *{{.Name}}Point) nil() bool { return v.Nil } -func (v *{{.Name}}Point) value() interface{} { - if v.Nil { - return nil - } - return v.Value -} -func (v *{{.Name}}Point) aux() []interface{} { return v.Aux } - -// Clone returns a copy of v. -func (v *{{.Name}}Point) Clone() *{{.Name}}Point { - if v == nil { - return nil - } - - other := *v - if v.Aux != nil { - other.Aux = make([]interface{}, len(v.Aux)) - copy(other.Aux, v.Aux) - } - - return &other -} - -// CopyTo makes a deep copy into the point. -func (v *{{.Name}}Point) CopyTo(other *{{.Name}}Point) { - other.Name, other.Tags = v.Name, v.Tags - other.Time = v.Time - other.Value, other.Nil = v.Value, v.Nil - if v.Aux != nil { - if len(other.Aux) != len(v.Aux) { - other.Aux = make([]interface{}, len(v.Aux)) - } - copy(other.Aux, v.Aux) - } -} - -func encode{{.Name}}Point(p *{{.Name}}Point) *internal.Point { - return &internal.Point{ - Name: proto.String(p.Name), - Tags: proto.String(p.Tags.ID()), - Time: proto.Int64(p.Time), - Nil: proto.Bool(p.Nil), - Aux: encodeAux(p.Aux), - Aggregated: proto.Uint32(p.Aggregated), - - {{if eq .Name "Float"}} - FloatValue: proto.Float64(p.Value), - {{else if eq .Name "Integer"}} - IntegerValue: proto.Int64(p.Value), - {{else if eq .Name "String"}} - StringValue: proto.String(p.Value), - {{else if eq .Name "Boolean"}} - BooleanValue: proto.Bool(p.Value), - {{end}} - } -} - -func decode{{.Name}}Point(pb *internal.Point) *{{.Name}}Point { - return &{{.Name}}Point{ - Name: pb.GetName(), - Tags: newTagsID(pb.GetTags()), - Time: pb.GetTime(), - Nil: pb.GetNil(), - Aux: decodeAux(pb.Aux), - Aggregated: pb.GetAggregated(), - Value: pb.Get{{.Name}}Value(), - } -} - -// {{.name}}Points represents a slice of points sortable by value. -type {{.name}}Points []{{.Name}}Point - -func (a {{.name}}Points) Len() int { return len(a) } -func (a {{.name}}Points) Less(i, j int) bool { - if a[i].Time != a[j].Time { - return a[i].Time < a[j].Time - } - return {{if ne .Name "Boolean"}}a[i].Value < a[j].Value{{else}}!a[i].Value{{end}} -} -func (a {{.name}}Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// {{.name}}PointsByValue represents a slice of points sortable by value. -type {{.name}}PointsByValue []{{.Name}}Point - -func (a {{.name}}PointsByValue) Len() int { return len(a) } -{{if eq .Name "Boolean"}} -func (a {{.name}}PointsByValue) Less(i, j int) bool { return !a[i].Value } -{{else}} -func (a {{.name}}PointsByValue) Less(i, j int) bool { return a[i].Value < a[j].Value } -{{end}} -func (a {{.name}}PointsByValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// {{.name}}PointsByTime represents a slice of points sortable by value. -type {{.name}}PointsByTime []{{.Name}}Point - -func (a {{.name}}PointsByTime) Len() int { return len(a) } -func (a {{.name}}PointsByTime) Less(i, j int) bool { return a[i].Time < a[j].Time } -func (a {{.name}}PointsByTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// {{.name}}PointByFunc represents a slice of points sortable by a function. 
-type {{.name}}PointsByFunc struct { - points []{{.Name}}Point - cmp func(a, b *{{.Name}}Point) bool -} - -func (a *{{.name}}PointsByFunc) Len() int { return len(a.points) } -func (a *{{.name}}PointsByFunc) Less(i, j int) bool { return a.cmp(&a.points[i], &a.points[j]) } -func (a *{{.name}}PointsByFunc) Swap(i, j int) { a.points[i], a.points[j] = a.points[j], a.points[i] } - -func (a *{{.name}}PointsByFunc) Push(x interface{}) { - a.points = append(a.points, x.({{.Name}}Point)) -} - -func (a *{{.name}}PointsByFunc) Pop() interface{} { - p := a.points[len(a.points)-1] - a.points = a.points[:len(a.points)-1] - return p -} - -func {{.name}}PointsSortBy(points []{{.Name}}Point, cmp func(a, b *{{.Name}}Point) bool) *{{.name}}PointsByFunc { - return &{{.name}}PointsByFunc{ - points: points, - cmp: cmp, - } -} - -// {{.Name}}PointEncoder encodes {{.Name}}Point points to a writer. -type {{.Name}}PointEncoder struct { - w io.Writer -} - -// New{{.Name}}PointEncoder returns a new instance of {{.Name}}PointEncoder that writes to w. -func New{{.Name}}PointEncoder(w io.Writer) *{{.Name}}PointEncoder { - return &{{.Name}}PointEncoder{w: w} -} - -// Encode{{.Name}}Point marshals and writes p to the underlying writer. -func (enc *{{.Name}}PointEncoder) Encode{{.Name}}Point(p *{{.Name}}Point) error { - // Marshal to bytes. - buf, err := proto.Marshal(encode{{.Name}}Point(p)) - if err != nil { - return err - } - - // Write the length. - if err := binary.Write(enc.w, binary.BigEndian, uint32(len(buf))); err != nil { - return err - } - - // Write the encoded point. - if _, err := enc.w.Write(buf); err != nil { - return err - } - return nil -} - - -// {{.Name}}PointDecoder decodes {{.Name}}Point points from a reader. -type {{.Name}}PointDecoder struct { - r io.Reader - stats IteratorStats - ctx context.Context -} - -// New{{.Name}}PointDecoder returns a new instance of {{.Name}}PointDecoder that reads from r. -func New{{.Name}}PointDecoder(ctx context.Context, r io.Reader) *{{.Name}}PointDecoder { - return &{{.Name}}PointDecoder{r: r, ctx: ctx} -} - -// Stats returns iterator stats embedded within the stream. -func (dec *{{.Name}}PointDecoder) Stats() IteratorStats { return dec.stats } - -// Decode{{.Name}}Point reads from the underlying reader and unmarshals into p. -func (dec *{{.Name}}PointDecoder) Decode{{.Name}}Point(p *{{.Name}}Point) error { - for { - // Read length. - var sz uint32 - if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { - return err - } - - // Read point data. - buf := make([]byte, sz) - if _, err := io.ReadFull(dec.r, buf); err != nil { - return err - } - - // Unmarshal into point. - var pb internal.Point - if err := proto.Unmarshal(buf, &pb); err != nil { - return err - } - - // If the point contains stats then read stats and retry. - if pb.Stats != nil { - dec.stats = decodeIteratorStats(pb.Stats) - continue - } - - // Decode into point object. - *p = *decode{{.Name}}Point(&pb) - - return nil - } -} - -{{end}} diff --git a/influxql/query/point.go b/influxql/query/point.go deleted file mode 100644 index fdd03b8cd36..00000000000 --- a/influxql/query/point.go +++ /dev/null @@ -1,382 +0,0 @@ -package query - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "math" - "sort" - - internal "github.com/influxdata/influxdb/v2/influxql/query/internal" - "github.com/influxdata/influxql" - "google.golang.org/protobuf/proto" -) - -// ZeroTime is the Unix nanosecond timestamp for no time. 
-// This time is not used by the query engine or the storage engine as a valid time. -const ZeroTime = int64(math.MinInt64) - -// Point represents a value in a series that occurred at a given time. -type Point interface { - // Name and tags uniquely identify the series the value belongs to. - name() string - tags() Tags - - // The time that the value occurred at. - time() int64 - - // The value at the given time. - value() interface{} - - // Auxiliary values passed along with the value. - aux() []interface{} -} - -// Points represents a list of points. -type Points []Point - -// Clone returns a deep copy of a. -func (a Points) Clone() []Point { - other := make([]Point, len(a)) - for i, p := range a { - if p == nil { - other[i] = nil - continue - } - - switch p := p.(type) { - case *FloatPoint: - other[i] = p.Clone() - case *IntegerPoint: - other[i] = p.Clone() - case *UnsignedPoint: - other[i] = p.Clone() - case *StringPoint: - other[i] = p.Clone() - case *BooleanPoint: - other[i] = p.Clone() - default: - panic(fmt.Sprintf("unable to clone point: %T", p)) - } - } - return other -} - -// Tags represent a map of keys and values. -// It memorizes its key so it can be used efficiently during query execution. -type Tags struct { - id string - m map[string]string -} - -// NewTags returns a new instance of Tags. -func NewTags(m map[string]string) Tags { - if len(m) == 0 { - return Tags{} - } - return Tags{ - id: string(encodeTags(m)), - m: m, - } -} - -// newTagsID returns a new instance of Tags by parsing the given tag ID. -func newTagsID(id string) Tags { - m := decodeTags([]byte(id)) - if len(m) == 0 { - return Tags{} - } - return Tags{id: id, m: m} -} - -// Equal compares if the Tags are equal to each other. -func (t Tags) Equal(other Tags) bool { - return t.ID() == other.ID() -} - -// ID returns the string identifier for the tags. -func (t Tags) ID() string { return t.id } - -// KeyValues returns the underlying map for the tags. -func (t Tags) KeyValues() map[string]string { return t.m } - -// Keys returns a sorted list of all keys on the tag. -func (t *Tags) Keys() []string { - if t == nil { - return nil - } - - var a []string - for k := range t.m { - a = append(a, k) - } - sort.Strings(a) - return a -} - -// Values returns a sorted list of all values on the tag. -func (t *Tags) Values() []string { - if t == nil { - return nil - } - - a := make([]string, 0, len(t.m)) - for _, v := range t.m { - a = append(a, v) - } - sort.Strings(a) - return a -} - -// Value returns the value for a given key. -func (t *Tags) Value(k string) string { - if t == nil { - return "" - } - return t.m[k] -} - -// Subset returns a new tags object with a subset of the keys. -func (t *Tags) Subset(keys []string) Tags { - if len(keys) == 0 { - return Tags{} - } - - // If keys match existing keys, simply return this tagset. - if keysMatch(t.m, keys) { - return *t - } - - // Otherwise create new tag set. - m := make(map[string]string, len(keys)) - for _, k := range keys { - m[k] = t.m[k] - } - return NewTags(m) -} - -// Equals returns true if t equals other. -func (t *Tags) Equals(other *Tags) bool { - if t == nil && other == nil { - return true - } else if t == nil || other == nil { - return false - } - return t.id == other.id -} - -// keysMatch returns true if m has exactly the same keys as listed in keys. 
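Tags memoize their identifier: NewTags sorts the keys and joins all keys, then all values, with NUL separators (the exact layout is pinned down by TestTags_ID later in this diff). A small external sketch:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/influxql/query"
)

func main() {
	tags := query.NewTags(map[string]string{"foo": "bar", "baz": "bat"})

	// Sorted keys first, then values in key order, NUL-separated.
	fmt.Printf("%q\n", tags.ID()) // "baz\x00foo\x00bat\x00bar"

	// Subset builds a new tag set for the requested keys; absent keys are "".
	sub := tags.Subset([]string{"baz"})
	fmt.Println(sub.Value("baz"), sub.Value("foo") == "") // bat true
}
```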
-func keysMatch(m map[string]string, keys []string) bool { - if len(keys) != len(m) { - return false - } - - for _, k := range keys { - if _, ok := m[k]; !ok { - return false - } - } - - return true -} - -// encodeTags converts a map of strings to an identifier. -func encodeTags(m map[string]string) []byte { - // Empty maps marshal to empty bytes. - if len(m) == 0 { - return nil - } - - // Extract keys and determine final size. - sz := (len(m) * 2) - 1 // separators - keys := make([]string, 0, len(m)) - for k, v := range m { - keys = append(keys, k) - sz += len(k) + len(v) - } - sort.Strings(keys) - - // Generate marshaled bytes. - b := make([]byte, sz) - buf := b - for _, k := range keys { - copy(buf, k) - buf[len(k)] = '\x00' - buf = buf[len(k)+1:] - } - for i, k := range keys { - v := m[k] - copy(buf, v) - if i < len(keys)-1 { - buf[len(v)] = '\x00' - buf = buf[len(v)+1:] - } - } - return b -} - -// decodeTags parses an identifier into a map of tags. -func decodeTags(id []byte) map[string]string { - a := bytes.Split(id, []byte{'\x00'}) - - // There must be an even number of segments. - if len(a) > 0 && len(a)%2 == 1 { - a = a[:len(a)-1] - } - - // Return nil if there are no segments. - if len(a) == 0 { - return nil - } - mid := len(a) / 2 - - // Decode key/value tags. - m := make(map[string]string) - for i := 0; i < mid; i++ { - m[string(a[i])] = string(a[i+mid]) - } - return m -} - -func encodeAux(aux []interface{}) []*internal.Aux { - pb := make([]*internal.Aux, len(aux)) - for i := range aux { - switch v := aux[i].(type) { - case float64: - pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Float)), FloatValue: proto.Float64(v)} - case *float64: - pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Float))} - case int64: - pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Integer)), IntegerValue: proto.Int64(v)} - case *int64: - pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Integer))} - case uint64: - pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Unsigned)), UnsignedValue: proto.Uint64(v)} - case *uint64: - pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Unsigned))} - case string: - pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.String)), StringValue: proto.String(v)} - case *string: - pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.String))} - case bool: - pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Boolean)), BooleanValue: proto.Bool(v)} - case *bool: - pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Boolean))} - default: - pb[i] = &internal.Aux{DataType: proto.Int32(int32(influxql.Unknown))} - } - } - return pb -} - -func decodeAux(pb []*internal.Aux) []interface{} { - if len(pb) == 0 { - return nil - } - - aux := make([]interface{}, len(pb)) - for i := range pb { - switch influxql.DataType(pb[i].GetDataType()) { - case influxql.Float: - if pb[i].FloatValue != nil { - aux[i] = *pb[i].FloatValue - } else { - aux[i] = (*float64)(nil) - } - case influxql.Integer: - if pb[i].IntegerValue != nil { - aux[i] = *pb[i].IntegerValue - } else { - aux[i] = (*int64)(nil) - } - case influxql.Unsigned: - if pb[i].UnsignedValue != nil { - aux[i] = *pb[i].UnsignedValue - } else { - aux[i] = (*uint64)(nil) - } - case influxql.String: - if pb[i].StringValue != nil { - aux[i] = *pb[i].StringValue - } else { - aux[i] = (*string)(nil) - } - case influxql.Boolean: - if pb[i].BooleanValue != nil { - aux[i] = *pb[i].BooleanValue - } else { - aux[i] = (*bool)(nil) - } - default: - aux[i] = nil - } 
- } - return aux -} - -func cloneAux(src []interface{}) []interface{} { - if src == nil { - return src - } - dest := make([]interface{}, len(src)) - copy(dest, src) - return dest -} - -// PointDecoder decodes generic points from a reader. -type PointDecoder struct { - r io.Reader - stats IteratorStats -} - -// NewPointDecoder returns a new instance of PointDecoder that reads from r. -func NewPointDecoder(r io.Reader) *PointDecoder { - return &PointDecoder{r: r} -} - -// Stats returns iterator stats embedded within the stream. -func (dec *PointDecoder) Stats() IteratorStats { return dec.stats } - -// DecodePoint reads from the underlying reader and unmarshals into p. -func (dec *PointDecoder) DecodePoint(p *Point) error { - for { - // Read length. - var sz uint32 - if err := binary.Read(dec.r, binary.BigEndian, &sz); err != nil { - return err - } - - // Read point data. - buf := make([]byte, sz) - if _, err := io.ReadFull(dec.r, buf); err != nil { - return err - } - - // Unmarshal into point. - var pb internal.Point - if err := proto.Unmarshal(buf, &pb); err != nil { - return err - } - - // If the point contains stats then read stats and retry. - if pb.Stats != nil { - dec.stats = decodeIteratorStats(pb.Stats) - continue - } - - if pb.IntegerValue != nil { - *p = decodeIntegerPoint(&pb) - } else if pb.UnsignedValue != nil { - *p = decodeUnsignedPoint(&pb) - } else if pb.StringValue != nil { - *p = decodeStringPoint(&pb) - } else if pb.BooleanValue != nil { - *p = decodeBooleanPoint(&pb) - } else { - *p = decodeFloatPoint(&pb) - } - - return nil - } -} diff --git a/influxql/query/point_test.go b/influxql/query/point_test.go deleted file mode 100644 index 564cbb2b35e..00000000000 --- a/influxql/query/point_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package query_test - -import ( - "reflect" - "strings" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/pkg/deep" -) - -func TestPoint_Clone_Float(t *testing.T) { - p := &query.FloatPoint{ - Name: "cpu", - Tags: ParseTags("host=server01"), - Time: 5, - Value: 2, - Aux: []interface{}{float64(45)}, - } - c := p.Clone() - if p == c { - t.Errorf("clone has the same address as the original: %v == %v", p, c) - } - if !deep.Equal(p, c) { - t.Errorf("mismatched point: %s", spew.Sdump(c)) - } - if &p.Aux[0] == &c.Aux[0] { - t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux) - } else if !deep.Equal(p.Aux, c.Aux) { - t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux) - } -} - -func TestPoint_Clone_Integer(t *testing.T) { - p := &query.IntegerPoint{ - Name: "cpu", - Tags: ParseTags("host=server01"), - Time: 5, - Value: 2, - Aux: []interface{}{float64(45)}, - } - c := p.Clone() - if p == c { - t.Errorf("clone has the same address as the original: %v == %v", p, c) - } - if !deep.Equal(p, c) { - t.Errorf("mismatched point: %s", spew.Sdump(c)) - } - if &p.Aux[0] == &c.Aux[0] { - t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux) - } else if !deep.Equal(p.Aux, c.Aux) { - t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux) - } -} - -func TestPoint_Clone_String(t *testing.T) { - p := &query.StringPoint{ - Name: "cpu", - Tags: ParseTags("host=server01"), - Time: 5, - Value: "clone", - Aux: []interface{}{float64(45)}, - } - c := p.Clone() - if p == c { - t.Errorf("clone has the same address as the original: %v == %v", p, c) - } - if !deep.Equal(p, c) { - t.Errorf("mismatched point: %s", spew.Sdump(c)) - } - 
if &p.Aux[0] == &c.Aux[0] { - t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux) - } else if !deep.Equal(p.Aux, c.Aux) { - t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux) - } -} - -func TestPoint_Clone_Boolean(t *testing.T) { - p := &query.BooleanPoint{ - Name: "cpu", - Tags: ParseTags("host=server01"), - Time: 5, - Value: true, - Aux: []interface{}{float64(45)}, - } - c := p.Clone() - if p == c { - t.Errorf("clone has the same address as the original: %v == %v", p, c) - } - if !deep.Equal(p, c) { - t.Errorf("mismatched point: %s", spew.Sdump(c)) - } - if &p.Aux[0] == &c.Aux[0] { - t.Errorf("aux values share the same address: %v == %v", p.Aux, c.Aux) - } else if !deep.Equal(p.Aux, c.Aux) { - t.Errorf("mismatched aux fields: %v != %v", p.Aux, c.Aux) - } -} - -func TestPoint_Clone_Nil(t *testing.T) { - var fp *query.FloatPoint - if p := fp.Clone(); p != nil { - t.Errorf("expected nil, got %v", p) - } - - var ip *query.IntegerPoint - if p := ip.Clone(); p != nil { - t.Errorf("expected nil, got %v", p) - } - - var sp *query.StringPoint - if p := sp.Clone(); p != nil { - t.Errorf("expected nil, got %v", p) - } - - var bp *query.BooleanPoint - if p := bp.Clone(); p != nil { - t.Errorf("expected nil, got %v", p) - } -} - -// TestPoint_Fields ensures that no additional fields are added to the point structs. -// This struct is very sensitive and can effect performance unless handled carefully. -// To avoid the struct becoming a dumping ground for every function that needs to store -// miscellaneous information, this test is meant to ensure that new fields don't slip -// into the struct. -func TestPoint_Fields(t *testing.T) { - allowedFields := map[string]bool{ - "Name": true, - "Tags": true, - "Time": true, - "Nil": true, - "Value": true, - "Aux": true, - "Aggregated": true, - } - - for _, typ := range []reflect.Type{ - reflect.TypeOf(query.FloatPoint{}), - reflect.TypeOf(query.IntegerPoint{}), - reflect.TypeOf(query.StringPoint{}), - reflect.TypeOf(query.BooleanPoint{}), - } { - f, ok := typ.FieldByNameFunc(func(name string) bool { - return !allowedFields[name] - }) - if ok { - t.Errorf("found an unallowed field in %s: %s %s", typ, f.Name, f.Type) - } - } -} - -// Ensure that tags can return a unique id. -func TestTags_ID(t *testing.T) { - tags := query.NewTags(map[string]string{"foo": "bar", "baz": "bat"}) - if id := tags.ID(); id != "baz\x00foo\x00bat\x00bar" { - t.Fatalf("unexpected id: %q", id) - } -} - -// Ensure that a subset can be created from a tag set. -func TestTags_Subset(t *testing.T) { - tags := query.NewTags(map[string]string{"a": "0", "b": "1", "c": "2"}) - subset := tags.Subset([]string{"b", "c", "d"}) - if keys := subset.Keys(); !reflect.DeepEqual(keys, []string{"b", "c", "d"}) { - t.Fatalf("unexpected keys: %+v", keys) - } else if v := subset.Value("a"); v != "" { - t.Fatalf("unexpected 'a' value: %s", v) - } else if v := subset.Value("b"); v != "1" { - t.Fatalf("unexpected 'b' value: %s", v) - } else if v := subset.Value("c"); v != "2" { - t.Fatalf("unexpected 'c' value: %s", v) - } else if v := subset.Value("d"); v != "" { - t.Fatalf("unexpected 'd' value: %s", v) - } -} - -// ParseTags returns an instance of Tags for a comma-delimited list of key/values. 
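The generic PointDecoder at the end of point.go dispatches on which protobuf value field is set and falls back to a FloatPoint when none of the integer/unsigned/string/boolean fields are present. An external sketch of decoding through the Point interface:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/influxdata/influxdb/v2/influxql/query"
)

func main() {
	var buf bytes.Buffer
	enc := query.NewFloatPointEncoder(&buf)
	if err := enc.EncodeFloatPoint(&query.FloatPoint{Name: "cpu", Time: 1, Value: 0.5}); err != nil {
		panic(err)
	}

	// DecodePoint hands back the concrete type behind the Point interface.
	dec := query.NewPointDecoder(&buf)
	var p query.Point
	if err := dec.DecodePoint(&p); err != nil {
		panic(err)
	}
	switch pt := p.(type) {
	case *query.FloatPoint:
		fmt.Println("float:", pt.Name, pt.Value) // float: cpu 0.5
	default:
		fmt.Println("unexpected point type")
	}
}
```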
-func ParseTags(s string) query.Tags { - m := make(map[string]string) - for _, kv := range strings.Split(s, ",") { - a := strings.Split(kv, "=") - m[a[0]] = a[1] - } - return query.NewTags(m) -} diff --git a/influxql/query/proxy_executor.go b/influxql/query/proxy_executor.go deleted file mode 100644 index 1d41e4be751..00000000000 --- a/influxql/query/proxy_executor.go +++ /dev/null @@ -1,168 +0,0 @@ -package query - -import ( - "context" - "io" - "strings" - "time" - - iql "github.com/influxdata/influxdb/v2/influxql" - "github.com/influxdata/influxdb/v2/kit/check" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - influxlogger "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxql" - "github.com/opentracing/opentracing-go/log" - "go.uber.org/zap" -) - -type ProxyExecutor struct { - log *zap.Logger - executor *Executor -} - -func NewProxyExecutor(log *zap.Logger, executor *Executor) *ProxyExecutor { - return &ProxyExecutor{log: log, executor: executor} -} - -func (s *ProxyExecutor) Check(ctx context.Context) check.Response { - return check.Response{Name: "Query Service", Status: check.StatusPass} -} - -func (s *ProxyExecutor) Query(ctx context.Context, w io.Writer, req *iql.QueryRequest) (iql.Statistics, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - logger := s.log.With(influxlogger.TraceFields(ctx)...) - logger.Info("executing new query", zap.String("query", req.Query)) - - p := influxql.NewParser(strings.NewReader(req.Query)) - p.SetParams(req.Params) - q, err := p.ParseQuery() - if err != nil { - return iql.Statistics{}, &errors.Error{ - Code: errors.EInvalid, - Msg: "failed to parse query", - Err: err, - } - } - - span.LogFields(log.String("query", q.String())) - - opts := ExecutionOptions{ - OrgID: req.OrganizationID, - Database: req.DB, - RetentionPolicy: req.RP, - ChunkSize: req.ChunkSize, - ReadOnly: true, - Authorizer: OpenAuthorizer, - } - - epoch := req.Epoch - rw := NewResponseWriter(req.EncodingFormat) - - results, stats := s.executor.ExecuteQuery(ctx, q, opts) - if req.Chunked { - for r := range results { - // Ignore nil results. - if r == nil { - continue - } - - // if requested, convert result timestamps to epoch - if epoch != "" { - convertToEpoch(r, epoch) - } - - err = rw.WriteResponse(ctx, w, Response{Results: []*Result{r}}) - if err != nil { - break - } - } - } else { - resp := Response{Results: GatherResults(results, epoch)} - err = rw.WriteResponse(ctx, w, resp) - } - - return *stats, err -} - -// GatherResults consumes the results from the given channel and organizes them correctly. -// Results for various statements need to be combined together. -func GatherResults(ch <-chan *Result, epoch string) []*Result { - var results []*Result - for r := range ch { - // Ignore nil results. - if r == nil { - continue - } - - // if requested, convert result timestamps to epoch - if epoch != "" { - convertToEpoch(r, epoch) - } - - // It's not chunked so buffer results in memory. - // Results for statements need to be combined together. - // We need to check if this new result is for the same statement as - // the last result, or for the next statement. 
- if l := len(results); l > 0 && results[l-1].StatementID == r.StatementID { - if r.Err != nil { - results[l-1] = r - continue - } - - cr := results[l-1] - rowsMerged := 0 - if len(cr.Series) > 0 { - lastSeries := cr.Series[len(cr.Series)-1] - - for _, row := range r.Series { - if !lastSeries.SameSeries(row) { - // Next row is for a different series than last. - break - } - // Values are for the same series, so append them. - lastSeries.Values = append(lastSeries.Values, row.Values...) - lastSeries.Partial = row.Partial - rowsMerged++ - } - } - - // Append remaining rows as new rows. - r.Series = r.Series[rowsMerged:] - cr.Series = append(cr.Series, r.Series...) - cr.Messages = append(cr.Messages, r.Messages...) - cr.Partial = r.Partial - } else { - results = append(results, r) - } - } - return results -} - -// convertToEpoch converts result timestamps from time.Time to the specified epoch. -func convertToEpoch(r *Result, epoch string) { - divisor := int64(1) - - switch epoch { - case "u": - divisor = int64(time.Microsecond) - case "ms": - divisor = int64(time.Millisecond) - case "s": - divisor = int64(time.Second) - case "m": - divisor = int64(time.Minute) - case "h": - divisor = int64(time.Hour) - } - - for _, s := range r.Series { - for _, v := range s.Values { - if ts, ok := v[0].(time.Time); ok { - v[0] = ts.UnixNano() / divisor - } - } - } -} diff --git a/influxql/query/query.go b/influxql/query/query.go deleted file mode 100644 index 2c1fd2d6beb..00000000000 --- a/influxql/query/query.go +++ /dev/null @@ -1,7 +0,0 @@ -package query // import "github.com/influxdata/influxdb/v2/influxql/query" - -//go:generate tmpl -data=@tmpldata iterator.gen.go.tmpl -//go:generate tmpl -data=@tmpldata point.gen.go.tmpl -//go:generate tmpl -data=@tmpldata functions.gen.go.tmpl - -//go:generate protoc --go_out=internal/ internal/internal.proto diff --git a/influxql/query/response.go b/influxql/query/response.go deleted file mode 100644 index 80ffa785c22..00000000000 --- a/influxql/query/response.go +++ /dev/null @@ -1,61 +0,0 @@ -package query - -import ( - "encoding/json" - "errors" -) - -// Response represents a list of statement results. -type Response struct { - Results []*Result - Err error -} - -// MarshalJSON encodes a Response struct into JSON. -func (r Response) MarshalJSON() ([]byte, error) { - // Define a struct that outputs "error" as a string. - var o struct { - Results []*Result `json:"results,omitempty"` - Err string `json:"error,omitempty"` - } - - // Copy fields to output struct. - o.Results = r.Results - if r.Err != nil { - o.Err = r.Err.Error() - } - - return json.Marshal(&o) -} - -// UnmarshalJSON decodes the data into the Response struct. -func (r *Response) UnmarshalJSON(b []byte) error { - var o struct { - Results []*Result `json:"results,omitempty"` - Err string `json:"error,omitempty"` - } - - err := json.Unmarshal(b, &o) - if err != nil { - return err - } - r.Results = o.Results - if o.Err != "" { - r.Err = errors.New(o.Err) - } - return nil -} - -// Error returns the first error from any statement. -// Returns nil if no errors occurred on any statements. 
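GatherResults above merges consecutive chunks that belong to the same statement and, when an epoch unit is supplied, rewrites time.Time values into integer timestamps before merging. A small external sketch of both behaviors:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/v2/influxql/query"
	"github.com/influxdata/influxdb/v2/models"
)

func main() {
	t0 := time.Unix(1, 0) // one second after the epoch

	ch := make(chan *query.Result, 2)
	// Two chunks of the same statement and the same "cpu" series.
	ch <- &query.Result{StatementID: 0, Series: models.Rows{{
		Name: "cpu", Columns: []string{"time", "value"},
		Values: [][]interface{}{{t0, 1.0}}, Partial: true,
	}}}
	ch <- &query.Result{StatementID: 0, Series: models.Rows{{
		Name: "cpu", Columns: []string{"time", "value"},
		Values: [][]interface{}{{t0.Add(time.Second), 2.0}},
	}}}
	close(ch)

	// Same StatementID, same series: the rows are folded into one result,
	// and epoch "ms" converts the time.Time values to millisecond integers.
	results := query.GatherResults(ch, "ms")
	fmt.Println(len(results))                // 1
	fmt.Println(results[0].Series[0].Values) // [[1000 1] [2000 2]]
}
```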
-func (r *Response) Error() error { - if r.Err != nil { - return r.Err - } - for _, rr := range r.Results { - if rr.Err != nil { - return rr.Err - } - } - return nil -} diff --git a/influxql/query/response_writer.go b/influxql/query/response_writer.go deleted file mode 100644 index e32e9986482..00000000000 --- a/influxql/query/response_writer.go +++ /dev/null @@ -1,292 +0,0 @@ -package query - -//lint:file-ignore SA1019 Ignore for now - -import ( - "context" - "encoding/csv" - "encoding/json" - "io" - "strconv" - "time" - - "github.com/influxdata/influxdb/v2/influxql" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/models" - "github.com/tinylib/msgp/msgp" -) - -// ResponseWriter is an interface for writing a response. -type ResponseWriter interface { - // WriteResponse writes a response. - WriteResponse(ctx context.Context, w io.Writer, resp Response) error -} - -// NewResponseWriter creates a new ResponseWriter based on the Accept header -// in the request that wraps the ResponseWriter. -func NewResponseWriter(encoding influxql.EncodingFormat) ResponseWriter { - switch encoding { - case influxql.EncodingFormatAppCSV, influxql.EncodingFormatTextCSV: - return &csvFormatter{statementID: -1} - case influxql.EncodingFormatMessagePack: - return &msgpFormatter{} - case influxql.EncodingFormatJSON: - fallthrough - default: - // TODO(sgc): Add EncodingFormatJSONPretty - return &jsonFormatter{Pretty: false} - } -} - -type jsonFormatter struct { - Pretty bool -} - -func (f *jsonFormatter) WriteResponse(ctx context.Context, w io.Writer, resp Response) (err error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var b []byte - if f.Pretty { - b, err = json.MarshalIndent(resp, "", " ") - } else { - b, err = json.Marshal(resp) - } - - if err != nil { - _, err = io.WriteString(w, err.Error()) - } else { - _, err = w.Write(b) - } - - w.Write([]byte("\n")) - return err -} - -type csvFormatter struct { - statementID int - columns []string -} - -func (f *csvFormatter) WriteResponse(ctx context.Context, w io.Writer, resp Response) (err error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - wr := csv.NewWriter(w) - if resp.Err != nil { - wr.Write([]string{"error"}) - wr.Write([]string{resp.Err.Error()}) - wr.Flush() - return wr.Error() - } - - for _, result := range resp.Results { - if result.StatementID != f.statementID { - // If there are no series in the result, skip past this result. - if len(result.Series) == 0 { - continue - } - - // Set the statement id and print out a newline if this is not the first statement. - if f.statementID >= 0 { - // Flush the csv writer and write a newline. - wr.Flush() - if err := wr.Error(); err != nil { - return err - } - - if _, err := io.WriteString(w, "\n"); err != nil { - return err - } - } - f.statementID = result.StatementID - - // Print out the column headers from the first series. - f.columns = make([]string, 2+len(result.Series[0].Columns)) - f.columns[0] = "name" - f.columns[1] = "tags" - copy(f.columns[2:], result.Series[0].Columns) - if err := wr.Write(f.columns); err != nil { - return err - } - } - - for i, row := range result.Series { - if i > 0 && !stringsEqual(result.Series[i-1].Columns, row.Columns) { - // The columns have changed. Print a newline and reprint the header. 
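Response and Result marshal errors as plain strings under "error", and Response.Error surfaces the first failure from any statement. An external sketch of the JSON round trip:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"

	"github.com/influxdata/influxdb/v2/influxql/query"
)

func main() {
	resp := query.Response{
		Results: []*query.Result{{StatementID: 0, Err: errors.New("boom")}},
	}

	b, err := json.Marshal(resp)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"results":[{"statement_id":0,"error":"boom"}]}

	var decoded query.Response
	if err := json.Unmarshal(b, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Error()) // boom
}
```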
- wr.Flush() - if err := wr.Error(); err != nil { - return err - } - - if _, err := io.WriteString(w, "\n"); err != nil { - return err - } - - f.columns = make([]string, 2+len(row.Columns)) - f.columns[0] = "name" - f.columns[1] = "tags" - copy(f.columns[2:], row.Columns) - if err := wr.Write(f.columns); err != nil { - return err - } - } - - f.columns[0] = row.Name - f.columns[1] = "" - if len(row.Tags) > 0 { - hashKey := models.NewTags(row.Tags).HashKey() - if len(hashKey) > 0 { - f.columns[1] = string(hashKey[1:]) - } - } - for _, values := range row.Values { - for i, value := range values { - if value == nil { - f.columns[i+2] = "" - continue - } - - switch v := value.(type) { - case float64: - f.columns[i+2] = strconv.FormatFloat(v, 'f', -1, 64) - case int64: - f.columns[i+2] = strconv.FormatInt(v, 10) - case uint64: - f.columns[i+2] = strconv.FormatUint(v, 10) - case string: - f.columns[i+2] = v - case bool: - if v { - f.columns[i+2] = "true" - } else { - f.columns[i+2] = "false" - } - case time.Time: - f.columns[i+2] = strconv.FormatInt(v.UnixNano(), 10) - case *float64, *int64, *string, *bool: - f.columns[i+2] = "" - } - } - wr.Write(f.columns) - } - } - } - wr.Flush() - return wr.Error() -} - -type msgpFormatter struct{} - -func (f *msgpFormatter) ContentType() string { - return "application/x-msgpack" -} - -func (f *msgpFormatter) WriteResponse(ctx context.Context, w io.Writer, resp Response) (err error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - enc := msgp.NewWriter(w) - defer enc.Flush() - - enc.WriteMapHeader(1) - if resp.Err != nil { - enc.WriteString("error") - enc.WriteString(resp.Err.Error()) - return nil - } else { - enc.WriteString("results") - enc.WriteArrayHeader(uint32(len(resp.Results))) - for _, result := range resp.Results { - if result.Err != nil { - enc.WriteMapHeader(1) - enc.WriteString("error") - enc.WriteString(result.Err.Error()) - continue - } - - sz := 2 - if len(result.Messages) > 0 { - sz++ - } - if result.Partial { - sz++ - } - enc.WriteMapHeader(uint32(sz)) - enc.WriteString("statement_id") - enc.WriteInt(result.StatementID) - if len(result.Messages) > 0 { - enc.WriteString("messages") - enc.WriteArrayHeader(uint32(len(result.Messages))) - for _, msg := range result.Messages { - enc.WriteMapHeader(2) - enc.WriteString("level") - enc.WriteString(msg.Level) - enc.WriteString("text") - enc.WriteString(msg.Text) - } - } - enc.WriteString("series") - enc.WriteArrayHeader(uint32(len(result.Series))) - for _, series := range result.Series { - sz := 2 - if series.Name != "" { - sz++ - } - if len(series.Tags) > 0 { - sz++ - } - if series.Partial { - sz++ - } - enc.WriteMapHeader(uint32(sz)) - if series.Name != "" { - enc.WriteString("name") - enc.WriteString(series.Name) - } - if len(series.Tags) > 0 { - enc.WriteString("tags") - enc.WriteMapHeader(uint32(len(series.Tags))) - for k, v := range series.Tags { - enc.WriteString(k) - enc.WriteString(v) - } - } - enc.WriteString("columns") - enc.WriteArrayHeader(uint32(len(series.Columns))) - for _, col := range series.Columns { - enc.WriteString(col) - } - enc.WriteString("values") - enc.WriteArrayHeader(uint32(len(series.Values))) - for _, values := range series.Values { - enc.WriteArrayHeader(uint32(len(values))) - for _, v := range values { - enc.WriteIntf(v) - } - } - if series.Partial { - enc.WriteString("partial") - enc.WriteBool(series.Partial) - } - } - if result.Partial { - enc.WriteString("partial") - enc.WriteBool(true) - } - } - } - return nil -} - -func stringsEqual(a, b 
[]string) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } - } - return true -} diff --git a/influxql/query/result.go b/influxql/query/result.go deleted file mode 100644 index b315064e219..00000000000 --- a/influxql/query/result.go +++ /dev/null @@ -1,140 +0,0 @@ -package query - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxql" -) - -const ( - // WarningLevel is the message level for a warning. - WarningLevel = "warning" -) - -// TagSet is a fundamental concept within the query system. It represents a composite series, -// composed of multiple individual series that share a set of tag attributes. -type TagSet struct { - Filters []influxql.Expr - SeriesKeys []string - Key []byte -} - -// AddFilter adds a series-level filter to the Tagset. -func (t *TagSet) AddFilter(key string, filter influxql.Expr) { - t.SeriesKeys = append(t.SeriesKeys, key) - t.Filters = append(t.Filters, filter) -} - -func (t *TagSet) Len() int { return len(t.SeriesKeys) } -func (t *TagSet) Less(i, j int) bool { return t.SeriesKeys[i] < t.SeriesKeys[j] } -func (t *TagSet) Swap(i, j int) { - t.SeriesKeys[i], t.SeriesKeys[j] = t.SeriesKeys[j], t.SeriesKeys[i] - t.Filters[i], t.Filters[j] = t.Filters[j], t.Filters[i] -} - -// Reverse reverses the order of series keys and filters in the TagSet. -func (t *TagSet) Reverse() { - for i, j := 0, len(t.Filters)-1; i < j; i, j = i+1, j-1 { - t.Filters[i], t.Filters[j] = t.Filters[j], t.Filters[i] - t.SeriesKeys[i], t.SeriesKeys[j] = t.SeriesKeys[j], t.SeriesKeys[i] - } -} - -// LimitTagSets returns a tag set list with SLIMIT and SOFFSET applied. -func LimitTagSets(a []*TagSet, slimit, soffset int) []*TagSet { - // Ignore if no limit or offset is specified. - if slimit == 0 && soffset == 0 { - return a - } - - // If offset is beyond the number of tag sets then return nil. - if soffset > len(a) { - return nil - } - - // Clamp limit to the max number of tag sets. - if soffset+slimit > len(a) { - slimit = len(a) - soffset - } - return a[soffset : soffset+slimit] -} - -// Message represents a user-facing message to be included with the result. -type Message struct { - Level string `json:"level"` - Text string `json:"text"` -} - -// ReadOnlyWarning generates a warning message that tells the user the command -// they are using is being used for writing in a read only context. -// -// This is a temporary method while to be used while transitioning to read only -// operations for issue #6290. -func ReadOnlyWarning(stmt string) *Message { - return &Message{ - Level: WarningLevel, - Text: fmt.Sprintf("deprecated use of '%s' in a read only context, please use a POST request instead", stmt), - } -} - -// Result represents a resultset returned from a single statement. -// Rows represents a list of rows that can be sorted consistently by name/tag. -type Result struct { - // StatementID is just the statement's position in the query. It's used - // to combine statement results if they're being buffered in memory. - StatementID int - Series models.Rows - Messages []*Message - Partial bool - Err error -} - -// MarshalJSON encodes the result into JSON. -func (r *Result) MarshalJSON() ([]byte, error) { - // Define a struct that outputs "error" as a string. 
- var o struct { - StatementID int `json:"statement_id"` - Series []*models.Row `json:"series,omitempty"` - Messages []*Message `json:"messages,omitempty"` - Partial bool `json:"partial,omitempty"` - Err string `json:"error,omitempty"` - } - - // Copy fields to output struct. - o.StatementID = r.StatementID - o.Series = r.Series - o.Messages = r.Messages - o.Partial = r.Partial - if r.Err != nil { - o.Err = r.Err.Error() - } - - return json.Marshal(&o) -} - -// UnmarshalJSON decodes the data into the Result struct -func (r *Result) UnmarshalJSON(b []byte) error { - var o struct { - StatementID int `json:"statement_id"` - Series []*models.Row `json:"series,omitempty"` - Messages []*Message `json:"messages,omitempty"` - Partial bool `json:"partial,omitempty"` - Err string `json:"error,omitempty"` - } - - err := json.Unmarshal(b, &o) - if err != nil { - return err - } - r.StatementID = o.StatementID - r.Series = o.Series - r.Messages = o.Messages - r.Partial = o.Partial - if o.Err != "" { - r.Err = errors.New(o.Err) - } - return nil -} diff --git a/influxql/query/select.go b/influxql/query/select.go deleted file mode 100644 index 9543961d48c..00000000000 --- a/influxql/query/select.go +++ /dev/null @@ -1,985 +0,0 @@ -package query - -import ( - "context" - "fmt" - "io" - "sort" - "sync" - "time" - - iql "github.com/influxdata/influxdb/v2/influxql" - "github.com/influxdata/influxdb/v2/influxql/query/internal/gota" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxql" - "golang.org/x/sync/errgroup" -) - -var DefaultTypeMapper = influxql.MultiTypeMapper( - FunctionTypeMapper{}, - MathTypeMapper{}, -) - -// SelectOptions are options that customize the select call. -type SelectOptions struct { - // OrgID is the organization for which this query is being executed. - OrgID platform.ID - - // Node to exclusively read from. - // If zero, all nodes are used. - NodeID uint64 - - // Maximum number of concurrent series. - MaxSeriesN int - - // Maximum number of points to read from the query. - // This requires the passed in context to have a Monitor that is - // created using WithMonitor. - MaxPointN int - - // Maximum number of buckets for a statement. - MaxBucketsN int - - // StatisticsGatherer gathers metrics about the execution of the query. - StatisticsGatherer *iql.StatisticsGatherer -} - -// ShardMapper retrieves and maps shards into an IteratorCreator that can later be -// used for executing queries. -type ShardMapper interface { - MapShards(ctx context.Context, sources influxql.Sources, t influxql.TimeRange, opt SelectOptions) (ShardGroup, error) -} - -// TypeMapper maps a data type to the measurement and field. -type TypeMapper interface { - MapType(ctx context.Context, m *influxql.Measurement, field string) influxql.DataType -} - -// FieldMapper returns the data type for the field inside of the measurement. 
-type FieldMapper interface { - TypeMapper - FieldDimensions(ctx context.Context, m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) -} - -// contextFieldMapper adapts a FieldMapper to an influxql.FieldMapper as -// FieldMapper requires a context.Context and orgID -type fieldMapperAdapter struct { - fm FieldMapper - ctx context.Context -} - -func newFieldMapperAdapter(fm FieldMapper, ctx context.Context) *fieldMapperAdapter { - return &fieldMapperAdapter{fm: fm, ctx: ctx} -} - -func (c *fieldMapperAdapter) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { - return c.fm.FieldDimensions(c.ctx, m) -} - -func (c *fieldMapperAdapter) MapType(measurement *influxql.Measurement, field string) influxql.DataType { - return c.fm.MapType(c.ctx, measurement, field) -} - -// ShardGroup represents a shard or a collection of shards that can be accessed -// for creating iterators. -// When creating iterators, the resource used for reading the iterators should be -// separate from the resource used to map the shards. When the ShardGroup is closed, -// it should not close any resources associated with the created Iterator. Those -// resources belong to the Iterator and will be closed when the Iterator itself is -// closed. -// The query engine operates under this assumption and will close the shard group -// after creating the iterators, but before the iterators are actually read. -type ShardGroup interface { - IteratorCreator - FieldMapper - io.Closer -} - -// Select is a prepared statement that is ready to be executed. -type PreparedStatement interface { - // Select creates the Iterators that will be used to read the query. - Select(ctx context.Context) (Cursor, error) - - // Explain outputs the explain plan for this statement. - Explain(ctx context.Context) (string, error) - - // Close closes the resources associated with this prepared statement. - // This must be called as the mapped shards may hold open resources such - // as network connections. - Close() error -} - -// Prepare will compile the statement with the default compile options and -// then prepare the query. -func Prepare(ctx context.Context, stmt *influxql.SelectStatement, shardMapper ShardMapper, opt SelectOptions) (PreparedStatement, error) { - c, err := Compile(stmt, CompileOptions{}) - if err != nil { - return nil, err - } - return c.Prepare(ctx, shardMapper, opt) -} - -// Select compiles, prepares, and then initiates execution of the query using the -// default compile options. -func Select(ctx context.Context, stmt *influxql.SelectStatement, shardMapper ShardMapper, opt SelectOptions) (Cursor, error) { - s, err := Prepare(ctx, stmt, shardMapper, opt) - if err != nil { - return nil, err - } - // Must be deferred so it runs after Select. - defer s.Close() - return s.Select(ctx) -} - -type preparedStatement struct { - stmt *influxql.SelectStatement - opt IteratorOptions - ic interface { - IteratorCreator - io.Closer - } - columns []string - maxPointN int - now time.Time -} - -type contextKey string - -const nowKey contextKey = "now" - -func (p *preparedStatement) Select(ctx context.Context) (Cursor, error) { - // TODO(jsternberg): Remove this hacky method of propagating now. - // Each level of the query should use a time range discovered during - // compilation, but that requires too large of a refactor at the moment. 
- ctx = context.WithValue(ctx, nowKey, p.now) - - opt := p.opt - opt.InterruptCh = ctx.Done() - cur, err := buildCursor(ctx, p.stmt, p.ic, opt) - if err != nil { - return nil, err - } - - return cur, nil -} - -func (p *preparedStatement) Close() error { - return p.ic.Close() -} - -// buildExprIterator creates an iterator for an expression. -func buildExprIterator(ctx context.Context, expr influxql.Expr, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions, selector, writeMode bool) (Iterator, error) { - opt.Expr = expr - b := exprIteratorBuilder{ - ic: ic, - sources: sources, - opt: opt, - selector: selector, - writeMode: writeMode, - } - - switch expr := expr.(type) { - case *influxql.VarRef: - return b.buildVarRefIterator(ctx, expr) - case *influxql.Call: - return b.buildCallIterator(ctx, expr) - default: - return nil, fmt.Errorf("invalid expression type: %T", expr) - } -} - -type exprIteratorBuilder struct { - ic IteratorCreator - sources influxql.Sources - opt IteratorOptions - selector bool - writeMode bool -} - -func (b *exprIteratorBuilder) buildVarRefIterator(ctx context.Context, expr *influxql.VarRef) (Iterator, error) { - inputs := make([]Iterator, 0, len(b.sources)) - if err := func() error { - for _, source := range b.sources { - switch source := source.(type) { - case *influxql.Measurement: - input, err := b.ic.CreateIterator(ctx, source, b.opt) - if err != nil { - return err - } - inputs = append(inputs, input) - case *influxql.SubQuery: - subquery := subqueryBuilder{ - ic: b.ic, - stmt: source.Statement, - } - - input, err := subquery.buildVarRefIterator(ctx, expr, b.opt) - if err != nil { - return err - } else if input != nil { - inputs = append(inputs, input) - } - } - } - return nil - }(); err != nil { - Iterators(inputs).Close() - return nil, err - } - - // Variable references in this section will always go into some call - // iterator. Combine it with a merge iterator. - itr := NewMergeIterator(inputs, b.opt) - if itr == nil { - itr = &nilFloatIterator{} - } - - if b.opt.InterruptCh != nil { - itr = NewInterruptIterator(itr, b.opt.InterruptCh) - } - return itr, nil -} - -func (b *exprIteratorBuilder) buildCallIterator(ctx context.Context, expr *influxql.Call) (Iterator, error) { - // TODO(jsternberg): Refactor this. This section needs to die in a fire. - opt := b.opt - // Eliminate limits and offsets if they were previously set. These are handled by the caller. 
- opt.Limit, opt.Offset = 0, 0 - switch expr.Name { - case "distinct": - opt.Ordered = true - input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, b.selector, false) - if err != nil { - return nil, err - } - input, err = NewDistinctIterator(input, opt) - if err != nil { - return nil, err - } - return NewIntervalIterator(input, opt), nil - case "sample": - opt.Ordered = true - input, err := buildExprIterator(ctx, expr.Args[0], b.ic, b.sources, opt, b.selector, false) - if err != nil { - return nil, err - } - size := expr.Args[1].(*influxql.IntegerLiteral) - - return newSampleIterator(input, opt, int(size.Val)) - case "holt_winters", "holt_winters_with_fit": - opt.Ordered = true - input, err := buildExprIterator(ctx, expr.Args[0], b.ic, b.sources, opt, b.selector, false) - if err != nil { - return nil, err - } - h := expr.Args[1].(*influxql.IntegerLiteral) - m := expr.Args[2].(*influxql.IntegerLiteral) - - includeFitData := expr.Name == "holt_winters_with_fit" - - interval := opt.Interval.Duration - // Redefine interval to be unbounded to capture all aggregate results - opt.StartTime = influxql.MinTime - opt.EndTime = influxql.MaxTime - opt.Interval = Interval{} - - return newHoltWintersIterator(input, opt, int(h.Val), int(m.Val), includeFitData, interval) - case "count_hll", "derivative", "non_negative_derivative", "difference", "non_negative_difference", "moving_average", "exponential_moving_average", "double_exponential_moving_average", "triple_exponential_moving_average", "relative_strength_index", "triple_exponential_derivative", "kaufmans_efficiency_ratio", "kaufmans_adaptive_moving_average", "chande_momentum_oscillator", "elapsed": - if !opt.Interval.IsZero() { - if opt.Ascending { - opt.StartTime -= int64(opt.Interval.Duration) - } else { - opt.EndTime += int64(opt.Interval.Duration) - } - } - opt.Ordered = true - - input, err := buildExprIterator(ctx, expr.Args[0], b.ic, b.sources, opt, b.selector, false) - if err != nil { - return nil, err - } - - switch expr.Name { - case "count_hll": - return NewCountHllIterator(input, opt) - case "derivative", "non_negative_derivative": - interval := opt.DerivativeInterval() - isNonNegative := (expr.Name == "non_negative_derivative") - return newDerivativeIterator(input, opt, interval, isNonNegative) - case "elapsed": - interval := opt.ElapsedInterval() - return newElapsedIterator(input, opt, interval) - case "difference", "non_negative_difference": - isNonNegative := (expr.Name == "non_negative_difference") - return newDifferenceIterator(input, opt, isNonNegative) - case "moving_average": - n := expr.Args[1].(*influxql.IntegerLiteral) - if n.Val > 1 && !opt.Interval.IsZero() { - if opt.Ascending { - opt.StartTime -= int64(opt.Interval.Duration) * (n.Val - 1) - } else { - opt.EndTime += int64(opt.Interval.Duration) * (n.Val - 1) - } - } - return newMovingAverageIterator(input, int(n.Val), opt) - case "exponential_moving_average", "double_exponential_moving_average", "triple_exponential_moving_average", "relative_strength_index", "triple_exponential_derivative": - n := expr.Args[1].(*influxql.IntegerLiteral) - if n.Val > 1 && !opt.Interval.IsZero() { - if opt.Ascending { - opt.StartTime -= int64(opt.Interval.Duration) * (n.Val - 1) - } else { - opt.EndTime += int64(opt.Interval.Duration) * (n.Val - 1) - } - } - - nHold := -1 - if len(expr.Args) >= 3 { - nHold = int(expr.Args[2].(*influxql.IntegerLiteral).Val) - } - - warmupType := gota.WarmEMA - if len(expr.Args) >= 4 { - if warmupType, err = 
gota.ParseWarmupType(expr.Args[3].(*influxql.StringLiteral).Val); err != nil { - return nil, err - } - } - - switch expr.Name { - case "exponential_moving_average": - return newExponentialMovingAverageIterator(input, int(n.Val), nHold, warmupType, opt) - case "double_exponential_moving_average": - return newDoubleExponentialMovingAverageIterator(input, int(n.Val), nHold, warmupType, opt) - case "triple_exponential_moving_average": - return newTripleExponentialMovingAverageIterator(input, int(n.Val), nHold, warmupType, opt) - case "relative_strength_index": - return newRelativeStrengthIndexIterator(input, int(n.Val), nHold, warmupType, opt) - case "triple_exponential_derivative": - return newTripleExponentialDerivativeIterator(input, int(n.Val), nHold, warmupType, opt) - } - case "kaufmans_efficiency_ratio", "kaufmans_adaptive_moving_average": - n := expr.Args[1].(*influxql.IntegerLiteral) - if n.Val > 1 && !opt.Interval.IsZero() { - if opt.Ascending { - opt.StartTime -= int64(opt.Interval.Duration) * (n.Val - 1) - } else { - opt.EndTime += int64(opt.Interval.Duration) * (n.Val - 1) - } - } - - nHold := -1 - if len(expr.Args) >= 3 { - nHold = int(expr.Args[2].(*influxql.IntegerLiteral).Val) - } - - switch expr.Name { - case "kaufmans_efficiency_ratio": - return newKaufmansEfficiencyRatioIterator(input, int(n.Val), nHold, opt) - case "kaufmans_adaptive_moving_average": - return newKaufmansAdaptiveMovingAverageIterator(input, int(n.Val), nHold, opt) - } - case "chande_momentum_oscillator": - n := expr.Args[1].(*influxql.IntegerLiteral) - if n.Val > 1 && !opt.Interval.IsZero() { - if opt.Ascending { - opt.StartTime -= int64(opt.Interval.Duration) * (n.Val - 1) - } else { - opt.EndTime += int64(opt.Interval.Duration) * (n.Val - 1) - } - } - - nHold := -1 - if len(expr.Args) >= 3 { - nHold = int(expr.Args[2].(*influxql.IntegerLiteral).Val) - } - - warmupType := gota.WarmupType(-1) - if len(expr.Args) >= 4 { - wt := expr.Args[3].(*influxql.StringLiteral).Val - if wt != "none" { - if warmupType, err = gota.ParseWarmupType(wt); err != nil { - return nil, err - } - } - } - - return newChandeMomentumOscillatorIterator(input, int(n.Val), nHold, warmupType, opt) - } - panic(fmt.Sprintf("invalid series aggregate function: %s", expr.Name)) - case "cumulative_sum": - opt.Ordered = true - input, err := buildExprIterator(ctx, expr.Args[0], b.ic, b.sources, opt, b.selector, false) - if err != nil { - return nil, err - } - return newCumulativeSumIterator(input, opt) - case "integral": - opt.Ordered = true - input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) - if err != nil { - return nil, err - } - interval := opt.IntegralInterval() - return newIntegralIterator(input, opt, interval) - case "top": - if len(expr.Args) < 2 { - return nil, fmt.Errorf("top() requires 2 or more arguments, got %d", len(expr.Args)) - } - - var input Iterator - if len(expr.Args) > 2 { - // Create a max iterator using the groupings in the arguments. 
- dims := make(map[string]struct{}, len(expr.Args)-2+len(opt.GroupBy)) - for i := 1; i < len(expr.Args)-1; i++ { - ref := expr.Args[i].(*influxql.VarRef) - dims[ref.Val] = struct{}{} - } - for dim := range opt.GroupBy { - dims[dim] = struct{}{} - } - - call := &influxql.Call{ - Name: "max", - Args: expr.Args[:1], - } - callOpt := opt - callOpt.Expr = call - callOpt.GroupBy = dims - callOpt.Fill = influxql.NoFill - - builder := *b - builder.opt = callOpt - builder.selector = true - builder.writeMode = false - - i, err := builder.callIterator(ctx, call, callOpt) - if err != nil { - return nil, err - } - input = i - } else { - // There are no arguments so do not organize the points by tags. - builder := *b - builder.opt.Expr = expr.Args[0] - builder.selector = true - builder.writeMode = false - - ref := expr.Args[0].(*influxql.VarRef) - i, err := builder.buildVarRefIterator(ctx, ref) - if err != nil { - return nil, err - } - input = i - } - - n := expr.Args[len(expr.Args)-1].(*influxql.IntegerLiteral) - return newTopIterator(input, opt, int(n.Val), b.writeMode) - case "bottom": - if len(expr.Args) < 2 { - return nil, fmt.Errorf("bottom() requires 2 or more arguments, got %d", len(expr.Args)) - } - - var input Iterator - if len(expr.Args) > 2 { - // Create a max iterator using the groupings in the arguments. - dims := make(map[string]struct{}, len(expr.Args)-2) - for i := 1; i < len(expr.Args)-1; i++ { - ref := expr.Args[i].(*influxql.VarRef) - dims[ref.Val] = struct{}{} - } - for dim := range opt.GroupBy { - dims[dim] = struct{}{} - } - - call := &influxql.Call{ - Name: "min", - Args: expr.Args[:1], - } - callOpt := opt - callOpt.Expr = call - callOpt.GroupBy = dims - callOpt.Fill = influxql.NoFill - - builder := *b - builder.opt = callOpt - builder.selector = true - builder.writeMode = false - - i, err := builder.callIterator(ctx, call, callOpt) - if err != nil { - return nil, err - } - input = i - } else { - // There are no arguments so do not organize the points by tags. 
- builder := *b - builder.opt.Expr = expr.Args[0] - builder.selector = true - builder.writeMode = false - - ref := expr.Args[0].(*influxql.VarRef) - i, err := builder.buildVarRefIterator(ctx, ref) - if err != nil { - return nil, err - } - input = i - } - - n := expr.Args[len(expr.Args)-1].(*influxql.IntegerLiteral) - return newBottomIterator(input, b.opt, int(n.Val), b.writeMode) - } - - itr, err := func() (Iterator, error) { - switch expr.Name { - case "count": - switch arg0 := expr.Args[0].(type) { - case *influxql.Call: - if arg0.Name == "distinct" { - input, err := buildExprIterator(ctx, arg0, b.ic, b.sources, opt, b.selector, false) - if err != nil { - return nil, err - } - return newCountIterator(input, opt) - } - } - fallthrough - case "min", "max", "sum", "first", "last", "mean", "sum_hll", "merge_hll": - return b.callIterator(ctx, expr, opt) - case "median": - opt.Ordered = true - input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) - if err != nil { - return nil, err - } - return newMedianIterator(input, opt) - case "mode": - input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) - if err != nil { - return nil, err - } - return NewModeIterator(input, opt) - case "stddev": - input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) - if err != nil { - return nil, err - } - return newStddevIterator(input, opt) - case "spread": - // OPTIMIZE(benbjohnson): convert to map/reduce - input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) - if err != nil { - return nil, err - } - return newSpreadIterator(input, opt) - case "percentile": - opt.Ordered = true - input, err := buildExprIterator(ctx, expr.Args[0].(*influxql.VarRef), b.ic, b.sources, opt, false, false) - if err != nil { - return nil, err - } - var percentile float64 - switch arg := expr.Args[1].(type) { - case *influxql.NumberLiteral: - percentile = arg.Val - case *influxql.IntegerLiteral: - percentile = float64(arg.Val) - } - return newPercentileIterator(input, opt, percentile) - default: - return nil, fmt.Errorf("unsupported call: %s", expr.Name) - } - }() - - if err != nil { - return nil, err - } - - if !b.selector || !opt.Interval.IsZero() { - itr = NewIntervalIterator(itr, opt) - if !opt.Interval.IsZero() && opt.Fill != influxql.NoFill { - itr = NewFillIterator(itr, expr, opt) - } - } - if opt.InterruptCh != nil { - itr = NewInterruptIterator(itr, opt.InterruptCh) - } - return itr, nil -} - -func (b *exprIteratorBuilder) callIterator(ctx context.Context, expr *influxql.Call, opt IteratorOptions) (Iterator, error) { - inputs := make([]Iterator, 0, len(b.sources)) - if err := func() error { - for _, source := range b.sources { - switch source := source.(type) { - case *influxql.Measurement: - input, err := b.ic.CreateIterator(ctx, source, opt) - if err != nil { - return err - } - inputs = append(inputs, input) - case *influxql.SubQuery: - // Identify the name of the field we are using. - arg0 := expr.Args[0].(*influxql.VarRef) - - opt.Ordered = false - input, err := buildExprIterator(ctx, arg0, b.ic, []influxql.Source{source}, opt, b.selector, false) - if err != nil { - return err - } - - // Wrap the result in a call iterator. 
- i, err := NewCallIterator(input, opt) - if err != nil { - input.Close() - return err - } - inputs = append(inputs, i) - } - } - return nil - }(); err != nil { - Iterators(inputs).Close() - return nil, err - } - - itr, err := Iterators(inputs).Merge(opt) - if err != nil { - Iterators(inputs).Close() - return nil, err - } else if itr == nil { - itr = &nilFloatIterator{} - } - return itr, nil -} - -func buildCursor(ctx context.Context, stmt *influxql.SelectStatement, ic IteratorCreator, opt IteratorOptions) (Cursor, error) { - switch opt.Fill { - case influxql.NumberFill: - if v, ok := opt.FillValue.(int); ok { - opt.FillValue = int64(v) - } - case influxql.PreviousFill: - opt.FillValue = SkipDefault - } - - fields := make([]*influxql.Field, 0, len(stmt.Fields)+1) - if !stmt.OmitTime { - // Add a field with the variable "time" if we have not omitted time. - fields = append(fields, &influxql.Field{ - Expr: &influxql.VarRef{ - Val: "time", - Type: influxql.Time, - }, - }) - } - - // Iterate through each of the fields to add them to the value mapper. - valueMapper := newValueMapper() - for _, f := range stmt.Fields { - fields = append(fields, valueMapper.Map(f)) - - // If the field is a top() or bottom() call, we need to also add - // the extra variables if we are not writing into a target. - if stmt.Target != nil { - continue - } - - switch expr := f.Expr.(type) { - case *influxql.Call: - if expr.Name == "top" || expr.Name == "bottom" { - for i := 1; i < len(expr.Args)-1; i++ { - nf := influxql.Field{Expr: expr.Args[i]} - fields = append(fields, valueMapper.Map(&nf)) - } - } - } - } - - // Set the aliases on each of the columns to what the final name should be. - columns := stmt.ColumnNames() - for i, f := range fields { - f.Alias = columns[i] - } - - // Retrieve the refs to retrieve the auxiliary fields. - var auxKeys []influxql.VarRef - if len(valueMapper.refs) > 0 { - opt.Aux = make([]influxql.VarRef, 0, len(valueMapper.refs)) - for ref := range valueMapper.refs { - opt.Aux = append(opt.Aux, *ref) - } - sort.Sort(influxql.VarRefs(opt.Aux)) - - auxKeys = make([]influxql.VarRef, len(opt.Aux)) - for i, ref := range opt.Aux { - auxKeys[i] = valueMapper.symbols[ref.String()] - } - } - - // If there are no calls, then produce an auxiliary cursor. - if len(valueMapper.calls) == 0 { - // If all of the auxiliary keys are of an unknown type, - // do not construct the iterator and return a null cursor. - if !hasValidType(auxKeys) { - return newNullCursor(fields), nil - } - - itr, err := buildAuxIterator(ctx, ic, stmt.Sources, opt) - if err != nil { - return nil, err - } - - // Create a slice with an empty first element. - keys := []influxql.VarRef{{}} - keys = append(keys, auxKeys...) - - scanner := NewIteratorScanner(itr, keys, opt.FillValue) - return newScannerCursor(scanner, fields, opt), nil - } - - // Check to see if this is a selector statement. - // It is a selector if it is the only selector call and the call itself - // is a selector. - selector := len(valueMapper.calls) == 1 - if selector { - for call := range valueMapper.calls { - if !influxql.IsSelector(call) { - selector = false - } - } - } - - // Produce an iterator for every single call and create an iterator scanner - // associated with it. 
- var g errgroup.Group - var mu sync.Mutex - scanners := make([]IteratorScanner, 0, len(valueMapper.calls)) - for call := range valueMapper.calls { - call := call - - driver := valueMapper.table[call] - if driver.Type == influxql.Unknown { - // The primary driver of this call is of unknown type, so skip this. - continue - } - - g.Go(func() error { - itr, err := buildFieldIterator(ctx, call, ic, stmt.Sources, opt, selector, stmt.Target != nil) - if err != nil { - return err - } - - keys := make([]influxql.VarRef, 0, len(auxKeys)+1) - keys = append(keys, driver) - keys = append(keys, auxKeys...) - - scanner := NewIteratorScanner(itr, keys, opt.FillValue) - - mu.Lock() - scanners = append(scanners, scanner) - mu.Unlock() - - return nil - }) - } - - // Close all scanners if any iterator fails. - if err := g.Wait(); err != nil { - for _, s := range scanners { - s.Close() - } - return nil, err - } - - if len(scanners) == 0 { - return newNullCursor(fields), nil - } else if len(scanners) == 1 { - return newScannerCursor(scanners[0], fields, opt), nil - } - return newMultiScannerCursor(scanners, fields, opt), nil -} - -func buildAuxIterator(ctx context.Context, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions) (Iterator, error) { - inputs := make([]Iterator, 0, len(sources)) - if err := func() error { - for _, source := range sources { - switch source := source.(type) { - case *influxql.Measurement: - input, err := ic.CreateIterator(ctx, source, opt) - if err != nil { - return err - } - inputs = append(inputs, input) - case *influxql.SubQuery: - b := subqueryBuilder{ - ic: ic, - stmt: source.Statement, - } - - input, err := b.buildAuxIterator(ctx, opt) - if err != nil { - return err - } else if input != nil { - inputs = append(inputs, input) - } - } - } - return nil - }(); err != nil { - Iterators(inputs).Close() - return nil, err - } - - // Merge iterators to read auxiliary fields. - input, err := Iterators(inputs).Merge(opt) - if err != nil { - Iterators(inputs).Close() - return nil, err - } else if input == nil { - input = &nilFloatIterator{} - } - - // Filter out duplicate rows, if required. - if opt.Dedupe { - // If there is no group by and it is a float iterator, see if we can use a fast dedupe. - if itr, ok := input.(FloatIterator); ok && len(opt.Dimensions) == 0 { - if sz := len(opt.Aux); sz > 0 && sz < 3 { - input = newFloatFastDedupeIterator(itr) - } else { - input = NewDedupeIterator(itr) - } - } else { - input = NewDedupeIterator(input) - } - } - // Apply limit & offset. - if opt.Limit > 0 || opt.Offset > 0 { - input = NewLimitIterator(input, opt) - } - return input, nil -} - -func buildFieldIterator(ctx context.Context, expr influxql.Expr, ic IteratorCreator, sources influxql.Sources, opt IteratorOptions, selector, writeMode bool) (Iterator, error) { - input, err := buildExprIterator(ctx, expr, ic, sources, opt, selector, writeMode) - if err != nil { - return nil, err - } - - // Apply limit & offset. - if opt.Limit > 0 || opt.Offset > 0 { - input = NewLimitIterator(input, opt) - } - return input, nil -} - -type valueMapper struct { - // An index that maps a node's string output to its symbol so that all - // nodes with the same signature are mapped the same. - symbols map[string]influxql.VarRef - // An index that maps a specific expression to a symbol. This ensures that - // only expressions that were mapped get symbolized. - table map[influxql.Expr]influxql.VarRef - // A collection of all of the calls in the table. 
- calls map[*influxql.Call]struct{} - // A collection of all of the calls in the table. - refs map[*influxql.VarRef]struct{} - i int -} - -func newValueMapper() *valueMapper { - return &valueMapper{ - symbols: make(map[string]influxql.VarRef), - table: make(map[influxql.Expr]influxql.VarRef), - calls: make(map[*influxql.Call]struct{}), - refs: make(map[*influxql.VarRef]struct{}), - } -} - -func (v *valueMapper) Map(field *influxql.Field) *influxql.Field { - clone := *field - clone.Expr = influxql.CloneExpr(field.Expr) - - influxql.Walk(v, clone.Expr) - clone.Expr = influxql.RewriteExpr(clone.Expr, v.rewriteExpr) - return &clone -} - -func (v *valueMapper) Visit(n influxql.Node) influxql.Visitor { - expr, ok := n.(influxql.Expr) - if !ok { - return v - } - - key := expr.String() - symbol, ok := v.symbols[key] - if !ok { - // This symbol has not been assigned yet. - // If this is a call or expression, mark the node - // as stored in the symbol table. - switch n := n.(type) { - case *influxql.Call: - if isMathFunction(n) { - return v - } - v.calls[n] = struct{}{} - case *influxql.VarRef: - v.refs[n] = struct{}{} - default: - return v - } - - // Determine the symbol name and the symbol type. - symbolName := fmt.Sprintf("val%d", v.i) - valuer := influxql.TypeValuerEval{ - TypeMapper: DefaultTypeMapper, - } - typ, _ := valuer.EvalType(expr) - - symbol = influxql.VarRef{ - Val: symbolName, - Type: typ, - } - - // Assign this symbol to the symbol table if it is not presently there - // and increment the value index number. - v.symbols[key] = symbol - v.i++ - } - // Store the symbol for this expression so we can later rewrite - // the query correctly. - v.table[expr] = symbol - return nil -} - -func (v *valueMapper) rewriteExpr(expr influxql.Expr) influxql.Expr { - symbol, ok := v.table[expr] - if !ok { - return expr - } - return &symbol -} - -func validateTypes(stmt *influxql.SelectStatement) error { - valuer := influxql.TypeValuerEval{ - TypeMapper: influxql.MultiTypeMapper( - FunctionTypeMapper{}, - MathTypeMapper{}, - ), - } - for _, f := range stmt.Fields { - if _, err := valuer.EvalType(f.Expr); err != nil { - return err - } - } - return nil -} - -// hasValidType returns true if there is at least one non-unknown type -// in the slice. -func hasValidType(refs []influxql.VarRef) bool { - for _, ref := range refs { - if ref.Type != influxql.Unknown { - return true - } - } - return false -} diff --git a/influxql/query/select_test.go b/influxql/query/select_test.go deleted file mode 100644 index b3bd717d700..00000000000 --- a/influxql/query/select_test.go +++ /dev/null @@ -1,4277 +0,0 @@ -package query_test - -import ( - "context" - "fmt" - "math/rand" - "reflect" - "runtime" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxql" -) - -// Second represents a helper for type converting durations. 
-const Second = int64(time.Second) - -func randomFloatSlice(seed int64, length int) []float64 { - r := rand.New(rand.NewSource(seed)) - out := make([]float64, 0, length) - for i := 0; i < 3000; i++ { - out = append(out, r.Float64()) - } - return out -} - -func floatIterator(fSlice []float64, name, tags string, startTime, step int64) *FloatIterator { - p := make([]query.FloatPoint, 0, len(fSlice)) - currentTime := startTime - for _, f := range fSlice { - p = append(p, query.FloatPoint{Name: name, Tags: ParseTags(tags), Time: currentTime, Value: f}) - currentTime += step - } - return &FloatIterator{ - Points: p, - } -} - -func TestSelect(t *testing.T) { - for _, tt := range []struct { - name string - q string - typ influxql.DataType - fields map[string]influxql.DataType - expr string - itrs []query.Iterator - rows []query.Row - now time.Time - err string - onlyArch string - }{ - { - name: "Min", - q: `SELECT min(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Float, - expr: `min(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(19)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, - }, - }, - { - name: "count_hll", - q: `SELECT count_hll(sum_hll(value)) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Float, - itrs: []query.Iterator{ - floatIterator(randomFloatSlice(42, 2000), "cpu", "region=west,host=A", 0*Second, 1), - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 31 * Second, Value: 100}, - }}, - floatIterator(randomFloatSlice(42, 3000)[1000:], "cpu", "region=south,host=A", 0*Second, 1), - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 0 * Second, Value: 20}, - }}, - }, - rows: []query.Row{ - // Note that for the first aggregate there are 2000 points in each series, but only 3000 unique points, 2994 ≈ 3000 - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(2994)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(1)}}, - 
{Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(1)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(1)}}, - }, - }, - { - name: "Distinct_Float", - q: `SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: 19}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(20)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(19)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, - }, - }, - { - name: "Distinct_Integer", - q: `SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: 19}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(20)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(19)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(2)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(10)}}, - }, - }, - { - name: "Distinct_Unsigned", - q: `SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: 
ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: 19}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(20)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(19)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(2)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(10)}}, - }, - }, - { - name: "Distinct_String", - q: `SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.String, - itrs: []query.Iterator{ - &StringIterator{Points: []query.StringPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: "a"}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: "b"}, - }}, - &StringIterator{Points: []query.StringPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: "c"}, - }}, - &StringIterator{Points: []query.StringPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: "b"}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: "d"}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: "d"}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: "d"}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{"a"}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{"b"}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{"d"}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{"c"}}, - }, - }, - { - name: "Distinct_Boolean", - q: `SELECT distinct(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Boolean, - itrs: []query.Iterator{ - &BooleanIterator{Points: []query.BooleanPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: true}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: false}, - }}, - &BooleanIterator{Points: []query.BooleanPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: false}, - }}, - &BooleanIterator{Points: []query.BooleanPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: true}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: false}, - {Name: "cpu", Tags: 
ParseTags("region=east,host=A"), Time: 11 * Second, Value: false}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: true}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{true}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{false}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{false}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{true}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{false}}, - }, - }, - { - name: "Mean_Float", - q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Float, - expr: `mean(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{19.5}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{2.5}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{3.2}}, - }, - }, - { - name: "Mean_Integer", - q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Integer, - expr: `mean(value::integer)`, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: 
ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{19.5}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{2.5}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{3.2}}, - }, - }, - { - name: "Mean_Unsigned", - q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Unsigned, - expr: `mean(value::Unsigned)`, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{19.5}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{2.5}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{3.2}}, - }, - }, - { - name: "Mean_String", - q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.String, - itrs: []query.Iterator{&StringIterator{}}, - err: `unsupported mean iterator type: *query_test.StringIterator`, - }, - { - name: "Mean_Boolean", - q: `SELECT mean(value) FROM cpu WHERE time >= 
'1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Boolean, - itrs: []query.Iterator{&BooleanIterator{}}, - err: `unsupported mean iterator type: *query_test.BooleanIterator`, - }, - { - name: "Median_Float", - q: `SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{19.5}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{2.5}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(3)}}, - }, - }, - { - name: "Median_Integer", - q: `SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, 
Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{19.5}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{2.5}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(3)}}, - }, - }, - { - name: "Median_Unsigned", - q: `SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{19.5}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{2.5}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(3)}}, - }, - }, - { - name: "Median_String", - q: `SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.String, - itrs: []query.Iterator{&StringIterator{}}, - err: `unsupported median iterator type: *query_test.StringIterator`, - }, - { - name: "Median_Boolean", - q: `SELECT median(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Boolean, - itrs: []query.Iterator{&BooleanIterator{}}, - err: `unsupported median iterator type: *query_test.BooleanIterator`, - }, - { - name: "Mode_Float", - q: `SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 10}, - {Name: "cpu", Tags: 
ParseTags("region=west,host=A"), Time: 11 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(10)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(1)}}, - }, - }, - { - name: "Mode_Integer", - q: `SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 54 * Second, Value: 5}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(10)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(2)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(100)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(10)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(1)}}, - }, - }, - { - name: 
"Mode_Unsigned", - q: `SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 54 * Second, Value: 5}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(10)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(2)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(100)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(10)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(1)}}, - }, - }, - { - name: "Mode_String", - q: `SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.String, - itrs: []query.Iterator{ - &StringIterator{Points: []query.StringPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: "a"}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: "a"}, - }}, - &StringIterator{Points: []query.StringPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: "cxxx"}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 6 * Second, Value: "zzzz"}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 7 * Second, Value: "zzzz"}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 8 * Second, Value: "zxxx"}, - }}, - &StringIterator{Points: []query.StringPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: "b"}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: "d"}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: "d"}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: "d"}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{"a"}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{"d"}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: 
[]interface{}{"zzzz"}}, - }, - }, - { - name: "Mode_Boolean", - q: `SELECT mode(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Boolean, - itrs: []query.Iterator{ - &BooleanIterator{Points: []query.BooleanPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: true}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: false}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 2 * Second, Value: false}, - }}, - &BooleanIterator{Points: []query.BooleanPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: true}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 6 * Second, Value: false}, - }}, - &BooleanIterator{Points: []query.BooleanPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: false}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: true}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: false}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: true}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{false}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{true}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{true}}, - }, - }, - { - name: "Top_NoTags_Float", - q: `SELECT top(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(19)}}, - {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, - {Time: 53 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(5)}}, - {Time: 53 * Second, 
Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(4)}}, - }, - }, - { - name: "Top_NoTags_Integer", - q: `SELECT top(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(19)}}, - {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(100)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(10)}}, - {Time: 53 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(5)}}, - {Time: 53 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(4)}}, - }, - }, - { - name: "Top_NoTags_Unsigned", - q: `SELECT top(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - 
{Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(19)}}, - {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(100)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(10)}}, - {Time: 53 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(5)}}, - {Time: 53 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(4)}}, - }, - }, - { - name: "Top_Tags_Float", - q: `SELECT top(value::float, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`, - typ: influxql.Float, - expr: `max(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20), "A"}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10), "B"}}, - {Time: 31 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100), "A"}}, - {Time: 53 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(5), "B"}}, - }, - }, - { - name: "Top_Tags_Integer", - q: `SELECT top(value::integer, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`, - typ: influxql.Integer, - expr: `max(value::integer)`, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 
5 * Second, Value: 10, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20), "A"}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(10), "B"}}, - {Time: 31 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(100), "A"}}, - {Time: 53 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(5), "B"}}, - }, - }, - { - name: "Top_Tags_Unsigned", - q: `SELECT top(value::Unsigned, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`, - typ: influxql.Unsigned, - expr: `max(value::Unsigned)`, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20), "A"}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(10), "B"}}, - {Time: 31 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(100), "A"}}, - {Time: 53 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(5), "B"}}, - }, - }, - { - name: "Top_GroupByTags_Float", - q: `SELECT top(value::float, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' 
GROUP BY region, time(30s) fill(none)`, - typ: influxql.Float, - expr: `max(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, - }}, - }, - rows: []query.Row{ - {Time: 9 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=east")}, Values: []interface{}{float64(19), "A"}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{float64(20), "A"}}, - {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{float64(100), "A"}}, - }, - }, - { - name: "Top_GroupByTags_Integer", - q: `SELECT top(value::integer, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, - }}, - }, - 
rows: []query.Row{ - {Time: 9 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=east")}, Values: []interface{}{int64(19), "A"}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{int64(20), "A"}}, - {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{int64(100), "A"}}, - }, - }, - { - name: "Top_GroupByTags_Unsigned", - q: `SELECT top(value::Unsigned, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, - }}, - }, - rows: []query.Row{ - {Time: 9 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=east")}, Values: []interface{}{uint64(19), "A"}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{uint64(20), "A"}}, - {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{uint64(100), "A"}}, - }, - }, - { - name: "Top_AuxFields_Float", - q: `SELECT top(p1, 2), p2, p3 FROM cpu`, - fields: map[string]influxql.DataType{ - "p1": influxql.Float, - "p2": influxql.Float, - "p3": influxql.String, - }, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 1, Aux: []interface{}{float64(2), "aaa"}}, - {Name: "cpu", Time: 1 * Second, Value: 2, Aux: []interface{}{float64(3), "bbb"}}, - {Name: "cpu", Time: 2 * Second, Value: 3, Aux: []interface{}{float64(4), "ccc"}}, - {Name: "cpu", Time: 3 * Second, Value: 4, Aux: []interface{}{float64(5), "ddd"}}, - }}, - }, - rows: []query.Row{ - {Time: 2 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(3), float64(4), "ccc"}}, - {Time: 3 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(4), float64(5), "ddd"}}, - }, - }, - { - name: "Top_AuxFields_Integer", - q: `SELECT top(p1, 2), p2, p3 FROM cpu`, - fields: map[string]influxql.DataType{ - "p1": influxql.Integer, - "p2": influxql.Integer, - "p3": influxql.String, - }, - 
itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0 * Second, Value: 1, Aux: []interface{}{int64(2), "aaa"}}, - {Name: "cpu", Time: 1 * Second, Value: 2, Aux: []interface{}{int64(3), "bbb"}}, - {Name: "cpu", Time: 2 * Second, Value: 3, Aux: []interface{}{int64(4), "ccc"}}, - {Name: "cpu", Time: 3 * Second, Value: 4, Aux: []interface{}{int64(5), "ddd"}}, - }}, - }, - rows: []query.Row{ - {Time: 2 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3), int64(4), "ccc"}}, - {Time: 3 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4), int64(5), "ddd"}}, - }, - }, - { - name: "Top_AuxFields_Unsigned", - q: `SELECT top(p1, 2), p2, p3 FROM cpu`, - fields: map[string]influxql.DataType{ - "p1": influxql.Unsigned, - "p2": influxql.Unsigned, - "p3": influxql.String, - }, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0 * Second, Value: 1, Aux: []interface{}{uint64(2), "aaa"}}, - {Name: "cpu", Time: 1 * Second, Value: 2, Aux: []interface{}{uint64(3), "bbb"}}, - {Name: "cpu", Time: 2 * Second, Value: 3, Aux: []interface{}{uint64(4), "ccc"}}, - {Name: "cpu", Time: 3 * Second, Value: 4, Aux: []interface{}{uint64(5), "ddd"}}, - }}, - }, - rows: []query.Row{ - {Time: 2 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(3), uint64(4), "ccc"}}, - {Time: 3 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(4), uint64(5), "ddd"}}, - }, - }, - { - name: "Bottom_NoTags_Float", - q: `SELECT bottom(value::float, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - {Time: 11 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(3)}}, - {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(1)}}, - {Time: 51 * Second, Series: query.Series{Name: "cpu", Tags: 
ParseTags("host=B")}, Values: []interface{}{float64(2)}}, - }, - }, - { - name: "Bottom_NoTags_Integer", - q: `SELECT bottom(value::integer, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(2)}}, - {Time: 11 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(3)}}, - {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(100)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(10)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(1)}}, - {Time: 51 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(2)}}, - }, - }, - { - name: "Bottom_NoTags_Unsigned", - q: `SELECT bottom(value::Unsigned, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s), host fill(none)`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 10 
* Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(2)}}, - {Time: 11 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(3)}}, - {Time: 31 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(100)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(10)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(1)}}, - {Time: 51 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(2)}}, - }, - }, - { - name: "Bottom_Tags_Float", - q: `SELECT bottom(value::float, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`, - typ: influxql.Float, - expr: `min(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, - }}, - }, - rows: []query.Row{ - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10), "B"}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2), "A"}}, - {Time: 31 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100), "A"}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1), "B"}}, - }, - }, - { - name: "Bottom_Tags_Integer", - q: `SELECT bottom(value::integer, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: 
[]interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, - }}, - }, - rows: []query.Row{ - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(10), "B"}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(2), "A"}}, - {Time: 31 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(100), "A"}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(1), "B"}}, - }, - }, - { - name: "Bottom_Tags_Unsigned", - q: `SELECT bottom(value::Unsigned, host::tag, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(30s) fill(none)`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, - }}, - }, - rows: []query.Row{ - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(10), "B"}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(2), "A"}}, - {Time: 31 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(100), "A"}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(1), "B"}}, - }, - }, - { - name: "Bottom_GroupByTags_Float", - q: `SELECT bottom(value::float, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`, - typ: 
influxql.Float, - expr: `min(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, - }}, - }, - rows: []query.Row{ - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=east")}, Values: []interface{}{float64(2), "A"}}, - {Time: 11 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{float64(3), "A"}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{float64(1), "B"}}, - }, - }, - { - name: "Bottom_GroupByTags_Integer", - q: `SELECT bottom(value::float, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`, - typ: influxql.Integer, - expr: `min(value::float)`, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, - }}, - }, - rows: []query.Row{ 
- {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=east")}, Values: []interface{}{int64(2), "A"}}, - {Time: 11 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{int64(3), "A"}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{int64(1), "B"}}, - }, - }, - { - name: "Bottom_GroupByTags_Unsigned", - q: `SELECT bottom(value::float, host::tag, 1) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY region, time(30s) fill(none)`, - typ: influxql.Unsigned, - expr: `min(value::float)`, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100, Aux: []interface{}{"A"}}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4, Aux: []interface{}{"B"}}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5, Aux: []interface{}{"B"}}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19, Aux: []interface{}{"A"}}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2, Aux: []interface{}{"A"}}, - }}, - }, - rows: []query.Row{ - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=east")}, Values: []interface{}{uint64(2), "A"}}, - {Time: 11 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{uint64(3), "A"}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("region=west")}, Values: []interface{}{uint64(1), "B"}}, - }, - }, - { - name: "Bottom_AuxFields_Float", - q: `SELECT bottom(p1, 2), p2, p3 FROM cpu`, - fields: map[string]influxql.DataType{ - "p1": influxql.Float, - "p2": influxql.Float, - "p3": influxql.String, - }, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 1, Aux: []interface{}{float64(2), "aaa"}}, - {Name: "cpu", Time: 1 * Second, Value: 2, Aux: []interface{}{float64(3), "bbb"}}, - {Name: "cpu", Time: 2 * Second, Value: 3, Aux: []interface{}{float64(4), "ccc"}}, - {Name: "cpu", Time: 3 * Second, Value: 4, Aux: []interface{}{float64(5), "ddd"}}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1), float64(2), "aaa"}}, - {Time: 1 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2), float64(3), "bbb"}}, - }, - }, - { - name: "Bottom_AuxFields_Integer", - q: `SELECT bottom(p1, 2), p2, p3 FROM cpu`, - fields: map[string]influxql.DataType{ - "p1": influxql.Integer, - "p2": influxql.Integer, - "p3": 
influxql.String, - }, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0 * Second, Value: 1, Aux: []interface{}{int64(2), "aaa"}}, - {Name: "cpu", Time: 1 * Second, Value: 2, Aux: []interface{}{int64(3), "bbb"}}, - {Name: "cpu", Time: 2 * Second, Value: 3, Aux: []interface{}{int64(4), "ccc"}}, - {Name: "cpu", Time: 3 * Second, Value: 4, Aux: []interface{}{int64(5), "ddd"}}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(1), int64(2), "aaa"}}, - {Time: 1 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(2), int64(3), "bbb"}}, - }, - }, - { - name: "Bottom_AuxFields_Unsigned", - q: `SELECT bottom(p1, 2), p2, p3 FROM cpu`, - fields: map[string]influxql.DataType{ - "p1": influxql.Unsigned, - "p2": influxql.Unsigned, - "p3": influxql.String, - }, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0 * Second, Value: 1, Aux: []interface{}{uint64(2), "aaa"}}, - {Name: "cpu", Time: 1 * Second, Value: 2, Aux: []interface{}{uint64(3), "bbb"}}, - {Name: "cpu", Time: 2 * Second, Value: 3, Aux: []interface{}{uint64(4), "ccc"}}, - {Name: "cpu", Time: 3 * Second, Value: 4, Aux: []interface{}{uint64(5), "ddd"}}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(1), uint64(2), "aaa"}}, - {Time: 1 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(2), uint64(3), "bbb"}}, - }, - }, - { - name: "Fill_Null_Float", - q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(null)`, - typ: influxql.Float, - expr: `mean(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - }, - }, - { - name: "Fill_Number_Float", - q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(1)`, - typ: influxql.Float, - expr: `mean(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(1)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(1)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(1)}}, - {Time: 40 * 
Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(1)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(1)}}, - }, - }, - { - name: "Fill_Previous_Float", - q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(previous)`, - typ: influxql.Float, - expr: `mean(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - }, - }, - { - name: "Fill_Previous_Float_Two_Series", - q: `SELECT last(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(previous)`, - typ: influxql.Float, - expr: `last(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 40 * Second, Value: 30}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 30 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 40 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(20)}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(30)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(30)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(1)}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(2)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(2)}}, - }, - }, - { - name: "Fill_Linear_Float_One", - q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < 
'1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`, - typ: influxql.Float, - expr: `mean(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 32 * Second, Value: 4}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(3)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(4)}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - }, - }, - { - name: "Fill_Linear_Float_Many", - q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`, - typ: influxql.Float, - expr: `mean(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 62 * Second, Value: 7}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(3)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(4)}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(5)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(6)}}, - {Time: 60 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(7)}}, - }, - }, - { - name: "Fill_Linear_Float_MultipleSeries", - q: `SELECT mean(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`, - typ: influxql.Float, - expr: `mean(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 32 * Second, Value: 4}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, 
Values: []interface{}{nil}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(4)}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - }, - }, - { - name: "Fill_Linear_Integer_One", - q: `SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`, - typ: influxql.Integer, - expr: `max(value::integer)`, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 32 * Second, Value: 4}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(1)}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(2)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(4)}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - }, - }, - { - name: "Fill_Linear_Integer_Many", - q: `SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:20Z' GROUP BY host, time(10s) fill(linear)`, - typ: influxql.Integer, - expr: `max(value::integer)`, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 72 * Second, Value: 10}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(1)}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(2)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(4)}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(5)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(7)}}, - {Time: 60 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(8)}}, - {Time: 70 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(10)}}, - }, - }, - { - name: "Fill_Linear_Integer_MultipleSeries", - q: `SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`, 
- typ: influxql.Integer, - expr: `max(value::integer)`, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 32 * Second, Value: 4}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(2)}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(4)}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - }, - }, - { - name: "Fill_Linear_Unsigned_One", - q: `SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`, - typ: influxql.Unsigned, - expr: `max(value::Unsigned)`, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 32 * Second, Value: 4}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(1)}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(2)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(4)}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - }, - }, - { - name: "Fill_Linear_Unsigned_Many", - q: `SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:20Z' GROUP BY host, time(10s) fill(linear)`, - typ: influxql.Unsigned, - expr: `max(value::Unsigned)`, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 72 * Second, Value: 10}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", 
Tags: ParseTags("host=A")}, Values: []interface{}{uint64(1)}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(2)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(4)}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(5)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(7)}}, - {Time: 60 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(8)}}, - {Time: 70 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(10)}}, - }, - }, - { - name: "Fill_Linear_Unsigned_MultipleSeries", - q: `SELECT max(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY host, time(10s) fill(linear)`, - typ: influxql.Unsigned, - expr: `max(value::Unsigned)`, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 12 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 32 * Second, Value: 4}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(2)}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{nil}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(4)}}, - {Time: 40 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{nil}}, - }, - }, - { - name: "Stddev_Float", - q: `SELECT stddev(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: 
ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{0.7071067811865476}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{0.7071067811865476}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{query.NullFloat}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{query.NullFloat}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{1.5811388300841898}}, - }, - }, - { - name: "Stddev_Integer", - q: `SELECT stddev(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{0.7071067811865476}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{0.7071067811865476}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{query.NullFloat}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{query.NullFloat}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{1.5811388300841898}}, - }, - }, - { - name: "Stddev_Unsigned", - q: `SELECT stddev(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * 
Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{0.7071067811865476}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{0.7071067811865476}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{query.NullFloat}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{query.NullFloat}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{1.5811388300841898}}, - }, - }, - { - name: "Spread_Float", - q: `SELECT spread(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(1)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(1)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(0)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(0)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(4)}}, - }, - }, - { - name: "Spread_Integer", - q: 
`SELECT spread(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(1)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(1)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(0)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(0)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(4)}}, - }, - }, - { - name: "Spread_Unsigned", - q: `SELECT spread(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 5}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(1)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(1)}}, - {Time: 30 * Second, Series: 
query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(0)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(0)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(4)}}, - }, - }, - { - name: "Percentile_Float", - q: `SELECT percentile(value, 90) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 9}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 8}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 7}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 54 * Second, Value: 6}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 55 * Second, Value: 5}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 56 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 57 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 58 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 59 * Second, Value: 1}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(20)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(3)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(9)}}, - }, - }, - { - name: "Percentile_Integer", - q: `SELECT percentile(value, 90) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 
51 * Second, Value: 9}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 8}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 7}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 54 * Second, Value: 6}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 55 * Second, Value: 5}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 56 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 57 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 58 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 59 * Second, Value: 1}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(20)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(3)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(100)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(10)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(9)}}, - }, - }, - { - name: "Percentile_Unsigned", - q: `SELECT percentile(value, 90) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 50 * Second, Value: 10}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 51 * Second, Value: 9}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 52 * Second, Value: 8}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 53 * Second, Value: 7}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 54 * Second, Value: 6}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 55 * Second, Value: 5}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 56 * Second, Value: 4}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 57 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 58 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 59 * Second, Value: 1}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(20)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: 
ParseTags("host=A")}, Values: []interface{}{uint64(3)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(100)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(10)}}, - {Time: 50 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(9)}}, - }, - }, - { - name: "Sample_Float", - q: `SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: 10}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 10 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 15 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(20)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(10)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(19)}}, - {Time: 15 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(2)}}, - }, - }, - { - name: "Sample_Integer", - q: `SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: 10}, - }}, - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 10 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 15 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(20)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{int64(10)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(19)}}, - {Time: 15 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{int64(2)}}, - }, - }, - { - name: "Sample_Unsigned", - q: `SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: 10}, - }}, - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 10 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 15 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: 
query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(20)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{uint64(10)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(19)}}, - {Time: 15 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{uint64(2)}}, - }, - }, - { - name: "Sample_String", - q: `SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.String, - itrs: []query.Iterator{ - &StringIterator{Points: []query.StringPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: "a"}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: "b"}, - }}, - &StringIterator{Points: []query.StringPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 10 * Second, Value: "c"}, - {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 15 * Second, Value: "d"}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{"a"}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{"b"}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{"c"}}, - {Time: 15 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{"d"}}, - }, - }, - { - name: "Sample_Boolean", - q: `SELECT sample(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Boolean, - itrs: []query.Iterator{ - &BooleanIterator{Points: []query.BooleanPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: true}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 5 * Second, Value: false}, - }}, - &BooleanIterator{Points: []query.BooleanPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 10 * Second, Value: false}, - {Name: "cpu", Tags: ParseTags("region=east,host=B"), Time: 15 * Second, Value: true}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{true}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{false}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{false}}, - {Time: 15 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{true}}, - }, - }, - //{ - // name: "Raw", - // q: `SELECT v1::float, v2::float FROM cpu`, - // itrs: []query.Iterator{ - // &FloatIterator{Points: []query.FloatPoint{ - // {Time: 0, Aux: []interface{}{float64(1), nil}}, - // {Time: 1, Aux: []interface{}{nil, float64(2)}}, - // {Time: 5, Aux: []interface{}{float64(3), float64(4)}}, - // }}, - // }, - // points: [][]query.Point{ - // { - // &query.FloatPoint{Time: 0, Value: 1}, - // &query.FloatPoint{Time: 0, Nil: true}, - // }, - // { - // &query.FloatPoint{Time: 1, Nil: true}, - // &query.FloatPoint{Time: 1, Value: 2}, - // }, - // { - // &query.FloatPoint{Time: 5, Value: 3}, - // &query.FloatPoint{Time: 5, Value: 4}, - // }, - // }, - //}, - { - name: "ParenExpr_Min", - q: `SELECT (min(value)) FROM cpu 
WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Float, - expr: `min(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(19)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(100)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, - }, - }, - { - name: "ParenExpr_Distinct", - q: `SELECT (distinct(value)) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 1 * Second, Value: 19}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 11 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 12 * Second, Value: 2}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(20)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(19)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(2)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(10)}}, - }, - }, - { - name: "Derivative_Float", - q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.5)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2.25)}}, - {Time: 12 * Second, Series: 
query.Series{Name: "cpu"}, Values: []interface{}{float64(-4)}}, - }, - }, - { - name: "Derivative_Integer", - q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.5)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2.25)}}, - {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-4)}}, - }, - }, - { - name: "Derivative_Unsigned", - q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.5)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2.25)}}, - {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-4)}}, - }, - }, - { - name: "Derivative_Desc_Float", - q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z' ORDER BY desc`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 12 * Second, Value: 3}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 0 * Second, Value: 20}, - }}, - }, - rows: []query.Row{ - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(4)}}, - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.25)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2.5)}}, - }, - }, - { - name: "Derivative_Desc_Integer", - q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z' ORDER BY desc`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 12 * Second, Value: 3}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 0 * Second, Value: 20}, - }}, - }, - rows: []query.Row{ - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(4)}}, - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.25)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2.5)}}, - }, - }, - { - name: "Derivative_Desc_Unsigned", - q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z' ORDER BY desc`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 12 * Second, Value: 3}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 4 * Second, Value: 
10}, - {Name: "cpu", Time: 0 * Second, Value: 20}, - }}, - }, - rows: []query.Row{ - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(4)}}, - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.25)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2.5)}}, - }, - }, - { - name: "Derivative_Duplicate_Float", - q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 0 * Second, Value: 19}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 4 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.5)}}, - }, - }, - { - name: "Derivative_Duplicate_Integer", - q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 0 * Second, Value: 19}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 4 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.5)}}, - }, - }, - { - name: "Derivative_Duplicate_Unsigned", - q: `SELECT derivative(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 0 * Second, Value: 19}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 4 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-2.5)}}, - }, - }, - { - name: "Difference_Float", - q: `SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-10)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9)}}, - {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-16)}}, - }, - }, - { - name: "Difference_Integer", - q: `SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(-10)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(9)}}, - {Time: 12 * Second, Series: query.Series{Name: 
"cpu"}, Values: []interface{}{int64(-16)}}, - }, - }, - { - name: "Difference_Unsigned", - q: `SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(18446744073709551606)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9)}}, - {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(18446744073709551600)}}, - }, - }, - { - name: "Difference_Duplicate_Float", - q: `SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 0 * Second, Value: 19}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 4 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-10)}}, - }, - }, - { - name: "Difference_Duplicate_Integer", - q: `SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 0 * Second, Value: 19}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 4 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(-10)}}, - }, - }, - { - name: "Difference_Duplicate_Unsigned", - q: `SELECT difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 0 * Second, Value: 19}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 4 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(18446744073709551606)}}, - }, - }, - { - name: "Non_Negative_Difference_Float", - q: `SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 29}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - {Name: "cpu", Time: 16 * Second, Value: 39}, - }}, - }, - rows: []query.Row{ - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(19)}}, - {Time: 16 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(36)}}, - }, - }, - { - name: "Non_Negative_Difference_Integer", - q: `SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Integer, - itrs: []query.Iterator{ - 
&IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 21}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(11)}}, - }, - }, - { - name: "Non_Negative_Difference_Unsigned", - q: `SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 21}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(11)}}, - }, - }, - { - name: "Non_Negative_Difference_Duplicate_Float", - q: `SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 0 * Second, Value: 19}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 4 * Second, Value: 3}, - {Name: "cpu", Time: 8 * Second, Value: 30}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 12 * Second, Value: 10}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - {Name: "cpu", Time: 16 * Second, Value: 40}, - {Name: "cpu", Time: 16 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, - {Time: 16 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(30)}}, - }, - }, - { - name: "Non_Negative_Difference_Duplicate_Integer", - q: `SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 0 * Second, Value: 19}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 4 * Second, Value: 3}, - {Name: "cpu", Time: 8 * Second, Value: 30}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 12 * Second, Value: 10}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - {Name: "cpu", Time: 16 * Second, Value: 40}, - {Name: "cpu", Time: 16 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, - {Time: 16 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(30)}}, - }, - }, - { - name: "Non_Negative_Difference_Duplicate_Unsigned", - q: `SELECT non_negative_difference(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 0 * Second, Value: 19}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 4 * Second, Value: 3}, - {Name: "cpu", Time: 8 * Second, Value: 30}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 12 * Second, Value: 10}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - 
{Name: "cpu", Time: 16 * Second, Value: 40}, - {Name: "cpu", Time: 16 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, - {Time: 16 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(30)}}, - }, - }, - { - name: "Elapsed_Float", - q: `SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 11 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, - {Time: 11 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3)}}, - }, - }, - { - name: "Elapsed_Integer", - q: `SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 11 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, - {Time: 11 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3)}}, - }, - }, - { - name: "Elapsed_Unsigned", - q: `SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 11 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, - {Time: 11 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3)}}, - }, - }, - { - name: "Elapsed_String", - q: `SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.String, - itrs: []query.Iterator{ - &StringIterator{Points: []query.StringPoint{ - {Name: "cpu", Time: 0 * Second, Value: "a"}, - {Name: "cpu", Time: 4 * Second, Value: "b"}, - {Name: "cpu", Time: 8 * Second, Value: "c"}, - {Name: "cpu", Time: 11 * Second, Value: "d"}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, - {Time: 11 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3)}}, - }, - }, - { - name: "Elapsed_Boolean", - q: `SELECT elapsed(value, 1s) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Boolean, - itrs: []query.Iterator{ - &BooleanIterator{Points: []query.BooleanPoint{ - {Name: "cpu", Time: 0 * 
Second, Value: true}, - {Name: "cpu", Time: 4 * Second, Value: false}, - {Name: "cpu", Time: 8 * Second, Value: false}, - {Name: "cpu", Time: 11 * Second, Value: true}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(4)}}, - {Time: 11 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3)}}, - }, - }, - { - name: "Integral_Float", - q: `SELECT integral(value) FROM cpu`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 10 * Second, Value: 20}, - {Name: "cpu", Time: 15 * Second, Value: 10}, - {Name: "cpu", Time: 20 * Second, Value: 0}, - {Name: "cpu", Time: 30 * Second, Value: -10}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(50)}}, - }, - }, - { - name: "Integral_Duplicate_Float", - q: `SELECT integral(value) FROM cpu`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 5 * Second, Value: 10}, - {Name: "cpu", Time: 5 * Second, Value: 30}, - {Name: "cpu", Time: 10 * Second, Value: 40}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(250)}}, - }, - }, - { - name: "Integral_Float_GroupByTime", - q: `SELECT integral(value) FROM cpu WHERE time > 0s AND time < 60s GROUP BY time(20s)`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 10 * Second, Value: 20}, - {Name: "cpu", Time: 15 * Second, Value: 10}, - {Name: "cpu", Time: 20 * Second, Value: 0}, - {Name: "cpu", Time: 30 * Second, Value: -10}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100)}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-50)}}, - }, - }, - { - name: "Integral_Float_InterpolateGroupByTime", - q: `SELECT integral(value) FROM cpu WHERE time > 0s AND time < 60s GROUP BY time(20s)`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 10 * Second, Value: 20}, - {Name: "cpu", Time: 15 * Second, Value: 10}, - {Name: "cpu", Time: 25 * Second, Value: 0}, - {Name: "cpu", Time: 30 * Second, Value: -10}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(112.5)}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-12.5)}}, - }, - }, - { - name: "Integral_Integer", - q: `SELECT integral(value) FROM cpu`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 5 * Second, Value: 10}, - {Name: "cpu", Time: 10 * Second, Value: 0}, - {Name: "cpu", Time: 20 * Second, Value: -10}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(50)}}, - }, - }, - { - name: "Integral_Duplicate_Integer", - q: `SELECT integral(value, 2s) FROM cpu`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 5 * Second, Value: 10}, - {Name: 
"cpu", Time: 5 * Second, Value: 30}, - {Name: "cpu", Time: 10 * Second, Value: 40}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(125)}}, - }, - }, - { - name: "Integral_Unsigned", - q: `SELECT integral(value) FROM cpu`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 5 * Second, Value: 10}, - {Name: "cpu", Time: 10 * Second, Value: 0}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100)}}, - }, - }, - { - name: "Integral_Duplicate_Unsigned", - q: `SELECT integral(value, 2s) FROM cpu`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 5 * Second, Value: 10}, - {Name: "cpu", Time: 5 * Second, Value: 30}, - {Name: "cpu", Time: 10 * Second, Value: 40}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(125)}}, - }, - }, - { - name: "MovingAverage_Float", - q: `SELECT moving_average(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(15)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(14.5)}}, - {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(11)}}, - }, - }, - { - name: "MovingAverage_Integer", - q: `SELECT moving_average(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(15)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(14.5)}}, - {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(11)}}, - }, - }, - { - name: "MovingAverage_Unsigned", - q: `SELECT moving_average(value, 2) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(15)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(14.5)}}, - {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(11)}}, - }, - }, - { - name: "CumulativeSum_Float", - q: `SELECT cumulative_sum(value) FROM cpu 
WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(30)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(49)}}, - {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(52)}}, - }, - }, - { - name: "CumulativeSum_Integer", - q: `SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Integer, - itrs: []query.Iterator{ - &IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(30)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(49)}}, - {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(52)}}, - }, - }, - { - name: "CumulativeSum_Unsigned", - q: `SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 8 * Second, Value: 19}, - {Name: "cpu", Time: 12 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(30)}}, - {Time: 8 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(49)}}, - {Time: 12 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(52)}}, - }, - }, - { - name: "CumulativeSum_Duplicate_Float", - q: `SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Float, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 0 * Second, Value: 19}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 4 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(39)}}, - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(49)}}, - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(52)}}, - }, - }, - { - name: "CumulativeSum_Duplicate_Integer", - q: `SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Integer, - itrs: []query.Iterator{ - 
&IntegerIterator{Points: []query.IntegerPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 0 * Second, Value: 19}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 4 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(39)}}, - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(49)}}, - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(52)}}, - }, - }, - { - name: "CumulativeSum_Duplicate_Unsigned", - q: `SELECT cumulative_sum(value) FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:16Z'`, - typ: influxql.Unsigned, - itrs: []query.Iterator{ - &UnsignedIterator{Points: []query.UnsignedPoint{ - {Name: "cpu", Time: 0 * Second, Value: 20}, - {Name: "cpu", Time: 0 * Second, Value: 19}, - {Name: "cpu", Time: 4 * Second, Value: 10}, - {Name: "cpu", Time: 4 * Second, Value: 3}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(39)}}, - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(49)}}, - {Time: 4 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(52)}}, - }, - }, - { - name: "HoltWinters_GroupBy_Agg", - q: `SELECT holt_winters(mean(value), 2, 2) FROM cpu WHERE time >= '1970-01-01T00:00:10Z' AND time < '1970-01-01T00:00:20Z' GROUP BY time(2s)`, - typ: influxql.Float, - expr: `mean(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 10 * Second, Value: 4}, - {Name: "cpu", Time: 11 * Second, Value: 6}, - - {Name: "cpu", Time: 12 * Second, Value: 9}, - {Name: "cpu", Time: 13 * Second, Value: 11}, - - {Name: "cpu", Time: 14 * Second, Value: 5}, - {Name: "cpu", Time: 15 * Second, Value: 7}, - - {Name: "cpu", Time: 16 * Second, Value: 10}, - {Name: "cpu", Time: 17 * Second, Value: 12}, - - {Name: "cpu", Time: 18 * Second, Value: 6}, - {Name: "cpu", Time: 19 * Second, Value: 8}, - }}, - }, - rows: []query.Row{ - {Time: 20 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{11.960623419918432}}, - {Time: 22 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{7.953140268154609}}, - }, - onlyArch: "amd64", - }, - { - name: "DuplicateSelectors", - q: `SELECT min(value) * 2, min(value) / 2 FROM cpu WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-02T00:00:00Z' GROUP BY time(10s), host fill(none)`, - typ: influxql.Float, - expr: `min(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 0 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 11 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 31 * Second, Value: 100}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 9 * Second, Value: 19}, - {Name: "cpu", Tags: ParseTags("region=east,host=A"), Time: 10 * Second, Value: 2}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 5 * Second, Value: 10}, - }}, - }, - rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: 
"cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(38), float64(19) / 2}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(4), float64(1)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=A")}, Values: []interface{}{float64(200), float64(50)}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu", Tags: ParseTags("host=B")}, Values: []interface{}{float64(20), float64(5)}}, - }, - }, - { - name: "GroupByOffset", - q: `SELECT mean(value) FROM cpu WHERE time >= now() - 2m AND time < now() GROUP BY time(1m, now())`, - typ: influxql.Float, - expr: `mean(value::float)`, - itrs: []query.Iterator{ - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 34 * Second, Value: 20}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 57 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 92 * Second, Value: 100}, - }}, - &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 45 * Second, Value: 10}, - }}, - }, - rows: []query.Row{ - {Time: 30 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(11)}}, - {Time: 90 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100)}}, - }, - now: mustParseTime("1970-01-01T00:02:30Z"), - }, - } { - t.Run(tt.name, func(t *testing.T) { - if tt.onlyArch != "" && runtime.GOARCH != tt.onlyArch { - t.Skipf("Expected outputs of %s only valid when GOARCH = %s", tt.name, tt.onlyArch) - } - - shardMapper := ShardMapper{ - MapShardsFn: func(_ context.Context, sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { - var fields map[string]influxql.DataType - if tt.typ != influxql.Unknown { - fields = map[string]influxql.DataType{"value": tt.typ} - } else { - fields = tt.fields - } - return &ShardGroup{ - Fields: fields, - Dimensions: []string{"host", "region"}, - CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - if m.Name != "cpu" { - t.Fatalf("unexpected source: %s", m.Name) - } - if tt.expr != "" && !reflect.DeepEqual(opt.Expr, MustParseExpr(tt.expr)) { - t.Fatalf("unexpected expr: %s", spew.Sdump(opt.Expr)) - } - - itrs := tt.itrs - if _, ok := opt.Expr.(*influxql.Call); ok { - for i, itr := range itrs { - itr, err := query.NewCallIterator(itr, opt) - if err != nil { - return nil, err - } - itrs[i] = itr - } - } - return query.Iterators(itrs).Merge(opt) - }, - } - }, - } - - stmt := MustParseSelectStatement(tt.q) - stmt.OmitTime = true - cur, err := func(stmt *influxql.SelectStatement) (query.Cursor, error) { - c, err := query.Compile(stmt, query.CompileOptions{ - Now: tt.now, - }) - if err != nil { - return nil, err - } - - p, err := c.Prepare(context.Background(), &shardMapper, query.SelectOptions{}) - if err != nil { - return nil, err - } - return p.Select(context.Background()) - }(stmt) - if err != nil { - if tt.err == "" { - t.Fatal(err) - } else if have, want := err.Error(), tt.err; have != want { - t.Fatalf("unexpected error: have=%s want=%s", have, want) - } - } else if tt.err != "" { - t.Fatal("expected error") - } else if a, err := ReadCursor(cur); err != nil { - t.Fatalf("unexpected point: %s", err) - } else if diff := cmp.Diff(tt.rows, a); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } - }) - } -} - -// Ensure a SELECT with raw fields works for all types. 
-func TestSelect_Raw(t *testing.T) { - shardMapper := ShardMapper{ - MapShardsFn: func(_ context.Context, sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { - return &ShardGroup{ - Fields: map[string]influxql.DataType{ - "f": influxql.Float, - "i": influxql.Integer, - "u": influxql.Unsigned, - "s": influxql.String, - "b": influxql.Boolean, - }, - CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - if m.Name != "cpu" { - t.Fatalf("unexpected source: %s", m.Name) - } - if !reflect.DeepEqual(opt.Aux, []influxql.VarRef{ - {Val: "b", Type: influxql.Boolean}, - {Val: "f", Type: influxql.Float}, - {Val: "i", Type: influxql.Integer}, - {Val: "s", Type: influxql.String}, - {Val: "u", Type: influxql.Unsigned}, - }) { - t.Fatalf("unexpected auxiliary fields: %v", opt.Aux) - } - return &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Aux: []interface{}{ - true, float64(20), int64(20), "a", uint64(20)}}, - {Name: "cpu", Time: 5 * Second, Aux: []interface{}{ - false, float64(10), int64(10), "b", uint64(10)}}, - {Name: "cpu", Time: 9 * Second, Aux: []interface{}{ - true, float64(19), int64(19), "c", uint64(19)}}, - }}, nil - }, - } - }, - } - - stmt := MustParseSelectStatement(`SELECT f, i, u, s, b FROM cpu`) - stmt.OmitTime = true - cur, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{}) - if err != nil { - t.Errorf("parse error: %s", err) - } else if a, err := ReadCursor(cur); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff([]query.Row{ - { - Time: 0 * Second, - Series: query.Series{ - Name: "cpu", - }, - Values: []interface{}{float64(20), int64(20), uint64(20), "a", true}, - }, - { - Time: 5 * Second, - Series: query.Series{ - Name: "cpu", - }, - Values: []interface{}{float64(10), int64(10), uint64(10), "b", false}, - }, - { - Time: 9 * Second, - Series: query.Series{ - Name: "cpu", - }, - Values: []interface{}{float64(19), int64(19), uint64(19), "c", true}, - }, - }, a); diff != "" { - t.Errorf("unexpected points:\n%s", diff) - } -} - -// Ensure a SELECT binary expr queries can be executed as floats. 
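The table in TestSelect_BinaryExpr below effectively encodes InfluxQL's operand-coercion rules for arithmetic between fields and literals: a float on either side promotes the result to float, two operands of the same type keep that type (with division being the visible exception, since the integer-division cases below produce floats), and mixing an integer with an unsigned operand or literal is a type error unless an explicit cast is used. A rough, illustrative sketch of that promotion rule for addition (an approximation for this test table, not the engine's actual implementation):

// additionResultType approximates the coercion asserted by the addition cases below;
// the real engine also covers booleans, strings, and operator-specific rules such as
// integer division promoting to float.
func additionResultType(lhs, rhs influxql.DataType) (influxql.DataType, error) {
	switch {
	case lhs == influxql.Float || rhs == influxql.Float:
		return influxql.Float, nil
	case lhs == rhs:
		return lhs, nil // integer+integer or unsigned+unsigned
	default:
		return influxql.Unknown, fmt.Errorf("cannot use + between %v and %v, an explicit cast is required", lhs, rhs)
	}
}

The cases that combine a float field with the literal 9223372036854775808 (2^63) also document a float64 precision limit: at that magnitude adjacent float64 values are 2048 apart, so adding or subtracting values as small as 20 leaves the result unchanged, which is why those expected rows repeat the same large constant.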
-func TestSelect_BinaryExpr(t *testing.T) { - shardMapper := ShardMapper{ - MapShardsFn: func(_ context.Context, sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { - return &ShardGroup{ - Fields: map[string]influxql.DataType{ - "f": influxql.Float, - "i": influxql.Integer, - "u": influxql.Unsigned, - }, - CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - if m.Name != "cpu" { - t.Fatalf("unexpected source: %s", m.Name) - } - makeAuxFields := func(value int) []interface{} { - aux := make([]interface{}, len(opt.Aux)) - for i := range aux { - switch opt.Aux[i].Type { - case influxql.Float: - aux[i] = float64(value) - case influxql.Integer: - aux[i] = int64(value) - case influxql.Unsigned: - aux[i] = uint64(value) - } - } - return aux - } - return &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Aux: makeAuxFields(20)}, - {Name: "cpu", Time: 5 * Second, Aux: makeAuxFields(10)}, - {Name: "cpu", Time: 9 * Second, Aux: makeAuxFields(19)}, - }}, nil - }, - } - }, - } - - for _, test := range []struct { - Name string - Statement string - Rows []query.Row - Err string - }{ - { - Name: "Float_AdditionRHS_Number", - Statement: `SELECT f + 2.0 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, - }, - }, - { - Name: "Integer_AdditionRHS_Number", - Statement: `SELECT i + 2.0 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, - }, - }, - { - Name: "Unsigned_AdditionRHS_Number", - Statement: `SELECT u + 2.0 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, - }, - }, - { - Name: "Float_AdditionRHS_Integer", - Statement: `SELECT f + 2 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, - }, - }, - { - Name: "Integer_AdditionRHS_Integer", - Statement: `SELECT i + 2 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(22)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(12)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(21)}}, - }, - }, - { - Name: "Unsigned_AdditionRHS_Integer", - Statement: `SELECT u + 2 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(22)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(12)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(21)}}, - }, - }, - { - Name: 
"Float_AdditionRHS_Unsigned", - Statement: `SELECT f + 9223372036854775808 FROM cpu`, - Rows: []query.Row{ // adding small floats to this does not change the value, this is expected - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775808)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775808)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775808)}}, - }, - }, - { - Name: "Integer_AdditionRHS_Unsigned", - Statement: `SELECT i + 9223372036854775808 FROM cpu`, - Err: `type error: i::integer + 9223372036854775808: cannot use + with an integer and unsigned literal`, - }, - { - Name: "Unsigned_AdditionRHS_Unsigned", - Statement: `SELECT u + 9223372036854775808 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775828)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775818)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775827)}}, - }, - }, - { - Name: "Float_AdditionLHS_Number", - Statement: `SELECT 2.0 + f FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, - }, - }, - { - Name: "Integer_AdditionLHS_Number", - Statement: `SELECT 2.0 + i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, - }, - }, - { - Name: "Unsigned_AdditionLHS_Number", - Statement: `SELECT 2.0 + u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, - }, - }, - { - Name: "Float_AdditionLHS_Integer", - Statement: `SELECT 2 + f FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(22)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(12)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(21)}}, - }, - }, - { - Name: "Integer_AdditionLHS_Integer", - Statement: `SELECT 2 + i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(22)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(12)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(21)}}, - }, - }, - { - Name: "Unsigned_AdditionLHS_Integer", - Statement: `SELECT 2 + u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(22)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(12)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(21)}}, - }, - }, - { - Name: 
"Float_AdditionLHS_Unsigned", - Statement: `SELECT 9223372036854775808 + f FROM cpu`, - Rows: []query.Row{ // adding small floats to this does not change the value, this is expected - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775808)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775808)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775808)}}, - }, - }, - { - Name: "Integer_AdditionLHS_Unsigned", - Statement: `SELECT 9223372036854775808 + i FROM cpu`, - Err: `type error: 9223372036854775808 + i::integer: cannot use + with an integer and unsigned literal`, - }, - { - Name: "Unsigned_AdditionLHS_Unsigned", - Statement: `SELECT 9223372036854775808 + u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775828)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775818)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775827)}}, - }, - }, - { - Name: "Float_Add_Float", - Statement: `SELECT f + f FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, - }, - }, - { - Name: "Integer_Add_Integer", - Statement: `SELECT i + i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(38)}}, - }, - }, - { - Name: "Unsigned_Add_Unsigned", - Statement: `SELECT u + u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(38)}}, - }, - }, - { - Name: "Float_Add_Integer", - Statement: `SELECT f + i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, - }, - }, - { - Name: "Float_Add_Unsigned", - Statement: `SELECT f + u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, - }, - }, - { - Name: "Integer_Add_Unsigned", - Statement: `SELECT i + u FROM cpu`, - Err: `type error: i::integer + u::unsigned: cannot use + between an integer and unsigned, an explicit cast is required`, - }, - { - Name: "Float_MultiplicationRHS_Number", - Statement: `SELECT f * 2.0 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, 
Values: []interface{}{float64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, - }, - }, - { - Name: "Integer_MultiplicationRHS_Number", - Statement: `SELECT i * 2.0 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, - }, - }, - { - Name: "Unsigned_MultiplicationRHS_Number", - Statement: `SELECT u * 2.0 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, - }, - }, - { - Name: "Float_MultiplicationRHS_Integer", - Statement: `SELECT f * 2 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, - }, - }, - { - Name: "Integer_MultiplicationRHS_Integer", - Statement: `SELECT i * 2 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(38)}}, - }, - }, - { - Name: "Unsigned_MultiplicationRHS_Integer", - Statement: `SELECT u * 2 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(38)}}, - }, - }, - // Skip unsigned literals for multiplication because there is inevitable - // overflow. While it is possible to do, the behavior is considered undefined - // and it's not a very good test because it would result in just plugging - // the values into the computer anyway to figure out what the correct answer - // is rather than calculating it myself and testing that I get the correct - // value. 
- { - Name: "Float_MultiplicationLHS_Number", - Statement: `SELECT 2.0 * f FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, - }, - }, - { - Name: "Integer_MultiplicationLHS_Number", - Statement: `SELECT 2.0 * i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, - }, - }, - { - Name: "Unsigned_MultiplicationLHS_Number", - Statement: `SELECT 2.0 * u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, - }, - }, - { - Name: "Float_MultiplicationLHS_Integer", - Statement: `SELECT 2 * f FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(38)}}, - }, - }, - { - Name: "Integer_MultiplicationLHS_Integer", - Statement: `SELECT 2 * i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(38)}}, - }, - }, - { - Name: "Unsigned_MultiplicationLHS_Integer", - Statement: `SELECT 2 * u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(40)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(38)}}, - }, - }, - // Skip unsigned literals for multiplication. See above. 
- { - Name: "Float_Multiply_Float", - Statement: `SELECT f * f FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(400)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(361)}}, - }, - }, - { - Name: "Integer_Multiply_Integer", - Statement: `SELECT i * i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(400)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(100)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(361)}}, - }, - }, - { - Name: "Unsigned_Multiply_Unsigned", - Statement: `SELECT u * u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(400)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(100)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(361)}}, - }, - }, - { - Name: "Float_Multiply_Integer", - Statement: `SELECT f * i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(400)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(361)}}, - }, - }, - { - Name: "Float_Multiply_Unsigned", - Statement: `SELECT f * u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(400)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(100)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(361)}}, - }, - }, - { - Name: "Integer_Multiply_Unsigned", - Statement: `SELECT i * u FROM cpu`, - Err: `type error: i::integer * u::unsigned: cannot use * between an integer and unsigned, an explicit cast is required`, - }, - { - Name: "Float_SubtractionRHS_Number", - Statement: `SELECT f - 2.0 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(18)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(17)}}, - }, - }, - { - Name: "Integer_SubtractionRHS_Number", - Statement: `SELECT i - 2.0 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(18)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(17)}}, - }, - }, - { - Name: "Unsigned_SubtractionRHS_Number", - Statement: `SELECT u - 2.0 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(18)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(17)}}, - }, - }, - { - Name: "Float_SubtractionRHS_Integer", - Statement: `SELECT f - 2 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(18)}}, - {Time: 5 * Second, Series: 
query.Series{Name: "cpu"}, Values: []interface{}{float64(8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(17)}}, - }, - }, - { - Name: "Integer_SubtractionRHS_Integer", - Statement: `SELECT i - 2 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(18)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(17)}}, - }, - }, - { - Name: "Unsigned_SubtractionRHS_Integer", - Statement: `SELECT u - 2 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(18)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(17)}}, - }, - }, - { - Name: "Float_SubtractionRHS_Unsigned", - Statement: `SELECT f - 9223372036854775808 FROM cpu`, - Rows: []query.Row{ // adding small floats to this does not change the value, this is expected - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-9223372036854775808)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-9223372036854775808)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-9223372036854775808)}}, - }, - }, - { - Name: "Integer_SubtractionRHS_Unsigned", - Statement: `SELECT i - 9223372036854775808 FROM cpu`, - Err: `type error: i::integer - 9223372036854775808: cannot use - with an integer and unsigned literal`, - }, - // Skip Unsigned_SubtractionRHS_Integer because it would result in underflow. - { - Name: "Float_SubtractionLHS_Number", - Statement: `SELECT 2.0 - f FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-18)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-17)}}, - }, - }, - { - Name: "Integer_SubtractionLHS_Number", - Statement: `SELECT 2.0 - i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-18)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-17)}}, - }, - }, - { - Name: "Unsigned_SubtractionLHS_Number", - Statement: `SELECT 2.0 - u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-18)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-17)}}, - }, - }, - { - Name: "Float_SubtractionLHS_Integer", - Statement: `SELECT 2 - f FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-18)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-17)}}, - }, - }, - { - Name: "Integer_SubtractionLHS_Integer", - Statement: `SELECT 2 - i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: 
[]interface{}{int64(-18)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(-8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(-17)}}, - }, - }, - { - Name: "Unsigned_SubtractionLHS_Integer", - Statement: `SELECT 30 - u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(10)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(11)}}, - }, - }, - { - Name: "Float_SubtractionLHS_Unsigned", - Statement: `SELECT 9223372036854775808 - f FROM cpu`, // subtracting small floats to this does not change the value, this is expected - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775828)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775828)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(9223372036854775828)}}, - }, - }, - { - Name: "Integer_SubtractionLHS_Unsigned", - Statement: `SELECT 9223372036854775808 - i FROM cpu`, - Err: `type error: 9223372036854775808 - i::integer: cannot use - with an integer and unsigned literal`, - }, - { - Name: "Unsigned_SubtractionLHS_Unsigned", - Statement: `SELECT 9223372036854775808 - u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775788)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775798)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9223372036854775789)}}, - }, - }, - { - Name: "Float_Subtract_Float", - Statement: `SELECT f - f FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, - }, - }, - { - Name: "Integer_Subtract_Integer", - Statement: `SELECT i - i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(0)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(0)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(0)}}, - }, - }, - { - Name: "Unsigned_Subtract_Unsigned", - Statement: `SELECT u - u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, - }, - }, - { - Name: "Float_Subtract_Integer", - Statement: `SELECT f - i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, - }, - }, - { - Name: "Float_Subtract_Unsigned", - Statement: `SELECT f - u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, - 
{Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(0)}}, - }, - }, - { - Name: "Integer_Subtract_Unsigned", - Statement: `SELECT i - u FROM cpu`, - Err: `type error: i::integer - u::unsigned: cannot use - between an integer and unsigned, an explicit cast is required`, - }, - { - Name: "Float_DivisionRHS_Number", - Statement: `SELECT f / 2.0 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(5)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(19) / 2}}, - }, - }, - { - Name: "Integer_DivisionRHS_Number", - Statement: `SELECT i / 2.0 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(5)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(19) / 2}}, - }, - }, - { - Name: "Unsigned_DivisionRHS_Number", - Statement: `SELECT u / 2.0 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(5)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(19) / 2}}, - }, - }, - { - Name: "Float_DivisionRHS_Integer", - Statement: `SELECT f / 2 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(5)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(19) / 2}}, - }, - }, - { - Name: "Integer_DivisionRHS_Integer", - Statement: `SELECT i / 2 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(5)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(19) / 2}}, - }, - }, - { - Name: "Unsigned_DivisionRHS_Integer", - Statement: `SELECT u / 2 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(10)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(5)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(9)}}, - }, - }, - { - Name: "Float_DivisionRHS_Unsigned", - Statement: `SELECT f / 9223372036854775808 FROM cpu`, - Rows: []query.Row{ // dividing small floats does not result in a meaningful result, this is expected - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(20) / float64(9223372036854775808)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10) / float64(9223372036854775808)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(19) / float64(9223372036854775808)}}, - }, - }, - { - Name: "Integer_DivisionRHS_Unsigned", - Statement: `SELECT i / 9223372036854775808 FROM cpu`, - Err: `type error: i::integer / 9223372036854775808: cannot use / with an integer and unsigned literal`, - }, - { 
- Name: "Unsigned_DivisionRHS_Unsigned", - Statement: `SELECT u / 9223372036854775808 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, - }, - }, - { - Name: "Float_DivisionLHS_Number", - Statement: `SELECT 38.0 / f FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1.9)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(3.8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2)}}, - }, - }, - { - Name: "Integer_DivisionLHS_Number", - Statement: `SELECT 38.0 / i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1.9)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(3.8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2)}}, - }, - }, - { - Name: "Unsigned_DivisionLHS_Number", - Statement: `SELECT 38.0 / u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1.9)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(3.8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2)}}, - }, - }, - { - Name: "Float_DivisionLHS_Integer", - Statement: `SELECT 38 / f FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1.9)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(3.8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2)}}, - }, - }, - { - Name: "Integer_DivisionLHS_Integer", - Statement: `SELECT 38 / i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1.9)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(3.8)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(2)}}, - }, - }, - { - Name: "Unsigned_DivisionLHS_Integer", - Statement: `SELECT 38 / u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(1)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(3)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(2)}}, - }, - }, - { - Name: "Float_DivisionLHS_Unsigned", - Statement: `SELECT 9223372036854775808 / f FROM cpu`, - Rows: []query.Row{ // dividing large floats results in inaccurate outputs so these may not be correct, but that is considered normal for floating point - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(461168601842738816)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(922337203685477632)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(485440633518672384)}}, - }, - }, - { - Name: "Integer_DivisionLHS_Unsigned", - Statement: `SELECT 9223372036854775808 / i FROM cpu`, - Err: `type error: 9223372036854775808 / i::integer: cannot use / with an integer and unsigned literal`, - }, - 
{ - Name: "Unsigned_DivisionLHS_Unsigned", - Statement: `SELECT 9223372036854775808 / u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(461168601842738790)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(922337203685477580)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(485440633518672410)}}, - }, - }, - { - Name: "Float_Divide_Float", - Statement: `SELECT f / f FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, - }, - }, - { - Name: "Integer_Divide_Integer", - Statement: `SELECT i / i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, - }, - }, - { - Name: "Unsigned_Divide_Unsigned", - Statement: `SELECT u / u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(1)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(1)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(1)}}, - }, - }, - { - Name: "Float_Divide_Integer", - Statement: `SELECT f / i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, - }, - }, - { - Name: "Float_Divide_Unsigned", - Statement: `SELECT f / u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(1)}}, - }, - }, - { - Name: "Integer_Divide_Unsigned", - Statement: `SELECT i / u FROM cpu`, - Err: `type error: i::integer / u::unsigned: cannot use / between an integer and unsigned, an explicit cast is required`, - }, - { - Name: "Integer_BitwiseAndRHS", - Statement: `SELECT i & 254 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(10)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(18)}}, - }, - }, - { - Name: "Unsigned_BitwiseAndRHS", - Statement: `SELECT u & 254 FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(10)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(18)}}, - }, - }, - { - Name: "Integer_BitwiseOrLHS", - Statement: `SELECT 4 | i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(20)}}, - {Time: 5 * Second, Series: 
query.Series{Name: "cpu"}, Values: []interface{}{int64(14)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(23)}}, - }, - }, - { - Name: "Unsigned_BitwiseOrLHS", - Statement: `SELECT 4 | u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(20)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(14)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(23)}}, - }, - }, - { - Name: "Integer_BitwiseXOr_Integer", - Statement: `SELECT i ^ i FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(0)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(0)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(0)}}, - }, - }, - { - Name: "Unsigned_BitwiseXOr_Integer", - Statement: `SELECT u ^ u FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{uint64(0)}}, - }, - }, - } { - t.Run(test.Name, func(t *testing.T) { - stmt := MustParseSelectStatement(test.Statement) - stmt.OmitTime = true - cur, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{}) - if err != nil { - if have, want := err.Error(), test.Err; want != "" { - if have != want { - t.Errorf("%s: unexpected parse error: %s != %s", test.Name, have, want) - } - } else { - t.Errorf("%s: unexpected parse error: %s", test.Name, have) - } - } else if test.Err != "" { - t.Fatalf("%s: expected error", test.Name) - } else if a, err := ReadCursor(cur); err != nil { - t.Fatalf("%s: unexpected error: %s", test.Name, err) - } else if diff := cmp.Diff(test.Rows, a); diff != "" { - t.Errorf("%s: unexpected points:\n%s", test.Name, diff) - } - }) - } -} - -// Ensure a SELECT binary expr queries can be executed as booleans. 
-func TestSelect_BinaryExpr_Boolean(t *testing.T) { - shardMapper := ShardMapper{ - MapShardsFn: func(_ context.Context, sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { - return &ShardGroup{ - Fields: map[string]influxql.DataType{ - "one": influxql.Boolean, - "two": influxql.Boolean, - }, - CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - if m.Name != "cpu" { - t.Fatalf("unexpected source: %s", m.Name) - } - makeAuxFields := func(value bool) []interface{} { - aux := make([]interface{}, len(opt.Aux)) - for i := range aux { - aux[i] = value - } - return aux - } - return &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Aux: makeAuxFields(true)}, - {Name: "cpu", Time: 5 * Second, Aux: makeAuxFields(false)}, - {Name: "cpu", Time: 9 * Second, Aux: makeAuxFields(true)}, - }}, nil - }, - } - }, - } - - for _, test := range []struct { - Name string - Statement string - Rows []query.Row - }{ - { - Name: "BinaryXOrRHS", - Statement: `SELECT one ^ true FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{false}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{true}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{false}}, - }, - }, - { - Name: "BinaryOrLHS", - Statement: `SELECT true | two FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{true}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{true}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{true}}, - }, - }, - { - Name: "TwoSeriesBitwiseAnd", - Statement: `SELECT one & two FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{true}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{false}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{true}}, - }, - }, - } { - t.Run(test.Name, func(t *testing.T) { - stmt := MustParseSelectStatement(test.Statement) - stmt.OmitTime = true - cur, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{}) - if err != nil { - t.Errorf("%s: parse error: %s", test.Name, err) - } else if a, err := ReadCursor(cur); err != nil { - t.Fatalf("%s: unexpected error: %s", test.Name, err) - } else if diff := cmp.Diff(test.Rows, a); diff != "" { - t.Errorf("%s: unexpected points:\n%s", test.Name, diff) - } - }) - } -} - -// Ensure a SELECT binary expr with nil values can be executed. -// Nil values may be present when a field is missing from one iterator, -// but not the other. 
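TestSelect_BinaryExpr_NilValues below pins down the null-propagation rule: if either operand is missing (nil) at a timestamp, the expression's value for that row is nil rather than zero or an error. A minimal sketch of that rule for float addition (hypothetical helper, shown only to make the expected rows easier to read):

// addNullable illustrates the propagation the cases below expect: a nil operand
// short-circuits the arithmetic and yields a nil result for that row.
func addNullable(a, b interface{}) interface{} {
	af, aok := a.(float64)
	bf, bok := b.(float64)
	if !aok || !bok {
		return nil
	}
	return af + bf
}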
-func TestSelect_BinaryExpr_NilValues(t *testing.T) { - shardMapper := ShardMapper{ - MapShardsFn: func(_ context.Context, sources influxql.Sources, _ influxql.TimeRange) query.ShardGroup { - return &ShardGroup{ - Fields: map[string]influxql.DataType{ - "total": influxql.Float, - "value": influxql.Float, - }, - CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - if m.Name != "cpu" { - t.Fatalf("unexpected source: %s", m.Name) - } - return &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Aux: []interface{}{float64(20), nil}}, - {Name: "cpu", Time: 5 * Second, Aux: []interface{}{float64(10), float64(15)}}, - {Name: "cpu", Time: 9 * Second, Aux: []interface{}{nil, float64(5)}}, - }}, nil - }, - } - }, - } - - for _, test := range []struct { - Name string - Statement string - Rows []query.Row - }{ - { - Name: "Addition", - Statement: `SELECT total + value FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(25)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, - }, - }, - { - Name: "Subtraction", - Statement: `SELECT total - value FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(-5)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, - }, - }, - { - Name: "Multiplication", - Statement: `SELECT total * value FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(150)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, - }, - }, - { - Name: "Division", - Statement: `SELECT total / value FROM cpu`, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10) / float64(15)}}, - {Time: 9 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{nil}}, - }, - }, - } { - t.Run(test.Name, func(t *testing.T) { - stmt := MustParseSelectStatement(test.Statement) - stmt.OmitTime = true - cur, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{}) - if err != nil { - t.Errorf("%s: parse error: %s", test.Name, err) - } else if a, err := ReadCursor(cur); err != nil { - t.Fatalf("%s: unexpected error: %s", test.Name, err) - } else if diff := cmp.Diff(test.Rows, a); diff != "" { - t.Errorf("%s: unexpected points:\n%s", test.Name, diff) - } - }) - } -} - -type ShardMapper struct { - MapShardsFn func(ctx context.Context, sources influxql.Sources, t influxql.TimeRange) query.ShardGroup -} - -func (m *ShardMapper) MapShards(ctx context.Context, sources influxql.Sources, t influxql.TimeRange, opt query.SelectOptions) (query.ShardGroup, error) { - shards := m.MapShardsFn(ctx, sources, t) - return shards, nil -} - -type ShardGroup struct { - CreateIteratorFn func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) - Fields map[string]influxql.DataType - Dimensions []string -} - -func (sh *ShardGroup) CreateIterator(ctx context.Context, m 
*influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - return sh.CreateIteratorFn(ctx, m, opt) -} - -func (sh *ShardGroup) IteratorCost(ctx context.Context, source *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) { - return query.IteratorCost{}, nil -} - -func (sh *ShardGroup) FieldDimensions(ctx context.Context, m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { - fields = make(map[string]influxql.DataType) - dimensions = make(map[string]struct{}) - - for f, typ := range sh.Fields { - fields[f] = typ - } - for _, d := range sh.Dimensions { - dimensions[d] = struct{}{} - } - return fields, dimensions, nil -} - -func (sh *ShardGroup) MapType(ctx context.Context, measurement *influxql.Measurement, field string) influxql.DataType { - if typ, ok := sh.Fields[field]; ok { - return typ - } - for _, d := range sh.Dimensions { - if d == field { - return influxql.Tag - } - } - return influxql.Unknown -} - -func (*ShardGroup) Close() error { - return nil -} - -func BenchmarkSelect_Raw_1K(b *testing.B) { benchmarkSelectRaw(b, 1000) } -func BenchmarkSelect_Raw_100K(b *testing.B) { benchmarkSelectRaw(b, 1000000) } - -func benchmarkSelectRaw(b *testing.B, pointN int) { - benchmarkSelect(b, MustParseSelectStatement(`SELECT fval FROM cpu`), NewRawBenchmarkIteratorCreator(pointN)) -} - -func benchmarkSelect(b *testing.B, stmt *influxql.SelectStatement, shardMapper query.ShardMapper) { - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - cur, err := query.Select(context.Background(), stmt, shardMapper, query.SelectOptions{}) - if err != nil { - b.Fatal(err) - } - query.DrainCursor(cur) - } -} - -// NewRawBenchmarkIteratorCreator returns a new mock iterator creator with generated fields. 
-func NewRawBenchmarkIteratorCreator(pointN int) query.ShardMapper { - return &ShardMapper{ - MapShardsFn: func(_ context.Context, sources influxql.Sources, t influxql.TimeRange) query.ShardGroup { - return &ShardGroup{ - Fields: map[string]influxql.DataType{ - "fval": influxql.Float, - }, - CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - if opt.Expr != nil { - panic("unexpected expression") - } - - p := query.FloatPoint{ - Name: "cpu", - Aux: make([]interface{}, len(opt.Aux)), - } - - for i := range opt.Aux { - switch opt.Aux[i].Val { - case "fval": - p.Aux[i] = float64(100) - default: - panic("unknown iterator expr: " + opt.Expr.String()) - } - } - - return &FloatPointGenerator{N: pointN, Fn: func(i int) *query.FloatPoint { - p.Time = int64(time.Duration(i) * (10 * time.Second)) - return &p - }}, nil - }, - } - }, - } -} - -func benchmarkSelectDedupe(b *testing.B, seriesN, pointsPerSeries int) { - stmt := MustParseSelectStatement(`SELECT sval::string FROM cpu`) - stmt.Dedupe = true - - shardMapper := ShardMapper{ - MapShardsFn: func(_ context.Context, sources influxql.Sources, t influxql.TimeRange) query.ShardGroup { - return &ShardGroup{ - Fields: map[string]influxql.DataType{ - "sval": influxql.String, - }, - CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - if opt.Expr != nil { - panic("unexpected expression") - } - - p := query.FloatPoint{ - Name: "tags", - Aux: []interface{}{nil}, - } - - return &FloatPointGenerator{N: seriesN * pointsPerSeries, Fn: func(i int) *query.FloatPoint { - p.Aux[0] = fmt.Sprintf("server%d", i%seriesN) - return &p - }}, nil - }, - } - }, - } - - b.ResetTimer() - benchmarkSelect(b, stmt, &shardMapper) -} - -func BenchmarkSelect_Dedupe_1K(b *testing.B) { benchmarkSelectDedupe(b, 1000, 100) } - -func benchmarkSelectTop(b *testing.B, seriesN, pointsPerSeries int) { - stmt := MustParseSelectStatement(`SELECT top(sval, 10) FROM cpu`) - - shardMapper := ShardMapper{ - MapShardsFn: func(_ context.Context, sources influxql.Sources, t influxql.TimeRange) query.ShardGroup { - return &ShardGroup{ - Fields: map[string]influxql.DataType{ - "sval": influxql.Float, - }, - CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - if m.Name != "cpu" { - b.Fatalf("unexpected source: %s", m.Name) - } - if !reflect.DeepEqual(opt.Expr, MustParseExpr(`sval`)) { - b.Fatalf("unexpected expr: %s", spew.Sdump(opt.Expr)) - } - - p := query.FloatPoint{ - Name: "cpu", - } - - return &FloatPointGenerator{N: seriesN * pointsPerSeries, Fn: func(i int) *query.FloatPoint { - p.Value = float64(rand.Int63()) - p.Time = int64(time.Duration(i) * (10 * time.Second)) - return &p - }}, nil - }, - } - }, - } - - b.ResetTimer() - benchmarkSelect(b, stmt, &shardMapper) -} - -func BenchmarkSelect_Top_1K(b *testing.B) { benchmarkSelectTop(b, 1000, 1000) } - -// ReadCursor reads a Cursor into an array of points. 
-func ReadCursor(cur query.Cursor) ([]query.Row, error) { - defer cur.Close() - - var rows []query.Row - for { - var row query.Row - if !cur.Scan(&row) { - if err := cur.Err(); err != nil { - return nil, err - } - return rows, nil - } - rows = append(rows, row) - } -} diff --git a/influxql/query/statement_rewriter.go b/influxql/query/statement_rewriter.go deleted file mode 100644 index f9c21ca145f..00000000000 --- a/influxql/query/statement_rewriter.go +++ /dev/null @@ -1,510 +0,0 @@ -package query - -import ( - "errors" - "regexp" - - "github.com/influxdata/influxql" -) - -var matchAllRegex = regexp.MustCompile(`.+`) - -// RewriteStatement rewrites stmt into a new statement, if applicable. -func RewriteStatement(stmt influxql.Statement) (influxql.Statement, error) { - switch stmt := stmt.(type) { - case *influxql.ShowFieldKeysStatement: - return rewriteShowFieldKeysStatement(stmt) - case *influxql.ShowFieldKeyCardinalityStatement: - return rewriteShowFieldKeyCardinalityStatement(stmt) - case *influxql.ShowMeasurementsStatement: - return rewriteShowMeasurementsStatement(stmt) - case *influxql.ShowMeasurementCardinalityStatement: - return rewriteShowMeasurementCardinalityStatement(stmt) - case *influxql.ShowSeriesStatement: - return rewriteShowSeriesStatement(stmt) - case *influxql.ShowSeriesCardinalityStatement: - return rewriteShowSeriesCardinalityStatement(stmt) - case *influxql.ShowTagKeysStatement: - return rewriteShowTagKeysStatement(stmt) - case *influxql.ShowTagKeyCardinalityStatement: - return rewriteShowTagKeyCardinalityStatement(stmt) - case *influxql.ShowTagValuesStatement: - return rewriteShowTagValuesStatement(stmt) - case *influxql.ShowTagValuesCardinalityStatement: - return rewriteShowTagValuesCardinalityStatement(stmt) - default: - return stmt, nil - } -} - -func rewriteShowFieldKeysStatement(stmt *influxql.ShowFieldKeysStatement) (influxql.Statement, error) { - return &influxql.SelectStatement{ - Fields: influxql.Fields([]*influxql.Field{ - {Expr: &influxql.VarRef{Val: "fieldKey"}}, - {Expr: &influxql.VarRef{Val: "fieldType"}}, - }), - Sources: rewriteSources(stmt.Sources, "_fieldKeys", stmt.Database), - Condition: rewriteSourcesCondition(stmt.Sources, nil), - Offset: stmt.Offset, - Limit: stmt.Limit, - SortFields: stmt.SortFields, - OmitTime: true, - Dedupe: true, - IsRawQuery: true, - }, nil -} - -func rewriteShowFieldKeyCardinalityStatement(stmt *influxql.ShowFieldKeyCardinalityStatement) (influxql.Statement, error) { - // Check for time in WHERE clause (not supported). - if influxql.HasTimeExpr(stmt.Condition) { - return nil, errors.New("SHOW FIELD KEY CARDINALITY doesn't support time in WHERE clause") - } - - // Use all field keys, if zero. 
-	if len(stmt.Sources) == 0 {
-		stmt.Sources = influxql.Sources{
-			&influxql.Measurement{Regex: &influxql.RegexLiteral{Val: matchAllRegex}},
-		}
-	}
-
-	return &influxql.SelectStatement{
-		Fields: []*influxql.Field{
-			{
-				Expr: &influxql.Call{
-					Name: "count",
-					Args: []influxql.Expr{
-						&influxql.Call{
-							Name: "distinct",
-							Args: []influxql.Expr{&influxql.VarRef{Val: "fieldKey"}},
-						},
-					},
-				},
-				Alias: "count",
-			},
-		},
-		Condition:  stmt.Condition,
-		Dimensions: stmt.Dimensions,
-		Offset:     stmt.Offset,
-		Limit:      stmt.Limit,
-		OmitTime:   true,
-		Sources: influxql.Sources{
-			&influxql.SubQuery{
-				Statement: &influxql.SelectStatement{
-					Fields: []*influxql.Field{
-						{Expr: &influxql.VarRef{Val: "fieldKey"}},
-						{Expr: &influxql.VarRef{Val: "fieldType"}},
-					},
-					Sources:    rewriteSources(stmt.Sources, "_fieldKeys", stmt.Database),
-					Condition:  rewriteSourcesCondition(stmt.Sources, nil),
-					OmitTime:   true,
-					Dedupe:     true,
-					IsRawQuery: true,
-				},
-			},
-		},
-	}, nil
-}
-
-func rewriteShowMeasurementsStatement(stmt *influxql.ShowMeasurementsStatement) (influxql.Statement, error) {
-	var sources influxql.Sources
-	if stmt.Source != nil {
-		sources = influxql.Sources{stmt.Source}
-	}
-
-	// Currently, time-based SHOW MEASUREMENTS queries can't be supported because
-	// it's not possible to express the appropriate set operations, such as a
-	// negated regex, using the query engine.
-	if influxql.HasTimeExpr(stmt.Condition) {
-		return nil, errors.New("SHOW MEASUREMENTS doesn't support time in WHERE clause")
-	}
-
-	// Rewrite the condition to push a source measurement into a "_name" tag.
-	stmt.Condition = rewriteSourcesCondition(sources, stmt.Condition)
-	return stmt, nil
-}
-
-func rewriteShowMeasurementCardinalityStatement(stmt *influxql.ShowMeasurementCardinalityStatement) (influxql.Statement, error) {
-	// TODO(edd): currently we only support cardinality estimation for certain
-	// types of query. As the estimation coverage is expanded, this condition
-	// will become less strict.
-	if !stmt.Exact && stmt.Sources == nil && stmt.Condition == nil && stmt.Dimensions == nil && stmt.Limit == 0 && stmt.Offset == 0 {
-		return stmt, nil
-	}
-
-	// Check for time in WHERE clause (not supported).
-	if influxql.HasTimeExpr(stmt.Condition) {
-		return nil, errors.New("SHOW MEASUREMENT EXACT CARDINALITY doesn't support time in WHERE clause")
-	}
-
-	// Use all measurements, if zero.
-	if len(stmt.Sources) == 0 {
-		stmt.Sources = influxql.Sources{
-			&influxql.Measurement{Regex: &influxql.RegexLiteral{Val: matchAllRegex}},
-		}
-	}
-
-	return &influxql.SelectStatement{
-		Fields: []*influxql.Field{
-			{
-				Expr: &influxql.Call{
-					Name: "count",
-					Args: []influxql.Expr{
-						&influxql.Call{
-							Name: "distinct",
-							Args: []influxql.Expr{&influxql.VarRef{Val: "_name"}},
-						},
-					},
-				},
-				Alias: "count",
-			},
-		},
-		Sources:    rewriteSources2(stmt.Sources, stmt.Database),
-		Condition:  stmt.Condition,
-		Dimensions: stmt.Dimensions,
-		Offset:     stmt.Offset,
-		Limit:      stmt.Limit,
-		OmitTime:   true,
-		StripName:  true,
-	}, nil
-}
-
-func rewriteShowSeriesStatement(stmt *influxql.ShowSeriesStatement) (influxql.Statement, error) {
-	s := &influxql.SelectStatement{
-		Condition:  stmt.Condition,
-		Offset:     stmt.Offset,
-		Limit:      stmt.Limit,
-		SortFields: stmt.SortFields,
-		OmitTime:   true,
-		StripName:  true,
-		Dedupe:     true,
-		IsRawQuery: true,
-	}
-	// Check if we can exclusively use the index.
- if !influxql.HasTimeExpr(stmt.Condition) { - s.Fields = []*influxql.Field{{Expr: &influxql.VarRef{Val: "key"}}} - s.Sources = rewriteSources(stmt.Sources, "_series", stmt.Database) - s.Condition = rewriteSourcesCondition(s.Sources, s.Condition) - return s, nil - } - - // The query is bounded by time then it will have to query TSM data rather - // than utilising the index via system iterators. - s.Fields = []*influxql.Field{ - {Expr: &influxql.VarRef{Val: "_seriesKey"}, Alias: "key"}, - } - s.Sources = rewriteSources2(stmt.Sources, stmt.Database) - return s, nil -} - -func rewriteShowSeriesCardinalityStatement(stmt *influxql.ShowSeriesCardinalityStatement) (influxql.Statement, error) { - // TODO(edd): currently we only support cardinality estimation for certain - // types of query. As the estimation coverage is expanded, this condition - // will become less strict. - if !stmt.Exact && stmt.Sources == nil && stmt.Condition == nil && stmt.Dimensions == nil && stmt.Limit == 0 && stmt.Offset == 0 { - return stmt, nil - } - - // Check for time in WHERE clause (not supported). - if influxql.HasTimeExpr(stmt.Condition) { - return nil, errors.New("SHOW SERIES EXACT CARDINALITY doesn't support time in WHERE clause") - } - - // Use all measurements, if zero. - if len(stmt.Sources) == 0 { - stmt.Sources = influxql.Sources{ - &influxql.Measurement{Regex: &influxql.RegexLiteral{Val: matchAllRegex}}, - } - } - - return &influxql.SelectStatement{ - Fields: []*influxql.Field{ - { - Expr: &influxql.Call{ - Name: "count", - Args: []influxql.Expr{&influxql.Call{ - Name: "distinct", - Args: []influxql.Expr{&influxql.VarRef{Val: "_seriesKey"}}, - }}, - }, - Alias: "count", - }, - }, - Sources: rewriteSources2(stmt.Sources, stmt.Database), - Condition: stmt.Condition, - Dimensions: stmt.Dimensions, - Offset: stmt.Offset, - Limit: stmt.Limit, - OmitTime: true, - }, nil -} - -func rewriteShowTagValuesStatement(stmt *influxql.ShowTagValuesStatement) (influxql.Statement, error) { - var expr influxql.Expr - if list, ok := stmt.TagKeyExpr.(*influxql.ListLiteral); ok { - for _, tagKey := range list.Vals { - tagExpr := &influxql.BinaryExpr{ - Op: influxql.EQ, - LHS: &influxql.VarRef{Val: "_tagKey"}, - RHS: &influxql.StringLiteral{Val: tagKey}, - } - - if expr != nil { - expr = &influxql.BinaryExpr{ - Op: influxql.OR, - LHS: expr, - RHS: tagExpr, - } - } else { - expr = tagExpr - } - } - } else { - expr = &influxql.BinaryExpr{ - Op: stmt.Op, - LHS: &influxql.VarRef{Val: "_tagKey"}, - RHS: stmt.TagKeyExpr, - } - } - - // Set condition or "AND" together. - condition := stmt.Condition - if condition == nil { - condition = expr - } else { - condition = &influxql.BinaryExpr{ - Op: influxql.AND, - LHS: &influxql.ParenExpr{Expr: condition}, - RHS: &influxql.ParenExpr{Expr: expr}, - } - } - condition = rewriteSourcesCondition(stmt.Sources, condition) - - return &influxql.ShowTagValuesStatement{ - Database: stmt.Database, - Op: stmt.Op, - TagKeyExpr: stmt.TagKeyExpr, - Condition: condition, - SortFields: stmt.SortFields, - Limit: stmt.Limit, - Offset: stmt.Offset, - }, nil -} - -func rewriteShowTagValuesCardinalityStatement(stmt *influxql.ShowTagValuesCardinalityStatement) (influxql.Statement, error) { - // Use all measurements, if zero. 
- if len(stmt.Sources) == 0 { - stmt.Sources = influxql.Sources{ - &influxql.Measurement{Regex: &influxql.RegexLiteral{Val: matchAllRegex}}, - } - } - - var expr influxql.Expr - if list, ok := stmt.TagKeyExpr.(*influxql.ListLiteral); ok { - for _, tagKey := range list.Vals { - tagExpr := &influxql.BinaryExpr{ - Op: influxql.EQ, - LHS: &influxql.VarRef{Val: "_tagKey"}, - RHS: &influxql.StringLiteral{Val: tagKey}, - } - - if expr != nil { - expr = &influxql.BinaryExpr{ - Op: influxql.OR, - LHS: expr, - RHS: tagExpr, - } - } else { - expr = tagExpr - } - } - } else { - expr = &influxql.BinaryExpr{ - Op: stmt.Op, - LHS: &influxql.VarRef{Val: "_tagKey"}, - RHS: stmt.TagKeyExpr, - } - } - - // Set condition or "AND" together. - condition := stmt.Condition - if condition == nil { - condition = expr - } else { - condition = &influxql.BinaryExpr{ - Op: influxql.AND, - LHS: &influxql.ParenExpr{Expr: condition}, - RHS: &influxql.ParenExpr{Expr: expr}, - } - } - - return &influxql.SelectStatement{ - Fields: []*influxql.Field{ - { - Expr: &influxql.Call{ - Name: "count", - Args: []influxql.Expr{ - &influxql.Call{ - Name: "distinct", - Args: []influxql.Expr{&influxql.VarRef{Val: "_tagValue"}}, - }, - }, - }, - Alias: "count", - }, - }, - Sources: rewriteSources2(stmt.Sources, stmt.Database), - Condition: condition, - Dimensions: stmt.Dimensions, - Offset: stmt.Offset, - Limit: stmt.Limit, - OmitTime: true, - }, nil -} - -func rewriteShowTagKeysStatement(stmt *influxql.ShowTagKeysStatement) (influxql.Statement, error) { - return &influxql.ShowTagKeysStatement{ - Database: stmt.Database, - Condition: rewriteSourcesCondition(stmt.Sources, stmt.Condition), - SortFields: stmt.SortFields, - Limit: stmt.Limit, - Offset: stmt.Offset, - SLimit: stmt.SLimit, - SOffset: stmt.SOffset, - }, nil -} - -func rewriteShowTagKeyCardinalityStatement(stmt *influxql.ShowTagKeyCardinalityStatement) (influxql.Statement, error) { - // Check for time in WHERE clause (not supported). - if influxql.HasTimeExpr(stmt.Condition) { - return nil, errors.New("SHOW TAG KEY EXACT CARDINALITY doesn't support time in WHERE clause") - } - - // Use all measurements, if zero. - if len(stmt.Sources) == 0 { - stmt.Sources = influxql.Sources{ - &influxql.Measurement{Regex: &influxql.RegexLiteral{Val: matchAllRegex}}, - } - } - - return &influxql.SelectStatement{ - Fields: []*influxql.Field{ - { - Expr: &influxql.Call{ - Name: "count", - Args: []influxql.Expr{ - &influxql.Call{ - Name: "distinct", - Args: []influxql.Expr{&influxql.VarRef{Val: "_tagKey"}}, - }, - }, - }, - Alias: "count", - }, - }, - Sources: rewriteSources2(stmt.Sources, stmt.Database), - Condition: stmt.Condition, - Dimensions: stmt.Dimensions, - Offset: stmt.Offset, - Limit: stmt.Limit, - OmitTime: true, - }, nil -} - -// rewriteSources rewrites sources to include the provided system iterator. -// -// rewriteSources also sets the default database where necessary. 
-func rewriteSources(sources influxql.Sources, systemIterator, defaultDatabase string) influxql.Sources { - newSources := influxql.Sources{} - for _, src := range sources { - if src == nil { - continue - } - mm := src.(*influxql.Measurement) - database := mm.Database - if database == "" { - database = defaultDatabase - } - - newM := mm.Clone() - newM.SystemIterator, newM.Database = systemIterator, database - newSources = append(newSources, newM) - } - - if len(newSources) <= 0 { - return append(newSources, &influxql.Measurement{ - Database: defaultDatabase, - SystemIterator: systemIterator, - }) - } - return newSources -} - -// rewriteSourcesCondition rewrites sources into `name` expressions. -// Merges with cond and returns a new condition. -func rewriteSourcesCondition(sources influxql.Sources, cond influxql.Expr) influxql.Expr { - if len(sources) == 0 { - return cond - } - - // Generate an OR'd set of filters on source name. - var scond influxql.Expr - for _, source := range sources { - mm := source.(*influxql.Measurement) - - // Generate a filtering expression on the measurement name. - var expr influxql.Expr - if mm.Regex != nil { - expr = &influxql.BinaryExpr{ - Op: influxql.EQREGEX, - LHS: &influxql.VarRef{Val: "_name"}, - RHS: &influxql.RegexLiteral{Val: mm.Regex.Val}, - } - } else if mm.Name != "" { - expr = &influxql.BinaryExpr{ - Op: influxql.EQ, - LHS: &influxql.VarRef{Val: "_name"}, - RHS: &influxql.StringLiteral{Val: mm.Name}, - } - } - - if scond == nil { - scond = expr - } else { - scond = &influxql.BinaryExpr{ - Op: influxql.OR, - LHS: scond, - RHS: expr, - } - } - } - - // This is the case where the original query has a WHERE on a tag, and also - // is requesting from a specific source. - if cond != nil && scond != nil { - return &influxql.BinaryExpr{ - Op: influxql.AND, - LHS: &influxql.ParenExpr{Expr: scond}, - RHS: &influxql.ParenExpr{Expr: cond}, - } - } else if cond != nil { - // This is the case where the original query has a WHERE on a tag but - // is not requesting from a specific source. 
- return cond - } - return scond -} - -func rewriteSources2(sources influxql.Sources, database string) influxql.Sources { - if len(sources) == 0 { - sources = influxql.Sources{&influxql.Measurement{Regex: &influxql.RegexLiteral{Val: matchAllRegex}}} - } - for _, source := range sources { - switch source := source.(type) { - case *influxql.Measurement: - if source.Database == "" { - source.Database = database - } - } - } - return sources -} diff --git a/influxql/query/statement_rewriter_test.go b/influxql/query/statement_rewriter_test.go deleted file mode 100644 index 978bc7b54d6..00000000000 --- a/influxql/query/statement_rewriter_test.go +++ /dev/null @@ -1,336 +0,0 @@ -package query_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxql" -) - -func TestRewriteStatement(t *testing.T) { - tests := []struct { - stmt string - s string - }{ - { - stmt: `SHOW FIELD KEYS`, - s: `SELECT fieldKey, fieldType FROM _fieldKeys`, - }, - { - stmt: `SHOW FIELD KEYS ON db0`, - s: `SELECT fieldKey, fieldType FROM db0.._fieldKeys`, - }, - { - stmt: `SHOW FIELD KEYS FROM cpu`, - s: `SELECT fieldKey, fieldType FROM _fieldKeys WHERE _name = 'cpu'`, - }, - { - stmt: `SHOW FIELD KEYS ON db0 FROM cpu`, - s: `SELECT fieldKey, fieldType FROM db0.._fieldKeys WHERE _name = 'cpu'`, - }, - { - stmt: `SHOW FIELD KEYS FROM /c.*/`, - s: `SELECT fieldKey, fieldType FROM _fieldKeys WHERE _name =~ /c.*/`, - }, - { - stmt: `SHOW FIELD KEYS ON db0 FROM /c.*/`, - s: `SELECT fieldKey, fieldType FROM db0.._fieldKeys WHERE _name =~ /c.*/`, - }, - { - stmt: `SHOW FIELD KEYS FROM mydb.myrp2.cpu`, - s: `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name = 'cpu'`, - }, - { - stmt: `SHOW FIELD KEYS ON db0 FROM mydb.myrp2.cpu`, - s: `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name = 'cpu'`, - }, - { - stmt: `SHOW FIELD KEYS FROM mydb.myrp2./c.*/`, - s: `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name =~ /c.*/`, - }, - { - stmt: `SHOW FIELD KEYS ON db0 FROM mydb.myrp2./c.*/`, - s: `SELECT fieldKey, fieldType FROM mydb.myrp2._fieldKeys WHERE _name =~ /c.*/`, - }, - { - stmt: "SHOW FIELD KEY CARDINALITY", - s: "SELECT count(distinct(fieldKey)) AS count FROM (SELECT fieldKey, fieldType FROM _fieldKeys WHERE _name =~ /.+/)", - }, - { - stmt: "SHOW FIELD KEY CARDINALITY ON db0", - s: "SELECT count(distinct(fieldKey)) AS count FROM (SELECT fieldKey, fieldType FROM db0.._fieldKeys WHERE _name =~ /.+/)", - }, - { - stmt: "SHOW FIELD KEY CARDINALITY ON db0 FROM /tsm1.*/", - s: "SELECT count(distinct(fieldKey)) AS count FROM (SELECT fieldKey, fieldType FROM db0.._fieldKeys WHERE _name =~ /tsm1.*/)", - }, - { - stmt: "SHOW FIELD KEY CARDINALITY ON db0 FROM /tsm1.*/ WHERE 1 = 1", - s: "SELECT count(distinct(fieldKey)) AS count FROM (SELECT fieldKey, fieldType FROM db0.._fieldKeys WHERE _name =~ /tsm1.*/) WHERE 1 = 1", - }, - { - stmt: `SHOW SERIES`, - s: `SELECT "key" FROM _series`, - }, - { - stmt: `SHOW SERIES ON db0`, - s: `SELECT "key" FROM db0.._series`, - }, - { - stmt: `SHOW SERIES FROM cpu`, - s: `SELECT "key" FROM _series WHERE _name = 'cpu'`, - }, - { - stmt: `SHOW SERIES ON db0 FROM cpu`, - s: `SELECT "key" FROM db0.._series WHERE _name = 'cpu'`, - }, - { - stmt: `SHOW SERIES FROM mydb.myrp1.cpu`, - s: `SELECT "key" FROM mydb.myrp1._series WHERE _name = 'cpu'`, - }, - { - stmt: `SHOW SERIES ON db0 FROM mydb.myrp1.cpu`, - s: `SELECT "key" FROM mydb.myrp1._series WHERE _name = 'cpu'`, - }, - { - stmt: `SHOW 
SERIES FROM mydb.myrp1./c.*/`, - s: `SELECT "key" FROM mydb.myrp1._series WHERE _name =~ /c.*/`, - }, - { - stmt: `SHOW SERIES FROM mydb.myrp1./c.*/ WHERE region = 'uswest'`, - s: `SELECT "key" FROM mydb.myrp1._series WHERE (_name =~ /c.*/) AND (region = 'uswest')`, - }, - { - stmt: `SHOW SERIES ON db0 FROM mydb.myrp1./c.*/`, - s: `SELECT "key" FROM mydb.myrp1._series WHERE _name =~ /c.*/`, - }, - { - stmt: `SHOW SERIES WHERE time > 0`, - s: `SELECT _seriesKey AS "key" FROM /.+/ WHERE time > 0`, - }, - { - stmt: `SHOW SERIES ON db0 WHERE time > 0`, - s: `SELECT _seriesKey AS "key" FROM db0../.+/ WHERE time > 0`, - }, - { - stmt: `SHOW SERIES FROM cpu WHERE time > 0`, - s: `SELECT _seriesKey AS "key" FROM cpu WHERE time > 0`, - }, - { - stmt: `SHOW SERIES ON db0 FROM cpu WHERE time > 0`, - s: `SELECT _seriesKey AS "key" FROM db0..cpu WHERE time > 0`, - }, - { - stmt: `SHOW SERIES FROM mydb.myrp1.cpu WHERE time > 0`, - s: `SELECT _seriesKey AS "key" FROM mydb.myrp1.cpu WHERE time > 0`, - }, - { - stmt: `SHOW SERIES ON db0 FROM mydb.myrp1.cpu WHERE time > 0`, - s: `SELECT _seriesKey AS "key" FROM mydb.myrp1.cpu WHERE time > 0`, - }, - { - stmt: `SHOW SERIES FROM mydb.myrp1./c.*/ WHERE time > 0`, - s: `SELECT _seriesKey AS "key" FROM mydb.myrp1./c.*/ WHERE time > 0`, - }, - { - stmt: `SHOW SERIES FROM mydb.myrp1./c.*/ WHERE region = 'uswest' AND time > 0`, - s: `SELECT _seriesKey AS "key" FROM mydb.myrp1./c.*/ WHERE region = 'uswest' AND time > 0`, - }, - { - stmt: `SHOW SERIES ON db0 FROM mydb.myrp1./c.*/ WHERE time > 0`, - s: `SELECT _seriesKey AS "key" FROM mydb.myrp1./c.*/ WHERE time > 0`, - }, - { - stmt: `SHOW SERIES CARDINALITY FROM m`, - s: `SELECT count(distinct(_seriesKey)) AS count FROM m`, - }, - { - stmt: `SHOW SERIES EXACT CARDINALITY`, - s: `SELECT count(distinct(_seriesKey)) AS count FROM /.+/`, - }, - { - stmt: `SHOW SERIES EXACT CARDINALITY FROM m`, - s: `SELECT count(distinct(_seriesKey)) AS count FROM m`, - }, - { - stmt: `SHOW TAG KEYS`, - s: `SHOW TAG KEYS`, - }, - { - stmt: `SHOW TAG KEYS ON db0`, - s: `SHOW TAG KEYS ON db0`, - }, - { - stmt: `SHOW TAG KEYS FROM cpu`, - s: `SHOW TAG KEYS WHERE _name = 'cpu'`, - }, - { - stmt: `SHOW TAG KEYS ON db0 FROM cpu`, - s: `SHOW TAG KEYS ON db0 WHERE _name = 'cpu'`, - }, - { - stmt: `SHOW TAG KEYS FROM /c.*/`, - s: `SHOW TAG KEYS WHERE _name =~ /c.*/`, - }, - { - stmt: `SHOW TAG KEYS ON db0 FROM /c.*/`, - s: `SHOW TAG KEYS ON db0 WHERE _name =~ /c.*/`, - }, - { - stmt: `SHOW TAG KEYS FROM cpu WHERE region = 'uswest'`, - s: `SHOW TAG KEYS WHERE (_name = 'cpu') AND (region = 'uswest')`, - }, - { - stmt: `SHOW TAG KEYS ON db0 FROM cpu WHERE region = 'uswest'`, - s: `SHOW TAG KEYS ON db0 WHERE (_name = 'cpu') AND (region = 'uswest')`, - }, - { - stmt: `SHOW TAG KEYS FROM mydb.myrp1.cpu`, - s: `SHOW TAG KEYS WHERE _name = 'cpu'`, - }, - { - stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1.cpu`, - s: `SHOW TAG KEYS ON db0 WHERE _name = 'cpu'`, - }, - { - stmt: `SHOW TAG KEYS FROM mydb.myrp1./c.*/`, - s: `SHOW TAG KEYS WHERE _name =~ /c.*/`, - }, - { - stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1./c.*/`, - s: `SHOW TAG KEYS ON db0 WHERE _name =~ /c.*/`, - }, - { - stmt: `SHOW TAG KEYS FROM mydb.myrp1.cpu WHERE region = 'uswest'`, - s: `SHOW TAG KEYS WHERE (_name = 'cpu') AND (region = 'uswest')`, - }, - { - stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1.cpu WHERE region = 'uswest'`, - s: `SHOW TAG KEYS ON db0 WHERE (_name = 'cpu') AND (region = 'uswest')`, - }, - { - stmt: `SHOW TAG KEYS WHERE time > 0`, - s: `SHOW TAG KEYS WHERE time > 0`, - }, 
- { - stmt: `SHOW TAG KEYS ON db0 WHERE time > 0`, - s: `SHOW TAG KEYS ON db0 WHERE time > 0`, - }, - { - stmt: `SHOW TAG KEYS FROM cpu WHERE time > 0`, - s: `SHOW TAG KEYS WHERE (_name = 'cpu') AND (time > 0)`, - }, - { - stmt: `SHOW TAG KEYS ON db0 FROM cpu WHERE time > 0`, - s: `SHOW TAG KEYS ON db0 WHERE (_name = 'cpu') AND (time > 0)`, - }, - { - stmt: `SHOW TAG KEYS FROM /c.*/ WHERE time > 0`, - s: `SHOW TAG KEYS WHERE (_name =~ /c.*/) AND (time > 0)`, - }, - { - stmt: `SHOW TAG KEYS ON db0 FROM /c.*/ WHERE time > 0`, - s: `SHOW TAG KEYS ON db0 WHERE (_name =~ /c.*/) AND (time > 0)`, - }, - { - stmt: `SHOW TAG KEYS FROM cpu WHERE region = 'uswest' AND time > 0`, - s: `SHOW TAG KEYS WHERE (_name = 'cpu') AND (region = 'uswest' AND time > 0)`, - }, - { - stmt: `SHOW TAG KEYS ON db0 FROM cpu WHERE region = 'uswest' AND time > 0`, - s: `SHOW TAG KEYS ON db0 WHERE (_name = 'cpu') AND (region = 'uswest' AND time > 0)`, - }, - { - stmt: `SHOW TAG KEYS FROM mydb.myrp1.cpu WHERE time > 0`, - s: `SHOW TAG KEYS WHERE (_name = 'cpu') AND (time > 0)`, - }, - { - stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1.cpu WHERE time > 0`, - s: `SHOW TAG KEYS ON db0 WHERE (_name = 'cpu') AND (time > 0)`, - }, - { - stmt: `SHOW TAG KEYS FROM mydb.myrp1./c.*/ WHERE time > 0`, - s: `SHOW TAG KEYS WHERE (_name =~ /c.*/) AND (time > 0)`, - }, - { - stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1./c.*/ WHERE time > 0`, - s: `SHOW TAG KEYS ON db0 WHERE (_name =~ /c.*/) AND (time > 0)`, - }, - { - stmt: `SHOW TAG KEYS FROM mydb.myrp1.cpu WHERE region = 'uswest' AND time > 0`, - s: `SHOW TAG KEYS WHERE (_name = 'cpu') AND (region = 'uswest' AND time > 0)`, - }, - { - stmt: `SHOW TAG KEYS ON db0 FROM mydb.myrp1.cpu WHERE region = 'uswest' AND time > 0`, - s: `SHOW TAG KEYS ON db0 WHERE (_name = 'cpu') AND (region = 'uswest' AND time > 0)`, - }, - { - stmt: `SHOW TAG VALUES WITH KEY = "region"`, - s: `SHOW TAG VALUES WITH KEY = region WHERE _tagKey = 'region'`, - }, - { - stmt: `SHOW TAG VALUES WITH KEY = "region" WHERE "region" = 'uswest'`, - s: `SHOW TAG VALUES WITH KEY = region WHERE (region = 'uswest') AND (_tagKey = 'region')`, - }, - { - stmt: `SHOW TAG VALUES WITH KEY IN ("region", "server") WHERE "platform" = 'cloud'`, - s: `SHOW TAG VALUES WITH KEY IN (region, server) WHERE (platform = 'cloud') AND (_tagKey = 'region' OR _tagKey = 'server')`, - }, - { - stmt: `SHOW TAG VALUES WITH KEY = "region" WHERE "region" = 'uswest' AND time > 0`, - s: `SHOW TAG VALUES WITH KEY = region WHERE (region = 'uswest' AND time > 0) AND (_tagKey = 'region')`, - }, - { - stmt: `SHOW TAG VALUES WITH KEY = "region" ON db0`, - s: `SHOW TAG VALUES WITH KEY = region WHERE _tagKey = 'region'`, - }, - { - stmt: `SHOW TAG VALUES FROM cpu WITH KEY = "region"`, - s: `SHOW TAG VALUES WITH KEY = region WHERE (_name = 'cpu') AND (_tagKey = 'region')`, - }, - { - stmt: `SHOW TAG VALUES WITH KEY != "region"`, - s: `SHOW TAG VALUES WITH KEY != region WHERE _tagKey != 'region'`, - }, - { - stmt: `SHOW TAG VALUES WITH KEY =~ /re.*/`, - s: `SHOW TAG VALUES WITH KEY =~ /re.*/ WHERE _tagKey =~ /re.*/`, - }, - { - stmt: `SHOW TAG VALUES WITH KEY =~ /re.*/ WHERE time > 0`, - s: `SHOW TAG VALUES WITH KEY =~ /re.*/ WHERE (time > 0) AND (_tagKey =~ /re.*/)`, - }, - { - stmt: `SHOW TAG VALUES WITH KEY !~ /re.*/`, - s: `SHOW TAG VALUES WITH KEY !~ /re.*/ WHERE _tagKey !~ /re.*/`, - }, - { - stmt: `SHOW TAG VALUES WITH KEY !~ /re.*/ LIMIT 1`, - s: `SHOW TAG VALUES WITH KEY !~ /re.*/ WHERE _tagKey !~ /re.*/ LIMIT 1`, - }, - { - stmt: `SHOW TAG VALUES WITH KEY !~ 
/re.*/ OFFSET 2`, - s: `SHOW TAG VALUES WITH KEY !~ /re.*/ WHERE _tagKey !~ /re.*/ OFFSET 2`, - }, - { - stmt: `SELECT value FROM cpu`, - s: `SELECT value FROM cpu`, - }, - } - - for _, test := range tests { - t.Run(test.stmt, func(t *testing.T) { - stmt, err := influxql.ParseStatement(test.stmt) - if err != nil { - t.Errorf("error parsing statement: %s", err) - } else { - stmt, err = query.RewriteStatement(stmt) - if err != nil { - t.Errorf("error rewriting statement: %s", err) - } else if s := stmt.String(); s != test.s { - t.Errorf("error rendering string. expected %s, actual: %s", test.s, s) - } - } - }) - } -} diff --git a/influxql/query/subquery.go b/influxql/query/subquery.go deleted file mode 100644 index 2bb0b25a70d..00000000000 --- a/influxql/query/subquery.go +++ /dev/null @@ -1,126 +0,0 @@ -package query - -import ( - "context" - - "github.com/influxdata/influxql" -) - -type subqueryBuilder struct { - ic IteratorCreator - stmt *influxql.SelectStatement -} - -// buildAuxIterator constructs an auxiliary Iterator from a subquery. -func (b *subqueryBuilder) buildAuxIterator(ctx context.Context, opt IteratorOptions) (Iterator, error) { - // Map the desired auxiliary fields from the substatement. - indexes := b.mapAuxFields(opt.Aux) - - subOpt, err := newIteratorOptionsSubstatement(ctx, b.stmt, opt) - if err != nil { - return nil, err - } - - cur, err := buildCursor(ctx, b.stmt, b.ic, subOpt) - if err != nil { - return nil, err - } - - // Filter the cursor by a condition if one was given. - if opt.Condition != nil { - cur = newFilterCursor(cur, opt.Condition) - } - - // Construct the iterators for the subquery. - itr := NewIteratorMapper(cur, nil, indexes, subOpt) - if len(opt.GetDimensions()) != len(subOpt.GetDimensions()) { - itr = NewTagSubsetIterator(itr, opt) - } - return itr, nil -} - -func (b *subqueryBuilder) mapAuxFields(auxFields []influxql.VarRef) []IteratorMap { - indexes := make([]IteratorMap, len(auxFields)) - for i, name := range auxFields { - m := b.mapAuxField(&name) - if m == nil { - // If this field doesn't map to anything, use the NullMap so it - // shows up as null. - m = NullMap{} - } - indexes[i] = m - } - return indexes -} - -func (b *subqueryBuilder) mapAuxField(name *influxql.VarRef) IteratorMap { - offset := 0 - for i, f := range b.stmt.Fields { - if f.Name() == name.Val { - return FieldMap{ - Index: i + offset, - // Cast the result of the field into the desired type. - Type: name.Type, - } - } else if call, ok := f.Expr.(*influxql.Call); ok && (call.Name == "top" || call.Name == "bottom") { - // We may match one of the arguments in "top" or "bottom". - if len(call.Args) > 2 { - for j, arg := range call.Args[1 : len(call.Args)-1] { - if arg, ok := arg.(*influxql.VarRef); ok && arg.Val == name.Val { - return FieldMap{ - Index: i + j + 1, - Type: influxql.String, - } - } - } - // Increment the offset so we have the correct index for later fields. - offset += len(call.Args) - 2 - } - } - } - - // Unable to find this in the list of fields. - // Look within the dimensions and create a field if we find it. - for _, d := range b.stmt.Dimensions { - if d, ok := d.Expr.(*influxql.VarRef); ok && name.Val == d.Val { - return TagMap(d.Val) - } - } - - // Unable to find any matches. - return nil -} - -func (b *subqueryBuilder) buildVarRefIterator(ctx context.Context, expr *influxql.VarRef, opt IteratorOptions) (Iterator, error) { - // Look for the field or tag that is driving this query. 
- driver := b.mapAuxField(expr) - if driver == nil { - // Exit immediately if there is no driver. If there is no driver, there - // are no results. Period. - return nil, nil - } - - // Map the auxiliary fields to their index in the subquery. - indexes := b.mapAuxFields(opt.Aux) - subOpt, err := newIteratorOptionsSubstatement(ctx, b.stmt, opt) - if err != nil { - return nil, err - } - - cur, err := buildCursor(ctx, b.stmt, b.ic, subOpt) - if err != nil { - return nil, err - } - - // Filter the cursor by a condition if one was given. - if opt.Condition != nil { - cur = newFilterCursor(cur, opt.Condition) - } - - // Construct the iterators for the subquery. - itr := NewIteratorMapper(cur, driver, indexes, subOpt) - if len(opt.GetDimensions()) != len(subOpt.GetDimensions()) { - itr = NewTagSubsetIterator(itr, opt) - } - return itr, nil -} diff --git a/influxql/query/subquery_test.go b/influxql/query/subquery_test.go deleted file mode 100644 index ac572ec5f3f..00000000000 --- a/influxql/query/subquery_test.go +++ /dev/null @@ -1,420 +0,0 @@ -package query_test - -import ( - "context" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxql" -) - -type CreateIteratorFn func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator - -func TestSubquery(t *testing.T) { - for _, test := range []struct { - Name string - Statement string - Fields map[string]influxql.DataType - MapShardsFn func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn - Rows []query.Row - }{ - { - Name: "AuxiliaryFields", - Statement: `SELECT max / 2.0 FROM (SELECT max(value) FROM cpu GROUP BY time(5s)) WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:15Z'`, - Fields: map[string]influxql.DataType{"value": influxql.Float}, - MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { - if got, want := tr.MinTimeNano(), 0*Second; got != want { - t.Errorf("unexpected min time: got=%d want=%d", got, want) - } - if got, want := tr.MaxTimeNano(), 15*Second-1; got != want { - t.Errorf("unexpected max time: got=%d want=%d", got, want) - } - return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { - if got, want := m.Name, "cpu"; got != want { - t.Errorf("unexpected source: got=%s want=%s", got, want) - } - if got, want := opt.Expr.String(), "max(value::float)"; got != want { - t.Errorf("unexpected expression: got=%s want=%s", got, want) - } - return &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 5}, - {Name: "cpu", Time: 5 * Second, Value: 3}, - {Name: "cpu", Time: 10 * Second, Value: 8}, - }} - } - }, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{2.5}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{1.5}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(4)}}, - }, - }, - { - Name: "AuxiliaryFields_WithWhereClause", - Statement: `SELECT host FROM (SELECT max(value), host FROM cpu GROUP BY time(5s)) WHERE max > 4 AND time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:15Z'`, - Fields: map[string]influxql.DataType{ - "value": influxql.Float, - "host": influxql.Tag, - }, - MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { - if got, want := tr.MinTimeNano(), 0*Second; got != want { - t.Errorf("unexpected min time: got=%d want=%d", 
got, want) - } - if got, want := tr.MaxTimeNano(), 15*Second-1; got != want { - t.Errorf("unexpected max time: got=%d want=%d", got, want) - } - return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { - if got, want := m.Name, "cpu"; got != want { - t.Errorf("unexpected source: got=%s want=%s", got, want) - } - if got, want := opt.Expr.String(), "max(value::float)"; got != want { - t.Errorf("unexpected expression: got=%s want=%s", got, want) - } - if got, want := opt.Aux, []influxql.VarRef{{Val: "host", Type: influxql.Tag}}; !cmp.Equal(got, want) { - t.Errorf("unexpected auxiliary fields:\n%s", cmp.Diff(want, got)) - } - return &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 5, Aux: []interface{}{"server02"}}, - {Name: "cpu", Time: 5 * Second, Value: 3, Aux: []interface{}{"server01"}}, - {Name: "cpu", Time: 10 * Second, Value: 8, Aux: []interface{}{"server03"}}, - }} - } - }, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{"server02"}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{"server03"}}, - }, - }, - { - Name: "AuxiliaryFields_NonExistentField", - Statement: `SELECT host FROM (SELECT max(value) FROM cpu GROUP BY time(5s)) WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:15Z'`, - Fields: map[string]influxql.DataType{"value": influxql.Float}, - MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { - return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { - return &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: 5}, - {Name: "cpu", Time: 5 * Second, Value: 3}, - {Name: "cpu", Time: 10 * Second, Value: 8}, - }} - } - }, - Rows: []query.Row(nil), - }, - { - Name: "AggregateOfMath", - Statement: `SELECT mean(percentage) FROM (SELECT value * 100.0 AS percentage FROM cpu) WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:15Z' GROUP BY time(5s)`, - Fields: map[string]influxql.DataType{"value": influxql.Float}, - MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { - if got, want := tr.MinTimeNano(), 0*Second; got != want { - t.Errorf("unexpected min time: got=%d want=%d", got, want) - } - if got, want := tr.MaxTimeNano(), 15*Second-1; got != want { - t.Errorf("unexpected max time: got=%d want=%d", got, want) - } - return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { - if got, want := m.Name, "cpu"; got != want { - t.Errorf("unexpected source: got=%s want=%s", got, want) - } - if got, want := opt.Expr, influxql.Expr(nil); got != want { - t.Errorf("unexpected expression: got=%s want=%s", got, want) - } - if got, want := opt.Aux, []influxql.VarRef{{Val: "value", Type: influxql.Float}}; !cmp.Equal(got, want) { - t.Errorf("unexpected auxiliary fields:\n%s", cmp.Diff(want, got)) - } - return &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Aux: []interface{}{0.5}}, - {Name: "cpu", Time: 2 * Second, Aux: []interface{}{1.0}}, - {Name: "cpu", Time: 5 * Second, Aux: []interface{}{0.05}}, - {Name: "cpu", Time: 8 * Second, Aux: []interface{}{0.45}}, - {Name: "cpu", Time: 12 * Second, Aux: []interface{}{0.34}}, - }} - } - }, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(75)}}, - {Time: 5 * Second, Series: query.Series{Name: "cpu"}, Values: 
[]interface{}{float64(25)}}, - {Time: 10 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(34)}}, - }, - }, - { - Name: "Cast", - Statement: `SELECT value::integer FROM (SELECT mean(value) AS value FROM cpu)`, - Fields: map[string]influxql.DataType{"value": influxql.Integer}, - MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { - return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { - if got, want := m.Name, "cpu"; got != want { - t.Errorf("unexpected source: got=%s want=%s", got, want) - } - if got, want := opt.Expr.String(), "mean(value::integer)"; got != want { - t.Errorf("unexpected expression: got=%s want=%s", got, want) - } - return &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: 0 * Second, Value: float64(20) / float64(6)}, - }} - } - }, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3)}}, - }, - }, - { - Name: "CountTag", - Statement: `SELECT count(host) FROM (SELECT value, host FROM cpu) WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:15Z'`, - Fields: map[string]influxql.DataType{ - "value": influxql.Float, - "host": influxql.Tag, - }, - MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { - if got, want := tr.MinTimeNano(), 0*Second; got != want { - t.Errorf("unexpected min time: got=%d want=%d", got, want) - } - if got, want := tr.MaxTimeNano(), 15*Second-1; got != want { - t.Errorf("unexpected max time: got=%d want=%d", got, want) - } - return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { - if got, want := m.Name, "cpu"; got != want { - t.Errorf("unexpected source: got=%s want=%s", got, want) - } - if got, want := opt.Aux, []influxql.VarRef{ - {Val: "host", Type: influxql.Tag}, - {Val: "value", Type: influxql.Float}, - }; !cmp.Equal(got, want) { - t.Errorf("unexpected auxiliary fields:\n%s", cmp.Diff(want, got)) - } - return &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Aux: []interface{}{"server01", 5.0}}, - {Name: "cpu", Aux: []interface{}{"server02", 3.0}}, - {Name: "cpu", Aux: []interface{}{"server03", 8.0}}, - }} - } - }, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{int64(3)}}, - }, - }, - { - Name: "StripTags", - Statement: `SELECT max FROM (SELECT max(value) FROM cpu GROUP BY host) WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:00:15Z'`, - Fields: map[string]influxql.DataType{"value": influxql.Float}, - MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { - if got, want := tr.MinTimeNano(), 0*Second; got != want { - t.Errorf("unexpected min time: got=%d want=%d", got, want) - } - if got, want := tr.MaxTimeNano(), 15*Second-1; got != want { - t.Errorf("unexpected max time: got=%d want=%d", got, want) - } - return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { - if got, want := m.Name, "cpu"; got != want { - t.Errorf("unexpected source: got=%s want=%s", got, want) - } - if got, want := opt.Expr.String(), "max(value::float)"; got != want { - t.Errorf("unexpected expression: got=%s want=%s", got, want) - } - return &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=server01"), Value: 5}, - {Name: "cpu", Tags: ParseTags("host=server02"), Value: 3}, - {Name: "cpu", Tags: ParseTags("host=server03"), Value: 8}, - }} - } - }, - Rows: []query.Row{ 
- {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{5.0}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{3.0}}, - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{8.0}}, - }, - }, - { - Name: "DifferentDimensionsWithSelectors", - Statement: `SELECT sum("max_min") FROM ( - SELECT max("value") - min("value") FROM cpu GROUP BY time(30s), host - ) WHERE time >= '1970-01-01T00:00:00Z' AND time < '1970-01-01T00:01:00Z' GROUP BY time(30s)`, - Fields: map[string]influxql.DataType{"value": influxql.Float}, - MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { - if got, want := tr.MinTimeNano(), 0*Second; got != want { - t.Errorf("unexpected min time: got=%d want=%d", got, want) - } - if got, want := tr.MaxTimeNano(), 60*Second-1; got != want { - t.Errorf("unexpected max time: got=%d want=%d", got, want) - } - return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { - if got, want := m.Name, "cpu"; got != want { - t.Errorf("unexpected source: got=%s want=%s", got, want) - } - - var itr query.Iterator = &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 7}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 20 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 8}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 10 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 20 * Second, Value: 7}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 30 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 40 * Second, Value: 1}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 50 * Second, Value: 9}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 30 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 40 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 50 * Second, Value: 2}, - }} - if _, ok := opt.Expr.(*influxql.Call); ok { - i, err := query.NewCallIterator(itr, opt) - if err != nil { - panic(err) - } - itr = i - } - return itr - } - }, - Rows: []query.Row{ - {Time: 0 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(10)}}, - {Time: 30 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{float64(8)}}, - }, - }, - { - Name: "TimeOrderingInTheOuterQuery", - Statement: `select * from (select last(value) from cpu group by host) order by time asc`, - Fields: map[string]influxql.DataType{"value": influxql.Float}, - MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { - - return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { - if got, want := m.Name, "cpu"; got != want { - t.Errorf("unexpected source: got=%s want=%s", got, want) - } - - var itr query.Iterator = &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Tags: ParseTags("host=A"), Time: 0 * Second, Value: 2}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 10 * Second, Value: 7}, - {Name: "cpu", Tags: ParseTags("host=A"), Time: 20 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 0 * Second, Value: 8}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 10 * Second, Value: 3}, - {Name: "cpu", Tags: ParseTags("host=B"), Time: 19 * Second, Value: 7}, - }} - if _, ok := opt.Expr.(*influxql.Call); ok { - i, err := query.NewCallIterator(itr, opt) 
- if err != nil { - panic(err) - } - itr = i - } - return itr - } - }, - Rows: []query.Row{ - {Time: 19 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{"B", float64(7)}}, - {Time: 20 * Second, Series: query.Series{Name: "cpu"}, Values: []interface{}{"A", float64(3)}}, - }, - }, - { - Name: "TimeZone", - Statement: `SELECT * FROM (SELECT * FROM cpu WHERE time >= '2019-04-17 09:00:00' and time < '2019-04-17 10:00:00' TZ('America/Chicago'))`, - Fields: map[string]influxql.DataType{"value": influxql.Float}, - MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { - return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { - if got, want := time.Unix(0, opt.StartTime).UTC(), mustParseTime("2019-04-17T14:00:00Z"); !got.Equal(want) { - t.Errorf("unexpected min time: got=%q want=%q", got, want) - } - if got, want := time.Unix(0, opt.EndTime).UTC(), mustParseTime("2019-04-17T15:00:00Z").Add(-1); !got.Equal(want) { - t.Errorf("unexpected max time: got=%q want=%q", got, want) - } - return &FloatIterator{} - } - }, - }, - { - Name: "DifferentDimensionsOrderByDesc", - Statement: `SELECT value, mytag FROM (SELECT last(value) AS value FROM testing GROUP BY mytag) ORDER BY desc`, - Fields: map[string]influxql.DataType{"value": influxql.Float}, - MapShardsFn: func(t *testing.T, tr influxql.TimeRange) CreateIteratorFn { - return func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) query.Iterator { - if got, want := m.Name, "testing"; got != want { - t.Errorf("unexpected source: got=%s want=%s", got, want) - } - - if opt.Ascending { - t.Error("expected iterator to be descending, not ascending") - } - - var itr query.Iterator = &FloatIterator{Points: []query.FloatPoint{ - {Name: "testing", Tags: ParseTags("mytag=c"), Time: mustParseTime("2019-06-25T22:36:20.93605779Z").UnixNano(), Value: 2}, - {Name: "testing", Tags: ParseTags("mytag=c"), Time: mustParseTime("2019-06-25T22:36:20.671604877Z").UnixNano(), Value: 2}, - {Name: "testing", Tags: ParseTags("mytag=c"), Time: mustParseTime("2019-06-25T22:36:20.255794481Z").UnixNano(), Value: 2}, - {Name: "testing", Tags: ParseTags("mytag=b"), Time: mustParseTime("2019-06-25T22:36:18.176662543Z").UnixNano(), Value: 2}, - {Name: "testing", Tags: ParseTags("mytag=b"), Time: mustParseTime("2019-06-25T22:36:17.815979113Z").UnixNano(), Value: 2}, - {Name: "testing", Tags: ParseTags("mytag=b"), Time: mustParseTime("2019-06-25T22:36:17.265031598Z").UnixNano(), Value: 2}, - {Name: "testing", Tags: ParseTags("mytag=a"), Time: mustParseTime("2019-06-25T22:36:15.144253616Z").UnixNano(), Value: 2}, - {Name: "testing", Tags: ParseTags("mytag=a"), Time: mustParseTime("2019-06-25T22:36:14.719167205Z").UnixNano(), Value: 2}, - {Name: "testing", Tags: ParseTags("mytag=a"), Time: mustParseTime("2019-06-25T22:36:13.711721316Z").UnixNano(), Value: 2}, - }} - if _, ok := opt.Expr.(*influxql.Call); ok { - i, err := query.NewCallIterator(itr, opt) - if err != nil { - panic(err) - } - itr = i - } - return itr - } - }, - Rows: []query.Row{ - {Time: mustParseTime("2019-06-25T22:36:20.93605779Z").UnixNano(), Series: query.Series{Name: "testing"}, Values: []interface{}{float64(2), "c"}}, - {Time: mustParseTime("2019-06-25T22:36:18.176662543Z").UnixNano(), Series: query.Series{Name: "testing"}, Values: []interface{}{float64(2), "b"}}, - {Time: mustParseTime("2019-06-25T22:36:15.144253616Z").UnixNano(), Series: query.Series{Name: "testing"}, Values: []interface{}{float64(2), "a"}}, - }, - }, - 
} { - t.Run(test.Name, func(t *testing.T) { - shardMapper := ShardMapper{ - MapShardsFn: func(_ context.Context, sources influxql.Sources, tr influxql.TimeRange) query.ShardGroup { - fn := test.MapShardsFn(t, tr) - return &ShardGroup{ - Fields: test.Fields, - CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - return fn(ctx, m, opt), nil - }, - } - }, - } - - stmt := MustParseSelectStatement(test.Statement) - stmt.OmitTime = true - cur, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{}) - if err != nil { - t.Fatalf("unexpected parse error: %s", err) - } else if a, err := ReadCursor(cur); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if diff := cmp.Diff(test.Rows, a); diff != "" { - t.Fatalf("unexpected points:\n%s", diff) - } - }) - } -} - -// Ensure that the subquery gets passed the max series limit. -func TestSubquery_MaxSeriesN(t *testing.T) { - shardMapper := ShardMapper{ - MapShardsFn: func(_ context.Context, sources influxql.Sources, tr influxql.TimeRange) query.ShardGroup { - return &ShardGroup{ - Fields: map[string]influxql.DataType{ - "value": influxql.Float, - }, - CreateIteratorFn: func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - if opt.MaxSeriesN != 1000 { - t.Errorf("max series limit has not been set") - } - return nil, nil - }, - } - }, - } - - stmt := MustParseSelectStatement(`SELECT max(value) FROM (SELECT value FROM cpu)`) - cur, err := query.Select(context.Background(), stmt, &shardMapper, query.SelectOptions{ - MaxSeriesN: 1000, - }) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - cur.Close() -} diff --git a/influxql/query/tmpldata b/influxql/query/tmpldata deleted file mode 100644 index 27ffdb4ff3b..00000000000 --- a/influxql/query/tmpldata +++ /dev/null @@ -1,37 +0,0 @@ -[ - { - "Name":"Float", - "name":"float", - "Type":"float64", - "Nil":"0", - "Zero":"float64(0)" - }, - { - "Name":"Integer", - "name":"integer", - "Type":"int64", - "Nil":"0", - "Zero":"int64(0)" - }, - { - "Name":"Unsigned", - "name":"unsigned", - "Type":"uint64", - "Nil":"0", - "Zero":"uint64(0)" - }, - { - "Name":"String", - "name":"string", - "Type":"string", - "Nil":"\"\"", - "Zero":"\"\"" - }, - { - "Name":"Boolean", - "name":"boolean", - "Type":"bool", - "Nil":"false", - "Zero":"false" - } -] diff --git a/influxql/query_request.go b/influxql/query_request.go deleted file mode 100644 index 3792c3ae57f..00000000000 --- a/influxql/query_request.go +++ /dev/null @@ -1,87 +0,0 @@ -package influxql - -import ( - "encoding/json" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -type EncodingFormat int - -func (f *EncodingFormat) UnmarshalJSON(bytes []byte) error { - var s string - - if err := json.Unmarshal(bytes, &s); err != nil { - return err - } - - *f = EncodingFormatFromMimeType(s) - return nil -} - -func (f EncodingFormat) MarshalJSON() ([]byte, error) { - return json.Marshal(f.ContentType()) -} - -const ( - EncodingFormatJSON EncodingFormat = iota - EncodingFormatTextCSV - EncodingFormatAppCSV - EncodingFormatMessagePack -) - -// Returns closed encoding format from the specified mime type. -// The default is JSON if no exact match is found. 
-func EncodingFormatFromMimeType(s string) EncodingFormat {
-	switch s {
-	case "application/csv":
-		return EncodingFormatAppCSV
-	case "text/csv":
-		return EncodingFormatTextCSV
-	case "application/x-msgpack":
-		return EncodingFormatMessagePack
-	default:
-		return EncodingFormatJSON
-	}
-}
-
-func (f EncodingFormat) ContentType() string {
-	switch f {
-	case EncodingFormatAppCSV:
-		return "application/csv"
-	case EncodingFormatTextCSV:
-		return "text/csv"
-	case EncodingFormatMessagePack:
-		return "application/x-msgpack"
-	default:
-		return "application/json"
-	}
-}
-
-type QueryRequest struct {
-	Authorization  *influxdb.Authorization `json:"authorization,omitempty"`
-	OrganizationID platform.ID             `json:"organization_id"`
-	DB             string                  `json:"db"`
-	RP             string                  `json:"rp"`
-	Epoch          string                  `json:"epoch"`
-	EncodingFormat EncodingFormat          `json:"encoding_format"`
-	ContentType    string                  `json:"content_type"` // ContentType is the desired response format.
-	Chunked        bool                    `json:"chunked"`      // Chunked indicates responses should be chunked using ChunkSize.
-	ChunkSize      int                     `json:"chunk_size"`   // ChunkSize is the number of points to be encoded per batch. 0 indicates no chunking.
-	Query          string                  `json:"query"`        // Query contains the InfluxQL.
-	Params         map[string]interface{}  `json:"params,omitempty"`
-	Source         string                  `json:"source"` // Source represents the ultimate source of the request.
-}
-
-// Valid validates the HTTP query request body expected by the QueryHandler.
-func (r *QueryRequest) Valid() error {
-	if !r.OrganizationID.Valid() {
-		return &errors.Error{
-			Msg:  "organization_id is not valid",
-			Code: errors.EInvalid,
-		}
-	}
-	return r.Authorization.Valid()
-}
diff --git a/influxql/query_request_test.go b/influxql/query_request_test.go
deleted file mode 100644
index 1a08ea95b7a..00000000000
--- a/influxql/query_request_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package influxql
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func TestEncodingFormatFromMimeType(t *testing.T) {
-	tests := []struct {
-		s   string
-		exp EncodingFormat
-	}{
-		{s: "application/csv", exp: EncodingFormatAppCSV},
-		{s: "text/csv", exp: EncodingFormatTextCSV},
-		{s: "application/x-msgpack", exp: EncodingFormatMessagePack},
-		{s: "application/json", exp: EncodingFormatJSON},
-		{s: "*/*", exp: EncodingFormatJSON},
-		{s: "", exp: EncodingFormatJSON},
-		{s: "application/other", exp: EncodingFormatJSON},
-	}
-	for _, tt := range tests {
-		t.Run(tt.s, func(t *testing.T) {
-			got := EncodingFormatFromMimeType(tt.s)
-			assert.Equal(t, tt.exp, got)
-		})
-	}
-}
diff --git a/influxql/service.go b/influxql/service.go
deleted file mode 100644
index 4430b61334f..00000000000
--- a/influxql/service.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package influxql
-
-import (
-	"context"
-	"fmt"
-	"io"
-
-	"github.com/influxdata/influxdb/v2/kit/check"
-)
-
-// ProxyQueryService performs InfluxQL queries and encodes the result into a writer.
-// The results are opaque to a ProxyQueryService.
-type ProxyQueryService interface {
-	check.Checker
-	Query(ctx context.Context, w io.Writer, req *QueryRequest) (Statistics, error)
-}
-
-// ProxyMode enumerates the possible ProxyQueryService operating modes used by a downstream client.
-type ProxyMode byte
-
-const (
-	// ProxyModeHTTP specifies a ProxyQueryService that forwards InfluxQL requests via HTTP to influxqld.
- ProxyModeHTTP ProxyMode = iota - - // ProxyModeQueue specifies a ProxyQueryService that pushes InfluxQL requests to a queue and influxqld issues a callback request to the initiating service. - ProxyModeQueue -) - -var proxyModeString = [...]string{ - ProxyModeHTTP: "http", - ProxyModeQueue: "queue", -} - -func (i ProxyMode) String() string { - if int(i) >= len(proxyModeString) { - return "invalid" - } - return proxyModeString[i] -} - -func (i *ProxyMode) Set(v string) (err error) { - switch v { - case "http": - *i = ProxyModeHTTP - case "queue": - *i = ProxyModeQueue - default: - err = fmt.Errorf("unexpected %s type: %s", i.Type(), v) - } - return err -} - -func (i *ProxyMode) Type() string { return "proxy-mode" } - -// RequestMode enumerates the possible influxqld operating modes for receiving InfluxQL requests. -type RequestMode byte - -const ( - // RequestModeHTTP specifies the HTTP listener should be active. - RequestModeHTTP RequestMode = iota - - // RequestModeQueue specifies the queue dispatcher should be active. - RequestModeQueue - - // RequestModeAll specifies both the HTTP listener and queue dispatcher should be active. - RequestModeAll -) - -var requestModeString = [...]string{ - RequestModeHTTP: "http", - RequestModeQueue: "queue", - RequestModeAll: "all", -} - -func (i RequestMode) String() string { - if int(i) >= len(requestModeString) { - return "invalid" - } - return requestModeString[i] -} - -func (i *RequestMode) Set(v string) (err error) { - switch v { - case "http": - *i = RequestModeHTTP - case "queue": - *i = RequestModeQueue - case "all": - *i = RequestModeAll - default: - err = fmt.Errorf("unexpected %s type: %s", i.Type(), v) - } - return err -} - -func (i *RequestMode) Type() string { return "request-mode" } diff --git a/influxql/statistics.go b/influxql/statistics.go deleted file mode 100644 index 3ff3bf1bf2b..00000000000 --- a/influxql/statistics.go +++ /dev/null @@ -1,123 +0,0 @@ -package influxql - -import ( - "sync" - "time" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/log" -) - -// Statistics is a collection of statistics about the processing of a query. -type Statistics struct { - PlanDuration time.Duration `json:"plan_duration"` // PlanDuration is the duration spent planning the query. - ExecuteDuration time.Duration `json:"execute_duration"` // ExecuteDuration is the duration spent executing the query. - StatementCount int `json:"statement_count"` // StatementCount is the number of InfluxQL statements executed - ScannedValues int `json:"scanned_values"` // ScannedValues is the number of values scanned from storage - ScannedBytes int `json:"scanned_bytes"` // ScannedBytes is the number of bytes scanned from storage -} - -// Adding returns the sum of s and other. -func (s Statistics) Adding(other Statistics) Statistics { - return Statistics{ - PlanDuration: s.PlanDuration + other.PlanDuration, - ExecuteDuration: s.ExecuteDuration + other.ExecuteDuration, - StatementCount: s.StatementCount + other.StatementCount, - ScannedValues: s.ScannedValues + other.ScannedValues, - ScannedBytes: s.ScannedBytes + other.ScannedBytes, - } -} - -// Add adds other to s.
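-// Unlike Adding, which returns a new Statistics value, Add accumulates other into the receiver in place.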
-func (s *Statistics) Add(other Statistics) { - s.PlanDuration += other.PlanDuration - s.ExecuteDuration += other.ExecuteDuration - s.StatementCount += other.StatementCount - s.ScannedValues += other.ScannedValues - s.ScannedBytes += other.ScannedBytes -} - -func (s *Statistics) LogToSpan(span opentracing.Span) { - if span == nil { - return - } - span.LogFields( - log.Float64("stats_plan_duration_seconds", s.PlanDuration.Seconds()), - log.Float64("stats_execute_duration_seconds", s.ExecuteDuration.Seconds()), - log.Int("stats_statement_count", s.StatementCount), - log.Int("stats_scanned_values", s.ScannedValues), - log.Int("stats_scanned_bytes", s.ScannedBytes), - ) -} - -// TotalDuration returns the sum of all durations for s. -func (s *Statistics) TotalDuration() time.Duration { - return s.PlanDuration + s.ExecuteDuration -} - -type CollectorFn func() Statistics - -func (fn CollectorFn) Statistics() Statistics { - return fn() -} - -type MutableCollector struct { - s *Statistics -} - -func NewMutableCollector(s *Statistics) *MutableCollector { - return &MutableCollector{s: s} -} - -func (c *MutableCollector) Statistics() Statistics { - return *c.s -} - -type ImmutableCollector struct { - s Statistics -} - -func NewImmutableCollector(s Statistics) *ImmutableCollector { - return &ImmutableCollector{s: s} -} - -func (c *ImmutableCollector) Statistics() Statistics { - return c.s -} - -type StatisticsCollector interface { - Statistics() Statistics -} - -type StatisticsGatherer struct { - mu sync.Mutex - collectors []StatisticsCollector -} - -func (sg *StatisticsGatherer) Append(sc StatisticsCollector) { - sg.mu.Lock() - defer sg.mu.Unlock() - sg.collectors = append(sg.collectors, sc) -} - -func (sg *StatisticsGatherer) Statistics() Statistics { - sg.mu.Lock() - defer sg.mu.Unlock() - - res := Statistics{} - for i := range sg.collectors { - res = res.Adding(sg.collectors[i].Statistics()) - } - return res -} - -func (sg *StatisticsGatherer) Reset() { - sg.mu.Lock() - defer sg.mu.Unlock() - - coll := sg.collectors - sg.collectors = sg.collectors[:0] - for i := range coll { - coll[i] = nil - } -} diff --git a/influxql/v1tests/query_test.go b/influxql/v1tests/query_test.go deleted file mode 100644 index 4b82bf8b939..00000000000 --- a/influxql/v1tests/query_test.go +++ /dev/null @@ -1,335 +0,0 @@ -package v1tests - -import ( - "context" - "fmt" - "math/rand" - "net/url" - "strings" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/tests" - "github.com/stretchr/testify/require" -) - -// Ensure parameterized queries can be executed -func TestServer_Query_Parameterized(t *testing.T) { - t.Parallel() - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=foo value=1.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=bar value=1.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-02T01:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - minTime := mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano() - maxTime := mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano() - - test.addQueries([]*Query{ - { - name: "parameterized time", - params: url.Values{"db": []string{"db0"}, "params": []string{fmt.Sprintf(`{"0": %d, "1": %d}`, minTime, maxTime)}}, - command: `SELECT value FROM cpu WHERE time 
>= $0 AND time < $1`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`, - }, - { - name: "parameterized tag", - params: url.Values{"db": []string{"db0"}, "params": []string{`{"0": "foo"}`}}, - command: `SELECT value FROM cpu WHERE host = $0`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure queries are properly chunked -func TestServer_Query_Chunked(t *testing.T) { - t.Parallel() - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=foo value=1.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=bar value=1.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-02T01:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "query is chunked", - params: url.Values{"db": []string{"db0"}, "chunked": []string{"true"}, "chunk_size": []string{"1"}}, - command: `SELECT value FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]],"partial":true}],"partial":true}]} -{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-02T01:00:00Z",1]]}]}]}`, - }, - { - name: "query is not chunked", - params: url.Values{"db": []string{"db0"}, "chunked": []string{"false"}, "chunk_size": []string{"1"}}, - command: `SELECT value FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1],["2000-01-02T01:00:00Z",1]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure a more complex group-by is correct -func TestServer_Query_ComplexGroupby(t *testing.T) { - t.Parallel() - s := OpenServer(t) - defer s.Close() - - r := rand.New(rand.NewSource(1000)) - abc := []string{"a", "b", "c"} - startDate := time.Date(2021, 5, 10, 0, 0, 0, 0, time.UTC) - endDate := time.Date(2021, 5, 15, 0, 0, 0, 0, time.UTC) - writes := make([]string, 0) - for date := startDate; date.Before(endDate); date = date.Add(1 * time.Hour) { - line := fmt.Sprintf(`m0,tenant_id=t%s,env=e%s total_count=%d %d`, - abc[r.Intn(3)], abc[r.Intn(3)], 10+r.Intn(5), date.UnixNano()) - writes = append(writes, line) - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - minTime := startDate.UnixNano() - maxTime := endDate.UnixNano() - - test.addQueries([]*Query{ - { - name: "parameterized time", - params: url.Values{"db": []string{"db0"}, "params": []string{fmt.Sprintf(`{"0": %d, "1": %d}`, minTime, maxTime)}}, - command: `SELECT SUM(ncount) as scount FROM (SELECT NON_NEGATIVE_DIFFERENCE(total_count) as ncount FROM m0 WHERE time >= $0 AND time <= $1 AND tenant_id='tb' GROUP BY env) WHERE time >= $0 AND time <= $1 GROUP BY time(1d)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"m0","columns":["time","scount"],"values":[["2021-05-10T00:00:00Z",10],["2021-05-11T00:00:00Z",5],["2021-05-12T00:00:00Z",3],["2021-05-13T00:00:00Z",7],["2021-05-14T00:00:00Z",4],["2021-05-15T00:00:00Z",null]]}]}]}`, - }, - }...) 
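- // Both parameterized tests above bind placeholders through the "params" URL value, which carries a
- // JSON object mapping placeholder names to values, e.g.
- //   url.Values{"params": []string{fmt.Sprintf(`{"0": %d, "1": %d}`, minTime, maxTime)}}
- // so $0 and $1 in the statement text resolve to minTime and maxTime when the query is executed.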
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_ShowDatabases(t *testing.T) { - t.Parallel() - s := OpenServer(t) - defer s.MustClose() - - ctx := context.Background() - ctx = icontext.SetAuthorizer(ctx, tests.MakeAuthorization(s.DefaultOrgID, s.DefaultUserID, influxdb.OperPermissions())) - - // create some buckets and mappings - buckets := []struct { - name string - db string - rp string - }{ - {"my-bucket", "my-bucket", "autogen"}, - {"telegraf/autogen", "telegraf", "autogen"}, - {"telegraf/1_week", "telegraf", "1_week"}, - {"telegraf/1_month", "telegraf", "1_month"}, - } - - for _, bi := range buckets { - b := influxdb.Bucket{ - OrgID: s.DefaultOrgID, - Type: influxdb.BucketTypeUser, - Name: bi.name, - RetentionPeriod: 0, - } - err := s.Launcher. - Launcher. - BucketService(). - CreateBucket(ctx, &b) - require.NoError(t, err) - - err = s.Launcher. - DBRPMappingService(). - Create(ctx, &influxdb.DBRPMapping{ - Database: bi.db, - RetentionPolicy: bi.rp, - Default: true, - OrganizationID: s.DefaultOrgID, - BucketID: b.ID, - }) - require.NoError(t, err) - } - - test := NewEmptyTest() - test.addQueries( - &Query{ - name: "show databases does not return duplicates", - command: "SHOW DATABASES", - exp: `{"results":[{"statement_id":0,"series":[{"name":"databases","columns":["name"],"values":[["my-bucket"],["telegraf"],["_monitoring"],["_tasks"],["db"]]}]}]}`, - }, - ) - - test.Run(context.Background(), t, s) -} - -func TestServer_Query_Subquery(t *testing.T) { - writes := []string{ - fmt.Sprintf(`request,region=west,status=200 duration_ms=100 %d`, mustParseTime(time.RFC3339Nano, "2004-04-09T01:00:00Z").UnixNano()), - fmt.Sprintf(`request,region=west,status=200 duration_ms=100 %d`, mustParseTime(time.RFC3339Nano, "2004-04-09T01:00:10Z").UnixNano()), - fmt.Sprintf(`request,region=west,status=200 duration_ms=100 %d`, mustParseTime(time.RFC3339Nano, "2004-04-09T01:00:20Z").UnixNano()), - fmt.Sprintf(`request,region=west,status=204 duration_ms=100 %d`, mustParseTime(time.RFC3339Nano, "2004-04-09T01:00:30Z").UnixNano()), - fmt.Sprintf(`request,region=west,status=204 duration_ms=100 %d`, mustParseTime(time.RFC3339Nano, "2004-04-09T01:00:40Z").UnixNano()), - fmt.Sprintf(`request,region=west,status=500 duration_ms=200 %d`, mustParseTime(time.RFC3339Nano, "2004-04-09T01:00:00Z").UnixNano()), - fmt.Sprintf(`request,region=west,status=500 duration_ms=200 %d`, mustParseTime(time.RFC3339Nano, "2004-04-09T01:00:10Z").UnixNano()), - fmt.Sprintf(`request,region=west,status=500 duration_ms=200 %d`, mustParseTime(time.RFC3339Nano, "2004-04-09T01:00:20Z").UnixNano()), - fmt.Sprintf(`request,region=west,status=504 duration_ms=200 %d`, mustParseTime(time.RFC3339Nano, "2004-04-09T01:00:30Z").UnixNano()), - fmt.Sprintf(`request,region=west,status=504 duration_ms=200 %d`, mustParseTime(time.RFC3339Nano, "2004-04-09T01:00:40Z").UnixNano()), - } - - ctx := context.Background() - s := NewTestServer(ctx, t, "db0", "rp0", writes...) - - cases := []Query{ - { - // This test verifies that data cached from the storage layer - // is complete in order to satisfy the two subqueries. 
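- // With the writes above, the non-5xx subquery matches five points with duration_ms=100 (sum 500)
- // and the 5xx subquery matches five points with duration_ms=200 (sum 1000), which is what
- // sum_success and sum_fail below expect.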
- name: "different tag predicates for same field", - params: url.Values{"db": []string{"db0"}}, - command: ` - SELECT SUM(success) as sum_success, SUM(requests) as sum_fail - FROM ( - SELECT duration_ms as success - FROM request - WHERE status !~ /^5.*$/ AND region = 'west' - ), ( - SELECT duration_ms as requests - FROM request - WHERE status =~ /^5.*$/ AND region = 'west' - ) -`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"request","columns":["time","sum_success","sum_fail"],"values":[["1970-01-01T00:00:00Z",500,1000]]}]}]}`, - }, - { - name: "different time predicates for same field", - params: url.Values{"db": []string{"db0"}}, - command: ` - SELECT COUNT(r1) as r1, COUNT(r2) as r2 - FROM ( - SELECT duration_ms as r1 - FROM request - WHERE time >= '2004-04-09T01:00:00Z' AND time <= '2004-04-09T01:00:20Z' - ), ( - SELECT duration_ms as r2 - FROM request - WHERE time >= '2004-04-09T01:00:10Z' AND time <= '2004-04-09T01:00:40Z' - ) -`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"request","columns":["time","r1","r2"],"values":[["1970-01-01T00:00:00Z",6,8]]}]}]}`, - }, - { - name: "outer query with narrower time range than subqueries", - params: url.Values{"db": []string{"db0"}}, - command: ` - SELECT COUNT(r1) as r1, COUNT(r2) as r2 - FROM ( - SELECT duration_ms as r1 - FROM request - WHERE time >= '2004-04-09T01:00:00Z' AND time <= '2004-04-09T01:00:20Z' - ), ( - SELECT duration_ms as r2 - FROM request - WHERE time >= '2004-04-09T01:00:10Z' AND time <= '2004-04-09T01:00:40Z' - ) - WHERE time >= '2004-04-09T01:00:20Z' AND time <= '2004-04-09T01:00:30Z' -`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"request","columns":["time","r1","r2"],"values":[["2004-04-09T01:00:20Z",2,4]]}]}]}`, - }, - { - name: "outer query with narrower time range than subqueries using aggregates", - params: url.Values{"db": []string{"db0"}}, - command: ` - SELECT r1 as r1, r2 as r2 - FROM ( - SELECT COUNT(duration_ms) as r1 - FROM request - WHERE time >= '2004-04-09T01:00:00Z' AND time <= '2004-04-09T01:00:20Z' - ), ( - SELECT COUNT(duration_ms) as r2 - FROM request - WHERE time >= '2004-04-09T01:00:10Z' AND time <= '2004-04-09T01:00:40Z' - ) - WHERE time >= '2004-04-09T01:00:20Z' AND time <= '2004-04-09T01:00:30Z' -`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"request","columns":["time","r1","r2"],"values":[["2004-04-09T01:00:20Z",2,null],["2004-04-09T01:00:20Z",null,4]]}]}]}`, - }, - { - name: "outer query with no time range and subqueries using aggregates", - params: url.Values{"db": []string{"db0"}}, - command: ` - SELECT r1 as r1, r2 as r2 - FROM ( - SELECT COUNT(duration_ms) as r1 - FROM request - WHERE time >= '2004-04-09T01:00:00Z' AND time <= '2004-04-09T01:00:20Z' - ), ( - SELECT COUNT(duration_ms) as r2 - FROM request - WHERE time >= '2004-04-09T01:00:10Z' AND time <= '2004-04-09T01:00:40Z' - ) -`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"request","columns":["time","r1","r2"],"values":[["2004-04-09T01:00:00Z",6,null],["2004-04-09T01:00:10Z",null,8]]}]}]}`, - }, - { - name: "outer query with narrower time range than subqueries no aggregate", - params: url.Values{"db": []string{"db0"}}, - command: ` - SELECT r1 as r1, r2 as r2 - FROM ( - SELECT duration_ms as r1 - FROM request - WHERE time >= '2004-04-09T01:00:00Z' AND time <= '2004-04-09T01:00:20Z' - ), ( - SELECT duration_ms as r2 - FROM request - WHERE time >= '2004-04-09T01:00:10Z' AND time <= '2004-04-09T01:00:40Z' - ) - WHERE time >= '2004-04-09T01:00:20Z' AND time <= 
'2004-04-09T01:00:30Z' -`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"request","columns":["time","r1","r2"],"values":[["2004-04-09T01:00:20Z",100,null],["2004-04-09T01:00:20Z",null,100],["2004-04-09T01:00:20Z",200,null],["2004-04-09T01:00:20Z",null,200],["2004-04-09T01:00:30Z",null,200],["2004-04-09T01:00:30Z",null,100]]}]}]}`, - }, - { - name: "outer query with time range", - params: url.Values{"db": []string{"db0"}}, - command: ` - SELECT COUNT(r1) as r1, COUNT(r2) as r2 - FROM ( - SELECT duration_ms as r1 - FROM request - ), ( - SELECT duration_ms as r2 - FROM request - ) - WHERE time >= '2004-04-09T01:00:20Z' AND time <= '2004-04-09T01:00:30Z' -`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"request","columns":["time","r1","r2"],"values":[["2004-04-09T01:00:20Z",4,4]]}]}]}`, - }, - } - - for _, q := range cases { - t.Run(q.name, func(t *testing.T) { - s.Execute(ctx, t, q) - }) - } -} diff --git a/influxql/v1tests/server_helpers.go b/influxql/v1tests/server_helpers.go deleted file mode 100644 index 123ba6efbb8..00000000000 --- a/influxql/v1tests/server_helpers.go +++ /dev/null @@ -1,232 +0,0 @@ -package v1tests - -import ( - "context" - "encoding/json" - "io" - "net/http" - "net/url" - "strings" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/tests" - "github.com/influxdata/influxdb/v2/tests/pipeline" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zapcore" -) - -func OpenServer(t *testing.T, extra ...launcher.OptSetter) *tests.DefaultPipeline { - t.Helper() - - defaults := []launcher.OptSetter{ - func(o *launcher.InfluxdOpts) { - o.LogLevel = zapcore.ErrorLevel - }, - } - - p := tests.NewDefaultPipeline(t, append(defaults, extra...)...) - p.MustOpen() - return p -} - -type Query struct { - name string - command string - params url.Values - exp, got string - skip string - skipOthers bool // set to true to only run this test - repeat int -} - -// Execute runs the command and returns an err if it fails -func (q *Query) Execute(ctx context.Context, t *testing.T, db string, c *tests.Client) (err error) { - t.Helper() - - params := [][2]string{{"q", q.command}} - if qdb := q.params.Get("db"); len(qdb) > 0 { - params = append(params, [2]string{"db", qdb}) - } - - if epoch := q.params.Get("epoch"); len(epoch) > 0 { - params = append(params, [2]string{"epoch", epoch}) - } - - if parameters := q.params.Get("params"); len(parameters) > 0 { - params = append(params, [2]string{"params", parameters}) - } - - if chunked := q.params.Get("chunked"); len(chunked) > 0 { - params = append(params, [2]string{"chunked", chunked}) - } - - if chunkSize := q.params.Get("chunk_size"); len(chunkSize) > 0 { - params = append(params, [2]string{"chunk_size", chunkSize}) - } - - err = c.Client.Get("/query"). - QueryParams(params...). - Header("Accept", "application/json"). - RespFn(func(resp *http.Response) error { - require.Equal(t, "application/json", resp.Header.Get("Content-Type")) - b, err := io.ReadAll(resp.Body) - q.got = strings.TrimSpace(string(b)) - return err - }). 
- Do(ctx) - - return -} - -type Write struct { - data string - bucketID platform.ID -} - -type Writes []*Write - -type Test struct { - orgID platform.ID - bucketID platform.ID - db string - rp string - writes Writes - queries []*Query - noDefaultMapping bool - noWrites bool -} - -func NewTest(db, rp string) Test { - return Test{ - db: db, - rp: rp, - } -} - -// NewEmptyTest creates an empty test without a default database and retention policy mapping or -// any expected writes. -func NewEmptyTest() Test { - return Test{noDefaultMapping: true, noWrites: true} -} - -func (qt *Test) Run(ctx context.Context, t *testing.T, p *tests.DefaultPipeline) { - t.Helper() - fx, auth := qt.init(ctx, t, p) - ctx = icontext.SetAuthorizer(ctx, auth) - - skipOthers := false - for _, query := range qt.queries { - skipOthers = skipOthers || query.skipOthers - } - - var queries []*Query - if skipOthers { - queries = make([]*Query, 0, len(qt.queries)) - for _, query := range qt.queries { - if query.skipOthers { - queries = append(queries, query) - } - } - } else { - queries = qt.queries - } - - for _, query := range queries { - t.Run(query.name, func(t *testing.T) { - if query.skip != "" { - t.Skipf("SKIP:: %s", query.skip) - } - err := query.Execute(ctx, t, qt.db, fx.Admin) - assert.NoError(t, err) - assert.Equal(t, query.exp, query.got, - "%s: unexpected results\nquery: %s\nparams: %v\nexp: %s\nactual: %s\n", - query.name, query.command, query.params, query.exp, query.got) - }) - } -} - -func (qt *Test) addQueries(q ...*Query) { - qt.queries = append(qt.queries, q...) -} - -func (qt *Test) init(ctx context.Context, t *testing.T, p *tests.DefaultPipeline) (fx pipeline.BaseFixture, auth *influxdb.Authorization) { - t.Helper() - - qt.orgID = p.DefaultOrgID - qt.bucketID = p.DefaultBucketID - - fx = pipeline.NewBaseFixture(t, p.Pipeline, qt.orgID, qt.bucketID) - - if !qt.noWrites { - require.Greater(t, len(qt.writes), 0) - qt.writeTestData(ctx, t, fx.Admin) - p.Flush() - } - - auth = tests.MakeAuthorization(qt.orgID, p.DefaultUserID, influxdb.OperPermissions()) - - if !qt.noDefaultMapping { - ctx = icontext.SetAuthorizer(ctx, auth) - err := p.Launcher. - DBRPMappingService(). 
- Create(ctx, &influxdb.DBRPMapping{ - Database: qt.db, - RetentionPolicy: qt.rp, - Default: true, - OrganizationID: qt.orgID, - BucketID: qt.bucketID, - }) - require.NoError(t, err) - } - - return -} - -func (qt *Test) writeTestData(ctx context.Context, t *testing.T, c *tests.Client) { - t.Helper() - for _, w := range qt.writes { - bucketID := &qt.bucketID - if w.bucketID != 0 { - bucketID = &w.bucketID - } - err := c.WriteTo(ctx, influxdb.BucketFilter{ID: bucketID, OrganizationID: &qt.orgID}, strings.NewReader(w.data)) - require.NoError(t, err) - } -} - -func maxInt64() string { - maxInt64, _ := json.Marshal(^int64(0)) - return string(maxInt64) -} - -func now() time.Time { - return time.Now().UTC() -} - -func yesterday() time.Time { - return now().Add(-1 * time.Hour * 24) -} - -func mustParseTime(layout, value string) time.Time { - tm, err := time.Parse(layout, value) - if err != nil { - panic(err) - } - return tm -} - -func mustParseLocation(tzname string) *time.Location { - loc, err := time.LoadLocation(tzname) - if err != nil { - panic(err) - } - return loc -} - -var LosAngeles = mustParseLocation("America/Los_Angeles") diff --git a/influxql/v1tests/server_test.go b/influxql/v1tests/server_test.go deleted file mode 100644 index fae84173573..00000000000 --- a/influxql/v1tests/server_test.go +++ /dev/null @@ -1,6411 +0,0 @@ -package v1tests - -import ( - "context" - "fmt" - "net/url" - "strconv" - "strings" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" - "github.com/influxdata/influxdb/v2/models" - "github.com/stretchr/testify/require" -) - -const ( - NeedsReview = "Needs review" // Test requires further review - NotSupported = "Not supported" // Test exercises features not supported in Cloud 2 - SkippedOSS = "Skipped OSS" // Test was skipped in OSS - FixRequired = "Fix required: " // Test requires fix per linked issue - FlakyTest = "Flaky test" // Test was skipped in OSS as flakey -) - -// Ensure the server can query with default databases (via param) and default retention policy -func TestServer_Query_DefaultDBAndRP(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: fmt.Sprintf(`cpu value=1.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano())}, - } - - test.addQueries([]*Query{ - { - name: "default db and rp", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM cpu GROUP BY *`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`, - }, - { - name: "default rp exists", - command: `show retention policies ON db0`, - exp: `{"results":[{"statement_id":0,"series":[{"columns":["name","duration","shardGroupDuration","replicaN","default"],"values":[["autogen","0s","168h0m0s",1,false],["rp0","0s","168h0m0s",1,true]]}]}]}`, - skip: "not supported", - }, - { - name: "default rp", - command: `SELECT * FROM db0..cpu GROUP BY *`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`, - }, - { - name: "default dp", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM rp0.cpu GROUP BY *`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can have a database with multiple measurements. -func TestServer_Query_Multiple_Measurements(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - // Make sure we do writes for measurements that will span across shards - writes := []string{ - fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "measurement in one shard but not another shouldn't panic server", - command: `SELECT host,value FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host","value"],"values":[["2000-01-01T00:00:00Z","server01",100]]}]}]}`, - }, - { - name: "measurement in one shard but not another shouldn't panic server", - command: `SELECT host,value FROM db0.rp0.cpu GROUP BY host`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","host","value"],"values":[["2000-01-01T00:00:00Z","server01",100]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can query with relative time. -func TestServer_Query_SelectRelativeTime(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - now := now() - yesterday := yesterday() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: fmt.Sprintf("cpu,host=server01 value=100 %s\ncpu,host=server01 value=200 %s", strconv.FormatInt(yesterday.UnixNano(), 10), strconv.FormatInt(now.UnixNano(), 10))}, - } - - test.addQueries([]*Query{ - { - name: "single point with time pre-calculated for past time queries yesterday", - command: `SELECT * FROM db0.rp0.cpu where time >= '` + yesterday.Add(-1*time.Minute).Format(time.RFC3339Nano) + `' GROUP BY *`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, yesterday.Format(time.RFC3339Nano), now.Format(time.RFC3339Nano)), - }, - { - name: "single point with time pre-calculated for relative time queries now", - command: `SELECT * FROM db0.rp0.cpu where time >= now() - 1m GROUP BY *`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",200]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Aggregates_IntMax(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join([]string{ - fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), - }, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "large mean and stddev - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEAN(value), STDDEV(value) FROM intmax`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmax","columns":["time","mean","stddev"],"values":[["1970-01-01T00:00:00Z",` + maxInt64() + `,0]]}]}]}`, - }, - }...) 
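- // Both points carry the maximum int64 value, so the expected mean is exactly that value
- // (i.e. computing the mean must not overflow) and the expected standard deviation is 0.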
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_TimeZone(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - var writes []string - for _, start := range []time.Time{ - // One day before DST starts. - time.Date(2000, 4, 1, 0, 0, 0, 0, LosAngeles), - // Middle of DST. No change. - time.Date(2000, 6, 1, 0, 0, 0, 0, LosAngeles), - // One day before DST ends. - time.Date(2000, 10, 28, 0, 0, 0, 0, LosAngeles), - } { - ts := start - // Write every hour for 4 days. - for i := 0; i < 24*4; i++ { - writes = append(writes, fmt.Sprintf(`cpu,interval=daily value=0 %d`, ts.UnixNano())) - ts = ts.Add(time.Hour) - } - - // Write every 5 minutes for 3 hours. Start at 1 on the day with DST. - ts = start.Add(25 * time.Hour) - for i := 0; i < 12*3; i++ { - writes = append(writes, fmt.Sprintf(`cpu,interval=hourly value=0 %d`, ts.UnixNano())) - ts = ts.Add(5 * time.Minute) - } - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "timezone offset - dst start - daily", - command: `SELECT count(value) FROM cpu WHERE time >= '2000-04-02T00:00:00-08:00' AND time < '2000-04-04T00:00:00-07:00' AND interval = 'daily' GROUP BY time(1d) TZ('America/Los_Angeles')`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","count"],"values":[["2000-04-02T00:00:00-08:00",23],["2000-04-03T00:00:00-07:00",24]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "timezone offset - no change - daily", - command: `SELECT count(value) FROM cpu WHERE time >= '2000-06-01T00:00:00-07:00' AND time < '2000-06-03T00:00:00-07:00' AND interval = 'daily' GROUP BY time(1d) TZ('America/Los_Angeles')`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","count"],"values":[["2000-06-01T00:00:00-07:00",24],["2000-06-02T00:00:00-07:00",24]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "timezone offset - dst end - daily", - command: `SELECT count(value) FROM cpu WHERE time >= '2000-10-29T00:00:00-07:00' AND time < '2000-10-31T00:00:00-08:00' AND interval = 'daily' GROUP BY time(1d) TZ('America/Los_Angeles')`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","count"],"values":[["2000-10-29T00:00:00-07:00",25],["2000-10-30T00:00:00-08:00",24]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "timezone offset - dst start - hourly", - command: `SELECT count(value) FROM cpu WHERE time >= '2000-04-02T01:00:00-08:00' AND time < '2000-04-02T04:00:00-07:00' AND interval = 'hourly' GROUP BY time(1h) TZ('America/Los_Angeles')`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","count"],"values":[["2000-04-02T01:00:00-08:00",12],["2000-04-02T03:00:00-07:00",12]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "timezone offset - no change - hourly", - command: `SELECT count(value) FROM cpu WHERE time >= '2000-06-02T01:00:00-07:00' AND time < '2000-06-02T03:00:00-07:00' AND interval = 'hourly' GROUP BY time(1h) TZ('America/Los_Angeles')`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","count"],"values":[["2000-06-02T01:00:00-07:00",12],["2000-06-02T02:00:00-07:00",12]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "timezone offset - dst end - hourly", - command: `SELECT count(value) FROM cpu WHERE time >= '2000-10-29T01:00:00-07:00' AND time < '2000-10-29T02:00:00-08:00' AND 
interval = 'hourly' GROUP BY time(1h) TZ('America/Los_Angeles')`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","count"],"values":[["2000-10-29T01:00:00-07:00",12],["2000-10-29T01:00:00-08:00",12]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server correctly supports data with identical tag values. -func TestServer_Query_IdenticalTagValues(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf("cpu,t1=val1 value=1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf("cpu,t2=val2 value=2 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf("cpu,t1=val2 value=3 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:02:00Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "measurements with identical tag values - SELECT *, no GROUP BY", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]},{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]}]}]}`, - }, - { - name: "measurements with identical tag values - SELECT *, with GROUP BY", - command: `SELECT value FROM db0.rp0.cpu GROUP BY t1,t2`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]},{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]}]}]}`, - }, - { - name: "measurements with identical tag values - SELECT value no GROUP BY", - command: `SELECT value FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1],["2000-01-01T00:01:00Z",2],["2000-01-01T00:02:00Z",3]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can handle a query that involves accessing no shards. -func TestServer_Query_NoShards(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - now := now() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `cpu,host=server01 value=1 ` + strconv.FormatInt(now.UnixNano(), 10)}, - } - - test.addQueries([]*Query{ - { - name: "selecting value should succeed", - command: `SELECT value FROM db0.rp0.cpu WHERE time < now() - 1d`, - exp: `{"results":[{"statement_id":0}]}`, - }, - }...) 
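- // The only point is written at the current time, so a `time < now() - 1d` predicate selects no
- // shards and the response carries just the statement id with no series.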
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can query a non-existent field -func TestServer_Query_NonExistent(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - now := now() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `cpu,host=server01 value=1 ` + strconv.FormatInt(now.UnixNano(), 10)}, - } - - test.addQueries([]*Query{ - { - name: "selecting value should succeed", - command: `SELECT value FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - { - name: "selecting non-existent should succeed", - command: `SELECT foo FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can perform basic math -func TestServer_Query_Math(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - now := now() - writes := []string{ - "float value=42 " + strconv.FormatInt(now.UnixNano(), 10), - "integer value=42i " + strconv.FormatInt(now.UnixNano(), 10), - } - - test := NewTest("db", "rp") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "SELECT multiple of float value", - command: `SELECT value * 2 from db.rp.float`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"float","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - { - name: "SELECT multiple of float value", - command: `SELECT 2 * value from db.rp.float`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"float","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - { - name: "SELECT multiple of integer value", - command: `SELECT value * 2 from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"integer","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - { - name: "SELECT float multiple of integer value", - command: `SELECT value * 2.0 from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"integer","columns":["time","value"],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - { - name: "SELECT square of float value", - command: `SELECT value * value from db.rp.float`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"float","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - { - name: "SELECT square of integer value", - command: `SELECT value * value from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"integer","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - { - name: "SELECT square of integer, float value", - command: `SELECT value * value,float from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"integer","columns":["time","value_value","float"],"values":[["%s",1764,null]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - { - name: "SELECT square of integer value with alias", - command: `SELECT value * value as square from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"integer","columns":["time","square"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - { - name: "SELECT sum of aggregates", - command: 
`SELECT max(value) + min(value) from db.rp.integer`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"integer","columns":["time","max_min"],"values":[["1970-01-01T00:00:00Z",84]]}]}]}`, - }, - { - name: "SELECT square of enclosed integer value", - command: `SELECT ((value) * (value)) from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"integer","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - { - name: "SELECT square of enclosed integer value", - command: `SELECT (value * value) from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"integer","columns":["time","value_value"],"values":[["%s",1764]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can handle various simple non_negative_derivative queries. -func TestServer_Query_SelectRawNonNegativeDerivative(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `cpu value=10 1278010021000000000 -cpu value=15 1278010022000000000 -cpu value=10 1278010023000000000 -cpu value=20 1278010024000000000 -`}, - } - - test.addQueries([]*Query{ - { - name: "calculate single non_negative_derivative", - command: `SELECT non_negative_derivative(value) from db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","non_negative_derivative"],"values":[["2010-07-01T18:47:02Z",5],["2010-07-01T18:47:04Z",10]]}]}]}`, - }, - { - name: "calculate single non_negative_derivative", - command: `SELECT non_negative_derivative(value, 10s) from db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","non_negative_derivative"],"values":[["2010-07-01T18:47:02Z",50],["2010-07-01T18:47:04Z",100]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can handle various group by time derivative queries. 
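-// With a 2s GROUP BY interval, derivative(agg(value)) defaults to a per-interval rate: for example
-// the mean rises from 12.5 to 22.5 between the two buckets, giving the expected 10, while an
-// explicit 4s unit doubles it to 20.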
-func TestServer_Query_SelectGroupByTimeDerivative(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `cpu value=10 1278010020000000000 -cpu value=15 1278010021000000000 -cpu value=20 1278010022000000000 -cpu value=25 1278010023000000000 - -cpu0,host=server01 ticks=10,total=100 1278010020000000000 -cpu0,host=server01 ticks=30,total=100 1278010021000000000 -cpu0,host=server01 ticks=32,total=100 1278010022000000000 -cpu0,host=server01 ticks=47,total=100 1278010023000000000 -cpu0,host=server02 ticks=40,total=100 1278010020000000000 -cpu0,host=server02 ticks=45,total=100 1278010021000000000 -cpu0,host=server02 ticks=84,total=100 1278010022000000000 -cpu0,host=server02 ticks=101,total=100 1278010023000000000 -`}, - } - - test.addQueries([]*Query{ - { - name: "calculate derivative of count with unit default (2s) group by time", - command: `SELECT derivative(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of count with unit 4s group by time", - command: `SELECT derivative(count(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",4],["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of mean with unit default (2s) group by time", - command: `SELECT derivative(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate derivative of mean with unit 4s group by time", - command: `SELECT derivative(mean(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - { - name: "calculate derivative of median with unit default (2s) group by time", - command: `SELECT derivative(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate derivative of median with unit 4s group by time", - command: `SELECT derivative(median(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - { - name: "calculate derivative of mode with unit default (2s) group by time", - command: `SELECT derivative(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate derivative of mode with 
unit 4s group by time", - command: `SELECT derivative(mode(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - - { - name: "calculate derivative of sum with unit default (2s) group by time", - command: `SELECT derivative(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - { - name: "calculate derivative of sum with unit 4s group by time", - command: `SELECT derivative(sum(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",40]]}]}]}`, - }, - { - name: "calculate derivative of first with unit default (2s) group by time", - command: `SELECT derivative(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate derivative of first with unit 4s group by time", - command: `SELECT derivative(first(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - { - name: "calculate derivative of last with unit default (2s) group by time", - command: `SELECT derivative(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate derivative of last with unit 4s group by time", - command: `SELECT derivative(last(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - { - name: "calculate derivative of min with unit default (2s) group by time", - command: `SELECT derivative(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate derivative of min with unit 4s group by time", - command: `SELECT derivative(min(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - { - name: "calculate derivative of max with unit default (2s) group by time", - command: `SELECT derivative(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate derivative of max with unit 4s group by time", - command: `SELECT derivative(max(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - { - name: "calculate derivative of percentile with unit default (2s) group by time", - command: `SELECT derivative(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate derivative of percentile with unit 4s group by time", - command: `SELECT derivative(percentile(value, 50), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - { - name: "calculate derivative of ticks divided by aggregate", - command: `SELECT non_negative_derivative(mean(ticks), 1s) / last(total) * 100 AS usage FROM db0.rp0.cpu0 WHERE time >= '2010-07-01 18:47:00' AND time <= '2010-07-01 18:47:03' GROUP BY host, time(1s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu0","tags":{"host":"server01"},"columns":["time","usage"],"values":[["2010-07-01T18:47:00Z",null],["2010-07-01T18:47:01Z",20],["2010-07-01T18:47:02Z",2],["2010-07-01T18:47:03Z",15]]},{"name":"cpu0","tags":{"host":"server02"},"columns":["time","usage"],"values":[["2010-07-01T18:47:00Z",null],["2010-07-01T18:47:01Z",5],["2010-07-01T18:47:02Z",39],["2010-07-01T18:47:03Z",17]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can query with the count aggregate function -func TestServer_Query_Count(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - now := now() - - test := NewTest("db0", "rp0") - writes := []string{ - `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10), - `ram value1=1.0,value2=2.0 ` + strconv.FormatInt(now.UnixNano(), 10), - } - - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - hourAgo := now.Add(-time.Hour).UTC() - - test.addQueries([]*Query{ - { - name: "selecting count(value) should succeed", - command: `SELECT count(value) FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - { - name: "selecting count(value) with where time should return result", - command: fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE time >= '%s'`, hourAgo.Format(time.RFC3339Nano)), - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","count"],"values":[["%s",1]]}]}]}`, hourAgo.Format(time.RFC3339Nano)), - }, - { - name: "selecting count(value) with filter that excludes all results should return 0", - command: fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE value=100 AND time >= '%s'`, hourAgo.Format(time.RFC3339Nano)), - exp: `{"results":[{"statement_id":0}]}`, - }, - { - name: "selecting count(value1) with matching filter against value2 should return correct result", - command: fmt.Sprintf(`SELECT count(value1) FROM db0.rp0.ram WHERE value2=2 AND time >= '%s'`, hourAgo.Format(time.RFC3339Nano)), - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"ram","columns":["time","count"],"values":[["%s",1]]}]}]}`, hourAgo.Format(time.RFC3339Nano)), - }, - { - name: "selecting count(value1) with non-matching filter against value2 should return correct result", - command: fmt.Sprintf(`SELECT count(value1) FROM db0.rp0.ram WHERE value2=3 AND time >= '%s'`, hourAgo.Format(time.RFC3339Nano)), - exp: `{"results":[{"statement_id":0}]}`, - }, - { - name: "selecting count(*) should expand the wildcard", - command: `SELECT count(*) FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","count_value"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - { - name: "selecting count(2) should error", - command: `SELECT count(2) FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"error":"expected field argument in count()"}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can limit concurrent series. -func TestServer_Query_MaxSelectSeriesN(t *testing.T) { - s := OpenServer(t, func(o *launcher.InfluxdOpts) { - o.CoordinatorConfig.MaxSelectSeriesN = 3 - }) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `cpu,host=server01 value=1.0 0`}, - &Write{data: `cpu,host=server02 value=1.0 0`}, - &Write{data: `cpu,host=server03 value=1.0 0`}, - &Write{data: `cpu,host=server04 value=1.0 0`}, - } - - test.addQueries([]*Query{ - { - name: "exceeed max series", - command: `SELECT COUNT(value) FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"error":"max-select-series limit exceeded: (4/3)"}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can query with Now(). 
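-// A point written at the current timestamp should be returned by a `time < now()` predicate and
-// excluded by `time > now()`.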
-func TestServer_Query_Now(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - now := now() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10)}, - } - - test.addQueries([]*Query{ - { - name: "where with time < now() should work", - command: `SELECT * FROM db0.rp0.cpu where time < now()`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host","value"],"values":[["%s","server01",1]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - { - name: "where with time < now() and GROUP BY * should work", - command: `SELECT * FROM db0.rp0.cpu where time < now() GROUP BY *`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - { - name: "where with time > now() should return an empty result", - command: `SELECT * FROM db0.rp0.cpu where time > now()`, - exp: `{"results":[{"statement_id":0}]}`, - }, - { - name: "where with time > now() with GROUP BY * should return an empty result", - command: `SELECT * FROM db0.rp0.cpu where time > now() GROUP BY *`, - exp: `{"results":[{"statement_id":0}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can query with epoch precisions. -func TestServer_Query_EpochPrecision(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - now := now() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10)}, - } - - test.addQueries([]*Query{ - { - name: "nanosecond precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"n"}}, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()), - }, - { - name: "microsecond precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"u"}}, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Microsecond)), - }, - { - name: "millisecond precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"ms"}}, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Millisecond)), - }, - { - name: "second precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"s"}}, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Second)), - }, - { - name: "minute precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"m"}}, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Minute)), - }, - { - name: "hour precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"h"}}, - exp: 
fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Hour)), - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server works with tag queries. -func TestServer_Query_Tags(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - now := now() - - writes := []string{ - fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", now.UnixNano()), - fmt.Sprintf("cpu,host=server02 value=50,core=2 %d", now.Add(1).UnixNano()), - - fmt.Sprintf("cpu1,host=server01,region=us-west value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf("cpu1,host=server02 value=200 %d", mustParseTime(time.RFC3339Nano, "2010-02-28T01:03:37.703820946Z").UnixNano()), - fmt.Sprintf("cpu1,host=server03 value=300 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), - - fmt.Sprintf("cpu2,host=server01 value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf("cpu2 value=200 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), - - fmt.Sprintf("cpu3,company=acme01 value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf("cpu3 value=200 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), - - fmt.Sprintf("status_code,url=http://www.example.com value=404 %d", mustParseTime(time.RFC3339Nano, "2015-07-22T08:13:54.929026672Z").UnixNano()), - fmt.Sprintf("status_code,url=https://influxdb.com value=418 %d", mustParseTime(time.RFC3339Nano, "2015-07-22T09:52:24.914395083Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "tag without field should return error", - command: `SELECT host FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"error":"statement must have at least one field in select clause"}]}`, - skip: SkippedOSS, // FIXME(benbjohnson): tags should stream as values - }, - { - name: "field with tag should succeed", - command: `SELECT host, value FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host","value"],"values":[["%s","server01",100],["%s","server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - { - name: "field with tag and GROUP BY should succeed", - command: `SELECT host, value FROM db0.rp0.cpu GROUP BY host`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","host","value"],"values":[["%s","server01",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","host","value"],"values":[["%s","server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - { - name: "field with two tags should succeed", - command: `SELECT host, value, core FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host","value","core"],"values":[["%s","server01",100,4],["%s","server02",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - { - name: "field with two tags and GROUP BY should succeed", - command: `SELECT host, value, core FROM db0.rp0.cpu GROUP BY host`, - exp: 
fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","host","value","core"],"values":[["%s","server01",100,4]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","host","value","core"],"values":[["%s","server02",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - { - name: "select * with tags should succeed", - command: `SELECT * FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","core","host","value"],"values":[["%s",4,"server01",100],["%s",2,"server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - { - name: "select * with tags with GROUP BY * should succeed", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","core","value"],"values":[["%s",4,100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","core","value"],"values":[["%s",2,50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - { - name: "group by tag", - command: `SELECT value FROM db0.rp0.cpu GROUP by host`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - { - name: "single field (EQ tag value1)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - { - name: "single field (2 EQ tags)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' AND region = 'us-west'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - { - name: "single field (OR different tags)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server03' OR region = 'us-west'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu1","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - { - name: "single field (OR with non-existent tag value)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' OR host = 'server66'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - { - name: "single field (OR with all tag values)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' OR host = 'server02' OR host = 'server03'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - { - name: "single field (1 EQ and 1 NEQ tag)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' AND region != 'us-west'`, - exp: `{"results":[{"statement_id":0}]}`, - }, - { - name: "single field (EQ tag value2)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server02'`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200]]}]}]}`, - }, - { - name: "single field (NEQ tag value1)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300]]}]}]}`, - }, - { - name: "single field (NEQ tag value1 AND NEQ tag value2)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' AND host != 'server02'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu1","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",300]]}]}]}`, - }, - { - name: "single field (NEQ tag value1 OR NEQ tag value2)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' OR host != 'server02'`, // Yes, this is always true, but that's the point. - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - { - name: "single field (NEQ tag value1 AND NEQ tag value2 AND NEQ tag value3)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' AND host != 'server02' AND host != 'server03'`, - exp: `{"results":[{"statement_id":0}]}`, - }, - { - name: "single field (NEQ tag value1, point without any tags)", - command: `SELECT value FROM db0.rp0.cpu2 WHERE host != 'server01'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu2","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200]]}]}]}`, - }, - { - name: "single field (NEQ tag value1, point without any tags)", - command: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme01/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu3","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200]]}]}]}`, - }, - { - name: "single field (regex tag match)", - command: `SELECT value FROM db0.rp0.cpu3 WHERE company =~ /acme01/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu3","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - { - name: "single field (regex tag match)", - command: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme[23]/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu3","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - { - name: "single field (regex tag match with escaping)", - command: `SELECT value FROM db0.rp0.status_code WHERE url !~ /https\:\/\/influxdb\.com/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"status_code","columns":["time","value"],"values":[["2015-07-22T08:13:54.929026672Z",404]]}]}]}`, - }, - { - name: "single field (regex tag match with escaping)", - command: `SELECT value FROM db0.rp0.status_code WHERE url =~ /https\:\/\/influxdb\.com/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"status_code","columns":["time","value"],"values":[["2015-07-22T09:52:24.914395083Z",418]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can handle various group by time moving average queries. 
-func TestServer_Query_SelectGroupByTimeMovingAverage(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `cpu value=10 1278010020000000000 -cpu value=15 1278010021000000000 -cpu value=20 1278010022000000000 -cpu value=25 1278010023000000000 -cpu value=30 1278010024000000000 -cpu value=35 1278010025000000000 -`}, - } - - test.addQueries([]*Query{ - { - name: "calculate moving average of count", - command: `SELECT moving_average(count(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",1],["2010-07-01T18:47:02Z",2],["2010-07-01T18:47:04Z",2]]}]}]}`, - }, - { - name: "calculate moving average of mean", - command: `SELECT moving_average(mean(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",17.5],["2010-07-01T18:47:04Z",27.5]]}]}]}`, - }, - { - name: "calculate moving average of median", - command: `SELECT moving_average(median(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",17.5],["2010-07-01T18:47:04Z",27.5]]}]}]}`, - }, - { - name: "calculate moving average of mode", - command: `SELECT moving_average(mode(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, - }, - { - name: "calculate moving average of sum", - command: `SELECT moving_average(sum(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",35],["2010-07-01T18:47:04Z",55]]}]}]}`, - }, - { - name: "calculate moving average of first", - command: `SELECT moving_average(first(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, - }, - { - name: "calculate moving average of last", - command: `SELECT moving_average(last(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",20],["2010-07-01T18:47:04Z",30]]}]}]}`, - }, - { - name: "calculate moving average of min", - command: `SELECT moving_average(min(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, - }, - { - name: "calculate moving 
average of max", - command: `SELECT moving_average(max(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",20],["2010-07-01T18:47:04Z",30]]}]}]}`, - }, - { - name: "calculate moving average of percentile", - command: `SELECT moving_average(percentile(value, 50), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can handle various group by time moving average queries. -func TestServer_Query_SelectGroupByTimeMovingAverageWithFill(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `cpu value=10 1278010020000000000 -cpu value=15 1278010021000000000 -cpu value=30 1278010024000000000 -cpu value=35 1278010025000000000 -`}, - } - - test.addQueries([]*Query{ - { - name: "calculate moving average of count with fill 0", - command: `SELECT moving_average(count(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",1],["2010-07-01T18:47:02Z",1],["2010-07-01T18:47:04Z",1]]}]}]}`, - }, - { - name: "calculate moving average of count with fill previous", - command: `SELECT moving_average(count(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",2],["2010-07-01T18:47:04Z",2]]}]}]}`, - }, - { - name: "calculate moving average of mean with fill 0", - command: `SELECT moving_average(mean(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",6.25],["2010-07-01T18:47:02Z",6.25],["2010-07-01T18:47:04Z",16.25]]}]}]}`, - }, - { - name: "calculate moving average of mean with fill previous", - command: `SELECT moving_average(mean(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",12.5],["2010-07-01T18:47:04Z",22.5]]}]}]}`, - }, - { - name: "calculate moving average of median with fill 0", - command: `SELECT moving_average(median(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",6.25],["2010-07-01T18:47:02Z",6.25],["2010-07-01T18:47:04Z",16.25]]}]}]}`, - }, - { - name: "calculate moving average of median with fill previous", - command: `SELECT moving_average(median(value), 2) from db0.rp0.cpu where time >= 
'2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",12.5],["2010-07-01T18:47:04Z",22.5]]}]}]}`, - }, - { - name: "calculate moving average of mode with fill 0", - command: `SELECT moving_average(mode(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",5],["2010-07-01T18:47:02Z",5],["2010-07-01T18:47:04Z",15]]}]}]}`, - }, - { - name: "calculate moving average of mode with fill previous", - command: `SELECT moving_average(mode(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",10],["2010-07-01T18:47:04Z",20]]}]}]}`, - }, - { - name: "calculate moving average of sum with fill 0", - command: `SELECT moving_average(sum(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",12.5],["2010-07-01T18:47:02Z",12.5],["2010-07-01T18:47:04Z",32.5]]}]}]}`, - }, - { - name: "calculate moving average of sum with fill previous", - command: `SELECT moving_average(sum(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",25],["2010-07-01T18:47:04Z",45]]}]}]}`, - }, - { - name: "calculate moving average of first with fill 0", - command: `SELECT moving_average(first(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",5],["2010-07-01T18:47:02Z",5],["2010-07-01T18:47:04Z",15]]}]}]}`, - }, - { - name: "calculate moving average of first with fill previous", - command: `SELECT moving_average(first(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",10],["2010-07-01T18:47:04Z",20]]}]}]}`, - }, - { - name: "calculate moving average of last with fill 0", - command: `SELECT moving_average(last(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",7.5],["2010-07-01T18:47:02Z",7.5],["2010-07-01T18:47:04Z",17.5]]}]}]}`, - }, - { - name: "calculate moving average of last with fill previous", - command: `SELECT moving_average(last(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, - }, - { - name: "calculate moving average of min with fill 0", - command: `SELECT moving_average(min(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",5],["2010-07-01T18:47:02Z",5],["2010-07-01T18:47:04Z",15]]}]}]}`, - }, - { - name: "calculate moving average of min with fill previous", - command: `SELECT moving_average(min(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",10],["2010-07-01T18:47:04Z",20]]}]}]}`, - }, - { - name: "calculate moving average of max with fill 0", - command: `SELECT moving_average(max(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",7.5],["2010-07-01T18:47:02Z",7.5],["2010-07-01T18:47:04Z",17.5]]}]}]}`, - }, - { - name: "calculate moving average of max with fill previous", - command: `SELECT moving_average(max(value), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",15],["2010-07-01T18:47:04Z",25]]}]}]}`, - }, - { - name: "calculate moving average of percentile with fill 0", - command: `SELECT moving_average(percentile(value, 50), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:00Z",5],["2010-07-01T18:47:02Z",5],["2010-07-01T18:47:04Z",15]]}]}]}`, - }, - { - name: "calculate moving average of percentile with fill previous", - command: `SELECT moving_average(percentile(value, 50), 2) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:05' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","moving_average"],"values":[["2010-07-01T18:47:02Z",10],["2010-07-01T18:47:04Z",20]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} // Ensure the server correctly queries with an alias. 
-func TestServer_Query_Alias(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf("cpu value=1i,steps=3i %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf("cpu value=2i,steps=4i %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "baseline query - SELECT * FROM db0.rp0.cpu", - command: `SELECT * FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","steps","value"],"values":[["2000-01-01T00:00:00Z",3,1],["2000-01-01T00:01:00Z",4,2]]}]}]}`, - }, - { - name: "basic query with alias - SELECT steps, value as v FROM db0.rp0.cpu", - command: `SELECT steps, value as v FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","steps","v"],"values":[["2000-01-01T00:00:00Z",3,1],["2000-01-01T00:01:00Z",4,2]]}]}]}`, - }, - { - name: "double aggregate sum - SELECT sum(value), sum(steps) FROM db0.rp0.cpu", - command: `SELECT sum(value), sum(steps) FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sum","sum_1"],"values":[["1970-01-01T00:00:00Z",3,7]]}]}]}`, - }, - { - name: "double aggregate sum reverse order - SELECT sum(steps), sum(value) FROM db0.rp0.cpu", - command: `SELECT sum(steps), sum(value) FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sum","sum_1"],"values":[["1970-01-01T00:00:00Z",7,3]]}]}]}`, - }, - { - name: "double aggregate sum with alias - SELECT sum(value) as sumv, sum(steps) as sums FROM db0.rp0.cpu", - command: `SELECT sum(value) as sumv, sum(steps) as sums FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sumv","sums"],"values":[["1970-01-01T00:00:00Z",3,7]]}]}]}`, - }, - { - name: "double aggregate with same value - SELECT sum(value), mean(value) FROM db0.rp0.cpu", - command: `SELECT sum(value), mean(value) FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",3,1.5]]}]}]}`, - }, - { - name: "double aggregate with same value and same alias - SELECT mean(value) as mv, max(value) as mv FROM db0.rp0.cpu", - command: `SELECT mean(value) as mv, max(value) as mv FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mv","mv"],"values":[["1970-01-01T00:00:00Z",1.5,2]]}]}]}`, - }, - { - name: "double aggregate with non-existent field - SELECT mean(value), max(foo) FROM db0.rp0.cpu", - command: `SELECT mean(value), max(foo) FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean","max"],"values":[["1970-01-01T00:00:00Z",1.5,null]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can handle various group by time cumulative sum queries. 
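The cumulative-sum cases have the same two-stage shape: an inner aggregate per 2-second bucket, then a running total across buckets. A minimal standalone sketch of the running-total stage, separate from the test harness and assuming only the arithmetic implied by the expected JSON (the bucket means 12.5 and 22.5 follow from the writes in the test below):

package main

import "fmt"

// cumulativeSum computes running totals over per-bucket aggregate values.
// For the test below, mean(value) per 2s bucket is [12.5, 22.5], so
// cumulative_sum(mean(value)) is expected to yield [12.5, 35].
func cumulativeSum(buckets []float64) []float64 {
	out := make([]float64, len(buckets))
	total := 0.0
	for i, v := range buckets {
		total += v
		out[i] = total
	}
	return out
}

func main() {
	fmt.Println(cumulativeSum([]float64{12.5, 22.5})) // [12.5 35]
}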
-func TestServer_Query_SelectGroupByTimeCumulativeSum(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `cpu value=10 1278010020000000000 -cpu value=15 1278010021000000000 -cpu value=20 1278010022000000000 -cpu value=25 1278010023000000000 -`}, - } - - test.addQueries([]*Query{ - { - name: "calculate cumulative sum of count", - command: `SELECT cumulative_sum(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",4]]}]}]}`, - }, - { - name: "calculate cumulative sum of mean", - command: `SELECT cumulative_sum(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",12.5],["2010-07-01T18:47:02Z",35]]}]}]}`, - }, - { - name: "calculate cumulative sum of median", - command: `SELECT cumulative_sum(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",12.5],["2010-07-01T18:47:02Z",35]]}]}]}`, - }, - { - name: "calculate cumulative sum of mode", - command: `SELECT cumulative_sum(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",30]]}]}]}`, - }, - { - name: "calculate cumulative sum of sum", - command: `SELECT cumulative_sum(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",25],["2010-07-01T18:47:02Z",70]]}]}]}`, - }, - { - name: "calculate cumulative sum of first", - command: `SELECT cumulative_sum(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",30]]}]}]}`, - }, - { - name: "calculate cumulative sum of last", - command: `SELECT cumulative_sum(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",40]]}]}]}`, - }, - { - name: "calculate cumulative sum of min", - command: `SELECT cumulative_sum(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",30]]}]}]}`, - }, - { - name: "calculate cumulative sum of max", - command: `SELECT cumulative_sum(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= 
'2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",40]]}]}]}`, - }, - { - name: "calculate cumulative sum of percentile", - command: `SELECT cumulative_sum(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",30]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can handle various group by time cumulative sum queries with fill. -func TestServer_Query_SelectGroupByTimeCumulativeSumWithFill(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `cpu value=10 1278010020000000000 -cpu value=20 1278010021000000000 -`}, - } - - test.addQueries([]*Query{ - { - name: "calculate cumulative sum of count with fill 0", - command: `SELECT cumulative_sum(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",2]]}]}]}`, - }, - { - name: "calculate cumulative sum of count with fill previous", - command: `SELECT cumulative_sum(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",4]]}]}]}`, - }, - { - name: "calculate cumulative sum of mean with fill 0", - command: `SELECT cumulative_sum(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",15]]}]}]}`, - }, - { - name: "calculate cumulative sum of mean with fill previous", - command: `SELECT cumulative_sum(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",30]]}]}]}`, - }, - { - name: "calculate cumulative sum of median with fill 0", - command: `SELECT cumulative_sum(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",15]]}]}]}`, - }, - { - name: "calculate cumulative sum of median with fill previous", - command: `SELECT cumulative_sum(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",30]]}]}]}`, - }, - { - name: "calculate cumulative sum of 
mode with fill 0", - command: `SELECT cumulative_sum(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate cumulative sum of mode with fill previous", - command: `SELECT cumulative_sum(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - { - name: "calculate cumulative sum of sum with fill 0", - command: `SELECT cumulative_sum(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",30]]}]}]}`, - }, - { - name: "calculate cumulative sum of sum with fill previous", - command: `SELECT cumulative_sum(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",60]]}]}]}`, - }, - { - name: "calculate cumulative sum of first with fill 0", - command: `SELECT cumulative_sum(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate cumulative sum of first with fill previous", - command: `SELECT cumulative_sum(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - { - name: "calculate cumulative sum of last with fill 0", - command: `SELECT cumulative_sum(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - { - name: "calculate cumulative sum of last with fill previous", - command: `SELECT cumulative_sum(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",40]]}]}]}`, - }, - { - name: "calculate cumulative sum of min with fill 0", - command: `SELECT cumulative_sum(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate cumulative sum of min with fill previous", - command: `SELECT cumulative_sum(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - { - name: "calculate cumulative sum of max with fill 0", - command: `SELECT cumulative_sum(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - { - name: "calculate cumulative sum of max with fill previous", - command: `SELECT cumulative_sum(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",40]]}]}]}`, - }, - { - name: "calculate cumulative sum of percentile with fill 0", - command: `SELECT cumulative_sum(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate cumulative sum of percentile with fill previous", - command: `SELECT cumulative_sum(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Aggregates_Int(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join([]string{ - fmt.Sprintf(`int value=45 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - }, "\n")}, - } - - test.addQueries([]*Query{ - // int64 - { - name: "stddev with just one point - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT STDDEV(value) FROM int`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"int","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_MathWithFill(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `cpu value=15 1278010020000000000 -`}, - } - - test.addQueries([]*Query{ - { - name: "multiplication with fill previous", - command: `SELECT 4*mean(value) FROM db0.rp0.cpu WHERE time >= '2010-07-01 18:47:00' AND time < '2010-07-01 18:48:30' GROUP BY time(30s) FILL(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2010-07-01T18:47:00Z",60],["2010-07-01T18:47:30Z",60],["2010-07-01T18:48:00Z",60]]}]}]}`, - }, - { - name: "multiplication of mode value with fill previous", - command: `SELECT 4*mode(value) FROM db0.rp0.cpu WHERE time >= '2010-07-01 18:47:00' AND time < '2010-07-01 18:48:30' GROUP BY time(30s) FILL(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mode"],"values":[["2010-07-01T18:47:00Z",60],["2010-07-01T18:47:30Z",60],["2010-07-01T18:48:00Z",60]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// mergeMany ensures that when merging many series together and some of them have a different number -// of points than others in a group by interval the results are correct -func TestServer_Query_MergeMany(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - - var writes []string - for i := 1; i < 11; i++ { - for j := 1; j < 5+i%3; j++ { - data := fmt.Sprintf(`cpu,host=server_%d value=22 %d`, i, time.Unix(int64(j), int64(0)).UTC().UnixNano()) - writes = append(writes, data) - } - } - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "GROUP by time", - command: `SELECT count(value) FROM db0.rp0.cpu WHERE time >= '1970-01-01T00:00:01Z' AND time <= '1970-01-01T00:00:06Z' GROUP BY time(1s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:01Z",10],["1970-01-01T00:00:02Z",10],["1970-01-01T00:00:03Z",10],["1970-01-01T00:00:04Z",10],["1970-01-01T00:00:05Z",7],["1970-01-01T00:00:06Z",3]]}]}]}`, - }, - { - skip: "Skipped OSS", - name: "GROUP by tag - FIXME issue #2875", - command: `SELECT count(value) FROM db0.rp0.cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:00Z' group by host`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server03"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, - }, - { - name: "GROUP by field", - command: `SELECT count(value) FROM db0.rp0.cpu group by value`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"value":""},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",50]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_SelectGroupByTime_MultipleAggregates(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `test,t=a x=1i 1000000000 -test,t=b y=1i 1000000000 -test,t=a x=2i 2000000000 -test,t=b y=2i 2000000000 -test,t=a x=3i 3000000000 -test,t=b y=3i 3000000000 -`}, - } - - test.addQueries([]*Query{ - { - name: "two aggregates with a group by host", - command: `SELECT mean(x) as x, mean(y) as y from db0.rp0.test where time >= 1s and time < 4s group by t, time(1s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"test","tags":{"t":"a"},"columns":["time","x","y"],"values":[["1970-01-01T00:00:01Z",1,null],["1970-01-01T00:00:02Z",2,null],["1970-01-01T00:00:03Z",3,null]]},{"name":"test","tags":{"t":"b"},"columns":["time","x","y"],"values":[["1970-01-01T00:00:01Z",null,1],["1970-01-01T00:00:02Z",null,2],["1970-01-01T00:00:03Z",null,3]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Regex(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu1,host=server01 value=10 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf(`cpu2,host=server01 value=20 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf(`cpu3,host=server01 value=30 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "default db and rp", - command: `SELECT * FROM /cpu[13]/`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu1","columns":["time","host","value"],"values":[["2015-02-28T01:03:36.703820946Z","server01",10]]},{"name":"cpu3","columns":["time","host","value"],"values":[["2015-02-28T01:03:36.703820946Z","server01",30]]}]}]}`, - }, - { - name: "default db and rp with GROUP BY *", - command: `SELECT * FROM /cpu[13]/ GROUP BY *`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, - }, - { - name: "specifying db and rp", - command: `SELECT * FROM db0.rp0./cpu[13]/ GROUP BY *`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, - }, - { - name: "default db and specified rp", - command: `SELECT * FROM rp0./cpu[13]/ GROUP BY *`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, - }, - { - name: "specified db and default rp", - command: `SELECT * FROM db0../cpu[13]/ GROUP BY *`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, - }, - { - name: "map field type with a regex source", - command: `SELECT value FROM /cpu[13]/`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Aggregates_Load(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join([]string{ - fmt.Sprintf(`load,region=us-east,host=serverA value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`load,region=us-east,host=serverB value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`load,region=us-west,host=serverC value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - }, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "group by multiple dimensions", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value) FROM load GROUP BY region, host`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"load","tags":{"host":"serverA","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",20]]},{"name":"load","tags":{"host":"serverB","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",30]]},{"name":"load","tags":{"host":"serverC","region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, - }, - { - name: "group by multiple dimensions", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value)*2 FROM load`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"load","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",300]]}]}]}`, - }, - { - name: "group by multiple dimensions", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value)/2 FROM load`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"load","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",75]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_SLimitAndSOffset(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - - var writes []string - for i := 1; i < 10; i++ { - data := fmt.Sprintf(`cpu,region=us-east,host=server-%d value=%d %d`, i, i, time.Unix(int64(i), int64(0)).UnixNano()) - writes = append(writes, data) - } - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "SLIMIT 2 SOFFSET 1", - command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 1`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server-2","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-3","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - { - name: "SLIMIT 2 SOFFSET 3", - command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 3`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server-4","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-5","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - { - name: "SLIMIT 3 SOFFSET 8", - command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 3 SOFFSET 8`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server-9","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_CumulativeCount(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `events signup=t 1005832000 -events signup=t 1048283000 -events signup=t 1784832000 -events signup=t 2000000000 -events signup=t 3084890000 -events signup=t 3838400000 -`}, - } - - test.addQueries([]*Query{ - { - name: "cumulative count", - command: `SELECT cumulative_sum(count(signup)) from db0.rp0.events where time >= 1s and time < 4s group by time(1s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"events","columns":["time","cumulative_sum"],"values":[["1970-01-01T00:00:01Z",3],["1970-01-01T00:00:02Z",4],["1970-01-01T00:00:03Z",6]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Aggregates_String(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join([]string{ - fmt.Sprintf(`stringdata value="first" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - fmt.Sprintf(`stringdata value="last" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:04Z").UnixNano()), - }, "\n")}, - } - - test.addQueries([]*Query{ - // strings - { - name: "STDDEV on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT STDDEV(value) FROM stringdata`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"stringdata","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, - skip: "Skipped OSS", // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator - }, - { - name: "MEAN on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEAN(value) FROM stringdata`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"stringdata","columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, - skip: "Skipped OSS", // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator - }, - { - name: "MEDIAN on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEDIAN(value) FROM stringdata`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"stringdata","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, - skip: "Skipped OSS", // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator - }, - { - name: "COUNT on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(value) FROM stringdata`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"stringdata","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, - skip: "Skipped OSS", // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator - }, - { - name: "FIRST on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT FIRST(value) FROM stringdata`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"stringdata","columns":["time","first"],"values":[["2000-01-01T00:00:03Z","first"]]}]}]}`, - skip: "Skipped OSS", // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator - }, - { - name: "LAST on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT LAST(value) FROM stringdata`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"stringdata","columns":["time","last"],"values":[["2000-01-01T00:00:04Z","last"]]}]}]}`, - skip: "Skipped OSS", // FIXME(benbjohnson): allow non-float var ref expr in cursor iterator - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_AggregateSelectors(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`network,host=server01,region=west,core=1 rx=10i,tx=20i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`network,host=server02,region=west,core=2 rx=40i,tx=50i,core=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`network,host=server03,region=east,core=3 rx=40i,tx=55i,core=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`network,host=server04,region=east,core=4 rx=40i,tx=60i,core=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`network,host=server05,region=west,core=1 rx=50i,tx=70i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - fmt.Sprintf(`network,host=server06,region=east,core=2 rx=50i,tx=40i,core=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), - fmt.Sprintf(`network,host=server07,region=west,core=3 rx=70i,tx=30i,core=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf(`network,host=server08,region=east,core=4 rx=90i,tx=10i,core=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), - fmt.Sprintf(`network,host=server09,region=east,core=1 rx=5i,tx=4i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:20Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "baseline", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM network`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","core","core_1","host","region","rx","tx"],"values":[["2000-01-01T00:00:00Z",2,"1","server01","west",10,20],["2000-01-01T00:00:10Z",3,"2","server02","west",40,50],["2000-01-01T00:00:20Z",4,"3","server03","east",40,55],["2000-01-01T00:00:30Z",1,"4","server04","east",40,60],["2000-01-01T00:00:40Z",2,"1","server05","west",50,70],["2000-01-01T00:00:50Z",3,"2","server06","east",50,40],["2000-01-01T00:01:00Z",4,"3","server07","west",70,30],["2000-01-01T00:01:10Z",1,"4","server08","east",90,10],["2000-01-01T00:01:20Z",2,"1","server09","east",5,4]]}]}]}`, - }, - { - name: "max - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",90]]}]}]}`, - }, - { - name: "max - baseline 30s - epoch ms", - params: url.Values{"db": []string{"db0"}, "epoch": []string{"ms"}}, - command: `SELECT max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: fmt.Sprintf( - `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","max"],"values":[[%d,40],[%d,50],[%d,90]]}]}]}`, - mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()/int64(time.Millisecond), - mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()/int64(time.Millisecond), - mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()/int64(time.Millisecond), - ), - }, - { - name: "max - 
tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","tx","max"],"values":[["2000-01-01T00:00:00Z",50,40],["2000-01-01T00:00:30Z",70,50],["2000-01-01T00:01:00Z",10,90]]}]}]}`, - }, - { - name: "max - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",90]]}]}]}`, - }, - { - name: "max - time and tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, tx, max(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","tx","max"],"values":[["2000-01-01T00:00:00Z",50,40],["2000-01-01T00:00:30Z",70,50],["2000-01-01T00:01:00Z",10,90]]}]}]}`, - }, - { - name: "min - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",5]]}]}]}`, - }, - { - name: "min - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","tx","min"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",4,5]]}]}]}`, - }, - { - name: "min - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",5]]}]}]}`, - }, - { - name: "min - time and tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, tx, min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","tx","min"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",4,5]]}]}]}`, - }, - { - name: "max,min - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT max(rx), min(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","max","min"],"values":[["2000-01-01T00:00:00Z",40,10],["2000-01-01T00:00:30Z",50,40],["2000-01-01T00:01:00Z",90,5]]}]}]}`, - }, - { - name: "first - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by 
time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",70]]}]}]}`, - }, - { - name: "first - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, tx, first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","tx","first"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",30,70]]}]}]}`, - }, - { - name: "first - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:30Z",40],["2000-01-01T00:01:00Z",70]]}]}]}`, - }, - { - name: "first - time and tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, tx, first(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","tx","first"],"values":[["2000-01-01T00:00:00Z",20,10],["2000-01-01T00:00:30Z",60,40],["2000-01-01T00:01:00Z",30,70]]}]}]}`, - }, - { - name: "last - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",5]]}]}]}`, - }, - { - name: "last - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","tx","last"],"values":[["2000-01-01T00:00:00Z",55,40],["2000-01-01T00:00:30Z",40,50],["2000-01-01T00:01:00Z",4,5]]}]}]}`, - }, - { - name: "last - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",5]]}]}]}`, - }, - { - name: "last - time and tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, tx, last(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","tx","last"],"values":[["2000-01-01T00:00:00Z",55,40],["2000-01-01T00:00:30Z",40,50],["2000-01-01T00:01:00Z",4,5]]}]}]}`, - }, - { - name: "count - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT count(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:30Z",3],["2000-01-01T00:01:00Z",3]]}]}]}`, - }, - { - name: "count - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, count(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:30Z",3],["2000-01-01T00:01:00Z",3]]}]}]}`, - }, - { - name: "count - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, count(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"error":"mixing aggregate and non-aggregate queries is not supported"}]}`, - }, - { - name: "distinct - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","distinct"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70],["2000-01-01T00:01:00Z",90],["2000-01-01T00:01:00Z",5]]}]}]}`, - }, - { - name: "distinct - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","distinct"],"values":[["2000-01-01T00:00:00Z",10],["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70],["2000-01-01T00:01:00Z",90],["2000-01-01T00:01:00Z",5]]}]}]}`, - }, - { - name: "distinct - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, distinct(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"error":"aggregate function distinct() cannot be combined with other functions or fields"}]}`, - }, - { - name: "mean - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",30],["2000-01-01T00:00:30Z",46.666666666666664],["2000-01-01T00:01:00Z",55]]}]}]}`, - }, - { - name: "mean - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, mean(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",30],["2000-01-01T00:00:30Z",46.666666666666664],["2000-01-01T00:01:00Z",55]]}]}]}`, - }, - { - name: "mean - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, mean(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"error":"mixing aggregate and non-aggregate queries is not supported"}]}`, - }, - { - name: "median 
- baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT median(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","median"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70]]}]}]}`, - }, - { - name: "median - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, median(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","median"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70]]}]}]}`, - }, - { - name: "median - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, median(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"error":"mixing aggregate and non-aggregate queries is not supported"}]}`, - }, - { - name: "mode - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mode(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","mode"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",5]]}]}]}`, - }, - { - name: "mode - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, mode(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","mode"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",5]]}]}]}`, - }, - { - name: "mode - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, mode(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"error":"mixing aggregate and non-aggregate queries is not supported"}]}`, - }, - { - name: "spread - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","spread"],"values":[["2000-01-01T00:00:00Z",30],["2000-01-01T00:00:30Z",10],["2000-01-01T00:01:00Z",85]]}]}]}`, - }, - { - name: "spread - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","spread"],"values":[["2000-01-01T00:00:00Z",30],["2000-01-01T00:00:30Z",10],["2000-01-01T00:01:00Z",85]]}]}]}`, - }, - { - name: "spread - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, spread(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"error":"mixing aggregate and non-aggregate queries is not supported"}]}`, - }, - { - name: "stddev - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: 
`SELECT stddev(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","stddev"],"values":[["2000-01-01T00:00:00Z",17.320508075688775],["2000-01-01T00:00:30Z",5.773502691896258],["2000-01-01T00:01:00Z",44.44097208657794]]}]}]}`, - }, - { - name: "stddev - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, stddev(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","stddev"],"values":[["2000-01-01T00:00:00Z",17.320508075688775],["2000-01-01T00:00:30Z",5.773502691896258],["2000-01-01T00:01:00Z",44.44097208657794]]}]}]}`, - }, - { - name: "stddev - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, stddev(rx) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"error":"mixing aggregate and non-aggregate queries is not supported"}]}`, - }, - { - name: "percentile - baseline 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT percentile(rx, 75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","percentile"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70]]}]}]}`, - }, - { - name: "percentile - time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, percentile(rx, 75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","percentile"],"values":[["2000-01-01T00:00:00Z",40],["2000-01-01T00:00:30Z",50],["2000-01-01T00:01:00Z",70]]}]}]}`, - }, - { - name: "percentile - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT tx, percentile(rx, 75) FROM network where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:29Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","tx","percentile"],"values":[["2000-01-01T00:00:00Z",50,40],["2000-01-01T00:00:30Z",70,50],["2000-01-01T00:01:00Z",30,70]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Selectors(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`network,host=server01,region=west,core=1 rx=10i,tx=20i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`network,host=server02,region=west,core=2 rx=40i,tx=50i,core=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`network,host=server03,region=east,core=3 rx=40i,tx=55i,core=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`network,host=server04,region=east,core=4 rx=40i,tx=60i,core=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`network,host=server05,region=west,core=1 rx=50i,tx=70i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - fmt.Sprintf(`network,host=server06,region=east,core=2 rx=50i,tx=40i,core=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), - fmt.Sprintf(`network,host=server07,region=west,core=3 rx=70i,tx=30i,core=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf(`network,host=server08,region=east,core=4 rx=90i,tx=10i,core=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), - fmt.Sprintf(`network,host=server09,region=east,core=1 rx=5i,tx=4i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:20Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "max - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT max(tx) FROM network`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","max"],"values":[["2000-01-01T00:00:40Z",70]]}]}]}`, - }, - { - name: "min - tx", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT min(tx) FROM network`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","min"],"values":[["2000-01-01T00:01:20Z",4]]}]}]}`, - }, - { - name: "first", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT first(tx) FROM network`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",20]]}]}]}`, - }, - { - name: "last", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT last(tx) FROM network`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","last"],"values":[["2000-01-01T00:01:20Z",4]]}]}]}`, - }, - { - name: "percentile", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT percentile(tx, 50) FROM network`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","percentile"],"values":[["2000-01-01T00:00:50Z",40]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_TopBottomInt(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - // cpu data with overlapping duplicate values - // hour 0 - fmt.Sprintf(`cpu,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02 value=3.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`cpu,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - // hour 1 - fmt.Sprintf(`cpu,host=server04 value=3.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server05 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:10Z").UnixNano()), - fmt.Sprintf(`cpu,host=server06 value=6.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:20Z").UnixNano()), - // hour 2 - fmt.Sprintf(`cpu,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:10Z").UnixNano()), - - // memory data - // hour 0 - fmt.Sprintf(`memory,host=a,service=redis value=1000i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`memory,host=b,service=mysql value=2000i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`memory,host=b,service=redis value=1500i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - // hour 1 - fmt.Sprintf(`memory,host=a,service=redis value=1001i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), - fmt.Sprintf(`memory,host=b,service=mysql value=2001i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), - fmt.Sprintf(`memory,host=b,service=redis value=1501i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), - // hour 2 - fmt.Sprintf(`memory,host=a,service=redis value=1002i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), - fmt.Sprintf(`memory,host=b,service=mysql value=2002i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), - fmt.Sprintf(`memory,host=b,service=redis value=1502i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T02:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "top - cpu", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 1) FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - { - name: "bottom - cpu", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT BOTTOM(value, 1) FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","bottom"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - { - name: "top - cpu - 2 values", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 2) FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - { - name: "bottom - cpu - 2 values", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT BOTTOM(value, 2) FROM cpu`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","bottom"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",3]]}]}]}`, - }, - { - name: "top - cpu - 3 values - sorts on tie properly", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 3) FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - { - name: "bottom - cpu - 3 values - sorts on tie properly", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT BOTTOM(value, 3) FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","bottom"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",3],["2000-01-01T01:00:00Z",3]]}]}]}`, - }, - { - name: "top - cpu - with tag", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, host, 2) FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top","host"],"values":[["2000-01-01T01:00:10Z",7,"server05"],["2000-01-01T02:00:10Z",9,"server08"]]}]}]}`, - }, - { - name: "bottom - cpu - with tag", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT BOTTOM(value, host, 2) FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","bottom","host"],"values":[["2000-01-01T00:00:00Z",2,"server01"],["2000-01-01T00:00:10Z",3,"server02"]]}]}]}`, - }, - { - name: "top - cpu - 3 values with limit 2", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 3) FROM cpu limit 2`, - exp: `{"results":[{"statement_id":0,"error":"limit (3) in top function can not be larger than the LIMIT (2) in the select statement"}]}`, - }, - { - name: "bottom - cpu - 3 values with limit 2", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT BOTTOM(value, 3) FROM cpu limit 2`, - exp: `{"results":[{"statement_id":0,"error":"limit (3) in bottom function can not be larger than the LIMIT (2) in the select statement"}]}`, - }, - { - name: "top - cpu - hourly", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - { - name: "bottom - cpu - hourly", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT BOTTOM(value, 1) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","bottom"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T01:00:00Z",3],["2000-01-01T02:00:00Z",7]]}]}]}`, - }, - { - name: "top - cpu - 2 values hourly", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 2) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:10Z",3],["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:10Z",7],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - { - name: "bottom - cpu - 2 values hourly", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT BOTTOM(value, 2) FROM cpu where time 
>= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","bottom"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",3],["2000-01-01T01:00:00Z",3],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - { - name: "top - cpu - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 3) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",3],["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:00Z",3],["2000-01-01T01:00:10Z",7],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - { - name: "bottom - cpu - 3 values hourly - validates that a bucket can have less than limit if no values exist in that time bucket", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT BOTTOM(value, 3) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:10Z' group by time(1h)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","bottom"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",3],["2000-01-01T00:00:20Z",4],["2000-01-01T01:00:00Z",3],["2000-01-01T01:00:10Z",7],["2000-01-01T01:00:20Z",6],["2000-01-01T02:00:00Z",7],["2000-01-01T02:00:10Z",9]]}]}]}`, - }, - { - name: "top - memory - 2 values, two tags", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, 2), host, service FROM memory`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T01:00:00Z",2001,"b","mysql"],["2000-01-01T02:00:00Z",2002,"b","mysql"]]}]}]}`, - }, - { - name: "bottom - memory - 2 values, two tags", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT BOTTOM(value, 2), host, service FROM memory`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"memory","columns":["time","bottom","host","service"],"values":[["2000-01-01T00:00:00Z",1000,"a","redis"],["2000-01-01T01:00:00Z",1001,"a","redis"]]}]}]}`, - }, - { - name: "top - memory - host tag with limit 2", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, host, 2) FROM memory`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"memory","columns":["time","top","host"],"values":[["2000-01-01T02:00:00Z",2002,"b"],["2000-01-01T02:00:00Z",1002,"a"]]}]}]}`, - }, - { - name: "bottom - memory - host tag with limit 2", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT BOTTOM(value, host, 2) FROM memory`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"memory","columns":["time","bottom","host"],"values":[["2000-01-01T00:00:00Z",1000,"a"],["2000-01-01T00:00:00Z",1500,"b"]]}]}]}`, - }, - { - name: "top - memory - host tag with limit 2, service tag in select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, host, 2), service FROM memory`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`, - }, - { - name: "bottom - memory - host tag with limit 2, service tag in select", - 
params: url.Values{"db": []string{"db0"}}, - command: `SELECT BOTTOM(value, host, 2), service FROM memory`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"memory","columns":["time","bottom","host","service"],"values":[["2000-01-01T00:00:00Z",1000,"a","redis"],["2000-01-01T00:00:00Z",1500,"b","redis"]]}]}]}`, - }, - { - name: "top - memory - service tag with limit 2, host tag in select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, service, 2), host FROM memory`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"memory","columns":["time","top","service","host"],"values":[["2000-01-01T02:00:00Z",2002,"mysql","b"],["2000-01-01T02:00:00Z",1502,"redis","b"]]}]}]}`, - }, - { - name: "bottom - memory - service tag with limit 2, host tag in select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT BOTTOM(value, service, 2), host FROM memory`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"memory","columns":["time","bottom","service","host"],"values":[["2000-01-01T00:00:00Z",1000,"redis","a"],["2000-01-01T00:00:00Z",2000,"mysql","b"]]}]}]}`, - }, - { - name: "top - memory - host and service tag with limit 2", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, host, service, 2) FROM memory`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1502,"b","redis"]]}]}]}`, - }, - { - name: "bottom - memory - host and service tag with limit 2", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT BOTTOM(value, host, service, 2) FROM memory`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"memory","columns":["time","bottom","host","service"],"values":[["2000-01-01T00:00:00Z",1000,"a","redis"],["2000-01-01T00:00:00Z",1500,"b","redis"]]}]}]}`, - }, - { - name: "top - memory - host tag with limit 2 with service tag in select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, host, 2), service FROM memory`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`, - }, - { - name: "bottom - memory - host tag with limit 2 with service tag in select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT BOTTOM(value, host, 2), service FROM memory`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"memory","columns":["time","bottom","host","service"],"values":[["2000-01-01T00:00:00Z",1000,"a","redis"],["2000-01-01T00:00:00Z",1500,"b","redis"]]}]}]}`, - }, - { - name: "top - memory - host and service tag with limit 3", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT TOP(value, host, service, 3) FROM memory`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"memory","columns":["time","top","host","service"],"values":[["2000-01-01T02:00:00Z",2002,"b","mysql"],["2000-01-01T02:00:00Z",1502,"b","redis"],["2000-01-01T02:00:00Z",1002,"a","redis"]]}]}]}`, - }, - { - name: "bottom - memory - host and service tag with limit 3", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT BOTTOM(value, host, service, 3) FROM memory`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"memory","columns":["time","bottom","host","service"],"values":[["2000-01-01T00:00:00Z",1000,"a","redis"],["2000-01-01T00:00:00Z",1500,"b","redis"],["2000-01-01T00:00:00Z",2000,"b","mysql"]]}]}]}`, - }, - - // TODO - // - Test that specifiying fields or tags in the function will rewrite the query to expand them to the fields - // - Test that a field can be used in the top function - // - Test that asking for a field will come back before a tag if they have the same name for a tag and a field - // - Test that `select top(value, host, 2)` when there is only one value for `host` it will only bring back one value - // - Test that `select top(value, host, 4) from foo where time > now() - 1d and time < now() group by time(1h)` and host is unique in some time buckets that it returns only the unique ones, and not always 4 values - - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_ExactTimeRange(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00.000000000Z").UnixNano()), - fmt.Sprintf(`cpu value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00.000000001Z").UnixNano()), - fmt.Sprintf(`cpu value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00.000000002Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "query point at exactly one time - rfc3339nano", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM cpu WHERE time = '2000-01-01T00:00:00.000000001Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00.000000001Z",2]]}]}]}`, - }, - { - name: "query point at exactly one time - timestamp", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM cpu WHERE time = 946684800000000001`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00.000000001Z",2]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Aggregates_Math(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`network,host=server01,region=west,core=1 rx=10i,tx=20i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`network,host=server02,region=west,core=2 rx=40i,tx=50i,core=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`network,host=server03,region=east,core=3 rx=40i,tx=55i,core=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`network,host=server04,region=east,core=4 rx=40i,tx=60i,core=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`network,host=server05,region=west,core=1 rx=50i,tx=70i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - fmt.Sprintf(`network,host=server06,region=east,core=2 rx=50i,tx=40i,core=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), - fmt.Sprintf(`network,host=server07,region=west,core=3 rx=70i,tx=30i,core=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf(`network,host=server08,region=east,core=4 rx=90i,tx=10i,core=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), - fmt.Sprintf(`network,host=server09,region=east,core=1 rx=5i,tx=4i,core=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:20Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "add two selectors", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT max(rx) + min(rx) FROM network WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","max_min"],"values":[["2000-01-01T00:00:00Z",95]]}]}]}`, - }, - { - name: "use math on two selectors separately", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT max(rx) * 1, min(rx) * 1 FROM network WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","max","min"],"values":[["2000-01-01T00:00:00Z",90,5]]}]}]}`, - }, - { - name: "math with a single selector", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT max(rx) * 1 FROM network WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"network","columns":["time","max"],"values":[["2000-01-01T00:01:10Z",90]]}]}]}`, - }, - }...)
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Aggregates_CPU(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join([]string{ - fmt.Sprintf(`cpu,region=uk,host=serverZ,service=redis value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - fmt.Sprintf(`cpu,region=uk,host=serverZ,service=mysql value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - }, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "aggregation with WHERE and AND", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value) FROM cpu WHERE region='uk' AND host='serverZ'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Aggregates_IntMany(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join([]string{ - fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), - fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), - }, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "mean and stddev - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEAN(value), STDDEV(value) FROM intmany WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","mean","stddev"],"values":[["2000-01-01T00:00:00Z",5,2.138089935299395]]}]}]}`, - }, - { - name: "first - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT FIRST(value) FROM intmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - { - name: "first - int - epoch ms", - params: url.Values{"db": []string{"db0"}, "epoch": []string{"ms"}}, - command: `SELECT FIRST(value) FROM intmany`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","first"],"values":[[%d,2]]}]}]}`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()/int64(time.Millisecond)), - }, - { - name: "last - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT LAST(value) FROM intmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:01:10Z",9]]}]}]}`, - }, - { - name: "spread - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT 
SPREAD(value) FROM intmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","spread"],"values":[["1970-01-01T00:00:00Z",7]]}]}]}`, - }, - { - name: "median - even count - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEDIAN(value) FROM intmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4.5]]}]}]}`, - }, - { - name: "median - odd count - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEDIAN(value) FROM intmany where time < '2000-01-01T00:01:10Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, - }, - { - name: "mode - single - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MODE(value) FROM intmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","mode"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, - }, - { - name: "mode - multiple - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MODE(value) FROM intmany where time < '2000-01-01T00:01:10Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","mode"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, - }, - { - name: "distinct as call - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT(value) FROM intmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",2],["1970-01-01T00:00:00Z",4],["1970-01-01T00:00:00Z",5],["1970-01-01T00:00:00Z",7],["1970-01-01T00:00:00Z",9]]}]}]}`, - }, - { - name: "distinct alt syntax - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT value FROM intmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",2],["1970-01-01T00:00:00Z",4],["1970-01-01T00:00:00Z",5],["1970-01-01T00:00:00Z",7],["1970-01-01T00:00:00Z",9]]}]}]}`, - }, - { - name: "distinct select tag - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT(host) FROM intmany`, - exp: `{"results":[{"statement_id":0,"error":"statement must have at least one field in select clause"}]}`, - skip: SkippedOSS, // FIXME(benbjohnson): should be allowed, need to stream tag values - }, - { - name: "distinct alt select tag - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT host FROM intmany`, - exp: `{"results":[{"statement_id":0,"error":"statement must have at least one field in select clause"}]}`, - skip: SkippedOSS, // FIXME(benbjohnson): should be allowed, need to stream tag values - }, - { - name: "count distinct - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT value) FROM intmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, - }, - { - name: "count distinct as call - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT(value)) FROM intmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, - }, - { - name: "count distinct select tag - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT host) FROM intmany`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, - skip: SkippedOSS, // FIXME(benbjohnson): stream tag values - }, - { - name: "count distinct as call select tag - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT host) FROM intmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, - skip: SkippedOSS, // FIXME(benbjohnson): stream tag values - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Where_Fields(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu alert_id="alert",tenant_id="tenant",_cust="johnson brothers" %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf(`cpu alert_id="alert",tenant_id="tenant",_cust="johnson brothers" %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - - fmt.Sprintf(`cpu load=100.0,core=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`cpu load=80.0,core=2 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:01:02Z").UnixNano()), - - fmt.Sprintf(`clicks local=true %d`, mustParseTime(time.RFC3339Nano, "2014-11-10T23:00:01Z").UnixNano()), - fmt.Sprintf(`clicks local=false %d`, mustParseTime(time.RFC3339Nano, "2014-11-10T23:00:02Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - // non type specific - { - name: "missing measurement with group by", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT load from missing group by *`, - exp: `{"results":[{"statement_id":0}]}`, - }, - - // string - { - name: "single string field", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT alert_id FROM cpu WHERE alert_id='alert'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","alert_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert"]]}]}]}`, - }, - { - name: "string AND query, all fields in SELECT", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT alert_id,tenant_id,_cust FROM cpu WHERE alert_id='alert' AND tenant_id='tenant'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","alert_id","tenant_id","_cust"],"values":[["2015-02-28T01:03:36.703820946Z","alert","tenant","johnson brothers"]]}]}]}`, - }, - { - name: "string AND query, all fields in SELECT, one in parenthesis", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT alert_id,tenant_id FROM cpu WHERE alert_id='alert' AND (tenant_id='tenant')`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","alert_id","tenant_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert","tenant"]]}]}]}`, - }, - { - name: "string underscored field", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT alert_id FROM cpu WHERE _cust='johnson brothers'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","alert_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert"]]}]}]}`, - }, - { - name: "string no match", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT alert_id FROM cpu WHERE _cust='acme'`, - exp: `{"results":[{"statement_id":0}]}`, - }, - - // float64 - { - name: "float64 GT no match", - params: url.Values{"db": 
[]string{"db0"}}, - command: `select load from cpu where load > 100`, - exp: `{"results":[{"statement_id":0}]}`, - }, - { - name: "float64 GTE match one", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load >= 100`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`, - }, - { - name: "float64 EQ match upper bound", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load = 100`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`, - }, - { - name: "float64 LTE match two", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load <= 100`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100],["2009-11-10T23:01:02Z",80]]}]}]}`, - }, - { - name: "float64 GT match one", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load > 99`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`, - }, - { - name: "float64 EQ no match", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load = 99`, - exp: `{"results":[{"statement_id":0}]}`, - }, - { - name: "float64 LT match one", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load < 99`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:01:02Z",80]]}]}]}`, - }, - { - name: "float64 LT no match", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load < 80`, - exp: `{"results":[{"statement_id":0}]}`, - }, - { - name: "float64 NE match one", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load != 100`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:01:02Z",80]]}]}]}`, - }, - - // int64 - { - name: "int64 GT no match", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core > 4`, - exp: `{"results":[{"statement_id":0}]}`, - }, - { - name: "int64 GTE match one", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core >= 4`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`, - }, - { - name: "int64 EQ match upper bound", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core = 4`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`, - }, - { - name: "int64 LTE match two ", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core <= 4`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4],["2009-11-10T23:01:02Z",2]]}]}]}`, - }, - { - name: "int64 GT match one", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core > 3`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`, - }, - { - name: "int64 EQ no match", - params: url.Values{"db": []string{"db0"}}, - command: `select core from 
cpu where core = 3`, - exp: `{"results":[{"statement_id":0}]}`, - }, - { - name: "int64 LT match one", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core < 3`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:01:02Z",2]]}]}]}`, - }, - { - name: "int64 LT no match", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core < 2`, - exp: `{"results":[{"statement_id":0}]}`, - }, - { - name: "int64 NE match one", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core != 4`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:01:02Z",2]]}]}]}`, - }, - - // bool - { - name: "bool EQ match true", - params: url.Values{"db": []string{"db0"}}, - command: `select local from clicks where local = true`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:01Z",true]]}]}]}`, - }, - { - name: "bool EQ match false", - params: url.Values{"db": []string{"db0"}}, - command: `select local from clicks where local = false`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:02Z",false]]}]}]}`, - }, - - { - name: "bool NE match one", - params: url.Values{"db": []string{"db0"}}, - command: `select local from clicks where local != true`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:02Z",false]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Aggregates_IntMany_GroupBy(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join([]string{ - fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), - fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), - }, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "max order by time with time specified group by 10s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(10s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:50Z",5],["2000-01-01T00:01:00Z",7],["2000-01-01T00:01:10Z",9]]}]}]}`, - }, - { - name: "max order by time without time 
specified group by 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, - }, - { - name: "max order by time with time specified group by 30s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(30s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, - }, - { - name: "min order by time without time specified group by 15s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT min(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, - }, - { - name: "min order by time with time specified group by 15s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, min(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, - }, - { - name: "first order by time without time specified group by 15s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT first(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, - }, - { - name: "first order by time with time specified group by 15s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, first(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",7]]}]}]}`, - }, - { - name: "last order by time without time specified group by 15s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT last(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, - }, - { - name: "last order by time with time specified group by 15s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT time, last(value) FROM intmany where time >= 
'2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:14Z' group by time(15s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",4],["2000-01-01T00:00:15Z",4],["2000-01-01T00:00:30Z",5],["2000-01-01T00:00:45Z",5],["2000-01-01T00:01:00Z",9]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Aggregates_IntMany_OrderByDesc(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join([]string{ - fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), - fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), - }, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "aggregate order by time desc", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT max(value) FROM intmany where time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T00:01:00Z' group by time(10s) order by time desc`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intmany","columns":["time","max"],"values":[["2000-01-01T00:01:00Z",7],["2000-01-01T00:00:50Z",5],["2000-01-01T00:00:40Z",5],["2000-01-01T00:00:30Z",4],["2000-01-01T00:00:20Z",4],["2000-01-01T00:00:10Z",4],["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Aggregates_IntOverlap(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join([]string{ - fmt.Sprintf(`intoverlap,region=us-east value=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intoverlap,region=us-east value=30 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`intoverlap,region=us-west value=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intoverlap,region=us-east otherVal=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - }, "\n")}, - } - - test.addQueries([]*Query{ - /* { - name: "aggregation with no interval - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT count(value) FROM intoverlap WHERE time = '2000-01-01 00:00:00'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intoverlap","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - { - name: "sum - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM intoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["2000-01-01T00:00:10Z",30]]}]}]}`, - }, - */{ - name: "aggregation with a null field value - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM intoverlap GROUP BY region`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, - }, - { - name: "multiple aggregations - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value), MEAN(value) FROM intoverlap GROUP BY region`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",50,25]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",100,100]]}]}]}`, - }, - { - skip: SkippedOSS, - name: "multiple aggregations with division - int FIXME issue #2879", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value), mean(value), sum(value) / mean(value) as div FROM intoverlap GROUP BY region`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean","div"],"values":[["1970-01-01T00:00:00Z",50,25,2]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",100,100,1]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Aggregates_FloatSingle(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join([]string{ - fmt.Sprintf(`floatsingle value=45.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - }, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "stddev with just one point - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT STDDEV(value) FROM floatsingle`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatsingle","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_LimitAndOffset(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`limited,tennant=paul foo=2 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`limited,tennant=paul foo=3 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), - fmt.Sprintf(`limited,tennant=paul foo=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()), - fmt.Sprintf(`limited,tennant=todd foo=5 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "limit on points", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from "limited" LIMIT 2`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3]]}]}]}`, - }, - { - name: "limit higher than the number of data points", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from "limited" LIMIT 20`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4],["2009-11-10T23:00:05Z",5]]}]}]}`, - }, - { - name: "limit and offset", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from "limited" LIMIT 2 OFFSET 1`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4]]}]}]}`, - }, - { - name: "limit + offset equal to total number of points", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from "limited" LIMIT 3 OFFSET 3`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`, - }, - { - name: "limit - offset higher than number of points", - command: `select foo from "limited" LIMIT 2 OFFSET 20`, - exp: `{"results":[{"statement_id":0}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "limit on points with group by time", - command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "limit higher than the number of data points with group by time", - command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND 
time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 20`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4],["2009-11-10T23:00:05Z",5]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "limit and offset with group by time", - command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2 OFFSET 1`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "limit + offset equal to the number of points with group by time", - command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 3 OFFSET 3`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "limit - offset higher than number of points with group by time", - command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2 OFFSET 20`, - exp: `{"results":[{"statement_id":0}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "limit - group by tennant", - command: `select foo from "limited" group by tennant limit 1`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"limited","tags":{"tennant":"paul"},"columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2]]},{"name":"limited","tags":{"tennant":"todd"},"columns":["time","foo"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "limit and offset - group by tennant", - command: `select foo from "limited" group by tennant limit 1 offset 1`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"limited","tags":{"tennant":"paul"},"columns":["time","foo"],"values":[["2009-11-10T23:00:03Z",3]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Fill(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`fills val=3 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`fills val=5 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), - fmt.Sprintf(`fills val=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()), - fmt.Sprintf(`fills val=10 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:16Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "fill with value", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(1)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "fill with value, WHERE all values match condition", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val < 50 group by time(5s) FILL(1)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "fill with value, WHERE no values match condition", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val > 50 group by time(5s) FILL(1)`, - exp: `{"results":[{"statement_id":0}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "fill with previous", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",4],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "fill with none, i.e. 
clear out nulls", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(none)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "fill defaults to null", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",null],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "fill defaults to 0 for count", - command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",0],["2009-11-10T23:00:15Z",1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "fill none drops 0s for count", - command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) fill(none)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:15Z",1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "fill previous overwrites 0s for count", - command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "fill with implicit start time", - command: `select mean(val) from fills where time < '2009-11-10T23:00:20Z' group by time(5s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",null],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Aggregates_FloatMany(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join([]string{ - fmt.Sprintf(`floatmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), - }, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "mean and stddev - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEAN(value), STDDEV(value) FROM floatmany WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatmany","columns":["time","mean","stddev"],"values":[["2000-01-01T00:00:00Z",5,2.138089935299395]]}]}]}`, - }, - { - name: "first - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT FIRST(value) FROM floatmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatmany","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - { - name: "last - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT LAST(value) FROM floatmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatmany","columns":["time","last"],"values":[["2000-01-01T00:01:10Z",9]]}]}]}`, - }, - { - name: "spread - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SPREAD(value) FROM floatmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatmany","columns":["time","spread"],"values":[["1970-01-01T00:00:00Z",7]]}]}]}`, - }, - { - name: "median - even count - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEDIAN(value) FROM floatmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4.5]]}]}]}`, - }, - { - name: "median - odd count - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEDIAN(value) FROM floatmany where time < '2000-01-01T00:01:10Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, - }, - { - name: "mode - single - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MODE(value) FROM floatmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatmany","columns":["time","mode"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, - }, - { - name: "mode - multiple - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MODE(value) FROM floatmany where time < '2000-01-01T00:00:10Z'`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"floatmany","columns":["time","mode"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, - }, - { - name: "distinct as call - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT(value) FROM floatmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",2],["1970-01-01T00:00:00Z",4],["1970-01-01T00:00:00Z",5],["1970-01-01T00:00:00Z",7],["1970-01-01T00:00:00Z",9]]}]}]}`, - }, - { - name: "distinct alt syntax - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT value FROM floatmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",2],["1970-01-01T00:00:00Z",4],["1970-01-01T00:00:00Z",5],["1970-01-01T00:00:00Z",7],["1970-01-01T00:00:00Z",9]]}]}]}`, - }, - { - name: "distinct select tag - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT(host) FROM floatmany`, - exp: `{"results":[{"statement_id":0,"error":"statement must have at least one field in select clause"}]}`, - skip: SkippedOSS, // FIXME(benbjohnson): show be allowed, stream tag values - }, - { - name: "distinct alt select tag - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT host FROM floatmany`, - exp: `{"results":[{"statement_id":0,"error":"statement must have at least one field in select clause"}]}`, - skip: SkippedOSS, // FIXME(benbjohnson): show be allowed, stream tag values - }, - { - name: "count distinct - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT value) FROM floatmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, - }, - { - name: "count distinct as call - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT(value)) FROM floatmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, - }, - { - name: "count distinct select tag - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT host) FROM floatmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, - skip: SkippedOSS, // FIXME(benbjohnson): stream tag values - }, - { - name: "count distinct as call select tag - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT host) FROM floatmany`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, - skip: SkippedOSS, // FIXME(benbjohnson): stream tag values - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_ShowTagValues(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`_,__name__=metric1 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`_,__name__=metric2 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "show tag values with key", - command: "SHOW TAG VALUES WITH KEY = host", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"],["host","server03"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag values with key regex", - command: "SHOW TAG VALUES WITH KEY =~ /ho/", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"],["host","server03"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key and where`, - command: `SHOW TAG VALUES FROM cpu WITH KEY = host WHERE region = 'uswest'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key regex and where`, - command: `SHOW TAG VALUES FROM cpu WITH KEY =~ /ho/ WHERE region = 'uswest'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key and where matches the regular expression`, - command: `SHOW TAG VALUES WITH KEY = host WHERE region =~ /ca.*/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server03"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key and where does not match the regular expression`, - command: `SHOW TAG VALUES WITH KEY = region WHERE host !~ /server0[12]/`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["key","value"],"values":[["region","caeast"]]},{"name":"gpu","columns":["key","value"],"values":[["region","caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key and where partially matches the regular expression`, - command: `SHOW TAG VALUES WITH KEY = host WHERE region =~ /us/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key and where partially does not match the regular expression`, - command: `SHOW TAG VALUES WITH KEY = host WHERE region !~ /us/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"]]},{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server03"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key in and where does not match the regular expression`, - command: `SHOW TAG VALUES FROM cpu WITH KEY IN (host, region) WHERE region = 'uswest'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["region","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key regex and where does not match the regular expression`, - command: `SHOW TAG VALUES FROM cpu WITH KEY =~ /(host|region)/ WHERE region = 'uswest'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["region","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key and measurement matches regular expression`, - command: `SHOW TAG VALUES FROM /[cg]pu/ WITH KEY = host`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"],["host","server03"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag values with key where time", - command: "SHOW TAG VALUES WITH KEY = host WHERE time > 0", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"],["host","server03"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag values with key regex where time", - command: "SHOW TAG VALUES WITH KEY =~ /ho/ WHERE time > 0", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"],["host","server03"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key and where time`, - command: `SHOW TAG VALUES FROM cpu WITH KEY = host WHERE region = 'uswest' AND time > 0`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key regex and where time`, - command: `SHOW TAG VALUES FROM cpu WITH KEY =~ /ho/ WHERE region = 'uswest' AND time > 0`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key and where matches the regular expression where time`, - command: `SHOW TAG VALUES WITH KEY = host WHERE region =~ /ca.*/ AND time > 0`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server03"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key and where does not match the regular expression where time`, - command: `SHOW TAG VALUES WITH KEY = region WHERE host !~ /server0[12]/ AND time > 0`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["key","value"],"values":[["region","caeast"]]},{"name":"gpu","columns":["key","value"],"values":[["region","caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key and where partially matches the regular expression where time`, - command: `SHOW TAG VALUES WITH KEY = host WHERE region =~ /us/ AND time > 0`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key and where partially does not match the regular expression where time`, - command: `SHOW TAG VALUES WITH KEY = host WHERE region !~ /us/ AND time > 0`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"]]},{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server03"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key in and where does not match the regular expression where time`, - command: `SHOW TAG VALUES FROM cpu WITH KEY IN (host, region) WHERE region = 'uswest' AND time > 0`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["region","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key regex and where does not match the regular expression where time`, - command: `SHOW TAG VALUES FROM cpu WITH KEY =~ /(host|region)/ WHERE region = 'uswest' AND time > 0`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["region","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key and measurement matches regular expression where time`, - command: `SHOW TAG VALUES FROM /[cg]pu/ WITH KEY = host WHERE time > 0`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"],["host","server02"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server02"],["host","server03"]]}]}]}`, - params: 
url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values with key where label starts with underscore`, - command: `SHOW TAG VALUES FROM "_" WITH KEY = "__name__" WHERE "__name__" = 'metric1'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"_","columns":["key","value"],"values":[["__name__","metric1"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag values with value filter", - skip: FixRequired + "https://github.com/influxdata/idpe/issues/7592", - command: "SHOW TAG VALUES WITH KEY = host WHERE value = 'server03'", - exp: `{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["key","value"],"values":[["host","server03"]]},{"name":"gpu","columns":["key","value"],"values":[["host","server03"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag values with no matching value filter", - command: "SHOW TAG VALUES WITH KEY = host WHERE value = 'no_such_value'", - exp: `{"results":[{"statement_id":0}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag values with non-string value filter", - command: "SHOW TAG VALUES WITH KEY = host WHERE value = 5000", - exp: `{"results":[{"statement_id":0}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_ShowTagKeyCardinality(t *testing.T) { - t.Skip(NotSupported) - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: `show tag key cardinality`, - command: "SHOW TAG KEY CARDINALITY", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]},{"name":"disk","columns":["count"],"values":[[2]]},{"name":"gpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag key cardinality on db0`, - command: "SHOW TAG KEY CARDINALITY ON db0", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]},{"name":"disk","columns":["count"],"values":[[2]]},{"name":"gpu","columns":["count"],"values":[[2]]}]}]}`, - }, - { - name: "show tag key cardinality from", - command: "SHOW TAG KEY CARDINALITY FROM cpu", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag key cardinality from regex", - command: "SHOW TAG KEY CARDINALITY FROM /[cg]pu/", - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]},{"name":"gpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag key cardinality measurement not found", - command: "SHOW TAG KEY CARDINALITY FROM doesntexist", - exp: `{"results":[{"statement_id":0}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag key cardinality with time in WHERE clause errors", - command: "SHOW TAG KEY CARDINALITY FROM cpu WHERE time > now() - 1h", - exp: `{"results":[{"statement_id":0,"error":"SHOW TAG KEY EXACT CARDINALITY doesn't support time in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag key exact cardinality`, - command: "SHOW TAG KEY EXACT CARDINALITY", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]},{"name":"disk","columns":["count"],"values":[[2]]},{"name":"gpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag key exact cardinality on db0`, - command: "SHOW TAG KEY EXACT CARDINALITY ON db0", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]},{"name":"disk","columns":["count"],"values":[[2]]},{"name":"gpu","columns":["count"],"values":[[2]]}]}]}`, - }, - { - name: "show tag key exact cardinality from", - command: "SHOW TAG KEY EXACT CARDINALITY FROM cpu", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag key exact cardinality from regex", - command: "SHOW TAG KEY EXACT CARDINALITY FROM /[cg]pu/", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]},{"name":"gpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag key exact cardinality measurement not found", - command: "SHOW TAG KEY EXACT CARDINALITY FROM doesntexist", - exp: `{"results":[{"statement_id":0}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag key exact cardinality with time in WHERE clause errors", - command: "SHOW TAG KEY EXACT CARDINALITY FROM cpu WHERE time > now() - 1h", - exp: `{"results":[{"statement_id":0,"error":"SHOW TAG KEY EXACT CARDINALITY doesn't support time in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values cardinality with key and where matches the regular expression`, - command: `SHOW TAG VALUES CARDINALITY WITH KEY = host WHERE region =~ /ca.*/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["count"],"values":[[1]]},{"name":"gpu","columns":["count"],"values":[[1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values cardinality with key and where does not match the regular expression`, - command: `SHOW TAG VALUES CARDINALITY WITH KEY = region WHERE host !~ /server0[12]/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["count"],"values":[[1]]},{"name":"gpu","columns":["count"],"values":[[1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values cardinality with key and where partially matches the regular expression`, - command: `SHOW TAG VALUES CARDINALITY WITH KEY = host WHERE region =~ /us/`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]},{"name":"gpu","columns":["count"],"values":[[1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values cardinality with key and where partially does not match the regular expression`, - command: `SHOW TAG VALUES CARDINALITY WITH KEY = host WHERE region !~ /us/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["count"],"values":[[1]]},{"name":"gpu","columns":["count"],"values":[[1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values cardinality with key in and where does not match the regular expression`, - command: `SHOW TAG VALUES CARDINALITY FROM cpu WITH KEY IN (host, region) WHERE region = 'uswest'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values cardinality with key regex and where does not match the regular expression`, - command: `SHOW TAG VALUES CARDINALITY FROM cpu WITH KEY =~ /(host|region)/ WHERE region = 'uswest'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values cardinality with key and measurement matches regular expression`, - command: `SHOW TAG VALUES CARDINALITY FROM /[cg]pu/ WITH KEY = host`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]},{"name":"gpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values exact cardinality with key and where matches the regular expression`, - command: `SHOW TAG VALUES EXACT CARDINALITY WITH KEY = host WHERE region =~ /ca.*/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["count"],"values":[[1]]},{"name":"gpu","columns":["count"],"values":[[1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values exact cardinality with key and where does not match the regular expression`, - command: `SHOW TAG VALUES EXACT CARDINALITY WITH KEY = region WHERE host !~ /server0[12]/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["count"],"values":[[1]]},{"name":"gpu","columns":["count"],"values":[[1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values exact cardinality with key and where partially matches the regular expression`, - command: `SHOW TAG VALUES EXACT CARDINALITY WITH KEY = host WHERE region =~ /us/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]},{"name":"gpu","columns":["count"],"values":[[1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values exact cardinality with key and where partially does not match the regular expression`, - command: `SHOW TAG VALUES EXACT CARDINALITY WITH KEY = host WHERE region !~ /us/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["count"],"values":[[1]]},{"name":"gpu","columns":["count"],"values":[[1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values exact cardinality with key in and where does not match the regular expression`, - command: `SHOW TAG VALUES EXACT CARDINALITY FROM cpu WITH KEY IN (host, region) WHERE region = 'uswest'`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values exact cardinality with key regex and where does not match the regular expression`, - command: `SHOW TAG VALUES EXACT CARDINALITY FROM cpu WITH KEY =~ /(host|region)/ WHERE region = 'uswest'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values exact cardinality with key and measurement matches regular expression`, - command: `SHOW TAG VALUES EXACT CARDINALITY FROM /[cg]pu/ WITH KEY = host`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]},{"name":"gpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Aggregates_FloatOverlap(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join([]string{ - fmt.Sprintf(`floatoverlap,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`floatoverlap,region=us-east value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`floatoverlap,region=us-west value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`floatoverlap,region=us-east otherVal=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - }, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "aggregation with no interval - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT count(value) FROM floatoverlap WHERE time = '2000-01-01 00:00:00'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatoverlap","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - { - name: "sum - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM floatoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:10Z",30]]}]}]}`, - }, - { - name: "aggregation with a null field value - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM floatoverlap GROUP BY region`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, - }, - { - name: "multiple aggregations - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value), MEAN(value) FROM floatoverlap GROUP BY region`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",50,25]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",100,100]]}]}]}`, - }, - { - name: "multiple aggregations with division - float", - params: url.Values{"db": []string{"db0"}}, - command: 
`SELECT sum(value) / mean(value) as div FROM floatoverlap GROUP BY region`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",2]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_ShowFieldKeyCardinality(t *testing.T) { - t.Skip(NotSupported) - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 field1=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server01,region=useast field4=200,field5=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server03,region=caeast field6=200,field7=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`disk,host=server03,region=caeast field8=200,field9=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: `show field key cardinality`, - command: `SHOW FIELD KEY CARDINALITY`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[3]]},{"name":"disk","columns":["count"],"values":[[2]]},{"name":"gpu","columns":["count"],"values":[[4]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show field key cardinality from measurement`, - command: `SHOW FIELD KEY CARDINALITY FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[3]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show field key cardinality measurement with regex`, - command: `SHOW FIELD KEY CARDINALITY FROM /[cg]pu/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[3]]},{"name":"gpu","columns":["count"],"values":[[4]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show field key exact cardinality`, - command: `SHOW FIELD KEY EXACT CARDINALITY`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[3]]},{"name":"disk","columns":["count"],"values":[[2]]},{"name":"gpu","columns":["count"],"values":[[4]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show field key exact cardinality from measurement`, - command: `SHOW FIELD KEY EXACT CARDINALITY FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[3]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show field key exact cardinality measurement with regex`, - command: `SHOW FIELD KEY EXACT CARDINALITY FROM /[cg]pu/`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[3]]},{"name":"gpu","columns":["count"],"values":[[4]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_ShowFieldKeys(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 field1=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server01,region=useast field4=200,field5=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server03,region=caeast field6=200,field7=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`disk,host=server03,region=caeast field8=200,field9=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: `show field keys`, - command: `SHOW FIELD KEYS`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["fieldKey","fieldType"],"values":[["field1","float"],["field2","float"],["field3","float"]]},{"name":"disk","columns":["fieldKey","fieldType"],"values":[["field8","float"],["field9","float"]]},{"name":"gpu","columns":["fieldKey","fieldType"],"values":[["field4","float"],["field5","float"],["field6","float"],["field7","float"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show field keys from measurement`, - command: `SHOW FIELD KEYS FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["fieldKey","fieldType"],"values":[["field1","float"],["field2","float"],["field3","float"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show field keys measurement with regex`, - command: `SHOW FIELD KEYS FROM /[cg]pu/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["fieldKey","fieldType"],"values":[["field1","float"],["field2","float"],["field3","float"]]},{"name":"gpu","columns":["fieldKey","fieldType"],"values":[["field4","float"],["field5","float"],["field6","float"],["field7","float"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_FieldWithMultiplePeriodsMeasurementPrefixMatch(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`foo foo.bar.baz=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "baseline", - params: url.Values{"db": []string{"db0"}}, - command: `select * from foo`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"foo","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, - }, - { - name: "select field with periods", - params: url.Values{"db": []string{"db0"}}, - command: `select "foo.bar.baz" from foo`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"foo","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_FieldWithMultiplePeriods(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu foo.bar.baz=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "baseline", - params: url.Values{"db": []string{"db0"}}, - command: `select * from cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, - }, - { - name: "select field with periods", - params: url.Values{"db": []string{"db0"}}, - command: `select "foo.bar.baz" from cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","foo.bar.baz"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Aggregates_GroupByOffset(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join([]string{ - fmt.Sprintf(`offset,region=us-east,host=serverA value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`offset,region=us-east,host=serverB value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`offset,region=us-west,host=serverC value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - }, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "group by offset - standard", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value) FROM "offset" WHERE time >= '1999-12-31T23:59:55Z' AND time < '2000-01-01T00:00:15Z' GROUP BY time(10s, 5s) FILL(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"offset","columns":["time","sum"],"values":[["1999-12-31T23:59:55Z",120],["2000-01-01T00:00:05Z",30]]}]}]}`, - }, - { - name: "group by offset - misaligned time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value) FROM "offset" WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:20Z' GROUP BY time(10s, 5s) FILL(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"offset","columns":["time","sum"],"values":[["1999-12-31T23:59:55Z",120],["2000-01-01T00:00:05Z",30],["2000-01-01T00:00:15Z",0]]}]}]}`, - }, - { - name: "group by offset - negative time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value) FROM "offset" WHERE time >= '1999-12-31T23:59:55Z' AND time < '2000-01-01T00:00:15Z' GROUP BY time(10s, -5s) FILL(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"offset","columns":["time","sum"],"values":[["1999-12-31T23:59:55Z",120],["2000-01-01T00:00:05Z",30]]}]}]}`, - }, - { - name: "group by offset - modulo", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value) FROM "offset" WHERE time >= '1999-12-31T23:59:55Z' AND time < '2000-01-01T00:00:15Z' GROUP BY time(10s, 35s) FILL(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"offset","columns":["time","sum"],"values":[["1999-12-31T23:59:55Z",120],["2000-01-01T00:00:05Z",30]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -// This tests that a GROUP BY time() query observes the requested time range, -// but only puts values into the buckets that fall within that range -func TestServer_Query_GroupByTimeCutoffs(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu value=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu value=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), - fmt.Sprintf(`cpu value=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:05Z").UnixNano()), - fmt.Sprintf(`cpu value=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:08Z").UnixNano()), - fmt.Sprintf(`cpu value=5i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:09Z").UnixNano()), - fmt.Sprintf(`cpu value=6i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "sum all time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",21]]}]}]}`, - }, - { - name: "sum all time grouped by time 5s", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`, - }, - { - name: "sum all time grouped by time 5s missing first point", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:01Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",2],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`, - }, - { - name: "sum all time grouped by time 5s missing first points (null for bucket)", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:02Z' and time <= '2000-01-01T00:00:10Z' group by time(5s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:05Z",12],["2000-01-01T00:00:10Z",6]]}]}]}`, - }, - { - name: "sum all time grouped by time 5s missing last point - 2 time intervals", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:09Z' group by time(5s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",12]]}]}]}`, - }, - { - name: "sum all time grouped by time 5s missing last 2 points - 2 time intervals", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T00:00:08Z' group by time(5s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",3],["2000-01-01T00:00:05Z",7]]}]}]}`, - }, - }...)
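// Worked example for the cutoff queries above: points land at :00(1), :01(2), :05(3),
// :08(4), :09(5) and :10(6). With time >= 00:00:01 the first 5s bucket only contains
// the :01 point, so its sum drops from 3 to 2; with time >= 00:00:02 that bucket is
// empty and reported as null. Tightening the upper bound to <= 00:00:09 drops the :10
// bucket, and <= 00:00:08 also excludes the :09 point, trimming the second bucket's
// sum from 12 to 7.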
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_MapType(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`gpu speed=25 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "query value with a single measurement", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT value FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - { - name: "query wildcard with a single measurement", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - { - name: "query value with multiple measurements", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT value FROM cpu, gpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - { - name: "query wildcard with multiple measurements", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM cpu, gpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","speed","value"],"values":[["2000-01-01T00:00:00Z",null,2]]},{"name":"gpu","columns":["time","speed","value"],"values":[["2000-01-01T00:00:00Z",25,null]]}]}]}`, - }, - { - name: "query value with a regex measurement", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT value FROM /[cg]pu/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - { - name: "query wildcard with a regex measurement", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM /[cg]pu/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","speed","value"],"values":[["2000-01-01T00:00:00Z",null,2]]},{"name":"gpu","columns":["time","speed","value"],"values":[["2000-01-01T00:00:00Z",25,null]]}]}]}`, - }, - }...) 
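// Note on the queries above: selecting an explicit field across measurements only
// returns series for measurements that actually contain that field (gpu has no
// `value`, so `SELECT value FROM cpu, gpu` returns cpu only), whereas `SELECT *`
// returns the union of columns per measurement, with nulls for the missing fields.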
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Subqueries(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 usage_user=70i,usage_system=30i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01 usage_user=45i,usage_system=55i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01 usage_user=23i,usage_system=77i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02 usage_user=11i,usage_system=89i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02 usage_user=28i,usage_system=72i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02 usage_user=12i,usage_system=53i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean FROM (SELECT mean(usage_user) FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",31.5]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT value FROM (SELECT mean(usage_user) AS value FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",31.5]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(usage) FROM (SELECT 100 - usage_user AS usage FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",68.5]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT host FROM (SELECT min(usage_user), host FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host"],"values":[["2000-01-01T00:00:00Z","server02"]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT host FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host"],"values":[["2000-01-01T00:00:00Z","server02"],["2000-01-01T00:00:20Z","server01"]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT host FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' GROUP BY host`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","host"],"values":[["2000-01-01T00:00:20Z","server01"]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","host"],"values":[["2000-01-01T00:00:00Z","server02"]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(min) FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < 
'2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",17]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(min) FROM (SELECT (min(usage_user)) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",17]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT max(min), host FROM (SELECT min(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","max","host"],"values":[["2000-01-01T00:00:20Z",23,"server01"]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean, host FROM (SELECT mean(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean","host"],"values":[["2000-01-01T00:00:00Z",46,"server01"],["2000-01-01T00:00:00Z",17,"server02"]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT host FROM (SELECT mean(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host"],"values":[["2000-01-01T00:00:00Z","server01"],["2000-01-01T00:00:00Z","server02"]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT max(usage_system) FROM (SELECT min(usage_user), usage_system FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",89]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT min(top), host FROM (SELECT top(usage_user, host, 2) FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","min","host"],"values":[["2000-01-01T00:00:10Z",28,"server02"]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT min(top), host FROM (SELECT top(usage_user, 2), host FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","min","host"],"values":[["2000-01-01T00:00:10Z",45,"server01"]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT count(host) FROM (SELECT top(usage_user, host, 2) FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(derivative) FROM (SELECT derivative(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",-4.6]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT min(max) FROM (SELECT 100 - max(usage_user) 
FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",30]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT min(usage_system) FROM (SELECT max(usage_user), 100 - usage_system FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","min"],"values":[["2000-01-01T00:00:10Z",28]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT min(value) FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","min"],"values":[["2000-01-01T00:00:10Z",-44]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT min(value) FROM (SELECT top(usage_user, 2), usage_user - usage_system AS value FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' GROUP BY host`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","min"],"values":[["2000-01-01T00:00:10Z",-10]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","min"],"values":[["2000-01-01T00:00:10Z",-44]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT min(value) FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND host = 'server01'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","min"],"values":[["2000-01-01T00:00:00Z",40]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT value FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND value > 0`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",40]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT max FROM (SELECT max(usage_user) FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND host = 'server01'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","max"],"values":[["2000-01-01T00:00:00Z",70]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(value) FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND value > 0`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",40]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(value) FROM (SELECT max(usage_user), usage_user - usage_system AS value FROM cpu GROUP BY host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z' AND host =~ /server/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",-2]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT top(usage_system, host, 2) FROM 
(SELECT min(usage_user), usage_system FROM cpu GROUP BY time(20s), host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","top","host"],"values":[["2000-01-01T00:00:00Z",89,"server02"],["2000-01-01T00:00:20Z",77,"server01"]]}]}]}`, - }, - { - params: url.Values{"db": []string{"db0"}}, - command: `SELECT bottom(usage_system, host, 2) FROM (SELECT max(usage_user), usage_system FROM cpu GROUP BY time(20s), host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","bottom","host"],"values":[["2000-01-01T00:00:00Z",30,"server01"],["2000-01-01T00:00:20Z",53,"server02"]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_SubqueryWithGroupBy(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=server01,region=uswest value=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=4i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=uswest value=5i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=uswest value=6i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=uswest value=7i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=uswest value=8i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=9i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=10i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=11i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=12i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=13i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=14i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=15i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=16i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "group by time(2s) - time(2s), host", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(2s), host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:04Z' GROUP BY time(2s)`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",7.5],["2000-01-01T00:00:02Z",9.5]]}]}]}`, - }, - { - name: "group by time(4s), host - time(2s), host", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(2s), host) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:04Z' GROUP BY time(4s), host`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",6.5]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",10.5]]}]}]}`, - }, - { - name: "group by time(2s), host - time(2s), host, region", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(mean) FROM (SELECT mean(value) FROM cpu GROUP BY time(2s), host, region) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:04Z' GROUP BY time(2s), host`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",5.5],["2000-01-01T00:00:02Z",7.5]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",9.5],["2000-01-01T00:00:02Z",11.5]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_SubqueryMath(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf("m0 f2=4,f3=2 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf("m0 f1=5,f3=8 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf("m0 f1=5,f2=3,f3=6 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "SumThreeValues", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum FROM (SELECT f1 + f2 + f3 AS sum FROM m0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"m0","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:10Z",null],["2000-01-01T00:00:20Z",14]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_PercentileDerivative(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`counter value=12 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`counter value=34 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`counter value=78 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`counter value=89 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`counter value=101 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "nth percentile of derivative", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT percentile(derivative, 95) FROM (SELECT derivative(value, 1s) FROM counter) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:50Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"counter","columns":["time","percentile"],"values":[["2000-01-01T00:00:20Z",4.4]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_UnderscoreMeasurement(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`_cpu value=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "select underscore with underscore prefix", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM _cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"_cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Wildcards(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`wildcard,region=us-east value=10 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`wildcard,region=us-east valx=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`wildcard,region=us-east value=30,valx=40 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - - fmt.Sprintf(`wgroup,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`wgroup,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`wgroup,region=us-west value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - - fmt.Sprintf(`m1,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`m2,host=server01 field=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "wildcard", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM wildcard`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"wildcard","columns":["time","region","value","valx"],"values":[["2000-01-01T00:00:00Z","us-east",10,null],["2000-01-01T00:00:10Z","us-east",null,20],["2000-01-01T00:00:20Z","us-east",30,40]]}]}]}`, - }, - { - name: "wildcard with group by", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM wildcard GROUP BY *`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, - }, - { - name: "GROUP BY queries", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(value) FROM wgroup GROUP BY *`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"wgroup","tags":{"region":"us-east"},"columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",15]]},{"name":"wgroup","tags":{"region":"us-west"},"columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",30]]}]}]}`, - }, - { - name: "GROUP BY queries with time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(value) FROM wgroup WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:00Z' GROUP BY *,TIME(1m)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"wgroup","tags":{"region":"us-east"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",15]]},{"name":"wgroup","tags":{"region":"us-west"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",30]]}]}]}`, - }, - { - name: "wildcard and field in select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT value, * FROM wildcard`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"wildcard","columns":["time","value","region","value_1","valx"],"values":[["2000-01-01T00:00:00Z",10,"us-east",10,null],["2000-01-01T00:00:10Z",null,"us-east",null,20],["2000-01-01T00:00:20Z",30,"us-east",30,40]]}]}]}`, - }, - { - name: "field and wildcard in select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT value, * FROM wildcard`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"wildcard","columns":["time","value","region","value_1","valx"],"values":[["2000-01-01T00:00:00Z",10,"us-east",10,null],["2000-01-01T00:00:10Z",null,"us-east",null,20],["2000-01-01T00:00:20Z",30,"us-east",30,40]]}]}]}`, - }, - { - name: "field and wildcard in group by", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM wildcard GROUP BY region, *`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, - }, - { - name: "wildcard and field in group by", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM wildcard GROUP BY *, region`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, - }, - { - name: "wildcard with multiple measurements", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM m1, m2`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`, - }, - { - name: "wildcard with multiple measurements via regex", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM /^m.*/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`, - }, - { - name: "wildcard with multiple measurements via regex and limit", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM db0../^m.*/ LIMIT 2`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"m1","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:00Z",null,null,"us-east",10]]},{"name":"m2","columns":["time","field","host","region","value"],"values":[["2000-01-01T00:00:01Z",20,"server01",null,null]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_WildcardExpansion(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`wildcard,region=us-east,host=A value=10,cpu=80 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`wildcard,region=us-east,host=B value=20,cpu=90 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`wildcard,region=us-west,host=B value=30,cpu=70 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`wildcard,region=us-east,host=A value=40,cpu=60 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - - fmt.Sprintf(`dupnames,region=us-east,day=1 value=10,day=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`dupnames,region=us-east,day=2 value=20,day=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`dupnames,region=us-west,day=3 value=30,day=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "wildcard", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM wildcard`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"wildcard","columns":["time","cpu","host","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, - }, - { - name: "no wildcard in select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT cpu, host, region, value FROM wildcard`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"wildcard","columns":["time","cpu","host","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, - }, - { - name: "no wildcard in select, preserve column order", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT host, cpu, region, value FROM wildcard`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"wildcard","columns":["time","host","cpu","region","value"],"values":[["2000-01-01T00:00:00Z","A",80,"us-east",10],["2000-01-01T00:00:10Z","B",90,"us-east",20],["2000-01-01T00:00:20Z","B",70,"us-west",30],["2000-01-01T00:00:30Z","A",60,"us-east",40]]}]}]}`, - }, - - { - name: "no wildcard with alias", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT cpu as c, host as h, region, value FROM wildcard`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"wildcard","columns":["time","c","h","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, - }, - { - name: "duplicate tag and field key", - command: `SELECT * FROM dupnames`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"statement_id":0,"series":[{"name":"dupnames","columns":["time","day","day_1","region","value"],"values":[["2000-01-01T00:00:00Z",3,"1","us-east",10],["2000-01-01T00:00:10Z",2,"2","us-east",20],["2000-01-01T00:00:20Z",1,"3","us-west",30]]}]}]}`, - }, - }...) 
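// In the "duplicate tag and field key" case above, each dupnames point defines `day`
// both as a tag (a string) and as a field (an integer); the expected output keeps the
// field under "day" and renames the tag column to "day_1".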
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_ShowQueries_Future(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 value=100 %d`, models.MaxNanoTime), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: `show measurements`, - command: "SHOW MEASUREMENTS", - exp: `{"results":[{"statement_id":0,"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series`, - skip: NotSupported, - command: "SHOW SERIES", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["cpu,host=server01"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag keys`, - command: "SHOW TAG KEYS FROM cpu", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag values`, - command: "SHOW TAG VALUES WITH KEY = \"host\"", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["key","value"],"values":[["host","server01"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show field keys`, - command: "SHOW FIELD KEYS", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["fieldKey","fieldType"],"values":[["value","float"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_ShowSeries(t *testing.T) { - t.Skip(NotSupported) - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:01Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2020-11-10T23:00:04Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2020-11-10T23:00:05Z").UnixNano()), - fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()), - fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:07Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: `show series`, - command: "SHOW SERIES", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["cpu,host=server01"],["cpu,host=server01,region=useast"],["cpu,host=server01,region=uswest"],["cpu,host=server02,region=useast"],["disk,host=server03,region=caeast"],["gpu,host=server02,region=useast"],["gpu,host=server03,region=caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series from measurement`, - command: "SHOW SERIES FROM cpu", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["cpu,host=server01"],["cpu,host=server01,region=useast"],["cpu,host=server01,region=uswest"],["cpu,host=server02,region=useast"]]}]}]}`, - 
params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series from regular expression`, - command: "SHOW SERIES FROM /[cg]pu/", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["cpu,host=server01"],["cpu,host=server01,region=useast"],["cpu,host=server01,region=uswest"],["cpu,host=server02,region=useast"],["gpu,host=server02,region=useast"],["gpu,host=server03,region=caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series with where tag`, - command: "SHOW SERIES WHERE region = 'uswest'", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["cpu,host=server01,region=uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series where tag matches regular expression`, - command: "SHOW SERIES WHERE region =~ /ca.*/", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["disk,host=server03,region=caeast"],["gpu,host=server03,region=caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series`, - command: "SHOW SERIES WHERE host !~ /server0[12]/", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["disk,host=server03,region=caeast"],["gpu,host=server03,region=caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series with from and where`, - command: "SHOW SERIES FROM cpu WHERE region = 'useast'", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["cpu,host=server01,region=useast"],["cpu,host=server02,region=useast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series with time`, - command: "SHOW SERIES WHERE time > 0", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["cpu,host=server01"],["cpu,host=server01,region=useast"],["cpu,host=server01,region=uswest"],["cpu,host=server02,region=useast"],["disk,host=server03,region=caeast"],["gpu,host=server02,region=useast"],["gpu,host=server03,region=caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series from measurement with time`, - command: "SHOW SERIES FROM cpu WHERE time > 0", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["cpu,host=server01"],["cpu,host=server01,region=useast"],["cpu,host=server01,region=uswest"],["cpu,host=server02,region=useast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series from regular expression with time`, - command: "SHOW SERIES FROM /[cg]pu/ WHERE time > 0", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["cpu,host=server01"],["cpu,host=server01,region=useast"],["cpu,host=server01,region=uswest"],["cpu,host=server02,region=useast"],["gpu,host=server02,region=useast"],["gpu,host=server03,region=caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series with where tag with time`, - command: "SHOW SERIES WHERE region = 'uswest' AND time > 0", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["cpu,host=server01,region=uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series where tag matches regular expression with time`, - command: "SHOW SERIES WHERE region =~ /ca.*/ AND time > 0", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["disk,host=server03,region=caeast"],["gpu,host=server03,region=caeast"]]}]}]}`, - params: url.Values{"db": 
[]string{"db0"}}, - }, - { - name: `show series with != regex and time`, - command: "SHOW SERIES WHERE host !~ /server0[12]/ AND time > 0", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["disk,host=server03,region=caeast"],["gpu,host=server03,region=caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series with from and where with time`, - command: "SHOW SERIES FROM cpu WHERE region = 'useast' AND time > 0", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["cpu,host=server01,region=useast"],["cpu,host=server02,region=useast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_ShowSeriesCardinalityEstimation(t *testing.T) { - t.Skip(NotSupported) - // if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" { - // t.Skip("Skipping test in short, race and appveyor mode.") - // } - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = make(Writes, 0, 10) - // Add 1,000,000 series. - for j := 0; j < cap(test.writes); j++ { - writes := make([]string, 0, 50000) - for i := 0; i < cap(writes); i++ { - writes = append(writes, fmt.Sprintf(`cpu,l=%d,h=s%d v=1 %d`, j, i, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:01Z").UnixNano())) - } - test.writes = append(test.writes, &Write{data: strings.Join(writes, "\n")}) - } - - // These queries use index sketches to estimate cardinality. - test.addQueries([]*Query{ - { - name: `show series cardinality`, - command: "SHOW SERIES CARDINALITY", - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series cardinality on db0`, - command: "SHOW SERIES CARDINALITY ON db0", - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_ShowSeriesExactCardinality(t *testing.T) { - t.Skip(NotSupported) - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:01Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()), - fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()), - fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:07Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: `show series cardinality from measurement`, - command: "SHOW SERIES CARDINALITY FROM cpu", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[4]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series cardinality from regular expression`, - command: "SHOW SERIES CARDINALITY FROM /[cg]pu/", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[4]]},{"name":"gpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series cardinality with where tag`, - command: "SHOW SERIES CARDINALITY WHERE region = 'uswest'", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series cardinality where tag matches regular expression`, - command: "SHOW SERIES CARDINALITY WHERE region =~ /ca.*/", - exp: `{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["count"],"values":[[1]]},{"name":"gpu","columns":["count"],"values":[[1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series cardinality`, - command: "SHOW SERIES CARDINALITY WHERE host !~ /server0[12]/", - exp: `{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["count"],"values":[[1]]},{"name":"gpu","columns":["count"],"values":[[1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series cardinality with from and where`, - command: "SHOW SERIES CARDINALITY FROM cpu WHERE region = 'useast'", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series cardinality with WHERE time should fail`, - command: "SHOW SERIES CARDINALITY WHERE time > now() - 1h", - exp: `{"results":[{"statement_id":0,"error":"SHOW SERIES EXACT CARDINALITY doesn't support time in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series exact cardinality`, - command: "SHOW SERIES EXACT CARDINALITY", - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[4]]},{"name":"disk","columns":["count"],"values":[[1]]},{"name":"gpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series exact cardinality from measurement`, - command: "SHOW SERIES EXACT CARDINALITY FROM cpu", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[4]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series exact cardinality from regular expression`, - command: "SHOW SERIES EXACT CARDINALITY FROM /[cg]pu/", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[4]]},{"name":"gpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series exact cardinality with where tag`, - command: "SHOW SERIES EXACT CARDINALITY WHERE region = 'uswest'", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series exact cardinality where tag matches regular expression`, - command: "SHOW SERIES EXACT CARDINALITY WHERE region =~ /ca.*/", - exp: `{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["count"],"values":[[1]]},{"name":"gpu","columns":["count"],"values":[[1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series exact cardinality`, - command: "SHOW SERIES EXACT CARDINALITY WHERE host !~ /server0[12]/", - exp: `{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["count"],"values":[[1]]},{"name":"gpu","columns":["count"],"values":[[1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series exact cardinality with from and where`, - command: "SHOW SERIES EXACT CARDINALITY FROM cpu WHERE region = 'useast'", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show series exact cardinality with WHERE time should fail`, - command: "SHOW SERIES EXACT CARDINALITY WHERE time > now() - 1h", - exp: `{"results":[{"statement_id":0,"error":"SHOW SERIES EXACT CARDINALITY doesn't support time in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_ShowMeasurements(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`other,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - } - - ctx := context.Background() - - client := s.MustNewAdminClient() - bucket2 := influxdb.Bucket{ - OrgID: s.DefaultOrgID, - Name: "b2", - } - bucket3 := influxdb.Bucket{ - OrgID: s.DefaultOrgID, - Name: "b3", - } - bucket4 := influxdb.Bucket{ - OrgID: s.DefaultOrgID, - Name: "b4", - } - require.NoError(t, client.CreateBucket(ctx, &bucket2)) - require.NoError(t, client.CreateBucket(ctx, &bucket3)) - require.NoError(t, client.CreateBucket(ctx, &bucket4)) - - require.NoError(t, client.DBRPMappingService.Create(ctx, &influxdb.DBRPMapping{ - Database: "databaseEmpty", - RetentionPolicy: "rp0", - Default: false, - OrganizationID: s.DefaultOrgID, - BucketID: bucket4.ID, - })) - - require.NoError(t, client.DBRPMappingService.Create(ctx, &influxdb.DBRPMapping{ - Database: "db0", - RetentionPolicy: "rp1", - Default: false, - OrganizationID: s.DefaultOrgID, - BucketID: bucket2.ID, - })) - - require.NoError(t, client.DBRPMappingService.Create(ctx, &influxdb.DBRPMapping{ - Database: "db1", - RetentionPolicy: "rp0", - Default: false, - OrganizationID: s.DefaultOrgID, - BucketID: bucket3.ID, - })) - - rp1Writes := []string{ - fmt.Sprintf(`other2,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - } - - db1Writes := []string{ - fmt.Sprintf(`cpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - &Write{bucketID: bucket2.ID, data: strings.Join(rp1Writes, "\n")}, - &Write{bucketID: bucket3.ID, data: strings.Join(db1Writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: `show measurements`, - command: "SHOW MEASUREMENTS", - // *unlike* 1.x, InfluxDB 2 shows measurements from the default retention policy when the rp is not specified, not all retention policies - exp: `{"results":[{"statement_id":0,"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["gpu"],["other"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurements with rp parameter`, - command: "SHOW MEASUREMENTS", - // we ignore the rp parameter for show measurements - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["gpu"],["other"]]}]}]}`, - params: url.Values{"db": []string{"db0"}, "rp": []string{"rp1"}}, - }, - { - name: `show measurements with on`, - command: "SHOW MEASUREMENTS on db0", - exp: `{"results":[{"statement_id":0,"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["gpu"],["other"]]}]}]}`, - params: url.Values{"db": []string{"db0"}, "rp": []string{"rp0"}}, - }, - { - name: `show measurements with limit 2`, - command: "SHOW MEASUREMENTS LIMIT 2", - exp: `{"results":[{"statement_id":0,"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["gpu"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurements using WITH`, - command: "SHOW MEASUREMENTS WITH MEASUREMENT = cpu", - exp: `{"results":[{"statement_id":0,"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurements using WITH and regex`, - command: "SHOW MEASUREMENTS WITH MEASUREMENT =~ /[cg]pu/", - exp: `{"results":[{"statement_id":0,"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["gpu"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurements using WITH and regex - no matches`, - command: "SHOW MEASUREMENTS WITH MEASUREMENT =~ /.*zzzzz.*/", - exp: `{"results":[{"statement_id":0}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurements where tag matches regular expression`, - command: "SHOW MEASUREMENTS WHERE region =~ /ca.*/", - exp: `{"results":[{"statement_id":0,"series":[{"name":"measurements","columns":["name"],"values":[["gpu"],["other"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurements where tag does not match a regular expression`, - command: "SHOW MEASUREMENTS WHERE region !~ /ca.*/", - exp: `{"results":[{"statement_id":0,"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurements with limit 2 and time`, - command: "SHOW MEASUREMENTS WHERE time > 0 LIMIT 2", - exp: `{"results":[{"statement_id":0,"error":"SHOW MEASUREMENTS doesn't support time in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurements using WITH and time`, - command: "SHOW MEASUREMENTS WITH MEASUREMENT = cpu WHERE time > 0", - exp: `{"results":[{"statement_id":0,"error":"SHOW MEASUREMENTS doesn't support time in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurements using WITH and regex and time`, - command: "SHOW MEASUREMENTS WITH MEASUREMENT =~ /[cg]pu/ WHERE time > 0 ", - exp: `{"results":[{"statement_id":0,"error":"SHOW MEASUREMENTS doesn't support time in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurements using WITH and regex and time - no matches`, - command: "SHOW MEASUREMENTS WITH MEASUREMENT =~ /.*zzzzz.*/ WHERE time > 0 ", - exp: `{"results":[{"statement_id":0,"error":"SHOW MEASUREMENTS doesn't support time in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurements and time where tag matches regular expression `, - command: "SHOW MEASUREMENTS WHERE region =~ /ca.*/ AND time > 0", - exp: `{"results":[{"statement_id":0,"error":"SHOW MEASUREMENTS doesn't support time in WHERE clause"}]}`, - params: 
url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurements and time where tag does not match a regular expression`, - command: "SHOW MEASUREMENTS WHERE region !~ /ca.*/ AND time > 0", - exp: `{"results":[{"statement_id":0,"error":"SHOW MEASUREMENTS doesn't support time in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurements bad wildcard`, - command: "SHOW MEASUREMENTS on *", - exp: `{"results":[{"statement_id":0,"error":"query 'SHOW MEASUREMENTS ON *' not supported. use 'ON *.*' or specify a database"}]}`, - params: url.Values{"db": []string{"db0"}, "rp": []string{"rp0"}}, - }, - { - name: `show measurements bad rp wildcard`, - command: "SHOW MEASUREMENTS on *.rp0", - exp: `{"results":[{"statement_id":0,"error":"query 'SHOW MEASUREMENTS ON *.rp' not supported. use 'ON *.*' or specify a database"}]}`, - params: url.Values{"db": []string{"db0"}, "rp": []string{"rp0"}}, - }, - { - name: `show measurements on specific rp`, - command: "SHOW MEASUREMENTS on db0.rp0", - exp: `{"results":[{"statement_id":0,"series":[{"name":"measurements","columns":["name","database","retention policy"],"values":[["cpu","db0","rp0"],["gpu","db0","rp0"],["other","db0","rp0"]]}]}]}`, - params: url.Values{"db": []string{"db0"}, "rp": []string{"rp0"}}, - }, - { - name: `show measurements on all rps`, - command: "SHOW MEASUREMENTS on db0.*", - exp: `{"results":[{"statement_id":0,"series":[{"name":"measurements","columns":["name","database","retention policy"],"values":[["cpu","db0","rp0"],["gpu","db0","rp0"],["other","db0","rp0"],["other2","db0","rp1"]]}]}]}`, - params: url.Values{"db": []string{"db0"}, "rp": []string{"rp0"}}, - }, - { - name: `show measurements on all dbs and rps`, - command: "SHOW MEASUREMENTS on *.*", - exp: `{"results":[{"statement_id":0,"series":[{"name":"measurements","columns":["name","database","retention policy"],"values":[["other2","b2","autogen"],["cpu","b3","autogen"],["disk","b3","autogen"],["cpu","db","rp"],["gpu","db","rp"],["other","db","rp"],["cpu","db0","rp0"],["gpu","db0","rp0"],["other","db0","rp0"],["other2","db0","rp1"],["cpu","db1","rp0"],["disk","db1","rp0"]]}]}]}`, - params: url.Values{"db": []string{"db0"}, "rp": []string{"rp0"}}, - }, - }...) - - test.Run(ctx, t, s) -} - -func TestServer_Query_ShowMeasurementCardinalityEstimation(t *testing.T) { - // This test fails to build. The offending portions have been commented out - t.Skip(NotSupported) - // if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" { - // t.Skip("Skipping test in short, race and appveyor mode.") - // } - - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = make(Writes, 0, 10) - for j := 0; j < cap(test.writes); j++ { - writes := make([]string, 0, 10000) - for i := 0; i < cap(writes); i++ { - writes = append(writes, fmt.Sprintf(`cpu-%d-s%d v=1 %d`, j, i, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:01Z").UnixNano())) - } - test.writes = append(test.writes, &Write{data: strings.Join(writes, "\n")}) - } - - // These queries use index sketches to estimate cardinality. - test.addQueries([]*Query{ - { - name: `show measurement cardinality`, - command: "SHOW MEASUREMENT CARDINALITY", - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurement cardinality on db0`, - command: "SHOW MEASUREMENT CARDINALITY ON db0", - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_ShowMeasurementExactCardinality(t *testing.T) { - t.Skip(NotSupported) - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`other,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: `show measurement cardinality using FROM and regex`, - command: "SHOW MEASUREMENT CARDINALITY FROM /[cg]pu/", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurement cardinality using FROM and regex - no matches`, - command: "SHOW MEASUREMENT CARDINALITY FROM /.*zzzzz.*/", - exp: `{"results":[{"statement_id":0}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurement cardinality where tag matches regular expression`, - command: "SHOW MEASUREMENT CARDINALITY WHERE region =~ /ca.*/", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurement cardinality where tag does not match a regular expression`, - command: "SHOW MEASUREMENT CARDINALITY WHERE region !~ /ca.*/", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurement cardinality with time in WHERE clauses errors`, - command: `SHOW MEASUREMENT CARDINALITY WHERE time > now() - 1h`, - exp: `{"results":[{"statement_id":0,"error":"SHOW MEASUREMENT EXACT CARDINALITY doesn't support time in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurement exact cardinality`, - command: "SHOW MEASUREMENT EXACT CARDINALITY", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["count"],"values":[[3]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurement exact cardinality using FROM`, - command: "SHOW MEASUREMENT EXACT CARDINALITY FROM cpu", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["count"],"values":[[1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurement exact cardinality using FROM and regex`, - command: "SHOW MEASUREMENT EXACT CARDINALITY FROM /[cg]pu/", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurement exact cardinality using FROM and regex - no matches`, - command: "SHOW MEASUREMENT EXACT 
CARDINALITY FROM /.*zzzzz.*/", - exp: `{"results":[{"statement_id":0}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurement exact cardinality where tag matches regular expression`, - command: "SHOW MEASUREMENT EXACT CARDINALITY WHERE region =~ /ca.*/", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurement exact cardinality where tag does not match a regular expression`, - command: "SHOW MEASUREMENT EXACT CARDINALITY WHERE region !~ /ca.*/", - exp: `{"results":[{"statement_id":0,"series":[{"columns":["count"],"values":[[2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show measurement exact cardinality with time in WHERE clauses errors`, - command: `SHOW MEASUREMENT EXACT CARDINALITY WHERE time > now() - 1h`, - exp: `{"results":[{"statement_id":0,"error":"SHOW MEASUREMENT EXACT CARDINALITY doesn't support time in WHERE clause"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_ShowTagKeys(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: `show tag keys`, - command: "SHOW TAG KEYS", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"disk","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag keys on db0`, - command: "SHOW TAG KEYS ON db0", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"disk","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, - }, - { - name: "show tag keys from", - command: "SHOW TAG KEYS FROM cpu", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag keys from regex", - command: "SHOW TAG KEYS FROM /[cg]pu/", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - 
name: "show tag keys measurement not found", - command: "SHOW TAG KEYS FROM doesntexist", - exp: `{"results":[{"statement_id":0}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag keys with time`, - command: "SHOW TAG KEYS WHERE time > 0", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"disk","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: `show tag keys on db0 with time`, - command: "SHOW TAG KEYS ON db0 WHERE time > 0", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"disk","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, - }, - { - name: "show tag keys with time from", - command: "SHOW TAG KEYS FROM cpu WHERE time > 0", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag keys with time from regex", - command: "SHOW TAG KEYS FROM /[cg]pu/ WHERE time > 0", - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag keys with time where", - command: "SHOW TAG KEYS WHERE host = 'server03' AND time > 0", - exp: `{"results":[{"statement_id":0,"series":[{"name":"disk","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "show tag keys with time measurement not found", - command: "SHOW TAG KEYS FROM doesntexist WHERE time > 0", - exp: `{"results":[{"statement_id":0}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_LargeTimestamp(t *testing.T) { - // This test fails to build. The offending portions have been commented out. - t.Skip(NeedsReview) - s := OpenServer(t) - defer s.Close() - - // if _, ok := s.(*RemoteServer); ok { - // t.Skip("Skipping. Cannot restart remote server") - // } - // - writes := []string{ - fmt.Sprintf(`cpu value=100 %d`, models.MaxNanoTime), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: `select value at max nano time`, - params: url.Values{"db": []string{"db0"}}, - command: fmt.Sprintf(`SELECT value FROM cpu WHERE time <= %d`, models.MaxNanoTime), - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["` + time.Unix(0, models.MaxNanoTime).UTC().Format(time.RFC3339Nano) + `",100]]}]}]}`, - }, - }...) - - // if err := test.init(s); err != nil { - // t.Fatalf("test init failed: %s", err) - // } - - // Open a new server with the same configuration file. - // This is to ensure the meta data was marshaled correctly. 
- // s2 := OpenServer((s.(*LocalServer)).Config) - // defer s2.(*LocalServer).Server.Close() - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_DotProduct(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu a=2,b=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu a=-5,b=8 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`cpu a=9,b=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "select dot product", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(a_b) FROM (SELECT a * b FROM cpu) WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:00:30Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",-7]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure time in where clause is inclusive -func TestServer_WhereTimeInclusive(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), - fmt.Sprintf(`cpu value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), - fmt.Sprintf(`cpu value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "all GTE/LTE", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * from cpu where time >= '2000-01-01T00:00:01Z' and time <= '2000-01-01T00:00:03Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, - }, - { - name: "all GTE", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * from cpu where time >= '2000-01-01T00:00:01Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, - }, - { - name: "all LTE", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * from cpu where time <= '2000-01-01T00:00:03Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, - }, - { - name: "first GTE/LTE", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * from cpu where time >= '2000-01-01T00:00:01Z' and time <= '2000-01-01T00:00:01Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1]]}]}]}`, - }, - { - name: "last GTE/LTE", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * from cpu where time >= '2000-01-01T00:00:03Z' and time <= '2000-01-01T00:00:03Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:03Z",3]]}]}]}`, - }, - { - name: "before GTE/LTE", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * from cpu where time <= '2000-01-01T00:00:00Z'`, - exp: 
`{"results":[{"statement_id":0}]}`, - }, - { - name: "all GT/LT", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * from cpu where time > '2000-01-01T00:00:00Z' and time < '2000-01-01T00:00:04Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, - }, - { - name: "first GT/LT", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * from cpu where time > '2000-01-01T00:00:00Z' and time < '2000-01-01T00:00:02Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1]]}]}]}`, - }, - { - name: "last GT/LT", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * from cpu where time > '2000-01-01T00:00:02Z' and time < '2000-01-01T00:00:04Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:03Z",3]]}]}]}`, - }, - { - name: "all GT", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * from cpu where time > '2000-01-01T00:00:00Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, - }, - { - name: "all LT", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * from cpu where time < '2000-01-01T00:00:04Z'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:01Z",1],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:03Z",3]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_ImplicitEndTime(t *testing.T) { - t.Skip(FlakyTest) - s := OpenServer(t) - defer s.Close() - - now := time.Now().UTC().Truncate(time.Second) - past := now.Add(-10 * time.Second) - future := now.Add(10 * time.Minute) - writes := []string{ - fmt.Sprintf(`cpu value=1 %d`, past.UnixNano()), - fmt.Sprintf(`cpu value=2 %d`, future.UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "raw query", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM cpu`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1],["%s",2]]}]}]}`, past.Format(time.RFC3339Nano), future.Format(time.RFC3339Nano)), - }, - { - name: "aggregate query", - params: url.Values{"db": []string{"db0"}}, - command: fmt.Sprintf(`SELECT mean(value) FROM cpu WHERE time > '%s' - 1m GROUP BY time(1m) FILL(none)`, now.Truncate(time.Minute).Format(time.RFC3339Nano)), - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","mean"],"values":[["%s",1]]}]}]}`, now.Truncate(time.Minute).Format(time.RFC3339Nano)), - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Sample_Wildcard(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu float=1,int=1i,string="hello, world",bool=true %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "sample() with wildcard", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sample(*, 1) FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sample_bool","sample_float","sample_int","sample_string"],"values":[["2000-01-01T00:00:00Z",true,1,1,"hello, world"]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Sample_LimitOffset(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu float=1,int=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu float=2,int=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf(`cpu float=3,int=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:02:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "sample() with limit 1", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sample(float, 3), int FROM cpu LIMIT 1`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sample","int"],"values":[["2000-01-01T00:00:00Z",1,1]]}]}]}`, - }, - { - name: "sample() with offset 1", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sample(float, 3), int FROM cpu OFFSET 1`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sample","int"],"values":[["2000-01-01T00:01:00Z",2,2],["2000-01-01T00:02:00Z",3,3]]}]}]}`, - }, - { - name: "sample() with limit 1 offset 1", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sample(float, 3), int FROM cpu LIMIT 1 OFFSET 1`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sample","int"],"values":[["2000-01-01T00:01:00Z",2,2]]}]}]}`, - }, - }...) 
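	// Note: sample(float, 3) requests up to three points, which here covers all three
	// written rows; LIMIT and OFFSET are then applied to the sampled output, so
	// LIMIT 1 keeps only the first row, OFFSET 1 skips it, and LIMIT 1 OFFSET 1
	// returns just the middle row, matching the expectations above.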
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Validate that nested aggregates don't panic -func TestServer_NestedAggregateWithMathPanics(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - `cpu value=2i 120000000000`, - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "dividing by elapsed count should not panic", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value) / elapsed(sum(value), 1m) FROM cpu WHERE time > 0 AND time < 10m GROUP BY time(1m)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sum_elapsed"],"values":[["1970-01-01T00:00:00Z",null],["1970-01-01T00:01:00Z",null],["1970-01-01T00:02:00Z",null],["1970-01-01T00:03:00Z",null],["1970-01-01T00:04:00Z",null],["1970-01-01T00:05:00Z",null],["1970-01-01T00:06:00Z",null],["1970-01-01T00:07:00Z",null],["1970-01-01T00:08:00Z",null],["1970-01-01T00:09:00Z",null]]}]}]}`, - }, - { - name: "dividing by elapsed count with fill previous should not panic", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value) / elapsed(sum(value), 1m) FROM cpu WHERE time > 0 AND time < 10m GROUP BY time(1m) FILL(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","sum_elapsed"],"values":[["1970-01-01T00:00:00Z",null],["1970-01-01T00:01:00Z",null],["1970-01-01T00:02:00Z",null],["1970-01-01T00:03:00Z",2],["1970-01-01T00:04:00Z",2],["1970-01-01T00:05:00Z",2],["1970-01-01T00:06:00Z",2],["1970-01-01T00:07:00Z",2],["1970-01-01T00:08:00Z",2],["1970-01-01T00:09:00Z",2]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server will succeed and error for common scenarios. 
-func TestServer_Query_Common(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - now := now() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: fmt.Sprintf("cpu,host=server01 value=1 %s", strconv.FormatInt(now.UnixNano(), 10))}, - } - - test.addQueries([]*Query{ - { - name: "selecting from a non-existent database should error", - command: `SELECT value FROM db1.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"error":"database not found: db1"}]}`, - }, - { - name: "selecting from a non-existent retention policy should error", - command: `SELECT value FROM db0.rp1.cpu`, - exp: `{"results":[{"statement_id":0,"error":"retention policy not found: rp1"}]}`, - }, - { - name: "selecting a valid measurement and field should succeed", - command: `SELECT value FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - { - name: "explicitly selecting time and a valid measurement and field should succeed", - command: `SELECT time,value FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - { - name: "selecting a measurement that doesn't exist should result in empty set", - command: `SELECT value FROM db0.rp0.idontexist`, - exp: `{"results":[{"statement_id":0}]}`, - }, - { - name: "selecting a field that doesn't exist should result in empty set", - command: `SELECT idontexist FROM db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0}]}`, - }, - { - name: "selecting wildcard without specifying a database should error", - command: `SELECT * FROM cpu`, - exp: `{"results":[{"statement_id":0,"error":"database name required"}]}`, - }, - { - name: "selecting explicit field without specifying a database should error", - command: `SELECT value FROM cpu`, - exp: `{"results":[{"statement_id":0,"error":"database name required"}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can query two points. -func TestServer_Query_SelectTwoPoints(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - now := now() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: fmt.Sprintf("cpu value=100 %s\ncpu value=200 %s", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10))}, - } - - test.addQueries( - &Query{ - name: "selecting two points should result in two points", - command: `SELECT * FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "selecting two points with GROUP BY * should result in two points", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - ) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can query two negative points. 
-func TestServer_Query_SelectTwoNegativePoints(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - now := now() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: fmt.Sprintf("cpu value=-100 %s\ncpu value=-200 %s", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10))}, - } - - test.addQueries(&Query{ - name: "selecting two negative points should succeed", - command: `SELECT * FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",-100],["%s",-200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can handle various simple derivative queries. -func TestServer_Query_SelectRawDerivative(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: "cpu value=210 1278010021000000000\ncpu value=10 1278010022000000000"}, - } - - test.addQueries([]*Query{ - { - name: "calculate single derivative", - command: `SELECT derivative(value) from db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-200]]}]}]}`, - }, - { - name: "calculate derivative with unit", - command: `SELECT derivative(value, 10s) from db0.rp0.cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-2000]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can handle various group by time derivative queries. -func TestServer_Query_SelectGroupByTimeDerivativeWithFill(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `cpu value=10 1278010020000000000 -cpu value=20 1278010021000000000 -`}, - } - - test.addQueries([]*Query{ - { - name: "calculate derivative of count with unit default (2s) group by time with fill 0", - command: `SELECT derivative(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",-2]]}]}]}`, - }, - { - name: "calculate derivative of count with unit 4s group by time with fill 0", - command: `SELECT derivative(count(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",4],["2010-07-01T18:47:02Z",-4]]}]}]}`, - }, - { - name: "calculate derivative of count with unit default (2s) group by time with fill previous", - command: `SELECT derivative(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of count with unit 4s group by time with fill previous", - command: `SELECT derivative(count(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of mean with unit default (2s) group by time with fill 0", - command: `SELECT derivative(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",-15]]}]}]}`, - }, - { - name: "calculate derivative of mean with unit 4s group by time with fill 0", - command: `SELECT derivative(mean(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",-30]]}]}]}`, - }, - { - name: "calculate derivative of mean with unit default (2s) group by time with fill previous", - command: `SELECT derivative(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of mean with unit 4s group by time with fill previous", - command: `SELECT derivative(mean(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of median with unit default (2s) group by time with fill 0", - command: `SELECT derivative(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",-15]]}]}]}`, - }, - { - name: "calculate derivative of median with unit 4s group by time with fill 0", - command: `SELECT derivative(median(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",-30]]}]}]}`, - }, - { - name: "calculate derivative of median with unit default (2s) group by time with fill previous", - command: `SELECT derivative(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of median with unit 4s group by time with fill previous", - command: `SELECT derivative(median(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of mode with unit default (2s) group by time with fill 
0", - command: `SELECT derivative(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, - }, - { - name: "calculate derivative of mode with unit 4s group by time with fill 0", - command: `SELECT derivative(mode(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, - }, - { - name: "calculate derivative of mode with unit default (2s) group by time with fill previous", - command: `SELECT derivative(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of mode with unit 4s group by time with fill previous", - command: `SELECT derivative(mode(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of sum with unit default (2s) group by time with fill 0", - command: `SELECT derivative(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",-30]]}]}]}`, - }, - { - name: "calculate derivative of sum with unit 4s group by time with fill 0", - command: `SELECT derivative(sum(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",60],["2010-07-01T18:47:02Z",-60]]}]}]}`, - }, - { - name: "calculate derivative of sum with unit default (2s) group by time with fill previous", - command: `SELECT derivative(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of sum with unit 4s group by time with fill previous", - command: `SELECT derivative(sum(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of first with unit default (2s) group by time with fill 0", - command: `SELECT derivative(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, - }, - { - name: "calculate derivative of first with unit 4s group by time with fill 0", - command: `SELECT derivative(first(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, - }, - { - name: "calculate derivative of first with unit default (2s) group by time with fill previous", - command: `SELECT derivative(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of first with unit 4s group by time with fill previous", - command: `SELECT derivative(first(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of last with unit default (2s) group by time with fill 0", - command: `SELECT derivative(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, - }, - { - name: "calculate derivative of last with unit 4s group by time with fill 0", - command: `SELECT derivative(last(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",40],["2010-07-01T18:47:02Z",-40]]}]}]}`, - }, - { - name: "calculate derivative of last with unit default (2s) group by time with fill previous", - command: `SELECT derivative(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of last with unit 4s group by time with fill previous", - command: `SELECT derivative(last(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of min with unit default (2s) group by time with fill 0", - command: `SELECT derivative(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, - }, - { - name: "calculate derivative of min with unit 4s group by time 
with fill 0", - command: `SELECT derivative(min(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, - }, - { - name: "calculate derivative of min with unit default (2s) group by time with fill previous", - command: `SELECT derivative(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of min with unit 4s group by time with fill previous", - command: `SELECT derivative(min(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of max with unit default (2s) group by time with fill 0", - command: `SELECT derivative(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, - }, - { - name: "calculate derivative of max with unit 4s group by time with fill 0", - command: `SELECT derivative(max(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",40],["2010-07-01T18:47:02Z",-40]]}]}]}`, - }, - { - name: "calculate derivative of max with unit default (2s) group by time with fill previous", - command: `SELECT derivative(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of max with unit 4s group by time with fill previous", - command: `SELECT derivative(max(value), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of percentile with unit default (2s) group by time with fill 0", - command: `SELECT derivative(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, - }, - { - name: "calculate derivative of percentile with unit 4s group by time with fill 0", - command: `SELECT derivative(percentile(value, 50), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, - }, - { - name: "calculate derivative of percentile with unit default (2s) group by time with fill previous", - command: `SELECT derivative(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate derivative of percentile with unit 4s group by time with fill previous", - command: `SELECT derivative(percentile(value, 50), 4s) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can handle various group by time difference queries. -func TestServer_Query_SelectGroupByTimeDifference(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `cpu value=10 1278010020000000000 -cpu value=15 1278010021000000000 -cpu value=20 1278010022000000000 -cpu value=25 1278010023000000000 -`}, - } - - test.addQueries([]*Query{ - { - name: "calculate difference of count", - command: `SELECT difference(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate difference of mean", - command: `SELECT difference(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate difference of median", - command: `SELECT difference(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate difference of mode", - command: `SELECT difference(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate difference of sum", - command: `SELECT difference(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",20]]}]}]}`, - }, - { - name: "calculate difference of first", - command: `SELECT difference(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate difference of last", - command: `SELECT difference(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate difference of min", - command: `SELECT difference(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate difference of max", - command: `SELECT difference(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - { - name: "calculate difference of percentile", - command: `SELECT difference(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",10]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Ensure the server can handle various group by time difference queries with fill. -func TestServer_Query_SelectGroupByTimeDifferenceWithFill(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: `cpu value=10 1278010020000000000 -cpu value=20 1278010021000000000 -`}, - } - - test.addQueries([]*Query{ - { - name: "calculate difference of count with fill 0", - command: `SELECT difference(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",2],["2010-07-01T18:47:02Z",-2]]}]}]}`, - }, - { - name: "calculate difference of count with fill previous", - command: `SELECT difference(count(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate difference of mean with fill 0", - command: `SELECT difference(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",-15]]}]}]}`, - }, - { - name: "calculate difference of mean with fill previous", - command: `SELECT difference(mean(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate difference of median with fill 0", - command: `SELECT 
difference(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",15],["2010-07-01T18:47:02Z",-15]]}]}]}`, - }, - { - name: "calculate difference of median with fill previous", - command: `SELECT difference(median(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate difference of mode with fill 0", - command: `SELECT difference(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, - }, - { - name: "calculate difference of mode with fill previous", - command: `SELECT difference(mode(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate difference of sum with fill 0", - command: `SELECT difference(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",30],["2010-07-01T18:47:02Z",-30]]}]}]}`, - }, - { - name: "calculate difference of sum with fill previous", - command: `SELECT difference(sum(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate difference of first with fill 0", - command: `SELECT difference(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, - }, - { - name: "calculate difference of first with fill previous", - command: `SELECT difference(first(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate difference of last with fill 0", - command: `SELECT difference(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, - }, - { - name: "calculate difference of last with fill previous", - command: `SELECT difference(last(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' 
group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate difference of min with fill 0", - command: `SELECT difference(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, - }, - { - name: "calculate difference of min with fill previous", - command: `SELECT difference(min(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate difference of max with fill 0", - command: `SELECT difference(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",20],["2010-07-01T18:47:02Z",-20]]}]}]}`, - }, - { - name: "calculate difference of max with fill previous", - command: `SELECT difference(max(value)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - { - name: "calculate difference of percentile with fill 0", - command: `SELECT difference(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(0)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:00Z",10],["2010-07-01T18:47:02Z",-10]]}]}]}`, - }, - { - name: "calculate difference of percentile with fill previous", - command: `SELECT difference(percentile(value, 50)) from db0.rp0.cpu where time >= '2010-07-01 18:47:00' and time <= '2010-07-01 18:47:03' group by time(2s) fill(previous)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-07-01T18:47:02Z",0]]}]}]}`, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -// Test various aggregates when different series only have data for the same timestamp. 
-func TestServer_Query_Aggregates_IdenticalTime(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`series,host=a value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=b value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=c value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=d value=4 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=e value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=f value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=g value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=h value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`series,host=i value=5 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "last from multiple series with identical timestamp", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT last(value) FROM "series"`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"series","columns":["time","last"],"values":[["2000-01-01T00:00:00Z",5]]}]}]}`, - repeat: 100, - }, - { - name: "first from multiple series with identical timestamp", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT first(value) FROM "series"`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"series","columns":["time","first"],"values":[["2000-01-01T00:00:00Z",5]]}]}]}`, - repeat: 100, - }, - }...) 
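	// Note: all nine series share a single timestamp, so these queries exercise
	// tie-breaking across series. repeat: 100 reruns each query to surface any
	// nondeterminism in iteration order; the expected result of 5 for both first()
	// and last() suggests the point with the greatest field value wins when
	// timestamps are equal.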
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_AcrossShardsAndFields(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu load=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu load=200 %d`, mustParseTime(time.RFC3339Nano, "2010-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu core=4 %d`, mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "two results for cpu", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT load FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","load"],"values":[["2000-01-01T00:00:00Z",100],["2010-01-01T00:00:00Z",200]]}]}]}`, - }, - { - name: "two results for cpu, multi-select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT core,load FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","core","load"],"values":[["2000-01-01T00:00:00Z",null,100],["2010-01-01T00:00:00Z",null,200],["2015-01-01T00:00:00Z",4,null]]}]}]}`, - }, - { - name: "two results for cpu, wildcard select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","core","load"],"values":[["2000-01-01T00:00:00Z",null,100],["2010-01-01T00:00:00Z",null,200],["2015-01-01T00:00:00Z",4,null]]}]}]}`, - }, - { - name: "one result for core", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT core FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","core"],"values":[["2015-01-01T00:00:00Z",4]]}]}]}`, - }, - { - name: "empty result set from non-existent field", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT foo FROM cpu`, - exp: `{"results":[{"statement_id":0}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_OrderedAcrossShards(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu value=7 %d`, mustParseTime(time.RFC3339Nano, "2010-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu value=14 %d`, mustParseTime(time.RFC3339Nano, "2010-01-08T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu value=28 %d`, mustParseTime(time.RFC3339Nano, "2010-01-15T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu value=56 %d`, mustParseTime(time.RFC3339Nano, "2010-01-22T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu value=112 %d`, mustParseTime(time.RFC3339Nano, "2010-01-29T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "derivative", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT derivative(value, 24h) FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-01-08T00:00:00Z",1],["2010-01-15T00:00:00Z",2],["2010-01-22T00:00:00Z",4],["2010-01-29T00:00:00Z",8]]}]}]}`, - }, - { - name: "non_negative_derivative", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT non_negative_derivative(value, 24h) FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","non_negative_derivative"],"values":[["2010-01-08T00:00:00Z",1],["2010-01-15T00:00:00Z",2],["2010-01-22T00:00:00Z",4],["2010-01-29T00:00:00Z",8]]}]}]}`, - }, - { - name: "difference", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT difference(value) FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","difference"],"values":[["2010-01-08T00:00:00Z",7],["2010-01-15T00:00:00Z",14],["2010-01-22T00:00:00Z",28],["2010-01-29T00:00:00Z",56]]}]}]}`, - }, - { - name: "cumulative_sum", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT cumulative_sum(value) FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","cumulative_sum"],"values":[["2010-01-01T00:00:00Z",7],["2010-01-08T00:00:00Z",21],["2010-01-15T00:00:00Z",49],["2010-01-22T00:00:00Z",105],["2010-01-29T00:00:00Z",217]]}]}]}`, - }, - }...) 
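The expected values above follow from the weekly spacing of the writes: `derivative(value, 24h)` rescales each point-to-point delta to a per-24h rate, so the doubling series 7, 14, 28, 56, 112 yields 1, 2, 4, 8, while `difference` and `cumulative_sum` are simply the raw deltas and the running total. A standalone sketch of that arithmetic (illustrative only, not part of the harness):

```go
// Standalone illustration of how the expected "derivative" values are derived.
// derivative(value, 24h) divides each delta by the elapsed time and rescales
// it to a 24h unit; the points above are exactly one week apart.
package main

import (
	"fmt"
	"time"
)

func main() {
	vals := []float64{7, 14, 28, 56, 112}
	elapsed := 7 * 24 * time.Hour // one point per week
	unit := 24 * time.Hour
	for i := 1; i < len(vals); i++ {
		perDay := (vals[i] - vals[i-1]) * float64(unit) / float64(elapsed)
		fmt.Println(perDay) // prints 1, 2, 4, 8
	}
}
```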
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_Where_With_Tags(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`where_events,tennant=paul foo="bar" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`where_events,tennant=paul foo="baz" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), - fmt.Sprintf(`where_events,tennant=paul foo="bat" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()), - fmt.Sprintf(`where_events,tennant=todd foo="bar" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()), - fmt.Sprintf(`where_events,tennant=david foo="bap" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "tag field and time", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from where_events where (tennant = 'paul' OR tennant = 'david') AND time > 1s AND (foo = 'bar' OR foo = 'baz' OR foo = 'bap')`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:06Z","bap"]]}]}]}`, - }, - { - name: "tag or field", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from where_events where tennant = 'paul' OR foo = 'bar'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:04Z","bat"],["2009-11-10T23:00:05Z","bar"]]}]}]}`, - }, - { - name: "non-existent tag and field", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from where_events where tenant != 'paul' AND foo = 'bar'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:05Z","bar"]]}]}]}`, - }, - { - name: "non-existent tag or field", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from where_events where tenant != 'paul' OR foo = 'bar'`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:04Z","bat"],["2009-11-10T23:00:05Z","bar"],["2009-11-10T23:00:06Z","bap"]]}]}]}`, - }, - { - name: "where comparing tag and field", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from where_events where tennant != foo`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:04Z","bat"],["2009-11-10T23:00:05Z","bar"],["2009-11-10T23:00:06Z","bap"]]}]}]}`, - }, - { - name: "where comparing tag and tag", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from where_events where tennant = tennant`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:04Z","bat"],["2009-11-10T23:00:05Z","bar"],["2009-11-10T23:00:06Z","bap"]]}]}]}`, - }, - }...) 
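The cases above exercise OR/AND combinations and tag-to-field comparisons; a plain single-tag filter in the same style is sketched below. The expectation is hand-derived from the dataset (only the `tennant=todd` point matches) and follows the response shape of the existing cases rather than being a captured server response:

```go
// Hypothetical additional case: a simple equality filter on one tag value.
// Only the 23:00:05 point carries tennant=todd, so a single row is expected;
// the exp string is illustrative, written to match the format used above.
test.addQueries([]*Query{
	{
		name:    "single tag value",
		params:  url.Values{"db": []string{"db0"}},
		command: `select foo from where_events where tennant = 'todd'`,
		exp:     `{"results":[{"statement_id":0,"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:05Z","bar"]]}]}]}`,
	},
}...)
```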
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_With_EmptyTags(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu value=1 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01 value=2 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "where empty tag", - params: url.Values{"db": []string{"db0"}}, - command: `select value from cpu where host = ''`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:02Z",1]]}]}]}`, - }, - { - name: "where not empty tag", - params: url.Values{"db": []string{"db0"}}, - command: `select value from cpu where host != ''`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:03Z",2]]}]}]}`, - }, - { - name: "where regex none", - params: url.Values{"db": []string{"db0"}}, - command: `select value from cpu where host !~ /.*/`, - exp: `{"results":[{"statement_id":0}]}`, - }, - { - name: "where regex exact", - params: url.Values{"db": []string{"db0"}}, - command: `select value from cpu where host =~ /^server01$/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:03Z",2]]}]}]}`, - }, - { - name: "where regex exact (case insensitive)", - params: url.Values{"db": []string{"db0"}}, - command: `select value from cpu where host =~ /(?i)^SeRvEr01$/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:03Z",2]]}]}]}`, - }, - { - name: "where regex exact (not)", - params: url.Values{"db": []string{"db0"}}, - command: `select value from cpu where host !~ /^server01$/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:02Z",1]]}]}]}`, - }, - { - name: "where regex at least one char", - params: url.Values{"db": []string{"db0"}}, - command: `select value from cpu where host =~ /.+/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:03Z",2]]}]}]}`, - }, - { - name: "where regex not at least one char", - params: url.Values{"db": []string{"db0"}}, - command: `select value from cpu where host !~ /.+/`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2009-11-10T23:00:02Z",1]]}]}]}`, - }, - { - name: "group by empty tag", - params: url.Values{"db": []string{"db0"}}, - command: `select value from cpu group by host`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"host":""},"columns":["time","value"],"values":[["2009-11-10T23:00:02Z",1]]},{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2009-11-10T23:00:03Z",2]]}]}]}`, - }, - { - name: "group by missing tag", - params: url.Values{"db": []string{"db0"}}, - command: `select value from cpu group by region`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","tags":{"region":""},"columns":["time","value"],"values":[["2009-11-10T23:00:02Z",1],["2009-11-10T23:00:03Z",2]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_ImplicitFill(t *testing.T) { - s := OpenServer(t, func(o *launcher.InfluxdOpts) { - o.CoordinatorConfig.MaxSelectBucketsN = 5 - }) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`fills val=1 %d`, mustParseTime(time.RFC3339Nano, "2010-01-01T11:30:00Z").UnixNano()), - fmt.Sprintf(`fills val=3 %d`, mustParseTime(time.RFC3339Nano, "2010-01-01T12:00:00Z").UnixNano()), - fmt.Sprintf(`fills val=5 %d`, mustParseTime(time.RFC3339Nano, "2010-01-01T16:30:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "fill with implicit start", - command: `select mean(val) from fills where time < '2010-01-01T18:00:00Z' group by time(1h)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"fills","columns":["time","mean"],"values":[["2010-01-01T16:00:00Z",5],["2010-01-01T17:00:00Z",null]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - { - name: "fill with implicit start - max select buckets", - command: `select mean(val) from fills where time < '2010-01-01T17:00:00Z' group by time(1h)`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"fills","columns":["time","mean"],"values":[["2010-01-01T12:00:00Z",3],["2010-01-01T13:00:00Z",null],["2010-01-01T14:00:00Z",null],["2010-01-01T15:00:00Z",null],["2010-01-01T16:00:00Z",5]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_MaxRowLimit(t *testing.T) { - t.Skip(NotSupported) - // config := NewConfig() - // config.HTTPD.MaxRowLimit = 10 - - s := OpenServer(t) - defer s.Close() - - writes := make([]string, 11) // write one extra value beyond the max row limit - expectedValues := make([]string, 10) - for i := 0; i < len(writes); i++ { - writes[i] = fmt.Sprintf(`cpu value=%d %d`, i, time.Unix(0, int64(i)).UnixNano()) - if i < len(expectedValues) { - expectedValues[i] = fmt.Sprintf(`["%s",%d]`, time.Unix(0, int64(i)).UTC().Format(time.RFC3339Nano), i) - } - } - expected := fmt.Sprintf(`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[%s],"partial":true}]}]}`, strings.Join(expectedValues, ",")) - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "SELECT all values, no chunking", - command: `SELECT value FROM cpu`, - exp: expected, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_EvilIdentifiers(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: fmt.Sprintf("cpu select=1,in-bytes=2 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano())}, - } - - test.addQueries([]*Query{ - { - name: `query evil identifiers`, - command: `SELECT "select", "in-bytes" FROM cpu`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","select","in-bytes"],"values":[["2000-01-01T00:00:00Z",1,2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} - -func TestServer_Query_OrderByTime(t *testing.T) { - s := OpenServer(t) - defer s.Close() - - writes := []string{ - fmt.Sprintf(`cpu,host=server1 value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), - fmt.Sprintf(`cpu,host=server1 value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), - fmt.Sprintf(`cpu,host=server1 value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - - fmt.Sprintf(`power,presence=true value=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), - fmt.Sprintf(`power,presence=true value=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), - fmt.Sprintf(`power,presence=true value=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - fmt.Sprintf(`power,presence=false value=4 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:04Z").UnixNano()), - - fmt.Sprintf(`mem,host=server1 free=1 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), - fmt.Sprintf(`mem,host=server1 free=2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), - fmt.Sprintf(`mem,host=server2 used=3 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), - fmt.Sprintf(`mem,host=server2 used=4 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:02Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.writes = Writes{ - &Write{data: strings.Join(writes, "\n")}, - } - - test.addQueries([]*Query{ - { - name: "order on points", - params: url.Values{"db": []string{"db0"}}, - command: `select value from "cpu" ORDER BY time DESC`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:03Z",3],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:01Z",1]]}]}]}`, - }, - - { - name: "order desc with tags", - params: url.Values{"db": []string{"db0"}}, - command: `select value from "power" ORDER BY time DESC`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"power","columns":["time","value"],"values":[["2000-01-01T00:00:04Z",4],["2000-01-01T00:00:03Z",3],["2000-01-01T00:00:02Z",2],["2000-01-01T00:00:01Z",1]]}]}]}`, - }, - - { - name: "order desc with sparse data", - params: url.Values{"db": []string{"db0"}}, - command: `select used, free from "mem" ORDER BY time DESC`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"mem","columns":["time","used","free"],"values":[["2000-01-01T00:00:02Z",4,null],["2000-01-01T00:00:02Z",null,2],["2000-01-01T00:00:01Z",3,null],["2000-01-01T00:00:01Z",null,1]]}]}]}`, - }, - - { - name: "order desc with an aggregate and sparse data", - params: url.Values{"db": []string{"db0"}}, - command: `select first("used") AS "used", first("free") AS "free" from "mem" WHERE time >= '2000-01-01T00:00:01Z' AND time <= '2000-01-01T00:00:02Z' GROUP BY host, time(1s) FILL(none) ORDER BY time DESC`, - exp: `{"results":[{"statement_id":0,"series":[{"name":"mem","tags":{"host":"server2"},"columns":["time","used","free"],"values":[["2000-01-01T00:00:02Z",4,null],["2000-01-01T00:00:01Z",3,null]]},{"name":"mem","tags":{"host":"server1"},"columns":["time","used","free"],"values":[["2000-01-01T00:00:02Z",null,2],["2000-01-01T00:00:01Z",null,1]]}]}]}`, - }, - }...) 
- - ctx := context.Background() - test.Run(ctx, t, s) -} diff --git a/influxql/v1tests/test_server.go b/influxql/v1tests/test_server.go deleted file mode 100644 index aca585f794a..00000000000 --- a/influxql/v1tests/test_server.go +++ /dev/null @@ -1,82 +0,0 @@ -package v1tests - -import ( - "context" - "strings" - "testing" - - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/tests" - "github.com/influxdata/influxdb/v2/tests/pipeline" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type TestServer struct { - db string - rp string - p *tests.Pipeline - fx pipeline.BaseFixture - auth *influxdb.Authorization -} - -func NewTestServer(ctx context.Context, t *testing.T, db, rp string, writes ...string) *TestServer { - require.Greater(t, len(writes), 0) - - p := OpenServer(t) - t.Cleanup(func() { - _ = p.Close() - }) - - fx := pipeline.NewBaseFixture(t, p.Pipeline, p.DefaultOrgID, p.DefaultBucketID) - - // write test data - err := fx.Admin.WriteTo(ctx, influxdb.BucketFilter{ID: &p.DefaultBucketID, OrganizationID: &p.DefaultOrgID}, strings.NewReader(strings.Join(writes, "\n"))) - require.NoError(t, err) - - p.Flush() - - writeOrg, err := influxdb.NewPermissionAtID(p.DefaultOrgID, influxdb.WriteAction, influxdb.OrgsResourceType, p.DefaultOrgID) - require.NoError(t, err) - - bucketWritePerm, err := influxdb.NewPermissionAtID(p.DefaultBucketID, influxdb.WriteAction, influxdb.BucketsResourceType, p.DefaultOrgID) - require.NoError(t, err) - - bucketReadPerm, err := influxdb.NewPermissionAtID(p.DefaultBucketID, influxdb.ReadAction, influxdb.BucketsResourceType, p.DefaultOrgID) - require.NoError(t, err) - - auth := tests.MakeAuthorization(p.DefaultOrgID, p.DefaultUserID, []influxdb.Permission{*writeOrg, *bucketWritePerm, *bucketReadPerm}) - ctx = icontext.SetAuthorizer(ctx, auth) - err = p.Launcher. - DBRPMappingService(). - Create(ctx, &influxdb.DBRPMapping{ - Database: db, - RetentionPolicy: rp, - Default: true, - OrganizationID: p.DefaultOrgID, - BucketID: p.DefaultBucketID, - }) - require.NoError(t, err) - - return &TestServer{ - p: p.Pipeline, - db: db, - rp: rp, - fx: fx, - auth: auth, - } -} - -func (qr *TestServer) Execute(ctx context.Context, t *testing.T, query Query) { - t.Helper() - ctx = icontext.SetAuthorizer(ctx, qr.auth) - if query.skip != "" { - t.Skipf("SKIP:: %s", query.skip) - } - err := query.Execute(ctx, t, qr.db, qr.fx.Admin) - assert.NoError(t, err) - assert.Equal(t, query.exp, query.got, - "%s: unexpected results\nquery: %s\nparams: %v\nexp: %s\nactual: %s\n", - query.name, query.command, query.params, query.exp, query.got) -} diff --git a/influxql/v1validation/README.md b/influxql/v1validation/README.md deleted file mode 100644 index 47d972f32f1..00000000000 --- a/influxql/v1validation/README.md +++ /dev/null @@ -1,75 +0,0 @@ -This package asserts the fully compatibility between the InfluxDB v1 Influxql engine -and IDPE influxql. 
- -There is only one file that runs as subtest all the goldenfiles located in -`./goldenfiles` - -This is an example of goldenfile: - -```yaml -query: "select host, inactive from mem where time >=30000000000 AND time < 50000000000" -dataset: | - mem,host=gianarb page_tables=39534592i,vmalloc_chunk=0i,write_back_tmp=0i,dirty=884736i,high_total=0i,available=11992494080i,used=3284553728i,active=7172775936i,huge_pages_free=0i,swap_total=8589930496i,vmalloc_used=38604800i,free=4928421888i,commit_limit=16853958656i,committed_as=12584218624i,mapped=939278336i,vmalloc_total=35184372087808i,write_back=0i,buffered=989163520i,wired=0i,low_free=0i,huge_page_size=2097152i,swap_cached=120016896i,swap_free=8445227008i,inactive=3461185536i,slab=542363648i,high_free=0i,shared=903233536i,sreclaimable=449650688i,total=16528056320i,cached=7325917184i,available_percent=72.55840522208482,sunreclaim=92712960i,used_percent=19.87259520664557,huge_pages_total=0i,low_total=0i 0 - mem,host=gianarb slab=542392320i,shared=931356672i,vmalloc_used=38506496i,sunreclaim=92692480i,write_back=0i,total=16528056320i,commit_limit=16853958656i,huge_pages_free=0i,vmalloc_chunk=0i,active=7159382016i,huge_page_size=2097152i,swap_total=8589930496i,used=3266457600i,buffered=989843456i,wired=0i,high_free=0i,page_tables=38965248i,swap_cached=120016896i,write_back_tmp=0i,inactive=3454332928i,used_percent=19.7631078740177,committed_as=12570415104i,dirty=339968i,high_total=0i,huge_pages_total=0i,sreclaimable=449699840i,vmalloc_total=35184372087808i,free=4914728960i,cached=7357026304i,available_percent=72.49944859820032,low_free=0i,low_total=0i,swap_free=8445227008i,available=11982749696i,mapped=938016768i 20000000000 - cpu,cpu=cpu-total,host=gianarb usage_user=6.102117061021656,usage_system=2.938978829389874,usage_nice=0,usage_irq=1.0709838107097638,usage_guest=0,usage_idle=89.41469489414925,usage_iowait=0.024906600249037934,usage_softirq=0.4483188044832491,usage_steal=0,usage_guest_nice=0 30000000000 - cpu,cpu=cpu3,host=gianarb usage_user=5.711422845699912,usage_nice=0,usage_iowait=0,usage_irq=1.302605210421328,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_system=2.905811623247315,usage_idle=89.67935871744233,usage_softirq=0.4008016032064967 30000000000 - cpu,cpu=cpu2,host=gianarb usage_user=5.982053838488868,usage_idle=89.73080757727861,usage_irq=0.8973080757725367,usage_softirq=0.4985044865405501,usage_steal=0,usage_guest=0,usage_system=2.8913260219364374,usage_nice=0,usage_iowait=0,usage_guest_nice=0 30000000000 - cpu,cpu=cpu1,host=gianarb usage_iowait=0,usage_irq=1.0912698412698993,usage_softirq=0.4960317460319519,usage_guest_nice=0,usage_steal=0,usage_guest=0,usage_user=6.051587301585189,usage_system=3.075396825396861,usage_idle=89.28571428573105,usage_nice=0 30000000000 - cpu,cpu=cpu0,host=gianarb usage_user=6.560636182902656,usage_system=2.9821073558650495,usage_idle=88.86679920477891,usage_irq=1.0934393638174862,usage_steal=0,usage_nice=0,usage_iowait=0,usage_softirq=0.4970178926437982,usage_guest=0,usage_guest_nice=0 30000000000 - mem,host=gianarb 
used=3279171584i,wired=0i,committed_as=12718530560i,huge_pages_total=0i,active=7167565824i,swap_total=8589930496i,vmalloc_total=35184372087808i,available=11979239424i,cached=7348174848i,buffered=989859840i,commit_limit=16853958656i,low_free=0i,vmalloc_used=38834176i,total=16528056320i,inactive=3460194304i,available_percent=72.47821033562403,high_free=0i,huge_pages_free=0i,slab=542396416i,dirty=487424i,page_tables=39669760i,swap_free=8445227008i,vmalloc_chunk=0i,write_back=0i,free=4910850048i,huge_page_size=2097152i,low_total=0i,sunreclaim=92696576i,write_back_tmp=0i,used_percent=19.84003152283547,high_total=0i,mapped=938799104i,shared=922521600i,sreclaimable=449699840i,swap_cached=120016896i 30000000000 - cpu,cpu=cpu-total,host=gianarb usage_guest_nice=0,usage_user=5.682102628286863,usage_idle=89.88735919901404,usage_nice=0,usage_iowait=0.10012515644554432,usage_guest=0,usage_system=2.828535669587463,usage_irq=1.076345431789406,usage_softirq=0.4255319148935456,usage_steal=0 40000000000 - cpu,cpu=cpu3,host=gianarb usage_user=5.599999999992869,usage_steal=0,usage_guest_nice=0,usage_softirq=0.4999999999999318,usage_guest=0,usage_system=2.9999999999984537,usage_idle=89.39999999999075,usage_nice=0,usage_iowait=0.19999999999995852,usage_irq=1.2999999999994816 40000000000 -result: | - name: mem - time host inactive - ---- ---- -------- - 30000000000 gianarb 3460194304 - 40000000000 gianarb 3454791680 -``` - -There are only 3 required fields: - -* `query`: to specify the INFLUXQL query to run against the dataset. -* `dataset` are the data used for the query. -* `result` is the expected result returned from the query against the dataset. - -## Add a new testcase - -In order to `add a new test` you can create a new file with your dataset, your -query and your expected result. - -## Example of dataset creation - -You can use telegraf to create your own dataset. The one you see above comes -from this configuration: - -```toml -[agent] - interval = "10s" - round_interval = true - metric_batch_size = 1000 - metric_buffer_limit = 10000 - collection_jitter = "0s" - flush_interval = "10s" - flush_jitter = "0s" - precision = "" - hostname = "" - omit_hostname = false - -[[outputs.file]] - files = ["stdout", "./metrics.out"] - data_format = "influx" - -[[inputs.mem]] -[[inputs.cpu]] -``` - -Configure Telegraf to collect metrics using the `mem` and `cpu` input plugins. -Additionally, configure telegraf to use the `file` plugin for output, which will -write data as InfluxDB line protocol to `metrics.out`. `metrics.out` will become -the golden file used for the unit test. - -Timestamps within `metrics.out` may be normalized using the -`./cmd/goldenfilenormalizer` tool, which adjusts the first timestamp to a -specified base time and all later times relative to that. This is not mandatory, -but may simplify certain test cases, if start and end times are well defined. diff --git a/influxql/v1validation/goldenfiles/example_generated.yaml b/influxql/v1validation/goldenfiles/example_generated.yaml deleted file mode 100644 index b318691cb7c..00000000000 --- a/influxql/v1validation/goldenfiles/example_generated.yaml +++ /dev/null @@ -1,22 +0,0 @@ -description: | - This test suite demonstrates how to use the generated - option for producing a dataset based on TOML schema. 
- -tests: - - query: "select sum(f0) from m0" - result: | - name,tags,time,sum - m0,,0,10 - -generated: - start: "1000000000000" - end: "5000000000000" - toml: | - [[measurements]] - name = "m0" - tags = [ - { name = "tag0", source = [ "host1" ] }, - ] - fields = [ - { name = "f0", count = 10, source = 1 }, - ] diff --git a/influxql/v1validation/goldenfiles/join.yaml b/influxql/v1validation/goldenfiles/join.yaml deleted file mode 100644 index e0618fbf83a..00000000000 --- a/influxql/v1validation/goldenfiles/join.yaml +++ /dev/null @@ -1,27 +0,0 @@ -description: | - This test suite verifies correctness of queries containing - fields in the WHERE condition. In most cases, the field - in the SELECT projection is different, to test joins across - fields. - -tests: - - name: "no_aggregate" - query: "select f1 from m0 where f0 > 1 AND time >= 30000000000 AND time <= 50000000000" - result: | - name,tags,time,f1 - m0,,50000000000,30 - - - name: "aggregate" - description: | - This test verifies that an aggregate - - query: "SELECT sum(f1) FROM m0 WHERE f0 >= 1" - result: | - name,tags,time,sum - m0,,0,50 - -dataset: | - m0,t0=tv0 f0=0i,f1=10i 30000000000 - m0,t0=tv1 f0=1i,f1=20i 40000000000 - m0,t0=tv1 f0=2i,f1=30i 50000000000 - m1,t0=tv0 f0=0i,f1=10i 40000000000 diff --git a/influxql/v1validation/goldenfiles/select_aggregate.yaml b/influxql/v1validation/goldenfiles/select_aggregate.yaml deleted file mode 100644 index bf6d96ca48d..00000000000 --- a/influxql/v1validation/goldenfiles/select_aggregate.yaml +++ /dev/null @@ -1,39 +0,0 @@ -tests: - - query: "select sum(used), mean(active) from mem where time >= 30000000000 AND time < 50000000000" - result: | - name,tags,time,sum,mean - mem,,30000000000,6549852160,7164311552 - -dataset: | - mem,host=gianarb page_tables=39534592i,vmalloc_chunk=0i,write_back_tmp=0i,dirty=884736i,high_total=0i,available=11992494080i,used=3284553728i,active=7172775936i,huge_pages_free=0i,swap_total=8589930496i,vmalloc_used=38604800i,free=4928421888i,commit_limit=16853958656i,committed_as=12584218624i,mapped=939278336i,vmalloc_total=35184372087808i,write_back=0i,buffered=989163520i,wired=0i,low_free=0i,huge_page_size=2097152i,swap_cached=120016896i,swap_free=8445227008i,inactive=3461185536i,slab=542363648i,high_free=0i,shared=903233536i,sreclaimable=449650688i,total=16528056320i,cached=7325917184i,available_percent=72.55840522208482,sunreclaim=92712960i,used_percent=19.87259520664557,huge_pages_total=0i,low_total=0i 0 - mem,host=gianarb slab=542392320i,shared=931356672i,vmalloc_used=38506496i,sunreclaim=92692480i,write_back=0i,total=16528056320i,commit_limit=16853958656i,huge_pages_free=0i,vmalloc_chunk=0i,active=7159382016i,huge_page_size=2097152i,swap_total=8589930496i,used=3266457600i,buffered=989843456i,wired=0i,high_free=0i,page_tables=38965248i,swap_cached=120016896i,write_back_tmp=0i,inactive=3454332928i,used_percent=19.7631078740177,committed_as=12570415104i,dirty=339968i,high_total=0i,huge_pages_total=0i,sreclaimable=449699840i,vmalloc_total=35184372087808i,free=4914728960i,cached=7357026304i,available_percent=72.49944859820032,low_free=0i,low_total=0i,swap_free=8445227008i,available=11982749696i,mapped=938016768i 20000000000 - cpu,cpu=cpu-total,host=gianarb usage_user=6.102117061021656,usage_system=2.938978829389874,usage_nice=0,usage_irq=1.0709838107097638,usage_guest=0,usage_idle=89.41469489414925,usage_iowait=0.024906600249037934,usage_softirq=0.4483188044832491,usage_steal=0,usage_guest_nice=0 30000000000 - cpu,cpu=cpu3,host=gianarb 
usage_user=5.711422845699912,usage_nice=0,usage_iowait=0,usage_irq=1.302605210421328,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_system=2.905811623247315,usage_idle=89.67935871744233,usage_softirq=0.4008016032064967 30000000000 - cpu,cpu=cpu2,host=gianarb usage_user=5.982053838488868,usage_idle=89.73080757727861,usage_irq=0.8973080757725367,usage_softirq=0.4985044865405501,usage_steal=0,usage_guest=0,usage_system=2.8913260219364374,usage_nice=0,usage_iowait=0,usage_guest_nice=0 30000000000 - cpu,cpu=cpu1,host=gianarb usage_iowait=0,usage_irq=1.0912698412698993,usage_softirq=0.4960317460319519,usage_guest_nice=0,usage_steal=0,usage_guest=0,usage_user=6.051587301585189,usage_system=3.075396825396861,usage_idle=89.28571428573105,usage_nice=0 30000000000 - cpu,cpu=cpu0,host=gianarb usage_user=6.560636182902656,usage_system=2.9821073558650495,usage_idle=88.86679920477891,usage_irq=1.0934393638174862,usage_steal=0,usage_nice=0,usage_iowait=0,usage_softirq=0.4970178926437982,usage_guest=0,usage_guest_nice=0 30000000000 - mem,host=gianarb used=3279171584i,wired=0i,committed_as=12718530560i,huge_pages_total=0i,active=7167565824i,swap_total=8589930496i,vmalloc_total=35184372087808i,available=11979239424i,cached=7348174848i,buffered=989859840i,commit_limit=16853958656i,low_free=0i,vmalloc_used=38834176i,total=16528056320i,inactive=3460194304i,available_percent=72.47821033562403,high_free=0i,huge_pages_free=0i,slab=542396416i,dirty=487424i,page_tables=39669760i,swap_free=8445227008i,vmalloc_chunk=0i,write_back=0i,free=4910850048i,huge_page_size=2097152i,low_total=0i,sunreclaim=92696576i,write_back_tmp=0i,used_percent=19.84003152283547,high_total=0i,mapped=938799104i,shared=922521600i,sreclaimable=449699840i,swap_cached=120016896i 30000000000 - cpu,cpu=cpu-total,host=gianarb usage_guest_nice=0,usage_user=5.682102628286863,usage_idle=89.88735919901404,usage_nice=0,usage_iowait=0.10012515644554432,usage_guest=0,usage_system=2.828535669587463,usage_irq=1.076345431789406,usage_softirq=0.4255319148935456,usage_steal=0 40000000000 - cpu,cpu=cpu3,host=gianarb usage_user=5.599999999992869,usage_steal=0,usage_guest_nice=0,usage_softirq=0.4999999999999318,usage_guest=0,usage_system=2.9999999999984537,usage_idle=89.39999999999075,usage_nice=0,usage_iowait=0.19999999999995852,usage_irq=1.2999999999994816 40000000000 - cpu,cpu=cpu2,host=gianarb usage_guest=0,usage_guest_nice=0,usage_user=5.682951146558077,usage_system=2.8913260219341703,usage_iowait=0.09970089730813834,usage_irq=0.8973080757728201,usage_softirq=0.39880358923227005,usage_idle=90.02991026919126,usage_nice=0,usage_steal=0 40000000000 - mem,host=gianarb free=4954177536i,huge_pages_total=0i,sreclaimable=449699840i,committed_as=12572475392i,dirty=532480i,swap_total=8589930496i,inactive=3454791680i,wired=0i,commit_limit=16853958656i,used_percent=19.788658222577983,vmalloc_chunk=0i,vmalloc_total=35184372087808i,used=3270680576i,cached=7313321984i,slab=542412800i,sunreclaim=92712960i,available=12022804480i,buffered=989876224i,active=7161057280i,huge_pages_free=0i,mapped=933064704i,available_percent=72.741792786921,high_total=0i,huge_page_size=2097152i,page_tables=38985728i,shared=887447552i,vmalloc_used=38621184i,write_back=0i,write_back_tmp=0i,total=16528056320i,high_free=0i,low_free=0i,low_total=0i,swap_cached=120016896i,swap_free=8445227008i 40000000000 - cpu,cpu=cpu1,host=gianarb 
usage_user=5.923694779116283,usage_system=2.6104417670674573,usage_idle=90.0602409638421,usage_nice=0,usage_steal=0,usage_guest_nice=0,usage_iowait=0.10040160642565496,usage_irq=0.9036144578311087,usage_softirq=0.40160642570261984,usage_guest=0 40000000000 - cpu,cpu=cpu0,host=gianarb usage_user=5.432595573437017,usage_system=2.816901408448739,usage_idle=90.34205231386129,usage_nice=0,usage_softirq=0.4024144869220654,usage_iowait=0,usage_irq=1.006036217302304,usage_steal=0,usage_guest=0,usage_guest_nice=0 40000000000 - mem,host=gianarb free=4952854528i,cached=7313227776i,low_free=0i,low_total=0i,swap_free=8445227008i,write_back=0i,total=16528056320i,slab=542396416i,commit_limit=16853958656i,huge_page_size=2097152i,used=3272081408i,available_percent=72.7339120780537,vmalloc_used=38621184i,used_percent=19.797133701925816,committed_as=12559798272i,sunreclaim=92696576i,vmalloc_chunk=0i,sreclaimable=449699840i,swap_cached=120016896i,inactive=3454795776i,wired=0i,high_free=0i,huge_pages_free=0i,mapped=933007360i,vmalloc_total=35184372087808i,dirty=614400i,huge_pages_total=0i,page_tables=39014400i,shared=887349248i,swap_total=8589930496i,write_back_tmp=0i,available=12021501952i,buffered=989892608i,active=7161757696i,high_total=0i 50000000000 - cpu,cpu=cpu-total,host=gianarb usage_softirq=0.40251572327036017,usage_guest_nice=0,usage_iowait=0.0754716981132104,usage_system=2.088050314465226,usage_idle=91.19496855345912,usage_nice=0,usage_irq=1.0062893081763293,usage_steal=0,usage_guest=0,usage_user=5.232704402510964 50000000000 - cpu,cpu=cpu3,host=gianarb usage_system=1.9114688128780644,usage_nice=0,usage_iowait=0,usage_guest_nice=0,usage_user=4.225352112677345,usage_idle=92.15291750504086,usage_irq=1.3078470824950963,usage_softirq=0.4024144869215021,usage_steal=0,usage_guest=0 50000000000 - cpu,cpu=cpu2,host=gianarb usage_guest=0,usage_guest_nice=0,usage_user=4.646464646464331,usage_system=2.0202020202021824,usage_nice=0,usage_iowait=0,usage_softirq=0.30303030303026995,usage_idle=92.1212121211883,usage_irq=0.9090909090908099,usage_steal=0 50000000000 - cpu,cpu=cpu1,host=gianarb usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_user=6.432160804022921,usage_idle=89.94974874371933,usage_iowait=0.20100502512564486,usage_softirq=0.4020100502512897,usage_system=2.1105527638193067,usage_nice=0,usage_irq=0.9045226130652948 50000000000 - cpu,cpu=cpu0,host=gianarb usage_guest=0,usage_guest_nice=0,usage_system=2.206619859579557,usage_idle=90.5717151454471,usage_nice=0,usage_irq=1.1033099297897786,usage_steal=0,usage_user=5.717151454361589,usage_iowait=0,usage_softirq=0.40120361083223216 50000000000 - cpu,cpu=cpu-total,host=gianarb usage_iowait=0.12537612838516934,usage_irq=1.02808425275836,usage_softirq=0.37612838515565056,usage_guest=0,usage_user=4.864593781349303,usage_system=2.557673019055687,usage_idle=91.04814443328343,usage_nice=0,usage_steal=0,usage_guest_nice=0 60000000000 - cpu,cpu=cpu3,host=gianarb usage_nice=0,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_softirq=0.3968253968255333,usage_user=5.853174603177146,usage_system=3.075396825396861,usage_idle=89.28571428573105,usage_iowait=0.19841269841269618,usage_irq=1.190476190476459 60000000000 - cpu,cpu=cpu2,host=gianarb usage_idle=90.45226130652605,usage_iowait=0,usage_irq=1.00502512562801,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_user=5.527638190956198,usage_system=2.613065326632883,usage_nice=0,usage_softirq=0.4020100502512897 60000000000 - cpu,cpu=cpu1,host=gianarb 
usage_irq=1.004016064257059,usage_softirq=0.40160642570276656,usage_guest=0,usage_idle=92.06827309236002,usage_nice=0,usage_iowait=0.10040160642569164,usage_steal=0,usage_guest_nice=0,usage_user=3.9156626505969085,usage_system=2.5100401606427902 60000000000 - mem,host=gianarb used_percent=19.80092536373932,dirty=606208i,low_free=0i,low_total=0i,swap_free=8445227008i,total=16528056320i,buffered=989941760i,sunreclaim=92696576i,swap_cached=120016896i,swap_total=8589930496i,vmalloc_chunk=0i,available_percent=72.72972390258651,sreclaimable=449699840i,huge_page_size=2097152i,page_tables=39030784i,vmalloc_total=35184372087808i,vmalloc_used=38621184i,free=4951887872i,cached=7313518592i,mapped=933064704i,write_back=0i,committed_as=12566306816i,high_free=0i,huge_pages_total=0i,available=12020809728i,inactive=3454889984i,commit_limit=16853958656i,huge_pages_free=0i,shared=887414784i,active=7163142144i,slab=542396416i,high_total=0i,write_back_tmp=0i,used=3272708096i,wired=0i 60000000000 - cpu,cpu=cpu0,host=gianarb usage_user=4.233870967743414,usage_idle=92.1370967742039,usage_irq=0.9072580645166096,usage_softirq=0.40322580645131717,usage_steal=0,usage_guest=0,usage_system=2.0161290322588776,usage_nice=0,usage_iowait=0.30241935483870275,usage_guest_nice=0 60000000000 - cpu,cpu=cpu-total,host=gianarb usage_system=2.1646111250960742,usage_nice=0,usage_iowait=0.07550969041027993,usage_softirq=0.4027183488547309,usage_guest=0,usage_guest_nice=0,usage_user=5.008809463876584,usage_irq=1.031965768940087,usage_steal=0,usage_idle=91.31638560282546 70000000000 - cpu,cpu=cpu3,host=gianarb usage_user=5.8116232464903925,usage_system=2.3046092184363167,usage_irq=1.1022044088174183,usage_guest_nice=0,usage_idle=90.28056112219949,usage_nice=0,usage_iowait=0.10020040080162232,usage_softirq=0.4008016032062045,usage_steal=0,usage_guest=0 70000000000 - cpu,cpu=cpu2,host=gianarb usage_idle=91.54929577468191,usage_nice=0,usage_iowait=0.20120724346075106,usage_irq=0.9054325955735942,usage_softirq=0.4024144869215021,usage_guest=0,usage_user=4.8289738430545945,usage_system=2.112676056336385,usage_guest_nice=0,usage_steal=0 70000000000 - cpu,cpu=cpu1,host=gianarb usage_user=4.325955734408353,usage_system=2.2132796780681665,usage_idle=91.95171026157863,usage_nice=0,usage_softirq=0.40241448692149784,usage_steal=0,usage_guest=0,usage_iowait=0.10060362173033871,usage_irq=1.0060362173035302,usage_guest_nice=0 70000000000 - cpu,cpu=cpu0,host=gianarb usage_user=4.964539007090499,usage_nice=0,usage_guest=0,usage_irq=1.114488348531155,usage_softirq=0.40526849037569845,usage_steal=0,usage_guest_nice=0,usage_system=2.0263424518750366,usage_idle=91.48936170212531,usage_iowait=0 70000000000 - mem,host=gianarb total=16528056320i,used_percent=19.80218925101049,shared=887414784i,swap_total=8589930496i,low_free=0i,swap_free=8445227008i,available=12020600832i,slab=542396416i,commit_limit=16853958656i,dirty=131072i,huge_page_size=2097152i,buffered=989958144i,wired=0i,sunreclaim=92696576i,write_back=0i,used=3272916992i,cached=7313534976i,committed_as=12573659136i,high_total=0i,sreclaimable=449699840i,free=4951646208i,vmalloc_total=35184372087808i,active=7163576320i,huge_pages_total=0i,mapped=933154816i,vmalloc_used=38604800i,swap_cached=120016896i,write_back_tmp=0i,inactive=3454906368i,available_percent=72.72846001531533,high_free=0i,low_total=0i,page_tables=38998016i,huge_pages_free=0i,vmalloc_chunk=0i 70000000000 diff --git a/influxql/v1validation/goldenfiles/select_simple.yaml b/influxql/v1validation/goldenfiles/select_simple.yaml deleted file 
mode 100644 index f819e95eb63..00000000000 --- a/influxql/v1validation/goldenfiles/select_simple.yaml +++ /dev/null @@ -1,40 +0,0 @@ -tests: - - query: "select host, inactive from mem where time >=30000000000 AND time < 50000000000" - result: | - name,tags,time,host,inactive - mem,,30000000000,gianarb,3460194304 - mem,,40000000000,gianarb,3454791680 - -dataset: | - mem,host=gianarb page_tables=39534592i,vmalloc_chunk=0i,write_back_tmp=0i,dirty=884736i,high_total=0i,available=11992494080i,used=3284553728i,active=7172775936i,huge_pages_free=0i,swap_total=8589930496i,vmalloc_used=38604800i,free=4928421888i,commit_limit=16853958656i,committed_as=12584218624i,mapped=939278336i,vmalloc_total=35184372087808i,write_back=0i,buffered=989163520i,wired=0i,low_free=0i,huge_page_size=2097152i,swap_cached=120016896i,swap_free=8445227008i,inactive=3461185536i,slab=542363648i,high_free=0i,shared=903233536i,sreclaimable=449650688i,total=16528056320i,cached=7325917184i,available_percent=72.55840522208482,sunreclaim=92712960i,used_percent=19.87259520664557,huge_pages_total=0i,low_total=0i 0 - mem,host=gianarb slab=542392320i,shared=931356672i,vmalloc_used=38506496i,sunreclaim=92692480i,write_back=0i,total=16528056320i,commit_limit=16853958656i,huge_pages_free=0i,vmalloc_chunk=0i,active=7159382016i,huge_page_size=2097152i,swap_total=8589930496i,used=3266457600i,buffered=989843456i,wired=0i,high_free=0i,page_tables=38965248i,swap_cached=120016896i,write_back_tmp=0i,inactive=3454332928i,used_percent=19.7631078740177,committed_as=12570415104i,dirty=339968i,high_total=0i,huge_pages_total=0i,sreclaimable=449699840i,vmalloc_total=35184372087808i,free=4914728960i,cached=7357026304i,available_percent=72.49944859820032,low_free=0i,low_total=0i,swap_free=8445227008i,available=11982749696i,mapped=938016768i 20000000000 - cpu,cpu=cpu-total,host=gianarb usage_user=6.102117061021656,usage_system=2.938978829389874,usage_nice=0,usage_irq=1.0709838107097638,usage_guest=0,usage_idle=89.41469489414925,usage_iowait=0.024906600249037934,usage_softirq=0.4483188044832491,usage_steal=0,usage_guest_nice=0 30000000000 - cpu,cpu=cpu3,host=gianarb usage_user=5.711422845699912,usage_nice=0,usage_iowait=0,usage_irq=1.302605210421328,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_system=2.905811623247315,usage_idle=89.67935871744233,usage_softirq=0.4008016032064967 30000000000 - cpu,cpu=cpu2,host=gianarb usage_user=5.982053838488868,usage_idle=89.73080757727861,usage_irq=0.8973080757725367,usage_softirq=0.4985044865405501,usage_steal=0,usage_guest=0,usage_system=2.8913260219364374,usage_nice=0,usage_iowait=0,usage_guest_nice=0 30000000000 - cpu,cpu=cpu1,host=gianarb usage_iowait=0,usage_irq=1.0912698412698993,usage_softirq=0.4960317460319519,usage_guest_nice=0,usage_steal=0,usage_guest=0,usage_user=6.051587301585189,usage_system=3.075396825396861,usage_idle=89.28571428573105,usage_nice=0 30000000000 - cpu,cpu=cpu0,host=gianarb usage_user=6.560636182902656,usage_system=2.9821073558650495,usage_idle=88.86679920477891,usage_irq=1.0934393638174862,usage_steal=0,usage_nice=0,usage_iowait=0,usage_softirq=0.4970178926437982,usage_guest=0,usage_guest_nice=0 30000000000 - mem,host=gianarb 
used=3279171584i,wired=0i,committed_as=12718530560i,huge_pages_total=0i,active=7167565824i,swap_total=8589930496i,vmalloc_total=35184372087808i,available=11979239424i,cached=7348174848i,buffered=989859840i,commit_limit=16853958656i,low_free=0i,vmalloc_used=38834176i,total=16528056320i,inactive=3460194304i,available_percent=72.47821033562403,high_free=0i,huge_pages_free=0i,slab=542396416i,dirty=487424i,page_tables=39669760i,swap_free=8445227008i,vmalloc_chunk=0i,write_back=0i,free=4910850048i,huge_page_size=2097152i,low_total=0i,sunreclaim=92696576i,write_back_tmp=0i,used_percent=19.84003152283547,high_total=0i,mapped=938799104i,shared=922521600i,sreclaimable=449699840i,swap_cached=120016896i 30000000000 - cpu,cpu=cpu-total,host=gianarb usage_guest_nice=0,usage_user=5.682102628286863,usage_idle=89.88735919901404,usage_nice=0,usage_iowait=0.10012515644554432,usage_guest=0,usage_system=2.828535669587463,usage_irq=1.076345431789406,usage_softirq=0.4255319148935456,usage_steal=0 40000000000 - cpu,cpu=cpu3,host=gianarb usage_user=5.599999999992869,usage_steal=0,usage_guest_nice=0,usage_softirq=0.4999999999999318,usage_guest=0,usage_system=2.9999999999984537,usage_idle=89.39999999999075,usage_nice=0,usage_iowait=0.19999999999995852,usage_irq=1.2999999999994816 40000000000 - cpu,cpu=cpu2,host=gianarb usage_guest=0,usage_guest_nice=0,usage_user=5.682951146558077,usage_system=2.8913260219341703,usage_iowait=0.09970089730813834,usage_irq=0.8973080757728201,usage_softirq=0.39880358923227005,usage_idle=90.02991026919126,usage_nice=0,usage_steal=0 40000000000 - mem,host=gianarb free=4954177536i,huge_pages_total=0i,sreclaimable=449699840i,committed_as=12572475392i,dirty=532480i,swap_total=8589930496i,inactive=3454791680i,wired=0i,commit_limit=16853958656i,used_percent=19.788658222577983,vmalloc_chunk=0i,vmalloc_total=35184372087808i,used=3270680576i,cached=7313321984i,slab=542412800i,sunreclaim=92712960i,available=12022804480i,buffered=989876224i,active=7161057280i,huge_pages_free=0i,mapped=933064704i,available_percent=72.741792786921,high_total=0i,huge_page_size=2097152i,page_tables=38985728i,shared=887447552i,vmalloc_used=38621184i,write_back=0i,write_back_tmp=0i,total=16528056320i,high_free=0i,low_free=0i,low_total=0i,swap_cached=120016896i,swap_free=8445227008i 40000000000 - cpu,cpu=cpu1,host=gianarb usage_user=5.923694779116283,usage_system=2.6104417670674573,usage_idle=90.0602409638421,usage_nice=0,usage_steal=0,usage_guest_nice=0,usage_iowait=0.10040160642565496,usage_irq=0.9036144578311087,usage_softirq=0.40160642570261984,usage_guest=0 40000000000 - cpu,cpu=cpu0,host=gianarb usage_user=5.432595573437017,usage_system=2.816901408448739,usage_idle=90.34205231386129,usage_nice=0,usage_softirq=0.4024144869220654,usage_iowait=0,usage_irq=1.006036217302304,usage_steal=0,usage_guest=0,usage_guest_nice=0 40000000000 - mem,host=gianarb 
free=4952854528i,cached=7313227776i,low_free=0i,low_total=0i,swap_free=8445227008i,write_back=0i,total=16528056320i,slab=542396416i,commit_limit=16853958656i,huge_page_size=2097152i,used=3272081408i,available_percent=72.7339120780537,vmalloc_used=38621184i,used_percent=19.797133701925816,committed_as=12559798272i,sunreclaim=92696576i,vmalloc_chunk=0i,sreclaimable=449699840i,swap_cached=120016896i,inactive=3454795776i,wired=0i,high_free=0i,huge_pages_free=0i,mapped=933007360i,vmalloc_total=35184372087808i,dirty=614400i,huge_pages_total=0i,page_tables=39014400i,shared=887349248i,swap_total=8589930496i,write_back_tmp=0i,available=12021501952i,buffered=989892608i,active=7161757696i,high_total=0i 50000000000 - cpu,cpu=cpu-total,host=gianarb usage_softirq=0.40251572327036017,usage_guest_nice=0,usage_iowait=0.0754716981132104,usage_system=2.088050314465226,usage_idle=91.19496855345912,usage_nice=0,usage_irq=1.0062893081763293,usage_steal=0,usage_guest=0,usage_user=5.232704402510964 50000000000 - cpu,cpu=cpu3,host=gianarb usage_system=1.9114688128780644,usage_nice=0,usage_iowait=0,usage_guest_nice=0,usage_user=4.225352112677345,usage_idle=92.15291750504086,usage_irq=1.3078470824950963,usage_softirq=0.4024144869215021,usage_steal=0,usage_guest=0 50000000000 - cpu,cpu=cpu2,host=gianarb usage_guest=0,usage_guest_nice=0,usage_user=4.646464646464331,usage_system=2.0202020202021824,usage_nice=0,usage_iowait=0,usage_softirq=0.30303030303026995,usage_idle=92.1212121211883,usage_irq=0.9090909090908099,usage_steal=0 50000000000 - cpu,cpu=cpu1,host=gianarb usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_user=6.432160804022921,usage_idle=89.94974874371933,usage_iowait=0.20100502512564486,usage_softirq=0.4020100502512897,usage_system=2.1105527638193067,usage_nice=0,usage_irq=0.9045226130652948 50000000000 - cpu,cpu=cpu0,host=gianarb usage_guest=0,usage_guest_nice=0,usage_system=2.206619859579557,usage_idle=90.5717151454471,usage_nice=0,usage_irq=1.1033099297897786,usage_steal=0,usage_user=5.717151454361589,usage_iowait=0,usage_softirq=0.40120361083223216 50000000000 - cpu,cpu=cpu-total,host=gianarb usage_iowait=0.12537612838516934,usage_irq=1.02808425275836,usage_softirq=0.37612838515565056,usage_guest=0,usage_user=4.864593781349303,usage_system=2.557673019055687,usage_idle=91.04814443328343,usage_nice=0,usage_steal=0,usage_guest_nice=0 60000000000 - cpu,cpu=cpu3,host=gianarb usage_nice=0,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_softirq=0.3968253968255333,usage_user=5.853174603177146,usage_system=3.075396825396861,usage_idle=89.28571428573105,usage_iowait=0.19841269841269618,usage_irq=1.190476190476459 60000000000 - cpu,cpu=cpu2,host=gianarb usage_idle=90.45226130652605,usage_iowait=0,usage_irq=1.00502512562801,usage_steal=0,usage_guest=0,usage_guest_nice=0,usage_user=5.527638190956198,usage_system=2.613065326632883,usage_nice=0,usage_softirq=0.4020100502512897 60000000000 - cpu,cpu=cpu1,host=gianarb usage_irq=1.004016064257059,usage_softirq=0.40160642570276656,usage_guest=0,usage_idle=92.06827309236002,usage_nice=0,usage_iowait=0.10040160642569164,usage_steal=0,usage_guest_nice=0,usage_user=3.9156626505969085,usage_system=2.5100401606427902 60000000000 - mem,host=gianarb 
used_percent=19.80092536373932,dirty=606208i,low_free=0i,low_total=0i,swap_free=8445227008i,total=16528056320i,buffered=989941760i,sunreclaim=92696576i,swap_cached=120016896i,swap_total=8589930496i,vmalloc_chunk=0i,available_percent=72.72972390258651,sreclaimable=449699840i,huge_page_size=2097152i,page_tables=39030784i,vmalloc_total=35184372087808i,vmalloc_used=38621184i,free=4951887872i,cached=7313518592i,mapped=933064704i,write_back=0i,committed_as=12566306816i,high_free=0i,huge_pages_total=0i,available=12020809728i,inactive=3454889984i,commit_limit=16853958656i,huge_pages_free=0i,shared=887414784i,active=7163142144i,slab=542396416i,high_total=0i,write_back_tmp=0i,used=3272708096i,wired=0i 60000000000 - cpu,cpu=cpu0,host=gianarb usage_user=4.233870967743414,usage_idle=92.1370967742039,usage_irq=0.9072580645166096,usage_softirq=0.40322580645131717,usage_steal=0,usage_guest=0,usage_system=2.0161290322588776,usage_nice=0,usage_iowait=0.30241935483870275,usage_guest_nice=0 60000000000 - cpu,cpu=cpu-total,host=gianarb usage_system=2.1646111250960742,usage_nice=0,usage_iowait=0.07550969041027993,usage_softirq=0.4027183488547309,usage_guest=0,usage_guest_nice=0,usage_user=5.008809463876584,usage_irq=1.031965768940087,usage_steal=0,usage_idle=91.31638560282546 70000000000 - cpu,cpu=cpu3,host=gianarb usage_user=5.8116232464903925,usage_system=2.3046092184363167,usage_irq=1.1022044088174183,usage_guest_nice=0,usage_idle=90.28056112219949,usage_nice=0,usage_iowait=0.10020040080162232,usage_softirq=0.4008016032062045,usage_steal=0,usage_guest=0 70000000000 - cpu,cpu=cpu2,host=gianarb usage_idle=91.54929577468191,usage_nice=0,usage_iowait=0.20120724346075106,usage_irq=0.9054325955735942,usage_softirq=0.4024144869215021,usage_guest=0,usage_user=4.8289738430545945,usage_system=2.112676056336385,usage_guest_nice=0,usage_steal=0 70000000000 - cpu,cpu=cpu1,host=gianarb usage_user=4.325955734408353,usage_system=2.2132796780681665,usage_idle=91.95171026157863,usage_nice=0,usage_softirq=0.40241448692149784,usage_steal=0,usage_guest=0,usage_iowait=0.10060362173033871,usage_irq=1.0060362173035302,usage_guest_nice=0 70000000000 - cpu,cpu=cpu0,host=gianarb usage_user=4.964539007090499,usage_nice=0,usage_guest=0,usage_irq=1.114488348531155,usage_softirq=0.40526849037569845,usage_steal=0,usage_guest_nice=0,usage_system=2.0263424518750366,usage_idle=91.48936170212531,usage_iowait=0 70000000000 - mem,host=gianarb total=16528056320i,used_percent=19.80218925101049,shared=887414784i,swap_total=8589930496i,low_free=0i,swap_free=8445227008i,available=12020600832i,slab=542396416i,commit_limit=16853958656i,dirty=131072i,huge_page_size=2097152i,buffered=989958144i,wired=0i,sunreclaim=92696576i,write_back=0i,used=3272916992i,cached=7313534976i,committed_as=12573659136i,high_total=0i,sreclaimable=449699840i,free=4951646208i,vmalloc_total=35184372087808i,active=7163576320i,huge_pages_total=0i,mapped=933154816i,vmalloc_used=38604800i,swap_cached=120016896i,write_back_tmp=0i,inactive=3454906368i,available_percent=72.72846001531533,high_free=0i,low_total=0i,page_tables=38998016i,huge_pages_free=0i,vmalloc_chunk=0i 70000000000 diff --git a/influxql/v1validation/goldenfiles/select_star.yaml b/influxql/v1validation/goldenfiles/select_star.yaml deleted file mode 100644 index da17b137c5b..00000000000 --- a/influxql/v1validation/goldenfiles/select_star.yaml +++ /dev/null @@ -1,14 +0,0 @@ -tests: - - query: "select * from m0 where time >= 30000000000 AND time < 50000000000" - billing: - point_count: 1 - result: | - 
name,tags,time,f0,f1,t0 - m0,,30000000000,0,10,tv0 - m0,,40000000000,1,20,tv1 - -dataset: | - m0,t0=tv0 f0=0i,f1=10i 30000000000 - m0,t0=tv1 f0=1i,f1=20i 40000000000 - m0,t0=tv1 f0=2i,f1=30i 70000000000 - m1,t0=tv0 f0=0i,f1=10i 40000000000 diff --git a/influxql/v1validation/validation.json b/influxql/v1validation/validation.json deleted file mode 100644 index 283e0ec382b..00000000000 --- a/influxql/v1validation/validation.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "http://influxdata.com/idpe.schema.json", - "title": "InfluxQL Test", - "description": "An InfluxQL Test Schema", - "type": "object", - "required": ["tests"], - "additionalProperties": false, - "properties": { - "description": { - "description": "An optional description of the test suite", - "type": "string" - }, - "tests": { - "type": "array", - "items": {"$ref": "#/definitions/test"} - }, - "dataset": { - "description": "The dataset to be used for this test", - "type": "string" - }, - "generated": { - "description": "The dataset to be used for this test", - "type": "object", - "required": ["start", "end", "toml"], - "properties": { - "start": { - "description": "The start time in restricted RFC3339 or nanoseconds", - "type": "string", - "pattern": "^(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}Z|\\d+)$" - }, - "end": { - "description": "The end time in restricted RFC3339", - "type": "string", - "pattern": "^(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}Z|\\d+)$" - }, - "toml": { - "description": "The TOML schema for the dataset", - "type": "string" - } - } - } - }, - "oneOf": [ - {"required": ["dataset"]}, - {"required": ["generated"]} - ], - "definitions": { - "test": { - "type": "object", - "required": ["query", "result"], - "additionalProperties": false, - "properties": { - "name": { - "description": "A name for this test", - "type": "string" - }, - "description": { - "description": "An optional description of the test", - "type": "string" - }, - "query": { - "description": "The InfluxQL query to under test", - "type": "string" - }, - "result": { - "description": "The expected results in CSV format", - "type": "string" - } - } - } - } -} diff --git a/influxql/v1validation/validation_test.go b/influxql/v1validation/validation_test.go deleted file mode 100644 index c34a19bc65b..00000000000 --- a/influxql/v1validation/validation_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package v1validation - -import ( - "context" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "strconv" - "strings" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/mock" - datagen "github.com/influxdata/influxdb/v2/pkg/data/gen" - "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/tests" - "github.com/influxdata/influxdb/v2/tests/pipeline" - "github.com/stretchr/testify/assert" - "go.uber.org/zap/zapcore" - "gopkg.in/yaml.v2" -) - -var skipMap = map[string]string{ - // file_name_without_extension: skip_reason -} - -type GeneratedDataset struct { - Start string `yaml:"start"` - End string `yaml:"end"` - Toml string `yaml:"toml"` -} - -type TestSuite struct { - Tests []Test `yaml:"tests"` - Dataset string `yaml:"dataset"` // Line protocol OR - Generated *GeneratedDataset `yaml:"generated"` // TOML schema description -} - -type Test struct 
{ - Name string `yaml:"name"` - Query string `yaml:"query"` - Result string `yaml:"result"` -} - -func TestGoldenFiles(t *testing.T) { - err := filepath.WalkDir("./goldenfiles", func(path string, info os.DirEntry, err error) error { - if info.IsDir() { - return nil - } - base := filepath.Base(path) - ext := filepath.Ext(base) - testName := strings.TrimSuffix(base, ext) - t.Run(testName, func(t *testing.T) { - if reason, ok := skipMap[testName]; ok { - t.Skip(reason) - } - gf := testSuiteFromPath(t, path) - validate(t, gf) - }) - return nil - }) - if err != nil { - t.Fatal(err) - } -} - -// Unmarshal a TestSuite from a YAML file -func testSuiteFromPath(t *testing.T, path string) *TestSuite { - f, err := os.Open(path) - if err != nil { - t.Fatal(err) - } - b, err := io.ReadAll(f) - if err != nil { - t.Fatal(err) - } - - gf := &TestSuite{} - err = yaml.Unmarshal(b, gf) - if err != nil { - t.Fatal(err) - } - return gf -} - -func validate(t *testing.T, gf *TestSuite) { - t.Helper() - ctx := context.Background() - p := tests.NewDefaultPipeline(t, func(o *launcher.InfluxdOpts) { - o.LogLevel = zapcore.ErrorLevel - }) - p.MustOpen() - defer p.MustClose() - orgID := p.DefaultOrgID - bucketID := p.DefaultBucketID - fx := pipeline.NewBaseFixture(t, p.Pipeline, orgID, bucketID) - - var dataset string - - if gf.Generated != nil { - spec, err := datagen.NewSpecFromToml(gf.Generated.Toml) - if err != nil { - t.Fatalf("error processing TOML: %v", err) - } - - tryParse := func(s string) (time.Time, error) { - if v, err := strconv.Atoi(s); err == nil { - return time.Unix(0, int64(v)), nil - } - - return time.Parse(time.RFC3339, s) - } - - start, err := tryParse(gf.Generated.Start) - if err != nil { - t.Fatalf("error parsing start: %v", err) - } - end, err := tryParse(gf.Generated.End) - if err != nil { - t.Fatalf("error parsing end: %v", err) - } - - if end.Before(start) { - t.Fatal("error: start must be before end") - } - - sg := datagen.NewSeriesGeneratorFromSpec(spec, datagen.TimeRange{ - Start: start, - End: end, - }) - - rs := mock.NewResultSetFromSeriesGenerator(sg, mock.WithGeneratorMaxValues(10000)) - var sb strings.Builder - if err := reads.ResultSetToLineProtocol(&sb, rs); err != nil { - t.Fatalf("error generating data: %v", err) - } - dataset = sb.String() - if len(dataset) == 0 { - t.Fatal("no data generated") - } - } else { - dataset = gf.Dataset - } - - if err := fx.Admin.WriteBatch(dataset); err != nil { - t.Fatal(err) - } - p.Flush() - - ctx = icontext.SetAuthorizer(ctx, tests.MakeAuthorization(p.DefaultOrgID, p.DefaultUserID, influxdb.OperPermissions())) - - if err := p.Launcher.DBRPMappingService().Create(ctx, &influxdb.DBRPMapping{ - Database: "mydb", - RetentionPolicy: "autogen", - Default: true, - OrganizationID: orgID, - BucketID: bucketID, - }); err != nil { - t.Fatal(err) - } - - for i := range gf.Tests { - test := &gf.Tests[i] - name := test.Name - if len(name) == 0 { - name = fmt.Sprintf("query_%02d", i) - } - t.Run(name, func(t *testing.T) { - err := fx.Admin.Client.Get("/query"). - QueryParams([2]string{"db", "mydb"}). - QueryParams([2]string{"q", test.Query}). - QueryParams([2]string{"epoch", "ns"}). - Header("Content-Type", "application/vnd.influxql"). - Header("Accept", "application/csv"). - RespFn(func(resp *http.Response) error { - b, err := io.ReadAll(resp.Body) - assert.NoError(t, err) - assert.Equal(t, test.Result, string(b)) - return nil - }). 
- Do(ctx) - if err != nil { - t.Fatal(err) - } - }) - } -} diff --git a/inmem/kv.go b/inmem/kv.go deleted file mode 100644 index 38800cc5f8b..00000000000 --- a/inmem/kv.go +++ /dev/null @@ -1,473 +0,0 @@ -package inmem - -import ( - "bytes" - "context" - "fmt" - "io" - "sync" - - "github.com/google/btree" - "github.com/influxdata/influxdb/v2/kv" -) - -// ensure *KVStore implement kv.SchemaStore interface -var _ kv.SchemaStore = (*KVStore)(nil) - -// cursorBatchSize is the size of a batch sent by a forward cursors -// tree iterator -const cursorBatchSize = 1000 - -// KVStore is an in memory btree backed kv.Store. -type KVStore struct { - mu sync.RWMutex - buckets map[string]*Bucket - ro map[string]*bucket -} - -// NewKVStore creates an instance of a KVStore. -func NewKVStore() *KVStore { - return &KVStore{ - buckets: map[string]*Bucket{}, - ro: map[string]*bucket{}, - } -} - -// View opens up a transaction with a read lock. -func (s *KVStore) View(ctx context.Context, fn func(kv.Tx) error) error { - s.mu.RLock() - defer s.mu.RUnlock() - - return fn(&Tx{ - kv: s, - writable: false, - ctx: ctx, - }) -} - -// Update opens up a transaction with a write lock. -func (s *KVStore) Update(ctx context.Context, fn func(kv.Tx) error) error { - s.mu.Lock() - defer s.mu.Unlock() - - return fn(&Tx{ - kv: s, - writable: true, - ctx: ctx, - }) -} - -// CreateBucket creates a bucket with the provided name if one -// does not exist. -func (s *KVStore) CreateBucket(ctx context.Context, name []byte) error { - s.mu.Lock() - defer s.mu.Unlock() - - _, ok := s.buckets[string(name)] - if !ok { - bkt := &Bucket{btree: btree.New(2)} - s.buckets[string(name)] = bkt - s.ro[string(name)] = &bucket{Bucket: bkt} - } - - return nil -} - -// DeleteBucket creates a bucket with the provided name if one -// does not exist. -func (s *KVStore) DeleteBucket(ctx context.Context, name []byte) error { - s.mu.Lock() - defer s.mu.Unlock() - - delete(s.buckets, string(name)) - - return nil -} - -func (s *KVStore) RLock() { - s.mu.RLock() -} - -func (s *KVStore) RUnlock() { - s.mu.RUnlock() -} - -func (s *KVStore) Backup(ctx context.Context, w io.Writer) error { - panic("not implemented") -} - -func (s *KVStore) Restore(ctx context.Context, r io.Reader) error { - panic("not implemented") -} - -// Flush removes all data from the buckets. Used for testing. -func (s *KVStore) Flush(ctx context.Context) { - s.mu.Lock() - defer s.mu.Unlock() - for _, b := range s.buckets { - b.btree.Clear(false) - } -} - -// Buckets returns the names of all buckets within inmem.KVStore. -func (s *KVStore) Buckets(ctx context.Context) [][]byte { - s.mu.RLock() - defer s.mu.RUnlock() - - buckets := make([][]byte, 0, len(s.buckets)) - for b := range s.buckets { - buckets = append(buckets, []byte(b)) - } - return buckets -} - -// Tx is an in memory transaction. -// TODO: make transactions actually transactional -type Tx struct { - kv *KVStore - writable bool - ctx context.Context -} - -// Context returns the context for the transaction. -func (t *Tx) Context() context.Context { - return t.ctx -} - -// WithContext sets the context for the transaction. -func (t *Tx) WithContext(ctx context.Context) { - t.ctx = ctx -} - -// Bucket retrieves the bucket at the provided key. 
-func (t *Tx) Bucket(b []byte) (kv.Bucket, error) { - bkt, ok := t.kv.buckets[string(b)] - if !ok { - return nil, fmt.Errorf("bucket %q: %w", string(b), kv.ErrBucketNotFound) - } - - if t.writable { - return bkt, nil - } - - return t.kv.ro[string(b)], nil -} - -// Bucket is a btree that implements kv.Bucket. -type Bucket struct { - mu sync.RWMutex - btree *btree.BTree -} - -type bucket struct { - kv.Bucket -} - -// Put wraps the put method of a kv bucket and ensures that the -// bucket is writable. -func (b *bucket) Put(_, _ []byte) error { - return kv.ErrTxNotWritable -} - -// Delete wraps the delete method of a kv bucket and ensures that the -// bucket is writable. -func (b *bucket) Delete(_ []byte) error { - return kv.ErrTxNotWritable -} - -type item struct { - key []byte - value []byte -} - -// Less is used to implement btree.Item. -func (i *item) Less(b btree.Item) bool { - j, ok := b.(*item) - if !ok { - return false - } - - return bytes.Compare(i.key, j.key) < 0 -} - -// Get retrieves the value at the provided key. -func (b *Bucket) Get(key []byte) ([]byte, error) { - b.mu.RLock() - defer b.mu.RUnlock() - - i := b.btree.Get(&item{key: key}) - - if i == nil { - return nil, kv.ErrKeyNotFound - } - - j, ok := i.(*item) - if !ok { - return nil, fmt.Errorf("error item is type %T not *item", i) - } - - return j.value, nil -} - -// Get retrieves a batch of values for the provided keys. -func (b *Bucket) GetBatch(keys ...[]byte) ([][]byte, error) { - b.mu.RLock() - defer b.mu.RUnlock() - - values := make([][]byte, len(keys)) - - for idx, key := range keys { - i := b.btree.Get(&item{key: key}) - - if i == nil { - // leave value as nil slice - continue - } - - j, ok := i.(*item) - if !ok { - return nil, fmt.Errorf("error item is type %T not *item", i) - } - - values[idx] = j.value - } - - return values, nil -} - -// Put sets the key value pair provided. -func (b *Bucket) Put(key []byte, value []byte) error { - b.mu.Lock() - defer b.mu.Unlock() - - _ = b.btree.ReplaceOrInsert(&item{key: key, value: value}) - return nil -} - -// Delete removes the key provided. -func (b *Bucket) Delete(key []byte) error { - b.mu.Lock() - defer b.mu.Unlock() - - _ = b.btree.Delete(&item{key: key}) - return nil -} - -// Cursor creates a static cursor from all entries in the database. -func (b *Bucket) Cursor(opts ...kv.CursorHint) (kv.Cursor, error) { - var o kv.CursorHints - for _, opt := range opts { - opt(&o) - } - - // TODO we should do this by using the Ascend/Descend methods that - // the btree provides. - pairs, err := b.getAll(&o) - if err != nil { - return nil, err - } - - return kv.NewStaticCursor(pairs), nil -} - -func (b *Bucket) getAll(o *kv.CursorHints) ([]kv.Pair, error) { - b.mu.RLock() - defer b.mu.RUnlock() - - fn := o.PredicateFn - - var pairs []kv.Pair - var err error - b.btree.Ascend(func(i btree.Item) bool { - j, ok := i.(*item) - if !ok { - err = fmt.Errorf("error item is type %T not *item", i) - return false - } - - if fn == nil || fn(j.key, j.value) { - pairs = append(pairs, kv.Pair{Key: j.key, Value: j.value}) - } - - return true - }) - - if err != nil { - return nil, err - } - - return pairs, nil -} - -type pair struct { - kv.Pair - err error -} - -// ForwardCursor returns a directional cursor which starts at the provided seeked key -func (b *Bucket) ForwardCursor(seek []byte, opts ...kv.CursorOption) (kv.ForwardCursor, error) { - config := kv.NewCursorConfig(opts...) 
- if config.Prefix != nil && !bytes.HasPrefix(seek, config.Prefix) { - return nil, fmt.Errorf("seek bytes %q not prefixed with %q: %w", string(seek), string(config.Prefix), kv.ErrSeekMissingPrefix) - } - - var ( - pairs = make(chan []pair) - stop = make(chan struct{}) - send = func(batch []pair) bool { - if len(batch) == 0 { - return true - } - - select { - case pairs <- batch: - return true - case <-stop: - return false - } - } - ) - - go func() { - defer close(pairs) - - var ( - batch []pair - fn = config.Hints.PredicateFn - iterate = b.ascend - skipFirst = config.SkipFirst - seen int - ) - - if config.Direction == kv.CursorDescending { - iterate = b.descend - if len(seek) == 0 { - if item, ok := b.btree.Max().(*item); ok { - seek = item.key - } - } - } - - b.mu.RLock() - iterate(seek, config, func(i btree.Item) bool { - select { - case <-stop: - // if signalled to stop then exit iteration - return false - default: - } - - // if skip first - if skipFirst { - skipFirst = false - return true - } - - // enforce limit - if config.Limit != nil && seen >= *config.Limit { - return false - } - - j, ok := i.(*item) - if !ok { - batch = append(batch, pair{err: fmt.Errorf("error item is type %T not *item", i)}) - - return false - } - - if config.Prefix != nil && !bytes.HasPrefix(j.key, config.Prefix) { - return false - } - - if fn == nil || fn(j.key, j.value) { - batch = append(batch, pair{Pair: kv.Pair{Key: j.key, Value: j.value}}) - seen++ - } - - if len(batch) < cursorBatchSize { - return true - } - - if send(batch) { - // batch flushed successfully so we can - // begin a new batch - batch = nil - - return true - } - - // we've been signalled to stop - return false - }) - b.mu.RUnlock() - - // send if any left in batch - send(batch) - }() - - return &ForwardCursor{pairs: pairs, stop: stop}, nil -} - -func (b *Bucket) ascend(seek []byte, config kv.CursorConfig, it btree.ItemIterator) { - b.btree.AscendGreaterOrEqual(&item{key: seek}, it) -} - -func (b *Bucket) descend(seek []byte, config kv.CursorConfig, it btree.ItemIterator) { - b.btree.DescendLessOrEqual(&item{key: seek}, it) -} - -// ForwardCursor is a kv.ForwardCursor which iterates over an in-memory btree -type ForwardCursor struct { - pairs <-chan []pair - - cur []pair - n int - - stop chan struct{} - closed bool - // error found during iteration - err error -} - -// Err returns a non-nil error when an error occurred during cursor iteration. -func (c *ForwardCursor) Err() error { - return c.err -} - -// Close releases the producing goroutines for the forward cursor. -// It blocks until the producing goroutine exits. 
-func (c *ForwardCursor) Close() error { - - if c.closed { - return nil - } - - close(c.stop) - - c.closed = true - - return nil -} - -// Next returns the next key/value pair in the cursor -func (c *ForwardCursor) Next() ([]byte, []byte) { - if c.err != nil || c.closed { - return nil, nil - } - - if c.n >= len(c.cur) { - var ok bool - c.cur, ok = <-c.pairs - if !ok { - return nil, nil - } - - c.n = 0 - } - - pair := c.cur[c.n] - c.err = pair.err - c.n++ - - return pair.Key, pair.Value -} diff --git a/inmem/kv_test.go b/inmem/kv_test.go deleted file mode 100644 index 79a9146df58..00000000000 --- a/inmem/kv_test.go +++ /dev/null @@ -1,247 +0,0 @@ -package inmem_test - -import ( - "bufio" - "context" - "fmt" - "math" - "os" - "reflect" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration" - platformtesting "github.com/influxdata/influxdb/v2/testing" -) - -func initKVStore(f platformtesting.KVStoreFields, t *testing.T) (kv.Store, func()) { - s := inmem.NewKVStore() - - mustCreateBucket(t, s, f.Bucket) - - err := s.Update(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket(f.Bucket) - if err != nil { - return err - } - - for _, p := range f.Pairs { - if err := b.Put(p.Key, p.Value); err != nil { - return err - } - } - - return nil - }) - if err != nil { - t.Fatalf("failed to put keys: %v", err) - } - return s, func() {} -} - -func TestKVStore(t *testing.T) { - platformtesting.KVStore(initKVStore, t) -} - -func TestKVStore_Buckets(t *testing.T) { - tests := []struct { - name string - buckets []string - want [][]byte - }{ - { - name: "single bucket is returned if only one bucket is added", - buckets: []string{"b1"}, - want: [][]byte{[]byte("b1")}, - }, - { - name: "multiple buckets are returned if multiple buckets added", - buckets: []string{"b1", "b2", "b3"}, - want: [][]byte{[]byte("b1"), []byte("b2"), []byte("b3")}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := inmem.NewKVStore() - for _, b := range tt.buckets { - mustCreateBucket(t, s, []byte(b)) - } - - got := s.Buckets(context.Background()) - sort.Slice(got, func(i, j int) bool { - return string(got[i]) < string(got[j]) - }) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("KVStore.Buckets() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestKVStore_Bucket_CursorHintPredicate(t *testing.T) { - s := inmem.NewKVStore() - bucket := "urm" - mustCreateBucket(t, s, []byte(bucket)) - - fillBucket(t, s, bucket, 10) - - t.Run("filter by key", func(t *testing.T) { - _ = s.View(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket([]byte(bucket)) - if err != nil { - return err - } - - cur, _ := b.Cursor(kv.WithCursorHintPredicate(func(key, _ []byte) bool { - return len(key) < 32 || string(key[16:]) == "8d5dc900004589c3" - })) - - count := 0 - for k, _ := cur.First(); len(k) > 0; k, _ = cur.Next() { - count++ - } - - if exp, got := 1, count; got != exp { - t.Errorf("unexpected number of keys, -got/+exp\n%s", cmp.Diff(got, exp)) - } - - return nil - }) - }) - - t.Run("filter by value", func(t *testing.T) { - _ = s.View(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket([]byte(bucket)) - if err != nil { - return err - } - - cur, _ := b.Cursor(kv.WithCursorHintPredicate(func(_, val []byte) bool { - return len(val) < 32 || string(val[16:]) == "8d5dc900004589c3" - })) - - count := 0 - for k, _ 
:= cur.First(); len(k) > 0; k, _ = cur.Next() { - count++ - } - - if exp, got := 1, count; got != exp { - t.Errorf("unexpected number of keys, -got/+exp\n%s", cmp.Diff(got, exp)) - } - - return nil - }) - }) -} - -func openCursor(t testing.TB, s *inmem.KVStore, bucket string, fn func(cur kv.Cursor), hints ...kv.CursorHint) { - t.Helper() - - _ = s.View(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket([]byte(bucket)) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - cur, err := b.Cursor(hints...) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if fn != nil { - fn(cur) - } - - return nil - }) -} - -func BenchmarkKVStore_Bucket_Cursor(b *testing.B) { - scanAll := func(cur kv.Cursor) { - for k, v := cur.First(); k != nil; k, v = cur.Next() { - _, _ = k, v - } - } - - searchKey := "629ffa00003dd2ce" - predicate := kv.CursorPredicateFunc(func(key, _ []byte) bool { - return len(key) < 32 || string(key[16:]) == searchKey - }) - - b.Run("16000 keys", func(b *testing.B) { - s := inmem.NewKVStore() - bucket := "urm" - mustCreateBucket(b, s, []byte(bucket)) - fillBucket(b, s, bucket, 0) - - b.Run("without hint", func(b *testing.B) { - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - openCursor(b, s, bucket, scanAll) - } - }) - - b.Run("with hint", func(b *testing.B) { - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - openCursor(b, s, bucket, scanAll, kv.WithCursorHintPredicate(predicate)) - } - }) - }) -} - -const sourceFile = "kvdata/keys.txt" - -func fillBucket(t testing.TB, s *inmem.KVStore, bucket string, lines int64) { - t.Helper() - err := s.Update(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket([]byte(bucket)) - if err != nil { - return err - } - - f, err := os.Open(sourceFile) - if err != nil { - return err - } - defer f.Close() - - if lines == 0 { - lines = int64(math.MaxInt64) - } - - scan := bufio.NewScanner(bufio.NewReader(f)) - for scan.Scan() { - var key []byte - key = append(key, scan.Bytes()...) 
- _ = b.Put(key, key) - lines-- - if lines <= 0 { - break - } - } - - return nil - }) - - if err != nil { - t.Fatalf("unexpected error: %v", err) - } -} - -func mustCreateBucket(t testing.TB, store kv.SchemaStore, bucket []byte) { - t.Helper() - - migrationName := fmt.Sprintf("create bucket %q", string(bucket)) - - if err := migration.CreateBuckets(migrationName, bucket).Up(context.Background(), store); err != nil { - t.Fatal(err) - } -} diff --git a/inmem/kvdata/keys.txt b/inmem/kvdata/keys.txt deleted file mode 100644 index 5db2e7fd4f3..00000000000 --- a/inmem/kvdata/keys.txt +++ /dev/null @@ -1,16087 +0,0 @@ -0000a700045660f0f70876000045660e -000528e1a2ecae405079c100b04bfbad -00057b0004795a80121bd0000047959e -000680000404c1c0f58a8a0000404c1b -00082c0004bf1580f3212b00004a3f0c -0008ea000458cb808d5dc900004589c3 -000fbf000463b6d0970de20000463b68 -0011d700049f7f700e312b000049f7e9 -001c95000454ade0d444f50000453d19 -001e82000488ec503e37ec0000488ec1 -002b010004000720f855520000400071 -002c31d47cbbcb40c12dbe00d04ccdd8 -00300d0003dd30e0629ffa00003dd2ce -00308e0003e2aa20f3ebbb00003e2aa1 -0030d4000483f6e0c511f700003957bf -00385f00043941f0f08a63000043941e -003cf80003d84990ff2f1700003d8498 -003d750003d516f09634f800003d2924 -003f590003d60a402fcf5900003d6099 -0040f415945dcaf0cd9b330080483647 -00425e00043d482028e4a3000043d481 -0044470003fce520834f21000038758a -0044470003fce520ea76c300003921cb -0045584f57b50850bfb2db00304993e5 -004a24000490cf9000a9240000490cc7 -004cbaca22468e90a894ac009048b549 -005093000450561034ccea0000450559 -00522fed31a18c10d76d240000497227 -005507683504c5c0524de2005048dbfb -0059d00004cdd170b181be00004cdd0e -0059e2000469ad70f79dc90000469ad6 -005b0a0004837d105a42c30000481bf9 -005b5b58088297f0780cae00804a93a4 -005ea10004b9dc507196a100004b9dc2 -00600f0003d01380f6e13500003d0137 -0073920004442730f844fe0000444272 -0074920003f88e80ff2fe900003f88e7 -00756e15896598c0fab9d700104a428a -007ca80004ba7a50c4c9c700004ba792 -007ff600043f66e0ff28a3000043f66d -0088a775b2cf95a04c7fdc00d04a2125 -00897b0004749f1041cf5e0000472f16 -008a13000421be20f4dc470000421be1 -0091fb0003d11a90e7620900003d119d -0093620003f2adf0a3a0fe00003f2ad6 -0093790004a2fe90360429000039346c -009b070d426235f0f5b64500d0474dfb -009eec0003ee6550f917fa00003ee654 -00a3600003d84f00f4ba7200003d84ef -00a7bf00045d8b40d570a7000045d456 -00a7cf000400ab10fefaba0000400ab0 -00b1eb00043311f0f8389a000043311e -00b28b00048373306eaa8b000048372a -00b7dc00043ee8a0ff3dd1000043ee89 -00b8c200048e5ac06c8ff5000048e0a0 -00c1d28a77e9fc703386ce00304c46b5 -00c31951c8400500b3a9a400004a9842 -00c3ac00047d19805da096000047cf8b -00c4ea00044e3920702420000044e34f -00cbed000499eaf005c6c30000498bb7 -00ce6814b80154b045810a00404c3e5b -00d1f400048f50004c4722000048f49d -00d6ec0003eb1df002135100003eb1da -00dc5d0004339360b8027e0000433928 -00e00f0003ff2010f40d8600003ff200 -00e36b0004852600433e960000485244 -00e6cc000399de6005ebe50000399de3 -00e6cc000399de60834f21000038758a -00ec86000423b500f597370000423b4f -00ef010003ff6750f9506b00003ff674 -00f4b500047bb2c03793d0000047b889 -010d5d0003c197b0834f21000038758a -010d5d0003c197b0faaf2600003c197a -010e820004359720f362630000435971 -01151f0003f3c4e0f740fe00003f3c4d -0116e14ea9f39c70da39de0060473dc8 -012189033a338f50eb223900a04ac086 -0122fe0003ae20b06c30820000393484 -01286b0004050130ffceba0000405012 -012c980003cd7ea0fa3acd00003cd7e9 -0133a8000499d88005c6c30000498bb7 -01352abbe5582dd05c25e200d048b784 -01377400047b9180271896000047b79f -013b630003d11a90e7620900003d119d -013c1c000484c4e0f593a20000484c4b -014bc61437afdab0141774003047a914 
-014ec9a34121798044e25600404b11c9 -015c94fa1c4a9b60bbae8b000047c24e -0160f963a81e4b9018597b0020475026 -0165563f68db10d06a89d70080498909 -01679181e7ddf170b6d06900604b255e -017628000424d790f9f27e0000424d78 -0179230004694fb05bb1e20000468fd0 -017b460004852600433e960000485244 -017cb70003dc2230f43e5600003dc222 -017e2f0003f894d011449200003f8949 -017e450004765600b351d20000473e7e -017f9f71d06377906102e8001049df9f -01875b0004353c30f92e5e00004353c2 -0188130004b9e8002e1ece00004b9e7d -01886600044810004b1c7300004480fb -018a6f1af146397072fab200204c720d -0190dc60c503085065a96f00004ab3c2 -019227d7977202800f3e8800f04cd635 -0193f463fbb519602ec6a100d04bcd74 -01952a0004c99850e5431400004c9888 -019759b96e7e646049b8ae00e04a9de3 -019d52db6cad116060087600604be3b6 -01a22287f8c977b0c76c5c00104bb694 -01af140004c99850e5431400004c9888 -01b245000476ddc0a639e600003cf5b0 -01b64541e1e0fe5043fa5600404b33a9 -01b9140003fa4ab0f48e8f00003fa4aa -01b97b0004777f600471a100004776e6 -01bd36ec8b5d33c09ec5de000048146d -01bf8a81bddebe303e36ab00004b4845 -01c0850004734280427fac0000473423 -01c0930004551e20d5b36c00004551a4 -01c597c642ed9a3072a67300504c9ed5 -01d49dac54ca12e0a113dc00e04a2457 -01d523acc90f3970373da100e0479870 -01d5c70004ba9bd057927b00004ba97a -01d9310003bef040b1068700003b603a -01dc8d0003d8f2c0f6ae9200003d8f2b -01ded90003c67ca00045f600003c67ca -01ded90003c67ca0834f21000038758a -01e0d4000483f6e0c511f700003957bf -01e2070003e39250f7ef5100003e3924 -01e7010003fa1e90f5711700003d8f2b -01e708000443a2c027a0fe0000443a29 -01e7b5000423fdd0a622280000423fd4 -01ea740004abe2b09e9433000049f049 -01eb8c0298bafd1036f8b500b04768f6 -01f0280004ba2560e1059800004b385a -01fde400038ccc50fdd962000038ccc4 -01ffd400048e71b0a559a1000047663a -02028b0004734280427fac0000473423 -0205480004ccd460e2614800004ccca5 -020fa40004ac8040a7a5a400004ab782 -02100200042861c042f5db00004285ff -02173472d26597e07f1d240080495f91 -021845db4cebaab098131400c04cb550 -02190b0003eda4e08b693800003ec60c -021d7b0004749f1041cf5e0000472f16 -02200c0003e80e90f524a400003e80e8 -02273e0003924c40834f21000038758a -02273e0003924c40d33d18000038bd74 -022c14af0b906d0053505600b049ac85 -023aa715f38810105ca68b000047e874 -023ace0004bedce0b0f67b00004bc82f -023c7600045981a0f9ad230000459819 -023f620003dfa970fb100b00003dfa96 -02411aff2a233c10b4f63900b04ac8cf -0241720003e57a60fa82ec00003e57a5 -0250a300043dd21000f35b000043dd21 -02540c0003f785d0f7db6200003f785c -025dcf000429aa90f4a5db0000429aa8 -0264bcb863bfb6c0ec5f7600404c313f -0267740004815d205c2db90000480967 -026aec0003f2adf0a3a0fe00003f2ad6 -026bfa0003de25f0fae80d00003de25e -026f35f11c01c6f0fff90a00704c72d8 -0273170003d3a0a088f00b00003d1a35 -0276db000490bdc0cc1cc20000490ba4 -027c0c0003f63dc0f7cbbb00003f63db -027ebc0004c44ce0b8bebc00004c44b0 -0284e4a9d0db5db06fe42f00f047ec2f -028ad90003c7a6f0834f21000038758a -028ad90003c7a6f0deab9900003c7a6c -028de200044fd9e040e0a7000044fccc -028f4e0003f3ade0f8ebbb00003f3add -02904c00042cb680f7649a000042cb67 -02921900044ea5b0ad47bf000044e354 -02960c000405961007f6ac0000401bd9 -029f330004812340abf1f1000048121a -02a1157fbf5ee5e03282db00304900de -02a3210003dbf290f8398f00003dbf28 -02a8900004722400015e020000472240 -02a8960004735640360429000039346c -02ad9ce8358ad2308fc374001047502b -02afd0000473818063e1b900004736f3 -02b6f5dd2f2d301058942500204966be -02c543000404c1c0f58a8a0000404c1b -02cb550004b31f808c3e3900004b31d2 -02cf6c000454e230f69fbf0000454de5 -02d86b36584bd6f06caa4e00304850f7 -02d9a83f0541fa00927c2c00404c513f -02dcf09b9008496005cab200a04c7823 -02df79e67731a5c0d8a7a80000498a27 -02e6c5b317d5250093e42500c0496c23 
-02eafc22b7497150e7abbe00104a1555 -02eb790004a2fe90360429000039346c -02ed640004916f7008a7a70000484bb8 -02f67b0004b3d0f095a6ab00004b3d0b -0305900003fe8de001c28a00003fe8de -0306480003d6f100018a5600003d6f10 -03182f000473bbc06407740000473bb9 -0318770003d9ee50fa058f00003d9ee4 -0328dc0004726a00834f21000038758a -0328dc0004726a00ea76c300003921cb -032d904c70cb3860155f6b001048da00 -032f69d83e4176c0c326d800404cbcc6 -0331720003efbe001672ec00003efbd9 -0334a70004c604a04f1c2c00004c6039 -03357a000403ba10fb71900000403ba0 -0336f52d56aed0a0b181be00c04cdd0e -03442c0004c604a04f1c2c00004c6039 -0344b70003d826f0fa409800003d826e -03479eda171d81b03e06190030490bf4 -0348ae0004a87ed0bd45d000004a87e8 -0349b900047e1ee02298b5000047e1e8 -0352fab782a193a00c4e6600404b90bc -0358950003fefd30f8a95200003fefd2 -035c5600049f0bc0d8d02500004974a5 -035d2c00046ed600fa282f000046ed5f -035f580003f76410f9f00d00003f7640 -03668a0004096e408ca2c200004096c5 -03671c8c21d3efb017d485004047553d -0368130004af92d02e7a7400004af822 -0368ba700407c5004119e300a0448a71 -036d38aabcb4569049b8ae00f04a9de3 -036d4600043a76608d5eba000040c009 -0375f125c06be5f0c5ac850090483327 -03797e0004430e90fb9b5300004430e8 -037b01b653286010b6f2ce00804bd096 -037c0b0003ec9f808b693800003ec60c -037ca300043cd6c074a7dc000043c96f -037d940003e54940023cd200003e5494 -037df80003f3c4e0f740fe00003f3c4d -0380130004ba1ed01a021d00004b9eb6 -03866300043e17c00d8ce8000043e078 -038e450004755130171c85000047550e -03916c809b813a30fb5f3300b0490c9f -0391dd0004716240fb035d0000471623 -0392ac00041379501af1520000413794 -0392ce0004c2dbe06caaa100004bc4a5 -03950b0003eb1da002135100003eb1da -03a5a8000488dd50ee535b000043f8c9 -03a96b088583ba5003413300904c49ad -03a9fe000423b500f597370000423b4f -03ac8e668928d710ea5da300c048dd18 -03ac95000421be20f4dc470000421be1 -03b17b00047718e0a3bc960000477152 -03c2450004829a10ba27ac000047715b -03c57e00044421e00ac3b0000044420f -03cfa6c8dc0113905fcae800404c35c8 -03d2b5000472c5e06e0f5e0000472c3d -03d7f00003d0d270fca7b200003d0d26 -03dfa4000473105076c55d0000472e89 -03dfd0000473818063e1b900004736f3 -03e1b598c13444d0fcee8b0040483792 -03ea24a3341d0560eb26bc00e04bf005 -03eb760004c5aec0ed153300004c5add -03efbb0003ddcbc066280b00003ddcb7 -03f68000046f2b00d8527c000046e202 -03fa450004813220b1068700003b603a -03fd697cfa194d602115ab00504c7446 -04076000046a129002c4f5000046a129 -0409211625a785108401b900f047a28f -040bac000482f77053b97b0000482f6f -040c357251780590c1d46900a04ba9a8 -041128ccdf4f9b403233d100204bfca6 -041680000412d7f0f7b20c0000412d7e -0419f700042ef4a0f88c75000042ef49 -0421de00047e1ee02298b5000047e1e8 -0422480003d4b09002beaa00003d4b09 -0425900004096cb08ca2c200004096c5 -04265f000443fb8002f4760000443fb8 -042fabc838ba6490c91a7400604adabe -042ff60004370540fb90500000437053 -0432ec0003f7b0f0235bbb00003f79ae -0433630003cf5ac0fa133900003cf5ab -0436cc0003ae20b06c30820000393484 -0436cc0003ae20b0834f21000038758a -04370100040994705d495200004098ba -0439330004bf1580f3212b00004a3f0c -043a1ff2af33193035d62c00a048e929 -043a28ad8670cb008f2d7b003047ea1a -043d7c55091413d03955d900b0473da4 -0440dc0004726a00ea76c300003921cb -0452dbaf16e28a308e008500a047333e -045b730004cd50d0af0a45000047d45c -045cf500044d5750958420000044d569 -0460f50004511910032de20000451191 -0463660003d10300fd1a5800003d102f -04670a0004000720f9cfcf0000400071 -0469686706c2cb80d16acb00804cc043 -046a720003d35950fd10fc00003d3594 -04713c0004aa68e04ea9c900004aa54e -0478560004a34090fbc3ed00004a22fb -047a280003e39250f7ef5100003e3924 -047ae20004305420fb3ee70000430541 -0482a50004c6dcf0e832190000496f76 -0486420003bf6500b1068700003b603a 
-04866b0003c67ca00045f600003c67ca -04866b0003c67ca0834f21000038758a -0495d700049b021054c36d000049b019 -04978c0004a296b08b1c10000043299a -049d29000424ecc0af80020000424eb8 -04a1873a4aff48b01983b300d04cc88f -04a90b0003e5717002f35100003e5717 -04af620003e3de60c41d9400003e3de2 -04b69b400c4549303155ae0060499ede -04b744299a218fa0c5fb7600504c48ec -04b7790004a40f10e3f7be00004a3e46 -04b8a80004b4b230b727a400004b4a02 -04c1380003f372d0c1ac0d00003f36ec -04c820000454a700038c760000454a70 -04ca35776dc17610fb747d00a04aa799 -04cbbf0004594e0003917000004594e0 -04cbf500048a2770ba876b000048956b -04cef800048e5ce018bb86000048cf34 -04d38c0004a34090fbc3ed00004a22fb -04d4a40003ee46d03b6cdb00003ecfdb -04d6e90003f3ade0f8ebbb00003f3add -04d7f3c27ef4b0f039f67b00504b9158 -04dd9b190503a240c44c72009049e444 -04decd9f8b117f300f3e8800304cd635 -04e5a5000423fdd0a622280000423fd4 -04e737a636a6f4d0ea2e240080496b69 -04e7460004865c006caa4e00004850f7 -04ec3cc0b815acd0c057c200b04a07af -04ef4e0003f785d0f7db6200003f785c -04f6460003ff6750fae68a00003ff674 -04f7550004b4b230b727a400004b4a02 -04f94600043ce300fd275b000043ce2f -04fc72000430c8f0b743110000430c68 -04ffb55489c500b00f3e8800b04cd635 -05028a00040a1000fd8190000040a0ff -05075e000472b640834f21000038758a -050b110003cbe4d003919f00003cbe4d -05100d0003f05e70307d7200003ee9b0 -0510a6b5d5322a00cd409600a047f619 -051a430003ee6550fa68db00003ee654 -051bf5000488c5f04edcac0000488bb1 -05200d0003eccde06d959400003eccdb -0522c3000498ebe0f1bb220000498eba -0523a40004b94ed0e9c02800004b94ea -05269f00043311f0f9d67e000043311e -052af800042cb680f7649a000042cb67 -052d5b000472cd601a4eb50000472cc6 -052ec7000489ddb0668e820000489dc9 -0531d1ec377f77404fa5d200d047a53b -0539a8000488dd50ee535b000043f8c9 -053da10004736530f3980d00003e80e8 -053ee90003f63dc0f7cbbb00003f63db -0541330004c5f9c0a4250a00004c5f91 -054564000498ebe0f1bb220000498eba -05497b00047718e0a3bc960000477152 -054a5c0003cd7ea0fb9e7200003cd7e9 -054d380003e555b003f0d200003e555b -054eaa0003d89d7003ee4800003d89d7 -0553b42e68488ba00135e300304ccc98 -0555c10004c5f9c0a4250a00004c5f91 -055663000472cd601a4eb50000472cc6 -0558db0003e3c9b0a0817200003e3c98 -055ed9e00a9d1230a03df4009048f753 -0567cf00042095f0fdbd52000042095e -056822f3c4d3c3e02ee6e800504c28b0 -056847000405961007f6ac0000401bd9 -056ab5000472c5e06e0f5e0000472c3d -056fcf0004107a4035b2ac000041079f -0573f1250c07f4006c1dd900d047c461 -057619000498d1106405d7000049516e -0577580003dbf290f8398f00003dbf28 -05780f000491f6408b10ac000048e072 -0578b500047f4ad02699d2000047f1e7 -057a2400048e5ce018bb86000048cf34 -057abb0004a8948080f14a00004a893a -057bdc00043e70e0e9f27200003cbbf8 -057c76000458cfc08d5dc900004589c3 -057dae00049b18f0f28805000049b18b -0584db0003e2f4207f53fa00003e2f16 -05866d2310ec76302aad9800f0484dac -058792000444d2b00427b00000444d2b -058db00003d9ee50fa058f00003d9ee4 -0598c2000491f6408b10ac000048e072 -059bf7eea69add608c2fd0006047a523 -05a83300049b18f0f28805000049b18b -05b1a100047c209037f602000046d41c -05b4770003d826f0fa409800003d826e -05bddd0004723d100449af00004723d1 -05c1dd0004096cb08ca2c200004096c5 -05c21d0004b9e8002e1ece00004b9e7d -05c27e00045318e0045495000045318e -05c57a000401f7301fef5100003e66e9 -05d47500043404e04b0f6600003d01b7 -05d4c10003f76410f9f00d00003f7640 -05d6aa0003d84990ff2f1700003d8498 -05d9cf000498fb902d09f400004967bd -05dc261a6970246035bfac0020483b86 -05e5f1000476f2a06cf09000003c9539 -05e66f8f8aeb27e0b3936b004048d69f -05e67b0004b95aa0b9366600004b9528 -05e7be0003d01380f8c3f000003d0137 -05ed430003fefd30f8a95200003fefd2 -05f1a518b26b1000e801a300904c2934 -05fb53fa52b937d022c9e900c048e29e 
-05fd1cfee463ab70ec2ba8009049aae9 -0605720003f7b0f0235bbb00003f79ae -06065000048f15e0f593a20000484c4b -0609bf8dce240ac08d4b0a00404823ad -060a0200047188a06eb280000046d3ee -060b63699aa750d063ef4200604ccb23 -060c5d00043314c0f8389a000043311e -0613200004714c00fd73c900004714bf -061a66000472e97076c55d0000472e89 -061b130004353c30fa917900004353c2 -06264e4ed8a0e8b08157be00704a46f1 -0626660004b20c801ce85c00004b0f40 -0627ac0004834e809534b50000476fa5 -062dba000424d790fb2d9a0000424d78 -06330100040673f02adc6b0000406706 -0635565a15478be025db080030445f86 -06378240670e60d079507600404bf3c5 -064129b74e0f4ed07f2b7300d04ce459 -064746000487304031cbf50000487301 -064fa10003dfa970fc517200003dfa96 -06542d0003f88e80ff2fe900003f88e7 -065b97001c0884b055da7e00004b05e6 -066736471fb4d990dccaf800e0491a55 -066dd27a167d696005cab200304c7823 -066dfb14a4c142f05fd25300204bf377 -066fbf000465b6206c4ca70000465b57 -067b5e000472e97076c55d0000472e89 -067f460004865c006caa4e00004850f7 -06882500048f15e0f593a20000484c4b -06934cd4b59838c001302f00104832f8 -0697af000431f34091a48900003d0c65 -069f5e000472b640834f21000038758a -06a4720004a5351007038c00004a1c02 -06a4c10003de25f0fc476200003de25e -06a85100048855c04f0337000048855a -06a96400049748b0834f21000038758a -06a96400049748b0dee5190000393434 -06a970000464a4d05a989300004649d9 -06a9fb00042ef4a0f88c75000042ef49 -06b2e90003e57a60fc300d00003e57a5 -06b6ba000421bf50f372ac0000421be1 -06bcea000461ad401ec1c90000461ace -06c3510004717760fdc5af0000471775 -06c7db785daf1ed0e9b7be00604a26b0 -06cdd6000423fdd0a622280000423fd4 -06d0a40003e2a82005740d00003e2a82 -06d0a40003ed4550d22aec00003ed447 -06db25000495d9b0fbb1f40000495d97 -06e18dc98d3b326086776d00b0499d49 -06e9720003f41e2018eb5100003f41dc -06e996000484a4b021aa7e0000459dc1 -06eeebf7a2622a1006942c00004c4267 -06f14600043f66e0ff28a3000043f66d -06f3d10004c4d3807181a300004c4cf1 -06f6460004000720f9cfcf0000400071 -06f7f5000486832090d5d9000047e6ac -06f8ea00045fff80058ca7000045fff8 -0700a70004c1adb01236a500004c1ab5 -070a6354e72118e0430db9008048484f -071425000495d9b0fbb1f40000495d97 -0717510003cf5ac0fa133900003cf5ab -071a2c6b96f403601b60a800304b9e1e -07214a0004a8ac901e750400004a8932 -07287600046b4140400170000046b40f -0729900004050130ffceba0000405012 -0729a30004c010906868a100004c00fb -07333f86e25a591068d3a8008049e906 -07396200039ae0d005baa3000039ae0d -07396200039ae0d0834f21000038758a -073c760004664be005f5c900004664be -073d43000412d7f0f7b20c0000412d7e -073e8c000430c8f0b743110000430c68 -073ef8000490bdc0cc1cc20000490ba4 -07416b0004c399f098590a00004c399c -074325000495450040cb22000049016a -0748e9000391f71005d2c30000391f71 -0748e9000391f710834f21000038758a -074fd10004be85603d0c2c00004be838 -074ff00003d0c62005da8800003d0c62 -07528f0003fa1e90f5711700003d8f2b -075665e405925830c6fe450060473eb8 -0762ac000412e1301eb2c20000412e02 -0762ef2effa9b29045810a00b04c3e5b -0766530004be85603d0c2c00004be838 -07682000045cce1054879c000045c2af -076a8f000400ab10fefaba0000400ab0 -076afe0003bb20b0834f21000038758a -076afe0003bb20b0c79b4500003927b9 -077a8e000447b99005f1e30000447b99 -0784560004717990781090000047063b -0789e20004b9e7b025865600004b61db -078ef20003ff6750fae68a00003ff674 -07a05c0004ace18066b5c700004acd19 -07a1e10fb51e2440788ca100104c7d6d -07a63e76eb1730400c3a4e0030485d8a -07a6920003d9e050a36eaa00003d9e01 -07a6ba000405961007f6ac0000401bd9 -07ad0fedf896a380d1921d00204bbba6 -07adc500043417c051607c000043416a -07b502fcf5a81c30f4d27e00304b95f6 -07b606a20c9270d0632cc400f048b88f -07baf2000403ba10fcd3010000403ba0 -07be92b90f0202102dddd20070473f1c -07c0201d095a8d90ecbc7d00104aa784 
-07cf0100040cfc00067d7a000040cfc0 -07d01b6453fa285030868b0000474c56 -07d07fd978f5ecf0a8852400f04993e9 -07d705ba8dfde4c0d17abc00104be28e -07df548fd81e408013164e0020487b01 -07e7a800049a9d8044c3a8000049a983 -07e831452639ade026594800504cd66f -07eec200040994705d495200004098ba -07f3620003e41be006a00d00003e41be -07f55153c8b63ef008870a000047fb7b -07fbbcfa6262f59016d33700104864ba -07fd4600043f82b0069e5e000043f82b -07fe1fb0a0ff75c0ce5d5a00204a8980 -07ff60a3e6999b502ee6e800e04c28b0 -08017e0e214386d035951700204c4928 -08028b00047e7ac0413d96000047e779 -0806b20004c5aec0ed153300004c5add -0809b40003d0d270fe1f6300003d0d26 -080daf00043311f0f9d67e000043311e -0812e80003cd7ea0fb9e7200003cd7e9 -08150731acee1c906af48500c047546b -08189300045f3a40cd98f50000458a03 -081a3b00044950a0cfd476000044944b -081b5b00043ee8a0ff3dd1000043ee89 -081e070003ee6550fa68db00003ee654 -082169313c44a190360429002039346c -082169313c44a190834f21002038758a -0821be0e72215e5091947900a04cd33a -08242c0004c5d03041cf5e0000472f16 -082b0a000483f850d25d9600004802d4 -0831e200044fd86040e0a7000044fccc -0834520004cbf1602f7c5600004a569e -0836ac000415d8c006f6c20000415d8c -0848fec4741f7de0e4a9f10050481498 -085856f66f4f30d06cf09000f03c9539 -085856f66f4f30d0834f2100f038758a -085ace0004b436f0f013a400004b4364 -085c119c2dbcbc30a68e7b00704b224e -0862bb80a7ef5740ece59800604bb4ae -08676c000450561034ccea0000450559 -08692faab8f04990baac5c00004ba3a4 -0871b40003d10300fe84c000003d102f -0875c80003f509701963bb00003f506a -087c1267b9e6d4b0e26e1d00404b9d06 -08812400049670a058942500004966be -08820c00040673f02adc6b0000406706 -0883aa299adff2e0fec3a400004b57b6 -088972dcd92d4d103cd14800204cd203 -088aa82b2d579a50fb246f007048eedc -088ab200043314c0f8389a000043311e -089b990003c795202f72e200003c7943 -089b990003c79520834f21000038758a -08b123000464caf006fcea0000464caf -08b1b900047863008cf7740000478609 -08bd785252be8f90ef53f500104895b1 -08c1f70003d35950fe891700003d3594 -08c4012587970750c7663900204b34ac -08c80b0003d1d79007173d00003d1d79 -08cb0ded15fd1e00eb26bc00f04bf005 -08cea3000424d790fb2d9a0000424d78 -08d0a80004bb57d0e8145c00004bb578 -08d2740004bacee066e19800004baccf -08d6ec0003ed7d5006576200003ed7d5 -08dcc40004868d4062036b00004868c9 -08e9e200045015a052f63b00004500da -08f73d0003d6f100018a5600003d6f10 -08f9cc0003dfa970fc517200003dfa96 -08fc7600046a87d00749c9000046a87d -0901170003d3bcd09c782000003cb9b5 -090170000451ba607a2ea20000450d5d -0902908606d54ff0b149d2008047d094 -0903cf0004000a3068ce8f0000400026 -0905330004c604a04f1c2c00004c6039 -09057a0003fe8de001c28a00003fe8de -09058dfd2dc55d606c3fed009049d8b2 -091547472d7e321071c59400804a9037 -0928c7868643ef3045f16f00404aa8e0 -092a910003c26550308b2600003c1900 -092da50004353c30fa917900004353c2 -092f510003dd520073673f00003db744 -0935a6ed7b1a43a0501ece00b04c6c32 -093eaa0003d252e007d31700003d252e -093f620003eccde06d959400003eccdb -09451400040a1000fea2c2000040a0ff -09459a00042861c042f5db00004285ff -0947b5185800c3d07eaba400504aec4e -094bd624bad78d00da89ae00b049b5f8 -0953580003de25f0fc476200003de25e -095847000401bd9007f6ac0000401bd9 -0959380003e54940023cd200003e5494 -0959ce50d3335de01d4ba800f04a4ab9 -095ba40004b4cf90a67de200004b4cf1 -0960aa0004305420fcebaf0000430541 -096ddc1c7769de901d85c900e04a8601 -09754300042095f0ff15dd000042095e -0975948f7c67da80271896009047b79f -097737000488eed00101e20000488ee9 -097a450004734280427fac0000473423 -097d4600043dd21000f35b000043dd21 -098524000491f6408b10ac000048e072 -09862c5ec471c4a0a0c056007049fe96 -0988b7b25ee072d00b2bc200b04a3d8e -098c0c0003e57a60fc300d00003e57a5 -0999c900045fe360292e7e000045fe29 
-09a6520003d01380f8c3f000003d0137 -09a6ac000401f7301fef5100003e66e9 -09a6ad0efeb529009ba48500e0483a09 -09ad7000046a17704dff60000046a172 -09b2050004370540fd0ab10000437053 -09b76b0003d84990003cf800003d8499 -09b9c000049b18f0f28805000049b18b -09c727fb78f70a00bf0b74008047cc6d -09d9980004bacee066e19800004baccf -09dac17a500d53e029464e0020486645 -09e42c0004c04bd02c16ab00004b224b -09e7630003d11ff00865fb00003d11ff -09eb170003d4b09002beaa00003d4b09 -09f55d000472aed0cea15b0000472ae0 -09f67b0004b54c20a0c6ab00004b5493 -09f8406e58962d50834f21004038758a -09f8406e58962d50e6078800403c031f -09fbe500039ec2c03d1162000039c575 -09fc7200049d3760df4dc0000049d247 -0a03bb0003eb1da002135100003eb1da -0a05f10004769860de3b74000047586e -0a07510003e3c9b0a0817200003e3c98 -0a0c9b16395ecce07181a300b04c4cf1 -0a14470003fa1e90f5711700003d8f2b -0a208c595fd23050da99d700f049f172 -0a20c26bf8805f807aaca800d04b777d -0a27200004722400015e020000472240 -0a2adb0004994fd0e331d7000049917a -0a2b84cd35cce080c34bbe00d04a01f0 -0a2d720003f88e8000749200003f88e8 -0a2d97251e5148f0e7c26e00604ce1c4 -0a3223640ab2f7b090c99800504b9f11 -0a373700042bd5f008cbb5000042bd5f -0a3aac00040994705d495200004098ba -0a3b390003b6054008a2fe00003b6054 -0a3b390003b60540834f21000038758a -0a3bbb0003e2f4207f53fa00003e2f16 -0a3f5e00047311d0aad14c0000473115 -0a49520004070890ab34470000407087 -0a4ef80004907c905029d700004906ee -0a54950004672c409649c900004672bb -0a5bae000403ba10fcd3010000403ba0 -0a5c330004a296b08b1c10000043299a -0a61f700039ae0d005baa3000039ae0d -0a61f700039ae0d0834f21000038758a -0a630a5d3cb71b80f5b9d700504a776b -0a6679b421028290bd2b33004047a404 -0a7219e049495f80834f2100f038758a -0a7219e049495f80b1068700f03b603a -0a761f66fe22e0808b10ac00d048e072 -0a8e5c8e81b40ae037e96b00704c2cc3 -0a8e6d4e8a1f28a0ff23ac00d047e731 -0a8e740004ad2720dad1d000004a90f3 -0a9b760004c04bd02c16ab00004b224b -0a9ed9000391f71005d2c30000391f71 -0a9ed9000391f710834f21000038758a -0aa96800044606600961ef0000446066 -0aae720003cbe4d003919f00003cbe4d -0aaece0004be10c0e78e7400004b2ab0 -0aaf250004958e203962c300004958b5 -0ab0cc0003d0090035d53500003d0069 -0ab92300046b7720c69123000046b75b -0abd5b000473105076c55d0000472e89 -0abf096adcb4e1806c91de0070483353 -0ac16e9a19916d10ff98850090478439 -0ac1d7601a68f340f83d0a00904c1500 -0ac3210003d0d270fe1f6300003d0d26 -0acbf60004360d30b52dc500004360c0 -0acc6f00048e5df0488e82000048d1be -0acc760004be10c0e78e7400004b2ab0 -0ad3c6000431f34091a48900003d0c65 -0ad68b06e3ccce9057242c00004c05da -0adbdc0004a0c650baa5c000004a0c4d -0ae9d700048e5ce018bb86000048cf34 -0af4713787fda9702ffb8c00b04a6914 -0af5170003d89d7003ee4800003d89d7 -0afb3f0003d9ed603ea7e50000395bd7 -0b01240004925fb0636b3300004923a3 -0b08ac00048855c04f0337000048855a -0b0e996c8c5fd0c08a842f00d04836fa -0b15b402581b60909e7b79004049f7b5 -0b1a6219fb6097a0055df1009047cd7d -0b26970003cfb1b0ea76c300003921cb -0b29ed9c3faceb60427fac00a0473423 -0b2b760004c81c602f7c5600004a569e -0b2c0d0003e555b003f0d200003e555b -0b2c660003d10300fe84c000003d102f -0b2d939a9180d1e01a0ab200b04c5ba6 -0b2eec0003e5717002f35100003e5717 -0b3303d09709ccc0d63dd20030484991 -0b36a8000405013001286b0000405013 -0b3a85f2ccb13c409af3a8007049232b -0b3b01000418f97032b7120000418e3e -0b3c2c0004c81c602f7c5600004a569e -0b3fc26abd62b2c09073dc00e04a33be -0b41dd00040af79009c57a000040af79 -0b42ca00042c708009d801000042c708 -0b47750004cdd170b181be00004cdd0e -0b4ac30004925fb0636b3300004923a3 -0b4edb000498ebe0f1bb220000498eba -0b51900004079600ddf9860000407186 -0b5a532c32982d108e48b50040481bbf -0b5d62000399de3005ebe50000399de3 -0b5d62000399de30834f21000038758a 
-0b628a000400524009fb010000400524 -0b62a10004b31ff021aa7e0000459dc1 -0b714a0004a8948080f14a00004a893a -0b775c0004a9e16049b8ae00004a9de3 -0b78d20003ddcbc066280b00003ddcb7 -0b7d85b9212f08407c8c7200604a11fa -0b8733e4d8db7c70d4ef0a0040484181 -0b8aca00042b2dd00a0bb5000042b2dd -0b8fdc000438b1900a18500000438b19 -0b91b00003d35950fe891700003d3594 -0b94fc0003d19b9058c18f00003d19b1 -0b950be2e15ef1c050ce0f00d04beb0a -0b9c83cb0569aab04d828a00004014de -0b9f510004719710328daf000047196d -0bb79c0004594e0003917000004594e0 -0bb9a596d3145fd0807a7300604c94f5 -0bc1d70004a1c080af0a45000047d45c -0bc5230004511910032de20000451191 -0bc5f098a74af8a0b45e0f00704be5ea -0bc7750004cc90a079dfb300004cc8f7 -0bd12300046a129002c4f5000046a129 -0bd28000040a1000fea2c2000040a0ff -0bd5be0004cc90a079dfb300004cc8f7 -0bd5d100043f1760247bdc000043f16c -0bdd73000443fb8002f4760000443fb8 -0bdd7b0004770ab078faac0000420143 -0be2fefe119ebcd0f01dd9003047a9a9 -0be9170004c457707e2d6b00004c3523 -0be9a30004c457707e2d6b00004c3523 -0bee7e00046a0320ed5570000046a02d -0bf80f000400ab1000a7cf0000400ab1 -0bfa96c727f999401d4ba800c04a4ab9 -0c0a716d9958e0d039893300a04c47e8 -0c14d9e968c69e702fcece00004c7af0 -0c151400042095f0ff15dd000042095e -0c17710004305420fcebaf0000430541 -0c199600047f64a0f2730a000047f566 -0c1bbe0004a18b10a6238c00004a1821 -0c1c770003d84990003cf800003d8499 -0c1f8c0004a18b10a6238c00004a1821 -0c20ea000454a700038c760000454a70 -0c22560004baf500232e7400004b9ae5 -0c2bdc00043c67100afff6000043c671 -0c37aab9f7646c40617d3c00f04aa610 -0c39ef00044420f00ac3b0000044420f -0c3c2f0004770ab078faac0000420143 -0c3c960004792c400721b9000047927d -0c3e480003d852d0f36c9800003d84ef -0c439c00045bbcb0834f21000038758a -0c47be0004a288603d6bc200004a2866 -0c4e01cc03e15d70f6367b00d04bd2d4 -0c51a10004781dc0529dd20000474fa7 -0c55ab0004c68d405075a300004c67f3 -0c58098644e075708130a100c04c1989 -0c654b0003cb8de0df65e600003cb8dd -0c678c00049f99600e312b000049f7e9 -0c6943df9e853d8069a93c00f04ab8ee -0c6fb03fd50feb30baa5c000504a0c4d -0c77be00049f99600e312b000049f7e9 -0c7ca70004c6f960dab7d100004c6f57 -0c7dc500043d4700fa2e63000043d46f -0c7e4500047f64a0f2730a000047f566 -0c7fd000047f2a40a8f774000047f2a1 -0c8d7900043719e0fb90500000437053 -0c98db0003f7b120235bbb00003f79ae -0c9b96778033a8c02818ac003048de42 -0c9cd20003e2a82005740d00003e2a82 -0ca3bd0003f88e8000749200003f88e8 -0ca4a70004c33b802aad0a00004be716 -0ca54f16e6cb3f700725f100c0474a08 -0ca95a0004a8a2807415d000004a8a08 -0caddb163358d19054c36d001049b019 -0cb011f6368d6a8098131400004cb550 -0cb2b90cb8951b50b6b54a00304aa8b2 -0cbaf20003fe8de003059000003fe8de -0cbbac0004cb36c082385e00004cb367 -0cbf460004886f706af485000047546b -0cc05e0004cb36c082385e00004cb367 -0ccf08000444d2b00427b00000444d2b -0cd1af000471b4300b2c900000471b43 -0cd2451860dcb4f09f75e200e048d350 -0cd40f0003d0c62005da8800003d0c62 -0cd645000481ac6066c8d40000481a9c -0cd7e0000434ab200b6a5e0000434ab2 -0cde740004b8c2a069963900004b835e -0ce9aeb6bc73e770a2bc7900a04ce878 -0ced7000045318e0045495000045318e -0cf02c0004c385f02ee6e800004c28b0 -0cf3f099c48fed70cd1d0a00a04c0538 -0cf5f0420d5dfa003101a300d04beb57 -0cf6e20003c7fd10308b2600003c1900 -0cf7ed0004a7c900cdd5f40000491b41 -0d07860004886f706af485000047546b -0d0bac0004c99850e5431400004c9888 -0d168c000431f34091a48900003d0c65 -0d1d7300044421e00ac3b0000044420f -0d214ce0d3310d70daeee800204bee3f -0d25de0004829a10ba27ac000047715b -0d2dd6000424ecc0af80020000424eb8 -0d37190004884e90d570a7000045d456 -0d382b0004370540fd0ab10000437053 -0d43dc0004365c30198f5b00004365bf -0d4a4c0003c175f00b9e9000003c175f -0d4a4c0003c175f0834f21000038758a 
-0d52020004713ae00bd1dd00004713ae -0d5245000479f0b05c8fac0000479ef7 -0d52c3000477a1604c85d20000477a12 -0d57bb0003f190f00c080b00003f190f -0d5a390003d6f10003064800003d6f10 -0d5ca10004c586a0e832190000496f76 -0d633300047bbb609534b50000476fa5 -0d68d20003f47510233fbb00003f45ec -0d6fdc0004a288603d6bc200004a2866 -0d7c960004792c400721b9000047927d -0d7e7e00044c38f00c2de2000044c38f -0d7f5100046f8f60c2575d000046f8f1 -0d8d550004907d80837a240000490792 -0d8e9d0003b6054008a2fe00003b6054 -0d8e9d0003b60540834f21000038758a -0d8f510004723d100449af00004723d1 -0d9319d1a24e1210d115c100804c53dd -0d94d1af66a586b005c6c30010498bb7 -0d9646000405013001286b0000405013 -0d9bdb1735119ed003413300d04c49ad -0d9c6b0004112790bebd86000041126f -0d9da25342d303a06775b90060474921 -0da60900039ec2c03d1162000039c575 -0da60900039ec2c0834f21000038758a -0dac9043cf049ca0a2776d00c049fe67 -0db4b73ce11ebef0f0dadb00a049431a -0dc0db0003e41be006a00d00003e41be -0dcb330004907d80837a240000490792 -0dccef7d7d9d01d064e41300304ba7d0 -0dcd480004cda790a90d1b00004cda6f -0dce3cdc2e9ea81039f67b00304b9158 -0dceb20004beeff0806aa500004beefc -0dd84700040cfc00067d7a000040cfc0 -0dd93fcc3bf205b01bcb0a00604746a7 -0dee280004256090af80020000424eb8 -0dee740004b31e108c3e3900004b31d2 -0dfa660004bd77903bffa400004bd776 -0e06eb232d2435e0ca03ca004047254f -0e0bed0004a7c900cdd5f40000491b41 -0e0e280003eb1da003950b00003eb1da -0e1cf50004664be005f5c900004664be -0e1d1f0003e54940037d9400003e5494 -0e2d3b000447b99005f1e30000447b99 -0e31260797a24d6084945c00904b2f02 -0e39012144051e609279f400f04980e1 -0e3c7600046ab4100cedc9000046ab41 -0e404feddf0b5030a753ca0050472ed6 -0e407600045fff80058ca7000045fff8 -0e4b83a7d60ac880834f21005038758a -0e4b83a7d60ac880d33d18005038bd74 -0e4bfa0003ddcbc066280b00003ddcb7 -0e4f0d82ab0cd7804ea9c900a04aa54e -0e4f20855435d1f02298b500d047e1e8 -0e54a6abb0134fe03341a100f0475898 -0e5aba000415d8c006f6c20000415d8c -0e64a70004bedab050ce0f00004beb0a -0e6cc200048e5d60746624000048dffd -0e6ee41c87964c70cd68850080473a76 -0e7bbf00044fdb3040e0a7000044fccc -0e7dd600042a2ae00d1401000042a2ae -0e80330004a34090fbc3ed00004a22fb -0e8407a40595a19013d07600504c7f66 -0e85f7000399de3005ebe50000399de3 -0e85f7000399de30834f21000038758a -0e889500043fa0500d46fd000043fa05 -0e8f18d222d017f02c178c00404a6388 -0e8f910004cd1fe0d16acb00004cc043 -0e947458a2913db0f1fc5c00d04bc32e -0e97740004837c90e148470000420966 -0e9df4000498877040c52400004987f3 -0e9eb7d34bed3dc0b591d000e04a886f -0ea5c90004680f900d627e00004680f9 -0ea6d00004aa6a50617d3c00004aa610 -0ea8c2000494e0908615240000494dd8 -0eaf51000471fb50a4aeb100004353de -0eb186000421bf50f372ac0000421be1 -0eb66e0004cda790a90d1b00004cda6f -0eb9a100047ba4305dddf1000047b798 -0ebab100043d31b00d5e63000043d31b -0ebb2200048ea170060124000048ea11 -0ebc710003d4b09004224800003d4b09 -0ebfdc0004a37d004c21d90000483955 -0ec20f0004c7eeb0e832190000496f76 -0ec2f800048f70c091a48900003d0c65 -0ec5940003ed7d5006576200003ed7d5 -0ec60f6c277a274058a6960010487fc1 -0ec7ea00042d4af082cc9a000042d488 -0ecaf8000498877040c52400004987f3 -0ecb3f0003d1d79007173d00003d1d79 -0ecef8000495d9b0fbb1f40000495d97 -0ed00f000494e0908615240000494dd8 -0ed39812f2e2cef025afa400d04b0fe6 -0ed4c58f6fa16a602dffc000804ce734 -0ed6300003cbe4d0050b1100003cbe4d -0ed675000436653015a67e0000433f4d -0edb0a00048015c081c1d9000048015a -0ee2b100043e07800d8ce8000043e078 -0ee405d345eda53046513300604c3fb0 -0ee4d7000400ab1000a7cf0000400ab1 -0ef9d10004365c30198f5b00004365bf -0efb6cb6e2e2ed8076a1960070483903 -0efe150003d89d70054eaa00003d89d7 -0efe720003d252e007d31700003d252e -0f04db445a5915f0db4774006047b57c 
-0f06c28422c36de078ba6600f04ac76a -0f0b190004884e90d570a7000045d456 -0f10a40003f5d4800d7cfe00003f5d48 -0f15980004b7ca4014aa7e00004b7c98 -0f230a6337462de0ae2ea500d04c1600 -0f267e0004232c8092999f00003cb97e -0f28b12d1f436cf0f53aa100704bc3fc -0f298600041b0fa0e6bd520000419c37 -0f2dc5000433fc700d6ab10000433fc7 -0f2e070003e555b0054d3800003e555b -0f2e37c6d404b70065835500604bb08d -0f2f9a720a061fb072648500f0483736 -0f3bae0003fe8de003059000003fe8de -0f406b000401bd9007f6ac0000401bd9 -0f496b0004c3dee0d9937600004c3dad -0f4a280003e5717004a90b00003e5717 -0f4faf00043273506cb6b70000432730 -0f55d100043f82b0069e5e000043f82b -0f5c224516eee9607448d400d0473d5c -0f660c0004082ee04b0f6600003d01b7 -0f6a560003da0e806b0c0b00003da0e4 -0f6d5100049d77705ff833000049d76f -0f74db0003ebe6a00e317200003ebe6a -0f7c500004392460c4e71700003d2163 -0f7ce738320453b0ecc93300404c1ea1 -0f7d772e55147e309f75e2002048d350 -0f82f33a5f9c37503386ce00c04c46b5 -0f84a70004c22840f83d0a00004c1500 -0f88e27699d9bdf075d11700804c627f -0f937821293c6ae0171c8500a047550e -0f9ff00003d11ff00865fb00003d11ff -0fa5df0df0d201704faf6b0030485b02 -0fab0100040e78f00e6190000040e78f -0fb33300047f4ad02699d2000047f1e7 -0fb3413bd1addcd0807a7300004c94f5 -0fb851000486832090d5d9000047e6ac -0fc7590003d1d73033673f00003d1d70 -0fcac4a8190eda10b727a400a04b4a02 -0fcf89a2d8174ce0c2c9ab00d04c306f -0fda35000448aaf00e98420000448aaf -0fdceb00048960a01198c40000489537 -0fdf94d34447e3c0790f0a006047a8e4 -0fe1699aea425300c7d8a100a04be7f6 -0fe9720003ecf42054d0db00003ec9e0 -0feb740004759180c025d90000475912 -0ff0250004925fb0636b3300004923a3 -0ff2cb0004ccb2d063ef4200004ccb23 -0ffc0f0003d018400ea08900003d0184 -10045600049e5f51b80fed000049e5f1 -10087c0004a10691f1efed00004a105b -1011c97f4548fef1badbf50040488755 -1011d200047e97a13f6096000047e952 -1012150003d6f10103064800003d6f10 -10138f85c12e038171160d0080495e83 -1013ac000483dd71a639e600003cf5b0 -102049a279a67d01cdcde2003048d7a0 -1025d200048346e1b6ab74000048346a -102d96000484906108737400004848f9 -102dafc674c603b19786b200104c21ef -102fd0000473c7612c7c850000473be2 -10300500049b1a61b097ed000049b1a0 -10387c000435af610edab10000435af6 -10389df6fa934d6147cfc2000049ee73 -103910024d81900150e9d900d047bb32 -10557b00047e97a13f6096000047e952 -105aab0004b4b231b727a400004b4a02 -10605100048d2841cca5a8000048d276 -10608c9dd978cbb1faa2ce00604b8ad0 -10661157d51e0a0179c1ae004049e00a -10679fc154eb3a51e8145c00604bb578 -106b260003c156810ee5a700003c1568 -106b260003c15681834f21000038758a -10729bb634af77b1c502cb00904cd404 -1079c4fa3e413241c001e300604ce44d -10832000046f0b31b40490000046ebe3 -108468000424ecc1af80020000424eb8 -10940a18528ad1f17f2b7300c04ce459 -1094a80004b71a11080a7e00004b7198 -1095079df9330d2101587c00a04a2d84 -109658556611fe01a1480f0040494924 -1096e80004a28a0109247200004a1805 -10980c0003e2a82106d0a400003e2a82 -10a20c000421bf51f372ac0000421be1 -10a8c6b2da1c7d81eb39ec00504c93aa -10a9e200046a87d10749c9000046a87d -10b0c200049621812eb7a80000495f89 -10b78c00049e5f51b80fed000049e5f1 -10bc8e0003eb1da103950b00003eb1da -10ca090003d121610f640c00003d1216 -10cbbe0003d0c621074ff000003d0c62 -10cc1b290a5827d178310a00604c4cdb -10d36d00049b1a61b097ed000049b1a0 -10d8459b2ac717711e3ca800c04b2509 -10d87c00049f7f710e312b000049f7e9 -10d9f80003e54941037d9400003e5494 -10daeefb361c435131cbf500c0487301 -10f00500049d83615ff833000049d76f -10fa960004862161e351a80000485c95 -10ffea00042bd5f108cbb5000042bd5f -1105e60004cd41f1c502cb00004cd404 -110bac0004732e8105ebe50000399de3 -1111d70004a28a0109247200004a1805 -111c11db4d619ab118bb86007048cf34 -111fdae5d4fb2a21f819c100d04c5679 
-11242d3a38b27521e5e20f00704c4b22 -1124a218e977014100a9240090490cc7 -112604ba898a2db1d66f2200f0490972 -1128d944d52b0cf1ed43d100504c3925 -112a0c0004218a41bb7d860000421757 -112d365308c76d615a8bf50060487207 -1133330004991e1189320d00004991db -1143ac00047cbde1b1068700003b603a -1145e20004ac6b11de645c00004ac6aa -114999c35b3a10118584c40060486b40 -114f550004b317b116c3a400004b313f -114fcd0d22f46911be5b0200304a90d9 -11565280a1954d21a9233700d048b428 -115ed6085f086d0179dfb300104cc8f7 -11610600043311f10fd725000043311f -1161230004506141d913bf000045060d -1163ac00047717e1a3bc960000477152 -11641cc6fc76e12109247200404a1805 -11670800044606610961ef0000446066 -116876000464caf106fcea0000464caf -11692400049621812eb7a80000495f89 -116a0d000490bfa13e06190000490bf4 -116f430003cbe4d1050b1100003cbe4d -117586000400524109fb010000400524 -11784700040af79109c57a000040af79 -1179350004cd41f1c502cb00004cd404 -117a47556894c7115d0456006049e1a8 -1181fb0003d0b6e16e6ff000003d0b69 -1187c00004725ab1834f21000038758a -1187c00004725ab1ea76c300003921cb -118c2f477829ecc1a3cee800f049f7b5 -1193210003d89d71054eaa00003d89d7 -1195af00047071c10fe090000047071c -1197fcb84992c751916dde0070483c8b -119906000432ead1104a6d0000432ead -11a65f000445b081d4c8fe0000445b03 -11a78c0004a288318b1c10000043299a -11a85100048c9eb192f0eb000048c9da -11acbe0003c2b3f10fc1f600003c2b3f -11acbe0003c2b3f1834f21000038758a -11ad01206a0700d136bfc200b049e472 -11b1a3000487b04113164e0000487b01 -11b2530004be10c1e78e7400004b2ab0 -11b2ab0004b18af1864ea100004b16a0 -11ba7e0004494a21cfd476000044944b -11cd99ebb932d1712168ae00e04a899b -11d463ba73d04b712168ae00904a899b -11d5170004c7afb12fcece00004c7af0 -11d62e9fedbafb01f08d5500e0499941 -11d6b20004c079c196a4a100004c078d -11d6f7c39d2d6a71503abb00704ab21d -11dc2000046a0321ed5570000046a02d -11df0cedc4376c21b3a9a400804a9842 -11e0c10003e41be107f36200003e41be -11e0eb0004851c31e2df9200004450fe -11e33f0003d9990131e31700003d9967 -11e48ac35d5a73118fe3330050493fe3 -11f28000040cfc0107cf01000040cfc0 -11fa630004725fb1834f21000038758a -11fa630004725fb1ea76c300003921cb -11fba40004bdeaf187863900004bc9f9 -1200760004be85613d0c2c00004be838 -120847000401dd011082ac0000401dd0 -1209d90004815f21e4a9f10000481498 -1212592f87add321f3212b00a04a3f0c -121cb9e9e6bc57912ee6e800c04c28b0 -121d0f45853df5318f24ac000048e0c6 -12230463b9dab1115928d4001047c0af -12236b0003d4b09104224800003d4b09 -1223fc089901dc916caaa100204bc4a5 -1226560004b2e3915afdc700004b2e30 -1228f85ff0e08a712cf20f00804c4d37 -122db55953461c11b988a100604c091d -123780000473105176c55d0000472e89 -123df10004815f21e4a9f10000481498 -1245dd000471ea911101af0000471ea9 -1248dd6795d7c8d145810a00004c3e5b -1252aa0003d9fab1effb3d00003d9fa4 -125d9a00042b2dd10a0bb5000042b2dd -125eb70004325a31276f1100004324f1 -1260d20003efb541ecfd3800003e9715 -1263740004797881eb4da1000047950b -12688e0003e5717104a90b00003e5717 -126b4e0003e555b1054d3800003e555b -126c05dc31530d81e7e77600004c55b2 -126c91d27009d7a109e485007047a763 -1274a7000451b32126463b0000451aa9 -1280f4684ca619d18c2fd000c047a523 -1283f6000438b1910a18500000438b19 -128476000451c4919be49500004519a9 -128777acc09656714e96ab00504ac07a -128aac00040adf3110e6c2000040adf3 -128d184a4cd4a931e7e77600704c55b2 -12a29199898e532197850a00f04c64ba -12a82f00047e1ee12298b5000047e1e8 -12ac1fdcb053050134d7a400404b1f0d -12ae00a0f01000c19f38510000485f95 -12b3eeb75de3298165fd2400a0495db4 -12b61f0003f8949111449200003f8949 -12b6920003d9990131e31700003d9967 -12bfb6000415d8c10836ac0000415d8c -12c7ac0004732e8105ebe50000399de3 -12d02668632258712aad0a00c04be716 -12d2fdc49e45e941e8b496008047c062 
-12d3660003d0df411145fb00003d0df4 -12d74e0003ed7d5108d6ec00003ed7d5 -12e51882be99c221b9979700a047279b -12e8dc000472cd611a4eb50000472cc6 -12ea630004364a1111aab100004364a1 -12ecbc0004caaf516a431200004c9a20 -12f096bff356c4c1cd1d0a00204c0538 -12f42f000471bbb1a94e980000471bab -12f5380003f50fb11174db00003f50fb -12f6341b27ac1ea12cfe3900004badd4 -12f8760004c6a20101587c00004a2d84 -1302ce0004c6a20101587c00004a2d84 -1307d10004c58571e832190000496f76 -130f760004c45851e2c10a00004c30bd -131593c60b10fcd105add700d04a1d92 -13163e0003c175f10b9e9000003c175f -13163e0003c175f1834f21000038758a -1318f1f133660b511234a100d04c57cd -131a560003d28f712f298f00003d28d4 -131c960004747531476ac300004746ce -1321630434750e7197abec000048af81 -1323760004c457717e2d6b00004c3523 -132772a2865ef69139c9e200604b23f4 -1327c00004725ab1ea76c300003921cb -1329720003dd6e5169118f00003dc0f6 -132a2f0003f8016111e02d00003f8016 -132c790003d1d79108c80b00003d1d79 -1333110003cfae11baf6cd00003cba6a -1333ac00047717e1a3bc960000477152 -1336db0004916f7108a7a70000484bb8 -1338a80004bacee166e19800004baccf -1338febba178ae513962c300804958b5 -13397d1f6fc7495181dec300504839c7 -133b510003f190f10c080b00003f190f -1341b018c69256218ecf5c00b04aa101 -1342b100043c67110afff6000043c671 -1346b20004c45851e2c10a00004c30bd -134a16000401bd910958470000401bd9 -134ebac8717bff211f915b0070472f1d -13511f0003e2a82106d0a400003e2a82 -1353f5000485fca121aa7e0000459dc1 -135621976ed388212fcece00804c7af0 -1356760003d252e1093eaa00003d252e -135a500003c1f7b1121a7600003c1f7b -135a500003c1f7b1834f21000038758a -136d6800044420f10ac3b0000044420f -136e630004725fb1ea76c300003921cb -136ece0004c1a3b1d2c786000048c057 -136fa40004bdeaf187863900004bc9f9 -1376520003d0c621074ff000003d0c62 -1376c30004934c915f12f80000490b5b -1377c60ef438f3013233d100404bfca6 -1379170004c81c612f7c5600004a569e -137a9e0003cb9ce16a7b3900003cb819 -1385dd0cfb0b2b01d496ce00304b5774 -1395df09ff21ffe14621a400404a9b38 -1397877be5647011762eb200c04c5bab -13997000045a15f1e849a50000423041 -139a870003b891b10f7e9d00003b891b -139a870003b891b1834f21000038758a -139b750003d11ff109e76300003d11ff -13a3740004770ab178faac0000420143 -13a4520004cae6a18dbbcc00004cae64 -13aac1da91b2299119334600f048746a -13abf600043e17c10d8ce8000043e078 -13b5d34a9f31fc1114c9d200a0476fa4 -13ba13b759b3e0b139c3a400b04b21d5 -13bb7098085651315c9ece00c04b8661 -13bfcf00041b8d41aa2990000041b8cc -13c7a4000472c4116e0f5e0000472c3d -13cc2c383df1df81e8b496006047c062 -13cfc90004724b214071af000047249e -13d351aedb697751b2c62400a0485360 -13d4eb0004851c31e2df9200004450fe -13d9c5000434ab210b6a5e0000434ab2 -13e9c10004c079c196a4a100004c078d -13f5750003d9e051a36eaa00003d9e01 -13fc95000467098173f0930000467069 -13fecd0003cba63111e64e00003cba63 -140067e95d16cc917cea7e00204b7758 -1402cb0004ccd411d7efb300004ccadb -1403f00003d00611856fd300003d005f -140cfe00039ed5713d1162000039c575 -140cfe00039ed571834f21000038758a -14125000048e5c91746f33000047d607 -1413b30004ccd411d7efb300004ccadb -1414f82c974c52c17061e200a0488bef -141a720003dae6c1e7bd8f00003dae5f -141b330004916e9108a7a70000484bb8 -141d5b000472f371a753ca0000472ed6 -141d6b0004c44b51b8bebc00004c44b0 -142495000453264112e0930000453264 -142663000472f371a753ca0000472ed6 -1428bb2c21b4fe017d32c300104957c2 -14315b000472c4116e0f5e0000472c3d -1438fec9a68244e150d2e800704a32a5 -143c98f32226dd31822ab500404728ad -1443e85f50a7eb616d4696006048b417 -1448228b898253b117b13500804732c0 -1449e7a86fdba4b1ae25a300204881af -14542500048e5c91746f33000047d607 -145b5a2b7634b411951bd100704bf552 -145f54000484b9e1ea43540000484b62 -1463370004866c2125cdb90000482893 
-14640b0003e406214636ec00003e4060 -1465c90004664bb1ce24a70000444c41 -1465fb86725146e19bc5be00e04ce49f -146aed381bdb4b713c1c2800f04b0ab4 -146bd000047b9c319a897b000047b9b4 -1470200003cbc4e14f415900003cbc4b -14714300040cfc0107cf01000040cfc0 -1474e1dc500b9de19f8a1900a04907e0 -147924000494e0918615240000494dd8 -1479c0a226215681c973ac00a047cd26 -147a0c0003fa3fa1a3306b00003fa3e6 -147a3b00044c38f10c2de2000044c38f -147b7aaaeb3582c14b01d20000483ac6 -147b990003ca68514252d900003ca678 -147c0c0003e41be107f36200003e41be -147e6b0003c2b3f10fc1f600003c2b3f -147e6b0003c2b3f1834f21000038758a -1483660003d0d16115648900003d0c7c -14836f10a7ec5781ae5b2500304914f9 -1485330004c04bd12c16ab00004b224b -148f37000428659142f5db00004285ff -1495922593cbad31e24c2000a044f1e2 -1496a885dedcdf7100028b00104753a8 -149b432e4621c041cf6a450070473357 -149ebc0004c614814f1c2c00004c6039 -14a1cf000498877140c52400004987f3 -14a9ab0004c2ec21697ae800004c2250 -14b3200004713ae10bd1dd00004713ae -14b426000484b9e1ea43540000484b62 -14b696000488dc61ee535b000043f8c9 -14bc2f000471b4310b2c900000471b43 -14c753ff03dcf3e1ecbfd100f04c50e6 -14c8ca0004885d01a3dac700004885c9 -14d192734c95b4a15440d400f0473dec -14d39c00045981f1f9ad230000459819 -14d77000049b1a61b097ed000049b1a0 -14e0c00003d0c151136e5800003d0c15 -14e47600046147516278ea00004582c9 -14e5ae000499e58105c6c30000498bb7 -14f4470004193b1132b7120000418e3e -14f644f7a563d6b1f1efed00404a105b -14fc9a00042a2ae10d1401000042a2ae -14fea30003c156810ee5a700003c1568 -14fea30003c15681834f21000038758a -15018f81902d23810d651700104c61c0 -1503510003f5d4810d7cfe00003f5d48 -150cbb63c08069a174128200c048e03d -150f937927cf6c417e623900804b4c07 -15176bbc6ba16d0146247600804c2b0a -15217000045a8851139820000045a885 -15278c2d2d1d45d1d16acb00a04cc043 -152ace0004bc54b191e41300004bc548 -1531a5e0222931217808a700604c65d7 -1535f40004423e3165b79200004423a6 -1537420003f92a6113f79c00003f92a6 -1539c149cf5993b1caeace00004b7802 -153b6d0004a0de21adf00500004a0ddf -153e887a8b918381f1c68b00a047cefc -15417915505e3cc11993cc00104ca33d -1542f8000490c9e14524250000490c99 -1548714e71097841de645c00604ac6aa -154b760004c33b812aad0a00004be716 -154bee897c69800185d41300a04baf11 -1555be7fa3c38eb1801f6d004049aea2 -155895000415d8c10836ac0000415d8c -1559fb0003d018410ea08900003d0184 -155ad07dc2ff3511231abb00d04a949d -155bc16b7ec0b96168e78600d048b41e -155dfe00042bd5f10a3737000042bd5f -156224000490c9e14524250000490c99 -1562f100043fa0510d46fd000043fa05 -1563510004716311fb035d0000471623 -1563ae00040052410b628a0000400524 -1568a70004c0a8912c16ab00004b224b -1571e20004626471e8fe7e000046241f -1574e01fc1d6ffa113d07600204c7f66 -1579fc5f2330fd8116fa4500b047e185 -15821300040af7910b41dd000040af79 -1584330004a18b11a6238c00004a1821 -15879c00046b63b1d8ac20000046b628 -15888731c8eb7e51604b0a007047af27 -158bfa0003ebe6a10e317200003ebe6a -158ee16c6d7eeaa18494c200a049488b -1592480003d4488107173d00003d1d79 -159462634716bb21c5542800404b5651 -1594844eb227847150ce0f00a04beb0a -159d940003ee85617c140b00003ee7f2 -159e875fd3210121e351a800f0485c95 -15a2e90003ed7d5108d6ec00003ed7d5 -15a5e20004bc54b191e41300004bc548 -15a8a30004374cb1144eb100004374cb -15a9c500043d31b10d5e63000043d31b -15abe90003d0d16115648900003d0c7c -15ae390004bb3db1617fa400004bb3d7 -15aea69b3639661165835500204bb08d -15b1b900047f64a1f2730a000047f566 -15b64600042e4ab1322a58000042e4a3 -15b696000488dc61ee535b000043f8c9 -15b9350003d0b6e16e6ff000003d0b69 -15bdde000483d3412963330000483ccc -15bff00003d0b6e16e6ff000003d0b69 -15d65e000433fc710d6ab10000433fc7 -15da6f00042e8c116e894e000042e8a6 -15db790004a288613d6bc200004a2866 
-15e02f0004701fc13595af00004701ec -15e613000401bd910958470000401bd9 -15e68200048d2841cca5a8000048d276 -15e6c200040e78f10e6190000040e78f -15f1020003c1f7b1121a7600003c1f7b -15f1020003c1f7b1834f21000038758a -15f3bf0004680f910d627e00004680f9 -15f6c300049621812eb7a80000495f89 -15f8ed6fc68f08a1356c0f007049422c -15fbdb9cef058cd11c7dde006048345d -1600560004a113f1a2e55100004a1120 -16008afe10ea6ab18bdcb50030477127 -160ac200040dead1e1a047000040dea6 -160c6a40512fb8a154592a00404ca5ed -1619d3c3c8778c715a301f00504a7e5f -161eba0003fe26e1834f21000038758a -161eba0003fe26e1d33d18000038bd74 -161f9cb063f487c13233d100704bfca6 -1624fe0003f70ea114d59400003f70ea -16255d000472e97176c55d0000472e89 -162a270003d252e1093eaa00003d252e -162b41dd4a8c82f198131400204cb550 -162c200003cbc33114bf1100003cbc33 -162dc0d15368ec21ba27ac00f047715b -16316acc9bc8d6a1000bd000b047b877 -1632bc0004c614814f1c2c00004c6039 -163ea10004ae2f21cad19400004aaa26 -16412300044d5751958420000044d569 -1643e9de688aaa41f83d0a00b04c1500 -164bdc0004a15cd1f20fc200004a1567 -1650d400047e97a13f6096000047e952 -1653be0004a15cd1f20fc200004a1567 -1655780004482ce114d36700004482ce -1657210003d11ff109e76300003d11ff -1658a40003e549a1023cd200003e5494 -16692b0004a02731c64b8e000043fa11 -166acd9fefef6e31d2e4b50000473785 -166e7c0003ca68514252d900003ca678 -166e7c0003ca6851834f21000038758a -166e8b00047d5ab17ce5d2000047d58b -1670130004b9f17190c99800004b9f11 -167058cb38aac5316caa4e00104850f7 -1673600003d1d79108c80b00003d1d79 -1675fb0003d0d16115648900003d0c7c -16787c0004a0de21adf00500004a0ddf -167995f5e1ce02012b635e0090472ad5 -1679c10004c385f12ee6e800004c28b0 -167bae341a1a87b17628a800904ae686 -1680e042ad0bd5914992c3009047d520 -16828a00041b8d41aa2990000041b8cc -1683a80004932b81d92425000049325b -16849e744b517ed113f4a800a04ba11d -1684fe0003e1a871151d9400003e1a87 -16882f00047180f1152720000047180f -16915100049f99610e312b000049f7e9 -16a2a300042b2dd10b8aca000042b2dd -16a4a700044515e187235300004450b9 -16a502deb917cc31f0485c00f04b25b9 -16a73300047aa541b351d20000473e7e -16aa7e00042bcae1e148470000420966 -16abc8562f11e2515e32ab00b04bd23c -16aca80004b910a10c4e6600004b90bc -16b4a80004ac07f14e96ab00004ac07a -16b7059bc7838b8156a9e200d04aceb3 -16b96f0004a9e16149b8ae00004a9de3 -16bbdc0004356db11560e800004356db -16c1e30004cda791a90d1b00004cda6f -16cc672922adad21936a8b00e0475cf2 -16d0c00003d0c7c115648900003d0c7c -16d329f075f4388183ddd700704a761b -16d3660003d121610f640c00003d1216 -16d39ee1793bfb9186fab1004043539b -16d40f000498bd01ea76c300003921cb -16d924b9a42de32181a71700204aa0fe -16ddba00042c70810b42ca000042c708 -16ea0a233516068125cdb900f0482893 -16edad6ce3eecca1569c7d00b04a925e -16ee67809555982195111700604c56c8 -16f40cd5966a8931bebbd100d04c189c -16f7cdcd358c70f1ed153300c04c5add -16f887000448aaf10e98420000448aaf -16fa05000438b1910b8fdc0000438b19 -16ff620003f7b191235bbb00003f79ae -170a27000433f4d115a67e0000433f4d -170c05000499ead105c6c30000498bb7 -170f58abd8986341e3928200b0486558 -1723e0000435af610edab10000435af6 -17247dace68faa41a5481300e04b170c -1727a07065b3ae513806450070477943 -172be624d4312b716e31e200304bb304 -172fa54fb490782187617b00c04745ad -17379fe61b0da611e148470040420966 -17388975aacef731c2fbbe00f049e759 -173d0f71f4f5f89166d70a00f047fcfc -1744330004a288318b1c10000043299a -1745de0004747531476ac300004746ce -174b510003f0b9819facc000003d12d9 -174dde00047cf8e15da096000047cf8b -1750200004517a8116247600004517a8 -17520c0004216ea115e98600004216ea -17567c00047163d1160e98000047163d -1762630004392461c4e71700003d2163 -176c183668fcc701cdd5f40080491b41 -176c342af74823e16f0645000047c4c2 
-177df2000396012115a8020000396012 -177df20003960121834f21000038758a -177ea30003b891b10f7e9d00003b891b -177ea30003b891b1834f21000038758a -1782f569750005710b719800c04bbda0 -1783fb0030c18e81fccfc00000472591 -17849500044c852144e67e000044c81e -1786430003f190f10d57bb00003f190f -178a995be99beee1a229d800704ca072 -178b8c0b5ea439d1c11964000048f4f2 -1791720003efbd911672ec00003efbd9 -179b78a02721af315d1dab00d04c4268 -179fbb0003e2e2a16c30820000393484 -17a1d200047cf8e15da096000047cf8b -17a4e93d575ed211f5878c00e04ab4f1 -17ab55aab99a8d216b3d4a00204aa5f7 -17b02c0004c67f815075a300004c67f3 -17b44c579079f47175d11700204c627f -17b9f37b8489bd214e0f1700d04a9224 -17cdc9000459dd0121aa7e0000459dc1 -17d10a0004c7eeb1e832190000496f76 -17d2cd0003cfae11baf6cd00003cba6a -17d5750003d5ded141e24800003d5dec -17e2127637dc298134047c00004a2970 -17e3590003d19b9158c18f00003d19b1 -17e54a0004aa24c1c7371700004aa246 -17ed2c00046f02a16ad0d7000046efab -17fcfb00042bd5f10a3737000042bd5f -180a8a00040f64a186cd1700003dc131 -180cdaaa2d4c96b13e450a00904c4775 -1812d499ae5adb3136c77500904ce2ec -1818d3c8d8fc0101448c5c00604b2345 -181ecc0003b4c8b13ea7e50000395bd7 -18200f00040052410b628a0000400524 -1822f800048e5c91746f33000047d607 -1825a30004c1a3b1d2c786000048c057 -1826c27ef1fa6021a33a45007047d0fd -1830a7000456bc3144ae7e0000456bb0 -18340b0003ef5ce145640d00003ef5ca -1834fe0003ebb281a2676200003ebb24 -183773dfec60cb21db99e200a04ac1e6 -183be258f70ae0f174dd6b00304c4344 -183c0f0004907d81837a240000490792 -18416f0004aa24c1c7371700004aa246 -18454300040af7910b41dd000040af79 -184e9600048b42616d4696000048b417 -1854ca00048b42616d4696000048b417 -1854db8b5991af5179538c000049cf1b -1856190003c7fcd13200d700003c1900 -18686b0004478a4181f17e0000445e35 -186e8000046ec671a06a80000046ec63 -186f13000434ab210cd7e00000434ab2 -186f3300047d74c17ba1f1000047d739 -1872d6b86481b891413d96003047e779 -187429d464230b41f6d2ce00604c2f6d -18746b000401dd011082ac0000401dd0 -1875db000422eaa11725d60000422eaa -1882090003d0df411145fb00003d0df4 -18889500045c2cb154879c000045c2af -188e1b0003910901171f890000391090 -188e1b0003910901834f21000038758a -1892ac0004007d3116fec200004007d3 -18989500045a24611b58ea0000459ef4 -18a118000432ead1104a6d0000432ead -18a4b900043311f10fd725000043311f -18a62800042c6a21d901d6000042c4e1 -18a6e8773867f7e14abc2c00104c4aac -18a96cf52e94de9149243300f049fd89 -18ad9d62e0689f117f8ed200a04ccf2e -18b40b0003e261c117399400003e261c -18b40d0003f50fb11174db00003f50fb -18c4250004908661eecc0f0000490838 -18c6e42c8b40ce8124f7dc00704a27d2 -18c98f0003d4d76117457500003d4d76 -18cec30004736571f3980d00003e80e8 -18d00b0003e15de1f2a76200003dc222 -18d46b000446d0b165b4000000446cf4 -18d60d0004974ee18ec76c0000456c64 -18e02f0003f8949111449200003f8949 -18e1e68f09b42d71eecc0f00c0490838 -18e4910003f8016111e02d00003f8016 -18e5528f97a1042128e02f0080480d14 -18fa8c0004325a31276f1100004324f1 -18fb26d3f2915421c2a05600a04a227d -18fc9600047a8aa1a0e733000047a8a4 -18fdc10004c68d415075a300004c67f3 -18fea50004bec6d1d5a1a300004beb72 -18ff590003dae84125727200003dae70 -18ff620003efb541ecfd3800003e9715 -1900fe0003e406214636ec00003e4060 -19050a0004c1adb11236a500004c1ab5 -190664648112ca21e93696000048ca75 -191b010004218a41bb7d860000421757 -19302f00047071c10fe090000047071c -1931590003cfae11baf6cd00003cba6a -19342747c1f4ce212cfe3900304badd4 -193c5600049a33312019cf000048ebce -1940bd382e39d3916fef7600c04c2c24 -1941ef000445135187235300004450b9 -1942ba00040adf3110e6c2000040adf3 -1944c6b876d3739108a7a70020484bb8 -1946760003c1f801121a7600003c1f7b -1946760003c1f801834f21000038758a -1948560004a15df1f9b7be00004a15d7 
-194aaddaa26b5a219e7b7900a049f7b5 -194c8600042b2dd10b8aca000042b2dd -1950a90003d018410ffc0f00003d0184 -195cd8084f1b6eb1636b3300004923a3 -1960e90a77ce4121ebc56b00004c40f4 -1963550004bae7b1256eab00004b9a08 -1965cc0003ebe6a10f74db00003ebe6a -1966a300042a2ae10e7dd6000042a2ae -196a920003d9806119e65600003d9800 -196e070003f5d4810f10a400003f5d48 -196f51000471ea911101af0000471ea9 -1970820004cdc481ae29d000004cce2b -1979c70004b34e81c7663900004b34ac -197cbd93d64e34f14c4722005048f49d -197f3f0003d5f88118398f00003d5f88 -19825e0b101e1221e3730a00e048407f -19874f0003cab7a11836a900003cab7a -198b0a00047a8aa1a0e733000047a8a4 -1990a70004c6a20101587c00004a2d84 -19938c0004a15df1f9b7be00004a15d7 -1993ec97ac5065011983b300804cc88f -1997cf00040587f107f6ac0000401bd9 -199d406895a657c1ff0ec30060474e5d -19a1ec1ae98b9dd186d9d200d0481960 -19a84c00042c70810b42ca000042c708 -19ad970004488ea118704200004488ea -19b7530004425be11e747600004425b9 -19b91b0004cc90a179dfb300004cc8f7 -19b97875595df6416e9bac006047d3ed -19b97b000483d3412963330000483ccc -19baec0003e406214636ec00003e4060 -19bcc00003d0070135d53500003d0069 -19ccf79b40776291ec5f7600304c313f -19cd750003d193e169ff3d00003d1938 -19d02000045a8261183c76000045a826 -19e56400048f6741e3730a000048407f -19f22400048f3b315da096000047cf8b -19f3a7e4e7afdb21c7577400f0476983 -19ff2200048f9b81e148470000420966 -1a045fa02bfac7e13793d0001047b889 -1a067c000471bbb1a94e980000471bab -1a07ac0004815f21e4a9f10000481498 -1a085f000438b1910b8fdc0000438b19 -1a0c130004b31ff121aa7e0000459dc1 -1a0cd700040e78f10fab01000040e78f -1a15940003e15de1f2a76200003dc222 -1a1e144bd6d879d11d2a7300104cb575 -1a2555000496edb12d09f400004967bd -1a25d200047ec881b1068700003b603a -1a29f80003f190f10d57bb00003f190f -1a36ab0004b540d126c4ca00004886ff -1a3af9f10c422da1f0dfd0009047f07e -1a42740e2e7609919e9433008049f049 -1a48598ac438422194042c00804c24f9 -1a4e9d000433fc710f2dc50000433fc7 -1a550b0003f41dc118eb5100003f41dc -1a65cf00048e6041ea09f4000048e5fd -1a674521bfb2cfc1dcd25300404c471c -1a7022beac1301e1ca03ca005047254f -1a75d700048e6041ea09f4000048e5fd -1a76a10004b540d126c4ca00004886ff -1a7e8b00047aa541b351d20000473e7e -1a821c8ede58e5816eaa8b00d048372a -1a875c0004ab1a11bfeed000004ab19c -1a8fc5e8d98aa9c11e1dab00804c50ba -1a93dc0004a0c651baa5c000004a0c4d -1a940d0003efb541ecfd3800003e9715 -1a9847f0651e8ca1e84c2800504b947b -1a9b170003d87521dea11700003d86c2 -1a9ea50004c45851e2c10a00004c30bd -1aa4660003d1216110ca0900003d1216 -1aa7a53201810b11b5836000d044fd24 -1ab0dcb485351b2147e0a700404c6e26 -1ab1509c9eb35ec1927c2c00a04c513f -1ab5b900047e23d1aacdd9000047e23b -1ab6c200040708c1ab34470000407087 -1aba630004365bf1198f5b00004365bf -1abc720004a105e1f1efed00004a105b -1ac00b0003f506a11963bb00003f506a -1acefe000396012115a8020000396012 -1acefe0003960121834f21000038758a -1ad1e2000456c4b1198bbf0000456c4b -1ad4980003cf5ce1f83da500003cf5ab -1ad4b500047a9151a0e733000047a8a4 -1ad4d20003f41e2118eb5100003f41dc -1ad5af0004724b214071af000047249e -1ada4ee2355f73c17429ae00d04a32c5 -1add480004cd41f1c502cb00004cd404 -1add720003e282f119780b00003e282f -1ae1bd0004432a11d615f4000044329f -1ae2aa0003d351616f9a4800003d350e -1ae308000445c381199def0000445c38 -1ae55211a4fdbf81e2af0a0060482c44 -1aef46000487b04113164e0000487b01 -1af179000437f5b11990e80000437f5b -1afe847f9636d0c1e8167b00c04b0270 -1b00116bbb84a521accfbb00503e5555 -1b05a1aa6848ca413903ca0020472f62 -1b05af000471e781199c900000471e78 -1b0774000480c331c025d90000475912 -1b085b5981105201bb3322002048f88f -1b0f80000472c4116e0f5e0000472c3d -1b10a90003f92a6113f79c00003f92a6 -1b1db2000440c5f119ca300000440c5f 
-1b21a30004c399f198590a00004c399c -1b2b4f325f58cfd16775b90010474921 -1b2cbf00038e0dc119bdac000038e0dc -1b33590003d5ded141e24800003d5dec -1b34d20003e55d3119317200003e55d3 -1b3573d2cee23e21697ae800404c2250 -1b38a51a8e045771a6cd5a00104a86c9 -1b3b170003d9800119e65600003d9800 -1b406800042230311a00020000422303 -1b42240004878bf1ae40fe00003ed13c -1b48200003cbba2119fb1100003cbba2 -1b4f0f3026af66e148cfd100e04c0a89 -1b4fc20004a28a0109247200004a1805 -1b536b00048c38d121aa7e0000459dc1 -1b54ea00044a0a9179d76c000044a09e -1b5ac20004218a41bb7d860000421757 -1b6f660003d0c151136e5800003d0c15 -1b70d700046f4e7119e75d000046f4e7 -1b775e0003910901171f890000391090 -1b775e0003910901834f21000038758a -1b7bfa0003e8232119e0a400003e8232 -1b7c5f000435af6110387c0000435af6 -1b7da5000434ab210cd7e00000434ab2 -1b7e7d0004325a31276f1100004324f1 -1b8a390004b4b6a1b727a400004b4a02 -1b91c70004b0fa31666ace00004b0f98 -1b99bf73fd9a0f41104a8b0070481ace -1b9b69537bf57aa13e41f1004047c5a0 -1ba6630004343c818403dc000043438c -1bad750003d19b9158c18f00003d19b1 -1bb8f50004500cc162cd2300004500c6 -1bbc20000453264112e0930000453264 -1bbcef0003ebe6a10f74db00003ebe6a -1bbeb8c52409e801134c7200004a4515 -1bc27e000455e181834f21000038758a -1bc27e000455e181b1068700003b603a -1bcb860004866c2125cdb90000482893 -1bd0980003cbc33114bf1100003cbc33 -1bdd380003f70ea114d59400003f70ea -1bde740004bb57d1e8145c00004bb578 -1bede17baf40aa71e78e7400f04b2ab0 -1bef6000046c6d61834f21000038758a -1bf018f53dd327a16a5a0f00d04c58e1 -1bf0441536453a21f969f100b047caa9 -1bf95d0003c03371834f21000038758a -1bf95d0003c03371e6078800003c031f -1bffda0003d018410ffc0f00003d0184 -1bffe2bc492de511814ba8000049acb0 -1c08a700044a3f611ac67e000044a3f6 -1c08d700042a2ae10e7dd6000042a2ae -1c092c17495d37f15d1dab00304c4268 -1c095200040836f11a9ddd000040836f -1c123000043fd791e87abc000043fd66 -1c13c4a3c87c2ad171d5e300104cdd95 -1c14690004bbc3a14f599800004bbbf3 -1c15940003eca9f199399400003eca98 -1c203300049f5e21ada151000049f5c5 -1c2361d4c50696f1c4b03300e04a2d1f -1c2d70000454e231f69fbf0000454de5 -1c324500047b9c319a897b000047b9b4 -1c36720003d5ded141e24800003d5dec -1c3b6d0004a0a9a1e5207c00004a0a95 -1c3c0118e1a7ac51adedd700f049fbbd -1c3de60003cba63111e64e00003cba63 -1c41f80003f5d4810f10a400003f5d48 -1c44c00003d029516cf09000003c9539 -1c4905d98cbbb8d16cf1d800504c8b72 -1c4cd20003f4f7d11af7fa00003f4f7d -1c4ddd0003fa3f81a3306b00003fa3e6 -1c51bd0004451621d7f56800004450b2 -1c54b9cd2df554e1cdd2e800804c546f -1c59c31dfb5d2ba18157be00c04a46f1 -1c5eeb00043311f1116106000043311f -1c62f27cd9fecb311cff1400704c9e0a -1c639c00045a8851139820000045a885 -1c668a00041379411af1520000413794 -1c66b2aa304be8218a1e7400204b0a79 -1c76090003d0c7c115648900003d0c7c -1c779562c19b5561cb53ac005047f62b -1c78e80004374cb1144eb100004374cb -1c799f79bcbe564110e737001048809b -1c7d380003e1a871151d9400003e1a87 -1c80660003d0df4112d36600003d0df4 -1c857900043ae1111b43e0000043ae11 -1c89c56358336321568f7600704c51c1 -1c8b27b3e8a8c7912b161900a0496da0 -1c8d7000045b8981e47970000045b893 -1c8f4d0eb4321b21e8eff5001048b4ff -1c912300045b95f1bf7820000045b948 -1c949300044b07011b476c000044b070 -1c9495000451b34126463b0000451aa9 -1c94c2000491ad716d99d2000047e172 -1c95e60003cbaee186369e00003cbada -1c96480003d1d73133673f00003d1d70 -1c9a0d8fe2dec2211cc9e600b04ce364 -1ca7b600040e78f10fab01000040e78f -1cac870004478a4181f17e0000445e35 -1cad510004a2b7d1f83da500003cf5ab -1cb0db0003f77f716edbfa00003f77e8 -1cb1750003d2b821be2a4800003d2b80 -1cc616000401dd011208470000401dd0 -1ccd9000040995315d495200004098ba -1cce390004b4b6a1b727a400004b4a02 -1cd05000043c55b13eabc3000042eef5 
-1cd1e3a2e8d31821db4774005047b57c -1cde7e0004500e9174dc7600004500d8 -1ce51f0003f50fb112f53800003f50fb -1ce7e50003b4c8213ea7e50000395bd7 -1ce92f0003f8949112b61f00003f8949 -1cf816000433fc710f2dc50000433fc7 -1cf8a80003924c31834f21000038758a -1cf8a80003924c31d33d18000038bd74 -1cfd23000459ef411b58ea0000459ef4 -1cfe9d00039b6e4105baa3000039ae0d -1cfe9d00039b6e41834f21000038758a -1d1095000464d371834f21000038758a -1d12000003f80161132a2f00003f8016 -1d149300044969311ccf6c0000449693 -1d18a700044fdc4140e0a7000044fccc -1d1b74000480c331c025d90000475912 -1d1f328e8ef769d17c3ba400004b6245 -1d22d41598d89f21a80f0200204a8b1c -1d26cd000432ead11199060000432ead -1d2850d3ad49fd41927c2c00404c513f -1d30910003d1216110ca0900003d1216 -1d37fa0003f0d9f1accfbb00003e5555 -1d381d64d2441d41dc71d000004aa942 -1d397a00041182914a546b000040eae8 -1d3dfd1ff6c8bb814f599800a04bbbf3 -1d4933dd9d240c2124f7dc00b04a27d2 -1d49be0003a7fc911bd4fe00003a7fc9 -1d49be0003a7fc91834f21000038758a -1d53dc0004a0be512d19240000496371 -1d54232b19e86a415317be00d04a6b8d -1d5624ccc4b288115dce7e00604b2387 -1d566d00043315c10fd725000043311f -1d588500048476b1c883740000484745 -1d5f550004b910a10c4e6600004b90bc -1d67d0000473c371514e8b0000473c2c -1d6edb00048f6741e3730a000048407f -1d712300046b63b1d8ac20000046b628 -1d7624b56f6df0f13233d100104bfca6 -1d7c379f37107c11231abb00704a949d -1d8005b81a0f0e21310b3300004846cb -1d87f1762fb6cac14dcfac003047a914 -1d89c70004bc54b191e41300004bc548 -1d956400048f9b81e148470000420966 -1d95d2000474c58130868b0000474c56 -1d98ba29363b11613b51de0050481c3a -1d994600043d47b11a665e000043d47b -1da2a10004b067e1cfee3900004b047e -1da46b0003fca6019288a400003e19e6 -1da7140004cafa11b81a4500004caf85 -1daa9e24678094217e15a100d04758af -1db49a000433f4d115a67e0000433f4d -1dcae58c72d935816595e200c04af2a7 -1dcfcf0003fe0af19288a400003e19e6 -1dddb3e13dc405c1834f2100e038758a -1dddb3e13dc405c1ea76c300e03921cb -1de13100040adf31128aac000040adf3 -1dea7d0b27173c41fe696b00704c01e5 -1deab10004356db11560e800004356db -1df4730004482ce114d36700004482ce -1df97a0004216ea115e98600004216ea -1e0c2b0004364a1112ea6300004364a1 -1e17620003e31e317f53fa00003e2f16 -1e22b20004bf41b179507600004bf3c5 -1e26910003c26111308b2600003c1900 -1e2b510003e424411d153800003e4244 -1e2ba2000484dc01e148470000420966 -1e30a80004bde8e151b65600004bde87 -1e322400043311f1116106000043311f -1e38a40003df3671f1589800003d4bb0 -1e3fc900047180f1152720000047180f -1e48f50004517a8116247600004517a8 -1e49a1565ed4e571864ea100c04b16a0 -1e51380003e261c117399400003e261c -1e6cb3000435af6110387c0000435af6 -1e6cfe0003a3dc616c30820000393484 -1e70a18b46fc61b1595219001048ed26 -1e76ac0004018b01614ec20000401865 -1e78d20003edb6d11d33fa00003edb6d -1e7d871507bd49c172ca4500104826ca -1e831900048c9eb192f0eb000048c9da -1e88020003b4c8213ea7e50000395bd7 -1e88020003b4c821834f21000038758a -1e8ecd0003cf5ce1f83da500003cf5ab -1e9152000407a3611d46ac0000407a36 -1e959a0004233e511d333700004233e5 -1e96e7960d81b1d1434b0a00c047fd1c -1ea2a791991a31516c187c004049aa7e -1ea6720003d4d76117457500003d4d76 -1eaf6351b9dcc9a1e1059800504b385a -1eb0a70004425be11e747600004425b9 -1eb1ee37b601de2154ed5100304a2b0d -1eb35d00047163d1160e98000047163d -1eb420000457927140f27e00004555c8 -1ec29800047137011d5b510000471370 -1ecab10004343c818403dc000043438c -1ecf8324d34d970108185c00a0484d3e -1ed00b0003f785e1f6880d00003f785c -1ed0f8116e1beb71c502cb00604cd404 -1ed26a131a3e56512cf20f00304c4d37 -1ed2db0004909ad1d66f220000490972 -1ee00500049fbe41adedd7000049fbbd -1ee2480003d5f88118398f00003d5f88 -1ee4ca000488dd01ee535b000043f8c9 -1ee9980004ac42a1df6ace00004ac099 
-1ef24d0003f92a6115374200003f92a6 -1ef2d86dbfec7ee175b5b900e047fc39 -1ef3390df711224162036b00604868c9 -1efca700044c852144e67e000044c81e -1f01910003caba911d475e00003caba9 -1f045600046e1bc11db5dd000046e1bc -1f0e8a00040708c1ab34470000407087 -1f0eba0004007d3116fec200004007d3 -1f101a58d824ba2195a6ab00404b3d0b -1f120d000496edb12d09f400004967bd -1f179c00045849c19d0bbf000045826a -1f1d380003ea9bb19438a400003e585b -1f279c0004598b81bc68a70000459842 -1f2ca7000444cc01ce24a70000444c41 -1f3e7d00043273516cb6b70000432730 -1f40910003d0df4112d36600003d0df4 -1f415e08749339b1eb4da100e047950b -1f43160003cab7a11836a900003cab7a -1f46430003f50fb112f53800003f50fb -1f48f80003da0e816b0c0b00003da0e4 -1f4a720003cf5ce1f83da500003cf5ab -1f5a6ccd545491b11983b300f04cc88f -1f5d08bbfdce1df1837a240010490792 -1f6876000460a8317f74760000460a7f -1f6b94589f838db1bfe4d400e0475a42 -1f71300003f8949112b61f00003f8949 -1f72480003d9806119e65600003d9800 -1f73b0a2339bed41871964001049567a -1f794a2f0c584c11875a7e00f04ac49b -1f7ba40004abe2b19e9433000049f049 -1f7bb9de5247ae11ed153300e04c5add -1f7de3f5b7ffba81a0c6ab00104b5493 -1f807ba254ca29e1057b3300504cc4f7 -1f83c00004ccd411d7efb300004ccadb -1f8b6e9969817bf1733dc700404b1c51 -1f8f4e0003f70ea11624fe00003f70ea -1f90b5109373018156ab7600904bf5f9 -1f94b60003f80161132a2f00003f8016 -1f9613000401dd011208470000401dd0 -1f97420004ce7a41df274200004ce6f0 -1f975e2572b6e9410b2bc200d04a3d8e -1f985c0004b25be1f0485c00004b25b9 -1f9abc0004be09c1af0a45000047d45c -1f9ff50004865341ea64020000425ba1 -1fa1b40003d0c15114e0c000003d0c15 -1fa26e0004ce7a41df274200004ce6f0 -1faa9d0003a3dc116c30820000393484 -1fb7bf0004517ba18080ea0000451626 -1fbcd400047b0c11c6fe450000473eb8 -1fc00d000432ead11199060000432ead -1fc3bf000464f9a16a536c000044a043 -1fc672698fd7bfc14f6c8200204cc801 -1fd3370b6e4561210d651700f04c61c0 -1fd480922a71aa715f8a1900804985e2 -1fddc500043cde11e148470000420966 -1fdf430003cbc331162c2000003cbc33 -1fe3cc0004cbf1412f7c5600004a569e -1fefa2000484dc01e148470000420966 -1ff96afdeb2fa24174128b00e04749a5 -1ffeb47d809cb671ada151009049f5c5 -1fff170003d1d73133673f00003d1d70 -1fff530004425b911e747600004425b9 -200af15f89cd821279ae960080486684 -200db5d880aff512e5e20f00604c4b22 -200ea10004b1d67250947c00004a1491 -20169e0003cf84f26607a600003cf84c -201876000461ace21ec1c90000461ace -2022ac000412e0221eb2c20000412e02 -2024f500046c6c929ccc93000046aea9 -20267c00046f06521e9a98000046f065 -202ab1000439caf21ebbdc0000439caf -2033921ef0a115f2c8eb8c00b04a0994 -2033b9d797855e82313b33007047adc9 -20389ce02bfa6f724f6c8200704cc801 -203d940003e559a2accfbb00003e5555 -203f620003de06f2db8bfa00003de069 -2043170003d2b822be2a4800003d2b80 -204476000456bae21eeb6c0000456bae -2045170004c597b240b96b00004bf52b -204e8a0004122c62bf12ac0000412294 -2052ec9b7effcbf211557b0090473ce3 -2056a50004c482123d21c700004b2137 -2056ec0003f41dc218eb5100003f41dc -205b53f30bbdff72a2f28800404cccdf -205e990003c87792a66e7c00003c448f -205f550004bbc0224f599800004bbbf3 -205f700004a15cd2f20fc200004a1567 -205fa871c5b16b821e1dab00604c50ba -206d240004912a52754ba8000049129c -206f220004912a52754ba8000049129c -2070980003d9806219e65600003d9800 -207505460f70d89223521700c04cb863 -207c660003d0c7c216d0c000003d0c7c -207df10004837b32e148470000420966 -207e9e0003cbba2219fb1100003cbba2 -2080a7000468ca821f4f9c0000468ca8 -2081dd00041a57d28421dd000040652b -2085060003e1a8721684fe00003e1a87 -2085636e03c6cfb235890a00304c6d96 -20882f00046f02a26ad0d7000046efab -208a4e0003cbaf621ec42000003cbaf6 -208ae84adb66838213a1d20000474928 -208c020003a7fc921bd4fe00003a7fc9 -208c020003a7fc92834f21000038758a 
-20903021e1a0ef327c09ae00c04a272e -2090760004635a821f29c900004635a8 -20968000040adf32128aac000040adf3 -2096c90003cba63213fecd00003cba63 -209a820004364a1212ea6300004364a1 -20a5e200045247a21f1bbf000045247a -20a82000045980024119e30000448a71 -20af3700042af3921ede28000042af39 -20b2a200044de8d2e6f020000044ddc6 -20b8f500045a8262183c76000045a826 -20beba00040708c2ab34470000407087 -20c02b0004374cb215a8a300004374cb -20c3620003e282f219780b00003e282f -20c3fa0003ed19721ef0fe00003ed197 -20c5780004488ea218704200004488ea -20c73d0003d9800219e65600003d9800 -20d0ff511103ab923c1c2800304b0ab4 -20d39c000464ace29ae39c0000464acb -20d42400039313521f6c5c0000393135 -20d4240003931352834f21000038758a -20d4980003cf99323ea7e50000395bd7 -20d98f3870a8edd248cfd100004c0a89 -20dff50004865342ea64020000425ba1 -20e4fe0003f506a21963bb00003f506a -20e58fed722d7412c1d46900304ba9a8 -20e7065d979f9b024621a400004a9b38 -20e97e000443a7b227a0fe0000443a29 -20eed587ccb9aeb208da7400804b2f3d -20f0e65088977e627aee0f00c04bf7e1 -20f8f439410af7225f237600504c9d44 -20fd4f352f7ea152d2afac00804cb455 -2103dc0004394262ed9eb1000043941e -2107a10003efbd9217917200003efbd9 -210b170003dae6c2e7bd8f00003dae5f -210da39bef00b302a8eee8005049dfab -210da8f280b070f245f16f00604aa8e0 -210e0b7510c33a121303860000485452 -2114d8b969445d622eac2f003047d5af -211e240003da14b29c782000003cb9b5 -21257b00047d33b299acb5000047d32c -212fec4f4904304298590a00204c399c -213281e1c92dfa22d16e8b00504759fa -2136880004ce76e256a9e200004aceb3 -2138db0003e55d3219317200003e55d3 -2141412b48d88b22eda5a300804c591f -2141d10004365bf2198f5b00004365bf -214749699cb9f3f2e5207c00604a0a95 -21538600048b42626d4696000048b417 -2154ea0004586542a6bbbf00004585c2 -215a5f758e7e7db2f5c99600b047fe43 -215c3c6ed09b530210d1a300f04c3f17 -21610b0003e8232219e0a400003e8232 -21640b0003e222b291a35100003e2221 -2167510003ed1432ae40fe00003ed13c -21681641ee1d14a2a3bc9600a0477152 -216a623bcd2b4982e642f80060498524 -216ba40004bc6bc2f4dc1300004b363c -216bcf0003fcd4a2834f21000038758a -216bcf0003fcd4a2ea76c300003921cb -216f020003f92a6215374200003f92a6 -2174690004ae1cc20a1ea100004ae1c8 -2175f20003a3dc626c30820000393484 -2175f20003a3dc62834f21000038758a -217b3300049876d28b4df4000049876a -21812900042230321a00020000422303 -21850b0003e66e921fef5100003e66e9 -2190500004340fa220117900004340fa -21954aa91ae5a1127e2d6b00904c3523 -21976d00049f5d52ada151000049f5c5 -2198985fc7bed0d21008b500d047b67f -219ab7a48dc180428401b9003047a28f -21a4a7000456c4b2198bbf0000456c4b -21a9ae0004a0c652baa5c000004a0c4d -21aa9628b23bda82d39c2c00e04c76be -21aaa28e081f17a2de1ac30050494581 -21ab7833dcfebf4291ab220020497a29 -21add700049a40a2672b8c000049a3f4 -21b1d200047a8aa2a0e733000047a8a4 -21b85600049a6f3211df2200004969ab -21bcd400047547326af485000047546b -21c168000445c382199def0000445c38 -21ca18a5df76abe262a22400104861eb -21cb2c00045c20827b3970000045bbc4 -21d5a3000488f8c25085a10000475ab4 -21d9d54e6c116002780cae00704a93a4 -21db3837fed25a72e8145c00c04bb578 -21e00d0003f4f7d21af7fa00003f4f7d -21e557c841e5c4a2008e7400a04bb28f -21e722f1db97c4a26fd8bc00104c96a8 -21e727729b27a98263b67400104b64d8 -21e76b000488f8c25085a10000475ab4 -21e7abea0e7dd5e2a981c100804bedcc -21e9e20004699d522057bf00004699d5 -21f0708d0ab2e002c7fb6d00304a07b4 -21ff3f0003d17d32438b5900003d17c5 -220b5b000437f5b21990e80000437f5b -2210f09d333065c21e7f31008047328d -221251965521375260087600b04be3b6 -2215af000433f4d2170a270000433f4d -221c0f0003d0f99220c08900003d0f99 -221c6ba82449251246247600004c2b0a -221d0b0003eac89298980d00003eac6a -22253100039ecdd23d1162000039c575 -22253100039ecdd2834f21000038758a 
-2232460004216ea217520c00004216ea -2233f30f735fb8b24ea9c900204aa54e -2235060003e261c218b40b00003e261c -2236560003d18a026cbb3f00003d188c -2239219dbefc3dc2228b7600904c5d5f -224921dbbd014722c9e45600304a3f1a -224ed90003c87792834f21000038758a -224ed90003c87792a66e7c00003c448f -224f5bb061631a82fee9d900d047518a -22502f0003f80ef2611c9100003f80ae -2252f1000440c5f219ca300000440c5f -2258760004592ee22095c900004592ee -2258a40003ecf42254d0db00003ec9e0 -225c660003d0c15214e0c000003d0c15 -225fcf00040836f21a9ddd000040836f -2261f80003f70ea21624fe00003f70ea -2267681b8ed99a02f5b9d700304a776b -22694971e77c4832188c6900504b6703 -2269dd00041de122a87f01000041b58e -226a5f000445135287235300004450b9 -226bbb0003f75c7220a90b00003f75c7 -2276bc0004c079c296a4a100004c078d -2277dc00043de6222127f6000043de62 -2286f8000491b842cdd5f40000491b41 -2288989f1049867244aed500b04460d9 -228c680004356db216bbdc00004356db -2291f70003d5f882197f3f00003d5f88 -2293c20004a38982a5c36d00004a3892 -2297510003f785e2f6880d00003f785c -22975a5d43d9d772fbed7b00c047cc99 -22a9dd00046f4e7219e75d000046f4e7 -22aa530003cbc332162c2000003cbc33 -22b0a700046b7622c69123000046b75b -22bb630003d0b8c26e6ff000003d0b69 -22bf8e021aee3122a3eee800b04c6a17 -22c02f000471e782199c900000471e78 -22d2105594fdf02277f6c30010483b07 -22d9a51d3b936b122aad980040484dac -22da23db7b46451234d7a400904b1f0d -22e0f50004494502cfd476000044944b -22e21eb70f31734263dcd400a047e24e -22e8880003d4d76218c98f00003d4d76 -22e90b0003efe9a2ba93bb00003efe94 -22eb6c0004506182d913bf000045060d -22ecb5000483c092b212450000483c00 -22f36000046478725cc8950000464759 -22f511d126394af2cca5a800b048d276 -22f7244c6639c742195b5500504b20d1 -22facb608f7a9bf2df6ace00f04ac099 -22fafe0003a3dc126c30820000393484 -22fafe0003a3dc12834f21000038758a -23029d0003a7a802c511f700003957bf -2307bc9c4218070215d3d00000473333 -2307fa0003ecf42254d0db00003ec9e0 -23099000041379421af1520000413794 -2309c9000459dc1221aa7e0000459dc1 -230cf80003d1827221835900003d1827 -230f32d6325d41a2fcaac30020484252 -231999d004fda0f2672b8c005049a3f4 -231c2000044a3f621ac67e000044a3f6 -23222700043315c20fd725000043311f -232476000458299228e4a3000043d481 -232a430003e1a8721684fe00003e1a87 -232aa3000422eaa21875db0000422eaa -2335d200047ffff29be30a000047fffc -23381e1835c6148266c8d40080481a9c -2338910003d0c7c216d0c000003d0c7c -2341c500043de6422127f6000043de62 -2345980004b3555245863900004b3504 -2347510003f62d321963bb00003f506a -2349f90003cab7a219874f00003cab7a -234cdb0003eca9f299399400003eca98 -2355510004a38982a5c36d00004a3892 -23567d9517bdeba2a90d1b00a04cda6f -235ab82f88a92d0255a02500e04920d4 -235ba40004b3555245863900004b3504 -2360c4de8c2940b2a70379006049edb9 -2361570003ca9222ea76c300003921cb -2368b30004374cb215a8a300004374cb -23690b0003f785e2f6880d00003f785c -236cbe2db88878c23e5dd200804760ef -236dec75440164e2ae0ac3008047ebaf -2377fa0003edfe4221c8fe00003edfe4 -2377fdff446f85e213f4a800404ba11d -237962ac572b6c324625de00b0473375 -237fbf00044b07021b476c000044b070 -2387900004731d32e042b50000473140 -238cc561942278e20496450050475717 -238d91650d93412272a67300004c9ed5 -238e240004993362580edb0000499328 -238e560004b764d21400a800004b7647 -2399036db7292d82559774005047b86a -239ac50004305252c9c3c6000042f8b9 -239bcc777bd653323c928b005048222a -239e4500047be0729584d4000047bdd7 -239eba0004080bc2221e8a00004080bc -23a5cc0003efbd9217917200003efbd9 -23ab0a0004007d321892ac00004007d3 -23aba60003cf84f26607a600003cf84c -23ad78b59e527a0242b67b00d04ae743 -23ae660004b764d21400a800004b7647 -23b7d48e3ca65ec258460d00004986db -23b9d100043536522255c50000435365 -23bc2f00047d33b299acb5000047d32c 
-23bd2300044969321ccf6c0000449693 -23be660004bd26225e32ab00004bd23c -23c155000490cd3200a9240000490cc7 -23c3c20004a0de22adf00500004a0ddf -23c50b0003ec53f20e317200003ebe6a -23c5c80004731d32e042b50000473140 -23c61300039313521f6c5c0000393135 -23c6130003931352834f21000038758a -23ca720003da0e826b0c0b00003da0e4 -23d0e800043ae1121b43e0000043ae11 -23d48500047bdd5232b7120000418e3e -23d6d0397d97f0d2ab976d00c049a724 -23e0130004bca0a2a2175500004bc7df -23e09600047cf8e25da096000047cf8b -23e12c00043315c20fd725000043311f -23e33900043fd792e87abc000043fd66 -23e3620003e424421d153800003e4244 -23ed5d000472aee2cea15b0000472ae0 -23f16be6e25ad67205c6c30090498bb7 -23f49600047be0729584d4000047bdd7 -23fc993cd1306cd27dbd7b00a0474c4d -24007c0004a53eb2b486800000470089 -24053b000448128222a0730000448128 -240624000484ef828bdcb50000477127 -24092c00046ec672a06a80000046ec63 -240b83274e9e22c2dae77500304cd64f -2418f80003d39132774b6b00003d390f -241d310003a96aa2834f21000038758a -241d310003a96aa2c79b4500003927b9 -2425170003d852d2f36c9800003d84ef -242a7b0004bca0a2a2175500004bc7df -243489e08fb0a1b22c95de005047f6a3 -243e280003f41dc21a550b00003f41dc -2443430003cbba221b482000003cbba2 -2443f08919b81f92f821b90030477ecf -244ff3fa82059aa231273700e048b65f -2456128a0560eca2121bd000f047959e -24592c000433b7628599060000433b6f -245a5c0003cba63213fecd00003cba63 -245c6ff9d06a0282d16acb00b04cc043 -245fbb0003edb6d21d33fa00003edb6d -2460cd220400c962c677d100404c520a -2463020004abd3e218cda400004abd3c -246f4358c4635702ae29d000e04cce2b -2476ba79cc23acd2ea43540020484b62 -248528d2213ba75202d722009048f01d -2488a40003f79ae2235bbb00003f79ae -24900d0003f45ec2233fbb00003f45ec -24976ccf230f2432aca9d90000476913 -249c93000433f4d2170a270000433f4d -249cc2000498d132dc125f0000445904 -24a66600047258424b0f6600003d01b7 -24b1790003caba921d475e00003caba9 -24b34e0003e261c218b40b00003e261c -24b712000407a3621d46ac0000407a36 -24b7140004caa402e2821700004caa3d -24b9f10004801a82d3bd7b0000480198 -24c25d8d089d922275d11700e04c627f -24c579f8bb6a2d525cf7a400c04b6892 -24ca13000392e912eab7450000392d66 -24cce5b3511eb9c27628a800504ae686 -24cf5300044427a2f844fe0000444272 -24d0790003d980021b3b1700003d9800 -24d4b30003cf84f26607a600003cf84c -24d5d0360b58c0e2c3f2db00e048ec51 -24d77d0004c155921643e9de0688aaa4 -24dfa10003e282f21add7200003e282f -24dfe99bba95db128cf7d100004c64dc -24e2560003d413922368f800003d4139 -24e2560003dae6c2e7bd8f00003dae5f -24e3ad5cca636aa28ec94a00d04a99e1 -24e8148d1fcf098226ea1d00304bc160 -24eca10004c5f9c2a4250a00004c5f91 -24edd70f7e8a303201add2006047556e -24efec0004886f726af485000047546b -24f0181502f0969231817b00d047b386 -24f13fd015022bb218f8b500a0474639 -24f17a00040187529fc1520000401871 -24f2c3000475c0a25065d10000437f6d -2502390003d5f882197f3f00003d5f88 -2503460004888a02a5dbf50000488891 -2505333521ebc922a4c82f00d047e563 -250769d9bbe42f7246eab200604c1f2f -250a4500047e1c72c0c096000047e18b -25129800047119f2238b20000047119f -2512ab0004bae7b2256eab00004b9a08 -2513ae0004216ea217520c00004216ea -2516ba0004073e9223b28a00004073e9 -2520f500045d9002d570a7000045d456 -252dd100043d47b21a665e000043d47b -2532e90003f506a21ac00b00003f506a -2539ff6edc445da27b424500a0481b65 -253a720003db666223e80b00003db666 -253c02e3a4662fe26a0ad800304ca657 -253d860004070f12a19bcf0000406c88 -2541ae00049e5f52b80fed000049e5f1 -2545310003cbc6d223a29e00003cbc6d -25456b0004be9e12232e7400004b9ae5 -2548690004b67e02188c6900004b6703 -254c0b74d6babfc2932de20020487a25 -254c1f0004356db216bbdc00004356db -254fed4ec34b5a9257242c00f04c05da -25585e0004cb36c282385e00004cb367 -2558ef0003e55d321b34d200003e55d3 
-2559350004365bf21aba6300004365bf -2559510004a1523250947c00004a1491 -255d380003eca9f299399400003eca98 -25627e0004233e521d333700004233e5 -2563860004888a02a5dbf50000488891 -256a1d0004b67e02188c6900004b6703 -256afeee5b4e859293ad3c00904c8474 -2571f80003e823221b7bfa00003e8232 -25795d000472aee2cea15b0000472ae0 -2585ab0004c43e726ddaa500004c435c -258dde196bfeb9525928d4006047c0af -2594009ceb8fcf827b01ab00104c58ad -25948416a92eaa22adf00500e04a0ddf -2594b069966912927a9e7400304b9a1d -2599380003f75002834f21000038758a -2599380003f75002e6078800003c031f -2599e2000451c4929be49500004519a9 -259b600004697d922438f500004697d9 -259cea0004593762945f6b00003d2683 -259e6d00043313528cad2c000043311e -259f164b7fb3fc229a897b008047b9b4 -259f4300042230321b40680000422303 -25a28f0003fc7c3224655200003fc7c3 -25a45b00046ccf6202c4f5000046a129 -25a5d6000424304224699a0000424304 -25a8a40003f62d321963bb00003f506a -25aa070003f4f7d21c4cd200003f4f7d -25aa7b0004bc3cd2f1fc5c00004bc32e -25b2a16dd4790d72199bd100a04bf5ff -25b9b00003d4d76218c98f00003d4d76 -25ba490003cac0221836a900003cab7a -25c0d7000422eaa21875db0000422eaa -25c0f70003bdb9b2d48899000038bd74 -25c4e800043d4702fa2e63000043d46f -25d5be0003b32052834f21000038758a -25d5be0003b32052c79b4500003927b9 -25d73f0003d18452e434fc00003d1832 -25d99bcada4c58426d8896009047a688 -25dc5c0004b0f1f28e39240000499126 -25e0b19bd01a5dc2137b0a00c04810aa -25e5900003fcd4c2834f21000038758a -25e5900003fcd4c2ea76c300003921cb -25ee1217bbb51c82b3f8a100b04c6e1a -25f0e16cc54315f2121bd000f047959e -25f1d9000478587237f602000046d41c -25f36000046b7282d8ac20000046b628 -25fac3000475c0a25065d10000437f6d -25fc8165aedffa62b81a4500204caf85 -2612240004851be2e2df9200004450fe -26124e00048624b2dd0de20000486247 -2613e00004394262ed9eb1000043941e -2616800004716312fb035d0000471623 -2616b100043f16c2247bdc000043f16c -2620530003cab7a219874f00003cab7a -262f9c00045f5182809095000045f3db -26326600047258424b0f6600003d01b7 -263db0f85d6ec4b279ae960020486684 -263df1000477a1624c85d20000477a12 -263df70003a7a642834f21000038758a -263df70003a7a642c511f700003957bf -2642190004993362580edb0000499328 -26475a25fb7ceb42fc353c00004cbb89 -2647bf0004594f3225057000004594f3 -264e35a79367e4f22e7a7400b04af822 -2657a80004a184b2382fbe00004a1804 -2659e2000457927240f27e00004555c8 -265d194cc5211e021289c700104acdb3 -265e720003d351626f9a4800003d350e -26619000041be8c2801eac0000419b2f -266345064cdef0c286d9d20030481960 -266590000437f5b21af1790000437f5b -2666160004007d321892ac00004007d3 -266a4334a4b248a2af60a700604c2494 -266d310003cbaf621ec42000003cbaf6 -26709693ce7d6b22ac438c00904a12e4 -26710a5aca0381620485cf002049322c -2671d300044003b2ee27480000440024 -2673f38be5d5f5625bd42800c04b7d26 -267924000498d132dc125f0000445904 -267ac792b1679672a884b500e0476ac6 -267b71c8b7ac780298590a00904c399c -267eab0004b0f1f28e39240000499126 -2680cc45f2f552a243b47c00104a4887 -2684db0003deaab2252bfa00003deaab -2685cf00048f15e2f593a20000484c4b -26894300040836f21c0952000040836f -268c716f2e02fd1231cbf50080487301 -2691cf0004938ce290d5d9000047e6ac -2698f80003dae70225727200003dae70 -269aaa0003d193e269ff3d00003d1938 -269cd20003ed1b121fef5100003e66e9 -26a4554404997fe26449de006047d12c -26ac2e93401ed1f276e2a100004b32af -26b7370004862162e351a80000485c95 -26bcb500048100224560b5000047ea75 -26bd0b0003ed19721ef0fe00003ed197 -26bd52000412e0221eb2c20000412e02 -26c12c00047137021d5b510000471370 -26c3920004425b921e747600004425b9 -26cda10004739a82d2e4b50000473785 -26d2880003d0fdb28a176600003d0fd0 -26d91c127233d25271d15a00e04a9f23 -26dbdf0003cbba221b482000003cbba2 -26e18400048e3982347fea000048e394 
-26e40d0003f05d72307d7200003ee9b0 -26e48e0003f41dc21a550b00003f41dc -26f2d462572dae921a0ab200704c5ba6 -26f8f6eaa06319d25a42c300a0481bf9 -270bcf00040834a225946b000040834a -27167e000449aa6225ab600000449aa6 -2719a300048d4e821b2451000048d4e6 -271a7e0004b938f2649b5500004b86ae -271cddec6c5676c2b212450010483c00 -2725a436c1449032489e1d00004bb97e -2727ac00047bb2923793d0000047b889 -272a870003a7a802834f21000038758a -272a870003a7a802c511f700003957bf -272b01809a411a822c95de00c047f6a3 -27330de8dbbff3d2672b8c004049a3f4 -2733e0000439caf21ebbdc0000439caf -273556ea9183b582bf1c6900e04af990 -2736660004bd9892b0f67b00004bc82f -2739200003b3ec424d96a300003b3e4c -273cb500047d3482c15333000047bd67 -273cd574d9c4f58258c18f00503d19b1 -273ceb0004856a922057bf00004699d5 -273e240004851be2e2df9200004450fe -2740a80004b2fba26388a800004b2fa7 -274d7e000445f86225db080000445f86 -275630a10bdeb112d7efb300104ccadb -275777ffa424b162ac438c00104a12e4 -2759e2000461ace21ec1c90000461ace -2765e2000456bae21eeb6c0000456bae -2765f298b636be22a775c700a04b411e -276a1b0003d0f99220c08900003d0f99 -27733900048e3982347fea000048e394 -277420000469c08225cc760000469c08 -27763b3d55dce762abf1f1002048121a -27780f00041379421c668a0000413794 -2778f24cf23d8c220bef250030499acd -277bc200049fbe42adedd7000049fbbd -277cfe0003e222b291a35100003e2221 -27880b0003f62d321963bb00003f506a -278d8f0003d3313226336b00003d3313 -278f1f8b4089f94213010a00f04c1270 -279495000468ca821f4f9c0000468ca8 -27954a8133942672f7ec960070474425 -2798b60004aa24c2c7371700004aa246 -279bd000047bb2923793d0000047b889 -279d7213971a5542666b5e0060472cc3 -279df4000495958203b164000049593e -27a69f5db107cdc2a1133700c048555a -27a747cac2e899a2313b33003047adc9 -27ab062364fe60d2411ee800b049d1e5 -27ada169b55f595243d5de0010475db5 -27ae150003d980021b3b1700003d9800 -27ae9d0003cb8de2df65e600003cb8dd -27b4a7000451aa9226463b0000451aa9 -27b52900042af3921ede28000042af39 -27b678d92d6d8e324a2fd100e04c6cde -27b6c7000486938262036b00004868c9 -27b8760004500e9274dc7600004500d8 -27bd4300042ef4e2f7727d000042ef49 -27be820004365bf21aba6300004365bf -27bf5a0003cb9cd26a7b3900003cb819 -27c0f50004550cb22675e200004550cb -27cde20004635a821f29c900004635a8 -27d1d7000490c9e24524250000490c99 -27d43097b53810925945a800b0487c5e -27da2800042b7702267da5000042b770 -27db5d00046f06521e9a98000046f065 -27dcf698407afcf2f65fed00e04a1143 -27dd580d7140dab2807a2400404989a8 -27e21b0003d0e0921145fb00003d0df4 -27e2deaab39480223deac300e047ed8c -27e47c00049a40a2672b8c000049a3f4 -27e9cc0003e282f21add7200003e282f -27f33f0003d852d2f36c9800003d84ef -27fc8e0003f506a21ac00b00003f506a -280438c55324e0026cf1d800a04c8b72 -2808200003cbc012e9f27200003cbbf8 -280f4e0003e424421e2b5100003e4244 -28145d0004331d72f8389a000043311e -2815dbc15e10e1b27149f10020483a90 -281c8600042230321b40680000422303 -2824a700045247a21f1bbf000045247a -28251f0003f4f7d21c4cd200003f4f7d -28302500048f8962bb3322000048f88f -2831520003fcb37226f97a00003fcb37 -283412ec7eb244d2749c2c00a04c4d71 -2834c2000495ad324a800f0000495990 -28480b0003ed2aa226dd9400003ed2aa -2848cd177a3e92521303860010485452 -2852430003edb6d21e78d200003edb6d -28530a00047c4a326c1dd9000047c461 -2868dc000472f372a753ca0000472ed6 -286a4e00048c38d221aa7e0000459dc1 -286bdc0004340fa220117900004340fa -28700192c3255922256eab00d04b9a08 -28746001b896ec126c930a0080483506 -28752300045849c29d0bbf000045826a -28771300043ae1121c8579000043ae11 -28795c5e913ce9d22aad0a00a04be716 -287cd9c26c289982be085600f04a0d80 -287f59b8ee842d1263787600d04c446f -2884971f255823d24a677500104cc3de -288884f5dd6a8f32ae29c700804b0094 -288dc00004a15df2f9b7be00004a15d7 
-2897620003e222b291a35100003e2221 -28a1cce55ed501f2a884b50040476ac6 -28ad2b9047c442b2acc56f00804a8f45 -28ae430003e823221b7bfa00003e8232 -28ae5e0004394262ed9eb1000043941e -28af742c94b22fc2a2e5a100c0476abd -28b72d93c5d9966225865600504b61db -28bd480004ce7662df274200004ce6f0 -28c2db00048f8962bb3322000048f88f -28c68c0004324f12276f1100004324f1 -28c876000443a29227a0fe0000443a29 -28ca770003caba921f019100003caba9 -28cb6b0003d1827221835900003d1827 -28d00f000494c6e2c6a60d0000494c6a -28db110003cbc05227619f00003cbc05 -28ddc4aa4c3790f2bbcc5c00804bbf0f -28df3d0003d18a026cbb3f00003d188c -28e00b0003f75c7220a90b00003f75c7 -28e173000443a7b227a0fe0000443a29 -28e22e3ff1bfbb12580edb0080499328 -28eaa50004c386122ee6e800004c28b0 -28f3590003dae7a2342e4800003dae6b -28fe13000407a3621e91520000407a36 -28ff3f094941a4f2a0c65300a04c714c -28ffa8000490cd3200a9240000490cc7 -2901f1000474b68263e1b900004736f3 -2914a70004699d522057bf00004699d5 -291c5c0004b319222df41300004b30ec -291d060003e55d321b34d200003e55d3 -292755bee36c3e2297850a00104c64ba -2928b50004761b3263e1b900004736f3 -29300b0003ee457227c3bb00003ee457 -2934383aaea40d62c2fbbe000049e759 -29349f8a21989642cd9b3300f0483647 -293e0c000400213227ac6b0000400213 -293e2400048555e2a11337000048555a -2950f7518c9b9f92e472c3006049585d -29511400040836f21c0952000040836f -29513940f425c202cb53ac001047f62b -2956b100043de6222127f6000043de62 -29590b0003edfe4221c8fe00003edfe4 -29604386888fbdf25f51f1005047b6d3 -29668a000409228227de0c0000409228 -2971cfa46a2bc22228aee800a04c2d9a -29750b0003f47512233fbb00003f45ec -29760bb71f96ca02046f74000047b8b0 -2979ba0003d02e82cb84c000003d02d0 -297b3300047e4892b29333000047e486 -2983a485ded12f8216470e00d04ca50d -298986000407bcd23c41dd000040534f -298cf80003d4e232282c0b00003d4e23 -2990130004b540d226c4ca00004886ff -2990690004ac18824f06ce00004ac185 -2997600004613872283cf50000461387 -29a36b0003d39c12d33b3f00003d39bc -29a8b0d6ec8590d222319800e04ba364 -29abcf0004018b02614ec20000401865 -29ade20004592ee22095c900004592ee -29afa00004233e521e959a00004233e5 -29b6920003d8c652e023e900003d06a9 -29c4890003d116d23f1e0900003d1167 -29c49a0004241e7228867e00004241e7 -29c4f470928ba7a2de966600404b215b -29c89600047b69f21008b5000047b67f -29cb010003fe8e2201c28a00003fe8de -29cb590003d351626f9a4800003d350e -29d1a4b3269501d257927b00604ba97a -29d4eb00048555e2a11337000048555a -29d7bf0004506182d913bf000045060d -29dc460cf7ee3a122c7c850050473be2 -29debc0004c350d2e5fe7e00004b1093 -29e895000461ad221ec1c90000461ace -29ea720003d18a026cbb3f00003d188c -29ef98302fac80f269c9a300d04c2401 -29efde189c847a4251b65600804bde87 -29f02000045b8182289476000045b818 -29f87600046aeb229ccc93000046aea9 -29f9c8fd717f61c2dcac8500b047cb8a -29fca80004bb7902af0a45000047d45c -29feab0004b319222df41300004b30ec -2a00ea00044c6492ba45e2000044c627 -2a0aa30003b31d92834f21000038758a -2a0aa30003b31d92c79b4500003927b9 -2a18ea00045b8982e47970000045b893 -2a1de7af547a1c5215447900504cc6ce -2a1f77027116aff2aacdd900d047e23b -2a267e000452cac228b8950000452cac -2a2c16000437f5b21af1790000437f5b -2a2c950004586542a6bbbf00004585c2 -2a2e1d0004ac18824f06ce00004ac185 -2a2e8000041379421c668a0000413794 -2a327c0004701fc23595af00004701ec -2a392fd9981646e28c3e3900804b31d2 -2a3b7400047d33b299acb5000047d32c -2a42e7000432c16228fddd0000432c16 -2a42ec0003eac89298980d00003eac6a -2a437900049bd36287f405000049bd32 -2a453904f00da392e67c7200304a439e -2a480b0003f79ae2235bbb00003f79ae -2a55d900047e4892b29333000047e486 -2a5dde00047be0729584d4000047bdd7 -2a65f981ac7bcf72edc6a500504c5e45 -2a68540003cab7b21836a900003cab7a -2a6a97fd1c6837524d35e20090487844 
-2a6aab0004bde8e251b65600004bde87 -2a6b0a000473c372514e8b0000473c2c -2a6d06871c23508247e1e200104b89a1 -2a6d4d0ab0d56b12433e960070485244 -2a6ddd000412dd2228cd900000412dd2 -2a72280003e424421e2b5100003e4244 -2a7bf600043d481228e4a3000043d481 -2a85750003d979b229464800003d979b -2a8663000472c6a2e2d92c0000471a2f -2a8b3f0003d84fb29aa27200003d84c0 -2a8be3d399d2e2f2caeace00d04b7802 -2a8ecd0003cbdae229164e00003cbdae -2a8fdf0003cbaf62208a4e00003cbaf6 -2a91720003f45ec2233fbb00003f45ec -2a9178b470c0c8a2b207d0008047b93e -2a91f1000474b68263e1b900004736f3 -2a9b010004080bc2221e8a00004080bc -2a9eaf0004732e5205ebe50000399de3 -2aa03d0f76a670b2875a7e00404ac49b -2aa17e000445fbf2564e5f0000445fb8 -2aa47600045fe292292e7e000045fe29 -2aaddb000423c4022945d60000423c40 -2ab6720003d413922368f800003d4139 -2ab80eff2a3cd6827edff50020487edc -2ababc0004c294d26caa4e00004850f7 -2abb49474e1d4032a0e733008047a8a4 -2abbd4091972ac62c42ec700f0485383 -2abfaf0876e721c22c16ab00804b224b -2aca7c90aff45f9248486900104bd20b -2acbbe34c2ce085250026e00204ccf62 -2ace3a38cdc447328e8e96008048c3c2 -2ace8000046f9292296e7c000046f929 -2acf7ee26ed3020247e0a700904c6e26 -2acff600043536522255c50000435365 -2ade80f3eff9a2722dffc000004ce734 -2adf5d0003f85a52298d0e00003f85a5 -2ae1f4000499433240cb22000049016a -2ae20c000407fae22985860000407fae -2ae4fe7e9e215b3286f0a800804b2cf0 -2ae9ab0004c51242a945c100004c5115 -2aeb27fe6440e55211a9d200d047c80d -2aee46000412e0222022ac0000412e02 -2af5e20004b067e2cfee3900004b047e -2af6aa0003db666223e80b00003db666 -2af7270003ddf7923ea7e50000395bd7 -2af80f0003d0bce2918e5800003d0ba5 -2afb01000407fa22a19bcf0000406c88 -2afb250004331242f8389a000043311e -2afe7e00045fd8f2cc90f5000045fd8a -2b033d0003d193e269ff3d00003d1938 -2b051f0003edb6d21e78d200003edb6d -2b058ed2d675dc323106db004049685e -2b0b0a0004840bd2e3730a000048407f -2b0b9c0004649912e66095000046498b -2b12c3000496ef321443330000496ebc -2b142f00047ffff29be30a000047fffc -2b20f80003d9e7e27c2e7200003d9e7a -2b264e0004852602433e960000485244 -2b2b2706089696027f2b7300204ce459 -2b2ebc0004c350d2e5fe7e00004b1093 -2b35f80003ed197220c3fa00003ed197 -2b3af8000499433240cb22000049016a -2b3db44481120572c0edb9003047a8a7 -2b45c500043453e244ae7200003dce92 -2b4c76000456acb229db6c0000456acb -2b4ec3000498d132dc125f0000445904 -2b4fe76e0f0458725a301f00004a7e5f -2b526165bc56aa222bb4d400104849ae -2b53390003cbc6d223a29e00003cbc6d -2b5d7a000407fa22a19bcf0000406c88 -2b621d0004b18cd2864ea100004b16a0 -2b64770003caba921f019100003caba9 -2b6aa50004c294d26caa4e00004850f7 -2b73017d656f23b2fbb1f400a0495d97 -2b75b90004840bd2e3730a000048407f -2b76300003d0f992221c0f00003d0f99 -2b79510004a07bb2c7fb6d00004a07b4 -2b7a4c0003c14d9229f01d00003c14d9 -2b7a4c0003c14d92834f21000038758a -2b85d60004255fd2af80020000424eb8 -2b87bf7c9417ed62cec38c00204ab018 -2b8c0b0003ee9c1229e59400003ee9c1 -2b8e96000485b0924faf6b0000485b02 -2b9c050004a105e2f1efed00004a105b -2b9c7c0004a18072382fbe00004a1804 -2b9caa000407a3621e91520000407a36 -2bad4c000472c6a2e2d92c0000471a2f -2bbbc20004a1c3a207038c00004a1c02 -2bbca1d555e6f292a639e600a03cf5b0 -2bd09a000424304224699a0000424304 -2bd22d9507878cc22c9e4e00f0487ab7 -2bd800000448128222a0730000448128 -2be5240004936bd2e34f3300004936b1 -2be6280003e66e9221850b00003e66e9 -2be6d00004a8a3127415d000004a8a08 -2bf7af0004305252c9c3c6000042f8b9 -2c010d92025d677250947c00904a1491 -2c02e20004314a42b45658000042e389 -2c03d10004c67f825075a300004c67f3 -2c058f0003dae70225727200003dae70 -2c07700004a09252806805000049de71 -2c08188177836e824524250080490c99 -2c10b3000439caf2202ab10000439caf -2c13590003da0362b1068700003b603a 
-[… remainder of deleted file contents omitted: several thousand removed lines of 32-character hexadecimal values carrying no readable information …]
-4f39bd9cf7401b14dd0de20080486247 -4f3bec00048c7e345aee240000487259 -4f3e8000046f635446d320000046f635 -4f44ea000465b7046c4ca70000465b57 -4f46e80004a38d842fa3a800004a21ff -4f46fe0003b3e4c44d96a300003b3e4c -4f46fe0003b3e4c4834f21000038758a -4f5bf5000487308431cbf50000487301 -4f5d9dec828d794438e57b004047a8e1 -4f64aa0004190824409ec20000419082 -4f65d70004a38d842fa3a800004a21ff -4f6a1900048edf34ab0124000048ed48 -4f6b9c00046a17244dff60000046a172 -4f7165d0040aa914ae29d000b04cce2b -4f885100048c7e345aee240000487259 -4f8929d6f7d494a43953200010470861 -4f8b4eece4ddda34e5fe7e00404b1093 -4f8c471cded8b44437e96b00c04c2cc3 -4f8fc76ba23ef5e465b75500a04b320f -4f8fe4a965db1a1472070a00d047551b -4f99e60003cb9a34a2f55900003cb9a1 -4f9a530004c4346474dd6b00004c4344 -4f9aca00042c52e4481129000042c52e -4fa18f0003d631943c169200003d6312 -4fa2460003fcbd9444d2ba00003fcbd9 -4faea200044a58c44e3476000044a58c -4faee80004c4346474dd6b00004c4344 -4fb190000400bf845512ba0000400bdc -4fb1dd00047081644e01af0000470816 -4fb73f0003d1c374efd40c00003d0aca -4fbd350004732d3455f73100004732ce -4fbf0a000477bd740253330000473d81 -4fc57252f2a09834fff93500604cdbfa -4fc58d000433ca74bd8e7e0000433c42 -4fc9f400049192745aee240000487259 -4fcb310004732d3455f73100004732ce -4fcc2000045ce9b4c6c9e2000044e600 -4fd8770003d9a2e43ffc0b00003d9a2e -4fda4e000487308431cbf50000487301 -4fe7a800049fe994a0c056000049fe96 -4feab20004c4999440b96b00004bf52b -4feef78e6565a454d66f220090490972 -4ff5806159f84d9486776d00d0499d49 -4ffe041b5e282fb491312b001049ebef -50039c5312ed9c353955d900f0473da4 -50158f4466d2b5d5e5fe7e00304b1093 -5017e90003d0d565492f6300003d0d56 -501d520003fcd495834f21000038758a -501d520003fcd495ea76c300003921cb -501dc500043613f5ddc2b1000043608c -5023ae00041223f5454b12000041223f -5025d75be59826e5ebc56b00004c40f4 -5026ab0004b24925e5fe7e00004b1093 -50292900042515452ea27e00004250ca -502ac200040b11f510e6c2000040adf3 -502fbf0004622eb547627e00004622eb -5030650004256805442ddb0000425680 -503daf00046f60c547252c000046f60c -5040ef0003ddbad5442d7200003ddbad -504b0100041a56a58421dd000040652b -504e530004c74b753a5ac3000047f70c -5051dd00040ac3b5af7590000040ac34 -505496000479b675fe3b0a0000479b3b -50587c00043539454f1ab10000435394 -5058a40003efe3950e317200003ebe6a -505943000401894545617a0000401894 -505b01000407fbb52985860000407fae -505dd200047a29a5b1068700003b603a -5064d70004285ff5448c0100004285ff -506ea10004b24925e5fe7e00004b1093 -507ac0a00797016579dfb300104cc8f7 -507c790003d25a1543a8fc00003d25a1 -507ee2000432b02547c8720000432b02 -5080c10003ef5ca5469f6200003ef5ca -508b0100040ae8354f2990000040ae83 -508bfa0003ebb2e5a2676200003ebb24 -508c9a00042b77f5267da5000042b770 -5092ab0004b1a395ffc23900004b1a36 -50970a000479b675fe3b0a0000479b3b -50a3390003cbc4b54f415900003cbc4b -50a6394d07531d351a4eb50080472cc6 -50aace0004b88cd5fec3a400004b57b6 -50b1b00003d961f546d33f00003d961f -50c7854e043db18556302800c04b95f8 -50c7a10003e40605473d7200003e4060 -50cb79c57075ee25e7438c00d04a7c49 -50d12300046c93052057bf00004699d5 -50d6370b63d98b25238ea100e04b0532 -50d97b0004751765e3f8850000475170 -50da0a92846c6d65d8c38c00604a165c -50e0031b08f6e335f95c7600104c01e1 -50e9330004c49ae503413300004c49ad -50e97a000407fbb52985860000407fae -50ea480003d1c375efd40c00003d0aca -50ed940003f544f53ea7e50000395bd7 -50efac00048251a5b6542f00004824db -50f58f0003d8b1158c72aa00003d8aea -50fa463de81628b51b7733004047ba43 -50ff790004a19375c9336d00004a1932 -51009a000422b18549799a0000422b18 -51028b00048251a5b6542f00004824db -5103dc000437ad45d0c80b00003d7b0c -51059f0003cb9a35a2f55900003cb9a1 -5105c900045f9dc54fd27e000045f9dc 
-510e7b0004b5df9525ba6600004b5d56 -51104700041280554ff2ac0000412805 -5117a2000484d855e148470000420966 -5122dcbe240da6950d852b00604a26d8 -51233f0003d341d549e31700003d341d -5123ae000408df154671860000408df1 -512ca70004c437e5e832190000496f76 -51340f00049597d503b164000049593e -513bd0000475cfd52c068b0000475cac -513cac00048b103525cdb90000482893 -513e0c0004198575a7b6ac000041984f -5142430003dce92545e97200003dce92 -514ea33208042d65e6f9de0030475d97 -514fe4e2ed9c57657aaca800a04b777d -5150d40004733275eab7450000392d66 -515970000468fd955bb1e20000468fd0 -515a62aa7fa32305bebbd100504c189c -515f1900048b103525cdb90000482893 -5164463b123632a5b42f7000204a1926 -5166cd0003cfc485dd42cd00003cfb55 -5168050004a22ab5be085600004a0d80 -51682f0004751765e3f8850000475170 -5169bcea50d7c7d5f8305e00904cbe36 -516b3c3811dab6d504705c00504b96e2 -516ca100043128b5459ec5000043128b -516dd200047ed9553deac3000047ed8c -5171b40003d121f54784c000003d121f -51729d0003cfc46545c02000003cfc46 -51757b00047ed9553deac3000047ed8c -5178980003d71065f1589800003d4bb0 -517990e39445c9350c53540040484c77 -517c5000043536d59efce8000043536a -5184a10004c46c4510d1a300004c3f17 -5185d700049d8be596052b000049a6a1 -51879400043bce6544835b000043bce6 -519c36e8a52948954d35e20010487844 -519ef7df5d6e4bc5bde1d20040482afc -51a5f2d47996dbf5b181be00804cdd0e -51ad24000491aca56d99d2000047e172 -51b3740004733275eab7450000392d66 -51b579000437f6d55065d10000437f6d -51bd86000400bf855512ba0000400bdc -51c3d5da57a1d1a5895e7e00704b3e6c -51c54300041110a5422f12000041110a -51c5d100043823855e75d10000438234 -51c5da634a2319b526ea1d00304bc160 -51c71300043ae7d543325e000043ae7d -51cb9c000465dcc54931700000465dcc -51cc330004a2ccf5914b7900004a2c80 -51d98214e387f005cc1cc20030490ba4 -51da800004715fe550767c00004715fe -51e2953accea476543b47c00704a4887 -51e30a00047747e51d96c3000047747a -51e52b00049aa4a544c3a8000049a983 -51ef07f5c603a7d55f12f80080490b5b -51f0b21b02b86415c152ce00c04c81a3 -51f7b6228256fd957f164e00104aabd1 -52005000043dd35500f35b000043dd21 -52057a0004148565dcb2ba000041484d -521073000447267549ea8e0000447267 -5211de000473e635f3980d00003e80e8 -5214a10004be9d1591c3d100004be9cd -5217510003e6dd654afcfe00003e6dd6 -5217790004a2ccf5914b7900004a2c80 -52183d00043312054697250000433120 -5218806e9b92f465fe696b00604c01e5 -52191e41e94081f5262b7500304ce5f1 -521acc9797612b7587abd0008048468c -521b22000491aca56d99d2000047e172 -521b6c000447be1505f1e30000447b99 -521b910004cd0bd5b1068700003b603a -521f7900049f05359e9433000049f049 -521fa40004b910a50c4e6600004b90bc -52209600047f1555e2da8b000047f14e -522211ec30594d25992abc00904c2301 -5222260003d17c5544f8fc00003d17c5 -5225720003f37c3527619f00003cbc05 -5231ba0003d02ee5cb84c000003d02d0 -5236880003d01b754b0f6600003d01b7 -52380b0003dad7754b7eaa00003dad77 -52395200040125f55d72ba000040125d -52442f000473360525db080000445f86 -52468b000477f735f821b90000477ecf -5249350004732a7505ebe50000399de3 -5249350004732a75834f21000038758a -5250defd44eb6ad5992abc00b04c2301 -525a9800043115b5467e7d000043115b -526266e49065b6757f8ed200d04ccf2e -52652b00049f05359e9433000049f049 -5269410910ade6c5e6a31900b048d94b -5269c90004ab273549b8ae00004a9de3 -527c5c0004b3176516c3a400004b313f -527e8a00040f937551297a000040f937 -527ee12f0b3449b51df26600204727d2 -52873f0003d9a7154b6e4800003d9a71 -52880100042515452ea27e00004250ca -528a8b000473db25ea5e450000473b41 -528ad50004451655e2df9200004450fe -528b170004ab5fb51e05a400004ab5d0 -529bdc0004a3cc4584c5c000004a3c7e -529d416f4cbbd9e5c511a3007048da7c -52a0b5000473360525db080000445f86 -52a3bf2497b6c6256adbd0004047cd68 -52a3f600043416a551607c000043416a 
-52a420000464fd755150760000464fd7 -52ac24f676570a253c1405006049e378 -52b10b0003f384f54ba0fe00003f384f -52b37e152f717145c61ba800d049f90f -52b80f00041223f5454b12000041223f -52bd5200040eae854a546b000040eae8 -52c39c0003f8edf59288a400003e19e6 -52c8f50004580b455149e200004580b4 -52cd720003efe3950e317200003ebe6a -52d2800004199fc548097a00004199fc -52d6a80003fcbd9544d2ba00003fcbd9 -52d80c0003ef5ca5469f6200003ef5ca -52dcd20003ef42e57f7c0d00003ef422 -52dd132aaf5a8f7587863900a04bc9f9 -52dd5200041a6f35518186000041a6f3 -52efbf000456c7d565cf12000040992f -52f20c000400a56551706b0000400a56 -52f27e000461ad351ec1c90000461ace -52fea80004043f8548046b00004043f8 -5301380003e947b55160db00003e947b -5305d9000477d33513f1f10000476ace -530ac30004803d052525f100004803b3 -530ac700048662b54119e30000448a71 -530df4ad222d18e5c8814800404ce5c7 -53108e0003e408b548a36200003e408b -53127c000470fc3551c2980000470fc3 -531c0d0003ebfae551ceec00003ebfae -531c93d6c950cc751008b5009047b67f -531ca8d4f5a0b0b5b3f8a100704c6e1a -531eab0004b3176516c3a400004b313f -531ec2000408239551de0c0000408239 -531ed789d6a7a9d5c13ad200904cd3bc -532f200004724a454071af000047249e -5335310003a80085b485f20000395839 -53364ddc00adef752e1ece00d04b9e7d -53376d0004a118c5f65fed00004a1143 -5337fa0003ebb465a2676200003ebb24 -533e13000401894545617a0000401894 -5342ac9276d07845ee62ab00804b393a -534d6200039b664505baa3000039ae0d -534d6200039b6645834f21000038758a -53514300042ee9354a7ba9000042ee93 -535366c907aba765004ca100304c66e2 -5354cceec3889e35e9b7be00104a26b0 -5355350004732a7505ebe50000399de3 -5356a50004c1ed45ecc93300004c1ea1 -5357430004285ff5448c0100004285ff -535c7c0004a118c5f65fed00004a1143 -535f230a13b91085199fa400804ae59a -535f5292be833b25f3669500f047297d -535fc600043162a551e28c000043162a -53619d639547d5e5ca156f00104a8ece -53641f843bec1c757edff50030487edc -5369cc0003e40605473d7200003e4060 -5371d70004a26c05a2e55100004a1120 -53740c0003e40b554967bb00003e40b5 -53742f0004749c5574128b00004749a5 -53777eb58037235559dad000a04a8b41 -537a150003d961f546d33f00003d961f -537c7caa2b89d8253341a100c0475898 -53849a00042c3d256681d6000042c3d0 -5384a20004339725834f21000038758a -5388afca35ae33d550a6db0080490f81 -538a17b7b0062a05d302f800e0495f06 -538bb500042b77f5267da5000042b770 -5396ca00042997355233370000429973 -539a46000413fb2548b7120000413fb2 -53a1b90004829845ba27ac000047715b -53a688efb1dcc74509247200d04a1805 -53a84c00042c52e54957b5000042c52e -53a8fe0003f37c3527619f00003cbc05 -53ae61112a324f156272c700e04876f1 -53b55500048e5c75488e82000048d1be -53b5fe0004256805442ddb0000425680 -53b6b100043bed155217e0000043bed1 -53b6f600044045255231b20000440452 -53b80f0003d12325462a0900003d121f -53b95a0004a800c522295100004a4236 -53b9ef0004450b0552516800004450b0 -53bbc2199c10f9451005640080496e15 -53c27e000456de155258950000456de1 -53ca0416d572c1157cca4500e047c145 -53caca00042924d54bc1a5000042924d -53cf790004a297558b1c10000043299a -53d04600045088e525946b000040834a -53d1350004732d3555f73100004732ce -53d15500049153b51f32db0000491530 -53d3e0000436a08511e64e00003cba63 -53dd5d0004727055360429000039346c -53e9b900047d66b57ce5d2000047d58b -53e9e60004ce37351cc9e600004ce364 -53ea920003d93925b1068700003b603a -53eddd0004096c758ca2c200004096c5 -53f12300046336854b68f50000463368 -53f28b000473db25ea5e450000473b41 -53f61b0003d02db5cb84c000003d02d0 -53f6218c2319e8659f8a1900a04907e0 -53fda270baca3155ae1cc200b04909ad -5403210003d0d5654a661b00003d0d56 -54036b0003d223f559235900003d222f -5409170003dd985552af6200003dd985 -5409350004ce37351cc9e600004ce364 -540bc00004727055360429000039346c -540c0f000408df154671860000408df1 
-540d3b0004480fb54b1c7300004480fb -5411dd00040a1195fd8190000040a0ff -541f5f77dd7959a5155f6b005048da00 -5424f5a92e6cb0e52522d800504c8746 -5429538f4875acf531273700f048b65f -542ca70004500da552f63b00004500da -542d2c000471b7554b7b510000471b75 -542e530003d121f54784c000003d121f -5438a40003dddf15522d9400003dddf1 -5439860004004f9552b06b00004004f9 -543de800043128b5459ec5000043128b -5440b70004aa12c5c2cda400004aa129 -5441310003b3e4c54d96a300003b3e4c -5441310003b3e4c5834f21000038758a -5443b8ecbb33d1b5cd394c0050472f2b -544862d933e87e659e943300f049f049 -544e1900048f8a85bb3322000048f88f -5450760004c4346574dd6b00004c4344 -54597900043fa0c50d46fd000043fa05 -5460f5000454a8c5038c760000454a70 -5462200003cd7f05fa3acd00003cd7e9 -5465416a1a9175657b742f007047466f -5468502f702b83558a1e7400004b0a79 -546cb50004749c5574128b00004749a5 -54700f0003d0ca8515648900003d0c7c -54705c0004bc4055f53aa100004bc3fc -5470a40003edd9b599399400003eca98 -547747bd4d14153515ee240070495cda -5478ea0004580795bdef600000458072 -547bea00042c3d256681d6000042c3d0 -547c99c9c1ca1e1518cda400604abd3c -547e2800042a2d150d1401000042a2ae -54822c00048e5c75488e82000048d1be -548da6568cb986c556082800604bd838 -548db883c349c3c5379d2b004049a5f3 -5493cf00041a6f65518186000041a6f3 -54942a7423f3ac55f5b64500a0474dfb -5494fe0003e1f7c53ff80b00003e1f79 -5499940003ef42e57f7c0d00003ef422 -5499bf792d747f05b3f8a100004c6e1a -5499c10004c44b95b8bebc00004c44b0 -54a1c5290665008526b9b9005047b4bf -54a3e13a17a24a6532195100d04a7359 -54a9230004507c650d8ce8000043e078 -54adc500043d493528e4a3000043d481 -54ae8f0004014de54d828a00004014de -54b37400047d5c757ce5d2000047d58b -54b5de000482a875d25d9600004802d4 -54b9a500042a2d150d1401000042a2ae -54bc720004a276057c09ae00004a272e -54cc93000466474542f5db00004285ff -54ccf500045cc0054bfc76000045cc00 -54cd7b0004769255aca9d90000476913 -54ceba51787b6065e73c2f0000476a95 -54cf9d68b455f98566015100204a4971 -54d5b9000483f515834f21000038758a -54d5b9000483f515c511f700003957bf -54dc0b0003efe3950e317200003ebe6a -54e8a80004ba79b5c4c9c700004ba792 -54e91400041110a5422f12000041110a -54eb069c906525a5e3f8850090475170 -54f09500044d5ac5958420000044d569 -54f1839230368bf55da096002047cf8b -54f999046dd122d5c7d8a100304be7f6 -5502ab0004bae0f53d6aab00004bae0c -5508130004b24925e5fe7e00004b1093 -55099a00042515452ea27e00004250ca -550a7b0004bc3995f1fc5c00004bc32e -5510d20003f37c3527619f00003cbc05 -551851000432b025492a8c0000432b02 -5518ac5d178c74d5b491ab00d04c6432 -551988c82f0823259ee9de00e047bfd5 -551b01000406e4a5d1bac20000406d4c -551cd5d110c87f955fd25300204bf377 -551cdb0003f4acf5e36b6200003f4ac8 -5520770003d341d54b4a4800003d341d -5521a30004c6f935dab7d100004c6f57 -5524d7000422b1854ad9d60000422b18 -55309125db661f75b9511b00904cd68b -5530a018c4a54a5505df4600f04879ca -5538ad8f828c1be543fa5600604b33a9 -55421d00043115b5467e7d000043115b -554876000458393553a1c90000458393 -554fe300043312054697250000433120 -5558a700044e9d254c2020000044e9d2 -55637ff0470e69e5910dd200c047dfb9 -556d310004199fc548097a00004199fc -556d82118c660d6515ee240060495cda -5570280004b0b6057caca7000046c7a6 -55724e0003cf8ab553d82000003cf8ab -5572980004701ba5303cd7000046fef3 -55795500049549f51e30250000495494 -557f390003a7a8f5c511f700003957bf -558425000490d3f58f24ac000048e0c6 -558af68e4174f925b4e151001049eccd -558dbcfc3a2360d5b739d200e0478593 -558e28c366408f550f50eb00d048d6b4 -5592c3000473360525db080000445f86 -5593328c58c38bc5470a1d00f04b6f61 -55987723fae9bb4588ce24008048be41 -559b580003e408b548a36200003e408b -559d1ac108ba7bd55945a80000487c5e -55a27c00047241654c65dd0000472416 -55a6f60004400975ee27480000440024 
-55aa7e0004bd4a357f2e7b00004bd35a -55b0438f1199d58511df2200a04969ab -55b5e20004b273b534d7a400004b1f0d -55ba740004b03985e8167b00004b0270 -55c2b100043dedd537b7f6000043dec2 -55ca130004043f8548046b00004043f8 -55ce660004ba0095af0a45000047d45c -55cf6b0004865b656caa4e00004850f7 -55d1d9000481b8d57b42450000481b65 -55d70a00047e1c25c0c096000047e18b -55d96711a5f9ae357808a700504c65d7 -55dd3c288c7e6f959d5e7400304ba528 -55ddb9000483f515c511f700003957bf -55e107c9f677c4851cc9e600104ce364 -55e6ac000420bc05dea11700003d86c2 -55eb210003dad7754ca69200003dad77 -55ed8f0003d223f559235900003d222f -55f4790004cc8a0515447900004cc6ce -55f9cf00042c52e54957b5000042c52e -55fcb3ea71059155b1594a00b04a84d9 -56010b0003ef43a53b6cdb00003ecfdb -56049500044edbf5c7faa2000044eda3 -56057b000483ad8502c4f5000046a129 -5607580003e40b554967bb00003e40b5 -5608a700045c2af554879c000045c2af -560a6300043823855e75d10000438234 -5619b900047339454625de0000473375 -561f1ad90e4953051e05a400504ab5d0 -56249600047c19a52b3ec3000047c197 -562a15640a1af6155c4c7d00404ab354 -5638960004771635ba27ac000047715b -563a070003e6dd654c8ca400003e6dd6 -563c59a4f42e62a5a2005c00904abdc8 -563d940003ec9e0554d0db00003ec9e0 -563de200048d0385c578ac000048cfda -563eaa0003d517159634f800003d2924 -56419f0003cbc4b54f415900003cbc4b -56492e5abefffed5f49b7000d04a39f9 -5653858613beef754c85d20000477a12 -56569351fcd400452075b9001047cc31 -5658eef78b74af85181d55008048ea46 -565fb0000444e225ce24a70000444c41 -56610a0004bf3fe579507600004bf3c5 -5662ac0004148565dcb2ba000041484d -566f0a000413fb2548b7120000413fb2 -56749900038b5cd5552918000038b5cd -56749900038b5cd5834f21000038758a -56754a0919809635c44e50002048e514 -5677d732b342e105917dde002047b62f -5683550004bd3e357f2e7b00004bd35a -5685b24cce8c89453953200030470861 -568b0fa55e3cf935d9e02f009047c116 -568bdc0004a2cd05914b7900004a2c80 -568c15e71c61ad459a9333006047e575 -568fa40004b2710534d7a400004b1f0d -56930a00047c19a52b3ec3000047c197 -569760000456c7d565cf12000040992f -56a015fa6477ec0570da45005047f5f6 -56a0b70003d9a7154cc09800003d9a71 -56a3f7c47eeb6f25bbae8b007047c24e -56a507a0a0bb4e0597d52b005049b385 -56a620fa657a0db5ab20d400a0476d95 -56ad580003d0d5654a661b00003d0d56 -56ae7100042e5005322a58000042e4a3 -56b5f80003f384f54ceffa00003f384f -56bae1f08e7aa8e5535056008049ac85 -56c850c1fdec32a5bedc3d00b04ca9d0 -56cdd1000436a08511e64e00003cba63 -56ceac00040310e5554ac2000040310e -56cec200040ae8354f2990000040ae83 -56d06b000400bdc55512ba0000400bdc -56d5980004b2710534d7a400004b1f0d -56db6b0003d245a554c73f00003d245a -56e056497d56521507038c00204a1c02 -56e09a00042c54f5481129000042c52e -56e456000470b135556ddd0000470b13 -56e5380003ebb475a2676200003ebb24 -56e924000490057515cc250000490048 -56ebae00040eae854bca0c000040eae8 -56f1f1000481b8d57b42450000481b65 -56f2240004914ee55aee240000487259 -56f2c30004749c5574128b00004749a5 -56f7a900042e5005322a58000042e4a3 -56f8130004b2ef552da81300004b2eef -56feb20004c6ce154a2fd100004c6cde -57045d00043313458cad2c000043311e -570d4c0004727055360429000039346c -570d4c0004727055834f21000038758a -570dc10004c6d0b54119e30000448a71 -570f5b00043823855e75d10000438234 -5712660004725ac5834f21000038758a -5712660004725ac5ea76c300003921cb -57136c00044a58c54e3476000044a58c -57167b0004b26bd534d7a400004b1f0d -5721b900047339454625de0000473375 -5721cf7272fb86c55228a800104b0541 -57228e4bc286f4455a8bf500c0487207 -57289300046a17254dff60000046a172 -57292b00049f2ea5d8d02500004974a5 -5729c10004c6ce154a2fd100004c6cde -57342651d56c6f05b82a1d00c04ba1b7 -5736e80004c3e5f545810a00004c3e5b -573a870003a80085834f21000038758a -573a870003a80085b485f20000395839 
-57459600047fad052fe5f1000047fa3f -5745ef0004460d2596393100003cf5c7 -57546b00041280554ff2ac0000412805 -5755d11d2151447581a71700804aa0fe -57593cfc1c45cb55bd2b33008047a404 -575a6d8db9344a554119e30070448a71 -575ca80004b58c15fec3a400004b57b6 -57658e8d23e048256f928b00f04821af -576a660004ba0095af0a45000047d45c -577b5100047081654e01af0000470816 -577bd000047b4ce521aa7e0000459dc1 -57817900043539454f1ab10000435394 -5781a300048725d55aee240000487259 -57866b1c38c337753d1df100b0483530 -57946720bc696825ecbfd100b04c50e6 -5796390004b01a95ae29c700004b0094 -5798a70004c6ce754a2fd100004c6cde -579d527ee6c74be54a677500504cc3de -579f8e0003e1bac5c79b4500003927b9 -57a08500047d1375c15333000047bd67 -57a0d400047d74957ba1f1000047d739 -57a1080003c7fcc53200d700003c1900 -57a16400049686753106db000049685e -57a468893c41b9c5171cbc00904cbe3a -57a6d9d4101b66659b475500604b0de7 -57a8950004580795bdef600000458072 -57a99e6a85322d05e28ac30050474552 -57a9cef39dcf6f7576c55d0040472e89 -57ab92000445fb85564e5f0000445fb8 -57ad7b000483ad8502c4f5000046a129 -57b2ba00040fe4655687cf000040fe46 -57b696dd347fb065a70379007049edb9 -57b7ed00049e94e526707c000049e8be -57be7b0004b2ef552da81300004b2eef -57c09500045664153997dc000043c95a -57c512000432b025492a8c0000432b02 -57c5de000477d32513f1f10000476ace -57c7010003fdb405ba357a00003fdb3c -57c9e17c03031b9541cf5e0010472f16 -57cdba000422b1854ad9d60000422b18 -57ced80004ca5bd5a80bac00004ca549 -57d2db0004949385b121240000494934 -57d525a8f4fe2675300e5600904b9d6c -57d6b20004c547a5e801a300004c2934 -57d89a000433ca75bd8e7e0000433c42 -57d90d2bd0b5a2b51e1dab00a04c50ba -57db6b00048725d55aee240000487259 -57e9c10004c547a5e801a300004c2934 -57ebbf00045f9dc54fd27e000045f9dc -57edd90004733275eab7450000392d66 -57eeec0003ec9ed55670a400003ec9ed -57f0f80003d35dc5fd10fc00003d3594 -57f7c200049e94e526707c000049e8be -57fd6800042924d54d4401000042924d -5811d54012ab00354aa7330080474df4 -58147a3d16db7195f83d0a00004c1500 -581b550004b765851400a800004b7647 -581b6b0003d341d54b4a4800003d341d -581eac00040042050d7cfe00003f5d48 -5821b76c3d6311d525ba6600e04b5d56 -582211eea0fe4395a466c3000047d233 -58230a000476fd254c20850000476f21 -583ef4a09566a1f5a2f28800504cccdf -5842312a3fd44d457b51e200a0488f64 -5846580003d0c09556d8c000003d0c09 -584a580003d12325462a0900003d121f -584ca7000456c9558ec76c0000456c64 -584e5000042ee9354c167d000042ee93 -5853b30004cca8054f6c8200004cc801 -5858fa3aa1fe3d557736e800804c1854 -5863600003dad7754ca69200003dad77 -586671b7f0a0dd3571d5e300f04cdd95 -586e3bcc984ac7e5fba82f005047f7da -587360000484d855e148470000420966 -5878db0003efd7154b0f6600003d01b7 -587c4700040f937551297a000040f937 -587e660004b01a95ae29c700004b0094 -588095000469ce05f79dc90000469ad6 -5885d264364088652951d900404764c8 -5889668e8422a965188c6900404b6703 -58912b00049f2ea5d8d02500004974a5 -58957000045f9de54fd27e000045f9dc -58957562a93a1f953d6bc200b04a2866 -5896e1e41cf000b5232eab00104b38a4 -58971b8eea6877a50e7e7400c04b97e6 -589c470004148565dcb2ba000041484d -58a43683f0b625851dcfc200604a3949 -58b00f0004014de54ed59000004014de -58c00ff829e47f4510e0d400c047fda9 -58c1f4000496ad25edc3250000496ac9 -58d6c20003fae405577e8f00003fae40 -58dd720003ebfae551ceec00003ebfae -58de530004c46c4510d1a300004c3f17 -58e7c90004715fe550767c00004715fe -58e8d20003eec25557b3fa00003eec25 -58edf80003e6dd654c8ca400003e6dd6 -58f9380003ef43853b6cdb00003ecfdb -58fe7dae6770e375e9c02800a04b94ea -5903e105807ef1250e312b009049f7e9 -59046f00048e5c75ea5da3000048dd18 -5910eb00048b103525cdb90000482893 -5916a50004c54325e801a300004c2934 -591bcfb4b869c9a5b922ab00104bcf02 -591c0d0003e947b55160db00003e947b 
-591d07154f0a0525e1059800d04b385a -591ded820c305d653e77ed00f04a2a1d -59220c00041a6f35518186000041a6f3 -5922d50004434f1557b9bd00004434f1 -5924770003d9a7154cc09800003d9a71 -59252a0004ca1e351e612a00004ca1de -592b4e0003f384f54ceffa00003f384f -592ec6800705595502d722009048f01d -5930574012772e4506e15d00f0472db5 -5931ae9abea5bc159ab47d00e04a8a1a -593a39ed827b57d56af5d200e047fda6 -5941e188a4cef64541cf5e00d0472f16 -59498600038b5cd5552918000038b5cd -59498600038b5cd5834f21000038758a -594bfa0003e1f7c53ff80b00003e1f79 -594d6400048e5c75ea5da3000048dd18 -595b543d57ab60a527922400e0490ba7 -596d330004bf2f6556a9e200004aceb3 -5975db99f0f776255065d10080437f6d -59770a0004802825834f21000038758a -59770a0004802825ea76c300003921cb -59802f0004705c8588c30900004705b0 -59833d0003d517159634f800003d2924 -59847600046147a56278ea00004582c9 -598d5100049f7bc5a3cee8000049f7b5 -5998d20003dd985552af6200003dd985 -59992400048ed615b58360000044fd24 -599da500042aa0d558109a000042aa0d -59a4d442cadbcca542b67b00204ae743 -59a60cafbfd699958495de0020475632 -59a64e0004a887b5b591d000004a886f -59ae660004b33c0543fa5600004b33a9 -59b5acad8417d9d5f97346000048a7d1 -59ba720003d26905945f6b00003d2683 -59bc0f00040eae854bca0c000040eae8 -59bc2b6270a4f945ff9b7300d04cdaef -59c851000487308531cbf50000487301 -59c868000428577574b4680000428526 -59ceab07b375629571312b00904a2284 -59d1b900047d5cc54992c3000047d520 -59d274c64c7b908593de0f00804c4ca9 -59d5ab4bb33470c52c16ab00704b224b -59da56949d1b2e05fd0f74007047e6c2 -59dac700048c3765fba82f000047f7da -59db1485b1085675666b5e0000472cc3 -59dd86000420ca75c10c6b0000420c52 -59de8a000408239551de0c0000408239 -59dfbc0003cad945586d1200003cad94 -59e4f55d4f680e45a9b47600c04c4947 -59e5a300048be48588ce24000048be41 -59ea960004862d2590d5d9000047e6ac -59edf4000490d3f58f24ac000048e0c6 -59f33d0003d19b1558c18f00003d19b1 -59fecafd035588353a87dc00104a7ddf -5a08f5000464fd755150760000464fd7 -5a0d13fa599441f5fbb9b20040472eb6 -5a146800042997355233370000429973 -5a1f7000049f7bc5a3cee8000049f7b5 -5a1f8c0004a99135b3a9a400004a9842 -5a260f0004c628e51a0ab200004c5ba6 -5a29d2000477bd750253330000473d81 -5a2cd20003dddf15522d9400003dddf1 -5a2d7b000480dd1528e02f0000480d14 -5a2dc70004b2aaa58e4c1300004b2869 -5a2ebb0004a99135b3a9a400004a9842 -5a320c0004100ac55687cf000040fe46 -5a3b6d00049fbb85e233ed000049fbb4 -5a3c7200049aea75801f6d000049aea2 -5a3ece0004c74b753a5ac3000047f70c -5a49dd0004004f9552b06b00004004f9 -5a527e00046c93052057bf00004699d5 -5a55230004580b455149e200004580b4 -5a5e6349ca00d945c6a60d00c0494c6a -5a6594d3a7e934d595477400b04754c4 -5a67b80003cbc4b550a33900003cbc4b -5a6ab100043be6f58d5eba000040c009 -5a719f0003cd115530df1100003cd111 -5a74a80004b7714536adc700004b770e -5a760c000400818516fec200004007d3 -5a7a6827c58625e57e023900504b2107 -5a7eac00040933b558cec2000040933b -5a8b3d0003d26905945f6b00003d2683 -5a8be645d0ee12059c1cb5005047e576 -5a8c0b0003d222f559235900003d222f -5a8cdfad6f23b105a6238c00604a1821 -5a95d800042924d54d4401000042924d -5a9b03ef60bb46156e0f5e0000472c3d -5a9c0dfadc4a4cf535b36b0070487b8f -5a9e0e0085739905a16ba400604b8bcb -5aa2c8000399ab55d33d18000038bd74 -5aa38c00049fdf55c1ac0d00003f36ec -5aa6660004b4d0d5f24a7e00004b4d09 -5aa6e700043162a551e28c000043162a -5aa75d000470fc3551c2980000470fc3 -5aae27000433ca75bd8e7e0000433c42 -5ab35b000436a08511e64e00003cba63 -5ab3e1f81e735b05b3936b002048d69f -5ab42f000480dd1528e02f0000480d14 -5abb170004abaf559596bb00004abaf2 -5ac774000473d9d50253330000473d81 -5ac9170003dca165a80d9400003dc9d9 -5acefd00044045255231b20000440452 -5ad04f4655478895eb1e4e00504aa001 -5ad0760004444ea559a9ef00004444ea 
-5add70000456de155258950000456de1 -5aded43a8a088575a16ba400c04b8bcb -5ae5e2000451b4a5596bbf0000451b4a -5ae74600048be45588ce24000048be41 -5ae88eb5d354dd55db99e200e04ac1e6 -5aebf734adc06c6535bfac0060483b86 -5af0e800043692c5599eb1000043692c -5af4f500046a28e5703dc9000046a202 -5afa5dd70f2f6965e4da3900304b32d0 -5afb48fb8d6fdce5d76d2400a0497227 -5b01940004abaf559596bb00004abaf2 -5b026300043bed155217e0000043bed1 -5b05d700049f05359e9433000049f049 -5b06560003d40fe559791700003d40fe -5b070a0004802825ea76c300003921cb -5b09a300048c3765fba82f000047f7da -5b0d8f000455e9d5b1068700003b603a -5b0f6d00049e7e5583f17b00004796be -5b23590003d32fc559baaa00003d32fc -5b25510004a1c385af0a45000047d45c -5b25624aa2933325ef53f500d04895b1 -5b2b8600048be45588ce24000048be41 -5b2e5a72841e1b356204d40090483b81 -5b2ec25b5b64e8450af4a700504c58ac -5b3dd700049919b5009c25000049914d -5b3e8a0003fcc5759288a400003e19e6 -5b44d700040ae835508b01000040ae83 -5b465f0004450b0552516800004450b0 -5b4df700042ee9354c167d000042ee93 -5b55e60003cf8ab553d82000003cf8ab -5b58d70004014de54ed59000004014de -5b5eb1000437ad45d0c80b00003d7b0c -5b5fb600041280555110470000412805 -5b6ce8000437f5a559e6b10000437f5a -5b70e800043bcff5ea76c300003921cb -5b7de20004b9c1750f567400004b9bfc -5b836000045bab557698f5000045ba8b -5b8897f9e0da75a5271896000047b79f -5b8d5500049919b5009c25000049914d -5b925064aff9c245c677d100804c520a -5b9a3b00044edbf5c7faa2000044eda3 -5b9b6aa84641151596052b00b049a6a1 -5ba4f4a1bd715a55f53aa100204bc3fc -5baf761a44c5a53529b5d7001049a828 -5bafbf1f2d96a59569a93c00e04ab8ee -5bb1dd0003fdb405ba357a00003fdb3c -5bb9b9000473e4f59a5c850000473e42 -5bbb8dfe992d40a56a406f006048f4e0 -5bbf0a000473e4f59a5c850000473e42 -5bca240004866c4525cdb90000482893 -5bdb516fe116c3a52115ab00e04c7446 -5be44763fbbca025af0a4500b047d45c -5be7c33a017fa785c48e8800804ccc87 -5be973307510f1656fb37600804be61d -5be9c9c26c7dd23584c5c000404a3c7e -5bead00004a9e3b549b8ae00004a9de3 -5bec950004649d955a989300004649d9 -5becd20003ec9e0554d0db00003ec9e0 -5bf1c9000460f875a335c90000460e70 -5bf2ae94eccea6850e7e7400704b97e6 -5bf5c50004340e757ddf5b00004340e3 -5c04130004ae5b55199fa400004ae59a -5c04cd997c2b7c45af60a700304c2494 -5c061900048e5c75f969f1000047caa9 -5c0b170003d41a6559791700003d40fe -5c15cf00048e5c75f969f1000047caa9 -5c166beeefd7f7d55df3a600004ca540 -5c17170004ababb5a7a5a400004ab782 -5c2072a24615fb25c001e300d04ce44d -5c23760004ca1e151e612a00004ca1de -5c2a090003a7f5e55ac16200003a7f5e -5c2a090003a7f5e5834f21000038758a -5c304700040c0c6583e301000040c0c0 -5c327e00044e6ae55acc76000044e6ae -5c339c00045a12f55ae360000045a12f -5c368b00047e2025c0c096000047e18b -5c3c560004703dd5e69e0200004703da -5c3ecf02bf541fc56c3fed00b049d8b2 -5c40980003cb984592999f00003cb97e -5c424e0003cc4775b1068700003b603a -5c497a00040028d55ac1dd000040028d -5c4d45ba761133d5c0187c009049f08c -5c4fd0c004c48c155aee2400f0487259 -5c52e80004a2ccf5914b7900004a2c80 -5c578ce7db7ad835a229d800204ca072 -5c5ddd0004050bd584a30100004050b9 -5c5e560004b4d0d5f24a7e00004b4d09 -5c607d88580ae4159ac9f40090492620 -5c678e00043fa0c50d46fd000043fa05 -5c6a580003d116a53f1e0900003d1167 -5c6cfc0003d245a554c73f00003d245a -5c6d2300044a86755f4f6c000044a85a -5c77dc00049fd585c1ac0d00003f36ec -5c797b000479b675fe3b0a0000479b3b -5c8b510003ed19c51ef0fe00003ed197 -5c8ece0004b18c95864ea100004b16a0 -5c8f760ebcc7a6c5b3936b005048d69f -5c9a63000437ad45d0c80b00003d7b0c -5ca09900038b7a855ae518000038b7a8 -5ca09900038b7a85834f21000038758a -5ca5db00042437755b49d60000424377 -5cad750003d18365e434fc00003d1832 -5cb2740004b9c7d50f567400004b9bfc -5cb38c0004a9f26571d15a00004a9f23 
-5cb6ba0004127ad5a991dd00004127a7 -5cb73300047d74957ba1f1000047d739 -5cb742ab55efbf9596fe6600504babe9 -5cbb6d00049e7e5583f17b00004796be -5cc1e2000458393553a1c90000458393 -5cc392e9b6a4f165cd8a96007048baaa -5cc5980004b9c7d50f567400004b9bfc -5ccd380003de9d455b64db00003de9d4 -5cd1ae00049fd585c1ac0d00003f36ec -5cd1cf6be07b7f65a3f96b00004be947 -5cd6720003d48d65b1068700003b603a -5cd7580003ebfae5531c0d00003ebfae -5cdd6b0004c58ec5d76d240000497227 -5cdf87545bbe5d253e62db00f0496416 -5ce2c700048c9ac5ba45e2000044c627 -5cecf5000468fd055bb1e20000468fd0 -5cef5623546cdb25d3b70a00d0475ceb -5cf2ba000420bc05dea11700003d86c2 -5cfadb00048e5c75ea5da3000048dd18 -5cfdd200047e17756d99d2000047e172 -5d028000040f9375527e8a000040f937 -5d0a80000471f6c55b8a7c0000471f6c -5d0d0a0004c58ec5d76d240000497227 -5d11e20004b18c95864ea100004b16a0 -5d14b3000435394550587c0000435394 -5d1d2f09ab8c9995ae40fe00303ed13c -5d302b00043416a552a3f6000043416a -5d31b9316ac939c50f9ebc00704c602e -5d3895000465ddb54931700000465dcc -5d462c79a89252253027a400304b24c5 -5d48687d13a219452c16ab00804b224b -5d4b510003cbc4b550a33900003cbc4b -5d4eba00040310e5554ac2000040310e -5d500f00041a6f3552dd52000041a6f3 -5d53250004995f2524d3d90000484b3f -5d580f00049191455aee240000487259 -5d6394000437f6d551b5790000437f6d -5d6ba80004a38d852fa3a800004a21ff -5d71a100047af915a67bac000047af8d -5d752ef7dbdf07c57264850050483736 -5d7c6b0004110fd55bfeba00004110fd -5d7d3c0004c875052522d800004c8746 -5d835e0004726075834f21000038758a -5d835e0004726075ea76c300003921cb -5d84324dfe5d9ed524d3d90030484b3f -5d880c0003dd985554091700003dd985 -5d886b000420bc05dea11700003d86c2 -5d8df100048270f5d25d9600004802d4 -5d8e070003e947b553013800003e947b -5d8fce6c245d95e58e4c1300304b2869 -5d91f20003b614a55c35be00003b614a -5d91f20003b614a5834f21000038758a -5d99ab0004be9cf591c3d100004be9cd -5d9ad580aeab1555e3730a00f048407f -5da21e083200bce5b6b59600f0481945 -5da2c3000473820563e1b900004736f3 -5da495000400a56552f20c0000400a56 -5dacfe0003b34115834f21000038758a -5dacfe0003b34115c79b4500003927b9 -5db7620003dcaeb5a3825600003dcae7 -5dbb04033083dbc5adfe7e00304af5e2 -5dc11edd72e2e725ecc93300d04c1ea1 -5dc6e3d992a5ca157151a8005048531c -5dcfe90003d0e6c52f72e200003c7943 -5dcfe90003d0e6c5834f21000038758a -5dd21300041280555110470000412805 -5dd2c700048c9ac5ba45e2000044c627 -5dda0c000400bdc55512ba0000400bdc -5ddbfa0003e8cb255c44a400003e8cb2 -5de28000040ae835508b01000040ae83 -5de54a0004aa0ab5231abb00004a949d -5de95d71d748a38596ff740080482e4c -5de9940003ec9ed55670a400003ec9ed -5dee7e0004bbe5750b719800004bbda0 -5df05c0004bbe5750b719800004bbda0 -5df09600048353e53d75d9000048353b -5df0d0cd97c15bf5fe696b00004c01e5 -5df8010004243ab55b49d60000424377 -5dfb700004a5ec258925ae00004a5ebb -5dfec200040971e54b0f6600003d01b7 -5dffec00048d2945a559a1000047663a -5e01de0004758c457e15a100004758af -5e100354d3f1ce65aac3f50080489c1b -5e23630003d0c09556d8c000003d0c09 -5e28d400047ed9553deac3000047ed8c -5e2a7e00046475955cc8950000464759 -5e2cac00048858c5e009a30000488586 -5e2de20004b3176516c3a400004b313f -5e38a40003e278f55ce10b00003e278f -5e39140004082395531ec20000408239 -5e3d6f0004aa0ab5231abb00004a949d -5e43b75ac28b8dd531e5d900a048391f -5e52e232842dd4655721c700904bda7d -5e5698000470b135556ddd0000470b13 -5e59a500042d4ad582cc9a000042d488 -5e5ace0004c527b52ecab200004c3ba0 -5e5c930004511925032de20000451191 -5e66cf16fc0dc385cfee3900704b047e -5e670100040eb8e51fef5100003e66e9 -5e73620003eec25557b3fa00003eec25 -5e74db0003f4f825e7697200003df2b4 -5e78b60004a99135b3a9a400004a9842 -5e7c6500042997355396ca0000429973 -5e7f2371a14ee7652895a100a0476e7f 
-5e7fbf00045cbc05f0f67e000045cbab -5e8094e7afed100594042c00104c24f9 -5e84ef0003dddf155438a400003dddf1 -5e8975a105826ad5b45e0f00a04be5ea -5e906b0003fa7d552fd28a00003fa7c1 -5e91465d438339751d1a1d00504bb048 -5e91e30004cd1fc5d16acb00004cc043 -5e9986000419e0355d35dd0000419e03 -5e9a7c000470a1a5cd3a020000470a18 -5e9bcf0004098ba55d495200004098ba -5e9e3b09135211a5580edb0030499328 -5e9f3d0003d41a6559791700003d40fe -5ea005223fbfa0c55928d400f047c0af -5ea7cf00040b11f510e6c2000040adf3 -5ea8aa0004004f9554398600004004f9 -5eae352c0c9355d522c9e9001048e29e -5eb86b00040125d55d72ba000040125d -5eba0086d5ae27553f157b00804820f5 -5eba6511998b52c5be4e4e00c048e123 -5ec0050004a276057c09ae00004a272e -5ec3f7fd045d2ee54f5a7b00c04afbe1 -5ec568000445fb85564e5f0000445fb8 -5eca37223702e015ed43d100604c3925 -5ecc20000461ad451ec1c90000461ace -5ecde56ae28106c59bc61700604c8f5a -5ecf3f0003da630599e00b00003da60a -5ed23b00044d44f55d85e2000044d44f -5ed459eb36ff72b5f7357000d046229c -5ed4c83459d457155f237600304c9d44 -5ed5843d0ba578c56e1b2200f0496f64 -5edd6bc1475b36057181a300f04c4cf1 -5ee0f0f27e4acda5f2880500f049b18b -5ee3207775797a352551d200004817f9 -5ee7620003e1f8353ff80b00003e1f79 -5ee80b0003f0bc651672ec00003efbd9 -5ee8a700046a03b5b1068700003b603a -5eeefd708b499f051e1dab00e04c50ba -5ef27c19bb28262539c3a400d04b21d5 -5ef3a80004932d45d92425000049325b -5ef490e1fb9ec1c57061e200e0488bef -5ef58600038b7a855ae518000038b7a8 -5ef58600038b7a85834f21000038758a -5ef87dcd745abff58cf77400d0478609 -5efcaa00043162a5535fc6000043162a -5efec3000473e4659a5c850000473e42 -5f04560004a5ec258925ae00004a5ebb -5f059a32ca7c1115a1735500404b35f8 -5f0b74000473e4659a5c850000473e42 -5f0fe5e2399585458881d7003048edc5 -5f1a480003d26a155db00b00003d26a1 -5f22a1845fa9ef85c13ad200604cd3bc -5f250b0003e5a51537c76600003d0bae -5f2c199e9aa317159bc61700304c8f5a -5f2d7b00047c19a52b3ec3000047c197 -5f2e6300043714b55df4e8000043714b -5f36bf5416498d751cde4500e047d682 -5f38560004a0ee653955d90000473da4 -5f3ad00004abaf559596bb00004abaf2 -5f3e9986f9e2ae05b85b8c00104a8d66 -5f44b701537a2df5a945c100a04c5115 -5f5020000450d6257a2ea20000450d5d -5f53cf0003fae405577e8f00003fae40 -5f56257b1416af858855cf006048fea0 -5f56ad1140a354c55317be00804a6b8d -5f5d76d91933ee15b1f24500204793b3 -5f5f330004836315e148470000420966 -5f62640e3ec49c75843a6600d04ae863 -5f62ac00041a3875e6bd520000419c37 -5f66745c7994f215c81a45008047e8e6 -5f6cfe0003a7f5e55ac16200003a7f5e -5f6cfe0003a7f5e5834f21000038758a -5f7888d2914a75b5b491ab00f04c6432 -5f7b8783cb0fa485361d4a00904a9ed9 -5f7da3b85ec670553ce06f003048e6b0 -5f80de32043d14556ddaa500504c435c -5f810341f0419bf503cdde00b047d165 -5f880b0003d19b1558c18f00003d19b1 -5f8b620003cac0951836a900003cab7a -5f8d2300044f02055df020000044f020 -5f95cf00048f4ec510dd24000048f4df -5f9f1cde03e15685e91f6d00804a66d1 -5fa69d00043416a552a3f6000043416a -5fc0ef0003ec9e05563d9400003ec9e0 -5fccd700041a6f3552dd52000041a6f3 -5fcd4300040f9375527e8a000040f937 -5fd01a0003f8b075360429000039346c -5fe1e30004cd1fc5d16acb00004cc043 -5fe75100046dd7355e742f000046dd73 -5fe774000477133592999f00003cb97e -5feb110003cb984592999f00003cb97e -5febbf00045d4595d570a7000045d456 -5ff25e0004352dd54b0f6600003d01b7 -5ff9590003cd7f05fa3acd00003cd7e9 -5ffba1f47b0bcb159bc5be00c04ce49f -60002f00047334a60d6ab10000433fc7 -600140000448afa65ea4000000448afa -6009be0003cf8ab655724e00003cf8ab -600d4a0004ab2736902c6f000048ec62 -600ee800049a8d7629b5d7000049a828 -6012a1f20c882b36fdd92400104966be -60146800042aa0d658109a000042aa0d -601479042d59210616470e00604ca50d -6015350004ce6146262b7500004ce5f1 -6015dbef6f7aca0663ef4200704ccb23 
-6016280003dd985654091700003dd985 -6017c00004ce6146262b7500004ce5f1 -601fcf00040042060d7cfe00003f5d48 -601ff00003d0be765eb8c000003d0be7 -60203cf929b4fae6ba96cb00a04ccfd0 -60209300045cc1c6f0f67e000045cbab -6020cafaa7b06846cd1d0a00404c0538 -6026390004b44fa6205b7900004a0da1 -602a720003cb9ec6bfda7200003cb9ad -602b6c00044a86765f4f6c000044a85a -602f5b00043823465e75d10000438234 -6031d700049ae8b69aad2b000049ae58 -60365f0004460a767f765f00004460a0 -6041e20004ba79b6c4c9c700004ba792 -6042430003e947b653013800003e947b -6046720003d41a6659791700003d40fe -6047390003cd7f06fa3acd00003cd7e9 -604af2000400a56652f20c0000400a56 -604d800003cad946586d1200003cad94 -604e5f000445b9b6d4c8fe0000445b03 -604f5100046d5b765ee42f000046d5b7 -6055f6aa17030ae6e7982800504bb371 -605a480003d222f659235900003d222f -605d8c0ae7b812b66c91de00c0483353 -6061dbfdce5df1567745ae006049f017 -60687c0004a20966adf00500004a0ddf -606d427b00c178162202ce00b04abdf9 -606d680004434f1657b9bd00004434f1 -606df70003d245a656db6b00003d245a -606f96829c5085267139cf008049016d -6071b2000472a2864b1c7300004480fb -607b330004836316e148470000420966 -607eb20004c6ce164a2fd100004c6cde -607f4e5e9d7c1926f86b74003047cd8f -60810382ac9bc126503abb00904ab21d -608135000435394650587c0000435394 -608bf50003c2ac86ae325000003c2ac5 -608c200003cb984692999f00003cb97e -6098f80003d40fe659791700003d40fe -609c9300044bfd36cac23b000044bfb0 -60a27c00047183765f2f200000471837 -60a4a40003e18fc65f3c0d00003e18fc -60a76000044a85a65f4f6c000044a85a -60aaeca2c43c2e1682385e00204cb367 -60af8c0004a4daf6e331d7000049917a -60b5e348e40250066cf1d800404c8b72 -60be0c0003fa7d562fd28a00003fa7c1 -60bf74000477133692999f00003cb97e -60c1c334de910c76955c5c00a04b3d01 -60c1dd0003fea5f65f799000003fea5f -60c2947044807a6667bc7200904a5241 -60c2ba00040933b658cec2000040933b -60c7b00004423b1665b79200004423a6 -60c8d93d07bd57169bc5be00e04ce49f -60c8f6a036ee2d164ad77300104cdae7 -60ca2c00048e5c76f969f1000047caa9 -60ce7cf0bb3d34465b1f6d00604a7015 -60cfc654c49c4266ac699600c0484a33 -60cfe90003d0d1f65f4ff000003d0d1f -60d06b00040125f65d72ba000040125d -60d2150009e7c3a6992abc00004c2301 -60d52c000471a6e6781090000047063b -60d7620003ef43a63b6cdb00003ecfdb -60d7ca000472a2864b1c7300004480fb -60db79cffc071196103c7c00304a000f -60ddaa676e6cf006c6e31400b04c9ace -60e8edf74577f4b68e0085001047333e -60e8f3356154af1623521700304cb863 -60f8b513e9536746cd555100f04a0bab -60fbdc000436655615a67e0000433f4d -60ffb60004082396531ec20000408239 -6103b30004cc12d62d19240000496371 -61073700048b78a65c25e2000048b784 -61073f0003d32fc659baaa00003d32fc -610bca00047276b62b94dc00004726f1 -61133d412202b6d6a2e5a10010476abd -611893000456e536a9b8a70000456aad -61194c0004729936315b97000047297e -61196400049064367a7ba800004905d2 -611f6d0004a6bc565317be00004a6b8d -61259000040c4f165f95dd000040c4f1 -6127231befecd64624d3d90020484b3f -612a980004724a464071af000047249e -612c7c0004a6bc565317be00004a6b8d -612dc70004ba27f6e02f1700003daba5 -6132e0000437f6d651b5790000437f6d -6132f20004004f9654398600004004f9 -6136f800048e5e06d63bac00004737d8 -613876000457b6c65e9e7e0000457b6c -613cb50004736e06a99420000044dd09 -613cfc08d8dc3c66822ab500a04728ad -6142870003a970b6834f21000038758a -6142870003a970b6c79b4500003927b9 -61453500043bed1653b6b1000043bed1 -6145537e95f785d6c8eb8c00404a0994 -61475feb084502e6a1f40f00a0492ddf -6149fe00042997365396ca0000429973 -614c4700040f0916601aac000040f091 -615057c2a2b42d960b1e4500b047402d -61540b0003e222f691a35100003e2221 -6157800004729936315b97000047297e -615929000428577674b4680000428526 -615d5d00047276b62b94dc00004726f1 -6163168aa3fc9f06c0bbd100f04c31e1 
-616aac5e855a62864c21d90030483955 -616c93000451b5e6596bbf0000451b4a -616da800048b78a65c25e2000048b784 -616dd700049f79964ed2e8000049f790 -6172ba000401ee96e07eac0000401ee4 -6175dd000400818616fec200004007d3 -617fcf000401ee96e07eac0000401ee4 -61838c0004a1276670838c00004a1246 -6187e000043692c6599eb1000043692c -618c2f00047334a60d6ab10000433fc7 -618d7a00041111c640d17a000041110a -61911f0003dddf165438a400003dddf1 -61960210af304ce662ffec00a0488a6d -619876000469ce06f79dc90000469ad6 -619c468ce961ddc67e2d6b00d04c3523 -619eec0003e5a51637c76600003d0bae -619f68c228c2d086188c6900f04b6703 -61a24600040fe46657b2ba000040fe46 -61a29d0003b614a65c35be00003b614a -61a29d0003b614a6834f21000038758a -61a43c5dabcb9c564c9dd200c047b61b -61a54b0003d02ea6cb84c000003d02d0 -61aad1f5aaff673676d87c00d04a67cd -61af6b0003d1b9d6603eaa00003d1b9d -61af6c00044afbe6b22c95000044aca4 -61b159e0860763b625570a0030475440 -61b1c10004bf3d4679507600004bf3c5 -61c00b0003edb7c61d33fa00003edb6d -61c4765e7f8c7ad6628fac00504ca0b1 -61ccaa00040310e656ceac000040310e -61ce4e3e22c299263d21c700504b2137 -61d1752199b303965beb0a00a047b4c5 -61d291d14d1ad196a2f9e700b0484b9f -61d58a2cfe2d79466181ab00904c61bc -61d6ce0004b5407626c4ca00004886ff -61daffc3c1bcac8650a6db0010490f81 -61dc690004ad14d69e9433000049f049 -61e1900004127ad6a991dd00004127a7 -61e45100043162a6535fc6000043162a -61e95200041a56a68421dd000040652b -61ebe23bd4732d86d2e4b50020473785 -61ecc605a45258d68fe19800f04b9643 -61f4e6060590166632f02f003047c511 -61f6070003ec9ed657eeec00003ec9ed -61fca7000451b4a6596bbf0000451b4a -61fdbe0004ce37361cc9e600004ce364 -6211c92603e686e6eb26bc00504bf005 -62178c00049e94e626707c000049e8be -621d794819dd6a861d68a700204c12a6 -6220c200048e5c76488e82000048d1be -6228130004bbbf4618bc1300004b8919 -622aeb7f58cea0f676d87c00204a67cd -622f920004444ea659a9ef00004444ea -6230f500044adb1660f420000044adb1 -62347acc64c1caf666b6f80050497dbe -62370615cffc7a4676a1960070483903 -62378600048738363a5ac3000047f70c -62397a0003fa7d562fd28a00003fa7c1 -623e5d420c3de40668b81300504b1827 -62452300044a59664e3476000044a58c -624c0d0003de9d465b64db00003de9d4 -624d7a0003d0c09658465800003d0c09 -624da9b903cbc0b65f8a1900304985e2 -6250960004748be6966893000046c653 -6254ca00048910e68bdcc4000048910a -62550e0003f80ae6611c9100003f80ae -625a7e0004b2710634d7a400004b1f0d -625e63b6c2713916b676d800c04caee0 -62674f4f038a0cb6b988a100204c091d -6269060003ec9e06563d9400003ec9e0 -626aa1308e34c036e56951002049fc4f -626f0900046eadf660e680000046eadf -6270280004b03986e8167b00004b0270 -62734e0003eec25658e8d200003eec25 -627946000437f5a659e6b10000437f5a -6283510003dd06569943fa00003dd043 -628da800048d0056c578ac000048cfda -6292090003d0dcd6834f21000038758a -6292090003d0dcd6d7e9f700003b08ea -629739f5ba5d2b66bfb2db00504993e5 -629eac00040028d65ac1dd000040028d -62a656a4f7742696834f21002038758a -62a656a4f7742696b1068700203b603a -62a68200048c05b6d2c786000048c057 -62b16b0004c547a6e801a300004c2934 -62bbea00042911b667c89a0000429119 -62c9550004923b8608112400004923ab -62d1140003c25136308b2600003c1900 -62d28b00047f9496578cb5000047d197 -62d3f3df49f527264b085a0040473228 -62d801539b2d8f66d834a700204c3f12 -62deac0004018656614ec20000401865 -62e2ac000419e076619ac20000419e07 -62e6a8000400bdc656d06b0000400bdc -62e8fe0003eafb46da5d9400003eafaf -62ec52729ea83836d9242500d049325b -62f2200003cf8af653d82000003cf8ab -62f268d69e0b2126122a7e00104ae10d -62f739808c737096fcc5b800e0484b7a -62fa720003cf99363ea7e50000395bd7 -62fa7b0004b273b634d7a400004b1f0d -6305720003e5a51637c76600003d0bae -63092c00046f2e7661aa80000046f2e7 -630bec00048906763e37ec0000488ec1 
-630ccb4021dea1869584d4004047bdd7 -631180bd42a2eef60705cf00d048f697 -6315bc1362843a66a7cba80070491f9b -6317620003dc7ac6bfc51700003dc7a9 -631942eee5f88cb69a5c8500c0473e42 -631c84952b55a226eeec2c00b04c2ab9 -6320a700044e6ae65acc76000044e6ae -632b600003d245a656db6b00003d245a -632cbb488c4e3e66746624003048dffd -632ee3aa7a2f4d5627473700a0486327 -6333d513c7cdb8b609875500604b6c57 -63379c000461f08661fb600000461f08 -633bcf00041a56a68421dd000040652b -633ed306cd7d3ef641f4b700404aba35 -633f740004751766e3f8850000475170 -63408477fa5fe57677f6c30070483b07 -6340ea00045bab567698f5000045ba8b -6341e20004b5407626c4ca00004886ff -63473700042437765b49d60000424377 -63516400048ed616b58360000044fd24 -63517a000400818616fec200004007d3 -6353b5000428577674b4680000428526 -635418a2441069468b10ac002048e072 -6358200004649d965a989300004649d9 -6358ca000432b7068b1c10000043299a -635a4c0003c176c60b9e9000003c175f -63606b00041a6f66518186000041a6f3 -6365b00003d19b1659f33d00003d19b1 -637215771c3eddc64f1c2c00c04c6039 -63725c0003cf8ab655724e00003cf8ab -6373cd3a994e9c36e7dca700504c2e51 -6373d77538a3071673c02500504992f4 -6379230004611c2661f4ea00004611c2 -6380e7b31aace9d675696f00d04a93b4 -638138912b76d6b64072ce00404c4a14 -638cf80003d5e4b662575900003d5e4b -638d140003fae40658d6c200003fae40 -639046f5e0c699c63f8cb600904a86db -63906f00048e5f4658c18f00003d19b1 -63a412328f217386b6b54a00404aa8b2 -63ae9e0003cb9ec6bfda7200003cb9ad -63b09500045a12f65ae360000045a12f -63b155000490183640cb22000049016a -63b3f1c6c3188146cea15b0030472ae0 -63c8663057c80ef6d58c5100e048720f -63caf8000498d456dc125f0000445904 -63d1230004582c966278ea00004582c9 -63d20c0004110fd65bfeba00004110fd -63d27b0004bb55b613f4a800004ba11d -63d2a503c27c376655a02500d04920d4 -63d9940003dc2246ac8b1700003dc21f -63dacf0b69d62da622c9e9007048e29e -63dd230004619cf66278ea00004619cf -63dddd00046fb6f66261af000046fb6f -63e9537cc54d8bd6dab7d100e04c6f57 -63e9720003dd2ce6629ffa00003dd2ce -63ea480003d61f0662a2aa00003d61f0 -63f6ec0003e278f65ce10b00003e278f -63f8767cc102f5d616d33700504864ba -6407c9000471f6c65b8a7c0000471f6c -6409c10004c58ec6d76d240000497227 -640f60000460f876a335c90000460e70 -6410ea000468fd065bb1e20000468fd0 -6412e80004a3ac16cdd5f40000491b41 -64160500043bed1653b6b1000043bed1 -6416090003b9d4a6834f21000038758a -6416090003b9d4a6ea76c300003921cb -6418a7000445c536c45a5f0000444d4f -642097c711cf06969a38eb006048525e -6420bdbbf473e6c61e7f3100c047328d -64221b0003d0e6e62f72e200003c7943 -64239613a4cc5d36f821b90060477ecf -64254ede44f96456cbab0a00c04748e3 -6429c10004bf1806b0f67b00004bc82f -642b22000491aca66d99d2000047e172 -642e8a0003fc7c7624655200003fc7c3 -642ec200041e352662b301000041e352 -642fa610d0c5343679507600504bf3c5 -643933f0903ceea6133df40040490290 -643d0b0003e8cb265c44a400003e8cb2 -643fd10004c767d6481ae800004c51d7 -64429e0003cb99c69d08b300003cb987 -6446a800040fe46657b2ba000040fe46 -6448802502d5dbd6874551000049b6ee -644b2200049904066405d7000049516e -644b6d00049aea76801f6d000049aea2 -644dd70004a3ac16cdd5f40000491b41 -644f4d01f7cc0fe61387be002049e1aa -6450a700045f88a63997dc000043c95a -6454ee6b9a19a886ba9e3900004b9971 -645861e2318a7c467448d40040473d5c -64592400049211066d99d2000047e172 -645b108669f521663b2dc700804af70f -645fd00004839db632ed7b0000482c4a -6460470003fe4966834f21000038758a -6460470003fe4966d33d18000038bd74 -6462500003c25136308b2600003c1900 -6462500003c25136834f21000038758a -6468020004243ab65b49d60000424377 -6469b90004838f767264850000483736 -646e86d39cdc6ab68495de00c0475632 -646ef80004967d167b3c0f00004967be -64705100048956d6ba876b000048956b -647500e7bd66c5862525f100b04803b3 
-647e1600040310e656ceac000040310e -64802fca7073377634047c00a04a2970 -6482f800042aa0d6599da5000042aa0d -648aa10004af88662e7a7400004af822 -64905dddeb459ed69ee9de00a047bfd5 -6491c13adc07c5166e2d3c00204c938c -64960d00049919b6009c25000049914d -64980b0003e22ab6633eec00003e22ab -649c8d0003d222f65a8c0b00003d222f -649d960004839db632ed7b0000482c4a -64a1350003bdb946d48899000038bd74 -64a224000496ad26edc3250000496ac9 -64a4dadd07154766c2f82f00b047495c -64a9f70003d40fe65b065600003d40fe -64ad8600040662168421dd000040652b -64aec300047f93a621aa7e0000459dc1 -64b2e90003ec9ed657eeec00003ec9ed -64b5e200048956d6ba876b000048956b -64b5e30003cad94659dfbc00003cad94 -64bc680004247f0651297a000040f937 -64c47600046a21164614ea000046a20e -64c88d83319be5d6c326d800f04cbcc6 -64ca070003eec25658e8d200003eec25 -64cca40003dd06569943fa00003dd043 -64d57a00041a58168421dd000040652b -64d5d96f29cb3706d45e5600b04ac7e1 -64d6f8000498d456dc125f0000445904 -64d79f3baa822b8648795a00004a7f96 -64e33f0003d26a165db00b00003d26a1 -64e4930004500c6662cd2300004500c6 -64e85c0004b719f6080a7e00004b7198 -64f5750003d31346630e4800003d3134 -64f969a679fb72a6a4250a00d04c5f91 -64fece0004af88662e7a7400004af822 -64ff0900047020c6b40490000046ebe3 -6501e2000450d6267a2ea20000450d5d -6504c10003c87916a66e7c00003c448f -6505d00004cdd9d64f6c8200004cc801 -650a0c00040125d65d72ba000040125d -650f0a00040933b65a7eac000040933b -6511a10004733be6ea76c300003921cb -6511b40003d0c09658465800003d0c09 -65180ce432cc4d26bfe4d400c0475a42 -651a68e8c97e06f641f4b700804aba35 -6521d60004256126af80020000424eb8 -6521f8de064e8fb6bef2bc00d04c4b55 -65257e7973a8ab3635951700f04c4928 -652e270003d32fc65b235900003d32fc -652e531e682124c6fbc12400d0493789 -653162a01a1edcc640b96b00004bf52b -6532c20004081e166d755200004081dd -653bd5f55a42ddd64bf24e003048adf6 -653e0c000419e0365d35dd0000419e03 -653e6fb9da98cba6832996009047e182 -653f160003cab116b2e17900003cab10 -653fac0004823b168d4b0a00004823ad -654590000419dfa6cde3010000419df3 -654b74000480dd1628e02f0000480d14 -654d590003cb9ec6bfda7200003cb9ad -654f120004098ba65d495200004098ba -655df100047455165ed30a00004743ed -6564ac00048541868bdcb50000477127 -65670a00047698e6a559a1000047663a -65697b00048358363d75d9000048353b -656e9802da86ce66ad964e00104aac2c -656eb70004329e56636ee200004329e5 -6572870003b61496834f21000038758a -6572870003b61496b1068700003b603a -6577685d57182d06e3dca100904c7c94 -65793adeee15c8d647e0a700704c6e26 -657b2be46ae966e690082800704afca0 -65802500048e73d6ea5da3000048dd18 -65841868b23197d631e5d9002048391f -65868b00047455165ed30a00004743ed -658a0ef61f13d446b3f8a100d04c6e1a -658bdc00043714b65df4e8000043714b -658ea10004ac9a36c0e9c700004ac9a0 -659cb30003cd115630df1100003cd111 -65a0c4463c26533679e92b00104a5626 -65a8f5000458f6e66441e20000458f6e -65acea00044d44f65d85e2000044d44f -65b0c200048e5c768b10ac000048e072 -65b29d00043692c65af0e8000043692c -65b44dc95af6f7f6a0547c008049f8f9 -65b4f500045a2ab61b58ea0000459ef4 -65b555000490f6368f24ac000048e0c6 -65b6086d1b8182469e45d7009048fb6c -65bbf00003d0c8c615648900003d0c7c -65be0200046edcc6fa282f000046ed5f -65c0a40003f4b046e36b6200003f4ac8 -65cc2f00047499c6d254d4000047496e -65cfd000048270f6d25d9600004802d4 -65d2ce0004b93806649b5500004b86ae -65d2dc064a427e467a9e7400f04b9a1d -65d5ee158e020936a945c100e04c5115 -65d7c9000471a5d6646b5d0000471a5d -65de72d91b637c06815fed00804a2ab0 -65df3f3f33a8c7865f51f1004047b6d3 -65e00f0003d0be765eb8c000003d0be7 -65e39a2db8671b86fff90a00004c72d8 -65eba8000494df368615240000494dd8 -65edfbfe8934a796f5878c00104ab4f1 -65f76b00048b78a65c25e2000048b784 -65f92400048f577628cd900000412dd2 
-65fb630003d116a63f1e0900003d1167 -65fbcd740a9f48a6c7fb6d00e04a07b4 -6600770003d19b1659f33d00003d19b1 -660760a30c546566f8776b00a0485cd3 -6610be3128e7af867bcb330090491651 -66140f00048e5c768b10ac000048e072 -661aac000419a016469f0100004199fc -661eba00040e99e6834f21000038758a -661eba00040e99e6b1068700003b603a -661f818a762f2d363a5ac3008047f70c -6622963297ab06c626594800b04cd66f -6623d00004823b168d4b0a00004823ad -66242b0c931c1676f24a7e00b04b4d09 -6626070003de9d465ccd3800003de9d4 -6626374d3e6ecf063f410400104ab193 -662dd700049fd586c1ac0d00003f36ec -6630eb17a7e148169534b50040476fa5 -6637660003d116a63f1e0900003d1167 -6639a100047a99c6f821b90000477ecf -663fdf39102673169ada950090472ef9 -6640e1aefc66d666b676d800004caee0 -664244e1d55d6e0669d7c00040472e13 -6646130003fae40658d6c200003fae40 -6647bb0003dd06569943fa00003dd043 -664bb6000400bdc656d06b0000400bdc -664fcd3032943dc636968b006047e7d3 -66506f00048f577628cd900000412dd2 -66518253b83255a683e9c100e04c599f -66520d000494df368615240000494dd8 -665db5739ef1eb0633f8a700404c31fa -665f990003c2a54644567c00003c29c4 -6662ab0004b542e626c4ca00004886ff -66646fc6b9705466b17bd000b047919e -6679240004932d46d92425000049325b -6679350003d0d1f65f4ff000003d0d1f -6681f400048e5df6fbf6c7000048d8d9 -668ae800049d82f65ff833000049d76f -668e5000048e6256f969f1000047caa9 -6690ea00044f02065df020000044f020 -669553eb47432dd6465379000049c0ea -669c5b7608ab35566cf09000e03c9539 -669c5b7608ab3556834f2100e038758a -669ce8000438b876d36be00000438b71 -66b0d20003e18fc65f3c0d00003e18fc -66ba763c615b4796746624001048dffd -66c05c0004b024960bce1d00004affda -66c1750003d2fe86b41b6b00003d2fe2 -66c40f158013b5e64f0b3300804980d7 -66cd3153dbc7b9367e023900404b2107 -66d80b0003e1f8363ff80b00003e1f79 -66de6770da5af4a6a9233700b048b428 -66dece0004ac9a36c0e9c700004ac9a0 -66dfe4d14d72ec4665fd240020495db4 -66e28000040028d65c497a000040028d -66e6c20003fea5f65f799000003fea5f -66e874ef25a353f6d7cdd000404aaba5 -66e9a404a1c885b67ce5d2006047d58b -66ec690004b62f067c3ba400004b6245 -66ed8b7c8ee305169a9333001047e575 -66f3590003da0066656aaa00003da006 -66f57e0004423a6665b79200004423a6 -66f8b225acc75e36740dd70080499a10 -66f9db0004243ab65b49d60000424377 -66fb3300047f2c462699d2000047f1e7 -670578000446cf4665b4000000446cf4 -6709590003cb99c69d08b300003cb987 -67171900048c7e365aee240000487259 -671858709a7f8ef6f8008500b047fd33 -67193b000448afa65ea4000000448afa -671ce4c1219aa0c6ec402800504ac8bf -67219000040992f665cf12000040992f -6723ac000481b8d67b42450000481b65 -672990000401dd961082ac0000401dd0 -672eef195bcbea26ae2ea500e04c1600 -67361a73c08f121677e40f0080496421 -6737931054dcc41625360d00d0492704 -6739d800042aa0d6599da5000042aa0d -67442000046343066610760000463430 -674616d634d47af6aa761900c04987ac -67476b00048692b662036b00004868c9 -674f6b0003d40fe65b065600003d40fe -6755940003e222f691a35100003e2221 -675aa50004c129663b6ca100004c1255 -675f870003cad94659dfbc00003cad94 -676121c1c78bcd16eaba240070498dcc -6762740004b77a867aaca800004b777d -6765dd00046e7fc66611af000046e7fc -676cf500044a85a65f4f6c000044a85a -676da500042c01c665fc9a000042c01c -676f390003cb99c69d08b300003cb987 -6772892b98b84a664e0f1700904a9224 -67746b00040f0916601aac000040f091 -6777776a8f58a12688c374008047f20e -67789500044958e6d9052300004494db -677aa300042437765ca5db0000424377 -677abc0004c129663b6ca100004c1255 -677c980003cfbb6632ba9e00003cfb91 -6780790003d222f65a8c0b00003d222f -67865a961fdad396a3f5d900d0475cf3 -678ab3822e8e5e7638086f00a048f539 -678fe47327b1f32641cf5e0080472f16 -6792480003d1b9d6603eaa00003d1b9d -67927ebcb6b64166080a7e00104b7198 -679f0900046d5b765ee42f000046d5b7 
-679f370004872856a2a76b0000487282 -67a48f1bc3459cb62202ce00b04abdf9 -67a4ea00044ffdd61725d60000422eaa -67a54a0004a9e3b649b8ae00004a9de3 -67a6200003cf84c66607a600003cf84c -67a656e1fcfc5336057b3300e04cc4f7 -67b103c3097d378640b96b00d04bf52b -67b61300040933b65a7eac000040933b -67b7a10003e278f65e38a400003e278f -67b89500045fe5e605ebe50000399de3 -67ba7e0004b797b67cea7e00004b7758 -67bc09b72b0a1116ebc1e200b04b6099 -67bc4d8dafb52e86b45e0f00104be5ea -67be17728a728fa68727220010497a0a -67c120e66343d78680680500e049de71 -67c2a80004110fd65d7c6b00004110fd -67c35e0003cab116b2e17900003cab10 -67c8b5000475e2963c069d00003a6629 -67ce5081f27eb2069ca1de000047c35b -67cf5d00047183765f2f200000471837 -67d2b100043823465e75d10000438234 -67d70800044515e652516800004450b0 -67d8280004afbe764f5a7b00004afbe1 -67dc1a727ce162c6f38a450060476dc0 -67dd720003e1f8363ff80b00003e1f79 -67e27e00042c3d066681d6000042c3d0 -67e49e132bcbb156dc53740040480442 -67e9940003ddcb7666280b00003ddcb7 -67ec0200039b558605baa3000039ae0d -67ec890003d0e6e62f72e200003c7943 -67ec890003d0e6e6834f21000038758a -67f83300049aa56644c3a8000049a983 -67f8b70003d32fc65b235900003d32fc -6805940003dc7ac6bfc51700003dc7a9 -68081f000437f5a65b6ce80000437f5a -6814a80004afbe764f5a7b00004afbe1 -681523000462e5466698ea0000462e54 -68159000041b3a56b5f17a000041b35f -68164feae213dd1613164e0040487b01 -681787354dba505668e78600b048b41e -6818a1ddeb2f4f06c779c900404a847c -6819330004c209e6ecc93300004c1ea1 -681ecb0004ce6146262b7500004ce5f1 -6822ec1e92ba0dc605b77600c04c81a1 -6823010003fc3b2666b20c00003fc3b2 -6823a4000472a2864b1c7300004480fb -6823e90003f80ae6611c9100003f80ae -6824820004cc370605baa3000039ae0d -6825c500043502a666b7f6000043502a -6828790004cc370605baa3000039ae0d -682d350003d01bc64b0f6600003d01b7 -6831b900047cb936dcac85000047cb8a -68368b0004773716360429000039346c -68376ad3212a4736526a7400404bcac5 -6837f22eec2b73f6ff85f100d047963e -683bc995610f2d46faa2ce00804b8ad0 -683d0485f90c4d462979c000404a5d06 -683d09511a880716e03c2f007047a9c4 -6842420003bf2906834f21000038758a -6842420003bf2906ea76c300003921cb -68477900049aa56644c3a8000049a983 -685228b70dcb2126f0dadb000049431a -68576d0004a5fc06e719d700004a5fab -6857ac0004773716360429000039346c -685bbe00049f7bc6a3cee8000049f7b5 -68655b00047276b62b94dc00004726f1 -68679c00045d4596d570a7000045d456 -6871e94035c9d1563b567400c04b2dc4 -6871f80003e8cb265ddbfa00003e8cb2 -68819000043692c65af0e8000043692c -6881de00047cb936dcac85000047cb8a -6886a1c78b780636d9fca100904c703c -688ca40003eccf966d959400003eccdb -688e7e00044ed56649ea8e0000447267 -688f190004861656b1068700003b603a -689481ebc4cc7a56c0c46900904bb187 -6896560004bbbf4618bc1300004b8919 -6896e800049fa586b58360000044fd24 -689ff5000486bce6b1068700003b603a -68a1627734dbf27622295100004a4236 -68a195f42b41234602814a00a04ab3dd -68a5a30004be09e6af0a45000047d45c -68aca40003f7b166235bbb00003f79ae -68ad431d666e7a16b9366600804b9528 -68af920004460a767f765f00004460a0 -68b4a30004395c66198f5b00004365bf -68b4c10003de9d465ccd3800003de9d4 -68b5a80004872856a2a76b0000487282 -68b75100046f7cb6676c2f000046f7cb -68b85c0004b797b67cea7e00004b7758 -68bf110003cd115630df1100003cd111 -68c01a0003f80be6611c9100003f80ae -68c483a134e2db46931fbe005049c96d -68c61d0004b18cd6864ea100004b16a0 -68c67c00046d34068beb20000046d318 -68c80f00048ede66b58360000044fd24 -68d7f1c2c460357660087600a04be3b6 -68de920003d5e4b662575900003d5e4b -68e4342fe6522b26cc755d00f0472ec5 -68e8838fd668b046bf3e7400b04b10dc -68e8a300043be6d6834f21000038758a -68e8a300043be6d6ea76c300003921cb -68e8b50004816916dd42cd00003cfb55 -68eb620003e39146c87cdb00003e19bc 
-68ebcf00041b9ea6678c47000041b9ea -68f2ae0003cab116b2e17900003cab10 -68f30e4da60dafd64d0e2c00e048f4fc -6900790003d26a165f1a4800003d26a1 -6900e800043eaf06679eb1000043eaf0 -69029f3c8b506d86c422e8007049e9cb -69047c0004a5fc06e719d700004a5fab -690a880003d0f5f6c79e1b00003d0f5b -690b6b00048692b662036b00004868c9 -690f0a0004098ba65e9bcf00004098ba -691dab0004c54326e801a300004c2934 -692333000483a406cb5e9e00003cbe13 -692a8c000432b11683e9dd0000432560 -693a4e00048c3766fba82f000047f7da -6942a800040125d65eb86b000040125d -6943ea000429119667c89a0000429119 -6948a700044a59664e3476000044a58c -694986000413916667dbcf0000413916 -694c9500044afad6b22c95000044aca4 -69509500045faa766278ea00004582c9 -6951d64cdfb7d906f0faa100904b94f9 -6953a0f1cb940106bb3322007048f88f -6955f70003acd336ea76c300003921cb -695aa200044df89692999f00003cb97e -695b170003d61f0662a2aa00003d61f0 -69622f492dde45f6a1037600c04cb593 -6965f100048251a6b6542f00004824db -6966ba0004018656614ec20000401865 -69689300044adb1660f420000044adb1 -6969350003d0f86667ec0f00003d0f86 -696f5e0004729936315b97000047297e -6970408575123fa6501ece00f04c6c32 -697fae000419e0365e99860000419e03 -6984c290cbce5b56cdd2e800904c546f -6986b60e2103466683d3dc00c049a10d -698d9cafa7358826ed97be003049b16e -69906b000419e076619ac20000419e07 -69919b215e763976ca156f00404a8ece -699d2b00049d1b766079ae000049d1b4 -69a0f500045050a6ee535b000043f8c9 -69a1a500042306b6e849a50000423041 -69a1e20004bc1b366c91de0000483353 -69a2a10004b8de36654a1d00004b8d76 -69a2ce0004bc1b366c91de0000483353 -69a2db000499421640cb22000049016a -69a412000484db56e148470000420966 -69a61300040028d65c497a000040028d -69a6fe00039b558605baa3000039ae0d -69a6fe00039b5586834f21000038758a -69a780000472d5666e0f5e0000472c3d -69ac2b00043714b65f2e63000043714b -69afc20004a6bc565317be00004a6b8d -69b45c0004b078c6f3763900004b0783 -69b4d40763c36a26d42e4500a04751c8 -69c8c4172bcfdd2691c3d100204be9cd -69c9d70004a14c56e1f45600004a1415 -69d2530004c2405669c9a300004c2401 -69d3590003d18366e434fc00003d1832 -69d3590003dd2ce6629ffa00003dd2ce -69d3df52ba386866739dae002049fda8 -69d5dd000411c3766867010000411c37 -69d8b30003cbd206e872cd00003cb9f0 -69db9e5f22bf24a69109cf005048f640 -69e2ce0004b8de36654a1d00004b8d76 -69e60200046eadf660e680000046eadf -69ebd0000476fd364c20850000476f21 -69f4dc000472d5666e0f5e0000472c3d -69fe8a0004000786f855520000400071 -6a03750004cc23f6513d3300004c571a -6a06530003d0be76601ff000003d0be7 -6a0ec2000400026668ce8f0000400026 -6a0fbb0003dc472695197500003dc470 -6a18d700042437765ca5db0000424377 -6a19ecee40bf8336ed97be002049b16e -6a1d380003edb7c61d33fa00003edb6d -6a26c3c95b2d07e64c472200e048f49d -6a2ee80004c2405669c9a300004c2401 -6a3093000461f08661fb600000461f08 -6a314dd7b7c5294694cd2b008049ed01 -6a33cbae0c69bfb6e399b900704834ea -6a347c000436628611aab100004364a1 -6a34fe0003e22ab6633eec00003e22ab -6a397000046a21164614ea000046a20e -6a3df8c83f5382c69f38510000485f95 -6a4457450104e9a6b63a190050490b79 -6a46e90003e278f65e38a400003e278f -6a4b600004638b166904f500004638b1 -6a66045e8c9f4026727a1d00b04b1b89 -6a6619000496ad26edc3250000496ac9 -6a75dd00041859b6818c6b00004178dd -6a75ef000445cce6199def0000445c38 -6a79e2000457b6c65e9e7e0000457b6c -6a7a460004110fd65d7c6b00004110fd -6a90900003c2bce66925fe00003c2bce -6a90900003c2bce6834f21000038758a -6a942500048f577628cd900000412dd2 -6a96520003d0d1f660cfe900003d0d1f -6a99dd000420c686c10c6b0000420c52 -6a9c4d993c0bb0d696052b00d049a6a1 -6aa0890003d0c9e669380c00003d0c9e -6aa18600041e352662b301000041e352 -6aa4ca00048931b62e11d20000484175 -6aad9cdda6b2bcd6024b70004049a7ff -6aae560003dc0f6669118f00003dc0f6 
-6ab2c30004775f5669c24500004775f2 -6ab86f00048e5396c44e50000048e514 -6abaac0003fa1df674246b00003fa1de -6abdc90004582c966278ea00004582c9 -6ac8c192c0071706ec41b90030475cca -6ad4064f214c52265f742f0090478862 -6ad8d20003ee2d963b6cdb00003ecfdb -6ada9600048931b62e11d20000484175 -6add720003f77fe66edbfa00003f77e8 -6adf8fd2fdd043e63f410400d04ab193 -6ae15500048e5f866699f1000047c503 -6ae3590003d31346630e4800003d3134 -6ae5b3869c3baf66814ba800d049acb0 -6ae740f94d1eb7c6dae77500104cd64f -6ae80c0003e18fc660a4a400003e18fc -6aeacd0003cbb04669324e00003cbb04 -6af22c00048e5c768b10ac000048e072 -6af7750004cd1816d16acb00004cc043 -6af9c90004619cf66278ea00004619cf -6afb510003edb7c61d33fa00003edb6d -6afe97421fc18406cad19400d04aaa26 -6afebc0004bf2f6656a9e200004aceb3 -6b05c90004611c2661f4ea00004611c2 -6b0c7c0004a3ac16cdd5f40000491b41 -6b10d20003ebeef633ca9200003d86d9 -6b12211021de0cf6bd0ef8005048f120 -6b14c2000490183640cb22000049016a -6b15d20004748be6966893000046c653 -6b1ffa0003dcaeb6a3825600003dcae7 -6b21a1000473e4f69a5c850000473e42 -6b22430003e8cb265ddbfa00003e8cb2 -6b29d70004a14c56e1f45600004a1415 -6b31dd00046f2e7661aa80000046f2e7 -6b407ad5e9005b36df274200104ce6f0 -6b4773fac3c189f6ec2ba8009049aae9 -6b485c0004b7714636adc700004b770e -6b4a66000472d8a6b1d6630000472d89 -6b5082722a03c986546dd9007047d5d7 -6b540b0003f76d3669f59400003f76d3 -6b61170003dcaeb6a3825600003dcae7 -6b61940003ec9f366a0cdb00003ec9f3 -6b653648e4262726a466c3005047d233 -6b714300040c4f16612590000040c4f1 -6b735100046fb6f66261af000046fb6f -6b77c5559800b34656690a00f04c22f2 -6b791e5f74b8a7d6ac1b37007048c370 -6b793c0004cafa06b81a4500004caf85 -6b79776e5206fb76fb246f00b048eedc -6b79f40004423b1665b79200004423a6 -6b7e460004098ba65e9bcf00004098ba -6b81e20004bc4056f53aa100004bc3fc -6b84aa00040f0916614c47000040f091 -6b851e0b931c01b6cf523900004b134b -6b897a0004154ce66a59dd00004154ce -6b8bc20004a118c6f65fed00004a1143 -6b8bd59065dcca360dcbca0060472aa8 -6b94d70003fea5f660c1dd00003fea5f -6b9e450004773716360429000039346c -6b9e450004773716834f21000038758a -6b9ece0004bbe5760b719800004bbda0 -6b9f948be95449c651b65600804bde87 -6ba2530004c3e6f62fe5f1000047fa3f -6ba7bb0003ed7f5606576200003ed7d5 -6ba88b3613125476578cb500c047d197 -6bab600003d26a165f1a4800003d26a1 -6bb75e000472d8a6b1d6630000472d89 -6bb885000481c5b63b51de0000481c3a -6bb8e883fc00b6f6b207d000a047b93e -6bbb79d646215fe69af3a800a049232b -6bc40d0003f527e6f79f6200003f3add -6bccc400048541868bdcb50000477127 -6bcde200044a04366a536c000044a043 -6bcf6b0003d1b9d661af6b00003d1b9d -6bd561df42e45f667429ae00504a32c5 -6bdbbb62332e7b86ae9dd800504cb13a -6bdf010003fca5269288a400003e19e6 -6be4760004500c6662cd2300004500c6 -6be9720003f62cb61963bb00003f506a -6beba467e2dacfa60c0c850090478093 -6beec400043714b65f2e63000043714b -6befb600040125d65eb86b000040125d -6bf27e0004247f0651297a000040f937 -6bf552000404d7466a90470000404d74 -6c01940003f4b046e36b6200003f4ac8 -6c0496dc0babc6e6a6c41300104b89e3 -6c05730003f80ae662550e00003f80ae -6c0c980003cb81966a7b3900003cb819 -6c0f510003eafb46da5d9400003eafaf -6c13658cad90d0a669b30a00504834eb -6c162f0003f80be6611c9100003f80ae -6c180f000419e0365e99860000419e03 -6c192300045d4a96982b9c000045d4a7 -6c1bdc00049aa8866c187c000049aa7e -6c1e1d0004bd21c648486900004bd20b -6c1f9135f1c8241669c24500504775f2 -6c25380003eafb46da5d9400003eafaf -6c2dc900045cc1c6f0f67e000045cbab -6c2ec8603dd1b94638720f00d04bf280 -6c30130004b2ef562da81300004b2eef -6c31de000476901636f8b500004768f6 -6c38130004b93806649b5500004b86ae -6c385600049c5fa65c25e2000048b784 -6c3c4ddb29fc86a6b322c3004048e298 -6c42edcedad7c956ec5f7600004c313f 
-6c4a760003c2a656a8f51400003c29ff -6c4a920003da0e466b0c0b00003da0e4 -6c5068000437f5a65b6ce80000437f5a -6c50c00003d0f5f6c79e1b00003d0f5b -6c546b000408f12686cd1700003dc131 -6c59900004382346602f5b0000438234 -6c5a3f5be8b5d7a6256eab00d04b9a08 -6c5d750003d1938669ff3d00003d1938 -6c63c60004329e56636ee200004329e5 -6c69e20004af7f663aac1300004af7f1 -6c6a390004bd21c648486900004bd20b -6c7cc1108f31eea68fc37400c047502b -6c83a40004b13516cf523900004b134b -6c85750003da0066656aaa00003da006 -6c88280004b01a96ae29c700004b0094 -6c89d20004816916dd42cd00003cfb55 -6c8cf80003d50916d9b69200003d508f -6c917ed33a6f4d46e2b5ae00e04a51ea -6c92240004857656bcbf460000485753 -6c947600045faa766278ea00004582c9 -6c98a10004c100764b1aa500004c0fc6 -6c9bcf000409a6866b21520000409a68 -6c9c3300049a84a629b5d7000049a828 -6ca4b70003d5e4b6638cf800003d5e4b -6ca7c7d66722b4f604cfa800d0498750 -6cad790004360bc6ddc2b1000043608c -6cb23e4bcb589d16a1037600404cb593 -6cb34c8fbd5574b61b7733009047ba43 -6cb77900049a84a629b5d7000049a828 -6cb89600047b4f5621aa7e0000459dc1 -6cbf0c1055e0b0d608870a004047fb7b -6cca520003d0be76601ff000003d0be7 -6ccde2000469b5a66b6fbf0000469b5a -6cd1a300048c2f360126c7000048c2ce -6cdcb5000481c5563b51de0000481c3a -6cec2f0003f80be6611c9100003f80ae -6cf20200046efab66ad0d7000046efab -6cf3544dae32f19613010a00904c1270 -6cf5f100046151a625db080000445f86 -6cf62d0d27c1b536c4bec3004047d08c -6cf73d0003d18366e434fc00003d1832 -6cfb8ffff49d09965079c100504bfbad -6cfde20004b797b67cea7e00004b7758 -6cfec7826bc70776e2da8b00f047f14e -6d09380003f2f8363ea2ec00003f2f7d -6d11230004613436cd98f50000458a03 -6d15af0004719c16328daf000047196d -6d1c710003d61f0663ea4800003d61f0 -6d22890003f80ca6611c9100003f80ae -6d24db0003d0d1f660cfe900003d0d1f -6d279c0004614fd625db080000445f86 -6d2cd4000481c5563b51de0000481c3a -6d31700004582f0632ba9e00003cfb91 -6d38655900fda2a62c068b00c0475cac -6d41cf000496ad26edc3250000496ac9 -6d4923000458f6e66441e20000458f6e -6d4b4a9ee92b9fc669c9a300b04c2401 -6d50fe0003e3cf96c87cdb00003e19bc -6d527e00044bc0d66bfe3b000044bc0d -6d56530004c3e6f62fe5f1000047fa3f -6d59d60004261ee66d7fea00004261e1 -6d62500003c2bce66925fe00003c2bce -6d62500003c2bce6834f21000038758a -6d63bf00044e9d564c2020000044e9d2 -6d664e0003cf84c66607a600003cf84c -6d6e760003c90d366be5f600003c90d3 -6d6e760003c90d36834f21000038758a -6d6faf000431b5166bfadf0000431b51 -6d7b8c00049c5fa65c25e2000048b784 -6d7c050004a22a96be085600004a0d80 -6d7c280004b13516cf523900004b134b -6d7d0ba955e90b3616177400f047bd69 -6d7de200048be45688ce24000048be41 -6d81d2000476901636f8b500004768f6 -6d840a03d45fb276be4c1300d04bbc47 -6d840eaf4169e6b681c97b00c047617c -6d8829dd1d18a5c62a842800a04af74c -6d886800042601066c18020000426010 -6d8a430003e18fc660a4a400003e18fc -6d8a7b0004af88662e7a7400004af822 -6d8b11cc1f87ea0677e40f0000496421 -6d8cab0003887e16834f21000038758a -6d902e44c61346665afdc700504b2e30 -6d9528a3f6111296ba27ac00b047715b -6d97170004aa12c6c2cda400004aa129 -6d980c0003d0f5f6c79e1b00003d0f5b -6d983300049efc66ea76c300003921cb -6da4ef0003dd2ce663e97200003dd2ce -6dac7200039348466c30820000393484 -6dac720003934846834f21000038758a -6db0a40003ddcb7666280b00003ddcb7 -6db5db00042c01c665fc9a000042c01c -6db8ea000465b5766c4ca70000465b57 -6db9680004423a6665b79200004423a6 -6dbc0b0003f4b046e36b6200003f4ac8 -6dc5b2148a323f161df26600804727d2 -6dcaa0b5d1f673f684c5c000804a3c7e -6dcf9ca7fa4b55263d0c2c00904be838 -6dd5a30004bf42a656a9e200004aceb3 -6ddddd000471a5d6646b5d0000471a5d -6ddf0a000419e07662e2ac0000419e07 -6de13c0004aabeb67f164e00004aabd1 -6de16b0004c6cd1613f1f10000476ace -6de266000472d5666e0f5e0000472c3d 
-6df0aa000401865662deac0000401865 -6df49500040f0916614c47000040f091 -6df6817588032de6e287a800a04a21a9 -6df97af5aae35bc6ea2e2400f0496b69 -6e03580003e22ab664980b00003e22ab -6e040b0003f4ff56cbb8fe00003f427c -6e08c400048c469695e0ac000048c415 -6e156f0703cb4e2603b164002049593e -6e199a0004247f0651297a000040f937 -6e1a8e000446cf4665b4000000446cf4 -6e1cca00048c469695e0ac000048c415 -6e1d111994f0df6677a4c400b0489a14 -6e2002000425bf86a19bcf0000406c88 -6e23f9bf5466ec268a937600a04c4d0f -6e264500047675b60798b50000476752 -6e297a0003fc3b2666b20c00003fc3b2 -6e2bbea5c866db96d5a1a300e04beb72 -6e2d1400040c4f16612590000040c4f1 -6e32b43f712641164a690a00d04c7782 -6e3de32f1406c36678b83d00504c87b8 -6e43790004a16c86ed97be000049b16e -6e49620003b34ec67288fe00003b0b6d -6e49dd000418e9e632b7120000418e3e -6e4b56dc0ae2ff76454b7600204be7a1 -6e4e800003fea5f660c1dd00003fea5f -6e4ff4abaa18f666439d7b007047bf65 -6e50980003d9ed663ea7e50000395bd7 -6e51a100047cc3f66b86c3000047cc38 -6e560c00040992f665cf12000040992f -6e5b3700042c3d066681d6000042c3d0 -6e5bd000047cc3f66b86c3000047cc38 -6e616400049904066405d7000049516e -6e6478dd028456265fc41300104bc688 -6e655a0003f80ae662550e00003f80ae -6e68fe0003eeb0f60e317200003ebe6a -6e6c900003c2aca6acb29900003c2ac5 -6e72990003c953966cf09000003c9539 -6e72990003c95396834f21000038758a -6e731100043273066cb6b70000432730 -6e735100046e7fc66611af000046e7fc -6e7735ec948f08a663f1550010494f6d -6e7e7b0004bbb8363cbe6600004ba29a -6e80f500046343066610760000463430 -6e83ca000472d8a6b1d6630000472d89 -6e8b51000470cba66d102f0000470cba -6e8ea5a4b4b1c0164f06ce00004ac185 -6e904fb8d3afc43676235c00e04abc0d -6e954b8476f3afd65fd25300104bf377 -6e9c5000043bb0c66d3a5e000043bb0c -6e9dc70004b1dd3641f4b700004aba35 -6ea837991bfd90563027a400f04b24c5 -6ea8db4e4d12b6862979c000504a5d06 -6eaa8bba30274e26094e5300304c470e -6eaaca0004247de651297a000040f937 -6eb0690004b1dd3641f4b700004aba35 -6eb2560003d763366d3a7200003d7633 -6eb4ae6b1c1dd7e66868a100304c00fb -6ec099be783532c6fa50d400d048300a -6ec36b0003d188c66cbb3f00003d188c -6ec4470004148816dcb2ba000041484d -6ec4b70003d1b9d661af6b00003d1b9d -6eca1d0004b1dd3641f4b700004aba35 -6eca8a0004081dd66d755200004081dd -6ecacccb28393046e2c10a00004c30bd -6ecd24000494df368615240000494dd8 -6ed5230004583bf64119e30000448a71 -6ed8d20003eccf966d959400003eccdb -6edc770003d3134664f57500003d3134 -6edca80004b9c7d60f567400004b9bfc -6ee4690004b8de36654a1d00004b8d76 -6ee57a0004050bd684a30100004050b9 -6ee8c400048956d6ba876b000048956b -6eee7e00044ffdd61725d60000422eaa -6ef06b0003fa1df674246b00003fa1de -6ef10a00043502a666b7f6000043502a -6ef1be0004cde6664f6c8200004cc801 -6ef6280004261e166d7fea00004261e1 -6ef8db0003eccdb66d959400003eccdb -6ef9f100047d7df612302f000047d7dd -6efb2200048e727658c18f00003d19b1 -6f01e200046c6256366420000046c538 -6f05b900047eb21663dcd4000047e24e -6f0b0a00047eb21663dcd4000047e24e -6f0b806b39d8f926c326d800f04cbcc6 -6f0d1400041e3526642ec2000041e352 -6f0dde00047bfcc6161774000047bd69 -6f11d00004cc370605baa3000039ae0d -6f11d00004cc3706834f21000038758a -6f12451a3be14746f6d2ce00d04c2f6d -6f136d0004a00296120b3700004864d5 -6f17630003d0e7361145fb00003d0df4 -6f18b60004aa0ab6231abb00004a949d -6f19860004050bd684a30100004050b9 -6f1c770003d5e4b6638cf800003d5e4b -6f2ee80004a18236382fbe00004a1804 -6f2fb20003d0f86667ec0f00003d0f86 -6f32560003d992a6815b3d00003d9915 -6f46eae80cbf93d60427b00080444d2b -6f489600047675b60798b50000476752 -6f4dc9000462e5466698ea0000462e54 -6f5747162310c586de5dd700e04a0af5 -6f5806b4c4ef0fd6f4638c00c049a4ac -6f5b0a0004839db632ed7b0000482c4a -6f5d0b0003ebb9066dd35100003ebb90 
-6f5fa80004a166a63c069d00003a6629 -6f5fa80004a166a6834f21000038758a -6f64960004738a864d65de00004738a4 -6f658600041b9ea6678c47000041b9ea -6f6f390003b48d06834f21000038758a -6f6f390003b48d06cf8e460000391feb -6f7885000483a406cb5e9e00003cbe13 -6f7c90000471be466a536c000044a043 -6f7ddb00042aa1e658109a000042aa0d -6f807c0004a00296120b3700004864d5 -6f8330f8d0c444066a89d700b0498909 -6f89e200048d28c6cca5a8000048d276 -6f8d34ae7d271f86001df1005047e861 -6f90a10004bf46d679507600004bf3c5 -6f933f0003d50916d9b69200003d508f -6f99cdd29c8ce5d687e7d100f04c08cb -6f99d200047bfcc6161774000047bd69 -6fab3300047cc166b1068700003b603a -6fb00654b2b86eb66a0ad800804ca657 -6fb1a300048d2686a559a1000047663a -6fb27e0004afbfb64f5a7b00004afbe1 -6fb36b0003d61f0663ea4800003d61f0 -6fbd4600043eaf06679eb1000043eaf0 -6fc089000490b856b63a190000490b79 -6fc1b900047f73f63a5ac3000047f70c -6fc30900046f7cb6676c2f000046f7cb -6fc4e800043d21566e67f6000043d215 -6fcb0a0004837ed6e148470000420966 -6fcccd49308ee4a697abec00e048af81 -6fcecc00039e81168bd802000039e80d -6fcf090790749fc687cba400704ac33e -6fd152000413916667dbcf0000413916 -6fd1900003fa1df674246b00003fa1de -6fd1de0004738a864d65de00004738a4 -6fd2dcfcf2051eb674327e00e04b1f66 -6fd3480004407836a30748000044077b -6fd3e90003d0b6966e6ff000003d0b69 -6fd68200048576d6bcbf460000485753 -6fdb53e5d2167d0679507600804bf3c5 -6fe0d40004838426a559a1000047663a -6fe6d300042e8a666e894e000042e8a6 -6fe7a40004b4d0d6f24a7e00004b4d09 -6fe9a12bf925f876b322c3001048e298 -6feb6b00048d2686a559a1000047663a -6fee6b0003c90d366be5f600003c90d3 -6fee6b0003c90d36834f21000038758a -6fef620003e807f66dd80d00003e807f -6ff0a80004af8fc62e7a7400004af822 -6ff2c300049211066d99d2000047e172 -6ff5db000429119667c89a0000429119 -6ff95ced22df09f61b2451008048d4e6 -6ffb0900046d3ee66eb280000046d3ee -6ffc1eaee9a0c0962849e300004cc1be -7000e2342b40f3571005640090496e15 -700cfe0003f62cb71963bb00003f506a -701186000411c3776867010000411c37 -70132200048e727758c18f00003d19b1 -7021de00047f73f73a5ac3000047f70c -70268a000400026768ce8f0000400026 -7028fe0003f77e876edbfa00003f77e8 -702f3dcb3d5fed17adedd7008049fbbd -7033550004af8fc72e7a7400004af822 -7038e7d8edb250174a4bec00d0487eba -7039644404c7cf1712302f006047d7dd -70401f0004382347602f5b0000438234 -704836cd72747bc783879100a04ce7c5 -704a5e00043d85576effe0000043d855 -7052d90003c2aca7834f21000038758a -7052d90003c2aca7acb29900003c2ac5 -7054900003c2ad37acb29900003c2ac5 -705aeea9d1d407b7c032c30070473db5 -70620c0004044bb795d84700004044b5 -7065dc956e5d77577495d000504ccaf0 -7066390003da006766f35900003da006 -706a0f0004bf83677aee0f00004bf7e1 -706d2400048e7377ea5da3000048dd18 -706e430003dd2ce763e97200003dd2ce -706e580003d0c9e769380c00003d0c9e -70725dd285f1a88729464e0020486645 -70728200048b4837a92337000048b428 -70749600047455175ed30a00004743ed -708216000419e07762e2ac0000419e07 -708926b6f98b6e67bd6f5e00b04729c4 -7089be770e6f14f78b1c10005043299a -708b4600048e18173c069d00003a6629 -70918e7e53903ff7be913c00404a807d -7092720003cb9ae7a2f55900003cb9a1 -70936d00049aa56744c3a8000049a983 -709523000457b6f75e9e7e0000457b6c -709a16000401865762deac0000401865 -709a802917ccca971cc9e600604ce364 -70a6ac000421aaf76f52c20000421aaf -70aba10003e22ab764980b00003e22ab -70b1cbc771fe40574a690a00304c7782 -70b3d000047a5d87bd2b33000047a404 -70b7fc20dac75597beedd700904a274d -70ba28000425f8776f4da50000425f87 -70ba500003c2aca7acb29900003c2ac5 -70ba730004c874d72522d800004c8746 -70c4760004be9cf791c3d100004be9cd -70c7a80004a166a73c069d00003a6629 -70ca200003cbc037e9f27200003cbbf8 -70ceec0003eccf976d959400003eccdb -70cf2a00039348476c30820000393484 
-70cf2a0003934847834f21000038758a -70de970003d02e47cb84c000003d02d0 -70e21d0004329e57656eb700004329e5 -70e3010004000787f855520000400071 -70e40f0004971817b31af80000497148 -70eda100047a5d87bd2b33000047a404 -70ee390004b62f077c3ba400004b6245 -70f1380003eeb0f70e317200003ebe6a -70f241a2d5052747cd394c00e0472f2b -70f5750003dc0f6769118f00003dc0f6 -70f9e60003cbb04769324e00003cbb04 -70fad7323304c667dcac85001047cb8a -70fcd20003ee8bf7a176ec00003eda30 -70ff550004af31870cc1e200004af298 -7107170003d350e76f9a4800003d350e -7108c20004971817b31af80000497148 -710b92000445193752516800004450b0 -7115350004cd15f7c326d800004cbcc6 -7116560003d60d872fcf5900003d6099 -711747b39afbc52713d07600c04c7f66 -711cd20003ec9f376a0cdb00003ec9f3 -711cf9558662bd77145f3300b0490bbc -711fa8000499d78786776d0000499d49 -71267e0004af31870cc1e200004af298 -71379c504b7b2f17a8aceb0000488f29 -713b3d0003d854c770058f00003d854c -714682000485d307f73570000046229c -7147bf00046494076fdd700000464940 -7149aad20ff9c1e77f6c510010489be6 -714d71109fb35a67574ea100304af874 -71540100042911b767c89a0000429119 -7156740004b9d827e26e1d00004b9d06 -7156b100043cd2c76fd663000043cd2c -7157510003f76d3769f59400003f76d3 -71580f00048fb317fb246f000048eedc -715b110003cbafc7b1068700003b603a -7165e200044ed56749ea8e0000447267 -717125a4e3556ef728a27b00e04bd681 -7179058f213254d7931fbe006049c96d -717bf500048d8c57de3051000048c7d4 -717f6c00044e34f7702420000044e34f -71856400048e5c77d63bac00004737d8 -7186450003cf84c767a62000003cf84c -7189230004638b176904f500004638b1 -7190d010a5c52d176181ab00504c61bc -7191980004b9d827e26e1d00004b9d06 -71a1520004081e176d755200004081dd -71a33300047bb2973793d0000047b889 -71a5ab0004c129673b6ca100004c1255 -71a65000048e5c77d63bac00004737d8 -71aa3a55f29ce137be4c1300f04bbc47 -71aaa23b9fea6997f1bb2200e0498eba -71adf80003ddcb7767e99400003ddcb7 -71b1ab0004c49b4703413300004c49ad -71b2e80004c49b4703413300004c49ad -71b6ac0004154ce76a59dd00004154ce -71b9e60004cd7747bb3322000048f88f -71bb210003d3134764f57500003d3134 -71bbb600041e3527642ec2000041e352 -71bd2400048e7377ea5da3000048dd18 -71c47600046a2027703dc9000046a202 -71c64e000488f7477b51e20000488f64 -71c6f800042c01c7676da5000042c01c -71cfed000499d78786776d0000499d49 -71d64e00048d8c57de3051000048c7d4 -71d73421be4109578a43970040472a10 -71dc5a0315d60e57cdd5f40010491b41 -71de7e00043313c7f8389a000043311e -71e1dd000432b11783e9dd0000432560 -71e3170003da0e476b0c0b00003da0e4 -71e72ce8e6c6f3279f5bd000e047be5d -71ee197583da192780f14a00a04a893a -71ee740004bca7c7e8145c00004bb578 -71f5f94ae380de678ca7ac00804771c1 -71f793fed96316b75c25e2006048b784 -71f8ea0004646bc71f29c900004635a8 -71fcba64626fc307f9cbdc006049fe99 -71ffb30004ce8857a2bc7900004ce878 -721011a9f8415097dfdb7900a04a3b09 -721195add0d14bd7bebbd100c04c189c -7211ee781825b507411ee8007049d1e5 -7212f20003fc3b2768230100003fc3b2 -72151b0004ce8857a2bc7900004ce878 -72173d0003dc2247ac8b1700003dc21f -7218c7c07d8f9e477f09d000f04cc02e -7219c1f5f4a204b733f8a700704c31fa -722ec2000420c687c10c6b0000420c52 -7235d7d7097c7037dab7d100904c6f57 -72381f623e4ec057fc353c00504cbb89 -723a63000472e32769d7c00000472e13 -723b1e07c1f15407d2d27200d04a7e0f -7247170003d1f577708a5600003d1f57 -724aba0004201ba7710fcf00004201ba -7251990003bf4947ea76c300003921cb -7253190004878567a639e600003cf5b0 -7254f80003d1938769ff3d00003d1938 -725aac000407c7c7d8d2ec00003e2e68 -7260140003b48df7cf8e460000391feb -726c7c00049ef55796393100003cf5c7 -72750b0003eeb0f70e317200003ebe6a -72876b00048d0057c578ac000048cfda -728774000478543725db080000445f86 -728b4300042c3d0767e27e000042c3d0 -72917b449d337a670135e300404ccc98 
-7298760004647107713dc90000464710 -729d86000404d7476a90470000404d74 -729eac0004014e77ea76c300003921cb -72a9d900047d4727af0a45000047d45c -72ab3d0003d7ae07e6218f00003d7ace -72b047000419dfa7cde3010000419df3 -72b5940003f77fe76edbfa00003f77e8 -72b5e20004b1dc7741f4b700004aba35 -72b76c00044ffdd71725d60000422eaa -72bf3d0003d60d872fcf5900003d6099 -72c2a50004bf3d4779507600004bf3c5 -72c5e200044df89792999f00003cb97e -72c5e60004cd7747bb3322000048f88f -72d05600046dd8b75e742f000046dd73 -72d313897e515897e2c10a00604c30bd -72d660c1251ebd475afdc700704b2e30 -72d6dc762e94cea77aee0f00e04bf7e1 -72d70653713ee35739893300804c47e8 -72dea10004b18c97864ea100004b16a0 -72e1900004000787f855520000400071 -72f4a40003f6338771c80d00003f6338 -72f4b70003da006766f35900003da006 -72f98f75e3d83f37f4c8eb002048df31 -72fe4b2c521d5fa7a84c2f00d047e727 -72feba0004148817dcb2ba000041484d -7303370004858c174066c3000047d486 -730377e0ae06cc37fbf6c7006048d8d9 -7304560004713c2771a9dd00004713c2 -7309e7449f17e1173b4a45004047ff5b -730b170004aa7a07fb747d00004aa799 -730da1a2e509f8d72f970a007047c6ed -730dd700048f49e7cca5a8000048d276 -730e2400048cd5d7834f21000038758a -730e2400048cd5d7ea76c300003921cb -731316e0671dc6f7546dd900e047d5d7 -7329a5000425bf87a19bcf0000406c88 -7331380003e973f771b0db00003e973f -733937848b1f1c172465b900e04809c8 -7349de00047e45072298b5000047e1e8 -735b190004878567a639e600003cf5b0 -736ca90003d0f86769693500003d0f86 -736e8a000409a6876b21520000409a68 -736fa06ee5c339575590a100304c77c3 -737016ac74a907f76951f40060495fba -737182e38a86c6a72699d200d047f1e7 -7371e200045bac777698f5000045ba8b -73742500049397d7d92425000049325b -7374dc4daf1af0074dcfac009047a914 -7375e20004b2e3975afdc700004b2e30 -737663000472e32769d7c00000472e13 -737b3f0003d85d579962aa00003d85b9 -7381980004b0fe7725afa400004b0fe6 -738e8a000420cf17c10c6b0000420c52 -738f19000487668775b5b9000047fc39 -738f5c0004a93b6775696f00004a93b4 -7391c900046ba8e771f67e000046ba8e -73921300041b9ea768ebcf000041b9ea -7395d365960c57d759593300b04c5b10 -73a73700048931b72e11d20000484175 -73aba40004b0fe7725afa400004b0fe6 -73b2d190bb903137b7d9cf001048efb1 -73baa50004c29417c6c9e2000044e600 -73c1ba8dfd7f10977bcb3300c0491651 -73c2560004bca7c7e8145c00004bb578 -73c297b54b099bf726b9b9003047b4bf -73c3590003d1d4e721835900003d1827 -73c4bdc9382332975da09600a047cf8b -73c7be34e427edf71236a500f04c1ab5 -73ca0c000421837772568a0000421837 -73cb920004444a577281bd00004444a5 -73d42b00043502a76825c5000043502a -73da0f0004c45e57e832190000496f76 -73db700004329e57656eb700004329e5 -73dbf500048d2687a559a1000047663a -73de7a109f0832077e2d6b00804c3523 -73e27c73b9246a47740dd70020499a10 -73e3a40004b5407726c4ca00004886ff -73e4a7000468f987728cf50000468f98 -73e987587860dd779d519800904bcdd3 -73e9d20004792367b17bd0000047919e -73eceb00048887f799ce4e0000488860 -73ee4cbf4a057c67d8ad4a00c04a85d4 -73f3fa0003ddba7772957200003ddba7 -73f42000044df89792999f00003cb97e -73f8956237a970c7d9937600804c3dad -73fca7000469b5a76b6fbf0000469b5a -73ff3d0003d763376d3a7200003d7633 -7401aedf34fb51978494c200d049488b -7403370004880fd74a4bec0000487eba -74053af6a72fe447400025008049726a -7405be0003b0b6d77288fe00003b0b6d -740a0f0004c51ea7481ae800004c51d7 -740d1400040002676a0ec20000400026 -740ebb0004a93b6775696f00004a93b4 -740fc20004a32347a639e600003cf5b0 -74112900042601076c18020000426010 -7415720003ee9c3729e59400003ee9c1 -7415d700049a8ce729b5d7000049a828 -7416720003cbe9b7b9b0b300003cbae5 -741bc9000471bf5771b75d0000471bf5 -741ca10004c51ea7481ae800004c51d7 -741d170003d60d872fcf5900003d6099 -741d4c000472c5976e0f5e0000472c3d -74236c00044bc0d76bfe3b000044bc0d 
-74280f00049402179b34c20000494013 -742dd700048f49e7cca5a8000048d276 -743158b3837cb737b80fed009049e5f1 -7433a2aef79384e7ba96cb00a04ccfd0 -7434c4000487668775b5b9000047fc39 -743cfe0003f77fe76edbfa00003f77e8 -743d31000411c37769d5dd0000411c37 -743ec3000497a43791ab220000497a29 -743ef800042911976943ea0000429119 -74448ac24d6b2f474a690a00704c7782 -7447c6000431b5176bfadf0000431b51 -744bdf0003cf84c767a62000003cf84c -744cd400047bfcc7161774000047bd69 -7451c01141f4a507c41346002048baf5 -7451dd00041112375bfeba00004110fd -7452a300042c01c7676da5000042c01c -7454aa00041391676949860000413916 -74562400048cd5d7ea76c300003921cb -745c200003cb81976a7b3900003cb819 -745eb9f1123d9057ae63c00000472eb4 -74605100048b4837a92337000048b428 -746c9500046a89479628a7000046a88a -746e7c00046d86472127f6000043de62 -7471e60004cd33d791947900004cd33a -7471ef000444b067b77df40000444acd -747a73ce7f3f0367152b33009047ebe0 -747ae71147c83b67788ca100804c7d6d -747b590003d188c76cbb3f00003d188c -74821b0003d0090735d53500003d0069 -7488dfe76194898745fca800204b9c79 -748a6d00043313c7f8389a000043311e -748aac000420c687c10c6b0000420c52 -748b28455f8a6a97ffc23900404b1a36 -74959422be4f684713010a00504c1270 -7495d115b38b4657dae77500504cd64f -7496c30004971817b31af80000497148 -7497e90003d07047c2700c00003d06de -7499990003bf28e7834f21000038758a -7499990003bf28e7ea76c300003921cb -7499c70004b9171739f67b00004b9158 -749bc5acd0401f4737caab00b04aff77 -74a1b50003d0c9e76aa08900003d0c9e -74af68ad39314ea79a5c850020473e42 -74b40b0003f3689773399400003f3689 -74b543873209a9a771d5e300204cdd95 -74bdd70004a265473d0579000043dd4a -74be430003ddcb7767e99400003ddcb7 -74bfbb0003eccdb76d959400003eccdb -74c3d400048ef2c7fb246f000048eedc -74c80f0003fc3b2768230100003fc3b2 -74c89a59c303bb37bf34c200b048fdd8 -74ca480003db744773673f00003db744 -74d5d00004cd33d791947900004cd33a -74d6ad166591502725d1e200004acd00 -74d86800043be8d7ebef9000003921cb -74d97b0004792367b17bd0000047919e -74d9d200047e45072298b5000047e1e8 -74df59de7ceab9772a842800804af74c -74e2d8b059f0edf7a67bac00a047af8d -74f4ef0003ec9f376b619400003ec9f3 -74f73ab2ad759177434b0a005047fd1c -74f9c0000499cc07d33d18000038bd74 -74fe990003c2bd376925fe00003c2bce -74ff6c00044d1c3773bde2000044d1c3 -7500540f7e853d87de5dd700c04a0af5 -75041976ac0d5e877745ae001049f017 -7504a80004b1d74750947c00004a1491 -750b180d7231b04731516400f049824e -750f046f351d9b979a55a300c048ac08 -750fac000473e4679a5c850000473e42 -7514d700042c3d0767e27e000042c3d0 -75190a0004c2294713f4a800004ba11d -75192a85024329a72c178c00404a6388 -751aaa122e28e29779a7ec002048db3f -751fccf4537480c7f5c996003047fe43 -7521860003fc7c7724655200003fc7c3 -7524c25e3a1d4f67e801a300804c2934 -752f33000481c5573b51de0000481c3a -7535ad7c3ea5f18797850a00304c64ba -7535ef2e706937f7a1035500f04b786c -75368236c0301ff7eb26bc00304bf005 -753b510003e7e5d774013800003e7e5d -753b798dfc2d7d37bcbf460060485753 -75419f0003cbc5474f415900003cbc4b -754472000499cc07d33d18000038bd74 -754611234337de57defeab00204b7855 -75467e000465b5776c4ca70000465b57 -7546c90003cbb0476aeacd00003cbb04 -75480f00048ef2c7fb246f000048eedc -754a28fb1621dfd7ee9ace00a04b15cb -754c20b2655036b7edc6a500404c5e45 -754da62a679641a7d17abc00204be28e -754fbb0003ebb9076dd35100003ebb90 -7551350003d0b6976e6ff000003d0b69 -755495000467069773f0930000467069 -75560c0004081dd76d755200004081dd -75573f0003d82727f8fa9200003d826e -7557afadf6319a877b4f7400d04743e0 -7558880003dc0f676aae5600003dc0f6 -7569ae0004a5ec278925ae00004a5ebb -7569c00004a16c87ed97be000049b16e -756ac20004069357c41e0c0000406931 -756ef800049879378b4df4000049876a -7571900003fa1de774246b00003fa1de 
-757336c73f1bc7478fe19800f04b9643 -75794c000472c5976e0f5e0000472c3d -757e430003f76d376b540b00003f76d3 -75801e0c0e7c5d7726707c007049e8be -75842f00047d2587c15333000047bd67 -7586720003caff87741acd00003caff8 -7588050004a265473d0579000043dd4a -75888ceea829d7972ab21d00b04bd5a3 -758c900004710367676c2f000046f7cb -758c9300045a5037a1af9c000045a4fa -758ec476ff2da63725db080010445f86 -75915b000472e3b769d7c00000472e13 -7597bb0003ef43f73b6cdb00003ecfdb -75a4db0003dd6b87741d8f00003dd6b8 -75a52300045d42b7c17893000045d3fe -75aa920003d85d579962aa00003d85b9 -75acf80003d854f770058f00003d854c -75aeac0004080d67221e8a00004080bc -75b498d5793c6997af60a700a04c2494 -75b9ab0004c58027060124000048ea11 -75bc5a9c13edb0d7f6367b00e04bd2d4 -75befa23e9b31577fc09c100504c80f7 -75c0680004261e176d7fea00004261e1 -75c360000461a7c77454f50000461a7c -75c6740004bd21c748486900004bd20b -75ca390003da0e476c4a9200003da0e4 -75ce800004154ce76b897a00004154ce -75cfc20004a32347a639e600003cf5b0 -75d52c0004705cb788c30900004705b0 -75d69800046d31b78beb20000046d318 -75da8b00047d4727af0a45000047d45c -75db4ec07a037a271cddde000047ab09 -75dfdf5d37a10527cd636d00604a038b -75e3620003dd9d47747f5100003dd9d4 -75e6114a48229fe7bafbac003047e383 -75e8e800043c96f774a7dc000043c96f -75ebca000472b407834f21000038758a -75f64e00048725d75aee240000487259 -75fa8a000411f417aaed7a0000411f3e -760808ff15da9e7791ab220050497a29 -76104a6b52ec5d87c0c096005047e18b -7611d900047cb8174c9dd2000047b61b -7611de00047e7a27413d96000047e779 -76170a00041b9ea768ebcf000041b9ea -7619f100047cb8174c9dd2000047b61b -76280280268247f77f63ac00b0476201 -7629de000483d7872963330000483ccc -762f09000470cba76d102f0000470cba -7631f400048e5c77d63bac00004737d8 -76349a00042911b767c89a0000429119 -7635848fc3ff70b72aa0eb00f048de23 -7636b100043d4177e9f27200003cbbf8 -7639421ea70b5617c2c13c00c04c858b -76397a0003d0f86769693500003d0f86 -763bea000428526774b4680000428526 -763c6b0004081e176d755200004081dd -763d24000491b887cdd5f40000491b41 -763dab0004c46c0710d1a300004c3f17 -7643af00043273076cb6b70000432730 -764402000399dff705ebe50000399de3 -76444b0ed7decf07fff93500e04cdbfa -7648200004500d8774dc7600004500d8 -764af802ccd67437fcee8b00f0483792 -764c83490db5d447b3236b00a04871c2 -764cb500047b506721aa7e0000459dc1 -764d170004c527b72ecab200004c3ba0 -764d380003f77e876edbfa00003f77e8 -7656d5000444c4f7b77df40000444acd -765932585f8a3c9754793300804beeb4 -765da4c2efbfd0d736042900f039346c -765da4c2efbfd0d7834f2100f038758a -76605600049b6f87874551000049b6ee -76636100042e8a676e894e000042e8a6 -766455767a72990729633300e0483ccc -766c583c8c0458477808a700e04c65d7 -766e2ff34c01c517ebda4500a0482f2d -7674d20003e807f76dd80d00003e807f -7677b600040002676a0ec20000400026 -7677c20004a222672fa3a800004a21ff -768743dc0352cef759593300f04c5b10 -7687abe5d139f7c7916c7600004bf4b9 -7691228e644f17a71993cc00304ca33d -7693d1393523426734fe190030495eed -769b37000423db272945d60000423c40 -769bdc00043d21576e67f6000043d215 -76a1c904814446f7c9fdb900e0473cc8 -76a41b00afab400730f2c3008047a88a -76ac1f00043502a76825c5000043502a -76b11625cf4e35875fcae800e04c35c8 -76b300be8339f667806aa500d04beefc -76b5de9c024fba078406240040486d8a -76b79c00046c6257366420000046c538 -76bbb500042b2577d94402000042b253 -76bd6800044350377541f40000443503 -76c5a30004880fd74a4bec0000487eba -76c770000499d78786776d0000499d49 -76cf790004a3b0c7dfdb7900004a3b09 -76d3b6000411c37769d5dd0000411c37 -76d4880003d193876c5d7500003d1938 -76d85c0004b17217a5481300004b170c -76d92b0004a3b0c7dfdb7900004a3b09 -76db620003f6bbf775740d00003f6bbf -76dd3100039e8117834f21000038758a -76dd3100039e81178bd802000039e80d 
-76de7b0004ac9a37c0e9c700004ac9a0 -76e1e9d8028cbc1710d1a300504c3f17 -76e28a000419dfa7cde3010000419df3 -76e72c59a263dc773b6ca100b04c1255 -76e9ba00042911976943ea0000429119 -76ebbb0003f76bf7759b6200003f76bf -76ef790004a263a73d0579000043dd4a -76f18b66168ce61737f63900b04af700 -76f1c500043d85576effe0000043d855 -76f33f0003d350e76f9a4800003d350e -76fd970004465667410f6300003d1033 -77000b0003d854c770058f00003d854c -77021600041391676949860000413916 -7705510004a24647a113dc00004a2457 -7705b25bb131bd7769c24500604775f2 -770a13000404d7476bf5520000404d74 -77126427d5a38617c0c46900404bb187 -7713ac000475e2573c069d00003a6629 -771be90003f88377a3a61f00003f8833 -771d5b000472e3b769d7c00000472e13 -771dae00049c5fa75c25e2000048b784 -77233300047cd6f76adbd0000047cd68 -77245600046efab76ad0d7000046efab -77267b0004b7e9b7a5481300004b170c -7730aee82d7f2847b8bebc00c04c44b0 -773a98000471550775d3510000471550 -773d62000399e07705ebe50000399de3 -773d62000399e077834f21000038758a -773f90047e9381e77181a300f04c4cf1 -77467c239601e617a2e55100c04a1120 -774bc20004a00297120b3700004864d5 -77545e0004cb4777d2afac00004cb455 -77563f2e7c0af3e716470e00c04ca50d -77573bd6e2655087e1f45600c04a1415 -7758020003b0b6d77288fe00003b0b6d -775db17b8258a37762e7d000004812ea -775df10004838427a559a1000047663a -77612b0004a263a73d0579000043dd4a -7762300003d0c9e76aa08900003d0c9e -7762aa0003d2fe87b41b6b00003d2fe2 -7763b5000425f8776f4da50000425f87 -77660200046d3ee76eb280000046d3ee -7766560004b90cf70c4e6600004b90bc -7770a700044102f703ee4800003d89d7 -7774c589567b8ba7b0bf8600c048533a -777d480004ce8857a2bc7900004ce878 -777ebb32a492f2671f3f7000904a0415 -7783800004725a674b0f6600003d01b7 -7786ba000421aaf76f52c20000421aaf -7789a800048942c78d5eba000040c009 -779f01000419e7677649900000419e76 -77aaed8d482d5b279bc5be00404ce49f -77ae7e0004b0fe7725afa400004b0fe6 -77b0eb00048c469795e0ac000048c415 -77b45600049a3ff7672b8c000049a3f4 -77b5dd00046edcc7fa282f000046ed5f -77b64f22b76f7de7199bd100604bf5ff -77b9060003ec9f376b619400003ec9f3 -77bb6b0004872857a2a76b0000487282 -77c80b0003eca0775670a400003ec9ed -77cb6b1b50e8f2874abc2c00e04c4aac -77cc2f0004713e8776a45600004713e8 -77cd353f41ddf057c4cac300d047e501 -77d4f500046cbdb7b1068700003b603a -77d7d7761bc6cb57cd342800c04b55ba -77e1d900047f2b0756cfac000047ce7f -77e32000046e6627766c56000046e662 -77f0790003d763376eb25600003d7633 -77f3fb7d14911c4746eab200a04c1f2f -77f8aa000409a6876c9bcf0000409a68 -77f8fe0003ef3b178d1bbb00003eec5e -77fdd00004aa5327e832190000496f76 -77feb100043cd2c76fd663000043cd2c -7801c35dc6b6d94768622c00c048ee52 -78077400047c051713f1f10000476ace -7809700004694f175bb1e20000468fd0 -7809d200047a9507790f0a000047a8e4 -780dedb836c114b705b77600e04c81a1 -780eeed0fad88497e9046900304aedc5 -78117b00047d1987578cb5000047d197 -7819040004aa0c07231abb00004a949d -7819110285620a8705c6c300d0498bb7 -7821f55c25983fa7b8bebc00e04c44b0 -7826ab0004af8fc72e7a7400004af822 -7828900003c2a51744567c00003c29c4 -782b6000045ba8b77698f5000045ba8b -782c82a7d5f4c6678615240090494dd8 -7831f80003f76d376b540b00003f76d3 -7831f88795bfb4d78130a100104c1989 -78325c0003cbb0476aeacd00003cbb04 -7833ff91ace532d7e2614800904ccca5 -78392900042bd68708cbb5000042bd5f -783aa200044e34f7702420000044e34f -783ca80004ac3667df6ace00004ac099 -783f580003dc0f676aae5600003dc0f6 -78453c0004aa0c07231abb00004a949d -784c7200049a84a729b5d7000049a828 -784fb60004154ce76b897a00004154ce -785b8b7c344e67d7fc9dcf0030498969 -785c2c0004bf42a756a9e200004aceb3 -785c8d0003da0e476c4a9200003da0e4 -785f0a00047a5d87bd2b33000047a404 -786771000431b5176d6faf0000431b51 -786ba000042601076d88680000426010 
-786d4d05117b5bc7d64b740030475c97 -787cd400047e65977c8a45000047e603 -78810535738764b77fc53300404c5af4 -7881626a3b255fa7fcc5b80010484b7a -7884581326e492e7e8d7d100304c5526 -7886052791157ff7e4a9f100e0481498 -7886200003cbd3a727619f00003cbc05 -788716031c316d97a4c1e600704ce3e6 -7891040004ab2737902c6f000048ec62 -7893cf0004005c1777295200004005c1 -78963b00045063475065d10000437f6d -789a1c74ff728317bedc3d00c04ca9d0 -78a7510003cb81976c0c9800003cb819 -78a875efb1f29ee70471a100904776e6 -78a9860004201ba7710fcf00004201ba -78ae9c7b69153de79db9960040484438 -78af4e0003eccdb76ef8db00003eccdb -78b1750003d390f7774b6b00003d390f -78b1c000049aa8876c187c000049aa7e -78b224000488f7277b51e20000488f64 -78b39c00046494076fdd700000464940 -78b9170003dd3b57e6b59400003dd3ac -78bcd400047712f792999f00003cb97e -78c7600003d188c76ec36b00003d188c -78cdd900047333571f915b0000472f1d -78d34800043fd777e87abc000043fd66 -78df2200048ef2c7fb246f000048eedc -78e05100048576d7bcbf460000485753 -78e852f1dec8d1179e91a80020486484 -78ea740004af99a7bf1c6900004af990 -78ed977cba082d17af60a700d04c2494 -78ee24dbd17732378229d2001047f787 -78f426d1b27bd0d7d8d02500204974a5 -78fd7b600bafde077d1a7b00804b05b3 -7900f50004582177a9212300004581bf -7904b5000476901736f8b500004768f6 -7908db0003f6338771c80d00003f6338 -790c5c0004afbe774f5a7b00004afbe1 -790dcb3bfe7f6b97413d96005047e779 -790df100047333571f915b0000472f1d -790ef88c4df13ac7232eab00804b38a4 -790f800004725a674b0f6600003d01b7 -790f87f126bde2a730eb0200704a9a0a -791564000484c2c7104d640000484c2b -7922ce0004c45897e832190000496f76 -7923512932847817eeec2c00504c2ab9 -7924470004044bb795d84700004044b5 -792ed3f635801d57eab7450050392d66 -7930fe0003a976e7834f21000038758a -7930fe0003a976e7c79b4500003927b9 -7933760004c47177094e5300004c470e -79340d0003e973f771b0db00003e973f -793e0c000412246743f28a000041223f -7940f500046a2027703dc9000046a202 -7942520003d0b6976fd3e900003d0b69 -7943f60003ca6e174252d900003ca678 -7943f60003ca6e17834f21000038758a -79443c847ab290c71ce85c00904b0f40 -7948f6ee056e489733a9e2007048b81a -7949170004c2405769c9a300004c2401 -794b775c61ec6ec72a330a0080484502 -794e560004bc5447ef75e200004bc537 -794fc6d22e2d5497d10f7600404c5ae7 -7952280003ebb9076f5d0b00003ebb90 -7966ba0004080d67221e8a00004080bc -796be50003958487834f21000038758a -796be50003958487b485f20000395839 -79713f7030e10f57f5093300a04bf063 -797d940003ddba7772957200003ddba7 -797ea80004081dd76eca8a00004081dd -798005000499cc07834f21000038758a -798005000499cc07d33d18000038bd74 -79835d7f74a2de07205b7900504a0da1 -798a720003d28d972f298f00003d28d4 -798b57a1fbd5881787837600a04c7a16 -798b89d1d312de97d249de00b0483bde -7997c6b3e9c1b2576fb37600004be61d -7999e20004647107713dc90000464710 -799d569605dfc027c7d8a100b04be7f6 -79ac30f7de9ba567fcf3a800f049c220 -79adaf00047063b7781090000047063b -79b4770003d193876c5d7500003d1938 -79bb310004732d8755f73100004732ce -79bd551f009017c7d2afac00f04cb455 -79d70a000404d7476bf5520000404d74 -79dd7b00047eb21763dcd4000047e24e -79e13a6faa4a940700fcc40020488857 -79e87c461c6b9c2705df4600f04879ca -79ee95ff5ededc975c9ece00f04b8661 -79f0429b67db2397b50ac30010475e8b -79f1f9173a8ca5e7c61ba8009049f90f -79f5848372d1262743d5de0030475db5 -79fc280004b36467f4dc1300004b363c -79fd350004732d8755f73100004732ce -79fec781475638c71b2451007048d4e6 -79ffe0d78331d0375c8fac00b0479ef7 -7a0a980004713c2771a9dd00004713c2 -7a108500047cb3e7f8492c00004713e7 -7a15203dca023037cd1d0a00404c0538 -7a1d7a00041b3a97b5f17a000041b35f -7a1ea10004bcd757dede1d00004bcd24 -7a29940004a984d7b3a9a400004a9842 -7a2b6d00049b7e8725360d0000492704 -7a2c51000485d307f73570000046229c 
-7a3ca7000456ffe7b9b0b300003cbae5 -7a3d7b00047c0de701dc850000473725 -7a442c16240f28a7dfa4ac002048ca52 -7a4c860004261e176ef62800004261e1 -7a4c980003d7ae07e6218f00003d7ace -7a54fe0003ed1e2779180b00003ed1e2 -7a5da40004a984d7b3a9a400004a9842 -7a6447000420143778faac0000420143 -7a65060003f77e877028fe00003f77e8 -7a660c0003fdb347789a8a00003fdb34 -7a698c3554706a57ce5c96002047b9cc -7a6c710003d763376eb25600003d7633 -7a74050004a2320703bae800004a22eb -7a774600048b42b7a92337000048b428 -7a7c56000499e0f77ce5d2000047d58b -7a7ee2f830dc4df7aed91700604c3c7f -7a83170003db744773673f00003db744 -7a8530fb7d0c45d74f599800004bbbf3 -7a880b0003ea6947792eec00003ea694 -7a897a000421837772568a0000421837 -7a8eb168b90167476a536c00b044a043 -7a962311bd9988f7489e1d00b04bb97e -7a976d0004a2320703bae800004a22eb -7a980d0003e572b702f35100003e5717 -7a98ca00042e8a676fe6d3000042e8a6 -7aa20500043bb0c76e9c50000043bb0c -7aa4ebf427ef3ff7ba4033001049e4e9 -7aa79e2561fd85b7c733a400104bddf2 -7aaad1911675d367b05dcf0070490b47 -7aab53000444b067b77df40000444acd -7ab2740004b1d77750947c00004a1491 -7ab554689ede2ee72b3ec300e047c197 -7abbf5000489ca37975c510000489b26 -7abd2400048e5df7f4c8eb000048df31 -7ac216000409a6876c9bcf0000409a68 -7ac818d7dc819dc730f2c3007047a88a -7acda30004c49b4703413300004c49ad -7ad40c0003e807f76fef6200003e807f -7ad49507a83987c76b86c3002047cc38 -7ad77a79a7aaa0e7672bbe00a04a3910 -7ada4e000489ca37975c510000489b26 -7ada8000046d31b78beb20000046d318 -7ae2c200041c5ea782c447000041c5e7 -7af53b000446f5774154660000446f0b -7af5b200043fa1070d46fd000043fa05 -7af7bf00045c3ee779c170000045c3ee -7af9b00003d854c7713b3d00003d854c -7b09380003f3689773399400003f3689 -7b119a00042bc8c7e148470000420966 -7b11f40004955b47022c250000494e4c -7b146500042601076d88680000426010 -7b15e800043273076e73110000432730 -7b170a00048024c770d9f10000480249 -7b17bf00046ba8e771f67e000046ba8e -7b191b0004cde3b74f6c8200004cc801 -7b206b000415db9779b2ba0000415db9 -7b25c9000468f987728cf50000468f98 -7b28b70003d350e771071700003d350e -7b29b900048024c770d9f10000480249 -7b2c9300044a09e779d76c000044a09e -7b2ca300043f4c67e5e3e0000043f4c2 -7b2f170003d4f6579634f800003d2924 -7b30a80004b36467f4dc1300004b363c -7b35e39048c77887e41bd0003047719a -7b36e59a144fd1d7d16e8b00504759fa -7b375b00043eb417679eb1000043eaf0 -7b397000045bbc4779a420000045bbc4 -7b3df10004823b178d4b0a00004823ad -7b3f080004444a577281bd00004444a5 -7b4062e652d2d3d731141300b04b75e2 -7b41520004031a37554ac2000040310e -7b46970003d0089735d53500003d0069 -7b48a40003e7e5d774013800003e7e5d -7b491f0003eccdb76ef8db00003eccdb -7b4b39000431b5176d6faf0000431b51 -7b57ac0004759d177e15a100004758af -7b582f00047f86c7fba82f000047f7da -7b5a560003dd6b87741d8f00003dd6b8 -7b5c6b0004080d67221e8a00004080bc -7b61350003d07047c2700c00003d06de -7b6bbf000450d5d77a2ea20000450d5d -7b6ee6a3ede734e79534b50070476fa5 -7b71dd0003fa1de774246b00003fa1de -7b7a6bc66a607647d10f7600a04c5ae7 -7b81b00003d188c76ec36b00003d188c -7b88d20003ec89877a4bfa00003ec898 -7b89230004677dd742f5db00004285ff -7b8af80004955b47022c250000494e4c -7b8cf50004519bd77a3de200004519bd -7b91dc3e3aece407008e7400704bb28f -7b94b2d33f311747b189de0050477bb1 -7b95de22b9c2c7d774dd6b00e04c4344 -7b973d0003d812477a640b00003d8124 -7b97d400048f04f702d722000048f01d -7ba3600004608e377a30f500004608e3 -7ba36b0003cb81976c0c9800003cb819 -7ba4960004833607b149d2000047d094 -7ba6c20003fc7c7724655200003fc7c3 -7ba7bb0003dd9d47747f5100003dd9d4 -7ba9e2000485abe7b1068700003b603a -7bb2a10004af31870cc1e200004af298 -7bb2c30004809427b1068700003b603a -7bb3d00004759d177e15a100004758af -7bb9cce33ca84d37454b7600504be7a1 
-7bbc9500045a5767a0d9e2000045a52e -7bc42f00046f1527852720000046f132 -7bc67e00042af3c71ede28000042af39 -7bc7e6056130bef765a96f00604ab3c2 -7bc8a700044d1c3773bde2000044d1c3 -7bcaecc6cf5a8467dff06900a04b2013 -7bccb500047e45072298b5000047e1e8 -7bce9e0003caff87741acd00003caff8 -7bd4a70004be9ef7232e7400004b9ae5 -7bd8db0003d0b6976fd3e900003d0b69 -7bda390004addfb7f507a400004adde2 -7bdc86000425f87770ba280000425f87 -7bdd82d2182baf078b4282007048b5fc -7bdfa1a7cb1a1a27d115c100704c53dd -7be09300044de1b77a4ca7000044de1b -7be3f5000486f287b1068700003b603a -7be7411936c95f5726594800804cd66f -7be895748db5d6878615240010494dd8 -7bea31bae2ae2697af0a45007047d45c -7bec760004513ab7a5876c00004513a1 -7bee8000047164a7160e98000047163d -7bee8b0004747fb702c4f5000046a129 -7bef1181a5b31cc7bf77a400004bb9a2 -7befa80004a0edb73955d90000473da4 -7bf510fa4fadeda76af48500e047546b -7bf9c900046a12a7aa0123000046a122 -7bffa10003ebb9076f5d0b00003ebb90 -7c00a14680b46337c0c096006047e18b -7c02460004081dd76eca8a00004081dd -7c067e00045d4a97982b9c000045d4a7 -7c0a200003cb8137b1068700003b603a -7c0cc00003d07047c2700c00003d06de -7c14a7000445137752516800004450b0 -7c17cf00040923d727de0c0000409228 -7c1ae80004a3f7673e06190000490bf4 -7c1c470003feb35714d59400003f70ea -7c27bf0004649b47e66095000046498b -7c28e037cab57c573174850070475360 -7c297300044102f703ee4800003d89d7 -7c2d57fa612b21379bc61700204c8f5a -7c2e652fb5b96267513d3300204c571a -7c2fb6000421aaf770a6ac0000421aaf -7c31113f4694fa17ba96cb00804ccfd0 -7c3241b18e981ca73101a300004beb57 -7c343cc4c6dcb34753b0c20080498071 -7c35d90004792367b17bd0000047919e -7c3ca300043c96f774a7dc000043c96f -7c48203b8fee9d476276c300d047f7f4 -7c4d2300046bb6477ad4ea000046bb64 -7c531100043228677b166c0000432286 -7c55303ef79d86479607d000a0475218 -7c59720003eeaeb70e317200003ebe6a -7c63510003ed19b71ef0fe00003ed197 -7c69dd000471bf5771b75d0000471bf5 -7c6f05d4de597ac79c1cb5006047e576 -7c79cb0003f75017e6078800003c031f -7c7eab0004af2de70cc1e200004af298 -7c83f25788ac90972c840500604a5040 -7c8cd91c13681247c681ae004049ff63 -7c91465eb719ddf7955ad000004a9a70 -7c94720003932e77834f21000038758a -7c94720003932e77d33d18000038bd74 -7c94ea000456ffe7b9b0b300003cbae5 -7c95330004c2e5a7697ae800004c2250 -7c9970ee898d72f7f57fdc00804a2ece -7c99de94d7a396d7ff85f1005047963e -7ca2e80004a5fc07e719d700004a5fab -7caa9800046db72740e0a7000044fccc -7cae970003cfb1e7ea76c300003921cb -7cb40f00048f04f702d722000048f01d -7cc0aa0004201ba7724aba00004201ba -7cc1980004ba0087af0a45000047d45c -7ccd940003f76bf7759b6200003f76bf -7ccece0004bda4d7b0f67b00004bc82f -7cd1062324d8cd476caa4e00d04850f7 -7cd380c866c9d167139dde0040474740 -7cd6ce0004c70a57af0a45000047d45c -7cdc6b000407a5571d46ac0000407a36 -7cdd720003f504e7cbb8fe00003f427c -7cddf70003d1f57772471700003d1f57 -7ce643eb87c806576d5a5600304b413c -7ce7ac00047333571f915b0000472f1d -7ceb4600048942c78d5eba000040c009 -7cec20000467069773f0930000467069 -7cecd20003f6bbf775740d00003f6bbf -7cf49cc49b21bf5796ff7400d0482e4c -7cf9940003ed7f5706576200003ed7d5 -7cfb8fd1447eba67336b0a00304735a9 -7cfba80004999057442f2200004985ca -7cfdd00004a893c71e750400004a8932 -7d02e80004a32337a639e600003cf5b0 -7d02f94252bd1ad7315b9700d047297e -7d04c10003f6338772f4a400003f6338 -7d04c4000485b2a74faf6b0000485b02 -7d0801000428526774b4680000428526 -7d0d68e5c02986c748370a003048472b -7d1abb4ebab2427725ba6600204b5d56 -7d1acb0004ce7a07df274200004ce6f0 -7d1eca000422dd977bba280000422dd9 -7d1f9200044554972b017e0000444536 -7d29fe0004261e176ef62800004261e1 -7d2a070003f77e877028fe00003f77e8 -7d2b2a0f8b9ccf8710e737008048809b -7d2ca7000444f0b7834f21000038758a 
-7d2ca7000444f0b7d33d18000038bd74 -7d340d0003f7b607235bbb00003f79ae -7d3923000461a7c77454f50000461a7c -7d3c560004710367676c2f000046f7cb -7d3e3846dd52bbd799897b0040483ede -7d3f11e7f8a3090781dec300204839c7 -7d42a50004c5ae67ed153300004c5add -7d464500047acd2786fab1000043539b -7d50ef0003ddba7773f3fa00003ddba7 -7d5f620003ee7f277c140b00003ee7f2 -7d651f0003e973f773313800003e973f -7d69088deafec117c152ce00504c81a3 -7d6a4500047f6327cb53ac000047f62b -7d6b310004732d8755f73100004732ce -7d6cba0b1d9721b7adda6600d04accab -7d6d6b0004c5ae67ed153300004c5add -7d6e7e00042c22d77bf801000042c22d -7d6ee6f511cff36741870e00f04cb7d3 -7d79f700042e8a676fe6d3000042e8a6 -7d7ac200040fe5475687cf000040fe46 -7d7b3d0003d9e7a77c2e7200003d9e7a -7d80900003c90f576be5f600003c90d3 -7d8b42d814c04fe789320d00604991db -7d9a2318b94e5027e2821700c04caa3d -7d9cf50004500d8774dc7600004500d8 -7d9d9600047f6327cb53ac000047f62b -7d9ec2000419dfe7cde3010000419df3 -7da4770003d854c7713b3d00003d854c -7db4c99b5e938da77b3c0f00804967be -7db56b875d05c517a4c82f00f047e563 -7dbe0784d89c1ac7dd3c2f0040476938 -7dc60200047020c7b40490000046ebe3 -7dc7510003eeae770e317200003ebe6a -7dc75300044350377541f40000443503 -7dcae80004c51ea7481ae800004c51d7 -7dd264774758f8877f2b7300c04ce459 -7dd44f2c059a2bc7628fac00c04ca0b1 -7de4770003d350e771071700003d350e -7df645000483925731e5d9000048391f -7df7020004a984d7b3a9a400004a9842 -7dfef12870aba9876c8ff5004048e0a0 -7dff4e0003e807f76fef6200003e807f -7e01db96191d6c9710d1a300504c3f17 -7e0390f283b06ae728aee800304c2d9a -7e0ee80004a32337a639e600003cf5b0 -7e12c700048b4857a92337000048b428 -7e14ea00046c7a677caca7000046c7a6 -7e1b94c4e64240577b2bd000b047762f -7e1eeeb73b36861753e6ce00104b0ae1 -7e2060d7ed530f47f6fbd000c047a7df -7e21d2000476f267ea76c300003921cb -7e22dfe0300e18675b3aab00a04b5cab -7e28a10004c08d2787e7d100004c08cb -7e296f0004a93b6775696f00004a93b4 -7e2a9700043273076e73110000432730 -7e2b760004c2542742f5ab00004c2538 -7e358600041705478c45900000417050 -7e37530004423e2765b79200004423a6 -7e3ec2000419e7677649900000419e76 -7e3ffd03e46986c71234a100e04c57cd -7e4407917213d5d747e0a700604c6e26 -7e54f80003d390f7774b6b00003d390f -7e55c10004c6f6c7dab7d100004c6f57 -7e58d7a60177ea37bd45d000404a87e8 -7e5dd200047ff137f5c996000047fe43 -7e5ff2b0774ee917c868b700704aa326 -7e64cd011b328347e6adc700304bc907 -7e692c000471550775d3510000471550 -7e7222a80881da6746262400b048dcdb -7e72b100043dec4737b7f6000043dec2 -7e84be2e8b5f0d1725cdb900a0482893 -7e8c330004a43387c64b8e000043fa11 -7e9025dcafda7f67834f21002038758a -7e9025dcafda7f67d33d18002038bd74 -7e90aa6c72790b6701302f00704832f8 -7e91fe000425f87770ba280000425f87 -7e92150003db744774ca4800003db744 -7ea73f0003d9b1f77d5b1700003d9b1f -7ea9d90004849977d63dd20000484991 -7eafd927dd032fb740cb2200c049016a -7eb02d13620e7b67da89ae009049b5f8 -7eb096000483925731e5d9000048391f -7eb1f10004849977d63dd20000484991 -7eb3266814b555f7d8dde200804bb9d3 -7ebfac0004838427a559a1000047663a -7ecac69d35c91d371e66a500c04be07e -7ed760000458a1e7cd98f50000458a03 -7ed85c0004b13517cf523900004b134b -7ee1c00004a18c1728543300004a184e -7ee3b000044102f703ee4800003d89d7 -7ee3b2a7ca61229736c77500004ce2ec -7ee42f000473d6677448d40000473d5c -7eea8b0004749e6741cf5e0000472f16 -7eeb010004005c1777295200004005c1 -7eedbe0003a3d8176c30820000393484 -7eedbe0003a3d817834f21000038758a -7eee13000421aaf770a6ac0000421aaf -7ef80d0003e452577d50a400003e4525 -7efa0500043bb0c76e9c50000043bb0c -7efb6a7bbd78b18770838c00204a1246 -7f0216000421837773ca0c0000421837 -7f039ee5b70b03471f915b00e0472f1d -7f03bf0004636b47a335c90000460e70 -7f04ea00045981874119e30000448a71 
-7f09c900045a28371b58ea0000459ef4 -7f12b994013062a70705cf004048f697 -7f17bf00045c9f9754879c000045c2af -7f18720004a18c1728543300004a184e -7f1d19cf0653e857fc945c00f04b51c0 -7f22ac84ccb7cce7f38a4500d0476dc0 -7f234a4496c62d979a38eb00e048525e -7f26160004201ba7724aba00004201ba -7f2d7a00040e6af7a19bcf0000406c88 -7f2e630004340e377ddf5b00004340e3 -7f3a4c0003944af7360429000039346c -7f44b5000473d6677448d40000473d5c -7f4f510003ef3b178d1bbb00003eec5e -7f50f68542bc6f973d0c2c00104be838 -7f51060003f3689774b40b00003f3689 -7f51ae652ec3c767b4f63900a04ac8cf -7f546b00041a2157e8e86b000041a1e0 -7f5c2000045806a728e4a3000043d481 -7f5cfe0003f504e7cbb8fe00003f427c -7f612b00049e0eb779c1ae000049e00a -7f628000046e6627766c56000046e662 -7f62ec0003ef43f73b6cdb00003ecfdb -7f66d90003c2bd376925fe00003c2bce -7f66d90003c2bd37834f21000038758a -7f6e280003dd9d4775e36200003dd9d4 -7f79cf0004955b47022c250000494e4c -7f7a430003f6338772f4a400003f6338 -7f7ac30004738a874d65de00004738a4 -7f7c846b8bb6dcc7d834a700904c3f12 -7f88ba30410964673933ac001047ed77 -7f8c960004759d177e15a100004758af -7f91c000049e0eb779c1ae000049e00a -7f93aec1f7a8b0b7c6e86900604ac6f2 -7f97b500042aa1e758109a000042aa0d -7f985000043f6727ff28a3000043f66d -7f9cc10003e7e5d7753b5100003e7e5d -7f9d2400048e53f71b40eb000048baf6 -7f9e43554a3badc73ce7a40010472f27 -7f9e800003fa1de775719000003fa1de -7f9fad922d236c975085a10050475ab4 -7fa263000472ed2769d7c00000472e13 -7fa6b10004341d077e526300004341d0 -7fa6fb6bf3ac5f9786f42f0050473db7 -7fad90000408f12786cd1700003dc131 -7faf760004c0cb373cbe6600004ba29a -7fb230a2da3167e7a33a45009047d0fd -7fb2e18778a76ba7b80fed00c049e5f1 -7fb6e90003dd6b8775a4db00003dd6b8 -7fb9980004ba0087af0a45000047d45c -7fba240003d21617c79b4500003927b9 -7fbde200044b07671b476c000044b070 -7fc1c000049fdeb7c1ac0d00003f36ec -7fce430003e973f773313800003e973f -7fd8bc0004ca35a7a013220000491d12 -7fd97b00047675b70798b50000476752 -7fdad80004ca35a7a013220000491d12 -7fe0930004530187993bbf000044f1af -7fe2df40e1c13387b63a190030490b79 -7fe7510003ed1e2779180b00003ed1e2 -7fea7e0004bca7c7e8145c00004bb578 -7febb1954f79786799487900604ccbdf -7ff6e80004a263a73d0579000043dd4a -7ffa150003d1f57772471700003d1f57 -80025c0003caff8875867200003caff8 -80040999a0bbee28a0e733006047a8a4 -8011ee30c1db7858a5f7a800c04a0f4a -80129e8f3058fee867d4260030484be7 -8016d59a6c62bd38c5c60f00e04c2c0f -801b605b2de5965847e856006049a4ea -8025b900047f217888c374000047f20e -8026070003ddba7873f3fa00003ddba7 -8026b20004c2542842f5ab00004c2538 -8028d20003f780e86edbfa00003f77e8 -803673a5f87c9ed81b2451002048d4e6 -803a7e0004b4cbe8b727a400004b4a02 -803d52d9085979181cff1400204c9e0a -803f3300047fa408f679d9000047fa25 -80455a0004a98fc8b3a9a400004a9842 -804ee957d52d96785590a100e04c77c3 -80568f0003fdb348789a8a00003fdb34 -80589300045ba8b87698f5000045ba8b -805dd4eb1fdbf158df2e7400204bc8e5 -8060b3f6a04479088ec76c00e0456c64 -8061070f0e58cad8c81ec70090488ff3 -806c0b0003ed7f5806576200003ed7d5 -806e0785855b43c83e450a00b04c4775 -806eec0003ed19b81ef0fe00003ed197 -807125554acef268e9369600d048ca75 -80753c0004caeee8b676d800004caee0 -807e5c4823b6de883d75d900e048353b -807e7e000457b6f85e9e7e0000457b6c -80808fe04265e2b808da7400d04b2f3d -8089040004a98fc8b3a9a400004a9842 -8089b2b6eda9e3487491c700d04b7b30 -808fa800049ab1085aa70500004907ba -8093590003d5c2b838911700003d4cf3 -8094a40003ea6948792eec00003ea694 -809b84ae183b62280c0c8500c0478093 -80a0fe0003b4c8a83ea7e50000395bd7 -80b00b0003e2f1687f53fa00003e2f16 -80b2880004cd13f8d16acb00004cc043 -80b79c85f76708e86d99d2005047e172 -80bf3700042aa1e858109a000042aa0d -80c219000499421840cb22000049016a 
-80c33f0003d1d4e821835900003d1827 -80cd498179b5a4284af3ed00e04a64a5 -80d3511e8141d6e8382fbe00d04a1804 -80d420000460a7f87f74760000460a7f -80d52300045063485065d10000437f6d -80d8fe0004460a087f765f00004460a0 -80dc5c0004b4cbe8b727a400004b4a02 -80dfdc0004a49f3866015100004a4971 -80e2318f62215738616805001049e893 -80e6660004b1dd3841f4b700004aba35 -80e9310003bdb968834f21000038758a -80e9310003bdb968d33d18000038bd74 -80e9dd00040e6af8a19bcf0000406c88 -80ef620003ef42287f7c0d00003ef422 -80f00c0003f76bf876ebbb00003f76bf -80f3b30004cc1ff8513d3300004c571a -80f693154c9a70c8834f21005038758a -80f693154c9a70c8b1068700503b603a -80fa9e0003944af8360429000039346c -80fa9e0003944af8834f21000038758a -80fb20d3e7d0879839c9e200f04b23f4 -8106c30004920f686d99d2000047e172 -8107f6000436b7687f94a30000436b76 -8108493aa69f1d78e042b50080473140 -8113ed00049ab1085aa70500004907ba -8125860003ff86487fa70100003ff864 -81282000044958e8d9052300004494db -812f04c9adfcd708b351d20040473e7e -8130b70003db744874ca4800003db744 -8133390003a2df483d1162000039c575 -8134db0003f504e8cbb8fe00003f427c -813663000472ed2869d7c00000472e13 -81382d5d83704788e939f1003047bc39 -8139310003bcfd08ea76c300003921cb -813c76000444def8c45a5f0000444d4f -8141a33a49ce95b8b6568b000047fd7b -8145900003fa3e88a3306b00003fa3e6 -81471499889cff18bc2eb200604c19ee -814c01000423db282945d60000423c40 -814c47000419b2f8801eac0000419b2f -8156ec0003ec89887a4bfa00003ec898 -8156f30003bdb7d8d48899000038bd74 -81583bcb62074c08bf23be00304a6902 -8158c10003f6bbf876db6200003f6bbf -8159720003f7b608235bbb00003f79ae -815a75d26b77d6184ad77300304cdae7 -815c7c00043687b8d4dbdc0000436878 -815cd39d9d954fa8aad14c00a0473115 -815d90000415db9879b2ba0000415db9 -815e2800042d90f88005db000042d90f -815fb0000445137852516800004450b0 -81625e00043544f87fa7e0000043544f -8163510003f79ca880313800003f79ca -816438e493f648183f487d00604ab9de -81646b000420143878faac0000420143 -81725e00043a75788d5eba000040c009 -8175750003d812487a640b00003d8124 -81757a000400a5d851706b0000400a56 -8179dd00040658788421dd000040652b -817c980003cff4487ffa2000003cff44 -818ba2d139cd49e8d466db0030496337 -8195640004920f686d99d2000047e172 -8195c90004aa0c08231abb00004a949d -81a347b480153e389279f400e04980e1 -81a357492bad053863f15500b0494f6d -81a6580003d0f718c79e1b00003d0f5b -81ad483be7ba87882c003300a049e5fc -81b7a00004285268763bea0000428526 -81bfd200046cd4584c8e0900003d01b7 -81c737000488f7287b51e20000488f64 -81cb9c00045a5038a1af9c000045a4fa -81cd2300045162688080ea0000451626 -81d0b500047680b899017b0000476805 -81d7a563452d0f0847e85600f049a4ea -81db9c00045c3ee879c170000045c3ee -81dbae000421837873ca0c0000421837 -81e0b496dc16a4e8cd4096005047f619 -81e1720003ed19b81ef0fe00003ed197 -81e67e00045f3db8809095000045f3db -81f2f20003fa1de875719000003fa1de -81f2fa2e09273108d3c4ac006048bc72 -8206e90003dd9d4875e36200003dd9d4 -8212070003f3689874b40b00003f3689 -82181ccc1e84ebd8d496ce00204b5774 -8218b7c746f02ee8871964003049567a -822b3300047e9978cb5e9e00003cbe13 -822eb7ee129ee9e87c40510030485c9e -8234d400047680b899017b0000476805 -82360c0004044f28759b6200003f76bf -82391758db6b52a80d302c00e04c27b9 -823a280003e7e5d8753b5100003e7e5d -823aa50004c46c983386ce00004c46b5 -824abc0004c46c983386ce00004c46b5 -824b3fed77bed688d1ea1d00c04ac195 -824c280004b286f88e4c1300004b2869 -824e7b0004b1d74850947c00004a1491 -82586d713f7a2da86ea4b5006048094a -825ae1197bb0aae826d5c700504ac3e9 -825bcf0004069358c41e0c0000406931 -82608e0003dd6b8875a4db00003dd6b8 -8260d4ece49ab9f8121bd0009047959e -82635b00043f82f8069e5e000043f82b -826638fad59ab7e89be30a00b047fffc -826afe0003b32228834f21000038758a 
-826afe0003b32228c79b4500003927b9 -826e3b000450d5d87a2ea20000450d5d -827d5500048e5c78be4e4e000048e123 -827f9c00045bbc4879a420000045bbc4 -828943000419e768779f010000419e76 -828d7000045f9818810020000045f981 -828ddb00042852c874b4680000428526 -8297a60004cb4778d2afac00004cb455 -8299230004608e387a30f500004608e3 -82a8880003d390f878b17500003d390f -82aaebc1dd929ae8f9cbdc008049fe99 -82ab98cd676a94f88d5eba005040c009 -82ad45118ca369f86405d7009049516e -82ad510004a3f7983e06190000490bf4 -82ae7d00043228687b166c0000432286 -82b628beeccc8ac8abb5ae009049f440 -82b8a10004c5ae68ed153300004c5add -82bf066075c50db82818b5006047b7d9 -82bfdf0003caff8875867200003caff8 -82c4c20004999058442f2200004985ca -82c6480003d99098817aaa00003d9909 -82cd170004c4b308e5e20f00004c4b22 -82d7ffcbed9f0c681e750400604a8932 -82d85000043d4e38901b5b000043d4ad -82d8bb3794cec0782e7a7400c04af822 -82da2c00048e5c78be4e4e000048e123 -82e13ae902ec80589fcca800104bc359 -82e38d24cc2e4d98c5ac850040483327 -82e3bb0003ef3b188d1bbb00003eec5e -82eb74247d580d5848cfd100f04c0a89 -82ec980003d99158815b3d00003d9915 -82eebb02225eaa3835842800804bb5c5 -82f1520004178dd8818c6b00004178dd -82f21022423b71386a431200804c9a20 -82f402000395a1e8eab7450000392d66 -82f88500047536183174850000475360 -830e62d09c7c308890d5d900e047e6ac -8318f1b8fcf49678b29333003047e486 -831aebf5aedd83787c82c7006048d852 -831b3bdc6a731c9838e27e00404b1649 -831b7400047b6328917dde000047b62f -831b92000445e35881f17e0000445e35 -831ead25249ddef8f01dd9007047a9a9 -8321fc453f4162e892f0eb004048c9da -832275000436652815a67e0000433f4d -8323cf00041112385bfeba00004110fd -8325c00004a43388c64b8e000043fa11 -83356b0004c6f6c8dab7d100004c6f57 -8336480003da616899e00b00003da60a -833a870efc7bbf085330d400d0480268 -8344aa0004005c187893cf00004005c1 -83462d4ece356a48f77dd900104781f4 -834f12a2d6ffd5482da81300d04b2eef -834f510003ee7f287c140b00003ee7f2 -83552b00049ee478a69f79000049ec54 -835f51000471378882042f0000471378 -83605e93a58b02b8392a8b00d0474367 -83645c0004b36468f4dc1300004b363c -8364980003d9e7a87c2e7200003d9e7a -8364d20003f7b608235bbb00003f79ae -8372e90003f76bf876ebbb00003f76bf -837a7e000423db282945d60000423c40 -837aa200044de1b87a4ca7000044de1b -837adb000390c2f881dcbf0000390c2f -837adb000390c2f8834f21000038758a -8384ea0004519bd87a3de200004519bd -8385230004494e28cfd476000044944b -8389537d4cdaea78f2880500f049b18b -83905dbb8390c838f5093300604bf063 -839801000422dd987bba280000422dd9 -839dc900046bb6487ad4ea000046bb64 -839fbf00046c8908966893000046c653 -83a3750004cd3548a6c9be00004cd34c -83a5fb0003d0f718c79e1b00003d0f5b -83ba200003ce8068a5ee2000003ce7fb -83bda100047553e817d485000047553d -83c08500048222386f928b00004821af -83c7f149728387c853061900d048f892 -83c8280004bbdca80b719800004bbda0 -83cac200040d4eb882c20c000040d4eb -83d47600045a5768a0d9e2000045a52e -83d6730004c8f7583d0c2c00004be838 -83dfc10ee8a507d8a0c65300804c714c -83e3f00003d12f289facc000003d12d9 -83e4f50004611cd861f4ea00004611c2 -83e7790004a51d382c840500004a5040 -83ed5100049ef53896393100003cf5c7 -83fcf7e087073a881073ac0010478feb -83ffa80004a13818b5a43300004a1334 -84018600041112385bfeba00004110fd -84060d000490cca800a9240000490cc7 -84080560eb5b47983c0cb600d04a983d -840a1d0004b9d828e26e1d00004b9d06 -840fda302f85c2d8f4a4dc005047252b -840fe95212a2bbf8a3eee800604c6a17 -8414897ea65f0d1857812400604956ff -841cea00044b4ae842f5db00004285ff -841e0d000495d35815ee240000495cda -84202f0004768108ee09d200004767e8 -84209500046c9458834f21000038758a -8422720003d9b1f87d5b1700003d9b1f -84291f0003ed1e287a54fe00003ed1e2 -842d5200041c5e7882c447000041c5e7 -842d7000046336e84b68f50000463368 
-8431dd000401894882e20c0000401894 -8436cfdde0cdbaf896052b003049a6a1 -8437bb0003ddd08882d59400003ddd08 -8439a500042d488882cc9a000042d488 -843c2b8c811edab82fa3a800d04a21ff -8440d400047b6328917dde000047b62f -84442158685adb48d8ad4a00b04a85d4 -8446b100043a75788d5eba000040c009 -844ddb00042c22d87bf801000042c22d -84524e0004a98fc8b3a9a400004a9842 -8452e80004c6d2284119e30000448a71 -845506c4a8a62128c025d90010475912 -8456ac000406180802135100003eb1da -8458a80004bbdca80b719800004bbda0 -8473260003c18d4883123e00003c18d4 -8473260003c18d48834f21000038758a -84740c0003f6bbf876db6200003f6bbf -847d750003dae618830a9200003dae61 -84842dc3d5dbe868bfeed000304ab19c -8489dd00046eaba8829daf000046eaba -848a560003d4e8a8282c0b00003d4e23 -8494fc0003d1d4e821835900003d1827 -849c7611246029e890c99800504b9f11 -849cbb1ce7b058b8a239a800a04891f1 -849cd13d361c3e489786b200404c21ef -849ecef1a5019aa894327400304b9ef4 -849f01000408f12886cd1700003dc131 -84a0050004a18c1828543300004a184e -84a00f0003fdb3487a660c00003fdb34 -84a4b50004768108ee09d200004767e8 -84a72100038758a8834f21000038758a -84ad2b0004a39ed8c667ed00004a39df -84b4fa3cec8e0ea8bb01c100d04c77d5 -84bd3c0004ab72589312ec00003dd2c3 -84bfa10003ea69487a880b00003ea694 -84c2910003c25eb8308b2600003c1900 -84c9d90004736588f3980d00003e80e8 -84cb6ca2b6f5841871c59400304a9037 -84cd520004199b2883444700004199b2 -84ce51029caf1458b680b50080481635 -84ce9e0003ce4e684f415900003cbc4b -84d161e8dc624398bfeed000304ab19c -84d1c44eed648a98262b7500c04ce5f1 -84d5900004112998bebd86000041126f -84d7a800049901886405d7000049516e -84d7d10004c4947835951700004c4928 -84dd720003e452587d50a400003e4525 -84df5fc150578f9850026e00c04ccf62 -84e0020003b0db28360429000039346c -84e0020003b0db28834f21000038758a -84e30a000476f5e84c20850000476f21 -84e7e500039b655805baa3000039ae0d -84ee4745e45a5b08e28ac300e0474552 -84ef550004b88b18fec3a400004b57b6 -84f88e1655f6506899ce4e00d0488860 -84f9750003d812887a640b00003d8124 -84fb6b0003d28d982f298f00003d28d4 -8504ac00048e1a888b10ac000048e072 -850bc3c7cebd41a88bdcc4006048910a -850cd70004285268763bea0000428526 -85127c00046f1528852720000046f132 -8513790004a2fb08834f21000038758a -8513790004a2fb08ea76c300003921cb -851daee7c2ac03b83ea5de0090474418 -85221752676f6ab8b61224004048c3c8 -85255200040fa9d883e186000040fa9d -8525940003f463b8e2a00b00003f3408 -8526c300047ca2f8e1128b000047c4dc -85340200042bb2a883e5a5000042bb2a -853564f3c7529f780af4a700d04c58ac -85450d90af46e7e82f812b00d049cd8b -85498261c4ec062844205c00e04b17d6 -8554b70004ab72589312ec00003dd2c3 -8554c74caee9ee68f2730a00b047f566 -8556c200040652b88421dd000040652b -8560fc0003d195a884076b00003d195a -85615100049ef53896393100003cf5c7 -85630a1d0b1206d87b3c0f00404967be -8564770003d390f878b17500003d390f -8564e800043438c88403dc000043438c -8566a8000415db987b206b0000415db9 -856914000419e768779f010000419e76 -856d7a00040c0c0883e301000040c0c0 -856de200045806a828e4a3000043d481 -856eb7c03a1c68f82c840500d04a5040 -85704700040ae8984f2990000040ae83 -8573df206d41ced8e1059800c04b385a -8576c0b2ab0c0c285df3a600904ca540 -857d2300044a0c3879d76c000044a09e -858711000432560883e9dd0000432560 -858952000407a5581d46ac0000407a36 -858d170003d812887a640b00003d8124 -858de2000489ca38975c510000489b26 -858e7e00046c7a687caca7000046c7a6 -8592260003d812487b973d00003d8124 -859ec3000490cca800a9240000490cc7 -85a1d10004340e387ddf5b00004340e3 -85a5d200047971d8eb4da1000047950b -85a940000446c59883dd780000446c59 -85af0dd6e85b54687fc53300904c5af4 -85b3ac000483925831e5d9000048391f -85b67e0004b602484ac41300004b4cb1 -85bb1c8d7e0ec868e1045c00c04b2374 -85c09500042014387a64470000420143 
-85d2a5d4a6f43408790f0a00c047a8e4 -85d3d00004838668a559a1000047663a -85d449f49f37af387a4c05007049d0dc -85d6abbf36da09e8960f250080491ff6 -85d6bc0004c1f2f856082800004bd838 -85da460004005c187893cf00004005c1 -85dc210898d3a028d9937600704c3dad -85dd5100049b34589aad2b000049ae58 -85f45ace76b243784df8d4003047515d -85f9c00004a3f3083e06190000490bf4 -85fd170003d2ea08d6af5900003d2e96 -8600aa3285a12e483f487d00604ab9de -8601230004614d8825db080000445f86 -86017a0004050b9884a30100004050b9 -8601e200048e13883c069d00003a6629 -8601e200048e1388834f21000038758a -8602b10004341d087e526300004341d0 -860b4e0003ec89887b88d200003ec898 -8613c0028ba92b28af0a4500a047d45c -8619ab0004be61a8b45e0f00004be5ea -861a3ed77c9f656832195100a04a7359 -8623790004a2fb08ea76c300003921cb -862a8a7cefb19b081b40eb000048baf6 -8635900004148528dcb2ba000041484d -8643600004649b48e66095000046498b -86497000046a89089628a7000046a88a -864a9ac9603953081602ce00104c6057 -86535f0003f80e48611c9100003f80ae -86582f00048024c870d9f10000480249 -865f8fde9b1a7fe882221d00004b17ad -86616b0004c15558f83d0a00004c1500 -86639c00045f3398850b60000045f339 -866485d64182a2483155ae0010499ede -866c444185f9a5080d852b00604a26d8 -866e5e000390c2f881dcbf0000390c2f -866e5e000390c2f8834f21000038758a -8675648ff03e4038723b7000a04a203f -867a070003ebfb78531c0d00003ebfae -867c9600048041182525f100004803b3 -867f50307e93c4a8481ae800f04c51d7 -86802000044e88f8e8f4e800004385a3 -8680a300043f82f8069e5e000043f82b -8684560004a222682fa3a800004a21ff -86880b0003d3643885057500003d3643 -868cd400047a93f8105f0a000047a896 -868d310003ccebf885019f00003ccebf -86961d00043228687c53110000432286 -869a920003d854f870058f00003d854c -86b027cdeb30f0d898e0eb0010488523 -86b02c245fe6d4585fc41300b04bc688 -86b2200003cb9bb8bfda7200003cb9ad -86be3bcbf9fc5d5823ccd4007047859e -86c00d0003e18168855b6200003e1816 -86c73f0003d1c3a8efd40c00003d0aca -86c80f00048e5c78be4e4e000048e123 -86c9940003ee5d188560d200003ee5d1 -86c9a30004865f7890d5d9000047e6ac -86cba40004bb561813f4a800004ba11d -86cc760004be61a8b45e0f00004be5ea -86cfac00047cb8184c9dd2000047b61b -86d0d3d4b7e3db38d302f80020495f06 -86d12b0004a08138d683dc00004a080e -86da6300043d4e38901b5b000043d4ad -86db9c000467077873f0930000467069 -86e5c000049ee478a69f79000049ec54 -86e60eb90029fe785dddf1008047b798 -86e9f80003ed1e287a54fe00003ed1e2 -86ecd20003e572b802f35100003e5717 -86ef630003d005f8856fd300003d005f -86f2480003d2301885591700003d2301 -86f9900004069358c41e0c0000406931 -87018d000433b6f88599060000433b6f -8706a30003c18d4883123e00003c18d4 -8706a30003c18d48834f21000038758a -87080b0003d854f870058f00003d854c -870d940003e572b802f35100003e5717 -8716720004a7e3085317be00004a6b8d -871a8a0003ff86487fa70100003ff864 -871dbe0004cc29a8d33b3f00003d39bc -871e9e0003933af8eab7450000392d66 -87217900043b3028df90f800003d990b -87282f00047cd7386adbd0000047cd68 -8736cb0004cc29a8d33b3f00003d39bc -8740789997169a58864ea100a04b16a0 -8740897ed6312a48aa761900904987ac -87442f00046f1328852720000046f132 -874526dea3cbe718c883740020484745 -8748a80004af2de80cc1e200004af298 -8750d70003fdb3487a660c00003fdb34 -8752760003c90d686be5f600003c90d3 -8753ae13874c58d8c377d000a047e15b -875884ea6bf8f578488e8200b048d1be -8759ab0004c58aa8e832190000496f76 -875e260003d9e7a87d7b3d00003d9e7a -8764b500047cbc88b1068700003b603a -87710a0004344e788652b100004344e7 -8775686b3a5e68881236a500404c1ab5 -8777620003f79ca880313800003f79ca -877b550004b88b18fec3a400004b57b6 -877cdb0003ef42287f7c0d00003ef422 -87806b000419b2f8801eac0000419b2f -8789cc0003ea69487a880b00003ea694 -878eec0003ee3b281fef5100003e66e9 -8792070003ee7f287d5f6200003ee7f2 
-8793119df24dd60865065600a04bb1f7 -87980d0003f76d6869f59400003f76d3 -8798850004749078b75bd00000474827 -879ce7de5062752863ef4200104ccb23 -879e450004803ca82525f100004803b3 -879e5a33c2baee1878b83d00f04c87b8 -879fd00004838668a559a1000047663a -87ad590003cbada886369e00003cbada -87ae890003f88378a3a61f00003f8833 -87b6ce0004af7948e9a7740000474812 -87b8d26417362828a229d800104ca072 -87b9c00004a08138d683dc00004a080e -87bc90cdfbafbb288b428200e048b5fc -87c57e83d93b2618d683dc00804a080e -87c9ba000422dd987d1eca0000422dd9 -87ce94f87224495856082800b04bd838 -87d09f0b1846a4b885f4bc00b04c8850 -87d5e60004cd33d891947900004cd33a -87dad69b4a783ef813a95500f048f4d6 -87de31eaf978dfb813f1f10030476ace -87df1300045060e825946b000040834a -87dfa60003ce4e684f415900003cbc4b -87e0d20003f780e86edbfa00003f77e8 -87e12400048e53f81b40eb000048baf6 -87e1de00047a8d28c0edb9000047a8a7 -87e3b6000415db987b206b0000415db9 -87e68b0004749078b75bd00000474827 -87e71a07378f8738e5431400104c9888 -87e9960004803ca82525f100004803b3 -87e9d10004355b8823e80b00003db666 -87ed6d3713dc4588a5c833005049bf8a -87edb0bdc6d03ac80c060f00004c294f -87f5e200045981d8f9ad230000459819 -87f6bd7546ec91588a937600204c4d0f -87f6cc0003bef008b1068700003b603a -87f9534ec154bae80f50eb00c048d6b4 -87fdef0004460a087f765f00004460a0 -87fff736752eff68b61224009048c3c8 -880047000411f418aaed7a0000411f3e -8802150003d9b1f87ea73f00003d9b1f -8810900004718f4834b9af00004718ef -88112900042d90f88005db000042d90f -88189000046ea718396a7c000046ea1a -881a0c00041a2158e8e86b000041a1e0 -882ca70004c59288e832190000496f76 -8833170003d99098817aaa00003d9909 -8834880003d812487b973d00003d8124 -883cec6a845ebbc8c5fb7600d04c48ec -883dd1d2f603e8985df9c100504c18b7 -883f9c000460a7f87f74760000460a7f -8847a800049f3478d8d02500004974a5 -884beea9c59cbdd80e6d6b00d04c46de -885a5f0004450b9887235300004450b9 -886b790004a1c04807038c00004a1c02 -886deb42c8d6c64832ed7b0040482c4a -886e6300043539b886fab1000043539b -886fd400048ed5f8ab0124000048ed48 -88721300042014387a64470000420143 -88727e0004b583b8fec3a400004b57b6 -88782f0003f88378a3a61f00003f8833 -887c09439ce7ed08e7dca700004c2e51 -8882cd0003cff4487ffa2000003cff44 -888c0b0003d82728f8fa9200003d826e -888cb30003ce4e684f415900003cbc4b -888d8e888e12072883376d00c049ea44 -88958f0003d99158815b3d00003d9915 -8898a700046c9758f560a7000046b995 -88a3d10004c46c983386ce00004c46b5 -88a9cf000495f3a8d302f80000495f06 -88acea000466ef08d4179c0000466ed9 -88b05175d23af35819bdac00d038e0dc -88b36c00045162688080ea0000451626 -88b8934fde51b7e8ba403300f049e4e9 -88bc0f00048ed5f8ab0124000048ed48 -88c42000044b4ae842f5db00004285ff -88c7d0a5967e1ab84dff6d003049fda4 -88c7e0c18eb93e98b4e15100b049eccd -88cce800043544f87fa7e0000043544f -88d152000411f418aaed7a0000411f3e -88d59a00042306b8e849a50000423041 -88d6594c6e89e9d8ae29d000f04cce2b -88dd7900043679f84b0f6600003d01b7 -88ddd700049e475836bfc2000049e472 -88df3f0003d5c2b838911700003d4cf3 -88e5c10004becd9846e96b00004becd1 -88f17000045f3db8809095000045f3db -88f1ba00042c22d87d6e7e000042c22d -88feca00042306b8e849a50000423041 -8906e90003e452587ef80d00003e4525 -890b0100041a2158e8e86b000041a1e0 -890c960004749e0841cf5e0000472f16 -890d7900043687b8d4dbdc0000436878 -890f046eed2f62387f7e7b00d04affe0 -89189dc05226d2c85ce496000047ecc4 -89235330b2cc52a828330200a04abd66 -8929a8000487668875b5b9000047fc39 -892c0b0003ec1b780e317200003ebe6a -892e6c000430d9088612e20000430d90 -89337000043228687c53110000432286 -893a070003ec89887b88d200003ec898 -893cb500047a2bf88401b9000047a28f -893d79000436b7687f94a30000436b76 -8941520003fca89887d86b00003fca89 -8941e2000463b0f88737bf0000463b0f 
-8945d00004aa2c78e148470000420966 -8945dd0004178dd8818c6b00004178dd -894b390003998bc8c511f700003957bf -89512400049722c8d76d240000497227 -89573d0003d28d982f298f00003d28d4 -895f374e10f42658023fac0060481cf9 -89672572b547e3386a5a0f00f04c58e1 -8968f2727a1c54a85ff83300a049d76f -896bfa0003e5101887dcdb00003e5101 -89708500047acd5886fab1000043539b -89732200049722c8d76d240000497227 -89740b0003ee2dd83b6cdb00003ecfdb -89749dfc2a458d7863787600d04c446f -8978720004a18248382fbe00004a1804 -897cf80003dbf5f8afcffa00003dbe8d -8985d00004cd3548a6c9be00004cd34c -89877daa0ef88db83314b50010475ae4 -898cd400047a2bf88401b9000047a28f -8990da9775260268481ae800104c51d7 -8993dd5ed444b34813a95500a048f4d6 -8994f5000468746887f9e20000468746 -8998c2000490cca800a9240000490cc7 -8998db0003ddd08882d59400003ddd08 -899fac0004849978d63dd20000484991 -89a0a80004bb39285aeaa100004b64f3 -89a76000045f9818810020000045f981 -89af1b00044840b83d0579000043dd4a -89b16a06dd6901782115ab00404c7446 -89b336bab2001fb8336b0a00404735a9 -89c52e67667b41e8e472c3008049585d -89c9f4000445e35881f17e0000445e35 -89cbe1711bf1a908adfe7e00004af5e2 -89d4b61b444cfd48022bc2003049a87e -89d8960004749e0841cf5e0000472f16 -89dbe17cf66658782963330050483ccc -89dc5f0004341d087fa6b100004341d0 -89e0aa7ad30350f883879100104ce7c5 -89eb590003dae618830a9200003dae61 -89ee15aa73dbf1487139cf003049016d -89f17b00047be2a8c15333000047bd67 -89f293b9e157e838ba876b00a048956b -89f9330004becd9846e96b00004becd1 -89fc790003d9e7a87d7b3d00003d9e7a -89fda500042b2578d94402000042b253 -89ffbb0003dc131886cd1700003dc131 -8a004365ded06d48ff98850040478439 -8a023881f42d2b58bc70d4005047a870 -8a02d9b349d460e8c4003300e049de50 -8a02f8e34fa86b48fc353c00404cbb89 -8a04050004a3b0c8dfdb7900004a3b09 -8a07fa0003efbd288d800b00003eeb74 -8a0d900004340e387f2e6300004340e3 -8a0f7c5dd7f3b4882fcece00f04c7af0 -8a108bc442b138782ecab200b04c3ba0 -8a1428f54d5049480d302c00e04c27b9 -8a15060003e2f16880b00b00003e2f16 -8a15c10004be2a48d17abc00004be28e -8a1ac3000495d35815ee240000495cda -8a1fcf00040d4eb882c20c000040d4eb -8a2eb100043f4c68e5e3e0000043f4c2 -8a310a00043687b8d4dbdc0000436878 -8a34be0003c7ff98308b2600003c1900 -8a34be0003c7ff98834f21000038758a -8a39f80003ee7f287d5f6200003ee7f2 -8a3c0f0003d12f289facc000003d12d9 -8a40a700044521c8c9e17e00004450b0 -8a412c0004705b0888c30900004705b0 -8a421a483d747cf844aed500904460d9 -8a4487e7f3b4fa88ec5f7600f04c313f -8a45bc820321a47802814a00104ab3dd -8a476d0004a2975878436d00004a2962 -8a48f80003d1a35888f00b00003d1a35 -8a4dc11b81e39bc8454dc100c04c180c -8a54fb47af33cfd8c2c9ab00504c306f -8a6465000422dd987d1eca0000422dd9 -8a647200049a98d844c3a8000049a983 -8a66d1388144d25834fe1900d0495eed -8a66ec0003eea1a851ceec00003ebfae -8a725c294f93dd0872937600d04be8fa -8a7f6000046846c8e148470000420966 -8a8186000412246843f28a000041223f -8a8370a4e511a9f8fba82f004047f7da -8a850d23bf2d7c082eb7a80070495f89 -8a86390003d9b1f87ea73f00003d9b1f -8a89330004be2a48d17abc00004be28e -8a926b0003c7ff98308b2600003c1900 -8a946800042d488882cc9a000042d488 -8a95e20004b4cbe8b727a400004b4a02 -8a9eb1000436bf888955c50000436bf8 -8aaaf80004920f686d99d2000047e172 -8aaec2000401894882e20c0000401894 -8ab97b00047ee4d8910dd2000047dfb9 -8abab200043312088cad2c000043311e -8abf990003c90d686be5f600003c90d3 -8abf990003c90d68834f21000038758a -8ac1e20004558358dfb53800003e4255 -8ac2fcd2212896f8d7b7d400d048e6fb -8aca480003d195a884076b00003d195a -8acf09000471378882042f0000471378 -8ad121b42d7f8cc8a3eee800d04c6a17 -8ad19000041c5e7882c447000041c5e7 -8ad1b2000472f4d892999f00003cb97e -8ad85c32f9af55d8a82d2b00d04a33cb -8ada880003d0bae8918e5800003d0ba5 
-8adb6000044a0c3879d76c000044a09e -8ae29b0004ae58681a42740e02e76099 -8aedd6000425a9789349d600004256b4 -8aef08b4b1176cf8e966820080489127 -8af43f7979e8779848795a00b04a7f96 -8af8690004b1f2c84d05700000461e38 -8afbae0003ff864881258600003ff864 -8b08fe77cabf84d8ea5da300b048dd18 -8b18930004582178a9212300004581bf -8b1b020004abae3841f4b700004aba35 -8b218f0003d994d8815b3d00003d9915 -8b23473ff604dd28be5b0200604a90d9 -8b248500047acd5886fab1000043539b -8b262d10233563c8abfae800a04a077a -8b2d7a00040fa9d883e186000040fa9d -8b35d28934e3a3d81d85c900a04a8601 -8b48d20003efdef80e317200003ebe6a -8b4d350003d12f289facc000003d12d9 -8b4e9e0003cb9ce86a7b3900003cb819 -8b50fa883e24a708b7d8eb00b048aa26 -8b52ba000412246843f28a000041223f -8b54280004b5cb885b3aab00004b5cab -8b597567a8ad72c8578cb5003047d197 -8b62f4e9a9ab7fd8b31af80050497148 -8b6b3300047680b899017b0000476805 -8b777f069e156e88d6a5d000c04a8d25 -8b78db0003f45d18c2ebbb00003f45cd -8b840c0003f79ca881635100003f79ca -8b89f100047a98f819bdac000038e0dc -8b8a090003d0fd088a176600003d0fd0 -8b8bb979b17a5dd8b6ab7400a048346a -8b8ce804fac78858b75bd00070474827 -8b8d0a0004c80f98fc09c100004c80f7 -8b91900004199b2883444700004199b2 -8b9646000419b2f8814c470000419b2f -8b96c30004768108ee09d200004767e8 -8ba7ca000472f4d892999f00003cb97e -8ba9f100047d3b68c15333000047bd67 -8baa4bba605d4988bef2bc00904c4b55 -8baa84f101da13280116b200704c23e0 -8bade2000457b7385e9e7e0000457b6c -8bb00c0003ef422880ef6200003ef422 -8bb29800046e9898be0280000046e980 -8bba5700042c22d87d6e7e000042c22d -8bbcfe68cd360bd818b77500604cced9 -8bc0c10003e452587ef80d00003e4525 -8bc76b000485fcf821aa7e0000459dc1 -8bc9f100047fa408f679d9000047fa25 -8bcc9300046c9758f560a7000046b995 -8bcd30760a0295885ed30a00d04743ed -8bce230156af236866c8d40040481a9c -8bd9380003f780e86edbfa00003f77e8 -8be7154b490ebf0828f0ac00404859c9 -8be7a60004cbe3e8171cbc00004cbe3a -8bec6f00048e5eb8be4e4e000048e123 -8bed200003b9d4d8ea76c300003921cb -8bf513ab89ca7b0806edb9003047bb7f -8bf9cfa80658f4a89279f400f04980e1 -8bfc958e44c1a4d887abd0004048468c -8c00850004761b4863e1b900004736f3 -8c008500047cc3f86b86c3000047cc38 -8c06ac00040c0c0883e301000040c0c0 -8c12ca00042bb2a883e5a5000042bb2a -8c17390003ccebf885019f00003ccebf -8c187c00043438c88403dc000043438c -8c1b0100041a6fd88ac990000041a6fd -8c22cb0004cdd9984f6c8200004cc801 -8c2b53f40e7269a86fef7600904c2c24 -8c2bed0004a2320803bae800004a22eb -8c2eac0004050b9884a30100004050b9 -8c2fec388445fdb84072ce00b04c4a14 -8c30a16ac4e0d038568f7600e04c51c1 -8c39a10004749078b75bd00000474827 -8c3a150003d9909882c64800003d9909 -8c4093000451c9e8834f21000038758a -8c4093000451c9e8c511f700003957bf -8c53b7b067aff2e8a2776d007049fe67 -8c58280004b21638de966600004b215b -8c5dec0004ca35a8a013220000491d12 -8c639c00046cac288ae760000046cac2 -8c656b0004c2542842f5ab00004c2538 -8c704bbf49b0f4c8c01db9001047e249 -8c785f0004340e387f2e6300004340e3 -8c7ba000042d90f8815e28000042d90f -8c7d6f0004abb768a7a5a400004ab782 -8c811b0004cc80784f6c8200004cc801 -8c882b0004341d087fa6b100004341d0 -8c8a720003d3643885057500003d3643 -8c8d819cbc3d8f58e261d000f04a9bd8 -8c8ee2000432560883e9dd0000432560 -8c8f510003ec60c88b693800003ec60c -8c90db0003ee5d188560d200003ee5d1 -8c974e0003e2f16880b00b00003e2f16 -8c9e0c0004112998bebd86000041126f -8ca2720003d862e88b6c9800003d862e -8ca55d0003c1b7d8834f21000038758a -8ca55d0003c1b7d88b029000003c1b7d -8ca86b000446c59883dd780000446c59 -8cab430003cff448817c9800003cff44 -8cabf00003d005f8856fd300003d005f -8cb75d00047164a8160e98000047163d -8cb7b500042328e892999f00003cb97e -8cb9140003c1f838121a7600003c1f7b -8cbb7000049a7518ab976d000049a724 
-8cc3510003f76dd88b713800003f76dd -8cc87600044115488b1c10000043299a -8cc8880003d9915882ec9800003d9915 -8cce42b75b983ac8da825600004bdef4 -8cd0d20003f463b8e2a00b00003f3408 -8cd2df00043299a88b1c10000043299a -8cd943c5e2c0bb682fc524007048edbb -8cdb590003d2301885591700003d2301 -8cdca80004b21638de966600004b215b -8cddb7e922f5e0383d75d900b048353b -8cde24000486c138b1068700003b603a -8ce1d00004abb768a7a5a400004ab782 -8cecc20004974a188ec76c0000456c64 -8cf24c02e9b5e1c8e392820070486558 -8cf516e14b0076587d88ac00b0485790 -8cf7b55c83e8fc08e8167b00304b0270 -8d09720003e18168855b6200003e1816 -8d0eaa0003d82728f8fa9200003d826e -8d0edb000498fd082d09f400004967bd -8d0f03c298c40d582b94dc00704726f1 -8d0f1200040a1738834f21000038758a -8d0f1200040a1738b1068700003b603a -8d1037ead327d9f8bef2bc00f04c4b55 -8d1afff36afdb4a84a677500b04cc3de -8d2155715bcfc3e8d2e8ac00004851f7 -8d2aba0003feb35814d59400003f70ea -8d2ca3b5996bf528a3c3a400904b087e -8d2fe500039e80d8834f21000038758a -8d2fe500039e80d88bd802000039e80d -8d32565f916cea584b1aa500f04c0fc6 -8d400d0003f334c83834fe00003efbba -8d46da6deb415d28e9840500704a2f67 -8d4e44afb72f90d8e404c400e048511d -8d57c9b27d2f1698d57ac7001048c407 -8d59f4000498fd082d09f400004967bd -8d5fb2a8e2e14588ee9ace00804b15cb -8d6c27f04551ce3831817b008047b386 -8d6c2f00046d31888beb20000046d318 -8d782f0004837ec85a42c30000481bf9 -8d7c7c00049ef58896393100003cf5c7 -8d80ce4e6c1baa0854592a00b04ca5ed -8d8124d467b97328975c510020489b26 -8d8bc3b47fdccd98df4dc0005049d247 -8d8dd70004a08138d683dc00004a080e -8d91e20004bacd4866e19800004baccf -8d94b3000436b7688107f60000436b76 -8d9996c8d4548f68a7917b0040475a65 -8da2390004bc93f8e6adc700004bc907 -8da40f0003ff864881258600003ff864 -8da9dd00041705088c45900000417050 -8daf600003dae618847d7500003dae61 -8daf6b000485fcf821aa7e0000459dc1 -8dafac00047c0808e8b496000047c062 -8db11cff36b5ef18a7b5c700904afd0f -8db42d13d04b48f813010a00b04c1270 -8db7ac00047884e8c79b4500003927b9 -8dbc6b00041720988c2eba0000417209 -8dbd678a2f93ddb856ab7600104bf5f9 -8dc17900043bc9d86d3a5e000043bb0c -8dc5d00004a98f88e832190000496f76 -8dcf2e6951017c88e03c2f009047a9c4 -8ddc850004761b4863e1b900004736f3 -8de7660003d02068e71de600003d0036 -8dea7e000433b6f88599060000433b6f -8def1defaaec9568c2c9ab00d04c306f -8df31fda7c539a186df87c001049ef23 -8df6668fd5048e88d834a700c04c3f12 -8df6a842e9671b481e612a00604ca1de -8dfb6d5fa3e10cf8c6e31400c04c9ace -8dfcef0003ddd0888437bb00003ddd08 -8dfd4c872dcb97a84a2fd100304c6cde -8dfe280003f79ca881635100003f79ca -8e009300045f3398850b60000045f339 -8e0095000419b2f8814c470000419b2f -8e04514299640ec84b1aa500f04c0fc6 -8e1388e8a62f6308a7256b00b04c825b -8e139400043544f881625e000043544f -8e20272c39b68538ea435400a0484b62 -8e21dc99fbff151825afa400104b0fe6 -8e24c4000485d8c80c3a4e0000485d8a -8e2bac000484907808737400004848f9 -8e2cdb0003f3ba788ccd7200003f3ba7 -8e2f19000485d8c80c3a4e0000485d8a -8e2f312c034c14e81b16a500b04c76b8 -8e2fbffc2414ea28fb1dc900e04a9306 -8e311400040d4eb883cac2000040d4eb -8e33f500048902484a4bec0000487eba -8e3a6d00043311e88cad2c000043311e -8e3a9800046eaef860e680000046eadf -8e3b1c621413c72880d28200e048e068 -8e3cb500047a99c8f821b90000477ecf -8e3e1b0003d0bae8918e5800003d0ba5 -8e427e00045ea3488cbc95000045ea34 -8e4fdc0004344e788652b100004344e7 -8e50fe0003eec5e88d1bbb00003eec5e -8e5aac0004096c588ca2c200004096c5 -8e5ef0507511194895111700704c56c8 -8e5f0a0004178dd882f15200004178dd -8e5ffe3e8bb10888f8b085000047c475 -8e62480003d8aea88c72aa00003d8aea -8e636700044713c8a7c9970000447137 -8e6973000444ddd80d5e63000043d31b -8e6b3d0003daba88e02f1700003daba5 -8e72200003cbada886369e00003cbada 
-8e75006d3e3279588130a100f04c1989 -8e75f400049050b8091dcf0000490507 -8e7b4e0003ef422880ef6200003ef422 -8e7c720004a39ed8c667ed00004a39df -8e7ca1270ac2bec8628fac00c04ca0b1 -8e7d347966533b78bba61d00a04b29fb -8e7ef800049050b8091dcf0000490507 -8e83010004112998bebd86000041126f -8e858d72168e4a18acfc2800d04b5d0f -8e908d0003d195a88560fc00003d195a -8e97620003e54978023cd200003e5494 -8ea0a40003ee30f83b6cdb00003ecfdb -8ea20c00041705488c45900000417050 -8eaecc0003a3dc286c30820000393484 -8eb9dad0b4b1b988452e8200e048cf02 -8eba8acd3ab9e3c8a90d1b00604cda6f -8ebbaad2ad3f7bc86e2d3c00404c938c -8ec0b70003d9909882c64800003d9909 -8ec2fc447f3cfa08c6f06900404bc6d7 -8ec3010003ff2918f2919000003ff200 -8ec895248656d2a83d0c2c00f04be838 -8ed06b00040c00988d5eba000040c009 -8ed1b900048041182525f100004803b3 -8ed3bb0003eeb7488d800b00003eeb74 -8ed42273344c89e8152b33006047ebe0 -8ed8760004589c388d5dc900004589c3 -8edb9c00045981d8f9ad230000459819 -8ee1d800042d48888439a5000042d488 -8ee663000472eb78ae63c00000472eb4 -8ee741125f58a5d8f4dc1300d04b363c -8ee7cf00041c5ea882c447000041c5e7 -8eea202a64fea12844aed500604460d9 -8eed380003f463b8e2a00b00003f3408 -8ef25e00043dec4837b7f6000043dec2 -8ef5dd0003fca89887d86b00003fca89 -8ef65f945215a5b89be30a00f047fffc -8ef6ab0004accb08adda6600004accab -8efe7c00046ee7488da298000046ee74 -8effb79992c2714828f82f00f047356c -8f058fce93b5a9281b16a500104c76b8 -8f08a40003e7e62874013800003e7e5d -8f0a4600040fa9d8852552000040fa9d -8f147c00049ef58896393100003cf5c7 -8f14ea000463f0586a536c000044a043 -8f150b0003efdef80e317200003ebe6a -8f20af99277e1428b6d06900204b255e -8f217e0004450b9887235300004450b9 -8f242600044045a85231b20000440452 -8f246500042d90f8815e28000042d90f -8f24760004c6f598dab7d100004c6f57 -8f25e2000461ad281ec1c90000461ace -8f26a10004accb08adda6600004accab -8f28a54888168ed804964500b0475717 -8f2ad5e118a66358481ae800b04c51d7 -8f3384bd4ede2ee89a55a300e048ac08 -8f3c560004a24648a113dc00004a2457 -8f3cf49db967cc58526a7400104bcac5 -8f3e8000046f1328852720000046f132 -8f41832b3d6d5b6844aed500204460d9 -8f46ce0004c6f598dab7d100004c6f57 -8f4893000456d1f865cf12000040992f -8f4fc683f8513c9803bae800704a22eb -8f511400040652b88556c2000040652b -8f54d700040189488431dd0000401894 -8f56bc0004be2a48d17abc00004be28e -8f57d0000476fa986775b90000474921 -8f586279233478e8c667ed00b04a39df -8f664600041c5e78842d52000041c5e7 -8f66a6562ab940f8eda5a300b04c591f -8f6aba00040034280d7cfe00003f5d48 -8f6c790003d9915882ec9800003d9915 -8f74971bef34e18861c2ab00204b9ce9 -8f74f500046336e84b68f50000463368 -8f78fe00044596c88e225f000044596c -8f7cd70003c1b7d8834f21000038758a -8f7cd70003c1b7d88b029000003c1b7d -8f869d0003cff448817c9800003cff44 -8f86c3000473d6687448d40000473d5c -8f88d7dc62b9b648c681ae005049ff63 -8f89ec08cb247e28f1fc5c00004bc32e -8f8c83e334c8432845fca800904b9c79 -8f8d0b0003e5101887dcdb00003e5101 -8f8ff00003d02068e71de600003d0036 -8f935b0004366d7811aab100004364a1 -8f9e4500047e4da8b29333000047e486 -8fa405019bb9071869c9a300804c2401 -8faa9b8f7c88f848e34f3300204936b1 -8faddb00042b2578d94402000042b253 -8fc1bd00044517e8b77df40000444acd -8fc3620003e2231891a35100003e2221 -8fc865df0fb5c7d81e66a500004be07e -8fc9a1000476fa986775b90000474921 -8fcb922557e586e86fd8bc00104c96a8 -8fcccd7ad9ebe7d80f567400404b9bfc -8fcd5b000472e16869d7c00000472e13 -8fd162699c995738d2c78600b048c057 -8fe0d80c2e809f08a8cdd200b0475ab9 -8fe8b623a0ee17f8ebda450000482f2d -8fe91e656b1f9288dede1d00a04bcd24 -8fe97b00047cb938dcac85000047cb8a -8ff1980004ba20d81a021d00004b9eb6 -8ff75b19d60500a87fc53300c04c5af4 -8ff89600047e4da8b29333000047e486 -8ffd4b48e88921181b014800d04cca30 
-8ffe560003d230b88e58f800003d230b -8fffc9a05afdc8a80a0d7b006047eb9d -900263000472e16969d7c00000472e13 -900468000422f6198e9c020000422f61 -9006a0e4aa08f8a96699f100a047c503 -90106218901aa2d9a84c2f008047e727 -9010950004199b2984cd5200004199b2 -901b2cae0931b11979b85c00b04ba161 -901bfa0003dc131986cd1700003dc131 -901c76000456c6498ec76c0000456c64 -9025e2000487b06913164e0000487b01 -90260d000498658941cf5e0000472f16 -902895000466ef09d4179c0000466ed9 -902abb0004aa9b19c511f700003957bf -902ca70004be61a9b45e0f00004be5ea -9034530003ccebf9868d3100003ccebf -90354600043802298eebe00000438022 -903aa10004af2a890cc1e200004af298 -903b210003dae619847d7500003dae61 -903df10004838049a559a1000047663a -903e800004050b9986017a00004050b9 -90482b000436b7698107f60000436b76 -904bbf0004513ab9a5876c00004513a1 -904c560004a125a9a2e55100004a1120 -904d9000043438c98564e8000043438c -9050d700042bb2a9853402000042bb2a -90512a0004ca33e91993cc00004ca33d -905bb600040c0c09856d7a000040c0c0 -9061d700049e0eb979c1ae000049e00a -90653d5e240f07192e11d20080484175 -90656400048e5df9ea5da3000048dd18 -90658831c833e3994c208500d0476f21 -90660c000420bbd9dea11700003d86c2 -90700cd8d59a9679fccfc00050472591 -907124a0940fa0190116b200b04c23e0 -90724c3360a338b96d7b2500a049430a -9074b500047cf9495da096000047cf8b -907ce3640c8b08d9815f1200104cb675 -907ed00004a908e9ac80b700004a908c -9080890003d02069e71de600003d0036 -9083010003fcd4a9834f21000038758a -9083010003fcd4a9ea76c300003921cb -9084c23253cdb209e51bac001048148a -9089cf0004975f19b01d5500004974a7 -908f61b638e019395b9fa8006049febf -9096fad7a2e1138983200500604a16de -9098ef0003ee5d1986c99400003ee5d1 -909c7c00049a1c9911df2200004969ab -909f47b59104d139688ac300a0475742 -90a8a70004558359dfb53800003e4255 -90b072000430d9098612e20000430d90 -90b960b94ff51099d57ac700b048c407 -90baf800049590e93962c300004958b5 -90bbb30004ccce49a2f28800004cccdf -90bc3300049b1a399aad2b000049ae58 -90c0a6d7039e5f492849e300c04cc1be -90c308ad9d51fa7922786900b04b6530 -90c3620003efa1798f78fe00003efa17 -90c48d0003d3643986880b00003d3643 -90cff500048911498bdcc4000048910a -90d03d0004cbe3e9171cbc00004cbe3a -90d3020004a908e9ac80b700004a908c -90d49500040d4eb983cac2000040d4eb -90db4e0003ddd0898437bb00003ddd08 -90dbbe0003d005f986ef6300003d005f -90dc770003d2301986f24800003d2301 -90ddd70004975f19b01d5500004974a7 -90e05100048d8c59de3051000048c7d4 -90e3bf00046254998fb5700000462549 -90e9d6000424f9298f759a0000424f92 -90efa800049a1c9911df2200004969ab -90f02f6d6ddc44f9f95c7600d04c01e1 -90f6a31e0947a1a9abf1f100b048121a -90f76d9c48874f89022c250070494e4c -90fa160004178dd982f15200004178dd -90fb910004ccce49a2f28800004cccdf -90fc0f00048fb4994ecf33000048f891 -90fe1646ca6577193933ac009047ed77 -9109e20004b484b93e36ab00004b4845 -910ca7000463b0f98737bf0000463b0f -910d83ebfe29b9b94ccad000d04aa6ef -910d940003ebfb6951ceec00003ebfae -910f600004543a793a636000004543a3 -910f8b6b77cce309c677d100804c520a -911117979fe4e1d9e2af0a0080482c44 -91117a0003ff2919f2919000003ff200 -91130800044200798fc0a70000442007 -9113a60003cb8fd9a3ea9e00003cb8d0 -911f3d0057d05e892551d200604817f9 -9125dffc35128b791c176d00404a62f7 -912f3300047a2bf98401b9000047a28f -9130880003d195a98560fc00003d195a -9132240003d21ad9c79b4500003927b9 -913d6400049722c9d76d240000497227 -913ee000043544f981625e000043544f -91402f00047592c93c069d00003a6629 -91402f00047592c9834f21000038758a -9140f500045d90e9d570a7000045d456 -91482c0004c5ae19ed153300004c5add -914e4e00048911498bdcc4000048910a -91500c0003d0fd098a176600003d0fd0 -9154ea000468746987f9e20000468746 -915bb68152595b69a0132200c0491d12 -916224000486be39b1068700003b603a 
-9163580003e1816986c00d00003e1816 -9164f80003d37a59edb00b00003d379c -9168aa00043256098587110000432560 -916968000444ddd90d5e63000043d31b -916d55000495f3a9d302f80000495f06 -916f5b00043423d9b02aba0000407f4d -91734300042d48898439a5000042d488 -9175740796fc6289bb01c100404c77d5 -917f2200048ed5f9ab0124000048ed48 -91805000043d4ad9901b5b000043d4ad -91835e000472b4d9834f21000038758a -9184c200048fb4994ecf33000048f891 -91889955ea69e4c9723b7000704a203f -9195020003c1f7b9121a7600003c1f7b -91988216aef9eaf985f4bc00d04c8850 -919c130004b484b93e36ab00004b4845 -91a05c0004bbdca90b719800004bbda0 -91a06b0e984ff0b9ece59800c04bb4ae -91a3ae00040fa9d9852552000040fa9d -91a6c300047a4099bd2b33000047a404 -91a70100041705498c45900000417050 -91ac8500047fa409f679d9000047fa25 -91b9480004cc29a9d33b3f00003d39bc -91b9750003daba89e02f1700003daba5 -91c0a7000444f0a9834f21000038758a -91c0a7000444f0a9d33d18000038bd74 -91c68000040652b98556c2000040652b -91cbe50003a3dc296c30820000393484 -91cbe50003a3dc29834f21000038758a -91df620003e4e7e947490b00003e408b -91e1ba6a4ef4b679d856c300204993a0 -91e4a40003efdef90e317200003ebe6a -91e6ba0003fdb529ba357a00003fdb3c -91e90a0004360e99b52dc500004360c0 -91eb3300047a4099bd2b33000047a404 -91ec1000043250f9276f1100004324f1 -91ed7b00047447095065d10000437f6d -91eeab0004b95ad9f0faa100004b94f9 -91fe8b0004803ca92525f100004803b3 -920ca70004c5ae19ed153300004c5add -920eab0004aced7956a9e200004aceb3 -9210c23f6f04afb938720f00604bf280 -9213b600040189498431dd0000401894 -92173f0003d862e98b6c9800003d862e -9219e200046a89099628a7000046a88a -921b5386c8a748c9454dc100c04c180c -921eb6958639ed4936f8b500e04768f6 -922076000451167990b76c0000451167 -92226300043f6729ff28a3000043f66d -9226ce0004af706937f63900004af700 -922f330004754c8995477400004754c4 -92378f8385b8ab195330d40090480268 -9239dd00046d76c990c1af000046d76c -923aa800041c5e79842d52000041c5e7 -923bccd2d3d6776951a53c00504a9760 -923cea0004558359dfb53800003e4255 -923cec033afd5b090a1ea100c04ae1c8 -92442f00047447095065d10000437f6d -924a3500044759f925db080000445f86 -924a7e0004b785b9defeab00004b7855 -924e626b8f05ee19accdd000104cd092 -925a5e000436bf898955c50000436bf8 -9268a40003ec60c98b693800003ec60c -926e7e00044d5679f294a7000044d3b2 -9277aad4b2db2ec93ea5de00d0474418 -92803356bef41f497181a300804c4cf1 -928387ae211c56b983f17b00204796be -92882c0004bf1969b0f67b00004bc82f -92897edbf26bebe9ed153300104c5add -928a8b0004771869a3bc960000477152 -9291d690dc9f5d69834f21008038758a -9291d690dc9f5d69ea76c300803921cb -9292cd000433b6f987018d0000433b6f -9292d694e8334c29f679d900b047fa25 -9295a100047536193174850000475360 -9298910003f8c9b9a3a61f00003f8833 -929da8000487648975b5b9000047fc39 -92a2b3fd8a0466a9be4e4e00b048e123 -92a34fb73e4fe7494f03d100c04c446c -92a81f0004344e7987710a00004344e7 -92a91040c66122c9d1921d00804bbba6 -92a9e20004af706937f63900004af700 -92aa8f0003ff2919f2919000003ff200 -92ae19000498fd092d09f400004967bd -92b5f4393f3f6e1983376d002049ea44 -92bac200041a6fd98ac990000041a6fd -92bc26e069c626b9ae0eab00c04b419f -92bddd00041e348991760c000041e348 -92bf090004721a79916a8000004721a7 -92c5940003e5839903f0d200003e555b -92cf5e000472b4d9834f21000038758a -92d0950004050b9986017a00004050b9 -92d10266377dd309c9fdb90030473cc8 -92d3b80003cbada987ad5900003cbada -92d60c0003feb35914d59400003f70ea -92d6cbad0e9692b9e295a100f047afc9 -92db2ab1b4934e29dd0de20070486247 -92dcaa0004199b2984cd5200004199b2 -92de0500043438c98564e8000043438c -92e0528fe1f252895590a100304c77c3 -92e2d500044204d98fc0a70000442007 -92e3990003c66529f726e200003c532d -92e92c000433e1f9bd8e7e0000433c42 -92ec0200042bd68908cbb5000042bd5f 
-92ec0b0003ee2d393b6cdb00003ecfdb -92f54cb3e8f483298cf7740050478609 -92f7402604890399cdd2e800c04c546f -92fa0f456da4ffa91aedb90000483270 -92fb550004b785b9defeab00004b7855 -930dd600042bd68908cbb5000042bd5f -930e7d65244fdda9d3b70a0080475ceb -930ee80003ccebf9868d3100003ccebf -9316c780bca7a819b676d800b04caee0 -9318900004705b0988c30900004705b0 -931c0c0003d0ba59918e5800003d0ba5 -931d6800042bb2a9853402000042bb2a -931e869998a61899513d3300a04c571a -9320a40003f76dd98b713800003f76dd -9325510004a1be69af0a45000047d45c -9325ef0004416d2991657e00004416d2 -932a1300040c0c09856d7a000040c0c0 -932effa99b62220906f2a500204c04e1 -9330aa0003fca89989415200003fca89 -9332090003d0c65991a48900003d0c65 -9355060003ee5d1986c99400003ee5d1 -9357910004cd4399c502cb00004cd404 -935d105396c146e9ff9b7300804cdaef -93844e6654e7c1492ffb8c00804a6914 -9394770003d3643986880b00003d3643 -9397210003d005f986ef6300003d005f -9399230004699ad9e3bb9c00004699a5 -93a05c0004b1d89950947c00004a1491 -93a1a980a2f9fb09d54dd200f0481937 -93a1b362ddec03a96c1dd9002047c461 -93a3b500042a2d090d1401000042a2ae -93a483a0f89b5699b61224004048c3c8 -93aa8b00047fa4992fe5f1000047fa3f -93abc300042e4e69322a58000042e4a3 -93ac7200049ab1095aa70500004907ba -93be44d936aaec49d16acb00004cc043 -93c8a40003ec92b98b693800003ec60c -93c9edd4ddb150595ff83300a049d76f -93cf048dde4cf1195440d400c0473dec -93d33300048f8e69ea5da3000048dd18 -93d3c00004cdea2971d5e300004cdd95 -93d4b500047f73f93a5ac3000047f70c -93da1885a09f8b89687051005048c35f -93dd720003e19e699288a400003e19e6 -93e40b0003eec5e98d1bbb00003eec5e -93eb4d4f3c303ee96388a800d04b2fa7 -93f2d8def4eb96092778a800304b92a6 -93f9f80003e51019896bfa00003e5101 -93ff750660928b79d5a1a300d04beb72 -94086b00041c5ea982c447000041c5e7 -940b210003d2301986f24800003d2301 -940dae00049a5ca9672b8c000049a3f4 -940fa80004975f19b01d5500004974a7 -94109300046cac298ae760000046cac2 -9411200003b886193ea7e50000395bd7 -9412c200041705098c45900000417050 -94135894e3653b39316f8c00004a598f -941a7b0004b67079188c6900004b6703 -9421590003cb97e992999f00003cb97e -94299000041720998c2eba0000417209 -942bbe0004a1ad89e67aaa00003d8d7f -943123000467077973f0930000467069 -94342b00043539b9886e63000043539b -9434d400047cd7396adbd0000047cd68 -9435948b3dad2ab94f5a7b00304afbe1 -94375e1399e9cf592aba2400c0494764 -9438f598f178bd997cca45005047c145 -943ca70004c6f599dab7d100004c6f57 -943d2e67eac343d9d39c2c00904c76be -944263000472efd905ebe50000399de3 -9447bf00045a010992d570000045a010 -9449940003dd2c399312ec00003dd2c3 -944cacc9ace4bc59ed43d100c04c3925 -944cea00046476295cc8950000464759 -944f7100043256098587110000432560 -9450790003d1a3598a48f800003d1a35 -9453b8cb5a4a1479bc2eb200604c19ee -9455ae0004a18369382fbe00004a1804 -9455eb80025075e91c176d00004a62f7 -945effca4b0e426979dfb300f04cc8f7 -94640c0003e1816986c00d00003e1816 -94660900039ed5893d1162000039c575 -946b6b0003dc131989ffbb00003dc131 -946e50a692c69ce98f24ac009048e0c6 -9471ef00044479f9932ed5000044479f -9476a2a0e1bb78c9f819c100904c5679 -94777400047bb3593793d0000047b889 -947db900047f6329cb53ac000047f62b -947ec300047bb3593793d0000047b889 -947fe52fb9fe7ec93bffa400a04bd776 -9487fa0003f3ba798ccd7200003f3ba7 -948ba4000472f4d992999f00003cb97e -948dd700049cf24979538c000049cf1b -9498dc000472e16969d7c00000472e13 -94a0d20003e7e62974013800003e7e5d -94a3ac00047a4db9f85cb50000479fd4 -94a623f0b03f3299a6238c00304a1821 -94a67e00042d49b982cc9a000042d488 -94ac9300046a313902c4f5000046a129 -94b05c0004b1d89950947c00004a1491 -94b0a700046462d91f29c900004635a8 -94b64259b5cb9c0915c8d400b047feec -94b70a00047c44695d6cb5000047c432 -94bb510003eeb7498d800b00003eeb74 
-94c27e0004256b499349d600004256b4 -94cb28b68108f8b95b29c900204a8e5b -94cb74000483ad497149f10000483a90 -94ce12205565cbf94066c3009047d486 -94d2c300047a42895c8fac0000479ef7 -94d46b00040ae8994f2990000040ae83 -94d62aa7fbf45269a7a5a400204ab782 -94d99bafea70d0b9122a7e00804ae10d -94ddfe0003c1f7b9121a7600003c1f7b -94ddfe0003c1f7b9834f21000038758a -94e6560003d994d9815b3d00003d9915 -94e77400047a42895c8fac0000479ef7 -94e7ac00047fa4992fe5f1000047fa3f -94ea7e00044b07691b476c000044b070 -94f061c824529da9c2c9ab00704c306f -94f5db00042a44d9939228000042a44d -94f73f0003d35e39fd10fc00003d3594 -94faa200038c4c09d33d18000038bd74 -94ff23000484cec975b5b9000047fc39 -95008500048359295a42c30000481bf9 -950441b2f56729893106db00e049685e -95073300048f8e69ea5da3000048dd18 -950ca70004c0a7b92c16ab00004b224b -951a02000471a9a9781090000047063b -951aba00041366c993e7cf000041366c -9521c70004bc1b396c91de0000483353 -95222f25b3d519b9f443ed00004a0f4e -952426de54265ab99311d2008048117b -952e8000046d31898beb20000046d318 -952fbb35bccbab09ee09d200f04767e8 -9532bc0004becd9946e96b00004becd1 -9533f56b00d78429f0b85c00904baab8 -953c160004344e7987710a00004344e7 -953d1800043311e98cad2c000043311e -953e77549dc50c69ee7e4e00004854cf -953f7ce4a2995dd9b713ed00e04a0ff5 -95429f000433b6f987018d0000433b6f -9548116cd466fc997d44b500b04777ca -955aba000406f22993c1860000406f22 -955e3fc1a666fd690b8fec00a048522f -9563170003d8aea98c72aa00003d8aea -9566e80004a1c04907038c00004a1c02 -9569640eaf6325492e11d200c0484175 -956d8fcb92f015b9f7507d00204a8eae -957af20003fca89989415200003fca89 -957b4777552430692849e300104cc1be -95800500049aecf9801f6d000049aea2 -95810b0003ec9f295670a400003ec9ed -9581580003d0fd098b8a0900003d0fd0 -958f630003cbada987ad5900003cbada -95917a0004218a09bb7d860000421757 -959c7c0004360ba9ddc2b1000043608c -959d5200040ae8994f2990000040ae83 -95abbf00044fba29ee535b000043f8c9 -95ae699621cb6439094e5300c04c470e -95b1380003e2231991a35100003e2221 -95b17000045ea3498cbc95000045ea34 -95b65f23de9463a9bf0b74006047cc6d -95ba720003d26839945f6b00003d2683 -95bd720003ee30f93b6cdb00003ecfdb -95c3fa0003e585b99438a400003e585b -95c70900046f0af9b40490000046ebe3 -95c8fc0003d230b98e58f800003d230b -95cd240004958d393962c300004958b5 -95d41cb36f694b1947cfc2003049ee73 -95d7170003d4514907173d00003d1d79 -95d7bb0003f7b2e9235bbb00003f79ae -95e8790003d862e98ca27200003d862e -95ef7000049fec895b9fa8000049febf -95f27c00046dd8b95e742f000046dd73 -95f312000420cb99a19bcf0000406c88 -95f4cb87e40a7639bfb1d700a04a41ca -95f9127f3a340249bcd1cf009048e91c -95fddaca6960bcc9c757740040476983 -95fde20004589c398d5dc900004589c3 -96005600049fec895b9fa8000049febf -96042f00048316990282c30000483168 -960bae5dfbad32c9e1128b003047c4dc -960d380003f0c3895160db00003e947b -960d700004611ea961f4ea00004611c2 -961a480003d82749f8fa9200003d826e -961a9e000430d909892e6c0000430d90 -961c0d0003f3b189f79f6200003f3add -9620f4e1e021fda9d53c5c00904b0454 -9626d52422461b4975d11700004c627f -9637510003f768d930caec00003f7689 -96380f00048f81e932ba9e00003cfb91 -96385328ca3e7149a5481300604b170c -9642ca00042328e992999f00003cb97e -9650b50004838439a559a1000047663a -96547900bbbb1bb962a22400104861eb -9656d50004451689e2df9200004450fe -96573d0003dc470995197500003dc470 -9658a70004c0a7b92c16ab00004b224b -965b6219b1b92779654a1d00b04b8d76 -965f857d2e07f6e9e8321900b0496f76 -9660a70004430ee9fb9b5300004430e8 -96630a00047c4519b9a496000047c43d -96655d48f71183e937f63900d04af700 -9665680004442779f844fe0000444272 -9668d40004837ec95a42c30000481bf9 -966b5d00046ee7498da298000046ee74 -967170f3e55062999070a800f04bb952 -967c0c0003ec60c98c8f5100003ec60c 
-9687510003efa1798f78fe00003efa17 -9689040004abb769a7a5a400004ab782 -968b0460c53fbfd9f2fc960020475364 -9691b900047c4519b9a496000047c43d -9697445d52564189022bc2000049a87e -969a2800042346e994f929000042346e -96a10b0003f45d19c2ebbb00003f45cd -96a41f000436bf898a9eb10000436bf8 -96ae430003e51019896bfa00003e5101 -96af7400047447095065d10000437f6d -96b25b65302337793154c2006048f26f -96b50bba62451d09513a5600a04acf1f -96b9ef00044596c98e225f000044596c -96c438d8705ba859a013220020491d12 -96cd29000422f6198e9c020000422f61 -96d3c35b94ab5d9915447900204cc6ce -96e0cd8709fe95999414d400f047ab9b -96e92400048fb4994ecf33000048f891 -96eaa30003998bc9834f21000038758a -96eaa30003998bc9c511f700003957bf -96ebd0000476fb799534b50000476fa5 -96ecd700041a6fd98c1b01000041a6fd -96f11f1058cc8419be337900f04a006c -96f7c20004a3e629834f21000038758a -96f7c20004a3e629ea76c300003921cb -96f8a80004b21639de966600004b215b -96fa1a5f90ae792974662400f048dffd -96fa760003c29ce944567c00003c29c4 -96fa760003c29ce9834f21000038758a -96fc770003dc131989ffbb00003dc131 -96fe8200043539b9886e63000043539b -97059700044759f925db080000445f86 -970885000475a6496d00960000475a62 -970b4536cd6f40795721c700604bda7d -970daf00046dd8195e742f000046dd73 -970e280003f76dd98cc35100003f76dd -97100100042328e992999f00003cb97e -97140b0003e7e62974013800003e7e5d -9716260003d1a3598a48f800003d1a35 -971ba21b078a80197d44b500504777ca -9721d70795bee6a9cf523900804b134b -972fea00042c23697bf801000042c22d -9732fb1de5a599c96388a800204b2fa7 -9733590003db13c903ee4800003d89d7 -9735d100043802298eebe00000438022 -9739ddd9b927f4592d09f400904967bd -973d520004044b5995d84700004044b5 -973e182339dfbe69e6182800c04b7174 -973fcc13ac0afb79f19de2003048d683 -9741a1000476fb799534b50000476fa5 -97421af381960289c44c72000049e444 -9744b70004a8fee9ea76c300003921cb -9744d20003ebef1933ca9200003d86d9 -9746660004bb6ab9c76c5c00004bb694 -97467e0004b8dfb97cea7e00004b7758 -9747529e873ab3590dcbca0090472aa8 -9748c4975f5e2b292792240080490ba7 -974c2000046336994b68f50000463368 -97515100049a1c9911df2200004969ab -9756941fc541b9b978436d00004a2962 -975f9c000468fe894119e30000448a71 -9766bc750d215e09e8d7d100304c5526 -97687c0004a2f829360429000039346c -976d0737e7cfca898ece0d0040496c55 -976d860003fdb529ba357a00003fdb3c -9771c70004ac9b993dd27400004ac9b6 -9773fe624edb8a7972a67300f04c9ed5 -9775e60003cf5c7996393100003cf5c7 -9777a353946b5819fbc3ed00204a22fb -9778760004c5ae19ed153300004c5add -977f17259163d389a2ca7b00b04bd7fc -978332bd54ca0549f593a200e0484c4b -978484ee0c87feb9ef53f500704895b1 -9784900004710889963e020000471088 -978cfa4dd41ad009dcd25300304c471c -9791e20004b9f909f88e5600004b9f51 -979ae90003eec5e98e50fe00003eec5e -97a12300046a88a99628a7000046a88a -97a20c000420cb99a19bcf0000406c88 -97a3510003f507391963bb00003f506a -97a847000409ef899616ac0000409ef8 -97a89a000424f9298f759a0000424f92 -97a8ea00045feca945fdc9000045fae3 -97a9de00047e4da9b29333000047e486 -97ac2d30e3cacbd9a6c9be00304cd34c -97aca5affb21d449f2320d001049979b -97b217e3d2744db9c578ac000048cfda -97b7170003d994d9815b3d00003d9915 -97b8760004672bb99649c900004672bb -97b89500046c6539966893000046c653 -97beaa0003d292499634f800003d2924 -97bec10003bb20f9834f21000038758a -97bec10003bb20f9c79b4500003927b9 -97c617981e4db109834f2100f038758a -97c617981e4db109b1068700f03b603a -97c85600046ea719396a7c000046ea1a -97cf5e41b53612a987837600404c7a16 -97d0f80003d1965984076b00003d195a -97d5e20004b2abc9c779c900004a847c -97dace0004b9f909f88e5600004b9f51 -97e5f22dfb69ad6978310a00a04c4cdb -97edfe33d23ebdd99116a100604bc490 -9800330004a12319a2e55100004a1120 -9808a700044f3029b52076000044f2f9 
-9821510004a567d9c2a05600004a227d -98246b00040e656996b6ba000040e656 -9827f600043f9099ee535b000043f8c9 -982dbe00039ed5893d1162000039c575 -982dbe00039ed589834f21000038758a -9834d700041705098da9dd0000417050 -98356800044200798fc0a70000442007 -983f0a0004744a59c803ac000047449e -9847600003d862e98ca27200003d862e -984e4500047bc05950e9d9000047bb32 -984ea800041720998dbc6b0000417209 -984f550004b286f98e4c1300004b2869 -9851b40003d0fd098b8a0900003d0fd0 -985554169441acc9a9b47600004c4947 -98570a9eaa93f359255a7400004b063a -985cfe0003e31ad9c886ec00003e31a7 -9862b100043d4ad9901b5b000043d4ad -9864c1a06cc9cdd92818b500f047b7d9 -9866f80004980f494f0b3300004980d7 -98689500044edb99c7faa2000044eda3 -986cf5000463b689970de20000463b68 -986f06321441e6999df23900d04b22a1 -98797a00040145892db5900000401438 -9879de00047378c9d2e4b50000473785 -9884a70004698b3997439c00004698b3 -9885e20004ae20c99e9433000049f049 -988856000471aa7996eddd0000471aa7 -9888bbd2099508095fd25300904bf377 -988e8fc57b142d597149f100f0483a90 -988fc87417aab389fc09c100a04c80f7 -98955d000472c5a96e0f5e0000472c3d -98980c0003d0c65991a48900003d0c65 -989a560004ad14d99e9433000049f049 -989b9c00046254998fb5700000462549 -98a2739d17361fe9cac3220040493eb2 -98a28a6ddfe7da49b097ed00b049b1a0 -98a60d000495e9096f75240000495e81 -98a6560004bb6ab9c76c5c00004bb694 -98a8f9ed4d3c2219732c2c00904c366d -98afbb0003e31e89c886ec00003e31a7 -98b6030bd770c989a3f96b00004be947 -98bae1ab06bba7a94f03d100504c446c -98bd230004672c199649c900004672bb -98bf37000427a85996ae280000427a85 -98c1dddbee5d624925db080050445f86 -98caaa0003d5d95997724800003d5d95 -98cc0f000495e9096f75240000495e81 -98cfc16cf54980e9e6adc700304bc907 -98da430003eeb7498ed3bb00003eeb74 -98e0410003c25e99308b2600003c1900 -98e0410003c25e99834f21000038758a -98e78c0004a8ed1926c6e800004a7549 -98ec5600049fd549c1ac0d00003f36ec -98efe76b63065889cf6a450080473357 -98f374000483a7e902c4f5000046a129 -98f5cc0003f3ba798e2cdb00003f3ba7 -98f6ec0003f77ec96edbfa00003f77e8 -98f853c602b32339415b740020473feb -98f90b0003e2221991a35100003e2221 -98fd230004502b799738ea00004502b7 -98fdf7fce8957eb90135e300a04ccc98 -98ffcf00041e348991760c000041e348 -99079c00046479b95cc8950000464759 -99087c0004a2f829360429000039346c -99150b0003f507391963bb00003f506a -9917bb0003f334793834fe00003efbba -991c930004543a793a636000004543a3 -991e1b0003d0ba59918e5800003d0ba5 -9923be0004a125a9a2e55100004a1120 -9925f40004416d2991657e00004416d2 -99272973e84ece799098a700804c718f -992aab40f35a88797d1a7b00804b05b3 -9930db0003ee87797c140b00003ee7f2 -99319600047ec7f941cf5e0000472f16 -9948d400047edcc9cb5e9e00003cbe13 -994cb46818eda8e9df274200e04ce6f0 -994e280003ec60c98c8f5100003ec60c -994e3850a7526bb996b7be00f04a3d31 -994f6c00044d5699958420000044d569 -99550a0004c2efe935842800004bb5c5 -9955900003fdb529ba357a00003fdb3c -9958ea000456e1695258950000456de1 -995a9d000436bf898a9eb10000436bf8 -995db76264fd7509f263a8002048e815 -995e053dc4f356f94b6c96007047e6e2 -99645100048911498bdcc4000048910a -9966eb862bdca6394cc55b0060472660 -996a80000471faf9a4aeb100004353de -996b3900043299a98cd2df000043299a -996c2b6628781d29672bbe00204a3910 -996dbdb8ffe308995df9c100304c18b7 -996ec300047e3029832996000047e182 -996f3300047e3029832996000047e182 -9975e2000451167990b76c0000451167 -997c0b0003e19e699288a400003e19e6 -997e0d29478aa4a9103c7c00804a000f -99860200046cd82902c4f5000046a129 -9987b00004416ce9984a5f00004416ce -998a150003d8aea98e624800003d8aea -998b0c8bc4ae69290bce1d00104affda -998d12000430d909892e6c0000430d90 -998d9155337b6ea95dddf1002047b798 -99928b00047382a963e1b900004736f3 -9996a10004bc6f1947e856000049a4ea 
-999d7000045fcdc9980020000045fcdc -99a4a700045d4a79982b9c000045d4a7 -99a70a000476fa996775b90000474921 -99ab5100046d76c990c1af000046d76c -99aba10003f76dd98cc35100003f76dd -99b03d0004c874e92522d800004c8746 -99b1e200044b5d5960f420000044adb1 -99b52a0004c874e92522d800004c8746 -99bcb30003cb97e992999f00003cb97e -99bfae00040c00998ed06b000040c009 -99c0ff4ba43fcd79960f2500d0491ff6 -99c1f100047c2679bbae8b000047c24e -99c6f27092e2a4e9f3763900b04b0783 -99c7ed00049e81a9465379000049c0ea -99ca8000041a6fd98c1b01000041a6fd -99cf0a0004096c598e5aac00004096c5 -99cf0a0004791709ff98850000478439 -99d6931a305d9c098dbbcc00304cae64 -99d8200004550db92675e200004550cb -99d8f80003d70f69f1589800003d4bb0 -99d92400049879398b4df4000049876a -99e0a40003dd2c399312ec00003dd2c3 -99e86fac708d7909910dd2002047dfb9 -99e902e9d2f9fc2936c77500c04ce2ec -99eb210003d230b98ffe5600003d230b -99eca40003eac6a998980d00003eac6a -99f6a3118bfc88f90a1ea100a04ae1c8 -99f95b000472f56992999f00003cb97e -99ff8c00049fd549c1ac0d00003f36ec -9a03a10003eec5e98e50fe00003eec5e -9a060c000413ff1998057a0000413ff1 -9a06a200038cb6e9d33d18000038bd74 -9a078c0004a1e3597397c200004a1d94 -9a1650cadb60e4996a5a0f00e04c58e1 -9a18d5d11566e4d96a431200d04c9a20 -9a227e0004ae746942b67b00004ae743 -9a23ca0004726df9d8527c000046e202 -9a25e20004ae20c99e9433000049f049 -9a27a40004b785b9defeab00004b7855 -9a2e8b000475a6496d00960000475a62 -9a31b40003d01c794c8e0900003d01b7 -9a3c6f00048f81e932ba9e00003cfb91 -9a4374000483a7e902c4f5000046a129 -9a485c0004b224e92c16ab00004b224b -9a4971d4ef067da9e4da3900804b32d0 -9a5416dfdbf357497f8ed200b04ccf2e -9a61d700049590e93962c300004958b5 -9a6dd300043fa159c64b8e000043fa11 -9a7b170004ab72599312ec00003dd2c3 -9a7d2300044cc359c075e2000044cc2d -9a81720003dd04399943fa00003dd043 -9a880b0003eca98999399400003eca98 -9a8a5300043311e98e3a6d000043311e -9a92ba0c41dab19975696f00c04a93b4 -9a93620003f768d930caec00003f7689 -9a9a8b00047382a963e1b900004736f3 -9a9b170003dd3b59e6b59400003dd3ac -9a9bb2ef91dd6049cfee3900c04b047e -9aa15d0004726df9d8527c000046e202 -9aa8c09779334a69618a0d00e0493050 -9aaa480003d85b999962aa00003d85b9 -9aab6000044fba29ee535b000043f8c9 -9aac5c0004b0d149e5782800004b0d11 -9aadf8d656ea2fe9000bd0005047b877 -9aaece0004b21f5950947c00004a1491 -9ab5bfcf3d579b792749d9001047eff4 -9abb7af287f93e497b745600d04a0f8a -9abd1f0003efa17990c36200003efa17 -9ac5d700048e5ca9b3936b000048d69f -9ac8a70004c01f19fe696b00004c01e5 -9ac90a21931ca8d958460d00d04986db -9ac9f1000473d9090253330000473d81 -9aca223173385a195881a100c047becf -9ad6ec0003f7a7a9995b6200003f7a7a -9adc9a000425a9799349d600004256b4 -9ae63b00044f1af9993bbf000044f1af -9ae8d4000473b709ea5e450000473b41 -9aed20d5d1cbd7798bdcb500b0477127 -9af2450004826d4972ca4500004826ca -9af35cad9701c63964e41300a04ba7d0 -9af68000041705098da9dd0000417050 -9af8be0098d94bd9cfe9e200e04b3706 -9afaa9d97c19f859abb7f5004048ae72 -9afac200041717d999a5dd000041717d -9afb01962de6afb9ebc1e200d04b6099 -9afe1300041720998dbc6b0000417209 -9b09e20004b21f5950947c00004a1491 -9b127d000432c9893b6d5200004208c6 -9b1b6d00049cf24979538c000049cf1b -9b1c82fd3ef40329046f74009047b8b0 -9b2378f4a3a056e9d2d27200104a7e0f -9b24cf84fb28fe59ae86a500904c3a8b -9b296800044479f9932ed5000044479f -9b2a7b697a655959e5782800904b0d11 -9b2aab0004b0d149e5782800004b0d11 -9b2f6492daa5a6e97ce5d200d047d58b -9b35310003b0db193d1162000039c575 -9b35310003b0db19834f21000038758a -9b395500048e5ca9b3936b000048d69f -9b42020004721a79916a8000004721a7 -9b4c980003da60a999e00b00003da60a -9b56931e856225d9a273dc00f04a2854 -9b56ac0004190f093f3ddd0000419082 -9b5b43000422f6199004680000422f61 
-9b5f01000420d559c10c6b0000420c52 -9b64ef0003f3ba798e2cdb00003f3ba7 -9b65701e9e5934e9dc537400c0480442 -9b67590003d26839945f6b00003d2683 -9b68c792971727897415d000d04a8a08 -9b70760004c61d79951bd100004bf552 -9b710600043312098cad2c000043311e -9b7346000485ace9b1068700003b603a -9b73ed00049a98d944c3a8000049a983 -9b750b0003e585b99438a400003e585b -9b75f80003eeb7498ed3bb00003eeb74 -9b78d40004838439a559a1000047663a -9b78e3247032b739df6ace00204ac099 -9b7b620003f05d89307d7200003ee9b0 -9b7c878d69e5511979c1ae006049e00a -9b879c00045a010992d570000045a010 -9b8fb26a6cc6cf694a2fd100404c6cde -9b912f00225767c9807a7300a04c94f5 -9b93aaf86f413c893e37ec0090488ec1 -9b980b0003d8c399d5af3d00003d8bcb -9bb5995b664cfbb9f24a7e00004b4d09 -9bb60b6f22c4883979e92b00304a5626 -9bb6ca00042d49b982cc9a000042d488 -9bb6ce1fae0ec3891dcfc200c04a3949 -9bb96200039ac599834f21000038758a -9bb96200039ac599d33d18000038bd74 -9bc58400043802299035460000438022 -9bc93fb3f262bf49f8305e00704cbe36 -9bc9d688c7ed1fc9746f3300a047d607 -9bcb8d27d22d1b1979d5c700a04b8404 -9bcc20000461bb199a78760000461bb1 -9bd33700042a44d9939228000042a44d -9bd7a900042e8d296e894e000042e8a6 -9bddb200044078f9a30748000044077b -9bea0c00041366c993e7cf000041366c -9bec77d3633003d97e2d6b00104c3523 -9bf7ea000425a9799349d600004256b4 -9bfa740004bdb14987863900004bc9f9 -9bffbb0003f768d930caec00003f7689 -9c028a000406f22993c1860000406f22 -9c03db65ff49ee8950026e00d04ccf62 -9c08d7000424f92990e9d60000424f92 -9c0a720003dc470995197500003dc470 -9c0c690004bd23f95e32ab00004bd23c -9c16660004b8f5995c25e2000048b784 -9c17590003d84c099aa27200003d84c0 -9c1813c3b0a1d089a775c700704b411e -9c1fd21fb196752992f03d00904cb9aa -9c22743b85170b392cf20f00a04c4d37 -9c235c0004a9923951a53c00004a9760 -9c28f6cefab7bd49af52950000472fac -9c37a0cba1f66619ad65d200d04795e6 -9c38ea000464acb99ae39c0000464acb -9c3b3f0003d59f1949e31700003d341d -9c3cb70003d8aea98e624800003d8aea -9c4b5ed84a826829c4003300c049de50 -9c4d0b0003ee87797c140b00003ee7f2 -9c564e00048c4e39b61224000048c3c8 -9c58bc0004cb5969a1037600004cb593 -9c5dc51e4a1c2d297a8c5c00604bca72 -9c6362fc56e61ec97b50c4006048730a -9c66ba000412ade99b35860000412ade -9c6b8c0004aa2d2968800f000049811d -9c750900043299a98cd2df000043299a -9c783871d419ca9944aed500b04460d9 -9c78520004cb5969a1037600004cb593 -9c7abb0004aa2d2968800f000049811d -9c821a77ce7077d94f03d100804c446c -9c88760004c6cee9af0a45000047d45c -9c89310003cb8d39a3ea9e00003cb8d0 -9c89580003d0c65993320900003d0c65 -9c923b00044ef5c9b1068700003b603a -9c98d40004837ec9e148470000420966 -9c9986000418e8c99b41dd0000418e8c -9c9be90003f834e93f7a1f00003f8329 -9c9ce51e02656329d254d4000047496e -9ca47500042efb599b567d000042efb5 -9ca495000456e1695258950000456de1 -9ca58d00043312098cad2c000043311e -9ca7020004a9a0c930eb0200004a9a0a -9cab8c0004aba7b9f20fc200004a1567 -9cae130004096c598e5aac00004096c5 -9cb28200048c4e39b61224000048c3c8 -9cb6e0fe4cd4f2895b1f6d00b04a7015 -9cbc8884910a2d4993de0f00d04c4ca9 -9cbcea8abdf2da795075a300b04c67f3 -9cbe5e0004360e99b52dc500004360c0 -9cc40f00040c00998ed06b000040c009 -9cc9f70003d230b98ffe5600003d230b -9ccac3000490ca095f12f80000490b5b -9ccbc70e31f8fc79fcf3a8002049c220 -9cce91ac5d8e01f92fb65600b04b9bf2 -9cd1dd0004716ee99b49af00004716ee -9cd72f8d9e8efde9e3dca100504c7c94 -9cd8f80003d7d2899ba97500003d7d28 -9cdd4300041e348992bddd000041e348 -9ce0b092176703996e2d3c00604c938c -9cf67b0004b2abc9c779c900004a847c -9cf97a000419d829ca5ec20000419d6d -9cf9940004a8fee9834f21000038758a -9cf9940004a8fee9ea76c300003921cb -9d024500047fa4992fe5f1000047fa3f -9d08000ac127e8e98ece0d00d0496c55 -9d08b424653727b9b52a7400804bb5b2 
-9d0d720003dc22293c069d00003a6629 -9d0d720003dc2229834f21000038758a -9d18d700046f0af9b40490000046ebe3 -9d1df1000477467913f1f10000476ace -9d2479eb38b16669f7ec960000474425 -9d25f3bd2ee7f5e9e8d7d100d04c5526 -9d286800042346e994f929000042346e -9d2a2800042c23697bf801000042c22d -9d2cdb0003ed4549d22aec00003ed447 -9d34a80004bc38f9f1fc5c00004bc32e -9d3c8e0003e19e6993dd7200003e19e6 -9d3c9a8b12169f69e7e77600d04c55b2 -9d3d56a731cdf1e91cff1400c04c9e0a -9d3df80003efa17990c36200003efa17 -9d43510003f05d89307d7200003ee9b0 -9d4a7e0004519a999be49500004519a9 -9d4d7750bec910e91cff1400904c9e0a -9d4d9a89dc3bad79ee7e4e00d04854cf -9d507600046c90f92057bf00004699d5 -9d52560004ad03790d5e63000043d31b -9d5ba40ce54b08e9a945c100404c5115 -9d5e397792821629f6ce45001047fa9f -9d72ae5e718377c972937600e04be8fa -9d81b40003d0ba59931c0c00003d0ba5 -9d830caf7e7af51973312400f048f430 -9d89480004cd50d9af0a45000047d45c -9d8aa200038e1f5919bdac000038e0dc -9d8fb20003d0b8196e6ff000003d0b69 -9db0850004759ef97e15a100004758af -9db2920003d8c399d5af3d00003d8bcb -9db38c0004aba7b9f20fc200004a1567 -9db46eec01ad1189aa26820010486072 -9dbaa10004b4b459b727a400004b4a02 -9dbe740004ad03790d5e63000043d31b -9dbfcf0004044b5995d84700004044b5 -9dc00d00043311e98e3a6d000043311e -9dc1358cd5d8699995195a00204aaeec -9dcdcf00049050b9091dcf0000490507 -9dd9b90004733b6915d3d00000473333 -9ddf9dc9e813c4a969b30a00f04834eb -9debb80003cb97e994215900003cb97e -9df1de000483fe29604b0a000047af27 -9df46b000409ef899616ac0000409ef8 -9dfe51d8c0023d69f20fc200604a1567 -9e01c050a656b39999017b0020476805 -9e0721d78bfaf03944b28b004047485f -9e076f4261bb41b9e939f1003047bc39 -9e11d00004cc12c92d19240000496371 -9e145100048805195a8bf50000487207 -9e16df3a3c347ac9b5a43300604a1334 -9e1aad9753e8683993ad3c00804c8474 -9e1c9500046aea999ccc93000046aea9 -9e1e4e0003cb9b599c782000003cb9b5 -9e275d000471faf9a4aeb100004353de -9e28f9ed02c9ab09e1f45600c04a1415 -9e299000040e656996b6ba000040e656 -9e3886000422f6199004680000422f61 -9e3df100047bb3593793d0000047b889 -9e4649d885b30bf9d9eb0a00204752ad -9e4662405aeaf1d998590a00204c399c -9e4719de93ccf609916c7600604bf4b9 -9e4c980003d5d95997724800003d5d95 -9e50b30003cb94696a7b3900003cb819 -9e61e200045826a99d0bbf000045826a -9e64f2f3d23f2bd94eaedb00f0492077 -9e689130173fbfd9dc125f0080445904 -9e729be28aafcd499ac9f400b0492620 -9e729e0003cb98799d08b300003cb987 -9e7708699702f899a7256b00004c825b -9e7adb00048e9209bcd1cf000048e91c -9e7d5ebc61f5d619c1533300b047bd67 -9e81f100047c2679bbae8b000047c24e -9e831900048d28b9cca5a8000048d276 -9e84b70003dd2c3994499400003dd2c3 -9e8c5000043bdc499d1b5b000043bdc4 -9e912b00049e7b7979c1ae000049e00a -9e915a0004ab9639f20fc200004a1567 -9e939f8cc45fa619b7fdd700404a3a91 -9e93f600043409090d6ab10000433fc7 -9e949c87591412d91234a100704c57cd -9e984815b8063129955ad000a04a9a70 -9e9ddd00043174c9b45658000042e389 -9ea02f0004848d597149f10000483a90 -9ea3fba24b5fc47931141300204b75e2 -9eadc801c9b0bd99749c2c00f04c4d71 -9eb3cf00040ef6e99d2447000040ef6e -9eb6280004231099b0a7ea0000423105 -9ebbe00004344e998652b100004344e7 -9ec2ec0003ecab898b693800003ec60c -9ed33d0003d8c399d5af3d00003d8bcb -9edb200004710889963e020000471088 -9ee0c030ca7321c98bc1d90090480d59 -9ee168000424f92990e9d60000424f92 -9ee717b16b14f7b9f8b085001047c475 -9eef11000432c9893b6d5200004208c6 -9ef56f0004a908e9ac80b700004a908c -9ef82000046c6539966893000046c653 -9efa47e55b3fc56905add700704a1d92 -9eff318fb09400f96595e200804af2a7 -9f022400048634d9430db9000048484f -9f0c850004759ef97e15a100004758af -9f0cc82c11b6c1a9262b7500f04ce5f1 -9f1440b62af503d908a7a70060484bb8 -9f147600046a88a99628a7000046a88a 
-9f18200003cbdb0929164e00003cbdae -9f1f0b8b8e8f95b90116b200104c23e0 -9f25170003dc9eb9a80d9400003dc9d9 -9f25350004732d4955f73100004732ce -9f268b00047c2679bbae8b000047c24e -9f2db40003d0c65993320900003d0c65 -9f38d20003f0e2a97c140b00003ee7f2 -9f3d85150b9150c9a360850050481c8c -9f3f310004732d4955f73100004732ce -9f4858a7caa38d79ada15100d049f5c5 -9f4b4355815fc85908927b00204ac0ca -9f4eec0003e2221991a35100003e2221 -9f55e20004672bb99649c900004672bb -9f57d400048e5ca9b3936b000048d69f -9f587395d910c229cefb39007048e200 -9f5c2f00047b6329917dde000047b62f -9f5cdf46b63997c928aee800f04c2d9a -9f5f6b81c7f0066911edde00b0481705 -9f61f80003e585b995c3fa00003e585b -9f637ecd363ecfe9476ac300404746ce -9f6d67adb0d961a9a1ca0d0070498ce4 -9f6df527ceb7621961bcc200a048ed70 -9f731a692b73fe799d87ec0000487f26 -9f74b70003d2683995ba7200003d2683 -9f7adb000498751904cfa80000498750 -9f7b047df08d12f9e73c2f00a0476a95 -9f814600043f9099ee535b000043f8c9 -9f891400041e348992bddd000041e348 -9f8d5a0004ab9639f20fc200004a1567 -9f91de000476f28909d801000042c708 -9f9281d4ce9b8089409ac7004048c8d3 -9f95d900047c4519b9a496000047c43d -9f96c7447fd306a9a2bc7900104ce878 -9f99b6daf8518cc9eeec2c00904c2ab9 -9f9c764ce59ec60935b36b00a0487b8f -9f9d48521ae06e993962c300704958b5 -9fa2ba00041985c9a7b6ac000041984f -9fa7cf36d4f059191d6bac00f0484833 -9fa8950004698b3997439c00004698b3 -9fa9f100048467b9e2df9200004450fe -9fae9600048d0ee98f07ec000048d0e4 -9fb60500043802299035460000438022 -9fb64500047f81b970d42f000047f818 -9fbdcc0003e19e6993dd7200003e19e6 -9fc7ac00047cce996b86c3000047cc38 -9fc7e81174a14f09f5093300404bf063 -9fc8ea000463b689970de20000463b68 -9fce240004976789d7b7d4000048e6fb -9fcea50004c46c9901587c00004a2d84 -9fd3e8586402757918bc1300f04b8919 -9fd7ac0004c93b59eb39ec00004c93aa -9fda280003dc470996573d00003dc470 -9fdebc0004c46c9901587c00004a2d84 -9fe3170003d85b999962aa00003d85b9 -9fe4dc000472f56992999f00003cb97e -9fe5940003eac6a998980d00003eac6a -9fe6090003b3eb394d96a300003b3e4c -9fe6555a7c27d1f9ea09f4008048e5fd -9fe6a10004b3564945863900004b3504 -9fe89000046f01c940ba02000046f013 -9fe8c400048c90d9409ac7000048c8d3 -9fea8b00047cce996b86c3000047cc38 -9fec130004accb09adda6600004accab -9fecca00048d0ee98f07ec000048d0e4 -9ff4a7000466f299bd89e20000466f24 -9ff8980003cbd7099c782000003cb9b5 -9ffb086b4a62438998875e004047305f -9fff550004bd8019a2ca7b00004bd7fc -a0000b0003e31e8ac886ec00003e31a7 -a00129000427a85a96ae280000427a85 -a001f4000498751a04cfa80000498750 -a0059770ea9ee7caa3eee800004c6a17 -a0073aef99e9cbea73c02500804992f4 -a00c20a5649f3d8a46513300804c3fb0 -a015796ee8322e0a7b745600c04a0f8a -a018d20003dd043a9943fa00003dd043 -a0193990103d059a2d1c0f00e049695c -a0197b153878640a6181ab00d04c61bc -a01b46a7a624beda07396400e04931a2 -a0217cafffae9a4a139dde0010474740 -a023ac000475a74a6d00960000475a62 -a024a80004bb166afec3a400004b57b6 -a026ec0003ec9f2a5670a400003ec9ed -a02b6c0004502b7a9738ea00004502b7 -a033430004256b4a94c27e00004256b4 -a03aaa0003d2340a85591700003d2301 -a03fae00041366ca951aba000041366c -a041621f157fa3fa37297b0080473876 -a045fe00042a44da94f5db000042a44d -a0477400047378cad2e4b50000473785 -a049900003ff7bba9efa8a00003ff7bb -a04a98000471aa7a96eddd0000471aa7 -a04bf91ff9031d6a48370a007048472b -a04d4600043c8c6a3eabc3000042eef5 -a04defd3c93d62dabd0ef8002048f120 -a050660003d0ba5a931c0c00003d0ba5 -a069905ac98da8aa2ee6e800804c28b0 -a06edb0004958b7a3962c300004958b5 -a0747d0004aba71af20fc200004a1567 -a075380003eca98a99399400003eca98 -a077620003edf8baae40fe00003ed13c -a08a5c0003ca697a43ba6b00003ca678 -a08aa8000406f22a955aba0000406f22 -a08e7e0004b1bc5a727a1d00004b1b89 
-a093789c4c0234ea53b97b0020482f6f -a0940d0003f0c38a5160db00003e947b -a09dd90004733a5a4625de0000473375 -a0a02fcb84a9f5da0ff59800504bc033 -a0a2ab0004aced6a56a9e200004aceb3 -a0a4a40003f0c38a5160db00003e947b -a0ac47000413ff1a98057a0000413ff1 -a0b490fafd08a67a2075b900a047cc31 -a0b58557434a0e2a697ae800504c2250 -a0b94b0003cf5c7a9775e600003cf5c7 -a0ba2beaa388117ad17abc00604be28e -a0c5377bb0e2c60a87617b00c04745ad -a0c5c8000473195a834f21000038758a -a0c5c8000473195aea76c300003921cb -a0d8fa38df6e328ae325de004047d295 -a0da7e0004bd801aa2ca7b00004bd7fc -a0e1cf000495e84a6f75240000495e81 -a0e1d700049f907aa0547c000049f8f9 -a0e3cf0004019aca9f886b00004019ac -a0e7cf0003fcc62a9288a400003e19e6 -a0e994c4ac8c8aaa4eaedb0070492077 -a0eb510003cb97ea94215900003cb97e -a0f0720004a1231aa2e55100004a1120 -a0f17000045fe3ba292e7e000045fe29 -a0f5ef0004416cea984a5f00004416ce -a0f65e0004353d1a4f1ab10000435394 -a0f7a40004b9f90af88e5600004b9f51 -a0f96400048e612a8b10ac000048e072 -a0f99f0003cbdb0a29164e00003cbdae -a0fc256bb0c6a9da36968b00d047e7d3 -a1028b00047a428a5c8fac0000479ef7 -a108dc000472ebaaae63c00000472eb4 -a10a150003dd2c3a94499400003dd2c3 -a11197287182c2ea091dcf00a0490507 -a117ac000475a74a6d00960000475a62 -a11d940003f7a7aa995b6200003f7a7a -a1218600041717da99a5dd000041717d -a123b20003d12d9a9facc000003d12d9 -a12d3bee5973942af819c100004c5679 -a133cf000401871a9fc1520000401871 -a138c00003d05e0a39336300003d0594 -a13ab100043536aa9efce8000043536a -a1424ace72889aea9596bb00404abaf2 -a1448500047a409abd2b33000047a404 -a14d750003da60aa99e00b00003da60a -a1509500045d4a7a982b9c000045d4a7 -a1536d0004a1824a382fbe00004a1804 -a155240004964f3a0aa62400004964f1 -a1576000045fcdca980020000045fcdc -a15aa30003c15cea834f21000038758a -a15aa30003c15cea9fed5d00003c15ce -a15fbb0003f0bc8a1672ec00003efbd9 -a16c7233e0b6ecba749c2c00a04c4d71 -a16ed20004ccce4aa2f28800004cccdf -a16fc53ae6cdf8ea79538c00b049cf1b -a170275df8bb143a22d38000b0472d48 -a170d00003c1910ab1655d00003c18e3 -a173d400048f3feaaccfbb00003e5555 -a1744c00042346ea969a28000042346e -a175350004cce7eae2614800004ccca5 -a175b005a15d18ea04cfa80070498750 -a176f75045c52eca38e27e00604b1649 -a17729a7b85648da3a527e00704ac745 -a179590003cb8d3aa3ea9e00003cb8d0 -a187510003e2f32a7f53fa00003e2f16 -a194c00003d127ea462a0900003d121f -a1967c0003c6652a834f21000038758a -a1967c0003c6652af726e200003c532d -a197a60003cbd70a9c782000003cb9b5 -a197b5000422eaea1725d60000422eaa -a1988805f929ee5a4f0337001048855a -a19c9acfe947050af6ce45008047fa9f -a19e7e000423701ae3680100004236f7 -a19eb100043bed6a5217e0000043bed1 -a1a1ef6bcf9285ca6cf1d800e04c8b72 -a1a655846910499a61d59400704ab0de -a1ade60003cbdb0a29164e00003cbdae -a1b68b00047acbca86fab1000043539b -a1b8b7b82b36d99a94aaa100204b4f65 -a1bddd00040a914aa05d90000040a914 -a1bf220004964f3a0aa62400004964f1 -a1c23238f618e02a72a67300704c9ed5 -a1ce260003d2924a97beaa00003d2924 -a1daf8000493057a618a0d0000493050 -a1dcdb0003e3c98aa0817200003e3c98 -a1de720003d2340a85591700003d2301 -a1dee7000432c98a3b6d5200004208c6 -a1e00f00048f3feaaccfbb00003e5555 -a1e2430003e585ba95c3fa00003e585b -a1e311bae333a19ac2866600304bbde4 -a1e6ce0004bc606a3c1c2800004b0ab4 -a1e979000434090a0d6ab10000433fc7 -a1e99ff2da0e0c9aacfc2800904b5d0f -a1e9d7000491dbeabf34c2000048fdd8 -a1eb6f30c9c16b3a1a0ab200704c5ba6 -a1f53c0004c93b5aeb39ec00004c93aa -a1fa7e00044d569a958420000044d569 -a1fe560003d89dba03ee4800003d89d7 -a204fe0003e555eaaccfbb00003e5555 -a205a1000475a64a6d00960000475a62 -a20b0900046ec63aa06a80000046ec63 -a215dfee9ec5c83a31817b002047b386 -a218d0eaeac680ba09a66600b04ac410 -a21a130004044b5a973d5200004044b5 
-a21b6b0003d37a5aedb00b00003d379c -a21d720003dc14ea69118f00003dc0f6 -a21ea800040e656a98246b000040e656 -a225d90004733a5a4625de0000473375 -a229c8000473195aea76c300003921cb -a22a260003d2683a95ba7200003d2683 -a22f6000045a52eaa0d9e2000045a52e -a237a40004bb6abac76c5c00004bb694 -a23e150003d5d95a98caaa00003d5d95 -a2426e08ba619f6a6fb37600b04be61d -a242920003d7d28a9ba97500003d7d28 -a248a80004bb166afec3a400004b57b6 -a249023a6d2fed3a9ec5de004048146d -a24931000409ef8a97a8470000409ef8 -a24b83c53e51212a3e62db0060496416 -a2582000045598aaa10076000045598a -a25a6bf30ee2c59aa1735500004b35f8 -a25c1e635e2ec61a666ace00204b0f98 -a25c7600044f1afa993bbf000044f1af -a25e9f2f6a9d445adad1d000704a90f3 -a25ea50004bf4bca916c7600004bf4b9 -a2605c0004bc7b8a9070a800004bb952 -a263d000047ebafa0a0d7b000047eb9d -a264a80004ad037a0d5e63000043d31b -a26955000491dbeabf34c2000048fdd8 -a26e591526a8e7aa5bc68b005047427e -a2727100042e8d2a6e894e000042e8a6 -a277b00004423e2a65b79200004423a6 -a277b90193a8b45ac667ed00204a39df -a2817e00044204da8fc0a70000442007 -a284ac000485d8ca0c3a4e0000485d8a -a287cf00040dddfaa12c47000040dddf -a2894c3a951da9da87863900404bc9f9 -a28d3c0004a8ed1a26c6e800004a7549 -a28d41e4280704bac2c13c00e04c858b -a294b70003dc470a96573d00003dc470 -a29fa09575bcb05ae2c10a00104c30bd -a2a08c80914cb21a8663220010497376 -a2a30900046d448a37f602000046d41c -a2a7cbd458323acaf49b7000004a39f9 -a2a8fe0003eda30aa176ec00003eda30 -a2a9bf87f6c8852a9786b200a04c21ef -a2ae6c3ff89ba78a3c0cb600504a983d -a2af12000412adea9b35860000412ade -a2b30a00047f42ea2699d2000047f1e7 -a2b3a804685284ca470a1d00104b6f61 -a2b6aecf911ef49a08e67b00104bc7e3 -a2b89500045f51fa809095000045f3db -a2bcdab42934154a16c3a400904b313f -a2be071befcfad4a9d924500a047648f -a2cf0100041c9a8aa18d90000041c9a8 -a2d2bc0004bf4bca916c7600004bf4b9 -a2d545b082361a8ad42e4500204751c8 -a2d5c900046462da1f29c900004635a8 -a2d794de0cba598aeecc0f00d0490838 -a2d8ac87a0342f7af57fdc00304a2ece -a2d97c05df729ada9a897b008047b9b4 -a2daf8000496f8fa6caa4e00004850f7 -a2dbb36332a4dd2a4e4c85006047bcfc -a2e6ba000406c88aa19bcf0000406c88 -a2ef6000046336aa4b68f50000463368 -a2f3a60003cb8d3aa3ea9e00003cb8d0 -a2f75b0004340e5a7ddf5b00004340e3 -a2f8fb0004256b4a94c27e00004256b4 -a2fc0f00041366ca951aba000041366c -a303760004c6d15a4119e30000448a71 -a30c330004a1560ae7abbe00004a1555 -a30c4c00042a44da94f5db000042a44d -a30e3b00044e006aa18aa2000044e006 -a3130100040a120afd8190000040a0ff -a31722000495948a03b164000049593e -a31bc20004a22ffafbc3ed00004a22fb -a3250b0003e2f32a7f53fa00003e2f16 -a32531be6d3a528aa3f5d90000475cf3 -a327120004190f0a3f3ddd0000419082 -a32a8b00047e302a832996000047e182 -a32ca700045a4faaa1af9c000045a4fa -a32d1840a4f076ba36c77500d04ce2ec -a32dd10004344e9a8652b100004344e7 -a32fc00004cdddba71d5e300004cdd95 -a330fe00044129eaa2017e000044129e -a334f5000461bb1a9a78760000461bb1 -a33b5b000435463a7fa7e0000043544f -a33c690004b3552a45863900004b3504 -a33de20004550dba2675e200004550cb -a3420c000418e8ca9b41dd0000418e8c -a347b8f08192764ac0edb900c047a8a7 -a34f5b0004355d3aa1d1d100004355d3 -a34fb80003cf5c7a9775e600003cf5c7 -a3590a000434090a0d6ab10000433fc7 -a360b500047f717a3a5ac3000047f70c -a363a10003e2221a98f90b00003e2221 -a36646000406f22a955aba0000406f22 -a36772699b41e17ae41de200904858f3 -a371c9000464acba9ae39c0000464acb -a3727100042efb5a9b567d000042efb5 -a374710003d85b9a9aaa4800003d85b9 -a374fe0003e31e8ac886ec00003e31a7 -a379d900047a4ddaf85cb50000479fd4 -a38035d26038c6fa66d70a006047fcfc -a386ab0004af765a2a842800004af74c -a38825000499b5da05c6c30000498bb7 -a38a8b00047acbca86fab1000043539b -a38bc20004a4b16a1d4ba800004a4ab9 
-a38d350004732d4a55f73100004732ce -a39487000433187af8389a000043311e -a395510004a4b16a1d4ba800004a4ab9 -a3ab660003d02d5acb84c000003d02d0 -a3aba8480634a97a44aed500e04460d9 -a3acd400047f717a3a5ac3000047f70c -a3b73776297a244ab5128b00f047ba56 -a3b7700004a4b16a1d4ba800004a4ab9 -a3bbe50003b343fa834f21000038758a -a3bbe50003b343fad33d18000038bd74 -a3bd50f0098a952a54c36d000049b019 -a3be6ac3254b166a951bd100d04bf552 -a3c124000495948a03b164000049593e -a3c5170003d2689a945f6b00003d2683 -a3cb385a7d15c33a05b77600504c81a1 -a3ce800004718f4a34b9af00004718ef -a3d084daa2b84e2affc23900b04b1a36 -a3d312943e79ed5a74128200b048e03d -a3d523000460fbfaa335c90000460e70 -a3d6c15487eaecda6e31e200204bb304 -a3d6db000499b5da05c6c30000498bb7 -a3d8ef0003dd043a9a817200003dd043 -a3e1a40004a86f8aa6cd5a00004a86c9 -a3e95be90743469a80f14a00304a893a -a3e9e20004af765a2a842800004af74c -a3ed310003cb9b5a9c782000003cb9b5 -a3f27b0004b224ea2c16ab00004b224b -a3f790000391f86a834f21000038758a -a3f790000391f86aa24ec30000391f86 -a3f7fa0003ebb24aa2676200003ebb24 -a3f9aa000484ceca75b5b9000047fc39 -a3fb3700048d0eea8f07ec000048d0e4 -a3fcea000456e10a44aed500004460d9 -a3fd590003c15cea834f21000038758a -a3fd590003c15cea9fed5d00003c15ce -a3fdc50004344e9a8652b100004344e7 -a409cf00042346ea969a28000042346e -a4105600046f01ca40ba02000046f013 -a416e2000430d55aa2b1dd0000430d55 -a417738366cd7dfa4072ce00704c4a14 -a4179c00045588fadfb53800003e4255 -a4205daa4997e13adf274200204ce6f0 -a4294600043e5dbaa315c5000043e5db -a42ebc00044078faa30748000044077b -a43a35000446f58a4154660000446f0b -a43d1e645ec7fcba1e612a00e04ca1de -a43dc70004af706a37f63900004af700 -a4412300046386daa9b8a70000456aad -a44280000471b7daa2ea7c0000471b7d -a443b0000445180ab7e97300004450ba -a44e8a000405364a3c41dd000040534f -a44f390003cb9a1aa2f55900003cb9a1 -a45b3f0003db1b5af8c49800003d9ee4 -a45c9600047bfe0a9ee9de000047bfd5 -a4623000044077baa30748000044077b -a4646b00044713caa7c9970000447137 -a46c65000427a85a98bf370000427a85 -a4724500047bfe0a9ee9de000047bfd5 -a472720003cb987a9d08b300003cb987 -a4740c0003eac6aa99eca400003eac6a -a47b510004716eea9b49af00004716ee -a489900003fa3e6aa3306b00003fa3e6 -a48a40b04957a97a5412ab00204b268e -a48b4bea4db0c07aa2175500504bc7df -a48fc60004305a0afb3ee70000430541 -a492760003d2924a97beaa00003d2924 -a49476000460e70aa335c90000460e70 -a496a4da572aeaca4072ce00804c4a14 -a496d32e3fad423a4faf6b0070485b02 -a4987d0004a86f8aa6cd5a00004a86c9 -a49c0b7afbef34da7e623900604b4c07 -a49d060003eca98a9a880b00003eca98 -a49d16d87e2a861a91e41300d04bc548 -a49f25000433135a8cad2c000043311e -a4a2a20004519a9a9be49500004519a9 -a4a5bae79573fb3a6d7b25001049430a -a4ad2900042c4e3ad901d6000042c4e1 -a4ad3100040e656a98246b000040e656 -a4af4573834eb33aa11337002048555a -a4afd0000473377a4625de0000473375 -a4b3b0000444de9ac45a5f0000444d4f -a4b54300042e8d2a6e894e000042e8a6 -a4ba760003c29dfa44567c00003c29c4 -a4ba760003c29dfa834f21000038758a -a4bbcf00041a1e7ae8e86b000041a1e0 -a4bf590003d9e01aa36eaa00003d9e01 -a4c2ddd9c4d08ada8b4df4002049876a -a4c478b7d64d420a832996001047e182 -a4cf6b0003d5d95a98caaa00003d5d95 -a4d460be877d0c6a0e0bbe00604a146e -a4d54a0004a9923a51a53c00004a9760 -a4d8b73f1e66811ad7efb300204ccadb -a4e1170003dcae7aa3825600003dcae7 -a4e1a1000473377a4625de0000473375 -a4e30a0004044b5a973d5200004044b5 -a4e6890003f8833aa3a61f00003f8833 -a4ea7c54e4e4f3ea77a4c40020489a14 -a4ec95000409ef8a97a8470000409ef8 -a4eeaa0003d8274af8fa9200003d826e -a4f51e4436e7581a0798b500f0476752 -a507fa0003f2ad6aa3a0fe00003f2ad6 -a50b9481866807ba23521700204cb863 -a50e9800046f09cab40490000046ebe3 -a5103370f045bfaa2f7ba400104bda86 
-a515d3348a8b344adb2d5a00704aaee5 -a5163699b10faaaafbc12400b0493789 -a5193cd9a825434ae78e7400a04b2ab0 -a51f600003d84c0a9c175900003d84c0 -a521df619e610f8ab0980500f0499d53 -a522b100043bdc4a9d1b5b000043bdc4 -a529996bf203ea0aa0547c001049f8f9 -a52f2200048f3feaaccfbb00003e5555 -a535720003ec9f2a5670a400003ec9ed -a536870003b88c1a834f21000038758a -a536870003b88c1aa3b5f200003b88c1 -a5389a000433122a449d8d0000433120 -a53a660004b2566ae5fe7e00004b1093 -a53f5b0004365caa198f5b00004365bf -a548a700045826aa9d0bbf000045826a -a5558600040ef6ea9d2447000040ef6e -a558a80004bca72aa2175500004bc7df -a55a7e0004b99f5aba9e3900004b9971 -a5611400041717da9afac2000041717d -a56803b746004c9a6405d700d049516e -a56a7e00042a2eaa0d1401000042a2ae -a5715cb545cadd7a0471a100904776e6 -a57371a6c8b64e4a44aed500904460d9 -a57493312ea9ab7a74dd6b00004c4344 -a5782000046aea9a9ccc93000046aea9 -a57c0c0003f7a7aa9ad6ec00003f7a7a -a587c4856afa71da5b661900b0498a06 -a598560004a14b0ae1f45600004a1415 -a5a4944d23cd32aa61c2ab00704b9ce9 -a5a8d700046d448a37f602000046d41c -a5ac8d0003da60aa9b4c9800003da60a -a5add700048f8e9ae3730a000048407f -a5b3fe529b87c9daa2f9e70080484b9f -a5bb7c29dbb621ea82221d00a04b17ad -a5bf550004bca72aa2175500004bc7df -a5c48500047b731a09e485000047a763 -a5c6740004b54c3aa0c6ab00004b5493 -a5d0770003d85b9a9aaa4800003d85b9 -a5d799c769b202faa7256b00004c825b -a5df22000495e90a6f75240000495e81 -a5dfd4655bab966a752eab00904b00db -a5e66c92361023eadb2d5a00d04aaee5 -a5f0432a3cc62dda6699f1009047c503 -a5f0770003d7d28a9cd8f800003d7d28 -a5f1fb0003d013aaf6e13500003d0137 -a5f3d10004c46c9a01587c00004a2d84 -a5f77b5f217b8cba2ffd64006049767f -a5f8e800043dd3ea00f35b000043dd21 -a5fa3824b31a767a1cde45000047d682 -a60af78c21fa096a8406240090486d8a -a60c0f0003d1209aa4c3b200003d1209 -a60f8ea5fa19c5bad8a7a800d0498a27 -a611810004731e1ae042b50000473140 -a61282000485d3fa21aa7e0000459dc1 -a617550004b95adaf0faa100004b94f9 -a617e00004353deaa4aeb100004353de -a618d20003edf8baae40fe00003ed13c -a6258f0003d37a5aedb00b00003d379c -a62680000413ff1a9a060c0000413ff1 -a6355e327d26852aa82d2b00704a33cb -a636660004b19d3aa7a5a400004ab782 -a63918f5c15aa75afd68c2007048f520 -a639980004b54c3aa0c6ab00004b5493 -a6405c0004b2641ae9a7740000474812 -a646651c1183315aa2d2db00c04905ff -a64bbf0004672c1a9649c900004672bb -a64da800048576fabcbf460000485753 -a651029de4e527ba8b1c10009043299a -a652173d8a1099fa8b4df4000049876a -a652e90003e2221a98f90b00003e2221 -a654d4000480d09ad3bd7b0000480198 -a658999ad19e651aae0eab00c04b419f -a65e27000433187af8389a000043311e -a6617a0003ff7bba9efa8a00003ff7bb -a668b31f166b77eab6f2ce00c04bd096 -a66b1892a068a1cac4c9c700204ba792 -a671d2000477161aba27ac000047715b -a674f500045a04fad9052300004494db -a67791079301a68a6caa4e00104850f7 -a67d900004190f0a3f3ddd0000419082 -a67f210003dd043a9a817200003dd043 -a67f9c00045fe3ba292e7e000045fe29 -a685ab0004c46c3aec5f7600004c313f -a68b43683f79a3da733124000048f430 -a6947987edfe624a5079c100404bfbad -a69a8b00047b731a09e485000047a763 -a6a2db0004939f9a90d5d9000047e6ac -a6a3fd2a9124d09ac326d800504cbcc6 -a6a72280d8de2dfa5da1a10000477579 -a6b6560003d60c0a2fcf5900003d6099 -a6ba46000412adea9c66ba0000412ade -a6c1170003da649a99e00b00003da60a -a6c6c700048576fabcbf460000485753 -a6c8a0520296ca7a0721b900f047927d -a6c8cacd0460d9ca0cd1e200a048de9b -a6d312000413fb4a46d46b0000413fb2 -a6d3f9aaf7a0930a895e7e00504b3e6c -a6d4f500044a400a1ac67e000044a3f6 -a6d87c0004a1665af20fc200004a1567 -a6da530004c4978a35951700004c4928 -a6db1e56291b59ba608c7800c0484d95 -a6dc47000391f86a834f21000038758a -a6dc47000391f86aa24ec30000391f86 -a6dc760004513a1aa5876c00004513a1 
-a6e6c3542233d5cafff90a00304c72d8 -a6e7140004cb732a815f1200004cb675 -a6e99600047e23da2298b5000047e1e8 -a6e9cf000497678ad7b7d4000048e6fb -a6ea7b0004ba9b7ac1d46900004ba9a8 -a6fb010004019aca9f886b00004019ac -a700e9593260e0fa79dfb300f04cc8f7 -a703ed0004a67e3a76d87c00004a67cd -a708560004a14b0ae1f45600004a1415 -a711f961b53fbaca26d5c700804ac3e9 -a717bf000465068abef420000046503a -a718a40003ecb1ca8b693800003ec60c -a719dd0003fe026a9288a400003e19e6 -a71a0eced57d73dada39de00e0473dc8 -a721fe000427a85a98bf370000427a85 -a722117118560e2aa80f0200504a8b1c -a724b30003ce7fbaa5ee2000003ce7fb -a72c2d0003f88a6aae40fe00003ed13c -a72c6c6eca987f4af0b85c00504baab8 -a72d4b0003d02f8acb84c000003d02d0 -a72e82000485d3fa21aa7e0000459dc1 -a72eb10004340e5a7ddf5b00004340e3 -a732070003eca98a9a880b00003eca98 -a739d700048f8e9ae3730a000048407f -a73d12f6c2659d9a788ca100e04c7d6d -a749ba0003d0084a35d53500003d0069 -a74a80dca5e7c25a449ba400504ba293 -a7509300044f302ab52076000044f2f9 -a7587b933735cdba2d192400e0496371 -a75ca07ff36fc60a10d1a300004c3f17 -a75e8740ca0baf6a9b475500704b0de7 -a75fd10004bf4bca916c7600004bf4b9 -a763f600043536aa9efce8000043536a -a766fa26c38c189a96b7be00704a3d31 -a76bfdb0af810a2a8a937600404c4d0f -a7732749aa39de6ad115c100604c53dd -a773ae000418e8ca9c99860000418e8c -a776e68dbc26008aa90d1b00904cda6f -a7774e99cdea70aa8f24ac006048e0c6 -a77d520003fea66a5f799000003fea5f -a781640004991dea89320d00004991db -a787550004b0d14ae5782800004b0d11 -a787f600043c95ea3997dc000043c95a -a7901fae015148dab7d9cf007048efb1 -a79797000472d5ead8527c000046e202 -a79a9e0003cf5b0aa639e600003cf5b0 -a79f1631edf09eaa83f17b00704796be -a7a3b5000423fd4aa622280000423fd4 -a7a42cec9d2718fa9786b200204c21ef -a7ab52ef58be48ca66b6f800f0497dbe -a7ac3300049d0e7a7a4c05000049d0dc -a7ad940003edf8baae40fe00003ed13c -a7b48d0003d84c0a9c175900003d84c0 -a7bd0a0004c7efaa5fcae800004c35c8 -a7bd9a000423701ae3680100004236f7 -a7c1720003e2e6faa66c0b00003e2e6f -a7d590000401871a9fc1520000401871 -a7d9ee90c91cccaa70da4500c047f5f6 -a7e295000472d5ead8527c000046e202 -a7e4b30003cb997aea76c300003921cb -a7e674409c9e53aa9e04d4004048020a -a7e70a00041717da9afac2000041717d -a7e9940003e3c98aa0817200003e3c98 -a7ea4500047b5f7ab1068700003b603a -a7f5d2000481525ae51bac000048148a -a7f70a000476fb7a9534b50000476fa5 -a7f97b000481525ae51bac000048148a -a7fac200040a914aa05d90000040a914 -a7fe970003d0080a35d53500003d0069 -a800130004ba9b7ac1d46900004ba9a8 -a8089430e616537a5ca81300904b184a -a809f700042efb5a9ca475000042efb5 -a8127b0004ac3bdaad964e00004aac2c -a817420004cd113ac326d800004cbcc6 -a81a6e0004cd113ac326d800004cbcc6 -a81a9d0003cb9b5a9e1e4e00003cb9b5 -a81d5b0004726dfad8527c000046e202 -a8210a0004c2f26a45f16f00004aa8e0 -a8252b0004a27ccaa2e5a10000476abd -a827b5000429a4daa6d9290000429a4d -a82a19000499b5da05c6c30000498bb7 -a832e90003f7a7aa9ad6ec00003f7a7a -a838f80003d70ccaf1589800003d4bb0 -a839a30004c706baaf0a45000047d45c -a842e90003eac6aa99eca400003eac6a -a846e20003c448fa834f21000038758a -a846e20003c448faa66e7c00003c448f -a847bb319c4f660a752196009047f142 -a84fba4dd523dbca2818ac001048de42 -a8583d0004cb596aa1037600004cb593 -a859e20004585c2aa6bbbf00004585c2 -a85d3a867c0a628a3bf7a800304975c0 -a85dc500043bf0fa362ce8000043afe7 -a863811530ee801a81c97b009047617c -a864c200048f656a8b10ac000048e072 -a8694c000472606a834f21000038758a -a8694c000472606aea76c300003921cb -a86a19000498751a04cfa80000498750 -a871f00003bef06ab1068700003b603a -a874770003da60aa9b4c9800003da60a -a87a828822ce61da8bb89600504781f5 -a880280004b8f23a5c25e2000048b784 -a88535042861345adad1d000704a90f3 -a885d20004839c1aebda450000482f2d 
-a88b390003b88c1a834f21000038758a -a88b390003b88c1aa3b5f200003b88c1 -a88d6b0004c2f26a45f16f00004aa8e0 -a88f3700048634da430db9000048484f -a893250004991dea89320d00004991db -a8969886de5ffaea5fcae800004c35c8 -a89a8f0003fcd4ba834f21000038758a -a89a8f0003fcd4baea76c300003921cb -a8a0880003d7d28a9cd8f800003d7d28 -a8a31200040dddfaa12c47000040dddf -a8a5750003d60c0a2fcf5900003d6099 -a8a6300003cb987a9e729e00003cb987 -a8a80b0003eda30aa176ec00003eda30 -a8a9eb000433187af8389a000043311e -a8b0314e5984b76a57242c00e04c05da -a8bd2c000470644a781090000047063b -a8cb8c0004ab9b8ad6dad000004aac27 -a8cfa40004b8f23a5c25e2000048b784 -a8d2b1d602e0eb7afec3a400904b57b6 -a8d6cb0004cdddba71d5e300004cdd95 -a8d6e70004305a0afb3ee70000430541 -a8d8690004b21f5a50947c00004a1491 -a8e151caef69d23a1ae85600404a4ea3 -a8e716dea160daba3dd27400504ac9b6 -a8eb6d0004a18b6aaeb7be00004a1608 -a8f0a952d5e8e8aae6622400104960ab -a8f2560003d70ccaf1589800003d4bb0 -a8f9de00047eb04a4560b5000047ea75 -a8fb9c00045d8a5ad570a7000045d456 -a8fc5d000433122a449d8d0000433120 -a8fcb96f9c7c936aa2e55100004a1120 -a8ff9e0903d4f38a90d5d9000047e6ac -a8ffbb0003f5066acbb8fe00003f427c -a9004700041984faa7b6ac000041984f -a90bfa0003efc15a1672ec00003efbd9 -a90e1d0004ac9b9a3dd27400004ac9b6 -a90ebb8975e7996af4c8eb004048df31 -a90fa800049e74aa465379000049c0ea -a9168a000406c88aa19bcf0000406c88 -a91767000447137aa7c9970000447137 -a91a0c000413fb4a46d46b0000413fb2 -a91cb500047f852a56cfac000047ce7f -a91d43000413ff1a9a060c0000413ff1 -a93d6f0004a9937ab3a9a400004a9842 -a942a8000412adea9c66ba0000412ade -a94305000490b7dab63a190000490b79 -a950d20003dc9d9aa80d9400003dc9d9 -a953170003d896aaa7af3d00003d896a -a9554a0004a9937ab3a9a400004a9842 -a95bcf00041c9a8aa18d90000041c9a8 -a95c6800043bdc4a9e8c50000043bdc4 -a963dc00049fec8a5b9fa8000049febf -a9689500045a52eaa0d9e2000045a52e -a968991086db159a5df3a600704ca540 -a975a8000489882adb4774000047b57c -a97d7b0004839c1aebda450000482f2d -a97e0200046ec63aa06a80000046ec63 -a9817b1e653e8e1a4ecf33002048f891 -a9820c0003fc3b6a66b20c00003fc3b2 -a9861ce0f53c7feaa7cba800f0491f9b -a986ab0004b4193aa775c700004b411e -a98b3d0003d60c0a2fcf5900003d6099 -a9912c2c6b00b0fa90d5d9000047e6ac -a996660004b1f16a34d7a400004b1f0d -a99f730004cc1c2a2849e300004cc1be -a9a85c0004bc84eab52a7400004bb5b2 -a9a92300045598aaa10076000045598a -a9a9c368354e7eeaa8852400404993e9 -a9ab1f51aa2f49ea40cb22003049016a -a9b876000444e20ac45a5f0000444d4f -a9bcb60004aa100a81a71700004aa0fe -a9c0410003c295ea834f21000038758a -a9c0410003c295eaa8339900003c295e -a9c05000043698ba198f5b00004365bf -a9c3260003c18eba834f21000038758a -a9c3260003c18ebaa86e3e00003c18eb -a9ccd400047f852a56cfac000047ce7f -a9d17a00041b58eaa87f01000041b58e -a9d1d6000423701ae3680100004236f7 -a9d80f000490b7dab63a190000490b79 -a9d96800044129eaa2017e000044129e -a9de1300040ef6ea9eb3cf000040ef6e -a9e530e7845904ba70d42f00d047f818 -a9e6200003cfc4badd42cd00003cfb55 -a9edc50004340e5a7ddf5b00004340e3 -a9efb20003d0d25aa8976600003d0d25 -a9f0720004a433cac64b8e000043fa11 -a9f9dd0004305a0afb3ee70000430541 -a9fac4c51df9c8dae399b900a04834ea -a9fd2400048e5c8a80d282000048e068 -aa06660004b3d19a95a6ab00004b3d0b -aa0a720003d59f1a49e31700003d341d -aa0f110003cb9a1aa2f55900003cb9a1 -aa132200048e5c8a80d282000048e068 -aa15860004010dfa5512ba0000400bdc -aa2374000481bc6a8e48b50000481bbf -aa25d72188993e5a3b6ca100204c1255 -aa2a4500047a445a8bb89600004781f5 -aa2b630003d020aa4b0f6600003d01b7 -aa2c0f000418e8ca9c99860000418e8c -aa2ec3000481bc6a8e48b50000481bbf -aa36660004bb7d3af4dc1300004b363c -aa38c1fc702456ea6a0ad800604ca657 -aa3cf5000450180a74dc7600004500d8 
-aa3d0b0003ebb24aa2676200003ebb24 -aa3ddd0003fa3e6aa3306b00003fa3e6 -aa40fef68b76c51ad64b740060475c97 -aa46f7e38fa1d4baae86a500b04c3a8b -aa4b4b197f523deaef75e200004bc537 -aa557b530d15349abe0d240030490327 -aa5ef20003ff7bbaa0499000003ff7bb -aa60e377ddd1feeaedc6a500804c5e45 -aa619f0003cbc2ea27619f00003cbc05 -aa633d0003dcae7aa3825600003dcae7 -aa63453a5ea5b11aa1480f0080494924 -aa63b500042513fa2ea27e00004250ca -aa6494b4582d6eba4af3ed00004a64a5 -aa65750003d9e01aa36eaa00003d9e01 -aa69e34948d8a37a12302f005047d7dd -aa6ba40004b3e4bacfe9e200004b3706 -aa6c730004488f4a18704200004488ea -aa78b0514929beba40b96b00804bf52b -aa7a7c0003c29ffa834f21000038758a -aa7a7c0003c29ffaa8f51400003c29ff -aa7c5591c07108daa6c9be00d04cd34c -aa7ca80004b7e93aa5481300004b170c -aa88930004581bfaa9212300004581bf -aa8a5109ce4d3d9a6c930a0070483506 -aa8b6000044e006aa18aa2000044e006 -aa8c0e7152739c3af83d0a00304c1500 -aa8ff60004355d3aa1d1d100004355d3 -aa94280004b3e4bacfe9e200004b3706 -aa94690004b2566ae5fe7e00004b1093 -aa94e800043504ca66b7f6000043502a -aa9ae80004a433cac64b8e000043fa11 -aa9cfe0003ed198a1ef0fe00003ed197 -aa9e660004b484ba3e36ab00004b4845 -aa9eb7eee3eeaf0a54793300404beeb4 -aaa2df5a46e8390a7f09d000c04cc02e -aab2b5000472d5ead8527c000046e202 -aab3a40004af9adabf1c6900004af990 -aab5fb00042efb5a9ca475000042efb5 -aab8470004091eda86cd1700003dc131 -aab90b0003f2ad6aa3a0fe00003f2ad6 -aabb5d0003f8833aa3a61f00003f8833 -aac09500045a4faaa1af9c000045a4fa -aac424fb160c445aa7a5a400304ab782 -aac8130004b6707a188c6900004b6703 -aad0a700044dd09aa99420000044dd09 -aad36c000449fdaadebbbf0000449fd7 -aad5860004127a7aa991dd00004127a7 -aade7d000430d55aa2b1dd0000430d55 -aae28f0003fb08aaa9655200003fb08a -aae2cd0003cfb60add42cd00003cfb55 -aaebf6000435463a7fa7e0000043544f -aaeff04db967e90ae261d000104a9bd8 -aaf4015f3d52744a137b0a00304810aa -aaf55ec18d7b061a2357230000484b67 -aaf8a7000444bfaab77df40000444acd -aaf8d10004c93b5aeb39ec00004c93aa -aaf9ae0004a5384ab486800000470089 -aafba014f83ca2fabb01c100504c77d5 -aafc2d0003f8c07aae40fe00003ed13c -aafe460004019acaa0e3cf00004019ac -aaff5b00043e5dbaa315c5000043e5db -ab08330004a5384ab486800000470089 -ab08a70004c5fa0aa4250a00004c5f91 -ab08a9c08853074a14c9d20000476fa4 -ab10be0003ca6a8a4252d900003ca678 -ab198e1cf402104af88e5600504b9f51 -ab1cea000456aadaa9b8a70000456aad -ab1e0c000405bb6aa9a5860000405bb6 -ab20660003d12d9aa123b200003d12d9 -ab22c90003cb9b5a9e1e4e00003cb9b5 -ab23a40004b4f70a94aaa100004b4f65 -ab29380003dde0ea522d9400003dddf1 -ab2d980004b4163ab1068700003b603a -ab349600047a445a8bb89600004781f5 -ab36ab0004bca72aa2175500004bc7df -ab36f8798ed47d8af603ac00a047643d -ab39819a25dd155a00a9240050490cc7 -ab43aa3ade6eb89aa451f10010479e64 -ab4bc9000471b7daa2ea7c0000471b7d -ab4ff600043d857a6effe0000043d855 -ab51b200044077baa30748000044077b -ab57430003cb987a9e729e00003cb987 -ab581ce5b7928c1aa9b47600b04c4947 -ab589300046a122aaa0123000046a122 -ab64baf7f369557a449ba400804ba293 -ab681eaac6a93bbab1d66300d0472d89 -ab68890003d1209aa4c3b200003d1209 -ab68b48bb7d924daf88e5600104b9f51 -ab7e7c000471babaa94e980000471bab -ab7f0100041b8ccaaa2990000041b8cc -ab85d00004a9923a51a53c00004a9760 -ab94d4000481525ae51bac000048148a -abae560004bb7d3af4dc1300004b363c -abb10b0003ed1a5a1ef0fe00003ed197 -abb40104f56104fa8343ed00c04a1622 -abb70a000473377a4625de0000473375 -abb984ddc2a7c8aa0ff59800f04bc033 -abc1e2000460e70aa335c90000460e70 -abc5d9000473e04a7448d40000473d5c -abcdc9000462bf9aaab27e0000462bf9 -abcdf4dc8ee90cfa70fe96008048db60 -abce943de691c5aaa0c65300504c714c -abd72f1a5aa18f8ac7d8a100104be7f6 -abda0500043536aaa13ab1000043536a 
-abe6170004c8747a2522d800004c8746 -abeba80004a2f80a360429000039346c -abf0e8000435463a7fa7e0000043544f -abf29d00043bdc4a9e8c50000043bdc4 -abf42db6825009ea8bdcc4008048910a -abf48500047f6a9ab29333000047e486 -abf4b3e39d03dc0aca302f00b047f667 -abfab2000433122a449d8d0000433120 -ac012b0004a1560ae7abbe00004a1555 -ac023848cf1d73fa37e96b00204c2cc3 -ac0c5600049b36ba9aad2b000049ae58 -ac13f60003c7a43aa86e3e00003c18eb -ac16a10004b4193aa775c700004b411e -ac19cc0003e3c98aa1dcdb00003e3c98 -ac1efa8160be5b1abcd1cf006048e91c -ac28f500046c63aa9ccc93000046aea9 -ac294cedc14a092a6a536c00c044a043 -ac2cfe000444f10ad33d18000038bd74 -ac30d700040a914aa1bddd000040a914 -ac33ea00042cb6daf622ca000042cb67 -ac37e000043ae82a41cfe0000043ae7d -ac3a8b00047f6a9ab29333000047e486 -ac3e0c000411f3eaaaed7a0000411f3e -ac421d0004b936ea649b5500004b86ae -ac427e00044a593a4e3476000044a58c -ac435308d8fb9b6a42f5ab00504c2538 -ac512000042e3bcaaaf7a9000042e3bc -ac539c00046475fa5cc8950000464759 -ac54aa000401871aa133cf0000401871 -ac56a30003c18eba834f21000038758a -ac56a30003c18ebaa86e3e00003c18eb -ac5fa40004bb5cea35842800004bb5c5 -ac68a70004c5587ae832190000496f76 -ac6bea4564005d0a9720c2008048f4f5 -ac705100048c8dda409ac7000048c8d3 -ac747eb555a90e8ac2866600f04bbde4 -ac75c00004a66d7ae91f6d00004a66d1 -ac75d1000435412a4f1ab10000435394 -ac7a9fd63817549acad19400a04aaa26 -ac7ad90003c448fa834f21000038758a -ac7ad90003c448faa66e7c00003c448f -ac82c200040a120afd8190000040a0ff -ac867c0003c295ea834f21000038758a -ac867c0003c295eaa8339900003c295e -ac8ddb0004236feae3680100004236f7 -ac9cb978d55ff95af8305e00004cbe36 -ac9ee90003eda30aa2a8fe00003eda30 -aca3d400048f4dea13a955000048f4d6 -aca573000445158ac9e17e00004450b0 -aca61300040dddfaa287cf000040dddf -acad19feee5de0ea361d4a00f04a9ed9 -acce2e794bc7d1fa1993cc00b04ca33d -acceaa0003db5eeaea76c300003921cb -acd30a00040ef6ea9eb3cf000040ef6e -acd3dc0004a4b16a1d4ba800004a4ab9 -acd4720004a66d7ae91f6d00004a66d1 -acd61b0003d013aaf6e13500003d0137 -acd8ea000454a87a038c760000454a70 -ace04700040c07baab9aac000040c07b -acf6d37005d1ae2a2ec6a100b04bcd74 -acfb390003cb9caa6a7b3900003cb819 -acfbae0003ff7bbaa0499000003ff7bb -acfe04ec82391deaddd50a00604be4eb -ad005c0004b60f6a4ac41300004b4cb1 -ad01a1000474997acbab0a00004748e3 -ad06bc00043fd9dae87abc000043fd66 -ad0a4500047ebafa0a0d7b000047eb9d -ad0ecbe07ddc50baf0485c00f04b25b9 -ad142f00046e855a834f21000038758a -ad142f00046e855aea76c300003921cb -ad180b0003d7ae0ae6218f00003d7ace -ad1abd128b8705fa381c8500e0473c3b -ad1e16000406c88aa2e6ba0000406c88 -ad22760003c29ffa834f21000038758a -ad22760003c29ffaa8f51400003c29ff -ad23d000047be7ea25c02f000047be79 -ad26d00a02c3ca9a8b2b1400604cae15 -ad2786000486bebab1068700003b603a -ad3173b81f9bc75a2aad0a00704be716 -ad34f7725db80a0a0253330010473d81 -ad37bf00046c45aaac0170000046c45a -ad38500004353deaa4aeb100004353de -ad3aa10004bb794aaf0a45000047d45c -ad4245000474997acbab0a00004748e3 -ad42450004cb773a815f1200004cb675 -ad427e00044a3fca1ac67e000044a3f6 -ad43346951b9921a2d19240000496371 -ad44050004a2ed7af57fdc00004a2ece -ad4f22000491c73a08a7f5000048cc57 -ad4fda737c39bb2a1983b300d04cc88f -ad52849f8923301acec38c00d04ab018 -ad55d9000473e04a7448d40000473d5c -ad56480003d5152a9634f800003d2924 -ad5ad43d6f92c69a55977400a047b86a -ad5e09f5e5e64b4ac4b03300a04a2d1f -ad5fbb0003dde0ea522d9400003dddf1 -ad66db00048f656a8b10ac000048e072 -ad6ab2af1a8e37fa9534b500e0476fa5 -ad6d18c8960b3f0ac0187c00c049f08c -ad70280004b4f70a94aaa100004b4f65 -ad79176824aa2e0ade1ac30080494581 -ad828b00047f7faafba82f000047f7da -ad8eab0004b60f6a4ac41300004b4cb1 -ad90a70004c5587ae832190000496f76 
-ad9d510004a754ba26c6e800004a7549 -ad9f760004be8edaada151000049f5c5 -ada28268acf4307a6eaa8b000048372a -ada3c6000431845ab45658000042e389 -adab6d0004a2ed7af57fdc00004a2ece -adad4300041c9a8aa2cf01000041c9a8 -adb7620003e2e6faa66c0b00003e2e6f -adbdd200047d60ea7cca45000047c145 -adc2d8170c1191da67bc7200a04a5241 -adc6f20004019acaa0e3cf00004019ac -adc9330004c2f3da35842800004bb5c5 -adcf74000483ae8a7149f10000483a90 -add9c6e9c24db27aecc93300304c1ea1 -addcf50004513a1aa5876c00004513a1 -addf9060d0f5768a7a4c05005049d0dc -adf0910003d12d9aa123b200003d12d9 -adf27e000423109ab0a7ea0000423105 -adffcf00040550fa3c2f1700003d1b8f -ae0248d4fb660caaa2bc7900f04ce878 -ae02ac00040a120afd8190000040a0ff -ae17590003d59f1a49e31700003d341d -ae19de000483fc6aba27ac000047715b -ae23f739343f6d8ab7c5a100904795f1 -ae26ce0004c4978a35951700004c4928 -ae28790004ce36da1cc9e600004ce364 -ae28b60004aba72af20fc200004a1567 -ae298f0003dc21faac8b1700003dc21f -ae2d108cca0eb24a08fb190090487de6 -ae2e4e0003cbd3ba9c782000003cb9b5 -ae2f910004cc095a1a021d00004b9eb6 -ae31d00004aa2d2a68800f000049811d -ae32500003c2ac5a834f21000038758a -ae32500003c2ac5aacb29900003c2ac5 -ae33310004732d1a55f73100004732ce -ae3d96000481475a9ec5de000048146d -ae3f9719db2f03aaadf00500e04a0ddf -ae40d6bd9df702fa902c6f004048ec62 -ae43510003cb9a1aa44f3900003cb9a1 -ae480b6d070246faa639e600203cf5b0 -ae4e5000048e5c8a80d282000048e068 -ae527e00044ca4baad0820000044ca4b -ae52bb0004a9937ab3a9a400004a9842 -ae56920003d5120a9634f800003d2924 -ae67b60003fa3e6aa4899000003fa3e6 -ae68a300043bf0fa362ce8000043afe7 -ae6d7000045d401ac17893000045d3fe -ae6d7874bc74745a28615100a04a3f8d -ae6de9362924a45a746f33001047d607 -ae6f210003dcae7aa4e11700003dcae7 -ae72390003d9e01aa4bf5900003d9e01 -ae73ef345ec4e97ad63dd200c0484991 -ae7ace0004af765a2a842800004af74c -ae7f3d0003d17caa438b5900003d17c5 -ae7f6d0004a16dfae24c20000044f1e2 -ae840b0003e5555aaccfbb00003e5555 -ae85300003f8833aa4e68900003f8833 -ae8f6d0004a433cac64b8e000043fa11 -ae90a300043c2e2aad4f5b000043c2e2 -ae91f80003ebb24aa3f7fa00003ebb24 -ae97b600040a914aa1bddd000040a914 -ae9c9e467d7d41aaae63c000c0472eb4 -ae9f620003dc9d9aa80d9400003dc9d9 -aea0ef0003e3c98aa1dcdb00003e3c98 -aea42000044e354aad47bf000044e354 -aea9fc524411099a45a6db0090491843 -aeb436c9a07d83caa4c1e600d04ce3e6 -aeb55500048fb38a4ecf33000048f891 -aeb85f00043536aaa13ab1000043536a -aeb9bd0004423e2a65b79200004423a6 -aebfa800048fb38a4ecf33000048f891 -aec5a10004746b4a1bcb0a00004746a7 -aec5c9000464686a25db080000445f86 -aed27c0003c878faa66e7c00003c448f -aed2e80004a166ba3c069d00003a6629 -aed431151c522dba46513300904c3fb0 -aed70a00047e6f3ac15333000047bd67 -aed792000445d29a199def0000445c38 -aede740004b393eaee62ab00004b393a -aedf0a000477bb9a0253330000473d81 -aee1b900047e6f3ac15333000047bd67 -aee4410003c6658af726e200003c532d -aee5621d1df63d8ac0bbd100504c31e1 -aeea7c0003c7a43a834f21000038758a -aeea7c0003c7a43aa86e3e00003c18eb -aeee45000481475a9ec5de000048146d -aef0a0846ce3158aef7ceb000048b819 -aef8fe0003e4664a47fb6200003e40b5 -aefd0a000435412a4f1ab10000435394 -af005a8b9361e9faa5c833007049bf8a -af01a5000423fd4aa622280000423fd4 -af04c400048801aa08fb190000487de6 -af0c3484d21cedbab2c6240000485360 -af147e00043404ba4b0f6600003d01b7 -af181dea98b69bea4ad77300004cdae7 -af1ff52ec1a853aa18cda400904abd3c -af25e8000430d55aa416e20000430d55 -af25f80003f2ad6aa507fa00003f2ad6 -af2a16000401871aa133cf0000401871 -af2ad5000444f10a834f21000038758a -af2ad5000444f10ad33d18000038bd74 -af2db9000473c0cacd197b0000473bee -af2e300003d1209aa60c0f00003d1209 -af2f4c928745fd9a32ed7b00e0482c4a -af2fd3066e3415da5075a300804c67f3 
-af418f0003d896aaa7af3d00003d896a -af429600048801aa08fb190000487de6 -af4552000407087aab34470000407087 -af4a050004355d3aa34f5b00004355d3 -af4a55000388827a834f21000038758a -af4bcf00040039caadf847000040039c -af4d940003e4664a47fb6200003e40b5 -af5093a5de8c883aa451f100d0479e64 -af52107551ceeeca15cc2500f0490048 -af548e0003eda30aa2a8fe00003eda30 -af5568256cfd85da43cd9800104acc33 -af57910004ce62ba262b7500004ce5f1 -af58aa00040dddfaa287cf000040dddf -af5ace0004ba9b7ac1d46900004ba9a8 -af5dd2000433e6fac79b4500003927b9 -af61980004b393eaee62ab00004b393a -af6447000400a5fa51706b0000400a56 -af736c000456e11a5258950000456de1 -af774600048576fabcbf460000485753 -af785dcb46d50e6ae472c300b049585d -af7e7e0004b3e4bacfe9e200004b3706 -af7f4600048693ba62036b00004868c9 -af802f00047855daff98850000478439 -af837def111a2e0a3cbe6600f04ba29a -af899000042022aa710fcf00004201ba -af89de000473c0cacd197b0000473bee -af8ca70004585c2aa6bbbf00004585c2 -af8da100047b731a09e485000047a763 -af8eae72c6789b0aa4250a00904c5f91 -af8fafc6c225233aaccdd000004cd092 -afa16372a97b0aba3d43be00104a59a0 -afa40c0003d0d25aa8976600003d0d25 -afa4db0003dd78faae418f00003dd78f -afa5a100047a4dbaf85cb50000479fd4 -afaadb30e30d2aaa66e19800104baccf -afabbf000457907a40f27e00004555c8 -afbb010004022bcaae298600004022bc -afbc6b00041984faa7b6ac000041984f -afccb1fe497b6f2a33a63900d04bd6fd -afd2a8000406c88aa2e6ba0000406c88 -afd3b20003d013aaf6e13500003d0137 -afd40b0003ed13caae40fe00003ed13c -afdd78000447137aa7c9970000447137 -afdf79b3691ed60a232e7400304b9ae5 -afeb5d0004701c6a303cd7000046fef3 -aff1f20003b0c4aa1f6c5c0000393135 -aff8fe0003ec61ba8b693800003ec60c -aff92300045d8a5ad570a7000045d456 -b0009300044a400b1ac67e000044a3f6 -b001a30004c1f2db56082800004bd838 -b002ac000401458b2db5900000401438 -b006ac00041b58eba87f01000041b58e -b00898f9ffaf960b4df8d4008047515d -b00ea10004ae13ab9e9433000049f049 -b0152400048f829b66c8d40000481a9c -b01c0a3f03b2fe4b0b719800c04bbda0 -b01d170004c6db4be832190000496f76 -b0235b000436a15b11e64e00003cba63 -b025c10004beb5cb3101a300004beb57 -b027e90003d0cc1be023e900003d06a9 -b02d4c99e6cfb1cb2a986900904b3824 -b03047000401458b2db5900000401438 -b032ec0003f0bc8b1672ec00003efbd9 -b03d23bed2e20e6b195b5500d04b20d1 -b03d7b000479f19b5c8fac0000479ef7 -b040a7000456e10b5258950000456de1 -b0434c70501b08ebfdd0b500204827c7 -b043bd6e0de6958b81c1d900b048015a -b04bbf00046a0c4bb1068700003b603a -b04ca80004b54c3ba0c6ab00004b5493 -b0529cf55528824be5fe7e00404b1093 -b05d54447a3c708b5bf0eb00b048e0a0 -b065330004beb5cb3101a300004beb57 -b06b550004acb8ab72299800004acb60 -b07169cad6cbb59b96a4a100404c078d -b071de00047bfe0b9ee9de000047bfd5 -b072e80004a166bb3c069d00003a6629 -b073ed74d12ef40b91312b006049ebef -b0787c00043504cb66b7f6000043502a -b0808f676fee8bab1443330090496ebc -b080ea00046503fb6a536c000044a043 -b082fbd312d3dacbc2a05600b04a227d -b08374000483ae8b7149f10000483a90 -b089b7cd058b48db50d2e800a04a32a5 -b08e0cadb0ac2f0bff23ac00c047e731 -b08e9600048de45b2818ac000048de42 -b09c93000466f29bbd89e20000466f24 -b09db9000474997bcbab0a00004748e3 -b0a0a10004c2f26b45f16f00004aa8e0 -b0ac47000408312b4b0f6600003d01b7 -b0b30f7fb6bf25abaf529500f0472fac -b0b4607d41607e2bb1f24500004793b3 -b0b6b4d8e8d8594be6ba7400304bba44 -b0bef800048f656b8b10ac000048e072 -b0c11400041c9a8ba2cf01000041c9a8 -b0c924000490b7dbb63a190000490b79 -b0c9ab0004be94cba3f96b00004be947 -b0c9db000424eb8baf80020000424eb8 -b0ca4500047cce9b6b86c3000047cc38 -b0cb0100040ac34baf7590000040ac34 -b0cb620003e01fbbaf740d00003e01fb -b0cbdf0003cf5b0ba79a9e00003cf5b0 -b0cce741863b952bfd8c3300b049b337 -b0d763c6c4779ccbc6c9e2007044e600 
-b0e14821861d80cbce5c9600d047b9cc -b0e35984c14a2e5b5b05b9004047a9ae -b0edd200047bc05b50e9d9000047bb32 -b0f048aa89e4121b41870e00604cb7d3 -b0f066000448407b3d0579000043dd4a -b0f8b50004771a8b4b0f6600003d01b7 -b0fae80004be94cba3f96b00004be947 -b0fef20003fa3e6ba4899000003fa3e6 -b1048e0003dcae7ba4e11700003dcae7 -b105d80004cb052b501ece00004c6c32 -b10a220003f8833ba4e68900003f8833 -b112760003c2ac5b834f21000038758a -b112760003c2ac5bacb29900003c2ac5 -b11a7e0004acb8ab72299800004acb60 -b11b6b0003cb9a1ba44f3900003cb9a1 -b11cb70003d9e01ba4bf5900003d9e01 -b11da100047d0ddbc15333000047bd67 -b1220c0004127a7ba991dd00004127a7 -b1269800046d4b1bafab51000046d4b1 -b128130004b1bc5b727a1d00004b1b89 -b1283d0004cb7dfb41870e00004cb7d3 -b1292b00049d0e7b7a4c05000049d0dc -b12ff600043bf0fb362ce8000043afe7 -b1334e0003ebb24ba3f7fa00003ebb24 -b13caa631dc4d1eb1b7733000047ba43 -b13d170003dbe8dbafcffa00003dbe8d -b141d69c5408b72b00fcc400c0488857 -b1434600048693bb62036b00004868c9 -b14571726c4c534b36adc700d04b770e -b147cf0003fb08aba9655200003fb08a -b149eb21bbbb07cbaad14c00c0473115 -b14d22473a624b0b92f03d00a04cb9aa -b153f600043504cb66b7f6000043502a -b155023b4915fc2bd254d4007047496e -b157cf000405bb6ba9a5860000405bb6 -b1603d0004c874fb2522d800004c8746 -b1617e7b17ce608b9098a700a04c718f -b1655d0003c18e3b834f21000038758a -b1655d0003c18e3bb002a300003c18e3 -b16c96000475757b688ac30000475742 -b16f11db7e988b6b3314b500b0475ae4 -b172b15a7d99b81b6868a100404c00fb -b174280004ad18dbb591d000004a886f -b17a6763e45e250be30cb700004a8190 -b17b48c23ba0a87b105f0a002047a896 -b17b5c0004a86f8ba6cd5a00004a86c9 -b17cbd5be8d372fb5544d40090483f12 -b184b50004764c4bb351d20000473e7e -b18b170003d76ecbafe73d00003d76ec -b19095000454a87b038c760000454a70 -b1932000046e323bb035dd000046e323 -b198b500047f81bb70d42f000047f818 -b19cf91c11a5c82b356dd900904803d4 -b19cfe000445135bb7e97300004450ba -b19dd6e221ba232b2699d2007047f1e7 -b1a70ccb612b6bfb801f6d009049aea2 -b1ad44704d302f1bc42ec70080485383 -b1b1330004c6f40b2c3cea000045af74 -b1b297000430d55ba416e20000430d55 -b1bf9495a8efc00bb097ed007049b1a0 -b1c5c000049d24ebdf4dc0000049d247 -b1c7fa0003eac8fb98980d00003eac6a -b1cf298b5e1c7b3bfbb9b200c0472eb6 -b1d002000423105bb0a7ea0000423105 -b1d0760004581bfba9212300004581bf -b1d0a90003d1209ba60c0f00003d1209 -b1d49d63a909f7eb7eaba400404aec4e -b1d7a10003e2e6fba7c17200003e2e6f -b1d7a70ea7f6ffdb96689300c046c653 -b1d86b000407f4dbb02aba0000407f4d -b1db4e0003f2ad6ba507fa00003f2ad6 -b1e009a7b94f0cdb4e1c7c00f04a6a02 -b1e87c000436a15b11e64e00003cba63 -b1e90a0004c6f40b2c3cea000045af74 -b1f024f17b811baba8aceb00b0488f29 -b1f1170003d5120b9634f800003d2924 -b1f29d0004353deba617e000004353de -b2007c0003fce4dbea76c300003921cb -b2067e0004bb5ceb35842800004bb5c5 -b20a7e0004b8f23b5c25e2000048b784 -b2114600043d5d2bb09eb1000043d5d2 -b212500003c2bd2b6925fe00003c2bce -b212500003c2bd2b834f21000038758a -b214fe0003e5acdb27619f00003cbc05 -b215fca9126e90db442f2200c04985ca -b216c200041b8ccbaa2990000041b8cc -b21979000435412b4f1ab10000435394 -b228c20004909d5bd66f220000490972 -b2294c953f06256babfae800f04a077a -b232ba0004217b3bbb7d860000421757 -b232f69163de96bba73e7b00f04b4979 -b236c200042175abbb7d860000421757 -b23a6300043698bb198f5b00004365bf -b23c47000411f3ebaaed7a0000411f3e -b23d0b0003e56f4b03f0d200003e555b -b243b5f301d9c74b4baa4500f0474ed0 -b245c90004613c7b61f4ea00004611c2 -b2462b7f298d16cb6caa4e00504850f7 -b24c2f00047c441bb9a496000047c43d -b256190004959c1b6610760000463430 -b256ba000420ce0bdaad860000420cdb -b25ba800049ff65bc681ae000049ff63 -b25fa80004a1bf8baf0a45000047d45c -b263210003dc9d9ba950d200003dc9d9 
-b272a30003b603ab834f21000038758a -b272a30003b603abb1068700003b603a -b274ac01e44f74eb529dd20010474fa7 -b2762aeb9b3ced0b8dbbcc00f04cae64 -b27d46cc419ca9bbcc77d000b047bf61 -b280930004611f7b61f4ea00004611c2 -b2845f0004355d3ba34f5b00004355d3 -b29a2400048de45b2818ac000048de42 -b29c5a916b079a9b6e9bac004047d3ed -b29d0f17bc45c84bae25a300904881af -b2a2fe0003b0c4ab1f6c5c0000393135 -b2a2fe0003b0c4ab834f21000038758a -b2a35100046d831bb1582f000046d831 -b2a9131c89008c8b8e4c1300d04b2869 -b2ac4c000429a4dba827b50000429a4d -b2ac7600046a122baa0123000046a122 -b2af76b46bbbdeab5085a10040475ab4 -b2b5791a810e49bbe7abbe00004a1555 -b2b89d90f646c86ba56bac00c047fd36 -b2c6c300048246ab4701b90000482450 -b2cbbf000462bf9baab27e0000462bf9 -b2cf7400048246ab4701b90000482450 -b2d160ea9515a21b499a240050488892 -b2d56b3c06b34d1b9a2c2f00a047367e -b2d57900043d857b6effe0000043d855 -b2d7ec00048c460b95e0ac000048c415 -b2db74000480d5fb28e02f0000480d14 -b2dc7200049f0efb834f21000038758a -b2dc7200049f0efbea76c300003921cb -b2de24000491909b5aee240000487259 -b2f03300049ae92b3953200000470861 -b2f67e00044a11eb79d76c000044a09e -b2f6c2000419d82bca5ec20000419d6d -b2f7ed00049ff65bc681ae000049ff63 -b2fc70846daddb1b93de0f00204c4ca9 -b3006fa2a2b3d5cbd4ef0a0080484181 -b300d40004733a2b4625de0000473375 -b305ad6004ed2abb770085009047931d -b305be0003b3eb3b4d96a300003b3e4c -b305be0003b3eb3b834f21000038758a -b30a6f00042e3bcbaaf7a9000042e3bc -b30f5d000471babba94e980000471bab -b310810fc6e2bb5b4992c300d047d520 -b314a741150c35bbb491ab00f04c6432 -b316f5a692997c3bae2ea500904c1600 -b317df56d4cb9aab47ac56005049a71f -b31bdc0004a2309bfbc3ed00004a22fb -b31dc10004c6b71b834f21000038758a -b31dc10004c6b71bea76c300003921cb -b32c0d0003ebfc9b51ceec00003ebfae -b33722e8de0ae7cbd9fca100f04c703c -b33a3b000451851bf4e9d1000043d310 -b33a7b0004bc841ba2175500004bc7df -b33b07c7d4f3a27bb29333008047e486 -b33d74a1a1ff91cb8e48b500f0481bbf -b3460f0004c75dbb78310a00004c4cdb -b3461420c02f408bbf3e7400f04b10dc -b34d5200040c07bbab9aac000040c07b -b359db00042c4e3bd901d6000042c4e1 -b359f1000477d32b13f1f10000476ace -b35bd000047ab53ba639e600003cf5b0 -b35e6258d81c28cbbfb1d700f04a41ca -b35e8cd011dc183b2c068b00c0475cac -b361350004732d1b55f73100004732ce -b364690004bd867ba2ca7b00004bd7fc -b367b80003cf5b0ba79a9e00003cf5b0 -b36a3b00044aca4bb22c95000044aca4 -b371170003d1951b69ff3d00003d1938 -b3717f9bb300e6bb9ca3a800b04919b4 -b374a300043e107bb2135b000043e107 -b378770003d896aba9531700003d896a -b37adb00048e5f3b7ce5d2000047d58b -b38019fa8a7a77ab95111700e04c56c8 -b388d40004837b2b69b30a00004834eb -b38a0c0004094e9bb1de8a00004094e9 -b38a6e0004cd0e4bd16acb00004cc043 -b38c050004a66d7be91f6d00004a66d1 -b3912400048fb38b4ecf33000048f891 -b3954bcd5fa3a8db4701b90030482450 -b399ae0004a2309bfbc3ed00004a22fb -b39e9743a76ad3fb47b7ca00e0472aed -b3ac280004bb7d3bf4dc1300004b363c -b3b7d3200fc468bbe3928200f0486558 -b3b9fe000423fd4ba7a3b50000423fd4 -b3bf510003ed198b1ef0fe00003ed197 -b3c16b0004c30a9bc2c9ab00004c306f -b3c25f000445907bdc125f0000445904 -b3c77dbbbebcddeb54f17b0060474fdd -b3c8910003d0d25ba9efb200003d0d25 -b3cd380003ed198b1ef0fe00003ed197 -b3d5af000470137b829daf000046eaba -b3d5c70004bc841ba2175500004bc7df -b3d5f10004758b3b7e15a100004758af -b3dd0b0003efc15b1672ec00003efbd9 -b3de65b0d7ed1e9b4f1c2c00f04c6039 -b3e156bb1e5a4e2b36cf33007049950a -b3e21c569e34088bbf1c6900804af990 -b3e9d900047f852b56cfac000047ce7f -b3eddd0003fa229bdefa8f00003fa228 -b3f4b24b8f8b89abd115c100f04c53dd -b3f982dfc8382a7bc1196400d048f4f2 -b3face0004ae20ab9e9433000049f049 -b3fcbcfd30671c9bc667ed00b04a39df -b403510003e3943bf6cd3800003e3924 
-b403eb0003c18e3b834f21000038758a -b403eb0003c18e3bb002a300003c18e3 -b41322134e46db1b762eb200704c5bab -b4228000041b58eba9d17a000041b58e -b4242f000475c9dbd64b740000475c97 -b427cf000402152bb2ad520000402152 -b4348eb77288a98b481ae800d04c51d7 -b434bb2525ec511bef3db900d047d31f -b439e2000464950b6fdd700000464940 -b43c0100042c4e3bd901d6000042c4e1 -b43d96000483f2dbea76c300003921cb -b43f4f0003cab10bb2e17900003cab10 -b4449500041984fba90047000041984f -b44f1d36ac9235ab91e41300c04bc548 -b455170004c5419bd115c100004c53dd -b458a70004c5419bd115c100004c53dd -b45c67454e08a32b3d0c2c00104be838 -b45cb5000475c9dbd64b740000475c97 -b45e24e8e9a4830beb1e4e00904aa001 -b4627b0004b4a7dbaf0a45000047d45c -b4661d0004bde3cbe78e7400004b2ab0 -b46636aaead8fc0b1198c40030489537 -b469f8a50bc3d14b5f12f80040490b5b -b46dbda988a338bb732c2c00904c366d -b46ffa0003dc21fbac8b1700003dc21f -b4775b00043698bb198f5b00004365bf -b478ea00045b924bb350a7000045b924 -b47a8a00041c9b9ba18d90000041c9a8 -b47bb186864d4a0b4e96ab00004ac07a -b47cca00048bb14b1b40eb000048baf6 -b47f9c00046c45abac0170000046c45a -b48a2400049885dbe67aaa00003d8d7f -b48c0500049a525b3155ae0000499ede -b491cc0003e2e6fba7c17200003e2e6f -b49ef21b51db67ebad2ae800e049cf48 -b4a28571a45f5e7b68622c00d048ee52 -b4a6cb0004cd40fbc502cb00004cd404 -b4ab510003f78a0b69f59400003f76d3 -b4ab7400047ec31b6fe42f000047ec2f -b4abb977afd19adb03413300804c49ad -b4b1d2000484850b430db9000048484f -b4b933f8d525fedbddd50a00d04be4eb -b4b9590003cb8d0ba3ea9e00003cb8d0 -b4c107fad2e1606b86f0a800304b2cf0 -b4c1330004c7215b72fab200004c720d -b4c9380003e5acdb27619f00003cbc05 -b4d45ddcd2c1216bb713ed00004a0ff5 -b4d4db0003f0bc8b1672ec00003efbd9 -b4d66e1755e69d7b0e6d6b00304c46de -b4dc13d80d90db9b78b83d00604c87b8 -b4df740004837b2b69b30a00004834eb -b4e0ca000431845bb45658000042e389 -b4e2070003dc9d9ba950d200003dc9d9 -b4eb742cc665f86bd1e1b90080473c21 -b4f65a675221cb3b51a53c00c04a9760 -b4f6ba6d4330210b955c5c00404b3d01 -b5009500044ca4bbad0820000044ca4b -b5026c6e628a44abd249de00c0483bde -b511d82635876b0b50ce0f00e04beb0a -b515e2000488eecb0101e20000488ee9 -b51ad561a6f0d60ba69f79008049ec54 -b51b1900048c4e3bb61224000048c3c8 -b51d9600047f6a9bb29333000047e486 -b52c160004353deba617e000004353de -b52dcf000429a4dba827b50000429a4d -b52fb6c79ec999ebb9a496004047c43d -b531dd00046dd8bb5e742f000046dd73 -b532560003dd78fbae418f00003dd78f -b532dd6bd551921b33f8a700304c31fa -b53570000451b2dbb3cc200000451b2d -b53d2400048e605b6c8ff5000048e0a0 -b543141fa43eb4cb99acb500b047d32c -b545af00046ebe3bb40490000046ebe3 -b54e450004cb7dfb41870e00004cb7d3 -b54f3700048801ab08fb190000487de6 -b54fbe2476ed0f0b44b28b00c047485f -b5528a00040185bbb3ed7a000040185b -b5576000046b95fbb3e4f5000046b95f -b5576ab2ae2ef8db1e612a00204ca1de -b55a421427f7398be5431400704c9888 -b55ecb0004cc390b513d3300004c571a -b55efa6dfa627fcb294f2200c0491637 -b55fae0004127a7baad58600004127a7 -b55fed0004a2ed7bf57fdc00004a2ece -b5609a000422795be319a50000422781 -b5683bbff5c91e0be1045c00d04b2374 -b56f620003efc15b1672ec00003efbd9 -b572720003d2fe2bb41b6b00003d2fe2 -b5744aad40a6a08b864b0a007047c158 -b576748dc2c6070b057b3300c04cc4f7 -b57943000405bb6bab1e0c0000405bb6 -b57eb6506a2d54db71d15a00e04a9f23 -b5805c0004b9ff7baf0a45000047d45c -b5838a615d3e7c7b46247600404c2b0a -b5872a76874a8a1be7c26e00c04ce1c4 -b587510003e5acdb27619f00003cbc05 -b5943bfacd7a9cfbf9b7be00604a15d7 -b599430003fb08abaae28f00003fb08a -b5995200040039cbadf847000040039c -b59f51ea575e528b1e66a500604be07e -b5a87600044e354bad47bf000044e354 -b5ad967035d7454b7a52190030493ea5 -b5b35c18137509ab843a6600704ae863 -b5baaa0003d8274bf8fa9200003d826e 
-b5bcea00044e351b702420000044e34f -b5bf09000470089bb486800000470089 -b5cbc300042e389bb45658000042e389 -b5cfe000043c2e2bad4f5b000043c2e2 -b5d143ab78cb4a4bba4c2f00a0482527 -b5d21ecd19dcd81bc9a0250050498a2f -b5d86b000400a5fb51706b0000400a56 -b5df5f0ef57e8a1b66e19800404baccf -b5e5a4b221589aab98e0eb00b0488523 -b5e8820004cc89fb15447900004cc6ce -b5eb6491fbed75bbce5d5a00804a8980 -b5eed3b32f29d06b4baa4500a0474ed0 -b5fa880004cc89fb15447900004cc6ce -b5ff08000446045b564e5f0000445fb8 -b602fe000395839b834f21000038758a -b602fe000395839bb485f20000395839 -b6086f09ace5fb5bebd9d900604791b1 -b60fc00004cd0c8b3d0579000043dd4a -b614e2d7894ef39b5a42c300f0481bf9 -b615350004cd0c8b3d0579000043dd4a -b61815dec71a41bbb8bebc00204c44b0 -b61dc00004a2cf7bea76c300003921cb -b62fc90004724a8b4071af000047249e -b630fa542bc5fffb87837600a04c7a16 -b637590003d5997bb41b6b00003d2fe2 -b63b8600048de47b2818ac000048de42 -b63db1f5e4e721dbff9b7300a04cdaef -b63f6b0003d896aba9531700003d896a -b643b6000411f3ebac3e0c0000411f3e -b648d700041b8ccbab7f01000041b8cc -b64a090003b603ab834f21000038758a -b64a090003b603abb1068700003b603a -b64d7b0004811a5b6e8896000048119d -b64fea0004241dfbb4f22800004241df -b650d1971787853b381c850040473c3b -b650f80981a9cffbf443ed00704a0f4e -b6556c01d46d902bea5da300c048dd18 -b656cb0004cc390b513d3300004c571a -b658500004360c0bb52dc500004360c0 -b6589000046d859b2127f6000043de62 -b666f8000497d25b7db4250000497d23 -b6682000044f2f9bb52076000044f2f9 -b66bd10004c75dbb78310a00004c4cdb -b674520004c84bcbe832190000496f76 -b6757a0004022bcbae298600004022bc -b6782f000473c0cbcd197b0000473bee -b679327a4a423aaba6cd5a00304a86c9 -b67fb6a03d65615ba1ca0d00b0498ce4 -b68186000407087bab34470000407087 -b682b30bd11f83ab4c7fdc00704a2125 -b689750003d4f06b9634f800003d2924 -b691b40003d0d25ba9efb200003d0d25 -b69355b3790b1efbb6426600804b9726 -b694db0003e01fbbaf740d00003e01fb -b694f9f2a4c7380bd683dc00b04a080e -b695960004816a5bdd42cd00003cfb55 -b6968b00047eebcb910dd2000047dfb9 -b698c2000495fcfb6951f40000495fba -b699c500043affcb362ce8000043afe7 -b6a7b600041b58eba9d17a000041b58e -b6b061fefa0a677b50e9d900c047bb32 -b6b52a008af3562b4c9dd2001047b61b -b6c3bb0003e4572b7d50a400003e4525 -b6c42f000470fbfb676c2f000046f7cb -b6ca3b00044fd24bb58360000044fd24 -b6cd720003e2e7dba66c0b00003e2e6f -b6cf6b00048b862ba559a1000047663a -b6d04c000423fd4ba7a3b50000423fd4 -b6d5a1000475ef4bbef420000046503a -b6e3474ea52663ebc0c096000047e18b -b6e4d55444caf0abaed91700f04c3c7f -b6e67b0004bb196bc0c46900004bb187 -b6eb410d3222b22ba273dc00b04a2854 -b6edd700048e73bb8f24ac000048e0c6 -b6edd9000483ce7b2963330000483ccc -b6f06fabfefe9b3bbef420009046503a -b6f40b0003d9ed5b3ea7e50000395bd7 -b6f929000424eb8baf80020000424eb8 -b707cf00042175abbb7d860000421757 -b7093100041984fba90047000041984f -b709c70004bb196bc0c46900004bb187 -b70e2149b6e9e72b3cd14800904cd203 -b7139c000456acdba9b8a70000456aad -b713bda88181fa7ba7256b00004c825b -b717c2aaeb35ffeb7b01ab00c04c58ad -b71900039f77d19b79a7ec008048db3f -b7252c00046f09cbb40490000046ebe3 -b72645000475ef4bbef420000046503a -b7282f0003f834eb3f7a1f00003f8329 -b72842d23971ef1bdab7d100b04c6f57 -b729dd000431845bb45658000042e389 -b72d8f0003d76ecbafe73d00003d76ec -b72ea50004c31ffb33f8a700004c31fa -b733d0000476268bcb9a5600003d333b -b736ab6b1ca9fd7b0ddad000704aa26c -b73fa4c00ab2aedb92901300104bbe59 -b74524000490467bd36820000045f8dc -b749c9000464724b713dc90000464710 -b74a2f0003f834eb3f7a1f00003f8329 -b74ae4ab4f3f911bfd8c3300f049b337 -b74e0d000491dbebbf34c2000048fdd8 -b74f170003dbe8dbafcffa00003dbe8d -b7518600040ac34baf7590000040ac34 -b753170004a7fe5b815fed00004a2ab0 
-b755330004be8edbada151000049f5c5 -b75924000495fcfb6951f40000495fba -b7594000042e3bcbac5120000042e3bc -b7597b00047f959b67e28b000047f950 -b75a0c00041b35fbb5f17a000041b35f -b75f6a35cd1978abba876b00d048956b -b76149ca5ac5e24b33a9e2008048b81a -b7675f0960f50e3b42f5ab00a04c2538 -b771d7000493057b618a0d0000493050 -b7742f00047e25ebe148470000420966 -b7771b7e08ea2e4bde966600504b215b -b77e6300043affcb362ce8000043afe7 -b77eca000422b94bb62fea0000422b94 -b782c20003fa229bdefa8f00003fa228 -b786970003cfab4b69324e00003cbb04 -b78d3100040c07bbace047000040c07b -b79d7b000483fc6bba27ac000047715b -b79dacb63a26ec1b78c74600f048d257 -b79db83cf05b43bb714296006048d013 -b7a166385db4645b171cbc00204cbe3a -b7a4900004701c6b303cd7000046fef3 -b7a825000495ee9b022c250000494e4c -b7aa0c000400a5fb51706b0000400a56 -b7b096000473bfcbcd197b0000473bee -b7b8dc3bcc96459b48077400704768d6 -b7c4a80004b60f6b4ac41300004b4cb1 -b7ca9800046f0ecbb27f51000046f0ec -b7ced00004aa613b360429000039346c -b7d68a000420ce0bdaad860000420cdb -b7d9720003ebfc9b51ceec00003ebfae -b7dc0f0004127a7baad58600004127a7 -b7dec3000475c9dbd64b740000475c97 -b7e2560004af9adbbf1c6900004af990 -b7e6a10004bd801ba2ca7b00004bd7fc -b7f57b00047571bb0496450000475717 -b7fca80004b393ebee62ab00004b393a -b81068000423105bb0a7ea0000423105 -b812580003d0cc1be023e900003d06a9 -b815444c01f9198bfbb01300904adad4 -b81d7b00047e6f3bc15333000047bd67 -b82cc00003d0cc1be023e900003d06a9 -b838c00003d0f1dbb6cbb200003d0f1d -b83995dca2b7154b3d977900b04a14fa -b839de00047f882bfba82f000047f7da -b8400b0003d4d5bbb6ee9200003d4d5b -b8445ebd3b42352b907f7600f04c00f2 -b845db000424ebdbaf80020000424eb8 -b84df0c5c625835bd6dad000d04aac27 -b8545100048605db2057bf00004699d5 -b854ea000459914bbc68a70000459842 -b857607d3ace5f8b7cea7e00304b7758 -b8592c00046d4b1bafab51000046d4b1 -b86893000451b88b26463b0000451aa9 -b86a9a1fcb6bd77bdfa4ac003048ca52 -b874ed5b70334ecb5aee240090487259 -b876b0a639767feb3e36ab00e04b4845 -b8785a4c0c044f5b697ae800e04c2250 -b878f414badc844b617fa400504bb3d7 -b87ca3fae0fc3eab0e312b007049f7e9 -b87f220004991a1be331d7000049917a -b88288e9ef8719abc5c60f00404c2c0f -b88ac5000430c68bb743110000430c68 -b88d2400048fb35bfb246f000048eedc -b892ac0003fa229bdefa8f00003fa228 -b89493000469ef9b6c4ca70000465b57 -b895d700048e73bb8f24ac000048e0c6 -b899140003fb08abaae28f00003fb08a -b89d14000405bb6bab1e0c0000405bb6 -b89d43000411f3ebac3e0c0000411f3e -b8a1170003d70fcbf1589800003d4bb0 -b8a190000407f4dbb02aba0000407f4d -b8a190000420ce0bdaad860000420cdb -b8a1ef0004435fdbb7025f00004435fd -b8a2e90003e5555bae840b00003e5555 -b8ad750003d13a9bb73b5900003d13a9 -b8b4130004b4193ba775c700004b411e -b8b75d000471f86ba4aeb100004353de -b8ce7c00046e323bb035dd000046e323 -b8d09004c3fe1eeb6d009600c0475a62 -b8d3600003dc21fbae298f00003dc21f -b8d59000040fa00bb79b12000040fa00 -b8d9240004991a1be331d7000049917a -b8df170004a7fe5b815fed00004a2ab0 -b8e32c517588ba4b7a7ba800d04905d2 -b8e60f0004c6f40b2c3cea000045af74 -b8e968000444acdbb77df40000444acd -b8e9738338a87a2bf8305e00804cbe36 -b8ea5f000445135bb7e97300004450ba -b8ed64ee92b6501b84945c00c04b2f02 -b8f0f84aae9fdb1b6c1663001047293a -b8f75866cd5b9e7be9046900004aedc5 -b8fcd4493cae0c6b0282c30050483168 -b8fdc70004b1221b7ad4ea000046bb64 -b908980003d9faebeffb3d00003d9fa4 -b90d1b756e7f01db06f2a500604c04e1 -b9127e00042d49bb82cc9a000042d488 -b914684d0db1ba5bd881d200604755f8 -b91a8000041b8ccbab7f01000041b8cc -b91ee90003dd78fbafa4db00003dd78f -b92026647c53460b72299800e04acb60 -b921c71c8404907b514e8b0030473c2c -b923b00004450babb7e97300004450ba -b9286b0003fcbfdb43768a00003fcbd9 -b92ef80004991deb89320d00004991db 
-b935dd14c1d5549bef15ab00004c1090 -b93604000433a9cbb7c5060000433a9c -b940b30003cf90cbb4a7a600003cf90c -b9430a00047d092bc4bec3000047d08c -b9500b0003dae5abb7e69200003dae5a -b9540f00049018bb40cb22000049016a -b954a79763868ceb25d1e200604acd00 -b9568f0004031a2b554ac2000040310e -b961d100043d5d2bb09eb1000043d5d2 -b963e50003a79f9b834f21000038758a -b963e50003a79f9bea76c300003921cb -b9645f95e8a7128b61680500d049e893 -b967dc00049e6aebb80fed000049e5f1 -b96b0900047062dbb81e80000047062d -b974b500047f959b67e28b000047f950 -b978b5b193381abb1c0e5600504af153 -b979b6ad0213bebb574ea100404af874 -b97f742f6ec3127b2465b900004809c8 -b9812b00049fe6ab1efae8000049fe47 -b98a27000433928bb8027e0000433928 -b9938c0004abb15ba44f8c00004aba7f -b99824f5fa2d249bf85cb500b0479fd4 -b9a69ca3ad5aa3eb425d980070484cd5 -b9a6f1db95f0174be351a80040485c95 -b9ad12d1f27c02fb94bec700e048c69f -b9b3790004a7753be3b37900004a76ab -b9b4320003f90cbbb83ec000003f90cb -b9b4db0003ebfc9b51ceec00003ebfae -b9b5ae00049e6aebb80fed000049e5f1 -b9b7b50004236febe3680100004236f7 -b9b8a700046ace1bb8839c000046ace1 -b9bca10004c30a9bc2c9ab00004c306f -b9bcec6ee1c0d18ba8cdd200d0475ab9 -b9c28f000401430bb8a20c0000401430 -b9cc89518fe82b0b5aa70500804907ba -b9cd5500048f64eb708a5600003d1f57 -b9cf110003cf993b3ea7e50000395bd7 -b9d1940003dc138b86cd1700003dc131 -b9d6cd3ddb8d3a4b8f07ec00c048d0e4 -b9d9700004613c7b61f4ea00004611c2 -b9d9b9000475ef4bbef420000046503a -b9de070003ed13cbafd40b00003ed13c -b9e094fcc49c559b25c02f001047be79 -b9e2e80003f92a9b13f79c00003f92a6 -b9e89500040039cbaf4bcf000040039c -b9eddd000408052bb02aba0000407f4d -b9f2426f2d2a7e9b0485cf000049322c -b9f564000497494bd76d240000497227 -b9fc33adfba928fb1d1a1d00c04bb048 -b9fd1bac30778edb0721b9001047927d -b9fef800048f895b4ecf33000048f891 -ba01731efb65bbeb6102e800d049df9f -ba13590003cab10bb2e17900003cab10 -ba18ca00042e3bcbac5120000042e3bc -ba1bb8de91ce585b3aac1300f04af7f1 -ba1f0900046d831bb1582f000046d831 -ba20a80004b20d9b195b5500004b20d1 -ba223d0003f9786bcb84c000003d02d0 -ba27740004845d4be2df9200004450fe -ba2d0b0003f4fdfbcbb8fe00003f427c -ba2d0cee5bcc4e3b69e24e009048504f -ba2f0a00040c07bbace047000040c07b -ba30280004b20d9b195b5500004b20d1 -ba307200049fd54bc1ac0d00003f36ec -ba31e30004488f4b18704200004488ea -ba380d0003dddf5b522d9400003dddf1 -ba3c8e50033800cb06edb9000047bb7f -ba40f80003d9faebeffb3d00003d9fa4 -ba47ef1fb246543bbf77a400704bb9a2 -ba4d4600043e107bb2135b000043e107 -ba4e450004c96aab6fd8bc00004c96a8 -ba4ec30004845d4be2df9200004450fe -ba52d20004cd4abbbb3322000048f88f -ba55dd0004094e9bb1de8a00004094e9 -ba61b1776d59d36b7495d000304ccaf0 -ba6358df8f156b1b96a4a100004c078d -ba63dc0004a46f4b8157be00004a46f1 -ba66e80004c2945b6caa4e00004850f7 -ba680d0003f78a0b69f59400003f76d3 -ba6ba9841c4feb8b0101e200c0488ee9 -ba6c7200042f8e9bc9c3c6000042f8b9 -ba6f803196f7a95b8cda50000048f552 -ba7725000496b70bea2e240000496b69 -ba84ea00044aca4bb22c95000044aca4 -ba87ff7816dfde0b55f73100d04732ce -ba88900004712e8bb486800000470089 -ba8e6ffe034f3e5b53e6ce00104b0ae1 -ba8f730004cd4abbbb3322000048f88f -ba902000045a04cbd9052300004494db -ba950f506677924b1602ab00a04bc384 -ba9b01000402152bb2ad520000402152 -baa381049922479b27c2e8009049a9f6 -baa40c0003e01fbbb0cb6200003e01fb -baac140003bb27bbc79b4500003927b9 -baac526ec634124b3ec4ac008048573e -baacd400047a924bbc70d4000047a870 -baadd700048f49fb4c4722000048f49d -bab6560003d23c7bc4e71700003d2163 -bab8d20003e9484bb9417200003e9484 -babbf4fab2a2d70bc48e8800204ccc87 -babcb4b0b44f808b11df2200c04969ab -babef20004022bcbafbb0100004022bc -bac3d2f893e097cb7a8c5c00a04bca72 -bac54ee402fe5ffb9d5e7400a04ba528 
-bacb3300049500ebe8f4e800004385a3 -bad4fe0003f7730b8b713800003f76dd -bad7d9c088b8d47b1b60a800004b9e1e -badcd40004824e6b62e7d000004812ea -baef33000490848beecc0f0000490838 -baf19283dec0dd6b45810a00d04c3e5b -baf3a8000490848beecc0f0000490838 -bb0213000407087baf45520000407087 -bb02ce0004c5419bd115c100004c53dd -bb0360000464f1fb6a536c000044a043 -bb0d9f0003cbae5bb9b0b300003cbae5 -bb186b0003fea66b5f799000003fea5f -bb19ab4d41f7c3bb439d7b00f047bf65 -bb1b54000484dbebe148470000420966 -bb2168000424eb8bb0c9db0000424eb8 -bb2a2400048f895b4ecf33000048f891 -bb2b6c000451b88b26463b0000451aa9 -bb2f3f5245b45deb1234a100a04c57cd -bb2fd8ff4e2a32dbc0e9c700504ac9a0 -bb326300043d0d0bbabe5e000043d0c9 -bb34280004b549fba0c6ab00004b5493 -bb3b590003d2fe2bb41b6b00003d2fe2 -bb3c790004cd0e4bd16acb00004cc043 -bb3f2a8b372d504be41bd0008047719a -bb40690004b4142b6d5a5600004b413c -bb445e0004c8747b2522d800004c8746 -bb48330004a11a0ba2e55100004a1120 -bb4b250004331d7bf8389a000043311e -bb53a40a2a7d99abf5093300e04bf063 -bb599c67f0cf002bd10f7600f04c5ae7 -bb5eb1e5705d00ebd1ea1d00d04ac195 -bb69520003fc522bba404700003fc522 -bb6ad1b1e5f8d19bf0dfd0008047f07e -bb6c770003d76ecbb18b1700003d76ec -bb7172172f13ff1b8584c40040486b40 -bb73775ac1030e9b44c3a800f049a983 -bb7596000482469bba27ac000047715b -bb7a1d0004b4142b6d5a5600004b413c -bb867e00045b924bb350a7000045b924 -bb8cd20003e195dbba3ffa00003e195d -bb8fcf000419ca6bba01520000419ca6 -bb92744cbba7d40bbcbf460050485753 -bb928a0003fdb3cbba357a00003fdb3c -bb92d500044517eb52516800004450b0 -bb93abdd615f837bf2fc9600a0475364 -bb94c8380e6b05bbcbebc200404a1d9c -bb95d220e32d354b4ed2e800e049f790 -bb970100042184fbbe9e0c000042182a -bb9a150003dbe8dbb13d1700003dbe8d -bb9aab0004afbf9b4f5a7b00004afbe1 -bb9b480003926b0b834f21000038758a -bb9b480003926b0bd33d18000038bd74 -bb9fc8c4ce992b8b3c069d00203a6629 -bb9fc8c4ce992b8b834f21002038758a -bba6740004bde3cbe78e7400004b2ab0 -bbaa1300040ac34bb0cb01000040ac34 -bbac8e0003dd78fbafa4db00003dd78f -bbacb31f469c400bb9511b00504cd68b -bbb2430003dc21fbae298f00003dc21f -bbb2c3000495fcfb6951f40000495fba -bbb3a10003e5555bae840b00003e5555 -bbb50b0003ed448bba3f5100003ed448 -bbb6db000496b70bea2e240000496b69 -bbc36e3d13a327cbc422e8008049e9cb -bbc5f100048246ab4701b90000482450 -bbc6bc0004c5927beda5a300004c591f -bbc8ea000456c6eb8ec76c0000456c64 -bbda7e00042560bb3b2586000041c2e3 -bbdaa4cf3d23a17bb922ab00a04bcf02 -bbddfb27d60c105b08185c0010484d3e -bbe00b0003e348bbbaa6ec00003e348b -bbe04700040185bbb3ed7a000040185b -bbe093000449f3fbba9ca70000449f3f -bbe3f60003c8300b834f21000038758a -bbe3f60003c8300bba7f9900003c8300 -bbe86b625624a5cb5c8fac0090479ef7 -bbebfa0003efe94bba93bb00003efe94 -bbf0db0003e9a1cbba659400003e9a1c -bbf20f0004c5927beda5a300004c591f -bbf2c20003fd6ddbba928f00003fd6dd -bbf6d90003c8787b834f21000038758a -bbf6d90003c8787ba66e7c00003c448f -bbf76c00044c627bba45e2000044c627 -bbf76cd995f62d6bcd555100d04a0bab -bc0c2f000471b54b0b2c900000471b43 -bc0c8150d136168b56690a00a04c22f2 -bc1026000484dd5b39d8120000484dbc -bc14db0003eda95ba176ec00003eda30 -bc15d100043d0c9bbabe5e000043d0c9 -bc1eec0003df16cbbabcfe00003df16c -bc21ba000423105bb1d0020000423105 -bc23bf00045585ebdfb53800003e4255 -bc264500047f717b3a5ac3000047f70c -bc32500003c6456b834f21000038758a -bc32500003c6456bba97f600003c6456 -bc39bf72636bae2bfbc80f0050496110 -bc3dc9000466668b3b6d5200004208c6 -bc42090003d0b3cbefd40c00003d0aca -bc4825000495948b03b164000049593e -bc48283ff51c537b733dc700004b1c51 -bc4ddd00046dd35bbaedaf000046dd35 -bc53700004a165ab3c069d00003a6629 -bc55d700048f86aba03df4000048f753 -bc6516441131296b8b10ac007048e072 
-bc68519e6183995bb48aab00b04ac3aa -bc6b7000049ff65bc681ae000049ff63 -bc6f620003f4fdfbcbb8fe00003f427c -bc715f664bf9a67b65b75500e04b320f -bc768000046ebe3bb40490000046ebe3 -bc76d20004ccaeab4f6c8200004cc801 -bc7d3fbf747e6d6bd4ea1900a048f6ca -bc86ee1b9da2338b9596bb00604abaf2 -bc8760000451b2dbb3cc200000451b2d -bc89d100043ae82b41cfe0000043ae7d -bc8a1190de21edabddd50a00c04be4eb -bc8b805abcf5e30bcb341300f04b74f5 -bc939d3d26f55a2b83e9c100904c599f -bc970a00040039cbaf4bcf000040039c -bc994a0004a94bfb80d13c00004a94ba -bc9a720003cba6abbaf6cd00003cba6a -bc9a7f3a9831814b35890a00604c6d96 -bc9c9300044e351b702420000044e34f -bca1290004241dfbb4f22800004241df -bcac1d3bd9d7521babb5ae000049f440 -bcb07454358de6bbb7e885009047a624 -bcbd2b0004a2309bfbc3ed00004a22fb -bcbdf80003ed13cbafd40b00003ed13c -bcc152000402a76b9facc000003d12d9 -bcc2dca06d4c26eb004ca100204c66e2 -bcc51b0004ccaeab4f6c8200004cc801 -bcc92300044c853b44e67e000044c81e -bccc4ae2c5721c6ba981c100704bedcc -bccf620003f4fb4b1af7fa00003f4f7d -bccfab52b31635fb1602ce00904c6057 -bcd15e8478b1026b08737400104848f9 -bcd1990003bef07bb1068700003b603a -bcd35b0004360c0bb52dc500004360c0 -bcd92000042e389bb45658000042e389 -bcdd1456629f9aabab20d400a0476d95 -bcdec372720ca06b68d3a800d049e906 -bce5f1000481bc6b8e48b50000481bbf -bce8970aabb4e19b83e9c100e04c599f -bced2300046b95fbb3e4f5000046b95f -bcee0c000421757bbb7d860000421757 -bcef860867287c1b99487900704ccbdf -bcfd40461640e68bfab9d700304a428a -bd085c0004b3194b2df41300004b30ec -bd12ce0004acb8ab72299800004acb60 -bd18ea000451190b90b76c0000451167 -bd1ea8000407f4dbb1d86b0000407f4d -bd29c7fcccca9eabe6622400d04960ab -bd2ab22a7814563ba8f774007047f2a1 -bd2cf500044f2f9bb52076000044f2f9 -bd2e95e69ad8ae4b1f3f7000b04a0415 -bd316f0004a94bfb80d13c00004a94ba -bd335d0004717fabbbf12c00004717fa -bd3bfa0003f507eb1963bb00003f506a -bd3e45000476f9ab4c20850000476f21 -bd41550004909d1bd66f220000490972 -bd467aacea7a7c7b7aaf6d0010499de8 -bd468c132dd6dcabd881d200004755f8 -bd50c10003e01fbbb0cb6200003e01fb -bd521257c562f2cb5590a100804c77c3 -bd59d70004a2f33b07038c00004a1c02 -bd5c0b0003db349bbc157500003db349 -bd5d4153b544c59becbc7d00b04aa784 -bd5fcf0004031f5b554ac2000040310e -bd61a56791a322fb3f8cb600404a86db -bd6a725626e022bba7917b0000475a65 -bd6db90004837c7be148470000420966 -bd6f2ee6eaff9f5b465379008049c0ea -bd72feb6c37a08abb05dcf0010490b47 -bd7acd0003cfb13b834f21000038758a -bd7acd0003cfb13bea76c300003921cb -bd81a5fcd1a152ab22295100704a4236 -bd81b07d2f404c7b79136d00804a2163 -bd822a1a8a58e23ba2d2db00704905ff -bd82ec0003e94a0bb9417200003e9484 -bd8bfa0003efea0bba93bb00003efe94 -bd8c560004a2742b7c09ae00004a272e -bd924c00042386abb1068700003b603a -bd93a2bd044f491bb77ee80040499c27 -bd94050004a2f33b07038c00004a1c02 -bd99940003ec61bb8b693800003ec60c -bd9c65000424eb8bb0c9db0000424eb8 -bd9e4e00048c3dab21aa7e0000459dc1 -bda3370004858c4b4066c3000047d486 -bda5633a59db6f1bba96cb00604ccfd0 -bdb08500047d80dbaf69d2000047d7f4 -bdb1a2e1278e0c2ba2ca7b00b04bd7fc -bdb3ae0004022bcbafbb0100004022bc -bdb40b0003ef2fab3b6cdb00003ecfdb -bdb4db0003e4572b7d50a400003e4525 -bdbaee7fe01728cb5d045600a049e1a8 -bdc04700041b35fbb5f17a000041b35f -bdc2ad93c3178aeb864b0a009047c158 -bdc36b00048d7afbcdcde2000048d7a0 -bdc701000401540b9962aa00003d85b9 -bdc76d0004a2ce6b914b7900004a2c80 -bdc8d3fa3aed2e9be8d7d100404c5526 -bdce560004bb196bc0c46900004bb187 -bdcf0a000407087baf45520000407087 -bdcf107948dc879b05b77600e04c81a1 -bdd096000476f9ab4c20850000476f21 -bdd8301487e4721b5065d100e0437f6d -bddb4600048d7afbcdcde2000048d7a0 -bddd277ac98f843b35d62c006048e929 -bde4ea000459842bbc68a70000459842 
-bde6560003d8ba4b8c72aa00003d8aea -bdeb3cf4ae19c0bb94042c00e04c24f9 -bdedf10004845d4be2df9200004450fe -bdee8b00047d80dbaf69d2000047d7f4 -bdf1fb0003d0f1dbb6cbb200003d0f1d -bdf33e47029a4e7b2ab21d00904bd5a3 -bdf46800042a456b939228000042a44d -bdf87c0004366ebbbccf5b00004366eb -bdf8961e2ddc418bb988a100a04c091d -bdfc3c0003cab10bb43f4f00003cab10 -be059a000422b94bb62fea0000422b94 -be07bd0facc600bb83879100b04ce7c5 -be096be59e663c6b08112400404923ab -be0bab99ef08913b92f03d00804cb9aa -be0c0b0003d17cab438b5900003d17c5 -be13738d6c85ff1bb82a1d00a04ba1b7 -be189600047a8afba0e733000047a8a4 -be205100048531bbf483f50000485316 -be3129000422796be319a50000422781 -be317b000480d5fb28e02f0000480d14 -be34e8614496da6be9046900a04aedc5 -be3622108eb1737b914b7900d04a2c80 -be4363474d0c16cb104d640040484c2b -be470500048f64eb708a5600003d1f57 -be4f210003dbe8dbb13d1700003dbe8d -be52aa0ef3ec661bef15ab00404c1090 -be57b32ef1c2dd5b4a800f00d0495990 -be57c0000472b61b834f21000038758a -be58c30ffffd3e4b2eb7a80080495f89 -be5c25000497494bd76d240000497227 -be5d850a024170abc7371700b04aa246 -be61e07201b9fcdbe5e20f00d04c4b22 -be64c320560bea5b8b478c00f04a0bbf -be670a00040ac34bb0cb01000040ac34 -be67a9a2ac0457dbe2821700504caa3d -be6b0a00047a8afba0e733000047a8a4 -be7ba8000490a93bd66f220000490972 -be7f05000490a93bd66f220000490972 -be7f6623fff7da6b46eab200b04c1f2f -be87ec00048b439b6d4696000048b417 -be8923000457b73b5e9e7e0000457b6c -be8b6b0003d76ecbb18b1700003d76ec -be8f33000497542b8ec76c0000456c64 -be942c0004c28f7b4b0f6600003d01b7 -be9556ecee1a595bc250c4004048dc79 -be9be01c8404236bdd94d40030479796 -be9e740004b4cb5bb727a400004b4a02 -bea16ce2de6ea03b6272c700b04876f1 -bea56400048efc7bb7d9cf000048efb1 -bead68000423105bb1d0020000423105 -beb05c0004ae2ebbcad19400004aaa26 -beb0d20003dddf5b522d9400003dddf1 -bec7590003dae5abb7e69200003dae5a -bec810000430c68bb743110000430c68 -bec87c0004a0de7b205b7900004a0da1 -becb0100042022ab710fcf00004201ba -bed041a45690078beb39ec00c04c93aa -bed276bd8a0a2e6b46e96b00104becd1 -bed4f5000466f24bbd89e20000466f24 -bedb85df364a2efb24d3d90010484b3f -bedb8c0004a2742b7c09ae00004a272e -bedccadaa3b9500b1d96c300c047747a -bee2a50004beb5cb3101a300004beb57 -bee8980003d7bd5b35be7200003d7bd1 -beef0a000402152bb427cf0000402152 -beef6d0004a2ce6b914b7900004a2c80 -bef22f7d5abf162b649b5500504b86ae -bef5a30004be587b47e856000049a4ea -bef9a40004ab2fdb26c6e800004a7549 -bf04d400047aba4b9414d4000047ab9b -bf05170003d13a9bb73b5900003d13a9 -bf06820004868f2b62036b00004868c9 -bf080f0004094e9bb38a0c00004094e9 -bf085c0004b4f70b94aaa100004b4f65 -bf0a27000433c42bbd8e7e0000433c42 -bf0b732d7f27bddb834f2100c038758a -bf0b732d7f27bddbea76c300c03921cb -bf15dd00041d810bbdba0c000041d810 -bf22880004cd413bc502cb00004cd404 -bf24aae0877067ab739dae00b049fda8 -bf29680004435fdbb7025f00004435fd -bf330100040fa00bb79b12000040fa00 -bf35f9731cf5456bb9511b00204cd68b -bf383ebeca43698bdf2e7400904bc8e5 -bf39bb17cc947ccb67af0500b0490300 -bf3eb482c04ffe8b8cf7d100904c64dc -bf43006cd89993eb6ddaa500004c435c -bf43660003d0b85bbd536300003d0b85 -bf441bb8a371188b63787600a04c446f -bf44b70003d2fe2bb5727200003d2fe2 -bf45d700048f895b4ecf33000048f891 -bf4731000473294b1e7f31000047328d -bf489600047cd34bc973ac000047cd26 -bf49ab0004c75dbb78310a00004c4cdb -bf506c4b52f219ab7c8c7200e04a11fa -bf50a7000458072bbdef600000458072 -bf52f80c19a7e3abb149d200f047d094 -bf540d8c8804758ba0c05600d049fe96 -bf547d0004ab2fdb26c6e800004a7549 -bf597ffc4df2833b815f1200904cb675 -bf5b0900046e980bbe0280000046e980 -bf5bd0000475ac5b5085a10000475ab4 -bf72c300047eaebb4560b5000047ea75 -bf7fb2b47b85d95b03b164008049593e 
-bf7fc0e6ef70461b341b1200d04cb22e -bf82ec0003f5d51b0d7cfe00003f5d48 -bf86720003d2b80bbe2a4800003d2b80 -bf867c0004718d8bbe135100004718d8 -bf8d140003c6456b834f21000038758a -bf8d140003c6456bba97f600003c6456 -bf917a00042022ab710fcf00004201ba -bf9574d321ff908b23ef460080487e41 -bf9acd0003cf90cbb4a7a600003cf90c -bf9b730004cce7bbe2614800004ccca5 -bfa4db0003efea0bba93bb00003efe94 -bfadcf531cd3059be3dca100c04c7c94 -bfaebf775d8a484b7aaca800504b777d -bfaf7000049b4aeb97d52b000049b385 -bfb5d2000477bb9b0253330000473d81 -bfbf670004488a6bea76c300003921cb -bfc00c0003d0eb6b44649800003cfc46 -bfc5e200044ea5ebad47bf000044e354 -bfca46000407f4dbb1d86b0000407f4d -bfcac300047f959b67e28b000047f950 -bfcecb0004cd0c8b3d0579000043dd4a -bfcf12000418f99b32b7120000418e3e -bfcfd30003f90cbbb83ec000003f90cb -bfd1d00004aa349b0ddad000004aa26c -bfd3fa0003ede21bbe53bb00003ede21 -bfd6580003d06fdbe023e900003d06a9 -bfd8130004ae2ebbcad19400004aaa26 -bfd952000401430bb8a20c0000401430 -bfdcfe0004450babb7e97300004450ba -bfdf3300047eaebb4560b5000047ea75 -bfe353000444acdbb77df40000444acd -bfe6a0b2e21912ebb8037000d049bca6 -bfe8b30003cbb26b69324e00003cbb04 -bfeb0100042182abbe9e0c000042182a -bffa19000498749ba7c5640000498713 -bffb8a4866b0b8fb5df9c100d04c18b7 -c00a7c0003c8300c834f21000038758a -c00a7c0003c8300cba7f9900003c8300 -c00d140003c29e2c44567c00003c29c4 -c00d4c0003f80edc611c9100003f80ae -c01297c0c1134f3c3d21c700304b2137 -c01c5a60f8d1e16c6a5a0f00504c58e1 -c01c5e0004cb724cd2afac00004cb455 -c01e8000040185bcb5528a000040185b -c01f5100046f60ccbea82f000046f60c -c020fd06cbedb53c373da10060479870 -c026bc0004c46e0c0e6d6b00004c46de -c02924000492529c636b3300004923a3 -c030ae0004aa349c0ddad000004aa26c -c0336b0003d1c69cefd40c00003d0aca -c033d0000480ef6cd3bd7b0000480198 -c0368f000403115c554ac2000040310e -c0377400047aba4c9414d4000047ab9b -c0379c00045942dcbedf60000045942d -c03b33000497542c8ec76c0000456c64 -c03ea50004c292acc6c9e2000044e600 -c0420f0004c46e0c0e6d6b00004c46de -c04447000412294cbf12ac0000412294 -c045dd0003fcd49c834f21000038758a -c045dd0003fcd49cea76c300003921cb -c0468a00041126fcbebd86000041126f -c04dcf000498749ca7c5640000498713 -c0521bd65d12eeec48486900d04bd20b -c0549a000433a9ccb7c5060000433a9c -c057510003df368cf1589800003d4bb0 -c058b91d4d30f4bca4250a00e04c5f91 -c05a2400049485fcaccfbb00003e5555 -c05f9db32d1596cc300e5600f04b9d6c -c0667d00042ea1ecbf1ba9000042ea1e -c066ba000410109c5687cf000040fe46 -c06895000456c6ec8ec76c0000456c64 -c0697000046503acbef420000046503a -c069b9000481475c9ec5de000048146d -c06a5000048efc7cb7d9cf000048efb1 -c073510003ea080cbe713800003ea080 -c07c3700039449dc360429000039346c -c085e70fefeb597cc9336d00004a1932 -c08e720003d23c7cc4e71700003d2163 -c098a40003f568acbf400d00003f568a -c09a560004ac3a8cdf6ace00004ac099 -c09abc0004c40fbcebc56b00004c40f4 -c09b4d55156a930cd6dad000004aac27 -c0a85c0004b20d9c195b5500004b20d1 -c0ac9000047062dcb81e80000047062d -c0ac9500046ace1cb8839c000046ace1 -c0b9940003e9484cb9417200003e9484 -c0c22e40c139b2fc1b014800904cca30 -c0c60f0004c40fbcebc56b00004c40f4 -c0c82f0004839c1cebda450000482f2d -c0cc650004241dfcb64fea00004241df -c0ce7e0004b9cdbca775c700004b411e -c0d09a000433928cb8027e0000433928 -c0d1380003f7730c8b713800003f76dd -c0d4680004360c0cb6585000004360c0 -c0ddb70003cab10cb43f4f00003cab10 -c0e0a80004b5dfcca67de200004b4cf1 -c0e0d20003f3347c3834fe00003efbba -c0f7cae6a09a828c8925ae00c04a5ebb -c0f85c0004b9cdbca775c700004b411e -c0f8d4000483cebc2963330000483ccc -c0fd7000045b948cbf7820000045b948 -c115ae0004a2e04ceb4d5100004a2dfa -c121be47483f107cd5a1a300004beb72 -c128b500047f0f0cc0c096000047e18b 
-c129720003dc7a9cbfc51700003dc7a9 -c130fb57cafaf29c815f1200904cb675 -c1317b00047bd7dc161774000047bd69 -c13218094f7c00bc1e3ca800a04b2509 -c132660004bb293c3cbe6600004ba29a -c1331b535d37ebccbe913c00304a807d -c1364e0004aa2eace148470000420966 -c1382c0004c7cccc13f1f10000476ace -c13b25000492f78c6caa4e00004850f7 -c14082000393571c360429000039346c -c14082000393571c834f21000038758a -c145f08511f9e9dc4d6ea500404be3a0 -c150c2000490848ceecc0f0000490838 -c1521d62fbf1398c09247200204a1805 -c153cf0003fc522cba404700003fc522 -c1598fac05dde78c5029d700704906ee -c15fd10004c6222cdf6ace00004ac099 -c165de00047a445c8bb89600004781f5 -c168ca00042e389cb5cbc3000042e389 -c16cd400047f0f0cc0c096000047e18b -c177510003df16ccbabcfe00003df16c -c17b590003dd317ccbf58f00003dd2e7 -c17c0a00db27f34ce287a800704a21a9 -c17cd8e1cd8d77dc2749d9005047eff4 -c18ca40003e195dcba3ffa00003e195d -c18e46000402152cb427cf0000402152 -c19ee80004c5756ce832190000496f76 -c1a3a40004730b8c98875e000047305f -c1a4b70003d4d5bcb8400b00003d4d5b -c1a7bb0003e9a1ccba659400003e9a1c -c1a9590003cbb26c69324e00003cbb04 -c1ab510003efea0cba93bb00003efe94 -c1b2a50004c292acc6c9e2000044e600 -c1b60f0004c643ccb491ab00004c6432 -c1b81a9800f793fceb39ec00804c93aa -c1b8470003fdb3ccba357a00003fdb3c -c1ba24000485beace154c40000485be5 -c1c3dc00043667fcc06a5e000043667f -c1c6ec0003ed448cba3f5100003ed448 -c1c73d0003d23c7cc4e71700003d2163 -c1c8d5d94056fccc6795d20010476858 -c1cdfe0003c29e2c44567c00003c29c4 -c1cdfe0003c29e2c834f21000038758a -c1ce7e00044cc2dcc075e2000044cc2d -c1ced7897b0c029c916dde00c0483c8b -c1d5ef2258e1fecc8ec94a00b04a99e1 -c1de390004bc841ca2175500004bc7df -c1dedb000499b59c05c6c30000498bb7 -c1e0d70004094e9cb38a0c00004094e9 -c1e3b26ea90082bc4b1aa500004c0fc6 -c1ed7e9c74b6cdcceaba2400b0498dcc -c1fcdb0003d0f1dcb838c000003d0f1d -c202260003d2fe2cb5727200003d2fe2 -c203cf0003fd6ddcba928f00003fd6dd -c2048700043311fcf8389a000043311e -c2110b0003efe94cba93bb00003efe94 -c2140bb238738dbcf507a400104adde2 -c215ea89590f214c452e82001048cf02 -c21d3100041b35fcb75a0c000041b35f -c22db900047e25ece148470000420966 -c22e0200046efa0ca99420000044dd09 -c23186000419ca6cba01520000419ca6 -c23b390003cbb26c69324e00003cbb04 -c23ea3654e45fedca3bc960050477152 -c243a100039449dc360429000039346c -c243a100039449dc834f21000038758a -c2462c00048f49fc4c4722000048f49d -c249d8000422b94cb77eca0000422b94 -c24c980003cb9adcbfda7200003cb9ad -c24dc90004a9b41c4621a400004a9b38 -c25211768496b71cf36695003047297d -c2533f0003d17cac438b5900003d17c5 -c25b1900048531bcf483f50000485316 -c25fd0b689b01ddc7b01ab00604c58ad -c260db0003f7730c8b713800003f76dd -c2629e0003cba6acbaf6cd00003cba6a -c264c400048c915c1b40eb000048baf6 -c265a3000485d35c6af485000047546b -c26e539ee421647c18bb8600c048cf34 -c270e299563ad3cc145f330030490bbc -c270f135449c5aac3e450a00f04c4775 -c275aa254ff34f1c2e1ece00104b9e7d -c2763ac45b76b88c06f2a500904c04e1 -c27712000420c52cc10c6b0000420c52 -c27dc500043be42cc12850000043be42 -c27f600003dae5acb9500b00003dae5a -c2817a000418f99c32b7120000418e3e -c28226204f4af87c9519f10070481701 -c28df1370b2369aca3f96b00704be947 -c291720003e556dcdd42cd00003cfb55 -c295de000476f9ac4c20850000476f21 -c297620003f4871cc1ac0d00003f36ec -c2a09500045d3fecc17893000045d3fe -c2a8e623d6424a0c3a7c5c00704b9e30 -c2aeb98cb3278f9c5aeaa100d04b64f3 -c2b02432681cda7cc032c30020473db5 -c2b5d583a95d1ffc03cdde008047d165 -c2b9dad3e48e034c7ba1f1006047d739 -c2bde200044adb7c60f420000044adb1 -c2bf080004461aec25db080000445f86 -c2ccaa000430c68cb88ac50000430c68 -c2cd6b0004c606ac1602ce00004c6057 -c2d325000492f78c6caa4e00004850f7 -c2d481954024f7fcaac3f50090489c1b 
-c2d6480003db349cbc157500003db349 -c2d6de16d340788c2525f100e04803b3 -c2da7b48687cfcec44aed500f04460d9 -c2da82a1331d5d8c524de2007048dbfb -c2e455f65edbeffc14433300b0496ebc -c2e75b00043ae82c41cfe0000043ae7d -c2e75d00046f0eccb27f51000046f0ec -c2f14300040185bcb5528a000040185b -c2f40f0003d02d6ccb84c000003d02d0 -c2f4a700044515ecb7e97300004450ba -c2f6390004b03abce8167b00004b0270 -c2f7420004cd4abcbb3322000048f88f -c2f9f100047f0ebcc0c096000047e18b -c2fa480003d7bd5c35be7200003d7bd1 -c2fca40003f36eccc1ac0d00003f36ec -c305350003d01c4c4b0f6600003d01b7 -c3088932e965a7fc74327e00104b1f66 -c30a64f1bdd079ece3dca100c04c7c94 -c3110a0004c606ac1602ce00004c6057 -c311720003de9ebc5b64db00003de9d4 -c316c8000399e1cc05ebe50000399de3 -c31fbf000449f3fcba9ca70000449f3f -c31fd10004c50eccecbfd100004c50e6 -c32015abe78557dcbd6f5e00f04729c4 -c322ca000424ebdcaf80020000424eb8 -c323b500042cb6dcf622ca000042cb67 -c32c0f000498817c40c52400004987f3 -c3314600043be8ecea76c300003921cb -c336db000491c73c08a7f5000048cc57 -c3388d0003d13a9cb8ad7500003d13a9 -c338c200048e8a4c1b2451000048d4e6 -c33c1f82d8041e8c61bcc2009048ed70 -c33d180004771a6c4c8e0900003d01b7 -c346b8a53b95c57cb727a400004b4a02 -c3492400048e8a4c1b2451000048d4e6 -c34ecd0003cbb36c1ec42000003cbaf6 -c350160004360c0cb6585000004360c0 -c350761d5b415f6cdf274200504ce6f0 -c3508500047f0ebcc0c096000047e18b -c35a720003cbb36c1ec42000003cbaf6 -c35aec0003e08accbd536300003d0b85 -c35dfe0004241dfcb64fea00004241df -c361750003d8ec2cc21a9200003d8ec2 -c3635100046dd35cbaedaf000046dd35 -c364a700044c627cba45e2000044c627 -c36552000400516c52b06b00004004f9 -c3680f00040fa00cb8d590000040fa00 -c36a1b0003d0eb6c44649800003cfc46 -c36dc70004ba48ac67bc7200004a5241 -c370ebb6ba9b1cfc84c5c000c04a3c7e -c3730100040af39c834f21000038758a -c3730100040af39cd33d18000038bd74 -c378090d524232ec8ecf5c00c04aa101 -c37cd700046f8f1cc2575d000046f8f1 -c385170003d23c9c85591700003d2301 -c3866300043d0c9cbabe5e000043d0c9 -c396ce0004c81decb1068700003b603a -c39f77b09b684b4ce642f80050498524 -c3a05600046cd49cc23ddd000046cd49 -c3a4ae0004aa576c4ea9c900004aa54e -c3a5520004132a0cc25fcf00004132a0 -c3a5a800048c3bacfba82f000047f7da -c3a7bf000464724c713dc90000464710 -c3aabc0003f90cbcb9b43200003f90cb -c3af810a5f62be3ceb4da100c047950b -c3afbb0003eda95ca176ec00003eda30 -c3b301000421757cbb7d860000421757 -c3b9dd00041c9b9ca18d90000041c9a8 -c3bdd415c464f29c63e1b900d04736f3 -c3bf3700048de45c2818ac000048de42 -c3bf390003a3dc4c834f21000038758a -c3bf390003a3dc4cd33d18000038bd74 -c3c43c4aa758fc3c5ef12b00204a6696 -c3c701f0f9d0bbfc3e18d4009047f941 -c3cb740004821b8c6f928b00004821af -c3ce4485d08f1f7c7f8ed200504ccf2e -c3d16983b246e8ec46247600904c2b0a -c3d4a700044c853c44e67e000044c81e -c3d543000401430cb9c28f0000401430 -c3db7441078699acddd50a00404be4eb -c3dc890003d06decc2700c00003d06de -c3dd2300046ae4ccf73570000046229c -c3e4f5000469586c5bb1e20000468fd0 -c3eaae1ad93c4fcc199fa400d04ae59a -c3f2c30004821b8c6f928b00004821af -c406530004c50eccecbfd100004c50e6 -c40694a6cace506c455374004047aec0 -c408ea0004515c1cc2c0a700004515c1 -c40b078c669bc7bc6af9ab00104c4722 -c40b8b5ee746c44ce2821700604caa3d -c41326d36be9d14c99487900504ccbdf -c41e0c0004031f5c554ac2000040310e -c4254fe1437fd9eca6c41300104b89e3 -c42743d05ebf6f0cef15ab00804c1090 -c42b590003d7e63cc2c67200003d7e63 -c4304dd4b2c18d3c78c746009048d257 -c4317a000410b5acc2b1dd0000410b5a -c4324800042e389cb5cbc3000042e389 -c439c10004bec3acd5a1a300004beb72 -c43f5be0c990977c5d1dab00104c4268 -c4409300045b590cc2c123000045b590 -c446f98b22692ecc70d42f005047f818 -c44a260003d4d5bcb8400b00003d4d5b -c451a30004c7070caf0a45000047d45c 
-c451c90004a8a46c7415d000004a8a08 -c4533d0003d2feec3deb6b00003d2fe8 -c454f5000464c3dcc2e9e20000464c3d -c466740004bd099cb6f2ce00004bd096 -c469af0004717facbbf12c00004717fa -c46aec0003f45cdcc2ebbb00003f45cd -c46c9300046479dc5cc8950000464759 -c46e96000486bc5cb1068700003b603a -c473ed0004a2f33c07038c00004a1c02 -c47ddff62223126c4aa7330070474df4 -c47e580003d0eb6c44649800003cfc46 -c481af00046dbb0c3b6d5200004208c6 -c4825ffe9da44f3c5f237600304c9d44 -c482880003d0b3ccefd40c00003d0aca -c484c00003d1298c9c782000003cb9b5 -c496c90003cf90ccb940b300003cf90c -c49eec0003efbd8c0e317200003ebe6a -c4a09d92f386b48cae0ac3004047ebaf -c4a6e80003cbae5cbb0d9f00003cbae5 -c4aabc0004c3fd6cd834a700004c3f12 -c4ac2e5412f17cecf819c100d04c5679 -c4b2560003d7bd5c35be7200003d7bd1 -c4b2ce0004bf43ac05c6c30000498bb7 -c4b62c3e1973e04ce9a77400d0474812 -c4bc797f3778d3ccf95c7600c04c01e1 -c4bcc39767fbb44c99ce4e0030488860 -c4bd720003f4fb4c1af7fa00003f4f7d -c4bea50004c7215c72fab200004c720d -c4c0050004a1556c3c069d00003a6629 -c4c39bafe96d2a3cb58360002044fd24 -c4c443cb971ce3acae29c700504b0094 -c4dc95000451190c90b76c0000451167 -c4dcc50003935c4c360429000039346c -c4ee24000433a9ccb936040000433a9c -c4f140c1e48d34cca4c1e600704ce3e6 -c4f44700040157ace7320c000040156e -c4f486000422b94cb77eca0000422b94 -c4f54a0004a8b7bc59dad000004a8b41 -c4f57a0003d0f1dcb838c000003d0f1d -c4f5cc0003e9484cbab8d200003e9484 -c4f7a10003e348bcbbe00b00003e348b -c4fc0d0003eda95ca176ec00003eda30 -c514344516a194fc2a64d4000047ab51 -c51508d02d3414bca1035500804b786c -c516260003dae5acb9500b00003dae5a -c5175f0004baf0fc404c18f000433872 -c51ae80004c46e0c0e6d6b00004c46de -c51e7e000459842cbc68a70000459842 -c521d600042afbcc6f4da50000425f87 -c524c2000490975cdd42cd00003cfb55 -c5374e156f69e2acfd68c2002048f520 -c5385600049fbb9ce233ed000049fbb4 -c538ac00048ca41cba45e2000044c627 -c53bec000487635c75b5b9000047fc39 -c53c5c0004b549fca0c6ab00004b5493 -c5410b0003f7b2cc235bbb00003f79ae -c541a1201ccd3e4c2fa3a800904a21ff -c54371000430c68cb88ac50000430c68 -c546130003fc522cbb695200003fc522 -c5466e0004ccaeac4f6c8200004cc801 -c546946cc1e9116cb8bebc00304c44b0 -c54701000406931cc41e0c0000406931 -c5471db892556d7c4ccad000404aa6ef -c54c0d0003ee387cc40bbb00003ee387 -c54e32da3a5d47ec7f164e00204aabd1 -c556430003df16ccbc1eec00003df16c -c560df098c382d6c5e32ab00e04bd23c -c565980004bd099cb6f2ce00004bd096 -c56aaa0003d9ef4cf8c49800003d9ee4 -c573250004991a1ce331d7000049917a -c5736b0003d2b80cbe2a4800003d2b80 -c573a800049ac0bc022bc2000049a87e -c580fe0003e3de2cc41d9400003e3de2 -c58301000422173cd04b6b00003d32c9 -c587cf000408052cb02aba0000407f4d -c58b4e0003e9a1ccbbf0db00003e9a1c -c58daf000433928cb98a270000433928 -c58dd90004769a0cde3b74000047586e -c5902f0004837b2c69b30a00004834eb -c592c200041d810cbdba0c000041d810 -c593d0000483326c3a5ac3000047f70c -c59e280003ed448cbbb50b00003ed448 -c5a2430003e195dcbb8cd200003e195d -c5a58f767ff665ac02533300f0473d81 -c5a70900046d510cc45a80000046d510 -c5a8fe000444d4fcc45a5f0000444d4f -c5ba3b14fddc396c74327e00e04b1f66 -c5c17000045cbbdcf0f67e000045cbab -c5c90b0003ede21cbe53bb00003ede21 -c5ca1b0003d1329c9facc000003d12d9 -c5d0ea000466f24cbd89e20000466f24 -c5d251fb7295983c1cc9e600d04ce364 -c5d3389cb5a4026ccc755d0000472ec5 -c5da800003fdb3ccbb928a00003fdb3c -c5ecd700040fa00cb8d590000040fa00 -c5f14300041b35fcb75a0c000041b35f -c5f3a10003cac07c1836a900003cab7a -c5f4880003d13a9cb8ad7500003d13a9 -c5f89a000433c42cbd8e7e0000433c42 -c5fbd1e6eec5501c171c85004047550e -c5ffdc0004a165ac3c069d00003a6629 -c5ffdc0004a165ac834f21000038758a -c600c00003d02d6ccb84c000003d02d0 -c601140003fd6ddcbbf2c200003fd6dd 
-c6055dffb8f1dd7c29633300e0483ccc -c6080f0003d0f92cc4948900003d0f92 -c620f50004649dcc5a989300004649d9 -c62170000464e47cc498200000464e47 -c6230a0003f90cbcb9b43200003f90cb -c6264500047f7fac6276c3000047f7f4 -c6266d00043311fcf8389a000043311e -c62a5ab2c004c60c71429600d048d013 -c6354a0004aa77ccea76c300003921cb -c64114000401430cb9c28f0000401430 -c642560003d2163cc4e71700003d2163 -c644db0003f568acbf400d00003f568a -c64ce80004366ebcbccf5b00004366eb -c655f80003efe94cbbebfa00003efe94 -c65d980004bb293c3cbe6600004ba29a -c65f2830778323fc5c2db90040480967 -c66216dcb03c4e0c18bb8600e048cf34 -c663f8f33812e7bc9b34c200d0494013 -c6680b0003d1c69cefd40c00003d0aca -c66cfe0003e5e4ecc4d3fa00003e5e4e -c6708efd76f8f67c2eac2f004047d5af -c674d4000483c29cb212450000483c00 -c68021000486059c21e9e200004699d5 -c6895200041126fcbebd86000041126f -c68b0a000419ca6cbb8fcf0000419ca6 -c692ec0003ea080cbe713800003ea080 -c69357c8054e9a4ce9840500204a2f67 -c694d20003dd6bbc741d8f00003dd6b8 -c694fe0003957bfc834f21000038758a -c694fe0003957bfcc511f700003957bf -c6952b0004a7746c91abbe00004a7741 -c69624bc4698849c3386ce00604c46b5 -c6982f00047bd7dc161774000047bd69 -c699648bd83d86cc5dce7e00f04b2387 -c69e5c0003cba6acbc9a7200003cba6a -c6a08d0003db349cbd5c0b00003db349 -c6b2c300047b8fbc000bd0000047b877 -c6b44bc419b950ac120b3700404864d5 -c6b86b000412294cbf12ac0000412294 -c6b9c9000458072cbdef600000458072 -c6bbf7bb8318086c1602ab00e04bc384 -c6bf6b000489882cdb4774000047b57c -c6c5940003f5d51c0d7cfe00003f5d48 -c6c5d200047a8afca0e733000047a8a4 -c6c923000457df0ccb5e9e00003cbe13 -c6dfb20003d02d6ccb84c000003d02d0 -c6e7be0004a167bc3c069d00003a6629 -c6ed5200042182acbe9e0c000042182a -c6f54300042ea1ecbf1ba9000042ea1e -c6f9af0004718d8cbe135100004718d8 -c6fb5a0003cbe52c27619f00003cbc05 -c702530004c40fbcebc56b00004c40f4 -c709d700049a5eac672b8c000049a3f4 -c7155500049885dce67aaa00003d8d7f -c72b430003cf90ccb940b300003cf90c -c72ca7000459dc6c21aa7e0000459dc1 -c72fea00042d04ecc5d1db000042d04e -c733d000047cd6cc6adbd0000047cd68 -c73928932839316c9df23900d04b22a1 -c739a800048ac75cabab37000048ac68 -c739deb8c2c3e3dc79b85c00f04ba161 -c740d400047d12fc6449de000047d12c -c7410d679982ad8cc7371700704aa246 -c745af00046e980cbe0280000046e980 -c745dd000471bbeca94e980000471bab -c747590003d1c69cefd40c00003d0aca -c7538216eb4124dc1400a800104b7647 -c7545d00043311fcf8389a000043311e -c75c404b3f6fcffc2ecab200d04c3ba0 -c75c9300045942dcbedf60000045942d -c75dae0004a2742c7c09ae00004a272e -c762300003cbae5cbb0d9f00003cbae5 -c7678c0004ab058ccec38c00004ab018 -c77312000408052cb02aba0000407f4d -c77357c942da5d1c57812400e04956ff -c775720003f3695c73399400003f3689 -c77cef0003e9484cbab8d200003e9484 -c787a40004ac974ca7a5a400004ab782 -c78bac000482505c4701b90000482450 -c78f4a75b796f19ce5782800d04b0d11 -c7904700040a12dcfd8190000040a0ff -c795cc0003e348bcbbe00b00003e348b -c797bb0003e08accbd536300003d0b85 -c799940003dc7a9cbfc51700003dc7a9 -c79b7400047d12fc6449de000047d12c -c7a196468c488f8c360429004039346c -c7a196468c488f8c834f21004038758a -c7a3ec000485373c0b8fec000048522f -c7a9f7aa7e08450ceda5a300504c591f -c7ad73000445186c87235300004450b9 -c7af4c65994eaffc4c85d20060477a12 -c7b9624cedf2aefc58b896004048152f -c7bb2e577e04958c666ace00504b0f98 -c7becd000433a9ccb936040000433a9c -c7c0575cd31f420ccc77d0004047bf61 -c7c6db00043fa11cc64b8e000043fa11 -c7c70900046f60ccbea82f000046f60c -c7cb0a0003fc522cbb695200003fc522 -c7cbd0f6c87d96dcb0f67b00204bc82f -c7d1550004990afc6405d7000049516e -c7d1f80003df16ccbc1eec00003df16c -c7d2200003cba4dc6a7b3900003cb819 -c7d3d0000482505c4701b90000482450 -c7e12b00049b35ec9aad2b000049ae58 
-c7e851000485373c0b8fec000048522f -c7e89300046b75bcc69123000046b75b -c7e8b500047b540c21aa7e0000459dc1 -c7ea480003d850bcc68eaa00003d850b -c7f8760004444f7c59a9ef00004444ea -c8042000044e600cc6c9e2000044e600 -c80bd67091daa49c72070a009047551b -c80df4000495e83c6f75240000495e81 -c80ed00004aa2b8ce148470000420966 -c810b30003cb8d0ca3ea9e00003cb8d0 -c8151f0003e9a1ccbbf0db00003e9a1c -c815355d98130dccfe3b0a00b0479b3b -c827760004c1829c360429000039346c -c82cae1ff7fb57dc73353300804c79a2 -c83333000480d6dc8bc1d90000480d59 -c8408e0003ed448cbbb50b00003ed448 -c842560003dd317ccbf58f00003dd2e7 -c8440f000421757cbcee0c0000421757 -c84893000433928cb98a270000433928 -c84c539bbf6ebe6c022c250080494e4c -c855d600042d4afc82cc9a000042d488 -c860b70004a80cfca8cdd20000475ab9 -c8642f000481cecca8f774000047f2a1 -c865a30004c5817ce832190000496f76 -c8660f0004c1eb7c13f4a800004ba11d -c8665a6d8d27defcdc71de00804734af -c868a70004be94cca3f96b00004be947 -c869430003fdb3ccbb928a00003fdb3c -c86bd00003fcd79cea76c300003921cb -c86d1f0003e195dcbb8cd200003e195d -c8700a3d2d062d3c0483dc00504a2e04 -c8734a3abd5d007c98590a00804c399c -c8763b00044c21acc6dcf5000044c21a -c876b100043667fcc06a5e000043667f -c87f7eeed96cc60cfd0f74004047e6c2 -c87fbe0004a167bc3c069d00003a6629 -c8876000045b948cbf7820000045b948 -c88eac000405080c02135100003eb1da -c890690004b2f58c08da7400004b2f3d -c890aa0003fd6ddcbbf2c200003fd6dd -c891d4cc1acb218c529dd20000474fa7 -c894ac00048d325ccca5a8000048d276 -c894bd02512a3a6cb591d000604a886f -c89645000480ef6cd3bd7b0000480198 -c899c00004a7746c91abbe00004a7741 -c8a10980aa3f88fce34f3300204936b1 -c8aea50004c58b3c0af4a700004c58ac -c8af01000420c52cc10c6b0000420c52 -c8b433d16290bcac72fab200004c720d -c8b5ae00049fe6ac1efae8000049fe47 -c8b9590003cb9adcbfda7200003cb9ad -c8bdb8a3ccc35dac907f7600f04c00f2 -c8cfac00047a929c141774000047a914 -c8d00d0003ed117c3b6cdb00003ecfdb -c8d3370004863d5c62a22400004861eb -c8d3740004773a8cb486800000470089 -c8d5a8000487d34cb212450000483c00 -c8d661e95200f36c83879100d04ce7c5 -c8d9d9000480d6dc8bc1d90000480d59 -c8db170003d4d61cb6ee9200003d4d5b -c8e3740003927b9c834f21000038758a -c8e3740003927b9cc79b4500003927b9 -c8e40b0003d8ec2cc21a9200003d8ec2 -c8e9d900047aba4c9414d4000047ab9b -c8ea0c000401ecbc1fef5100003e66e9 -c8ec4f317330be8c8cf7d100404c64dc -c8f36000044cc2dcc075e2000044cc2d -c8f48a6a9334967c3903ca0080472f62 -c8f4a300043be42cc12850000043be42 -c8f701000413b55cc7a68a0000413b55 -c8fd86000407353cc733cf0000407353 -c90a7c0003c8301cba7f9900003c8300 -c90db00003db349cbd5c0b00003db349 -c9134e0003efe94cbbebfa00003efe94 -c913d000048153bc58b896000048152f -c91a560004bdeb9c56082800004bd838 -c91e07ff04559c3cc511a3002048da7c -c9200c0003d0f5bcc79e1b00003d0f5b -c9255e404582fd8cb189de0090477bb1 -c92ddb00042a3e7cc7c929000042a3e7 -c9340b0003f5d51c0d7cfe00003f5d48 -c93861d5a9e42eecd9937600404c3dad -c93ae20003c29ddc834f21000038758a -c93ae20003c29ddcc7a0be00003c29dd -c941f1000474654c5065d10000437f6d -c942c7000487d34cb212450000483c00 -c94646000419ca6cbb8fcf0000419ca6 -c94f170003d84f0c9aa27200003d84c0 -c9621b0003d06decc2700c00003d06de -c962af08c8cbb87cbf4b7900504a1115 -c96dc000049e6aecb80fed000049e5f1 -c96fd400048e8a4c1b2451000048d4e6 -c973df0003cba6acbc9a7200003cba6a -c977636a4c6b0eac080a7e00c04b7198 -c985d282511de8ec9e45d7001048fb6c -c9887600044eda3cc7faa2000044eda3 -c988770003d2b80cbf867200003d2b80 -c99d580003d0b85cbf436600003d0b85 -c99fbb0003ec908cc8276200003ec908 -c9a3170003d7e63cc2c67200003d7e63 -c9b0d700041d810cbf15dd000041d810 -c9b14c0004730b8c98875e000047305f -c9b42c0004c7070caf0a45000047d45c -c9b5f4000445135ce2df9200004450fe 
-c9bea50004c3bcdc2ecab200004c3ba0 -c9c3fa0003e94a0cb9417200003e9484 -c9c9f80003ede21cbfd3fa00003ede21 -c9cb0100041c9b9ca18d90000041c9a8 -c9d80b0003e31a7cc886ec00003e31a7 -c9dddd0004132a0cc25fcf00004132a0 -c9df510003e19bccc87cdb00003e19bc -c9f02000045d3fecc17893000045d3fe -c9fbd000047a9cbce03c2f000047a9c4 -ca04c10a2e9a87dcc2a05600b04a227d -ca05734c1b0bf1acee5e7400604b1894 -ca27dc000434333cc8eb5b0000434333 -ca2b7400047c04bc13f1f10000476ace -ca2d350003d0b3ccefd40c00003d0aca -ca2d8600042184fcbe9e0c000042182a -ca2da3000487673c75b5b9000047fc39 -ca3219000490b4ccb05dcf0000490b47 -ca35d200047f0f0cc0c096000047e18b -ca370963a50bf91cae2ea500a04c1600 -ca39040004aa349c0ddad000004aa26c -ca3d7a000419e81c7649900000419e76 -ca42fbd8a956a34c4abc2c00604c4aac -ca43d000047f7bdc3a5ac3000047f70c -ca462800042560bc3b2586000041c2e3 -ca4cc10003f568acc098a400003f568a -ca4d2d97b2004acc3ce7a400c0472f27 -ca4dd200047c35ccab34470000407087 -ca507c0004a3e4ece3f7be00004a3e46 -ca59eab81008d2fc25db0800f0445f86 -ca5a660004bdeb9c56082800004bd838 -ca5e280003ea080cc0735100003ea080 -ca68fe0003f45cdcc2ebbb00003f45cd -ca727100042ef5bcf7727d000042ef49 -ca7cc10003c8055c308b2600003c1900 -ca81af000433c42cbf0a270000433c42 -ca884fd85d6a929c5aee240060487259 -ca8a1d0004b2f58c08da7400004b2f3d -ca8cbb3dd7d1aafc454b7600904be7a1 -ca8fae00041126fcc0468a000041126f -ca96d50004451e5cc91f9200004451e5 -caa038c1ace42a1c35890a00004c6d96 -caa2ac000410b5acc2b1dd0000410b5a -caaa8b000483aadc7149f10000483a90 -caab7000049b0c0c58460d00004986db -cab05600049b0c0c58460d00004986db -cab085000483aadc7149f10000483a90 -cabecb0004cd103cc326d800004cbcc6 -cacaac000409329c855b6200003e1816 -cacaf1202bac76fcf586c70000489335 -cacf130004366ebcbdf87c00004366eb -cad5dd00046f8f1cc2575d000046f8f1 -cad8707e38b00bbca3cee800b049f7b5 -cae124000490a93cd66f220000490972 -cae17b0004837ebce148470000420966 -cae58f0003d1a30c492f6300003d0d56 -cae630f638aa926cc9ab5500704b3eb1 -cae70500048faf7c4ecf33000048f891 -cae72000046d859c2127f6000043de62 -cae9240004930b6c618a0d0000493050 -cae9e200048c3dac21aa7e0000459dc1 -caea9800046cd49cc23ddd000046cd49 -caf0050004a4eb3c1ae85600004a4ea3 -caf5720003ee387cc40bbb00003ee387 -cb00d7000421757cbcee0c0000421757 -cb07910004ccbb2c0253330000473d81 -cb0a69e0ca7b349c7495d000a04ccaf0 -cb0d40000448b4dc0e98420000448aaf -cb117ac0d3f8548c464c25000049738f -cb1dcf000490b4ccb05dcf0000490b47 -cb264e0004aa572c4ea9c900004aa54e -cb28b60004a88cccb591d000004a886f -cb2c7200042f8b9cc9c3c6000042f8b9 -cb2c82313bcc67fce5fe7e00f04b1093 -cb2e7c0003c957bc6cf09000003c9539 -cb2fa80004a2749c7c09ae00004a272e -cb3095000412294cc044470000412294 -cb365000042ea1ecc0667d000042ea1e -cb39475e4de4d4fc3bffa400c04bd776 -cb39c079432ade7cae9dd800e04cb13a -cb3e7e0004515c1cc2c0a700004515c1 -cb4050cb02bd06ccfb747d00504aa799 -cb414300042182acbfeb01000042182a -cb42d50004450b0cc9e17e00004450b0 -cb447600046479dc5cc8950000464759 -cb4b02fda62698ac009c25007049914d -cb4eac000406931cc41e0c0000406931 -cb573d0003dae99c830a9200003dae61 -cb60348690f4e88c6ce02500c0496ffd -cb614f25610956fc5079c100604bfbad -cb618600041c2ecc3b2586000041c2e3 -cb646b00040157ace7320c000040156e -cb6d380003e3de2cc41d9400003e3de2 -cb75866da9fcea4cd2e8ac00504851f7 -cb809924051ab9bc62036b00c04868c9 -cb8b60000460b97c3101700000460b8f -cb8c27fdf3015d8c6a431200d04c9a20 -cb8c7d0004aa2eace148470000420966 -cb91de000483ae6c7149f10000483a90 -cb934ebe0fcf63fcaed91700504c3c7f -cb95b90004732e8c05ebe50000399de3 -cb95b90004732e8c834f21000038758a -cb982f18b515defcbba61d00604b29fb -cba0c4000485373c0b8fec000048522f -cba6ac000419d6dcca5ec20000419d6d 
-cbaca0f52ee765ec8dbbcc00704cae64 -cbb5fbfd3febd7bc262b7500504ce5f1 -cbb8b70003dc7a9cc1297200003dc7a9 -cbb9fb0003d0f92cc4948900003d0f92 -cbbfc9316fa60c8cf8492c00804713e7 -cbc8ec0004a7ec7c8855cf000048fea0 -cbceec0003e3491cbaa6ec00003e348b -cbcf214871479aac1073ac0050478feb -cbd1db00042bc33cca6f37000042bc33 -cbd923000464c3dcc2e9e20000464c3d -cbeceb321029769cd53c5c00104b0454 -cbedb9bc54a16c7ce233ed008049fbb4 -cbfc7600045b590cc2c123000045b590 -cc04c15b6402184cbf4b7900404a1115 -cc04fc0003d2163cc4e71700003d2163 -cc0c0d0003ecbeaccac2ec00003ecbea -cc0dab4948e2b83cee96e8002049ad69 -cc0f9c0004676c0ccaa36000004676c0 -cc13990003c29ddc834f21000038758a -cc13990003c29ddcc7a0be00003c29dd -cc295381cc74ceec4d05700050461e38 -cc2c2f00047e6c9cfd0f74000047e6c2 -cc2d720003e2577ccaf40b00003e2577 -cc2f1200041b3c0cb5f17a000041b35f -cc34db0c68da9c1cfbed7b001047cc99 -cc39be000395d0ec834f21000038758a -cc39be000395d0ecd33d18000038bd74 -cc39c00004a119bca2e55100004a1120 -cc3d4ddd8db579bc9ad333006049226e -cc43910004ccbb2c0253330000473d81 -cc45b9000473e79cea5e450000473b41 -cc487c000434450ccb2eb10000434450 -cc4a7e00044bfb0ccac23b000044bfb0 -cc4b2cecbfa2b2bc3101a300304beb57 -cc4d380003e5e4ecc4d3fa00003e5e4e -cc50db0003ee36accafd7200003ee36a -cc53660003d1329c9facc000003d12d9 -cc5a8d77548fc7cc0e6d6b00204c46de -cc5b4e0003ede21cbfd3fa00003ede21 -cc5d4dca694b27acb32ceb00c0486f93 -cc6332b08f21acfcdcd25300704c471c -cc65900003ff28fcf2919000003ff200 -cc688500047eaebc4560b5000047ea75 -cc6a8000041d810cbf15dd000041d810 -cc6eb659cd72d51cbedc3d00c04ca9d0 -cc70020003b0db5c3d1162000039c575 -cc71cb49372a831ca2f28800504cccdf -cc759969e08ef18c2bb4d400904849ae -cc7d3c0004cb7dfc41870e00004cb7d3 -cc800d0003df368cf1589800003d4bb0 -cc836b0003d2b80cbf867200003d2b80 -cc84410003c957bc6cf09000003c9539 -cc84410003c957bc834f21000038758a -cc91e200048dc36c524de2000048dbfb -cc95ef000444d4fcc45a5f0000444d4f -cc99510bcf114b0c1ae85600504a4ea3 -cc9d4ffeba8257dc43af220090499544 -cc9dbdabea3240cc2b781300904bb625 -cc9ee000043667fcc1c3dc000043667f -cca1650446e1aabcfff93500d04cdbfa -cca2720003cb822c6a7b3900003cb819 -ccb0ac00048dc36c524de2000048dbfb -ccb78bfe4d11216c6b3d4a00504aa5f7 -ccb8c10003f36eccc2fca400003f36ec -ccbe46000420c52cc277120000420c52 -ccc719000485757cbcbf460000485753 -ccc9e30004cd106cc326d800004cbcc6 -cccc8d0003d8ec2cc3617500003d8ec2 -ccd20c000419e81c7649900000419e76 -ccd210eb7c2f8c1c9073dc00804a33be -ccd2c3000476e0ac0427b00000444d2b -ccd71ea187e5fbcc39893300004c47e8 -cce74600048779ec193346000048746a -cce88e0003ea080cc0735100003ea080 -cce9310003cbe13ccb5e9e00003cbe13 -cceae20003c8301c834f21000038758a -cceae20003c8301cba7f9900003c8300 -ccf2240003d84c6cff2f1700003d8498 -ccf332fbda31b79c91c3d100904be9cd -ccf5b40003d0b85cbf436600003d0b85 -ccf733000476e0ac0427b00000444d2b -ccfb1200040c501c5f95dd000040c4f1 -ccfbb20003d02d0ccb84c000003d02d0 -ccfc470003fcb6ccea76c300003921cb -ccff170003d333bccb9a5600003d333b -cd01720003f427cccbb8fe00003f427c -cd02070003f568acc098a400003f568a -cd085c89711d9cecc001e300604ce44d -cd0b1200041ee89ccb797a000041ee89 -cd11350004cde38c4f6c8200004cc801 -cd129d00043be42cc27dc5000043be42 -cd1c0f00041126fcc0468a000041126f -cd200b0003f75d4c20a90b00003f75c7 -cd203eee5373478c568f7600904c51c1 -cd22e80003cb9adcc24c9800003cb9ad -cd37f6a956eb7f9ceecc0f00c0490838 -cd38b500047e6c9cfd0f74000047e6c2 -cd3c1470cec1330c315164004049824e -cd41c553391448cc6adbd0008047cd68 -cd42090003d1329c9facc000003d12d9 -cd479164668d33dc9e04d400c048020a -cd47a0000433c42cbf0a270000433c42 -cd4bbb0003f361bcd1759400003f3618 -cd4f5c0004aa572c4ea9c900004aa54e 
-cd4fc142945115dcb9f30500e049098f -cd59093fc9d24b8c33f8a700f04c31fa -cd5cf50004519c7c7a3de200004519bd -cd67600003d7e63cc42b5900003d7e63 -cd6760000464e47cc498200000464e47 -cd6c760004c50eccecbfd100004c50e6 -cd71170003d850bcc68eaa00003d850b -cd732930d74c336ce4207c009049b0bd -cd792900042d04ecc5d1db000042d04e -cd7da300048ac75cabab37000048ac68 -cd7e5db916ea558ce713ac00c047d4f4 -cd82560003dd2e7ccbf58f00003dd2e7 -cd8e4c7486f09ddc0af4a700604c58ac -cd91940004a8a46c7415d000004a8a08 -cd950270c2cf193c754ba800e049129c -cd96300003d06decc3dc8900003d06de -cda2e80004a2f19c360429000039346c -cda2e80004a2f19c834f21000038758a -cda4c400048ca41cba45e2000044c627 -cdaa7e7812fcd67c9ecfbe00304a06bb -cdace53cf2916decf1bb220080498eba -cdadc00004a5384cb486800000470089 -cdba4e6b4bd74b1c697ae800c04c2250 -cdc4b30004366ebcbdf87c00004366eb -cdd17000045cbb2cf0f67e000045cbab -cdd213000412294cc044470000412294 -cdd76000044c695cba45e2000044c627 -cdd9de000479f15c5c8fac0000479ef7 -cdd9de000483ae6c7149f10000483a90 -cddd9000040157ace7320c000040156e -cde34c2bc2757a4c7fc53300b04c5af4 -cded7abc7e5e218ce7257b00f0478771 -cdedf700042ea1ecc0667d000042ea1e -cdf5a100047a929c141774000047a914 -cdf5d200047a627cb7e885000047a624 -cdfb6000045fd8accc90f5000045fd8a -cdff803d9f848b9cb351d20030473e7e -ce00f794514e4c4cd7efb300d04ccadb -ce02bc00043f9d4cee535b000043f8c9 -ce0b0a0004132a0cc3a55200004132a0 -ce17390003b0db5c3d1162000039c575 -ce17390003b0db5c834f21000038758a -ce191400042182acbfeb01000042182a -ce196345577f12ccf86fa800904a3ec3 -ce1a6282223f8a4cc6e31400804c9ace -ce292900042862bcccd27e000042862b -ce2c770003dc7a9cc1297200003dc7a9 -ce3820000453262ccca4760000453262 -ce4d700004585c8ca6bbbf00004585c2 -ce4d9600047d80dcaf69d2000047d7f4 -ce5bbf00045b16dccd1570000045b16d -ce613888f6958b7c91c3d100e04be9cd -ce6170000457decccb5e9e00003cbe13 -ce67835eb7e7f4ec4cc55b0060472660 -ce6aa50004c6df9ce832190000496f76 -ce736c000454debcf69fbf0000454de5 -ce79fb0003d0f5bcc79e1b00003d0f5b -ce7ae80004a3f9bc3e06190000490bf4 -ce7fca8d278cd2fc6d5a5600204b413c -ce8090000470a18ccd3a020000470a18 -ce89e200045be51ccd2fbf000045be51 -ce8cc200048faf7c4ecf33000048f891 -ce8cfe0004430eecfb9b5300004430e8 -ce8e8a00041e357c62b301000041e352 -ce8fbe01ec1eb3dc76c55d0070472e89 -ce909500046ae57ccd2c93000046ae57 -ce9b580003f45cdcc46aec00003f45cd -cea97a0004006d2c834f21000038758a -cea97a0004006d2cd33d18000038bd74 -ceb5cf00048f4fec9720c2000048f4f5 -ceb67500043403ec4b0f6600003d01b7 -cec57379a11d2a8c95e0ac001048c415 -cedd0b3a7fabbb9c90082800704afca0 -cee05c000484cd8c425d980000484cd5 -cee25e00043bce8c430579000043bce6 -cee7fa0003f6171cecfd3800003e9715 -ceef580003ee387cc54c0d00003ee387 -cef280000410b5acc4317a0000410b5a -cef6740004b03abce8167b00004b0270 -cef679a5afb21a6c917dde008047b62f -cef8fe8c18f204bcdab7d100e04c6f57 -ceffdc00043c95dc3997dc000043c95a -cf047600046b75bcc69123000046b75b -cf12e80004a2f19c360429000039346c -cf19a500043667fcc1c3dc000043667f -cf1ac0a8365edd8cc9bba80070497d0a -cf1f60000458a03ccd98f50000458a03 -cf2c0c0003f36eccc2fca400003f36ec -cf2d2a566a71c27c2b3ec3001047c197 -cf2dd700048f4fec9720c2000048f4f5 -cf2f6b00048d260ca559a1000047663a -cf32390004bb727ce9f27200003cbbf8 -cf3b0a000420c52cc277120000420c52 -cf3d7a000419df3ccde3010000419df3 -cf441591eb333a8cebc56b00b04c40f4 -cf4bf979aa5d5dbc3bf7a800604975c0 -cf4e80000406931cc547010000406931 -cf50880003d8ec2cc3617500003d8ec2 -cf50b500047565cc72070a000047551b -cf531c0e30558fbc56690a00c04c22f2 -cf612b00049f02dc7745ae000049f017 -cf654e5dc6826c4cf507a400704adde2 -cf6baed21dd678dcc8837400f0484745 -cf6cb60004a94bfc80d13c00004a94ba 
-cf6d060003e3de2cc580fe00003e3de2 -cf6ea50004c7086cd9fca100004c703c -cf785bba9744eafc72937600004be8fa -cf7c9bd59f3a638c3d6aab00d04bae0c -cf8152000413b55cc7a68a0000413b55 -cf83b0a01b67db1c54592a00804ca5ed -cf8573000444c41cce24a70000444c41 -cf86110c8ed12d8c762eb200e04c5bab -cf8924000495b93c19bdac000038e0dc -cf89e2000453a24cce3fbf0000453a24 -cf8b5d0004705d4c88c30900004705b0 -cf94fe0003e31a7cc886ec00003e31a7 -cf9d940003ec908cc8276200003ec908 -cf9ed052ed36cb3c1a4485001047fa4c -cfa12300045c20bc79a420000045bbc4 -cfa19f0003cb822c6a7b3900003cb819 -cfa1dd00046d802c4d828a00004014de -cfa706ca8218c51c53b0c200a0498071 -cfa8130004bca6fcb0f67b00004bc82f -cfaa8a000407353cc733cf0000407353 -cfad5100049f02dc7745ae000049f017 -cfad7a00040c501c5f95dd000040c4f1 -cfb20500043be42cc27dc5000043be42 -cfb4a40003e19bccc87cdb00003e19bc -cfbd59fc0a1d1bcc3c1c2800304b0ab4 -cfc0140003b86c2c3ea7e50000395bd7 -cfc6851fc18e2a7cc152ce00f04c81a3 -cfcc7600044c68fcba45e2000044c627 -cfd3e50003b13c1c7288fe00003b0b6d -cfd7b20003d0e15c1145fb00003d0df4 -cfdb22000495b93c19bdac000038e0dc -cfe1ee7337d374dcc0bbd100b04c31e1 -cfe4ea00044c21acc6dcf5000044c21a -cff0530003cb9adcc24c9800003cb9ad -cff45c0004b4142c6d5a5600004b413c -cff5f5d224337f3c03f322001049977f -cff7620003e19d6cba3ffa00003e195d -cff88d0003d7e63cc42b5900003d7e63 -cff9b50003d0f92cc6080f00003d0f92 -cffb2dd29e5f0b1c4d6ea500504be3a0 -cffd310003cbc88c4f415900003cbc4b -d004a700044adb7d60f420000044adb1 -d006dec16f3f870d907f7600b04c00f2 -d008fe0003e5598daccfbb00003e5555 -d015f100047c50ad6c1dd9000047c461 -d018b70003d2163dc6425600003d2163 -d01cf44f534aeedda5f7a800204a0f4a -d01fd849ebc18afd5c4c7d00d04ab354 -d02029d1091be43d315ac70010488f0a -d024f80003d8824d3ffb3d00003d8803 -d02a891bf853ae7d6fef7600f04c2c24 -d02b5bae8cdbc53dd58c51001048720f -d02fb500042a3e7dc7c929000042a3e7 -d04035a6cf601d2d95111700c04c56c8 -d041170003dc231df2a76200003dc222 -d0498f0003d9fdddcf109800003d9fdd -d04c32223eaef62d2fade200a048d7cd -d05e2958a41ec1bdece59800a04bb4ae -d066740004bbdfedc2866600004bbde4 -d06fa40004ae2ebdcad19400004aaa26 -d0731f75eec1822d2cb77900d04a4abb -d078a90003d06dedc3dc8900003d06de -d0841215a9949afdfbb01300a04adad4 -d087390003cb822d6a7b3900003cb819 -d08ab78a3fe9516dac619600d047e0a9 -d08c0db5a541af1dbfb2db00f04993e5 -d08c2bf79c7daa8decc93300504c1ea1 -d095330004c34abde5fe7e00004b1093 -d09b910b5e7ff72ddff06900004b2013 -d09e160004132a0dc3a55200004132a0 -d0a2db00048e5d6d80d282000048e068 -d0b5eec0be8bf52d03b5a8009048e18a -d0c7c8b9be8bc4fd9d924500e047648f -d0cdc500043bb89dcef850000043bb89 -d0ce09797ff2943d6caaa100804bc4a5 -d0ce7e00044f27cdcf5876000044f27c -d0d8720004a1880d09247200004a1805 -d0d896000482505d4701b90000482450 -d0de7e000423b52df428680000423b4f -d0e3da232f96551d6449de005047d12c -d0e5aff6a25dc4ed90a59800104b1595 -d0e6e80004a1880d09247200004a1805 -d0e86b00040a12ddfd8190000040a0ff -d0e95100049a5ead672b8c000049a3f4 -d0ea04638747e43d1d2a7300e04cb575 -d0f063000391febd834f21000038758a -d0f063000391febdcf8e460000391feb -d0f2c30004824e6d62e7d000004812ea -d0f4919a12cab3fd0f9ebc00704c602e -d0f6fe00039ae10d05baa3000039ae0d -d0f6fe00039ae10d834f21000038758a -d1040f00048f8f5de3730a000048407f -d105060003e5e4edc66cfe00003e5e4e -d1054942b32fd1ddfdd0b500f04827c7 -d108330004a7c43d43b47c00004a4887 -d10b37000487635d75b5b9000047fc39 -d10d2300044eda3dc7faa2000044eda3 -d11124000498768dea76c300003921cb -d114470003fc3b6d66b20c00003fc3b2 -d115441a874a81cd5b05b900b047a9ae -d119c7e03bc4713d4dff6d00f049fda4 -d11a920003d849bdff2f1700003d8498 -d11b0100040c501d5f95dd000040c4f1 -d11dbfce158647ed85f4bc00204c8850 
-d12485000473585d28f82f000047356c -d1282000045d8c7dd570a7000045d456 -d12fac00047e191dc0c096000047e18b -d132ec0003dd6bbd741d8f00003dd6b8 -d1352c8700cbebcd2b635e0070472ad5 -d1361900048e5dfd18bb86000048cf34 -d136711332d6ffbdde3b7400f047586e -d1388e0003f45cddc46aec00003f45cd -d139fa9cd509daede009a30040488586 -d13c0e7f7688437da239a800504891f1 -d14170000454debdf69fbf0000454de5 -d143e9fc5b621ecdc377d0006047e15b -d14701199eb2344d2fe5f100e047fa3f -d1489500044944bdcfd476000044944b -d152db000490d27d80d282000048e068 -d152e3325674135df603ac005047643d -d1649300046c61fd9ccc93000046aea9 -d167cfce9a639f5dd141e200704b3fde -d168c10003ee387dc54c0d00003ee387 -d16a7e0004bbd83db1068700003b603a -d16f590003d3bcdd774b6b00003d390f -d1705237d1a5fd4d28aee800f04c2d9a -d170710003d850bdc7ea4800003d850b -d1744197567442edb491ab00504c6432 -d1775c0004ab2fdd26c6e800004a7549 -d17cbf00048e2ffd8f24ac000048e0c6 -d181b2dc44b66c5d5af5d9005047e2f1 -d18504a7a7a04f2dfc09c100604c80f7 -d18647a922f439ad55a02500804920d4 -d18b0a00047af2fdb9b0b300003cbae5 -d19593af4a9fe8adb6542f00704824db -d198c10003c805cd308b2600003c1900 -d19bb6000410b5adc4317a0000410b5a -d1a1d2000483360dc5ac850000483327 -d1a2bea4c2e2895d927c2c00604c513f -d1a3a000042d04edc72fea000042d04e -d1a8470004098c2d5d495200004098ba -d1ad8f0003d32c9dd04b6b00003d32c9 -d1b0e1a00cb86b1d6ea4b5001048094a -d1b7620003e2577dcaf40b00003e2577 -d1b97900048e2ffd8f24ac000048e0c6 -d1cc95000406931dc547010000406931 -d1d1830b8985ba9d00028b00904753a8 -d1d2ad14b4abdb2db3c6c30050499444 -d1e2dda8b36023bd5440d40020473dec -d1f62c00048f4a2dcca5a8000048d276 -d1faad0148e3866d2522d800504c8746 -d20a430003e3de2dc580fe00003e3de2 -d20d86000419d6ddca5ec20000419d6d -d215bd0004451e5dc91f9200004451e5 -d21733000498749da7c5640000498713 -d21b08000445e06dd0df530000445e06 -d21bd3a865e9776d8b99c700e04b2ff0 -d21d578b3b67b32d7a521900c0493ea5 -d21de200045a866d4119e30000448a71 -d225170003d8824d3ffb3d00003d8803 -d228d20003ee36adcafd7200003ee36a -d22adf00042f8b9dc9c3c6000042f8b9 -d22fed0004a16bbd47e856000049a4ea -d2317b00047e387dbafbac000047e383 -d235720003ecbeadcac2ec00003ecbea -d23a920003d5e38dd0eeaa00003d5e38 -d23c30c7fbd6dbbd6d99d2000047e172 -d23c7d0004aa572d4ea9c900004aa54e -d23ff500048726dd0de76b000048726b -d247590003d7b0cdd0c80b00003d7b0c -d24ba80004990afd6405d7000049516e -d251c15f43a53dbd228b7600104c5d5f -d257121a701eba5db351d200d0473e7e -d25990000409a76d6b21520000409a68 -d259f40004450b0dc9e17e00004450b0 -d25b9b94de77e19d18b77500904cced9 -d25dde456412d07daf69d2000047d7f4 -d26fa60003cbe13dcb5e9e00003cbe13 -d2731900048d0d7d3c069d00003a6629 -d2731900048d0d7d834f21000038758a -d2767e000423e8bdd104010000423e8b -d2780f00048f8f5de3730a000048407f -d27d5faf9775be2d3154c2002048f26f -d283750003d0f5bdc9200c00003d0f5b -d2873d0003dc591d69118f00003dc0f6 -d2927c00046faccdd15a98000046facc -d2967c0004712e8db486800000470089 -d2a50bf78f516f5ddfdb7900f04a3b09 -d2a6480003d413cd2368f800003d4139 -d2a75b0004340e7d7ddf5b00004340e3 -d2a86a69b8cc716d6f0645002047c4c2 -d2a9640004964f3d0aa62400004964f1 -d2ab7b4d78bb0e2d6d8896002047a688 -d2b6c300047e6c9dfd0f74000047e6c2 -d2b8db0003f3618dd1759400003f3618 -d2ba8f0003fa3f8da3306b00003fa3e6 -d2bfc7d314cef60ded43d100e04c3925 -d2c67c0004714b8de148470000420966 -d2d0951e2b1b781d80d13c00804a94ba -d2d44032fa8e763d7196a100f04b9dc2 -d2d6090003d02d0dcb84c000003d02d0 -d2d80200042bc33dca6f37000042bc33 -d2d8a90003d0f92dc6080f00003d0f92 -d2d8dd89705e295dc025d90030475912 -d2ddb17a90feb73d4119e300f0448a71 -d2df0e15140f678d806805004049de71 -d2e76b0003d2163dc6425600003d2163 -d2e7bffd927c7ccdadda6600404accab 
-d2eb2675e450b11d975c5100f0489b26 -d2f5d1000434450dcb2eb10000434450 -d2f6ec0003d209fd4b0f6600003d01b7 -d2f9720003dd2e7dcbf58f00003dd2e7 -d3000200042aa82d58109a000042aa0d -d303b00004444f7d59a9ef00004444ea -d309940003f75f5dd194db00003f75f5 -d30fbb0003f427cdcbb8fe00003f427c -d310314eca05d76d255a7400204b063a -d314980003cbc0fd27619f00003cbc05 -d318235fad3bc59d7b51e200c0488f64 -d31bdd202b94cf2d69e24e005048504f -d31e5b8784d16cbd14aa7e00004b7c98 -d320ca00048b46ada92337000048b428 -d32485000476e0ad0427b00000444d2b -d32a970003d008dd35d53500003d0069 -d32b8a911183f75dadedd7005049fbbd -d32e3f581c3ed9dd40c52400a04987f3 -d33047000409b7bdd1daac0000409b7b -d330c2000498833d40c52400004987f3 -d33fd43cd383352df2320d007049979b -d340a70004519c7d7a3de200004519bd -d359db000429c15d6f4da50000425f87 -d359f4000490b4cdb05dcf0000490b47 -d35c4e9251b8863d586e45008047c1d9 -d35f620003ed447dd22aec00003ed447 -d364b52da564305d9ab26600704b14db -d36ae0a20f3ed59d0b1e4500f047402d -d36e84950af370cde7d1de007047a8dd -d3718f0003d8824d3ffb3d00003d8803 -d3723947816ae96db739d20030478593 -d372ac000406d4cdd1bac20000406d4c -d37b6c00044bfb0dcac23b000044bfb0 -d381a40004a8a46d7415d000004a8a08 -d3836b00048c814d5aee240000487259 -d3867b0004ac3bdddf6ace00004ac099 -d38a2c00048f4a2dcca5a8000048d276 -d390a6d585eec9ad18bc1300404b8919 -d393580003e31a7dc9d80b00003e31a7 -d3a0fe0003e0c45dd257fa00003e0c45 -d3a1cf9b103bbd7d4f033700c048855a -d3a60f0004c606ad1602ce00004c6057 -d3a72e357609ff0dc2cda400b04aa129 -d3a9910003caadbde6905400003caada -d3b24e00048d260da559a1000047663a -d3b34450b03da50d232e7400004b9ae5 -d3b43602b16ab16d776333003047c306 -d3bc0c0003ec908dc99fbb00003ec908 -d3bfb500042a456d939228000042a44d -d3c2580003d1067d410f6300003d1033 -d3c61395048c23dd82bf2500a04974f6 -d3c8a55fa7a2de4d6079ae003049d1b4 -d3c91f0003e5e4edc66cfe00003e5e4e -d3cc930004676c0dcaa36000004676c0 -d3cda30004c802add115c100004c53dd -d3d44700041ee89dcb797a000041ee89 -d3dd43000413b55dc8f7010000413b55 -d3e0c10003e19bcdc9df5100003e19bc -d3f3c92bff1dfadd56cfac00f047ce7f -d3f8c26a1b348bcddaeee800504bee3f -d3fb5b00043cdb5dd27a63000043cdb5 -d400d40004821b8d6f928b00004821af -d402bb179b19fcddeb6885007047cf8d -d4045600049a4c1df4638c000049a4ac -d408250004984eddd36820000045f8dc -d40c0100042a456d939228000042a44d -d40e515a8e6ccf7d7b156b00004c4178 -d4153561d52bc52dbe085600d04a0d80 -d417b5000423b52df428680000423b4f -d41aba000401443d9962aa00003d85b9 -d41c65727add77fd2a986900b04b3824 -d422390003d850bdc7ea4800003d850b -d4288600042d04edc72fea000042d04e -d4298f0003d9466de67aaa00003d8d7f -d42ac200040c5a3d5f95dd000040c4f1 -d42d6f0004a9209d10dd24000048f4df -d435a100047b540d21aa7e0000459dc1 -d439a100047cba2db1068700003b603a -d43d170003d59c2dd2e27200003d59c2 -d43d2c000471372d1d5b510000471370 -d43d4a0004a9209d10dd24000048f4df -d43f0a000407353dc8fd860000407353 -d445720003e9243dd2f36200003e9243 -d4460c00040f21ad82c20c000040d4eb -d4470100041a262dd2f190000041a262 -d44a348262c5e8dd7a7ba800304905d2 -d4516c5fe9737b5d0b40a800104b92d8 -d45cd20003f2cddd7f7c0d00003ef422 -d45e2fe749cef3bd060124008048ea11 -d46152000403115d554ac2000040310e -d467d38e6b9f7b3d5b3aab00804b5cab -d476c3000391febd834f21000038758a -d476c3000391febdcf8e460000391feb -d476e80004a1b79db1068700003b603a -d47dcf00042a3e7dc92ddb000042a3e7 -d4852400048ebe7d2019cf000048ebce -d4889900038bd74d834f21000038758a -d4889900038bd74dd33d18000038bd74 -d48e5c479a776ffde832190030496f76 -d48f8c00049a4c1df4638c000049a4ac -d490fe0003e2aaadf29eec00003e2aa1 -d491ce0004c2286d1643e9de0688aaa4 -d49624563344fd0d9414d4000047ab9b -d496e06905784fadc973ac009047cd26 
-d49b4a8f8fc6bdbdb6b59600f0481945 -d49bcff3e37ebf3dd1e1b90010473c21 -d49bd2e84c9a566d5beb0a009047b4c5 -d4a1cf000496b70dea2e240000496b69 -d4a535000473296d1e7f31000047328d -d4a8d4000480d6dd8bc1d90000480d59 -d4abbf0004565f6dd3517000004565f6 -d4b337000488ddedee535b000043f8c9 -d4b48500047e587d910dd2000047dfb9 -d4b82015100bc5adebd9d900e04791b1 -d4be480003d39bcdd33b3f00003d39bc -d4beac00040652dd8421dd000040652b -d4c0d20003f3669dd353fa00003f3669 -d4c0da8d5864dcada2037900104a73a0 -d4c3752eb868765d39a1de0030473db1 -d4c468000434333dca27dc0000434333 -d4c5e2000485324d6caa4e00004850f7 -d4c86f00048ebe7d2019cf000048ebce -d4cb6be3958c909d8fe3330030493fe3 -d4cd46000438b71dd36be00000438b71 -d4cea8e908511b0d7736e800b04c1854 -d4d9d600042862bdccd27e000042862b -d4dd2300045fd8adcc90f5000045fd8a -d4df2cf8727ddbcd1f32db0070491530 -d4e8a80004bbdfedc2866600004bbde4 -d4eb4600048652ad03f0d200003e555b -d4eee80004c2f8ade832190000496f76 -d4f7cf000403115d554ac2000040310e -d4fa7d34bf45bc4d66b5c700804acd19 -d4fc660003d0f5bdc9200c00003d0f5b -d4fec300047f0ebdc0c096000047e18b -d500b70004aabaddd7cdd000004aaba5 -d5085000043bce8d430579000043bce6 -d509940003e46bed47fb6200003e40b5 -d50a980004713edd76a45600004713e8 -d511e200044e6bfd5acc76000044e6ae -d5127a04d643cc2d060124002048ea11 -d514dc0004725a2d4b0f6600003d01b7 -d5193ce2da15e8bd86f42f0030473db7 -d51bde9589cc59dd8343ed00104a1622 -d51dd2b6250762bdb81a4500b04caf85 -d5217000045f8dcdd36820000045f8dc -d523170003d13a9d462a0900003d121f -d525e200045d8c7dd570a7000045d456 -d5279dd8261ba2bd3c069d00503a6629 -d5279dd8261ba2bd834f21005038758a -d537fa0003dd6bbd741d8f00003dd6b8 -d53a4e00048d7afdcdcde2000048d7a0 -d540c200048f8ecde3730a000048407f -d54b80dde5c055fd54793300604beeb4 -d54d51f28004c8ade009a300b0488586 -d556d8632be4c34d5af5d900b047e2f1 -d55ca7000466ed9dd4179c0000466ed9 -d55d590003cf8edd53d82000003cf8ab -d55d72dd0c6b78fd2cb77900704a4abb -d55e480003d9fdddcf109800003d9fdd -d56337000423b52df428680000423b4f -d569750003d1f9fdd3db3d00003d1f9f -d570ea00046ae5fdcd2c93000046ae57 -d57f8924bea91a5dfc353c00e04cbb89 -d5899600047e859d7ba1f1000047d739 -d58d380003ee9b1d307d7200003ee9b0 -d58ffe7ed3a9b55d78b83d00404c87b8 -d592ae926462d18d29b5d7004049a828 -d5a1ab0004c5927deda5a300004c591f -d5aa9d00039c539d834f21000038758a -d5aa9d00039c539dd42531000039c539 -d5b30a00047fbb1d2fe5f1000047fa3f -d5b475000436652d15a67e0000433f4d -d5b8a700045be51dcd2fbf000045be51 -d5baac000419df3dcde3010000419df3 -d5bb507de244712d5f237600c04c9d44 -d5bd5236148bda9df8c3d000f047a6c4 -d5c02f000470a18dcd3a020000470a18 -d5c0c00003d116dd3f1e0900003d1167 -d5c4980003ce7feda5ee2000003ce7fb -d5c8200004696bcdd4447600004696bc -d5cb60000453d19dd444f50000453d19 -d5d10d79b8ca195db4868000b0470089 -d5d12925910f48cddc71de00d04734af -d5d2b01a28a26a9de4486900304bd985 -d5d5ae00049b0c0d58460d00004986db -d5ddc148d45d075d1b03d4001048e88d -d5df821ec3125b5dba9e3900404b9971 -d5e2a8a6c1c1de4d28543300204a184e -d5e2cb0004cc522d057b3300004cc4f7 -d5f7f6ec0e2d922d8bc1d900d0480d59 -d5f8060b6798a66d341b1200304cb22e -d5f8690004ba205d1a021d00004b9eb6 -d5fb632fa5469dfd4560b5007047ea75 -d5fc2000046ae57dcd2c93000046ae57 -d5fe2c5335cf0addd2c78600e048c057 -d6024e00048d074dc578ac000048cfda -d6047efb1bfc573d9109cf001048f640 -d607b20003d0d48da639e600003cf5b0 -d60e6ac4b6841b8df3763900304b0783 -d618280004ae38adea76c300003921cb -d6182f000471d26dd4a3200000471d26 -d61d52843972a58dc883740090484745 -d61dc5000436878dd4dbdc0000436878 -d61dd9000483f15d5544d40000483f12 -d61dfc0f8ea88f9d46e96b00304becd1 -d62708000445b03dd4c8fe0000445b03 -d629a01fe8193d2d2cf20f00504c4d37 
-d62b60000453262dcca4760000453262 -d62bd1fb9ee4231daf69d2001047d7f4 -d632720003d9ef4df8c49800003d9ee4 -d63633e47256fded2fb65600b04b9bf2 -d63b0a000419d6ddcba6ac0000419d6d -d63c2d458c5f091d88f00b00403d1a35 -d63e740004bca05d87863900004bc9f9 -d63f1400450da66de2821700e04caa3d -d63f37000488ddedee535b000043f8c9 -d64129e333e79f8d93de0f00704c4ca9 -d643580003ecbeadcc0c0d00003ecbea -d647d10004c6df9de832190000496f76 -d64f2175f85f82ad754ba8008049129c -d6524500047c119da639e600003cf5b0 -d656280003e19bcdc9df5100003e19bc -d656e90003ec908dc99fbb00003ec908 -d659060003ee36adcc50db00003ee36a -d65d590003ce7feda5ee2000003ce7fb -d663dd5b3b1a02bd63e1b900c04736f3 -d6662054d436f25d6ddaa500e04c435c -d6676d0004a1880d09247200004a1805 -d668a10004c3bcdd2ecab200004c3ba0 -d66d7b3aa6e5212dc12dbe00f04ccdd8 -d670d20003ee469d3b6cdb00003ecfdb -d675b900047a914da0e733000047a8a4 -d67ba10003e31a7dc9d80b00003e31a7 -d67c540003caadbde6905400003caada -d68242f6e6680d5debc56b00604c40f4 -d68651cf2766a33d41870e00b04cb7d3 -d6887f200df2485da80bac00704ca549 -d68ba10003e2577dcc2d7200003e2577 -d68dab0004c53a5d92901300004bbe59 -d68ece0004b9cdbda775c700004b411e -d69114000413b55dc8f7010000413b55 -d69657de67b2047d8cda50007048f552 -d69c9500046b45bddf6876000046b3d5 -d69d7000046c97adf560a7000046b995 -d6a1d200047af2fdb9b0b300003cbae5 -d6a76900048e2ffd8f24ac000048e0c6 -d6b2ab0004b9af8d232e7400004b9ae5 -d6b7760004c4402d931fbe000049c96d -d6b8c8e5953fcdcddf4dc0008049d247 -d6bdde000483fb7dd25d9600004802d4 -d6be480003d9ef4df8c49800003d9ee4 -d6c28b000483f15d5544d40000483f12 -d6c860a9a611e1ddf7507d00904a8eae -d6c9040004a9df6d49b8ae00004a9de3 -d6d4526e6fa57f7d18b77500704cced9 -d6db7000042f8b9dcb2c72000042f8b9 -d6dbcf3f9460a98d70d9f100d0480249 -d6dbf627cd4940bd2951d900504764c8 -d6de2400048b46ada92337000048b428 -d6e8a80004ac3a1ddf6ace00004ac099 -d6e8ea00045d456dd570a7000045d456 -d6e9fd626ef92bfd8c3e3900e04b31d2 -d6eadb730c5dcbfda556450000474cff -d6f0760004551a4dd5b36c00004551a4 -d6f0790003dd2e7dcd825600003dd2e7 -d6f616000407353dc8fd860000407353 -d6f9f70003d333bdccff1700003d333b -d6fa560004bd240d5e32ab00004bd23c -d6fc660003d02d0dccfbb200003d02d0 -d6fd6a11b1681c1d33a9e200b048b81a -d7000f00048f4fed9720c2000048f4f5 -d704c6ced848345de325de005047d295 -d70bb0a6905e107d06be7e00504b9683 -d71cd20003ee3f0d3b6cdb00003ecfdb -d71e8a0003fc3b6d66b20c00003fc3b2 -d722af919335785d61d59400504ab0de -d7255de2de0beadd697ae800304c2250 -d72b620003ddf6fd3ea7e50000395bd7 -d72f5100046d517dc45a80000046d510 -d7308e0003f427cdcd017200003f427c -d73282000434450dcc487c0000434450 -d7347b6be68a1ebd29c7220050496a49 -d7365408d5499a6d78310a00804c4cdb -d739d600042c260d7bf801000042c22d -d73c0100042c260d7bf801000042c22d -d73cb30003cf8edd53d82000003cf8ab -d73e3cf689fed03da229d800004ca072 -d7430729749ea5cd7f2e7b00704bd35a -d7481f000434333dca27dc0000434333 -d7498600038bd74d834f21000038758a -d7498600038bd74dd33d18000038bd74 -d750fb00042a3e7dc92ddb000042a3e7 -d754280004b3218d8c3e3900004b31d2 -d75a740004bb580d982b9c000045d4a7 -d75c31caab72869d5079c100804bfbad -d76b4f0003caadbde6905400003caada -d7707c0004a106fdf1efed00004a105b -d771bd00044329fdd615f4000044329f -d776560004bb580d982b9c000045d4a7 -d776ba0004098c2d5d495200004098ba -d7770a00047fbb1d2fe5f1000047fa3f -d777df0003cbe13dcce93100003cbe13 -d77ac7000485beade154c40000485be5 -d77bd10004c3fd6dd834a700004c3f12 -d780a80004b3218d8c3e3900004b31d2 -d7827e00046a077ded5570000046a02d -d7846b000405080d02135100003eb1da -d786b20004c4085d96393100003cf5c7 -d786e92437e86d3d3dd27400904ac9b6 -d78b590003d32c9dd04b6b00003d32c9 -d78e720003d8bcbdd5af3d00003d8bcb 
-d797f60aaf5b677d806805005049de71 -d7a42f000477d2ad13f1f10000476ace -d7a5310003958b4d834f21000038758a -d7a5310003958b4db485f20000395839 -d7a5c900045a16bd5ae360000045a12f -d7a9dd00041e357d62b301000041e352 -d7aceb00048b485da92337000048b428 -d7aee605c7d8c42da44f8c00004aba7f -d7b152000419377d32b7120000418e3e -d7b19f0003cbc0fd27619f00003cbc05 -d7b2720003d5e38dd0eeaa00003d5e38 -d7b6a300042bc33dcbd1db000042bc33 -d7b8a300043dd35d00f35b000043dd21 -d7bb760004c4085d96393100003cf5c7 -d7c8690004bdeb9d56082800004bd838 -d7ce54ce6ef2b0dd16470e00c04ca50d -d7d0adb1b0d6898d0b40a800804b92d8 -d7d2480003d7b0cdd0c80b00003d7b0c -d7da580003d0e15d1145fb00003d0df4 -d7e2f7b14c13659dbc2eb200804c19ee -d7eca80004bd099db6f2ce00004bd096 -d7edd05a691c59cd09e48500a047a763 -d7f00d0003f05a1d02135100003eb1da -d7f10b0003e48b7d47fb6200003e40b5 -d7f2200003cfbe2d834f21000038758a -d7f2200003cfbe2dea76c300003921cb -d7f27e000454a8bd038c760000454a70 -d7f4a300043bb89dcef850000043bb89 -d7fb170003d3bcdd774b6b00003d390f -d7fd750003d2e96dd6af5900003d2e96 -d80cb3f369740d3d6fd8bc00e04c96a8 -d816a18b33c766cd920d9600f047dca8 -d81b20000471986df34b090000471975 -d81f25000495b93d19bdac000038e0dc -d82552000405080d02135100003eb1da -d82a1d0004acd1fd66b5c700004acd19 -d8320c0003ff1a6d3ea7e50000395bd7 -d8320f0004c61f1d6181ab00004c61bc -d83d940003e3c9eda0817200003e3c98 -d840f86c9dc66d2d7fed24003049288d -d842480003d3bcdd774b6b00003d390f -d8429800046d400d6eb280000046d3ee -d84d3b458489210d50ce0f00504beb0a -d84f6d00049a883d29b5d7000049a828 -d85025000490175d40cb22000049016a -d854ea00044eb00de8f4e800004385a3 -d85bbb0003f3618dd1759400003f3618 -d85dde000483fb7dd25d9600004802d4 -d86c760004be8e4dada151000049f5c5 -d870fe0003e083fdd72d9400003e083f -d8793803da31ffbd8e39240070499126 -d87c6b000400516d52b06b00004004f9 -d882ce0004be8e4dada151000049f5c5 -d8857a00041e357d62b301000041e352 -d885d100043bc89dd71579000043bc89 -d886a200044f27cdcf5876000044f27c -d88ad9c5a8eab62dffc33300b0479d55 -d892390004acd1fd66b5c700004acd19 -d898a10274e286bd668e820080489dc9 -d89da4c1f1423d7dd9e02f000047c116 -d89fa10003e583fd054d3800003e555b -d8acae25b1db736d298fd000f0478e76 -d8afe500039c539d834f21000038758a -d8afe500039c539dd42531000039c539 -d8b01ef469b58a6d788ca100704c7d6d -d8bbca000472ae1dcea15b0000472ae0 -d8be3b00044a172dd75c76000044a172 -d8c2430003ee36adcc50db00003ee36a -d8c36000045e153dd744f5000045e153 -d8c3e27ef1169f8d79136d00604a2163 -d8c8e720dc345beda6c9be00304cd34c -d8ca16000419d6ddcba6ac0000419d6d -d8cc0c0003ecbeadcc0c0d00003ecbea -d8da7c00046dd25d19fb1100003cbba2 -d8e6f5d0764a2a4d7fed2400f049288d -d8e825adfd682bad40e0a700a044fccc -d8e9116fb526ac4df5c99600f047fe43 -d8ee150003d9fdddd0498f00003d9fdd -d8f3aee71074216d28f82f00b047356c -d8f96606a94b57dd356c0f00e049422c -d8fe63dbeef50b9d1a0ab200b04c5ba6 -d90a1300041ee89dcd0b12000041ee89 -d9103d87317dbbedaed91700d04c3c7f -d9142000044b07ad1b476c000044b070 -d9155d000472ae1dcea15b0000472ae0 -d91cd20003f75f5dd194db00003f75f5 -d9262178c2db31cd11edde00e0481705 -d9286b000400048d68ce8f0000400026 -d928ac1c7218d3ada9b47600b04c4947 -d92bb0000444e16dc45a5f0000444d4f -d92dcd79e60c215d66b5c700804acd19 -d933790004a166cd3c069d00003a6629 -d933790004a166cd834f21000038758a -d9346b000409b7bdd1daac0000409b7b -d9383d0004c85c1d1ec42000003cbaf6 -d93864dca6593dfdeb4d5100504a2dfa -d942450004c85c1d1ec42000003cbaf6 -d94def0004450b2dd7f56800004450b2 -d95027bd4061188d72ca4500904826ca -d951380003e0c45dd257fa00003e0c45 -d952db00048e5e1db3936b000048d69f -d95337000423e8bdd104010000423e8b -d954b7ea4d58185d333eb200b04c7266 -d95b67cba8d7bdedae5b2500a04914f9 
-d968a10004c61f1d6181ab00004c61bc -d96a070003dd2e7dcd825600003dd2e7 -d9708600042862bdce2929000042862b -d976be288f40eb6d6f75240000495e81 -d976d99a14991aad3e77ed00704a2a1d -d977e749afeac75d19bdac00a038e0dc -d978d700046faccdd15a98000046facc -d97b0cd37554d5ad2fcb7000304a02db -d97cfe0003b08ead834f21000038758a -d97cfe0003b08eadd7e9f700003b08ea -d984e2400ceedf7d199bd100504bf5ff -d985c9000458094dbdef600000458072 -d9868000046e202dd8527c000046e202 -d98893000456c87d8ec76c0000456c64 -d989e20004b19d6da7a5a400004ab782 -d9923a5e62cddf1dd9937600a04c3dad -d993590003d28f6d2f298f00003d28d4 -d998b60004a9209d10dd24000048f4df -d99b2cae4bbd458da44f8c00704aba7f -d9a11200042f8b9dcb2c72000042f8b9 -d9a566ed0f8c63bd932de20010487a25 -d9a5d900047d12fd6449de000047d12c -d9a99a00042e1f5ded63b5000042e1e9 -d9ac3ca150b53fed99acb500f047d32c -d9ad3e4e9b9e23edcc1cc200f0490ba4 -d9add90004758dede02f1700003daba5 -d9b1db000425a90d9349d600004256b4 -d9b40d0003efbdfd1672ec00003efbd9 -d9b699c546fcd11d73353300e04c79a2 -d9b9140003c4ac6d834f21000038758a -d9b9140003c4ac6dd8667600003c4ac6 -d9beaa0003d59c2dd2e27200003d59c2 -d9c3bb0003ef300d3b6cdb00003ecfdb -d9cb627a9cdeae5d50026e00204ccf62 -d9cc910003d02d0dccfbb200003d02d0 -d9ce150003d333bdccff1700003d333b -d9d1865c1cacd52dc677d100e04c520a -d9d1c5000436655d15a67e0000433f4d -d9d688ced87738cdc152ce00c04c81a3 -d9d9cb42d6c5b04d92901300104bbe59 -d9ddcc0003f427cdcd017200003f427c -d9e346000487d34db212450000483c00 -d9ea80000419df3dcf3d7a0000419df3 -d9ec0b0003ed447dd22aec00003ed447 -d9ef3d0003d215edd88e7200003d215e -d9f8b3000434450dcc487c0000434450 -d9fad01918ac8b5db80370003049bca6 -d9fc36ea08fe787d3d1df10020483530 -da00db4d1f3a3a7dee5e7400e04b1894 -da017b98c6264a6d1d2a7300a04cb575 -da05dca6c6d6fe4da7c56400b0498713 -da0dc900045b173dcd1570000045b16d -da1065573570080db6542f00b04824db -da16ba000406d4cdd1bac20000406d4c -da1ce1b4e033c9bd5a967400e04bc3e2 -da1d720003e2e68dd8d2ec00003e2e68 -da22d01d665a835db48aab00404ac3aa -da30fe0003e9243dd2f36200003e9243 -da34c00003d0e15d1145fb00003d0df4 -da35ae24b96ac57da5564500f0474cff -da3fbf0004494dbdd9052300004494db -da41db00042c4e1dd901d6000042c4e1 -da43630003cbe13dcce93100003cbe13 -da43bf000449491dd8f23b0000449491 -da467c000470184d303cd7000046fef3 -da4e530004c77e0dbb01c100004c77d5 -da500b0003f1f57dd8f59400003f1f57 -da52ba0003fe283d834f21000038758a -da52ba0003fe283dd33d18000038bd74 -da52db000440688dd8cb8e0000440688 -da54d400047e387dbafbac000047e383 -da585f74b13072edc5c60f00e04c2c0f -da5ee6700e7dc9add302f80090495f06 -da5f630003cb8d0db4b95900003cb8d0 -da62155133db0c1dd63bac00504737d8 -da63bf00046b628dd8ac20000046b628 -da68efd16ff7239d4954a800204b28ef -da6d4c0004725abd834f21000038758a -da6d4c0004725abdea76c300003921cb -da6f294e9c90c81d45863900d04b3504 -da6faf81b4c4167d3e41f1002047c5a0 -da71e200045060ddd913bf000045060d -da7b3bfcaaef127d628df400c0495457 -da8370742d0473fd2d192400e0496371 -da873259c7dcfa1d2019cf008048ebce -da8a9d8eb80e362d8130a100204c1989 -da8ac506977bebdd94cd2b002049ed01 -da96e036891392bd69963900104b835e -da96f4e8caca526d1c7dde00f048345d -da9fa000042bc33dcbd1db000042bc33 -daa14a0004aa104d81a71700004aa0fe -daa7170003d39bcdd33b3f00003d39bc -daa92900042b253dd94402000042b253 -daaafa0618a2ba1db1212400a0494934 -daadcc0003e2577dcc2d7200003e2577 -daaeba000401ddcd1082ac0000401dd0 -dab16f0004aa104d81a71700004aa0fe -dabb110003cc023dd9373900003cc023 -dabb5e0004725a4d4b0f6600003d01b7 -dabc21eee0a92d6d1e302500b0495494 -dabf790004a166cd3c069d00003a6629 -daca401db401e68db5128b00f047ba56 -dad6b100043cdb5dd27a63000043cdb5 -dada45000483f15d5544d40000483f12 
-dadc9500046b83add8ac20000046b628 -dae0900004705d4d88c30900004705b0 -dae4f80003d1f9fdd3db3d00003d1f9f -daf6c200041a262dd2f190000041a262 -daf7bb0003f3669dd353fa00003f3669 -dafbe000043bc99dd71579000043bc89 -dafd85d455c117cd9e9433005049f049 -db05750003d508fdd9b69200003d508f -db0bb3f9fcfd319dcdcde200f048d7a0 -db0c560004a2aadd3e77ed00004a2a1d -db0d2c00046f7e1dd9c680000046f7e1 -db0dcb0003edd98d99399400003eca98 -db14ccd3dcfddabd312d0400804a932b -db1bb0000445e06dd0df530000445e06 -db1d620003bdb42d0f7e9d00003b891b -db235cc40c422e9d01dc8500d0473725 -db264e00048d28ddcca5a8000048d276 -db29490c5ff8818d87f405003049bd32 -db2b600003d9fdddd0498f00003d9fdd -db2de20004b19d6da7a5a400004ab782 -db3c1a712dd2f87daeb7be00804a1608 -db45ab2a3e9b3a0df263a8009048e815 -db4e06a109efe67de5431400604c9888 -db54280004bcbaad7a8c5c00004bca72 -db586b000407b51dd9eeba0000407b51 -db5960bee8bb13ddeb688500e047cf8d -db692b0004a3e35ddea11700003d86c2 -db71980004bb7b1daf0a45000047d45c -db74b1241a5a192d87837600f04c7a16 -db76f06dbde27bfdd7cdd000404aaba5 -db7cfe0004430e1dda297e00004430e1 -db7f620003f50a3d1963bb00003f506a -db7fd6c60bc54a5d104a8b00b0481ace -db840eaa2de630dd134c7200304a4515 -db8e9e62da4fb29db3c6c30040499444 -db8fd2e9612123cd9d533300e0482723 -db940d0003f41e1d18eb5100003f41dc -db9a24000486f96db32ceb0000486f93 -db9f600003d7b0cdd2475900003d7b0c -dba189c4afde4b5da753ca0070472ed6 -dba33700048d0e3d1317ec000048d0af -dba86c46faef95fdb121240070494934 -dbab0a00047e66cd7c8a45000047e603 -dbab6b0003d5e38dd23a9200003d5e38 -dbb3083b27b8dc0d91abbe00704a7741 -dbb8cb12661a533dc4c9c700f04ba792 -dbba07c097dd5e5d32f02f00e047c511 -dbd24fb7d453ef2d6c166300b047293a -dbd4b5000477223d99017b0000476805 -dbd555000490de3dea76c300003921cb -dbd7491c5f4e445dbafbac001047e383 -dbd80b0003eafafdda5d9400003eafaf -dbd9a800048d0e3d1317ec000048d0af -dbddd700049eadedc422e8000049e9cb -dbe6ac000400077d68ce8f0000400026 -dbe82031301561cd6c1dd9008047c461 -dbef600003d32c9dd1ad8f00003d32c9 -dbf1be0003a79f7d834f21000038758a -dbf1be0003a79f7dea76c300003921cb -dbf68a000412e4bddab1520000412e4b -dbf8c00003d006ad35d53500003d0069 -dbfe0c000420cdbddaad860000420cdb -dc0579000438b71dd36be00000438b71 -dc05a2b3b157630d35890a00704c6d96 -dc0e5700042862bdce2929000042862b -dc139c0004565f6dd3517000004565f6 -dc21c70004b0b8adf5878c00004ab4f1 -dc2752dff2ea69ad69b30a00504834eb -dc292300045c39fd6278ea00004582c9 -dc2af2bee0029cbdf9b7be00204a15d7 -dc2b042bfe847dbd425d980050484cd5 -dc2d4300041ee89dcd0b12000041ee89 -dc2e45000483aadd7149f10000483a90 -dc317000045a036dda9820000045a036 -dc31e200044e6b9d5acc76000044e6ae -dc37d00004803d5d356dd900004803d4 -dc3d170003d13a9d462a0900003d121f -dc434e0003f3618dd2b8db00003f3618 -dc475100046f1e2dda982f000046f1e2 -dc563b07a0d3f62d83ddd700a04a761b -dc56ec0003dda14ddaecdb00003dda14 -dc5ace0004b1f37d34d7a400004b1f0d -dc5d34d3efd4ff3db3236b00004871c2 -dc5f0900046d400d6eb280000046d3ee -dc62500003c4ac6d834f21000038758a -dc62500003c4ac6dd8667600003c4ac6 -dc652bfb3103f7bd1234a100804c57cd -dc6d64000493168d54879c000045c2af -dc775e0004725a4d4b0f6600003d01b7 -dc787c000436878dd4dbdc0000436878 -dc815ad98787a29dc12dbe00004ccdd8 -dc8531000419df3dcf3d7a0000419df3 -dc89ae372173d86dc44e50001048e514 -dc8a720003db49edb7e69200003dae5a -dc8d197ec1eb471de404c4007048511d -dc8de15b2b6638ddd9eb0a00c04752ad -dc8f660003d1067d410f6300003d1033 -dc93fea4a9ae145dbbbe7b00204ba909 -dc9624a4d58d5d6d462624002048dcdb -dc97de951dd109fd2dffc000a04ce734 -dc992b0004a3e35ddea11700003d86c2 -dc99b2d44fc2775d6af9ab00904c4722 -dca16797bedcf45d82bf2500d04974f6 -dca39c00045f8dcdd36820000045f8dc 
-dca464ae964ddead8b2b1400404cae15 -dcae51101c94723dfd733300a047d43a -dcaf314632b0099db149d2008047d094 -dcc0510004851a2de2df9200004450fe -dcc40340c6d0ba7d091dcf00d0490507 -dcc52a0004caf9cdb81a4500004caf85 -dcc5940003f2cddd7f7c0d00003ef422 -dcc5960004803d5d356dd900004803d4 -dcc6df6d398350ed28e02f0070480d14 -dcc78669c4fb6d4d0c060f00004c294f -dccbbafa6a6137ed1400a800904b7647 -dcccb5000477223d99017b0000476805 -dcd13cffe76a861dc9ab5500b04b3eb1 -dcd49300044e6bfd5acc76000044e6ae -dcd6480003d849bdff2f1700003d8498 -dce307946aa1b69d7f326600504b9311 -dce5720003e31e9ddd42cd00003cfb55 -dce9f20003b08ead834f21000038758a -dce9f20003b08eadd7e9f700003b08ea -dcee9600048dc36d524de2000048dbfb -dcf405154381e8dd022c2500d0494e4c -dcf47798d3c049bd2c068b00f0475cac -dcfbb8c2b9524dddbeedd700304a274d -dcfd88147ecafb5db32ceb0050486f93 -dcfde20004851a2de2df9200004450fe -dd10320003f9217d4b0f6600003d01b7 -dd1646b8c273bcdd8bb89600d04781f5 -dd1a740004b7b32d7491c700004b7b30 -dd1d060003e0c45dd3a0fe00003e0c45 -dd1e3b54d7e4ad5d6a536c00c044a043 -dd24ef0003f75f5dd3099400003f75f5 -dd25bcbbadd1034d7c3ba400504b6245 -dd29435a0d45eecdcd8a96009048baaa -dd336d0004a15f3d3d977900004a14fa -dd380b0003de069ddb8bfa00003de069 -dd39919603b7a0cd5228a800504b0541 -dd39c9000466ed9dd4179c0000466ed9 -dd40749549243a1d69c9a300304c2401 -dd4168000445b03dd4c8fe0000445b03 -dd4a13ea87aba67d8b2b1400404cae15 -dd512987451448bd6af9ab00c04c4722 -dd52ed06da7f588d3a87dc00a04a7ddf -dd54690004b0b8adf5878c00004ab4f1 -dd5caa000409b7bdd330470000409b7b -dd6d8f0003d8bcbdd5af3d00003d8bcb -dd6f510003ec64cddc313800003ec64c -dd75ae00049aea3d801f6d000049aea2 -dd77cf42fe5bc15d023fac00d0481cf9 -dd7cfe000445904ddc125f0000445904 -dd8eec0003ee38cdc40bbb00003ee387 -dd91170003d2e96dd6af5900003d2e96 -dd936d00049eadedc422e8000049e9cb -dd94f50004696bcdd4447600004696bc -dd9602ac2f19a21d39a21d00204bde2a -dd96d00004aa32cdc868b700004aa326 -dd9eefd5c6f160ede6182800404b7174 -dd9f600003d59c2dd43d1700003d59c2 -dda57a00041fba4dbdba0c000041d810 -dda8f7afbba2942d7b742f00d047466f -ddac5f15dae7ca5dc2c13c00604c858b -ddac93000453d19dd444f50000453d19 -ddade20004551a4dd5b36c00004551a4 -ddaff3dd6aca267de3f7be00104a3e46 -ddb3aa4cc9d72d7d0d87ac0060483d0c -ddb45c0004b417cd6d5a5600004b413c -ddb5db00042e1f5ded63b5000042e1e9 -ddb7bda1a538dcbd78ba6600f04ac76a -ddb81043ef690aedb81a4500a04caf85 -ddb876000454a74d038c760000454a70 -ddb8883970386a6d3793d0008047b889 -ddc1940004ab34cd49b8ae00004a9de3 -ddc5d8000423e8bdd2767e0000423e8b -ddc968000445cced199def0000445c38 -ddd14ad9e02f258d38720f00d04bf280 -ddd1860004022bedae298600004022bc -ddd4a700045d410dc17893000045d3fe -ddd4fc0a07495d6d3a527e00a04ac745 -ddef97da7c13c15d54ed5100a04a2b0d -ddf1380003e083fdd72d9400003e083f -ddf2090003d0c87ddc576600003d0c87 -ddf3020004aa32cdc868b700004aa326 -ddf59600047d575dc6017b000047d574 -ddf5d70004975e8dd8d02500004974a5 -ddf82500048ebe7d2019cf000048ebce -ddfbddffa3515bbd4b085a0030473228 -ddfcb5000483371dc5ac850000483327 -ddff21dda3ffc05d91abbe00e04a7741 -de042f000483371dc5ac850000483327 -de043bfc34e72dcd617d3c00c04aa610 -de04a889edfaac7d53e6ce00004b0ae1 -de0680000471d26dd4a3200000471d26 -de07a40004bca05d87863900004bc9f9 -de09380003ddf6fd3ea7e50000395bd7 -de09380003ddf6fd834f21000038758a -de0e9d00043bb89dd0cdc5000043bb89 -de14db0003f2cddd7f7c0d00003ef422 -de16e90003ed447dd35f6200003ed447 -de17a10003e9243dd4457200003e9243 -de250a0004c4085d96393100003cf5c7 -de264500047d575dc6017b000047d574 -de2682291a110bbd4a2fd100604c6cde -de346b00041484dddcb2ba000041484d -de350b0003e2aaadf29eec00003e2aa1 -de3a1900048f04fd02d722000048f01d 
-de3e55acfecfab1de3f88500d0475170 -de40b70003d5e38dd23a9200003d5e38 -de4954935150792d356c0f00f049422c -de49a40004ab34cd49b8ae00004a9de3 -de4cb1dc5b7a5e9dec41b900d0475cca -de5088315460279d4d6ea500304be3a0 -de51c546c59f622d488e82000048d1be -de52fe0003bb20bdc79b4500003927b9 -de548d0003d7b0cdd2475900003d7b0c -de575300044329fdd615f4000044329f -de585600049a11ad83d3dc000049a10d -de5f530e64a6795d7dbd7b0060474c4d -de691aa72514cb9deb4d5100304a2dfa -de6d947c4b59b0bd8f07ec00c048d0e4 -de6fa80004a5993d316f8c00004a598f -de7c7600045d456dd570a7000045d456 -de7d2300045aa65ddd00ea000045aa65 -de83a29ff8e4f7ddfd7333007047d43a -de8c9600047eff7d2749d9000047eff4 -de90eaf9e6e82b5d256eab00904b9a08 -de94ac000487f01d7edff50000487edc -de9e80000406d4cdd372ac0000406d4c -dea0fe0003bdb42d0f7e9d00003b891b -dea0fe0003bdb42d834f21000038758a -dea2720003cfb55ddd42cd00003cfb55 -deab74000476fd2d4c20850000476f21 -deb066ec6fb35d5d442f2200704985ca -deb07880a94de75db52a7400d04bb5b2 -deb51f0003f3618dd2b8db00003f3618 -dec02386176b07edb99797004047279b -dec1d2000476c7ddea76c300003921cb -dec5b00003d32c9dd1ad8f00003d32c9 -dec8880003d1f9fdd5697500003d1f9f -dece760003d39bcdd4be4800003d39bc -ded245000473dc5d86f42f0000473db7 -dedba2000484deedea64020000425ba1 -dee22da0e30b42dddaeee800f04bee3f -dee4f80003d7d2bd9ba97500003d7d28 -deec369637e50b5d98875e008047305f -deedcf7f2dd589ddcb6a3900b04b38b7 -def2450004c9e8ed18bc1300004b8919 -def3990003c295fda8339900003c295e -df057900043608cdddc2b1000043608c -df05980004b7b32d7491c700004b7b30 -df06ab0004bbde5d13f4a800004ba11d -df0820000453a2edce3fbf0000453a24 -df0cf5000457fd0db1068700003b603a -df10d700041a262dd44701000041a262 -df11f3d2ca89ab4ddefeab00304b7855 -df130a00047c443d5d6cb5000047c432 -df13140004c85c1d1ec42000003cbaf6 -df158aadc78464ed56a9e200804aceb3 -df16430003f3669dd4c0d200003f3669 -df1a4a645da9fe1dbb01c100c04c77d5 -df1a880003d1067d410f6300003d1033 -df1d7a6ab38b62cd3deac300e047ed8c -df1e560004b33add43fa5600004b33a9 -df233d0003d413cd2368f800003d4139 -df297e6e87689d1d94327400f04b9ef4 -df302f000483d22d2963330000483ccc -df32d2a31e4abebd0de76b009048726b -df3aca000423c4ed2945d60000423c40 -df4703bc6f7c545d42f5ab00904c2538 -df4c89a9e75078dde2da8b000047f14e -df4e9c078fd657fd0f9ebc00a04c602e -df551d187f1e4a5de801a300304c2934 -df5e8a000400516d52b06b00004004f9 -df6164000484deedea64020000425ba1 -df62d5dfe09cccbd44aed500304460d9 -df65619fc06b692de966820080489127 -df65e60003cb8dddc84d3100003cb8dd -df685100048d074dc578ac000048cfda -df6896000473dc5d86f42f0000473db7 -df6a0c000407186dddf9860000407186 -df7697ae6165c99dbc2eb200904c19ee -df76d5000445bf4dde14fe0000445bf4 -df7899cf798eec1dd4ea19004048f6ca -df7f9c0003f9791d85019f00003ccebf -df9027a06652d5edc502cb00d04cd404 -df938c0003f92a6d13f79c00003f92a6 -df9add3625890acd9ab47d00d04a8a1a -df9e070003e0c45dd3a0fe00003e0c45 -dfa30d13d53868dd95195a00c04aaeec -dfa4f80003d61a5de9db3d00003d6188 -dfb527b99c70c6ad454b7600f04be7a1 -dfc0ea000460b97d3101700000460b8f -dfc1170004beb59d3101a300004beb57 -dfc4c1f563e711edfc09c100604c80f7 -dfc9d80004c9386dd1daac0000409b7b -dfca13000409b7bdd330470000409b7b -dfd36c00044a172dd75c76000044a172 -dfde730004c9386dd1daac0000409b7b -dfde8e000446cfed65b4000000446cf4 -dfdfd9985b4a5bfd0c060f00f04c294f -dfe1550004975e8dd8d02500004974a5 -dfe1750003d215edd88e7200003d215e -dfe5f00003bef01db1068700003b603a -dfea160003fce50debef9000003921cb -dfebf60003c7a6cd834f21000038758a -dfebf60003c7a6cddeab9900003c7a6c -dfebf600043bc89dd71579000043bc89 -dff623f28429655de30cb700504a8190 -e005060003f75f5ed3099400003f75f5 -e005efb17629af4ee7d1de001047a8dd 
-e008f80003d86c2edea11700003d86c2 -e00b1a79615abd8ee5e20f00904c4b22 -e00cfe0003e2e68ed8d2ec00003e2e68 -e011dd000432b83e8b1c10000043299a -e0177ecc5bb8df8eef75e200c04bc537 -e018679be8b7a8de3ec4ac007048573e -e021cc50213af77e87cba400304ac33e -e022a4994200777e33f8a700f04c31fa -e02578cbb8f49a5eb45e0f00f04be5ea -e0262400048f651ecca5a8000048d276 -e02a1900048f652e708a5600003d1f57 -e02e2400048c8dde409ac7000048c8d3 -e02f600004543b4e3a636000004543a3 -e031f70003d59c2ed43d1700003d59c2 -e032b20004c6089e4f1c2c00004c6039 -e0388e25ff7ef00ec13ad200804cd3bc -e038f13f64bb27be1e05a400404ab5d0 -e03aa10004ae1f7e9e9433000049f049 -e03d2300045e153ed744f5000045e153 -e0405000043453ee44ae7200003dce92 -e043902c2feee59e7c737400c047e60a -e046500003c20fce834f21000038758a -e046500003c20fced33d18000038bd74 -e046c20003fa228edefa8f00003fa228 -e049380003f1f57ed8f59400003f1f57 -e0498f0003d2711e5db00b00003d26a1 -e04a63000472ae1ecea15b0000472ae0 -e04aa2000449fd7edebbbf0000449fd7 -e05066000448b02e0e98420000448aaf -e0520d0004958dbe3962c300004958b5 -e0540b0003d508fed9b69200003d508f -e0575b0004385b1ee8f4e800004385a3 -e05a9e0003cc023ed9373900003cc023 -e05b57e009bcf21e442f2200604985ca -e060a300043a309edeab5b000043a309 -e0612c000393434e834f21000038758a -e0612c000393434edee5190000393434 -e068ea00044edb9ec7faa2000044eda3 -e069b6aebb7399cec15333002047bd67 -e076ab0004b8d78e654a1d00004b8d76 -e0831ef8db4e9efe094e5300904c470e -e0865f0004450b2ed7f56800004450b2 -e0867e00045c39fe6278ea00004582c9 -e08a560004b2f58e08da7400004b2f3d -e08e1d0004ba304e3d21c700004b2137 -e09416000436878ed61dc50000436878 -e095ad59ef03949e33a63900b04bd6fd -e09bfa0003e3c9eea0817200003e3c98 -e09f6d0004a2626e3d0579000043dd4a -e0a1170003db49eeb7e69200003dae5a -e0a537b5cfe8334eac1b37005048c370 -e0a6b20004c0a92e2c16ab00004b224b -e0a7a10003ed447ed35f6200003ed447 -e0a82000046b3d5edf6876000046b3d5 -e0a8fb000423e8bed2767e0000423e8b -e0a9cc0003e9243ed4457200003e9243 -e0aacc0003bb20be834f21000038758a -e0aacc0003bb20bec79b4500003927b9 -e0aba40004bd23fe5e32ab00004bd23c -e0b40200042c4e1ed901d6000042c4e1 -e0b73cc2a3362d9e26c6e800804a7549 -e0b73d0003d61a5ee9db3d00003d6188 -e0c1170003d9f7bef8c49800003d9ee4 -e0c416000438b71ed4cd460000438b71 -e0c4a0000449491ed14895000044944b -e0c7bd33ba8da11e1a448500a047fa4c -e0ca560003d2e9aed6af5900003d2e96 -e0cf510003e4255edfb53800003e4255 -e0d1f4000496111efbc80f0000496110 -e0d22400048e44eef4c8eb000048df31 -e0daaa0003d990bedf90f800003d990b -e0dc94ec8fdb302e9e91a800b0486484 -e0e1380003f75d4e20a90b00003f75c7 -e0e68b0004758deee02f1700003daba5 -e0e8760004494dbed9052300004494db -e0e8db0003def88edfbd7200003def88 -e0f2728a65f9500e63e27e00404b0a86 -e0f746000489ad9edb4774000047b57c -e1060500043bb89ed0cdc5000043bb89 -e106f8000496111efbc80f0000496110 -e10ada1621f0d43e08e67b00204bc7e3 -e10c760004bf3b0e5fd25300004bf377 -e1118f0003db4ebedfa29200003db4eb -e118ef5b5d81ac0e4c4722001048f49d -e11fed0004a106fef1efed00004a105b -e120850004803d5e356dd900004803d4 -e12301000401443e9962aa00003d85b9 -e12a720003d849beff2f1700003d8498 -e12ea10004ae1f7e9e9433000049f049 -e12eae6d93e5be0efbb1f400b0495d97 -e1308a0004ab0d0e02b6f5dd02f2d301 -e131649b5831968ed39c2c00104c76be -e13386000489ad9edb4774000047b57c -e139cf00048f8eeee3730a000048407f -e13c280004bb580e982b9c000045d4a7 -e13c7640bcfc16cebaac5c00304ba3a4 -e13c768cee8b6f6e06942c00804c4267 -e1465f000445f4fef3980d00003e80e8 -e14a0c000407b51ed9eeba0000407b51 -e14ca700045060ded913bf000045060d -e15476000449491ed8f23b0000449491 -e15a390004ba304e3d21c700004b2137 -e169e20004b936ae79d5c700004b8404 -e16b45d72fcc808e7397c200b04a1d94 
-e16e30000440688ed8cb8e0000440688 -e170770003d1f9fed5697500003d1f9f -e1727e0004622eee47627e00004622eb -e17c130004b936ae79d5c700004b8404 -e17e880003d06a9ee023e900003d06a9 -e18095000406d4ced372ac0000406d4c -e1851b0004cc262ed33b3f00003d39bc -e1888d0003d2e96ed7fd7500003d2e96 -e188a700045aad0ef610f5000045aac5 -e193d4c12e0ad3fe9cda660060472bae -e194c4000487f01e7edff50000487edc -e19560923580438e25570a0020475440 -e195d600042b253ed94402000042b253 -e19c280004ba367e22319800004ba364 -e1a2270003d39bced4be4800003d39bc -e1a3620003dda14edaecdb00003dda14 -e1a9750003daba5ee02f1700003daba5 -e1aa4e0004a9df6e49b8ae00004a9de3 -e1ac4a229c39c2aec18f8c001049e17b -e1b0c40004851a2ee2df9200004450fe -e1b1060003e083fed870fe00003e083f -e1b18131b11c92be44205c00a04b17d6 -e1c70b7edbea643e2d1c0f002049695c -e1cb590003dd5e2e31e31700003d9967 -e1cdbf4fe0dbef0e08a7f5007048cc57 -e1ce8000041a262ed44701000041a262 -e1d7627267b1b00ec41346001048baf5 -e1d7a40004ba367e22319800004ba364 -e1d8aab8f5ccb55e7b156b00104c4178 -e1daba00041a66dee08fcf000041a66d -e1e3bff2aaba973ec81a4500f047e8e6 -e1e91f0003f3669ed4c0d200003f3669 -e1eb54dbc45786ce2df41300f04b30ec -e1ec7a49e8f030ced8d02500904974a5 -e1efa37ac12d429ee02f1700903daba5 -e1f0f5b0cab0954e95477400704754c4 -e1f271ed31efbd3e568f7600004c51c1 -e1f3bb0003ea144ee0936200003ea144 -e1f86b6d1000505e7736e800704c1854 -e1f8b500047a7e2ef6fbd0000047a7df -e20447000401ee4ee07eac0000401ee4 -e205380003eafafeda5d9400003eafaf -e20a160003fce45eebef9000003921cb -e20de20004baf10e57927b00004ba97a -e2105b6d4e165fae3d977900e04a14fa -e21cf53ce5a408ae9098a700c04c718f -e21e4500047d631e7cca45000047c145 -e222ea9540f6bbfe83e9c100204c599f -e22c23b445a4c7aebbcc5c00f04bbf0f -e22c770003d8bcbed78e7200003d8bcb -e230f982abf3bf0e37caab00204aff77 -e236db0004997a0ef2320d000049979b -e2397a000420cdbedaad860000420cdb -e23a82e09e7324ee1e66a500404be07e -e23d84e464b5c02e0d302c00804c27b9 -e24d900003ff7cde9efa8a00003ff7bb -e24eb20004c0a92e2c16ab00004b224b -e2577900049f02de7745ae000049f017 -e2587dee25b948be806aa500204beefc -e25d7a000412e4bedab1520000412e4b -e260f80003d18baee0980b00003d18ba -e26c950004611f3e61f4ea00004611c2 -e271dd000401ddce1082ac0000401dd0 -e273510003f6143ee13d3800003f6143 -e27e8d7c943a14fe6fef7600904c2c24 -e2886f00048e600eef53f500004895b1 -e298980003db49eeb7e69200003dae5a -e299853167bf93ceed153300904c5add -e299f40004997a0ef2320d000049979b -e29cc200048e5cae22c9e9000048e29e -e2a3f15fc0b5edce5a8bf50050487207 -e2a47817b2cf83ae5283a8008048ed41 -e2ab7c3e9397761ea1f40f0020492ddf -e2aeba000400077e68ce8f0000400026 -e2b001450aff85be9311d2002048117b -e2b9af00046f7e1ed9c680000046f7e1 -e2bd140003c7a6ce834f21000038758a -e2bd140003c7a6cedeab9900003c7a6c -e2bf6c00044b07ae1b476c000044b070 -e2bfd00004796f8eeb4da1000047950b -e2c09500046ae5fecd2c93000046ae57 -e2c3a8000498711e442f2200004985ca -e2c3cf000420966ee148470000420966 -e2d3a6d630e7198e5b661900d0498a06 -e2d40f00048e5cae22c9e9000048e29e -e2d4a70004c6c5ded115c100004c53dd -e2d9cb402922522e43cd9800704acc33 -e2dcd4000483d22e2963330000483ccc -e2e76000045a036eda9820000045a036 -e2ee0d000498711e442f2200004985ca -e2f2aa0003d413ce2368f800003d4139 -e2f7c200049f89ae0e312b000049f7e9 -e2fcf500046138be283cf50000461387 -e302740004acd1fe66b5c700004acd19 -e30984a1c537d56eef15ab00c04c1090 -e30c4700041fba4ebdba0c000041d810 -e30e7021be660f1e636b3300804923a3 -e315680004430e1eda297e00004430e1 -e318dfbb3db504ce0be70a0020478346 -e31a660004b0379ee8167b00004b0270 -e32095000451afee26463b0000451aa9 -e3217f414466557e36bfc2007049e472 -e3229e391012ef8ea1037600c04cb593 -e328720004a225eed953ed00004a21f1 
-e32ac3000490c20eea76c300003921cb -e3315100049ae5de9aad2b000049ae58 -e3324500047d631e7cca45000047c145 -e335cf00048f8eeee3730a000048407f -e3395200040dea6ee1a047000040dea6 -e341d80004ca543e5df3a600004ca540 -e34935000436878ed61dc50000436878 -e34aab0004b3218e8c3e3900004b31d2 -e34f7400047f5ebe21aa7e0000459dc1 -e35bd18b77649b8e94b16f00904ab533 -e35fcca24205d36ebe337900d04a006c -e36b0900046f1e2eda982f000046f1e2 -e376560004b0379ee8167b00004b0270 -e37b6d7dcf13e58e0f3e8800104cd635 -e3864c000393434e834f21000038758a -e3864c000393434edee5190000393434 -e386af1e5525481ec18f8c00f049e17b -e38ae0000438b71ed4cd460000438b71 -e3926f673eb4b9ee4b6c9600b047e6e2 -e397707578a9a35e2108d400e047c50c -e399de0004802e6ea8f774000047f2a1 -e39c0c0003d0c87edc576600003d0c87 -e3a27e00045961bee23895000045961b -e3acfa097ea8c85e08a7f5006048cc57 -e3ae19000490d30ee6078800003c031f -e3b1c09a64eb1feeb6568b005047fd7b -e3b3e19eddff21dee2614800a04ccca5 -e3b4a40003ec64cedc313800003ec64c -e3b54141265d303e5d9c2f005047e6af -e3beec9cb7fbd0cedede1d00504bcd24 -e3c02f00047a627eb7e885000047a624 -e3c3b5000423c4be2945d60000423c40 -e3c9380003de069edb8bfa00003de069 -e3c9da350e93c5fe19334600e048746a -e3d67c0003c295fe834f21000038758a -e3d67c0003c295fea8339900003c295e -e3d9f20003a79faeea76c300003921cb -e3e1e200048c3cbeb61224000048c3c8 -e3e75bf494fc324e82385e00204cb367 -e3e78902a75b38deb1d6630030472d89 -e3f36000045194bee278f5000045194b -e3f5f11dca7d12dec64b8e00c043fa11 -e3f70a00047f776e3a5ac3000047f70c -e3fa3b00044fd34e40e0a7000044fccc -e3fa8a000409a76e6b21520000409a68 -e3faa200044f1e2ee24c20000044f1e2 -e3fc8e0003e2e68eda1d7200003e2e68 -e3fcb60004aa104e81a71700004aa0fe -e40870e4056ee84e951bd100a04bf552 -e40ec1d936bf7eee4edcac0060488bb1 -e412ba000412d8aef64d7a0000412d7e -e416260003d215eed9ef3d00003d215e -e4165c996fc9c9ee7c82c700c048d852 -e41875e9365479bef0faa100404b94f9 -e4208d00f6f56bfea2a76b0030487282 -e421720003f3408ee2a00b00003f3408 -e4217e0004450feee2df9200004450fe -e4260d000491d1cea013220000491d12 -e4270a8c73c24dee4625de0090473375 -e428880003d2e96ed7fd7500003d2e96 -e42a8a000412e06e1eb2c20000412e02 -e431720003ee07bee302ec00003ee07b -e437a8000491d1cea013220000491d12 -e437b20003d06b1ee023e900003d06a9 -e43c8d0003d508fedb057500003d508f -e43f8b8ef15252eeb9511b00f04cd68b -e440ac00048c3cbeb61224000048c3c8 -e441f40004984eded36820000045f8dc -e4451f0003e083fed870fe00003e083f -e44821b7da50ca8ed951f100c04817f6 -e44847000416948ee302ac0000416948 -e449060003f1f57eda500b00003f1f57 -e4499a00042c62aee2f67e000042c62a -e44ad78a2af81a6e6af5d2001047fda6 -e44f550004b0665e255a7400004b063a -e44f86000488408ed570a7000045d456 -e451084fd5bc178ee5fe7e00e04b1093 -e45227000433133e8cad2c000043311e -e4528200048d0e3e1317ec000048d0af -e45fa80004975e8ed8d02500004974a5 -e46002000422781ee319a50000422781 -e465379def53e9fe5ca81300304b184a -e4687600044ea58ead47bf000044e354 -e46a0500043bc89ed885d1000043bc89 -e46c16e249055edea6c9be00104cd34c -e47fed00049eadeec422e8000049e9cb -e481605f8c7b5ffe49ba4500a0481e5e -e48216734f5d045edc71d000f04aa942 -e4840b0003d8c38ed5af3d00003d8bcb -e487d9d2d81725fe10e0d4002047fda9 -e4897a000401ddce1082ac0000401dd0 -e4959000041484dedcb2ba000041484d -e4a2d489718ce0ce513d3300e04c571a -e4a3510003cc023edabb1100003cc023 -e4a75d000471a2fee2d92c0000471a2f -e4aeec0003f361bed1759400003f3618 -e4b0ab661e77aeaeaacdd9001047e23b -e4baca0004236f7ee3680100004236f7 -e4d0d700042c4e1eda41db000042c4e1 -e4d738279e44d90eea5e4500a0473b41 -e4d7bb0003f4ac8ee36b6200003f4ac8 -e4d86b000400077e68ce8f0000400026 -e4de4e0004886d0ea3dac700004885c9 -e4e75ef8bfa7b32e8b99c700a04b2ff0 
-e4f3370004256bfe9349d600004256b4 -e4fc710003d8bcbed78e7200003d8bcb -e4ffd7f55d1a399e56ab7600d04bf5f9 -e500a70004699a5ee3bb9c00004699a5 -e50a560004b9fedeaf0a45000047d45c -e515dd0004329e6e636ee200004329e5 -e51bbb0003e2aaaef29eec00003e2aa1 -e521a1000475db9e43d5de0000475db5 -e525d0a59fe5563e104d640010484c2b -e5267e0004b0665e255a7400004b063a -e526a8000407b51edb586b0000407b51 -e529b900047d575ec6017b000047d574 -e531b9000479288e0721b9000047927d -e532c30004758c4e7e15a100004758af -e537620003f361bed1759400003f3618 -e537ac00047f5dbe21aa7e0000459dc1 -e5449a00042afcfe8ccd7200003f3ba7 -e5495b4c80a32b4e40347c00404a1e89 -e549d700049ef5ae96393100003cf5c7 -e54c60d49facc8be1a021d00904b9eb6 -e54ee6559231cdde4b01d200d0483ac6 -e552c300048e44eef4c8eb000048df31 -e5578c0004aa32cec868b700004aa326 -e5583cf27eb82d8e0c060f00704c294f -e559ef000445f4fef3980d00003e80e8 -e55a45000475db9e43d5de0000475db5 -e55ef800048efc7eb7d9cf000048efb1 -e563f5000486bdbeb1068700003b603a -e568d20003e4215ec87cdb00003e19bc -e56c8e0003dda14edc56ec00003dda14 -e571e20004494adecfd476000044944b -e585cf000496111efbc80f0000496110 -e586660004b4b0deb727a400004b4a02 -e58ac472171762bee2614800504ccca5 -e58ae49bc42e014e4e1c7c00b04a6a02 -e58ddd000401443e9962aa00003d85b9 -e59cfe0003f7b14e235bbb00003f79ae -e5a23b00044b078e1b476c000044b070 -e5a58f0003d1832ee434fc00003d1832 -e5a8c03a1537489e902c6f00a048ec62 -e5adf400048f438e733124000048f430 -e5b7c8f7ab94986e499a240010488892 -e5b9e200045b893ee47970000045b893 -e5ba7e0004586d7e9d0bbf000045826a -e5d4cfc34da7e57eb6ab74002048346a -e5d80b0003ee38cec40bbb00003ee387 -e5d8682d8024f63ecd342800c04b55ba -e5dcacae40a6e89e6407740010473bb9 -e5ddd70004a7746e91abbe00004a7741 -e5e57a00041fbd5ebdba0c000041d810 -e5e7ac00047e8bfe001df1000047e861 -e5ea920003d86c2edea11700003d86c2 -e5ed66b9f372d83e2778a800204b92a6 -e5f1d00004a9510eaf0a45000047d45c -e5f9c900045aa65edd00ea000045aa65 -e5fbf500048d29eed2c786000048c057 -e5fc8600042b253edaa929000042b253 -e60476000451afee26463b0000451aa9 -e606fe0003a79fae834f21000038758a -e606fe0003a79faeea76c300003921cb -e6097ba843626b8e3a41f100f047ae49 -e60a24000486ba9eb1068700003b603a -e6149a000426065ee4e6ca0000426065 -e619de000479288e0721b9000047927d -e61acafcf504d50ec9bba80080497d0a -e61b3d0003d4788e3c069d00003a6629 -e61b3d0003d4788e834f21000038758a -e6257a000400253e27ac6b0000400213 -e627f2d2d269b70ee7dca700104c2e51 -e62880944fb30dbe8fe19800104b9643 -e62e5f0004423b7e65b79200004423a6 -e633dc00043608ceddc2b1000043608c -e6368a000407186eddf9860000407186 -e63d060003eafafedbd80b00003eafaf -e63e560003d990bedf90f800003d990b -e64246000420cdbedbfe0c0000420cdb -e6424e00048d29eed2c786000048c057 -e64a36fd5f6d081e4ad77300004cdae7 -e6518c53d5c587ae5bf0eb008048e0a0 -e65a660004af83ce2e7a7400004af822 -e65c0500049ed07e94cd2b000049ed01 -e663510003f75d8e20a90b00003f75c7 -e66531377a83525ed3bd7b00b0480198 -e673cf0003fa228edefa8f00003fa228 -e674d20003ebec9ee4fd7200003ebec9 -e67570000458cb7e8d5dc900004589c3 -e67b620003e4255edfb53800003e4255 -e67d940003def88edfbd7200003def88 -e681f400048e5c7e2eac2f000047d5af -e689720003ee38cec40bbb00003ee387 -e68e070003f1f57eda500b00003f1f57 -e6932eac897cc74ea67bac001047af8d -e699cc0003e2e68eda1d7200003e2e68 -e69bc2000499e9fe05c6c30000498bb7 -e69e8b091828858e4954a800704b28ef -e6a06b00040d4f7e82c20c000040d4eb -e6a162000396017e834f21000038758a -e6a162000396017ee52fe50000396017 -e6ae0f03d749749e315ac70020488f0a -e6ae395c96e4e06e7d32c300a04957c2 -e6ae560004b9fedeaf0a45000047d45c -e6b8880003d508fedb057500003d508f -e6c1889a997e346ee84c2800104b947b -e6c7590003db4ebedfa29200003db4eb 
-e6c9358b32f7955eb72bed00f04a1325 -e6c9bd000445bf4ede14fe0000445bf4 -e6cbae000412e4bedbf68a0000412e4b -e6cff600043c676e0afff6000043c671 -e6d26b0003ca6b9e4252d900003ca678 -e6d26b0003ca6b9e834f21000038758a -e6d79af8b53e601e3386ce00f04c46b5 -e6d8880003d215eed9ef3d00003d215e -e6e3ac00047eff7e2749d9000047eff4 -e6e478000484d40e08185c0000484d3e -e6e607bb7d3337ee448c5c00904b2345 -e6e9809a4a8ba17ede3051007048c7d4 -e6fb14505be271be2fc52400f048edbb -e702e80004a3631eee91c000004a35ac -e7066c2c2dba4bae9bc61700204c8f5a -e70b54000484d40e08185c0000484d3e -e7117900043f4c2ee5e3e0000043f4c2 -e72020000449fd7edebbbf0000449fd7 -e7223b000450115e74dc7600004500d8 -e7235e0004730cfef4e9d1000043d310 -e726c400043bc89ed885d1000043bc89 -e72ef9f196eec68e0be70a0080478346 -e72f4bf74df744de11895500304901ef -e7329a14c825922ea90d1b00004cda6f -e736740004ba304e3d21c700004b2137 -e7383113377df4cec81ec700b0488ff3 -e73bb3c36f2180ae617fa400804bb3d7 -e748020004256bfe9349d600004256b4 -e74ab8941e680ece6e0f5e0050472c3d -e74da8000484deeeea64020000425ba1 -e7542f00047adcee313b33000047adc9 -e758deba3a5630bec502cb00e04cd404 -e75bb20003d0b9ce6e6ff000003d0b69 -e760fa64691a3bfe4e4c8500b047bcfc -e762b50004730cfef4e9d1000043d310 -e7682f000480d43e28e02f0000480d14 -e768abe2aa73f81e3174850090475360 -e768b1bd08b509fe8f2d7b003047ea1a -e76b4300042c4e1eda41db000042c4e1 -e774520004c84a5ee832190000496f76 -e777f5000489437e8d5eba000040c009 -e77b790004a1ea1e40347c00004a1e89 -e77c0b0003ea097ebe713800003ea080 -e77ca961fde7b8de17b13500704732c0 -e780c00003d06a9ee023e900003d06a9 -e785d3ebabc6e09eca302f00b047f667 -e78a46000407b51edb586b0000407b51 -e78b6b0003cc023edabb1100003cc023 -e790fe0003ee316e3b6cdb00003ecfdb -e791410d80b0691e2c7c850080473be2 -e791f4eb647ddfaea8f433000049bb1b -e7939c00046138be283cf50000461387 -e795d100043a309edeab5b000043a309 -e796bcb53e9de4fead65d200204795e6 -e7998efaccbb0eee78436d00c04a2962 -e799b02a0213098ef8c3d000d047a6c4 -e79ace0004c61f1e6181ab00004c61bc -e79b668d5285683e4ca73300e047d3b8 -e79bc2000499e9fe05c6c30000498bb7 -e79bc90004711f9e238b20000047119f -e79d520003fe38cef3980d00003e80e8 -e7a2f800048edc7e8881d7000048edc5 -e7a3fa0003f6bc1e75740d00003f6bbf -e7a75d00046dd25e19fb1100003cbba2 -e7a87d0004a9922e51a53c00004a9760 -e7a94fe4d37ca62e9db99600c0484438 -e7aaaa0003d7aceee6218f00003d7ace -e7ae68438c30b1cebd45d000604a87e8 -e7b6f800048f438e733124000048f430 -e7bcf500046b3d5edf6876000046b3d5 -e7c1580003d0c87eddf20900003d0c87 -e7c2480003d8d7fee67aaa00003d8d7f -e7c4790003dda14edc56ec00003dda14 -e7e8900004703daee69e0200004703da -e7e9940003ea144ee0936200003ea144 -e7ec330004a1ea1e40347c00004a1e89 -e7ee7e00046498bee66095000046498b -e7f1d11fdb01639e916c7600504bf4b9 -e7f21900049083deeecc0f0000490838 -e7f220dc1a0e095e9a282f005047688c -e7f3654653de250e83200500c04a16de -e7f5c10004c6e2deb3f8a100004c6e1a -e7f770000499d56e86776d0000499d49 -e7f9060003de069edd380b00003de069 -e7fa480003d23abec4e71700003d2163 -e803a40004ba367e22319800004ba364 -e805e77772e8684eba876b008048956b -e80c5a3773436ade4ac41300104b4cb1 -e80dc70004b8c5de69963900004b835e -e80e12673fd421fe67e28b00e047f950 -e810a70004543b4e3a636000004543a3 -e811790003caadaee6905400003caada -e8148500047b6dbe5f51f1000047b6d3 -e81a3a34833bcd0e0282c30040483168 -e81b590003dd3acee6b59400003dd3ac -e81c6b000401ee4ee07eac0000401ee4 -e820fc0003d18baee0980b00003d18ba -e8228200048b47aea92337000048b428 -e8384700040ac7beaf7590000040ac34 -e83f5d7024b32eae1e612a00904ca1de -e8428a000419c37ee6bd520000419c37 -e843cf0004062c3ee70c4700004062c3 -e848100004329e6e636ee200004329e5 -e848a80004b9764e04705c00004b96e2 
-e84c0c0003ec64cedd6f5100003ec64c -e84cb500047adcee313b33000047adc9 -e85124000498711e442f2200004985ca -e85b7400047aab5e07d5d2000047a91e -e867620003f6143ee13d3800003f6143 -e86a4c0003c031fe834f21000038758a -e86a4c0003c031fee6078800003c031f -e87a7e00044ddc6ee6f020000044ddc6 -e87b5c577a0aa7aeb85b8c00604a8d66 -e87d2400048f34fe94bec7000048c69f -e87f0100040156eee7320c000040156e -e87f8c0004a9273e3e77ed00004a2a1d -e88259ae0142462e23ccd400f047859e -e8829e0003d0036ee71de600003d0036 -e892db532fda836e01e1c700f04b3f13 -e89847000418354ee76eac0000418354 -e899de000475744e688ac30000475742 -e89a8f0003ff7cde9efa8a00003ff7bb -e89f1200041a66dee08fcf000041a66d -e8a1357dbc8a0ece752eab00b04b00db -e8a2880003d119dee7620900003d119d -e8a353cc65e1860eb2eb70004049b64d -e8a8b5000480300ea8f774000047f2a1 -e8a9b8429a08800e22d3800090472d48 -e8aa1e19ed86ab4ea981c100e04bedcc -e8acd700046dd25e19fb1100003cbba2 -e8ade2000462427ee8fe7e000046241f -e8b4d20003df2b4ee7697200003df2b4 -e8b606a889570a5e465379005049c0ea -e8b6a800041484dede346b000041484d -e8b6d51b3f07ee1e2c9e4e0060487ab7 -e8b97b00047b917e271896000047b79f -e8bc4c00042b253edaa929000042b253 -e8bff60003ca6a0e4252d900003ca678 -e8cfae000420cdbedbfe0c0000420cdb -e8d45f8b6efa6d4e5ca68b007047e874 -e8d5900004217b5ebb7d860000421757 -e8d84af560884aae1b03d4009048e88d -e8dd520004084dee400d520000406d65 -e8e5cf00048fb22e4ecf33000048f891 -e8eb868d2a66717eb988a100c04c091d -e8ebed0004a029fec64b8e000043fa11 -e8f4073f84d6e8ee4ecf3300f048f891 -e8f847000408752ee74eac0000408752 -e8f91f0003eafafedbd80b00003eafaf -e8fe560003dae5fee7bd8f00003dae5f -e905700004682f3e2f717000004682ee -e9080d0003e2e6ced8d2ec00003e2e68 -e912c200041fbd5ebdba0c000041d810 -e917ed0004a15f3e3d977900004a14fa -e91bcc287b0587fe2522d800904c8746 -e92150f26155b4ce68fcac009048d0d6 -e921e200048d29eed2c786000048c057 -e9240f0003d0f21eb6cbb200003d0f1d -e927de333c7829fe7b01ab00c04c58ad -e92d32f8eddfbece2f7ba400204bda86 -e93ebc0004c7afae2fcece00004c7af0 -e940250004916d4e08a7a70000484bb8 -e940ccdd4f1bbf4e9ca1de00e047c35b -e9448500047bdd1e32b7120000418e3e -e94a9d61b6663bde5bb1e200d048c7fe -e94cac00048dd55e462624000048dcdb -e959170004bf070ef5093300004bf063 -e95b1200041c2f9e3b2586000041c2e3 -e95ba80004987b1eaa761900004987ac -e95c8500047964feeb4da1000047950b -e95e0d0004987b1eaa761900004987ac -e9600f000412e4bedbf68a0000412e4b -e961be0003cfb55edea27200003cfb55 -e96b3d0003d5107eea76c300003921cb -e96cce6bbdf1949e9ca3a800e04919b4 -e96de200048dd55e462624000048dcdb -e96fb5000423041ee849a50000423041 -e97301000405ce8ee8320c0000405ce8 -e97952000420d61ec10c6b0000420c52 -e97f3bd1aab7e27e409ac7008048c8d3 -e9824500047e356e5af5d9000047e2f1 -e985d3574db0868e8d4b0a00b04823ad -e9942f00047b917e271896000047b79f -e9982f000480300ea8f774000047f2a1 -e999380003ddd19e82d59400003ddd08 -e99b760004c438aee832190000496f76 -e99d1f818a9cdf2e50947c00d04a1491 -e99f3145e545aa0e94aaa100104b4f65 -e9a7ee0de6db8b6e7db42500c0497d23 -e9a9f4000445915edc125f0000445904 -e9bca70004bf070ef5093300004bf063 -e9bdf7000396017e834f21000038758a -e9bdf7000396017ee52fe50000396017 -e9bff4867efeaa6ed25d9600504802d4 -e9c3590003d4e24e282c0b00003d4e23 -e9c6f100043fd66ee87abc000043fd66 -e9c774000483862ea559a1000047663a -e9ccfb3e3800abee92f03d00b04cb9aa -e9ce507eca262d8e06f2a500804c04e1 -e9d97a091d4985ee6079ae00a049d1b4 -e9ddd166a385e61ee154c400a0485be5 -e9e58b3c47907f6e40347c00604a1e89 -e9eff2554628c80e6a37ac0020475f27 -ea0337cf6d197bae248ed000904aae9f -ea04cd6b2dc59a6e310b3300504846cb -ea071200040dea6ee1a047000040dea6 -ea0826d5e0e41b7e914b7900604a2c80 -ea0b5e2d8cbd376ee2b5ae00404a51ea 
-ea0f140004cb198eae9dd800004cb13a -ea0f9c00046ba5bee8d760000046ba5b -ea10690004b594dea7a5a400004ab782 -ea10a40003f3408ee2a00b00003f3408 -ea10fe0003ee07bee302ec00003ee07b -ea13550004b9764e04705c00004b96e2 -ea13ac0004781dbe529dd20000474fa7 -ea15d80004cb198eae9dd800004cb13a -ea1b6b0003d26a6e5db00b00003d26a1 -ea1dd700049d853e5ff833000049d76f -ea1f84b22a5e7acecac3220030493eb2 -ea22720003cb9f0ee872cd00003cb9f0 -ea2f74000482cd0e676c2f000046f7cb -ea3c72d9dc3613ae06e15d0040472db5 -ea498600041a1e0ee8e86b000041a1e0 -ea4c770003d86c2ee008f800003d86c2 -ea4dcc0003def88ee0e8db00003def88 -ea4df7000395fb0e834f21000038758a -ea4df7000395fb0ee8ce090000395fb0 -ea5805d6228f07ae9098a700304c718f -ea5c8779e715d86ebcedae00904a1704 -ea60b50004744a3ec803ac000047449e -ea61e200045163bee90fbf000045163b -ea61ecdfb4d2a81e1d6bac0020484833 -ea625b838a814a2eb17bd0006047919e -ea68c7004e41713ea80bac00904ca549 -ea69140003fa228ee046c200003fa228 -ea6e91347918ad2eac69960010484a33 -ea76090003d06b1ee023e900003d06a9 -ea774e0003e4255ee0cf5100003e4255 -ea77c27d38c7adee1cddde008047ab09 -ea79c900046241fee8fe7e000046241f -ea7e653146dd28ded953ed00804a21f1 -ea7ec20003ff7cde9efa8a00003ff7bb -ea7f3f0003d2415e85591700003d2301 -ea827c1b7d18141ecdd5f40050491b41 -ea83590003d28f6e2f298f00003d28d4 -ea89700004622eee47627e00004622eb -ea8e46000407186edf6a0c0000407186 -ea916ef219c9369e92f0eb007048c9da -ea93bf0004597f9ee9897000004597f9 -ea94a7000456c95e8ec76c0000456c64 -ea952300045a610ee8d4ea000045a610 -ea957000045961bee23895000045961b -ea95b40003d0c87eddf20900003d0c87 -ea96115e127b14eef6d2ce00504c2f6d -ea9ab10004385a3ee8f4e800004385a3 -eaa46b000416948ee302ac0000416948 -eaa7600003db4ebee1118f00003db4eb -eaa87acec2347b3e07d5d2001047a91e -eaacd40004744a3ec803ac000047449e -eaacfe0003e19d7eba3ffa00003e195d -eaaea50004c2007eecc93300004c1ea1 -eab11be0dfb1458ec9a02500c0498a2f -eab7b17d81790f4e004ca100204c66e2 -eab7e4ea04374f9e2fcece00204c7af0 -eab81f00043608cedf0579000043608c -eac1d9000476495e9d9245000047648f -eac2ee96df9c152e1efae8004049fe47 -eac35cc19199c5fea771e200f04b2cbb -eac74e0003de069edd380b00003de069 -eacda648cae8aade6caaa100204bc4a5 -ead0c200048ea16e060124000048ea11 -ead0c7b3a5bf88de44aed500804460d9 -ead0f5000460d8dee9a5e20000460d8d -ead33f0003dae99e830a9200003dae61 -ead4bc0d50600d5e92e42800204b06fb -ead6e20004329e6e636ee200004329e5 -ead733000475a7fe6d00960000475a62 -eada260003d990bee0daaa00003d990b -eadc020003a7fe2e834f21000038758a -eadc020003a7fe2ee959f200003a7fe2 -eae3370004236f7ee3680100004236f7 -eae37400047b394e31817b000047b386 -eae46800042a00eef9f27e0000424d78 -eaf1db00042c62aee2f67e000042c62a -eaf66fd353eb1b9ee2a4c40040485d79 -eafeca000422781ee319a50000422781 -eb0124000497170ea639e600003cf5b0 -eb015500048e5c7e2eac2f000047d5af -eb01940003f4ac8ee36b6200003f4ac8 -eb06880003d06b1ee023e900003d06a9 -eb092300045194bee278f5000045194b -eb0a280003ec64cedd6f5100003ec64c -eb0b010004217b5ebb7d860000421757 -eb1bc2de7ddda1bee7c26e00304ce1c4 -eb1ec300047b394e31817b000047b386 -eb2374000482cd0e676c2f000046f7cb -eb29a1645657259ed9fca100b04c703c -eb29f1000476495e9d9245000047648f -eb2d92622a40ebce094e5300e04c470e -eb3129a8a488aece6e2d3c00b04c938c -eb3a9e0003cbbf8ee9f27200003cbbf8 -eb3dab0004c55b8e35951700004c4928 -eb4320000471897e382a800000471829 -eb4aab0004b95afef0faa100004b94f9 -eb4ece0004c55b8e35951700004c4928 -eb51d100043bb8cecef850000043bb89 -eb5f170003d6188ee9db3d00003d6188 -eb64aa00041484dede346b000041484d -eb677eab9c6a264e41cf5e00b0472f16 -eb6b40a4249ed41e6c8ff500d048e0a0 -eb6bd28e214d45ce4066c3001047d486 -eb73a470a7adfd5e238ea100a04b0532 
-eb75750003d1832ee434fc00003d1832 -eb75a10004759f4e7e15a100004758af -eb77bb0003e454be7d50a400003e4525 -eb80c40004853b3e6caa4e00004850f7 -eb8924000490ca7e4524250000490c99 -eb8f11000430559ec9c3c6000042f8b9 -eb9bd0000484949e7149f10000483a90 -eb9d458941e00d7ea30b7900704a1c32 -eba30a6e6392e43e4f6c8200804cc801 -ebb0a40003dd88ceea380d00003dd88c -ebb5e200044f63aeea76c300003921cb -ebb6530003d06a9ee17e8800003d06a9 -ebb6692286c10dde7aee0f00e04bf7e1 -ebb868000425ba1eea64020000425ba1 -ebbf510003f75f7ed194db00003f75f5 -ebc6bde483340d4e01dc850060473725 -ebc93101b39ce3fe96a4a100204c078d -ebd1ea6ef4f29c3e85d41300404baf11 -ebd5980004ad212eb591d000004a886f -ebd68a0003fcc6ee9288a400003e19e6 -ebdd40000448b02e0e98420000448aaf -ebddf70003daba5ee1a97500003daba5 -ebec2000046845eee148470000420966 -ebef900003921cbe834f21000038758a -ebef900003921cbeea76c300003921cb -ebf4ba7854158a1e38086f00a048f539 -ebf52bbef99d292e07038c00804a1c02 -ebf973000445136ec9e17e00004450b0 -ebfae90003ea144ee1f3bb00003ea144 -ec01ddf18dc61b5e7491c700f04b7b30 -ec05d6c970b2b41eded12b009049d813 -ec0a2ae7848f41cecfa1a800f0488965 -ec0ca7000450d81e7a2ea20000450d5d -ec0e45000479323e770085000047931d -ec1c8c7dafafd9ce29c7220060496a49 -ec207d2e96875dee316f8c00404a598f -ec227e0004699a5ee3bb9c00004699a5 -ec24a10004c7afae2fcece00004c7af0 -ec25dc1d82449d2e0126c7000048c2ce -ec2616000401ee4ee204470000401ee4 -ec287c0004a225eed953ed00004a21f1 -ec375c0004ab34ce49b8ae00004a9de3 -ec385600049f89ae0e312b000049f7e9 -ec3924000497170ea639e600003cf5b0 -ec39d519d0cdaffea8f43300b049bb1b -ec3b74000392d66eeab7450000392d66 -ec3cd100047323ce4a60a30000473237 -ec40690004b594dea7a5a400004ab782 -ec445dfdf189030ef9734600f048a7d1 -ec457469d7a6f93ef679d900d047fa25 -ec45c9000458c3ce8d5dc900004589c3 -ec525c0003cfb55edea27200003cfb55 -ec54790003d18baee260f800003d18ba -ec593b8d5d0d703e26c6e800804a7549 -ec5a296a69d7d85eb2eb70005049b64d -ec5b4e0003f6143ee2735100003f6143 -ec5c96000479323e770085000047931d -ec61310003cf8f3eea8e2000003cf8f3 -ec66abee6515b56e28330200704abd66 -ec68721a25167bce2a64d4007047ab51 -ec6cae0004a8657ed8ad4a00004a85d4 -ec75e60004cd8d7eaf0a45000047d45c -ec76c400043a309ee060a3000043a309 -ec86c300047adcee313b33000047adc9 -ec8733000475a7fe6d00960000475a62 -ec923962c09e278e9d87ec00e0487f26 -ec93fa0003ebec9ee4fd7200003ebec9 -eca8ef0003def88ee0e8db00003def88 -ecac914653b7e44e732c2c00f04c366d -ecb228000426065ee4e6ca0000426065 -ecbcf8d007ae45feaca9d90010476913 -ecbdaf000471a2fee2d92c0000471a2f -ecc2e000043bcfbeebef9000003921cb -ecc654550a50f30e9070a800b04bb952 -eccb22000494c80ec6a60d0000494c6a -ecd5e4168a4689becdd2e800f04c546f -ecd7dc0004a4a47e66015100004a4971 -ecda280003e4255ee0cf5100003e4255 -ecdd7b00047f776e3a5ac3000047f70c -ecdf170003d8d7fee67aaa00003d8d7f -ece1e28abe7676ee6ce0250040496ffd -ece49500044decdee6f020000044ddc6 -ece61600041a66dee1daba000041a66d -ecf3f63f9d71fe4ec76c5c00104bb694 -ecf8aa0003fa228ee046c200003fa228 -ecfa560003d7aceee6218f00003d7ace -ed02a8119d5f563e78872200804983af -ed05a10004759f4e7e15a100004758af -ed09a1000478e7ae298fd00000478e76 -ed0ac300049735ceea76c300003921cb -ed0ffa862fd67a7e3b51de0010481c3a -ed21980004ad212eb591d000004a886f -ed22fcf01573298ea2bc7900104ce878 -ed26390003d86c2ee008f800003d86c2 -ed2a8a000410844eeba46b0000410844 -ed2c1ab767c46ece400025004049726a -ed2ee80004c76b9e1b16a500004c76b8 -ed3ed20004cce04ec12dbe00004ccdd8 -ed3f5100046d38ee8b1c10000043299a -ed492300045b893ee47970000045b893 -ed49350004732d5e55f73100004732ce -ed4a45000478e7ae298fd00000478e76 -ed4ef8000490292e133df40000490290 -ed52150003d990bee0daaa00003d990b 
-ed5406f233705dbe3d0c2c00104be838 -ed54ea00046c880eebf8a7000046c880 -ed57a40004b6725e188c6900004b6703 -ed5cc2a69f6c009e3cd14800904cd203 -ed66260003db4ebee1118f00003db4eb -ed6924000491d1cea013220000491d12 -ed6f310004732d5e55f73100004732ce -ed6f730004cce04ec12dbe00004ccdd8 -ed6ff5000486f96eb32ceb0000486f93 -ed715d0004730cfef4e9d1000043d310 -ed79be000395fb0e834f21000038758a -ed79be000395fb0ee8ce090000395fb0 -ed7c9000046ecebeec0a02000046eceb -ed84257fc529b02e807a2400204989a8 -ed8579000436094eddc2b1000043608c -ed886b000410772eec02ba0000410772 -ed8916fc0acbb3ee2ecab200b04c3ba0 -ed89d700048e61ee746f33000047d607 -ed8a4500047e8bfe001df1000047e861 -ed8e3726322fab3e38720f00604bf280 -ed95b716d3671abe6868a100004c00fb -ed95de000473dc5e86f42f0000473db7 -ed9931000407186edf6a0c0000407186 -ed9e9d0003a8031eb485f20000395839 -eda13500043608cedf0579000043608c -eda15500048fb22e4ecf33000048f891 -eda2ca00042daa1eea76c300003921cb -eda9470957e5588ee2614800b04ccca5 -edaae80004be622e6fb37600004be61d -edafe3006be9ea2e7c8a4500c047e603 -edb7a0139be0bc6e668e820050489dc9 -edb895000420966ee2c3cf0000420966 -edba880003d0aceeefd40c00003d0aca -edc1f9fcc21a192e916c7600c04bf4b9 -edc2660004ba7d3e64e41300004ba7d0 -edc3dc0004a4a47e66015100004a4971 -edc5d100043f4c2ee5e3e0000043f4c2 -edc847000413ff5e98057a0000413ff1 -edcf330004744a3ec803ac000047449e -edd3a60003d0036ee71de600003d0036 -edd7620003efee5eba93bb00003efe94 -edd8eb6062b2910e0cd1e200e048de9b -eddc8e0003f3408ee4217200003f3408 -eddec300047a928e07d5d2000047a91e -ede13f9a43e7e0cec4bec300a047d08c -ede1dd00040a9fde834f21000038758a -ede1dd00040a9fdeb1068700003b603a -edef38bb7b3ad5feae86a500004c3a8b -edefecdf35a66ade727a1d00e04b1b89 -edfc89ab6e0edbbe38064500a0477943 -ee00890003d0aceeefd40c00003d0aca -ee0a8a0004022beeae298600004022bc -ee16aa0003d7d2be9ba97500003d7d28 -ee19dfafa33270de1387be003049e1aa -ee1bfa0003df2b4ee7697200003df2b4 -ee1e090003b13c1e7288fe00003b0b6d -ee275de063bc995e5bd42800c04b7d26 -ee28c10003c86baec79b4500003927b9 -ee2c8e0003ee07bee4317200003ee07b -ee33b20003d119dee7620900003d119d -ee35b9000475db9e43d5de0000475db5 -ee3d520004062c3ee70c4700004062c3 -ee41280003caadaee6905400003caada -ee46a20004513a7ea5876c00004513a1 -ee47510003e9715eecfd3800003e9715 -ee4a3e4abcc9473ecd8a96002048baaa -ee4a74714737f49e7f09d000a04cc02e -ee4ad0399f2fe7fef483f500d0485316 -ee549500040dea6ee33952000040dea6 -ee54b296b86db75e608c780080484d95 -ee657a00040156eee7320c000040156e -ee69f04596eab05eb7d8eb004048aa26 -ee6c770003daba5ee1a97500003daba5 -ee6c9000046ceabeed1202000046ceab -ee75750003dae5fee7bd8f00003dae5f -ee76d4d86deae8fef013a400904b4364 -ee78251ded02aedeae1cc200f04909ad -ee7870003d73561ef4a4dc000047252b -ee7cbf61b19c733e875b7000704a2401 -ee7efe0003a7fe2e834f21000038758a -ee7efe0003a7fe2ee959f200003a7fe2 -ee7fdc00049bb1fea8f433000049bb1b -ee80250004959f3e4a800f0000495990 -ee83c7a54230d4beac619600d047e0a9 -ee85af0004711f9e238b20000047119f -ee86fe0003bef02eb1068700003b603a -ee87bf00045ecffe038c760000454a70 -ee87bf00046a02deed5570000046a02d -ee880b0003f74f2ee6078800003c031f -ee9537000391fb9e834f21000038758a -ee9537000391fb9eecefda0000391fb9 -ee9a560004bc55ee6caaa100004bc4a5 -ee9bfa0003dc267ef2a76200003dc222 -ee9d7a000419cabeba01520000419ca6 -eea11b0004cd433ec502cb00004cd404 -eea3d000047cf91e5da096000047cf8b -eea7620003f473be233fbb00003f45ec -eea88e0003ea144ee1f3bb00003ea144 -eeafbe00049bb1fea8f433000049bb1b -eeb4760004566f9eed467e00004566f9 -eeb7da0003d06a9ee17e8800003d06a9 -eebf9c00045bbcde834f21000038758a -eec6bd9640ba9d9e1317ec00c048d0af -eec9ec0004c84a5ee832190000496f76 
-eecc95000401ee4ee204470000401ee4 -eecfea0003c016feb485f20000395839 -eed68f0004022beeae298600004022bc -eed73700042e1e9eed63b5000042e1e9 -eed7550004b9dc4e7196a100004b9dc2 -eed8d70004236f7ee4baca00004236f7 -eeda280003f6143ee2735100003f6143 -eedb3300047a928e07d5d2000047a91e -eedc7c0004a1118ebf4b7900004a1115 -eedec300049735ceea76c300003921cb -eedff707c39bd8ce09d801001042c708 -eee948cc914eb63e2b94dc00a04726f1 -eeeaa10004b936ae79d5c700004b8404 -eeec95000416948ee448470000416948 -eeed2400048e5cae22c9e9000048e29e -eef86b00040658fe8421dd000040652b -eefcb5000473348e0d6ab10000433fc7 -ef03020004aa72fe2fcb7000004a02db -ef063ba8892fd62ec733a400a04bddf2 -ef157000046498bee66095000046498b -ef16480003d379ceedb00b00003d379c -ef18bf0635fd080edae77500e04cd64f -ef1ce7055206e55e6a406f003048f4e0 -ef20ae27fe26ec0e11a9d2002047c80d -ef20b432eed3f9de6b86c300f047cc38 -ef22260003d18baee260f800003d18ba -ef27170003d4e24e282c0b00003d4e23 -ef2dd20004801b7ed3bd7b0000480198 -ef3186000418354ee76eac0000418354 -ef35d9000481531e58b896000048152f -ef39d8000422781ee460020000422781 -ef39dd000419c37ee6bd520000419c37 -ef3ab20004bf070ef5093300004bf063 -ef402b00043a309ee060a3000043a309 -ef42c300047c517ee1128b000047c4dc -ef43200004703daee69e0200004703da -ef43abd7dc15e25e6df87c00a049ef23 -ef44d700042c62aee4499a000042c62a -ef47bfde8b6a998e7b156b00004c4178 -ef496f66c143e25e604b0a00a047af27 -ef49d3000440024eee27480000440024 -ef4cc10003ca6d3e4252d900003ca678 -ef5a7e0004b9dc4e7196a100004b9dc2 -ef5d980004af83ce2e7a7400004af822 -ef6b46000489265e4a4bec0000487eba -ef6ece0004b0665e255a7400004b063a -ef734e0003f4ac8ee4d7bb00003f4ac8 -ef79e200045f889e3997dc000043c95a -ef7aa10004b7b32e7491c700004b7b30 -ef7b2d438c73fa4e9720c2007048f4f5 -ef7bb7e41babb3be83d3dc003049a10d -ef7dcf00048f438e733124000048f430 -ef86c2000405ce8ee8320c0000405ce8 -ef8b8356a934468e3aac1300c04af7f1 -ef936b000489265e4a4bec0000487eba -ef9fb1fe2b4cd03e49ba450060481e5e -efa0880003d1832ee5a58f00003d1832 -efafe90003d1286e9c782000003cb9b5 -efb1db000423041ee849a50000423041 -efb6a800041a66dee1daba000041a66d -efc84bc7b1a8a9fe1efae800a049fe47 -efd7cee67c67f64e28f0ac00604859c9 -efe4280004b0379ee8167b00004b0270 -efea6300043f8c9eee535b000043f8c9 -efebeb1e9f69ceaeff0ec300c0474e5d -eff045f0b649156e433e9600c0485244 -eff18f0003d3a09e88f00b00003d1a35 -effa9e0003cb9f0ee872cd00003cb9f0 -efffb0615accd16ee5431400104c9888 -f0028200048868dfa3dac700004885c9 -f00c85000482742fd25d9600004802d4 -f01a2400048c3cbfb61224000048c3c8 -f01a560004bb791faf0a45000047d45c -f01b590003d7056feea67200003d7056 -f01e740004b1059f9b475500004b0de7 -f027551dc5b744afa5dbf500c0488891 -f0284e12e9ed56ff88ce24001048be41 -f029940003ed6def0e317200003ebe6a -f03766139ffcf10f7ba1f100a047d739 -f03dc90004694c5feed67e00004694c5 -f03e46c99307d10f0f50eb00e048d6b4 -f0404f8e5306075fd3c4ac002048bc72 -f053ec000484e04fea64020000425ba1 -f0546b000408752fe74eac0000408752 -f058fe0003efee5fba93bb00003efe94 -f05ef70382ecab3fa3dac700b04885c9 -f065cc0003f3408fe4217200003f3408 -f069f4000445136fc9e17e00004450b0 -f0730a000420966fe2c3cf0000420966 -f07c710003d8d7ffe7c24800003d8d7f -f08a530004c200ffecc93300004c1ea1 -f08a6300043941efed9eb1000043941e -f09b4800043fd66fe87abc000043fd66 -f0a1de000480300fa8f774000047f2a1 -f0a3ac000484694fe2df9200004450fe -f0a4552bca22e94f3e37ec00f0488ec1 -f0a49cc1e5418c9fd45e5600c04ac7e1 -f0aa883463d7724f47e1e200e04b89a1 -f0ab0100041a1e0fe8e86b000041a1e0 -f0b5f1000483732f6eaa8b000048372a -f0b92d3962eb26af8a937600f04c4d0f -f0c2ec0003ddd19f82d59400003ddd08 -f0c58f0003d6188fe9db3d00003d6188 -f0c59446937758af3f8cb600404a86db 
-f0c7be0004a1ea1f40347c00004a1e89 -f0cdcc0003ee07bfe4317200003ee07b -f0cf5767a791d05fb77ee800a0499c27 -f0d19b0f741b2daf654a1d00504b8d76 -f0d1b96cd3077f2fcc1cc200e0490ba4 -f0d298000470fe2ff370560000470bac -f0d3e0000436656f15a67e0000433f4d -f0d54a0004abcc8f25e31700004abc64 -f0d56400048e5e8f5bf0eb000048e0a0 -f0df0f8b42bbddafe8eff5006048b4ff -f0eee80004c429cfed43d100004c3925 -f0f2260003d7acefe7aaaa00003d7ace -f0f4fb000426065fe6149a0000426065 -f0f74b19386b49df91c3d100104be9cd -f0fb620003f75d8f20a90b00003f75c7 -f0fc9500046bb97fefa093000046bb97 -f0fdd7000493937f90d5d9000047e6ac -f0fed3a4b75827cfb9f305004049098f -f0ffc8cdcb00c9ef749c2c00a04c4d71 -f105b65b2d392bff6e1b220070496f64 -f10a560003d4e24f282c0b00003d4e23 -f1143959f105cb2fae9dd800d04cb13a -f11682000489ad9fdb4774000047b57c -f117bf00044ccf6fc075e2000044cc2d -f118a300043bb8cfcef850000043bb89 -f119a300048d7b0fcdcde2000048d7a0 -f11c6b000407bccf3c41dd000040534f -f11c9300046ba5bfe8d760000046ba5b -f11d7a000407d9efd9eeba0000407b51 -f11dcc0003ebec9fe674d200003ebec9 -f11f0a00040dea6fe33952000040dea6 -f120a40003f6496fefb00d00003f6496 -f128b500048333ef3a5ac3000047f70c -f12d0b0003ee4a1f3b6cdb00003ecfdb -f1310ac85b3a1e2f3101a300704beb57 -f135c70004b8bcdfa16ba400004b8bcb -f13a660004b8c29f69963900004b835e -f13c2f00048333ef3a5ac3000047f70c -f144760004c02dcff95c7600004c01e1 -f149f3025ea4139fe719d700404a5fab -f14e560004b42ddfcfe9e200004b3706 -f151d80004236f7fe4baca00004236f7 -f152580003d0acafefd40c00003d0aca -f1597b000475744f688ac30000475742 -f1599a514f0fb41f356dd900304803d4 -f15a2700043313aff8389a000043311e -f15dd20004743e6f17b13500004732c0 -f1623eac93cbc8ffc250c4003048dc79 -f1680b0003d9fa4feffb3d00003d9fa4 -f16ca700045163bfe90fbf000045163b -f174fe0003dd88cfea380d00003dd88c -f178919b00b9c4cf0aa62400804964f1 -f179350004732d5f55f73100004732ce -f179380003f74f2f834f21000038758a -f179380003f74f2fe6078800003c031f -f17964f210b0016f99017b00e0476805 -f179d90004743f8f5ed30a00004743ed -f17a740004b1059f9b475500004b0de7 -f17bed00049b349f9aad2b000049ae58 -f17cd4000476985fde3b74000047586e -f17eb20004c6105f4f1c2c00004c6039 -f17ec410cc97377fdaeee800904bee3f -f1880d0003ddd19f82d59400003ddd08 -f18adc497fd7a85f4ed2e800c049f790 -f18bbb0003f75d8f20a90b00003f75c7 -f18bbf00046241ffe8fe7e000046241f -f190effc6a922ddf94b16f00204ab533 -f1930a00047e607f7c8a45000047e603 -f194420004480fff4b1c7300004480fb -f1977000049ff15fa2776d000049fe67 -f198ea52cd84519f66015100604a4971 -f199c900045a610fe8d4ea000045a610 -f19b5b0004352d5fefaa6300004352d5 -f1a613000416948fe448470000416948 -f1afdf0003d0036fe8829e00003d0036 -f1b20c00041c2f9f3b2586000041c2e3 -f1b4ea000460d8dfe9a5e20000460d8d -f1b6ac000409a6ff6b21520000409a68 -f1b7110003cf83fff01cb300003cf83f -f1b77400047a7e2ff6fbd0000047a7df -f1bc0b0003e181bf855b6200003e1816 -f1bf01000405364f3c41dd000040534f -f1c05600049ff15fa2776d000049fe67 -f1c45c000391fb9f834f21000038758a -f1c45c000391fb9fecefda0000391fb9 -f1c7ac00047f890ffba82f000047f7da -f1c7ee64c25d5f7f53b97b00d0482f6f -f1cd750003d7d2bf9ba97500003d7d28 -f1d25e00043cdf3ff072b1000043cdf3 -f1d6b56b6bde5baf9d53330090482723 -f1d6d0828e924f8fcbebc200d04a1d9c -f1d74e0003dd3acfe81b5900003dd3ac -f1d9ba00042c62afe4499a000042c62a -f1dfac000484694fe2df9200004450fe -f1e5c00004a3f25f3e06190000490bf4 -f1ea7e0004bae9cf256eab00004b9a08 -f1ecdb0003d119dfe8a28800003d119d -f1f3a60004cbd56fc326d800004cbcc6 -f1f4b5f55be33b6f9fcca800d04bc359 -f1f883bc6ec8341f2aba2400b0494764 -f1f8db0003ee474f27c3bb00003ee457 -f1fa480003d36d6ff0973f00003d36d6 -f1fb43000422781fe460020000422781 -f208870004486c9f49ea8e0000447267 
-f20a0f0004c5895fe832190000496f76 -f20dc900045b954fbf7820000045b948 -f215cc0003df2b4fe8b4d200003df2b4 -f216535b5d46954fa4250a00b04c5f91 -f21b390003cf8f3fea8e2000003cf8f3 -f21cb5e06adfee1f41870e00a04cb7d3 -f222eace109fc72f2849e300704cc1be -f22d940003efee5fba93bb00003efe94 -f22e720003d20e8fb1068700003b603a -f231c900045cbabff0f67e000045cbab -f232e90003f4ac8fe4d7bb00003f4ac8 -f235310004062c3fe843cf00004062c3 -f2371a885d5d42df6181ab00b04c61bc -f237297e4654279fae9dd800f04cb13a -f2415500049083dfeecc0f0000490838 -f2459700044778dfa7c9970000447137 -f24c5a00047323cf4a60a30000473237 -f24d0b0003f4fe1fcbb8fe00003f427c -f251f70003dae5ffe8fe5600003dae5f -f2579dd1cd81247f85f4bc00404c8850 -f2592900042a00eff9f27e0000424d78 -f25e8200048576afbcbf460000485753 -f26374000476985fde3b74000047586e -f2642f00047f776f3a5ac3000047f70c -f267fa0003e454bf7d50a400003e4525 -f268db0003eb1def02135100003eb1da -f269a3fc93bec08f4701b90040482450 -f269b00003d1832fe5a58f00003d1832 -f26da300048dd55f462624000048dcdb -f271062ba9aa987f8dbbcc00c04cae64 -f2754c130d22a34fd5a1a300b04beb72 -f278f500045992cfbc68a70000459842 -f27e4e000489265f4a4bec0000487eba -f2805c0004b436eff013a400004b4364 -f284a7000462427fe8fe7e000046241f -f287e91f8b59d44f68800f000049811d -f287f600043bb8cfcef850000043bb89 -f288530003caadafe8117900003caada -f29129000425ba1fea64020000425ba1 -f2a0a70004597f9fe9897000004597f9 -f2a3e385ef6d0f4f69d7c00080472e13 -f2ae720003d4bb0ff1589800003d4bb0 -f2c5f100047c517fe1128b000047c4dc -f2c67e0004ba9bcf57927b00004ba97a -f2cf53000443a2bf27a0fe0000443a29 -f2d0240542368b9fbebbd100d04c189c -f2dd980004b0deef9b475500004b0de7 -f2ddab0004c1acef1236a500004c1ab5 -f2e50b0003f36fdfc1ac0d00003f36ec -f2eba50dd39772ff3e18d4000047f941 -f2f2190004997a0ff2320d000049979b -f2f8020003970eff834f21000038758a -f2f8020003970effc79b4500003927b9 -f2faa10004afbfbf4f5a7b00004afbe1 -f3054686dbf8e3aff6fbd000d047a7df -f30746424c89cdefb42f7000204a1926 -f3078c00049ef35fbfc51700003dc7a9 -f308690004b8bcdfa16ba400004b8bcb -f30f420004ce886faf0a45000047d45c -f30f6b0003d8d7ffe7c24800003d8d7f -f315d00004a927bf3e77ed00004a2a1d -f317e20003fc92ffe6078800003c031f -f31ba40004bb792faf0a45000047d45c -f31bac000479614feb4da1000047950b -f322b20004c6105f4f1c2c00004c6039 -f322f200040156efe87f01000040156e -f3287744326e1ecf4560b500b047ea75 -f331d14fa712767f141774000047a914 -f3339ca2e5f6c3bf46513300704c3fb0 -f338ca00048ca54fdfa4ac000048ca52 -f33ba45da02d48af87e7d100b04c08cb -f33cf80003d2e9afd6af5900003d2e96 -f341c10004c2dfef697ae800004c2250 -f347be00049ef35fbfc51700003dc7a9 -f34b590003db2b2ffd1a5800003d102f -f34d4ef74c7c3e8f08fb1900a0487de6 -f361c10004be8e4fada151000049f5c5 -f36263000435971ff1e1d10000435971 -f362f6b4a57203ff26707c00a049e8be -f3692b0004a6a03f4e1c7c00004a6a02 -f36a13000418354fe898470000418354 -f36c5aec9ecc2f6f78872200d04983af -f370e679cce7c34fab976d006049a724 -f373b500042a00eff9f27e0000424d78 -f37a9600048ca54fdfa4ac000048ca52 -f37ef8000426065fe6149a0000426065 -f382df0ef74a797f5b9fa8005049febf -f390710003d7acefe7aaaa00003d7ace -f390ef0003ebec9fe674d200003ebec9 -f3967d000430d77fa2b1dd0000430d55 -f39c280004b2737f34d7a400004b1f0d -f39cd7000419c37fe8428a0000419c37 -f3a1dd000410844feba46b0000410844 -f3a28b000475512f171c85000047550e -f3a5860004018aff614ec20000401865 -f3acdb0003efab3f7c140b00003ee7f2 -f3ad720003f64edfea76c300003921cb -f3b17a0003fce52f834f21000038758a -f3b17a0003fce52fea76c300003921cb -f3b1b900047f7bcf3a5ac3000047f70c -f3b7a40004b0deef9b475500004b0de7 -f3c3a65dc10638efa71c0500c04a11cb -f3c476000466f3efbd89e20000466f24 -f3c486000423041fe96fb50000423041 
-f3cdeb00043313aff8389a000043311e -f3d09e621ba4eb2fd6af5500b04ae4a2 -f3d190000410772fec02ba0000410772 -f3d4d7000405ce8fe973010000405ce8 -f3d65f000445b7eff2a7530000445b7e -f3e62400049397ffd92425000049325b -f3e8fe0003e454bf7d50a400003e4525 -f3eabd63a22ee44f649b5500004b86ae -f3ebac000475512f171c85000047550e -f3ebbb0003e2aa1ff29eec00003e2aa1 -f3ec9300044d3b2ff294a7000044d3b2 -f3f045e1a656dadf5ed30a00504743ed -f3f869c8c3783f9fb591d000e04a886f -f3fa0a1d8c4163dfc2c9ab00b04c306f -f3fad80004cb198fae9dd800004cb13a -f3ff3d0003d2e9afd6af5900003d2e96 -f4003c392f16f71ff85cb500d0479fd4 -f400820004cc35ffd33b3f00003d39bc -f40949c30e8994bf39a21d00204bde2a -f40d860003ff200ff2919000003ff200 -f417f99f26fbe2af513a5600c04acf1f -f41de3000448104f4b1c7300004480fb -f421170004c5946feda5a300004c591f -f425302085f555ff912d7b00f047e438 -f42dbdfc96d5a55f79507600304bf3c5 -f431f7553774a25fd115c100804c53dd -f432f7db6242477f3a7c5c00f04b9e30 -f4349600047f7ddffba82f000047f7da -f437620003ed19ff1ef0fe00003ed197 -f438eeb8ad7355bfde645c00704ac6aa -f439d9088cb5bb5f71d5e300d04cdd95 -f43b9c000457491ff2db600000457491 -f43cee53d7ff104fdcd25300404c471c -f43e560003dc222ff2a76200003dc222 -f4466c00043186bfb45658000042e389 -f447b80003d0036fe8829e00003d0036 -f44e5d44247d6a6fd76d240050497227 -f4578600048d27afa559a1000047663a -f457b80003cb8d0fb4b95900003cb8d0 -f458ef0003df2b4fe8b4d200003df2b4 -f4599f154a2d54cf3b6ca100104c1255 -f45cd10004c9386fd1daac0000409b7b -f45d6d9b5a80633f68fcac007048d0d6 -f460530003cbbf8feb3a9e00003cbbf8 -f46564000484d40f08185c0000484d3e -f46939c673a3145f57242c00804c05da -f470a40003e9715fecfd3800003e9715 -f470ef5abed42b7f1236a500104c1ab5 -f4753e70d61f6d6f70d9f100b0480249 -f476ce0004b1e9cfa7a5a400004ab782 -f476f9dc15e3b66ffbc3ed00304a22fb -f48298000471897f382a800000471829 -f489c63a8499e1ffc6f1ae0030499d75 -f48a5aa2b63cc23f09a66600b04ac410 -f48e8f0003fa4aaff357cf00003fa4aa -f490ea000456deaf5258950000456de1 -f493bf00046a885f0749c9000046a87d -f4992c000471975ff34b090000471975 -f49c770003d6188feb5f1700003d6188 -f49e0f0004be56af47e856000049a4ea -f49f0900046d38ef8b1c10000043299a -f4a51f0003dd3acfe81b5900003dd3ac -f4a5db000429aa8ff335290000429aa8 -f4a654509f0e082f0f9ebc00904c602e -f4a6bc0004c6c81fea76c300003921cb -f4b18b32188f397f36adc700d04b770e -f4b1b900047f7bcf3a5ac3000047f70c -f4b1c00004a6a03f4e1c7c00004a6a02 -f4b2560004b9392f649b5500004b86ae -f4ba720003d84efff36c9800003d84ef -f4bb5e000472eeafb1068700003b603a -f4bd24000490cf8f00a9240000490cc7 -f4bdc900046c880febf8a7000046c880 -f4be530003d119dfe8a28800003d119d -f4c3a8000490cf8f00a9240000490cc7 -f4c5d2000483371fc5ac850000483327 -f4d2c20003fcc91f9288a400003e19e6 -f4d30a0004062c3fe843cf00004062c3 -f4d57b00047d62af7cca45000047c145 -f4d5c9000465b61f6c4ca70000465b57 -f4d69cb69718706f4c20850020476f21 -f4d76000045d8f6fd570a7000045d456 -f4d9b00003dae5ffe8fe5600003dae5f -f4d9f2000395a08f834f21000038758a -f4d9f2000395a08fd33d18000038bd74 -f4dbd894a6ef111f13f1f10070476ace -f4dc47000421be1ff372ac0000421be1 -f4df3f0003d379cfedb00b00003d379c -f4e0a40003f75f7fd194db00003f75f5 -f4e213000408752fe8f8470000408752 -f4e5dd000409efdf9616ac0000409ef8 -f4f58f633c9a548fe51bac004048148a -f4f720000470bacff370560000470bac -f4fcaa00041a1e0fea4986000041a1e0 -f4fe47cc1c8fcadfac80b700d04a908c -f4feac0003fd9aaf2d8e5600003dc243 -f504ae0004aaaceffb747d00004aa799 -f5050b0003f2be0fa3a0fe00003f2ad6 -f51385d2a1402faf9ad33300f049226e -f515e200045c891ff3c7bf000045c891 -f51b2000046cd5eff3c456000046cd5e -f51f3300047f8a0ffba82f000047f7da -f521ae00049a4c1ff4638c000049a4ac -f521eb0003caadafe8117900003caada 
-f523fa0003ee474f27c3bb00003ee457 -f524a40003e80e8ff3980d00003e80e8 -f52cb5000476ae0fc025d90000475912 -f5317b000480419f2525f100004803b3 -f5332000046ecebfec0a02000046eceb -f533a40004bb791faf0a45000047d45c -f53701d6f425c30fef7ceb006048b819 -f5392993839f9edfeda5a300a04c591f -f53dd2000479323f770085000047931d -f545a55d2ae42fdf56cfac006047ce7f -f54e390004b7ca3f14aa7e00004b7c98 -f551170003d84f4f9aa27200003d84c0 -f5538c00049f349fd8d02500004974a5 -f556530004bdff3f47e856000049a4ea -f55e16daa7b6c86f3a41f1001047ae49 -f56685081d1542bf35842800104bb5c5 -f5680b0003ef4b8fe6b59400003dd3ac -f5751b26197ff55f42b67b00b04ae743 -f577580003dd88cfebb0a400003dd88c -f577af94f4f142ef2fe5f1000047fa3f -f57b09c32a58dfaf7cdef800a049906d -f57f5d00046e827ff4112c000046e827 -f582ec0003f75f7fd194db00003f75f5 -f5845000043c7e2f0afff6000043c671 -f589f4000493168f54879c000045c2af -f58a8a000404c1bff4257a0000404c1b -f58b8600048d27afa559a1000047663a -f592ebb7689f2d0fb31af80060497148 -f593e0000435429f86fab1000043539b -f5940100042e1e9fed63b5000042e1e9 -f59737000423b4fff428680000423b4f -f5980b0003ee495f27c3bb00003ee457 -f59a530004c60e7f4f1c2c00004c6039 -f59be000043bc4af5065d10000437f6d -f5a2c2000405358f3c41dd000040534f -f5a2f8000497ef0f022c250000494e4c -f5a647d6669cff0f70fe9600b048db60 -f5a8f80003dae99f830a9200003dae61 -f5ac6f8926e482ffbf3056005049c920 -f5ad0fefd95b74cf951bd100b04bf552 -f5b8de126d61641f25c02f00a047be79 -f5bbec0004866bbf25cdb90000482893 -f5bd02d3b4a56f9f5ef12b00804a6696 -f5c4690004b9764f04705c00004b96e2 -f5d0b50004746b5f1bcb0a00004746a7 -f5d3330004987b1faa761900004987ac -f5d52e9c17d1113f807a7300a04c94f5 -f5d8039bb9bcca3f9ba48500b0483a09 -f5d8d40004743e6f17b13500004732c0 -f5db170003d7056feea67200003d7056 -f5dd2c3f8770aa7f95e0ac00e048c415 -f5e61d0004b7ca3f14aa7e00004b7c98 -f5e7ae00040156efe87f01000040156e -f5f09a000433935fb8027e0000433928 -f5f1a30004c011df907f7600004c00f2 -f5f70a00047a459fb351d20000473e7e -f5faf1000440024fee27480000440024 -f5fb0a000418354fe898470000418354 -f5fb3c698a63c5bf7495d000d04ccaf0 -f5fb5b44bebcce7f4c21d900c0483955 -f5fdd2000473bbbf6407740000473bb9 -f5fe0d897cc1222fa779de00a0476d81 -f5ff940004385a3fea9ab100004385a3 -f60564694c0f524f2d19240020496371 -f60fcf00040e791f1fef5100003e66e9 -f618a70004566f9fed467e00004566f9 -f619ac32292fb19f36cf33005049950a -f61cd20003e3c9afa0817200003e3c98 -f61d072d109630bf22786900104b6530 -f624820004cc35ffd33b3f00003d39bc -f628530003cf8f3fec613100003cf8f3 -f628d20003ed6def0e317200003ebe6a -f62a24000488671fe009a30000488586 -f62b2000046ceabfed1202000046ceab -f6339c00046a02dfed5570000046a02d -f6391800043313aff8389a000043311e -f639fe000423041fe96fb50000423041 -f63a560004b31f7f8c3e3900004b31d2 -f63c93c656e81d7ff77dd900b04781f4 -f649d00004cc34afd33b3f00003d39bc -f6565563f030d17fb9a49600e047c43d -f6591b0004cd50bfaf0a45000047d45c -f6657900043d310ff4e9d1000043d310 -f672920003d8631f8b6c9800003d862e -f6752300045bbcdf834f21000038758a -f679520003fcc91f9288a400003e19e6 -f67fb6000405ce8fe973010000405ce8 -f681c500043a31dfdeab5b000043a309 -f681f2000395fc3f834f21000038758a -f681f2000395fc3ff520020000395fc3 -f69a45000476fe8f4c20850000476f21 -f69a740004b31f7f8c3e3900004b31d2 -f69df4000445134fd7f56800004450b2 -f6a1c7602a7df29fcd688500c0473a76 -f6ae920003d8f2bff5711700003d8f2b -f6b1f70003b88a1f3ea7e50000395bd7 -f6b1f70003b88a1f834f21000038758a -f6b8d20003ee474f27c3bb00003ee457 -f6ba8a0003ff3b9f43768a00003fcbd9 -f6c20c00040658ff8421dd000040652b -f6c236bb2bf2bbcf96a4a100a04c078d -f6c598e7d403f19f7d88ac0020485790 -f6c8ea00046b995ff560a7000046b995 -f6ca80000419c37fe8428a0000419c37 
-f6d09600047a456fb351d20000473e7e -f6d9c500043f8c9fee535b000043f8c9 -f6da580003d0f21fb6cbb200003d0f1d -f6da6c000430c6cfb743110000430c68 -f6e17b0004791d6fb17bd0000047919e -f6e2630004369eef11e64e00003cba63 -f6e38c0004a2b7eff83da500003cf5ab -f6ef0100040fa07fb79b12000040fa00 -f6f14b0003cbbf8feb3a9e00003cbbf8 -f6f2ddd11f699e7fc6f06900604bc6d7 -f6f4eefad7ede43f72299800204acb60 -f6f65c0003cb9f0fea227200003cb9f0 -f6f8f80003d9fa4feffb3d00003d9fa4 -f6f9170004c011df907f7600004c00f2 -f6fb8c00049f349fd8d02500004974a5 -f6fda9a4b07ca5cf1ce85c00b04b0f40 -f7008500047a928f07d5d2000047a91e -f70f88cf6f27387fe331d7009049917a -f716762393513a1f18597b00b0475026 -f717a0000425ba1febb8680000425ba1 -f71f6b0003d6188feb5f1700003d6188 -f7205600047169dff5c9dd000047169d -f7297000046bab0f71f67e000046ba8e -f72dcac99fbbda4faeb7be00704a1608 -f732090003a3daaf6c30820000393484 -f735b226c7a3327f73353300f04c79a2 -f73ad20004cd1a7fd16acb00004cc043 -f73c76000463b6cf970de20000463b68 -f73c9500044fcd0f40e0a7000044fccc -f73d8526374d55bfe3b37900904a76ab -f73df1000483cf6f360429000039346c -f73df1000483cf6f834f21000038758a -f73f630003d0acafefd40c00003d0aca -f7403300049ef35fbfc51700003dc7a9 -f740fe0003f3c4dff5f40b00003f3c4d -f742530004c60e7f4f1c2c00004c6039 -f74a390004b31e0f8c3e3900004b31d2 -f74e4500047cbb6fb1068700003b603a -f757700004a14d8f50947c00004a1491 -f75f739d6a35430fecbfd100e04c50e6 -f75fdc0004355a2f23e80b00003db666 -f7649a00042cb67ff622ca000042cb67 -f76b7900049ff15fa2776d000049fe67 -f76e71fc6dfcab8f15cc250010490048 -f7712bebbae712fff2730a009047f566 -f771980004ac411f09a66600004ac410 -f773a800048e5c7ff4c8eb000048df31 -f774db0003f6496fefb00d00003f6496 -f778760004bf7f5f56ab7600004bf5f9 -f7864600041a1e0fea4986000041a1e0 -f78b6000045aac5ff610f5000045aac5 -f78f33000493937f90d5d9000047e6ac -f791c760a71864ff76235c00304abc0d -f791f10004815d1f5c2db90000480967 -f792d8b51602978f8e39240060499126 -f7a13abc51d0a14f986b37000048c32d -f7a3d400048e5c7ff4c8eb000048df31 -f7a9200003ae2f3f6c30820000393484 -f7b20c000412d7eff64d7a0000412d7e -f7b4c10003c87c1fc79b4500003927b9 -f7b6a7fd61cf07ff415b740050473feb -f7be200003cb8ddfc84d3100003cb8dd -f7bf6b000488ec4f3e37ec0000488ec1 -f7c0050004a5c28f3d43be00004a59a0 -f7c97e00044515dfb77df40000444acd -f7cb170003d36d6ff0973f00003d36d6 -f7cbbb0003f63dbff668a400003f63db -f7cdf100047372cf01dc850000473725 -f7cfb34108f1b90fe24bd000104840eb -f7cfbf0004694c5feed67e00004694c5 -f7d6970003d0093f35d53500003d0069 -f7db620003f785cff6880d00003f785c -f7e22686b0a8cf6fa981c100204bedcc -f7e943ece2362a5fa559a100e047663a -f7eac200040029ff5ac1dd000040028d -f7ef2e5fd2b2a78fe67c7200804a439e -f7ef510003e3924ff6cd3800003e3924 -f7f175d1e812dc8f30868b0010474c56 -f7f2cd0003cf83fff01cb300003cf83f -f7f5e2000454de5ff69fbf0000454de5 -f7f9040004a927bf3e77ed00004a2a1d -f7faec24c2be752fb680b500b0481635 -f7fea8000410772fed886b0000410772 -f8024c00041937cf32b7120000418e3e -f8046cacc4b80cef27f46900e04bcf90 -f805eb000433f4ff15a67e0000433f4d -f80b46000488ec4f3e37ec0000488ec1 -f80ca623e98357bf4fa5d2009047a53b -f8150a0004352d5fefaa6300004352d5 -f817a10003dd88cfebb0a400003dd88c -f81a8b000481531f58b896000048152f -f81c6b0003fcc91f9288a400003e19e6 -f825de000476ddbfa639e600003cf5b0 -f826ec0003f41e8f18eb5100003f41dc -f828222bb8a8d7bff86fa800e04a3ec3 -f82bb36e5b1e2a2fc6017b00f047d574 -f82db9000476ddbfa639e600003cf5b0 -f835f70003b3ec5f4d96a300003b3e4c -f835f70003b3ec5f834f21000038758a -f8398f0003dbf28ff6c76200003dbf28 -f83b620003e29f2ff8c49800003d9ee4 -f84131000410844fed2a8a0000410844 -f8488500047372cf01dc850000473725 -f84e0f0004c2292f13f4a800004ba11d 
-f85498f43f365d3faccdd000c04cd092 -f8582c0004be1b5faf0a45000047d45c -f85a1d0004bde28fe78e7400004b2ab0 -f86856000471970f328daf000047196d -f86a8878574c2e9f0ddad000a04aa26c -f86e54999ee957afbf34c2000048fdd8 -f876e1519cd0d03fa981c100d04bedcc -f87e0a8e368aa32f39ec6900e04bdc97 -f881380003e55b7faccfbb00003e5555 -f88bbf00045660eff70876000045660e -f88c7500042ef49ff7727d000042ef49 -f88f6d0004a5c28f3d43be00004a59a0 -f8902000046bb97fefa093000046bb97 -f8956f0004a8a27f7415d000004a8a08 -f89a760003c532df834f21000038758a -f89a760003c532dff726e200003c532d -f89bbf00046229cff73570000046229c -f8a9520003fefd2ff7517a00003fefd2 -f8b1c500043cdf3ff072b1000043cdf3 -f8bb0100040658ff8421dd000040652b -f8bf8c0004a2b7eff83da500003cf5ab -f8c05b9199b96f0f6407740030473bb9 -f8c1c500043941efed9eb1000043941e -f8c3275de2529a2f1602ce00704c6057 -f8c3f00003d0137ff6e13500003d0137 -f8c442d1dbe7e52f16c3a400304b313f -f8c6e00004385a3fea9ab100004385a3 -f8c77400047d62af7cca45000047c145 -f8ca8c4dd1d6bb4fb7e885007047a624 -f8ccc10003e9715fee475100003e9715 -f8cd998cf2058fcfae86a500604c3a8b -f8ce24000489ddaf668e820000489dc9 -f8ce6e0004cd678faf0a45000047d45c -f8ceba000419cabfba01520000419ca6 -f8cff45137bdf34fa2175500904bc7df -f8d0760004677eef42f5db00004285ff -f8d0d7f37cbe499f988f6d00b04a17b5 -f8d60f0004c6c8efd115c100004c53dd -f8dc0f2ee63dc59fea09f400d048e5fd -f8e6db00048e5c9f58c18f00003d19b1 -f8e7e000043a31dfdeab5b000043a309 -f8e9a8000489b2bf975c510000489b26 -f8eb4b6dc2085a7f3e37ec00a0488ec1 -f8ebbb0003f3addff79f6200003f3add -f8eda40004ab1a0fbfeed000004ab19c -f8eebce06d81006f71160d0090495e83 -f8ef4ee422604f0f3e06190080490bf4 -f8f2450004834e7f9534b50000476fa5 -f8f476000469ad6ff79dc90000469ad6 -f8f4790003d379cfef164800003d379c -f8f8820004cce04fc12dbe00004ccdd8 -f8fc960004834e7f9534b50000476fa5 -f8fe65b98dcfb9cfc803ac00f047449e -f907b0000441fbbff7925f0000441fbb -f909a9da3705acdf99487900f04ccbdf -f90b881524490b3ff4d27e00804b95f6 -f90dd00004a8a27f7415d000004a8a08 -f911f1000483cf6f360429000039346c -f911f400048e5c9f58c18f00003d19b1 -f912560004b8bcdfa16ba400004b8bcb -f913f9adfadb281fe24bd000704840eb -f91e293394cb9a0f8cf7d100e04c64dc -f91e480003d26a6f5db00b00003d26a1 -f921d1000435429f86fab1000043539b -f92d4b0003cf8f3fec613100003cf8f3 -f938a70004551e1fd5b36c00004551a4 -f939533b7e470c1fb2933300a047e486 -f940eb000489ddaf668e820000489dc9 -f94896000476fd3f4c20850000476f21 -f95eec0003f2f82f3ea2ec00003f2f7d -f95f9c000464fd8f5150760000464fd7 -f96648e332a381bfe7c26e00804ce1c4 -f966add52b8460bf1d68a700704c12a6 -f96722d407b4893f628df400c0495457 -f9757cb489aad57f65065600a04bb1f7 -f9792300045cce0f54879c000045c2af -f97a720003db2b2ffd1a5800003d102f -f97a740004b20c7f1ce85c00004b0f40 -f97bb15ffe88afff2ecab200704c3ba0 -f97eaa0003d26a6f5db00b00003d26a1 -f984b84444c1e70f3d6bc200e04a2866 -f987b0000444272ff844fe0000444272 -f98a618d7385151f2d19240020496371 -f997880003c15cffea76c300003921cb -f99c7d0004ab1a0fbfeed000004ab19c -f99ec70003c2ac8fae325000003c2ac5 -f9a80b0003e2aa1ff29eec00003e2aa1 -f9a82f000479288f0721b9000047927d -f9ac4e60f1d38bcffbf6c700a048d8d9 -f9b0040fb2f33b1f2df41300a04b30ec -f9b0c84ab326c2ef96fe6600104babe9 -f9b79c0004694faf5bb1e20000468fd0 -f9bb39000395fc3f834f21000038758a -f9bb39000395fc3ff520020000395fc3 -f9bec5000430d77fa2b1dd0000430d55 -f9ce8a000419e17f619ac20000419e07 -f9cf3700048ca54fdfa4ac000048ca52 -f9cfcf000400071ff855520000400071 -f9d0db0003e3107f7f53fa00003e2f16 -f9d3600003d7056ff01b5900003d7056 -f9d39c00045d89cfd570a7000045d456 -f9d67500043668bf15a67e0000433f4d -f9d67e00043311eff8389a000043311e -f9d802000428571f74b4680000428526 
-f9d9cf00042e1e9feed737000042e1e9 -f9e01f685db2766fd63bac00004737d8 -f9e1d21d5172f34f530619002048f892 -f9e35d0004713e7ff8492c00004713e7 -f9e632bb4977376f95a6ab00404b3d0b -f9e63a0259d9902ffb1dc900e04a9306 -f9e786324c02baef4f1c2c00a04c6039 -f9e7ab3c2631744f5b29c900a04a8e5b -f9ea89f08b03539f7b156b00904c4178 -f9f00d0003f7640ff896ec00003f7640 -f9fc65000425ba1febb8680000425ba1 -fa058f0003d9ee4ff8c49800003d9ee4 -fa0a80f52f49ffbf312d0400e04a932b -fa0eccd6d1b3817f07396400504931a2 -fa1050000435971ff1e1d10000435971 -fa109600048346dfb6ab74000048346a -fa13390003cf5abff83da500003cf5ab -fa257000045b954fbf7820000045b948 -fa26c300048245bf4701b90000482450 -fa2712000413ff5f98057a0000413ff1 -fa27550004b0b8aff5878c00004ab4f1 -fa27cfaf3eb2f92fb7c5a100704795f1 -fa29cf000496709f58942500004966be -fa2e8a000413ff5f98057a0000413ff1 -fa2f0a00048346dfb6ab74000048346a -fa301585259c142f63dcd4001047e24e -fa40980003d826eff8fa9200003d826e -fa45181fcb64d9af7c09ae00004a272e -fa4f8e387638e5ffd2afac00304cb455 -fa519600047e7abf413d96000047e779 -fa56cc40f551aeaf5f742f0060478862 -fa573f0003d84efff36c9800003d84ef -fa5bc9d2b485372f59593300104c5b10 -fa62b10004355a2f23e80b00003db666 -fa63d000047e7abf413d96000047e779 -fa68db0003ee654ff917fa00003ee654 -fa6d750003dc222ff2a76200003dc222 -fa73d7f822484e1fa03df400b048f753 -fa74a700044d5a7ff294a7000044d3b2 -fa77bf00044d3b2ff294a7000044d3b2 -fa79d70004a6a03f4e1c7c00004a6a02 -fa7a66000472e36f69d7c00000472e13 -fa7c423cae4ef66fad964e00704aac2c -fa837900049e476f36bfc2000049e472 -fa83a40004b20c7f1ce85c00004b0f40 -fa8ae80004a0da8f205b7900004a0da1 -fa9143a335c68abf0b8fec00d048522f -fa91790004353c2ff92e5e00004353c2 -fa9fb6000410772fed886b0000410772 -faa09148d442cc0fd8c38c00304a165c -faa0980003d8631f8b6c9800003d862e -faa5d1000436654f15a67e0000433f4d -faa5e55b675dd3df86632200c0497376 -faaa30a6305d737ffb5f330010490c9f -fab70a000408752fe8f8470000408752 -fab9c7103d45433f87e7d100c04c08cb -fab9edca2dc87fbf1b16a500304c76b8 -fabcaa5935d509afb6426600404b9726 -fac1d299b401626ff969f1000047caa9 -fac5dd00046f0b9fb40490000046ebe3 -fac7e14e9c840def7b42450090481b65 -fac9170004c55b8f35951700004c4928 -fac97e000445b7eff2a7530000445b7e -fad1520003fa4aaff357cf00003fa4aa -fad5dd000430d77fa2b1dd0000430d55 -fad74600048ddbbfc250c4000048dc79 -fad847103b11d5af732c2c00004c366d -fae616000410844fed2a8a0000410844 -fae68a0003ff674ff9506b00003ff674 -fae6b20004be904f72937600004be8fa -fae85600049b020f54c36d000049b019 -faec770003d9fa4ff1680b00003d9fa4 -faf263000435429f86fab1000043539b -fb09cf00049397ffd92425000049325b -fb0b044d7cdb9f1f022bc2002049a87e -fb0f550004bb791faf0a45000047d45c -fb120f0004c0108f6868a100004c00fb -fb13ad864a766a3f6699f100f047c503 -fb193f5399f6ed2fc64b8e00a043fa11 -fb1ef51387a527df48cfd100004c0a89 -fb2087000433f4ff15a67e0000433f4d -fb2493000459819ff9ad230000459819 -fb2c890003d0f21fb6cbb200003d0f1d -fb2d6400048f4fff4c4722000048f49d -fb2d9a000424d78ff9f27e0000424d78 -fb2fd10004c44cdfb8bebc00004c44b0 -fb417b000482249f6f928b00004821af -fb41d100043a6c2f8d5eba000040c009 -fb4a990003c532df834f21000038758a -fb4a990003c532dff726e200003c532d -fb4bcf00040795ffddf9860000407186 -fb5802000429aa8ff335290000429aa8 -fb5a0f0004c2dbdf6caaa100004bc4a5 -fb5e530004c44cdfb8bebc00004c44b0 -fb630ff6f5b50e1f4a690a00104c7782 -fb66280003e9715fee475100003e9715 -fb6ecc00039b5a5f05baa3000039ae0d -fb6fdf0003cb9f0fea227200003cb9f0 -fb709751a283bcbf806aa500404beefc -fb75940003e3107f7f53fa00003e2f16 -fb767d6603b0877fc3f2db00c048ec51 -fb76b318d0ba857f3b567400804b2dc4 -fb7ac300047b4d7f21aa7e0000459dc1 -fb843e0824a7320f9d519800304bcdd3 
-fb87064d45ff695fe7e77600904c55b2 -fb87600003d379cfef164800003d379c -fb88280004ba9bcf57927b00004ba97a -fb8893000457491ff2db600000457491 -fb88a10004c0108f6868a100004c00fb -fb8aba0003fe371ff3980d00003e80e8 -fb8ae200043186bfb45658000042e389 -fb8dde000475472f6af485000047546b -fb916b35f3b91c4f3d43be00d04a59a0 -fb925667ae443eef0116b200f04c23e0 -fb94130004bcd74fdede1d00004bcd24 -fb967b0004b95afff0faa100004b94f9 -fb975100046ed5fffa282f000046ed5f -fb980c0003f6496ff120a400003f6496 -fb9e720003cd7e9ffa3acd00003cd7e9 -fba3270003ec535f0e317200003ebe6a -fba69500047311cfaad14c0000473115 -fbac2c0004c429cfed43d100004c3925 -fbb0280004b45a3f205b7900004a0da1 -fbb12400048e5c7ff4c8eb000048df31 -fbb1f7b72de75f1f7f1d240060495f91 -fbb9b38fb59beabfee96e800a049ad69 -fbc140000448104f4b1c7300004480fb -fbc2244049da873fa360850010481c8c -fbc58c00038fa8bffdd962000038ccc4 -fbc926715fa202ff4c21d90060483955 -fbd1e30004475b2fea76c300003921cb -fbda480003d8f2bff5711700003d8f2b -fbdc26e1445fbf1fed153300d04c5add -fbdebb0004abccaf25e31700004abc64 -fbdee8e30e6ceb2f63e27e00b04b0a86 -fbdfda0003d0acaff1525800003d0aca -fbe047000404c1bff4257a0000404c1b -fbe1d258f893146f25e31700604abc64 -fbe3550004b0deef9b475500004b0de7 -fbe47200049b020f54c36d000049b019 -fbe6760003d36d6ff1fa4800003d36d6 -fbe9d2000475472f6af485000047546b -fbed940003e44bcf2d8e5600003dc243 -fbf1350004a4326fc7c6db000043fa11 -fbf228a437ca6f8fd5a1a300304beb72 -fbf6720003d8631f8b6c9800003d862e -fbf86b000421be1ff372ac0000421be1 -fbfcf7334499048f35bfac0080483b86 -fc0848ded78dcb5fcd636d00c04a038b -fc0e7c00046cd5eff3c456000046cd5e -fc13bb0003f2be0fa3a0fe00003f2ad6 -fc15940003f473bf233fbb00003f45ec -fc16b529bbfe949ff8776b0040485cd3 -fc185c0004ac6b0fde645c00004ac6aa -fc1aab0004ac6b0fde645c00004ac6aa -fc1f910004cdd16fb181be00004cdd0e -fc230e0004cbf14f2f7c5600004a569e -fc28a640be302d9fd7efb300f04ccadb -fc2b103638286e5f68b81300e04b1827 -fc300d0003e57a5ffa82ec00003e57a5 -fc367b0004bcd74fdede1d00004bcd24 -fc3720000471975ff34b090000471975 -fc3fc9000471623ffb035d0000471623 -fc40a700045c891ff3c7bf000045c891 -fc42450004cbd56fc326d800004cbcc6 -fc43d000047372cf01dc850000473725 -fc442c0003f92a8f13f79c00003f92a6 -fc47620003de25effae80d00003de25e -fc4e73c52e992def514e8b00b0473c2c -fc51720003dfa96ffb100b00003dfa96 -fc5abb0004a7fe7f2a330a0000484502 -fc617b000482249f6f928b00004821af -fc62110b781c703f936a8b00c0475cf2 -fc62bc0004c4d37f7181a300004c4cf1 -fc6ddb000423b4fff428680000423b4f -fc7c72000430c86fb743110000430c68 -fc7dd700049a458f672b8c000049a3f4 -fc81590003c197af834f21000038758a -fc81590003c197affaaf2600003c197a -fc83220004974e4f8ec76c0000456c64 -fc87ed00049d853f5ff833000049d76f -fc880ad548c68ccf40b96b00704bf52b -fc8be6c1265a728f63b67400804b64d8 -fc9ba40004ace17f66b5c700004acd19 -fc9d2c000470bacff370560000470bac -fca01979e5d73eefba4c2f0040482527 -fca3a000042e1e9feed737000042e1e9 -fcabaf000430c8efb743110000430c68 -fcb08d0003d7056ff01b5900003d7056 -fcb9af00046e827ff4112c000046e827 -fcbae80003cf83fff1b71100003cf83f -fcbc2f00047c50ff6699f1000047c503 -fcbf33000476ae0fc025d90000475912 -fcc8b25ef0a338af2fade2005048d7cd -fccbb775a8b45aefd2afac00404cb455 -fccdd9000480c22fc025d90000475912 -fcd301000403ba0ffb71900000403ba0 -fcd5380003e3107f7f53fa00003e2f16 -fcded50004430e8ffb9b5300004430e8 -fce9ab0004be5f0fb45e0f00004be5ea -fcea2a7b8f22e2ff0c4e6600004b90bc -fcebaf000430541ffb3ee70000430541 -fcec470003fa7c4f2fd28a00003fa7c1 -fcf1e12426abb96ffcaac300f0484252 -fcf82b8dc15d836f5d1dab00904c4268 -fcfb510003f3c4dff5f40b00003f3c4d -fcfdbe0003a3daaf6c30820000393484 -fcfdbe0003a3daaf834f21000038758a 
-fd057b00047a459fb351d20000473e7e -fd0a4500047e830fa4c82f000047e563 -fd0ab1000437053ffb90500000437053 -fd1cb500047c50ff6699f1000047c503 -fd1ffa0003eda03f06576200003ed7d5 -fd22fe000399de5f05ebe50000399de3 -fd28b30004352d5ff19b5b00004352d5 -fd325ebb44665e0fcca5a8007048d276 -fd37ccc722ff035f01587c00304a2d84 -fd3b02de2d808c2fd141e200404b3fde -fd3edac03c7e0f4f23eaa100604b73e5 -fd40d476a284e57f23521700704cb863 -fd43326dd9eba6efbe17ac006048388c -fd481600043941eff08a63000043941e -fd4cca0004866bbf25cdb90000482893 -fd5c280004ace17f66b5c700004acd19 -fd5e1900048e5c9f58c18f00003d19b1 -fd5fa800048e5abf6c8ff5000048e0a0 -fd62ac00040fa07fb79b12000040fa00 -fd678c0003f9787fcb84c000003d02d0 -fd69c50004365c2f198f5b00004365bf -fd6a390004ad217fb591d000004a886f -fd6bf60004355a2f23e80b00003db666 -fd6c2000046baaff71f67e000046ba8e -fd71570cf17c839f91947900104cd33a -fd74790003d9fa4ff1680b00003d9fa4 -fd79d9000482f76f53b97b0000482f6f -fd8204000433f4ff15a67e0000433f4d -fd8ae90003e2aa1ff3ebbb00003e2aa1 -fd8e3de327b455af91947900204cd33a -fd908285c0c94a5f834f21002038758a -fd908285c0c94a5fb1068700203b603a -fd9f2e14d803170fb676d800004caee0 -fda00b0003d84e5f9aa27200003d84c0 -fda2c2000419cabfba01520000419ca6 -fda65e00043d310ff4e9d1000043d310 -fda75c0004a8947f80f14a00004a893a -fda8b89a6c0e530f341b1200804cb22e -fdaa341bd7556aefd17abc00304be28e -fdb245000481531f58b896000048152f -fdbeec0003f2be0fa3a0fe00003f2ad6 -fdc5de00048333ef3a5ac3000047f70c -fdc6b20003c2a4ff45e4be00003c29c4 -fdc6ce0004c614af4f1c2c00004c6039 -fdc876000445134fd7f56800004450b2 -fddb79f81144a5cf0337d000d047446a -fddf8bfd5e4e147f15447900f04cc6ce -fddffa0003f4750f233fbb00003f45ec -fde10b0003ed454fd22aec00003ed447 -fde7620003e3924ff6cd3800003e3924 -fdec1e38b822332f6f928b00a04821af -fdef81df52498cbf772bdc00d04a6167 -fdf3220004974e4f8ec76c0000456c64 -fdf4db0003f785cff6880d00003f785c -fdf80f00049748afdee5190000393434 -fdfdd700048e71afa559a1000047663a -fe06f6389b88e72fa8eee800a049dfab -fe11170003d3a09f88f00b00003d1a35 -fe12165b35ca3cbf5c2db90060480967 -fe13ae0003ff200ff40d8600003ff200 -fe14280004bbf1ff3cbe6600004ba29a -fe16560004bde28fe78e7400004b2ab0 -fe18c200049748afdee5190000393434 -fe1cd70003c191df308b2600003c1900 -fe1cd70003c191df834f21000038758a -fe1f630003d0d26ffca7b200003d0d26 -fe20790003d84efff4ba7200003d84ef -fe2a235edac943cf5d6cb5008047c432 -fe2e430003f6496ff120a400003f6496 -fe2f760004c011df907f7600004c00f2 -fe36d88eb7de06ffe4486900a04bd985 -fe37dc000436094fddc2b1000043608c -fe37ea00042cb67ff622ca000042cb67 -fe39170004c5aebfed153300004c5add -fe3a9800047169dff5c9dd000047169d -fe3b7b31dd800d3f4a4bec00a0487eba -fe3d5500048e71afa559a1000047663a -fe3e7e00046b995ff560a7000046b995 -fe4447000412d7eff64d7a0000412d7e -fe44a300043ce2fffd275b000043ce2f -fe47f38389d5c28f87f405007049bd32 -fe49170003d4487f07173d00003d1d79 -fe4d7a00040716bfa19bcf0000406c88 -fe508d8c51fd7d4fa66fec0010486734 -fe59940003f63dbff668a400003f63db -fe5ed72c37695e0f2298b500c047e1e8 -fe6f3f0003d84f4f9aa27200003d84c0 -fe6f7900049bb1ffa8f433000049bb1b -fe7d940003f3addff79f6200003f3add -fe84c00003d102fffd1a5800003d102f -fe8535000435971ff362630000435971 -fe855eadc2aed80fe3b37900104a76ab -fe8825b5bb171f4f37297b00b0473876 -fe89170003d3594ffd10fc00003d3594 -fe89f1000482f76f53b97b0000482f6f -fe8f29cd2165d03f26f4c4006048aa8a -fe9915d72b0fd8cf39ec6900804bdc97 -fea2240004868d3f62036b00004868c9 -fea2c200040a0ffffd8190000040a0ff -feaa270003d36d6ff1fa4800003d36d6 -feaba80004991e1f89320d00004991db -feabf5f82ae4043fa56bac000047fd36 -fead7b0004746b0f1bcb0a00004746a7 -feadc500043c7e2f0afff6000043c671 
-feaeba0003fa7c4f2fd28a00003fa7c1 -feb0880003dc222ff43e5600003dc222 -feb4910003d0acaff1525800003d0aca -fec0c00003d11a8fe7620900003d119d -fec4714d9afd31af454dc100804c180c -fecef522ed9e644f72fab200804c720d -fee3bf00044b68af79d76c000044a09e -fee49600047862ff8cf7740000478609 -feee020004714bfffd73c900004714bf -fef0374eaf840d4f5df9c100404c18b7 -fef1f3ce1068dfcf06942c00f04c4267 -ff00eb0004868d3f62036b00004868c9 -ff048f2227e937ef5aa70500b04907ba -ff09b9000478e7af298fd00000478e76 -ff0c470003ff3b9f43768a00003fcbd9 -ff0d430003fa4aaff48e8f00003fa4aa -ff0dada81bb36fcf815f1200204cb675 -ff0f6d00049d7c3f5ff833000049d76f -ff109600047b394f31817b000047b386 -ff117fe5c14095afd953ed00a04a21f1 -ff123b00044944dfcfd476000044944b -ff1497a9ac7683bfc2cda400e04aa129 -ff15538d5e4db14f294f2200a0491637 -ff15dd00042095effdbd52000042095e -ff17ed000499eaef05c6c30000498bb7 -ff1a1c3350eb6abfbf305600c049c920 -ff20d182aa9991bfee91c000304a35ac -ff23584a94a3e78f5412ab00804b268e -ff25c5000436094fddc2b1000043608c -ff28720004a5c28f3d43be00004a59a0 -ff2a560004bb3c9f6e31e200004bb304 -ff2c470003fefd2ff7517a00003fefd2 -ff2d63dce35881ef347fea00c048e394 -ff2eec0003efbdff1672ec00003efbd9 -ff2fd0000476495f9d9245000047648f -ff31dd000471775ffdc5af0000471775 -ff389300045aac5ff610f5000045aac5 -ff391cd1d0a3e37f3f157b00304820f5 -ff43cf00040029ff5ac1dd000040028d -ff44c10003e80e8ff524a400003e80e8 -ff482b2463490a7f8e4c1300004b2869 -ff4c8500047ba42f5dddf1000047b798 -ff53210003d8f2bff6ae9200003d8f2b -ff5cec00038ccc4ffdd962000038ccc4 -ff5dd7000490292f133df40000490290 -ff5eeea677c336af3ce06f000048e6b0 -ff5f170003d9ee4ff8c49800003d9ee4 -ff64a2b51f1075ef746f3300e047d607 -ff68ea000454de5ff69fbf0000454de5 -ff710f66b62d334fffc33300a0479d55 -ff713bc24291b68f47b7ca00f0472aed -ff768b00047ba42f5dddf1000047b798 -ff7d720003f7640ff896ec00003f7640 -ff8b590003d826eff8fa9200003d826e -ff8c5c0004bb791faf0a45000047d45c -ff8ef8000498fb8f2d09f400004967bd -ff905600046f8f5fc2575d000046f8f1 -ff90c7b7dfde1bdf920d96006047dca8 -ff96ab0004b9e7af25865600004b61db -ff9a480003d84f4f9aa27200003d84c0 -ffa39f55ae89585fc01db9009047e249 -ffa7790004a5350f07038c00004a1c02 -ffa84c000429aa8ff4a5db0000429aa8 -ffa8a70004bf157ff3212b00004a3f0c -ffab61c667c8dbaf6fb37600904be61d -ffaba2228275f14f024b70002049a7ff -ffb1f1000475917fc025d90000475912 -ffb3d0000483f84fd25d9600004802d4 -ffb66a4b1502daaf8a43970020472a10 -ffb80b0003f36f9fc1ac0d00003f36ec -ffb90b0003f7b0ef235bbb00003f79ae -ffbe3d7c5fe00bef26c4ca00604886ff -ffc68a00040673ef2adc6b0000406706 -ffc8980003cf5abff83da500003cf5ab -ffc9e20004b9e7ff2e1ece00004b9e7d -ffd06b0004096caf8ca2c200004096c5 -ffd57b00047a915fe7d1de000047a8dd -ffd6990003c7a6efdeab9900003c7a6c -ffd9edb688bbcf8fcea15b0040472ae0 -ffdb4600048d7b0fcdcde2000048d7a0 -ffde1831ef04e6cf8ca7ac00304771c1 -ffdec300047c50ff6699f1000047c503 -ffe42f00047e607f7c8a45000047e603 -ffe67100042ef49ff7727d000042ef49 -ffe8f849b99bd8ff05cab200004c7823 -ffea300003cf83fff1b71100003cf83f -ffedef000441fbbff7925f0000441fbb -fff251294abf807f0e0bbe00f04a146e -fff69d0004352d5ff19b5b00004352d5 diff --git a/inmem/service.go b/inmem/service.go deleted file mode 100644 index cbeea3877a7..00000000000 --- a/inmem/service.go +++ /dev/null @@ -1,86 +0,0 @@ -package inmem - -import ( - "context" - "sync" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/rand" - "github.com/influxdata/influxdb/v2/snowflake" -) - -// OpPrefix is the op prefix. 
-const OpPrefix = "inmem/" - -// Service implements various top level services. -type Service struct { - authorizationKV sync.Map - organizationKV sync.Map - bucketKV sync.Map - userKV sync.Map - dashboardKV sync.Map - viewKV sync.Map - variableKV sync.Map - dbrpMappingKV sync.Map - userResourceMappingKV sync.Map - labelKV sync.Map - labelMappingKV sync.Map - scraperTargetKV sync.Map - telegrafConfigKV sync.Map - onboardingKV sync.Map - basicAuthKV sync.Map - sessionKV sync.Map - sourceKV sync.Map - - TokenGenerator platform.TokenGenerator - IDGenerator platform2.IDGenerator - platform.TimeGenerator -} - -// NewService creates an instance of a Service. -func NewService() *Service { - s := &Service{ - TokenGenerator: rand.NewTokenGenerator(64), - IDGenerator: snowflake.NewIDGenerator(), - TimeGenerator: platform.RealTimeGenerator{}, - } - s.initializeSources(context.TODO()) - return s -} - -// Flush removes all data from the in-memory store -func (s *Service) Flush() { - s.flush(&s.authorizationKV) - s.flush(&s.organizationKV) - s.flush(&s.bucketKV) - s.flush(&s.userKV) - s.flush(&s.dashboardKV) - s.flush(&s.viewKV) - s.flush(&s.variableKV) - s.flush(&s.dbrpMappingKV) - s.flush(&s.userResourceMappingKV) - s.flush(&s.labelKV) - s.flush(&s.labelMappingKV) - s.flush(&s.scraperTargetKV) - s.flush(&s.telegrafConfigKV) - s.flush(&s.onboardingKV) - s.flush(&s.basicAuthKV) - s.flush(&s.sessionKV) - s.flush(&s.sourceKV) -} - -func (s *Service) flush(m *sync.Map) { - keys := []interface{}{} - f := func(key, value interface{}) bool { - keys = append(keys, key) - return true - } - - m.Range(f) - - for _, k := range keys { - m.Delete(k) - } - -} diff --git a/inmem/session_store.go b/inmem/session_store.go deleted file mode 100644 index ed7167d7af8..00000000000 --- a/inmem/session_store.go +++ /dev/null @@ -1,86 +0,0 @@ -package inmem - -import ( - "errors" - "sync" - "time" -) - -type SessionStore struct { - data map[string]string - - timers map[string]*time.Timer - - mu sync.RWMutex -} - -func NewSessionStore() *SessionStore { - return &SessionStore{ - data: map[string]string{}, - timers: map[string]*time.Timer{}, - } -} - -func (s *SessionStore) Set(key, val string, expireAt time.Time) error { - if !expireAt.IsZero() && expireAt.Before(time.Now()) { - // key is already expired. 
no problem - return nil - } - - s.mu.Lock() - s.data[key] = val - s.mu.Unlock() - - if !expireAt.IsZero() { - return s.ExpireAt(key, expireAt) - } - return nil -} - -func (s *SessionStore) Get(key string) (string, error) { - s.mu.RLock() - defer s.mu.RUnlock() - - return s.data[key], nil -} - -func (s *SessionStore) Delete(key string) error { - s.mu.Lock() - defer s.mu.Unlock() - timer := s.timers[key] - if timer != nil { - timer.Stop() - } - - delete(s.data, key) - delete(s.timers, key) - return nil -} - -func (s *SessionStore) ExpireAt(key string, expireAt time.Time) error { - s.mu.Lock() - - existingTimer, ok := s.timers[key] - if ok { - if !existingTimer.Stop() { - return errors.New("session has expired") - } - - } - - duration := time.Until(expireAt) - if duration <= 0 { - s.mu.Unlock() - s.Delete(key) - return nil - } - s.timers[key] = time.AfterFunc(time.Until(expireAt), s.timerExpireFunc(key)) - s.mu.Unlock() - return nil -} - -func (s *SessionStore) timerExpireFunc(key string) func() { - return func() { - s.Delete(key) - } -} diff --git a/inmem/session_store_test.go b/inmem/session_store_test.go deleted file mode 100644 index d4fb46e605f..00000000000 --- a/inmem/session_store_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package inmem_test - -import ( - "testing" - "time" - - "github.com/influxdata/influxdb/v2/inmem" -) - -func TestSessionSet(t *testing.T) { - st := inmem.NewSessionStore() - err := st.Set("hi", "friend", time.Time{}) - if err != nil { - t.Fatal(err) - } - - err = st.Set("hi", "enemy", time.Time{}) - if err != nil { - t.Fatal(err) - } - - word, err := st.Get("hi") - if err != nil { - t.Fatal(err) - } - - if word != "enemy" { - t.Fatalf("got incorrect response: got %s expected: \"enemy\"", word) - } -} -func TestSessionGet(t *testing.T) { - st := inmem.NewSessionStore() - err := st.Set("hi", "friend", time.Now().Add(time.Second)) - if err != nil { - t.Fatal(err) - } - - word, err := st.Get("hi") - if err != nil { - t.Fatal(err) - } - - if word != "friend" { - t.Fatalf("got incorrect response: got %s expected: \"enemy\"", word) - } - - time.Sleep(time.Second * 2) - - word, err = st.Get("hi") - if err != nil { - t.Fatal(err) - } - - if word != "" { - t.Fatalf("expected no words back but got: %s", word) - } -} -func TestSessionDelete(t *testing.T) { - st := inmem.NewSessionStore() - err := st.Set("hi", "friend", time.Time{}) - if err != nil { - t.Fatal(err) - } - - if err := st.Delete("hi"); err != nil { - t.Fatal(err) - } - - word, err := st.Get("hi") - if err != nil { - t.Fatal(err) - } - - if word != "" { - t.Fatalf("expected no words back but got: %s", word) - } - - if err := st.Delete("hi"); err != nil { - t.Fatal(err) - } -} -func TestSessionExpireAt(t *testing.T) { - st := inmem.NewSessionStore() - err := st.Set("hi", "friend", time.Time{}) - if err != nil { - t.Fatal(err) - } - - if err := st.ExpireAt("hi", time.Now().Add(-20)); err != nil { - t.Fatal(err) - } - - word, err := st.Get("hi") - if err != nil { - t.Fatal(err) - } - - if word != "" { - t.Fatalf("expected no words back but got: %s", word) - } - - if err := st.Set("hello", "friend", time.Time{}); err != nil { - t.Fatal(err) - } - - if err := st.ExpireAt("hello", time.Now()); err != nil { - t.Fatal(err) - } - - word, err = st.Get("hello") - if err != nil { - t.Fatal(err) - } - - if word != "" { - t.Fatalf("expected no words back but got: %s", word) - } - - if err := st.Set("yo", "friend", time.Time{}); err != nil { - t.Fatal(err) - } - - if err := st.ExpireAt("yo", 
time.Now().Add(100*time.Microsecond)); err != nil { - t.Fatal(err) - } - - word, err = st.Get("yo") - if err != nil { - t.Fatal(err) - } - - if word != "friend" { - t.Fatalf("expected no words back but got: %q", word) - } - - // add more time to a key - if err := st.ExpireAt("yo", time.Now().Add(time.Second)); err != nil { - t.Fatal(err) - } - - time.Sleep(200 * time.Millisecond) - - word, err = st.Get("yo") - if err != nil { - t.Fatal(err) - } - - if word != "friend" { - t.Fatalf("expected key to still exist but got: %q", word) - } - - time.Sleep(time.Second) - - word, err = st.Get("yo") - if err != nil { - t.Fatal(err) - } - - if word != "" { - t.Fatalf("expected no words back but got: %s", word) - } -} diff --git a/inmem/source.go b/inmem/source.go deleted file mode 100644 index aba973eb1ee..00000000000 --- a/inmem/source.go +++ /dev/null @@ -1,111 +0,0 @@ -package inmem - -import ( - "context" - "fmt" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// DefaultSource is the default source. -var DefaultSource = platform.Source{ - Default: true, - Name: "autogen", - Type: platform.SelfSourceType, -} - -const ( - // DefaultSourceID it the default source identifier - DefaultSourceID = "020f755c3c082000" - // DefaultSourceOrganizationID is the default source's organization identifier - DefaultSourceOrganizationID = "50616e67652c206c" -) - -func init() { - if err := DefaultSource.ID.DecodeFromString(DefaultSourceID); err != nil { - panic(fmt.Sprintf("failed to decode default source id: %v", err)) - } - - if err := DefaultSource.OrganizationID.DecodeFromString(DefaultSourceOrganizationID); err != nil { - panic(fmt.Sprintf("failed to decode default source organization id: %v", err)) - } -} - -func (s *Service) initializeSources(ctx context.Context) error { - _, pe := s.FindSourceByID(ctx, DefaultSource.ID) - if pe != nil && errors.ErrorCode(pe) != errors.ENotFound { - return pe - } - - if errors.ErrorCode(pe) == errors.ENotFound { - if err := s.PutSource(ctx, &DefaultSource); err != nil { - return err - } - } - - return nil -} - -// DefaultSource retrieves the default source. -func (s *Service) DefaultSource(ctx context.Context) (*platform.Source, error) { - // TODO(desa): make this faster by putting the default source in an index. - srcs, _, err := s.FindSources(ctx, platform.FindOptions{}) - if err != nil { - return nil, err - } - - for _, src := range srcs { - if src.Default { - return src, nil - } - } - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "no default source found", - } - -} - -// FindSourceByID retrieves a source by id. -func (s *Service) FindSourceByID(ctx context.Context, id platform2.ID) (*platform.Source, error) { - i, ok := s.sourceKV.Load(id.String()) - if !ok { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: platform.ErrSourceNotFound, - } - } - - src, ok := i.(*platform.Source) - if !ok { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("type %T is not a source", i), - } - } - return src, nil -} - -// FindSources retrieves all sources that match an arbitrary source filter. -// Filters using ID, or OrganizationID and source Name should be efficient. -// Other filters will do a linear scan across all sources searching for a match. 
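// (Editor's sketch — not part of the original file.) A minimal illustration of how a
// caller might list sources with this in-memory service, assuming `s := inmem.NewService()`
// and a `ctx context.Context` are in scope; NewService seeds the "autogen" default
// source, so at least one result is expected:
//
//	srcs, n, err := s.FindSources(ctx, platform.FindOptions{})
//	if err != nil {
//		// handle error
//	}
//	fmt.Printf("found %d sources; first is %q\n", n, srcs[0].Name)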
-func (s *Service) FindSources(ctx context.Context, opt platform.FindOptions) ([]*platform.Source, int, error) { - var ds []*platform.Source - s.sourceKV.Range(func(k, v interface{}) bool { - d, ok := v.(*platform.Source) - if !ok { - return false - } - ds = append(ds, d) - return true - }) - return ds, len(ds), nil -} - -// PutSource will put a source without setting an ID. -func (s *Service) PutSource(ctx context.Context, src *platform.Source) error { - s.sourceKV.Store(src.ID.String(), src) - return nil -} diff --git a/internal/array_cursors.go b/internal/array_cursors.go deleted file mode 100644 index 1eeba3e67a4..00000000000 --- a/internal/array_cursors.go +++ /dev/null @@ -1,138 +0,0 @@ -package internal - -import "github.com/influxdata/influxdb/v2/tsdb" - -var ( - _ tsdb.IntegerArrayCursor = NewIntegerArrayCursorMock() - _ tsdb.FloatArrayCursor = NewFloatArrayCursorMock() - _ tsdb.UnsignedArrayCursor = NewUnsignedArrayCursorMock() - _ tsdb.StringArrayCursor = NewStringArrayCursorMock() - _ tsdb.BooleanArrayCursor = NewBooleanArrayCursorMock() -) - -// ArrayCursorMock provides a mock base implementation for batch cursors. -type ArrayCursorMock struct { - CloseFn func() - ErrFn func() error - StatsFn func() tsdb.CursorStats -} - -// NewArrayCursorMock returns an initialised ArrayCursorMock, which -// returns the zero value for all methods. -func NewArrayCursorMock() *ArrayCursorMock { - return &ArrayCursorMock{ - CloseFn: func() {}, - ErrFn: func() error { return nil }, - StatsFn: func() tsdb.CursorStats { return tsdb.CursorStats{} }, - } -} - -// Close closes the cursor. -func (c *ArrayCursorMock) Close() { c.CloseFn() } - -// Err returns the latest error, if any. -func (c *ArrayCursorMock) Err() error { return c.ErrFn() } - -func (c *ArrayCursorMock) Stats() tsdb.CursorStats { - return c.StatsFn() -} - -// IntegerArrayCursorMock provides a mock implementation of an IntegerArrayCursorMock. -type IntegerArrayCursorMock struct { - *ArrayCursorMock - NextFn func() *tsdb.IntegerArray -} - -// NewIntegerArrayCursorMock returns an initialised IntegerArrayCursorMock, which -// returns the zero value for all methods. -func NewIntegerArrayCursorMock() *IntegerArrayCursorMock { - return &IntegerArrayCursorMock{ - ArrayCursorMock: NewArrayCursorMock(), - NextFn: func() *tsdb.IntegerArray { return tsdb.NewIntegerArrayLen(0) }, - } -} - -// Next returns the next set of keys and values. -func (c *IntegerArrayCursorMock) Next() *tsdb.IntegerArray { - return c.NextFn() -} - -// FloatArrayCursorMock provides a mock implementation of a FloatArrayCursor. -type FloatArrayCursorMock struct { - *ArrayCursorMock - NextFn func() *tsdb.FloatArray -} - -// NewFloatArrayCursorMock returns an initialised FloatArrayCursorMock, which -// returns the zero value for all methods. -func NewFloatArrayCursorMock() *FloatArrayCursorMock { - return &FloatArrayCursorMock{ - ArrayCursorMock: NewArrayCursorMock(), - NextFn: func() *tsdb.FloatArray { return tsdb.NewFloatArrayLen(0) }, - } -} - -// Next returns the next set of keys and values. -func (c *FloatArrayCursorMock) Next() *tsdb.FloatArray { - return c.NextFn() -} - -// UnsignedArrayCursorMock provides a mock implementation of an UnsignedArrayCursorMock. -type UnsignedArrayCursorMock struct { - *ArrayCursorMock - NextFn func() *tsdb.UnsignedArray -} - -// NewUnsignedArrayCursorMock returns an initialised UnsignedArrayCursorMock, which -// returns the zero value for all methods. 
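// (Editor's sketch — not part of the original file.) These mocks are typically
// configured in tests by overriding the exported *Fn fields; for example, an
// integer cursor that yields one fixed batch and then the usual zero-length
// array. The Timestamps/Values field names are an assumption based on how
// tsdb.IntegerArray is used elsewhere in the repository:
//
//	cur := NewIntegerArrayCursorMock()
//	batchSent := false
//	cur.NextFn = func() *tsdb.IntegerArray {
//		if batchSent {
//			return tsdb.NewIntegerArrayLen(0) // empty array signals exhaustion
//		}
//		batchSent = true
//		a := tsdb.NewIntegerArrayLen(2)
//		a.Timestamps[0], a.Values[0] = 1000, 10
//		a.Timestamps[1], a.Values[1] = 2000, 20
//		return a
//	}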
-func NewUnsignedArrayCursorMock() *UnsignedArrayCursorMock { - return &UnsignedArrayCursorMock{ - ArrayCursorMock: NewArrayCursorMock(), - NextFn: func() *tsdb.UnsignedArray { return tsdb.NewUnsignedArrayLen(0) }, - } -} - -// Next returns the next set of keys and values. -func (c *UnsignedArrayCursorMock) Next() *tsdb.UnsignedArray { - return c.NextFn() -} - -// StringArrayCursorMock provides a mock implementation of a StringArrayCursor. -type StringArrayCursorMock struct { - *ArrayCursorMock - NextFn func() *tsdb.StringArray -} - -// NewStringArrayCursorMock returns an initialised StringArrayCursorMock, which -// returns the zero value for all methods. -func NewStringArrayCursorMock() *StringArrayCursorMock { - return &StringArrayCursorMock{ - ArrayCursorMock: NewArrayCursorMock(), - NextFn: func() *tsdb.StringArray { return tsdb.NewStringArrayLen(0) }, - } -} - -// Next returns the next set of keys and values. -func (c *StringArrayCursorMock) Next() *tsdb.StringArray { - return c.NextFn() -} - -// BooleanArrayCursorMock provides a mock implementation of a BooleanArrayCursor. -type BooleanArrayCursorMock struct { - *ArrayCursorMock - NextFn func() *tsdb.BooleanArray -} - -// NewBooleanArrayCursorMock returns an initialised BooleanArrayCursorMock, which -// returns the zero value for all methods. -func NewBooleanArrayCursorMock() *BooleanArrayCursorMock { - return &BooleanArrayCursorMock{ - ArrayCursorMock: NewArrayCursorMock(), - NextFn: func() *tsdb.BooleanArray { return tsdb.NewBooleanArrayLen(0) }, - } -} - -// Next returns the next set of keys and values. -func (c *BooleanArrayCursorMock) Next() *tsdb.BooleanArray { - return c.NextFn() -} diff --git a/internal/authorizer.go b/internal/authorizer.go deleted file mode 100644 index 39e8145c6c7..00000000000 --- a/internal/authorizer.go +++ /dev/null @@ -1,38 +0,0 @@ -package internal - -import ( - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxql" -) - -// AuthorizerMock is a mockable implementation of a query.Authorizer. -type AuthorizerMock struct { - AuthorizeDatabaseFn func(influxql.Privilege, string) bool - AuthorizeQueryFn func(database string, query *influxql.Query) error - AuthorizeSeriesReadFn func(database string, measurement []byte, tags models.Tags) bool - AuthorizeSeriesWriteFn func(database string, measurement []byte, tags models.Tags) bool -} - -// AuthorizeDatabase determines if the provided privilege is sufficient to -// authorise access to the database. -func (a *AuthorizerMock) AuthorizeDatabase(p influxql.Privilege, name string) bool { - return a.AuthorizeDatabaseFn(p, name) -} - -// AuthorizeQuery determines if the query can be executed against the provided -// database. -func (a *AuthorizerMock) AuthorizeQuery(database string, query *influxql.Query) error { - return a.AuthorizeQueryFn(database, query) -} - -// AuthorizeSeriesRead determines if the series comprising measurement and tags -// can be read on the provided database. -func (a *AuthorizerMock) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool { - return a.AuthorizeSeriesReadFn(database, measurement, tags) -} - -// AuthorizeSeriesWrite determines if the series comprising measurement and tags -// can be written to, on the provided database. 
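// (Editor's sketch — not part of the original file.) A test would normally build
// the mock with only the hooks it needs, for example granting read access to a
// single database; the database name "db0" below is purely illustrative:
//
//	auth := &AuthorizerMock{
//		AuthorizeDatabaseFn: func(p influxql.Privilege, name string) bool {
//			return name == "db0" && p == influxql.ReadPrivilege
//		},
//	}
//	_ = auth.AuthorizeDatabase(influxql.ReadPrivilege, "db0")  // true
//	_ = auth.AuthorizeDatabase(influxql.WritePrivilege, "db0") // false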
-func (a *AuthorizerMock) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool { - return a.AuthorizeSeriesWriteFn(database, measurement, tags) -} diff --git a/internal/cmd/fluxtest-harness-influxdb/test.go b/internal/cmd/fluxtest-harness-influxdb/test.go deleted file mode 100644 index c9b170ee8e5..00000000000 --- a/internal/cmd/fluxtest-harness-influxdb/test.go +++ /dev/null @@ -1,285 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "os/exec" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/cmd/flux/cmd" - "github.com/influxdata/flux/lang" - "github.com/influxdata/flux/parser" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" - "github.com/influxdata/influxdb/v2/query" - "github.com/spf13/cobra" -) - -type testFlags struct { - wait bool -} - -var flags testFlags - -type testExecutor struct { - ctx context.Context - l *launcher.TestLauncher - writeOptAST *ast.File - readOptAST *ast.File - errOutput bytes.Buffer - i int - failed bool -} - -func NewTestExecutor(ctx context.Context) (cmd.TestExecutor, error) { - e := &testExecutor{ctx: ctx} - e.init() - - e.l = launcher.NewTestLauncher() - if err := e.l.Run(e, ctx); err != nil { - _ = e.l.Shutdown(context.Background()) - return nil, err - } - - if err := e.l.Setup(); err != nil { - _ = e.l.Shutdown(context.Background()) - return nil, err - } - return e, nil -} - -func (t *testExecutor) init() { - t.writeOptAST = prepareOptions(writeOptSource) - t.readOptAST = prepareOptions(readOptSource) -} - -func (t *testExecutor) Close() error { - if t.l == nil { - return nil - } - - if err := t.l.Shutdown(context.Background()); err != nil { - return err - } - t.l = nil - - if t.Failed() { - _, _ = io.Copy(os.Stdout, &t.errOutput) - } - return nil -} - -func (t *testExecutor) Run(pkg *ast.Package, fn cmd.TestResultFunc) error { - l := t.l.Launcher - b := &influxdb.Bucket{ - OrgID: t.l.Org.ID, - Name: fmt.Sprintf("%04d", t.i), - } - t.i++ - - s := l.BucketService() - if err := s.CreateBucket(t.ctx, b); err != nil { - return err - } - defer func() { _ = s.DeleteBucket(t.ctx, b.ID) }() - - // Define bucket and org options - bucketOpt := &ast.OptionStatement{ - Assignment: &ast.VariableAssignment{ - ID: &ast.Identifier{Name: "bucket"}, - Init: &ast.StringLiteral{Value: b.Name}, - }, - } - orgOpt := &ast.OptionStatement{ - Assignment: &ast.VariableAssignment{ - ID: &ast.Identifier{Name: "org"}, - Init: &ast.StringLiteral{Value: t.l.Org.Name}, - }, - } - - // During the first execution, we are performing the writes - // that are in the testcase. - err := t.executeWithOptions(bucketOpt, orgOpt, t.writeOptAST, pkg, func(ctx context.Context, results flux.ResultIterator) error { - for results.More() { - res := results.Next() - if err := res.Tables().Do(func(table flux.Table) error { - table.Done() - return nil - }); err != nil { - return err - } - } - return nil - }) - if err != nil { - // Some test assertions can fail in the first pass, so those errors do not fail the test case. - // However those errors can be useful when the error is unexpected, therefore we simply log the error here. - fmt.Printf("Error from write pass: %s\n", err) - } - - // Execute the read pass. 
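	// (Editor's note.) The write pass above only loads the testcase data through the
	// to()-based testing.load option (writeOptSource below); this second pass swaps in
	// the from(bucket:) loader (readOptSource) and hands the query results to fn, the
	// harness callback that inspects the assertion results and reports them.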
- err = t.executeWithOptions(bucketOpt, orgOpt, t.readOptAST, pkg, fn) - if flags.wait { - // TODO(nathanielc): When the executor is given access to the test name, - // make the configName a function of the test name. - configName := "flux-test" - err := createInfluxDBConfig(t.ctx, configName, t.l.URL().String(), t.l.Org.Name, t.l.Auth.Token) - if err != nil { - return err - } - fmt.Printf("Use: `influx -c %s` to connect to the running test instance.\n", configName) - fmt.Printf("Test bucket is: %q\n", b.Name) - fmt.Println("Waiting, press enter to continue.") - wait := make(chan struct{}) - go func() { - scanner := bufio.NewScanner(os.Stdin) - scanner.Scan() - close(wait) - }() - - // wait for input on stdin or context cancelled - select { - case <-wait: - case <-t.ctx.Done(): - } - err = rmInfluxDBConfig(t.ctx, configName) - if err != nil { - return err - } - } - return err -} - -func (t *testExecutor) executeWithOptions(bucketOpt, orgOpt *ast.OptionStatement, optionsAST *ast.File, pkg *ast.Package, fn cmd.TestResultFunc) error { - options := optionsAST.Copy().(*ast.File) - options.Body = append([]ast.Statement{bucketOpt, orgOpt}, options.Body...) - - // Add options to pkg - pkg = pkg.Copy().(*ast.Package) - pkg.Files = append([]*ast.File{options}, pkg.Files...) - - bs, err := json.Marshal(pkg) - if err != nil { - return err - } - - req := &query.Request{ - OrganizationID: t.l.Org.ID, - Compiler: lang.ASTCompiler{AST: bs}, - } - - r, err := t.l.FluxQueryService().Query(t.ctx, req) - if err != nil { - return err - } - defer r.Release() - - return fn(t.ctx, r) -} - -// This options definition puts to() in the path of the CSV input. The tests -// get run in this case and they would normally pass, if we checked the -// results, but don't look at them. -const writeOptSource = ` -import "testing" -import c "csv" - -option testing.load = (tables=<-) => { - return tables |> to(bucket: bucket, org: org) -} -` - -// This options definition is for the second run, the test run. It loads the -// data from previously written bucket. We check the results after running this -// second pass and report on them. -const readOptSource = ` -import "testing" -import c "csv" - -option testing.load = (tables=<-) => { - return from(bucket: bucket) -} -` - -func prepareOptions(optionsSource string) *ast.File { - pkg := parser.ParseSource(optionsSource) - if ast.Check(pkg) > 0 { - panic(ast.GetError(pkg)) - } - return pkg.Files[0] -} - -func (t *testExecutor) Logf(s string, i ...interface{}) { - _, _ = fmt.Fprintf(&t.errOutput, s, i...) - _, _ = fmt.Fprintln(&t.errOutput) -} - -func (t *testExecutor) Errorf(s string, i ...interface{}) { - t.Logf(s, i...) 
- t.Fail() -} - -func (t *testExecutor) Fail() { - t.failed = true -} - -func (t *testExecutor) Failed() bool { - return t.failed -} - -func (t *testExecutor) Name() string { - return "flux" -} - -func (t *testExecutor) FailNow() { - t.Fail() - panic(errors.New("abort")) -} - -func tryExec(cmd *cobra.Command) (err error) { - defer func() { - if e := recover(); e != nil { - var ok bool - err, ok = e.(error) - if !ok { - err = errors.New(fmt.Sprint(e)) - } - } - }() - err = cmd.Execute() - return -} - -func main() { - c := cmd.TestCommand(NewTestExecutor) - c.Use = "fluxtest-harness-influxdb" - c.Flags().BoolVarP(&flags.wait, "wait", "w", false, "Wait for a kill signal before shutting down the InfluxDB test instances.") - if err := tryExec(c); err != nil { - fmt.Println(err) - os.Exit(1) - } -} - -func createInfluxDBConfig(ctx context.Context, name, host, org, token string) error { - cmd := exec.CommandContext(ctx, "influx", "config", "create", "-n", name, "-u", host, "-o", org, "-t", token) - out, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("Failed to create influx config. Error: %s Output: %s", err, string(out)) - } - return nil -} -func rmInfluxDBConfig(ctx context.Context, name string) error { - cmd := exec.CommandContext(ctx, "influx", "config", "rm", name) - out, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("Failed to remove influx config. Error: %s Output: %s", err, string(out)) - } - return nil -} diff --git a/internal/cmd/kvmigrate/main.go b/internal/cmd/kvmigrate/main.go deleted file mode 100644 index 675e2b86282..00000000000 --- a/internal/cmd/kvmigrate/main.go +++ /dev/null @@ -1,32 +0,0 @@ -package main - -import ( - "fmt" - "os" - "strings" - - "github.com/influxdata/influxdb/v2/kv/migration" - "github.com/influxdata/influxdb/v2/kv/migration/all" -) - -var usageMsg = "Usage: kvmigrate create " - -func usage() { - fmt.Println(usageMsg) - os.Exit(1) -} - -func main() { - if len(os.Args) < 3 { - usage() - } - - if os.Args[1] != "create" { - fmt.Printf("unrecognized command %q\n", os.Args[1]) - usage() - } - - if err := migration.CreateNewMigration(all.Migrations[:], strings.Join(os.Args[2:], " ")); err != nil { - panic(err) - } -} diff --git a/internal/fs/influx_dir.go b/internal/fs/influx_dir.go deleted file mode 100644 index e225f9a7dba..00000000000 --- a/internal/fs/influx_dir.go +++ /dev/null @@ -1,65 +0,0 @@ -package fs - -import ( - "fmt" - "os" - "os/user" - "path/filepath" - "strings" -) - -// DefaultTokenFile is deprecated, and will be only used for migration. -const DefaultTokenFile = "credentials" - -// DefaultConfigsFile stores cli credentials and hosts. -const DefaultConfigsFile = "configs" - -// InfluxDir retrieves the influxdbv2 directory. 
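// (Editor's sketch — not part of the original file.) Typical usage resolves the
// directory first and then locates the bolt file beneath it; on a Unix system the
// directory is usually $HOME/.influxdbv2:
//
//	dir, err := fs.InfluxDir()
//	if err != nil {
//		// handle error
//	}
//	boltPath, err := fs.BoltFile() // walks the influx dir for a path containing ".bolt"
//	_, _ = dir, boltPath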
-func InfluxDir() (string, error) { - var dir string - // By default, store meta and data files in current users home directory - u, err := user.Current() - if err == nil { - dir = u.HomeDir - } else if home := os.Getenv("HOME"); home != "" { - dir = home - } else { - wd, err := os.Getwd() - if err != nil { - return "", err - } - dir = wd - } - dir = filepath.Join(dir, ".influxdbv2") - - return dir, nil -} - -// BoltFile returns the path to the bolt file for influxdb -func BoltFile() (string, error) { - dir, err := InfluxDir() - if err != nil { - return "", err - } - var file string - filepath.WalkDir(dir, func(p string, info os.DirEntry, err error) error { - if err != nil { - return err - } - if file != "" { - return fmt.Errorf("bolt file found") - } - - if strings.Contains(p, ".bolt") { - file = p - } - - return nil - }) - - if file == "" { - return "", fmt.Errorf("bolt file not found") - } - - return file, nil -} diff --git a/internal/meta_client.go b/internal/meta_client.go deleted file mode 100644 index 6b7859b726e..00000000000 --- a/internal/meta_client.go +++ /dev/null @@ -1,179 +0,0 @@ -package internal - -import ( - "time" - - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/influxdata/influxql" -) - -// MetaClientMock is a mockable implementation of meta.MetaClient. -type MetaClientMock struct { - CloseFn func() error - CreateContinuousQueryFn func(database, name, query string) error - CreateDatabaseFn func(name string) (*meta.DatabaseInfo, error) - CreateDatabaseWithRetentionPolicyFn func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) - CreateRetentionPolicyFn func(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) - CreateShardGroupFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) - CreateSubscriptionFn func(database, rp, name, mode string, destinations []string) error - CreateUserFn func(name, password string, admin bool) (meta.User, error) - - DatabaseFn func(name string) *meta.DatabaseInfo - DatabasesFn func() []meta.DatabaseInfo - - DataFn func() meta.Data - DeleteShardGroupFn func(database string, policy string, id uint64) error - DropContinuousQueryFn func(database, name string) error - DropDatabaseFn func(name string) error - DropRetentionPolicyFn func(database, name string) error - DropSubscriptionFn func(database, rp, name string) error - DropShardFn func(id uint64) error - DropUserFn func(name string) error - - OpenFn func() error - - PrecreateShardGroupsFn func(from, to time.Time) error - PruneShardGroupsFn func() error - - RetentionPolicyFn func(database, name string) (rpi *meta.RetentionPolicyInfo, err error) - - AuthenticateFn func(username, password string) (ui meta.User, err error) - AdminUserExistsFn func() bool - SetAdminPrivilegeFn func(username string, admin bool) error - SetDataFn func(*meta.Data) error - SetPrivilegeFn func(username, database string, p influxql.Privilege) error - ShardGroupsByTimeRangeFn func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) - ShardOwnerFn func(shardID uint64) (database, policy string, sgi *meta.ShardGroupInfo) - TruncateShardGroupsFn func(t time.Time) error - UpdateRetentionPolicyFn func(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error - UpdateUserFn func(name, password string) error - UserPrivilegeFn func(username, database string) (*influxql.Privilege, error) - UserPrivilegesFn func(username string) 
(map[string]influxql.Privilege, error) - UserFn func(username string) (meta.User, error) - UsersFn func() []meta.UserInfo -} - -func (c *MetaClientMock) Close() error { - return c.CloseFn() -} - -func (c *MetaClientMock) CreateContinuousQuery(database, name, query string) error { - return c.CreateContinuousQueryFn(database, name, query) -} - -func (c *MetaClientMock) CreateDatabase(name string) (*meta.DatabaseInfo, error) { - return c.CreateDatabaseFn(name) -} - -func (c *MetaClientMock) CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { - return c.CreateDatabaseWithRetentionPolicyFn(name, spec) -} - -func (c *MetaClientMock) CreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) { - return c.CreateRetentionPolicyFn(database, spec, makeDefault) -} - -func (c *MetaClientMock) CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - return c.CreateShardGroupFn(database, policy, timestamp) -} - -func (c *MetaClientMock) CreateSubscription(database, rp, name, mode string, destinations []string) error { - return c.CreateSubscriptionFn(database, rp, name, mode, destinations) -} - -func (c *MetaClientMock) CreateUser(name, password string, admin bool) (meta.User, error) { - return c.CreateUserFn(name, password, admin) -} - -func (c *MetaClientMock) Database(name string) *meta.DatabaseInfo { - return c.DatabaseFn(name) -} - -func (c *MetaClientMock) Databases() []meta.DatabaseInfo { - return c.DatabasesFn() -} - -func (c *MetaClientMock) DeleteShardGroup(database string, policy string, id uint64) error { - return c.DeleteShardGroupFn(database, policy, id) -} - -func (c *MetaClientMock) DropContinuousQuery(database, name string) error { - return c.DropContinuousQueryFn(database, name) -} - -func (c *MetaClientMock) DropDatabase(name string) error { - return c.DropDatabaseFn(name) -} - -func (c *MetaClientMock) DropRetentionPolicy(database, name string) error { - return c.DropRetentionPolicyFn(database, name) -} - -func (c *MetaClientMock) DropShard(id uint64) error { - return c.DropShardFn(id) -} - -func (c *MetaClientMock) DropSubscription(database, rp, name string) error { - return c.DropSubscriptionFn(database, rp, name) -} - -func (c *MetaClientMock) DropUser(name string) error { - return c.DropUserFn(name) -} - -func (c *MetaClientMock) RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) { - return c.RetentionPolicyFn(database, name) -} - -func (c *MetaClientMock) SetAdminPrivilege(username string, admin bool) error { - return c.SetAdminPrivilegeFn(username, admin) -} - -func (c *MetaClientMock) SetPrivilege(username, database string, p influxql.Privilege) error { - return c.SetPrivilegeFn(username, database, p) -} - -func (c *MetaClientMock) ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { - return c.ShardGroupsByTimeRangeFn(database, policy, min, max) -} - -func (c *MetaClientMock) ShardOwner(shardID uint64) (database, policy string, sgi *meta.ShardGroupInfo) { - return c.ShardOwnerFn(shardID) -} - -func (c *MetaClientMock) TruncateShardGroups(t time.Time) error { - return c.TruncateShardGroupsFn(t) -} - -func (c *MetaClientMock) UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error { - return c.UpdateRetentionPolicyFn(database, name, rpu, makeDefault) -} - -func (c *MetaClientMock) 
UpdateUser(name, password string) error { - return c.UpdateUserFn(name, password) -} - -func (c *MetaClientMock) UserPrivilege(username, database string) (*influxql.Privilege, error) { - return c.UserPrivilegeFn(username, database) -} - -func (c *MetaClientMock) UserPrivileges(username string) (map[string]influxql.Privilege, error) { - return c.UserPrivilegesFn(username) -} - -func (c *MetaClientMock) Authenticate(username, password string) (meta.User, error) { - return c.AuthenticateFn(username, password) -} -func (c *MetaClientMock) AdminUserExists() bool { return c.AdminUserExistsFn() } - -func (c *MetaClientMock) User(username string) (meta.User, error) { return c.UserFn(username) } -func (c *MetaClientMock) Users() []meta.UserInfo { return c.UsersFn() } - -func (c *MetaClientMock) Open() error { return c.OpenFn() } -func (c *MetaClientMock) Data() meta.Data { return c.DataFn() } -func (c *MetaClientMock) SetData(d *meta.Data) error { return c.SetDataFn(d) } - -func (c *MetaClientMock) PrecreateShardGroups(from, to time.Time) error { - return c.PrecreateShardGroupsFn(from, to) -} -func (c *MetaClientMock) PruneShardGroups() error { return c.PruneShardGroupsFn() } diff --git a/internal/rand/locked_source.go b/internal/rand/locked_source.go deleted file mode 100644 index 1e667287974..00000000000 --- a/internal/rand/locked_source.go +++ /dev/null @@ -1,33 +0,0 @@ -package rand - -import ( - "math/rand" - "sync" -) - -// LockedSource is taken from the Go "math/rand" package. -// The default rand functions use a similar type under the hood, this does not introduce any additional -// locking than using the default functions. -type LockedSource struct { - lk sync.Mutex - src rand.Source -} - -func NewLockedSourceFromSeed(seed int64) *LockedSource { - return &LockedSource{ - src: rand.NewSource(seed), - } -} - -func (r *LockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *LockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} diff --git a/internal/resource/resolve.go b/internal/resource/resolve.go deleted file mode 100644 index 6331c87b066..00000000000 --- a/internal/resource/resolve.go +++ /dev/null @@ -1,331 +0,0 @@ -package resource - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -// Resolver is a type which combines multiple resource services -// in order to resolve the resources associated org ID. -// Ideally you do not need to use this type, it is mostly a stop-gap -// while we migrate responsibilities off of *kv.Service. -// Consider it deprecated. 
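The LockedSource just shown satisfies `rand.Source`, so it can back a `*rand.Rand` that several goroutines share, which is the same trick the global `math/rand` functions use internally. A minimal, self-contained sketch of that usage; the type is re-declared locally here only because `internal/rand` is not importable from outside the module:

```go
package main

import (
	"fmt"
	"math/rand"
	"sync"
)

// lockedSource mirrors the LockedSource type in the diff above: a
// rand.Source guarded by a mutex so it can be shared across goroutines.
type lockedSource struct {
	mu  sync.Mutex
	src rand.Source
}

func (s *lockedSource) Int63() int64 {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.src.Int63()
}

func (s *lockedSource) Seed(seed int64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.src.Seed(seed)
}

func main() {
	// Because lockedSource satisfies rand.Source, it can back a *rand.Rand
	// that several goroutines may call concurrently.
	r := rand.New(&lockedSource{src: rand.NewSource(42)})

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(r.Int63())
		}()
	}
	wg.Wait()
}
```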
-type Resolver struct { - AuthorizationFinder interface { - FindAuthorizationByID(context.Context, platform.ID) (*influxdb.Authorization, error) - } - BucketFinder interface { - FindBucketByID(context.Context, platform.ID) (*influxdb.Bucket, error) - } - OrganizationFinder interface { - FindOrganizationByID(context.Context, platform.ID) (*influxdb.Organization, error) - } - DashboardFinder interface { - FindDashboardByID(context.Context, platform.ID) (*influxdb.Dashboard, error) - } - SourceFinder interface { - FindSourceByID(context.Context, platform.ID) (*influxdb.Source, error) - } - TaskFinder interface { - FindTaskByID(context.Context, platform.ID) (*taskmodel.Task, error) - } - TelegrafConfigFinder interface { - FindTelegrafConfigByID(context.Context, platform.ID) (*influxdb.TelegrafConfig, error) - } - VariableFinder interface { - FindVariableByID(context.Context, platform.ID) (*influxdb.Variable, error) - } - TargetFinder interface { - GetTargetByID(context.Context, platform.ID) (*influxdb.ScraperTarget, error) - } - CheckFinder interface { - FindCheckByID(context.Context, platform.ID) (influxdb.Check, error) - } - NotificationEndpointFinder interface { - FindNotificationEndpointByID(context.Context, platform.ID) (influxdb.NotificationEndpoint, error) - } - NotificationRuleFinder interface { - FindNotificationRuleByID(context.Context, platform.ID) (influxdb.NotificationRule, error) - } -} - -// FindResourceOrganizationID is used to find the organization that a resource belongs to given the id of a resource and a resource type. -func (o *Resolver) FindResourceOrganizationID(ctx context.Context, rt influxdb.ResourceType, id platform.ID) (platform.ID, error) { - switch rt { - case influxdb.AuthorizationsResourceType: - if o.AuthorizationFinder == nil { - break - } - - r, err := o.AuthorizationFinder.FindAuthorizationByID(ctx, id) - if err != nil { - return platform.InvalidID(), err - } - - return r.OrgID, nil - case influxdb.BucketsResourceType: - if o.BucketFinder == nil { - break - } - - r, err := o.BucketFinder.FindBucketByID(ctx, id) - if err != nil { - return platform.InvalidID(), err - } - - return r.OrgID, nil - case influxdb.OrgsResourceType: - if o.OrganizationFinder == nil { - break - } - - r, err := o.OrganizationFinder.FindOrganizationByID(ctx, id) - if err != nil { - return platform.InvalidID(), err - } - - return r.ID, nil - case influxdb.DashboardsResourceType: - if o.DashboardFinder == nil { - break - } - - r, err := o.DashboardFinder.FindDashboardByID(ctx, id) - if err != nil { - return platform.InvalidID(), err - } - - return r.OrganizationID, nil - case influxdb.SourcesResourceType: - if o.SourceFinder == nil { - break - } - - r, err := o.SourceFinder.FindSourceByID(ctx, id) - if err != nil { - return platform.InvalidID(), err - } - - return r.OrganizationID, nil - case influxdb.TasksResourceType: - if o.TaskFinder == nil { - break - } - - r, err := o.TaskFinder.FindTaskByID(ctx, id) - if err != nil { - return platform.InvalidID(), err - } - - return r.OrganizationID, nil - case influxdb.TelegrafsResourceType: - if o.TelegrafConfigFinder == nil { - break - } - - r, err := o.TelegrafConfigFinder.FindTelegrafConfigByID(ctx, id) - if err != nil { - return platform.InvalidID(), err - } - - return r.OrgID, nil - case influxdb.VariablesResourceType: - if o.VariableFinder == nil { - break - } - - r, err := o.VariableFinder.FindVariableByID(ctx, id) - if err != nil { - return platform.InvalidID(), err - } - - return r.OrganizationID, nil - case influxdb.ScraperResourceType:
- if o.TargetFinder == nil { - break - } - - r, err := o.TargetFinder.GetTargetByID(ctx, id) - if err != nil { - return platform.InvalidID(), err - } - - return r.OrgID, nil - case influxdb.ChecksResourceType: - if o.CheckFinder == nil { - break - } - - r, err := o.CheckFinder.FindCheckByID(ctx, id) - if err != nil { - return platform.InvalidID(), err - } - - return r.GetOrgID(), nil - case influxdb.NotificationEndpointResourceType: - if o.NotificationEndpointFinder == nil { - break - } - - r, err := o.NotificationEndpointFinder.FindNotificationEndpointByID(ctx, id) - if err != nil { - return platform.InvalidID(), err - } - - return r.GetOrgID(), nil - case influxdb.NotificationRuleResourceType: - if o.NotificationRuleFinder == nil { - break - } - - r, err := o.NotificationRuleFinder.FindNotificationRuleByID(ctx, id) - if err != nil { - return platform.InvalidID(), err - } - - return r.GetOrgID(), nil - } - - return platform.InvalidID(), &errors.Error{ - Msg: fmt.Sprintf("unsupported resource type %s", rt), - } -} - -// FindResourceName is used to find the name of the resource associated with the provided type and id. -func (o *Resolver) FindResourceName(ctx context.Context, rt influxdb.ResourceType, id platform.ID) (string, error) { - switch rt { - case influxdb.AuthorizationsResourceType: - // keeping this consistent with the original kv implementation - return "", nil - case influxdb.BucketsResourceType: - if o.BucketFinder == nil { - break - } - - r, err := o.BucketFinder.FindBucketByID(ctx, id) - if err != nil { - return "", err - } - - return r.Name, nil - case influxdb.OrgsResourceType: - if o.OrganizationFinder == nil { - break - } - - r, err := o.OrganizationFinder.FindOrganizationByID(ctx, id) - if err != nil { - return "", err - } - - return r.Name, nil - case influxdb.DashboardsResourceType: - if o.DashboardFinder == nil { - break - } - - r, err := o.DashboardFinder.FindDashboardByID(ctx, id) - if err != nil { - return "", err - } - - return r.Name, nil - case influxdb.SourcesResourceType: - if o.SourceFinder == nil { - break - } - - r, err := o.SourceFinder.FindSourceByID(ctx, id) - if err != nil { - return "", err - } - - return r.Name, nil - case influxdb.TasksResourceType: - if o.TaskFinder == nil { - break - } - - r, err := o.TaskFinder.FindTaskByID(ctx, id) - if err != nil { - return "", err - } - - return r.Name, nil - case influxdb.TelegrafsResourceType: - if o.TelegrafConfigFinder == nil { - break - } - - r, err := o.TelegrafConfigFinder.FindTelegrafConfigByID(ctx, id) - if err != nil { - return "", err - } - - return r.Name, nil - case influxdb.VariablesResourceType: - if o.VariableFinder == nil { - break - } - - r, err := o.VariableFinder.FindVariableByID(ctx, id) - if err != nil { - return "", nil - } - - return r.Name, nil - case influxdb.ScraperResourceType: - if o.TargetFinder == nil { - break - } - - r, err := o.TargetFinder.GetTargetByID(ctx, id) - if err != nil { - return "", err - } - - return r.Name, nil - case influxdb.ChecksResourceType: - if o.CheckFinder == nil { - break - } - - r, err := o.CheckFinder.FindCheckByID(ctx, id) - if err != nil { - return "", err - } - - return r.GetName(), nil - case influxdb.NotificationEndpointResourceType: - if o.NotificationEndpointFinder == nil { - break - } - - r, err := o.NotificationEndpointFinder.FindNotificationEndpointByID(ctx, id) - if err != nil { - return "", err - } - - return r.GetName(), nil - case influxdb.NotificationRuleResourceType: - if o.NotificationRuleFinder == nil { - break - } - - r, err := 
o.NotificationRuleFinder.FindNotificationRuleByID(ctx, id) - if err != nil { - return "", err - } - - return r.GetName(), nil - } - - // default behaviour (in-line with original implementation) is to just return - // an empty name - return "", nil -} diff --git a/internal/shard/writer.go b/internal/shard/writer.go deleted file mode 100644 index a315f5757f6..00000000000 --- a/internal/shard/writer.go +++ /dev/null @@ -1,167 +0,0 @@ -package shard - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - - "github.com/influxdata/influxdb/v2/pkg/data/gen" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "go.uber.org/multierr" -) - -const ( - maxTSMFileSize = uint32(2048 * 1024 * 1024) // 2GB -) - -type Writer struct { - tw tsm1.TSMWriter - id uint64 - path string - ext string - files []string - gen, seq int - err error - buf []byte - auto bool -} - -type option func(w *Writer) - -// Generation specifies the generation number of the tsm files. -func Generation(gen int) option { - return func(w *Writer) { - w.gen = gen - } -} - -// Sequence specifies the starting sequence number of the tsm files. -func Sequence(seq int) option { - return func(w *Writer) { - w.seq = seq - } -} - -// Temporary configures the writer to create tsm.tmp files. -func Temporary() option { - return func(w *Writer) { - w.ext = tsm1.TSMFileExtension + "." + tsm1.TmpTSMFileExtension - } -} - -// AutoNumber will read the existing TSM file names and use generation + 1 -func AutoNumber() option { - return func(w *Writer) { - w.auto = true - } -} - -func NewWriter(id uint64, path string, opts ...option) *Writer { - w := &Writer{id: id, path: path, gen: 1, seq: 1, ext: tsm1.TSMFileExtension} - - for _, opt := range opts { - opt(w) - } - - w.nextTSM() - - return w -} - -func (w *Writer) Write(key []byte, values tsm1.Values) { - if w.err != nil { - return - } - - if w.tw.Size() > maxTSMFileSize { - w.closeTSM() - w.nextTSM() - } - - if err := w.tw.Write(key, values); err != nil { - if err == tsm1.ErrMaxBlocksExceeded { - w.closeTSM() - w.nextTSM() - } else { - w.err = err - } - } -} - -func (w *Writer) WriteV(key []byte, values gen.Values) { - if w.err != nil { - return - } - - if w.tw.Size() > maxTSMFileSize { - w.closeTSM() - w.nextTSM() - } - - minT, maxT := values.MinTime(), values.MaxTime() - var err error - if w.buf, err = values.Encode(w.buf); err != nil { - w.err = err - return - } - - if err := w.tw.WriteBlock(key, minT, maxT, w.buf); err != nil { - if err == tsm1.ErrMaxBlocksExceeded { - w.closeTSM() - w.nextTSM() - } else { - w.err = err - } - } -} - -// Close closes the writer. -func (w *Writer) Close() { - if w.tw != nil { - w.closeTSM() - } -} - -// ShardID returns the shard number of the writer. -func (w *Writer) ShardID() uint64 { return w.id } - -func (w *Writer) Err() error { return w.err } - -// Files returns the full paths of all the files written by the Writer. -func (w *Writer) Files() []string { return w.files } - -func (w *Writer) nextTSM() { - fileName := filepath.Join(w.path, strconv.Itoa(int(w.id)), fmt.Sprintf("%09d-%09d.%s", w.gen, w.seq, w.ext)) - w.files = append(w.files, fileName) - w.seq++ - - fd, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666) - if err != nil { - w.err = err - return - } - - // Create the writer for the new TSM file. 
- w.tw, err = tsm1.NewTSMWriter(fd) - if err != nil { - w.err = err - return - } -} - -func (w *Writer) closeTSM() { - var err error - if e := w.tw.WriteIndex(); e != nil && e != tsm1.ErrNoValues { - err = multierr.Append(err, e) - } - - if e := w.tw.Close(); e != nil { - err = multierr.Append(err, e) - } - - if err != nil { - w.err = err - } - w.tw = nil -} diff --git a/internal/testutil/strings.go b/internal/testutil/strings.go deleted file mode 100644 index 01fbf8d8e94..00000000000 --- a/internal/testutil/strings.go +++ /dev/null @@ -1,105 +0,0 @@ -package testutil - -import ( - "math/rand" - "strings" -) - -// MakeSentence returns a string made up of n words. -// MakeSentence uses rand.Int31n and therefore calling rand.Seed will produce -// deterministic results. -func MakeSentence(n int) string { - s := make([]string, n) - for i := 0; i < n; i++ { - s[i] = words[rand.Int31n(int32(len(words)))] - } - return strings.Join(s, " ") -} - -var words = [...]string{ - "lorem", "ipsum", "dolor", "sit", "amet", "consectetuer", "adipiscing", "elit", "integer", "in", "mi", "a", "mauris", - "ornare", "sagittis", "suspendisse", "potenti", "suspendisse", "dapibus", "dignissim", "dolor", "nam", - "sapien", "tellus", "tempus", "et", "tempus", "ac", "tincidunt", "in", "arcu", "duis", "dictum", "proin", "magna", - "nulla", "pellentesque", "non", "commodo", "et", "iaculis", "sit", "amet", "mi", "mauris", "condimentum", "massa", - "ut", "metus", "donec", "viverra", "sapien", "mattis", "rutrum", "tristique", "lacus", "eros", "semper", "tellus", - "et", "molestie", "nisi", "sapien", "eu", "massa", "vestibulum", "ante", "ipsum", "primis", "in", "faucibus", "orci", - "luctus", "et", "ultrices", "posuere", "cubilia", "curae", "fusce", "erat", "tortor", "mollis", "ut", "accumsan", - "ut", "lacinia", "gravida", "libero", "curabitur", "massa", "felis", "accumsan", "feugiat", "convallis", "sit", - "amet", "porta", "vel", "neque", "duis", "et", "ligula", "non", "elit", "ultricies", "rutrum", "suspendisse", - "tempor", "quisque", "posuere", "malesuada", "velit", "sed", "pellentesque", "mi", "a", "purus", "integer", - "imperdiet", "orci", "a", "eleifend", "mollis", "velit", "nulla", "iaculis", "arcu", "eu", "rutrum", "magna", "quam", - "sed", "elit", "nullam", "egestas", "integer", "interdum", "purus", "nec", "mauris", "vestibulum", "ac", "mi", "in", - "nunc", "suscipit", "dapibus", "duis", "consectetuer", "ipsum", "et", "pharetra", "sollicitudin", "metus", - "turpis", "facilisis", "magna", "vitae", "dictum", "ligula", "nulla", "nec", "mi", "nunc", "ante", "urna", "gravida", - "sit", "amet", "congue", "et", "accumsan", "vitae", "magna", "praesent", "luctus", "nullam", "in", "velit", - "praesent", "est", "curabitur", "turpis", "class", "aptent", "taciti", "sociosqu", "ad", "litora", "torquent", - "per", "conubia", "nostra", "per", "inceptos", "hymenaeos", "cras", "consectetuer", "nibh", "in", "lacinia", - "ornare", "turpis", "sem", "tempor", "massa", "sagittis", "feugiat", "mauris", "nibh", "non", "tellus", - "phasellus", "mi", "fusce", "enim", "mauris", "ultrices", "turpis", "eu", "adipiscing", "viverra", "justo", - "libero", "ullamcorper", "massa", "id", "ultrices", "velit", "est", "quis", "tortor", "quisque", "condimentum", - "lacus", "volutpat", "nonummy", "accumsan", "est", "nunc", "imperdiet", "magna", "vulputate", "aliquet", "nisi", - "risus", "at", "est", "aliquam", "imperdiet", "gravida", "tortor", "praesent", "interdum", "accumsan", "ante", - "vivamus", "est", "ligula", "consequat", "sed", "pulvinar", "eu", 
"consequat", "vitae", "eros", "nulla", "elit", - "nunc", "congue", "eget", "scelerisque", "a", "tempor", "ac", "nisi", "morbi", "facilisis", "pellentesque", - "habitant", "morbi", "tristique", "senectus", "et", "netus", "et", "malesuada", "fames", "ac", "turpis", "egestas", - "in", "hac", "habitasse", "platea", "dictumst", "suspendisse", "vel", "lorem", "ut", "ligula", "tempor", - "consequat", "quisque", "consectetuer", "nisl", "eget", "elit", "proin", "quis", "mauris", "ac", "orci", - "accumsan", "suscipit", "sed", "ipsum", "sed", "vel", "libero", "nec", "elit", "feugiat", "blandit", "vestibulum", - "purus", "nulla", "accumsan", "et", "volutpat", "at", "pellentesque", "vel", "urna", "suspendisse", "nonummy", - "aliquam", "pulvinar", "libero", "donec", "vulputate", "orci", "ornare", "bibendum", "condimentum", "lorem", - "elit", "dignissim", "sapien", "ut", "aliquam", "nibh", "augue", "in", "turpis", "phasellus", "ac", "eros", - "praesent", "luctus", "lorem", "a", "mollis", "lacinia", "leo", "turpis", "commodo", "sem", "in", "lacinia", "mi", - "quam", "et", "quam", "curabitur", "a", "libero", "vel", "tellus", "mattis", "imperdiet", "in", "congue", "neque", "ut", - "scelerisque", "bibendum", "libero", "lacus", "ullamcorper", "sapien", "quis", "aliquet", "massa", "velit", - "vel", "orci", "fusce", "in", "nulla", "quis", "est", "cursus", "gravida", "in", "nibh", "lorem", "ipsum", "dolor", "sit", - "amet", "consectetuer", "adipiscing", "elit", "integer", "fermentum", "pretium", "massa", "morbi", "feugiat", - "iaculis", "nunc", "aenean", "aliquam", "pretium", "orci", "cum", "sociis", "natoque", "penatibus", "et", "magnis", - "dis", "parturient", "montes", "nascetur", "ridiculus", "mus", "vivamus", "quis", "tellus", "vel", "quam", - "varius", "bibendum", "fusce", "est", "metus", "feugiat", "at", "porttitor", "et", "cursus", "quis", "pede", "nam", "ut", - "augue", "nulla", "posuere", "phasellus", "at", "dolor", "a", "enim", "cursus", "vestibulum", "duis", "id", "nisi", - "duis", "semper", "tellus", "ac", "nulla", "vestibulum", "scelerisque", "lobortis", "dolor", "aenean", "a", - "felis", "aliquam", "erat", "volutpat", "donec", "a", "magna", "vitae", "pede", "sagittis", "lacinia", "cras", - "vestibulum", "diam", "ut", "arcu", "mauris", "a", "nunc", "duis", "sollicitudin", "erat", "sit", "amet", "turpis", - "proin", "at", "libero", "eu", "diam", "lobortis", "fermentum", "nunc", "lorem", "turpis", "imperdiet", "id", - "gravida", "eget", "aliquet", "sed", "purus", "ut", "vehicula", "laoreet", "ante", "mauris", "eu", "nunc", "sed", "sit", - "amet", "elit", "nec", "ipsum", "aliquam", "egestas", "donec", "non", "nibh", "cras", "sodales", "pretium", "massa", - "praesent", "hendrerit", "est", "et", "risus", "vivamus", "eget", "pede", "curabitur", "tristique", - "scelerisque", "dui", "nullam", "ullamcorper", "vivamus", "venenatis", "velit", "eget", "enim", "nunc", "eu", - "nunc", "eget", "felis", "malesuada", "fermentum", "quisque", "magna", "mauris", "ligula", "felis", "luctus", "a", - "aliquet", "nec", "vulputate", "eget", "magna", "quisque", "placerat", "diam", "sed", "arcu", "praesent", - "sollicitudin", "aliquam", "non", "sapien", "quisque", "id", "augue", "class", "aptent", "taciti", "sociosqu", - "ad", "litora", "torquent", "per", "conubia", "nostra", "per", "inceptos", "hymenaeos", "etiam", "lacus", "lectus", - "mollis", "quis", "mattis", "nec", "commodo", "facilisis", "nibh", "sed", "sodales", "sapien", "ac", "ante", "duis", - "eget", "lectus", "in", "nibh", "lacinia", "auctor", "fusce", 
"interdum", "lectus", "non", "dui", "integer", - "accumsan", "quisque", "quam", "curabitur", "scelerisque", "imperdiet", "nisl", "suspendisse", "potenti", - "nam", "massa", "leo", "iaculis", "sed", "accumsan", "id", "ultrices", "nec", "velit", "suspendisse", "potenti", - "mauris", "bibendum", "turpis", "ac", "viverra", "sollicitudin", "metus", "massa", "interdum", "orci", "non", - "imperdiet", "orci", "ante", "at", "ipsum", "etiam", "eget", "magna", "mauris", "at", "tortor", "eu", "lectus", - "tempor", "tincidunt", "phasellus", "justo", "purus", "pharetra", "ut", "ultricies", "nec", "consequat", "vel", - "nisi", "fusce", "vitae", "velit", "at", "libero", "sollicitudin", "sodales", "aenean", "mi", "libero", "ultrices", - "id", "suscipit", "vitae", "dapibus", "eu", "metus", "aenean", "vestibulum", "nibh", "ac", "massa", "vivamus", - "vestibulum", "libero", "vitae", "purus", "in", "hac", "habitasse", "platea", "dictumst", "curabitur", - "blandit", "nunc", "non", "arcu", "ut", "nec", "nibh", "morbi", "quis", "leo", "vel", "magna", "commodo", "rhoncus", - "donec", "congue", "leo", "eu", "lacus", "pellentesque", "at", "erat", "id", "mi", "consequat", "congue", "praesent", - "a", "nisl", "ut", "diam", "interdum", "molestie", "fusce", "suscipit", "rhoncus", "sem", "donec", "pretium", - "aliquam", "molestie", "vivamus", "et", "justo", "at", "augue", "aliquet", "dapibus", "pellentesque", "felis", - "morbi", "semper", "in", "venenatis", "imperdiet", "neque", "donec", "auctor", "molestie", "augue", "nulla", "id", - "arcu", "sit", "amet", "dui", "lacinia", "convallis", "proin", "tincidunt", "proin", "a", "ante", "nunc", "imperdiet", - "augue", "nullam", "sit", "amet", "arcu", "quisque", "laoreet", "viverra", "felis", "lorem", "ipsum", "dolor", "sit", - "amet", "consectetuer", "adipiscing", "elit", "in", "hac", "habitasse", "platea", "dictumst", "pellentesque", - "habitant", "morbi", "tristique", "senectus", "et", "netus", "et", "malesuada", "fames", "ac", "turpis", "egestas", - "class", "aptent", "taciti", "sociosqu", "ad", "litora", "torquent", "per", "conubia", "nostra", "per", "inceptos", - "hymenaeos", "nullam", "nibh", "sapien", "volutpat", "ut", "placerat", "quis", "ornare", "at", "lorem", "class", - "aptent", "taciti", "sociosqu", "ad", "litora", "torquent", "per", "conubia", "nostra", "per", "inceptos", - "hymenaeos", "morbi", "dictum", "massa", "id", "libero", "ut", "neque", "phasellus", "tincidunt", "nibh", "ut", - "tincidunt", "lacinia", "lacus", "nulla", "aliquam", "mi", "a", "interdum", "dui", "augue", "non", "pede", "duis", - "nunc", "magna", "vulputate", "a", "porta", "at", "tincidunt", "a", "nulla", "praesent", "facilisis", - "suspendisse", "sodales", "feugiat", "purus", "cras", "et", "justo", "a", "mauris", "mollis", "imperdiet", "morbi", - "erat", "mi", "ultrices", "eget", "aliquam", "elementum", "iaculis", "id", "velit", "in", "scelerisque", "enim", - "sit", "amet", "turpis", "sed", "aliquam", "odio", "nonummy", "ullamcorper", "mollis", "lacus", "nibh", "tempor", - "dolor", "sit", "amet", "varius", "sem", "neque", "ac", "dui", "nunc", "et", "est", "eu", "massa", "eleifend", "mollis", - "mauris", "aliquet", "orci", "quis", "tellus", "ut", "mattis", "praesent", "mollis", "consectetuer", "quam", - "nulla", "nulla", "nunc", "accumsan", "nunc", "sit", "amet", "scelerisque", "porttitor", "nibh", "pede", "lacinia", - "justo", "tristique", "mattis", "purus", "eros", "non", "velit", "aenean", "sagittis", "commodo", "erat", - "aliquam", "id", "lacus", "morbi", "vulputate", "vestibulum", 
"elit", -} diff --git a/internal/testutil/unzip.go b/internal/testutil/unzip.go deleted file mode 100644 index b60ef672cc5..00000000000 --- a/internal/testutil/unzip.go +++ /dev/null @@ -1,66 +0,0 @@ -package testutil - -import ( - "archive/zip" - "fmt" - "io" - "os" - "path/filepath" - "strings" -) - -// Unzip will extract a zip archive into dest -func Unzip(src string, dest string) error { - r, err := zip.OpenReader(src) - if err != nil { - return err - } - defer r.Close() - - for _, f := range r.File { - - // Store filename/path for returning and using later on - fpath := filepath.Join(dest, f.Name) - - // Check for ZipSlip. More Info: http://bit.ly/2MsjAWE - if !strings.HasPrefix(fpath, filepath.Clean(dest)+string(os.PathSeparator)) { - return fmt.Errorf("%s: illegal file path", fpath) - } - - if f.FileInfo().IsDir() { - // Make Folder - if err := os.MkdirAll(fpath, os.ModePerm); err != nil { - return err - } - continue - } - - if err = os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil { - return err - } - - outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) - if err != nil { - return err - } - - rc, err := f.Open() - if err != nil { - return err - } - - _, err = io.Copy(outFile, rc) - if err != nil { - return err - } - - if err := outFile.Close(); err != nil { - return err - } - - if err := rc.Close(); err != nil { - return err - } - } - return nil -} diff --git a/internal/tsdb_store.go b/internal/tsdb_store.go deleted file mode 100644 index 8e2f13d8b4f..00000000000 --- a/internal/tsdb_store.go +++ /dev/null @@ -1,144 +0,0 @@ -package internal - -import ( - "context" - "io" - "time" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxql" - "go.uber.org/zap" -) - -// TSDBStoreMock is a mockable implementation of tsdb.Store. 
-type TSDBStoreMock struct { - BackupShardFn func(id uint64, since time.Time, w io.Writer) error - BackupSeriesFileFn func(database string, w io.Writer) error - ExportShardFn func(id uint64, ExportStart time.Time, ExportEnd time.Time, w io.Writer) error - CloseFn func() error - CreateShardFn func(database, policy string, shardID uint64, enabled bool) error - CreateShardSnapshotFn func(id uint64) (string, error) - DatabasesFn func() []string - DeleteDatabaseFn func(name string) error - DeleteMeasurementFn func(ctx context.Context, database, name string) error - DeleteRetentionPolicyFn func(database, name string) error - DeleteSeriesFn func(ctx context.Context, database string, sources []influxql.Source, condition influxql.Expr) error - DeleteShardFn func(id uint64) error - DiskSizeFn func() (int64, error) - ExpandSourcesFn func(sources influxql.Sources) (influxql.Sources, error) - ImportShardFn func(id uint64, r io.Reader) error - MeasurementsCardinalityFn func(database string) (int64, error) - MeasurementNamesFn func(ctx context.Context, auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) - OpenFn func() error - PathFn func() string - RestoreShardFn func(id uint64, r io.Reader) error - SeriesCardinalityFn func(database string) (int64, error) - SetShardEnabledFn func(shardID uint64, enabled bool) error - ShardFn func(id uint64) *tsdb.Shard - ShardGroupFn func(ids []uint64) tsdb.ShardGroup - ShardIDsFn func() []uint64 - ShardNFn func() int - ShardRelativePathFn func(id uint64) (string, error) - ShardsFn func(ids []uint64) []*tsdb.Shard - TagKeysFn func(ctx context.Context, auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) - TagValuesFn func(ctx context.Context, auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) - WithLoggerFn func(log *zap.Logger) - WriteToShardFn func(shardID uint64, points []models.Point) error -} - -func (s *TSDBStoreMock) BackupShard(id uint64, since time.Time, w io.Writer) error { - return s.BackupShardFn(id, since, w) -} -func (s *TSDBStoreMock) BackupSeriesFile(database string, w io.Writer) error { - return s.BackupSeriesFileFn(database, w) -} -func (s *TSDBStoreMock) ExportShard(id uint64, ExportStart time.Time, ExportEnd time.Time, w io.Writer) error { - return s.ExportShardFn(id, ExportStart, ExportEnd, w) -} -func (s *TSDBStoreMock) Close() error { return s.CloseFn() } -func (s *TSDBStoreMock) CreateShard(database string, retentionPolicy string, shardID uint64, enabled bool) error { - return s.CreateShardFn(database, retentionPolicy, shardID, enabled) -} -func (s *TSDBStoreMock) CreateShardSnapshot(id uint64) (string, error) { - return s.CreateShardSnapshotFn(id) -} -func (s *TSDBStoreMock) Databases() []string { - return s.DatabasesFn() -} -func (s *TSDBStoreMock) DeleteDatabase(name string) error { - return s.DeleteDatabaseFn(name) -} -func (s *TSDBStoreMock) DeleteMeasurement(ctx context.Context, database string, name string) error { - return s.DeleteMeasurementFn(ctx, database, name) -} -func (s *TSDBStoreMock) DeleteRetentionPolicy(database string, name string) error { - return s.DeleteRetentionPolicyFn(database, name) -} -func (s *TSDBStoreMock) DeleteSeries(ctx context.Context, database string, sources []influxql.Source, condition influxql.Expr) error { - return s.DeleteSeriesFn(ctx, database, sources, condition) -} -func (s *TSDBStoreMock) DeleteShard(shardID uint64) error { - return s.DeleteShardFn(shardID) -} -func (s *TSDBStoreMock) DiskSize() (int64, 
error) { - return s.DiskSizeFn() -} -func (s *TSDBStoreMock) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { - return s.ExpandSourcesFn(sources) -} -func (s *TSDBStoreMock) ImportShard(id uint64, r io.Reader) error { - return s.ImportShardFn(id, r) -} -func (s *TSDBStoreMock) MeasurementNames(ctx context.Context, auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) { - return s.MeasurementNamesFn(ctx, auth, database, cond) -} -func (s *TSDBStoreMock) MeasurementsCardinality(database string) (int64, error) { - return s.MeasurementsCardinalityFn(database) -} -func (s *TSDBStoreMock) Open() error { - return s.OpenFn() -} -func (s *TSDBStoreMock) Path() string { - return s.PathFn() -} -func (s *TSDBStoreMock) RestoreShard(id uint64, r io.Reader) error { - return s.RestoreShardFn(id, r) -} -func (s *TSDBStoreMock) SeriesCardinality(database string) (int64, error) { - return s.SeriesCardinalityFn(database) -} -func (s *TSDBStoreMock) SetShardEnabled(shardID uint64, enabled bool) error { - return s.SetShardEnabledFn(shardID, enabled) -} -func (s *TSDBStoreMock) Shard(id uint64) *tsdb.Shard { - return s.ShardFn(id) -} -func (s *TSDBStoreMock) ShardGroup(ids []uint64) tsdb.ShardGroup { - return s.ShardGroupFn(ids) -} -func (s *TSDBStoreMock) ShardIDs() []uint64 { - return s.ShardIDsFn() -} -func (s *TSDBStoreMock) ShardN() int { - return s.ShardNFn() -} -func (s *TSDBStoreMock) ShardRelativePath(id uint64) (string, error) { - return s.ShardRelativePathFn(id) -} -func (s *TSDBStoreMock) Shards(ids []uint64) []*tsdb.Shard { - return s.ShardsFn(ids) -} -func (s *TSDBStoreMock) TagKeys(ctx context.Context, auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) { - return s.TagKeysFn(ctx, auth, shardIDs, cond) -} -func (s *TSDBStoreMock) TagValues(ctx context.Context, auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) { - return s.TagValuesFn(ctx, auth, shardIDs, cond) -} -func (s *TSDBStoreMock) WithLogger(log *zap.Logger) { - s.WithLoggerFn(log) -} -func (s *TSDBStoreMock) WriteToShard(shardID uint64, points []models.Point) error { - return s.WriteToShardFn(shardID, points) -} diff --git a/jsonweb/fuzz.go b/jsonweb/fuzz.go deleted file mode 100644 index 352cbeef03b..00000000000 --- a/jsonweb/fuzz.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build gofuzz - -package jsonweb - -// TODO(DStrand1): Convert this to Go 1.18's new Fuzzing with testing.F - -// FuzzJsonWeb is the entry point for fuzzing when built with go-fuzz-build. -func FuzzJsonWeb(data []byte) int { - var keyStore = KeyStoreFunc(func(kid string) ([]byte, error) { - if kid != "some-key" { - return nil, ErrKeyNotFound - } - - return []byte("correct-key"), nil - }) - - parser := NewTokenParser(keyStore) - if _, err := parser.Parse(string(data)); err != nil { - // An error here means this input is not interesting - // to the fuzzer. - return 0 - } - // The input valid, and the fuzzer should increase priority - // along these lines. 
- return 1 -} diff --git a/jsonweb/token.go b/jsonweb/token.go deleted file mode 100644 index 7a197f41cab..00000000000 --- a/jsonweb/token.go +++ /dev/null @@ -1,136 +0,0 @@ -package jsonweb - -import ( - "errors" - - "github.com/golang-jwt/jwt" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -const kind = "jwt" - -var ( - // ErrKeyNotFound should be returned by a KeyStore when - // a key cannot be located for the provided key ID - ErrKeyNotFound = errors.New("key not found") - - // EmptyKeyStore is a KeyStore implementation which contains no keys - EmptyKeyStore = KeyStoreFunc(func(string) ([]byte, error) { - return nil, ErrKeyNotFound - }) -) - -// KeyStore is a type which holds a set of keys accessed -// via an id -type KeyStore interface { - Key(string) ([]byte, error) -} - -// KeyStoreFunc is a function which can be used as a KeyStore -type KeyStoreFunc func(string) ([]byte, error) - -// Key delegates to the receiver KeyStoreFunc -func (k KeyStoreFunc) Key(v string) ([]byte, error) { return k(v) } - -// TokenParser is a type which can parse and validate tokens -type TokenParser struct { - keyStore KeyStore - parser *jwt.Parser -} - -// NewTokenParser returns a configured token parser used to -// parse Token types from strings -func NewTokenParser(keyStore KeyStore) *TokenParser { - return &TokenParser{ - keyStore: keyStore, - parser: &jwt.Parser{ - ValidMethods: []string{jwt.SigningMethodHS256.Alg()}, - }, - } -} - -// Parse takes a string then parses and validates it as a jwt based on -// the key described within the token -func (t *TokenParser) Parse(v string) (*Token, error) { - jwt, err := t.parser.ParseWithClaims(v, &Token{}, func(jwt *jwt.Token) (interface{}, error) { - token, ok := jwt.Claims.(*Token) - if !ok { - return nil, errors.New("missing kid in token claims") - } - - // fetch key for "kid" from key store - return t.keyStore.Key(token.KeyID) - }) - - if err != nil { - return nil, err - } - - token, ok := jwt.Claims.(*Token) - if !ok { - return nil, errors.New("token is unexpected type") - } - - return token, nil -} - -// IsMalformedError returns true if the error returned represents -// a jwt malformed token error -func IsMalformedError(err error) bool { - verr, ok := err.(*jwt.ValidationError) - return ok && verr.Errors&jwt.ValidationErrorMalformed > 0 -} - -// Token is a structure which is serialized as a json web token -// It contains the necessary claims required to authorize -type Token struct { - jwt.StandardClaims - // KeyID is the identifier of the key used to sign the token - KeyID string `json:"kid"` - // Permissions is the set of authorized permissions for the token - Permissions []influxdb.Permission `json:"permissions"` - // UserID for the token - UserID string `json:"uid,omitempty"` -} - -// PermissionSet returns the set of permissions associated with the token. 
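For context on the parser above: it accepts only HS256 tokens and resolves the verification secret through the `kid` claim. A hedged sketch of producing such a token with `github.com/golang-jwt/jwt`; the local `claims` struct merely mirrors the shape of `jsonweb.Token` for illustration and is not the real type:

```go
package main

import (
	"fmt"

	"github.com/golang-jwt/jwt"
)

// claims mirrors what the parser expects: standard JWT claims plus a "kid"
// naming the signing key. (Illustrative only; the real type is jsonweb.Token.)
type claims struct {
	jwt.StandardClaims
	KeyID string `json:"kid"`
}

func main() {
	c := claims{
		StandardClaims: jwt.StandardClaims{Issuer: "example.local"},
		KeyID:          "some-key",
	}

	// Sign with HS256. On the read side, the parser asks its KeyStore for
	// "some-key" and must receive the same secret to verify this token.
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, c).SignedString([]byte("correct-key"))
	if err != nil {
		panic(err)
	}
	fmt.Println(signed)
}
```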
-func (t *Token) PermissionSet() (influxdb.PermissionSet, error) { - return t.Permissions, nil -} - -// Identifier returns the identifier for this Token -// as found in the standard claims -func (t *Token) Identifier() platform.ID { - id, err := platform.IDFromString(t.Id) - if err != nil || id == nil { - return platform.ID(1) - } - - return *id -} - -// GetUserID returns an invalid id as tokens are generated -// with permissions rather than for or by a particular user -func (t *Token) GetUserID() platform.ID { - id, err := platform.IDFromString(t.UserID) - if err != nil { - return platform.InvalidID() - } - return *id -} - -// Kind returns the string "jwt" which is used for auditing -func (t *Token) Kind() string { - return kind -} - -// EphemeralAuth creates a influxdb Auth form a jwt token -func (t *Token) EphemeralAuth(orgID platform.ID) *influxdb.Authorization { - return &influxdb.Authorization{ - ID: t.Identifier(), - OrgID: orgID, - Status: influxdb.Active, - Permissions: t.Permissions, - } -} diff --git a/jsonweb/token_test.go b/jsonweb/token_test.go deleted file mode 100644 index 77a79678e04..00000000000 --- a/jsonweb/token_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package jsonweb - -import ( - "reflect" - "testing" - - "github.com/golang-jwt/jwt" - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var ( - one = platform.ID(1) - two = platform.ID(2) - keyStore = KeyStoreFunc(func(kid string) ([]byte, error) { - if kid != "some-key" { - return nil, ErrKeyNotFound - } - - return []byte("correct-key"), nil - }) -) - -func Test_TokenParser(t *testing.T) { - for _, test := range []struct { - name string - keyStore KeyStore - input string - // expectations - token *Token - err error - }{ - { - name: "happy path", - input: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJjbG91ZDIuaW5mbHV4ZGF0YS5jb20iLCJhdWQiOiJnYXRld2F5LmluZmx1eGRhdGEuY29tIiwiaWF0IjoxNTY4NjI4OTgwLCJraWQiOiJzb21lLWtleSIsInBlcm1pc3Npb25zIjpbeyJhY3Rpb24iOiJ3cml0ZSIsInJlc291cmNlIjp7InR5cGUiOiJidWNrZXRzIiwiaWQiOiIwMDAwMDAwMDAwMDAwMDAxIiwib3JnSUQiOiIwMDAwMDAwMDAwMDAwMDAyIn19XX0.74vjbExiOd702VSIMmQWaDT_GFvUI0-_P-SfQ_OOHB0", - token: &Token{ - StandardClaims: jwt.StandardClaims{ - Issuer: "cloud2.influxdata.com", - Audience: "gateway.influxdata.com", - IssuedAt: 1568628980, - }, - KeyID: "some-key", - Permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: &one, - OrgID: &two, - }, - }, - }, - }, - }, - { - name: "key not found", - input: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJjbG91ZDIuaW5mbHV4ZGF0YS5jb20iLCJhdWQiOiJnYXRld2F5LmluZmx1eGRhdGEuY29tIiwiaWF0IjoxNTY4NjMxMTQ0LCJraWQiOiJzb21lLW90aGVyLWtleSIsInBlcm1pc3Npb25zIjpbeyJhY3Rpb24iOiJyZWFkIiwicmVzb3VyY2UiOnsidHlwZSI6InRhc2tzIiwiaWQiOiIwMDAwMDAwMDAwMDAwMDAzIiwib3JnSUQiOiIwMDAwMDAwMDAwMDAwMDA0In19XX0.QVXJ3kGP1gsxisNZe7QmphXox-vjZr6MAMbd00CQlfA", - err: &jwt.ValidationError{ - Inner: ErrKeyNotFound, - Errors: jwt.ValidationErrorUnverifiable, - }, - }, - { - name: "invalid signature", - input: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJjbG91ZDIuaW5mbHV4ZGF0YS5jb20iLCJhdWQiOiJnYXRld2F5LmluZmx1eGRhdGEuY29tIiwiaWF0IjoxNTY4NjMxMTQ0LCJraWQiOiJzb21lLWtleSIsInBlcm1pc3Npb25zIjpbeyJhY3Rpb24iOiJyZWFkIiwicmVzb3VyY2UiOnsidHlwZSI6InRhc2tzIiwiaWQiOiIwMDAwMDAwMDAwMDAwMDAzIiwib3JnSUQiOiIwMDAwMDAwMDAwMDAwMDA0In19XX0.RwmNs5u6NnjNq9xTdAIERFrI5ow-6lJpND3jRrTwkaE", - err: &jwt.ValidationError{ 
- Inner: jwt.ErrSignatureInvalid, - Errors: jwt.ValidationErrorSignatureInvalid, - }, - }, - } { - t.Run(test.name, func(t *testing.T) { - parser := NewTokenParser(keyStore) - - token, err := parser.Parse(test.input) - if !reflect.DeepEqual(test.err, err) { - t.Errorf("expected %[1]s (%#[1]v), got %[2]s (%#[2]v)", test.err, err) - } - - if diff := cmp.Diff(test.token, token); diff != "" { - t.Errorf("unexpected token:\n%s", diff) - } - - // if err is nil then token should be present - if err == nil { - // ensure this does not panic - token.Identifier() - } - }) - } -} diff --git a/keyvalue_log.go b/keyvalue_log.go deleted file mode 100644 index 7e59e8a4bc2..00000000000 --- a/keyvalue_log.go +++ /dev/null @@ -1,27 +0,0 @@ -package influxdb - -import ( - "context" - "time" -) - -// KeyValueLog is a generic type logs key-value pairs. This interface is intended to be used to construct other -// higher-level log-like resources such as an oplog or audit log. -// -// The idea is to create a log who values can be accessed at the key k: -// k -> [(v0,t0) (v1,t1) ... (vn,tn)] -// -// Logs may be retrieved in ascending or descending time order and support limits and offsets. -type KeyValueLog interface { - // AddLogEntry adds an entry (v,t) to the log defined for the key k. - AddLogEntry(ctx context.Context, k []byte, v []byte, t time.Time) error - - // ForEachLogEntry iterates through all the log entries at key k and applies the function fn for each record. - ForEachLogEntry(ctx context.Context, k []byte, opts FindOptions, fn func(v []byte, t time.Time) error) error - - // FirstLogEntry is used to retrieve the first entry in the log at key k. - FirstLogEntry(ctx context.Context, k []byte) ([]byte, time.Time, error) - - // LastLogEntry is used to retrieve the last entry in the log at key k. - LastLogEntry(ctx context.Context, k []byte) ([]byte, time.Time, error) -} diff --git a/kit/check/check.go b/kit/check/check.go deleted file mode 100644 index 0e75c5f0ada..00000000000 --- a/kit/check/check.go +++ /dev/null @@ -1,238 +0,0 @@ -// Package check standardizes /health and /ready endpoints. -// This allows you to easily know when your server is ready and healthy. -package check - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "sort" - "sync" -) - -// Status string to indicate the overall status of the check. -type Status string - -const ( - // StatusFail indicates a specific check has failed. - StatusFail Status = "fail" - // StatusPass indicates a specific check has passed. - StatusPass Status = "pass" - - // DefaultCheckName is the name of the default checker. - DefaultCheckName = "internal" -) - -// Check wraps a map of service names to status checkers. -type Check struct { - healthChecks []Checker - readyChecks []Checker - healthOverride override - readyOverride override - - passthroughHandler http.Handler -} - -// Checker indicates a service whose health can be checked. -type Checker interface { - Check(ctx context.Context) Response -} - -// NewCheck returns a Health with a default checker. -func NewCheck() *Check { - ch := &Check{} - ch.healthOverride.disable() - ch.readyOverride.disable() - return ch -} - -// AddHealthCheck adds the check to the list of ready checks. -// If c is a NamedChecker, the name will be added. 
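The `KeyValueLog` interface above boils down to an append-only list of `(value, time)` pairs per key. A toy in-memory sketch of that idea, simplified in that it drops the `FindOptions` pagination and ordering argument and is in no way the real implementation:

```go
package kvlog

import (
	"context"
	"errors"
	"sync"
	"time"
)

// entry is one (value, time) pair in a per-key log.
type entry struct {
	v []byte
	t time.Time
}

// MemLog is a toy in-memory realization of the idea behind KeyValueLog:
// each key maps to an append-only slice of (value, time) pairs. It omits
// the FindOptions handling (limit/offset/descending) of the real interface.
type MemLog struct {
	mu   sync.Mutex
	logs map[string][]entry
}

func NewMemLog() *MemLog { return &MemLog{logs: map[string][]entry{}} }

func (l *MemLog) AddLogEntry(_ context.Context, k, v []byte, t time.Time) error {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.logs[string(k)] = append(l.logs[string(k)], entry{v: append([]byte(nil), v...), t: t})
	return nil
}

// ForEachLogEntry walks the entries for k in insertion (ascending time) order.
func (l *MemLog) ForEachLogEntry(_ context.Context, k []byte, fn func(v []byte, t time.Time) error) error {
	l.mu.Lock()
	defer l.mu.Unlock()
	for _, e := range l.logs[string(k)] {
		if err := fn(e.v, e.t); err != nil {
			return err
		}
	}
	return nil
}

// LastLogEntry returns the most recent (value, time) pair stored for k.
func (l *MemLog) LastLogEntry(_ context.Context, k []byte) ([]byte, time.Time, error) {
	l.mu.Lock()
	defer l.mu.Unlock()
	es := l.logs[string(k)]
	if len(es) == 0 {
		return nil, time.Time{}, errors.New("log is empty")
	}
	last := es[len(es)-1]
	return last.v, last.t, nil
}
```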
-func (c *Check) AddHealthCheck(check Checker) { - if nc, ok := check.(NamedChecker); ok { - c.healthChecks = append(c.healthChecks, Named(nc.CheckName(), nc)) - } else { - c.healthChecks = append(c.healthChecks, check) - } -} - -// AddReadyCheck adds the check to the list of ready checks. -// If c is a NamedChecker, the name will be added. -func (c *Check) AddReadyCheck(check Checker) { - if nc, ok := check.(NamedChecker); ok { - c.readyChecks = append(c.readyChecks, Named(nc.CheckName(), nc)) - } else { - c.readyChecks = append(c.readyChecks, check) - } -} - -// CheckHealth evaluates c's set of health checks and returns a populated Response. -func (c *Check) CheckHealth(ctx context.Context) Response { - response := Response{ - Name: "Health", - Status: StatusPass, - Checks: make(Responses, len(c.healthChecks)), - } - - status, overriding := c.healthOverride.get() - if overriding { - response.Status = status - overrideResponse := Response{ - Name: "manual-override", - Message: "health manually overridden", - } - response.Checks = append(response.Checks, overrideResponse) - } - for i, ch := range c.healthChecks { - resp := ch.Check(ctx) - if resp.Status != StatusPass && !overriding { - response.Status = resp.Status - } - response.Checks[i] = resp - } - sort.Sort(response.Checks) - return response -} - -// CheckReady evaluates c's set of ready checks and returns a populated Response. -func (c *Check) CheckReady(ctx context.Context) Response { - response := Response{ - Name: "Ready", - Status: StatusPass, - Checks: make(Responses, len(c.readyChecks)), - } - - status, overriding := c.readyOverride.get() - if overriding { - response.Status = status - overrideResponse := Response{ - Name: "manual-override", - Message: "ready manually overridden", - } - response.Checks = append(response.Checks, overrideResponse) - } - for i, c := range c.readyChecks { - resp := c.Check(ctx) - if resp.Status != StatusPass && !overriding { - response.Status = resp.Status - } - response.Checks[i] = resp - } - sort.Sort(response.Checks) - return response -} - -// SetPassthrough allows you to set a handler to use if the request is not a ready or health check. -// This can be useful if you intend to use this as a middleware. -func (c *Check) SetPassthrough(h http.Handler) { - c.passthroughHandler = h -} - -// ServeHTTP serves /ready and /health requests with the respective checks. -func (c *Check) ServeHTTP(w http.ResponseWriter, r *http.Request) { - const ( - pathReady = "/ready" - pathHealth = "/health" - queryForce = "force" - ) - - path := r.URL.Path - - // Allow requests not intended for checks to pass through. - if path != pathReady && path != pathHealth { - if c.passthroughHandler != nil { - c.passthroughHandler.ServeHTTP(w, r) - return - } - - // We can't handle this request. 
- w.WriteHeader(http.StatusNotFound) - return - } - - ctx := r.Context() - query := r.URL.Query() - - switch path { - case pathReady: - switch query.Get(queryForce) { - case "true": - switch query.Get("ready") { - case "true": - c.readyOverride.enable(StatusPass) - case "false": - c.readyOverride.enable(StatusFail) - } - case "false": - c.readyOverride.disable() - } - writeResponse(w, c.CheckReady(ctx)) - case pathHealth: - switch query.Get(queryForce) { - case "true": - switch query.Get("healthy") { - case "true": - c.healthOverride.enable(StatusPass) - case "false": - c.healthOverride.enable(StatusFail) - } - case "false": - c.healthOverride.disable() - } - writeResponse(w, c.CheckHealth(ctx)) - } -} - -// writeResponse writes a Response to the wire as JSON. The HTTP status code -// accompanying the payload is the primary means for signaling the status of the -// checks. The possible status codes are: -// -// - 200 OK: All checks pass. -// - 503 Service Unavailable: Some checks are failing. -// - 500 Internal Server Error: There was a problem serializing the Response. -func writeResponse(w http.ResponseWriter, resp Response) { - status := http.StatusOK - if resp.Status == StatusFail { - status = http.StatusServiceUnavailable - } - - msg, err := json.MarshalIndent(resp, "", " ") - if err != nil { - msg = []byte(`{"message": "error marshaling response", "status": "fail"}`) - status = http.StatusInternalServerError - } - w.WriteHeader(status) - fmt.Fprintln(w, string(msg)) -} - -// override is a manual override for an entire group of checks. -type override struct { - mtx sync.Mutex - status Status - active bool -} - -// get returns the Status of an override as well as whether or not an override -// is currently active. -func (m *override) get() (Status, bool) { - m.mtx.Lock() - defer m.mtx.Unlock() - return m.status, m.active -} - -// disable disables the override. -func (m *override) disable() { - m.mtx.Lock() - m.active = false - m.status = StatusFail - m.mtx.Unlock() -} - -// enable turns on the override and establishes a specific Status for which to. 
-func (m *override) enable(s Status) { - m.mtx.Lock() - m.active = true - m.status = s - m.mtx.Unlock() -} diff --git a/kit/check/check_test.go b/kit/check/check_test.go deleted file mode 100644 index 90b15073df6..00000000000 --- a/kit/check/check_test.go +++ /dev/null @@ -1,535 +0,0 @@ -package check - -import ( - "context" - "encoding/json" - "errors" - "io" - "net" - "net/http" - "net/http/httptest" - "reflect" - "testing" - "time" -) - -func TestEmptyCheck(t *testing.T) { - c := NewCheck() - resp := c.CheckReady(context.Background()) - if len(resp.Checks) > 0 { - t.Errorf("no checks added but %d returned", len(resp.Checks)) - } - - if resp.Name != "Ready" { - t.Errorf("expected: \"Ready\", got: %q", resp.Name) - } - - if resp.Status != StatusPass { - t.Errorf("expected: %q, got: %q", StatusPass, resp.Status) - } -} - -func TestAddHealthCheck(t *testing.T) { - h := NewCheck() - h.AddHealthCheck(Named("awesome", ErrCheck(func() error { - return nil - }))) - r := h.CheckHealth(context.Background()) - if r.Status != StatusPass { - t.Error("Health should fail because one of the check is unhealthy") - } - - if len(r.Checks) != 1 { - t.Fatalf("check not in results: %+v", r.Checks) - } - - v := r.Checks[0] - if v.Status != StatusPass { - t.Errorf("the added check should be pass not %q.", v.Status) - } -} - -func TestAddUnHealthyCheck(t *testing.T) { - h := NewCheck() - h.AddHealthCheck(Named("failure", ErrCheck(func() error { - return errors.New("Oops! I am sorry") - }))) - r := h.CheckHealth(context.Background()) - if r.Status != StatusFail { - t.Error("Health should fail because one of the check is unhealthy") - } - - if len(r.Checks) != 1 { - t.Fatal("check not in results") - } - - v := r.Checks[0] - if v.Status != StatusFail { - t.Errorf("the added check should be fail not %s.", v.Status) - } - if v.Message != "Oops! I am sorry" { - t.Errorf( - "the error should be 'Oops! I am sorry' not %s.", - v.Message, - ) - } -} - -func buildCheckWithServer() (*Check, *httptest.Server) { - c := NewCheck() - return c, httptest.NewServer(c) -} - -type mockCheck struct { - status Status - name string -} - -func (m mockCheck) Check(_ context.Context) Response { - return Response{ - Name: m.name, - Status: m.status, - } -} - -func mockPass(name string) Checker { - return mockCheck{status: StatusPass, name: name} -} - -func mockFail(name string) Checker { - return mockCheck{status: StatusFail, name: name} -} - -func respBuilder(body io.ReadCloser) (*Response, error) { - defer body.Close() - d := json.NewDecoder(body) - r := &Response{} - return r, d.Decode(r) -} - -func TestBasicHTTPHandler(t *testing.T) { - _, ts := buildCheckWithServer() - defer ts.Close() - - resp, err := http.Get(ts.URL + "/ready") - if err != nil { - t.Fatal(err) - } - - actual, err := respBuilder(resp.Body) - if err != nil { - t.Fatal(err) - } - expected := &Response{ - Name: "Ready", - Status: StatusPass, - } - if !reflect.DeepEqual(expected, actual) { - t.Errorf("unexpected response. 
expected %v, actual %v", expected, actual) - } -} - -func TestHealthSorting(t *testing.T) { - c, ts := buildCheckWithServer() - defer ts.Close() - - c.AddHealthCheck(mockPass("a")) - c.AddHealthCheck(mockPass("c")) - c.AddHealthCheck(mockPass("b")) - c.AddHealthCheck(mockFail("k")) - c.AddHealthCheck(mockFail("b")) - - resp, err := http.Get(ts.URL + "/health") - if err != nil { - t.Fatal(err) - } - actual, err := respBuilder(resp.Body) - if err != nil { - t.Fatal(err) - } - - expected := &Response{ - Name: "Health", - Status: "fail", - Checks: Responses{ - Response{Name: "b", Status: "fail"}, - Response{Name: "k", Status: "fail"}, - Response{Name: "a", Status: "pass"}, - Response{Name: "b", Status: "pass"}, - Response{Name: "c", Status: "pass"}, - }, - } - - if !reflect.DeepEqual(expected, actual) { - t.Errorf("unexpected response. expected %v, actual %v", expected, actual) - } -} - -func TestForceHealthy(t *testing.T) { - c, ts := buildCheckWithServer() - defer ts.Close() - - c.AddHealthCheck(mockFail("a")) - - _, err := http.Get(ts.URL + "/health?force=true&healthy=true") - if err != nil { - t.Fatal(err) - } - - resp, err := http.Get(ts.URL + "/health") - if err != nil { - t.Fatal(err) - } - actual, err := respBuilder(resp.Body) - if err != nil { - t.Fatal(err) - } - - expected := &Response{ - Name: "Health", - Status: "pass", - Checks: Responses{ - Response{Name: "manual-override", Message: "health manually overridden"}, - Response{Name: "a", Status: "fail"}, - }, - } - - if !reflect.DeepEqual(expected, actual) { - t.Errorf("unexpected response. expected %v, actual %v", expected, actual) - } - - _, err = http.Get(ts.URL + "/health?force=false") - if err != nil { - t.Fatal(err) - } - - expected = &Response{ - Name: "Health", - Status: "fail", - Checks: Responses{ - Response{Name: "a", Status: "fail"}, - }, - } - - resp, err = http.Get(ts.URL + "/health") - if err != nil { - t.Fatal(err) - } - actual, err = respBuilder(resp.Body) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(expected, actual) { - t.Errorf("unexpected response. expected %v, actual %v", expected, actual) - } -} - -func TestForceUnhealthy(t *testing.T) { - c, ts := buildCheckWithServer() - defer ts.Close() - - c.AddHealthCheck(mockPass("a")) - - _, err := http.Get(ts.URL + "/health?force=true&healthy=false") - if err != nil { - t.Fatal(err) - } - - resp, err := http.Get(ts.URL + "/health") - if err != nil { - t.Fatal(err) - } - actual, err := respBuilder(resp.Body) - if err != nil { - t.Fatal(err) - } - - expected := &Response{ - Name: "Health", - Status: "fail", - Checks: Responses{ - Response{Name: "manual-override", Message: "health manually overridden"}, - Response{Name: "a", Status: "pass"}, - }, - } - - if !reflect.DeepEqual(expected, actual) { - t.Errorf("unexpected response. expected %v, actual %v", expected, actual) - } - - _, err = http.Get(ts.URL + "/health?force=false") - if err != nil { - t.Fatal(err) - } - - expected = &Response{ - Name: "Health", - Status: "pass", - Checks: Responses{ - Response{Name: "a", Status: "pass"}, - }, - } - - resp, err = http.Get(ts.URL + "/health") - if err != nil { - t.Fatal(err) - } - actual, err = respBuilder(resp.Body) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(expected, actual) { - t.Errorf("unexpected response. 
expected %v, actual %v", expected, actual) - } -} - -func TestForceReady(t *testing.T) { - c, ts := buildCheckWithServer() - defer ts.Close() - - c.AddReadyCheck(mockFail("a")) - - _, err := http.Get(ts.URL + "/ready?force=true&ready=true") - if err != nil { - t.Fatal(err) - } - - resp, err := http.Get(ts.URL + "/ready") - if err != nil { - t.Fatal(err) - } - actual, err := respBuilder(resp.Body) - if err != nil { - t.Fatal(err) - } - - expected := &Response{ - Name: "Ready", - Status: "pass", - Checks: Responses{ - Response{Name: "manual-override", Message: "ready manually overridden"}, - Response{Name: "a", Status: "fail"}, - }, - } - - if !reflect.DeepEqual(expected, actual) { - t.Errorf("unexpected response. expected %v, actual %v", expected, actual) - } - - _, err = http.Get(ts.URL + "/ready?force=false") - if err != nil { - t.Fatal(err) - } - - expected = &Response{ - Name: "Ready", - Status: "fail", - Checks: Responses{ - Response{Name: "a", Status: "fail"}, - }, - } - - resp, err = http.Get(ts.URL + "/ready") - if err != nil { - t.Fatal(err) - } - actual, err = respBuilder(resp.Body) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(expected, actual) { - t.Errorf("unexpected response. expected %v, actual %v", expected, actual) - } -} - -func TestForceNotReady(t *testing.T) { - c, ts := buildCheckWithServer() - defer ts.Close() - - c.AddReadyCheck(mockPass("a")) - - _, err := http.Get(ts.URL + "/ready?force=true&ready=false") - if err != nil { - t.Fatal(err) - } - - resp, err := http.Get(ts.URL + "/ready") - if err != nil { - t.Fatal(err) - } - actual, err := respBuilder(resp.Body) - if err != nil { - t.Fatal(err) - } - - expected := &Response{ - Name: "Ready", - Status: "fail", - Checks: Responses{ - Response{Name: "manual-override", Message: "ready manually overridden"}, - Response{Name: "a", Status: "pass"}, - }, - } - - if !reflect.DeepEqual(expected, actual) { - t.Errorf("unexpected response. expected %v, actual %v", expected, actual) - } - - _, err = http.Get(ts.URL + "/ready?force=false") - if err != nil { - t.Fatal(err) - } - - expected = &Response{ - Name: "Ready", - Status: "pass", - Checks: Responses{ - Response{Name: "a", Status: "pass"}, - }, - } - - resp, err = http.Get(ts.URL + "/ready") - if err != nil { - t.Fatal(err) - } - actual, err = respBuilder(resp.Body) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(expected, actual) { - t.Errorf("unexpected response. expected %v, actual %v", expected, actual) - } -} - -func TestNoCrossOver(t *testing.T) { - c, ts := buildCheckWithServer() - defer ts.Close() - - c.AddHealthCheck(mockPass("a")) - c.AddHealthCheck(mockPass("c")) - c.AddReadyCheck(mockPass("b")) - c.AddReadyCheck(mockFail("k")) - c.AddHealthCheck(mockFail("b")) - - resp, err := http.Get(ts.URL + "/health") - if err != nil { - t.Fatal(err) - } - actual, err := respBuilder(resp.Body) - if err != nil { - t.Fatal(err) - } - - expected := &Response{ - Name: "Health", - Status: "fail", - Checks: Responses{ - Response{Name: "b", Status: "fail"}, - Response{Name: "a", Status: "pass"}, - Response{Name: "c", Status: "pass"}, - }, - } - - if !reflect.DeepEqual(expected, actual) { - t.Errorf("unexpected response. 
expected %v, actual %v", expected, actual) - } - - resp, err = http.Get(ts.URL + "/ready") - if err != nil { - t.Fatal(err) - } - actual, err = respBuilder(resp.Body) - if err != nil { - t.Fatal(err) - } - - expected = &Response{ - Name: "Ready", - Status: "fail", - Checks: Responses{ - Response{Name: "k", Status: "fail"}, - Response{Name: "b", Status: "pass"}, - }, - } - - if !reflect.DeepEqual(expected, actual) { - t.Errorf("unexpected response. expected %v, actual %v", expected, actual) - } -} - -func TestPassthrough(t *testing.T) { - c, ts := buildCheckWithServer() - defer ts.Close() - - resp, err := http.Get(ts.URL) - if err != nil { - t.Fatal(err) - } - - if resp.StatusCode != 404 { - t.Fatalf("failed to error when no passthrough is present, status: %d", resp.StatusCode) - } - - used := false - s := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - used = true - w.Write([]byte("hi")) - }) - - c.SetPassthrough(s) - - resp, err = http.Get(ts.URL) - if err != nil { - t.Fatal(err) - } - - if resp.StatusCode != 200 { - t.Fatalf("bad response code from passthrough, status: %d", resp.StatusCode) - } - - if !used { - t.Fatal("passthrough server not used") - } -} - -func ExampleNewCheck() { - // Run the default healthcheck. it always return 200. It is good if you - // have a service without any dependency - h := NewCheck() - h.CheckHealth(context.Background()) -} - -func ExampleCheck_CheckHealth() { - h := NewCheck() - h.AddHealthCheck(Named("google", CheckerFunc(func(ctx context.Context) Response { - var r net.Resolver - _, err := r.LookupHost(ctx, "google.com") - if err != nil { - return Error(err) - } - return Pass() - }))) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - h.CheckHealth(ctx) -} - -func ExampleCheck_ServeHTTP() { - c := NewCheck() - http.ListenAndServe(":6060", c) -} - -func ExampleCheck_SetPassthrough() { - c := NewCheck() - - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("Hello friends!")) - }) - - c.SetPassthrough(http.DefaultServeMux) - http.ListenAndServe(":6060", c) -} diff --git a/kit/check/helpers.go b/kit/check/helpers.go deleted file mode 100644 index 87e9288ef57..00000000000 --- a/kit/check/helpers.go +++ /dev/null @@ -1,73 +0,0 @@ -package check - -import ( - "context" - "fmt" -) - -// NamedChecker is a superset of Checker that also indicates the name of the service. -// Prefer to implement NamedChecker if your service has a fixed name, -// as opposed to calling *Health.AddNamed. -type NamedChecker interface { - Checker - CheckName() string -} - -// CheckerFunc is an adapter of a plain func() error to the Checker interface. -type CheckerFunc func(ctx context.Context) Response - -// Check implements Checker. -func (f CheckerFunc) Check(ctx context.Context) Response { - return f(ctx) -} - -// Named returns a Checker that will attach a name to the Response from the check. -// This way, it is possible to augment a Response with a human-readable name, but not have to encode -// that logic in the actual check itself. -func Named(name string, checker Checker) Checker { - return CheckerFunc(func(ctx context.Context) Response { - resp := checker.Check(ctx) - resp.Name = name - return resp - }) -} - -// NamedFunc is the same as Named except it takes a CheckerFunc. -func NamedFunc(name string, fn CheckerFunc) Checker { - return Named(name, fn) -} - -// ErrCheck will create a health checker that executes a function. 
If the function returns an error, -// it will return an unhealthy response. Otherwise, it will be as if the Ok function was called. -// Note: it is better to use CheckFunc, because with Check, the context is ignored. -func ErrCheck(fn func() error) Checker { - return CheckerFunc(func(_ context.Context) Response { - if err := fn(); err != nil { - return Error(err) - } - return Pass() - }) -} - -// Pass is a utility function to generate a passing status response with the default parameters. -func Pass() Response { - return Response{ - Status: StatusPass, - } -} - -// Info is a utility function to generate a healthy status with a printf message. -func Info(msg string, args ...interface{}) Response { - return Response{ - Status: StatusPass, - Message: fmt.Sprintf(msg, args...), - } -} - -// Error is a utility function for creating a response from an error message. -func Error(err error) Response { - return Response{ - Status: StatusFail, - Message: err.Error(), - } -} diff --git a/kit/check/response.go b/kit/check/response.go deleted file mode 100644 index 9eb56f0d4a4..00000000000 --- a/kit/check/response.go +++ /dev/null @@ -1,39 +0,0 @@ -package check - -// Response is a result of a collection of health checks. -type Response struct { - Name string `json:"name"` - Status Status `json:"status"` - Message string `json:"message,omitempty"` - Checks Responses `json:"checks,omitempty"` -} - -// HasCheck verifies whether the receiving Response has a check with the given name or not. -func (r *Response) HasCheck(name string) bool { - found := false - for _, check := range r.Checks { - if check.Name == name { - found = true - break - } - } - return found -} - -// Responses is a sortable collection of Response objects. -type Responses []Response - -func (r Responses) Len() int { return len(r) } - -// Less defines the order in which responses are sorted. -// -// Failing responses are always sorted before passing responses. Responses with -// the same status are then sorted according to the name of the check. -func (r Responses) Less(i, j int) bool { - if r[i].Status == r[j].Status { - return r[i].Name < r[j].Name - } - return r[i].Status < r[j].Status -} - -func (r Responses) Swap(i, j int) { r[i], r[j] = r[j], r[i] } diff --git a/kit/cli/doc.go b/kit/cli/doc.go deleted file mode 100644 index 42f555179e4..00000000000 --- a/kit/cli/doc.go +++ /dev/null @@ -1,47 +0,0 @@ -// Package cli creates simple CLI options with ENV overrides using viper. -// -// This is a small simplification over viper to move most of the boilerplate -// into one place. 
-// -// In this example the flags can be set with MYPROGRAM_MONITOR_HOST and -// MYPROGRAM_NUMBER or with the flags --monitor-host and --number -// -// var flags struct { -// monitorHost string -// number int -// } -// -// func main() { -// cmd, err := cli.NewCommand(viper.New(), &cli.Program{ -// Run: run, -// Name: "myprogram", -// Opts: []cli.Opt{ -// { -// DestP: &flags.monitorHost, -// Flag: "monitor-host", -// Default: "http://localhost:8086", -// Desc: "host to send influxdb metrics", -// }, -// { -// DestP: &flags.number, -// Flag: "number", -// Default: 2, -// Desc: "number of times to loop", -// }, -// }, -// }) -// if err != nil { -// fmt.Fprintln(os.Stderr, err) -// os.Exit(1) -// } -// -// if err := cmd.Execute(); err != nil { -// fmt.Fprintln(os.Stderr, err) -// os.Exit(1) -// } -// } -// -// func run() error { -// for i := 0; i < flags.number; i++ { -// fmt.Printf("%d\n", i) -// } -// return nil -// } -package cli diff --git a/kit/cli/idflag.go b/kit/cli/idflag.go deleted file mode 100644 index a6a5be5521f..00000000000 --- a/kit/cli/idflag.go +++ /dev/null @@ -1,60 +0,0 @@ -package cli - -import ( - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -// idValue is a pflag wrapper for platform.ID -type idValue platform.ID - -func newIDValue(val platform.ID, p *platform.ID) *idValue { - *p = val - return (*idValue)(p) -} - -func (i *idValue) String() string { return platform.ID(*i).String() } -func (i *idValue) Set(s string) error { - id, err := platform.IDFromString(s) - if err != nil { - return err - } - *i = idValue(*id) - return nil -} - -func (i *idValue) Type() string { - return "ID" -} - -// IDVar defines a platform.ID flag with specified name, default value, and usage string. -// The argument p points to a platform.ID variable in which to store the value of the flag. -func IDVar(fs *pflag.FlagSet, p *platform.ID, name string, value platform.ID, usage string) { - IDVarP(fs, p, name, "", value, usage) -} - -// IDVarP is like IDVar, but accepts a shorthand letter that can be used after a single dash. 
-func IDVarP(fs *pflag.FlagSet, p *platform.ID, name, shorthand string, value platform.ID, usage string) { - fs.VarP(newIDValue(value, p), name, shorthand, usage) -} - -type OrgBucket struct { - Org platform.ID - Bucket platform.ID -} - -func (o *OrgBucket) AddFlags(cmd *cobra.Command) { - fs := cmd.Flags() - IDVar(fs, &o.Org, "org-id", platform.InvalidID(), "organization id") - IDVar(fs, &o.Bucket, "bucket-id", platform.InvalidID(), "bucket id") -} - -func (o *OrgBucket) OrgBucketID() (orgID, bucketID platform.ID) { - return o.Org, o.Bucket -} - -func (o *OrgBucket) Name() [platform.IDLength]byte { - // TODO: FIX THIS - panic("TODO: Fix") -} diff --git a/kit/cli/loglevelflag.go b/kit/cli/loglevelflag.go deleted file mode 100644 index 77e99724986..00000000000 --- a/kit/cli/loglevelflag.go +++ /dev/null @@ -1,42 +0,0 @@ -package cli - -import ( - "fmt" - - "github.com/spf13/pflag" - "go.uber.org/zap/zapcore" -) - -type levelValue zapcore.Level - -func newLevelValue(val zapcore.Level, p *zapcore.Level) *levelValue { - *p = val - return (*levelValue)(p) -} - -func (l *levelValue) String() string { - return zapcore.Level(*l).String() -} -func (l *levelValue) Set(s string) error { - var level zapcore.Level - if err := level.Set(s); err != nil { - return fmt.Errorf("unknown log level; supported levels are debug, info, warn, error") - } - *l = levelValue(level) - return nil -} - -func (l *levelValue) Type() string { - return "Log-Level" -} - -// LevelVar defines a zapcore.Level flag with specified name, default value, and usage string. -// The argument p points to a zapcore.Level variable in which to store the value of the flag. -func LevelVar(fs *pflag.FlagSet, p *zapcore.Level, name string, value zapcore.Level, usage string) { - LevelVarP(fs, p, name, "", value, usage) -} - -// LevelVarP is like LevelVar, but accepts a shorthand letter that can be used after a single dash. -func LevelVarP(fs *pflag.FlagSet, p *zapcore.Level, name, shorthand string, value zapcore.Level, usage string) { - fs.VarP(newLevelValue(value, p), name, shorthand, usage) -} diff --git a/kit/cli/viper.go b/kit/cli/viper.go deleted file mode 100644 index 8dce3f25878..00000000000 --- a/kit/cli/viper.go +++ /dev/null @@ -1,376 +0,0 @@ -package cli - -import ( - "fmt" - "os" - "path" - "strings" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/spf13/cast" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "github.com/spf13/viper" - "go.uber.org/zap/zapcore" -) - -// Opt is a single command-line option -type Opt struct { - DestP interface{} // pointer to the destination - - EnvVar string - Flag string - Hidden bool - Persistent bool - Required bool - Short rune // using rune b/c it guarantees correctness. a short must always be a string of length 1 - - Default interface{} - Desc string -} - -// Program parses CLI options -type Program struct { - // Run is invoked by cobra on execute. - Run func() error - // Name is the name of the program in help usage and the env var prefix. - Name string - // Opts are the command line/env var options to the program - Opts []Opt -} - -// NewCommand creates a new cobra command to be executed that respects env vars. -// -// Uses the upper-case version of the program's name as a prefix -// to all environment variables. -// -// This is to simplify the viper/cobra boilerplate. 
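To make the precedence rules concrete, here is a minimal sketch of a program built on this package. The program name `myapp`, the `--log-level` option, and the import path of this `cli` package are illustrative assumptions, not part of the original code:

```go
package main

import (
	"fmt"
	"os"

	"github.com/influxdata/influxdb/v2/kit/cli"
	"github.com/spf13/viper"
)

func main() {
	var logLevel string
	cmd, err := cli.NewCommand(viper.New(), &cli.Program{
		Name: "myapp", // environment variables are prefixed with MYAPP_
		Opts: []cli.Opt{
			{DestP: &logLevel, Flag: "log-level", Default: "info", Desc: "logging level"},
		},
		Run: func() error {
			// The value of log-level is resolved in this order:
			//   1. --log-level=debug       (command-line flag, highest precedence)
			//   2. MYAPP_LOG_LEVEL=debug   (env var; dashes become underscores)
			//   3. log-level: debug        (config file found via MYAPP_CONFIG_PATH)
			//   4. "info"                  (the declared default)
			fmt.Println(logLevel)
			return nil
		},
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if err := cmd.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```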
-func NewCommand(v *viper.Viper, p *Program) (*cobra.Command, error) { - cmd := &cobra.Command{ - Use: p.Name, - Args: cobra.NoArgs, - RunE: func(_ *cobra.Command, _ []string) error { - return p.Run() - }, - } - - v.SetEnvPrefix(strings.ToUpper(p.Name)) - v.AutomaticEnv() - // This normalizes "-" to an underscore in env names. - v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) - - // done before we bind flags to viper keys. - // order of precedence (1 highest -> 3 lowest): - // 1. flags - // 2. env vars - // 3. config file - if err := initializeConfig(v); err != nil { - return nil, fmt.Errorf("failed to load config file: %w", err) - } - if err := BindOptions(v, cmd, p.Opts); err != nil { - return nil, fmt.Errorf("failed to bind config options: %w", err) - } - - return cmd, nil -} - -func initializeConfig(v *viper.Viper) error { - configPath := v.GetString("CONFIG_PATH") - if configPath == "" { - // Default to looking in the working directory of the running process. - configPath = "." - } - - switch strings.ToLower(path.Ext(configPath)) { - case ".json", ".toml", ".yaml", ".yml": - v.SetConfigFile(configPath) - default: - v.AddConfigPath(configPath) - } - - if err := v.ReadInConfig(); err != nil && !os.IsNotExist(err) { - if _, ok := err.(viper.ConfigFileNotFoundError); !ok { - return err - } - } - return nil -} - -// BindOptions adds opts to the specified command and automatically -// registers those options with viper. -func BindOptions(v *viper.Viper, cmd *cobra.Command, opts []Opt) error { - for _, o := range opts { - flagset := cmd.Flags() - if o.Persistent { - flagset = cmd.PersistentFlags() - } - envVal := lookupEnv(v, &o) - hasShort := o.Short != 0 - - switch destP := o.DestP.(type) { - case *string: - var d string - if o.Default != nil { - d = o.Default.(string) - } - if hasShort { - flagset.StringVarP(destP, o.Flag, string(o.Short), d, o.Desc) - } else { - flagset.StringVar(destP, o.Flag, d, o.Desc) - } - if err := v.BindPFlag(o.Flag, flagset.Lookup(o.Flag)); err != nil { - return fmt.Errorf("failed to bind flag %q: %w", o.Flag, err) - } - if envVal != nil { - if s, err := cast.ToStringE(envVal); err == nil { - *destP = s - } - } - - case *int: - var d int - if o.Default != nil { - d = o.Default.(int) - } - if hasShort { - flagset.IntVarP(destP, o.Flag, string(o.Short), d, o.Desc) - } else { - flagset.IntVar(destP, o.Flag, d, o.Desc) - } - if err := v.BindPFlag(o.Flag, flagset.Lookup(o.Flag)); err != nil { - return fmt.Errorf("failed to bind flag %q: %w", o.Flag, err) - } - if envVal != nil { - if i, err := cast.ToIntE(envVal); err == nil { - *destP = i - } - } - - case *int32: - var d int32 - if o.Default != nil { - // N.B. since our CLI kit types default values as interface{} and - // literal numbers get typed as int by default, it's very easy to - // create an int32 CLI flag with an int default value. - // - // The compiler doesn't know to complain in that case, so you end up - // with a runtime panic when trying to bind the CLI options. - // - // To avoid that headache, we support both int32 and int defaults - // for int32 fields. This introduces a new runtime bomb if somebody - // specifies an int default > math.MaxInt32, but that's hopefully - // less likely. 
- var ok bool - d, ok = o.Default.(int32) - if !ok { - d = int32(o.Default.(int)) - } - } - if hasShort { - flagset.Int32VarP(destP, o.Flag, string(o.Short), d, o.Desc) - } else { - flagset.Int32Var(destP, o.Flag, d, o.Desc) - } - if err := v.BindPFlag(o.Flag, flagset.Lookup(o.Flag)); err != nil { - return fmt.Errorf("failed to bind flag %q: %w", o.Flag, err) - } - if envVal != nil { - if i, err := cast.ToInt32E(envVal); err == nil { - *destP = i - } - } - - case *int64: - var d int64 - if o.Default != nil { - // N.B. since our CLI kit types default values as interface{} and - // literal numbers get typed as int by default, it's very easy to - // create an int64 CLI flag with an int default value. - // - // The compiler doesn't know to complain in that case, so you end up - // with a runtime panic when trying to bind the CLI options. - // - // To avoid that headache, we support both int64 and int defaults - // for int64 fields. - var ok bool - d, ok = o.Default.(int64) - if !ok { - d = int64(o.Default.(int)) - } - } - if hasShort { - flagset.Int64VarP(destP, o.Flag, string(o.Short), d, o.Desc) - } else { - flagset.Int64Var(destP, o.Flag, d, o.Desc) - } - if err := v.BindPFlag(o.Flag, flagset.Lookup(o.Flag)); err != nil { - return fmt.Errorf("failed to bind flag %q: %w", o.Flag, err) - } - if envVal != nil { - if i, err := cast.ToInt64E(envVal); err == nil { - *destP = i - } - } - - case *bool: - var d bool - if o.Default != nil { - d = o.Default.(bool) - } - if hasShort { - flagset.BoolVarP(destP, o.Flag, string(o.Short), d, o.Desc) - } else { - flagset.BoolVar(destP, o.Flag, d, o.Desc) - } - if err := v.BindPFlag(o.Flag, flagset.Lookup(o.Flag)); err != nil { - return fmt.Errorf("failed to bind flag %q: %w", o.Flag, err) - } - if envVal != nil { - if b, err := cast.ToBoolE(envVal); err == nil { - *destP = b - } - } - - case *time.Duration: - var d time.Duration - if o.Default != nil { - d = o.Default.(time.Duration) - } - if hasShort { - flagset.DurationVarP(destP, o.Flag, string(o.Short), d, o.Desc) - } else { - flagset.DurationVar(destP, o.Flag, d, o.Desc) - } - if err := v.BindPFlag(o.Flag, flagset.Lookup(o.Flag)); err != nil { - return fmt.Errorf("failed to bind flag %q: %w", o.Flag, err) - } - if envVal != nil { - if d, err := cast.ToDurationE(envVal); err == nil { - *destP = d - } - } - - case *[]string: - var d []string - if o.Default != nil { - d = o.Default.([]string) - } - if hasShort { - flagset.StringSliceVarP(destP, o.Flag, string(o.Short), d, o.Desc) - } else { - flagset.StringSliceVar(destP, o.Flag, d, o.Desc) - } - if err := v.BindPFlag(o.Flag, flagset.Lookup(o.Flag)); err != nil { - return fmt.Errorf("failed to bind flag %q: %w", o.Flag, err) - } - if envVal != nil { - if ss, err := cast.ToStringSliceE(envVal); err == nil { - *destP = ss - } - } - - case *map[string]string: - var d map[string]string - if o.Default != nil { - d = o.Default.(map[string]string) - } - if hasShort { - flagset.StringToStringVarP(destP, o.Flag, string(o.Short), d, o.Desc) - } else { - flagset.StringToStringVar(destP, o.Flag, d, o.Desc) - } - if err := v.BindPFlag(o.Flag, flagset.Lookup(o.Flag)); err != nil { - return fmt.Errorf("failed to bind flag %q: %w", o.Flag, err) - } - if envVal != nil { - if sms, err := cast.ToStringMapStringE(envVal); err == nil { - *destP = sms - } - } - - case pflag.Value: - if hasShort { - flagset.VarP(destP, o.Flag, string(o.Short), o.Desc) - } else { - flagset.Var(destP, o.Flag, o.Desc) - } - if o.Default != nil { - _ = destP.Set(o.Default.(string)) - } - if err := 
v.BindPFlag(o.Flag, flagset.Lookup(o.Flag)); err != nil { - return fmt.Errorf("failed to bind flag %q: %w", o.Flag, err) - } - if envVal != nil { - if s, err := cast.ToStringE(envVal); err == nil { - _ = destP.Set(s) - } - } - - case *platform.ID: - var d platform.ID - if o.Default != nil { - d = o.Default.(platform.ID) - } - if hasShort { - IDVarP(flagset, destP, o.Flag, string(o.Short), d, o.Desc) - } else { - IDVar(flagset, destP, o.Flag, d, o.Desc) - } - if envVal != nil { - if s, err := cast.ToStringE(envVal); err == nil { - _ = (*destP).DecodeFromString(s) - } - } - - case *zapcore.Level: - var l zapcore.Level - if o.Default != nil { - l = o.Default.(zapcore.Level) - } - if hasShort { - LevelVarP(flagset, destP, o.Flag, string(o.Short), l, o.Desc) - } else { - LevelVar(flagset, destP, o.Flag, l, o.Desc) - } - if envVal != nil { - if s, err := cast.ToStringE(envVal); err == nil { - _ = (*destP).Set(s) - } - } - - default: - // if you get this error, sorry about that! - // anyway, go ahead and make a PR and add another type. - return fmt.Errorf("unknown destination type %T", o.DestP) - } - - // N.B. these "Mark" calls must run after the block above, - // otherwise cobra will return a "no such flag" error. - - // Cobra will complain if a flag marked as required isn't present on the CLI. - // To support setting required args via config and env variables, we only enforce - // the required check if we didn't find a value in the viper instance. - if o.Required && envVal == nil { - if err := cmd.MarkFlagRequired(o.Flag); err != nil { - return fmt.Errorf("failed to mark flag %q as required: %w", o.Flag, err) - } - } - if o.Hidden { - if err := flagset.MarkHidden(o.Flag); err != nil { - return fmt.Errorf("failed to mark flag %q as hidden: %w", o.Flag, err) - } - } - } - - return nil -} - -// lookupEnv returns the value for a CLI option found in the environment, if any. 
-func lookupEnv(v *viper.Viper, o *Opt) interface{} { - envVar := o.Flag - if o.EnvVar != "" { - envVar = o.EnvVar - } - return v.Get(envVar) -} diff --git a/kit/cli/viper_test.go b/kit/cli/viper_test.go deleted file mode 100644 index 136cea5d9d3..00000000000 --- a/kit/cli/viper_test.go +++ /dev/null @@ -1,522 +0,0 @@ -package cli - -import ( - "encoding/json" - "fmt" - "math" - "os" - "path" - "path/filepath" - "testing" - "time" - - "github.com/BurntSushi/toml" - "github.com/spf13/viper" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zapcore" - "gopkg.in/yaml.v3" -) - -type customFlag bool - -func (c customFlag) String() string { - if c == true { - return "on" - } - return "off" -} - -func (c *customFlag) Set(s string) error { - if s == "on" { - *c = true - } else { - *c = false - } - - return nil -} - -func (c *customFlag) Type() string { - return "fancy-bool" -} - -func ExampleNewCommand() { - var monitorHost string - var number int - var smallerNumber int32 - var longerNumber int64 - var sleep bool - var duration time.Duration - var stringSlice []string - var fancyBool customFlag - var logLevel zapcore.Level - cmd, err := NewCommand(viper.New(), &Program{ - Run: func() error { - fmt.Println(monitorHost) - for i := 0; i < number; i++ { - fmt.Printf("%d\n", i) - } - fmt.Println(longerNumber - int64(smallerNumber)) - fmt.Println(sleep) - fmt.Println(duration) - fmt.Println(stringSlice) - fmt.Println(fancyBool) - fmt.Println(logLevel.String()) - return nil - }, - Name: "myprogram", - Opts: []Opt{ - { - DestP: &monitorHost, - Flag: "monitor-host", - Default: "http://localhost:8086", - Desc: "host to send influxdb metrics", - }, - { - DestP: &number, - Flag: "number", - Default: 2, - Desc: "number of times to loop", - }, - { - DestP: &smallerNumber, - Flag: "smaller-number", - Default: math.MaxInt32, - Desc: "limited size number", - }, - { - DestP: &longerNumber, - Flag: "longer-number", - Default: math.MaxInt64, - Desc: "explicitly expanded-size number", - }, - { - DestP: &sleep, - Flag: "sleep", - Default: true, - Desc: "whether to sleep", - }, - { - DestP: &duration, - Flag: "duration", - Default: time.Minute, - Desc: "how long to sleep", - }, - { - DestP: &stringSlice, - Flag: "string-slice", - Default: []string{"foo", "bar"}, - Desc: "things come in lists", - }, - { - DestP: &fancyBool, - Flag: "fancy-bool", - Default: "on", - Desc: "things that implement pflag.Value", - }, - { - DestP: &logLevel, - Flag: "log-level", - Default: zapcore.WarnLevel, - }, - }, - }) - if err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - return - } - - cmd.SetArgs([]string{}) - if err := cmd.Execute(); err != nil { - _, _ = fmt.Fprintln(os.Stderr, err) - } - // Output: - // http://localhost:8086 - // 0 - // 1 - // 9223372034707292160 - // true - // 1m0s - // [foo bar] - // on - // warn -} - -func Test_NewProgram(t *testing.T) { - config := map[string]string{ - // config values should be same as flags - "foo": "bar", - "shoe-fly": "yadon", - "number": "2147483647", - "long-number": "9223372036854775807", - "log-level": "debug", - } - - tests := []struct { - name string - envVarVal string - args []string - expected string - }{ - { - name: "no vals reads from config", - expected: "bar", - }, - { - name: "reads from env var", - envVarVal: "foobar", - expected: "foobar", - }, - { - name: "reads from flag", - args: []string{"--foo=baz"}, - expected: "baz", - }, - { - name: "flag has highest precedence", - envVarVal: "foobar", - 
args: []string{"--foo=baz"}, - expected: "baz", - }, - } - - for _, tt := range tests { - for _, writer := range configWriters { - fn := func(t *testing.T) { - testDir := t.TempDir() - - confFile, err := writer.writeFn(testDir, config) - require.NoError(t, err) - - defer setEnvVar("TEST_CONFIG_PATH", confFile)() - - if tt.envVarVal != "" { - defer setEnvVar("TEST_FOO", tt.envVarVal)() - } - - var testVar string - var testFly string - var testNumber int32 - var testLongNumber int64 - var logLevel zapcore.Level - program := &Program{ - Name: "test", - Opts: []Opt{ - { - DestP: &testVar, - Flag: "foo", - Required: true, - }, - { - DestP: &testFly, - Flag: "shoe-fly", - }, - { - DestP: &testNumber, - Flag: "number", - }, - { - DestP: &testLongNumber, - Flag: "long-number", - }, - { - DestP: &logLevel, - Flag: "log-level", - }, - }, - Run: func() error { return nil }, - } - - cmd, err := NewCommand(viper.New(), program) - require.NoError(t, err) - cmd.SetArgs(append([]string{}, tt.args...)) - require.NoError(t, cmd.Execute()) - - require.Equal(t, tt.expected, testVar) - assert.Equal(t, "yadon", testFly) - assert.Equal(t, int32(math.MaxInt32), testNumber) - assert.Equal(t, int64(math.MaxInt64), testLongNumber) - assert.Equal(t, zapcore.DebugLevel, logLevel) - } - - t.Run(fmt.Sprintf("%s_%s", tt.name, writer.ext), fn) - } - } -} - -func setEnvVar(key, val string) func() { - old := os.Getenv(key) - os.Setenv(key, val) - return func() { - os.Setenv(key, old) - } -} - -type configWriter func(dir string, config interface{}) (string, error) -type labeledWriter struct { - ext string - writeFn configWriter -} - -var configWriters = []labeledWriter{ - {ext: "json", writeFn: writeJsonConfig}, - {ext: "toml", writeFn: writeTomlConfig}, - {ext: "yml", writeFn: yamlConfigWriter(true)}, - {ext: "yaml", writeFn: yamlConfigWriter(false)}, -} - -func writeJsonConfig(dir string, config interface{}) (string, error) { - b, err := json.Marshal(config) - if err != nil { - return "", err - } - confFile := path.Join(dir, "config.json") - if err := os.WriteFile(confFile, b, os.ModePerm); err != nil { - return "", err - } - return confFile, nil -} - -func writeTomlConfig(dir string, config interface{}) (string, error) { - confFile := path.Join(dir, "config.toml") - w, err := os.OpenFile(confFile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, os.ModePerm) - if err != nil { - return "", err - } - defer w.Close() - - if err := toml.NewEncoder(w).Encode(config); err != nil { - return "", err - } - - return confFile, nil -} - -func yamlConfigWriter(shortExt bool) configWriter { - fileName := "config.yaml" - if shortExt { - fileName = "config.yml" - } - - return func(dir string, config interface{}) (string, error) { - confFile := path.Join(dir, fileName) - w, err := os.OpenFile(confFile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, os.ModePerm) - if err != nil { - return "", err - } - defer w.Close() - - if err := yaml.NewEncoder(w).Encode(config); err != nil { - return "", err - } - - return confFile, nil - } -} - -func Test_RequiredFlag(t *testing.T) { - var testVar string - program := &Program{ - Name: "test", - Opts: []Opt{ - { - DestP: &testVar, - Flag: "foo", - Required: true, - }, - }, - } - - cmd, err := NewCommand(viper.New(), program) - require.NoError(t, err) - cmd.SetArgs([]string{}) - err = cmd.Execute() - require.Error(t, err) - require.Equal(t, `required flag(s) "foo" not set`, err.Error()) -} - -func Test_ConfigPrecedence(t *testing.T) { - jsonConfig := map[string]interface{}{"log-level": zapcore.DebugLevel} - tomlConfig := 
map[string]interface{}{"log-level": zapcore.InfoLevel} - yamlConfig := map[string]interface{}{"log-level": zapcore.WarnLevel} - ymlConfig := map[string]interface{}{"log-level": zapcore.ErrorLevel} - - tests := []struct { - name string - writeJson bool - writeToml bool - writeYaml bool - writeYml bool - expectedLevel zapcore.Level - }{ - { - name: "JSON is used if present", - writeJson: true, - writeToml: true, - writeYaml: true, - writeYml: true, - expectedLevel: zapcore.DebugLevel, - }, - { - name: "TOML is used if no JSON present", - writeJson: false, - writeToml: true, - writeYaml: true, - writeYml: true, - expectedLevel: zapcore.InfoLevel, - }, - { - name: "YAML is used if no JSON or TOML present", - writeJson: false, - writeToml: false, - writeYaml: true, - writeYml: true, - expectedLevel: zapcore.WarnLevel, - }, - { - name: "YML is used if no other option present", - writeJson: false, - writeToml: false, - writeYaml: false, - writeYml: true, - expectedLevel: zapcore.ErrorLevel, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - testDir := t.TempDir() - defer setEnvVar("TEST_CONFIG_PATH", testDir)() - - if tt.writeJson { - _, err := writeJsonConfig(testDir, jsonConfig) - require.NoError(t, err) - } - if tt.writeToml { - _, err := writeTomlConfig(testDir, tomlConfig) - require.NoError(t, err) - } - if tt.writeYaml { - _, err := yamlConfigWriter(false)(testDir, yamlConfig) - require.NoError(t, err) - } - if tt.writeYml { - _, err := yamlConfigWriter(true)(testDir, ymlConfig) - require.NoError(t, err) - } - - var logLevel zapcore.Level - program := &Program{ - Name: "test", - Opts: []Opt{ - { - DestP: &logLevel, - Flag: "log-level", - }, - }, - Run: func() error { return nil }, - } - - cmd, err := NewCommand(viper.New(), program) - require.NoError(t, err) - cmd.SetArgs([]string{}) - require.NoError(t, cmd.Execute()) - - require.Equal(t, tt.expectedLevel, logLevel) - } - - t.Run(tt.name, fn) - } -} - -func Test_ConfigPathDotDirectory(t *testing.T) { - testDir := t.TempDir() - - tests := []struct { - name string - dir string - }{ - { - name: "dot at start", - dir: ".directory", - }, - { - name: "dot in middle", - dir: "config.d", - }, - { - name: "dot at end", - dir: "forgotmyextension.", - }, - } - - config := map[string]string{ - "foo": "bar", - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - configDir := filepath.Join(testDir, tc.dir) - require.NoError(t, os.Mkdir(configDir, 0700)) - - _, err := writeTomlConfig(configDir, config) - require.NoError(t, err) - defer setEnvVar("TEST_CONFIG_PATH", configDir)() - - var foo string - program := &Program{ - Name: "test", - Opts: []Opt{ - { - DestP: &foo, - Flag: "foo", - }, - }, - Run: func() error { return nil }, - } - - cmd, err := NewCommand(viper.New(), program) - require.NoError(t, err) - cmd.SetArgs([]string{}) - require.NoError(t, cmd.Execute()) - - require.Equal(t, "bar", foo) - }) - } -} - -func Test_LoadConfigCwd(t *testing.T) { - testDir := t.TempDir() - - pwd, err := os.Getwd() - require.NoError(t, err) - defer os.Chdir(pwd) - - require.NoError(t, os.Chdir(testDir)) - - config := map[string]string{ - "foo": "bar", - } - _, err = writeJsonConfig(testDir, config) - require.NoError(t, err) - - var foo string - program := &Program{ - Name: "test", - Opts: []Opt{ - { - DestP: &foo, - Flag: "foo", - }, - }, - Run: func() error { return nil }, - } - - cmd, err := NewCommand(viper.New(), program) - require.NoError(t, err) - cmd.SetArgs([]string{}) - require.NoError(t, cmd.Execute()) - - 
require.Equal(t, "bar", foo) -} diff --git a/kit/errors/errors.go b/kit/errors/errors.go deleted file mode 100644 index 8c12720cd18..00000000000 --- a/kit/errors/errors.go +++ /dev/null @@ -1,107 +0,0 @@ -package errors - -import ( - "fmt" - "net/http" -) - -// TODO: move to base directory - -const ( - // InternalError indicates an unexpected error condition. - InternalError = 1 - // MalformedData indicates malformed input, such as unparsable JSON. - MalformedData = 2 - // InvalidData indicates that data is well-formed, but invalid. - InvalidData = 3 - // Forbidden indicates a forbidden operation. - Forbidden = 4 - // NotFound indicates a resource was not found. - NotFound = 5 -) - -// Error indicates an error with a reference code and an HTTP status code. -type Error struct { - Reference int `json:"referenceCode"` - Code int `json:"statusCode"` - Err string `json:"err"` -} - -// Error implements the error interface. -func (e Error) Error() string { - return e.Err -} - -// Errorf constructs an Error with the given reference code and format. -func Errorf(ref int, format string, i ...interface{}) error { - return Error{ - Reference: ref, - Err: fmt.Sprintf(format, i...), - } -} - -// New creates a new error with a message and error code. -func New(msg string, ref ...int) error { - refCode := InternalError - if len(ref) == 1 { - refCode = ref[0] - } - return Error{ - Reference: refCode, - Err: msg, - } -} - -func Wrap(err error, msg string, ref ...int) error { - if err == nil { - return nil - } - e, ok := err.(Error) - if ok { - refCode := e.Reference - if len(ref) == 1 { - refCode = ref[0] - } - return Error{ - Reference: refCode, - Code: e.Code, - Err: fmt.Sprintf("%s: %s", msg, e.Err), - } - } - refCode := InternalError - if len(ref) == 1 { - refCode = ref[0] - } - return Error{ - Reference: refCode, - Err: fmt.Sprintf("%s: %s", msg, err.Error()), - } -} - -// InternalErrorf constructs an InternalError with the given format. -func InternalErrorf(format string, i ...interface{}) error { - return Errorf(InternalError, format, i...) -} - -// MalformedDataf constructs a MalformedData error with the given format. -func MalformedDataf(format string, i ...interface{}) error { - return Errorf(MalformedData, format, i...) -} - -// InvalidDataf constructs an InvalidData error with the given format. -func InvalidDataf(format string, i ...interface{}) error { - return Errorf(InvalidData, format, i...) -} - -// Forbiddenf constructs a Forbidden error with the given format. -func Forbiddenf(format string, i ...interface{}) error { - return Errorf(Forbidden, format, i...) -} - -func BadRequestError(msg string) error { - return Error{ - Reference: InvalidData, - Code: http.StatusBadRequest, - Err: msg, - } -} diff --git a/kit/errors/list.go b/kit/errors/list.go deleted file mode 100644 index c4636135c7b..00000000000 --- a/kit/errors/list.go +++ /dev/null @@ -1,59 +0,0 @@ -package errors - -import ( - "errors" - "strings" -) - -// List represents a list of errors. -type List struct { - errs []error - err error // cached error -} - -// Append adds err to the errors list. -func (l *List) Append(err error) { - l.errs = append(l.errs, err) - l.err = nil -} - -// AppendString adds a new error that formats as the given text. -func (l *List) AppendString(text string) { - l.errs = append(l.errs, errors.New(text)) - l.err = nil -} - -// Clear removes all the previously appended errors from the list. 
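As a quick illustration of how this accumulator is meant to be used (a sketch; the error messages and the aliased import name are made up):

```go
package main

import (
	"errors"
	"fmt"

	kiterrors "github.com/influxdata/influxdb/v2/kit/errors"
)

func main() {
	var l kiterrors.List

	// Collect every validation problem instead of stopping at the first one.
	l.AppendString("bucket name is required")
	l.Append(errors.New("retention period must be positive"))

	// Err joins everything appended so far, one message per line,
	// or returns nil if nothing was appended.
	if err := l.Err(); err != nil {
		fmt.Println(err)
	}

	// Clear resets the list so it can be reused; Err returns nil again.
	l.Clear()
	fmt.Println(l.Err() == nil)
}
```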
-func (l *List) Clear() { - for i := range l.errs { - l.errs[i] = nil - } - l.errs = l.errs[:0] - l.err = nil -} - -// Err returns an error composed of the list of errors, separated by a new line, or nil if no errors -// were appended. -func (l *List) Err() error { - if len(l.errs) == 0 { - return nil - } - - if l.err == nil { - switch len(l.errs) { - case 1: - l.err = l.errs[0] - - default: - var sb strings.Builder - sb.WriteString(l.errs[0].Error()) - for _, err := range l.errs[1:] { - sb.WriteRune('\n') - sb.WriteString(err.Error()) - } - l.err = errors.New(sb.String()) - } - } - - return l.err -} diff --git a/kit/feature/_codegen/main.go b/kit/feature/_codegen/main.go deleted file mode 100644 index ee27f27dd9c..00000000000 --- a/kit/feature/_codegen/main.go +++ /dev/null @@ -1,271 +0,0 @@ -package main - -import ( - "bytes" - "flag" - "fmt" - "go/format" - "io" - "os" - "strings" - "text/template" - - "github.com/Masterminds/sprig" - "github.com/influxdata/influxdb/v2/kit/feature" - yaml "gopkg.in/yaml.v2" -) - -const tmpl = `// Code generated by the feature package; DO NOT EDIT. - -package feature - -{{ .Qualify | import }} - -{{ range $_, $flag := .Flags }} -var {{ $flag.Key }} = {{ $.Qualify | package }}{{ $flag.Default | maker }}( - {{ $flag.Name | quote }}, - {{ $flag.Key | quote }}, - {{ $flag.Contact | quote }}, - {{ $flag.Default | conditionalQuote }}, - {{ $.Qualify | package }}{{ $flag.Lifetime | lifetime }}, - {{ $flag.Expose }}, -) - -// {{ $flag.Name | replace " " "_" | camelcase }} - {{ $flag.Description }} -func {{ $flag.Name | replace " " "_" | camelcase }}() {{ $.Qualify | package }}{{ $flag.Default | flagType }} { - return {{ $flag.Key }} -} -{{ end }} - -var all = []{{ .Qualify | package }}Flag{ -{{ range $_, $flag := .Flags }} {{ $flag.Key }}, -{{ end }}} - -var byKey = map[string]{{ $.Qualify | package }}Flag{ -{{ range $_, $flag := .Flags }} {{ $flag.Key | quote }}: {{ $flag.Key }}, -{{ end }}} -` - -type flagConfig struct { - Name string - Description string - Key string - Default interface{} - Contact string - Lifetime feature.Lifetime - Expose bool -} - -func (f flagConfig) Valid() error { - var problems []string - if f.Key == "" { - problems = append(problems, "missing key") - } - if f.Contact == "" { - problems = append(problems, "missing contact") - } - if f.Default == nil { - problems = append(problems, "missing default") - } - if f.Description == "" { - problems = append(problems, "missing description") - } - - if len(problems) > 0 { - name := f.Name - if name == "" { - if f.Key != "" { - name = f.Key - } else { - name = "anonymous" - } - } - // e.g. 
"my flag: missing key; missing default" - return fmt.Errorf("%s: %s\n", name, strings.Join(problems, "; ")) - } - return nil -} - -type flagValidationError struct { - errs []error -} - -func newFlagValidationError(errs []error) *flagValidationError { - if len(errs) == 0 { - return nil - } - return &flagValidationError{errs} -} - -func (e *flagValidationError) Error() string { - var s strings.Builder - s.WriteString("flag validation error: \n") - for _, err := range e.errs { - s.WriteString(err.Error()) - } - return s.String() -} - -func validate(flags []flagConfig) error { - var ( - errs []error - seen = make(map[string]bool, len(flags)) - ) - for _, flag := range flags { - if err := flag.Valid(); err != nil { - errs = append(errs, err) - } else if _, repeated := seen[flag.Key]; repeated { - errs = append(errs, fmt.Errorf("duplicate flag key '%s'\n", flag.Key)) - } - seen[flag.Key] = true - } - if len(errs) != 0 { - return newFlagValidationError(errs) - } - - return nil -} - -var argv = struct { - in, out *string - qualify *bool -}{ - in: flag.String("in", "", "flag configuration path"), - out: flag.String("out", "", "flag generation destination path"), - qualify: flag.Bool("qualify", false, "qualify types with imported package name"), -} - -func main() { - if err := run(); err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } - os.Exit(0) -} - -func run() error { - flag.Parse() - - in, err := os.Open(*argv.in) - if err != nil { - return err - } - defer in.Close() - - configuration, err := io.ReadAll(in) - if err != nil { - return err - } - - var flags []flagConfig - err = yaml.Unmarshal(configuration, &flags) - if err != nil { - return err - } - err = validate(flags) - if err != nil { - return err - } - - t, err := template.New("flags").Funcs(templateFunctions()).Parse(tmpl) - if err != nil { - return err - } - - out, err := os.Create(*argv.out) - if err != nil { - return err - } - defer out.Close() - - var ( - buf = new(bytes.Buffer) - vars = struct { - Qualify bool - Flags []flagConfig - }{ - Qualify: *argv.qualify, - Flags: flags, - } - ) - if err := t.Execute(buf, vars); err != nil { - return err - } - - raw, err := io.ReadAll(buf) - if err != nil { - return err - } - - formatted, err := format.Source(raw) - if err != nil { - return err - } - - _, err = out.Write(formatted) - return err -} - -func templateFunctions() template.FuncMap { - functions := sprig.TxtFuncMap() - - functions["lifetime"] = func(t interface{}) string { - switch t { - case feature.Permanent: - return "Permanent" - default: - return "Temporary" - } - } - - functions["conditionalQuote"] = func(t interface{}) string { - switch t.(type) { - case string: - return fmt.Sprintf("%q", t) - default: - return fmt.Sprintf("%v", t) - } - } - - functions["flagType"] = func(t interface{}) string { - switch t.(type) { - case bool: - return "BoolFlag" - case float64: - return "FloatFlag" - case int: - return "IntFlag" - default: - return "StringFlag" - } - } - - functions["maker"] = func(t interface{}) string { - switch t.(type) { - case bool: - return "MakeBoolFlag" - case float64: - return "MakeFloatFlag" - case int: - return "MakeIntFlag" - default: - return "MakeStringFlag" - } - } - - functions["package"] = func(t interface{}) string { - if t.(bool) { - return "feature." 
- } - return "" - } - - functions["import"] = func(t interface{}) string { - if t.(bool) { - return "import \"github.com/influxdata/influxdb/v2/kit/feature\"" - } - return "" - } - - return functions -} diff --git a/kit/feature/doc.go b/kit/feature/doc.go deleted file mode 100644 index 3ba2bdad3b6..00000000000 --- a/kit/feature/doc.go +++ /dev/null @@ -1,80 +0,0 @@ -// Package feature provides feature flagging capabilities for InfluxDB servers. -// This document describes this package and how it is used to control -// experimental features in `influxd`. -// -// Flags are configured in `flags.yml` at the top of this repository. -// Running `make flags` generates Go code based on this configuration -// to programmatically test flag values in a given request context. -// Boolean flags are the most common case, but integers, floats and -// strings are supported for more complicated experiments. -// -// The `Flagger` interface is the crux of this package. -// It computes a map of feature flag values for a given request context. -// The default implementation always returns the flag default configured -// in `flags.yml`. The override implementation allows an operator to -// override feature flag defaults at startup. Changing these overrides -// requires a restart. -// -// In `influxd`, a `Flagger` instance is provided to a `Handler` middleware -// configured to intercept all API requests and annotate their request context -// with a map of feature flags. -// -// A flag can opt in to be exposed externally in `flags.yml`. If exposed, -// this flag will be included in the response from the `/api/v2/flags` -// endpoint. This allows the UI and other API clients to control their -// behavior according to the flag in addition to the server itself. -// -// A concrete example to illustrate the above: -// -// I have a feature called "My Feature" that will involve turning on new code -// in both the UI and the server. -// -// First, I add an entry to `flags.yml`. -// -// ```yaml -// - name: My Feature -// description: My feature is awesome -// key: myFeature -// default: false -// expose: true -// contact: My Name -// -// ``` -// -// My flag type is inferred to be boolean by my default of `false` when I run -// `make flags` and the `feature` package now includes `func MyFeature() BoolFlag`. -// -// # I use this to control my backend code with -// -// ```go -// -// if feature.MyFeature().Enabled(ctx) { -// // new code... -// } else { -// // old code... -// } -// -// ``` -// -// and the `/api/v2/flags` response provides the same information to the frontend. -// -// ```json -// -// { -// "myFeature": false -// } -// -// ``` -// -// While `false` by default, I can turn on my experimental feature by starting -// my server with a flag override. -// -// ``` -// env INFLUXD_FEATURE_FLAGS="{\"flag1\":\"value1\",\"flag2\":\"value2\"}" influxd -// ``` -// -// ``` -// influxd --feature-flags flag1=value1,flag2=value2 -// ``` -package feature diff --git a/kit/feature/feature.go b/kit/feature/feature.go deleted file mode 100644 index 94bb2b62746..00000000000 --- a/kit/feature/feature.go +++ /dev/null @@ -1,141 +0,0 @@ -package feature - -import ( - "context" - "strings" - - "github.com/opentracing/opentracing-go" -) - -type contextKey string - -const featureContextKey contextKey = "influx/feature/v1" - -// Flagger returns flag values. -type Flagger interface { - // Flags returns a map of flag keys to flag values. 
- // - // If an authorization is present on the context, it may be used to compute flag - // values according to the affiliated user ID and its organization and other mappings. - // Otherwise, they should be computed generally or return a default. - // - // One or more flags may be provided to restrict the results. - // Otherwise, all flags should be computed. - Flags(context.Context, ...Flag) (map[string]interface{}, error) -} - -// Annotate the context with a map computed of computed flags. -func Annotate(ctx context.Context, f Flagger, flags ...Flag) (context.Context, error) { - computed, err := f.Flags(ctx, flags...) - if err != nil { - return nil, err - } - - span := opentracing.SpanFromContext(ctx) - if span != nil { - for k, v := range computed { - span.LogKV(k, v) - } - } - - return context.WithValue(ctx, featureContextKey, computed), nil -} - -// FlagsFromContext returns the map of flags attached to the context -// by Annotate, or nil if none is found. -func FlagsFromContext(ctx context.Context) map[string]interface{} { - v, ok := ctx.Value(featureContextKey).(map[string]interface{}) - if !ok { - return nil - } - return v -} - -type ByKeyFn func(string) (Flag, bool) - -// ExposedFlagsFromContext returns the filtered map of exposed flags attached -// to the context by Annotate, or nil if none is found. -func ExposedFlagsFromContext(ctx context.Context, byKey ByKeyFn) map[string]interface{} { - m := FlagsFromContext(ctx) - if m == nil { - return nil - } - - filtered := make(map[string]interface{}) - for k, v := range m { - if flag, found := byKey(k); found && flag.Expose() { - filtered[k] = v - } - } - - return filtered -} - -// Lifetime represents the intended lifetime of the feature flag. -// -// The zero value is Temporary, the most common case, but Permanent -// is included to mark special cases where a flag is not intended -// to be removed, e.g. enabling debug tracing for an organization. -// -// TODO(gavincabbage): This may become a stale date, which can then -// -// be used to trigger a notification to the contact when the flag -// has become stale, to encourage flag cleanup. -type Lifetime int - -const ( - // Temporary indicates a flag is intended to be removed after a feature is no longer in development. - Temporary Lifetime = iota - // Permanent indicates a flag is not intended to be removed. - Permanent -) - -// UnmarshalYAML implements yaml.Unmarshaler and interprets a case-insensitive text -// representation as a lifetime constant. -func (l *Lifetime) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - - switch strings.ToLower(s) { - case "permanent": - *l = Permanent - default: - *l = Temporary - } - - return nil -} - -type defaultFlagger struct{} - -// DefaultFlagger returns a flagger that always returns default values. -func DefaultFlagger() Flagger { - return &defaultFlagger{} -} - -// Flags returns a map of default values. It never returns an error. -func (*defaultFlagger) Flags(_ context.Context, flags ...Flag) (map[string]interface{}, error) { - if len(flags) == 0 { - flags = Flags() - } - - m := make(map[string]interface{}, len(flags)) - for _, flag := range flags { - m[flag.Key()] = flag.Default() - } - - return m, nil -} - -// Flags returns all feature flags. -func Flags() []Flag { - return all -} - -// ByKey returns the Flag corresponding to the given key. 
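A minimal sketch of the annotate-then-read flow these helpers support, using the package's own `DefaultFlagger`; the `appMetrics` key comes from the generated list further below, everything else is illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/influxdata/influxdb/v2/kit/feature"
)

func main() {
	// Compute flag values once (here: the configured defaults) and attach
	// the resulting map to the context.
	ctx, err := feature.Annotate(context.Background(), feature.DefaultFlagger())
	if err != nil {
		panic(err)
	}

	// Downstream code reads the computed map back off the context.
	for key, value := range feature.FlagsFromContext(ctx) {
		fmt.Printf("%s = %v\n", key, value)
	}

	// Individual flags can also be looked up by their generated key.
	if flag, ok := feature.ByKey("appMetrics"); ok {
		fmt.Println(flag.Key(), flag.Default())
	}
}
```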
-func ByKey(k string) (Flag, bool) { - v, found := byKey[k] - return v, found -} diff --git a/kit/feature/feature_test.go b/kit/feature/feature_test.go deleted file mode 100644 index 541a23a2ea0..00000000000 --- a/kit/feature/feature_test.go +++ /dev/null @@ -1,185 +0,0 @@ -package feature_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2/kit/feature" -) - -func Test_feature(t *testing.T) { - - cases := []struct { - name string - flag feature.Flag - err error - values map[string]interface{} - ctx context.Context - expected interface{} - }{ - { - name: "bool happy path", - flag: newFlag("test", false), - values: map[string]interface{}{ - "test": true, - }, - expected: true, - }, - { - name: "int happy path", - flag: newFlag("test", 0), - values: map[string]interface{}{ - "test": int32(42), - }, - expected: int32(42), - }, - { - name: "float happy path", - flag: newFlag("test", 0.0), - values: map[string]interface{}{ - "test": 42.42, - }, - expected: 42.42, - }, - { - name: "string happy path", - flag: newFlag("test", ""), - values: map[string]interface{}{ - "test": "restaurantattheendoftheuniverse", - }, - expected: "restaurantattheendoftheuniverse", - }, - { - name: "bool missing use default", - flag: newFlag("test", false), - expected: false, - }, - { - name: "bool missing use default true", - flag: newFlag("test", true), - expected: true, - }, - { - name: "int missing use default", - flag: newFlag("test", 65), - expected: int32(65), - }, - { - name: "float missing use default", - flag: newFlag("test", 65.65), - expected: 65.65, - }, - { - name: "string missing use default", - flag: newFlag("test", "mydefault"), - expected: "mydefault", - }, - - { - name: "bool invalid use default", - flag: newFlag("test", true), - values: map[string]interface{}{ - "test": "notabool", - }, - expected: true, - }, - { - name: "int invalid use default", - flag: newFlag("test", 42), - values: map[string]interface{}{ - "test": 99.99, - }, - expected: int32(42), - }, - { - name: "float invalid use default", - flag: newFlag("test", 42.42), - values: map[string]interface{}{ - "test": 99, - }, - expected: 42.42, - }, - { - name: "string invalid use default", - flag: newFlag("test", "restaurantattheendoftheuniverse"), - values: map[string]interface{}{ - "test": true, - }, - expected: "restaurantattheendoftheuniverse", - }, - } - - for _, test := range cases { - t.Run("flagger "+test.name, func(t *testing.T) { - flagger := testFlagsFlagger{ - m: test.values, - err: test.err, - } - - var actual interface{} - switch flag := test.flag.(type) { - case feature.BoolFlag: - actual = flag.Enabled(test.ctx, flagger) - case feature.FloatFlag: - actual = flag.Float(test.ctx, flagger) - case feature.IntFlag: - actual = flag.Int(test.ctx, flagger) - case feature.StringFlag: - actual = flag.String(test.ctx, flagger) - default: - t.Errorf("unknown flag type %T (%#v)", flag, flag) - } - - if actual != test.expected { - t.Errorf("unexpected flag value: got %v, want %v", actual, test.expected) - } - }) - - t.Run("annotate "+test.name, func(t *testing.T) { - flagger := testFlagsFlagger{ - m: test.values, - err: test.err, - } - - ctx, err := feature.Annotate(context.Background(), flagger) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - var actual interface{} - switch flag := test.flag.(type) { - case feature.BoolFlag: - actual = flag.Enabled(ctx) - case feature.FloatFlag: - actual = flag.Float(ctx) - case feature.IntFlag: - actual = flag.Int(ctx) - case feature.StringFlag: - 
actual = flag.String(ctx) - default: - t.Errorf("unknown flag type %T (%#v)", flag, flag) - } - - if actual != test.expected { - t.Errorf("unexpected flag value: got %v, want %v", actual, test.expected) - } - }) - } -} - -type testFlagsFlagger struct { - m map[string]interface{} - err error -} - -func (f testFlagsFlagger) Flags(ctx context.Context, flags ...feature.Flag) (map[string]interface{}, error) { - if f.err != nil { - return nil, f.err - } - - return f.m, nil -} - -func newFlag(key string, defaultValue interface{}) feature.Flag { - return feature.MakeFlag(key, key, "", defaultValue, feature.Temporary, false) -} diff --git a/kit/feature/flag.go b/kit/feature/flag.go deleted file mode 100644 index 7c21d8d5bb7..00000000000 --- a/kit/feature/flag.go +++ /dev/null @@ -1,217 +0,0 @@ -//go:generate go run ./_codegen/main.go --in ../../flags.yml --out ./list.go - -package feature - -import ( - "context" - "fmt" -) - -// Flag represents a generic feature flag with a key and a default. -type Flag interface { - // Key returns the programmatic backend identifier for the flag. - Key() string - // Default returns the type-agnostic zero value for the flag. - // Type-specific flag implementations may expose a typed default - // (e.g. BoolFlag includes a boolean Default field). - Default() interface{} - // Expose the flag. - Expose() bool -} - -// MakeFlag constructs a Flag. The concrete implementation is inferred from the provided default. -func MakeFlag(name, key, owner string, defaultValue interface{}, lifetime Lifetime, expose bool) Flag { - if v, ok := defaultValue.(int); ok { - defaultValue = int32(v) - } - b := MakeBase(name, key, owner, defaultValue, lifetime, expose) - switch v := defaultValue.(type) { - case bool: - return BoolFlag{b, v} - case float64: - return FloatFlag{b, v} - case int32: - return IntFlag{b, v} - case string: - return StringFlag{b, v} - default: - return StringFlag{b, fmt.Sprintf("%v", v)} - } -} - -// flag base type. -type Base struct { - // name of the flag. - name string - // key is the programmatic backend identifier for the flag. - key string - // defaultValue for the flag. - defaultValue interface{} - // owner is an individual or team responsible for the flag. - owner string - // lifetime of the feature flag. - lifetime Lifetime - // expose the flag. - expose bool -} - -var _ Flag = Base{} - -// MakeBase constructs a flag flag. -func MakeBase(name, key, owner string, defaultValue interface{}, lifetime Lifetime, expose bool) Base { - return Base{ - name: name, - key: key, - owner: owner, - defaultValue: defaultValue, - lifetime: lifetime, - expose: expose, - } -} - -// Key returns the programmatic backend identifier for the flag. -func (f Base) Key() string { - return f.key -} - -// Default returns the type-agnostic zero value for the flag. -func (f Base) Default() interface{} { - return f.defaultValue -} - -// Expose the flag. -func (f Base) Expose() bool { - return f.expose -} - -func (f Base) value(ctx context.Context, flagger ...Flagger) (interface{}, bool) { - var ( - m map[string]interface{} - ok bool - ) - if len(flagger) < 1 { - m, ok = ctx.Value(featureContextKey).(map[string]interface{}) - } else { - var err error - m, err = flagger[0].Flags(ctx, f) - ok = err == nil - } - if !ok { - return nil, false - } - - v, ok := m[f.Key()] - if !ok { - return nil, false - } - - return v, true -} - -// StringFlag implements Flag for string values. 
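Before the typed implementations, a short sketch of how `MakeFlag` picks one of them from the default value (the flag names and owner are made up):

```go
package main

import (
	"context"
	"fmt"

	"github.com/influxdata/influxdb/v2/kit/feature"
)

func main() {
	// MakeFlag infers the concrete flag type from its default; an int default
	// is converted to int32, so this yields an IntFlag.
	batchSize := feature.MakeFlag("Batch Size", "batchSize", "example-owner", 100, feature.Temporary, false)
	fmt.Printf("%T\n", batchSize) // feature.IntFlag

	// Typed constructors exist as well. Reading a flag from a context that was
	// never annotated (and with no explicit Flagger) falls back to the default.
	greeting := feature.MakeStringFlag("Greeting", "greeting", "example-owner", "hello", feature.Temporary, false)
	fmt.Println(greeting.String(context.Background())) // "hello"
}
```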
-type StringFlag struct { - Base - defaultString string -} - -var _ Flag = StringFlag{} - -// MakeStringFlag returns a string flag with the given Base and default. -func MakeStringFlag(name, key, owner string, defaultValue string, lifetime Lifetime, expose bool) StringFlag { - b := MakeBase(name, key, owner, defaultValue, lifetime, expose) - return StringFlag{b, defaultValue} -} - -// String value of the flag on the request context. -func (f StringFlag) String(ctx context.Context, flagger ...Flagger) string { - i, ok := f.value(ctx, flagger...) - if !ok { - return f.defaultString - } - s, ok := i.(string) - if !ok { - return f.defaultString - } - return s -} - -// FloatFlag implements Flag for float values. -type FloatFlag struct { - Base - defaultFloat float64 -} - -var _ Flag = FloatFlag{} - -// MakeFloatFlag returns a string flag with the given Base and default. -func MakeFloatFlag(name, key, owner string, defaultValue float64, lifetime Lifetime, expose bool) FloatFlag { - b := MakeBase(name, key, owner, defaultValue, lifetime, expose) - return FloatFlag{b, defaultValue} -} - -// Float value of the flag on the request context. -func (f FloatFlag) Float(ctx context.Context, flagger ...Flagger) float64 { - i, ok := f.value(ctx, flagger...) - if !ok { - return f.defaultFloat - } - v, ok := i.(float64) - if !ok { - return f.defaultFloat - } - return v -} - -// IntFlag implements Flag for integer values. -type IntFlag struct { - Base - defaultInt int32 -} - -var _ Flag = IntFlag{} - -// MakeIntFlag returns a string flag with the given Base and default. -func MakeIntFlag(name, key, owner string, defaultValue int32, lifetime Lifetime, expose bool) IntFlag { - b := MakeBase(name, key, owner, defaultValue, lifetime, expose) - return IntFlag{b, defaultValue} -} - -// Int value of the flag on the request context. -func (f IntFlag) Int(ctx context.Context, flagger ...Flagger) int32 { - i, ok := f.value(ctx, flagger...) - if !ok { - return f.defaultInt - } - v, ok := i.(int32) - if !ok { - return f.defaultInt - } - return v -} - -// BoolFlag implements Flag for boolean values. -type BoolFlag struct { - Base - defaultBool bool -} - -var _ Flag = BoolFlag{} - -// MakeBoolFlag returns a string flag with the given Base and default. -func MakeBoolFlag(name, key, owner string, defaultValue bool, lifetime Lifetime, expose bool) BoolFlag { - b := MakeBase(name, key, owner, defaultValue, lifetime, expose) - return BoolFlag{b, defaultValue} -} - -// Enabled indicates whether flag is true or false on the request context. -func (f BoolFlag) Enabled(ctx context.Context, flagger ...Flagger) bool { - i, ok := f.value(ctx, flagger...) - if !ok { - return f.defaultBool - } - v, ok := i.(bool) - if !ok { - return f.defaultBool - } - return v -} diff --git a/kit/feature/flux.go b/kit/feature/flux.go deleted file mode 100644 index 90498296e59..00000000000 --- a/kit/feature/flux.go +++ /dev/null @@ -1,45 +0,0 @@ -package feature - -import ( - "fmt" - - fluxfeature "github.com/influxdata/flux/dependencies/feature" -) - -type fluxFlag struct { - flag fluxfeature.Flag -} - -func (f fluxFlag) Key() string { - return f.flag.Key() -} - -func (f fluxFlag) Default() interface{} { - // Flux uses int for int flags and influxdb uses int32. - // Convert to int32 here so influxdb understands our flag. 
- switch v := f.flag.Default().(type) { - case int: - return int32(v) - default: - return v - } -} - -func (f fluxFlag) Expose() bool { - return false -} - -func (f fluxFlag) AuthenticationOptional() bool { - return true -} - -func init() { - for _, flag := range fluxfeature.Flags() { - if _, ok := byKey[flag.Key()]; ok { - panic(fmt.Errorf("duplicate feature flag defined in flux and idpe: %s", flag.Key())) - } - wrappedFlag := fluxFlag{flag: flag} - all = append(all, wrappedFlag) - byKey[flag.Key()] = wrappedFlag - } -} diff --git a/kit/feature/http_proxy.go b/kit/feature/http_proxy.go deleted file mode 100644 index ba877526644..00000000000 --- a/kit/feature/http_proxy.go +++ /dev/null @@ -1,73 +0,0 @@ -package feature - -import ( - "context" - "net/http" - "net/http/httputil" - "net/url" - - "go.uber.org/zap" -) - -// HTTPProxy is an HTTP proxy that's guided by a feature flag. If the feature flag -// presented to it is enabled, it will perform the proxying behavior. Otherwise -// it will be a no-op. -type HTTPProxy struct { - proxy *httputil.ReverseProxy - logger *zap.Logger - enabler ProxyEnabler -} - -// NewHTTPProxy returns a new Proxy. -func NewHTTPProxy(dest *url.URL, logger *zap.Logger, enabler ProxyEnabler) *HTTPProxy { - return &HTTPProxy{ - proxy: newReverseProxy(dest, enabler.Key()), - logger: logger, - enabler: enabler, - } -} - -// Do performs the proxying. It returns whether or not the request was proxied. -func (p *HTTPProxy) Do(w http.ResponseWriter, r *http.Request) bool { - if p.enabler.Enabled(r.Context()) { - p.proxy.ServeHTTP(w, r) - return true - } - return false -} - -const ( - // headerProxyFlag is the HTTP header for enriching the request and response - // with the feature flag key that precipitated the proxying behavior. - headerProxyFlag = "X-Platform-Proxy-Flag" -) - -// newReverseProxy creates a new single-host reverse proxy. -func newReverseProxy(dest *url.URL, enablerKey string) *httputil.ReverseProxy { - proxy := httputil.NewSingleHostReverseProxy(dest) - - defaultDirector := proxy.Director - proxy.Director = func(r *http.Request) { - defaultDirector(r) - - r.Header.Set(headerProxyFlag, enablerKey) - - // Override r.Host to prevent us sending this request back to ourselves. - // A bug in the stdlib causes this value to be preferred over the - // r.URL.Host (which is set in the default Director) if r.Host isn't - // empty (which it isn't). - // https://github.com/golang/go/issues/28168 - r.Host = dest.Host - } - proxy.ModifyResponse = func(r *http.Response) error { - r.Header.Set(headerProxyFlag, enablerKey) - return nil - } - return proxy -} - -// ProxyEnabler is a boolean feature flag. 
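A sketch of wiring the proxy into a handler. A generated `BoolFlag` already satisfies `ProxyEnabler` (it provides both `Key` and `Enabled`); the destination URL, port, and flag shown here are invented for the example:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/influxdata/influxdb/v2/kit/feature"
	"go.uber.org/zap"
)

func main() {
	dest, _ := url.Parse("http://localhost:9999") // hypothetical destination service

	// Gate proxying behind a boolean feature flag.
	gate := feature.MakeBoolFlag("Proxy Writes", "proxyWrites", "example-owner", false, feature.Temporary, false)
	proxy := feature.NewHTTPProxy(dest, zap.NewNop(), gate)

	http.HandleFunc("/write", func(w http.ResponseWriter, r *http.Request) {
		// When the flag is enabled on the request context the call is proxied;
		// otherwise fall through to the local implementation.
		if proxy.Do(w, r) {
			return
		}
		fmt.Fprintln(w, "handled locally")
	})

	_ = http.ListenAndServe(":8080", nil)
}
```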
-type ProxyEnabler interface { - Key() string - Enabled(ctx context.Context, fs ...Flagger) bool -} diff --git a/kit/feature/http_proxy_test.go b/kit/feature/http_proxy_test.go deleted file mode 100644 index 0853375d962..00000000000 --- a/kit/feature/http_proxy_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package feature - -import ( - "context" - "fmt" - "io" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "go.uber.org/zap" - "go.uber.org/zap/zaptest" -) - -const ( - destBody = "hello from destination" - srcBody = "hello from source" - flagKey = "fancy-feature" -) - -func TestHTTPProxy_Proxying(t *testing.T) { - en := enabler{key: flagKey, state: true} - logger := zaptest.NewLogger(t) - resp, err := testHTTPProxy(logger, en) - if err != nil { - t.Error(err) - } - - proxyFlag := resp.Header.Get("X-Platform-Proxy-Flag") - if proxyFlag != flagKey { - t.Error("X-Platform-Proxy-Flag header not populated") - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - t.Error(err) - } - - bodyStr := string(body) - if bodyStr != destBody { - t.Errorf("expected body of destination handler, but got: %q", bodyStr) - } -} - -func TestHTTPProxy_DefaultBehavior(t *testing.T) { - en := enabler{key: flagKey, state: false} - logger := zaptest.NewLogger(t) - resp, err := testHTTPProxy(logger, en) - if err != nil { - t.Error(err) - } - - proxyFlag := resp.Header.Get("X-Platform-Proxy-Flag") - if proxyFlag != "" { - t.Error("X-Platform-Proxy-Flag header populated") - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - t.Error(err) - } - - bodyStr := string(body) - if bodyStr != srcBody { - t.Errorf("expected body of source handler, but got: %q", bodyStr) - } -} - -func TestHTTPProxy_RequestHeader(t *testing.T) { - h := func(w http.ResponseWriter, r *http.Request) { - proxyFlag := r.Header.Get("X-Platform-Proxy-Flag") - if proxyFlag != flagKey { - t.Error("expected X-Proxy-Flag to contain feature flag key") - } - } - - s := httptest.NewServer(http.HandlerFunc(h)) - defer s.Close() - - sURL, err := url.Parse(s.URL) - if err != nil { - t.Error(err) - } - - logger := zaptest.NewLogger(t) - en := enabler{key: flagKey, state: true} - proxy := NewHTTPProxy(sURL, logger, en) - - w := httptest.NewRecorder() - r := httptest.NewRequest(http.MethodGet, "http://example.com/foo", nil) - srcHandler(proxy)(w, r) -} - -func testHTTPProxy(logger *zap.Logger, enabler ProxyEnabler) (*http.Response, error) { - s := httptest.NewServer(http.HandlerFunc(destHandler)) - defer s.Close() - - sURL, err := url.Parse(s.URL) - if err != nil { - return nil, err - } - - proxy := NewHTTPProxy(sURL, logger, enabler) - - w := httptest.NewRecorder() - r := httptest.NewRequest(http.MethodGet, "http://example.com/foo", nil) - srcHandler(proxy)(w, r) - - return w.Result(), nil -} - -func destHandler(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, destBody) -} - -func srcHandler(proxy *HTTPProxy) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if proxy.Do(w, r) { - return - } - fmt.Fprint(w, srcBody) - } -} - -type enabler struct { - key string - state bool -} - -func (e enabler) Key() string { - return e.key -} - -func (e enabler) Enabled(context.Context, ...Flagger) bool { - return e.state -} diff --git a/kit/feature/list.go b/kit/feature/list.go deleted file mode 100644 index 19f67215639..00000000000 --- a/kit/feature/list.go +++ /dev/null @@ -1,201 +0,0 @@ -// Code generated by the feature package; DO NOT EDIT. 
- -package feature - -var appMetrics = MakeBoolFlag( - "App Metrics", - "appMetrics", - "Bucky, Monitoring Team", - false, - Permanent, - true, -) - -// AppMetrics - Send UI Telementry to Tools cluster - should always be false in OSS -func AppMetrics() BoolFlag { - return appMetrics -} - -var groupWindowAggregateTranspose = MakeBoolFlag( - "Group Window Aggregate Transpose", - "groupWindowAggregateTranspose", - "Query Team", - false, - Temporary, - false, -) - -// GroupWindowAggregateTranspose - Enables the GroupWindowAggregateTransposeRule for all enabled window aggregates -func GroupWindowAggregateTranspose() BoolFlag { - return groupWindowAggregateTranspose -} - -var newLabels = MakeBoolFlag( - "New Label Package", - "newLabels", - "Alirie Gray", - false, - Temporary, - false, -) - -// NewLabelPackage - Enables the refactored labels api -func NewLabelPackage() BoolFlag { - return newLabels -} - -var memoryOptimizedFill = MakeBoolFlag( - "Memory Optimized Fill", - "memoryOptimizedFill", - "Query Team", - false, - Temporary, - false, -) - -// MemoryOptimizedFill - Enable the memory optimized fill() -func MemoryOptimizedFill() BoolFlag { - return memoryOptimizedFill -} - -var memoryOptimizedSchemaMutation = MakeBoolFlag( - "Memory Optimized Schema Mutation", - "memoryOptimizedSchemaMutation", - "Query Team", - false, - Temporary, - false, -) - -// MemoryOptimizedSchemaMutation - Enable the memory optimized schema mutation functions -func MemoryOptimizedSchemaMutation() BoolFlag { - return memoryOptimizedSchemaMutation -} - -var queryTracing = MakeBoolFlag( - "Query Tracing", - "queryTracing", - "Query Team", - false, - Permanent, - false, -) - -// QueryTracing - Turn on query tracing for queries that are sampled -func QueryTracing() BoolFlag { - return queryTracing -} - -var injectLatestSuccessTime = MakeBoolFlag( - "Inject Latest Success Time", - "injectLatestSuccessTime", - "Compute Team", - false, - Temporary, - false, -) - -// InjectLatestSuccessTime - Inject the latest successful task run timestamp into a Task query extern when executing. 
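// A brief consumption sketch for the generated accessors in this file: a handler reads
// a flag's computed value from the request context. The Enabled(ctx) method is assumed
// from the package's flag types (ProxyEnabler above describes the same shape); the
// handler, route, and port are illustrative.
package main

import (
	"fmt"
	"net/http"

	"github.com/influxdata/influxdb/v2/kit/feature"
)

func dashboardHandler(w http.ResponseWriter, r *http.Request) {
	// The feature middleware (kit/feature/middleware.go) annotates r.Context(), so
	// this check reads the already-computed value for the request.
	if feature.NewDashboardAutorefresh().Enabled(r.Context()) {
		fmt.Fprintln(w, "new autorefresh controls")
		return
	}
	fmt.Fprintln(w, "legacy autorefresh controls")
}

func main() {
	http.HandleFunc("/dashboards", dashboardHandler)
	_ = http.ListenAndServe(":8086", nil)
}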
-func InjectLatestSuccessTime() BoolFlag { - return injectLatestSuccessTime -} - -var enforceOrgDashboardLimits = MakeBoolFlag( - "Enforce Organization Dashboard Limits", - "enforceOrgDashboardLimits", - "Compute Team", - false, - Temporary, - false, -) - -// EnforceOrganizationDashboardLimits - Enforces the default limit params for the dashboards api when orgs are set -func EnforceOrganizationDashboardLimits() BoolFlag { - return enforceOrgDashboardLimits -} - -var timeFilterFlags = MakeBoolFlag( - "Time Filter Flags", - "timeFilterFlags", - "Compute Team", - false, - Temporary, - true, -) - -// TimeFilterFlags - Filter task run list based on before and after flags -func TimeFilterFlags() BoolFlag { - return timeFilterFlags -} - -var cursorAtEOF = MakeBoolFlag( - "Default Monaco Selection to EOF", - "cursorAtEOF", - "Monitoring Team", - false, - Temporary, - true, -) - -// DefaultMonacoSelectionToEof - Positions the cursor at the end of the line(s) when using the monaco editor -func DefaultMonacoSelectionToEof() BoolFlag { - return cursorAtEOF -} - -var refreshSingleCell = MakeBoolFlag( - "Refresh Single Cell", - "refreshSingleCell", - "Monitoring Team", - true, - Temporary, - true, -) - -// RefreshSingleCell - Refresh a single cell on the dashboard rather than the entire dashboard -func RefreshSingleCell() BoolFlag { - return refreshSingleCell -} - -var newAutoRefresh = MakeBoolFlag( - "New Dashboard Autorefresh", - "newAutoRefresh", - "Monitoring Team", - true, - Temporary, - true, -) - -// NewDashboardAutorefresh - Enables the new dashboard autorefresh controls in the UI -func NewDashboardAutorefresh() BoolFlag { - return newAutoRefresh -} - -var all = []Flag{ - appMetrics, - groupWindowAggregateTranspose, - newLabels, - memoryOptimizedFill, - memoryOptimizedSchemaMutation, - queryTracing, - injectLatestSuccessTime, - enforceOrgDashboardLimits, - timeFilterFlags, - cursorAtEOF, - refreshSingleCell, - newAutoRefresh, -} - -var byKey = map[string]Flag{ - "appMetrics": appMetrics, - "groupWindowAggregateTranspose": groupWindowAggregateTranspose, - "newLabels": newLabels, - "memoryOptimizedFill": memoryOptimizedFill, - "memoryOptimizedSchemaMutation": memoryOptimizedSchemaMutation, - "queryTracing": queryTracing, - "injectLatestSuccessTime": injectLatestSuccessTime, - "enforceOrgDashboardLimits": enforceOrgDashboardLimits, - "timeFilterFlags": timeFilterFlags, - "cursorAtEOF": cursorAtEOF, - "refreshSingleCell": refreshSingleCell, - "newAutoRefresh": newAutoRefresh, -} diff --git a/kit/feature/middleware.go b/kit/feature/middleware.go deleted file mode 100644 index 20d374c7460..00000000000 --- a/kit/feature/middleware.go +++ /dev/null @@ -1,70 +0,0 @@ -package feature - -import ( - "context" - "encoding/json" - "net/http" - - "go.uber.org/zap" -) - -// Handler is a middleware that annotates the context with a map of computed feature flags. -// To accurately compute identity-scoped flags, this middleware should be executed after any -// authorization middleware has annotated the request context with an authorizer. -type Handler struct { - log *zap.Logger - next http.Handler - flagger Flagger - flags []Flag -} - -// NewHandler returns a configured feature flag middleware that will annotate request context -// with a computed map of the given flags using the provided Flagger. 
-func NewHandler(log *zap.Logger, flagger Flagger, flags []Flag, next http.Handler) http.Handler { - return &Handler{ - log: log, - next: next, - flagger: flagger, - flags: flags, - } -} - -// ServeHTTP annotates the request context with a map of computed feature flags before -// continuing to serve the request. -func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - ctx, err := Annotate(r.Context(), h.flagger, h.flags...) - if err != nil { - h.log.Warn("Unable to annotate context with feature flags", zap.Error(err)) - } else { - r = r.WithContext(ctx) - } - - if h.next != nil { - h.next.ServeHTTP(w, r) - } -} - -// HTTPErrorHandler is an influxdb.HTTPErrorHandler. It's defined here instead -// of referencing the other interface type, because we want to try our best to -// avoid cyclical dependencies when feature package is used throughout the -// codebase. -type HTTPErrorHandler interface { - HandleHTTPError(ctx context.Context, err error, w http.ResponseWriter) -} - -// NewFlagsHandler returns a handler that returns the map of computed feature flags on the request context. -func NewFlagsHandler(errorHandler HTTPErrorHandler, byKey ByKeyFn) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.WriteHeader(http.StatusOK) - - var ( - ctx = r.Context() - flags = ExposedFlagsFromContext(ctx, byKey) - ) - if err := json.NewEncoder(w).Encode(flags); err != nil { - errorHandler.HandleHTTPError(ctx, err, w) - } - } - return http.HandlerFunc(fn) -} diff --git a/kit/feature/middleware_test.go b/kit/feature/middleware_test.go deleted file mode 100644 index 73e3bd05ba4..00000000000 --- a/kit/feature/middleware_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package feature_test - -import ( - "bytes" - "context" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2/kit/feature" - "go.uber.org/zap/zaptest" -) - -func Test_Handler(t *testing.T) { - var ( - w = &httptest.ResponseRecorder{} - r = httptest.NewRequest(http.MethodGet, "http://nowhere.test", new(bytes.Buffer)). - WithContext(context.Background()) - - original = r.Context() - ) - - handler := &checkHandler{t: t, f: func(t *testing.T, r *http.Request) { - if r.Context() == original { - t.Error("expected annotated context") - } - }} - - subject := feature.NewHandler(zaptest.NewLogger(t), feature.DefaultFlagger(), feature.Flags(), handler) - - subject.ServeHTTP(w, r) - - if !handler.called { - t.Error("expected handler to be called") - } -} - -type checkHandler struct { - t *testing.T - f func(t *testing.T, r *http.Request) - called bool -} - -func (h *checkHandler) ServeHTTP(_ http.ResponseWriter, r *http.Request) { - h.called = true - h.f(h.t, r) -} diff --git a/kit/feature/override/override.go b/kit/feature/override/override.go deleted file mode 100644 index abc782743a4..00000000000 --- a/kit/feature/override/override.go +++ /dev/null @@ -1,83 +0,0 @@ -package override - -import ( - "context" - "fmt" - "strconv" - "strings" - - "github.com/influxdata/influxdb/v2/kit/feature" -) - -// Flagger can override default flag values. -type Flagger struct { - overrides map[string]string - byKey feature.ByKeyFn -} - -// Make a Flagger that returns defaults with any overrides parsed from the string. -func Make(overrides map[string]string, byKey feature.ByKeyFn) (Flagger, error) { - if byKey == nil { - byKey = feature.ByKey - } - - // Check all provided override keys correspond to an existing Flag. 
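// A minimal wiring sketch for the Handler above: wrap the API mux so every request's
// context carries computed flag values before handlers run. DefaultFlagger and Flags
// come from this package (both also appear in the middleware test below); the mux,
// route, and port are illustrative.
package main

import (
	"fmt"
	"net/http"

	"github.com/influxdata/influxdb/v2/kit/feature"
	"go.uber.org/zap"
)

func main() {
	logger, _ := zap.NewDevelopment()

	mux := http.NewServeMux()
	mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "pong")
	})

	// Downstream handlers can now read feature flags from r.Context().
	wrapped := feature.NewHandler(logger, feature.DefaultFlagger(), feature.Flags(), mux)
	_ = http.ListenAndServe(":8086", wrapped)
}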
- var missing []string - for k := range overrides { - if _, found := byKey(k); !found { - missing = append(missing, k) - } - } - if len(missing) > 0 { - return Flagger{}, fmt.Errorf("configured overrides for non-existent flags: %s", strings.Join(missing, ",")) - } - - return Flagger{ - overrides: overrides, - byKey: byKey, - }, nil -} - -// Flags returns a map of default values with overrides applied. It never returns an error. -func (f Flagger) Flags(_ context.Context, flags ...feature.Flag) (map[string]interface{}, error) { - if len(flags) == 0 { - flags = feature.Flags() - } - - m := make(map[string]interface{}, len(flags)) - for _, flag := range flags { - if s, overridden := f.overrides[flag.Key()]; overridden { - iface, err := f.coerce(s, flag) - if err != nil { - return nil, err - } - m[flag.Key()] = iface - } else { - m[flag.Key()] = flag.Default() - } - } - - return m, nil -} - -func (f Flagger) coerce(s string, flag feature.Flag) (iface interface{}, err error) { - if base, ok := flag.(feature.Base); ok { - flag, _ = f.byKey(base.Key()) - } - - switch flag.Default().(type) { - case bool: - iface, err = strconv.ParseBool(s) - case int32: - iface, err = strconv.Atoi(s) - case float64: - iface, err = strconv.ParseFloat(s, 64) - default: - iface = s - } - - if err != nil { - return nil, fmt.Errorf("coercing string %q based on flag type %T: %v", s, flag, err) - } - return -} diff --git a/kit/feature/override/override_test.go b/kit/feature/override/override_test.go deleted file mode 100644 index 031ed98200a..00000000000 --- a/kit/feature/override/override_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package override - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2/kit/feature" -) - -func TestFlagger(t *testing.T) { - - cases := []struct { - name string - env map[string]string - defaults []feature.Flag - expected map[string]interface{} - expectMakeErr bool - expectFlagsErr bool - byKey feature.ByKeyFn - }{ - { - name: "enabled happy path filtering", - env: map[string]string{ - "flag1": "new1", - "flag3": "new3", - }, - defaults: []feature.Flag{ - newFlag("flag0", "original0"), - newFlag("flag1", "original1"), - newFlag("flag2", "original2"), - newFlag("flag3", "original3"), - newFlag("flag4", "original4"), - }, - byKey: newByKey(map[string]feature.Flag{ - "flag0": newFlag("flag0", "original0"), - "flag1": newFlag("flag1", "original1"), - "flag2": newFlag("flag2", "original2"), - "flag3": newFlag("flag3", "original3"), - "flag4": newFlag("flag4", "original4"), - }), - expected: map[string]interface{}{ - "flag0": "original0", - "flag1": "new1", - "flag2": "original2", - "flag3": "new3", - "flag4": "original4", - }, - }, - { - name: "enabled happy path types", - env: map[string]string{ - "intflag": "43", - "floatflag": "43.43", - "boolflag": "true", - }, - defaults: []feature.Flag{ - newFlag("intflag", 42), - newFlag("floatflag", 42.42), - newFlag("boolflag", false), - }, - byKey: newByKey(map[string]feature.Flag{ - "intflag": newFlag("intflag", 42), - "floatflag": newFlag("floatflag", 43.43), - "boolflag": newFlag("boolflag", false), - }), - expected: map[string]interface{}{ - "intflag": 43, - "floatflag": 43.43, - "boolflag": true, - }, - }, - { - name: "type coerce error", - env: map[string]string{ - "key": "not_an_int", - }, - defaults: []feature.Flag{ - newFlag("key", 42), - }, - byKey: newByKey(map[string]feature.Flag{ - "key": newFlag("key", 42), - }), - expectFlagsErr: true, - }, - { - name: "typed base flags", - env: map[string]string{ - "flag1": "411", 
- "flag2": "new2", - "flag3": "true", - }, - defaults: []feature.Flag{ - newBaseFlag("flag0", "original0"), - newBaseFlag("flag1", 41), - newBaseFlag("flag2", "original2"), - newBaseFlag("flag3", false), - newBaseFlag("flag4", "original4"), - }, - byKey: newByKey(map[string]feature.Flag{ - "flag0": newFlag("flag0", "original0"), - "flag1": newFlag("flag1", 41), - "flag2": newFlag("flag2", "original2"), - "flag3": newFlag("flag3", false), - "flag4": newFlag("flag4", "original4"), - }), - expected: map[string]interface{}{ - "flag0": "original0", - "flag1": 411, - "flag2": "new2", - "flag3": true, - "flag4": "original4", - }, - }, - { - name: "override for non-existent flag", - env: map[string]string{ - "dne": "foobar", - }, - defaults: []feature.Flag{ - newBaseFlag("key", "value"), - }, - byKey: newByKey(map[string]feature.Flag{ - "key": newFlag("key", "value"), - }), - expectMakeErr: true, - }, - } - - for _, test := range cases { - t.Run(test.name, func(t *testing.T) { - subject, err := Make(test.env, test.byKey) - if err != nil { - if test.expectMakeErr { - return - } - t.Fatalf("unexpected error making Flagger: %v", err) - } - - computed, err := subject.Flags(context.Background(), test.defaults...) - if err != nil { - if test.expectFlagsErr { - return - } - t.Fatalf("unexpected error calling Flags: %v", err) - } - - if len(computed) != len(test.expected) { - t.Fatalf("incorrect number of flags computed: expected %d, got %d", len(test.expected), len(computed)) - } - - // check for extra or incorrect keys - for k, v := range computed { - if xv, found := test.expected[k]; !found { - t.Errorf("unexpected key %s", k) - } else if v != xv { - t.Errorf("incorrect value for key %s: expected %v [%T], got %v [%T]", k, xv, xv, v, v) - } - } - - // check for missing keys - for k := range test.expected { - if _, found := computed[k]; !found { - t.Errorf("missing expected key %s", k) - } - } - }) - } -} - -func newFlag(key string, defaultValue interface{}) feature.Flag { - return feature.MakeFlag(key, key, "", defaultValue, feature.Temporary, false) -} - -func newBaseFlag(key string, defaultValue interface{}) feature.Base { - return feature.MakeBase(key, key, "", defaultValue, feature.Temporary, false) -} - -func newByKey(m map[string]feature.Flag) feature.ByKeyFn { - return func(k string) (feature.Flag, bool) { - v, found := m[k] - return v, found - } -} diff --git a/kit/io/limited_read_closer.go b/kit/io/limited_read_closer.go deleted file mode 100644 index 71f1ff14f7f..00000000000 --- a/kit/io/limited_read_closer.go +++ /dev/null @@ -1,59 +0,0 @@ -package io - -import ( - "errors" - "io" -) - -var ErrReadLimitExceeded = errors.New("read limit exceeded") - -// LimitedReadCloser wraps an io.ReadCloser in limiting behavior using -// io.LimitedReader. It allows us to obtain the limit error at the time of close -// instead of just when writing. -type LimitedReadCloser struct { - R io.ReadCloser // underlying reader - N int64 // max bytes remaining - err error - closed bool - limitExceeded bool -} - -// NewLimitedReadCloser returns a new LimitedReadCloser. 
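// A short sketch of the override Flagger defined above: string overrides (for example
// from server configuration) are validated against the known flags by Make, and Flags
// returns defaults with those overrides applied. Passing nil for byKey falls back to
// feature.ByKey, exactly as Make does; "appMetrics" is one of the generated flags in
// kit/feature/list.go.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/influxdata/influxdb/v2/kit/feature/override"
)

func main() {
	overrides := map[string]string{
		"appMetrics": "true", // strings are coerced to the flag's default type
	}

	flagger, err := override.Make(overrides, nil)
	if err != nil {
		log.Fatal(err) // e.g. an override for a flag that does not exist
	}

	computed, err := flagger.Flags(context.Background())
	if err != nil {
		log.Fatal(err) // e.g. a value that cannot be coerced
	}
	fmt.Println(computed["appMetrics"]) // true
}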
-func NewLimitedReadCloser(r io.ReadCloser, n int64) *LimitedReadCloser { - return &LimitedReadCloser{ - R: r, - N: n, - } -} - -func (l *LimitedReadCloser) Read(p []byte) (n int, err error) { - if l.N <= 0 { - l.limitExceeded = true - return 0, io.EOF - } - if int64(len(p)) > l.N { - p = p[0:l.N] - } - n, err = l.R.Read(p) - l.N -= int64(n) - return -} - -// Close returns an ErrReadLimitExceeded when the wrapped reader exceeds the set -// limit for number of bytes. This is safe to call more than once but not -// concurrently. -func (l *LimitedReadCloser) Close() (err error) { - if l.limitExceeded { - l.err = ErrReadLimitExceeded - } - if l.closed { - // Close has already been called. - return l.err - } - if err := l.R.Close(); err != nil && l.err == nil { - l.err = err - } - // Prevent l.closer.Close from being called again. - l.closed = true - return l.err -} diff --git a/kit/io/limited_read_closer_test.go b/kit/io/limited_read_closer_test.go deleted file mode 100644 index 09965f9921a..00000000000 --- a/kit/io/limited_read_closer_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package io - -import ( - "bytes" - "errors" - "io" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestLimitedReadCloser_Exceeded(t *testing.T) { - b := &closer{Reader: bytes.NewBufferString("howdy")} - rc := NewLimitedReadCloser(b, 3) - - out, err := io.ReadAll(rc) - require.NoError(t, err) - assert.Equal(t, []byte("how"), out) - assert.Equal(t, ErrReadLimitExceeded, rc.Close()) -} - -func TestLimitedReadCloser_Happy(t *testing.T) { - b := &closer{Reader: bytes.NewBufferString("ho")} - rc := NewLimitedReadCloser(b, 2) - - out, err := io.ReadAll(rc) - require.NoError(t, err) - assert.Equal(t, []byte("ho"), out) - assert.Nil(t, err) -} - -func TestLimitedReadCloseWithErrorAndLimitExceeded(t *testing.T) { - b := &closer{ - Reader: bytes.NewBufferString("howdy"), - err: errors.New("some error"), - } - rc := NewLimitedReadCloser(b, 3) - - out, err := io.ReadAll(rc) - require.NoError(t, err) - assert.Equal(t, []byte("how"), out) - // LimitExceeded error trumps the close error. - assert.Equal(t, ErrReadLimitExceeded, rc.Close()) -} - -func TestLimitedReadCloseWithError(t *testing.T) { - closeErr := errors.New("some error") - b := &closer{ - Reader: bytes.NewBufferString("howdy"), - err: closeErr, - } - rc := NewLimitedReadCloser(b, 10) - - out, err := io.ReadAll(rc) - require.NoError(t, err) - assert.Equal(t, []byte("howdy"), out) - assert.Equal(t, closeErr, rc.Close()) -} - -func TestMultipleCloseOnlyClosesOnce(t *testing.T) { - closeErr := errors.New("some error") - b := &closer{ - Reader: bytes.NewBufferString("howdy"), - err: closeErr, - } - rc := NewLimitedReadCloser(b, 10) - - out, err := io.ReadAll(rc) - require.NoError(t, err) - assert.Equal(t, []byte("howdy"), out) - assert.Equal(t, closeErr, rc.Close()) - assert.Equal(t, closeErr, rc.Close()) - assert.Equal(t, 1, b.closeCount) -} - -type closer struct { - io.Reader - err error - closeCount int -} - -func (c *closer) Close() error { - c.closeCount++ - return c.err -} diff --git a/kit/metric/client.go b/kit/metric/client.go deleted file mode 100644 index ca31abec9e6..00000000000 --- a/kit/metric/client.go +++ /dev/null @@ -1,151 +0,0 @@ -package metric - -import ( - "time" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/prometheus/client_golang/prometheus" -) - -// REDClient is a metrics client for collection RED metrics. 
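// A usage sketch for LimitedReadCloser above: cap how much of a request body will be
// read and surface the overrun when the body is closed. The 1 MiB limit, route, and
// port are illustrative.
package main

import (
	"errors"
	stdio "io"
	"net/http"

	kitio "github.com/influxdata/influxdb/v2/kit/io"
)

func writeHandler(w http.ResponseWriter, r *http.Request) {
	body := kitio.NewLimitedReadCloser(r.Body, 1<<20) // at most 1 MiB

	payload, err := stdio.ReadAll(body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	// The limit error is reported at Close time, not while reading.
	if err := body.Close(); errors.Is(err, kitio.ErrReadLimitExceeded) {
		http.Error(w, "request body too large", http.StatusRequestEntityTooLarge)
		return
	}

	_ = payload // hand the payload to the write path in a real handler
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/write", writeHandler)
	_ = http.ListenAndServe(":8086", nil)
}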
-type REDClient struct { - metrics []metricCollector -} - -// New creates a new REDClient. -func New(reg prometheus.Registerer, service string, opts ...ClientOptFn) *REDClient { - opt := metricOpts{ - namespace: "service", - service: service, - counterMetrics: map[string]VecOpts{ - "call_total": { - Help: "Number of calls", - LabelNames: []string{"method"}, - CounterFn: func(vec *prometheus.CounterVec, o CollectFnOpts) { - vec.With(prometheus.Labels{"method": o.Method}).Inc() - }, - }, - "error_total": { - Help: "Number of errors encountered", - LabelNames: []string{"method", "code"}, - CounterFn: func(vec *prometheus.CounterVec, o CollectFnOpts) { - if o.Err != nil { - vec.With(prometheus.Labels{ - "method": o.Method, - "code": errors.ErrorCode(o.Err), - }).Inc() - } - }, - }, - }, - histogramMetrics: map[string]VecOpts{ - "duration": { - Help: "Duration of calls", - LabelNames: []string{"method"}, - HistogramFn: func(vec *prometheus.HistogramVec, o CollectFnOpts) { - vec. - With(prometheus.Labels{"method": o.Method}). - Observe(time.Since(o.Start).Seconds()) - }, - }, - }, - } - for _, o := range opts { - o(&opt) - } - - client := new(REDClient) - for metricName, vecOpts := range opt.counterMetrics { - client.metrics = append(client.metrics, &counter{ - fn: vecOpts.CounterFn, - CounterVec: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: opt.namespace, - Subsystem: opt.serviceName(), - Name: metricName, - Help: vecOpts.Help, - }, vecOpts.LabelNames), - }) - } - - for metricName, vecOpts := range opt.histogramMetrics { - client.metrics = append(client.metrics, &histogram{ - fn: vecOpts.HistogramFn, - HistogramVec: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: opt.namespace, - Subsystem: opt.serviceName(), - Name: metricName, - Help: vecOpts.Help, - }, vecOpts.LabelNames), - }) - } - - reg.MustRegister(client.collectors()...) - - return client -} - -type RecordFn func(err error, opts ...func(opts *CollectFnOpts)) error - -// RecordAdditional provides an extension to the base method, err data provided -// to the metrics. -func RecordAdditional(props map[string]interface{}) func(opts *CollectFnOpts) { - return func(opts *CollectFnOpts) { - opts.AdditionalProps = props - } -} - -// Record returns a record fn that is called on any given return err. If an error is encountered -// it will register the err metric. The err is never altered. 
-func (c *REDClient) Record(method string) RecordFn { - start := time.Now() - return func(err error, opts ...func(opts *CollectFnOpts)) error { - opt := CollectFnOpts{ - Method: method, - Start: start, - Err: err, - } - for _, o := range opts { - o(&opt) - } - - for _, metric := range c.metrics { - metric.collect(opt) - } - - return err - } -} - -func (c *REDClient) collectors() []prometheus.Collector { - var collectors []prometheus.Collector - for _, metric := range c.metrics { - collectors = append(collectors, metric) - } - return collectors -} - -type metricCollector interface { - prometheus.Collector - - collect(o CollectFnOpts) -} - -type counter struct { - *prometheus.CounterVec - - fn CounterFn -} - -func (c *counter) collect(o CollectFnOpts) { - c.fn(c.CounterVec, o) -} - -type histogram struct { - *prometheus.HistogramVec - - fn HistogramFn -} - -func (h *histogram) collect(o CollectFnOpts) { - h.fn(h.HistogramVec, o) -} diff --git a/kit/metric/metrics_options.go b/kit/metric/metrics_options.go deleted file mode 100644 index b9bbd7d883e..00000000000 --- a/kit/metric/metrics_options.go +++ /dev/null @@ -1,84 +0,0 @@ -package metric - -import ( - "fmt" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -type ( - // CollectFnOpts provides arguments to the collect operation of a metric. - CollectFnOpts struct { - Method string - Start time.Time - Err error - AdditionalProps map[string]interface{} - } - - CounterFn func(vec *prometheus.CounterVec, o CollectFnOpts) - - HistogramFn func(vec *prometheus.HistogramVec, o CollectFnOpts) - - // VecOpts expands on the - VecOpts struct { - Name string - Help string - LabelNames []string - - CounterFn CounterFn - HistogramFn HistogramFn - } -) - -type metricOpts struct { - namespace string - service string - serviceSuffix string - counterMetrics map[string]VecOpts - histogramMetrics map[string]VecOpts -} - -func (o metricOpts) serviceName() string { - if o.serviceSuffix != "" { - return fmt.Sprintf("%s_%s", o.service, o.serviceSuffix) - } - return o.service -} - -// ClientOptFn is an option used by a metric middleware. -type ClientOptFn func(*metricOpts) - -// WithVec sets a new counter vector to be collected. -func WithVec(opts VecOpts) ClientOptFn { - return func(o *metricOpts) { - if opts.CounterFn != nil { - if o.counterMetrics == nil { - o.counterMetrics = make(map[string]VecOpts) - } - o.counterMetrics[opts.Name] = opts - } - } -} - -// WithSuffix returns a metric option that applies a suffix to the service name of the metric. 
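// A condensed sketch of the RED client above: wrap a service call so each invocation
// records call count, errors, and duration for the "bucket" service. findBucket and
// the method name are stand-ins for a real service method.
package main

import (
	"errors"
	"fmt"

	"github.com/influxdata/influxdb/v2/kit/metric"
	"github.com/prometheus/client_golang/prometheus"
)

func findBucket(name string) error {
	if name == "" {
		return errors.New("bucket name required")
	}
	return nil
}

func main() {
	reg := prometheus.NewRegistry()
	client := metric.New(reg, "bucket")

	// Record captures the start time; invoking the returned closure with the final
	// error increments call_total, error_total (when err != nil), and duration.
	rec := client.Record("find_bucket")
	err := findBucket("telegraf")
	err = rec(err)

	fmt.Println(err) // the original error is returned unaltered
}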
-func WithSuffix(suffix string) ClientOptFn { - return func(opts *metricOpts) { - opts.serviceSuffix = suffix - } -} - -func ApplyMetricOpts(opts ...ClientOptFn) *metricOpts { - o := metricOpts{} - for _, opt := range opts { - opt(&o) - } - return &o -} - -func (o *metricOpts) ApplySuffix(prefix string) string { - if o.serviceSuffix != "" { - return fmt.Sprintf("%s_%s", prefix, o.serviceSuffix) - } - return prefix -} diff --git a/kit/migration/errors.go b/kit/migration/errors.go deleted file mode 100644 index c0d259033e5..00000000000 --- a/kit/migration/errors.go +++ /dev/null @@ -1,14 +0,0 @@ -package migration - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -func ErrInvalidMigration(n string) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf(`DB contains record of unknown migration %q - if you are downgrading from a more recent version of influxdb, please run the "influxd downgrade" command from that version to revert your metadata to be compatible with this version prior to starting influxd.`, n), - } -} diff --git a/kit/platform/errors/errors.go b/kit/platform/errors/errors.go deleted file mode 100644 index 3d3f7e73664..00000000000 --- a/kit/platform/errors/errors.go +++ /dev/null @@ -1,271 +0,0 @@ -package errors - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "strings" -) - -// Some error code constant, ideally we want define common platform codes here -// projects on use platform's error, should have their own central place like this. -// Any time this set of constants changes, you must also update the swagger for Error.properties.code.enum. -const ( - EInternal = "internal error" - ENotImplemented = "not implemented" - ENotFound = "not found" - EConflict = "conflict" // action cannot be performed - EInvalid = "invalid" // validation failed - EUnprocessableEntity = "unprocessable entity" // data type is correct, but out of range - EEmptyValue = "empty value" - EUnavailable = "unavailable" - EForbidden = "forbidden" - ETooManyRequests = "too many requests" - EUnauthorized = "unauthorized" - EMethodNotAllowed = "method not allowed" - ETooLarge = "request too large" -) - -// Error is the error struct of platform. -// -// Errors may have error codes, human-readable messages, -// and a logical stack trace. -// -// The Code targets automated handlers so that recovery can occur. -// Msg is used by the system operator to help diagnose and fix the problem. -// Op and Err chain errors together in a logical stack trace to -// further help operators. -// -// To create a simple error, -// -// &Error{ -// Code:ENotFound, -// } -// -// To show where the error happens, add Op. -// -// &Error{ -// Code: ENotFound, -// Op: "bolt.FindUserByID" -// } -// -// To show an error with a unpredictable value, add the value in Msg. -// -// &Error{ -// Code: EConflict, -// Message: fmt.Sprintf("organization with name %s already exist", aName), -// } -// -// To show an error wrapped with another error. -// -// &Error{ -// Code:EInternal, -// Err: err, -// }. -type Error struct { - Code string - Msg string - Op string - Err error -} - -// NewError returns an instance of an error. -func NewError(options ...func(*Error)) *Error { - err := &Error{} - for _, o := range options { - o(err) - } - - return err -} - -// WithErrorErr sets the err on the error. -func WithErrorErr(err error) func(*Error) { - return func(e *Error) { - e.Err = err - } -} - -// WithErrorCode sets the code on the error. 
-func WithErrorCode(code string) func(*Error) { - return func(e *Error) { - e.Code = code - } -} - -// WithErrorMsg sets the message on the error. -func WithErrorMsg(msg string) func(*Error) { - return func(e *Error) { - e.Msg = msg - } -} - -// WithErrorOp sets the message on the error. -func WithErrorOp(op string) func(*Error) { - return func(e *Error) { - e.Op = op - } -} - -// Error implements the error interface by writing out the recursive messages. -func (e *Error) Error() string { - if e.Msg != "" && e.Err != nil { - var b strings.Builder - b.WriteString(e.Msg) - b.WriteString(": ") - b.WriteString(e.Err.Error()) - return b.String() - } else if e.Msg != "" { - return e.Msg - } else if e.Err != nil { - return e.Err.Error() - } - return fmt.Sprintf("<%s>", e.Code) -} - -// ErrorCode returns the code of the root error, if available; otherwise returns EINTERNAL. -func ErrorCode(err error) string { - if err == nil { - return "" - } - - e, ok := err.(*Error) - if !ok { - return EInternal - } - - if e == nil { - return "" - } - - if e.Code != "" { - return e.Code - } - - if e.Err != nil { - return ErrorCode(e.Err) - } - - return EInternal -} - -// ErrorOp returns the op of the error, if available; otherwise return empty string. -func ErrorOp(err error) string { - if err == nil { - return "" - } - - e, ok := err.(*Error) - if !ok { - return "" - } - - if e == nil { - return "" - } - - if e.Op != "" { - return e.Op - } - - if e.Err != nil { - return ErrorOp(e.Err) - } - - return "" -} - -// ErrorMessage returns the human-readable message of the error, if available. -// Otherwise returns a generic error message. -func ErrorMessage(err error) string { - if err == nil { - return "" - } - - e, ok := err.(*Error) - if !ok { - return "An internal error has occurred." - } - - if e == nil { - return "" - } - - if e.Msg != "" { - return e.Msg - } - - if e.Err != nil { - return ErrorMessage(e.Err) - } - - return "An internal error has occurred." -} - -// errEncode an JSON encoding helper that is needed to handle the recursive stack of errors. -type errEncode struct { - Code string `json:"code"` // Code is the machine-readable error code. - Msg string `json:"message,omitempty"` // Msg is a human-readable message. - Op string `json:"op,omitempty"` // Op describes the logical code operation during error. - Err interface{} `json:"error,omitempty"` // Err is a stack of additional errors. -} - -// MarshalJSON recursively marshals the stack of Err. -func (e *Error) MarshalJSON() (result []byte, err error) { - ee := errEncode{ - Code: e.Code, - Msg: e.Msg, - Op: e.Op, - } - if e.Err != nil { - if _, ok := e.Err.(*Error); ok { - _, err := e.Err.(*Error).MarshalJSON() - if err != nil { - return result, err - } - ee.Err = e.Err - } else { - ee.Err = e.Err.Error() - } - } - return json.Marshal(ee) -} - -// UnmarshalJSON recursively unmarshals the error stack. 
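// A small sketch of the option-style constructor and helpers above: wrap a failure
// with a code, message, and operation, then branch on ErrorCode at the call site.
// findBucket and the op string are illustrative.
package main

import (
	"fmt"

	errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors"
)

func findBucket(name string) error {
	return errors2.NewError(
		errors2.WithErrorCode(errors2.ENotFound),
		errors2.WithErrorMsg(fmt.Sprintf("bucket %q not found", name)),
		errors2.WithErrorOp("sketch.findBucket"),
	)
}

func main() {
	err := findBucket("telegraf")

	// Callers branch on the machine-readable code rather than matching messages.
	if errors2.ErrorCode(err) == errors2.ENotFound {
		fmt.Println("not found:", errors2.ErrorMessage(err))
	}
}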
-func (e *Error) UnmarshalJSON(b []byte) (err error) { - ee := new(errEncode) - err = json.Unmarshal(b, ee) - e.Code = ee.Code - e.Msg = ee.Msg - e.Op = ee.Op - e.Err = decodeInternalError(ee.Err) - return err -} - -func decodeInternalError(target interface{}) error { - if errStr, ok := target.(string); ok { - return errors.New(errStr) - } - if internalErrMap, ok := target.(map[string]interface{}); ok { - internalErr := new(Error) - if code, ok := internalErrMap["code"].(string); ok { - internalErr.Code = code - } - if msg, ok := internalErrMap["message"].(string); ok { - internalErr.Msg = msg - } - if op, ok := internalErrMap["op"].(string); ok { - internalErr.Op = op - } - internalErr.Err = decodeInternalError(internalErrMap["error"]) - return internalErr - } - return nil -} - -// HTTPErrorHandler is the interface to handle http error. -type HTTPErrorHandler interface { - HandleHTTPError(ctx context.Context, err error, w http.ResponseWriter) -} diff --git a/kit/platform/errors/errors.md b/kit/platform/errors/errors.md deleted file mode 100644 index 73c2bd06c1f..00000000000 --- a/kit/platform/errors/errors.md +++ /dev/null @@ -1,86 +0,0 @@ -# errors.go - -This is inspired from Ben Johnson's blog post [Failure is Your Domain](https://middlemost.com/failure-is-your-domain/) - -## The Error struct - -```go - - type Error struct { - Code string - Msg string - Op string - Err error - } -``` - - * Code is the machine readable code, for reference purpose. All the codes should be a constant string. For example. `const ENotFound = "source not found"`. - - * Msg is the human readable message for end user. For example, `Your credit card is declined.` - - * Op is the logical Operator, should be a constant defined inside the function. For example: "bolt.UserCreate". - - * Err is the embed error. You may embed either a third party error or and platform.Error. - -## Use Case Example - -We implement the following interface - -```go - - type OrganizationService interface { - FindOrganizationByID(ctx context.Context, id ID) (*Organization, error) - } - - func (c *Client)FindOrganizationByID(ctx context.Context, id platform.ID) (*platform.Organization, error) { - var o *platform.Organization - const op = "bolt.FindOrganizationByID" - err := c.db.View(func(tx *bolt.Tx) error { - org, err := c.findOrganizationByID(ctx, tx, id) - if err != nil { - return err - } - o = org - return nil - }) - - if err != nil { - return nil, &platform.Error{ - Code: platform.ENotFound, - Op: op, - Err: err, - } - } - return o, nil - } -``` - -To check the error code - -```go - - if platform.ErrorCode(err) == platform.ENotFound { - ... 
- } -``` - -To serialize the error - -```go - - b, err := json.Marshal(err) -``` - -To deserialize the error - -```go - - e := new(platform.Error) - err := json.Unmarshal(b, e) -``` - - - - - - diff --git a/kit/platform/errors/errors_test.go b/kit/platform/errors/errors_test.go deleted file mode 100644 index e9f9ae14c47..00000000000 --- a/kit/platform/errors/errors_test.go +++ /dev/null @@ -1,300 +0,0 @@ -package errors_test - -import ( - "encoding/json" - "errors" - "fmt" - "testing" - - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -const EFailedToGetStorageHost = "failed to get the storage host" - -func TestErrorMsg(t *testing.T) { - cases := []struct { - name string - err error - msg string - }{ - { - name: "simple error", - err: &errors2.Error{Code: errors2.ENotFound}, - msg: "", - }, - { - name: "with message", - err: &errors2.Error{ - Code: errors2.ENotFound, - Msg: "bucket not found", - }, - msg: "bucket not found", - }, - { - name: "with a third party error and no message", - err: &errors2.Error{ - Code: EFailedToGetStorageHost, - Err: errors.New("empty value"), - }, - msg: "empty value", - }, - { - name: "with a third party error and a message", - err: &errors2.Error{ - Code: EFailedToGetStorageHost, - Msg: "failed to get storage hosts", - Err: errors.New("empty value"), - }, - msg: "failed to get storage hosts: empty value", - }, - { - name: "with an internal error and no message", - err: &errors2.Error{ - Code: EFailedToGetStorageHost, - Err: &errors2.Error{ - Code: errors2.EEmptyValue, - Msg: "empty value", - }, - }, - msg: "empty value", - }, - { - name: "with an internal error and a message", - err: &errors2.Error{ - Code: EFailedToGetStorageHost, - Msg: "failed to get storage hosts", - Err: &errors2.Error{ - Code: errors2.EEmptyValue, - Msg: "empty value", - }, - }, - msg: "failed to get storage hosts: empty value", - }, - } - for _, c := range cases { - if c.msg != c.err.Error() { - t.Errorf("%s failed, want %s, got %s", c.name, c.msg, c.err.Error()) - } - } -} - -func TestErrorMessage(t *testing.T) { - cases := []struct { - name string - err error - want string - }{ - { - name: "nil error", - }, - { - name: "nil error of type *platform.Error", - err: (*errors2.Error)(nil), - }, - { - name: "simple error", - err: &errors2.Error{Msg: "simple error"}, - want: "simple error", - }, - { - name: "embedded error", - err: &errors2.Error{Err: &errors2.Error{Msg: "embedded error"}}, - want: "embedded error", - }, - { - name: "default error", - err: errors.New("s"), - want: "An internal error has occurred.", - }, - } - for _, c := range cases { - if result := errors2.ErrorMessage(c.err); c.want != result { - t.Errorf("%s failed, want %s, got %s", c.name, c.want, result) - } - } -} - -func TestErrorOp(t *testing.T) { - cases := []struct { - name string - err error - want string - }{ - { - name: "nil error", - }, - { - name: "nil error of type *platform.Error", - err: (*errors2.Error)(nil), - }, - { - name: "simple error", - err: &errors2.Error{Op: "op1"}, - want: "op1", - }, - { - name: "embedded error", - err: &errors2.Error{Op: "op1", Err: &errors2.Error{Code: errors2.EInvalid}}, - want: "op1", - }, - { - name: "embedded error without op in root level", - err: &errors2.Error{Err: &errors2.Error{Code: errors2.EInvalid, Op: "op2"}}, - want: "op2", - }, - { - name: "default error", - err: errors.New("s"), - want: "", - }, - } - for _, c := range cases { - if result := errors2.ErrorOp(c.err); c.want != result { - t.Errorf("%s failed, want %s, got %s", 
c.name, c.want, result) - } - } -} -func TestErrorCode(t *testing.T) { - cases := []struct { - name string - err error - want string - }{ - { - name: "nil error", - }, - { - name: "nil error of type *platform.Error", - err: (*errors2.Error)(nil), - }, - { - name: "simple error", - err: &errors2.Error{Code: errors2.ENotFound}, - want: errors2.ENotFound, - }, - { - name: "embedded error", - err: &errors2.Error{Code: errors2.ENotFound, Err: &errors2.Error{Code: errors2.EInvalid}}, - want: errors2.ENotFound, - }, - { - name: "embedded error with root level code", - err: &errors2.Error{Err: &errors2.Error{Code: errors2.EInvalid}}, - want: errors2.EInvalid, - }, - { - name: "default error", - err: errors.New("s"), - want: errors2.EInternal, - }, - } - for _, c := range cases { - if result := errors2.ErrorCode(c.err); c.want != result { - t.Errorf("%s failed, want %s, got %s", c.name, c.want, result) - } - } -} - -func TestJSON(t *testing.T) { - cases := []struct { - name string - err *errors2.Error - encoded string - }{ - { - name: "simple error", - err: &errors2.Error{Code: errors2.ENotFound}, - encoded: `{"code":"not found"}`, - }, - { - name: "with op", - err: &errors2.Error{ - Code: errors2.ENotFound, - Op: "bolt.FindAuthorizationByID", - }, - encoded: `{"code":"not found","op":"bolt.FindAuthorizationByID"}`, - }, - { - name: "with op and value", - err: &errors2.Error{ - Code: errors2.ENotFound, - Op: "bolt/FindAuthorizationByID", - Msg: fmt.Sprintf("with ID %d", 323), - }, - encoded: `{"code":"not found","message":"with ID 323","op":"bolt/FindAuthorizationByID"}`, - }, - { - name: "with a third party error", - err: &errors2.Error{ - Code: EFailedToGetStorageHost, - Op: "cmd/fluxd.injectDeps", - Err: errors.New("empty value"), - }, - encoded: `{"code":"failed to get the storage host","op":"cmd/fluxd.injectDeps","error":"empty value"}`, - }, - { - name: "with a internal error", - err: &errors2.Error{ - Code: EFailedToGetStorageHost, - Op: "cmd/fluxd.injectDeps", - Err: &errors2.Error{Code: errors2.EEmptyValue, Op: "cmd/fluxd.getStrList"}, - }, - encoded: `{"code":"failed to get the storage host","op":"cmd/fluxd.injectDeps","error":{"code":"empty value","op":"cmd/fluxd.getStrList"}}`, - }, - { - name: "with a deep internal error", - err: &errors2.Error{ - Code: EFailedToGetStorageHost, - Op: "cmd/fluxd.injectDeps", - Err: &errors2.Error{ - Code: errors2.EInvalid, - Op: "cmd/fluxd.getStrList", - Err: &errors2.Error{ - Code: errors2.EEmptyValue, - Err: errors.New("an err"), - }, - }, - }, - encoded: `{"code":"failed to get the storage host","op":"cmd/fluxd.injectDeps","error":{"code":"invalid","op":"cmd/fluxd.getStrList","error":{"code":"empty value","error":"an err"}}}`, - }, - } - for _, c := range cases { - result, err := json.Marshal(c.err) - // encode testing - if err != nil { - t.Errorf("%s encode failed, want err: %v, should be nil", c.name, err) - } - - if string(result) != c.encoded { - t.Errorf("%s encode failed, want result: %s, got %s", c.name, c.encoded, string(result)) - } - // decode testing - got := new(errors2.Error) - err = json.Unmarshal(result, got) - if err != nil { - t.Errorf("%s decode failed, want err: %v, should be nil", c.name, err) - } - decodeEqual(t, c.err, got, "decode: "+c.name) - } -} - -func decodeEqual(t *testing.T, want, result *errors2.Error, caseName string) { - if want.Code != result.Code { - t.Errorf("%s code failed, want %s, got %s", caseName, want.Code, result.Code) - } - if want.Op != result.Op { - t.Errorf("%s op failed, want %s, got %s", caseName, 
want.Op, result.Op) - } - if want.Msg != result.Msg { - t.Errorf("%s msg failed, want %s, got %s", caseName, want.Msg, result.Msg) - } - if want.Err != nil { - if _, ok := want.Err.(*errors2.Error); ok { - decodeEqual(t, want.Err.(*errors2.Error), result.Err.(*errors2.Error), caseName) - } else { - if want.Err.Error() != result.Err.Error() { - t.Errorf("%s Err failed, want %s, got %s", caseName, want.Err.Error(), result.Err.Error()) - } - } - } -} diff --git a/kit/platform/id.go b/kit/platform/id.go deleted file mode 100644 index 48d5c528a36..00000000000 --- a/kit/platform/id.go +++ /dev/null @@ -1,163 +0,0 @@ -package platform - -import ( - "database/sql/driver" - "encoding/binary" - "encoding/hex" - "strconv" - "unsafe" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// IDLength is the exact length a string (or a byte slice representing it) must have in order to be decoded into a valid ID. -const IDLength = 16 - -var ( - // ErrInvalidID signifies invalid IDs. - ErrInvalidID = &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid ID", - } - - // ErrInvalidIDLength is returned when an ID has the incorrect number of bytes. - ErrInvalidIDLength = &errors.Error{ - Code: errors.EInvalid, - Msg: "id must have a length of 16 bytes", - } -) - -// ErrCorruptID means the ID stored in the Store is corrupt. -func ErrCorruptID(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "corrupt ID provided", - Err: err, - } -} - -// ID is a unique identifier. -// -// Its zero value is not a valid ID. -type ID uint64 - -// IDGenerator represents a generator for IDs. -type IDGenerator interface { - // ID creates unique byte slice ID. - ID() ID -} - -// IDFromString creates an ID from a given string. -// -// It errors if the input string does not match a valid ID. -func IDFromString(str string) (*ID, error) { - var id ID - err := id.DecodeFromString(str) - if err != nil { - return nil, err - } - return &id, nil -} - -// InvalidID returns a zero ID. -func InvalidID() ID { - return 0 -} - -// Decode parses b as a hex-encoded byte-slice-string. -// -// It errors if the input byte slice does not have the correct length -// or if it contains all zeros. -func (i *ID) Decode(b []byte) error { - if len(b) != IDLength { - return ErrInvalidIDLength - } - - res, err := strconv.ParseUint(unsafeBytesToString(b), 16, 64) - if err != nil { - return ErrInvalidID - } - - if *i = ID(res); !i.Valid() { - return ErrInvalidID - } - return nil -} - -func unsafeBytesToString(in []byte) string { - return *(*string)(unsafe.Pointer(&in)) -} - -// DecodeFromString parses s as a hex-encoded string. -func (i *ID) DecodeFromString(s string) error { - return i.Decode([]byte(s)) -} - -// Encode converts ID to a hex-encoded byte-slice-string. -// -// It errors if the receiving ID holds its zero value. -func (i ID) Encode() ([]byte, error) { - if !i.Valid() { - return nil, ErrInvalidID - } - - b := make([]byte, hex.DecodedLen(IDLength)) - binary.BigEndian.PutUint64(b, uint64(i)) - - dst := make([]byte, hex.EncodedLen(len(b))) - hex.Encode(dst, b) - return dst, nil -} - -// Valid checks whether the receiving ID is a valid one or not. -func (i ID) Valid() bool { - return i != 0 -} - -// String returns the ID as a hex encoded string. -// -// Returns an empty string in the case the ID is invalid. -func (i ID) String() string { - enc, _ := i.Encode() - return string(enc) -} - -// GoString formats the ID the same as the String method. 
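// A quick sketch of the ID helpers defined above: parse the canonical 16-character hex
// form, inspect it, and observe that malformed input is rejected. The example IDs are
// taken from the package's own tests below.
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/v2/kit/platform"
)

func main() {
	id, err := platform.IDFromString("020f755c3c082000")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id.Valid())  // true
	fmt.Println(id.String()) // 020f755c3c082000

	// Anything that is not exactly 16 hex characters (or is all zeros) is rejected.
	if _, err := platform.IDFromString("abc"); err != nil {
		fmt.Println(err) // id must have a length of 16 bytes
	}
}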
-// Without this, when using the %#v verb, an ID would be printed as a uint64, -// so you would see e.g. 0x2def021097c6000 instead of 02def021097c6000 -// (note the leading 0x, which means the former doesn't show up in searches for the latter). -func (i ID) GoString() string { - return `"` + i.String() + `"` -} - -// MarshalText encodes i as text. -// Providing this method is a fallback for json.Marshal, -// with the added benefit that IDs encoded as map keys will be the expected string encoding, -// rather than the effective fmt.Sprintf("%d", i) that json.Marshal uses by default for integer types. -func (i ID) MarshalText() ([]byte, error) { - return i.Encode() -} - -// UnmarshalText decodes i from a byte slice. -// Providing this method is also a fallback for json.Unmarshal, -// also relevant when IDs are used as map keys. -func (i *ID) UnmarshalText(b []byte) error { - return i.Decode(b) -} - -// Value implements the database/sql Valuer interface for adding IDs to a sql database. -func (i ID) Value() (driver.Value, error) { - return i.String(), nil -} - -// Scan implements the database/sql Scanner interface for retrieving IDs from a sql database. -func (i *ID) Scan(value interface{}) error { - switch v := value.(type) { - case int64: - return i.DecodeFromString(strconv.FormatInt(v, 10)) - case string: - return i.DecodeFromString(v) - default: - return ErrInvalidID - } -} diff --git a/kit/platform/id_test.go b/kit/platform/id_test.go deleted file mode 100644 index 9b5fc44b4d6..00000000000 --- a/kit/platform/id_test.go +++ /dev/null @@ -1,252 +0,0 @@ -package platform_test - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2/kit/platform" - platformtesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestIDFromString(t *testing.T) { - tests := []struct { - name string - id string - want platform.ID - wantErr bool - err string - }{ - { - name: "Should be able to decode an all zeros ID", - id: "0000000000000000", - wantErr: true, - err: platform.ErrInvalidID.Error(), - }, - { - name: "Should be able to decode an all f ID", - id: "ffffffffffffffff", - want: platformtesting.MustIDBase16("ffffffffffffffff"), - }, - { - name: "Should be able to decode an ID", - id: "020f755c3c082000", - want: platformtesting.MustIDBase16("020f755c3c082000"), - }, - { - name: "Should not be able to decode a non hex ID", - id: "gggggggggggggggg", - wantErr: true, - err: platform.ErrInvalidID.Error(), - }, - { - name: "Should not be able to decode inputs with length less than 16 bytes", - id: "abc", - wantErr: true, - err: platform.ErrInvalidIDLength.Error(), - }, - { - name: "Should not be able to decode inputs with length greater than 16 bytes", - id: "abcdabcdabcdabcd0", - wantErr: true, - err: platform.ErrInvalidIDLength.Error(), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := platform.IDFromString(tt.id) - - // Check negative test cases - if (err != nil) && tt.wantErr { - if tt.err != err.Error() { - t.Errorf("IDFromString() errors out \"%s\", want \"%s\"", err, tt.err) - } - return - } - - // Check positive test cases - if !reflect.DeepEqual(*got, tt.want) && !tt.wantErr { - t.Errorf("IDFromString() outputs %v, want %v", got, tt.want) - } - }) - } -} - -func TestDecodeFromString(t *testing.T) { - var id platform.ID - err := id.DecodeFromString("020f755c3c082000") - if err != nil { - t.Errorf(err.Error()) - } - want := []byte{48, 50, 48, 102, 55, 53, 53, 99, 51, 99, 48, 56, 50, 48, 48, 
48} - got, _ := id.Encode() - if !bytes.Equal(want, got) { - t.Errorf("got %s not equal to wanted %s", string(got), string(want)) - } - if id.String() != "020f755c3c082000" { - t.Errorf("expecting string representation to contain the right value") - } - if !id.Valid() { - t.Errorf("expecting ID to be a valid one") - } -} - -func TestEncode(t *testing.T) { - var id platform.ID - if _, err := id.Encode(); err == nil { - t.Errorf("encoding an invalid ID should not be possible") - } - - id.DecodeFromString("5ca1ab1eba5eba11") - want := []byte{53, 99, 97, 49, 97, 98, 49, 101, 98, 97, 53, 101, 98, 97, 49, 49} - got, _ := id.Encode() - if !bytes.Equal(want, got) { - t.Errorf("encoding error") - } - if id.String() != "5ca1ab1eba5eba11" { - t.Errorf("expecting string representation to contain the right value") - } - if !id.Valid() { - t.Errorf("expecting ID to be a valid one") - } -} - -func TestDecodeFromAllZeros(t *testing.T) { - var id platform.ID - err := id.Decode(make([]byte, platform.IDLength)) - if err == nil { - t.Errorf("expecting all zeros ID to not be a valid ID") - } -} - -func TestDecodeFromShorterString(t *testing.T) { - var id platform.ID - err := id.DecodeFromString("020f75") - if err == nil { - t.Errorf("expecting shorter inputs to error") - } - if id.String() != "" { - t.Errorf("expecting invalid ID to be serialized into empty string") - } -} - -func TestDecodeFromLongerString(t *testing.T) { - var id platform.ID - err := id.DecodeFromString("020f755c3c082000aaa") - if err == nil { - t.Errorf("expecting shorter inputs to error") - } - if id.String() != "" { - t.Errorf("expecting invalid ID to be serialized into empty string") - } -} - -func TestDecodeFromEmptyString(t *testing.T) { - var id platform.ID - err := id.DecodeFromString("") - if err == nil { - t.Errorf("expecting empty inputs to error") - } - if id.String() != "" { - t.Errorf("expecting invalid ID to be serialized into empty string") - } -} - -func TestMarshalling(t *testing.T) { - var id0 platform.ID - _, err := json.Marshal(id0) - if err == nil { - t.Errorf("expecting empty ID to not be a valid one") - } - - init := "ca55e77eca55e77e" - id1, err := platform.IDFromString(init) - if err != nil { - t.Errorf(err.Error()) - } - - serialized, err := json.Marshal(id1) - if err != nil { - t.Errorf(err.Error()) - } - - var id2 platform.ID - json.Unmarshal(serialized, &id2) - - bytes1, _ := id1.Encode() - bytes2, _ := id2.Encode() - - if !bytes.Equal(bytes1, bytes2) { - t.Errorf("error marshalling/unmarshalling ID") - } - - // When used as a map key, IDs must use their string encoding. - // If you only implement json.Marshaller, they will be encoded with Go's default integer encoding. 
- b, err := json.Marshal(map[platform.ID]int{0x1234: 5678}) - if err != nil { - t.Error(err) - } - const exp = `{"0000000000001234":5678}` - if string(b) != exp { - t.Errorf("expected map to json.Marshal as %s; got %s", exp, string(b)) - } - - var idMap map[platform.ID]int - if err := json.Unmarshal(b, &idMap); err != nil { - t.Error(err) - } - if len(idMap) != 1 { - t.Errorf("expected length 1, got %d", len(idMap)) - } - if idMap[0x1234] != 5678 { - t.Errorf("unmarshalled incorrectly; exp 0x1234:5678, got %v", idMap) - } -} - -func TestValid(t *testing.T) { - var id platform.ID - if id.Valid() { - t.Errorf("expecting initial ID to be invalid") - } - - if platform.InvalidID() != 0 { - t.Errorf("expecting invalid ID to return a zero ID, thus invalid") - } -} - -func TestID_GoString(t *testing.T) { - type idGoStringTester struct { - ID platform.ID - } - var x idGoStringTester - - const idString = "02def021097c6000" - if err := x.ID.DecodeFromString(idString); err != nil { - t.Fatal(err) - } - - sharpV := fmt.Sprintf("%#v", x) - want := `platform_test.idGoStringTester{ID:"` + idString + `"}` - if sharpV != want { - t.Fatalf("bad GoString: got %q, want %q", sharpV, want) - } -} - -func BenchmarkIDEncode(b *testing.B) { - var id platform.ID - id.DecodeFromString("5ca1ab1eba5eba11") - b.ResetTimer() - for i := 0; i < b.N; i++ { - b, _ := id.Encode() - _ = b - } -} - -func BenchmarkIDDecode(b *testing.B) { - for i := 0; i < b.N; i++ { - var id platform.ID - id.DecodeFromString("5ca1ab1eba5eba11") - } -} diff --git a/kit/prom/example_test.go b/kit/prom/example_test.go deleted file mode 100644 index 44657a4ef4a..00000000000 --- a/kit/prom/example_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package prom_test - -import ( - "fmt" - "io" - "math/rand" - "net/http" - "time" - - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -// RandomHandler implements an HTTP endpoint that prints a random float, -// and it tracks prometheus metrics about the numbers it returns. -type RandomHandler struct { - // Cumulative sum of values served. - valueCounter prometheus.Counter - - // Total times page served. - serveCounter prometheus.Counter -} - -var ( - _ http.Handler = (*RandomHandler)(nil) - _ prom.PrometheusCollector = (*RandomHandler)(nil) -) - -func NewRandomHandler() *RandomHandler { - return &RandomHandler{ - valueCounter: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "value_counter", - Help: "Cumulative sum of values served.", - }), - serveCounter: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "serve_counter", - Help: "Counter of times page has been served.", - }), - } -} - -// ServeHTTP serves a random float value and updates rh's internal metrics. -func (rh *RandomHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // Increment serveCounter every time we serve a page. - rh.serveCounter.Inc() - - n := rand.Float64() - // Track the cumulative values served. - rh.valueCounter.Add(n) - - fmt.Fprintf(w, "%v", n) -} - -// PrometheusCollectors implements prom.PrometheusCollector. -func (rh *RandomHandler) PrometheusCollectors() []prometheus.Collector { - return []prometheus.Collector{rh.valueCounter, rh.serveCounter} -} - -func Example() { - // A collection of endpoints and http.Handlers. 
- handlers := map[string]http.Handler{ - "/random": NewRandomHandler(), - "/time": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, time.Now().String()) - }), - } - - // Use a local registry, not the global registry in the prometheus package. - reg := prom.NewRegistry(zap.NewNop()) - - // Build the mux out of handlers from above. - mux := http.NewServeMux() - for path, h := range handlers { - mux.Handle(path, h) - - // Only register those handlers which implement prom.PrometheusCollector. - if pc, ok := h.(prom.PrometheusCollector); ok { - reg.MustRegister(pc.PrometheusCollectors()...) - } - } - - // Add metrics to registry. - mux.Handle("/metrics", reg.HTTPHandler()) - - http.ListenAndServe("localhost:8080", mux) -} diff --git a/kit/prom/promtest/promtest.go b/kit/prom/promtest/promtest.go deleted file mode 100644 index 7ed467a15ea..00000000000 --- a/kit/prom/promtest/promtest.go +++ /dev/null @@ -1,146 +0,0 @@ -// Package promtest provides helpers for parsing and extracting prometheus metrics. -// These functions are only intended to be called from test files, -// as there is a dependency on the standard library testing package. -package promtest - -import ( - "fmt" - "io" - "net/http" - "strings" - "testing" - - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" -) - -// FromHTTPResponse parses the prometheus metrics from the given *http.Response. -// It relies on properly set response headers to correctly parse. -// It will unconditionally close the response body. -// -// This is particularly helpful when testing the output of the /metrics endpoint of a service. -// However, for comprehensive testing of metrics, it usually makes more sense to -// add collectors to a registry and call Registry.Gather to get the metrics without involving HTTP. -func FromHTTPResponse(r *http.Response) ([]*dto.MetricFamily, error) { - defer r.Body.Close() - - dec := expfmt.NewDecoder(r.Body, expfmt.ResponseFormat(r.Header)) - var mfs []*dto.MetricFamily - for { - mf := new(dto.MetricFamily) - if err := dec.Decode(mf); err != nil { - if err == io.EOF { - break - } else { - return nil, err - } - } - - mfs = append(mfs, mf) - } - - return mfs, nil -} - -// FindMetric iterates through mfs to find the first metric family matching name. -// If a metric family matches, then the metrics inside the family are searched, -// and the first metric whose labels match the given labels are returned. -// If no matches are found, FindMetric returns nil. -// -// FindMetric assumes that the labels on the metric family are well formed, -// i.e. there are no duplicate label names, and the label values are not empty strings. -func FindMetric(mfs []*dto.MetricFamily, name string, labels map[string]string) *dto.Metric { - _, m := findMetric(mfs, name, labels) - return m -} - -// MustFindMetric returns the matching metric, or if no matching metric could be found, -// it calls tb.Log with helpful output of what was actually available, before calling tb.FailNow. -func MustFindMetric(tb testing.TB, mfs []*dto.MetricFamily, name string, labels map[string]string) *dto.Metric { - tb.Helper() - - fam, m := findMetric(mfs, name, labels) - if fam == nil { - tb.Logf("metric family with name %q not found", name) - tb.Log("available names:") - for _, mf := range mfs { - tb.Logf("\t%s", mf.GetName()) - } - tb.FailNow() - return nil // Need an explicit return here for test. 
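// A compact test sketch for the helpers above: gather from a local registry and assert
// on one metric. The counter name and value are illustrative; the package's own tests
// below exercise the same helpers against richer collectors.
package promtest_sketch

import (
	"testing"

	"github.com/influxdata/influxdb/v2/kit/prom/promtest"
	"github.com/prometheus/client_golang/prometheus"
)

func TestCounterValue(t *testing.T) {
	reg := prometheus.NewRegistry()
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "my_random_counter",
		Help: "Just a random counter.",
	})
	reg.MustRegister(c)
	c.Inc()

	mfs := promtest.MustGather(t, reg)

	// nil labels match a metric that carries no labels.
	m := promtest.MustFindMetric(t, mfs, "my_random_counter", nil)
	if got := m.GetCounter().GetValue(); got != 1 {
		t.Fatalf("expected counter to be 1, got %v", got)
	}
}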
- } - - if m == nil { - tb.Logf("found metric family with name %q, but metric with labels %v not found", name, labels) - tb.Logf("available labels on metric family %q:", name) - - for _, m := range fam.Metric { - pairs := make([]string, len(m.Label)) - for i, l := range m.Label { - pairs[i] = fmt.Sprintf("%q: %q", l.GetName(), l.GetValue()) - } - tb.Logf("\t%s", strings.Join(pairs, ", ")) - } - tb.FailNow() - return nil // Need an explicit return here for test. - } - - return m -} - -// findMetric is a helper that returns the matching family and the matching metric. -// The exported FindMetric function specifically only finds the metric, not the family, -// but for test it is more helpful to identify whether the family was matched. -func findMetric(mfs []*dto.MetricFamily, name string, labels map[string]string) (*dto.MetricFamily, *dto.Metric) { - var fam *dto.MetricFamily - - for _, mf := range mfs { - if mf.GetName() == name { - fam = mf - break - } - } - - if fam == nil { - // No family matching the name. - return nil, nil - } - - for _, m := range fam.Metric { - if len(m.Label) != len(labels) { - continue - } - - match := true - for _, l := range m.Label { - if labels[l.GetName()] != l.GetValue() { - match = false - break - } - } - - if !match { - continue - } - - // All labels matched. - return fam, m - } - - // Didn't find a metric whose labels all matched. - return fam, nil -} - -// MustGather calls g.Gather and calls tb.Fatal if there was an error. -func MustGather(tb testing.TB, g prometheus.Gatherer) []*dto.MetricFamily { - tb.Helper() - - mfs, err := g.Gather() - if err != nil { - tb.Fatalf("error while gathering metrics: %v", err) - return nil // Need an explicit return here for test. - } - - return mfs -} diff --git a/kit/prom/promtest/promtest_test.go b/kit/prom/promtest/promtest_test.go deleted file mode 100644 index 760471758fe..00000000000 --- a/kit/prom/promtest/promtest_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package promtest_test - -import ( - "bytes" - "errors" - "fmt" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/kit/prom/promtest" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "go.uber.org/zap/zaptest" -) - -func helperCollectors() []prometheus.Collector { - myCounter := prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "my", - Subsystem: "random", - Name: "counter", - Help: "Just a random counter.", - }) - myGaugeVec := prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "my", - Subsystem: "random", - Name: "gaugevec", - Help: "Just a random gauge vector.", - }, []string{"label1", "label2"}) - - myCounter.Inc() - myGaugeVec.WithLabelValues("one", "two").Set(3) - - return []prometheus.Collector{myCounter, myGaugeVec} -} - -func TestFindMetric(t *testing.T) { - reg := prom.NewRegistry(zaptest.NewLogger(t)) - reg.MustRegister(helperCollectors()...) 
- - mfs, err := reg.Gather() - if err != nil { - t.Fatal(err) - } - - c := promtest.MustFindMetric(t, mfs, "my_random_counter", nil) - if got := c.GetCounter().GetValue(); got != 1 { - t.Fatalf("expected counter to be 1, got %v", got) - } - - g := promtest.MustFindMetric(t, mfs, "my_random_gaugevec", map[string]string{"label1": "one", "label2": "two"}) - if got := g.GetGauge().GetValue(); got != 3 { - t.Fatalf("expected gauge to be 3, got %v", got) - } -} - -// fakeT helps us to assert that MustFindMetric calls FailNow when the metric isn't found. -type fakeT struct { - // Embed a T so we don't have to reimplement everything. - // It's fine to leave T nil - fakeT will panic if calling a method we haven't implemented. - *testing.T - - logBuf bytes.Buffer - - failed bool -} - -func (t *fakeT) Helper() {} - -func (t *fakeT) Log(args ...interface{}) { - fmt.Fprint(&t.logBuf, args...) - t.logBuf.WriteString("\n") -} - -func (t *fakeT) Logf(format string, args ...interface{}) { - fmt.Fprintf(&t.logBuf, format, args...) - t.logBuf.WriteString("\n") -} - -func (t *fakeT) FailNow() { - t.failed = true -} - -func (t *fakeT) Fatalf(format string, args ...interface{}) { - t.Logf(format, args...) - t.FailNow() -} - -func TestMustFindMetric(t *testing.T) { - reg := prom.NewRegistry(zaptest.NewLogger(t)) - reg.MustRegister(helperCollectors()...) - - mfs, err := reg.Gather() - if err != nil { - t.Fatal(err) - } - - ft := new(fakeT) - - // Doesn't log when metric is found. - _ = promtest.MustFindMetric(ft, mfs, "my_random_counter", nil) - if ft.failed { - t.Fatalf("MustFindMetric failed when it should not have. message: %s", ft.logBuf.String()) - } - - // Logs and fails when family name not found. - ft = new(fakeT) - _ = promtest.MustFindMetric(ft, mfs, "missing_name", nil) - if !ft.failed { - t.Fatal("MustFindMetric should have failed but didn't") - } - logged := ft.logBuf.String() - if !strings.Contains(logged, `name "missing_name" not found`) { - t.Fatalf("did not log the looked up name which was not found. message: %s", logged) - } - if !strings.Contains(logged, "my_random_counter") || !strings.Contains(logged, "my_random_gaugevec") { - t.Fatalf("did not log the available metric names. message: %s", logged) - } - - // Logs and fails when family name found but metric labels mismatch. - ft = new(fakeT) - _ = promtest.MustFindMetric(ft, mfs, "my_random_counter", map[string]string{"unknown": "label"}) - if !ft.failed { - t.Fatal("MustFindMetric should have failed but didn't") - } - - ft = new(fakeT) - _ = promtest.MustFindMetric(ft, mfs, "my_random_gaugevec", map[string]string{"unknown": "label"}) - if !ft.failed { - t.Fatal("MustFindMetric should have failed but didn't") - } - logged = ft.logBuf.String() - if !strings.Contains(logged, `"label1": "one"`) || !strings.Contains(logged, `"label2": "two"`) { - t.Fatalf("did not log the available label names. message: %s", logged) - } - - ft = new(fakeT) - _ = promtest.MustFindMetric(ft, mfs, "my_random_gaugevec", map[string]string{"label1": "one", "label2": "two", "label3": "imaginary"}) - if !ft.failed { - t.Fatal("MustFindMetric should have failed but didn't") - } - logged = ft.logBuf.String() - if !strings.Contains(logged, `"label1": "one"`) || !strings.Contains(logged, `"label2": "two"`) { - t.Fatalf("did not log the available label names. 
message: %s", logged) - } -} - -func TestMustGather(t *testing.T) { - expErr := errors.New("failed to gather") - g := prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - return nil, expErr - }) - - ft := new(fakeT) - _ = promtest.MustGather(ft, g) - if !ft.failed { - t.Fatal("MustGather should have failed but didn't") - } - logged := ft.logBuf.String() - if !strings.HasPrefix(logged, "error while gathering metrics:") || !strings.Contains(logged, expErr.Error()) { - t.Fatalf("did not log the expected error message: %s", logged) - } - - expMF := []*dto.MetricFamily{} // Use a non-nil, zero-length slice for a simple-ish check. - g = prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - return expMF, nil - }) - ft = new(fakeT) - gotMF := promtest.MustGather(ft, g) - if ft.failed { - t.Fatalf("MustGather should not have failed") - } - if gotMF == nil || len(gotMF) != 0 { - t.Fatalf("exp: %v, got: %v", expMF, gotMF) - } -} - -func TestFromHTTPResponse(t *testing.T) { - reg := prom.NewRegistry(zaptest.NewLogger(t)) - reg.MustRegister(helperCollectors()...) - - s := httptest.NewServer(reg.HTTPHandler()) - defer s.Close() - - resp, err := http.Get(s.URL) // Didn't specify a path for the handler, so any path should be fine. - if err != nil { - t.Fatal(err) - } - - mfs, err := promtest.FromHTTPResponse(resp) - if err != nil { - t.Fatal(err) - } - - if len(mfs) != 2 { - t.Fatalf("expected 2 metrics but got %d", len(mfs)) - } - - c := promtest.MustFindMetric(t, mfs, "my_random_counter", nil) - if got := c.GetCounter().GetValue(); got != 1 { - t.Fatalf("expected counter to be 1, got %v", got) - } - - g := promtest.MustFindMetric(t, mfs, "my_random_gaugevec", map[string]string{"label1": "one", "label2": "two"}) - if got := g.GetGauge().GetValue(); got != 3 { - t.Fatalf("expected gauge to be 3, got %v", got) - } -} diff --git a/kit/prom/registry.go b/kit/prom/registry.go deleted file mode 100644 index f734ce4c5d8..00000000000 --- a/kit/prom/registry.go +++ /dev/null @@ -1,59 +0,0 @@ -// Package prom provides a wrapper around a prometheus metrics registry -// so that all services are unified in how they expose prometheus metrics. -package prom - -import ( - "net/http" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" -) - -// PrometheusCollector is the interface for a type to expose prometheus metrics. -// This interface is provided as a convention, so that you can optionally check -// if a type implements it and then pass its collectors to (*Registry).MustRegister. -type PrometheusCollector interface { - // PrometheusCollectors returns a slice of prometheus collectors - // containing metrics for the underlying instance. - PrometheusCollectors() []prometheus.Collector -} - -// Registry embeds a prometheus registry and adds a couple convenience methods. -type Registry struct { - *prometheus.Registry - - log *zap.Logger -} - -// NewRegistry returns a new registry. -func NewRegistry(log *zap.Logger) *Registry { - return &Registry{ - Registry: prometheus.NewRegistry(), - log: log, - } -} - -// HTTPHandler returns an http.Handler for the registry, -// so that the /metrics HTTP handler is uniformly configured across all apps in the platform. -func (r *Registry) HTTPHandler() http.Handler { - opts := promhttp.HandlerOpts{ - ErrorLog: promLogger{r: r}, - // TODO(mr): decide if we want to set MaxRequestsInFlight or Timeout. 
- } - return promhttp.HandlerFor(r.Registry, opts) -} - -// promLogger satisfies the promhttp.logger interface with the registry. -// Because normal usage is that WithLogger is called after HTTPHandler, -// we refer to the Registry rather than its logger. -type promLogger struct { - r *Registry -} - -var _ promhttp.Logger = (*promLogger)(nil) - -// Println implements promhttp.logger. -func (pl promLogger) Println(v ...interface{}) { - pl.r.log.Sugar().Info(v...) -} diff --git a/kit/prom/registry_test.go b/kit/prom/registry_test.go deleted file mode 100644 index a2ee75de916..00000000000 --- a/kit/prom/registry_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package prom_test - -import ( - "errors" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" - "go.uber.org/zap/zaptest/observer" -) - -func TestRegistry_Logger(t *testing.T) { - core, logs := observer.New(zap.DebugLevel) - reg := prom.NewRegistry(zap.New(core)) - - // Normal use: HTTP handler is created immediately... - s := httptest.NewServer(reg.HTTPHandler()) - defer s.Close() - - // Force an error with a fake collector. - reg.MustRegister(errorCollector{}) - resp, err := http.Get(s.URL) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - foundLog := false - for _, le := range logs.All() { - if strings.Contains(le.Message, "invalid metric from errorCollector") { - foundLog = true - break - } - } - - if !foundLog { - t.Fatalf("registry logger did not log error from metric collection") - } -} - -type errorCollector struct{} - -var _ prometheus.Collector = errorCollector{} - -var ecDesc = prometheus.NewDesc("error_collector_desc", "A required description for the error collector", nil, nil) - -func (errorCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- ecDesc -} - -func (errorCollector) Collect(ch chan<- prometheus.Metric) { - ch <- prometheus.NewInvalidMetric( - ecDesc, - errors.New("invalid metric from errorCollector"), - ) -} diff --git a/kit/signals/context.go b/kit/signals/context.go deleted file mode 100644 index 6e70e9863a2..00000000000 --- a/kit/signals/context.go +++ /dev/null @@ -1,30 +0,0 @@ -package signals - -import ( - "context" - "os" - "os/signal" -) - -// WithSignals returns a context that is canceled with any signal in sigs. -func WithSignals(ctx context.Context, sigs ...os.Signal) context.Context { - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, sigs...) - - ctx, cancel := context.WithCancel(ctx) - go func() { - defer cancel() - select { - case <-ctx.Done(): - return - case <-sigCh: - return - } - }() - return ctx -} - -// WithStandardSignals cancels the context on os.Interrupt, os.Kill. 
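A minimal sketch of how a caller might use `WithSignals` for graceful shutdown. This is not code from the repository; the server address and shutdown flow are illustrative, and the import path is assumed from the `kit/signals` layout shown above.

```go
package main

import (
	"context"
	"log"
	"net/http"
	"syscall"

	"github.com/influxdata/influxdb/v2/kit/signals"
)

func main() {
	// Context is cancelled once SIGTERM or SIGINT is received.
	ctx := signals.WithSignals(context.Background(), syscall.SIGTERM, syscall.SIGINT)

	srv := &http.Server{Addr: ":8080"} // illustrative address
	go func() {
		<-ctx.Done()
		_ = srv.Shutdown(context.Background())
	}()

	if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
		log.Fatal(err)
	}
}
```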
-func WithStandardSignals(ctx context.Context) context.Context { - return WithSignals(ctx, os.Interrupt, os.Kill) -} diff --git a/kit/signals/context_test.go b/kit/signals/context_test.go deleted file mode 100644 index 24714aff9d4..00000000000 --- a/kit/signals/context_test.go +++ /dev/null @@ -1,81 +0,0 @@ -//go:build !windows - -package signals - -import ( - "context" - "fmt" - "os" - "syscall" - "testing" - "time" -) - -func ExampleWithSignals() { - ctx := WithSignals(context.Background(), syscall.SIGUSR1) - go func() { - time.Sleep(500 * time.Millisecond) // after some time SIGUSR1 is sent - // mimicking a signal from the outside - syscall.Kill(syscall.Getpid(), syscall.SIGUSR1) - }() - - <-ctx.Done() - fmt.Println("finished") - // Output: - // finished -} - -func Example_withUnregisteredSignals() { - dctx, cancel := context.WithTimeout(context.TODO(), time.Millisecond*100) - defer cancel() - - ctx := WithSignals(dctx, syscall.SIGUSR1) - go func() { - time.Sleep(10 * time.Millisecond) // after some time SIGUSR2 is sent - // mimicking a signal from the outside, WithSignals will not handle it - syscall.Kill(syscall.Getpid(), syscall.SIGUSR2) - }() - - <-ctx.Done() - fmt.Println("finished") - // Output: - // finished -} - -func TestWithSignals(t *testing.T) { - tests := []struct { - name string - ctx context.Context - sigs []os.Signal - wantSignal bool - }{ - { - name: "sending signal SIGUSR2 should exit context.", - ctx: context.Background(), - sigs: []os.Signal{syscall.SIGUSR2}, - wantSignal: true, - }, - { - name: "sending signal SIGUSR2 should NOT exit context.", - ctx: context.Background(), - sigs: []os.Signal{syscall.SIGUSR1}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := WithSignals(tt.ctx, tt.sigs...) - syscall.Kill(syscall.Getpid(), syscall.SIGUSR2) - timer := time.NewTimer(500 * time.Millisecond) - select { - case <-ctx.Done(): - if !tt.wantSignal { - t.Errorf("unexpected exit with signal") - } - case <-timer.C: - if tt.wantSignal { - t.Errorf("expected to exit with signal but did not") - } - } - }) - } -} diff --git a/kit/tracing/testing/testing.go b/kit/tracing/testing/testing.go deleted file mode 100644 index af59cf06393..00000000000 --- a/kit/tracing/testing/testing.go +++ /dev/null @@ -1,24 +0,0 @@ -package testing - -import ( - "github.com/opentracing/opentracing-go" - "github.com/uber/jaeger-client-go" -) - -// SetupInMemoryTracing sets the global tracer to an in memory Jaeger instance for testing. -// The returned function should be deferred by the caller to tear down this setup after testing is complete. 
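The teardown function returned by `SetupInMemoryTracing` is intended to be invoked via `defer`. A hypothetical test illustrating that pattern; the package alias `tracingtest`, the test name, and the span name are invented for illustration.

```go
package mypkg_test // hypothetical test file, not part of the original package

import (
	"testing"

	"github.com/opentracing/opentracing-go"
	tracingtest "github.com/influxdata/influxdb/v2/kit/tracing/testing"
)

func TestTracedOperation(t *testing.T) {
	// Install an in-memory Jaeger tracer, then restore the previous
	// global tracer when the test finishes.
	defer tracingtest.SetupInMemoryTracing("unit-test")()

	span := opentracing.StartSpan("operation under test")
	defer span.Finish()

	// ... exercise code that starts child spans from the global tracer ...
}
```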
-func SetupInMemoryTracing(name string) func() { - var ( - old = opentracing.GlobalTracer() - tracer, closer = jaeger.NewTracer(name, - jaeger.NewConstSampler(true), - jaeger.NewInMemoryReporter(), - ) - ) - - opentracing.SetGlobalTracer(tracer) - return func() { - _ = closer.Close() - opentracing.SetGlobalTracer(old) - } -} diff --git a/kit/tracing/tracing.go b/kit/tracing/tracing.go deleted file mode 100644 index 9b05efae396..00000000000 --- a/kit/tracing/tracing.go +++ /dev/null @@ -1,226 +0,0 @@ -package tracing - -import ( - "context" - "errors" - "net/http" - "runtime" - "strings" - "time" - - "github.com/go-chi/chi" - "github.com/influxdata/httprouter" - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" - "github.com/prometheus/client_golang/prometheus" - "github.com/uber/jaeger-client-go" -) - -// LogError adds a span log for an error. -// Returns unchanged error, so useful to wrap as in: -// -// return 0, tracing.LogError(err) -func LogError(span opentracing.Span, err error) error { - if err == nil { - return nil - } - - // Get caller frame. - var pcs [1]uintptr - n := runtime.Callers(2, pcs[:]) - if n < 1 { - span.LogFields(log.Error(err)) - span.LogFields(log.Error(errors.New("runtime.Callers failed"))) - return err - } - - file, line := runtime.FuncForPC(pcs[0]).FileLine(pcs[0]) - span.LogFields(log.String("filename", file), log.Int("line", line), log.Error(err)) - - return err -} - -// InjectToHTTPRequest adds tracing headers to an HTTP request. -// Easier than adding this boilerplate everywhere. -func InjectToHTTPRequest(span opentracing.Span, req *http.Request) { - err := opentracing.GlobalTracer().Inject(span.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(req.Header)) - if err != nil { - LogError(span, err) - } -} - -// ExtractFromHTTPRequest gets a child span of the parent referenced in HTTP request headers. -// Returns the request with updated tracing context. -// Easier than adding this boilerplate everywhere. -func ExtractFromHTTPRequest(req *http.Request, handlerName string) (opentracing.Span, *http.Request) { - spanContext, err := opentracing.GlobalTracer().Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(req.Header)) - if err != nil { - span, ctx := opentracing.StartSpanFromContext(req.Context(), "request") - annotateSpan(span, handlerName, req) - - _ = LogError(span, err) - - return span, req.WithContext(ctx) - } - - span := opentracing.StartSpan("request", opentracing.ChildOf(spanContext), ext.RPCServerOption(spanContext)) - annotateSpan(span, handlerName, req) - - return span, req.WithContext(opentracing.ContextWithSpan(req.Context(), span)) -} - -func annotateSpan(span opentracing.Span, handlerName string, req *http.Request) { - if route := httprouter.MatchedRouteFromContext(req.Context()); route != "" { - span.SetTag("route", route) - } - span.SetTag("method", req.Method) - if ctx := chi.RouteContext(req.Context()); ctx != nil { - span.SetTag("route", ctx.RoutePath) - } - span.SetTag("handler", handlerName) - span.LogKV("path", req.URL.Path) -} - -// span is a simple wrapper around opentracing.Span in order to -// get access to the duration of the span for metrics reporting. 
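`InjectToHTTPRequest` and `ExtractFromHTTPRequest` are the two halves of trace propagation: the client stamps outgoing request headers, and the server reconstructs a child span from them. A rough sketch of both sides, under the assumption of an invented downstream URL and handler name (neither is from the repository).

```go
package example // hypothetical package, not part of the repo

import (
	"context"
	"net/http"

	"github.com/influxdata/influxdb/v2/kit/tracing"
)

// Client side: attach the current span's context to an outgoing request.
func callDownstream(ctx context.Context, client *http.Client) error {
	span, ctx := tracing.StartSpanFromContext(ctx)
	defer span.Finish()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://downstream/api/v2/ping", nil)
	if err != nil {
		return tracing.LogError(span, err)
	}
	tracing.InjectToHTTPRequest(span, req)

	resp, err := client.Do(req)
	if err != nil {
		return tracing.LogError(span, err)
	}
	return resp.Body.Close()
}

// Server side: continue the trace from the incoming headers.
func pingHandler(w http.ResponseWriter, r *http.Request) {
	span, r := tracing.ExtractFromHTTPRequest(r, "PingHandler")
	defer span.Finish()

	w.WriteHeader(http.StatusNoContent)
}
```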
-type Span struct { - opentracing.Span - start time.Time - Duration time.Duration - hist prometheus.Observer - gauge prometheus.Gauge -} - -func StartSpanFromContextWithPromMetrics(ctx context.Context, operationName string, hist prometheus.Observer, gauge prometheus.Gauge, opts ...opentracing.StartSpanOption) (*Span, context.Context) { - start := time.Now() - s, sctx := StartSpanFromContextWithOperationName(ctx, operationName, opentracing.StartTime(start)) - gauge.Inc() - return &Span{s, start, 0, hist, gauge}, sctx -} - -func (s *Span) Finish() { - finish := time.Now() - s.Duration = finish.Sub(s.start) - s.Span.FinishWithOptions(opentracing.FinishOptions{ - FinishTime: finish, - }) - s.hist.Observe(s.Duration.Seconds()) - s.gauge.Dec() -} - -// StartSpanFromContext is an improved opentracing.StartSpanFromContext. -// Uses the calling function as the operation name, and logs the filename and line number. -// -// Passing nil context induces panic. -// Context without parent span reference triggers root span construction. -// This function never returns nil values. -// -// # Performance -// -// This function incurs a small performance penalty, roughly 1000 ns/op, 376 B/op, 6 allocs/op. -// Jaeger timestamp and duration precision is only µs, so this is pretty negligible. -// -// # Alternatives -// -// If this performance penalty is too much, try these, which are also demonstrated in benchmark tests: -// -// // Create a root span -// span := opentracing.StartSpan("operation name") -// ctx := opentracing.ContextWithSpan(context.Background(), span) -// -// // Create a child span -// span := opentracing.StartSpan("operation name", opentracing.ChildOf(sc)) -// ctx := opentracing.ContextWithSpan(context.Background(), span) -// -// // Sugar to create a child span -// span, ctx := opentracing.StartSpanFromContext(ctx, "operation name") -func StartSpanFromContext(ctx context.Context, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) { - if ctx == nil { - panic("StartSpanFromContext called with nil context") - } - - // Get caller frame. - var pcs [1]uintptr - n := runtime.Callers(2, pcs[:]) - if n < 1 { - span, ctx := opentracing.StartSpanFromContext(ctx, "unknown", opts...) - span.LogFields(log.Error(errors.New("runtime.Callers failed"))) - return span, ctx - } - fn := runtime.FuncForPC(pcs[0]) - name := fn.Name() - if lastSlash := strings.LastIndexByte(name, '/'); lastSlash > 0 { - name = name[lastSlash+1:] - } - - var span opentracing.Span - if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil { - // Create a child span. - opts = append(opts, opentracing.ChildOf(parentSpan.Context())) - span = opentracing.StartSpan(name, opts...) - } else { - // Create a root span. - span = opentracing.StartSpan(name) - } - // New context references this span, not the parent (if there was one). - ctx = opentracing.ContextWithSpan(ctx, span) - - file, line := fn.FileLine(pcs[0]) - span.LogFields(log.String("filename", file), log.Int("line", line)) - - return span, ctx -} - -// StartSpanFromContextWithOperationName is like StartSpanFromContext, but the caller determines the operation name. -func StartSpanFromContextWithOperationName(ctx context.Context, operationName string, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) { - if ctx == nil { - panic("StartSpanFromContextWithOperationName called with nil context") - } - - // Get caller frame. 
- var pcs [1]uintptr - n := runtime.Callers(2, pcs[:]) - if n < 1 { - span, ctx := opentracing.StartSpanFromContext(ctx, operationName, opts...) - span.LogFields(log.Error(errors.New("runtime.Callers failed"))) - return span, ctx - } - file, line := runtime.FuncForPC(pcs[0]).FileLine(pcs[0]) - - var span opentracing.Span - if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil { - opts = append(opts, opentracing.ChildOf(parentSpan.Context())) - // Create a child span. - span = opentracing.StartSpan(operationName, opts...) - } else { - // Create a root span. - span = opentracing.StartSpan(operationName, opts...) - } - // New context references this span, not the parent (if there was one). - ctx = opentracing.ContextWithSpan(ctx, span) - - span.LogFields(log.String("filename", file), log.Int("line", line)) - - return span, ctx -} - -// InfoFromSpan returns the traceID and if it was sampled from the span, given it is a jaeger span. -// It returns whether a span associated to the context has been found. -func InfoFromSpan(span opentracing.Span) (traceID string, sampled bool, found bool) { - if spanContext, ok := span.Context().(jaeger.SpanContext); ok { - traceID = spanContext.TraceID().String() - sampled = spanContext.IsSampled() - return traceID, sampled, true - } - return "", false, false -} - -// InfoFromContext returns the traceID and if it was sampled from the Jaeger span -// found in the given context. It returns whether a span associated to the context has been found. -func InfoFromContext(ctx context.Context) (traceID string, sampled bool, found bool) { - if span := opentracing.SpanFromContext(ctx); span != nil { - return InfoFromSpan(span) - } - return "", false, false -} diff --git a/kit/tracing/tracing_test.go b/kit/tracing/tracing_test.go deleted file mode 100644 index 50ea7f3d780..00000000000 --- a/kit/tracing/tracing_test.go +++ /dev/null @@ -1,333 +0,0 @@ -package tracing - -import ( - "context" - "fmt" - "net/http" - "net/url" - "runtime" - "testing" - - "github.com/go-chi/chi" - "github.com/influxdata/httprouter" - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/mocktracer" -) - -func TestInjectAndExtractHTTPRequest(t *testing.T) { - tracer := mocktracer.New() - - oldTracer := opentracing.GlobalTracer() - opentracing.SetGlobalTracer(tracer) - defer opentracing.SetGlobalTracer(oldTracer) - - request, err := http.NewRequest(http.MethodPost, "http://localhost/", nil) - if err != nil { - t.Fatal(err) - } - - span := tracer.StartSpan("operation name") - - InjectToHTTPRequest(span, request) - gotSpan, _ := ExtractFromHTTPRequest(request, "MyStruct") - - if span.(*mocktracer.MockSpan).SpanContext.TraceID != gotSpan.(*mocktracer.MockSpan).SpanContext.TraceID { - t.Error("injected and extracted traceIDs not equal") - } - - if span.(*mocktracer.MockSpan).SpanContext.SpanID != gotSpan.(*mocktracer.MockSpan).ParentID { - t.Error("injected span ID does not match extracted span parent ID") - } -} - -func TestExtractHTTPRequest(t *testing.T) { - var ( - tracer = mocktracer.New() - oldTracer = opentracing.GlobalTracer() - ctx = context.Background() - ) - - opentracing.SetGlobalTracer(tracer) - defer opentracing.SetGlobalTracer(oldTracer) - - for _, test := range []struct { - name string - handlerName string - path string - ctx context.Context - tags map[string]interface{} - method string - }{ - { - name: "happy path", - handlerName: "WriteHandler", - ctx: context.WithValue(ctx, httprouter.MatchedRouteKey, 
"/api/v2/write"), - method: http.MethodGet, - path: "/api/v2/write", - tags: map[string]interface{}{ - "route": "/api/v2/write", - "handler": "WriteHandler", - }, - }, - { - name: "happy path bucket handler", - handlerName: "BucketHandler", - ctx: context.WithValue(ctx, httprouter.MatchedRouteKey, "/api/v2/buckets/:bucket_id"), - path: "/api/v2/buckets/12345", - method: http.MethodGet, - tags: map[string]interface{}{ - "route": "/api/v2/buckets/:bucket_id", - "handler": "BucketHandler", - }, - }, - { - name: "happy path bucket handler (chi)", - handlerName: "BucketHandler", - ctx: context.WithValue( - ctx, - chi.RouteCtxKey, - &chi.Context{RoutePath: "/api/v2/buckets/:bucket_id", RouteMethod: "GET"}, - ), - path: "/api/v2/buckets/12345", - method: http.MethodGet, - tags: map[string]interface{}{ - "route": "/api/v2/buckets/:bucket_id", - "method": "GET", - "handler": "BucketHandler", - }, - }, - { - name: "empty path", - handlerName: "Home", - ctx: ctx, - method: http.MethodGet, - tags: map[string]interface{}{ - "handler": "Home", - }, - }, - } { - t.Run(test.name, func(t *testing.T) { - request, err := http.NewRequest(test.method, "http://localhost"+test.path, nil) - if err != nil { - t.Fatal(err) - } - - span := tracer.StartSpan("operation name") - - InjectToHTTPRequest(span, request) - gotSpan, _ := ExtractFromHTTPRequest(request.WithContext(test.ctx), test.handlerName) - - if op := gotSpan.(*mocktracer.MockSpan).OperationName; op != "request" { - t.Fatalf("operation name %q != request", op) - } - - tags := gotSpan.(*mocktracer.MockSpan).Tags() - for k, v := range test.tags { - found, ok := tags[k] - if !ok { - t.Errorf("tag not found in span %q", k) - continue - } - - if found != v { - t.Errorf("expected %v, found %v for tag %q", v, found, k) - } - } - }) - } -} - -func TestStartSpanFromContext(t *testing.T) { - tracer := mocktracer.New() - - oldTracer := opentracing.GlobalTracer() - opentracing.SetGlobalTracer(tracer) - defer opentracing.SetGlobalTracer(oldTracer) - - type testCase struct { - ctx context.Context - expectPanic bool - expectParent bool - } - var testCases []testCase - - testCases = append(testCases, - testCase{ - ctx: nil, - expectPanic: true, - expectParent: false, - }, - testCase{ - ctx: context.Background(), - expectPanic: false, - expectParent: false, - }) - - parentSpan := opentracing.StartSpan("parent operation name") - testCases = append(testCases, testCase{ - ctx: opentracing.ContextWithSpan(context.Background(), parentSpan), - expectPanic: false, - expectParent: true, - }) - - for i, tc := range testCases { - t.Run(fmt.Sprint(i), func(t *testing.T) { - var span opentracing.Span - var ctx context.Context - var gotPanic bool - - func(inputCtx context.Context) { - defer func() { - if recover() != nil { - gotPanic = true - } - }() - span, ctx = StartSpanFromContext(inputCtx) - }(tc.ctx) - - if tc.expectPanic != gotPanic { - t.Errorf("panic: expect %v got %v", tc.expectPanic, gotPanic) - } - if tc.expectPanic { - // No other valid checks if panic. 
- return - } - if ctx == nil { - t.Error("never expect non-nil ctx") - } - if span == nil { - t.Error("never expect non-nil Span") - } - foundParent := span.(*mocktracer.MockSpan).ParentID != 0 - if tc.expectParent != foundParent { - t.Errorf("parent: expect %v got %v", tc.expectParent, foundParent) - } - if ctx == tc.ctx { - t.Errorf("always expect fresh context") - } - }) - } -} - -func TestLogErrorNil(t *testing.T) { - tracer := mocktracer.New() - span := tracer.StartSpan("test").(*mocktracer.MockSpan) - - var err error - if err2 := LogError(span, err); err2 != nil { - t.Errorf("expected nil err, got '%s'", err2.Error()) - } - - if len(span.Logs()) > 0 { - t.Errorf("expected zero new span logs, got %d", len(span.Logs())) - println(span.Logs()[0].Fields[0].Key) - } -} - -/* -BenchmarkLocal_StartSpanFromContext-8 2000000 681 ns/op 224 B/op 4 allocs/op -BenchmarkLocal_StartSpanFromContext_runtimeCaller-8 3000000 534 ns/op -BenchmarkLocal_StartSpanFromContext_runtimeCallers-8 10000000 196 ns/op -BenchmarkLocal_StartSpanFromContext_runtimeFuncForPC-8 200000000 7.28 ns/op -BenchmarkLocal_StartSpanFromContext_runtimeCallersFrames-8 10000000 234 ns/op -BenchmarkLocal_StartSpanFromContext_runtimeFuncFileLine-8 20000000 103 ns/op -BenchmarkOpentracing_StartSpanFromContext-8 10000000 155 ns/op 96 B/op 3 allocs/op -BenchmarkOpentracing_StartSpan_root-8 200000000 7.68 ns/op 0 B/op 0 allocs/op -BenchmarkOpentracing_StartSpan_child-8 20000000 71.2 ns/op 48 B/op 2 allocs/op -*/ - -func BenchmarkLocal_StartSpanFromContext(b *testing.B) { - b.ReportAllocs() - - parentSpan := opentracing.StartSpan("parent operation name") - ctx := opentracing.ContextWithSpan(context.Background(), parentSpan) - - for n := 0; n < b.N; n++ { - StartSpanFromContext(ctx) - } -} - -func BenchmarkLocal_StartSpanFromContext_runtimeCaller(b *testing.B) { - for n := 0; n < b.N; n++ { - _, _, _, _ = runtime.Caller(1) - } -} - -func BenchmarkLocal_StartSpanFromContext_runtimeCallers(b *testing.B) { - var pcs [1]uintptr - - for n := 0; n < b.N; n++ { - _ = runtime.Callers(2, pcs[:]) - } -} - -func BenchmarkLocal_StartSpanFromContext_runtimeFuncForPC(b *testing.B) { - var pcs [1]uintptr - _ = runtime.Callers(2, pcs[:]) - - for n := 0; n < b.N; n++ { - _ = runtime.FuncForPC(pcs[0]) - } -} - -func BenchmarkLocal_StartSpanFromContext_runtimeCallersFrames(b *testing.B) { - pc, _, _, ok := runtime.Caller(1) - if !ok { - b.Fatal("runtime.Caller failed") - } - - for n := 0; n < b.N; n++ { - _, _ = runtime.CallersFrames([]uintptr{pc}).Next() - } -} - -func BenchmarkLocal_StartSpanFromContext_runtimeFuncFileLine(b *testing.B) { - var pcs [1]uintptr - _ = runtime.Callers(2, pcs[:]) - fn := runtime.FuncForPC(pcs[0]) - - for n := 0; n < b.N; n++ { - _, _ = fn.FileLine(pcs[0]) - } -} - -func BenchmarkOpentracing_StartSpanFromContext(b *testing.B) { - b.ReportAllocs() - - parentSpan := opentracing.StartSpan("parent operation name") - ctx := opentracing.ContextWithSpan(context.Background(), parentSpan) - - for n := 0; n < b.N; n++ { - _, _ = opentracing.StartSpanFromContext(ctx, "operation name") - } -} - -func BenchmarkOpentracing_StartSpan_root(b *testing.B) { - b.ReportAllocs() - - for n := 0; n < b.N; n++ { - _ = opentracing.StartSpan("operation name") - } -} - -func BenchmarkOpentracing_StartSpan_child(b *testing.B) { - b.ReportAllocs() - - parentSpan := opentracing.StartSpan("parent operation name") - - for n := 0; n < b.N; n++ { - _ = opentracing.StartSpan("operation name", opentracing.ChildOf(parentSpan.Context())) - } -} - -func 
BenchmarkOpentracing_ExtractFromHTTPRequest(b *testing.B) { - b.ReportAllocs() - - req := &http.Request{ - URL: &url.URL{Path: "/api/v2/organization/12345"}, - } - - for n := 0; n < b.N; n++ { - _, _ = ExtractFromHTTPRequest(req, "OrganizationHandler") - } -} diff --git a/kit/transport/http/api.go b/kit/transport/http/api.go deleted file mode 100644 index 2257991295f..00000000000 --- a/kit/transport/http/api.go +++ /dev/null @@ -1,266 +0,0 @@ -package http - -import ( - "compress/gzip" - "context" - "encoding/gob" - "encoding/json" - "fmt" - "io" - "net/http" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "go.uber.org/zap" -) - -// PlatformErrorCodeHeader shows the error code of platform error. -const PlatformErrorCodeHeader = "X-Platform-Error-Code" - -// API provides a consolidated means for handling API interface concerns. -// Concerns such as decoding/encoding request and response bodies as well -// as adding headers for content type and content encoding. -type API struct { - logger *zap.Logger - - prettyJSON bool - encodeGZIP bool - - unmarshalErrFn func(encoding string, err error) error - okErrFn func(err error) error - errFn func(ctx context.Context, err error) (interface{}, int, error) -} - -// APIOptFn is a functional option for setting fields on the API type. -type APIOptFn func(*API) - -// WithLog sets the logger. -func WithLog(logger *zap.Logger) APIOptFn { - return func(api *API) { - api.logger = logger - } -} - -// WithErrFn sets the err handling func for issues when writing to the response body. -func WithErrFn(fn func(ctx context.Context, err error) (interface{}, int, error)) APIOptFn { - return func(api *API) { - api.errFn = fn - } -} - -// WithOKErrFn is an error handler for failing validation for request bodies. -func WithOKErrFn(fn func(err error) error) APIOptFn { - return func(api *API) { - api.okErrFn = fn - } -} - -// WithPrettyJSON sets the json encoder to marshal indent or not. -func WithPrettyJSON(b bool) APIOptFn { - return func(api *API) { - api.prettyJSON = b - } -} - -// WithEncodeGZIP sets the encoder to gzip contents. -func WithEncodeGZIP() APIOptFn { - return func(api *API) { - api.encodeGZIP = true - } -} - -// WithUnmarshalErrFn sets the error handler for errors that occur when unmarshalling -// the request body. -func WithUnmarshalErrFn(fn func(encoding string, err error) error) APIOptFn { - return func(api *API) { - api.unmarshalErrFn = fn - } -} - -// NewAPI creates a new API type. -func NewAPI(opts ...APIOptFn) *API { - api := API{ - logger: zap.NewNop(), - prettyJSON: true, - unmarshalErrFn: func(encoding string, err error) error { - return &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("failed to unmarshal %s: %s", encoding, err), - } - }, - errFn: func(ctx context.Context, err error) (interface{}, int, error) { - msg := err.Error() - if msg == "" { - msg = "an internal error has occurred" - } - code := errors.ErrorCode(err) - return ErrBody{ - Code: code, - Msg: msg, - }, ErrorCodeToStatusCode(ctx, code), nil - }, - } - for _, o := range opts { - o(&api) - } - return &api -} - -// DecodeJSON decodes reader with json. -func (a *API) DecodeJSON(r io.Reader, v interface{}) error { - return a.decode("json", json.NewDecoder(r), v) -} - -// DecodeGob decodes reader with gob. 
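Taken together, the option functions above configure one `API` value that a handler reuses for decoding, responding, and error writing. A compact sketch of that wiring; the request type, handler constructor, and route semantics are invented for illustration.

```go
package example // hypothetical, not part of the repo

import (
	"net/http"

	kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
	"go.uber.org/zap"
)

type createThingRequest struct {
	Name string `json:"name"`
}

func newThingHandler(logger *zap.Logger) http.HandlerFunc {
	api := kithttp.NewAPI(
		kithttp.WithLog(logger),
		kithttp.WithPrettyJSON(false), // compact JSON on the wire
		kithttp.WithEncodeGZIP(),      // gzip-compress response bodies
	)

	return func(w http.ResponseWriter, r *http.Request) {
		var req createThingRequest
		if err := api.DecodeJSON(r.Body, &req); err != nil {
			api.Err(w, r, err)
			return
		}
		api.Respond(w, r, http.StatusCreated, req)
	}
}
```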
-func (a *API) DecodeGob(r io.Reader, v interface{}) error { - return a.decode("gob", gob.NewDecoder(r), v) -} - -type ( - decoder interface { - Decode(interface{}) error - } - - oker interface { - OK() error - } -) - -func (a *API) decode(encoding string, dec decoder, v interface{}) error { - if err := dec.Decode(v); err != nil { - if a != nil && a.unmarshalErrFn != nil { - return a.unmarshalErrFn(encoding, err) - } - return err - } - - if vv, ok := v.(oker); ok { - err := vv.OK() - if a != nil && a.okErrFn != nil { - return a.okErrFn(err) - } - return err - } - - return nil -} - -// Respond writes to the response writer, handling all errors in writing. -func (a *API) Respond(w http.ResponseWriter, r *http.Request, status int, v interface{}) { - if status == http.StatusNoContent { - w.WriteHeader(status) - return - } - - var writer io.WriteCloser = noopCloser{Writer: w} - // we'll double close to make sure its always closed even - //on issues before the write - defer writer.Close() - - if a != nil && a.encodeGZIP { - w.Header().Set("Content-Encoding", "gzip") - writer = gzip.NewWriter(w) - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - - // this marshal block is to catch failures before they hit the http writer. - // default behavior for http.ResponseWriter is when body is written and no - // status is set, it writes a 200. Or if a status is set before encoding - // and an error occurs, there is no means to write a proper status code - // (i.e. 500) when that is to occur. This brings that step out before - // and then writes the data and sets the status code after marshaling - // succeeds. - var ( - b []byte - err error - ) - if a == nil || a.prettyJSON { - b, err = json.MarshalIndent(v, "", "\t") - } else { - b, err = json.Marshal(v) - } - if err != nil { - a.Err(w, r, err) - return - } - - a.write(w, writer, status, b) -} - -// Write allows the user to write raw bytes to the response writer. This -// operation does not have a fail case, all failures here will be logged. -func (a *API) Write(w http.ResponseWriter, status int, b []byte) { - if status == http.StatusNoContent { - w.WriteHeader(status) - return - } - - var writer io.WriteCloser = noopCloser{Writer: w} - // we'll double close to make sure its always closed even - //on issues before the write - defer writer.Close() - - if a != nil && a.encodeGZIP { - w.Header().Set("Content-Encoding", "gzip") - writer = gzip.NewWriter(w) - } - - a.write(w, writer, status, b) -} - -func (a *API) write(w http.ResponseWriter, wc io.WriteCloser, status int, b []byte) { - w.WriteHeader(status) - if _, err := wc.Write(b); err != nil { - a.logErr("failed to write to response writer", zap.Error(err)) - } - - if err := wc.Close(); err != nil { - a.logErr("failed to close response writer", zap.Error(err)) - } -} - -// Err is used for writing an error to the response. 
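Decoding also covers request-body validation: if the destination type implements `OK() error`, `decode` invokes it after unmarshalling, so invalid bodies are rejected before the handler logic runs. A hypothetical request type opting into that check (the type and its rules are invented, not from the repository).

```go
package example // hypothetical, not part of the repo

import "fmt"

type createBucketRequest struct {
	Name           string `json:"name"`
	RetentionHours int    `json:"retentionHours"`
}

// OK satisfies the unexported oker interface checked by (*API).decode,
// so DecodeJSON/DecodeGob surface validation failures as errors.
func (r createBucketRequest) OK() error {
	if r.Name == "" {
		return fmt.Errorf("name is required")
	}
	if r.RetentionHours < 0 {
		return fmt.Errorf("retentionHours must not be negative")
	}
	return nil
}
```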
-func (a *API) Err(w http.ResponseWriter, r *http.Request, err error) { - if err == nil { - return - } - - a.logErr("api error encountered", zap.Error(err)) - - v, status, err := a.errFn(r.Context(), err) - if err != nil { - a.logErr("failed to write err to response writer", zap.Error(err)) - a.Respond(w, r, http.StatusInternalServerError, ErrBody{ - Code: "internal error", - Msg: "an unexpected error occurred", - }) - return - } - - if eb, ok := v.(ErrBody); ok { - w.Header().Set(PlatformErrorCodeHeader, eb.Code) - } - - a.Respond(w, r, status, v) -} - -func (a *API) logErr(msg string, fields ...zap.Field) { - if a == nil || a.logger == nil { - return - } - a.logger.Error(msg, fields...) -} - -type noopCloser struct { - io.Writer -} - -func (n noopCloser) Close() error { - return nil -} - -// ErrBody is an err response body. -type ErrBody struct { - Code string `json:"code"` - Msg string `json:"message"` -} diff --git a/kit/transport/http/api_test.go b/kit/transport/http/api_test.go deleted file mode 100644 index b72df6b57c7..00000000000 --- a/kit/transport/http/api_test.go +++ /dev/null @@ -1,309 +0,0 @@ -package http_test - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "fmt" - "io" - "net/http" - "strings" - "testing" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/pkg/testttp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func Test_API(t *testing.T) { - t.Run("Decode", func(t *testing.T) { - t.Run("valid foo no errors", func(t *testing.T) { - expected := validatFoo{ - Foo: "valid", - Bar: 10, - } - - t.Run("json", func(t *testing.T) { - var api *kithttp.API // shows it is safe to use a nil value - - var out validatFoo - err := api.DecodeJSON(encodeJSON(t, expected), &out) - if err != nil { - t.Fatalf("unexpected err: %v", err) - } - - if expected != out { - t.Fatalf("unexpected vals:\n\texpected: %#v\n\tgot: %#v", expected, out) - } - }) - - t.Run("gob", func(t *testing.T) { - var out validatFoo - err := kithttp.NewAPI().DecodeGob(encodeGob(t, expected), &out) - if err != nil { - t.Fatalf("unexpected err: %v", err) - } - - if expected != out { - t.Fatalf("unexpected vals:\n\texpected: %#v\n\tgot: %#v", expected, out) - } - }) - }) - - t.Run("unmarshals fine with ok error", func(t *testing.T) { - badFoo := validatFoo{ - Foo: "", - Bar: 0, - } - - t.Run("json", func(t *testing.T) { - var out validatFoo - err := kithttp.NewAPI().DecodeJSON(encodeJSON(t, badFoo), &out) - if err == nil { - t.Fatal("expected an err") - } - }) - - t.Run("gob", func(t *testing.T) { - var out validatFoo - err := kithttp.NewAPI().DecodeGob(encodeGob(t, badFoo), &out) - if err == nil { - t.Fatal("expected an err") - } - }) - }) - - t.Run("unmarshal error", func(t *testing.T) { - invalidBody := []byte("[}-{]") - - var out validatFoo - err := kithttp.NewAPI().DecodeJSON(bytes.NewReader(invalidBody), &out) - if err == nil { - t.Fatal("expected an error") - } - }) - - t.Run("unmarshal err fn wraps unmarshalling error", func(t *testing.T) { - t.Run("json", func(t *testing.T) { - invalidBody := []byte("[}-{]") - - api := kithttp.NewAPI(kithttp.WithUnmarshalErrFn(unmarshalErrFn)) - - var out validatFoo - err := api.DecodeJSON(bytes.NewReader(invalidBody), &out) - expectInfluxdbError(t, errors.EInvalid, err) - }) - - t.Run("gob", func(t *testing.T) { - invalidBody := []byte("[}-{]") - - api := 
kithttp.NewAPI(kithttp.WithUnmarshalErrFn(unmarshalErrFn)) - - var out validatFoo - err := api.DecodeGob(bytes.NewReader(invalidBody), &out) - expectInfluxdbError(t, errors.EInvalid, err) - }) - }) - - t.Run("ok error fn wraps ok error", func(t *testing.T) { - badFoo := validatFoo{Foo: ""} - - t.Run("json", func(t *testing.T) { - api := kithttp.NewAPI(kithttp.WithOKErrFn(okErrFn)) - - var out validatFoo - err := api.DecodeJSON(encodeJSON(t, badFoo), &out) - expectInfluxdbError(t, errors.EUnprocessableEntity, err) - }) - - t.Run("gob", func(t *testing.T) { - api := kithttp.NewAPI(kithttp.WithOKErrFn(okErrFn)) - - var out validatFoo - err := api.DecodeGob(encodeGob(t, badFoo), &out) - expectInfluxdbError(t, errors.EUnprocessableEntity, err) - }) - }) - }) - - t.Run("Respond", func(t *testing.T) { - tests := []int{ - http.StatusCreated, - http.StatusOK, - http.StatusNoContent, - http.StatusForbidden, - http.StatusInternalServerError, - } - - for _, statusCode := range tests { - fn := func(t *testing.T) { - responder := kithttp.NewAPI() - - svr := func(w http.ResponseWriter, r *http.Request) { - responder.Respond(w, r, statusCode, map[string]string{ - "foo": "bar", - }) - } - - expectedBodyFn := func(body *bytes.Buffer) { - var resp map[string]string - require.NoError(t, json.NewDecoder(body).Decode(&resp)) - assert.Equal(t, "bar", resp["foo"]) - } - if statusCode == http.StatusNoContent { - expectedBodyFn = func(body *bytes.Buffer) { - require.Zero(t, body.Len()) - } - } - - testttp. - Get(t, "/foo"). - Do(http.HandlerFunc(svr)). - ExpectStatus(statusCode). - ExpectBody(expectedBodyFn) - } - t.Run(http.StatusText(statusCode), fn) - } - }) - - t.Run("Err", func(t *testing.T) { - tests := []struct { - statusCode int - expectedErr *errors.Error - }{ - { - statusCode: http.StatusBadRequest, - expectedErr: &errors.Error{ - Code: errors.EInvalid, - Msg: "failed to unmarshal", - }, - }, - { - statusCode: http.StatusForbidden, - expectedErr: &errors.Error{ - Code: errors.EForbidden, - Msg: "forbidden", - }, - }, - { - statusCode: http.StatusUnprocessableEntity, - expectedErr: &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "failed validation", - }, - }, - { - statusCode: http.StatusInternalServerError, - expectedErr: &errors.Error{ - Code: errors.EInternal, - Msg: "internal error", - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - responder := kithttp.NewAPI() - - svr := func(w http.ResponseWriter, r *http.Request) { - responder.Err(w, r, tt.expectedErr) - } - - testttp. - Get(t, "/foo"). - Do(http.HandlerFunc(svr)). - ExpectStatus(tt.statusCode). 
- ExpectBody(func(body *bytes.Buffer) { - var err kithttp.ErrBody - require.NoError(t, json.NewDecoder(body).Decode(&err)) - assert.Equal(t, tt.expectedErr.Msg, err.Msg) - assert.Equal(t, tt.expectedErr.Code, err.Code) - }) - } - t.Run(http.StatusText(tt.statusCode), fn) - } - }) -} - -func expectInfluxdbError(t *testing.T, expectedCode string, err error) { - t.Helper() - - if err == nil { - t.Fatal("expected an error") - } - iErr, ok := err.(*errors.Error) - if !ok { - t.Fatalf("expected an influxdb error; got=%#v", err) - } - - if got := iErr.Code; expectedCode != got { - t.Fatalf("unexpected error code; expected=%s got=%s", expectedCode, got) - } -} - -func encodeGob(t *testing.T, v interface{}) io.Reader { - t.Helper() - - var buf bytes.Buffer - if err := gob.NewEncoder(&buf).Encode(v); err != nil { - t.Fatal(err) - } - return &buf -} - -func encodeJSON(t *testing.T, v interface{}) io.Reader { - t.Helper() - - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(v); err != nil { - t.Fatal(err) - } - return &buf -} - -func okErrFn(err error) error { - return &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "failed validation", - Err: err, - } -} - -func unmarshalErrFn(encoding string, err error) error { - return &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("invalid %s request body", encoding), - Err: err, - } -} - -type validatFoo struct { - Foo string `gob:"foo"` - Bar int `gob:"bar"` -} - -func (v *validatFoo) OK() error { - var errs multiErr - if v.Foo == "" { - errs = append(errs, "foo must be at least 1 char") - } - if v.Bar < 0 { - errs = append(errs, "bar must be a positive real number") - } - return errs.toError() -} - -type multiErr []string - -func (m multiErr) toError() error { - if len(m) > 0 { - return m - } - return nil -} - -func (m multiErr) Error() string { - return strings.Join(m, "; ") -} diff --git a/kit/transport/http/error_handler.go b/kit/transport/http/error_handler.go deleted file mode 100644 index 8eb185fc985..00000000000 --- a/kit/transport/http/error_handler.go +++ /dev/null @@ -1,196 +0,0 @@ -package http - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "mime" - "net/http" - "strings" - - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "go.uber.org/zap" -) - -// ErrorHandler is the error handler in http package. -type ErrorHandler struct { - logger *zap.Logger -} - -func NewErrorHandler(logger *zap.Logger) ErrorHandler { - return ErrorHandler{logger: logger} -} - -// HandleHTTPError encodes err with the appropriate status code and format, -// sets the X-Platform-Error-Code headers on the response. -// We're no longer using X-Influx-Error and X-Influx-Reference. -// and sets the response status to the corresponding status code. 
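In practice a transport handler hands any service error straight to `HandleHTTPError` and lets the code-to-status mapping below pick the response. A small sketch of that flow; the service interface and route handling are invented for illustration.

```go
package example // hypothetical, not part of the repo

import (
	"context"
	"net/http"

	kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
	"go.uber.org/zap"
)

type bucketService interface {
	DeleteBucket(ctx context.Context, id string) error
}

func deleteBucketHandler(svc bucketService, logger *zap.Logger) http.HandlerFunc {
	errh := kithttp.NewErrorHandler(logger)

	return func(w http.ResponseWriter, r *http.Request) {
		if err := svc.DeleteBucket(r.Context(), r.URL.Query().Get("id")); err != nil {
			// Sets X-Platform-Error-Code, writes the JSON error body, and
			// chooses the HTTP status from the error's influxdb code.
			errh.HandleHTTPError(r.Context(), err, w)
			return
		}
		w.WriteHeader(http.StatusNoContent)
	}
}
```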
-func (h ErrorHandler) HandleHTTPError(ctx context.Context, err error, w http.ResponseWriter) { - if err == nil { - return - } - - code := errors2.ErrorCode(err) - var msg string - if _, ok := err.(*errors2.Error); ok { - msg = err.Error() - } else { - msg = "An internal error has occurred - check server logs" - h.logger.Warn("internal error not returned to client", zap.Error(err)) - } - - WriteErrorResponse(ctx, w, code, msg) -} - -func WriteErrorResponse(ctx context.Context, w http.ResponseWriter, code string, msg string) { - w.Header().Set(PlatformErrorCodeHeader, code) - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.WriteHeader(ErrorCodeToStatusCode(ctx, code)) - e := struct { - Code string `json:"code"` - Message string `json:"message"` - }{ - Code: code, - Message: msg, - } - b, _ := json.Marshal(e) - _, _ = w.Write(b) -} - -// StatusCodeToErrorCode maps a http status code integer to an -// influxdb error code string. -func StatusCodeToErrorCode(statusCode int) string { - errorCode, ok := httpStatusCodeToInfluxDBError[statusCode] - if ok { - return errorCode - } - - return errors2.EInternal -} - -// ErrorCodeToStatusCode maps an influxdb error code string to a -// http status code integer. -func ErrorCodeToStatusCode(ctx context.Context, code string) int { - // If the client disconnects early or times out then return a different - // error than the passed in error code. Client timeouts return a 408 - // while disconnections return a non-standard Nginx HTTP 499 code. - if err := ctx.Err(); err == context.DeadlineExceeded { - return http.StatusRequestTimeout - } else if err == context.Canceled { - return 499 // https://httpstatuses.com/499 - } - - // Otherwise map internal error codes to HTTP status codes. - statusCode, ok := influxDBErrorToStatusCode[code] - if ok { - return statusCode - } - return http.StatusInternalServerError -} - -// influxDBErrorToStatusCode is a mapping of ErrorCode to http status code. -var influxDBErrorToStatusCode = map[string]int{ - errors2.EInternal: http.StatusInternalServerError, - errors2.ENotImplemented: http.StatusNotImplemented, - errors2.EInvalid: http.StatusBadRequest, - errors2.EUnprocessableEntity: http.StatusUnprocessableEntity, - errors2.EEmptyValue: http.StatusBadRequest, - errors2.EConflict: http.StatusUnprocessableEntity, - errors2.ENotFound: http.StatusNotFound, - errors2.EUnavailable: http.StatusServiceUnavailable, - errors2.EForbidden: http.StatusForbidden, - errors2.ETooManyRequests: http.StatusTooManyRequests, - errors2.EUnauthorized: http.StatusUnauthorized, - errors2.EMethodNotAllowed: http.StatusMethodNotAllowed, - errors2.ETooLarge: http.StatusRequestEntityTooLarge, -} - -var httpStatusCodeToInfluxDBError = map[int]string{} - -func init() { - for k, v := range influxDBErrorToStatusCode { - httpStatusCodeToInfluxDBError[v] = k - } -} - -// CheckErrorStatus for status and any error in the response. -func CheckErrorStatus(code int, res *http.Response) error { - err := CheckError(res) - if err != nil { - return err - } - - if res.StatusCode != code { - return fmt.Errorf("unexpected status code: %s", res.Status) - } - - return nil -} - -// CheckError reads the http.Response and returns an error if one exists. -// It will automatically recognize the errors returned by Influx services -// and decode the error into an internal error type. If the error cannot -// be determined in that way, it will create a generic error message. -// -// If there is no error, then this returns nil. 
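On the client side, `CheckError` is the mirror image: it turns an error response from an Influx service back into an `*errors.Error`. A rough usage sketch, assuming an invented base URL and endpoint.

```go
package example // hypothetical, not part of the repo

import (
	"fmt"
	"net/http"

	kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
)

func ping(client *http.Client, baseURL string) error {
	resp, err := client.Get(baseURL + "/api/v2/ping")
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// CheckError decodes a 4xx/5xx body into an *errors.Error and
	// returns nil for 2xx responses.
	if err := kithttp.CheckError(resp); err != nil {
		return fmt.Errorf("ping failed: %w", err)
	}
	return nil
}
```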
-func CheckError(resp *http.Response) (err error) { - switch resp.StatusCode / 100 { - case 4, 5: - // We will attempt to parse this error outside of this block. - case 2: - return nil - default: - // TODO(jsternberg): Figure out what to do here? - return &errors2.Error{ - Code: errors2.EInternal, - Msg: fmt.Sprintf("unexpected status code: %d %s", resp.StatusCode, resp.Status), - } - } - - perr := &errors2.Error{ - Code: StatusCodeToErrorCode(resp.StatusCode), - } - - if resp.StatusCode == http.StatusUnsupportedMediaType { - perr.Msg = fmt.Sprintf("invalid media type: %q", resp.Header.Get("Content-Type")) - return perr - } - - contentType := resp.Header.Get("Content-Type") - if contentType == "" { - // Assume JSON if there is no content-type. - contentType = "application/json" - } - mediatype, _, _ := mime.ParseMediaType(contentType) - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - perr.Msg = "failed to read error response" - perr.Err = err - return perr - } - - switch mediatype { - case "application/json": - if err := json.Unmarshal(buf.Bytes(), perr); err != nil { - perr.Msg = fmt.Sprintf("attempted to unmarshal error as JSON but failed: %q", err) - perr.Err = firstLineAsError(buf) - } - default: - perr.Err = firstLineAsError(buf) - } - - if perr.Code == "" { - // given it was unset during attempt to unmarshal as JSON - perr.Code = StatusCodeToErrorCode(resp.StatusCode) - } - - return perr -} -func firstLineAsError(buf bytes.Buffer) error { - line, _ := buf.ReadString('\n') - return errors.New(strings.TrimSuffix(line, "\n")) -} diff --git a/kit/transport/http/error_handler_test.go b/kit/transport/http/error_handler_test.go deleted file mode 100644 index 60fe89e78a2..00000000000 --- a/kit/transport/http/error_handler_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package http_test - -import ( - "context" - "fmt" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap/zaptest" -) - -func TestEncodeError(t *testing.T) { - ctx := context.TODO() - - w := httptest.NewRecorder() - - kithttp.NewErrorHandler(zaptest.NewLogger(t)).HandleHTTPError(ctx, nil, w) - - if w.Code != 200 { - t.Errorf("expected status code 200, got: %d", w.Code) - } -} - -func TestEncodeErrorWithError(t *testing.T) { - ctx := context.TODO() - err := &errors.Error{ - Code: errors.EInternal, - Msg: "an error occurred", - Err: fmt.Errorf("there's an error here, be aware"), - } - - w := httptest.NewRecorder() - - kithttp.NewErrorHandler(zaptest.NewLogger(t)).HandleHTTPError(ctx, err, w) - - if w.Code != 500 { - t.Errorf("expected status code 500, got: %d", w.Code) - } - - errHeader := w.Header().Get("X-Platform-Error-Code") - if errHeader != errors.EInternal { - t.Errorf("expected X-Platform-Error-Code: %s, got: %s", errors.EInternal, errHeader) - } - - // The http handler will flatten the message and it will not - // have an error property, so reading the serialization results - // in a different error. 
- pe := http.CheckError(w.Result()).(*errors.Error) - if want, got := errors.EInternal, pe.Code; want != got { - t.Errorf("unexpected code -want/+got:\n\t- %q\n\t+ %q", want, got) - } - if want, got := "an error occurred: there's an error here, be aware", pe.Msg; want != got { - t.Errorf("unexpected message -want/+got:\n\t- %q\n\t+ %q", want, got) - } -} diff --git a/kit/transport/http/feature_controller.go b/kit/transport/http/feature_controller.go deleted file mode 100644 index a3c562449aa..00000000000 --- a/kit/transport/http/feature_controller.go +++ /dev/null @@ -1,39 +0,0 @@ -package http - -import ( - "context" - "net/http" - - "github.com/influxdata/influxdb/v2/kit/feature" -) - -// Enabler allows the switching between two HTTP Handlers -type Enabler interface { - Enabled(ctx context.Context, flagger ...feature.Flagger) bool -} - -// FeatureHandler is used to switch requests between an existing and a feature flagged -// HTTP Handler on a per-request basis -type FeatureHandler struct { - enabler Enabler - flagger feature.Flagger - oldHandler http.Handler - newHandler http.Handler - prefix string -} - -func NewFeatureHandler(e Enabler, f feature.Flagger, old, new http.Handler, prefix string) *FeatureHandler { - return &FeatureHandler{e, f, old, new, prefix} -} - -func (h *FeatureHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if h.enabler.Enabled(r.Context(), h.flagger) { - h.newHandler.ServeHTTP(w, r) - return - } - h.oldHandler.ServeHTTP(w, r) -} - -func (h *FeatureHandler) Prefix() string { - return h.prefix -} diff --git a/kit/transport/http/handler.go b/kit/transport/http/handler.go deleted file mode 100644 index ba83a61baf7..00000000000 --- a/kit/transport/http/handler.go +++ /dev/null @@ -1,11 +0,0 @@ -package http - -import "net/http" - -// ResourceHandler is an HTTP handler for a resource. The prefix -// describes the url path prefix that relates to the handler -// endpoints. -type ResourceHandler interface { - Prefix() string - http.Handler -} diff --git a/kit/transport/http/middleware.go b/kit/transport/http/middleware.go deleted file mode 100644 index f3d45daddbe..00000000000 --- a/kit/transport/http/middleware.go +++ /dev/null @@ -1,265 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http" - "path" - "regexp" - "strings" - "time" - - "github.com/go-chi/chi" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - ua "github.com/mileusna/useragent" - "github.com/prometheus/client_golang/prometheus" -) - -// Middleware constructor. -type Middleware func(http.Handler) http.Handler - -// SetCORS configures various headers to relax CORS browser checks -// -// Browsers implement Cross-Origin Resource Sharing (CORS) checks to limit -// cross-origin accesses in HTTP requests. Various CORS headers can be used to -// relax the standard, strict browser behavior. For details on CORS, see: -// https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS -// -// IMPORTANT: default CORS checks disallows attaching credentials (eg, session -// cookie) with cross-origin requests (even when Access-Control-Allow-Origin is -// set to the Origin) and we omit the 'Access-Control-Allow-Credentials' header -// here to preserve this behavior. Omitting this header provides security -// defense-in-depth. 
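The middleware in this file are plain `func(http.Handler) http.Handler` values, so they compose by wrapping. A minimal sketch of stacking the CORS, tracing, and metrics layers around a mux; the handler name, metric names, wrapping order, and omission of registry registration are all illustrative assumptions, not the repository's actual wiring.

```go
package example // hypothetical wiring, not part of the repo

import (
	"net/http"

	kithttp "github.com/influxdata/influxdb/v2/kit/transport/http"
	"github.com/prometheus/client_golang/prometheus"
)

func newServer(mux *http.ServeMux) http.Handler {
	// Label names must match those used by the Metrics middleware.
	labels := []string{"handler", "method", "path", "status", "response_code", "user_agent"}
	reqs := prometheus.NewCounterVec(prometheus.CounterOpts{Name: "http_api_requests_total"}, labels)
	durs := prometheus.NewHistogramVec(prometheus.HistogramOpts{Name: "http_api_request_duration_seconds"}, labels)
	// Registering reqs/durs with a prometheus registry is elided here.

	var h http.Handler = mux
	h = kithttp.Metrics("api", reqs, durs)(h) // innermost: record per-request metrics
	h = kithttp.Trace("api")(h)               // extract/continue the trace span
	h = kithttp.SetCORS(h)                    // relax CORS and answer preflight requests
	h = kithttp.SkipOptions(h)                // reject non-preflight OPTIONS requests
	return h
}
```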
-func SetCORS(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - if origin := r.Header.Get("Origin"); origin != "" { - // Access-Control-Allow-Origin must be present in every response - w.Header().Set("Access-Control-Allow-Origin", origin) - } - if r.Method == http.MethodOptions { - // allow and stop processing in pre-flight requests - w.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE, PATCH") - w.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, Authorization, User-Agent") - w.WriteHeader(http.StatusNoContent) - return - } - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) -} - -func Metrics(name string, reqMetric *prometheus.CounterVec, durMetric *prometheus.HistogramVec) Middleware { - return func(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - statusW := NewStatusResponseWriter(w) - - defer func(start time.Time) { - statusCode := statusW.Code() - // only log metrics for 2XX or 5XX requests - if !reportFromCode(statusCode) { - return - } - - label := prometheus.Labels{ - "handler": name, - "method": r.Method, - "path": normalizePath(r.URL.Path), - "status": statusW.StatusCodeClass(), - "response_code": fmt.Sprintf("%d", statusCode), - "user_agent": UserAgent(r), - } - - durMetric.With(label).Observe(time.Since(start).Seconds()) - reqMetric.With(label).Inc() - }(time.Now()) - - next.ServeHTTP(statusW, r) - } - return http.HandlerFunc(fn) - } -} - -func SkipOptions(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - // Preflight CORS requests from the browser will send an options request, - // so we need to make sure we satisfy them - if origin := r.Header.Get("Origin"); origin == "" && r.Method == http.MethodOptions { - w.WriteHeader(http.StatusMethodNotAllowed) - return - } - - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) -} - -func Trace(name string) Middleware { - return func(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - span, r := tracing.ExtractFromHTTPRequest(r, name) - defer span.Finish() - - span.LogKV("user_agent", UserAgent(r)) - for k, v := range r.Header { - if len(v) == 0 { - continue - } - - if k == "Authorization" || k == "User-Agent" { - continue - } - - // If header has multiple values, only the first value will be logged on the trace. - span.LogKV(k, v[0]) - } - - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) - } -} - -func UserAgent(r *http.Request) string { - header := r.Header.Get("User-Agent") - if header == "" { - return "unknown" - } - - return ua.Parse(header).Name -} - -// Constants used for normalizing paths -const ( - fileSlug = ":file_name" - shardSlug = ":shard_id" -) - -func normalizePath(p string) string { - // Normalize any paths used during backup or restore processes - p = normalizeBackupAndRestore(p) - - // Go through each part of the path and normalize IDs and UI assets - var parts []string - for head, tail := shiftPath(p); ; head, tail = shiftPath(tail) { - piece := head - - // Normalize any ID's in the path as the ":id" slug - if len(piece) == platform.IDLength { - if _, err := platform.IDFromString(head); err == nil { - piece = ":id" - } - } - parts = append(parts, piece) - - if tail == "/" { - // Normalize UI asset file names. The UI asset file is the last part of the path. 
- parts[len(parts)-1] = normalizeAssetFile(parts[len(parts)-1]) - break - } - } - return "/" + path.Join(parts...) -} - -func shiftPath(p string) (head, tail string) { - p = path.Clean("/" + p) - i := strings.Index(p[1:], "/") + 1 - if i <= 0 { - return p[1:], "/" - } - return p[1:i], p[i:] -} - -// Normalize the file name for a UI asset -// For example: 838442d56d.svg will return as :file_id.svg -// Files names that do not have one of the listed extensions will be returned unchanged -func normalizeAssetFile(f string) string { - exts := []string{ - ".js", - ".svg", - ".woff2", - ".wasm", - ".map", - ".LICENSE", - ".ttf", - ".woff", - ".eot", - } - - for _, ext := range exts { - if strings.HasSuffix(f, ext) { - return fileSlug + ext - } - } - - return f -} - -// Normalize paths used during the backup and restore process. -// Paths not matching any of the patterns will be returned unchanged. -func normalizeBackupAndRestore(pth string) string { - patterns := map[string]string{ - `restore/shards/\d+`: path.Join("restore/shards", shardSlug), - `backup/shards/\d+`: path.Join("backup/shards", shardSlug), - } - - for p, s := range patterns { - re := regexp.MustCompile(p) - if re.MatchString(pth) { - return re.ReplaceAllString(pth, s) - } - } - - return pth -} - -type OrgContext string - -const CtxOrgKey OrgContext = "orgID" - -// ValidResource make sure a resource exists when a sub system needs to be mounted to an api -func ValidResource(api *API, lookupOrgByResourceID func(context.Context, platform.ID) (platform.ID, error)) Middleware { - return func(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - statusW := NewStatusResponseWriter(w) - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - api.Err(w, r, platform.ErrCorruptID(err)) - return - } - - ctx := r.Context() - - orgID, err := lookupOrgByResourceID(ctx, *id) - if err != nil { - // if this function returns an error we will squash the error message and replace it with a not found error - api.Err(w, r, &errors.Error{ - Code: errors.ENotFound, - Msg: "404 page not found", - }) - return - } - - // embed OrgID into context - next.ServeHTTP(statusW, r.WithContext(context.WithValue(ctx, CtxOrgKey, orgID))) - } - return http.HandlerFunc(fn) - } -} - -// OrgIDFromContext .... -func OrgIDFromContext(ctx context.Context) *platform.ID { - v := ctx.Value(CtxOrgKey) - if v == nil { - return nil - } - id := v.(platform.ID) - return &id -} - -// reportFromCode is a helper function to determine if telemetry data should be -// reported for this response. 
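// A minimal sketch of reading the organization ID that ValidResource stores on
// the request context; the handler name, body, and error message are
// illustrative only, assuming the imports already present in this file.
func exampleOrgScopedHandler(api *API) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		orgID := OrgIDFromContext(r.Context())
		if orgID == nil {
			// ValidResource was not mounted in front of this handler.
			api.Err(w, r, &errors.Error{Code: errors.EInternal, Msg: "organization not resolved"})
			return
		}
		// ... scope the remaining work to *orgID ...
	}
}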
-func reportFromCode(c int) bool { - return (c >= 200 && c <= 299) || (c >= 500 && c <= 599) -} diff --git a/kit/transport/http/middleware_test.go b/kit/transport/http/middleware_test.go deleted file mode 100644 index 841097b8b5f..00000000000 --- a/kit/transport/http/middleware_test.go +++ /dev/null @@ -1,233 +0,0 @@ -package http - -import ( - "net/http" - "net/http/httptest" - "path" - "testing" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/kit/prom/promtest" - "github.com/influxdata/influxdb/v2/pkg/testttp" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestMetrics(t *testing.T) { - labels := []string{"handler", "method", "path", "status", "response_code", "user_agent"} - - nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/" { - w.WriteHeader(http.StatusOK) - return - } - - if r.URL.Path == "/serverError" { - w.WriteHeader(http.StatusInternalServerError) - return - } - - if r.URL.Path == "/redirect" { - w.WriteHeader(http.StatusPermanentRedirect) - return - } - - w.WriteHeader(http.StatusNotFound) - }) - - tests := []struct { - name string - reqPath string - wantCount int - labelResponse string - labelStatus string - }{ - { - name: "counter increments on OK (2XX) ", - reqPath: "/", - wantCount: 1, - labelResponse: "200", - labelStatus: "2XX", - }, - { - name: "counter does not increment on not found (4XX)", - reqPath: "/badpath", - wantCount: 0, - }, - { - name: "counter increments on server error (5XX)", - reqPath: "/serverError", - wantCount: 1, - labelResponse: "500", - labelStatus: "5XX", - }, - { - name: "counter does not increment on redirect (3XX)", - reqPath: "/redirect", - wantCount: 0, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - counter := prometheus.NewCounterVec(prometheus.CounterOpts{Name: "counter"}, labels) - hist := prometheus.NewHistogramVec(prometheus.HistogramOpts{Name: "hist"}, labels) - reg := prom.NewRegistry(zaptest.NewLogger(t)) - reg.MustRegister(counter, hist) - - metricsMw := Metrics("testing", counter, hist) - svr := metricsMw(nextHandler) - r := httptest.NewRequest("GET", tt.reqPath, nil) - w := httptest.NewRecorder() - svr.ServeHTTP(w, r) - - mfs := promtest.MustGather(t, reg) - m := promtest.FindMetric(mfs, "counter", map[string]string{ - "handler": "testing", - "method": "GET", - "path": tt.reqPath, - "response_code": tt.labelResponse, - "status": tt.labelStatus, - "user_agent": "unknown", - }) - - if tt.wantCount == 0 { - require.Nil(t, m) - return - } - - require.Equal(t, tt.wantCount, int(m.Counter.GetValue())) - }) - } -} - -func Test_normalizePath(t *testing.T) { - tests := []struct { - name string - path string - expected string - }{ - { - name: "1", - path: path.Join("/api/v2/organizations", platform.ID(2).String()), - expected: "/api/v2/organizations/:id", - }, - { - name: "2", - path: "/api/v2/organizations", - expected: "/api/v2/organizations", - }, - { - name: "3", - path: "/", - expected: "/", - }, - { - name: "4", - path: path.Join("/api/v2/organizations", platform.ID(2).String(), "users", platform.ID(3).String()), - expected: "/api/v2/organizations/:id/users/:id", - }, - { - name: "5", - path: "/838442d56d.svg", - expected: "/" + fileSlug + ".svg", - }, - { - name: "6", - path: 
"/838442d56d.svg/extra", - expected: "/838442d56d.svg/extra", - }, - { - name: "7", - path: "/api/v2/restore/shards/1001", - expected: path.Join("/api/v2/restore/shards/", shardSlug), - }, - { - name: "8", - path: "/api/v2/restore/shards/1001/extra", - expected: path.Join("/api/v2/restore/shards/", shardSlug, "extra"), - }, - { - name: "9", - path: "/api/v2/backup/shards/1005", - expected: path.Join("/api/v2/backup/shards/", shardSlug), - }, - { - name: "10", - path: "/api/v2/backup/shards/1005/extra", - expected: path.Join("/api/v2/backup/shards/", shardSlug, "extra"), - }, - { - name: "11", - path: "/35bb8d560d.ttf", - expected: "/" + fileSlug + ".ttf", - }, - { - name: "12", - path: "/35bb8d560d.woff", - expected: "/" + fileSlug + ".woff", - }, - { - name: "13", - path: "/35bb8d560d.eot", - expected: "/" + fileSlug + ".eot", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - actual := normalizePath(tt.path) - assert.Equal(t, tt.expected, actual) - }) - } -} - -func TestCors(t *testing.T) { - nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("nextHandler")) - }) - - tests := []struct { - name string - method string - headers []string - expectedStatus int - expectedHeaders map[string]string - }{ - { - name: "OPTIONS without Origin", - method: "OPTIONS", - expectedStatus: http.StatusMethodNotAllowed, - }, - { - name: "OPTIONS with Origin", - method: "OPTIONS", - headers: []string{"Origin", "http://myapp.com"}, - expectedStatus: http.StatusNoContent, - }, - { - name: "GET with Origin", - method: "GET", - headers: []string{"Origin", "http://anotherapp.com"}, - expectedStatus: http.StatusOK, - expectedHeaders: map[string]string{ - "Access-Control-Allow-Origin": "http://anotherapp.com", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svr := SkipOptions(SetCORS(nextHandler)) - - testttp. - HTTP(t, tt.method, "/", nil). - Headers("", "", tt.headers...). - Do(svr). - ExpectStatus(tt.expectedStatus). - ExpectHeaders(tt.expectedHeaders) - }) - } -} diff --git a/kit/transport/http/status_response_writer.go b/kit/transport/http/status_response_writer.go deleted file mode 100644 index 68efba4828e..00000000000 --- a/kit/transport/http/status_response_writer.go +++ /dev/null @@ -1,58 +0,0 @@ -package http - -import "net/http" - -type StatusResponseWriter struct { - statusCode int - responseBytes int - http.ResponseWriter -} - -func NewStatusResponseWriter(w http.ResponseWriter) *StatusResponseWriter { - return &StatusResponseWriter{ - ResponseWriter: w, - } -} - -func (w *StatusResponseWriter) Write(b []byte) (int, error) { - n, err := w.ResponseWriter.Write(b) - w.responseBytes += n - return n, err -} - -// WriteHeader writes the header and captures the status code. -func (w *StatusResponseWriter) WriteHeader(statusCode int) { - w.statusCode = statusCode - w.ResponseWriter.WriteHeader(statusCode) -} - -func (w *StatusResponseWriter) Code() int { - code := w.statusCode - if code == 0 { - // When statusCode is 0 then WriteHeader was never called and we can assume that - // the ResponseWriter wrote an http.StatusOK. 
- code = http.StatusOK - } - return code -} - -func (w *StatusResponseWriter) ResponseBytes() int { - return w.responseBytes -} - -func (w *StatusResponseWriter) StatusCodeClass() string { - class := "XXX" - switch w.Code() / 100 { - case 1: - class = "1XX" - case 2: - class = "2XX" - case 3: - class = "3XX" - case 4: - class = "4XX" - case 5: - class = "5XX" - } - return class -} diff --git a/kv/backup.go b/kv/backup.go deleted file mode 100644 index 0744bfb6773..00000000000 --- a/kv/backup.go +++ /dev/null @@ -1,10 +0,0 @@ -package kv - -import ( - "context" - "io" -) - -func (s *Service) Backup(ctx context.Context, w io.Writer) error { - return s.kv.Backup(ctx, w) -} diff --git a/kv/cursor.go b/kv/cursor.go deleted file mode 100644 index a8a54e57a79..00000000000 --- a/kv/cursor.go +++ /dev/null @@ -1,80 +0,0 @@ -package kv - -import ( - "bytes" - "sort" -) - -// staticCursor implements the Cursor interface for a slice of -// static key value pairs. -type staticCursor struct { - idx int - pairs []Pair -} - -// Pair is a struct for key value pairs. -type Pair struct { - Key []byte - Value []byte -} - -// NewStaticCursor returns an instance of a StaticCursor. It -// destructively sorts the provided pairs to be in key ascending order. -func NewStaticCursor(pairs []Pair) Cursor { - sort.Slice(pairs, func(i, j int) bool { - return bytes.Compare(pairs[i].Key, pairs[j].Key) < 0 - }) - return &staticCursor{ - pairs: pairs, - } -} - -// Seek searches the slice for the first key with the provided prefix. -func (c *staticCursor) Seek(prefix []byte) ([]byte, []byte) { - // TODO: do binary search for prefix since pairs are ordered. - for i, pair := range c.pairs { - if bytes.HasPrefix(pair.Key, prefix) { - c.idx = i - return pair.Key, pair.Value - } - } - - return nil, nil -} - -func (c *staticCursor) getValueAtIndex(delta int) ([]byte, []byte) { - idx := c.idx + delta - if idx < 0 { - return nil, nil - } - - if idx >= len(c.pairs) { - return nil, nil - } - - c.idx = idx - - pair := c.pairs[c.idx] - - return pair.Key, pair.Value -} - -// First retrieves the first element in the cursor. -func (c *staticCursor) First() ([]byte, []byte) { - return c.getValueAtIndex(-c.idx) -} - -// Last retrieves the last element in the cursor. -func (c *staticCursor) Last() ([]byte, []byte) { - return c.getValueAtIndex(len(c.pairs) - 1 - c.idx) -} - -// Next retrieves the next entry in the cursor. -func (c *staticCursor) Next() ([]byte, []byte) { - return c.getValueAtIndex(1) -} - -// Prev retrieves the previous entry in the cursor. 
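// A minimal sketch of how the static cursor behaves; the pairs below are
// illustrative. NewStaticCursor sorts them, so iteration starts at "a":
//
//	cur := NewStaticCursor([]Pair{
//		{Key: []byte("b"), Value: []byte("2")},
//		{Key: []byte("a"), Value: []byte("1")},
//	})
//
//	for k, v := cur.First(); k != nil; k, v = cur.Next() {
//		// visits a=1, then b=2
//		_ = v
//	}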
-func (c *staticCursor) Prev() ([]byte, []byte) { - return c.getValueAtIndex(-1) -} diff --git a/kv/cursor_test.go b/kv/cursor_test.go deleted file mode 100644 index ec18503d35b..00000000000 --- a/kv/cursor_test.go +++ /dev/null @@ -1,244 +0,0 @@ -package kv_test - -import ( - "bytes" - "testing" - - "github.com/influxdata/influxdb/v2/kv" -) - -func TestStaticCursor_First(t *testing.T) { - type args struct { - pairs []kv.Pair - } - type wants struct { - key []byte - val []byte - } - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "nil pairs", - args: args{ - pairs: nil, - }, - wants: wants{}, - }, - { - name: "empty pairs", - args: args{ - pairs: []kv.Pair{}, - }, - wants: wants{}, - }, - { - name: "unsorted pairs", - args: args{ - pairs: []kv.Pair{ - { - Key: []byte("bcd"), - Value: []byte("yoyo"), - }, - { - Key: []byte("abc"), - Value: []byte("oyoy"), - }, - }, - }, - wants: wants{ - key: []byte("abc"), - val: []byte("oyoy"), - }, - }, - { - name: "sorted pairs", - args: args{ - pairs: []kv.Pair{ - { - Key: []byte("abc"), - Value: []byte("oyoy"), - }, - { - Key: []byte("bcd"), - Value: []byte("yoyo"), - }, - }, - }, - wants: wants{ - key: []byte("abc"), - val: []byte("oyoy"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cur := kv.NewStaticCursor(tt.args.pairs) - - key, val := cur.First() - - if want, got := tt.wants.key, key; !bytes.Equal(want, got) { - t.Errorf("exptected to get key %s got %s", string(want), string(got)) - } - - if want, got := tt.wants.val, val; !bytes.Equal(want, got) { - t.Errorf("exptected to get value %s got %s", string(want), string(got)) - } - }) - } -} - -func TestStaticCursor_Last(t *testing.T) { - type args struct { - pairs []kv.Pair - } - type wants struct { - key []byte - val []byte - } - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "nil pairs", - args: args{ - pairs: nil, - }, - wants: wants{}, - }, - { - name: "empty pairs", - args: args{ - pairs: []kv.Pair{}, - }, - wants: wants{}, - }, - { - name: "unsorted pairs", - args: args{ - pairs: []kv.Pair{ - { - Key: []byte("bcd"), - Value: []byte("yoyo"), - }, - { - Key: []byte("abc"), - Value: []byte("oyoy"), - }, - }, - }, - wants: wants{ - key: []byte("bcd"), - val: []byte("yoyo"), - }, - }, - { - name: "sorted pairs", - args: args{ - pairs: []kv.Pair{ - { - Key: []byte("abc"), - Value: []byte("oyoy"), - }, - { - Key: []byte("bcd"), - Value: []byte("yoyo"), - }, - }, - }, - wants: wants{ - key: []byte("bcd"), - val: []byte("yoyo"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cur := kv.NewStaticCursor(tt.args.pairs) - - key, val := cur.Last() - - if want, got := tt.wants.key, key; !bytes.Equal(want, got) { - t.Errorf("exptected to get key %s got %s", string(want), string(got)) - } - - if want, got := tt.wants.val, val; !bytes.Equal(want, got) { - t.Errorf("exptected to get value %s got %s", string(want), string(got)) - } - }) - } -} - -func TestStaticCursor_Seek(t *testing.T) { - type args struct { - prefix []byte - pairs []kv.Pair - } - type wants struct { - key []byte - val []byte - } - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "sorted pairs", - args: args{ - prefix: []byte("bc"), - pairs: []kv.Pair{ - { - Key: []byte("abc"), - Value: []byte("oyoy"), - }, - { - Key: []byte("abcd"), - Value: []byte("oyoy"), - }, - { - Key: []byte("bcd"), - Value: []byte("yoyo"), - }, - { - Key: []byte("bcde"), - Value: []byte("yoyo"), - 
}, - { - Key: []byte("cde"), - Value: []byte("yyoo"), - }, - }, - }, - wants: wants{ - key: []byte("bcd"), - val: []byte("yoyo"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cur := kv.NewStaticCursor(tt.args.pairs) - - key, val := cur.Seek(tt.args.prefix) - - if want, got := tt.wants.key, key; !bytes.Equal(want, got) { - t.Errorf("exptected to get key %s got %s", string(want), string(got)) - } - - if want, got := tt.wants.val, val; !bytes.Equal(want, got) { - t.Errorf("exptected to get value %s got %s", string(want), string(got)) - } - }) - } -} diff --git a/kv/doc.go b/kv/doc.go deleted file mode 100644 index 3c396f57170..00000000000 --- a/kv/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// package kv -// -// The KV package is a set of services and abstractions built around key value storage. -// There exist in-memory and persisted implementations of the core `Store` family of -// interfaces outside of this package (see `inmem` and `bolt` packages). -// -// The `Store` interface exposes transactional access to a backing kv persistence layer. -// It allows for read-only (View) and read-write (Update) transactions to be opened. -// These methods take a function which is passed an implementation of the transaction interface (Tx). -// This interface exposes a way to manipulate namespaced keys and values (Buckets). -// -// All keys and values are namespaced (grouped) using buckets. Buckets can only be created on -// implementations of the `SchemaStore` interface. This is a superset of the `Store` interface, -// which has the additional bucket creation and deletion methods. -// -// Bucket creation and deletion should be facilitated via a migration (see `kv/migration`). -package kv diff --git a/kv/document.go b/kv/document.go deleted file mode 100644 index 87ac6322dd9..00000000000 --- a/kv/document.go +++ /dev/null @@ -1,253 +0,0 @@ -package kv - -import ( - "context" - "encoding/json" - "path" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -const ( - documentContentBucket = "/documents/content" - documentMetaBucket = "/documents/meta" -) - -// DocumentStore implements influxdb.DocumentStore. -type DocumentStore struct { - service *Service - namespace string -} - -// CreateDocumentStore creates an instance of a document store by instantiating the buckets for the store. -func (s *Service) CreateDocumentStore(ctx context.Context, ns string) (influxdb.DocumentStore, error) { - // TODO(desa): keep track of which namespaces exist. - return s.createDocumentStore(ctx, ns) -} - -func (s *Service) createDocumentStore(ctx context.Context, ns string) (influxdb.DocumentStore, error) { - return &DocumentStore{ - namespace: ns, - service: s, - }, nil -} - -// FindDocumentStore finds the buckets associated with the namespace provided. -func (s *Service) FindDocumentStore(ctx context.Context, ns string) (influxdb.DocumentStore, error) { - var ds influxdb.DocumentStore - - err := s.kv.View(ctx, func(tx Tx) error { - if _, err := tx.Bucket([]byte(path.Join(ns, documentContentBucket))); err != nil { - return err - } - - if _, err := tx.Bucket([]byte(path.Join(ns, documentMetaBucket))); err != nil { - return err - } - - ds = &DocumentStore{ - namespace: ns, - service: s, - } - - return nil - }) - - if err != nil { - return nil, err - } - - return ds, nil -} - -// CreateDocument creates an instance of a document and sets the ID. 
After which it applies each of the options provided. -func (s *DocumentStore) CreateDocument(ctx context.Context, d *influxdb.Document) error { - return s.service.kv.Update(ctx, func(tx Tx) error { - err := s.service.createDocument(ctx, tx, s.namespace, d) - if err != nil { - return err - } - - return nil - }) -} - -func (s *Service) createDocument(ctx context.Context, tx Tx, ns string, d *influxdb.Document) error { - d.ID = s.IDGenerator.ID() - d.Meta.CreatedAt = s.Now() - d.Meta.UpdatedAt = s.Now() - return s.putDocument(ctx, tx, ns, d) -} - -func (s *Service) putDocument(ctx context.Context, tx Tx, ns string, d *influxdb.Document) error { - if err := s.putDocumentMeta(ctx, tx, ns, d.ID, d.Meta); err != nil { - return err - } - - if err := s.putDocumentContent(ctx, tx, ns, d.ID, d.Content); err != nil { - return err - } - - // TODO(desa): index document meta - - return nil -} - -func (s *Service) putAtID(ctx context.Context, tx Tx, bucket string, id platform.ID, i interface{}) error { - v, err := json.Marshal(i) - if err != nil { - return err - } - - k, err := id.Encode() - if err != nil { - return err - } - - b, err := tx.Bucket([]byte(bucket)) - if err != nil { - return err - } - - if err := b.Put(k, v); err != nil { - return err - } - - return nil -} - -func (s *Service) putDocumentContent(ctx context.Context, tx Tx, ns string, id platform.ID, data interface{}) error { - return s.putAtID(ctx, tx, path.Join(ns, documentContentBucket), id, data) -} - -func (s *Service) putDocumentMeta(ctx context.Context, tx Tx, ns string, id platform.ID, m influxdb.DocumentMeta) error { - return s.putAtID(ctx, tx, path.Join(ns, documentMetaBucket), id, m) -} - -func (s *DocumentStore) PutDocument(ctx context.Context, d *influxdb.Document) error { - return s.service.kv.Update(ctx, func(tx Tx) error { - return s.service.putDocument(ctx, tx, s.namespace, d) - }) -} - -func (s *Service) findByID(ctx context.Context, tx Tx, bucket string, id platform.ID, i interface{}) error { - b, err := tx.Bucket([]byte(bucket)) - if err != nil { - return err - } - - k, err := id.Encode() - if err != nil { - return err - } - - v, err := b.Get(k) - if err != nil { - return err - } - - if err := json.Unmarshal(v, i); err != nil { - return err - } - - return nil -} - -func (s *Service) findDocumentMetaByID(ctx context.Context, tx Tx, ns string, id platform.ID) (*influxdb.DocumentMeta, error) { - m := &influxdb.DocumentMeta{} - - if err := s.findByID(ctx, tx, path.Join(ns, documentMetaBucket), id, m); err != nil { - return nil, err - } - - return m, nil -} - -func (s *Service) findDocumentContentByID(ctx context.Context, tx Tx, ns string, id platform.ID) (interface{}, error) { - var data interface{} - if err := s.findByID(ctx, tx, path.Join(ns, documentContentBucket), id, &data); err != nil { - return nil, err - } - - return data, nil -} - -// FindDocument retrieves the specified document with all its content and labels. 
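// A minimal sketch of the document store in use; the namespace, meta fields,
// and content below are illustrative:
//
//	store, err := svc.CreateDocumentStore(ctx, "templates")
//	if err != nil {
//		// handle error
//	}
//
//	doc := &influxdb.Document{
//		Meta:    influxdb.DocumentMeta{Name: "example"},
//		Content: map[string]interface{}{"hello": "world"},
//	}
//	_ = store.CreateDocument(ctx, doc) // assigns doc.ID plus created/updated timestamps
//
//	found, _ := store.FindDocument(ctx, doc.ID)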
-func (s *DocumentStore) FindDocument(ctx context.Context, id platform.ID) (*influxdb.Document, error) { - var d *influxdb.Document - err := s.service.kv.View(ctx, func(tx Tx) error { - m, err := s.service.findDocumentMetaByID(ctx, tx, s.namespace, id) - if err != nil { - return err - } - c, err := s.service.findDocumentContentByID(ctx, tx, s.namespace, id) - if err != nil { - return err - } - d = &influxdb.Document{ - ID: id, - Meta: *m, - Content: c, - } - - return nil - }) - - if IsNotFound(err) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrDocumentNotFound, - } - } - - if err != nil { - return nil, err - } - - return d, nil -} - -// FindDocuments retrieves all documents returned by the document find options. -func (s *DocumentStore) FindDocuments(ctx context.Context, _ platform.ID) ([]*influxdb.Document, error) { - var ds []*influxdb.Document - - err := s.service.kv.View(ctx, func(tx Tx) error { - // TODO(desa): might be a better way to do get all. - if err := s.service.findDocuments(ctx, tx, s.namespace, &ds); err != nil { - return err - } - - return nil - }) - - return ds, err -} - -func (s *Service) findDocuments(ctx context.Context, tx Tx, ns string, ds *[]*influxdb.Document) error { - metab, err := tx.Bucket([]byte(path.Join(ns, documentMetaBucket))) - if err != nil { - return err - } - - cur, err := metab.ForwardCursor(nil) - if err != nil { - return err - } - - for k, v := cur.Next(); len(k) != 0; k, v = cur.Next() { - d := &influxdb.Document{} - if err := d.ID.Decode(k); err != nil { - return err - } - - if err := json.Unmarshal(v, &d.Meta); err != nil { - return err - } - - *ds = append(*ds, d) - } - - return nil -} diff --git a/kv/encode.go b/kv/encode.go deleted file mode 100644 index bcbdcf008dc..00000000000 --- a/kv/encode.go +++ /dev/null @@ -1,56 +0,0 @@ -package kv - -import ( - "errors" - "strings" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// EncodeFn returns an encoding when called. Closures are your friend here. -type EncodeFn func() ([]byte, error) - -// Encode concatenates a list of encodings together. -func Encode(encodings ...EncodeFn) EncodeFn { - return func() ([]byte, error) { - var key []byte - for _, enc := range encodings { - part, err := enc() - if err != nil { - return key, err - } - key = append(key, part...) - } - return key, nil - } -} - -// EncString encodes a string. -func EncString(str string) EncodeFn { - return func() ([]byte, error) { - return []byte(str), nil - } -} - -// EncStringCaseInsensitive encodes a string and makes it case insensitive by lower casing -// everything. -func EncStringCaseInsensitive(str string) EncodeFn { - return EncString(strings.ToLower(str)) -} - -// EncID encodes an influx ID. -func EncID(id platform.ID) EncodeFn { - return func() ([]byte, error) { - if id == 0 { - return nil, errors.New("no ID was provided") - } - return id.Encode() - } -} - -// EncBytes is a basic pass through for providing raw bytes. -func EncBytes(b []byte) EncodeFn { - return func() ([]byte, error) { - return b, nil - } -} diff --git a/kv/errors.go b/kv/errors.go deleted file mode 100644 index 43742b42150..00000000000 --- a/kv/errors.go +++ /dev/null @@ -1,23 +0,0 @@ -package kv - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// UnexpectedIndexError is used when the error comes from an internal system. 
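// A minimal sketch of composing the encoders from kv/encode.go into a single
// key function; the ID and name are illustrative:
//
//	orgID := platform.ID(1)
//	keyFn := Encode(EncID(orgID), EncStringCaseInsensitive("My-Bucket"))
//	key, err := keyFn() // the encoded ID followed by the lower-cased name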
-func UnexpectedIndexError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("unexpected error retrieving index; Err: %v", err), - Op: "kv/index", - } -} - -// NotUniqueError is used when attempting to create a resource that already -// exists. -var NotUniqueError = &errors.Error{ - Code: errors.EConflict, - Msg: "name already exists", -} diff --git a/kv/index.go b/kv/index.go deleted file mode 100644 index d14708cf6a2..00000000000 --- a/kv/index.go +++ /dev/null @@ -1,424 +0,0 @@ -package kv - -import ( - "bytes" - "context" - "errors" -) - -// Index is used to define and manage an index for a source bucket. -// -// When using the index you must provide it with an IndexMapping. -// The IndexMapping provides the index with the contract it needs to populate -// the entire index and traverse a populated index correctly. -// The IndexMapping provides a way to retrieve the key on which to index with -// when provided with the value from the source. -// It also provides the way to access the source bucket. -// -// The following is an illustration of its use: -// -// byUserID := func(v []byte) ([]byte, error) { -// auth := &influxdb.Authorization{} -// -// if err := json.Unmarshal(v, auth); err != nil { -// return err -// } -// -// return auth.UserID.Encode() -// } -// -// // configure a write only index -// indexByUser := NewIndex(NewSource([]byte(`authorizationsbyuserv1/), byUserID)) -// -// indexByUser.Insert(tx, someUserID, someAuthID) -// -// indexByUser.Delete(tx, someUserID, someAuthID) -// -// indexByUser.Walk(tx, someUserID, func(k, v []byte) error { -// auth := &influxdb.Authorization{} -// if err := json.Unmarshal(v, auth); err != nil { -// return err -// } -// -// // do something with auth -// -// return nil -// }) -// -// // verify the current index against the source and return the differences -// // found in each -// diff, err := indexByUser.Verify(ctx, tx) -type Index struct { - IndexMapping - - // canRead configures whether or not Walk accesses the index at all - // or skips the index altogether and returns nothing. - // This is used when you want to integrate only the write path before - // releasing the read path. - canRead bool -} - -// IndexOption is a function which configures an index -type IndexOption func(*Index) - -// WithIndexReadPathEnabled enables the read paths of the index (Walk) -// This should be enabled once the index has been fully populated and -// the Insert and Delete paths are correctly integrated. -func WithIndexReadPathEnabled(i *Index) { - i.canRead = true -} - -// IndexMapping is a type which configures and Index to map items -// from a source bucket to an index bucket via a mapping known as -// IndexSourceOn. This function is called on the values in the source -// to derive the foreign key on which to index each item. -type IndexMapping interface { - SourceBucket() []byte - IndexBucket() []byte - IndexSourceOn(value []byte) (foreignKey []byte, err error) -} - -// IndexSourceOnFunc is a function which can be used to derive the foreign key -// of a value in a source bucket. 
-type IndexSourceOnFunc func([]byte) ([]byte, error) - -type indexMapping struct { - source []byte - index []byte - fn IndexSourceOnFunc -} - -func (i indexMapping) SourceBucket() []byte { return i.source } - -func (i indexMapping) IndexBucket() []byte { return i.index } - -func (i indexMapping) IndexSourceOn(v []byte) ([]byte, error) { - return i.fn(v) -} - -// NewIndexMapping creates an implementation of IndexMapping for the provided source bucket -// to a destination index bucket. -func NewIndexMapping(sourceBucket, indexBucket []byte, fn IndexSourceOnFunc) IndexMapping { - return indexMapping{ - source: sourceBucket, - index: indexBucket, - fn: fn, - } -} - -// NewIndex configures and returns a new *Index for a given index mapping. -// By default the read path (Walk) is disabled. This is because the index needs to -// be fully populated before depending upon the read path. -// The read path can be enabled using WithIndexReadPathEnabled option. -func NewIndex(mapping IndexMapping, opts ...IndexOption) *Index { - index := &Index{IndexMapping: mapping} - - for _, opt := range opts { - opt(index) - } - - return index -} - -func (i *Index) indexBucket(tx Tx) (Bucket, error) { - return tx.Bucket(i.IndexBucket()) -} - -func (i *Index) sourceBucket(tx Tx) (Bucket, error) { - return tx.Bucket(i.SourceBucket()) -} - -var ( - // ErrKeyInvalidCharacters is returned when a foreignKey or primaryKey contains - // - ErrKeyInvalidCharacters = errors.New("key: contains invalid characters") -) - -// IndexKey returns a value suitable for use as the key component -// when storing values in the index. IndexKey returns an -// ErrKeyInvalidCharacters error if either the foreignKey or primaryKey contains a /. -func IndexKey(foreignKey, primaryKey []byte) (newKey []byte, err error) { - if bytes.IndexByte(foreignKey, '/') != -1 { - return nil, ErrKeyInvalidCharacters - } - if bytes.IndexByte(primaryKey, '/') != -1 { - return nil, ErrKeyInvalidCharacters - } - - newKey = make([]byte, len(primaryKey)+len(foreignKey)+1) - copy(newKey, foreignKey) - newKey[len(foreignKey)] = '/' - copy(newKey[len(foreignKey)+1:], primaryKey) - - return -} - -func indexKeyParts(indexKey []byte) (fk, pk []byte, err error) { - // this function is called with items missing in index - fk, pk, ok := bytes.Cut(indexKey, []byte("/")) - if !ok { - return nil, nil, errors.New("malformed index key") - } - return -} - -// Insert creates a single index entry for the provided primary key on the foreign key. -func (i *Index) Insert(tx Tx, foreignKey, primaryKey []byte) error { - bkt, err := i.indexBucket(tx) - if err != nil { - return err - } - - key, err := IndexKey(foreignKey, primaryKey) - if err != nil { - return err - } - - return bkt.Put(key, primaryKey) -} - -// Delete removes the foreignKey and primaryKey mapping from the underlying index. -func (i *Index) Delete(tx Tx, foreignKey, primaryKey []byte) error { - bkt, err := i.indexBucket(tx) - if err != nil { - return err - } - - key, err := IndexKey(foreignKey, primaryKey) - if err != nil { - return err - } - - return bkt.Delete(key) -} - -// Walk walks the source bucket using keys found in the index using the provided foreign key -// given the index has been fully populated. 
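// A minimal sketch of declaring a mapping and maintaining the index on the
// write path; the bucket names and JSON field are illustrative:
//
//	byUser := NewIndexMapping(
//		[]byte("authorizationsv1"),      // source bucket
//		[]byte("authorizationbyuserv1"), // index bucket
//		func(v []byte) ([]byte, error) {
//			var auth struct {
//				UserID platform.ID `json:"userID"`
//			}
//			if err := json.Unmarshal(v, &auth); err != nil {
//				return nil, err
//			}
//			return auth.UserID.Encode()
//		},
//	)
//
//	idx := NewIndex(byUser)
//	// inside the same Update transaction as the primary write:
//	//	_ = idx.Insert(tx, foreignKey, primaryKey)
//	// and on delete:
//	//	_ = idx.Delete(tx, foreignKey, primaryKey)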
-func (i *Index) Walk(ctx context.Context, tx Tx, foreignKey []byte, visitFn VisitFunc) error { - // skip walking if configured to do so as the index - // is currently being used purely to write the index - if !i.canRead { - return nil - } - - sourceBucket, err := i.sourceBucket(tx) - if err != nil { - return err - } - - indexBucket, err := i.indexBucket(tx) - if err != nil { - return err - } - - cursor, err := indexBucket.ForwardCursor(foreignKey, - WithCursorPrefix(foreignKey)) - if err != nil { - return err - } - - return indexWalk(foreignKey, cursor, sourceBucket, visitFn) -} - -// indexWalk consumes the indexKey and primaryKey pairs in the index bucket and looks up their -// associated primaryKey's value in the provided source bucket. -// When an item is located in the source, the provided visit function is called with primary key and associated value. -func indexWalk(foreignKey []byte, indexCursor ForwardCursor, sourceBucket Bucket, visit VisitFunc) (err error) { - var keys [][]byte - for ik, pk := indexCursor.Next(); ik != nil; ik, pk = indexCursor.Next() { - if fk, _, err := indexKeyParts(ik); err != nil { - return err - } else if string(fk) == string(foreignKey) { - keys = append(keys, pk) - } - } - - if err := indexCursor.Err(); err != nil { - return err - } - - if err := indexCursor.Close(); err != nil { - return err - } - - values, err := sourceBucket.GetBatch(keys...) - if err != nil { - return err - } - - for i, value := range values { - if value != nil { - if cont, err := visit(keys[i], value); !cont || err != nil { - return err - } - } - } - - return nil -} - -// IndexDiff contains a set of items present in the source not in index, -// along with a set of things in the index which are not in the source. -type IndexDiff struct { - // PresentInIndex is a map of foreign key to primary keys - // present in the index. - PresentInIndex map[string]map[string]struct{} - // MissingFromIndex is a map of foreign key to associated primary keys - // missing from the index given the source bucket. - // These items could be due to the fact an index populate migration has - // not yet occurred, the index populate code is incorrect or the write path - // for your resource type does not yet insert into the index as well (Create actions). - MissingFromIndex map[string]map[string]struct{} - // MissingFromSource is a map of foreign key to associated primary keys - // missing from the source but accounted for in the index. - // This happens when index items are not properly removed from the index - // when an item is removed from the source (Delete actions). - MissingFromSource map[string]map[string]struct{} -} - -func (i *IndexDiff) addMissingSource(fk, pk []byte) { - if i.MissingFromSource == nil { - i.MissingFromSource = map[string]map[string]struct{}{} - } - - if _, ok := i.MissingFromSource[string(fk)]; !ok { - i.MissingFromSource[string(fk)] = map[string]struct{}{} - } - - i.MissingFromSource[string(fk)][string(pk)] = struct{}{} -} - -func (i *IndexDiff) addMissingIndex(fk, pk []byte) { - if i.MissingFromIndex == nil { - i.MissingFromIndex = map[string]map[string]struct{}{} - } - - if _, ok := i.MissingFromIndex[string(fk)]; !ok { - i.MissingFromIndex[string(fk)] = map[string]struct{}{} - } - - i.MissingFromIndex[string(fk)][string(pk)] = struct{}{} -} - -// Corrupt returns a list of foreign keys which have corrupted indexes (partial) -// These are foreign keys which map to a subset of the primary keys which they should -// be associated with. 
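// A minimal sketch of the read path once the index has been populated and the
// index was constructed with WithIndexReadPathEnabled; idx, tx, and userID come
// from the surrounding context and are illustrative:
//
//	err := idx.Walk(ctx, tx, userID, func(pk, v []byte) (bool, error) {
//		// pk is the primary key in the source bucket, v its stored value
//		return true, nil // return false to stop walking early
//	})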
-func (i *IndexDiff) Corrupt() (corrupt []string) { - for fk := range i.MissingFromIndex { - if _, ok := i.PresentInIndex[fk]; ok { - corrupt = append(corrupt, fk) - } - } - return -} - -// Verify returns the difference between a source and its index -// The difference contains items in the source that are not in the index -// and vice-versa. -func (i *Index) Verify(ctx context.Context, store Store) (diff IndexDiff, err error) { - return indexVerify(ctx, i, store, true) -} - -func indexVerify(ctx context.Context, mapping IndexMapping, store Store, includeMissingSource bool) (diff IndexDiff, err error) { - diff.PresentInIndex, err = indexReadAll(ctx, store, func(tx Tx) (Bucket, error) { - return tx.Bucket(mapping.IndexBucket()) - }) - if err != nil { - return diff, err - } - - sourceKVs, err := consumeBucket(ctx, store, func(tx Tx) (Bucket, error) { - return tx.Bucket(mapping.SourceBucket()) - }) - if err != nil { - return diff, err - } - - // pks is a map of primary keys in source - pks := map[string]struct{}{} - - // look for items missing from index - for _, kv := range sourceKVs { - pk, v := kv[0], kv[1] - - if includeMissingSource { - // this is only useful for missing source - pks[string(pk)] = struct{}{} - } - - fk, err := mapping.IndexSourceOn(v) - if err != nil { - return diff, err - } - - fkm, ok := diff.PresentInIndex[string(fk)] - if ok { - _, ok = fkm[string(pk)] - } - - if !ok { - diff.addMissingIndex(fk, pk) - } - } - - if includeMissingSource { - // look for items missing from source - for fk, fkm := range diff.PresentInIndex { - for pk := range fkm { - if _, ok := pks[pk]; !ok { - diff.addMissingSource([]byte(fk), []byte(pk)) - } - } - } - } - - return -} - -// indexReadAll returns the entire current state of the index -func indexReadAll(ctx context.Context, store Store, indexBucket func(Tx) (Bucket, error)) (map[string]map[string]struct{}, error) { - kvs, err := consumeBucket(ctx, store, indexBucket) - if err != nil { - return nil, err - } - - index := map[string]map[string]struct{}{} - for _, kv := range kvs { - fk, pk, err := indexKeyParts(kv[0]) - if err != nil { - return nil, err - } - - if fkm, ok := index[string(fk)]; ok { - fkm[string(pk)] = struct{}{} - continue - } - - index[string(fk)] = map[string]struct{}{string(pk): {}} - } - - return index, nil -} - -type kvSlice [][2][]byte - -// consumeBucket consumes the entire k/v space for the provided bucket function -// applied to the provided store -func consumeBucket(ctx context.Context, store Store, fn func(tx Tx) (Bucket, error)) (kvs kvSlice, err error) { - return kvs, store.View(ctx, func(tx Tx) error { - bkt, err := fn(tx) - if err != nil { - return err - } - - cursor, err := bkt.ForwardCursor(nil) - if err != nil { - return err - } - - return WalkCursor(ctx, cursor, func(k, v []byte) (bool, error) { - kvs = append(kvs, [2][]byte{k, v}) - return true, nil - }) - }) -} diff --git a/kv/index_migration.go b/kv/index_migration.go deleted file mode 100644 index 363c8981aa5..00000000000 --- a/kv/index_migration.go +++ /dev/null @@ -1,208 +0,0 @@ -package kv - -import ( - "context" - "fmt" -) - -const ( - // defaultIndexMigrationOpBatchSize configures the size of batch operations - // done by the index migration when populating or removing items from an - // entire index - defaultIndexMigrationOpBatchSize = 100 -) - -// IndexMigration is a migration for adding and removing an index. -// These are constructed via the Index.Migration function. 
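// A minimal sketch of auditing an index against its source; the mapping
// variable is illustrative and store is any Store implementation:
//
//	diff, err := NewIndex(byUser).Verify(ctx, store)
//	if err == nil && len(diff.Corrupt()) > 0 {
//		// some foreign keys are only partially indexed and need repopulating
//	}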
-type IndexMigration struct { - IndexMapping - - operationBatchSize int - removeDanglingForeignKeys bool -} - -// IndexMigrationOption is a functional option for the IndexMigration type -type IndexMigrationOption func(*IndexMigration) - -// WithIndexMigationBatchSize configures the size of the batches when committing -// changes to entire index during migration (e.g. size of put batch on index populate). -func WithIndexMigationBatchSize(n int) IndexMigrationOption { - return func(m *IndexMigration) { - m.operationBatchSize = n - } -} - -// WithIndexMigrationCleanup removes index entries which point to -// missing items in the source bucket. -func WithIndexMigrationCleanup(m *IndexMigration) { - m.removeDanglingForeignKeys = true -} - -// NewIndexMigration construct a migration for creating and populating an index -func NewIndexMigration(mapping IndexMapping, opts ...IndexMigrationOption) *IndexMigration { - m := &IndexMigration{ - IndexMapping: mapping, - operationBatchSize: defaultIndexMigrationOpBatchSize, - } - - for _, opt := range opts { - opt(m) - } - - return m -} - -// Name returns a readable name for the index migration. -func (i *IndexMigration) MigrationName() string { - return fmt.Sprintf("add index %q", string(i.IndexBucket())) -} - -// Up initializes the index bucket and populates the index. -func (i *IndexMigration) Up(ctx context.Context, store SchemaStore) (err error) { - wrapErr := func(err error) error { - if err == nil { - return nil - } - - return fmt.Errorf("migration (up) %s: %w", i.MigrationName(), err) - } - - if err = store.CreateBucket(ctx, i.IndexBucket()); err != nil { - return wrapErr(err) - } - - _, err = i.Populate(ctx, store) - return wrapErr(err) -} - -// Down deletes all entries from the index. -func (i *IndexMigration) Down(ctx context.Context, store SchemaStore) error { - if err := store.DeleteBucket(ctx, i.IndexBucket()); err != nil { - return fmt.Errorf("migration (down) %s: %w", i.MigrationName(), err) - } - - return nil -} - -// Populate does a full population of the index using the IndexSourceOn IndexMapping function. -// Once completed it marks the index as ready for use. -// It return a nil error on success and the count of inserted items. 
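// A minimal sketch of declaring an index migration; the mapping variable and
// batch size are illustrative:
//
//	addByUserIndex := NewIndexMigration(
//		byUser,
//		WithIndexMigationBatchSize(500),
//		WithIndexMigrationCleanup,
//	)
//
//	// Up creates the index bucket and populates it from the source;
//	// Down removes the index bucket again.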
-func (i *IndexMigration) Populate(ctx context.Context, store Store) (n int, err error) { - // verify the index to derive missing index - // we can skip missing source lookup as we're - // only interested in populating the missing index - diff, err := indexVerify(ctx, i, store, i.removeDanglingForeignKeys) - if err != nil { - return 0, fmt.Errorf("looking up missing indexes: %w", err) - } - - flush := func(batch kvSlice) error { - if len(batch) == 0 { - return nil - } - - if err := store.Update(ctx, func(tx Tx) error { - indexBucket, err := tx.Bucket(i.IndexBucket()) - if err != nil { - return err - } - - for _, pair := range batch { - // insert missing item into index - if err := indexBucket.Put(pair[0], pair[1]); err != nil { - return err - } - - n++ - } - - return nil - }); err != nil { - return fmt.Errorf("updating index: %w", err) - } - - return nil - } - - var batch kvSlice - - for fk, fkm := range diff.MissingFromIndex { - for pk := range fkm { - key, err := IndexKey([]byte(fk), []byte(pk)) - if err != nil { - return n, err - } - batch = append(batch, [2][]byte{key, []byte(pk)}) - - if len(batch) >= i.operationBatchSize { - if err := flush(batch); err != nil { - return n, err - } - - batch = batch[:0] - } - } - } - - if err := flush(batch); err != nil { - return n, err - } - - if i.removeDanglingForeignKeys { - return n, i.remove(ctx, store, diff.MissingFromSource) - } - - return n, nil -} - -func (i *IndexMigration) remove(ctx context.Context, store Store, mappings map[string]map[string]struct{}) error { - var ( - batch [][]byte - flush = func(batch [][]byte) error { - if len(batch) == 0 { - return nil - } - - if err := store.Update(ctx, func(tx Tx) error { - indexBucket, err := tx.Bucket(i.IndexBucket()) - if err != nil { - return err - } - - for _, indexKey := range batch { - // delete dangling foreign key - if err := indexBucket.Delete(indexKey); err != nil { - return err - } - } - - return nil - }); err != nil { - return fmt.Errorf("removing dangling foreign keys: %w", err) - } - - return nil - } - ) - - for fk, fkm := range mappings { - for pk := range fkm { - key, err := IndexKey([]byte(fk), []byte(pk)) - if err != nil { - return err - } - - batch = append(batch, key) - - if len(batch) >= i.operationBatchSize { - if err := flush(batch); err != nil { - return err - } - - batch = batch[:0] - } - } - } - - return flush(batch) -} diff --git a/kv/index_test.go b/kv/index_test.go deleted file mode 100644 index 663297e3019..00000000000 --- a/kv/index_test.go +++ /dev/null @@ -1,197 +0,0 @@ -package kv_test - -import ( - "context" - "errors" - "os" - "testing" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2/bolt" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func Test_Inmem_Index(t *testing.T) { - s := influxdbtesting.NewTestInmemStore(t) - influxdbtesting.TestIndex(t, s) -} - -func Test_Bolt_Index(t *testing.T) { - s, closeBolt := influxdbtesting.NewTestBoltStore(t) - defer closeBolt() - - influxdbtesting.TestIndex(t, s) -} - -func TestIndexKey(t *testing.T) { - tests := []struct { - name string - fk string - pk string - expKey string - expErr error - }{ - { - name: "returns key", - fk: "fk_part", - pk: "pk_part", - expKey: 
"fk_part/pk_part", - }, - { - name: "returns error for invalid foreign key", - fk: "fk/part", - pk: "pk_part", - expErr: kv.ErrKeyInvalidCharacters, - }, - { - name: "returns error for invalid primary key", - fk: "fk_part", - pk: "pk/part", - expErr: kv.ErrKeyInvalidCharacters, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - gotKey, gotErr := kv.IndexKey([]byte(test.fk), []byte(test.pk)) - if test.expErr != nil { - require.Error(t, gotErr) - assert.EqualError(t, test.expErr, gotErr.Error()) - assert.Nil(t, gotKey) - } else { - assert.NoError(t, gotErr) - assert.Equal(t, test.expKey, string(gotKey)) - } - }) - } -} - -func TestIndex_Walk(t *testing.T) { - t.Run("only selects exact keys", func(t *testing.T) { - ctrl := gomock.NewController(t) - t.Cleanup(ctrl.Finish) - - type keyValue struct{ key, val []byte } - makeIndexKV := func(fk, pk string) keyValue { - key, err := kv.IndexKey([]byte(fk), []byte(pk)) - if err != nil { - panic(err) - } - return keyValue{ - key: key, - val: []byte(pk), - } - } - - makeKV := func(key, val string) keyValue { - return keyValue{[]byte(key), []byte(val)} - } - - var ( - sourceBucket = []byte("source") - indexBucket = []byte("index") - foreignKey = []byte("jenkins") - idxkeyvals = []keyValue{ - makeIndexKV("jenkins-aws", "pk1"), - makeIndexKV("jenkins-aws", "pk2"), - makeIndexKV("jenkins-aws", "pk3"), - makeIndexKV("jenkins", "pk4"), - makeIndexKV("jenkins", "pk5"), - } - srckeyvals = []struct{ key, val []byte }{ - makeKV("pk4", "val4"), - makeKV("pk5", "val5"), - } - ) - - mapping := kv.NewIndexMapping(sourceBucket, indexBucket, func(data []byte) ([]byte, error) { - return nil, nil - }) - - tx := mock.NewMockTx(ctrl) - - src := mock.NewMockBucket(ctrl) - src.EXPECT(). - GetBatch(srckeyvals[0].key, srckeyvals[1].key). - Return([][]byte{srckeyvals[0].val, srckeyvals[1].val}, nil) - - tx.EXPECT(). - Bucket(sourceBucket). - Return(src, nil) - - idx := mock.NewMockBucket(ctrl) - tx.EXPECT(). - Bucket(indexBucket). - Return(idx, nil) - - cur := mock.NewMockForwardCursor(ctrl) - - i := 0 - cur.EXPECT(). - Next(). - DoAndReturn(func() ([]byte, []byte) { - var k, v []byte - if i < len(idxkeyvals) { - elem := idxkeyvals[i] - i++ - k, v = elem.key, elem.val - } - - return k, v - }). - Times(len(idxkeyvals) + 1) - cur.EXPECT(). - Err(). - Return(nil) - cur.EXPECT(). - Close(). - Return(nil) - idx.EXPECT(). - ForwardCursor(foreignKey, gomock.Any()). 
- Return(cur, nil) - - ctx := context.Background() - index := kv.NewIndex(mapping, kv.WithIndexReadPathEnabled) - - j := 0 - err := index.Walk(ctx, tx, foreignKey, func(k, v []byte) (bool, error) { - require.Less(t, j, len(srckeyvals)) - assert.Equal(t, srckeyvals[j].key, k) - assert.Equal(t, srckeyvals[j].val, v) - j++ - return true, nil - }) - - assert.NoError(t, err) - }) -} - -func Benchmark_Inmem_Index_Walk(b *testing.B) { - influxdbtesting.BenchmarkIndexWalk(b, inmem.NewKVStore(), 1000, 200) -} - -func Benchmark_Bolt_Index_Walk(b *testing.B) { - f, err := os.CreateTemp("", "influxdata-bolt-") - if err != nil { - b.Fatal(errors.New("unable to open temporary boltdb file")) - } - f.Close() - - path := f.Name() - s := bolt.NewKVStore(zaptest.NewLogger(b), path) - if err := s.Open(context.Background()); err != nil { - b.Fatal(err) - } - - defer func() { - s.Close() - os.Remove(path) - }() - - influxdbtesting.BenchmarkIndexWalk(b, s, 1000, 200) -} diff --git a/kv/initial_migration.go b/kv/initial_migration.go deleted file mode 100644 index 441ad94cf74..00000000000 --- a/kv/initial_migration.go +++ /dev/null @@ -1,100 +0,0 @@ -package kv - -import ( - "context" - "encoding/json" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -type InitialMigration struct{} - -// MigrationName returns the string initial migration -// which allows this store to be used as a migration -func (m InitialMigration) MigrationName() string { - return "initial migration" -} - -// Up initializes all the owned buckets of the underlying store -func (m InitialMigration) Up(ctx context.Context, store SchemaStore) error { - // please do not initialize anymore buckets here - // add them as a new migration to the list of migrations - // defined in NewInitialMigration. 
- - for _, bucket := range [][]byte{ - []byte("authorizationsv1"), - []byte("authorizationindexv1"), - []byte("bucketsv1"), - []byte("bucketindexv1"), - []byte("dashboardsv2"), - []byte("orgsdashboardsv1"), - []byte("dashboardcellviewsv1"), - kvlogBucket, - kvlogIndex, - []byte("labelsv1"), - []byte("labelmappingsv1"), - []byte("labelindexv1"), - []byte("onboardingv1"), - []byte("organizationsv1"), - []byte("organizationindexv1"), - taskBucket, - taskRunBucket, - taskIndexBucket, - []byte("userspasswordv1"), - scrapersBucket, - []byte("secretsv1"), - []byte("telegrafv1"), - []byte("telegrafPluginsv1"), - []byte("userresourcemappingsv1"), - []byte("notificationRulev1"), - []byte("usersv1"), - []byte("userindexv1"), - sourceBucket, - // these are the "document" (aka templates) key prefixes - []byte("templates/documents/content"), - []byte("templates/documents/meta"), - // store base backed services - []byte("checksv1"), - []byte("checkindexv1"), - []byte("notificationEndpointv1"), - []byte("notificationEndpointIndexv1"), - variableBucket, - variableIndexBucket, - variableOrgsIndex, - // deprecated: removed in later migration - []byte("sessionsv1"), - } { - if err := store.CreateBucket(ctx, bucket); err != nil { - return err - } - } - - // seed initial sources (default source) - return store.Update(ctx, func(tx Tx) error { - return putAsJson(tx, sourceBucket, DefaultSource.ID, DefaultSource) - }) -} - -// Down is a no operation required for service to be used as a migration -func (m InitialMigration) Down(ctx context.Context, store SchemaStore) error { - return nil -} - -func putAsJson(tx Tx, bucket []byte, id platform.ID, value interface{}) error { - data, err := json.Marshal(value) - if err != nil { - return err - } - - encodedID, err := id.Encode() - if err != nil { - return err - } - - b, err := tx.Bucket(bucket) - if err != nil { - return err - } - - return b.Put(encodedID, data) -} diff --git a/kv/kvlog.go b/kv/kvlog.go deleted file mode 100644 index 5e73f7b0cac..00000000000 --- a/kv/kvlog.go +++ /dev/null @@ -1,370 +0,0 @@ -package kv - -import ( - "bytes" - "context" - "crypto/sha1" - "encoding/binary" - "encoding/json" - "fmt" - "time" - - platform "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - kvlogBucket = []byte("keyvaluelogv1") - kvlogIndex = []byte("keyvaluelogindexv1") - - // ErrKeyValueLogBoundsNotFound is returned when oplog entries cannot be located - // for the provided bounds - ErrKeyValueLogBoundsNotFound = &errors.Error{ - Code: errors.ENotFound, - Msg: "oplog not found", - } -) - -var _ platform.KeyValueLog = (*Service)(nil) - -type keyValueLogBounds struct { - Start int64 `json:"start"` - Stop int64 `json:"stop"` -} - -func newKeyValueLogBounds(now time.Time) *keyValueLogBounds { - return &keyValueLogBounds{ - Start: now.UTC().UnixNano(), - Stop: now.UTC().UnixNano(), - } -} - -func (b *keyValueLogBounds) update(t time.Time) { - now := t.UTC().UnixNano() - if now < b.Start { - b.Start = now - } else if b.Stop < now { - b.Stop = now - } -} - -// StartTime retrieves the start value of a bounds as a time.Time -func (b *keyValueLogBounds) StartTime() time.Time { - return time.Unix(0, b.Start) -} - -// StopTime retrieves the stop value of a bounds as a time.Time -func (b *keyValueLogBounds) StopTime() time.Time { - return time.Unix(0, b.Stop) -} - -// Bounds returns the key boundaries for the keyvaluelog for a resourceType/resourceID pair. 
-func (b *keyValueLogBounds) Bounds(k []byte) ([]byte, []byte, error) { - start, err := encodeLogEntryKey(k, b.Start) - if err != nil { - return nil, nil, err - } - stop, err := encodeLogEntryKey(k, b.Stop) - if err != nil { - return nil, nil, err - } - return start, stop, nil -} - -func encodeLogEntryKey(key []byte, v int64) ([]byte, error) { - prefix := encodeKeyValueIndexKey(key) - k := make([]byte, len(prefix)+8) - - buf := bytes.NewBuffer(k) - _, err := buf.Write(prefix) - if err != nil { - return nil, err - } - - // This needs to be big-endian so that the iteration order is preserved when scanning keys - if err := binary.Write(buf, binary.BigEndian, v); err != nil { - return nil, err - } - return buf.Bytes(), err -} - -func decodeLogEntryKey(key []byte) ([]byte, time.Time, error) { - buf := bytes.NewReader(key[len(key)-8:]) - var ts int64 - // This needs to be big-endian so that the iteration order is preserved when scanning keys - err := binary.Read(buf, binary.BigEndian, &ts) - if err != nil { - return nil, time.Unix(0, 0), err - } - return key[:len(key)-8], time.Unix(0, ts), nil -} - -func encodeKeyValueIndexKey(k []byte) []byte { - // keys produced must be fixed length to ensure that we can iterate through the keyspace without any error. - h := sha1.New() - h.Write([]byte(k)) - return h.Sum(nil) -} - -func (s *Service) getKeyValueLogBounds(ctx context.Context, tx Tx, key []byte) (*keyValueLogBounds, error) { - k := encodeKeyValueIndexKey(key) - - b, err := tx.Bucket(kvlogIndex) - if err != nil { - return nil, err - } - - v, err := b.Get(k) - if IsNotFound(err) { - return nil, ErrKeyValueLogBoundsNotFound - } - - if err != nil { - return nil, err - } - - bounds := &keyValueLogBounds{} - if err := json.Unmarshal(v, bounds); err != nil { - return nil, err - } - - return bounds, nil -} - -func (s *Service) putKeyValueLogBounds(ctx context.Context, tx Tx, key []byte, bounds *keyValueLogBounds) error { - k := encodeKeyValueIndexKey(key) - - v, err := json.Marshal(bounds) - if err != nil { - return err - } - - b, err := tx.Bucket(kvlogIndex) - if err != nil { - return err - } - - if err := b.Put(k, v); err != nil { - return err - } - - return nil -} - -func (s *Service) updateKeyValueLogBounds(ctx context.Context, tx Tx, k []byte, t time.Time) error { - // retrieve the keyValue log boundaries - bounds, err := s.getKeyValueLogBounds(ctx, tx, k) - if err != nil && err != ErrKeyValueLogBoundsNotFound { - return err - } - - if err == ErrKeyValueLogBoundsNotFound { - // if the bounds don't exist yet, create them - bounds = newKeyValueLogBounds(t) - } - - // update the bounds to if needed - bounds.update(t) - if err := s.putKeyValueLogBounds(ctx, tx, k, bounds); err != nil { - return err - } - - return nil -} - -// ForEachLogEntry retrieves the keyValue log for a resource type ID combination. KeyValues may be returned in ascending and descending order. 
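// A minimal sketch of the entry-key round trip built from the helpers above;
// the resource key is illustrative. The encoded key ends in a big-endian
// nanosecond timestamp, so a forward cursor yields one resource's entries in
// time order:
//
//	entryKey, err := encodeLogEntryKey([]byte("dashboards/op"), time.Now().UTC().UnixNano())
//	if err == nil {
//		_, ts, _ := decodeLogEntryKey(entryKey) // ts recovers the timestamp encoded above
//	}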
-func (s *Service) ForEachLogEntry(ctx context.Context, k []byte, opts platform.FindOptions, fn func([]byte, time.Time) error) error { - return s.kv.View(ctx, func(tx Tx) error { - return s.ForEachLogEntryTx(ctx, tx, k, opts, fn) - }) -} - -func (s *Service) ForEachLogEntryTx(ctx context.Context, tx Tx, k []byte, opts platform.FindOptions, fn func([]byte, time.Time) error) error { - b, err := s.getKeyValueLogBounds(ctx, tx, k) - if err != nil { - return err - } - - bkt, err := tx.Bucket(kvlogBucket) - if err != nil { - return err - } - - startKey, stopKey, err := b.Bounds(k) - if err != nil { - return err - } - - direction := CursorAscending - if opts.Descending { - direction = CursorDescending - startKey, stopKey = stopKey, startKey - } - cur, err := bkt.ForwardCursor(startKey, WithCursorDirection(direction)) - if err != nil { - return err - } - - count := 0 - - if opts.Offset > 0 { - // Skip offset many items - for i := 0; i < opts.Offset; i++ { - k, _ := cur.Next() - if bytes.Equal(k, stopKey) { - return nil - } - } - } - - for k, v := cur.Next(); k != nil; k, v = cur.Next() { - - if count >= opts.Limit && opts.Limit != 0 { - break - } - - _, ts, err := decodeLogEntryKey(k) - if err != nil { - return err - } - - if err := fn(v, ts); err != nil { - return err - } - - if bytes.Equal(k, stopKey) { - // if we've reached the stop key, there are no keys log entries left - // in the keyspace. - break - } - - count++ - } - - return nil - -} - -// AddLogEntry logs an keyValue for a particular resource type ID pairing. -func (s *Service) AddLogEntry(ctx context.Context, k, v []byte, t time.Time) error { - return s.kv.Update(ctx, func(tx Tx) error { - return s.AddLogEntryTx(ctx, tx, k, v, t) - }) -} - -func (s *Service) AddLogEntryTx(ctx context.Context, tx Tx, k, v []byte, t time.Time) error { - if err := s.updateKeyValueLogBounds(ctx, tx, k, t); err != nil { - return err - } - - if err := s.putLogEntry(ctx, tx, k, v, t); err != nil { - return err - } - - return nil -} - -func (s *Service) putLogEntry(ctx context.Context, tx Tx, k, v []byte, t time.Time) error { - key, err := encodeLogEntryKey(k, t.UTC().UnixNano()) - if err != nil { - return err - } - - b, err := tx.Bucket(kvlogBucket) - if err != nil { - return err - } - - if err := b.Put(key, v); err != nil { - return err - } - - return nil -} - -func (s *Service) getLogEntry(ctx context.Context, tx Tx, k []byte, t time.Time) ([]byte, time.Time, error) { - key, err := encodeLogEntryKey(k, t.UTC().UnixNano()) - if err != nil { - return nil, t, err - } - - b, err := tx.Bucket(kvlogBucket) - if err != nil { - return nil, t, err - } - - v, err := b.Get(key) - if IsNotFound(err) { - return nil, t, fmt.Errorf("log entry not found") - } - - if err != nil { - return nil, t, err - } - - return v, t, nil -} - -// FirstLogEntry retrieves the first log entry for a key value log. -func (s *Service) FirstLogEntry(ctx context.Context, k []byte) ([]byte, time.Time, error) { - var v []byte - var t time.Time - - err := s.kv.View(ctx, func(tx Tx) error { - val, ts, err := s.firstLogEntry(ctx, tx, k) - if err != nil { - return err - } - - v, t = val, ts - - return nil - }) - - if err != nil { - return nil, t, err - } - - return v, t, nil -} - -// LastLogEntry retrieves the first log entry for a key value log. 
-func (s *Service) LastLogEntry(ctx context.Context, k []byte) ([]byte, time.Time, error) { - var v []byte - var t time.Time - - err := s.kv.View(ctx, func(tx Tx) error { - val, ts, err := s.lastLogEntry(ctx, tx, k) - if err != nil { - return err - } - - v, t = val, ts - - return nil - }) - - if err != nil { - return nil, t, err - } - - return v, t, nil -} - -func (s *Service) firstLogEntry(ctx context.Context, tx Tx, k []byte) ([]byte, time.Time, error) { - bounds, err := s.getKeyValueLogBounds(ctx, tx, k) - if err != nil { - return nil, time.Time{}, err - } - - return s.getLogEntry(ctx, tx, k, bounds.StartTime()) -} - -func (s *Service) lastLogEntry(ctx context.Context, tx Tx, k []byte) ([]byte, time.Time, error) { - bounds, err := s.getKeyValueLogBounds(ctx, tx, k) - if err != nil { - return nil, time.Time{}, err - } - - return s.getLogEntry(ctx, tx, k, bounds.StopTime()) -} diff --git a/kv/kvlog_test.go b/kv/kvlog_test.go deleted file mode 100644 index 094df5f23a6..00000000000 --- a/kv/kvlog_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package kv_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestBoltKeyValueLog(t *testing.T) { - influxdbtesting.KeyValueLog(initBoltKeyValueLog, t) -} - -func initBoltKeyValueLog(f influxdbtesting.KeyValueLogFields, t *testing.T) (influxdb.KeyValueLog, func()) { - s, closeBolt := influxdbtesting.NewTestBoltStore(t) - svc, closeSvc := initKeyValueLog(s, f, t) - return svc, func() { - closeSvc() - closeBolt() - } -} - -func initKeyValueLog(s kv.SchemaStore, f influxdbtesting.KeyValueLogFields, t *testing.T) (influxdb.KeyValueLog, func()) { - ctx := context.Background() - svc := kv.NewService(zaptest.NewLogger(t), s, tenant.NewService(tenant.NewStore(s))) - - for _, e := range f.LogEntries { - if err := svc.AddLogEntry(ctx, e.Key, e.Value, e.Time); err != nil { - t.Fatalf("failed to populate log entries") - } - } - return svc, func() { - } -} diff --git a/kv/migration/all/0001_initial_migration.go b/kv/migration/all/0001_initial_migration.go deleted file mode 100644 index 2d7ec4d3384..00000000000 --- a/kv/migration/all/0001_initial_migration.go +++ /dev/null @@ -1,6 +0,0 @@ -package all - -import "github.com/influxdata/influxdb/v2/kv" - -// Migration0001_InitialMigration contains all the buckets created before the time of migrations in kv -var Migration0001_InitialMigration = kv.InitialMigration{} diff --git a/kv/migration/all/0002_urm_by_user_index.go b/kv/migration/all/0002_urm_by_user_index.go deleted file mode 100644 index 12c29cf9ee3..00000000000 --- a/kv/migration/all/0002_urm_by_user_index.go +++ /dev/null @@ -1,9 +0,0 @@ -package all - -import ( - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/tenant/index" -) - -// Migration0002_AddURMByUserIndex creates the URM by user index and populates missing entries based on the source. 
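The first/last-entry lookups above avoid scanning by consulting a per-key bounds record: getKeyValueLogBounds loads it from the kvlogIndex bucket, and firstLogEntry/lastLogEntry re-encode the stored Start/Stop timestamps into full entry keys. The keyValueLogBounds type itself is declared earlier in the deleted file and is not part of this hunk; the shape below is inferred from how it is used here (int64 Unix-nanosecond Start/Stop fields, update, StartTime/StopTime) and should be read as an assumption-laden sketch, not the original declaration.

```go
package kvsketch

import "time"

// keyValueLogBounds is a JSON-serialised window of the oldest and newest entry timestamps
// recorded for a hashed key; inferred shape, not the deleted declaration.
type keyValueLogBounds struct {
	Start int64 `json:"start"` // oldest entry timestamp, Unix nanoseconds
	Stop  int64 `json:"stop"`  // newest entry timestamp, Unix nanoseconds
}

func newKeyValueLogBounds(t time.Time) *keyValueLogBounds {
	ts := t.UTC().UnixNano()
	return &keyValueLogBounds{Start: ts, Stop: ts}
}

// update widens the window so it always covers every timestamp logged for the key.
func (b *keyValueLogBounds) update(t time.Time) {
	ts := t.UTC().UnixNano()
	if ts < b.Start {
		b.Start = ts
	}
	if ts > b.Stop {
		b.Stop = ts
	}
}

func (b *keyValueLogBounds) StartTime() time.Time { return time.Unix(0, b.Start) }
func (b *keyValueLogBounds) StopTime() time.Time  { return time.Unix(0, b.Stop) }
```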
-var Migration0002_AddURMByUserIndex = kv.NewIndexMigration(index.URMByUserIndexMapping, kv.WithIndexMigrationCleanup) diff --git a/kv/migration/all/0003_task_owners.go b/kv/migration/all/0003_task_owners.go deleted file mode 100644 index aa3d98a5ac7..00000000000 --- a/kv/migration/all/0003_task_owners.go +++ /dev/null @@ -1,215 +0,0 @@ -package all - -import ( - "context" - "encoding/json" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -var taskBucket = []byte("tasksv1") - -// Migration0003_TaskOwnerIDUpMigration adds missing owner IDs to some legacy tasks -var Migration0003_TaskOwnerIDUpMigration = UpOnlyMigration( - "migrate task owner id", - func(ctx context.Context, store kv.SchemaStore) error { - var ownerlessTasks []*taskmodel.Task - // loop through the tasks and collect a set of tasks that are missing the owner id. - err := store.View(ctx, func(tx kv.Tx) error { - taskBucket, err := tx.Bucket(taskBucket) - if err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - c, err := taskBucket.ForwardCursor([]byte{}) - if err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - for k, v := c.Next(); k != nil; k, v = c.Next() { - kvTask := &kvTask{} - if err := json.Unmarshal(v, kvTask); err != nil { - return taskmodel.ErrInternalTaskServiceError(err) - } - - t := kvToInfluxTask(kvTask) - - if !t.OwnerID.Valid() { - ownerlessTasks = append(ownerlessTasks, t) - } - } - if err := c.Err(); err != nil { - return err - } - - return c.Close() - }) - if err != nil { - return err - } - - // loop through tasks - for _, t := range ownerlessTasks { - // open transaction - err := store.Update(ctx, func(tx kv.Tx) error { - taskKey, err := taskKey(t.ID) - if err != nil { - return err - } - b, err := tx.Bucket(taskBucket) - if err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - if !t.OwnerID.Valid() { - v, err := b.Get(taskKey) - if kv.IsNotFound(err) { - return taskmodel.ErrTaskNotFound - } - authType := struct { - AuthorizationID platform.ID `json:"authorizationID"` - }{} - if err := json.Unmarshal(v, &authType); err != nil { - return taskmodel.ErrInternalTaskServiceError(err) - } - - // try populating the owner from auth - encodedID, err := authType.AuthorizationID.Encode() - if err == nil { - authBucket, err := tx.Bucket([]byte("authorizationsv1")) - if err != nil { - return err - } - - a, err := authBucket.Get(encodedID) - if err == nil { - auth := &influxdb.Authorization{} - if err := json.Unmarshal(a, auth); err != nil { - return err - } - - t.OwnerID = auth.GetUserID() - } - } - - } - - // try populating owner from urm - if !t.OwnerID.Valid() { - b, err := tx.Bucket([]byte("userresourcemappingsv1")) - if err != nil { - return err - } - - id, err := t.OrganizationID.Encode() - if err != nil { - return err - } - - cur, err := b.ForwardCursor(id, kv.WithCursorPrefix(id)) - if err != nil { - return err - } - - for k, v := cur.Next(); k != nil; k, v = cur.Next() { - m := &influxdb.UserResourceMapping{} - if err := json.Unmarshal(v, m); err != nil { - return err - } - if m.ResourceID == t.OrganizationID && m.ResourceType == influxdb.OrgsResourceType && m.UserType == influxdb.Owner { - t.OwnerID = m.UserID - break - } - } - - if err := cur.Close(); err != nil { - return err - } - } - - // if population fails 
return error - if !t.OwnerID.Valid() { - return &errors.Error{ - Code: errors.EInternal, - Msg: "could not populate owner ID for task", - } - } - - // save task - taskBytes, err := json.Marshal(t) - if err != nil { - return taskmodel.ErrInternalTaskServiceError(err) - } - - err = b.Put(taskKey, taskBytes) - if err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - return nil - }) - if err != nil { - return err - } - } - return nil - }, -) - -type kvTask struct { - ID platform.ID `json:"id"` - Type string `json:"type,omitempty"` - OrganizationID platform.ID `json:"orgID"` - Organization string `json:"org"` - OwnerID platform.ID `json:"ownerID"` - Name string `json:"name"` - Description string `json:"description,omitempty"` - Status string `json:"status"` - Flux string `json:"flux"` - Every string `json:"every,omitempty"` - Cron string `json:"cron,omitempty"` - LastRunStatus string `json:"lastRunStatus,omitempty"` - LastRunError string `json:"lastRunError,omitempty"` - Offset influxdb.Duration `json:"offset,omitempty"` - LatestCompleted time.Time `json:"latestCompleted,omitempty"` - LatestScheduled time.Time `json:"latestScheduled,omitempty"` - CreatedAt time.Time `json:"createdAt,omitempty"` - UpdatedAt time.Time `json:"updatedAt,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -func kvToInfluxTask(k *kvTask) *taskmodel.Task { - return &taskmodel.Task{ - ID: k.ID, - Type: k.Type, - OrganizationID: k.OrganizationID, - Organization: k.Organization, - OwnerID: k.OwnerID, - Name: k.Name, - Description: k.Description, - Status: k.Status, - Flux: k.Flux, - Every: k.Every, - Cron: k.Cron, - LastRunStatus: k.LastRunStatus, - LastRunError: k.LastRunError, - Offset: k.Offset.Duration, - LatestCompleted: k.LatestCompleted, - LatestScheduled: k.LatestScheduled, - CreatedAt: k.CreatedAt, - UpdatedAt: k.UpdatedAt, - Metadata: k.Metadata, - } -} - -func taskKey(taskID platform.ID) ([]byte, error) { - encodedID, err := taskID.Encode() - if err != nil { - return nil, taskmodel.ErrInvalidTaskID - } - return encodedID, nil -} diff --git a/kv/migration/all/0003_task_owners_test.go b/kv/migration/all/0003_task_owners_test.go deleted file mode 100644 index fe22c1c4d81..00000000000 --- a/kv/migration/all/0003_task_owners_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package all - -import ( - "context" - "fmt" - "testing" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" -) - -func Test_(t *testing.T) { - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - - ts := newService(t, ctx, 2) - - taskBucket := []byte("tasksv1") - id := "05da585043e02000" - // create a task that has auth set and no ownerID - err := ts.Store.Update(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket(taskBucket) - if err != nil { - t.Fatal(err) - } - taskBody := fmt.Sprintf(`{"id":"05da585043e02000","type":"system","orgID":"05d3ae3492c9c000","org":"whos","authorizationID":"%s","name":"asdf","status":"active","flux":"option v = {\n bucket: \"bucks\",\n timeRangeStart: -1h,\n timeRangeStop: now()\n}\n\noption task = { \n name: \"asdf\",\n every: 5m,\n}\n\nfrom(bucket: \"_monitoring\")\n |\u003e range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |\u003e filter(fn: (r) =\u003e r[\"_measurement\"] == \"boltdb_reads_total\")\n |\u003e filter(fn: (r) =\u003e r[\"_field\"] == \"counter\")\n |\u003e to(bucket: \"bucks\", org: 
\"whos\")","every":"5m","latestCompleted":"2020-06-16T17:01:26.083319Z","latestScheduled":"2020-06-16T17:01:26.083319Z","lastRunStatus":"success","createdAt":"2020-06-15T19:10:29Z","updatedAt":"0001-01-01T00:00:00Z"}`, ts.Auth.ID.String()) - err = b.Put([]byte(id), []byte(taskBody)) - - if err != nil { - t.Fatal(err) - } - return nil - }) - if err != nil { - t.Fatal(err) - } - - err = Migration0003_TaskOwnerIDUpMigration.Up(context.Background(), ts.Store) - if err != nil { - t.Fatal(err) - } - - idType, _ := platform.IDFromString(id) - task, err := ts.Service.FindTaskByID(context.Background(), *idType) - if err != nil { - t.Fatal(err) - } - if task.OwnerID != ts.User.ID { - t.Fatal("failed to fill in ownerID") - } - - // create a task that has no auth or owner id but a urm exists - err = ts.Store.Update(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket([]byte("tasksv1")) - if err != nil { - t.Fatal(err) - } - taskBody := fmt.Sprintf(`{"id":"05da585043e02000","type":"system","orgID":"%s","org":"whos","name":"asdf","status":"active","flux":"option v = {\n bucket: \"bucks\",\n timeRangeStart: -1h,\n timeRangeStop: now()\n}\n\noption task = { \n name: \"asdf\",\n every: 5m,\n}\n\nfrom(bucket: \"_monitoring\")\n |\u003e range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |\u003e filter(fn: (r) =\u003e r[\"_measurement\"] == \"boltdb_reads_total\")\n |\u003e filter(fn: (r) =\u003e r[\"_field\"] == \"counter\")\n |\u003e to(bucket: \"bucks\", org: \"whos\")","every":"5m","latestCompleted":"2020-06-16T17:01:26.083319Z","latestScheduled":"2020-06-16T17:01:26.083319Z","lastRunStatus":"success","createdAt":"2020-06-15T19:10:29Z","updatedAt":"0001-01-01T00:00:00Z"}`, ts.Org.ID.String()) - err = b.Put([]byte(id), []byte(taskBody)) - if err != nil { - t.Fatal(err) - } - - return nil - }) - if err != nil { - t.Fatal(err) - } - - err = Migration0003_TaskOwnerIDUpMigration.Up(context.Background(), ts.Store) - if err != nil { - t.Fatal(err) - } - - task, err = ts.Service.FindTaskByID(context.Background(), *idType) - if err != nil { - t.Fatal(err) - } - if task.OwnerID != ts.User.ID { - t.Fatal("failed to fill in ownerID") - } -} diff --git a/kv/migration/all/0004_add_dbrp_buckets.go b/kv/migration/all/0004_add_dbrp_buckets.go deleted file mode 100644 index e3cd51a0781..00000000000 --- a/kv/migration/all/0004_add_dbrp_buckets.go +++ /dev/null @@ -1,17 +0,0 @@ -package all - -import "github.com/influxdata/influxdb/v2/kv/migration" - -var ( - dbrpBucket = []byte("dbrpv1") - dbrpIndexBucket = []byte("dbrpbyorganddbindexv1") - dbrpDefaultBucket = []byte("dbrpdefaultv1") -) - -// Migration0004_AddDbrpBuckets creates the buckets necessary for the DBRP Service to operate. -var Migration0004_AddDbrpBuckets = migration.CreateBuckets( - "create DBRP buckets", - dbrpBucket, - dbrpIndexBucket, - dbrpDefaultBucket, -) diff --git a/kv/migration/all/0005_add_pkger_buckets.go b/kv/migration/all/0005_add_pkger_buckets.go deleted file mode 100644 index 2359478d7f3..00000000000 --- a/kv/migration/all/0005_add_pkger_buckets.go +++ /dev/null @@ -1,15 +0,0 @@ -package all - -import "github.com/influxdata/influxdb/v2/kv/migration" - -var ( - pkgerStacksBucket = []byte("v1_pkger_stacks") - pkgerStackIndexBucket = []byte("v1_pkger_stacks_index") -) - -// Migration0005_AddPkgerBuckets creates the buckets necessary for the pkger service to operate. 
-var Migration0005_AddPkgerBuckets = migration.CreateBuckets( - "create pkger stacks buckets", - pkgerStacksBucket, - pkgerStackIndexBucket, -) diff --git a/kv/migration/all/0006_delete-bucket-sessionsv1.go b/kv/migration/all/0006_delete-bucket-sessionsv1.go deleted file mode 100644 index 8d83ea9af00..00000000000 --- a/kv/migration/all/0006_delete-bucket-sessionsv1.go +++ /dev/null @@ -1,7 +0,0 @@ -package all - -import "github.com/influxdata/influxdb/v2/kv/migration" - -// Migration0006_DeleteBucketSessionsv1 removes the sessionsv1 bucket -// from the backing kv store. -var Migration0006_DeleteBucketSessionsv1 = migration.DeleteBuckets("delete sessionsv1 bucket", []byte("sessionsv1")) diff --git a/kv/migration/all/0007_CreateMetaDataBucket.go b/kv/migration/all/0007_CreateMetaDataBucket.go deleted file mode 100644 index 3f37ca7ddb4..00000000000 --- a/kv/migration/all/0007_CreateMetaDataBucket.go +++ /dev/null @@ -1,10 +0,0 @@ -package all - -import ( - "github.com/influxdata/influxdb/v2/kv/migration" - "github.com/influxdata/influxdb/v2/v1/services/meta" -) - -var Migration0007_CreateMetaDataBucket = migration.CreateBuckets( - "Create TSM metadata buckets", - meta.BucketName) diff --git a/kv/migration/all/0008_LegacyAuthBuckets.go b/kv/migration/all/0008_LegacyAuthBuckets.go deleted file mode 100644 index 98bb7586e28..00000000000 --- a/kv/migration/all/0008_LegacyAuthBuckets.go +++ /dev/null @@ -1,9 +0,0 @@ -package all - -import ( - "github.com/influxdata/influxdb/v2/kv/migration" -) - -var Migration0008_LegacyAuthBuckets = migration.CreateBuckets( - "Create Legacy authorization buckets", - []byte("legacy/authorizationsv1"), []byte("legacy/authorizationindexv1")) diff --git a/kv/migration/all/0009_LegacyAuthPasswordBuckets.go b/kv/migration/all/0009_LegacyAuthPasswordBuckets.go deleted file mode 100644 index e79c3b59d81..00000000000 --- a/kv/migration/all/0009_LegacyAuthPasswordBuckets.go +++ /dev/null @@ -1,7 +0,0 @@ -package all - -import "github.com/influxdata/influxdb/v2/kv/migration" - -var Migration0009_LegacyAuthPasswordBuckets = migration.CreateBuckets( - "Create legacy auth password bucket", - []byte("legacy/authorizationPasswordv1")) diff --git a/kv/migration/all/0010_add-index-telegraf-by-org.go b/kv/migration/all/0010_add-index-telegraf-by-org.go deleted file mode 100644 index 9e904cdde38..00000000000 --- a/kv/migration/all/0010_add-index-telegraf-by-org.go +++ /dev/null @@ -1,9 +0,0 @@ -package all - -import ( - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/telegraf" -) - -// Migration0010_AddIndexTelegrafByOrg adds the index telegraf configs by organization ID -var Migration0010_AddIndexTelegrafByOrg = kv.NewIndexMigration(telegraf.ByOrganizationIndexMapping, kv.WithIndexMigrationCleanup) diff --git a/kv/migration/all/0011_populate-dashboards-owner-id.go b/kv/migration/all/0011_populate-dashboards-owner-id.go deleted file mode 100644 index 22559c36a96..00000000000 --- a/kv/migration/all/0011_populate-dashboards-owner-id.go +++ /dev/null @@ -1,127 +0,0 @@ -package all - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" -) - -// Migration0011_PopulateDashboardsOwnerId backfills owner IDs on dashboards based on the presence of user resource mappings -var Migration0011_PopulateDashboardsOwnerId = UpOnlyMigration("populate dashboards owner 
id", func(ctx context.Context, store kv.SchemaStore) error { - var urmBucket = []byte("userresourcemappingsv1") - type userResourceMapping struct { - UserID platform.ID `json:"userID"` - UserType influxdb.UserType `json:"userType"` - MappingType influxdb.MappingType `json:"mappingType"` - ResourceType influxdb.ResourceType `json:"resourceType"` - ResourceID platform.ID `json:"resourceID"` - } - - var mappings []*userResourceMapping - if err := store.View(ctx, func(tx kv.Tx) error { - bkt, err := tx.Bucket(urmBucket) - if err != nil { - return err - } - - cursor, err := bkt.ForwardCursor(nil) - if err != nil { - return err - } - - // collect all dashboard mappings - return kv.WalkCursor(ctx, cursor, func(_, v []byte) (bool, error) { - var mapping userResourceMapping - if err := json.Unmarshal(v, &mapping); err != nil { - return false, err - } - - // we're interesting in dashboard owners - if mapping.ResourceType == influxdb.DashboardsResourceType && - mapping.UserType == influxdb.Owner { - mappings = append(mappings, &mapping) - } - - return true, nil - }) - }); err != nil { - return err - } - - var dashboardsBucket = []byte("dashboardsv2") - // dashboard represents all visual and query data for a dashboard. - type dashboard struct { - ID platform.ID `json:"id,omitempty"` - OrganizationID platform.ID `json:"orgID,omitempty"` - Name string `json:"name"` - Description string `json:"description"` - Cells []*influxdb.Cell `json:"cells"` - Meta influxdb.DashboardMeta `json:"meta"` - OwnerID *platform.ID `json:"owner,omitempty"` - } - - var ( - batchSize = 100 - flush = func(batch []*userResourceMapping) (err error) { - ids := make([][]byte, len(batch)) - for i, urm := range batch { - ids[i], err = urm.ResourceID.Encode() - if err != nil { - return - } - } - - return store.Update(ctx, func(tx kv.Tx) error { - bkt, err := tx.Bucket(dashboardsBucket) - if err != nil { - return err - } - - values, err := bkt.GetBatch(ids...) 
- if err != nil { - return err - } - - for i, value := range values { - var dashboard dashboard - if err := json.Unmarshal(value, &dashboard); err != nil { - return err - } - - if dashboard.OwnerID != nil { - fmt.Printf("dashboard %q already has owner %q", dashboard.ID, dashboard.OwnerID) - continue - } - - // update bucket owner to owner dashboard urm mapping user target - dashboard.OwnerID = &batch[i].UserID - - updated, err := json.Marshal(dashboard) - if err != nil { - return err - } - - // update bucket entry - return bkt.Put(ids[i], updated) - } - - return nil - }) - } - ) - - for i := 0; i < len(mappings); i += batchSize { - end := i + batchSize - if end > len(mappings) { - end = len(mappings) - } - - flush(mappings[i:end]) - } - - return nil -}) diff --git a/kv/migration/all/0012_dbrp_by_org_index.go b/kv/migration/all/0012_dbrp_by_org_index.go deleted file mode 100644 index 421c3bfceb9..00000000000 --- a/kv/migration/all/0012_dbrp_by_org_index.go +++ /dev/null @@ -1,8 +0,0 @@ -package all - -import ( - "github.com/influxdata/influxdb/v2/dbrp" - "github.com/influxdata/influxdb/v2/kv" -) - -var Migration0012_DBRPByOrgIndex = kv.NewIndexMigration(dbrp.ByOrgIDIndexMapping, kv.WithIndexMigrationCleanup) diff --git a/kv/migration/all/0013_repair-DBRP-owner-and-bucket-IDs.go b/kv/migration/all/0013_repair-DBRP-owner-and-bucket-IDs.go deleted file mode 100644 index 6a996d42e40..00000000000 --- a/kv/migration/all/0013_repair-DBRP-owner-and-bucket-IDs.go +++ /dev/null @@ -1,127 +0,0 @@ -package all - -import ( - "context" - "encoding/json" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" -) - -var Migration0013_RepairDBRPOwnerAndBucketIDs = UpOnlyMigration( - "repair DBRP owner and bucket IDs", - func(ctx context.Context, store kv.SchemaStore) error { - type oldStyleMapping struct { - ID platform.ID `json:"id"` - Database string `json:"database"` - RetentionPolicy string `json:"retention_policy"` - Default bool `json:"default"` - - // These 2 fields were renamed. - OrganizationID platform.ID `json:"organization_id"` - BucketID platform.ID `json:"bucket_id"` - } - - // Collect DBRPs that are using the old schema. - var mappings []*oldStyleMapping - if err := store.View(ctx, func(tx kv.Tx) error { - bkt, err := tx.Bucket(dbrpBucket) - if err != nil { - return err - } - - cursor, err := bkt.ForwardCursor(nil) - if err != nil { - return err - } - - return kv.WalkCursor(ctx, cursor, func(_, v []byte) (bool, error) { - var mapping oldStyleMapping - if err := json.Unmarshal(v, &mapping); err != nil { - return false, err - } - - // DBRPs that are already stored in the new schema will end up with - // invalid (zero) values for the 2 ID fields when unmarshalled using - // the old JSON schema. - if mapping.OrganizationID.Valid() && mapping.BucketID.Valid() { - mappings = append(mappings, &mapping) - } - - return true, nil - }) - }); err != nil { - return err - } - - type newStyleDbrpMapping struct { - ID platform.ID `json:"id"` - Database string `json:"database"` - RetentionPolicy string `json:"retention_policy"` - Default bool `json:"default"` - - // New names for the 2 renamed fields. 
- OrganizationID platform.ID `json:"orgID"` - BucketID platform.ID `json:"bucketID"` - } - batchSize := 100 - writeBatch := func(batch []*oldStyleMapping) (err error) { - ids := make([][]byte, len(batch)) - for i, mapping := range batch { - ids[i], err = mapping.ID.Encode() - if err != nil { - return - } - } - - return store.Update(ctx, func(tx kv.Tx) error { - bkt, err := tx.Bucket(dbrpBucket) - if err != nil { - return err - } - - values, err := bkt.GetBatch(ids...) - if err != nil { - return err - } - - for i, value := range values { - var mapping newStyleDbrpMapping - if err := json.Unmarshal(value, &mapping); err != nil { - return err - } - - if !mapping.OrganizationID.Valid() { - mapping.OrganizationID = batch[i].OrganizationID - } - if !mapping.BucketID.Valid() { - mapping.BucketID = batch[i].BucketID - } - - updated, err := json.Marshal(mapping) - if err != nil { - return err - } - - if err := bkt.Put(ids[i], updated); err != nil { - return err - } - } - - return nil - }) - } - - for i := 0; i < len(mappings); i += batchSize { - end := i + batchSize - if end > len(mappings) { - end = len(mappings) - } - if err := writeBatch(mappings[i:end]); err != nil { - return err - } - } - - return nil - }, -) diff --git a/kv/migration/all/0014_reindex-DBRPs.go b/kv/migration/all/0014_reindex-DBRPs.go deleted file mode 100644 index 658c57d4b10..00000000000 --- a/kv/migration/all/0014_reindex-DBRPs.go +++ /dev/null @@ -1,8 +0,0 @@ -package all - -import ( - "github.com/influxdata/influxdb/v2/dbrp" - "github.com/influxdata/influxdb/v2/kv" -) - -var Migration0014_ReindexDBRPs = kv.NewIndexMigration(dbrp.ByOrgIDIndexMapping) diff --git a/kv/migration/all/0015_record-shard-group-durations-in-bucket-metadata.go b/kv/migration/all/0015_record-shard-group-durations-in-bucket-metadata.go deleted file mode 100644 index 98f05e9b494..00000000000 --- a/kv/migration/all/0015_record-shard-group-durations-in-bucket-metadata.go +++ /dev/null @@ -1,121 +0,0 @@ -package all - -import ( - "context" - "encoding/json" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/v1/services/meta" -) - -func repairMissingShardGroupDurations(ctx context.Context, store kv.SchemaStore) error { - type bucket struct { - ID platform.ID `json:"id,omitempty"` - OrgID platform.ID `json:"orgID,omitempty"` - Type int `json:"type"` - Name string `json:"name"` - Description string `json:"description"` - RetentionPolicyName string `json:"rp,omitempty"` // This to support v1 sources - RetentionPeriod time.Duration `json:"retentionPeriod"` - CreatedAt time.Time `json:"createdAt"` - UpdatedAt time.Time `json:"updatedAt"` - - // This is expected to be 0 for all buckets created before - // we began tracking it in metadata. 
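Migrations 0011 and 0013 above (and 0015 just below) share the same shape: collect the IDs of records needing repair under a read-only View, then rewrite them in chunks of 100, one Update transaction per chunk, so no single write transaction has to hold every record. (As an aside, the dashboards variant in 0011 appears to return from inside its per-batch loop after the first Put and discards the flush error, whereas the DBRP repair in 0013 iterates the whole batch and propagates errors.) The chunking loop itself, isolated as a standalone sketch with a stand-in callback rather than the real kv.SchemaStore:

```go
package main

import "fmt"

// rewriteInBatches applies writeBatch to ids in fixed-size chunks so that each chunk can be
// committed in its own transaction.
func rewriteInBatches(ids []string, batchSize int, writeBatch func(batch []string) error) error {
	for i := 0; i < len(ids); i += batchSize {
		end := i + batchSize
		if end > len(ids) {
			end = len(ids)
		}
		if err := writeBatch(ids[i:end]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	ids := make([]string, 250)
	for i := range ids {
		ids[i] = fmt.Sprintf("id-%03d", i)
	}
	// 250 records with a batch size of 100 -> three write transactions of 100, 100 and 50.
	_ = rewriteInBatches(ids, 100, func(batch []string) error {
		fmt.Println("writing batch of", len(batch))
		return nil
	})
}
```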
- ShardGroupDuration time.Duration `json:"shardGroupDuration"` - } - bucketBucket := []byte("bucketsv1") - - // Collect buckets that need to be updated - var buckets []*bucket - if err := store.View(ctx, func(tx kv.Tx) error { - bkt, err := tx.Bucket(bucketBucket) - if err != nil { - return err - } - - cursor, err := bkt.ForwardCursor(nil) - if err != nil { - return err - } - - return kv.WalkCursor(ctx, cursor, func(_, v []byte) (bool, error) { - var b bucket - if err := json.Unmarshal(v, &b); err != nil { - return false, err - } - if b.ShardGroupDuration == 0 { - buckets = append(buckets, &b) - } - - return true, nil - }) - }); err != nil { - return err - } - - batchSize := 100 - writeBatch := func(batch []*bucket) (err error) { - ids := make([][]byte, len(batch)) - for i, b := range batch { - ids[i], err = b.ID.Encode() - if err != nil { - return err - } - } - - return store.Update(ctx, func(tx kv.Tx) error { - bkt, err := tx.Bucket(bucketBucket) - if err != nil { - return err - } - - values, err := bkt.GetBatch(ids...) - if err != nil { - return err - } - - for i, value := range values { - var b bucket - if err := json.Unmarshal(value, &b); err != nil { - return err - } - - if b.ShardGroupDuration == 0 { - // Backfill the duration using the same method used - // to derive the value within the storage engine. - b.ShardGroupDuration = meta.NormalisedShardDuration(0, b.RetentionPeriod) - } - - updated, err := json.Marshal(b) - if err != nil { - return err - } - if err := bkt.Put(ids[i], updated); err != nil { - return err - } - } - - return nil - }) - } - - for i := 0; i < len(buckets); i += batchSize { - end := i + batchSize - if end > len(buckets) { - end = len(buckets) - } - if err := writeBatch(buckets[i:end]); err != nil { - return err - } - } - - return nil -} - -var Migration0015_RecordShardGroupDurationsInBucketMetadata = UpOnlyMigration( - "record shard group durations in bucket metadata", - repairMissingShardGroupDurations, -) diff --git a/kv/migration/all/0015_record-shard-group-durations-in-bucket-metadata_test.go b/kv/migration/all/0015_record-shard-group-durations-in-bucket-metadata_test.go deleted file mode 100644 index 66dbd40f942..00000000000 --- a/kv/migration/all/0015_record-shard-group-durations-in-bucket-metadata_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package all - -import ( - "context" - "encoding/json" - "testing" - "time" - - "github.com/dustin/go-humanize" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/stretchr/testify/require" -) - -func TestMigration_ShardGroupDuration(t *testing.T) { - testRepairMissingShardGroupDurations(t, 15) -} - -func testRepairMissingShardGroupDurations(t *testing.T, migrationNum int) { - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - - // Run up to the migration before the migration-under-test. - ts := newService(t, ctx, migrationNum-2) - - // Seed some buckets. 
- buckets := []*influxdb.Bucket{ - { - ID: platform.ID(1), - Name: "infinite", - OrgID: ts.Org.ID, - RetentionPeriod: 0, - }, - { - ID: platform.ID(2), - Name: "1w", - OrgID: ts.Org.ID, - RetentionPeriod: humanize.Week, - }, - { - ID: platform.ID(3), - Name: "1d", - OrgID: ts.Org.ID, - RetentionPeriod: humanize.Day, - }, - { - ID: platform.ID(4), - Name: "1h", - OrgID: ts.Org.ID, - RetentionPeriod: time.Hour, - }, - } - - bucketBucket := []byte("bucketsv1") - ids := make([][]byte, len(buckets)) - err := ts.Store.Update(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(bucketBucket) - require.NoError(t, err) - for i, b := range buckets { - js, err := json.Marshal(b) - require.NoError(t, err) - - ids[i], err = b.ID.Encode() - require.NoError(t, err) - require.NoError(t, bkt.Put(ids[i], js)) - } - return nil - }) - require.NoError(t, err) - - // Run the migration-under-test. - require.NoError(t, Migrations[migrationNum-1].Up(context.Background(), ts.Store)) - - // Read the buckets back out of the store. - migratedBuckets := make([]influxdb.Bucket, len(buckets)) - err = ts.Store.View(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(bucketBucket) - require.NoError(t, err) - - rawBuckets, err := bkt.GetBatch(ids...) - require.NoError(t, err) - - for i, rawBucket := range rawBuckets { - require.NoError(t, json.Unmarshal(rawBucket, &migratedBuckets[i])) - } - - return nil - }) - require.NoError(t, err) - - // Check that normalized shard-group durations were backfilled. - require.Equal(t, humanize.Week, migratedBuckets[0].ShardGroupDuration) - require.Equal(t, humanize.Day, migratedBuckets[1].ShardGroupDuration) - require.Equal(t, time.Hour, migratedBuckets[2].ShardGroupDuration) - require.Equal(t, time.Hour, migratedBuckets[3].ShardGroupDuration) -} diff --git a/kv/migration/all/0016_add-annotations-notebooks-to-oper-token.go b/kv/migration/all/0016_add-annotations-notebooks-to-oper-token.go deleted file mode 100644 index efac3661b39..00000000000 --- a/kv/migration/all/0016_add-annotations-notebooks-to-oper-token.go +++ /dev/null @@ -1,236 +0,0 @@ -package all - -import ( - "context" - "encoding/json" - "sort" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" -) - -var Migration0016_AddAnnotationsNotebooksToOperToken = &Migration{ - name: "add annotations and notebooks resource types to operator token", - up: migrateTokensMigration( - func(t influxdb.Authorization) bool { - return permListsMatch(preNotebooksAnnotationsOpPerms(), t.Permissions) - }, - func(t *influxdb.Authorization) { - t.Permissions = append(t.Permissions, notebooksAndAnnotationsPerms(0)...) 
- }, - ), - down: migrateTokensMigration( - func(t influxdb.Authorization) bool { - return permListsMatch(append(preNotebooksAnnotationsOpPerms(), notebooksAndAnnotationsPerms(0)...), t.Permissions) - }, - func(t *influxdb.Authorization) { - newPerms := t.Permissions[:0] - for _, p := range t.Permissions { - switch p.Resource.Type { - case influxdb.AnnotationsResourceType: - case influxdb.NotebooksResourceType: - default: - newPerms = append(newPerms, p) - } - } - t.Permissions = newPerms - }, - ), -} - -func migrateTokensMigration( - checkToken func(influxdb.Authorization) bool, - updateToken func(*influxdb.Authorization), -) func(context.Context, kv.SchemaStore) error { - return func(ctx context.Context, store kv.SchemaStore) error { - authBucket := []byte("authorizationsv1") - - // First find all tokens matching the predicate. - var tokens []influxdb.Authorization - if err := store.View(ctx, func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - if err != nil { - return err - } - cursor, err := bkt.ForwardCursor(nil) - if err != nil { - return err - } - - return kv.WalkCursor(ctx, cursor, func(_, v []byte) (bool, error) { - var t influxdb.Authorization - if err := json.Unmarshal(v, &t); err != nil { - return false, err - } - if checkToken(t) { - tokens = append(tokens, t) - } - return true, nil - }) - }); err != nil { - return err - } - - // Next, update all the extracted tokens. - for i := range tokens { - updateToken(&tokens[i]) - } - - // Finally, persist the updated tokens back to the DB. - if err := store.Update(ctx, func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - if err != nil { - return err - } - for _, t := range tokens { - encodedID, err := t.ID.Encode() - if err != nil { - return err - } - v, err := json.Marshal(t) - if err != nil { - return err - } - if err := bkt.Put(encodedID, v); err != nil { - return err - } - } - return nil - }); err != nil { - return err - } - - return nil - } -} - -// notebooksAndAnnotationsPerms returns the list of additional permissions that need added for -// annotations and notebooks. -func notebooksAndAnnotationsPerms(orgID platform.ID) []influxdb.Permission { - resTypes := []influxdb.Resource{ - { - Type: influxdb.AnnotationsResourceType, - }, - { - Type: influxdb.NotebooksResourceType, - }, - } - perms := permListFromResources(resTypes) - if orgID.Valid() { - for i := range perms { - perms[i].Resource.OrgID = &orgID - } - } - return perms -} - -// preNotebooksAnnotationsOpPerms is the list of permissions from a 2.0.x operator token, -// prior to the addition of the notebooks and annotations resource types. 
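migrateTokensMigration above factors out the three-phase scan / filter / rewrite of the authorizations bucket, so each token migration supplies only a match predicate and a mutation, plus a down migration that strips exactly what up added. A hypothetical follow-on using those same helpers is sketched below; the widgets resource type, the migration name, and the choice to match against preNotebooksAnnotationsOpPerms are all invented for illustration (a real follow-on would match whatever the full operator permission set is at that point, as Migration0019 further below does). Matching the exact prior permission set is what keeps the migration idempotent: re-running it, or running it against already-migrated or unrelated tokens, changes nothing.

```go
package all

import "github.com/influxdata/influxdb/v2"

// "widgets" is an invented resource type, used purely for illustration.
var widgetsResource = influxdb.Resource{Type: influxdb.ResourceType("widgets")}

// widgetsPerms builds the read/write permission pair for the invented resource,
// reusing the permListFromResources helper shown above.
func widgetsPerms() []influxdb.Permission {
	return permListFromResources([]influxdb.Resource{widgetsResource})
}

var Migration00XX_AddWidgetsToOperToken = &Migration{
	name: "add widgets resource type to operator token",
	up: migrateTokensMigration(
		func(t influxdb.Authorization) bool {
			// Only tokens that exactly match the prior operator permission set are touched.
			return permListsMatch(preNotebooksAnnotationsOpPerms(), t.Permissions)
		},
		func(t *influxdb.Authorization) {
			t.Permissions = append(t.Permissions, widgetsPerms()...)
		},
	),
	down: migrateTokensMigration(
		func(t influxdb.Authorization) bool {
			return permListsMatch(append(preNotebooksAnnotationsOpPerms(), widgetsPerms()...), t.Permissions)
		},
		func(t *influxdb.Authorization) {
			// Strip only the permissions this migration added.
			kept := t.Permissions[:0]
			for _, p := range t.Permissions {
				if p.Resource.Type != widgetsResource.Type {
					kept = append(kept, p)
				}
			}
			t.Permissions = kept
		},
	),
}
```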
-func preNotebooksAnnotationsOpPerms() []influxdb.Permission { - resTypes := []influxdb.Resource{ - { - Type: influxdb.AuthorizationsResourceType, - }, - { - Type: influxdb.BucketsResourceType, - }, - { - Type: influxdb.DashboardsResourceType, - }, - { - Type: influxdb.OrgsResourceType, - }, - { - Type: influxdb.SourcesResourceType, - }, - { - Type: influxdb.TasksResourceType, - }, - { - Type: influxdb.TelegrafsResourceType, - }, - { - Type: influxdb.UsersResourceType, - }, - { - Type: influxdb.VariablesResourceType, - }, - { - Type: influxdb.ScraperResourceType, - }, - { - Type: influxdb.SecretsResourceType, - }, - { - Type: influxdb.LabelsResourceType, - }, - { - Type: influxdb.ViewsResourceType, - }, - { - Type: influxdb.DocumentsResourceType, - }, - { - Type: influxdb.NotificationRuleResourceType, - }, - { - Type: influxdb.NotificationEndpointResourceType, - }, - { - Type: influxdb.ChecksResourceType, - }, - { - Type: influxdb.DBRPResourceType, - }, - } - - return permListFromResources(resTypes) -} - -func permListFromResources(l []influxdb.Resource) []influxdb.Permission { - output := make([]influxdb.Permission, 0, len(l)*2) - for _, r := range l { - output = append(output, []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: r, - }, - { - Action: influxdb.WriteAction, - Resource: r, - }, - }...) - } - - return output -} - -func sortPermList(l []influxdb.Permission) { - sort.Slice(l, func(i, j int) bool { - if l[i].Resource.String() < l[j].Resource.String() { - return true - } - if l[i].Resource.String() > l[j].Resource.String() { - return false - } - return l[i].Action < l[j].Action - }) -} - -func permListsMatch(l1, l2 []influxdb.Permission) bool { - if len(l1) != len(l2) { - return false - } - - sortPermList(l1) - sortPermList(l2) - - for i := 0; i < len(l1); i++ { - if !l1[i].Matches(l2[i]) { - return false - } - } - - return true -} diff --git a/kv/migration/all/0016_add-annotations-notebooks-to-oper-token_test.go b/kv/migration/all/0016_add-annotations-notebooks-to-oper-token_test.go deleted file mode 100644 index 270b2ebb688..00000000000 --- a/kv/migration/all/0016_add-annotations-notebooks-to-oper-token_test.go +++ /dev/null @@ -1,281 +0,0 @@ -package all - -import ( - "context" - "encoding/json" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/stretchr/testify/require" -) - -func TestMigration_AnnotationsNotebooksOperToken(t *testing.T) { - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - - // Run up to migration 15. - ts := newService(t, ctx, 15) - - // Auth bucket contains the authorizations AKA tokens - authBucket := []byte("authorizationsv1") - - // The store returned by newService will include an operator token with the - // current system's entire list of resources already, so remove that before - // proceeding with the tests. - err := ts.Store.Update(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - require.NoError(t, err) - - cursor, err := bkt.ForwardCursor(nil) - require.NoError(t, err) - - return kv.WalkCursor(ctx, cursor, func(k, _ []byte) (bool, error) { - err := bkt.Delete(k) - require.NoError(t, err) - return true, nil - }) - }) - require.NoError(t, err) - - // Verify that running the migration in the absence of an operator token will - // not crash influxdb. 
- require.NoError(t, Migration0016_AddAnnotationsNotebooksToOperToken.Up(context.Background(), ts.Store)) - - // Seed some authorizations - id1 := snowflake.NewIDGenerator().ID() - id2 := snowflake.NewIDGenerator().ID() - OrgID := ts.Org.ID - UserID := ts.User.ID - - auths := []influxdb.Authorization{ - { - ID: id1, // a non-operator token - OrgID: OrgID, - UserID: UserID, - Permissions: permsShouldNotChange(), - }, - { - ID: id2, // an operator token - OrgID: OrgID, - UserID: UserID, - Permissions: preNotebooksAnnotationsOpPerms(), - }, - } - - for _, a := range auths { - js, err := json.Marshal(a) - require.NoError(t, err) - idBytes, err := a.ID.Encode() - require.NoError(t, err) - - err = ts.Store.Update(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - require.NoError(t, err) - return bkt.Put(idBytes, js) - }) - require.NoError(t, err) - } - - encoded1, err := id1.Encode() - require.NoError(t, err) - encoded2, err := id2.Encode() - require.NoError(t, err) - - checkPerms := func(expectedOpPerms []influxdb.Permission) { - // the first item should never change - err = ts.Store.View(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - require.NoError(t, err) - - b, err := bkt.Get(encoded1) - require.NoError(t, err) - - var token influxdb.Authorization - require.NoError(t, json.Unmarshal(b, &token)) - require.Equal(t, auths[0], token) - - return nil - }) - require.NoError(t, err) - - // the second item is the 2.0.x operator token and should have been updated to match our expectations - err = ts.Store.View(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - require.NoError(t, err) - - b, err := bkt.Get(encoded2) - require.NoError(t, err) - - var token influxdb.Authorization - require.NoError(t, json.Unmarshal(b, &token)) - - require.ElementsMatch(t, expectedOpPerms, token.Permissions) - return nil - }) - require.NoError(t, err) - } - - // Test applying the migration for the 1st time. - require.NoError(t, Migration0016_AddAnnotationsNotebooksToOperToken.Up(context.Background(), ts.Store)) - checkPerms(append(preNotebooksAnnotationsOpPerms(), notebooksAndAnnotationsPerms(0)...)) - - // Downgrade the migration. - require.NoError(t, Migration0016_AddAnnotationsNotebooksToOperToken.Down(context.Background(), ts.Store)) - checkPerms(preNotebooksAnnotationsOpPerms()) - - // Test re-applying the migration after a downgrade. 
- require.NoError(t, Migration0016_AddAnnotationsNotebooksToOperToken.Up(context.Background(), ts.Store)) - checkPerms(append(preNotebooksAnnotationsOpPerms(), notebooksAndAnnotationsPerms(0)...)) -} - -func Test_PermListsMatch(t *testing.T) { - tests := []struct { - name string - l1 []influxdb.Permission - l2 []influxdb.Permission - want bool - }{ - { - "empty lists", - []influxdb.Permission{}, - []influxdb.Permission{}, - true, - }, - { - "not matching", - []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - }, - }, - }, - []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - }, - }, - }, - false, - }, - { - "matches same order", - []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - }, - }, - }, - []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - }, - }, - }, - true, - }, - { - "matches different order", - []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.DBRPResourceType, - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.DBRPResourceType, - }, - }, - }, - []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.DBRPResourceType, - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.DBRPResourceType, - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - }, - }, - }, - true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := permListsMatch(tt.l1, tt.l2) - require.Equal(t, tt.want, got) - }) - } -} - -// This set of permissions shouldn't change - it doesn't match the operator -// token. 
-func permsShouldNotChange() []influxdb.Permission { - return []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - }, - }, - } -} diff --git a/kv/migration/all/0017_add-annotations-notebooks-to-all-access-tokens.go b/kv/migration/all/0017_add-annotations-notebooks-to-all-access-tokens.go deleted file mode 100644 index 3a7aba4cff7..00000000000 --- a/kv/migration/all/0017_add-annotations-notebooks-to-all-access-tokens.go +++ /dev/null @@ -1,59 +0,0 @@ -package all - -import ( - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var Migration0017_AddAnnotationsNotebooksToAllAccessTokens = &Migration{ - name: "add annotations and notebooks resource types to all-access tokens", - up: migrateTokensMigration( - func(t influxdb.Authorization) bool { - return permListsMatch(preNotebooksAnnotationsAllAccessPerms(t.OrgID, t.UserID), t.Permissions) - }, - func(t *influxdb.Authorization) { - t.Permissions = append(t.Permissions, notebooksAndAnnotationsPerms(t.OrgID)...) - }, - ), - down: migrateTokensMigration( - func(t influxdb.Authorization) bool { - return permListsMatch(append(preNotebooksAnnotationsAllAccessPerms(t.OrgID, t.UserID), notebooksAndAnnotationsPerms(t.OrgID)...), t.Permissions) - }, - func(t *influxdb.Authorization) { - newPerms := t.Permissions[:0] - for _, p := range t.Permissions { - switch p.Resource.Type { - case influxdb.AnnotationsResourceType: - case influxdb.NotebooksResourceType: - default: - newPerms = append(newPerms, p) - } - } - t.Permissions = newPerms - }, - ), -} - -// preNotebooksAnnotationsAllAccessPerms is the list of permissions from a 2.0.x all-access token, -// prior to the addition of the notebooks and annotations resource types. -func preNotebooksAnnotationsAllAccessPerms(orgId platform.ID, userId platform.ID) []influxdb.Permission { - opPerms := preNotebooksAnnotationsOpPerms() - perms := make([]influxdb.Permission, 0, len(opPerms)-1) // -1 because write-org permission isn't included. - for _, p := range opPerms { - if p.Resource.Type == influxdb.OrgsResourceType { - // All-access grants read-only access to the enclosing org. - if p.Action == influxdb.WriteAction { - continue - } - p.Resource.ID = &orgId - } else if p.Resource.Type == influxdb.UsersResourceType { - // It grants read and write access to the associated user. - p.Resource.ID = &userId - } else { - // It grants read and write access to all other resources in the enclosing org. 
- p.Resource.OrgID = &orgId - } - perms = append(perms, p) - } - return perms -} diff --git a/kv/migration/all/0017_add-annotations-notebooks-to-all-access-tokens_test.go b/kv/migration/all/0017_add-annotations-notebooks-to-all-access-tokens_test.go deleted file mode 100644 index 614db924125..00000000000 --- a/kv/migration/all/0017_add-annotations-notebooks-to-all-access-tokens_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package all - -import ( - "context" - "encoding/json" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/stretchr/testify/require" -) - -func TestMigration_AnnotationsNotebooksAllAccessToken(t *testing.T) { - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - - // Run up to migration 16. - ts := newService(t, ctx, 16) - - // Auth bucket contains the authorizations AKA tokens - authBucket := []byte("authorizationsv1") - - // Verify that running the migration in the absence of an all-access token will - // not crash influxdb. - require.NoError(t, Migration0017_AddAnnotationsNotebooksToAllAccessTokens.Up(context.Background(), ts.Store)) - - // Seed some authorizations - id1 := snowflake.NewIDGenerator().ID() - id2 := snowflake.NewIDGenerator().ID() - OrgID := ts.Org.ID - UserID := ts.User.ID - - auths := []influxdb.Authorization{ - { - ID: id1, // a non-all-access token - OrgID: OrgID, - UserID: UserID, - Permissions: orgPermsShouldNotChange(OrgID), - }, - { - ID: id2, // an all-access token - OrgID: OrgID, - UserID: UserID, - Permissions: preNotebooksAnnotationsAllAccessPerms(OrgID, UserID), - }, - } - - for _, a := range auths { - js, err := json.Marshal(a) - require.NoError(t, err) - idBytes, err := a.ID.Encode() - require.NoError(t, err) - - err = ts.Store.Update(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - require.NoError(t, err) - return bkt.Put(idBytes, js) - }) - require.NoError(t, err) - } - - encoded1, err := id1.Encode() - require.NoError(t, err) - encoded2, err := id2.Encode() - require.NoError(t, err) - - checkPerms := func(expectedAllPerms []influxdb.Permission) { - // the first item should never change - err = ts.Store.View(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - require.NoError(t, err) - - b, err := bkt.Get(encoded1) - require.NoError(t, err) - - var token influxdb.Authorization - require.NoError(t, json.Unmarshal(b, &token)) - require.Equal(t, auths[0], token) - - return nil - }) - require.NoError(t, err) - - // the second item is a 2.0.x all-access token and should have been updated to match our expectations - err = ts.Store.View(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - require.NoError(t, err) - - b, err := bkt.Get(encoded2) - require.NoError(t, err) - - var token influxdb.Authorization - require.NoError(t, json.Unmarshal(b, &token)) - - require.ElementsMatch(t, expectedAllPerms, token.Permissions) - return nil - }) - require.NoError(t, err) - } - - // Test applying the migration for the 1st time. - require.NoError(t, Migration0017_AddAnnotationsNotebooksToAllAccessTokens.Up(context.Background(), ts.Store)) - checkPerms(append(preNotebooksAnnotationsAllAccessPerms(OrgID, UserID), notebooksAndAnnotationsPerms(OrgID)...)) - - // Downgrade the migration. 
- require.NoError(t, Migration0017_AddAnnotationsNotebooksToAllAccessTokens.Down(context.Background(), ts.Store)) - checkPerms(preNotebooksAnnotationsAllAccessPerms(OrgID, UserID)) - - // Test re-applying the migration after a downgrade. - require.NoError(t, Migration0017_AddAnnotationsNotebooksToAllAccessTokens.Up(context.Background(), ts.Store)) - checkPerms(append(preNotebooksAnnotationsAllAccessPerms(OrgID, UserID), notebooksAndAnnotationsPerms(OrgID)...)) -} - -// This set of permissions shouldn't change - it doesn't match an all-access token. -func orgPermsShouldNotChange(orgId platform.ID) []influxdb.Permission { - return []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: &orgId, - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.ChecksResourceType, - OrgID: &orgId, - }, - }, - } -} diff --git a/kv/migration/all/0018_repair-missing-shard-group-durations.go b/kv/migration/all/0018_repair-missing-shard-group-durations.go deleted file mode 100644 index a34aefeb680..00000000000 --- a/kv/migration/all/0018_repair-missing-shard-group-durations.go +++ /dev/null @@ -1,9 +0,0 @@ -package all - -// NOTE: Down() is purposefully left as a no-op here because this migration fills in -// values that were missing because of a logic bug, and doesn't actually modify the -// metadata schema. -var Migration0018_RepairMissingShardGroupDurations = UpOnlyMigration( - "repair missing shard group durations", - repairMissingShardGroupDurations, -) diff --git a/kv/migration/all/0018_repair-missing-shard-group-durations_test.go b/kv/migration/all/0018_repair-missing-shard-group-durations_test.go deleted file mode 100644 index 1719cdbb509..00000000000 --- a/kv/migration/all/0018_repair-missing-shard-group-durations_test.go +++ /dev/null @@ -1,7 +0,0 @@ -package all - -import "testing" - -func TestMigration_PostUpgradeShardGroupDuration(t *testing.T) { - testRepairMissingShardGroupDurations(t, 18) -} diff --git a/kv/migration/all/0019_add-remotes-replications-to-tokens.go b/kv/migration/all/0019_add-remotes-replications-to-tokens.go deleted file mode 100644 index 70ff7fb753a..00000000000 --- a/kv/migration/all/0019_add-remotes-replications-to-tokens.go +++ /dev/null @@ -1,67 +0,0 @@ -package all - -import ( - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var Migration0019_AddRemotesReplicationsToTokens = &Migration{ - name: "add remotes and replications resource types to operator and all-access tokens", - up: migrateTokensMigration( - func(t influxdb.Authorization) bool { - return permListsMatch(preReplicationOpPerms(), t.Permissions) || - permListsMatch(preReplicationAllAccessPerms(t.OrgID, t.UserID), t.Permissions) - }, - func(t *influxdb.Authorization) { - if permListsMatch(preReplicationOpPerms(), t.Permissions) { - t.Permissions = append(t.Permissions, remotesAndReplicationsPerms(0)...) - } else { - t.Permissions = append(t.Permissions, remotesAndReplicationsPerms(t.OrgID)...) 
- } - }, - ), - down: migrateTokensMigration( - func(t influxdb.Authorization) bool { - return permListsMatch(append(preReplicationOpPerms(), remotesAndReplicationsPerms(0)...), t.Permissions) || - permListsMatch(append(preReplicationAllAccessPerms(t.OrgID, t.UserID), remotesAndReplicationsPerms(t.OrgID)...), t.Permissions) - }, - func(t *influxdb.Authorization) { - newPerms := t.Permissions[:0] - for _, p := range t.Permissions { - switch p.Resource.Type { - case influxdb.RemotesResourceType: - case influxdb.ReplicationsResourceType: - default: - newPerms = append(newPerms, p) - } - } - t.Permissions = newPerms - }, - ), -} - -func preReplicationOpPerms() []influxdb.Permission { - return append(preNotebooksAnnotationsOpPerms(), notebooksAndAnnotationsPerms(0)...) -} - -func preReplicationAllAccessPerms(orgID platform.ID, userID platform.ID) []influxdb.Permission { - return append(preNotebooksAnnotationsAllAccessPerms(orgID, userID), notebooksAndAnnotationsPerms(orgID)...) -} - -func remotesAndReplicationsPerms(orgID platform.ID) []influxdb.Permission { - resTypes := []influxdb.Resource{ - { - Type: influxdb.RemotesResourceType, - }, - { - Type: influxdb.ReplicationsResourceType, - }, - } - perms := permListFromResources(resTypes) - if orgID.Valid() { - for i := range perms { - perms[i].Resource.OrgID = &orgID - } - } - return perms -} diff --git a/kv/migration/all/0019_add-remotes-replications-to-tokens_test.go b/kv/migration/all/0019_add-remotes-replications-to-tokens_test.go deleted file mode 100644 index 54327e16dbf..00000000000 --- a/kv/migration/all/0019_add-remotes-replications-to-tokens_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package all - -import ( - "context" - "encoding/json" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/stretchr/testify/require" -) - -func TestMigration_RemotesReplicationsOperToken(t *testing.T) { - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - - // Run up to migration 18. - ts := newService(t, ctx, 18) - - // Auth bucket contains the authorizations AKA tokens - authBucket := []byte("authorizationsv1") - - // The store returned by newService will include an operator token with the - // current system's entire list of resources already, so remove that before - // proceeding with the tests. - err := ts.Store.Update(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - require.NoError(t, err) - - cursor, err := bkt.ForwardCursor(nil) - require.NoError(t, err) - - return kv.WalkCursor(ctx, cursor, func(k, _ []byte) (bool, error) { - err := bkt.Delete(k) - require.NoError(t, err) - return true, nil - }) - }) - require.NoError(t, err) - - // Verify that running the migration in the absence of an operator token will - // not crash influxdb. 
- require.NoError(t, Migration0019_AddRemotesReplicationsToTokens.Up(context.Background(), ts.Store)) - - // Seed some authorizations - id1 := snowflake.NewIDGenerator().ID() - id2 := snowflake.NewIDGenerator().ID() - OrgID := ts.Org.ID - UserID := ts.User.ID - - auths := []influxdb.Authorization{ - { - ID: id1, // a non-operator token - OrgID: OrgID, - UserID: UserID, - Permissions: permsShouldNotChange(), - }, - { - ID: id2, // an operator token - OrgID: OrgID, - UserID: UserID, - Permissions: preReplicationOpPerms(), - }, - } - - for _, a := range auths { - js, err := json.Marshal(a) - require.NoError(t, err) - idBytes, err := a.ID.Encode() - require.NoError(t, err) - - err = ts.Store.Update(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - require.NoError(t, err) - return bkt.Put(idBytes, js) - }) - require.NoError(t, err) - } - - encoded1, err := id1.Encode() - require.NoError(t, err) - encoded2, err := id2.Encode() - require.NoError(t, err) - - checkPerms := func(expectedAllPerms []influxdb.Permission) { - // the first item should never change - err = ts.Store.View(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - require.NoError(t, err) - - b, err := bkt.Get(encoded1) - require.NoError(t, err) - - var token influxdb.Authorization - require.NoError(t, json.Unmarshal(b, &token)) - require.Equal(t, auths[0], token) - - return nil - }) - require.NoError(t, err) - - // the second item is a 2.0.x all-access token and should have been updated to match our expectations - err = ts.Store.View(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - require.NoError(t, err) - - b, err := bkt.Get(encoded2) - require.NoError(t, err) - - var token influxdb.Authorization - require.NoError(t, json.Unmarshal(b, &token)) - - require.ElementsMatch(t, expectedAllPerms, token.Permissions) - return nil - }) - require.NoError(t, err) - } - - // Test applying the migration for the 1st time. - require.NoError(t, Migration0019_AddRemotesReplicationsToTokens.Up(context.Background(), ts.Store)) - checkPerms(append(preReplicationOpPerms(), remotesAndReplicationsPerms(0)...)) - - // Downgrade the migration. - require.NoError(t, Migration0019_AddRemotesReplicationsToTokens.Down(context.Background(), ts.Store)) - checkPerms(preReplicationOpPerms()) - - // Test re-applying the migration after a downgrade. - require.NoError(t, Migration0019_AddRemotesReplicationsToTokens.Up(context.Background(), ts.Store)) - checkPerms(append(preReplicationOpPerms(), remotesAndReplicationsPerms(0)...)) -} - -func TestMigration_RemotesReplicationsAllAccessToken(t *testing.T) { - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - - // Run up to migration 18. - ts := newService(t, ctx, 18) - - // Auth bucket contains the authorizations AKA tokens - authBucket := []byte("authorizationsv1") - - // Verify that running the migration in the absence of an all-access token will - // not crash influxdb. 
- require.NoError(t, Migration0019_AddRemotesReplicationsToTokens.Up(context.Background(), ts.Store)) - - // Seed some authorizations - id1 := snowflake.NewIDGenerator().ID() - id2 := snowflake.NewIDGenerator().ID() - OrgID := ts.Org.ID - UserID := ts.User.ID - - auths := []influxdb.Authorization{ - { - ID: id1, // a non-all-access token - OrgID: OrgID, - UserID: UserID, - Permissions: orgPermsShouldNotChange(OrgID), - }, - { - ID: id2, // an all-access token - OrgID: OrgID, - UserID: UserID, - Permissions: preReplicationAllAccessPerms(OrgID, UserID), - }, - } - - for _, a := range auths { - js, err := json.Marshal(a) - require.NoError(t, err) - idBytes, err := a.ID.Encode() - require.NoError(t, err) - - err = ts.Store.Update(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - require.NoError(t, err) - return bkt.Put(idBytes, js) - }) - require.NoError(t, err) - } - - encoded1, err := id1.Encode() - require.NoError(t, err) - encoded2, err := id2.Encode() - require.NoError(t, err) - - checkPerms := func(expectedAllPerms []influxdb.Permission) { - // the first item should never change - err = ts.Store.View(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - require.NoError(t, err) - - b, err := bkt.Get(encoded1) - require.NoError(t, err) - - var token influxdb.Authorization - require.NoError(t, json.Unmarshal(b, &token)) - require.Equal(t, auths[0], token) - - return nil - }) - require.NoError(t, err) - - // the second item is a 2.0.x all-access token and should have been updated to match our expectations - err = ts.Store.View(context.Background(), func(tx kv.Tx) error { - bkt, err := tx.Bucket(authBucket) - require.NoError(t, err) - - b, err := bkt.Get(encoded2) - require.NoError(t, err) - - var token influxdb.Authorization - require.NoError(t, json.Unmarshal(b, &token)) - - require.ElementsMatch(t, expectedAllPerms, token.Permissions) - return nil - }) - require.NoError(t, err) - } - - // Test applying the migration for the 1st time. - require.NoError(t, Migration0019_AddRemotesReplicationsToTokens.Up(context.Background(), ts.Store)) - checkPerms(append(preReplicationAllAccessPerms(OrgID, UserID), remotesAndReplicationsPerms(OrgID)...)) - - // Downgrade the migration. - require.NoError(t, Migration0019_AddRemotesReplicationsToTokens.Down(context.Background(), ts.Store)) - checkPerms(preReplicationAllAccessPerms(OrgID, UserID)) - - // Test re-applying the migration after a downgrade. 
- require.NoError(t, Migration0019_AddRemotesReplicationsToTokens.Up(context.Background(), ts.Store)) - checkPerms(append(preReplicationAllAccessPerms(OrgID, UserID), remotesAndReplicationsPerms(OrgID)...)) -} diff --git a/kv/migration/all/0020_add_remotes_replications_metrics_buckets.go b/kv/migration/all/0020_add_remotes_replications_metrics_buckets.go deleted file mode 100644 index 30d6dedfcac..00000000000 --- a/kv/migration/all/0020_add_remotes_replications_metrics_buckets.go +++ /dev/null @@ -1,14 +0,0 @@ -package all - -import "github.com/influxdata/influxdb/v2/kv/migration" - -var ( - remoteMetricsBucket = []byte("remotesv2") - replicationsMetricsBucket = []byte("replicationsv2") -) - -var Migration0020_Add_remotes_replications_metrics_buckets = migration.CreateBuckets( - "create remotes and replications metrics buckets", - remoteMetricsBucket, - replicationsMetricsBucket, -) diff --git a/kv/migration/all/all.go b/kv/migration/all/all.go deleted file mode 100644 index 2ee8878d21a..00000000000 --- a/kv/migration/all/all.go +++ /dev/null @@ -1,51 +0,0 @@ -package all - -import ( - "github.com/influxdata/influxdb/v2/kv/migration" -) - -// Migrations contains all the migrations required for the entire of the -// kv store backing influxdb's metadata. -var Migrations = [...]migration.Spec{ - // initial migrations - Migration0001_InitialMigration, - // add index user resource mappings by user id - Migration0002_AddURMByUserIndex, - // add index for tasks with missing owner IDs - Migration0003_TaskOwnerIDUpMigration, - // add dbrp buckets - Migration0004_AddDbrpBuckets, - // add pkger buckets - Migration0005_AddPkgerBuckets, - // delete bucket sessionsv1 - Migration0006_DeleteBucketSessionsv1, - // CreateMetaDataBucket - Migration0007_CreateMetaDataBucket, - // LegacyAuthBuckets - Migration0008_LegacyAuthBuckets, - // LegacyAuthPasswordBuckets - Migration0009_LegacyAuthPasswordBuckets, - // add index telegraf by org - Migration0010_AddIndexTelegrafByOrg, - // populate dashboards owner id - Migration0011_PopulateDashboardsOwnerId, - // Populate the DBRP service ByOrg index - Migration0012_DBRPByOrgIndex, - // repair DBRP owner and bucket IDs - Migration0013_RepairDBRPOwnerAndBucketIDs, - // reindex DBRPs - Migration0014_ReindexDBRPs, - // record shard group durations in bucket metadata - Migration0015_RecordShardGroupDurationsInBucketMetadata, - // add annotations and notebooks resource types to the operator token - Migration0016_AddAnnotationsNotebooksToOperToken, - // add annotations and notebooks resource types to all-access tokens - Migration0017_AddAnnotationsNotebooksToAllAccessTokens, - // repair missing shard group durations - Migration0018_RepairMissingShardGroupDurations, - // add remotes and replications resource types to operator and all-access tokens - Migration0019_AddRemotesReplicationsToTokens, - // add_remotes_replications_metrics_buckets - Migration0020_Add_remotes_replications_metrics_buckets, - // {{ do_not_edit . }} -} diff --git a/kv/migration/all/doc.go b/kv/migration/all/doc.go deleted file mode 100644 index fe46632a4da..00000000000 --- a/kv/migration/all/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// package all -// -// This package is the canonical location for all migrations being made against the -// single shared kv.Store implementation used by InfluxDB (while it remains a single store). 
-// -// The array all.Migrations contains the list of migration specifications which drives the -// serial set of migration operations required to correctly configure the backing metadata store -// for InfluxDB. -// -// This package is arranged like so: -// -// doc.go - this piece of documentation. -// all.go - definition of Migration array referencing each of the name migrations in number migration files (below). -// migration.go - an implementation of migration.Spec for convenience. -// 000X_migration_name.go (example) - N files contains the specific implementations of each migration enumerated in `all.go`. -// ... -// -// Managing this list of files and all.go can be fiddly. -// There is a buildable cli utility called `kvmigrate` in the `internal/cmd/kvmigrate` package. -// This has a command `create` which automatically creates a new migration in the expected location -// and appends it appropriately into the all.go Migration array. -package all diff --git a/kv/migration/all/migration.go b/kv/migration/all/migration.go deleted file mode 100644 index 508dec7feac..00000000000 --- a/kv/migration/all/migration.go +++ /dev/null @@ -1,55 +0,0 @@ -package all - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration" - "go.uber.org/zap" -) - -// Up is a convenience methods which creates a migrator for all -// migrations and calls Up on it. -func Up(ctx context.Context, logger *zap.Logger, store kv.SchemaStore) error { - migrator, err := migration.NewMigrator(logger, store, Migrations[:]...) - if err != nil { - return err - } - - return migrator.Up(ctx) -} - -// MigrationFunc is a function which can be used as either an up or down operation. -type MigrationFunc func(context.Context, kv.SchemaStore) error - -func noopMigration(context.Context, kv.SchemaStore) error { - return nil -} - -// Migration is a type which implements the migration packages Spec interface -// It can be used to conveniently create migration specs for the all package -type Migration struct { - name string - up MigrationFunc - down MigrationFunc -} - -// UpOnlyMigration is a migration with an up function and a noop down function -func UpOnlyMigration(name string, up MigrationFunc) *Migration { - return &Migration{name, up, noopMigration} -} - -// MigrationName returns the underlying name of the migation -func (m *Migration) MigrationName() string { - return m.name -} - -// Up delegates to the underlying anonymous up migration function -func (m *Migration) Up(ctx context.Context, store kv.SchemaStore) error { - return m.up(ctx, store) -} - -// Down delegates to the underlying anonymous down migration function -func (m *Migration) Down(ctx context.Context, store kv.SchemaStore) error { - return m.down(ctx, store) -} diff --git a/kv/migration/all/test_service_test.go b/kv/migration/all/test_service_test.go deleted file mode 100644 index 1487db26750..00000000000 --- a/kv/migration/all/test_service_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package all - -import ( - "context" - "testing" - - "github.com/benbjohnson/clock" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration" - "github.com/influxdata/influxdb/v2/tenant" - "go.uber.org/zap/zaptest" -) - -type testService struct { - Store kv.SchemaStore - Service *kv.Service - Org 
influxdb.Organization - User influxdb.User - Auth influxdb.Authorization - Clock clock.Clock -} - -func newService(t *testing.T, ctx context.Context, endMigration int) *testService { - t.Helper() - - var ( - ts = &testService{ - Store: inmem.NewKVStore(), - } - logger = zaptest.NewLogger(t) - ) - - // apply migrations up to (but not including) this one - migrator, err := migration.NewMigrator(logger, ts.Store, Migrations[:endMigration]...) - if err != nil { - t.Fatal(err) - } - - if err := migrator.Up(ctx); err != nil { - t.Fatal(err) - } - - store := tenant.NewStore(ts.Store) - tenantSvc := tenant.NewService(store) - - authStore, err := authorization.NewStore(ts.Store) - if err != nil { - t.Fatal(err) - } - authSvc := authorization.NewService(authStore, tenantSvc) - - ts.Service = kv.NewService(logger, ts.Store, tenantSvc) - - ts.User = influxdb.User{Name: t.Name() + "-user"} - if err := tenantSvc.CreateUser(ctx, &ts.User); err != nil { - t.Fatal(err) - } - ts.Org = influxdb.Organization{Name: t.Name() + "-org"} - if err := tenantSvc.CreateOrganization(ctx, &ts.Org); err != nil { - t.Fatal(err) - } - - if err := tenantSvc.CreateUserResourceMapping(ctx, &influxdb.UserResourceMapping{ - ResourceType: influxdb.OrgsResourceType, - ResourceID: ts.Org.ID, - UserID: ts.User.ID, - UserType: influxdb.Owner, - }); err != nil { - t.Fatal(err) - } - - ts.Auth = influxdb.Authorization{ - OrgID: ts.Org.ID, - UserID: ts.User.ID, - Permissions: influxdb.OperPermissions(), - } - if err := authSvc.CreateAuthorization(context.Background(), &ts.Auth); err != nil { - t.Fatal(err) - } - - return ts -} diff --git a/kv/migration/buckets.go b/kv/migration/buckets.go deleted file mode 100644 index f0098e6946f..00000000000 --- a/kv/migration/buckets.go +++ /dev/null @@ -1,84 +0,0 @@ -package migration - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kv" -) - -type bucketMigrationType string - -const ( - createBucketMigration = bucketMigrationType("create") - deleteBucketMigration = bucketMigrationType("delete") -) - -// BucketsMigration is a migration Spec which creates -// the provided list of buckets on a store when Up is called -// and deletes them on Down. -type BucketsMigration struct { - typ bucketMigrationType - name string - buckets [][]byte -} - -// CreateBuckets returns a new BucketsMigration Spec. -func CreateBuckets(name string, bucket []byte, extraBuckets ...[]byte) Spec { - buckets := append([][]byte{bucket}, extraBuckets...) - return BucketsMigration{createBucketMigration, name, buckets} -} - -// DeleteBuckets returns a new BucketsMigration Spec. -func DeleteBuckets(name string, bucket []byte, extraBuckets ...[]byte) Spec { - buckets := append([][]byte{bucket}, extraBuckets...) - return BucketsMigration{deleteBucketMigration, name, buckets} -} - -// MigrationName returns the name of the migration. -func (m BucketsMigration) MigrationName() string { - return m.name -} - -// Up creates the buckets on the store. -func (m BucketsMigration) Up(ctx context.Context, store kv.SchemaStore) error { - var fn func(context.Context, []byte) error - switch m.typ { - case createBucketMigration: - fn = store.CreateBucket - case deleteBucketMigration: - fn = store.DeleteBucket - default: - panic("unrecognized buckets migration type") - } - - for _, bucket := range m.buckets { - if err := fn(ctx, bucket); err != nil { - return err - } - - } - - return nil -} - -// Down delets the buckets on the store. 
-func (m BucketsMigration) Down(ctx context.Context, store kv.SchemaStore) error { - var fn func(context.Context, []byte) error - switch m.typ { - case createBucketMigration: - fn = store.DeleteBucket - case deleteBucketMigration: - fn = store.CreateBucket - default: - panic("unrecognized buckets migration type") - } - - for _, bucket := range m.buckets { - if err := fn(ctx, bucket); err != nil { - return err - } - - } - - return nil -} diff --git a/kv/migration/buckets_test.go b/kv/migration/buckets_test.go deleted file mode 100644 index ae8017098b0..00000000000 --- a/kv/migration/buckets_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package migration - -import ( - "context" - "errors" - "testing" - - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kv" -) - -func Test_BucketsMigration_CreateBuckets(t *testing.T) { - var ( - ctx = context.Background() - bucket = []byte("some_bucket") - store = inmem.NewKVStore() - ) - - // bucket should not exist - bucketShouldNotExist(t, store, bucket) - - // build new create buckets migration - migration := CreateBuckets("create bucket some_bucket", bucket) - - // apply migration up - if err := migration.Up(ctx, store); err != nil { - t.Fatal("unexpected error", err) - } - - // bucket should now exist - bucketShouldExist(t, store, bucket) - - // apply migration down - if err := migration.Down(ctx, store); err != nil { - t.Fatal("unexpected error", err) - } - - // bucket should no longer exist - bucketShouldNotExist(t, store, bucket) -} - -func Test_BucketsMigration_DeleteBuckets(t *testing.T) { - var ( - ctx = context.Background() - bucket = []byte("some_bucket") - store = inmem.NewKVStore() - ) - - // initially create bucket - if err := store.CreateBucket(ctx, bucket); err != nil { - t.Fatal("unexpected error", err) - } - - // ensure bucket is there to start with - bucketShouldExist(t, store, bucket) - - // build new delete buckets migration - migration := DeleteBuckets("delete bucket some_bucket", bucket) - - // apply migration up - if err := migration.Up(ctx, store); err != nil { - t.Fatal("unexpected error", err) - } - - // bucket should have been removed - bucketShouldNotExist(t, store, bucket) - - // apply migration down - if err := migration.Down(ctx, store); err != nil { - t.Fatal("unexpected error", err) - } - - // bucket should exist again - bucketShouldExist(t, store, bucket) -} - -func bucketShouldExist(t *testing.T, store kv.Store, bucket []byte) { - t.Helper() - - if err := store.View(context.Background(), func(tx kv.Tx) error { - _, err := tx.Bucket(bucket) - return err - }); err != nil { - t.Fatal("unexpected error", err) - } -} - -func bucketShouldNotExist(t *testing.T, store kv.Store, bucket []byte) { - t.Helper() - - if err := store.View(context.Background(), func(tx kv.Tx) error { - _, err := tx.Bucket(bucket) - return err - }); !errors.Is(err, kv.ErrBucketNotFound) { - t.Fatalf("expected bucket not found, got %q", err) - } -} diff --git a/kv/migration/create.go b/kv/migration/create.go deleted file mode 100644 index 70e22de122e..00000000000 --- a/kv/migration/create.go +++ /dev/null @@ -1,77 +0,0 @@ -package migration - -import ( - "bytes" - "fmt" - "go/format" - "html/template" - "os" - "strings" - - "golang.org/x/text/cases" - "golang.org/x/text/language" -) - -const newMigrationFmt = `package all - -var %s = &Migration{} -` - -// CreateNewMigration persists a new migration file in the appropriate location -// and updates the appropriate all.go list of migrations -func 
CreateNewMigration(existing []Spec, name string) error { - camelName := strings.Replace(cases.Title(language.Und).String(name), " ", "", -1) - - newMigrationNumber := len(existing) + 1 - - newMigrationVariable := fmt.Sprintf("Migration%04d_%s", newMigrationNumber, camelName) - - newMigrationFile := fmt.Sprintf("./kv/migration/all/%04d_%s.go", newMigrationNumber, strings.Replace(name, " ", "-", -1)) - - fmt.Println("Creating new migration:", newMigrationFile) - - if err := os.WriteFile(newMigrationFile, []byte(fmt.Sprintf(newMigrationFmt, newMigrationVariable)), 0644); err != nil { - return err - } - - fmt.Println("Inserting migration into ./kv/migration/all/all.go") - - tmplData, err := os.ReadFile("./kv/migration/all/all.go") - if err != nil { - return err - } - - type Context struct { - Name string - Variable string - } - - tmpl := template.Must( - template. - New("migrations"). - Funcs(template.FuncMap{"do_not_edit": func(c Context) string { - return fmt.Sprintf("%s\n%s,\n// {{ do_not_edit . }}", c.Name, c.Variable) - }}). - Parse(string(tmplData)), - ) - - buf := new(bytes.Buffer) - - if err := tmpl.Execute(buf, Context{ - Name: name, - Variable: newMigrationVariable, - }); err != nil { - return err - } - - src, err := format.Source(buf.Bytes()) - if err != nil { - return err - } - - if err := os.WriteFile("./kv/migration/all/all.go", src, 0644); err != nil { - return err - } - - return nil -} diff --git a/kv/migration/doc.go b/kv/migration/doc.go deleted file mode 100644 index 09600acc3b1..00000000000 --- a/kv/migration/doc.go +++ /dev/null @@ -1,28 +0,0 @@ -// package migration -// -// This package contains utility types for composing and running schema and data migrations -// in a strictly serial and ordered nature; against a backing kv.SchemaStore implementation. -// -// The goal is provide a mechanism to ensure an ordered set of changes are applied once -// and only once to a persisted kv store. To ensure we can make guarantees from one migration -// to the next, based on the mutations of the previous migrations. -// -// The package offers the `Migrator` type which takes a slice of `Spec` implementations. -// A spec is a single migration definition, which exposes a name, up and down operations -// expressed as an Up and Down function on the Spec implementation. -// -// The `Migrator` on a call to `Up(ctx)` applies these defined list of migrations respective `Up(...)` functions -// on a `kv.SchemaStore` in order and persists their invocation on the store in a reserved Bucket `migrationsv1`. -// This is to ensure the only once invocation of the migration takes place and allows to the resuming or introduction -// of new migrations at a later date. -// This means the defined list needs to remain static from the point of application. Otherwise an error will be raised. -// -// This package also offer utilities types for quickly defining common changes as specifications. -// For example creating buckets, when can be quickly constructed via `migration.CreateBuckets("create buckets ...", []byte("foo"), []byte{"bar"})`. -// -// As of today all migrations be found in a single defintion in the sub-package to this one -// named `all` (see `kv/migration/all/all.go`). -// The `migration.CreateNewMigration()` method can be used to manipulate this `all.go` file in the package and quickly -// add a new migration file to be populated. This is accessible on the command line via the `internal/cmd/kvmigrate` buildable go tool. -// Try `go run internal/cmd/kvmigrate/main.go`. 
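Following that package description, here is a minimal usage sketch (not part of the original files) that leans only on APIs visible elsewhere in this diff: migration.NewMigrator, the CreateBuckets helper Spec, SetBackupPath, and Migrator.Up, together with the in-memory store the tests use. The bucket name and backup path are made up for illustration.

package main

import (
	"context"

	"github.com/influxdata/influxdb/v2/inmem"
	"github.com/influxdata/influxdb/v2/kv/migration"
	"go.uber.org/zap"
)

func main() {
	ctx := context.Background()
	store := inmem.NewKVStore()

	// Each Spec is one named, ordered change; CreateBuckets is one of the
	// helper Specs provided by this package.
	createExample := migration.CreateBuckets("create example bucket", []byte("examplev1"))

	migrator, err := migration.NewMigrator(zap.NewNop(), store, createExample)
	if err != nil {
		panic(err)
	}

	// Optionally back up existing metadata before applying new migrations
	// (skipped when the store has no previously applied migrations).
	migrator.SetBackupPath("/tmp/metadata.bak")

	// Up applies every Spec not yet recorded in the migrationsv1 bucket,
	// in order, and records each invocation so it runs only once.
	if err := migrator.Up(ctx); err != nil {
		panic(err)
	}
}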
-package migration diff --git a/kv/migration/migration.go b/kv/migration/migration.go deleted file mode 100644 index 937031d2d56..00000000000 --- a/kv/migration/migration.go +++ /dev/null @@ -1,376 +0,0 @@ -package migration - -import ( - "context" - "encoding/json" - "fmt" - "os" - "time" - - "github.com/influxdata/influxdb/v2/kit/migration" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "go.uber.org/zap" -) - -var migrationBucket = []byte("migrationsv1") - -type Store = kv.SchemaStore - -// MigrationState is a type for describing the state of a migration. -type MigrationState uint - -const ( - // DownMigrationState is for a migration not yet applied. - DownMigrationState MigrationState = iota - // UpMigration State is for a migration which has been applied. - UpMigrationState -) - -// String returns a string representation for a migration state. -func (s MigrationState) String() string { - switch s { - case DownMigrationState: - return "down" - case UpMigrationState: - return "up" - default: - return "unknown" - } -} - -// Migration is a record of a particular migration. -type Migration struct { - ID platform.ID `json:"id"` - Name string `json:"name"` - State MigrationState `json:"-"` - StartedAt *time.Time `json:"started_at"` - FinishedAt *time.Time `json:"finished_at,omitempty"` -} - -// Spec is a specification for a particular migration. -// It describes the name of the migration and up and down operations -// needed to fulfill the migration. -type Spec interface { - MigrationName() string - Up(ctx context.Context, store kv.SchemaStore) error - Down(ctx context.Context, store kv.SchemaStore) error -} - -// Migrator is a type which manages migrations. -// It takes a list of migration specifications and undo (down) all or apply (up) outstanding migrations. -// It records the state of the world in store under the migrations bucket. -type Migrator struct { - logger *zap.Logger - store Store - - Specs []Spec - - now func() time.Time - backupPath string -} - -// NewMigrator constructs and configures a new Migrator. -func NewMigrator(logger *zap.Logger, store Store, ms ...Spec) (*Migrator, error) { - m := &Migrator{ - logger: logger, - store: store, - now: func() time.Time { - return time.Now().UTC() - }, - } - - // create migration bucket if it does not exist - if err := store.CreateBucket(context.Background(), migrationBucket); err != nil { - return nil, err - } - - m.AddMigrations(ms...) - - return m, nil -} - -// AddMigrations appends the provided migration specs onto the Migrator. -func (m *Migrator) AddMigrations(ms ...Spec) { - m.Specs = append(m.Specs, ms...) -} - -// SetBackupPath records the filepath where pre-migration state should be written prior to running migrations. -func (m *Migrator) SetBackupPath(path string) { - m.backupPath = path -} - -// List returns a list of migrations and their states within the provided store. -func (m *Migrator) List(ctx context.Context) (migrations []Migration, _ error) { - if err := m.walk(ctx, m.store, func(id platform.ID, m Migration) { - migrations = append(migrations, m) - }); err != nil { - return nil, err - } - - migrationsLen := len(migrations) - for idx, spec := range m.Specs[migrationsLen:] { - migration := Migration{ - ID: platform.ID(migrationsLen + idx + 1), - Name: spec.MigrationName(), - } - - migrations = append(migrations, migration) - } - - return -} - -// Up applies each outstanding migration in order. 
-// Migrations are applied in order from the lowest indexed migration in a down state. -// -// For example, given: -// 0001 add bucket foo | (up) -// 0002 add bucket bar | (down) -// 0003 add index "foo on baz" | (down) -// -// Up would apply migration 0002 and then 0003. -func (m *Migrator) Up(ctx context.Context) error { - wrapErr := func(err error) error { - if err == nil { - return nil - } - - return fmt.Errorf("up: %w", err) - } - - var lastMigration int - if err := m.walk(ctx, m.store, func(id platform.ID, mig Migration) { - // we're interested in the last up migration - if mig.State == UpMigrationState { - lastMigration = int(id) - } - }); err != nil { - return wrapErr(err) - } - - migrationsToDo := len(m.Specs[lastMigration:]) - if migrationsToDo == 0 { - return nil - } - - if m.backupPath != "" && lastMigration != 0 { - m.logger.Info("Backing up pre-migration metadata", zap.String("backup_path", m.backupPath)) - if err := func() error { - out, err := os.Create(m.backupPath) - if err != nil { - return err - } - defer out.Close() - - if err := m.store.Backup(ctx, out); err != nil { - return err - } - return nil - }(); err != nil { - return fmt.Errorf("failed to back up pre-migration metadata: %w", err) - } - } - - m.logger.Info("Bringing up metadata migrations", zap.Int("migration_count", migrationsToDo)) - for idx, spec := range m.Specs[lastMigration:] { - startedAt := m.now() - migration := Migration{ - ID: platform.ID(lastMigration + idx + 1), - Name: spec.MigrationName(), - StartedAt: &startedAt, - } - - m.logMigrationEvent(UpMigrationState, migration, "started") - - if err := m.putMigration(ctx, m.store, migration); err != nil { - return wrapErr(err) - } - - if err := spec.Up(ctx, m.store); err != nil { - return wrapErr(err) - } - - finishedAt := m.now() - migration.FinishedAt = &finishedAt - migration.State = UpMigrationState - - if err := m.putMigration(ctx, m.store, migration); err != nil { - return wrapErr(err) - } - - m.logMigrationEvent(UpMigrationState, migration, "completed") - } - - return nil -} - -// Down applies the down operation of each currently applied migration. -// Migrations are applied in reverse order from the highest indexed migration in a down state. -// -// For example, given: -// 0001 add bucket foo | (up) -// 0002 add bucket bar | (up) -// 0003 add index "foo on baz" | (down) -// -// Down would call down() on 0002 and then on 0001. 
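The rollback direction just described can be sketched the same way; a hedged example (the helper name is made up) that assumes a migrator built as in the earlier sketch:

package example

import (
	"context"

	"github.com/influxdata/influxdb/v2/kv/migration"
)

// rollBackToInitial keeps migration 0001 applied and undoes every later
// applied migration in reverse order, removing each record from the
// migrationsv1 bucket as it goes.
func rollBackToInitial(ctx context.Context, m *migration.Migrator) error {
	return m.Down(ctx, 1)
}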
-func (m *Migrator) Down(ctx context.Context, untilMigration int) (err error) { - wrapErr := func(err error) error { - if err == nil { - return nil - } - - return fmt.Errorf("down: %w", err) - } - - var migrations []struct { - Spec - Migration - } - - if err := m.walk(ctx, m.store, func(id platform.ID, mig Migration) { - migrations = append( - migrations, - struct { - Spec - Migration - }{ - m.Specs[int(id)-1], - mig, - }, - ) - }); err != nil { - return wrapErr(err) - } - - migrationsToDo := len(migrations) - untilMigration - if migrationsToDo == 0 { - return nil - } - if migrationsToDo < 0 { - m.logger.Warn("KV metadata is already on a schema older than target, nothing to do") - return nil - } - - if m.backupPath != "" { - m.logger.Info("Backing up pre-migration metadata", zap.String("backup_path", m.backupPath)) - if err := func() error { - out, err := os.Create(m.backupPath) - if err != nil { - return err - } - defer out.Close() - - if err := m.store.Backup(ctx, out); err != nil { - return err - } - return nil - }(); err != nil { - return fmt.Errorf("failed to back up pre-migration metadata: %w", err) - } - } - - m.logger.Info("Tearing down metadata migrations", zap.Int("migration_count", migrationsToDo)) - for i := len(migrations) - 1; i >= untilMigration; i-- { - migration := migrations[i] - - m.logMigrationEvent(DownMigrationState, migration.Migration, "started") - - if err := migration.Spec.Down(ctx, m.store); err != nil { - return wrapErr(err) - } - - if err := m.deleteMigration(ctx, m.store, migration.Migration); err != nil { - return wrapErr(err) - } - - m.logMigrationEvent(DownMigrationState, migration.Migration, "completed") - } - - return nil -} - -func (m *Migrator) logMigrationEvent(state MigrationState, mig Migration, event string) { - m.logger.Debug( - "Executing metadata migration", - zap.String("migration_name", mig.Name), - zap.String("target_state", state.String()), - zap.String("migration_event", event), - ) -} - -func (m *Migrator) walk(ctx context.Context, store kv.Store, fn func(id platform.ID, m Migration)) error { - if err := store.View(ctx, func(tx kv.Tx) error { - bkt, err := tx.Bucket(migrationBucket) - if err != nil { - return err - } - - cursor, err := bkt.ForwardCursor(nil) - if err != nil { - return err - } - - return kv.WalkCursor(ctx, cursor, func(k, v []byte) (bool, error) { - var id platform.ID - if err := id.Decode(k); err != nil { - return false, fmt.Errorf("decoding migration id: %w", err) - } - - var mig Migration - if err := json.Unmarshal(v, &mig); err != nil { - return false, err - } - - idx := int(id) - 1 - if idx >= len(m.Specs) { - return false, migration.ErrInvalidMigration(mig.Name) - } - - if spec := m.Specs[idx]; spec.MigrationName() != mig.Name { - return false, fmt.Errorf("expected migration %q, found %q", spec.MigrationName(), mig.Name) - } - - if mig.FinishedAt != nil { - mig.State = UpMigrationState - } - - fn(id, mig) - - return true, nil - }) - }); err != nil { - return fmt.Errorf("reading migrations: %w", err) - } - - return nil -} - -func (m *Migrator) putMigration(ctx context.Context, store kv.Store, migration Migration) error { - return store.Update(ctx, func(tx kv.Tx) error { - bkt, err := tx.Bucket(migrationBucket) - if err != nil { - return err - } - - data, err := json.Marshal(migration) - if err != nil { - return err - } - - id, _ := migration.ID.Encode() - return bkt.Put(id, data) - }) -} - -func (m *Migrator) deleteMigration(ctx context.Context, store kv.Store, migration Migration) error { - return store.Update(ctx, 
func(tx kv.Tx) error { - bkt, err := tx.Bucket(migrationBucket) - if err != nil { - return err - } - - id, _ := migration.ID.Encode() - return bkt.Delete(id) - }) -} diff --git a/kv/migration/migration_private_test.go b/kv/migration/migration_private_test.go deleted file mode 100644 index fd14fcf0562..00000000000 --- a/kv/migration/migration_private_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package migration - -import ( - "time" -) - -// MigratorSetNow sets the now function on the migrator. -// This function is only reachable via tests defined within this -// package folder. -func MigratorSetNow(migrator *Migrator, now func() time.Time) { - migrator.now = now -} diff --git a/kv/migration/migration_test.go b/kv/migration/migration_test.go deleted file mode 100644 index fa25519bc5c..00000000000 --- a/kv/migration/migration_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package migration_test - -import ( - "context" - "errors" - "fmt" - "os" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/bolt" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration" - "github.com/influxdata/influxdb/v2/kv/migration/all" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" -) - -func newMigrator(t *testing.T, logger *zap.Logger, store kv.SchemaStore, now influxdbtesting.NowFunc) *migration.Migrator { - migrator, err := migration.NewMigrator(logger, store) - if err != nil { - t.Fatal(err) - } - - migration.MigratorSetNow(migrator, now) - return migrator -} - -func Test_Inmem_Migrator(t *testing.T) { - influxdbtesting.Migrator(t, inmem.NewKVStore(), newMigrator) -} - -func Test_Bolt_Migrator(t *testing.T) { - store, closeBolt, err := newTestBoltStoreWithoutMigrations(t) - if err != nil { - t.Fatalf("failed to create new kv store: %v", err) - } - defer closeBolt() - - influxdbtesting.Migrator(t, store, newMigrator) -} - -func Test_Bolt_MigratorWithBackup(t *testing.T) { - store, closeBolt, err := newTestBoltStoreWithoutMigrations(t) - require.NoError(t, err) - defer closeBolt() - - ctx := context.Background() - migrator := newMigrator(t, zaptest.NewLogger(t), store, time.Now) - backupPath := fmt.Sprintf("%s.bak", store.DB().Path()) - migrator.SetBackupPath(backupPath) - - // Run the first migration. - migrator.AddMigrations(all.Migration0001_InitialMigration) - require.NoError(t, migrator.Up(ctx)) - - // List of applied migrations should now have length 1. - ms, err := migrator.List(ctx) - require.NoError(t, err) - require.Equal(t, 1, len(ms)) - - // Backup file shouldn't exist because there was no previous state to back up. - _, err = os.Stat(backupPath) - require.True(t, os.IsNotExist(err)) - - // Run a few more migrations. - migrator.AddMigrations(all.Migrations[1:5]...) - require.NoError(t, migrator.Up(ctx)) - - // List of applied migrations should now have length 5. - ms, err = migrator.List(ctx) - require.NoError(t, err) - require.Equal(t, 5, len(ms)) - - // Backup file should now exist. - _, err = os.Stat(backupPath) - require.NoError(t, err) - - // Open a 2nd store using the backup file. - backupStore := bolt.NewKVStore(zaptest.NewLogger(t), backupPath, bolt.WithNoSync) - require.NoError(t, backupStore.Open(ctx)) - defer backupStore.Close() - - // List of applied migrations in the backup should be 1. 
- backupMigrator := newMigrator(t, zaptest.NewLogger(t), backupStore, time.Now) - backupMigrator.AddMigrations(all.Migration0001_InitialMigration) - backupMs, err := backupMigrator.List(ctx) - require.NoError(t, err) - require.Equal(t, 1, len(backupMs)) - - // Run the other migrations on the backup. - backupMigrator.AddMigrations(all.Migrations[1:5]...) - require.NoError(t, backupMigrator.Up(ctx)) - - // List of applied migrations in the backup should be 5. - backupMs, err = backupMigrator.List(ctx) - require.NoError(t, err) - require.Equal(t, 5, len(backupMs)) -} - -func newTestBoltStoreWithoutMigrations(t *testing.T) (*bolt.KVStore, func(), error) { - f, err := os.CreateTemp("", "influxdata-bolt-") - if err != nil { - return nil, nil, errors.New("unable to open temporary boltdb file") - } - f.Close() - - path := f.Name() - s := bolt.NewKVStore(zaptest.NewLogger(t), path, bolt.WithNoSync) - if err := s.Open(context.Background()); err != nil { - return nil, nil, err - } - - close := func() { - s.Close() - os.Remove(path) - } - - return s, close, nil -} diff --git a/kv/mock/bucket.go b/kv/mock/bucket.go deleted file mode 100644 index 062bb3eac98..00000000000 --- a/kv/mock/bucket.go +++ /dev/null @@ -1,136 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/kv (interfaces: Bucket) - -// Package mock is a generated GoMock package. -package mock - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - kv "github.com/influxdata/influxdb/v2/kv" -) - -// MockBucket is a mock of Bucket interface -type MockBucket struct { - ctrl *gomock.Controller - recorder *MockBucketMockRecorder -} - -// MockBucketMockRecorder is the mock recorder for MockBucket -type MockBucketMockRecorder struct { - mock *MockBucket -} - -// NewMockBucket creates a new mock instance -func NewMockBucket(ctrl *gomock.Controller) *MockBucket { - mock := &MockBucket{ctrl: ctrl} - mock.recorder = &MockBucketMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockBucket) EXPECT() *MockBucketMockRecorder { - return m.recorder -} - -// Cursor mocks base method -func (m *MockBucket) Cursor(arg0 ...kv.CursorHint) (kv.Cursor, error) { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range arg0 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Cursor", varargs...) - ret0, _ := ret[0].(kv.Cursor) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Cursor indicates an expected call of Cursor -func (mr *MockBucketMockRecorder) Cursor(arg0 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cursor", reflect.TypeOf((*MockBucket)(nil).Cursor), arg0...) 
-} - -// Delete mocks base method -func (m *MockBucket) Delete(arg0 []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// Delete indicates an expected call of Delete -func (mr *MockBucketMockRecorder) Delete(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockBucket)(nil).Delete), arg0) -} - -// ForwardCursor mocks base method -func (m *MockBucket) ForwardCursor(arg0 []byte, arg1 ...kv.CursorOption) (kv.ForwardCursor, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0} - for _, a := range arg1 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ForwardCursor", varargs...) - ret0, _ := ret[0].(kv.ForwardCursor) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ForwardCursor indicates an expected call of ForwardCursor -func (mr *MockBucketMockRecorder) ForwardCursor(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0}, arg1...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForwardCursor", reflect.TypeOf((*MockBucket)(nil).ForwardCursor), varargs...) -} - -// Get mocks base method -func (m *MockBucket) Get(arg0 []byte) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", arg0) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Get indicates an expected call of Get -func (mr *MockBucketMockRecorder) Get(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockBucket)(nil).Get), arg0) -} - -// GetBatch mocks base method -func (m *MockBucket) GetBatch(arg0 ...[]byte) ([][]byte, error) { - m.ctrl.T.Helper() - varargs := []interface{}{} - for _, a := range arg0 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBatch", varargs...) - ret0, _ := ret[0].([][]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBatch indicates an expected call of GetBatch -func (mr *MockBucketMockRecorder) GetBatch(arg0 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBatch", reflect.TypeOf((*MockBucket)(nil).GetBatch), arg0...) -} - -// Put mocks base method -func (m *MockBucket) Put(arg0, arg1 []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Put", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Put indicates an expected call of Put -func (mr *MockBucketMockRecorder) Put(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockBucket)(nil).Put), arg0, arg1) -} diff --git a/kv/mock/forward_cursor.go b/kv/mock/forward_cursor.go deleted file mode 100644 index 2faf2f6c0c5..00000000000 --- a/kv/mock/forward_cursor.go +++ /dev/null @@ -1,77 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/kv (interfaces: ForwardCursor) - -// Package mock is a generated GoMock package. 
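A small test sketch (not part of the generated files) showing how these generated mocks are typically consumed with the standard gomock workflow; the key and value bytes are placeholders.

package mock_test

import (
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/influxdata/influxdb/v2/kv/mock"
	"github.com/stretchr/testify/require"
)

func TestMockBucketGet(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	bucket := mock.NewMockBucket(ctrl)
	// Expect exactly one Get for the key "foo" and serve a canned value.
	bucket.EXPECT().Get([]byte("foo")).Return([]byte("bar"), nil)

	v, err := bucket.Get([]byte("foo"))
	require.NoError(t, err)
	require.Equal(t, []byte("bar"), v)
}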
-package mock - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" -) - -// MockForwardCursor is a mock of ForwardCursor interface -type MockForwardCursor struct { - ctrl *gomock.Controller - recorder *MockForwardCursorMockRecorder -} - -// MockForwardCursorMockRecorder is the mock recorder for MockForwardCursor -type MockForwardCursorMockRecorder struct { - mock *MockForwardCursor -} - -// NewMockForwardCursor creates a new mock instance -func NewMockForwardCursor(ctrl *gomock.Controller) *MockForwardCursor { - mock := &MockForwardCursor{ctrl: ctrl} - mock.recorder = &MockForwardCursorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockForwardCursor) EXPECT() *MockForwardCursorMockRecorder { - return m.recorder -} - -// Close mocks base method -func (m *MockForwardCursor) Close() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close") - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close -func (mr *MockForwardCursorMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockForwardCursor)(nil).Close)) -} - -// Err mocks base method -func (m *MockForwardCursor) Err() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Err") - ret0, _ := ret[0].(error) - return ret0 -} - -// Err indicates an expected call of Err -func (mr *MockForwardCursorMockRecorder) Err() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockForwardCursor)(nil).Err)) -} - -// Next mocks base method -func (m *MockForwardCursor) Next() ([]byte, []byte) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Next") - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].([]byte) - return ret0, ret1 -} - -// Next indicates an expected call of Next -func (mr *MockForwardCursorMockRecorder) Next() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockForwardCursor)(nil).Next)) -} diff --git a/kv/mock/tx.go b/kv/mock/tx.go deleted file mode 100644 index 6f560bba84f..00000000000 --- a/kv/mock/tx.go +++ /dev/null @@ -1,77 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/kv (interfaces: Tx) - -// Package mock is a generated GoMock package. 
-package mock - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - kv "github.com/influxdata/influxdb/v2/kv" -) - -// MockTx is a mock of Tx interface -type MockTx struct { - ctrl *gomock.Controller - recorder *MockTxMockRecorder -} - -// MockTxMockRecorder is the mock recorder for MockTx -type MockTxMockRecorder struct { - mock *MockTx -} - -// NewMockTx creates a new mock instance -func NewMockTx(ctrl *gomock.Controller) *MockTx { - mock := &MockTx{ctrl: ctrl} - mock.recorder = &MockTxMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockTx) EXPECT() *MockTxMockRecorder { - return m.recorder -} - -// Bucket mocks base method -func (m *MockTx) Bucket(arg0 []byte) (kv.Bucket, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Bucket", arg0) - ret0, _ := ret[0].(kv.Bucket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Bucket indicates an expected call of Bucket -func (mr *MockTxMockRecorder) Bucket(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bucket", reflect.TypeOf((*MockTx)(nil).Bucket), arg0) -} - -// Context mocks base method -func (m *MockTx) Context() context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// Context indicates an expected call of Context -func (mr *MockTxMockRecorder) Context() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockTx)(nil).Context)) -} - -// WithContext mocks base method -func (m *MockTx) WithContext(arg0 context.Context) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "WithContext", arg0) -} - -// WithContext indicates an expected call of WithContext -func (mr *MockTxMockRecorder) WithContext(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WithContext", reflect.TypeOf((*MockTx)(nil).WithContext), arg0) -} diff --git a/kv/scrapers.go b/kv/scrapers.go deleted file mode 100644 index 4fc6d060ac2..00000000000 --- a/kv/scrapers.go +++ /dev/null @@ -1,330 +0,0 @@ -package kv - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - // ErrScraperNotFound is used when the scraper configuration is not found. - ErrScraperNotFound = &errors.Error{ - Msg: "scraper target is not found", - Code: errors.ENotFound, - } - - // ErrInvalidScraperID is used when the service was provided - // an invalid ID format. - ErrInvalidScraperID = &errors.Error{ - Code: errors.EInvalid, - Msg: "provided scraper target ID has invalid format", - } - - // ErrInvalidScrapersBucketID is used when the service was provided - // an invalid ID format. - ErrInvalidScrapersBucketID = &errors.Error{ - Code: errors.EInvalid, - Msg: "provided bucket ID has invalid format", - } - - // ErrInvalidScrapersOrgID is used when the service was provided - // an invalid ID format. - ErrInvalidScrapersOrgID = &errors.Error{ - Code: errors.EInvalid, - Msg: "provided organization ID has invalid format", - } -) - -// UnexpectedScrapersBucketError is used when the error comes from an internal system. 
-func UnexpectedScrapersBucketError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: "unexpected error retrieving scrapers bucket", - Err: err, - Op: "kv/scraper", - } -} - -// CorruptScraperError is used when the config cannot be unmarshalled from the -// bytes stored in the kv. -func CorruptScraperError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("Unknown internal scraper data error; Err: %v", err), - Op: "kv/scraper", - } -} - -// ErrUnprocessableScraper is used when a scraper is not able to be converted to JSON. -func ErrUnprocessableScraper(err error) *errors.Error { - return &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: fmt.Sprintf("unable to convert scraper target into JSON; Err %v", err), - } -} - -// InternalScraperServiceError is used when the error comes from an -// internal system. -func InternalScraperServiceError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("Unknown internal scraper data error; Err: %v", err), - Op: "kv/scraper", - } -} - -var ( - scrapersBucket = []byte("scraperv2") -) - -var _ influxdb.ScraperTargetStoreService = (*Service)(nil) - -func (s *Service) scrapersBucket(tx Tx) (Bucket, error) { - b, err := tx.Bucket([]byte(scrapersBucket)) - if err != nil { - return nil, UnexpectedScrapersBucketError(err) - } - - return b, nil -} - -// ListTargets will list all scrape targets. -func (s *Service) ListTargets(ctx context.Context, filter influxdb.ScraperTargetFilter) ([]influxdb.ScraperTarget, error) { - if filter.Org != nil { - org, err := s.orgs.FindOrganization(ctx, influxdb.OrganizationFilter{ - Name: filter.Org, - }) - if err != nil { - return nil, err - } - - filter.OrgID = &org.ID - } - - targets := []influxdb.ScraperTarget{} - err := s.kv.View(ctx, func(tx Tx) error { - var err error - targets, err = s.listTargets(ctx, tx, filter) - return err - }) - return targets, err -} - -func (s *Service) listTargets(ctx context.Context, tx Tx, filter influxdb.ScraperTargetFilter) ([]influxdb.ScraperTarget, error) { - targets := []influxdb.ScraperTarget{} - bucket, err := s.scrapersBucket(tx) - if err != nil { - return nil, err - } - - cur, err := bucket.ForwardCursor(nil) - if err != nil { - return nil, UnexpectedScrapersBucketError(err) - } - - for k, v := cur.Next(); k != nil; k, v = cur.Next() { - target, err := unmarshalScraper(v) - if err != nil { - return nil, err - } - if filter.IDs != nil { - if _, ok := filter.IDs[target.ID]; !ok { - continue - } - } - if filter.Name != nil && target.Name != *filter.Name { - continue - } - - if filter.OrgID != nil && target.OrgID != *filter.OrgID { - continue - } - - targets = append(targets, *target) - } - return targets, nil -} - -// AddTarget add a new scraper target into storage. -func (s *Service) AddTarget(ctx context.Context, target *influxdb.ScraperTarget, userID platform.ID) (err error) { - return s.kv.Update(ctx, func(tx Tx) error { - return s.addTarget(ctx, tx, target, userID) - }) -} - -func (s *Service) addTarget(ctx context.Context, tx Tx, target *influxdb.ScraperTarget, userID platform.ID) error { - if !target.OrgID.Valid() { - return ErrInvalidScrapersOrgID - } - - if !target.BucketID.Valid() { - return ErrInvalidScrapersBucketID - } - - target.ID = s.IDGenerator.ID() - if err := s.putTarget(ctx, tx, target); err != nil { - return err - } - - return nil -} - -// RemoveTarget removes a scraper target from the bucket. 
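As a hedged illustration of the ListTargets filtering shown above (the helper is hypothetical): passing only an organization name is enough, because the service resolves it to an org ID before scanning the scraperv2 bucket.

package example

import (
	"context"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kv"
)

// listOrgTargets lists every scraper target owned by the named organization.
func listOrgTargets(ctx context.Context, svc *kv.Service, orgName string) ([]influxdb.ScraperTarget, error) {
	return svc.ListTargets(ctx, influxdb.ScraperTargetFilter{Org: &orgName})
}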
-func (s *Service) RemoveTarget(ctx context.Context, id platform.ID) error { - return s.kv.Update(ctx, func(tx Tx) error { - return s.removeTarget(ctx, tx, id) - }) -} - -func (s *Service) removeTarget(ctx context.Context, tx Tx, id platform.ID) error { - _, pe := s.findTargetByID(ctx, tx, id) - if pe != nil { - return pe - } - encID, err := id.Encode() - if err != nil { - return ErrInvalidScraperID - } - - bucket, err := s.scrapersBucket(tx) - if err != nil { - return err - } - - _, err = bucket.Get(encID) - if IsNotFound(err) { - return ErrScraperNotFound - } - if err != nil { - return InternalScraperServiceError(err) - } - - if err := bucket.Delete(encID); err != nil { - return InternalScraperServiceError(err) - } - - return nil -} - -// UpdateTarget updates a scraper target. -func (s *Service) UpdateTarget(ctx context.Context, update *influxdb.ScraperTarget, userID platform.ID) (*influxdb.ScraperTarget, error) { - var target *influxdb.ScraperTarget - err := s.kv.Update(ctx, func(tx Tx) error { - var err error - target, err = s.updateTarget(ctx, tx, update, userID) - return err - }) - - return target, err -} - -func (s *Service) updateTarget(ctx context.Context, tx Tx, update *influxdb.ScraperTarget, userID platform.ID) (*influxdb.ScraperTarget, error) { - if !update.ID.Valid() { - return nil, ErrInvalidScraperID - } - - target, err := s.findTargetByID(ctx, tx, update.ID) - if err != nil { - return nil, err - } - - // If the bucket or org are invalid, just use the ids from the original. - if !update.BucketID.Valid() { - update.BucketID = target.BucketID - } - if !update.OrgID.Valid() { - update.OrgID = target.OrgID - } - target = update - return target, s.putTarget(ctx, tx, target) -} - -// GetTargetByID retrieves a scraper target by id. -func (s *Service) GetTargetByID(ctx context.Context, id platform.ID) (*influxdb.ScraperTarget, error) { - var target *influxdb.ScraperTarget - err := s.kv.View(ctx, func(tx Tx) error { - var err error - target, err = s.findTargetByID(ctx, tx, id) - return err - }) - - return target, err -} - -func (s *Service) findTargetByID(ctx context.Context, tx Tx, id platform.ID) (*influxdb.ScraperTarget, error) { - encID, err := id.Encode() - if err != nil { - return nil, ErrInvalidScraperID - } - - bucket, err := s.scrapersBucket(tx) - if err != nil { - return nil, err - } - - v, err := bucket.Get(encID) - if IsNotFound(err) { - return nil, ErrScraperNotFound - } - if err != nil { - return nil, InternalScraperServiceError(err) - } - - target, err := unmarshalScraper(v) - if err != nil { - return nil, err - } - - return target, nil -} - -// PutTarget will put a scraper target without setting an ID. -func (s *Service) PutTarget(ctx context.Context, target *influxdb.ScraperTarget) error { - return s.kv.Update(ctx, func(tx Tx) error { - return s.putTarget(ctx, tx, target) - }) -} - -func (s *Service) putTarget(ctx context.Context, tx Tx, target *influxdb.ScraperTarget) error { - v, err := marshalScraper(target) - if err != nil { - return ErrUnprocessableScraper(err) - } - - encID, err := target.ID.Encode() - if err != nil { - return ErrInvalidScraperID - } - - bucket, err := s.scrapersBucket(tx) - if err != nil { - return err - } - - if err := bucket.Put(encID, v); err != nil { - return UnexpectedScrapersBucketError(err) - } - - return nil -} - -// unmarshalScraper turns the stored byte slice in the kv into a *influxdb.ScraperTarget. 
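A short, hedged sketch of the UpdateTarget fallback implemented above: an update carrying zero OrgID and BucketID inherits both from the stored target, so a rename needs nothing else. The helper and its arguments are illustrative only.

package example

import (
	"context"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/kv"
)

// renameTarget changes only the target's name; OrgID and BucketID are left at
// their zero values and are filled in from the stored target by updateTarget.
func renameTarget(ctx context.Context, svc *kv.Service, id, userID platform.ID, name string) (*influxdb.ScraperTarget, error) {
	return svc.UpdateTarget(ctx, &influxdb.ScraperTarget{ID: id, Name: name}, userID)
}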
-func unmarshalScraper(v []byte) (*influxdb.ScraperTarget, error) { - s := &influxdb.ScraperTarget{} - if err := json.Unmarshal(v, s); err != nil { - return nil, CorruptScraperError(err) - } - return s, nil -} - -func marshalScraper(sc *influxdb.ScraperTarget) ([]byte, error) { - v, err := json.Marshal(sc) - if err != nil { - return nil, ErrUnprocessableScraper(err) - } - return v, nil -} diff --git a/kv/scrapers_test.go b/kv/scrapers_test.go deleted file mode 100644 index 75820a79355..00000000000 --- a/kv/scrapers_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package kv_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestBoltScraperTargetStoreService(t *testing.T) { - influxdbtesting.ScraperService(initBoltTargetService, t) -} - -func initBoltTargetService(f influxdbtesting.TargetFields, t *testing.T) (influxdb.ScraperTargetStoreService, string, func()) { - s, closeFn := influxdbtesting.NewTestBoltStore(t) - svc, op, closeSvc := initScraperTargetStoreService(s, f, t) - return svc, op, func() { - closeSvc() - closeFn() - } -} - -func initScraperTargetStoreService(s kv.SchemaStore, f influxdbtesting.TargetFields, t *testing.T) (influxdb.ScraperTargetStoreService, string, func()) { - ctx := context.Background() - tenantStore := tenant.NewStore(s) - tenantSvc := tenant.NewService(tenantStore) - - svc := kv.NewService(zaptest.NewLogger(t), s, tenantSvc) - - if f.IDGenerator != nil { - svc.IDGenerator = f.IDGenerator - } - - for _, target := range f.Targets { - if err := svc.PutTarget(ctx, target); err != nil { - t.Fatalf("failed to populate targets: %v", err) - } - } - - for _, o := range f.Organizations { - mock.SetIDForFunc(&tenantStore.OrgIDGen, o.ID, func() { - if err := tenantSvc.CreateOrganization(ctx, o); err != nil { - t.Fatalf("failed to populate organization") - } - }) - } - - return svc, kv.OpPrefix, func() { - for _, target := range f.Targets { - if err := svc.RemoveTarget(ctx, target.ID); err != nil { - t.Logf("failed to remove targets: %v", err) - } - } - for _, o := range f.Organizations { - if err := tenantSvc.DeleteOrganization(ctx, o.ID); err != nil { - t.Logf("failed to remove orgs: %v", err) - } - } - } -} diff --git a/kv/service.go b/kv/service.go deleted file mode 100644 index 33d2335487b..00000000000 --- a/kv/service.go +++ /dev/null @@ -1,83 +0,0 @@ -package kv - -import ( - "github.com/benbjohnson/clock" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/influxdata/influxdb/v2/rand" - "github.com/influxdata/influxdb/v2/resource" - "github.com/influxdata/influxdb/v2/resource/noop" - "github.com/influxdata/influxdb/v2/snowflake" - "go.uber.org/zap" -) - -// OpPrefix is the prefix for kv errors. -const OpPrefix = "kv/" - -// Service is the struct that influxdb services are implemented on. -type Service struct { - kv Store - log *zap.Logger - clock clock.Clock - Config ServiceConfig - audit resource.Logger - IDGenerator platform.IDGenerator - - // FluxLanguageService is used for parsing flux. - // If this is unset, operations that require parsing flux - // will fail. 
- FluxLanguageService fluxlang.FluxLanguageService - - TokenGenerator influxdb.TokenGenerator - // TODO(desa:ariel): this should not be embedded - influxdb.TimeGenerator - - orgs influxdb.OrganizationService - - variableStore *IndexStore -} - -// NewService returns an instance of a Service. -func NewService(log *zap.Logger, kv Store, orgs influxdb.OrganizationService, configs ...ServiceConfig) *Service { - s := &Service{ - log: log, - IDGenerator: snowflake.NewIDGenerator(), - TokenGenerator: rand.NewTokenGenerator(64), - kv: kv, - orgs: orgs, - audit: noop.ResourceLogger{}, - TimeGenerator: influxdb.RealTimeGenerator{}, - variableStore: newVariableStore(), - } - - if len(configs) > 0 { - s.Config = configs[0] - } - - s.clock = s.Config.Clock - if s.clock == nil { - s.clock = clock.New() - } - - s.FluxLanguageService = s.Config.FluxLanguageService - - return s -} - -// ServiceConfig allows us to configure Services -type ServiceConfig struct { - Clock clock.Clock - FluxLanguageService fluxlang.FluxLanguageService -} - -// WithResourceLogger sets the resource audit logger for the service. -func (s *Service) WithResourceLogger(audit resource.Logger) { - s.audit = audit -} - -// WithStore sets kv store for the service. -// Should only be used in tests for mocking. -func (s *Service) WithStore(store Store) { - s.kv = store -} diff --git a/kv/source.go b/kv/source.go deleted file mode 100644 index 43ddc6f041e..00000000000 --- a/kv/source.go +++ /dev/null @@ -1,319 +0,0 @@ -package kv - -import ( - "context" - "encoding/json" - "fmt" - - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - sourceBucket = []byte("sourcesv1") -) - -// DefaultSource is the default source. -var DefaultSource = influxdb.Source{ - Default: true, - Name: "autogen", - Type: influxdb.SelfSourceType, -} - -const ( - // DefaultSourceID it the default source identifier - DefaultSourceID = "020f755c3c082000" - // DefaultSourceOrganizationID is the default source's organization identifier - DefaultSourceOrganizationID = "50616e67652c206c" -) - -func init() { - if err := DefaultSource.ID.DecodeFromString(DefaultSourceID); err != nil { - panic(fmt.Sprintf("failed to decode default source id: %v", err)) - } - - if err := DefaultSource.OrganizationID.DecodeFromString(DefaultSourceOrganizationID); err != nil { - panic(fmt.Sprintf("failed to decode default source organization id: %v", err)) - } -} - -// DefaultSource retrieves the default source. -func (s *Service) DefaultSource(ctx context.Context) (*influxdb.Source, error) { - var sr *influxdb.Source - - err := s.kv.View(ctx, func(tx Tx) error { - // TODO(desa): make this faster by putting the default source in an index. - srcs, err := s.findSources(ctx, tx, influxdb.FindOptions{}) - if err != nil { - return err - } - for _, src := range srcs { - if src.Default { - sr = src - return nil - } - } - return &errors.Error{ - Code: errors.ENotFound, - Msg: "no default source found", - } - }) - - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - return sr, nil -} - -// FindSourceByID retrieves a source by id. 
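For orientation, a hedged construction sketch that mirrors how the tests in this diff wire up the service defined above; it assumes the metadata migrations have already been applied to the store, and the helper name is made up.

package example

import (
	"github.com/influxdata/influxdb/v2/inmem"
	"github.com/influxdata/influxdb/v2/kv"
	"github.com/influxdata/influxdb/v2/tenant"
	"go.uber.org/zap"
)

// newKVService builds a kv.Service over an in-memory store; the tenant
// service satisfies the influxdb.OrganizationService dependency used for
// organization lookups.
func newKVService(logger *zap.Logger) *kv.Service {
	store := inmem.NewKVStore()
	tenantSvc := tenant.NewService(tenant.NewStore(store))
	return kv.NewService(logger, store, tenantSvc)
}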
-func (s *Service) FindSourceByID(ctx context.Context, id platform.ID) (*influxdb.Source, error) { - var sr *influxdb.Source - - err := s.kv.View(ctx, func(tx Tx) error { - src, pe := s.findSourceByID(ctx, tx, id) - if pe != nil { - return &errors.Error{ - Err: pe, - } - } - sr = src - return nil - }) - return sr, err -} - -func (s *Service) findSourceByID(ctx context.Context, tx Tx, id platform.ID) (*influxdb.Source, error) { - encodedID, err := id.Encode() - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - b, err := tx.Bucket(sourceBucket) - if err != nil { - return nil, err - } - - v, err := b.Get(encodedID) - if IsNotFound(err) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrSourceNotFound, - } - } - - if err != nil { - return nil, err - } - - if err != nil { - return nil, err - } - - var sr influxdb.Source - if err := json.Unmarshal(v, &sr); err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - return &sr, nil -} - -// FindSources retrieves all sources that match an arbitrary source filter. -// Filters using ID, or OrganizationID and source Name should be efficient. -// Other filters will do a linear scan across all sources searching for a match. -func (s *Service) FindSources(ctx context.Context, opt influxdb.FindOptions) ([]*influxdb.Source, int, error) { - ss := []*influxdb.Source{} - err := s.kv.View(ctx, func(tx Tx) error { - srcs, err := s.findSources(ctx, tx, opt) - if err != nil { - return err - } - ss = srcs - return nil - }) - - if err != nil { - return nil, 0, &errors.Error{ - Op: influxdb.OpFindSources, - Err: err, - } - } - - return ss, len(ss), nil -} - -func (s *Service) findSources(ctx context.Context, tx Tx, opt influxdb.FindOptions) ([]*influxdb.Source, error) { - ss := []*influxdb.Source{} - - err := s.forEachSource(ctx, tx, func(s *influxdb.Source) bool { - ss = append(ss, s) - return true - }) - - if err != nil { - return nil, err - } - - return ss, nil -} - -// CreateSource creates a influxdb source and sets s.ID. -func (s *Service) CreateSource(ctx context.Context, src *influxdb.Source) error { - err := s.kv.Update(ctx, func(tx Tx) error { - src.ID = s.IDGenerator.ID() - - // Generating an organization id if it missing or invalid - if !src.OrganizationID.Valid() { - src.OrganizationID = s.IDGenerator.ID() - } - - return s.putSource(ctx, tx, src) - }) - if err != nil { - return &errors.Error{ - Err: err, - } - } - return nil -} - -// PutSource will put a source without setting an ID. -func (s *Service) PutSource(ctx context.Context, src *influxdb.Source) error { - return s.kv.Update(ctx, func(tx Tx) error { - return s.putSource(ctx, tx, src) - }) -} - -func (s *Service) putSource(ctx context.Context, tx Tx, src *influxdb.Source) error { - v, err := json.Marshal(src) - if err != nil { - return err - } - - encodedID, err := src.ID.Encode() - if err != nil { - return err - } - - b, err := tx.Bucket(sourceBucket) - if err != nil { - return err - } - - if err := b.Put(encodedID, v); err != nil { - return err - } - - return nil -} - -// forEachSource will iterate through all sources while fn returns true. 
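As a usage sketch for the source CRUD methods above (svc is assumed to be a *kv.Service built as in the previous example; the source name is made up):

```go
package example

import (
	"context"

	influxdb "github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kv"
)

// createAndFetchSource creates a source, lists all sources, then reads the
// new source back by ID.
func createAndFetchSource(ctx context.Context, svc *kv.Service) (*influxdb.Source, error) {
	src := &influxdb.Source{Name: "example", Type: influxdb.SelfSourceType}
	// CreateSource assigns src.ID, and an OrganizationID when one is missing.
	if err := svc.CreateSource(ctx, src); err != nil {
		return nil, err
	}
	// FindSources returns every source plus a count; filtering beyond the
	// FindOptions signature is not implemented in the code above.
	if _, _, err := svc.FindSources(ctx, influxdb.FindOptions{}); err != nil {
		return nil, err
	}
	return svc.FindSourceByID(ctx, src.ID)
}
```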
-func (s *Service) forEachSource(ctx context.Context, tx Tx, fn func(*influxdb.Source) bool) error { - b, err := tx.Bucket(sourceBucket) - if err != nil { - return err - } - - cur, err := b.ForwardCursor(nil) - if err != nil { - return err - } - - for k, v := cur.Next(); k != nil; k, v = cur.Next() { - s := &influxdb.Source{} - if err := json.Unmarshal(v, s); err != nil { - return err - } - if !fn(s) { - break - } - } - - return nil -} - -// UpdateSource updates a source according the parameters set on upd. -func (s *Service) UpdateSource(ctx context.Context, id platform.ID, upd influxdb.SourceUpdate) (*influxdb.Source, error) { - var sr *influxdb.Source - err := s.kv.Update(ctx, func(tx Tx) error { - src, err := s.updateSource(ctx, tx, id, upd) - if err != nil { - return &errors.Error{ - Err: err, - } - } - sr = src - return nil - }) - - return sr, err -} - -func (s *Service) updateSource(ctx context.Context, tx Tx, id platform.ID, upd influxdb.SourceUpdate) (*influxdb.Source, error) { - src, pe := s.findSourceByID(ctx, tx, id) - if pe != nil { - return nil, pe - } - - if err := upd.Apply(src); err != nil { - return nil, err - } - - if err := s.putSource(ctx, tx, src); err != nil { - return nil, err - } - - return src, nil -} - -// DeleteSource deletes a source and prunes it from the index. -func (s *Service) DeleteSource(ctx context.Context, id platform.ID) error { - return s.kv.Update(ctx, func(tx Tx) error { - pe := s.deleteSource(ctx, tx, id) - if pe != nil { - return &errors.Error{ - Err: pe, - } - } - return nil - }) -} - -func (s *Service) deleteSource(ctx context.Context, tx Tx, id platform.ID) error { - if id == DefaultSource.ID { - return &errors.Error{ - Code: errors.EForbidden, - Msg: "cannot delete autogen source", - } - } - _, pe := s.findSourceByID(ctx, tx, id) - if pe != nil { - return pe - } - - encodedID, err := id.Encode() - if err != nil { - return &errors.Error{ - Err: err, - } - } - - b, err := tx.Bucket(sourceBucket) - if err != nil { - return err - } - - if err = b.Delete(encodedID); err != nil { - return &errors.Error{ - Err: err, - } - } - return nil -} diff --git a/kv/source_test.go b/kv/source_test.go deleted file mode 100644 index 5342afdfd93..00000000000 --- a/kv/source_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package kv_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestBoltSourceService(t *testing.T) { - t.Run("CreateSource", func(t *testing.T) { influxdbtesting.CreateSource(initBoltSourceService, t) }) - t.Run("FindSourceByID", func(t *testing.T) { influxdbtesting.FindSourceByID(initBoltSourceService, t) }) - t.Run("FindSources", func(t *testing.T) { influxdbtesting.FindSources(initBoltSourceService, t) }) - t.Run("DeleteSource", func(t *testing.T) { influxdbtesting.DeleteSource(initBoltSourceService, t) }) -} - -func initBoltSourceService(f influxdbtesting.SourceFields, t *testing.T) (influxdb.SourceService, string, func()) { - s, closeBolt := influxdbtesting.NewTestBoltStore(t) - svc, op, closeSvc := initSourceService(s, f, t) - return svc, op, func() { - closeSvc() - closeBolt() - } -} - -func initSourceService(s kv.SchemaStore, f influxdbtesting.SourceFields, t *testing.T) (influxdb.SourceService, string, func()) { - ctx := context.Background() - svc := kv.NewService(zaptest.NewLogger(t), s, 
&mock.OrganizationService{}) - svc.IDGenerator = f.IDGenerator - - for _, b := range f.Sources { - if err := svc.PutSource(ctx, b); err != nil { - t.Fatalf("failed to populate sources") - } - } - return svc, kv.OpPrefix, func() { - for _, b := range f.Sources { - if err := svc.DeleteSource(ctx, b.ID); err != nil { - t.Logf("failed to remove source: %v", err) - } - } - } -} diff --git a/kv/store.go b/kv/store.go deleted file mode 100644 index 25cc619d94e..00000000000 --- a/kv/store.go +++ /dev/null @@ -1,264 +0,0 @@ -package kv - -import ( - "context" - "errors" - "io" -) - -var ( - // ErrKeyNotFound is the error returned when the key requested is not found. - ErrKeyNotFound = errors.New("key not found") - // ErrBucketNotFound is the error returned when the bucket cannot be found. - ErrBucketNotFound = errors.New("bucket not found") - // ErrTxNotWritable is the error returned when an mutable operation is called during - // a non-writable transaction. - ErrTxNotWritable = errors.New("transaction is not writable") - // ErrSeekMissingPrefix is returned when seek bytes is missing the prefix defined via - // WithCursorPrefix - ErrSeekMissingPrefix = errors.New("seek missing prefix bytes") -) - -// IsNotFound returns a boolean indicating whether the error is known to report that a key or was not found. -func IsNotFound(err error) bool { - return err == ErrKeyNotFound -} - -// SchemaStore is a superset of Store along with store schema change -// functionality like bucket creation and deletion. -// -// This type is made available via the `kv/migration` package. -// It should be consumed via this package to create and delete buckets using a migration. -// Checkout the internal tool `cmd/internal/kvmigrate` for building a new migration Go file into -// the correct location (in kv/migration/all.go). -// Configuring your bucket here will ensure it is created properly on initialization of InfluxDB. -type SchemaStore interface { - Store - - // CreateBucket creates a bucket on the underlying store if it does not exist - CreateBucket(ctx context.Context, bucket []byte) error - // DeleteBucket deletes a bucket on the underlying store if it exists - DeleteBucket(ctx context.Context, bucket []byte) error -} - -// Store is an interface for a generic key value store. It is modeled after -// the boltdb database struct. -type Store interface { - // View opens up a transaction that will not write to any data. Implementing interfaces - // should take care to ensure that all view transactions do not mutate any data. - View(context.Context, func(Tx) error) error - // Update opens up a transaction that will mutate data. - Update(context.Context, func(Tx) error) error - // Backup copies all K:Vs to a writer, file format determined by implementation. - Backup(ctx context.Context, w io.Writer) error - // Restore replaces the underlying data file with the data from r. - Restore(ctx context.Context, r io.Reader) error - // RLock takes a read lock on the underlying KV store. - RLock() - // RUnlock releases a previously-taken read lock - RUnlock() -} - -// Tx is a transaction in the store. -type Tx interface { - // Bucket possibly creates and returns bucket, b. - Bucket(b []byte) (Bucket, error) - // Context returns the context associated with this Tx. - Context() context.Context - // WithContext associates a context with this Tx. 
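To make the View/Update transaction model concrete, here is a small hedged sketch of a write followed by a read against any kv.Store implementation (the bucket name and key are invented for illustration, and the bucket is assumed to have been created via a migration):

```go
package example

import (
	"context"

	"github.com/influxdata/influxdb/v2/kv"
)

func putThenGet(ctx context.Context, store kv.Store) ([]byte, error) {
	bucket := []byte("examplev1") // hypothetical bucket

	if err := store.Update(ctx, func(tx kv.Tx) error {
		b, err := tx.Bucket(bucket)
		if err != nil {
			return err
		}
		return b.Put([]byte("greeting"), []byte("hello"))
	}); err != nil {
		return nil, err
	}

	var val []byte
	err := store.View(ctx, func(tx kv.Tx) error {
		b, err := tx.Bucket(bucket)
		if err != nil {
			return err
		}
		v, err := b.Get([]byte("greeting"))
		if kv.IsNotFound(err) {
			return nil // treat a missing key as empty rather than failing
		}
		val = v
		return err
	})
	return val, err
}
```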
- WithContext(ctx context.Context) -} - -type CursorPredicateFunc func(key, value []byte) bool - -type CursorHints struct { - KeyPrefix *string - KeyStart *string - PredicateFn CursorPredicateFunc -} - -// CursorHint configures CursorHints -type CursorHint func(*CursorHints) - -// WithCursorHintPrefix is a hint to the store -// that the caller is only interested keys with the -// specified prefix. -func WithCursorHintPrefix(prefix string) CursorHint { - return func(o *CursorHints) { - o.KeyPrefix = &prefix - } -} - -// WithCursorHintKeyStart is a hint to the store -// that the caller is interested in reading keys from -// start. -func WithCursorHintKeyStart(start string) CursorHint { - return func(o *CursorHints) { - o.KeyStart = &start - } -} - -// WithCursorHintPredicate is a hint to the store -// to return only key / values which return true for the -// f. -// -// The primary concern of the predicate is to improve performance. -// Therefore, it should perform tests on the data at minimal cost. -// If the predicate has no meaningful impact on reducing memory or -// CPU usage, there is no benefit to using it. -func WithCursorHintPredicate(f CursorPredicateFunc) CursorHint { - return func(o *CursorHints) { - o.PredicateFn = f - } -} - -// Bucket is the abstraction used to perform get/put/delete/get-many operations -// in a key value store. -type Bucket interface { - // TODO context? - // Get returns a key within this bucket. Errors if key does not exist. - Get(key []byte) ([]byte, error) - // GetBatch returns a corresponding set of values for the provided - // set of keys. If a value cannot be found for any provided key its - // value will be nil at the same index for the provided key. - GetBatch(keys ...[]byte) ([][]byte, error) - // Cursor returns a cursor at the beginning of this bucket optionally - // using the provided hints to improve performance. - Cursor(hints ...CursorHint) (Cursor, error) - // Put should error if the transaction it was called in is not writable. - Put(key, value []byte) error - // Delete should error if the transaction it was called in is not writable. - Delete(key []byte) error - // ForwardCursor returns a forward cursor from the seek position provided. - // Other options can be supplied to provide direction and hints. - ForwardCursor(seek []byte, opts ...CursorOption) (ForwardCursor, error) -} - -// Cursor is an abstraction for iterating/ranging through data. A concrete implementation -// of a cursor can be found in cursor.go. -type Cursor interface { - // Seek moves the cursor forward until reaching prefix in the key name. - Seek(prefix []byte) (k []byte, v []byte) - // First moves the cursor to the first key in the bucket. - First() (k []byte, v []byte) - // Last moves the cursor to the last key in the bucket. - Last() (k []byte, v []byte) - // Next moves the cursor to the next key in the bucket. - Next() (k []byte, v []byte) - // Prev moves the cursor to the prev key in the bucket. - Prev() (k []byte, v []byte) -} - -// ForwardCursor is an abstraction for interacting/ranging through data in one direction. -type ForwardCursor interface { - // Next moves the cursor to the next key in the bucket. - Next() (k, v []byte) - // Err returns non-nil if an error occurred during cursor iteration. - // This should always be checked after Next returns a nil key/value. - Err() error - // Close is reponsible for freeing any resources created by the cursor. - Close() error -} - -// CursorDirection is an integer used to define the direction -// a request cursor operates in. 
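A sketch of hint-driven iteration over a Bucket; the hints only help the store optimize, so the loop still has to check the prefix itself (bkt is assumed to come from tx.Bucket inside a View transaction):

```go
package example

import (
	"strings"

	"github.com/influxdata/influxdb/v2/kv"
)

func scanWithHints(bkt kv.Bucket, prefix string) error {
	cur, err := bkt.Cursor(
		kv.WithCursorHintPrefix(prefix),
		// Keep the predicate cheap, per the guidance above.
		kv.WithCursorHintPredicate(func(key, _ []byte) bool {
			return strings.HasPrefix(string(key), prefix)
		}),
	)
	if err != nil {
		return err
	}
	for k, v := cur.Seek([]byte(prefix)); k != nil; k, v = cur.Next() {
		if !strings.HasPrefix(string(k), prefix) {
			break // Seek only positions the cursor; stop once keys leave the prefix
		}
		_ = v // decode as needed
	}
	return nil
}
```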
-type CursorDirection int - -const ( - // CursorAscending directs a cursor to range in ascending order - CursorAscending CursorDirection = iota - // CursorAscending directs a cursor to range in descending order - CursorDescending -) - -// CursorConfig is a type used to configure a new forward cursor. -// It includes a direction and a set of hints -type CursorConfig struct { - Direction CursorDirection - Hints CursorHints - Prefix []byte - SkipFirst bool - Limit *int -} - -// NewCursorConfig constructs and configures a CursorConfig used to configure -// a forward cursor. -func NewCursorConfig(opts ...CursorOption) CursorConfig { - conf := CursorConfig{} - for _, opt := range opts { - opt(&conf) - } - return conf -} - -// CursorOption is a functional option for configuring a forward cursor -type CursorOption func(*CursorConfig) - -// WithCursorDirection sets the cursor direction on a provided cursor config -func WithCursorDirection(direction CursorDirection) CursorOption { - return func(c *CursorConfig) { - c.Direction = direction - } -} - -// WithCursorHints configs the provided hints on the cursor config -func WithCursorHints(hints ...CursorHint) CursorOption { - return func(c *CursorConfig) { - for _, hint := range hints { - hint(&c.Hints) - } - } -} - -// WithCursorPrefix configures the forward cursor to retrieve keys -// with a particular prefix. This implies the cursor will start and end -// at a specific location based on the prefix [prefix, prefix + 1). -// -// The value of the seek bytes must be prefixed with the provided -// prefix, otherwise an error will be returned. -func WithCursorPrefix(prefix []byte) CursorOption { - return func(c *CursorConfig) { - c.Prefix = prefix - } -} - -// WithCursorSkipFirstItem skips returning the first item found within -// the seek. -func WithCursorSkipFirstItem() CursorOption { - return func(c *CursorConfig) { - c.SkipFirst = true - } -} - -// WithCursorLimit restricts the number of key values return by the cursor -// to the provided limit count. -func WithCursorLimit(limit int) CursorOption { - return func(c *CursorConfig) { - c.Limit = &limit - } -} - -// VisitFunc is called for each k, v byte slice pair from the underlying source bucket -// which are found in the index bucket for a provided foreign key. -type VisitFunc func(k, v []byte) (bool, error) - -// WalkCursor consumers the forward cursor call visit for each k/v pair found -func WalkCursor(ctx context.Context, cursor ForwardCursor, visit VisitFunc) (err error) { - defer func() { - if cerr := cursor.Close(); cerr != nil && err == nil { - err = cerr - } - }() - - for k, v := cursor.Next(); k != nil; k, v = cursor.Next() { - if cont, err := visit(k, v); !cont || err != nil { - return err - } - - if err := ctx.Err(); err != nil { - return err - } - } - - return cursor.Err() -} diff --git a/kv/store_base.go b/kv/store_base.go deleted file mode 100644 index 2b654550b29..00000000000 --- a/kv/store_base.go +++ /dev/null @@ -1,576 +0,0 @@ -package kv - -import ( - "context" - "encoding/json" - "errors" - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/opentracing/opentracing-go" -) - -type Entity struct { - PK EncodeFn - UniqueKey EncodeFn - - Body interface{} -} - -// EncodeEntFn encodes the entity. This is used both for the key and vals in the store base. 
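ForwardCursor plus the options above compose with WalkCursor roughly like this (a sketch; the prefix is supplied by the caller and must also prefix the seek bytes, as WithCursorPrefix requires):

```go
package example

import (
	"context"

	"github.com/influxdata/influxdb/v2/kv"
)

func walkPrefix(ctx context.Context, bkt kv.Bucket, prefix []byte) error {
	cur, err := bkt.ForwardCursor(prefix,
		kv.WithCursorPrefix(prefix), // bound iteration to [prefix, prefix+1)
		kv.WithCursorLimit(100),     // stop after 100 key/value pairs
	)
	if err != nil {
		return err
	}
	// WalkCursor closes the cursor and surfaces cursor.Err() for us.
	return kv.WalkCursor(ctx, cur, func(k, v []byte) (bool, error) {
		// Return false to stop early, or an error to abort the walk.
		return true, nil
	})
}
```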
-type EncodeEntFn func(ent Entity) ([]byte, string, error) - -// EncIDKey encodes an entity into a key that represents the encoded ID provided. -func EncIDKey(ent Entity) ([]byte, string, error) { - if ent.PK == nil { - return nil, "ID", errors.New("no ID provided") - } - key, err := ent.PK() - return key, "ID", err -} - -// EncUniqKey encodes the unique key. -func EncUniqKey(ent Entity) ([]byte, string, error) { - if ent.UniqueKey == nil { - return nil, "Unique Key", errors.New("no unique key provided") - } - key, err := ent.UniqueKey() - return key, "Unique Key", err -} - -// EncBodyJSON JSON encodes the entity body and returns the raw bytes and indicates -// that it uses the entity body. -func EncBodyJSON(ent Entity) ([]byte, string, error) { - v, err := json.Marshal(ent.Body) - return v, "entity body", err -} - -// DecodeBucketValFn decodes the raw []byte. -type DecodeBucketValFn func(key, val []byte) (keyRepeat []byte, decodedVal interface{}, err error) - -// DecIndexID decodes the bucket val into an influxdb.ID. -func DecIndexID(key, val []byte) ([]byte, interface{}, error) { - var i platform.ID - return key, i, i.Decode(val) -} - -// ConvertValToEntFn converts a key and decoded bucket value to an entity. -type ConvertValToEntFn func(k []byte, v interface{}) (Entity, error) - -// DecodeOrgNameKey decodes a raw bucket key into the organization id and name -// used to create it. -func DecodeOrgNameKey(k []byte) (platform.ID, string, error) { - var orgID platform.ID - if err := orgID.Decode(k[:platform.IDLength]); err != nil { - return 0, "", err - } - return orgID, string(k[platform.IDLength:]), nil -} - -// NewOrgNameKeyStore creates a store for an entity's unique index on organization id and name. -// This is used throughout the kv pkg here to provide an entity uniquness by name within an org. -func NewOrgNameKeyStore(resource string, bktName []byte, caseSensitive bool) *StoreBase { - var decValToEntFn ConvertValToEntFn = func(k []byte, v interface{}) (Entity, error) { - id, ok := v.(platform.ID) - if err := IsErrUnexpectedDecodeVal(ok); err != nil { - return Entity{}, err - } - - ent := Entity{PK: EncID(id)} - if len(k) == 0 { - return ent, nil - } - - orgID, name, err := DecodeOrgNameKey(k) - if err != nil { - return Entity{}, err - } - nameEnc := EncString(name) - if !caseSensitive { - nameEnc = EncStringCaseInsensitive(name) - } - ent.UniqueKey = Encode(EncID(orgID), nameEnc) - return ent, nil - } - - return NewStoreBase(resource, bktName, EncUniqKey, EncIDKey, DecIndexID, decValToEntFn) -} - -// StoreBase is the base behavior for accessing buckets in kv. It provides mechanisms that can -// be used in composing stores together (i.e. IndexStore). -type StoreBase struct { - Resource string - BktName []byte - - EncodeEntKeyFn EncodeEntFn - EncodeEntBodyFn EncodeEntFn - DecodeEntFn DecodeBucketValFn - ConvertValToEntFn ConvertValToEntFn -} - -// NewStoreBase creates a new store base. -func NewStoreBase(resource string, bktName []byte, encKeyFn, encBodyFn EncodeEntFn, decFn DecodeBucketValFn, decToEntFn ConvertValToEntFn) *StoreBase { - return &StoreBase{ - Resource: resource, - BktName: bktName, - EncodeEntKeyFn: encKeyFn, - EncodeEntBodyFn: encBodyFn, - DecodeEntFn: decFn, - ConvertValToEntFn: decToEntFn, - } -} - -// EntKey returns the key for the entity provided. This is a shortcut for grabbing the EntKey without -// having to juggle the encoding funcs. 
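The store_base tests further down in this diff build a StoreBase exactly this way; here is the same shape for a hypothetical `item` entity, showing where the decode and convert hooks fit (bucket and resource names are invented):

```go
package example

import (
	"encoding/json"

	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/kv"
)

type item struct {
	ID    platform.ID
	OrgID platform.ID
	Name  string
}

// decodeItem is the DecodeBucketValFn: it turns the stored JSON body back
// into an item.
func decodeItem(key, val []byte) ([]byte, interface{}, error) {
	var it item
	if err := json.Unmarshal(val, &it); err != nil {
		return nil, nil, err
	}
	return key, it, nil
}

// itemToEnt is the ConvertValToEntFn: it rebuilds an Entity from a decoded value.
func itemToEnt(_ []byte, v interface{}) (kv.Entity, error) {
	it, ok := v.(item)
	if err := kv.IsErrUnexpectedDecodeVal(ok); err != nil {
		return kv.Entity{}, err
	}
	return kv.Entity{
		PK:        kv.EncID(it.ID),
		UniqueKey: kv.Encode(kv.EncID(it.OrgID), kv.EncString(it.Name)),
		Body:      it,
	}, nil
}

// itemStore keys entities by encoded ID and stores the JSON-encoded body.
var itemStore = kv.NewStoreBase("item", []byte("itemsv1"), kv.EncIDKey, kv.EncBodyJSON, decodeItem, itemToEnt)
```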
-func (s *StoreBase) EntKey(ctx context.Context, ent Entity) ([]byte, error) { - span, ctx := s.startSpan(ctx) - defer span.Finish() - return s.encodeEnt(ctx, ent, s.EncodeEntKeyFn) -} - -type ( - // DeleteOpts provides indicators to the store.Delete call for deleting a given - // entity. The FilterFn indicates the current value should be deleted when returning - // true. - DeleteOpts struct { - DeleteRelationFns []DeleteRelationsFn - FilterFn FilterFn - } - - // DeleteRelationsFn is a hook that a store that composes other stores can use to - // delete an entity and any relations it may share. An example would be deleting an - // an entity and its associated index. - DeleteRelationsFn func(key []byte, decodedVal interface{}) error -) - -// Delete deletes entities by the provided options. -func (s *StoreBase) Delete(ctx context.Context, tx Tx, opts DeleteOpts) error { - span, ctx := s.startSpan(ctx) - defer span.Finish() - - if opts.FilterFn == nil { - return nil - } - - findOpts := FindOpts{ - CaptureFn: func(k []byte, v interface{}) error { - for _, deleteFn := range opts.DeleteRelationFns { - if err := deleteFn(k, v); err != nil { - return err - } - } - return s.bucketDelete(ctx, tx, k) - }, - FilterEntFn: opts.FilterFn, - } - return s.Find(ctx, tx, findOpts) -} - -// DeleteEnt deletes an entity. -func (s *StoreBase) DeleteEnt(ctx context.Context, tx Tx, ent Entity) error { - span, ctx := s.startSpan(ctx) - defer span.Finish() - - encodedID, err := s.EntKey(ctx, ent) - if err != nil { - return err - } - return s.bucketDelete(ctx, tx, encodedID) -} - -type ( - // FindOpts provided a means to search through the bucket. When a filter func - // is provided, that will run against the entity and if the filter responds true, - // will count it towards the number of entries seen and the capture func will be - // run with it provided to it. - FindOpts struct { - Descending bool - Offset int - Limit int - Prefix []byte - CaptureFn FindCaptureFn - FilterEntFn FilterFn - } - - // FindCaptureFn is the mechanism for closing over the key and decoded value pair - // for adding results to the call sites collection. This generic implementation allows - // it to be reused. The returned decodedVal should always satisfy whatever decoding - // of the bucket value was set on the storeo that calls Find. - FindCaptureFn func(key []byte, decodedVal interface{}) error - - // FilterFn will provide an indicator to the Find or Delete calls that the entity that - // was seen is one that is valid and should be either captured or deleted (depending on - // the caller of the filter func). - FilterFn func(key []byte, decodedVal interface{}) bool -) - -// Find provides a mechanism for looking through the bucket via -// the set options. When a prefix is provided, the prefix is used to -// seek the bucket. -func (s *StoreBase) Find(ctx context.Context, tx Tx, opts FindOpts) error { - span, ctx := s.startSpan(ctx) - defer span.Finish() - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - cur, err := s.bucketCursor(ctx, tx) - if err != nil { - return err - } - - iter := &iterator{ - cursor: cur, - descending: opts.Descending, - limit: opts.Limit, - offset: opts.Offset, - prefix: opts.Prefix, - decodeFn: s.DecodeEntFn, - filterFn: opts.FilterEntFn, - } - - for k, v, err := iter.Next(ctx); k != nil; k, v, err = iter.Next(ctx) { - if err != nil { - return err - } - if err := opts.CaptureFn(k, v); err != nil { - return err - } - } - return nil -} - -// FindEnt returns the decoded entity body via the provided entity. 
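And a sketch of reading through that hypothetical store with Find, combining a filter with a capture function and a page size (the org ID 9000 simply mirrors the fixtures used in the tests below):

```go
package example

import (
	"context"

	"github.com/influxdata/influxdb/v2/kv"
)

// listItems scans itemStore (from the previous sketch) inside a read-only
// transaction, keeping at most 20 items that belong to one organization.
func listItems(ctx context.Context, tx kv.Tx) ([]item, error) {
	var out []item
	err := itemStore.Find(ctx, tx, kv.FindOpts{
		Limit: 20,
		FilterEntFn: func(_ []byte, v interface{}) bool {
			it, ok := v.(item)
			return ok && it.OrgID == 9000
		},
		CaptureFn: func(_ []byte, v interface{}) error {
			out = append(out, v.(item))
			return nil
		},
	})
	return out, err
}
```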
-// An example entity should not include a Body, but rather the ID, -// Name, or OrgID. -func (s *StoreBase) FindEnt(ctx context.Context, tx Tx, ent Entity) (interface{}, error) { - span, ctx := s.startSpan(ctx) - defer span.Finish() - - encodedID, err := s.EntKey(ctx, ent) - if err != nil { - // TODO: fix this error up - return nil, err - } - - body, err := s.bucketGet(ctx, tx, encodedID) - if err != nil { - return nil, err - } - - return s.decodeEnt(ctx, body) -} - -type ( - putOption struct { - isNew bool - isUpdate bool - } - - // PutOptionFn provides a hint to the store to make some guarantees about the - // put action. I.e. If it is new, then will validate there is no existing entity - // by the given PK. - PutOptionFn func(o *putOption) error -) - -// PutNew will create an entity that is not does not already exist. Guarantees uniqueness -// by the store's uniqueness guarantees. -func PutNew() PutOptionFn { - return func(o *putOption) error { - o.isNew = true - return nil - } -} - -// PutUpdate will update an entity that must already exist. -func PutUpdate() PutOptionFn { - return func(o *putOption) error { - o.isUpdate = true - return nil - } -} - -// Put will persist the entity. -func (s *StoreBase) Put(ctx context.Context, tx Tx, ent Entity, opts ...PutOptionFn) error { - span, ctx := s.startSpan(ctx) - defer span.Finish() - - var opt putOption - for _, o := range opts { - if err := o(&opt); err != nil { - return &errors2.Error{ - Code: errors2.EConflict, - Err: err, - } - } - } - - if err := s.putValidate(ctx, tx, ent, opt); err != nil { - return err - } - - encodedID, err := s.EntKey(ctx, ent) - if err != nil { - return err - } - - body, err := s.encodeEnt(ctx, ent, s.EncodeEntBodyFn) - if err != nil { - return err - } - - return s.bucketPut(ctx, tx, encodedID, body) -} - -func (s *StoreBase) putValidate(ctx context.Context, tx Tx, ent Entity, opt putOption) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if !opt.isUpdate && !opt.isNew { - return nil - } - - _, err := s.FindEnt(ctx, tx, ent) - if opt.isNew { - if err == nil || errors2.ErrorCode(err) != errors2.ENotFound { - return &errors2.Error{ - Code: errors2.EConflict, - Msg: fmt.Sprintf("%s is not unique", s.Resource), - Err: err, - } - } - return nil - } - return err -} - -func (s *StoreBase) bucket(ctx context.Context, tx Tx) (Bucket, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - bkt, err := tx.Bucket(s.BktName) - if err != nil { - return nil, &errors2.Error{ - Code: errors2.EInternal, - Msg: fmt.Sprintf("unexpected error retrieving bucket %q; Err %v", string(s.BktName), err), - Err: err, - } - } - return bkt, nil -} - -func (s *StoreBase) bucketCursor(ctx context.Context, tx Tx) (Cursor, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - b, err := s.bucket(ctx, tx) - if err != nil { - return nil, err - } - - cur, err := b.Cursor() - if err != nil { - return nil, &errors2.Error{ - Code: errors2.EInternal, - Msg: "failed to retrieve cursor", - Err: err, - } - } - return cur, nil -} - -func (s *StoreBase) bucketDelete(ctx context.Context, tx Tx, key []byte) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - b, err := s.bucket(ctx, tx) - if err != nil { - return err - } - - err = b.Delete(key) - if err == nil { - return nil - } - - iErr := &errors2.Error{ - Code: errors2.EInternal, - Err: err, - } - if IsNotFound(err) { - iErr.Code = errors2.ENotFound - iErr.Msg = fmt.Sprintf("%s does exist for 
key: %q", s.Resource, string(key)) - } - return iErr -} - -func (s *StoreBase) bucketGet(ctx context.Context, tx Tx, key []byte) ([]byte, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - b, err := s.bucket(ctx, tx) - if err != nil { - return nil, err - } - - body, err := b.Get(key) - if IsNotFound(err) { - return nil, &errors2.Error{ - Code: errors2.ENotFound, - Msg: fmt.Sprintf("%s not found for key %q", s.Resource, string(key)), - } - } - if err != nil { - return nil, &errors2.Error{ - Code: errors2.EInternal, - Err: err, - } - } - - return body, nil -} - -func (s *StoreBase) bucketPut(ctx context.Context, tx Tx, key, body []byte) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - b, err := s.bucket(ctx, tx) - if err != nil { - return err - } - - if err := b.Put(key, body); err != nil { - return &errors2.Error{ - Code: errors2.EInternal, - Err: err, - } - } - return nil -} - -func (s *StoreBase) decodeEnt(ctx context.Context, body []byte) (interface{}, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - _, v, err := s.DecodeEntFn([]byte{}, body) // ignore key here - if err != nil { - return nil, &errors2.Error{ - Code: errors2.EInternal, - Msg: fmt.Sprintf("failed to decode %s body", s.Resource), - Err: err, - } - } - return v, nil -} - -func (s *StoreBase) encodeEnt(ctx context.Context, ent Entity, fn EncodeEntFn) ([]byte, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if fn == nil { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: fmt.Sprintf("no key was provided for %s", s.Resource), - } - } - - encoded, field, err := fn(ent) - if err != nil { - return encoded, &errors2.Error{ - Code: errors2.EInvalid, - Msg: fmt.Sprintf("provided %s %s is an invalid format", s.Resource, field), - Err: err, - } - } - return encoded, nil -} - -func (s *StoreBase) startSpan(ctx context.Context) (opentracing.Span, context.Context) { - span, ctx := tracing.StartSpanFromContext(ctx) - span.SetTag("Bucket", string(s.BktName)) - span.SetTag("Resource", s.Resource) - return span, ctx -} - -type iterator struct { - cursor Cursor - - counter int - descending bool - limit int - offset int - prefix []byte - - nextFn func() (key, val []byte) - - decodeFn func(key, val []byte) (k []byte, decodedVal interface{}, err error) - filterFn FilterFn -} - -func (i *iterator) Next(ctx context.Context) (key []byte, val interface{}, err error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if i.limit > 0 && i.counter >= i.limit+i.offset { - return nil, nil, nil - } - - var k, vRaw []byte - switch { - case i.nextFn != nil: - k, vRaw = i.nextFn() - case len(i.prefix) > 0: - k, vRaw = i.cursor.Seek(i.prefix) - i.nextFn = i.cursor.Next - case i.descending: - k, vRaw = i.cursor.Last() - i.nextFn = i.cursor.Prev - default: - k, vRaw = i.cursor.First() - i.nextFn = i.cursor.Next - } - - k, decodedVal, err := i.decodeFn(k, vRaw) - for ; ; k, decodedVal, err = i.decodeFn(i.nextFn()) { - if err != nil { - return nil, nil, err - } - if i.isNext(k, decodedVal) { - break - } - } - return k, decodedVal, nil -} - -func (i *iterator) isNext(k []byte, v interface{}) bool { - if len(k) == 0 { - return true - } - - if i.filterFn != nil && !i.filterFn(k, v) { - return false - } - - // increase counter here since the entity is a valid ent - // and counts towards the total the user is looking for - // i.e. limit = 5 => 5 valid ents - // i.e. 
offset = 5 => return valid ents after seeing 5 valid ents - i.counter++ - - if i.limit > 0 && i.counter >= i.limit+i.offset { - return true - } - if i.offset > 0 && i.counter <= i.offset { - return false - } - return true -} - -func IsErrUnexpectedDecodeVal(ok bool) error { - if ok { - return nil - } - return errors.New("unexpected value decoded") -} diff --git a/kv/store_base_test.go b/kv/store_base_test.go deleted file mode 100644 index 3c757e21dab..00000000000 --- a/kv/store_base_test.go +++ /dev/null @@ -1,485 +0,0 @@ -package kv_test - -import ( - "context" - "encoding/json" - "fmt" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestStoreBase(t *testing.T) { - newStoreBase := func(t *testing.T, bktSuffix string, encKeyFn, encBodyFn kv.EncodeEntFn, decFn kv.DecodeBucketValFn, decToEntFn kv.ConvertValToEntFn) (*kv.StoreBase, func(), kv.Store) { - t.Helper() - - svc, done := itesting.NewTestBoltStore(t) - - bucket := []byte("foo_" + bktSuffix) - store := kv.NewStoreBase("foo", bucket, encKeyFn, encBodyFn, decFn, decToEntFn) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - migrationName := fmt.Sprintf("create bucket %q", string(bucket)) - migration.CreateBuckets(migrationName, bucket).Up(ctx, svc) - - return store, done, svc - } - - newFooStoreBase := func(t *testing.T, bktSuffix string) (*kv.StoreBase, func(), kv.Store) { - return newStoreBase(t, bktSuffix, kv.EncIDKey, kv.EncBodyJSON, decJSONFooFn, decFooEntFn) - } - - t.Run("Put", func(t *testing.T) { - t.Run("basic", func(t *testing.T) { - base, done, kvStore := newFooStoreBase(t, "put") - defer done() - - testPutBase(t, kvStore, base, base.BktName) - }) - - t.Run("new", func(t *testing.T) { - base, done, kvStore := newFooStoreBase(t, "put") - defer done() - - expected := newFooEnt(3, 33, "name3") - update(t, kvStore, func(tx kv.Tx) error { - return base.Put(context.TODO(), tx, expected, kv.PutNew()) - }) - - var actual interface{} - view(t, kvStore, func(tx kv.Tx) error { - f, err := base.FindEnt(context.TODO(), tx, kv.Entity{PK: expected.PK}) - actual = f - return err - }) - - assert.Equal(t, expected.Body, actual) - }) - - t.Run("update", func(t *testing.T) { - base, done, kvStore := newFooStoreBase(t, "put") - defer done() - - expected := testPutBase(t, kvStore, base, base.BktName) - - updateEnt := newFooEnt(expected.ID, expected.OrgID, "new name") - update(t, kvStore, func(tx kv.Tx) error { - return base.Put(context.TODO(), tx, updateEnt, kv.PutUpdate()) - }) - - var actual interface{} - view(t, kvStore, func(tx kv.Tx) error { - f, err := base.FindEnt(context.TODO(), tx, kv.Entity{PK: kv.EncID(expected.ID)}) - actual = f - return err - }) - - expected.Name = "new name" - assert.Equal(t, expected, actual) - }) - - t.Run("error cases", func(t *testing.T) { - t.Run("new entity conflicts with existing", func(t *testing.T) { - base, done, kvStore := newFooStoreBase(t, "put") - defer done() - - expected := testPutBase(t, kvStore, base, base.BktName) - - err := kvStore.Update(context.TODO(), func(tx kv.Tx) error { - entCopy := newFooEnt(expected.ID, expected.OrgID, expected.Name) - return base.Put(context.TODO(), tx, 
entCopy, kv.PutNew()) - }) - require.Error(t, err) - assert.Equal(t, errors.EConflict, errors.ErrorCode(err)) - }) - }) - - t.Run("updating entity that does not exist", func(t *testing.T) { - base, done, kvStore := newFooStoreBase(t, "put") - defer done() - - expected := testPutBase(t, kvStore, base, base.BktName) - - err := kvStore.Update(context.TODO(), func(tx kv.Tx) error { - // ent by id does not exist - entCopy := newFooEnt(333, expected.OrgID, "name1") - return base.Put(context.TODO(), tx, entCopy, kv.PutUpdate()) - }) - require.Error(t, err) - assert.Equal(t, errors.ENotFound, errors.ErrorCode(err)) - }) - }) - - t.Run("DeleteEnt", func(t *testing.T) { - base, done, kvStore := newFooStoreBase(t, "delete_ent") - defer done() - - testDeleteEntBase(t, kvStore, base) - }) - - t.Run("Delete", func(t *testing.T) { - testDeleteBase(t, func(t *testing.T, suffix string) (storeBase, func(), kv.Store) { - return newFooStoreBase(t, suffix) - }) - }) - - t.Run("FindEnt", func(t *testing.T) { - base, done, kvStore := newFooStoreBase(t, "find_ent") - defer done() - - testFindEnt(t, kvStore, base) - }) - - t.Run("Find", func(t *testing.T) { - testFind(t, func(t *testing.T, suffix string) (storeBase, func(), kv.Store) { - return newFooStoreBase(t, suffix) - }) - }) -} - -func testPutBase(t *testing.T, kvStore kv.Store, base storeBase, bktName []byte) foo { - t.Helper() - - expected := foo{ - ID: 1, - OrgID: 9000, - Name: "foo_1", - } - - update(t, kvStore, func(tx kv.Tx) error { - return base.Put(context.TODO(), tx, kv.Entity{ - PK: kv.EncID(expected.ID), - UniqueKey: kv.Encode(kv.EncID(expected.OrgID), kv.EncString(expected.Name)), - Body: expected, - }) - }) - - var actual foo - decodeJSON(t, getEntRaw(t, kvStore, bktName, encodeID(t, expected.ID)), &actual) - - assert.Equal(t, expected, actual) - - return expected -} - -func testDeleteEntBase(t *testing.T, kvStore kv.Store, base storeBase) kv.Entity { - t.Helper() - - expected := newFooEnt(1, 9000, "foo_1") - seedEnts(t, kvStore, base, expected) - - update(t, kvStore, func(tx kv.Tx) error { - return base.DeleteEnt(context.TODO(), tx, kv.Entity{PK: expected.PK}) - }) - - err := kvStore.View(context.TODO(), func(tx kv.Tx) error { - _, err := base.FindEnt(context.TODO(), tx, kv.Entity{PK: expected.PK}) - return err - }) - isNotFoundErr(t, err) - return expected -} - -func testDeleteBase(t *testing.T, fn func(t *testing.T, suffix string) (storeBase, func(), kv.Store), assertFns ...func(*testing.T, kv.Store, storeBase, []foo)) { - expectedEnts := []kv.Entity{ - newFooEnt(1, 9000, "foo_0"), - newFooEnt(2, 9000, "foo_1"), - newFooEnt(3, 9003, "foo_2"), - newFooEnt(4, 9004, "foo_3"), - } - - tests := []struct { - name string - opts kv.DeleteOpts - expected []interface{} - }{ - { - name: "delete all", - opts: kv.DeleteOpts{ - FilterFn: func(k []byte, v interface{}) bool { - return true - }, - }, - }, - { - name: "delete IDs less than 4", - opts: kv.DeleteOpts{ - FilterFn: func(k []byte, v interface{}) bool { - if f, ok := v.(foo); ok { - return f.ID < 4 - } - return true - }, - }, - expected: toIfaces(expectedEnts[3]), - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - t.Helper() - - base, done, kvStore := fn(t, "delete") - defer done() - - seedEnts(t, kvStore, base, expectedEnts...) 
- - update(t, kvStore, func(tx kv.Tx) error { - return base.Delete(context.TODO(), tx, tt.opts) - }) - - var actuals []interface{} - view(t, kvStore, func(tx kv.Tx) error { - return base.Find(context.TODO(), tx, kv.FindOpts{ - CaptureFn: func(key []byte, decodedVal interface{}) error { - actuals = append(actuals, decodedVal) - return nil - }, - }) - }) - - assert.Equal(t, tt.expected, actuals) - - var entsLeft []foo - for _, expected := range tt.expected { - ent, ok := expected.(foo) - require.Truef(t, ok, "got: %#v", expected) - entsLeft = append(entsLeft, ent) - } - - for _, assertFn := range assertFns { - assertFn(t, kvStore, base, entsLeft) - } - } - t.Run(tt.name, fn) - } -} - -func testFindEnt(t *testing.T, kvStore kv.Store, base storeBase) kv.Entity { - t.Helper() - - expected := newFooEnt(1, 9000, "foo_1") - seedEnts(t, kvStore, base, expected) - - var actual interface{} - view(t, kvStore, func(tx kv.Tx) error { - f, err := base.FindEnt(context.TODO(), tx, kv.Entity{PK: expected.PK}) - actual = f - return err - }) - - assert.Equal(t, expected.Body, actual) - - return expected -} - -func testFind(t *testing.T, fn func(t *testing.T, suffix string) (storeBase, func(), kv.Store)) { - t.Helper() - - expectedEnts := []kv.Entity{ - newFooEnt(1, 9000, "foo_0"), - newFooEnt(2000, 9000, "foo_1"), - newFooEnt(3000000, 9003, "foo_2"), - newFooEnt(4000000000, 9004, "foo_3"), - } - - tests := []struct { - name string - opts kv.FindOpts - expected []interface{} - }{ - { - name: "no options", - expected: toIfaces(expectedEnts...), - }, - { - name: "with order descending", - opts: kv.FindOpts{Descending: true}, - expected: reverseSlc(toIfaces(expectedEnts...)), - }, - { - name: "with limit", - opts: kv.FindOpts{Limit: 1}, - expected: toIfaces(expectedEnts[0]), - }, - { - name: "with offset", - opts: kv.FindOpts{Offset: 1}, - expected: toIfaces(expectedEnts[1:]...), - }, - { - name: "with offset and limit", - opts: kv.FindOpts{ - Limit: 1, - Offset: 1, - }, - expected: toIfaces(expectedEnts[1]), - }, - { - name: "with descending, offset, and limit", - opts: kv.FindOpts{ - Descending: true, - Limit: 1, - Offset: 1, - }, - expected: toIfaces(expectedEnts[2]), - }, - { - name: "with id prefix", - opts: kv.FindOpts{ - Prefix: encodeID(t, 3000000)[:platform.IDLength-5], - }, - expected: toIfaces(expectedEnts[2], expectedEnts[3]), - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - base, done, kvStore := fn(t, "find") - defer done() - - seedEnts(t, kvStore, base, expectedEnts...) 
- - var actuals []interface{} - tt.opts.CaptureFn = func(key []byte, decodedVal interface{}) error { - actuals = append(actuals, decodedVal) - return nil - } - - view(t, kvStore, func(tx kv.Tx) error { - return base.Find(context.TODO(), tx, tt.opts) - }) - - assert.Equal(t, tt.expected, actuals) - } - t.Run(tt.name, fn) - } -} - -type foo struct { - ID platform.ID - OrgID platform.ID - - Name string -} - -func decodeJSON(t *testing.T, b []byte, v interface{}) { - t.Helper() - require.NoError(t, json.Unmarshal(b, &v)) -} - -type storeBase interface { - Delete(ctx context.Context, tx kv.Tx, opts kv.DeleteOpts) error - DeleteEnt(ctx context.Context, tx kv.Tx, ent kv.Entity) error - FindEnt(ctx context.Context, tx kv.Tx, ent kv.Entity) (interface{}, error) - Find(ctx context.Context, tx kv.Tx, opts kv.FindOpts) error - Put(ctx context.Context, tx kv.Tx, ent kv.Entity, opts ...kv.PutOptionFn) error -} - -func seedEnts(t *testing.T, kvStore kv.Store, store storeBase, ents ...kv.Entity) { - t.Helper() - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - for _, ent := range ents { - update(t, kvStore, func(tx kv.Tx) error { return store.Put(ctx, tx, ent) }) - } -} - -func update(t *testing.T, kvStore kv.Store, fn func(tx kv.Tx) error) { - t.Helper() - - require.NoError(t, kvStore.Update(context.TODO(), fn)) -} - -func view(t *testing.T, kvStore kv.Store, fn func(tx kv.Tx) error) { - t.Helper() - require.NoError(t, kvStore.View(context.TODO(), fn)) -} - -func getEntRaw(t *testing.T, kvStore kv.Store, bktName []byte, key []byte) []byte { - t.Helper() - - var actualRaw []byte - err := kvStore.View(context.TODO(), func(tx kv.Tx) error { - b, err := tx.Bucket(bktName) - require.NoError(t, err) - - actualRaw, err = b.Get(key) - return err - }) - require.NoError(t, err) - return actualRaw -} - -func encodeID(t *testing.T, id platform.ID) []byte { - t.Helper() - - b, err := id.Encode() - require.NoError(t, err) - return b -} - -func decJSONFooFn(key, val []byte) ([]byte, interface{}, error) { - var f foo - if err := json.Unmarshal(val, &f); err != nil { - return nil, nil, err - } - return key, f, nil -} - -func decFooEntFn(k []byte, v interface{}) (kv.Entity, error) { - f, ok := v.(foo) - if !ok { - return kv.Entity{}, fmt.Errorf("invalid entry: %#v", v) - } - return kv.Entity{ - PK: kv.EncID(f.ID), - UniqueKey: kv.Encode(kv.EncID(f.OrgID), kv.EncString(f.Name)), - Body: f, - }, nil -} - -func newFooEnt(id, orgID platform.ID, name string) kv.Entity { - f := foo{ID: id, Name: name, OrgID: orgID} - return kv.Entity{ - PK: kv.EncID(f.ID), - UniqueKey: kv.Encode(kv.EncID(f.OrgID), kv.EncString(f.Name)), - Body: f, - } -} - -func isNotFoundErr(t *testing.T, err error) { - t.Helper() - - iErr, ok := err.(*errors.Error) - if !ok { - require.FailNowf(t, "expected an *influxdb.Error type", "got: %#v", err) - } - assert.Equal(t, errors.ENotFound, iErr.Code) -} - -func toIfaces(ents ...kv.Entity) []interface{} { - var actuals []interface{} - for _, ent := range ents { - actuals = append(actuals, ent.Body) - } - return actuals -} - -func reverseSlc(slc []interface{}) []interface{} { - for i, j := 0, len(slc)-1; i < j; i, j = i+1, j-1 { - slc[i], slc[j] = slc[j], slc[i] - } - return slc -} diff --git a/kv/store_index.go b/kv/store_index.go deleted file mode 100644 index e4f342448fe..00000000000 --- a/kv/store_index.go +++ /dev/null @@ -1,249 +0,0 @@ -package kv - -import ( - "bytes" - "context" - "errors" - "fmt" - - ierrors 
"github.com/influxdata/influxdb/v2/kit/errors" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" -) - -// IndexStore provides a entity store that uses an index lookup. -// The index store manages deleting and creating indexes for the -// caller. The index is automatically used if the FindEnt entity -// entity does not have the primary key. -type IndexStore struct { - Resource string - EntStore *StoreBase - IndexStore *StoreBase -} - -// Delete deletes entities and associated indexes. -func (s *IndexStore) Delete(ctx context.Context, tx Tx, opts DeleteOpts) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - deleteIndexedRelationFn := func(k []byte, v interface{}) error { - ent, err := s.EntStore.ConvertValToEntFn(k, v) - if err != nil { - return err - } - return s.IndexStore.DeleteEnt(ctx, tx, ent) - } - opts.DeleteRelationFns = append(opts.DeleteRelationFns, deleteIndexedRelationFn) - return s.EntStore.Delete(ctx, tx, opts) -} - -// DeleteEnt deletes an entity and associated index. -func (s *IndexStore) DeleteEnt(ctx context.Context, tx Tx, ent Entity) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - existing, err := s.FindEnt(ctx, tx, ent) - if err != nil { - return err - } - - if err := s.EntStore.DeleteEnt(ctx, tx, ent); err != nil { - return err - } - - decodedEnt, err := s.EntStore.ConvertValToEntFn(nil, existing) - if err != nil { - return err - } - - return s.IndexStore.DeleteEnt(ctx, tx, decodedEnt) -} - -// Find provides a mechanism for looking through the bucket via -// the set options. When a prefix is provided, it will be used within -// the entity store. If you would like to search the index store, then -// you can by calling the index store directly. -func (s *IndexStore) Find(ctx context.Context, tx Tx, opts FindOpts) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - return s.EntStore.Find(ctx, tx, opts) -} - -// FindEnt returns the decoded entity body via teh provided entity. -// An example entity should not include a Body, but rather the ID, -// Name, or OrgID. If no ID is provided, then the algorithm assumes -// you are looking up the entity by the index. -func (s *IndexStore) FindEnt(ctx context.Context, tx Tx, ent Entity) (interface{}, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - _, err := s.EntStore.EntKey(ctx, ent) - if err != nil { - if _, idxErr := s.IndexStore.EntKey(ctx, ent); idxErr != nil { - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "no key was provided for " + s.Resource, - } - } - } - if err != nil { - return s.findByIndex(ctx, tx, ent) - } - return s.EntStore.FindEnt(ctx, tx, ent) -} - -func (s *IndexStore) findByIndex(ctx context.Context, tx Tx, ent Entity) (interface{}, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - idxEncodedID, err := s.IndexStore.FindEnt(ctx, tx, ent) - if err != nil { - return nil, err - } - - indexKey, err := s.IndexStore.EntKey(ctx, ent) - if err != nil { - return nil, err - } - - indexEnt, err := s.IndexStore.ConvertValToEntFn(indexKey, idxEncodedID) - if err != nil { - return nil, err - } - - return s.EntStore.FindEnt(ctx, tx, indexEnt) -} - -// Put will persist the entity into both the entity store and the index store. 
-func (s *IndexStore) Put(ctx context.Context, tx Tx, ent Entity, opts ...PutOptionFn) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var opt putOption - for _, o := range opts { - if err := o(&opt); err != nil { - return &errors2.Error{ - Code: errors2.EConflict, - Err: err, - } - } - } - - if err := s.putValidate(ctx, tx, ent, opt); err != nil { - return err - } - - if err := s.IndexStore.Put(ctx, tx, ent); err != nil { - return err - } - - return s.EntStore.Put(ctx, tx, ent) -} - -func (s *IndexStore) putValidate(ctx context.Context, tx Tx, ent Entity, opt putOption) error { - if opt.isNew { - return s.validNew(ctx, tx, ent) - } - if opt.isUpdate { - return s.validUpdate(ctx, tx, ent) - } - return nil -} - -func (s *IndexStore) validNew(ctx context.Context, tx Tx, ent Entity) error { - _, err := s.IndexStore.FindEnt(ctx, tx, ent) - if err == nil || errors2.ErrorCode(err) != errors2.ENotFound { - key, _ := s.IndexStore.EntKey(ctx, ent) - return &errors2.Error{ - Code: errors2.EConflict, - Msg: fmt.Sprintf("%s is not unique for key %s", s.Resource, string(key)), - Err: err, - } - } - - _, err = s.EntStore.FindEnt(ctx, tx, ent) - if err == nil || errors2.ErrorCode(err) != errors2.ENotFound { - return &errors2.Error{Code: errors2.EConflict, Err: err} - } - return nil -} - -func (s *IndexStore) validUpdate(ctx context.Context, tx Tx, ent Entity) (e error) { - // first check to make sure the existing entity exists in the ent store - existingVal, err := s.EntStore.FindEnt(ctx, tx, Entity{PK: ent.PK}) - if err != nil { - return err - } - - defer func() { - if e != nil { - return - } - // we need to cleanup the unique key entry when this is deemed - // a valid update - pk, err := ent.PK() - if err != nil { - e = ierrors.Wrap(err, "failed to encode PK") - return - } - existingEnt, err := s.EntStore.ConvertValToEntFn(pk, existingVal) - if err != nil { - e = ierrors.Wrap(err, "failed to convert value") - return - } - e = s.IndexStore.DeleteEnt(ctx, tx, existingEnt) - }() - - idxVal, err := s.IndexStore.FindEnt(ctx, tx, ent) - if err != nil { - if errors2.ErrorCode(err) == errors2.ENotFound { - return nil - } - return err - } - - idxKey, err := s.IndexStore.EntKey(ctx, ent) - if err != nil { - return err - } - - indexEnt, err := s.IndexStore.ConvertValToEntFn(idxKey, idxVal) - if err != nil { - return err - } - - if err := sameKeys(ent.PK, indexEnt.PK); err != nil { - if _, err := s.EntStore.FindEnt(ctx, tx, ent); errors2.ErrorCode(err) == errors2.ENotFound { - key, _ := ent.PK() - return &errors2.Error{ - Code: errors2.ENotFound, - Msg: fmt.Sprintf("%s does not exist for key %s", s.Resource, string(key)), - Err: err, - } - } - key, _ := indexEnt.UniqueKey() - return &errors2.Error{ - Code: errors2.EConflict, - Msg: fmt.Sprintf("%s entity update conflicts with an existing entity for key %s", s.Resource, string(key)), - } - } - - return nil -} - -func sameKeys(key1, key2 EncodeFn) error { - pk1, err := key1() - if err != nil { - return err - } - pk2, err := key2() - if err != nil { - return err - } - - if !bytes.Equal(pk1, pk2) { - return errors.New("keys differ") - } - return nil -} diff --git a/kv/store_index_test.go b/kv/store_index_test.go deleted file mode 100644 index f65af057f6a..00000000000 --- a/kv/store_index_test.go +++ /dev/null @@ -1,301 +0,0 @@ -package kv_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - 
"github.com/influxdata/influxdb/v2/kv/migration" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestIndexStore(t *testing.T) { - newStoreBase := func(resource string, bktName []byte, encKeyFn, encBodyFn kv.EncodeEntFn, decFn kv.DecodeBucketValFn, decToEntFn kv.ConvertValToEntFn) *kv.StoreBase { - return kv.NewStoreBase(resource, bktName, encKeyFn, encBodyFn, decFn, decToEntFn) - } - - newFooIndexStore := func(t *testing.T, bktSuffix string) (*kv.IndexStore, func(), kv.Store) { - t.Helper() - - kvStoreStore, done := itesting.NewTestBoltStore(t) - - const resource = "foo" - - bucketName := []byte("foo_ent_" + bktSuffix) - indexBucketName := []byte("foo_idx+" + bktSuffix) - - ctx := context.Background() - if err := migration.CreateBuckets("add foo buckets", bucketName, indexBucketName).Up(ctx, kvStoreStore); err != nil { - t.Fatal(err) - } - - indexStore := &kv.IndexStore{ - Resource: resource, - EntStore: newStoreBase(resource, bucketName, kv.EncIDKey, kv.EncBodyJSON, decJSONFooFn, decFooEntFn), - IndexStore: kv.NewOrgNameKeyStore(resource, indexBucketName, false), - } - - return indexStore, done, kvStoreStore - } - - t.Run("Put", func(t *testing.T) { - t.Run("basic", func(t *testing.T) { - indexStore, done, kvStore := newFooIndexStore(t, "put") - defer done() - - expected := testPutBase(t, kvStore, indexStore, indexStore.EntStore.BktName) - - key, err := indexStore.IndexStore.EntKey(context.TODO(), kv.Entity{ - UniqueKey: kv.Encode(kv.EncID(expected.OrgID), kv.EncString(expected.Name)), - }) - require.NoError(t, err) - - rawIndex := getEntRaw(t, kvStore, indexStore.IndexStore.BktName, key) - assert.Equal(t, encodeID(t, expected.ID), rawIndex) - }) - - t.Run("new entity", func(t *testing.T) { - indexStore, done, kvStore := newFooIndexStore(t, "put") - defer done() - - expected := foo{ID: 3, OrgID: 33, Name: "333"} - update(t, kvStore, func(tx kv.Tx) error { - ent := newFooEnt(expected.ID, expected.OrgID, expected.Name) - return indexStore.Put(context.TODO(), tx, ent, kv.PutNew()) - }) - - key, err := indexStore.IndexStore.EntKey(context.TODO(), kv.Entity{ - UniqueKey: kv.Encode(kv.EncID(expected.OrgID), kv.EncString(expected.Name)), - }) - require.NoError(t, err) - - rawIndex := getEntRaw(t, kvStore, indexStore.IndexStore.BktName, key) - assert.Equal(t, encodeID(t, expected.ID), rawIndex) - }) - - t.Run("updating entity that doesn't exist returns not found error", func(t *testing.T) { - indexStore, done, kvStore := newFooIndexStore(t, "put") - defer done() - - expected := testPutBase(t, kvStore, indexStore, indexStore.EntStore.BktName) - - err := kvStore.Update(context.Background(), func(tx kv.Tx) error { - ent := newFooEnt(33333, expected.OrgID, "safe name") - return indexStore.Put(context.TODO(), tx, ent, kv.PutUpdate()) - }) - require.Error(t, err) - assert.Equal(t, errors.ENotFound, errors.ErrorCode(err)) - }) - - t.Run("updating entity with no naming collision succeeds", func(t *testing.T) { - indexStore, done, kvStore := newFooIndexStore(t, "put") - defer done() - - expected := testPutBase(t, kvStore, indexStore, indexStore.EntStore.BktName) - - update(t, kvStore, func(tx kv.Tx) error { - entCopy := newFooEnt(expected.ID, expected.OrgID, "safe name") - return indexStore.Put(context.TODO(), tx, entCopy, kv.PutUpdate()) - }) - - err := kvStore.View(context.TODO(), func(tx kv.Tx) error { - _, err := indexStore.FindEnt(context.TODO(), tx, kv.Entity{ - PK: 
kv.EncID(expected.ID), - UniqueKey: kv.Encode(kv.EncID(expected.OrgID), kv.EncString(expected.Name)), - }) - return err - }) - require.NoError(t, err) - }) - - t.Run("updating an existing entity to a new unique identifier should delete the existing unique key", func(t *testing.T) { - indexStore, done, kvStore := newFooIndexStore(t, "put") - defer done() - - expected := testPutBase(t, kvStore, indexStore, indexStore.EntStore.BktName) - - update(t, kvStore, func(tx kv.Tx) error { - entCopy := newFooEnt(expected.ID, expected.OrgID, "safe name") - return indexStore.Put(context.TODO(), tx, entCopy, kv.PutUpdate()) - }) - - update(t, kvStore, func(tx kv.Tx) error { - ent := newFooEnt(33, expected.OrgID, expected.Name) - return indexStore.Put(context.TODO(), tx, ent, kv.PutNew()) - }) - }) - - t.Run("error cases", func(t *testing.T) { - t.Run("new entity conflicts with existing", func(t *testing.T) { - indexStore, done, kvStore := newFooIndexStore(t, "put") - defer done() - - expected := testPutBase(t, kvStore, indexStore, indexStore.EntStore.BktName) - - err := kvStore.Update(context.TODO(), func(tx kv.Tx) error { - entCopy := newFooEnt(expected.ID, expected.OrgID, expected.Name) - return indexStore.Put(context.TODO(), tx, entCopy, kv.PutNew()) - }) - require.Error(t, err) - assert.Equal(t, errors.EConflict, errors.ErrorCode(err)) - }) - - t.Run("updating entity that does not exist", func(t *testing.T) { - indexStore, done, kvStore := newFooIndexStore(t, "put") - defer done() - - expected := testPutBase(t, kvStore, indexStore, indexStore.EntStore.BktName) - - update(t, kvStore, func(tx kv.Tx) error { - ent := newFooEnt(9000, expected.OrgID, "name1") - return indexStore.Put(context.TODO(), tx, ent, kv.PutNew()) - }) - - err := kvStore.Update(context.TODO(), func(tx kv.Tx) error { - // ent by id does not exist - entCopy := newFooEnt(333, expected.OrgID, "name1") - return indexStore.Put(context.TODO(), tx, entCopy, kv.PutUpdate()) - }) - require.Error(t, err) - assert.Equal(t, errors.ENotFound, errors.ErrorCode(err), "got: "+err.Error()) - }) - - t.Run("updating entity that does collides with an existing entity", func(t *testing.T) { - indexStore, done, kvStore := newFooIndexStore(t, "put") - defer done() - - expected := testPutBase(t, kvStore, indexStore, indexStore.EntStore.BktName) - - update(t, kvStore, func(tx kv.Tx) error { - ent := newFooEnt(9000, expected.OrgID, "name1") - return indexStore.Put(context.TODO(), tx, ent, kv.PutNew()) - }) - - err := kvStore.Update(context.TODO(), func(tx kv.Tx) error { - // name conflicts - entCopy := newFooEnt(expected.ID, expected.OrgID, "name1") - return indexStore.Put(context.TODO(), tx, entCopy, kv.PutUpdate()) - }) - require.Error(t, err) - assert.Equal(t, errors.EConflict, errors.ErrorCode(err)) - assert.Contains(t, err.Error(), "update conflicts") - }) - }) - }) - - t.Run("DeleteEnt", func(t *testing.T) { - indexStore, done, kvStore := newFooIndexStore(t, "delete_ent") - defer done() - - expected := testDeleteEntBase(t, kvStore, indexStore) - - err := kvStore.View(context.TODO(), func(tx kv.Tx) error { - _, err := indexStore.IndexStore.FindEnt(context.TODO(), tx, kv.Entity{ - UniqueKey: expected.UniqueKey, - }) - return err - }) - isNotFoundErr(t, err) - }) - - t.Run("Delete", func(t *testing.T) { - fn := func(t *testing.T, suffix string) (storeBase, func(), kv.Store) { - return newFooIndexStore(t, suffix) - } - - testDeleteBase(t, fn, func(t *testing.T, kvStore kv.Store, base storeBase, foosLeft []foo) { - var expectedIndexIDs []interface{} - for _, 
ent := range foosLeft { - expectedIndexIDs = append(expectedIndexIDs, ent.ID) - } - - indexStore, ok := base.(*kv.IndexStore) - require.True(t, ok) - - // next to verify they are not within the index store - var actualIDs []interface{} - view(t, kvStore, func(tx kv.Tx) error { - return indexStore.IndexStore.Find(context.TODO(), tx, kv.FindOpts{ - CaptureFn: func(key []byte, decodedVal interface{}) error { - actualIDs = append(actualIDs, decodedVal) - return nil - }, - }) - }) - - assert.Equal(t, expectedIndexIDs, actualIDs) - }) - }) - - t.Run("FindEnt", func(t *testing.T) { - t.Run("by ID", func(t *testing.T) { - base, done, kvStoreStore := newFooIndexStore(t, "find_ent") - defer done() - testFindEnt(t, kvStoreStore, base) - }) - - t.Run("find by name", func(t *testing.T) { - base, done, kvStore := newFooIndexStore(t, "find_ent") - defer done() - - expected := newFooEnt(1, 9000, "foo_1") - seedEnts(t, kvStore, base, expected) - - var actual interface{} - view(t, kvStore, func(tx kv.Tx) error { - f, err := base.FindEnt(context.TODO(), tx, kv.Entity{ - UniqueKey: expected.UniqueKey, - }) - actual = f - return err - }) - - assert.Equal(t, expected.Body, actual) - }) - }) - - t.Run("Find", func(t *testing.T) { - t.Run("base", func(t *testing.T) { - fn := func(t *testing.T, suffix string) (storeBase, func(), kv.Store) { - return newFooIndexStore(t, suffix) - } - - testFind(t, fn) - }) - - t.Run("with entity filter", func(t *testing.T) { - base, done, kvStore := newFooIndexStore(t, "find_index_search") - defer done() - - expectedEnts := []kv.Entity{ - newFooEnt(1, 9000, "foo_0"), - newFooEnt(2, 9001, "foo_1"), - newFooEnt(3, 9003, "foo_2"), - } - - seedEnts(t, kvStore, base, expectedEnts...) - - var actuals []interface{} - view(t, kvStore, func(tx kv.Tx) error { - return base.Find(context.TODO(), tx, kv.FindOpts{ - FilterEntFn: func(key []byte, decodedVal interface{}) bool { - return decodedVal.(foo).ID < 3 - }, - CaptureFn: func(key []byte, decodedVal interface{}) error { - actuals = append(actuals, decodedVal) - return nil - }, - }) - }) - - expected := []interface{}{ - expectedEnts[0].Body, - expectedEnts[1].Body, - } - assert.Equal(t, expected, actuals) - }) - }) -} diff --git a/kv/task.go b/kv/task.go deleted file mode 100644 index 924ed705ef5..00000000000 --- a/kv/task.go +++ /dev/null @@ -1,1646 +0,0 @@ -package kv - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/resource" - "github.com/influxdata/influxdb/v2/task/options" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -// Task Storage Schema -// taskBucket: -// : task data storage -// taskRunBucket: -// /: run data storage -// /manualRuns: list of runs to run manually -// /latestCompleted: run data for the latest completed run of a task -// taskIndexBucket -// /: index for tasks by org - -// We may want to add a / index to allow us to look up tasks by task name. 
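
The storage-schema comment above lost its angle-bracket placeholders (`<taskID>`, `<orgID>`, `<runID>`) in extraction. The layout it describes can be reconstructed from the key helpers defined near the end of this deleted file (`taskKey`, `taskOrgKey`, `taskRunKey`, `taskManualRunKey`, `taskLatestCompletedKey`) together with the bucket names declared just below. The following is a minimal, editorial sketch of that key layout; it is not part of `kv/task.go`, and the literal ID strings are stand-ins for the hex-encoded values that `platform.ID.Encode()` would produce.

```go
// Illustrative sketch of the task KV key layout (editorial example, not part of kv/task.go).
// The placeholder strings below stand in for hex-encoded platform.IDs.
package main

import "fmt"

func main() {
	encodedOrgID := "0000000000000aaa"  // stand-in for orgID.Encode()
	encodedTaskID := "0000000000000bbb" // stand-in for taskID.Encode()
	encodedRunID := "0000000000000ccc"  // stand-in for runID.Encode()

	// tasksv1 bucket: <taskID> -> task JSON
	taskKey := encodedTaskID

	// taskIndexsv1 bucket: <orgID>/<taskID> -> <taskID>, used to list tasks by org
	taskOrgKey := encodedOrgID + "/" + encodedTaskID

	// taskRunsv1 bucket:
	//   <taskID>/<runID>         -> run JSON
	//   <taskID>/manualRuns      -> JSON array of runs queued to run manually
	//   <taskID>/latestCompleted -> run JSON for the latest completed run of the task
	taskRunKey := encodedTaskID + "/" + encodedRunID
	taskManualRunKey := encodedTaskID + "/manualRuns"
	taskLatestCompletedKey := encodedTaskID + "/latestCompleted"

	fmt.Println(taskKey, taskOrgKey, taskRunKey, taskManualRunKey, taskLatestCompletedKey)
}
```

Composite keys are plain string concatenations with a `/` separator, which is why the org index can be scanned with a cursor prefixed on the encoded org ID and why run lookups seek on the encoded task ID prefix, as the functions below do.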
- -var ( - taskBucket = []byte("tasksv1") - taskRunBucket = []byte("taskRunsv1") - taskIndexBucket = []byte("taskIndexsv1") -) - -var _ taskmodel.TaskService = (*Service)(nil) - -type matchableTask interface { - GetID() platform.ID - GetOrgID() platform.ID - GetOwnerID() platform.ID - GetType() string - GetName() string - GetStatus() string - ToInfluxDB() *taskmodel.Task -} - -type basicKvTask struct { - ID platform.ID `json:"id"` - Type string `json:"type,omitempty"` - OrganizationID platform.ID `json:"orgID"` - OwnerID platform.ID `json:"ownerID"` - Name string `json:"name"` - Description string `json:"description,omitempty"` - Status string `json:"status"` - Every string `json:"every,omitempty"` - Cron string `json:"cron,omitempty"` - LastRunStatus string `json:"lastRunStatus,omitempty"` - LastRunError string `json:"lastRunError,omitempty"` - Offset influxdb.Duration `json:"offset,omitempty"` - LatestCompleted time.Time `json:"latestCompleted,omitempty"` - LatestScheduled time.Time `json:"latestScheduled,omitempty"` - LatestSuccess time.Time `json:"latestSuccess,omitempty"` - LatestFailure time.Time `json:"latestFailure,omitempty"` -} - -func (kv basicKvTask) GetID() platform.ID { - return kv.ID -} - -func (kv basicKvTask) GetOrgID() platform.ID { - return kv.OrganizationID -} - -func (kv basicKvTask) GetOwnerID() platform.ID { - return kv.OwnerID -} - -func (kv basicKvTask) GetType() string { - return kv.Type -} - -func (kv basicKvTask) GetName() string { - return kv.Name -} - -func (kv basicKvTask) GetStatus() string { - return kv.Status -} - -func (kv basicKvTask) ToInfluxDB() *taskmodel.Task { - return &taskmodel.Task{ - ID: kv.ID, - Type: kv.Type, - OrganizationID: kv.OrganizationID, - OwnerID: kv.OwnerID, - Name: kv.Name, - Description: kv.Description, - Status: kv.Status, - Every: kv.Every, - Cron: kv.Cron, - LastRunStatus: kv.LastRunStatus, - LastRunError: kv.LastRunError, - Offset: kv.Offset.Duration, - LatestCompleted: kv.LatestCompleted, - LatestScheduled: kv.LatestScheduled, - LatestSuccess: kv.LatestSuccess, - LatestFailure: kv.LatestFailure, - } -} - -type kvTask struct { - basicKvTask - Organization string `json:"org"` - Flux string `json:"flux"` - CreatedAt time.Time `json:"createdAt,omitempty"` - UpdatedAt time.Time `json:"updatedAt,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -func (kv kvTask) ToInfluxDB() *taskmodel.Task { - res := kv.basicKvTask.ToInfluxDB() - res.Organization = kv.Organization - res.Flux = kv.Flux - res.CreatedAt = kv.CreatedAt - res.UpdatedAt = kv.UpdatedAt - res.Metadata = kv.Metadata - return res -} - -// FindTaskByID returns a single task -func (s *Service) FindTaskByID(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - var t *taskmodel.Task - err := s.kv.View(ctx, func(tx Tx) error { - task, err := s.findTaskByID(ctx, tx, id, false) - if err != nil { - return err - } - t = task.ToInfluxDB() - return nil - }) - if err != nil { - return nil, err - } - - return t, nil -} - -// findTaskByID is an internal method used to do any action with tasks internally -// that do not require authorization. 
-func (s *Service) findTaskByID(ctx context.Context, tx Tx, id platform.ID, basicOnly bool) (matchableTask, error) { - taskKey, err := taskKey(id) - if err != nil { - return nil, err - } - - b, err := tx.Bucket(taskBucket) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - v, err := b.Get(taskKey) - if IsNotFound(err) { - return nil, taskmodel.ErrTaskNotFound - } - if err != nil { - return nil, err - } - var t matchableTask - if basicOnly { - t = &basicKvTask{} - } else { - t = &kvTask{} - } - if err := json.Unmarshal(v, t); err != nil { - return nil, taskmodel.ErrInternalTaskServiceError(err) - } - - return t, nil -} - -// FindTasks returns a list of tasks that match a filter (limit 100) and the total count -// of matching tasks. -func (s *Service) FindTasks(ctx context.Context, filter taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - if filter.Organization != "" { - org, err := s.orgs.FindOrganization(ctx, influxdb.OrganizationFilter{ - Name: &filter.Organization, - }) - if err != nil { - return nil, 0, err - } - - filter.OrganizationID = &org.ID - } - - var ts []*taskmodel.Task - err := s.kv.View(ctx, func(tx Tx) error { - tasks, _, err := s.findTasks(ctx, tx, filter) - if err != nil { - return err - } - ts = tasks - return nil - }) - if err != nil { - return nil, 0, err - } - - return ts, len(ts), nil -} - -func (s *Service) findTasks(ctx context.Context, tx Tx, filter taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - // complain about limits - if filter.Limit < 0 { - return nil, 0, taskmodel.ErrPageSizeTooSmall - } - if filter.Limit > taskmodel.TaskMaxPageSize { - return nil, 0, taskmodel.ErrPageSizeTooLarge - } - if filter.Limit == 0 { - filter.Limit = taskmodel.TaskDefaultPageSize - } - - // if no user or organization is passed, assume contexts auth is the user we are looking for. - // it is possible for a internal system to call this with no auth so we shouldnt fail if no auth is found. - if filter.OrganizationID == nil && filter.User == nil { - userAuth, err := icontext.GetAuthorizer(ctx) - if err == nil { - userID := userAuth.GetUserID() - if userID.Valid() { - filter.User = &userID - } - } - } - - // filter by user id. - if filter.User != nil { - return s.findTasksByUser(ctx, tx, filter) - } else if filter.OrganizationID != nil { - return s.findTasksByOrg(ctx, tx, *filter.OrganizationID, filter) - } - - return s.findAllTasks(ctx, tx, filter) -} - -// findTasksByUser is a subset of the find tasks function. Used for cleanliness -func (s *Service) findTasksByUser(ctx context.Context, tx Tx, filter taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - var ts []*taskmodel.Task - - taskBucket, err := tx.Bucket(taskBucket) - if err != nil { - return nil, 0, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - var ( - seek []byte - opts []CursorOption - ) - - if filter.After != nil { - seek, err = taskKey(*filter.After) - if err != nil { - return nil, 0, err - } - - opts = append(opts, WithCursorSkipFirstItem()) - } - - c, err := taskBucket.ForwardCursor(seek, opts...) 
- if err != nil { - return nil, 0, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - matchFn := newTaskMatchFn(filter) - - for k, v := c.Next(); k != nil; k, v = c.Next() { - var task matchableTask - if filter.Type != nil && *filter.Type == taskmodel.TaskBasicType { - task = &basicKvTask{} - } else { - task = &kvTask{} - } - if err := json.Unmarshal(v, task); err != nil { - return nil, 0, taskmodel.ErrInternalTaskServiceError(err) - } - - if matchFn == nil || matchFn(task) { - ts = append(ts, task.ToInfluxDB()) - - if len(ts) >= filter.Limit { - break - } - } - } - if err := c.Err(); err != nil { - return nil, 0, err - } - - return ts, len(ts), c.Close() -} - -// findTasksByOrg is a subset of the find tasks function. Used for cleanliness -func (s *Service) findTasksByOrg(ctx context.Context, tx Tx, orgID platform.ID, filter taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - var err error - if !orgID.Valid() { - return nil, 0, fmt.Errorf("finding tasks by organization ID: %w", platform.ErrInvalidID) - } - - var ts []*taskmodel.Task - - indexBucket, err := tx.Bucket(taskIndexBucket) - if err != nil { - return nil, 0, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - prefix, err := orgID.Encode() - if err != nil { - return nil, 0, taskmodel.ErrInvalidTaskID - } - - var ( - key = prefix - opts []CursorOption - ) - // we can filter by orgID - if filter.After != nil { - key, err = taskOrgKey(orgID, *filter.After) - if err != nil { - return nil, 0, err - } - - opts = append(opts, WithCursorSkipFirstItem()) - } - - c, err := indexBucket.ForwardCursor( - key, - append(opts, WithCursorPrefix(prefix))..., - ) - if err != nil { - return nil, 0, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - // free cursor resources - defer c.Close() - - matchFn := newTaskMatchFn(filter) - - for k, v := c.Next(); k != nil; k, v = c.Next() { - id, err := platform.IDFromString(string(v)) - if err != nil { - return nil, 0, taskmodel.ErrInvalidTaskID - } - - t, err := s.findTaskByID(ctx, tx, *id, filter.Type != nil && *filter.Type == taskmodel.TaskBasicType) - if err != nil { - if err == taskmodel.ErrTaskNotFound { - // we might have some crufty index's - err = nil - continue - } - return nil, 0, err - } - - // If the new task doesn't belong to the org we have looped outside the org filter - if t.GetOrgID() != orgID { - break - } - - if matchFn == nil || matchFn(t) { - ts = append(ts, t.ToInfluxDB()) - // Check if we are over running the limit - if len(ts) >= filter.Limit { - break - } - } - } - - return ts, len(ts), c.Err() -} - -type taskMatchFn func(matchableTask) bool - -// newTaskMatchFn returns a function for validating -// a task matches the filter. Will return nil if -// the filter should match all tasks. 
-func newTaskMatchFn(f taskmodel.TaskFilter) taskMatchFn { - if f.Type == nil && f.Name == nil && f.Status == nil && f.User == nil { - return nil - } - - return func(t matchableTask) bool { - if f.Type != nil { - expected := *f.Type - if expected == taskmodel.TaskBasicType { - // "basic" type == get "system" type tasks, but without full metadata - expected = taskmodel.TaskSystemType - } - typ := t.GetType() - // Default to "system" for old tasks without a persisted type - if typ == "" { - typ = taskmodel.TaskSystemType - } - if expected != typ { - return false - } - } - if f.Name != nil && t.GetName() != *f.Name { - return false - } - if f.Status != nil && t.GetStatus() != *f.Status { - return false - } - if f.User != nil && t.GetOwnerID() != *f.User { - return false - } - - return true - } -} - -// findAllTasks is a subset of the find tasks function. Used for cleanliness. -// This function should only be executed internally because it doesn't force organization or user filtering. -// Enforcing filters should be done in a validation layer. -func (s *Service) findAllTasks(ctx context.Context, tx Tx, filter taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - var ts []*taskmodel.Task - taskBucket, err := tx.Bucket(taskBucket) - if err != nil { - return nil, 0, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - var ( - seek []byte - opts []CursorOption - ) - - if filter.After != nil { - seek, err = taskKey(*filter.After) - if err != nil { - return nil, 0, err - } - - opts = append(opts, WithCursorSkipFirstItem()) - } - - c, err := taskBucket.ForwardCursor(seek, opts...) - if err != nil { - return nil, 0, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - // free cursor resources - defer c.Close() - - matchFn := newTaskMatchFn(filter) - - for k, v := c.Next(); k != nil; k, v = c.Next() { - var task matchableTask - if filter.Type != nil && *filter.Type == taskmodel.TaskBasicType { - task = &basicKvTask{} - } else { - task = &kvTask{} - } - if err := json.Unmarshal(v, task); err != nil { - return nil, 0, taskmodel.ErrInternalTaskServiceError(err) - } - - if matchFn == nil || matchFn(task) { - ts = append(ts, task.ToInfluxDB()) - - if len(ts) >= filter.Limit { - break - } - } - } - - if err := c.Err(); err != nil { - return nil, 0, err - } - - return ts, len(ts), err -} - -// CreateTask creates a new task. -// The owner of the task is inferred from the authorizer associated with ctx. -func (s *Service) CreateTask(ctx context.Context, tc taskmodel.TaskCreate) (*taskmodel.Task, error) { - var orgFilter influxdb.OrganizationFilter - - if tc.Organization != "" { - orgFilter.Name = &tc.Organization - } else if tc.OrganizationID.Valid() { - orgFilter.ID = &tc.OrganizationID - - } else { - return nil, errors.New("organization required") - } - - org, err := s.orgs.FindOrganization(ctx, orgFilter) - if err != nil { - return nil, err - } - - var t *taskmodel.Task - err = s.kv.Update(ctx, func(tx Tx) error { - task, err := s.createTask(ctx, tx, org, tc) - if err != nil { - return err - } - t = task - return nil - }) - - return t, err -} - -func (s *Service) createTask(ctx context.Context, tx Tx, org *influxdb.Organization, tc taskmodel.TaskCreate) (*taskmodel.Task, error) { - // TODO: Uncomment this once the checks/notifications no longer create tasks in kv - // confirm the owner is a real user. 
- // if _, err = s.findUserByID(ctx, tx, tc.OwnerID); err != nil { - // return nil, influxdb.ErrInvalidOwnerID - // } - - opts, err := options.FromScriptAST(s.FluxLanguageService, tc.Flux) - if err != nil { - return nil, taskmodel.ErrTaskOptionParse(err) - } - - if tc.Status == "" { - tc.Status = string(taskmodel.TaskActive) - } - - createdAt := s.clock.Now().Truncate(time.Second).UTC() - task := &taskmodel.Task{ - ID: s.IDGenerator.ID(), - Type: tc.Type, - OrganizationID: org.ID, - Organization: org.Name, - OwnerID: tc.OwnerID, - Metadata: tc.Metadata, - Name: opts.Name, - Description: tc.Description, - Status: tc.Status, - Flux: tc.Flux, - Every: opts.Every.String(), - Cron: opts.Cron, - CreatedAt: createdAt, - LatestCompleted: createdAt, - LatestScheduled: createdAt, - } - - if opts.Offset != nil { - off, err := time.ParseDuration(opts.Offset.String()) - if err != nil { - return nil, taskmodel.ErrTaskTimeParse(err) - } - task.Offset = off - - } - - taskBucket, err := tx.Bucket(taskBucket) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - indexBucket, err := tx.Bucket(taskIndexBucket) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - taskBytes, err := json.Marshal(task) - if err != nil { - return nil, taskmodel.ErrInternalTaskServiceError(err) - } - - taskKey, err := taskKey(task.ID) - if err != nil { - return nil, err - } - - orgKey, err := taskOrgKey(task.OrganizationID, task.ID) - if err != nil { - return nil, err - } - - // write the task - err = taskBucket.Put(taskKey, taskBytes) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - // write the org index - err = indexBucket.Put(orgKey, taskKey) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - uid, _ := icontext.GetUserID(ctx) - if err := s.audit.Log(resource.Change{ - Type: resource.Create, - ResourceID: task.ID, - ResourceType: influxdb.TasksResourceType, - OrganizationID: task.OrganizationID, - UserID: uid, - ResourceBody: taskBytes, - Time: time.Now(), - }); err != nil { - return nil, err - } - - return task, nil -} - -// UpdateTask updates a single task with changeset. 
-func (s *Service) UpdateTask(ctx context.Context, id platform.ID, upd taskmodel.TaskUpdate) (*taskmodel.Task, error) { - var t *taskmodel.Task - err := s.kv.Update(ctx, func(tx Tx) error { - task, err := s.updateTask(ctx, tx, id, upd) - if err != nil { - return err - } - t = task - return nil - }) - if err != nil { - return nil, err - } - - return t, nil -} - -func (s *Service) updateTask(ctx context.Context, tx Tx, id platform.ID, upd taskmodel.TaskUpdate) (*taskmodel.Task, error) { - // retrieve the task - t, err := s.findTaskByID(ctx, tx, id, false) - if err != nil { - return nil, err - } - task := t.ToInfluxDB() - - updatedAt := s.clock.Now().UTC() - - // update the flux script - if !upd.Options.IsZero() || upd.Flux != nil { - if err = upd.UpdateFlux(s.FluxLanguageService, task.Flux); err != nil { - return nil, err - } - task.Flux = *upd.Flux - - opts, err := options.FromScriptAST(s.FluxLanguageService, *upd.Flux) - if err != nil { - return nil, taskmodel.ErrTaskOptionParse(err) - } - task.Name = opts.Name - task.Every = opts.Every.String() - task.Cron = opts.Cron - - var off time.Duration - if opts.Offset != nil { - off, err = time.ParseDuration(opts.Offset.String()) - if err != nil { - return nil, taskmodel.ErrTaskTimeParse(err) - } - } - task.Offset = off - task.UpdatedAt = updatedAt - } - - if upd.Description != nil { - task.Description = *upd.Description - task.UpdatedAt = updatedAt - } - - if upd.Status != nil && task.Status != *upd.Status { - task.Status = *upd.Status - task.UpdatedAt = updatedAt - - // task is transitioning from inactive to active, ensure scheduled and completed are updated - if task.Status == taskmodel.TaskStatusActive { - updatedAtTrunc := updatedAt.Truncate(time.Second).UTC() - task.LatestCompleted = updatedAtTrunc - task.LatestScheduled = updatedAtTrunc - } - } - - if upd.Metadata != nil { - task.Metadata = upd.Metadata - task.UpdatedAt = updatedAt - } - - if upd.LatestCompleted != nil { - // make sure we only update latest completed one way - tlc := task.LatestCompleted - ulc := *upd.LatestCompleted - - if !ulc.IsZero() && ulc.After(tlc) { - task.LatestCompleted = *upd.LatestCompleted - } - } - - if upd.LatestScheduled != nil { - // make sure we only update latest scheduled one way - if upd.LatestScheduled.After(task.LatestScheduled) { - task.LatestScheduled = *upd.LatestScheduled - } - } - - if upd.LatestSuccess != nil { - // make sure we only update latest success one way - tlc := task.LatestSuccess - ulc := *upd.LatestSuccess - - if !ulc.IsZero() && ulc.After(tlc) { - task.LatestSuccess = *upd.LatestSuccess - } - } - - if upd.LatestFailure != nil { - // make sure we only update latest failure one way - tlc := task.LatestFailure - ulc := *upd.LatestFailure - - if !ulc.IsZero() && ulc.After(tlc) { - task.LatestFailure = *upd.LatestFailure - } - } - - if upd.LastRunStatus != nil { - task.LastRunStatus = *upd.LastRunStatus - if *upd.LastRunStatus == "failed" && upd.LastRunError != nil { - task.LastRunError = *upd.LastRunError - } else { - task.LastRunError = "" - } - } - - // save the updated task - bucket, err := tx.Bucket(taskBucket) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - key, err := taskKey(id) - if err != nil { - return nil, err - } - - taskBytes, err := json.Marshal(task) - if err != nil { - return nil, taskmodel.ErrInternalTaskServiceError(err) - } - - err = bucket.Put(key, taskBytes) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - uid, _ := icontext.GetUserID(ctx) - if err := 
s.audit.Log(resource.Change{ - Type: resource.Update, - ResourceID: task.ID, - ResourceType: influxdb.TasksResourceType, - OrganizationID: task.OrganizationID, - UserID: uid, - ResourceBody: taskBytes, - Time: time.Now(), - }); err != nil { - return nil, err - } - - return task, nil -} - -// DeleteTask removes a task by ID and purges all associated data and scheduled runs. -func (s *Service) DeleteTask(ctx context.Context, id platform.ID) error { - err := s.kv.Update(ctx, func(tx Tx) error { - err := s.deleteTask(ctx, tx, id) - if err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - - return nil -} - -func (s *Service) deleteTask(ctx context.Context, tx Tx, id platform.ID) error { - taskBucket, err := tx.Bucket(taskBucket) - if err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - runBucket, err := tx.Bucket(taskRunBucket) - if err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - indexBucket, err := tx.Bucket(taskIndexBucket) - if err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - // retrieve the task - task, err := s.findTaskByID(ctx, tx, id, true) - if err != nil { - return err - } - - // remove the orgs index - orgKey, err := taskOrgKey(task.GetOrgID(), task.GetID()) - if err != nil { - return err - } - - if err := indexBucket.Delete(orgKey); err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - // remove latest completed - lastCompletedKey, err := taskLatestCompletedKey(task.GetID()) - if err != nil { - return err - } - - if err := runBucket.Delete(lastCompletedKey); err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - // remove the runs - runs, _, err := s.findRuns(ctx, tx, taskmodel.RunFilter{Task: task.GetID()}) - if err != nil { - return err - } - - for _, run := range runs { - key, err := taskRunKey(task.GetID(), run.ID) - if err != nil { - return err - } - - if err := runBucket.Delete(key); err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - } - // remove the task - key, err := taskKey(task.GetID()) - if err != nil { - return err - } - - if err := taskBucket.Delete(key); err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - uid, _ := icontext.GetUserID(ctx) - return s.audit.Log(resource.Change{ - Type: resource.Delete, - ResourceID: task.GetID(), - ResourceType: influxdb.TasksResourceType, - OrganizationID: task.GetOrgID(), - UserID: uid, - Time: time.Now(), - }) -} - -// FindLogs returns logs for a run. 
-func (s *Service) FindLogs(ctx context.Context, filter taskmodel.LogFilter) ([]*taskmodel.Log, int, error) { - var logs []*taskmodel.Log - err := s.kv.View(ctx, func(tx Tx) error { - ls, _, err := s.findLogs(ctx, tx, filter) - if err != nil { - return err - } - logs = ls - return nil - }) - if err != nil { - return nil, 0, err - } - - return logs, len(logs), nil -} - -func (s *Service) findLogs(ctx context.Context, tx Tx, filter taskmodel.LogFilter) ([]*taskmodel.Log, int, error) { - if filter.Run != nil { - r, err := s.findRunByID(ctx, tx, filter.Task, *filter.Run) - if err != nil { - return nil, 0, err - } - rtn := make([]*taskmodel.Log, len(r.Log)) - for i := 0; i < len(r.Log); i++ { - rtn[i] = &r.Log[i] - } - return rtn, len(rtn), nil - } - - runs, _, err := s.findRuns(ctx, tx, taskmodel.RunFilter{Task: filter.Task}) - if err != nil { - return nil, 0, err - } - var logs []*taskmodel.Log - for _, run := range runs { - for i := 0; i < len(run.Log); i++ { - logs = append(logs, &run.Log[i]) - - } - } - return logs, len(logs), nil -} - -// FindRuns returns a list of runs that match a filter and the total count of returned runs. -func (s *Service) FindRuns(ctx context.Context, filter taskmodel.RunFilter) ([]*taskmodel.Run, int, error) { - var runs []*taskmodel.Run - err := s.kv.View(ctx, func(tx Tx) error { - rs, _, err := s.findRuns(ctx, tx, filter) - if err != nil { - return err - } - runs = rs - return nil - }) - if err != nil { - return nil, 0, err - } - - return runs, len(runs), nil -} - -func (s *Service) findRuns(ctx context.Context, tx Tx, filter taskmodel.RunFilter) ([]*taskmodel.Run, int, error) { - if filter.Limit == 0 { - filter.Limit = taskmodel.TaskDefaultPageSize - } - - if filter.Limit < 0 || filter.Limit > taskmodel.TaskMaxPageSize { - return nil, 0, taskmodel.ErrOutOfBoundsLimit - } - parsedFilterAfterTime := time.Time{} - parsedFilterBeforeTime := time.Now().UTC() - var err error - if len(filter.AfterTime) > 0 { - parsedFilterAfterTime, err = time.Parse(time.RFC3339, filter.AfterTime) - if err != nil { - return nil, 0, err - } - } - if filter.BeforeTime != "" { - parsedFilterBeforeTime, err = time.Parse(time.RFC3339, filter.BeforeTime) - if err != nil { - return nil, 0, err - } - } - - var runs []*taskmodel.Run - // manual runs - manualRuns, err := s.manualRuns(ctx, tx, filter.Task) - if err != nil { - return nil, 0, err - } - for _, run := range manualRuns { - if run.ScheduledFor.After(parsedFilterAfterTime) && run.ScheduledFor.Before(parsedFilterBeforeTime) { - runs = append(runs, run) - } - if len(runs) >= filter.Limit { - return runs, len(runs), nil - } - } - - // append currently running - currentlyRunning, err := s.currentlyRunning(ctx, tx, filter.Task) - if err != nil { - return nil, 0, err - } - for _, run := range currentlyRunning { - if run.ScheduledFor.After(parsedFilterAfterTime) && run.ScheduledFor.Before(parsedFilterBeforeTime) { - runs = append(runs, run) - } - if len(runs) >= filter.Limit { - return runs, len(runs), nil - } - } - - return runs, len(runs), nil -} - -// FindRunByID returns a single run. 
-func (s *Service) FindRunByID(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - var run *taskmodel.Run - err := s.kv.View(ctx, func(tx Tx) error { - r, err := s.findRunByID(ctx, tx, taskID, runID) - if err != nil { - return err - } - run = r - return nil - }) - if err != nil { - return nil, err - } - - return run, nil -} - -func (s *Service) findRunByID(ctx context.Context, tx Tx, taskID, runID platform.ID) (*taskmodel.Run, error) { - bucket, err := tx.Bucket(taskRunBucket) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - key, err := taskRunKey(taskID, runID) - if err != nil { - return nil, err - } - runBytes, err := bucket.Get(key) - if err != nil { - if IsNotFound(err) { - runs, err := s.manualRuns(ctx, tx, taskID) - for _, run := range runs { - if run.ID == runID { - return run, nil - } - } - if err != nil { - return nil, taskmodel.ErrRunNotFound - } - return nil, taskmodel.ErrRunNotFound - } - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - run := &taskmodel.Run{} - err = json.Unmarshal(runBytes, run) - if err != nil { - return nil, taskmodel.ErrInternalTaskServiceError(err) - } - - return run, nil -} - -// CancelRun cancels a currently running run. -func (s *Service) CancelRun(ctx context.Context, taskID, runID platform.ID) error { - err := s.kv.Update(ctx, func(tx Tx) error { - err := s.cancelRun(ctx, tx, taskID, runID) - if err != nil { - return err - } - return nil - }) - return err -} - -func (s *Service) cancelRun(ctx context.Context, tx Tx, taskID, runID platform.ID) error { - // get the run - run, err := s.findRunByID(ctx, tx, taskID, runID) - if err != nil { - return err - } - - // set status to canceled - run.Status = "canceled" - - // save - bucket, err := tx.Bucket(taskRunBucket) - if err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - runBytes, err := json.Marshal(run) - if err != nil { - return taskmodel.ErrInternalTaskServiceError(err) - } - - runKey, err := taskRunKey(taskID, runID) - if err != nil { - return err - } - - if err := bucket.Put(runKey, runBytes); err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - return nil -} - -// RetryRun creates and returns a new run (which is a retry of another run). 
-func (s *Service) RetryRun(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - var r *taskmodel.Run - err := s.kv.Update(ctx, func(tx Tx) error { - run, err := s.retryRun(ctx, tx, taskID, runID) - if err != nil { - return err - } - r = run - return nil - }) - return r, err -} - -func (s *Service) retryRun(ctx context.Context, tx Tx, taskID, runID platform.ID) (*taskmodel.Run, error) { - // find the run - r, err := s.findRunByID(ctx, tx, taskID, runID) - if err != nil { - return nil, err - } - - r.ID = s.IDGenerator.ID() - r.Status = taskmodel.RunScheduled.String() - r.StartedAt = time.Time{} - r.FinishedAt = time.Time{} - r.RequestedAt = time.Time{} - - // add a clean copy of the run to the manual runs - bucket, err := tx.Bucket(taskRunBucket) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - key, err := taskManualRunKey(taskID) - if err != nil { - return nil, err - } - - runs := []*taskmodel.Run{} - runsBytes, err := bucket.Get(key) - if err != nil { - if err != ErrKeyNotFound { - return nil, taskmodel.ErrRunNotFound - } - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - - } - - if runsBytes != nil { - if err := json.Unmarshal(runsBytes, &runs); err != nil { - return nil, taskmodel.ErrInternalTaskServiceError(err) - } - } - - runs = append(runs, r) - - // save manual runs - runsBytes, err = json.Marshal(runs) - if err != nil { - return nil, taskmodel.ErrInternalTaskServiceError(err) - } - - if err := bucket.Put(key, runsBytes); err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - return r, nil -} - -// ForceRun forces a run to occur with unix timestamp scheduledFor, to be executed as soon as possible. -// The value of scheduledFor may or may not align with the task's schedule. -func (s *Service) ForceRun(ctx context.Context, taskID platform.ID, scheduledFor int64) (*taskmodel.Run, error) { - var r *taskmodel.Run - err := s.kv.Update(ctx, func(tx Tx) error { - run, err := s.forceRun(ctx, tx, taskID, scheduledFor) - if err != nil { - return err - } - r = run - return nil - }) - return r, err -} - -func (s *Service) forceRun(ctx context.Context, tx Tx, taskID platform.ID, scheduledFor int64) (*taskmodel.Run, error) { - // create a run - t := time.Unix(scheduledFor, 0).UTC() - r := &taskmodel.Run{ - ID: s.IDGenerator.ID(), - TaskID: taskID, - Status: taskmodel.RunScheduled.String(), - RequestedAt: time.Now().UTC(), - ScheduledFor: t, - Log: []taskmodel.Log{}, - } - - // add a clean copy of the run to the manual runs - bucket, err := tx.Bucket(taskRunBucket) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - runs, err := s.manualRuns(ctx, tx, taskID) - if err != nil { - return nil, err - } - - // check to see if this run is already queued - for _, run := range runs { - if run.ScheduledFor == r.ScheduledFor { - return nil, taskmodel.ErrTaskRunAlreadyQueued - } - } - runs = append(runs, r) - - // save manual runs - runsBytes, err := json.Marshal(runs) - if err != nil { - return nil, taskmodel.ErrInternalTaskServiceError(err) - } - - key, err := taskManualRunKey(taskID) - if err != nil { - return nil, err - } - - if err := bucket.Put(key, runsBytes); err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - return r, nil -} - -// CreateRun creates a run with a scheduledFor time as now. 
-func (s *Service) CreateRun(ctx context.Context, taskID platform.ID, scheduledFor time.Time, runAt time.Time) (*taskmodel.Run, error) { - var r *taskmodel.Run - err := s.kv.Update(ctx, func(tx Tx) error { - run, err := s.createRun(ctx, tx, taskID, scheduledFor, runAt) - if err != nil { - return err - } - r = run - return nil - }) - return r, err -} -func (s *Service) createRun(ctx context.Context, tx Tx, taskID platform.ID, scheduledFor time.Time, runAt time.Time) (*taskmodel.Run, error) { - id := s.IDGenerator.ID() - t := time.Unix(scheduledFor.Unix(), 0).UTC() - - run := taskmodel.Run{ - ID: id, - TaskID: taskID, - ScheduledFor: t, - RunAt: runAt, - Status: taskmodel.RunScheduled.String(), - Log: []taskmodel.Log{}, - } - - b, err := tx.Bucket(taskRunBucket) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - runBytes, err := json.Marshal(run) - if err != nil { - return nil, taskmodel.ErrInternalTaskServiceError(err) - } - - runKey, err := taskRunKey(taskID, run.ID) - if err != nil { - return nil, err - } - if err := b.Put(runKey, runBytes); err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - return &run, nil -} - -func (s *Service) CurrentlyRunning(ctx context.Context, taskID platform.ID) ([]*taskmodel.Run, error) { - var runs []*taskmodel.Run - err := s.kv.View(ctx, func(tx Tx) error { - rs, err := s.currentlyRunning(ctx, tx, taskID) - if err != nil { - return err - } - runs = rs - return nil - }) - if err != nil { - return nil, err - } - - return runs, nil -} - -func (s *Service) currentlyRunning(ctx context.Context, tx Tx, taskID platform.ID) ([]*taskmodel.Run, error) { - bucket, err := tx.Bucket(taskRunBucket) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - c, err := bucket.Cursor(WithCursorHintPrefix(taskID.String())) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - var runs []*taskmodel.Run - - taskKey, err := taskKey(taskID) - if err != nil { - return nil, err - } - - k, v := c.Seek(taskKey) - for { - if k == nil || !strings.HasPrefix(string(k), string(taskKey)) { - break - } - if strings.HasSuffix(string(k), "manualRuns") || strings.HasSuffix(string(k), "latestCompleted") { - k, v = c.Next() - continue - } - r := &taskmodel.Run{} - if err := json.Unmarshal(v, r); err != nil { - return nil, taskmodel.ErrInternalTaskServiceError(err) - } - - // if the run no longer belongs to the task we are done - if r.TaskID != taskID { - break - } - runs = append(runs, r) - k, v = c.Next() - } - return runs, nil -} - -func (s *Service) ManualRuns(ctx context.Context, taskID platform.ID) ([]*taskmodel.Run, error) { - var runs []*taskmodel.Run - err := s.kv.View(ctx, func(tx Tx) error { - rs, err := s.manualRuns(ctx, tx, taskID) - if err != nil { - return err - } - runs = rs - return nil - }) - if err != nil { - return nil, err - } - - return runs, nil -} - -func (s *Service) manualRuns(ctx context.Context, tx Tx, taskID platform.ID) ([]*taskmodel.Run, error) { - b, err := tx.Bucket(taskRunBucket) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - key, err := taskManualRunKey(taskID) - if err != nil { - return nil, err - } - - runs := []*taskmodel.Run{} - val, err := b.Get(key) - if err != nil { - if err == ErrKeyNotFound { - return runs, nil - } - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - if err := json.Unmarshal(val, &runs); err != nil { - return nil, taskmodel.ErrInternalTaskServiceError(err) - } - return runs, nil -} - -func (s 
*Service) StartManualRun(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - var r *taskmodel.Run - err := s.kv.Update(ctx, func(tx Tx) error { - run, err := s.startManualRun(ctx, tx, taskID, runID) - if err != nil { - return err - } - r = run - return nil - }) - return r, err -} - -func (s *Service) startManualRun(ctx context.Context, tx Tx, taskID, runID platform.ID) (*taskmodel.Run, error) { - - mRuns, err := s.manualRuns(ctx, tx, taskID) - if err != nil { - return nil, taskmodel.ErrRunNotFound - } - - if len(mRuns) < 1 { - return nil, taskmodel.ErrRunNotFound - } - - var run *taskmodel.Run - for i, r := range mRuns { - if r.ID == runID { - run = r - mRuns = append(mRuns[:i], mRuns[i+1:]...) - } - } - if run == nil { - return nil, taskmodel.ErrRunNotFound - } - - // save manual runs - mRunsBytes, err := json.Marshal(mRuns) - if err != nil { - return nil, taskmodel.ErrInternalTaskServiceError(err) - } - - runsKey, err := taskManualRunKey(taskID) - if err != nil { - return nil, err - } - - b, err := tx.Bucket(taskRunBucket) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - if err := b.Put(runsKey, mRunsBytes); err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - // add mRun to the list of currently running - mRunBytes, err := json.Marshal(run) - if err != nil { - return nil, taskmodel.ErrInternalTaskServiceError(err) - } - - runKey, err := taskRunKey(taskID, run.ID) - if err != nil { - return nil, err - } - - if err := b.Put(runKey, mRunBytes); err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - return run, nil -} - -// FinishRun removes runID from the list of running tasks and if its `now` is later then last completed update it. -func (s *Service) FinishRun(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - var run *taskmodel.Run - err := s.kv.Update(ctx, func(tx Tx) error { - r, err := s.finishRun(ctx, tx, taskID, runID) - if err != nil { - return err - } - run = r - return nil - }) - return run, err -} - -func (s *Service) finishRun(ctx context.Context, tx Tx, taskID, runID platform.ID) (*taskmodel.Run, error) { - // get the run - r, err := s.findRunByID(ctx, tx, taskID, runID) - if err != nil { - return nil, err - } - - // tell task to update latest completed - scheduled := r.ScheduledFor - - var latestSuccess, latestFailure *time.Time - - if r.Status == "failed" { - latestFailure = &scheduled - } else { - latestSuccess = &scheduled - } - - _, err = s.updateTask(ctx, tx, taskID, taskmodel.TaskUpdate{ - LatestCompleted: &scheduled, - LatestSuccess: latestSuccess, - LatestFailure: latestFailure, - LastRunStatus: &r.Status, - LastRunError: func() *string { - if r.Status == "failed" { - // prefer the second to last log message as the error message - // per https://github.com/influxdata/influxdb/issues/15153#issuecomment-547706005 - if len(r.Log) > 1 { - return &r.Log[len(r.Log)-2].Message - } else if len(r.Log) > 0 { - return &r.Log[len(r.Log)-1].Message - } - } - return nil - }(), - }) - if err != nil { - return nil, err - } - - // remove run - bucket, err := tx.Bucket(taskRunBucket) - if err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - key, err := taskRunKey(taskID, runID) - if err != nil { - return nil, err - } - if err := bucket.Delete(key); err != nil { - return nil, taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - return r, nil -} - -// UpdateRunState sets the run state at the respective time. 
-func (s *Service) UpdateRunState(ctx context.Context, taskID, runID platform.ID, when time.Time, state taskmodel.RunStatus) error { - err := s.kv.Update(ctx, func(tx Tx) error { - err := s.updateRunState(ctx, tx, taskID, runID, when, state) - if err != nil { - return err - } - return nil - }) - return err -} - -func (s *Service) updateRunState(ctx context.Context, tx Tx, taskID, runID platform.ID, when time.Time, state taskmodel.RunStatus) error { - // find run - run, err := s.findRunByID(ctx, tx, taskID, runID) - if err != nil { - return err - } - - // update state - run.Status = state.String() - switch state { - case taskmodel.RunStarted: - run.StartedAt = when - case taskmodel.RunSuccess, taskmodel.RunFail, taskmodel.RunCanceled: - run.FinishedAt = when - } - - // save run - b, err := tx.Bucket(taskRunBucket) - if err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - runBytes, err := json.Marshal(run) - if err != nil { - return taskmodel.ErrInternalTaskServiceError(err) - } - - runKey, err := taskRunKey(taskID, run.ID) - if err != nil { - return err - } - if err := b.Put(runKey, runBytes); err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - return nil -} - -// AddRunLog adds a log line to the run. -func (s *Service) AddRunLog(ctx context.Context, taskID, runID platform.ID, when time.Time, log string) error { - err := s.kv.Update(ctx, func(tx Tx) error { - err := s.addRunLog(ctx, tx, taskID, runID, when, log) - if err != nil { - return err - } - return nil - }) - return err -} - -func (s *Service) addRunLog(ctx context.Context, tx Tx, taskID, runID platform.ID, when time.Time, log string) error { - // find run - run, err := s.findRunByID(ctx, tx, taskID, runID) - if err != nil { - return err - } - // update log - l := taskmodel.Log{RunID: runID, Time: when.Format(time.RFC3339Nano), Message: log} - run.Log = append(run.Log, l) - // save run - b, err := tx.Bucket(taskRunBucket) - if err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - runBytes, err := json.Marshal(run) - if err != nil { - return taskmodel.ErrInternalTaskServiceError(err) - } - - runKey, err := taskRunKey(taskID, run.ID) - if err != nil { - return err - } - - if err := b.Put(runKey, runBytes); err != nil { - return taskmodel.ErrUnexpectedTaskBucketErr(err) - } - - return nil -} - -func taskKey(taskID platform.ID) ([]byte, error) { - encodedID, err := taskID.Encode() - if err != nil { - return nil, taskmodel.ErrInvalidTaskID - } - return encodedID, nil -} - -func taskLatestCompletedKey(taskID platform.ID) ([]byte, error) { - encodedID, err := taskID.Encode() - if err != nil { - return nil, taskmodel.ErrInvalidTaskID - } - return []byte(string(encodedID) + "/latestCompleted"), nil -} - -func taskManualRunKey(taskID platform.ID) ([]byte, error) { - encodedID, err := taskID.Encode() - if err != nil { - return nil, taskmodel.ErrInvalidTaskID - } - return []byte(string(encodedID) + "/manualRuns"), nil -} - -func taskOrgKey(orgID, taskID platform.ID) ([]byte, error) { - encodedOrgID, err := orgID.Encode() - if err != nil { - return nil, taskmodel.ErrInvalidTaskID - } - encodedID, err := taskID.Encode() - if err != nil { - return nil, taskmodel.ErrInvalidTaskID - } - - return []byte(string(encodedOrgID) + "/" + string(encodedID)), nil -} - -func taskRunKey(taskID, runID platform.ID) ([]byte, error) { - encodedID, err := taskID.Encode() - if err != nil { - return nil, taskmodel.ErrInvalidTaskID - } - encodedRunID, err := runID.Encode() - if err != nil { - return nil, 
taskmodel.ErrInvalidTaskID - } - - return []byte(string(encodedID) + "/" + string(encodedRunID)), nil -} diff --git a/kv/task_private_test.go b/kv/task_private_test.go deleted file mode 100644 index 19a7c448ca7..00000000000 --- a/kv/task_private_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package kv - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -func Test_newTaskMatchFN(t *testing.T) { - ct := func(typ string, name string) *basicKvTask { - return &basicKvTask{ - Type: typ, - OrganizationID: 1, - Name: name, - } - } - - const ( - NoTyp = "-" - NoNam = "-" - ) - - newMatch := func(typ string, name string) taskMatchFn { - var fil taskmodel.TaskFilter - - if typ != NoTyp { - fil.Type = &typ - } - - if name != NoNam { - fil.Name = &name - } - - return newTaskMatchFn(fil) - } - - type test struct { - name string - task matchableTask - fn taskMatchFn - exp bool - } - - tests := []struct { - name string - tests []test - }{ - { - "match type", - []test{ - { - name: "empty with system type", - task: ct("", "Foo"), - fn: newMatch(taskmodel.TaskSystemType, NoNam), - exp: true, - }, - { - name: "system with system type", - task: ct(taskmodel.TaskSystemType, "Foo"), - fn: newMatch(taskmodel.TaskSystemType, NoNam), - exp: true, - }, - { - name: "system with basic type", - task: ct(taskmodel.TaskSystemType, "Foo"), - fn: newMatch(taskmodel.TaskBasicType, NoNam), - exp: true, - }, - { - name: "equal", - task: ct("other type", "Foo"), - fn: newMatch("other type", NoNam), - exp: true, - }, - { - name: "not type", - task: ct(taskmodel.TaskSystemType, "Foo"), - fn: newMatch("other type", NoNam), - exp: false, - }, - }, - }, - { - "match name", - []test{ - { - name: "equal", - task: ct(taskmodel.TaskSystemType, "Foo"), - fn: newMatch(NoTyp, "Foo"), - exp: true, - }, - { - name: "not name", - task: ct(taskmodel.TaskSystemType, "Foo"), - fn: newMatch(NoTyp, "Bar"), - exp: false, - }, - }, - }, - } - for _, group := range tests { - t.Run(group.name, func(t *testing.T) { - for _, test := range group.tests { - t.Run(test.name, func(t *testing.T) { - if got, exp := test.fn(test.task), test.exp; got != exp { - t.Errorf("unxpected match result: -got/+exp\n%v", cmp.Diff(got, exp)) - } - }) - } - }) - } - - t.Run("match returns nil for no filter", func(t *testing.T) { - fn := newTaskMatchFn(taskmodel.TaskFilter{}) - if fn != nil { - t.Error("expected nil") - } - }) -} diff --git a/kv/task_test.go b/kv/task_test.go deleted file mode 100644 index dc9f1326f83..00000000000 --- a/kv/task_test.go +++ /dev/null @@ -1,530 +0,0 @@ -package kv_test - -import ( - "bytes" - "context" - "encoding/json" - "testing" - "time" - - "github.com/benbjohnson/clock" - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - icontext "github.com/influxdata/influxdb/v2/context" - _ "github.com/influxdata/influxdb/v2/fluxinit/static" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/influxdata/influxdb/v2/task/options" - "github.com/influxdata/influxdb/v2/task/servicetest" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/assert" - 
"github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestBoltTaskService(t *testing.T) { - servicetest.TestTaskService( - t, - func(t *testing.T) (*servicetest.System, context.CancelFunc) { - store, close := itesting.NewTestBoltStore(t) - - tenantStore := tenant.NewStore(store) - ts := tenant.NewService(tenantStore) - - authStore, err := authorization.NewStore(store) - require.NoError(t, err) - authSvc := authorization.NewService(authStore, ts) - - ctx, cancelFunc := context.WithCancel(context.Background()) - service := kv.NewService(zaptest.NewLogger(t), store, ts, kv.ServiceConfig{ - FluxLanguageService: fluxlang.DefaultService, - }) - - go func() { - <-ctx.Done() - close() - }() - - return &servicetest.System{ - TaskControlService: service, - TaskService: service, - OrganizationService: ts.OrganizationService, - UserService: ts.UserService, - UserResourceMappingService: ts.UserResourceMappingService, - AuthorizationService: authSvc, - Ctx: ctx, - }, cancelFunc - }, - "transactional", - ) -} - -type testService struct { - Store kv.Store - Service *kv.Service - Org influxdb.Organization - User influxdb.User - Auth influxdb.Authorization - Clock clock.Clock -} - -func newService(t *testing.T, ctx context.Context, c clock.Clock) *testService { - t.Helper() - - if c == nil { - c = clock.New() - } - - var ( - ts = &testService{} - err error - store kv.SchemaStore - ) - - store = itesting.NewTestInmemStore(t) - if err != nil { - t.Fatal("failed to create InmemStore", err) - } - - ts.Store = store - - tenantStore := tenant.NewStore(store) - tenantSvc := tenant.NewService(tenantStore) - - authStore, err := authorization.NewStore(store) - require.NoError(t, err) - authSvc := authorization.NewService(authStore, tenantSvc) - - ts.Service = kv.NewService(zaptest.NewLogger(t), store, tenantSvc, kv.ServiceConfig{ - Clock: c, - FluxLanguageService: fluxlang.DefaultService, - }) - - ts.User = influxdb.User{Name: t.Name() + "-user"} - if err := tenantSvc.CreateUser(ctx, &ts.User); err != nil { - t.Fatal(err) - } - ts.Org = influxdb.Organization{Name: t.Name() + "-org"} - if err := tenantSvc.CreateOrganization(ctx, &ts.Org); err != nil { - t.Fatal(err) - } - - if err := tenantSvc.CreateUserResourceMapping(ctx, &influxdb.UserResourceMapping{ - ResourceType: influxdb.OrgsResourceType, - ResourceID: ts.Org.ID, - UserID: ts.User.ID, - UserType: influxdb.Owner, - }); err != nil { - t.Fatal(err) - } - - ts.Auth = influxdb.Authorization{ - OrgID: ts.Org.ID, - UserID: ts.User.ID, - Permissions: influxdb.OperPermissions(), - } - if err := authSvc.CreateAuthorization(context.Background(), &ts.Auth); err != nil { - t.Fatal(err) - } - - return ts -} - -func TestRetrieveTaskWithBadAuth(t *testing.T) { - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - - ts := newService(t, ctx, nil) - - ctx = icontext.SetAuthorizer(ctx, &ts.Auth) - - task, err := ts.Service.CreateTask(ctx, taskmodel.TaskCreate{ - Flux: `option task = {name: "a task",every: 1h} from(bucket:"test") |> range(start:-1h)`, - OrganizationID: ts.Org.ID, - OwnerID: ts.User.ID, - Status: string(taskmodel.TaskActive), - }) - if err != nil { - t.Fatal(err) - } - - // convert task to old one with a bad auth - err = ts.Store.Update(ctx, func(tx kv.Tx) error { - b, err := tx.Bucket([]byte("tasksv1")) - if err != nil { - return err - } - bID, err := task.ID.Encode() - if err != nil { - return err - } - task.OwnerID = platform.ID(1) - tbyte, err := json.Marshal(task) - if err != nil { - return err - } 
- // have to actually hack the bytes here because the system doesnt like us to encode bad id's. - tbyte = bytes.Replace(tbyte, []byte(`,"ownerID":"0000000000000001"`), []byte{}, 1) - if err := b.Put(bID, tbyte); err != nil { - return err - } - - return nil - }) - if err != nil { - t.Fatal(err) - } - - // lets see if we can list and find the task - newTask, err := ts.Service.FindTaskByID(ctx, task.ID) - if err != nil { - t.Fatal(err) - } - if newTask.ID != task.ID { - t.Fatal("miss matching taskID's") - } - - tasks, _, err := ts.Service.FindTasks(context.Background(), taskmodel.TaskFilter{}) - if err != nil { - t.Fatal(err) - } - if len(tasks) != 1 { - t.Fatal("failed to return task") - } - - // test status filter - active := string(taskmodel.TaskActive) - tasksWithActiveFilter, _, err := ts.Service.FindTasks(context.Background(), taskmodel.TaskFilter{Status: &active}) - if err != nil { - t.Fatal("could not find tasks") - } - if len(tasksWithActiveFilter) != 1 { - t.Fatal("failed to find active task with filter") - } -} - -func TestService_UpdateTask_InactiveToActive(t *testing.T) { - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - - c := clock.NewMock() - c.Set(time.Unix(1000, 0)) - - ts := newService(t, ctx, c) - - ctx = icontext.SetAuthorizer(ctx, &ts.Auth) - - originalTask, err := ts.Service.CreateTask(ctx, taskmodel.TaskCreate{ - Flux: `option task = {name: "a task",every: 1h} from(bucket:"test") |> range(start:-1h)`, - OrganizationID: ts.Org.ID, - OwnerID: ts.User.ID, - Status: string(taskmodel.TaskActive), - }) - if err != nil { - t.Fatal("CreateTask", err) - } - - v := taskmodel.TaskStatusInactive - c.Add(1 * time.Second) - exp := c.Now() - updatedTask, err := ts.Service.UpdateTask(ctx, originalTask.ID, taskmodel.TaskUpdate{Status: &v, LatestCompleted: &exp, LatestScheduled: &exp}) - if err != nil { - t.Fatal("UpdateTask", err) - } - - if got := updatedTask.LatestScheduled; !got.Equal(exp) { - t.Fatalf("unexpected -got/+exp\n%s", cmp.Diff(got.String(), exp.String())) - } - if got := updatedTask.LatestCompleted; !got.Equal(exp) { - t.Fatalf("unexpected -got/+exp\n%s", cmp.Diff(got.String(), exp.String())) - } - - c.Add(10 * time.Second) - exp = c.Now() - v = taskmodel.TaskStatusActive - updatedTask, err = ts.Service.UpdateTask(ctx, originalTask.ID, taskmodel.TaskUpdate{Status: &v}) - if err != nil { - t.Fatal("UpdateTask", err) - } - - if got := updatedTask.LatestScheduled; !got.Equal(exp) { - t.Fatalf("unexpected -got/+exp\n%s", cmp.Diff(got.String(), exp.String())) - } -} - -func TestTaskRunCancellation(t *testing.T) { - store, closeSvc := itesting.NewTestBoltStore(t) - defer closeSvc() - - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - - tenantStore := tenant.NewStore(store) - tenantSvc := tenant.NewService(tenantStore) - - authStore, err := authorization.NewStore(store) - require.NoError(t, err) - authSvc := authorization.NewService(authStore, tenantSvc) - - service := kv.NewService(zaptest.NewLogger(t), store, tenantSvc, kv.ServiceConfig{ - FluxLanguageService: fluxlang.DefaultService, - }) - - u := &influxdb.User{Name: t.Name() + "-user"} - if err := tenantSvc.CreateUser(ctx, u); err != nil { - t.Fatal(err) - } - o := &influxdb.Organization{Name: t.Name() + "-org"} - if err := tenantSvc.CreateOrganization(ctx, o); err != nil { - t.Fatal(err) - } - - if err := tenantSvc.CreateUserResourceMapping(ctx, &influxdb.UserResourceMapping{ - ResourceType: influxdb.OrgsResourceType, - ResourceID: o.ID, - UserID: u.ID, 
- UserType: influxdb.Owner, - }); err != nil { - t.Fatal(err) - } - - authz := influxdb.Authorization{ - OrgID: o.ID, - UserID: u.ID, - Permissions: influxdb.OperPermissions(), - } - if err := authSvc.CreateAuthorization(context.Background(), &authz); err != nil { - t.Fatal(err) - } - - ctx = icontext.SetAuthorizer(ctx, &authz) - - task, err := service.CreateTask(ctx, taskmodel.TaskCreate{ - Flux: `option task = {name: "a task",cron: "0 * * * *", offset: 20s} from(bucket:"test") |> range(start:-1h)`, - OrganizationID: o.ID, - OwnerID: u.ID, - }) - if err != nil { - t.Fatal(err) - } - - run, err := service.CreateRun(ctx, task.ID, time.Now().Add(time.Hour), time.Now().Add(time.Hour)) - if err != nil { - t.Fatal(err) - } - - if err := service.CancelRun(ctx, run.TaskID, run.ID); err != nil { - t.Fatal(err) - } - - canceled, err := service.FindRunByID(ctx, run.TaskID, run.ID) - if err != nil { - t.Fatal(err) - } - - if canceled.Status != taskmodel.RunCanceled.String() { - t.Fatalf("expected task run to be cancelled") - } -} - -func TestService_UpdateTask_RecordLatestSuccessAndFailure(t *testing.T) { - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - - c := clock.NewMock() - c.Set(time.Unix(1000, 0)) - - ts := newService(t, ctx, c) - - ctx = icontext.SetAuthorizer(ctx, &ts.Auth) - - originalTask, err := ts.Service.CreateTask(ctx, taskmodel.TaskCreate{ - Flux: `option task = {name: "a task",every: 1h} from(bucket:"test") |> range(start:-1h)`, - OrganizationID: ts.Org.ID, - OwnerID: ts.User.ID, - Status: string(taskmodel.TaskActive), - }) - if err != nil { - t.Fatal("CreateTask", err) - } - - c.Add(1 * time.Second) - exp := c.Now() - updatedTask, err := ts.Service.UpdateTask(ctx, originalTask.ID, taskmodel.TaskUpdate{ - LatestCompleted: &exp, - LatestScheduled: &exp, - - // These would be updated in a mutually exclusive manner, but we'll set - // them both to demonstrate that they do change. - LatestSuccess: &exp, - LatestFailure: &exp, - }) - if err != nil { - t.Fatal("UpdateTask", err) - } - - if got := updatedTask.LatestScheduled; !got.Equal(exp) { - t.Fatalf("unexpected -got/+exp\n%s", cmp.Diff(got.String(), exp.String())) - } - if got := updatedTask.LatestCompleted; !got.Equal(exp) { - t.Fatalf("unexpected -got/+exp\n%s", cmp.Diff(got.String(), exp.String())) - } - if got := updatedTask.LatestSuccess; !got.Equal(exp) { - t.Fatalf("unexpected -got/+exp\n%s", cmp.Diff(got.String(), exp.String())) - } - if got := updatedTask.LatestFailure; !got.Equal(exp) { - t.Fatalf("unexpected -got/+exp\n%s", cmp.Diff(got.String(), exp.String())) - } - - c.Add(5 * time.Second) - exp = c.Now() - updatedTask, err = ts.Service.UpdateTask(ctx, originalTask.ID, taskmodel.TaskUpdate{ - LatestCompleted: &exp, - LatestScheduled: &exp, - - // These would be updated in a mutually exclusive manner, but we'll set - // them both to demonstrate that they do change. 
- LatestSuccess: &exp, - LatestFailure: &exp, - }) - if err != nil { - t.Fatal("UpdateTask", err) - } - - if got := updatedTask.LatestScheduled; !got.Equal(exp) { - t.Fatalf("unexpected -got/+exp\n%s", cmp.Diff(got.String(), exp.String())) - } - if got := updatedTask.LatestCompleted; !got.Equal(exp) { - t.Fatalf("unexpected -got/+exp\n%s", cmp.Diff(got.String(), exp.String())) - } - if got := updatedTask.LatestSuccess; !got.Equal(exp) { - t.Fatalf("unexpected -got/+exp\n%s", cmp.Diff(got.String(), exp.String())) - } - if got := updatedTask.LatestFailure; !got.Equal(exp) { - t.Fatalf("unexpected -got/+exp\n%s", cmp.Diff(got.String(), exp.String())) - } -} - -type taskOptions struct { - name string - every string - cron string - offset string - concurrency int64 - retry int64 -} - -func TestExtractTaskOptions(t *testing.T) { - tcs := []struct { - name string - flux string - expected taskOptions - errMsg string - }{ - { - name: "all parameters", - flux: `option task = {name: "whatever", every: 1s, offset: 0s, concurrency: 2, retry: 2}`, - expected: taskOptions{ - name: "whatever", - every: "1s", - offset: "0s", - concurrency: 2, - retry: 2, - }, - }, - { - name: "some extra whitespace and bad content around it", - flux: `howdy() - option task = { name:"whatever", cron: "* * * * *" } - hello() - `, - expected: taskOptions{ - name: "whatever", - cron: "* * * * *", - concurrency: 1, - retry: 1, - }, - }, - { - name: "bad options", - flux: `option task = {name: "whatever", every: 1s, cron: "* * * * *"}`, - errMsg: "cannot use both cron and every in task options", - }, - { - name: "no options", - flux: `doesntexist()`, - errMsg: "no task options defined", - }, - { - name: "multiple assignments", - flux: ` - option task = {name: "whatever", every: 1s, offset: 0s, concurrency: 2, retry: 2} - option task = {name: "whatever", every: 1s, offset: 0s, concurrency: 2, retry: 2} - `, - errMsg: "multiple task options defined", - }, - { - name: "with script calling tableFind", - flux: ` - import "http" - import "json" - option task = {name: "Slack Metrics to #Community", cron: "0 9 * * 5"} - all_slack_messages = from(bucket: "metrics") - |> range(start: -7d, stop: now()) - |> filter(fn: (r) => - (r._measurement == "slack_channel_message")) - total_messages = all_slack_messages - |> group() - |> count() - |> tableFind(fn: (key) => true) - all_slack_messages |> yield() - `, - expected: taskOptions{ - name: "Slack Metrics to #Community", - cron: "0 9 * * 5", - concurrency: 1, - retry: 1, - }, - }, - } - - for _, tc := range tcs { - t.Run(tc.name, func(t *testing.T) { - - opts, err := options.FromScriptAST(fluxlang.DefaultService, tc.flux) - if tc.errMsg != "" { - require.Error(t, err) - assert.Equal(t, tc.errMsg, err.Error()) - return - } - - require.NoError(t, err) - - var offset options.Duration - if opts.Offset != nil { - offset = *opts.Offset - } - - var concur int64 - if opts.Concurrency != nil { - concur = *opts.Concurrency - } - - var retry int64 - if opts.Retry != nil { - retry = *opts.Retry - } - - assert.Equal(t, tc.expected.name, opts.Name) - assert.Equal(t, tc.expected.cron, opts.Cron) - assert.Equal(t, tc.expected.every, opts.Every.String()) - assert.Equal(t, tc.expected.offset, offset.String()) - assert.Equal(t, tc.expected.concurrency, concur) - assert.Equal(t, tc.expected.retry, retry) - }) - } -} diff --git a/kv/variable.go b/kv/variable.go deleted file mode 100644 index 206d8dc81ff..00000000000 --- a/kv/variable.go +++ /dev/null @@ -1,339 +0,0 @@ -package kv - -import ( - "context" - 
"encoding/json" - "strings" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - variableBucket = []byte("variablesv1") - variableIndexBucket = []byte("variablesindexv1") - // TODO: eradicate this with migration strategy - variableOrgsIndex = []byte("variableorgsv1") -) - -func decodeVariableOrgsIndexKey(indexKey []byte) (orgID platform.ID, variableID platform.ID, err error) { - if len(indexKey) != 2*platform.IDLength { - return 0, 0, &errors.Error{ - Code: errors.EInvalid, - Msg: "malformed variable orgs index key (please report this error)", - } - } - - if err := (&orgID).Decode(indexKey[:platform.IDLength]); err != nil { - return 0, 0, &errors.Error{ - Code: errors.EInvalid, - Msg: "bad org id", - Err: platform.ErrInvalidID, - } - } - - if err := (&variableID).Decode(indexKey[platform.IDLength:]); err != nil { - return 0, 0, &errors.Error{ - Code: errors.EInvalid, - Msg: "bad variable id", - Err: platform.ErrInvalidID, - } - } - - return orgID, variableID, nil -} - -func (s *Service) findOrganizationVariables(ctx context.Context, tx Tx, orgID platform.ID) ([]*influxdb.Variable, error) { - idx, err := tx.Bucket(variableOrgsIndex) - if err != nil { - return nil, err - } - - prefix, err := orgID.Encode() - if err != nil { - return nil, err - } - - cur, err := idx.ForwardCursor(prefix, WithCursorPrefix(prefix)) - if err != nil { - return nil, err - } - - variables := []*influxdb.Variable{} - for k, _ := cur.Next(); k != nil; k, _ = cur.Next() { - _, id, err := decodeVariableOrgsIndexKey(k) - if err != nil { - return nil, err - } - - m, err := s.findVariableByID(ctx, tx, id) - if err != nil { - return nil, err - } - - variables = append(variables, m) - } - - return variables, nil -} - -func newVariableStore() *IndexStore { - const resource = "variable" - - var decodeVarEntFn DecodeBucketValFn = func(key, val []byte) ([]byte, interface{}, error) { - var v influxdb.Variable - return key, &v, json.Unmarshal(val, &v) - } - - var decValToEntFn ConvertValToEntFn = func(_ []byte, i interface{}) (entity Entity, err error) { - v, ok := i.(*influxdb.Variable) - if err := IsErrUnexpectedDecodeVal(ok); err != nil { - return Entity{}, err - } - return Entity{ - PK: EncID(v.ID), - UniqueKey: Encode(EncID(v.OrganizationID), EncStringCaseInsensitive(v.Name)), - Body: v, - }, nil - } - - return &IndexStore{ - Resource: resource, - EntStore: NewStoreBase(resource, variableBucket, EncIDKey, EncBodyJSON, decodeVarEntFn, decValToEntFn), - IndexStore: NewOrgNameKeyStore(resource, variableIndexBucket, false), - } -} - -func (s *Service) findVariables(ctx context.Context, tx Tx, filter influxdb.VariableFilter, opt ...influxdb.FindOptions) ([]*influxdb.Variable, error) { - if filter.OrganizationID != nil { - return s.findOrganizationVariables(ctx, tx, *filter.OrganizationID) - } - - var o influxdb.FindOptions - if len(opt) > 0 { - o = opt[0] - } - - // TODO(jsteenb2): investigate why we don't implement the find options for vars? 
- variables := make([]*influxdb.Variable, 0) - err := s.variableStore.Find(ctx, tx, FindOpts{ - Descending: o.Descending, - Limit: o.Limit, - Offset: o.Offset, - FilterEntFn: filterVariablesFn(filter), - CaptureFn: func(key []byte, decodedVal interface{}) error { - variables = append(variables, decodedVal.(*influxdb.Variable)) - return nil - }, - }) - if err != nil { - return nil, err - } - return variables, nil -} - -func filterVariablesFn(filter influxdb.VariableFilter) func([]byte, interface{}) bool { - return func(key []byte, val interface{}) bool { - variable, ok := val.(*influxdb.Variable) - if !ok { - return false - } - - if filter.ID != nil { - return variable.ID == *filter.ID - } - - if filter.OrganizationID != nil { - return variable.OrganizationID == *filter.OrganizationID - } - - return true - } -} - -// FindVariables returns all variables in the store -func (s *Service) FindVariables(ctx context.Context, filter influxdb.VariableFilter, opt ...influxdb.FindOptions) ([]*influxdb.Variable, error) { - if filter.Organization != nil { - o, err := s.orgs.FindOrganization(ctx, influxdb.OrganizationFilter{ - Name: filter.Organization, - }) - if err != nil { - return nil, err - } - - filter.OrganizationID = &o.ID - } - - res := []*influxdb.Variable{} - err := s.kv.View(ctx, func(tx Tx) error { - variables, err := s.findVariables(ctx, tx, filter, opt...) - if err != nil && errors.ErrorCode(err) != errors.ENotFound { - return err - } - res = variables - return nil - }) - return res, err -} - -// FindVariableByID finds a single variable in the store by its ID -func (s *Service) FindVariableByID(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { - var variable *influxdb.Variable - err := s.kv.View(ctx, func(tx Tx) error { - m, err := s.findVariableByID(ctx, tx, id) - if err != nil { - return err - } - variable = m - return nil - }) - return variable, err -} - -func (s *Service) findVariableByID(ctx context.Context, tx Tx, id platform.ID) (*influxdb.Variable, error) { - body, err := s.variableStore.FindEnt(ctx, tx, Entity{PK: EncID(id)}) - if err != nil { - return nil, err - } - - variable, ok := body.(*influxdb.Variable) - return variable, IsErrUnexpectedDecodeVal(ok) -} - -// CreateVariable creates a new variable and assigns it an ID -func (s *Service) CreateVariable(ctx context.Context, v *influxdb.Variable) error { - return s.kv.Update(ctx, func(tx Tx) error { - if err := v.Valid(); err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - v.ID = s.IDGenerator.ID() - now := s.Now() - v.CreatedAt = now - v.UpdatedAt = now - return s.putVariable(ctx, tx, v, PutNew()) - }) -} - -// ReplaceVariable replaces a variable that exists in the store or creates it if it does not -func (s *Service) ReplaceVariable(ctx context.Context, v *influxdb.Variable) error { - return s.kv.Update(ctx, func(tx Tx) error { - if found, _ := s.findVariableByID(ctx, tx, v.ID); found != nil { - return s.putVariable(ctx, tx, v, PutUpdate()) - } - return s.putVariable(ctx, tx, v, PutNew()) - }) -} - -func (s *Service) putVariable(ctx context.Context, tx Tx, v *influxdb.Variable, putOpts ...PutOptionFn) error { - if err := s.putVariableOrgsIndex(tx, v); err != nil { - return err - } - - ent := Entity{ - PK: EncID(v.ID), - UniqueKey: Encode(EncID(v.OrganizationID), EncStringCaseInsensitive(v.Name)), - Body: v, - } - return s.variableStore.Put(ctx, tx, ent, putOpts...) 
-} - -// UpdateVariable updates a single variable in the store with a changeset -func (s *Service) UpdateVariable(ctx context.Context, id platform.ID, update *influxdb.VariableUpdate) (*influxdb.Variable, error) { - var v *influxdb.Variable - err := s.kv.Update(ctx, func(tx Tx) error { - m, err := s.findVariableByID(ctx, tx, id) - if err != nil { - return err - } - m.UpdatedAt = s.Now() - v = m - - // TODO: should be moved to service layer - update.Name = strings.TrimSpace(update.Name) - update.Apply(m) - - return s.putVariable(ctx, tx, v, PutUpdate()) - }) - - return v, err -} - -// DeleteVariable removes a single variable from the store by its ID -func (s *Service) DeleteVariable(ctx context.Context, id platform.ID) error { - return s.kv.Update(ctx, func(tx Tx) error { - v, err := s.findVariableByID(ctx, tx, id) - if err != nil { - return err - } - - if err := s.removeVariableOrgsIndex(tx, v); err != nil { - return err - } - return s.variableStore.DeleteEnt(ctx, tx, Entity{PK: EncID(id)}) - }) -} - -func encodeVariableOrgsIndex(variable *influxdb.Variable) ([]byte, error) { - oID, err := variable.OrganizationID.Encode() - if err != nil { - return nil, &errors.Error{ - Err: err, - Msg: "bad organization id", - } - } - - mID, err := variable.ID.Encode() - if err != nil { - return nil, &errors.Error{ - Err: err, - Msg: "bad variable id", - } - } - - key := make([]byte, 0, platform.IDLength*2) - key = append(key, oID...) - key = append(key, mID...) - - return key, nil -} - -func (s *Service) putVariableOrgsIndex(tx Tx, variable *influxdb.Variable) error { - key, err := encodeVariableOrgsIndex(variable) - if err != nil { - return err - } - - idx, err := tx.Bucket(variableOrgsIndex) - if err != nil { - return &errors.Error{Code: errors.EInternal, Err: err} - } - - if err := idx.Put(key, nil); err != nil { - return &errors.Error{Code: errors.EInternal, Err: err} - } - - return nil -} - -func (s *Service) removeVariableOrgsIndex(tx Tx, variable *influxdb.Variable) error { - key, err := encodeVariableOrgsIndex(variable) - if err != nil { - return err - } - - idx, err := tx.Bucket(variableOrgsIndex) - if err != nil { - return &errors.Error{Code: errors.EInternal, Err: err} - } - - if err := idx.Delete(key); err != nil { - return &errors.Error{Code: errors.EInternal, Err: err} - } - - return nil -} diff --git a/kv/variable_test.go b/kv/variable_test.go deleted file mode 100644 index e9bdaac9dce..00000000000 --- a/kv/variable_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package kv_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestBoltVariableService(t *testing.T) { - influxdbtesting.VariableService(initBoltVariableService, t) -} - -func initBoltVariableService(f influxdbtesting.VariableFields, t *testing.T) (influxdb.VariableService, string, func()) { - s, closeBolt := influxdbtesting.NewTestBoltStore(t) - svc, op, closeSvc := initVariableService(s, f, t) - return svc, op, func() { - closeSvc() - closeBolt() - } -} - -func initVariableService(s kv.SchemaStore, f influxdbtesting.VariableFields, t *testing.T) (influxdb.VariableService, string, func()) { - ctx := context.Background() - svc := kv.NewService(zaptest.NewLogger(t), s, &mock.OrganizationService{}) - svc.IDGenerator = f.IDGenerator - svc.TimeGenerator = f.TimeGenerator - if svc.TimeGenerator == nil 
{ - svc.TimeGenerator = influxdb.RealTimeGenerator{} - } - - for _, variable := range f.Variables { - if err := svc.ReplaceVariable(ctx, variable); err != nil { - t.Fatalf("failed to populate test variables: %v", err) - } - } - - done := func() { - for _, variable := range f.Variables { - if err := svc.DeleteVariable(ctx, variable.ID); err != nil { - t.Logf("failed to clean up variables bolt test: %v", err) - } - } - } - - return svc, kv.OpPrefix, done -} diff --git a/label.go b/label.go deleted file mode 100644 index 1447d3f10b1..00000000000 --- a/label.go +++ /dev/null @@ -1,143 +0,0 @@ -package influxdb - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// ErrLabelNotFound is the error for a missing Label. -const ErrLabelNotFound = "label not found" - -const ( - OpFindLabels = "FindLabels" - OpFindLabelByID = "FindLabelByID" - OpFindLabelMapping = "FindLabelMapping" - OpCreateLabel = "CreateLabel" - OpCreateLabelMapping = "CreateLabelMapping" - OpUpdateLabel = "UpdateLabel" - OpDeleteLabel = "DeleteLabel" - OpDeleteLabelMapping = "DeleteLabelMapping" -) - -// errors on label -var ( - // ErrLabelNameisEmpty is error when org name is empty - ErrLabelNameisEmpty = &errors.Error{ - Code: errors.EInvalid, - Msg: "label name is empty", - } - - // ErrLabelExistsOnResource is used when attempting to add a label to a resource - // when that label already exists on the resource - ErrLabelExistsOnResource = &errors.Error{ - Code: errors.EConflict, - Msg: "Cannot add label, label already exists on resource", - } -) - -// LabelService represents a service for managing resource labels -type LabelService interface { - // FindLabelByID a single label by ID. - FindLabelByID(ctx context.Context, id platform.ID) (*Label, error) - - // FindLabels returns a list of labels that match a filter - FindLabels(ctx context.Context, filter LabelFilter, opt ...FindOptions) ([]*Label, error) - - // FindResourceLabels returns a list of labels that belong to a resource - FindResourceLabels(ctx context.Context, filter LabelMappingFilter) ([]*Label, error) - - // CreateLabel creates a new label - CreateLabel(ctx context.Context, l *Label) error - - // CreateLabelMapping maps a resource to an existing label - CreateLabelMapping(ctx context.Context, m *LabelMapping) error - - // UpdateLabel updates a label with a changeset. - UpdateLabel(ctx context.Context, id platform.ID, upd LabelUpdate) (*Label, error) - - // DeleteLabel deletes a label - DeleteLabel(ctx context.Context, id platform.ID) error - - // DeleteLabelMapping deletes a label mapping - DeleteLabelMapping(ctx context.Context, m *LabelMapping) error -} - -// Label is a tag set on a resource, typically used for filtering on a UI. -type Label struct { - ID platform.ID `json:"id,omitempty"` - OrgID platform.ID `json:"orgID,omitempty"` - Name string `json:"name"` - Properties map[string]string `json:"properties,omitempty"` -} - -// Validate returns an error if the label is invalid. -func (l *Label) Validate() error { - if l.Name == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "label name is required", - } - } - - if !l.OrgID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "orgID is required", - } - } - - return nil -} - -// LabelMapping is used to map resource to its labels. -// It should not be shared directly over the HTTP API. 
-type LabelMapping struct { - LabelID platform.ID `json:"labelID"` - ResourceID platform.ID `json:"resourceID,omitempty"` - ResourceType `json:"resourceType"` -} - -// Validate returns an error if the mapping is invalid. -func (l *LabelMapping) Validate() error { - if !l.LabelID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "label id is required", - } - } - if !l.ResourceID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "resource id is required", - } - } - if err := l.ResourceType.Valid(); err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - return nil -} - -// LabelUpdate represents a changeset for a label. -// Only the properties specified are updated. -type LabelUpdate struct { - Name string `json:"name,omitempty"` - Properties map[string]string `json:"properties,omitempty"` -} - -// LabelFilter represents a set of filters that restrict the returned results. -type LabelFilter struct { - Name string - OrgID *platform.ID -} - -// LabelMappingFilter represents a set of filters that restrict the returned results. -type LabelMappingFilter struct { - ResourceID platform.ID - ResourceType -} diff --git a/label/error.go b/label/error.go deleted file mode 100644 index b71999e59d3..00000000000 --- a/label/error.go +++ /dev/null @@ -1,34 +0,0 @@ -package label - -import ( - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - // NotUniqueIDError occurs when attempting to create a Label with an ID that already belongs to another one - NotUniqueIDError = &errors.Error{ - Code: errors.EConflict, - Msg: "ID already exists", - } - - // ErrFailureGeneratingID occurs ony when the random number generator - // cannot generate an ID in MaxIDGenerationN times. - ErrFailureGeneratingID = &errors.Error{ - Code: errors.EInternal, - Msg: "unable to generate valid id", - } - - // ErrLabelNotFound occurs when a label cannot be found by its ID - ErrLabelNotFound = &errors.Error{ - Code: errors.ENotFound, - Msg: "label not found", - } -) - -// ErrInternalServiceError is used when the error comes from an internal system. -func ErrInternalServiceError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Err: err, - } -} diff --git a/label/http_client.go b/label/http_client.go deleted file mode 100644 index e8a22d50ca9..00000000000 --- a/label/http_client.go +++ /dev/null @@ -1,140 +0,0 @@ -package label - -import ( - "context" - "path" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/pkg/httpc" -) - -var _ influxdb.LabelService = (*LabelClientService)(nil) - -type LabelClientService struct { - Client *httpc.Client -} - -func labelIDPath(id platform.ID) string { - return path.Join(prefixLabels, id.String()) -} - -func resourceIDPath(resourceType influxdb.ResourceType, resourceID platform.ID, p string) string { - return path.Join("/api/v2/", string(resourceType), resourceID.String(), p) -} - -func resourceIDMappingPath(resourceType influxdb.ResourceType, resourceID platform.ID, p string, labelID platform.ID) string { - return path.Join("/api/v2/", string(resourceType), resourceID.String(), p, labelID.String()) -} - -// CreateLabel creates a new label. -func (s *LabelClientService) CreateLabel(ctx context.Context, l *influxdb.Label) error { - var lr labelResponse - err := s.Client. - PostJSON(l, prefixLabels). - DecodeJSON(&lr). 
- Do(ctx) - if err != nil { - return err - } - - *l = lr.Label - return nil -} - -// FindLabelByID returns a single label by ID. -func (s *LabelClientService) FindLabelByID(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - var lr labelResponse - err := s.Client. - Get(labelIDPath(id)). - DecodeJSON(&lr). - Do(ctx) - if err != nil { - return nil, err - } - return &lr.Label, nil -} - -// FindLabels is a client for the find labels response from the server. -func (s *LabelClientService) FindLabels(ctx context.Context, filter influxdb.LabelFilter, opt ...influxdb.FindOptions) ([]*influxdb.Label, error) { - params := influxdb.FindOptionParams(opt...) - if filter.OrgID != nil { - params = append(params, [2]string{"orgID", filter.OrgID.String()}) - } - if filter.Name != "" { - params = append(params, [2]string{"name", filter.Name}) - } - - var lr labelsResponse - err := s.Client. - Get(prefixLabels). - QueryParams(params...). - DecodeJSON(&lr). - Do(ctx) - if err != nil { - return nil, err - } - return lr.Labels, nil -} - -// FindResourceLabels returns a list of labels, derived from a label mapping filter. -func (s *LabelClientService) FindResourceLabels(ctx context.Context, filter influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - if err := filter.Valid(); err != nil { - return nil, err - } - - var r labelsResponse - err := s.Client. - Get(resourceIDPath(filter.ResourceType, filter.ResourceID, "labels")). - DecodeJSON(&r). - Do(ctx) - if err != nil { - return nil, err - } - return r.Labels, nil -} - -// UpdateLabel updates a label and returns the updated label. -func (s *LabelClientService) UpdateLabel(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { - var lr labelResponse - err := s.Client. - PatchJSON(upd, labelIDPath(id)). - DecodeJSON(&lr). - Do(ctx) - if err != nil { - return nil, err - } - return &lr.Label, nil -} - -// DeleteLabel removes a label by ID. -func (s *LabelClientService) DeleteLabel(ctx context.Context, id platform.ID) error { - return s.Client. - Delete(labelIDPath(id)). - Do(ctx) -} - -// ******* Label Mappings ******* // - -// CreateLabelMapping will create a label mapping -func (s *LabelClientService) CreateLabelMapping(ctx context.Context, m *influxdb.LabelMapping) error { - if err := m.Validate(); err != nil { - return err - } - - urlPath := resourceIDPath(m.ResourceType, m.ResourceID, "labels") - return s.Client. - PostJSON(m, urlPath). - DecodeJSON(m). - Do(ctx) -} - -func (s *LabelClientService) DeleteLabelMapping(ctx context.Context, m *influxdb.LabelMapping) error { - if err := m.Validate(); err != nil { - return err - } - - return s.Client. - Delete(resourceIDMappingPath(m.ResourceType, m.ResourceID, "labels", m.LabelID)). 
- Do(ctx) -} diff --git a/label/http_handler.go b/label/http_handler.go deleted file mode 100644 index 50bc7b0c8f5..00000000000 --- a/label/http_handler.go +++ /dev/null @@ -1,158 +0,0 @@ -package label - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -type LabelEmbeddedHandler struct { - chi.Router - api *kithttp.API - log *zap.Logger - labelSvc influxdb.LabelService - rt influxdb.ResourceType -} - -// NewHTTPEmbeddedHandler create a label handler for embedding in other service apis -func NewHTTPEmbeddedHandler(log *zap.Logger, rt influxdb.ResourceType, ls influxdb.LabelService) *LabelEmbeddedHandler { - h := &LabelEmbeddedHandler{ - api: kithttp.NewAPI(kithttp.WithLog(log)), - log: log, - labelSvc: ls, - rt: rt, - } - - r := chi.NewRouter() - r.NotFound(func(w http.ResponseWriter, r *http.Request) { - h.api.Err(w, r, &errors.Error{ - Code: errors.ENotFound, - Msg: "path not found", - }) - }) - r.MethodNotAllowed(func(w http.ResponseWriter, r *http.Request) { - h.api.Err(w, r, &errors.Error{ - Code: errors.EMethodNotAllowed, - Msg: fmt.Sprintf("allow: %s", w.Header().Get("Allow")), - }) - - }) - r.Use( - kithttp.SkipOptions, - middleware.StripSlashes, - kithttp.SetCORS, - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - - r.Route("/", func(r chi.Router) { - r.Post("/", h.handlePostLabelMapping) - r.Get("/", h.handleFindResourceLabels) - r.Delete("/{labelID}", h.handleDeleteLabelMapping) - }) - - h.Router = r - return h -} - -// handlePostLabelMapping create a new label mapping for the host service api -func (h *LabelEmbeddedHandler) handlePostLabelMapping(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - embeddedID, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, err) - return - } - - mapping := &influxdb.LabelMapping{} - if err := json.NewDecoder(r.Body).Decode(mapping); err != nil { - h.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Msg: "Invalid post label map request", - }) - } - - mapping.ResourceID = *embeddedID - mapping.ResourceType = h.rt - - if err := mapping.Validate(); err != nil { - h.api.Err(w, r, err) - return - } - - label, err := h.labelSvc.FindLabelByID(ctx, mapping.LabelID) - if err != nil { - h.api.Err(w, r, err) - return - } - - if err := h.labelSvc.CreateLabelMapping(ctx, mapping); err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusCreated, newLabelResponse(label)) -} - -// handleFindResourceLabels list labels that reference the host api -func (h *LabelEmbeddedHandler) handleFindResourceLabels(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - embeddedID, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, err) - return - } - filter := influxdb.LabelMappingFilter{ - ResourceID: *embeddedID, - ResourceType: h.rt, - } - - labels, err := h.labelSvc.FindResourceLabels(ctx, filter) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, newLabelsResponse(labels)) -} - -// handleDeleteLabelMapping delete a mapping for this host and label combination -func (h *LabelEmbeddedHandler) handleDeleteLabelMapping(w 
http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - embeddedID, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, err) - return - } - - labelID, err := platform.IDFromString(chi.URLParam(r, "labelID")) - if err != nil { - h.api.Err(w, r, err) - return - } - - mapping := &influxdb.LabelMapping{ - LabelID: *labelID, - ResourceID: *embeddedID, - ResourceType: h.rt, - } - - if err := h.labelSvc.DeleteLabelMapping(ctx, mapping); err != nil { - h.api.Err(w, r, err) - return - } - - w.WriteHeader(http.StatusNoContent) -} diff --git a/label/http_server.go b/label/http_server.go deleted file mode 100644 index 0e05690fa7b..00000000000 --- a/label/http_server.go +++ /dev/null @@ -1,193 +0,0 @@ -package label - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -type LabelHandler struct { - chi.Router - api *kithttp.API - log *zap.Logger - labelSvc influxdb.LabelService -} - -const ( - prefixLabels = "/api/v2/labels" -) - -func (h *LabelHandler) Prefix() string { - return prefixLabels -} - -func NewHTTPLabelHandler(log *zap.Logger, ls influxdb.LabelService) *LabelHandler { - h := &LabelHandler{ - api: kithttp.NewAPI(kithttp.WithLog(log)), - log: log, - labelSvc: ls, - } - - r := chi.NewRouter() - r.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - - r.Route("/", func(r chi.Router) { - r.Post("/", h.handlePostLabel) - r.Get("/", h.handleGetLabels) - - r.Route("/{id}", func(r chi.Router) { - r.Get("/", h.handleGetLabel) - r.Patch("/", h.handlePatchLabel) - r.Delete("/", h.handleDeleteLabel) - }) - }) - - h.Router = r - return h -} - -type labelResponse struct { - Links map[string]string `json:"links"` - Label influxdb.Label `json:"label"` -} - -func newLabelResponse(l *influxdb.Label) *labelResponse { - return &labelResponse{ - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/labels/%s", l.ID), - }, - Label: *l, - } -} - -type labelsResponse struct { - Links map[string]string `json:"links"` - Labels []*influxdb.Label `json:"labels"` -} - -func newLabelsResponse(ls []*influxdb.Label) *labelsResponse { - return &labelsResponse{ - Links: map[string]string{ - "self": "/api/v2/labels", - }, - Labels: ls, - } -} - -// handlePostLabel is the HTTP handler for the POST /api/v2/labels route. -func (h *LabelHandler) handlePostLabel(w http.ResponseWriter, r *http.Request) { - var label influxdb.Label - if err := h.api.DecodeJSON(r.Body, &label); err != nil { - h.api.Err(w, r, err) - return - } - - if err := label.Validate(); err != nil { - h.api.Err(w, r, err) - return - } - - if err := h.labelSvc.CreateLabel(r.Context(), &label); err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Label created", zap.String("label", fmt.Sprint(label))) - - h.api.Respond(w, r, http.StatusCreated, newLabelResponse(&label)) -} - -// handleGetLabel is the HTTP handler for the GET /api/v2/labels/id route. 
-func (h *LabelHandler) handleGetLabel(w http.ResponseWriter, r *http.Request) { - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, err) - return - } - - l, err := h.labelSvc.FindLabelByID(r.Context(), *id) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Label retrieved", zap.String("label", fmt.Sprint(l))) - - h.api.Respond(w, r, http.StatusOK, newLabelResponse(l)) -} - -// handleGetLabels is the HTTP handler for the GET /api/v2/labels route. -func (h *LabelHandler) handleGetLabels(w http.ResponseWriter, r *http.Request) { - var filter influxdb.LabelFilter - qp := r.URL.Query() - - if name := qp.Get("name"); name != "" { - filter.Name = name - } - - if orgID := qp.Get("orgID"); orgID != "" { - i, err := platform.IDFromString(orgID) - if err == nil { - filter.OrgID = i - } - } - - labels, err := h.labelSvc.FindLabels(r.Context(), filter) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Labels retrieved", zap.String("labels", fmt.Sprint(labels))) - - h.api.Respond(w, r, http.StatusOK, newLabelsResponse(labels)) -} - -// handlePatchLabel is the HTTP handler for the PATCH /api/v2/labels route. -func (h *LabelHandler) handlePatchLabel(w http.ResponseWriter, r *http.Request) { - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, err) - return - } - - upd := &influxdb.LabelUpdate{} - if err := json.NewDecoder(r.Body).Decode(upd); err != nil { - h.api.Err(w, r, err) - return - } - - l, err := h.labelSvc.UpdateLabel(r.Context(), *id, *upd) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Label updated", zap.String("label", fmt.Sprint(l))) - - h.api.Respond(w, r, http.StatusOK, newLabelResponse(l)) -} - -// handleDeleteLabel is the HTTP handler for the DELETE /api/v2/labels/:id route. 
-func (h *LabelHandler) handleDeleteLabel(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, err) - return - } - if err := h.labelSvc.DeleteLabel(ctx, *id); err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Label deleted", zap.String("labelID", fmt.Sprint(id))) - - h.api.Respond(w, r, http.StatusNoContent, nil) -} diff --git a/label/http_server_test.go b/label/http_server_test.go deleted file mode 100644 index a1c39a9c6b9..00000000000 --- a/label/http_server_test.go +++ /dev/null @@ -1,623 +0,0 @@ -package label - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/go-chi/chi" - "github.com/google/go-cmp/cmp" - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/yudai/gojsondiff" - "github.com/yudai/gojsondiff/formatter" - "go.uber.org/zap/zaptest" -) - -func TestService_handlePostLabel(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - label *influxdb.Label - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "create a new label", - fields: fields{ - &mock.LabelService{ - CreateLabelFn: func(ctx context.Context, l *influxdb.Label) error { - l.ID = influxdbtesting.MustIDBase16("020f755c3c082000") - return nil - }, - }, - }, - args: args{ - label: &influxdb.Label{ - Name: "mylabel", - OrgID: influxdbtesting.MustIDBase16("020f755c3c082008"), - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/labels/020f755c3c082000" - }, - "label": { - "id": "020f755c3c082000", - "name": "mylabel", - "orgID": "020f755c3c082008" - } -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handler := NewHTTPLabelHandler(zaptest.NewLogger(t), tt.fields.LabelService) - router := chi.NewRouter() - router.Mount(handler.Prefix(), handler) - - l, err := json.Marshal(tt.args.label) - if err != nil { - t.Fatalf("failed to marshal label: %v", err) - } - - r := httptest.NewRequest("GET", "http://any.url", bytes.NewReader(l)) - w := httptest.NewRecorder() - - handler.handlePostLabel(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePostLabel() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePostLabel() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil || tt.wants.body != "" && !eq { - t.Errorf("%q. 
handlePostLabel() = ***%v***", tt.name, diff) - } - }) - } -} - -func TestService_handleGetLabel(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get a label by id", - fields: fields{ - &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - if id == influxdbtesting.MustIDBase16("020f755c3c082000") { - return &influxdb.Label{ - ID: influxdbtesting.MustIDBase16("020f755c3c082000"), - Name: "mylabel", - Properties: map[string]string{ - "color": "fff000", - }, - }, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/labels/020f755c3c082000" - }, - "label": { - "id": "020f755c3c082000", - "name": "mylabel", - "properties": { - "color": "fff000" - } - } -} -`, - }, - }, - { - name: "not found", - fields: fields{ - &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrLabelNotFound, - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handler := NewHTTPLabelHandler(zaptest.NewLogger(t), tt.fields.LabelService) - router := chi.NewRouter() - router.Mount(handler.Prefix(), handler) - - r := httptest.NewRequest("GET", "http://any.url", nil) - rctx := chi.NewRouteContext() - rctx.URLParams.Add("id", tt.args.id) - r = r.WithContext(context.WithValue(r.Context(), chi.RouteCtxKey, rctx)) - - w := httptest.NewRecorder() - - handler.handleGetLabel(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetLabel() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetLabel() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetLabel(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handleGetLabel() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handleGetLabels(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - wants wants - }{ - { - name: "get all labels", - fields: fields{ - &mock.LabelService{ - FindLabelsFn: func(ctx context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: influxdbtesting.MustIDBase16("0b501e7e557ab1ed"), - Name: "hello", - Properties: map[string]string{ - "color": "fff000", - }, - }, - { - ID: influxdbtesting.MustIDBase16("c0175f0077a77005"), - Name: "example", - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/labels" - }, - "labels": [ - { - "id": "0b501e7e557ab1ed", - "name": "hello", - "properties": { - "color": "fff000" - } - }, - { - "id": "c0175f0077a77005", - "name": "example", - "properties": { - "color": "fff000" - } - } - ] -} -`, - }, - }, - { - name: "get all labels when there are none", - fields: fields{ - &mock.LabelService{ - FindLabelsFn: func(ctx context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{}, nil - }, - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/labels" - }, - "labels": [] -}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handler := NewHTTPLabelHandler(zaptest.NewLogger(t), tt.fields.LabelService) - router := chi.NewRouter() - router.Mount(handler.Prefix(), handler) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - w := httptest.NewRecorder() - - handler.handleGetLabels(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetLabels() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetLabels() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil || tt.wants.body != "" && !eq { - t.Errorf("%q. 
handleGetLabels() = ***%v***", tt.name, diff) - } - }) - } -} - -func TestService_handlePatchLabel(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - id string - properties map[string]string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "update label properties", - fields: fields{ - &mock.LabelService{ - UpdateLabelFn: func(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { - if id == influxdbtesting.MustIDBase16("020f755c3c082000") { - l := &influxdb.Label{ - ID: influxdbtesting.MustIDBase16("020f755c3c082000"), - Name: "mylabel", - Properties: map[string]string{ - "color": "fff000", - }, - } - - for k, v := range upd.Properties { - if v == "" { - delete(l.Properties, k) - } else { - l.Properties[k] = v - } - } - - return l, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - properties: map[string]string{ - "color": "aaabbb", - }, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/labels/020f755c3c082000" - }, - "label": { - "id": "020f755c3c082000", - "name": "mylabel", - "properties": { - "color": "aaabbb" - } - } -} -`, - }, - }, - { - name: "label not found", - fields: fields{ - &mock.LabelService{ - UpdateLabelFn: func(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrLabelNotFound, - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - properties: map[string]string{ - "color": "aaabbb", - }, - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handler := NewHTTPLabelHandler(zaptest.NewLogger(t), tt.fields.LabelService) - router := chi.NewRouter() - router.Mount(handler.Prefix(), handler) - - w := httptest.NewRecorder() - - upd := influxdb.LabelUpdate{} - if len(tt.args.properties) > 0 { - upd.Properties = tt.args.properties - } - - l, err := json.Marshal(upd) - if err != nil { - t.Fatalf("failed to marshal label update: %v", err) - } - - r := httptest.NewRequest("GET", "http://any.url", bytes.NewReader(l)) - rctx := chi.NewRouteContext() - rctx.URLParams.Add("id", tt.args.id) - r = r.WithContext(context.WithValue(r.Context(), chi.RouteCtxKey, rctx)) - - handler.handlePatchLabel(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handlePatchLabel() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePatchLabel() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handlePatchLabel(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handlePatchLabel() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func TestService_handleDeleteLabel(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "remove a label by id", - fields: fields{ - &mock.LabelService{ - DeleteLabelFn: func(ctx context.Context, id platform.ID) error { - if id == influxdbtesting.MustIDBase16("020f755c3c082000") { - return nil - } - - return fmt.Errorf("wrong id") - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNoContent, - }, - }, - { - name: "label not found", - fields: fields{ - &mock.LabelService{ - DeleteLabelFn: func(ctx context.Context, id platform.ID) error { - return &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrLabelNotFound, - } - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handler := NewHTTPLabelHandler(zaptest.NewLogger(t), tt.fields.LabelService) - router := chi.NewRouter() - router.Mount(handler.Prefix(), handler) - - w := httptest.NewRecorder() - - r := httptest.NewRequest("GET", "http://any.url", nil) - rctx := chi.NewRouteContext() - rctx.URLParams.Add("id", tt.args.id) - r = r.WithContext(context.WithValue(r.Context(), chi.RouteCtxKey, rctx)) - - handler.handleDeleteLabel(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleDeleteLabel() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleDeleteLabel() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if eq, diff, err := jsonEqual(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleDeleteLabel(). error unmarshalling json %v", tt.name, err) - } else if !eq { - t.Errorf("%q. 
handleDeleteLabel() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func jsonEqual(s1, s2 string) (eq bool, diff string, err error) { - if s1 == s2 { - return true, "", nil - } - - if s1 == "" { - return false, s2, fmt.Errorf("s1 is empty") - } - - if s2 == "" { - return false, s1, fmt.Errorf("s2 is empty") - } - - var o1 interface{} - if err = json.Unmarshal([]byte(s1), &o1); err != nil { - return - } - - var o2 interface{} - if err = json.Unmarshal([]byte(s2), &o2); err != nil { - return - } - - differ := gojsondiff.New() - d, err := differ.Compare([]byte(s1), []byte(s2)) - if err != nil { - return - } - - config := formatter.AsciiFormatterConfig{} - - formatter := formatter.NewAsciiFormatter(o1, config) - diff, err = formatter.Format(d) - - return cmp.Equal(o1, o2), diff, err -} diff --git a/label/middleware_auth.go b/label/middleware_auth.go deleted file mode 100644 index 9b202b36ec6..00000000000 --- a/label/middleware_auth.go +++ /dev/null @@ -1,135 +0,0 @@ -package label - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.LabelService = (*AuthedLabelService)(nil) - -type AuthedLabelService struct { - s influxdb.LabelService - orgIDResolver authorizer.OrgIDResolver -} - -// NewAuthedLabelService constructs an instance of an authorizing label serivce. -func NewAuthedLabelService(s influxdb.LabelService, orgIDResolver authorizer.OrgIDResolver) *AuthedLabelService { - return &AuthedLabelService{ - s: s, - orgIDResolver: orgIDResolver, - } -} -func (s *AuthedLabelService) CreateLabel(ctx context.Context, l *influxdb.Label) error { - if _, _, err := authorizer.AuthorizeCreate(ctx, influxdb.LabelsResourceType, l.OrgID); err != nil { - return err - } - return s.s.CreateLabel(ctx, l) -} - -func (s *AuthedLabelService) FindLabels(ctx context.Context, filter influxdb.LabelFilter, opt ...influxdb.FindOptions) ([]*influxdb.Label, error) { - // TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data - // will likely be expensive. - ls, err := s.s.FindLabels(ctx, filter, opt...) - if err != nil { - return nil, err - } - ls, _, err = authorizer.AuthorizeFindLabels(ctx, ls) - return ls, err -} - -// FindLabelByID checks to see if the authorizer on context has read access to the label id provided. -func (s *AuthedLabelService) FindLabelByID(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - l, err := s.s.FindLabelByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeRead(ctx, influxdb.LabelsResourceType, id, l.OrgID); err != nil { - return nil, err - } - return l, nil -} - -// FindResourceLabels retrieves all labels belonging to the filtering resource if the authorizer on context has read access to it. -// Then it filters the list down to only the labels that are authorized. 
-func (s *AuthedLabelService) FindResourceLabels(ctx context.Context, filter influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - if err := filter.ResourceType.Valid(); err != nil { - return nil, err - } - - orgID, err := s.orgIDResolver.FindResourceOrganizationID(ctx, filter.ResourceType, filter.ResourceID) - if err != nil { - return nil, err - } - - if _, _, err := authorizer.AuthorizeRead(ctx, filter.ResourceType, filter.ResourceID, orgID); err != nil { - return nil, err - } - - // first fetch all labels for this resource - ls, err := s.s.FindResourceLabels(ctx, filter) - if err != nil { - return nil, err - } - - // then filter the labels we got to return only the ones the user is authorized to read - ls, _, err = authorizer.AuthorizeFindLabels(ctx, ls) - return ls, err -} - -// UpdateLabel checks to see if the authorizer on context has write access to the label provided. -func (s *AuthedLabelService) UpdateLabel(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { - l, err := s.s.FindLabelByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.LabelsResourceType, l.ID, l.OrgID); err != nil { - return nil, err - } - return s.s.UpdateLabel(ctx, id, upd) -} - -// DeleteLabel checks to see if the authorizer on context has write access to the label provided. -func (s *AuthedLabelService) DeleteLabel(ctx context.Context, id platform.ID) error { - l, err := s.s.FindLabelByID(ctx, id) - if err != nil { - return err - } - if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.LabelsResourceType, l.ID, l.OrgID); err != nil { - return err - } - return s.s.DeleteLabel(ctx, id) -} - -// CreateLabelMapping checks to see if the authorizer on context has write access to the label and the resource contained by the label mapping in creation. -func (s *AuthedLabelService) CreateLabelMapping(ctx context.Context, m *influxdb.LabelMapping) error { - l, err := s.s.FindLabelByID(ctx, m.LabelID) - if err != nil { - return err - } - - if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.LabelsResourceType, m.LabelID, l.OrgID); err != nil { - return err - } - if _, _, err := authorizer.AuthorizeWrite(ctx, m.ResourceType, m.ResourceID, l.OrgID); err != nil { - return err - } - return s.s.CreateLabelMapping(ctx, m) -} - -// DeleteLabelMapping checks to see if the authorizer on context has write access to the label and the resource of the label mapping to delete. 
-func (s *AuthedLabelService) DeleteLabelMapping(ctx context.Context, m *influxdb.LabelMapping) error { - l, err := s.s.FindLabelByID(ctx, m.LabelID) - if err != nil { - return err - } - if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.LabelsResourceType, m.LabelID, l.OrgID); err != nil { - return err - } - if _, _, err := authorizer.AuthorizeWrite(ctx, m.ResourceType, m.ResourceID, l.OrgID); err != nil { - return err - } - return s.s.DeleteLabelMapping(ctx, m) -} diff --git a/label/middleware_auth_test.go b/label/middleware_auth_test.go deleted file mode 100644 index 64adc9ac2fd..00000000000 --- a/label/middleware_auth_test.go +++ /dev/null @@ -1,1129 +0,0 @@ -package label - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -const ( - orgOneID = "020f755c3c083000" -) - -var ( - orgOneInfluxID = influxdbtesting.MustIDBase16(orgOneID) - orgSvc = &mock.OrganizationService{ - FindResourceOrganizationIDF: func(_ context.Context, _ influxdb.ResourceType, _ platform.ID) (platform.ID, error) { - return orgOneInfluxID, nil - }, - } -) - -var labelCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Label) []*influxdb.Label { - out := append([]*influxdb.Label(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func TestLabelService_FindLabelByID(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: id, - OrgID: orgOneInfluxID, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: id, - OrgID: orgOneInfluxID, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := NewAuthedLabelService(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, 
[]influxdb.Permission{tt.args.permission})) - - _, err := s.FindLabelByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestLabelService_FindLabels(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - labels []*influxdb.Label - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all labels", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelsFn: func(ctx context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, - }, - }, - { - name: "authorized to access a single label", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelsFn: func(ctx context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - }, - }, - }, - { - name: "unable to access labels", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelsFn: func(ctx context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - // fixme(leodido) > should we return error in this case? 
- }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := NewAuthedLabelService(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - labels, err := s.FindLabels(ctx, influxdb.LabelFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(labels, tt.wants.labels, labelCmpOptions...); diff != "" { - t.Errorf("labels are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestLabelService_UpdateLabel(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update label", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - UpdateLabelFn: func(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update label", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - UpdateLabelFn: func(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := NewAuthedLabelService(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - _, err := s.UpdateLabel(ctx, tt.args.id, influxdb.LabelUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestLabelService_DeleteLabel(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete label", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - DeleteLabelFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - 
{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(1), - OrgID: influxdbtesting.IDPtr(orgOneInfluxID), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete label", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - DeleteLabelFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(1), - OrgID: influxdbtesting.IDPtr(orgOneInfluxID), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := NewAuthedLabelService(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.DeleteLabel(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestLabelService_CreateLabel(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "unauthorized to create label with read only permission", - fields: fields{ - LabelService: &mock.LabelService{ - CreateLabelFn: func(ctx context.Context, l *influxdb.Label) error { - return nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - ID: influxdbtesting.IDPtr(orgOneInfluxID), - Type: influxdb.OrgsResourceType, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/020f755c3c083000/labels is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - { - name: "unauthorized to create label with wrong write permission", - fields: fields{ - LabelService: &mock.LabelService{ - CreateLabelFn: func(ctx context.Context, b *influxdb.Label) error { - return nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/020f755c3c083000/labels is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - - { - name: "authorized to create label", - fields: fields{ - LabelService: &mock.LabelService{ - CreateLabelFn: func(ctx context.Context, l *influxdb.Label) error { - return nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - OrgID: influxdbtesting.IDPtr(orgOneInfluxID), - Type: influxdb.LabelsResourceType, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := NewAuthedLabelService(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.CreateLabel(ctx, 
&influxdb.Label{Name: "name", OrgID: orgOneInfluxID}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestLabelService_FindResourceLabels(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - filter influxdb.LabelMappingFilter - permissions []influxdb.Permission - } - type wants struct { - err error - labels []*influxdb.Label - } - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all labels belonging to a resource", - fields: fields{ - LabelService: &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, nil - }, - }, - }, - args: args{ - filter: influxdb.LabelMappingFilter{ - ResourceID: 10, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: nil, - labels: []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, - }, - }, - { - name: "authorized to access a single label", - fields: fields{ - LabelService: &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, nil - }, - }, - }, - args: args{ - filter: influxdb.LabelMappingFilter{ - ResourceID: 10, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - ID: influxdbtesting.IDPtr(3), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: nil, - labels: []*influxdb.Label{ - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, - }, - }, - { - name: "unable to access labels when missing read permission on labels", - fields: fields{ - LabelService: &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, nil - }, - }, - }, - args: args{ - filter: influxdb.LabelMappingFilter{ - ResourceID: 10, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - // fixme(leodido) > should we return error in this case? 
- }, - }, - { - name: "unable to access labels when missing read permission on filtering resource", - fields: fields{ - LabelService: &mock.LabelService{ - FindResourceLabelsFn: func(ctx context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: 1, - OrgID: orgOneInfluxID, - }, - { - ID: 2, - OrgID: orgOneInfluxID, - }, - { - ID: 3, - OrgID: orgOneInfluxID, - }, - }, nil - }, - }, - }, - args: args{ - filter: influxdb.LabelMappingFilter{ - ResourceID: 10, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/020f755c3c083000/buckets/000000000000000a is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := NewAuthedLabelService(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - labels, err := s.FindResourceLabels(ctx, tt.args.filter) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(labels, tt.wants.labels, labelCmpOptions...); diff != "" { - t.Errorf("labels are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestLabelService_CreateLabelMapping(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - mapping influxdb.LabelMapping - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create label mapping", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - CreateLabelMappingFn: func(ctx context.Context, lm *influxdb.LabelMapping) error { - return nil - }, - }, - }, - args: args{ - mapping: influxdb.LabelMapping{ - LabelID: 1, - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create label mapping for resources on which the user does not have write access", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - CreateLabelMappingFn: func(ctx context.Context, lm *influxdb.LabelMapping) error { - return nil - }, - }, - }, - args: args{ - mapping: influxdb.LabelMapping{ - LabelID: 1, - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EUnauthorized, - Msg: "write:orgs/020f755c3c083000/buckets/0000000000000002 is unauthorized", - }, - }, - }, - { - name: "unauthorized to create label 
mapping", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - CreateLabelMappingFn: func(ctx context.Context, lm *influxdb.LabelMapping) error { - return nil - }, - }, - }, - args: args{ - mapping: influxdb.LabelMapping{ - LabelID: 1, - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := NewAuthedLabelService(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.CreateLabelMapping(ctx, &tt.args.mapping) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestLabelService_DeleteLabelMapping(t *testing.T) { - type fields struct { - LabelService influxdb.LabelService - } - type args struct { - mapping influxdb.LabelMapping - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete label mapping", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - DeleteLabelMappingFn: func(ctx context.Context, m *influxdb.LabelMapping) error { - return nil - }, - }, - }, - args: args{ - mapping: influxdb.LabelMapping{ - LabelID: 1, - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete label mapping containing a resources on which the user does not have write access", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - DeleteLabelMappingFn: func(ctx context.Context, m *influxdb.LabelMapping) error { - return nil - }, - }, - }, - args: args{ - mapping: influxdb.LabelMapping{ - LabelID: 1, - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EUnauthorized, - Msg: "write:orgs/020f755c3c083000/buckets/0000000000000002 is unauthorized", - }, - }, - }, - { - name: "unauthorized to delete label mapping", - fields: fields{ - LabelService: &mock.LabelService{ - FindLabelByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: 1, - OrgID: orgOneInfluxID, - }, nil - }, - 
DeleteLabelMappingFn: func(ctx context.Context, m *influxdb.LabelMapping) error { - return nil - }, - }, - }, - args: args{ - mapping: influxdb.LabelMapping{ - LabelID: 1, - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.LabelsResourceType, - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/020f755c3c083000/labels/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := NewAuthedLabelService(tt.fields.LabelService, orgSvc) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.DeleteLabelMapping(ctx, &tt.args.mapping) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/label/middleware_logging.go b/label/middleware_logging.go deleted file mode 100644 index ca8d3ad2279..00000000000 --- a/label/middleware_logging.go +++ /dev/null @@ -1,128 +0,0 @@ -package label - -import ( - "context" - "fmt" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "go.uber.org/zap" -) - -type LabelLogger struct { - logger *zap.Logger - labelService influxdb.LabelService -} - -func NewLabelLogger(log *zap.Logger, s influxdb.LabelService) *LabelLogger { - return &LabelLogger{ - logger: log, - labelService: s, - } -} - -var _ influxdb.LabelService = (*LabelLogger)(nil) - -func (l *LabelLogger) CreateLabel(ctx context.Context, label *influxdb.Label) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to create label", zap.Error(err), dur) - return - } - l.logger.Debug("label create", dur) - }(time.Now()) - return l.labelService.CreateLabel(ctx, label) -} - -func (l *LabelLogger) FindLabelByID(ctx context.Context, id platform.ID) (label *influxdb.Label, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to find label with ID %v", id) - l.logger.Debug(msg, zap.Error(err), dur) - return - } - l.logger.Debug("label find by ID", dur) - }(time.Now()) - return l.labelService.FindLabelByID(ctx, id) -} - -func (l *LabelLogger) FindLabels(ctx context.Context, filter influxdb.LabelFilter, opt ...influxdb.FindOptions) (ls []*influxdb.Label, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find labels matching the given filter", zap.Error(err), dur) - return - } - l.logger.Debug("labels find", dur) - - }(time.Now()) - return l.labelService.FindLabels(ctx, filter, opt...) 
-} - -func (l *LabelLogger) FindResourceLabels(ctx context.Context, filter influxdb.LabelMappingFilter) (ls []*influxdb.Label, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find resource labels matching the given filter", zap.Error(err), dur) - return - } - l.logger.Debug("labels for resource find", dur) - - }(time.Now()) - return l.labelService.FindResourceLabels(ctx, filter) -} - -func (l *LabelLogger) UpdateLabel(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (lbl *influxdb.Label, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to update label", zap.Error(err), dur) - return - } - l.logger.Debug("label update", dur) - - }(time.Now()) - return l.labelService.UpdateLabel(ctx, id, upd) -} - -func (l *LabelLogger) DeleteLabel(ctx context.Context, id platform.ID) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to delete label", zap.Error(err), dur) - return - } - l.logger.Debug("label delete", dur) - - }(time.Now()) - return l.labelService.DeleteLabel(ctx, id) -} - -func (l *LabelLogger) CreateLabelMapping(ctx context.Context, m *influxdb.LabelMapping) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to create label mapping", zap.Error(err), dur) - return - } - l.logger.Debug("label mapping create", dur) - - }(time.Now()) - return l.labelService.CreateLabelMapping(ctx, m) -} - -func (l *LabelLogger) DeleteLabelMapping(ctx context.Context, m *influxdb.LabelMapping) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to delete label mapping", zap.Error(err), dur) - return - } - l.logger.Debug("label mapping delete", dur) - - }(time.Now()) - return l.labelService.DeleteLabelMapping(ctx, m) -} diff --git a/label/middleware_logging_test.go b/label/middleware_logging_test.go deleted file mode 100644 index 7a4ced6eadd..00000000000 --- a/label/middleware_logging_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package label_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/label" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestLabelLoggingService(t *testing.T) { - influxdbtesting.LabelService(initBoltLabelLoggingService, t) -} - -func initBoltLabelLoggingService(f influxdbtesting.LabelFields, t *testing.T) (influxdb.LabelService, string, func()) { - svc, s, closer := initBoltLabelService(f, t) - return label.NewLabelLogger(zaptest.NewLogger(t), svc), s, closer -} diff --git a/label/middleware_metrics.go b/label/middleware_metrics.go deleted file mode 100644 index df7cbc1544a..00000000000 --- a/label/middleware_metrics.go +++ /dev/null @@ -1,75 +0,0 @@ -package label - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/metric" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/prometheus/client_golang/prometheus" -) - -type LabelMetrics struct { - // RED metrics - rec *metric.REDClient - - labelService influxdb.LabelService -} - -func NewLabelMetrics(reg prometheus.Registerer, s influxdb.LabelService, opts 
...metric.ClientOptFn) *LabelMetrics { - o := metric.ApplyMetricOpts(opts...) - return &LabelMetrics{ - rec: metric.New(reg, o.ApplySuffix("org")), - labelService: s, - } -} - -var _ influxdb.LabelService = (*LabelMetrics)(nil) - -func (m *LabelMetrics) CreateLabel(ctx context.Context, l *influxdb.Label) (err error) { - rec := m.rec.Record("create_label") - err = m.labelService.CreateLabel(ctx, l) - return rec(err) -} - -func (m *LabelMetrics) FindLabelByID(ctx context.Context, id platform.ID) (label *influxdb.Label, err error) { - rec := m.rec.Record("find_label_by_id") - l, err := m.labelService.FindLabelByID(ctx, id) - return l, rec(err) -} - -func (m *LabelMetrics) FindLabels(ctx context.Context, filter influxdb.LabelFilter, opt ...influxdb.FindOptions) (ls []*influxdb.Label, err error) { - rec := m.rec.Record("find_labels") - l, err := m.labelService.FindLabels(ctx, filter, opt...) - return l, rec(err) -} - -func (m *LabelMetrics) FindResourceLabels(ctx context.Context, filter influxdb.LabelMappingFilter) (ls []*influxdb.Label, err error) { - rec := m.rec.Record("find_labels_for_resource") - l, err := m.labelService.FindResourceLabels(ctx, filter) - return l, rec(err) -} - -func (m *LabelMetrics) UpdateLabel(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (lbl *influxdb.Label, err error) { - rec := m.rec.Record("update_label") - l, err := m.labelService.UpdateLabel(ctx, id, upd) - return l, rec(err) -} - -func (m *LabelMetrics) DeleteLabel(ctx context.Context, id platform.ID) (err error) { - rec := m.rec.Record("delete_label") - err = m.labelService.DeleteLabel(ctx, id) - return rec(err) -} - -func (m *LabelMetrics) CreateLabelMapping(ctx context.Context, lm *influxdb.LabelMapping) (err error) { - rec := m.rec.Record("create_label_mapping") - err = m.labelService.CreateLabelMapping(ctx, lm) - return rec(err) -} - -func (m *LabelMetrics) DeleteLabelMapping(ctx context.Context, lm *influxdb.LabelMapping) (err error) { - rec := m.rec.Record("delete_label_mapping") - err = m.labelService.DeleteLabelMapping(ctx, lm) - return rec(err) -} diff --git a/label/middleware_metrics_test.go b/label/middleware_metrics_test.go deleted file mode 100644 index f35f0028c13..00000000000 --- a/label/middleware_metrics_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package label_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/label" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap" -) - -func TestLabelMetricsService(t *testing.T) { - influxdbtesting.LabelService(initBoltLabelMetricsService, t) -} - -func initBoltLabelMetricsService(f influxdbtesting.LabelFields, t *testing.T) (influxdb.LabelService, string, func()) { - svc, s, closer := initBoltLabelService(f, t) - reg := prom.NewRegistry(zap.NewNop()) - return label.NewLabelMetrics(reg, svc), s, closer -} diff --git a/label/service.go b/label/service.go deleted file mode 100644 index e1419ce7bc6..00000000000 --- a/label/service.go +++ /dev/null @@ -1,214 +0,0 @@ -package label - -import ( - "context" - "strings" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" -) - -type Service struct { - store *Store -} - -func NewService(st *Store) influxdb.LabelService { - return &Service{ - store: st, - } -} - -// 
CreateLabel creates a new label. -func (s *Service) CreateLabel(ctx context.Context, l *influxdb.Label) error { - if err := l.Validate(); err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - l.Name = strings.TrimSpace(l.Name) - - err := s.store.Update(ctx, func(tx kv.Tx) error { - if err := uniqueLabelName(ctx, tx, l); err != nil { - return err - } - - if err := s.store.CreateLabel(ctx, tx, l); err != nil { - return err - } - - return nil - }) - - if err != nil { - return err - } - - return err -} - -// FindLabelByID finds a label by its ID -func (s *Service) FindLabelByID(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - var l *influxdb.Label - - err := s.store.View(ctx, func(tx kv.Tx) error { - label, e := s.store.GetLabel(ctx, tx, id) - if e != nil { - return e - } - l = label - return nil - }) - - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - return l, nil -} - -// FindLabels returns a list of labels that match a filter. -func (s *Service) FindLabels(ctx context.Context, filter influxdb.LabelFilter, opt ...influxdb.FindOptions) ([]*influxdb.Label, error) { - ls := []*influxdb.Label{} - err := s.store.View(ctx, func(tx kv.Tx) error { - labels, err := s.store.ListLabels(ctx, tx, filter) - if err != nil { - return err - } - ls = labels - return nil - }) - - if err != nil { - return nil, err - } - - return ls, nil -} - -func (s *Service) FindResourceLabels(ctx context.Context, filter influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - ls := []*influxdb.Label{} - if err := s.store.View(ctx, func(tx kv.Tx) error { - return s.store.FindResourceLabels(ctx, tx, filter, &ls) - }); err != nil { - return nil, err - } - - return ls, nil -} - -// UpdateLabel updates a label. -func (s *Service) UpdateLabel(ctx context.Context, id platform.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { - var label *influxdb.Label - err := s.store.Update(ctx, func(tx kv.Tx) error { - l, e := s.store.UpdateLabel(ctx, tx, id, upd) - if e != nil { - return &errors.Error{ - Err: e, - } - } - label = l - return nil - }) - - return label, err -} - -// DeleteLabel deletes a label. -func (s *Service) DeleteLabel(ctx context.Context, id platform.ID) error { - err := s.store.Update(ctx, func(tx kv.Tx) error { - return s.store.DeleteLabel(ctx, tx, id) - }) - if err != nil { - return &errors.Error{ - Err: err, - } - } - return nil -} - -//******* Label Mappings *******// - -// CreateLabelMapping creates a new mapping between a resource and a label. -func (s *Service) CreateLabelMapping(ctx context.Context, m *influxdb.LabelMapping) error { - err := s.store.View(ctx, func(tx kv.Tx) error { - if _, err := s.store.GetLabel(ctx, tx, m.LabelID); err != nil { - return err - } - - ls := []*influxdb.Label{} - err := s.store.FindResourceLabels(ctx, tx, influxdb.LabelMappingFilter{ResourceID: m.ResourceID, ResourceType: m.ResourceType}, &ls) - if err != nil { - return err - } - for i := 0; i < len(ls); i++ { - if ls[i].ID == m.LabelID { - return influxdb.ErrLabelExistsOnResource - } - } - - return nil - }) - if err != nil { - return err - } - - return s.store.Update(ctx, func(tx kv.Tx) error { - return s.store.CreateLabelMapping(ctx, tx, m) - }) -} - -// DeleteLabelMapping deletes a label mapping. 
-func (s *Service) DeleteLabelMapping(ctx context.Context, m *influxdb.LabelMapping) error { - err := s.store.Update(ctx, func(tx kv.Tx) error { - return s.store.DeleteLabelMapping(ctx, tx, m) - }) - if err != nil { - return &errors.Error{ - Err: err, - } - } - return nil -} - -//******* helper functions *******// - -func unique(ctx context.Context, tx kv.Tx, indexBucket, indexKey []byte) error { - bucket, err := tx.Bucket(indexBucket) - if err != nil { - return kv.UnexpectedIndexError(err) - } - - _, err = bucket.Get(indexKey) - // if not found then this is _unique_. - if kv.IsNotFound(err) { - return nil - } - - // no error means this is not unique - if err == nil { - return kv.NotUniqueError - } - - // any other error is some sort of internal server error - return kv.UnexpectedIndexError(err) -} - -func uniqueLabelName(ctx context.Context, tx kv.Tx, lbl *influxdb.Label) error { - key, err := labelIndexKey(lbl) - if err != nil { - return err - } - - // labels are unique by `organization:label_name` - err = unique(ctx, tx, labelIndex, key) - if err == kv.NotUniqueError { - return labelAlreadyExistsError(lbl) - } - return err -} diff --git a/label/service_test.go b/label/service_test.go deleted file mode 100644 index 21d144d9d48..00000000000 --- a/label/service_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package label_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/label" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestBoltLabelService(t *testing.T) { - influxdbtesting.LabelService(initBoltLabelService, t) -} - -func initBoltLabelService(f influxdbtesting.LabelFields, t *testing.T) (influxdb.LabelService, string, func()) { - s, closeBolt := influxdbtesting.NewTestBoltStore(t) - svc, op, closeSvc := initLabelService(s, f, t) - return svc, op, func() { - closeSvc() - closeBolt() - } -} - -func initLabelService(s kv.Store, f influxdbtesting.LabelFields, t *testing.T) (influxdb.LabelService, string, func()) { - st, err := label.NewStore(s) - if err != nil { - t.Fatalf("failed to create label store: %v", err) - } - - if f.IDGenerator != nil { - st.IDGenerator = f.IDGenerator - } - - svc := label.NewService(st) - ctx := context.Background() - - for _, l := range f.Labels { - mock.SetIDForFunc(&st.IDGenerator, l.ID, func() { - if err := svc.CreateLabel(ctx, l); err != nil { - t.Fatalf("failed to populate labels: %v", err) - } - }) - } - - for _, m := range f.Mappings { - if err := svc.CreateLabelMapping(ctx, m); err != nil { - t.Fatalf("failed to populate label mappings: %v", err) - } - } - - return svc, kv.OpPrefix, func() { - for _, l := range f.Labels { - if err := svc.DeleteLabel(ctx, l.ID); err != nil { - t.Logf("failed to remove label: %v", err) - } - } - } -} diff --git a/label/storage.go b/label/storage.go deleted file mode 100644 index 85394b8ef91..00000000000 --- a/label/storage.go +++ /dev/null @@ -1,107 +0,0 @@ -package label - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/snowflake" -) - -const MaxIDGenerationN = 100 -const ReservedIDs = 1000 - -var ( - labelBucket = []byte("labelsv1") - labelMappingBucket = 
[]byte("labelmappingsv1") - labelIndex = []byte("labelindexv1") -) - -type Store struct { - kvStore kv.Store - IDGenerator platform.IDGenerator -} - -func NewStore(kvStore kv.Store) (*Store, error) { - st := &Store{ - kvStore: kvStore, - IDGenerator: snowflake.NewDefaultIDGenerator(), - } - return st, st.setup() -} - -// View opens up a transaction that will not write to any data. Implementing interfaces -// should take care to ensure that all view transactions do not mutate any data. -func (s *Store) View(ctx context.Context, fn func(kv.Tx) error) error { - return s.kvStore.View(ctx, fn) -} - -// Update opens up a transaction that will mutate data. -func (s *Store) Update(ctx context.Context, fn func(kv.Tx) error) error { - return s.kvStore.Update(ctx, fn) -} - -func (s *Store) setup() error { - return s.Update(context.Background(), func(tx kv.Tx) error { - if _, err := tx.Bucket(labelBucket); err != nil { - return err - } - - if _, err := tx.Bucket(labelMappingBucket); err != nil { - return err - } - - if _, err := tx.Bucket(labelIndex); err != nil { - return err - } - - return nil - }) -} - -// generateSafeID attempts to create ids for buckets -// and orgs that are without backslash, commas, and spaces, BUT ALSO do not already exist. -func (s *Store) generateSafeID(ctx context.Context, tx kv.Tx, bucket []byte) (platform.ID, error) { - for i := 0; i < MaxIDGenerationN; i++ { - id := s.IDGenerator.ID() - - err := s.uniqueID(ctx, tx, bucket, id) - if err == nil { - return id, nil - } - - if err == NotUniqueIDError { - continue - } - - return platform.InvalidID(), err - } - return platform.InvalidID(), ErrFailureGeneratingID -} - -func (s *Store) uniqueID(ctx context.Context, tx kv.Tx, bucket []byte, id platform.ID) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - encodedID, err := id.Encode() - if err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - b, err := tx.Bucket(bucket) - if err != nil { - return err - } - - _, err = b.Get(encodedID) - if kv.IsNotFound(err) { - return nil - } - - return NotUniqueIDError -} diff --git a/label/storage_label.go b/label/storage_label.go deleted file mode 100644 index 167ca6f6aef..00000000000 --- a/label/storage_label.go +++ /dev/null @@ -1,469 +0,0 @@ -package label - -import ( - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" -) - -func (s *Store) CreateLabel(ctx context.Context, tx kv.Tx, l *influxdb.Label) error { - // if the provided ID is invalid, or already maps to an existing Auth, then generate a new one - id, err := s.generateSafeID(ctx, tx, labelBucket) - if err != nil { - return nil - } - l.ID = id - - v, err := json.Marshal(l) - if err != nil { - return &errors.Error{ - Err: err, - } - } - - encodedID, err := l.ID.Encode() - if err != nil { - return &errors.Error{ - Err: err, - } - } - - idx, err := tx.Bucket(labelIndex) - if err != nil { - return &errors.Error{ - Err: err, - } - } - - key, err := labelIndexKey(l) - if err != nil { - return &errors.Error{ - Err: err, - } - } - - if err := idx.Put([]byte(key), encodedID); err != nil { - return &errors.Error{ - Err: err, - } - } - - b, err := tx.Bucket(labelBucket) - if err != nil { - return &errors.Error{ - Err: err, - } - } - - if err := b.Put(encodedID, v); err != nil { - return &errors.Error{ - Err: err, - } 
- } - - return nil -} - -func (s *Store) ListLabels(ctx context.Context, tx kv.Tx, filter influxdb.LabelFilter) ([]*influxdb.Label, error) { - ls := []*influxdb.Label{} - filterFn := filterLabelsFn(filter) - err := forEachLabel(ctx, tx, func(l *influxdb.Label) bool { - if filterFn(l) { - ls = append(ls, l) - } - return true - }) - - if err != nil { - return nil, err - } - - return ls, nil -} - -func (s *Store) GetLabel(ctx context.Context, tx kv.Tx, id platform.ID) (*influxdb.Label, error) { - encodedID, err := id.Encode() - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - b, err := tx.Bucket(labelBucket) - if err != nil { - return nil, err - } - - v, err := b.Get(encodedID) - if kv.IsNotFound(err) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrLabelNotFound, - } - } - - if err != nil { - return nil, err - } - - var l influxdb.Label - if err := json.Unmarshal(v, &l); err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - return &l, nil -} - -func (s *Store) UpdateLabel(ctx context.Context, tx kv.Tx, id platform.ID, upd influxdb.LabelUpdate) (*influxdb.Label, error) { - label, err := s.GetLabel(ctx, tx, id) - if err != nil { - return nil, err - } - - if len(upd.Properties) > 0 && label.Properties == nil { - label.Properties = make(map[string]string) - } - - for k, v := range upd.Properties { - if v == "" { - delete(label.Properties, k) - } else { - label.Properties[k] = v - } - } - - if upd.Name != "" { - upd.Name = strings.TrimSpace(upd.Name) - - idx, err := tx.Bucket(labelIndex) - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - key, err := labelIndexKey(label) - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - if err := idx.Delete(key); err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - label.Name = upd.Name - if err := uniqueLabelName(ctx, tx, label); err != nil { - return nil, &errors.Error{ - Err: err, - } - } - } - - if err := label.Validate(); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - v, err := json.Marshal(label) - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - encodedID, err := label.ID.Encode() - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - idx, err := tx.Bucket(labelIndex) - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - key, err := labelIndexKey(label) - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - if err := idx.Put([]byte(key), encodedID); err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - b, err := tx.Bucket(labelBucket) - if err != nil { - return nil, err - } - - if err := b.Put(encodedID, v); err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - return label, nil -} - -func (s *Store) DeleteLabel(ctx context.Context, tx kv.Tx, id platform.ID) error { - label, err := s.GetLabel(ctx, tx, id) - if err != nil { - return ErrLabelNotFound - } - encodedID, idErr := id.Encode() - if idErr != nil { - return &errors.Error{ - Err: idErr, - } - } - - b, err := tx.Bucket(labelBucket) - if err != nil { - return err - } - - if err := b.Delete(encodedID); err != nil { - return &errors.Error{ - Err: err, - } - } - - idx, err := tx.Bucket(labelIndex) - if err != nil { - return &errors.Error{ - Err: err, - } - } - - key, err := labelIndexKey(label) - if err != nil { - return &errors.Error{ - Err: err, - } - } - - if err := idx.Delete(key); err != nil { - return &errors.Error{ - Err: err, - } - } - - return nil -} - 
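Before the label-mapping helpers that follow, it may help to see how this transactional Store API is driven end to end. The sketch below is not part of the deleted files; it simply mirrors the pattern used in storage_test.go elsewhere in this diff (an inmem KV store plus the all.Up migrations), and the label name "demo" and org ID are illustrative assumptions.

// Sketch: driving the label Store through kv transactions, mirroring the
// pattern in storage_test.go. Illustrative only; not part of the deleted files.
package main

import (
	"context"
	"fmt"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/inmem"
	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/kv"
	"github.com/influxdata/influxdb/v2/kv/migration/all"
	"github.com/influxdata/influxdb/v2/label"
	"go.uber.org/zap"
)

func main() {
	ctx := context.Background()

	kvStore := inmem.NewKVStore()
	if err := all.Up(ctx, zap.NewNop(), kvStore); err != nil {
		panic(err)
	}

	st, err := label.NewStore(kvStore)
	if err != nil {
		panic(err)
	}

	// Writes go through Update; CreateLabel fills in the generated ID and
	// maintains both the label bucket and the org+name index in one transaction.
	l := &influxdb.Label{Name: "demo", OrgID: platform.ID(1)}
	if err := st.Update(ctx, func(tx kv.Tx) error {
		return st.CreateLabel(ctx, tx, l)
	}); err != nil {
		panic(err)
	}

	// Reads go through View, which must not mutate any data.
	if err := st.View(ctx, func(tx kv.Tx) error {
		got, err := st.GetLabel(ctx, tx, l.ID)
		if err != nil {
			return err
		}
		fmt.Println("found label:", got.Name)
		return nil
	}); err != nil {
		panic(err)
	}
}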
-//********* Label Mappings *********// - -func (s *Store) CreateLabelMapping(ctx context.Context, tx kv.Tx, m *influxdb.LabelMapping) error { - v, err := json.Marshal(m) - if err != nil { - return &errors.Error{ - Err: err, - } - } - - key, err := labelMappingKey(m) - if err != nil { - return &errors.Error{ - Err: err, - } - } - - idx, err := tx.Bucket(labelMappingBucket) - if err != nil { - return err - } - - if err := idx.Put(key, v); err != nil { - return &errors.Error{ - Err: err, - } - } - - return nil -} - -func (s *Store) FindResourceLabels(ctx context.Context, tx kv.Tx, filter influxdb.LabelMappingFilter, ls *[]*influxdb.Label) error { - if !filter.ResourceID.Valid() { - return &errors.Error{Code: errors.EInvalid, Msg: "filter requires a valid resource id", Err: platform.ErrInvalidID} - } - idx, err := tx.Bucket(labelMappingBucket) - if err != nil { - return err - } - - prefix, err := filter.ResourceID.Encode() - if err != nil { - return err - } - - cur, err := idx.ForwardCursor(prefix, kv.WithCursorPrefix(prefix)) - if err != nil { - return err - } - - for k, _ := cur.Next(); k != nil; k, _ = cur.Next() { - _, id, err := decodeLabelMappingKey(k) - if err != nil { - return err - } - - l, err := s.GetLabel(ctx, tx, id) - if l == nil && err != nil { - // TODO(jm): return error instead of continuing once orphaned mappings are fixed - // (see https://github.com/influxdata/influxdb/issues/11278) - continue - } - - *ls = append(*ls, l) - } - - if err := cur.Err(); err != nil { - return err - } - - return cur.Close() -} - -func (s *Store) DeleteLabelMapping(ctx context.Context, tx kv.Tx, m *influxdb.LabelMapping) error { - key, err := labelMappingKey(m) - if err != nil { - return &errors.Error{ - Err: err, - } - } - - idx, err := tx.Bucket(labelMappingBucket) - if err != nil { - return err - } - - if err := idx.Delete(key); err != nil { - return &errors.Error{ - Err: err, - } - } - - return nil -} - -//********* helper functions *********// - -func labelMappingKey(m *influxdb.LabelMapping) ([]byte, error) { - lid, err := m.LabelID.Encode() - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - rid, err := m.ResourceID.Encode() - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - key := make([]byte, platform.IDLength+platform.IDLength) // len(rid) + len(lid) - copy(key, rid) - copy(key[len(rid):], lid) - - return key, nil -} - -// labelAlreadyExistsError is used when creating a new label with -// a name that has already been used. Label names must be unique. 
-func labelAlreadyExistsError(lbl *influxdb.Label) error { - return &errors.Error{ - Code: errors.EConflict, - Msg: fmt.Sprintf("label with name %s already exists", lbl.Name), - } -} - -func labelIndexKey(l *influxdb.Label) ([]byte, error) { - orgID, err := l.OrgID.Encode() - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - k := make([]byte, platform.IDLength+len(l.Name)) - copy(k, orgID) - copy(k[platform.IDLength:], []byte(strings.ToLower((l.Name)))) - return k, nil -} - -func filterLabelsFn(filter influxdb.LabelFilter) func(l *influxdb.Label) bool { - return func(label *influxdb.Label) bool { - return (filter.Name == "" || (strings.EqualFold(filter.Name, label.Name))) && - ((filter.OrgID == nil) || (filter.OrgID != nil && *filter.OrgID == label.OrgID)) - } -} - -func decodeLabelMappingKey(key []byte) (resourceID platform.ID, labelID platform.ID, err error) { - if len(key) != 2*platform.IDLength { - return 0, 0, &errors.Error{Code: errors.EInvalid, Msg: "malformed label mapping key (please report this error)"} - } - - if err := (&resourceID).Decode(key[:platform.IDLength]); err != nil { - return 0, 0, &errors.Error{Code: errors.EInvalid, Msg: "bad resource id", Err: platform.ErrInvalidID} - } - - if err := (&labelID).Decode(key[platform.IDLength:]); err != nil { - return 0, 0, &errors.Error{Code: errors.EInvalid, Msg: "bad label id", Err: platform.ErrInvalidID} - } - - return resourceID, labelID, nil -} - -func forEachLabel(ctx context.Context, tx kv.Tx, fn func(*influxdb.Label) bool) error { - b, err := tx.Bucket(labelBucket) - if err != nil { - return err - } - - cur, err := b.ForwardCursor(nil) - if err != nil { - return err - } - - for k, v := cur.Next(); k != nil; k, v = cur.Next() { - l := &influxdb.Label{} - if err := json.Unmarshal(v, l); err != nil { - return err - } - if !fn(l) { - break - } - } - - if err := cur.Err(); err != nil { - return err - } - - return cur.Close() -} diff --git a/label/storage_test.go b/label/storage_test.go deleted file mode 100644 index 653413a1fcc..00000000000 --- a/label/storage_test.go +++ /dev/null @@ -1,282 +0,0 @@ -package label_test - -import ( - "context" - "fmt" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/label" - "github.com/influxdata/influxdb/v2/mock" - "go.uber.org/zap/zaptest" -) - -func TestLabels(t *testing.T) { - setup := func(t *testing.T, store *label.Store, tx kv.Tx) { - for i := 1; i <= 10; i++ { - mock.SetIDForFunc(&store.IDGenerator, platform.ID(i), func() { - err := store.CreateLabel(context.Background(), tx, &influxdb.Label{ - Name: fmt.Sprintf("labelname%d", i), - OrgID: platform.ID(i), - }) - - if err != nil { - t.Fatal(err) - } - }) - } - } - - setupForList := func(t *testing.T, store *label.Store, tx kv.Tx) { - setup(t, store, tx) - - mock.SetIDForFunc(&store.IDGenerator, platform.ID(11), func() { - err := store.CreateLabel(context.Background(), tx, &influxdb.Label{ - Name: fmt.Sprintf("labelname%d", 11), - OrgID: platform.ID(5), - }) - if err != nil { - t.Fatal(err) - } - }) - } - - tt := []struct { - name string - setup func(*testing.T, *label.Store, kv.Tx) - update func(*testing.T, *label.Store, kv.Tx) - results func(*testing.T, *label.Store, kv.Tx) - }{ - { - name: "create", - 
setup: setup, - results: func(t *testing.T, store *label.Store, tx kv.Tx) { - labels, err := store.ListLabels(context.Background(), tx, influxdb.LabelFilter{}) - if err != nil { - t.Fatal(err) - } - - if len(labels) != 10 { - t.Fatalf("expected 10 labels, got: %d", len(labels)) - } - - expected := []*influxdb.Label{} - for i := 1; i <= 10; i++ { - expected = append(expected, &influxdb.Label{ - ID: platform.ID(i), - Name: fmt.Sprintf("labelname%d", i), - OrgID: platform.ID(i), - }) - } - if !reflect.DeepEqual(labels, expected) { - t.Fatalf("expected identical labels: \n%+v\n%+v", labels, expected) - } - }, - }, - { - name: "get", - setup: setup, - results: func(t *testing.T, store *label.Store, tx kv.Tx) { - label, err := store.GetLabel(context.Background(), tx, platform.ID(1)) - if err != nil { - t.Fatal(err) - } - - expected := &influxdb.Label{ - ID: platform.ID(1), - Name: "labelname1", - OrgID: platform.ID(1), - } - - if !reflect.DeepEqual(label, expected) { - t.Fatalf("expected identical label: \n%+v\n%+v", label, expected) - } - }, - }, - { - name: "list", - setup: setupForList, - results: func(t *testing.T, store *label.Store, tx kv.Tx) { - // list all - labels, err := store.ListLabels(context.Background(), tx, influxdb.LabelFilter{}) - if err != nil { - t.Fatal(err) - } - - if len(labels) != 11 { - t.Fatalf("expected 11 labels, got: %d", len(labels)) - } - - expected := []*influxdb.Label{} - for i := 1; i <= 10; i++ { - expected = append(expected, &influxdb.Label{ - ID: platform.ID(i), - Name: fmt.Sprintf("labelname%d", i), - OrgID: platform.ID(i), - }) - } - expected = append(expected, &influxdb.Label{ - ID: platform.ID(11), - Name: fmt.Sprintf("labelname%d", 11), - OrgID: platform.ID(5), - }) - - if !reflect.DeepEqual(labels, expected) { - t.Fatalf("expected identical labels: \n%+v\n%+v", labels, expected) - } - - // filter by name - l, err := store.ListLabels(context.Background(), tx, influxdb.LabelFilter{Name: "labelname5"}) - if err != nil { - t.Fatal(err) - } - - if len(l) != 1 { - t.Fatalf("expected 1 label, got: %d", len(l)) - } - - expectedLabel := []*influxdb.Label{&influxdb.Label{ - ID: platform.ID(5), - Name: "labelname5", - OrgID: platform.ID(5), - }} - if !reflect.DeepEqual(l, expectedLabel) { - t.Fatalf("label returned by list did not match expected: \n%+v\n%+v", l, expectedLabel) - } - - // filter by org id - id := platform.ID(5) - l, err = store.ListLabels(context.Background(), tx, influxdb.LabelFilter{OrgID: &id}) - if err != nil { - t.Fatal(err) - } - - if len(l) != 2 { - t.Fatalf("expected 2 labels, got: %d", len(l)) - } - - expectedLabel = []*influxdb.Label{ - &influxdb.Label{ - ID: platform.ID(5), - Name: "labelname5", - OrgID: platform.ID(5)}, - { - ID: platform.ID(11), - Name: "labelname11", - OrgID: platform.ID(5), - }} - if !reflect.DeepEqual(l, expectedLabel) { - t.Fatalf("label returned by list did not match expected: \n%+v\n%+v", l, expectedLabel) - } - }, - }, - { - name: "update", - setup: setup, - update: func(t *testing.T, store *label.Store, tx kv.Tx) { - upd := influxdb.LabelUpdate{Name: "newName"} - updated, err := store.UpdateLabel(context.Background(), tx, platform.ID(1), upd) - if err != nil { - t.Fatal(err) - } - - if updated.Name != upd.Name { - t.Fatalf("expected updated name %s, got: %s", upd.Name, updated.Name) - } - }, - results: func(t *testing.T, store *label.Store, tx kv.Tx) { - la, err := store.GetLabel(context.Background(), tx, platform.ID(1)) - if err != nil { - t.Fatal(err) - } - - if la.Name != "newName" { - t.Fatalf("expected 
update name to be %s, got: %s", "newName", la.Name) - } - }, - }, - { - name: "delete", - setup: setup, - update: func(t *testing.T, store *label.Store, tx kv.Tx) { - err := store.DeleteLabel(context.Background(), tx, platform.ID(5)) - if err != nil { - t.Fatal(err) - } - - err = store.DeleteLabel(context.Background(), tx, platform.ID(5)) - if err != label.ErrLabelNotFound { - t.Fatal("expected label not found error when deleting bucket that has already been deleted, got: ", err) - } - }, - results: func(t *testing.T, store *label.Store, tx kv.Tx) { - l, err := store.ListLabels(context.Background(), tx, influxdb.LabelFilter{}) - if err != nil { - t.Fatal(err) - } - - if len(l) != 9 { - t.Fatalf("expected 9 labels, got: %d", len(l)) - } - }, - }, - } - - for _, ts := range tt { - testScenario := ts - t.Run(testScenario.name, func(t *testing.T) { - t.Parallel() - - store := inmem.NewKVStore() - if err := all.Up(context.Background(), zaptest.NewLogger(t), store); err != nil { - t.Fatal(err) - } - - ts, err := label.NewStore(store) - if err != nil { - t.Fatal(err) - } - - // setup - if testScenario.setup != nil { - err := ts.Update(context.Background(), func(tx kv.Tx) error { - testScenario.setup(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - - // update - if testScenario.update != nil { - err := ts.Update(context.Background(), func(tx kv.Tx) error { - testScenario.update(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - - // results - if testScenario.results != nil { - err := ts.View(context.Background(), func(tx kv.Tx) error { - testScenario.results(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - }) - } -} diff --git a/label_test.go b/label_test.go deleted file mode 100644 index 4ae887c2cf4..00000000000 --- a/label_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package influxdb_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - influxtest "github.com/influxdata/influxdb/v2/testing" -) - -const ( - orgOneID = "020f755c3c083000" -) - -func TestLabelValidate(t *testing.T) { - type fields struct { - Name string - OrgID platform.ID - } - tests := []struct { - name string - fields fields - wantErr bool - }{ - { - name: "valid label", - fields: fields{ - Name: "iot", - OrgID: influxtest.MustIDBase16(orgOneID), - }, - }, - { - name: "label requires a name", - fields: fields{ - OrgID: influxtest.MustIDBase16(orgOneID), - }, - wantErr: true, - }, - { - name: "label requires an organization ID", - fields: fields{ - Name: "iot", - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - m := influxdb.Label{ - Name: tt.fields.Name, - OrgID: tt.fields.OrgID, - } - if err := m.Validate(); (err != nil) != tt.wantErr { - t.Errorf("Label.Validate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/logger/config.go b/logger/config.go deleted file mode 100644 index d7014b1b2a2..00000000000 --- a/logger/config.go +++ /dev/null @@ -1,19 +0,0 @@ -package logger - -import ( - "go.uber.org/zap/zapcore" -) - -type Config struct { - Format string `toml:"format"` - Level zapcore.LevelEnabler `toml:"level"` - SuppressLogo bool `toml:"suppress-logo"` -} - -// NewConfig returns a new instance of Config with defaults. 
-func NewConfig() Config { - return Config{ - Format: "auto", - Level: zapcore.Level(0), - } -} diff --git a/logger/context.go b/logger/context.go deleted file mode 100644 index 3b4b775e64f..00000000000 --- a/logger/context.go +++ /dev/null @@ -1,24 +0,0 @@ -package logger - -import ( - "context" - - "go.uber.org/zap" -) - -type key int - -const ( - loggerKey key = iota -) - -// NewContextWithLogger returns a new context with log added. -func NewContextWithLogger(ctx context.Context, log *zap.Logger) context.Context { - return context.WithValue(ctx, loggerKey, log) -} - -// LoggerFromContext returns the zap.Logger associated with ctx or nil if no logger has been assigned. -func LoggerFromContext(ctx context.Context) *zap.Logger { - l, _ := ctx.Value(loggerKey).(*zap.Logger) - return l -} diff --git a/logger/fields.go b/logger/fields.go deleted file mode 100644 index 0c5fe0a1f29..00000000000 --- a/logger/fields.go +++ /dev/null @@ -1,134 +0,0 @@ -package logger - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/pkg/snowflake" - "github.com/opentracing/opentracing-go" - "github.com/uber/jaeger-client-go" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -const ( - // OperationNameKey is the logging context key used for identifying name of an operation. - OperationNameKey = "op_name" - - // OperationEventKey is the logging context key used for identifying a notable - // event during the course of an operation. - OperationEventKey = "op_event" - - // OperationElapsedKey is the logging context key used for identifying time elapsed to finish an operation. - OperationElapsedKey = "op_elapsed" - - // DBInstanceKey is the logging context key used for identifying name of the relevant database. - DBInstanceKey = "db_instance" - - // DBRetentionKey is the logging context key used for identifying name of the relevant retention policy. - DBRetentionKey = "db_rp" - - // DBShardGroupKey is the logging context key used for identifying relevant shard group. - DBShardGroupKey = "db_shard_group" - - // DBShardIDKey is the logging context key used for identifying name of the relevant shard number. - DBShardIDKey = "db_shard_id" - - // TraceIDKey is the logging context key used for identifying the current trace. - TraceIDKey = "ot_trace_id" - - // TraceSampledKey is the logging context key used for determining whether the current trace will be sampled. - TraceSampledKey = "ot_trace_sampled" -) -const ( - eventStart = "start" - eventEnd = "end" -) - -var ( - gen = snowflake.New(0) -) - -func nextID() string { - return gen.NextString() -} - -// OperationName returns a field for tracking the name of an operation. -func OperationName(name string) zapcore.Field { - return zap.String(OperationNameKey, name) -} - -// OperationElapsed returns a field for tracking the duration of an operation. -func OperationElapsed(d time.Duration) zapcore.Field { - return zap.Duration(OperationElapsedKey, d) -} - -// OperationEventStart returns a field for tracking the start of an operation. -func OperationEventStart() zapcore.Field { - return zap.String(OperationEventKey, eventStart) -} - -// OperationEventEnd returns a field for tracking the end of an operation. -func OperationEventEnd() zapcore.Field { - return zap.String(OperationEventKey, eventEnd) -} - -// Database returns a field for tracking the name of a database. 
-func Database(name string) zapcore.Field { - return zap.String(DBInstanceKey, name) -} - -// Database returns a field for tracking the name of a database. -func RetentionPolicy(name string) zapcore.Field { - return zap.String(DBRetentionKey, name) -} - -// ShardGroup returns a field for tracking the shard group identifier. -func ShardGroup(id uint64) zapcore.Field { - return zap.Uint64(DBShardGroupKey, id) -} - -// Shard returns a field for tracking the shard identifier. -func Shard(id uint64) zapcore.Field { - return zap.Uint64(DBShardIDKey, id) -} - -// TraceFields returns a fields "ot_trace_id" and "ot_trace_sampled", values pulled from the (Jaeger) trace ID -// found in the given context. Returns nil if the context doesn't have a trace ID. -func TraceFields(ctx context.Context) []zap.Field { - id, sampled, found := tracing.InfoFromContext(ctx) - if !found { - return nil - } - return []zap.Field{zap.String(TraceIDKey, id), zap.Bool(TraceSampledKey, sampled)} -} - -// TraceID returns a field "trace_id", value pulled from the (Jaeger) trace ID found in the given context. -// Returns zap.Skip() if the context doesn't have a trace ID. -func TraceID(ctx context.Context) zap.Field { - if span := opentracing.SpanFromContext(ctx); span != nil { - if spanContext, ok := span.Context().(jaeger.SpanContext); ok { - return zap.String("trace_id", spanContext.TraceID().String()) - } - } - return zap.Skip() -} - -// NewOperation uses the exiting log to create a new logger with context -// containing a trace id and the operation. Prior to returning, a standardized message -// is logged indicating the operation has started. The returned function should be -// called when the operation concludes in order to log a corresponding message which -// includes an elapsed time and that the operation has ended. -func NewOperation(ctx context.Context, log *zap.Logger, msg, name string, fields ...zapcore.Field) (*zap.Logger, func()) { - f := []zapcore.Field{OperationName(name), TraceID(ctx)} - if len(fields) > 0 { - f = append(f, fields...) - } - - now := time.Now() - log = log.With(f...) - log.Info(msg+" (start)", OperationEventStart()) - - return log, func() { log.Info(msg+" (end)", OperationEventEnd(), OperationElapsed(time.Since(now))) } -} diff --git a/logger/logger.go b/logger/logger.go deleted file mode 100644 index 167fa766608..00000000000 --- a/logger/logger.go +++ /dev/null @@ -1,121 +0,0 @@ -package logger - -import ( - "fmt" - "io" - "time" - - zaplogfmt "github.com/jsternberg/zap-logfmt" - isatty "github.com/mattn/go-isatty" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -const TimeFormat = "2006-01-02T15:04:05.000000Z07:00" - -func (c *Config) New(defaultOutput io.Writer) (*zap.Logger, error) { - w := defaultOutput - format := c.Format - if format == "console" { - // Disallow the console logger if the output is not a terminal. - return nil, fmt.Errorf("unknown logging format: %s", format) - } - - // If the format is empty or auto, then set the format depending - // on whether or not a terminal is present. 
- if format == "" || format == "auto" { - if IsTerminal(w) { - format = "console" - } else { - format = "logfmt" - } - } - - encoder, err := newEncoder(format) - if err != nil { - return nil, err - } - return zap.New(zapcore.NewCore( - encoder, - zapcore.Lock(zapcore.AddSync(w)), - c.Level, - ), zap.Fields(zap.String("log_id", nextID()))), nil -} - -func newEncoder(format string) (zapcore.Encoder, error) { - config := newEncoderConfig() - switch format { - case "json": - return zapcore.NewJSONEncoder(config), nil - case "console": - return zapcore.NewConsoleEncoder(config), nil - case "logfmt": - return zaplogfmt.NewEncoder(config), nil - default: - return nil, fmt.Errorf("unknown logging format: %s", format) - } -} - -func newEncoderConfig() zapcore.EncoderConfig { - config := zap.NewProductionEncoderConfig() - config.EncodeTime = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) { - encoder.AppendString(ts.UTC().Format(TimeFormat)) - } - config.EncodeDuration = func(d time.Duration, encoder zapcore.PrimitiveArrayEncoder) { - val := float64(d) / float64(time.Millisecond) - encoder.AppendString(fmt.Sprintf("%.3fms", val)) - } - config.LevelKey = "lvl" - return config -} - -// IsTerminal checks if w is a file and whether it is an interactive terminal session. -func IsTerminal(w io.Writer) bool { - if f, ok := w.(interface { - Fd() uintptr - }); ok { - return isatty.IsTerminal(f.Fd()) - } - return false -} - -const ( - year = 365 * 24 * time.Hour - week = 7 * 24 * time.Hour - day = 24 * time.Hour -) - -func DurationLiteral(key string, val time.Duration) zapcore.Field { - if val == 0 { - return zap.String(key, "0s") - } - - var ( - value int - unit string - ) - switch { - case val%year == 0: - value = int(val / year) - unit = "y" - case val%week == 0: - value = int(val / week) - unit = "w" - case val%day == 0: - value = int(val / day) - unit = "d" - case val%time.Hour == 0: - value = int(val / time.Hour) - unit = "h" - case val%time.Minute == 0: - value = int(val / time.Minute) - unit = "m" - case val%time.Second == 0: - value = int(val / time.Second) - unit = "s" - default: - value = int(val / time.Millisecond) - unit = "ms" - } - return zap.String(key, fmt.Sprintf("%d%s", value, unit)) -} diff --git a/logger/style_guide.md b/logger/style_guide.md deleted file mode 100644 index fc7417c4f89..00000000000 --- a/logger/style_guide.md +++ /dev/null @@ -1,220 +0,0 @@ -# Logging Style Guide - -The intention of logging is to give insight to the administrator of how -the server is running and also notify the administrator of any problems -or potential problems with the system. - -At the moment, log level filtering is the only option to configure -logging in InfluxDB. Adding a logging message and choosing its level -should be done according to the guidelines in this document for -operational clarity. The available log levels are: - -* Error -* Warn -* Info -* Debug - -InfluxDB uses structured logging. Structured logging is when you log -messages and attach context to those messages with more easily read data -regarding the state of the system. A structured log message is composed -of: - -* Time -* Level -* Message -* (Optionally) Additional context - -## Guidelines - -**Log messages** should be simple statements or phrases that begin with -a capital letter, but have no punctuation at the end. The message should be a -constant so that every time it is logged it is easily identified and can -be filtered by without regular expressions. - -Any **dynamic content** should be expressed by context. 
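To make this guideline concrete, a hedged sketch of a constant message with its dynamic content carried as structured fields; the handler, field keys, and values are illustrative only:

```go
package example

import "go.uber.org/zap"

// handleWrite is a hypothetical handler. The message is a constant phrase,
// so it can be searched for verbatim; everything dynamic travels as fields.
func handleWrite(log *zap.Logger, db string, err error) {
	if err != nil {
		// A user/ephemeral error: handled here, logged at info with the
		// error attached as a field, and not propagated further.
		log.Info("Rejected write due to parse error",
			zap.String("db_instance", db),
			zap.Error(err),
		)
		return
	}
	log.Info("Write accepted", zap.String("db_instance", db))
}
```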
The key should -be a constant and the value should be the dynamic content. - -Do not log messages in tight loops or other high performance locations. -It will likely create a performance problem. - -## Naming Conventions - -If the log encoding format uses keys for the time, message, or level, -the key names should be `ts` for time, `msg` for the message, and -`lvl` for the level. - -If the log encoding format does not use keys for the time, message, or -level and instead outputs them in some other way, this guideline can -be ignored. The output formats logfmt and json both use keys when -encoding these values. - -### Context Key Names - -The key for the dynamic content in the context should be formatted in -`snake_case`. The key should be completely lower case. - -## Levels - -As a reminder, levels are usually the only way to configure what is -logged. There are four available logging levels. - -* Error -* Warn -* Info -* Debug - -It is important to get the right logging level to ensure the log -messages are useful for end users to act on. - -In general, when considering which log level to use, you should use -**info**. If you are considering using another level, read the expanded -descriptions below to determine which level your message belongs in. - -### Error - -The **error** level is intended to communicate that there is a serious -problem with the server. **An error should be emitted only when an -on-call engineer can take some action to remedy the situation _and_ the -system cannot continue operating properly without remedying the -situation.** - -An example of what may qualify as an error level message is the creation -of the internal storage for the monitor service. For that system to -function at all, a database must be created. If no database is created, -the service itself cannot function. The error has a clear actionable -solution. Figure out why the database isn't being created and create it. - -An example of what does not qualify as an error is failing to parse a -query or a socket closing prematurely. Both of these usually indicate -some kind of user error rather than system error. Both are ephemeral -errors and they would not be clearly actionable to an administrator who -was paged at 3 AM. Both of these are examples of logging messages that -should be emitted at the info level with an error key rather than being -logged at the error level. - -Logged errors **must not propagate**. Propagating the error risks -logging it in multiple locations and confusing users when the same error -is reported multiple times. In general, if you are returning an error, -never log at any level. By returning the error, you are telling the -parent function to handle the error. Logging a message at any level is -handling the error. - -This logging message should be used very rarely and any messages that -use this logging level should not repeat frequently. Assume that -anything that is logged with error will page someone in the middle of -the night. - -### Warn - -The **warn** level is intended to communicate that there is likely to be -a serious problem with the server if it is not addressed. **A warning -should be emitted only when a support engineer can take some action to -remedy the situation _and_ the system may not continue operating -properly in the near future without remedying the situation.** - -An example of what does not qualify as a warning is the -`log-queries-after` setting.
While the message is "warning" that a query -was running for a long period of time, it is not clearly actionable and -does not indicate that the server will fail in the near future. This -should be logged at the info level instead. - -This logging message should be used very rarely and any messages that -use this logging level should not repeat frequently. Assume that -anything that is logged with warn will page someone in the middle of the -night and potentially be ignored until normal working hours. - -### Info - -The **info** level should be used for almost anything. If you are not -sure which logging level to use, use info. Temporary or user errors -should be logged at the info level and any informational messages for -administrators should be logged at this level. Info level messages -should be safe for an administrator to discard if they really want to, -but most people will run the system at the info level. - -### Debug - -The **debug** level exists to log messages that are useful only for -debugging a badly running instance. - -This level should be used rarely, if ever. If you intend to use this -level, please have a rationale ready. Most messages that could be -considered debug either shouldn't exist or should be logged at the info -level. Debug messages will be suppressed by default. - -## Value Formatting - -Formatting for strings, integers, and other standard values is usually -determined by the log format itself and is intentionally left unspecified here. -The following specific formatting choices are for data types that could -be output in multiple ways. - -### Time - -Time values should be encoded using RFC3339 with microsecond precision. -The size of the string should be normalized to the same number of digits -every time to ensure that it is easier to read the time as a column. - -### Duration - -Duration values that denote a period of time should be output in -milliseconds with microsecond precision. The microseconds should be in -decimal form with three decimal places. Durations that denote a static -period of time should be output with a single number and a suffix with -the largest possible unit that doesn't cause the value to be a decimal. - -There are two types of durations. - -* Tracks a (usually small) period of time and is meant for timing how - long something takes. The content is dynamic and may be graphed. -* Duration literal where the content is static, is unlikely to be - graphed, and usually comes from some type of configuration. - -If the content is dynamic, the duration should be printed as a number of -milliseconds with a decimal indicating the number of microseconds. Any -duration lower than microseconds should be truncated. The decimal section -should always print exactly 3 digits after the decimal point. - -If the content is static, the duration should be printed with a single -number and a suffix indicating the unit in years (`y`), weeks (`w`), -days (`d`), hours (`h`), minutes (`m`), seconds (`s`), or -milliseconds (`ms`). The suffix should be the greatest unit that can be -used without truncating the value. As an example, if the duration is -60 minutes, then `1h` should be used. If the duration is 61 minutes, -then `61m` should be used. - -For anything lower than milliseconds that is static, the duration should -be truncated. A value of zero should be shown as `0s`. - -## Stacktraces - -Logging stacktraces is special within the zap library.
There are two -different ways to do it, but one of them will lead to more user-friendly -output and is the preferred way to log stacktraces. - -A stacktrace should only be computed if it is known that the log message -will be logged and the stacktrace should be attached to the log entry -using the special struct field rather than within the context. - -Below is a code sample using the zap logger. - -```go -var logger *zap.Logger - -// ... - -if entry := logger.Check(zapcore.InfoLevel, "A panic happened"); entry != nil { - entry.Stack = string(debug.Stack()) - entry.Write(/* additional context here */) -} -``` - -The reason for this is because certain encoders will handle the `Stack` -field in a special way. The console encoder, the user-friendly one used -when a TTY is present, will print out a newline and then pretty-print -the stack separately from the context. The logfmt encoder will encode -the stack as a normal context key so that it can follow the logfmt encoding. - -If the `zap.Stack(string)` method is used and included as part of the context, -then the stack will always be included within the context instead of handled -in the special way dictated by the encoder. diff --git a/lookup.go b/lookup.go deleted file mode 100644 index 082c7da9330..00000000000 --- a/lookup.go +++ /dev/null @@ -1,13 +0,0 @@ -package influxdb - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// LookupService provides field lookup for the resource and ID. -type LookupService interface { - // FindResourceName returns the name for the resource and ID. - FindResourceName(ctx context.Context, resource ResourceType, id platform.ID) (string, error) -} diff --git a/measurement.go b/measurement.go deleted file mode 100644 index fa29c5b4ea5..00000000000 --- a/measurement.go +++ /dev/null @@ -1,39 +0,0 @@ -package influxdb - -import "fmt" - -// Length of components of measurement names. -const ( - OrgIDLength = 8 - BucketIDLength = 8 - MeasurementLength = OrgIDLength + BucketIDLength -) - -// ReadMeasurement reads the provided measurement name and returns an Org ID and -// bucket ID. It returns an error if the provided name has an invalid length. -// -// ReadMeasurement does not allocate, and instead returns sub-slices of name, -// so callers should be careful about subsequent mutations to the provided name -// slice. -func ReadMeasurement(name []byte) (orgID, bucketID []byte, err error) { - if len(name) != MeasurementLength { - return nil, nil, fmt.Errorf("measurement %v has invalid length (%d)", name, len(name)) - } - return name[:OrgIDLength], name[len(name)-BucketIDLength:], nil -} - -// CreateMeasurement returns 16 bytes that represent a measurement. -// -// If either org or bucket are short then an error is returned, otherwise the -// first 8 bytes of each are combined and returned. -func CreateMeasurement(org, bucket []byte) ([]byte, error) { - if len(org) < OrgIDLength { - return nil, fmt.Errorf("org %v has invalid length (%d)", org, len(org)) - } else if len(bucket) < BucketIDLength { - return nil, fmt.Errorf("bucket %v has invalid length (%d)", bucket, len(bucket)) - } - - name := make([]byte, 0, MeasurementLength) - name = append(name, org[:OrgIDLength]...) 
- return append(name, bucket[:BucketIDLength]...), nil -} diff --git a/measurement_schema.go b/measurement_schema.go deleted file mode 100644 index e4fcf64fcbd..00000000000 --- a/measurement_schema.go +++ /dev/null @@ -1,445 +0,0 @@ -package influxdb - -import ( - "encoding/json" - "errors" - "fmt" - "sort" - "strconv" - "strings" - - influxid "github.com/influxdata/influxdb/v2/kit/platform" - influxerror "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/models" - "go.uber.org/multierr" -) - -// SchemaType differentiates the supported schema for a bucket. -type SchemaType int - -const ( - SchemaTypeImplicit SchemaType = iota // SchemaTypeImplicit specifies the bucket has an implicit schema. - SchemaTypeExplicit // SchemaTypeExplicit specifies the bucket has an explicit schema. -) - -// SchemaTypeFromString returns the SchemaType for s -// or nil if none exists. -func SchemaTypeFromString(s string) *SchemaType { - switch s { - case "implicit": - return SchemaTypeImplicit.Ptr() - case "explicit": - return SchemaTypeExplicit.Ptr() - default: - return nil - } -} - -func (s *SchemaType) String() string { - if s == nil { - return "" - } - - switch s := *s; s { - case SchemaTypeImplicit: - return "implicit" - case SchemaTypeExplicit: - return "explicit" - default: - return "SchemaType(" + strconv.FormatInt(int64(s), 10) + ")" - } -} - -func (s *SchemaType) UnmarshalJSON(d []byte) error { - var val string - if err := json.Unmarshal(d, &val); err != nil { - return err - } - - switch val { - case "implicit": - *s = SchemaTypeImplicit - case "explicit": - *s = SchemaTypeExplicit - default: - return errors.New("unexpected value") - } - - return nil -} - -func (s *SchemaType) Equals(other *SchemaType) bool { - if s == nil && other == nil { - return true - } else if s == nil || other == nil { - return false - } - - return *s == *other -} - -func (s SchemaType) MarshalJSON() ([]byte, error) { - switch s { - case SchemaTypeImplicit: - return []byte(`"implicit"`), nil - case SchemaTypeExplicit: - return []byte(`"explicit"`), nil - default: - return nil, errors.New("unexpected value") - } -} - -// Ptr returns a pointer to s. -func (s SchemaType) Ptr() *SchemaType { return &s } - -// SemanticColumnType specifies the semantics of a measurement column. 
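A short round-trip sketch of the measurement helpers above; the ID values and the wrapper function are invented, and the import path follows the module path used throughout this diff:

```go
package example

import (
	"encoding/binary"
	"fmt"

	"github.com/influxdata/influxdb/v2"
)

// measurementRoundTrip encodes an 8-byte org ID and bucket ID into a 16-byte
// measurement name and then splits it back apart.
func measurementRoundTrip() error {
	org := make([]byte, influxdb.OrgIDLength)
	bucket := make([]byte, influxdb.BucketIDLength)
	binary.BigEndian.PutUint64(org, 0xdead)
	binary.BigEndian.PutUint64(bucket, 0xbeef)

	name, err := influxdb.CreateMeasurement(org, bucket)
	if err != nil {
		return err
	}

	// ReadMeasurement returns sub-slices of name, so name must not be
	// mutated while gotOrg and gotBucket are still in use.
	gotOrg, gotBucket, err := influxdb.ReadMeasurement(name)
	if err != nil {
		return err
	}
	fmt.Printf("org=%x bucket=%x\n", gotOrg, gotBucket)
	return nil
}
```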
-type SemanticColumnType int - -const ( - SemanticColumnTypeTimestamp SemanticColumnType = iota // SemanticColumnTypeTimestamp identifies the column is used as the timestamp - SemanticColumnTypeTag // SemanticColumnTypeTag identifies the column is used as a tag - SemanticColumnTypeField // SemanticColumnTypeField identifies the column is used as a field -) - -func SemanticColumnTypeFromString(s string) *SemanticColumnType { - switch s { - case "timestamp": - return SemanticColumnTypeTimestamp.Ptr() - case "tag": - return SemanticColumnTypeTag.Ptr() - case "field": - return SemanticColumnTypeField.Ptr() - default: - return nil - } -} - -func (s SemanticColumnType) String() string { - switch s { - case SemanticColumnTypeTimestamp: - return "timestamp" - case SemanticColumnTypeTag: - return "tag" - case SemanticColumnTypeField: - return "field" - default: - return "SemanticColumnType(" + strconv.FormatInt(int64(s), 10) + ")" - } -} - -func (s SemanticColumnType) Ptr() *SemanticColumnType { - return &s -} - -func (s *SemanticColumnType) UnmarshalJSON(d []byte) error { - var val string - if err := json.Unmarshal(d, &val); err != nil { - return err - } - - switch val { - case "timestamp": - *s = SemanticColumnTypeTimestamp - case "tag": - *s = SemanticColumnTypeTag - case "field": - *s = SemanticColumnTypeField - default: - return errors.New("unexpected value") - } - - return nil -} - -func (s SemanticColumnType) MarshalJSON() ([]byte, error) { - switch s { - case SemanticColumnTypeTimestamp: - return []byte(`"timestamp"`), nil - case SemanticColumnTypeTag: - return []byte(`"tag"`), nil - case SemanticColumnTypeField: - return []byte(`"field"`), nil - default: - return nil, errors.New("unexpected value") - } -} - -type SchemaColumnDataType uint - -const ( - SchemaColumnDataTypeFloat SchemaColumnDataType = iota - SchemaColumnDataTypeInteger - SchemaColumnDataTypeUnsigned - SchemaColumnDataTypeString - SchemaColumnDataTypeBoolean -) - -func SchemaColumnDataTypeFromString(s string) *SchemaColumnDataType { - switch s { - case "float": - return SchemaColumnDataTypeFloat.Ptr() - case "integer": - return SchemaColumnDataTypeInteger.Ptr() - case "unsigned": - return SchemaColumnDataTypeUnsigned.Ptr() - case "string": - return SchemaColumnDataTypeString.Ptr() - case "boolean": - return SchemaColumnDataTypeBoolean.Ptr() - default: - return nil - } -} - -// Ptr returns a pointer to s. 
-func (s SchemaColumnDataType) Ptr() *SchemaColumnDataType { return &s } - -func (s *SchemaColumnDataType) String() string { - if s == nil { - return "" - } - - switch *s { - case SchemaColumnDataTypeFloat: - return "float" - case SchemaColumnDataTypeInteger: - return "integer" - case SchemaColumnDataTypeUnsigned: - return "unsigned" - case SchemaColumnDataTypeString: - return "string" - case SchemaColumnDataTypeBoolean: - return "boolean" - default: - return "SchemaColumnDataType(" + strconv.FormatInt(int64(*s), 10) + ")" - } -} - -func (s *SchemaColumnDataType) UnmarshalJSON(d []byte) error { - var val string - if err := json.Unmarshal(d, &val); err != nil { - return err - } - - switch val { - case "float": - *s = SchemaColumnDataTypeFloat - case "integer": - *s = SchemaColumnDataTypeInteger - case "unsigned": - *s = SchemaColumnDataTypeUnsigned - case "string": - *s = SchemaColumnDataTypeString - case "boolean": - *s = SchemaColumnDataTypeBoolean - default: - return errors.New("unexpected value") - } - - return nil -} - -func (s SchemaColumnDataType) MarshalJSON() ([]byte, error) { - switch s { - case SchemaColumnDataTypeFloat: - return []byte(`"float"`), nil - case SchemaColumnDataTypeInteger: - return []byte(`"integer"`), nil - case SchemaColumnDataTypeUnsigned: - return []byte(`"unsigned"`), nil - case SchemaColumnDataTypeString: - return []byte(`"string"`), nil - case SchemaColumnDataTypeBoolean: - return []byte(`"boolean"`), nil - default: - return nil, errors.New("unexpected value") - } -} - -var ( - schemaTypeToFieldTypeMap = [...]models.FieldType{ - SchemaColumnDataTypeFloat: models.Float, - SchemaColumnDataTypeInteger: models.Integer, - SchemaColumnDataTypeUnsigned: models.Unsigned, - SchemaColumnDataTypeString: models.String, - SchemaColumnDataTypeBoolean: models.Boolean, - } -) - -// ToFieldType maps SchemaColumnDataType to the equivalent models.FieldType or -// models.Empty if no such mapping exists. -func (s SchemaColumnDataType) ToFieldType() models.FieldType { - if int(s) > len(schemaTypeToFieldTypeMap) { - return models.Empty - } - return schemaTypeToFieldTypeMap[s] -} - -type MeasurementSchema struct { - ID influxid.ID `json:"id,omitempty"` - OrgID influxid.ID `json:"orgID"` - BucketID influxid.ID `json:"bucketID"` - Name string `json:"name"` - Columns []MeasurementSchemaColumn `json:"columns"` - CRUDLog -} - -func (m *MeasurementSchema) Validate() error { - var err error - - err = multierr.Append(err, m.validateName("name", m.Name)) - err = multierr.Append(err, m.validateColumns()) - - return err -} - -// ValidateMeasurementSchemaName determines if name is a valid identifier for -// a measurement schema or column name and if not, returns an error. 
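As a hedged sketch of the column rules enforced by the validation code below, here is a schema that should pass `Validate`; the measurement and column names are illustrative only:

```go
package example

import "github.com/influxdata/influxdb/v2"

// validCPUSchema builds a schema with the minimum shape Validate accepts:
// a "time" column with timestamp semantics, at least one field column with
// a data type, and unique, well-formed names.
func validCPUSchema() error {
	schema := influxdb.MeasurementSchema{
		Name: "cpu",
		Columns: []influxdb.MeasurementSchemaColumn{
			{Name: "time", Type: influxdb.SemanticColumnTypeTimestamp},
			{Name: "host", Type: influxdb.SemanticColumnTypeTag},
			{Name: "usage_user", Type: influxdb.SemanticColumnTypeField, DataType: influxdb.SchemaColumnDataTypeFloat.Ptr()},
		},
	}
	// Validate collects every violation via multierr instead of stopping at
	// the first problem.
	return schema.Validate()
}
```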
-func ValidateMeasurementSchemaName(name string) error { - if len(name) == 0 { - return ErrMeasurementSchemaNameTooShort - } - - if len(name) > 128 { - return ErrMeasurementSchemaNameTooLong - } - - if err := models.CheckToken([]byte(name)); err != nil { - return &influxerror.Error{ - Code: influxerror.EInvalid, - Err: err, - } - } - - if strings.HasPrefix(name, "_") { - return ErrMeasurementSchemaNameUnderscore - } - - if strings.Contains(name, `"`) || strings.Contains(name, `'`) { - return ErrMeasurementSchemaNameQuotes - } - - return nil -} - -func (m *MeasurementSchema) validateName(prefix, name string) error { - if err := ValidateMeasurementSchemaName(name); err != nil { - return fmt.Errorf("%s %q: %w", prefix, name, err) - } - - return nil -} - -// columns implements sort.Interface to efficiently sort a MeasurementSchemaColumn slice -// by using indices to store the sorted element indices. -type columns struct { - indices []int // indices is a list of indices representing a sorted columns - columns []MeasurementSchemaColumn -} - -// newColumns returns an instance of columns which contains a sorted version of c. -func newColumns(c []MeasurementSchemaColumn) columns { - colIndices := make([]int, len(c)) - for i := range c { - colIndices[i] = i - } - res := columns{ - indices: colIndices, - columns: c, - } - sort.Sort(res) - return res -} - -func (c columns) Len() int { - return len(c.columns) -} - -func (c columns) Less(i, j int) bool { - return c.columns[c.indices[i]].Name < c.columns[c.indices[j]].Name -} - -func (c columns) Swap(i, j int) { - c.indices[i], c.indices[j] = c.indices[j], c.indices[i] -} - -// Index returns the sorted -func (c columns) Index(i int) *MeasurementSchemaColumn { - return &c.columns[c.indices[i]] -} - -func (m *MeasurementSchema) validateColumns() (err error) { - if len(m.Columns) == 0 { - return ErrMeasurementSchemaColumnsMissing - } - - cols := newColumns(m.Columns) - - timeCount := 0 - fieldCount := 0 - for i := range cols.columns { - col := &cols.columns[i] - - err = multierr.Append(err, m.validateName("column name", col.Name)) - - // special handling for time column - if col.Name == "time" { - timeCount++ - if col.Type != SemanticColumnTypeTimestamp { - err = multierr.Append(err, ErrMeasurementSchemaColumnsTimeInvalidSemantic) - } else if col.DataType != nil { - err = multierr.Append(err, ErrMeasurementSchemaColumnsTimestampSemanticDataType) - } - continue - } - - // ensure no other columns have a timestamp semantic - switch col.Type { - case SemanticColumnTypeTimestamp: - if col.Name != "time" { - err = multierr.Append(err, ErrMeasurementSchemaColumnsTimestampSemanticInvalidName) - } else { - if col.DataType != nil { - err = multierr.Append(err, ErrMeasurementSchemaColumnsTimestampSemanticDataType) - } - } - - case SemanticColumnTypeTag: - // ensure tag columns don't include a data type value - if col.DataType != nil { - err = multierr.Append(err, ErrMeasurementSchemaColumnsTagSemanticDataType) - } - - case SemanticColumnTypeField: - if col.DataType == nil { - err = multierr.Append(err, ErrMeasurementSchemaColumnsFieldSemanticMissingDataType) - } - fieldCount++ - } - } - - if timeCount == 0 { - err = multierr.Append(err, ErrMeasurementSchemaColumnsMissingTime) - } - - // ensure there is at least one field defined - if fieldCount == 0 { - err = multierr.Append(err, ErrMeasurementSchemaColumnsMissingFields) - } - - // check for duplicate columns using general UTF-8 case insensitive comparison - for i := range cols.columns[1:] { - if 
strings.EqualFold(cols.Index(i).Name, cols.Index(i+1).Name) { - err = multierr.Append(err, ErrMeasurementSchemaColumnsDuplicateNames) - break - } - } - - return err -} - -type MeasurementSchemaColumn struct { - Name string `json:"name"` - Type SemanticColumnType `json:"type"` - DataType *SchemaColumnDataType `json:"dataType,omitempty"` -} diff --git a/measurement_schema_errors.go b/measurement_schema_errors.go deleted file mode 100644 index 351c828df61..00000000000 --- a/measurement_schema_errors.go +++ /dev/null @@ -1,72 +0,0 @@ -package influxdb - -import ( - influxerror "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - ErrMeasurementSchemaNameTooShort = &influxerror.Error{ - Code: influxerror.EInvalid, - Msg: "too short", - } - - ErrMeasurementSchemaNameTooLong = &influxerror.Error{ - Code: influxerror.EInvalid, - Msg: "too long", - } - - ErrMeasurementSchemaNameUnderscore = &influxerror.Error{ - Code: influxerror.EInvalid, - Msg: "must not begin with _", - } - - ErrMeasurementSchemaNameQuotes = &influxerror.Error{ - Code: influxerror.EInvalid, - Msg: "must not contains single or double quotes", - } - - ErrMeasurementSchemaColumnsMissing = &influxerror.Error{ - Code: influxerror.EInvalid, - Msg: "measurement schema columns missing", - } - - ErrMeasurementSchemaColumnsMissingTime = &influxerror.Error{ - Code: influxerror.EInvalid, - Msg: "measurement schema columns missing time column with a timestamp semantic", - } - - ErrMeasurementSchemaColumnsTimeInvalidSemantic = &influxerror.Error{ - Code: influxerror.EInvalid, - Msg: "measurement schema contains a time column with an invalid semantic", - } - - ErrMeasurementSchemaColumnsTimestampSemanticInvalidName = &influxerror.Error{ - Code: influxerror.EInvalid, - Msg: "measurement schema columns contains a timestamp column that is not named time", - } - - ErrMeasurementSchemaColumnsTimestampSemanticDataType = &influxerror.Error{ - Code: influxerror.EInvalid, - Msg: "measurement schema columns contains a time column with a data type", - } - - ErrMeasurementSchemaColumnsTagSemanticDataType = &influxerror.Error{ - Code: influxerror.EInvalid, - Msg: "measurement schema columns contains a tag column with a data type", - } - - ErrMeasurementSchemaColumnsFieldSemanticMissingDataType = &influxerror.Error{ - Code: influxerror.EInvalid, - Msg: "measurement schema columns contains a field column with missing data type", - } - - ErrMeasurementSchemaColumnsMissingFields = &influxerror.Error{ - Code: influxerror.EInvalid, - Msg: "measurement schema columns requires at least one field type column", - } - - ErrMeasurementSchemaColumnsDuplicateNames = &influxerror.Error{ - Code: influxerror.EInvalid, - Msg: "measurement schema columns contains duplicate column names", - } -) diff --git a/measurement_schema_test.go b/measurement_schema_test.go deleted file mode 100644 index f07dbceff72..00000000000 --- a/measurement_schema_test.go +++ /dev/null @@ -1,279 +0,0 @@ -package influxdb_test - -import ( - "fmt" - "strings" - "testing" - - "github.com/influxdata/influxdb/v2" - influxerror "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/stretchr/testify/assert" - "go.uber.org/multierr" -) - -func TestMeasurementSchema_Validate(t *testing.T) { - col1 := func(name string, st influxdb.SemanticColumnType) influxdb.MeasurementSchemaColumn { - return influxdb.MeasurementSchemaColumn{Name: name, Type: st} - } - col2 := func(name string, st influxdb.SemanticColumnType, dt influxdb.SchemaColumnDataType) 
influxdb.MeasurementSchemaColumn { - return influxdb.MeasurementSchemaColumn{Name: name, Type: st, DataType: &dt} - } - - // errp composes a new error from err with prefix p and quoted name - errp := func(p, name string, err error) error { - return fmt.Errorf("%s %q: %w", p, name, err) - } - - type fields struct { - Name string - Columns []influxdb.MeasurementSchemaColumn - } - - okCols := []influxdb.MeasurementSchemaColumn{ - col1("host", influxdb.SemanticColumnTypeTag), - col2("usage_user", influxdb.SemanticColumnTypeField, influxdb.SchemaColumnDataTypeFloat), - col1("time", influxdb.SemanticColumnTypeTimestamp), - } - - tests := []struct { - name string - fields fields - wantErr bool - errs []error - }{ - { - name: "is valid", - fields: fields{ - Name: "cpu", - Columns: okCols, - }, - }, - { - name: "name too short", - fields: fields{ - Name: "", - Columns: okCols, - }, - errs: []error{errp("name", "", influxdb.ErrMeasurementSchemaNameTooShort)}, - }, - { - name: "name too long", - fields: fields{ - Name: strings.Repeat("f", 129), - Columns: okCols, - }, - errs: []error{errp("name", strings.Repeat("f", 129), influxdb.ErrMeasurementSchemaNameTooLong)}, - }, - { - name: "name starts with underscore", - fields: fields{ - Name: "_cpu", - Columns: okCols, - }, - errs: []error{errp("name", "_cpu", influxdb.ErrMeasurementSchemaNameUnderscore)}, - }, - { - name: "name contains non-printable chars", - fields: fields{ - Name: "cp\x03u", - Columns: okCols, - }, - errs: []error{errp("name", "cp\x03u", &influxerror.Error{ - Code: influxerror.EInvalid, - Err: fmt.Errorf("non-printable character"), - })}, - }, - { - name: "name contains quotes", - fields: fields{ - Name: `"cpu"`, - Columns: okCols, - }, - errs: []error{errp("name", `"cpu"`, influxdb.ErrMeasurementSchemaNameQuotes)}, - }, - - // Columns validation - { - name: "missing columns", - fields: fields{ - Name: "cpu", - Columns: nil, - }, - errs: []error{influxdb.ErrMeasurementSchemaColumnsMissing}, - }, - { - name: "time column wrong semantic", - fields: fields{ - Name: "cpu", - Columns: []influxdb.MeasurementSchemaColumn{ - col1("host", influxdb.SemanticColumnTypeTag), - col2("usage_user", influxdb.SemanticColumnTypeField, influxdb.SchemaColumnDataTypeFloat), - col1("time", influxdb.SemanticColumnTypeField), - }, - }, - errs: []error{influxdb.ErrMeasurementSchemaColumnsTimeInvalidSemantic}, - }, - { - name: "time column with data type", - fields: fields{ - Name: "cpu", - Columns: []influxdb.MeasurementSchemaColumn{ - col1("host", influxdb.SemanticColumnTypeTag), - col2("usage_user", influxdb.SemanticColumnTypeField, influxdb.SchemaColumnDataTypeFloat), - col2("time", influxdb.SemanticColumnTypeTimestamp, influxdb.SchemaColumnDataTypeBoolean), - }, - }, - errs: []error{influxdb.ErrMeasurementSchemaColumnsTimestampSemanticDataType}, - }, - { - name: "missing time column", - fields: fields{ - Name: "cpu", - Columns: []influxdb.MeasurementSchemaColumn{ - col1("host", influxdb.SemanticColumnTypeTag), - col2("usage_user", influxdb.SemanticColumnTypeField, influxdb.SchemaColumnDataTypeFloat), - }, - }, - errs: []error{influxdb.ErrMeasurementSchemaColumnsMissingTime}, - }, - { - name: "timestamp column that is not named time", - fields: fields{ - Name: "cpu", - Columns: []influxdb.MeasurementSchemaColumn{ - col1("host", influxdb.SemanticColumnTypeTag), - col2("usage_user", influxdb.SemanticColumnTypeField, influxdb.SchemaColumnDataTypeFloat), - col1("foo", influxdb.SemanticColumnTypeTimestamp), - col1("time", influxdb.SemanticColumnTypeTimestamp), - 
}, - }, - errs: []error{influxdb.ErrMeasurementSchemaColumnsTimestampSemanticInvalidName}, - }, - { - name: "tag contains data type", - fields: fields{ - Name: "cpu", - Columns: []influxdb.MeasurementSchemaColumn{ - col2("host", influxdb.SemanticColumnTypeTag, influxdb.SchemaColumnDataTypeString), - col2("usage_user", influxdb.SemanticColumnTypeField, influxdb.SchemaColumnDataTypeFloat), - col1("time", influxdb.SemanticColumnTypeTimestamp), - }, - }, - errs: []error{influxdb.ErrMeasurementSchemaColumnsTagSemanticDataType}, - }, - { - name: "field missing data type", - fields: fields{ - Name: "cpu", - Columns: []influxdb.MeasurementSchemaColumn{ - col1("host", influxdb.SemanticColumnTypeTag), - col1("usage_user", influxdb.SemanticColumnTypeField), - col1("time", influxdb.SemanticColumnTypeTimestamp), - }, - }, - errs: []error{influxdb.ErrMeasurementSchemaColumnsFieldSemanticMissingDataType}, - }, - { - name: "missing fields", - fields: fields{ - Name: "cpu", - Columns: []influxdb.MeasurementSchemaColumn{ - col1("host", influxdb.SemanticColumnTypeTag), - col1("region", influxdb.SemanticColumnTypeTag), - col1("time", influxdb.SemanticColumnTypeTimestamp), - }, - }, - errs: []error{influxdb.ErrMeasurementSchemaColumnsMissingFields}, - }, - { - name: "duplicate column names", - fields: fields{ - Name: "cpu", - Columns: []influxdb.MeasurementSchemaColumn{ - col1("host", influxdb.SemanticColumnTypeTag), - col2("host", influxdb.SemanticColumnTypeField, influxdb.SchemaColumnDataTypeFloat), - col1("time", influxdb.SemanticColumnTypeTimestamp), - }, - }, - errs: []error{influxdb.ErrMeasurementSchemaColumnsDuplicateNames}, - }, - { - name: "duplicate column case insensitive names", - fields: fields{ - Name: "cpu", - Columns: []influxdb.MeasurementSchemaColumn{ - col1("host", influxdb.SemanticColumnTypeTag), - col2("HOST", influxdb.SemanticColumnTypeField, influxdb.SchemaColumnDataTypeFloat), - col1("time", influxdb.SemanticColumnTypeTimestamp), - }, - }, - errs: []error{influxdb.ErrMeasurementSchemaColumnsDuplicateNames}, - }, - - // column name validation - { - name: "column name too short", - fields: fields{ - Name: "cpu", - Columns: []influxdb.MeasurementSchemaColumn{ - col1("", influxdb.SemanticColumnTypeTag), - col2("usage_user", influxdb.SemanticColumnTypeField, influxdb.SchemaColumnDataTypeFloat), - col1("time", influxdb.SemanticColumnTypeTimestamp), - }, - }, - errs: []error{errp("column name", "", influxdb.ErrMeasurementSchemaNameTooShort)}, - }, - { - name: "column name too long", - fields: fields{ - Name: "cpu", - Columns: []influxdb.MeasurementSchemaColumn{ - col1(strings.Repeat("f", 129), influxdb.SemanticColumnTypeTag), - col2("usage_user", influxdb.SemanticColumnTypeField, influxdb.SchemaColumnDataTypeFloat), - col1("time", influxdb.SemanticColumnTypeTimestamp), - }, - }, - errs: []error{errp("column name", strings.Repeat("f", 129), influxdb.ErrMeasurementSchemaNameTooLong)}, - }, - { - name: "column name starts with underscore", - fields: fields{ - Name: "cpu", - Columns: []influxdb.MeasurementSchemaColumn{ - col1("_host", influxdb.SemanticColumnTypeTag), - col2("usage_user", influxdb.SemanticColumnTypeField, influxdb.SchemaColumnDataTypeFloat), - col1("time", influxdb.SemanticColumnTypeTimestamp), - }, - }, - errs: []error{errp("column name", "_host", influxdb.ErrMeasurementSchemaNameUnderscore)}, - }, - { - name: "column name contains quotes", - fields: fields{ - Name: "cpu", - Columns: []influxdb.MeasurementSchemaColumn{ - col1(`"host"`, influxdb.SemanticColumnTypeTag), - 
col2("usage_user", influxdb.SemanticColumnTypeField, influxdb.SchemaColumnDataTypeFloat), - col1("time", influxdb.SemanticColumnTypeTimestamp), - }, - }, - errs: []error{errp("column name", `"host"`, influxdb.ErrMeasurementSchemaNameQuotes)}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - m := &influxdb.MeasurementSchema{ - Name: tt.fields.Name, - Columns: tt.fields.Columns, - } - - if gotErr := m.Validate(); len(tt.errs) > 0 { - gotErrs := multierr.Errors(gotErr) - assert.ElementsMatch(t, gotErrs, tt.errs) - } else { - assert.NoError(t, gotErr) - } - - }) - } -} diff --git a/mock/annotation_service.go b/mock/annotation_service.go deleted file mode 100644 index 1ec16846c25..00000000000 --- a/mock/annotation_service.go +++ /dev/null @@ -1,213 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: annotation.go - -// Package mock is a generated GoMock package. -package mock - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - platform "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockAnnotationService is a mock of AnnotationService interface -type MockAnnotationService struct { - ctrl *gomock.Controller - recorder *MockAnnotationServiceMockRecorder -} - -// MockAnnotationServiceMockRecorder is the mock recorder for MockAnnotationService -type MockAnnotationServiceMockRecorder struct { - mock *MockAnnotationService -} - -// NewMockAnnotationService creates a new mock instance -func NewMockAnnotationService(ctrl *gomock.Controller) *MockAnnotationService { - mock := &MockAnnotationService{ctrl: ctrl} - mock.recorder = &MockAnnotationServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockAnnotationService) EXPECT() *MockAnnotationServiceMockRecorder { - return m.recorder -} - -// CreateAnnotations mocks base method -func (m *MockAnnotationService) CreateAnnotations(ctx context.Context, orgID platform.ID, create []influxdb.AnnotationCreate) ([]influxdb.AnnotationEvent, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateAnnotations", ctx, orgID, create) - ret0, _ := ret[0].([]influxdb.AnnotationEvent) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateAnnotations indicates an expected call of CreateAnnotations -func (mr *MockAnnotationServiceMockRecorder) CreateAnnotations(ctx, orgID, create interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateAnnotations", reflect.TypeOf((*MockAnnotationService)(nil).CreateAnnotations), ctx, orgID, create) -} - -// ListAnnotations mocks base method -func (m *MockAnnotationService) ListAnnotations(ctx context.Context, orgID platform.ID, filter influxdb.AnnotationListFilter) ([]influxdb.StoredAnnotation, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListAnnotations", ctx, orgID, filter) - ret0, _ := ret[0].([]influxdb.StoredAnnotation) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListAnnotations indicates an expected call of ListAnnotations -func (mr *MockAnnotationServiceMockRecorder) ListAnnotations(ctx, orgID, filter interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAnnotations", reflect.TypeOf((*MockAnnotationService)(nil).ListAnnotations), ctx, orgID, filter) -} - -// GetAnnotation mocks base method -func (m *MockAnnotationService) GetAnnotation(ctx 
context.Context, id platform.ID) (*influxdb.StoredAnnotation, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAnnotation", ctx, id) - ret0, _ := ret[0].(*influxdb.StoredAnnotation) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAnnotation indicates an expected call of GetAnnotation -func (mr *MockAnnotationServiceMockRecorder) GetAnnotation(ctx, id interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAnnotation", reflect.TypeOf((*MockAnnotationService)(nil).GetAnnotation), ctx, id) -} - -// DeleteAnnotations mocks base method -func (m *MockAnnotationService) DeleteAnnotations(ctx context.Context, orgID platform.ID, delete influxdb.AnnotationDeleteFilter) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteAnnotations", ctx, orgID, delete) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteAnnotations indicates an expected call of DeleteAnnotations -func (mr *MockAnnotationServiceMockRecorder) DeleteAnnotations(ctx, orgID, delete interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAnnotations", reflect.TypeOf((*MockAnnotationService)(nil).DeleteAnnotations), ctx, orgID, delete) -} - -// DeleteAnnotation mocks base method -func (m *MockAnnotationService) DeleteAnnotation(ctx context.Context, id platform.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteAnnotation", ctx, id) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteAnnotation indicates an expected call of DeleteAnnotation -func (mr *MockAnnotationServiceMockRecorder) DeleteAnnotation(ctx, id interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAnnotation", reflect.TypeOf((*MockAnnotationService)(nil).DeleteAnnotation), ctx, id) -} - -// UpdateAnnotation mocks base method -func (m *MockAnnotationService) UpdateAnnotation(ctx context.Context, id platform.ID, update influxdb.AnnotationCreate) (*influxdb.AnnotationEvent, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateAnnotation", ctx, id, update) - ret0, _ := ret[0].(*influxdb.AnnotationEvent) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateAnnotation indicates an expected call of UpdateAnnotation -func (mr *MockAnnotationServiceMockRecorder) UpdateAnnotation(ctx, id, update interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAnnotation", reflect.TypeOf((*MockAnnotationService)(nil).UpdateAnnotation), ctx, id, update) -} - -// ListStreams mocks base method -func (m *MockAnnotationService) ListStreams(ctx context.Context, orgID platform.ID, filter influxdb.StreamListFilter) ([]influxdb.StoredStream, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListStreams", ctx, orgID, filter) - ret0, _ := ret[0].([]influxdb.StoredStream) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListStreams indicates an expected call of ListStreams -func (mr *MockAnnotationServiceMockRecorder) ListStreams(ctx, orgID, filter interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListStreams", reflect.TypeOf((*MockAnnotationService)(nil).ListStreams), ctx, orgID, filter) -} - -// CreateOrUpdateStream mocks base method -func (m *MockAnnotationService) CreateOrUpdateStream(ctx context.Context, orgID platform.ID, stream influxdb.Stream) (*influxdb.ReadStream, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, 
"CreateOrUpdateStream", ctx, orgID, stream) - ret0, _ := ret[0].(*influxdb.ReadStream) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateOrUpdateStream indicates an expected call of CreateOrUpdateStream -func (mr *MockAnnotationServiceMockRecorder) CreateOrUpdateStream(ctx, orgID, stream interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdateStream", reflect.TypeOf((*MockAnnotationService)(nil).CreateOrUpdateStream), ctx, orgID, stream) -} - -// GetStream mocks base method -func (m *MockAnnotationService) GetStream(ctx context.Context, id platform.ID) (*influxdb.StoredStream, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStream", ctx, id) - ret0, _ := ret[0].(*influxdb.StoredStream) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetStream indicates an expected call of GetStream -func (mr *MockAnnotationServiceMockRecorder) GetStream(ctx, id interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStream", reflect.TypeOf((*MockAnnotationService)(nil).GetStream), ctx, id) -} - -// UpdateStream mocks base method -func (m *MockAnnotationService) UpdateStream(ctx context.Context, id platform.ID, stream influxdb.Stream) (*influxdb.ReadStream, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateStream", ctx, id, stream) - ret0, _ := ret[0].(*influxdb.ReadStream) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateStream indicates an expected call of UpdateStream -func (mr *MockAnnotationServiceMockRecorder) UpdateStream(ctx, id, stream interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateStream", reflect.TypeOf((*MockAnnotationService)(nil).UpdateStream), ctx, id, stream) -} - -// DeleteStreams mocks base method -func (m *MockAnnotationService) DeleteStreams(ctx context.Context, orgID platform.ID, delete influxdb.BasicStream) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteStreams", ctx, orgID, delete) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteStreams indicates an expected call of DeleteStreams -func (mr *MockAnnotationServiceMockRecorder) DeleteStreams(ctx, orgID, delete interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteStreams", reflect.TypeOf((*MockAnnotationService)(nil).DeleteStreams), ctx, orgID, delete) -} - -// DeleteStreamByID mocks base method -func (m *MockAnnotationService) DeleteStreamByID(ctx context.Context, id platform.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteStreamByID", ctx, id) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteStreamByID indicates an expected call of DeleteStreamByID -func (mr *MockAnnotationServiceMockRecorder) DeleteStreamByID(ctx, id interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteStreamByID", reflect.TypeOf((*MockAnnotationService)(nil).DeleteStreamByID), ctx, id) -} diff --git a/mock/auth_service.go b/mock/auth_service.go deleted file mode 100644 index 758d5872480..00000000000 --- a/mock/auth_service.go +++ /dev/null @@ -1,70 +0,0 @@ -package mock - -import ( - "context" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -// AuthorizationService is a mock implementation of a retention.AuthorizationService, which -// also makes it a suitable mock to 
use wherever an platform.AuthorizationService is required. -type AuthorizationService struct { - // Methods for a retention.AuthorizationService - OpenFn func() error - CloseFn func() error - - // Methods for an platform.AuthorizationService - FindAuthorizationByIDFn func(context.Context, platform2.ID) (*platform.Authorization, error) - FindAuthorizationByTokenFn func(context.Context, string) (*platform.Authorization, error) - FindAuthorizationsFn func(context.Context, platform.AuthorizationFilter, ...platform.FindOptions) ([]*platform.Authorization, int, error) - CreateAuthorizationFn func(context.Context, *platform.Authorization) error - DeleteAuthorizationFn func(context.Context, platform2.ID) error - UpdateAuthorizationFn func(context.Context, platform2.ID, *platform.AuthorizationUpdate) (*platform.Authorization, error) -} - -// NewAuthorizationService returns a mock AuthorizationService where its methods will return -// zero values. -func NewAuthorizationService() *AuthorizationService { - return &AuthorizationService{ - FindAuthorizationByIDFn: func(context.Context, platform2.ID) (*platform.Authorization, error) { return nil, nil }, - FindAuthorizationByTokenFn: func(context.Context, string) (*platform.Authorization, error) { return nil, nil }, - FindAuthorizationsFn: func(context.Context, platform.AuthorizationFilter, ...platform.FindOptions) ([]*platform.Authorization, int, error) { - return nil, 0, nil - }, - CreateAuthorizationFn: func(context.Context, *platform.Authorization) error { return nil }, - DeleteAuthorizationFn: func(context.Context, platform2.ID) error { return nil }, - UpdateAuthorizationFn: func(context.Context, platform2.ID, *platform.AuthorizationUpdate) (*platform.Authorization, error) { - return nil, nil - }, - } -} - -// FindAuthorizationByID returns a single authorization by ID. -func (s *AuthorizationService) FindAuthorizationByID(ctx context.Context, id platform2.ID) (*platform.Authorization, error) { - return s.FindAuthorizationByIDFn(ctx, id) -} - -func (s *AuthorizationService) FindAuthorizationByToken(ctx context.Context, t string) (*platform.Authorization, error) { - return s.FindAuthorizationByTokenFn(ctx, t) -} - -// FindAuthorizations returns a list of authorizations that match filter and the total count of matching authorizations. -func (s *AuthorizationService) FindAuthorizations(ctx context.Context, filter platform.AuthorizationFilter, opts ...platform.FindOptions) ([]*platform.Authorization, int, error) { - return s.FindAuthorizationsFn(ctx, filter, opts...) -} - -// CreateAuthorization creates a new authorization and sets b.ID with the new identifier. -func (s *AuthorizationService) CreateAuthorization(ctx context.Context, authorization *platform.Authorization) error { - return s.CreateAuthorizationFn(ctx, authorization) -} - -// DeleteAuthorization removes a authorization by ID. -func (s *AuthorizationService) DeleteAuthorization(ctx context.Context, id platform2.ID) error { - return s.DeleteAuthorizationFn(ctx, id) -} - -// UpdateAuthorization updates the status and description if available. 
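A brief sketch of how these function-field mocks are typically used in tests: start from the zero-value constructor and override only the function fields the test cares about. The test name, IDs, and assertions are illustrative:

```go
package example

import (
	"context"
	"errors"
	"testing"

	platform "github.com/influxdata/influxdb/v2"
	platform2 "github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/mock"
)

// TestFindAuthorizationByID overrides a single function field on the mock
// and leaves every other method with its zero-value behaviour.
func TestFindAuthorizationByID(t *testing.T) {
	svc := mock.NewAuthorizationService()
	svc.FindAuthorizationByIDFn = func(_ context.Context, id platform2.ID) (*platform.Authorization, error) {
		if id != 1 {
			return nil, errors.New("not found")
		}
		return &platform.Authorization{ID: id}, nil
	}

	auth, err := svc.FindAuthorizationByID(context.Background(), 1)
	if err != nil || auth.ID != 1 {
		t.Fatalf("unexpected result: %+v, %v", auth, err)
	}
}
```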
-func (s *AuthorizationService) UpdateAuthorization(ctx context.Context, id platform2.ID, upd *platform.AuthorizationUpdate) (*platform.Authorization, error) { - return s.UpdateAuthorizationFn(ctx, id, upd) -} diff --git a/mock/authorization.go b/mock/authorization.go deleted file mode 100644 index 9c0653a5371..00000000000 --- a/mock/authorization.go +++ /dev/null @@ -1,52 +0,0 @@ -package mock - -import ( - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// ensure Authorizer implements influxdb.Authorizer -var _ influxdb.Authorizer = (*Authorizer)(nil) - -// Authorizer is an Authorizer for testing that can allow everything or use specific permissions -type Authorizer struct { - Permissions []influxdb.Permission - AllowAll bool - UserID platform.ID -} - -func NewMockAuthorizer(allowAll bool, permissions []influxdb.Permission) *Authorizer { - if allowAll { - return &Authorizer{ - AllowAll: true, - } - } - return &Authorizer{ - AllowAll: false, - Permissions: permissions, - } -} - -func (a *Authorizer) PermissionSet() (influxdb.PermissionSet, error) { - if a.AllowAll { - return influxdb.OperPermissions(), nil - } - - return a.Permissions, nil -} - -func (a *Authorizer) Identifier() platform.ID { - return 1 -} - -func (a *Authorizer) GetUserID() platform.ID { - if a.UserID.Valid() { - return a.UserID - } - - return 2 -} - -func (Authorizer) Kind() string { - return "mock" -} diff --git a/mock/authorizer_v1.go b/mock/authorizer_v1.go deleted file mode 100644 index 62b15226777..00000000000 --- a/mock/authorizer_v1.go +++ /dev/null @@ -1,15 +0,0 @@ -package mock - -import ( - "context" - - "github.com/influxdata/influxdb/v2" -) - -type AuthorizerV1 struct { - AuthorizeFn func(ctx context.Context, c influxdb.CredentialsV1) (*influxdb.Authorization, error) -} - -func (a *AuthorizerV1) Authorize(ctx context.Context, c influxdb.CredentialsV1) (*influxdb.Authorization, error) { - return a.AuthorizeFn(ctx, c) -} diff --git a/mock/backup_service.go b/mock/backup_service.go deleted file mode 100644 index 0fe178647db..00000000000 --- a/mock/backup_service.go +++ /dev/null @@ -1,268 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: backup.go - -// Package mock is a generated GoMock package. -package mock - -import ( - context "context" - io "io" - reflect "reflect" - time "time" - - gomock "github.com/golang/mock/gomock" - platform "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockBackupService is a mock of BackupService interface. -type MockBackupService struct { - ctrl *gomock.Controller - recorder *MockBackupServiceMockRecorder -} - -// MockBackupServiceMockRecorder is the mock recorder for MockBackupService. -type MockBackupServiceMockRecorder struct { - mock *MockBackupService -} - -// NewMockBackupService creates a new mock instance. -func NewMockBackupService(ctrl *gomock.Controller) *MockBackupService { - mock := &MockBackupService{ctrl: ctrl} - mock.recorder = &MockBackupServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockBackupService) EXPECT() *MockBackupServiceMockRecorder { - return m.recorder -} - -// BackupKVStore mocks base method. 
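For the MockGen-generated mocks above (such as `MockAnnotationService`), usage follows the standard gomock pattern of recording expectations before exercising the code under test. The test body below is a hedged sketch with illustrative values:

```go
package example

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/mock"
)

// TestGetAnnotation wires an expectation on the generated mock and then
// calls the method, letting gomock verify the call on ctrl.Finish.
func TestGetAnnotation(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	svc := mock.NewMockAnnotationService(ctrl)
	svc.EXPECT().
		GetAnnotation(gomock.Any(), platform.ID(5)).
		Return(&influxdb.StoredAnnotation{}, nil)

	if _, err := svc.GetAnnotation(context.Background(), platform.ID(5)); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
```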
-func (m *MockBackupService) BackupKVStore(ctx context.Context, w io.Writer) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BackupKVStore", ctx, w) - ret0, _ := ret[0].(error) - return ret0 -} - -// BackupKVStore indicates an expected call of BackupKVStore. -func (mr *MockBackupServiceMockRecorder) BackupKVStore(ctx, w interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BackupKVStore", reflect.TypeOf((*MockBackupService)(nil).BackupKVStore), ctx, w) -} - -// BackupShard mocks base method. -func (m *MockBackupService) BackupShard(ctx context.Context, w io.Writer, shardID uint64, since time.Time) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BackupShard", ctx, w, shardID, since) - ret0, _ := ret[0].(error) - return ret0 -} - -// BackupShard indicates an expected call of BackupShard. -func (mr *MockBackupServiceMockRecorder) BackupShard(ctx, w, shardID, since interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BackupShard", reflect.TypeOf((*MockBackupService)(nil).BackupShard), ctx, w, shardID, since) -} - -// LockKVStore mocks base method. -func (m *MockBackupService) RLockKVStore() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RLockKVStore") -} - -// LockKVStore indicates an expected call of LockKVStore. -func (mr *MockBackupServiceMockRecorder) RLockKVStore() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RLockKVStore", reflect.TypeOf((*MockBackupService)(nil).RLockKVStore)) -} - -// UnlockKVStore mocks base method. -func (m *MockBackupService) RUnlockKVStore() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RUnlockKVStore") -} - -// UnlockKVStore indicates an expected call of UnlockKVStore. -func (mr *MockBackupServiceMockRecorder) UnlockKVStore() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RUnlockKVStore", reflect.TypeOf((*MockBackupService)(nil).RUnlockKVStore)) -} - -// MockSqlBackupRestoreService is a mock of SqlBackupRestoreService interface. -type MockSqlBackupRestoreService struct { - ctrl *gomock.Controller - recorder *MockSqlBackupRestoreServiceMockRecorder -} - -// MockSqlBackupRestoreServiceMockRecorder is the mock recorder for MockSqlBackupRestoreService. -type MockSqlBackupRestoreServiceMockRecorder struct { - mock *MockSqlBackupRestoreService -} - -// NewMockSqlBackupRestoreService creates a new mock instance. -func NewMockSqlBackupRestoreService(ctrl *gomock.Controller) *MockSqlBackupRestoreService { - mock := &MockSqlBackupRestoreService{ctrl: ctrl} - mock.recorder = &MockSqlBackupRestoreServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSqlBackupRestoreService) EXPECT() *MockSqlBackupRestoreServiceMockRecorder { - return m.recorder -} - -// BackupSqlStore mocks base method. -func (m *MockSqlBackupRestoreService) BackupSqlStore(ctx context.Context, w io.Writer) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BackupSqlStore", ctx, w) - ret0, _ := ret[0].(error) - return ret0 -} - -// BackupSqlStore indicates an expected call of BackupSqlStore. -func (mr *MockSqlBackupRestoreServiceMockRecorder) BackupSqlStore(ctx, w interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BackupSqlStore", reflect.TypeOf((*MockSqlBackupRestoreService)(nil).BackupSqlStore), ctx, w) -} - -// LockSqlStore mocks base method. 
-func (m *MockSqlBackupRestoreService) RLockSqlStore() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RLockSqlStore") -} - -// LockSqlStore indicates an expected call of LockSqlStore. -func (mr *MockSqlBackupRestoreServiceMockRecorder) RLockSqlStore() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RLockSqlStore", reflect.TypeOf((*MockSqlBackupRestoreService)(nil).RLockSqlStore)) -} - -// RestoreSqlStore mocks base method. -func (m *MockSqlBackupRestoreService) RestoreSqlStore(ctx context.Context, r io.Reader) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RestoreSqlStore", ctx, r) - ret0, _ := ret[0].(error) - return ret0 -} - -// RestoreSqlStore indicates an expected call of RestoreSqlStore. -func (mr *MockSqlBackupRestoreServiceMockRecorder) RestoreSqlStore(ctx, r interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreSqlStore", reflect.TypeOf((*MockSqlBackupRestoreService)(nil).RestoreSqlStore), ctx, r) -} - -// UnlockSqlStore mocks base method. -func (m *MockSqlBackupRestoreService) RUnlockSqlStore() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RUnlockSqlStore") -} - -// UnlockSqlStore indicates an expected call of UnlockSqlStore. -func (mr *MockSqlBackupRestoreServiceMockRecorder) RUnlockSqlStore() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RUnlockSqlStore", reflect.TypeOf((*MockSqlBackupRestoreService)(nil).RUnlockSqlStore)) -} - -// MockBucketManifestWriter is a mock of BucketManifestWriter interface. -type MockBucketManifestWriter struct { - ctrl *gomock.Controller - recorder *MockBucketManifestWriterMockRecorder -} - -// MockBucketManifestWriterMockRecorder is the mock recorder for MockBucketManifestWriter. -type MockBucketManifestWriterMockRecorder struct { - mock *MockBucketManifestWriter -} - -// NewMockBucketManifestWriter creates a new mock instance. -func NewMockBucketManifestWriter(ctrl *gomock.Controller) *MockBucketManifestWriter { - mock := &MockBucketManifestWriter{ctrl: ctrl} - mock.recorder = &MockBucketManifestWriterMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockBucketManifestWriter) EXPECT() *MockBucketManifestWriterMockRecorder { - return m.recorder -} - -// WriteManifest mocks base method. -func (m *MockBucketManifestWriter) WriteManifest(ctx context.Context, w io.Writer) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteManifest", ctx, w) - ret0, _ := ret[0].(error) - return ret0 -} - -// WriteManifest indicates an expected call of WriteManifest. -func (mr *MockBucketManifestWriterMockRecorder) WriteManifest(ctx, w interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteManifest", reflect.TypeOf((*MockBucketManifestWriter)(nil).WriteManifest), ctx, w) -} - -// MockRestoreService is a mock of RestoreService interface. -type MockRestoreService struct { - ctrl *gomock.Controller - recorder *MockRestoreServiceMockRecorder -} - -// MockRestoreServiceMockRecorder is the mock recorder for MockRestoreService. -type MockRestoreServiceMockRecorder struct { - mock *MockRestoreService -} - -// NewMockRestoreService creates a new mock instance. 
-func NewMockRestoreService(ctrl *gomock.Controller) *MockRestoreService { - mock := &MockRestoreService{ctrl: ctrl} - mock.recorder = &MockRestoreServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockRestoreService) EXPECT() *MockRestoreServiceMockRecorder { - return m.recorder -} - -// RestoreBucket mocks base method. -func (m *MockRestoreService) RestoreBucket(ctx context.Context, id platform.ID, rpiData []byte) (map[uint64]uint64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RestoreBucket", ctx, id, rpiData) - ret0, _ := ret[0].(map[uint64]uint64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RestoreBucket indicates an expected call of RestoreBucket. -func (mr *MockRestoreServiceMockRecorder) RestoreBucket(ctx, id, rpiData interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreBucket", reflect.TypeOf((*MockRestoreService)(nil).RestoreBucket), ctx, id, rpiData) -} - -// RestoreKVStore mocks base method. -func (m *MockRestoreService) RestoreKVStore(ctx context.Context, r io.Reader) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RestoreKVStore", ctx, r) - ret0, _ := ret[0].(error) - return ret0 -} - -// RestoreKVStore indicates an expected call of RestoreKVStore. -func (mr *MockRestoreServiceMockRecorder) RestoreKVStore(ctx, r interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreKVStore", reflect.TypeOf((*MockRestoreService)(nil).RestoreKVStore), ctx, r) -} - -// RestoreShard mocks base method. -func (m *MockRestoreService) RestoreShard(ctx context.Context, shardID uint64, r io.Reader) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RestoreShard", ctx, shardID, r) - ret0, _ := ret[0].(error) - return ret0 -} - -// RestoreShard indicates an expected call of RestoreShard. -func (mr *MockRestoreServiceMockRecorder) RestoreShard(ctx, shardID, r interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreShard", reflect.TypeOf((*MockRestoreService)(nil).RestoreShard), ctx, shardID, r) -} diff --git a/mock/bucket_service.go b/mock/bucket_service.go deleted file mode 100644 index 0e4cfe4652c..00000000000 --- a/mock/bucket_service.go +++ /dev/null @@ -1,107 +0,0 @@ -package mock - -import ( - "context" - "time" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -// BucketService is a mock implementation of a retention.BucketService, which -// also makes it a suitable mock to use wherever an platform.BucketService is required. 
-type BucketService struct { - // Methods for a retention.BucketService - OpenFn func() error - CloseFn func() error - - // Methods for an platform.BucketService - FindBucketByIDFn func(context.Context, platform2.ID) (*platform.Bucket, error) - FindBucketByIDCalls SafeCount - FindBucketByNameFn func(context.Context, platform2.ID, string) (*platform.Bucket, error) - FindBucketByNameCalls SafeCount - FindBucketFn func(context.Context, platform.BucketFilter) (*platform.Bucket, error) - FindBucketCalls SafeCount - FindBucketsFn func(context.Context, platform.BucketFilter, ...platform.FindOptions) ([]*platform.Bucket, int, error) - FindBucketsCalls SafeCount - CreateBucketFn func(context.Context, *platform.Bucket) error - CreateBucketCalls SafeCount - UpdateBucketFn func(context.Context, platform2.ID, platform.BucketUpdate) (*platform.Bucket, error) - UpdateBucketCalls SafeCount - DeleteBucketFn func(context.Context, platform2.ID) error - DeleteBucketCalls SafeCount -} - -// NewBucketService returns a mock BucketService where its methods will return -// zero values. -func NewBucketService() *BucketService { - return &BucketService{ - OpenFn: func() error { return nil }, - CloseFn: func() error { return nil }, - FindBucketByIDFn: func(context.Context, platform2.ID) (*platform.Bucket, error) { return nil, nil }, - FindBucketByNameFn: func(context.Context, platform2.ID, string) (*platform.Bucket, error) { - return &platform.Bucket{ - ID: platform2.ID(10), - Type: platform.BucketTypeSystem, - Name: "_tasks", - RetentionPeriod: time.Hour * 24 * 3, - Description: "System bucket for task logs", - }, nil - }, - FindBucketFn: func(context.Context, platform.BucketFilter) (*platform.Bucket, error) { return nil, nil }, - FindBucketsFn: func(context.Context, platform.BucketFilter, ...platform.FindOptions) ([]*platform.Bucket, int, error) { - return nil, 0, nil - }, - CreateBucketFn: func(context.Context, *platform.Bucket) error { return nil }, - UpdateBucketFn: func(context.Context, platform2.ID, platform.BucketUpdate) (*platform.Bucket, error) { return nil, nil }, - DeleteBucketFn: func(context.Context, platform2.ID) error { return nil }, - } -} - -// Open opens the BucketService. -func (s *BucketService) Open() error { return s.OpenFn() } - -// Close closes the BucketService. -func (s *BucketService) Close() error { return s.CloseFn() } - -// FindBucketByID returns a single bucket by ID. -func (s *BucketService) FindBucketByID(ctx context.Context, id platform2.ID) (*platform.Bucket, error) { - defer s.FindBucketByIDCalls.IncrFn()() - return s.FindBucketByIDFn(ctx, id) -} - -// FindBucketByName returns a single bucket by name. -func (s *BucketService) FindBucketByName(ctx context.Context, orgID platform2.ID, name string) (*platform.Bucket, error) { - defer s.FindBucketByNameCalls.IncrFn()() - return s.FindBucketByNameFn(ctx, orgID, name) -} - -// FindBucket returns the first bucket that matches filter. -func (s *BucketService) FindBucket(ctx context.Context, filter platform.BucketFilter) (*platform.Bucket, error) { - defer s.FindBucketCalls.IncrFn()() - return s.FindBucketFn(ctx, filter) -} - -// FindBuckets returns a list of buckets that match filter and the total count of matching buckets. -func (s *BucketService) FindBuckets(ctx context.Context, filter platform.BucketFilter, opts ...platform.FindOptions) ([]*platform.Bucket, int, error) { - defer s.FindBucketsCalls.IncrFn()() - return s.FindBucketsFn(ctx, filter, opts...) -} - -// CreateBucket creates a new bucket and sets b.ID with the new identifier. 
-func (s *BucketService) CreateBucket(ctx context.Context, bucket *platform.Bucket) error { - defer s.CreateBucketCalls.IncrFn()() - return s.CreateBucketFn(ctx, bucket) -} - -// UpdateBucket updates a single bucket with changeset. -func (s *BucketService) UpdateBucket(ctx context.Context, id platform2.ID, upd platform.BucketUpdate) (*platform.Bucket, error) { - defer s.UpdateBucketCalls.IncrFn()() - return s.UpdateBucketFn(ctx, id, upd) -} - -// DeleteBucket removes a bucket by ID. -func (s *BucketService) DeleteBucket(ctx context.Context, id platform2.ID) error { - defer s.DeleteBucketCalls.IncrFn()() - return s.DeleteBucketFn(ctx, id) -} diff --git a/mock/check_service.go b/mock/check_service.go deleted file mode 100644 index 9498058a13a..00000000000 --- a/mock/check_service.go +++ /dev/null @@ -1,89 +0,0 @@ -package mock - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// CheckService is a mock implementation of a retention.CheckService, which -// also makes it a suitable mock to use wherever an influxdb.CheckService is required. -type CheckService struct { - OrganizationService - UserResourceMappingService - - // Methods for an influxdb.CheckService - FindCheckByIDFn func(context.Context, platform.ID) (influxdb.Check, error) - FindCheckByIDCalls SafeCount - FindCheckFn func(context.Context, influxdb.CheckFilter) (influxdb.Check, error) - FindCheckCalls SafeCount - FindChecksFn func(context.Context, influxdb.CheckFilter, ...influxdb.FindOptions) ([]influxdb.Check, int, error) - FindChecksCalls SafeCount - CreateCheckFn func(context.Context, influxdb.CheckCreate, platform.ID) error - CreateCheckCalls SafeCount - UpdateCheckFn func(context.Context, platform.ID, influxdb.CheckCreate) (influxdb.Check, error) - UpdateCheckCalls SafeCount - PatchCheckFn func(context.Context, platform.ID, influxdb.CheckUpdate) (influxdb.Check, error) - PatchCheckCalls SafeCount - DeleteCheckFn func(context.Context, platform.ID) error - DeleteCheckCalls SafeCount -} - -// NewCheckService returns a mock CheckService where its methods will return -// zero values. -func NewCheckService() *CheckService { - return &CheckService{ - FindCheckByIDFn: func(context.Context, platform.ID) (influxdb.Check, error) { return nil, nil }, - FindCheckFn: func(context.Context, influxdb.CheckFilter) (influxdb.Check, error) { return nil, nil }, - FindChecksFn: func(context.Context, influxdb.CheckFilter, ...influxdb.FindOptions) ([]influxdb.Check, int, error) { - return nil, 0, nil - }, - CreateCheckFn: func(context.Context, influxdb.CheckCreate, platform.ID) error { return nil }, - UpdateCheckFn: func(context.Context, platform.ID, influxdb.CheckCreate) (influxdb.Check, error) { return nil, nil }, - PatchCheckFn: func(context.Context, platform.ID, influxdb.CheckUpdate) (influxdb.Check, error) { return nil, nil }, - DeleteCheckFn: func(context.Context, platform.ID) error { return nil }, - } -} - -// FindCheckByID returns a single check by ID. -func (s *CheckService) FindCheckByID(ctx context.Context, id platform.ID) (influxdb.Check, error) { - defer s.FindCheckByIDCalls.IncrFn()() - return s.FindCheckByIDFn(ctx, id) -} - -// FindCheck returns the first check that matches filter. 
-func (s *CheckService) FindCheck(ctx context.Context, filter influxdb.CheckFilter) (influxdb.Check, error) { - defer s.FindCheckCalls.IncrFn()() - return s.FindCheckFn(ctx, filter) -} - -// FindChecks returns a list of checks that match filter and the total count of matching checks. -func (s *CheckService) FindChecks(ctx context.Context, filter influxdb.CheckFilter, opts ...influxdb.FindOptions) ([]influxdb.Check, int, error) { - defer s.FindChecksCalls.IncrFn()() - return s.FindChecksFn(ctx, filter, opts...) -} - -// CreateCheck creates a new check and sets b.ID with the new identifier. -func (s *CheckService) CreateCheck(ctx context.Context, check influxdb.CheckCreate, userID platform.ID) error { - defer s.CreateCheckCalls.IncrFn()() - return s.CreateCheckFn(ctx, check, userID) -} - -// UpdateCheck updates everything except id orgID. -func (s *CheckService) UpdateCheck(ctx context.Context, id platform.ID, chk influxdb.CheckCreate) (influxdb.Check, error) { - defer s.UpdateCheckCalls.IncrFn()() - return s.UpdateCheckFn(ctx, id, chk) -} - -// PatchCheck updates a single check with changeset. -func (s *CheckService) PatchCheck(ctx context.Context, id platform.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { - defer s.PatchCheckCalls.IncrFn()() - return s.PatchCheckFn(ctx, id, upd) -} - -// DeleteCheck removes a check by ID. -func (s *CheckService) DeleteCheck(ctx context.Context, id platform.ID) error { - defer s.DeleteCheckCalls.IncrFn()() - return s.DeleteCheckFn(ctx, id) -} diff --git a/mock/dashboard_service.go b/mock/dashboard_service.go deleted file mode 100644 index 80aa18e2192..00000000000 --- a/mock/dashboard_service.go +++ /dev/null @@ -1,131 +0,0 @@ -package mock - -import ( - "context" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ platform.DashboardService = &DashboardService{} - -type DashboardService struct { - CreateDashboardF func(context.Context, *platform.Dashboard) error - CreateDashboardCalls SafeCount - FindDashboardByIDF func(context.Context, platform2.ID) (*platform.Dashboard, error) - FindDashboardByIDCalls SafeCount - FindDashboardsF func(context.Context, platform.DashboardFilter, platform.FindOptions) ([]*platform.Dashboard, int, error) - FindDashboardsCalls SafeCount - UpdateDashboardF func(context.Context, platform2.ID, platform.DashboardUpdate) (*platform.Dashboard, error) - UpdateDashboardCalls SafeCount - DeleteDashboardF func(context.Context, platform2.ID) error - DeleteDashboardCalls SafeCount - - AddDashboardCellF func(ctx context.Context, id platform2.ID, c *platform.Cell, opts platform.AddDashboardCellOptions) error - AddDashboardCellCalls SafeCount - RemoveDashboardCellF func(ctx context.Context, dashboardID platform2.ID, cellID platform2.ID) error - RemoveDashboardCellCalls SafeCount - GetDashboardCellViewF func(ctx context.Context, dashboardID platform2.ID, cellID platform2.ID) (*platform.View, error) - GetDashboardCellViewCalls SafeCount - UpdateDashboardCellViewF func(ctx context.Context, dashboardID platform2.ID, cellID platform2.ID, upd platform.ViewUpdate) (*platform.View, error) - UpdateDashboardCellViewCalls SafeCount - UpdateDashboardCellF func(ctx context.Context, dashboardID platform2.ID, cellID platform2.ID, upd platform.CellUpdate) (*platform.Cell, error) - UpdateDashboardCellCalls SafeCount - CopyDashboardCellF func(ctx context.Context, dashboardID platform2.ID, cellID platform2.ID) (*platform.Cell, error) - CopyDashboardCellCalls SafeCount 
- ReplaceDashboardCellsF func(ctx context.Context, id platform2.ID, cs []*platform.Cell) error - ReplaceDashboardCellsCalls SafeCount -} - -// NewDashboardService returns a mock of DashboardService where its methods will return zero values. -func NewDashboardService() *DashboardService { - return &DashboardService{ - CreateDashboardF: func(context.Context, *platform.Dashboard) error { return nil }, - FindDashboardByIDF: func(context.Context, platform2.ID) (*platform.Dashboard, error) { return nil, nil }, - FindDashboardsF: func(context.Context, platform.DashboardFilter, platform.FindOptions) ([]*platform.Dashboard, int, error) { - return nil, 0, nil - }, - UpdateDashboardF: func(context.Context, platform2.ID, platform.DashboardUpdate) (*platform.Dashboard, error) { - return nil, nil - }, - DeleteDashboardF: func(context.Context, platform2.ID) error { return nil }, - - AddDashboardCellF: func(ctx context.Context, id platform2.ID, c *platform.Cell, opts platform.AddDashboardCellOptions) error { - return nil - }, - RemoveDashboardCellF: func(ctx context.Context, dashboardID platform2.ID, cellID platform2.ID) error { return nil }, - GetDashboardCellViewF: func(ctx context.Context, dashboardID platform2.ID, cellID platform2.ID) (*platform.View, error) { - return nil, nil - }, - UpdateDashboardCellViewF: func(ctx context.Context, dashboardID platform2.ID, cellID platform2.ID, upd platform.ViewUpdate) (*platform.View, error) { - return nil, nil - }, - UpdateDashboardCellF: func(ctx context.Context, dashboardID platform2.ID, cellID platform2.ID, upd platform.CellUpdate) (*platform.Cell, error) { - return nil, nil - }, - CopyDashboardCellF: func(ctx context.Context, dashboardID platform2.ID, cellID platform2.ID) (*platform.Cell, error) { - return nil, nil - }, - ReplaceDashboardCellsF: func(ctx context.Context, id platform2.ID, cs []*platform.Cell) error { return nil }, - } -} - -func (s *DashboardService) FindDashboardByID(ctx context.Context, id platform2.ID) (*platform.Dashboard, error) { - defer s.FindDashboardByIDCalls.IncrFn()() - return s.FindDashboardByIDF(ctx, id) -} - -func (s *DashboardService) FindDashboards(ctx context.Context, filter platform.DashboardFilter, opts platform.FindOptions) ([]*platform.Dashboard, int, error) { - defer s.FindDashboardsCalls.IncrFn()() - return s.FindDashboardsF(ctx, filter, opts) -} - -func (s *DashboardService) CreateDashboard(ctx context.Context, b *platform.Dashboard) error { - defer s.CreateDashboardCalls.IncrFn()() - return s.CreateDashboardF(ctx, b) -} - -func (s *DashboardService) UpdateDashboard(ctx context.Context, id platform2.ID, upd platform.DashboardUpdate) (*platform.Dashboard, error) { - defer s.UpdateDashboardCalls.IncrFn()() - return s.UpdateDashboardF(ctx, id, upd) -} - -func (s *DashboardService) DeleteDashboard(ctx context.Context, id platform2.ID) error { - defer s.DeleteDashboardCalls.IncrFn()() - return s.DeleteDashboardF(ctx, id) -} - -func (s *DashboardService) GetDashboardCellView(ctx context.Context, dashboardID, cellID platform2.ID) (*platform.View, error) { - defer s.GetDashboardCellViewCalls.IncrFn()() - return s.GetDashboardCellViewF(ctx, dashboardID, cellID) -} - -func (s *DashboardService) UpdateDashboardCellView(ctx context.Context, dashboardID, cellID platform2.ID, upd platform.ViewUpdate) (*platform.View, error) { - defer s.UpdateDashboardCellViewCalls.IncrFn()() - return s.UpdateDashboardCellViewF(ctx, dashboardID, cellID, upd) -} - -func (s *DashboardService) AddDashboardCell(ctx context.Context, id platform2.ID, c 
*platform.Cell, opts platform.AddDashboardCellOptions) error { - defer s.AddDashboardCellCalls.IncrFn()() - return s.AddDashboardCellF(ctx, id, c, opts) -} - -func (s *DashboardService) ReplaceDashboardCells(ctx context.Context, id platform2.ID, cs []*platform.Cell) error { - defer s.ReplaceDashboardCellsCalls.IncrFn()() - return s.ReplaceDashboardCellsF(ctx, id, cs) -} - -func (s *DashboardService) RemoveDashboardCell(ctx context.Context, dashboardID platform2.ID, cellID platform2.ID) error { - defer s.RemoveDashboardCellCalls.IncrFn()() - return s.RemoveDashboardCellF(ctx, dashboardID, cellID) -} - -func (s *DashboardService) UpdateDashboardCell(ctx context.Context, dashboardID platform2.ID, cellID platform2.ID, upd platform.CellUpdate) (*platform.Cell, error) { - defer s.UpdateDashboardCellCalls.IncrFn()() - return s.UpdateDashboardCellF(ctx, dashboardID, cellID, upd) -} - -func (s *DashboardService) CopyDashboardCell(ctx context.Context, dashboardID platform2.ID, cellID platform2.ID) (*platform.Cell, error) { - defer s.CopyDashboardCellCalls.IncrFn()() - return s.CopyDashboardCellF(ctx, dashboardID, cellID) -} diff --git a/mock/dbrp_mapping.go b/mock/dbrp_mapping.go deleted file mode 100644 index 6937f735bd9..00000000000 --- a/mock/dbrp_mapping.go +++ /dev/null @@ -1,53 +0,0 @@ -package mock - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.DBRPMappingService = (*DBRPMappingService)(nil) - -type DBRPMappingService struct { - FindByIDFn func(ctx context.Context, orgID, id platform.ID) (*influxdb.DBRPMapping, error) - FindManyFn func(ctx context.Context, dbrp influxdb.DBRPMappingFilter, opts ...influxdb.FindOptions) ([]*influxdb.DBRPMapping, int, error) - CreateFn func(ctx context.Context, dbrp *influxdb.DBRPMapping) error - UpdateFn func(ctx context.Context, dbrp *influxdb.DBRPMapping) error - DeleteFn func(ctx context.Context, orgID, id platform.ID) error -} - -func (s *DBRPMappingService) FindByID(ctx context.Context, orgID, id platform.ID) (*influxdb.DBRPMapping, error) { - if s.FindByIDFn == nil { - return nil, nil - } - return s.FindByIDFn(ctx, orgID, id) -} - -func (s *DBRPMappingService) FindMany(ctx context.Context, dbrp influxdb.DBRPMappingFilter, opts ...influxdb.FindOptions) ([]*influxdb.DBRPMapping, int, error) { - if s.FindManyFn == nil { - return nil, 0, nil - } - return s.FindManyFn(ctx, dbrp, opts...) -} - -func (s *DBRPMappingService) Create(ctx context.Context, dbrp *influxdb.DBRPMapping) error { - if s.CreateFn == nil { - return nil - } - return s.CreateFn(ctx, dbrp) -} - -func (s *DBRPMappingService) Update(ctx context.Context, dbrp *influxdb.DBRPMapping) error { - if s.UpdateFn == nil { - return nil - } - return s.UpdateFn(ctx, dbrp) -} - -func (s *DBRPMappingService) Delete(ctx context.Context, orgID, id platform.ID) error { - if s.DeleteFn == nil { - return nil - } - return s.DeleteFn(ctx, orgID, id) -} diff --git a/mock/delete.go b/mock/delete.go deleted file mode 100644 index 32b792b2d3a..00000000000 --- a/mock/delete.go +++ /dev/null @@ -1,32 +0,0 @@ -package mock - -import ( - "context" - - "github.com/influxdata/influxql" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.DeleteService = &DeleteService{} - -// DeleteService is a mock delete server. 
-type DeleteService struct { - DeleteBucketRangePredicateF func(tx context.Context, orgID, bucketID platform.ID, min, max int64, pred influxdb.Predicate, measurement influxql.Expr) error -} - -// NewDeleteService returns a mock DeleteService where its methods will return -// zero values. -func NewDeleteService() DeleteService { - return DeleteService{ - DeleteBucketRangePredicateF: func(tx context.Context, orgID, bucketID platform.ID, min, max int64, pred influxdb.Predicate, measurement influxql.Expr) error { - return nil - }, - } -} - -// DeleteBucketRangePredicate calls DeleteBucketRangePredicateF. -func (s DeleteService) DeleteBucketRangePredicate(ctx context.Context, orgID, bucketID platform.ID, min, max int64, pred influxdb.Predicate, measurement influxql.Expr) error { - return s.DeleteBucketRangePredicateF(ctx, orgID, bucketID, min, max, pred, measurement) -} diff --git a/mock/dependencies.go b/mock/dependencies.go deleted file mode 100644 index 27b9cf65b44..00000000000 --- a/mock/dependencies.go +++ /dev/null @@ -1,41 +0,0 @@ -package mock - -import ( - "context" - - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -// BucketLookup implements the BucketLookup interface needed by flux "from" and "to". -type BucketLookup struct{} - -func (BucketLookup) Lookup(_ context.Context, orgID platform2.ID, name string) (platform2.ID, bool) { - if name == "my-bucket" { - return platform2.ID(1), true - } - return platform2.InvalidID(), false -} - -func (BucketLookup) LookupName(_ context.Context, orgID platform2.ID, id platform2.ID) string { - if id == 1 { - return "my-bucket" - } - return "" -} - -// OrganizationLookup implements the OrganizationLookup interface needed by flux "from" and "to". -type OrganizationLookup struct{} - -func (OrganizationLookup) Lookup(_ context.Context, name string) (platform2.ID, bool) { - if name == "my-org" { - return platform2.ID(2), true - } - return platform2.InvalidID(), false -} - -func (OrganizationLookup) LookupName(_ context.Context, id platform2.ID) string { - if id == 2 { - return "my-org" - } - return "" -} diff --git a/mock/document_service.go b/mock/document_service.go deleted file mode 100644 index ca9e5afc1f2..00000000000 --- a/mock/document_service.go +++ /dev/null @@ -1,76 +0,0 @@ -package mock - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.DocumentStore = &DocumentStore{} - -// DocumentService is mocked document service. -type DocumentService struct { - CreateDocumentStoreFn func(ctx context.Context, name string) (influxdb.DocumentStore, error) - FindDocumentStoreFn func(ctx context.Context, name string) (influxdb.DocumentStore, error) -} - -// CreateDocumentStore calls the mocked CreateDocumentStoreFn. -func (s *DocumentService) CreateDocumentStore(ctx context.Context, name string) (influxdb.DocumentStore, error) { - return s.CreateDocumentStoreFn(ctx, name) -} - -// FindDocumentStore calls the mocked FindDocumentStoreFn. -func (s *DocumentService) FindDocumentStore(ctx context.Context, name string) (influxdb.DocumentStore, error) { - return s.FindDocumentStoreFn(ctx, name) -} - -// NewDocumentService returns a mock of DocumentService where its methods will return zero values. 
-func NewDocumentService() *DocumentService { - return &DocumentService{ - CreateDocumentStoreFn: func(ctx context.Context, name string) (influxdb.DocumentStore, error) { - return nil, nil - }, - FindDocumentStoreFn: func(ctx context.Context, name string) (influxdb.DocumentStore, error) { - return nil, nil - }, - } -} - -// DocumentStore is the mocked document store. -type DocumentStore struct { - TimeGenerator TimeGenerator - CreateDocumentFn func(ctx context.Context, d *influxdb.Document) error - FindDocumentFn func(ctx context.Context, id platform.ID) (*influxdb.Document, error) - FindDocumentsFn func(ctx context.Context, oid platform.ID) ([]*influxdb.Document, error) -} - -// NewDocumentStore returns a mock of DocumentStore where its methods will return zero values. -func NewDocumentStore() *DocumentStore { - return &DocumentStore{ - CreateDocumentFn: func(ctx context.Context, d *influxdb.Document) error { - return nil - }, - FindDocumentFn: func(ctx context.Context, id platform.ID) (document *influxdb.Document, e error) { - return nil, nil - }, - FindDocumentsFn: func(ctx context.Context, oid platform.ID) ([]*influxdb.Document, error) { - return nil, nil - }, - } -} - -// CreateDocument will call the mocked CreateDocumentFn. -func (s *DocumentStore) CreateDocument(ctx context.Context, d *influxdb.Document) error { - return s.CreateDocumentFn(ctx, d) -} - -// FindDocument will call the mocked FindDocumentFn. -func (s *DocumentStore) FindDocument(ctx context.Context, id platform.ID) (*influxdb.Document, error) { - return s.FindDocumentFn(ctx, id) -} - -// FindDocuments will call the mocked FindDocumentsFn. -func (s *DocumentStore) FindDocuments(ctx context.Context, oid platform.ID) ([]*influxdb.Document, error) { - return s.FindDocumentsFn(ctx, oid) -} diff --git a/mock/flagger.go b/mock/flagger.go deleted file mode 100644 index 9e5aa263bdb..00000000000 --- a/mock/flagger.go +++ /dev/null @@ -1,27 +0,0 @@ -package mock - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/feature" -) - -// Flagger is a mock. -type Flagger struct { - m map[string]interface{} -} - -// NewFlagger returns a mock Flagger. -func NewFlagger(flags map[feature.Flag]interface{}) *Flagger { - m := make(map[string]interface{}, len(flags)) - for k, v := range flags { - m[k.Key()] = v - } - return &Flagger{m} -} - -// Flags returns a map of flag keys to flag values according to its configured flag map. -// It never returns an error. -func (f Flagger) Flags(context.Context, ...feature.Flag) (map[string]interface{}, error) { - return f.m, nil -} diff --git a/mock/generators.go b/mock/generators.go deleted file mode 100644 index 669a883abcb..00000000000 --- a/mock/generators.go +++ /dev/null @@ -1,114 +0,0 @@ -package mock - -import ( - "testing" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// IDGenerator is mock implementation of influxdb.IDGenerator. -type IDGenerator struct { - IDFn func() platform.ID -} - -// ID generates a new influxdb.ID from a mock function. -func (g IDGenerator) ID() platform.ID { - return g.IDFn() -} - -// NewIDGenerator is a simple way to create immutable id generator -func NewIDGenerator(s string, t *testing.T) IDGenerator { - t.Helper() - - id, err := platform.IDFromString(s) - if err != nil { - t.Fatal(err) - } - - return NewStaticIDGenerator(*id) -} - -// NewStaticIDGenerator returns an IDGenerator which produces the ID -// provided to this function on a call to ID(). 
-func NewStaticIDGenerator(id platform.ID) IDGenerator { - return IDGenerator{ - IDFn: func() platform.ID { - return id - }, - } -} - -// NewIncrementingIDGenerator returns an ID generator which starts at the -// provided ID and increments on each call to ID(). -func NewIncrementingIDGenerator(start platform.ID) IDGenerator { - return IDGenerator{ - IDFn: func() platform.ID { - defer func() { start++ }() - return start - }, - } -} - -// SetIDForFunc replaces the id generator at the end of the pointer with -// one which returns the provided id. It then invokes the provided function before -// restoring the original value at the end of the pointer. -func SetIDForFunc(gen *platform.IDGenerator, id platform.ID, fn func()) { - backup := *gen - defer func() { *gen = backup }() - - *gen = NewStaticIDGenerator(id) - - fn() -} - -type MockIDGenerator struct { - Last *platform.ID - Count int -} - -const FirstMockID int = 65536 - -func NewMockIDGenerator() *MockIDGenerator { - return &MockIDGenerator{ - Count: FirstMockID, - } -} - -func (g *MockIDGenerator) ID() platform.ID { - id := platform.ID(g.Count) - g.Count++ - - g.Last = &id - - return id -} - -// NewTokenGenerator is a simple way to create immutable token generator. -func NewTokenGenerator(s string, err error) TokenGenerator { - return TokenGenerator{ - TokenFn: func() (string, error) { - return s, err - }, - } -} - -// TokenGenerator is mock implementation of influxdb.TokenGenerator. -type TokenGenerator struct { - TokenFn func() (string, error) -} - -// Token generates a new influxdb.Token from a mock function. -func (g TokenGenerator) Token() (string, error) { - return g.TokenFn() -} - -// TimeGenerator stores a fake value of time. -type TimeGenerator struct { - FakeValue time.Time -} - -// Now will return the FakeValue stored in the struct. -func (g TimeGenerator) Now() time.Time { - return g.FakeValue -} diff --git a/mock/kv.go b/mock/kv.go deleted file mode 100644 index 44fa84f6a97..00000000000 --- a/mock/kv.go +++ /dev/null @@ -1,146 +0,0 @@ -package mock - -import ( - "context" - "io" - - "github.com/influxdata/influxdb/v2/kv" -) - -var _ (kv.Store) = (*Store)(nil) - -// Store is a mock kv.Store -type Store struct { - ViewFn func(func(kv.Tx) error) error - UpdateFn func(func(kv.Tx) error) error - BackupFn func(ctx context.Context, w io.Writer) error - RestoreFn func(ctx context.Context, r io.Reader) error -} - -// View opens up a transaction that will not write to any data. Implementing interfaces -// should take care to ensure that all view transactions do not mutate any data. -func (s *Store) View(ctx context.Context, fn func(kv.Tx) error) error { - return s.ViewFn(fn) -} - -// Update opens up a transaction that will mutate data. -func (s *Store) Update(ctx context.Context, fn func(kv.Tx) error) error { - return s.UpdateFn(fn) -} - -// RLock and RUnlock methods are to satisfy the kv.Store interface -func (s *Store) RLock() {} - -func (s *Store) RUnlock() {} - -func (s *Store) Backup(ctx context.Context, w io.Writer) error { - return s.BackupFn(ctx, w) -} - -func (s *Store) Restore(ctx context.Context, r io.Reader) error { - return s.RestoreFn(ctx, r) -} - -var _ (kv.Tx) = (*Tx)(nil) - -// Tx is mock of a kv.Tx. -type Tx struct { - BucketFn func(b []byte) (kv.Bucket, error) - ContextFn func() context.Context - WithContextFn func(ctx context.Context) -} - -// Bucket possibly creates and returns bucket, b. 
-func (t *Tx) Bucket(b []byte) (kv.Bucket, error) { - return t.BucketFn(b) -} - -// Context returns the context associated with this Tx. -func (t *Tx) Context() context.Context { - return t.ContextFn() -} - -// WithContext associates a context with this Tx. -func (t *Tx) WithContext(ctx context.Context) { - t.WithContextFn(ctx) -} - -var _ (kv.Bucket) = (*Bucket)(nil) - -// Bucket is the abstraction used to perform get/put/delete/get-many operations -// in a key value store -type Bucket struct { - GetFn func(key []byte) ([]byte, error) - GetBatchFn func(keys ...[]byte) ([][]byte, error) - CursorFn func() (kv.Cursor, error) - PutFn func(key, value []byte) error - DeleteFn func(key []byte) error - ForwardCursorFn func([]byte, ...kv.CursorOption) kv.ForwardCursor -} - -// Get returns a key within this bucket. Errors if key does not exist. -func (b *Bucket) Get(key []byte) ([]byte, error) { - return b.GetFn(key) -} - -// GetBatch returns a set of keys values within this bucket. -func (b *Bucket) GetBatch(keys ...[]byte) ([][]byte, error) { - return b.GetBatchFn(keys...) -} - -// Cursor returns a cursor at the beginning of this bucket. -func (b *Bucket) Cursor(opts ...kv.CursorHint) (kv.Cursor, error) { - return b.CursorFn() -} - -// Put should error if the transaction it was called in is not writable. -func (b *Bucket) Put(key, value []byte) error { - return b.PutFn(key, value) -} - -// Delete should error if the transaction it was called in is not writable. -func (b *Bucket) Delete(key []byte) error { - return b.DeleteFn(key) -} - -// ForwardCursor returns a cursor from the seek points in the configured direction. -func (b *Bucket) ForwardCursor(seek []byte, opts ...kv.CursorOption) (kv.ForwardCursor, error) { - return b.ForwardCursorFn(seek, opts...), nil -} - -var _ (kv.Cursor) = (*Cursor)(nil) - -// Cursor is an abstraction for iterating/ranging through data. A concrete implementation -// of a cursor can be found in cursor.go. -type Cursor struct { - SeekFn func(prefix []byte) (k []byte, v []byte) - FirstFn func() (k []byte, v []byte) - LastFn func() (k []byte, v []byte) - NextFn func() (k []byte, v []byte) - PrevFn func() (k []byte, v []byte) -} - -// Seek moves the cursor forward until reaching prefix in the key name. -func (c *Cursor) Seek(prefix []byte) (k []byte, v []byte) { - return c.SeekFn(prefix) -} - -// First moves the cursor to the first key in the bucket. -func (c *Cursor) First() (k []byte, v []byte) { - return c.FirstFn() -} - -// Last moves the cursor to the last key in the bucket. -func (c *Cursor) Last() (k []byte, v []byte) { - return c.LastFn() -} - -// Next moves the cursor to the next key in the bucket. -func (c *Cursor) Next() (k []byte, v []byte) { - return c.NextFn() -} - -// Prev moves the cursor to the prev key in the bucket. 
-func (c *Cursor) Prev() (k []byte, v []byte) { - return c.PrevFn() -} diff --git a/mock/label_service.go b/mock/label_service.go deleted file mode 100644 index 9aed8fac0f9..00000000000 --- a/mock/label_service.go +++ /dev/null @@ -1,99 +0,0 @@ -package mock - -import ( - "context" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ platform.LabelService = &LabelService{} - -// LabelService is a mock implementation of platform.LabelService -type LabelService struct { - CreateLabelFn func(context.Context, *platform.Label) error - CreateLabelCalls SafeCount - DeleteLabelFn func(context.Context, platform2.ID) error - DeleteLabelCalls SafeCount - FindLabelByIDFn func(ctx context.Context, id platform2.ID) (*platform.Label, error) - FindLabelByIDCalls SafeCount - FindLabelsFn func(context.Context, platform.LabelFilter) ([]*platform.Label, error) - FindLabelsCalls SafeCount - FindResourceLabelsFn func(context.Context, platform.LabelMappingFilter) ([]*platform.Label, error) - FindResourceLabelsCalls SafeCount - UpdateLabelFn func(context.Context, platform2.ID, platform.LabelUpdate) (*platform.Label, error) - UpdateLabelCalls SafeCount - CreateLabelMappingFn func(context.Context, *platform.LabelMapping) error - CreateLabelMappingCalls SafeCount - DeleteLabelMappingFn func(context.Context, *platform.LabelMapping) error - DeleteLabelMappingCalls SafeCount -} - -// NewLabelService returns a mock of LabelService -// where its methods will return zero values. -func NewLabelService() *LabelService { - return &LabelService{ - FindLabelByIDFn: func(ctx context.Context, id platform2.ID) (*platform.Label, error) { - return nil, nil - }, - FindLabelsFn: func(context.Context, platform.LabelFilter) ([]*platform.Label, error) { - return nil, nil - }, - FindResourceLabelsFn: func(context.Context, platform.LabelMappingFilter) ([]*platform.Label, error) { - return []*platform.Label{}, nil - }, - CreateLabelFn: func(context.Context, *platform.Label) error { return nil }, - CreateLabelMappingFn: func(context.Context, *platform.LabelMapping) error { return nil }, - UpdateLabelFn: func(context.Context, platform2.ID, platform.LabelUpdate) (*platform.Label, error) { return nil, nil }, - DeleteLabelFn: func(context.Context, platform2.ID) error { return nil }, - DeleteLabelMappingFn: func(context.Context, *platform.LabelMapping) error { return nil }, - } -} - -// FindLabelByID finds mappings by their ID -func (s *LabelService) FindLabelByID(ctx context.Context, id platform2.ID) (*platform.Label, error) { - defer s.FindLabelByIDCalls.IncrFn()() - return s.FindLabelByIDFn(ctx, id) -} - -// FindLabels finds mappings that match a given filter. -func (s *LabelService) FindLabels(ctx context.Context, filter platform.LabelFilter, opt ...platform.FindOptions) ([]*platform.Label, error) { - defer s.FindLabelsCalls.IncrFn()() - return s.FindLabelsFn(ctx, filter) -} - -// FindResourceLabels finds mappings that match a given filter. -func (s *LabelService) FindResourceLabels(ctx context.Context, filter platform.LabelMappingFilter) ([]*platform.Label, error) { - defer s.FindResourceLabelsCalls.IncrFn()() - return s.FindResourceLabelsFn(ctx, filter) -} - -// CreateLabel creates a new Label. -func (s *LabelService) CreateLabel(ctx context.Context, l *platform.Label) error { - defer s.CreateLabelCalls.IncrFn()() - return s.CreateLabelFn(ctx, l) -} - -// CreateLabelMapping creates a new Label mapping. 
-func (s *LabelService) CreateLabelMapping(ctx context.Context, m *platform.LabelMapping) error { - defer s.CreateLabelMappingCalls.IncrFn()() - return s.CreateLabelMappingFn(ctx, m) -} - -// UpdateLabel updates a label. -func (s *LabelService) UpdateLabel(ctx context.Context, id platform2.ID, upd platform.LabelUpdate) (*platform.Label, error) { - defer s.UpdateLabelCalls.IncrFn()() - return s.UpdateLabelFn(ctx, id, upd) -} - -// DeleteLabel removes a Label. -func (s *LabelService) DeleteLabel(ctx context.Context, id platform2.ID) error { - defer s.DeleteLabelCalls.IncrFn()() - return s.DeleteLabelFn(ctx, id) -} - -// DeleteLabelMapping removes a Label mapping. -func (s *LabelService) DeleteLabelMapping(ctx context.Context, m *platform.LabelMapping) error { - defer s.DeleteLabelMappingCalls.IncrFn()() - return s.DeleteLabelMappingFn(ctx, m) -} diff --git a/mock/lookup_service.go b/mock/lookup_service.go deleted file mode 100644 index cd9f6c64691..00000000000 --- a/mock/lookup_service.go +++ /dev/null @@ -1,27 +0,0 @@ -package mock - -import ( - "context" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -// LookupService provides field lookup for the resource and ID. -type LookupService struct { - NameFn func(ctx context.Context, resource platform.ResourceType, id platform2.ID) (string, error) -} - -// NewLookupService returns a mock of LookupService where its methods will return zero values. -func NewLookupService() *LookupService { - return &LookupService{ - NameFn: func(ctx context.Context, resource platform.ResourceType, id platform2.ID) (string, error) { - return "", nil - }, - } -} - -// FindResourceName returns the name for the resource and ID. -func (s *LookupService) FindResourceName(ctx context.Context, resource platform.ResourceType, id platform2.ID) (string, error) { - return s.NameFn(ctx, resource, id) -} diff --git a/mock/notebook_service.go b/mock/notebook_service.go deleted file mode 100644 index 63ac2f368d8..00000000000 --- a/mock/notebook_service.go +++ /dev/null @@ -1,111 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: notebook.go - -// Package mock is a generated GoMock package. -package mock - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - platform "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockNotebookService is a mock of NotebookService interface. -type MockNotebookService struct { - ctrl *gomock.Controller - recorder *MockNotebookServiceMockRecorder -} - -// MockNotebookServiceMockRecorder is the mock recorder for MockNotebookService. -type MockNotebookServiceMockRecorder struct { - mock *MockNotebookService -} - -// NewMockNotebookService creates a new mock instance. -func NewMockNotebookService(ctrl *gomock.Controller) *MockNotebookService { - mock := &MockNotebookService{ctrl: ctrl} - mock.recorder = &MockNotebookServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockNotebookService) EXPECT() *MockNotebookServiceMockRecorder { - return m.recorder -} - -// CreateNotebook mocks base method. 
-func (m *MockNotebookService) CreateNotebook(ctx context.Context, create *influxdb.NotebookReqBody) (*influxdb.Notebook, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateNotebook", ctx, create) - ret0, _ := ret[0].(*influxdb.Notebook) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateNotebook indicates an expected call of CreateNotebook. -func (mr *MockNotebookServiceMockRecorder) CreateNotebook(ctx, create interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNotebook", reflect.TypeOf((*MockNotebookService)(nil).CreateNotebook), ctx, create) -} - -// DeleteNotebook mocks base method. -func (m *MockNotebookService) DeleteNotebook(ctx context.Context, id platform.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteNotebook", ctx, id) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteNotebook indicates an expected call of DeleteNotebook. -func (mr *MockNotebookServiceMockRecorder) DeleteNotebook(ctx, id interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNotebook", reflect.TypeOf((*MockNotebookService)(nil).DeleteNotebook), ctx, id) -} - -// GetNotebook mocks base method. -func (m *MockNotebookService) GetNotebook(ctx context.Context, id platform.ID) (*influxdb.Notebook, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetNotebook", ctx, id) - ret0, _ := ret[0].(*influxdb.Notebook) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetNotebook indicates an expected call of GetNotebook. -func (mr *MockNotebookServiceMockRecorder) GetNotebook(ctx, id interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNotebook", reflect.TypeOf((*MockNotebookService)(nil).GetNotebook), ctx, id) -} - -// ListNotebooks mocks base method. -func (m *MockNotebookService) ListNotebooks(ctx context.Context, filter influxdb.NotebookListFilter) ([]*influxdb.Notebook, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListNotebooks", ctx, filter) - ret0, _ := ret[0].([]*influxdb.Notebook) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListNotebooks indicates an expected call of ListNotebooks. -func (mr *MockNotebookServiceMockRecorder) ListNotebooks(ctx, filter interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNotebooks", reflect.TypeOf((*MockNotebookService)(nil).ListNotebooks), ctx, filter) -} - -// UpdateNotebook mocks base method. -func (m *MockNotebookService) UpdateNotebook(ctx context.Context, id platform.ID, update *influxdb.NotebookReqBody) (*influxdb.Notebook, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateNotebook", ctx, id, update) - ret0, _ := ret[0].(*influxdb.Notebook) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateNotebook indicates an expected call of UpdateNotebook. 
-func (mr *MockNotebookServiceMockRecorder) UpdateNotebook(ctx, id, update interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNotebook", reflect.TypeOf((*MockNotebookService)(nil).UpdateNotebook), ctx, id, update)
-}
diff --git a/mock/notification_endpoint_service.go b/mock/notification_endpoint_service.go
deleted file mode 100644
index c35a3d61c6d..00000000000
--- a/mock/notification_endpoint_service.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package mock
-
-import (
- "context"
-
- "github.com/influxdata/influxdb/v2"
- "github.com/influxdata/influxdb/v2/kit/platform"
-)
-
-var _ influxdb.NotificationEndpointService = &NotificationEndpointService{}
-
-// NotificationEndpointService represents a service for managing notification endpoint data.
-type NotificationEndpointService struct {
- *OrganizationService
- *UserResourceMappingService
- FindNotificationEndpointByIDF func(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error)
- FindNotificationEndpointByIDCalls SafeCount
- FindNotificationEndpointsF func(ctx context.Context, filter influxdb.NotificationEndpointFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error)
- FindNotificationEndpointsCalls SafeCount
- CreateNotificationEndpointF func(ctx context.Context, nr influxdb.NotificationEndpoint, userID platform.ID) error
- CreateNotificationEndpointCalls SafeCount
- UpdateNotificationEndpointF func(ctx context.Context, id platform.ID, nr influxdb.NotificationEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error)
- UpdateNotificationEndpointCalls SafeCount
- PatchNotificationEndpointF func(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error)
- PatchNotificationEndpointCalls SafeCount
- DeleteNotificationEndpointF func(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error)
- DeleteNotificationEndpointCalls SafeCount
-}
-
-func NewNotificationEndpointService() *NotificationEndpointService {
- return &NotificationEndpointService{
- OrganizationService: NewOrganizationService(),
- UserResourceMappingService: NewUserResourceMappingService(),
- FindNotificationEndpointByIDF: func(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) {
- return nil, nil
- },
- FindNotificationEndpointsF: func(ctx context.Context, filter influxdb.NotificationEndpointFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) {
- return nil, 0, nil
- },
- CreateNotificationEndpointF: func(ctx context.Context, nr influxdb.NotificationEndpoint, userID platform.ID) error {
- return nil
- },
- UpdateNotificationEndpointF: func(ctx context.Context, id platform.ID, nr influxdb.NotificationEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error) {
- return nil, nil
- },
- PatchNotificationEndpointF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) {
- return nil, nil
- },
- DeleteNotificationEndpointF: func(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error) {
- return nil, 0, nil
- },
- }
-}
-
-// FindNotificationEndpointByID returns a single notification endpoint by ID.
-func (s *NotificationEndpointService) FindNotificationEndpointByID(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) {
- defer s.FindNotificationEndpointByIDCalls.IncrFn()()
- return s.FindNotificationEndpointByIDF(ctx, id)
-}
-
-// FindNotificationEndpoints returns a list of notification endpoints that match filter and the total count of matching notification endpoints.
-// Additional options provide pagination & sorting.
-func (s *NotificationEndpointService) FindNotificationEndpoints(ctx context.Context, filter influxdb.NotificationEndpointFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) {
- defer s.FindNotificationEndpointsCalls.IncrFn()()
- return s.FindNotificationEndpointsF(ctx, filter, opt...)
-}
-
-// CreateNotificationEndpoint creates a new notification endpoint and sets ID with the new identifier.
-func (s *NotificationEndpointService) CreateNotificationEndpoint(ctx context.Context, nr influxdb.NotificationEndpoint, userID platform.ID) error {
- defer s.CreateNotificationEndpointCalls.IncrFn()()
- return s.CreateNotificationEndpointF(ctx, nr, userID)
-}
-
-// UpdateNotificationEndpoint updates a single notification endpoint.
-// Returns the new notification endpoint after update.
-func (s *NotificationEndpointService) UpdateNotificationEndpoint(ctx context.Context, id platform.ID, nr influxdb.NotificationEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error) {
- defer s.UpdateNotificationEndpointCalls.IncrFn()()
- return s.UpdateNotificationEndpointF(ctx, id, nr, userID)
-}
-
-// PatchNotificationEndpoint updates a single notification endpoint with changeset.
-// Returns the new notification endpoint after update.
-func (s *NotificationEndpointService) PatchNotificationEndpoint(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) {
- defer s.PatchNotificationEndpointCalls.IncrFn()()
- return s.PatchNotificationEndpointF(ctx, id, upd)
-}
-
-// DeleteNotificationEndpoint removes a notification endpoint by ID.
-func (s *NotificationEndpointService) DeleteNotificationEndpoint(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error) {
- defer s.DeleteNotificationEndpointCalls.IncrFn()()
- return s.DeleteNotificationEndpointF(ctx, id)
-}
diff --git a/mock/notification_rule_store.go b/mock/notification_rule_store.go
deleted file mode 100644
index 277b5a90a14..00000000000
--- a/mock/notification_rule_store.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package mock
-
-import (
- "context"
-
- "github.com/influxdata/influxdb/v2"
- "github.com/influxdata/influxdb/v2/kit/platform"
-)
-
-var _ influxdb.NotificationRuleStore = &NotificationRuleStore{}
-
-// NotificationRuleStore represents a service for managing notification rule data.
-type NotificationRuleStore struct {
- *OrganizationService
- *UserResourceMappingService
- FindNotificationRuleByIDF func(ctx context.Context, id platform.ID) (influxdb.NotificationRule, error)
- FindNotificationRuleByIDCalls SafeCount
- FindNotificationRulesF func(ctx context.Context, filter influxdb.NotificationRuleFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationRule, int, error)
- FindNotificationRulesCalls SafeCount
- CreateNotificationRuleF func(ctx context.Context, nr influxdb.NotificationRuleCreate, userID platform.ID) error
- CreateNotificationRuleCalls SafeCount
- UpdateNotificationRuleF func(ctx context.Context, id platform.ID, nr influxdb.NotificationRuleCreate, userID platform.ID) (influxdb.NotificationRule, error)
- UpdateNotificationRuleCalls SafeCount
- PatchNotificationRuleF func(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error)
- PatchNotificationRuleCalls SafeCount
- DeleteNotificationRuleF func(ctx context.Context, id platform.ID) error
- DeleteNotificationRuleCalls SafeCount
-}
-
-// NewNotificationRuleStore creates a fake notification rule store.
-func NewNotificationRuleStore() *NotificationRuleStore {
- return &NotificationRuleStore{
- OrganizationService: NewOrganizationService(),
- UserResourceMappingService: NewUserResourceMappingService(),
- FindNotificationRuleByIDF: func(ctx context.Context, id platform.ID) (influxdb.NotificationRule, error) {
- return nil, nil
- },
- FindNotificationRulesF: func(ctx context.Context, f influxdb.NotificationRuleFilter, _ ...influxdb.FindOptions) ([]influxdb.NotificationRule, int, error) {
- return nil, 0, nil
- },
- CreateNotificationRuleF: func(ctx context.Context, nr influxdb.NotificationRuleCreate, userID platform.ID) error {
- return nil
- },
- UpdateNotificationRuleF: func(ctx context.Context, id platform.ID, nr influxdb.NotificationRuleCreate, userID platform.ID) (influxdb.NotificationRule, error) {
- return nil, nil
- },
- PatchNotificationRuleF: func(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) {
- return nil, nil
- },
- DeleteNotificationRuleF: func(ctx context.Context, id platform.ID) error {
- return nil
- },
- }
-}
-
-// FindNotificationRuleByID returns a single notification rule by ID.
-func (s *NotificationRuleStore) FindNotificationRuleByID(ctx context.Context, id platform.ID) (influxdb.NotificationRule, error) {
- defer s.FindNotificationRuleByIDCalls.IncrFn()()
- return s.FindNotificationRuleByIDF(ctx, id)
-}
-
-// FindNotificationRules returns a list of notification rules that match filter and the total count of matching notification rules.
-// Additional options provide pagination & sorting.
-func (s *NotificationRuleStore) FindNotificationRules(ctx context.Context, filter influxdb.NotificationRuleFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationRule, int, error) {
- defer s.FindNotificationRulesCalls.IncrFn()()
- return s.FindNotificationRulesF(ctx, filter, opt...)
-}
-
-// CreateNotificationRule creates a new notification rule and sets ID with the new identifier.
-func (s *NotificationRuleStore) CreateNotificationRule(ctx context.Context, nr influxdb.NotificationRuleCreate, userID platform.ID) error {
- defer s.CreateNotificationRuleCalls.IncrFn()()
- return s.CreateNotificationRuleF(ctx, nr, userID)
-}
-
-// UpdateNotificationRule updates a single notification rule.
-// Returns the new notification rule after update.
-func (s *NotificationRuleStore) UpdateNotificationRule(ctx context.Context, id platform.ID, nr influxdb.NotificationRuleCreate, userID platform.ID) (influxdb.NotificationRule, error) {
- defer s.UpdateNotificationRuleCalls.IncrFn()()
- return s.UpdateNotificationRuleF(ctx, id, nr, userID)
-}
-
-// PatchNotificationRule updates a single notification rule with changeset.
-// Returns the new notification rule after update.
-func (s *NotificationRuleStore) PatchNotificationRule(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) {
- defer s.PatchNotificationRuleCalls.IncrFn()()
- return s.PatchNotificationRuleF(ctx, id, upd)
-}
-
-// DeleteNotificationRule removes a notification rule by ID.
-func (s *NotificationRuleStore) DeleteNotificationRule(ctx context.Context, id platform.ID) error {
- defer s.DeleteNotificationRuleCalls.IncrFn()()
- return s.DeleteNotificationRuleF(ctx, id)
-}
diff --git a/mock/onboarding_service.go b/mock/onboarding_service.go
deleted file mode 100644
index 91244334246..00000000000
--- a/mock/onboarding_service.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package mock
-
-import (
- "context"
-
- platform "github.com/influxdata/influxdb/v2"
-)
-
-var _ platform.OnboardingService = (*OnboardingService)(nil)
-
-// OnboardingService is a mock implementation of platform.OnboardingService.
-type OnboardingService struct {
- PasswordsService
- BucketService
- OrganizationService
- UserService
- AuthorizationService
-
- IsOnboardingFn func(context.Context) (bool, error)
- OnboardInitialUserFn func(context.Context, *platform.OnboardingRequest) (*platform.OnboardingResults, error)
- OnboardUserFn func(context.Context, *platform.OnboardingRequest) (*platform.OnboardingResults, error)
-}
-
-// NewOnboardingService returns a mock of OnboardingService where its methods will return zero values.
-func NewOnboardingService() *OnboardingService {
- return &OnboardingService{
- IsOnboardingFn: func(context.Context) (bool, error) { return false, nil },
- OnboardInitialUserFn: func(context.Context, *platform.OnboardingRequest) (*platform.OnboardingResults, error) {
- return nil, nil
- },
- OnboardUserFn: func(context.Context, *platform.OnboardingRequest) (*platform.OnboardingResults, error) {
- return nil, nil
- },
- }
-}
-
-// IsOnboarding determines whether an onboarding request is allowed.
-func (s *OnboardingService) IsOnboarding(ctx context.Context) (bool, error) {
- return s.IsOnboardingFn(ctx)
-}
-
-// OnboardInitialUser onboards the initial user and returns the OnboardingResults.
-func (s *OnboardingService) OnboardInitialUser(ctx context.Context, req *platform.OnboardingRequest) (*platform.OnboardingResults, error) {
- return s.OnboardInitialUserFn(ctx, req)
-}
diff --git a/mock/operation_log_service.go b/mock/operation_log_service.go
deleted file mode 100644
index 12502c05418..00000000000
--- a/mock/operation_log_service.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package mock
-
-import (
- "context"
-
- platform "github.com/influxdata/influxdb/v2"
- platform2 "github.com/influxdata/influxdb/v2/kit/platform"
-)
-
-var _ platform.BucketOperationLogService = (*BucketOperationLogService)(nil)
-var _ platform.DashboardOperationLogService = (*DashboardOperationLogService)(nil)
-var _ platform.OrganizationOperationLogService = (*OrganizationOperationLogService)(nil)
-var _ platform.UserOperationLogService = (*UserOperationLogService)(nil)
-
-// NewBucketOperationLogService returns a mock of BucketOperationLogService.
-func NewBucketOperationLogService() *BucketOperationLogService { - return &BucketOperationLogService{ - GetBucketOperationLogFn: func(context.Context, platform2.ID, platform.FindOptions) ([]*platform.OperationLogEntry, int, error) { - return nil, 0, nil - }, - } -} - -// NewDashboardOperationLogService returns a mock of DashboardOperationLogService. -func NewDashboardOperationLogService() *DashboardOperationLogService { - return &DashboardOperationLogService{ - GetDashboardOperationLogFn: func(context.Context, platform2.ID, platform.FindOptions) ([]*platform.OperationLogEntry, int, error) { - return nil, 0, nil - }, - } -} - -// NewOrganizationOperationLogService returns a mock of OrganizationOperationLogService. -func NewOrganizationOperationLogService() *OrganizationOperationLogService { - return &OrganizationOperationLogService{ - GetOrganizationOperationLogFn: func(context.Context, platform2.ID, platform.FindOptions) ([]*platform.OperationLogEntry, int, error) { - return nil, 0, nil - }, - } -} - -// NewUserOperationLogService returns a mock of UserOperationLogService. -func NewUserOperationLogService() *UserOperationLogService { - return &UserOperationLogService{ - GetUserOperationLogFn: func(context.Context, platform2.ID, platform.FindOptions) ([]*platform.OperationLogEntry, int, error) { - return nil, 0, nil - }, - } -} - -// BucketOperationLogService is a mock implementation of platform.BucketOperationLogService. -type BucketOperationLogService struct { - GetBucketOperationLogFn func(context.Context, platform2.ID, platform.FindOptions) ([]*platform.OperationLogEntry, int, error) -} - -// DashboardOperationLogService is a mock implementation of platform.DashboardOperationLogService. -type DashboardOperationLogService struct { - GetDashboardOperationLogFn func(context.Context, platform2.ID, platform.FindOptions) ([]*platform.OperationLogEntry, int, error) -} - -// OrganizationOperationLogService is a mock implementation of platform.OrganizationOperationLogService. -type OrganizationOperationLogService struct { - GetOrganizationOperationLogFn func(context.Context, platform2.ID, platform.FindOptions) ([]*platform.OperationLogEntry, int, error) -} - -// UserOperationLogService is a mock implementation of platform.UserOperationLogService. -type UserOperationLogService struct { - GetUserOperationLogFn func(context.Context, platform2.ID, platform.FindOptions) ([]*platform.OperationLogEntry, int, error) -} - -// GetBucketOperationLog retrieves the operation log for the bucket with the provided id. -func (s *BucketOperationLogService) GetBucketOperationLog(ctx context.Context, id platform2.ID, opts platform.FindOptions) ([]*platform.OperationLogEntry, int, error) { - return s.GetBucketOperationLogFn(ctx, id, opts) -} - -// GetDashboardOperationLog retrieves the operation log for the dashboard with the provided id. -func (s *DashboardOperationLogService) GetDashboardOperationLog(ctx context.Context, id platform2.ID, opts platform.FindOptions) ([]*platform.OperationLogEntry, int, error) { - return s.GetDashboardOperationLogFn(ctx, id, opts) -} - -// GetOrganizationOperationLog retrieves the operation log for the org with the provided id. -func (s *OrganizationOperationLogService) GetOrganizationOperationLog(ctx context.Context, id platform2.ID, opts platform.FindOptions) ([]*platform.OperationLogEntry, int, error) { - return s.GetOrganizationOperationLogFn(ctx, id, opts) -} - -// GetUserOperationLog retrieves the operation log for the user with the provided id. 
-func (s *UserOperationLogService) GetUserOperationLog(ctx context.Context, id platform2.ID, opts platform.FindOptions) ([]*platform.OperationLogEntry, int, error) { - return s.GetUserOperationLogFn(ctx, id, opts) -} diff --git a/mock/org_service.go b/mock/org_service.go deleted file mode 100644 index 3f35bba4b3d..00000000000 --- a/mock/org_service.go +++ /dev/null @@ -1,75 +0,0 @@ -package mock - -import ( - "context" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ platform.OrganizationService = &OrganizationService{} - -// OrganizationService is a mock organization server. -type OrganizationService struct { - FindOrganizationByIDF func(ctx context.Context, id platform2.ID) (*platform.Organization, error) - FindOrganizationF func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) - FindOrganizationsF func(ctx context.Context, filter platform.OrganizationFilter, opt ...platform.FindOptions) ([]*platform.Organization, int, error) - CreateOrganizationF func(ctx context.Context, b *platform.Organization) error - UpdateOrganizationF func(ctx context.Context, id platform2.ID, upd platform.OrganizationUpdate) (*platform.Organization, error) - DeleteOrganizationF func(ctx context.Context, id platform2.ID) error - FindResourceOrganizationIDF func(ctx context.Context, rt platform.ResourceType, id platform2.ID) (platform2.ID, error) -} - -// NewOrganizationService returns a mock OrganizationService where its methods will return -// zero values. -func NewOrganizationService() *OrganizationService { - return &OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform2.ID) (*platform.Organization, error) { return nil, nil }, - FindOrganizationF: func(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - return nil, nil - }, - FindOrganizationsF: func(ctx context.Context, filter platform.OrganizationFilter, opt ...platform.FindOptions) ([]*platform.Organization, int, error) { - return nil, 0, nil - }, - CreateOrganizationF: func(ctx context.Context, b *platform.Organization) error { return nil }, - UpdateOrganizationF: func(ctx context.Context, id platform2.ID, upd platform.OrganizationUpdate) (*platform.Organization, error) { - return nil, nil - }, - DeleteOrganizationF: func(ctx context.Context, id platform2.ID) error { return nil }, - } -} - -// FindOrganizationByID calls FindOrganizationByIDF. -func (s *OrganizationService) FindOrganizationByID(ctx context.Context, id platform2.ID) (*platform.Organization, error) { - return s.FindOrganizationByIDF(ctx, id) -} - -// FindOrganization calls FindOrganizationF. -func (s *OrganizationService) FindOrganization(ctx context.Context, filter platform.OrganizationFilter) (*platform.Organization, error) { - return s.FindOrganizationF(ctx, filter) -} - -// FindOrganizations calls FindOrganizationsF. -func (s *OrganizationService) FindOrganizations(ctx context.Context, filter platform.OrganizationFilter, opt ...platform.FindOptions) ([]*platform.Organization, int, error) { - return s.FindOrganizationsF(ctx, filter, opt...) -} - -// CreateOrganization calls CreateOrganizationF. -func (s *OrganizationService) CreateOrganization(ctx context.Context, b *platform.Organization) error { - return s.CreateOrganizationF(ctx, b) -} - -// UpdateOrganization calls UpdateOrganizationF. 
-func (s *OrganizationService) UpdateOrganization(ctx context.Context, id platform2.ID, upd platform.OrganizationUpdate) (*platform.Organization, error) { - return s.UpdateOrganizationF(ctx, id, upd) -} - -// DeleteOrganization calls DeleteOrganizationF. -func (s *OrganizationService) DeleteOrganization(ctx context.Context, id platform2.ID) error { - return s.DeleteOrganizationF(ctx, id) -} - -// FindResourceOrganizationID calls FindResourceOrganizationIDF. -func (s *OrganizationService) FindResourceOrganizationID(ctx context.Context, rt platform.ResourceType, id platform2.ID) (platform2.ID, error) { - return s.FindResourceOrganizationIDF(ctx, rt, id) -} diff --git a/mock/paging.go b/mock/paging.go deleted file mode 100644 index b7754382fbe..00000000000 --- a/mock/paging.go +++ /dev/null @@ -1,13 +0,0 @@ -package mock - -type PagingFilter struct { - Name string - Type []string -} - -func (f PagingFilter) QueryParams() map[string][]string { - qp := map[string][]string{} - qp["name"] = []string{f.Name} - qp["type"] = f.Type - return qp -} diff --git a/mock/passwords.go b/mock/passwords.go deleted file mode 100644 index 552fc19481e..00000000000 --- a/mock/passwords.go +++ /dev/null @@ -1,41 +0,0 @@ -package mock - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// PasswordsService is a mock implementation of a retention.PasswordsService, which -// also makes it a suitable mock to use wherever a platform.PasswordsService is required. -type PasswordsService struct { - SetPasswordFn func(context.Context, platform.ID, string) error - ComparePasswordFn func(context.Context, platform.ID, string) error - CompareAndSetPasswordFn func(context.Context, platform.ID, string, string) error -} - -// NewPasswordsService returns a mock PasswordsService where its methods will return -// zero values. -func NewPasswordsService() *PasswordsService { - return &PasswordsService{ - SetPasswordFn: func(context.Context, platform.ID, string) error { return fmt.Errorf("mock error") }, - ComparePasswordFn: func(context.Context, platform.ID, string) error { return fmt.Errorf("mock error") }, - CompareAndSetPasswordFn: func(context.Context, platform.ID, string, string) error { return fmt.Errorf("mock error") }, - } -} - -// SetPassword sets the user's current password to be the provided password. -func (s *PasswordsService) SetPassword(ctx context.Context, userID platform.ID, password string) error { - return s.SetPasswordFn(ctx, userID, password) -} - -// ComparePassword compares the provided password against the one stored for the user. -func (s *PasswordsService) ComparePassword(ctx context.Context, userID platform.ID, password string) error { - return s.ComparePasswordFn(ctx, userID, password) -} - -// CompareAndSetPassword compares the provided old password and, if it matches, sets the new password. -func (s *PasswordsService) CompareAndSetPassword(ctx context.Context, userID platform.ID, old string, new string) error { - return s.CompareAndSetPasswordFn(ctx, userID, old, new) -} diff --git a/mock/passwords_service.go b/mock/passwords_service.go deleted file mode 100644 index 512deccc910..00000000000 --- a/mock/passwords_service.go +++ /dev/null @@ -1,78 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2 (interfaces: PasswordsService) - -// Package mock is a generated GoMock package.
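The passwords_service.go file that follows is MockGen output rather than a hand-rolled mock, so it is driven through a `gomock.Controller` with expectations registered up front. A rough sketch of that style, again assuming a `mock_test` package; the user ID, password, and test name are illustrative:

```go
package mock_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/mock"
)

func TestSetPasswordExpectation(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish() // fails the test if a registered expectation was never met

	svc := mock.NewMockPasswordsService(ctrl)
	// Expect exactly one SetPassword call for this user, with any context.
	svc.EXPECT().
		SetPassword(gomock.Any(), platform.ID(1), "hunter2").
		Return(nil)

	if err := svc.SetPassword(context.Background(), platform.ID(1), "hunter2"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
```

Unlike the function-field mocks, the generated mock verifies call counts automatically through the controller, at the cost of an extra MockGen generation step.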
-package mock - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockPasswordsService is a mock of PasswordsService interface -type MockPasswordsService struct { - ctrl *gomock.Controller - recorder *MockPasswordsServiceMockRecorder -} - -// MockPasswordsServiceMockRecorder is the mock recorder for MockPasswordsService -type MockPasswordsServiceMockRecorder struct { - mock *MockPasswordsService -} - -// NewMockPasswordsService creates a new mock instance -func NewMockPasswordsService(ctrl *gomock.Controller) *MockPasswordsService { - mock := &MockPasswordsService{ctrl: ctrl} - mock.recorder = &MockPasswordsServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockPasswordsService) EXPECT() *MockPasswordsServiceMockRecorder { - return m.recorder -} - -// CompareAndSetPassword mocks base method -func (m *MockPasswordsService) CompareAndSetPassword(arg0 context.Context, arg1 platform.ID, arg2, arg3 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CompareAndSetPassword", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(error) - return ret0 -} - -// CompareAndSetPassword indicates an expected call of CompareAndSetPassword -func (mr *MockPasswordsServiceMockRecorder) CompareAndSetPassword(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompareAndSetPassword", reflect.TypeOf((*MockPasswordsService)(nil).CompareAndSetPassword), arg0, arg1, arg2, arg3) -} - -// ComparePassword mocks base method -func (m *MockPasswordsService) ComparePassword(arg0 context.Context, arg1 platform.ID, arg2 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ComparePassword", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ComparePassword indicates an expected call of ComparePassword -func (mr *MockPasswordsServiceMockRecorder) ComparePassword(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ComparePassword", reflect.TypeOf((*MockPasswordsService)(nil).ComparePassword), arg0, arg1, arg2) -} - -// SetPassword mocks base method -func (m *MockPasswordsService) SetPassword(arg0 context.Context, arg1 platform.ID, arg2 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetPassword", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetPassword indicates an expected call of SetPassword -func (mr *MockPasswordsServiceMockRecorder) SetPassword(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPassword", reflect.TypeOf((*MockPasswordsService)(nil).SetPassword), arg0, arg1, arg2) -} diff --git a/mock/points_writer.go b/mock/points_writer.go deleted file mode 100644 index b2765c76926..00000000000 --- a/mock/points_writer.go +++ /dev/null @@ -1,62 +0,0 @@ -package mock - -import ( - "context" - "sync" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/models" -) - -// PointsWriter is a mock structure for writing points. 
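PointsWriter, whose definition continues below, is a capturing mock rather than a pure stub: unless `WritePointsFn` is set, it appends whatever is written to `Points` and counts calls, so a test can assert on what reached the writer. A small sketch under that assumption; the measurement, tag, and field names are made up for illustration:

```go
package mock_test

import (
	"context"
	"testing"
	"time"

	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/mock"
	"github.com/influxdata/influxdb/v2/models"
)

func TestWritesAreCaptured(t *testing.T) {
	var w mock.PointsWriter

	pt, err := models.NewPoint("cpu",
		models.NewTags(map[string]string{"host": "a"}),
		models.Fields{"usage": 0.5},
		time.Unix(0, 0))
	if err != nil {
		t.Fatal(err)
	}

	if err := w.WritePoints(context.Background(), platform.ID(1), platform.ID(2), []models.Point{pt}); err != nil {
		t.Fatal(err)
	}
	if got := w.WritePointsCalled(); got != 1 {
		t.Fatalf("expected 1 write call, got %d", got)
	}
}
```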
-type PointsWriter struct { - timesWriteCalled int - mu sync.RWMutex - Points []models.Point - Err error - - WritePointsFn func(ctx context.Context, orgID platform.ID, bucketID platform.ID, points []models.Point) error -} - -// ForceError is for error testing, if WritePoints is called after ForceError, it will return that error. -func (p *PointsWriter) ForceError(err error) { - p.mu.Lock() - p.Err = err - p.mu.Unlock() -} - -// WritePoints writes points to the PointsWriter that will be exposed in the Values. -func (p *PointsWriter) WritePoints(ctx context.Context, orgID platform.ID, bucketID platform.ID, points []models.Point) error { - if p.WritePointsFn != nil { - return p.WritePointsFn(ctx, orgID, bucketID, points) - } - - p.mu.Lock() - p.timesWriteCalled++ - p.Points = append(p.Points, points...) - err := p.Err - p.mu.Unlock() - return err -} - -// Next returns the next (oldest) batch of values. -func (p *PointsWriter) Next() models.Point { - var points models.Point - p.mu.RLock() - if len(p.Points) == 0 { - p.mu.RUnlock() - return points - } - p.mu.RUnlock() - - p.mu.Lock() - defer p.mu.Unlock() - points, p.Points = p.Points[0], p.Points[1:] - return points -} - -func (p *PointsWriter) WritePointsCalled() int { - p.mu.Lock() - defer p.mu.Unlock() - return p.timesWriteCalled -} diff --git a/mock/reader.go b/mock/reader.go deleted file mode 100644 index d2587ad5d46..00000000000 --- a/mock/reader.go +++ /dev/null @@ -1,56 +0,0 @@ -package mock - -import ( - "context" - - "github.com/influxdata/flux/memory" - "github.com/influxdata/influxdb/v2/query" -) - -type StorageReader struct { - ReadFilterFn func(ctx context.Context, spec query.ReadFilterSpec, alloc memory.Allocator) (query.TableIterator, error) - ReadGroupFn func(ctx context.Context, spec query.ReadGroupSpec, alloc memory.Allocator) (query.TableIterator, error) - ReadTagKeysFn func(ctx context.Context, spec query.ReadTagKeysSpec, alloc memory.Allocator) (query.TableIterator, error) - ReadTagValuesFn func(ctx context.Context, spec query.ReadTagValuesSpec, alloc memory.Allocator) (query.TableIterator, error) - ReadWindowAggregateFn func(ctx context.Context, spec query.ReadWindowAggregateSpec, alloc memory.Allocator) (query.TableIterator, error) - ReadSeriesCardinalityFn func(ctx context.Context, spec query.ReadSeriesCardinalitySpec, alloc memory.Allocator) (query.TableIterator, error) - SupportReadSeriesCardinalityFn func(ctx context.Context) bool - CloseFn func() -} - -func (s *StorageReader) ReadFilter(ctx context.Context, spec query.ReadFilterSpec, alloc memory.Allocator) (query.TableIterator, error) { - return s.ReadFilterFn(ctx, spec, alloc) -} - -func (s *StorageReader) ReadGroup(ctx context.Context, spec query.ReadGroupSpec, alloc memory.Allocator) (query.TableIterator, error) { - return s.ReadGroupFn(ctx, spec, alloc) -} - -func (s *StorageReader) ReadTagKeys(ctx context.Context, spec query.ReadTagKeysSpec, alloc memory.Allocator) (query.TableIterator, error) { - return s.ReadTagKeysFn(ctx, spec, alloc) -} - -func (s *StorageReader) ReadTagValues(ctx context.Context, spec query.ReadTagValuesSpec, alloc memory.Allocator) (query.TableIterator, error) { - return s.ReadTagValuesFn(ctx, spec, alloc) -} - -func (s *StorageReader) ReadSeriesCardinality(ctx context.Context, spec query.ReadSeriesCardinalitySpec, alloc memory.Allocator) (query.TableIterator, error) { - return s.ReadSeriesCardinalityFn(ctx, spec, alloc) - -} -func (s *StorageReader) SupportReadSeriesCardinality(ctx context.Context) bool { - return 
s.SupportReadSeriesCardinalityFn(ctx) -} - -func (s *StorageReader) Close() { - // Only invoke the close function if it is set. - // We want this to be a no-op and work without - // explicitly setting up a close function. - if s.CloseFn != nil { - s.CloseFn() - } -} - -func (s *StorageReader) ReadWindowAggregate(ctx context.Context, spec query.ReadWindowAggregateSpec, alloc memory.Allocator) (query.TableIterator, error) { - return s.ReadWindowAggregateFn(ctx, spec, alloc) -} diff --git a/mock/reads_resultset.go b/mock/reads_resultset.go deleted file mode 100644 index 0e32a9eaafc..00000000000 --- a/mock/reads_resultset.go +++ /dev/null @@ -1,245 +0,0 @@ -package mock - -import ( - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/data/gen" - "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -type GeneratorResultSet struct { - sg gen.SeriesGenerator - tags models.Tags - max int - count int - f floatTimeValuesGeneratorCursor - i integerTimeValuesGeneratorCursor - u unsignedTimeValuesGeneratorCursor - s stringTimeValuesGeneratorCursor - b booleanTimeValuesGeneratorCursor - cur cursors.Cursor -} - -var _ reads.ResultSet = (*GeneratorResultSet)(nil) - -type GeneratorOptionFn func(*GeneratorResultSet) - -// WithGeneratorMaxValues limits the number of values -// produced by GeneratorResultSet to n. -func WithGeneratorMaxValues(n int) GeneratorOptionFn { - return func(g *GeneratorResultSet) { - g.max = n - } -} - -// NewResultSetFromSeriesGenerator transforms a SeriesGenerator into a ResultSet, -// and therefore may be used anywhere a ResultSet is required. -func NewResultSetFromSeriesGenerator(sg gen.SeriesGenerator, opts ...GeneratorOptionFn) *GeneratorResultSet { - s := &GeneratorResultSet{sg: sg} - - for _, opt := range opts { - opt(s) - } - - s.f.max = s.max - s.i.max = s.max - s.u.max = s.max - s.s.max = s.max - s.b.max = s.max - s.f.count = &s.count - s.i.count = &s.count - s.u.count = &s.count - s.s.count = &s.count - s.b.count = &s.count - - return s -} - -func (g *GeneratorResultSet) Next() bool { - g.tags = g.tags[:0] - remain := g.max - g.count - return g.sg.Next() && (g.max == 0 || remain > 0) -} - -func (g *GeneratorResultSet) Cursor() cursors.Cursor { - switch g.sg.FieldType() { - case models.Float: - g.f.tv = g.sg.TimeValuesGenerator() - g.cur = &g.f - case models.Integer: - g.i.tv = g.sg.TimeValuesGenerator() - g.cur = &g.i - case models.Unsigned: - g.u.tv = g.sg.TimeValuesGenerator() - g.cur = &g.u - case models.String: - g.s.tv = g.sg.TimeValuesGenerator() - g.cur = &g.s - case models.Boolean: - g.b.tv = g.sg.TimeValuesGenerator() - g.cur = &g.b - default: - panic("unreachable") - } - - return g.cur -} - -func copyTags(dst, src models.Tags) models.Tags { - if cap(dst) < src.Len() { - dst = make(models.Tags, src.Len()) - } else { - dst = dst[:src.Len()] - } - copy(dst, src) - return dst -} - -func (g *GeneratorResultSet) Tags() models.Tags { - if len(g.tags) == 0 { - g.tags = copyTags(g.tags, g.sg.Tags()) - g.tags.Set(models.MeasurementTagKeyBytes, g.sg.Name()) - g.tags.Set(models.FieldKeyTagKeyBytes, g.sg.Field()) - } - return g.tags -} -func (g *GeneratorResultSet) Close() {} -func (g *GeneratorResultSet) Err() error { return nil } - -func (g *GeneratorResultSet) Stats() cursors.CursorStats { - var stats cursors.CursorStats - stats.Add(g.f.Stats()) - stats.Add(g.i.Stats()) - stats.Add(g.u.Stats()) - stats.Add(g.s.Stats()) - stats.Add(g.b.Stats()) - return 
stats -} - -// cursors - -type timeValuesGeneratorCursor struct { - tv gen.TimeValuesSequence - max int - count *int - stats cursors.CursorStats -} - -func (t timeValuesGeneratorCursor) Close() {} -func (t timeValuesGeneratorCursor) Err() error { return nil } -func (t timeValuesGeneratorCursor) Stats() cursors.CursorStats { return t.stats } -func (t *timeValuesGeneratorCursor) add(n int) { *t.count += n } -func (t *timeValuesGeneratorCursor) checkCount() bool { return t.max == 0 || *t.count < t.max } -func (t *timeValuesGeneratorCursor) remain() int { return t.max - *t.count } - -type floatTimeValuesGeneratorCursor struct { - timeValuesGeneratorCursor - a cursors.FloatArray -} - -func (c *floatTimeValuesGeneratorCursor) Next() *cursors.FloatArray { - if c.checkCount() && c.tv.Next() { - c.tv.Values().(gen.FloatValues).Copy(&c.a) - if remain := c.remain(); c.max > 0 && remain < c.a.Len() { - c.a.Timestamps = c.a.Timestamps[:remain] - c.a.Values = c.a.Values[:remain] - } - c.stats.ScannedBytes += len(c.a.Values) * 8 - c.stats.ScannedValues += c.a.Len() - c.add(c.a.Len()) - } else { - c.a.Timestamps = c.a.Timestamps[:0] - c.a.Values = c.a.Values[:0] - } - return &c.a -} - -type integerTimeValuesGeneratorCursor struct { - timeValuesGeneratorCursor - a cursors.IntegerArray -} - -func (c *integerTimeValuesGeneratorCursor) Next() *cursors.IntegerArray { - if c.checkCount() && c.tv.Next() { - c.tv.Values().(gen.IntegerValues).Copy(&c.a) - if remain := c.remain(); c.max > 0 && remain < c.a.Len() { - c.a.Timestamps = c.a.Timestamps[:remain] - c.a.Values = c.a.Values[:remain] - } - c.stats.ScannedBytes += len(c.a.Values) * 8 - c.stats.ScannedValues += c.a.Len() - c.add(c.a.Len()) - } else { - c.a.Timestamps = c.a.Timestamps[:0] - c.a.Values = c.a.Values[:0] - } - return &c.a -} - -type unsignedTimeValuesGeneratorCursor struct { - timeValuesGeneratorCursor - a cursors.UnsignedArray -} - -func (c *unsignedTimeValuesGeneratorCursor) Next() *cursors.UnsignedArray { - if c.checkCount() && c.tv.Next() { - c.tv.Values().(gen.UnsignedValues).Copy(&c.a) - if remain := c.remain(); c.max > 0 && remain < c.a.Len() { - c.a.Timestamps = c.a.Timestamps[:remain] - c.a.Values = c.a.Values[:remain] - } - c.stats.ScannedBytes += len(c.a.Values) * 8 - c.stats.ScannedValues += c.a.Len() - c.add(c.a.Len()) - } else { - c.a.Timestamps = c.a.Timestamps[:0] - c.a.Values = c.a.Values[:0] - } - return &c.a -} - -type stringTimeValuesGeneratorCursor struct { - timeValuesGeneratorCursor - a cursors.StringArray -} - -func (c *stringTimeValuesGeneratorCursor) Next() *cursors.StringArray { - if c.checkCount() && c.tv.Next() { - c.tv.Values().(gen.StringValues).Copy(&c.a) - if remain := c.remain(); c.max > 0 && remain < c.a.Len() { - c.a.Timestamps = c.a.Timestamps[:remain] - c.a.Values = c.a.Values[:remain] - } - for _, v := range c.a.Values { - c.stats.ScannedBytes += len(v) - } - c.stats.ScannedValues += c.a.Len() - c.add(c.a.Len()) - } else { - c.a.Timestamps = c.a.Timestamps[:0] - c.a.Values = c.a.Values[:0] - } - return &c.a -} - -type booleanTimeValuesGeneratorCursor struct { - timeValuesGeneratorCursor - a cursors.BooleanArray -} - -func (c *booleanTimeValuesGeneratorCursor) Next() *cursors.BooleanArray { - if c.checkCount() && c.tv.Next() { - c.tv.Values().(gen.BooleanValues).Copy(&c.a) - if remain := c.remain(); c.max > 0 && remain < c.a.Len() { - c.a.Timestamps = c.a.Timestamps[:remain] - c.a.Values = c.a.Values[:remain] - } - c.stats.ScannedBytes += len(c.a.Values) - c.stats.ScannedValues += c.a.Len() - c.add(c.a.Len()) 
- } else { - c.a.Timestamps = c.a.Timestamps[:0] - c.a.Values = c.a.Values[:0] - } - return &c.a -} diff --git a/mock/reads_resultset_test.go b/mock/reads_resultset_test.go deleted file mode 100644 index 1d385b4ee2a..00000000000 --- a/mock/reads_resultset_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package mock_test - -import ( - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/pkg/data/gen" - "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -func mustNewSpecFromToml(tb testing.TB, toml string) *gen.Spec { - tb.Helper() - - spec, err := gen.NewSpecFromToml(toml) - if err != nil { - panic(err) - } - - return spec -} - -func TestNewResultSetFromSeriesGenerator(t *testing.T) { - checkResult := func(t *testing.T, rs reads.ResultSet, expData string, expStats cursors.CursorStats) { - t.Helper() - - var sb strings.Builder - err := reads.ResultSetToLineProtocol(&sb, rs) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - if got, exp := sb.String(), expData; !cmp.Equal(got, exp) { - t.Errorf("unexpected value -got/+exp\n%s", cmp.Diff(got, exp)) - } - - if got, exp := rs.Stats(), expStats; !cmp.Equal(got, exp) { - t.Errorf("unexpected value -got/+exp\n%s", cmp.Diff(got, exp)) - } - } - - t.Run("float", func(t *testing.T) { - spec := mustNewSpecFromToml(t, ` -[[measurements]] -name = "m0" -sample = 1.0 -tags = [ - { name = "tag0", source = { type = "sequence", start = 0, count = 3 } }, - { name = "tag1", source = { type = "sequence", start = 0, count = 2 } }, -] -fields = [ - { name = "v0", count = 3, source = 1.0 }, -]`) - - sg := gen.NewSeriesGeneratorFromSpec(spec, gen.TimeRange{ - Start: time.Unix(1000, 0), - End: time.Unix(2000, 0), - }) - const expData = `m0,tag0=value0,tag1=value0 v0=1 1000000000000 -m0,tag0=value0,tag1=value0 v0=1 1333333000000 -m0,tag0=value0,tag1=value0 v0=1 1666666000000 -m0,tag0=value0,tag1=value1 v0=1 1000000000000 -m0,tag0=value0,tag1=value1 v0=1 1333333000000 -m0,tag0=value0,tag1=value1 v0=1 1666666000000 -m0,tag0=value1,tag1=value0 v0=1 1000000000000 -m0,tag0=value1,tag1=value0 v0=1 1333333000000 -m0,tag0=value1,tag1=value0 v0=1 1666666000000 -m0,tag0=value1,tag1=value1 v0=1 1000000000000 -m0,tag0=value1,tag1=value1 v0=1 1333333000000 -m0,tag0=value1,tag1=value1 v0=1 1666666000000 -m0,tag0=value2,tag1=value0 v0=1 1000000000000 -m0,tag0=value2,tag1=value0 v0=1 1333333000000 -m0,tag0=value2,tag1=value0 v0=1 1666666000000 -m0,tag0=value2,tag1=value1 v0=1 1000000000000 -m0,tag0=value2,tag1=value1 v0=1 1333333000000 -m0,tag0=value2,tag1=value1 v0=1 1666666000000 -` - expStats := cursors.CursorStats{ScannedValues: 18, ScannedBytes: 18 * 8} - checkResult(t, mock.NewResultSetFromSeriesGenerator(sg), expData, expStats) - }) - - t.Run("max", func(t *testing.T) { - spec := mustNewSpecFromToml(t, ` -[[measurements]] -name = "m0" -sample = 1.0 -tags = [ - { name = "tag0", source = { type = "sequence", start = 0, count = 3 } }, - { name = "tag1", source = { type = "sequence", start = 0, count = 2 } }, -] -fields = [ - { name = "v0", count = 3, source = 1.0 }, -]`) - - sg := gen.NewSeriesGeneratorFromSpec(spec, gen.TimeRange{ - Start: time.Unix(1000, 0), - End: time.Unix(2000, 0), - }) - const expData = `m0,tag0=value0,tag1=value0 v0=1 1000000000000 -m0,tag0=value0,tag1=value0 v0=1 1333333000000 -m0,tag0=value0,tag1=value0 v0=1 1666666000000 -m0,tag0=value0,tag1=value1 v0=1 
1000000000000 -m0,tag0=value0,tag1=value1 v0=1 1333333000000 -` - expStats := cursors.CursorStats{ScannedValues: 5, ScannedBytes: 5 * 8} - checkResult(t, mock.NewResultSetFromSeriesGenerator(sg, mock.WithGeneratorMaxValues(5)), expData, expStats) - }) - -} diff --git a/mock/reads_store.go b/mock/reads_store.go deleted file mode 100644 index ea44c3bb631..00000000000 --- a/mock/reads_store.go +++ /dev/null @@ -1,53 +0,0 @@ -package mock - -import ( - "context" - - "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/tsdb/cursors" - "google.golang.org/protobuf/proto" -) - -type ReadsStore struct { - ReadFilterFn func(ctx context.Context, req *datatypes.ReadFilterRequest) (reads.ResultSet, error) - ReadGroupFn func(ctx context.Context, req *datatypes.ReadGroupRequest) (reads.GroupResultSet, error) - WindowAggregateFn func(ctx context.Context, req *datatypes.ReadWindowAggregateRequest) (reads.ResultSet, error) - TagKeysFn func(ctx context.Context, req *datatypes.TagKeysRequest) (cursors.StringIterator, error) - TagValuesFn func(ctx context.Context, req *datatypes.TagValuesRequest) (cursors.StringIterator, error) - ReadSeriesCardinalityFn func(ctx context.Context, req *datatypes.ReadSeriesCardinalityRequest) (cursors.Int64Iterator, error) - SupportReadSeriesCardinalityFn func(ctx context.Context) bool - GetSourceFn func(orgID, bucketID uint64) proto.Message -} - -func (s *ReadsStore) ReadFilter(ctx context.Context, req *datatypes.ReadFilterRequest) (reads.ResultSet, error) { - return s.ReadFilterFn(ctx, req) -} - -func (s *ReadsStore) ReadGroup(ctx context.Context, req *datatypes.ReadGroupRequest) (reads.GroupResultSet, error) { - return s.ReadGroupFn(ctx, req) -} - -func (s *ReadsStore) WindowAggregate(ctx context.Context, req *datatypes.ReadWindowAggregateRequest) (reads.ResultSet, error) { - return s.WindowAggregateFn(ctx, req) -} - -func (s *ReadsStore) TagKeys(ctx context.Context, req *datatypes.TagKeysRequest) (cursors.StringIterator, error) { - return s.TagKeysFn(ctx, req) -} - -func (s *ReadsStore) TagValues(ctx context.Context, req *datatypes.TagValuesRequest) (cursors.StringIterator, error) { - return s.TagValuesFn(ctx, req) -} - -func (s *ReadsStore) ReadSeriesCardinality(ctx context.Context, req *datatypes.ReadSeriesCardinalityRequest) (cursors.Int64Iterator, error) { - return s.ReadSeriesCardinalityFn(ctx, req) -} - -func (s *ReadsStore) SupportReadSeriesCardinality(ctx context.Context) bool { - return s.SupportReadSeriesCardinalityFn(ctx) -} - -func (s *ReadsStore) GetSource(orgID, bucketID uint64) proto.Message { - return s.GetSourceFn(orgID, bucketID) -} diff --git a/mock/retention_service.go b/mock/retention_service.go deleted file mode 100644 index 149d907d877..00000000000 --- a/mock/retention_service.go +++ /dev/null @@ -1,31 +0,0 @@ -package mock - -import ( - "github.com/prometheus/client_golang/prometheus" -) - -type RetentionService struct { - OpenFn func() error - CloseFn func() error - PrometheusCollectorsFn func() []prometheus.Collector -} - -func NewRetentionService() *RetentionService { - return &RetentionService{ - OpenFn: func() error { return nil }, - CloseFn: func() error { return nil }, - PrometheusCollectorsFn: func() []prometheus.Collector { return nil }, - } -} - -func (s *RetentionService) Open() error { - return s.OpenFn() -} - -func (s *RetentionService) Close() error { - return s.CloseFn() -} - -func (s *RetentionService) 
PrometheusCollectors() []prometheus.Collector { - return s.PrometheusCollectorsFn() -} diff --git a/mock/safe_count.go b/mock/safe_count.go deleted file mode 100644 index 571fd7dad9c..00000000000 --- a/mock/safe_count.go +++ /dev/null @@ -1,36 +0,0 @@ -package mock - -import ( - "sync" -) - -// SafeCount provides a safe counter, useful for call counts to maintain -// thread safety. Removes burden of having to introduce serialization when -// concurrency is brought in. -type SafeCount struct { - mu sync.Mutex - i int -} - -// IncrFn increments the safe counter by 1. -func (s *SafeCount) IncrFn() func() { - s.mu.Lock() - return func() { - s.i++ - s.mu.Unlock() - } -} - -// Count returns the current count. -func (s *SafeCount) Count() int { - return s.i -} - -// Reset will reset the count to 0. -func (s *SafeCount) Reset() { - s.mu.Lock() - { - s.i = 0 - } - s.mu.Unlock() -} diff --git a/mock/scraper_service.go b/mock/scraper_service.go deleted file mode 100644 index 1b58e098a7e..00000000000 --- a/mock/scraper_service.go +++ /dev/null @@ -1,46 +0,0 @@ -package mock - -import ( - "context" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ platform.ScraperTargetStoreService = &ScraperTargetStoreService{} - -// ScraperTargetStoreService is a mock implementation of a platform.ScraperTargetStoreService. -type ScraperTargetStoreService struct { - UserResourceMappingService - OrganizationService - ListTargetsF func(ctx context.Context, filter platform.ScraperTargetFilter) ([]platform.ScraperTarget, error) - AddTargetF func(ctx context.Context, t *platform.ScraperTarget, userID platform2.ID) error - GetTargetByIDF func(ctx context.Context, id platform2.ID) (*platform.ScraperTarget, error) - RemoveTargetF func(ctx context.Context, id platform2.ID) error - UpdateTargetF func(ctx context.Context, t *platform.ScraperTarget, userID platform2.ID) (*platform.ScraperTarget, error) -} - -// ListTargets lists all the scraper targets. -func (s *ScraperTargetStoreService) ListTargets(ctx context.Context, filter platform.ScraperTargetFilter) ([]platform.ScraperTarget, error) { - return s.ListTargetsF(ctx, filter) -} - -// AddTarget adds a scraper target. -func (s *ScraperTargetStoreService) AddTarget(ctx context.Context, t *platform.ScraperTarget, userID platform2.ID) error { - return s.AddTargetF(ctx, t, userID) -} - -// GetTargetByID retrieves a scraper target by id. -func (s *ScraperTargetStoreService) GetTargetByID(ctx context.Context, id platform2.ID) (*platform.ScraperTarget, error) { - return s.GetTargetByIDF(ctx, id) -} - -// RemoveTarget deletes a scraper target. -func (s *ScraperTargetStoreService) RemoveTarget(ctx context.Context, id platform2.ID) error { - return s.RemoveTargetF(ctx, id) -} - -// UpdateTarget updates a scraper target. -func (s *ScraperTargetStoreService) UpdateTarget(ctx context.Context, t *platform.ScraperTarget, userID platform2.ID) (*platform.ScraperTarget, error) { - return s.UpdateTargetF(ctx, t, userID) -} diff --git a/mock/secret_service.go b/mock/secret_service.go deleted file mode 100644 index 07e45eddc55..00000000000 --- a/mock/secret_service.go +++ /dev/null @@ -1,74 +0,0 @@ -package mock - -import ( - "context" - "fmt" - - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -// SecretService is a mock implementation of a retention.SecretService, which -// also makes it a suitable mock to use wherever an platform.SecretService is required. 
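One detail of `SafeCount` above that is easy to miss: `IncrFn` acquires the lock immediately and returns a closure that performs the increment and the unlock, which is why the mocks in this package invoke it as `defer s.SomethingCalls.IncrFn()()`. The lock is taken when the method starts, and the count is bumped (and the lock released) when the method returns. A tiny illustrative sketch, not taken from the deleted files:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/mock"
)

func main() {
	var calls mock.SafeCount

	do := func() {
		// IncrFn() runs now and locks; the returned closure is deferred,
		// so the increment and unlock happen when do() returns.
		defer calls.IncrFn()()
	}

	do()
	do()
	fmt.Println(calls.Count()) // 2
}
```

A side effect of this design is that the mutex is held for the whole mock method body, so concurrent calls through the same counter are serialized as well as counted.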
-type SecretService struct { - LoadSecretFn func(ctx context.Context, orgID platform2.ID, k string) (string, error) - GetSecretKeysFn func(ctx context.Context, orgID platform2.ID) ([]string, error) - PutSecretFn func(ctx context.Context, orgID platform2.ID, k string, v string) error - PutSecretsFn func(ctx context.Context, orgID platform2.ID, m map[string]string) error - PatchSecretsFn func(ctx context.Context, orgID platform2.ID, m map[string]string) error - DeleteSecretFn func(ctx context.Context, orgID platform2.ID, ks ...string) error -} - -// NewSecretService returns a mock SecretService where its methods will return -// zero values. -func NewSecretService() *SecretService { - return &SecretService{ - LoadSecretFn: func(ctx context.Context, orgID platform2.ID, k string) (string, error) { - return "", fmt.Errorf("not implemented") - }, - GetSecretKeysFn: func(ctx context.Context, orgID platform2.ID) ([]string, error) { - return nil, fmt.Errorf("not implemented") - }, - PutSecretFn: func(ctx context.Context, orgID platform2.ID, k string, v string) error { - return fmt.Errorf("not implemented") - }, - PutSecretsFn: func(ctx context.Context, orgID platform2.ID, m map[string]string) error { - return fmt.Errorf("not implemented") - }, - PatchSecretsFn: func(ctx context.Context, orgID platform2.ID, m map[string]string) error { - return fmt.Errorf("not implemented") - }, - DeleteSecretFn: func(ctx context.Context, orgID platform2.ID, ks ...string) error { - return fmt.Errorf("not implemented") - }, - } -} - -// LoadSecret retrieves the secret value v found at key k for organization orgID. -func (s *SecretService) LoadSecret(ctx context.Context, orgID platform2.ID, k string) (string, error) { - return s.LoadSecretFn(ctx, orgID, k) -} - -// GetSecretKeys retrieves all secret keys that are stored for the organization orgID. -func (s *SecretService) GetSecretKeys(ctx context.Context, orgID platform2.ID) ([]string, error) { - return s.GetSecretKeysFn(ctx, orgID) -} - -// PutSecret stores the secret pair (k,v) for the organization orgID. -func (s *SecretService) PutSecret(ctx context.Context, orgID platform2.ID, k string, v string) error { - return s.PutSecretFn(ctx, orgID, k, v) -} - -// PutSecrets puts all provided secrets and overwrites any previous values. -func (s *SecretService) PutSecrets(ctx context.Context, orgID platform2.ID, m map[string]string) error { - return s.PutSecretsFn(ctx, orgID, m) -} - -// PatchSecrets patches all provided secrets and updates any previous values. -func (s *SecretService) PatchSecrets(ctx context.Context, orgID platform2.ID, m map[string]string) error { - return s.PatchSecretsFn(ctx, orgID, m) -} - -// DeleteSecret removes a single secret from the secret store. -func (s *SecretService) DeleteSecret(ctx context.Context, orgID platform2.ID, ks ...string) error { - return s.DeleteSecretFn(ctx, orgID, ks...) -} diff --git a/mock/session_service.go b/mock/session_service.go deleted file mode 100644 index de1699f078c..00000000000 --- a/mock/session_service.go +++ /dev/null @@ -1,51 +0,0 @@ -package mock - -import ( - "context" - "fmt" - "time" - - platform "github.com/influxdata/influxdb/v2" -) - -// SessionService is a mock implementation of a retention.SessionService, which -// also makes it a suitable mock to use wherever a platform.SessionService is required.
-type SessionService struct { - FindSessionFn func(context.Context, string) (*platform.Session, error) - ExpireSessionFn func(context.Context, string) error - CreateSessionFn func(context.Context, string) (*platform.Session, error) - RenewSessionFn func(ctx context.Context, session *platform.Session, newExpiration time.Time) error -} - -// NewSessionService returns a mock SessionService where its methods will return -// zero values. -func NewSessionService() *SessionService { - return &SessionService{ - FindSessionFn: func(context.Context, string) (*platform.Session, error) { return nil, fmt.Errorf("mock session") }, - CreateSessionFn: func(context.Context, string) (*platform.Session, error) { return nil, fmt.Errorf("mock session") }, - ExpireSessionFn: func(context.Context, string) error { return fmt.Errorf("mock session") }, - RenewSessionFn: func(ctx context.Context, session *platform.Session, expiredAt time.Time) error { - return fmt.Errorf("mock session") - }, - } -} - -// FindSession returns the session found at the provided key. -func (s *SessionService) FindSession(ctx context.Context, key string) (*platform.Session, error) { - return s.FindSessionFn(ctx, key) -} - -// CreateSession creates a session for a user with the users maximal privileges. -func (s *SessionService) CreateSession(ctx context.Context, user string) (*platform.Session, error) { - return s.CreateSessionFn(ctx, user) -} - -// ExpireSession expires the session provided at key. -func (s *SessionService) ExpireSession(ctx context.Context, key string) error { - return s.ExpireSessionFn(ctx, key) -} - -// RenewSession extends the expire time to newExpiration. -func (s *SessionService) RenewSession(ctx context.Context, session *platform.Session, expiredAt time.Time) error { - return s.RenewSessionFn(ctx, session, expiredAt) -} diff --git a/mock/source_service.go b/mock/source_service.go deleted file mode 100644 index 381d5f624db..00000000000 --- a/mock/source_service.go +++ /dev/null @@ -1,64 +0,0 @@ -package mock - -import ( - "context" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ platform.SourceService = (*SourceService)(nil) - -// SourceService is a mock implementation of platform.SourceService. -type SourceService struct { - DefaultSourceFn func(context.Context) (*platform.Source, error) - FindSourceByIDFn func(context.Context, platform2.ID) (*platform.Source, error) - FindSourcesFn func(context.Context, platform.FindOptions) ([]*platform.Source, int, error) - CreateSourceFn func(context.Context, *platform.Source) error - UpdateSourceFn func(context.Context, platform2.ID, platform.SourceUpdate) (*platform.Source, error) - DeleteSourceFn func(context.Context, platform2.ID) error -} - -// NewSourceService returns a mock of SourceService where its methods will return zero values. 
-func NewSourceService() *SourceService { - return &SourceService{ - DefaultSourceFn: func(context.Context) (*platform.Source, error) { return nil, nil }, - FindSourceByIDFn: func(context.Context, platform2.ID) (*platform.Source, error) { return nil, nil }, - CreateSourceFn: func(context.Context, *platform.Source) error { return nil }, - UpdateSourceFn: func(context.Context, platform2.ID, platform.SourceUpdate) (*platform.Source, error) { return nil, nil }, - DeleteSourceFn: func(context.Context, platform2.ID) error { return nil }, - FindSourcesFn: func(context.Context, platform.FindOptions) ([]*platform.Source, int, error) { - return nil, 0, nil - }, - } -} - -// DefaultSource retrieves the default source. -func (s *SourceService) DefaultSource(ctx context.Context) (*platform.Source, error) { - return s.DefaultSourceFn(ctx) -} - -// FindSourceByID retrieves a source by its ID. -func (s *SourceService) FindSourceByID(ctx context.Context, id platform2.ID) (*platform.Source, error) { - return s.FindSourceByIDFn(ctx, id) -} - -// FindSources returns a list of all sources. -func (s *SourceService) FindSources(ctx context.Context, opts platform.FindOptions) ([]*platform.Source, int, error) { - return s.FindSourcesFn(ctx, opts) -} - -// CreateSource sets the sources ID and stores it. -func (s *SourceService) CreateSource(ctx context.Context, source *platform.Source) error { - return s.CreateSourceFn(ctx, source) -} - -// DeleteSource removes the source. -func (s *SourceService) DeleteSource(ctx context.Context, id platform2.ID) error { - return s.DeleteSourceFn(ctx, id) -} - -// UpdateSource updates the source. -func (s *SourceService) UpdateSource(ctx context.Context, id platform2.ID, upd platform.SourceUpdate) (*platform.Source, error) { - return s.UpdateSourceFn(ctx, id, upd) -} diff --git a/mock/task_service.go b/mock/task_service.go deleted file mode 100644 index 0dec5a27d16..00000000000 --- a/mock/task_service.go +++ /dev/null @@ -1,163 +0,0 @@ -package mock - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/task/backend" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -var _ taskmodel.TaskService = (*TaskService)(nil) -var _ backend.TaskControlService = (*TaskControlService)(nil) - -type TaskService struct { - FindTaskByIDFn func(context.Context, platform.ID) (*taskmodel.Task, error) - FindTaskByIDCalls SafeCount - FindTasksFn func(context.Context, taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) - FindTasksCalls SafeCount - CreateTaskFn func(context.Context, taskmodel.TaskCreate) (*taskmodel.Task, error) - CreateTaskCalls SafeCount - UpdateTaskFn func(context.Context, platform.ID, taskmodel.TaskUpdate) (*taskmodel.Task, error) - UpdateTaskCalls SafeCount - DeleteTaskFn func(context.Context, platform.ID) error - DeleteTaskCalls SafeCount - FindLogsFn func(context.Context, taskmodel.LogFilter) ([]*taskmodel.Log, int, error) - FindLogsCalls SafeCount - FindRunsFn func(context.Context, taskmodel.RunFilter) ([]*taskmodel.Run, int, error) - FindRunsCalls SafeCount - FindRunByIDFn func(context.Context, platform.ID, platform.ID) (*taskmodel.Run, error) - FindRunByIDCalls SafeCount - CancelRunFn func(context.Context, platform.ID, platform.ID) error - CancelRunCalls SafeCount - RetryRunFn func(context.Context, platform.ID, platform.ID) (*taskmodel.Run, error) - RetryRunCalls SafeCount - ForceRunFn func(context.Context, platform.ID, int64) (*taskmodel.Run, error) - ForceRunCalls SafeCount 
-} - -func NewTaskService() *TaskService { - return &TaskService{ - FindTaskByIDFn: func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - return nil, nil - }, - FindTasksFn: func(ctx context.Context, f taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - return nil, 0, nil - }, - CreateTaskFn: func(ctx context.Context, taskCreate taskmodel.TaskCreate) (*taskmodel.Task, error) { - return nil, nil - }, - UpdateTaskFn: func(ctx context.Context, id platform.ID, update taskmodel.TaskUpdate) (*taskmodel.Task, error) { - return nil, nil - }, - DeleteTaskFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - FindLogsFn: func(ctx context.Context, f taskmodel.LogFilter) ([]*taskmodel.Log, int, error) { - return nil, 0, nil - }, - FindRunsFn: func(ctx context.Context, f taskmodel.RunFilter) ([]*taskmodel.Run, int, error) { - return nil, 0, nil - }, - FindRunByIDFn: func(ctx context.Context, id platform.ID, id2 platform.ID) (*taskmodel.Run, error) { - return nil, nil - }, - CancelRunFn: func(ctx context.Context, id platform.ID, id2 platform.ID) error { - return nil - }, - RetryRunFn: func(ctx context.Context, id platform.ID, id2 platform.ID) (*taskmodel.Run, error) { - return nil, nil - }, - ForceRunFn: func(ctx context.Context, id platform.ID, i int64) (*taskmodel.Run, error) { - return nil, nil - }, - } -} - -func (s *TaskService) FindTaskByID(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - defer s.FindTaskByIDCalls.IncrFn()() - return s.FindTaskByIDFn(ctx, id) -} - -func (s *TaskService) FindTasks(ctx context.Context, filter taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - defer s.FindTasksCalls.IncrFn()() - return s.FindTasksFn(ctx, filter) -} - -func (s *TaskService) CreateTask(ctx context.Context, t taskmodel.TaskCreate) (*taskmodel.Task, error) { - defer s.CreateTaskCalls.IncrFn()() - return s.CreateTaskFn(ctx, t) -} - -func (s *TaskService) UpdateTask(ctx context.Context, id platform.ID, upd taskmodel.TaskUpdate) (*taskmodel.Task, error) { - defer s.UpdateTaskCalls.IncrFn()() - return s.UpdateTaskFn(ctx, id, upd) -} - -func (s *TaskService) DeleteTask(ctx context.Context, id platform.ID) error { - defer s.DeleteTaskCalls.IncrFn()() - return s.DeleteTaskFn(ctx, id) -} - -func (s *TaskService) FindLogs(ctx context.Context, filter taskmodel.LogFilter) ([]*taskmodel.Log, int, error) { - defer s.FindLogsCalls.IncrFn()() - return s.FindLogsFn(ctx, filter) -} - -func (s *TaskService) FindRuns(ctx context.Context, filter taskmodel.RunFilter) ([]*taskmodel.Run, int, error) { - defer s.FindRunsCalls.IncrFn()() - return s.FindRunsFn(ctx, filter) -} - -func (s *TaskService) FindRunByID(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - defer s.FindRunByIDCalls.IncrFn()() - return s.FindRunByIDFn(ctx, taskID, runID) -} - -func (s *TaskService) CancelRun(ctx context.Context, taskID, runID platform.ID) error { - defer s.CancelRunCalls.IncrFn()() - return s.CancelRunFn(ctx, taskID, runID) -} - -func (s *TaskService) RetryRun(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - defer s.RetryRunCalls.IncrFn()() - return s.RetryRunFn(ctx, taskID, runID) -} - -func (s *TaskService) ForceRun(ctx context.Context, taskID platform.ID, scheduledFor int64) (*taskmodel.Run, error) { - defer s.ForceRunCalls.IncrFn()() - return s.ForceRunFn(ctx, taskID, scheduledFor) -} - -type TaskControlService struct { - CreateRunFn func(ctx context.Context, taskID platform.ID, scheduledFor time.Time, runAt time.Time) 
(*taskmodel.Run, error) - CurrentlyRunningFn func(ctx context.Context, taskID platform.ID) ([]*taskmodel.Run, error) - ManualRunsFn func(ctx context.Context, taskID platform.ID) ([]*taskmodel.Run, error) - StartManualRunFn func(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) - FinishRunFn func(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) - UpdateRunStateFn func(ctx context.Context, taskID, runID platform.ID, when time.Time, state taskmodel.RunStatus) error - AddRunLogFn func(ctx context.Context, taskID, runID platform.ID, when time.Time, log string) error -} - -func (tcs *TaskControlService) CreateRun(ctx context.Context, taskID platform.ID, scheduledFor time.Time, runAt time.Time) (*taskmodel.Run, error) { - return tcs.CreateRunFn(ctx, taskID, scheduledFor, runAt) -} -func (tcs *TaskControlService) CurrentlyRunning(ctx context.Context, taskID platform.ID) ([]*taskmodel.Run, error) { - return tcs.CurrentlyRunningFn(ctx, taskID) -} -func (tcs *TaskControlService) ManualRuns(ctx context.Context, taskID platform.ID) ([]*taskmodel.Run, error) { - return tcs.ManualRunsFn(ctx, taskID) -} -func (tcs *TaskControlService) StartManualRun(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - return tcs.StartManualRunFn(ctx, taskID, runID) -} -func (tcs *TaskControlService) FinishRun(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - return tcs.FinishRunFn(ctx, taskID, runID) -} -func (tcs *TaskControlService) UpdateRunState(ctx context.Context, taskID, runID platform.ID, when time.Time, state taskmodel.RunStatus) error { - return tcs.UpdateRunStateFn(ctx, taskID, runID, when, state) -} -func (tcs *TaskControlService) AddRunLog(ctx context.Context, taskID, runID platform.ID, when time.Time, log string) error { - return tcs.AddRunLogFn(ctx, taskID, runID, when, log) -} diff --git a/mock/telegraf_service.go b/mock/telegraf_service.go deleted file mode 100644 index c05cf0cbafd..00000000000 --- a/mock/telegraf_service.go +++ /dev/null @@ -1,79 +0,0 @@ -package mock - -import ( - "context" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ platform.TelegrafConfigStore = (*TelegrafConfigStore)(nil) - -// TelegrafConfigStore represents a service for managing telegraf config data. -type TelegrafConfigStore struct { - *UserResourceMappingService - FindTelegrafConfigByIDF func(ctx context.Context, id platform2.ID) (*platform.TelegrafConfig, error) - FindTelegrafConfigByIDCalls SafeCount - FindTelegrafConfigsF func(ctx context.Context, filter platform.TelegrafConfigFilter, opt ...platform.FindOptions) ([]*platform.TelegrafConfig, int, error) - FindTelegrafConfigsCalls SafeCount - CreateTelegrafConfigF func(ctx context.Context, tc *platform.TelegrafConfig, userID platform2.ID) error - CreateTelegrafConfigCalls SafeCount - UpdateTelegrafConfigF func(ctx context.Context, id platform2.ID, tc *platform.TelegrafConfig, userID platform2.ID) (*platform.TelegrafConfig, error) - UpdateTelegrafConfigCalls SafeCount - DeleteTelegrafConfigF func(ctx context.Context, id platform2.ID) error - DeleteTelegrafConfigCalls SafeCount -} - -// NewTelegrafConfigStore constructs a new fake TelegrafConfigStore. 
-func NewTelegrafConfigStore() *TelegrafConfigStore { - return &TelegrafConfigStore{ - UserResourceMappingService: NewUserResourceMappingService(), - FindTelegrafConfigByIDF: func(ctx context.Context, id platform2.ID) (*platform.TelegrafConfig, error) { - return nil, nil - }, - FindTelegrafConfigsF: func(_ context.Context, f platform.TelegrafConfigFilter, opt ...platform.FindOptions) ([]*platform.TelegrafConfig, int, error) { - return nil, 0, nil - }, - CreateTelegrafConfigF: func(_ context.Context, tc *platform.TelegrafConfig, userID platform2.ID) error { - return nil - }, - UpdateTelegrafConfigF: func(_ context.Context, id platform2.ID, tc *platform.TelegrafConfig, userID platform2.ID) (*platform.TelegrafConfig, error) { - return nil, nil - }, - DeleteTelegrafConfigF: func(_ context.Context, id platform2.ID) error { - return nil - }, - } -} - -// FindTelegrafConfigByID returns a single telegraf config by ID. -func (s *TelegrafConfigStore) FindTelegrafConfigByID(ctx context.Context, id platform2.ID) (*platform.TelegrafConfig, error) { - defer s.FindTelegrafConfigByIDCalls.IncrFn()() - return s.FindTelegrafConfigByIDF(ctx, id) -} - -// FindTelegrafConfigs returns a list of telegraf configs that match filter and the total count of matching telegraf configs. -// Additional options provide pagination & sorting. -func (s *TelegrafConfigStore) FindTelegrafConfigs(ctx context.Context, filter platform.TelegrafConfigFilter, opt ...platform.FindOptions) ([]*platform.TelegrafConfig, int, error) { - defer s.FindTelegrafConfigsCalls.IncrFn()() - return s.FindTelegrafConfigsF(ctx, filter, opt...) -} - -// CreateTelegrafConfig creates a new telegraf config and sets b.ID with the new identifier. -func (s *TelegrafConfigStore) CreateTelegrafConfig(ctx context.Context, tc *platform.TelegrafConfig, userID platform2.ID) error { - defer s.CreateTelegrafConfigCalls.IncrFn()() - return s.CreateTelegrafConfigF(ctx, tc, userID) -} - -// UpdateTelegrafConfig updates a single telegraf config. -// Returns the new telegraf config after update. -func (s *TelegrafConfigStore) UpdateTelegrafConfig(ctx context.Context, id platform2.ID, tc *platform.TelegrafConfig, userID platform2.ID) (*platform.TelegrafConfig, error) { - defer s.UpdateTelegrafConfigCalls.IncrFn()() - return s.UpdateTelegrafConfigF(ctx, id, tc, userID) -} - -// DeleteTelegrafConfig removes a telegraf config by ID. 
-func (s *TelegrafConfigStore) DeleteTelegrafConfig(ctx context.Context, id platform2.ID) error { - defer s.DeleteTelegrafConfigCalls.IncrFn()() - return s.DeleteTelegrafConfigF(ctx, id) -} diff --git a/mock/user_resource_mapping_service.go b/mock/user_resource_mapping_service.go deleted file mode 100644 index d8cbffaf96c..00000000000 --- a/mock/user_resource_mapping_service.go +++ /dev/null @@ -1,44 +0,0 @@ -package mock - -import ( - "context" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ platform.UserResourceMappingService = &UserResourceMappingService{} - -// UserResourceMappingService is a mock implementation of platform.UserResourceMappingService -type UserResourceMappingService struct { - FindMappingsFn func(context.Context, platform.UserResourceMappingFilter) ([]*platform.UserResourceMapping, int, error) - CreateMappingFn func(context.Context, *platform.UserResourceMapping) error - DeleteMappingFn func(context.Context, platform2.ID, platform2.ID) error -} - -// NewUserResourceMappingService returns a mock of UserResourceMappingService -// where its methods will return zero values. -func NewUserResourceMappingService() *UserResourceMappingService { - return &UserResourceMappingService{ - FindMappingsFn: func(context.Context, platform.UserResourceMappingFilter) ([]*platform.UserResourceMapping, int, error) { - return nil, 0, nil - }, - CreateMappingFn: func(context.Context, *platform.UserResourceMapping) error { return nil }, - DeleteMappingFn: func(context.Context, platform2.ID, platform2.ID) error { return nil }, - } -} - -// FindUserResourceMappings finds mappings that match a given filter. -func (s *UserResourceMappingService) FindUserResourceMappings(ctx context.Context, filter platform.UserResourceMappingFilter, opt ...platform.FindOptions) ([]*platform.UserResourceMapping, int, error) { - return s.FindMappingsFn(ctx, filter) -} - -// CreateUserResourceMapping creates a new UserResourceMapping. -func (s *UserResourceMappingService) CreateUserResourceMapping(ctx context.Context, m *platform.UserResourceMapping) error { - return s.CreateMappingFn(ctx, m) -} - -// DeleteUserResourceMapping removes a UserResourceMapping. -func (s *UserResourceMappingService) DeleteUserResourceMapping(ctx context.Context, resourceID platform2.ID, userID platform2.ID) error { - return s.DeleteMappingFn(ctx, resourceID, userID) -} diff --git a/mock/user_service.go b/mock/user_service.go deleted file mode 100644 index 398b5a62636..00000000000 --- a/mock/user_service.go +++ /dev/null @@ -1,72 +0,0 @@ -package mock - -import ( - "context" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ platform.UserService = (*UserService)(nil) - -// UserService is a mock implementation of a retention.UserService, which -// also makes it a suitable mock to use wherever an platform.UserService is required. 
-type UserService struct { - // Methods for a platform.UserService - FindUserByIDFn func(context.Context, platform2.ID) (*platform.User, error) - FindUsersFn func(context.Context, platform.UserFilter, ...platform.FindOptions) ([]*platform.User, int, error) - CreateUserFn func(context.Context, *platform.User) error - DeleteUserFn func(context.Context, platform2.ID) error - FindUserFn func(context.Context, platform.UserFilter) (*platform.User, error) - UpdateUserFn func(context.Context, platform2.ID, platform.UserUpdate) (*platform.User, error) - FindPermissionForUserFn func(context.Context, platform2.ID) (platform.PermissionSet, error) -} - -// NewUserService returns a mock of UserService where its methods will return zero values. -func NewUserService() *UserService { - return &UserService{ - FindUserByIDFn: func(context.Context, platform2.ID) (*platform.User, error) { return nil, nil }, - FindUserFn: func(context.Context, platform.UserFilter) (*platform.User, error) { return nil, nil }, - CreateUserFn: func(context.Context, *platform.User) error { return nil }, - UpdateUserFn: func(context.Context, platform2.ID, platform.UserUpdate) (*platform.User, error) { return nil, nil }, - DeleteUserFn: func(context.Context, platform2.ID) error { return nil }, - FindUsersFn: func(context.Context, platform.UserFilter, ...platform.FindOptions) ([]*platform.User, int, error) { - return nil, 0, nil - }, - FindPermissionForUserFn: func(context.Context, platform2.ID) (platform.PermissionSet, error) { return nil, nil }, - } -} - -// FindUserByID returns a single User by ID. -func (s *UserService) FindUserByID(ctx context.Context, id platform2.ID) (*platform.User, error) { - return s.FindUserByIDFn(ctx, id) -} - -// FindUsers returns a list of Users that match filter and the total count of matching Users. -func (s *UserService) FindUsers(ctx context.Context, filter platform.UserFilter, opts ...platform.FindOptions) ([]*platform.User, int, error) { - return s.FindUsersFn(ctx, filter, opts...) -} - -// CreateUser creates a new User and sets b.ID with the new identifier. -func (s *UserService) CreateUser(ctx context.Context, User *platform.User) error { - return s.CreateUserFn(ctx, User) -} - -// DeleteUser removes a User by ID. 
-func (s *UserService) DeleteUser(ctx context.Context, id platform2.ID) error { - return s.DeleteUserFn(ctx, id) -} - -// FindUser finds the first user that matches a filter -func (s *UserService) FindUser(ctx context.Context, filter platform.UserFilter) (*platform.User, error) { - return s.FindUserFn(ctx, filter) -} - -// UpdateUser updates a user -func (s *UserService) UpdateUser(ctx context.Context, id platform2.ID, upd platform.UserUpdate) (*platform.User, error) { - return s.UpdateUserFn(ctx, id, upd) -} - -func (s *UserService) FindPermissionForUser(ctx context.Context, uid platform2.ID) (platform.PermissionSet, error) { - return s.FindPermissionForUserFn(ctx, uid) -} diff --git a/mock/variable_service.go b/mock/variable_service.go deleted file mode 100644 index 541baf44abe..00000000000 --- a/mock/variable_service.go +++ /dev/null @@ -1,71 +0,0 @@ -package mock - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.VariableService = &VariableService{} - -type VariableService struct { - CreateVariableF func(context.Context, *influxdb.Variable) error - CreateVariableCalls SafeCount - DeleteVariableF func(context.Context, platform.ID) error - DeleteVariableCalls SafeCount - FindVariableByIDF func(context.Context, platform.ID) (*influxdb.Variable, error) - FindVariableByIDCalls SafeCount - FindVariablesF func(context.Context, influxdb.VariableFilter, ...influxdb.FindOptions) ([]*influxdb.Variable, error) - FindVariablesCalls SafeCount - ReplaceVariableF func(context.Context, *influxdb.Variable) error - ReplaceVariableCalls SafeCount - UpdateVariableF func(ctx context.Context, id platform.ID, update *influxdb.VariableUpdate) (*influxdb.Variable, error) - UpdateVariableCalls SafeCount -} - -// NewVariableService returns a mock of VariableService where its methods will return zero values. -func NewVariableService() *VariableService { - return &VariableService{ - CreateVariableF: func(context.Context, *influxdb.Variable) error { return nil }, - DeleteVariableF: func(context.Context, platform.ID) error { return nil }, - FindVariableByIDF: func(context.Context, platform.ID) (*influxdb.Variable, error) { return nil, nil }, - FindVariablesF: func(context.Context, influxdb.VariableFilter, ...influxdb.FindOptions) ([]*influxdb.Variable, error) { - return nil, nil - }, - ReplaceVariableF: func(context.Context, *influxdb.Variable) error { return nil }, - UpdateVariableF: func(ctx context.Context, id platform.ID, update *influxdb.VariableUpdate) (*influxdb.Variable, error) { - return nil, nil - }, - } -} - -func (s *VariableService) CreateVariable(ctx context.Context, variable *influxdb.Variable) error { - defer s.CreateVariableCalls.IncrFn()() - return s.CreateVariableF(ctx, variable) -} - -func (s *VariableService) ReplaceVariable(ctx context.Context, variable *influxdb.Variable) error { - defer s.ReplaceVariableCalls.IncrFn()() - return s.ReplaceVariableF(ctx, variable) -} - -func (s *VariableService) FindVariables(ctx context.Context, filter influxdb.VariableFilter, opts ...influxdb.FindOptions) ([]*influxdb.Variable, error) { - defer s.FindVariablesCalls.IncrFn()() - return s.FindVariablesF(ctx, filter, opts...) 
-} - -func (s *VariableService) FindVariableByID(ctx context.Context, id platform.ID) (*influxdb.Variable, error) { - defer s.FindVariableByIDCalls.IncrFn()() - return s.FindVariableByIDF(ctx, id) -} - -func (s *VariableService) DeleteVariable(ctx context.Context, id platform.ID) error { - defer s.DeleteVariableCalls.IncrFn()() - return s.DeleteVariableF(ctx, id) -} - -func (s *VariableService) UpdateVariable(ctx context.Context, id platform.ID, update *influxdb.VariableUpdate) (*influxdb.Variable, error) { - defer s.UpdateVariableCalls.IncrFn()() - return s.UpdateVariableF(ctx, id, update) -} diff --git a/mock/write_service.go b/mock/write_service.go deleted file mode 100644 index 51bdc1b1910..00000000000 --- a/mock/write_service.go +++ /dev/null @@ -1,18 +0,0 @@ -package mock - -import ( - "context" - "io" - - platform "github.com/influxdata/influxdb/v2" -) - -// WriteService writes data read from the reader. -type WriteService struct { - WriteToF func(context.Context, platform.BucketFilter, io.Reader) error -} - -// WriteTo calls the mocked WriteToF function with arguments. -func (s *WriteService) WriteTo(ctx context.Context, filter platform.BucketFilter, r io.Reader) error { - return s.WriteToF(ctx, filter, r) -} diff --git a/models/consistency.go b/models/consistency.go deleted file mode 100644 index 2a3269bca11..00000000000 --- a/models/consistency.go +++ /dev/null @@ -1,48 +0,0 @@ -package models - -import ( - "errors" - "strings" -) - -// ConsistencyLevel represent a required replication criteria before a write can -// be returned as successful. -// -// The consistency level is handled in open-source InfluxDB but only applicable to clusters. -type ConsistencyLevel int - -const ( - // ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet. - ConsistencyLevelAny ConsistencyLevel = iota - - // ConsistencyLevelOne requires at least one data node acknowledged a write. - ConsistencyLevelOne - - // ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write. - ConsistencyLevelQuorum - - // ConsistencyLevelAll requires all data nodes to acknowledge a write. - ConsistencyLevelAll -) - -var ( - // ErrInvalidConsistencyLevel is returned when parsing the string version - // of a consistency level. - ErrInvalidConsistencyLevel = errors.New("invalid consistency level") -) - -// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const. -func ParseConsistencyLevel(level string) (ConsistencyLevel, error) { - switch strings.ToLower(level) { - case "any": - return ConsistencyLevelAny, nil - case "one": - return ConsistencyLevelOne, nil - case "quorum": - return ConsistencyLevelQuorum, nil - case "all": - return ConsistencyLevelAll, nil - default: - return 0, ErrInvalidConsistencyLevel - } -} diff --git a/models/fieldtype_string.go b/models/fieldtype_string.go deleted file mode 100644 index d8016e8bf3e..00000000000 --- a/models/fieldtype_string.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by "stringer -type=FieldType"; DO NOT EDIT. - -package models - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[Integer-0] - _ = x[Float-1] - _ = x[Boolean-2] - _ = x[String-3] - _ = x[Empty-4] - _ = x[Unsigned-5] -} - -const _FieldType_name = "IntegerFloatBooleanStringEmptyUnsigned" - -var _FieldType_index = [...]uint8{0, 7, 12, 19, 25, 30, 38} - -func (i FieldType) String() string { - if i < 0 || i >= FieldType(len(_FieldType_index)-1) { - return "FieldType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _FieldType_name[_FieldType_index[i]:_FieldType_index[i+1]] -} diff --git a/models/gen.go b/models/gen.go deleted file mode 100644 index 0aaa43f2037..00000000000 --- a/models/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -package models - -//go:generate stringer -type=FieldType diff --git a/models/inline_fnv.go b/models/inline_fnv.go deleted file mode 100644 index eec1ae8b013..00000000000 --- a/models/inline_fnv.go +++ /dev/null @@ -1,32 +0,0 @@ -package models // import "github.com/influxdata/influxdb/models" - -// from stdlib hash/fnv/fnv.go -const ( - prime64 = 1099511628211 - offset64 = 14695981039346656037 -) - -// InlineFNV64a is an alloc-free port of the standard library's fnv64a. -// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function. -type InlineFNV64a uint64 - -// NewInlineFNV64a returns a new instance of InlineFNV64a. -func NewInlineFNV64a() InlineFNV64a { - return offset64 -} - -// Write adds data to the running hash. -func (s *InlineFNV64a) Write(data []byte) (int, error) { - hash := uint64(*s) - for _, c := range data { - hash ^= uint64(c) - hash *= prime64 - } - *s = InlineFNV64a(hash) - return len(data), nil -} - -// Sum64 returns the uint64 of the current resulting hash. -func (s *InlineFNV64a) Sum64() uint64 { - return uint64(*s) -} diff --git a/models/inline_fnv_test.go b/models/inline_fnv_test.go deleted file mode 100644 index cef6b6566c8..00000000000 --- a/models/inline_fnv_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package models_test - -import ( - "hash/fnv" - "testing" - "testing/quick" - - "github.com/influxdata/influxdb/v2/models" -) - -func TestInlineFNV64aEquivalenceFuzz(t *testing.T) { - f := func(data []byte) bool { - stdlibFNV := fnv.New64a() - stdlibFNV.Write(data) - want := stdlibFNV.Sum64() - - inlineFNV := models.NewInlineFNV64a() - inlineFNV.Write(data) - got := inlineFNV.Sum64() - - return want == got - } - cfg := &quick.Config{ - MaxCount: 10000, - } - if err := quick.Check(f, cfg); err != nil { - t.Fatal(err) - } -} diff --git a/models/inline_strconv_parse.go b/models/inline_strconv_parse.go deleted file mode 100644 index 0ad5468994e..00000000000 --- a/models/inline_strconv_parse.go +++ /dev/null @@ -1,34 +0,0 @@ -package models // import "github.com/influxdata/influxdb/models" - -import ( - "strconv" - "unsafe" -) - -// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt. -func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) { - s := unsafeBytesToString(b) - return strconv.ParseInt(s, base, bitSize) -} - -// parseUintBytes is a zero-alloc wrapper around strconv.ParseUint. -func parseUintBytes(b []byte, base int, bitSize int) (i uint64, err error) { - s := unsafeBytesToString(b) - return strconv.ParseUint(s, base, bitSize) -} - -// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat. -func parseFloatBytes(b []byte, bitSize int) (float64, error) { - s := unsafeBytesToString(b) - return strconv.ParseFloat(s, bitSize) -} - -// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool. 
-func parseBoolBytes(b []byte) (bool, error) { - return strconv.ParseBool(unsafeBytesToString(b)) -} - -// unsafeBytesToString converts a []byte to a string without a heap allocation. -func unsafeBytesToString(in []byte) string { - return *(*string)(unsafe.Pointer(&in)) -} diff --git a/models/inline_strconv_parse_test.go b/models/inline_strconv_parse_test.go deleted file mode 100644 index 119f543d78b..00000000000 --- a/models/inline_strconv_parse_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package models - -import ( - "strconv" - "testing" - "testing/quick" -) - -func TestParseIntBytesEquivalenceFuzz(t *testing.T) { - f := func(b []byte, base int, bitSize int) bool { - exp, expErr := strconv.ParseInt(string(b), base, bitSize) - got, gotErr := parseIntBytes(b, base, bitSize) - - return exp == got && checkErrs(expErr, gotErr) - } - - cfg := &quick.Config{ - MaxCount: 10000, - } - - if err := quick.Check(f, cfg); err != nil { - t.Fatal(err) - } -} - -func TestParseIntBytesValid64bitBase10EquivalenceFuzz(t *testing.T) { - buf := []byte{} - f := func(n int64) bool { - buf = strconv.AppendInt(buf[:0], n, 10) - - exp, expErr := strconv.ParseInt(string(buf), 10, 64) - got, gotErr := parseIntBytes(buf, 10, 64) - - return exp == got && checkErrs(expErr, gotErr) - } - - cfg := &quick.Config{ - MaxCount: 10000, - } - - if err := quick.Check(f, cfg); err != nil { - t.Fatal(err) - } -} - -func TestParseFloatBytesEquivalenceFuzz(t *testing.T) { - f := func(b []byte, bitSize int) bool { - exp, expErr := strconv.ParseFloat(string(b), bitSize) - got, gotErr := parseFloatBytes(b, bitSize) - - return exp == got && checkErrs(expErr, gotErr) - } - - cfg := &quick.Config{ - MaxCount: 10000, - } - - if err := quick.Check(f, cfg); err != nil { - t.Fatal(err) - } -} - -func TestParseFloatBytesValid64bitEquivalenceFuzz(t *testing.T) { - buf := []byte{} - f := func(n float64) bool { - buf = strconv.AppendFloat(buf[:0], n, 'f', -1, 64) - - exp, expErr := strconv.ParseFloat(string(buf), 64) - got, gotErr := parseFloatBytes(buf, 64) - - return exp == got && checkErrs(expErr, gotErr) - } - - cfg := &quick.Config{ - MaxCount: 10000, - } - - if err := quick.Check(f, cfg); err != nil { - t.Fatal(err) - } -} - -func TestParseBoolBytesEquivalence(t *testing.T) { - var buf []byte - for _, s := range []string{"1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False", "fail", "TrUe", "FAlSE", "numbers", ""} { - buf = append(buf[:0], s...) - - exp, expErr := strconv.ParseBool(s) - got, gotErr := parseBoolBytes(buf) - - if got != exp || !checkErrs(expErr, gotErr) { - t.Errorf("Failed to parse boolean value %q correctly: wanted (%t, %v), got (%t, %v)", s, exp, expErr, got, gotErr) - } - } -} - -func checkErrs(a, b error) bool { - if (a == nil) != (b == nil) { - return false - } - - return a == nil || a.Error() == b.Error() -} diff --git a/models/points.go b/models/points.go deleted file mode 100644 index bad28d53a7b..00000000000 --- a/models/points.go +++ /dev/null @@ -1,2640 +0,0 @@ -// Package models implements basic objects used throughout the TICK stack. -package models // import "github.com/influxdata/influxdb/models" - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "sort" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" - - "github.com/influxdata/influxdb/v2/pkg/escape" -) - -const ( - // Values used to store the field key and measurement name as special internal tags. 
- FieldKeyTagKey = "\xff" - MeasurementTagKey = "\x00" - - // reserved tag keys which when present cause the point to be discarded - // and an error returned - reservedFieldTagKey = "_field" - reservedMeasurementTagKey = "_measurement" - reservedTimeTagKey = "time" -) - -var ( - // Predefined byte representations of special tag keys. - FieldKeyTagKeyBytes = []byte(FieldKeyTagKey) - MeasurementTagKeyBytes = []byte(MeasurementTagKey) - - // set of reserved tag keys which cannot be present when a point is being parsed. - reservedTagKeys = [][]byte{ - FieldKeyTagKeyBytes, - MeasurementTagKeyBytes, - []byte(reservedFieldTagKey), - []byte(reservedMeasurementTagKey), - []byte(reservedTimeTagKey), - } -) - -type escapeSet struct { - k [1]byte - esc [2]byte -} - -var ( - measurementEscapeCodes = [...]escapeSet{ - {k: [1]byte{','}, esc: [2]byte{'\\', ','}}, - {k: [1]byte{' '}, esc: [2]byte{'\\', ' '}}, - } - - tagEscapeCodes = [...]escapeSet{ - {k: [1]byte{','}, esc: [2]byte{'\\', ','}}, - {k: [1]byte{' '}, esc: [2]byte{'\\', ' '}}, - {k: [1]byte{'='}, esc: [2]byte{'\\', '='}}, - } - - // ErrPointMustHaveAField is returned when operating on a point that does not have any fields. - ErrPointMustHaveAField = errors.New("point without fields is unsupported") - - // ErrInvalidNumber is returned when a number is expected but not provided. - ErrInvalidNumber = errors.New("invalid number") - - // ErrInvalidPoint is returned when a point cannot be parsed correctly. - ErrInvalidPoint = errors.New("point is invalid") - - // ErrInvalidKevValuePairs is returned when the number of key, value pairs - // is odd, indicating a missing value. - ErrInvalidKevValuePairs = errors.New("key/value pairs is an odd length") -) - -const ( - // MaxKeyLength is the largest allowed size of the combined measurement and tag keys. - MaxKeyLength = 65535 -) - -// Point defines the values that will be written to the database. -type Point interface { - // Name return the measurement name for the point. - Name() []byte - - // SetName updates the measurement name for the point. - SetName(string) - - // Tags returns the tag set for the point. - Tags() Tags - - // ForEachTag iterates over each tag invoking fn. If fn return false, iteration stops. - ForEachTag(fn func(k, v []byte) bool) - - // AddTag adds or replaces a tag value for a point. - AddTag(key, value string) - - // SetTags replaces the tags for the point. - SetTags(tags Tags) - - // HasTag returns true if the tag exists for the point. - HasTag(tag []byte) bool - - // Fields returns the fields for the point. - Fields() (Fields, error) - - // Time return the timestamp for the point. - Time() time.Time - - // SetTime updates the timestamp for the point. - SetTime(t time.Time) - - // UnixNano returns the timestamp of the point as nanoseconds since Unix epoch. - UnixNano() int64 - - // HashID returns a non-cryptographic checksum of the point's key. - HashID() uint64 - - // Key returns the key (measurement joined with tags) of the point. - Key() []byte - - // String returns a string representation of the point. If there is a - // timestamp associated with the point then it will be specified with the default - // precision of nanoseconds. - String() string - - // MarshalBinary returns a binary representation of the point. - MarshalBinary() ([]byte, error) - - // PrecisionString returns a string representation of the point. If there - // is a timestamp associated with the point then it will be specified in the - // given unit. 
- PrecisionString(precision string) string - - // RoundedString returns a string representation of the point. If there - // is a timestamp associated with the point, then it will be rounded to the - // given duration. - RoundedString(d time.Duration) string - - // Split will attempt to return multiple points with the same timestamp whose - // string representations are no longer than size. Points with a single field or - // a point without a timestamp may exceed the requested size. - Split(size int) []Point - - // Round will round the timestamp of the point to the given duration. - Round(d time.Duration) - - // StringSize returns the length of the string that would be returned by String(). - StringSize() int - - // AppendString appends the result of String() to the provided buffer and returns - // the result, potentially reducing string allocations. - AppendString(buf []byte) []byte - - // FieldIterator returns a FieldIterator that can be used to traverse the - // fields of a point without constructing the in-memory map. - FieldIterator() FieldIterator -} - -// FieldType represents the type of a field. -type FieldType int - -const ( - // Integer indicates the field's type is integer. - Integer FieldType = iota - - // Float indicates the field's type is float. - Float - - // Boolean indicates the field's type is boolean. - Boolean - - // String indicates the field's type is string. - String - - // Empty is used to indicate that there is no field. - Empty - - // Unsigned indicates the field's type is an unsigned integer. - Unsigned -) - -// FieldIterator provides a low-allocation interface to iterate through a point's fields. -type FieldIterator interface { - // Next indicates whether there any fields remaining. - Next() bool - - // FieldKey returns the key of the current field. - FieldKey() []byte - - // Type returns the FieldType of the current field. - Type() FieldType - - // StringValue returns the string value of the current field. - StringValue() string - - // IntegerValue returns the integer value of the current field. - IntegerValue() (int64, error) - - // UnsignedValue returns the unsigned value of the current field. - UnsignedValue() (uint64, error) - - // BooleanValue returns the boolean value of the current field. - BooleanValue() (bool, error) - - // FloatValue returns the float value of the current field. - FloatValue() (float64, error) - - // Reset resets the iterator to its initial state. - Reset() -} - -// Points represents a sortable list of points by timestamp. -type Points []Point - -// Len implements sort.Interface. -func (a Points) Len() int { return len(a) } - -// Less implements sort.Interface. -func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) } - -// Swap implements sort.Interface. -func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// point is the default implementation of Point. 
-type point struct { - time time.Time - - // text encoding of measurement and tags - // key must always be stored sorted by tags; if the original line was not sorted, - // we need to re-sort it - key []byte - - // text encoding of field data - fields []byte - - // text encoding of timestamp - ts []byte - - // cached version of parsed fields from data - cachedFields map[string]interface{} - - // cached version of parsed name from key - cachedName string - - // cached version of parsed tags - cachedTags Tags - - it fieldIterator -} - -// type assertions -var ( - _ Point = (*point)(nil) - _ FieldIterator = (*point)(nil) -) - -const ( - // the number of characters for the largest possible int64 (9223372036854775807) - maxInt64Digits = 19 - - // the number of characters for the smallest possible int64 (-9223372036854775808) - minInt64Digits = 20 - - // the number of characters for the largest possible uint64 (18446744073709551615) - maxUint64Digits = 20 - - // the number of characters required for the largest float64 before a range check - // would occur during parsing - maxFloat64Digits = 25 - - // the number of characters required for the smallest float64 before a range check - // would occur during parsing - minFloat64Digits = 27 -) - -// ParsePoints returns a slice of Points from a text representation of a point -// with each point separated by newlines. If any points fail to parse, a non-nil error -// will be returned in addition to the points that parsed successfully. -func ParsePoints(buf []byte) ([]Point, error) { - return ParsePointsWithPrecision(buf, time.Now().UTC(), "n") -} - -// ParsePointsString is identical to ParsePoints but accepts a string. -func ParsePointsString(buf string) ([]Point, error) { - return ParsePoints([]byte(buf)) -} - -// ParseKey returns the measurement name and tags from a point. -// -// NOTE: to minimize heap allocations, the returned Tags will refer to subslices of buf. -// This can have the unintended effect of preventing buf from being garbage collected. -func ParseKey(buf []byte) (string, Tags) { - name, tags := ParseKeyBytes(buf) - return string(name), tags -} - -func ParseKeyBytes(buf []byte) ([]byte, Tags) { - return ParseKeyBytesWithTags(buf, nil) -} - -func ParseKeyBytesWithTags(buf []byte, tags Tags) ([]byte, Tags) { - // Ignore the error because scanMeasurement returns "missing fields" which we ignore - // when just parsing a key - state, i, _ := scanMeasurement(buf, 0) - - var name []byte - if state == tagKeyState { - tags = parseTags(buf, tags) - // scanMeasurement returns the location of the comma if there are tags, strip that off - name = buf[:i-1] - } else { - name = buf[:i] - } - return unescapeMeasurement(name), tags -} - -func ParseTags(buf []byte) Tags { - return parseTags(buf, nil) -} - -func ParseTagsWithTags(buf []byte, tags Tags) Tags { - return parseTags(buf, tags) -} - -func ParseName(buf []byte) []byte { - // Ignore the error because scanMeasurement returns "missing fields" which we ignore - // when just parsing a key - state, i, _ := scanMeasurement(buf, 0) - var name []byte - if state == tagKeyState { - name = buf[:i-1] - } else { - name = buf[:i] - } - - return unescapeMeasurement(name) -} - -// ValidPrecision checks if the precision is known. -func ValidPrecision(precision string) bool { - switch precision { - case "ns", "us", "ms", "s": - return true - default: - return false - } -} - -// ParsePointsWithPrecision is similar to ParsePoints, but allows the -// caller to provide a precision for time.
-// -// NOTE: to minimize heap allocations, the returned Points will refer to subslices of buf. -// This can have the unintended effect preventing buf from being garbage collected. -func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) { - points := make([]Point, 0, bytes.Count(buf, []byte{'\n'})+1) - var ( - pos int - block []byte - failed []string - ) - for pos < len(buf) { - pos, block = scanLine(buf, pos) - pos++ - - if len(block) == 0 { - continue - } - - start := skipWhitespace(block, 0) - - // If line is all whitespace, just skip it - if start >= len(block) { - continue - } - - // lines which start with '#' are comments - if block[start] == '#' { - continue - } - - // strip the newline if one is present - if block[len(block)-1] == '\n' { - block = block[:len(block)-1] - } - - pt, err := parsePoint(block[start:], defaultTime, precision) - if err != nil { - failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:]), err)) - } else { - points = append(points, pt) - } - - } - if len(failed) > 0 { - return points, fmt.Errorf("%s", strings.Join(failed, "\n")) - } - return points, nil - -} - -func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) { - // scan the first block which is measurement[,tag1=value1,tag2=value2...] - pos, key, err := scanKey(buf, 0) - if err != nil { - return nil, err - } - - // measurement name is required - if len(key) == 0 { - return nil, fmt.Errorf("missing measurement") - } - - if len(key) > MaxKeyLength { - return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength) - } - - // scan the second block is which is field1=value1[,field2=value2,...] - pos, fields, err := scanFields(buf, pos) - if err != nil { - return nil, err - } - - // at least one field is required - if len(fields) == 0 { - return nil, fmt.Errorf("missing fields") - } - - var maxKeyErr error - err = walkFields(fields, func(k, v []byte) bool { - if sz := seriesKeySize(key, k); sz > MaxKeyLength { - maxKeyErr = fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength) - return false - } - return true - }) - - if err != nil { - return nil, err - } - - if maxKeyErr != nil { - return nil, maxKeyErr - } - - // scan the last block which is an optional integer timestamp - pos, ts, err := scanTime(buf, pos) - if err != nil { - return nil, err - } - - pt := &point{ - key: key, - fields: fields, - ts: ts, - } - - if len(ts) == 0 { - pt.time = defaultTime - pt.SetPrecision(precision) - } else { - ts, err := parseIntBytes(ts, 10, 64) - if err != nil { - return nil, err - } - pt.time, err = SafeCalcTime(ts, precision) - if err != nil { - return nil, err - } - - // Determine if there are illegal non-whitespace characters after the - // timestamp block. - for pos < len(buf) { - if buf[pos] != ' ' { - return nil, ErrInvalidPoint - } - pos++ - } - } - return pt, nil -} - -// GetPrecisionMultiplier will return a multiplier for the precision specified. -func GetPrecisionMultiplier(precision string) int64 { - d := time.Nanosecond - switch precision { - case "us": - d = time.Microsecond - case "ms": - d = time.Millisecond - case "s": - d = time.Second - } - return int64(d) -} - -// scanKey scans buf starting at i for the measurement and tag portion of the point. -// It returns the ending position and the byte slice of key within buf. If there -// are tags, they will be sorted if they are not already. 
-func scanKey(buf []byte, i int) (int, []byte, error) { - start := skipWhitespace(buf, i) - - i = start - - // Determines whether the tags are sort, assume they are - sorted := true - - // indices holds the indexes within buf of the start of each tag. For example, - // a buf of 'cpu,host=a,region=b,zone=c' would have indices slice of [4,11,20] - // which indicates that the first tag starts at buf[4], seconds at buf[11], and - // last at buf[20] - indices := make([]int, 100) - - // tracks how many commas we've seen so we know how many values are indices. - // Since indices is an arbitrarily large slice, - // we need to know how many values in the buffer are in use. - commas := 0 - - // First scan the Point's measurement. - state, i, err := scanMeasurement(buf, i) - if err != nil { - return i, buf[start:i], err - } - - // Optionally scan tags if needed. - if state == tagKeyState { - i, commas, indices, err = scanTags(buf, i, indices) - if err != nil { - return i, buf[start:i], err - } - } - - // Iterate over tags keys ensure that we do not encounter any - // of the reserved tag keys such as _measurement or _field. - for j := 0; j < commas; j++ { - _, key := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=') - - for _, reserved := range reservedTagKeys { - if bytes.Equal(key, reserved) { - return i, buf[start:i], fmt.Errorf("cannot use reserved tag key %q", key) - } - } - } - - // Now we know where the key region is within buf, and the location of tags, we - // need to determine if duplicate tags exist and if the tags are sorted. This iterates - // over the list comparing each tag in the sequence with each other. - for j := 0; j < commas-1; j++ { - // get the left and right tags - _, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=') - _, right := scanTo(buf[indices[j+1]:indices[j+2]-1], 0, '=') - - // If left is greater than right, the tags are not sorted. We do not have to - // continue because the short path no longer works. - // If the tags are equal, then there are duplicate tags, and we should abort. - // If the tags are not sorted, this pass may not find duplicate tags and we - // need to do a more exhaustive search later. - if cmp := bytes.Compare(left, right); cmp > 0 { - sorted = false - break - } else if cmp == 0 { - return i, buf[start:i], fmt.Errorf("duplicate tags") - } - } - - // If the tags are not sorted, then sort them. This sort is inline and - // uses the tag indices we created earlier. The actual buffer is not sorted, the - // indices are using the buffer for value comparison. After the indices are sorted, - // the buffer is reconstructed from the sorted indices. - if !sorted && commas > 0 { - // Get the measurement name for later - measurement := buf[start : indices[0]-1] - - // Sort the indices - indices := indices[:commas] - insertionSort(0, commas, buf, indices) - - // Create a new key using the measurement and sorted indices - b := make([]byte, len(buf[start:i])) - pos := copy(b, measurement) - for _, i := range indices { - b[pos] = ',' - pos++ - _, v := scanToSpaceOr(buf, i, ',') - pos += copy(b[pos:], v) - } - - // Check again for duplicate tags now that the tags are sorted. - for j := 0; j < commas-1; j++ { - // get the left and right tags - _, left := scanTo(buf[indices[j]:], 0, '=') - _, right := scanTo(buf[indices[j+1]:], 0, '=') - - // If the tags are equal, then there are duplicate tags, and we should abort. - // If the tags are not sorted, this pass may not find duplicate tags and we - // need to do a more exhaustive search later. 
- if bytes.Equal(left, right) { - return i, b, fmt.Errorf("duplicate tags") - } - } - - return i, b, nil - } - - return i, buf[start:i], nil -} - -// The following constants allow us to specify which state to move to -// next, when scanning sections of a Point. -const ( - tagKeyState = iota - tagValueState - fieldsState -) - -// scanMeasurement examines the measurement part of a Point, returning -// the next state to move to, and the current location in the buffer. -func scanMeasurement(buf []byte, i int) (int, int, error) { - // Check first byte of measurement, anything except a comma is fine. - // It can't be a space, since whitespace is stripped prior to this - // function call. - if i >= len(buf) || buf[i] == ',' { - return -1, i, fmt.Errorf("missing measurement") - } - - for { - i++ - if i >= len(buf) { - // cpu - return -1, i, fmt.Errorf("missing fields") - } - - if buf[i-1] == '\\' { - // Skip character (it's escaped). - continue - } - - // Unescaped comma; move onto scanning the tags. - if buf[i] == ',' { - return tagKeyState, i + 1, nil - } - - // Unescaped space; move onto scanning the fields. - if buf[i] == ' ' { - // cpu value=1.0 - return fieldsState, i, nil - } - } -} - -// scanTags examines all the tags in a Point, keeping track of and -// returning the updated indices slice, number of commas and location -// in buf where to start examining the Point fields. -func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) { - var ( - err error - commas int - state = tagKeyState - ) - - for { - switch state { - case tagKeyState: - // Grow our indices slice if we have too many tags. - if commas >= len(indices) { - newIndics := make([]int, cap(indices)*2) - copy(newIndics, indices) - indices = newIndics - } - indices[commas] = i - commas++ - - i, err = scanTagsKey(buf, i) - state = tagValueState // tag value always follows a tag key - case tagValueState: - state, i, err = scanTagsValue(buf, i) - case fieldsState: - // Grow our indices slice if we had exactly enough tags to fill it - if commas >= len(indices) { - // The parser is in `fieldsState`, so there are no more - // tags. We only need 1 more entry in the slice to store - // the final entry. - newIndics := make([]int, cap(indices)+1) - copy(newIndics, indices) - indices = newIndics - } - indices[commas] = i + 1 - return i, commas, indices, nil - } - - if err != nil { - return i, commas, indices, err - } - } -} - -// scanTagsKey scans each character in a tag key. -func scanTagsKey(buf []byte, i int) (int, error) { - // First character of the key. - if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' { - // cpu,{'', ' ', ',', '='} - return i, fmt.Errorf("missing tag key") - } - - // Examine each character in the tag key until we hit an unescaped - // equals (the tag value), or we hit an error (i.e., unescaped - // space or comma). - for { - i++ - - // Either we reached the end of the buffer or we hit an - // unescaped comma or space. - if i >= len(buf) || - ((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') { - // cpu,tag{'', ' ', ','} - return i, fmt.Errorf("missing tag value") - } - - if buf[i] == '=' && buf[i-1] != '\\' { - // cpu,tag= - return i + 1, nil - } - } -} - -// scanTagsValue scans each character in a tag value. -func scanTagsValue(buf []byte, i int) (int, int, error) { - // Tag value cannot be empty. 
- if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' { - // cpu,tag={',', ' '} - return -1, i, fmt.Errorf("missing tag value") - } - - // Examine each character in the tag value until we hit an unescaped - // comma (move onto next tag key), an unescaped space (move onto - // fields), or we error out. - for { - i++ - if i >= len(buf) { - // cpu,tag=value - return -1, i, fmt.Errorf("missing fields") - } - - // An unescaped equals sign is an invalid tag value. - if buf[i] == '=' && buf[i-1] != '\\' { - // cpu,tag={'=', 'fo=o'} - return -1, i, fmt.Errorf("invalid tag format") - } - - if buf[i] == ',' && buf[i-1] != '\\' { - // cpu,tag=foo, - return tagKeyState, i + 1, nil - } - - // cpu,tag=foo value=1.0 - // cpu, tag=foo\= value=1.0 - if buf[i] == ' ' && buf[i-1] != '\\' { - return fieldsState, i, nil - } - } -} - -func insertionSort(l, r int, buf []byte, indices []int) { - for i := l + 1; i < r; i++ { - for j := i; j > l && less(buf, indices, j, j-1); j-- { - indices[j], indices[j-1] = indices[j-1], indices[j] - } - } -} - -func less(buf []byte, indices []int, i, j int) bool { - // This grabs the tag names for i & j, it ignores the values - _, a := scanTo(buf, indices[i], '=') - _, b := scanTo(buf, indices[j], '=') - return bytes.Compare(a, b) < 0 -} - -// scanFields scans buf, starting at i for the fields section of a point. It returns -// the ending position and the byte slice of the fields within buf. -func scanFields(buf []byte, i int) (int, []byte, error) { - start := skipWhitespace(buf, i) - i = start - quoted := false - - // tracks how many '=' we've seen - equals := 0 - - // tracks how many commas we've seen - commas := 0 - - for { - // reached the end of buf? - if i >= len(buf) { - break - } - - // escaped characters? - if buf[i] == '\\' && i+1 < len(buf) { - i += 2 - continue - } - - // If the value is quoted, scan until we get to the end quote - // Only quote values in the field value since quotes are not significant - // in the field key - if buf[i] == '"' && equals > commas { - quoted = !quoted - i++ - continue - } - - // If we see an =, ensure that there is at least on char before and after it - if buf[i] == '=' && !quoted { - equals++ - - // check for "... =123" but allow "a\ =123" - if buf[i-1] == ' ' && buf[i-2] != '\\' { - return i, buf[start:i], fmt.Errorf("missing field key") - } - - // check for "...a=123,=456" but allow "a=123,a\,=456" - if buf[i-1] == ',' && buf[i-2] != '\\' { - return i, buf[start:i], fmt.Errorf("missing field key") - } - - // check for "... value=" - if i+1 >= len(buf) { - return i, buf[start:i], fmt.Errorf("missing field value") - } - - // check for "... value=,value2=..." - if buf[i+1] == ',' || buf[i+1] == ' ' { - return i, buf[start:i], fmt.Errorf("missing field value") - } - - if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' { - var err error - i, err = scanNumber(buf, i+1) - if err != nil { - return i, buf[start:i], err - } - continue - } - // If next byte is not a double-quote, the value must be a boolean - if buf[i+1] != '"' { - var err error - i, _, err = scanBoolean(buf, i+1) - if err != nil { - return i, buf[start:i], err - } - continue - } - } - - if buf[i] == ',' && !quoted { - commas++ - } - - // reached end of block? - if buf[i] == ' ' && !quoted { - break - } - i++ - } - - if quoted { - return i, buf[start:i], fmt.Errorf("unbalanced quotes") - } - - // check that all field sections had key and values (e.g. 
prevent "a=1,b" - if equals == 0 || commas != equals-1 { - return i, buf[start:i], fmt.Errorf("invalid field format") - } - - return i, buf[start:i], nil -} - -// scanTime scans buf, starting at i for the time section of a point. It -// returns the ending position and the byte slice of the timestamp within buf, -// and an error if the timestamp is not in the correct numeric format. -func scanTime(buf []byte, i int) (int, []byte, error) { - start := skipWhitespace(buf, i) - i = start - - for { - // reached the end of buf? - if i >= len(buf) { - break - } - - // Reached end of block or trailing whitespace? - if buf[i] == '\n' || buf[i] == ' ' { - break - } - - // Handle negative timestamps - if i == start && buf[i] == '-' { - i++ - continue - } - - // Timestamps should be integers, make sure they are so we don't need - // to actually parse the timestamp until needed. - if buf[i] < '0' || buf[i] > '9' { - return i, buf[start:i], fmt.Errorf("bad timestamp") - } - i++ - } - return i, buf[start:i], nil -} - -func isNumeric(b byte) bool { - return (b >= '0' && b <= '9') || b == '.' -} - -// scanNumber returns the end position within buf, starting at i, after -// scanning over buf for an integer or float. It returns an -// error if an invalid number is scanned. -func scanNumber(buf []byte, i int) (int, error) { - start := i - var isInt, isUnsigned bool - - // Is negative number? - if i < len(buf) && buf[i] == '-' { - i++ - // There must be more characters now, as just '-' is illegal. - if i == len(buf) { - return i, ErrInvalidNumber - } - } - - // how many decimal points we've seen - decimal := false - - // indicates the number is a float in scientific notation - scientific := false - - for { - if i >= len(buf) { - break - } - - if buf[i] == ',' || buf[i] == ' ' { - break - } - - if buf[i] == 'i' && i > start && !(isInt || isUnsigned) { - isInt = true - i++ - continue - } else if buf[i] == 'u' && i > start && !(isInt || isUnsigned) { - isUnsigned = true - i++ - continue - } - - if buf[i] == '.' { - // Can't have more than 1 decimal (e.g. 1.1.1 should fail) - if decimal { - return i, ErrInvalidNumber - } - decimal = true - } - - // `e` is valid for floats but not as the first char - if i > start && (buf[i] == 'e' || buf[i] == 'E') { - scientific = true - i++ - continue - } - - // + and - are only valid at this point if they follow an e (scientific notation) - if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') { - i++ - continue - } - - // NaN is an unsupported value - if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') { - return i, ErrInvalidNumber - } - - if !isNumeric(buf[i]) { - return i, ErrInvalidNumber - } - i++ - } - - if (isInt || isUnsigned) && (decimal || scientific) { - return i, ErrInvalidNumber - } - - numericDigits := i - start - if isInt { - numericDigits-- - } - if decimal { - numericDigits-- - } - if buf[start] == '-' { - numericDigits-- - } - - if numericDigits == 0 { - return i, ErrInvalidNumber - } - - // It's more common that numbers will be within min/max range for their type but we need to prevent - // out of range numbers from being parsed successfully. This uses some simple heuristics to decide - // if we should parse the number to the actual type. It does not do it all the time because it incurs - // extra allocations and we end up converting the type again when writing points to disk. - if isInt { - // Make sure the last char is an 'i' for integers (e.g. 
9i10 is not valid) - if buf[i-1] != 'i' { - return i, ErrInvalidNumber - } - // Parse the int to check bounds the number of digits could be larger than the max range - // We subtract 1 from the index to remove the `i` from our tests - if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits { - if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil { - return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err) - } - } - } else if isUnsigned { - // Make sure the last char is a 'u' for unsigned - if buf[i-1] != 'u' { - return i, ErrInvalidNumber - } - // Make sure the first char is not a '-' for unsigned - if buf[start] == '-' { - return i, ErrInvalidNumber - } - // Parse the uint to check bounds the number of digits could be larger than the max range - // We subtract 1 from the index to remove the `u` from our tests - if len(buf[start:i-1]) >= maxUint64Digits { - if _, err := parseUintBytes(buf[start:i-1], 10, 64); err != nil { - return i, fmt.Errorf("unable to parse unsigned %s: %s", buf[start:i-1], err) - } - } - } else { - // Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range - if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits { - if _, err := parseFloatBytes(buf[start:i], 64); err != nil { - return i, fmt.Errorf("invalid float") - } - } - } - - return i, nil -} - -// scanBoolean returns the end position within buf, start at i after -// scanning over buf for boolean. Valid values for a boolean are -// t, T, true, TRUE, f, F, false, FALSE. It returns an error if a invalid boolean -// is scanned. -func scanBoolean(buf []byte, i int) (int, []byte, error) { - start := i - - if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') { - return i, buf[start:i], fmt.Errorf("invalid boolean") - } - - i++ - for { - if i >= len(buf) { - break - } - - if buf[i] == ',' || buf[i] == ' ' { - break - } - i++ - } - - // Single char bool (t, T, f, F) is ok - if i-start == 1 { - return i, buf[start:i], nil - } - - // length must be 4 for true or TRUE - if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 { - return i, buf[start:i], fmt.Errorf("invalid boolean") - } - - // length must be 5 for false or FALSE - if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 { - return i, buf[start:i], fmt.Errorf("invalid boolean") - } - - // Otherwise - valid := false - switch buf[start] { - case 't': - valid = bytes.Equal(buf[start:i], []byte("true")) - case 'f': - valid = bytes.Equal(buf[start:i], []byte("false")) - case 'T': - valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True")) - case 'F': - valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False")) - } - - if !valid { - return i, buf[start:i], fmt.Errorf("invalid boolean") - } - - return i, buf[start:i], nil - -} - -// skipWhitespace returns the end position within buf, starting at i after -// scanning over spaces in tags. -func skipWhitespace(buf []byte, i int) int { - for i < len(buf) { - if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 { - break - } - i++ - } - return i -} - -// scanLine returns the end position in buf and the next line found within -// buf. 
-func scanLine(buf []byte, i int) (int, []byte) { - start := i - quoted := false - fields := false - - // tracks how many '=' and commas we've seen - // this duplicates some of the functionality in scanFields - equals := 0 - commas := 0 - for { - // reached the end of buf? - if i >= len(buf) { - break - } - - // skip past escaped characters - if buf[i] == '\\' && i+2 < len(buf) { - i += 2 - continue - } - - if buf[i] == ' ' { - fields = true - } - - // If we see a double quote, makes sure it is not escaped - if fields { - if !quoted && buf[i] == '=' { - i++ - equals++ - continue - } else if !quoted && buf[i] == ',' { - i++ - commas++ - continue - } else if buf[i] == '"' && equals > commas { - i++ - quoted = !quoted - continue - } - } - - if buf[i] == '\n' && !quoted { - break - } - - i++ - } - - return i, buf[start:i] -} - -// scanTo returns the end position in buf and the next consecutive block -// of bytes, starting from i and ending with stop byte, where stop byte -// has not been escaped. -// -// If there are leading spaces, they are skipped. -func scanTo(buf []byte, i int, stop byte) (int, []byte) { - start := i - for { - // reached the end of buf? - if i >= len(buf) { - break - } - - // Reached unescaped stop value? - if buf[i] == stop && (i == 0 || buf[i-1] != '\\') { - break - } - i++ - } - - return i, buf[start:i] -} - -// scanTo returns the end position in buf and the next consecutive block -// of bytes, starting from i and ending with stop byte. If there are leading -// spaces, they are skipped. -func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) { - start := i - if buf[i] == stop || buf[i] == ' ' { - return i, buf[start:i] - } - - for { - i++ - if buf[i-1] == '\\' { - continue - } - - // reached the end of buf? - if i >= len(buf) { - return i, buf[start:i] - } - - // reached end of block? - if buf[i] == stop || buf[i] == ' ' { - return i, buf[start:i] - } - } -} - -func scanTagValue(buf []byte, i int) (int, []byte) { - start := i - for { - if i >= len(buf) { - break - } - - if buf[i] == ',' && buf[i-1] != '\\' { - break - } - i++ - } - if i > len(buf) { - return i, nil - } - return i, buf[start:i] -} - -func scanFieldValue(buf []byte, i int) (int, []byte) { - start := i - quoted := false - for i < len(buf) { - // Only escape char for a field value is a double-quote and backslash - if buf[i] == '\\' && i+1 < len(buf) && (buf[i+1] == '"' || buf[i+1] == '\\') { - i += 2 - continue - } - - // Quoted value? (e.g. 
string) - if buf[i] == '"' { - i++ - quoted = !quoted - continue - } - - if buf[i] == ',' && !quoted { - break - } - i++ - } - return i, buf[start:i] -} - -func EscapeMeasurement(in []byte) []byte { - for _, c := range measurementEscapeCodes { - if bytes.IndexByte(in, c.k[0]) != -1 { - in = bytes.Replace(in, c.k[:], c.esc[:], -1) - } - } - return in -} - -func unescapeMeasurement(in []byte) []byte { - if bytes.IndexByte(in, '\\') == -1 { - return in - } - - for i := range measurementEscapeCodes { - c := &measurementEscapeCodes[i] - if bytes.IndexByte(in, c.k[0]) != -1 { - in = bytes.Replace(in, c.esc[:], c.k[:], -1) - } - } - return in -} - -func escapeTag(in []byte) []byte { - for i := range tagEscapeCodes { - c := &tagEscapeCodes[i] - if bytes.IndexByte(in, c.k[0]) != -1 { - in = bytes.Replace(in, c.k[:], c.esc[:], -1) - } - } - return in -} - -func unescapeTag(in []byte) []byte { - if bytes.IndexByte(in, '\\') == -1 { - return in - } - - for i := range tagEscapeCodes { - c := &tagEscapeCodes[i] - if bytes.IndexByte(in, c.k[0]) != -1 { - in = bytes.Replace(in, c.esc[:], c.k[:], -1) - } - } - return in -} - -// escapeStringFieldReplacer replaces double quotes and backslashes -// with the same character preceded by a backslash. -// As of Go 1.7 this benchmarked better in allocations and CPU time -// compared to iterating through a string byte-by-byte and appending to a new byte slice, -// calling strings.Replace twice, and better than (*Regex).ReplaceAllString. -var escapeStringFieldReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`) - -// EscapeStringField returns a copy of in with any double quotes or -// backslashes with escaped values. -func EscapeStringField(in string) string { - return escapeStringFieldReplacer.Replace(in) -} - -// unescapeStringField returns a copy of in with any escaped double-quotes -// or backslashes unescaped. -func unescapeStringField(in string) string { - if strings.IndexByte(in, '\\') == -1 { - return in - } - - var out []byte - i := 0 - for { - if i >= len(in) { - break - } - // unescape backslashes - if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' { - out = append(out, '\\') - i += 2 - continue - } - // unescape double-quotes - if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' { - out = append(out, '"') - i += 2 - continue - } - out = append(out, in[i]) - i++ - - } - return string(out) -} - -// NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If -// an unsupported field value (NaN, or +/-Inf) or out of range time is passed, this function -// returns an error. -func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) { - key, err := pointKey(name, tags, fields, t) - if err != nil { - return nil, err - } - - return &point{ - key: key, - time: t, - fields: fields.MarshalBinary(), - }, nil -} - -// pointKey checks some basic requirements for valid points, and returns the -// key, along with an possible error. 
-func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) { - if len(fields) == 0 { - return nil, ErrPointMustHaveAField - } - - if !t.IsZero() { - if err := CheckTime(t); err != nil { - return nil, err - } - } - - for key, value := range fields { - switch value := value.(type) { - case float64: - // Ensure the caller validates and handles invalid field values - if math.IsInf(value, 0) { - return nil, fmt.Errorf("+/-Inf is an unsupported value for field %s", key) - } - if math.IsNaN(value) { - return nil, fmt.Errorf("NaN is an unsupported value for field %s", key) - } - case float32: - // Ensure the caller validates and handles invalid field values - if math.IsInf(float64(value), 0) { - return nil, fmt.Errorf("+/-Inf is an unsupported value for field %s", key) - } - if math.IsNaN(float64(value)) { - return nil, fmt.Errorf("NaN is an unsupported value for field %s", key) - } - } - if len(key) == 0 { - return nil, fmt.Errorf("all fields must have non-empty names") - } - } - - key := MakeKey([]byte(measurement), tags) - for field := range fields { - sz := seriesKeySize(key, []byte(field)) - if sz > MaxKeyLength { - return nil, fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength) - } - } - - return key, nil -} - -func seriesKeySize(key, field []byte) int { - // 4 is the length of the tsm1.fieldKeySeparator constant. It's inlined here to avoid a circular - // dependency. - return len(key) + 4 + len(field) -} - -// NewPointFromBytes returns a new Point from a marshalled Point. -func NewPointFromBytes(b []byte) (Point, error) { - p := &point{} - if err := p.UnmarshalBinary(b); err != nil { - return nil, err - } - - // This does some basic validation to ensure there are fields and they - // can be unmarshalled as well. - iter := p.FieldIterator() - var hasField bool - for iter.Next() { - if len(iter.FieldKey()) == 0 { - continue - } - hasField = true - switch iter.Type() { - case Float: - _, err := iter.FloatValue() - if err != nil { - return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) - } - case Integer: - _, err := iter.IntegerValue() - if err != nil { - return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) - } - case Unsigned: - _, err := iter.UnsignedValue() - if err != nil { - return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) - } - case String: - // Skip since this won't return an error - case Boolean: - _, err := iter.BooleanValue() - if err != nil { - return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) - } - } - } - - if !hasField { - return nil, ErrPointMustHaveAField - } - - return p, nil -} - -// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp. If -// an unsupported field value (NaN) is passed, this function panics. -func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point { - pt, err := NewPoint(name, tags, fields, time) - if err != nil { - panic(err.Error()) - } - return pt -} - -// Key returns the key (measurement joined with tags) of the point. -func (p *point) Key() []byte { - return p.key -} - -func (p *point) name() []byte { - _, name := scanTo(p.key, 0, ',') - return name -} - -func (p *point) Name() []byte { - return escape.Unescape(p.name()) -} - -// SetName updates the measurement name for the point. 
-func (p *point) SetName(name string) { - p.cachedName = "" - p.key = MakeKey([]byte(name), p.Tags()) -} - -// Time return the timestamp for the point. -func (p *point) Time() time.Time { - return p.time -} - -// SetTime updates the timestamp for the point. -func (p *point) SetTime(t time.Time) { - p.time = t -} - -// Round will round the timestamp of the point to the given duration. -func (p *point) Round(d time.Duration) { - p.time = p.time.Round(d) -} - -// Tags returns the tag set for the point. -func (p *point) Tags() Tags { - if p.cachedTags != nil { - return p.cachedTags - } - p.cachedTags = parseTags(p.key, nil) - return p.cachedTags -} - -func (p *point) ForEachTag(fn func(k, v []byte) bool) { - walkTags(p.key, fn) -} - -func (p *point) HasTag(tag []byte) bool { - if len(p.key) == 0 { - return false - } - - var exists bool - walkTags(p.key, func(key, value []byte) bool { - if bytes.Equal(tag, key) { - exists = true - return false - } - return true - }) - - return exists -} - -func walkTags(buf []byte, fn func(key, value []byte) bool) { - if len(buf) == 0 { - return - } - - pos, name := scanTo(buf, 0, ',') - - // it's an empty key, so there are no tags - if len(name) == 0 { - return - } - - hasEscape := bytes.IndexByte(buf, '\\') != -1 - i := pos + 1 - var key, value []byte - for { - if i >= len(buf) { - break - } - i, key = scanTo(buf, i, '=') - i, value = scanTagValue(buf, i+1) - - if len(value) == 0 { - continue - } - - if hasEscape { - if !fn(unescapeTag(key), unescapeTag(value)) { - return - } - } else { - if !fn(key, value) { - return - } - } - - i++ - } -} - -// walkFields walks each field key and value via fn. If fn returns false, the iteration -// is stopped. The values are the raw byte slices and not the converted types. -func walkFields(buf []byte, fn func(key, value []byte) bool) error { - var i int - var key, val []byte - for len(buf) > 0 { - i, key = scanTo(buf, 0, '=') - if i > len(buf)-2 { - return fmt.Errorf("invalid value: field-key=%s", key) - } - buf = buf[i+1:] - i, val = scanFieldValue(buf, 0) - buf = buf[i:] - if !fn(key, val) { - break - } - - // slice off comma - if len(buf) > 0 { - buf = buf[1:] - } - } - return nil -} - -// parseTags parses buf into the provided destination tags, returning destination -// Tags, which may have a different length and capacity. -func parseTags(buf []byte, dst Tags) Tags { - if len(buf) == 0 { - return nil - } - - n := bytes.Count(buf, []byte(",")) - if cap(dst) < n { - dst = make(Tags, n) - } else { - dst = dst[:n] - } - - // Ensure existing behaviour when point has no tags and nil slice passed in. - if dst == nil { - dst = Tags{} - } - - // Series keys can contain escaped commas, therefore the number of commas - // in a series key only gives an estimation of the upper bound on the number - // of tags. - var i int - walkTags(buf, func(key, value []byte) bool { - dst[i].Key, dst[i].Value = key, value - i++ - return true - }) - return dst[:i] -} - -// MakeKey creates a key for a set of tags. -func MakeKey(name []byte, tags Tags) []byte { - return AppendMakeKey(nil, name, tags) -} - -// AppendMakeKey appends the key derived from name and tags to dst and returns the extended buffer. -func AppendMakeKey(dst []byte, name []byte, tags Tags) []byte { - // unescape the name and then re-escape it to avoid double escaping. - // The key should always be stored in escaped form. - dst = append(dst, EscapeMeasurement(unescapeMeasurement(name))...) 
- dst = tags.AppendHashKey(dst) - return dst -} - -// SetTags replaces the tags for the point. -func (p *point) SetTags(tags Tags) { - p.key = MakeKey(p.Name(), tags) - p.cachedTags = tags -} - -// AddTag adds or replaces a tag value for a point. -func (p *point) AddTag(key, value string) { - tags := p.Tags() - tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)}) - sort.Sort(tags) - p.cachedTags = tags - p.key = MakeKey(p.Name(), tags) -} - -// Fields returns the fields for the point. -func (p *point) Fields() (Fields, error) { - if p.cachedFields != nil { - return p.cachedFields, nil - } - cf, err := p.unmarshalBinary() - if err != nil { - return nil, err - } - p.cachedFields = cf - return p.cachedFields, nil -} - -// SetPrecision will round a time to the specified precision. -func (p *point) SetPrecision(precision string) { - switch precision { - case "n", "ns": - case "u", "us": - p.SetTime(p.Time().Truncate(time.Microsecond)) - case "ms": - p.SetTime(p.Time().Truncate(time.Millisecond)) - case "s": - p.SetTime(p.Time().Truncate(time.Second)) - case "m": - p.SetTime(p.Time().Truncate(time.Minute)) - case "h": - p.SetTime(p.Time().Truncate(time.Hour)) - } -} - -// String returns the string representation of the point. -func (p *point) String() string { - if p.Time().IsZero() { - return string(p.Key()) + " " + string(p.fields) - } - return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10) -} - -// AppendString appends the string representation of the point to buf. -func (p *point) AppendString(buf []byte) []byte { - buf = append(buf, p.key...) - buf = append(buf, ' ') - buf = append(buf, p.fields...) - - if !p.time.IsZero() { - buf = append(buf, ' ') - buf = strconv.AppendInt(buf, p.UnixNano(), 10) - } - - return buf -} - -// StringSize returns the length of the string that would be returned by String(). -func (p *point) StringSize() int { - size := len(p.key) + len(p.fields) + 1 - - if !p.time.IsZero() { - digits := 1 // even "0" has one digit - t := p.UnixNano() - if t < 0 { - // account for negative sign, then negate - digits++ - t = -t - } - for t > 9 { // already accounted for one digit - digits++ - t /= 10 - } - size += digits + 1 // digits and a space - } - - return size -} - -// MarshalBinary returns a binary representation of the point. -func (p *point) MarshalBinary() ([]byte, error) { - if len(p.fields) == 0 { - return nil, ErrPointMustHaveAField - } - - tb, err := p.time.MarshalBinary() - if err != nil { - return nil, err - } - - b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb)) - i := 0 - - binary.BigEndian.PutUint32(b[i:], uint32(len(p.key))) - i += 4 - - i += copy(b[i:], p.key) - - binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields))) - i += 4 - - i += copy(b[i:], p.fields) - - copy(b[i:], tb) - return b, nil -} - -// UnmarshalBinary decodes a binary representation of the point into a point struct. -func (p *point) UnmarshalBinary(b []byte) error { - var n int - - // Read key length. - if len(b) < 4 { - return io.ErrShortBuffer - } - n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:] - - // Read key. - if len(b) < n { - return io.ErrShortBuffer - } - p.key, b = b[:n], b[n:] - - // Read fields length. - if len(b) < 4 { - return io.ErrShortBuffer - } - n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:] - - // Read fields. - if len(b) < n { - return io.ErrShortBuffer - } - p.fields, b = b[:n], b[n:] - - // Read timestamp. 
- return p.time.UnmarshalBinary(b) -} - -// PrecisionString returns a string representation of the point. If there -// is a timestamp associated with the point then it will be specified in the -// given unit. -func (p *point) PrecisionString(precision string) string { - if p.Time().IsZero() { - return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) - } - return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), - p.UnixNano()/GetPrecisionMultiplier(precision)) -} - -// RoundedString returns a string representation of the point. If there -// is a timestamp associated with the point, then it will be rounded to the -// given duration. -func (p *point) RoundedString(d time.Duration) string { - if p.Time().IsZero() { - return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) - } - return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), - p.time.Round(d).UnixNano()) -} - -func (p *point) unmarshalBinary() (Fields, error) { - iter := p.FieldIterator() - fields := make(Fields, 8) - for iter.Next() { - if len(iter.FieldKey()) == 0 { - continue - } - switch iter.Type() { - case Float: - v, err := iter.FloatValue() - if err != nil { - return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) - } - fields[string(iter.FieldKey())] = v - case Integer: - v, err := iter.IntegerValue() - if err != nil { - return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) - } - fields[string(iter.FieldKey())] = v - case Unsigned: - v, err := iter.UnsignedValue() - if err != nil { - return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) - } - fields[string(iter.FieldKey())] = v - case String: - fields[string(iter.FieldKey())] = iter.StringValue() - case Boolean: - v, err := iter.BooleanValue() - if err != nil { - return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) - } - fields[string(iter.FieldKey())] = v - } - } - return fields, nil -} - -// HashID returns a non-cryptographic checksum of the point's key. -func (p *point) HashID() uint64 { - h := NewInlineFNV64a() - h.Write(p.key) - sum := h.Sum64() - return sum -} - -// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch. -func (p *point) UnixNano() int64 { - return p.Time().UnixNano() -} - -// Split will attempt to return multiple points with the same timestamp whose -// string representations are no longer than size. Points with a single field or -// a point without a timestamp may exceed the requested size. -func (p *point) Split(size int) []Point { - if p.time.IsZero() || p.StringSize() <= size { - return []Point{p} - } - - // key string, timestamp string, spaces - size -= len(p.key) + len(strconv.FormatInt(p.time.UnixNano(), 10)) + 2 - - var points []Point - var start, cur int - - for cur < len(p.fields) { - end, _ := scanTo(p.fields, cur, '=') - end, _ = scanFieldValue(p.fields, end+1) - - if cur > start && end-start > size { - points = append(points, &point{ - key: p.key, - time: p.time, - fields: p.fields[start : cur-1], - }) - start = cur - } - - cur = end + 1 - } - - points = append(points, &point{ - key: p.key, - time: p.time, - fields: p.fields[start:], - }) - - return points -} - -// Tag represents a single key/value tag pair. -type Tag struct { - Key []byte - Value []byte -} - -// NewTag returns a new Tag. -func NewTag(key, value []byte) Tag { - return Tag{ - Key: key, - Value: value, - } -} - -// Size returns the size of the key and value. 
-func (t Tag) Size() int { return len(t.Key) + len(t.Value) }
-
-// Clone returns a shallow copy of Tag.
-//
-// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed.
-// Use Clone to create a Tag with new byte slices that do not refer to the argument to ParsePointsWithPrecision.
-func (t Tag) Clone() Tag {
- other := Tag{
- Key: make([]byte, len(t.Key)),
- Value: make([]byte, len(t.Value)),
- }
-
- copy(other.Key, t.Key)
- copy(other.Value, t.Value)
-
- return other
-}
-
-// String returns the string representation of the tag.
-func (t *Tag) String() string {
- var buf bytes.Buffer
- buf.WriteByte('{')
- buf.WriteString(string(t.Key))
- buf.WriteByte(' ')
- buf.WriteString(string(t.Value))
- buf.WriteByte('}')
- return buf.String()
-}
-
-// Tags represents a sorted list of tags.
type Tags []Tag
-
-// NewTags returns a new Tags from a map.
-func NewTags(m map[string]string) Tags {
- if len(m) == 0 {
- return nil
- }
- a := make(Tags, 0, len(m))
- for k, v := range m {
- a = append(a, NewTag([]byte(k), []byte(v)))
- }
- sort.Sort(a)
- return a
-}
-
-// NewTagsKeyValues returns a new Tags from a list of key, value pairs,
-// ensuring the returned result is correctly sorted. Duplicate keys are removed;
-// however, which duplicate remains is undefined.
-// NewTagsKeyValues will return ErrInvalidKevValuePairs if len(kv) is not even.
-// If the input is guaranteed to be even, the error can be safely ignored.
-// If a has enough capacity, it will be reused.
-func NewTagsKeyValues(a Tags, kv ...[]byte) (Tags, error) {
- if len(kv)%2 == 1 {
- return nil, ErrInvalidKevValuePairs
- }
- if len(kv) == 0 {
- return nil, nil
- }
-
- l := len(kv) / 2
- if cap(a) < l {
- a = make(Tags, 0, l)
- } else {
- a = a[:0]
- }
-
- for i := 0; i < len(kv)-1; i += 2 {
- a = append(a, NewTag(kv[i], kv[i+1]))
- }
-
- if !a.sorted() {
- sort.Sort(a)
- }
-
- // remove duplicates
- j := 0
- for i := 0; i < len(a)-1; i++ {
- if !bytes.Equal(a[i].Key, a[i+1].Key) {
- if j != i {
- // only copy if j has deviated from i, indicating duplicates
- a[j] = a[i]
- }
- j++
- }
- }
-
- a[j] = a[len(a)-1]
- j++
-
- return a[:j], nil
-}
-
-// NewTagsKeyValuesStrings is equivalent to NewTagsKeyValues, except that
-// it will allocate new byte slices for each key, value pair.
-func NewTagsKeyValuesStrings(a Tags, kvs ...string) (Tags, error) {
- kv := make([][]byte, len(kvs))
- for i := range kvs {
- kv[i] = []byte(kvs[i])
- }
- return NewTagsKeyValues(a, kv...)
-}
-
-// Keys returns the list of keys for a tag set.
-func (a Tags) Keys() []string {
- if len(a) == 0 {
- return nil
- }
- keys := make([]string, len(a))
- for i, tag := range a {
- keys[i] = string(tag.Key)
- }
- return keys
-}
-
-// Values returns the list of values for a tag set.
-func (a Tags) Values() []string {
- if len(a) == 0 {
- return nil
- }
- values := make([]string, len(a))
- for i, tag := range a {
- values[i] = string(tag.Value)
- }
- return values
-}
-
-// String returns the string representation of the tags.
-func (a Tags) String() string {
- var buf bytes.Buffer
- buf.WriteByte('[')
- for i := range a {
- buf.WriteString(a[i].String())
- if i < len(a)-1 {
- buf.WriteByte(' ')
- }
- }
- buf.WriteByte(']')
- return buf.String()
-}
-
-// Size returns the number of bytes needed to store all tags. Note, this is
-// the number of bytes needed to store all keys and values and does not account
-// for data structures or delimiters, for example.
-func (a Tags) Size() int { - var total int - for i := range a { - total += a[i].Size() - } - return total -} - -// Clone returns a copy of the slice where the elements are a result of calling `Clone` on the original elements -// -// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed. -// Use Clone to create Tags with new byte slices that do not refer to the argument to ParsePointsWithPrecision. -func (a Tags) Clone() Tags { - if len(a) == 0 { - return nil - } - - others := make(Tags, len(a)) - for i := range a { - others[i] = a[i].Clone() - } - - return others -} - -// KeyValues returns the Tags as a list of key, value pairs, -// maintaining the original order of a. v will be used if it has -// capacity. -func (a Tags) KeyValues(v [][]byte) [][]byte { - l := a.Len() * 2 - if cap(v) < l { - v = make([][]byte, 0, l) - } else { - v = v[:0] - } - for i := range a { - v = append(v, a[i].Key, a[i].Value) - } - return v -} - -// sorted returns true if a is sorted and is an optimization -// to avoid an allocation when calling sort.IsSorted, improving -// performance as much as 50%. -func (a Tags) sorted() bool { - for i := len(a) - 1; i > 0; i-- { - if bytes.Compare(a[i].Key, a[i-1].Key) == -1 { - return false - } - } - return true -} - -func (a Tags) Len() int { return len(a) } -func (a Tags) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 } -func (a Tags) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// Equal returns true if a equals other. -func (a Tags) Equal(other Tags) bool { - if len(a) != len(other) { - return false - } - for i := range a { - if !bytes.Equal(a[i].Key, other[i].Key) || !bytes.Equal(a[i].Value, other[i].Value) { - return false - } - } - return true -} - -// CompareTags returns -1 if a < b, 1 if a > b, and 0 if a == b. -func CompareTags(a, b Tags) int { - // Compare each key & value until a mismatch. - for i := 0; i < len(a) && i < len(b); i++ { - if cmp := bytes.Compare(a[i].Key, b[i].Key); cmp != 0 { - return cmp - } - if cmp := bytes.Compare(a[i].Value, b[i].Value); cmp != 0 { - return cmp - } - } - - // If all tags are equal up to this point then return shorter tagset. - if len(a) < len(b) { - return -1 - } else if len(a) > len(b) { - return 1 - } - - // All tags are equal. - return 0 -} - -// Get returns the value for a key. -func (a Tags) Get(key []byte) []byte { - // OPTIMIZE: Use sort.Search if tagset is large. - - for _, t := range a { - if bytes.Equal(t.Key, key) { - return t.Value - } - } - return nil -} - -// GetString returns the string value for a string key. -func (a Tags) GetString(key string) string { - return string(a.Get([]byte(key))) -} - -// Set sets the value for a key. -func (a *Tags) Set(key, value []byte) { - for i, t := range *a { - if bytes.Equal(t.Key, key) { - (*a)[i].Value = value - return - } - } - *a = append(*a, Tag{Key: key, Value: value}) - sort.Sort(*a) -} - -// SetString sets the string value for a string key. -func (a *Tags) SetString(key, value string) { - a.Set([]byte(key), []byte(value)) -} - -// Delete removes a tag by key. -func (a *Tags) Delete(key []byte) { - for i, t := range *a { - if bytes.Equal(t.Key, key) { - copy((*a)[i:], (*a)[i+1:]) - (*a)[len(*a)-1] = Tag{} - *a = (*a)[:len(*a)-1] - return - } - } -} - -// Map returns a map representation of the tags. 
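-// A new map is allocated on each call.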
-func (a Tags) Map() map[string]string {
- m := make(map[string]string, len(a))
- for _, t := range a {
- m[string(t.Key)] = string(t.Value)
- }
- return m
-}
-
-// Merge merges the tags, combining the two. If both define a tag with the
-// same key, the value from other overwrites the old value.
-// A new Tags is returned.
-func (a Tags) Merge(other map[string]string) Tags {
- merged := make(map[string]string, len(a)+len(other))
- for _, t := range a {
- merged[string(t.Key)] = string(t.Value)
- }
- for k, v := range other {
- merged[k] = v
- }
- return NewTags(merged)
-}
-
-// HashKey hashes all of a tag's keys and values.
-func (a Tags) HashKey() []byte {
- return a.AppendHashKey(nil)
-}
-
-func (a Tags) needsEscape() bool {
- for i := range a {
- t := &a[i]
- for j := range tagEscapeCodes {
- c := &tagEscapeCodes[j]
- if bytes.IndexByte(t.Key, c.k[0]) != -1 || bytes.IndexByte(t.Value, c.k[0]) != -1 {
- return true
- }
- }
- }
- return false
-}
-
-// AppendHashKey appends the result of hashing all of a tag's keys and values to dst and returns the extended buffer.
-func (a Tags) AppendHashKey(dst []byte) []byte {
- // Empty maps marshal to empty bytes.
- if len(a) == 0 {
- return dst
- }
-
- // Type invariant: Tags are sorted
-
- sz := 0
- var escaped Tags
- if a.needsEscape() {
- var tmp [20]Tag
- if len(a) < len(tmp) {
- escaped = tmp[:len(a)]
- } else {
- escaped = make(Tags, len(a))
- }
-
- for i := range a {
- t := &a[i]
- nt := &escaped[i]
- nt.Key = escapeTag(t.Key)
- nt.Value = escapeTag(t.Value)
- sz += len(nt.Key) + len(nt.Value)
- }
- } else {
- sz = a.Size()
- escaped = a
- }
-
- sz += len(escaped) + (len(escaped) * 2) // separators
-
- // Generate marshaled bytes.
- if cap(dst)-len(dst) < sz {
- nd := make([]byte, len(dst), len(dst)+sz)
- copy(nd, dst)
- dst = nd
- }
- buf := dst[len(dst) : len(dst)+sz]
- idx := 0
- for i := range escaped {
- k := &escaped[i]
- if len(k.Value) == 0 {
- continue
- }
- buf[idx] = ','
- idx++
- copy(buf[idx:], k.Key)
- idx += len(k.Key)
- buf[idx] = '='
- idx++
- copy(buf[idx:], k.Value)
- idx += len(k.Value)
- }
- return dst[:len(dst)+idx]
-}
-
-// CopyTags returns a shallow copy of tags.
-func CopyTags(a Tags) Tags {
- other := make(Tags, len(a))
- copy(other, a)
- return other
-}
-
-// DeepCopyTags returns a deep copy of tags.
-func DeepCopyTags(a Tags) Tags {
- // Calculate size of keys/values in bytes.
- var n int
- for _, t := range a {
- n += len(t.Key) + len(t.Value)
- }
-
- // Build single allocation for all key/values.
- buf := make([]byte, n)
-
- // Copy tags to new set.
- other := make(Tags, len(a))
- for i, t := range a {
- copy(buf, t.Key)
- other[i].Key, buf = buf[:len(t.Key)], buf[len(t.Key):]
-
- copy(buf, t.Value)
- other[i].Value, buf = buf[:len(t.Value)], buf[len(t.Value):]
- }
-
- return other
-}
-
-// Fields represents a mapping between a Point's field names and their
-// values.
-type Fields map[string]interface{}
-
-// FieldIterator returns a FieldIterator that can be used to traverse the
-// fields of a point without constructing the in-memory map.
-func (p *point) FieldIterator() FieldIterator {
- p.Reset()
- return p
-}
-
-type fieldIterator struct {
- start, end int
- key, keybuf []byte
- valueBuf []byte
- fieldType FieldType
-}
-
-// Next indicates whether there are any fields remaining.
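-// It also scans the next field's key and value, inferring the field type from the value's leading and trailing bytes.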
-func (p *point) Next() bool { - p.it.start = p.it.end - if p.it.start >= len(p.fields) { - return false - } - - p.it.end, p.it.key = scanTo(p.fields, p.it.start, '=') - if escape.IsEscaped(p.it.key) { - p.it.keybuf = escape.AppendUnescaped(p.it.keybuf[:0], p.it.key) - p.it.key = p.it.keybuf - } - - p.it.end, p.it.valueBuf = scanFieldValue(p.fields, p.it.end+1) - p.it.end++ - - if len(p.it.valueBuf) == 0 { - p.it.fieldType = Empty - return true - } - - c := p.it.valueBuf[0] - - if c == '"' { - p.it.fieldType = String - return true - } - - if strings.IndexByte(`0123456789-.nNiIu`, c) >= 0 { - if p.it.valueBuf[len(p.it.valueBuf)-1] == 'i' { - p.it.fieldType = Integer - p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1] - } else if p.it.valueBuf[len(p.it.valueBuf)-1] == 'u' { - p.it.fieldType = Unsigned - p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1] - } else { - p.it.fieldType = Float - } - return true - } - - // to keep the same behavior that currently exists, default to boolean - p.it.fieldType = Boolean - return true -} - -// FieldKey returns the key of the current field. -func (p *point) FieldKey() []byte { - return p.it.key -} - -// Type returns the FieldType of the current field. -func (p *point) Type() FieldType { - return p.it.fieldType -} - -// StringValue returns the string value of the current field. -func (p *point) StringValue() string { - return unescapeStringField(string(p.it.valueBuf[1 : len(p.it.valueBuf)-1])) -} - -// IntegerValue returns the integer value of the current field. -func (p *point) IntegerValue() (int64, error) { - n, err := parseIntBytes(p.it.valueBuf, 10, 64) - if err != nil { - return 0, fmt.Errorf("unable to parse integer value %q: %v", p.it.valueBuf, err) - } - return n, nil -} - -// UnsignedValue returns the unsigned value of the current field. -func (p *point) UnsignedValue() (uint64, error) { - n, err := parseUintBytes(p.it.valueBuf, 10, 64) - if err != nil { - return 0, fmt.Errorf("unable to parse unsigned value %q: %v", p.it.valueBuf, err) - } - return n, nil -} - -// BooleanValue returns the boolean value of the current field. -func (p *point) BooleanValue() (bool, error) { - b, err := parseBoolBytes(p.it.valueBuf) - if err != nil { - return false, fmt.Errorf("unable to parse bool value %q: %v", p.it.valueBuf, err) - } - return b, nil -} - -// FloatValue returns the float value of the current field. -func (p *point) FloatValue() (float64, error) { - f, err := parseFloatBytes(p.it.valueBuf, 64) - if err != nil { - return 0, fmt.Errorf("unable to parse floating point value %q: %v", p.it.valueBuf, err) - } - return f, nil -} - -// Reset resets the iterator to its initial state. -func (p *point) Reset() { - p.it.fieldType = Empty - p.it.key = nil - p.it.valueBuf = nil - p.it.start = 0 - p.it.end = 0 -} - -// MarshalBinary encodes all the fields to their proper type and returns the binary -// representation -// NOTE: uint64 is specifically not supported due to potential overflow when we decode -// again later to an int64 -// NOTE2: uint is accepted, and may be 64 bits, and is for some reason accepted... -func (p Fields) MarshalBinary() []byte { - sz := len(p) - 1 // separators - keys := make([]string, 0, len(p)) - for k := range p { - keys = append(keys, k) - sz += len(k) - } - - // Only sort if we have multiple fields to sort. - // This length check removes an allocation incurred by the sort. 
- if len(keys) > 1 { - sort.Strings(keys) - } - - b := make([]byte, 0, sz) - for i, k := range keys { - if i > 0 { - b = append(b, ',') - } - b = appendField(b, k, p[k]) - } - return b -} - -func appendField(b []byte, k string, v interface{}) []byte { - b = append(b, []byte(escape.String(k))...) - b = append(b, '=') - - // check popular types first - switch v := v.(type) { - case float64: - b = strconv.AppendFloat(b, v, 'f', -1, 64) - case int64: - b = strconv.AppendInt(b, v, 10) - b = append(b, 'i') - case string: - b = append(b, '"') - b = append(b, []byte(EscapeStringField(v))...) - b = append(b, '"') - case bool: - b = strconv.AppendBool(b, v) - case int32: - b = strconv.AppendInt(b, int64(v), 10) - b = append(b, 'i') - case int16: - b = strconv.AppendInt(b, int64(v), 10) - b = append(b, 'i') - case int8: - b = strconv.AppendInt(b, int64(v), 10) - b = append(b, 'i') - case int: - b = strconv.AppendInt(b, int64(v), 10) - b = append(b, 'i') - case uint64: - b = strconv.AppendUint(b, v, 10) - b = append(b, 'u') - case uint32: - b = strconv.AppendInt(b, int64(v), 10) - b = append(b, 'i') - case uint16: - b = strconv.AppendInt(b, int64(v), 10) - b = append(b, 'i') - case uint8: - b = strconv.AppendInt(b, int64(v), 10) - b = append(b, 'i') - case uint: - // TODO: 'uint' should be converted to writing as an unsigned integer, - // but we cannot since that would break backwards compatibility. - b = strconv.AppendInt(b, int64(v), 10) - b = append(b, 'i') - case float32: - b = strconv.AppendFloat(b, float64(v), 'f', -1, 32) - case []byte: - b = append(b, v...) - case nil: - // skip - default: - // Can't determine the type, so convert to string - b = append(b, '"') - b = append(b, []byte(EscapeStringField(fmt.Sprintf("%v", v)))...) - b = append(b, '"') - - } - - return b -} - -// ValidToken returns true if the provided token is a valid unicode string, and -// only contains printable, non-replacement characters. -func ValidToken(a []byte) bool { - if !utf8.Valid(a) { - return false - } - - for _, r := range string(a) { - if !unicode.IsPrint(r) || r == unicode.ReplacementChar { - return false - } - } - return true -} - -// ValidTagTokens returns true if all the provided tag key and values are -// valid. -// -// ValidTagTokens does not validate the special tag keys used to represent the -// measurement name and field key, but it does validate the associated values. -func ValidTagTokens(tags Tags) bool { - for _, tag := range tags { - // Validate all external tag keys. - if !bytes.Equal(tag.Key, MeasurementTagKeyBytes) && !bytes.Equal(tag.Key, FieldKeyTagKeyBytes) && !ValidToken(tag.Key) { - return false - } - - // Validate all tag values (this will also validate the field key, which is a tag value for the special field key tag key). - if !ValidToken(tag.Value) { - return false - } - } - return true -} - -// ValidKeyTokens returns true if the measurement name and all tags are valid. -func ValidKeyTokens(name string, tags Tags) bool { - if !ValidToken([]byte(name)) { - return false - } - - return ValidTagTokens(tags) -} - -var ( - errInvalidUTF8 = errors.New("invalid UTF-8 sequence") - errNonPrintable = errors.New("non-printable character") - errReplacementChar = fmt.Errorf("unicode replacement char %q cannot be used", unicode.ReplacementChar) -) - -// CheckToken returns an error when the given token is invalid -// for use as a tag or value key or measurement name. 
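-// Unlike ValidToken, it reports the specific reason the token was rejected.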
-func CheckToken(a []byte) error { - if !utf8.Valid(a) { - return errInvalidUTF8 - } - - for _, r := range string(a) { - if !unicode.IsPrint(r) { - return errNonPrintable - } - if r == unicode.ReplacementChar { - return errReplacementChar - } - } - return nil -} diff --git a/models/points_internal_test.go b/models/points_internal_test.go deleted file mode 100644 index 3a760d37b0e..00000000000 --- a/models/points_internal_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package models - -import "testing" - -func TestMarshalPointNoFields(t *testing.T) { - points, err := ParsePointsString("m,k=v f=0i") - if err != nil { - t.Fatal(err) - } - - // It's unclear how this can ever happen, but we've observed points that were marshalled without any fields. - points[0].(*point).fields = []byte{} - - if _, err := points[0].MarshalBinary(); err != ErrPointMustHaveAField { - t.Fatalf("got error %v, exp %v", err, ErrPointMustHaveAField) - } -} diff --git a/models/points_test.go b/models/points_test.go deleted file mode 100644 index c59856e215b..00000000000 --- a/models/points_test.go +++ /dev/null @@ -1,2609 +0,0 @@ -package models_test - -import ( - "bytes" - "errors" - "fmt" - "io" - "math" - "math/rand" - "reflect" - "strconv" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/models" -) - -var ( - tags = models.NewTags(map[string]string{"foo": "bar", "apple": "orange", "host": "serverA", "region": "uswest"}) - fields = models.Fields{ - "int64": int64(math.MaxInt64), - "uint32": uint32(math.MaxUint32), - "string": "String field that has a decent length, probably some log message or something", - "boolean": false, - "float64-tiny": float64(math.SmallestNonzeroFloat64), - "float64-large": float64(math.MaxFloat64), - } - maxFloat64 = strconv.FormatFloat(math.MaxFloat64, 'f', 1, 64) - minFloat64 = strconv.FormatFloat(-math.MaxFloat64, 'f', 1, 64) - - sink interface{} -) - -func TestMarshal(t *testing.T) { - got := tags.HashKey() - if exp := ",apple=orange,foo=bar,host=serverA,region=uswest"; string(got) != exp { - t.Log("got: ", string(got)) - t.Log("exp: ", exp) - t.Error("invalid match") - } -} - -func TestMarshalFields(t *testing.T) { - for _, tt := range []struct { - name string - value interface{} - exp string - }{ - { - name: "Float", - value: float64(2), - exp: `value=2`, - }, - { - name: "Integer", - value: int64(2), - exp: `value=2i`, - }, - { - name: "Unsigned", - value: uint64(2), - exp: `value=2u`, - }, - { - name: "String", - value: "foobar", - exp: `value="foobar"`, - }, - { - name: "Boolean", - value: true, - exp: `value=true`, - }, - } { - t.Run(tt.name, func(t *testing.T) { - fields := map[string]interface{}{"value": tt.value} - if have, want := models.Fields(fields).MarshalBinary(), []byte(tt.exp); !bytes.Equal(have, want) { - t.Fatalf("unexpected field output: %s != %s", string(have), string(want)) - } - }) - } -} - -func TestTags_HashKey(t *testing.T) { - tags = models.NewTags(map[string]string{"A FOO": "bar", "APPLE": "orange", "host": "serverA", "region": "uswest"}) - got := tags.HashKey() - if exp := ",A\\ FOO=bar,APPLE=orange,host=serverA,region=uswest"; string(got) != exp { - t.Log("got: ", string(got)) - t.Log("exp: ", exp) - t.Error("invalid match") - } -} - -func BenchmarkMarshal(b *testing.B) { - for i := 0; i < b.N; i++ { - tags.HashKey() - } -} - -func TestPoint_Tags(t *testing.T) { - examples := []struct { - Point string - Tags models.Tags - Err error - }{ - {`cpu value=1`, models.Tags{}, nil}, - {"cpu,tag0=v0 
value=1", models.NewTags(map[string]string{"tag0": "v0"}), nil}, - {"cpu,tag0=v0,tag1=v0 value=1", models.NewTags(map[string]string{"tag0": "v0", "tag1": "v0"}), nil}, - {`cpu,tag0=v\ 0 value=1`, models.NewTags(map[string]string{"tag0": "v 0"}), nil}, - {`cpu,tag0=v\ 0\ 1,tag1=v2 value=1`, models.NewTags(map[string]string{"tag0": "v 0 1", "tag1": "v2"}), nil}, - {`cpu,tag0=\, value=1`, models.NewTags(map[string]string{"tag0": ","}), nil}, - {`cpu,ta\ g0=\, value=1`, models.NewTags(map[string]string{"ta g0": ","}), nil}, - {`cpu,tag0=\,1 value=1`, models.NewTags(map[string]string{"tag0": ",1"}), nil}, - {`cpu,tag0=1\"\",t=k value=1`, models.NewTags(map[string]string{"tag0": `1\"\"`, "t": "k"}), nil}, - {"cpu,_measurement=v0,tag0=v0 value=1", nil, errors.New(`unable to parse 'cpu,_measurement=v0,tag0=v0 value=1': cannot use reserved tag key "_measurement"`)}, - // the following are all unsorted tag keys to ensure this works for both cases - {"cpu,tag0=v0,_measurement=v0 value=1", nil, errors.New(`unable to parse 'cpu,tag0=v0,_measurement=v0 value=1': cannot use reserved tag key "_measurement"`)}, - {"cpu,tag0=v0,_field=v0 value=1", nil, errors.New(`unable to parse 'cpu,tag0=v0,_field=v0 value=1': cannot use reserved tag key "_field"`)}, - {"cpu,tag0=v0,time=v0 value=1", nil, errors.New(`unable to parse 'cpu,tag0=v0,time=v0 value=1': cannot use reserved tag key "time"`)}, - } - - for _, example := range examples { - t.Run(example.Point, func(t *testing.T) { - pts, err := models.ParsePointsString(example.Point) - if err != nil { - if !reflect.DeepEqual(example.Err, err) { - t.Fatalf("expected %#v, found %#v", example.Err, err) - } - return - } - - if len(pts) != 1 { - t.Fatalf("parsed %d points, expected 1", len(pts)) - } - - // Repeat to test Tags() caching - for i := 0; i < 2; i++ { - tags := pts[0].Tags() - if !reflect.DeepEqual(tags, example.Tags) { - t.Fatalf("tag mismatch\ngot %s - %#v\nexp %s - %#v", tags.String(), tags, example.Tags.String(), example.Tags) - } - } - }) - } -} - -func TestPoint_StringSize(t *testing.T) { - testPoint_cube(t, func(p models.Point) { - l := p.StringSize() - s := p.String() - - if l != len(s) { - t.Errorf("Incorrect length for %q. got %v, exp %v", s, l, len(s)) - } - }) - -} - -func TestPoint_AppendString(t *testing.T) { - testPoint_cube(t, func(p models.Point) { - got := p.AppendString(nil) - exp := []byte(p.String()) - - if !reflect.DeepEqual(exp, got) { - t.Errorf("AppendString() didn't match String(): got %v, exp %v", got, exp) - } - }) -} - -func testPoint_cube(t *testing.T, f func(p models.Point)) { - // heard of a table-driven test? let's make a cube-driven test... 
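- // f is called with a point built from every combination of the tag sets, field sets, and timestamps below.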
- tagList := []models.Tags{nil, {models.NewTag([]byte("foo"), []byte("bar"))}, tags} - fieldList := []models.Fields{{"a": 42.0}, {"a": 42, "b": "things"}, fields} - timeList := []time.Time{time.Time{}, time.Unix(0, 0), time.Unix(-34526, 0), time.Unix(231845, 0), time.Now()} - - for _, tagSet := range tagList { - for _, fieldSet := range fieldList { - for _, pointTime := range timeList { - p, err := models.NewPoint("test", tagSet, fieldSet, pointTime) - if err != nil { - t.Errorf("unexpected error creating point: %v", err) - continue - } - - f(p) - } - } - } -} - -func TestTag_Clone(t *testing.T) { - tag := models.NewTag([]byte("key"), []byte("value")) - - c := tag.Clone() - - if &c.Key == &tag.Key || !bytes.Equal(c.Key, tag.Key) { - t.Fatalf("key %s should have been a clone of %s", c.Key, tag.Key) - } - - if &c.Value == &tag.Value || !bytes.Equal(c.Value, tag.Value) { - t.Fatalf("value %s should have been a clone of %s", c.Value, tag.Value) - } -} - -func TestTags_Clone(t *testing.T) { - tags := models.NewTags(map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}) - - clone := tags.Clone() - - for i := range tags { - tag := tags[i] - c := clone[i] - if &c.Key == &tag.Key || !bytes.Equal(c.Key, tag.Key) { - t.Fatalf("key %s should have been a clone of %s", c.Key, tag.Key) - } - - if &c.Value == &tag.Value || !bytes.Equal(c.Value, tag.Value) { - t.Fatalf("value %s should have been a clone of %s", c.Value, tag.Value) - } - } -} - -var p models.Point - -func BenchmarkNewPoint(b *testing.B) { - ts := time.Now() - for i := 0; i < b.N; i++ { - p, _ = models.NewPoint("measurement", tags, fields, ts) - } -} - -func BenchmarkNewPointFromBinary(b *testing.B) { - pts, err := models.ParsePointsString("cpu value1=1.0,value2=1.0,value3=3.0,value4=4,value5=\"five\" 1000000000") - if err != nil { - b.Fatalf("unexpected error ParsePointsString: %v", err) - } - - bytes, err := pts[0].MarshalBinary() - if err != nil { - b.Fatalf("unexpected error MarshalBinary: %v", err) - } - - for i := 0; i < b.N; i++ { - _, err := models.NewPointFromBytes(bytes) - if err != nil { - b.Fatalf("unexpected error NewPointsFromBytes: %v", err) - } - } -} - -func BenchmarkParsePointNoTags5000(b *testing.B) { - var batch [5000]string - for i := 0; i < len(batch); i++ { - batch[i] = `cpu value=1i 1000000000` - } - lines := strings.Join(batch[:], "\n") - b.ResetTimer() - for i := 0; i < b.N; i++ { - models.ParsePoints([]byte(lines)) - b.SetBytes(int64(len(lines))) - } -} - -func BenchmarkParsePointNoTags(b *testing.B) { - line := `cpu value=1i 1000000000` - for i := 0; i < b.N; i++ { - models.ParsePoints([]byte(line)) - b.SetBytes(int64(len(line))) - } -} - -func BenchmarkParsePointWithPrecisionN(b *testing.B) { - line := `cpu value=1i 1000000000` - defaultTime := time.Now().UTC() - for i := 0; i < b.N; i++ { - models.ParsePointsWithPrecision([]byte(line), defaultTime, "n") - b.SetBytes(int64(len(line))) - } -} - -func BenchmarkParsePointWithPrecisionU(b *testing.B) { - line := `cpu value=1i 1000000000` - defaultTime := time.Now().UTC() - for i := 0; i < b.N; i++ { - models.ParsePointsWithPrecision([]byte(line), defaultTime, "u") - b.SetBytes(int64(len(line))) - } -} - -func BenchmarkParsePointsTagsSorted2(b *testing.B) { - line := `cpu,host=serverA,region=us-west value=1i 1000000000` - for i := 0; i < b.N; i++ { - models.ParsePoints([]byte(line)) - b.SetBytes(int64(len(line))) - } -} - -func BenchmarkParsePointsTagsSorted5(b *testing.B) { - line := `cpu,env=prod,host=serverA,region=us-west,target=servers,zone=1c value=1i 
1000000000` - for i := 0; i < b.N; i++ { - models.ParsePoints([]byte(line)) - b.SetBytes(int64(len(line))) - } -} - -func BenchmarkParsePointsTagsSorted10(b *testing.B) { - line := `cpu,env=prod,host=serverA,region=us-west,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5,target=servers,zone=1c value=1i 1000000000` - for i := 0; i < b.N; i++ { - models.ParsePoints([]byte(line)) - b.SetBytes(int64(len(line))) - } -} - -func BenchmarkParsePointsTagsUnSorted2(b *testing.B) { - line := `cpu,region=us-west,host=serverA value=1i 1000000000` - for i := 0; i < b.N; i++ { - pt, _ := models.ParsePoints([]byte(line)) - b.SetBytes(int64(len(line))) - pt[0].Key() - } -} - -func BenchmarkParsePointsTagsUnSorted5(b *testing.B) { - line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c value=1i 1000000000` - for i := 0; i < b.N; i++ { - pt, _ := models.ParsePoints([]byte(line)) - b.SetBytes(int64(len(line))) - pt[0].Key() - } -} - -func BenchmarkParsePointsTagsUnSorted10(b *testing.B) { - line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5 value=1i 1000000000` - for i := 0; i < b.N; i++ { - pt, _ := models.ParsePoints([]byte(line)) - b.SetBytes(int64(len(line))) - pt[0].Key() - } -} - -func BenchmarkParseKey(b *testing.B) { - line := `cpu,region=us-west,host=serverA,env=prod,target=servers,zone=1c,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5` - for i := 0; i < b.N; i++ { - models.ParseKey([]byte(line)) - } -} - -// TestPoint wraps a models.Point but also makes available the raw -// arguments to the Point. -// -// This is useful for ensuring that comparisons between results of -// operations on Points match the expected input data to the Point, -// since models.Point does not expose the raw input data (e.g., tags) -// via its API. -type TestPoint struct { - RawFields models.Fields - RawTags models.Tags - RawTime time.Time - models.Point -} - -// NewTestPoint returns a new TestPoint. -// -// NewTestPoint panics if it is not a valid models.Point. -func NewTestPoint(name string, tags models.Tags, fields models.Fields, time time.Time) TestPoint { - return TestPoint{ - RawTags: tags, - RawFields: fields, - RawTime: time, - Point: models.MustNewPoint(name, tags, fields, time), - } -} - -func test(t *testing.T, line string, point TestPoint) { - pts, err := models.ParsePointsWithPrecision([]byte(line), time.Unix(0, 0), "n") - if err != nil { - t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, line, err) - } - - if exp := 1; len(pts) != exp { - t.Fatalf(`ParsePoints("%s") len mismatch. got %d, exp %d`, line, len(pts), exp) - } - - if exp := point.Key(); !bytes.Equal(pts[0].Key(), exp) { - t.Errorf("ParsePoints(\"%s\") key mismatch.\ngot %v\nexp %v", line, string(pts[0].Key()), string(exp)) - } - - if exp := len(point.Tags()); len(pts[0].Tags()) != exp { - t.Errorf(`ParsePoints("%s") tags mismatch. got %v, exp %v`, line, pts[0].Tags(), exp) - } - - for _, tag := range pts[0].Tags() { - if !bytes.Equal(tag.Value, point.RawTags.Get(tag.Key)) { - t.Errorf(`ParsePoints("%s") tags mismatch. got %s, exp %s`, line, tag.Value, point.RawTags.Get(tag.Key)) - } - } - - for name, value := range point.RawFields { - fields, err := pts[0].Fields() - if err != nil { - t.Fatal(err) - } - val := fields[name] - expfval, ok := val.(float64) - - if ok && math.IsNaN(expfval) { - gotfval, ok := value.(float64) - if ok && !math.IsNaN(gotfval) { - t.Errorf(`ParsePoints("%s") field '%s' mismatch. 
exp NaN`, line, name) - } - } - if !reflect.DeepEqual(val, value) { - t.Errorf(`ParsePoints("%s") field '%s' mismatch. got %[3]v (%[3]T), exp %[4]v (%[4]T)`, line, name, val, value) - } - } - - if !pts[0].Time().Equal(point.Time()) { - t.Errorf(`ParsePoints("%s") time mismatch. got %v, exp %v`, line, pts[0].Time(), point.Time()) - } - - if !strings.HasPrefix(pts[0].String(), line) { - t.Errorf("ParsePoints string mismatch.\ngot: %v\nexp: %v", pts[0].String(), line) - } -} - -func TestParsePointNoValue(t *testing.T) { - pts, err := models.ParsePointsString("") - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, "", err) - } - - if exp := 0; len(pts) != exp { - t.Errorf(`ParsePoints("%s") len mismatch. got %v, exp %v`, "", len(pts), exp) - } -} - -func TestParsePointWhitespaceValue(t *testing.T) { - pts, err := models.ParsePointsString(" ") - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, "", err) - } - - if exp := 0; len(pts) != exp { - t.Errorf(`ParsePoints("%s") len mismatch. got %v, exp %v`, "", len(pts), exp) - } -} - -func TestParsePointNoFields(t *testing.T) { - expectedSuffix := "missing fields" - examples := []string{ - "cpu_load_short,host=server01,region=us-west", - "cpu", - "cpu,host==", - "=", - } - - for i, example := range examples { - _, err := models.ParsePointsString(example) - if err == nil { - t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) - } else if !strings.HasSuffix(err.Error(), expectedSuffix) { - t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) - } - } -} - -func TestParsePointNoTimestamp(t *testing.T) { - test(t, "cpu value=1", NewTestPoint("cpu", nil, models.Fields{"value": 1.0}, time.Unix(0, 0))) -} - -func TestParsePointMissingQuote(t *testing.T) { - expectedSuffix := "unbalanced quotes" - examples := []string{ - `cpu,host=serverA value="test`, - `cpu,host=serverA value="test""`, - } - - for i, example := range examples { - _, err := models.ParsePointsString(example) - if err == nil { - t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) - } else if !strings.HasSuffix(err.Error(), expectedSuffix) { - t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) - } - } -} - -func TestParsePointMissingTagKey(t *testing.T) { - expectedSuffix := "missing tag key" - examples := []string{ - `cpu, value=1`, - `cpu,`, - `cpu,,,`, - `cpu,host=serverA,=us-east value=1i`, - `cpu,host=serverAa\,,=us-east value=1i`, - `cpu,host=serverA\,,=us-east value=1i`, - `cpu, =serverA value=1i`, - } - - for i, example := range examples { - _, err := models.ParsePointsString(example) - if err == nil { - t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) - } else if !strings.HasSuffix(err.Error(), expectedSuffix) { - t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) - } - } - - _, err := models.ParsePointsString(`cpu,host=serverA,\ =us-east value=1i`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. 
got %v, exp nil`, `cpu,host=serverA,\ =us-east value=1i`, err) - } -} - -func TestParsePointMissingTagValue(t *testing.T) { - expectedSuffix := "missing tag value" - examples := []string{ - `cpu,host`, - `cpu,host,`, - `cpu,host=`, - `cpu,host value=1i`, - `cpu,host=serverA,region value=1i`, - `cpu,host=serverA,region= value=1i`, - `cpu,host=serverA,region=,zone=us-west value=1i`, - } - - for i, example := range examples { - _, err := models.ParsePointsString(example) - if err == nil { - t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) - } else if !strings.HasSuffix(err.Error(), expectedSuffix) { - t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) - } - } -} - -func TestParsePointInvalidTagFormat(t *testing.T) { - expectedSuffix := "invalid tag format" - examples := []string{ - `cpu,host=f=o,`, - `cpu,host=f\==o,`, - } - - for i, example := range examples { - _, err := models.ParsePointsString(example) - if err == nil { - t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got nil, exp error`, i, example) - } else if !strings.HasSuffix(err.Error(), expectedSuffix) { - t.Errorf(`[Example %d] ParsePoints("%s") mismatch. got %q, exp suffix %q`, i, example, err, expectedSuffix) - } - } -} - -func TestParsePointMissingFieldName(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west =`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west =`) - } - - _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west =123i`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west =123i`) - } - - _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west a\ =123i`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west a\ =123i`) - } - _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=123i,=456i`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=123i,=456i`) - } -} - -func TestParsePointMissingFieldValue(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=`) - } - - _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value= 1000000000i`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value= 1000000000i`) - } - - _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=,value2=1i`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=,value2=1i`) - } - - _, err = models.ParsePointsString(`cpu,host=server01,region=us-west 1434055562000000000i`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=server01,region=us-west 1434055562000000000i`) - } - - _, err = models.ParsePointsString(`cpu,host=server01,region=us-west value=1i,b`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=server01,region=us-west value=1i,b`) - } - - _, err = models.ParsePointsString(`m f="blah"=123,r 1531703600000000000`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. 
got nil, exp error`, `m f="blah"=123,r 1531703600000000000`) - } -} - -func TestParsePointBadNumber(t *testing.T) { - for _, tt := range []string{ - "cpu v=- ", - "cpu v=-i ", - "cpu v=-. ", - "cpu v=. ", - "cpu v=1.0i ", - "cpu v=1ii ", - "cpu v=1a ", - "cpu v=-e-e-e ", - "cpu v=42+3 ", - "cpu v= ", - "cpu v=-123u", - } { - _, err := models.ParsePointsString(tt) - if err == nil { - t.Errorf("Point %q should be invalid", tt) - } - } -} - -func TestParsePointMaxInt64(t *testing.T) { - // out of range - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9223372036854775808i`) - exp := `unable to parse 'cpu,host=serverA,region=us-west value=9223372036854775808i': unable to parse integer 9223372036854775808: strconv.ParseInt: parsing "9223372036854775808": value out of range` - if err == nil || (err != nil && err.Error() != exp) { - t.Fatalf("Error mismatch:\nexp: %s\ngot: %v", exp, err) - } - - // max int - p, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9223372036854775807i`) - if err != nil { - t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=9223372036854775807i`, err) - } - fields, err := p[0].Fields() - if err != nil { - t.Fatal(err) - } - if exp, got := int64(9223372036854775807), fields["value"].(int64); exp != got { - t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) - } - - // leading zeros - _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=0009223372036854775807i`) - if err != nil { - t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0009223372036854775807i`, err) - } -} - -func TestParsePointMinInt64(t *testing.T) { - // out of range - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-9223372036854775809i`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-9223372036854775809i`) - } - - // min int - p, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-9223372036854775808i`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-9223372036854775808i`, err) - } - fields, err := p[0].Fields() - if err != nil { - t.Fatal(err) - } - if exp, got := int64(-9223372036854775808), fields["value"].(int64); exp != got { - t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) - } - - // leading zeros - _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=-0009223372036854775808i`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-0009223372036854775808i`, err) - } -} - -func TestParsePointMaxFloat64(t *testing.T) { - // out of range - _, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "1"+string(maxFloat64))) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=...`) - } - - // max float - p, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, string(maxFloat64))) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=9223372036854775807`, err) - } - fields, err := p[0].Fields() - if err != nil { - t.Fatal(err) - } - if exp, got := math.MaxFloat64, fields["value"].(float64); exp != got { - t.Fatalf("ParsePoints Value mismatch. 
\nexp: %v\ngot: %v", exp, got) - } - - // leading zeros - _, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "0000"+string(maxFloat64))) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0009223372036854775807`, err) - } -} - -func TestParsePointMinFloat64(t *testing.T) { - // out of range - _, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "-1"+string(minFloat64)[1:])) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=...`) - } - - // min float - p, err := models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, string(minFloat64))) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=...`, err) - } - fields, err := p[0].Fields() - if err != nil { - t.Fatal(err) - } - if exp, got := -math.MaxFloat64, fields["value"].(float64); exp != got { - t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) - } - - // leading zeros - _, err = models.ParsePointsString(fmt.Sprintf(`cpu,host=serverA,region=us-west value=%s`, "-0000000"+string(minFloat64)[1:])) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=...`, err) - } -} - -func TestParsePointMaxUint64(t *testing.T) { - // out of range - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=18446744073709551616u`) - exp := `unable to parse 'cpu,host=serverA,region=us-west value=18446744073709551616u': unable to parse unsigned 18446744073709551616: strconv.ParseUint: parsing "18446744073709551616": value out of range` - if err == nil || (err != nil && err.Error() != exp) { - t.Fatalf("Error mismatch:\nexp: %s\ngot: %v", exp, err) - } - - // max int - p, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=18446744073709551615u`) - if err != nil { - t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=18446744073709551615u`, err) - } - fields, err := p[0].Fields() - if err != nil { - t.Fatal(err) - } - if exp, got := uint64(18446744073709551615), fields["value"].(uint64); exp != got { - t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) - } - - // leading zeros - _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=00018446744073709551615u`) - if err != nil { - t.Fatalf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=00018446744073709551615u`, err) - } -} - -func TestParsePointMinUint64(t *testing.T) { - // out of range - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=--1u`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-1u`) - } - - // min int - p, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=0u`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=0u`, err) - } - fields, err := p[0].Fields() - if err != nil { - t.Fatal(err) - } - if exp, got := uint64(0), fields["value"].(uint64); exp != got { - t.Fatalf("ParsePoints Value mismatch. \nexp: %v\ngot: %v", exp, got) - } - - // leading zeros - _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=0000u`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. 
got %v, exp nil`, `cpu,host=serverA,region=us-west value=0000u`, err) - } -} - -func TestParsePointNumberNonNumeric(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=.1a`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=.1a`) - } -} - -func TestParsePointNegativeWrongPlace(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=0.-1`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=0.-1`) - } -} - -func TestParsePointOnlyNegativeSign(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=-`) - } -} - -func TestParsePointFloatMultipleDecimals(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.1.1`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=1.1.1`) - } -} - -func TestParsePointInteger(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1i`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1i`, err) - } -} - -func TestParsePointNegativeInteger(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1i`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1i`, err) - } -} - -func TestParsePointNegativeFloat(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1.0`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0`, err) - } -} - -func TestParsePointFloatNoLeadingDigit(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=.1`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0`, err) - } -} - -func TestParsePointFloatScientific(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0e4`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e4`, err) - } - - pts, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1e4`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e4`, err) - } - - fields, err := pts[0].Fields() - if err != nil { - t.Fatal(err) - } - if fields["value"] != 1e4 { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1e4`, err) - } -} - -func TestParsePointFloatScientificUpper(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0E4`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0E4`, err) - } - - pts, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1E4`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. 
got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0E4`, err) - } - - fields, err := pts[0].Fields() - if err != nil { - t.Fatal(err) - } - if fields["value"] != 1e4 { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1E4`, err) - } -} - -func TestParsePointFloatScientificDecimal(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=1.0e-4`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=1.0e-4`, err) - } -} - -func TestParsePointFloatNegativeScientific(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=-1.0e-4`) - if err != nil { - t.Errorf(`ParsePoints("%s") mismatch. got %v, exp nil`, `cpu,host=serverA,region=us-west value=-1.0e-4`, err) - } -} - -func TestParsePointBooleanInvalid(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=a`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=a`) - } -} - -func TestParsePointScientificIntInvalid(t *testing.T) { - _, err := models.ParsePointsString(`cpu,host=serverA,region=us-west value=9ie10`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=9ie10`) - } - - _, err = models.ParsePointsString(`cpu,host=serverA,region=us-west value=9e10i`) - if err == nil { - t.Errorf(`ParsePoints("%s") mismatch. got nil, exp error`, `cpu,host=serverA,region=us-west value=9e10i`) - } -} - -func TestParsePointWhitespace(t *testing.T) { - examples := []string{ - `cpu value=1.0 1257894000000000000`, - `cpu value=1.0 1257894000000000000`, - `cpu value=1.0 1257894000000000000`, - `cpu value=1.0 1257894000000000000 `, - `cpu value=1.0 1257894000000000000 -`, - `cpu value=1.0 1257894000000000000 -`, - } - - expPoint := NewTestPoint("cpu", models.Tags{}, models.Fields{"value": 1.0}, time.Unix(0, 1257894000000000000)) - for i, example := range examples { - pts, err := models.ParsePoints([]byte(example)) - if err != nil { - t.Fatalf(`[Example %d] ParsePoints("%s") error. 
got %v, exp nil`, i, example, err) - } - - if got, exp := len(pts), 1; got != exp { - t.Fatalf("[Example %d] got %d points, expected %d", i, got, exp) - } - - if got, exp := string(pts[0].Name()), string(expPoint.Name()); got != exp { - t.Fatalf("[Example %d] got %v measurement, expected %v", i, got, exp) - } - - fields, err := pts[0].Fields() - if err != nil { - t.Fatal(err) - } - eFields, err := expPoint.Fields() - if err != nil { - t.Fatal(err) - } - if got, exp := len(fields), len(eFields); got != exp { - t.Fatalf("[Example %d] got %d fields, expected %d", i, got, exp) - } - - if got, exp := fields["value"], eFields["value"]; got != exp { - t.Fatalf(`[Example %d] got %v for field "value", expected %v`, i, got, exp) - } - - if got, exp := pts[0].Time().UnixNano(), expPoint.Time().UnixNano(); got != exp { - t.Fatalf(`[Example %d] got %d time, expected %d`, i, got, exp) - } - } -} - -func TestParsePointUnescape(t *testing.T) { - // commas in measurement name - test(t, `foo\,bar value=1i`, - NewTestPoint( - "foo,bar", // comma in the name - models.NewTags(map[string]string{}), - models.Fields{ - "value": int64(1), - }, - time.Unix(0, 0))) - - // comma in measurement name with tags - test(t, `cpu\,main,regions=east value=1.0`, - NewTestPoint( - "cpu,main", // comma in the name - models.NewTags(map[string]string{ - "regions": "east", - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, 0))) - - // spaces in measurement name - test(t, `cpu\ load,region=east value=1.0`, - NewTestPoint( - "cpu load", // space in the name - models.NewTags(map[string]string{ - "region": "east", - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, 0))) - - // equals in measurement name - test(t, `cpu\=load,region=east value=1.0`, - NewTestPoint( - `cpu\=load`, // backslash is literal - models.NewTags(map[string]string{ - "region": "east", - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, 0))) - - // equals in measurement name - test(t, `cpu=load,region=east value=1.0`, - NewTestPoint( - `cpu=load`, // literal equals is fine in measurement name - models.NewTags(map[string]string{ - "region": "east", - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, 0))) - - // commas in tag names - test(t, `cpu,region\,zone=east value=1.0`, - NewTestPoint("cpu", - models.NewTags(map[string]string{ - "region,zone": "east", // comma in the tag key - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, 0))) - - // spaces in tag name - test(t, `cpu,region\ zone=east value=1.0`, - NewTestPoint("cpu", - models.NewTags(map[string]string{ - "region zone": "east", // space in the tag name - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, 0))) - - // backslash with escaped equals in tag name - test(t, `cpu,reg\\=ion=east value=1.0`, - NewTestPoint("cpu", - models.NewTags(map[string]string{ - `reg\=ion`: "east", - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, 0))) - - // space is tag name - test(t, `cpu,\ =east value=1.0`, - NewTestPoint("cpu", - models.NewTags(map[string]string{ - " ": "east", // tag name is single space - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, 0))) - - // commas in tag values - test(t, `cpu,regions=east\,west value=1.0`, - NewTestPoint("cpu", - models.NewTags(map[string]string{ - "regions": "east,west", // comma in the tag value - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, 0))) - - // backslash literal followed by escaped space - test(t, `cpu,regions=\\ east value=1.0`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - 
"regions": `\ east`, - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, 0))) - - // backslash literal followed by escaped space - test(t, `cpu,regions=eas\\ t value=1.0`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - "regions": `eas\ t`, - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, 0))) - - // backslash literal followed by trailing space - test(t, `cpu,regions=east\\ value=1.0`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - "regions": `east\ `, - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, 0))) - - // spaces in tag values - test(t, `cpu,regions=east\ west value=1.0`, - NewTestPoint("cpu", - models.NewTags(map[string]string{ - "regions": "east west", // comma in the tag value - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, 0))) - - // commas in field keys - test(t, `cpu,regions=east value\,ms=1.0`, - NewTestPoint("cpu", - models.NewTags(map[string]string{ - "regions": "east", - }), - models.Fields{ - "value,ms": 1.0, // comma in the field keys - }, - time.Unix(0, 0))) - - // spaces in field keys - test(t, `cpu,regions=east value\ ms=1.0`, - NewTestPoint("cpu", - models.NewTags(map[string]string{ - "regions": "east", - }), - models.Fields{ - "value ms": 1.0, // comma in the field keys - }, - time.Unix(0, 0))) - - // tag with no value - test(t, `cpu,regions=east value="1"`, - NewTestPoint("cpu", - models.NewTags(map[string]string{ - "regions": "east", - "foobar": "", - }), - models.Fields{ - "value": "1", - }, - time.Unix(0, 0))) - - // commas in field values - test(t, `cpu,regions=east value="1,0"`, - NewTestPoint("cpu", - models.NewTags(map[string]string{ - "regions": "east", - }), - models.Fields{ - "value": "1,0", // comma in the field value - }, - time.Unix(0, 0))) - - // random character escaped - test(t, `cpu,regions=eas\t value=1.0`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - "regions": "eas\\t", - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, 0))) - - // backslash literal followed by escaped characters - test(t, `cpu,regions=\\,\,\=east value=1.0`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - "regions": `\,,=east`, - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, 0))) - - // field keys using escape char. - test(t, `cpu \a=1i`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{}), - models.Fields{ - "\\a": int64(1), // Left as parsed since it's not a known escape sequence. 
- }, - time.Unix(0, 0))) - - // measurement, tag and tag value with equals - test(t, `cpu=load,equals\=foo=tag\=value value=1i`, - NewTestPoint( - "cpu=load", // Not escaped - models.NewTags(map[string]string{ - "equals=foo": "tag=value", // Tag and value unescaped - }), - models.Fields{ - "value": int64(1), - }, - time.Unix(0, 0))) - -} - -func TestParsePointWithTags(t *testing.T) { - test(t, - "cpu,host=serverA,region=us-east value=1.0 1000000000", - NewTestPoint("cpu", - models.NewTags(map[string]string{"host": "serverA", "region": "us-east"}), - models.Fields{"value": 1.0}, time.Unix(1, 0))) -} - -func TestParsePointWithDuplicateTags(t *testing.T) { - for i, tt := range []struct { - line string - err string - }{ - { - line: `cpu,host=serverA,host=serverB value=1i 1000000000`, - err: `unable to parse 'cpu,host=serverA,host=serverB value=1i 1000000000': duplicate tags`, - }, - { - line: `cpu,b=2,b=1,c=3 value=1i 1000000000`, - err: `unable to parse 'cpu,b=2,b=1,c=3 value=1i 1000000000': duplicate tags`, - }, - { - line: `cpu,b=2,c=3,b=1 value=1i 1000000000`, - err: `unable to parse 'cpu,b=2,c=3,b=1 value=1i 1000000000': duplicate tags`, - }, - } { - _, err := models.ParsePointsString(tt.line) - if err == nil || tt.err != err.Error() { - t.Errorf("%d. ParsePoint() expected error '%s'. got '%s'", i, tt.err, err) - } - } -} - -func TestParsePointWithVariousTags(t *testing.T) { - line := "m" - for i := 0; i < 1000; i++ { - line += fmt.Sprintf(",t%d=x", i+1) - _, err := models.ParsePointsString(line + " v=0") - if err != nil { - t.Errorf(`ParsePoints("%s") failed`, line) - } - } -} - -func TestParsePointWithStringField(t *testing.T) { - test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo",str2="bar" 1000000000`, - NewTestPoint("cpu", - models.NewTags(map[string]string{ - "host": "serverA", - "region": "us-east", - }), - models.Fields{ - "value": 1.0, - "str": "foo", - "str2": "bar", - }, - time.Unix(1, 0)), - ) - - test(t, `cpu,host=serverA,region=us-east str="foo \" bar" 1000000000`, - NewTestPoint("cpu", - models.NewTags(map[string]string{ - "host": "serverA", - "region": "us-east", - }), - models.Fields{ - "str": `foo " bar`, - }, - time.Unix(1, 0)), - ) - -} - -func TestParsePointWithStringWithSpaces(t *testing.T) { - test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo bar" 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - "host": "serverA", - "region": "us-east", - }), - models.Fields{ - "value": 1.0, - "str": "foo bar", // spaces in string value - }, - time.Unix(1, 0)), - ) -} - -func TestParsePointWithStringWithNewline(t *testing.T) { - test(t, "cpu,host=serverA,region=us-east value=1.0,str=\"foo\nbar\" 1000000000", - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - "host": "serverA", - "region": "us-east", - }), - models.Fields{ - "value": 1.0, - "str": "foo\nbar", // newline in string value - }, - time.Unix(1, 0)), - ) -} - -func TestParsePointWithStringWithCommas(t *testing.T) { - // escaped comma - test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo\,bar" 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - "host": "serverA", - "region": "us-east", - }), - models.Fields{ - "value": 1.0, - "str": `foo\,bar`, // commas in string value - }, - time.Unix(1, 0)), - ) - - // non-escaped comma - test(t, `cpu,host=serverA,region=us-east value=1.0,str="foo,bar" 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - "host": "serverA", - "region": "us-east", - }), - models.Fields{ - 
"value": 1.0, - "str": "foo,bar", // commas in string value - }, - time.Unix(1, 0)), - ) - - // string w/ trailing escape chars - test(t, `cpu,host=serverA,region=us-east str="foo\\",str2="bar" 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - "host": "serverA", - "region": "us-east", - }), - models.Fields{ - "str": "foo\\", // trailing escape char - "str2": "bar", - }, - time.Unix(1, 0)), - ) -} - -func TestParsePointQuotedMeasurement(t *testing.T) { - // non-escaped comma - test(t, `"cpu",host=serverA,region=us-east value=1.0 1000000000`, - NewTestPoint( - `"cpu"`, - models.NewTags(map[string]string{ - "host": "serverA", - "region": "us-east", - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(1, 0)), - ) -} - -func TestParsePointQuotedTags(t *testing.T) { - test(t, `cpu,"host"="serverA",region=us-east value=1.0 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - `"host"`: `"serverA"`, - "region": "us-east", - }), - models.Fields{ - "value": 1.0, - }, - time.Unix(1, 0)), - ) -} - -func TestParsePoint_TrailingSlash(t *testing.T) { - _, err := models.ParsePointsString(`a v=1 0\`) - if err == nil { - t.Fatalf("ParsePoints failed: %v", err) - } else if !strings.Contains(err.Error(), "bad timestamp") { - t.Fatalf("ParsePoints unexpected error: %v", err) - } -} - -func TestParsePointsUnbalancedQuotedTags(t *testing.T) { - pts, err := models.ParsePointsString("baz,mytag=\"a x=1 1441103862125\nbaz,mytag=a z=1 1441103862126") - if err != nil { - t.Fatalf("ParsePoints failed: %v", err) - } - - if exp := 2; len(pts) != exp { - t.Fatalf("ParsePoints count mismatch. got %v, exp %v", len(pts), exp) - } - - // Expected " in the tag value - exp := models.MustNewPoint("baz", models.NewTags(map[string]string{"mytag": `"a`}), - models.Fields{"x": float64(1)}, time.Unix(0, 1441103862125)) - - if pts[0].String() != exp.String() { - t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[0].String(), exp.String()) - } - - // Expected two points to ensure we did not overscan the line - exp = models.MustNewPoint("baz", models.NewTags(map[string]string{"mytag": `a`}), - models.Fields{"z": float64(1)}, time.Unix(0, 1441103862126)) - - if pts[1].String() != exp.String() { - t.Errorf("Point mismatch:\ngot: %v\nexp: %v", pts[1].String(), exp.String()) - } - -} - -func TestParsePointEscapedStringsAndCommas(t *testing.T) { - // non-escaped comma and quotes - test(t, `cpu,host=serverA,region=us-east value="{Hello\"{,}\" World}" 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - "host": "serverA", - "region": "us-east", - }), - models.Fields{ - "value": `{Hello"{,}" World}`, - }, - time.Unix(1, 0)), - ) - - // escaped comma and quotes - test(t, `cpu,host=serverA,region=us-east value="{Hello\"{\,}\" World}" 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - "host": "serverA", - "region": "us-east", - }), - models.Fields{ - "value": `{Hello"{\,}" World}`, - }, - time.Unix(1, 0)), - ) -} - -func TestParsePointWithStringWithEquals(t *testing.T) { - test(t, `cpu,host=serverA,region=us-east str="foo=bar",value=1.0 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - "host": "serverA", - "region": "us-east", - }), - models.Fields{ - "value": 1.0, - "str": "foo=bar", // spaces in string value - }, - time.Unix(1, 0)), - ) -} - -func TestParsePointWithStringWithBackslash(t *testing.T) { - test(t, `cpu value="test\\\"" 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{}), - models.Fields{ 
- "value": `test\"`, - }, - time.Unix(1, 0)), - ) - - test(t, `cpu value="test\\" 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{}), - models.Fields{ - "value": `test\`, - }, - time.Unix(1, 0)), - ) - - test(t, `cpu value="test\\\"" 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{}), - models.Fields{ - "value": `test\"`, - }, - time.Unix(1, 0)), - ) - - test(t, `cpu value="test\"" 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{}), - models.Fields{ - "value": `test"`, - }, - time.Unix(1, 0)), - ) -} - -func TestParsePointWithBoolField(t *testing.T) { - test(t, `cpu,host=serverA,region=us-east true=true,t=t,T=T,TRUE=TRUE,True=True,false=false,f=f,F=F,FALSE=FALSE,False=False 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - "host": "serverA", - "region": "us-east", - }), - models.Fields{ - "t": true, - "T": true, - "true": true, - "True": true, - "TRUE": true, - "f": false, - "F": false, - "false": false, - "False": false, - "FALSE": false, - }, - time.Unix(1, 0)), - ) -} - -func TestParsePointUnicodeString(t *testing.T) { - test(t, `cpu,host=serverA,region=us-east value="wè" 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{ - "host": "serverA", - "region": "us-east", - }), - models.Fields{ - "value": "wè", - }, - time.Unix(1, 0)), - ) -} - -func TestParsePointNegativeTimestamp(t *testing.T) { - test(t, `cpu value=1 -1`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{}), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, -1)), - ) -} - -func TestParsePointMaxTimestamp(t *testing.T) { - test(t, fmt.Sprintf(`cpu value=1 %d`, models.MaxNanoTime), - NewTestPoint( - "cpu", - models.NewTags(map[string]string{}), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, models.MaxNanoTime)), - ) -} - -func TestParsePointMinTimestamp(t *testing.T) { - test(t, `cpu value=1 -9223372036854775806`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{}), - models.Fields{ - "value": 1.0, - }, - time.Unix(0, models.MinNanoTime)), - ) -} - -func TestParsePointInvalidTimestamp(t *testing.T) { - examples := []string{ - "cpu value=1 9223372036854775808", - "cpu value=1 -92233720368547758078", - "cpu value=1 -", - "cpu value=1 -/", - "cpu value=1 -1?", - "cpu value=1 1-", - "cpu value=1 9223372036854775807 12", - } - - for i, example := range examples { - _, err := models.ParsePointsString(example) - if err == nil { - t.Fatalf("[Example %d] ParsePoints failed: %v", i, err) - } - } -} - -func TestNewPointFloatWithoutDecimal(t *testing.T) { - test(t, `cpu value=1 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{}), - models.Fields{ - "value": 1.0, - }, - time.Unix(1, 0)), - ) -} -func TestNewPointNegativeFloat(t *testing.T) { - test(t, `cpu value=-0.64 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{}), - models.Fields{ - "value": -0.64, - }, - time.Unix(1, 0)), - ) -} - -func TestNewPointFloatNoDecimal(t *testing.T) { - test(t, `cpu value=1. 
1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{}), - models.Fields{ - "value": 1.0, - }, - time.Unix(1, 0)), - ) -} - -func TestNewPointFloatScientific(t *testing.T) { - test(t, `cpu value=6.632243e+06 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{}), - models.Fields{ - "value": float64(6632243), - }, - time.Unix(1, 0)), - ) -} - -func TestNewPointLargeInteger(t *testing.T) { - test(t, `cpu value=6632243i 1000000000`, - NewTestPoint( - "cpu", - models.NewTags(map[string]string{}), - models.Fields{ - "value": int64(6632243), // if incorrectly encoded as a float, it would show up as 6.632243e+06 - }, - time.Unix(1, 0)), - ) -} - -func TestParsePointNaN(t *testing.T) { - _, err := models.ParsePointsString("cpu value=NaN 1000000000") - if err == nil { - t.Fatalf("ParsePoints expected error, got nil") - } - - _, err = models.ParsePointsString("cpu value=nAn 1000000000") - if err == nil { - t.Fatalf("ParsePoints expected error, got nil") - } - - _, err = models.ParsePointsString("cpu value=NaN") - if err == nil { - t.Fatalf("ParsePoints expected error, got nil") - } -} - -func TestNewPointLargeNumberOfTags(t *testing.T) { - tags := "" - for i := 0; i < 255; i++ { - tags += fmt.Sprintf(",tag%d=value%d", i, i) - } - - pt, err := models.ParsePointsString(fmt.Sprintf("cpu%s value=1", tags)) - if err != nil { - t.Fatalf("ParsePoints() with max tags failed: %v", err) - } - - if len(pt[0].Tags()) != 255 { - t.Fatalf("expected %d tags, got %d", 255, len(pt[0].Tags())) - } -} - -func TestParsePointIntsFloats(t *testing.T) { - pts, err := models.ParsePoints([]byte(`cpu,host=serverA,region=us-east int=10i,float=11.0,float2=12.1 1000000000`)) - if err != nil { - t.Fatalf(`ParsePoints() failed. got %s`, err) - } - - if exp := 1; len(pts) != exp { - t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp) - } - pt := pts[0] - - fields, err := pt.Fields() - if err != nil { - t.Fatal(err) - } - if _, ok := fields["int"].(int64); !ok { - t.Errorf("ParsePoint() int field mismatch: got %T, exp %T", fields["int"], int64(10)) - } - - if _, ok := fields["float"].(float64); !ok { - t.Errorf("ParsePoint() float field mismatch: got %T, exp %T", fields["float64"], float64(11.0)) - } - - if _, ok := fields["float2"].(float64); !ok { - t.Errorf("ParsePoint() float field mismatch: got %T, exp %T", fields["float64"], float64(12.1)) - } -} - -func TestParsePointKeyUnsorted(t *testing.T) { - pts, err := models.ParsePoints([]byte("cpu,last=1,first=2 value=1i")) - if err != nil { - t.Fatalf(`ParsePoints() failed. got %s`, err) - } - - if exp := 1; len(pts) != exp { - t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp) - } - pt := pts[0] - - if exp := "cpu,first=2,last=1"; string(pt.Key()) != exp { - t.Errorf("ParsePoint key not sorted. got %v, exp %v", string(pt.Key()), exp) - } -} - -func TestParsePointToString(t *testing.T) { - line := `cpu,host=serverA,region=us-east bool=false,float=11,float2=12.123,int=10i,str="string val" 1000000000` - pts, err := models.ParsePoints([]byte(line)) - if err != nil { - t.Fatalf(`ParsePoints() failed. 
got %s`, err) - } - if exp := 1; len(pts) != exp { - t.Errorf("ParsePoint() len mismatch: got %v, exp %v", len(pts), exp) - } - pt := pts[0] - - got := pt.String() - if line != got { - t.Errorf("ParsePoint() to string mismatch:\n got %v\n exp %v", got, line) - } - - pt = models.MustNewPoint("cpu", models.NewTags(map[string]string{"host": "serverA", "region": "us-east"}), - models.Fields{"int": 10, "float": float64(11.0), "float2": float64(12.123), "bool": false, "str": "string val"}, - time.Unix(1, 0)) - - got = pt.String() - if line != got { - t.Errorf("NewPoint() to string mismatch:\n got %v\n exp %v", got, line) - } -} - -func TestParsePointsWithPrecision(t *testing.T) { - tests := []struct { - name string - line string - precision string - exp string - }{ - { - name: "nanosecond by default", - line: `cpu,host=serverA,region=us-east value=1.0 946730096789012345`, - precision: "", - exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", - }, - { - name: "nanosecond", - line: `cpu,host=serverA,region=us-east value=1.0 946730096789012345`, - precision: "n", - exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", - }, - { - name: "microsecond", - line: `cpu,host=serverA,region=us-east value=1.0 946730096789012`, - precision: "us", - exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012000", - }, - { - name: "millisecond", - line: `cpu,host=serverA,region=us-east value=1.0 946730096789`, - precision: "ms", - exp: "cpu,host=serverA,region=us-east value=1.0 946730096789000000", - }, - { - name: "second", - line: `cpu,host=serverA,region=us-east value=1.0 946730096`, - precision: "s", - exp: "cpu,host=serverA,region=us-east value=1.0 946730096000000000", - }, - } - for _, test := range tests { - pts, err := models.ParsePointsWithPrecision([]byte(test.line), time.Now().UTC(), test.precision) - if err != nil { - t.Fatalf(`%s: ParsePoints() failed. got %s`, test.name, err) - } - if exp := 1; len(pts) != exp { - t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, len(pts), exp) - } - pt := pts[0] - - got := pt.String() - if got != test.exp { - t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp) - } - } -} - -func TestParsePointsWithPrecisionNoTime(t *testing.T) { - line := `cpu,host=serverA,region=us-east value=1.0` - tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z") - tests := []struct { - name string - precision string - exp string - }{ - { - name: "no precision", - precision: "", - exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", - }, - { - name: "nanosecond precision", - precision: "n", - exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", - }, - { - name: "microsecond precision", - precision: "us", - exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012000", - }, - { - name: "millisecond precision", - precision: "ms", - exp: "cpu,host=serverA,region=us-east value=1.0 946730096789000000", - }, - { - name: "second precision", - precision: "s", - exp: "cpu,host=serverA,region=us-east value=1.0 946730096000000000", - }, - } - - for _, test := range tests { - pts, err := models.ParsePointsWithPrecision([]byte(line), tm, test.precision) - if err != nil { - t.Fatalf(`%s: ParsePoints() failed. 
got %s`, test.name, err) - } - if exp := 1; len(pts) != exp { - t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, len(pts), exp) - } - pt := pts[0] - - got := pt.String() - if got != test.exp { - t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp) - } - } -} - -func TestParsePointsWithPrecisionComments(t *testing.T) { - tests := []struct { - name string - batch string - exp string - lenPoints int - }{ - { - name: "comment only", - batch: `# comment only`, - exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", - lenPoints: 0, - }, - { - name: "point with comment above", - batch: `# a point is below -cpu,host=serverA,region=us-east value=1.0 946730096789012345`, - exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", - lenPoints: 1, - }, - { - name: "point with comment below", - batch: `cpu,host=serverA,region=us-east value=1.0 946730096789012345 -# end of points`, - exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", - lenPoints: 1, - }, - { - name: "indented comment", - batch: ` # a point is below -cpu,host=serverA,region=us-east value=1.0 946730096789012345`, - exp: "cpu,host=serverA,region=us-east value=1.0 946730096789012345", - lenPoints: 1, - }, - } - for _, test := range tests { - pts, err := models.ParsePointsWithPrecision([]byte(test.batch), time.Now().UTC(), "") - if err != nil { - t.Fatalf(`%s: ParsePoints() failed. got %s`, test.name, err) - } - pointsLength := len(pts) - if exp := test.lenPoints; pointsLength != exp { - t.Errorf("%s: ParsePoint() len mismatch: got %v, exp %v", test.name, pointsLength, exp) - } - - if pointsLength > 0 { - pt := pts[0] - - got := pt.String() - if got != test.exp { - t.Errorf("%s: ParsePoint() to string mismatch:\n got %v\n exp %v", test.name, got, test.exp) - } - } - } -} - -func TestNewPointEscaped(t *testing.T) { - // commas - pt := models.MustNewPoint("cpu,main", models.NewTags(map[string]string{"tag,bar": "value"}), models.Fields{"name,bar": 1.0}, time.Unix(0, 0)) - if exp := `cpu\,main,tag\,bar=value name\,bar=1 0`; pt.String() != exp { - t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) - } - - // spaces - pt = models.MustNewPoint("cpu main", models.NewTags(map[string]string{"tag bar": "value"}), models.Fields{"name bar": 1.0}, time.Unix(0, 0)) - if exp := `cpu\ main,tag\ bar=value name\ bar=1 0`; pt.String() != exp { - t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) - } - - // equals - pt = models.MustNewPoint("cpu=main", models.NewTags(map[string]string{"tag=bar": "value=foo"}), models.Fields{"name=bar": 1.0}, time.Unix(0, 0)) - if exp := `cpu=main,tag\=bar=value\=foo name\=bar=1 0`; pt.String() != exp { - t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) - } -} - -func TestNewPointWithoutField(t *testing.T) { - _, err := models.NewPoint("cpu", models.NewTags(map[string]string{"tag": "bar"}), models.Fields{}, time.Unix(0, 0)) - if err == nil { - t.Fatalf(`NewPoint() expected error. 
got nil`) - } -} - -func TestNewPointUnhandledType(t *testing.T) { - // nil value - pt := models.MustNewPoint("cpu", nil, models.Fields{"value": nil}, time.Unix(0, 0)) - if exp := `cpu value= 0`; pt.String() != exp { - t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) - } - - // unsupported type gets stored as string - now := time.Unix(0, 0).UTC() - pt = models.MustNewPoint("cpu", nil, models.Fields{"value": now}, time.Unix(0, 0)) - if exp := `cpu value="1970-01-01 00:00:00 +0000 UTC" 0`; pt.String() != exp { - t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) - } - - fields, err := pt.Fields() - if err != nil { - t.Fatal(err) - } - if exp := "1970-01-01 00:00:00 +0000 UTC"; fields["value"] != exp { - t.Errorf("NewPoint().String() mismatch.\ngot %v\nexp %v", pt.String(), exp) - } -} - -func TestMakeKeyEscaped(t *testing.T) { - if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu\ load`), models.NewTags(map[string]string{})); string(got) != exp { - t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) - } - - if exp, got := `cpu\ load`, models.MakeKey([]byte(`cpu load`), models.NewTags(map[string]string{})); string(got) != exp { - t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) - } - - if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu\,load`), models.NewTags(map[string]string{})); string(got) != exp { - t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) - } - - if exp, got := `cpu\,load`, models.MakeKey([]byte(`cpu,load`), models.NewTags(map[string]string{})); string(got) != exp { - t.Errorf("MakeKey() mismatch.\ngot %v\nexp %v", got, exp) - } - -} - -func TestPrecisionString(t *testing.T) { - tags := map[string]interface{}{"value": float64(1)} - tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z") - tests := []struct { - name string - precision string - exp string - }{ - { - name: "no precision", - precision: "", - exp: "cpu value=1 946730096789012345", - }, - { - name: "nanosecond precision", - precision: "ns", - exp: "cpu value=1 946730096789012345", - }, - { - name: "microsecond precision", - precision: "us", - exp: "cpu value=1 946730096789012", - }, - { - name: "millisecond precision", - precision: "ms", - exp: "cpu value=1 946730096789", - }, - { - name: "second precision", - precision: "s", - exp: "cpu value=1 946730096", - }, - } - - for _, test := range tests { - pt := models.MustNewPoint("cpu", nil, tags, tm) - act := pt.PrecisionString(test.precision) - - if act != test.exp { - t.Errorf("%s: PrecisionString() mismatch:\n actual: %v\n exp: %v", - test.name, act, test.exp) - } - } -} - -func TestRoundedString(t *testing.T) { - tags := map[string]interface{}{"value": float64(1)} - tm, _ := time.Parse(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z") - tests := []struct { - name string - precision time.Duration - exp string - }{ - { - name: "no precision", - precision: time.Duration(0), - exp: "cpu value=1 946730096789012345", - }, - { - name: "nanosecond precision", - precision: time.Nanosecond, - exp: "cpu value=1 946730096789012345", - }, - { - name: "microsecond precision", - precision: time.Microsecond, - exp: "cpu value=1 946730096789012000", - }, - { - name: "millisecond precision", - precision: time.Millisecond, - exp: "cpu value=1 946730096789000000", - }, - { - name: "second precision", - precision: time.Second, - exp: "cpu value=1 946730097000000000", - }, - { - name: "minute precision", - precision: time.Minute, - exp: "cpu value=1 946730100000000000", - }, - { - name: 
"hour precision", - precision: time.Hour, - exp: "cpu value=1 946731600000000000", - }, - } - - for _, test := range tests { - pt := models.MustNewPoint("cpu", nil, tags, tm) - act := pt.RoundedString(test.precision) - - if act != test.exp { - t.Errorf("%s: RoundedString() mismatch:\n actual: %v\n exp: %v", - test.name, act, test.exp) - } - } -} - -func TestParsePointsStringWithExtraBuffer(t *testing.T) { - b := make([]byte, 70*5000) - buf := bytes.NewBuffer(b) - key := "cpu,host=A,region=uswest" - buf.WriteString(fmt.Sprintf("%s value=%.3f 1\n", key, rand.Float64())) - - points, err := models.ParsePointsString(buf.String()) - if err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - pointKey := string(points[0].Key()) - - if len(key) != len(pointKey) { - t.Fatalf("expected length of both keys are same but got %d and %d", len(key), len(pointKey)) - } - - if key != pointKey { - t.Fatalf("expected both keys are same but got %s and %s", key, pointKey) - } -} - -func TestParsePointsQuotesInFieldKey(t *testing.T) { - buf := `cpu "a=1 -cpu value=2 1` - points, err := models.ParsePointsString(buf) - if err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - fields, err := points[0].Fields() - if err != nil { - t.Fatal(err) - } - value, ok := fields["\"a"] - if !ok { - t.Fatalf("expected to parse field '\"a'") - } - - if value != float64(1) { - t.Fatalf("expected field value to be 1, got %v", value) - } - - // The following input should not parse - buf = `cpu "\, '= "\ v=1.0` - _, err = models.ParsePointsString(buf) - if err == nil { - t.Fatalf("expected parsing failure but got no error") - } -} - -func TestParsePointsQuotesInTags(t *testing.T) { - buf := `t159,label=hey\ "ya a=1i,value=0i -t159,label=another a=2i,value=1i 1` - points, err := models.ParsePointsString(buf) - if err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - if len(points) != 2 { - t.Fatalf("expected 2 points, got %d", len(points)) - } -} - -func TestParsePointsBlankLine(t *testing.T) { - buf := `cpu value=1i 1000000000 - -cpu value=2i 2000000000` - points, err := models.ParsePointsString(buf) - if err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - if len(points) != 2 { - t.Fatalf("expected 2 points, got %d", len(points)) - } -} - -func TestNewPointsWithBytesWithCorruptData(t *testing.T) { - corrupted := []byte{0, 0, 0, 3, 102, 111, 111, 0, 0, 0, 4, 61, 34, 65, 34, 1, 0, 0, 0, 14, 206, 86, 119, 24, 32, 72, 233, 168, 2, 148} - p, err := models.NewPointFromBytes(corrupted) - if p != nil || err == nil { - t.Fatalf("NewPointFromBytes: got: (%v, %v), expected: (nil, error)", p, err) - } -} - -func TestNewPointsWithShortBuffer(t *testing.T) { - _, err := models.NewPointFromBytes([]byte{0, 0, 0, 3, 4}) - if !errors.Is(err, io.ErrShortBuffer) { - t.Fatalf("NewPointFromBytes: got: (%v, %v), expected: (nil, error)", p, err) - } -} - -func TestNewPointsRejectsEmptyFieldNames(t *testing.T) { - if _, err := models.NewPoint("foo", nil, models.Fields{"": 1}, time.Now()); err == nil { - t.Fatalf("new point with empty field name. got: nil, expected: error") - } -} - -func TestNewPointsRejectsMaxKey(t *testing.T) { - var key string - // tsm field key is point key, separator (4 bytes) and field - for i := 0; i < models.MaxKeyLength-len("value")-4; i++ { - key += "a" - } - - // Test max key len - if _, err := models.NewPoint(key, nil, models.Fields{"value": 1, "ok": 2.0}, time.Now()); err != nil { - t.Fatalf("new point with max key. 
got: %v, expected: nil", err) - } - - if _, err := models.ParsePointsString(fmt.Sprintf("%v value=1,ok=2.0", key)); err != nil { - t.Fatalf("parse point with max key. got: %v, expected: nil", err) - } - - // Test 1 byte over max key len - key += "a" - if _, err := models.NewPoint(key, nil, models.Fields{"value": 1, "ok": 2.0}, time.Now()); err == nil { - t.Fatalf("new point with max key. got: nil, expected: error") - } - - if _, err := models.ParsePointsString(fmt.Sprintf("%v value=1,ok=2.0", key)); err == nil { - t.Fatalf("parse point with max key. got: nil, expected: error") - } - -} - -func TestPoint_FieldIterator_Simple(t *testing.T) { - - p, err := models.ParsePoints([]byte(`m v=42i,f=42 36`)) - if err != nil { - t.Fatal(err) - } - - if len(p) != 1 { - t.Fatalf("wrong number of points, got %d, exp %d", len(p), 1) - } - - fi := p[0].FieldIterator() - - if !fi.Next() { - t.Fatal("field iterator terminated before first field") - } - - if fi.Type() != models.Integer { - t.Fatalf("'42i' should be an Integer, got %v", fi.Type()) - } - - iv, err := fi.IntegerValue() - if err != nil { - t.Fatal(err) - } - if exp, got := int64(42), iv; exp != got { - t.Fatalf("'42i' should be %d, got %d", exp, got) - } - - if !fi.Next() { - t.Fatalf("field iterator terminated before second field") - } - - if fi.Type() != models.Float { - t.Fatalf("'42' should be a Float, got %v", fi.Type()) - } - - fv, err := fi.FloatValue() - if err != nil { - t.Fatal(err) - } - if exp, got := 42.0, fv; exp != got { - t.Fatalf("'42' should be %f, got %f", exp, got) - } - - if fi.Next() { - t.Fatal("field iterator didn't terminate") - } -} - -func toFields(fi models.FieldIterator) models.Fields { - m := make(models.Fields) - for fi.Next() { - var v interface{} - var err error - switch fi.Type() { - case models.Float: - v, err = fi.FloatValue() - case models.Integer: - v, err = fi.IntegerValue() - case models.Unsigned: - v, err = fi.UnsignedValue() - case models.String: - v = fi.StringValue() - case models.Boolean: - v, err = fi.BooleanValue() - case models.Empty: - v = nil - default: - panic("unknown type") - } - if err != nil { - panic(err) - } - m[string(fi.FieldKey())] = v - } - return m -} - -func TestPoint_FieldIterator_FieldMap(t *testing.T) { - - points, err := models.ParsePointsString(` -m v=42 -m v=42i -m v="string" -m v=true -m v="string\"with\"escapes" -m v=42i,f=42,g=42.314,u=123u -m a=2i,b=3i,c=true,d="stuff",e=-0.23,f=123.456 -`) - - if err != nil { - t.Fatal("failed to parse test points:", err) - } - - for _, p := range points { - exp, err := p.Fields() - if err != nil { - t.Fatal(err) - } - got := toFields(p.FieldIterator()) - - if !reflect.DeepEqual(got, exp) { - t.Errorf("FieldIterator failed for %#q: got %#v, exp %#v", p.String(), got, exp) - } - } -} - -func TestEscapeStringField(t *testing.T) { - cases := []struct { - in string - expOut string - }{ - {in: "abcdefg", expOut: "abcdefg"}, - {in: `one double quote " .`, expOut: `one double quote \" .`}, - {in: `quote " then backslash \ .`, expOut: `quote \" then backslash \\ .`}, - {in: `backslash \ then quote " .`, expOut: `backslash \\ then quote \" .`}, - } - - for _, c := range cases { - // Unescapes as expected. 
- got := models.EscapeStringField(c.in) - if got != c.expOut { - t.Errorf("unexpected result from EscapeStringField(%s)\ngot [%s]\nexp [%s]\n", c.in, got, c.expOut) - continue - } - - pointLine := fmt.Sprintf(`t s="%s"`, got) - test(t, pointLine, NewTestPoint( - "t", - models.NewTags(nil), - models.Fields{"s": c.in}, - time.Unix(0, 0), - )) - } -} - -func TestParseKeyBytes(t *testing.T) { - testCases := []struct { - input string - expectedName string - expectedTags map[string]string - }{ - {input: "m,k=v", expectedName: "m", expectedTags: map[string]string{"k": "v"}}, - {input: "m\\ q,k=v", expectedName: "m q", expectedTags: map[string]string{"k": "v"}}, - {input: "m,k\\ q=v", expectedName: "m", expectedTags: map[string]string{"k q": "v"}}, - {input: "m\\ q,k\\ q=v", expectedName: "m q", expectedTags: map[string]string{"k q": "v"}}, - } - - for _, testCase := range testCases { - t.Run(testCase.input, func(t *testing.T) { - name, tags := models.ParseKeyBytes([]byte(testCase.input)) - if !bytes.Equal([]byte(testCase.expectedName), name) { - t.Errorf("%s produced measurement %s but expected %s", testCase.input, string(name), testCase.expectedName) - } - if !tags.Equal(models.NewTags(testCase.expectedTags)) { - t.Errorf("%s produced tags %s but expected %s", testCase.input, tags.String(), models.NewTags(testCase.expectedTags).String()) - } - }) - } -} - -func TestParseName(t *testing.T) { - testCases := []struct { - input string - expectedName string - }{ - {input: "m,k=v", expectedName: "m"}, - {input: "m\\ q,k=v", expectedName: "m q"}, - } - - for _, testCase := range testCases { - t.Run(testCase.input, func(t *testing.T) { - name := models.ParseName([]byte(testCase.input)) - if !bytes.Equal([]byte(testCase.expectedName), name) { - t.Errorf("%s produced measurement %s but expected %s", testCase.input, string(name), testCase.expectedName) - } - }) - } -} - -func TestTags_KeyValues(t *testing.T) { - tags := models.NewTags(map[string]string{ - "tag0": "v0", - "tag1": "v1", - "tag2": "v2", - }) - - got := tags.KeyValues(nil) - exp := [][]byte{[]byte("tag0"), []byte("v0"), []byte("tag1"), []byte("v1"), []byte("tag2"), []byte("v2")} - if !cmp.Equal(got, exp) { - t.Errorf("unexpected, -got/+exp\n%s", cmp.Diff(got, exp)) - } - - v := make([][]byte, 0, 10) - v = tags.KeyValues(v) - got2 := tags.KeyValues(v) - if !cmp.Equal(got2, exp) { - t.Errorf("unexpected, -got/+exp\n%s", cmp.Diff(got2, exp)) - } -} - -func BenchmarkEscapeStringField_Plain(b *testing.B) { - s := "nothing special" - for i := 0; i < b.N; i++ { - sink = models.EscapeStringField(s) - } -} - -func BenchmarkEscapeString_Quotes(b *testing.B) { - s := `Hello, "world"` - for i := 0; i < b.N; i++ { - sink = models.EscapeStringField(s) - } -} - -func BenchmarkEscapeString_Backslashes(b *testing.B) { - s := `C:\windows\system32` - for i := 0; i < b.N; i++ { - sink = models.EscapeStringField(s) - } -} - -func BenchmarkEscapeString_QuotesAndBackslashes(b *testing.B) { - s1 := `a quote " then backslash \ .` - s2 := `a backslash \ then quote " .` - for i := 0; i < b.N; i++ { - sink = [...]string{models.EscapeStringField(s1), models.EscapeStringField(s2)} - } -} - -func BenchmarkParseTags(b *testing.B) { - tags := []byte("cpu,tag0=value0,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5") - for i := 0; i < b.N; i++ { - models.ParseTags(tags) - } -} - -func BenchmarkEscapeMeasurement(b *testing.B) { - benchmarks := []struct { - m []byte - }{ - {[]byte("this_is_a_test")}, - {[]byte("this,is,a,test")}, - } - - for _, bm := range benchmarks 
{ - b.Run(string(bm.m), func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - models.EscapeMeasurement(bm.m) - } - }) - } -} - -func makeTags(key, val string, n int) models.Tags { - tags := make(models.Tags, n) - for i := range tags { - tags[i].Key = []byte(fmt.Sprintf("%s%03d", key, i)) - tags[i].Value = []byte(fmt.Sprintf("%s%03d", val, i)) - } - return tags -} - -func BenchmarkTags_HashKey(b *testing.B) { - benchmarks := []struct { - name string - t models.Tags - }{ - {"5 tags-no esc", makeTags("tag_foo", "val_bar", 5)}, - {"25 tags-no esc", makeTags("tag_foo", "val_bar", 25)}, - {"5 tags-esc", makeTags("tag foo", "val bar", 5)}, - {"25 tags-esc", makeTags("tag foo", "val bar", 25)}, - } - for _, bm := range benchmarks { - b.Run(bm.name, func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - bm.t.HashKey() - } - }) - } -} - -func BenchmarkMakeKey(b *testing.B) { - benchmarks := []struct { - m []byte - t models.Tags - }{ - {[]byte("this_is_a_test"), nil}, - {[]byte("this,is,a,test"), nil}, - {[]byte(`this\ is\ a\ test`), nil}, - - {[]byte("this_is_a_test"), makeTags("tag_foo", "val_bar", 8)}, - {[]byte("this,is,a,test"), makeTags("tag_foo", "val_bar", 8)}, - {[]byte("this_is_a_test"), makeTags("tag_foo", "val bar", 8)}, - {[]byte("this,is,a,test"), makeTags("tag_foo", "val bar", 8)}, - } - - for _, bm := range benchmarks { - b.Run(string(bm.m), func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - models.MakeKey(bm.m, bm.t) - } - }) - } -} - -func BenchmarkNewTagsKeyValues(b *testing.B) { - b.Run("sorted", func(b *testing.B) { - b.Run("no dupes", func(b *testing.B) { - kv := [][]byte{[]byte("tag0"), []byte("v0"), []byte("tag1"), []byte("v1"), []byte("tag2"), []byte("v2")} - - b.Run("preallocate", func(b *testing.B) { - t := make(models.Tags, 3) - b.ReportAllocs() - for i := 0; i < b.N; i++ { - _, _ = models.NewTagsKeyValues(t, kv...) - } - }) - - b.Run("allocate", func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - _, _ = models.NewTagsKeyValues(nil, kv...) - } - }) - }) - - b.Run("dupes", func(b *testing.B) { - kv := [][]byte{[]byte("tag0"), []byte("v0"), []byte("tag1"), []byte("v1"), []byte("tag1"), []byte("v1"), []byte("tag2"), []byte("v2"), []byte("tag2"), []byte("v2")} - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - _, _ = models.NewTagsKeyValues(nil, kv...) - } - }) - }) - b.Run("unsorted", func(b *testing.B) { - b.Run("no dupes", func(b *testing.B) { - kv := [][]byte{[]byte("tag1"), []byte("v1"), []byte("tag0"), []byte("v0"), []byte("tag2"), []byte("v2")} - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - _, _ = models.NewTagsKeyValues(nil, kv...) - } - }) - b.Run("dupes", func(b *testing.B) { - kv := [][]byte{[]byte("tag1"), []byte("v1"), []byte("tag2"), []byte("v2"), []byte("tag0"), []byte("v0"), []byte("tag1"), []byte("v1"), []byte("tag2"), []byte("v2")} - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - _, _ = models.NewTagsKeyValues(nil, kv...) - } - }) - }) -} diff --git a/models/rows.go b/models/rows.go deleted file mode 100644 index c087a4882d0..00000000000 --- a/models/rows.go +++ /dev/null @@ -1,62 +0,0 @@ -package models - -import ( - "sort" -) - -// Row represents a single row returned from the execution of a statement. 
-type Row struct { - Name string `json:"name,omitempty"` - Tags map[string]string `json:"tags,omitempty"` - Columns []string `json:"columns,omitempty"` - Values [][]interface{} `json:"values,omitempty"` - Partial bool `json:"partial,omitempty"` -} - -// SameSeries returns true if r contains values for the same series as o. -func (r *Row) SameSeries(o *Row) bool { - return r.tagsHash() == o.tagsHash() && r.Name == o.Name -} - -// tagsHash returns a hash of tag key/value pairs. -func (r *Row) tagsHash() uint64 { - h := NewInlineFNV64a() - keys := r.tagsKeys() - for _, k := range keys { - h.Write([]byte(k)) - h.Write([]byte(r.Tags[k])) - } - return h.Sum64() -} - -// tagKeys returns a sorted list of tag keys. -func (r *Row) tagsKeys() []string { - a := make([]string, 0, len(r.Tags)) - for k := range r.Tags { - a = append(a, k) - } - sort.Strings(a) - return a -} - -// Rows represents a collection of rows. Rows implements sort.Interface. -type Rows []*Row - -// Len implements sort.Interface. -func (p Rows) Len() int { return len(p) } - -// Less implements sort.Interface. -func (p Rows) Less(i, j int) bool { - // Sort by name first. - if p[i].Name != p[j].Name { - return p[i].Name < p[j].Name - } - - // Sort by tag set hash. Tags don't have a meaningful sort order so we - // just compute a hash and sort by that instead. This allows the tests - // to receive rows in a predictable order every time. - return p[i].tagsHash() < p[j].tagsHash() -} - -// Swap implements sort.Interface. -func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/models/statistic.go b/models/statistic.go deleted file mode 100644 index 9107d9025ab..00000000000 --- a/models/statistic.go +++ /dev/null @@ -1,33 +0,0 @@ -package models - -// Statistic is the representation of a statistic used by the monitoring service. -type Statistic struct { - Name string `json:"name"` - Tags map[string]string `json:"tags"` - Values map[string]interface{} `json:"values"` -} - -// StatisticTags is a map that can be merged with others without causing -// mutations to either map. -type StatisticTags map[string]string - -// Merge creates a new map containing the merged contents of tags and t. -// If both tags and the receiver map contain the same key, the value in tags -// is used in the resulting map. -// -// Merge always returns a usable map. -func (t StatisticTags) Merge(tags map[string]string) map[string]string { - // Add everything in tags to the result. - out := make(map[string]string, len(tags)) - for k, v := range tags { - out[k] = v - } - - // Only add values from t that don't appear in tags. - for k, v := range t { - if _, ok := tags[k]; !ok { - out[k] = v - } - } - return out -} diff --git a/models/tagkeysset.go b/models/tagkeysset.go deleted file mode 100644 index d165bdce337..00000000000 --- a/models/tagkeysset.go +++ /dev/null @@ -1,156 +0,0 @@ -package models - -import ( - "bytes" - "strings" -) - -// TagKeysSet provides set operations for combining Tags. -type TagKeysSet struct { - i int - keys [2][][]byte - tmp [][]byte -} - -// Clear removes all the elements of TagKeysSet and ensures all internal -// buffers are reset. -func (set *TagKeysSet) Clear() { - set.clear(set.keys[0]) - set.clear(set.keys[1]) - set.clear(set.tmp) - set.i = 0 - set.keys[0] = set.keys[0][:0] -} - -func (set *TagKeysSet) clear(b [][]byte) { - b = b[:cap(b)] - for i := range b { - b[i] = nil - } -} - -// KeysBytes returns the merged keys in lexicographical order. -// The slice is valid until the next call to UnionKeys, UnionBytes or Reset. 
-func (set *TagKeysSet) KeysBytes() [][]byte { - return set.keys[set.i&1] -} - -// Keys returns a copy of the merged keys in lexicographical order. -func (set *TagKeysSet) Keys() []string { - keys := set.KeysBytes() - s := make([]string, 0, len(keys)) - for i := range keys { - s = append(s, string(keys[i])) - } - return s -} - -func (set *TagKeysSet) String() string { - var s []string - for _, k := range set.KeysBytes() { - s = append(s, string(k)) - } - return strings.Join(s, ",") -} - -// IsSupersetKeys returns true if the TagKeysSet is a superset of all the keys -// contained in other. -func (set *TagKeysSet) IsSupersetKeys(other Tags) bool { - keys := set.keys[set.i&1] - i, j := 0, 0 - for i < len(keys) && j < len(other) { - if cmp := bytes.Compare(keys[i], other[j].Key); cmp > 0 { - return false - } else if cmp == 0 { - j++ - } - i++ - } - - return j == len(other) -} - -// IsSupersetBytes returns true if the TagKeysSet is a superset of all the keys -// in other. -// Other must be lexicographically sorted or the results are undefined. -func (set *TagKeysSet) IsSupersetBytes(other [][]byte) bool { - keys := set.keys[set.i&1] - i, j := 0, 0 - for i < len(keys) && j < len(other) { - if cmp := bytes.Compare(keys[i], other[j]); cmp > 0 { - return false - } else if cmp == 0 { - j++ - } - i++ - } - - return j == len(other) -} - -// UnionKeys updates the set so that it is the union of itself and all the -// keys contained in other. -func (set *TagKeysSet) UnionKeys(other Tags) { - if set.IsSupersetKeys(other) { - return - } - - if l := len(other); cap(set.tmp) < l { - set.tmp = make([][]byte, l) - } else { - set.tmp = set.tmp[:l] - } - - for i := range other { - set.tmp[i] = other[i].Key - } - - set.merge(set.tmp) -} - -// UnionBytes updates the set so that it is the union of itself and all the -// keys contained in other. -// Other must be lexicographically sorted or the results are undefined. -func (set *TagKeysSet) UnionBytes(other [][]byte) { - if set.IsSupersetBytes(other) { - return - } - - set.merge(other) -} - -func (set *TagKeysSet) merge(in [][]byte) { - keys := set.keys[set.i&1] - l := len(keys) + len(in) - set.i = (set.i + 1) & 1 - keya := set.keys[set.i&1] - if cap(keya) < l { - keya = make([][]byte, 0, l) - } else { - keya = keya[:0] - } - - i, j := 0, 0 - for i < len(keys) && j < len(in) { - ki, kj := keys[i], in[j] - if cmp := bytes.Compare(ki, kj); cmp < 0 { - i++ - } else if cmp > 0 { - ki = kj - j++ - } else { - i++ - j++ - } - - keya = append(keya, ki) - } - - if i < len(keys) { - keya = append(keya, keys[i:]...) - } else if j < len(in) { - keya = append(keya, in[j:]...) 
- } - - set.keys[set.i&1] = keya -} diff --git a/models/tagkeysset_test.go b/models/tagkeysset_test.go deleted file mode 100644 index e62c42e5f09..00000000000 --- a/models/tagkeysset_test.go +++ /dev/null @@ -1,325 +0,0 @@ -package models_test - -import ( - "bytes" - "math/rand" - "strconv" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/models" -) - -func TestTagKeysSet_UnionKeys(t *testing.T) { - tests := []struct { - name string - tags []models.Tags - exp string - }{ - { - name: "mixed", - tags: []models.Tags{ - models.ParseTags([]byte("foo,tag0=v0,tag1=v0,tag2=v0")), - models.ParseTags([]byte("foo,tag0=v0,tag1=v0,tag2=v1")), - models.ParseTags([]byte("foo,tag0=v0")), - models.ParseTags([]byte("foo,tag0=v0,tag3=v0")), - }, - exp: "tag0,tag1,tag2,tag3", - }, - { - name: "mixed 2", - tags: []models.Tags{ - models.ParseTags([]byte("foo,tag0=v0")), - models.ParseTags([]byte("foo,tag0=v0,tag3=v0")), - models.ParseTags([]byte("foo,tag0=v0,tag1=v0,tag2=v0")), - models.ParseTags([]byte("foo,tag0=v0,tag1=v0,tag2=v1")), - }, - exp: "tag0,tag1,tag2,tag3", - }, - { - name: "all different", - tags: []models.Tags{ - models.ParseTags([]byte("foo,tag0=v0")), - models.ParseTags([]byte("foo,tag1=v0")), - models.ParseTags([]byte("foo,tag2=v1")), - models.ParseTags([]byte("foo,tag3=v0")), - }, - exp: "tag0,tag1,tag2,tag3", - }, - { - name: "new tags,verify clear", - tags: []models.Tags{ - models.ParseTags([]byte("foo,tag9=v0")), - models.ParseTags([]byte("foo,tag8=v0")), - }, - exp: "tag8,tag9", - }, - } - - var km models.TagKeysSet - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - km.Clear() - for _, tags := range tt.tags { - km.UnionKeys(tags) - } - - if got := km.String(); !cmp.Equal(got, tt.exp) { - t.Errorf("unexpected keys -got/+exp\n%s", cmp.Diff(got, tt.exp)) - } - }) - } -} - -func TestTagKeysSet_IsSuperset(t *testing.T) { - var km models.TagKeysSet - km.UnionBytes(bytes.Split([]byte("tag0,tag3,tag5,tag7"), commaB)) - - tests := []struct { - name string - tags models.Tags - exp bool - }{ - { - tags: models.ParseTags([]byte("foo,tag0=v,tag3=v")), - exp: true, - }, - { - tags: models.ParseTags([]byte("foo,tag3=v")), - exp: true, - }, - { - tags: models.ParseTags([]byte("foo,tag7=v")), - exp: true, - }, - { - tags: models.ParseTags([]byte("foo,tag3=v,tag7=v")), - exp: true, - }, - { - tags: models.ParseTags([]byte("foo,tag0=v,tag3=v,tag5=v,tag7=v")), - exp: true, - }, - { - tags: models.ParseTags([]byte("foo")), - exp: true, - }, - { - tags: models.ParseTags([]byte("foo,tag0=v,tag2=v")), - exp: false, - }, - { - tags: models.ParseTags([]byte("foo,tag1=v")), - exp: false, - }, - { - tags: models.ParseTags([]byte("foo,tag6=v")), - exp: false, - }, - { - tags: models.ParseTags([]byte("foo,tag8=v")), - exp: false, - }, - { - tags: models.ParseTags([]byte("foo,tag0=v,tag3=v,tag5=v,tag8=v")), - exp: false, - }, - { - tags: models.ParseTags([]byte("foo,tag0=v,tag3=v,tag5=v,tag6=v")), - exp: false, - }, - { - tags: models.ParseTags([]byte("foo,tag0=v,tag3=v,tag5=v,tag7=v,tag8=v")), - exp: false, - }, - } - - for _, tt := range tests { - t.Run("tags/"+tt.name, func(t *testing.T) { - if got := km.IsSupersetKeys(tt.tags); got != tt.exp { - t.Errorf("unexpected IsSuperset -got/+exp\n%s", cmp.Diff(got, tt.exp)) - } - }) - } - - for _, tt := range tests { - t.Run("bytes/"+tt.name, func(t *testing.T) { - var keys [][]byte - for i := range tt.tags { - keys = append(keys, tt.tags[i].Key) - } - if got := km.IsSupersetBytes(keys); got != tt.exp { - 
t.Errorf("unexpected IsSupersetBytes -got/+exp\n%s", cmp.Diff(got, tt.exp)) - } - }) - } -} - -var commaB = []byte(",") - -func TestTagKeysSet_UnionBytes(t *testing.T) { - - tests := []struct { - name string - keys [][][]byte - exp string - }{ - { - name: "mixed", - keys: [][][]byte{ - bytes.Split([]byte("tag0,tag1,tag2"), commaB), - bytes.Split([]byte("tag0,tag1,tag2"), commaB), - bytes.Split([]byte("tag0"), commaB), - bytes.Split([]byte("tag0,tag3"), commaB), - }, - exp: "tag0,tag1,tag2,tag3", - }, - { - name: "mixed 2", - keys: [][][]byte{ - bytes.Split([]byte("tag0"), commaB), - bytes.Split([]byte("tag0,tag3"), commaB), - bytes.Split([]byte("tag0,tag1,tag2"), commaB), - bytes.Split([]byte("tag0,tag1,tag2"), commaB), - }, - exp: "tag0,tag1,tag2,tag3", - }, - { - name: "all different", - keys: [][][]byte{ - bytes.Split([]byte("tag0"), commaB), - bytes.Split([]byte("tag3"), commaB), - bytes.Split([]byte("tag1"), commaB), - bytes.Split([]byte("tag2"), commaB), - }, - exp: "tag0,tag1,tag2,tag3", - }, - { - name: "new tags,verify clear", - keys: [][][]byte{ - bytes.Split([]byte("tag9"), commaB), - bytes.Split([]byte("tag8"), commaB), - }, - exp: "tag8,tag9", - }, - } - - var km models.TagKeysSet - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - km.Clear() - for _, keys := range tt.keys { - km.UnionBytes(keys) - } - - if got := km.String(); !cmp.Equal(got, tt.exp) { - t.Errorf("unexpected keys -got/+exp\n%s", cmp.Diff(got, tt.exp)) - } - }) - } -} - -func BenchmarkTagKeysSet_UnionBytes(b *testing.B) { - keys := [][][]byte{ - bytes.Split([]byte("tag00,tag01,tag02"), commaB), - bytes.Split([]byte("tag00,tag01,tag02"), commaB), - bytes.Split([]byte("tag00,tag01,tag05,tag06,tag10,tag11,tag12,tag13,tag14,tag15"), commaB), - bytes.Split([]byte("tag00"), commaB), - bytes.Split([]byte("tag00,tag03"), commaB), - bytes.Split([]byte("tag01,tag03,tag13,tag14,tag15"), commaB), - bytes.Split([]byte("tag04,tag05"), commaB), - } - - seededRand := rand.New(rand.NewSource(20040409)) - - tests := []int{ - 10, - 1000, - 1000000, - } - - for _, n := range tests { - b.Run(strconv.Itoa(n), func(b *testing.B) { - b.ResetTimer() - - var km models.TagKeysSet - for i := 0; i < b.N; i++ { - for j := 0; j < n; j++ { - km.UnionBytes(keys[seededRand.Int()%len(keys)]) - } - km.Clear() - } - }) - } -} - -type XorShift64Star struct { - state uint64 -} - -func (x *XorShift64Star) Next() uint64 { - x.state ^= x.state >> 12 - x.state ^= x.state << 25 - x.state ^= x.state >> 27 - return x.state * 2685821657736338717 -} - -func BenchmarkTagKeysSet_UnionKeys(b *testing.B) { - tags := []models.Tags{ - models.ParseTags([]byte("foo,tag00=v0,tag01=v0,tag02=v0")), - models.ParseTags([]byte("foo,tag00=v0,tag01=v0,tag02=v0")), - models.ParseTags([]byte("foo,tag00=v0,tag01=v0,tag05=v0,tag06=v0,tag10=v0,tag11=v0,tag12=v0,tag13=v0,tag14=v0,tag15=v0")), - models.ParseTags([]byte("foo,tag00=v0")), - models.ParseTags([]byte("foo,tag00=v0,tag03=v0")), - models.ParseTags([]byte("foo,tag01=v0,tag03=v0,tag13=v0,tag14=v0,tag15=v0")), - models.ParseTags([]byte("foo,tag04=v0,tag05=v0")), - } - - rnd := XorShift64Star{state: 20040409} - - tests := []int{ - 10, - 1000, - 1000000, - } - - for _, n := range tests { - b.Run(strconv.Itoa(n), func(b *testing.B) { - b.ResetTimer() - - var km models.TagKeysSet - for i := 0; i < b.N; i++ { - for j := 0; j < n; j++ { - km.UnionKeys(tags[rnd.Next()%uint64(len(tags))]) - } - km.Clear() - } - }) - } -} - -func BenchmarkTagKeysSet_IsSuperset(b *testing.B) { - var km models.TagKeysSet - 
km.UnionBytes(bytes.Split([]byte("tag0,tag3,tag5,tag7"), commaB)) - - tests := []struct { - name string - tags models.Tags - }{ - {name: "last/true", tags: models.ParseTags([]byte("foo,tag7=v"))}, - {name: "last/false", tags: models.ParseTags([]byte("foo,tag8=v"))}, - {name: "first_last/true", tags: models.ParseTags([]byte("foo,tag0=v,tag7=v"))}, - {name: "all/true", tags: models.ParseTags([]byte("foo,tag0=v,tag3=v,tag5=v,tag7=v"))}, - {name: "first not last/false", tags: models.ParseTags([]byte("foo,tag0=v,tag8=v"))}, - {name: "all but last/false", tags: models.ParseTags([]byte("foo,tag0=v,tag3=v,tag5=v,tag7=v,tag8=v"))}, - } - - for _, n := range tests { - b.Run(n.name, func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - km.IsSupersetKeys(n.tags) - } - }) - } -} diff --git a/models/time.go b/models/time.go deleted file mode 100644 index 297892c6da3..00000000000 --- a/models/time.go +++ /dev/null @@ -1,74 +0,0 @@ -package models - -// Helper time methods since parsing time can easily overflow and we only support a -// specific time range. - -import ( - "fmt" - "math" - "time" -) - -const ( - // MinNanoTime is the minimum time that can be represented. - // - // 1677-09-21 00:12:43.145224194 +0000 UTC - // - // The two lowest minimum integers are used as sentinel values. The - // minimum value needs to be used as a value lower than any other value for - // comparisons and another separate value is needed to act as a sentinel - // default value that is unusable by the user, but usable internally. - // Because these two values need to be used for a special purpose, we do - // not allow users to write points at these two times. - MinNanoTime = int64(math.MinInt64) + 2 - - // MaxNanoTime is the maximum time that can be represented. - // - // 2262-04-11 23:47:16.854775806 +0000 UTC - // - // The highest time represented by a nanosecond needs to be used for an - // exclusive range in the shard group, so the maximum time needs to be one - // less than the possible maximum number of nanoseconds representable by an - // int64 so that we don't lose a point at that one time. - MaxNanoTime = int64(math.MaxInt64) - 1 -) - -var ( - minNanoTime = time.Unix(0, MinNanoTime).UTC() - maxNanoTime = time.Unix(0, MaxNanoTime).UTC() - - // ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch. - ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime) -) - -// SafeCalcTime safely calculates the time given. Will return error if the time is outside the -// supported range. -func SafeCalcTime(timestamp int64, precision string) (time.Time, error) { - mult := GetPrecisionMultiplier(precision) - if t, ok := safeSignedMult(timestamp, mult); ok { - tme := time.Unix(0, t).UTC() - return tme, CheckTime(tme) - } - - return time.Time{}, ErrTimeOutOfRange -} - -// CheckTime checks that a time is within the safe range. -func CheckTime(t time.Time) error { - if t.Before(minNanoTime) || t.After(maxNanoTime) { - return ErrTimeOutOfRange - } - return nil -} - -// Perform the multiplication and check to make sure it didn't overflow. 
-func safeSignedMult(a, b int64) (int64, bool) { - if a == 0 || b == 0 || a == 1 || b == 1 { - return a * b, true - } - if a == MinNanoTime || b == MaxNanoTime { - return 0, false - } - c := a * b - return c, c/b == a -} diff --git a/notebook.go b/notebook.go deleted file mode 100644 index fb301f77366..00000000000 --- a/notebook.go +++ /dev/null @@ -1,125 +0,0 @@ -package influxdb - -import ( - "context" - "database/sql/driver" - "encoding/json" - "fmt" - "strings" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - ErrOrgIDRequired = fieldRequiredError("OrgID") - ErrNameRequired = fieldRequiredError("Name") - ErrSpecRequired = fieldRequiredError("Spec") - ErrOffsetNegative = &errors.Error{ - Code: errors.EInvalid, - Msg: "offset cannot be negative", - } - ErrLimitLTEZero = &errors.Error{ - Code: errors.EInvalid, - Msg: "limit cannot be less-than or equal-to zero", - } - ErrNotebookNotFound = &errors.Error{ - Code: errors.ENotFound, - Msg: "notebook not found", - } -) - -func fieldRequiredError(field string) error { - return &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("%s required", field), - } -} - -// Notebook represents all visual and query data for a notebook. -type Notebook struct { - OrgID platform.ID `json:"orgID" db:"org_id"` - ID platform.ID `json:"id" db:"id"` - Name string `json:"name" db:"name"` - Spec NotebookSpec `json:"spec" db:"spec"` - CreatedAt time.Time `json:"createdAt" db:"created_at"` - UpdatedAt time.Time `json:"updatedAt" db:"updated_at"` -} - -// NotebookSpec is an abitrary JSON object provided by the client. -type NotebookSpec map[string]interface{} - -// Value implements the database/sql Valuer interface for adding NotebookSpecs to the database. -func (s NotebookSpec) Value() (driver.Value, error) { - spec, err := json.Marshal(s) - if err != nil { - return nil, err - } - - return string(spec), nil -} - -// Scan implements the database/sql Scanner interface for retrieving NotebookSpecs from the database. -func (s *NotebookSpec) Scan(value interface{}) error { - var spec NotebookSpec - if err := json.NewDecoder(strings.NewReader(value.(string))).Decode(&spec); err != nil { - return err - } - - *s = spec - return nil -} - -// NotebookService is the service contract for Notebooks. -type NotebookService interface { - GetNotebook(ctx context.Context, id platform.ID) (*Notebook, error) - CreateNotebook(ctx context.Context, create *NotebookReqBody) (*Notebook, error) - UpdateNotebook(ctx context.Context, id platform.ID, update *NotebookReqBody) (*Notebook, error) - DeleteNotebook(ctx context.Context, id platform.ID) error - ListNotebooks(ctx context.Context, filter NotebookListFilter) ([]*Notebook, error) -} - -// NotebookListFilter is a selection filter for listing notebooks. -type NotebookListFilter struct { - OrgID platform.ID - Page Page -} - -// Page contains pagination information -type Page struct { - Offset int - Limit int -} - -// Validate validates the Page -func (p Page) Validate() error { - if p.Offset < 0 { - return ErrOffsetNegative - } - if p.Limit <= 0 { - return ErrLimitLTEZero - } - return nil -} - -// NotebookReqBody contains fields for creating or updating notebooks. 
-type NotebookReqBody struct { - OrgID platform.ID `json:"orgID"` - Name string `json:"name"` - Spec NotebookSpec `json:"spec"` -} - -// Validate validates the creation object -func (n NotebookReqBody) Validate() error { - if !n.OrgID.Valid() { - return ErrOrgIDRequired - } - if n.Name == "" { - return ErrNameRequired - } - if n.Spec == nil { - return ErrSpecRequired - } - return nil -} diff --git a/notebook_test.go b/notebook_test.go deleted file mode 100644 index 6871e5562b7..00000000000 --- a/notebook_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package influxdb - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/stretchr/testify/require" -) - -func TestValidatePage(t *testing.T) { - tests := []struct { - name string - page Page - expectError error - }{ - { - name: "ok", - page: Page{ - Offset: 5, - Limit: 10, - }, - expectError: nil, - }, - { - name: "negative offset", - page: Page{ - Offset: -5, - Limit: 10, - }, - expectError: ErrOffsetNegative, - }, - { - name: "negative limit", - page: Page{ - Offset: 5, - Limit: -10, - }, - expectError: ErrLimitLTEZero, - }, - { - name: "zero limit", - page: Page{ - Offset: 5, - Limit: 0, - }, - expectError: ErrLimitLTEZero, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.page.Validate() - if tt.expectError == nil { - require.NoError(t, err) - } else { - require.Error(t, err) - require.Equal(t, tt.expectError, err) - } - }) - } -} - -func TestValidateReqBody(t *testing.T) { - testID, _ := platform.IDFromString("1234123412341234") - - tests := []struct { - name string - body NotebookReqBody - expectError error - }{ - { - name: "ok", - body: NotebookReqBody{ - OrgID: *testID, - Name: "Example", - Spec: map[string]interface{}{}, - }, - expectError: nil, - }, - { - name: "missing name", - body: NotebookReqBody{ - OrgID: *testID, - Spec: map[string]interface{}{}, - }, - expectError: ErrNameRequired, - }, - { - name: "missing spec", - body: NotebookReqBody{ - OrgID: *testID, - Name: "Example", - }, - expectError: ErrSpecRequired, - }, - { - name: "missing orgID", - body: NotebookReqBody{ - Name: "Example", - Spec: map[string]interface{}{}, - }, - expectError: ErrOrgIDRequired, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.body.Validate() - if tt.expectError == nil { - require.NoError(t, err) - } else { - require.Error(t, err) - require.Equal(t, tt.expectError, err) - } - }) - } -} diff --git a/notebooks/README.md b/notebooks/README.md deleted file mode 100644 index 1cf151abcca..00000000000 --- a/notebooks/README.md +++ /dev/null @@ -1,25 +0,0 @@ -## Notebooks - -This package provides an HTTP API for interacting with InfluxDB notebooks. The -HTTP handlers are located in the `transport` folder. The code for interacting -with the sqlite datastore is located in the `service.go` file. Definitions for -the basic types & interfaces associated with notebooks used throughout the -platform are located in the top-level `influxdb` package, in the `notebook.go` -file. - -### Anatomy - -The backend representation of a notebook is very simple: An object with an ID, -Name, associated organization ID, created/modified times, and a "spec". The -"spec" is a mechanism for storing a JSON string defined entirely by the frontend -UI. The notebook spec will likely be further defined in the future as the -notebooks feature is developed and more sophisticated backend behaviors are -required. 
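For reference, a minimal standalone sketch of the notebook shape described above. The field names and JSON tags come from the deleted `notebook.go` earlier in this diff; the local type definitions, string IDs, and example values are simplified stand-ins for `platform.ID` and the real package types, for illustration only.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// NotebookSpec is an arbitrary JSON object supplied by the UI
// (local stand-in for influxdb.NotebookSpec).
type NotebookSpec map[string]interface{}

// Notebook mirrors the fields listed above; string IDs stand in for platform.ID.
type Notebook struct {
	OrgID     string       `json:"orgID"`
	ID        string       `json:"id"`
	Name      string       `json:"name"`
	Spec      NotebookSpec `json:"spec"`
	CreatedAt time.Time    `json:"createdAt"`
	UpdatedAt time.Time    `json:"updatedAt"`
}

func main() {
	now := time.Date(2021, 8, 1, 0, 0, 0, 0, time.UTC)
	n := Notebook{
		OrgID:     "0000000000000001", // hypothetical 16-hex-digit org ID
		ID:        "0000000000000002", // hypothetical notebook ID
		Name:      "Example",
		Spec:      NotebookSpec{"cells": []interface{}{}}, // spec contents are defined by the frontend
		CreatedAt: now,
		UpdatedAt: now,
	}
	out, _ := json.MarshalIndent(n, "", "  ")
	fmt.Println(string(out)) // prints the JSON form returned by the HTTP API
}
```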
- -### Use - -Basic CRUD actions are available for interacting with notebooks through the API. -Notebooks are persisted in the relational sqlite database, although they -currently do not make use of any relational features. Again, it is likely that -the more advanced features of the datastore will be utilized in the future as -the notebooks feature evolves. diff --git a/notebooks/middleware_logging.go b/notebooks/middleware_logging.go deleted file mode 100644 index c9359fb033a..00000000000 --- a/notebooks/middleware_logging.go +++ /dev/null @@ -1,84 +0,0 @@ -package notebooks - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "go.uber.org/zap" -) - -func NewLoggingService(logger *zap.Logger, underlying influxdb.NotebookService) *loggingService { - return &loggingService{ - logger: logger, - underlying: underlying, - } -} - -type loggingService struct { - logger *zap.Logger - underlying influxdb.NotebookService -} - -var _ influxdb.NotebookService = (*loggingService)(nil) - -func (l loggingService) GetNotebook(ctx context.Context, id platform.ID) (n *influxdb.Notebook, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find notebook by ID", zap.Error(err), dur) - return - } - l.logger.Debug("notebook find by ID", dur) - }(time.Now()) - return l.underlying.GetNotebook(ctx, id) -} - -func (l loggingService) CreateNotebook(ctx context.Context, create *influxdb.NotebookReqBody) (n *influxdb.Notebook, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to create notebook", zap.Error(err), dur) - return - } - l.logger.Debug("notebook create", dur) - }(time.Now()) - return l.underlying.CreateNotebook(ctx, create) -} - -func (l loggingService) UpdateNotebook(ctx context.Context, id platform.ID, update *influxdb.NotebookReqBody) (n *influxdb.Notebook, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to update notebook", zap.Error(err), dur) - return - } - l.logger.Debug("notebook update", dur) - }(time.Now()) - return l.underlying.UpdateNotebook(ctx, id, update) -} - -func (l loggingService) DeleteNotebook(ctx context.Context, id platform.ID) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to delete notebook", zap.Error(err), dur) - return - } - l.logger.Debug("notebook delete", dur) - }(time.Now()) - return l.underlying.DeleteNotebook(ctx, id) -} - -func (l loggingService) ListNotebooks(ctx context.Context, filter influxdb.NotebookListFilter) (ns []*influxdb.Notebook, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find notebooks", zap.Error(err), dur) - return - } - l.logger.Debug("notebooks find", dur) - }(time.Now()) - return l.underlying.ListNotebooks(ctx, filter) -} diff --git a/notebooks/middleware_metrics.go b/notebooks/middleware_metrics.go deleted file mode 100644 index c885a08e5e0..00000000000 --- a/notebooks/middleware_metrics.go +++ /dev/null @@ -1,55 +0,0 @@ -package notebooks - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/metric" - "github.com/influxdata/influxdb/v2/kit/platform" 
- "github.com/prometheus/client_golang/prometheus" -) - -func NewMetricCollectingService(reg prometheus.Registerer, underlying influxdb.NotebookService, opts ...metric.ClientOptFn) *metricsService { - o := metric.ApplyMetricOpts(opts...) - return &metricsService{ - rec: metric.New(reg, o.ApplySuffix("notebook")), - underlying: underlying, - } -} - -type metricsService struct { - // RED metrics - rec *metric.REDClient - underlying influxdb.NotebookService -} - -var _ influxdb.NotebookService = (*metricsService)(nil) - -func (m metricsService) GetNotebook(ctx context.Context, id platform.ID) (*influxdb.Notebook, error) { - rec := m.rec.Record("find_notebook_by_id") - nb, err := m.underlying.GetNotebook(ctx, id) - return nb, rec(err) -} - -func (m metricsService) CreateNotebook(ctx context.Context, create *influxdb.NotebookReqBody) (*influxdb.Notebook, error) { - rec := m.rec.Record("create_notebook") - nb, err := m.underlying.CreateNotebook(ctx, create) - return nb, rec(err) -} - -func (m metricsService) UpdateNotebook(ctx context.Context, id platform.ID, update *influxdb.NotebookReqBody) (*influxdb.Notebook, error) { - rec := m.rec.Record("update_notebook") - nb, err := m.underlying.UpdateNotebook(ctx, id, update) - return nb, rec(err) -} - -func (m metricsService) DeleteNotebook(ctx context.Context, id platform.ID) (err error) { - rec := m.rec.Record("delete_notebook") - return rec(m.underlying.DeleteNotebook(ctx, id)) -} - -func (m metricsService) ListNotebooks(ctx context.Context, filter influxdb.NotebookListFilter) ([]*influxdb.Notebook, error) { - rec := m.rec.Record("find_notebooks") - nbs, err := m.underlying.ListNotebooks(ctx, filter) - return nbs, rec(err) -} diff --git a/notebooks/service.go b/notebooks/service.go deleted file mode 100644 index f9a8bf02029..00000000000 --- a/notebooks/service.go +++ /dev/null @@ -1,149 +0,0 @@ -package notebooks - -import ( - "context" - "database/sql" - "errors" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/sqlite" -) - -var _ influxdb.NotebookService = (*Service)(nil) - -type Service struct { - store *sqlite.SqlStore - idGenerator platform.IDGenerator -} - -func NewService(store *sqlite.SqlStore) *Service { - return &Service{ - store: store, - idGenerator: snowflake.NewIDGenerator(), - } -} - -func (s *Service) GetNotebook(ctx context.Context, id platform.ID) (*influxdb.Notebook, error) { - var n influxdb.Notebook - - query := ` - SELECT id, org_id, name, spec, created_at, updated_at - FROM notebooks WHERE id = $1` - - if err := s.store.DB.GetContext(ctx, &n, query, id); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, influxdb.ErrNotebookNotFound - } - - return nil, err - } - - return &n, nil -} - -// CreateNotebook creates a notebook. Note that this and all "write" operations on the database need to use the Mutex lock, -// since sqlite can only handle 1 concurrent write operation at a time. 
-func (s *Service) CreateNotebook(ctx context.Context, create *influxdb.NotebookReqBody) (*influxdb.Notebook, error) { - s.store.Mu.Lock() - defer s.store.Mu.Unlock() - - nowTime := time.Now().UTC() - n := influxdb.Notebook{ - ID: s.idGenerator.ID(), - OrgID: create.OrgID, - Name: create.Name, - Spec: create.Spec, - CreatedAt: nowTime, - UpdatedAt: nowTime, - } - - query := ` - INSERT INTO notebooks (id, org_id, name, spec, created_at, updated_at) - VALUES (:id, :org_id, :name, :spec, :created_at, :updated_at)` - - _, err := s.store.DB.NamedExecContext(ctx, query, &n) - if err != nil { - return nil, err - } - - // Ideally, the create query would use "RETURNING" in order to avoid making a separate query. - // Unfortunately this breaks the scanning of values into the result struct, so we have to make a separate - // SELECT request to return the result from the database. - return s.GetNotebook(ctx, n.ID) -} - -// UpdateNotebook updates a notebook. -func (s *Service) UpdateNotebook(ctx context.Context, id platform.ID, update *influxdb.NotebookReqBody) (*influxdb.Notebook, error) { - s.store.Mu.Lock() - defer s.store.Mu.Unlock() - - nowTime := time.Now().UTC() - n := influxdb.Notebook{ - ID: id, - OrgID: update.OrgID, - Name: update.Name, - Spec: update.Spec, - UpdatedAt: nowTime, - } - - query := ` - UPDATE notebooks SET org_id = :org_id, name = :name, spec = :spec, updated_at = :updated_at - WHERE id = :id` - - _, err := s.store.DB.NamedExecContext(ctx, query, &n) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, influxdb.ErrNotebookNotFound - } - - return nil, err - } - - return s.GetNotebook(ctx, n.ID) -} - -// DeleteNotebook deletes a notebook. -func (s *Service) DeleteNotebook(ctx context.Context, id platform.ID) error { - s.store.Mu.Lock() - defer s.store.Mu.Unlock() - - query := ` - DELETE FROM notebooks - WHERE id = $1` - - res, err := s.store.DB.ExecContext(ctx, query, id.String()) - if err != nil { - return err - } - - r, err := res.RowsAffected() - if err != nil { - return err - } - - if r == 0 { - return influxdb.ErrNotebookNotFound - } - - return nil -} - -// ListNotebooks lists notebooks matching the provided filter. Currently, only org_id is used in the filter. -// Future uses may support pagination via this filter as well. 
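// For example (illustrative, mirroring the usage in service_test.go):
//
//	ns, err := svc.ListNotebooks(ctx, influxdb.NotebookListFilter{OrgID: orgID})
//
// returns every notebook belonging to orgID, or an empty slice when none match.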
-func (s *Service) ListNotebooks(ctx context.Context, filter influxdb.NotebookListFilter) ([]*influxdb.Notebook, error) { - ns := []*influxdb.Notebook{} - - query := ` - SELECT id, org_id, name, spec, created_at, updated_at - FROM notebooks - WHERE org_id = $1` - - if err := s.store.DB.SelectContext(ctx, &ns, query, filter.OrgID); err != nil { - return nil, err - } - - return ns, nil -} diff --git a/notebooks/service_test.go b/notebooks/service_test.go deleted file mode 100644 index 7fa54261f91..00000000000 --- a/notebooks/service_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package notebooks - -import ( - "context" - "fmt" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/sqlite" - "github.com/influxdata/influxdb/v2/sqlite/migrations" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -var ( - idGen = snowflake.NewIDGenerator() -) - -func TestCreateAndGetNotebook(t *testing.T) { - t.Parallel() - - svc := newTestService(t) - ctx := context.Background() - - // getting an invalid id should return an error - got, err := svc.GetNotebook(ctx, idGen.ID()) - require.Nil(t, got) - require.ErrorIs(t, influxdb.ErrNotebookNotFound, err) - - testCreate := &influxdb.NotebookReqBody{ - OrgID: idGen.ID(), - Name: "some name", - Spec: map[string]interface{}{"hello": "goodbye"}, - } - - // create a notebook and assert the results - gotCreate, err := svc.CreateNotebook(ctx, testCreate) - require.NoError(t, err) - gotCreateBody := &influxdb.NotebookReqBody{ - OrgID: gotCreate.OrgID, - Name: gotCreate.Name, - Spec: gotCreate.Spec, - } - require.Equal(t, testCreate, gotCreateBody) - - // get the notebook with the ID that was created and assert the results - gotGet, err := svc.GetNotebook(ctx, gotCreate.ID) - require.NoError(t, err) - gotGetBody := &influxdb.NotebookReqBody{ - OrgID: gotGet.OrgID, - Name: gotGet.Name, - Spec: gotGet.Spec, - } - require.Equal(t, testCreate, gotGetBody) -} - -func TestUpdate(t *testing.T) { - t.Parallel() - - svc := newTestService(t) - ctx := context.Background() - - testCreate := &influxdb.NotebookReqBody{ - OrgID: idGen.ID(), - Name: "some name", - Spec: map[string]interface{}{"hello": "goodbye"}, - } - - testUpdate := &influxdb.NotebookReqBody{ - OrgID: testCreate.OrgID, - Name: "a new name", - Spec: map[string]interface{}{"aloha": "aloha"}, - } - - // attempting to update a non-existant notebook should return an error - got, err := svc.UpdateNotebook(ctx, idGen.ID(), testUpdate) - require.Nil(t, got) - require.ErrorIs(t, influxdb.ErrNotebookNotFound, err) - - // create the notebook so updating it can be tested - gotCreate, err := svc.CreateNotebook(ctx, testCreate) - require.NoError(t, err) - gotCreateBody := &influxdb.NotebookReqBody{ - OrgID: gotCreate.OrgID, - Name: gotCreate.Name, - Spec: gotCreate.Spec, - } - require.Equal(t, testCreate, gotCreateBody) - - // try to update the notebook and assert the results - gotUpdate, err := svc.UpdateNotebook(ctx, gotCreate.ID, testUpdate) - require.NoError(t, err) - gotUpdateBody := &influxdb.NotebookReqBody{ - OrgID: gotUpdate.OrgID, - Name: gotUpdate.Name, - Spec: gotUpdate.Spec, - } - - require.Equal(t, testUpdate, gotUpdateBody) - require.Equal(t, gotCreate.ID, gotUpdate.ID) - require.Equal(t, gotCreate.CreatedAt, gotUpdate.CreatedAt) - require.NotEqual(t, gotUpdate.CreatedAt, gotUpdate.UpdatedAt) -} - -func TestDelete(t *testing.T) { - t.Parallel() - - svc := newTestService(t) - ctx := 
context.Background() - - // attempting to delete a non-existant notebook should return an error - err := svc.DeleteNotebook(ctx, idGen.ID()) - fmt.Println(err) - require.ErrorIs(t, influxdb.ErrNotebookNotFound, err) - - testCreate := &influxdb.NotebookReqBody{ - OrgID: idGen.ID(), - Name: "some name", - Spec: map[string]interface{}{"hello": "goodbye"}, - } - - // create the notebook that we are going to try to delete - gotCreate, err := svc.CreateNotebook(ctx, testCreate) - require.NoError(t, err) - gotCreateBody := &influxdb.NotebookReqBody{ - OrgID: gotCreate.OrgID, - Name: gotCreate.Name, - Spec: gotCreate.Spec, - } - require.Equal(t, testCreate, gotCreateBody) - - // should be able to successfully delete the notebook now - err = svc.DeleteNotebook(ctx, gotCreate.ID) - require.NoError(t, err) - - // ensure the notebook no longer exists - _, err = svc.GetNotebook(ctx, gotCreate.ID) - require.ErrorIs(t, influxdb.ErrNotebookNotFound, err) -} - -func TestList(t *testing.T) { - t.Parallel() - - svc := newTestService(t) - ctx := context.Background() - - orgID := idGen.ID() - - // selecting with no matches for org_id should return an empty list and no error - got, err := svc.ListNotebooks(ctx, influxdb.NotebookListFilter{OrgID: orgID}) - require.NoError(t, err) - require.Equal(t, []*influxdb.Notebook{}, got) - - // create some notebooks to test the list operation with - creates := []*influxdb.NotebookReqBody{ - { - OrgID: orgID, - Name: "some name", - Spec: map[string]interface{}{"hello": "goodbye"}, - }, - { - OrgID: orgID, - Name: "another name", - Spec: map[string]interface{}{"aloha": "aloha"}, - }, - { - OrgID: orgID, - Name: "some name", - Spec: map[string]interface{}{"hola": "adios"}, - }, - } - - for _, c := range creates { - _, err := svc.CreateNotebook(ctx, c) - require.NoError(t, err) - } - - // there should now be notebooks returned from ListNotebooks - got, err = svc.ListNotebooks(ctx, influxdb.NotebookListFilter{OrgID: orgID}) - require.NoError(t, err) - require.Equal(t, len(creates), len(got)) - - // make sure the elements from the returned list were from the list of notebooks to create - for _, n := range got { - require.Contains(t, creates, &influxdb.NotebookReqBody{ - OrgID: n.OrgID, - Name: n.Name, - Spec: n.Spec, - }) - } -} - -func newTestService(t *testing.T) *Service { - store := sqlite.NewTestStore(t) - ctx := context.Background() - - sqliteMigrator := sqlite.NewMigrator(store, zap.NewNop()) - err := sqliteMigrator.Up(ctx, migrations.AllUp) - require.NoError(t, err) - - svc := NewService(store) - - return svc -} diff --git a/notebooks/transport/http.go b/notebooks/transport/http.go deleted file mode 100644 index fead512ca91..00000000000 --- a/notebooks/transport/http.go +++ /dev/null @@ -1,191 +0,0 @@ -package transport - -import ( - "net/http" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -const ( - prefixNotebooks = "/api/v2private/notebooks" - allNotebooksJSONKey = "flows" -) - -var ( - errBadOrg = &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid or missing org id", - } - - errBadId = &errors.Error{ - Code: errors.EInvalid, - Msg: "notebook id is invalid", - } -) - -// NotebookHandler is the handler for the notebook service -type NotebookHandler struct { - 
chi.Router - - log *zap.Logger - api *kithttp.API - - notebookService influxdb.NotebookService -} - -func NewNotebookHandler( - log *zap.Logger, - notebookService influxdb.NotebookService, -) *NotebookHandler { - h := &NotebookHandler{ - log: log, - api: kithttp.NewAPI(kithttp.WithLog(log)), - notebookService: notebookService, - } - - r := chi.NewRouter() - r.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - - r.Route("/", func(r chi.Router) { - r.Get("/", h.handleGetNotebooks) - r.Post("/", h.handleCreateNotebook) - - r.Route("/{id}", func(r chi.Router) { - r.Get("/", h.handleGetNotebook) - r.Delete("/", h.handleDeleteNotebook) - r.Put("/", h.handleUpdateNotebook) - r.Patch("/", h.handleUpdateNotebook) - }) - }) - - h.Router = r - - return h -} - -func (h *NotebookHandler) Prefix() string { - return prefixNotebooks -} - -// get a list of all notebooks for an org -func (h *NotebookHandler) handleGetNotebooks(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - orgID := r.URL.Query().Get("orgID") - o, err := platform.IDFromString(orgID) - if err != nil { - h.api.Err(w, r, errBadOrg) - return - } - - l, err := h.notebookService.ListNotebooks(ctx, influxdb.NotebookListFilter{OrgID: *o}) - if err != nil { - h.api.Err(w, r, err) - return - } - - p := map[string][]*influxdb.Notebook{ - allNotebooksJSONKey: l, - } - - h.api.Respond(w, r, http.StatusOK, p) -} - -// create a single notebook. -func (h *NotebookHandler) handleCreateNotebook(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - b, err := h.decodeNotebookReqBody(r) - if err != nil { - h.api.Err(w, r, err) - return - } - - n, err := h.notebookService.CreateNotebook(ctx, b) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, n) -} - -// get a single notebook. -func (h *NotebookHandler) handleGetNotebook(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, errBadId) - return - } - - b, err := h.notebookService.GetNotebook(ctx, *id) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, b) -} - -// delete a single notebook. -func (h *NotebookHandler) handleDeleteNotebook(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, errBadId) - return - } - - if err := h.notebookService.DeleteNotebook(ctx, *id); err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusNoContent, nil) -} - -// update a single notebook. 
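// Both PUT /{id} and PATCH /{id} are routed to this handler (see NewNotebookHandler
// above); the request body is decoded and validated identically in either case.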
-func (h *NotebookHandler) handleUpdateNotebook(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, errBadId) - return - } - - b, err := h.decodeNotebookReqBody(r) - if err != nil { - h.api.Err(w, r, err) - return - } - - u, err := h.notebookService.UpdateNotebook(ctx, *id, b) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, u) -} - -func (h *NotebookHandler) decodeNotebookReqBody(r *http.Request) (*influxdb.NotebookReqBody, error) { - b := &influxdb.NotebookReqBody{} - if err := h.api.DecodeJSON(r.Body, b); err != nil { - return nil, err - } - - if err := b.Validate(); err != nil { - return nil, err - } - - return b, nil -} diff --git a/notebooks/transport/http_test.go b/notebooks/transport/http_test.go deleted file mode 100644 index dbbf851aeb9..00000000000 --- a/notebooks/transport/http_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package transport - -import ( - "bytes" - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -var ( - orgStr = "1234123412341234" - orgID, _ = platform.IDFromString(orgStr) - idStr = "4321432143214321" - id, _ = platform.IDFromString(idStr) - testNotebook = &influxdb.Notebook{ - OrgID: *orgID, - ID: *id, - Name: "test notebook", - Spec: influxdb.NotebookSpec{ - "hello": "goodbye", - }, - } - testReqBody = &influxdb.NotebookReqBody{ - OrgID: *orgID, - Name: "Test notebook", - Spec: influxdb.NotebookSpec{ - "hello": "goodbye", - }, - } -) - -func TestNotebookHandler(t *testing.T) { - t.Parallel() - - t.Run("get notebooks happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "GET", ts.URL, nil) - - q := req.URL.Query() - q.Add("orgID", orgStr) - req.URL.RawQuery = q.Encode() - - svc.EXPECT(). - ListNotebooks(gomock.Any(), influxdb.NotebookListFilter{OrgID: *orgID}). - Return([]*influxdb.Notebook{testNotebook}, nil) - - res := doTestRequest(t, req, http.StatusOK, true) - - got := map[string][]*influxdb.Notebook{} - err := json.NewDecoder(res.Body).Decode(&got) - require.NoError(t, err) - require.Equal(t, got[allNotebooksJSONKey], []*influxdb.Notebook{testNotebook}) - }) - - t.Run("create notebook happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "POST", ts.URL, testReqBody) - - svc.EXPECT(). - CreateNotebook(gomock.Any(), testReqBody). - Return(testNotebook, nil) - - res := doTestRequest(t, req, http.StatusOK, true) - - got := &influxdb.Notebook{} - err := json.NewDecoder(res.Body).Decode(got) - require.NoError(t, err) - require.Equal(t, got, testNotebook) - }) - - t.Run("get notebook happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "GET", ts.URL+"/"+idStr, nil) - - svc.EXPECT(). - GetNotebook(gomock.Any(), *id). 
- Return(testNotebook, nil) - - res := doTestRequest(t, req, http.StatusOK, true) - - got := &influxdb.Notebook{} - err := json.NewDecoder(res.Body).Decode(got) - require.NoError(t, err) - require.Equal(t, got, testNotebook) - }) - - t.Run("delete notebook happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "DELETE", ts.URL+"/"+idStr, nil) - - svc.EXPECT(). - DeleteNotebook(gomock.Any(), *id). - Return(nil) - - doTestRequest(t, req, http.StatusNoContent, false) - }) - - t.Run("update notebook happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "PUT", ts.URL+"/"+idStr, testReqBody) - - svc.EXPECT(). - UpdateNotebook(gomock.Any(), *id, testReqBody). - Return(testNotebook, nil) - - res := doTestRequest(t, req, http.StatusOK, true) - - got := &influxdb.Notebook{} - err := json.NewDecoder(res.Body).Decode(got) - require.NoError(t, err) - require.Equal(t, got, testNotebook) - }) - - t.Run("invalid notebook ids return 400", func(t *testing.T) { - methodsWithBody := []string{"PATCH", "PUT"} - methodsNoBody := []string{"GET", "DELETE"} - - for _, m := range methodsWithBody { - t.Run(m+" /notebooks", func(t *testing.T) { - ts, _ := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, m, ts.URL+"/badid", testReqBody) - doTestRequest(t, req, http.StatusBadRequest, false) - }) - } - - for _, m := range methodsNoBody { - t.Run(m+" /notebooks", func(t *testing.T) { - ts, _ := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, m, ts.URL+"/badid", nil) - doTestRequest(t, req, http.StatusBadRequest, false) - }) - } - }) - - t.Run("invalid org id to GET /notebooks returns 400", func(t *testing.T) { - ts, _ := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "GET", ts.URL, nil) - - q := req.URL.Query() - q.Add("orgID", "badid") - req.URL.RawQuery = q.Encode() - - doTestRequest(t, req, http.StatusBadRequest, false) - }) - - t.Run("invalid request body returns 400", func(t *testing.T) { - badBady := &influxdb.NotebookReqBody{ - OrgID: *orgID, - } - - methods := []string{"PUT", "PATCH"} - for _, m := range methods { - t.Run(m+"/notebooks/{id]", func(t *testing.T) { - ts, _ := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, m, ts.URL+"/"+idStr, badBady) - doTestRequest(t, req, http.StatusBadRequest, false) - }) - } - - t.Run("POST /notebooks", func(t *testing.T) { - ts, _ := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "POST", ts.URL+"/", badBady) - doTestRequest(t, req, http.StatusBadRequest, false) - }) - }) -} - -// The svc generated is returned so that the caller can specify the expected -// use of the mock service. 
-func newTestServer(t *testing.T) (*httptest.Server, *mock.MockNotebookService) { - ctrlr := gomock.NewController(t) - svc := mock.NewMockNotebookService(ctrlr) - server := NewNotebookHandler(zaptest.NewLogger(t), svc) - return httptest.NewServer(server), svc -} - -func newTestRequest(t *testing.T, method, path string, body interface{}) *http.Request { - dat, err := json.Marshal(body) - require.NoError(t, err) - - req, err := http.NewRequest(method, path, bytes.NewBuffer(dat)) - require.NoError(t, err) - - req.Header.Add("Content-Type", "application/json") - - return req -} - -func doTestRequest(t *testing.T, req *http.Request, wantCode int, needJSON bool) *http.Response { - res, err := http.DefaultClient.Do(req) - require.NoError(t, err) - require.Equal(t, wantCode, res.StatusCode) - if needJSON { - require.Equal(t, "application/json; charset=utf-8", res.Header.Get("Content-Type")) - } - return res -} diff --git a/notification.go b/notification.go deleted file mode 100644 index 3a15a5d9774..00000000000 --- a/notification.go +++ /dev/null @@ -1,135 +0,0 @@ -package influxdb - -import ( - "context" - "encoding/json" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// NotificationRule is a *Query* of a *Status Bucket* that returns the *Status*. -// When warranted by the rules, sends a *Message* to a 3rd Party -// using the *Notification Endpoint* and stores a receipt in the *Notifications Bucket*. -type NotificationRule interface { - Valid() error - Type() string - json.Marshaler - CRUDLogSetter - SetID(id platform.ID) - SetOrgID(id platform.ID) - SetName(name string) - SetDescription(description string) - GetID() platform.ID - GetCRUDLog() CRUDLog - GetOrgID() platform.ID - GetName() string - GetDescription() string - SetOwnerID(id platform.ID) - ClearPrivateData() - GetOwnerID() platform.ID - SetTaskID(id platform.ID) - GetTaskID() platform.ID - GetEndpointID() platform.ID - GetLimit() *Limit - GenerateFlux(NotificationEndpoint) (string, error) - MatchesTags(tags []Tag) bool -} - -// NotificationRuleStore represents a service for managing notification rule. -type NotificationRuleStore interface { - // FindNotificationRuleByID returns a single notification rule by ID. - FindNotificationRuleByID(ctx context.Context, id platform.ID) (NotificationRule, error) - - // FindNotificationRules returns a list of notification rules that match filter and the total count of matching notification rules. - // Additional options provide pagination & sorting. - FindNotificationRules(ctx context.Context, filter NotificationRuleFilter, opt ...FindOptions) ([]NotificationRule, int, error) - - // CreateNotificationRule creates a new notification rule and sets b.ID with the new identifier. - CreateNotificationRule(ctx context.Context, nr NotificationRuleCreate, userID platform.ID) error - - // UpdateNotificationRuleUpdateNotificationRule updates a single notification rule. - // Returns the new notification rule after update. - UpdateNotificationRule(ctx context.Context, id platform.ID, nr NotificationRuleCreate, userID platform.ID) (NotificationRule, error) - - // PatchNotificationRule updates a single notification rule with changeset. - // Returns the new notification rule state after update. - PatchNotificationRule(ctx context.Context, id platform.ID, upd NotificationRuleUpdate) (NotificationRule, error) - - // DeleteNotificationRule removes a notification rule by ID. 
- DeleteNotificationRule(ctx context.Context, id platform.ID) error -} - -// Limit don't notify me more than times every seconds. -// If set, limit cannot be empty. -type Limit struct { - Rate int `json:"limit,omitempty"` - // every seconds. - Every int `json:"limitEvery,omitempty"` -} - -// NotificationRuleFilter represents a set of filter that restrict the returned notification rules. -type NotificationRuleFilter struct { - OrgID *platform.ID - Organization *string - Tags []Tag - UserResourceMappingFilter -} - -// QueryParams Converts NotificationRuleFilter fields to url query params. -func (f NotificationRuleFilter) QueryParams() map[string][]string { - qp := map[string][]string{} - - if f.OrgID != nil { - qp["orgID"] = []string{f.OrgID.String()} - } - - if f.Organization != nil { - qp["org"] = []string{*f.Organization} - } - - qp["tag"] = []string{} - for _, tp := range f.Tags { - qp["tag"] = append(qp["tag"], tp.QueryParam()) - } - - return qp -} - -// NotificationRuleCreate is the struct providing data to create a Notification Rule. -type NotificationRuleCreate struct { - NotificationRule - Status Status `json:"status"` -} - -// NotificationRuleUpdate is the set of upgrade fields for patch request. -type NotificationRuleUpdate struct { - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - Status *Status `json:"status,omitempty"` -} - -// Valid will verify if the NotificationRuleUpdate is valid. -func (n *NotificationRuleUpdate) Valid() error { - if n.Name != nil && *n.Name == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Notification Rule Name can't be empty", - } - } - - if n.Description != nil && *n.Description == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Notification Rule Description can't be empty", - } - } - - if n.Status != nil { - if err := n.Status.Valid(); err != nil { - return err - } - } - - return nil -} diff --git a/notification/check/check.go b/notification/check/check.go deleted file mode 100644 index 6275385553f..00000000000 --- a/notification/check/check.go +++ /dev/null @@ -1,227 +0,0 @@ -package check - -import ( - "encoding/json" - "fmt" - - "github.com/influxdata/flux/ast" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/flux" - "github.com/influxdata/influxdb/v2/query/fluxlang" -) - -// Base will embed inside a check. -type Base struct { - ID platform.ID `json:"id,omitempty"` - Name string `json:"name"` - Description string `json:"description,omitempty"` - OwnerID platform.ID `json:"ownerID,omitempty"` - OrgID platform.ID `json:"orgID,omitempty"` - Query influxdb.DashboardQuery `json:"query"` - - // Care should be taken to prevent TaskID from being exposed publicly. - TaskID platform.ID `json:"taskID,omitempty"` - // } todo: separate these - // NonCustomCheckBase will embed inside non-custom checks. - // type NonCustomCheckBase struct { - StatusMessageTemplate string `json:"statusMessageTemplate"` - Cron string `json:"cron,omitempty"` - Every *notification.Duration `json:"every,omitempty"` - // Offset represents a delay before execution. 
- // It gets marshalled from a string duration, i.e.: "10s" is 10 seconds - Offset *notification.Duration `json:"offset,omitempty"` - - Tags []influxdb.Tag `json:"tags"` - influxdb.CRUDLog -} - -// Valid returns err if the check is invalid. -func (b Base) Valid(lang fluxlang.FluxLanguageService) error { - if !b.ID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Check ID is invalid", - } - } - if b.Name == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Check Name can't be empty", - } - } - if !b.OrgID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Check OrgID is invalid", - } - } - if b.Every == nil { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Check Every must exist", - } - } - if len(b.Every.Values) == 0 { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Check Every can't be empty", - } - } - if b.Offset != nil && len(b.Offset.Values) == 0 { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Check Offset can't be empty", - } - } - if b.Offset != nil && b.Offset.TimeDuration() >= b.Every.TimeDuration() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Offset should not be equal or greater than the interval", - } - } - for _, tag := range b.Tags { - if err := tag.Valid(); err != nil { - return err - } - } - - return nil -} - -func (b Base) generateFluxASTMessageFunction() ast.Statement { - fn := flux.Function(flux.FunctionParams("r"), flux.String(b.StatusMessageTemplate)) - return flux.DefineVariable("messageFn", fn) -} - -func (b Base) generateTaskOption() ast.Statement { - props := []*ast.Property{} - - props = append(props, flux.Property("name", flux.String(b.Name))) - - if b.Every != nil { - props = append(props, flux.Property("every", (*ast.DurationLiteral)(b.Every))) - } - - if b.Offset != nil { - props = append(props, flux.Property("offset", (*ast.DurationLiteral)(b.Offset))) - } - - return flux.DefineTaskOption(flux.Object(props...)) -} - -func (b Base) generateFluxASTCheckDefinition(checkType string) ast.Statement { - props := append([]*ast.Property{}, flux.Property("_check_id", flux.String(b.ID.String()))) - props = append(props, flux.Property("_check_name", flux.String(b.Name))) - props = append(props, flux.Property("_type", flux.String(checkType))) - - // TODO(desa): eventually tags will be flattened out into the data struct - tagProps := []*ast.Property{} - for _, tag := range b.Tags { - tagProps = append(tagProps, flux.Property(tag.Key, flux.String(tag.Value))) - } - - props = append(props, flux.Property("tags", flux.Object(tagProps...))) - - return flux.DefineVariable("check", flux.Object(props...)) -} - -// GetID implements influxdb.Getter interface. -func (b Base) GetID() platform.ID { - return b.ID -} - -// GetOrgID implements influxdb.Getter interface. -func (b Base) GetOrgID() platform.ID { - return b.OrgID -} - -// GetOwnerID gets the ownerID associated with a Base. -func (b Base) GetOwnerID() platform.ID { - return b.OwnerID -} - -// GetTaskID retrieves the task ID for a check. -func (b Base) GetTaskID() platform.ID { - return b.TaskID -} - -// GetCRUDLog implements influxdb.Getter interface. -func (b Base) GetCRUDLog() influxdb.CRUDLog { - return b.CRUDLog -} - -// GetName implements influxdb.Getter interface. -func (b *Base) GetName() string { - return b.Name -} - -// GetDescription implements influxdb.Getter interface. -func (b *Base) GetDescription() string { - return b.Description -} - -// SetID will set the primary key. 
-func (b *Base) SetID(id platform.ID) { - b.ID = id -} - -// SetOrgID will set the org key. -func (b *Base) SetOrgID(id platform.ID) { - b.OrgID = id -} - -// ClearPrivateData remove any data that we don't want to be exposed publicly. -func (b *Base) ClearPrivateData() { - b.TaskID = 0 -} - -// SetTaskID sets the taskID for a check. -func (b *Base) SetTaskID(id platform.ID) { - b.TaskID = id -} - -// SetOwnerID sets the taskID for a check. -func (b *Base) SetOwnerID(id platform.ID) { - b.OwnerID = id -} - -// SetName implements influxdb.Updator interface. -func (b *Base) SetName(name string) { - b.Name = name -} - -// SetDescription implements influxdb.Updator interface. -func (b *Base) SetDescription(description string) { - b.Description = description -} - -var typeToCheck = map[string](func() influxdb.Check){ - "deadman": func() influxdb.Check { return &Deadman{} }, - "threshold": func() influxdb.Check { return &Threshold{} }, - "custom": func() influxdb.Check { return &Custom{} }, -} - -// UnmarshalJSON will convert -func UnmarshalJSON(b []byte) (influxdb.Check, error) { - var raw struct { - Type string `json:"type"` - } - if err := json.Unmarshal(b, &raw); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unable to detect the check type from json", - } - } - convertedFunc, ok := typeToCheck[raw.Type] - if !ok { - return nil, &errors.Error{ - Msg: fmt.Sprintf("invalid check type %s", raw.Type), - } - } - converted := convertedFunc() - err := json.Unmarshal(b, converted) - return converted, err -} diff --git a/notification/check/check_test.go b/notification/check/check_test.go deleted file mode 100644 index 22a7dd1306d..00000000000 --- a/notification/check/check_test.go +++ /dev/null @@ -1,283 +0,0 @@ -package check_test - -import ( - "encoding/json" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/ast/astutil" - "github.com/influxdata/flux/parser" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/query/fluxlang" - influxTesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" -) - -const ( - id1 = "020f755c3c082000" - id2 = "020f755c3c082001" - id3 = "020f755c3c082002" -) - -var goodBase = check.Base{ - ID: influxTesting.MustIDBase16(id1), - Name: "name1", - OwnerID: influxTesting.MustIDBase16(id2), - OrgID: influxTesting.MustIDBase16(id3), - StatusMessageTemplate: "temp1", - Every: mustDuration("1m"), - Tags: []influxdb.Tag{ - {Key: "k1", Value: "v1"}, - {Key: "k2", Value: "v2"}, - }, -} - -func TestValidCheck(t *testing.T) { - cases := []struct { - name string - src influxdb.Check - err error - }{ - { - name: "invalid check id", - src: &check.Deadman{}, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "Check ID is invalid", - }, - }, - { - name: "empty name", - src: &check.Threshold{ - Base: check.Base{ - ID: influxTesting.MustIDBase16(id1), - }, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "Check Name can't be empty", - }, - }, - { - name: "invalid org id", - src: &check.Threshold{ - Base: check.Base{ - ID: influxTesting.MustIDBase16(id1), - Name: "name1", - 
OwnerID: influxTesting.MustIDBase16(id2), - }, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "Check OrgID is invalid", - }, - }, - { - name: "nil every", - src: &check.Deadman{ - Base: check.Base{ - ID: influxTesting.MustIDBase16(id1), - Name: "name1", - OwnerID: influxTesting.MustIDBase16(id2), - OrgID: influxTesting.MustIDBase16(id3), - StatusMessageTemplate: "temp1", - Tags: []influxdb.Tag{{Key: "key1"}}, - }, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "Check Every must exist", - }, - }, - { - name: "offset greater then interval", - src: &check.Deadman{ - Base: check.Base{ - ID: influxTesting.MustIDBase16(id1), - Name: "name1", - OwnerID: influxTesting.MustIDBase16(id2), - OrgID: influxTesting.MustIDBase16(id3), - Every: mustDuration("1m"), - Offset: mustDuration("2m"), - }, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "Offset should not be equal or greater than the interval", - }, - }, - { - name: "invalid tag", - src: &check.Deadman{ - Base: check.Base{ - ID: influxTesting.MustIDBase16(id1), - Name: "name1", - OwnerID: influxTesting.MustIDBase16(id2), - OrgID: influxTesting.MustIDBase16(id3), - StatusMessageTemplate: "temp1", - Every: mustDuration("1m"), - Tags: []influxdb.Tag{{Key: "key1"}}, - }, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "tag must contain a key and a value", - }, - }, - { - name: "bad threshold", - src: &check.Threshold{ - Base: goodBase, - Thresholds: []check.ThresholdConfig{ - &check.Range{Min: 200, Max: 100}, - }, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "range threshold min can't be larger than max", - }, - }, - } - for _, c := range cases { - got := c.src.Valid(fluxlang.DefaultService) - influxTesting.ErrorsEqual(t, got, c.err) - } -} - -var timeGen1 = mock.TimeGenerator{FakeValue: time.Date(2006, time.July, 13, 4, 19, 10, 0, time.UTC)} -var timeGen2 = mock.TimeGenerator{FakeValue: time.Date(2006, time.July, 14, 5, 23, 53, 10, time.UTC)} - -func mustDuration(d string) *notification.Duration { - dur, err := parser.ParseDuration(d) - if err != nil { - panic(err) - } - - return (*notification.Duration)(dur) -} - -func TestJSON(t *testing.T) { - cases := []struct { - name string - src influxdb.Check - }{ - { - name: "simple Deadman", - src: &check.Deadman{ - Base: check.Base{ - ID: influxTesting.MustIDBase16(id1), - OwnerID: influxTesting.MustIDBase16(id2), - Name: "name1", - OrgID: influxTesting.MustIDBase16(id3), - Every: mustDuration("1h"), - Query: influxdb.DashboardQuery{ - BuilderConfig: influxdb.BuilderConfig{ - Buckets: []string{}, - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{}, - Functions: []struct { - Name string `json:"name"` - }{}, - }, - }, - Tags: []influxdb.Tag{ - { - Key: "k1", - Value: "v1", - }, - { - Key: "k2", - Value: "v2", - }, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - TimeSince: mustDuration("33s"), - ReportZero: true, - Level: notification.Warn, - }, - }, - { - name: "simple threshold", - src: &check.Threshold{ - Base: check.Base{ - ID: influxTesting.MustIDBase16(id1), - Name: "name1", - OwnerID: influxTesting.MustIDBase16(id2), - OrgID: influxTesting.MustIDBase16(id3), - Every: mustDuration("1h"), - Query: influxdb.DashboardQuery{ - BuilderConfig: influxdb.BuilderConfig{ - Buckets: []string{}, - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string 
`json:"aggregateFunctionType"` - }{}, - Functions: []struct { - Name string `json:"name"` - }{}, - }, - }, - Tags: []influxdb.Tag{ - { - Key: "k1", - Value: "v1", - }, - { - Key: "k2", - Value: "v2", - }, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Thresholds: []check.ThresholdConfig{ - &check.Greater{ThresholdConfigBase: check.ThresholdConfigBase{AllValues: true}, Value: -1.36}, - &check.Range{Min: -10000, Max: 500}, - &check.Lesser{ThresholdConfigBase: check.ThresholdConfigBase{Level: notification.Critical}}, - }, - }, - }, - } - for _, c := range cases { - fn := func(t *testing.T) { - b, err := json.Marshal(c.src) - if err != nil { - t.Fatalf("%s marshal failed, err: %s", c.name, err.Error()) - } - got, err := check.UnmarshalJSON(b) - if err != nil { - t.Fatalf("%s unmarshal failed, err: %s", c.name, err.Error()) - } - if diff := cmp.Diff(got, c.src, cmpopts.IgnoreFields(notification.Duration{}, "BaseNode")); diff != "" { - t.Errorf("failed %s, Check are different -got/+want\ndiff %s", c.name, diff) - } - } - t.Run(c.name, fn) - } -} - -func mustFormatPackage(t *testing.T, pkg *ast.Package) string { - if len(pkg.Files) == 0 { - t.Fatal("package expected to have at least one file") - } - v, err := astutil.Format(pkg.Files[0]) - require.NoError(t, err) - return v -} diff --git a/notification/check/custom.go b/notification/check/custom.go deleted file mode 100644 index 6c4511a6fb9..00000000000 --- a/notification/check/custom.go +++ /dev/null @@ -1,345 +0,0 @@ -package check - -import ( - "encoding/json" - "time" - - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/ast/astutil" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/notification/flux" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/fluxlang" -) - -var _ influxdb.Check = &Custom{} - -// Custom is the custom check. 
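// Unlike Deadman and Threshold, a Custom check carries a user-supplied Flux script
// verbatim in Query.Text: GenerateFlux returns that text as-is, and Valid verifies that
// the script declares the expected `task` option and `check` object before rewriting
// the `_check_id` property to match the check's own ID.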
-type Custom struct { - ID platform.ID `json:"id,omitempty"` - Name string `json:"name"` - Description string `json:"description,omitempty"` - OwnerID platform.ID `json:"ownerID,omitempty"` - OrgID platform.ID `json:"orgID,omitempty"` - Query influxdb.DashboardQuery `json:"query"` - TaskID platform.ID `json:"taskID,omitempty"` - CreatedAt time.Time `json:"createdAt"` - UpdatedAt time.Time `json:"updatedAt"` -} - -// flux example for threshold check for reference: - -// package main -// import "influxdata/influxdb/monitor" -// import "influxdata/influxdb/v1" - -// data = from(bucket: "_tasks") -// |> range(start: -1m) -// |> filter(fn: (r) => r._measurement == "runs") -// |> filter(fn: (r) => r._field == "finishedAt") -// |> aggregateWindow(every: 1m fn: mean, createEmpty: false) - -// option task = { -// name: "Name this Check", -// every: 1m, -// offset: 0s -// } - -// check = { -// _check_id: "undefined", -// _check_name: "Name this Check", -// _type: "custom", -// tags: {a: "b",c: "d"} -// } - -// warn = (r) =>(r.finishedAt> 20) -// crit = (r) =>(r.finishedAt> 20) -// info = (r) =>(r.finishedAt> 20) - -// messageFn = (r) =>("Check: ${ r._check_name } is: ${ r._level }") - -// data -// |> v1.fieldsAsCols() -// |> monitor.check(data: check, messageFn:messageFn, warn:warn, crit:crit, info:info) - -// GenerateFlux returns the check query text directly -func (c Custom) GenerateFlux(lang fluxlang.FluxLanguageService) (string, error) { - return c.Query.Text, nil -} - -// sanitizeFlux modifies the check query text to include correct _check_id param in check object -func (c Custom) sanitizeFlux(lang fluxlang.FluxLanguageService) (string, error) { - p, err := query.Parse(lang, c.Query.Text) - if p == nil { - return "", err - } else if errs := ast.GetErrors(p); len(errs) != 0 { - return "", multiError(errs) - } - - ast.Visit(p, func(n ast.Node) { - if variableAssign, ok := n.(*ast.VariableAssignment); ok && variableAssign.ID.Name == "check" { - if objectExp, ok := variableAssign.Init.(*ast.ObjectExpression); ok { - idx := -1 - for i, prop := range objectExp.Properties { - if prop.Key.Key() == "_check_id" { - idx = i - break - } - } - - idProp := flux.Property("_check_id", flux.String(c.ID.String())) - if idx >= 0 { - objectExp.Properties[idx] = idProp - } else { - objectExp.Properties = append(objectExp.Properties, idProp) - } - } - } - }) - - return astutil.Format(p.Files[0]) -} - -func propertyHasValue(prop *ast.Property, key string, value string) bool { - stringLit, ok := prop.Value.(*ast.StringLiteral) - return ok && prop.Key.Key() == key && stringLit.Value == value -} - -func (c *Custom) hasRequiredTaskOptions(lang fluxlang.FluxLanguageService) (err error) { - - p, err := query.Parse(lang, c.Query.Text) - if p == nil { - return err - } - - hasOptionTask := false - hasName := false - nameMatchesCheck := false - hasEvery := false - hasOffset := false - - ast.Visit(p, func(n ast.Node) { - if option, ok := n.(*ast.OptionStatement); ok { - if variableAssign, ok := option.Assignment.(*ast.VariableAssignment); ok && variableAssign.ID.Name == "task" { - hasOptionTask = true - if objectExp, ok := variableAssign.Init.(*ast.ObjectExpression); ok { - for _, prop := range objectExp.Properties { - if prop.Key.Key() == "name" { - hasName = true - if propertyHasValue(prop, "name", c.Name) { - nameMatchesCheck = true - } - } - if prop.Key.Key() == "every" { - hasEvery = true - } - if prop.Key.Key() == "offset" { - hasOffset = true - } - } - } - } - } - }) - if !hasOptionTask { - return &errors.Error{ - Code: 
errors.EInvalid, - Msg: "Custom flux missing task option statement", - } - } - if !hasName { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Custom flux missing name parameter from task option statement", - } - } - if hasName && !nameMatchesCheck { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Name parameter from task option statement must match check name", - } - } - if !hasEvery { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Custom flux missing every parameter from task option statement", - } - } - if !hasOffset { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Custom flux missing offset parameter from task option statement", - } - } - return nil -} - -func (c *Custom) hasRequiredCheckParameters(lang fluxlang.FluxLanguageService) (err error) { - p, err := query.Parse(lang, c.Query.Text) - if p == nil { - return err - } - - hasCheckObject := false - checkNameMatches := false - checkTypeIsCustom := false - - ast.Visit(p, func(n ast.Node) { - if variableAssign, ok := n.(*ast.VariableAssignment); ok && variableAssign.ID.Name == "check" { - hasCheckObject = true - if objectExp, ok := variableAssign.Init.(*ast.ObjectExpression); ok { - for _, prop := range objectExp.Properties { - if propertyHasValue(prop, "_check_name", c.Name) { - checkNameMatches = true - } - if propertyHasValue(prop, "_type", "custom") { - checkTypeIsCustom = true - } - } - } - } - }) - - if !hasCheckObject { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Custom flux must have an object called 'check'", - } - } - if !checkNameMatches { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "_check_name parameter on check object must match check name", - } - } - if !checkTypeIsCustom { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "_type parameter on check object must be set to 'custom'", - } - } - return nil -} - -// Valid checks whether check flux is valid, returns error if invalid -func (c *Custom) Valid(lang fluxlang.FluxLanguageService) error { - - if err := c.hasRequiredCheckParameters(lang); err != nil { - return err - } - - if err := c.hasRequiredTaskOptions(lang); err != nil { - return err - } - - // add or replace _check_id parameter on the check object - script, err := c.sanitizeFlux(lang) - if err != nil { - return err - } - - c.Query.Text = script - - return nil -} - -type customAlias Custom - -// MarshalJSON implement json.Marshaler interface. -func (c Custom) MarshalJSON() ([]byte, error) { - return json.Marshal( - struct { - customAlias - Type string `json:"type"` - }{ - customAlias: customAlias(c), - Type: c.Type(), - }) -} - -// Type returns the type of the check. -func (c Custom) Type() string { - return "custom" -} - -// ClearPrivateData remove any data that we don't want to be exposed publicly. -func (c *Custom) ClearPrivateData() { - c.TaskID = 0 -} - -// SetTaskID sets the taskID for a check. -func (c *Custom) SetTaskID(id platform.ID) { - c.TaskID = id -} - -// GetTaskID retrieves the task ID for a check. -func (c *Custom) GetTaskID() platform.ID { - return c.TaskID -} - -// GetOwnerID gets the ownerID associated with a Check. -func (c *Custom) GetOwnerID() platform.ID { - return c.OwnerID -} - -// SetOwnerID sets the taskID for a check. 
-func (c *Custom) SetOwnerID(id platform.ID) { - c.OwnerID = id -} - -// SetCreatedAt sets the creation time for a check -func (c *Custom) SetCreatedAt(now time.Time) { - c.CreatedAt = now -} - -// SetUpdatedAt sets the update time for a check -func (c *Custom) SetUpdatedAt(now time.Time) { - c.UpdatedAt = now -} - -// SetID sets the primary key for a check -func (c *Custom) SetID(id platform.ID) { - c.ID = id -} - -// SetOrgID is SetOrgID -func (c *Custom) SetOrgID(id platform.ID) { - c.OrgID = id -} - -// SetName implements influxdb.Updator interface -func (c *Custom) SetName(name string) { - c.Name = name -} - -// SetDescription is SetDescription -func (c *Custom) SetDescription(description string) { - c.Description = description -} - -// GetID is GetID -func (c *Custom) GetID() platform.ID { - return c.ID -} - -// GetCRUDLog gets crudLog -func (c *Custom) GetCRUDLog() influxdb.CRUDLog { - return influxdb.CRUDLog{CreatedAt: c.CreatedAt, UpdatedAt: c.UpdatedAt} -} - -// GetOrgID gets the orgID associated with the Check -func (c *Custom) GetOrgID() platform.ID { - return c.OrgID -} - -// GetName implements influxdb.Getter interface. -func (c *Custom) GetName() string { - return c.Name -} - -// GetDescription is GetDescription -func (c *Custom) GetDescription() string { - return c.Description -} diff --git a/notification/check/custom_test.go b/notification/check/custom_test.go deleted file mode 100644 index dcd35905f6b..00000000000 --- a/notification/check/custom_test.go +++ /dev/null @@ -1,189 +0,0 @@ -package check_test - -import ( - "errors" - "fmt" - "testing" - - "github.com/andreyvit/diff" - "github.com/influxdata/flux/parser" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/query/fluxlang" -) - -func TestCheck_Valid(t *testing.T) { - type args struct { - custom *check.Custom - } - type wants struct { - err error - script string - } - - validQuery := `package main -import "influxdata/influxdb/monitor" -import "influxdata/influxdb/v1" - -data = from(bucket: "_tasks") -|> range(start: -1m) -|> filter(fn: (r) => r._measurement == "runs") -|> filter(fn: (r) => r._field == "finishedAt") -|> aggregateWindow(every: 1m, fn: mean, createEmpty: false) - -option task = {name: "moo", every: 1m, offset: 0s} - -check = { - _check_id: "%s", - _check_name: "moo", - _type: "custom", - tags: {a: "b", c: "d"}, -} -warn = (r) => - (r.finishedAt > 20) -crit = (r) => - (r.finishedAt > 20) -info = (r) => - (r.finishedAt > 20) -messageFn = (r) => - ("Check: ${r._check_name} is: ${r._level}") - -data - |> v1.fieldsAsCols() - |> monitor.check( - data: check, - messageFn: messageFn, - warn: warn, - crit: crit, - info: info, - )` - - invalidTaskQuery := `package main - import "influxdata/influxdb/monitor" - import "influxdata/influxdb/v1" - - data = from(bucket: "_tasks") - |> range(start: -1m) - |> filter(fn: (r) => - (r._measurement == "runs")) - |> filter(fn: (r) => - (r._field == "finishedAt")) - |> aggregateWindow(every: 1m, fn: mean, createEmpty: false) - - check = { - _check_id: "%s", - _check_name: "moo", - _type: "custom", - tags: {a: "b", c: "d"}, - } - warn = (r) => - (r.finishedAt > 20) - crit = (r) => - (r.finishedAt > 20) - info = (r) => - (r.finishedAt > 20) - messageFn = (r) => - ("Check: ${r._check_name} is: ${r._level}") - - data - |> v1.fieldsAsCols() - |> monitor.check( - data: check, - messageFn: messageFn, - warn: warn, - crit: crit, - info: info, - )` - - tests := 
[]struct { - name string - args args - wants wants - }{ - { - name: "valid flux script is valid and unchanged", - args: args{ - custom: &check.Custom{ - ID: 10, - Name: "moo", - Query: influxdb.DashboardQuery{ - Text: mustFormatPackage(t, parser.ParseSource(fmt.Sprintf(validQuery, "000000000000000a"))), - }, - }, - }, - wants: wants{ - err: nil, - script: mustFormatPackage(t, parser.ParseSource(fmt.Sprintf(validQuery, "000000000000000a"))), - }, - }, - { - name: "valid flux script is valid but check ID is replaced if wrong", - args: args{ - custom: &check.Custom{ - ID: 10, - Name: "moo", - Query: influxdb.DashboardQuery{ - Text: mustFormatPackage(t, parser.ParseSource(fmt.Sprintf(validQuery, "000000000000000b"))), - }, - }, - }, - wants: wants{ - err: nil, - script: mustFormatPackage(t, parser.ParseSource(fmt.Sprintf(validQuery, "000000000000000a"))), - }, - }, - { - name: "empty check query returns helpful error", - args: args{ - custom: &check.Custom{ - ID: 10, - Name: "moo", - Query: influxdb.DashboardQuery{ - Text: "", - }, - }, - }, - wants: wants{ - err: errors.New("Custom flux must have an object called 'check'"), - }, - }, - { - name: "Script missing task option receives error that says so", - args: args{ - custom: &check.Custom{ - ID: 10, - Name: "moo", - Query: influxdb.DashboardQuery{ - Text: mustFormatPackage(t, parser.ParseSource(fmt.Sprintf(invalidTaskQuery, "000000000000000b"))), - }, - }, - }, - wants: wants{ - err: errors.New("Custom flux missing task option statement"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - err := tt.args.custom.Valid(fluxlang.DefaultService) - - if exp, got := tt.wants.err, err; exp != nil && got != nil { - // expected error, got error check that they match - if exp.Error() != got.Error() { - t.Errorf("expected:\n%v\n\ngot:\n%v\n", exp, got) - } - } else if (exp == nil || got == nil) && got != exp { - //either exp or got are nil - t.Errorf("expected:\n%v\n\ngot:\n%v\n", exp, got) - } else { - // neither errs are nil check that scripts match - if exp, got := tt.wants.script, tt.args.custom.Query.Text; exp != got { - t.Errorf("\n\nStrings do not match:\n\n%s", diff.LineDiff(exp, got)) - } - } - }) - } - -} diff --git a/notification/check/deadman.go b/notification/check/deadman.go deleted file mode 100644 index bf1bb93e1ad..00000000000 --- a/notification/check/deadman.go +++ /dev/null @@ -1,128 +0,0 @@ -package check - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/ast/astutil" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/flux" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/fluxlang" -) - -var _ influxdb.Check = (*Deadman)(nil) - -// Deadman is the deadman check. -type Deadman struct { - Base - TimeSince *notification.Duration `json:"timeSince,omitempty"` - StaleTime *notification.Duration `json:"staleTime,omitempty"` - // If only zero values reported since time, trigger alert. - // TODO(desa): Is this implemented in Flux? - ReportZero bool `json:"reportZero"` - Level notification.CheckLevel `json:"level"` -} - -// Type returns the type of the check. -func (c Deadman) Type() string { - return "deadman" -} - -// GenerateFlux returns a flux script for the Deadman provided. 
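// The generated script wraps the check's saved query ("data") with v1.fieldsAsCols(),
// monitor.deadman(), and monitor.check() calls; see generateFluxASTChecksFunction below.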
-func (c Deadman) GenerateFlux(lang fluxlang.FluxLanguageService) (string, error) { - f, err := c.GenerateFluxAST(lang) - if err != nil { - return "", err - } - - return astutil.Format(f) -} - -// GenerateFluxAST returns a flux AST for the deadman provided. If there -// are any errors in the flux that the user provided the function will return -// an error for each error found when the script is parsed. -func (c Deadman) GenerateFluxAST(lang fluxlang.FluxLanguageService) (*ast.File, error) { - p, err := query.Parse(lang, c.Query.Text) - if p == nil { - return nil, err - } - removeAggregateWindow(p) - replaceDurationsWithEvery(p, c.StaleTime) - removeStopFromRange(p) - - if errs := ast.GetErrors(p); len(errs) != 0 { - return nil, multiError(errs) - } - - // TODO(desa): this is a hack that we had to do as a result of https://github.com/influxdata/flux/issues/1701 - // when it is fixed we should use a separate file and not manipulate the existing one. - if len(p.Files) != 1 { - return nil, fmt.Errorf("expect a single file to be returned from query parsing got %d", len(p.Files)) - } - - f := p.Files[0] - assignPipelineToData(f) - - f.Imports = append(f.Imports, flux.Imports("influxdata/influxdb/monitor", "experimental", "influxdata/influxdb/v1")...) - f.Body = append(f.Body, c.generateFluxASTBody()...) - - return f, nil -} - -func (c Deadman) generateFluxASTBody() []ast.Statement { - var statements []ast.Statement - statements = append(statements, c.generateTaskOption()) - statements = append(statements, c.generateFluxASTCheckDefinition("deadman")) - statements = append(statements, c.generateLevelFn()) - statements = append(statements, c.generateFluxASTMessageFunction()) - return append(statements, c.generateFluxASTChecksFunction()) -} - -func (c Deadman) generateLevelFn() ast.Statement { - fn := flux.Function(flux.FunctionParams("r"), flux.Member("r", "dead")) - - lvl := strings.ToLower(c.Level.String()) - - return flux.DefineVariable(lvl, fn) -} - -func (c Deadman) generateFluxASTChecksFunction() ast.Statement { - dur := (*ast.DurationLiteral)(c.TimeSince) - now := flux.Call(flux.Identifier("now"), flux.Object()) - sub := flux.Call(flux.Member("experimental", "subDuration"), flux.Object(flux.Property("from", now), flux.Property("d", dur))) - return flux.ExpressionStatement(flux.Pipe( - flux.Identifier("data"), - flux.Call(flux.Member("v1", "fieldsAsCols"), flux.Object()), - flux.Call(flux.Member("monitor", "deadman"), flux.Object(flux.Property("t", sub))), - c.generateFluxASTChecksCall(), - )) -} - -func (c Deadman) generateFluxASTChecksCall() *ast.CallExpression { - objectProps := append(([]*ast.Property)(nil), flux.Property("data", flux.Identifier("check"))) - objectProps = append(objectProps, flux.Property("messageFn", flux.Identifier("messageFn"))) - - // This assumes that the ThresholdConfigs we've been provided do not have duplicates. - lvl := strings.ToLower(c.Level.String()) - objectProps = append(objectProps, flux.Property(lvl, flux.Identifier(lvl))) - - return flux.Call(flux.Member("monitor", "check"), flux.Object(objectProps...)) -} - -type deadmanAlias Deadman - -// MarshalJSON implement json.Marshaler interface. 
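// Marshalling through the deadmanAlias type avoids infinite recursion while adding the
// "type" discriminator that UnmarshalJSON in check.go uses to select the concrete
// check implementation.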
-func (c Deadman) MarshalJSON() ([]byte, error) { - return json.Marshal( - struct { - deadmanAlias - Type string `json:"type"` - }{ - deadmanAlias: deadmanAlias(c), - Type: c.Type(), - }) -} diff --git a/notification/check/deadman_test.go b/notification/check/deadman_test.go deleted file mode 100644 index dc352c879d4..00000000000 --- a/notification/check/deadman_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package check_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/query/fluxlang" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestDeadman_GenerateFlux(t *testing.T) { - type args struct { - deadman check.Deadman - } - type wants struct { - script string - } - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "with aggregateWindow", - args: args{ - deadman: check.Deadman{ - Base: check.Base{ - ID: 10, - Name: "moo", - Tags: []influxdb.Tag{ - {Key: "aaa", Value: "vaaa"}, - {Key: "bbb", Value: "vbbb"}, - }, - Every: mustDuration("1h"), - StatusMessageTemplate: "whoa! {r[\"dead\"]}", - Query: influxdb.DashboardQuery{ - Text: `from(bucket: "foo") |> range(start: -1d, stop: now()) |> aggregateWindow(fn: mean, every: 1m) |> yield()`, - BuilderConfig: influxdb.BuilderConfig{ - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_field", - Values: []string{"usage_user"}, - AggregateFunctionType: "filter", - }, - }, - }, - }, - }, - TimeSince: mustDuration("60s"), - StaleTime: mustDuration("10m"), - Level: notification.Info, - }, - }, - wants: wants{ - script: `import "influxdata/influxdb/monitor" -import "experimental" -import "influxdata/influxdb/v1" - -data = from(bucket: "foo") |> range(start: -10m) - -option task = {name: "moo", every: 1h} - -check = {_check_id: "000000000000000a", _check_name: "moo", _type: "deadman", tags: {aaa: "vaaa", bbb: "vbbb"}} -info = (r) => r["dead"] -messageFn = (r) => "whoa! {r[\"dead\"]}" - -data - |> v1["fieldsAsCols"]() - |> monitor["deadman"](t: experimental["subDuration"](from: now(), d: 60s)) - |> monitor["check"](data: check, messageFn: messageFn, info: info) -`, - }, - }, - { - name: "basic", - args: args{ - deadman: check.Deadman{ - Base: check.Base{ - ID: 10, - Name: "moo", - Tags: []influxdb.Tag{ - {Key: "aaa", Value: "vaaa"}, - {Key: "bbb", Value: "vbbb"}, - }, - Every: mustDuration("1h"), - StatusMessageTemplate: "whoa! 
{r[\"dead\"]}", - Query: influxdb.DashboardQuery{ - Text: `from(bucket: "foo") |> range(start: -1d, stop: now()) |> yield()`, - BuilderConfig: influxdb.BuilderConfig{ - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_field", - Values: []string{"usage_user"}, - AggregateFunctionType: "filter", - }, - }, - }, - }, - }, - TimeSince: mustDuration("60s"), - StaleTime: mustDuration("10m"), - Level: notification.Info, - }, - }, - wants: wants{ - script: `import "influxdata/influxdb/monitor" -import "experimental" -import "influxdata/influxdb/v1" - -data = from(bucket: "foo") |> range(start: -10m) - -option task = {name: "moo", every: 1h} - -check = {_check_id: "000000000000000a", _check_name: "moo", _type: "deadman", tags: {aaa: "vaaa", bbb: "vbbb"}} -info = (r) => r["dead"] -messageFn = (r) => "whoa! {r[\"dead\"]}" - -data - |> v1["fieldsAsCols"]() - |> monitor["deadman"](t: experimental["subDuration"](from: now(), d: 60s)) - |> monitor["check"](data: check, messageFn: messageFn, info: info) -`, - }, - }, - { - name: "basic with space in field name", - args: args{ - deadman: check.Deadman{ - Base: check.Base{ - ID: 10, - Name: "moo", - Tags: []influxdb.Tag{ - {Key: "aaa", Value: "vaaa"}, - {Key: "bbb", Value: "vbbb"}, - }, - Every: mustDuration("1h"), - StatusMessageTemplate: "whoa! {r[\"dead\"]}", - Query: influxdb.DashboardQuery{ - Text: `from(bucket: "foo") |> range(start: -1d, stop: now()) |> filter(fn: (r) => r._field == "usage user") |> yield()`, - BuilderConfig: influxdb.BuilderConfig{ - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_field", - Values: []string{"usage_user"}, - AggregateFunctionType: "filter", - }, - }, - }, - }, - }, - TimeSince: mustDuration("60s"), - StaleTime: mustDuration("10m"), - Level: notification.Info, - }, - }, - wants: wants{ - script: `import "influxdata/influxdb/monitor" -import "experimental" -import "influxdata/influxdb/v1" - -data = from(bucket: "foo") |> range(start: -10m) |> filter(fn: (r) => r._field == "usage user") - -option task = {name: "moo", every: 1h} - -check = {_check_id: "000000000000000a", _check_name: "moo", _type: "deadman", tags: {aaa: "vaaa", bbb: "vbbb"}} -info = (r) => r["dead"] -messageFn = (r) => "whoa! 
{r[\"dead\"]}" - -data - |> v1["fieldsAsCols"]() - |> monitor["deadman"](t: experimental["subDuration"](from: now(), d: 60s)) - |> monitor["check"](data: check, messageFn: messageFn, info: info) -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, err := tt.args.deadman.GenerateFlux(fluxlang.DefaultService) - require.NoError(t, err) - assert.Equal(t, itesting.FormatFluxString(t, tt.wants.script), s) - }) - } - -} diff --git a/notification/check/threshold.go b/notification/check/threshold.go deleted file mode 100644 index 249a1a69cbd..00000000000 --- a/notification/check/threshold.go +++ /dev/null @@ -1,478 +0,0 @@ -package check - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/ast/astutil" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/flux" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/fluxlang" -) - -var _ influxdb.Check = (*Threshold)(nil) - -// Threshold is the threshold check. -type Threshold struct { - Base - Thresholds []ThresholdConfig `json:"thresholds"` -} - -// Type returns the type of the check. -func (t Threshold) Type() string { - return "threshold" -} - -// Valid returns error if something is invalid. -func (t Threshold) Valid(lang fluxlang.FluxLanguageService) error { - if err := t.Base.Valid(lang); err != nil { - return err - } - for _, cc := range t.Thresholds { - if err := cc.Valid(); err != nil { - return err - } - } - return nil -} - -type thresholdDecode struct { - Base - Thresholds []thresholdConfigDecode `json:"thresholds"` -} - -type thresholdConfigDecode struct { - ThresholdConfigBase - Type string `json:"type"` - Value float64 `json:"value"` - Min float64 `json:"min"` - Max float64 `json:"max"` - Within bool `json:"within"` -} - -// UnmarshalJSON implement json.Unmarshaler interface. -func (t *Threshold) UnmarshalJSON(b []byte) error { - tdRaws := new(thresholdDecode) - if err := json.Unmarshal(b, tdRaws); err != nil { - return err - } - t.Base = tdRaws.Base - for _, tdRaw := range tdRaws.Thresholds { - switch tdRaw.Type { - case "lesser": - td := &Lesser{ - ThresholdConfigBase: tdRaw.ThresholdConfigBase, - Value: tdRaw.Value, - } - t.Thresholds = append(t.Thresholds, td) - case "greater": - td := &Greater{ - ThresholdConfigBase: tdRaw.ThresholdConfigBase, - Value: tdRaw.Value, - } - t.Thresholds = append(t.Thresholds, td) - case "range": - td := &Range{ - ThresholdConfigBase: tdRaw.ThresholdConfigBase, - Min: tdRaw.Min, - Max: tdRaw.Max, - Within: tdRaw.Within, - } - t.Thresholds = append(t.Thresholds, td) - default: - return &errors.Error{ - Msg: fmt.Sprintf("invalid threshold type %s", tdRaw.Type), - } - } - } - - return nil -} - -func multiError(errs []error) error { - var b strings.Builder - - for _, err := range errs { - b.WriteString(err.Error() + "\n") - } - - return fmt.Errorf(b.String()) -} - -// GenerateFlux returns a flux script for the threshold provided. If there -// are any errors in the flux that the user provided the function will return -// an error for each error found when the script is parsed. 
-func (t Threshold) GenerateFlux(lang fluxlang.FluxLanguageService) (string, error) { - f, err := t.GenerateFluxAST(lang) - if err != nil { - return "", err - } - - return astutil.Format(f) -} - -// GenerateFluxAST returns a flux AST for the threshold provided. If there -// are any errors in the flux that the user provided the function will return -// an error for each error found when the script is parsed. -func (t Threshold) GenerateFluxAST(lang fluxlang.FluxLanguageService) (*ast.File, error) { - p, err := query.Parse(lang, t.Query.Text) - if p == nil { - return nil, err - } - replaceDurationsWithEvery(p, t.Every) - removeStopFromRange(p) - addCreateEmptyFalseToAggregateWindow(p) - - if errs := ast.GetErrors(p); len(errs) != 0 { - return nil, multiError(errs) - } - - // TODO(desa): this is a hack that we had to do as a result of https://github.com/influxdata/flux/issues/1701 - // when it is fixed we should use a separate file and not manipulate the existing one. - if len(p.Files) != 1 { - return nil, fmt.Errorf("expect a single file to be returned from query parsing got %d", len(p.Files)) - } - - fields := getFields(p) - if len(fields) != 1 { - return nil, fmt.Errorf("expected a single field but got: %s", fields) - } - - f := p.Files[0] - assignPipelineToData(f) - - f.Imports = append(f.Imports, flux.Imports("influxdata/influxdb/monitor", "influxdata/influxdb/v1")...) - f.Body = append(f.Body, t.generateFluxASTBody(fields[0])...) - - return f, nil -} - -// TODO(desa): we'll likely want something slightly more sophisitcated long term, but this should work for now. -func addCreateEmptyFalseToAggregateWindow(pkg *ast.Package) { - ast.Visit(pkg, func(n ast.Node) { - if call, ok := n.(*ast.CallExpression); ok { - if id, ok := call.Callee.(*ast.Identifier); ok && id.Name == "aggregateWindow" { - for _, args := range call.Arguments { - if obj, ok := args.(*ast.ObjectExpression); ok { - foundCreateEmpty := false - for _, props := range obj.Properties { - if props.Key.Key() == "createEmpty" { - foundCreateEmpty = true - break - } - } - if !foundCreateEmpty { - obj.Properties = append(obj.Properties, flux.Property("createEmpty", flux.Bool(false))) - } - } - } - } - } - }) -} - -// TODO(desa): we'll likely want something slightly more sophisitcated long term, but this should work for now. -func replaceDurationsWithEvery(pkg *ast.Package, every *notification.Duration) { - ast.Visit(pkg, func(n ast.Node) { - switch e := n.(type) { - case *ast.Property: - key := e.Key.Key() - newEvery := (ast.DurationLiteral)(*every) - switch key { - case "start": - e.Value = flux.Negative(&newEvery) - case "every": - e.Value = &newEvery - } - } - }) -} - -// TODO(desa): we'll likely want to remove all other arguments to range that are provided, but for now this should work. -// When we decide to implement the full feature we'll have to do something more sophisticated. -func removeStopFromRange(pkg *ast.Package) { - ast.Visit(pkg, func(n ast.Node) { - if call, ok := n.(*ast.CallExpression); ok { - if id, ok := call.Callee.(*ast.Identifier); ok && id.Name == "range" { - for _, args := range call.Arguments { - if obj, ok := args.(*ast.ObjectExpression); ok { - props := obj.Properties[:0] - for _, prop := range obj.Properties { - if prop.Key.Key() == "start" { - props = append(props, prop) - } - } - obj.Properties = props - } - } - } - } - }) -} - -// TODO(desa): we'll likely want to remove all other arguments to range that are provided, but for now this should work. 
-// When we decide to implement the full feature we'll have to do something more sophisticated. -func removeAggregateWindow(pkg *ast.Package) { - ast.Visit(pkg, func(n ast.Node) { - if pipe, ok := n.(*ast.PipeExpression); ok { - if id, ok := pipe.Call.Callee.(*ast.Identifier); ok && id.Name == "aggregateWindow" { - if subPipe, ok := pipe.Argument.(*ast.PipeExpression); ok { - *pipe = *subPipe - } - } - } - }) -} - -func getFields(pkg *ast.Package) []string { - var fields []string - ast.Visit(pkg, func(n ast.Node) { - if fn, ok := n.(*ast.BinaryExpression); ok { - if me, ok := fn.Left.(*ast.MemberExpression); ok { - if me.Property.Key() == "_field" { - if str, ok := fn.Right.(*ast.StringLiteral); ok { - fields = append(fields, str.Value) - } - } - } - } - }) - return fields -} - -func assignPipelineToData(f *ast.File) error { - if len(f.Body) != 1 { - return fmt.Errorf("expected there to be a single statement in the flux script body, received %d", len(f.Body)) - } - - stmt := f.Body[0] - - e, ok := stmt.(*ast.ExpressionStatement) - if !ok { - return fmt.Errorf("statement is not an *ast.Expression statement, received %T", stmt) - } - - exp := e.Expression - - pipe, ok := exp.(*ast.PipeExpression) - if !ok { - return fmt.Errorf("expression is not an *ast.PipeExpression statement, received %T", exp) - } - - if id, ok := pipe.Call.Callee.(*ast.Identifier); ok && id.Name == "yield" { - exp = pipe.Argument - } - - f.Body[0] = flux.DefineVariable("data", exp) - return nil -} - -func (t Threshold) generateFluxASTBody(field string) []ast.Statement { - var statements []ast.Statement - statements = append(statements, t.generateTaskOption()) - statements = append(statements, t.generateFluxASTCheckDefinition("threshold")) - statements = append(statements, t.generateFluxASTThresholdFunctions(field)...) - statements = append(statements, t.generateFluxASTMessageFunction()) - statements = append(statements, t.generateFluxASTChecksFunction()) - return statements -} - -func (t Threshold) generateFluxASTChecksFunction() ast.Statement { - return flux.ExpressionStatement(flux.Pipe( - flux.Identifier("data"), - flux.Call(flux.Member("v1", "fieldsAsCols"), flux.Object()), - t.generateFluxASTChecksCall(), - )) -} - -func (t Threshold) generateFluxASTChecksCall() *ast.CallExpression { - objectProps := append(([]*ast.Property)(nil), flux.Property("data", flux.Identifier("check"))) - objectProps = append(objectProps, flux.Property("messageFn", flux.Identifier("messageFn"))) - - // This assumes that the ThresholdConfigs we've been provided do not have duplicates. - for _, c := range t.Thresholds { - lvl := strings.ToLower(c.GetLevel().String()) - objectProps = append(objectProps, flux.Property(lvl, flux.Identifier(lvl))) - } - - return flux.Call(flux.Member("monitor", "check"), flux.Object(objectProps...)) -} - -func (t Threshold) generateFluxASTThresholdFunctions(field string) []ast.Statement { - thresholdStatements := make([]ast.Statement, len(t.Thresholds)) - - // This assumes that the ThresholdConfigs we've been provided do not have duplicates. 
- for k, v := range t.Thresholds { - thresholdStatements[k] = v.generateFluxASTThresholdFunction(field) - } - return thresholdStatements -} - -func (td Greater) generateFluxASTThresholdFunction(field string) ast.Statement { - fnBody := flux.GreaterThan(flux.Member("r", field), flux.Float(td.Value)) - fn := flux.Function(flux.FunctionParams("r"), fnBody) - - lvl := strings.ToLower(td.Level.String()) - - return flux.DefineVariable(lvl, fn) -} - -func (td Lesser) generateFluxASTThresholdFunction(field string) ast.Statement { - fnBody := flux.LessThan(flux.Member("r", field), flux.Float(td.Value)) - fn := flux.Function(flux.FunctionParams("r"), fnBody) - - lvl := strings.ToLower(td.Level.String()) - - return flux.DefineVariable(lvl, fn) -} - -func (td Range) generateFluxASTThresholdFunction(field string) ast.Statement { - var fnBody *ast.LogicalExpression - if !td.Within { - fnBody = flux.Or( - flux.LessThan(flux.Member("r", field), flux.Float(td.Min)), - flux.GreaterThan(flux.Member("r", field), flux.Float(td.Max)), - ) - } else { - fnBody = flux.And( - flux.LessThan(flux.Member("r", field), flux.Float(td.Max)), - flux.GreaterThan(flux.Member("r", field), flux.Float(td.Min)), - ) - } - - fn := flux.Function(flux.FunctionParams("r"), fnBody) - - lvl := strings.ToLower(td.Level.String()) - - return flux.DefineVariable(lvl, fn) -} - -type thresholdAlias Threshold - -// MarshalJSON implement json.Marshaler interface. -func (t Threshold) MarshalJSON() ([]byte, error) { - return json.Marshal( - struct { - thresholdAlias - Type string `json:"type"` - }{ - thresholdAlias: thresholdAlias(t), - Type: t.Type(), - }) -} - -// ThresholdConfig is the base of all threshold config. -type ThresholdConfig interface { - MarshalJSON() ([]byte, error) - Valid() error - Type() string - generateFluxASTThresholdFunction(string) ast.Statement - GetLevel() notification.CheckLevel -} - -// Valid returns error if something is invalid. -func (b ThresholdConfigBase) Valid() error { - return nil -} - -// ThresholdConfigBase is the base of all threshold config. -type ThresholdConfigBase struct { - // If true, only alert if all values meet threshold. - AllValues bool `json:"allValues"` - Level notification.CheckLevel `json:"level"` -} - -// GetLevel return the check level. -func (b ThresholdConfigBase) GetLevel() notification.CheckLevel { - return b.Level -} - -// Lesser threshold type. -type Lesser struct { - ThresholdConfigBase - Value float64 `json:"value"` -} - -// Type of the threshold config. -func (td Lesser) Type() string { - return "lesser" -} - -// MarshalJSON implement json.Marshaler interface. -func (td Lesser) MarshalJSON() ([]byte, error) { - type lesserAlias Lesser - return json.Marshal( - struct { - lesserAlias - Type string `json:"type"` - }{ - lesserAlias: lesserAlias(td), - Type: "lesser", - }) -} - -// Greater threshold type. -type Greater struct { - ThresholdConfigBase - Value float64 `json:"value"` -} - -// Type of the threshold config. -func (td Greater) Type() string { - return "greater" -} - -// MarshalJSON implement json.Marshaler interface. -func (td Greater) MarshalJSON() ([]byte, error) { - type greaterAlias Greater - return json.Marshal( - struct { - greaterAlias - Type string `json:"type"` - }{ - greaterAlias: greaterAlias(td), - Type: "greater", - }) -} - -// Range threshold type. -type Range struct { - ThresholdConfigBase - Min float64 `json:"min,omitempty"` - Max float64 `json:"max,omitempty"` - Within bool `json:"within"` -} - -// Type of the threshold config. 
-func (td Range) Type() string { - return "range" -} - -// MarshalJSON implement json.Marshaler interface. -func (td Range) MarshalJSON() ([]byte, error) { - type rangeAlias Range - return json.Marshal( - struct { - rangeAlias - Type string `json:"type"` - }{ - rangeAlias: rangeAlias(td), - Type: "range", - }) -} - -// Valid overwrite the base threshold. -func (td Range) Valid() error { - if td.Min > td.Max { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "range threshold min can't be larger than max", - } - } - return nil -} diff --git a/notification/check/threshold_test.go b/notification/check/threshold_test.go deleted file mode 100644 index 0b1ae5697ff..00000000000 --- a/notification/check/threshold_test.go +++ /dev/null @@ -1,365 +0,0 @@ -package check_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/query/fluxlang" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestThreshold_GenerateFlux(t *testing.T) { - type args struct { - threshold check.Threshold - } - type wants struct { - script string - } - - var l float64 = 10 - var u float64 = 40 - - tests := []struct { - name string - args args - wants wants - }{ - { - name: "all levels with yield and stop", - args: args{ - threshold: check.Threshold{ - Base: check.Base{ - ID: 10, - Name: "moo", - Tags: []influxdb.Tag{ - {Key: "aaa", Value: "vaaa"}, - {Key: "bbb", Value: "vbbb"}, - }, - Every: mustDuration("1h"), - StatusMessageTemplate: "whoa! {r[\"usage_user\"]}", - Query: influxdb.DashboardQuery{ - Text: `from(bucket: "foo") |> range(start: -1d, stop: now()) |> filter(fn: (r) => r._field == "usage_user") |> aggregateWindow(every: 1m, fn: mean) |> yield()`, - }, - }, - Thresholds: []check.ThresholdConfig{ - check.Greater{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Ok, - }, - Value: l, - }, - check.Lesser{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Info, - }, - Value: u, - }, - check.Range{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Warn, - }, - Min: l, - Max: u, - Within: true, - }, - check.Range{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Critical, - }, - Min: l, - Max: u, - Within: false, - }, - }, - }, - }, - wants: wants{ - script: `import "influxdata/influxdb/monitor" -import "influxdata/influxdb/v1" - -data = - from(bucket: "foo") - |> range(start: -1h) - |> filter(fn: (r) => r._field == "usage_user") - |> aggregateWindow(every: 1h, fn: mean, createEmpty: false) - -option task = {name: "moo", every: 1h} - -check = {_check_id: "000000000000000a", _check_name: "moo", _type: "threshold", tags: {aaa: "vaaa", bbb: "vbbb"}} -ok = (r) => r["usage_user"] > 10.0 -info = (r) => r["usage_user"] < 40.0 -warn = (r) => r["usage_user"] < 40.0 and r["usage_user"] > 10.0 -crit = (r) => r["usage_user"] < 10.0 or r["usage_user"] > 40.0 -messageFn = (r) => "whoa! 
{r[\"usage_user\"]}" - -data - |> v1["fieldsAsCols"]() - |> monitor["check"]( - data: check, - messageFn: messageFn, - ok: ok, - info: info, - warn: warn, - crit: crit, - ) -`, - }, - }, - { - name: "all levels with yield", - args: args{ - threshold: check.Threshold{ - Base: check.Base{ - ID: 10, - Name: "moo", - Tags: []influxdb.Tag{ - {Key: "aaa", Value: "vaaa"}, - {Key: "bbb", Value: "vbbb"}, - }, - Every: mustDuration("1h"), - StatusMessageTemplate: "whoa! {r[\"usage_user\"]}", - Query: influxdb.DashboardQuery{ - Text: `from(bucket: "foo") |> range(start: -1d) |> filter(fn: (r) => r._field == "usage_user") |> aggregateWindow(every: 1m, fn: mean) |> yield()`, - }, - }, - Thresholds: []check.ThresholdConfig{ - check.Greater{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Ok, - }, - Value: l, - }, - check.Lesser{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Info, - }, - Value: u, - }, - check.Range{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Warn, - }, - Min: l, - Max: u, - Within: true, - }, - check.Range{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Critical, - }, - Min: l, - Max: u, - Within: false, - }, - }, - }, - }, - wants: wants{ - script: `import "influxdata/influxdb/monitor" -import "influxdata/influxdb/v1" - -data = - from(bucket: "foo") - |> range(start: -1h) - |> filter(fn: (r) => r._field == "usage_user") - |> aggregateWindow(every: 1h, fn: mean, createEmpty: false) - -option task = {name: "moo", every: 1h} - -check = {_check_id: "000000000000000a", _check_name: "moo", _type: "threshold", tags: {aaa: "vaaa", bbb: "vbbb"}} -ok = (r) => r["usage_user"] > 10.0 -info = (r) => r["usage_user"] < 40.0 -warn = (r) => r["usage_user"] < 40.0 and r["usage_user"] > 10.0 -crit = (r) => r["usage_user"] < 10.0 or r["usage_user"] > 40.0 -messageFn = (r) => "whoa! {r[\"usage_user\"]}" - -data - |> v1["fieldsAsCols"]() - |> monitor["check"]( - data: check, - messageFn: messageFn, - ok: ok, - info: info, - warn: warn, - crit: crit, - ) -`, - }, - }, - { - name: "all levels with yield and space in field name", - args: args{ - threshold: check.Threshold{ - Base: check.Base{ - ID: 10, - Name: "moo", - Tags: []influxdb.Tag{ - {Key: "aaa", Value: "vaaa"}, - {Key: "bbb", Value: "vbbb"}, - }, - Every: mustDuration("1h"), - StatusMessageTemplate: "whoa! 
{r[\"usage user\"]}", - Query: influxdb.DashboardQuery{ - Text: `from(bucket: "foo") |> range(start: -1d) |> filter(fn: (r) => r._field == "usage user") |> aggregateWindow(every: 1m, fn: mean) |> yield()`, - }, - }, - Thresholds: []check.ThresholdConfig{ - check.Greater{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Ok, - }, - Value: l, - }, - check.Lesser{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Info, - }, - Value: u, - }, - check.Range{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Warn, - }, - Min: l, - Max: u, - Within: true, - }, - check.Range{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Critical, - }, - Min: l, - Max: u, - Within: false, - }, - }, - }, - }, - wants: wants{ - script: `import "influxdata/influxdb/monitor" -import "influxdata/influxdb/v1" - -data = - from(bucket: "foo") - |> range(start: -1h) - |> filter(fn: (r) => r._field == "usage user") - |> aggregateWindow(every: 1h, fn: mean, createEmpty: false) - -option task = {name: "moo", every: 1h} - -check = {_check_id: "000000000000000a", _check_name: "moo", _type: "threshold", tags: {aaa: "vaaa", bbb: "vbbb"}} -ok = (r) => r["usage user"] > 10.0 -info = (r) => r["usage user"] < 40.0 -warn = (r) => r["usage user"] < 40.0 and r["usage user"] > 10.0 -crit = (r) => r["usage user"] < 10.0 or r["usage user"] > 40.0 -messageFn = (r) => "whoa! {r[\"usage user\"]}" - -data - |> v1["fieldsAsCols"]() - |> monitor["check"]( - data: check, - messageFn: messageFn, - ok: ok, - info: info, - warn: warn, - crit: crit, - ) -`, - }, - }, - { - name: "all levels without yield", - args: args{ - threshold: check.Threshold{ - Base: check.Base{ - ID: 10, - Name: "moo", - Tags: []influxdb.Tag{ - {Key: "aaa", Value: "vaaa"}, - {Key: "bbb", Value: "vbbb"}, - }, - Every: mustDuration("1h"), - StatusMessageTemplate: "whoa! {r[\"usage_user\"]}", - Query: influxdb.DashboardQuery{ - Text: `from(bucket: "foo") |> range(start: -1d) |> filter(fn: (r) => r._field == "usage_user") |> aggregateWindow(every: 1m, fn: mean)`, - }, - }, - Thresholds: []check.ThresholdConfig{ - check.Greater{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Ok, - }, - Value: l, - }, - check.Lesser{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Info, - }, - Value: u, - }, - check.Range{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Warn, - }, - Min: l, - Max: u, - Within: true, - }, - check.Range{ - ThresholdConfigBase: check.ThresholdConfigBase{ - Level: notification.Critical, - }, - Min: l, - Max: u, - Within: true, - }, - }, - }, - }, - wants: wants{ - script: `import "influxdata/influxdb/monitor" -import "influxdata/influxdb/v1" - -data = - from(bucket: "foo") - |> range(start: -1h) - |> filter(fn: (r) => r._field == "usage_user") - |> aggregateWindow(every: 1h, fn: mean, createEmpty: false) - -option task = {name: "moo", every: 1h} - -check = {_check_id: "000000000000000a", _check_name: "moo", _type: "threshold", tags: {aaa: "vaaa", bbb: "vbbb"}} -ok = (r) => r["usage_user"] > 10.0 -info = (r) => r["usage_user"] < 40.0 -warn = (r) => r["usage_user"] < 40.0 and r["usage_user"] > 10.0 -crit = (r) => r["usage_user"] < 40.0 and r["usage_user"] > 10.0 -messageFn = (r) => "whoa! 
{r[\"usage_user\"]}" - -data - |> v1["fieldsAsCols"]() - |> monitor["check"]( - data: check, - messageFn: messageFn, - ok: ok, - info: info, - warn: warn, - crit: crit, - ) -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, err := tt.args.threshold.GenerateFlux(fluxlang.DefaultService) - require.NoError(t, err) - assert.Equal(t, itesting.FormatFluxString(t, tt.wants.script), s) - }) - } - -} diff --git a/notification/duration.go b/notification/duration.go deleted file mode 100644 index 9474f28100b..00000000000 --- a/notification/duration.go +++ /dev/null @@ -1,130 +0,0 @@ -package notification - -import ( - "bytes" - "fmt" - "strconv" - "time" - "unicode" - "unicode/utf8" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/codes" -) - -// Duration is a custom type used for generating flux compatible durations. -type Duration ast.DurationLiteral - -// TimeDuration convert notification.Duration to time.Duration. -func (d Duration) TimeDuration() time.Duration { - dl := ast.DurationLiteral(d) - dd, _ := ast.DurationFrom(&dl, time.Time{}) - return dd -} - -// MarshalJSON turns a Duration into a JSON-ified string. -func (d Duration) MarshalJSON() ([]byte, error) { - var b bytes.Buffer - b.WriteByte('"') - for _, d := range d.Values { - b.WriteString(strconv.Itoa(int(d.Magnitude))) - b.WriteString(d.Unit) - } - b.WriteByte('"') - - return b.Bytes(), nil -} - -// UnmarshalJSON turns a flux duration literal into a Duration. -func (d *Duration) UnmarshalJSON(b []byte) error { - dur, err := parseDuration(string(b[1 : len(b)-1])) - if err != nil { - return err - } - - *d = Duration{Values: dur} - - return nil -} - -// FromTimeDuration converts a time.Duration to a notification.Duration type. -func FromTimeDuration(d time.Duration) (Duration, error) { - dur, err := parseDuration(d.String()) - if err != nil { - return Duration{}, err - } - return Duration{Values: dur}, nil -} - -// TODO(jsternberg): This file copies over code from an internal package -// because we need them from an internal package and the only way they -// are exposed is through a package that depends on the core flux parser. -// We want to avoid a dependency on the core parser so we copy these -// implementations. -// -// In the future, we should consider exposing these functions from flux -// in a non-internal package outside of the parser package. - -// parseDuration will convert a string into components of the duration. 
-func parseDuration(lit string) ([]ast.Duration, error) { - var values []ast.Duration - for len(lit) > 0 { - n := 0 - for n < len(lit) { - ch, size := utf8.DecodeRuneInString(lit[n:]) - if size == 0 { - panic("invalid rune in duration") - } - - if !unicode.IsDigit(ch) { - break - } - n += size - } - - if n == 0 { - return nil, &flux.Error{ - Code: codes.Invalid, - Msg: fmt.Sprintf("invalid duration %s", lit), - } - } - - magnitude, err := strconv.ParseInt(lit[:n], 10, 64) - if err != nil { - return nil, err - } - lit = lit[n:] - - n = 0 - for n < len(lit) { - ch, size := utf8.DecodeRuneInString(lit[n:]) - if size == 0 { - panic("invalid rune in duration") - } - - if !unicode.IsLetter(ch) { - break - } - n += size - } - - if n == 0 { - return nil, &flux.Error{ - Code: codes.Invalid, - Msg: fmt.Sprintf("duration is missing a unit: %s", lit), - } - } - - unit := lit[:n] - if unit == "µs" { - unit = "us" - } - values = append(values, ast.Duration{ - Magnitude: magnitude, - Unit: unit, - }) - lit = lit[n:] - } - return values, nil -} diff --git a/notification/endpoint/endpoint.go b/notification/endpoint/endpoint.go deleted file mode 100644 index 9ada557d516..00000000000 --- a/notification/endpoint/endpoint.go +++ /dev/null @@ -1,162 +0,0 @@ -package endpoint - -import ( - "encoding/json" - "fmt" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// types of endpoints. -const ( - SlackType = "slack" - PagerDutyType = "pagerduty" - HTTPType = "http" - TelegramType = "telegram" -) - -var typeToEndpoint = map[string]func() influxdb.NotificationEndpoint{ - SlackType: func() influxdb.NotificationEndpoint { return &Slack{} }, - PagerDutyType: func() influxdb.NotificationEndpoint { return &PagerDuty{} }, - HTTPType: func() influxdb.NotificationEndpoint { return &HTTP{} }, - TelegramType: func() influxdb.NotificationEndpoint { return &Telegram{} }, -} - -// UnmarshalJSON will convert the bytes to notification endpoint. -func UnmarshalJSON(b []byte) (influxdb.NotificationEndpoint, error) { - var raw struct { - Type string `json:"type"` - } - if err := json.Unmarshal(b, &raw); err != nil { - return nil, &errors.Error{ - Msg: "unable to detect the notification endpoint type from json", - } - } - - convertedFunc, ok := typeToEndpoint[raw.Type] - if !ok { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("invalid notification endpoint type %s", raw.Type), - } - } - converted := convertedFunc() - - if err := json.Unmarshal(b, converted); err != nil { - return nil, &errors.Error{ - Code: errors.EInternal, - Err: err, - } - } - return converted, nil -} - -// Base is the embed struct of every notification endpoint. 
-type Base struct { - ID *platform.ID `json:"id,omitempty"` - Name string `json:"name"` - Description string `json:"description,omitempty"` - OrgID *platform.ID `json:"orgID,omitempty"` - Status influxdb.Status `json:"status"` - influxdb.CRUDLog -} - -func (b Base) idStr() string { - if b.ID == nil { - return platform.ID(0).String() - } - return b.ID.String() -} - -func (b Base) validID() bool { - return b.ID != nil && b.ID.Valid() -} - -func (b Base) valid() error { - if !b.validID() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Notification Endpoint ID is invalid", - } - } - if b.Name == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Notification Endpoint Name can't be empty", - } - } - if b.Status != influxdb.Active && b.Status != influxdb.Inactive { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid status", - } - } - return nil -} - -// GetID implements influxdb.Getter interface. -func (b Base) GetID() platform.ID { - if b.ID == nil { - return 0 - } - return *b.ID -} - -// GetName implements influxdb.Getter interface. -func (b *Base) GetName() string { - return b.Name -} - -// GetOrgID implements influxdb.Getter interface. -func (b Base) GetOrgID() platform.ID { - return getID(b.OrgID) -} - -// GetCRUDLog implements influxdb.Getter interface. -func (b Base) GetCRUDLog() influxdb.CRUDLog { - return b.CRUDLog -} - -// GetDescription implements influxdb.Getter interface. -func (b *Base) GetDescription() string { - return b.Description -} - -// GetStatus implements influxdb.Getter interface. -func (b *Base) GetStatus() influxdb.Status { - return b.Status -} - -// SetID will set the primary key. -func (b *Base) SetID(id platform.ID) { - b.ID = &id -} - -// SetOrgID will set the org key. -func (b *Base) SetOrgID(id platform.ID) { - b.OrgID = &id -} - -// SetName implements influxdb.Updator interface. -func (b *Base) SetName(name string) { - b.Name = name -} - -// SetDescription implements influxdb.Updator interface. -func (b *Base) SetDescription(description string) { - b.Description = description -} - -// SetStatus implements influxdb.Updator interface. 
-func (b *Base) SetStatus(status influxdb.Status) { - b.Status = status -} - -func getID(id *platform.ID) platform.ID { - if id == nil { - return 0 - } - return *id -} diff --git a/notification/endpoint/endpoint_test.go b/notification/endpoint/endpoint_test.go deleted file mode 100644 index eec3cb8e6a2..00000000000 --- a/notification/endpoint/endpoint_test.go +++ /dev/null @@ -1,622 +0,0 @@ -package endpoint_test - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/errors" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification/endpoint" - influxTesting "github.com/influxdata/influxdb/v2/testing" -) - -var ( - id1 = influxTesting.MustIDBase16Ptr("020f755c3c082000") - id3 = influxTesting.MustIDBase16Ptr("020f755c3c082002") - - timeGen1 = mock.TimeGenerator{FakeValue: time.Date(2006, time.July, 13, 4, 19, 10, 0, time.UTC)} - timeGen2 = mock.TimeGenerator{FakeValue: time.Date(2006, time.July, 14, 5, 23, 53, 10, time.UTC)} - - goodBase = endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - Description: "desc1", - } -) - -func TestValidEndpoint(t *testing.T) { - cases := []struct { - name string - src influxdb.NotificationEndpoint - err error - errFn func(*testing.T) error - }{ - { - name: "invalid endpoint id", - src: &endpoint.Slack{}, - err: &errors2.Error{ - Code: errors2.EInvalid, - Msg: "Notification Endpoint ID is invalid", - }, - }, - { - name: "invalid status", - src: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - }, - }, - err: &errors2.Error{ - Code: errors2.EInvalid, - Msg: "invalid status", - }, - }, - { - name: "empty name PagerDuty", - src: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: id1, - OrgID: id3, - Status: influxdb.Active, - }, - ClientURL: "https://events.pagerduty.com/v2/enqueue", - RoutingKey: influxdb.SecretField{Key: id1.String() + "-routing-key"}, - }, - err: &errors2.Error{ - Code: errors2.EInvalid, - Msg: "Notification Endpoint Name can't be empty", - }, - }, - { - name: "empty name Telegram", - src: &endpoint.Telegram{ - Base: endpoint.Base{ - ID: id1, - OrgID: id3, - Status: influxdb.Active, - }, - Token: influxdb.SecretField{Key: id1.String() + "-token"}, - Channel: "-1001406363649", - }, - err: &errors2.Error{ - Code: errors2.EInvalid, - Msg: "Notification Endpoint Name can't be empty", - }, - }, - { - name: "empty slack url", - src: &endpoint.Slack{ - Base: goodBase, - }, - err: &errors2.Error{ - Code: errors2.EInvalid, - Msg: "slack endpoint URL must be provided", - }, - }, - { - name: "invalid slack url", - src: &endpoint.Slack{ - Base: goodBase, - URL: "posts://er:{DEf1=ghi@:5432/db?ssl", - }, - errFn: func(t *testing.T) error { - err := url.Error{ - Op: "parse", - URL: "posts://er:{DEf1=ghi@:5432/db?ssl", - Err: errors.New("net/url: invalid userinfo"), - } - return &errors2.Error{ - Code: errors2.EInvalid, - Msg: fmt.Sprintf("slack endpoint URL is invalid: %s", err.Error()), - } - }, - }, - { - name: "empty slack token", - src: &endpoint.Slack{ - Base: goodBase, - URL: "localhost", - }, - err: nil, - }, - { - name: "empty http http method", - src: &endpoint.HTTP{ - Base: goodBase, - URL: "localhost", - }, - err: &errors2.Error{ - Code: errors2.EInvalid, - Msg: "invalid http http 
method", - }, - }, - { - name: "empty http token", - src: &endpoint.HTTP{ - Base: goodBase, - URL: "localhost", - Method: "GET", - AuthMethod: "bearer", - }, - err: &errors2.Error{ - Code: errors2.EInvalid, - Msg: "invalid http token for bearer auth", - }, - }, - { - name: "empty http username", - src: &endpoint.HTTP{ - Base: goodBase, - URL: "localhost", - Method: http.MethodGet, - AuthMethod: "basic", - }, - err: &errors2.Error{ - Code: errors2.EInvalid, - Msg: "invalid http username/password for basic auth", - }, - }, - { - name: "empty telegram token", - src: &endpoint.Telegram{ - Base: goodBase, - }, - err: &errors2.Error{ - Code: errors2.EInvalid, - Msg: "empty telegram bot token", - }, - }, - { - name: "empty telegram channel", - src: &endpoint.Telegram{ - Base: goodBase, - Token: influxdb.SecretField{Key: id1.String() + "-token"}, - }, - err: &errors2.Error{ - Code: errors2.EInvalid, - Msg: "empty telegram channel", - }, - }, - { - name: "valid telegram token", - src: &endpoint.Telegram{ - Base: goodBase, - Token: influxdb.SecretField{Key: id1.String() + "-token"}, - Channel: "-1001406363649", - }, - err: nil, - }, - } - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - got := c.src.Valid() - var exp error - if c.errFn != nil { - exp = c.errFn(t) - } else { - exp = c.err - } - influxTesting.ErrorsEqual(t, got, exp) - }) - } -} - -func TestJSON(t *testing.T) { - cases := []struct { - name string - src influxdb.NotificationEndpoint - }{ - { - name: "simple Slack", - src: &endpoint.Slack{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "https://slack.com/api/chat.postMessage", - Token: influxdb.SecretField{Key: "token-key-1"}, - }, - }, - { - name: "Slack without token", - src: &endpoint.Slack{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "https://hooks.slack.com/services/x/y/z", - }, - }, - { - name: "simple pagerduty", - src: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "https://events.pagerduty.com/v2/enqueue", - RoutingKey: influxdb.SecretField{Key: "pagerduty-routing-key"}, - }, - }, - { - name: "simple http", - src: &endpoint.HTTP{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Headers: map[string]string{ - "x-header-1": "header 1", - "x-header-2": "header 2", - }, - AuthMethod: "basic", - URL: "http://example.com", - Username: influxdb.SecretField{Key: "username-key"}, - Password: influxdb.SecretField{Key: "password-key"}, - }, - }, - { - name: "simple Telegram", - src: &endpoint.Telegram{ - Base: endpoint.Base{ - ID: id1, - Name: "nameTelegram", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Token: influxdb.SecretField{Key: "token-key-1"}, - }, - }, - } - for _, c := range cases { - b, err := json.Marshal(c.src) - if err != nil { - t.Fatalf("%s marshal failed, err: %s", c.name, err.Error()) - } - got, err := endpoint.UnmarshalJSON(b) - if err != nil { - 
t.Fatalf("%s unmarshal failed, err: %s", c.name, err.Error()) - } - if diff := cmp.Diff(got, c.src); diff != "" { - t.Errorf("failed %s, NotificationEndpoint are different -got/+want\ndiff %s", c.name, diff) - } - } -} - -func TestBackFill(t *testing.T) { - cases := []struct { - name string - src influxdb.NotificationEndpoint - target influxdb.NotificationEndpoint - }{ - { - name: "simple Slack", - src: &endpoint.Slack{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "https://slack.com/api/chat.postMessage", - Token: influxdb.SecretField{ - Value: strPtr("token-value"), - }, - }, - target: &endpoint.Slack{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "https://slack.com/api/chat.postMessage", - Token: influxdb.SecretField{ - Key: id1.String() + "-token", - Value: strPtr("token-value"), - }, - }, - }, - { - name: "simple pagerduty", - src: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "https://events.pagerduty.com/v2/enqueue", - RoutingKey: influxdb.SecretField{ - Value: strPtr("routing-key-value"), - }, - }, - target: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "https://events.pagerduty.com/v2/enqueue", - RoutingKey: influxdb.SecretField{ - Key: id1.String() + "-routing-key", - Value: strPtr("routing-key-value"), - }, - }, - }, - { - name: "http with token", - src: &endpoint.HTTP{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - AuthMethod: "basic", - URL: "http://example.com", - Username: influxdb.SecretField{ - Value: strPtr("username1"), - }, - Password: influxdb.SecretField{ - Value: strPtr("password1"), - }, - }, - target: &endpoint.HTTP{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - AuthMethod: "basic", - URL: "http://example.com", - Username: influxdb.SecretField{ - Key: id1.String() + "-username", - Value: strPtr("username1"), - }, - Password: influxdb.SecretField{ - Key: id1.String() + "-password", - Value: strPtr("password1"), - }, - }, - }, - { - name: "simple Telegram", - src: &endpoint.Telegram{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Token: influxdb.SecretField{ - Value: strPtr("token-value"), - }, - }, - target: &endpoint.Telegram{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Token: influxdb.SecretField{ - Key: id1.String() + "-token", - Value: strPtr("token-value"), - }, - }, - }, - } - for _, c := range cases { - c.src.BackfillSecretKeys() - if diff := 
cmp.Diff(c.target, c.src); diff != "" { - t.Errorf("failed %s, NotificationEndpoint are different -got/+want\ndiff %s", c.name, diff) - } - } -} - -func TestSecretFields(t *testing.T) { - cases := []struct { - name string - src influxdb.NotificationEndpoint - secrets []influxdb.SecretField - }{ - { - name: "simple Slack", - src: &endpoint.Slack{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "https://slack.com/api/chat.postMessage", - Token: influxdb.SecretField{ - Key: id1.String() + "-token", - Value: strPtr("token-value"), - }, - }, - secrets: []influxdb.SecretField{ - { - Key: id1.String() + "-token", - Value: strPtr("token-value"), - }, - }, - }, - { - name: "simple pagerduty", - src: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "https://events.pagerduty.com/v2/enqueue", - RoutingKey: influxdb.SecretField{ - Key: id1.String() + "-routing-key", - Value: strPtr("routing-key-value"), - }, - }, - secrets: []influxdb.SecretField{ - { - Key: id1.String() + "-routing-key", - Value: strPtr("routing-key-value"), - }, - }, - }, - { - name: "http with user and password", - src: &endpoint.HTTP{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - AuthMethod: "basic", - URL: "http://example.com", - Username: influxdb.SecretField{ - Key: id1.String() + "-username", - Value: strPtr("user1"), - }, - Password: influxdb.SecretField{ - Key: id1.String() + "-password", - Value: strPtr("password1"), - }, - }, - secrets: []influxdb.SecretField{ - { - Key: id1.String() + "-username", - Value: strPtr("user1"), - }, - { - Key: id1.String() + "-password", - Value: strPtr("password1"), - }, - }, - }, - { - name: "simple Telegram", - src: &endpoint.Telegram{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: id3, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Token: influxdb.SecretField{ - Key: id1.String() + "-token", - Value: strPtr("token-value"), - }, - }, - secrets: []influxdb.SecretField{ - { - Key: id1.String() + "-token", - Value: strPtr("token-value"), - }, - }, - }, - } - for _, c := range cases { - secretFields := c.src.SecretFields() - if diff := cmp.Diff(c.secrets, secretFields); diff != "" { - t.Errorf("failed %s, NotificationEndpoint are different -got/+want\ndiff %s", c.name, diff) - } - } -} - -func strPtr(s string) *string { - ss := new(string) - *ss = s - return ss -} diff --git a/notification/endpoint/http.go b/notification/endpoint/http.go deleted file mode 100644 index 75cf1e66d09..00000000000 --- a/notification/endpoint/http.go +++ /dev/null @@ -1,153 +0,0 @@ -package endpoint - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var _ influxdb.NotificationEndpoint = &HTTP{} - -const ( - httpTokenSuffix = "-token" - httpUsernameSuffix = "-username" - httpPasswordSuffix = "-password" -) - -// HTTP is the notification endpoint config of http. 
-type HTTP struct { - Base - // Path is the API path of HTTP - URL string `json:"url"` - // Token is the bearer token for authorization - Headers map[string]string `json:"headers,omitempty"` - Token influxdb.SecretField `json:"token,omitempty"` - Username influxdb.SecretField `json:"username,omitempty"` - Password influxdb.SecretField `json:"password,omitempty"` - AuthMethod string `json:"authMethod"` - Method string `json:"method"` - ContentTemplate string `json:"contentTemplate"` -} - -// BackfillSecretKeys fill back fill the secret field key during the unmarshalling -// if value of that secret field is not nil. -func (s *HTTP) BackfillSecretKeys() { - if s.Token.Key == "" && s.Token.Value != nil { - s.Token.Key = s.idStr() + httpTokenSuffix - } - if s.Username.Key == "" && s.Username.Value != nil { - s.Username.Key = s.idStr() + httpUsernameSuffix - } - if s.Password.Key == "" && s.Password.Value != nil { - s.Password.Key = s.idStr() + httpPasswordSuffix - } -} - -// SecretFields return available secret fields. -func (s HTTP) SecretFields() []influxdb.SecretField { - arr := make([]influxdb.SecretField, 0) - if s.Token.Key != "" { - arr = append(arr, s.Token) - } - if s.Username.Key != "" { - arr = append(arr, s.Username) - } - if s.Password.Key != "" { - arr = append(arr, s.Password) - } - return arr -} - -var goodHTTPAuthMethod = map[string]bool{ - "none": true, - "basic": true, - "bearer": true, -} - -var goodHTTPMethod = map[string]bool{ - http.MethodGet: true, - http.MethodPost: true, - http.MethodPut: true, -} - -// Valid returns error if some configuration is invalid -func (s HTTP) Valid() error { - if err := s.Base.valid(); err != nil { - return err - } - if s.URL == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "http endpoint URL is empty", - } - } - if _, err := url.Parse(s.URL); err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("http endpoint URL is invalid: %s", err.Error()), - } - } - if !goodHTTPMethod[s.Method] { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid http http method", - } - } - if !goodHTTPAuthMethod[s.AuthMethod] { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid http auth method", - } - } - if s.AuthMethod == "basic" && (s.Username.Key == "" || s.Password.Key == "") { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid http username/password for basic auth", - } - } - if s.AuthMethod == "bearer" && s.Token.Key == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid http token for bearer auth", - } - } - - return nil -} - -// MarshalJSON implement json.Marshaler interface. -func (s HTTP) MarshalJSON() ([]byte, error) { - type httpAlias HTTP - return json.Marshal( - struct { - httpAlias - Type string `json:"type"` - }{ - httpAlias: httpAlias(s), - Type: s.Type(), - }) -} - -// Type returns the type. -func (s HTTP) Type() string { - return HTTPType -} - -// ParseResponse will parse the http response from http. 
-func (s HTTP) ParseResponse(resp *http.Response) error { - if resp.StatusCode != http.StatusOK { - body, err := io.ReadAll(resp.Body) - if err != nil { - return err - } - return &errors.Error{ - Msg: string(body), - } - } - return nil -} diff --git a/notification/endpoint/pagerduty.go b/notification/endpoint/pagerduty.go deleted file mode 100644 index a13d73e7d88..00000000000 --- a/notification/endpoint/pagerduty.go +++ /dev/null @@ -1,70 +0,0 @@ -package endpoint - -import ( - "encoding/json" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var _ influxdb.NotificationEndpoint = &PagerDuty{} - -const routingKeySuffix = "-routing-key" - -// PagerDuty is the notification endpoint config of pagerduty. -type PagerDuty struct { - Base - // ClientURL is the url that is presented in the PagerDuty UI when this alert is triggered - ClientURL string `json:"clientURL"` - // RoutingKey is a version 4 UUID expressed as a 32-digit hexadecimal number. - // This is the Integration Key for an integration on any given service. - RoutingKey influxdb.SecretField `json:"routingKey"` -} - -// BackfillSecretKeys fill back fill the secret field key during the unmarshalling -// if value of that secret field is not nil. -func (s *PagerDuty) BackfillSecretKeys() { - if s.RoutingKey.Key == "" && s.RoutingKey.Value != nil { - s.RoutingKey.Key = s.idStr() + routingKeySuffix - } -} - -// SecretFields return available secret fields. -func (s PagerDuty) SecretFields() []influxdb.SecretField { - return []influxdb.SecretField{ - s.RoutingKey, - } -} - -// Valid returns error if some configuration is invalid -func (s PagerDuty) Valid() error { - if err := s.Base.valid(); err != nil { - return err - } - if s.RoutingKey.Key == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "pagerduty routing key is invalid", - } - } - return nil -} - -type pagerdutyAlias PagerDuty - -// MarshalJSON implement json.Marshaler interface. -func (s PagerDuty) MarshalJSON() ([]byte, error) { - return json.Marshal( - struct { - pagerdutyAlias - Type string `json:"type"` - }{ - pagerdutyAlias: pagerdutyAlias(s), - Type: s.Type(), - }) -} - -// Type returns the type. -func (s PagerDuty) Type() string { - return PagerDutyType -} diff --git a/notification/endpoint/service/service.go b/notification/endpoint/service/service.go deleted file mode 100644 index 1cc72915a13..00000000000 --- a/notification/endpoint/service/service.go +++ /dev/null @@ -1,93 +0,0 @@ -package service - -import ( - "context" - - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// Service provides all the notification endpoint service behavior. -type Service struct { - endpointStore influxdb.NotificationEndpointService - secretSVC influxdb.SecretService -} - -// New constructs a new Service. -func New(store influxdb.NotificationEndpointService, secretSVC influxdb.SecretService) *Service { - return &Service{ - endpointStore: store, - secretSVC: secretSVC, - } -} - -var _ influxdb.NotificationEndpointService = (*Service)(nil) - -// FindNotificationEndpointByID returns a single notification endpoint by ID. 
-func (s *Service) FindNotificationEndpointByID(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - return s.endpointStore.FindNotificationEndpointByID(ctx, id) -} - -// FindNotificationEndpoints returns a list of notification endpoints that match filter and the total count of matching notification endpoints. -// Additional options provide pagination & sorting. -func (s *Service) FindNotificationEndpoints(ctx context.Context, filter influxdb.NotificationEndpointFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) { - return s.endpointStore.FindNotificationEndpoints(ctx, filter, opt...) -} - -// CreateNotificationEndpoint creates a new notification endpoint and sets b.ID with the new identifier. -func (s *Service) CreateNotificationEndpoint(ctx context.Context, edp influxdb.NotificationEndpoint, userID platform.ID) error { - err := s.endpointStore.CreateNotificationEndpoint(ctx, edp, userID) - if err != nil { - return err - } - - secrets := make(map[string]string) - for _, fld := range edp.SecretFields() { - if fld.Value != nil { - secrets[fld.Key] = *fld.Value - } - } - if len(secrets) == 0 { - return nil - } - - return s.secretSVC.PatchSecrets(ctx, edp.GetOrgID(), secrets) -} - -// UpdateNotificationEndpoint updates a single notification endpoint. -// Returns the new notification endpoint after update. -func (s *Service) UpdateNotificationEndpoint(ctx context.Context, id platform.ID, nr influxdb.NotificationEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error) { - nr.BackfillSecretKeys() // :sadpanda: - updatedEndpoint, err := s.endpointStore.UpdateNotificationEndpoint(ctx, id, nr, userID) - if err != nil { - return nil, err - } - - secrets := make(map[string]string) - for _, fld := range updatedEndpoint.SecretFields() { - if fld.Value != nil { - secrets[fld.Key] = *fld.Value - } - } - - if len(secrets) == 0 { - return updatedEndpoint, nil - } - - if err := s.secretSVC.PatchSecrets(ctx, updatedEndpoint.GetOrgID(), secrets); err != nil { - return nil, err - } - - return updatedEndpoint, nil -} - -// PatchNotificationEndpoint updates a single notification endpoint with changeset. -// Returns the new notification endpoint state after update. -func (s *Service) PatchNotificationEndpoint(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) { - return s.endpointStore.PatchNotificationEndpoint(ctx, id, upd) -} - -// DeleteNotificationEndpoint removes a notification endpoint by ID, returns secret fields, orgID for further deletion. 
-func (s *Service) DeleteNotificationEndpoint(ctx context.Context, id platform.ID) ([]influxdb.SecretField, platform.ID, error) { - return s.endpointStore.DeleteNotificationEndpoint(ctx, id) -} diff --git a/notification/endpoint/service/service_test.go b/notification/endpoint/service/service_test.go deleted file mode 100644 index b37975b39d9..00000000000 --- a/notification/endpoint/service/service_test.go +++ /dev/null @@ -1,184 +0,0 @@ -package service_test - -import ( - "context" - "testing" - "time" - - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/endpoint/service" - "github.com/influxdata/influxdb/v2/pkg/pointer" - "github.com/influxdata/influxdb/v2/secret" - "github.com/influxdata/influxdb/v2/tenant" - influxTesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" -) - -var ( - id1 = influxTesting.MustIDBase16Ptr("020f755c3c082000") - id2 = influxTesting.MustIDBase16Ptr("020f755c3c082001") - orgID = influxTesting.MustIDBase16Ptr("a10f755c3c082001") - userID = influxTesting.MustIDBase16Ptr("b10f755c3c082001") - - timeGen1 = mock.TimeGenerator{FakeValue: time.Date(2006, time.July, 13, 4, 19, 10, 0, time.UTC)} - timeGen2 = mock.TimeGenerator{FakeValue: time.Date(2006, time.July, 14, 5, 23, 53, 10, time.UTC)} - - testCrudLog = influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - } -) - -func newSecretService(t *testing.T, ctx context.Context, logger *zap.Logger, s kv.Store) influxdb.SecretService { - t.Helper() - - tenantSvc := tenant.NewService(tenant.NewStore(s)) - - // initialize organization - org := influxdb.Organization{ - ID: *orgID, - Name: "Test Organization", - CRUDLog: testCrudLog, - } - - if err := tenantSvc.CreateOrganization(ctx, &org); err != nil { - t.Fatal(err) - } - orgID = &org.ID // orgID is generated - - secretStore, err := secret.NewStore(s) - require.NoError(t, err) - return secret.NewService(secretStore) -} - -// TestEndpointService_cumulativeSecrets tests that secrets are cumulatively added/updated and removed upon delete -// see https://github.com/influxdata/influxdb/pull/19082 for details -func TestEndpointService_cumulativeSecrets(t *testing.T) { - ctx := context.Background() - store := inmem.NewKVStore() - logger := zaptest.NewLogger(t) - if err := all.Up(ctx, logger, store); err != nil { - t.Fatal(err) - } - - secretService := newSecretService(t, ctx, logger, store) - endpointService := service.New(service.NewStore(store), secretService) - - var endpoint1 = endpoint.HTTP{ - Base: endpoint.Base{ - ID: id1, - Name: "name1", - OrgID: orgID, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Headers: map[string]string{}, - AuthMethod: "basic", - Method: "POST", - URL: "http://example.com", - Username: influxdb.SecretField{Key: id1.String() + "username-key", Value: pointer.String("val1")}, - Password: influxdb.SecretField{Key: id1.String() + "password-key", Value: pointer.String("val2")}, - } - var endpoint2 = endpoint.HTTP{ - Base: endpoint.Base{ - ID: id2, - Name: "name2", - OrgID: orgID, - 
Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Headers: map[string]string{}, - AuthMethod: "basic", - Method: "POST", - URL: "http://example2.com", - Username: influxdb.SecretField{Key: id2.String() + "username-key", Value: pointer.String("val3")}, - Password: influxdb.SecretField{Key: id2.String() + "password-key", Value: pointer.String("val4")}, - } - var err error - var secretKeys []string - - // create 1st endpoint and validate secrets - if err = endpointService.CreateNotificationEndpoint(ctx, &endpoint1, *userID); err != nil { - t.Fatal(err) - } - if secretKeys, err = secretService.GetSecretKeys(ctx, *orgID); err != nil { - t.Fatal(err) - } - if len(secretKeys) != 2 { - t.Errorf("secrets after creating 1st endpoint = %v, want %v", len(secretKeys), 2) - } - - // create 2nd endpoint and validate secrets - if err = endpointService.CreateNotificationEndpoint(ctx, &endpoint2, *userID); err != nil { - t.Fatal(err) - } - if secretKeys, err = secretService.GetSecretKeys(ctx, *orgID); err != nil { - t.Fatal(err) - } - if len(secretKeys) != 4 { - t.Errorf("secrets after creating 2nd endpoint = %v, want %v", len(secretKeys), 4) - } - - // update 1st endpoint and validate secrets - const updatedSecretValue = "updatedSecVal" - endpoint1.Username.Value = pointer.String(updatedSecretValue) - if _, err = endpointService.UpdateNotificationEndpoint(ctx, *endpoint1.ID, &endpoint1, *userID); err != nil { - t.Fatal(err) - } - if secretKeys, err = secretService.GetSecretKeys(ctx, *orgID); err != nil { - t.Fatal(err) - } - if len(secretKeys) != 4 { - t.Errorf("secrets after updating 1st endpoint = %v, want %v", len(secretKeys), 4) - } - var secretValue string - if secretValue, err = secretService.LoadSecret(ctx, *orgID, endpoint1.Username.Key); err != nil { - t.Fatal(err) - } - if secretValue != updatedSecretValue { - t.Errorf("secret after updating 1st endpoint is not updated = %v, want %v", secretValue, updatedSecretValue) - } - - // delete 1st endpoints and secrets, validate secrets - var secretsToDelete []influxdb.SecretField - if secretsToDelete, _, err = endpointService.DeleteNotificationEndpoint(ctx, *endpoint1.ID); err != nil { - t.Fatal(err) - } - if len(secretsToDelete) != 2 { - t.Errorf("2 secrets expected as a result of deleting the 1st endpoint") - } - secretService.DeleteSecret(ctx, *orgID, secretsToDelete[0].Key, secretsToDelete[1].Key) - if secretKeys, err = secretService.GetSecretKeys(ctx, *orgID); err != nil { - t.Fatal(err) - } - if len(secretKeys) != 2 { - t.Errorf("secrets after deleting 1st endpoint = %v, want %v", len(secretKeys), 2) - } - - if secretsToDelete, _, err = endpointService.DeleteNotificationEndpoint(ctx, *endpoint2.ID); err != nil { - t.Fatal(err) - } - if len(secretsToDelete) != 2 { - t.Errorf("2 secrets expected as a result of deleting the 2nd endpoint") - } - secretService.DeleteSecret(ctx, *orgID, secretsToDelete[0].Key, secretsToDelete[1].Key) - if secretKeys, err = secretService.GetSecretKeys(ctx, *orgID); err != nil { - t.Fatal(err) - } - if len(secretKeys) != 0 { - t.Errorf("secrets after deleting the 2nd endpoint = %v, want %v", len(secretKeys), 2) - } -} diff --git a/notification/endpoint/service/store.go b/notification/endpoint/service/store.go deleted file mode 100644 index 1dc3fb09d9e..00000000000 --- a/notification/endpoint/service/store.go +++ /dev/null @@ -1,307 +0,0 @@ -package service - -import ( - "context" - - influxdb "github.com/influxdata/influxdb/v2" - 
"github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/snowflake" -) - -var ( - // ErrNotificationEndpointNotFound is used when the notification endpoint is not found. - ErrNotificationEndpointNotFound = &errors.Error{ - Msg: "notification endpoint not found", - Code: errors.ENotFound, - } - - notificationEndpointBucket = []byte("notificationEndpointv1") - notificationEndpointIndexBucket = []byte("notificationEndpointIndexv1") -) - -var _ influxdb.NotificationEndpointService = (*Store)(nil) - -func newEndpointStore() *kv.IndexStore { - const resource = "notification endpoint" - - var decEndpointEntFn kv.DecodeBucketValFn = func(key, val []byte) ([]byte, interface{}, error) { - edp, err := endpoint.UnmarshalJSON(val) - return key, edp, err - } - - var decValToEntFn kv.ConvertValToEntFn = func(_ []byte, v interface{}) (kv.Entity, error) { - edp, ok := v.(influxdb.NotificationEndpoint) - if err := kv.IsErrUnexpectedDecodeVal(ok); err != nil { - return kv.Entity{}, err - } - return kv.Entity{ - PK: kv.EncID(edp.GetID()), - UniqueKey: kv.Encode(kv.EncID(edp.GetOrgID()), kv.EncString(edp.GetName())), - Body: edp, - }, nil - } - - return &kv.IndexStore{ - Resource: resource, - EntStore: kv.NewStoreBase(resource, notificationEndpointBucket, kv.EncIDKey, kv.EncBodyJSON, decEndpointEntFn, decValToEntFn), - IndexStore: kv.NewOrgNameKeyStore(resource, notificationEndpointIndexBucket, true), - } -} - -type Store struct { - kv kv.Store - - endpointStore *kv.IndexStore - - IDGenerator platform.IDGenerator - TimeGenerator influxdb.TimeGenerator -} - -func NewStore(store kv.Store) *Store { - return &Store{ - kv: store, - endpointStore: newEndpointStore(), - IDGenerator: snowflake.NewDefaultIDGenerator(), - TimeGenerator: influxdb.RealTimeGenerator{}, - } -} - -// CreateNotificationEndpoint creates a new notification endpoint and sets b.ID with the new identifier. -func (s *Store) CreateNotificationEndpoint(ctx context.Context, edp influxdb.NotificationEndpoint, userID platform.ID) error { - return s.kv.Update(ctx, func(tx kv.Tx) error { - return s.createNotificationEndpoint(ctx, tx, edp, userID) - }) -} - -func (s *Store) createNotificationEndpoint(ctx context.Context, tx kv.Tx, edp influxdb.NotificationEndpoint, userID platform.ID) error { - id := s.IDGenerator.ID() - edp.SetID(id) - now := s.TimeGenerator.Now() - edp.SetCreatedAt(now) - edp.SetUpdatedAt(now) - edp.BackfillSecretKeys() - - if err := edp.Valid(); err != nil { - return err - } - - ent := kv.Entity{ - PK: kv.EncID(edp.GetID()), - UniqueKey: kv.Encode(kv.EncID(edp.GetOrgID()), kv.EncString(edp.GetName())), - Body: edp, - } - if err := s.endpointStore.Put(ctx, tx, ent, kv.PutNew()); err != nil { - return err - } - - return nil -} - -// UpdateNotificationEndpoint updates a single notification endpoint. -// Returns the new notification endpoint after update. 
-func (s *Store) UpdateNotificationEndpoint(ctx context.Context, id platform.ID, edp influxdb.NotificationEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error) { - var err error - err = s.kv.Update(ctx, func(tx kv.Tx) error { - edp, err = s.updateNotificationEndpoint(ctx, tx, id, edp, userID) - return err - }) - return edp, err -} - -func (s *Store) updateNotificationEndpoint(ctx context.Context, tx kv.Tx, id platform.ID, edp influxdb.NotificationEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error) { - current, err := s.findNotificationEndpointByID(ctx, tx, id) - if err != nil { - return nil, err - } - - // ID and OrganizationID can not be updated - edp.SetCreatedAt(current.GetCRUDLog().CreatedAt) - edp.SetUpdatedAt(s.TimeGenerator.Now()) - - if err := edp.Valid(); err != nil { - return nil, err - } - - ent := kv.Entity{ - PK: kv.EncID(edp.GetID()), - UniqueKey: kv.Encode(kv.EncID(edp.GetOrgID()), kv.EncString(edp.GetName())), - Body: edp, - } - if err := s.endpointStore.Put(ctx, tx, ent, kv.PutUpdate()); err != nil { - return nil, err - } - - return edp, nil -} - -// PatchNotificationEndpoint updates a single notification endpoint with changeset. -// Returns the new notification endpoint state after update. -func (s *Store) PatchNotificationEndpoint(ctx context.Context, id platform.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) { - var edp influxdb.NotificationEndpoint - if err := s.kv.Update(ctx, func(tx kv.Tx) (err error) { - edp, err = s.patchNotificationEndpoint(ctx, tx, id, upd) - if err != nil { - return err - } - return nil - }); err != nil { - return nil, err - } - - return edp, nil -} - -func (s *Store) patchNotificationEndpoint(ctx context.Context, tx kv.Tx, id platform.ID, upd influxdb.NotificationEndpointUpdate) (influxdb.NotificationEndpoint, error) { - edp, err := s.findNotificationEndpointByID(ctx, tx, id) - if err != nil { - return nil, err - } - - if upd.Name != nil { - edp.SetName(*upd.Name) - } - if upd.Description != nil { - edp.SetDescription(*upd.Description) - } - if upd.Status != nil { - edp.SetStatus(*upd.Status) - } - edp.SetUpdatedAt(s.TimeGenerator.Now()) - - if err := edp.Valid(); err != nil { - return nil, err - } - - // TODO(jsteenb2): every above here moves into service layer - - ent := kv.Entity{ - PK: kv.EncID(edp.GetID()), - UniqueKey: kv.Encode(kv.EncID(edp.GetOrgID()), kv.EncString(edp.GetName())), - Body: edp, - } - if err := s.endpointStore.Put(ctx, tx, ent, kv.PutUpdate()); err != nil { - return nil, err - } - - return edp, nil -} - -// PutNotificationEndpoint put a notification endpoint to storage. -func (s *Store) PutNotificationEndpoint(ctx context.Context, edp influxdb.NotificationEndpoint) error { - // TODO(jsteenb2): all the stuffs before the update should be moved up into the - // service layer as well as all the id/time setting items - if err := edp.Valid(); err != nil { - return err - } - - return s.kv.Update(ctx, func(tx kv.Tx) (err error) { - ent := kv.Entity{ - PK: kv.EncID(edp.GetID()), - UniqueKey: kv.Encode(kv.EncID(edp.GetOrgID()), kv.EncString(edp.GetName())), - Body: edp, - } - return s.endpointStore.Put(ctx, tx, ent) - }) -} - -// FindNotificationEndpointByID returns a single notification endpoint by ID. 
-func (s *Store) FindNotificationEndpointByID(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - var ( - edp influxdb.NotificationEndpoint - err error - ) - - err = s.kv.View(ctx, func(tx kv.Tx) error { - edp, err = s.findNotificationEndpointByID(ctx, tx, id) - return err - }) - - return edp, err -} - -func (s *Store) findNotificationEndpointByID(ctx context.Context, tx kv.Tx, id platform.ID) (influxdb.NotificationEndpoint, error) { - decodedEnt, err := s.endpointStore.FindEnt(ctx, tx, kv.Entity{PK: kv.EncID(id)}) - if err != nil { - return nil, err - } - edp, ok := decodedEnt.(influxdb.NotificationEndpoint) - return edp, kv.IsErrUnexpectedDecodeVal(ok) -} - -// FindNotificationEndpoints returns a list of notification endpoints that match isNext and the total count of matching notification endpoints. -// Additional options provide pagination & sorting. -func (s *Store) FindNotificationEndpoints(ctx context.Context, filter influxdb.NotificationEndpointFilter, opt ...influxdb.FindOptions) (edps []influxdb.NotificationEndpoint, n int, err error) { - err = s.kv.View(ctx, func(tx kv.Tx) error { - edps, n, err = s.findNotificationEndpoints(ctx, tx, filter, opt...) - return err - }) - return edps, n, err -} - -func (s *Store) findNotificationEndpoints(ctx context.Context, tx kv.Tx, filter influxdb.NotificationEndpointFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) { - var o influxdb.FindOptions - if len(opt) > 0 { - o = opt[0] - } - - edps := make([]influxdb.NotificationEndpoint, 0) - err := s.endpointStore.Find(ctx, tx, kv.FindOpts{ - Descending: o.Descending, - Offset: o.Offset, - Limit: o.Limit, - FilterEntFn: filterEndpointsFn(filter), - CaptureFn: func(k []byte, v interface{}) error { - edp, ok := v.(influxdb.NotificationEndpoint) - if err := kv.IsErrUnexpectedDecodeVal(ok); err != nil { - return err - } - edps = append(edps, edp) - return nil - }, - }) - if err != nil { - return nil, 0, err - } - - return edps, len(edps), err -} - -func filterEndpointsFn(filter influxdb.NotificationEndpointFilter) func([]byte, interface{}) bool { - return func(key []byte, val interface{}) bool { - edp := val.(influxdb.NotificationEndpoint) - if filter.ID != nil && edp.GetID() != *filter.ID { - return false - } - - if filter.OrgID != nil && edp.GetOrgID() != *filter.OrgID { - return false - } - - return true - } -} - -// DeleteNotificationEndpoint removes a notification endpoint by ID. 
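The delete path below removes only the endpoint record itself; the secret field keys and the org ID are returned so the caller can clean up the secret store in a second step (the service tests later in this diff do exactly that). A toy sketch of the two-step cleanup, using map-backed fakes instead of the real stores:

```go
package main

import (
	"context"
	"fmt"
)

// Map-backed stand-ins for the endpoint store and the secret store.
type fakeEndpoints map[string][]string // endpoint ID -> its secret keys
type fakeSecrets map[string]string     // secret key -> secret value

// deleteEndpointAndSecrets removes the endpoint, then removes the secrets it
// referenced, mirroring DeleteNotificationEndpoint's contract of returning
// the secret fields for further deletion.
func deleteEndpointAndSecrets(ctx context.Context, endpoints fakeEndpoints, secrets fakeSecrets, id string) error {
	keys, ok := endpoints[id]
	if !ok {
		return fmt.Errorf("notification endpoint %q not found", id)
	}
	delete(endpoints, id)
	for _, k := range keys {
		delete(secrets, k)
	}
	return nil
}

func main() {
	endpoints := fakeEndpoints{"0002": {"0002-routing-key"}}
	secrets := fakeSecrets{"0002-routing-key": "secret-value"}
	if err := deleteEndpointAndSecrets(context.Background(), endpoints, secrets, "0002"); err != nil {
		panic(err)
	}
	fmt.Println(len(secrets)) // 0: the endpoint's secrets were removed as well
}
```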
-func (s *Store) DeleteNotificationEndpoint(ctx context.Context, id platform.ID) (flds []influxdb.SecretField, orgID platform.ID, err error) { - err = s.kv.Update(ctx, func(tx kv.Tx) error { - flds, orgID, err = s.deleteNotificationEndpoint(ctx, tx, id) - return err - }) - return flds, orgID, err -} - -func (s *Store) deleteNotificationEndpoint(ctx context.Context, tx kv.Tx, id platform.ID) (flds []influxdb.SecretField, orgID platform.ID, err error) { - edp, err := s.findNotificationEndpointByID(ctx, tx, id) - if err != nil { - return nil, 0, err - } - - if err := s.endpointStore.DeleteEnt(ctx, tx, kv.Entity{PK: kv.EncID(id)}); err != nil { - return nil, 0, err - } - - return edp.SecretFields(), edp.GetOrgID(), nil -} diff --git a/notification/endpoint/service/store_test.go b/notification/endpoint/service/store_test.go deleted file mode 100644 index fad2bf23352..00000000000 --- a/notification/endpoint/service/store_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package service_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/notification/endpoint/service" - endpointsTesting "github.com/influxdata/influxdb/v2/notification/endpoint/service/testing" - "github.com/influxdata/influxdb/v2/secret" - "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestNotificationEndpointService_WithInmem(t *testing.T) { - endpointsTesting.NotificationEndpointService(initInmemNotificationEndpointService, t) -} - -func TestNotificationEndpointService_WithBolt(t *testing.T) { - endpointsTesting.NotificationEndpointService(initBoltNotificationEndpointService, t) -} - -func initBoltNotificationEndpointService(f endpointsTesting.NotificationEndpointFields, t *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()) { - store, closeStore := itesting.NewTestBoltStore(t) - svc, secretSVC, closeSvc := initNotificationEndpointService(store, f, t) - return svc, secretSVC, func() { - closeSvc() - closeStore() - } -} - -func initInmemNotificationEndpointService(f endpointsTesting.NotificationEndpointFields, t *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()) { - store := inmem.NewKVStore() - if err := all.Up(context.Background(), zaptest.NewLogger(t), store); err != nil { - t.Fatal(err) - } - - svc, secretSVC, closeSvc := initNotificationEndpointService(store, f, t) - return svc, secretSVC, closeSvc -} - -func initNotificationEndpointService(s kv.SchemaStore, f endpointsTesting.NotificationEndpointFields, t *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()) { - ctx := context.Background() - - tenantStore := tenant.NewStore(s) - if f.IDGenerator != nil { - tenantStore.OrgIDGen = f.IDGenerator - tenantStore.IDGen = f.IDGenerator - } - - tenantSvc := tenant.NewService(tenantStore) - - secretStore, err := secret.NewStore(s) - require.NoError(t, err) - secretSvc := secret.NewService(secretStore) - - store := service.NewStore(s) - store.IDGenerator = f.IDGenerator - if f.TimeGenerator != nil { - store.TimeGenerator = f.TimeGenerator - } - - endpointSvc := service.New(store, secretSvc) - - for _, edp := range f.NotificationEndpoints { - if 
err := store.PutNotificationEndpoint(ctx, edp); err != nil { - t.Fatalf("failed to populate notification endpoint: %v", err) - } - } - - for _, o := range f.Orgs { - if err := tenantSvc.CreateOrganization(ctx, o); err != nil { - t.Fatalf("failed to populate org: %v", err) - } - } - - return endpointSvc, secretSvc, func() { - for _, edp := range f.NotificationEndpoints { - if _, _, err := endpointSvc.DeleteNotificationEndpoint(ctx, edp.GetID()); err != nil && err != service.ErrNotificationEndpointNotFound { - t.Logf("failed to remove notification endpoint: %v", err) - } - } - for _, o := range f.Orgs { - if err := tenantSvc.DeleteOrganization(ctx, o.ID); err != nil { - t.Fatalf("failed to remove org: %v", err) - } - } - } -} diff --git a/notification/endpoint/service/testing/service.go b/notification/endpoint/service/testing/service.go deleted file mode 100644 index 408b18d82bc..00000000000 --- a/notification/endpoint/service/testing/service.go +++ /dev/null @@ -1,1711 +0,0 @@ -package testing - -import ( - "context" - "fmt" - "net/http" - "sort" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - oneID = platform.ID(iota + 1) - twoID - threeID - fourID - fiveID - sixID -) - -var ( - fakeDate = time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC) - fakeGenerator = mock.TimeGenerator{FakeValue: fakeDate} - timeGen1 = mock.TimeGenerator{FakeValue: time.Date(2006, time.July, 13, 4, 19, 10, 0, time.UTC)} - timeGen2 = mock.TimeGenerator{FakeValue: time.Date(2006, time.July, 14, 5, 23, 53, 10, time.UTC)} -) - -// NotificationEndpointFields includes prepopulated data for mapping tests. -type NotificationEndpointFields struct { - IDGenerator platform.IDGenerator - TimeGenerator influxdb.TimeGenerator - NotificationEndpoints []influxdb.NotificationEndpoint - Orgs []*influxdb.Organization -} - -var notificationEndpointCmpOptions = cmp.Options{ - cmp.Transformer("Sort", func(in []influxdb.NotificationEndpoint) []influxdb.NotificationEndpoint { - out := append([]influxdb.NotificationEndpoint(nil), in...) - sort.Slice(out, func(i, j int) bool { - return out[i].GetID() > out[j].GetID() - }) - return out - }), -} - -// NotificationEndpointService tests all the service functions. 
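The init functions above let one shared conformance suite exercise both the in-memory and the Bolt-backed stores: each backend supplies a constructor plus a cleanup closure, and the suite itself stays backend-agnostic. A stripped-down sketch of that pattern with a hypothetical `Store` interface:

```go
package conformance

import "testing"

// Store is a stand-in for the interface under test.
type Store interface {
	Put(key, value string) error
	Get(key string) (string, bool)
}

// InitFn builds a concrete Store for one backend and returns a cleanup func,
// analogous to initInmemNotificationEndpointService / initBolt... above.
type InitFn func(t *testing.T) (Store, func())

// RunStoreTests is the shared suite; each backend's _test.go calls it with
// its own InitFn so every implementation is held to the same behavior.
func RunStoreTests(t *testing.T, init InitFn) {
	t.Run("put then get", func(t *testing.T) {
		s, done := init(t)
		defer done()

		if err := s.Put("k", "v"); err != nil {
			t.Fatalf("put failed: %v", err)
		}
		if got, ok := s.Get("k"); !ok || got != "v" {
			t.Fatalf("get returned %q, %v; want %q, true", got, ok, "v")
		}
	})
}
```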
-func NotificationEndpointService( - init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), t *testing.T, -) { - tests := []struct { - name string - fn func(init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), - t *testing.T) - }{ - { - name: "CreateNotificationEndpoint", - fn: CreateNotificationEndpoint, - }, - { - name: "FindNotificationEndpointByID", - fn: FindNotificationEndpointByID, - }, - { - name: "FindNotificationEndpoints", - fn: FindNotificationEndpoints, - }, - { - name: "UpdateNotificationEndpoint", - fn: UpdateNotificationEndpoint, - }, - { - name: "PatchNotificationEndpoint", - fn: PatchNotificationEndpoint, - }, - { - name: "DeleteNotificationEndpoint", - fn: DeleteNotificationEndpoint, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -// CreateNotificationEndpoint testing. -func CreateNotificationEndpoint( - init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), - t *testing.T, -) { - type args struct { - notificationEndpoint influxdb.NotificationEndpoint - userID platform.ID - } - type wants struct { - err error - notificationEndpoints []influxdb.NotificationEndpoint - } - - tests := []struct { - name string - fields NotificationEndpointFields - args args - wants wants - }{ - { - name: "basic create notification endpoint", - fields: NotificationEndpointFields{ - IDGenerator: mock.NewStaticIDGenerator(twoID), - TimeGenerator: fakeGenerator, - Orgs: []*influxdb.Organization{ - {ID: fourID, Name: "org1"}, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{}, - }, - args: args{ - userID: sixID, - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{ - Value: strPtr("pagerduty secret2"), - }, - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{ - Key: fmt.Sprintf("%s-routing-key", twoID), - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, secretSVC, done := init(tt.fields, t) - defer done() - - ctx := context.Background() - err := s.CreateNotificationEndpoint(ctx, tt.args.notificationEndpoint, tt.args.userID) - ErrorsEqual(t, err, tt.wants.err) - - filter := influxdb.NotificationEndpointFilter{} - edps, _, err := s.FindNotificationEndpoints(ctx, filter) - if err != nil { - t.Fatalf("failed to retrieve notification endpoints: %v", err) - } - if diff := cmp.Diff(edps, tt.wants.notificationEndpoints, notificationEndpointCmpOptions...); diff != "" { - t.Errorf("notificationEndpoints are different -got/+want\ndiff %s", diff) - } - - for _, edp := range tt.wants.notificationEndpoints { - secrets, err := secretSVC.GetSecretKeys(ctx, edp.GetOrgID()) - if err != nil { - t.Errorf("failed to retrieve secrets for endpoint: %v", err) - } - for _, expected := range edp.SecretFields() { - assert.Contains(t, secrets, expected.Key) - } - } - }) - } -} - -// 
FindNotificationEndpointByID testing. -func FindNotificationEndpointByID( - init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), - t *testing.T, -) { - type args struct { - id platform.ID - } - type wants struct { - err *errors.Error - notificationEndpoint influxdb.NotificationEndpoint - } - - tests := []struct { - name string - fields NotificationEndpointFields - args args - wants wants - }{ - { - name: "bad id", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{ - Key: fmt.Sprintf("%s-token", oneID), - }, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{ - Key: fmt.Sprintf("%s-routing-key", twoID), - }, - }, - }, - }, - args: args{ - id: platform.ID(0), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "no key was provided for notification endpoint", - }, - }, - }, - { - name: "not found", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - id: threeID, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "notification endpoint not found", - }, - }, - }, - { - name: "basic find telegraf config by id", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - id: twoID, - }, - wants: wants{ - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: 
timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - edp, err := s.FindNotificationEndpointByID(ctx, tt.args.id) - influxErrsEqual(t, tt.wants.err, err) - if diff := cmp.Diff(edp, tt.wants.notificationEndpoint, notificationEndpointCmpOptions...); diff != "" { - t.Errorf("notification endpoint is different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindNotificationEndpoints testing -func FindNotificationEndpoints( - init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), - t *testing.T, -) { - type args struct { - filter influxdb.NotificationEndpointFilter - opts influxdb.FindOptions - } - - type wants struct { - notificationEndpoints []influxdb.NotificationEndpoint - err error - } - tests := []struct { - name string - fields NotificationEndpointFields - args args - wants wants - }{ - { - name: "find nothing (empty set)", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{}, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{}, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{}, - }, - }, - { - name: "find all notification endpoints", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - }, - { - name: "filter by organization id only", - fields: NotificationEndpointFields{ - Orgs: []*influxdb.Organization{ - { - ID: oneID, - Name: "org1", - }, - { - ID: fourID, - Name: "org4", - }, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: 
endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fourID), - OrgID: idPtr(oneID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty2.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{ - OrgID: idPtr(oneID), - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fourID), - OrgID: idPtr(oneID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty2.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - }, - { - name: "find options limit", - fields: NotificationEndpointFields{ - Orgs: []*influxdb.Organization{ - { - ID: oneID, - Name: "org1", - }, - { - ID: fourID, - Name: "org4", - }, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fourID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fiveID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp4", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{ - Org: strPtr("org4"), - }, - opts: influxdb.FindOptions{ - Limit: 2, - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - }, - }, - }, - { - name: "find options offset", - fields: NotificationEndpointFields{ - Orgs: []*influxdb.Organization{ - { - ID: oneID, - Name: "org1", - }, - { - ID: fourID, - Name: "org4", - }, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - 
}, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fourID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{ - Org: strPtr("org4"), - }, - opts: influxdb.FindOptions{ - Offset: 1, - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fourID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - }, - { - name: "find options offset", - fields: NotificationEndpointFields{ - Orgs: []*influxdb.Organization{ - { - ID: oneID, - Name: "org1", - }, - { - ID: fourID, - Name: "org4", - }, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fourID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{ - Org: strPtr("org4"), - }, - opts: influxdb.FindOptions{ - Limit: 1, - Offset: 1, - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - }, - }, - }, - { - name: "find by id", - fields: NotificationEndpointFields{ - Orgs: []*influxdb.Organization{ - { - ID: oneID, - Name: "org1", - }, - { - ID: fourID, - Name: "org4", - }, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fourID), - OrgID: idPtr(oneID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{ - ID: idPtr(fourID), - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{ - 
&endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fourID), - OrgID: idPtr(oneID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - }, - { - name: "look for organization not bound to any notification endpoint", - fields: NotificationEndpointFields{ - Orgs: []*influxdb.Organization{ - { - ID: oneID, - Name: "org1", - }, - { - ID: fourID, - Name: "org4", - }, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(threeID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", threeID)}, - }, - }, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{ - OrgID: idPtr(oneID), - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{}, - }, - }, - { - name: "find nothing", - fields: NotificationEndpointFields{ - Orgs: []*influxdb.Organization{ - { - ID: oneID, - Name: "org1", - }, - { - ID: fourID, - Name: "org4", - }, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(threeID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", threeID)}, - }, - }, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{ - ID: idPtr(fiveID), - }, - }, - wants: wants{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - edps, n, err := s.FindNotificationEndpoints(ctx, tt.args.filter, tt.args.opts) - ErrorsEqual(t, err, tt.wants.err) - if n != len(tt.wants.notificationEndpoints) { - t.Fatalf("notification endpoints length is different got %d, want %d", n, len(tt.wants.notificationEndpoints)) - } - - if diff := cmp.Diff(edps, tt.wants.notificationEndpoints, notificationEndpointCmpOptions...); diff != "" { - t.Errorf("notification endpoints are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// UpdateNotificationEndpoint testing. 
-func UpdateNotificationEndpoint( - init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), - t *testing.T, -) { - type args struct { - userID platform.ID - orgID platform.ID - id platform.ID - notificationEndpoint influxdb.NotificationEndpoint - } - - type wants struct { - notificationEndpoint influxdb.NotificationEndpoint - err *errors.Error - } - tests := []struct { - name string - fields NotificationEndpointFields - args args - wants wants - }{ - { - name: "can't find the id", - fields: NotificationEndpointFields{ - TimeGenerator: fakeGenerator, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - userID: sixID, - id: fourID, - orgID: fourID, - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Inactive, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: "pager-duty-routing-key-2"}, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: `notification endpoint not found for key "0000000000000004"`, - }, - }, - }, - { - name: "regular update", - fields: NotificationEndpointFields{ - TimeGenerator: fakeGenerator, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - userID: sixID, - id: twoID, - orgID: fourID, - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name3", - OrgID: idPtr(fourID), - Status: influxdb.Inactive, - }, - ClientURL: "example-pagerduty2.com", - RoutingKey: influxdb.SecretField{Value: strPtr("secret value")}, - }, - }, - wants: wants{ - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name3", - OrgID: idPtr(fourID), - Status: influxdb.Inactive, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: fakeDate, - }, - }, - ClientURL: "example-pagerduty2.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - { - name: "update secret", - fields: NotificationEndpointFields{ - TimeGenerator: fakeGenerator, - 
NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - userID: sixID, - id: twoID, - orgID: fourID, - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name3", - OrgID: idPtr(fourID), - Status: influxdb.Inactive, - }, - ClientURL: "example-pagerduty2.com", - RoutingKey: influxdb.SecretField{ - Value: strPtr("pager-duty-value2"), - }, - }, - }, - wants: wants{ - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name3", - OrgID: idPtr(fourID), - Status: influxdb.Inactive, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: fakeDate, - }, - }, - ClientURL: "example-pagerduty2.com", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, secretSVC, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - edp, err := s.UpdateNotificationEndpoint(ctx, tt.args.id, tt.args.notificationEndpoint, tt.args.userID) - if err != nil { - require.Equal(t, tt.wants.err, err) - return - } - - if tt.wants.notificationEndpoint != nil { - secrets, err := secretSVC.GetSecretKeys(ctx, edp.GetOrgID()) - if err != nil { - t.Errorf("failed to retrieve secrets for endpoint: %v", err) - } - for _, actual := range edp.SecretFields() { - assert.Contains(t, secrets, actual.Key) - } - - actual, ok := edp.(*endpoint.PagerDuty) - require.Truef(t, ok, "did not get a pager duty endpoint; got: %#v", edp) - wanted := tt.wants.notificationEndpoint.(*endpoint.PagerDuty) - - wb, ab := wanted.Base, actual.Base - require.NotZero(t, ab.CRUDLog) - wb.CRUDLog, ab.CRUDLog = influxdb.CRUDLog{}, influxdb.CRUDLog{} // zero out times - assert.Equal(t, wb, ab) - assert.Equal(t, wanted.ClientURL, actual.ClientURL) - assert.NotEqual(t, wanted.RoutingKey, actual.RoutingKey) - } - }) - } -} - -// PatchNotificationEndpoint testing. 
-func PatchNotificationEndpoint( - init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), - t *testing.T, -) { - - name3 := "name2" - status3 := influxdb.Inactive - - type args struct { - //userID influxdb.ID - id platform.ID - upd influxdb.NotificationEndpointUpdate - } - - type wants struct { - notificationEndpoint influxdb.NotificationEndpoint - err *errors.Error - } - tests := []struct { - name string - fields NotificationEndpointFields - args args - wants wants - }{ - { - name: "can't find the id", - fields: NotificationEndpointFields{ - TimeGenerator: fakeGenerator, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - id: fourID, - upd: influxdb.NotificationEndpointUpdate{ - Name: &name3, - Status: &status3, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "notification endpoint not found", - }, - }, - }, - { - name: "regular update", - fields: NotificationEndpointFields{ - TimeGenerator: fakeGenerator, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - Status: influxdb.Active, - OrgID: idPtr(fourID), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - Status: influxdb.Active, - OrgID: idPtr(fourID), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - id: twoID, - upd: influxdb.NotificationEndpointUpdate{ - Name: &name3, - Status: &status3, - }, - }, - wants: wants{ - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: name3, - Status: status3, - OrgID: idPtr(fourID), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: fakeDate, - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - edp, err := s.PatchNotificationEndpoint(ctx, tt.args.id, tt.args.upd) - if err != nil { - if tt.wants.err == nil { - require.NoError(t, err) - } - iErr, ok := err.(*errors.Error) - require.True(t, ok, err) - assert.Equal(t, tt.wants.err.Code, iErr.Code) - return - } - if diff := cmp.Diff(edp, tt.wants.notificationEndpoint, notificationEndpointCmpOptions...); tt.wants.err == nil && diff != "" { - 
t.Errorf("notificationEndpoints are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// DeleteNotificationEndpoint testing. -func DeleteNotificationEndpoint( - init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), - t *testing.T, -) { - type args struct { - id platform.ID - orgID platform.ID - userID platform.ID - } - - type wants struct { - notificationEndpoints []influxdb.NotificationEndpoint - secretFlds []influxdb.SecretField - orgID platform.ID - err *errors.Error - } - tests := []struct { - name string - fields NotificationEndpointFields - args args - wants wants - }{ - { - name: "bad id", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - id: platform.ID(0), - orgID: fourID, - userID: sixID, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "no key was provided for notification endpoint", - }, - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - }, - { - name: "none existing endpoint", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - id: fourID, - orgID: fourID, - userID: sixID, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "notification endpoint not found", - }, - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", 
- OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - }, - { - name: "regular delete", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - id: twoID, - orgID: fourID, - userID: sixID, - }, - wants: wants{ - secretFlds: []influxdb.SecretField{ - {Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - orgID: fourID, - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, secretSVC, done := init(tt.fields, t) - defer done() - - ctx := context.Background() - flds, orgID, err := s.DeleteNotificationEndpoint(ctx, tt.args.id) - influxErrsEqual(t, tt.wants.err, err) - if diff := cmp.Diff(flds, tt.wants.secretFlds); diff != "" { - t.Errorf("delete notification endpoint secret fields are different -got/+want\ndiff %s", diff) - } - if diff := cmp.Diff(orgID, tt.wants.orgID); diff != "" { - t.Errorf("delete notification endpoint org id is different -got/+want\ndiff %s", diff) - } - - filter := influxdb.NotificationEndpointFilter{} - edps, n, err := s.FindNotificationEndpoints(ctx, filter) - if err != nil && tt.wants.err == nil { - t.Fatalf("expected errors to be nil got '%v'", err) - } - - if n != len(tt.wants.notificationEndpoints) { - t.Fatalf("notification endpoints length is different got %d, want %d", n, len(tt.wants.notificationEndpoints)) - } - if diff := cmp.Diff(edps, tt.wants.notificationEndpoints, notificationEndpointCmpOptions...); diff != "" { - t.Errorf("notification endpoints are different -got/+want\ndiff %s", diff) - } - - var deletedEndpoint influxdb.NotificationEndpoint - for _, ne := range tt.fields.NotificationEndpoints { - if ne.GetID() == tt.args.id { - deletedEndpoint = ne - break - } - } - if deletedEndpoint == nil { - return - } - - secrets, err := secretSVC.GetSecretKeys(ctx, deletedEndpoint.GetOrgID()) - require.NoError(t, err) - for _, deleted := range 
deletedEndpoint.SecretFields() { - assert.NotContains(t, secrets, deleted.Key) - } - }) - } -} - -func influxErrsEqual(t *testing.T, expected *errors.Error, actual error) { - t.Helper() - - if expected != nil { - require.Error(t, actual) - } - - if actual == nil { - return - } - - if expected == nil { - require.NoError(t, actual) - return - } - iErr, ok := actual.(*errors.Error) - require.True(t, ok) - assert.Equal(t, expected.Code, iErr.Code) - assert.Truef(t, strings.HasPrefix(iErr.Error(), expected.Error()), "expected: %s got err: %s", expected.Error(), actual.Error()) -} - -func idPtr(id platform.ID) *platform.ID { - return &id -} - -func strPtr(s string) *string { return &s } - -// ErrorsEqual checks to see if the provided errors are equivalent. -func ErrorsEqual(t *testing.T, actual, expected error) { - t.Helper() - if expected == nil && actual == nil { - return - } - - if expected == nil && actual != nil { - t.Errorf("unexpected error %s", actual.Error()) - } - - if expected != nil && actual == nil { - t.Errorf("expected error %s but received nil", expected.Error()) - } - - if errors.ErrorCode(expected) != errors.ErrorCode(actual) { - t.Logf("\nexpected: %v\nactual: %v\n\n", expected, actual) - t.Errorf("expected error code %q but received %q", errors.ErrorCode(expected), errors.ErrorCode(actual)) - } - - if errors.ErrorMessage(expected) != errors.ErrorMessage(actual) { - t.Logf("\nexpected: %v\nactual: %v\n\n", expected, actual) - t.Errorf("expected error message %q but received %q", errors.ErrorMessage(expected), errors.ErrorMessage(actual)) - } -} diff --git a/notification/endpoint/slack.go b/notification/endpoint/slack.go deleted file mode 100644 index f164475b00a..00000000000 --- a/notification/endpoint/slack.go +++ /dev/null @@ -1,83 +0,0 @@ -package endpoint - -import ( - "encoding/json" - "fmt" - "net/url" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var _ influxdb.NotificationEndpoint = &Slack{} - -const slackTokenSuffix = "-token" - -// Slack is the notification endpoint config of slack. -type Slack struct { - Base - // URL is a valid slack webhook URL - // TODO(jm): validate this in unmarshaler - // example: https://slack.com/api/chat.postMessage - URL string `json:"url"` - // Token is the bearer token for authorization - Token influxdb.SecretField `json:"token"` -} - -// BackfillSecretKeys fill back fill the secret field key during the unmarshalling -// if value of that secret field is not nil. -func (s *Slack) BackfillSecretKeys() { - if s.Token.Key == "" && s.Token.Value != nil { - s.Token.Key = s.idStr() + slackTokenSuffix - } -} - -// SecretFields return available secret fields. -func (s Slack) SecretFields() []influxdb.SecretField { - arr := []influxdb.SecretField{} - if s.Token.Key != "" { - arr = append(arr, s.Token) - } - return arr -} - -// Valid returns error if some configuration is invalid -func (s Slack) Valid() error { - if err := s.Base.valid(); err != nil { - return err - } - if s.URL == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "slack endpoint URL must be provided", - } - } - if s.URL != "" { - if _, err := url.Parse(s.URL); err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("slack endpoint URL is invalid: %s", err.Error()), - } - } - } - return nil -} - -type slackAlias Slack - -// MarshalJSON implement json.Marshaler interface. 
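The slackAlias defined above, together with the MarshalJSON that follows, uses the common Go idiom for adding a "type" discriminator to JSON output without infinite recursion: the alias type shares Slack's fields but none of its methods, so marshaling it does not re-enter MarshalJSON. A minimal standalone sketch of the same idiom (the Thing type and its fields are illustrative, not part of this codebase):

package main

import (
	"encoding/json"
	"fmt"
)

// Thing stands in for an endpoint type such as Slack.
type Thing struct {
	Name string `json:"name"`
	URL  string `json:"url"`
}

// thingAlias has Thing's fields but none of its methods, so marshaling it
// does not call Thing.MarshalJSON again.
type thingAlias Thing

// MarshalJSON emits Thing's own fields plus a "type" discriminator.
func (t Thing) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		thingAlias
		Type string `json:"type"`
	}{
		thingAlias: thingAlias(t),
		Type:       "thing",
	})
}

func main() {
	out, err := json.Marshal(Thing{Name: "example", URL: "https://example.com"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"name":"example","url":"https://example.com","type":"thing"}
}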
-func (s Slack) MarshalJSON() ([]byte, error) { - return json.Marshal( - struct { - slackAlias - Type string `json:"type"` - }{ - slackAlias: slackAlias(s), - Type: s.Type(), - }) -} - -// Type returns the type. -func (s Slack) Type() string { - return SlackType -} diff --git a/notification/endpoint/telegram.go b/notification/endpoint/telegram.go deleted file mode 100644 index 64541cea3f1..00000000000 --- a/notification/endpoint/telegram.go +++ /dev/null @@ -1,76 +0,0 @@ -package endpoint - -import ( - "encoding/json" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var _ influxdb.NotificationEndpoint = &Telegram{} - -const telegramTokenSuffix = "-token" - -// Telegram is the notification endpoint config of telegram. -type Telegram struct { - Base - // Token is the telegram bot token, see https://core.telegram.org/bots#creating-a-new-bot - Token influxdb.SecretField `json:"token"` - // Channel is an ID of the telegram channel, see https://core.telegram.org/bots/api#sendmessage - Channel string `json:"channel"` -} - -// BackfillSecretKeys fill back the secret field key during the unmarshalling -// if value of that secret field is not nil. -func (s *Telegram) BackfillSecretKeys() { - if s.Token.Key == "" && s.Token.Value != nil { - s.Token.Key = s.idStr() + telegramTokenSuffix - } -} - -// SecretFields return available secret fields. -func (s Telegram) SecretFields() []influxdb.SecretField { - arr := []influxdb.SecretField{} - if s.Token.Key != "" { - arr = append(arr, s.Token) - } - return arr -} - -// Valid returns error if some configuration is invalid -func (s Telegram) Valid() error { - if err := s.Base.valid(); err != nil { - return err - } - if s.Token.Key == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "empty telegram bot token", - } - } - if s.Channel == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "empty telegram channel", - } - } - return nil -} - -// MarshalJSON implement json.Marshaler interface. -func (s Telegram) MarshalJSON() ([]byte, error) { - type telegramAlias Telegram - return json.Marshal( - struct { - telegramAlias - Type string `json:"type"` - }{ - telegramAlias: telegramAlias(s), - Type: s.Type(), - }) -} - -// Type returns the type. 
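Setting aside the Type accessors, the Slack and Telegram endpoints backfill secrets the same way: when a caller supplies only a raw secret value, BackfillSecretKeys derives a deterministic key from the endpoint ID plus a per-type suffix, and only that key is kept on the endpoint while the raw value is stored separately. A simplified standalone sketch of the idiom (secretField and demoEndpoint are reduced stand-ins, not the influxdb types):

package main

import "fmt"

// secretField mirrors the key/value shape used above: Key references a stored
// secret, Value temporarily holds a raw secret that has not been stored yet.
type secretField struct {
	Key   string
	Value *string
}

type demoEndpoint struct {
	ID    string
	Token secretField
}

// backfillSecretKeys derives a key for any secret that has a raw value but no
// key yet, mirroring the "<id>-token" suffix convention used by Slack and Telegram.
func (e *demoEndpoint) backfillSecretKeys() {
	if e.Token.Key == "" && e.Token.Value != nil {
		e.Token.Key = e.ID + "-token"
	}
}

func main() {
	raw := "super-secret"
	e := demoEndpoint{ID: "0000000000000002", Token: secretField{Value: &raw}}
	e.backfillSecretKeys()
	fmt.Println(e.Token.Key) // 0000000000000002-token
}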
-func (s Telegram) Type() string { - return TelegramType -} diff --git a/notification/endpoint/testing/service.go b/notification/endpoint/testing/service.go deleted file mode 100644 index 408b18d82bc..00000000000 --- a/notification/endpoint/testing/service.go +++ /dev/null @@ -1,1711 +0,0 @@ -package testing - -import ( - "context" - "fmt" - "net/http" - "sort" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - oneID = platform.ID(iota + 1) - twoID - threeID - fourID - fiveID - sixID -) - -var ( - fakeDate = time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC) - fakeGenerator = mock.TimeGenerator{FakeValue: fakeDate} - timeGen1 = mock.TimeGenerator{FakeValue: time.Date(2006, time.July, 13, 4, 19, 10, 0, time.UTC)} - timeGen2 = mock.TimeGenerator{FakeValue: time.Date(2006, time.July, 14, 5, 23, 53, 10, time.UTC)} -) - -// NotificationEndpointFields includes prepopulated data for mapping tests. -type NotificationEndpointFields struct { - IDGenerator platform.IDGenerator - TimeGenerator influxdb.TimeGenerator - NotificationEndpoints []influxdb.NotificationEndpoint - Orgs []*influxdb.Organization -} - -var notificationEndpointCmpOptions = cmp.Options{ - cmp.Transformer("Sort", func(in []influxdb.NotificationEndpoint) []influxdb.NotificationEndpoint { - out := append([]influxdb.NotificationEndpoint(nil), in...) - sort.Slice(out, func(i, j int) bool { - return out[i].GetID() > out[j].GetID() - }) - return out - }), -} - -// NotificationEndpointService tests all the service functions. -func NotificationEndpointService( - init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), t *testing.T, -) { - tests := []struct { - name string - fn func(init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), - t *testing.T) - }{ - { - name: "CreateNotificationEndpoint", - fn: CreateNotificationEndpoint, - }, - { - name: "FindNotificationEndpointByID", - fn: FindNotificationEndpointByID, - }, - { - name: "FindNotificationEndpoints", - fn: FindNotificationEndpoints, - }, - { - name: "UpdateNotificationEndpoint", - fn: UpdateNotificationEndpoint, - }, - { - name: "PatchNotificationEndpoint", - fn: PatchNotificationEndpoint, - }, - { - name: "DeleteNotificationEndpoint", - fn: DeleteNotificationEndpoint, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -// CreateNotificationEndpoint testing. 
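Before the CreateNotificationEndpoint cases, note how notificationEndpointCmpOptions (defined above) keeps every comparison in this suite order-independent: a cmp.Transformer sorts copies of both slices before go-cmp diffs them, so the tests do not depend on the order a backing store returns endpoints in. A runnable sketch of the same technique on a plain int slice (the transformer name and data are illustrative):

package main

import (
	"fmt"
	"sort"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Sort copies of both inputs before comparing, so ordering differences
	// never show up in the diff.
	sortInts := cmp.Transformer("Sort", func(in []int) []int {
		out := append([]int(nil), in...)
		sort.Ints(out)
		return out
	})

	got := []int{3, 1, 2}
	want := []int{1, 2, 3}

	if diff := cmp.Diff(got, want, sortInts); diff != "" {
		fmt.Printf("unexpected diff -got/+want:\n%s", diff)
	} else {
		fmt.Println("equal up to ordering")
	}
}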
-func CreateNotificationEndpoint( - init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), - t *testing.T, -) { - type args struct { - notificationEndpoint influxdb.NotificationEndpoint - userID platform.ID - } - type wants struct { - err error - notificationEndpoints []influxdb.NotificationEndpoint - } - - tests := []struct { - name string - fields NotificationEndpointFields - args args - wants wants - }{ - { - name: "basic create notification endpoint", - fields: NotificationEndpointFields{ - IDGenerator: mock.NewStaticIDGenerator(twoID), - TimeGenerator: fakeGenerator, - Orgs: []*influxdb.Organization{ - {ID: fourID, Name: "org1"}, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{}, - }, - args: args{ - userID: sixID, - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{ - Value: strPtr("pagerduty secret2"), - }, - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{ - Key: fmt.Sprintf("%s-routing-key", twoID), - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, secretSVC, done := init(tt.fields, t) - defer done() - - ctx := context.Background() - err := s.CreateNotificationEndpoint(ctx, tt.args.notificationEndpoint, tt.args.userID) - ErrorsEqual(t, err, tt.wants.err) - - filter := influxdb.NotificationEndpointFilter{} - edps, _, err := s.FindNotificationEndpoints(ctx, filter) - if err != nil { - t.Fatalf("failed to retrieve notification endpoints: %v", err) - } - if diff := cmp.Diff(edps, tt.wants.notificationEndpoints, notificationEndpointCmpOptions...); diff != "" { - t.Errorf("notificationEndpoints are different -got/+want\ndiff %s", diff) - } - - for _, edp := range tt.wants.notificationEndpoints { - secrets, err := secretSVC.GetSecretKeys(ctx, edp.GetOrgID()) - if err != nil { - t.Errorf("failed to retrieve secrets for endpoint: %v", err) - } - for _, expected := range edp.SecretFields() { - assert.Contains(t, secrets, expected.Key) - } - } - }) - } -} - -// FindNotificationEndpointByID testing. 
-func FindNotificationEndpointByID( - init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), - t *testing.T, -) { - type args struct { - id platform.ID - } - type wants struct { - err *errors.Error - notificationEndpoint influxdb.NotificationEndpoint - } - - tests := []struct { - name string - fields NotificationEndpointFields - args args - wants wants - }{ - { - name: "bad id", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{ - Key: fmt.Sprintf("%s-token", oneID), - }, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{ - Key: fmt.Sprintf("%s-routing-key", twoID), - }, - }, - }, - }, - args: args{ - id: platform.ID(0), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "no key was provided for notification endpoint", - }, - }, - }, - { - name: "not found", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - id: threeID, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "notification endpoint not found", - }, - }, - }, - { - name: "basic find telegraf config by id", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - id: twoID, - }, - wants: wants{ - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: 
"example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - edp, err := s.FindNotificationEndpointByID(ctx, tt.args.id) - influxErrsEqual(t, tt.wants.err, err) - if diff := cmp.Diff(edp, tt.wants.notificationEndpoint, notificationEndpointCmpOptions...); diff != "" { - t.Errorf("notification endpoint is different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindNotificationEndpoints testing -func FindNotificationEndpoints( - init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), - t *testing.T, -) { - type args struct { - filter influxdb.NotificationEndpointFilter - opts influxdb.FindOptions - } - - type wants struct { - notificationEndpoints []influxdb.NotificationEndpoint - err error - } - tests := []struct { - name string - fields NotificationEndpointFields - args args - wants wants - }{ - { - name: "find nothing (empty set)", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{}, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{}, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{}, - }, - }, - { - name: "find all notification endpoints", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - }, - { - name: "filter by organization id only", - fields: NotificationEndpointFields{ - Orgs: []*influxdb.Organization{ - { - ID: oneID, - Name: "org1", - }, - { - ID: fourID, - Name: "org4", - }, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - 
OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fourID), - OrgID: idPtr(oneID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty2.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{ - OrgID: idPtr(oneID), - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fourID), - OrgID: idPtr(oneID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty2.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - }, - { - name: "find options limit", - fields: NotificationEndpointFields{ - Orgs: []*influxdb.Organization{ - { - ID: oneID, - Name: "org1", - }, - { - ID: fourID, - Name: "org4", - }, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fourID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fiveID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp4", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{ - Org: strPtr("org4"), - }, - opts: influxdb.FindOptions{ - Limit: 2, - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - }, - }, - }, - { - name: "find options offset", - fields: NotificationEndpointFields{ - Orgs: []*influxdb.Organization{ - { - ID: oneID, - Name: "org1", - }, - { - ID: fourID, - Name: "org4", - }, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - &endpoint.PagerDuty{ - Base: 
endpoint.Base{ - ID: idPtr(fourID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{ - Org: strPtr("org4"), - }, - opts: influxdb.FindOptions{ - Offset: 1, - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fourID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - }, - { - name: "find options offset", - fields: NotificationEndpointFields{ - Orgs: []*influxdb.Organization{ - { - ID: oneID, - Name: "org1", - }, - { - ID: fourID, - Name: "org4", - }, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fourID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{ - Org: strPtr("org4"), - }, - opts: influxdb.FindOptions{ - Limit: 1, - Offset: 1, - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - }, - }, - }, - { - name: "find by id", - fields: NotificationEndpointFields{ - Orgs: []*influxdb.Organization{ - { - ID: oneID, - Name: "org1", - }, - { - ID: fourID, - Name: "org4", - }, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(fourID), - OrgID: idPtr(oneID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{ - ID: idPtr(fourID), - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.PagerDuty{ - Base: endpoint.Base{ - 
ID: idPtr(fourID), - OrgID: idPtr(oneID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", fourID)}, - }, - }, - }, - }, - { - name: "look for organization not bound to any notification endpoint", - fields: NotificationEndpointFields{ - Orgs: []*influxdb.Organization{ - { - ID: oneID, - Name: "org1", - }, - { - ID: fourID, - Name: "org4", - }, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(threeID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", threeID)}, - }, - }, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{ - OrgID: idPtr(oneID), - }, - }, - wants: wants{ - notificationEndpoints: []influxdb.NotificationEndpoint{}, - }, - }, - { - name: "find nothing", - fields: NotificationEndpointFields{ - Orgs: []*influxdb.Organization{ - { - ID: oneID, - Name: "org1", - }, - { - ID: fourID, - Name: "org4", - }, - }, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp1", - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.HTTP{ - Base: endpoint.Base{ - ID: idPtr(twoID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp2", - }, - URL: "example-webhook.com", - Method: http.MethodGet, - AuthMethod: "none", - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(threeID), - OrgID: idPtr(fourID), - Status: influxdb.Active, - Name: "edp3", - }, - ClientURL: "example-pagerduty.com", RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", threeID)}, - }, - }, - }, - args: args{ - filter: influxdb.NotificationEndpointFilter{ - ID: idPtr(fiveID), - }, - }, - wants: wants{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - edps, n, err := s.FindNotificationEndpoints(ctx, tt.args.filter, tt.args.opts) - ErrorsEqual(t, err, tt.wants.err) - if n != len(tt.wants.notificationEndpoints) { - t.Fatalf("notification endpoints length is different got %d, want %d", n, len(tt.wants.notificationEndpoints)) - } - - if diff := cmp.Diff(edps, tt.wants.notificationEndpoints, notificationEndpointCmpOptions...); diff != "" { - t.Errorf("notification endpoints are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// UpdateNotificationEndpoint testing. 
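Before the update cases, the "find options limit" and "find options offset" cases above pin down the pagination semantics expected of influxdb.FindOptions: skip Offset endpoints first, then cap the result at Limit. A tiny standalone sketch of that offset-then-limit slicing (treating a zero limit as "no limit" is an assumption of this sketch, not documented influxdb behavior):

package main

import "fmt"

// applyFindOptions mimics offset-then-limit pagination over an in-memory
// slice, matching the ordering the test cases above assert.
func applyFindOptions(in []string, offset, limit int) []string {
	if offset >= len(in) {
		return nil
	}
	out := in[offset:]
	if limit > 0 && limit < len(out) {
		out = out[:limit]
	}
	return out
}

func main() {
	endpoints := []string{"edp1", "edp2", "edp3", "edp4"}
	fmt.Println(applyFindOptions(endpoints, 0, 2)) // [edp1 edp2]
	fmt.Println(applyFindOptions(endpoints, 1, 0)) // [edp2 edp3 edp4]
	fmt.Println(applyFindOptions(endpoints, 1, 1)) // [edp2]
}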
-func UpdateNotificationEndpoint( - init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), - t *testing.T, -) { - type args struct { - userID platform.ID - orgID platform.ID - id platform.ID - notificationEndpoint influxdb.NotificationEndpoint - } - - type wants struct { - notificationEndpoint influxdb.NotificationEndpoint - err *errors.Error - } - tests := []struct { - name string - fields NotificationEndpointFields - args args - wants wants - }{ - { - name: "can't find the id", - fields: NotificationEndpointFields{ - TimeGenerator: fakeGenerator, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - userID: sixID, - id: fourID, - orgID: fourID, - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Inactive, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: "pager-duty-routing-key-2"}, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: `notification endpoint not found for key "0000000000000004"`, - }, - }, - }, - { - name: "regular update", - fields: NotificationEndpointFields{ - TimeGenerator: fakeGenerator, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - userID: sixID, - id: twoID, - orgID: fourID, - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name3", - OrgID: idPtr(fourID), - Status: influxdb.Inactive, - }, - ClientURL: "example-pagerduty2.com", - RoutingKey: influxdb.SecretField{Value: strPtr("secret value")}, - }, - }, - wants: wants{ - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name3", - OrgID: idPtr(fourID), - Status: influxdb.Inactive, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: fakeDate, - }, - }, - ClientURL: "example-pagerduty2.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - { - name: "update secret", - fields: NotificationEndpointFields{ - TimeGenerator: fakeGenerator, - 
NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - userID: sixID, - id: twoID, - orgID: fourID, - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name3", - OrgID: idPtr(fourID), - Status: influxdb.Inactive, - }, - ClientURL: "example-pagerduty2.com", - RoutingKey: influxdb.SecretField{ - Value: strPtr("pager-duty-value2"), - }, - }, - }, - wants: wants{ - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name3", - OrgID: idPtr(fourID), - Status: influxdb.Inactive, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: fakeDate, - }, - }, - ClientURL: "example-pagerduty2.com", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, secretSVC, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - edp, err := s.UpdateNotificationEndpoint(ctx, tt.args.id, tt.args.notificationEndpoint, tt.args.userID) - if err != nil { - require.Equal(t, tt.wants.err, err) - return - } - - if tt.wants.notificationEndpoint != nil { - secrets, err := secretSVC.GetSecretKeys(ctx, edp.GetOrgID()) - if err != nil { - t.Errorf("failed to retrieve secrets for endpoint: %v", err) - } - for _, actual := range edp.SecretFields() { - assert.Contains(t, secrets, actual.Key) - } - - actual, ok := edp.(*endpoint.PagerDuty) - require.Truef(t, ok, "did not get a pager duty endpoint; got: %#v", edp) - wanted := tt.wants.notificationEndpoint.(*endpoint.PagerDuty) - - wb, ab := wanted.Base, actual.Base - require.NotZero(t, ab.CRUDLog) - wb.CRUDLog, ab.CRUDLog = influxdb.CRUDLog{}, influxdb.CRUDLog{} // zero out times - assert.Equal(t, wb, ab) - assert.Equal(t, wanted.ClientURL, actual.ClientURL) - assert.NotEqual(t, wanted.RoutingKey, actual.RoutingKey) - } - }) - } -} - -// PatchNotificationEndpoint testing. 
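The patch cases that follow rely on influxdb.NotificationEndpointUpdate exposing pointer fields, so a nil field means "leave unchanged" and only the fields a caller actually sets are applied. A standalone sketch of that partial-update idiom (record and update are simplified stand-ins, not the influxdb types):

package main

import "fmt"

type record struct {
	Name   string
	Status string
}

// update uses pointer fields: nil means "leave this field unchanged".
type update struct {
	Name   *string
	Status *string
}

// apply copies only the fields that were explicitly set on the update.
func (r *record) apply(u update) {
	if u.Name != nil {
		r.Name = *u.Name
	}
	if u.Status != nil {
		r.Status = *u.Status
	}
}

func strPtr(s string) *string { return &s }

func main() {
	r := record{Name: "name2", Status: "active"}
	// Patch only the status; the name is left as-is.
	r.apply(update{Status: strPtr("inactive")})
	fmt.Printf("%+v\n", r) // {Name:name2 Status:inactive}
}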
-func PatchNotificationEndpoint( - init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), - t *testing.T, -) { - - name3 := "name2" - status3 := influxdb.Inactive - - type args struct { - //userID influxdb.ID - id platform.ID - upd influxdb.NotificationEndpointUpdate - } - - type wants struct { - notificationEndpoint influxdb.NotificationEndpoint - err *errors.Error - } - tests := []struct { - name string - fields NotificationEndpointFields - args args - wants wants - }{ - { - name: "can't find the id", - fields: NotificationEndpointFields{ - TimeGenerator: fakeGenerator, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - id: fourID, - upd: influxdb.NotificationEndpointUpdate{ - Name: &name3, - Status: &status3, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "notification endpoint not found", - }, - }, - }, - { - name: "regular update", - fields: NotificationEndpointFields{ - TimeGenerator: fakeGenerator, - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - Status: influxdb.Active, - OrgID: idPtr(fourID), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - Status: influxdb.Active, - OrgID: idPtr(fourID), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - id: twoID, - upd: influxdb.NotificationEndpointUpdate{ - Name: &name3, - Status: &status3, - }, - }, - wants: wants{ - notificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: name3, - Status: status3, - OrgID: idPtr(fourID), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: fakeDate, - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - edp, err := s.PatchNotificationEndpoint(ctx, tt.args.id, tt.args.upd) - if err != nil { - if tt.wants.err == nil { - require.NoError(t, err) - } - iErr, ok := err.(*errors.Error) - require.True(t, ok, err) - assert.Equal(t, tt.wants.err.Code, iErr.Code) - return - } - if diff := cmp.Diff(edp, tt.wants.notificationEndpoint, notificationEndpointCmpOptions...); tt.wants.err == nil && diff != "" { - 
t.Errorf("notificationEndpoints are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// DeleteNotificationEndpoint testing. -func DeleteNotificationEndpoint( - init func(NotificationEndpointFields, *testing.T) (influxdb.NotificationEndpointService, influxdb.SecretService, func()), - t *testing.T, -) { - type args struct { - id platform.ID - orgID platform.ID - userID platform.ID - } - - type wants struct { - notificationEndpoints []influxdb.NotificationEndpoint - secretFlds []influxdb.SecretField - orgID platform.ID - err *errors.Error - } - tests := []struct { - name string - fields NotificationEndpointFields - args args - wants wants - }{ - { - name: "bad id", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - id: platform.ID(0), - orgID: fourID, - userID: sixID, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "no key was provided for notification endpoint", - }, - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - }, - { - name: "none existing endpoint", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - id: fourID, - orgID: fourID, - userID: sixID, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "notification endpoint not found", - }, - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", 
- OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - }, - { - name: "regular delete", - fields: NotificationEndpointFields{ - NotificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(twoID), - Name: "name2", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - ClientURL: "example-pagerduty.com", - RoutingKey: influxdb.SecretField{Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - }, - }, - args: args{ - id: twoID, - orgID: fourID, - userID: sixID, - }, - wants: wants{ - secretFlds: []influxdb.SecretField{ - {Key: fmt.Sprintf("%s-routing-key", twoID)}, - }, - orgID: fourID, - notificationEndpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(oneID), - Name: "name1", - OrgID: idPtr(fourID), - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - URL: "example-slack.com", - Token: influxdb.SecretField{Key: fmt.Sprintf("%s-token", oneID)}, - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, secretSVC, done := init(tt.fields, t) - defer done() - - ctx := context.Background() - flds, orgID, err := s.DeleteNotificationEndpoint(ctx, tt.args.id) - influxErrsEqual(t, tt.wants.err, err) - if diff := cmp.Diff(flds, tt.wants.secretFlds); diff != "" { - t.Errorf("delete notification endpoint secret fields are different -got/+want\ndiff %s", diff) - } - if diff := cmp.Diff(orgID, tt.wants.orgID); diff != "" { - t.Errorf("delete notification endpoint org id is different -got/+want\ndiff %s", diff) - } - - filter := influxdb.NotificationEndpointFilter{} - edps, n, err := s.FindNotificationEndpoints(ctx, filter) - if err != nil && tt.wants.err == nil { - t.Fatalf("expected errors to be nil got '%v'", err) - } - - if n != len(tt.wants.notificationEndpoints) { - t.Fatalf("notification endpoints length is different got %d, want %d", n, len(tt.wants.notificationEndpoints)) - } - if diff := cmp.Diff(edps, tt.wants.notificationEndpoints, notificationEndpointCmpOptions...); diff != "" { - t.Errorf("notification endpoints are different -got/+want\ndiff %s", diff) - } - - var deletedEndpoint influxdb.NotificationEndpoint - for _, ne := range tt.fields.NotificationEndpoints { - if ne.GetID() == tt.args.id { - deletedEndpoint = ne - break - } - } - if deletedEndpoint == nil { - return - } - - secrets, err := secretSVC.GetSecretKeys(ctx, deletedEndpoint.GetOrgID()) - require.NoError(t, err) - for _, deleted := range 
deletedEndpoint.SecretFields() { - assert.NotContains(t, secrets, deleted.Key) - } - }) - } -} - -func influxErrsEqual(t *testing.T, expected *errors.Error, actual error) { - t.Helper() - - if expected != nil { - require.Error(t, actual) - } - - if actual == nil { - return - } - - if expected == nil { - require.NoError(t, actual) - return - } - iErr, ok := actual.(*errors.Error) - require.True(t, ok) - assert.Equal(t, expected.Code, iErr.Code) - assert.Truef(t, strings.HasPrefix(iErr.Error(), expected.Error()), "expected: %s got err: %s", expected.Error(), actual.Error()) -} - -func idPtr(id platform.ID) *platform.ID { - return &id -} - -func strPtr(s string) *string { return &s } - -// ErrorsEqual checks to see if the provided errors are equivalent. -func ErrorsEqual(t *testing.T, actual, expected error) { - t.Helper() - if expected == nil && actual == nil { - return - } - - if expected == nil && actual != nil { - t.Errorf("unexpected error %s", actual.Error()) - } - - if expected != nil && actual == nil { - t.Errorf("expected error %s but received nil", expected.Error()) - } - - if errors.ErrorCode(expected) != errors.ErrorCode(actual) { - t.Logf("\nexpected: %v\nactual: %v\n\n", expected, actual) - t.Errorf("expected error code %q but received %q", errors.ErrorCode(expected), errors.ErrorCode(actual)) - } - - if errors.ErrorMessage(expected) != errors.ErrorMessage(actual) { - t.Logf("\nexpected: %v\nactual: %v\n\n", expected, actual) - t.Errorf("expected error message %q but received %q", errors.ErrorMessage(expected), errors.ErrorMessage(actual)) - } -} diff --git a/notification/flux/ast.go b/notification/flux/ast.go deleted file mode 100644 index 5e08b60c9f5..00000000000 --- a/notification/flux/ast.go +++ /dev/null @@ -1,285 +0,0 @@ -package flux - -import "github.com/influxdata/flux/ast" - -// File creates a new *ast.File. -func File(name string, imports []*ast.ImportDeclaration, body []ast.Statement) *ast.File { - return &ast.File{ - Name: name, - Imports: imports, - Body: body, - } -} - -// GreaterThan returns a greater than *ast.BinaryExpression. -func GreaterThan(lhs, rhs ast.Expression) *ast.BinaryExpression { - return &ast.BinaryExpression{ - Operator: ast.GreaterThanOperator, - Left: lhs, - Right: rhs, - } -} - -// LessThan returns a less than *ast.BinaryExpression. -func LessThan(lhs, rhs ast.Expression) *ast.BinaryExpression { - return &ast.BinaryExpression{ - Operator: ast.LessThanOperator, - Left: lhs, - Right: rhs, - } -} - -// Equal returns an equal to *ast.BinaryExpression. -func Equal(lhs, rhs ast.Expression) *ast.BinaryExpression { - return &ast.BinaryExpression{ - Operator: ast.EqualOperator, - Left: lhs, - Right: rhs, - } -} - -// Subtract returns a subtraction *ast.BinaryExpression. -func Subtract(lhs, rhs ast.Expression) *ast.BinaryExpression { - return &ast.BinaryExpression{ - Operator: ast.SubtractionOperator, - Left: lhs, - Right: rhs, - } -} - -// Add returns a addition *ast.BinaryExpression. -func Add(lhs, rhs ast.Expression) *ast.BinaryExpression { - return &ast.BinaryExpression{ - Operator: ast.AdditionOperator, - Left: lhs, - Right: rhs, - } -} - -// Member returns an *ast.MemberExpression where the key is p and the values is c. -func Member(p, c string) *ast.MemberExpression { - return &ast.MemberExpression{ - Object: &ast.Identifier{Name: p}, - Property: String(c), - } -} - -// And returns an and *ast.LogicalExpression. 
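Taken together, these constructors (including And and Or just below) let notification rules assemble Flux predicates and whole files from plain Go values instead of string templates. As a rough usage sketch, a condition like r["_level"] == "crit" and r["_value"] > 10.0 could be built and rendered as follows, assuming the notification/flux package above is importable (shown here under its canonical github.com path) and that astutil.Format accepts the resulting *ast.File as it does in rule/http.go further below:

package main

import (
	"fmt"

	"github.com/influxdata/flux/ast"
	"github.com/influxdata/flux/ast/astutil"
	"github.com/influxdata/influxdb/v2/notification/flux"
)

func main() {
	// Build: r["_level"] == "crit" and r["_value"] > 10.0
	cond := flux.And(
		flux.Equal(flux.Member("r", "_level"), flux.String("crit")),
		flux.GreaterThan(flux.Member("r", "_value"), flux.Float(10.0)),
	)

	// Wrap the expression in a file and render it back to Flux source, the
	// same way rule.HTTP.GenerateFlux does further below in this diff.
	file := flux.File("example.flux", nil, []ast.Statement{flux.ExpressionStatement(cond)})
	src, err := astutil.Format(file)
	if err != nil {
		panic(err)
	}
	fmt.Println(src)
}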
-func And(lhs, rhs ast.Expression) *ast.LogicalExpression { - return &ast.LogicalExpression{ - Operator: ast.AndOperator, - Left: lhs, - Right: rhs, - } -} - -// Or returns an or *ast.LogicalExpression. -func Or(lhs, rhs ast.Expression) *ast.LogicalExpression { - return &ast.LogicalExpression{ - Operator: ast.OrOperator, - Left: lhs, - Right: rhs, - } -} - -// If returns an *ast.ConditionalExpression -func If(test, consequent, alternate ast.Expression) *ast.ConditionalExpression { - return &ast.ConditionalExpression{ - Test: test, - Consequent: consequent, - Alternate: alternate, - } -} - -// Pipe returns a *ast.PipeExpression that is a piped sequence of call expressions starting at base. -// It requires at least one call expression and will panic otherwise. -func Pipe(base ast.Expression, calls ...*ast.CallExpression) *ast.PipeExpression { - if len(calls) < 1 { - panic("must pipe forward to at least one *ast.CallExpression") - } - pe := appendPipe(base, calls[0]) - for _, call := range calls[1:] { - pe = appendPipe(pe, call) - } - - return pe -} - -func appendPipe(base ast.Expression, next *ast.CallExpression) *ast.PipeExpression { - return &ast.PipeExpression{ - Argument: base, - Call: next, - } -} - -// Call returns a *ast.CallExpression that is a function call of fn with args. -func Call(fn ast.Expression, args *ast.ObjectExpression) *ast.CallExpression { - return &ast.CallExpression{ - Callee: fn, - Arguments: []ast.Expression{ - args, - }, - } -} - -// ExpressionStatement returns an *ast.ExpressionStatement of e. -func ExpressionStatement(e ast.Expression) *ast.ExpressionStatement { - return &ast.ExpressionStatement{Expression: e} -} - -// Function returns an *ast.FunctionExpression with params with body b. -func Function(params []*ast.Property, b ast.Expression) *ast.FunctionExpression { - return &ast.FunctionExpression{ - Params: params, - Body: b, - } -} - -// FuncBlock takes a series of statements and produces a function. -func FuncBlock(params []*ast.Property, stms ...ast.Statement) *ast.FunctionExpression { - b := &ast.Block{ - Body: stms, - } - return &ast.FunctionExpression{ - Params: params, - Body: b, - } -} - -// String returns an *ast.StringLiteral of s. -func String(s string) *ast.StringLiteral { - return &ast.StringLiteral{ - Value: s, - } -} - -// Bool returns an *ast.BooleanLiteral of b. -func Bool(b bool) *ast.BooleanLiteral { - return &ast.BooleanLiteral{ - Value: b, - } -} - -// Duration returns an *ast.DurationLiteral for a single duration. -func Duration(m int64, u string) *ast.DurationLiteral { - return &ast.DurationLiteral{ - Values: []ast.Duration{ - { - Magnitude: m, - Unit: u, - }, - }, - } -} - -// Identifier returns an *ast.Identifier of i. -func Identifier(i string) *ast.Identifier { - return &ast.Identifier{Name: i} -} - -// Float returns an *ast.FloatLiteral of f. -func Float(f float64) *ast.FloatLiteral { - return &ast.FloatLiteral{ - Value: f, - } -} - -// Integer returns an *ast.IntegerLiteral of i. -func Integer(i int64) *ast.IntegerLiteral { - return &ast.IntegerLiteral{ - Value: i, - } -} - -// Negative returns *ast.UnaryExpression for -(e). -func Negative(e ast.Expression) *ast.UnaryExpression { - return &ast.UnaryExpression{ - Operator: ast.SubtractionOperator, - Argument: e, - } -} - -// DefineVariable returns an *ast.VariableAssignment of id to the e. (e.g. 
id = ) -func DefineVariable(id string, e ast.Expression) *ast.VariableAssignment { - return &ast.VariableAssignment{ - ID: &ast.Identifier{ - Name: id, - }, - Init: e, - } -} - -// DefineTaskOption returns an *ast.OptionStatement with the object provided. (e.g. option task = {...}) -func DefineTaskOption(o *ast.ObjectExpression) *ast.OptionStatement { - return &ast.OptionStatement{ - Assignment: DefineVariable("task", o), - } -} - -// Property returns an *ast.Property of key to e. (e.g. key: ) -func Property(key string, e ast.Expression) *ast.Property { - return &ast.Property{ - Key: &ast.Identifier{ - Name: key, - }, - Value: e, - } -} - -// Dictionary returns an *ast.Property of string key to value expression. -func Dictionary(key string, v ast.Expression) *ast.Property { - return &ast.Property{ - Key: String(key), - Value: v, - } -} - -// Object returns an *ast.ObjectExpression with properties ps. -func Object(ps ...*ast.Property) *ast.ObjectExpression { - return &ast.ObjectExpression{ - Properties: ps, - } -} - -// ObjectWith adds many properties to an existing named identifier. -func ObjectWith(name string, ps ...*ast.Property) *ast.ObjectExpression { - obj := Object(ps...) - obj.With = &ast.Identifier{ - Name: name, - } - return obj -} - -// Array returns *ast.ArrayExpression with elements es. -func Array(es ...ast.Expression) *ast.ArrayExpression { - return &ast.ArrayExpression{ - Elements: es, - } -} - -// FunctionParams returns a slice of *ast.Property for the parameters of a function. -func FunctionParams(args ...string) []*ast.Property { - var params []*ast.Property - for _, arg := range args { - params = append(params, &ast.Property{Key: &ast.Identifier{Name: arg}}) - } - return params -} - -// Imports returns a []*ast.ImportDeclaration for each package in pkgs. -func Imports(pkgs ...string) []*ast.ImportDeclaration { - var is []*ast.ImportDeclaration - for _, pkg := range pkgs { - is = append(is, ImportDeclaration(pkg)) - } - return is -} - -// ImportDeclaration returns an *ast.ImportDeclaration for pkg. -func ImportDeclaration(pkg string) *ast.ImportDeclaration { - return &ast.ImportDeclaration{ - Path: &ast.StringLiteral{ - Value: pkg, - }, - } -} diff --git a/notification/rule/http.go b/notification/rule/http.go deleted file mode 100644 index 0dfd2e8df88..00000000000 --- a/notification/rule/http.go +++ /dev/null @@ -1,181 +0,0 @@ -package rule - -import ( - "encoding/json" - "fmt" - - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/ast/astutil" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/flux" -) - -// HTTP is the notification rule config of http. -type HTTP struct { - Base -} - -// GenerateFlux generates a flux script for the http notification rule. -func (s *HTTP) GenerateFlux(e influxdb.NotificationEndpoint) (string, error) { - httpEndpoint, ok := e.(*endpoint.HTTP) - if !ok { - return "", fmt.Errorf("endpoint provided is a %s, not an HTTP endpoint", e.Type()) - } - return astutil.Format(s.GenerateFluxAST(httpEndpoint)) -} - -// GenerateFluxAST generates a flux AST for the http notification rule. 
-func (s *HTTP) GenerateFluxAST(e *endpoint.HTTP) *ast.File { - return flux.File(s.Name, s.imports(e), s.generateFluxASTBody(e)) -} - -func (s *HTTP) imports(e *endpoint.HTTP) []*ast.ImportDeclaration { - packages := []string{ - "influxdata/influxdb/monitor", - "http", - "json", - "experimental", - } - - if e.AuthMethod == "bearer" || e.AuthMethod == "basic" { - packages = append(packages, "influxdata/influxdb/secrets") - } - - return flux.Imports(packages...) -} - -func (s *HTTP) generateFluxASTBody(e *endpoint.HTTP) []ast.Statement { - var statements []ast.Statement - statements = append(statements, s.generateTaskOption()) - statements = append(statements, s.generateHeaders(e)) - statements = append(statements, s.generateFluxASTEndpoint(e)) - statements = append(statements, s.generateFluxASTNotificationDefinition(e)) - statements = append(statements, s.generateFluxASTStatuses()) - statements = append(statements, s.generateLevelChecks()...) - statements = append(statements, s.generateFluxASTNotifyPipe()) - - return statements -} - -func (s *HTTP) generateHeaders(e *endpoint.HTTP) ast.Statement { - props := []*ast.Property{ - flux.Dictionary( - "Content-Type", flux.String("application/json"), - ), - } - - switch e.AuthMethod { - case "bearer": - token := flux.Call( - flux.Member("secrets", "get"), - flux.Object( - flux.Property("key", flux.String(e.Token.Key)), - ), - ) - bearer := flux.Add( - flux.String("Bearer "), - token, - ) - auth := flux.Dictionary("Authorization", bearer) - props = append(props, auth) - case "basic": - username := flux.Call( - flux.Member("secrets", "get"), - flux.Object( - flux.Property("key", flux.String(e.Username.Key)), - ), - ) - passwd := flux.Call( - flux.Member("secrets", "get"), - flux.Object( - flux.Property("key", flux.String(e.Password.Key)), - ), - ) - - basic := flux.Call( - flux.Member("http", "basicAuth"), - flux.Object( - flux.Property("u", username), - flux.Property("p", passwd), - ), - ) - - auth := flux.Dictionary("Authorization", basic) - props = append(props, auth) - } - return flux.DefineVariable("headers", flux.Object(props...)) -} - -func (s *HTTP) generateFluxASTEndpoint(e *endpoint.HTTP) ast.Statement { - call := flux.Call(flux.Member("http", "endpoint"), flux.Object(flux.Property("url", flux.String(e.URL)))) - - return flux.DefineVariable("endpoint", call) -} - -func (s *HTTP) generateFluxASTNotifyPipe() ast.Statement { - endpointBody := flux.Call( - flux.Member("json", "encode"), - flux.Object(flux.Property("v", flux.Identifier("body"))), - ) - headers := flux.Property("headers", flux.Identifier("headers")) - - endpointProps := []*ast.Property{ - headers, - flux.Property("data", endpointBody), - } - endpointFn := flux.FuncBlock(flux.FunctionParams("r"), - s.generateBody(), - &ast.ReturnStatement{ - Argument: flux.Object(endpointProps...), - }, - ) - - props := []*ast.Property{} - props = append(props, flux.Property("data", flux.Identifier("notification"))) - props = append(props, flux.Property("endpoint", - flux.Call(flux.Identifier("endpoint"), flux.Object(flux.Property("mapFn", endpointFn))))) - - call := flux.Call(flux.Member("monitor", "notify"), flux.Object(props...)) - - return flux.ExpressionStatement(flux.Pipe(flux.Identifier("all_statuses"), call)) -} - -func (s *HTTP) generateBody() ast.Statement { - // {r with "_version": 1} - props := []*ast.Property{ - flux.Property( - "_version", flux.Integer(1), - ), - } - - body := flux.ObjectWith("r", props...) 
- return flux.DefineVariable("body", body) -} - -type httpAlias HTTP - -// MarshalJSON implement json.Marshaler interface. -func (s HTTP) MarshalJSON() ([]byte, error) { - return json.Marshal( - struct { - httpAlias - Type string `json:"type"` - }{ - httpAlias: httpAlias(s), - Type: s.Type(), - }) -} - -// Valid returns where the config is valid. -func (s HTTP) Valid() error { - if err := s.Base.valid(); err != nil { - return err - } - return nil -} - -// Type returns the type of the rule config. -func (s HTTP) Type() string { - return "http" -} diff --git a/notification/rule/http_test.go b/notification/rule/http_test.go deleted file mode 100644 index f9d8802c257..00000000000 --- a/notification/rule/http_test.go +++ /dev/null @@ -1,316 +0,0 @@ -package rule_test - -import ( - "testing" - - "github.com/andreyvit/diff" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/rule" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestHTTP_GenerateFlux(t *testing.T) { - want := itesting.FormatFluxString(t, `import "influxdata/influxdb/monitor" -import "http" -import "json" -import "experimental" - -option task = {name: "foo", every: 1h, offset: 1s} - -headers = {"Content-Type": "application/json"} -endpoint = http["endpoint"](url: "http://localhost:7777") -notification = { - _notification_rule_id: "0000000000000001", - _notification_rule_name: "foo", - _notification_endpoint_id: "0000000000000002", - _notification_endpoint_name: "foo", -} -statuses = monitor["from"](start: -2h) -crit = statuses |> filter(fn: (r) => r["_level"] == "crit") -all_statuses = crit |> filter(fn: (r) => r["_time"] >= experimental["subDuration"](from: now(), d: 1h)) - -all_statuses - |> monitor["notify"]( - data: notification, - endpoint: - endpoint( - mapFn: (r) => { - body = {r with _version: 1} - - return {headers: headers, data: json["encode"](v: body)} - }, - ), - ) -`) - - s := &rule.HTTP{ - Base: rule.Base{ - ID: 1, - Name: "foo", - Every: mustDuration("1h"), - Offset: mustDuration("1s"), - EndpointID: 2, - TagRules: []notification.TagRule{}, - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - }, - }, - } - - id := platform.ID(2) - e := &endpoint.HTTP{ - Base: endpoint.Base{ - ID: &id, - Name: "foo", - }, - URL: "http://localhost:7777", - } - - f, err := s.GenerateFlux(e) - if err != nil { - t.Fatal(err) - } - - if f != want { - t.Errorf("\n\nScripts did not match:\n\n%s", diff.LineDiff(f, want)) - } -} - -func TestHTTP_GenerateFlux_basicAuth(t *testing.T) { - want := itesting.FormatFluxString(t, `import "influxdata/influxdb/monitor" -import "http" -import "json" -import "experimental" -import "influxdata/influxdb/secrets" - -option task = {name: "foo", every: 1h, offset: 1s} - -headers = { - "Content-Type": "application/json", - "Authorization": - http["basicAuth"]( - u: secrets["get"](key: "000000000000000e-username"), - p: secrets["get"](key: "000000000000000e-password"), - ), -} -endpoint = http["endpoint"](url: "http://localhost:7777") -notification = { - _notification_rule_id: "0000000000000001", - _notification_rule_name: "foo", - _notification_endpoint_id: "0000000000000002", - _notification_endpoint_name: 
"foo", -} -statuses = monitor["from"](start: -2h) -crit = statuses |> filter(fn: (r) => r["_level"] == "crit") -all_statuses = crit |> filter(fn: (r) => r["_time"] >= experimental["subDuration"](from: now(), d: 1h)) - -all_statuses - |> monitor["notify"]( - data: notification, - endpoint: - endpoint( - mapFn: (r) => { - body = {r with _version: 1} - - return {headers: headers, data: json["encode"](v: body)} - }, - ), - ) -`) - s := &rule.HTTP{ - Base: rule.Base{ - ID: 1, - Name: "foo", - Every: mustDuration("1h"), - Offset: mustDuration("1s"), - EndpointID: 2, - TagRules: []notification.TagRule{}, - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - }, - }, - } - - id := platform.ID(2) - e := &endpoint.HTTP{ - Base: endpoint.Base{ - ID: &id, - Name: "foo", - }, - URL: "http://localhost:7777", - AuthMethod: "basic", - Username: influxdb.SecretField{ - Key: "000000000000000e-username", - }, - Password: influxdb.SecretField{ - Key: "000000000000000e-password", - }, - } - - f, err := s.GenerateFlux(e) - if err != nil { - t.Fatal(err) - } - - if f != want { - t.Errorf("\n\nScripts did not match:\n\n%s", diff.LineDiff(f, want)) - } -} - -func TestHTTP_GenerateFlux_bearer(t *testing.T) { - want := itesting.FormatFluxString(t, `import "influxdata/influxdb/monitor" -import "http" -import "json" -import "experimental" -import "influxdata/influxdb/secrets" - -option task = {name: "foo", every: 1h, offset: 1s} - -headers = { - "Content-Type": "application/json", - "Authorization": "Bearer " + secrets["get"](key: "000000000000000e-token"), -} -endpoint = http["endpoint"](url: "http://localhost:7777") -notification = { - _notification_rule_id: "0000000000000001", - _notification_rule_name: "foo", - _notification_endpoint_id: "0000000000000002", - _notification_endpoint_name: "foo", -} -statuses = monitor["from"](start: -2h) -crit = statuses |> filter(fn: (r) => r["_level"] == "crit") -all_statuses = crit |> filter(fn: (r) => r["_time"] >= experimental["subDuration"](from: now(), d: 1h)) - -all_statuses - |> monitor["notify"]( - data: notification, - endpoint: - endpoint( - mapFn: (r) => { - body = {r with _version: 1} - - return {headers: headers, data: json["encode"](v: body)} - }, - ), - ) -`) - - s := &rule.HTTP{ - Base: rule.Base{ - ID: 1, - Name: "foo", - Every: mustDuration("1h"), - Offset: mustDuration("1s"), - EndpointID: 2, - TagRules: []notification.TagRule{}, - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - }, - }, - } - - id := platform.ID(2) - e := &endpoint.HTTP{ - Base: endpoint.Base{ - ID: &id, - Name: "foo", - }, - URL: "http://localhost:7777", - AuthMethod: "bearer", - Token: influxdb.SecretField{ - Key: "000000000000000e-token", - }, - } - - f, err := s.GenerateFlux(e) - if err != nil { - t.Fatal(err) - } - - if f != want { - t.Errorf("\n\nScripts did not match:\n\n%s", diff.LineDiff(f, want)) - } -} - -func TestHTTP_GenerateFlux_bearer_every_second(t *testing.T) { - want := itesting.FormatFluxString(t, `import "influxdata/influxdb/monitor" -import "http" -import "json" -import "experimental" -import "influxdata/influxdb/secrets" - -option task = {name: "foo", every: 5s, offset: 1s} - -headers = { - "Content-Type": "application/json", - "Authorization": "Bearer " + secrets["get"](key: "000000000000000e-token"), -} -endpoint = http["endpoint"](url: "http://localhost:7777") -notification = { - _notification_rule_id: "0000000000000001", - _notification_rule_name: "foo", - _notification_endpoint_id: 
"0000000000000002", - _notification_endpoint_name: "foo", -} -statuses = monitor["from"](start: -10s) -crit = statuses |> filter(fn: (r) => r["_level"] == "crit") -all_statuses = crit |> filter(fn: (r) => r["_time"] >= experimental["subDuration"](from: now(), d: 5s)) - -all_statuses - |> monitor["notify"]( - data: notification, - endpoint: - endpoint( - mapFn: (r) => { - body = {r with _version: 1} - - return {headers: headers, data: json["encode"](v: body)} - }, - ), - ) -`) - - s := &rule.HTTP{ - Base: rule.Base{ - ID: 1, - Name: "foo", - Every: mustDuration("5s"), - Offset: mustDuration("1s"), - EndpointID: 2, - TagRules: []notification.TagRule{}, - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - }, - }, - } - - id := platform.ID(2) - e := &endpoint.HTTP{ - Base: endpoint.Base{ - ID: &id, - Name: "foo", - }, - URL: "http://localhost:7777", - AuthMethod: "bearer", - Token: influxdb.SecretField{ - Key: "000000000000000e-token", - }, - } - - f, err := s.GenerateFlux(e) - require.NoError(t, err) - assert.Equal(t, want, f) -} diff --git a/notification/rule/pagerduty.go b/notification/rule/pagerduty.go deleted file mode 100644 index 08093698f89..00000000000 --- a/notification/rule/pagerduty.go +++ /dev/null @@ -1,197 +0,0 @@ -package rule - -import ( - "encoding/json" - "fmt" - - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/ast/astutil" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/flux" -) - -// PagerDuty is the rule config of pagerduty notification. -type PagerDuty struct { - Base - MessageTemplate string `json:"messageTemplate"` -} - -type pagerDutyAlias PagerDuty - -// MarshalJSON implement json.Marshaler interface. -func (s PagerDuty) MarshalJSON() ([]byte, error) { - return json.Marshal( - struct { - pagerDutyAlias - Type string `json:"type"` - }{ - pagerDutyAlias: pagerDutyAlias(s), - Type: s.Type(), - }) -} - -// Valid returns where the config is valid. -func (s PagerDuty) Valid() error { - if err := s.Base.valid(); err != nil { - return err - } - if s.MessageTemplate == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "pagerduty invalid message template", - } - } - return nil -} - -// Type returns the type of the rule config. -func (s PagerDuty) Type() string { - return "pagerduty" -} - -// GenerateFlux generates a flux script for the pagerduty notification rule. -func (s *PagerDuty) GenerateFlux(e influxdb.NotificationEndpoint) (string, error) { - pagerdutyEndpoint, ok := e.(*endpoint.PagerDuty) - if !ok { - return "", fmt.Errorf("endpoint provided is a %s, not an PagerDuty endpoint", e.Type()) - } - return astutil.Format(s.GenerateFluxAST(pagerdutyEndpoint)) -} - -// GenerateFluxAST generates a flux AST for the pagerduty notification rule. 
-func (s *PagerDuty) GenerateFluxAST(e *endpoint.PagerDuty) *ast.File { - return flux.File( - s.Name, - flux.Imports("influxdata/influxdb/monitor", "pagerduty", "influxdata/influxdb/secrets", "experimental"), - s.generateFluxASTBody(e), - ) -} - -func (s *PagerDuty) generateFluxASTBody(e *endpoint.PagerDuty) []ast.Statement { - var statements []ast.Statement - statements = append(statements, s.generateTaskOption()) - statements = append(statements, s.generateFluxASTSecrets(e)) - statements = append(statements, s.generateFluxASTEndpoint(e)) - statements = append(statements, s.generateFluxASTNotificationDefinition(e)) - statements = append(statements, s.generateFluxASTStatuses()) - statements = append(statements, s.generateLevelChecks()...) - statements = append(statements, s.generateFluxASTNotifyPipe(e.ClientURL)) - - return statements -} - -func (s *PagerDuty) generateFluxASTSecrets(e *endpoint.PagerDuty) ast.Statement { - call := flux.Call(flux.Member("secrets", "get"), flux.Object(flux.Property("key", flux.String(e.RoutingKey.Key)))) - - return flux.DefineVariable("pagerduty_secret", call) -} - -func (s *PagerDuty) generateFluxASTEndpoint(e *endpoint.PagerDuty) ast.Statement { - call := flux.Call(flux.Member("pagerduty", "endpoint"), - flux.Object(), - ) - - return flux.DefineVariable("pagerduty_endpoint", call) -} - -func (s *PagerDuty) generateFluxASTNotifyPipe(url string) ast.Statement { - endpointProps := []*ast.Property{} - - // routing_key: - // required - // string - // A version 4 UUID expressed as a 32-digit hexadecimal number. This is the Integration Key for an integration on any given service. - endpointProps = append(endpointProps, flux.Property("routingKey", flux.Identifier("pagerduty_secret"))) - - // client: - // optional - // string - // name of the client sending the alert. - endpointProps = append(endpointProps, flux.Property("client", flux.String("influxdata"))) - - // clientURL - // optional - // string - // url of the client sending the alert. - endpointProps = append(endpointProps, flux.Property("clientURL", flux.String(url))) - - // class: - // optional - // string - // The class/type of the event, for example ping failure or cpu load - endpointProps = append(endpointProps, flux.Property("class", flux.Identifier("r._check_name"))) - - // group: - // optional - // string - // Logical grouping of components of a service, for example app-stack - endpointProps = append(endpointProps, flux.Property("group", flux.Member("r", "_source_measurement"))) - - // severity: - // required - // string - // The perceived severity of the status the event is describing with respect to the affected system. This can be critical, error, warning or info. - endpointProps = append(endpointProps, flux.Property("severity", severityFromLevel())) - - // event_action: - // required - // string trigger - // The type of event. Can be trigger, acknowledge or resolve. See Event Action. - endpointProps = append(endpointProps, flux.Property("eventAction", actionFromLevel())) - - // source: - // required - // string - // The unique location of the affected system, preferably a hostname or FQDN - endpointProps = append(endpointProps, flux.Property("source", flux.Member("notification", "_notification_rule_name"))) - - // summary: - // required - // string - // A brief text summary of the event, used to generate the summaries/titles of any associated alerts. The maximum permitted length of this property is 1024 characters. 
- endpointProps = append(endpointProps, flux.Property("summary", flux.Member("r", "_message"))) - - // timestamp: - // optional - // timestamp (rfc3339 milliseconds) - // The time at which the emitting tool detected or generated the event. - endpointProps = append(endpointProps, flux.Property("timestamp", generateTime())) - - endpointFn := flux.Function(flux.FunctionParams("r"), flux.Object(endpointProps...)) - - props := []*ast.Property{} - props = append(props, flux.Property("data", flux.Identifier("notification"))) - props = append(props, flux.Property("endpoint", - flux.Call(flux.Identifier("pagerduty_endpoint"), flux.Object(flux.Property("mapFn", endpointFn))))) - - call := flux.Call(flux.Member("monitor", "notify"), flux.Object(props...)) - - return flux.ExpressionStatement(flux.Pipe(flux.Identifier("all_statuses"), call)) -} - -func severityFromLevel() *ast.CallExpression { - return flux.Call( - flux.Member("pagerduty", "severityFromLevel"), - flux.Object( - flux.Property("level", flux.Member("r", "_level")), - ), - ) -} - -func actionFromLevel() *ast.CallExpression { - return flux.Call( - flux.Member("pagerduty", "actionFromLevel"), - flux.Object( - flux.Property("level", flux.Member("r", "_level")), - ), - ) -} - -func generateTime() *ast.CallExpression { - props := []*ast.Property{ - flux.Property("v", flux.Member("r", "_source_timestamp")), - } - return flux.Call(flux.Identifier("time"), flux.Object(props...)) -} diff --git a/notification/rule/pagerduty_test.go b/notification/rule/pagerduty_test.go deleted file mode 100644 index c31652acae3..00000000000 --- a/notification/rule/pagerduty_test.go +++ /dev/null @@ -1,296 +0,0 @@ -package rule_test - -import ( - "testing" - - "github.com/andreyvit/diff" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/rule" - itesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestPagerDuty_GenerateFlux(t *testing.T) { - tests := []struct { - name string - rule *rule.PagerDuty - endpoint *endpoint.PagerDuty - script string - }{ - { - name: "notify on crit", - endpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(2), - Name: "foo", - }, - ClientURL: "http://localhost:7777/host/${r.host}", - RoutingKey: influxdb.SecretField{ - Key: "pagerduty_token", - }, - }, - rule: &rule.PagerDuty{ - MessageTemplate: "blah", - Base: rule.Base{ - ID: 1, - EndpointID: 2, - Name: "foo", - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - }, - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "foo", - Value: "bar", - }, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{ - Key: "baz", - Value: "bang", - }, - Operator: influxdb.Equal, - }, - }, - }, - }, - script: `import "influxdata/influxdb/monitor" -import "pagerduty" -import "influxdata/influxdb/secrets" -import "experimental" - -option task = {name: "foo", every: 1h} - -pagerduty_secret = secrets["get"](key: "pagerduty_token") -pagerduty_endpoint = pagerduty["endpoint"]() -notification = { - _notification_rule_id: "0000000000000001", - _notification_rule_name: "foo", - _notification_endpoint_id: "0000000000000002", - _notification_endpoint_name: "foo", -} -statuses = monitor["from"](start: -2h, fn: (r) => r["foo"] == "bar" and r["baz"] == "bang") -crit = statuses |> filter(fn: (r) => r["_level"] == "crit") 
-all_statuses = crit |> filter(fn: (r) => r["_time"] >= experimental["subDuration"](from: now(), d: 1h)) - -all_statuses - |> monitor["notify"]( - data: notification, - endpoint: - pagerduty_endpoint( - mapFn: (r) => - ({ - routingKey: pagerduty_secret, - client: "influxdata", - clientURL: "http://localhost:7777/host/${r.host}", - class: r._check_name, - group: r["_source_measurement"], - severity: pagerduty["severityFromLevel"](level: r["_level"]), - eventAction: pagerduty["actionFromLevel"](level: r["_level"]), - source: notification["_notification_rule_name"], - summary: r["_message"], - timestamp: time(v: r["_source_timestamp"]), - }), - ), - ) -`, - }, - { - name: "notify on info to crit", - endpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(2), - Name: "foo", - }, - ClientURL: "http://localhost:7777/host/${r.host}", - RoutingKey: influxdb.SecretField{ - Key: "pagerduty_token", - }, - }, - rule: &rule.PagerDuty{ - MessageTemplate: "blah", - Base: rule.Base{ - ID: 1, - EndpointID: 2, - Name: "foo", - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - PreviousLevel: statusRulePtr(notification.Info), - }, - }, - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "foo", - Value: "bar", - }, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{ - Key: "baz", - Value: "bang", - }, - Operator: influxdb.Equal, - }, - }, - }, - }, - script: `import "influxdata/influxdb/monitor" -import "pagerduty" -import "influxdata/influxdb/secrets" -import "experimental" - -option task = {name: "foo", every: 1h} - -pagerduty_secret = secrets["get"](key: "pagerduty_token") -pagerduty_endpoint = pagerduty["endpoint"]() -notification = { - _notification_rule_id: "0000000000000001", - _notification_rule_name: "foo", - _notification_endpoint_id: "0000000000000002", - _notification_endpoint_name: "foo", -} -statuses = monitor["from"](start: -2h, fn: (r) => r["foo"] == "bar" and r["baz"] == "bang") -info_to_crit = statuses |> monitor["stateChanges"](fromLevel: "info", toLevel: "crit") -all_statuses = info_to_crit |> filter(fn: (r) => r["_time"] >= experimental["subDuration"](from: now(), d: 1h)) - -all_statuses - |> monitor["notify"]( - data: notification, - endpoint: - pagerduty_endpoint( - mapFn: (r) => - ({ - routingKey: pagerduty_secret, - client: "influxdata", - clientURL: "http://localhost:7777/host/${r.host}", - class: r._check_name, - group: r["_source_measurement"], - severity: pagerduty["severityFromLevel"](level: r["_level"]), - eventAction: pagerduty["actionFromLevel"](level: r["_level"]), - source: notification["_notification_rule_name"], - summary: r["_message"], - timestamp: time(v: r["_source_timestamp"]), - }), - ), - ) -`, - }, - { - name: "notify on crit or ok to warn", - endpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: idPtr(2), - Name: "foo", - }, - ClientURL: "http://localhost:7777/host/${r.host}", - RoutingKey: influxdb.SecretField{ - Key: "pagerduty_token", - }, - }, - rule: &rule.PagerDuty{ - MessageTemplate: "blah", - Base: rule.Base{ - ID: 1, - EndpointID: 2, - Name: "foo", - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - { - CurrentLevel: notification.Warn, - PreviousLevel: statusRulePtr(notification.Ok), - }, - }, - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "foo", - Value: "bar", - }, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{ - Key: "baz", - Value: "bang", - }, - 
Operator: influxdb.Equal, - }, - }, - }, - }, - script: `import "influxdata/influxdb/monitor" -import "pagerduty" -import "influxdata/influxdb/secrets" -import "experimental" - -option task = {name: "foo", every: 1h} - -pagerduty_secret = secrets["get"](key: "pagerduty_token") -pagerduty_endpoint = pagerduty["endpoint"]() -notification = { - _notification_rule_id: "0000000000000001", - _notification_rule_name: "foo", - _notification_endpoint_id: "0000000000000002", - _notification_endpoint_name: "foo", -} -statuses = monitor["from"](start: -2h, fn: (r) => r["foo"] == "bar" and r["baz"] == "bang") -crit = statuses |> filter(fn: (r) => r["_level"] == "crit") -ok_to_warn = statuses |> monitor["stateChanges"](fromLevel: "ok", toLevel: "warn") -all_statuses = - union(tables: [crit, ok_to_warn]) - |> sort(columns: ["_time"]) - |> filter(fn: (r) => r["_time"] >= experimental["subDuration"](from: now(), d: 1h)) - -all_statuses - |> monitor["notify"]( - data: notification, - endpoint: - pagerduty_endpoint( - mapFn: (r) => - ({ - routingKey: pagerduty_secret, - client: "influxdata", - clientURL: "http://localhost:7777/host/${r.host}", - class: r._check_name, - group: r["_source_measurement"], - severity: pagerduty["severityFromLevel"](level: r["_level"]), - eventAction: pagerduty["actionFromLevel"](level: r["_level"]), - source: notification["_notification_rule_name"], - summary: r["_message"], - timestamp: time(v: r["_source_timestamp"]), - }), - ), - ) -`, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - script, err := tt.rule.GenerateFlux(tt.endpoint) - if err != nil { - panic(err) - } - - if got, want := script, itesting.FormatFluxString(t, tt.script); got != want { - t.Errorf("\n\nStrings do not match:\n\n%s", diff.LineDiff(got, want)) - - } - - }) - } - -} diff --git a/notification/rule/rule.go b/notification/rule/rule.go deleted file mode 100644 index 39751af8c14..00000000000 --- a/notification/rule/rule.go +++ /dev/null @@ -1,422 +0,0 @@ -package rule - -import ( - "encoding/json" - "fmt" - "strings" - "time" - - "github.com/influxdata/flux/ast" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/flux" -) - -var typeToRule = map[string](func() influxdb.NotificationRule){ - "slack": func() influxdb.NotificationRule { return &Slack{} }, - "pagerduty": func() influxdb.NotificationRule { return &PagerDuty{} }, - "http": func() influxdb.NotificationRule { return &HTTP{} }, - "telegram": func() influxdb.NotificationRule { return &Telegram{} }, -} - -// UnmarshalJSON will convert -func UnmarshalJSON(b []byte) (influxdb.NotificationRule, error) { - var raw struct { - Typ string `json:"type"` - } - if err := json.Unmarshal(b, &raw); err != nil { - return nil, &errors.Error{ - Msg: "unable to detect the notification type from json", - } - } - convertedFunc, ok := typeToRule[raw.Typ] - if !ok { - return nil, &errors.Error{ - Msg: fmt.Sprintf("invalid notification type %s", raw.Typ), - } - } - converted := convertedFunc() - err := json.Unmarshal(b, converted) - return converted, err -} - -// Base is the embed struct of every notification rule. 
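-// It holds the identifiers, schedule (every/offset), and the tag and status rules shared by all rule types.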
-type Base struct { - ID platform.ID `json:"id,omitempty"` - Name string `json:"name"` - Description string `json:"description,omitempty"` - EndpointID platform.ID `json:"endpointID,omitempty"` - OrgID platform.ID `json:"orgID,omitempty"` - OwnerID platform.ID `json:"ownerID,omitempty"` - TaskID platform.ID `json:"taskID,omitempty"` - // SleepUntil is an optional sleeptime to start a task. - SleepUntil *time.Time `json:"sleepUntil,omitempty"` - Every *notification.Duration `json:"every,omitempty"` - // Offset represents a delay before execution. - // It gets marshalled from a string duration, i.e.: "10s" is 10 seconds - Offset *notification.Duration `json:"offset,omitempty"` - RunbookLink string `json:"runbookLink"` - TagRules []notification.TagRule `json:"tagRules,omitempty"` - StatusRules []notification.StatusRule `json:"statusRules,omitempty"` - *influxdb.Limit - influxdb.CRUDLog -} - -func (b Base) valid() error { - if !b.ID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Notification Rule ID is invalid", - } - } - if b.Name == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Notification Rule Name can't be empty", - } - } - if !b.OwnerID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Notification Rule OwnerID is invalid", - } - } - if !b.OrgID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Notification Rule OrgID is invalid", - } - } - if !b.EndpointID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Notification Rule EndpointID is invalid", - } - } - if b.Offset != nil && b.Every != nil && b.Offset.TimeDuration() >= b.Every.TimeDuration() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Offset should not be equal or greater than the interval", - } - } - for _, tagRule := range b.TagRules { - if err := tagRule.Valid(); err != nil { - return err - } - } - if b.Limit != nil { - if b.Limit.Every <= 0 || b.Limit.Rate <= 0 { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "if limit is set, limit and limitEvery must be larger than 0", - } - } - } - - return nil -} -func (b *Base) generateFluxASTNotificationDefinition(e influxdb.NotificationEndpoint) ast.Statement { - ruleID := flux.Property("_notification_rule_id", flux.String(b.ID.String())) - ruleName := flux.Property("_notification_rule_name", flux.String(b.Name)) - endpointID := flux.Property("_notification_endpoint_id", flux.String(b.EndpointID.String())) - endpointName := flux.Property("_notification_endpoint_name", flux.String(e.GetName())) - - return flux.DefineVariable("notification", flux.Object(ruleID, ruleName, endpointID, endpointName)) -} - -func (b *Base) generateLevelChecks() []ast.Statement { - stmts := []ast.Statement{} - tables := []ast.Expression{} - for _, r := range b.StatusRules { - stmt, table := b.generateLevelCheck(r) - tables = append(tables, table) - stmts = append(stmts, stmt) - } - - now := flux.Call(flux.Identifier("now"), flux.Object()) - timeFilter := flux.Function( - flux.FunctionParams("r"), - &ast.BinaryExpression{ - Operator: ast.GreaterThanEqualOperator, - Left: flux.Member("r", "_time"), - Right: flux.Call( - flux.Member("experimental", "subDuration"), - flux.Object( - flux.Property("from", now), - flux.Property("d", (*ast.DurationLiteral)(b.Every)), - ), - ), - }, - ) - - var pipe *ast.PipeExpression - if len(tables) == 1 { - pipe = flux.Pipe( - tables[0], - flux.Call( - flux.Identifier("filter"), - flux.Object( - flux.Property("fn", timeFilter), - ), - ), - ) - } else { - pipe = flux.Pipe( - 
flux.Call( - flux.Identifier("union"), - flux.Object( - flux.Property("tables", flux.Array(tables...)), - ), - ), - flux.Call( - flux.Identifier("sort"), - flux.Object( - flux.Property("columns", flux.Array(flux.String("_time"))), - ), - ), - flux.Call( - flux.Identifier("filter"), - flux.Object( - flux.Property("fn", timeFilter), - ), - ), - ) - } - - stmts = append(stmts, flux.DefineVariable("all_statuses", pipe)) - - return stmts -} - -func (b *Base) generateLevelCheck(r notification.StatusRule) (ast.Statement, *ast.Identifier) { - var name string - var pipe *ast.PipeExpression - if r.PreviousLevel == nil && r.CurrentLevel == notification.Any { - pipe = flux.Pipe( - flux.Identifier("statuses"), - flux.Call( - flux.Identifier("filter"), - flux.Object( - flux.Property("fn", flux.Function( - flux.FunctionParams("r"), - flux.Bool(true), - ), - ), - ), - ), - ) - name = strings.ToLower(r.CurrentLevel.String()) - } else if r.PreviousLevel == nil { - pipe = flux.Pipe( - flux.Identifier("statuses"), - flux.Call( - flux.Identifier("filter"), - flux.Object( - flux.Property("fn", flux.Function( - flux.FunctionParams("r"), - flux.Equal( - flux.Member("r", "_level"), - flux.String(strings.ToLower(r.CurrentLevel.String())), - ), - ), - ), - ), - ), - ) - name = strings.ToLower(r.CurrentLevel.String()) - } else { - fromLevel := strings.ToLower(r.PreviousLevel.String()) - toLevel := strings.ToLower(r.CurrentLevel.String()) - - pipe = flux.Pipe( - flux.Identifier("statuses"), - flux.Call( - flux.Member("monitor", "stateChanges"), - flux.Object( - flux.Property("fromLevel", flux.String(fromLevel)), - flux.Property("toLevel", flux.String(toLevel)), - ), - ), - ) - name = fmt.Sprintf("%s_to_%s", fromLevel, toLevel) - } - - return flux.DefineVariable(name, pipe), flux.Identifier(name) -} - -// increaseDur increases the duration of leading duration in a duration literal. -// It is used so that we will have overlapping windows. If the unit of the literal -// is `s`, we double the interval; otherwise we increase the value by 1. The reason -// for this is to that we query the minimal amount of time that is likely to have data -// in the time range. -// -// This is currently a hack around https://github.com/influxdata/flux/issues/1877 -func increaseDur(d *ast.DurationLiteral) *ast.DurationLiteral { - dur := &ast.DurationLiteral{} - for i, v := range d.Values { - value := v - if i == 0 { - switch v.Unit { - case "s", "ms", "us", "ns": - value.Magnitude *= 2 - default: - value.Magnitude += 1 - } - } - dur.Values = append(dur.Values, value) - } - - return dur -} - -func (b *Base) generateTaskOption() ast.Statement { - props := []*ast.Property{} - - props = append(props, flux.Property("name", flux.String(b.Name))) - - if b.Every != nil { - // Make the windows overlap and filter records from previous queries. - // This is so that we wont miss the first points possible state change. 
- props = append(props, flux.Property("every", (*ast.DurationLiteral)(b.Every))) - } - - if b.Offset != nil { - props = append(props, flux.Property("offset", (*ast.DurationLiteral)(b.Offset))) - } - - return flux.DefineTaskOption(flux.Object(props...)) -} - -func (b *Base) generateFluxASTStatuses() ast.Statement { - props := []*ast.Property{} - - dur := (*ast.DurationLiteral)(b.Every) - props = append(props, flux.Property("start", flux.Negative(increaseDur(dur)))) - - if len(b.TagRules) > 0 { - r := b.TagRules[0] - var body ast.Expression = r.GenerateFluxAST() - for _, r := range b.TagRules[1:] { - body = flux.And(body, r.GenerateFluxAST()) - } - props = append(props, flux.Property("fn", flux.Function(flux.FunctionParams("r"), body))) - } - - base := flux.Call(flux.Member("monitor", "from"), flux.Object(props...)) - - return flux.DefineVariable("statuses", base) -} - -// GetID implements influxdb.Getter interface. -func (b Base) GetID() platform.ID { - return b.ID -} - -// GetEndpointID gets the endpointID for a base. -func (b Base) GetEndpointID() platform.ID { - return b.EndpointID -} - -// GetOrgID implements influxdb.Getter interface. -func (b Base) GetOrgID() platform.ID { - return b.OrgID -} - -// GetTaskID gets the task ID for a base. -func (b Base) GetTaskID() platform.ID { - return b.TaskID -} - -// SetTaskID sets the task ID for a base. -func (b *Base) SetTaskID(id platform.ID) { - b.TaskID = id -} - -// ClearPrivateData clears the task ID from the base. -func (b *Base) ClearPrivateData() { - b.TaskID = 0 -} - -// MatchesTags returns true if the Rule matches all of the tags -func (b *Base) MatchesTags(tags []influxdb.Tag) bool { - if len(tags) == 0 { - return true - } - // for each tag in NR - // if there exists - // a key value match with operator == equal in tags - // or - // a key match with a value mismatch with operator == notequal in tags - // then true - - for _, NRtag := range b.TagRules { - isNRTagInFilterTags := false - - for _, filterTag := range tags { - if NRtag.Key == filterTag.Key { - if NRtag.Operator == influxdb.Equal && NRtag.Value == filterTag.Value { - isNRTagInFilterTags = true - } - if NRtag.Operator == influxdb.NotEqual && NRtag.Value != filterTag.Value { - isNRTagInFilterTags = true - } - } - } - if !isNRTagInFilterTags { - return false - } - } - return true -} - -// GetOwnerID returns the owner id. -func (b Base) GetOwnerID() platform.ID { - return b.OwnerID -} - -// GetCRUDLog implements influxdb.Getter interface. -func (b Base) GetCRUDLog() influxdb.CRUDLog { - return b.CRUDLog -} - -// GetLimit returns the limit pointer. -func (b *Base) GetLimit() *influxdb.Limit { - return b.Limit -} - -// GetName implements influxdb.Getter interface. -func (b *Base) GetName() string { - return b.Name -} - -// GetDescription implements influxdb.Getter interface. -func (b *Base) GetDescription() string { - return b.Description -} - -// SetID will set the primary key. -func (b *Base) SetID(id platform.ID) { - b.ID = id -} - -// SetOrgID will set the org key. -func (b *Base) SetOrgID(id platform.ID) { - b.OrgID = id -} - -// SetOwnerID will set the owner id. -func (b *Base) SetOwnerID(id platform.ID) { - b.OwnerID = id -} - -// SetName implements influxdb.Updator interface. -func (b *Base) SetName(name string) { - b.Name = name -} - -// SetDescription implements influxdb.Updator interface. 
-func (b *Base) SetDescription(description string) { - b.Description = description -} diff --git a/notification/rule/rule_test.go b/notification/rule/rule_test.go deleted file mode 100644 index 0a8463e2a42..00000000000 --- a/notification/rule/rule_test.go +++ /dev/null @@ -1,541 +0,0 @@ -package rule_test - -import ( - "encoding/json" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/rule" - "github.com/influxdata/influxdb/v2/pkg/testing/assert" - influxTesting "github.com/influxdata/influxdb/v2/testing" -) - -func lvlPtr(l notification.CheckLevel) *notification.CheckLevel { - return &l -} - -const ( - id1 = "020f755c3c082000" - id2 = "020f755c3c082001" - id3 = "020f755c3c082002" -) - -var goodBase = rule.Base{ - ID: influxTesting.MustIDBase16(id1), - Name: "name1", - OwnerID: influxTesting.MustIDBase16(id2), - OrgID: influxTesting.MustIDBase16(id3), - EndpointID: 1, -} - -func TestValidRule(t *testing.T) { - cases := []struct { - name string - src influxdb.NotificationRule - err error - }{ - { - name: "invalid rule id", - src: &rule.Slack{}, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "Notification Rule ID is invalid", - }, - }, - { - name: "empty name", - src: &rule.PagerDuty{ - Base: rule.Base{ - ID: influxTesting.MustIDBase16(id1), - }, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "Notification Rule Name can't be empty", - }, - }, - { - name: "invalid auth id", - src: &rule.PagerDuty{ - Base: rule.Base{ - ID: influxTesting.MustIDBase16(id1), - Name: "name1", - }, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "Notification Rule OwnerID is invalid", - }, - }, - { - name: "invalid org id", - src: &rule.PagerDuty{ - Base: rule.Base{ - ID: influxTesting.MustIDBase16(id1), - Name: "name1", - OwnerID: influxTesting.MustIDBase16(id2), - }, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "Notification Rule OrgID is invalid", - }, - }, - { - name: "invalid org id", - src: &rule.Slack{ - Base: rule.Base{ - ID: influxTesting.MustIDBase16(id1), - Name: "name1", - OwnerID: influxTesting.MustIDBase16(id2), - OrgID: influxTesting.MustIDBase16(id3), - EndpointID: 0, - }, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "Notification Rule EndpointID is invalid", - }, - }, - { - name: "offset greater then interval", - src: &rule.Slack{ - Base: rule.Base{ - ID: influxTesting.MustIDBase16(id1), - Name: "name1", - OwnerID: influxTesting.MustIDBase16(id2), - OrgID: influxTesting.MustIDBase16(id3), - EndpointID: 1, - Every: mustDuration("1m"), - Offset: mustDuration("2m"), - }, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "Offset should not be equal or greater than the interval", - }, - }, - { - name: "empty slack message", - src: &rule.Slack{ - Base: goodBase, - Channel: "channel1", - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "slack msg template is empty", - }, - }, - { - name: "empty pagerDuty message", - src: &rule.PagerDuty{ - Base: goodBase, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "pagerduty invalid message template", - }, - }, - { - name: "bad tag rule", - src: &rule.PagerDuty{ - Base: rule.Base{ - ID: influxTesting.MustIDBase16(id1), - OwnerID: influxTesting.MustIDBase16(id2), - Name: "name1", 
- OrgID: influxTesting.MustIDBase16(id3), - EndpointID: 1, - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "k1", - Value: "v1", - }, - Operator: -5, - }, - }, - }, - MessageTemplate: "body {var2}", - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: `Operator is invalid`, - }, - }, - { - name: "bad limit", - src: &rule.PagerDuty{ - Base: rule.Base{ - ID: influxTesting.MustIDBase16(id1), - OwnerID: influxTesting.MustIDBase16(id2), - OrgID: influxTesting.MustIDBase16(id3), - EndpointID: 1, - Name: "name1", - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "k1", - Value: "v1", - }, - Operator: influxdb.RegexEqual, - }, - }, - Limit: &influxdb.Limit{ - Rate: 3, - }, - }, - MessageTemplate: "body {var2}", - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: `if limit is set, limit and limitEvery must be larger than 0`, - }, - }, - } - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - got := c.src.Valid() - influxTesting.ErrorsEqual(t, got, c.err) - }) - } -} - -var timeGen1 = mock.TimeGenerator{FakeValue: time.Date(2006, time.July, 13, 4, 19, 10, 0, time.UTC)} -var timeGen2 = mock.TimeGenerator{FakeValue: time.Date(2006, time.July, 14, 5, 23, 53, 10, time.UTC)} -var time3 = time.Date(2006, time.July, 15, 5, 23, 53, 10, time.UTC) - -func TestJSON(t *testing.T) { - cases := []struct { - name string - src influxdb.NotificationRule - }{ - { - name: "simple slack", - src: &rule.Slack{ - Base: rule.Base{ - ID: influxTesting.MustIDBase16(id1), - OwnerID: influxTesting.MustIDBase16(id2), - Name: "name1", - OrgID: influxTesting.MustIDBase16(id3), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "k1", - Value: "v1", - }, - Operator: influxdb.NotEqual, - }, - { - Tag: influxdb.Tag{ - Key: "k2", - Value: "v2", - }, - Operator: influxdb.RegexEqual, - }, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - }, - { - name: "simple smtp", - src: &rule.PagerDuty{ - Base: rule.Base{ - ID: influxTesting.MustIDBase16(id1), - Name: "name1", - OwnerID: influxTesting.MustIDBase16(id2), - OrgID: influxTesting.MustIDBase16(id3), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "k1", - Value: "v1", - }, - Operator: influxdb.NotEqual, - }, - { - Tag: influxdb.Tag{ - Key: "k2", - Value: "v2", - }, - Operator: influxdb.RegexEqual, - }, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg1", - }, - }, - { - name: "simple pagerDuty", - src: &rule.PagerDuty{ - Base: rule.Base{ - ID: influxTesting.MustIDBase16(id1), - Name: "name1", - OwnerID: influxTesting.MustIDBase16(id2), - OrgID: influxTesting.MustIDBase16(id3), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "k1", - Value: "v1", - }, - Operator: influxdb.NotEqual, - }, - }, - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Warn, - PreviousLevel: lvlPtr(notification.Critical), - }, - { - CurrentLevel: notification.Critical, - }, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg1", - }, - }, - { - name: "simple telegram", - src: 
&rule.Telegram{ - Base: rule.Base{ - ID: influxTesting.MustIDBase16(id1), - OwnerID: influxTesting.MustIDBase16(id2), - Name: "name1", - OrgID: influxTesting.MustIDBase16(id3), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "k1", - Value: "v1", - }, - Operator: influxdb.NotEqual, - }, - { - Tag: influxdb.Tag{ - Key: "k2", - Value: "v2", - }, - Operator: influxdb.RegexEqual, - }, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "blah", - }, - }, - } - for _, c := range cases { - b, err := json.Marshal(c.src) - if err != nil { - t.Fatalf("%s marshal failed, err: %s", c.name, err.Error()) - } - got, err := rule.UnmarshalJSON(b) - if err != nil { - t.Fatalf("%s unmarshal failed, err: %s", c.name, err.Error()) - } - if diff := cmp.Diff(got, c.src); diff != "" { - t.Errorf("failed %s, notification rule are different -got/+want\ndiff %s", c.name, diff) - } - } -} - -func TestMatchingRules(t *testing.T) { - cases := []struct { - name string - tagRules []notification.TagRule - filterTags []influxdb.Tag - exp bool - }{ - { - name: "Matches when tagrules and filterTags are the same. ", - tagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "a", - Value: "b", - }, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{ - Key: "c", - Value: "d", - }, - Operator: influxdb.Equal, - }, - }, - filterTags: []influxdb.Tag{ - {Key: "a", Value: "b"}, - {Key: "c", Value: "d"}, - }, - exp: true, - }, - { - name: "Matches when tagrules are subset of filterTags. ", - tagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "a", - Value: "b", - }, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{ - Key: "c", - Value: "d", - }, - Operator: influxdb.Equal, - }, - }, - filterTags: []influxdb.Tag{ - {Key: "a", Value: "b"}, - {Key: "c", Value: "d"}, - {Key: "e", Value: "f"}, - }, - exp: true, - }, - { - name: "Does not match when filterTags are missing tags that are in tag rules.", - tagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "a", - Value: "b", - }, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{ - Key: "c", - Value: "d", - }, - Operator: influxdb.Equal, - }, - }, - filterTags: []influxdb.Tag{ - {Key: "a", Value: "b"}, - }, - exp: false, - }, - { - name: "Does not match when tagrule has key value pair that does not match value of same key in filterTags", - tagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "a", - Value: "b", - }, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{ - Key: "c", - Value: "d", - }, - Operator: influxdb.Equal, - }, - }, - filterTags: []influxdb.Tag{ - {Key: "a", Value: "b"}, - {Key: "c", Value: "X"}, - }, - exp: false, - }, - { - name: "Match when tagrule has key value pair that does not match value of same key in filterTags, if tagrule has notEqual operator", - tagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "a", - Value: "b", - }, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{ - Key: "c", - Value: "d", - }, - Operator: influxdb.NotEqual, - }, - }, - filterTags: []influxdb.Tag{ - {Key: "a", Value: "b"}, - {Key: "c", Value: "X"}, - }, - exp: true, - }, - { - name: "Empty tag rule matches filterTags", - tagRules: []notification.TagRule{}, - filterTags: []influxdb.Tag{ - {Key: "a", Value: "b"}, - {Key: "c", Value: "X"}, - }, - exp: true, - }, - { - name: "Non empty tag rule matches empty filter tags", - 
tagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "c", - Value: "d", - }, - Operator: influxdb.NotEqual, - }, - }, - filterTags: []influxdb.Tag{}, - exp: true, - }, - { - name: "Empty tag rule matches empty filter tags", - tagRules: []notification.TagRule{}, - filterTags: []influxdb.Tag{}, - exp: true, - }, - } - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - - r := rule.Base{TagRules: c.tagRules} - - assert.Equal(t, r.MatchesTags(c.filterTags), c.exp, "expected NR tags to be subset of filterTags") - }) - } -} diff --git a/notification/rule/service/service.go b/notification/rule/service/service.go deleted file mode 100644 index 46e8d1e98b5..00000000000 --- a/notification/rule/service/service.go +++ /dev/null @@ -1,500 +0,0 @@ -package service - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/notification/rule" - "github.com/influxdata/influxdb/v2/pkg/pointer" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "go.uber.org/zap" -) - -var ( - notificationRuleBucket = []byte("notificationRulev1") - - // ErrNotificationRuleNotFound is used when the notification rule is not found. - ErrNotificationRuleNotFound = &errors.Error{ - Msg: "notification rule not found", - Code: errors.ENotFound, - } - - // ErrInvalidNotificationRuleID is used when the service was provided - // an invalid ID format. - ErrInvalidNotificationRuleID = &errors.Error{ - Code: errors.EInvalid, - Msg: "provided notification rule ID has invalid format", - } -) - -// RuleService is an implementation of the influxdb CheckService -// It is backed by the kv store abstraction. -type RuleService struct { - log *zap.Logger - - kv kv.Store - tasks taskmodel.TaskService - orgs influxdb.OrganizationService - endpoints influxdb.NotificationEndpointService - - idGenerator platform.IDGenerator - timeGenerator influxdb.TimeGenerator -} - -// New constructs and configures a notification rule service -func New(logger *zap.Logger, store kv.Store, tasks taskmodel.TaskService, orgs influxdb.OrganizationService, endpoints influxdb.NotificationEndpointService) (*RuleService, error) { - s := &RuleService{ - log: logger, - kv: store, - tasks: tasks, - orgs: orgs, - endpoints: endpoints, - timeGenerator: influxdb.RealTimeGenerator{}, - idGenerator: snowflake.NewIDGenerator(), - } - - ctx := context.Background() - if err := store.Update(ctx, func(tx kv.Tx) error { - return s.initializeNotificationRule(ctx, tx) - }); err != nil { - return nil, err - } - - return s, nil -} - -var _ influxdb.NotificationRuleStore = (*RuleService)(nil) - -func (s *RuleService) initializeNotificationRule(ctx context.Context, tx kv.Tx) error { - if _, err := s.notificationRuleBucket(tx); err != nil { - return err - } - return nil -} - -// UnavailableNotificationRuleStoreError is used if we aren't able to interact with the -// store, it means the store is not available at the moment (e.g. network). -func UnavailableNotificationRuleStoreError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("Unable to connect to notification rule store service. 
Please try again; Err: %v", err), - Op: "kv/notificationRule", - } -} - -// InternalNotificationRuleStoreError is used when the error comes from an -// internal system. -func InternalNotificationRuleStoreError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("Unknown internal notificationRule data error; Err: %v", err), - Op: "kv/notificationRule", - } -} - -func (s *RuleService) notificationRuleBucket(tx kv.Tx) (kv.Bucket, error) { - b, err := tx.Bucket(notificationRuleBucket) - if err != nil { - return nil, UnavailableNotificationRuleStoreError(err) - } - return b, nil -} - -// CreateNotificationRule creates a new notification rule and sets b.ID with the new identifier. -func (s *RuleService) CreateNotificationRule(ctx context.Context, nr influxdb.NotificationRuleCreate, userID platform.ID) error { - // set notification rule ID - id := s.idGenerator.ID() - nr.SetID(id) - - // set notification rule created / updated times - now := s.timeGenerator.Now() - nr.SetOwnerID(userID) - nr.SetCreatedAt(now) - nr.SetUpdatedAt(now) - - // create backing task and set ID (in inactive state initially) - t, err := s.createNotificationTask(ctx, nr) - if err != nil { - return err - } - - nr.SetTaskID(t.ID) - - if err := s.kv.Update(ctx, func(tx kv.Tx) error { - return s.createNotificationRule(ctx, tx, nr, userID) - }); err != nil { - // remove associated task - if derr := s.tasks.DeleteTask(ctx, t.ID); derr != nil { - s.log.Error("failed to remove task for invalid notification rule", zap.Error(derr)) - } - - return err - } - - // set task to notification rule create status - _, err = s.tasks.UpdateTask(ctx, t.ID, taskmodel.TaskUpdate{Status: pointer.String(string(nr.Status))}) - return err -} - -func (s *RuleService) createNotificationRule(ctx context.Context, tx kv.Tx, nr influxdb.NotificationRuleCreate, userID platform.ID) error { - if err := nr.Valid(); err != nil { - return err - } - - if err := nr.Status.Valid(); err != nil { - return err - } - - return s.putNotificationRule(ctx, tx, nr.NotificationRule) -} - -func (s *RuleService) createNotificationTask(ctx context.Context, r influxdb.NotificationRuleCreate) (*taskmodel.Task, error) { - ep, err := s.endpoints.FindNotificationEndpointByID(ctx, r.GetEndpointID()) - if err != nil { - return nil, err - } - - script, err := r.GenerateFlux(ep) - if err != nil { - return nil, err - } - - tc := taskmodel.TaskCreate{ - Type: r.Type(), - Flux: script, - OwnerID: r.GetOwnerID(), - OrganizationID: r.GetOrgID(), - // create task initially in inactive status - Status: string(influxdb.Inactive), - } - - t, err := s.tasks.CreateTask(ctx, tc) - if err != nil { - return nil, err - } - - return t, nil -} - -// UpdateNotificationRule updates a single notification rule. -// Returns the new notification rule after update. 
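-// The rule's immutable fields are preserved and the backing task's flux script is regenerated before the rule is persisted.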
-func (s *RuleService) UpdateNotificationRule(ctx context.Context, id platform.ID, nr influxdb.NotificationRuleCreate, userID platform.ID) (influxdb.NotificationRule, error) { - rule, err := s.FindNotificationRuleByID(ctx, id) - if err != nil { - return nil, err - } - - // ID and OrganizationID can not be updated - nr.SetID(rule.GetID()) - nr.SetOrgID(rule.GetOrgID()) - nr.SetOwnerID(rule.GetOwnerID()) - nr.SetCreatedAt(rule.GetCRUDLog().CreatedAt) - nr.SetUpdatedAt(s.timeGenerator.Now()) - nr.SetTaskID(rule.GetTaskID()) - - if err := nr.Valid(); err != nil { - return nil, err - } - - if err := nr.Status.Valid(); err != nil { - return nil, err - } - - _, err = s.updateNotificationTask(ctx, nr, pointer.String(string(nr.Status))) - if err != nil { - return nil, err - } - - err = s.kv.Update(ctx, func(tx kv.Tx) error { - return s.putNotificationRule(ctx, tx, nr.NotificationRule) - }) - - return nr.NotificationRule, err -} - -func (s *RuleService) updateNotificationTask(ctx context.Context, r influxdb.NotificationRule, status *string) (*taskmodel.Task, error) { - ep, err := s.endpoints.FindNotificationEndpointByID(ctx, r.GetEndpointID()) - if err != nil { - return nil, err - } - - script, err := r.GenerateFlux(ep) - if err != nil { - return nil, err - } - - tu := taskmodel.TaskUpdate{ - Flux: &script, - Description: pointer.String(r.GetDescription()), - Status: status, - } - - t, err := s.tasks.UpdateTask(ctx, r.GetTaskID(), tu) - if err != nil { - return nil, err - } - - return t, nil -} - -// PatchNotificationRule updates a single notification rule with changeset. -// Returns the new notification rule state after update. -func (s *RuleService) PatchNotificationRule(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) { - nr, err := s.FindNotificationRuleByID(ctx, id) - if err != nil { - return nil, err - } - - if upd.Name != nil { - nr.SetName(*upd.Name) - } - if upd.Description != nil { - nr.SetDescription(*upd.Description) - } - - var status *string - if upd.Status != nil { - status = pointer.String(string(*upd.Status)) - } - - nr.SetUpdatedAt(s.timeGenerator.Now()) - if err := nr.Valid(); err != nil { - return nil, err - } - - _, err = s.updateNotificationTask(ctx, nr, status) - if err != nil { - return nil, err - } - - if err := s.kv.Update(ctx, func(tx kv.Tx) (err error) { - return s.putNotificationRule(ctx, tx, nr) - }); err != nil { - return nil, err - } - - return nr, nil -} - -// PutNotificationRule put a notification rule to storage. -func (s *RuleService) PutNotificationRule(ctx context.Context, nr influxdb.NotificationRuleCreate) error { - return s.kv.Update(ctx, func(tx kv.Tx) (err error) { - if err := nr.Valid(); err != nil { - return err - } - - if err := nr.Status.Valid(); err != nil { - return err - } - - return s.putNotificationRule(ctx, tx, nr) - }) -} - -func (s *RuleService) putNotificationRule(ctx context.Context, tx kv.Tx, nr influxdb.NotificationRule) error { - encodedID, _ := nr.GetID().Encode() - - v, err := json.Marshal(nr) - if err != nil { - return err - } - - bucket, err := s.notificationRuleBucket(tx) - if err != nil { - return err - } - - if err := bucket.Put(encodedID, v); err != nil { - return UnavailableNotificationRuleStoreError(err) - } - return nil -} - -// FindNotificationRuleByID returns a single notification rule by ID. 
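-// The lookup runs inside a read-only view of the underlying kv store.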
-func (s *RuleService) FindNotificationRuleByID(ctx context.Context, id platform.ID) (influxdb.NotificationRule, error) { - var ( - nr influxdb.NotificationRule - err error - ) - - err = s.kv.View(ctx, func(tx kv.Tx) error { - nr, err = s.findNotificationRuleByID(ctx, tx, id) - return err - }) - - return nr, err -} - -func (s *RuleService) findNotificationRuleByID(ctx context.Context, tx kv.Tx, id platform.ID) (influxdb.NotificationRule, error) { - encID, err := id.Encode() - if err != nil { - return nil, ErrInvalidNotificationRuleID - } - - bucket, err := s.notificationRuleBucket(tx) - if err != nil { - return nil, err - } - - v, err := bucket.Get(encID) - if kv.IsNotFound(err) { - return nil, ErrNotificationRuleNotFound - } - if err != nil { - return nil, InternalNotificationRuleStoreError(err) - } - - return rule.UnmarshalJSON(v) -} - -// FindNotificationRules returns a list of notification rules that match filter and the total count of matching notification rules. -// Additional options provide pagination & sorting. -func (s *RuleService) FindNotificationRules(ctx context.Context, filter influxdb.NotificationRuleFilter, opt ...influxdb.FindOptions) (nrs []influxdb.NotificationRule, n int, err error) { - if filter.OrgID == nil && filter.Organization != nil { - o, err := s.orgs.FindOrganization(ctx, influxdb.OrganizationFilter{ - Name: filter.Organization, - }) - - if err != nil { - return nrs, 0, err - } - - filter.OrgID = &o.ID - } - - err = s.kv.View(ctx, func(tx kv.Tx) error { - nrs, n, err = s.findNotificationRules(ctx, tx, filter, opt...) - return err - }) - - return nrs, n, err -} - -func (s *RuleService) findNotificationRules(ctx context.Context, tx kv.Tx, filter influxdb.NotificationRuleFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationRule, int, error) { - var ( - nrs = make([]influxdb.NotificationRule, 0) - offset int - limit int - count int - descending bool - ) - - if len(opt) > 0 { - offset = opt[0].Offset - limit = opt[0].Limit - descending = opt[0].Descending - } - - filterFn := filterNotificationRulesFn(filter) - err := s.forEachNotificationRule(ctx, tx, descending, func(nr influxdb.NotificationRule) bool { - if filterFn(nr) { - if count >= offset { - nrs = append(nrs, nr) - } - count++ - } - - if limit > 0 && len(nrs) >= limit { - return false - } - - return true - }) - - return nrs, len(nrs), err -} - -// forEachNotificationRule will iterate through all notification rules while fn returns true. -func (s *RuleService) forEachNotificationRule(ctx context.Context, tx kv.Tx, descending bool, fn func(influxdb.NotificationRule) bool) error { - - bkt, err := s.notificationRuleBucket(tx) - if err != nil { - return err - } - - direction := kv.CursorAscending - if descending { - direction = kv.CursorDescending - } - - cur, err := bkt.ForwardCursor(nil, kv.WithCursorDirection(direction)) - if err != nil { - return err - } - - for k, v := cur.Next(); k != nil; k, v = cur.Next() { - nr, err := rule.UnmarshalJSON(v) - if err != nil { - return err - } - if !fn(nr) { - break - } - } - - return nil -} - -func filterNotificationRulesFn(filter influxdb.NotificationRuleFilter) func(nr influxdb.NotificationRule) bool { - if filter.OrgID != nil { - return func(nr influxdb.NotificationRule) bool { - if !nr.MatchesTags(filter.Tags) { - return false - } - - return nr.GetOrgID() == *filter.OrgID - } - } - - return func(nr influxdb.NotificationRule) bool { - return nr.MatchesTags(filter.Tags) - } -} - -// DeleteNotificationRule removes a notification rule by ID. 
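-// The rule's backing task is deleted before the rule itself is removed from the kv store.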
-func (s *RuleService) DeleteNotificationRule(ctx context.Context, id platform.ID) error { - r, err := s.FindNotificationRuleByID(ctx, id) - if err != nil { - return err - } - - if err := s.tasks.DeleteTask(ctx, r.GetTaskID()); err != nil { - return err - } - - return s.kv.Update(ctx, func(tx kv.Tx) error { - return s.deleteNotificationRule(ctx, tx, r) - }) -} - -func (s *RuleService) deleteNotificationRule(ctx context.Context, tx kv.Tx, r influxdb.NotificationRule) error { - encodedID, err := r.GetID().Encode() - if err != nil { - return ErrInvalidNotificationRuleID - } - - bucket, err := s.notificationRuleBucket(tx) - if err != nil { - return err - } - - _, err = bucket.Get(encodedID) - if kv.IsNotFound(err) { - return ErrNotificationRuleNotFound - } - if err != nil { - return InternalNotificationRuleStoreError(err) - } - - if err := bucket.Delete(encodedID); err != nil { - return InternalNotificationRuleStoreError(err) - } - - return nil -} diff --git a/notification/rule/service/service_external_test.go b/notification/rule/service/service_external_test.go deleted file mode 100644 index df090f6eea0..00000000000 --- a/notification/rule/service/service_external_test.go +++ /dev/null @@ -1,2262 +0,0 @@ -package service - -import ( - "bytes" - "context" - "reflect" - "sort" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/flux/ast" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/rule" - "github.com/influxdata/influxdb/v2/pkg/pointer" - "github.com/influxdata/influxdb/v2/task/taskmodel" - itesting "github.com/influxdata/influxdb/v2/testing" -) - -const ( - oneID = "020f755c3c082000" - twoID = "020f755c3c082001" - threeID = "020f755c3c082002" - fourID = "020f755c3c082003" - fiveID = "020f755c3c082004" - sixID = "020f755c3c082005" -) - -var ( - fakeDate = time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC) - fakeGenerator = mock.TimeGenerator{FakeValue: fakeDate} - timeGen1 = mock.TimeGenerator{FakeValue: time.Date(2006, time.July, 13, 4, 19, 10, 0, time.UTC)} - timeGen2 = mock.TimeGenerator{FakeValue: time.Date(2006, time.July, 14, 5, 23, 53, 10, time.UTC)} - time3 = time.Date(2006, time.July, 15, 5, 23, 53, 10, time.UTC) -) - -// NotificationRuleFields includes prepopulated data for mapping tests. -type NotificationRuleFields struct { - IDGenerator platform.IDGenerator - TimeGenerator influxdb.TimeGenerator - NotificationRules []influxdb.NotificationRule - Orgs []*influxdb.Organization - Tasks []taskmodel.TaskCreate - Endpoints []influxdb.NotificationEndpoint -} - -var notificationRuleCmpOptions = cmp.Options{ - cmpopts.IgnoreFields(rule.Base{}, "TaskID"), - cmp.Transformer("Sort", func(in []influxdb.NotificationRule) []influxdb.NotificationRule { - out := append([]influxdb.NotificationRule(nil), in...) 
- sort.Slice(out, func(i, j int) bool { - return out[i].GetID() > out[j].GetID() - }) - return out - }), -} - -var taskCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - // skip comparing permissions - cmpopts.IgnoreFields( - taskmodel.Task{}, - "LatestCompleted", - "LatestScheduled", - "CreatedAt", - "UpdatedAt", - ), - cmp.Transformer("Sort", func(in []*taskmodel.Task) []*taskmodel.Task { - out := append([]*taskmodel.Task{}, in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID > out[j].ID - }) - return out - }), -} - -type notificationRuleFactory func(NotificationRuleFields, *testing.T) (influxdb.NotificationRuleStore, taskmodel.TaskService, func()) - -// NotificationRuleStore tests all the service functions. -func NotificationRuleStore( - init notificationRuleFactory, t *testing.T, -) { - tests := []struct { - name string - fn func(notificationRuleFactory, *testing.T) - }{ - { - name: "CreateNotificationRule", - fn: CreateNotificationRule, - }, - { - name: "FindNotificationRuleByID", - fn: FindNotificationRuleByID, - }, - { - name: "FindNotificationRules", - fn: FindNotificationRules, - }, - { - name: "UpdateNotificationRule", - fn: UpdateNotificationRule, - }, - { - name: "PatchNotificationRule", - fn: PatchNotificationRule, - }, - { - name: "DeleteNotificationRule", - fn: DeleteNotificationRule, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.fn(init, t) - }) - } -} - -// CreateNotificationRule testing. -func CreateNotificationRule( - init notificationRuleFactory, - t *testing.T, -) { - type args struct { - notificationRule influxdb.NotificationRule - userID platform.ID - } - type wants struct { - err error - notificationRule influxdb.NotificationRule - task *taskmodel.Task - } - - tests := []struct { - name string - fields NotificationRuleFields - args args - wants wants - }{ - { - name: "basic create notification rule", - fields: NotificationRuleFields{ - IDGenerator: mock.NewIDGenerator(twoID, t), - TimeGenerator: fakeGenerator, - Orgs: []*influxdb.Organization{ - { - Name: "org", - ID: MustIDBase16(fourID), - }, - }, - Endpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - URL: "http://localhost:7777", - Token: influxdb.SecretField{ - // TODO(desa): not sure why this has to end in token, but it does - Key: "020f755c3c082001-token", - Value: pointer.String("abc123"), - }, - Base: endpoint.Base{ - OrgID: MustIDBase16Ptr(fourID), - Name: "foo", - Status: influxdb.Active, - }, - }, - }, - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - }, - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "k1", - Value: "v1", - }, - Operator: influxdb.NotEqual, - }, - { - Tag: influxdb.Tag{ - Key: "k2", - Value: "v2", - }, - Operator: influxdb.RegexEqual, - }, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - }, - }, - args: args{ - userID: MustIDBase16(sixID), - notificationRule: &rule.Slack{ - Base: rule.Base{ - OwnerID: MustIDBase16(sixID), - Name: "name2", - OrgID: MustIDBase16(fourID), - 
EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - }, - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "k1", - Value: "v1", - }, - Operator: influxdb.NotEqual, - }, - { - Tag: influxdb.Tag{ - Key: "k2", - Value: "v2", - }, - Operator: influxdb.RegexEqual, - }, - }, - }, - MessageTemplate: "msg1", - }, - }, - wants: wants{ - notificationRule: &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - }, - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "k1", - Value: "v1", - }, - Operator: influxdb.NotEqual, - }, - { - Tag: influxdb.Tag{ - Key: "k2", - Value: "v2", - }, - Operator: influxdb.RegexEqual, - }, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - MessageTemplate: "msg1", - }, - task: &taskmodel.Task{ - ID: MustIDBase16("020f755c3c082001"), - Type: "slack", - OrganizationID: MustIDBase16("020f755c3c082003"), - Organization: "org", - OwnerID: MustIDBase16("020f755c3c082005"), - Name: "name2", - Status: "active", - Flux: itesting.FormatFluxString(t, `import "influxdata/influxdb/monitor" -import "slack" -import "influxdata/influxdb/secrets" -import "experimental" - -option task = {name: "name2", every: 1h} - -slack_secret = secrets["get"](key: "020f755c3c082001-token") -slack_endpoint = slack["endpoint"](token: slack_secret, url: "http://localhost:7777") -notification = { - _notification_rule_id: "020f755c3c082001", - _notification_rule_name: "name2", - _notification_endpoint_id: "020f755c3c082001", - _notification_endpoint_name: "foo", -} -statuses = monitor["from"](start: -2h, fn: (r) => r["k1"] == "v1" and r["k2"] == "v2") -crit = statuses |> filter(fn: (r) => r["_level"] == "crit") -all_statuses = crit |> filter(fn: (r) => r["_time"] >= experimental["subDuration"](from: now(), d: 1h)) - -all_statuses - |> monitor["notify"]( - data: notification, - endpoint: - slack_endpoint( - mapFn: (r) => - ({ - channel: "", - text: "msg1", - color: - if r["_level"] == "crit" then - "danger" - else if r["_level"] == "warn" then - "warning" - else - "good", - }), - ), - ) -`), - Every: "1h", - }, - }, - }, - { - name: "invalid tag rule value", - fields: NotificationRuleFields{ - IDGenerator: mock.NewIDGenerator(twoID, t), - TimeGenerator: fakeGenerator, - Orgs: []*influxdb.Organization{ - { - Name: "org", - ID: MustIDBase16(fourID), - }, - }, - Endpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - URL: "http://localhost:7777", - Token: influxdb.SecretField{ - // TODO(desa): not sure why this has to end in token, but it does - Key: "020f755c3c082001-token", - Value: pointer.String("abc123"), - }, - Base: endpoint.Base{ - OrgID: MustIDBase16Ptr(fourID), - Name: "foo", - Status: influxdb.Active, - }, - }, - }, - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: 
notification.Critical, - }, - }, - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "k1", - Value: "v1", - }, - Operator: influxdb.NotEqual, - }, - { - Tag: influxdb.Tag{ - Key: "k2", - Value: "v2", - }, - Operator: influxdb.RegexEqual, - }, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - }, - }, - args: args{ - userID: MustIDBase16(sixID), - notificationRule: &rule.Slack{ - Base: rule.Base{ - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - Name: "name2", - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - }, - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "k1", - Value: "v1", - }, - Operator: influxdb.NotEqual, - }, - { - Tag: influxdb.Tag{ - Key: "k2", - // empty tag value to trigger validation error - Value: "", - }, - Operator: influxdb.RegexEqual, - }, - }, - }, - MessageTemplate: "msg1", - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "tag must contain a key and a value", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, tasks, done := init(tt.fields, t) - defer done() - ctx := context.Background() - nrc := influxdb.NotificationRuleCreate{ - NotificationRule: tt.args.notificationRule, - Status: influxdb.Active, - } - err := s.CreateNotificationRule(ctx, nrc, tt.args.userID) - if tt.wants.err != nil { - // expected error case - if !reflect.DeepEqual(tt.wants.err, err) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - // ensure no rules can be located - _, n, err := s.FindNotificationRules(ctx, influxdb.NotificationRuleFilter{}) - if err != nil { - t.Fatal(err) - } - - if existing := len(tt.fields.NotificationRules); n > existing { - t.Errorf("expected no rules to be created, found %d", n-existing) - } - } else { - nr, err := s.FindNotificationRuleByID(ctx, tt.args.notificationRule.GetID()) - if err != nil { - t.Errorf("failed to retrieve notification rules: %v", err) - } - - if diff := cmp.Diff(nr, tt.wants.notificationRule, notificationRuleCmpOptions...); diff != "" { - t.Errorf("notificationRules are different -got/+want\ndiff %s", diff) - } - } - - if tt.wants.task == nil || !tt.wants.task.ID.Valid() { - // if not tasks or a task with an invalid ID is provided (0) then assume - // no tasks should be persisted - _, n, err := tasks.FindTasks(ctx, taskmodel.TaskFilter{}) - if err != nil { - t.Fatal(err) - } - - if n > 0 { - t.Errorf("expected zero tasks to be created, instead found %d", n) - } - - return - } - - task, err := tasks.FindTaskByID(ctx, tt.wants.task.ID) - if err != nil { - t.Fatal(err) - } - - if diff := cmp.Diff(task, tt.wants.task, taskCmpOptions...); diff != "" { - t.Errorf("task is different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindNotificationRuleByID testing. 
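// Editor's aside (not part of the deleted file): NotificationRuleStore above is a
// backend-agnostic suite; an implementation opts in by providing a
// notificationRuleFactory that seeds a fresh store from NotificationRuleFields and
// returns a cleanup func. A sketch of that wiring, with a hypothetical
// initMyNotificationRuleStore (illustrative only; the real factories appear in
// service_test.go later in this diff):
func TestMyNotificationRuleStore(t *testing.T) {
	NotificationRuleStore(initMyNotificationRuleStore, t)
}

func initMyNotificationRuleStore(f NotificationRuleFields, t *testing.T) (influxdb.NotificationRuleStore, taskmodel.TaskService, func()) {
	t.Skip("illustrative placeholder: build a store, seed it from f, and return it with its task service and a cleanup func")
	return nil, nil, func() {}
}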
-func FindNotificationRuleByID( - init notificationRuleFactory, - t *testing.T, -) { - type args struct { - id platform.ID - } - type wants struct { - err error - notificationRule influxdb.NotificationRule - } - - tests := []struct { - name string - fields NotificationRuleFields - args args - wants wants - }{ - { - name: "bad id", - fields: NotificationRuleFields{ - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - id: platform.ID(0), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "provided notification rule ID has invalid format", - }, - }, - }, - { - name: "not found", - fields: NotificationRuleFields{ - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - id: MustIDBase16(threeID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "notification rule not found", - }, - }, - }, - { - name: "basic find telegraf config by id", - fields: NotificationRuleFields{ - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - id: MustIDBase16(twoID), - }, - wants: wants{ - notificationRule: &rule.Slack{ - Base: rule.Base{ - ID: 
MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - nr, err := s.FindNotificationRuleByID(ctx, tt.args.id) - ErrorsEqual(t, err, tt.wants.err) - if diff := cmp.Diff(nr, tt.wants.notificationRule, notificationRuleCmpOptions...); diff != "" { - t.Errorf("notification rule is different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindNotificationRules testing -func FindNotificationRules( - init notificationRuleFactory, - t *testing.T, -) { - type args struct { - filter influxdb.NotificationRuleFilter - opts influxdb.FindOptions - } - - type wants struct { - notificationRules []influxdb.NotificationRule - err error - } - tests := []struct { - name string - fields NotificationRuleFields - args args - wants wants - }{ - { - name: "find nothing (empty set)", - fields: NotificationRuleFields{ - NotificationRules: []influxdb.NotificationRule{}, - }, - args: args{ - filter: influxdb.NotificationRuleFilter{}, - }, - wants: wants{ - notificationRules: []influxdb.NotificationRule{}, - }, - }, - { - name: "find all notification rules", - fields: NotificationRuleFields{ - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - filter: influxdb.NotificationRuleFilter{}, - }, - wants: wants{ - notificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - }, - }, - { - name: "filter by organization id only", - fields: NotificationRuleFields{ - Orgs: []*influxdb.Organization{ - { - ID: MustIDBase16(oneID), - Name: "org1", - }, - { - ID: MustIDBase16(fourID), - Name: "org4", - }, - 
}, - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - OrgID: MustIDBase16(fourID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr1", - }, - Channel: "ch1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - OrgID: MustIDBase16(fourID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr2", - }, - MessageTemplate: "body2", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(fourID), - OrgID: MustIDBase16(oneID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr3", - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - filter: influxdb.NotificationRuleFilter{ - OrgID: MustIDBase16Ptr(oneID), - }, - }, - wants: wants{ - notificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(fourID), - OrgID: MustIDBase16(oneID), - OwnerID: MustIDBase16(sixID), - EndpointID: 1, - Name: "nr3", - }, - MessageTemplate: "msg", - }, - }, - }, - }, - { - name: "filter by organization name only", - fields: NotificationRuleFields{ - Orgs: []*influxdb.Organization{ - { - ID: MustIDBase16(oneID), - Name: "org1", - }, - { - ID: MustIDBase16(fourID), - Name: "org4", - }, - }, - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - OrgID: MustIDBase16(fourID), - OwnerID: MustIDBase16(sixID), - EndpointID: 1, - Name: "nr1", - }, - Channel: "ch1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - OrgID: MustIDBase16(fourID), - OwnerID: MustIDBase16(sixID), - EndpointID: 1, - Name: "nr2", - }, - MessageTemplate: "body2", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(fourID), - OrgID: MustIDBase16(oneID), - OwnerID: MustIDBase16(sixID), - EndpointID: 1, - Name: "nr3", - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - filter: influxdb.NotificationRuleFilter{ - Organization: pointer.String("org4"), - }, - }, - wants: wants{ - notificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - OrgID: MustIDBase16(fourID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr1", - }, - Channel: "ch1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - OrgID: MustIDBase16(fourID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr2", - }, - MessageTemplate: "body2", - }, - }, - }, - }, - { - name: "look for organization not bound to any notification rule", - fields: NotificationRuleFields{ - Orgs: []*influxdb.Organization{ - { - ID: MustIDBase16(oneID), - Name: "org1", - }, - { - ID: MustIDBase16(fourID), - Name: "org4", - }, - }, - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - OrgID: MustIDBase16(fourID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr1", - }, - Channel: "ch1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - OrgID: MustIDBase16(fourID), - OwnerID: MustIDBase16(sixID), - EndpointID: 1, - Name: "nr2", - }, - MessageTemplate: "body2", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(fourID), - OrgID: MustIDBase16(fourID), - OwnerID: MustIDBase16(sixID), - EndpointID: 1, - Name: "nr3", - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - filter: influxdb.NotificationRuleFilter{ - OrgID: MustIDBase16Ptr(oneID), - }, - }, - wants: wants{ - notificationRules: 
[]influxdb.NotificationRule{}, - }, - }, - { - name: "find options limit", - fields: NotificationRuleFields{ - Orgs: []*influxdb.Organization{ - { - ID: MustIDBase16(oneID), - Name: "org1", - }, - { - ID: MustIDBase16(fourID), - Name: "org4", - }, - }, - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - OrgID: MustIDBase16(oneID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr1", - }, - Channel: "ch1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - OrgID: MustIDBase16(oneID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr2", - }, - MessageTemplate: "body2", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(fourID), - OrgID: MustIDBase16(oneID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr3", - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - filter: influxdb.NotificationRuleFilter{ - OrgID: idPtr(MustIDBase16(oneID)), - }, - opts: influxdb.FindOptions{ - Limit: 2, - }, - }, - wants: wants{ - notificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - OrgID: MustIDBase16(oneID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr1", - }, - Channel: "ch1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - OrgID: MustIDBase16(oneID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr2", - }, - MessageTemplate: "body2", - }, - }, - }, - }, - { - name: "find options offset", - fields: NotificationRuleFields{ - Orgs: []*influxdb.Organization{ - { - ID: MustIDBase16(oneID), - Name: "org1", - }, - { - ID: MustIDBase16(fourID), - Name: "org4", - }, - }, - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - OrgID: MustIDBase16(oneID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr1", - }, - Channel: "ch1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - OrgID: MustIDBase16(oneID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr2", - }, - MessageTemplate: "body2", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(fourID), - OrgID: MustIDBase16(oneID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr3", - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - filter: influxdb.NotificationRuleFilter{ - OrgID: idPtr(MustIDBase16(oneID)), - }, - opts: influxdb.FindOptions{ - Offset: 1, - }, - }, - wants: wants{ - notificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - OrgID: MustIDBase16(oneID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr2", - }, - MessageTemplate: "body2", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(fourID), - OrgID: MustIDBase16(oneID), - EndpointID: 1, - OwnerID: MustIDBase16(sixID), - Name: "nr3", - }, - MessageTemplate: "msg", - }, - }, - }, - }, - { - name: "find nothing", - fields: NotificationRuleFields{ - Orgs: []*influxdb.Organization{ - { - ID: MustIDBase16(oneID), - Name: "org1", - }, - { - ID: MustIDBase16(fourID), - Name: "org4", - }, - }, - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - OrgID: MustIDBase16(fourID), - OwnerID: MustIDBase16(sixID), - EndpointID: 1, - Name: "nr1", - }, - Channel: "ch1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: 
MustIDBase16(twoID), - OrgID: MustIDBase16(fourID), - OwnerID: MustIDBase16(sixID), - EndpointID: 1, - Name: "nr2", - }, - MessageTemplate: "body2", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(fourID), - OrgID: MustIDBase16(fourID), - OwnerID: MustIDBase16(sixID), - EndpointID: 1, - Name: "nr3", - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - filter: influxdb.NotificationRuleFilter{ - OrgID: MustIDBase16Ptr(threeID), - }, - }, - wants: wants{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - nrs, n, err := s.FindNotificationRules(ctx, tt.args.filter, tt.args.opts) - ErrorsEqual(t, err, tt.wants.err) - if n != len(tt.wants.notificationRules) { - t.Fatalf("notification rules length is different got %d, want %d", n, len(tt.wants.notificationRules)) - } - - if diff := cmp.Diff(nrs, tt.wants.notificationRules, notificationRuleCmpOptions...); diff != "" { - t.Errorf("notification rules are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// UpdateNotificationRule testing. -func UpdateNotificationRule( - init notificationRuleFactory, - t *testing.T, -) { - type args struct { - userID platform.ID - id platform.ID - notificationRule influxdb.NotificationRule - } - - type wants struct { - notificationRule influxdb.NotificationRule - err error - } - tests := []struct { - name string - fields NotificationRuleFields - args args - wants wants - }{ - { - name: "can't find the id", - fields: NotificationRuleFields{ - TimeGenerator: fakeGenerator, - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - userID: MustIDBase16(sixID), - id: MustIDBase16(fourID), - notificationRule: &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink3", - SleepUntil: &time3, - Every: mustDuration("2h"), - }, - MessageTemplate: "msg2", - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "notification rule not found", - }, - }, - }, - { - name: "regular update", - fields: NotificationRuleFields{ - TimeGenerator: fakeGenerator, - IDGenerator: mock.NewIDGenerator(twoID, t), - Tasks: []taskmodel.TaskCreate{ - { - OwnerID: MustIDBase16(sixID), - OrganizationID: MustIDBase16(fourID), - Flux: `from(bucket: "foo") |> range(start: -1m) - option task = {name: "bar", every: 1m} - `, - }, - }, - Endpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - URL: "http://localhost:7777", - Token: influxdb.SecretField{ - // TODO(desa): not sure why this has to end in token, but it does - Key: "020f755c3c082001-token", 
- Value: pointer.String("abc123"), - }, - Base: endpoint.Base{ - OrgID: MustIDBase16Ptr(fourID), - Name: "foo", - Status: influxdb.Active, - }, - }, - }, - Orgs: []*influxdb.Organization{ - { - ID: MustIDBase16(fourID), - Name: "foo", - }, - }, - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Info, - }, - }, - EndpointID: MustIDBase16(twoID), - TaskID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Info, - }, - }, - TaskID: MustIDBase16(twoID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - userID: MustIDBase16(sixID), - id: MustIDBase16(twoID), - notificationRule: &rule.Slack{ - Base: rule.Base{ - OwnerID: MustIDBase16(sixID), - Name: "name3", - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Info, - }, - }, - RunbookLink: "runbooklink3", - SleepUntil: &time3, - Every: mustDuration("2h"), - }, - MessageTemplate: "msg2", - }, - }, - wants: wants{ - notificationRule: &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name3", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - TaskID: MustIDBase16(twoID), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Info, - }, - }, - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink3", - SleepUntil: &time3, - Every: mustDuration("2h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: fakeDate, - }, - }, - MessageTemplate: "msg2", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - nrc := influxdb.NotificationRuleCreate{ - NotificationRule: tt.args.notificationRule, - Status: influxdb.Active, - } - - tc, err := s.UpdateNotificationRule(ctx, tt.args.id, - nrc, tt.args.userID) - ErrorsEqual(t, err, tt.wants.err) - if diff := cmp.Diff(tc, tt.wants.notificationRule, notificationRuleCmpOptions...); tt.wants.err == nil && diff != "" { - t.Errorf("notificationRules are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// PatchNotificationRule testing. 
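// Editor's aside (not part of the deleted file): unlike a patch, a full update travels
// through influxdb.NotificationRuleCreate so the desired status accompanies the
// replacement definition; the service preserves the stored rule's ID, OrgID, OwnerID,
// and TaskID. A hedged sketch with a hypothetical `svc` and replacement rule `nr`
// (illustrative only; reuses this package's imports):
func updateRuleExample(ctx context.Context, svc influxdb.NotificationRuleStore, id, userID platform.ID, nr influxdb.NotificationRule) (influxdb.NotificationRule, error) {
	return svc.UpdateNotificationRule(ctx, id, influxdb.NotificationRuleCreate{
		NotificationRule: nr,              // replacement definition
		Status:           influxdb.Active, // applied to the rule's backing task as well
	}, userID)
}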
-func PatchNotificationRule( - init notificationRuleFactory, - t *testing.T, -) { - - name3 := "name2" - status3 := influxdb.Inactive - - type args struct { - //userID influxdb.ID - id platform.ID - upd influxdb.NotificationRuleUpdate - } - - type wants struct { - notificationRule influxdb.NotificationRule - err error - } - tests := []struct { - name string - fields NotificationRuleFields - args args - wants wants - }{ - { - name: "can't find the id", - fields: NotificationRuleFields{ - TimeGenerator: fakeGenerator, - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - id: MustIDBase16(fourID), - upd: influxdb.NotificationRuleUpdate{ - Name: &name3, - Status: &status3, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "notification rule not found", - }, - }, - }, - { - name: "patch without status", - fields: NotificationRuleFields{ - TimeGenerator: fakeGenerator, - IDGenerator: mock.NewIDGenerator(twoID, t), - Tasks: []taskmodel.TaskCreate{ - { - OwnerID: MustIDBase16(sixID), - OrganizationID: MustIDBase16(fourID), - Flux: `from(bucket: "foo") |> range(start: -1m) - option task = {name: "bar", every: 1m} - `, - }, - }, - Endpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - URL: "http://localhost:7777", - Token: influxdb.SecretField{ - // TODO(desa): not sure why this has to end in token, but it does - Key: "020f755c3c082001-token", - Value: pointer.String("abc123"), - }, - Base: endpoint.Base{ - OrgID: MustIDBase16Ptr(fourID), - Name: "foo", - Status: influxdb.Active, - }, - }, - }, - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - { - CurrentLevel: notification.Info, - }, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - EndpointID: MustIDBase16(twoID), - TaskID: MustIDBase16(twoID), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - { - CurrentLevel: notification.Info, - }, - }, - OrgID: MustIDBase16(fourID), - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - Orgs: 
[]*influxdb.Organization{ - { - ID: MustIDBase16(fourID), - Name: "foo", - }, - }, - }, - args: args{ - id: MustIDBase16(twoID), - upd: influxdb.NotificationRuleUpdate{ - Name: &name3, - }, - }, - wants: wants{ - notificationRule: &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: name3, - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - TaskID: MustIDBase16(twoID), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - { - CurrentLevel: notification.Info, - }, - }, - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: fakeDate, - }, - }, - MessageTemplate: "msg", - }, - }, - }, - { - name: "regular patch", - fields: NotificationRuleFields{ - TimeGenerator: fakeGenerator, - IDGenerator: mock.NewIDGenerator(twoID, t), - Tasks: []taskmodel.TaskCreate{ - { - OwnerID: MustIDBase16(sixID), - OrganizationID: MustIDBase16(fourID), - Flux: `from(bucket: "foo") |> range(start: -1m) - option task = {name: "bar", every: 1m} - `, - }, - }, - Endpoints: []influxdb.NotificationEndpoint{ - &endpoint.Slack{ - URL: "http://localhost:7777", - Token: influxdb.SecretField{ - // TODO(desa): not sure why this has to end in token, but it does - Key: "020f755c3c082001-token", - Value: pointer.String("abc123"), - }, - Base: endpoint.Base{ - OrgID: MustIDBase16Ptr(fourID), - Name: "foo", - Status: influxdb.Active, - }, - }, - }, - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - { - CurrentLevel: notification.Info, - }, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - EndpointID: MustIDBase16(twoID), - TaskID: MustIDBase16(twoID), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - { - CurrentLevel: notification.Info, - }, - }, - OrgID: MustIDBase16(fourID), - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - Orgs: []*influxdb.Organization{ - { - ID: MustIDBase16(fourID), - Name: "foo", - }, - }, - }, - args: args{ - id: MustIDBase16(twoID), - upd: influxdb.NotificationRuleUpdate{ - Name: &name3, - Status: &status3, - }, - }, - wants: wants{ - notificationRule: &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: name3, - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - TaskID: MustIDBase16(twoID), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - { - CurrentLevel: notification.Info, - }, - }, - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: fakeDate, - }, - }, - MessageTemplate: "msg", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, 
func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - tc, err := s.PatchNotificationRule(ctx, tt.args.id, tt.args.upd) - ErrorsEqual(t, err, tt.wants.err) - if diff := cmp.Diff(tc, tt.wants.notificationRule, notificationRuleCmpOptions...); tt.wants.err == nil && diff != "" { - t.Errorf("notificationRules are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// DeleteNotificationRule testing. -func DeleteNotificationRule( - init notificationRuleFactory, - t *testing.T, -) { - type args struct { - id platform.ID - orgID platform.ID - } - - type wants struct { - notificationRules []influxdb.NotificationRule - err error - } - tests := []struct { - name string - fields NotificationRuleFields - args args - wants wants - }{ - { - name: "bad id", - fields: NotificationRuleFields{ - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - id: platform.ID(0), - orgID: MustIDBase16(fourID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "provided notification rule ID has invalid format", - }, - notificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - EndpointID: MustIDBase16(twoID), - OrgID: MustIDBase16(fourID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - }, - }, - { - name: "none existing config", - fields: NotificationRuleFields{ - IDGenerator: mock.NewIDGenerator(twoID, t), - Tasks: []taskmodel.TaskCreate{ - { - OwnerID: MustIDBase16(sixID), - OrganizationID: MustIDBase16(fourID), - Flux: `from(bucket: "foo") |> range(start: -1m) - option task = {name: "bar", every: 1m} - `, - }, - }, - Orgs: []*influxdb.Organization{ - { - ID: MustIDBase16(fourID), - Name: "foo", - }, - }, - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - 
UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - TaskID: MustIDBase16(twoID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - id: MustIDBase16(fourID), - orgID: MustIDBase16(fourID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "notification rule not found", - }, - notificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink2", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - }, - }, - { - name: "regular delete", - fields: NotificationRuleFields{ - Tasks: []taskmodel.TaskCreate{ - { - OwnerID: MustIDBase16(sixID), - OrganizationID: MustIDBase16(fourID), - Flux: `from(bucket: "foo") |> range(start: -1m) - option task = {name: "bar", every: 1m} - `, - }, - }, - IDGenerator: mock.NewIDGenerator(twoID, t), - Orgs: []*influxdb.Organization{ - { - ID: MustIDBase16(fourID), - Name: "foo", - }, - }, - NotificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - EndpointID: MustIDBase16(twoID), - TaskID: MustIDBase16(twoID), - OrgID: MustIDBase16(fourID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(twoID), - Name: "name2", - OwnerID: MustIDBase16(sixID), - TaskID: MustIDBase16(twoID), - OrgID: MustIDBase16(fourID), - RunbookLink: "runbooklink2", - EndpointID: MustIDBase16(twoID), - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - MessageTemplate: "msg", - }, - }, - }, - args: args{ - id: MustIDBase16(twoID), - orgID: MustIDBase16(fourID), - }, - wants: wants{ - notificationRules: []influxdb.NotificationRule{ - &rule.Slack{ - Base: rule.Base{ - ID: MustIDBase16(oneID), - Name: "name1", - OwnerID: MustIDBase16(sixID), - OrgID: MustIDBase16(fourID), - TaskID: MustIDBase16(twoID), - EndpointID: MustIDBase16(twoID), - RunbookLink: "runbooklink1", - SleepUntil: &time3, - Every: mustDuration("1h"), - CRUDLog: influxdb.CRUDLog{ - CreatedAt: timeGen1.Now(), - UpdatedAt: timeGen2.Now(), - }, - }, - Channel: "channel1", - MessageTemplate: "msg1", - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t 
*testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.DeleteNotificationRule(ctx, tt.args.id) - ErrorsEqual(t, err, tt.wants.err) - - filter := influxdb.NotificationRuleFilter{ - OrgID: &tt.args.orgID, - } - nrs, n, err := s.FindNotificationRules(ctx, filter) - if err != nil && tt.wants.err == nil { - t.Fatalf("expected errors to be nil got '%v'", err) - } - - if err != nil && tt.wants.err != nil { - if want, got := tt.wants.err.Error(), err.Error(); want != got { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - } - - if n != len(tt.wants.notificationRules) { - t.Fatalf("notification rules length is different got %d, want %d", n, len(tt.wants.notificationRules)) - } - if diff := cmp.Diff(nrs, tt.wants.notificationRules, notificationRuleCmpOptions...); diff != "" { - t.Errorf("notification rules are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// MustIDBase16 is an helper to ensure a correct ID is built during testing. -func MustIDBase16(s string) platform.ID { - id, err := platform.IDFromString(s) - if err != nil { - panic(err) - } - return *id -} - -// MustIDBase16Ptr is an helper to ensure a correct *ID is built during testing. -func MustIDBase16Ptr(s string) *platform.ID { - id := MustIDBase16(s) - return &id -} - -// ErrorsEqual checks to see if the provided errors are equivalent. -func ErrorsEqual(t *testing.T, actual, expected error) { - t.Helper() - if expected == nil && actual == nil { - return - } - - if expected == nil && actual != nil { - t.Errorf("unexpected error %s", actual.Error()) - } - - if expected != nil && actual == nil { - t.Errorf("expected error %s but received nil", expected.Error()) - } - - if errors.ErrorCode(expected) != errors.ErrorCode(actual) { - t.Logf("\nexpected: %v\nactual: %v\n\n", expected, actual) - t.Errorf("expected error code %q but received %q", errors.ErrorCode(expected), errors.ErrorCode(actual)) - } - - if errors.ErrorMessage(expected) != errors.ErrorMessage(actual) { - t.Logf("\nexpected: %v\nactual: %v\n\n", expected, actual) - t.Errorf("expected error message %q but received %q", errors.ErrorMessage(expected), errors.ErrorMessage(actual)) - } -} - -func idPtr(id platform.ID) *platform.ID { - return &id -} - -func mustDuration(d string) *notification.Duration { - dur, err := time.ParseDuration(d) - if err != nil { - panic(err) - } - - ndur, err := notification.FromTimeDuration(dur) - if err != nil { - panic(err) - } - - // Filter out the zero values from the duration. 
- durs := make([]ast.Duration, 0, len(ndur.Values)) - for _, d := range ndur.Values { - if d.Magnitude != 0 { - durs = append(durs, d) - } - } - ndur.Values = durs - return &ndur -} diff --git a/notification/rule/service/service_test.go b/notification/rule/service/service_test.go deleted file mode 100644 index 7009e35824c..00000000000 --- a/notification/rule/service/service_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package service - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - _ "github.com/influxdata/influxdb/v2/fluxinit/static" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/mock" - endpointservice "github.com/influxdata/influxdb/v2/notification/endpoint/service" - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/influxdata/influxdb/v2/secret" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestInmemNotificationRuleStore(t *testing.T) { - NotificationRuleStore(initInmemNotificationRuleStore, t) -} - -func initInmemNotificationRuleStore(f NotificationRuleFields, t *testing.T) (influxdb.NotificationRuleStore, taskmodel.TaskService, func()) { - store := itesting.NewTestInmemStore(t) - return initNotificationRuleStore(store, f, t) -} - -func initBoltNotificationRuleStore(f NotificationRuleFields, t *testing.T) (influxdb.NotificationRuleStore, taskmodel.TaskService, func()) { - store, closeBolt := itesting.NewTestBoltStore(t) - svc, tsvc, closeSvc := initNotificationRuleStore(store, f, t) - return svc, tsvc, func() { - closeSvc() - closeBolt() - } -} - -func TestBoltNotificationRuleStore(t *testing.T) { - NotificationRuleStore(initBoltNotificationRuleStore, t) -} - -func initNotificationRuleStore(s kv.Store, f NotificationRuleFields, t *testing.T) (influxdb.NotificationRuleStore, taskmodel.TaskService, func()) { - logger := zaptest.NewLogger(t) - - var ( - tenantStore = tenant.NewStore(s) - tenantSvc = tenant.NewService(tenantStore) - ) - - kvsvc := kv.NewService(logger, s, tenantSvc, kv.ServiceConfig{ - FluxLanguageService: fluxlang.DefaultService, - }) - kvsvc.IDGenerator = f.IDGenerator - kvsvc.TimeGenerator = f.TimeGenerator - if f.TimeGenerator == nil { - kvsvc.TimeGenerator = influxdb.RealTimeGenerator{} - } - - secretStore, err := secret.NewStore(s) - require.NoError(t, err) - secretSvc := secret.NewService(secretStore) - - endpStore := endpointservice.NewStore(s) - endpStore.IDGenerator = f.IDGenerator - endpStore.TimeGenerator = f.TimeGenerator - endp := endpointservice.New(endpStore, secretSvc) - - svc, err := New(logger, s, kvsvc, tenantSvc, endp) - if err != nil { - t.Fatal(err) - } - - svc.idGenerator = f.IDGenerator - if f.TimeGenerator != nil { - svc.timeGenerator = f.TimeGenerator - } - - ctx := context.Background() - for _, o := range f.Orgs { - withOrgID(tenantStore, o.ID, func() { - if err := tenantSvc.CreateOrganization(ctx, o); err != nil { - t.Fatalf("failed to populate org: %v", err) - } - }) - } - - for _, e := range f.Endpoints { - if err := endp.CreateNotificationEndpoint(ctx, e, 1); err != nil { - t.Fatalf("failed to populate notification endpoint: %v", err) - } - } - - for _, nr := range f.NotificationRules { - nrc := influxdb.NotificationRuleCreate{ - 
NotificationRule: nr, - Status: influxdb.Active, - } - if err := svc.PutNotificationRule(ctx, nrc); err != nil { - t.Fatalf("failed to populate notification rule: %v", err) - } - } - - for _, c := range f.Tasks { - if _, err := kvsvc.CreateTask(ctx, c); err != nil { - t.Fatalf("failed to populate task: %v", err) - } - } - - return svc, kvsvc, func() { - for _, nr := range f.NotificationRules { - if err := svc.DeleteNotificationRule(ctx, nr.GetID()); err != nil { - t.Logf("failed to remove notification rule: %v", err) - } - } - for _, o := range f.Orgs { - if err := tenantSvc.DeleteOrganization(ctx, o.ID); err != nil { - t.Fatalf("failed to remove org: %v", err) - } - } - } -} - -func withOrgID(store *tenant.Store, orgID platform.ID, fn func()) { - backup := store.OrgIDGen - defer func() { store.OrgIDGen = backup }() - - store.OrgIDGen = mock.NewStaticIDGenerator(orgID) - - fn() -} diff --git a/notification/rule/slack.go b/notification/rule/slack.go deleted file mode 100644 index 137c6c156b4..00000000000 --- a/notification/rule/slack.go +++ /dev/null @@ -1,136 +0,0 @@ -package rule - -import ( - "encoding/json" - "fmt" - - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/ast/astutil" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/flux" -) - -// Slack is the notification rule config of slack. -type Slack struct { - Base - Channel string `json:"channel"` - MessageTemplate string `json:"messageTemplate"` -} - -// GenerateFlux generates a flux script for the slack notification rule. -func (s *Slack) GenerateFlux(e influxdb.NotificationEndpoint) (string, error) { - slackEndpoint, ok := e.(*endpoint.Slack) - if !ok { - return "", fmt.Errorf("endpoint provided is a %s, not an Slack endpoint", e.Type()) - } - return astutil.Format(s.GenerateFluxAST(slackEndpoint)) -} - -// GenerateFluxAST generates a flux AST for the slack notification rule. -func (s *Slack) GenerateFluxAST(e *endpoint.Slack) *ast.File { - return flux.File( - s.Name, - flux.Imports("influxdata/influxdb/monitor", "slack", "influxdata/influxdb/secrets", "experimental"), - s.generateFluxASTBody(e), - ) -} - -func (s *Slack) generateFluxASTBody(e *endpoint.Slack) []ast.Statement { - var statements []ast.Statement - statements = append(statements, s.generateTaskOption()) - if e.Token.Key != "" { - statements = append(statements, s.generateFluxASTSecrets(e)) - } - statements = append(statements, s.generateFluxASTEndpoint(e)) - statements = append(statements, s.generateFluxASTNotificationDefinition(e)) - statements = append(statements, s.generateFluxASTStatuses()) - statements = append(statements, s.generateLevelChecks()...) 
- statements = append(statements, s.generateFluxASTNotifyPipe()) - - return statements -} - -func (s *Slack) generateFluxASTSecrets(e *endpoint.Slack) ast.Statement { - call := flux.Call(flux.Member("secrets", "get"), flux.Object(flux.Property("key", flux.String(e.Token.Key)))) - - return flux.DefineVariable("slack_secret", call) -} - -func (s *Slack) generateFluxASTEndpoint(e *endpoint.Slack) ast.Statement { - props := []*ast.Property{} - if e.Token.Key != "" { - props = append(props, flux.Property("token", flux.Identifier("slack_secret"))) - } - if e.URL != "" { - props = append(props, flux.Property("url", flux.String(e.URL))) - } - call := flux.Call(flux.Member("slack", "endpoint"), flux.Object(props...)) - - return flux.DefineVariable("slack_endpoint", call) -} - -func (s *Slack) generateFluxASTNotifyPipe() ast.Statement { - endpointProps := []*ast.Property{} - endpointProps = append(endpointProps, flux.Property("channel", flux.String(s.Channel))) - // TODO(desa): are these values correct? - endpointProps = append(endpointProps, flux.Property("text", flux.String(s.MessageTemplate))) - endpointProps = append(endpointProps, flux.Property("color", s.generateSlackColors())) - endpointFn := flux.Function(flux.FunctionParams("r"), flux.Object(endpointProps...)) - - props := []*ast.Property{} - props = append(props, flux.Property("data", flux.Identifier("notification"))) - props = append(props, flux.Property("endpoint", - flux.Call(flux.Identifier("slack_endpoint"), flux.Object(flux.Property("mapFn", endpointFn))))) - - call := flux.Call(flux.Member("monitor", "notify"), flux.Object(props...)) - - return flux.ExpressionStatement(flux.Pipe(flux.Identifier("all_statuses"), call)) -} - -func (s *Slack) generateSlackColors() ast.Expression { - level := flux.Member("r", "_level") - return flux.If( - flux.Equal(level, flux.String("crit")), - flux.String("danger"), - flux.If( - flux.Equal(level, flux.String("warn")), - flux.String("warning"), - flux.String("good"), - ), - ) -} - -type slackAlias Slack - -// MarshalJSON implement json.Marshaler interface. -func (s Slack) MarshalJSON() ([]byte, error) { - return json.Marshal( - struct { - slackAlias - Type string `json:"type"` - }{ - slackAlias: slackAlias(s), - Type: s.Type(), - }) -} - -// Valid returns where the config is valid. -func (s Slack) Valid() error { - if err := s.Base.valid(); err != nil { - return err - } - if s.MessageTemplate == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "slack msg template is empty", - } - } - return nil -} - -// Type returns the type of the rule config. 
-func (s Slack) Type() string { - return "slack" -} diff --git a/notification/rule/slack_test.go b/notification/rule/slack_test.go deleted file mode 100644 index e4e0dd33725..00000000000 --- a/notification/rule/slack_test.go +++ /dev/null @@ -1,402 +0,0 @@ -package rule_test - -import ( - "testing" - - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/parser" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/rule" - itesting "github.com/influxdata/influxdb/v2/testing" -) - -func mustDuration(d string) *notification.Duration { - dur, err := parser.ParseDuration(d) - if err != nil { - panic(err) - } - dur.BaseNode = ast.BaseNode{} - return (*notification.Duration)(dur) -} - -func statusRulePtr(r notification.CheckLevel) *notification.CheckLevel { - return &r -} - -func idPtr(i int) *platform.ID { - id := platform.ID(i) - return &id -} - -func TestSlack_GenerateFlux(t *testing.T) { - tests := []struct { - name string - want string - rule *rule.Slack - endpoint *endpoint.Slack - }{ - { - name: "with any status", - want: `import "influxdata/influxdb/monitor" -import "slack" -import "influxdata/influxdb/secrets" -import "experimental" - -option task = {name: "foo", every: 1h} - -slack_endpoint = slack["endpoint"](url: "http://localhost:7777") -notification = { - _notification_rule_id: "0000000000000001", - _notification_rule_name: "foo", - _notification_endpoint_id: "0000000000000002", - _notification_endpoint_name: "foo", -} -statuses = monitor["from"](start: -2h, fn: (r) => r["foo"] == "bar" and r["baz"] == "bang") -any = statuses |> filter(fn: (r) => true) -all_statuses = any |> filter(fn: (r) => r["_time"] >= experimental["subDuration"](from: now(), d: 1h)) - -all_statuses - |> monitor["notify"]( - data: notification, - endpoint: - slack_endpoint( - mapFn: (r) => - ({ - channel: "bar", - text: "blah", - color: - if r["_level"] == "crit" then - "danger" - else if r["_level"] == "warn" then - "warning" - else - "good", - }), - ), - ) -`, - rule: &rule.Slack{ - Channel: "bar", - MessageTemplate: "blah", - Base: rule.Base{ - ID: 1, - EndpointID: 2, - Name: "foo", - Every: mustDuration("1h"), - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "foo", - Value: "bar", - }, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{ - Key: "baz", - Value: "bang", - }, - Operator: influxdb.Equal, - }, - }, - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Any, - }, - }, - }, - }, - endpoint: &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(2), - Name: "foo", - }, - URL: "http://localhost:7777", - }, - }, - { - name: "with url", - want: `import "influxdata/influxdb/monitor" -import "slack" -import "influxdata/influxdb/secrets" -import "experimental" - -option task = {name: "foo", every: 1h} - -slack_endpoint = slack["endpoint"](url: "http://localhost:7777") -notification = { - _notification_rule_id: "0000000000000001", - _notification_rule_name: "foo", - _notification_endpoint_id: "0000000000000002", - _notification_endpoint_name: "foo", -} -statuses = monitor["from"](start: -2h, fn: (r) => r["foo"] == "bar" and r["baz"] == "bang") -crit = statuses |> filter(fn: (r) => r["_level"] == "crit") -info_to_warn = statuses |> monitor["stateChanges"](fromLevel: "info", toLevel: "warn") 
-all_statuses = - union(tables: [crit, info_to_warn]) - |> sort(columns: ["_time"]) - |> filter(fn: (r) => r["_time"] >= experimental["subDuration"](from: now(), d: 1h)) - -all_statuses - |> monitor["notify"]( - data: notification, - endpoint: - slack_endpoint( - mapFn: (r) => - ({ - channel: "bar", - text: "blah", - color: - if r["_level"] == "crit" then - "danger" - else if r["_level"] == "warn" then - "warning" - else - "good", - }), - ), - ) -`, - rule: &rule.Slack{ - Channel: "bar", - MessageTemplate: "blah", - Base: rule.Base{ - ID: 1, - EndpointID: 2, - Name: "foo", - Every: mustDuration("1h"), - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "foo", - Value: "bar", - }, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{ - Key: "baz", - Value: "bang", - }, - Operator: influxdb.Equal, - }, - }, - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - { - CurrentLevel: notification.Warn, - PreviousLevel: statusRulePtr(notification.Info), - }, - }, - }, - }, - endpoint: &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(2), - Name: "foo", - }, - URL: "http://localhost:7777", - }, - }, - { - name: "with token", - want: `import "influxdata/influxdb/monitor" -import "slack" -import "influxdata/influxdb/secrets" -import "experimental" - -option task = {name: "foo", every: 1h} - -slack_secret = secrets["get"](key: "slack_token") -slack_endpoint = slack["endpoint"](token: slack_secret) -notification = { - _notification_rule_id: "0000000000000001", - _notification_rule_name: "foo", - _notification_endpoint_id: "0000000000000002", - _notification_endpoint_name: "foo", -} -statuses = monitor["from"](start: -2h, fn: (r) => r["foo"] == "bar" and r["baz"] == "bang") -crit = statuses |> filter(fn: (r) => r["_level"] == "crit") -info_to_warn = statuses |> monitor["stateChanges"](fromLevel: "info", toLevel: "warn") -all_statuses = - union(tables: [crit, info_to_warn]) - |> sort(columns: ["_time"]) - |> filter(fn: (r) => r["_time"] >= experimental["subDuration"](from: now(), d: 1h)) - -all_statuses - |> monitor["notify"]( - data: notification, - endpoint: - slack_endpoint( - mapFn: (r) => - ({ - channel: "bar", - text: "blah", - color: - if r["_level"] == "crit" then - "danger" - else if r["_level"] == "warn" then - "warning" - else - "good", - }), - ), - ) -`, - rule: &rule.Slack{ - Channel: "bar", - MessageTemplate: "blah", - Base: rule.Base{ - ID: 1, - EndpointID: 2, - Name: "foo", - Every: mustDuration("1h"), - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "foo", - Value: "bar", - }, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{ - Key: "baz", - Value: "bang", - }, - Operator: influxdb.Equal, - }, - }, - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - { - CurrentLevel: notification.Warn, - PreviousLevel: statusRulePtr(notification.Info), - }, - }, - }, - }, - endpoint: &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(2), - Name: "foo", - }, - Token: influxdb.SecretField{ - Key: "slack_token", - }, - }, - }, - { - name: "with token and url", - want: `import "influxdata/influxdb/monitor" -import "slack" -import "influxdata/influxdb/secrets" -import "experimental" - -option task = {name: "foo", every: 1h} - -slack_secret = secrets["get"](key: "slack_token") -slack_endpoint = slack["endpoint"](token: slack_secret, url: "http://localhost:7777") -notification = { - _notification_rule_id: "0000000000000001", - _notification_rule_name: "foo", - _notification_endpoint_id: 
"0000000000000002", - _notification_endpoint_name: "foo", -} -statuses = monitor["from"](start: -2h, fn: (r) => r["foo"] == "bar" and r["baz"] == "bang") -crit = statuses |> filter(fn: (r) => r["_level"] == "crit") -info_to_warn = statuses |> monitor["stateChanges"](fromLevel: "info", toLevel: "warn") -all_statuses = - union(tables: [crit, info_to_warn]) - |> sort(columns: ["_time"]) - |> filter(fn: (r) => r["_time"] >= experimental["subDuration"](from: now(), d: 1h)) - -all_statuses - |> monitor["notify"]( - data: notification, - endpoint: - slack_endpoint( - mapFn: (r) => - ({ - channel: "bar", - text: "blah", - color: - if r["_level"] == "crit" then - "danger" - else if r["_level"] == "warn" then - "warning" - else - "good", - }), - ), - ) -`, - rule: &rule.Slack{ - Channel: "bar", - MessageTemplate: "blah", - Base: rule.Base{ - ID: 1, - EndpointID: 2, - Name: "foo", - Every: mustDuration("1h"), - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "foo", - Value: "bar", - }, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{ - Key: "baz", - Value: "bang", - }, - Operator: influxdb.Equal, - }, - }, - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - { - CurrentLevel: notification.Warn, - PreviousLevel: statusRulePtr(notification.Info), - }, - }, - }, - }, - endpoint: &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(2), - Name: "foo", - }, - URL: "http://localhost:7777", - Token: influxdb.SecretField{ - Key: "slack_token", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - f, err := tt.rule.GenerateFlux(tt.endpoint) - if err != nil { - t.Fatal(err) - } - - if f != itesting.FormatFluxString(t, tt.want) { - t.Errorf("scripts did not match. want:\n%v\n\ngot:\n%v", tt.want, f) - } - }) - } -} diff --git a/notification/rule/telegram.go b/notification/rule/telegram.go deleted file mode 100644 index 7f6c553b1e5..00000000000 --- a/notification/rule/telegram.go +++ /dev/null @@ -1,137 +0,0 @@ -package rule - -import ( - "encoding/json" - "fmt" - - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/ast/astutil" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/flux" -) - -// Telegram is the notification rule config of telegram. -type Telegram struct { - Base - MessageTemplate string `json:"messageTemplate"` - ParseMode string `json:"parseMode"` - DisableWebPagePreview bool `json:"disableWebPagePreview"` -} - -// GenerateFlux generates a flux script for the telegram notification rule. -func (s *Telegram) GenerateFlux(e influxdb.NotificationEndpoint) (string, error) { - telegramEndpoint, ok := e.(*endpoint.Telegram) - if !ok { - return "", fmt.Errorf("endpoint provided is a %s, not a Telegram endpoint", e.Type()) - } - return astutil.Format(s.GenerateFluxAST(telegramEndpoint)) -} - -// GenerateFluxAST generates a flux AST for the telegram notification rule. 
-func (s *Telegram) GenerateFluxAST(e *endpoint.Telegram) *ast.File { - return flux.File( - s.Name, - flux.Imports("influxdata/influxdb/monitor", "contrib/sranka/telegram", "influxdata/influxdb/secrets", "experimental"), - s.generateFluxASTBody(e), - ) -} - -func (s *Telegram) generateFluxASTBody(e *endpoint.Telegram) []ast.Statement { - var statements []ast.Statement - statements = append(statements, s.generateTaskOption()) - if e.Token.Key != "" { - statements = append(statements, s.generateFluxASTSecrets(e)) - } - statements = append(statements, s.generateFluxASTEndpoint(e)) - statements = append(statements, s.generateFluxASTNotificationDefinition(e)) - statements = append(statements, s.generateFluxASTStatuses()) - statements = append(statements, s.generateLevelChecks()...) - statements = append(statements, s.generateFluxASTNotifyPipe(e)) - - return statements -} - -func (s *Telegram) generateFluxASTSecrets(e *endpoint.Telegram) ast.Statement { - call := flux.Call(flux.Member("secrets", "get"), flux.Object(flux.Property("key", flux.String(e.Token.Key)))) - - return flux.DefineVariable("telegram_secret", call) -} - -func (s *Telegram) generateFluxASTEndpoint(e *endpoint.Telegram) ast.Statement { - props := []*ast.Property{} - if e.Token.Key != "" { - props = append(props, flux.Property("token", flux.Identifier("telegram_secret"))) - } - if s.ParseMode != "" { - props = append(props, flux.Property("parseMode", flux.String(s.ParseMode))) - } - props = append(props, flux.Property("disableWebPagePreview", flux.Bool(s.DisableWebPagePreview))) - call := flux.Call(flux.Member("telegram", "endpoint"), flux.Object(props...)) - - return flux.DefineVariable("telegram_endpoint", call) -} - -func (s *Telegram) generateFluxASTNotifyPipe(e *endpoint.Telegram) ast.Statement { - endpointProps := []*ast.Property{} - endpointProps = append(endpointProps, flux.Property("channel", flux.String(e.Channel))) - endpointProps = append(endpointProps, flux.Property("text", flux.String(s.MessageTemplate))) - endpointProps = append(endpointProps, flux.Property("silent", s.generateSilent())) - endpointFn := flux.Function(flux.FunctionParams("r"), flux.Object(endpointProps...)) - - props := []*ast.Property{} - props = append(props, flux.Property("data", flux.Identifier("notification"))) - props = append(props, flux.Property("endpoint", - flux.Call(flux.Identifier("telegram_endpoint"), flux.Object(flux.Property("mapFn", endpointFn))))) - - call := flux.Call(flux.Member("monitor", "notify"), flux.Object(props...)) - - return flux.ExpressionStatement(flux.Pipe(flux.Identifier("all_statuses"), call)) -} - -func (s *Telegram) generateSilent() ast.Expression { - level := flux.Member("r", "_level") - return flux.If( - flux.Equal(level, flux.String("crit")), - flux.Bool(true), - flux.If( - flux.Equal(level, flux.String("warn")), - flux.Bool(true), - flux.Bool(false), - ), - ) -} - -type telegramAlias Telegram - -// MarshalJSON implement json.Marshaler interface. -func (s Telegram) MarshalJSON() ([]byte, error) { - return json.Marshal( - struct { - telegramAlias - Type string `json:"type"` - }{ - telegramAlias: telegramAlias(s), - Type: s.Type(), - }) -} - -// Valid returns where the config is valid. -func (s Telegram) Valid() error { - if err := s.Base.valid(); err != nil { - return err - } - if s.MessageTemplate == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Telegram MessageTemplate is invalid", - } - } - return nil -} - -// Type returns the type of the rule config. 
-func (s Telegram) Type() string { - return "telegram" -} diff --git a/notification/rule/telegram_test.go b/notification/rule/telegram_test.go deleted file mode 100644 index 86552dc55f6..00000000000 --- a/notification/rule/telegram_test.go +++ /dev/null @@ -1,317 +0,0 @@ -package rule_test - -import ( - "testing" - - "github.com/andreyvit/diff" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/notification" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/rule" - influxTesting "github.com/influxdata/influxdb/v2/testing" -) - -var _ influxdb.NotificationRule = &rule.Telegram{} - -func TestTelegram_GenerateFlux(t *testing.T) { - tests := []struct { - name string - rule *rule.Telegram - endpoint influxdb.NotificationEndpoint - script string - }{ - { - name: "incompatible with endpoint", - endpoint: &endpoint.Slack{ - Base: endpoint.Base{ - ID: idPtr(3), - Name: "foo", - }, - URL: "http://whatever", - }, - rule: &rule.Telegram{ - MessageTemplate: "blah", - Base: rule.Base{ - ID: 1, - EndpointID: 3, - Name: "foo", - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - }, - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "foo", - Value: "bar", - }, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{ - Key: "baz", - Value: "bang", - }, - Operator: influxdb.Equal, - }, - }, - }, - }, - script: "", //no script generater, because of incompatible endpoint - }, - { - name: "notify on crit", - endpoint: &endpoint.Telegram{ - Base: endpoint.Base{ - ID: idPtr(3), - Name: "foo", - }, - Token: influxdb.SecretField{Key: "3-key"}, - Channel: "-12345", - }, - rule: &rule.Telegram{ - MessageTemplate: "blah", - Base: rule.Base{ - ID: 1, - EndpointID: 3, - Name: "foo", - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - }, - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "foo", - Value: "bar", - }, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{ - Key: "baz", - Value: "bang", - }, - Operator: influxdb.Equal, - }, - }, - }, - }, - script: `import "influxdata/influxdb/monitor" -import "contrib/sranka/telegram" -import "influxdata/influxdb/secrets" -import "experimental" - -option task = {name: "foo", every: 1h} - -telegram_secret = secrets["get"](key: "3-key") -telegram_endpoint = telegram["endpoint"](token: telegram_secret, disableWebPagePreview: false) -notification = { - _notification_rule_id: "0000000000000001", - _notification_rule_name: "foo", - _notification_endpoint_id: "0000000000000003", - _notification_endpoint_name: "foo", -} -statuses = monitor["from"](start: -2h, fn: (r) => r["foo"] == "bar" and r["baz"] == "bang") -crit = statuses |> filter(fn: (r) => r["_level"] == "crit") -all_statuses = crit |> filter(fn: (r) => r["_time"] >= experimental["subDuration"](from: now(), d: 1h)) - -all_statuses - |> monitor["notify"]( - data: notification, - endpoint: - telegram_endpoint( - mapFn: (r) => - ({ - channel: "-12345", - text: "blah", - silent: if r["_level"] == "crit" then true else if r["_level"] == "warn" then true else false, - }), - ), - ) -`, - }, - { - name: "with DisableWebPagePreview and ParseMode", - endpoint: &endpoint.Telegram{ - Base: endpoint.Base{ - ID: idPtr(3), - Name: "foo", - }, - Token: 
influxdb.SecretField{Key: "3-key"}, - Channel: "-12345", - }, - rule: &rule.Telegram{ - MessageTemplate: "blah", - DisableWebPagePreview: true, - ParseMode: "HTML", - Base: rule.Base{ - ID: 1, - EndpointID: 3, - Name: "foo", - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Any, - }, - }, - TagRules: []notification.TagRule{ - { - Tag: influxdb.Tag{ - Key: "foo", - Value: "bar", - }, - Operator: influxdb.Equal, - }, - { - Tag: influxdb.Tag{ - Key: "baz", - Value: "bang", - }, - Operator: influxdb.Equal, - }, - }, - }, - }, - script: `import "influxdata/influxdb/monitor" -import "contrib/sranka/telegram" -import "influxdata/influxdb/secrets" -import "experimental" - -option task = {name: "foo", every: 1h} - -telegram_secret = secrets["get"](key: "3-key") -telegram_endpoint = telegram["endpoint"](token: telegram_secret, parseMode: "HTML", disableWebPagePreview: true) -notification = { - _notification_rule_id: "0000000000000001", - _notification_rule_name: "foo", - _notification_endpoint_id: "0000000000000003", - _notification_endpoint_name: "foo", -} -statuses = monitor["from"](start: -2h, fn: (r) => r["foo"] == "bar" and r["baz"] == "bang") -any = statuses |> filter(fn: (r) => true) -all_statuses = any |> filter(fn: (r) => r["_time"] >= experimental["subDuration"](from: now(), d: 1h)) - -all_statuses - |> monitor["notify"]( - data: notification, - endpoint: - telegram_endpoint( - mapFn: (r) => - ({ - channel: "-12345", - text: "blah", - silent: if r["_level"] == "crit" then true else if r["_level"] == "warn" then true else false, - }), - ), - ) -`, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - script, err := tt.rule.GenerateFlux(tt.endpoint) - if err != nil { - if script != "" { - t.Errorf("Failed to generate flux: %v", err) - } - return - } - - if got, want := script, influxTesting.FormatFluxString(t, tt.script); got != want { - t.Errorf("\n\nStrings do not match:\n\n%s", diff.LineDiff(got, want)) - } - }) - } -} - -func TestTelegram_Valid(t *testing.T) { - cases := []struct { - name string - rule *rule.Telegram - err error - }{ - { - name: "valid template", - rule: &rule.Telegram{ - MessageTemplate: "blah", - Base: rule.Base{ - ID: 1, - EndpointID: 3, - OwnerID: 4, - OrgID: 5, - Name: "foo", - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - }, - TagRules: []notification.TagRule{}, - }, - }, - err: nil, - }, - { - name: "missing MessageTemplate", - rule: &rule.Telegram{ - MessageTemplate: "", - Base: rule.Base{ - ID: 1, - EndpointID: 3, - OwnerID: 4, - OrgID: 5, - Name: "foo", - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - }, - TagRules: []notification.TagRule{}, - }, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "Telegram MessageTemplate is invalid", - }, - }, - { - name: "missing EndpointID", - rule: &rule.Telegram{ - MessageTemplate: "", - Base: rule.Base{ - ID: 1, - // EndpointID: 3, - OwnerID: 4, - OrgID: 5, - Name: "foo", - Every: mustDuration("1h"), - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - }, - TagRules: []notification.TagRule{}, - }, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "Notification Rule EndpointID is invalid", - }, - }, - } - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - got := c.rule.Valid() - influxTesting.ErrorsEqual(t, got, c.err) - }) - } - -} 
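For reference, the removed tests above exercise the whole rule-to-Flux pipeline: a `rule.Telegram` plus an `endpoint.Telegram` are turned into a `monitor["notify"]` task script by `GenerateFlux`. The sketch below reconstructs that flow as a standalone program using only the types, fields, and package paths that appear in the files deleted in this diff; the `main` wrapper and the printed output are added purely for illustration, and the snippet should be read as a hedged example of how the deleted code was used, not as a supported API.

package main

import (
	"fmt"

	"github.com/influxdata/flux/ast"
	"github.com/influxdata/flux/parser"
	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/notification"
	"github.com/influxdata/influxdb/v2/notification/endpoint"
	"github.com/influxdata/influxdb/v2/notification/rule"
)

func main() {
	// Parse the "every" interval the same way the deleted tests' mustDuration helper does.
	every, err := parser.ParseDuration("1h")
	if err != nil {
		panic(err)
	}
	every.BaseNode = ast.BaseNode{}

	endpointID := platform.ID(3)

	// A Telegram rule that notifies on critical statuses, mirroring the "notify on crit" test case.
	r := &rule.Telegram{
		MessageTemplate: "blah",
		Base: rule.Base{
			ID:         1,
			EndpointID: 3,
			Name:       "foo",
			Every:      (*notification.Duration)(every),
			StatusRules: []notification.StatusRule{
				{CurrentLevel: notification.Critical},
			},
			TagRules: []notification.TagRule{
				{
					Tag:      influxdb.Tag{Key: "foo", Value: "bar"},
					Operator: influxdb.Equal,
				},
			},
		},
	}

	// The endpoint supplies the secret token key and the chat channel.
	e := &endpoint.Telegram{
		Base:    endpoint.Base{ID: &endpointID, Name: "foo"},
		Token:   influxdb.SecretField{Key: "3-key"},
		Channel: "-12345",
	}

	// GenerateFlux type-asserts the endpoint and renders the imports, task option,
	// secrets lookup, telegram endpoint definition, and monitor["notify"] pipe.
	script, err := r.GenerateFlux(e)
	if err != nil {
		panic(err)
	}
	fmt.Println(script)
}

The printed script resembles the `want` strings in the removed tests: an `option task = {name: "foo", every: 1h}` header, a `secrets["get"]` lookup, a `telegram["endpoint"]` definition, and an `all_statuses |> monitor["notify"](...)` pipe.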
diff --git a/notification/status.go b/notification/status.go deleted file mode 100644 index c52adbf41ed..00000000000 --- a/notification/status.go +++ /dev/null @@ -1,74 +0,0 @@ -package notification - -import ( - "encoding/json" - "strings" -) - -// StatusRule includes parameters of status rules. -type StatusRule struct { - CurrentLevel CheckLevel `json:"currentLevel"` - PreviousLevel *CheckLevel `json:"previousLevel"` -} - -// CheckLevel is the enum value of status levels. -type CheckLevel int - -// consts of CheckStatusLevel -const ( - Unknown CheckLevel = iota - Ok - Info - Warn - Critical - Any -) - -var checkLevels = []string{ - "UNKNOWN", - "OK", - "INFO", - "WARN", - "CRIT", - "ANY", -} - -var checkLevelMaps = map[string]CheckLevel{ - "UNKNOWN": Unknown, - "OK": Ok, - "INFO": Info, - "WARN": Warn, - "CRIT": Critical, - "ANY": Any, -} - -// MarshalJSON implements json.Marshaller. -func (cl CheckLevel) MarshalJSON() ([]byte, error) { - return json.Marshal(cl.String()) -} - -// UnmarshalJSON implements json.Unmarshaller. -func (cl *CheckLevel) UnmarshalJSON(b []byte) error { - var ss string - if err := json.Unmarshal(b, &ss); err != nil { - return err - } - *cl = ParseCheckLevel(strings.ToUpper(ss)) - return nil -} - -// String returns the string value, invalid CheckLevel will return Unknown. -func (cl CheckLevel) String() string { - if cl < Unknown || cl > Any { - cl = Unknown - } - return checkLevels[cl] -} - -// ParseCheckLevel will parse the string to checkLevel -func ParseCheckLevel(s string) CheckLevel { - if cl, ok := checkLevelMaps[s]; ok { - return cl - } - return Unknown -} diff --git a/notification/status_test.go b/notification/status_test.go deleted file mode 100644 index 48413c138c2..00000000000 --- a/notification/status_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package notification - -import ( - "encoding/json" - "testing" - - "github.com/google/go-cmp/cmp" -) - -func checkLvlPtr(l CheckLevel) *CheckLevel { - return &l -} - -func TestStatusJSON(t *testing.T) { - cases := []struct { - name string - src StatusRule - target StatusRule - }{ - { - name: "regular status rule", - src: StatusRule{ - CurrentLevel: Warn, - PreviousLevel: checkLvlPtr(Critical), - }, - target: StatusRule{ - CurrentLevel: Warn, - PreviousLevel: checkLvlPtr(Critical), - }, - }, - { - name: "empty", - src: StatusRule{}, - target: StatusRule{}, - }, - { - name: "invalid status", - src: StatusRule{ - CurrentLevel: CheckLevel(-10), - }, - target: StatusRule{ - CurrentLevel: Unknown, - }, - }, - } - for _, c := range cases { - serialized, err := json.Marshal(c.src) - if err != nil { - t.Errorf("%s marshal failed, err: %s", c.name, err) - } - var got StatusRule - err = json.Unmarshal(serialized, &got) - if err != nil { - t.Errorf("%s unmarshal failed, err: %s", c.name, err) - } - if diff := cmp.Diff(got, c.target); diff != "" { - t.Errorf("status rules are different -got/+want\ndiff %s", diff) - } - } -} diff --git a/notification/tag.go b/notification/tag.go deleted file mode 100644 index 25ec75695bb..00000000000 --- a/notification/tag.go +++ /dev/null @@ -1,29 +0,0 @@ -package notification - -import ( - "github.com/influxdata/flux/ast" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/notification/flux" -) - -// TagRule is the struct of tag rule. -type TagRule influxdb.TagRule - -// Valid returns error for invalid operators. 
-func (tr TagRule) Valid() error { - return influxdb.TagRule(tr).Valid() -} - -// GenerateFluxAST generates the AST expression for a tag rule. -func (tr TagRule) GenerateFluxAST() ast.Expression { - k := flux.Member("r", tr.Key) - v := flux.String(tr.Value) - - switch tr.Operator { - case influxdb.Equal: - return flux.Equal(k, v) - // TODO(desa): have this work for all operator types - } - - return flux.Equal(k, v) -} diff --git a/notification_endpoint.go b/notification_endpoint.go deleted file mode 100644 index df5972d6d6b..00000000000 --- a/notification_endpoint.go +++ /dev/null @@ -1,130 +0,0 @@ -package influxdb - -import ( - "context" - "encoding/json" - "errors" - - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - // ErrInvalidNotificationEndpointType denotes that the provided NotificationEndpoint is not a valid type - ErrInvalidNotificationEndpointType = errors.New("unknown notification endpoint type") -) - -// NotificationEndpoint is the configuration describing -// how to call a 3rd party service. E.g. Slack, Pagerduty -type NotificationEndpoint interface { - Valid() error - Type() string - json.Marshaler - CRUDLogSetter - SetID(id platform.ID) - SetOrgID(id platform.ID) - SetName(name string) - SetDescription(description string) - SetStatus(status Status) - - GetID() platform.ID - GetCRUDLog() CRUDLog - GetOrgID() platform.ID - GetName() string - GetDescription() string - GetStatus() Status - // SecretFields return available secret fields. - SecretFields() []SecretField - // BackfillSecretKeys fill back fill the secret field key during the unmarshalling - // if value of that secret field is not nil. - BackfillSecretKeys() -} - -// ops for checks error -var ( - OpFindNotificationEndpointByID = "FindNotificationEndpointByID" - OpFindNotificationEndpoint = "FindNotificationEndpoint" - OpFindNotificationEndpoints = "FindNotificationEndpoints" - OpCreateNotificationEndpoint = "CreateNotificationEndpoint" - OpUpdateNotificationEndpoint = "UpdateNotificationEndpoint" - OpDeleteNotificationEndpoint = "DeleteNotificationEndpoint" -) - -// NotificationEndpointFilter represents a set of filter that restrict the returned notification endpoints. -type NotificationEndpointFilter struct { - ID *platform.ID - OrgID *platform.ID - Org *string - UserResourceMappingFilter -} - -// QueryParams Converts NotificationEndpointFilter fields to url query params. -func (f NotificationEndpointFilter) QueryParams() map[string][]string { - qp := map[string][]string{} - - if f.OrgID != nil { - qp["orgID"] = []string{f.OrgID.String()} - } - - if f.Org != nil { - qp["org"] = []string{*f.Org} - } - - return qp -} - -// NotificationEndpointUpdate is the set of upgrade fields for patch request. -type NotificationEndpointUpdate struct { - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - Status *Status `json:"status,omitempty"` -} - -// Valid will verify if the NotificationEndpointUpdate is valid. 
-func (n *NotificationEndpointUpdate) Valid() error { - if n.Name != nil && *n.Name == "" { - return &errors2.Error{ - Code: errors2.EInvalid, - Msg: "Notification Endpoint Name can't be empty", - } - } - - if n.Description != nil && *n.Description == "" { - return &errors2.Error{ - Code: errors2.EInvalid, - Msg: "Notification Endpoint Description can't be empty", - } - } - - if n.Status != nil { - if err := n.Status.Valid(); err != nil { - return err - } - } - - return nil -} - -// NotificationEndpointService represents a service for managing notification endpoints. -type NotificationEndpointService interface { - // FindNotificationEndpointByID returns a single notification endpoint by ID. - FindNotificationEndpointByID(ctx context.Context, id platform.ID) (NotificationEndpoint, error) - - // FindNotificationEndpoints returns a list of notification endpoints that match filter and the total count of matching notification endpoints. - // Additional options provide pagination & sorting. - FindNotificationEndpoints(ctx context.Context, filter NotificationEndpointFilter, opt ...FindOptions) ([]NotificationEndpoint, int, error) - - // CreateNotificationEndpoint creates a new notification endpoint and sets b.ID with the new identifier. - CreateNotificationEndpoint(ctx context.Context, ne NotificationEndpoint, userID platform.ID) error - - // UpdateNotificationEndpoint updates a single notification endpoint. - // Returns the new notification endpoint after update. - UpdateNotificationEndpoint(ctx context.Context, id platform.ID, nr NotificationEndpoint, userID platform.ID) (NotificationEndpoint, error) - - // PatchNotificationEndpoint updates a single notification endpoint with changeset. - // Returns the new notification endpoint state after update. - PatchNotificationEndpoint(ctx context.Context, id platform.ID, upd NotificationEndpointUpdate) (NotificationEndpoint, error) - - // DeleteNotificationEndpoint removes a notification endpoint by ID, returns secret fields, orgID for further deletion. - DeleteNotificationEndpoint(ctx context.Context, id platform.ID) (flds []SecretField, orgID platform.ID, err error) -} diff --git a/onboarding.go b/onboarding.go deleted file mode 100644 index a8a1332cc92..00000000000 --- a/onboarding.go +++ /dev/null @@ -1,68 +0,0 @@ -package influxdb - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// OnboardingService represents a service for the first run. -type OnboardingService interface { - // IsOnboarding determine if onboarding request is allowed. - IsOnboarding(ctx context.Context) (bool, error) - - // OnboardInitialUser creates the initial org/user/bucket in the DB. - OnboardInitialUser(ctx context.Context, req *OnboardingRequest) (*OnboardingResults, error) -} - -// OnboardingResults is a group of elements required for first run. -type OnboardingResults struct { - User *User `json:"user"` - Org *Organization `json:"org"` - Bucket *Bucket `json:"bucket"` - Auth *Authorization `json:"auth"` -} - -// OnboardingRequest is the request -// to setup defaults. 
-type OnboardingRequest struct { - User string `json:"username"` - Password string `json:"password"` - Org string `json:"org"` - Bucket string `json:"bucket"` - RetentionPeriodSeconds int64 `json:"retentionPeriodSeconds,omitempty"` - RetentionPeriodDeprecated time.Duration `json:"retentionPeriodHrs,omitempty"` - Token string `json:"token,omitempty"` -} - -func (r *OnboardingRequest) Valid() error { - if r.User == "" { - return &errors.Error{ - Code: errors.EEmptyValue, - Msg: "username is empty", - } - } - - if r.Org == "" { - return &errors.Error{ - Code: errors.EEmptyValue, - Msg: "org name is empty", - } - } - - if r.Bucket == "" { - return &errors.Error{ - Code: errors.EEmptyValue, - Msg: "bucket name is empty", - } - } - return nil -} - -func (r *OnboardingRequest) RetentionPeriod() time.Duration { - if r.RetentionPeriodSeconds > 0 { - return time.Duration(r.RetentionPeriodSeconds) * time.Second - } - return r.RetentionPeriodDeprecated -} diff --git a/operation_log.go b/operation_log.go deleted file mode 100644 index 35a80d20174..00000000000 --- a/operation_log.go +++ /dev/null @@ -1,45 +0,0 @@ -package influxdb - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// OperationLogEntry is a record in an operation log. -type OperationLogEntry struct { - Description string `json:"description"` - UserID platform.ID `json:"userID,omitempty"` - Time time.Time `json:"time,omitempty"` -} - -// DashboardOperationLogService is an interface for retrieving the operation log for a dashboard. -type DashboardOperationLogService interface { - // GetDashboardOperationLog retrieves the operation log for the dashboard with the provided id. - GetDashboardOperationLog(ctx context.Context, id platform.ID, opts FindOptions) ([]*OperationLogEntry, int, error) -} - -// BucketOperationLogService is an interface for retrieving the operation log for a bucket. -type BucketOperationLogService interface { - // GetBucketOperationLog retrieves the operation log for the bucket with the provided id. - GetBucketOperationLog(ctx context.Context, id platform.ID, opts FindOptions) ([]*OperationLogEntry, int, error) -} - -// UserOperationLogService is an interface for retrieving the operation log for a user. -type UserOperationLogService interface { - // GetUserOperationLog retrieves the operation log for the user with the provided id. - GetUserOperationLog(ctx context.Context, id platform.ID, opts FindOptions) ([]*OperationLogEntry, int, error) -} - -// OrganizationOperationLogService is an interface for retrieving the operation log for an org. -type OrganizationOperationLogService interface { - // GetOrganizationOperationLog retrieves the operation log for the org with the provided id. - GetOrganizationOperationLog(ctx context.Context, id platform.ID, opts FindOptions) ([]*OperationLogEntry, int, error) -} - -// DefaultOperationLogFindOptions are the default options for the operation log. -var DefaultOperationLogFindOptions = FindOptions{ - Descending: true, - Limit: 100, -} diff --git a/organization.go b/organization.go deleted file mode 100644 index 74f07c71716..00000000000 --- a/organization.go +++ /dev/null @@ -1,89 +0,0 @@ -package influxdb - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// Organization is an organization. 
🎉 -type Organization struct { - ID platform.ID `json:"id,omitempty"` - Name string `json:"name"` - Description string `json:"description"` - CRUDLog -} - -// errors of org -var ( - // ErrOrgNameisEmpty is error when org name is empty - ErrOrgNameisEmpty = &errors.Error{ - Code: errors.EInvalid, - Msg: "org name is empty", - } -) - -// ops for orgs error and orgs op logs. -const ( - OpFindOrganizationByID = "FindOrganizationByID" - OpFindOrganization = "FindOrganization" - OpFindOrganizations = "FindOrganizations" - OpCreateOrganization = "CreateOrganization" - OpPutOrganization = "PutOrganization" - OpUpdateOrganization = "UpdateOrganization" - OpDeleteOrganization = "DeleteOrganization" -) - -// OrganizationService represents a service for managing organization data. -type OrganizationService interface { - // Returns a single organization by ID. - FindOrganizationByID(ctx context.Context, id platform.ID) (*Organization, error) - - // Returns the first organization that matches filter. - FindOrganization(ctx context.Context, filter OrganizationFilter) (*Organization, error) - - // Returns a list of organizations that match filter and the total count of matching organizations. - // Additional options provide pagination & sorting. - FindOrganizations(ctx context.Context, filter OrganizationFilter, opt ...FindOptions) ([]*Organization, int, error) - - // Creates a new organization and sets b.ID with the new identifier. - CreateOrganization(ctx context.Context, b *Organization) error - - // Updates a single organization with changeset. - // Returns the new organization state after update. - UpdateOrganization(ctx context.Context, id platform.ID, upd OrganizationUpdate) (*Organization, error) - - // Removes a organization by ID. - DeleteOrganization(ctx context.Context, id platform.ID) error -} - -// OrganizationUpdate represents updates to a organization. -// Only fields which are set are updated. -type OrganizationUpdate struct { - Name *string - Description *string `json:"description,omitempty"` -} - -// ErrInvalidOrgFilter is the error indicate org filter is empty -var ErrInvalidOrgFilter = &errors.Error{ - Code: errors.EInvalid, - Msg: "Please provide either orgID or org", -} - -// OrganizationFilter represents a set of filter that restrict the returned results. -type OrganizationFilter struct { - Name *string - ID *platform.ID - UserID *platform.ID -} - -func ErrInternalOrgServiceError(op string, err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("unexpected error in organizations; Err: %v", err), - Op: op, - Err: err, - } -} diff --git a/paging.go b/paging.go deleted file mode 100644 index 70582d26cfd..00000000000 --- a/paging.go +++ /dev/null @@ -1,209 +0,0 @@ -package influxdb - -import ( - "fmt" - "net/http" - "net/url" - "strconv" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -const ( - DefaultPageSize = 20 - MaxPageSize = 100 -) - -// PagingFilter represents a filter containing url query params. -type PagingFilter interface { - // QueryParams returns a map containing url query params. - QueryParams() map[string][]string -} - -// PagingLinks represents paging links. -type PagingLinks struct { - Prev string `json:"prev,omitempty"` - Self string `json:"self"` - Next string `json:"next,omitempty"` -} - -// FindOptions represents options passed to all find methods with multiple results. 
-type FindOptions struct { - Limit int - Offset int - After *platform.ID - SortBy string - Descending bool -} - -// GetLimit returns the resolved limit between then limit boundaries. -// Given a limit <= 0 it returns the default limit. -func (f *FindOptions) GetLimit() int { - if f == nil || f.Limit <= 0 { - return DefaultPageSize - } - - if f.Limit > MaxPageSize { - return MaxPageSize - } - - return f.Limit -} - -// DecodeFindOptions returns a FindOptions decoded from http request. -func DecodeFindOptions(r *http.Request) (*FindOptions, error) { - opts := &FindOptions{} - qp := r.URL.Query() - - if offset := qp.Get("offset"); offset != "" { - o, err := strconv.Atoi(offset) - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "offset is invalid", - } - } - - opts.Offset = o - } - - if after := qp.Get("after"); after != "" { - id, err := platform.IDFromString(after) - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: fmt.Errorf("decoding after: %w", err), - } - } - - opts.After = id - } - - if limit := qp.Get("limit"); limit != "" { - l, err := strconv.Atoi(limit) - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "limit is invalid", - } - } - - if l < 1 || l > MaxPageSize { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("limit must be between 1 and %d", MaxPageSize), - } - } - - opts.Limit = l - } else { - opts.Limit = DefaultPageSize - } - - if sortBy := qp.Get("sortBy"); sortBy != "" { - opts.SortBy = sortBy - } - - if descending := qp.Get("descending"); descending != "" { - desc, err := strconv.ParseBool(descending) - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "descending is invalid", - } - } - - opts.Descending = desc - } - - return opts, nil -} - -func FindOptionParams(opts ...FindOptions) [][2]string { - var out [][2]string - for _, o := range opts { - for k, vals := range o.QueryParams() { - for _, v := range vals { - out = append(out, [2]string{k, v}) - } - } - } - return out -} - -// QueryParams returns a map containing url query params. -func (f FindOptions) QueryParams() map[string][]string { - qp := map[string][]string{ - "descending": {strconv.FormatBool(f.Descending)}, - "offset": {strconv.Itoa(f.Offset)}, - } - - if f.After != nil { - qp["after"] = []string{f.After.String()} - } - - if f.Limit > 0 { - qp["limit"] = []string{strconv.Itoa(f.Limit)} - } - - if f.SortBy != "" { - qp["sortBy"] = []string{f.SortBy} - } - - return qp -} - -// NewPagingLinks returns a PagingLinks. -// num is the number of returned results. 
-func NewPagingLinks(basePath string, opts FindOptions, f PagingFilter, num int) *PagingLinks { - u := url.URL{ - Path: basePath, - } - - values := url.Values{} - for k, vs := range f.QueryParams() { - for _, v := range vs { - if v != "" { - values.Add(k, v) - } - } - } - - var self, next, prev string - for k, vs := range opts.QueryParams() { - for _, v := range vs { - if v != "" { - values.Add(k, v) - } - } - } - - u.RawQuery = values.Encode() - self = u.String() - - if num >= opts.Limit { - nextOffset := opts.Offset + opts.Limit - values.Set("offset", strconv.Itoa(nextOffset)) - u.RawQuery = values.Encode() - next = u.String() - } - - if opts.Offset > 0 { - prevOffset := opts.Offset - opts.Limit - if prevOffset < 0 { - prevOffset = 0 - } - values.Set("offset", strconv.Itoa(prevOffset)) - u.RawQuery = values.Encode() - prev = u.String() - } - - links := &PagingLinks{ - Prev: prev, - Self: self, - Next: next, - } - - return links -} diff --git a/passwords.go b/passwords.go deleted file mode 100644 index 06db283617c..00000000000 --- a/passwords.go +++ /dev/null @@ -1,19 +0,0 @@ -package influxdb - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// PasswordsService is the service for managing basic auth passwords. -type PasswordsService interface { - // SetPassword overrides the password of a known user. - SetPassword(ctx context.Context, userID platform.ID, password string) error - // ComparePassword checks if the password matches the password recorded. - // Passwords that do not match return errors. - ComparePassword(ctx context.Context, userID platform.ID, password string) error - // CompareAndSetPassword checks the password and if they match - // updates to the new password. - CompareAndSetPassword(ctx context.Context, userID platform.ID, old, new string) error -} diff --git a/pkg/binaryutil/binaryutil.go b/pkg/binaryutil/binaryutil.go deleted file mode 100644 index b1d5f2ad066..00000000000 --- a/pkg/binaryutil/binaryutil.go +++ /dev/null @@ -1,22 +0,0 @@ -package binaryutil - -// VarintSize returns the number of bytes to varint encode x. -// This code is copied from encoding/binary.PutVarint() with the buffer removed. -func VarintSize(x int64) int { - ux := uint64(x) << 1 - if x < 0 { - ux = ^ux - } - return UvarintSize(ux) -} - -// UvarintSize returns the number of bytes to uvarint encode x. -// This code is copied from encoding/binary.PutUvarint() with the buffer removed. -func UvarintSize(x uint64) int { - i := 0 - for x >= 0x80 { - x >>= 7 - i++ - } - return i + 1 -} diff --git a/pkg/bloom/bloom.go b/pkg/bloom/bloom.go deleted file mode 100644 index b77ca71b946..00000000000 --- a/pkg/bloom/bloom.go +++ /dev/null @@ -1,136 +0,0 @@ -package bloom - -// NOTE: -// This package implements a limited bloom filter implementation based on -// Will Fitzgerald's bloom & bitset packages. It uses a zero-allocation xxhash -// implementation, rather than murmur3. It's implemented locally to support -// zero-copy memory-mapped slices. -// -// This also optimizes the filter by always using a bitset size with a power of 2. - -import ( - "fmt" - "math" - - "github.com/cespare/xxhash" -) - -// Filter represents a bloom filter. -type Filter struct { - k uint64 - b []byte - mask uint64 -} - -// NewFilter returns a new instance of Filter using m bits and k hash functions. -// If m is not a power of two then it is rounded to the next highest power of 2. 
-func NewFilter(m uint64, k uint64) *Filter { - m = pow2(m) - return &Filter{k: k, b: make([]byte, m>>3), mask: m - 1} -} - -// NewFilterBuffer returns a new instance of a filter using a backing buffer. -// The buffer length MUST be a power of 2. -func NewFilterBuffer(buf []byte, k uint64) (*Filter, error) { - m := pow2(uint64(len(buf)) * 8) - if m != uint64(len(buf))*8 { - return nil, fmt.Errorf("bloom.Filter: buffer bit count must a power of two: %d/%d", len(buf)*8, m) - } - return &Filter{k: k, b: buf, mask: m - 1}, nil -} - -// Len returns the number of bits used in the filter. -func (f *Filter) Len() uint { return uint(len(f.b)) } - -// K returns the number of hash functions used in the filter. -func (f *Filter) K() uint64 { return f.k } - -// Bytes returns the underlying backing slice. -func (f *Filter) Bytes() []byte { return f.b } - -// Clone returns a copy of f. -func (f *Filter) Clone() *Filter { - other := &Filter{k: f.k, b: make([]byte, len(f.b)), mask: f.mask} - copy(other.b, f.b) - return other -} - -// Insert inserts data to the filter. -func (f *Filter) Insert(v []byte) { - h := f.hash(v) - for i := uint64(0); i < f.k; i++ { - loc := f.location(h, i) - f.b[loc>>3] |= 1 << (loc & 7) - } -} - -// Contains returns true if the filter possibly contains v. -// Returns false if the filter definitely does not contain v. -func (f *Filter) Contains(v []byte) bool { - h := f.hash(v) - for i := uint64(0); i < f.k; i++ { - loc := f.location(h, i) - if f.b[loc>>3]&(1<<(loc&7)) == 0 { - return false - } - } - return true -} - -// Merge performs an in-place union of other into f. -// Returns an error if m or k of the filters differs. -func (f *Filter) Merge(other *Filter) error { - if other == nil { - return nil - } - - // Ensure m & k fields match. - if len(f.b) != len(other.b) { - return fmt.Errorf("bloom.Filter.Merge(): m mismatch: %d <> %d", len(f.b), len(other.b)) - } else if f.k != other.k { - return fmt.Errorf("bloom.Filter.Merge(): k mismatch: %d <> %d", f.b, other.b) - } - - // Perform union of each byte. - for i := range f.b { - f.b[i] |= other.b[i] - } - - return nil -} - -// location returns the ith hashed location using two hash values. -func (f *Filter) location(h [2]uint64, i uint64) uint { - return uint((h[0] + h[1]*i) & f.mask) -} - -// hash returns two 64-bit hashes based on the output of xxhash. -func (f *Filter) hash(data []byte) [2]uint64 { - v1 := xxhash.Sum64(data) - var v2 uint64 - if len(data) > 0 { - b := data[len(data)-1] // We'll put the original byte back. - data[len(data)-1] = byte(0) - v2 = xxhash.Sum64(data) - data[len(data)-1] = b - } - return [2]uint64{v1, v2} -} - -// Estimate returns an estimated bit count and hash count given the element count and false positive rate. -func Estimate(n uint64, p float64) (m uint64, k uint64) { - m = uint64(math.Ceil(-1 * float64(n) * math.Log(p) / math.Pow(math.Log(2), 2))) - k = uint64(math.Ceil(math.Log(2) * float64(m) / float64(n))) - return m, k -} - -// pow2 returns the number that is the next highest power of 2. -// Returns v if it is a power of 2. 
-func pow2(v uint64) uint64 { - for i := uint64(8); i < 1<<62; i *= 2 { - if i >= v { - return i - } - } - panic("unreachable") -} diff --git a/pkg/bloom/bloom_test.go b/pkg/bloom/bloom_test.go deleted file mode 100644 index 46ec7d60d41..00000000000 --- a/pkg/bloom/bloom_test.go +++ /dev/null @@ -1,189 +0,0 @@ -package bloom_test - -import ( - "encoding/binary" - "fmt" - "os" - "testing" - - "github.com/influxdata/influxdb/v2/pkg/bloom" -) - -// Ensure filter can insert values and verify they exist. -func TestFilter_InsertContains(t *testing.T) { - if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" || os.Getenv("CIRCLECI") != "" { - t.Skip("Skipping test in short, race, circle and appveyor mode.") - } - - // Short, less comprehensive test. - testShortFilter_InsertContains(t) - - if testing.Short() { - return // Just run the above short test - } - - // More comprehensive test for the xxhash based Bloom Filter. - - // These parameters will result, for 10M entries, with a bloom filter - // with 0.001 false positive rate (1 in 1000 values will be incorrectly - // identified as being present in the set). - filter := bloom.NewFilter(143775876, 10) - v := make([]byte, 4) - for i := 0; i < 10000000; i++ { - binary.BigEndian.PutUint32(v, uint32(i)) - filter.Insert(v) - } - - // None of the values inserted should ever be considered "not possibly in - // the filter". - t.Run("100M", func(t *testing.T) { - for i := 0; i < 10000000; i++ { - binary.BigEndian.PutUint32(v, uint32(i)) - if !filter.Contains(v) { - t.Fatalf("got false for value %q, expected true", v) - } - } - - // If we check for 100,000,000 values that we know are not present in the - // filter then we might expect around 100,000 of them to be false positives. - var fp int - for i := 10000000; i < 110000000; i++ { - binary.BigEndian.PutUint32(v, uint32(i)) - if filter.Contains(v) { - fp++ - } - } - - if fp > 1000000 { - // If we're an order of magnitude off, then it's arguable that there - // is a bug in the bloom filter. - t.Fatalf("got %d false positives which is an error rate of %f, expected error rate <=0.001", fp, float64(fp)/100000000) - } - t.Logf("Bloom false positive error rate was %f", float64(fp)/100000000) - }) -} - -func testShortFilter_InsertContains(t *testing.T) { - t.Run("short", func(t *testing.T) { - f := bloom.NewFilter(1000, 4) - - // Insert value and validate. - f.Insert([]byte("Bess")) - if !f.Contains([]byte("Bess")) { - t.Fatal("expected true") - } - - // Insert another value and test. - f.Insert([]byte("Emma")) - if !f.Contains([]byte("Emma")) { - t.Fatal("expected true") - } - - // Validate that a non-existent value doesn't exist. 
- if f.Contains([]byte("Jane")) { - t.Fatal("expected false") - } - }) -} - -var benchCases = []struct { - m, k uint64 - n int -}{ - {m: 100, k: 4, n: 1000}, - {m: 1000, k: 4, n: 1000}, - {m: 10000, k: 4, n: 1000}, - {m: 100000, k: 4, n: 1000}, - {m: 100, k: 8, n: 1000}, - {m: 1000, k: 8, n: 1000}, - {m: 10000, k: 8, n: 1000}, - {m: 100000, k: 8, n: 1000}, - {m: 100, k: 20, n: 1000}, - {m: 1000, k: 20, n: 1000}, - {m: 10000, k: 20, n: 1000}, - {m: 100000, k: 20, n: 1000}, -} - -func BenchmarkFilter_Insert(b *testing.B) { - for _, c := range benchCases { - data := make([][]byte, 0, c.n) - for i := 0; i < c.n; i++ { - data = append(data, []byte(fmt.Sprintf("%d", i))) - } - - filter := bloom.NewFilter(c.m, c.k) - b.Run(fmt.Sprintf("m=%d_k=%d_n=%d", c.m, c.k, c.n), func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - for _, v := range data { - filter.Insert(v) - } - } - }) - - } -} - -var okResult bool - -func BenchmarkFilter_Contains(b *testing.B) { - for _, c := range benchCases { - data := make([][]byte, 0, c.n) - notData := make([][]byte, 0, c.n) - for i := 0; i < c.n; i++ { - data = append(data, []byte(fmt.Sprintf("%d", i))) - notData = append(notData, []byte(fmt.Sprintf("%d", c.n+i))) - } - - filter := bloom.NewFilter(c.m, c.k) - for _, v := range data { - filter.Insert(v) - } - - b.Run(fmt.Sprintf("m=%d_k=%d_n=%d", c.m, c.k, c.n), func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - for _, v := range data { - okResult = filter.Contains(v) - if !okResult { - b.Fatalf("Filter returned negative for value %q in set", v) - } - } - - // And now a bunch of values that don't exist. - for _, v := range notData { - okResult = filter.Contains(v) - } - } - }) - } -} - -func BenchmarkFilter_Merge(b *testing.B) { - for _, c := range benchCases { - data1 := make([][]byte, 0, c.n) - data2 := make([][]byte, 0, c.n) - for i := 0; i < c.n; i++ { - data1 = append(data1, []byte(fmt.Sprintf("%d", i))) - data2 = append(data2, []byte(fmt.Sprintf("%d", c.n+i))) - } - - filter1 := bloom.NewFilter(c.m, c.k) - filter2 := bloom.NewFilter(c.m, c.k) - for i := 0; i < c.n; i++ { - filter1.Insert(data1[i]) - filter2.Insert(data2[i]) - } - - b.Run(fmt.Sprintf("m=%d_k=%d_n=%d", c.m, c.k, c.n), func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - other, err := bloom.NewFilterBuffer(filter1.Bytes(), filter1.K()) - if err != nil { - b.Fatal(err) - } - other.Merge(filter2) - } - }) - } -} diff --git a/pkg/bytesutil/bytesutil.go b/pkg/bytesutil/bytesutil.go deleted file mode 100644 index a318ab6ad0a..00000000000 --- a/pkg/bytesutil/bytesutil.go +++ /dev/null @@ -1,195 +0,0 @@ -package bytesutil - -import ( - "bytes" - "fmt" - "sort" -) - -// Sort sorts a slice of byte slices. -func Sort(a [][]byte) { - sort.Sort(byteSlices(a)) -} - -// SortDedup sorts the byte slice a and removes duplicates. The ret -func SortDedup(a [][]byte) [][]byte { - if len(a) < 2 { - return a - } - - Sort(a) - - i, j := 0, 1 - for j < len(a) { - if !bytes.Equal(a[j-1], a[j]) { - a[i] = a[j-1] - i++ - } - j++ - } - a[i] = a[j-1] - i++ - return a[:i] -} - -func IsSorted(a [][]byte) bool { - return sort.IsSorted(byteSlices(a)) -} - -// SearchBytes performs a binary search for x in the sorted slice a. -func SearchBytes(a [][]byte, x []byte) int { - // Define f(i) => bytes.Compare(a[i], x) < 0 - // Define f(-1) == false and f(n) == true. - // Invariant: f(i-1) == false, f(j) == true. 
- i, j := 0, len(a) - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if bytes.Compare(a[h], x) < 0 { - i = h + 1 // preserves f(i-1) == false - } else { - j = h // preserves f(j) == true - } - } - // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. - return i -} - -// Contains returns true if x is an element of the sorted slice a. -func Contains(a [][]byte, x []byte) bool { - n := SearchBytes(a, x) - return n < len(a) && bytes.Equal(a[n], x) -} - -// SearchBytesFixed searches a for x using a binary search. The size of a must be a multiple of -// of x or else the function panics. There returned value is the index within a where x should -// exist. The caller should ensure that x does exist at this index. -func SearchBytesFixed(a []byte, sz int, fn func(x []byte) bool) int { - if len(a)%sz != 0 { - panic(fmt.Sprintf("x is not a multiple of a: %d %d", len(a), sz)) - } - - i, j := 0, len(a)-sz - for i < j { - h := int(uint(i+j) >> 1) - h -= h % sz - if !fn(a[h : h+sz]) { - i = h + sz - } else { - j = h - } - } - - return i -} - -// Union returns the union of a & b in sorted order. -func Union(a, b [][]byte) [][]byte { - n := len(b) - if len(a) > len(b) { - n = len(a) - } - other := make([][]byte, 0, n) - - for { - if len(a) > 0 && len(b) > 0 { - if cmp := bytes.Compare(a[0], b[0]); cmp == 0 { - other, a, b = append(other, a[0]), a[1:], b[1:] - } else if cmp == -1 { - other, a = append(other, a[0]), a[1:] - } else { - other, b = append(other, b[0]), b[1:] - } - } else if len(a) > 0 { - other, a = append(other, a[0]), a[1:] - } else if len(b) > 0 { - other, b = append(other, b[0]), b[1:] - } else { - return other - } - } -} - -// Intersect returns the intersection of a & b in sorted order. -func Intersect(a, b [][]byte) [][]byte { - n := len(b) - if len(a) > len(b) { - n = len(a) - } - other := make([][]byte, 0, n) - - for len(a) > 0 && len(b) > 0 { - if cmp := bytes.Compare(a[0], b[0]); cmp == 0 { - other, a, b = append(other, a[0]), a[1:], b[1:] - } else if cmp == -1 { - a = a[1:] - } else { - b = b[1:] - } - } - return other -} - -// Clone returns a copy of b. -func Clone(b []byte) []byte { - if b == nil { - return nil - } - buf := make([]byte, len(b)) - copy(buf, b) - return buf -} - -// CloneSlice returns a copy of a slice of byte slices. -func CloneSlice(a [][]byte) [][]byte { - other := make([][]byte, len(a)) - for i := range a { - other[i] = Clone(a[i]) - } - return other -} - -// Pack converts a sparse array to a dense one. It removes sections of a containing -// runs of val of length width. The returned value is a subslice of a. -func Pack(a []byte, width int, val byte) []byte { - var i, j, jStart, end int - - fill := make([]byte, width) - for i := 0; i < len(fill); i++ { - fill[i] = val - } - - // Skip the first run that won't move - for ; i < len(a) && a[i] != val; i += width { - } - end = i - - for i < len(a) { - // Find the next gap to remove - for i < len(a) && a[i] == val { - i += width - } - - // Find the next non-gap to keep - jStart = i - for j = i; j < len(a) && a[j] != val; j += width { - } - - if jStart == len(a) { - break - } - - // Move the non-gap over the section to remove. 
- copy(a[end:], a[jStart:j]) - end += j - jStart - i = j - } - - return a[:end] -} - -type byteSlices [][]byte - -func (a byteSlices) Len() int { return len(a) } -func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 } -func (a byteSlices) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/pkg/bytesutil/bytesutil_test.go b/pkg/bytesutil/bytesutil_test.go deleted file mode 100644 index dcfe02122a5..00000000000 --- a/pkg/bytesutil/bytesutil_test.go +++ /dev/null @@ -1,281 +0,0 @@ -package bytesutil_test - -import ( - "bytes" - "encoding/binary" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/pkg/bytesutil" -) - -func TestSearchBytesFixed(t *testing.T) { - n, sz := 5, 8 - a := make([]byte, n*sz) // 5 - 8 byte int64s - - for i := 0; i < 5; i++ { - binary.BigEndian.PutUint64(a[i*sz:i*sz+sz], uint64(i)) - } - - var x [8]byte - - for i := 0; i < n; i++ { - binary.BigEndian.PutUint64(x[:], uint64(i)) - if exp, got := i*sz, bytesutil.SearchBytesFixed(a, len(x), func(v []byte) bool { - return bytes.Compare(v, x[:]) >= 0 - }); exp != got { - t.Fatalf("index mismatch: exp %v, got %v", exp, got) - } - } - - if exp, got := len(a)-1, bytesutil.SearchBytesFixed(a, 1, func(v []byte) bool { - return bytes.Compare(v, []byte{99}) >= 0 - }); exp != got { - t.Fatalf("index mismatch: exp %v, got %v", exp, got) - } -} - -func TestSearchBytes(t *testing.T) { - in := toByteSlices("bbb", "ccc", "eee", "fff", "ggg", "hhh") - tests := []struct { - name string - x string - exp int - }{ - {"exists first", "bbb", 0}, - {"exists middle", "eee", 2}, - {"exists last", "hhh", 5}, - {"not exists last", "zzz", 6}, - {"not exists first", "aaa", 0}, - {"not exists mid", "ddd", 2}, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - got := bytesutil.SearchBytes(in, []byte(test.x)) - if got != test.exp { - t.Errorf("got %d, expected %d", got, test.exp) - } - }) - } -} - -func TestContains(t *testing.T) { - in := toByteSlices("bbb", "ccc", "eee", "fff", "ggg", "hhh") - tests := []struct { - name string - x string - exp bool - }{ - {"exists first", "bbb", true}, - {"exists middle", "eee", true}, - {"exists last", "hhh", true}, - {"not exists last", "zzz", false}, - {"not exists first", "aaa", false}, - {"not exists mid", "ddd", false}, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - got := bytesutil.Contains(in, []byte(test.x)) - if got != test.exp { - t.Errorf("got %t, expected %t", got, test.exp) - } - }) - } -} - -func toByteSlices(s ...string) [][]byte { - r := make([][]byte, len(s)) - for i, v := range s { - r[i] = []byte(v) - } - return r -} - -func TestSortDedup(t *testing.T) { - tests := []struct { - name string - in [][]byte - exp [][]byte - }{ - { - name: "mixed dupes", - in: toByteSlices("bbb", "aba", "bbb", "aba", "ccc", "bbb", "aba"), - exp: toByteSlices("aba", "bbb", "ccc"), - }, - { - name: "no dupes", - in: toByteSlices("bbb", "ccc", "ddd"), - exp: toByteSlices("bbb", "ccc", "ddd"), - }, - { - name: "dupe at end", - in: toByteSlices("ccc", "ccc", "aaa"), - exp: toByteSlices("aaa", "ccc"), - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - out := bytesutil.SortDedup(test.in) - if !cmp.Equal(out, test.exp) { - t.Error("invalid result") - } - }) - } -} - -func TestPack_WidthOne_One(t *testing.T) { - a := make([]byte, 8) - - a[4] = 1 - - a = bytesutil.Pack(a, 1, 0) - if got, exp := len(a), 1; got != exp { - t.Fatalf("len mismatch: got %v, exp 
%v", got, exp) - } - - for i, v := range []byte{1} { - if got, exp := a[i], v; got != exp { - t.Fatalf("value mismatch: a[%d] = %v, exp %v", i, got, exp) - } - } -} - -func TestPack_WidthOne_Two(t *testing.T) { - a := make([]byte, 8) - - a[4] = 1 - a[6] = 2 - - a = bytesutil.Pack(a, 1, 0) - if got, exp := len(a), 2; got != exp { - t.Fatalf("len mismatch: got %v, exp %v", got, exp) - } - - for i, v := range []byte{1, 2} { - if got, exp := a[i], v; got != exp { - t.Fatalf("value mismatch: a[%d] = %v, exp %v", i, got, exp) - } - } -} - -func TestPack_WidthTwo_Two(t *testing.T) { - a := make([]byte, 8) - - a[2] = 1 - a[3] = 1 - a[6] = 2 - a[7] = 2 - - a = bytesutil.Pack(a, 2, 0) - if got, exp := len(a), 4; got != exp { - t.Fatalf("len mismatch: got %v, exp %v", got, exp) - } - - for i, v := range []byte{1, 1, 2, 2} { - if got, exp := a[i], v; got != exp { - t.Fatalf("value mismatch: a[%d] = %v, exp %v", i, got, exp) - } - } -} - -func TestPack_WidthOne_Last(t *testing.T) { - a := make([]byte, 8) - - a[6] = 2 - a[7] = 2 - - a = bytesutil.Pack(a, 2, 255) - if got, exp := len(a), 8; got != exp { - t.Fatalf("len mismatch: got %v, exp %v", got, exp) - } - - for i, v := range []byte{0, 0, 0, 0, 0, 0, 2, 2} { - if got, exp := a[i], v; got != exp { - t.Fatalf("value mismatch: a[%d] = %v, exp %v", i, got, exp) - } - } -} - -func TestPack_WidthOne_LastFill(t *testing.T) { - a := make([]byte, 8) - - a[0] = 255 - a[1] = 255 - a[2] = 2 - a[3] = 2 - a[4] = 2 - a[5] = 2 - a[6] = 2 - a[7] = 2 - - a = bytesutil.Pack(a, 2, 255) - if got, exp := len(a), 6; got != exp { - t.Fatalf("len mismatch: got %v, exp %v", got, exp) - } - - for i, v := range []byte{2, 2, 2, 2, 2, 2} { - if got, exp := a[i], v; got != exp { - t.Fatalf("value mismatch: a[%d] = %v, exp %v", i, got, exp) - } - } -} - -var result [][]byte - -func BenchmarkSortDedup(b *testing.B) { - b.Run("sort-deduplicate", func(b *testing.B) { - data := toByteSlices("bbb", "aba", "bbb", "aba", "ccc", "bbb", "aba") - in := append([][]byte{}, data...) - b.ReportAllocs() - - copy(in, data) - for i := 0; i < b.N; i++ { - result = bytesutil.SortDedup(in) - - b.StopTimer() - copy(in, data) - b.StartTimer() - } - }) -} - -func BenchmarkContains_True(b *testing.B) { - var in [][]byte - for i := 'a'; i <= 'z'; i++ { - in = append(in, []byte(strings.Repeat(string(i), 3))) - } - for i := 0; i < b.N; i++ { - bytesutil.Contains(in, []byte("xxx")) - } -} - -func BenchmarkContains_False(b *testing.B) { - var in [][]byte - for i := 'a'; i <= 'z'; i++ { - in = append(in, []byte(strings.Repeat(string(i), 3))) - } - for i := 0; i < b.N; i++ { - bytesutil.Contains(in, []byte("a")) - } -} - -func BenchmarkSearchBytes_Exists(b *testing.B) { - var in [][]byte - for i := 'a'; i <= 'z'; i++ { - in = append(in, []byte(strings.Repeat(string(i), 3))) - } - for i := 0; i < b.N; i++ { - bytesutil.SearchBytes(in, []byte("xxx")) - } -} - -func BenchmarkSearchBytes_NotExits(b *testing.B) { - var in [][]byte - for i := 'a'; i <= 'z'; i++ { - in = append(in, []byte(strings.Repeat(string(i), 3))) - } - for i := 0; i < b.N; i++ { - bytesutil.SearchBytes(in, []byte("a")) - } -} diff --git a/pkg/cmputil/cmputil.go b/pkg/cmputil/cmputil.go deleted file mode 100644 index 5d04dc95df6..00000000000 --- a/pkg/cmputil/cmputil.go +++ /dev/null @@ -1,38 +0,0 @@ -// Package cmputil provides helper utilities for the go-cmp package. 
-package cmputil - -import ( - "reflect" - "unicode" - "unicode/utf8" - - "github.com/google/go-cmp/cmp" - "google.golang.org/protobuf/proto" -) - -func IgnoreProtobufUnexported() cmp.Option { - return cmp.FilterPath(filterProtobufUnexported, cmp.Ignore()) -} - -func filterProtobufUnexported(p cmp.Path) bool { - // Determine if the path is pointing to a struct field. - sf, ok := p.Index(-1).(cmp.StructField) - if !ok { - return false - } - - // Return true if it is a proto.Message and the field is unexported. - return implementsProtoMessage(p.Index(-2).Type()) && !isExported(sf.Name()) -} - -// isExported reports whether the identifier is exported. -func isExported(id string) bool { - r, _ := utf8.DecodeRuneInString(id) - return unicode.IsUpper(r) -} - -var messageType = reflect.TypeOf((*proto.Message)(nil)).Elem() - -func implementsProtoMessage(t reflect.Type) bool { - return t.Implements(messageType) || reflect.PointerTo(t).Implements(messageType) -} diff --git a/pkg/data/gen/arrays.gen.go b/pkg/data/gen/arrays.gen.go deleted file mode 100644 index 24d7bf972fe..00000000000 --- a/pkg/data/gen/arrays.gen.go +++ /dev/null @@ -1,142 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! -// Source: arrays.gen.go.tmpl - -package gen - -import ( - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -type FloatValues interface { - Copy(*tsdb.FloatArray) -} - -type floatArray struct { - tsdb.FloatArray -} - -func newFloatArrayLen(sz int) *floatArray { - return &floatArray{ - FloatArray: tsdb.FloatArray{ - Timestamps: make([]int64, sz), - Values: make([]float64, sz), - }, - } -} - -func (a *floatArray) Encode(b []byte) ([]byte, error) { - return tsm1.EncodeFloatArrayBlock(&a.FloatArray, b) -} - -func (a *floatArray) Copy(dst *tsdb.FloatArray) { - dst.Timestamps = append(dst.Timestamps[:0], a.Timestamps...) - dst.Values = append(dst.Values[:0], a.Values...) -} - -type IntegerValues interface { - Copy(*tsdb.IntegerArray) -} - -type integerArray struct { - tsdb.IntegerArray -} - -func newIntegerArrayLen(sz int) *integerArray { - return &integerArray{ - IntegerArray: tsdb.IntegerArray{ - Timestamps: make([]int64, sz), - Values: make([]int64, sz), - }, - } -} - -func (a *integerArray) Encode(b []byte) ([]byte, error) { - return tsm1.EncodeIntegerArrayBlock(&a.IntegerArray, b) -} - -func (a *integerArray) Copy(dst *tsdb.IntegerArray) { - dst.Timestamps = append(dst.Timestamps[:0], a.Timestamps...) - dst.Values = append(dst.Values[:0], a.Values...) -} - -type UnsignedValues interface { - Copy(*tsdb.UnsignedArray) -} - -type unsignedArray struct { - tsdb.UnsignedArray -} - -func newUnsignedArrayLen(sz int) *unsignedArray { - return &unsignedArray{ - UnsignedArray: tsdb.UnsignedArray{ - Timestamps: make([]int64, sz), - Values: make([]uint64, sz), - }, - } -} - -func (a *unsignedArray) Encode(b []byte) ([]byte, error) { - return tsm1.EncodeUnsignedArrayBlock(&a.UnsignedArray, b) -} - -func (a *unsignedArray) Copy(dst *tsdb.UnsignedArray) { - dst.Timestamps = append(dst.Timestamps[:0], a.Timestamps...) - dst.Values = append(dst.Values[:0], a.Values...) 
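	// (As in the other generated Copy methods, appending into dst's slices
	// re-sliced to length zero reuses their existing capacity, so refilling
	// the same destination array avoids reallocation.)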
-} - -type StringValues interface { - Copy(*tsdb.StringArray) -} - -type stringArray struct { - tsdb.StringArray -} - -func newStringArrayLen(sz int) *stringArray { - return &stringArray{ - StringArray: tsdb.StringArray{ - Timestamps: make([]int64, sz), - Values: make([]string, sz), - }, - } -} - -func (a *stringArray) Encode(b []byte) ([]byte, error) { - return tsm1.EncodeStringArrayBlock(&a.StringArray, b) -} - -func (a *stringArray) Copy(dst *tsdb.StringArray) { - dst.Timestamps = append(dst.Timestamps[:0], a.Timestamps...) - dst.Values = append(dst.Values[:0], a.Values...) -} - -type BooleanValues interface { - Copy(*tsdb.BooleanArray) -} - -type booleanArray struct { - tsdb.BooleanArray -} - -func newBooleanArrayLen(sz int) *booleanArray { - return &booleanArray{ - BooleanArray: tsdb.BooleanArray{ - Timestamps: make([]int64, sz), - Values: make([]bool, sz), - }, - } -} - -func (a *booleanArray) Encode(b []byte) ([]byte, error) { - return tsm1.EncodeBooleanArrayBlock(&a.BooleanArray, b) -} - -func (a *booleanArray) Copy(dst *tsdb.BooleanArray) { - dst.Timestamps = append(dst.Timestamps[:0], a.Timestamps...) - dst.Values = append(dst.Values[:0], a.Values...) -} diff --git a/pkg/data/gen/arrays.gen.go.tmpl b/pkg/data/gen/arrays.gen.go.tmpl deleted file mode 100644 index 98c7c4115b6..00000000000 --- a/pkg/data/gen/arrays.gen.go.tmpl +++ /dev/null @@ -1,36 +0,0 @@ -package gen - -import ( - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -{{range .}} -{{ $typename := print .name "Array" }} -{{ $tsdbname := print .Name "Array" }} -type {{.Name}}Values interface { - Copy(*tsdb.{{$tsdbname}}) -} - -type {{$typename}} struct { - tsdb.{{$tsdbname}} -} - -func new{{$tsdbname}}Len(sz int) *{{$typename}} { - return &{{$typename}}{ - {{$tsdbname}}: tsdb.{{$tsdbname}}{ - Timestamps: make([]int64, sz), - Values: make([]{{.Type}}, sz), - }, - } -} - -func (a *{{$typename}}) Encode(b []byte) ([]byte, error) { - return tsm1.Encode{{$tsdbname}}Block(&a.{{$tsdbname}}, b) -} - -func (a *{{$typename}}) Copy(dst *tsdb.{{$tsdbname}}) { - dst.Timestamps = append(dst.Timestamps[:0], a.Timestamps...) - dst.Values = append(dst.Values[:0], a.Values...) 
-} -{{end}} diff --git a/pkg/data/gen/gen.go b/pkg/data/gen/gen.go deleted file mode 100644 index 7af8a05d806..00000000000 --- a/pkg/data/gen/gen.go +++ /dev/null @@ -1,4 +0,0 @@ -package gen - -//go:generate tmpl -data=@types.tmpldata arrays.gen.go.tmpl values.gen.go.tmpl values_sequence.gen.go.tmpl -//go:generate stringer -type=precision -trimprefix=precision diff --git a/pkg/data/gen/merged_series_generator.go b/pkg/data/gen/merged_series_generator.go deleted file mode 100644 index b948ac7ce76..00000000000 --- a/pkg/data/gen/merged_series_generator.go +++ /dev/null @@ -1,144 +0,0 @@ -package gen - -import ( - "container/heap" - "math" - - "github.com/influxdata/influxdb/v2/models" -) - -type mergedSeriesGenerator struct { - heap seriesGeneratorHeap - last constSeries - n int64 - first bool -} - -func NewMergedSeriesGenerator(s []SeriesGenerator) SeriesGenerator { - if len(s) == 0 { - return nil - } else if len(s) == 1 { - return s[0] - } - - msg := &mergedSeriesGenerator{first: true, n: math.MaxInt64} - msg.heap.init(s) - return msg -} - -func NewMergedSeriesGeneratorLimit(s []SeriesGenerator, n int64) SeriesGenerator { - if len(s) == 0 { - return nil - } - - msg := &mergedSeriesGenerator{first: true, n: n} - msg.heap.init(s) - return msg -} - -func (s *mergedSeriesGenerator) Next() bool { - if len(s.heap.items) == 0 { - return false - } - - if s.n > 0 { - s.n-- - if !s.first { - top := s.heap.items[0] - s.last.CopyFrom(top) // capture last key for duplicate checking - - for { - if top.Next() { - if len(s.heap.items) > 1 { - heap.Fix(&s.heap, 0) - } - } else { - heap.Pop(&s.heap) - if len(s.heap.items) == 0 { - return false - } - } - - top = s.heap.items[0] - if CompareSeries(&s.last, top) == 0 { - // duplicate key, get next - continue - } - return true - } - } - - s.first = false - return true - } - - return false -} - -func (s *mergedSeriesGenerator) Key() []byte { - return s.heap.items[0].Key() -} - -func (s *mergedSeriesGenerator) Name() []byte { - return s.heap.items[0].Name() -} - -func (s *mergedSeriesGenerator) Tags() models.Tags { - return s.heap.items[0].Tags() -} - -func (s *mergedSeriesGenerator) Field() []byte { - return s.heap.items[0].Field() -} - -func (s *mergedSeriesGenerator) FieldType() models.FieldType { - return s.heap.items[0].FieldType() -} - -func (s *mergedSeriesGenerator) TimeValuesGenerator() TimeValuesSequence { - return s.heap.items[0].TimeValuesGenerator() -} - -type seriesGeneratorHeap struct { - items []SeriesGenerator -} - -func (h *seriesGeneratorHeap) init(results []SeriesGenerator) { - if cap(h.items) < len(results) { - h.items = make([]SeriesGenerator, 0, len(results)) - } else { - h.items = h.items[:0] - } - - for _, rs := range results { - if rs.Next() { - h.items = append(h.items, rs) - } - } - heap.Init(h) -} - -func (h *seriesGeneratorHeap) Less(i, j int) bool { - return CompareSeries(h.items[i], h.items[j]) == -1 -} - -func (h *seriesGeneratorHeap) Len() int { - return len(h.items) -} - -func (h *seriesGeneratorHeap) Swap(i, j int) { - h.items[i], h.items[j] = h.items[j], h.items[i] -} - -func (h *seriesGeneratorHeap) Push(x interface{}) { - panic("not implemented") -} - -func (h *seriesGeneratorHeap) Pop() interface{} { - old := h.items - n := len(old) - item := old[n-1] - old[n-1] = nil - h.items = old[0 : n-1] - return item -} diff --git a/pkg/data/gen/merged_series_generator_test.go b/pkg/data/gen/merged_series_generator_test.go deleted file mode 100644 index 0d22907b4f0..00000000000 --- 
a/pkg/data/gen/merged_series_generator_test.go +++ /dev/null @@ -1,213 +0,0 @@ -package gen - -import ( - "fmt" - "math" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" -) - -func sg(name, prefix, field string, counts ...int) SeriesGenerator { - spec := TimeSequenceSpec{Count: 1, Start: time.Unix(0, 0), Delta: time.Second} - ts := NewTimestampSequenceFromSpec(spec) - vs := NewFloatConstantValuesSequence(1) - vg := NewTimeFloatValuesSequence(spec.Count, ts, vs) - return NewSeriesGenerator([]byte(name), []byte(field), vg, NewTagsValuesSequenceCounts(prefix, counts)) -} - -func tags(sb *strings.Builder, prefix string, vals []int) { - sb.WriteByte(',') - - // max tag width - tw := int(math.Ceil(math.Log10(float64(len(vals))))) - tf := fmt.Sprintf("%s%%0%dd=value%%d", prefix, tw) - tvs := make([]string, len(vals)) - for i := range vals { - tvs[i] = fmt.Sprintf(tf, i, vals[i]) - } - sb.WriteString(strings.Join(tvs, ",")) -} - -func line(name, prefix, field string, vals ...int) string { - var sb strings.Builder - sb.WriteString(name) - tags(&sb, prefix, vals) - sb.WriteString("#!~#") - sb.WriteString(field) - return sb.String() -} - -func seriesGeneratorString(sg SeriesGenerator) []string { - var lines []string - for sg.Next() { - lines = append(lines, fmt.Sprintf("%s#!~#%s", string(sg.Key()), string(sg.Field()))) - } - return lines -} - -func TestNewMergedSeriesGenerator(t *testing.T) { - tests := []struct { - n string - s []SeriesGenerator - exp []string - }{ - { - n: "single", - s: []SeriesGenerator{ - sg("cpu", "t", "f0", 2, 1), - }, - exp: []string{ - line("cpu", "t", "f0", 0, 0), - line("cpu", "t", "f0", 1, 0), - }, - }, - { - n: "multiple,interleaved", - s: []SeriesGenerator{ - sg("cpu", "t", "f0", 2, 1), - sg("cpu", "t", "f1", 2, 1), - }, - exp: []string{ - line("cpu", "t", "f0", 0, 0), - line("cpu", "t", "f1", 0, 0), - line("cpu", "t", "f0", 1, 0), - line("cpu", "t", "f1", 1, 0), - }, - }, - { - n: "multiple,sequential", - s: []SeriesGenerator{ - sg("cpu", "t", "f0", 2), - sg("cpu", "u", "f0", 2, 1), - }, - exp: []string{ - line("cpu", "t", "f0", 0), - line("cpu", "t", "f0", 1), - line("cpu", "u", "f0", 0, 0), - line("cpu", "u", "f0", 1, 0), - }, - }, - { - n: "multiple,sequential", - s: []SeriesGenerator{ - sg("m1", "t", "f0", 2, 1), - sg("m0", "t", "f0", 2, 1), - }, - exp: []string{ - line("m0", "t", "f0", 0, 0), - line("m0", "t", "f0", 1, 0), - line("m1", "t", "f0", 0, 0), - line("m1", "t", "f0", 1, 0), - }, - }, - { - // ensure duplicates are removed - n: "duplicates", - s: []SeriesGenerator{ - sg("cpu", "t", "f0", 2, 1), - sg("cpu", "t", "f0", 2, 1), - }, - exp: []string{ - line("cpu", "t", "f0", 0, 0), - line("cpu", "t", "f0", 1, 0), - }, - }, - { - // ensure duplicates are removed, but non-dupes from same SeriesGenerator - // are still included - n: "duplicates,multiple,interleaved", - s: []SeriesGenerator{ - sg("cpu", "t", "f0", 2, 1), - sg("cpu", "t", "f1", 2, 1), - sg("cpu", "t", "f0", 2, 1), - sg("cpu", "t", "f1", 3, 1), - }, - exp: []string{ - line("cpu", "t", "f0", 0, 0), - line("cpu", "t", "f1", 0, 0), - line("cpu", "t", "f0", 1, 0), - line("cpu", "t", "f1", 1, 0), - line("cpu", "t", "f1", 2, 0), - }, - }, - } - for _, tt := range tests { - t.Run(tt.n, func(t *testing.T) { - sg := NewMergedSeriesGenerator(tt.s) - if got := seriesGeneratorString(sg); !cmp.Equal(got, tt.exp) { - t.Errorf("unpexected -got/+exp\n%s", cmp.Diff(got, tt.exp)) - } - }) - } -} - -func TestNewMergedSeriesGeneratorLimit(t *testing.T) { - tests := []struct { - n string - s 
[]SeriesGenerator - lim int64 - exp []string - }{ - { - n: "single", - s: []SeriesGenerator{ - sg("cpu", "t", "f0", 4, 1), - }, - lim: 2, - exp: []string{ - line("cpu", "t", "f0", 0, 0), - line("cpu", "t", "f0", 1, 0), - }, - }, - { - n: "multiple,interleaved", - s: []SeriesGenerator{ - sg("cpu", "t", "f0", 2, 1), - sg("cpu", "t", "f1", 2, 1), - }, - lim: 3, - exp: []string{ - line("cpu", "t", "f0", 0, 0), - line("cpu", "t", "f1", 0, 0), - line("cpu", "t", "f0", 1, 0), - }, - }, - { - n: "multiple,sequential", - s: []SeriesGenerator{ - sg("cpu", "t", "f0", 2), - sg("cpu", "u", "f0", 2, 1), - }, - lim: 2, - exp: []string{ - line("cpu", "t", "f0", 0), - line("cpu", "t", "f0", 1), - }, - }, - { - n: "multiple,sequential", - s: []SeriesGenerator{ - sg("m1", "t", "f0", 2, 1), - sg("m0", "t", "f0", 2, 1), - }, - lim: 4, - exp: []string{ - line("m0", "t", "f0", 0, 0), - line("m0", "t", "f0", 1, 0), - line("m1", "t", "f0", 0, 0), - line("m1", "t", "f0", 1, 0), - }, - }, - } - for _, tt := range tests { - t.Run(tt.n, func(t *testing.T) { - sg := NewMergedSeriesGeneratorLimit(tt.s, tt.lim) - if got := seriesGeneratorString(sg); !cmp.Equal(got, tt.exp) { - t.Errorf("unpexected -got/+exp\n%s", cmp.Diff(got, tt.exp)) - } - }) - } -} diff --git a/pkg/data/gen/precision_string.go b/pkg/data/gen/precision_string.go deleted file mode 100644 index e53b4712bad..00000000000 --- a/pkg/data/gen/precision_string.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by "stringer -type=precision -trimprefix=precision"; DO NOT EDIT. - -package gen - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[precisionMillisecond-0] - _ = x[precisionNanosecond-1] - _ = x[precisionMicrosecond-2] - _ = x[precisionSecond-3] - _ = x[precisionMinute-4] - _ = x[precisionHour-5] -} - -const _precision_name = "MillisecondNanosecondMicrosecondSecondMinuteHour" - -var _precision_index = [...]uint8{0, 11, 21, 32, 38, 44, 48} - -func (i precision) String() string { - if i >= precision(len(_precision_index)-1) { - return "precision(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _precision_name[_precision_index[i]:_precision_index[i+1]] -} diff --git a/pkg/data/gen/schema.go b/pkg/data/gen/schema.go deleted file mode 100644 index 63068767464..00000000000 --- a/pkg/data/gen/schema.go +++ /dev/null @@ -1,257 +0,0 @@ -package gen - -import ( - "fmt" -) - -type Visitor interface { - Visit(node SchemaNode) (w Visitor) -} - -type SchemaNode interface { - node() -} - -type Schema struct { - Title string - Version string - SeriesLimit *SeriesLimit `toml:"series-limit"` - Measurements Measurements -} - -func (*Schema) node() {} - -type Measurements []Measurement - -func (Measurements) node() {} - -type Tags []Tag - -func (Tags) node() {} - -type Fields []Field - -func (Fields) node() {} - -type Measurement struct { - Name string - SeriesLimit *SeriesLimit `toml:"series-limit"` - Sample *sample - Tags Tags - Fields Fields -} - -func (*Measurement) node() {} - -type TagSource interface { - fmt.Stringer - SchemaNode - tagsource() -} - -type Tag struct { - Name string - Source TagSource -} - -func (*Tag) node() {} - -type TagArraySource struct { - Values []string -} - -func (*TagArraySource) node() {} -func (*TagArraySource) tagsource() {} - -func (s *TagArraySource) String() string { - return fmt.Sprintf("array, source=%#v", s.Values) -} - -type TagSequenceSource struct { - Format 
string - Start int64 - Count int64 -} - -func (*TagSequenceSource) node() {} -func (*TagSequenceSource) tagsource() {} - -func (t *TagSequenceSource) String() string { - return fmt.Sprintf("sequence, prefix=%q, range=[%d,%d)", t.Format, t.Start, t.Start+t.Count) -} - -type TagFileSource struct { - Path string -} - -func (*TagFileSource) node() {} -func (*TagFileSource) tagsource() {} - -func (s *TagFileSource) String() string { - return fmt.Sprintf("file, path=%s", s.Path) -} - -type FieldSource interface { - fmt.Stringer - SchemaNode - fieldsource() -} - -type Field struct { - Name string - Count int64 - TimePrecision *precision `toml:"time-precision"` // TimePrecision determines the precision for generated timestamp values - TimeInterval *duration `toml:"time-interval"` // TimeInterval determines the duration between timestamp values - Source FieldSource -} - -func (t *Field) TimeSequenceSpec() TimeSequenceSpec { - if t.TimeInterval != nil { - return TimeSequenceSpec{ - Count: int(t.Count), - Delta: t.TimeInterval.Duration, - } - } - - if t.TimePrecision != nil { - return TimeSequenceSpec{ - Count: int(t.Count), - Precision: t.TimePrecision.ToDuration(), - } - } - - panic("TimeInterval and TimePrecision are nil") -} - -func (*Field) node() {} - -type FieldConstantValue struct { - Value interface{} -} - -func (*FieldConstantValue) node() {} -func (*FieldConstantValue) fieldsource() {} - -func (f *FieldConstantValue) String() string { - return fmt.Sprintf("constant, source=%#v", f.Value) -} - -type FieldArraySource struct { - Value interface{} -} - -func (*FieldArraySource) node() {} -func (*FieldArraySource) fieldsource() {} - -func (f *FieldArraySource) String() string { - return fmt.Sprintf("array, source=%#v", f.Value) -} - -type FieldFloatRandomSource struct { - Seed int64 - Min, Max float64 -} - -func (*FieldFloatRandomSource) node() {} -func (*FieldFloatRandomSource) fieldsource() {} - -func (f *FieldFloatRandomSource) String() string { - return fmt.Sprintf("rand, seed=%d, min=%f, max=%f", f.Seed, f.Max, f.Max) -} - -type FieldIntegerZipfSource struct { - Seed int64 - S, V float64 - IMAX uint64 -} - -func (*FieldIntegerZipfSource) node() {} -func (*FieldIntegerZipfSource) fieldsource() {} - -func (f *FieldIntegerZipfSource) String() string { - return fmt.Sprintf("rand, seed=%d, s=%f, v=%f, imax=%d", f.Seed, f.S, f.V, f.IMAX) -} - -type VisitorFn func(node SchemaNode) bool - -func (fn VisitorFn) Visit(node SchemaNode) (w Visitor) { - if fn(node) { - return fn - } - return nil -} - -// WalkDown performs a pre-order, depth-first traversal of the graph, calling v for each node. -// Pre-order starts by calling the visitor for the root and each child as it traverses down -// the graph to the leaves. -func WalkDown(v Visitor, node SchemaNode) { - walk(v, node, false) -} - -// WalkUp performs a post-order, depth-first traversal of the graph, calling v for each node. -// Post-order starts by calling the visitor for the leaves then each parent as it traverses up -// the graph to the root. 
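//
// A minimal usage sketch (illustrative; schema is assumed to be a previously
// decoded *Schema): counting the declared fields with the VisitorFn adapter
// defined above.
//
//	var fields int
//	WalkUp(VisitorFn(func(node SchemaNode) bool {
//		if _, ok := node.(*Field); ok {
//			fields++
//		}
//		return true
//	}), schema)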
-func WalkUp(v Visitor, node SchemaNode) { - walk(v, node, true) -} - -func walk(v Visitor, node SchemaNode, up bool) Visitor { - if v == nil { - return nil - } - - if !up { - if v = v.Visit(node); v == nil { - return nil - } - } - - switch n := node.(type) { - case *Schema: - walk(v, n.Measurements, up) - - case Measurements: - v := v - for i := range n { - v = walk(v, &n[i], up) - } - - case *Measurement: - v := v - v = walk(v, n.Tags, up) - walk(v, n.Fields, up) - - case Fields: - v := v - for i := 0; i < len(n); i++ { - v = walk(v, &n[i], up) - } - - case Tags: - v := v - for i := 0; i < len(n); i++ { - v = walk(v, &n[i], up) - } - - case *Tag: - walk(v, n.Source, up) - - case *TagArraySource, *TagSequenceSource, *TagFileSource: - // nothing to do - - case *Field: - walk(v, n.Source, up) - - case *FieldConstantValue, *FieldArraySource, *FieldFloatRandomSource, *FieldIntegerZipfSource: - // nothing to do - - default: - panic(fmt.Sprintf("schema.Walk: unexpected node type %T", n)) - } - - if up && v != nil { - v = v.Visit(node) - } - - return v -} diff --git a/pkg/data/gen/sequence.go b/pkg/data/gen/sequence.go deleted file mode 100644 index e600471bd9c..00000000000 --- a/pkg/data/gen/sequence.go +++ /dev/null @@ -1,96 +0,0 @@ -package gen - -import ( - "fmt" - "math" -) - -type Sequence interface { - Next() bool - Value() string -} - -type CountableSequence interface { - Sequence - Count() int -} - -type CounterByteSequence struct { - format string - nfmt string - val string - s int - i int - end int -} - -func NewCounterByteSequenceCount(n int) *CounterByteSequence { - return NewCounterByteSequence("value%s", 0, n) -} - -func NewCounterByteSequence(format string, start, end int) *CounterByteSequence { - s := &CounterByteSequence{ - format: format, - nfmt: fmt.Sprintf("%%0%dd", int(math.Ceil(math.Log10(float64(end))))), - s: start, - i: start, - end: end, - } - s.update() - return s -} - -func (s *CounterByteSequence) Next() bool { - s.i++ - if s.i >= s.end { - s.i = s.s - } - s.update() - return true -} - -func (s *CounterByteSequence) update() { - s.val = fmt.Sprintf(s.format, fmt.Sprintf(s.nfmt, s.i)) -} - -func (s *CounterByteSequence) Value() string { return s.val } -func (s *CounterByteSequence) Count() int { return s.end - s.s } - -type StringArraySequence struct { - vals []string - c int - i int -} - -func NewStringArraySequence(vals []string) *StringArraySequence { - return &StringArraySequence{vals: sortDedupStrings(vals)} -} - -func (s *StringArraySequence) Next() bool { - s.i++ - if s.i == len(s.vals) { - s.i = 0 - } - s.c = s.i - return true -} - -func (s *StringArraySequence) Value() string { - return s.vals[s.c] -} - -func (s *StringArraySequence) Count() int { - return len(s.vals) -} - -type StringConstantSequence struct { - val string -} - -func NewStringConstantSequence(val string) *StringConstantSequence { - return &StringConstantSequence{val: val} -} - -func (s *StringConstantSequence) Next() bool { return true } -func (s *StringConstantSequence) Value() string { return s.val } -func (s *StringConstantSequence) Count() int { return 1 } diff --git a/pkg/data/gen/series.go b/pkg/data/gen/series.go deleted file mode 100644 index 81d7703f3fd..00000000000 --- a/pkg/data/gen/series.go +++ /dev/null @@ -1,63 +0,0 @@ -package gen - -import ( - "bytes" -) - -type seriesKeyField interface { - // Key returns the series key. - // The returned value may be cached. - Key() []byte - - // Field returns the name of the field. 
- // The returned value may be modified by a subsequent call to Next. - Field() []byte -} - -type constSeries struct { - key []byte - field []byte -} - -func (s *constSeries) Key() []byte { return s.key } -func (s *constSeries) Field() []byte { return s.field } - -var nilSeries seriesKeyField = &constSeries{} - -// Compare returns an integer comparing two SeriesGenerator instances -// lexicographically. -// The result will be 0 if a==b, -1 if a < b, and +1 if a > b. -// A nil argument is equivalent to an empty SeriesGenerator. -func CompareSeries(a, b seriesKeyField) int { - if a == nil { - a = nilSeries - } - if b == nil { - b = nilSeries - } - - switch res := bytes.Compare(a.Key(), b.Key()); res { - case 0: - return bytes.Compare(a.Field(), b.Field()) - default: - return res - } -} - -func (s *constSeries) CopyFrom(a seriesKeyField) { - key := a.Key() - if cap(s.key) < len(key) { - s.key = make([]byte, len(key)) - } else { - s.key = s.key[:len(key)] - } - copy(s.key, key) - - field := a.Field() - if cap(s.field) < len(field) { - s.field = make([]byte, len(field)) - } else { - s.field = s.field[:len(field)] - } - copy(s.field, field) -} diff --git a/pkg/data/gen/series_generator.go b/pkg/data/gen/series_generator.go deleted file mode 100644 index 1894f5944c2..00000000000 --- a/pkg/data/gen/series_generator.go +++ /dev/null @@ -1,181 +0,0 @@ -package gen - -import ( - "math" - "time" - - "github.com/influxdata/influxdb/v2/models" -) - -type SeriesGenerator interface { - // Next advances the series generator to the next series key. - Next() bool - - // Key returns the series key. - // The returned value may be cached. - Key() []byte - - // Name returns the name of the measurement. - // The returned value may be modified by a subsequent call to Next. - Name() []byte - - // Tags returns the tag set. - // The returned value may be modified by a subsequent call to Next. - Tags() models.Tags - - // Field returns the name of the field. - // The returned value may be modified by a subsequent call to Next. - Field() []byte - - // FieldType returns the data type for the field. - FieldType() models.FieldType - - // TimeValuesGenerator returns a values sequence for the current series. - TimeValuesGenerator() TimeValuesSequence -} - -type TimeSequenceSpec struct { - // Count specifies the maximum number of values to generate. - Count int - - // Start specifies the starting time for the values. - Start time.Time - - // Delta specifies the interval between time stamps. 
- Delta time.Duration - - // Precision specifies the precision of timestamp intervals - Precision time.Duration -} - -func (ts TimeSequenceSpec) ForTimeRange(tr TimeRange) TimeSequenceSpec { - // Truncate time range - if ts.Delta > 0 { - tr = tr.Truncate(ts.Delta) - } else { - tr = tr.Truncate(ts.Precision) - } - - ts.Start = tr.Start - - if ts.Delta > 0 { - intervals := int(tr.End.Sub(tr.Start) / ts.Delta) - if intervals > ts.Count { - // if the number of intervals in the specified time range exceeds - // the maximum count, move the start forward to limit the number of values - ts.Start = tr.End.Add(-time.Duration(ts.Count) * ts.Delta) - } else { - ts.Count = intervals - } - } else { - ts.Delta = tr.End.Sub(tr.Start) / time.Duration(ts.Count) - if ts.Delta < ts.Precision { - // count is too high for the range of time and precision - ts.Count = int(tr.End.Sub(tr.Start) / ts.Precision) - ts.Delta = ts.Precision - } else { - ts.Delta = ts.Delta.Round(ts.Precision) - } - ts.Precision = 0 - } - - return ts -} - -type TimeRange struct { - Start time.Time - End time.Time -} - -func (t TimeRange) Truncate(d time.Duration) TimeRange { - return TimeRange{ - Start: t.Start.Truncate(d), - End: t.End.Truncate(d), - } -} - -type TimeValuesSequence interface { - Reset() - Next() bool - Values() Values - ValueType() models.FieldType -} - -type Values interface { - MinTime() int64 - MaxTime() int64 - Encode([]byte) ([]byte, error) -} - -type cache struct { - key []byte - tags models.Tags -} - -type seriesGenerator struct { - name []byte - tags TagsSequence - field []byte - vg TimeValuesSequence - n int64 - - c cache -} - -func NewSeriesGenerator(name []byte, field []byte, vg TimeValuesSequence, tags TagsSequence) SeriesGenerator { - return NewSeriesGeneratorLimit(name, field, vg, tags, math.MaxInt64) -} - -func NewSeriesGeneratorLimit(name []byte, field []byte, vg TimeValuesSequence, tags TagsSequence, n int64) SeriesGenerator { - return &seriesGenerator{ - name: name, - field: field, - tags: tags, - vg: vg, - n: n, - } -} - -func (g *seriesGenerator) Next() bool { - if g.n > 0 { - g.n-- - if g.tags.Next() { - g.c = cache{} - g.vg.Reset() - return true - } - g.n = 0 - } - - return false -} - -func (g *seriesGenerator) Key() []byte { - if len(g.c.key) == 0 { - g.c.key = models.MakeKey(g.name, g.tags.Value()) - } - return g.c.key -} - -func (g *seriesGenerator) Name() []byte { - return g.name -} - -func (g *seriesGenerator) Tags() models.Tags { - if len(g.c.tags) == 0 { - g.c.tags = g.tags.Value().Clone() - } - return g.c.tags -} - -func (g *seriesGenerator) Field() []byte { - return g.field -} - -func (g *seriesGenerator) FieldType() models.FieldType { - return g.vg.ValueType() -} - -func (g *seriesGenerator) TimeValuesGenerator() TimeValuesSequence { - return g.vg -} diff --git a/pkg/data/gen/series_generator_test.go b/pkg/data/gen/series_generator_test.go deleted file mode 100644 index bed09c31680..00000000000 --- a/pkg/data/gen/series_generator_test.go +++ /dev/null @@ -1,161 +0,0 @@ -package gen - -import ( - "testing" - "time" - - "github.com/google/go-cmp/cmp" -) - -func TestTimeSequenceSpec_ForTimeRange(t *testing.T) { - secs := func(sec int64) time.Time { - return time.Unix(sec, 0).UTC() - } - - tests := []struct { - name string - ts TimeSequenceSpec - tr TimeRange - exp TimeSequenceSpec - }{ - { - // this test verifies Count is reduced - // as the range has fewer intervals than Count * Delta - name: "delta/range_fewer", - ts: TimeSequenceSpec{ - Count: 100, - Delta: 10 * time.Second, - }, 
- tr: TimeRange{ - Start: secs(0), - End: secs(100), - }, - exp: TimeSequenceSpec{ - Count: 10, - Start: secs(0), - Delta: 10 * time.Second, - }, - }, - { - // this test verifies Count is not adjusted - // as the range equals Count * Delta - name: "delta/range_equal", - ts: TimeSequenceSpec{ - Count: 100, - Delta: 10 * time.Second, - }, - tr: TimeRange{ - Start: secs(0), - End: secs(1000), - }, - exp: TimeSequenceSpec{ - Count: 100, - Start: secs(0), - Delta: 10 * time.Second, - }, - }, - { - // this test verifies the Start is adjusted to - // limit the number of intervals to Count - name: "delta/range_greater", - ts: TimeSequenceSpec{ - Count: 100, - Delta: 10 * time.Second, - }, - tr: TimeRange{ - Start: secs(0), - End: secs(2000), - }, - exp: TimeSequenceSpec{ - Count: 100, - Start: secs(1000), - Delta: 10 * time.Second, - }, - }, - - { - // this test verifies Count is reduced - // as the time range has fewer intervals than Count * Precision - name: "precision/range_fewer", - ts: TimeSequenceSpec{ - Count: 100, - Precision: 10 * time.Second, - }, - tr: TimeRange{ - Start: secs(0), - End: secs(100), - }, - exp: TimeSequenceSpec{ - Count: 10, - Start: secs(0), - Delta: 10 * time.Second, - }, - }, - - { - // this test verifies Count is unchanged and Delta is a multiple - // of Precision, given the time range has more intervals - // than Count * Precision - name: "precision/range_greater", - ts: TimeSequenceSpec{ - Count: 100, - Precision: 10 * time.Second, - }, - tr: TimeRange{ - Start: secs(0), - End: secs(2000), - }, - exp: TimeSequenceSpec{ - Count: 100, - Start: secs(0), - Delta: 20 * time.Second, - }, - }, - - { - // this test verifies Count is unchanged and Delta is equal - // to Precision, given the time range has an equal number of - // intervals as Count * Precision - name: "precision/range_equal", - ts: TimeSequenceSpec{ - Count: 100, - Precision: 10 * time.Second, - }, - tr: TimeRange{ - Start: secs(0), - End: secs(1000), - }, - exp: TimeSequenceSpec{ - Count: 100, - Start: secs(0), - Delta: 10 * time.Second, - }, - }, - - { - // this test verifies Count is reduced - // as the range has fewer intervals than Count * Delta - name: "start/rounding", - ts: TimeSequenceSpec{ - Count: 100, - Delta: 10 * time.Second, - }, - tr: TimeRange{ - Start: secs(13), - End: secs(110), - }, - exp: TimeSequenceSpec{ - Count: 10, - Start: secs(10), - Delta: 10 * time.Second, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := tt.ts.ForTimeRange(tt.tr); !cmp.Equal(got, tt.exp) { - t.Errorf("unexpected value -got/+exp\n%s", cmp.Diff(got, tt.exp)) - } - }) - } -} diff --git a/pkg/data/gen/series_test.go b/pkg/data/gen/series_test.go deleted file mode 100644 index 0772cc75ca8..00000000000 --- a/pkg/data/gen/series_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package gen - -import ( - "testing" - - "github.com/google/go-cmp/cmp" -) - -func TestCompareSeries(t *testing.T) { - mk := func(k, f string) seriesKeyField { - return &constSeries{key: []byte(k), field: []byte(f)} - } - - tests := []struct { - name string - a seriesKeyField - b seriesKeyField - exp int - }{ - { - name: "nil a,b", - exp: 0, - }, - { - name: "a(nil) < b", - a: nil, - b: mk("cpu,t0=v0", "f0"), - exp: -1, - }, - { - name: "a > b(nil)", - a: mk("cpu,t0=v0", "f0"), - b: nil, - exp: 1, - }, - { - name: "a = b", - a: mk("cpu,t0=v0", "f0"), - b: mk("cpu,t0=v0", "f0"), - exp: 0, - }, - { - name: "a(f0) < b(f1)", - a: mk("cpu,t0=v0", "f0"), - b: mk("cpu,t0=v0", "f1"), - exp: -1, - }, - { - name: 
"a(v0) < b(v1)", - a: mk("cpu,t0=v0", "f0"), - b: mk("cpu,t0=v1", "f0"), - exp: -1, - }, - { - name: "a(f1) > b(f0)", - a: mk("cpu,t0=v0", "f1"), - b: mk("cpu,t0=v0", "f0"), - exp: 1, - }, - { - name: "a(v1) > b(v0)", - a: mk("cpu,t0=v1", "f0"), - b: mk("cpu,t0=v0", "f0"), - exp: 1, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := CompareSeries(tt.a, tt.b); got != tt.exp { - t.Errorf("unexpected value -got/+exp\n%s", cmp.Diff(got, tt.exp)) - } - }) - } -} diff --git a/pkg/data/gen/specs.go b/pkg/data/gen/specs.go deleted file mode 100644 index c6d0cfb70dd..00000000000 --- a/pkg/data/gen/specs.go +++ /dev/null @@ -1,570 +0,0 @@ -package gen - -import ( - "bufio" - "fmt" - "math/rand" - "os" - "path" - "path/filepath" - "sort" - "unicode/utf8" - - "github.com/BurntSushi/toml" - "github.com/influxdata/influxdb/v2/models" - "github.com/pkg/errors" -) - -type Spec struct { - SeriesLimit *int64 - Measurements []MeasurementSpec -} - -func NewSeriesGeneratorFromSpec(s *Spec, tr TimeRange) SeriesGenerator { - sg := make([]SeriesGenerator, len(s.Measurements)) - for i := range s.Measurements { - sg[i] = newSeriesGeneratorFromMeasurementSpec(&s.Measurements[i], tr) - } - if s.SeriesLimit == nil { - return NewMergedSeriesGenerator(sg) - } - return NewMergedSeriesGeneratorLimit(sg, *s.SeriesLimit) -} - -type MeasurementSpec struct { - Name string - SeriesLimit *SeriesLimit - TagsSpec *TagsSpec - FieldValuesSpec *FieldValuesSpec -} - -func newSeriesGeneratorFromMeasurementSpec(ms *MeasurementSpec, tr TimeRange) SeriesGenerator { - if ms.SeriesLimit == nil { - return NewSeriesGenerator( - []byte(ms.Name), - []byte(ms.FieldValuesSpec.Name), - newTimeValuesSequenceFromFieldValuesSpec(ms.FieldValuesSpec, tr), - newTagsSequenceFromTagsSpec(ms.TagsSpec)) - } - return NewSeriesGeneratorLimit( - []byte(ms.Name), - []byte(ms.FieldValuesSpec.Name), - newTimeValuesSequenceFromFieldValuesSpec(ms.FieldValuesSpec, tr), - newTagsSequenceFromTagsSpec(ms.TagsSpec), - int64(*ms.SeriesLimit)) -} - -// NewTimeValuesSequenceFn returns a TimeValuesSequence that will generate a -// sequence of values based on the spec. -type NewTimeValuesSequenceFn func(spec TimeSequenceSpec) TimeValuesSequence - -type NewTagsValuesSequenceFn func() TagsSequence - -type NewCountableSequenceFn func() CountableSequence - -type TagsSpec struct { - Tags []*TagValuesSpec - Sample *sample -} - -func newTagsSequenceFromTagsSpec(ts *TagsSpec) TagsSequence { - var keys []string - var vals []CountableSequence - for _, spec := range ts.Tags { - keys = append(keys, spec.TagKey) - vals = append(vals, spec.Values()) - } - - var opts []tagsValuesOption - if ts.Sample != nil && *ts.Sample != 1.0 { - opts = append(opts, TagValuesSampleOption(float64(*ts.Sample))) - } - - return NewTagsValuesSequenceKeysValues(keys, vals, opts...) 
-} - -type TagValuesSpec struct { - TagKey string - Values NewCountableSequenceFn -} - -type FieldValuesSpec struct { - TimeSequenceSpec - Name string - DataType models.FieldType - Values NewTimeValuesSequenceFn -} - -func newTimeValuesSequenceFromFieldValuesSpec(fs *FieldValuesSpec, tr TimeRange) TimeValuesSequence { - return fs.Values(fs.TimeSequenceSpec.ForTimeRange(tr)) -} - -func NewSpecFromToml(s string) (*Spec, error) { - var out Schema - if _, err := toml.Decode(s, &out); err != nil { - return nil, err - } - return NewSpecFromSchema(&out) -} - -func NewSpecFromPath(p string) (*Spec, error) { - var err error - p, err = filepath.Abs(p) - if err != nil { - return nil, err - } - - var out Schema - if _, err := toml.DecodeFile(p, &out); err != nil { - return nil, err - } - return newSpecFromSchema(&out, schemaDir(path.Dir(p))) -} - -func NewSchemaFromPath(path string) (*Schema, error) { - var out Schema - if _, err := toml.DecodeFile(path, &out); err != nil { - return nil, err - } - return &out, nil -} - -type schemaToSpecState int - -const ( - stateOk schemaToSpecState = iota - stateErr -) - -type schemaToSpec struct { - schemaDir string - stack []interface{} - state schemaToSpecState - spec *Spec - err error -} - -func (s *schemaToSpec) push(v interface{}) { - s.stack = append(s.stack, v) -} - -func (s *schemaToSpec) pop() interface{} { - tail := len(s.stack) - 1 - v := s.stack[tail] - s.stack[tail] = nil - s.stack = s.stack[:tail] - return v -} - -func (s *schemaToSpec) peek() interface{} { - if len(s.stack) == 0 { - return nil - } - return s.stack[len(s.stack)-1] -} - -func (s *schemaToSpec) Visit(node SchemaNode) (w Visitor) { - switch s.state { - case stateOk: - if s.visit(node) { - return s - } - s.state = stateErr - - case stateErr: - s.visitErr(node) - } - - return nil -} - -func (s *schemaToSpec) visit(node SchemaNode) bool { - switch n := node.(type) { - case *Schema: - s.spec.Measurements = s.pop().([]MeasurementSpec) - if n.SeriesLimit != nil { - sl := int64(*n.SeriesLimit) - s.spec.SeriesLimit = &sl - } - - case Measurements: - // flatten measurements - var mss []MeasurementSpec - for { - if specs, ok := s.peek().([]MeasurementSpec); ok { - s.pop() - mss = append(mss, specs...) - continue - } - break - } - sort.Slice(mss, func(i, j int) bool { - return mss[i].Name < mss[j].Name - }) - - // validate field types are homogeneous for a single measurement - mg := make(map[string]models.FieldType) - for i := range mss { - spec := &mss[i] - key := spec.Name + "." 
+ spec.FieldValuesSpec.Name - ft := spec.FieldValuesSpec.DataType - if dt, ok := mg[key]; !ok { - mg[key] = ft - } else if dt != ft { - s.err = fmt.Errorf("field %q data-type conflict, found %s and %s", - key, - dt, - ft) - return false - } - } - - s.push(mss) - - case *Measurement: - if len(n.Name) == 0 { - s.err = errors.New("missing measurement name") - return false - } - - fields := s.pop().([]*FieldValuesSpec) - tagsSpec := s.pop().(*TagsSpec) - - tagsSpec.Sample = n.Sample - - // default: sample 50% - if n.Sample == nil { - s := sample(0.5) - tagsSpec.Sample = &s - } - - if *tagsSpec.Sample <= 0.0 || *tagsSpec.Sample > 1.0 { - s.err = errors.New("invalid sample, must be 0 < sample ≤ 1.0") - return false - } - - var ms []MeasurementSpec - for _, spec := range fields { - ms = append(ms, MeasurementSpec{ - Name: n.Name, - SeriesLimit: n.SeriesLimit, - TagsSpec: tagsSpec, - FieldValuesSpec: spec, - }) - } - - // NOTE: sort each measurement name + field name to ensure series are produced - // in correct order - sort.Slice(ms, func(i, j int) bool { - return ms[i].FieldValuesSpec.Name < ms[j].FieldValuesSpec.Name - }) - s.push(ms) - - case Tags: - var ts TagsSpec - for { - if spec, ok := s.peek().(*TagValuesSpec); ok { - s.pop() - ts.Tags = append(ts.Tags, spec) - continue - } - break - } - // Tag keys must be sorted to produce a valid series key sequence - sort.Slice(ts.Tags, func(i, j int) bool { - return ts.Tags[i].TagKey < ts.Tags[j].TagKey - }) - - for i := 1; i < len(ts.Tags); i++ { - if ts.Tags[i-1].TagKey == ts.Tags[i].TagKey { - s.err = fmt.Errorf("duplicate tag keys %q", ts.Tags[i].TagKey) - return false - } - } - - s.push(&ts) - - case Fields: - // combine fields - var fs []*FieldValuesSpec - for { - if spec, ok := s.peek().(*FieldValuesSpec); ok { - s.pop() - fs = append(fs, spec) - continue - } - break - } - - sort.Slice(fs, func(i, j int) bool { - return fs[i].Name < fs[j].Name - }) - - for i := 1; i < len(fs); i++ { - if fs[i-1].Name == fs[i].Name { - s.err = fmt.Errorf("duplicate field names %q", fs[i].Name) - return false - } - } - - s.push(fs) - - case *Field: - fs, ok := s.peek().(*FieldValuesSpec) - if !ok { - panic(fmt.Sprintf("unexpected type %T", fs)) - } - - fs.TimeSequenceSpec = n.TimeSequenceSpec() - fs.Name = n.Name - - case *FieldConstantValue: - var fs FieldValuesSpec - switch v := n.Value.(type) { - case float64: - fs.DataType = models.Float - fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence { - return NewTimeFloatValuesSequence( - spec.Count, - NewTimestampSequenceFromSpec(spec), - NewFloatConstantValuesSequence(v), - ) - } - case int64: - fs.DataType = models.Integer - fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence { - return NewTimeIntegerValuesSequence( - spec.Count, - NewTimestampSequenceFromSpec(spec), - NewIntegerConstantValuesSequence(v), - ) - } - case string: - fs.DataType = models.String - fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence { - return NewTimeStringValuesSequence( - spec.Count, - NewTimestampSequenceFromSpec(spec), - NewStringConstantValuesSequence(v), - ) - } - case bool: - fs.DataType = models.Boolean - fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence { - return NewTimeBooleanValuesSequence( - spec.Count, - NewTimestampSequenceFromSpec(spec), - NewBooleanConstantValuesSequence(v), - ) - } - default: - panic(fmt.Sprintf("unexpected type %T", v)) - } - - s.push(&fs) - - case *FieldArraySource: - var fs FieldValuesSpec - switch v := n.Value.(type) { - case []float64: - fs.DataType = models.Float - 
fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence { - return NewTimeFloatValuesSequence( - spec.Count, - NewTimestampSequenceFromSpec(spec), - NewFloatArrayValuesSequence(v), - ) - } - case []int64: - fs.DataType = models.Integer - fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence { - return NewTimeIntegerValuesSequence( - spec.Count, - NewTimestampSequenceFromSpec(spec), - NewIntegerArrayValuesSequence(v), - ) - } - case []string: - fs.DataType = models.String - fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence { - return NewTimeStringValuesSequence( - spec.Count, - NewTimestampSequenceFromSpec(spec), - NewStringArrayValuesSequence(v), - ) - } - case []bool: - fs.DataType = models.Boolean - fs.Values = func(spec TimeSequenceSpec) TimeValuesSequence { - return NewTimeBooleanValuesSequence( - spec.Count, - NewTimestampSequenceFromSpec(spec), - NewBooleanArrayValuesSequence(v), - ) - } - default: - panic(fmt.Sprintf("unexpected type %T", v)) - } - - s.push(&fs) - - case *FieldFloatRandomSource: - var fs FieldValuesSpec - fs.DataType = models.Float - fs.Values = NewTimeValuesSequenceFn(func(spec TimeSequenceSpec) TimeValuesSequence { - return NewTimeFloatValuesSequence( - spec.Count, - NewTimestampSequenceFromSpec(spec), - NewFloatRandomValuesSequence(n.Min, n.Max, rand.New(rand.NewSource(n.Seed))), - ) - }) - s.push(&fs) - - case *FieldIntegerZipfSource: - var fs FieldValuesSpec - fs.DataType = models.Integer - fs.Values = NewTimeValuesSequenceFn(func(spec TimeSequenceSpec) TimeValuesSequence { - return NewTimeIntegerValuesSequence( - spec.Count, - NewTimestampSequenceFromSpec(spec), - NewIntegerZipfValuesSequence(n), - ) - }) - s.push(&fs) - - case *Tag: - s.push(&TagValuesSpec{ - TagKey: n.Name, - Values: s.pop().(NewCountableSequenceFn), - }) - - case *TagSequenceSource: - s.push(NewCountableSequenceFn(func() CountableSequence { - return NewCounterByteSequence(n.Format, int(n.Start), int(n.Start+n.Count)) - })) - - case *TagFileSource: - p, err := s.resolvePath(n.Path) - if err != nil { - s.err = err - return false - } - - lines, err := s.readLines(p) - if err != nil { - s.err = err - return false - } - - s.push(NewCountableSequenceFn(func() CountableSequence { - return NewStringArraySequence(lines) - })) - - case *TagArraySource: - s.push(NewCountableSequenceFn(func() CountableSequence { - return NewStringArraySequence(n.Values) - })) - - case nil: - - default: - panic(fmt.Sprintf("unexpected type %T", node)) - } - - return true -} - -func (s *schemaToSpec) visitErr(node SchemaNode) { - switch n := node.(type) { - case *Schema: - s.err = fmt.Errorf("error processing schema: %v", s.err) - case *Measurement: - s.err = fmt.Errorf("measurement %q: %v", n.Name, s.err) - case *Tag: - s.err = fmt.Errorf("tag %q: %v", n.Name, s.err) - case *Field: - s.err = fmt.Errorf("field %q: %v", n.Name, s.err) - } -} - -func (s *schemaToSpec) resolvePath(p string) (string, error) { - fullPath := os.ExpandEnv(p) - if !filepath.IsAbs(fullPath) { - fullPath = filepath.Join(s.schemaDir, fullPath) - } - - fi, err := os.Stat(fullPath) - if err != nil { - return "", fmt.Errorf("error resolving path %q: %v", p, err) - } - - if fi.IsDir() { - return "", fmt.Errorf("path %q is not a file: resolved to %s", p, fullPath) - } - - return fullPath, nil -} - -func (s *schemaToSpec) readLines(p string) ([]string, error) { - fp, err := s.resolvePath(p) - if err != nil { - return nil, err - } - - f, err := os.Open(fp) - if err != nil { - return nil, fmt.Errorf("path error: %v", err) - } - defer 
f.Close() - scan := bufio.NewScanner(f) - scan.Split(bufio.ScanLines) - - n := 0 - var lines []string - - for scan.Scan() { - if len(scan.Bytes()) == 0 { - // skip empty lines - continue - } - - if !utf8.Valid(scan.Bytes()) { - return nil, fmt.Errorf("path %q, invalid UTF-8 on line %d", p, n) - } - lines = append(lines, scan.Text()) - } - - if scan.Err() != nil { - return nil, scan.Err() - } - - return lines, nil -} - -type option func(s *schemaToSpec) - -func schemaDir(p string) option { - return func(s *schemaToSpec) { - s.schemaDir = p - } -} - -func NewSpecFromSchema(root *Schema) (*Spec, error) { - return newSpecFromSchema(root) -} - -func newSpecFromSchema(root *Schema, opts ...option) (*Spec, error) { - var spec Spec - - vis := &schemaToSpec{spec: &spec} - for _, o := range opts { - o(vis) - } - - WalkUp(vis, root) - if vis.err != nil { - return nil, vis.err - } - - return &spec, nil -} diff --git a/pkg/data/gen/specs_test.go b/pkg/data/gen/specs_test.go deleted file mode 100644 index aef0149053a..00000000000 --- a/pkg/data/gen/specs_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package gen - -import ( - "testing" - "time" - - "github.com/BurntSushi/toml" - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/models" -) - -func countableSequenceFnCmp(a, b NewCountableSequenceFn) bool { - // these aren't comparable - return true -} - -func timeValuesSequenceFnCmp(a, b NewTimeValuesSequenceFn) bool { - // these aren't comparable - return true -} - -func TestSpecFromSchema(t *testing.T) { - in := ` -title = "example schema" - -[[measurements]] -name = "m0" -tags = [ - { name = "tag0", source = [ "host1", "host2" ] }, - { name = "tag1", source = [ "process1", "process2" ] }, - { name = "tag2", source = { type = "sequence", format = "value%s", start = 0, count = 100 } } -] -fields = [ - { name = "f0", count = 5000, source = 0.5 }, - { name = "f1", count = 5000, source = 2 }, -] -[[measurements]] -name = "m1" - -tags = [ - { name = "tag0", source = [ "host1", "host2" ] }, -] -fields = [ - { name = "f0", count = 5000, source = 0.5 }, -] -` - var out Schema - if _, err := toml.Decode(in, &out); err != nil { - t.Fatalf("unxpected error: %v", err) - } - - got, err := NewSpecFromSchema(&out) - if err != nil { - t.Error(err) - } - - samples := []sample{0.5} - exp := &Spec{ - SeriesLimit: nil, - Measurements: []MeasurementSpec{ - { - Name: "m0", - SeriesLimit: nil, - TagsSpec: &TagsSpec{ - Tags: []*TagValuesSpec{ - {TagKey: "tag0"}, - {TagKey: "tag1"}, - {TagKey: "tag2"}, - }, - Sample: &samples[0], - }, - FieldValuesSpec: &FieldValuesSpec{ - TimeSequenceSpec: TimeSequenceSpec{ - Count: 5000, - Precision: time.Millisecond, - }, - Name: "f0", - DataType: models.Float, - }, - }, - { - Name: "m0", - SeriesLimit: nil, - TagsSpec: &TagsSpec{ - Tags: []*TagValuesSpec{ - {TagKey: "tag0"}, - {TagKey: "tag1"}, - {TagKey: "tag2"}, - }, - Sample: &samples[0], - }, - FieldValuesSpec: &FieldValuesSpec{ - TimeSequenceSpec: TimeSequenceSpec{ - Count: 5000, - Precision: time.Millisecond, - }, - Name: "f1", - DataType: models.Integer, - }, - }, - { - Name: "m1", - SeriesLimit: nil, - TagsSpec: &TagsSpec{ - Tags: []*TagValuesSpec{ - {TagKey: "tag0"}, - }, - Sample: &samples[0], - }, - FieldValuesSpec: &FieldValuesSpec{ - TimeSequenceSpec: TimeSequenceSpec{ - Count: 5000, - Precision: time.Millisecond, - }, - Name: "f0", - DataType: models.Float, - }, - }, - }, - } - - // TODO(sgc): use a Spec rather than closures for NewCountableSequenceFn and NewTimeValuesSequenceFn - if 
!cmp.Equal(got, exp, cmp.Comparer(countableSequenceFnCmp), cmp.Comparer(timeValuesSequenceFnCmp)) { - t.Errorf("unexpected spec; -got/+exp\n%s", cmp.Diff(got, exp, cmp.Comparer(countableSequenceFnCmp), cmp.Comparer(timeValuesSequenceFnCmp))) - } -} - -func TestSpecFromSchemaError(t *testing.T) { - tests := []struct { - name string - in string - - decodeErr string - specErr string - }{ - { - in: ` -[[measurements]] -tags = [ { name = "tag0", source = [ "host1", "host2" ] } ] -fields = [ { name = "f0", count = 5000, source = 0.5 } ] -`, - specErr: "error processing schema: missing measurement name", - }, - { - in: ` -[[measurements]] -sample = -0.1 -tags = [ { name = "tag0", source = [ "host1", "host2" ] } ] -fields = [ { name = "f0", count = 5000, source = 0.5 } ] -`, - decodeErr: "sample: must be 0 < sample ≤ 1.0", - }, - { - in: ` -[[measurements]] -name = "m0" -tags = [ { source = [ "host1", "host2" ] } ] -fields = [ { name = "f0", count = 5000, source = 0.5 } ] -`, - decodeErr: "tag: missing or invalid value for name", - }, - { - in: ` -[[measurements]] -name = "m0" -tags = [ { name = "tag0" } ] -fields = [ { name = "f0", count = 5000, source = 0.5 } ] -`, - decodeErr: `missing source for tag "tag0"`, - }, - { - in: ` -[[measurements]] -name = "m0" -tags = [ { name = "tag0", source = [ "host1", "host2" ] } ] -fields = [ { count = 5000, source = 0.5 } ] -`, - decodeErr: `field: missing or invalid value for name`, - }, - { - in: ` -[[measurements]] -name = "m0" -tags = [ { name = "tag0", source = [ "host1", "host2" ] } ] -fields = [ { name = "f0", count = 5000 } ] -`, - decodeErr: `missing source for field "f0"`, - }, - } - - checkErr := func(t *testing.T, err error, exp string) { - t.Helper() - if exp == "" { - if err == nil { - return - } - - t.Errorf("unexpected error, got %v", err) - } - - if err == nil { - t.Errorf("expected error, got nil") - } else if err.Error() != exp { - t.Errorf("unexpected error, -got/+exp\n%s", cmp.Diff(err.Error(), exp)) - } - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var out Schema - _, err := toml.Decode(test.in, &out) - checkErr(t, err, test.decodeErr) - - if test.decodeErr == "" { - _, err = NewSpecFromSchema(&out) - checkErr(t, err, test.specErr) - } - }) - } -} diff --git a/pkg/data/gen/tags_sequence.go b/pkg/data/gen/tags_sequence.go deleted file mode 100644 index 42acea5b408..00000000000 --- a/pkg/data/gen/tags_sequence.go +++ /dev/null @@ -1,175 +0,0 @@ -package gen - -import ( - "fmt" - "math" - "math/rand" - "sort" - - "github.com/influxdata/influxdb/v2/models" -) - -type TagsSequence interface { - Next() bool - Value() models.Tags - Count() int -} - -type tagsValuesSequence struct { - tags models.Tags - vals []CountableSequence - n int - count int - sample float64 - src rand.Source - nextFn func(*tagsValuesSequence) bool -} - -type tagsValuesOption func(s *tagsValuesSequence) - -func TagValuesLimitOption(n int) tagsValuesOption { - return func(s *tagsValuesSequence) { - if n >= s.count { - return - } - - s.src = rand.NewSource(20040409) - s.sample = float64(n) / float64(s.count) - } -} - -func TagValuesSampleOption(n float64) tagsValuesOption { - return func(s *tagsValuesSequence) { - if n <= 0.0 || n > 1.0 { - panic("expect: 0.0 < n ≤ 1.0") - } - - s.src = rand.NewSource(int64(float64(math.MaxInt64>>1) * n)) - s.sample = n - s.nextFn = (*tagsValuesSequence).nextSample - } -} - -func NewTagsValuesSequenceKeysValues(keys []string, vals []CountableSequence, opts ...tagsValuesOption) TagsSequence { - tm := 
make(map[string]string, len(keys)) - for _, k := range keys { - tm[k] = "" - } - - count := 1 - for i := range vals { - count *= vals[i].Count() - } - - // models.Tags are ordered, so ensure vals are ordered with respect to keys - sort.Sort(keyValues{keys, vals}) - - s := &tagsValuesSequence{ - tags: models.NewTags(tm), - vals: vals, - count: count, - nextFn: (*tagsValuesSequence).next, - } - - for _, o := range opts { - o(s) - } - - return s -} - -func NewTagsValuesSequenceValues(prefix string, vals []CountableSequence) TagsSequence { - keys := make([]string, len(vals)) - // max tag width - tw := int(math.Ceil(math.Log10(float64(len(vals))))) - tf := fmt.Sprintf("%s%%0%dd", prefix, tw) - for i := range vals { - keys[i] = fmt.Sprintf(tf, i) - } - return NewTagsValuesSequenceKeysValues(keys, vals) -} - -func NewTagsValuesSequenceCounts(prefix string, counts []int) TagsSequence { - tv := make([]CountableSequence, len(counts)) - for i := range counts { - tv[i] = NewCounterByteSequenceCount(counts[i]) - } - return NewTagsValuesSequenceValues(prefix, tv) -} - -func (s *tagsValuesSequence) next() bool { - if s.n >= s.count { - return false - } - - for i := range s.vals { - s.tags[i].Value = []byte(s.vals[i].Value()) - } - - s.n++ - i := s.n - for j := len(s.vals) - 1; j >= 0; j-- { - v := s.vals[j] - v.Next() - c := v.Count() - if r := i % c; r != 0 { - break - } - i /= c - } - - return true -} - -func (s *tagsValuesSequence) skip() bool { - return (float64(s.src.Int63()>>10))*(1.0/9007199254740992.0) > s.sample -} - -func (s *tagsValuesSequence) nextSample() bool { - if s.n >= s.count { - return false - } - - for i := range s.vals { - s.tags[i].Value = []byte(s.vals[i].Value()) - } - - for { - s.n++ - i := s.n - for j := len(s.vals) - 1; j >= 0; j-- { - v := s.vals[j] - v.Next() - c := v.Count() - if r := i % c; r != 0 { - break - } - i /= c - } - - if !s.skip() { - break - } - } - - return true -} - -func (s *tagsValuesSequence) Next() bool { - return s.nextFn(s) -} - -func (s *tagsValuesSequence) Value() models.Tags { return s.tags } -func (s *tagsValuesSequence) Count() int { return s.count } - -type keyValues struct { - keys []string - vals []CountableSequence -} - -func (k keyValues) Len() int { return len(k.keys) } -func (k keyValues) Less(i, j int) bool { return k.keys[i] < k.keys[j] } -func (k keyValues) Swap(i, j int) { - k.keys[i], k.keys[j] = k.keys[j], k.keys[i] - k.vals[i], k.vals[j] = k.vals[j], k.vals[i] -} diff --git a/pkg/data/gen/timestamp_sequence.go b/pkg/data/gen/timestamp_sequence.go deleted file mode 100644 index e2b7aee66d1..00000000000 --- a/pkg/data/gen/timestamp_sequence.go +++ /dev/null @@ -1,36 +0,0 @@ -package gen - -type TimestampSequence interface { - Reset() - Write(ts []int64) -} - -type timestampSequence struct { - t int64 - start int64 - delta int64 -} - -func NewTimestampSequenceFromSpec(spec TimeSequenceSpec) TimestampSequence { - return ×tampSequence{ - t: spec.Start.UnixNano(), - start: spec.Start.UnixNano(), - delta: int64(spec.Delta), - } -} - -func (g *timestampSequence) Reset() { - g.t = g.start -} - -func (g *timestampSequence) Write(ts []int64) { - var ( - t = g.t - d = g.delta - ) - for i := 0; i < len(ts); i++ { - ts[i] = t - t += d - } - g.t = t -} diff --git a/pkg/data/gen/toml.go b/pkg/data/gen/toml.go deleted file mode 100644 index 21137734d18..00000000000 --- a/pkg/data/gen/toml.go +++ /dev/null @@ -1,460 +0,0 @@ -package gen - -import ( - "errors" - "fmt" - "strings" - "time" - - "github.com/spf13/cast" -) - -type SeriesLimit int64 - 
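// SeriesLimit caps the total number of series the data generator will produce.
// It is decoded from the `series-limit` TOML key, which the schema accepts both
// at the top level and inside a [[measurements]] table, e.g.:
//
//	series-limit = 10
//
// UnmarshalTOML below rejects values that are not non-negative integers.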
-func (s *SeriesLimit) UnmarshalTOML(data interface{}) error { - v, ok := data.(int64) - if !ok { - return errors.New("series-limit: invalid value") - } - - if v < 0 { - return errors.New("series-limit: must be ≥ 0") - } - - *s = SeriesLimit(v) - return nil -} - -type sample float64 - -func (s *sample) UnmarshalTOML(data interface{}) error { - v, ok := data.(float64) - if !ok { - return errors.New("sample: must be a float") - } - - if v <= 0 || v > 1.0 { - return errors.New("sample: must be 0 < sample ≤ 1.0") - } - - *s = sample(v) - - return nil -} - -type duration struct { - time.Duration -} - -func (d *duration) UnmarshalTOML(data interface{}) error { - text, ok := data.(string) - if !ok { - return fmt.Errorf("invalid duration, expect a Go duration as a string: %T", data) - } - - return d.UnmarshalText([]byte(text)) -} - -func (d *duration) UnmarshalText(text []byte) error { - s := string(text) - - var err error - d.Duration, err = time.ParseDuration(s) - if err != nil { - return err - } - - if d.Duration == 0 { - d.Duration, err = time.ParseDuration("1" + s) - if err != nil { - return err - } - } - - if d.Duration <= 0 { - return fmt.Errorf("invalid duration, must be > 0: %s", d.Duration) - } - return nil -} - -type precision byte - -const ( - precisionMillisecond precision = iota // default - precisionNanosecond - precisionMicrosecond - precisionSecond - precisionMinute - precisionHour -) - -var precisionToDuration = [...]time.Duration{ - time.Millisecond, - time.Nanosecond, - time.Microsecond, - time.Second, - time.Minute, - time.Minute * 60, - time.Nanosecond, - time.Nanosecond, -} - -func (p *precision) ToDuration() time.Duration { - return precisionToDuration[*p&0x7] -} - -func (p *precision) UnmarshalTOML(data interface{}) error { - d, ok := data.(string) - if !ok { - return fmt.Errorf("invalid precision, expect one of (ns, us, ms, s, m, h): %T", data) - } - - d = strings.ToLower(d) - - switch d { - case "ns", "nanosecond": - *p = precisionNanosecond - case "us", "microsecond", "µs": - *p = precisionMicrosecond - case "ms", "millisecond": - *p = precisionMillisecond - case "s", "second": - *p = precisionSecond - case "m", "minute": - *p = precisionMinute - case "h", "hour": - *p = precisionHour - default: - return fmt.Errorf("invalid precision, expect one of (ns, ms, s, m, h): %s", d) - } - return nil -} - -func (t *Tag) UnmarshalTOML(data interface{}) error { - d, ok := data.(map[string]interface{}) - if !ok { - return nil - } - - if n, ok := d["name"].(string); !ok || n == "" { - return errors.New("tag: missing or invalid value for name") - } else { - t.Name = n - } - - // infer source - - if _, ok := d["source"]; !ok { - return fmt.Errorf("missing source for tag %q", t.Name) - } - - switch v := d["source"].(type) { - case int64, string, float64, bool: - if src, err := decodeTagConstantSource(v); err != nil { - return err - } else { - t.Source = src - } - case []interface{}: - if src, err := decodeTagArraySource(v); err != nil { - return err - } else { - t.Source = src - } - case map[string]interface{}: - if src, err := decodeTagSource(v); err != nil { - return err - } else { - t.Source = src - } - default: - return fmt.Errorf("invalid source for tag %q: %T", t.Name, v) - } - - return nil -} - -func decodeTagConstantSource(data interface{}) (TagSource, error) { - switch data.(type) { - case int64, string, float64, bool: - if src, err := cast.ToStringE(data); err != nil { - return nil, err - } else { - return &TagArraySource{Values: []string{src}}, nil - } - } - - return nil, 
errors.New("invalid constant tag source") -} - -func decodeTagArraySource(data []interface{}) (TagSource, error) { - if len(data) == 0 { - return nil, errors.New("empty array source") - } - - if src, err := cast.ToStringSliceE(data); err != nil { - return nil, err - } else { - return &TagArraySource{Values: src}, nil - } -} - -func decodeTagSource(data map[string]interface{}) (TagSource, error) { - typ, ok := data["type"].(string) - if !ok { - return nil, errors.New("missing type field") - } - switch typ { - case "sequence": - return decodeTagSequenceSource(data) - case "file": - return decodeTagFileSource(data) - default: - return nil, fmt.Errorf("invalid type field %q", typ) - } -} - -func decodeTagFileSource(data map[string]interface{}) (TagSource, error) { - var s TagFileSource - - if v, ok := data["path"].(string); ok { - s.Path = v - } else { - return nil, errors.New("file: missing path") - } - - return &s, nil -} - -func decodeTagSequenceSource(data map[string]interface{}) (TagSource, error) { - var s TagSequenceSource - - if v, ok := data["format"].(string); ok { - // TODO(sgc): validate format string - s.Format = v - } else { - s.Format = "value%s" - } - - if v, ok := data["start"]; ok { - if v, err := cast.ToInt64E(v); err != nil { - return nil, fmt.Errorf("tag.sequence: invalid start, %v", err) - } else if v < 0 { - return nil, fmt.Errorf("tag.sequence: start must be ≥ 0") - } else { - s.Start = v - } - } - - if v, ok := data["count"]; ok { - if v, err := cast.ToInt64E(v); err != nil { - return nil, fmt.Errorf("tag.sequence: invalid count, %v", err) - } else if v < 0 { - return nil, fmt.Errorf("tag.sequence: count must be > 0") - } else { - s.Count = v - } - } else { - return nil, fmt.Errorf("tag.sequence: missing count") - } - - return &s, nil -} - -func (t *Field) UnmarshalTOML(data interface{}) error { - d, ok := data.(map[string]interface{}) - if !ok { - return nil - } - - if n, ok := d["name"].(string); !ok || n == "" { - return errors.New("field: missing or invalid value for name") - } else { - t.Name = n - } - - if n, ok := d["count"]; !ok { - return errors.New("field: missing value for count") - } else if count, err := cast.ToInt64E(n); err != nil { - return fmt.Errorf("field: invalid count, %v", err) - } else if count <= 0 { - return errors.New("field: count must be > 0") - } else { - t.Count = count - } - - if n, ok := d["time-precision"]; ok { - var tp precision - if err := tp.UnmarshalTOML(n); err != nil { - return err - } - t.TimePrecision = &tp - } - - if n, ok := d["time-interval"]; ok { - var ti duration - if err := ti.UnmarshalTOML(n); err != nil { - return err - } - t.TimeInterval = &ti - t.TimePrecision = nil - } - - if t.TimePrecision == nil && t.TimeInterval == nil { - var tp precision - t.TimePrecision = &tp - } - - // infer source - if _, ok := d["source"]; !ok { - return fmt.Errorf("missing source for field %q", t.Name) - } - - switch v := d["source"].(type) { - case int64, string, float64, bool: - t.Source = &FieldConstantValue{v} - case []interface{}: - if src, err := decodeFieldArraySource(v); err != nil { - return err - } else { - t.Source = src - } - case map[string]interface{}: - if src, err := decodeFieldSource(v); err != nil { - return err - } else { - t.Source = src - } - default: - // unknown - return fmt.Errorf("invalid source for tag %q: %T", t.Name, v) - } - - return nil -} - -func decodeFieldArraySource(data []interface{}) (FieldSource, error) { - if len(data) == 0 { - return nil, errors.New("empty array") - } - - var ( - src interface{} - 
err error - ) - - // use first value to determine slice type - switch data[0].(type) { - case int64: - src, err = toInt64SliceE(data) - case float64: - src, err = toFloat64SliceE(data) - case string: - src, err = cast.ToStringSliceE(data) - case bool: - src, err = cast.ToBoolSliceE(data) - default: - err = fmt.Errorf("unsupported field source data type: %T", data[0]) - } - - if err != nil { - return nil, err - } - - return &FieldArraySource{Value: src}, nil -} - -func decodeFieldSource(data map[string]interface{}) (FieldSource, error) { - typ, ok := data["type"].(string) - if !ok { - return nil, errors.New("missing type field") - } - switch typ { - case "rand": - return decodeFloatRandomSource(data) - case "zipf": - return decodeIntegerZipfSource(data) - default: - return nil, fmt.Errorf("invalid type field %q", typ) - } -} - -func decodeFloatRandomSource(data map[string]interface{}) (FieldSource, error) { - var s FieldFloatRandomSource - - if v, ok := data["seed"]; ok { - if v, err := cast.ToInt64E(v); err != nil { - return nil, fmt.Errorf("rand: invalid seed, %v", err) - } else { - s.Seed = v - } - } - - if v, ok := data["min"]; ok { - if v, err := cast.ToFloat64E(v); err != nil { - return nil, fmt.Errorf("rand: invalid min, %v", err) - } else { - s.Min = v - } - } - - if v, ok := data["max"]; ok { - if v, err := cast.ToFloat64E(v); err != nil { - return nil, fmt.Errorf("rand: invalid max, %v", err) - } else { - s.Max = v - } - } else { - s.Max = 1.0 - } - - if !(s.Min <= s.Max) { - return nil, errors.New("rand: min ≤ max") - } - - return &s, nil -} - -func decodeIntegerZipfSource(data map[string]interface{}) (FieldSource, error) { - var s FieldIntegerZipfSource - - if v, ok := data["seed"]; ok { - if v, err := cast.ToInt64E(v); err != nil { - return nil, fmt.Errorf("zipf: invalid seed, %v", err) - } else { - s.Seed = v - } - } - - if v, ok := data["s"]; ok { - if v, err := cast.ToFloat64E(v); err != nil || v <= 1.0 { - return nil, fmt.Errorf("zipf: invalid value for s (s > 1), %v", err) - } else { - s.S = v - } - } else { - return nil, fmt.Errorf("zipf: missing value for s") - } - - if v, ok := data["v"]; ok { - if v, err := cast.ToFloat64E(v); err != nil || v < 1.0 { - return nil, fmt.Errorf("zipf: invalid value for v (v ≥ 1), %v", err) - } else { - s.V = v - } - } else { - return nil, fmt.Errorf("zipf: missing value for v") - } - - if v, ok := data["imax"]; ok { - if v, err := cast.ToUint64E(v); err != nil { - return nil, fmt.Errorf("zipf: invalid value for imax, %v", err) - } else { - s.IMAX = v - } - } else { - return nil, fmt.Errorf("zipf: missing value for imax") - } - - return &s, nil -} diff --git a/pkg/data/gen/toml_test.go b/pkg/data/gen/toml_test.go deleted file mode 100644 index 7055ae7cd0b..00000000000 --- a/pkg/data/gen/toml_test.go +++ /dev/null @@ -1,169 +0,0 @@ -package gen - -import ( - "fmt" - "strings" - "testing" - - "github.com/BurntSushi/toml" - "github.com/google/go-cmp/cmp" -) - -func visit(root *Schema) string { - w := &strings.Builder{} - - walkFn := func(node SchemaNode) bool { - switch n := node.(type) { - case *Schema: - - case Measurements: - fmt.Fprintln(w, "Measurements: ") - - case *Measurement: - fmt.Fprintln(w) - fmt.Fprintf(w, " Name: %s\n", n.Name) - - case Tags: - fmt.Fprintln(w, " Tags:") - - case Fields: - fmt.Fprintln(w, " Fields:") - - case *Field: - if n.TimePrecision != nil { - fmt.Fprintf(w, " %s: %s, count=%d, time-precision=%s\n", n.Name, n.Source, n.Count, *n.TimePrecision) - } else { - fmt.Fprintf(w, " %s: %s, count=%d, 
time-interval=%s\n", n.Name, n.Source, n.Count, n.TimeInterval) - } - - case *Tag: - fmt.Fprintf(w, " %s: %s\n", n.Name, n.Source) - - } - - return true - } - - WalkDown(VisitorFn(walkFn), root) - - return w.String() -} - -func TestSchema(t *testing.T) { - in := ` -title = "example schema" -series-limit = 10 - -[[measurements]] - name = "constant" - series-limit = 5 - - [[measurements.tags]] - name = "tag0" - source = [ "host1", "host2" ] - - [[measurements.tags]] - name = "tag1" - source = { type = "file", path = "foo.txt" } - - [[measurements.fields]] - name = "floatC" - count = 5000 - source = 0.5 - time-precision = "us" - - [[measurements.fields]] - name = "integerC" - count = 5000 - source = 3 - time-precision = "hour" - - [[measurements.fields]] - name = "stringC" - count = 5000 - source = "hello" - time-interval = "60s" - - [[measurements.fields]] - name = "stringA" - count = 5000 - source = ["hello", "world"] - - [[measurements.fields]] - name = "boolf" - count = 5000 - source = false - -[[measurements]] -name = "random" - - [[measurements.tags]] - name = "tagSeq" - source = { type = "sequence", format = "value%s", start = 0, count = 100 } - - [[measurements.fields]] - name = "floatR" - count = 5000 - source = { type = "rand", min = 0.5, max = 50.1, seed = 10 } - time-precision = "us" - -[[measurements]] -name = "array" - - [[measurements.tags]] - name = "tagSeq" - source = { type = "sequence", format = "value%s", start = 0, count = 100 } - - [[measurements.tags]] - name = "tagFile" - source = { type = "file", path = "foo.txt" } - - [[measurements.fields]] - name = "stringA" - count = 1000 - source = ["this", "that"] - time-precision = "us" - - [[measurements.fields]] - name = "integerA" - count = 1000 - source = [5, 6, 7] - time-interval = "90s" -` - var out Schema - _, err := toml.Decode(in, &out) - if err != nil { - t.Fatalf("unxpected error: %v", err) - } - - exp := `Measurements: - - Name: constant - Tags: - tag0: array, source=[]string{"host1", "host2"} - tag1: file, path=foo.txt - Fields: - floatC: constant, source=0.5, count=5000, time-precision=Microsecond - integerC: constant, source=3, count=5000, time-precision=Hour - stringC: constant, source="hello", count=5000, time-interval=1m0s - stringA: array, source=[]string{"hello", "world"}, count=5000, time-precision=Millisecond - boolf: constant, source=false, count=5000, time-precision=Millisecond - - Name: random - Tags: - tagSeq: sequence, prefix="value%s", range=[0,100) - Fields: - floatR: rand, seed=10, min=50.100000, max=50.100000, count=5000, time-precision=Microsecond - - Name: array - Tags: - tagSeq: sequence, prefix="value%s", range=[0,100) - tagFile: file, path=foo.txt - Fields: - stringA: array, source=[]string{"this", "that"}, count=1000, time-precision=Microsecond - integerA: array, source=[]int64{5, 6, 7}, count=1000, time-interval=1m30s -` - if got := visit(&out); !cmp.Equal(got, exp) { - t.Errorf("unexpected value, -got/+exp\n%s", cmp.Diff(got, exp)) - } -} diff --git a/pkg/data/gen/types.tmpldata b/pkg/data/gen/types.tmpldata deleted file mode 100644 index 82651de2530..00000000000 --- a/pkg/data/gen/types.tmpldata +++ /dev/null @@ -1,30 +0,0 @@ -[ - { - "Name":"Float", - "name":"float", - "Type":"float64", - "Rand":"Float64" - }, - { - "Name":"Integer", - "name":"integer", - "Type":"int64", - "Rand":"Int64" - }, - { - "Name":"Unsigned", - "name":"unsigned", - "Type":"uint64", - "Rand":"Uint64" - }, - { - "Name":"String", - "name":"string", - "Type":"string" - }, - { - "Name":"Boolean", - "name":"boolean", - 
"Type":"bool" - } -] diff --git a/pkg/data/gen/util.go b/pkg/data/gen/util.go deleted file mode 100644 index bb16508f5fd..00000000000 --- a/pkg/data/gen/util.go +++ /dev/null @@ -1,87 +0,0 @@ -package gen - -import ( - "fmt" - "reflect" - "sort" - - "github.com/spf13/cast" -) - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func sortDedupStrings(in []string) []string { - sort.Strings(in) - j := 0 - for i := 1; i < len(in); i++ { - if in[j] == in[i] { - continue - } - j++ - in[j] = in[i] - } - return in[:j+1] -} - -// ToInt64SliceE casts an interface to a []int64 type. -func toInt64SliceE(i interface{}) ([]int64, error) { - if i == nil { - return []int64{}, fmt.Errorf("unable to cast %#v of type %T to []int64", i, i) - } - - switch v := i.(type) { - case []int64: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]int64, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := cast.ToInt64E(s.Index(j).Interface()) - if err != nil { - return []int64{}, fmt.Errorf("unable to cast %#v of type %T to []int64", i, i) - } - a[j] = val - } - return a, nil - default: - return []int64{}, fmt.Errorf("unable to cast %#v of type %T to []int64", i, i) - } -} - -// ToFloat64SliceE casts an interface to a []float64 type. -func toFloat64SliceE(i interface{}) ([]float64, error) { - if i == nil { - return []float64{}, fmt.Errorf("unable to cast %#v of type %T to []float64", i, i) - } - - switch v := i.(type) { - case []float64: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]float64, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := cast.ToFloat64E(s.Index(j).Interface()) - if err != nil { - return []float64{}, fmt.Errorf("unable to cast %#v of type %T to []float64", i, i) - } - a[j] = val - } - return a, nil - default: - return []float64{}, fmt.Errorf("unable to cast %#v of type %T to []float64", i, i) - } -} diff --git a/pkg/data/gen/values.gen.go b/pkg/data/gen/values.gen.go deleted file mode 100644 index 4cc16b76474..00000000000 --- a/pkg/data/gen/values.gen.go +++ /dev/null @@ -1,252 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! 
-// Source: values.gen.go.tmpl - -package gen - -type floatConstantValuesSequence struct { - v float64 -} - -func NewFloatConstantValuesSequence(v float64) FloatValuesSequence { - return &floatConstantValuesSequence{ - v: v, - } -} - -func (g *floatConstantValuesSequence) Reset() { -} - -func (g *floatConstantValuesSequence) Write(vs []float64) { - for i := 0; i < len(vs); i++ { - vs[i] = g.v - } -} - -type integerConstantValuesSequence struct { - v int64 -} - -func NewIntegerConstantValuesSequence(v int64) IntegerValuesSequence { - return &integerConstantValuesSequence{ - v: v, - } -} - -func (g *integerConstantValuesSequence) Reset() { -} - -func (g *integerConstantValuesSequence) Write(vs []int64) { - for i := 0; i < len(vs); i++ { - vs[i] = g.v - } -} - -type unsignedConstantValuesSequence struct { - v uint64 -} - -func NewUnsignedConstantValuesSequence(v uint64) UnsignedValuesSequence { - return &unsignedConstantValuesSequence{ - v: v, - } -} - -func (g *unsignedConstantValuesSequence) Reset() { -} - -func (g *unsignedConstantValuesSequence) Write(vs []uint64) { - for i := 0; i < len(vs); i++ { - vs[i] = g.v - } -} - -type stringConstantValuesSequence struct { - v string -} - -func NewStringConstantValuesSequence(v string) StringValuesSequence { - return &stringConstantValuesSequence{ - v: v, - } -} - -func (g *stringConstantValuesSequence) Reset() { -} - -func (g *stringConstantValuesSequence) Write(vs []string) { - for i := 0; i < len(vs); i++ { - vs[i] = g.v - } -} - -type booleanConstantValuesSequence struct { - v bool -} - -func NewBooleanConstantValuesSequence(v bool) BooleanValuesSequence { - return &booleanConstantValuesSequence{ - v: v, - } -} - -func (g *booleanConstantValuesSequence) Reset() { -} - -func (g *booleanConstantValuesSequence) Write(vs []bool) { - for i := 0; i < len(vs); i++ { - vs[i] = g.v - } -} - -type floatArrayValuesSequence struct { - v []float64 - vi int -} - -func NewFloatArrayValuesSequence(v []float64) FloatValuesSequence { - return &floatArrayValuesSequence{ - v: v, - } -} - -func (g *floatArrayValuesSequence) Reset() { - g.vi = 0 -} - -func (g *floatArrayValuesSequence) Write(vs []float64) { - var ( - v = g.v - vi = g.vi - ) - for i := 0; i < len(vs); i++ { - if vi >= len(v) { - vi = 0 - } - vs[i] = v[vi] - vi += 1 - } - g.vi = vi -} - -type integerArrayValuesSequence struct { - v []int64 - vi int -} - -func NewIntegerArrayValuesSequence(v []int64) IntegerValuesSequence { - return &integerArrayValuesSequence{ - v: v, - } -} - -func (g *integerArrayValuesSequence) Reset() { - g.vi = 0 -} - -func (g *integerArrayValuesSequence) Write(vs []int64) { - var ( - v = g.v - vi = g.vi - ) - for i := 0; i < len(vs); i++ { - if vi >= len(v) { - vi = 0 - } - vs[i] = v[vi] - vi += 1 - } - g.vi = vi -} - -type unsignedArrayValuesSequence struct { - v []uint64 - vi int -} - -func NewUnsignedArrayValuesSequence(v []uint64) UnsignedValuesSequence { - return &unsignedArrayValuesSequence{ - v: v, - } -} - -func (g *unsignedArrayValuesSequence) Reset() { - g.vi = 0 -} - -func (g *unsignedArrayValuesSequence) Write(vs []uint64) { - var ( - v = g.v - vi = g.vi - ) - for i := 0; i < len(vs); i++ { - if vi >= len(v) { - vi = 0 - } - vs[i] = v[vi] - vi += 1 - } - g.vi = vi -} - -type stringArrayValuesSequence struct { - v []string - vi int -} - -func NewStringArrayValuesSequence(v []string) StringValuesSequence { - return &stringArrayValuesSequence{ - v: v, - } -} - -func (g *stringArrayValuesSequence) Reset() { - g.vi = 0 -} - -func (g *stringArrayValuesSequence) Write(vs 
[]string) { - var ( - v = g.v - vi = g.vi - ) - for i := 0; i < len(vs); i++ { - if vi >= len(v) { - vi = 0 - } - vs[i] = v[vi] - vi += 1 - } - g.vi = vi -} - -type booleanArrayValuesSequence struct { - v []bool - vi int -} - -func NewBooleanArrayValuesSequence(v []bool) BooleanValuesSequence { - return &booleanArrayValuesSequence{ - v: v, - } -} - -func (g *booleanArrayValuesSequence) Reset() { - g.vi = 0 -} - -func (g *booleanArrayValuesSequence) Write(vs []bool) { - var ( - v = g.v - vi = g.vi - ) - for i := 0; i < len(vs); i++ { - if vi >= len(v) { - vi = 0 - } - vs[i] = v[vi] - vi += 1 - } - g.vi = vi -} diff --git a/pkg/data/gen/values.gen.go.tmpl b/pkg/data/gen/values.gen.go.tmpl deleted file mode 100644 index a76e5a09fee..00000000000 --- a/pkg/data/gen/values.gen.go.tmpl +++ /dev/null @@ -1,54 +0,0 @@ -package gen - -{{range .}} -type {{.name}}ConstantValuesSequence struct { - v {{.Type}} -} - -func New{{.Name}}ConstantValuesSequence(v {{.Type}}) {{.Name}}ValuesSequence { - return &{{.name}}ConstantValuesSequence{ - v: v, - } -} - -func (g *{{.name}}ConstantValuesSequence) Reset() { -} - -func (g *{{.name}}ConstantValuesSequence) Write(vs []{{.Type}}) { - for i := 0; i < len(vs); i++ { - vs[i] = g.v - } -} -{{end}} - -{{range .}} -type {{.name}}ArrayValuesSequence struct { - v []{{.Type}} - vi int -} - -func New{{.Name}}ArrayValuesSequence(v []{{.Type}}) {{.Name}}ValuesSequence { - return &{{.name}}ArrayValuesSequence{ - v: v, - } -} - -func (g *{{.name}}ArrayValuesSequence) Reset() { - g.vi = 0 -} - -func (g *{{.name}}ArrayValuesSequence) Write(vs []{{.Type}}) { - var ( - v = g.v - vi = g.vi - ) - for i := 0; i < len(vs); i++ { - if vi >= len(v) { - vi = 0 - } - vs[i] = v[vi] - vi += 1 - } - g.vi = vi -} -{{end}} diff --git a/pkg/data/gen/values.go b/pkg/data/gen/values.go deleted file mode 100644 index 9dfc1cb8091..00000000000 --- a/pkg/data/gen/values.go +++ /dev/null @@ -1,46 +0,0 @@ -package gen - -import ( - "math/rand" -) - -type floatRandomValuesSequence struct { - r *rand.Rand - a float64 - b float64 -} - -func NewFloatRandomValuesSequence(min, max float64, r *rand.Rand) FloatValuesSequence { - return &floatRandomValuesSequence{r: r, a: max - min, b: min} -} - -func (g *floatRandomValuesSequence) Reset() {} - -func (g *floatRandomValuesSequence) Write(vs []float64) { - var ( - a = g.a - b = g.b - ) - for i := 0; i < len(vs); i++ { - vs[i] = a*g.r.Float64() + b // ax + b - } -} - -type integerRandomValuesSequence struct { - r *rand.Zipf -} - -// NewIntegerZipfValuesSequence produces int64 values using a Zipfian distribution -// described by s. -func NewIntegerZipfValuesSequence(s *FieldIntegerZipfSource) IntegerValuesSequence { - r := rand.New(rand.NewSource(s.Seed)) - return &integerRandomValuesSequence{r: rand.NewZipf(r, s.S, s.V, s.IMAX)} -} - -func (g *integerRandomValuesSequence) Reset() {} - -func (g *integerRandomValuesSequence) Write(vs []int64) { - for i := 0; i < len(vs); i++ { - vs[i] = int64(g.r.Uint64()) - } -} diff --git a/pkg/data/gen/values_sequence.gen.go b/pkg/data/gen/values_sequence.gen.go deleted file mode 100644 index 6aacb05a2d0..00000000000 --- a/pkg/data/gen/values_sequence.gen.go +++ /dev/null @@ -1,272 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! 
-// Source: values_sequence.gen.go.tmpl - -package gen - -import ( - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" -) - -type FloatValuesSequence interface { - Reset() - Write(v []float64) -} - -type timeFloatValuesSequence struct { - vals floatArray - ts TimestampSequence - vs FloatValuesSequence - count int - n int -} - -func NewTimeFloatValuesSequence(count int, ts TimestampSequence, vs FloatValuesSequence) TimeValuesSequence { - return &timeFloatValuesSequence{ - vals: *newFloatArrayLen(tsdb.DefaultMaxPointsPerBlock), - ts: ts, - vs: vs, - count: count, - n: count, - } -} - -func (s *timeFloatValuesSequence) Reset() { - s.ts.Reset() - s.vs.Reset() - s.n = s.count -} - -func (s *timeFloatValuesSequence) Next() bool { - if s.n > 0 { - c := min(s.n, tsdb.DefaultMaxPointsPerBlock) - s.n -= c - s.vals.Timestamps = s.vals.Timestamps[:c] - s.vals.Values = s.vals.Values[:c] - - s.ts.Write(s.vals.Timestamps) - s.vs.Write(s.vals.Values) - return true - } - - return false -} - -func (s *timeFloatValuesSequence) Values() Values { - return &s.vals -} - -func (s *timeFloatValuesSequence) ValueType() models.FieldType { - return models.Float -} - -type IntegerValuesSequence interface { - Reset() - Write(v []int64) -} - -type timeIntegerValuesSequence struct { - vals integerArray - ts TimestampSequence - vs IntegerValuesSequence - count int - n int -} - -func NewTimeIntegerValuesSequence(count int, ts TimestampSequence, vs IntegerValuesSequence) TimeValuesSequence { - return &timeIntegerValuesSequence{ - vals: *newIntegerArrayLen(tsdb.DefaultMaxPointsPerBlock), - ts: ts, - vs: vs, - count: count, - n: count, - } -} - -func (s *timeIntegerValuesSequence) Reset() { - s.ts.Reset() - s.vs.Reset() - s.n = s.count -} - -func (s *timeIntegerValuesSequence) Next() bool { - if s.n > 0 { - c := min(s.n, tsdb.DefaultMaxPointsPerBlock) - s.n -= c - s.vals.Timestamps = s.vals.Timestamps[:c] - s.vals.Values = s.vals.Values[:c] - - s.ts.Write(s.vals.Timestamps) - s.vs.Write(s.vals.Values) - return true - } - - return false -} - -func (s *timeIntegerValuesSequence) Values() Values { - return &s.vals -} - -func (s *timeIntegerValuesSequence) ValueType() models.FieldType { - return models.Integer -} - -type UnsignedValuesSequence interface { - Reset() - Write(v []uint64) -} - -type timeUnsignedValuesSequence struct { - vals unsignedArray - ts TimestampSequence - vs UnsignedValuesSequence - count int - n int -} - -func NewTimeUnsignedValuesSequence(count int, ts TimestampSequence, vs UnsignedValuesSequence) TimeValuesSequence { - return &timeUnsignedValuesSequence{ - vals: *newUnsignedArrayLen(tsdb.DefaultMaxPointsPerBlock), - ts: ts, - vs: vs, - count: count, - n: count, - } -} - -func (s *timeUnsignedValuesSequence) Reset() { - s.ts.Reset() - s.vs.Reset() - s.n = s.count -} - -func (s *timeUnsignedValuesSequence) Next() bool { - if s.n > 0 { - c := min(s.n, tsdb.DefaultMaxPointsPerBlock) - s.n -= c - s.vals.Timestamps = s.vals.Timestamps[:c] - s.vals.Values = s.vals.Values[:c] - - s.ts.Write(s.vals.Timestamps) - s.vs.Write(s.vals.Values) - return true - } - - return false -} - -func (s *timeUnsignedValuesSequence) Values() Values { - return &s.vals -} - -func (s *timeUnsignedValuesSequence) ValueType() models.FieldType { - return models.Unsigned -} - -type StringValuesSequence interface { - Reset() - Write(v []string) -} - -type timeStringValuesSequence struct { - vals stringArray - ts TimestampSequence - vs StringValuesSequence - count int - n int -} - -func 
NewTimeStringValuesSequence(count int, ts TimestampSequence, vs StringValuesSequence) TimeValuesSequence { - return &timeStringValuesSequence{ - vals: *newStringArrayLen(tsdb.DefaultMaxPointsPerBlock), - ts: ts, - vs: vs, - count: count, - n: count, - } -} - -func (s *timeStringValuesSequence) Reset() { - s.ts.Reset() - s.vs.Reset() - s.n = s.count -} - -func (s *timeStringValuesSequence) Next() bool { - if s.n > 0 { - c := min(s.n, tsdb.DefaultMaxPointsPerBlock) - s.n -= c - s.vals.Timestamps = s.vals.Timestamps[:c] - s.vals.Values = s.vals.Values[:c] - - s.ts.Write(s.vals.Timestamps) - s.vs.Write(s.vals.Values) - return true - } - - return false -} - -func (s *timeStringValuesSequence) Values() Values { - return &s.vals -} - -func (s *timeStringValuesSequence) ValueType() models.FieldType { - return models.String -} - -type BooleanValuesSequence interface { - Reset() - Write(v []bool) -} - -type timeBooleanValuesSequence struct { - vals booleanArray - ts TimestampSequence - vs BooleanValuesSequence - count int - n int -} - -func NewTimeBooleanValuesSequence(count int, ts TimestampSequence, vs BooleanValuesSequence) TimeValuesSequence { - return &timeBooleanValuesSequence{ - vals: *newBooleanArrayLen(tsdb.DefaultMaxPointsPerBlock), - ts: ts, - vs: vs, - count: count, - n: count, - } -} - -func (s *timeBooleanValuesSequence) Reset() { - s.ts.Reset() - s.vs.Reset() - s.n = s.count -} - -func (s *timeBooleanValuesSequence) Next() bool { - if s.n > 0 { - c := min(s.n, tsdb.DefaultMaxPointsPerBlock) - s.n -= c - s.vals.Timestamps = s.vals.Timestamps[:c] - s.vals.Values = s.vals.Values[:c] - - s.ts.Write(s.vals.Timestamps) - s.vs.Write(s.vals.Values) - return true - } - - return false -} - -func (s *timeBooleanValuesSequence) Values() Values { - return &s.vals -} - -func (s *timeBooleanValuesSequence) ValueType() models.FieldType { - return models.Boolean -} diff --git a/pkg/data/gen/values_sequence.gen.go.tmpl b/pkg/data/gen/values_sequence.gen.go.tmpl deleted file mode 100644 index 104b502bc2a..00000000000 --- a/pkg/data/gen/values_sequence.gen.go.tmpl +++ /dev/null @@ -1,60 +0,0 @@ -package gen - -import ( - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" -) - -{{range .}} -type {{.Name}}ValuesSequence interface { - Reset() - Write(v []{{.Type}}) -} - -type time{{.Name}}ValuesSequence struct { - vals {{.name}}Array - ts TimestampSequence - vs {{.Name}}ValuesSequence - count int - n int -} - -func NewTime{{.Name}}ValuesSequence(count int, ts TimestampSequence, vs {{.Name}}ValuesSequence) TimeValuesSequence { - return &time{{.Name}}ValuesSequence{ - vals: *new{{.Name}}ArrayLen(tsdb.DefaultMaxPointsPerBlock), - ts: ts, - vs: vs, - count: count, - n: count, - } -} - -func (s *time{{.Name}}ValuesSequence) Reset() { - s.ts.Reset() - s.vs.Reset() - s.n = s.count -} - -func (s *time{{.Name}}ValuesSequence) Next() bool { - if s.n > 0 { - c := min(s.n, tsdb.DefaultMaxPointsPerBlock) - s.n -= c - s.vals.Timestamps = s.vals.Timestamps[:c] - s.vals.Values = s.vals.Values[:c] - - s.ts.Write(s.vals.Timestamps) - s.vs.Write(s.vals.Values) - return true - } - - return false -} - -func (s *time{{.Name}}ValuesSequence) Values() Values { - return &s.vals -} - -func (s *time{{.Name}}ValuesSequence) ValueType() models.FieldType { - return models.{{.Name}} -} -{{end}} diff --git a/pkg/deep/equal.go b/pkg/deep/equal.go deleted file mode 100644 index 1291554db70..00000000000 --- a/pkg/deep/equal.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2009 The Go 
Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// License. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package deep provides a deep equality check for use in tests. -package deep // import "github.com/influxdata/influxdb/v2/pkg/deep" - -import ( - "fmt" - "math" - "reflect" -) - -// Equal is a copy of reflect.DeepEqual except that it treats NaN == NaN as true. -func Equal(a1, a2 interface{}) bool { - if a1 == nil || a2 == nil { - return a1 == a2 - } - v1 := reflect.ValueOf(a1) - v2 := reflect.ValueOf(a2) - if v1.Type() != v2.Type() { - return false - } - return deepValueEqual(v1, v2, make(map[visit]bool), 0) -} - -// Tests for deep equality using reflected types. The map argument tracks -// comparisons that have already been seen, which allows short circuiting on -// recursive types. -func deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool { - if !v1.IsValid() || !v2.IsValid() { - return v1.IsValid() == v2.IsValid() - } - if v1.Type() != v2.Type() { - return false - } - - // if depth > 10 { panic("deepValueEqual") } // for debugging - hard := func(k reflect.Kind) bool { - switch k { - case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct: - return true - } - return false - } - - if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) { - addr1 := v1.UnsafeAddr() - addr2 := v2.UnsafeAddr() - if addr1 > addr2 { - // Canonicalize order to reduce number of entries in visited. - addr1, addr2 = addr2, addr1 - } - - // Short circuit if references are identical ... - if addr1 == addr2 { - return true - } - - // ... or already seen - typ := v1.Type() - v := visit{addr1, addr2, typ} - if visited[v] { - return true - } - - // Remember for later. 
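// (Marking the pair as visited means a later re-encounter of the same two
// addresses short-circuits to true, which is what lets comparisons of cyclic
// or self-referential structures terminate.)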
- visited[v] = true - } - - switch v1.Kind() { - case reflect.Array: - for i := 0; i < v1.Len(); i++ { - if !deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { - return false - } - } - return true - case reflect.Slice: - if v1.IsNil() != v2.IsNil() { - return false - } - if v1.Len() != v2.Len() { - return false - } - if v1.Pointer() == v2.Pointer() { - return true - } - for i := 0; i < v1.Len(); i++ { - if !deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { - return false - } - } - return true - case reflect.Interface: - if v1.IsNil() || v2.IsNil() { - return v1.IsNil() == v2.IsNil() - } - return deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) - case reflect.Pointer: - return deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) - case reflect.Struct: - for i, n := 0, v1.NumField(); i < n; i++ { - if !deepValueEqual(v1.Field(i), v2.Field(i), visited, depth+1) { - return false - } - } - return true - case reflect.Map: - if v1.IsNil() != v2.IsNil() { - return false - } - if v1.Len() != v2.Len() { - return false - } - if v1.Pointer() == v2.Pointer() { - return true - } - for _, k := range v1.MapKeys() { - if !deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) { - return false - } - } - return true - case reflect.Func: - if v1.IsNil() && v2.IsNil() { - return true - } - // Can't do better than this: - return false - case reflect.String: - return v1.String() == v2.String() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - case reflect.Float32, reflect.Float64: - // Special handling for floats so that NaN == NaN is true. - f1, f2 := v1.Float(), v2.Float() - if math.IsNaN(f1) && math.IsNaN(f2) { - return true - } - return f1 == f2 - case reflect.Bool: - return v1.Bool() == v2.Bool() - default: - panic(fmt.Sprintf("cannot compare type: %s", v1.Kind().String())) - } -} - -// During deepValueEqual, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. -// Visited comparisons are stored in a map indexed by visit. -type visit struct { - a1 uintptr - a2 uintptr - typ reflect.Type -} diff --git a/pkg/durablequeue/queue.go b/pkg/durablequeue/queue.go deleted file mode 100644 index bb1f9ce6062..00000000000 --- a/pkg/durablequeue/queue.go +++ /dev/null @@ -1,1193 +0,0 @@ -package durablequeue - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "sync" - "sync/atomic" - "time" - - "go.uber.org/zap" -) - -// Possible errors returned by a queue. -var ( - ErrNotOpen = fmt.Errorf("queue not open") - ErrQueueFull = fmt.Errorf("queue is full") - ErrQueueBlocked = fmt.Errorf("queue is blocked") - ErrSegmentFull = fmt.Errorf("segment is full") -) - -const ( - DefaultSegmentSize = 10 * 1024 * 1024 - footerSize = 8 -) - -// MaxWritesPending is the number of writes that can be pending at any given time. -const MaxWritesPending = 1024 - -// Queue is a bounded, disk-backed, append-only type that combines Queue and -// log semantics. byte slices can be appended and read back in-order. -// The Queue maintains a pointer to the current Head -// byte slice and can re-read from the Head until it has been advanced. -// -// Internally, the Queue writes byte slices to multiple segment files so -// that disk space can be reclaimed. 
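// (Each segment is a separate append-only file in the queue directory, named
// by an increasing integer ID, so removing a fully-read head segment frees
// its disk space immediately.)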
When a segment file is larger than -// the max segment size, a new file is created. Segments are removed -// after their Head pointer has advanced past the last entry. The first -// segment is the head, and the last segment is the tail. Reads are from -// the head segment and writes tail segment. -// -// queues can have a max size configured such that when the size of all -// segments on disk exceeds the size, write will fail. -// -// ┌─────┐ -// │Head │ -// ├─────┘ -// │ -// ▼ -// ┌─────────────────┐ ┌─────────────────┐┌─────────────────┐ -// │Segment 1 - 10MB │ │Segment 2 - 10MB ││Segment 3 - 10MB │ -// └─────────────────┘ └─────────────────┘└─────────────────┘ -// ▲ -// │ -// │ -// ┌─────┐ -// │Tail │ -// └─────┘ -type Queue struct { - mu sync.RWMutex - - // Directory to create segments - dir string - - // The head and tail segments. Reads are from the beginning of head, - // writes are appended to the tail. - head, tail *segment - - // The maximum size in bytes of a segment file before a new one should be created - maxSegmentSize int64 - - // The maximum size allowed in bytes of all segments before writes will return - // an error - maxSize int64 - queueTotalSize *SharedCount - - // The segments that exist on disk - segments segments - // verifyBlockFn is used to verify a block within a segment contains valid data. - verifyBlockFn func([]byte) error - - // Channel used for throttling append requests. - appendCh chan struct{} - - // scratch is a temporary in-memory space for staging writes - scratch bytes.Buffer - - logger *zap.Logger -} - -// SharedCount manages an integer value, which can be read/written concurrently. -type SharedCount struct { - value int64 -} - -// Add adds delta to the counter value. -func (sc *SharedCount) Add(delta int64) { - atomic.AddInt64(&sc.value, delta) -} - -// Value returns the current value value. -func (sc *SharedCount) Value() int64 { - return atomic.LoadInt64(&sc.value) -} - -type QueuePos struct { - Head string - Tail string -} - -type segments []*segment - -func (a segments) Len() int { return len(a) } -func (a segments) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a segments) Less(i, j int) bool { return a[i].id < a[j].id } - -// NewQueue create a Queue that will store segments in dir and that will consume no more than maxSize on disk. -func NewQueue(dir string, maxSize int64, maxSegmentSize int64, queueTotalSize *SharedCount, depth int, verifyBlockFn func([]byte) error) (*Queue, error) { - if maxSize < 2*maxSegmentSize { - return nil, fmt.Errorf("max queue size %d too small: must be at least twice the max segment size %d", maxSize, maxSegmentSize) - } - - return &Queue{ - dir: dir, - maxSegmentSize: maxSegmentSize, - maxSize: maxSize, - queueTotalSize: queueTotalSize, - segments: segments{}, - appendCh: make(chan struct{}, depth), - logger: zap.NewNop(), - verifyBlockFn: verifyBlockFn, - }, nil -} - -// WithLogger sets the internal logger to the logger passed in. -func (l *Queue) WithLogger(log *zap.Logger) { - l.logger = log -} - -// SetMaxSize updates the max queue size to the passed-in value. -// -// Max queue size must be at least twice the current max segment size, otherwise an error will be returned. -// -// If the new value is smaller than the amount of data currently in the queue, -// writes will be rejected until the queue drains to below the new maximum. 
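// As a hypothetical sketch, with the default 10 MiB segment size
// (DefaultSegmentSize) a queue named q would behave as follows:
//
//	err := q.SetMaxSize(20 * 1024 * 1024) // accepted: exactly twice the segment size
//	err = q.SetMaxSize(10 * 1024 * 1024)  // rejected: less than 2 * maxSegmentSize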
-func (l *Queue) SetMaxSize(maxSize int64) error { - l.mu.Lock() - defer l.mu.Unlock() - - if maxSize < 2*l.maxSegmentSize { - return fmt.Errorf("queue size %d too small: must be at least %d bytes", maxSize, 2*l.maxSegmentSize) - } - - l.maxSize = maxSize - return nil -} - -// Open opens the queue for reading and writing. -func (l *Queue) Open() error { - l.mu.Lock() - defer l.mu.Unlock() - - segments, err := l.loadSegments() - if err != nil { - return err - } - l.segments = segments - - if len(l.segments) == 0 { - if err := l.addSegment(); err != nil { - return err - } - } - - l.head = l.segments[0] - l.tail = l.segments[len(l.segments)-1] - - // If the Head has been fully advanced and the segment size is modified, - // existing segments an get stuck and never allow clients to advance further. - // This advances the segment if the current Head is already at the end. - _, err = l.head.current() - if err == io.EOF { - return l.trimHead(false) - } - - l.queueTotalSize.Add(l.DiskUsage()) - - return nil -} - -// Close stops the queue for reading and writing. -func (l *Queue) Close() error { - l.mu.Lock() - defer l.mu.Unlock() - - for _, s := range l.segments { - if err := s.close(); err != nil { - return err - } - } - l.head = nil - l.tail = nil - l.segments = nil - return nil -} - -// Remove removes all underlying file-based resources for the queue. -// It is an error to call this on an open queue. -func (l *Queue) Remove() error { - l.mu.Lock() - defer l.mu.Unlock() - - if l.head != nil || l.tail != nil || l.segments != nil { - return fmt.Errorf("queue is open") - } - - return os.RemoveAll(l.dir) -} - -// RemoveSegments removes all segments for the queue. -// It is an error to call this on an open queue. -func (l *Queue) RemoveSegments() error { - l.mu.Lock() - defer l.mu.Unlock() - - if l.head != nil || l.tail != nil || l.segments != nil { - return fmt.Errorf("queue is open") - } - - files, err := os.ReadDir(l.dir) - if err != nil { - return err - } - - for _, segment := range files { - // Segments should be files. Skip anything that is a dir. - if segment.IsDir() { - continue - } - - // Segments file names are all numeric - _, err := strconv.ParseUint(segment.Name(), 10, 64) - if err != nil { - continue - } - - path := filepath.Join(l.dir, segment.Name()) - if err := os.Remove(path); err != nil { - return err - } - } - return nil -} - -// SetMaxSegmentSize updates the max segment size for new and existing (tail) segments. -// -// The new segment size must be less than half the current max queue size, otherwise an error will be returned. -func (l *Queue) SetMaxSegmentSize(size int64) error { - l.mu.Lock() - defer l.mu.Unlock() - - if 2*size > l.maxSize { - return fmt.Errorf("segment size %d is too large: must be at most half of max queue size %d", size, l.maxSize) - } - - l.maxSegmentSize = size - - for _, s := range l.segments { - s.SetMaxSegmentSize(size) - } - - if l.tail.diskUsage() >= l.maxSegmentSize { - if err := l.addSegment(); err != nil { - return err - } - } - return nil -} - -func (l *Queue) PurgeOlderThan(when time.Time) error { - l.mu.Lock() - defer l.mu.Unlock() - - if len(l.segments) == 0 { - return nil - } - - cutoff := when.Truncate(time.Second) - for { - mod, err := l.head.lastModified() - if err != nil { - return err - } - - if mod.After(cutoff) || mod.Equal(cutoff) { - return nil - } - - // If this is the last segment, first append a new one allowing - // trimming to proceed. 
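// (trimHead only drops the head when more than one segment exists, so an
// empty tail segment has to be created before the last stale segment can
// be removed.)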
- if len(l.segments) == 1 { - if err := l.addSegment(); err != nil { - return err - } - } - - if err := l.trimHead(false); err != nil { - return err - } - } -} - -// LastModified returns the last time the queue was modified. -func (l *Queue) LastModified() (time.Time, error) { - l.mu.RLock() - defer l.mu.RUnlock() - - if l.tail != nil { - return l.tail.lastModified() - } - return time.Time{}.UTC(), nil -} - -func (l *Queue) Position() (*QueuePos, error) { - l.mu.RLock() - defer l.mu.RUnlock() - - qp := &QueuePos{} - if l.head != nil { - qp.Head = fmt.Sprintf("%s:%d", l.head.path, l.head.pos) - } - if l.tail != nil { - qp.Tail = fmt.Sprintf("%s:%d", l.tail.path, l.tail.filePos()) - } - return qp, nil -} - -// Empty returns whether the queue's underlying segments are empty. -func (l *Queue) Empty() bool { - l.mu.RLock() - empty := l.tail.empty() - l.mu.RUnlock() - return empty -} - -// TotalBytes returns the number of bytes of data remaining in the queue. -func (l *Queue) TotalBytes() int64 { - l.mu.RLock() - defer l.mu.RUnlock() - var n int64 - for _, s := range l.segments { - n += s.totalBytes() - } - return n -} - -// Dir returns the directory associated with the queue. -func (l *Queue) Dir() string { - return l.dir -} - -// DiskUsage returns the total size on disk used by the Queue. -func (l *Queue) DiskUsage() int64 { - var size int64 - for _, s := range l.segments { - size += s.diskUsage() - } - return size -} - -// addSegment creates a new empty segment file. -func (l *Queue) addSegment() error { - nextID, err := l.nextSegmentID() - if err != nil { - return err - } - - segment, err := newSegment(filepath.Join(l.dir, strconv.FormatUint(nextID, 10)), l.maxSegmentSize, l.verifyBlockFn) - if err != nil { - return err - } - - l.tail = segment - l.segments = append(l.segments, segment) - return nil -} - -// loadSegments loads all segments on disk. -func (l *Queue) loadSegments() (segments, error) { - var ss segments - - files, err := os.ReadDir(l.dir) - if err != nil { - return ss, err - } - - for _, segment := range files { - // Segments should be files. Skip anything that is a dir. - if segment.IsDir() { - continue - } - - // Segments file names are all numeric - _, err := strconv.ParseUint(segment.Name(), 10, 64) - if err != nil { - continue - } - - path := filepath.Join(l.dir, segment.Name()) - l.logger.Info("Loading", zap.String("path", path)) - segment, err := newSegment(path, l.maxSegmentSize, l.verifyBlockFn) - if err != nil { - return ss, err - } - - // Segment repair can leave files that have no data to process. If this happens, - // the queue can get stuck. We need to remove any empty segments to prevent this. - if segment.empty() { - if err := segment.close(); err != nil { - return ss, err - } - if err := os.Remove(segment.path); err != nil { - return ss, err - } - continue - } - - ss = append(ss, segment) - } - sort.Sort(ss) - - return ss, nil -} - -// nextSegmentID returns the next segment ID that is free. -func (l *Queue) nextSegmentID() (uint64, error) { - segments, err := os.ReadDir(l.dir) - if err != nil { - return 0, err - } - - var maxID uint64 - for _, segment := range segments { - // Segments should be files. Skip anything that is not a dir. 
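// (As in loadSegments: directories are skipped, and only regular files whose
// names parse as unsigned integers are treated as segments.)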
- if segment.IsDir() { - continue - } - - // Segments file names are all numeric - segmentID, err := strconv.ParseUint(segment.Name(), 10, 64) - if err != nil { - continue - } - - if segmentID > maxID { - maxID = segmentID - } - } - - return maxID + 1, nil -} - -// TotalSegments determines how many segments the current Queue is -// utilising. Empty segments at the end of the Queue are not counted. -func (l *Queue) TotalSegments() int { - l.mu.RLock() - defer l.mu.RUnlock() - n := len(l.segments) - - // Check last segment's size and if empty, ignore it. - if n > 0 && l.segments[n-1].empty() { - n-- - } - return n -} - -// Append appends a byte slice to the end of the queue. -func (l *Queue) Append(b []byte) error { - // Only allow append if there aren't too many concurrent requests. - select { - case l.appendCh <- struct{}{}: - defer func() { <-l.appendCh }() - default: - return ErrQueueBlocked - } - - l.mu.Lock() - defer l.mu.Unlock() - - if l.tail == nil { - return ErrNotOpen - } - - if l.queueTotalSize.Value()+int64(len(b)) > l.maxSize { - return ErrQueueFull - } - - // Append the entry to the tail, if the segment is full, - // try to create new segment and retry the append - bytesWritten, err := l.tail.append(b, &l.scratch) - if err == ErrSegmentFull { - if err := l.addSegment(); err != nil { - return err - } - bytesWritten, err = l.tail.append(b, &l.scratch) - } - - if err == nil { - l.queueTotalSize.Add(bytesWritten) - } - - return err -} - -// Current returns the current byte slice at the Head of the queue. -func (l *Queue) Current() ([]byte, error) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.head == nil { - return nil, ErrNotOpen - } - - return l.head.current() -} - -// Peek returns the next n byte slices at the Head of the queue. -func (l *Queue) PeekN(n int) ([][]byte, error) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.head == nil { - return nil, ErrNotOpen - } - - return l.head.peek(n) -} - -func (l *Queue) NewScanner() (Scanner, error) { - l.mu.RLock() - defer l.mu.RUnlock() - if l.head == nil { - return nil, ErrNotOpen - } - - ss, err := l.head.newScanner() - if err != nil { - return nil, err - } - - return &queueScanner{q: l, ss: ss}, nil -} - -// Advance moves the Head point to the next byte slice in the queue. -func (l *Queue) Advance() error { - l.mu.Lock() - defer l.mu.Unlock() - if l.head == nil { - return ErrNotOpen - } - - err := l.head.advance() - if err == io.EOF { - if err := l.trimHead(false); err != nil { - return err - } - } - - return nil -} - -func (l *Queue) trimHead(force bool) error { - // If there is only one segment, but it's full, add a new segment so - // so the Head segment can be trimmed. - if len(l.segments) == 1 && l.head.full() || force { - if err := l.addSegment(); err != nil { - return err - } - } - - var bytesDeleted int64 - - if len(l.segments) > 1 { - l.segments = l.segments[1:] - - bytesDeleted = l.head.diskUsage() - - err := l.head.close() - if err != nil { - l.logger.Info("Failed to close segment file.", zap.Error(err), zap.String("path", l.head.path)) - } - - err = os.Remove(l.head.path) - if err != nil { - l.logger.Info("Failed to remove segment file.", zap.Error(err), zap.String("path", l.head.path)) - } - l.head = l.segments[0] - } - - l.queueTotalSize.Add(-bytesDeleted) - - return nil -} - -// Segment is a Queue using a single file. The structure of a segment is a series -// lengths + block with a single footer point to the position in the segment of the -// current Head block. 
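// For example, a segment holding two blocks of 100 and 200 bytes occupies
// 8+100 + 8+200 + 8 = 324 bytes: each block is preceded by an 8-byte length,
// and the trailing 8-byte footer records the offset of the current Head block.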
-// -// ┌──────────────────────────┐ ┌──────────────────────────┐ ┌────────────┐ -// │ Block 1 │ │ Block 2 │ │ Footer │ -// └──────────────────────────┘ └──────────────────────────┘ └────────────┘ -// ┌────────────┐┌────────────┐ ┌────────────┐┌────────────┐ ┌────────────┐ -// │Block 1 Len ││Block 1 Body│ │Block 2 Len ││Block 2 Body│ │Head Offset │ -// │ 8 bytes ││ N bytes │ │ 8 bytes ││ N bytes │ │ 8 bytes │ -// └────────────┘└────────────┘ └────────────┘└────────────┘ └────────────┘ -// -// The footer holds the pointer to the Head entry at the end of the segment to allow writes -// to seek to the end and write sequentially (vs having to seek back to the beginning of -// the segment to update the Head pointer). Reads must seek to the end then back into the -// segment offset stored in the footer. -// -// Segments store arbitrary byte slices and leave the serialization to the caller. Segments -// are created with a max size and will block writes when the segment is full. -type segment struct { - mu sync.RWMutex - size int64 // Size of the entire segment file, including previously read blocks and the footer. - maxSize int64 // Maximum size of the segment file. - pos int64 // Position (offset) of current block. - file *os.File // Underlying file representing the segment. - - // verifyBlockFn is used to verify a block within a segment contains valid data. - verifyBlockFn func([]byte) error - - path string // Path of underlying file as passed to newSegment. - id uint64 // Segment ID as encoded in the file name of the segment. -} - -func newSegment(path string, maxSize int64, verifyBlockFn func([]byte) error) (*segment, error) { - f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0600) - if err != nil { - return nil, err - } - - id, err := strconv.ParseUint(filepath.Base(f.Name()), 10, 64) - if err != nil { - return nil, err - } - - stats, err := os.Stat(path) - if err != nil { - return nil, err - } - - // If the segment file is larger than the default segment size then we - // should consider a size under the file size valid. - if maxSize < stats.Size() { - maxSize = stats.Size() - } - - s := &segment{ - id: id, - file: f, - path: path, - size: stats.Size(), - maxSize: maxSize, - verifyBlockFn: verifyBlockFn, - } - - s.mu.Lock() - defer s.mu.Unlock() - if err := s.open(); err != nil { - return nil, err - } - - return s, nil -} - -func (l *segment) open() error { - // If it's a new segment then write the location of the current record in this segment - if l.size == 0 { - l.pos = 0 - - if err := l.writeUint64(uint64(l.pos)); err != nil { - return err - } - - if err := l.file.Sync(); err != nil { - return err - } - - l.size = footerSize - - return nil - } - - // Existing segment so read the current position and the size of the current block - if err := l.seekEnd(-footerSize); err != nil { - return err - } - - pos, err := l.readUint64() - if err != nil { - return err - } - - // Check if the segment is corrupted. A segment is corrupted if the position - // value doesn't point to a valid location in the segment. - if pos > uint64(l.size)-footerSize { - if pos, err = l.repair(); err != nil { - return err - } - } - - // Move to the part of the segment where the next block to read is. - // If we had to repair the segment, this will be the beginning of the - // segment. - l.pos = int64(pos) - if err := l.seekToCurrent(); err != nil { - return err - } - - // If we're at the end of the segment, we're done. - if l.pos >= l.size-footerSize { - return nil - } - - // Read the current block size. 
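// (Lengths are stored as 8-byte big-endian uint64 values, as written by
// append, so readUint64 decodes the length prefix of the block at l.pos.)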
- currentSize, err := l.readUint64() - if err != nil { - return err - } - - // Is the size reported larger than what could possibly left? If so, it's corrupted. - if int64(currentSize) > l.size-footerSize-l.pos || int64(currentSize) < 0 { - if _, err = l.repair(); err != nil { - return err - } - return l.open() - } - - // Extract the block data. - block := make([]byte, int64(currentSize)) - if err := l.readBytes(block); err != nil { - if _, err = l.repair(); err != nil { - return err - } - return l.open() - } - - // Seek back to the beginning of the block data. - if err := l.seek(l.pos + 8); err != nil { - return err - } - - // Verify the block data. - if err := l.verifyBlockFn(block); err != nil { - // Verification of the block failed... This means we need to - // truncate the segment. - if err = l.file.Truncate(l.pos); err != nil { - return err - } - - if err := l.seek(l.pos); err != nil { - return err - } - - // Start from the beginning of the segment again. - // TODO(edd): This could be improved to point at the last block in - // the segment... - if err = l.writeUint64(0); err != nil { - return err - } - - if err = l.file.Sync(); err != nil { - return err - } - l.size = l.pos + footerSize - - // re-open the segment. - return l.open() - } - return nil -} - -// full returns true if the segment can no longer accept writes. -func (l *segment) full() bool { - l.mu.RLock() - b := l.size >= l.maxSize - l.mu.RUnlock() - return b -} - -// repair fixes a corrupted segment. -// -// A segment is either corrupted within a block, or the eight byte position -// value in the footer is itself corrupted (more unlikely). -// -// A corrupted segment is corrected by walking the segment until the corrupted -// block is located, which is then truncated. Regardless of which way the -// segment is corrupted, a new position pointing to the beginning of the -// segment, is written into the footer. -// -// repair returns the new position value that the segment should continue to be -// processed from. -// -// Note: if a block has been corrupted internally, e.g., due to a bit flip, -// repair will not be able to detect this. -func (l *segment) repair() (pos uint64, err error) { - // Seek to beginning of segment. - if err = l.seek(0); err != nil { - return pos, err - } - - var ( - recordSize uint64 - offset int64 - truncate bool - ) - - // Seek through each block in the segment until we have either read up to - // the footer, or we reach the end of the segment prematurely. - for { - offset = l.filePos() - - if offset == l.size-footerSize { - // Segment looks good as we've successfully reached the end. Segment - // position in footer must be bad. This is a very unlikely case, - // since it means only the last eight bytes of an otherwise - // acceptable segment were corrupted. - break - } - - // Read the record size. - if recordSize, err = l.readUint64(); err != nil { - truncate = true - break - } - - // Skip the rest of the record. If we go beyond the end of the segment, - // or we hit an error, then we will truncate. - if _, err = l.file.Seek(int64(recordSize), io.SeekCurrent); err != nil || l.filePos() > l.size-footerSize { - truncate = true - break - } - } - - if truncate { - // We reached the end of the segment before we were supposed to, which - // means the last block is short. Truncate the corrupted last block - // onwards. - if err = l.file.Truncate(offset); err != nil { - return pos, err - } - } - - // Set the position as the beginning of the segment, so that the entire - // segment will be replayed. 
- if err = l.seek(offset); err != nil { - return pos, err - } - - if err = l.writeUint64(pos); err != nil { - return pos, err - } - - if err = l.file.Sync(); err != nil { - return pos, err - } - - l.size = offset + 8 - return pos, err // Current implementation always returns 0 position. -} - -// append adds byte slice to the end of segment. -func (l *segment) append(b []byte, scratch *bytes.Buffer) (int64, error) { - l.mu.Lock() - defer l.mu.Unlock() - - if l.file == nil { - return 0, ErrNotOpen - } - - if l.size > l.maxSize { - return 0, ErrSegmentFull - } - - if err := l.seekEnd(-footerSize); err != nil { - return 0, err - } - - // TODO(SGC): error condition: (len(b) + l.size) > l.maxSize == true; scanner.Next will fail reading last block and get stuck - - // If the size of this block is over the max size of the file, - // update the max file size so we don't get an error indicating - // the size is invalid when reading it back. - l64 := int64(len(b)) - if l64 > l.maxSize { - l.maxSize = l64 - } - - // Construct the segment entry in memory first so it can be - // written to file atomically. - scratch.Reset() - - var buf [8]byte - binary.BigEndian.PutUint64(buf[:], uint64(len(b))) - if _, err := scratch.Write(buf[:]); err != nil { - return 0, err - } - - if _, err := scratch.Write(b); err != nil { - return 0, err - } - - binary.BigEndian.PutUint64(buf[:], uint64(l.pos)) - if _, err := scratch.Write(buf[:]); err != nil { - return 0, err - } - - // Write the segment entry to disk. - if err := l.writeBytes(scratch.Bytes()); err != nil { - return 0, err - } - - if err := l.file.Sync(); err != nil { - return 0, err - } - - bytesWritten := int64(len(b)) + 8 // uint64 for length - l.size += bytesWritten - - return bytesWritten, nil -} - -// empty returns whether there are any remaining blocks to be read from the segment. -func (l *segment) empty() bool { - l.mu.RLock() - defer l.mu.RUnlock() - return int64(l.pos) == l.size-footerSize -} - -// current returns the byte slice that the current segment points to. -func (l *segment) current() ([]byte, error) { - l.mu.Lock() - defer l.mu.Unlock() - - if int64(l.pos) == l.size-footerSize { - return nil, io.EOF - } - - if err := l.seekToCurrent(); err != nil { - return nil, err - } - - // read the record size - sz, err := l.readUint64() - if err != nil { - return nil, err - } - - if sz > uint64(l.maxSize) { - return nil, fmt.Errorf("record size out of range: max %d: got %d", l.maxSize, sz) - } - - b := make([]byte, sz) - if err := l.readBytes(b); err != nil { - return nil, err - } - - return b, nil -} - -func (l *segment) peek(n int) ([][]byte, error) { - l.mu.Lock() - defer l.mu.Unlock() - - if int64(l.pos) == l.size-footerSize { - return nil, io.EOF - } - - if err := l.seekToCurrent(); err != nil { - return nil, err - } - - var blocks [][]byte - pos := l.pos - for i := 0; i < n; i++ { - - if int64(pos) == l.size-footerSize { - return blocks, nil - } - - // read the record size - sz, err := l.readUint64() - if err == io.EOF { - return blocks, nil - } else if err != nil { - return nil, err - } - pos += 8 - - if sz == 0 { - continue - } - - if sz > uint64(l.maxSize) { - return nil, fmt.Errorf("record size out of range: max %d: got %d", l.maxSize, sz) - } - - pos += int64(sz) - - b := make([]byte, sz) - if err := l.readBytes(b); err != nil { - return nil, err - } - blocks = append(blocks, b) - } - return blocks, nil -} - -// advance advances the current value pointer. 
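For illustration, `repair` above recovers by walking the length-prefixed blocks from the start of the file and truncating at the first block whose claimed length runs past the footer. A standalone sketch of that scan over an in-memory segment image, simplified to only report where truncation would happen and assuming the same layout as the previous sketch:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const footerSize = 8

// firstBadOffset walks [length][body] records in data (which ends with an
// 8-byte footer) and returns the offset of the first record that would run
// past the footer, or -1 if every record fits.
func firstBadOffset(data []byte) int64 {
	end := int64(len(data)) - footerSize
	var off int64
	for off < end {
		if end-off < 8 {
			return off // not even room for a length prefix
		}
		size := int64(binary.BigEndian.Uint64(data[off : off+8]))
		if size < 0 || off+8+size > end {
			return off // record claims more bytes than remain before the footer
		}
		off += 8 + size
	}
	return -1
}

func main() {
	// One good 3-byte record, then a record claiming 1000 bytes, then the footer.
	data := make([]byte, 8+3+8+2+footerSize)
	binary.BigEndian.PutUint64(data[0:8], 3)
	binary.BigEndian.PutUint64(data[11:19], 1000)
	fmt.Println(firstBadOffset(data)) // 11: repair would truncate from here
}
```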
-// -// Usually a scanner should be used instead of calling advance -func (l *segment) advance() error { - if err := l.seekToCurrent(); err != nil { - return err - } - sz, err := l.readUint64() - if err != nil { - return err - } - currentSize := int64(sz) - return l.advanceTo(l.pos + currentSize + 8) -} - -// advanceTo advances the segment to the position specified by pos -func (l *segment) advanceTo(pos int64) error { - l.mu.Lock() - defer l.mu.Unlock() - - if l.file == nil { - return ErrNotOpen - } - - if pos < l.pos { - return fmt.Errorf("attempt to unread queue from %d to %d", l.pos, pos) - } - - l.pos = pos - - // If we're attempting to move beyond the end of the file, can't advance - if int64(pos) > l.size-footerSize { - return io.EOF - } - - if err := l.seekEnd(-footerSize); err != nil { - return err - } - - if err := l.writeUint64(uint64(pos)); err != nil { - return err - } - - if err := l.file.Sync(); err != nil { - return err - } - - if err := l.seekToCurrent(); err != nil { - return err - } - - _, err := l.readUint64() - if err != nil { - return err - } - - if int64(l.pos) == l.size-footerSize { - return io.EOF - } - - return nil -} - -// totalBytes returns the number of bytes remaining in the segment file, excluding the footer. -func (l *segment) totalBytes() (n int64) { - l.mu.RLock() - n = l.size - int64(l.pos) - footerSize - l.mu.RUnlock() - return -} - -func (l *segment) close() error { - l.mu.Lock() - defer l.mu.Unlock() - err := l.file.Close() - l.file = nil - return err -} - -func (l *segment) lastModified() (time.Time, error) { - l.mu.RLock() - defer l.mu.RUnlock() - - if l.file == nil { - return time.Time{}, ErrNotOpen - } - - stats, err := os.Stat(l.file.Name()) - if err != nil { - return time.Time{}, err - } - return stats.ModTime().UTC(), nil -} - -func (l *segment) diskUsage() int64 { - l.mu.RLock() - defer l.mu.RUnlock() - return l.size -} - -func (l *segment) SetMaxSegmentSize(size int64) { - l.mu.Lock() - defer l.mu.Unlock() - l.maxSize = size -} - -func (l *segment) seekToCurrent() error { - return l.seek(int64(l.pos)) -} - -func (l *segment) seek(pos int64) error { - n, err := l.file.Seek(pos, io.SeekStart) - if err != nil { - return err - } - - if n != pos { - return fmt.Errorf("bad seek. exp %v, got %v", pos, n) - } - - return nil -} - -func (l *segment) seekEnd(pos int64) error { - _, err := l.file.Seek(pos, io.SeekEnd) - return err -} - -func (l *segment) filePos() int64 { - n, _ := l.file.Seek(0, io.SeekCurrent) - return n -} - -func (l *segment) readUint64() (uint64, error) { - var b [8]byte - if err := l.readBytes(b[:]); err != nil { - return 0, err - } - return binary.BigEndian.Uint64(b[:]), nil -} - -func (l *segment) writeUint64(sz uint64) error { - var buf [8]byte - binary.BigEndian.PutUint64(buf[:], sz) - return l.writeBytes(buf[:]) -} - -func (l *segment) writeBytes(b []byte) error { - n, err := l.file.Write(b) - if err != nil { - return err - } - - if n != len(b) { - return fmt.Errorf("short write. got %d, exp %d", n, len(b)) - } - return nil -} - -func (l *segment) readBytes(b []byte) error { - n, err := l.file.Read(b) - if err != nil { - return err - } - - if n != len(b) { - return fmt.Errorf("bad read. 
exp %v, got %v", len(b), n) - } - return nil -} diff --git a/pkg/durablequeue/queue_test.go b/pkg/durablequeue/queue_test.go deleted file mode 100644 index 054bc5c065d..00000000000 --- a/pkg/durablequeue/queue_test.go +++ /dev/null @@ -1,710 +0,0 @@ -package durablequeue - -import ( - "encoding/binary" - "fmt" - "io" - "math" - "os" - "path/filepath" - "strings" - "sync" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func BenchmarkQueueAppend(b *testing.B) { - q, dir := newTestQueue(b, withMaxSize(1024*1024*1024)) - defer os.RemoveAll(dir) - - for i := 0; i < b.N; i++ { - if err := q.Append([]byte(fmt.Sprintf("%d", i))); err != nil { - println(q.DiskUsage()) - b.Fatalf("Queue.Append failed: %v", err) - } - } -} - -func TestQueueAppendOne(t *testing.T) { - q, dir := newTestQueue(t) - defer os.RemoveAll(dir) - - if err := q.Append([]byte("test")); err != nil { - t.Fatalf("Queue.Append failed: %v", err) - } - - exp := filepath.Join(dir, "1") - stats, err := os.Stat(exp) - if os.IsNotExist(err) { - t.Fatalf("Queue.Append file not exists. exp %v to exist", exp) - } - - // 8 byte header ptr + 8 byte record len + record len - if exp := int64(8 + 8 + 4); stats.Size() != exp { - t.Fatalf("Queue.Append file size mismatch. got %v, exp %v", stats.Size(), exp) - } - - cur, err := q.Current() - if err != nil { - t.Fatalf("Queue.Current failed: %v", err) - } - - if exp := "test"; string(cur) != exp { - t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp) - } -} - -func TestQueueAppendMultiple(t *testing.T) { - q, dir := newTestQueue(t) - defer os.RemoveAll(dir) - - if err := q.Append([]byte("one")); err != nil { - t.Fatalf("Queue.Append failed: %v", err) - } - - if err := q.Append([]byte("two")); err != nil { - t.Fatalf("Queue.Append failed: %v", err) - } - - for _, exp := range []string{"one", "two"} { - cur, err := q.Current() - if err != nil { - t.Fatalf("Queue.Current failed: %v", err) - } - - if string(cur) != exp { - t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp) - } - - if err := q.Advance(); err != nil { - t.Fatalf("Queue.Advance failed: %v", err) - } - } -} - -func TestQueueScanRace(t *testing.T) { - const numWrites = 100 - const writeSize = 270000 - q, dir := newTestQueue(t, withMaxSize(1024*1024*1024)) - defer os.RemoveAll(dir) - reads, writes := 0, 0 - wg := sync.WaitGroup{} - wg.Add(2) - done := make(chan struct{}) - go func() { - defer wg.Done() - for w := 0; w < numWrites; w++ { - writes++ - buf := make([]byte, writeSize) - buf[0] = byte(w) - q.Append(buf) - } - close(done) - }() - go func() { - defer wg.Done() - for { - scan, err := q.NewScanner() - if err == io.EOF { - continue - } - require.NoError(t, err) - for scan.Next() { - // ensure bytes come out in the right order - b := scan.Bytes() - assert.Equal(t, writeSize, len(b)) - assert.Equal(t, byte(reads), b[0]) - reads++ - } - _, err = scan.Advance() - require.NoError(t, err) - select { - case <-done: - if q.Empty() { - return - } - default: - } - } - }() - wg.Wait() - assert.Equal(t, numWrites, writes) - assert.Equal(t, numWrites, reads) -} - -func TestQueueAdvancePastEnd(t *testing.T) { - q, dir := newTestQueue(t) - defer os.RemoveAll(dir) - - // append one entry, should go to the first segment - if err := q.Append([]byte("one")); err != nil { - t.Fatalf("Queue.Append failed: %v", err) - } - - // set the segment size low to force a new segment to be created - 
q.SetMaxSegmentSize(12) - - // Should go into a new segment - if err := q.Append([]byte("two")); err != nil { - t.Fatalf("Queue.Append failed: %v", err) - } - - // should read from first segment - cur, err := q.Current() - if err != nil { - t.Fatalf("Queue.Current failed: %v", err) - } - - if exp := "one"; string(cur) != exp { - t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp) - } - - if err := q.Advance(); err != nil { - t.Fatalf("Queue.Advance failed: %v", err) - } - - // ensure the first segment file is removed since we've advanced past the end - _, err = os.Stat(filepath.Join(dir, "1")) - if !os.IsNotExist(err) { - t.Fatalf("Queue.Advance should have removed the segment") - } - - // should read from second segment - cur, err = q.Current() - if err != nil { - t.Fatalf("Queue.Current failed: %v", err) - } - - if exp := "two"; string(cur) != exp { - t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp) - } - - _, err = os.Stat(filepath.Join(dir, "2")) - if os.IsNotExist(err) { - t.Fatalf("Queue.Advance should have removed the segment") - } - - if err := q.Advance(); err != nil { - t.Fatalf("Queue.Advance failed: %v", err) - } - - if _, err = q.Current(); err != io.EOF { - t.Fatalf("Queue.Current should have returned error") - } - - // Should go into a new segment because the existing segment - // is full - if err := q.Append([]byte("two")); err != nil { - t.Fatalf("Queue.Append failed: %v", err) - } - - // should read from the new segment - cur, err = q.Current() - if err != nil { - t.Fatalf("Queue.Current failed: %v", err) - } - - if exp := "two"; string(cur) != exp { - t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp) - } -} - -func TestQueueFull(t *testing.T) { - q, dir := newTestQueue(t, withMaxSize(64), withMaxSegmentSize(16)) - defer os.RemoveAll(dir) - - require.Equal(t, ErrQueueFull, q.Append([]byte(strings.Repeat("a", 65)))) -} - -func TestQueueChangeMaxSize(t *testing.T) { - q, dir := newTestQueue(t, withMaxSize(64), withMaxSegmentSize(12)) - defer os.RemoveAll(dir) - - // Write a full segment into the queue. - for i := 0; i < 3; i++ { - require.NoError(t, q.Append([]byte("helloworld"))) - } - - // Shrink the queue's max past the current size. - require.NoError(t, q.SetMaxSize(48)) - - // Writes blocked. - require.Equal(t, ErrQueueFull, q.Append([]byte("abcdefg"))) - - // Read off bytes. - cur, err := q.Current() - require.NoError(t, err) - require.Equal(t, "helloworld", string(cur)) - require.NoError(t, q.Advance()) - - // Now enough room to write. - require.NoError(t, q.Append([]byte("abcdefg"))) - - // Writes blocked. - require.Equal(t, ErrQueueFull, q.Append([]byte("hijklmnop"))) - - // Resize to make enough room. 
- require.NoError(t, q.SetMaxSize(100)) - require.NoError(t, q.Append([]byte("hijklmnop"))) - - for _, exp := range []string{"helloworld", "helloworld", "abcdefg", "hijklmnop"} { - cur, err := q.Current() - require.NoError(t, err) - require.Equal(t, exp, string(cur)) - require.NoError(t, q.Advance()) - } - _, err = q.Current() - require.Equal(t, io.EOF, err) -} - -func TestQueueReopen(t *testing.T) { - q, dir := newTestQueue(t, withVerify(func([]byte) error { return nil })) - defer os.RemoveAll(dir) - - if err := q.Append([]byte("one")); err != nil { - t.Fatalf("Queue.Append failed: %v", err) - } - - cur, err := q.Current() - if err != nil { - t.Fatalf("Queue.Current failed: %v", err) - } - - if exp := "one"; string(cur) != exp { - t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp) - } - - // close and re-open the queue - if err := q.Close(); err != nil { - t.Fatalf("Queue.Close failed: %v", err) - } - - if err := q.Open(); err != nil { - t.Fatalf("failed to re-open queue: %v", err) - } - - // Make sure we can read back the last current value - cur, err = q.Current() - if err != nil { - t.Fatalf("Queue.Current failed: %v", err) - } - - if exp := "one"; string(cur) != exp { - t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp) - } - - if err := q.Append([]byte("two")); err != nil { - t.Fatalf("Queue.Append failed: %v", err) - } - - if err := q.Advance(); err != nil { - t.Fatalf("Queue.Advance failed: %v", err) - } - - cur, err = q.Current() - if err != nil { - t.Fatalf("Queue.Current failed: %v", err) - } - - if exp := "two"; string(cur) != exp { - t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp) - } -} - -func TestPurgeQueue(t *testing.T) { - if testing.Short() { - t.Skip("Skipping purge queue") - } - - q, dir := newTestQueue(t) - defer os.RemoveAll(dir) - - if err := q.Append([]byte("one")); err != nil { - t.Fatalf("Queue.Append failed: %v", err) - } - - cur, err := q.Current() - if err != nil { - t.Fatalf("Queue.Current failed: %v", err) - } - - if exp := "one"; string(cur) != exp { - t.Errorf("Queue.Current mismatch: got %v, exp %v", string(cur), exp) - } - - time.Sleep(time.Second) - - if err := q.PurgeOlderThan(time.Now()); err != nil { - t.Errorf("Queue.PurgeOlderThan failed: %v", err) - } - - if _, err := q.LastModified(); err != nil { - t.Errorf("Queue.LastModified returned error: %v", err) - } - - _, err = q.Current() - if err != io.EOF { - t.Fatalf("Queue.Current expected io.EOF, got: %v", err) - } -} - -func TestQueue_TotalBytes(t *testing.T) { - q, dir := newTestQueue(t, withVerify(func([]byte) error { return nil })) - defer os.RemoveAll(dir) - - if n := q.TotalBytes(); n != 0 { - t.Fatalf("Queue.TotalBytes mismatch: got %v, exp %v", n, 0) - } - - if err := q.Append([]byte("one")); err != nil { - t.Fatalf("Queue.Append failed: %v", err) - } - - if n := q.TotalBytes(); n != 11 { // 8 byte size + 3 byte content - t.Fatalf("Queue.TotalBytes mismatch: got %v, exp %v", n, 11) - } - - // Close that queue, open a new one from the same file(s). 
- if err := q.Close(); err != nil { - t.Fatalf("Queue.Close failed: %v", err) - } - q, err := NewQueue(dir, 1024, 512, &SharedCount{}, MaxWritesPending, func([]byte) error { return nil }) - if err != nil { - t.Fatalf("failed to create queue: %v", err) - } - - if err := q.Open(); err != nil { - t.Fatalf("failed to open queue: %v", err) - } - - if n := q.TotalBytes(); n != 11 { - t.Fatalf("Queue.TotalBytes mismatch: got %v, exp %v", n, 11) - } - - if err := q.Append([]byte("13 characters")); err != nil { - t.Fatalf("Queue.Append failed: %v", err) - } - - if n := q.TotalBytes(); n != 32 { // 11 + 13 + 8 - t.Fatalf("Queue.TotalBytes mismatch: got %v, exp %v", n, 32) - } -} - -// This test verifies the queue will advance in the following scenario: -// -// - There is one segment -// - The segment is not full -// - The segment record size entry is corrupted, resulting in -// currentRecordSize + pos > fileSize and -// therefore the Advance would fail. -func TestQueue_AdvanceSingleCorruptSegment(t *testing.T) { - q, dir := newTestQueue(t, withVerify(func([]byte) error { return nil })) - defer os.RemoveAll(dir) - - var err error - appendN := func(n int) { - for i := 0; i < n; i++ { - // 12 bytes per entry + length = 20 bytes per record - err = q.Append([]byte(strings.Repeat("<>", 6))) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - } - } - - appendN(5) - - mustScan := func(fn func(Scanner) int64, advanceErr string) { - t.Helper() - scan, err := q.NewScanner() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - exp := fn(scan) - - got, err := scan.Advance() - if advanceErr == "" && err != nil { - t.Fatalf("unexpected error: %v", err) - } - if advanceErr != "" { - assert.EqualError(t, err, advanceErr) - } - - if got != exp { - t.Errorf("unexpected value; -got/+exp\n%s", cmp.Diff(got, exp)) - } - } - - // scan a couple of entries - mustScan(func(scan Scanner) int64 { - scan.Next() - scan.Next() - return 2 - }, "") - - seg := q.segments[0].path - f, err := os.OpenFile(seg, os.O_RDWR, os.ModePerm) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - f.Seek(60, io.SeekStart) - // corrupt a size entry - binary.Write(f, binary.BigEndian, int64(1e9)) - f.Sync() - f.Close() - - // expect only two Next calls to succeed due to corruption - mustScan(func(scan Scanner) int64 { - scan.Next() - scan.Next() - scan.Next() - return 2 - }, "dropped bad disk queue segment: record size out of range: max 512: got 1000000000") - - if got, exp := q.TotalBytes(), int64(0); got != exp { - // queue should have been truncated due to error - t.Errorf("unexpected value: -got/+exp\n%s", cmp.Diff(got, exp)) - } - - // at this point, the segment should have been trimmed, so the queue is empty - note we lost some writes from - // the trimmed segment! - - appendN(5) - mustScan(func(scan Scanner) int64 { - scan.Next() - scan.Next() - scan.Next() - scan.Next() - return 4 - }, "") - - // the queue should have one record left (20 bytes per record) - if got, exp := q.TotalBytes(), int64(20); got != exp { - t.Errorf("unexpected value: -got/+exp\n%s", cmp.Diff(got, exp)) - } - - // drain the last entry - mustScan(func(scan Scanner) int64 { - scan.Next() - return 1 - }, "") - - // queue should now be empty again - if got, exp := q.TotalBytes(), int64(0); got != exp { - t.Errorf("unexpected value: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -// A TestSegment is a simple representation of a segment. Blocks are represented -// by a slice of sizes. 
When TestSegments are written to a file, they're -// unpacked into the correct representation. -type TestSegment struct { - reportedBlockSizes []uint64 - actualBlockSizes []uint64 - position uint64 -} - -// Encode encodes the TestSegment to the provided writer, ensuring that the -// correct space is left between subsequent blocks, and that the position is -// written into last eight bytes. -func (s *TestSegment) Encode(w io.Writer) error { - if len(s.reportedBlockSizes) != len(s.actualBlockSizes) { - return fmt.Errorf("invalid TestSegment. Block lengths must be equal: %d - %d", s.reportedBlockSizes, s.actualBlockSizes) - } - - var buf [8]byte - for i, size := range s.reportedBlockSizes { - // Write the record size. - binary.BigEndian.PutUint64(buf[:], size) - if _, err := w.Write(buf[:]); err != nil { - return err - } - - // Pad the rest of the block with the actual size in the TestSegment - block := make([]byte, s.actualBlockSizes[i]) - if _, err := w.Write(block); err != nil { - return err - } - } - - // Write the position into end of writer. - binary.BigEndian.PutUint64(buf[:], s.position) - _, err := w.Write(buf[:]) - return err -} - -// String prints a hexadecimal representation of the TestSegment. -func (s *TestSegment) String() string { - - leftPad := func(s string) string { - out := make([]string, 0, 8) - for i := 0; i < 16-len(s); i++ { - out = append(out, "0") - } - return strings.Join(out, "") + s - } - - // Build the blocks - var out string - for i, repSize := range s.reportedBlockSizes { - out += leftPad(fmt.Sprintf("%x", repSize)) - - block := make([]string, 0, int(s.actualBlockSizes[i])*2) // Two words per byte - for i := 0; i < cap(block); i++ { - block = append(block, "0") - } - out += strings.Join(block, "") - } - - // Write out the position - return out + leftPad(fmt.Sprintf("%x", s.position)) -} - -// mustCreateSegment creates a new segment from the provided TestSegment, and -// a directory to store the segment file. -// -// mustCreateSegment calls newSegment, which means it calls open on the segment, -// and possibly attempts to repair the TestSegment. -func mustCreateSegment(ts *TestSegment, dir string, vf func([]byte) error) *segment { - fd, err := os.CreateTemp(dir, "") - if err != nil { - panic(err) - } - - // Encode TestSegment into file. - if err := ts.Encode(fd); err != nil { - panic(err) - } - - // Close the file, so the segment can open it safely. - if err := fd.Close(); err != nil { - panic(err) - } - - // Create a new segment. - segment, err := newSegment(fd.Name(), DefaultSegmentSize, vf) - if err != nil { - panic(err) - } - return segment -} - -// ReadSegment returns a hexadecimal representation of a segment. 
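For illustration, `TestSegment.String` above renders the expected file as hex: each reported size as 16 hex digits, each body as `2*size` zero digits, and the footer position as the final 16 digits. A tiny sketch of that rendering for a one-block segment (illustrative values only):

```go
package main

import "fmt"

func main() {
	// One block: reported size 8 (16 hex digits), an 8-byte body of zeros
	// (16 hex digits), then the footer position 0 (16 hex digits).
	exp := fmt.Sprintf("%016x", 8) + "0000000000000000" + fmt.Sprintf("%016x", 0)
	fmt.Println(exp) // prints 0000000000000008 followed by 32 zeros
}
```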
-func ReadSegment(segment *segment) string { - data, err := os.ReadFile(segment.path) - if err != nil { - panic(err) - } - return fmt.Sprintf("%x", data) -} - -func TestSegment_repair(t *testing.T) { - dir := t.TempDir() - - examples := []struct { - In *TestSegment - Expected *TestSegment - VerifyFn func([]byte) error - }{ - { // Valid segment - In: &TestSegment{ - reportedBlockSizes: []uint64{8, 16, 24}, - actualBlockSizes: []uint64{8, 16, 24}, - position: 16, - }, - Expected: &TestSegment{ - reportedBlockSizes: []uint64{8, 16, 24}, - actualBlockSizes: []uint64{8, 16, 24}, - position: 16, - }, - }, - { // Valid segment - In: &TestSegment{ - reportedBlockSizes: []uint64{8, 16}, - actualBlockSizes: []uint64{8, 16}, - position: 16, - }, - Expected: &TestSegment{ - reportedBlockSizes: []uint64{8, 16}, - actualBlockSizes: []uint64{8, 16}, - position: 16, - }, - }, - { // Valid segment with a corrupted position value - In: &TestSegment{ - reportedBlockSizes: []uint64{8, 16, 24}, - actualBlockSizes: []uint64{8, 16, 24}, - position: 10292901, - }, - Expected: &TestSegment{ - reportedBlockSizes: []uint64{8, 16, 24}, - actualBlockSizes: []uint64{8, 16, 24}, - position: 0, - }, - }, - { // Corrupted last block - In: &TestSegment{ - reportedBlockSizes: []uint64{8, 16, 24}, - actualBlockSizes: []uint64{8, 16, 13}, - position: 998172398, - }, - Expected: &TestSegment{ - reportedBlockSizes: []uint64{8, 16}, - actualBlockSizes: []uint64{8, 16}, - position: 0, - }, - }, - { // Corrupted block followed by valid later blocks with valid position - In: &TestSegment{ - reportedBlockSizes: []uint64{8, 16, 24, 8}, - actualBlockSizes: []uint64{8, 16, 18, 8}, - position: 40, // Will point to third block. - }, - Expected: &TestSegment{ - reportedBlockSizes: []uint64{8, 16}, - actualBlockSizes: []uint64{8, 16}, - position: 0, - }, - // Mock out VerifyFn to determine third block is invalid. - VerifyFn: func(b []byte) error { - if len(b) == 24 { - // Second block in example segment. - return fmt.Errorf("a verification error") - } - return nil - }, - }, - { // Block size overflows when converting to int64 - In: &TestSegment{ - reportedBlockSizes: []uint64{math.MaxUint64}, - actualBlockSizes: []uint64{8}, - position: 0, - }, - Expected: &TestSegment{ - reportedBlockSizes: []uint64{}, - actualBlockSizes: []uint64{}, - position: 0, - }, - }, - } - - for i, example := range examples { - if example.VerifyFn == nil { - example.VerifyFn = func([]byte) error { return nil } - } - segment := mustCreateSegment(example.In, dir, example.VerifyFn) - t.Cleanup(func() { - segment.close() - }) - - if got, exp := ReadSegment(segment), example.Expected.String(); got != exp { - t.Errorf("[example %d]\ngot: %s\nexp: %s\n\n", i+1, got, exp) - } - } -} diff --git a/pkg/durablequeue/scanner.go b/pkg/durablequeue/scanner.go deleted file mode 100644 index 5f1f4e2d682..00000000000 --- a/pkg/durablequeue/scanner.go +++ /dev/null @@ -1,171 +0,0 @@ -package durablequeue - -import ( - "fmt" - "io" -) - -type Scanner interface { - // Next returns the current block and advances the scanner to the next block. - Next() bool - - // Err returns any non io.EOF error as a result of calling the Next function. - Err() error - - // Bytes returns the most recent block generated by a call to Next. A new buffer - // is generated with each call to Next, so the buffer may be retained by the caller. - Bytes() []byte - - // Advance moves the head pointer to the next byte slice in the queue. - // Advance is guaranteed to make forward progress and is idempotent. 
- Advance() (int64, error) -} - -type queueScanner struct { - q *Queue - ss *segmentScanner -} - -func (qs *queueScanner) Next() bool { - return qs.ss.Next() -} - -func (qs *queueScanner) Err() error { - return qs.ss.Err() -} - -func (qs *queueScanner) Bytes() []byte { - return qs.ss.Bytes() -} - -func (qs *queueScanner) Advance() (n int64, err error) { - n, err = qs.ss.Advance() - // always advance to the next segment if the current segment presents any error - // condition, which either indicates success (io.EOF) or corruption of some kind. - if err != nil { - qs.q.mu.Lock() - defer qs.q.mu.Unlock() - - // retry under lock - otherwise it is possible a write happened between getting the EOF - // and taking the queue lock. - if err == io.EOF { - n, err = qs.ss.Advance() - if err == nil { - return n, nil - } - } - - // If the error was not EOF, force the segment to be trimmed - force := err != io.EOF - if trimErr := qs.q.trimHead(force); trimErr != nil { - return 0, trimErr - } - if err != io.EOF { - // We are dropping writes due to this error, so we should report it - return n, fmt.Errorf("dropped bad disk queue segment: %w", err) - } - } - return n, nil -} - -type segmentScanner struct { - s *segment - pos int64 - n int64 - buf []byte - err error - eof bool - - //TODO(SGC): consider adding backing buffer once we send writes to remote node as single array -} - -var _ Scanner = (*segmentScanner)(nil) - -func (l *segment) newScanner() (*segmentScanner, error) { - l.mu.Lock() - defer l.mu.Unlock() - - // If we're at the end of the file, can't advance - if int64(l.pos) == l.size-footerSize { - return nil, io.EOF - } - - if err := l.seekToCurrent(); err != nil { - return nil, err - } - - return &segmentScanner{s: l, pos: l.pos}, nil -} - -func (ss *segmentScanner) Next() bool { - ss.s.mu.Lock() - defer ss.s.mu.Unlock() - - if ss.eof || ss.err != nil { - return false - } - - if err := ss.s.seek(ss.pos); err != nil { - ss.setErr(err) - return false - } - - for { - if int64(ss.pos) == ss.s.size-footerSize { - ss.eof = true - return false - } - - ss.n++ - // read the record size - sz, err := ss.s.readUint64() - if err == io.EOF { - return false - } else if err != nil { - ss.setErr(err) - return false - } - - ss.pos += 8 + int64(sz) - if sz == 0 { - continue - } - - if sz > uint64(ss.s.maxSize) { - ss.setErr(fmt.Errorf("record size out of range: max %d: got %d", ss.s.maxSize, sz)) - return false - } - - // The node processor will hold a reference to ss.buf via the Bytes method, - // so it's important to create a new slice here, - // even though it looks like we could reslice ss.buf. 
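For illustration, the intended consumption pattern for `Scanner` (as exercised by `TestQueueScanRace` earlier in this diff) is to drain blocks with `Next`/`Bytes` and then commit progress with a single `Advance`. A sketch written as if it lived alongside the package, so `Queue` and `Scanner` refer to the types above; the helper name is illustrative:

```go
package durablequeue

import (
	"io"
	"log"
)

// drainQueue reads every block currently in q, hands each to process,
// then commits progress with a single Advance.
func drainQueue(q *Queue, process func([]byte) error) error {
	scan, err := q.NewScanner()
	if err == io.EOF {
		return nil // queue is empty
	} else if err != nil {
		return err
	}

	for scan.Next() {
		// Bytes returns a fresh slice for each block, so callers may retain it.
		if err := process(scan.Bytes()); err != nil {
			return err
		}
	}
	if err := scan.Err(); err != nil {
		// A bad block stopped the scan; Advance below drops that segment.
		log.Printf("durable queue scan error: %v", err)
	}

	// Advance moves the queue's head past everything successfully read.
	_, err = scan.Advance()
	return err
}
```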
- ss.buf = make([]byte, sz) - - if err := ss.s.readBytes(ss.buf); err != nil { - ss.setErr(err) - return false - } - - return true - } -} - -func (ss *segmentScanner) setErr(err error) { - ss.err = err - ss.buf = nil -} - -func (ss *segmentScanner) Err() error { - return ss.err -} - -func (ss *segmentScanner) Bytes() []byte { - return ss.buf -} - -func (ss *segmentScanner) Advance() (int64, error) { - if ss.err != nil { - return ss.n, ss.err - } - return ss.n, ss.s.advanceTo(ss.pos) -} diff --git a/pkg/durablequeue/scanner_test.go b/pkg/durablequeue/scanner_test.go deleted file mode 100644 index 6a14c56cb34..00000000000 --- a/pkg/durablequeue/scanner_test.go +++ /dev/null @@ -1,433 +0,0 @@ -package durablequeue - -import ( - "encoding/binary" - "fmt" - "io" - "os" - "reflect" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/stretchr/testify/require" -) - -type opts struct { - fn func([]byte) error - maxSize int64 - maxSegmentSize int64 -} - -func withVerify(fn func([]byte) error) func(o *opts) { - return func(o *opts) { - o.fn = fn - } -} - -func withMaxSize(maxSize int64) func(o *opts) { - return func(o *opts) { - o.maxSize = maxSize - } -} - -func withMaxSegmentSize(maxSegmentSize int64) func(o *opts) { - return func(o *opts) { - o.maxSegmentSize = maxSegmentSize - } -} - -// newTestQueue creates and opens a new Queue with a default -// maxSize of 1024 -func newTestQueue(t testing.TB, fns ...func(o *opts)) (*Queue, string) { - t.Helper() - - opts := &opts{maxSize: 1024, maxSegmentSize: 512} - for _, fn := range fns { - fn(opts) - } - - tmp := "" - if htmp, ok := os.LookupEnv("HH_TMP"); ok { - tmp = os.ExpandEnv(htmp) - } - - dir, err := os.MkdirTemp(tmp, "hh_queue") - require.NoError(t, err) - - q, err := NewQueue(dir, opts.maxSize, opts.maxSegmentSize, &SharedCount{}, MaxWritesPending, opts.fn) - require.NoError(t, err) - - require.NoError(t, q.Open()) - return q, dir -} - -func TestQueue_NewScanner(t *testing.T) { - q, dir := newTestQueue(t) - defer os.RemoveAll(dir) - - for i := 0; i < 50; i++ { - q.Append([]byte(fmt.Sprintf("%d", i))) - } - - scan, err := q.NewScanner() - if err != nil { - t.Fatalf("Queue.NewScanner failed: %v", err) - } - - var have []string - - for i := 0; i < 3; i++ { - scan.Next() - if scan.Err() != nil { - t.Fatalf("Next failed: %v", scan.Err()) - } - - have = append(have, string(scan.Bytes())) - } - - if want := []string{"0", "1", "2"}; !reflect.DeepEqual(have, want) { - t.Fatalf("Next failed: have %v, want %v", have, want) - } - - n, err := scan.Advance() - if err != nil { - t.Fatalf("Advance failed: %v", err) - } - - if want := int64(3); n != want { - t.Fatalf("Advance failed: have %d, want %d", n, want) - } - - v, err := q.Current() - if err != nil { - t.Fatalf("Queue.Current failed: %v", err) - } - - if want := "3"; string(v) != want { - t.Fatalf("Queue.Current failed: have %s, want %s", v, want) - } -} - -func TestQueue_NewScanner_ScanToEnd(t *testing.T) { - q, dir := newTestQueue(t) - defer os.RemoveAll(dir) - - var want []string - for i := 0; i < 50; i++ { - want = append(want, fmt.Sprintf("%d", i)) - } - - for _, v := range want { - q.Append([]byte(v)) - } - - scan, err := q.NewScanner() - if err != nil { - t.Fatalf("Queue.NewScanner failed: %v", err) - } - - var have []string - for scan.Next() { - if scan.Err() != nil { - t.Fatalf("Next failed: %v", scan.Err()) - } - - have = append(have, string(scan.Bytes())) - } - - if !reflect.DeepEqual(have, want) { - t.Fatalf("Next failed: have %v, want %v", have, want) - } - - n, err 
:= scan.Advance() - if err != nil { - t.Fatalf("Advance failed: %v", err) - } - - if want := int64(50); n != want { - t.Fatalf("Advance failed: have %d, want %d", n, want) - } - - _, err = q.Current() - if err != io.EOF { - t.Fatalf("Queue.Current failed: %v", err) - } -} - -func TestQueue_NewScanner_EmptyQueue(t *testing.T) { - q, dir := newTestQueue(t) - defer os.RemoveAll(dir) - - _, err := q.NewScanner() - if err != io.EOF { - t.Fatalf("Queue.NewScanner failed: have %v, expected io.EOF", err) - } -} - -func TestQueue_NewScanner_EmptyAppendAndScanMore(t *testing.T) { - q, dir := newTestQueue(t) - defer os.RemoveAll(dir) - - for i := 0; i < 2; i++ { - - q.Append([]byte("one")) - q.Append([]byte("two")) - - scan, err := q.NewScanner() - if err != nil { - t.Fatalf("Queue.NewScanner failed: %v", err) - } - - var have []string - for scan.Next() { - if scan.Err() != nil { - t.Fatalf("Next failed: %v", scan.Err()) - } - - have = append(have, string(scan.Bytes())) - } - - if want := []string{"one", "two"}; !reflect.DeepEqual(have, want) { - t.Fatalf("Next failed: have %v, want %v", have, want) - } - - n, err := scan.Advance() - if err != nil { - t.Fatalf("Advance failed: %v", err) - } - - if want := int64(2); n != want { - t.Fatalf("Advance failed: have %d, want %d", n, want) - } - - if !q.Empty() { - t.Fatal("Queue.Empty failed; expected true") - } - } -} - -// AppendWhileScan tests that whilst scanning, -// the scanner will pick up the additional blocks -func TestQueue_NewScanner_AppendWhileScan(t *testing.T) { - q, dir := newTestQueue(t) - defer os.RemoveAll(dir) - - q.Append([]byte("one")) - - scan, err := q.NewScanner() - if err != nil { - t.Fatalf("Queue.NewScanner failed: %v", err) - } - - scan.Next() - have := string(scan.Bytes()) - if want := "one"; have != want { - t.Fatalf("Next failed: have %v, want %v", have, want) - } - - q.Append([]byte("two")) - - scan.Next() - have = string(scan.Bytes()) - if want := "two"; have != want { - t.Fatalf("Next failed: have %v, want %v", have, want) - } - - scan.Advance() - - _, err = q.Current() - if err != io.EOF { - t.Fatalf("Queue.Current failed: %v", err) - } -} - -func TestQueue_NewScanner_Corrupted(t *testing.T) { - q, dir := newTestQueue(t, withMaxSize(100), withMaxSegmentSize(25)) - defer os.RemoveAll(dir) - _ = dir - - q.SetMaxSegmentSize(10) - q.Append([]byte("block number 0")) - exp := []byte("block number 1") - q.Append(exp) - - var buf [8]byte - binary.BigEndian.PutUint64(buf[:], uint64(1<<63)) - err := os.WriteFile(q.segments[0].path, buf[:], os.ModePerm) - if err != nil { - t.Fatalf("WriteFile: %v", err) - } - - s, err := q.NewScanner() - if err != nil { - t.Fatal(err) - } - - s.Next() - s.Advance() - s, err = q.NewScanner() - if err != nil { - t.Fatal(err) - } - s.Next() - got := s.Bytes() - if !cmp.Equal(got, exp) { - t.Errorf("unpexected -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestQueue_NewScanner_Corruption(t *testing.T) { - type bytes [][]byte - - type expseg struct { - expe []string - expb bytes - } - - bs := func(s ...string) bytes { - r := make(bytes, len(s)) - for i := 0; i < len(s); i++ { - if s[i] != "" { - r[i] = []byte(s[i]) - } - } - return r - } - - ss := func(s ...string) []string { return s } - - cases := []struct { - name string - blocks bytes - pre func(t *testing.T, q *Queue) - exp []expseg - }{ - { - name: "no corruption", - blocks: bs("0#0123456789", "1#0123456789", "2#0123456789"), - exp: []expseg{ - {ss("", ""), bs("0#0123456789", "1#0123456789")}, - {ss(""), bs("2#0123456789")}, - }, - }, - { - 
name: "corrupt first block size", - blocks: bs("0#0123456789", "1#0123456789", "2#0123456789"), - pre: func(t *testing.T, q *Queue) { - f, err := os.OpenFile(q.segments[0].path, os.O_WRONLY, os.ModePerm) - if err != nil { - t.Fatalf("Open: %v", err) - } - defer f.Close() - - var buf [8]byte - binary.BigEndian.PutUint64(buf[:], uint64(1000)) - _, err = f.Write(buf[:]) - if err != nil { - t.Fatalf("Write: %v", err) - } - }, - exp: []expseg{ - {ss("record size out of range: max 30: got 1000", "record size out of range: max 30: got 1000"), bs("", "")}, - {ss(""), bs("2#0123456789")}, - }, - }, - { - name: "corrupt second block size", - blocks: bs("0#0123456789", "1#0123456789", "2#0123456789"), - pre: func(t *testing.T, q *Queue) { - f, err := os.OpenFile(q.segments[0].path, os.O_WRONLY, os.ModePerm) - if err != nil { - t.Fatalf("Open: %v", err) - } - defer f.Close() - - var buf [8]byte - binary.BigEndian.PutUint64(buf[:], uint64(50)) - _, err = f.WriteAt(buf[:], 20) - if err != nil { - t.Fatalf("Write: %v", err) - } - }, - exp: []expseg{ - {ss("", "record size out of range: max 30: got 50"), bs("0#0123456789", "")}, - {ss(""), bs("2#0123456789")}, - }, - }, - { - name: "truncate file", - blocks: bs("0#0123456789", "1#0123456789", "2#0123456789"), - pre: func(t *testing.T, q *Queue) { - f, err := os.OpenFile(q.segments[0].path, os.O_WRONLY, os.ModePerm) - if err != nil { - t.Fatalf("Open: %v", err) - } - defer f.Close() - - err = f.Truncate(25) - if err != nil { - t.Fatalf("Truncate: %v", err) - } - }, - exp: []expseg{ - {ss("", "bad read. exp 8, got 5"), bs("0#0123456789", "")}, - {ss(""), bs("2#0123456789")}, - }, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - q, dir := newTestQueue(t) - defer os.RemoveAll(dir) - - q.SetMaxSegmentSize(30) - for _, b := range tc.blocks { - q.Append([]byte(b)) - } - - if tc.pre != nil { - tc.pre(t, q) - } - - // invariant: len(segments) ≤ len(blocks) - for _, exp := range tc.exp { - s, err := q.NewScanner() - if err == io.EOF { - break - } - - if err != nil { - t.Fatal("NewSegment", err) - } - - for i := 0; i < len(exp.expb); i++ { - s.Next() - - { - got := s.Bytes() - if exp := []byte(exp.expb[i]); !cmp.Equal(got, exp) { - t.Errorf("unpexected block %d -got/+exp\n%s", i, cmp.Diff(got, exp)) - } - } - - if len(exp.expe) > 0 { - var got string - if s.Err() != nil { - got = s.Err().Error() - } - if exp := exp.expe[i]; !cmp.Equal(got, exp) { - t.Errorf("unpexected err %d -got/+exp\n%s", i, cmp.Diff(got, exp)) - } - } - } - - if exp := s.Next(); exp != false { - t.Error("expected Next to return false") - } - - s.Advance() - } - }) - } -} diff --git a/pkg/encoding/simple8b/encoding.go b/pkg/encoding/simple8b/encoding.go deleted file mode 100644 index 1feb720d178..00000000000 --- a/pkg/encoding/simple8b/encoding.go +++ /dev/null @@ -1,1045 +0,0 @@ -// Package simple8b implements the 64bit integer encoding algorithm as published -// by Ann and Moffat in "Index compression using 64-bit words", Softw. Pract. Exper. 2010; 40:131–147 -// -// It is capable of encoding multiple integers with values betweeen 0 and to 1^60 -1, in a single word. -// -// Imported from github.com/jwilder/encoding -package simple8b - -// Simple8b is 64bit word-sized encoder that packs multiple integers into a single word using -// a 4 bit selector values and up to 60 bits for the remaining values. 
Integers are encoded using -// the following table: -// -// ┌──────────────┬─────────────────────────────────────────────────────────────┐ -// │ Selector │ 0 1 2 3 4 5 6 7 8 9 0 11 12 13 14 15│ -// ├──────────────┼─────────────────────────────────────────────────────────────┤ -// │ Bits │ 0 0 1 2 3 4 5 6 7 8 10 12 15 20 30 60│ -// ├──────────────┼─────────────────────────────────────────────────────────────┤ -// │ N │ 240 120 60 30 20 15 12 10 8 7 6 5 4 3 2 1│ -// ├──────────────┼─────────────────────────────────────────────────────────────┤ -// │ Wasted Bits│ 60 60 0 0 0 0 12 0 4 4 0 0 0 0 0 0│ -// └──────────────┴─────────────────────────────────────────────────────────────┘ -// -// For example, when the number of values can be encoded using 4 bits, selected 5 is encoded in the -// 4 most significant bits followed by 15 values encoded used 4 bits each in the remaining 60 bits. -import ( - "encoding/binary" - "errors" - "fmt" - "unsafe" -) - -const MaxValue = (1 << 60) - 1 - -// Encoder converts a stream of unsigned 64bit integers to a compressed byte slice. -type Encoder struct { - // most recently written integers that have not been flushed - buf []uint64 - - // index in buf of the head of the buf - h int - - // index in buf of the tail of the buf - t int - - // index into bytes of written bytes - bp int - - // current bytes written and flushed - bytes []byte - b []byte -} - -// NewEncoder returns an Encoder able to convert uint64s to compressed byte slices -func NewEncoder() *Encoder { - return &Encoder{ - buf: make([]uint64, 240), - b: make([]byte, 8), - bytes: make([]byte, 128), - } -} - -func (e *Encoder) SetValues(v []uint64) { - e.buf = v - e.t = len(v) - e.h = 0 - e.bytes = e.bytes[:0] -} - -func (e *Encoder) Reset() { - e.t = 0 - e.h = 0 - e.bp = 0 - - e.buf = e.buf[:240] - e.b = e.b[:8] - e.bytes = e.bytes[:128] -} - -func (e *Encoder) Write(v uint64) error { - if e.t >= len(e.buf) { - if err := e.flush(); err != nil { - return err - } - } - - // The buf is full but there is space at the front, just shift - // the values down for now. TODO: use ring buffer - if e.t >= len(e.buf) { - copy(e.buf, e.buf[e.h:]) - e.t -= e.h - e.h = 0 - } - e.buf[e.t] = v - e.t += 1 - return nil -} - -func (e *Encoder) flush() error { - if e.t == 0 { - return nil - } - - // encode as many values into one as we can - encoded, n, err := Encode(e.buf[e.h:e.t]) - if err != nil { - return err - } - binary.BigEndian.PutUint64(e.b, encoded) - if e.bp+8 > len(e.bytes) { - e.bytes = append(e.bytes, e.b...) - e.bp = len(e.bytes) - } else { - copy(e.bytes[e.bp:e.bp+8], e.b) - e.bp += 8 - } - - // Move the head forward since we encoded those values - e.h += n - - // If we encoded them all, reset the head/tail pointers to the beginning - if e.h == e.t { - e.h = 0 - e.t = 0 - } - - return nil -} - -func (e *Encoder) Bytes() ([]byte, error) { - for e.t > 0 { - if err := e.flush(); err != nil { - return nil, err - } - } - - return e.bytes[:e.bp], nil -} - -// Decoder converts a compressed byte slice to a stream of unsigned 64bit integers. -type Decoder struct { - bytes []byte - buf [240]uint64 - i int - n int -} - -// NewDecoder returns a Decoder from a byte slice -func NewDecoder(b []byte) *Decoder { - return &Decoder{ - bytes: b, - } -} - -// Next returns true if there are remaining values to be read. Successive -// calls to Next advance the current element pointer. 
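For illustration, the `Encoder`/`Decoder` pair above forms a streaming round trip: `Write` buffers values, `Bytes` flushes them into packed big-endian 8-byte words, and `Decoder.Next`/`Read` walk them back out. A usage sketch written as if inside the `simple8b` package; the helper name is illustrative:

```go
package simple8b

import "fmt"

// roundTrip packs src (each value must be <= MaxValue) into 8-byte words
// and then unpacks them again via the streaming Decoder.
func roundTrip(src []uint64) ([]uint64, error) {
	enc := NewEncoder()
	for _, v := range src {
		if err := enc.Write(v); err != nil {
			return nil, err
		}
	}

	packed, err := enc.Bytes()
	if err != nil {
		return nil, err
	}
	fmt.Printf("%d values packed into %d bytes\n", len(src), len(packed))

	dec := NewDecoder(packed)
	var out []uint64
	for dec.Next() {
		out = append(out, dec.Read())
	}
	return out, nil
}
```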
-func (d *Decoder) Next() bool { - d.i += 1 - - if d.i >= d.n { - d.read() - } - - return len(d.bytes) >= 8 || (d.i >= 0 && d.i < d.n) -} - -func (d *Decoder) SetBytes(b []byte) { - d.bytes = b - d.i = 0 - d.n = 0 -} - -// Read returns the current value. Successive calls to Read return the same -// value. -func (d *Decoder) Read() uint64 { - v := d.buf[d.i] - return v -} - -func (d *Decoder) read() { - if len(d.bytes) < 8 { - return - } - - v := binary.BigEndian.Uint64(d.bytes[:8]) - d.bytes = d.bytes[8:] - d.n, _ = Decode(&d.buf, v) - d.i = 0 -} - -type packing struct { - n, bit int - unpack func(uint64, *[240]uint64) - pack func([]uint64) uint64 -} - -var selector [16]packing = [16]packing{ - {240, 0, unpack240, pack240}, - {120, 0, unpack120, pack120}, - {60, 1, unpack60, pack60}, - {30, 2, unpack30, pack30}, - {20, 3, unpack20, pack20}, - {15, 4, unpack15, pack15}, - {12, 5, unpack12, pack12}, - {10, 6, unpack10, pack10}, - {8, 7, unpack8, pack8}, - {7, 8, unpack7, pack7}, - {6, 10, unpack6, pack6}, - {5, 12, unpack5, pack5}, - {4, 15, unpack4, pack4}, - {3, 20, unpack3, pack3}, - {2, 30, unpack2, pack2}, - {1, 60, unpack1, pack1}, -} - -// Count returns the number of integers encoded in the byte slice -func CountBytes(b []byte) (int, error) { - var count int - for len(b) >= 8 { - v := binary.BigEndian.Uint64(b[:8]) - b = b[8:] - - sel := v >> 60 - if sel >= 16 { - return 0, fmt.Errorf("invalid selector value: %v", sel) - } - count += selector[sel].n - } - - if len(b) > 0 { - return 0, fmt.Errorf("invalid slice len remaining: %v", len(b)) - } - return count, nil -} - -// Count returns the number of integers encoded within an uint64 -func Count(v uint64) (int, error) { - sel := v >> 60 - if sel >= 16 { - return 0, fmt.Errorf("invalid selector value: %v", sel) - } - return selector[sel].n, nil -} - -func ForEach(b []byte, fn func(v uint64) bool) error { - for len(b) >= 8 { - v := binary.BigEndian.Uint64(b[:8]) - b = b[8:] - - sel := v >> 60 - if sel >= 16 { - return fmt.Errorf("invalid selector value: %v", sel) - } - - n := selector[sel].n - bits := uint(selector[sel].bit) - mask := uint64(^(int64(^0) << bits)) - - for i := 0; i < n; i++ { - val := v & mask - if !fn(val) { - return nil - } - v = v >> bits - } - } - return nil -} - -func CountBytesBetween(b []byte, min, max uint64) (int, error) { - var count int - for len(b) >= 8 { - v := binary.BigEndian.Uint64(b[:8]) - b = b[8:] - - sel := v >> 60 - if sel >= 16 { - return 0, fmt.Errorf("invalid selector value: %v", sel) - } - // If the max value that could be encoded by the uint64 is less than the min - // skip the whole thing. - maxValue := uint64((1 << uint64(selector[sel].bit)) - 1) - if maxValue < min { - continue - } - - mask := uint64(^(int64(^0) << uint(selector[sel].bit))) - - for i := 0; i < selector[sel].n; i++ { - val := v & mask - if val >= min && val < max { - count++ - } else if val > max { - break - } - - v = v >> uint(selector[sel].bit) - } - } - - if len(b) > 0 { - return 0, fmt.Errorf("invalid slice len remaining: %v", len(b)) - } - return count, nil -} - -// Encode packs as many values into a single uint64. It returns the packed -// uint64, how many values from src were packed, or an error if the values exceed -// the maximum value range. 
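For illustration, because every packed word carries its selector in the top four bits, the helpers above can inspect an encoded byte slice without materialising a decode buffer: `CountBytes` reads only each word's selector, while `ForEach` unpacks values one at a time by shift and mask. A sketch written as if inside the package, with an illustrative helper name:

```go
package simple8b

// inspect reports how many integers are packed in b (by reading only each
// word's 4-bit selector) and sums the values by walking them with ForEach.
func inspect(b []byte) (count int, sum uint64, err error) {
	if count, err = CountBytes(b); err != nil {
		return 0, 0, err
	}
	err = ForEach(b, func(v uint64) bool {
		sum += v
		return true // keep iterating; returning false stops the walk early
	})
	return count, sum, err
}
```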
-func Encode(src []uint64) (value uint64, n int, err error) { - if canPack(src, 240, 0) { - return uint64(0), 240, nil - } else if canPack(src, 120, 0) { - return 1 << 60, 120, nil - } else if canPack(src, 60, 1) { - return pack60(src[:60]), 60, nil - } else if canPack(src, 30, 2) { - return pack30(src[:30]), 30, nil - } else if canPack(src, 20, 3) { - return pack20(src[:20]), 20, nil - } else if canPack(src, 15, 4) { - return pack15(src[:15]), 15, nil - } else if canPack(src, 12, 5) { - return pack12(src[:12]), 12, nil - } else if canPack(src, 10, 6) { - return pack10(src[:10]), 10, nil - } else if canPack(src, 8, 7) { - return pack8(src[:8]), 8, nil - } else if canPack(src, 7, 8) { - return pack7(src[:7]), 7, nil - } else if canPack(src, 6, 10) { - return pack6(src[:6]), 6, nil - } else if canPack(src, 5, 12) { - return pack5(src[:5]), 5, nil - } else if canPack(src, 4, 15) { - return pack4(src[:4]), 4, nil - } else if canPack(src, 3, 20) { - return pack3(src[:3]), 3, nil - } else if canPack(src, 2, 30) { - return pack2(src[:2]), 2, nil - } else if canPack(src, 1, 60) { - return pack1(src[:1]), 1, nil - } else { - if len(src) > 0 { - return 0, 0, fmt.Errorf("value out of bounds: %v", src) - } - return 0, 0, nil - } -} - -const ( - S8B_BIT_SIZE = 60 -) - -var ( - numBits = [...][2]byte{ - // { number of values, max bits per value } - {60, 1}, - {30, 2}, - {20, 3}, - {15, 4}, - {12, 5}, - {10, 6}, - {8, 7}, - {7, 8}, - {6, 10}, - {5, 12}, - {4, 15}, - {3, 20}, - {2, 30}, - {1, 60}, - } - ErrValueOutOfBounds = errors.New("value out of bounds") -) - -// Encode returns a packed slice of the values from src. If a value is over -// 1 << 60, an error is returned. The input src is modified to avoid extra -// allocations. If you need to re-use, use a copy. -func EncodeAll(src []uint64) ([]uint64, error) { - i := 0 - - // Re-use the input slice and write encoded values back in place - dst := src - j := 0 - -NEXTVALUE: - for i < len(src) { - remaining := src[i:] - - // try to pack run of 240 or 120 1s - if len(remaining) >= 120 { - // Invariant: len(a) is fixed to 120 or 240 values - var a []uint64 - if len(remaining) >= 240 { - a = remaining[:240] - } else { - a = remaining[:120] - } - - // search for the longest sequence of 1s in a - // Postcondition: k equals the index of the last 1 or -1 - k := 0 - for k = range a { - if a[k] != 1 { - k-- - break - } - } - - v := uint64(0) - switch { - case k == 239: - // 240 1s - i += 240 - - case k >= 119: - // at least 120 1s - v = 1 << 60 - i += 120 - - default: - goto CODES - } - - dst[j] = v - j++ - continue - } - - CODES: - for code := range numBits { - intN := int(numBits[code][0]) - bitN := numBits[code][1] - if intN > len(remaining) { - continue - } - - maxVal := uint64(1 << (bitN & 0x3f)) - val := uint64(code+2) << S8B_BIT_SIZE - - for k, inV := range remaining { - if k < intN { - if inV >= maxVal { - continue CODES - } - val |= inV << ((byte(k) * bitN) & 0x3f) - } else { - break - } - } - dst[j] = val - j += 1 - i += intN - continue NEXTVALUE - } - return nil, ErrValueOutOfBounds - } - return dst[:j], nil -} - -func Decode(dst *[240]uint64, v uint64) (n int, err error) { - sel := v >> 60 - if sel >= 16 { - return 0, fmt.Errorf("invalid selector value: %b", sel) - } - selector[sel].unpack(v, dst) - return selector[sel].n, nil -} - -// Decode writes the uncompressed values from src to dst. It returns the number -// of values written or an error. 
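For illustration, at the single-word level `Encode` greedily tries the selectors that pack the most values first and reports how many inputs it consumed, while `Decode` expands one word back into a buffer. A worked sketch with illustrative values: four inputs, each under 2^15, so the search lands on selector 12 (4 values at 15 bits each):

```go
package simple8b

import "fmt"

// packOneWord is an illustrative round trip of the word-level API.
func packOneWord() {
	src := []uint64{3, 1, 4, 1} // all < 1<<15, so all four fit one selector-12 word

	word, n, err := Encode(src)
	if err != nil {
		panic(err)
	}
	fmt.Printf("selector=%d packed=%d values\n", word>>60, n) // selector=12 packed=4 values

	var dst [240]uint64
	m, err := Decode(&dst, word)
	if err != nil {
		panic(err)
	}
	fmt.Println(m, dst[:m]) // 4 [3 1 4 1]
}
```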
-// nocheckptr while the underlying struct layout doesn't change -// -//go:nocheckptr -func DecodeAll(dst, src []uint64) (value int, err error) { - j := 0 - for _, v := range src { - sel := (v >> 60) & 0xf - selector[sel].unpack(v, (*[240]uint64)(unsafe.Pointer(&dst[j]))) - j += selector[sel].n - } - return j, nil -} - -// DecodeBytesBigEndian writes the compressed, big-endian values from src to dst. It returns the number -// of values written or an error. -// nocheckptr while the underlying struct layout doesn't change -// -//go:nocheckptr -func DecodeBytesBigEndian(dst []uint64, src []byte) (value int, err error) { - if len(src)&7 != 0 { - return 0, errors.New("src length is not multiple of 8") - } - - i := 0 - j := 0 - for i < len(src) { - v := binary.BigEndian.Uint64(src[i:]) - sel := (v >> 60) & 0xf - selector[sel].unpack(v, (*[240]uint64)(unsafe.Pointer(&dst[j]))) - j += selector[sel].n - i += 8 - } - return j, nil -} - -// canPack returns true if n elements from in can be stored using bits per element -func canPack(src []uint64, n, bits int) bool { - if len(src) < n { - return false - } - - // Selector 0,1 are special and use 0 bits to encode runs of 1's - if bits == 0 { - for _, v := range src { - if v != 1 { - return false - } - } - return true - } - - max := uint64((1 << uint64(bits)) - 1) - - for _, s := range src[:n] { - if s > max { - return false - } - } - - return true -} - -// pack240 packs 240 ones from in using 1 bit each -func pack240(src []uint64) uint64 { - return 0 -} - -// pack120 packs 120 ones from in using 1 bit each -func pack120(src []uint64) uint64 { - return 0 -} - -// pack60 packs 60 values from in using 1 bit each -func pack60(src []uint64) uint64 { - _ = src[59] // eliminate multiple bounds checks - return 2<<60 | - src[0] | - src[1]<<1 | - src[2]<<2 | - src[3]<<3 | - src[4]<<4 | - src[5]<<5 | - src[6]<<6 | - src[7]<<7 | - src[8]<<8 | - src[9]<<9 | - src[10]<<10 | - src[11]<<11 | - src[12]<<12 | - src[13]<<13 | - src[14]<<14 | - src[15]<<15 | - src[16]<<16 | - src[17]<<17 | - src[18]<<18 | - src[19]<<19 | - src[20]<<20 | - src[21]<<21 | - src[22]<<22 | - src[23]<<23 | - src[24]<<24 | - src[25]<<25 | - src[26]<<26 | - src[27]<<27 | - src[28]<<28 | - src[29]<<29 | - src[30]<<30 | - src[31]<<31 | - src[32]<<32 | - src[33]<<33 | - src[34]<<34 | - src[35]<<35 | - src[36]<<36 | - src[37]<<37 | - src[38]<<38 | - src[39]<<39 | - src[40]<<40 | - src[41]<<41 | - src[42]<<42 | - src[43]<<43 | - src[44]<<44 | - src[45]<<45 | - src[46]<<46 | - src[47]<<47 | - src[48]<<48 | - src[49]<<49 | - src[50]<<50 | - src[51]<<51 | - src[52]<<52 | - src[53]<<53 | - src[54]<<54 | - src[55]<<55 | - src[56]<<56 | - src[57]<<57 | - src[58]<<58 | - src[59]<<59 - -} - -// pack30 packs 30 values from in using 2 bits each -func pack30(src []uint64) uint64 { - _ = src[29] // eliminate multiple bounds checks - return 3<<60 | - src[0] | - src[1]<<2 | - src[2]<<4 | - src[3]<<6 | - src[4]<<8 | - src[5]<<10 | - src[6]<<12 | - src[7]<<14 | - src[8]<<16 | - src[9]<<18 | - src[10]<<20 | - src[11]<<22 | - src[12]<<24 | - src[13]<<26 | - src[14]<<28 | - src[15]<<30 | - src[16]<<32 | - src[17]<<34 | - src[18]<<36 | - src[19]<<38 | - src[20]<<40 | - src[21]<<42 | - src[22]<<44 | - src[23]<<46 | - src[24]<<48 | - src[25]<<50 | - src[26]<<52 | - src[27]<<54 | - src[28]<<56 | - src[29]<<58 -} - -// pack20 packs 20 values from in using 3 bits each -func pack20(src []uint64) uint64 { - _ = src[19] // eliminate multiple bounds checks - return 4<<60 | - src[0] | - src[1]<<3 | - src[2]<<6 | - src[3]<<9 | - 
src[4]<<12 | - src[5]<<15 | - src[6]<<18 | - src[7]<<21 | - src[8]<<24 | - src[9]<<27 | - src[10]<<30 | - src[11]<<33 | - src[12]<<36 | - src[13]<<39 | - src[14]<<42 | - src[15]<<45 | - src[16]<<48 | - src[17]<<51 | - src[18]<<54 | - src[19]<<57 -} - -// pack15 packs 15 values from in using 3 bits each -func pack15(src []uint64) uint64 { - _ = src[14] // eliminate multiple bounds checks - return 5<<60 | - src[0] | - src[1]<<4 | - src[2]<<8 | - src[3]<<12 | - src[4]<<16 | - src[5]<<20 | - src[6]<<24 | - src[7]<<28 | - src[8]<<32 | - src[9]<<36 | - src[10]<<40 | - src[11]<<44 | - src[12]<<48 | - src[13]<<52 | - src[14]<<56 -} - -// pack12 packs 12 values from in using 5 bits each -func pack12(src []uint64) uint64 { - _ = src[11] // eliminate multiple bounds checks - return 6<<60 | - src[0] | - src[1]<<5 | - src[2]<<10 | - src[3]<<15 | - src[4]<<20 | - src[5]<<25 | - src[6]<<30 | - src[7]<<35 | - src[8]<<40 | - src[9]<<45 | - src[10]<<50 | - src[11]<<55 -} - -// pack10 packs 10 values from in using 6 bits each -func pack10(src []uint64) uint64 { - _ = src[9] // eliminate multiple bounds checks - return 7<<60 | - src[0] | - src[1]<<6 | - src[2]<<12 | - src[3]<<18 | - src[4]<<24 | - src[5]<<30 | - src[6]<<36 | - src[7]<<42 | - src[8]<<48 | - src[9]<<54 -} - -// pack8 packs 8 values from in using 7 bits each -func pack8(src []uint64) uint64 { - _ = src[7] // eliminate multiple bounds checks - return 8<<60 | - src[0] | - src[1]<<7 | - src[2]<<14 | - src[3]<<21 | - src[4]<<28 | - src[5]<<35 | - src[6]<<42 | - src[7]<<49 -} - -// pack7 packs 7 values from in using 8 bits each -func pack7(src []uint64) uint64 { - _ = src[6] // eliminate multiple bounds checks - return 9<<60 | - src[0] | - src[1]<<8 | - src[2]<<16 | - src[3]<<24 | - src[4]<<32 | - src[5]<<40 | - src[6]<<48 -} - -// pack6 packs 6 values from in using 10 bits each -func pack6(src []uint64) uint64 { - _ = src[5] // eliminate multiple bounds checks - return 10<<60 | - src[0] | - src[1]<<10 | - src[2]<<20 | - src[3]<<30 | - src[4]<<40 | - src[5]<<50 -} - -// pack5 packs 5 values from in using 12 bits each -func pack5(src []uint64) uint64 { - _ = src[4] // eliminate multiple bounds checks - return 11<<60 | - src[0] | - src[1]<<12 | - src[2]<<24 | - src[3]<<36 | - src[4]<<48 -} - -// pack4 packs 4 values from in using 15 bits each -func pack4(src []uint64) uint64 { - _ = src[3] // eliminate multiple bounds checks - return 12<<60 | - src[0] | - src[1]<<15 | - src[2]<<30 | - src[3]<<45 -} - -// pack3 packs 3 values from in using 20 bits each -func pack3(src []uint64) uint64 { - _ = src[2] // eliminate multiple bounds checks - return 13<<60 | - src[0] | - src[1]<<20 | - src[2]<<40 -} - -// pack2 packs 2 values from in using 30 bits each -func pack2(src []uint64) uint64 { - _ = src[1] // eliminate multiple bounds checks - return 14<<60 | - src[0] | - src[1]<<30 -} - -// pack1 packs 1 values from in using 60 bits each -func pack1(src []uint64) uint64 { - return 15<<60 | - src[0] -} - -func unpack240(v uint64, dst *[240]uint64) { - for i := range dst { - dst[i] = 1 - } -} - -func unpack120(v uint64, dst *[240]uint64) { - for i := range dst[:120] { - dst[i] = 1 - } -} - -func unpack60(v uint64, dst *[240]uint64) { - dst[0] = v & 1 - dst[1] = (v >> 1) & 1 - dst[2] = (v >> 2) & 1 - dst[3] = (v >> 3) & 1 - dst[4] = (v >> 4) & 1 - dst[5] = (v >> 5) & 1 - dst[6] = (v >> 6) & 1 - dst[7] = (v >> 7) & 1 - dst[8] = (v >> 8) & 1 - dst[9] = (v >> 9) & 1 - dst[10] = (v >> 10) & 1 - dst[11] = (v >> 11) & 1 - dst[12] = (v >> 12) & 1 - dst[13] = (v >> 13) & 1 - 
dst[14] = (v >> 14) & 1 - dst[15] = (v >> 15) & 1 - dst[16] = (v >> 16) & 1 - dst[17] = (v >> 17) & 1 - dst[18] = (v >> 18) & 1 - dst[19] = (v >> 19) & 1 - dst[20] = (v >> 20) & 1 - dst[21] = (v >> 21) & 1 - dst[22] = (v >> 22) & 1 - dst[23] = (v >> 23) & 1 - dst[24] = (v >> 24) & 1 - dst[25] = (v >> 25) & 1 - dst[26] = (v >> 26) & 1 - dst[27] = (v >> 27) & 1 - dst[28] = (v >> 28) & 1 - dst[29] = (v >> 29) & 1 - dst[30] = (v >> 30) & 1 - dst[31] = (v >> 31) & 1 - dst[32] = (v >> 32) & 1 - dst[33] = (v >> 33) & 1 - dst[34] = (v >> 34) & 1 - dst[35] = (v >> 35) & 1 - dst[36] = (v >> 36) & 1 - dst[37] = (v >> 37) & 1 - dst[38] = (v >> 38) & 1 - dst[39] = (v >> 39) & 1 - dst[40] = (v >> 40) & 1 - dst[41] = (v >> 41) & 1 - dst[42] = (v >> 42) & 1 - dst[43] = (v >> 43) & 1 - dst[44] = (v >> 44) & 1 - dst[45] = (v >> 45) & 1 - dst[46] = (v >> 46) & 1 - dst[47] = (v >> 47) & 1 - dst[48] = (v >> 48) & 1 - dst[49] = (v >> 49) & 1 - dst[50] = (v >> 50) & 1 - dst[51] = (v >> 51) & 1 - dst[52] = (v >> 52) & 1 - dst[53] = (v >> 53) & 1 - dst[54] = (v >> 54) & 1 - dst[55] = (v >> 55) & 1 - dst[56] = (v >> 56) & 1 - dst[57] = (v >> 57) & 1 - dst[58] = (v >> 58) & 1 - dst[59] = (v >> 59) & 1 -} - -func unpack30(v uint64, dst *[240]uint64) { - dst[0] = v & 3 - dst[1] = (v >> 2) & 3 - dst[2] = (v >> 4) & 3 - dst[3] = (v >> 6) & 3 - dst[4] = (v >> 8) & 3 - dst[5] = (v >> 10) & 3 - dst[6] = (v >> 12) & 3 - dst[7] = (v >> 14) & 3 - dst[8] = (v >> 16) & 3 - dst[9] = (v >> 18) & 3 - dst[10] = (v >> 20) & 3 - dst[11] = (v >> 22) & 3 - dst[12] = (v >> 24) & 3 - dst[13] = (v >> 26) & 3 - dst[14] = (v >> 28) & 3 - dst[15] = (v >> 30) & 3 - dst[16] = (v >> 32) & 3 - dst[17] = (v >> 34) & 3 - dst[18] = (v >> 36) & 3 - dst[19] = (v >> 38) & 3 - dst[20] = (v >> 40) & 3 - dst[21] = (v >> 42) & 3 - dst[22] = (v >> 44) & 3 - dst[23] = (v >> 46) & 3 - dst[24] = (v >> 48) & 3 - dst[25] = (v >> 50) & 3 - dst[26] = (v >> 52) & 3 - dst[27] = (v >> 54) & 3 - dst[28] = (v >> 56) & 3 - dst[29] = (v >> 58) & 3 -} - -func unpack20(v uint64, dst *[240]uint64) { - dst[0] = v & 7 - dst[1] = (v >> 3) & 7 - dst[2] = (v >> 6) & 7 - dst[3] = (v >> 9) & 7 - dst[4] = (v >> 12) & 7 - dst[5] = (v >> 15) & 7 - dst[6] = (v >> 18) & 7 - dst[7] = (v >> 21) & 7 - dst[8] = (v >> 24) & 7 - dst[9] = (v >> 27) & 7 - dst[10] = (v >> 30) & 7 - dst[11] = (v >> 33) & 7 - dst[12] = (v >> 36) & 7 - dst[13] = (v >> 39) & 7 - dst[14] = (v >> 42) & 7 - dst[15] = (v >> 45) & 7 - dst[16] = (v >> 48) & 7 - dst[17] = (v >> 51) & 7 - dst[18] = (v >> 54) & 7 - dst[19] = (v >> 57) & 7 -} - -func unpack15(v uint64, dst *[240]uint64) { - dst[0] = v & 15 - dst[1] = (v >> 4) & 15 - dst[2] = (v >> 8) & 15 - dst[3] = (v >> 12) & 15 - dst[4] = (v >> 16) & 15 - dst[5] = (v >> 20) & 15 - dst[6] = (v >> 24) & 15 - dst[7] = (v >> 28) & 15 - dst[8] = (v >> 32) & 15 - dst[9] = (v >> 36) & 15 - dst[10] = (v >> 40) & 15 - dst[11] = (v >> 44) & 15 - dst[12] = (v >> 48) & 15 - dst[13] = (v >> 52) & 15 - dst[14] = (v >> 56) & 15 -} - -func unpack12(v uint64, dst *[240]uint64) { - dst[0] = v & 31 - dst[1] = (v >> 5) & 31 - dst[2] = (v >> 10) & 31 - dst[3] = (v >> 15) & 31 - dst[4] = (v >> 20) & 31 - dst[5] = (v >> 25) & 31 - dst[6] = (v >> 30) & 31 - dst[7] = (v >> 35) & 31 - dst[8] = (v >> 40) & 31 - dst[9] = (v >> 45) & 31 - dst[10] = (v >> 50) & 31 - dst[11] = (v >> 55) & 31 -} - -func unpack10(v uint64, dst *[240]uint64) { - dst[0] = v & 63 - dst[1] = (v >> 6) & 63 - dst[2] = (v >> 12) & 63 - dst[3] = (v >> 18) & 63 - dst[4] = (v >> 24) & 63 - dst[5] = (v >> 30) & 63 - dst[6] = (v >> 
36) & 63 - dst[7] = (v >> 42) & 63 - dst[8] = (v >> 48) & 63 - dst[9] = (v >> 54) & 63 -} - -func unpack8(v uint64, dst *[240]uint64) { - dst[0] = v & 127 - dst[1] = (v >> 7) & 127 - dst[2] = (v >> 14) & 127 - dst[3] = (v >> 21) & 127 - dst[4] = (v >> 28) & 127 - dst[5] = (v >> 35) & 127 - dst[6] = (v >> 42) & 127 - dst[7] = (v >> 49) & 127 -} - -func unpack7(v uint64, dst *[240]uint64) { - dst[0] = v & 255 - dst[1] = (v >> 8) & 255 - dst[2] = (v >> 16) & 255 - dst[3] = (v >> 24) & 255 - dst[4] = (v >> 32) & 255 - dst[5] = (v >> 40) & 255 - dst[6] = (v >> 48) & 255 -} - -func unpack6(v uint64, dst *[240]uint64) { - dst[0] = v & 1023 - dst[1] = (v >> 10) & 1023 - dst[2] = (v >> 20) & 1023 - dst[3] = (v >> 30) & 1023 - dst[4] = (v >> 40) & 1023 - dst[5] = (v >> 50) & 1023 -} - -func unpack5(v uint64, dst *[240]uint64) { - dst[0] = v & 4095 - dst[1] = (v >> 12) & 4095 - dst[2] = (v >> 24) & 4095 - dst[3] = (v >> 36) & 4095 - dst[4] = (v >> 48) & 4095 -} - -func unpack4(v uint64, dst *[240]uint64) { - dst[0] = v & 32767 - dst[1] = (v >> 15) & 32767 - dst[2] = (v >> 30) & 32767 - dst[3] = (v >> 45) & 32767 -} - -func unpack3(v uint64, dst *[240]uint64) { - dst[0] = v & 1048575 - dst[1] = (v >> 20) & 1048575 - dst[2] = (v >> 40) & 1048575 -} - -func unpack2(v uint64, dst *[240]uint64) { - dst[0] = v & 1073741823 - dst[1] = (v >> 30) & 1073741823 -} - -func unpack1(v uint64, dst *[240]uint64) { - dst[0] = v & 1152921504606846975 -} diff --git a/pkg/encoding/simple8b/encoding_test.go b/pkg/encoding/simple8b/encoding_test.go deleted file mode 100644 index e9c3f11bb9f..00000000000 --- a/pkg/encoding/simple8b/encoding_test.go +++ /dev/null @@ -1,525 +0,0 @@ -package simple8b_test - -import ( - "math/rand" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/pkg/encoding/simple8b" -) - -func Test_Encode_NoValues(t *testing.T) { - var in []uint64 - encoded, _ := simple8b.EncodeAll(in) - - decoded := make([]uint64, len(in)) - n, _ := simple8b.DecodeAll(decoded, encoded) - - if len(in) != len(decoded[:n]) { - t.Fatalf("Len mismatch: got %v, exp %v", len(decoded), len(in)) - } -} - -func ones(n int) func() []uint64 { - return func() []uint64 { - in := make([]uint64, n) - for i := 0; i < n; i++ { - in[i] = 1 - } - return in - } -} - -func bitsN(b int) func(n int) func() []uint64 { - return func(n int) func() []uint64 { - return bits(n, b) - } -} - -func combineN(fns ...func(n int) func() []uint64) func(n int) func() []uint64 { - return func(n int) func() []uint64 { - var out []func() []uint64 - for _, fn := range fns { - out = append(out, fn(n)) - } - return combine(out...) - } -} - -// bits generates sequence of n numbers with max bits, -// ensuring max bit is set for 50% of the values. -func bits(n, bits int) func() []uint64 { - return func() []uint64 { - out := make([]uint64, n) - maxVal := uint64(1 << uint8(bits)) - for i := range out { - topBit := uint64((i & 1) << uint8(bits-1)) - out[i] = uint64(rand.Int63n(int64(maxVal))) | topBit - if out[i] >= maxVal { - panic("max") - } - } - return out - } -} - -func combine(fns ...func() []uint64) func() []uint64 { - return func() []uint64 { - var out []uint64 - for _, fn := range fns { - out = append(out, fn()...) 
- } - return out - } -} - -// TestEncodeAll ensures 100% test coverage of simple8b.EncodeAll and -// verifies all output by comparing the original input with the output of simple8b.DecodeAll -func TestEncodeAll(t *testing.T) { - //lint:ignore SA1019 This function was deprecated for good reasons that aren't important to us since its just used for testing. - // Ignoring seems better than all the effort to address the underlying concern. https://github.com/golang/go/issues/56319 - rand.Seed(0) - - tests := []struct { - name string - in []uint64 - fn func() []uint64 - err error - }{ - {name: "no values", in: []uint64{}}, - {name: "mixed sizes", in: []uint64{7, 6, 256, 4, 3, 2, 1}}, - {name: "too big", in: []uint64{7, 6, 2<<61 - 1, 4, 3, 2, 1}, err: simple8b.ErrValueOutOfBounds}, - {name: "1 bit", fn: bits(100, 1)}, - {name: "2 bits", fn: bits(100, 2)}, - {name: "3 bits", fn: bits(100, 3)}, - {name: "4 bits", fn: bits(100, 4)}, - {name: "5 bits", fn: bits(100, 5)}, - {name: "6 bits", fn: bits(100, 6)}, - {name: "7 bits", fn: bits(100, 7)}, - {name: "8 bits", fn: bits(100, 8)}, - {name: "10 bits", fn: bits(100, 10)}, - {name: "12 bits", fn: bits(100, 12)}, - {name: "15 bits", fn: bits(100, 15)}, - {name: "20 bits", fn: bits(100, 20)}, - {name: "30 bits", fn: bits(100, 30)}, - {name: "60 bits", fn: bits(100, 60)}, - {name: "combination", fn: combine( - bits(100, 1), - bits(100, 2), - bits(100, 3), - bits(100, 4), - bits(100, 5), - bits(100, 6), - bits(100, 7), - bits(100, 8), - bits(100, 10), - bits(100, 12), - bits(100, 15), - bits(100, 20), - bits(100, 30), - bits(100, 60), - )}, - {name: "240 ones", fn: ones(240)}, - {name: "120 ones", fn: func() []uint64 { - in := ones(240)() - in[120] = 5 - return in - }}, - {name: "119 ones", fn: func() []uint64 { - in := ones(240)() - in[119] = 5 - return in - }}, - {name: "239 ones", fn: func() []uint64 { - in := ones(241)() - in[239] = 5 - return in - }}, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if test.fn != nil { - test.in = test.fn() - } - - encoded, err := simple8b.EncodeAll(append(make([]uint64, 0, len(test.in)), test.in...)) - if test.err != nil { - if err != test.err { - t.Fatalf("expected encode error, got\n%s", err) - } - return - } - - decoded := make([]uint64, len(test.in)) - n, err := simple8b.DecodeAll(decoded, encoded) - if err != nil { - t.Fatalf("unexpected decode error\n%s", err) - } - - if !cmp.Equal(decoded[:n], test.in) { - t.Fatalf("unexpected values; +got/-exp\n%s", cmp.Diff(decoded, test.in)) - } - }) - } -} - -func Test_FewValues(t *testing.T) { - testEncode(t, 20, 2) -} - -func Test_Encode_Multiple_Zeros(t *testing.T) { - testEncode(t, 250, 0) -} - -func Test_Encode_Multiple_Ones(t *testing.T) { - testEncode(t, 250, 1) -} - -func Test_Encode_Multiple_Large(t *testing.T) { - testEncode(t, 250, 134) -} - -func Test_Encode_240Ones(t *testing.T) { - testEncode(t, 240, 1) -} - -func Test_Encode_120Ones(t *testing.T) { - testEncode(t, 120, 1) -} - -func Test_Encode_60(t *testing.T) { - testEncode(t, 60, 1) -} - -func Test_Encode_30(t *testing.T) { - testEncode(t, 30, 3) -} - -func Test_Encode_20(t *testing.T) { - testEncode(t, 20, 7) -} - -func Test_Encode_15(t *testing.T) { - testEncode(t, 15, 15) -} - -func Test_Encode_12(t *testing.T) { - testEncode(t, 12, 31) -} - -func Test_Encode_10(t *testing.T) { - testEncode(t, 10, 63) -} - -func Test_Encode_8(t *testing.T) { - testEncode(t, 8, 127) -} - -func Test_Encode_7(t *testing.T) { - testEncode(t, 7, 255) -} - -func Test_Encode_6(t 
*testing.T) { - testEncode(t, 6, 1023) -} - -func Test_Encode_5(t *testing.T) { - testEncode(t, 5, 4095) -} - -func Test_Encode_4(t *testing.T) { - testEncode(t, 4, 32767) -} - -func Test_Encode_3(t *testing.T) { - testEncode(t, 3, 1048575) -} - -func Test_Encode_2(t *testing.T) { - testEncode(t, 2, 1073741823) -} - -func Test_Encode_1(t *testing.T) { - testEncode(t, 1, 1152921504606846975) -} - -func testEncode(t *testing.T, n int, val uint64) { - enc := simple8b.NewEncoder() - in := make([]uint64, n) - for i := 0; i < n; i++ { - in[i] = val - enc.Write(in[i]) - } - - encoded, err := enc.Bytes() - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - dec := simple8b.NewDecoder(encoded) - i := 0 - for dec.Next() { - if i >= len(in) { - t.Fatalf("Decoded too many values: got %v, exp %v", i, len(in)) - } - - if dec.Read() != in[i] { - t.Fatalf("Decoded[%d] != %v, got %v", i, in[i], dec.Read()) - } - i += 1 - } - - if exp, got := n, i; got != exp { - t.Fatalf("Decode len mismatch: exp %v, got %v", exp, got) - } - - got, err := simple8b.CountBytes(encoded) - if err != nil { - t.Fatalf("Unexpected error in Count: %v", err) - } - if got != n { - t.Fatalf("Count mismatch: got %v, exp %v", got, n) - } - -} - -func Test_Bytes(t *testing.T) { - enc := simple8b.NewEncoder() - for i := 0; i < 30; i++ { - enc.Write(uint64(i)) - } - b, _ := enc.Bytes() - - dec := simple8b.NewDecoder(b) - x := uint64(0) - for dec.Next() { - if x != dec.Read() { - t.Fatalf("mismatch: got %v, exp %v", dec.Read(), x) - } - x += 1 - } -} - -func Test_Encode_ValueTooLarge(t *testing.T) { - enc := simple8b.NewEncoder() - - values := []uint64{ - 1442369134000000000, 0, - } - - for _, v := range values { - enc.Write(v) - } - - _, err := enc.Bytes() - if err == nil { - t.Fatalf("Expected error, got nil") - - } -} - -func Test_Decode_NotEnoughBytes(t *testing.T) { - dec := simple8b.NewDecoder([]byte{0}) - if dec.Next() { - t.Fatalf("Expected Next to return false but it returned true") - } -} - -func TestCountBytesBetween(t *testing.T) { - enc := simple8b.NewEncoder() - in := make([]uint64, 8) - for i := 0; i < len(in); i++ { - in[i] = uint64(i) - enc.Write(in[i]) - } - - encoded, err := enc.Bytes() - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - dec := simple8b.NewDecoder(encoded) - i := 0 - for dec.Next() { - if i >= len(in) { - t.Fatalf("Decoded too many values: got %v, exp %v", i, len(in)) - } - - if dec.Read() != in[i] { - t.Fatalf("Decoded[%d] != %v, got %v", i, in[i], dec.Read()) - } - i += 1 - } - - if exp, got := len(in), i; got != exp { - t.Fatalf("Decode len mismatch: exp %v, got %v", exp, got) - } - - got, err := simple8b.CountBytesBetween(encoded, 2, 6) - if err != nil { - t.Fatalf("Unexpected error in Count: %v", err) - } - if got != 4 { - t.Fatalf("Count mismatch: got %v, exp %v", got, 4) - } -} - -func TestCountBytesBetween_SkipMin(t *testing.T) { - enc := simple8b.NewEncoder() - in := make([]uint64, 8) - for i := 0; i < len(in); i++ { - in[i] = uint64(i) - enc.Write(in[i]) - } - in = append(in, 100000) - enc.Write(100000) - - encoded, err := enc.Bytes() - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - dec := simple8b.NewDecoder(encoded) - i := 0 - for dec.Next() { - if i >= len(in) { - t.Fatalf("Decoded too many values: got %v, exp %v", i, len(in)) - } - - if dec.Read() != in[i] { - t.Fatalf("Decoded[%d] != %v, got %v", i, in[i], dec.Read()) - } - i += 1 - } - - if exp, got := len(in), i; got != exp { - t.Fatalf("Decode len mismatch: exp %v, got %v", exp, got) - } - - 
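For readers skimming these tests, a round-trip usage sketch of the EncodeAll/DecodeAll pair they exercise; the import path refers to the package as it existed before this deletion, and the input values are arbitrary:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/pkg/encoding/simple8b"
)

func main() {
	in := []uint64{7, 6, 256, 4, 3, 2, 1}

	// Hand EncodeAll a copy of the input, as the tests above do.
	encoded, err := simple8b.EncodeAll(append([]uint64(nil), in...))
	if err != nil {
		panic(err)
	}

	decoded := make([]uint64, len(in))
	n, err := simple8b.DecodeAll(decoded, encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded[:n]) // [7 6 256 4 3 2 1]
}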
got, err := simple8b.CountBytesBetween(encoded, 100000, 100001) - if err != nil { - t.Fatalf("Unexpected error in Count: %v", err) - } - if got != 1 { - t.Fatalf("Count mismatch: got %v, exp %v", got, 1) - } -} - -func BenchmarkEncodeAll(b *testing.B) { - benchmarks := []struct { - name string - fn func(n int) func() []uint64 - }{ - {name: "1 bit", fn: bitsN(1)}, - {name: "2 bits", fn: bitsN(2)}, - {name: "3 bits", fn: bitsN(3)}, - {name: "4 bits", fn: bitsN(4)}, - {name: "5 bits", fn: bitsN(5)}, - {name: "6 bits", fn: bitsN(6)}, - {name: "7 bits", fn: bitsN(7)}, - {name: "8 bits", fn: bitsN(8)}, - {name: "10 bits", fn: bitsN(10)}, - {name: "12 bits", fn: bitsN(12)}, - {name: "15 bits", fn: bitsN(15)}, - {name: "20 bits", fn: bitsN(20)}, - {name: "30 bits", fn: bitsN(30)}, - {name: "60 bits", fn: bitsN(60)}, - {name: "combination", fn: combineN( - bitsN(1), - bitsN(2), - bitsN(3), - bitsN(4), - bitsN(5), - bitsN(6), - bitsN(7), - bitsN(8), - bitsN(10), - bitsN(12), - bitsN(15), - bitsN(20), - bitsN(30), - bitsN(60), - )}, - } - for _, bm := range benchmarks { - b.Run(bm.name, func(b *testing.B) { - for i := 0; i < b.N; i++ { - in := bm.fn(1000)() - simple8b.EncodeAll(append(make([]uint64, 0, len(in)), in...)) - } - }) - } -} - -func BenchmarkEncode(b *testing.B) { - x := make([]uint64, 1024) - for i := 0; i < len(x); i++ { - x[i] = uint64(15) - } - - in := make([]uint64, 1024) - - b.SetBytes(int64(len(x) * 8)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - copy(in, x) - simple8b.EncodeAll(in) - } -} - -func BenchmarkEncoder(b *testing.B) { - x := make([]uint64, 1024) - for i := 0; i < len(x); i++ { - x[i] = uint64(15) - } - - enc := simple8b.NewEncoder() - b.ResetTimer() - for i := 0; i < b.N; i++ { - enc.SetValues(x) - enc.Bytes() - b.SetBytes(int64(len(x)) * 8) - } -} -func BenchmarkDecode(b *testing.B) { - total := 0 - - x := make([]uint64, 1024) - for i := 0; i < len(x); i++ { - x[i] = uint64(10) - } - y, _ := simple8b.EncodeAll(x) - - decoded := make([]uint64, len(x)) - - b.SetBytes(int64(len(decoded) * 8)) - b.ResetTimer() - - for i := 0; i < b.N; i++ { - _, _ = simple8b.DecodeAll(decoded, y) - total += len(decoded) - } -} - -func BenchmarkDecoder(b *testing.B) { - enc := simple8b.NewEncoder() - x := make([]uint64, 1024) - for i := 0; i < len(x); i++ { - x[i] = uint64(10) - enc.Write(x[i]) - } - y, _ := enc.Bytes() - - b.ResetTimer() - - dec := simple8b.NewDecoder(y) - for i := 0; i < b.N; i++ { - dec.SetBytes(y) - j := 0 - for dec.Next() { - j += 1 - } - b.SetBytes(int64(j * 8)) - } -} diff --git a/pkg/errors/error_capture.go b/pkg/errors/error_capture.go deleted file mode 100644 index 7ecb1054571..00000000000 --- a/pkg/errors/error_capture.go +++ /dev/null @@ -1,21 +0,0 @@ -package errors - -// Capture is a wrapper function which can be used to capture errors from closing via a defer. -// An example: -// -// func Example() (err error) { -// f, _ := os.Open(...) -// defer errors.Capture(&err, f.Close)() -// ... -// return -// -// Doing this will result in the error from the f.Close() call being -// put in the error via a ptr, if the error is not nil -func Capture(rErr *error, fn func() error) func() { - return func() { - err := fn() - if *rErr == nil { - *rErr = err - } - } -} diff --git a/pkg/escape/bytes.go b/pkg/escape/bytes.go deleted file mode 100644 index dd6b2eb9baa..00000000000 --- a/pkg/escape/bytes.go +++ /dev/null @@ -1,115 +0,0 @@ -// Package escape contains utilities for escaping parts of InfluxQL -// and InfluxDB line protocol. 
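The escape package whose removal begins here has a small, fixed set of escape rules (see the Codes map and the escaper/unescaper replacers further down). A self-contained sketch of those rules using the same replacement pairs; the sample string is invented:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same replacement pairs as the package's escaper/unescaper: commas,
	// double quotes, spaces and '=' gain a backslash prefix.
	escaper := strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`)
	unescaper := strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`)

	in := "host=server 01,region=us west"
	esc := escaper.Replace(in)
	fmt.Println(esc)                    // host\=server\ 01\,region\=us\ west
	fmt.Println(unescaper.Replace(esc)) // host=server 01,region=us west
}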
-package escape // import "github.com/influxdata/influxdb/v2/pkg/escape" - -import ( - "bytes" - "strings" -) - -// Codes is a map of bytes to be escaped. -var Codes = map[byte][]byte{ - ',': []byte(`\,`), - '"': []byte(`\"`), - ' ': []byte(`\ `), - '=': []byte(`\=`), -} - -// Bytes escapes characters on the input slice, as defined by Codes. -func Bytes(in []byte) []byte { - for b, esc := range Codes { - in = bytes.Replace(in, []byte{b}, esc, -1) - } - return in -} - -const escapeChars = `," =` - -// IsEscaped returns whether b has any escaped characters, -// i.e. whether b seems to have been processed by Bytes. -func IsEscaped(b []byte) bool { - for len(b) > 0 { - i := bytes.IndexByte(b, '\\') - if i < 0 { - return false - } - - if i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 { - return true - } - b = b[i+1:] - } - return false -} - -// AppendUnescaped appends the unescaped version of src to dst -// and returns the resulting slice. -func AppendUnescaped(dst, src []byte) []byte { - var pos int - for len(src) > 0 { - next := bytes.IndexByte(src[pos:], '\\') - if next < 0 || pos+next+1 >= len(src) { - return append(dst, src...) - } - - if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 { - if pos+next > 0 { - dst = append(dst, src[:pos+next]...) - } - src = src[pos+next+1:] - pos = 0 - } else { - pos += next + 1 - } - } - - return dst -} - -// Unescape returns a new slice containing the unescaped version of in. -func Unescape(in []byte) []byte { - if len(in) == 0 { - return nil - } - - if bytes.IndexByte(in, '\\') == -1 { - return in - } - - i := 0 - inLen := len(in) - - // The output size will be no more than inLen. Preallocating the - // capacity of the output is faster and uses less memory than - // letting append() do its own (over)allocation. 
- out := make([]byte, 0, inLen) - - for { - if i >= inLen { - break - } - if in[i] == '\\' && i+1 < inLen { - switch in[i+1] { - case ',': - out = append(out, ',') - i += 2 - continue - case '"': - out = append(out, '"') - i += 2 - continue - case ' ': - out = append(out, ' ') - i += 2 - continue - case '=': - out = append(out, '=') - i += 2 - continue - } - } - out = append(out, in[i]) - i += 1 - } - return out -} diff --git a/pkg/escape/bytes_test.go b/pkg/escape/bytes_test.go deleted file mode 100644 index 8cb101a5125..00000000000 --- a/pkg/escape/bytes_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package escape - -import ( - "bytes" - "reflect" - "strings" - "testing" -) - -var result []byte - -func BenchmarkBytesEscapeNoEscapes(b *testing.B) { - buf := []byte(`no_escapes`) - for i := 0; i < b.N; i++ { - result = Bytes(buf) - } -} - -func BenchmarkUnescapeNoEscapes(b *testing.B) { - buf := []byte(`no_escapes`) - for i := 0; i < b.N; i++ { - result = Unescape(buf) - } -} - -func BenchmarkBytesEscapeMany(b *testing.B) { - tests := [][]byte{ - []byte("this is my special string"), - []byte("a field w=i th == tons of escapes"), - []byte("some,commas,here"), - } - for n := 0; n < b.N; n++ { - for _, test := range tests { - result = Bytes(test) - } - } -} - -func BenchmarkUnescapeMany(b *testing.B) { - tests := [][]byte{ - []byte(`this\ is\ my\ special\ string`), - []byte(`a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`), - []byte(`some\,commas\,here`), - } - for i := 0; i < b.N; i++ { - for _, test := range tests { - result = Unescape(test) - } - } -} - -var boolResult bool - -func BenchmarkIsEscaped(b *testing.B) { - tests := [][]byte{ - []byte(`no_escapes`), - []byte(`a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`), - []byte(`some\,commas\,here`), - } - for i := 0; i < b.N; i++ { - for _, test := range tests { - boolResult = IsEscaped(test) - } - } -} - -func BenchmarkAppendUnescaped(b *testing.B) { - tests := [][]byte{ - []byte(`this\ is\ my\ special\ string`), - []byte(`a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`), - []byte(`some\,commas\,here`), - } - for i := 0; i < b.N; i++ { - result = nil - for _, test := range tests { - result = AppendUnescaped(result, test) - } - } -} - -func TestUnescape(t *testing.T) { - tests := []struct { - in []byte - out []byte - }{ - { - []byte(nil), - []byte(nil), - }, - - { - []byte(""), - []byte(nil), - }, - - { - []byte("\\,\\\"\\ \\="), - []byte(",\" ="), - }, - - { - []byte("\\\\"), - []byte("\\\\"), - }, - - { - []byte("plain and simple"), - []byte("plain and simple"), - }, - } - - for ii, tt := range tests { - got := Unescape(tt.in) - if !reflect.DeepEqual(got, tt.out) { - t.Errorf("[%d] Unescape(%#v) = %#v, expected %#v", ii, string(tt.in), string(got), string(tt.out)) - } - } -} - -func TestAppendUnescaped(t *testing.T) { - cases := strings.Split(strings.TrimSpace(` -normal -inv\alid -goo\"d -sp\ ace -\,\"\ \= -f\\\ x -`), "\n") - - for _, c := range cases { - exp := Unescape([]byte(c)) - got := AppendUnescaped(nil, []byte(c)) - - if !bytes.Equal(got, exp) { - t.Errorf("AppendUnescaped failed for %#q: got %#q, exp %#q", c, got, exp) - } - } - -} diff --git a/pkg/escape/strings.go b/pkg/escape/strings.go deleted file mode 100644 index db98033b0d7..00000000000 --- a/pkg/escape/strings.go +++ /dev/null @@ -1,21 +0,0 @@ -package escape - -import "strings" - -var ( - escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`) - unescaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`) -) - -// UnescapeString returns unescaped 
version of in. -func UnescapeString(in string) string { - if strings.IndexByte(in, '\\') == -1 { - return in - } - return unescaper.Replace(in) -} - -// String returns the escaped version of in. -func String(in string) string { - return escaper.Replace(in) -} diff --git a/pkg/escape/strings_test.go b/pkg/escape/strings_test.go deleted file mode 100644 index d124732c1e9..00000000000 --- a/pkg/escape/strings_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package escape - -import ( - "testing" -) - -var s string - -func BenchmarkStringEscapeNoEscapes(b *testing.B) { - for n := 0; n < b.N; n++ { - s = String("no_escapes") - } -} - -func BenchmarkStringUnescapeNoEscapes(b *testing.B) { - for n := 0; n < b.N; n++ { - s = UnescapeString("no_escapes") - } -} - -func BenchmarkManyStringEscape(b *testing.B) { - tests := []string{ - "this is my special string", - "a field w=i th == tons of escapes", - "some,commas,here", - } - - for n := 0; n < b.N; n++ { - for _, test := range tests { - s = String(test) - } - } -} - -func BenchmarkManyStringUnescape(b *testing.B) { - tests := []string{ - `this\ is\ my\ special\ string`, - `a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`, - `some\,commas\,here`, - } - - for n := 0; n < b.N; n++ { - for _, test := range tests { - s = UnescapeString(test) - } - } -} - -func TestStringEscape(t *testing.T) { - tests := []struct { - in string - expected string - }{ - { - in: "", - expected: "", - }, - { - in: "this is my special string", - expected: `this\ is\ my\ special\ string`, - }, - { - in: "a field w=i th == tons of escapes", - expected: `a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`, - }, - { - in: "no_escapes", - expected: "no_escapes", - }, - { - in: "some,commas,here", - expected: `some\,commas\,here`, - }, - } - - for _, test := range tests { - if test.expected != String(test.in) { - t.Errorf("Got %s, expected %s", String(test.in), test.expected) - } - } -} - -func TestStringUnescape(t *testing.T) { - tests := []struct { - in string - expected string - }{ - { - in: "", - expected: "", - }, - { - in: `this\ is\ my\ special\ string`, - expected: "this is my special string", - }, - { - in: `a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`, - expected: "a field w=i th == tons of escapes", - }, - { - in: "no_escapes", - expected: "no_escapes", - }, - { - in: `some\,commas\,here`, - expected: "some,commas,here", - }, - } - - for _, test := range tests { - if test.expected != UnescapeString(test.in) { - t.Errorf("Got %s, expected %s", UnescapeString(test.in), test.expected) - } - } -} diff --git a/pkg/estimator/hll/compressed.go b/pkg/estimator/hll/compressed.go deleted file mode 100644 index d2a48804b4d..00000000000 --- a/pkg/estimator/hll/compressed.go +++ /dev/null @@ -1,173 +0,0 @@ -package hll - -import "encoding/binary" - -// Original author of this file is github.com/clarkduvall/hyperloglog -type iterable interface { - decode(i int, last uint32) (uint32, int) - Len() int - Iter() *iterator -} - -type iterator struct { - i int - last uint32 - v iterable -} - -func (iter *iterator) Next() uint32 { - n, i := iter.v.decode(iter.i, iter.last) - iter.last = n - iter.i = i - return n -} - -func (iter *iterator) Peek() uint32 { - n, _ := iter.v.decode(iter.i, iter.last) - return n -} - -func (iter iterator) HasNext() bool { - return iter.i < iter.v.Len() -} - -type compressedList struct { - count uint32 - last uint32 - b variableLengthList -} - -func (v *compressedList) Clone() *compressedList { - if v == nil { - return nil - } - - newV := &compressedList{ - count: v.count, 
- last: v.last, - } - - newV.b = make(variableLengthList, len(v.b)) - copy(newV.b, v.b) - return newV -} - -func (v *compressedList) MarshalBinary() (data []byte, err error) { - // Marshal the variableLengthList - bdata, err := v.b.MarshalBinary() - if err != nil { - return nil, err - } - - // At least 4 bytes for the two fixed sized values plus the size of bdata. - data = make([]byte, 0, 4+4+len(bdata)) - - // Marshal the count and last values. - data = append(data, []byte{ - // Number of items in the list. - byte(v.count >> 24), - byte(v.count >> 16), - byte(v.count >> 8), - byte(v.count), - // The last item in the list. - byte(v.last >> 24), - byte(v.last >> 16), - byte(v.last >> 8), - byte(v.last), - }...) - - // Append the list - return append(data, bdata...), nil -} - -func (v *compressedList) UnmarshalBinary(data []byte) error { - // Set the count. - v.count, data = binary.BigEndian.Uint32(data[:4]), data[4:] - - // Set the last value. - v.last, data = binary.BigEndian.Uint32(data[:4]), data[4:] - - // Set the list. - sz, data := binary.BigEndian.Uint32(data[:4]), data[4:] - v.b = make([]uint8, sz) - for i := uint32(0); i < sz; i++ { - v.b[i] = uint8(data[i]) - } - return nil -} - -func newCompressedList(size int) *compressedList { - v := &compressedList{} - v.b = make(variableLengthList, 0, size) - return v -} - -func (v *compressedList) Len() int { - return len(v.b) -} - -func (v *compressedList) decode(i int, last uint32) (uint32, int) { - n, i := v.b.decode(i, last) - return n + last, i -} - -func (v *compressedList) Append(x uint32) { - v.count++ - v.b = v.b.Append(x - v.last) - v.last = x -} - -func (v *compressedList) Iter() *iterator { - return &iterator{0, 0, v} -} - -type variableLengthList []uint8 - -func (v variableLengthList) MarshalBinary() (data []byte, err error) { - // 4 bytes for the size of the list, and a byte for each element in the - // list. - data = make([]byte, 0, 4+v.Len()) - - // Length of the list. We only need 32 bits because the size of the set - // couldn't exceed that on 32 bit architectures. - sz := v.Len() - data = append(data, []byte{ - byte(sz >> 24), - byte(sz >> 16), - byte(sz >> 8), - byte(sz), - }...) - - // Marshal each element in the list. - for i := 0; i < sz; i++ { - data = append(data, byte(v[i])) - } - - return data, nil -} - -func (v variableLengthList) Len() int { - return len(v) -} - -func (v *variableLengthList) Iter() *iterator { - return &iterator{0, 0, v} -} - -func (v variableLengthList) decode(i int, last uint32) (uint32, int) { - var x uint32 - j := i - for ; v[j]&0x80 != 0; j++ { - x |= uint32(v[j]&0x7f) << (uint(j-i) * 7) - } - x |= uint32(v[j]) << (uint(j-i) * 7) - return x, j + 1 -} - -func (v variableLengthList) Append(x uint32) variableLengthList { - for x&0xffffff80 != 0 { - v = append(v, uint8((x&0x7f)|0x80)) - x >>= 7 - } - return append(v, uint8(x&0x7f)) -} diff --git a/pkg/estimator/hll/hll.go b/pkg/estimator/hll/hll.go deleted file mode 100644 index df886dcb4b4..00000000000 --- a/pkg/estimator/hll/hll.go +++ /dev/null @@ -1,500 +0,0 @@ -// Package hll contains a HyperLogLog++ with a LogLog-Beta bias correction implementation that is adapted (mostly -// copied) from an implementation provided by Clark DuVall -// github.com/clarkduvall/hyperloglog. 
-// -// The differences are that the implementation in this package: -// -// - uses an AMD64 optimised xxhash algorithm instead of murmur; -// - uses some AMD64 optimisations for things like clz; -// - works with []byte rather than a Hash64 interface, to reduce allocations; -// - implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler -// -// Based on some rough benchmarking, this implementation of HyperLogLog++ is -// around twice as fast as the github.com/clarkduvall/hyperloglog implementation. -package hll - -import ( - "encoding/binary" - "errors" - "fmt" - "math" - "math/bits" - "sort" - "unsafe" - - "github.com/cespare/xxhash" - "github.com/influxdata/influxdb/v2/pkg/estimator" -) - -// Current version of HLL implementation. -const version uint8 = 2 - -// DefaultPrecision is the default precision. -const DefaultPrecision = 16 - -func beta(ez float64) float64 { - zl := math.Log(ez + 1) - return -0.37331876643753059*ez + - -1.41704077448122989*zl + - 0.40729184796612533*math.Pow(zl, 2) + - 1.56152033906584164*math.Pow(zl, 3) + - -0.99242233534286128*math.Pow(zl, 4) + - 0.26064681399483092*math.Pow(zl, 5) + - -0.03053811369682807*math.Pow(zl, 6) + - 0.00155770210179105*math.Pow(zl, 7) -} - -// Plus implements the Hyperloglog++ algorithm, described in the following -// paper: http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/40671.pdf -// -// The HyperLogLog++ algorithm provides cardinality estimations. -type Plus struct { - // hash function used to hash values to add to the sketch. - hash func([]byte) uint64 - - p uint8 // precision. - pp uint8 // p' (sparse) precision to be used when p ∈ [4..pp] and pp < 64. - - m uint32 // Number of substream used for stochastic averaging of stream. - mp uint32 // m' (sparse) number of substreams. - - alpha float64 // alpha is used for bias correction. - - sparse bool // Should we use a sparse sketch representation. - tmpSet set - - denseList []uint8 // The dense representation of the HLL. - sparseList *compressedList // values that can be stored in the sparse representation. -} - -// NewPlus returns a new Plus with precision p. p must be between 4 and 18. -func NewPlus(p uint8) (*Plus, error) { - if p > 18 || p < 4 { - return nil, errors.New("precision must be between 4 and 18") - } - - // p' = 25 is used in the Google paper. - pp := uint8(25) - - hll := &Plus{ - hash: xxhash.Sum64, - p: p, - pp: pp, - m: 1 << p, - mp: 1 << pp, - tmpSet: set{}, - sparse: true, - } - hll.sparseList = newCompressedList(int(hll.m)) - - // Determine alpha. - switch hll.m { - case 16: - hll.alpha = 0.673 - case 32: - hll.alpha = 0.697 - case 64: - hll.alpha = 0.709 - default: - hll.alpha = 0.7213 / (1 + 1.079/float64(hll.m)) - } - - return hll, nil -} - -// Bytes estimates the memory footprint of this Plus, in bytes. -func (h *Plus) Bytes() int { - var b int - b += len(h.tmpSet) * 4 - b += cap(h.denseList) - if h.sparseList != nil { - b += int(unsafe.Sizeof(*h.sparseList)) - b += cap(h.sparseList.b) - } - b += int(unsafe.Sizeof(*h)) - return b -} - -// NewDefaultPlus creates a new Plus with the default precision. -func NewDefaultPlus() *Plus { - p, err := NewPlus(DefaultPrecision) - if err != nil { - panic(err) - } - return p -} - -// Clone returns a deep copy of h. 
-func (h *Plus) Clone() estimator.Sketch { - var hll = &Plus{ - hash: h.hash, - p: h.p, - pp: h.pp, - m: h.m, - mp: h.mp, - alpha: h.alpha, - sparse: h.sparse, - tmpSet: h.tmpSet.Clone(), - sparseList: h.sparseList.Clone(), - } - - hll.denseList = make([]uint8, len(h.denseList)) - copy(hll.denseList, h.denseList) - return hll -} - -// Add adds a new value to the HLL. -func (h *Plus) Add(v []byte) { - x := h.hash(v) - if h.sparse { - h.tmpSet.add(h.encodeHash(x)) - - if uint32(len(h.tmpSet))*100 > h.m { - h.mergeSparse() - } - if uint32(h.sparseList.Len()) > h.m { - h.mergeSparse() - h.toNormal() - } - } else { - i := bextr(x, 64-h.p, h.p) // {x63,...,x64-p} - w := x< h.denseList[i] { - h.denseList[i] = rho - } - } -} - -// Count returns a cardinality estimate. -func (h *Plus) Count() uint64 { - if h == nil { - return 0 // Nothing to do. - } - - if h.sparse { - h.mergeSparse() - return uint64(h.linearCount(h.mp, h.mp-uint32(h.sparseList.count))) - } - sum := 0.0 - m := float64(h.m) - var count float64 - for _, val := range h.denseList { - sum += 1.0 / float64(uint32(1)< h.denseList[i] { - h.denseList[i] = v - } - } - } - return nil -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (h *Plus) MarshalBinary() (data []byte, err error) { - if h == nil { - return nil, nil - } - - if h.sparse { - h.mergeSparse() - } - - // Marshal a version marker. - data = append(data, version) - - // Marshal precision. - data = append(data, byte(h.p)) - - if h.sparse { - // It's using the sparse representation. - data = append(data, byte(1)) - - // Add the tmp_set (should be empty) - tsdata, err := h.tmpSet.MarshalBinary() - if err != nil { - return nil, err - } - data = append(data, tsdata...) - - // Add the sparse representation - sdata, err := h.sparseList.MarshalBinary() - if err != nil { - return nil, err - } - return append(data, sdata...), nil - } - - // It's using the dense representation. - data = append(data, byte(0)) - - // Add the dense sketch representation. - sz := len(h.denseList) - data = append(data, []byte{ - byte(sz >> 24), - byte(sz >> 16), - byte(sz >> 8), - byte(sz), - }...) - - // Marshal each element in the list. - for i := 0; i < len(h.denseList); i++ { - data = append(data, byte(h.denseList[i])) - } - - return data, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (h *Plus) UnmarshalBinary(data []byte) error { - if len(data) < 12 { - return fmt.Errorf("provided buffer %v too short for initializing HLL sketch", data) - } - - // Unmarshal version. We may need this in the future if we make - // non-compatible changes. - _ = data[0] - - // Unmarshal precision. - p := uint8(data[1]) - newh, err := NewPlus(p) - if err != nil { - return err - } - *h = *newh - - // h is now initialised with the correct precision. We just need to fill the - // rest of the details out. - if data[2] == byte(1) { - // Using the sparse representation. - h.sparse = true - - // Unmarshal the tmp_set. - tssz := binary.BigEndian.Uint32(data[3:7]) - h.tmpSet = make(map[uint32]struct{}, tssz) - - // We need to unmarshal tssz values in total, and each value requires us - // to read 4 bytes. - tsLastByte := int((tssz * 4) + 7) - for i := 7; i < tsLastByte; i += 4 { - k := binary.BigEndian.Uint32(data[i : i+4]) - h.tmpSet[k] = struct{}{} - } - - // Unmarshal the sparse representation. - return h.sparseList.UnmarshalBinary(data[tsLastByte:]) - } - - // Using the dense representation. 
- h.sparse = false - dsz := int(binary.BigEndian.Uint32(data[3:7])) - h.denseList = make([]uint8, 0, dsz) - for i := 7; i < dsz+7; i++ { - h.denseList = append(h.denseList, uint8(data[i])) - } - return nil -} - -func (h *Plus) mergeSparse() { - if len(h.tmpSet) == 0 { - return - } - keys := make(uint64Slice, 0, len(h.tmpSet)) - for k := range h.tmpSet { - keys = append(keys, k) - } - sort.Sort(keys) - - newList := newCompressedList(int(h.m)) - for iter, i := h.sparseList.Iter(), 0; iter.HasNext() || i < len(keys); { - if !iter.HasNext() { - newList.Append(keys[i]) - i++ - continue - } - - if i >= len(keys) { - newList.Append(iter.Next()) - continue - } - - x1, x2 := iter.Peek(), keys[i] - if x1 == x2 { - newList.Append(iter.Next()) - i++ - } else if x1 > x2 { - newList.Append(x2) - i++ - } else { - newList.Append(iter.Next()) - } - } - - h.sparseList = newList - h.tmpSet = set{} -} - -// Convert from sparse representation to dense representation. -func (h *Plus) toNormal() { - if len(h.tmpSet) > 0 { - h.mergeSparse() - } - - h.denseList = make([]uint8, h.m) - for iter := h.sparseList.Iter(); iter.HasNext(); { - i, r := h.decodeHash(iter.Next()) - if h.denseList[i] < r { - h.denseList[i] = r - } - } - - h.sparse = false - h.tmpSet = nil - h.sparseList = nil -} - -// Encode a hash to be used in the sparse representation. -func (h *Plus) encodeHash(x uint64) uint32 { - idx := uint32(bextr(x, 64-h.pp, h.pp)) - if bextr(x, 64-h.pp, h.pp-h.p) == 0 { - zeros := bits.LeadingZeros64((bextr(x, 0, 64-h.pp)<> 24), - byte(sl >> 16), - byte(sl >> 8), - byte(sl), - }...) - - // Marshal each element in the set. - for k := range s { - data = append(data, []byte{ - byte(k >> 24), - byte(k >> 16), - byte(k >> 8), - byte(k), - }...) - } - - return data, nil -} - -func (s set) add(v uint32) { s[v] = struct{}{} } -func (s set) has(v uint32) bool { _, ok := s[v]; return ok } - -// bextr performs a bitfield extract on v. start should be the LSB of the field -// you wish to extract, and length the number of bits to extract. -// -// For example: start=0 and length=4 for the following 64-bit word would result -// in 1111 being returned. 
-// -// 00011110 -// returns 1110 -func bextr(v uint64, start, length uint8) uint64 { - return (v >> start) & ((1 << length) - 1) -} - -func bextr32(v uint32, start, length uint8) uint32 { - return (v >> start) & ((1 << length) - 1) -} diff --git a/pkg/estimator/hll/hll_test.go b/pkg/estimator/hll/hll_test.go deleted file mode 100644 index 1d24f0b13e6..00000000000 --- a/pkg/estimator/hll/hll_test.go +++ /dev/null @@ -1,689 +0,0 @@ -package hll - -import ( - "encoding/binary" - "fmt" - "math" - "math/rand" - "reflect" - "testing" - "unsafe" - - "github.com/davecgh/go-spew/spew" -) - -func nopHash(buf []byte) uint64 { - if len(buf) != 8 { - panic(fmt.Sprintf("unexpected size buffer: %d", len(buf))) - } - return binary.BigEndian.Uint64(buf) -} - -func toByte(v uint64) []byte { - var buf [8]byte - binary.BigEndian.PutUint64(buf[:], v) - return buf[:] -} - -func TestPlus_Bytes(t *testing.T) { - testCases := []struct { - p uint8 - normal bool - }{ - {4, false}, - {5, false}, - {4, true}, - {5, true}, - } - - for i, testCase := range testCases { - t.Run(fmt.Sprint(i), func(t *testing.T) { - h := NewTestPlus(testCase.p) - - plusStructOverhead := int(unsafe.Sizeof(*h)) - compressedListOverhead := int(unsafe.Sizeof(*h.sparseList)) - - var expectedDenseListCapacity, expectedSparseListCapacity int - - if testCase.normal { - h.toNormal() - // denseList has capacity for 2^p elements, one byte each - expectedDenseListCapacity = int(math.Pow(2, float64(testCase.p))) - if expectedDenseListCapacity != cap(h.denseList) { - t.Errorf("denseList capacity: want %d got %d", expectedDenseListCapacity, cap(h.denseList)) - } - } else { - // sparseList has capacity for 2^p elements, one byte each - expectedSparseListCapacity = int(math.Pow(2, float64(testCase.p))) - if expectedSparseListCapacity != cap(h.sparseList.b) { - t.Errorf("sparseList capacity: want %d got %d", expectedSparseListCapacity, cap(h.sparseList.b)) - } - expectedSparseListCapacity += compressedListOverhead - } - - expectedSize := plusStructOverhead + expectedDenseListCapacity + expectedSparseListCapacity - if expectedSize != h.Bytes() { - t.Errorf("Bytes(): want %d got %d", expectedSize, h.Bytes()) - } - }) - } -} - -func TestPlus_Add_NoSparse(t *testing.T) { - h := NewTestPlus(16) - h.toNormal() - - h.Add(toByte(0x00010fffffffffff)) - n := h.denseList[1] - if n != 5 { - t.Error(n) - } - - h.Add(toByte(0x0002ffffffffffff)) - n = h.denseList[2] - if n != 1 { - t.Error(n) - } - - h.Add(toByte(0x0003000000000000)) - n = h.denseList[3] - if n != 49 { - t.Error(n) - } - - h.Add(toByte(0x0003000000000001)) - n = h.denseList[3] - if n != 49 { - t.Error(n) - } - - h.Add(toByte(0xff03700000000000)) - n = h.denseList[0xff03] - if n != 2 { - t.Error(n) - } - - h.Add(toByte(0xff03080000000000)) - n = h.denseList[0xff03] - if n != 5 { - t.Error(n) - } -} - -func TestPlusPrecision_NoSparse(t *testing.T) { - h := NewTestPlus(4) - h.toNormal() - - h.Add(toByte(0x1fffffffffffffff)) - n := h.denseList[1] - if n != 1 { - t.Error(n) - } - - h.Add(toByte(0xffffffffffffffff)) - n = h.denseList[0xf] - if n != 1 { - t.Error(n) - } - - h.Add(toByte(0x00ffffffffffffff)) - n = h.denseList[0] - if n != 5 { - t.Error(n) - } -} - -func TestPlus_toNormal(t *testing.T) { - h := NewTestPlus(16) - h.Add(toByte(0x00010fffffffffff)) - h.toNormal() - c := h.Count() - if c != 1 { - t.Error(c) - } - - if h.sparse { - t.Error("toNormal should convert to normal") - } - - h = NewTestPlus(16) - h.hash = nopHash - h.Add(toByte(0x00010fffffffffff)) - h.Add(toByte(0x0002ffffffffffff)) 
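The assertions in TestPlus_Add_NoSparse follow directly from the dense Add path in hll.go: the top p bits of the hashed value pick a register, and the register keeps one plus the number of leading zeros of the remaining bits. A standalone sketch for the first assertion, assuming p = 16 and the test's identity (nop) hash:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	const p = 16
	x := uint64(0x00010fffffffffff) // first value added in TestPlus_Add_NoSparse

	i := x >> (64 - p)                       // register index: top p bits = 1
	w := x<<p | 1<<(p-1)                     // remaining bits, with a sentinel bit below them
	rho := uint8(bits.LeadingZeros64(w)) + 1 // 4 leading zeros, so rho = 5

	fmt.Println(i, rho) // 1 5, matching the denseList[1] == 5 check above
}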
- h.Add(toByte(0x0003000000000000)) - h.Add(toByte(0x0003000000000001)) - h.Add(toByte(0xff03700000000000)) - h.Add(toByte(0xff03080000000000)) - h.mergeSparse() - h.toNormal() - - n := h.denseList[1] - if n != 5 { - t.Error(n) - } - n = h.denseList[2] - if n != 1 { - t.Error(n) - } - n = h.denseList[3] - if n != 49 { - t.Error(n) - } - n = h.denseList[0xff03] - if n != 5 { - t.Error(n) - } -} - -func TestPlusCount(t *testing.T) { - h := NewTestPlus(16) - - n := h.Count() - if n != 0 { - t.Error(n) - } - - h.Add(toByte(0x00010fffffffffff)) - h.Add(toByte(0x00020fffffffffff)) - h.Add(toByte(0x00030fffffffffff)) - h.Add(toByte(0x00040fffffffffff)) - h.Add(toByte(0x00050fffffffffff)) - h.Add(toByte(0x00050fffffffffff)) - - n = h.Count() - if n != 5 { - t.Error(n) - } - - // not mutated, still returns correct count - n = h.Count() - if n != 5 { - t.Error(n) - } - - h.Add(toByte(0x00060fffffffffff)) - - // mutated - n = h.Count() - if n != 6 { - t.Error(n) - } -} - -func TestPlus_Merge_Error(t *testing.T) { - h := NewTestPlus(16) - h2 := NewTestPlus(10) - - err := h.Merge(h2) - if err == nil { - t.Error("different precision should return error") - } -} - -func TestHLL_Merge_Sparse(t *testing.T) { - h := NewTestPlus(16) - h.Add(toByte(0x00010fffffffffff)) - h.Add(toByte(0x00020fffffffffff)) - h.Add(toByte(0x00030fffffffffff)) - h.Add(toByte(0x00040fffffffffff)) - h.Add(toByte(0x00050fffffffffff)) - h.Add(toByte(0x00050fffffffffff)) - - h2 := NewTestPlus(16) - h2.Merge(h) - n := h2.Count() - if n != 5 { - t.Error(n) - } - - if h2.sparse { - t.Error("Merge should convert to normal") - } - - if !h.sparse { - t.Error("Merge should not modify argument") - } - - h2.Merge(h) - n = h2.Count() - if n != 5 { - t.Error(n) - } - - h.Add(toByte(0x00060fffffffffff)) - h.Add(toByte(0x00070fffffffffff)) - h.Add(toByte(0x00080fffffffffff)) - h.Add(toByte(0x00090fffffffffff)) - h.Add(toByte(0x000a0fffffffffff)) - h.Add(toByte(0x000a0fffffffffff)) - n = h.Count() - if n != 10 { - t.Error(n) - } - - h2.Merge(h) - n = h2.Count() - if n != 10 { - t.Error(n) - } -} - -func TestHLL_Merge_Normal(t *testing.T) { - h := NewTestPlus(16) - h.toNormal() - h.Add(toByte(0x00010fffffffffff)) - h.Add(toByte(0x00020fffffffffff)) - h.Add(toByte(0x00030fffffffffff)) - h.Add(toByte(0x00040fffffffffff)) - h.Add(toByte(0x00050fffffffffff)) - h.Add(toByte(0x00050fffffffffff)) - - h2 := NewTestPlus(16) - h2.toNormal() - h2.Merge(h) - n := h2.Count() - if n != 5 { - t.Error(n) - } - - h2.Merge(h) - n = h2.Count() - if n != 5 { - t.Error(n) - } - - h.Add(toByte(0x00060fffffffffff)) - h.Add(toByte(0x00070fffffffffff)) - h.Add(toByte(0x00080fffffffffff)) - h.Add(toByte(0x00090fffffffffff)) - h.Add(toByte(0x000a0fffffffffff)) - h.Add(toByte(0x000a0fffffffffff)) - n = h.Count() - if n != 10 { - t.Error(n) - } - - h2.Merge(h) - n = h2.Count() - if n != 10 { - t.Error(n) - } -} - -func TestPlus_Merge(t *testing.T) { - h := NewTestPlus(16) - - k1 := uint64(0xf000017000000000) - h.Add(toByte(k1)) - if !h.tmpSet.has(h.encodeHash(k1)) { - t.Error("key not in hash") - } - - k2 := uint64(0x000fff8f00000000) - h.Add(toByte(k2)) - if !h.tmpSet.has(h.encodeHash(k2)) { - t.Error("key not in hash") - } - - if len(h.tmpSet) != 2 { - t.Error(h.tmpSet) - } - - h.mergeSparse() - if len(h.tmpSet) != 0 { - t.Error(h.tmpSet) - } - if h.sparseList.count != 2 { - t.Error(h.sparseList) - } - - iter := h.sparseList.Iter() - n := iter.Next() - if n != h.encodeHash(k2) { - t.Error(n) - } - n = iter.Next() - if n != h.encodeHash(k1) { - t.Error(n) - } - - k3 := 
uint64(0x0f00017000000000) - h.Add(toByte(k3)) - if !h.tmpSet.has(h.encodeHash(k3)) { - t.Error("key not in hash") - } - - h.mergeSparse() - if len(h.tmpSet) != 0 { - t.Error(h.tmpSet) - } - if h.sparseList.count != 3 { - t.Error(h.sparseList) - } - - iter = h.sparseList.Iter() - n = iter.Next() - if n != h.encodeHash(k2) { - t.Error(n) - } - n = iter.Next() - if n != h.encodeHash(k3) { - t.Error(n) - } - n = iter.Next() - if n != h.encodeHash(k1) { - t.Error(n) - } - - h.Add(toByte(k1)) - if !h.tmpSet.has(h.encodeHash(k1)) { - t.Error("key not in hash") - } - - h.mergeSparse() - if len(h.tmpSet) != 0 { - t.Error(h.tmpSet) - } - if h.sparseList.count != 3 { - t.Error(h.sparseList) - } - - iter = h.sparseList.Iter() - n = iter.Next() - if n != h.encodeHash(k2) { - t.Error(n) - } - n = iter.Next() - if n != h.encodeHash(k3) { - t.Error(n) - } - n = iter.Next() - if n != h.encodeHash(k1) { - t.Error(n) - } -} - -func TestPlus_EncodeDecode(t *testing.T) { - h := NewTestPlus(8) - i, r := h.decodeHash(h.encodeHash(0xffffff8000000000)) - if i != 0xff { - t.Error(i) - } - if r != 1 { - t.Error(r) - } - - i, r = h.decodeHash(h.encodeHash(0xff00000000000000)) - if i != 0xff { - t.Error(i) - } - if r != 57 { - t.Error(r) - } - - i, r = h.decodeHash(h.encodeHash(0xff30000000000000)) - if i != 0xff { - t.Error(i) - } - if r != 3 { - t.Error(r) - } - - i, r = h.decodeHash(h.encodeHash(0xaa10000000000000)) - if i != 0xaa { - t.Error(i) - } - if r != 4 { - t.Error(r) - } - - i, r = h.decodeHash(h.encodeHash(0xaa0f000000000000)) - if i != 0xaa { - t.Error(i) - } - if r != 5 { - t.Error(r) - } -} - -func TestPlus_Error(t *testing.T) { - _, err := NewPlus(3) - if err == nil { - t.Error("precision 3 should return error") - } - - _, err = NewPlus(18) - if err != nil { - t.Error(err) - } - - _, err = NewPlus(19) - if err == nil { - t.Error("precision 17 should return error") - } -} - -func TestPlus_Marshal_Unmarshal_Sparse(t *testing.T) { - h, _ := NewPlus(4) - h.sparse = true - h.tmpSet = map[uint32]struct{}{26: {}, 40: {}} - - src := rand.New(rand.NewSource(6611)) - - // Add a bunch of values to the sparse representation. - for i := 0; i < 10; i++ { - h.sparseList.Append(uint32(src.Int())) - } - - data, err := h.MarshalBinary() - if err != nil { - t.Fatal(err) - } - - // Peeking at the first byte should reveal the version. - if got, exp := data[0], byte(2); got != exp { - t.Fatalf("got byte %v, expected %v", got, exp) - } - - var res Plus - if err := res.UnmarshalBinary(data); err != nil { - t.Fatal(err) - } - - // reflect.DeepEqual will always return false when comparing non-nil - // functions, so we'll set them to nil. - h.hash, res.hash = nil, nil - if got, exp := &res, h; !reflect.DeepEqual(got, exp) { - t.Fatalf("got %v, wanted %v", spew.Sdump(got), spew.Sdump(exp)) - } -} - -func TestPlus_Marshal_Unmarshal_Dense(t *testing.T) { - h, _ := NewPlus(4) - h.sparse = false - - src := rand.New(rand.NewSource(1688)) - - // Add a bunch of values to the dense representation. - for i := 0; i < 10; i++ { - h.denseList = append(h.denseList, uint8(src.Int())) - } - - data, err := h.MarshalBinary() - if err != nil { - t.Fatal(err) - } - - // Peeking at the first byte should reveal the version. - if got, exp := data[0], byte(2); got != exp { - t.Fatalf("got byte %v, expected %v", got, exp) - } - - var res Plus - if err := res.UnmarshalBinary(data); err != nil { - t.Fatal(err) - } - - // reflect.DeepEqual will always return false when comparing non-nil - // functions, so we'll set them to nil. 
- h.hash, res.hash = nil, nil - if got, exp := &res, h; !reflect.DeepEqual(got, exp) { - t.Fatalf("got %v, wanted %v", spew.Sdump(got), spew.Sdump(exp)) - } -} - -// Tests that a sketch can be serialised / unserialised and keep an accurate -// cardinality estimate. -func TestPlus_Marshal_Unmarshal_Count(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test in short mode") - } - - count := make(map[string]struct{}, 1000000) - h, _ := NewPlus(16) - - src := rand.New(rand.NewSource(6828)) - - buf := make([]byte, 8) - for i := 0; i < 1000000; i++ { - if _, err := src.Read(buf); err != nil { - panic(err) - } - - count[string(buf)] = struct{}{} - - // Add to the sketch. - h.Add(buf) - } - - gotC := h.Count() - epsilon := 15000 // 1.5% - if got, exp := math.Abs(float64(int(gotC)-len(count))), epsilon; int(got) > exp { - t.Fatalf("error was %v for estimation %d and true cardinality %d", got, gotC, len(count)) - } - - // Serialise the sketch. - sketch, err := h.MarshalBinary() - if err != nil { - t.Fatal(err) - } - - // Deserialise. - h = &Plus{} - if err := h.UnmarshalBinary(sketch); err != nil { - t.Fatal(err) - } - - // The count should be the same - oldC := gotC - if got, exp := h.Count(), oldC; got != exp { - t.Fatalf("got %d, expected %d", got, exp) - } - - // Add some more values. - for i := 0; i < 1000000; i++ { - if _, err := src.Read(buf); err != nil { - panic(err) - } - - count[string(buf)] = struct{}{} - - // Add to the sketch. - h.Add(buf) - } - - // The sketch should still be working correctly. - gotC = h.Count() - epsilon = 30000 // 1.5% - if got, exp := math.Abs(float64(int(gotC)-len(count))), epsilon; int(got) > exp { - t.Fatalf("error was %v for estimation %d and true cardinality %d", got, gotC, len(count)) - } -} - -func NewTestPlus(p uint8) *Plus { - h, err := NewPlus(p) - if err != nil { - panic(err) - } - h.hash = nopHash - return h -} - -// Generate random data to add to the sketch. -func genData(n int, src *rand.Rand) [][]byte { - out := make([][]byte, 0, n) - buf := make([]byte, 8) - - for i := 0; i < n; i++ { - // generate 8 random bytes - n, err := src.Read(buf) - if err != nil { - panic(err) - } else if n != 8 { - panic(fmt.Errorf("only %d bytes generated", n)) - } - - out = append(out, buf) - } - if len(out) != n { - panic(fmt.Sprintf("wrong size slice: %d", n)) - } - return out -} - -// Memoises values to be added to a sketch during a benchmark. -var benchdata = map[int][][]byte{} - -func benchmarkPlusAdd(b *testing.B, h *Plus, n int) { - src := rand.New(rand.NewSource(9938)) - blobs, ok := benchdata[n] - if !ok { - // Generate it. 
- benchdata[n] = genData(n, src) - blobs = benchdata[n] - } - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - for j := 0; j < len(blobs); j++ { - h.Add(blobs[j]) - } - } - b.StopTimer() -} - -func BenchmarkPlus_Add_100(b *testing.B) { - h, _ := NewPlus(16) - benchmarkPlusAdd(b, h, 100) -} - -func BenchmarkPlus_Add_1000(b *testing.B) { - h, _ := NewPlus(16) - benchmarkPlusAdd(b, h, 1000) -} - -func BenchmarkPlus_Add_10000(b *testing.B) { - h, _ := NewPlus(16) - benchmarkPlusAdd(b, h, 10000) -} - -func BenchmarkPlus_Add_100000(b *testing.B) { - h, _ := NewPlus(16) - benchmarkPlusAdd(b, h, 100000) -} - -func BenchmarkPlus_Add_1000000(b *testing.B) { - h, _ := NewPlus(16) - benchmarkPlusAdd(b, h, 1000000) -} - -func BenchmarkPlus_Add_10000000(b *testing.B) { - h, _ := NewPlus(16) - benchmarkPlusAdd(b, h, 10000000) -} - -func BenchmarkPlus_Add_100000000(b *testing.B) { - h, _ := NewPlus(16) - benchmarkPlusAdd(b, h, 100000000) -} diff --git a/pkg/estimator/sketch.go b/pkg/estimator/sketch.go deleted file mode 100644 index b5d0fdc958b..00000000000 --- a/pkg/estimator/sketch.go +++ /dev/null @@ -1,24 +0,0 @@ -package estimator - -import "encoding" - -// Sketch is the interface representing a sketch for estimating cardinality. -type Sketch interface { - // Add adds a single value to the sketch. - Add(v []byte) - - // Count returns a cardinality estimate for the sketch. - Count() uint64 - - // Merge merges another sketch into this one. - Merge(s Sketch) error - - // Bytes estimates the memory footprint of the sketch, in bytes. - Bytes() int - - // Clone returns a deep copy of the sketch. - Clone() Sketch - - encoding.BinaryMarshaler - encoding.BinaryUnmarshaler -} diff --git a/pkg/file/file_unix.go b/pkg/file/file_unix.go deleted file mode 100644 index 66609888e58..00000000000 --- a/pkg/file/file_unix.go +++ /dev/null @@ -1,35 +0,0 @@ -//go:build !windows - -package file - -import ( - "os" - "syscall" -) - -func SyncDir(dirName string) error { - // fsync the dir to flush the rename - dir, err := os.OpenFile(dirName, os.O_RDONLY, os.ModeDir) - if err != nil { - return err - } - defer dir.Close() - - // While we're on unix, we may be running in a Docker container that is - // pointed at a Windows volume over samba. That doesn't support fsyncs - // on directories. This shows itself as an EINVAL, so we ignore that - // error. - err = dir.Sync() - if pe, ok := err.(*os.PathError); ok && pe.Err == syscall.EINVAL { - err = nil - } else if err != nil { - return err - } - - return dir.Close() -} - -// RenameFile will rename the source to target using os function. -func RenameFile(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} diff --git a/pkg/file/file_windows.go b/pkg/file/file_windows.go deleted file mode 100644 index 97f31b062f1..00000000000 --- a/pkg/file/file_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package file - -import "os" - -func SyncDir(dirName string) error { - return nil -} - -// RenameFile will rename the source to target using os function. If target exists it will be removed before renaming. 
-func RenameFile(oldpath, newpath string) error { - if _, err := os.Stat(newpath); err == nil { - if err = os.Remove(newpath); nil != err { - return err - } - } - - return os.Rename(oldpath, newpath) -} diff --git a/pkg/fs/fs.go b/pkg/fs/fs.go deleted file mode 100644 index b91f8a5cead..00000000000 --- a/pkg/fs/fs.go +++ /dev/null @@ -1,66 +0,0 @@ -package fs - -import ( - "fmt" - "io" - "os" - - "github.com/influxdata/influxdb/v2/pkg/errors" -) - -// A FileExistsError is returned when an operation cannot be completed due to a -// file already existing. -type FileExistsError struct { - path string -} - -func newFileExistsError(path string) FileExistsError { - return FileExistsError{path: path} -} - -func (e FileExistsError) Error() string { - return fmt.Sprintf("operation not allowed, file %q exists", e.path) -} - -// DiskStatus is returned by DiskUsage -type DiskStatus struct { - All uint64 - Used uint64 - Free uint64 - Avail uint64 -} - -func copyFile(src, dst string) (err error) { - in, err := os.Open(src) - if err != nil { - return err - } - - out, err := os.Create(dst) - if err != nil { - return err - } - - defer errors.Capture(&err, out.Close)() - - defer errors.Capture(&err, in.Close)() - - if _, err = io.Copy(out, in); err != nil { - return err - } - - return out.Sync() -} - -// MoveFileWithReplacement copies the file contents at `src` to `dst`. -// and deletes `src` on success. -// -// If the file at `dst` already exists, it will be truncated and its contents -// overwritten. -func MoveFileWithReplacement(src, dst string) error { - if err := copyFile(src, dst); err != nil { - return fmt.Errorf("copy: %w", err) - } - - return os.Remove(src) -} diff --git a/pkg/fs/fs_test.go b/pkg/fs/fs_test.go deleted file mode 100644 index 3e7b35dddb3..00000000000 --- a/pkg/fs/fs_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package fs_test - -import ( - "io" - "os" - "path/filepath" - "testing" - - "github.com/influxdata/influxdb/v2/pkg/fs" -) - -func TestRenameFileWithReplacement(t *testing.T) { - testFileMoveOrRename(t, "rename", fs.RenameFileWithReplacement) -} - -func TestMoveFileWithReplacement(t *testing.T) { - testFileMoveOrRename(t, "move", fs.MoveFileWithReplacement) -} - -func testFileMoveOrRename(t *testing.T, name string, testFunc func(src string, dst string) error) { - // sample data for loading into files - sampleData1 := "this is some data" - sampleData2 := "we got some more data" - - t.Run("exists", func(t *testing.T) { - oldpath := MustCreateTempFile(t, sampleData1) - newpath := MustCreateTempFile(t, sampleData2) - defer MustRemoveAll(oldpath) - defer MustRemoveAll(newpath) - - oldContents := MustReadAllFile(oldpath) - newContents := MustReadAllFile(newpath) - - if got, exp := oldContents, sampleData1; got != exp { - t.Fatalf("got contents %q, expected %q", got, exp) - } else if got, exp := newContents, sampleData2; got != exp { - t.Fatalf("got contents %q, expected %q", got, exp) - } - - if err := testFunc(oldpath, newpath); err != nil { - t.Fatalf("%s returned an error: %s", name, err) - } - - if err := fs.SyncDir(filepath.Dir(oldpath)); err != nil { - panic(err) - } - - // Contents of newpath will now be equivalent to oldpath' contents. - newContents = MustReadAllFile(newpath) - if newContents != oldContents { - t.Fatalf("contents for files differ: %q versus %q", newContents, oldContents) - } - - // oldpath will be removed. 
- if MustFileExists(oldpath) { - t.Fatalf("file %q still exists, but it shouldn't", oldpath) - } - }) - - t.Run("not exists", func(t *testing.T) { - oldpath := MustCreateTempFile(t, sampleData1) - defer MustRemoveAll(oldpath) - - oldContents := MustReadAllFile(oldpath) - if got, exp := oldContents, sampleData1; got != exp { - t.Fatalf("got contents %q, expected %q", got, exp) - } - - root := filepath.Dir(oldpath) - newpath := filepath.Join(root, "foo") - - if err := testFunc(oldpath, newpath); err != nil { - t.Fatalf("%s returned an error: %s", name, err) - } - - if err := fs.SyncDir(filepath.Dir(oldpath)); err != nil { - panic(err) - } - - // Contents of newpath will now be equivalent to oldpath's contents. - newContents := MustReadAllFile(newpath) - if newContents != oldContents { - t.Fatalf("contents for files differ: %q versus %q", newContents, oldContents) - } - - // oldpath will be removed. - if MustFileExists(oldpath) { - t.Fatalf("file %q still exists, but it shouldn't", oldpath) - } - }) -} - -// CreateTempFileOrFail creates a temporary file returning the path to the file. -func MustCreateTempFile(t testing.TB, data string) string { - t.Helper() - - f, err := os.CreateTemp("", "fs-test") - if err != nil { - t.Fatalf("failed to create temp file: %v", err) - } else if _, err := f.WriteString(data); err != nil { - t.Fatal(err) - } else if err := f.Close(); err != nil { - t.Fatal(err) - } - return f.Name() -} - -func MustRemoveAll(path string) { - if err := os.RemoveAll(path); err != nil { - panic(err) - } -} - -// MustFileExists determines if a file exists, panicking if any error -// (other than one associated with the file not existing) is returned. -func MustFileExists(path string) bool { - _, err := os.Stat(path) - if err == nil { - return true - } else if os.IsNotExist(err) { - return false - } - panic(err) -} - -// MustReadAllFile reads the contents of path, panicking if there is an error. -func MustReadAllFile(path string) string { - fd, err := os.Open(path) - if err != nil { - panic(err) - } - defer fd.Close() - - data, err := io.ReadAll(fd) - if err != nil { - panic(err) - } - return string(data) -} diff --git a/pkg/fs/fs_unix.go b/pkg/fs/fs_unix.go deleted file mode 100644 index 646763824b3..00000000000 --- a/pkg/fs/fs_unix.go +++ /dev/null @@ -1,88 +0,0 @@ -//go:build !windows - -package fs - -import ( - "errors" - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -// SyncDir flushes any file renames to the filesystem. -func SyncDir(dirName string) error { - // fsync the dir to flush the rename - dir, err := os.OpenFile(dirName, os.O_RDONLY, os.ModeDir) - if err != nil { - return err - } - defer dir.Close() - - // While we're on unix, we may be running in a Docker container that is - // pointed at a Windows volume over samba. That doesn't support fsyncs - // on directories. This shows itself as an EINVAL, so we ignore that - // error. - err = dir.Sync() - if pe, ok := err.(*os.PathError); ok && pe.Err == syscall.EINVAL { - err = nil - } else if err != nil { - return err - } - - return dir.Close() -} - -// RenameFileWithReplacement will replace any existing file at newpath with the contents -// of oldpath. It works also if it the rename spans over several file systems. -// -// If no file already exists at newpath, newpath will be created using the contents -// of oldpath. If this function returns successfully, the contents of newpath will -// be identical to oldpath, and oldpath will be removed. 
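A minimal usage sketch for the pkg/fs helpers above; the file names are illustrative. RenameFileWithReplacement swaps a freshly written file into place, and SyncDir makes the rename durable.

package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/influxdata/influxdb/v2/pkg/fs"
)

func main() {
	tmp, err := os.CreateTemp("", "index-*.tmp")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := tmp.WriteString("payload"); err != nil {
		log.Fatal(err)
	}
	if err := tmp.Close(); err != nil {
		log.Fatal(err)
	}

	dst := filepath.Join(os.TempDir(), "index.dat")

	// Replaces anything already at dst; falls back to copy-and-remove when the
	// rename crosses filesystem boundaries.
	if err := fs.RenameFileWithReplacement(tmp.Name(), dst); err != nil {
		log.Fatal(err)
	}

	// Flush the directory entry so the rename survives a crash (no-op on Windows).
	if err := fs.SyncDir(filepath.Dir(dst)); err != nil {
		log.Fatal(err)
	}
}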
-func RenameFileWithReplacement(oldpath, newpath string) error { - if err := os.Rename(oldpath, newpath); !errors.Is(err, syscall.EXDEV) { - // note: also includes err == nil - return err - } - - // move over filesystem boundaries, we have to copy. - // (if there was another error, it will likely fail a second time) - return MoveFileWithReplacement(oldpath, newpath) - -} - -// RenameFile renames oldpath to newpath, returning an error if newpath already -// exists. If this function returns successfully, the contents of newpath will -// be identical to oldpath, and oldpath will be removed. -func RenameFile(oldpath, newpath string) error { - if _, err := os.Stat(newpath); err == nil { - return newFileExistsError(newpath) - } - - return RenameFileWithReplacement(oldpath, newpath) -} - -// CreateFile creates a new file at newpath, returning an error if newpath already -// exists -func CreateFile(newpath string) (*os.File, error) { - if _, err := os.Stat(newpath); err == nil { - return nil, newFileExistsError(newpath) - } - - return os.Create(newpath) -} - -// DiskUsage returns disk usage of disk of path -func DiskUsage(path string) (*DiskStatus, error) { - fs := unix.Statfs_t{} - if err := unix.Statfs(path, &fs); err != nil { - return nil, err - } - - var disk DiskStatus - disk.All = fs.Blocks * uint64(fs.Bsize) - disk.Avail = uint64(fs.Bavail) * uint64(fs.Bsize) - disk.Free = fs.Bfree * uint64(fs.Bsize) - disk.Used = disk.All - disk.Free - return &disk, nil -} diff --git a/pkg/fs/fs_windows.go b/pkg/fs/fs_windows.go deleted file mode 100644 index 6d0c69ea85f..00000000000 --- a/pkg/fs/fs_windows.go +++ /dev/null @@ -1,70 +0,0 @@ -package fs - -import ( - "os" - "syscall" - "unsafe" -) - -func SyncDir(dirName string) error { - return nil -} - -// RenameFileWithReplacement will replace any existing file at newpath with the contents -// of oldpath. -// -// If no file already exists at newpath, newpath will be created using the contents -// of oldpath. If this function returns successfully, the contents of newpath will -// be identical to oldpath, and oldpath will be removed. -func RenameFileWithReplacement(oldpath, newpath string) error { - if _, err := os.Stat(newpath); err == nil { - if err = os.Remove(newpath); nil != err { - return err - } - } - - return os.Rename(oldpath, newpath) -} - -// RenameFile renames oldpath to newpath, returning an error if newpath already -// exists. If this function returns successfully, the contents of newpath will -// be identical to oldpath, and oldpath will be removed. -func RenameFile(oldpath, newpath string) error { - if _, err := os.Stat(newpath); err == nil { - // os.Rename on Windows will return an error if the file exists, but it's - // preferable to keep the errors the same across platforms. 
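A small sketch of the DiskUsage helper above; the path is illustrative. All, Free, and Avail are reported in bytes, and Used is derived as All minus Free.

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/v2/pkg/fs"
)

func main() {
	st, err := fs.DiskUsage("/var/lib/influxdb")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("total=%d used=%d free=%d avail=%d bytes\n", st.All, st.Used, st.Free, st.Avail)
}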
- return newFileExistsError(newpath) - } - - return os.Rename(oldpath, newpath) -} - -// CreateFile creates a new file at newpath, returning an error if newpath already -// exists -func CreateFile(newpath string) (*os.File, error) { - if _, err := os.Stat(newpath); err == nil { - return nil, newFileExistsError(newpath) - } - - return os.Create(newpath) -} - -// DiskUsage returns disk usage of disk of path -func DiskUsage(path string) (*DiskStatus, error) { - var disk DiskStatus - h := syscall.MustLoadDLL("kernel32.dll") - c := h.MustFindProc("GetDiskFreeSpaceExW") - p, err := syscall.UTF16PtrFromString(path) - if err != nil { - return nil, err - } - r1, _, err := c.Call(uintptr(unsafe.Pointer(p)), - uintptr(unsafe.Pointer(&disk.Avail)), - uintptr(unsafe.Pointer(&disk.All)), - uintptr(unsafe.Pointer(&disk.Free))) - if r1 == 0 { - return nil, err - } - disk.Used = disk.All - disk.Free - return &disk, nil -} diff --git a/pkg/httpc/body_fns.go b/pkg/httpc/body_fns.go deleted file mode 100644 index 18beb8c0dcb..00000000000 --- a/pkg/httpc/body_fns.go +++ /dev/null @@ -1,41 +0,0 @@ -package httpc - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "io" -) - -// BodyFn provides a writer to which a value will be written to -// that will make it's way into the HTTP request. -type BodyFn func(w io.Writer) (header string, headerVal string, err error) - -// BodyEmpty returns an empty body. -func BodyEmpty(io.Writer) (string, string, error) { - return "", "", nil -} - -// BodyGob gob encodes the value provided for the HTTP request. Sets the -// Content-Encoding to application/gob. -func BodyGob(v interface{}) BodyFn { - return func(w io.Writer) (string, string, error) { - return headerContentEncoding, "application/gob", gob.NewEncoder(w).Encode(v) - } -} - -// BodyJSON JSON encodes the value provided for the HTTP request. Sets the -// Content-Type to application/json. -func BodyJSON(v interface{}) BodyFn { - return func(w io.Writer) (string, string, error) { - return headerContentType, "application/json", json.NewEncoder(w).Encode(v) - } -} - -type nopBufCloser struct { - bytes.Buffer -} - -func (*nopBufCloser) Close() error { - return nil -} diff --git a/pkg/httpc/client.go b/pkg/httpc/client.go deleted file mode 100644 index 955d354c909..00000000000 --- a/pkg/httpc/client.go +++ /dev/null @@ -1,214 +0,0 @@ -package httpc - -import ( - "errors" - "io" - "net/http" - "net/url" - "path" - - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -type ( - // WriteCloserFn is a write closer wrapper than indicates the type of writer by - // returning the header and header value associated with the writer closer. - // i.e. GZIP writer returns header Content-Encoding with value gzip alongside - // the writer. - WriteCloserFn func(closer io.WriteCloser) (string, string, io.WriteCloser) - - // doer provides an abstraction around the actual http client behavior. The doer - // can be faked out in tests or another http client provided in its place. - doer interface { - Do(*http.Request) (*http.Response, error) - } -) - -// Client is a basic http client that can make cReqs with out having to juggle -// the token and so forth. It provides sane defaults for checking response -// statuses, sets auth token when provided, and sets the content type to -// application/json for each request. The token, response checker, and -// content type can be overridden on the Req as well. 
-type Client struct { - addr url.URL - doer doer - defaultHeaders http.Header - - writerFns []WriteCloserFn - - authFn func(*http.Request) error - respFn func(*http.Response) error - statusFn func(*http.Response) error -} - -// New creates a new httpc client. -func New(opts ...ClientOptFn) (*Client, error) { - opt := clientOpt{ - authFn: func(*http.Request) error { return nil }, - } - for _, o := range opts { - if err := o(&opt); err != nil { - return nil, err - } - } - - if opt.addr == "" { - return nil, errors.New("must provide a non empty host address") - } - - u, err := url.Parse(opt.addr) - if err != nil { - return nil, err - } - - if opt.doer == nil { - opt.doer = defaultHTTPClient(u.Scheme, opt.insecureSkipVerify) - } - - return &Client{ - addr: *u, - doer: opt.doer, - defaultHeaders: opt.headers, - authFn: opt.authFn, - statusFn: opt.statusFn, - writerFns: opt.writerFns, - }, nil -} - -// Delete generates a DELETE request. -func (c *Client) Delete(urlPath ...string) *Req { - return c.Req(http.MethodDelete, nil, urlPath...) -} - -// Get generates a GET request. -func (c *Client) Get(urlPath ...string) *Req { - return c.Req(http.MethodGet, nil, urlPath...) -} - -// Patch generates a PATCH request. -func (c *Client) Patch(bFn BodyFn, urlPath ...string) *Req { - return c.Req(http.MethodPatch, bFn, urlPath...) -} - -// PatchJSON generates a PATCH request. This is to be used with value or pointer to value type. -// Providing a stream/reader will result in disappointment. -func (c *Client) PatchJSON(v interface{}, urlPath ...string) *Req { - return c.Patch(BodyJSON(v), urlPath...) -} - -// Post generates a POST request. -func (c *Client) Post(bFn BodyFn, urlPath ...string) *Req { - return c.Req(http.MethodPost, bFn, urlPath...) -} - -// PostJSON generates a POST request and json encodes the body. This is to be -// used with value or pointer to value type. Providing a stream/reader will result -// in disappointment. -func (c *Client) PostJSON(v interface{}, urlPath ...string) *Req { - return c.Post(BodyJSON(v), urlPath...) -} - -// Put generates a PUT request. -func (c *Client) Put(bFn BodyFn, urlPath ...string) *Req { - return c.Req(http.MethodPut, bFn, urlPath...) -} - -// PutJSON generates a PUT request. This is to be used with value or pointer to value type. -// Providing a stream/reader will result in disappointment. -func (c *Client) PutJSON(v interface{}, urlPath ...string) *Req { - return c.Put(BodyJSON(v), urlPath...) -} - -// Req constructs a request. -func (c *Client) Req(method string, bFn BodyFn, urlPath ...string) *Req { - bodyF := BodyEmpty - if bFn != nil { - bodyF = bFn - } - - headers := make(http.Header, len(c.defaultHeaders)) - for header, vals := range c.defaultHeaders { - for _, v := range vals { - headers.Add(header, v) - } - } - var buf nopBufCloser - var w io.WriteCloser = &buf - for _, writerFn := range c.writerFns { - header, headerVal, ww := writerFn(w) - w = ww - headers.Add(header, headerVal) - } - - header, headerVal, err := bodyF(w) - if err != nil { - // TODO(@jsteenb2): add a inspection for an OK() or Valid() method, then enforce - // that across all consumers? Same for all bodyFns for that matter. - return &Req{ - err: &errors2.Error{ - Code: errors2.EInvalid, - Err: err, - }, - } - } - if header != "" { - headers.Set(header, headerVal) - } - // w.Close here is necessary since we have to close any gzip writer - // or other writer that requires closing. 
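A minimal sketch of building a client and issuing a request with the httpc API above; the address, token, endpoint, and payload are illustrative rather than a documented InfluxDB call.

package main

import (
	"context"
	"log"

	"github.com/influxdata/influxdb/v2/pkg/httpc"
)

type bucket struct {
	Name  string `json:"name"`
	OrgID string `json:"orgID"`
}

func main() {
	client, err := httpc.New(
		httpc.WithAddr("http://localhost:8086"),
		httpc.WithAuthToken("my-token"),
	)
	if err != nil {
		log.Fatal(err)
	}

	var created bucket
	err = client.
		PostJSON(bucket{Name: "example", OrgID: "000000000000000a"}, "/api/v2/buckets").
		DecodeJSON(&created).
		StatusFn(httpc.StatusIn(201)).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created bucket %q", created.Name)
}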
- if err := w.Close(); err != nil { - return &Req{err: err} - } - - var body io.Reader - if buf.Len() > 0 { - body = &buf - } - - req, err := http.NewRequest(method, c.buildURL(urlPath...), body) - if err != nil { - return &Req{err: err} - } - - cr := &Req{ - client: c.doer, - req: req, - authFn: c.authFn, - respFn: c.respFn, - statusFn: c.statusFn, - } - return cr.Headers(headers) -} - -// Clone creates a new *Client type from an existing client. This may be -// useful if you want to have a shared base client, then take a specific -// client from that base and tack on some extra goodies like specific headers -// and whatever else that suits you. -// Note: a new net.http.Client type will not be created. It will share the existing -// http.Client from the parent httpc.Client. Same connection pool, different specifics. -func (c *Client) Clone(opts ...ClientOptFn) (*Client, error) { - existingOpts := []ClientOptFn{ - WithAuth(c.authFn), - withDoer(c.doer), - WithRespFn(c.respFn), - WithStatusFn(c.statusFn), - } - for h, vals := range c.defaultHeaders { - for _, v := range vals { - existingOpts = append(existingOpts, WithHeader(h, v)) - } - } - for _, fn := range c.writerFns { - existingOpts = append(existingOpts, WithWriterFn(fn)) - } - - return New(append(existingOpts, opts...)...) -} - -func (c *Client) buildURL(urlPath ...string) string { - u := c.addr - if len(urlPath) > 0 { - u.Path = path.Join(u.Path, path.Join(urlPath...)) - } - return u.String() -} diff --git a/pkg/httpc/client_test.go b/pkg/httpc/client_test.go deleted file mode 100644 index 3463bcafc5b..00000000000 --- a/pkg/httpc/client_test.go +++ /dev/null @@ -1,503 +0,0 @@ -package httpc - -import ( - "bytes" - "compress/gzip" - "context" - "encoding/gob" - "encoding/json" - "errors" - "io" - "net/http" - "sort" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestClient(t *testing.T) { - newClient := func(t *testing.T, addr string, opts ...ClientOptFn) *Client { - t.Helper() - client, err := New(append(opts, WithAddr(addr))...) - require.NoError(t, err) - return client - } - - type ( - respFn func(status int, req *http.Request) (resp *http.Response, err error) - - authFn func(status int, respFn respFn, opts ...ClientOptFn) (*Client, *fakeDoer) - - newReqFn func(*Client, string, reqBody) *Req - - testCase struct { - method string - status int - clientOpts []ClientOptFn - reqFn newReqFn - queryParams [][2]string - reqBody reqBody - } - ) - - tokenAuthClient := func(status int, respFn respFn, opts ...ClientOptFn) (*Client, *fakeDoer) { - const token = "secrettoken" - fakeDoer := &fakeDoer{ - doFn: func(r *http.Request) (*http.Response, error) { - if r.Header.Get("Authorization") != "Token "+token { - return nil, errors.New("unauthed token") - } - return respFn(status, r) - }, - } - client := newClient(t, "http://example.com", append(opts, WithAuthToken(token))...) - client.doer = fakeDoer - return client, fakeDoer - } - - cookieAuthClient := func(status int, respFn respFn, opts ...ClientOptFn) (*Client, *fakeDoer) { - const session = "secret" - fakeDoer := &fakeDoer{ - doFn: func(r *http.Request) (*http.Response, error) { - cookie, err := r.Cookie("session") - if err != nil { - return nil, errors.New("session cookie not found") - } - if cookie.Value != session { - return nil, errors.New("unauthed cookie") - } - return respFn(status, r) - }, - } - client := newClient(t, "http://example.com", append(opts, WithSessionCookie(session))...) 
- client.doer = fakeDoer - return client, fakeDoer - } - - noAuthClient := func(status int, respFn respFn, opts ...ClientOptFn) (*Client, *fakeDoer) { - fakeDoer := &fakeDoer{ - doFn: func(r *http.Request) (*http.Response, error) { - return respFn(status, r) - }, - } - client := newClient(t, "http://example.com", opts...) - client.doer = fakeDoer - return client, fakeDoer - } - - authTests := []struct { - name string - clientFn authFn - }{ - { - name: "no auth", - clientFn: noAuthClient, - }, - { - name: "token auth", - clientFn: tokenAuthClient, - }, - { - name: "cookie auth", - clientFn: cookieAuthClient, - }, - } - - encodingTests := []struct { - name string - respFn respFn - decodeFn func(v interface{}) func(r *Req) *Req - }{ - { - name: "json response", - respFn: stubRespNJSONBody, - decodeFn: func(v interface{}) func(r *Req) *Req { - return func(r *Req) *Req { return r.DecodeJSON(v) } - }, - }, - { - name: "gzipped json response", - respFn: stubRespNGZippedJSON, - decodeFn: func(v interface{}) func(r *Req) *Req { - return func(r *Req) *Req { return r.DecodeJSON(v) } - }, - }, - { - name: "gob response", - respFn: stubRespNGobBody, - decodeFn: func(v interface{}) func(r *Req) *Req { - return func(r *Req) *Req { return r.DecodeGob(v) } - }, - }, - } - - testWithRespBody := func(tt testCase) func(t *testing.T) { - return func(t *testing.T) { - t.Helper() - - for _, encTest := range encodingTests { - t.Run(encTest.name, func(t *testing.T) { - t.Helper() - - for _, authTest := range authTests { - fn := func(t *testing.T) { - t.Helper() - client, fakeDoer := authTest.clientFn(tt.status, encTest.respFn, tt.clientOpts...) - - req := tt.reqFn(client, "/new/path/heres", tt.reqBody). - Accept("application/json"). - Header("X-Code", "Code"). - QueryParams(tt.queryParams...). - StatusFn(StatusIn(tt.status)) - - var actual echoResp - req = encTest.decodeFn(&actual)(req) - - err := req.Do(context.TODO()) - require.NoError(t, err) - - expectedResp := echoResp{ - Method: tt.method, - Scheme: "http", - Host: "example.com", - Path: "/new/path/heres", - Queries: tt.queryParams, - ReqBody: tt.reqBody, - } - assert.Equal(t, expectedResp, actual) - require.Len(t, fakeDoer.args, 1) - assert.Equal(t, "application/json", fakeDoer.args[0].Header.Get("Accept")) - assert.Equal(t, "Code", fakeDoer.args[0].Header.Get("X-Code")) - } - t.Run(authTest.name, fn) - } - }) - } - } - } - - newGet := func(client *Client, urlPath string, _ reqBody) *Req { - return client.Get(urlPath) - } - - t.Run("Delete", func(t *testing.T) { - for _, authTest := range authTests { - fn := func(t *testing.T) { - client, fakeDoer := authTest.clientFn(204, stubResp) - - err := client.Delete("/new/path/heres"). - Header("X-Code", "Code"). - StatusFn(StatusIn(204)). 
- Do(context.TODO()) - require.NoError(t, err) - - require.Len(t, fakeDoer.args, 1) - assert.Equal(t, "Code", fakeDoer.args[0].Header.Get("X-Code")) - } - t.Run(authTest.name, fn) - } - }) - - t.Run("Get", func(t *testing.T) { - tests := []struct { - name string - testCase - }{ - { - name: "handles basic call", - testCase: testCase{ - status: 200, - }, - }, - { - name: "handles query values", - testCase: testCase{ - queryParams: [][2]string{{"q1", "v1"}, {"q2", "v2"}}, - status: 202, - }, - }, - } - - for _, tt := range tests { - tt.method = "GET" - tt.reqFn = newGet - - t.Run(tt.name, testWithRespBody(tt.testCase)) - } - }) - - t.Run("Patch Post Put with request bodies", func(t *testing.T) { - methods := []struct { - name string - methodCallFn func(client *Client, urlPath string, bFn BodyFn) *Req - }{ - { - name: "PATCH", - methodCallFn: func(client *Client, urlPath string, bFn BodyFn) *Req { - return client.Patch(bFn, urlPath) - }, - }, - { - name: "POST", - methodCallFn: func(client *Client, urlPath string, bFn BodyFn) *Req { - return client.Post(bFn, urlPath) - }, - }, - { - name: "PUT", - methodCallFn: func(client *Client, urlPath string, bFn BodyFn) *Req { - return client.Put(bFn, urlPath) - }, - }, - } - - for _, method := range methods { - t.Run(method.name, func(t *testing.T) { - tests := []struct { - name string - testCase - }{ - { - name: "handles json req body", - testCase: testCase{ - status: 200, - reqFn: func(client *Client, urlPath string, body reqBody) *Req { - return method.methodCallFn(client, urlPath, BodyJSON(body)) - }, - reqBody: reqBody{ - Foo: "foo 1", - Bar: 31, - }, - }, - }, - { - name: "handles gob req body", - testCase: testCase{ - status: 201, - reqFn: func(client *Client, urlPath string, body reqBody) *Req { - return method.methodCallFn(client, urlPath, BodyGob(body)) - }, - reqBody: reqBody{ - Foo: "foo 1", - Bar: 31, - }, - }, - }, - { - name: "handles gzipped json req body", - testCase: testCase{ - status: 201, - clientOpts: []ClientOptFn{WithWriterGZIP()}, - reqFn: func(client *Client, urlPath string, body reqBody) *Req { - return method.methodCallFn(client, urlPath, BodyJSON(body)) - }, - reqBody: reqBody{ - Foo: "foo", - Bar: 31, - }, - }, - }, - } - - for _, tt := range tests { - tt.method = method.name - - t.Run(tt.name, testWithRespBody(tt.testCase)) - } - }) - } - }) - - t.Run("PatchJSON PostJSON PutJSON with request bodies", func(t *testing.T) { - methods := []struct { - name string - methodCallFn func(client *Client, urlPath string, v interface{}) *Req - }{ - { - name: "PATCH", - methodCallFn: func(client *Client, urlPath string, v interface{}) *Req { - return client.PatchJSON(v, urlPath) - }, - }, - { - name: "POST", - methodCallFn: func(client *Client, urlPath string, v interface{}) *Req { - return client.PostJSON(v, urlPath) - }, - }, - { - name: "PUT", - methodCallFn: func(client *Client, urlPath string, v interface{}) *Req { - return client.PutJSON(v, urlPath) - }, - }, - } - - for _, method := range methods { - t.Run(method.name, func(t *testing.T) { - tests := []struct { - name string - testCase - }{ - { - name: "handles json req body", - testCase: testCase{ - status: 200, - reqFn: func(client *Client, urlPath string, body reqBody) *Req { - return method.methodCallFn(client, urlPath, body) - }, - reqBody: reqBody{ - Foo: "foo 1", - Bar: 31, - }, - }, - }, - } - - for _, tt := range tests { - tt.method = method.name - - t.Run(tt.name, testWithRespBody(tt.testCase)) - } - }) - } - }) -} - -type fakeDoer struct { - doFn func(*http.Request) 
(*http.Response, error) - args []*http.Request - callCount int -} - -func (f *fakeDoer) Do(r *http.Request) (*http.Response, error) { - f.callCount++ - f.args = append(f.args, r) - return f.doFn(r) -} - -func stubResp(status int, _ *http.Request) (*http.Response, error) { - return &http.Response{ - StatusCode: status, - Body: io.NopCloser(new(bytes.Buffer)), - }, nil -} - -func stubRespNGZippedJSON(status int, r *http.Request) (*http.Response, error) { - e, err := decodeFromContentType(r) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - w := gzip.NewWriter(&buf) - defer w.Close() - if err := json.NewEncoder(w).Encode(e); err != nil { - return nil, err - } - if err := w.Flush(); err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: status, - Body: io.NopCloser(&buf), - Header: http.Header{ - "Content-Encoding": []string{"gzip"}, - headerContentType: []string{"application/json"}, - }, - }, nil -} - -func stubRespNJSONBody(status int, r *http.Request) (*http.Response, error) { - e, err := decodeFromContentType(r) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(e); err != nil { - return nil, err - } - - return &http.Response{ - StatusCode: status, - Body: io.NopCloser(&buf), - Header: http.Header{headerContentType: []string{"application/json"}}, - }, nil -} - -func stubRespNGobBody(status int, r *http.Request) (*http.Response, error) { - e, err := decodeFromContentType(r) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - if err := gob.NewEncoder(&buf).Encode(e); err != nil { - return nil, err - } - return &http.Response{ - StatusCode: status, - Body: io.NopCloser(&buf), - Header: http.Header{headerContentEncoding: []string{"application/gob"}}, - }, nil -} - -type ( - reqBody struct { - Foo string - Bar int - } - - echoResp struct { - Method string - Scheme string - Host string - Path string - Queries [][2]string - - ReqBody reqBody - } -) - -func decodeFromContentType(r *http.Request) (echoResp, error) { - e := echoResp{ - Method: r.Method, - Scheme: r.URL.Scheme, - Host: r.URL.Host, - Path: r.URL.Path, - } - for key, vals := range r.URL.Query() { - for _, v := range vals { - e.Queries = append(e.Queries, [2]string{key, v}) - } - } - sort.Slice(e.Queries, func(i, j int) bool { - qi, qj := e.Queries[i], e.Queries[j] - if qi[0] == qj[0] { - return qi[1] < qj[1] - } - return qi[0] < qj[0] - }) - - var reader io.Reader = r.Body - if r.Header.Get(headerContentEncoding) == "gzip" { - gr, err := gzip.NewReader(reader) - if err != nil { - return echoResp{}, err - } - reader = gr - } - - if r.Header.Get(headerContentEncoding) == "application/gob" { - return e, gob.NewDecoder(reader).Decode(&e.ReqBody) - } - - if r.Header.Get(headerContentType) == "application/json" { - return e, json.NewDecoder(reader).Decode(&e.ReqBody) - } - - return e, nil -} diff --git a/pkg/httpc/options.go b/pkg/httpc/options.go deleted file mode 100644 index 94a2356378f..00000000000 --- a/pkg/httpc/options.go +++ /dev/null @@ -1,169 +0,0 @@ -package httpc - -import ( - "compress/gzip" - "crypto/tls" - "io" - "net" - "net/http" - "time" -) - -// ClientOptFn are options to set different parameters on the Client. 
-type ClientOptFn func(*clientOpt) error - -type clientOpt struct { - addr string - insecureSkipVerify bool - doer doer - headers http.Header - authFn func(*http.Request) error - respFn func(*http.Response) error - statusFn func(*http.Response) error - writerFns []WriteCloserFn -} - -// WithAddr sets the host address on the client. -func WithAddr(addr string) ClientOptFn { - return func(opt *clientOpt) error { - opt.addr = addr - return nil - } -} - -// WithAuth provides a means to set a custom auth that doesn't match -// the provided auth types here. -func WithAuth(fn func(r *http.Request) error) ClientOptFn { - return func(opt *clientOpt) error { - opt.authFn = fn - return nil - } -} - -// WithAuthToken provides token auth for requests. -func WithAuthToken(token string) ClientOptFn { - return WithAuth(func(r *http.Request) error { - r.Header.Set("Authorization", "Token "+token) - return nil - }) -} - -// WithSessionCookie provides cookie auth for requests to mimic the browser. -// Typically, session is influxdb.Session.Key. -func WithSessionCookie(session string) ClientOptFn { - return WithAuth(func(r *http.Request) error { - r.AddCookie(&http.Cookie{ - Name: "session", - Value: session, - }) - - return nil - }) -} - -// WithContentType sets the content type that will be applied to the requests created -// by the Client. -func WithContentType(ct string) ClientOptFn { - return WithHeader(headerContentType, ct) -} - -func withDoer(d doer) ClientOptFn { - return func(opt *clientOpt) error { - opt.doer = d - return nil - } -} - -// WithHeader sets a default header that will be applied to all requests created -// by the client. -func WithHeader(header, val string) ClientOptFn { - return func(opt *clientOpt) error { - if opt.headers == nil { - opt.headers = make(http.Header) - } - opt.headers.Add(header, val) - return nil - } -} - -// WithUserAgentHeader sets the user agent for the http client requests. -func WithUserAgentHeader(userAgent string) ClientOptFn { - return WithHeader("User-Agent", userAgent) -} - -// WithHTTPClient sets the raw http client on the httpc Client. -func WithHTTPClient(c *http.Client) ClientOptFn { - return func(opt *clientOpt) error { - opt.doer = c - return nil - } -} - -// WithInsecureSkipVerify sets the insecure skip verify on the http client's htp transport. -func WithInsecureSkipVerify(b bool) ClientOptFn { - return func(opts *clientOpt) error { - opts.insecureSkipVerify = b - return nil - } -} - -// WithRespFn sets the default resp fn for the client that will be applied to all requests -// generated from it. -func WithRespFn(fn func(*http.Response) error) ClientOptFn { - return func(opt *clientOpt) error { - opt.respFn = fn - return nil - } -} - -// WithStatusFn sets the default status fn for the client that will be applied to all requests -// generated from it. -func WithStatusFn(fn func(*http.Response) error) ClientOptFn { - return func(opt *clientOpt) error { - opt.statusFn = fn - return nil - } -} - -// WithWriterFn applies the provided writer behavior to all the request bodies' -// generated from the client. -func WithWriterFn(fn WriteCloserFn) ClientOptFn { - return func(opt *clientOpt) error { - opt.writerFns = append(opt.writerFns, fn) - return nil - } -} - -// WithWriterGZIP gzips the request body generated from this client. 
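The options above compose at construction time; a brief sketch with placeholder host, token, and user agent. WithInsecureSkipVerify only affects the default HTTPS transport, so it is left out when a custom *http.Client is supplied via WithHTTPClient.

package main

import (
	"log"

	"github.com/influxdata/influxdb/v2/pkg/httpc"
)

func main() {
	client, err := httpc.New(
		httpc.WithAddr("https://influxdb.example.com"),
		httpc.WithAuthToken("my-token"),
		httpc.WithUserAgentHeader("my-tool/0.1"),
		httpc.WithWriterGZIP(),             // gzip request bodies and set Content-Encoding
		httpc.WithInsecureSkipVerify(true), // only for self-signed certificates
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use as in the earlier PostJSON sketch
}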
-func WithWriterGZIP() ClientOptFn { - return WithWriterFn(func(w io.WriteCloser) (string, string, io.WriteCloser) { - return headerContentEncoding, "gzip", gzip.NewWriter(w) - }) -} - -// DefaultTransportInsecure is identical to http.DefaultTransport, with -// the exception that tls.Config is configured with InsecureSkipVerify -// set to true. -var DefaultTransportInsecure http.RoundTripper = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - ForceAttemptHTTP2: true, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, -} - -func defaultHTTPClient(scheme string, insecure bool) *http.Client { - if scheme == "https" && insecure { - return &http.Client{Transport: DefaultTransportInsecure} - } - return &http.Client{Transport: http.DefaultTransport} -} diff --git a/pkg/httpc/req.go b/pkg/httpc/req.go deleted file mode 100644 index 04857cc5783..00000000000 --- a/pkg/httpc/req.go +++ /dev/null @@ -1,229 +0,0 @@ -package httpc - -import ( - "compress/gzip" - "context" - "encoding/gob" - "encoding/json" - "fmt" - "io" - "net/http" - "strings" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" -) - -const ( - headerContentType = "Content-Type" - headerContentEncoding = "Content-Encoding" -) - -// Req is a request type. -type Req struct { - client doer - - req *http.Request - authFn func(*http.Request) error - - decodeFn func(*http.Response) error - respFn func(*http.Response) error - statusFn func(*http.Response) error - - err error -} - -// Accept sets the Accept header to the provided content type on the request. -func (r *Req) Accept(contentType string) *Req { - return r.Header("Accept", contentType) -} - -// Auth sets the authorization for a request. -func (r *Req) Auth(authFn func(r *http.Request) error) *Req { - if r.err != nil { - return r - } - r.authFn = authFn - return r -} - -// ContentType sets the Content-Type header to the provided content type on the request. -func (r *Req) ContentType(contentType string) *Req { - return r.Header("Content-Type", contentType) -} - -// Decode sets the decoding functionality for the request. All Decode calls are called -// after the status and response functions are called. Decoding will not happen if error -// encountered in the status check. -func (r *Req) Decode(fn func(resp *http.Response) error) *Req { - if r.err != nil { - return r - } - r.decodeFn = fn - return r -} - -// DecodeGob sets the decoding functionality to decode gob for the request. -func (r *Req) DecodeGob(v interface{}) *Req { - return r.Decode(func(resp *http.Response) error { - r := decodeReader(resp.Body, resp.Header) - return gob.NewDecoder(r).Decode(v) - }) -} - -// DecodeJSON sets the decoding functionality to decode json for the request. -func (r *Req) DecodeJSON(v interface{}) *Req { - return r.Decode(func(resp *http.Response) error { - r := decodeReader(resp.Body, resp.Header) - return json.NewDecoder(r).Decode(v) - }) -} - -// Header adds the header to the http request. -func (r *Req) Header(k, v string) *Req { - if r.err != nil { - return r - } - r.req.Header.Add(k, v) - return r -} - -// Headers adds all the headers to the http request. 
-func (r *Req) Headers(m map[string][]string) *Req { - if r.err != nil { - return r - } - for header, vals := range m { - if header == "" { - continue - } - for _, v := range vals { - r = r.Header(header, v) - } - } - return r -} - -// QueryParams adds the query params to the http request. -func (r *Req) QueryParams(pairs ...[2]string) *Req { - if r.err != nil || len(pairs) == 0 { - return r - } - params := r.req.URL.Query() - for _, p := range pairs { - params.Add(p[0], p[1]) - } - r.req.URL.RawQuery = params.Encode() - return r -} - -// RespFn provides a means to inspect the entire http response. This function runs first -// before the status and decode functions are called. -func (r *Req) RespFn(fn func(*http.Response) error) *Req { - r.respFn = fn - return r -} - -// StatusFn sets a status check function. This runs after the resp func -// but before the decode fn. -func (r *Req) StatusFn(fn func(*http.Response) error) *Req { - r.statusFn = fn - return r -} - -// Do makes the HTTP request. Any errors that had been encountered in -// the lifetime of the Req type will be returned here first, in place of -// the call. This makes it safe to call Do at anytime. -func (r *Req) Do(ctx context.Context) error { - if r.err != nil { - return r.err - } - - if err := r.authFn(r.req); err != nil { - return err - } - - // TODO(@jsteenb2): wrap do with retry/backoff policy. - return r.do(ctx) -} - -func (r *Req) do(ctx context.Context) error { - span, ctx := tracing.StartSpanFromContextWithOperationName(ctx, r.req.URL.String()) - defer span.Finish() - - u := r.req.URL - span.LogKV( - "scheme", u.Scheme, - "host", u.Host, - "path", u.Path, - "query_params", u.Query().Encode(), - ) - - tracing.InjectToHTTPRequest(span, r.req) - - resp, err := r.client.Do(r.req.WithContext(ctx)) - if err != nil { - return err - } - defer func() { - io.Copy(io.Discard, resp.Body) // drain body completely - resp.Body.Close() - }() - - span.LogKV( - "response_code", resp.StatusCode, - "response_byte", resp.ContentLength, - ) - - if r.respFn != nil { - if err := r.respFn(resp); err != nil { - return err - } - } - - if r.statusFn != nil { - if err := r.statusFn(resp); err != nil { - return err - } - } - - if r.decodeFn != nil { - if err := r.decodeFn(resp); err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - } - return nil -} - -// StatusIn validates the status code matches one of the provided statuses. -func StatusIn(code int, rest ...int) func(*http.Response) error { - return func(resp *http.Response) error { - for _, code := range append(rest, code) { - if code == resp.StatusCode { - return nil - } - } - return fmt.Errorf("received unexpected status: %s %d", resp.Status, resp.StatusCode) - } -} - -var encodingReaders = map[string]func(io.Reader) io.Reader{ - "gzip": func(r io.Reader) io.Reader { - if gr, err := gzip.NewReader(r); err == nil { - return gr - } - return r - }, -} - -func decodeReader(r io.Reader, headers http.Header) io.Reader { - contentEncoding := strings.TrimSpace(headers.Get(headerContentEncoding)) - fn, ok := encodingReaders[contentEncoding] - if ok { - return fn(r) - } - return r -} diff --git a/pkg/jsonnet/decode.go b/pkg/jsonnet/decode.go deleted file mode 100644 index 79bfcf3f9d1..00000000000 --- a/pkg/jsonnet/decode.go +++ /dev/null @@ -1,33 +0,0 @@ -package jsonnet - -import ( - "encoding/json" - "io" - - "github.com/google/go-jsonnet" -) - -// Decoder type can decode a jsonnet stream into the given output. 
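A minimal sketch of the pkg/jsonnet Decoder; the snippet and field names are illustrative. The jsonnet source is evaluated to JSON in memory and then unmarshalled into the target value.

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/influxdata/influxdb/v2/pkg/jsonnet"
)

func main() {
	const snippet = `{
		bucket: "telegraf",
		retention: "30d",
		labels: ["env:" + self.bucket],
	}`

	var cfg struct {
		Bucket    string   `json:"bucket"`
		Retention string   `json:"retention"`
		Labels    []string `json:"labels"`
	}
	if err := jsonnet.NewDecoder(strings.NewReader(snippet)).Decode(&cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg) // {Bucket:telegraf Retention:30d Labels:[env:telegraf]}
}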
-type Decoder struct { - r io.Reader -} - -// NewDecoder creates a new decoder. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{r: r} -} - -// Decode decodes the stream into the provide value. -func (d *Decoder) Decode(v interface{}) error { - b, err := io.ReadAll(d.r) - if err != nil { - return err - } - - vm := jsonnet.MakeVM() - jsonStr, err := vm.EvaluateAnonymousSnippet("memory", string(b)) - if err != nil { - return err - } - return json.Unmarshal([]byte(jsonStr), &v) -} diff --git a/pkg/jsonnet/decode_test.go b/pkg/jsonnet/decode_test.go deleted file mode 100644 index eb261a1261c..00000000000 --- a/pkg/jsonnet/decode_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package jsonnet_test - -import ( - "strings" - "testing" - - "github.com/influxdata/influxdb/v2/pkg/jsonnet" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestDecoder(t *testing.T) { - type ( - person struct { - Name string `json:"name"` - Welcome string `json:"welcome"` - } - - persons struct { - Person1 person `json:"person1"` - Person2 person `json:"person2"` - } - ) - - const entry = `{ - person1: { - name: "Alice", - welcome: "Hello " + self.name + "!", - }, - person2: self.person1 { name: "Bob" }, -}` - - var out persons - require.NoError(t, jsonnet.NewDecoder(strings.NewReader(entry)).Decode(&out)) - - expected := persons{ - Person1: person{ - Name: "Alice", - Welcome: "Hello Alice!", - }, - Person2: person{ - Name: "Bob", - Welcome: "Hello Bob!", - }, - } - assert.Equal(t, expected, out) -} diff --git a/pkg/jsonparser/jsonparser.go b/pkg/jsonparser/jsonparser.go deleted file mode 100644 index 301ccc6d8cc..00000000000 --- a/pkg/jsonparser/jsonparser.go +++ /dev/null @@ -1,45 +0,0 @@ -package jsonparser - -import ( - "github.com/buger/jsonparser" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// GetID returns an influxdb.ID for the specified keys path or an error if -// the value cannot be decoded or does not exist. -func GetID(data []byte, keys ...string) (val platform.ID, err error) { - v, _, _, err := jsonparser.Get(data, keys...) - if err != nil { - return 0, err - } - - var id platform.ID - err = id.Decode(v) - if err != nil { - return 0, err - } - - return id, nil -} - -// GetOptionalID returns an influxdb.ID for the specified keys path or an error if -// the value cannot be decoded. The value of exists will be false if the keys path -// does not exist. -func GetOptionalID(data []byte, keys ...string) (val platform.ID, exists bool, err error) { - v, typ, _, err := jsonparser.Get(data, keys...) 
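A small sketch of the pkg/jsonparser helpers; the JSON payload is illustrative. A platform.ID prints in its 16-character hex form.

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/v2/pkg/jsonparser"
)

func main() {
	body := []byte(`{"bucket": {"id": "000000000000000a"}, "name": "example"}`)

	id, err := jsonparser.GetID(body, "bucket", "id")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id) // 000000000000000a

	// A missing key reports exists=false instead of an error.
	if _, exists, err := jsonparser.GetOptionalID(body, "orgID"); err != nil {
		log.Fatal(err)
	} else if !exists {
		fmt.Println("no orgID in payload")
	}
}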
- if typ == jsonparser.NotExist { - return 0, false, nil - } - - if err != nil { - return 0, false, err - } - - var id platform.ID - err = id.Decode(v) - if err != nil { - return 0, false, err - } - - return id, true, nil -} diff --git a/pkg/jsonparser/jsonparser_test.go b/pkg/jsonparser/jsonparser_test.go deleted file mode 100644 index 27b2761525d..00000000000 --- a/pkg/jsonparser/jsonparser_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package jsonparser_test - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/pkg/jsonparser" -) - -func TestGetID(t *testing.T) { - t.Run("decode valid id", func(t *testing.T) { - json := `{ "id": "000000000000000a" }` - got, err := jsonparser.GetID([]byte(json), "id") - if err != nil { - t.Error("unexpected error:", err) - } - - if exp := platform.ID(10); got != exp { - t.Error("unexpected value: -got/+exp", cmp.Diff(got, exp)) - } - }) - - t.Run("error invalid id", func(t *testing.T) { - json := `{ "id": "00000000000a" }` - _, err := jsonparser.GetID([]byte(json), "id") - if err == nil { - t.Error("expected error") - } - }) -} - -func TestGetOptionalID(t *testing.T) { - t.Run("missing id", func(t *testing.T) { - json := `{ "name": "foo" }` - _, got, err := jsonparser.GetOptionalID([]byte(json), "id") - if err != nil { - t.Error("unexpected error:", err) - } - - if exp := false; got != exp { - t.Error("unexpected value: -got/+exp", cmp.Diff(got, exp)) - } - }) -} diff --git a/pkg/limiter/fixed.go b/pkg/limiter/fixed.go deleted file mode 100644 index 2ff94e8b279..00000000000 --- a/pkg/limiter/fixed.go +++ /dev/null @@ -1,54 +0,0 @@ -// Package limiter provides concurrency limiters. -package limiter - -import "context" - -// Fixed is a simple channel-based concurrency limiter. It uses a fixed -// size channel to limit callers from proceeding until there is a value available -// in the channel. If all are in-use, the caller blocks until one is freed. -type Fixed chan struct{} - -func NewFixed(limit int) Fixed { - return make(Fixed, limit) -} - -// Idle returns true if the limiter has all its capacity is available. -func (t Fixed) Idle() bool { - return len(t) == cap(t) -} - -// Available returns the number of available tokens that may be taken. -func (t Fixed) Available() int { - return cap(t) - len(t) -} - -// Capacity returns the number of tokens can be taken. -func (t Fixed) Capacity() int { - return cap(t) -} - -// TryTake attempts to take a token and return true if successful, otherwise returns false. -func (t Fixed) TryTake() bool { - select { - case t <- struct{}{}: - return true - default: - return false - } -} - -// Take attempts to take a token and blocks until one is available OR until the given context -// is cancelled. -func (t Fixed) Take(ctx context.Context) error { - select { - case <-ctx.Done(): - return ctx.Err() - case t <- struct{}{}: - return nil - } -} - -// Release releases a token back to the limiter. 
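A minimal sketch of the Fixed limiter above, bounding how many goroutines run a job at once; the limit of 4 and the job body are illustrative.

package main

import (
	"context"
	"fmt"
	"sync"

	"github.com/influxdata/influxdb/v2/pkg/limiter"
)

func main() {
	jobs := limiter.NewFixed(4) // at most 4 jobs in flight
	var wg sync.WaitGroup

	for i := 0; i < 16; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()

			// Block until a token is free or the context is cancelled.
			if err := jobs.Take(context.Background()); err != nil {
				return
			}
			defer jobs.Release()

			fmt.Println("running job", n)
		}(i)
	}
	wg.Wait()
}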
-func (t Fixed) Release() { - <-t -} diff --git a/pkg/limiter/fixed_test.go b/pkg/limiter/fixed_test.go deleted file mode 100644 index 589963b6cff..00000000000 --- a/pkg/limiter/fixed_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package limiter_test - -import ( - "context" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/pkg/limiter" - "github.com/stretchr/testify/require" -) - -func TestFixed_Available(t *testing.T) { - f := limiter.NewFixed(10) - require.Equal(t, 10, f.Available()) - - require.NoError(t, f.Take(context.Background())) - require.Equal(t, 9, f.Available()) - - f.Release() - require.Equal(t, 10, f.Available()) -} - -func TestFixed_Timeout(t *testing.T) { - f := limiter.NewFixed(1) - require.NoError(t, f.Take(context.Background())) - - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - err := f.Take(ctx) - require.Error(t, err) - require.Equal(t, "context deadline exceeded", err.Error()) -} - -func TestFixed_Canceled(t *testing.T) { - f := limiter.NewFixed(1) - require.NoError(t, f.Take(context.Background())) - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - cancel() - err := f.Take(ctx) - require.Error(t, err) - require.Equal(t, "context canceled", err.Error()) -} diff --git a/pkg/limiter/write_test.go b/pkg/limiter/write_test.go deleted file mode 100644 index b97e0f2bbf6..00000000000 --- a/pkg/limiter/write_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package limiter_test - -import ( - "bytes" - "io" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/pkg/limiter" -) - -func TestWriter_Limited(t *testing.T) { - r := bytes.NewReader(bytes.Repeat([]byte{0}, 1024*1024)) - - limit := 512 * 1024 - w := limiter.NewWriter(nopWriteCloser{io.Discard}, limit, 10*1024*1024) - - start := time.Now() - n, err := io.Copy(w, r) - elapsed := time.Since(start) - if err != nil { - t.Error("copy error: ", err) - } - - rate := float64(n) / elapsed.Seconds() - // 1% tolerance - we have seen the limit be slightly off on Windows systems, likely due to - // rounding of time intervals. - tolerance := 1.01 - if rate > (float64(limit) * tolerance) { - t.Errorf("rate limit mismatch: exp %f, got %f", float64(limit), rate) - } -} - -func TestWriter_Limiter_ExceedBurst(t *testing.T) { - limit := 10 - burstLimit := 20 - - twentyOneBytes := make([]byte, 21) - - b := nopWriteCloser{bytes.NewBuffer(nil)} - - w := limiter.NewWriter(b, limit, burstLimit) - n, err := w.Write(twentyOneBytes) - if err != nil { - t.Fatal(err) - } - if n != len(twentyOneBytes) { - t.Errorf("expected %d bytes written, but got %d", len(twentyOneBytes), n) - } -} - -type nopWriteCloser struct { - io.Writer -} - -func (d nopWriteCloser) Close() error { return nil } diff --git a/pkg/limiter/writer.go b/pkg/limiter/writer.go deleted file mode 100644 index 4beb0dfe370..00000000000 --- a/pkg/limiter/writer.go +++ /dev/null @@ -1,94 +0,0 @@ -package limiter - -import ( - "context" - "io" - "os" - "time" - - "golang.org/x/time/rate" -) - -type Writer struct { - w io.WriteCloser - limiter Rate - ctx context.Context -} - -type Rate interface { - WaitN(ctx context.Context, n int) error - Burst() int -} - -func NewRate(bytesPerSec, burstLimit int) Rate { - limiter := rate.NewLimiter(rate.Limit(bytesPerSec), burstLimit) - limiter.AllowN(time.Now(), burstLimit) // spend initial burst - return limiter -} - -// NewWriter returns a writer that implements io.Writer with rate limiting. 
-// The limiter use a token bucket approach and limits the rate to bytesPerSec -// with a maximum burst of burstLimit. -func NewWriter(w io.WriteCloser, bytesPerSec, burstLimit int) *Writer { - limiter := NewRate(bytesPerSec, burstLimit) - - return &Writer{ - w: w, - ctx: context.Background(), - limiter: limiter, - } -} - -// WithRate returns a Writer with the specified rate limiter. -func NewWriterWithRate(w io.WriteCloser, limiter Rate) *Writer { - return &Writer{ - w: w, - ctx: context.Background(), - limiter: limiter, - } -} - -// Write writes bytes from b. -func (s *Writer) Write(b []byte) (int, error) { - if s.limiter == nil { - return s.w.Write(b) - } - - var n int - for n < len(b) { - wantToWriteN := len(b[n:]) - if wantToWriteN > s.limiter.Burst() { - wantToWriteN = s.limiter.Burst() - } - - wroteN, err := s.w.Write(b[n : n+wantToWriteN]) - if err != nil { - return n, err - } - n += wroteN - - if err := s.limiter.WaitN(s.ctx, wroteN); err != nil { - return n, err - } - } - - return n, nil -} - -func (s *Writer) Sync() error { - if f, ok := s.w.(*os.File); ok { - return f.Sync() - } - return nil -} - -func (s *Writer) Name() string { - if f, ok := s.w.(*os.File); ok { - return f.Name() - } - return "" -} - -func (s *Writer) Close() error { - return s.w.Close() -} diff --git a/pkg/metrics/context.go b/pkg/metrics/context.go deleted file mode 100644 index ee407ac9e5c..00000000000 --- a/pkg/metrics/context.go +++ /dev/null @@ -1,20 +0,0 @@ -package metrics - -import "context" - -type key int - -const ( - groupKey key = iota -) - -// NewContextWithGroup returns a new context with the given Group added. -func NewContextWithGroup(ctx context.Context, c *Group) context.Context { - return context.WithValue(ctx, groupKey, c) -} - -// GroupFromContext returns the Group associated with ctx or nil if no Group has been assigned. -func GroupFromContext(ctx context.Context) *Group { - c, _ := ctx.Value(groupKey).(*Group) - return c -} diff --git a/pkg/metrics/counter.go b/pkg/metrics/counter.go deleted file mode 100644 index 6f2e526cd2f..00000000000 --- a/pkg/metrics/counter.go +++ /dev/null @@ -1,28 +0,0 @@ -package metrics - -import ( - "strconv" - "sync/atomic" -) - -// The Counter type represents a numeric counter that is safe to use from concurrent goroutines. -type Counter struct { - val int64 - desc *desc -} - -// Name identifies the name of the counter. -func (c *Counter) Name() string { return c.desc.Name } - -// Value atomically returns the current value of the counter. -func (c *Counter) Value() int64 { return atomic.LoadInt64(&c.val) } - -// Add atomically adds d to the counter. -func (c *Counter) Add(d int64) { atomic.AddInt64(&c.val, d) } - -// String returns a string representation using the name and value of the counter. 
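A brief sketch of the rate-limited Writer above, throttling a file copy; the paths and the 8 MiB/s rate with a 32 MiB burst are illustrative.

package main

import (
	"io"
	"log"
	"os"

	"github.com/influxdata/influxdb/v2/pkg/limiter"
)

func main() {
	src, err := os.Open("/var/lib/influxdb/snapshot.tsm") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := os.Create("/tmp/snapshot.tsm")
	if err != nil {
		log.Fatal(err)
	}

	// Throttle the copy to roughly 8 MiB/s with a 32 MiB burst.
	w := limiter.NewWriter(dst, 8*1024*1024, 32*1024*1024)
	if _, err := io.Copy(w, src); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}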
-func (c *Counter) String() string { - var buf [16]byte - v := strconv.AppendInt(buf[:0], c.val, 10) - return c.desc.Name + ": " + string(v) -} diff --git a/pkg/metrics/counter_test.go b/pkg/metrics/counter_test.go deleted file mode 100644 index d444cd80697..00000000000 --- a/pkg/metrics/counter_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package metrics - -import ( - "testing" -) - -func TestCounter_Add(t *testing.T) { - c := Counter{} - c.Add(5) - c.Add(5) - if exp, got := int64(10), c.Value(); exp != got { - t.Errorf("unexpected value; exp=%d, got=%d", exp, got) - } -} diff --git a/pkg/metrics/default_registry.go b/pkg/metrics/default_registry.go deleted file mode 100644 index 893221ef11e..00000000000 --- a/pkg/metrics/default_registry.go +++ /dev/null @@ -1,36 +0,0 @@ -package metrics - -var defaultRegistry = NewRegistry() - -// MustRegisterGroup registers a new group using the specified name. -// If the group name is not unique, MustRegisterGroup will panic. -// -// MustRegisterGroup is not safe to call from multiple goroutines. -func MustRegisterGroup(name string) GID { - return defaultRegistry.MustRegisterGroup(name) -} - -// MustRegisterCounter registers a new counter metric with the default registry -// using the provided descriptor. -// If the metric name is not unique, MustRegisterCounter will panic. -// -// MustRegisterCounter is not safe to call from multiple goroutines. -func MustRegisterCounter(name string, opts ...descOption) ID { - return defaultRegistry.MustRegisterCounter(name, opts...) -} - -// MustRegisterTimer registers a new timer metric with the default registry -// using the provided descriptor. -// If the metric name is not unique, MustRegisterTimer will panic. -// -// MustRegisterTimer is not safe to call from multiple goroutines. -func MustRegisterTimer(name string, opts ...descOption) ID { - return defaultRegistry.MustRegisterTimer(name, opts...) -} - -// NewGroup returns a new measurement group from the default registry. -// -// NewGroup is safe to call from multiple goroutines. -func NewGroup(gid GID) *Group { - return defaultRegistry.NewGroup(gid) -} diff --git a/pkg/metrics/descriptors.go b/pkg/metrics/descriptors.go deleted file mode 100644 index 0a8dac8f3d9..00000000000 --- a/pkg/metrics/descriptors.go +++ /dev/null @@ -1,64 +0,0 @@ -package metrics - -type groupDesc struct { - Name string - id GID -} - -type metricType int - -const ( - counterMetricType metricType = iota - timerMetricType -) - -type desc struct { - Name string - mt metricType - gid GID - id ID -} - -type descOption func(*desc) - -// WithGroup assigns the associated measurement to the group identified by gid originally -// returned from MustRegisterGroup. 
-func WithGroup(gid GID) descOption {
-	return func(d *desc) {
-		d.gid = gid
-	}
-}
-
-func newDesc(name string, opts ...descOption) *desc {
-	desc := &desc{Name: name}
-	for _, o := range opts {
-		o(desc)
-	}
-	return desc
-}
-
-const (
-	idMask   = (1 << 32) - 1
-	gidShift = 32
-)
-
-type (
-	GID uint32
-	ID  uint64
-)
-
-func newID(id int, gid GID) ID {
-	return ID(gid)<<gidShift | (ID(id) & idMask)
-}
-
-func (id ID) id() uint32 {
-	return uint32(id & idMask)
-}
-
-func (id ID) gid() uint32 {
-	return uint32(id >> gidShift)
-}
-
-func (id *ID) setGID(gid GID) {
-	*id |= ID(gid) << gidShift
-}
diff --git a/pkg/metrics/descriptors_test.go b/pkg/metrics/descriptors_test.go
deleted file mode 100644
index 280386c93eb..00000000000
--- a/pkg/metrics/descriptors_test.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package metrics
-
-import (
-	"testing"
-
-	"github.com/influxdata/influxdb/v2/pkg/testing/assert"
-)
-
-func TestID_newID(t *testing.T) {
-	var id = newID(0xff, 0xff0f0fff)
-	assert.Equal(t, id, ID(0xff0f0fff000000ff))
-	assert.Equal(t, id.id(), uint32(0xff))
-	assert.Equal(t, id.gid(), uint32(0xff0f0fff))
-}
-
-func TestID_setGID(t *testing.T) {
-	var id = ID(1)
-	assert.Equal(t, id.gid(), uint32(0))
-	id.setGID(1)
-	assert.Equal(t, id.gid(), uint32(1))
-}
diff --git a/pkg/metrics/doc.go b/pkg/metrics/doc.go
deleted file mode 100644
index cb0feac4ad6..00000000000
--- a/pkg/metrics/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-/*
-Package metrics provides various measurements that are safe for concurrent access.
-
-Measurements are arranged into groups that are efficient to create and access.
-*/
-package metrics
diff --git a/pkg/metrics/group.go b/pkg/metrics/group.go
deleted file mode 100644
index 0a02bb0c654..00000000000
--- a/pkg/metrics/group.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package metrics
-
-// The Group type represents an instance of a set of measurements that are used for
-// instrumenting a specific request.
-type Group struct {
-	g        *groupRegistry
-	counters []Counter
-	timers   []Timer
-}
-
-// Name returns the name of the group.
-func (g *Group) Name() string { return g.g.desc.Name }
-
-// GetCounter returns the counter identified by the id that was returned
-// by MustRegisterCounter for the same group.
-// Using an id from a different group will result in undefined behavior.
-func (g *Group) GetCounter(id ID) *Counter { return &g.counters[id.id()] }
-
-// GetTimer returns the timer identified by the id that was returned
-// by MustRegisterTimer for the same group.
-// Using an id from a different group will result in undefined behavior.
-func (g *Group) GetTimer(id ID) *Timer { return &g.timers[id.id()] }
-
-// The Metric type defines a Name
-type Metric interface {
-	Name() string
-}
-
-// ForEach calls fn for all measurements of the group.
-func (g *Group) ForEach(fn func(v Metric)) {
-	for i := range g.counters {
-		fn(&g.counters[i])
-	}
-	for i := range g.timers {
-		fn(&g.timers[i])
-	}
-}
diff --git a/pkg/metrics/group_registry.go b/pkg/metrics/group_registry.go
deleted file mode 100644
index f457f8f8f4d..00000000000
--- a/pkg/metrics/group_registry.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package metrics
-
-import (
-	"fmt"
-	"sort"
-)
-
-// The groupRegistry type represents a set of metrics that are measured together.
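A short worked example of the 64-bit metric ID layout above: the group ID occupies the upper 32 bits and the per-group index the lower 32 bits, matching the values asserted in TestID_newID.

package main

import "fmt"

func main() {
	const (
		idMask   = (1 << 32) - 1
		gidShift = 32
	)

	gid := uint64(0xff0f0fff) // group ID
	idx := uint64(0xff)       // index of the metric within its group

	packed := gid<<gidShift | idx&idMask
	fmt.Printf("packed: %#x\n", packed)           // 0xff0f0fff000000ff
	fmt.Printf("index:  %#x\n", packed&idMask)    // 0xff
	fmt.Printf("group:  %#x\n", packed>>gidShift) // 0xff0f0fff
}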
-type groupRegistry struct { - desc *groupDesc - descriptors []*desc - group Group -} - -func (g *groupRegistry) register(desc *desc) error { - p := sort.Search(len(g.descriptors), func(i int) bool { - return g.descriptors[i].Name == desc.Name - }) - - if p != len(g.descriptors) { - return fmt.Errorf("metric name '%s' already in use", desc.Name) - } - - g.descriptors = append(g.descriptors, desc) - sort.Slice(g.descriptors, func(i, j int) bool { - return g.descriptors[i].Name < g.descriptors[j].Name - }) - - return nil -} - -func (g *groupRegistry) mustRegister(desc *desc) { - if err := g.register(desc); err != nil { - panic(err.Error()) - } -} - -// MustRegisterCounter registers a new counter metric using the provided descriptor. -// If the metric name is not unique, MustRegisterCounter will panic. -// -// MustRegisterCounter is not safe to call from multiple goroutines. -func (g *groupRegistry) mustRegisterCounter(desc *desc) ID { - desc.mt = counterMetricType - g.mustRegister(desc) - - desc.id = newID(len(g.group.counters), g.desc.id) - g.group.counters = append(g.group.counters, Counter{desc: desc}) - - return desc.id -} - -// MustRegisterTimer registers a new timer metric using the provided descriptor. -// If the metric name is not unique, MustRegisterTimer will panic. -// -// MustRegisterTimer is not safe to call from multiple goroutines. -func (g *groupRegistry) mustRegisterTimer(desc *desc) ID { - desc.mt = timerMetricType - g.mustRegister(desc) - - desc.id = newID(len(g.group.timers), g.desc.id) - g.group.timers = append(g.group.timers, Timer{desc: desc}) - - return desc.id -} - -// newCollector returns a Collector with a copy of all the registered counters. -// -// newCollector is safe to call from multiple goroutines. -func (g *groupRegistry) newGroup() *Group { - c := &Group{ - g: g, - counters: make([]Counter, len(g.group.counters)), - timers: make([]Timer, len(g.group.timers)), - } - copy(c.counters, g.group.counters) - copy(c.timers, g.group.timers) - - return c -} diff --git a/pkg/metrics/registry.go b/pkg/metrics/registry.go deleted file mode 100644 index 6edbe190bc5..00000000000 --- a/pkg/metrics/registry.go +++ /dev/null @@ -1,87 +0,0 @@ -package metrics - -import ( - "fmt" - "sort" -) - -type Registry struct { - descriptors []*groupDesc - groups []groupRegistry -} - -const ( - // DefaultGroup is the identifier for the default group. - DefaultGroup = GID(0) -) - -// NewRegistry creates a new Registry with a single group identified by DefaultGroup. -func NewRegistry() *Registry { - var r Registry - r.MustRegisterGroup("global") - return &r -} - -func (r *Registry) register(gd *groupDesc) error { - p := sort.Search(len(r.descriptors), func(i int) bool { - return r.descriptors[i].Name == gd.Name - }) - - if p != len(r.descriptors) { - return fmt.Errorf("group name '%s' already in use", gd.Name) - } - - r.descriptors = append(r.descriptors, gd) - sort.Slice(r.descriptors, func(i, j int) bool { - return r.descriptors[i].Name < r.descriptors[j].Name - }) - - gd.id = GID(len(r.groups)) - r.groups = append(r.groups, groupRegistry{desc: gd}) - - return nil -} - -func (r *Registry) mustRegister(gd *groupDesc) { - if err := r.register(gd); err != nil { - panic(err.Error()) - } -} - -// MustRegisterGroup registers a new group and panics if a group already exists with the same name. -// -// MustRegisterGroup is not safe to call from concurrent goroutines. 
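A minimal end-to-end sketch of the registry above, using the package-level default registry; the group and metric names and the simulated work are illustrative. Registration happens once at startup, and each request then gets its own cheap Group copy.

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/v2/pkg/metrics"
)

// Register groups and metrics once, at package init time; registration is not
// safe for concurrent use.
var (
	queryGroup   = metrics.MustRegisterGroup("query")
	seriesCount  = metrics.MustRegisterCounter("series_scanned", metrics.WithGroup(queryGroup))
	planDuration = metrics.MustRegisterTimer("planning_duration", metrics.WithGroup(queryGroup))
)

func main() {
	// Each request gets its own copy of the group's metrics.
	g := metrics.NewGroup(queryGroup)

	g.GetTimer(planDuration).Time(func() {
		time.Sleep(5 * time.Millisecond) // stand-in for query planning
	})
	g.GetCounter(seriesCount).Add(42)

	g.ForEach(func(m metrics.Metric) {
		fmt.Println(m)
	})
}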
-func (r *Registry) MustRegisterGroup(name string) GID { - gd := &groupDesc{Name: name} - r.mustRegister(gd) - return gd.id -} - -func (r *Registry) mustGetGroupRegistry(id GID) *groupRegistry { - if int(id) >= len(r.groups) { - panic("invalid group ID") - } - return &r.groups[id] -} - -// MustRegisterCounter registers a new counter metric using the provided descriptor. -// If the metric name is not unique within the group, MustRegisterCounter will panic. -// -// MustRegisterCounter is not safe to call from concurrent goroutines. -func (r *Registry) MustRegisterCounter(name string, opts ...descOption) ID { - desc := newDesc(name, opts...) - return r.mustGetGroupRegistry(desc.gid).mustRegisterCounter(desc) -} - -// MustRegisterTimer registers a new timer metric using the provided descriptor. -// If the metric name is not unique within the group, MustRegisterTimer will panic. -// -// MustRegisterTimer is not safe to call from concurrent goroutines. -func (r *Registry) MustRegisterTimer(name string, opts ...descOption) ID { - desc := newDesc(name, opts...) - return r.mustGetGroupRegistry(desc.gid).mustRegisterTimer(desc) -} - -func (r *Registry) NewGroup(gid GID) *Group { - return r.mustGetGroupRegistry(gid).newGroup() -} diff --git a/pkg/metrics/registry_test.go b/pkg/metrics/registry_test.go deleted file mode 100644 index 67f148f08ca..00000000000 --- a/pkg/metrics/registry_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package metrics - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/pkg/testing/assert" -) - -func TestRegistry_MustRegisterCounter(t *testing.T) { - r := NewRegistry() - id := r.MustRegisterCounter("counter") - assert.Equal(t, id, ID(0), "invalid id") -} - -func TestRegistry_MustRegisterCounter_Panics(t *testing.T) { - r := NewRegistry() - r.MustRegisterCounter("counter") - assert.PanicsWithValue(t, "metric name 'counter' already in use", func() { - r.MustRegisterCounter("counter") - }) -} - -func TestRegistry_NewGroup_CounterIsZero(t *testing.T) { - r := NewRegistry() - id := r.MustRegisterCounter("counter") - - c := r.NewGroup(DefaultGroup).GetCounter(id) - c.Add(1) - assert.Equal(t, int64(1), c.Value()) - - c = r.NewGroup(DefaultGroup).GetCounter(id) - assert.Equal(t, int64(0), c.Value()) -} - -func TestRegistry_MustRegisterTimer(t *testing.T) { - r := NewRegistry() - id := r.MustRegisterTimer("timer") - assert.Equal(t, ID(0), id, "invalid id") -} - -func TestRegistry_MustRegisterTimer_Panics(t *testing.T) { - r := NewRegistry() - r.MustRegisterCounter("timer") - assert.PanicsWithValue(t, "metric name 'timer' already in use", func() { - r.MustRegisterCounter("timer") - }) -} - -func TestRegistry_MustRegisterMultiple(t *testing.T) { - r := NewRegistry() - cnt := r.MustRegisterCounter("counter") - tmr := r.MustRegisterTimer("timer") - assert.Equal(t, ID(0), cnt, "invalid id") - assert.Equal(t, ID(0), tmr, "invalid id") -} - -func TestRegistry_MustRegister_Panics_Across_Measurements(t *testing.T) { - r := NewRegistry() - r.MustRegisterCounter("foo") - assert.PanicsWithValue(t, "metric name 'foo' already in use", func() { - r.MustRegisterCounter("foo") - }) -} diff --git a/pkg/metrics/timer.go b/pkg/metrics/timer.go deleted file mode 100644 index a0382c56db4..00000000000 --- a/pkg/metrics/timer.go +++ /dev/null @@ -1,34 +0,0 @@ -package metrics - -import ( - "sync/atomic" - "time" -) - -// The timer type is used to store a duration. -type Timer struct { - val int64 - desc *desc -} - -// Name returns the name of the timer. 
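For reference, a minimal usage sketch of the `pkg/metrics` API removed above. The metric names and the `main` wrapper are hypothetical, and `Counter.Add`/`Value` are defined in `counter.go`, outside this hunk; the sketch only exercises the registry, group, and timer signatures shown in the diff.

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/v2/pkg/metrics"
)

func main() {
	// Register descriptors once at startup; registration is not goroutine-safe.
	r := metrics.NewRegistry()
	rows := r.MustRegisterCounter("rows_scanned")    // hypothetical metric name
	plan := r.MustRegisterTimer("planning_duration") // hypothetical metric name

	// Each request gets its own zeroed copy of the registered measurements.
	g := r.NewGroup(metrics.DefaultGroup)
	g.GetCounter(rows).Add(128)
	g.GetTimer(plan).Update(3 * time.Millisecond)

	// Walk every measurement in the group.
	g.ForEach(func(m metrics.Metric) {
		fmt.Println(m.Name())
	})
}
```

The design keeps registration and measurement separate: descriptors live in a shared registry, while each request's `Group` is a cheap copy of pre-sized slices, so hot-path counting and timing avoid locking and allocation.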
-func (t *Timer) Name() string { return t.desc.Name } - -// Value atomically returns the value of the timer. -func (t *Timer) Value() time.Duration { return time.Duration(atomic.LoadInt64(&t.val)) } - -// Update sets the timer value to d. -func (t *Timer) Update(d time.Duration) { atomic.StoreInt64(&t.val, int64(d)) } - -// UpdateSince sets the timer value to the difference between since and the current time. -func (t *Timer) UpdateSince(since time.Time) { t.Update(time.Since(since)) } - -// String returns a string representation using the name and value of the timer. -func (t *Timer) String() string { return t.desc.Name + ": " + time.Duration(t.val).String() } - -// Time updates the timer to the duration it takes to call f. -func (t *Timer) Time(f func()) { - s := time.Now() - f() - t.UpdateSince(s) -} diff --git a/pkg/metrics/timer_test.go b/pkg/metrics/timer_test.go deleted file mode 100644 index a6c69525687..00000000000 --- a/pkg/metrics/timer_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package metrics - -import ( - "testing" - "time" - - "github.com/influxdata/influxdb/v2/pkg/testing/assert" -) - -func TestTimer_Update(t *testing.T) { - var c Timer - c.Update(100 * time.Millisecond) - assert.Equal(t, c.Value(), 100*time.Millisecond, "unexpected value") -} diff --git a/pkg/mincore/limiter.go b/pkg/mincore/limiter.go deleted file mode 100644 index 04ba6846cd1..00000000000 --- a/pkg/mincore/limiter.go +++ /dev/null @@ -1,179 +0,0 @@ -package mincore - -import ( - "context" - "os" - "sync" - "time" - "unsafe" - - "golang.org/x/time/rate" -) - -// Limiter defaults. -const ( - DefaultUpdateInterval = 10 * time.Second -) - -// Limiter represents a token bucket rate limiter based on -type Limiter struct { - mu sync.Mutex - underlying *rate.Limiter - data []byte // mmap reference - incore []byte // in-core vector - updatedAt time.Time // last incore update - - // Frequency of updates of the in-core vector. - // Updates are performed lazily so this is the maximum frequency. - UpdateInterval time.Duration - - // OS mincore() function. - Mincore func(data []byte) ([]byte, error) -} - -// NewLimiter returns a new instance of Limiter associated with an mmap. -// The underlying limiter can be shared to limit faults across the entire process. -func NewLimiter(underlying *rate.Limiter, data []byte) *Limiter { - if underlying == nil { - return nil - } - - return &Limiter{ - underlying: underlying, - data: data, - - UpdateInterval: DefaultUpdateInterval, - Mincore: Mincore, - } -} - -// WaitPointer checks if ptr would cause a page fault and, if so, rate limits its access. -// Once a page access is limited, it's updated to be considered memory resident. -func (l *Limiter) WaitPointer(ctx context.Context, ptr unsafe.Pointer) error { - // Check if the page is in-memory under lock. - // However, we want to exclude the wait from the limiter lock. - if wait, err := func() (bool, error) { - l.mu.Lock() - defer l.mu.Unlock() - - // Update incore mapping if data is too stale. - if err := l.checkUpdate(); err != nil { - return false, err - } - - return l.wait(uintptr(ptr)), nil - }(); err != nil { - return err - } else if !wait { - return nil - } - - return l.underlying.Wait(ctx) -} - -// WaitRange checks all pages in b for page faults and, if so, rate limits their access. -// Once a page access is limited, it's updated to be considered memory resident. -func (l *Limiter) WaitRange(ctx context.Context, b []byte) error { - // Empty byte slices will never access memory so skip them. 
- if len(b) == 0 { - return nil - } - - // Check every page for being in-memory under lock. - // However, we want to exclude the wait from the limiter lock. - var n int - if err := func() error { - l.mu.Lock() - defer l.mu.Unlock() - - // Update incore mapping if data is too stale. - if err := l.checkUpdate(); err != nil { - return err - } - - // Iterate over every page within the range. - pageSize := uintptr(os.Getpagesize()) - start := (uintptr(unsafe.Pointer(&b[0])) / pageSize) * pageSize - end := (uintptr(unsafe.Pointer(&b[len(b)-1])) / pageSize) * pageSize - - for i := start; i <= end; i += pageSize { - if l.wait(i) { - n++ - } - } - - return nil - }(); err != nil { - return err - } else if n == 0 { - return nil - } - - for i := 0; i < n; i++ { - if err := l.underlying.Wait(ctx); err != nil { - return err - } - } - return nil -} - -func (l *Limiter) wait(ptr uintptr) bool { - // Check if page access requires page fault. If not, exit immediately. - // If so, mark the page as memory resident afterward. - if l.isInCore(ptr) { - return false - } - - // Otherwise mark page as resident in memory and rate limit. - if i := l.index(ptr); i < len(l.incore) { - l.incore[l.index(ptr)] |= 1 - } - return true -} - -// IsInCore returns true if the address is resident in memory or if the -// address is outside the range of the data the limiter is tracking. -func (l *Limiter) IsInCore(ptr uintptr) bool { - l.mu.Lock() - defer l.mu.Unlock() - return l.isInCore(ptr) -} - -func (l *Limiter) isInCore(ptr uintptr) bool { - if i := l.index(ptr); i < len(l.incore) { - return (l.incore[i] & 1) == 1 - } - return true -} - -// Update updates the vector of in-core pages. Automatically updated when calling Wait(). -func (l *Limiter) Update() error { - l.mu.Lock() - defer l.mu.Unlock() - return l.update() -} - -func (l *Limiter) update() error { - vec, err := l.Mincore(l.data) - if err != nil { - return err - } - - l.incore = vec - l.updatedAt = time.Now() - - return nil -} - -// checkUpdate performs an update if one hasn't been done before or the interval has passed. -func (l *Limiter) checkUpdate() error { - if l.incore != nil && time.Since(l.updatedAt) < l.UpdateInterval { - return nil - } - return l.update() -} - -// index returns the position in the in-core vector that represents ptr. 
-func (l *Limiter) index(ptr uintptr) int { - return int(int64(ptr-uintptr(unsafe.Pointer(&l.data[0]))) / int64(os.Getpagesize())) -} diff --git a/pkg/mincore/limiter_test.go b/pkg/mincore/limiter_test.go deleted file mode 100644 index 01433fc3606..00000000000 --- a/pkg/mincore/limiter_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package mincore_test - -import ( - "context" - "os" - "testing" - "time" - "unsafe" - - "github.com/influxdata/influxdb/v2/pkg/mincore" - "golang.org/x/time/rate" -) - -func TestLimiter(t *testing.T) { - pageSize := os.Getpagesize() - - // Ensure limiter waits long enough between faults - t.Run("WaitPointer", func(t *testing.T) { - t.Parallel() - - data := make([]byte, pageSize*2) - l := mincore.NewLimiter(rate.NewLimiter(1, 1), data) // 1 fault per sec - l.Mincore = func(data []byte) ([]byte, error) { return make([]byte, 2), nil } - - start := time.Now() - if err := l.WaitPointer(context.Background(), unsafe.Pointer(&data[0])); err != nil { - t.Fatal(err) - } else if err := l.WaitPointer(context.Background(), unsafe.Pointer(&data[pageSize])); err != nil { - t.Fatal(err) - } - - if d := time.Since(start); d < time.Second { - t.Fatalf("not enough time elapsed: %s", d) - } - }) - - // Ensure limiter waits long enough between faults for a byte slice. - t.Run("WaitRange", func(t *testing.T) { - t.Parallel() - - data := make([]byte, 2*pageSize) - l := mincore.NewLimiter(rate.NewLimiter(1, 1), data) // 1 fault per sec - l.Mincore = func(data []byte) ([]byte, error) { return make([]byte, 2), nil } - - start := time.Now() - if err := l.WaitRange(context.Background(), data); err != nil { - t.Fatal(err) - } - - if d := time.Since(start); d < time.Second { - t.Fatalf("not enough time elapsed: %s", d) - } - }) - - // Ensure pages are marked as in-core after calling Wait() on them. - t.Run("MoveToInMemoryAfterUse", func(t *testing.T) { - t.Parallel() - - data := make([]byte, pageSize*10) - l := mincore.NewLimiter(rate.NewLimiter(1, 1), data) - l.Mincore = func(data []byte) ([]byte, error) { - return make([]byte, 10), nil - } - if err := l.Update(); err != nil { - t.Fatal(err) - } else if l.IsInCore(uintptr(unsafe.Pointer(&data[0]))) { - t.Fatal("expected page to not be in-memory") - } - - if err := l.WaitPointer(context.Background(), unsafe.Pointer(&data[0])); err != nil { - t.Fatal(err) - } else if !l.IsInCore(uintptr(unsafe.Pointer(&data[0]))) { - t.Fatal("expected page to be in-memory") - } - }) - - // Ensure fresh in-core data is pulled after the update interval. - t.Run("UpdateAfterInterval", func(t *testing.T) { - t.Parallel() - - data := make([]byte, pageSize*10) - l := mincore.NewLimiter(rate.NewLimiter(1, 1), data) - l.UpdateInterval = 100 * time.Millisecond - - var n int - l.Mincore = func(data []byte) ([]byte, error) { - n++ - return make([]byte, 10), nil - } - - // Wait for two pages to pull them in-memory. - if err := l.WaitPointer(context.Background(), unsafe.Pointer(&data[0])); err != nil { - t.Fatal(err) - } else if err := l.WaitPointer(context.Background(), unsafe.Pointer(&data[pageSize])); err != nil { - t.Fatal(err) - } else if !l.IsInCore(uintptr(unsafe.Pointer(&data[0]))) { - t.Fatal("expected page to be in-memory") - } else if !l.IsInCore(uintptr(unsafe.Pointer(&data[pageSize]))) { - t.Fatal("expected page to be in-memory") - } - - // Wait for interval to pass. - time.Sleep(l.UpdateInterval) - - // Fetch one of the previous pages and ensure the other one has been flushed from the update. 
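For reference, a sketch of driving the `mincore.Limiter` removed above. The rate, burst, and slice size are illustrative, and the `Mincore` stub mirrors what the package tests do; production callers keep the default `Mincore` wrapper and pass a memory-mapped region to `NewLimiter`.

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/influxdata/influxdb/v2/pkg/mincore"
	"golang.org/x/time/rate"
)

func main() {
	// In real use `data` is a memory-mapped region; a plain slice stands in here.
	data := make([]byte, os.Getpagesize()*4)

	// Allow at most 100 rate-limited page faults per second, with a burst of 10.
	l := mincore.NewLimiter(rate.NewLimiter(rate.Limit(100), 10), data)

	// Stub the in-core check as the package tests do; the default Mincore
	// wrapper calls mincore(2) on the mapped pages instead.
	l.Mincore = func(data []byte) ([]byte, error) { return make([]byte, 4), nil }

	// Block, if necessary, before touching a range that may page-fault.
	if err := l.WaitRange(context.Background(), data); err != nil {
		log.Fatal(err)
	}
}
```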
- if err := l.WaitPointer(context.Background(), unsafe.Pointer(&data[0])); err != nil { - t.Fatal(err) - } else if !l.IsInCore(uintptr(unsafe.Pointer(&data[0]))) { - t.Fatal("expected page to be in-memory") - } else if l.IsInCore(uintptr(unsafe.Pointer(&data[pageSize]))) { - t.Fatal("expected page to not be in-memory") - } - - if got, want := n, 2; got != want { - t.Fatalf("refreshed %d times, expected %d times", got, want) - } - }) - - // Ensure referencing data outside the limiter's data shows as in-memory. - t.Run("OutOfBounds", func(t *testing.T) { - l := mincore.NewLimiter(rate.NewLimiter(1, 1), make([]byte, pageSize)) - l.Mincore = func(data []byte) ([]byte, error) { - return make([]byte, 1), nil - } - - data := make([]byte, pageSize) - if !l.IsInCore(uintptr(unsafe.Pointer(&data[0]))) { - t.Fatal("expected out-of-bounds page to be resident") - } - }) -} diff --git a/pkg/mincore/mincore_unix.go b/pkg/mincore/mincore_unix.go deleted file mode 100644 index b44b7b2060b..00000000000 --- a/pkg/mincore/mincore_unix.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build darwin || dragonfly || freebsd || linux || nacl || netbsd || openbsd - -package mincore - -import ( - "os" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Mincore is a wrapper function for mincore(2). -func Mincore(data []byte) ([]byte, error) { - vec := make([]byte, (int64(len(data))+int64(os.Getpagesize())-1)/int64(os.Getpagesize())) - - if ret, _, err := unix.Syscall( - unix.SYS_MINCORE, - uintptr(unsafe.Pointer(&data[0])), - uintptr(len(data)), - uintptr(unsafe.Pointer(&vec[0]))); ret != 0 { - return nil, err - } - return vec, nil -} diff --git a/pkg/mincore/mincore_windows.go b/pkg/mincore/mincore_windows.go deleted file mode 100644 index 55e01a73d17..00000000000 --- a/pkg/mincore/mincore_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build windows - -package mincore - -// Mincore returns a zero-length vector. -func Mincore(data []byte) ([]byte, error) { - return make([]byte, 0), nil -} diff --git a/pkg/mmap/mmap_solaris.go b/pkg/mmap/mmap_solaris.go deleted file mode 100644 index f313bdae52d..00000000000 --- a/pkg/mmap/mmap_solaris.go +++ /dev/null @@ -1,45 +0,0 @@ -//go:build solaris - -package mmap - -import ( - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -func Map(path string, sz int64) ([]byte, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - return nil, err - } else if fi.Size() == 0 { - return nil, nil - } - - // Use file size if map size is not passed in. - if sz == 0 { - sz = fi.Size() - } - - data, err := unix.Mmap(int(f.Fd()), 0, int(sz), syscall.PROT_READ, syscall.MAP_SHARED) - if err != nil { - return nil, err - } - - return data, nil -} - -// Unmap closes the memory-map. 
-func Unmap(data []byte) error { - if data == nil { - return nil - } - return unix.Munmap(data) -} diff --git a/pkg/mmap/mmap_test.go b/pkg/mmap/mmap_test.go deleted file mode 100644 index 5ebe1aa667f..00000000000 --- a/pkg/mmap/mmap_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package mmap_test - -import ( - "bytes" - "os" - "testing" - - "github.com/influxdata/influxdb/v2/pkg/mmap" -) - -func TestMap(t *testing.T) { - data, err := mmap.Map("mmap_test.go", 0) - if err != nil { - t.Fatalf("Open: %v", err) - } - - if exp, err := os.ReadFile("mmap_test.go"); err != nil { - t.Fatalf("os.ReadFile: %v", err) - } else if !bytes.Equal(data, exp) { - t.Fatalf("got %q\nwant %q", string(data), string(exp)) - } -} diff --git a/pkg/mmap/mmap_unix.go b/pkg/mmap/mmap_unix.go deleted file mode 100644 index ebbf2e301a4..00000000000 --- a/pkg/mmap/mmap_unix.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build darwin || dragonfly || freebsd || linux || nacl || netbsd || openbsd - -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package mmap provides a way to memory-map a file. -package mmap - -import ( - "os" - "syscall" - - errors2 "github.com/influxdata/influxdb/v2/pkg/errors" -) - -// Map memory-maps a file. -func Map(path string, sz int64) (data []byte, err error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer errors2.Capture(&err, f.Close)() - - fi, err := f.Stat() - if err != nil { - return nil, err - } else if fi.Size() == 0 { - return nil, nil - } - - // Use file size if map size is not passed in. - if sz == 0 { - sz = fi.Size() - } - - data, err = syscall.Mmap(int(f.Fd()), 0, int(sz), syscall.PROT_READ, syscall.MAP_SHARED) - if err != nil { - return nil, err - } - - return data, nil -} - -// Unmap closes the memory-map. -func Unmap(data []byte) error { - if data == nil { - return nil - } - return syscall.Munmap(data) -} diff --git a/pkg/mmap/mmap_windows.go b/pkg/mmap/mmap_windows.go deleted file mode 100644 index 5df36eacd27..00000000000 --- a/pkg/mmap/mmap_windows.go +++ /dev/null @@ -1,56 +0,0 @@ -package mmap - -import ( - "os" - "syscall" - "unsafe" -) - -// Map memory-maps a file. -func Map(path string, sz int64) ([]byte, error) { - fi, err := os.Stat(path) - if err != nil { - return nil, err - } - - // Truncate file to size if too small. - if fi.Size() < sz { - if err := os.Truncate(path, sz); err != nil { - return nil, err - } - } else { - sz = fi.Size() - } - if sz == 0 { - return nil, nil - } - - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - lo, hi := uint32(sz), uint32(sz>>32) - fmap, err := syscall.CreateFileMapping(syscall.Handle(f.Fd()), nil, syscall.PAGE_READONLY, hi, lo, nil) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(fmap) - - ptr, err := syscall.MapViewOfFile(fmap, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) - if err != nil { - return nil, err - } - data := (*[1 << 30]byte)(unsafe.Pointer(ptr))[:sz] - - return data, nil -} - -// Unmap closes the memory-map. -func Unmap(data []byte) error { - if data == nil { - return nil - } - return syscall.UnmapViewOfFile(uintptr(unsafe.Pointer(&data[0]))) -} diff --git a/pkg/pointer/pointer.go b/pkg/pointer/pointer.go deleted file mode 100644 index 8906169c3b8..00000000000 --- a/pkg/pointer/pointer.go +++ /dev/null @@ -1,32 +0,0 @@ -// Package pointer provides utilities for pointer handling that aren't available in go. 
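For reference, a sketch of the `pkg/mmap` helpers removed above. The file path is a placeholder; passing `sz == 0` maps the whole file, and `Map` returns a nil slice for an empty file.

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/v2/pkg/mmap"
)

func main() {
	// 0 means "use the file's size".
	data, err := mmap.Map("testdata/example.tsm", 0) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer mmap.Unmap(data)

	fmt.Printf("mapped %d bytes\n", len(data))
}
```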
-// Feel free to add more pointerification functions for more types as you need them. -package pointer - -import ( - "time" -) - -// Duration returns a pointer to its argument. -func Duration(d time.Duration) *time.Duration { - return &d -} - -// Int returns a pointer to its argument. -func Int(i int) *int { - return &i -} - -// Int64 returns a pointer to its argument. -func Int64(i int64) *int64 { - return &i -} - -// String returns a pointer to its argument. -func String(s string) *string { - return &s -} - -// Time returns a pointer to its argument. -func Time(t time.Time) *time.Time { - return &t -} diff --git a/pkg/pool/bytes.go b/pkg/pool/bytes.go deleted file mode 100644 index 35db6f2cf52..00000000000 --- a/pkg/pool/bytes.go +++ /dev/null @@ -1,99 +0,0 @@ -// Package pool provides pool structures to help reduce garbage collector pressure. -package pool - -// Bytes is a pool of byte slices that can be re-used. Slices in -// this pool will not be garbage collected when not in use. -type Bytes struct { - pool chan []byte -} - -// NewBytes returns a Bytes pool with capacity for max byte slices -// to be pool. -func NewBytes(max int) *Bytes { - return &Bytes{ - pool: make(chan []byte, max), - } -} - -// Get returns a byte slice size with at least sz capacity. Items -// returned may not be in the zero state and should be reset by the -// caller. -func (p *Bytes) Get(sz int) []byte { - var c []byte - select { - case c = <-p.pool: - default: - return make([]byte, sz) - } - - if cap(c) < sz { - return make([]byte, sz) - } - - return c[:sz] -} - -// Put returns a slice back to the pool. If the pool is full, the byte -// slice is discarded. -func (p *Bytes) Put(c []byte) { - select { - case p.pool <- c: - default: - } -} - -// LimitedBytes is a pool of byte slices that can be re-used. Slices in -// this pool will not be garbage collected when not in use. The pool will -// hold onto a fixed number of byte slices of a maximum size. If the pool -// is empty or the required size is larger than max size, it will return a -// new byte slice. Byte slices added to the pool that are over the max size -// are dropped. -type LimitedBytes struct { - maxSize int - pool chan []byte -} - -// NewBytes returns a Bytes pool with capacity for max byte slices -// to be pool. -func NewLimitedBytes(capacity int, maxSize int) *LimitedBytes { - return &LimitedBytes{ - pool: make(chan []byte, capacity), - maxSize: maxSize, - } -} - -// Get returns a byte slice size with at least sz capacity. Items -// returned may not be in the zero state and should be reset by the -// caller. -func (p *LimitedBytes) Get(sz int) []byte { - var c []byte - - // If we have not allocated our capacity, return a new allocation, - // otherwise block until one frees up. - select { - case c = <-p.pool: - default: - return make([]byte, sz) - } - - if cap(c) < sz { - return make([]byte, sz) - } - - return c[:sz] -} - -// Put returns a slice back to the pool. If the pool is full, the byte -// slice is discarded. If the byte slice is over the configured max size -// of any byte slice in the pool, it is discarded. 
-func (p *LimitedBytes) Put(c []byte) { - // Drop buffers that are larger than the max size - if cap(c) >= p.maxSize { - return - } - - select { - case p.pool <- c: - default: - } -} diff --git a/pkg/pool/bytes_test.go b/pkg/pool/bytes_test.go deleted file mode 100644 index 0a8b1bfc177..00000000000 --- a/pkg/pool/bytes_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package pool_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/pkg/pool" -) - -func TestLimitedBytePool_Put_MaxSize(t *testing.T) { - bp := pool.NewLimitedBytes(1, 10) - bp.Put(make([]byte, 1024)) // should be dropped - - if got, exp := cap(bp.Get(10)), 10; got != exp { - t.Fatalf("max cap size exceeded: got %v, exp %v", got, exp) - } -} diff --git a/pkg/pool/generic.go b/pkg/pool/generic.go deleted file mode 100644 index 9eb98cce3b0..00000000000 --- a/pkg/pool/generic.go +++ /dev/null @@ -1,40 +0,0 @@ -package pool - -// Generic is a pool of types that can be re-used. Items in -// this pool will not be garbage collected when not in use. -type Generic struct { - pool chan interface{} - fn func(sz int) interface{} -} - -// NewGeneric returns a Generic pool with capacity for max items -// to be pool. -func NewGeneric(max int, fn func(sz int) interface{}) *Generic { - return &Generic{ - pool: make(chan interface{}, max), - fn: fn, - } -} - -// Get returns a item from the pool or a new instance if the pool -// is empty. Items returned may not be in the zero state and should -// be reset by the caller. -func (p *Generic) Get(sz int) interface{} { - var c interface{} - select { - case c = <-p.pool: - default: - c = p.fn(sz) - } - - return c -} - -// Put returns an item back to the pool. If the pool is full, the item -// is discarded. -func (p *Generic) Put(c interface{}) { - select { - case p.pool <- c: - default: - } -} diff --git a/pkg/radix/buffer.go b/pkg/radix/buffer.go deleted file mode 100644 index 19e5bfc9762..00000000000 --- a/pkg/radix/buffer.go +++ /dev/null @@ -1,31 +0,0 @@ -package radix - -// bufferSize is the size of the buffer and the largest slice that can be -// contained in it. -const bufferSize = 4096 - -// buffer is a type that amoritizes allocations into larger ones, handing out -// small subslices to make copies. -type buffer []byte - -// Copy returns a copy of the passed in byte slice allocated using the byte -// slice in the buffer. 
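For reference, a sketch of the `pkg/pool` types removed above. The capacities and the element type stored in the generic pool are illustrative.

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/pkg/pool"
)

func main() {
	// Keep up to 16 slices; any returned slice of 64 KiB or more is dropped.
	bp := pool.NewLimitedBytes(16, 64*1024)

	buf := bp.Get(1024) // contents may be stale; reset before use
	buf = append(buf[:0], "hello"...)
	fmt.Println(string(buf))
	bp.Put(buf)

	// Generic pool: the constructor runs only when the pool is empty.
	gp := pool.NewGeneric(8, func(sz int) interface{} { return make([]int64, 0, sz) })
	v := gp.Get(128).([]int64)
	gp.Put(v[:0])
}
```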
-func (b *buffer) Copy(x []byte) []byte { - // if we can never have enough room, just return a copy - if len(x) > bufferSize { - out := make([]byte, len(x)) - copy(out, x) - return out - } - - // if we don't have enough room, reallocate the buf first - if len(x) > len(*b) { - *b = make([]byte, bufferSize) - } - - // create a copy and hand out a slice - copy(*b, x) - out := (*b)[:len(x):len(x)] - *b = (*b)[len(x):] - return out -} diff --git a/pkg/radix/buffer_test.go b/pkg/radix/buffer_test.go deleted file mode 100644 index ff21c5a5300..00000000000 --- a/pkg/radix/buffer_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package radix - -import ( - "bytes" - "math/rand" - "testing" -) - -func TestBuffer(t *testing.T) { - var buf buffer - - for i := 0; i < 1000; i++ { - x1 := make([]byte, rand.Intn(32)+1) - for j := range x1 { - x1[j] = byte(i + j) - } - - x2 := buf.Copy(x1) - if !bytes.Equal(x2, x1) { - t.Fatal("bad copy") - } - - x1[0] += 1 - if bytes.Equal(x2, x1) { - t.Fatal("bad copy") - } - } -} - -func TestBufferAppend(t *testing.T) { - var buf buffer - x1 := buf.Copy(make([]byte, 1)) - x2 := buf.Copy(make([]byte, 1)) - - _ = append(x1, 1) - if x2[0] != 0 { - t.Fatal("append wrote past") - } -} - -func TestBufferLarge(t *testing.T) { - var buf buffer - - x1 := make([]byte, bufferSize+1) - x2 := buf.Copy(x1) - - if !bytes.Equal(x1, x2) { - t.Fatal("bad copy") - } - - x1[0] += 1 - if bytes.Equal(x1, x2) { - t.Fatal("bad copy") - } -} diff --git a/pkg/radix/sort.go b/pkg/radix/sort.go deleted file mode 100644 index cfc486d9bcb..00000000000 --- a/pkg/radix/sort.go +++ /dev/null @@ -1,92 +0,0 @@ -// Portions of this file from github.com/shawnsmithdev/zermelo under the MIT license. -// -// The MIT License (MIT) -// -// Copyright (c) 2014 Shawn Smith -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package radix - -import ( - "sort" -) - -const ( - minSize = 256 - radix uint = 8 - bitSize uint = 64 -) - -// SortUint64s sorts a slice of uint64s. 
-func SortUint64s(x []uint64) { - if len(x) < 2 { - return - } else if len(x) < minSize { - sort.Slice(x, func(i, j int) bool { return x[i] < x[j] }) - } else { - doSort(x) - } -} - -func doSort(x []uint64) { - // Each pass processes a byte offset, copying back and forth between slices - from := x - to := make([]uint64, len(x)) - var key uint8 - var offset [256]int // Keep track of where groups start - - for keyOffset := uint(0); keyOffset < bitSize; keyOffset += radix { - keyMask := uint64(0xFF << keyOffset) // Current 'digit' to look at - var counts [256]int // Keep track of the number of elements for each kind of byte - sorted := true // Check for already sorted - prev := uint64(0) // if elem is always >= prev it is already sorted - for _, elem := range from { - key = uint8((elem & keyMask) >> keyOffset) // fetch the byte at current 'digit' - counts[key]++ // count of elems to put in this digit's bucket - - if sorted { // Detect sorted - sorted = elem >= prev - prev = elem - } - } - - if sorted { // Short-circuit sorted - if (keyOffset/radix)%2 == 1 { - copy(to, from) - } - return - } - - // Find target bucket offsets - offset[0] = 0 - for i := 1; i < len(offset); i++ { - offset[i] = offset[i-1] + counts[i-1] - } - - // Rebucket while copying to other buffer - for _, elem := range from { - key = uint8((elem & keyMask) >> keyOffset) // Get the digit - to[offset[key]] = elem // Copy the element to the digit's bucket - offset[key]++ // One less space, move the offset - } - // On next pass copy data the other way - to, from = from, to - } -} diff --git a/pkg/radix/sort_test.go b/pkg/radix/sort_test.go deleted file mode 100644 index 19e6d08b43b..00000000000 --- a/pkg/radix/sort_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package radix - -import ( - "math/rand" - "testing" -) - -func benchmarkSort(b *testing.B, size int) { - orig := make([]uint64, size) - for i := range orig { - orig[i] = uint64(rand.Int63()) - } - data := make([]uint64, size) - - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - copy(data, orig) - SortUint64s(data) - } -} - -func BenchmarkSort_64(b *testing.B) { benchmarkSort(b, 64) } -func BenchmarkSort_128(b *testing.B) { benchmarkSort(b, 128) } -func BenchmarkSort_256(b *testing.B) { benchmarkSort(b, 256) } -func BenchmarkSort_12K(b *testing.B) { benchmarkSort(b, 12*1024) } diff --git a/pkg/radix/tree.go b/pkg/radix/tree.go deleted file mode 100644 index 5bf21bb4097..00000000000 --- a/pkg/radix/tree.go +++ /dev/null @@ -1,428 +0,0 @@ -package radix - -// This is a fork of https://github.com/armon/go-radix that removes the -// ability to update nodes as well as uses fixed int value type. - -import ( - "bytes" - "sort" - "sync" -) - -// leafNode is used to represent a value -type leafNode struct { - valid bool // true if key/val are valid - key []byte - val int -} - -// edge is used to represent an edge node -type edge struct { - label byte - node *node -} - -type node struct { - // leaf is used to store possible leaf - leaf leafNode - - // prefix is the common prefix we ignore - prefix []byte - - // Edges should be stored in-order for iteration. 
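For reference, a sketch of the `radix.SortUint64s` function removed above, which falls back to `sort.Slice` for inputs under 256 elements and otherwise runs an LSD radix sort, 8 bits per pass.

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/influxdata/influxdb/v2/pkg/radix"
)

func main() {
	x := make([]uint64, 1000)
	for i := range x {
		x[i] = rand.Uint64()
	}

	radix.SortUint64s(x) // sorts the slice in ascending order

	fmt.Println(x[0] <= x[len(x)-1]) // true
}
```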
- // We avoid a fully materialized slice to save memory, - // since in most cases we expect to be sparse - edges edges -} - -func (n *node) isLeaf() bool { - return n.leaf.valid -} - -func (n *node) addEdge(e edge) { - // find the insertion point with bisection - num := len(n.edges) - i, j := 0, num - for i < j { - h := int(uint(i+j) >> 1) - if n.edges[h].label < e.label { - i = h + 1 - } else { - j = h - } - } - - // make room, copy the suffix, and insert. - n.edges = append(n.edges, edge{}) - copy(n.edges[i+1:], n.edges[i:]) - n.edges[i] = e -} - -func (n *node) replaceEdge(e edge) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= e.label - }) - if idx < num && n.edges[idx].label == e.label { - n.edges[idx].node = e.node - return - } - panic("replacing missing edge") -} - -func (n *node) getEdge(label byte) *node { - // linear search for small slices - if len(n.edges) < 16 { - for _, e := range n.edges { - if e.label == label { - return e.node - } - } - return nil - } - - // binary search for larger - num := len(n.edges) - i, j := 0, num - for i < j { - h := int(uint(i+j) >> 1) - if n.edges[h].label < label { - i = h + 1 - } else { - j = h - } - } - if i < num && n.edges[i].label == label { - return n.edges[i].node - } - return nil -} - -type edges []edge - -// Tree implements a radix tree. This can be treated as a -// Dictionary abstract data type. The main advantage over -// a standard hash map is prefix-based lookups and -// ordered iteration. The tree is safe for concurrent access. -type Tree struct { - mu sync.RWMutex - root *node - size int - buf buffer -} - -// New returns an empty Tree -func New() *Tree { - return &Tree{root: &node{}} -} - -// NewFromMap returns a new tree containing the keys -// from an existing map -func NewFromMap(m map[string]int) *Tree { - t := &Tree{root: &node{}} - for k, v := range m { - t.Insert([]byte(k), v) - } - return t -} - -// Len is used to return the number of elements in the tree -func (t *Tree) Len() int { - t.mu.RLock() - size := t.size - t.mu.RUnlock() - - return size -} - -// longestPrefix finds the length of the shared prefix -// of two strings -func longestPrefix(k1, k2 []byte) int { - // for loops can't be inlined, but goto's can. we also use uint to help - // out the compiler to prove bounds checks aren't necessary on the index - // operations. - - lk1, lk2 := uint(len(k1)), uint(len(k2)) - i := uint(0) - -loop: - if lk1 <= i || lk2 <= i { - return int(i) - } - if k1[i] != k2[i] { - return int(i) - } - i++ - goto loop -} - -// Insert is used to add a newentry or update -// an existing entry. Returns if inserted. 
-func (t *Tree) Insert(s []byte, v int) (int, bool) { - t.mu.RLock() - - var parent *node - n := t.root - search := s - - for { - // Handle key exhaution - if len(search) == 0 { - if n.isLeaf() { - old := n.leaf.val - - t.mu.RUnlock() - return old, false - } - - n.leaf = leafNode{ - key: t.buf.Copy(s), - val: v, - valid: true, - } - t.size++ - - t.mu.RUnlock() - return v, true - } - - // Look for the edge - parent = n - n = n.getEdge(search[0]) - - // No edge, create one - if n == nil { - newNode := &node{ - leaf: leafNode{ - key: t.buf.Copy(s), - val: v, - valid: true, - }, - prefix: t.buf.Copy(search), - } - - e := edge{ - label: search[0], - node: newNode, - } - - parent.addEdge(e) - t.size++ - - t.mu.RUnlock() - return v, true - } - - // Determine longest prefix of the search key on match - commonPrefix := longestPrefix(search, n.prefix) - if commonPrefix == len(n.prefix) { - search = search[commonPrefix:] - continue - } - - // Split the node - t.size++ - child := &node{ - prefix: t.buf.Copy(search[:commonPrefix]), - } - parent.replaceEdge(edge{ - label: search[0], - node: child, - }) - - // Restore the existing node - child.addEdge(edge{ - label: n.prefix[commonPrefix], - node: n, - }) - n.prefix = n.prefix[commonPrefix:] - - // Create a new leaf node - leaf := leafNode{ - key: t.buf.Copy(s), - val: v, - valid: true, - } - - // If the new key is a subset, add to to this node - search = search[commonPrefix:] - if len(search) == 0 { - child.leaf = leaf - - t.mu.RUnlock() - return v, true - } - - // Create a new edge for the node - child.addEdge(edge{ - label: search[0], - node: &node{ - leaf: leaf, - prefix: t.buf.Copy(search), - }, - }) - - t.mu.RUnlock() - return v, true - } -} - -// DeletePrefix is used to delete the subtree under a prefix -// Returns how many nodes were deleted -// Use this to delete large subtrees efficiently -func (t *Tree) DeletePrefix(s []byte) int { - t.mu.Lock() - defer t.mu.Unlock() - - return t.deletePrefix(nil, t.root, s) -} - -// delete does a recursive deletion -func (t *Tree) deletePrefix(parent, n *node, prefix []byte) int { - // Check for key exhaustion - if len(prefix) == 0 { - // Remove the leaf node - subTreeSize := 0 - //recursively walk from all edges of the node to be deleted - recursiveWalk(n, func(s []byte, v int) bool { - subTreeSize++ - return false - }) - if n.isLeaf() { - n.leaf = leafNode{} - } - n.edges = nil // deletes the entire subtree - - // Check if we should merge the parent's other child - if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { - parent.mergeChild() - } - t.size -= subTreeSize - return subTreeSize - } - - // Look for an edge - label := prefix[0] - child := n.getEdge(label) - if child == nil || (!bytes.HasPrefix(child.prefix, prefix) && !bytes.HasPrefix(prefix, child.prefix)) { - return 0 - } - - // Consume the search prefix - if len(child.prefix) > len(prefix) { - prefix = prefix[len(prefix):] - } else { - prefix = prefix[len(child.prefix):] - } - return t.deletePrefix(n, child, prefix) -} - -func (n *node) mergeChild() { - e := n.edges[0] - child := e.node - prefix := make([]byte, 0, len(n.prefix)+len(child.prefix)) - prefix = append(prefix, n.prefix...) - prefix = append(prefix, child.prefix...) 
- n.prefix = prefix - n.leaf = child.leaf - n.edges = child.edges -} - -// Get is used to lookup a specific key, returning -// the value and if it was found -func (t *Tree) Get(s []byte) (int, bool) { - t.mu.RLock() - - n := t.root - search := s - for { - // Check for key exhaution - if len(search) == 0 { - if n.isLeaf() { - t.mu.RUnlock() - return n.leaf.val, true - } - break - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if bytes.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - - t.mu.RUnlock() - return 0, false -} - -// walkFn is used when walking the tree. Takes a -// key and value, returning if iteration should -// be terminated. -type walkFn func(s []byte, v int) bool - -// recursiveWalk is used to do a pre-order walk of a node -// recursively. Returns true if the walk should be aborted -func recursiveWalk(n *node, fn walkFn) bool { - // Visit the leaf values if any - if n.leaf.valid && fn(n.leaf.key, n.leaf.val) { - return true - } - - // Recurse on the children - for _, e := range n.edges { - if recursiveWalk(e.node, fn) { - return true - } - } - return false -} - -// Minimum is used to return the minimum value in the tree -func (t *Tree) Minimum() ([]byte, int, bool) { - t.mu.RLock() - - n := t.root - for { - if n.isLeaf() { - t.mu.RUnlock() - return n.leaf.key, n.leaf.val, true - } - if len(n.edges) > 0 { - n = n.edges[0].node - } else { - break - } - } - - t.mu.RUnlock() - return nil, 0, false -} - -// Maximum is used to return the maximum value in the tree -func (t *Tree) Maximum() ([]byte, int, bool) { - t.mu.RLock() - - n := t.root - for { - if num := len(n.edges); num > 0 { - n = n.edges[num-1].node - continue - } - if n.isLeaf() { - t.mu.RUnlock() - return n.leaf.key, n.leaf.val, true - } - break - } - - t.mu.RUnlock() - return nil, 0, false -} diff --git a/pkg/radix/tree_test.go b/pkg/radix/tree_test.go deleted file mode 100644 index 875b9b7a29d..00000000000 --- a/pkg/radix/tree_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package radix - -import ( - "crypto/rand" - "fmt" - "reflect" - "testing" -) - -// generateUUID is used to generate a random UUID -func generateUUID() string { - buf := make([]byte, 16) - if _, err := rand.Read(buf); err != nil { - panic(fmt.Errorf("failed to read random bytes: %v", err)) - } - - return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", - buf[0:4], - buf[4:6], - buf[6:8], - buf[8:10], - buf[10:16]) -} - -func TestRadix(t *testing.T) { - var min, max string - inp := make(map[string]int) - for i := 0; i < 1000; i++ { - gen := generateUUID() - inp[gen] = i - if gen < min || i == 0 { - min = gen - } - if gen > max || i == 0 { - max = gen - } - } - - r := NewFromMap(inp) - if r.Len() != len(inp) { - t.Fatalf("bad length: %v %v", r.Len(), len(inp)) - } - - // Check min and max - outMin, _, _ := r.Minimum() - if string(outMin) != min { - t.Fatalf("bad minimum: %s %v", outMin, min) - } - outMax, _, _ := r.Maximum() - if string(outMax) != max { - t.Fatalf("bad maximum: %s %v", outMax, max) - } - - for k, v := range inp { - out, ok := r.Get([]byte(k)) - if !ok { - t.Fatalf("missing key: %v", k) - } - if out != v { - t.Fatalf("value mis-match: %v %v", out, v) - } - } - -} - -func TestDeletePrefix(t *testing.T) { - type exp struct { - inp []string - prefix string - out []string - numDeleted int - } - - cases := []exp{ - {[]string{"", "A", "AB", "ABC", "R", "S"}, "A", []string{"", "R", "S"}, 3}, - {[]string{"", "A", "AB", "ABC", "R", "S"}, "ABC", 
[]string{"", "A", "AB", "R", "S"}, 1}, - {[]string{"", "A", "AB", "ABC", "R", "S"}, "", []string{}, 6}, - {[]string{"", "A", "AB", "ABC", "R", "S"}, "S", []string{"", "A", "AB", "ABC", "R"}, 1}, - {[]string{"", "A", "AB", "ABC", "R", "S"}, "SS", []string{"", "A", "AB", "ABC", "R", "S"}, 0}, - } - - for _, test := range cases { - r := New() - for _, ss := range test.inp { - r.Insert([]byte(ss), 1) - } - - deleted := r.DeletePrefix([]byte(test.prefix)) - if deleted != test.numDeleted { - t.Fatalf("Bad delete, expected %v to be deleted but got %v", test.numDeleted, deleted) - } - - out := []string{} - fn := func(s []byte, v int) bool { - out = append(out, string(s)) - return false - } - recursiveWalk(r.root, fn) - - if !reflect.DeepEqual(out, test.out) { - t.Fatalf("mis-match: %v %v", out, test.out) - } - } -} - -func TestInsert_Duplicate(t *testing.T) { - r := New() - vv, ok := r.Insert([]byte("cpu"), 1) - if vv != 1 { - t.Fatalf("value mismatch: got %v, exp %v", vv, 1) - } - - if !ok { - t.Fatalf("value mismatch: got %v, exp %v", ok, true) - } - - // Insert a dup with a different type should fail - vv, ok = r.Insert([]byte("cpu"), 2) - if vv != 1 { - t.Fatalf("value mismatch: got %v, exp %v", vv, 1) - } - - if ok { - t.Fatalf("value mismatch: got %v, exp %v", ok, false) - } -} - -// -// benchmarks -// - -func BenchmarkTree_Insert(b *testing.B) { - t := New() - - keys := make([][]byte, 0, 10000) - for i := 0; i < cap(keys); i++ { - k := []byte(fmt.Sprintf("cpu,host=%d", i)) - if v, ok := t.Insert(k, 1); v != 1 || !ok { - b.Fatalf("insert failed: %v != 1 || !%v", v, ok) - } - keys = append(keys, k) - } - - b.SetBytes(int64(len(keys))) - b.ReportAllocs() - b.ResetTimer() - - for j := 0; j < b.N; j++ { - for _, key := range keys { - if v, ok := t.Insert(key, 1); v != 1 || ok { - b.Fatalf("insert failed: %v != 1 || !%v", v, ok) - } - } - } -} - -func BenchmarkTree_InsertNew(b *testing.B) { - keys := make([][]byte, 0, 10000) - for i := 0; i < cap(keys); i++ { - k := []byte(fmt.Sprintf("cpu,host=%d", i)) - keys = append(keys, k) - } - - b.SetBytes(int64(len(keys))) - b.ReportAllocs() - b.ResetTimer() - - for j := 0; j < b.N; j++ { - t := New() - for _, key := range keys { - t.Insert(key, 1) - } - } -} diff --git a/pkg/reporthelper/walkshards.go b/pkg/reporthelper/walkshards.go deleted file mode 100644 index 3a745fce37e..00000000000 --- a/pkg/reporthelper/walkshards.go +++ /dev/null @@ -1,71 +0,0 @@ -// Package reporthelper reports statistics about TSM files. 
-package reporthelper - -import ( - "fmt" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -func IsShardDir(dir string) error { - name := filepath.Base(dir) - if id, err := strconv.Atoi(name); err != nil || id < 1 { - return fmt.Errorf("not a valid shard dir: %v", dir) - } - - return nil -} - -func WalkShardDirs(root string, fn func(db, rp, id, path string) error) error { - type location struct { - db, rp, id, path string - } - - var dirs []location - if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() { - return nil - } - - if filepath.Ext(info.Name()) == "."+tsm1.TSMFileExtension { - shardDir := filepath.Dir(path) - - if err := IsShardDir(shardDir); err != nil { - return err - } - absPath, err := filepath.Abs(path) - if err != nil { - return err - } - parts := strings.Split(absPath, string(filepath.Separator)) - db, rp, id := parts[len(parts)-4], parts[len(parts)-3], parts[len(parts)-2] - dirs = append(dirs, location{db: db, rp: rp, id: id, path: path}) - return nil - } - return nil - }); err != nil { - return err - } - - sort.Slice(dirs, func(i, j int) bool { - a, _ := strconv.Atoi(dirs[i].id) - b, _ := strconv.Atoi(dirs[j].id) - return a < b - }) - - for _, shard := range dirs { - if err := fn(shard.db, shard.rp, shard.id, shard.path); err != nil { - return err - } - } - return nil -} diff --git a/pkg/rhh/metrics.go b/pkg/rhh/metrics.go deleted file mode 100644 index 947743d9969..00000000000 --- a/pkg/rhh/metrics.go +++ /dev/null @@ -1,118 +0,0 @@ -package rhh - -import ( - "sort" - - "github.com/prometheus/client_golang/prometheus" -) - -type Metrics struct { - LoadFactor *prometheus.GaugeVec // Load factor of the hashmap. - Size *prometheus.GaugeVec // Number of items in hashmap. - GetDuration *prometheus.HistogramVec // Sample of get times. - LastGetDuration *prometheus.GaugeVec // Sample of most recent get time. - InsertDuration *prometheus.HistogramVec // Sample of insertion times. - LastInsertDuration *prometheus.GaugeVec // Sample of most recent insertion time. - LastGrowDuration *prometheus.GaugeVec // Most recent growth time. - MeanProbeCount *prometheus.GaugeVec // Average number of probes for each element. - - // These metrics have an extra label status = {"hit", "miss"} - Gets *prometheus.CounterVec // Number of times item retrieved. - Puts *prometheus.CounterVec // Number of times item inserted. -} - -// NewMetrics initialises prometheus metrics for tracking an RHH hashmap. 
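For reference, a sketch of `reporthelper.WalkShardDirs` removed above. The data directory path is a placeholder; the callback receives the database, retention policy, shard id, and TSM file path, with shards visited in ascending id order.

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/v2/pkg/reporthelper"
)

func main() {
	root := "/var/lib/influxdb/engine/data" // placeholder data directory
	err := reporthelper.WalkShardDirs(root, func(db, rp, id, path string) error {
		fmt.Printf("db=%s rp=%s shard=%s tsm=%s\n", db, rp, id, path)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```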
-func NewMetrics(namespace, subsystem string, labels prometheus.Labels) *Metrics { - var names []string - for k := range labels { - names = append(names, k) - } - sort.Strings(names) - - getPutNames := append(append([]string(nil), names...), "status") - sort.Strings(getPutNames) - - return &Metrics{ - LoadFactor: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "load_percent", - Help: "Load factor of the hashmap.", - }, names), - Size: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "size", - Help: "Number of items in the hashmap.", - }, names), - GetDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "get_duration_ns", - Help: "Times taken to retrieve elements in nanoseconds (sampled every 10% of retrievals).", - // 15 buckets spaced exponentially between 100 and ~30,000. - Buckets: prometheus.ExponentialBuckets(100., 1.5, 15), - }, names), - LastGetDuration: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "get_duration_last_ns", - Help: "Last retrieval duration in nanoseconds (sampled every 10% of retrievals)", - }, names), - InsertDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "put_duration_ns", - Help: "Times taken to insert elements in nanoseconds (sampled every 10% of insertions).", - // 15 buckets spaced exponentially between 100 and ~30,000. - Buckets: prometheus.ExponentialBuckets(100., 1.5, 15), - }, names), - LastInsertDuration: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "put_duration_last_ns", - Help: "Last insertion duration in nanoseconds (sampled every 10% of insertions)", - }, names), - LastGrowDuration: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "grow_duration_s", - Help: "Time in seconds to last grow the hashmap.", - }, names), - MeanProbeCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "mean_probes", - Help: "Average probe count of all elements (sampled every 0.5% of insertions).", - }, names), - - Gets: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "get_total", - Help: "Number of times elements retrieved.", - }, getPutNames), - Puts: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "put_total", - Help: "Number of times elements inserted.", - }, getPutNames), - } -} - -// PrometheusCollectors satisfies the prom.PrometheusCollector interface. -func (m *Metrics) PrometheusCollectors() []prometheus.Collector { - return []prometheus.Collector{ - m.LoadFactor, - m.Size, - m.GetDuration, - m.LastGetDuration, - m.InsertDuration, - m.LastInsertDuration, - m.LastGrowDuration, - m.MeanProbeCount, - m.Gets, - m.Puts, - } -} diff --git a/pkg/rhh/metrics_test.go b/pkg/rhh/metrics_test.go deleted file mode 100644 index 000f3bdc2cf..00000000000 --- a/pkg/rhh/metrics_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package rhh - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/kit/prom/promtest" - "github.com/prometheus/client_golang/prometheus" -) - -func TestMetrics_Metrics(t *testing.T) { - // metrics to be shared by multiple file stores. 
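For reference, a sketch showing how the `rhh.Metrics` collectors removed above are exposed through a Prometheus registry. The namespace, subsystem, and labels are illustrative.

```go
package main

import (
	"github.com/influxdata/influxdb/v2/pkg/rhh"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// One *Metrics instance is shared by every hashmap that should report
	// under the same metric names; per-map labels distinguish the series.
	m := rhh.NewMetrics("storage", "rhh", prometheus.Labels{"engine_id": "0"})

	reg := prometheus.NewRegistry()
	reg.MustRegister(m.PrometheusCollectors()...)
}
```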
- metrics := NewMetrics("test", "sub", prometheus.Labels{"engine_id": "", "node_id": ""}) - - t1 := newRHHTracker(metrics, prometheus.Labels{"engine_id": "0", "node_id": "0"}) - t2 := newRHHTracker(metrics, prometheus.Labels{"engine_id": "1", "node_id": "0"}) - - reg := prometheus.NewRegistry() - reg.MustRegister(metrics.PrometheusCollectors()...) - - base := "test_sub_" - - // All the metric names - gauges := []string{ - base + "load_percent", - base + "size", - base + "get_duration_last_ns", - base + "put_duration_last_ns", - base + "grow_duration_s", - base + "mean_probes", - } - - counters := []string{ - base + "get_total", - base + "put_total", - } - - histograms := []string{ - base + "get_duration_ns", - base + "put_duration_ns", - } - - // Generate some measurements. - for i, tracker := range []*rhhTracker{t1, t2} { - tracker.SetLoadFactor(float64(i + len(gauges[0]))) - tracker.SetSize(uint64(i + len(gauges[1]))) - - labels := tracker.Labels() - tracker.metrics.LastGetDuration.With(labels).Set(float64(i + len(gauges[2]))) - tracker.metrics.LastInsertDuration.With(labels).Set(float64(i + len(gauges[3]))) - tracker.metrics.LastGrowDuration.With(labels).Set(float64(i + len(gauges[4]))) - tracker.SetProbeCount(float64(i + len(gauges[5]))) - - labels = tracker.Labels() - labels["status"] = "ok" - tracker.metrics.Gets.With(labels).Add(float64(i + len(counters[0]))) - tracker.metrics.Puts.With(labels).Add(float64(i + len(counters[1]))) - - labels = tracker.Labels() - tracker.metrics.GetDuration.With(labels).Observe(float64(i + len(histograms[0]))) - tracker.metrics.InsertDuration.With(labels).Observe(float64(i + len(histograms[1]))) - } - - // Test that all the correct metrics are present. - mfs, err := reg.Gather() - if err != nil { - t.Fatal(err) - } - - // The label variants for the two caches. - labelVariants := []prometheus.Labels{ - {"engine_id": "0", "node_id": "0"}, - {"engine_id": "1", "node_id": "0"}, - } - - for i, labels := range labelVariants { - for _, name := range gauges { - exp := float64(i + len(name)) - metric := promtest.MustFindMetric(t, mfs, name, labels) - if got := metric.GetGauge().GetValue(); got != exp { - t.Errorf("[%s %d] got %v, expected %v", name, i, got, exp) - } - } - - for _, name := range counters { - exp := float64(i + len(name)) - - // Make a copy since we need to add a label - l := make(prometheus.Labels, len(labels)) - for k, v := range labels { - l[k] = v - } - l["status"] = "ok" - - metric := promtest.MustFindMetric(t, mfs, name, l) - if got := metric.GetCounter().GetValue(); got != exp { - t.Errorf("[%s %d] got %v, expected %v", name, i, got, exp) - } - } - - for _, name := range histograms { - exp := float64(i + len(name)) - metric := promtest.MustFindMetric(t, mfs, name, labels) - if got := metric.GetHistogram().GetSampleSum(); got != exp { - t.Errorf("[%s %d] got %v, expected %v", name, i, got, exp) - } - } - } -} diff --git a/pkg/rhh/rhh.go b/pkg/rhh/rhh.go deleted file mode 100644 index 2b50414c317..00000000000 --- a/pkg/rhh/rhh.go +++ /dev/null @@ -1,483 +0,0 @@ -package rhh - -import ( - "bytes" - "encoding/binary" - "math/rand" - "sort" - "time" - - "github.com/cespare/xxhash" - "github.com/prometheus/client_golang/prometheus" -) - -// HashMap represents a hash map that implements Robin Hood Hashing. 
-// https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf -type HashMap struct { - hashes []int64 - elems []hashElem - - n int64 - capacity int64 - threshold int64 - mask int64 - loadFactor int - - tmpKey []byte - - tracker *rhhTracker -} - -// NewHashMap initialises a new Hashmap with the provided options. -func NewHashMap(opt Options) *HashMap { - if opt.Metrics == nil { - opt.Metrics = NewMetrics("", "", nil) - } - - m := &HashMap{ - capacity: pow2(opt.Capacity), // Limited to 2^64. - loadFactor: opt.LoadFactor, - tracker: newRHHTracker(opt.Metrics, opt.Labels), - } - m.tracker.enabled = opt.MetricsEnabled - - m.alloc() - return m -} - -// Reset clears the values in the map without deallocating the space. -func (m *HashMap) Reset() { - for i := int64(0); i < m.capacity; i++ { - m.hashes[i] = 0 - m.elems[i].reset() - } - m.n = 0 - m.tracker.SetSize(0) -} - -func (m *HashMap) LoadFactor() int { return m.loadFactor } - -// Get returns the value for a key from the Hashmap, or nil if no key exists. -func (m *HashMap) Get(key []byte) interface{} { - var now time.Time - var sample bool - if rand.Float64() < 0.1 { - now = time.Now() - sample = true - } - - i := m.index(key) - - if sample { - m.tracker.ObserveGet(time.Since(now)) - } - - if i == -1 { - m.tracker.IncGetMiss() - return nil - } - m.tracker.IncGetHit() - return m.elems[i].value -} - -func (m *HashMap) put(key []byte, val interface{}, instrument bool) { - var now time.Time - var samplePut bool - - if instrument && rand.Float64() < 0.1 { - now = time.Now() - samplePut = true - } - - // Grow the map if we've run out of slots. - m.n++ - if m.n > m.threshold { - m.Grow(m.capacity * 2) - } - - // If the key was overwritten then decrement the size. - overwritten := m.insert(HashKey(key), key, val) - if instrument && samplePut { - m.tracker.ObservePut(time.Since(now)) - } - - if overwritten { - m.n-- - if instrument { - m.tracker.IncPutHit() - } - } else if instrument { - m.tracker.SetSize(uint64(m.n)) - m.tracker.SetLoadFactor(float64(m.n) / float64(m.capacity) * 100.0) - m.tracker.IncPutMiss() - } -} - -// Put stores the value at key in the Hashmap, overwriting an existing value if -// one exists. If the maximum load of the Hashmap is reached, the Hashmap will -// first resize itself. -func (m *HashMap) Put(key []byte, val interface{}) { - m.put(key, val, true) -} - -// PutQuiet is equivalent to Put, but no instrumentation code is executed. It can -// be faster when many keys are being inserted into the Hashmap. -func (m *HashMap) PutQuiet(key []byte, val interface{}) { - m.put(key, val, false) -} - -func (m *HashMap) insert(hash int64, key []byte, val interface{}) (overwritten bool) { - pos := hash & m.mask - var dist int64 - - var copied bool - searchKey := key - - // Continue searching until we find an empty slot or lower probe distance. - for { - e := &m.elems[pos] - - // Empty slot found or matching key, insert and exit. - match := bytes.Equal(m.elems[pos].key, searchKey) - if m.hashes[pos] == 0 || match { - m.hashes[pos] = hash - e.hash, e.value = hash, val - e.setKey(searchKey) - return match - } - - // If the existing elem has probed less than us, then swap places with - // existing elem, and keep going to find another slot for that elem. - elemDist := Dist(m.hashes[pos], pos, m.capacity) - if elemDist < dist { - // Swap with current position. 
- hash, m.hashes[pos] = m.hashes[pos], hash - val, e.value = e.value, val - - m.tmpKey = assign(m.tmpKey, e.key) - e.setKey(searchKey) - - if !copied { - searchKey = make([]byte, len(key)) - copy(searchKey, key) - copied = true - } - - searchKey = assign(searchKey, m.tmpKey) - - // Update current distance. - dist = elemDist - } - - // Increment position, wrap around on overflow. - pos = (pos + 1) & m.mask - dist++ - } -} - -// alloc elems according to currently set capacity. -func (m *HashMap) alloc() { - m.elems = make([]hashElem, m.capacity) - m.hashes = make([]int64, m.capacity) - m.threshold = (m.capacity * int64(m.loadFactor)) / 100 - m.mask = int64(m.capacity - 1) -} - -// Grow increases the capacity and reinserts all existing hashes & elements. -func (m *HashMap) Grow(sz int64) { - // Ensure new capacity is a power of two and greater than current capacity. - sz = pow2(sz) - if sz <= m.capacity { - return - } - - // Copy old elements and hashes. - elems, hashes := m.elems, m.hashes - capacity := m.capacity - - // Increase capacity & reallocate. - m.capacity = sz - m.alloc() - - // Copy old elements to new hash/elem list. - for i := int64(0); i < capacity; i++ { - elem, hash := &elems[i], hashes[i] - if hash == 0 { - continue - } - m.insert(hash, elem.key, elem.value) - } -} - -// index returns the position of key in the hash map. -func (m *HashMap) index(key []byte) int64 { - hash := HashKey(key) - pos := hash & m.mask - - var dist int64 - for { - if m.hashes[pos] == 0 { - return -1 - } else if dist > Dist(m.hashes[pos], pos, m.capacity) { - return -1 - } else if m.hashes[pos] == hash && bytes.Equal(m.elems[pos].key, key) { - return pos - } - - pos = (pos + 1) & m.mask - dist++ - } -} - -// Elem returns the i-th key/value pair of the hash map. -func (m *HashMap) Elem(i int64) (key []byte, value interface{}) { - if i >= int64(len(m.elems)) { - return nil, nil - } - - e := &m.elems[i] - return e.key, e.value -} - -// Len returns the number of key/values set in map. -func (m *HashMap) Len() int64 { return m.n } - -// Cap returns the number of key/values set in map. -func (m *HashMap) Cap() int64 { return m.capacity } - -// AverageProbeCount returns the average number of probes for each element. -func (m *HashMap) AverageProbeCount() float64 { - var sum float64 - for i := int64(0); i < m.capacity; i++ { - hash := m.hashes[i] - if hash == 0 { - continue - } - sum += float64(Dist(hash, i, m.capacity)) - } - return sum / (float64(m.n) + 1.0) -} - -// Keys returns a list of sorted keys. -func (m *HashMap) Keys() [][]byte { - a := make([][]byte, 0, m.Len()) - for i := int64(0); i < m.Cap(); i++ { - k, v := m.Elem(i) - if v == nil { - continue - } - a = append(a, k) - } - sort.Sort(byteSlices(a)) - return a -} - -// PrometheusCollectors returns the metrics associated with this hashmap. -func (m *HashMap) PrometheusCollectors() []prometheus.Collector { - return m.tracker.metrics.PrometheusCollectors() -} - -type rhhTracker struct { - metrics *Metrics - enabled bool - baseLabels prometheus.Labels - - // Prevent allocations by initialising these static maps when creating a - // new tracker. - hitIncLabels prometheus.Labels - missIncLabels prometheus.Labels -} - -// Labels returns a copy of the default labels used by the tracker's metrics. -// The returned map is safe for modification. 
-func (t *rhhTracker) Labels() prometheus.Labels { - labels := make(prometheus.Labels, len(t.baseLabels)) - for k, v := range t.baseLabels { - labels[k] = v - } - return labels -} - -func newRHHTracker(metrics *Metrics, defaultLabels prometheus.Labels) *rhhTracker { - tracker := &rhhTracker{metrics: metrics, enabled: true} - - // Create a copy of the provided labels. - tracker.baseLabels = make(prometheus.Labels, len(defaultLabels)) - for k, v := range defaultLabels { - tracker.baseLabels[k] = v - } - - tracker.hitIncLabels = tracker.Labels() - tracker.hitIncLabels["status"] = "hit" - tracker.missIncLabels = tracker.Labels() - tracker.missIncLabels["status"] = "miss" - - return tracker -} - -func (t *rhhTracker) SetLoadFactor(load float64) { - if !t.enabled { - return - } - - t.metrics.LoadFactor.With(t.baseLabels).Set(load) -} - -func (t *rhhTracker) SetSize(sz uint64) { - if !t.enabled { - return - } - - t.metrics.Size.With(t.baseLabels).Set(float64(sz)) -} - -func (t *rhhTracker) ObserveGet(d time.Duration) { - if !t.enabled { - return - } - - t.metrics.GetDuration.With(t.baseLabels).Observe(float64(d.Nanoseconds())) - t.metrics.LastGetDuration.With(t.baseLabels).Set(float64(d.Nanoseconds())) -} - -func (t *rhhTracker) ObservePut(d time.Duration) { - if !t.enabled { - return - } - - t.metrics.InsertDuration.With(t.baseLabels).Observe(float64(d.Nanoseconds())) - t.metrics.LastInsertDuration.With(t.baseLabels).Set(float64(d.Nanoseconds())) -} - -func (t *rhhTracker) SetGrowDuration(d time.Duration) { - if !t.enabled { - return - } - - t.metrics.LastGrowDuration.With(t.baseLabels).Set(d.Seconds()) -} - -// TODO(edd): currently no safe way to calculate this concurrently. -func (t *rhhTracker) SetProbeCount(length float64) { - if !t.enabled { - return - } - - t.metrics.MeanProbeCount.With(t.baseLabels).Set(length) -} - -func (t *rhhTracker) incGet(status string) { - if !t.enabled { - return - } - - labels := t.hitIncLabels - if status == "miss" { - labels = t.missIncLabels - } - t.metrics.Gets.With(labels).Inc() -} - -func (t *rhhTracker) IncGetHit() { t.incGet("hit") } -func (t *rhhTracker) IncGetMiss() { t.incGet("miss") } - -func (t *rhhTracker) incPut(status string) { - if !t.enabled { - return - } - - labels := t.hitIncLabels - if status == "miss" { - labels = t.missIncLabels - } - t.metrics.Puts.With(labels).Inc() -} - -func (t *rhhTracker) IncPutHit() { t.incPut("hit") } -func (t *rhhTracker) IncPutMiss() { t.incPut("miss") } - -type hashElem struct { - key []byte - value interface{} - hash int64 -} - -// reset clears the values in the element. -func (e *hashElem) reset() { - e.key = e.key[:0] - e.value = nil - e.hash = 0 -} - -// setKey copies v to a key on e. -func (e *hashElem) setKey(v []byte) { - e.key = assign(e.key, v) -} - -// Options represents initialization options that are passed to NewHashMap(). -type Options struct { - Capacity int64 - LoadFactor int - MetricsEnabled bool - Metrics *Metrics - Labels prometheus.Labels -} - -// DefaultOptions represents a default set of options to pass to NewHashMap(). -var DefaultOptions = Options{ - Capacity: 256, - LoadFactor: 90, - MetricsEnabled: true, -} - -// HashKey computes a hash of key. Hash is always non-zero. -func HashKey(key []byte) int64 { - h := int64(xxhash.Sum64(key)) - if h == 0 { - h = 1 - } else if h < 0 { - h = 0 - h - } - return h -} - -// HashUint64 computes a hash of an int64. Hash is always non-zero. 
-func HashUint64(key uint64) int64 { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, key) - return HashKey(buf) -} - -// Dist returns the probe distance for a hash in a slot index. -// NOTE: Capacity must be a power of 2. -func Dist(hash, i, capacity int64) int64 { - mask := capacity - 1 - dist := (i + capacity - (hash & mask)) & mask - return dist -} - -// pow2 returns the number that is the next highest power of 2. -// Returns v if it is a power of 2. -func pow2(v int64) int64 { - for i := int64(2); i < 1<<62; i *= 2 { - if i >= v { - return i - } - } - panic("unreachable") -} - -func assign(x, v []byte) []byte { - if cap(x) < len(v) { - x = make([]byte, len(v)) - } - x = x[:len(v)] - copy(x, v) - return x -} - -type byteSlices [][]byte - -func (a byteSlices) Len() int { return len(a) } -func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 } -func (a byteSlices) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/pkg/rhh/rhh_test.go b/pkg/rhh/rhh_test.go deleted file mode 100644 index 0f58d58e8d2..00000000000 --- a/pkg/rhh/rhh_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package rhh_test - -import ( - "bytes" - "math/rand" - "reflect" - "testing" - "testing/quick" - - "github.com/influxdata/influxdb/v2/pkg/rhh" -) - -// Ensure hash map can perform basic get/put operations. -func TestHashMap(t *testing.T) { - m := rhh.NewHashMap(rhh.DefaultOptions) - m.Put([]byte("foo"), []byte("bar")) - m.Put([]byte("baz"), []byte("bat")) - - // Verify values can be retrieved. - if v := m.Get([]byte("foo")); !bytes.Equal(v.([]byte), []byte("bar")) { - t.Fatalf("unexpected value: %s", v) - } - if v := m.Get([]byte("baz")); !bytes.Equal(v.([]byte), []byte("bat")) { - t.Fatalf("unexpected value: %s", v) - } - - // Overwrite field & verify. - m.Put([]byte("foo"), []byte("XXX")) - if v := m.Get([]byte("foo")); !bytes.Equal(v.([]byte), []byte("XXX")) { - t.Fatalf("unexpected value: %s", v) - } -} - -// Ensure hash map can insert random data. -func TestHashMap_Quick(t *testing.T) { - if testing.Short() { - t.Skip("short mode, skipping") - } - - if err := quick.Check(func(keys, values [][]byte) bool { - m := rhh.NewHashMap(rhh.Options{Capacity: 1000, LoadFactor: 90}) - h := make(map[string][]byte) - - // Insert all key/values into both maps. - for i := range keys { - key, value := keys[i], values[i] - h[string(key)] = value - m.Put(key, value) - } - - // Verify the maps are equal. - for k, v := range h { - if mv := m.Get([]byte(k)); !bytes.Equal(mv.([]byte), v) { - t.Fatalf("value mismatch:\nkey=%x\ngot=%x\nexp=%x\n\n", []byte(k), mv, v) - } - } - - return true - }, &quick.Config{ - Values: func(values []reflect.Value, rand *rand.Rand) { - n := rand.Intn(10000) - values[0] = GenerateByteSlices(rand, n) - values[1] = GenerateByteSlices(rand, n) - }, - }); err != nil { - t.Fatal(err) - } -} - -// GenerateByteSlices returns a random list of byte slices. -func GenerateByteSlices(rand *rand.Rand, n int) reflect.Value { - var a [][]byte - for i := 0; i < n; i++ { - v, _ := quick.Value(reflect.TypeOf(([]byte)(nil)), rand) - a = append(a, v.Interface().([]byte)) - } - return reflect.ValueOf(a) -} diff --git a/pkg/slices/bytes.go b/pkg/slices/bytes.go deleted file mode 100644 index 192ab27f26a..00000000000 --- a/pkg/slices/bytes.go +++ /dev/null @@ -1,66 +0,0 @@ -package slices - -import ( - "bytes" -) - -// BytesToStrings converts a slice of []byte into a slice of strings. 
-func BytesToStrings(a [][]byte) []string { - s := make([]string, 0, len(a)) - for _, v := range a { - s = append(s, string(v)) - } - return s -} - -// CopyChunkedByteSlices deep-copies a [][]byte to a new [][]byte that is backed by a small number of []byte "chunks". -func CopyChunkedByteSlices(src [][]byte, chunkSize int) [][]byte { - dst := make([][]byte, len(src)) - - for chunkBegin := 0; chunkBegin < len(src); chunkBegin += chunkSize { - chunkEnd := len(src) - if chunkEnd-chunkBegin > chunkSize { - chunkEnd = chunkBegin + chunkSize - } - - chunkByteSize := 0 - for j := chunkBegin; j < chunkEnd; j++ { - chunkByteSize += len(src[j]) - } - - chunk := make([]byte, chunkByteSize) - offset := 0 - for j := chunkBegin; j < chunkEnd; j++ { - copy(chunk[offset:offset+len(src[j])], src[j]) - dst[j] = chunk[offset : offset+len(src[j]) : offset+len(src[j])] - offset += len(src[j]) - } - } - - return dst -} - -// CompareSlice returns an integer comparing two slices of byte slices -// lexicographically. -// The result will be 0 if a==b, -1 if a < b, and +1 if a > b. -func CompareSlice(a, b [][]byte) int { - i := 0 - for i < len(a) && i < len(b) { - if v := bytes.Compare(a[i], b[i]); v == 0 { - i++ - continue - } else { - return v - } - } - - if i < len(b) { - // b is longer, so assume a is less - return -1 - } else if i < len(a) { - // a is longer, so assume b is less - return 1 - } else { - return 0 - } -} diff --git a/pkg/slices/bytes_test.go b/pkg/slices/bytes_test.go deleted file mode 100644 index 866e03c30a9..00000000000 --- a/pkg/slices/bytes_test.go +++ /dev/null @@ -1,204 +0,0 @@ -package slices - -import ( - "bytes" - "fmt" - "math" - "reflect" - "testing" - "unsafe" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/pkg/bytesutil" -) - -func TestCopyChunkedByteSlices_oneChunk(t *testing.T) { - src := [][]byte{ - []byte("influx"), - []byte("data"), - } - - dst := CopyChunkedByteSlices(src, 3) - if !reflect.DeepEqual(src, dst) { - t.Errorf("destination should match source src: %v dst: %v", src, dst) - } - - dst[0][1] = 'z' - if reflect.DeepEqual(src, dst) { - t.Error("destination should not match source") - } -} - -func TestCopyChunkedByteSlices_multipleChunks(t *testing.T) { - src := [][]byte{ - []byte("influx"), - []byte("data"), - []byte("is"), - []byte("the"), - []byte("best"), - []byte("time"), - []byte("series"), - []byte("database"), - []byte("in"), - []byte("the"), - []byte("whole"), - []byte("wide"), - []byte("world"), - []byte(":-)"), - } - - chunkSize := 4 - dst := CopyChunkedByteSlices(src, chunkSize) - if !reflect.DeepEqual(src, dst) { - t.Errorf("destination should match source src: %v dst: %v", src, dst) - } - - for i := 0; i < int(math.Ceil(float64(len(src))/float64(chunkSize))); i++ { - thisChunkSize := chunkSize - if len(src)-thisChunkSize*i < thisChunkSize { - thisChunkSize = len(src) - thisChunkSize*i - } - - chunk := dst[i*thisChunkSize : (i+1)*thisChunkSize] - - for j := 0; j < thisChunkSize-1; j++ { - a := (*reflect.SliceHeader)(unsafe.Pointer(&chunk[j])) - b := (*reflect.SliceHeader)(unsafe.Pointer(&chunk[j+1])) - if b.Data-a.Data != uintptr(a.Len) { - t.Error("chunk elements do not appear to be adjacent, so not part of one chunk") - } - if a.Cap != a.Len { - t.Errorf("slice length != capacity; %d vs %d", a.Len, a.Cap) - } - if b.Cap != b.Len { - t.Errorf("slice length != capacity; %d vs %d", b.Len, b.Cap) - } - } - } - - dst[0][5] = 'z' - if reflect.DeepEqual(src, dst) { - t.Error("destination should not match source") - } -} - 
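For quick reference, a minimal usage sketch of the chunked copy exercised by the tests above. It relies only on the behaviour documented in the code: the copy compares equal to the source but is backed by a handful of freshly allocated chunks, so mutating it leaves the source untouched.

```
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/pkg/slices"
)

func main() {
	src := [][]byte{[]byte("influx"), []byte("data")}

	// Deep-copy src into backing chunks of at most 3 elements each.
	dst := slices.CopyChunkedByteSlices(src, 3)

	// The copy is independent of the source.
	dst[0][0] = 'X'
	fmt.Printf("src=%s dst=%s\n", src[0], dst[0]) // src=influx dst=Xnflux
}
```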
-const NIL = "" - -// ss returns a sorted slice of byte slices. -func ss(s ...string) [][]byte { - r := make([][]byte, len(s)) - for i := range s { - if s[i] != NIL { - r[i] = []byte(s[i]) - } - } - bytesutil.Sort(r) - return r -} - -func TestCompareSlice(t *testing.T) { - name := func(a, b [][]byte, exp int) string { - var as string - if a != nil { - as = string(bytes.Join(a, nil)) - } else { - as = NIL - } - var bs string - if b != nil { - bs = string(bytes.Join(b, nil)) - } else { - bs = NIL - } - return fmt.Sprintf("%s <=> %s is %d", as, bs, exp) - } - tests := []struct { - a, b [][]byte - exp int - }{ - { - a: ss("aaa", "bbb", "ccc"), - b: ss("aaa", "bbb", "ccc"), - exp: 0, - }, - - { - a: ss("aaa", "bbb", "ccc", "ddd"), - b: ss("aaa", "bbb", "ccc"), - exp: 1, - }, - - { - a: ss("aaa", "bbb"), - b: ss("aaa", "bbb", "ccc"), - exp: -1, - }, - - { - a: ss("aaa", "bbbb"), - b: ss("aaa", "bbb", "ccc"), - exp: 1, - }, - - { - a: ss("aaa", "ccc"), - b: ss("aaa", "bbb", "ccc"), - exp: 1, - }, - - { - a: ss("aaa", "bbb", NIL), - b: ss("aaa", "bbb", "ccc"), - exp: -1, - }, - - { - a: ss("aaa", NIL, "ccc"), - b: ss("aaa", NIL, "ccc"), - exp: 0, - }, - - { - a: ss(NIL, "bbb", "ccc"), - b: ss("aaa", "bbb", "ccc"), - exp: -1, - }, - - { - a: ss("aaa", "aaa"), - b: ss("aaa", "bbb", "ccc"), - exp: -1, - }, - - { - a: nil, - b: ss("aaa", "bbb", "ccc"), - exp: -1, - }, - - { - a: ss("aaa", "bbb"), - b: nil, - exp: 1, - }, - - { - a: nil, - b: nil, - exp: 0, - }, - - { - a: [][]byte{}, - b: nil, - exp: 0, - }, - } - for _, test := range tests { - t.Run(name(test.a, test.b, test.exp), func(t *testing.T) { - if got := CompareSlice(test.a, test.b); got != test.exp { - t.Errorf("unexpected result, -got/+exp\n%s", cmp.Diff(got, test.exp)) - } - }) - } -} diff --git a/pkg/slices/merge.gen.go b/pkg/slices/merge.gen.go deleted file mode 100644 index 304c2de8d9d..00000000000 --- a/pkg/slices/merge.gen.go +++ /dev/null @@ -1,398 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! -// Source: merge.gen.go.tmpl - -package slices - -import "bytes" - -// Merge uses a k-way merge to merge n collections of sorted byte slices. -// -// The resulting slice is returned in ascending order, with any duplicate values -// removed. -func MergeSortedFloats(n ...[]float64) []float64 { - var result []float64 - if len(n) == 0 { - return nil - } else if len(n) == 1 { - // Special case. Merge single slice with a nil slice, to remove any - // duplicates from the single slice. - return MergeSortedFloats(n[0], nil) - } - - var maxSize int - for _, a := range n { - if len(a) > maxSize { - maxSize = len(a) - } - } - result = make([]float64, 0, maxSize) // This will likely be too small but it's a start. - - idxs := make([]int, len(n)) // Indexes we've processed. - var j int // Index we currently think is minimum. - - for { - j = -1 - - // Find the smallest minimum in all slices. - for i := 0; i < len(n); i++ { - if idxs[i] >= len(n[i]) { - continue // We have completely drained all values in this slice. - } else if j == -1 { - // We haven't picked the minimum value yet. Pick this one. - j = i - continue - } - - // It this value key is lower than the candidate. - - if n[i][idxs[i]] < n[j][idxs[j]] { - j = i - } else if n[i][idxs[i]] == n[j][idxs[j]] { - // Duplicate value. Throw it away. - idxs[i]++ - } - - } - - // We could have drained all of the values and be done... - if j == -1 { - break - } - - // First value to just append it and move on. 
- if len(result) == 0 { - result = append(result, n[j][idxs[j]]) - idxs[j]++ - continue - } - - // Append the minimum value to results if it's not a duplicate of - // the existing one. - - if result[len(result)-1] < n[j][idxs[j]] { - result = append(result, n[j][idxs[j]]) - } else if result[len(result)-1] == n[j][idxs[j]] { - // Duplicate so drop it. - } else { - panic("value being merged out of order.") - } - - idxs[j]++ - } - return result -} - -// Merge uses a k-way merge to merge n collections of sorted byte slices. -// -// The resulting slice is returned in ascending order, with any duplicate values -// removed. -func MergeSortedInts(n ...[]int64) []int64 { - var result []int64 - if len(n) == 0 { - return nil - } else if len(n) == 1 { - // Special case. Merge single slice with a nil slice, to remove any - // duplicates from the single slice. - return MergeSortedInts(n[0], nil) - } - - var maxSize int - for _, a := range n { - if len(a) > maxSize { - maxSize = len(a) - } - } - result = make([]int64, 0, maxSize) // This will likely be too small but it's a start. - - idxs := make([]int, len(n)) // Indexes we've processed. - var j int // Index we currently think is minimum. - - for { - j = -1 - - // Find the smallest minimum in all slices. - for i := 0; i < len(n); i++ { - if idxs[i] >= len(n[i]) { - continue // We have completely drained all values in this slice. - } else if j == -1 { - // We haven't picked the minimum value yet. Pick this one. - j = i - continue - } - - // It this value key is lower than the candidate. - - if n[i][idxs[i]] < n[j][idxs[j]] { - j = i - } else if n[i][idxs[i]] == n[j][idxs[j]] { - // Duplicate value. Throw it away. - idxs[i]++ - } - - } - - // We could have drained all of the values and be done... - if j == -1 { - break - } - - // First value to just append it and move on. - if len(result) == 0 { - result = append(result, n[j][idxs[j]]) - idxs[j]++ - continue - } - - // Append the minimum value to results if it's not a duplicate of - // the existing one. - - if result[len(result)-1] < n[j][idxs[j]] { - result = append(result, n[j][idxs[j]]) - } else if result[len(result)-1] == n[j][idxs[j]] { - // Duplicate so drop it. - } else { - panic("value being merged out of order.") - } - - idxs[j]++ - } - return result -} - -// Merge uses a k-way merge to merge n collections of sorted byte slices. -// -// The resulting slice is returned in ascending order, with any duplicate values -// removed. -func MergeSortedUInts(n ...[]uint64) []uint64 { - var result []uint64 - if len(n) == 0 { - return nil - } else if len(n) == 1 { - // Special case. Merge single slice with a nil slice, to remove any - // duplicates from the single slice. - return MergeSortedUInts(n[0], nil) - } - - var maxSize int - for _, a := range n { - if len(a) > maxSize { - maxSize = len(a) - } - } - result = make([]uint64, 0, maxSize) // This will likely be too small but it's a start. - - idxs := make([]int, len(n)) // Indexes we've processed. - var j int // Index we currently think is minimum. - - for { - j = -1 - - // Find the smallest minimum in all slices. - for i := 0; i < len(n); i++ { - if idxs[i] >= len(n[i]) { - continue // We have completely drained all values in this slice. - } else if j == -1 { - // We haven't picked the minimum value yet. Pick this one. - j = i - continue - } - - // It this value key is lower than the candidate. - - if n[i][idxs[i]] < n[j][idxs[j]] { - j = i - } else if n[i][idxs[i]] == n[j][idxs[j]] { - // Duplicate value. Throw it away. 
- idxs[i]++ - } - - } - - // We could have drained all of the values and be done... - if j == -1 { - break - } - - // First value to just append it and move on. - if len(result) == 0 { - result = append(result, n[j][idxs[j]]) - idxs[j]++ - continue - } - - // Append the minimum value to results if it's not a duplicate of - // the existing one. - - if result[len(result)-1] < n[j][idxs[j]] { - result = append(result, n[j][idxs[j]]) - } else if result[len(result)-1] == n[j][idxs[j]] { - // Duplicate so drop it. - } else { - panic("value being merged out of order.") - } - - idxs[j]++ - } - return result -} - -// Merge uses a k-way merge to merge n collections of sorted byte slices. -// -// The resulting slice is returned in ascending order, with any duplicate values -// removed. -func MergeSortedStrings(n ...[]string) []string { - var result []string - if len(n) == 0 { - return nil - } else if len(n) == 1 { - // Special case. Merge single slice with a nil slice, to remove any - // duplicates from the single slice. - return MergeSortedStrings(n[0], nil) - } - - var maxSize int - for _, a := range n { - if len(a) > maxSize { - maxSize = len(a) - } - } - result = make([]string, 0, maxSize) // This will likely be too small but it's a start. - - idxs := make([]int, len(n)) // Indexes we've processed. - var j int // Index we currently think is minimum. - - for { - j = -1 - - // Find the smallest minimum in all slices. - for i := 0; i < len(n); i++ { - if idxs[i] >= len(n[i]) { - continue // We have completely drained all values in this slice. - } else if j == -1 { - // We haven't picked the minimum value yet. Pick this one. - j = i - continue - } - - // It this value key is lower than the candidate. - - if n[i][idxs[i]] < n[j][idxs[j]] { - j = i - } else if n[i][idxs[i]] == n[j][idxs[j]] { - // Duplicate value. Throw it away. - idxs[i]++ - } - - } - - // We could have drained all of the values and be done... - if j == -1 { - break - } - - // First value to just append it and move on. - if len(result) == 0 { - result = append(result, n[j][idxs[j]]) - idxs[j]++ - continue - } - - // Append the minimum value to results if it's not a duplicate of - // the existing one. - - if result[len(result)-1] < n[j][idxs[j]] { - result = append(result, n[j][idxs[j]]) - } else if result[len(result)-1] == n[j][idxs[j]] { - // Duplicate so drop it. - } else { - panic("value being merged out of order.") - } - - idxs[j]++ - } - return result -} - -// Merge uses a k-way merge to merge n collections of sorted byte slices. -// -// The resulting slice is returned in ascending order, with any duplicate values -// removed. -func MergeSortedBytes(n ...[][]byte) [][]byte { - var result [][]byte - if len(n) == 0 { - return nil - } else if len(n) == 1 { - // Special case. Merge single slice with a nil slice, to remove any - // duplicates from the single slice. - return MergeSortedBytes(n[0], nil) - } - - var maxSize int - for _, a := range n { - if len(a) > maxSize { - maxSize = len(a) - } - } - result = make([][]byte, 0, maxSize) // This will likely be too small but it's a start. - - idxs := make([]int, len(n)) // Indexes we've processed. - var j int // Index we currently think is minimum. - - var cmp int // Result of comparing most recent value. - - for { - j = -1 - - // Find the smallest minimum in all slices. - for i := 0; i < len(n); i++ { - if idxs[i] >= len(n[i]) { - continue // We have completely drained all values in this slice. - } else if j == -1 { - // We haven't picked the minimum value yet. Pick this one. 
- j = i - continue - } - - // It this value key is lower than the candidate. - - cmp = bytes.Compare(n[i][idxs[i]], n[j][idxs[j]]) - if cmp == -1 { - j = i - } else if cmp == 0 { - // Duplicate value. Throw it away. - idxs[i]++ - } - - } - - // We could have drained all of the values and be done... - if j == -1 { - break - } - - // First value to just append it and move on. - if len(result) == 0 { - result = append(result, n[j][idxs[j]]) - idxs[j]++ - continue - } - - // Append the minimum value to results if it's not a duplicate of - // the existing one. - - cmp = bytes.Compare(result[len(result)-1], n[j][idxs[j]]) - if cmp == -1 { - result = append(result, n[j][idxs[j]]) - } else if cmp == 0 { - // Duplicate so drop it. - } else { - panic("value being merged out of order.") - } - - idxs[j]++ - } - return result -} diff --git a/pkg/slices/merge.gen.go.tmpl b/pkg/slices/merge.gen.go.tmpl deleted file mode 100644 index 8e40a656d6f..00000000000 --- a/pkg/slices/merge.gen.go.tmpl +++ /dev/null @@ -1,104 +0,0 @@ -package slices - -import "bytes" - -{{with $types := .}}{{range $k := $types}} - -// Merge uses a k-way merge to merge n collections of sorted byte slices. -// -// The resulting slice is returned in ascending order, with any duplicate values -// removed. -func MergeSorted{{$k.Name}}(n ...[]{{$k.Type}}) []{{$k.Type}} { - var result []{{$k.Type}} - if len(n) == 0 { - return nil - } else if len(n) == 1 { - // Special case. Merge single slice with a nil slice, to remove any - // duplicates from the single slice. - return MergeSorted{{$k.Name}}(n[0], nil) - } - - var maxSize int - for _, a := range n { - if len(a) > maxSize { - maxSize = len(a) - } - } - result = make([]{{$k.Type}}, 0, maxSize) // This will likely be too small but it's a start. - - idxs := make([]int, len(n)) // Indexes we've processed. - var j int // Index we currently think is minimum. -{{if eq $k.Name "Bytes" }} - var cmp int // Result of comparing most recent value. -{{end}} - for { - j = -1 - - // Find the smallest minimum in all slices. - for i := 0; i < len(n); i++ { - if idxs[i] >= len(n[i]) { - continue // We have completely drained all values in this slice. - } else if j == -1 { - // We haven't picked the minimum value yet. Pick this one. - j = i - continue - } - - // It this value key is lower than the candidate. -{{if eq $k.Name "Bytes" }} - cmp = bytes.Compare(n[i][idxs[i]], n[j][idxs[j]]) - if cmp == -1 { - j = i - } else if cmp == 0 { - // Duplicate value. Throw it away. - idxs[i]++ - } -{{else}} - if n[i][idxs[i]] < n[j][idxs[j]] { - j = i - } else if n[i][idxs[i]] == n[j][idxs[j]] { - // Duplicate value. Throw it away. - idxs[i]++ - } -{{end}} - } - - // We could have drained all of the values and be done... - if j == -1 { - break - } - - // First value to just append it and move on. - if len(result) == 0 { - result = append(result, n[j][idxs[j]]) - idxs[j]++ - continue - } - - // Append the minimum value to results if it's not a duplicate of - // the existing one. -{{if eq $k.Name "Bytes" }} - cmp = bytes.Compare(result[len(result)-1], n[j][idxs[j]]) - if cmp == -1 { - result = append(result, n[j][idxs[j]]) - } else if cmp == 0 { - // Duplicate so drop it. - } else { - panic("value being merged out of order.") - } -{{else}} - if result[len(result)-1] < n[j][idxs[j]] { - result = append(result, n[j][idxs[j]]) - } else if result[len(result)-1] == n[j][idxs[j]] { - // Duplicate so drop it. 
- } else { - panic("value being merged out of order.") - } -{{end}} - idxs[j]++ - } - return result -} - - -{{end}}{{end}} \ No newline at end of file diff --git a/pkg/slices/merge_test.go b/pkg/slices/merge_test.go deleted file mode 100644 index 439c9fbf691..00000000000 --- a/pkg/slices/merge_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package slices_test - -import ( - "fmt" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2/pkg/slices" -) - -func TestMergeSortedBytes(t *testing.T) { - cases := []struct { - Inputs [][][]byte - Out [][]byte - }{ - {Inputs: [][][]byte{}}, - {Inputs: [][][]byte{toBytes(0)}, Out: toBytes(0)}, - { - Inputs: [][][]byte{toBytes(2), [][]byte(nil), toBytes(2)}, - Out: toBytes(2), - }, - { - Inputs: [][][]byte{toBytes(9), toBytes(1, 16, 16), toBytes(5, 10)}, - Out: toBytes(1, 5, 9, 10, 16), - }, - { - Inputs: [][][]byte{toBytes(20), toBytes(16), toBytes(10)}, - Out: toBytes(10, 16, 20), - }, - { - Inputs: [][][]byte{toBytes(2, 2, 2, 2, 2, 2, 2, 2)}, - Out: toBytes(2), - }, - { - Inputs: [][][]byte{toBytes(2, 2, 2, 2, 2, 2, 2, 2), [][]byte(nil), [][]byte(nil), [][]byte(nil)}, - Out: toBytes(2), - }, - { - Inputs: [][][]byte{toBytes(1, 2, 3, 4, 5), toBytes(1, 2, 3, 4, 5), toBytes(1, 2, 3, 4, 5)}, - Out: toBytes(1, 2, 3, 4, 5), - }, - } - - for i, c := range cases { - t.Run(fmt.Sprintf("Example %d", i+1), func(t *testing.T) { - if got, exp := slices.MergeSortedBytes(c.Inputs...), c.Out; !reflect.DeepEqual(got, exp) { - t.Fatalf("got %v, expected %v", got, exp) - } - }) - } -} - -func toBytes(a ...int) [][]byte { - var result [][]byte - for _, v := range a { - result = append(result, []byte{byte(v)}) - } - return result -} - -func TestMergeSortedInts(t *testing.T) { - cases := []struct { - Inputs [][]int64 - Out []int64 - }{ - {Inputs: [][]int64{}}, - {Inputs: [][]int64{[]int64{0}}, Out: []int64{0}}, - { - Inputs: [][]int64{[]int64{2}, []int64(nil), []int64{2}}, - Out: []int64{2}, - }, - { - Inputs: [][]int64{[]int64{9}, []int64{1, 16, 16}, []int64{5, 10}}, - Out: []int64{1, 5, 9, 10, 16}, - }, - { - Inputs: [][]int64{[]int64{20}, []int64{16}, []int64{10}}, - Out: []int64{10, 16, 20}, - }, - { - Inputs: [][]int64{[]int64{2, 2, 2, 2, 2, 2, 2, 2}}, - Out: []int64{2}, - }, - { - Inputs: [][]int64{[]int64{2, 2, 2, 2, 2, 2, 2, 2}, []int64(nil), []int64(nil), []int64(nil)}, - Out: []int64{2}, - }, - { - Inputs: [][]int64{[]int64{1, 2, 3, 4, 5}, []int64{1, 2, 3, 4, 5}, []int64{1, 2, 3, 4, 5}}, - Out: []int64{1, 2, 3, 4, 5}, - }, - } - - for i, c := range cases { - t.Run(fmt.Sprintf("Example %d", i+1), func(t *testing.T) { - if got, exp := slices.MergeSortedInts(c.Inputs...), c.Out; !reflect.DeepEqual(got, exp) { - t.Fatalf("got %v, expected %v", got, exp) - } - }) - } -} diff --git a/pkg/slices/strings.go b/pkg/slices/strings.go deleted file mode 100644 index 8a9fb1a8531..00000000000 --- a/pkg/slices/strings.go +++ /dev/null @@ -1,50 +0,0 @@ -// Package slices contains functions to operate on slices treated as sets. -package slices // import "github.com/influxdata/influxdb/v2/pkg/slices" - -import "strings" - -// Union combines two string sets. -func Union(setA, setB []string, ignoreCase bool) []string { - for _, b := range setB { - if ignoreCase { - if !ExistsIgnoreCase(setA, b) { - setA = append(setA, b) - } - continue - } - if !Exists(setA, b) { - setA = append(setA, b) - } - } - return setA -} - -// Exists checks if a string is in a set. 
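Rounding off the merge helpers above: a minimal usage sketch of the int64 variant, assuming only the documented behaviour that the output is ascending with duplicates removed (the table-driven tests just shown cover the edge cases).

```
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/pkg/slices"
)

func main() {
	a := []int64{1, 5, 9}
	b := []int64{1, 2, 9, 16}
	c := []int64{2, 16, 16}

	// Duplicates are collapsed both across and within the input slices.
	fmt.Println(slices.MergeSortedInts(a, b, c)) // [1 2 5 9 16]
}
```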
-func Exists(set []string, find string) bool { - for _, s := range set { - if s == find { - return true - } - } - return false -} - -// ExistsIgnoreCase checks if a string is in a set but ignores its case. -func ExistsIgnoreCase(set []string, find string) bool { - find = strings.ToLower(find) - for _, s := range set { - if strings.ToLower(s) == find { - return true - } - } - return false -} - -// StringsToBytes converts a variable number of strings into a slice of []byte. -func StringsToBytes(s ...string) [][]byte { - a := make([][]byte, 0, len(s)) - for _, v := range s { - a = append(a, []byte(v)) - } - return a -} diff --git a/pkg/slices/strings_test.go b/pkg/slices/strings_test.go deleted file mode 100644 index 42d8153a657..00000000000 --- a/pkg/slices/strings_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package slices - -import "testing" - -func TestExists(t *testing.T) { - tests := []struct { - set []string - find string - output bool - }{ - { - set: []string{}, - find: "foo", - output: false, - }, - { - set: []string{"foo"}, - find: "foo", - output: true, - }, - { - set: []string{"bar", "foo"}, - find: "foo", - output: true, - }, - { - set: []string{"bar", "foo"}, - find: "stuff", - output: false, - }, - { - set: []string{"bar", "Foo"}, - find: "foo", - output: false, - }, - } - for i, tt := range tests { - actual := Exists(tt.set, tt.find) - if actual != tt.output { - t.Errorf("[%d] set: %v , find: %s , expected: %t , actual: %t", i, tt.set, tt.find, tt.output, actual) - } - } -} - -func TestExistsIgnoreCase(t *testing.T) { - tests := []struct { - set []string - find string - output bool - }{ - { - set: []string{}, - find: "foo", - output: false, - }, - { - set: []string{"foo"}, - find: "foo", - output: true, - }, - { - set: []string{"bar", "foo"}, - find: "foo", - output: true, - }, - { - set: []string{"bar", "foo"}, - find: "stuff", - output: false, - }, - { - set: []string{"bar", "Foo"}, - find: "foo", - output: true, - }, - } - for i, tt := range tests { - actual := ExistsIgnoreCase(tt.set, tt.find) - if actual != tt.output { - t.Errorf("[%d] set: %v , find: %s , expected: %t , actual: %t", i, tt.set, tt.find, tt.output, actual) - } - } -} diff --git a/pkg/slices/tmpldata b/pkg/slices/tmpldata deleted file mode 100644 index f4786858a55..00000000000 --- a/pkg/slices/tmpldata +++ /dev/null @@ -1,22 +0,0 @@ -[ - { - "Name":"Floats", - "Type":"float64" - }, - { - "Name":"Ints", - "Type":"int64" - }, - { - "Name":"UInts", - "Type":"uint64" - }, - { - "Name":"Strings", - "Type":"string" - }, - { - "Name":"Bytes", - "Type":"[]byte" - } -] diff --git a/pkg/snowflake/README.md b/pkg/snowflake/README.md deleted file mode 100644 index 92166b2361b..00000000000 --- a/pkg/snowflake/README.md +++ /dev/null @@ -1,38 +0,0 @@ -Snowflake ID generator -====================== - -This is a Go implementation of [Twitter Snowflake](https://blog.twitter.com/2010/announcing-snowflake). - -The most useful aspect of these IDs is they are _roughly_ sortable and when generated -at roughly the same time, should have values in close proximity to each other. 
- -IDs ---- - -Each id will be a 64-bit number represented, structured as follows: - - -``` -6 6 5 4 3 2 1 -3210987654321098765432109876543210987654321098765432109876543210 - -ttttttttttttttttttttttttttttttttttttttttttmmmmmmmmmmssssssssssss -``` - -where - -* s (sequence) is a 12-bit integer that increments if called multiple times for the same millisecond -* m (machine id) is a 10-bit integer representing the server id -* t (time) is a 42-bit integer representing the current timestamp in milliseconds - the number of milliseconds to have elapsed since 1491696000000 or 2017-04-09T00:00:00Z - -### String Encoding - -The 64-bit unsigned integer is base-63 encoded using the following URL-safe characters, which are ordered -according to their ASCII value. - -``` -0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz~ -``` - -A binary sort of a list of encoded values will be correctly ordered according to the numerical representation. \ No newline at end of file diff --git a/pkg/snowflake/gen.go b/pkg/snowflake/gen.go deleted file mode 100644 index c245f1b67bf..00000000000 --- a/pkg/snowflake/gen.go +++ /dev/null @@ -1,124 +0,0 @@ -package snowflake - -import ( - "fmt" - "sync/atomic" - "time" -) - -const ( - epoch = 1491696000000 - serverBits = 10 - sequenceBits = 12 - timeBits = 42 - serverShift = sequenceBits - timeShift = sequenceBits + serverBits - serverMax = ^(-1 << serverBits) - sequenceMask = ^(-1 << sequenceBits) - timeMask = ^(-1 << timeBits) -) - -type Generator struct { - state uint64 - machine uint64 -} - -func New(machineID int) *Generator { - if machineID < 0 || machineID > serverMax { - panic(fmt.Errorf("invalid machine id; must be 0 ≤ id < %d", serverMax)) - } - return &Generator{ - state: 0, - machine: uint64(machineID << serverShift), - } -} - -func (g *Generator) MachineID() int { - return int(g.machine >> serverShift) -} - -func (g *Generator) Next() uint64 { - var state uint64 - - // we attempt 100 times to update the millisecond part of the state - // and increment the sequence atomically. each attempt is approx ~30ns - // so we spend around ~3µs total. - for i := 0; i < 100; i++ { - t := (now() - epoch) & timeMask - current := atomic.LoadUint64(&g.state) - currentTime := current >> timeShift & timeMask - currentSeq := current & sequenceMask - - // this sequence of conditionals ensures a monotonically increasing - // state. - - switch { - // if our time is in the future, use that with a zero sequence number. - case t > currentTime: - state = t << timeShift - - // we now know that our time is at or before the current time. - // if we're at the maximum sequence, bump to the next millisecond - case currentSeq == sequenceMask: - state = (currentTime + 1) << timeShift - - // otherwise, increment the sequence. - default: - state = current + 1 - } - - if atomic.CompareAndSwapUint64(&g.state, current, state) { - break - } - - state = 0 - } - - // since we failed 100 times, there's high contention. bail out of the - // loop to bound the time we'll spend in this method, and just add - // one to the counter. this can cause millisecond drift, but hopefully - // some CAS eventually succeeds and fixes the milliseconds. additionally, - // if the sequence is already at the maximum, adding 1 here can cause - // it to roll over into the machine id. giving the CAS 100 attempts - // helps to avoid these problems. 
- if state == 0 { - state = atomic.AddUint64(&g.state, 1) - } - - return state | g.machine -} - -func (g *Generator) NextString() string { - var s [11]byte - encode(&s, g.Next()) - return string(s[:]) -} - -func (g *Generator) AppendNext(s *[11]byte) { - encode(s, g.Next()) -} - -func now() uint64 { return uint64(time.Now().UnixNano() / 1e6) } - -var digits = [...]byte{ - '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', - 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', - 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', - 'U', 'V', 'W', 'X', 'Y', 'Z', '_', 'a', 'b', 'c', - 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', - 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', - 'x', 'y', 'z', '~'} - -func encode(s *[11]byte, n uint64) { - s[10], n = digits[n&0x3f], n>>6 - s[9], n = digits[n&0x3f], n>>6 - s[8], n = digits[n&0x3f], n>>6 - s[7], n = digits[n&0x3f], n>>6 - s[6], n = digits[n&0x3f], n>>6 - s[5], n = digits[n&0x3f], n>>6 - s[4], n = digits[n&0x3f], n>>6 - s[3], n = digits[n&0x3f], n>>6 - s[2], n = digits[n&0x3f], n>>6 - s[1], n = digits[n&0x3f], n>>6 - s[0] = digits[n&0x3f] -} diff --git a/pkg/snowflake/gen_test.go b/pkg/snowflake/gen_test.go deleted file mode 100644 index 6ff00cbe583..00000000000 --- a/pkg/snowflake/gen_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package snowflake - -import ( - "fmt" - "math/rand" - "reflect" - "sort" - "sync/atomic" - "testing" -) - -func TestEncode(t *testing.T) { - tests := []struct { - v uint64 - exp string - }{ - {0x000, "00000000000"}, - {0x001, "00000000001"}, - {0x03f, "0000000000~"}, - {0x07f, "0000000001~"}, - {0xf07f07f07f07f07f, "F1~1~1~1~1~"}, - } - for _, test := range tests { - t.Run(fmt.Sprintf("0x%03x→%s", test.v, test.exp), func(t *testing.T) { - var s [11]byte - encode(&s, test.v) - if got, exp := string(s[:]), test.exp; got != exp { - t.Fatalf("got %q, expected %q", got, exp) - } - }) - } -} - -// TestSorting verifies numbers using base 63 encoding are ordered according to their numerical representation. 
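To make the 42/10/12 bit layout concrete, here is a small sketch that packs and unpacks the three fields with the same shifts and masks as the constants above. It is illustrative only; the real generator additionally guarantees monotonic IDs through the CAS loop in Next.

```
package main

import "fmt"

// Same layout as the generator above: 42 bits of milliseconds since the
// custom epoch (1491696000000), then 10 bits of machine id, then 12 bits
// of per-millisecond sequence.
const (
	sequenceBits = 12
	serverBits   = 10
	serverShift  = sequenceBits              // 12
	timeShift    = sequenceBits + serverBits // 22
	sequenceMask = (1 << sequenceBits) - 1
	serverMask   = (1 << serverBits) - 1
)

func main() {
	// Pack a relative timestamp (ms since the epoch), machine id and sequence.
	var t, machine, seq uint64 = 123456789, 42, 7
	id := t<<timeShift | machine<<serverShift | seq

	// Unpack the three fields again.
	fmt.Println(id>>timeShift, (id>>serverShift)&serverMask, id&sequenceMask) // 123456789 42 7
}
```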
-func TestSorting(t *testing.T) { - var ( - vals = make([]string, 1000) - exp = make([]string, 1000) - ) - - for i := 0; i < len(vals); i++ { - var s [11]byte - encode(&s, uint64(i*47)) - vals[i] = string(s[:]) - exp[i] = string(s[:]) - } - - // randomize them - shuffle(len(vals), func(i, j int) { - vals[i], vals[j] = vals[j], vals[i] - }) - - sort.Strings(vals) - if !reflect.DeepEqual(vals, exp) { - t.Fatalf("got %v, expected %v", vals, exp) - } -} - -func TestMachineID(t *testing.T) { - for i := 0; i < serverMax; i++ { - if got, exp := New(i).MachineID(), i; got != exp { - t.Fatalf("got %d, expected %d", got, exp) - } - } -} - -func TestNextMonotonic(t *testing.T) { - g := New(10) - out := make([]string, 10000) - - for i := range out { - out[i] = g.NextString() - } - - // ensure they are all distinct and increasing - for i := range out[1:] { - if out[i] >= out[i+1] { - t.Fatal("bad entries:", out[i], out[i+1]) - } - } -} - -func BenchmarkEncode(b *testing.B) { - b.ReportAllocs() - var s [11]byte - for i := 0; i < b.N; i++ { - encode(&s, 100) - } -} - -var blackhole uint64 // to make sure the g.Next calls are not removed - -func BenchmarkNext(b *testing.B) { - g := New(10) - - for i := 0; i < b.N; i++ { - blackhole += g.Next() - } -} - -func BenchmarkNextParallel(b *testing.B) { - g := New(1) - - b.RunParallel(func(pb *testing.PB) { - var lblackhole uint64 - for pb.Next() { - lblackhole += g.Next() - } - atomic.AddUint64(&blackhole, lblackhole) - }) -} - -func shuffle(n int, swap func(i, j int)) { - for i := n - 1; i > 0; i-- { - j := rand.Intn(i + 1) - swap(i, j) - } -} diff --git a/pkg/tar/stream.go b/pkg/tar/stream.go deleted file mode 100644 index 5b6328dc332..00000000000 --- a/pkg/tar/stream.go +++ /dev/null @@ -1,169 +0,0 @@ -package tar - -import ( - "archive/tar" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "time" - - "github.com/influxdata/influxdb/v2/pkg/file" -) - -// Stream is a convenience function for creating a tar of a shard dir. It walks over the directory and subdirs, -// possibly writing each file to a tar writer stream. By default StreamFile is used, which will result in all files -// being written. A custom writeFunc can be passed so that each file may be written, modified+written, or skipped -// depending on the custom logic. -func Stream(w io.Writer, dir, relativePath string, writeFunc func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error) error { - tw := tar.NewWriter(w) - defer tw.Close() - - if writeFunc == nil { - writeFunc = StreamFile - } - - return filepath.WalkDir(dir, func(path string, entry os.DirEntry, err error) error { - if err != nil { - return err - } - - // Skip adding an entry for the root dir - if dir == path && entry.IsDir() { - return nil - } - - // Figure out the full relative path including any sub-dirs - subDir, _ := filepath.Split(path) - subDir, err = filepath.Rel(dir, subDir) - if err != nil { - return err - } - f, err := entry.Info() - if err != nil { - return err - } - - return writeFunc(f, filepath.Join(relativePath, subDir), path, tw) - }) -} - -// Generates a filtering function for Stream that checks an incoming file, and only writes the file to the stream if -// its mod time is later than since. 
Example: to tar only files newer than a certain datetime, use -// tar.Stream(w, dir, relativePath, SinceFilterTarFile(datetime)) -func SinceFilterTarFile(since time.Time) func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error { - return func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error { - if f.ModTime().After(since) { - return StreamFile(f, shardRelativePath, fullPath, tw) - } - return nil - } -} - -// stream a single file to tw, extending the header name using the shardRelativePath -func StreamFile(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error { - return StreamRenameFile(f, f.Name(), shardRelativePath, fullPath, tw) -} - -// Stream a single file to tw, using tarHeaderFileName instead of the actual filename -// e.g., when we want to write a *.tmp file using the original file's non-tmp name. -func StreamRenameFile(f os.FileInfo, tarHeaderFileName, relativePath, fullPath string, tw *tar.Writer) error { - h, err := tar.FileInfoHeader(f, f.Name()) - if err != nil { - return err - } - h.Name = filepath.ToSlash(filepath.Join(relativePath, tarHeaderFileName)) - - if err := tw.WriteHeader(h); err != nil { - return err - } - - if !f.Mode().IsRegular() { - return nil - } - - fr, err := os.Open(fullPath) - if err != nil { - return err - } - - defer fr.Close() - - _, err = io.CopyN(tw, fr, h.Size) - - return err -} - -// Restore reads a tar archive from r and extracts all of its files into dir, -// using only the base name of each file. -func Restore(r io.Reader, dir string) error { - tr := tar.NewReader(r) - for { - if err := extractFile(tr, dir); err == io.EOF { - break - } else if err != nil { - return err - } - } - - return file.SyncDir(dir) -} - -// extractFile copies the next file from tr into dir, using the file's base name. -func extractFile(tr *tar.Reader, dir string) error { - // Read next archive file. - hdr, err := tr.Next() - if err != nil { - return err - } - - // The hdr.Name is the relative path of the file from the root data dir. - // e.g (db/rp/1/xxxxx.tsm or db/rp/1/index/xxxxxx.tsi) - sections := strings.Split(filepath.FromSlash(hdr.Name), string(filepath.Separator)) - if len(sections) < 3 { - return fmt.Errorf("invalid archive path: %s", hdr.Name) - } - - relativePath := filepath.Join(sections[3:]...) - - subDir, _ := filepath.Split(relativePath) - // If this is a directory entry (usually just `index` for tsi), create it an move on. - if hdr.Typeflag == tar.TypeDir { - return os.MkdirAll(filepath.Join(dir, subDir), os.FileMode(hdr.Mode).Perm()) - } - - // Make sure the dir we need to write into exists. It should, but just double check in - // case we get a slightly invalid tarball. - if subDir != "" { - if err := os.MkdirAll(filepath.Join(dir, subDir), 0755); err != nil { - return err - } - } - - destPath := filepath.Join(dir, relativePath) - tmp := destPath + ".tmp" - - // Create new file on disk. - f, err := os.OpenFile(tmp, os.O_CREATE|os.O_RDWR, os.FileMode(hdr.Mode).Perm()) - if err != nil { - return err - } - defer f.Close() - - // Copy from archive to the file. - if _, err := io.CopyN(f, tr, hdr.Size); err != nil { - return err - } - - // Sync to disk & close. 
- if err := f.Sync(); err != nil { - return err - } - - if err := f.Close(); err != nil { - return err - } - - return file.RenameFile(tmp, destPath) -} diff --git a/pkg/tar/untar.go b/pkg/tar/untar.go deleted file mode 100644 index f8b2ca88109..00000000000 --- a/pkg/tar/untar.go +++ /dev/null @@ -1,83 +0,0 @@ -package tar - -import ( - "archive/tar" - "compress/gzip" - "errors" - "io" - "os" - "path/filepath" - - errors2 "github.com/influxdata/influxdb/v2/pkg/errors" -) - -// Untar takes a destination path and a reader; a tar reader loops over the tarfile -// creating the file structure at 'dir' along the way, and writing any files -func Untar(dir string, r io.Reader) (rErr error) { - - gzr, err := gzip.NewReader(r) - if err != nil { - return err - } - defer errors2.Capture(&rErr, gzr.Close)() - - tr := tar.NewReader(gzr) - - for { - header, err := tr.Next() - - switch { - - // if no more files are found return - case errors.Is(err, io.EOF): - return nil - - // return any other error - case err != nil: - return err - - // if the header is nil, just skip it (not sure how this happens) - case header == nil: - continue - } - - // the target location where the dir/file should be created - target := filepath.Join(dir, header.Name) - - // the following switch could also be done using fi.Mode(), not sure if there - // a benefit of using one vs. the other. - // fi := header.FileInfo() - - // check the file type - switch header.Typeflag { - - // if its a dir and it doesn't exist create it - case tar.TypeDir: - if _, err := os.Stat(target); err != nil { - if err := os.MkdirAll(target, 0755); err != nil { - return err - } - } - - // if it's a file create it - case tar.TypeReg: - if err := untarFile(target, tr, header); err != nil { - return err - } - } - } -} - -func untarFile(target string, tr *tar.Reader, header *tar.Header) (rErr error) { - f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) - if err != nil { - return err - } - defer errors2.Capture(&rErr, f.Close)() - - // copy over contents - if _, err := io.Copy(f, tr); err != nil { - return err - } - return nil -} diff --git a/pkg/testing/assert/assertions.go b/pkg/testing/assert/assertions.go deleted file mode 100644 index 3699d312e52..00000000000 --- a/pkg/testing/assert/assertions.go +++ /dev/null @@ -1,116 +0,0 @@ -package assert - -import ( - "bytes" - "fmt" - "reflect" -) - -type TestingT interface { - Errorf(format string, args ...interface{}) -} - -type helper interface { - Helper() -} - -// Equal asserts that the values are equal and returns -// true if the assertion was successful. -func Equal(t TestingT, got, expected interface{}, msgAndArgs ...interface{}) bool { - if ValuesAreEqual(got, expected) { - return true - } - - if th, ok := t.(helper); ok { - th.Helper() - } - - got, expected = formatValues(got, expected) - fail(t, fmt.Sprintf("Not Equal: got=%s, exp=%s", got, expected), msgAndArgs...) - return false -} - -// NotEqual asserts that the values are not equal and returns -// true if the assertion was successful. -func NotEqual(t TestingT, got, expected interface{}, msgAndArgs ...interface{}) bool { - if !ValuesAreEqual(got, expected) { - return true - } - - if th, ok := t.(helper); ok { - th.Helper() - } - _, expected = formatValues(got, expected) - fail(t, fmt.Sprintf("Equal: should not be %s", expected), msgAndArgs...) - return false -} - -// NoError asserts that err is nil and returns -// true if the assertion was successful. 
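Stepping back to the tar helpers above, here is a hedged end-to-end sketch: stream a shard directory into an archive, keeping only recently modified files, then restore it elsewhere. The paths and the one-hour cutoff are invented for illustration; the `db/rp/1` prefix mirrors the db/rp/shard layout that Restore expects when it strips the first three path elements.

```
package main

import (
	"bytes"
	"log"
	"os"
	"time"

	"github.com/influxdata/influxdb/v2/pkg/tar"
)

func main() {
	src, dst := "/var/lib/influxdb/data/db/rp/1", "/tmp/restored-shard"

	// Stream files modified in the last hour into an in-memory archive.
	// Passing nil instead of the filter would stream every file.
	var buf bytes.Buffer
	since := time.Now().Add(-time.Hour)
	if err := tar.Stream(&buf, src, "db/rp/1", tar.SinceFilterTarFile(since)); err != nil {
		log.Fatal(err)
	}

	// Unpack the archive into dst, keeping only the per-shard relative paths.
	if err := os.MkdirAll(dst, 0755); err != nil {
		log.Fatal(err)
	}
	if err := tar.Restore(&buf, dst); err != nil {
		log.Fatal(err)
	}
}
```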
-func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { - if err != nil { - return fail(t, fmt.Sprintf("unexpected error: %+v", err), msgAndArgs...) - } - - return true -} - -// PanicsWithValue asserts that fn panics, and that -// the recovered panic value equals the expected panic value. -// -// Returns true if the assertion was successful. -func PanicsWithValue(t TestingT, expected interface{}, fn PanicTestFunc, msgAndArgs ...interface{}) bool { - if th, ok := t.(helper); ok { - th.Helper() - } - if funcDidPanic, got := didPanic(fn); !funcDidPanic { - return fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", fn, got), msgAndArgs...) - } else if got != expected { - return fail(t, fmt.Sprintf("func %#v should panic with value:\t%v\n\r\tPanic value:\t%v", fn, expected, got), msgAndArgs...) - } - - return true -} - -// ValuesAreEqual determines if the values are equal. -func ValuesAreEqual(got, expected interface{}) bool { - if got == nil || expected == nil { - return got == expected - } - - if exp, ok := expected.([]byte); ok { - act, ok := got.([]byte) - if !ok { - return false - } else if exp == nil || act == nil { - return exp == nil && act == nil - } - return bytes.Equal(exp, act) - } - - return reflect.DeepEqual(expected, got) - -} - -// ValuesAreExactlyEqual determines if the values are equal and -// their types are the same. -func ValuesAreExactlyEqual(got, expected interface{}) bool { - if ValuesAreEqual(got, expected) { - return true - } - - actualType := reflect.TypeOf(got) - if actualType == nil { - return false - } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { - // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), got) - } - - return false -} - -// PanicTestFunc defines a function that is called to determine whether a panic occurs. -type PanicTestFunc func() diff --git a/pkg/testing/assert/doc.go b/pkg/testing/assert/doc.go deleted file mode 100644 index 174facb15f0..00000000000 --- a/pkg/testing/assert/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package assert provides helper functions that can be used with the standard Go testing package. -*/ -package assert diff --git a/pkg/testing/assert/helper.go b/pkg/testing/assert/helper.go deleted file mode 100644 index 9afc21b8857..00000000000 --- a/pkg/testing/assert/helper.go +++ /dev/null @@ -1,55 +0,0 @@ -package assert - -import ( - "fmt" - "reflect" -) - -func fail(t TestingT, failureMsg string, msgAndArgs ...interface{}) bool { - if th, ok := t.(helper); ok { - th.Helper() - } - - msg := formatMsgAndArgs(msgAndArgs...) - if msg == "" { - t.Errorf("%s", failureMsg) - } else { - t.Errorf("%s: %s", failureMsg, msg) - } - - return false -} - -func formatValues(got, expected interface{}) (string, string) { - if reflect.TypeOf(got) != reflect.TypeOf(expected) { - return fmt.Sprintf("%T(%#v)", got, got), fmt.Sprintf("%T(%#v)", expected, expected) - } - - return fmt.Sprintf("%#v", got), fmt.Sprintf("%#v", expected) -} - -func formatMsgAndArgs(msgAndArgs ...interface{}) string { - if len(msgAndArgs) == 0 || msgAndArgs == nil { - return "" - } - if len(msgAndArgs) == 1 { - return msgAndArgs[0].(string) - } - if len(msgAndArgs) > 1 { - return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) - } - return "" -} - -// didPanic returns true if fn panics when called. 
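A short sketch of how the assert helpers above read inside an ordinary Go test; the test name and values are invented for illustration.

```
package example_test

import (
	"testing"

	"github.com/influxdata/influxdb/v2/pkg/testing/assert"
)

func TestExample(t *testing.T) {
	// Equal and NotEqual report failures via t.Errorf and return whether the
	// assertion held, so callers can bail out early if they want to.
	assert.Equal(t, 1+1, 2, "basic arithmetic")
	assert.NotEqual(t, "a", "b")

	// []byte values are compared with bytes.Equal rather than reflect.DeepEqual.
	assert.Equal(t, []byte("foo"), []byte("foo"))

	// PanicsWithValue asserts both that fn panics and what it panics with.
	assert.PanicsWithValue(t, "boom", func() { panic("boom") })
}
```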
-func didPanic(fn PanicTestFunc) (panicked bool, message interface{}) { - defer func() { - if message = recover(); message != nil { - panicked = true - } - }() - - fn() - - return panicked, message -} diff --git a/pkg/testttp/http.go b/pkg/testttp/http.go deleted file mode 100644 index 05e4e408b6c..00000000000 --- a/pkg/testttp/http.go +++ /dev/null @@ -1,195 +0,0 @@ -package testttp - -import ( - "bytes" - "context" - "encoding/json" - "io" - "net/http" - "net/http/httptest" - "net/url" - "testing" -) - -// Req is a request builder. -type Req struct { - t testing.TB - - req *http.Request -} - -// HTTP runs creates a request for an http call. -func HTTP(t testing.TB, method, addr string, body io.Reader) *Req { - return &Req{ - t: t, - req: httptest.NewRequest(method, addr, body), - } -} - -// Delete creates a DELETE request. -func Delete(t testing.TB, addr string) *Req { - return HTTP(t, http.MethodDelete, addr, nil) -} - -// Get creates a GET request. -func Get(t testing.TB, addr string) *Req { - return HTTP(t, http.MethodGet, addr, nil) -} - -// Patch creates a PATCH request. -func Patch(t testing.TB, addr string, body io.Reader) *Req { - return HTTP(t, http.MethodPatch, addr, body) -} - -// PatchJSON creates a PATCH request with a json encoded body. -func PatchJSON(t testing.TB, addr string, v interface{}) *Req { - return HTTP(t, http.MethodPatch, addr, mustEncodeJSON(t, v)) -} - -// Post creates a POST request. -func Post(t testing.TB, addr string, body io.Reader) *Req { - return HTTP(t, http.MethodPost, addr, body) -} - -// PostJSON returns a POST request with a json encoded body. -func PostJSON(t testing.TB, addr string, v interface{}) *Req { - return Post(t, addr, mustEncodeJSON(t, v)) -} - -// Put creates a PUT request. -func Put(t testing.TB, addr string, body io.Reader) *Req { - return HTTP(t, http.MethodPut, addr, body) -} - -// PutJSON creates a PUT request with a json encoded body. -func PutJSON(t testing.TB, addr string, v interface{}) *Req { - return HTTP(t, http.MethodPut, addr, mustEncodeJSON(t, v)) -} - -// Do runs the request against the provided handler. -func (r *Req) Do(handler http.Handler) *Resp { - rec := httptest.NewRecorder() - - handler.ServeHTTP(rec, r.req) - - return &Resp{ - t: r.t, - debug: true, - Req: r.req, - Rec: rec, - } -} - -func (r *Req) SetFormValue(k, v string) *Req { - if r.req.Form == nil { - r.req.Form = make(url.Values) - } - r.req.Form.Set(k, v) - return r -} - -// Headers allows the user to set headers on the http request. -func (r *Req) Headers(k, v string, rest ...string) *Req { - headers := append(rest, k, v) - for i := 0; i < len(headers); i += 2 { - if i+1 >= len(headers) { - break - } - k, v := headers[i], headers[i+1] - r.req.Header.Add(k, v) - } - return r -} - -// WithCtx sets the ctx on the request. -func (r *Req) WithCtx(ctx context.Context) *Req { - r.req = r.req.WithContext(ctx) - return r -} - -// WrapCtx provides means to wrap a request context. This is useful for stuffing in the -// auth stuffs that are required at times. -func (r *Req) WrapCtx(fn func(ctx context.Context) context.Context) *Req { - return r.WithCtx(fn(r.req.Context())) -} - -// Resp is a http recorder wrapper. -type Resp struct { - t testing.TB - - debug bool - - Req *http.Request - Rec *httptest.ResponseRecorder -} - -// Debug sets the debugger. If true, the debugger will print the body of the response -// when the expected status is not received. 
-func (r *Resp) Debug(b bool) *Resp { - r.debug = b - return r -} - -// Expect allows the assertions against the raw Resp. -func (r *Resp) Expect(fn func(*Resp)) *Resp { - fn(r) - return r -} - -// ExpectStatus compares the expected status code against the recorded status code. -func (r *Resp) ExpectStatus(code int) *Resp { - r.t.Helper() - - if r.Rec.Code != code { - r.t.Errorf("unexpected status code: expected=%d got=%d", code, r.Rec.Code) - if r.debug { - r.t.Logf("body: %v", r.Rec.Body.String()) - } - } - return r -} - -// ExpectBody provides an assertion against the recorder body. -func (r *Resp) ExpectBody(fn func(body *bytes.Buffer)) *Resp { - fn(r.Rec.Body) - return r -} - -// ExpectHeaders asserts that multiple headers with values exist in the recorder. -func (r *Resp) ExpectHeaders(h map[string]string) *Resp { - for k, v := range h { - r.ExpectHeader(k, v) - } - - return r -} - -// ExpectHeader asserts that the header is in the recorder. -func (r *Resp) ExpectHeader(k, v string) *Resp { - r.t.Helper() - - vals, ok := r.Rec.Header()[k] - if !ok { - r.t.Errorf("did not find expected header: %q", k) - return r - } - - for _, vv := range vals { - if vv == v { - return r - } - } - r.t.Errorf("did not find expected value for header %q; got: %v", k, vals) - - return r -} - -func mustEncodeJSON(t testing.TB, v interface{}) *bytes.Buffer { - t.Helper() - - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(v); err != nil { - t.Fatal(err) - } - return &buf -} diff --git a/pkg/testttp/http_test.go b/pkg/testttp/http_test.go deleted file mode 100644 index 5bed499486b..00000000000 --- a/pkg/testttp/http_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package testttp_test - -import ( - "bytes" - "encoding/json" - "io" - "net/http" - "strings" - "testing" - - "github.com/influxdata/influxdb/v2/pkg/testttp" -) - -func TestHTTP(t *testing.T) { - svr := newMux() - t.Run("Delete", func(t *testing.T) { - testttp. - Delete(t, "/"). - Do(svr). - ExpectStatus(http.StatusNoContent) - }) - - t.Run("Get", func(t *testing.T) { - testttp. - Get(t, "/"). - Do(svr). - ExpectStatus(http.StatusOK). - ExpectBody(assertBody(t, http.MethodGet)) - }) - - t.Run("Patch", func(t *testing.T) { - testttp. - Patch(t, "/", nil). - Do(svr). - ExpectStatus(http.StatusPartialContent). - ExpectBody(assertBody(t, http.MethodPatch)) - }) - - t.Run("PatchJSON", func(t *testing.T) { - testttp. - PatchJSON(t, "/", map[string]string{"k": "t"}). - Do(svr). - ExpectStatus(http.StatusPartialContent). - ExpectBody(assertBody(t, http.MethodPatch)) - }) - - t.Run("Post", func(t *testing.T) { - t.Run("basic", func(t *testing.T) { - testttp. - Post(t, "/", nil). - Do(svr). - ExpectStatus(http.StatusCreated). - ExpectBody(assertBody(t, http.MethodPost)) - }) - - t.Run("with form values", func(t *testing.T) { - svr := http.NewServeMux() - svr.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - r.ParseForm() - w.WriteHeader(http.StatusOK) - w.Write([]byte(r.FormValue("key"))) - })) - - testttp. - Post(t, "/", nil). - SetFormValue("key", "val"). - Do(svr). - ExpectStatus(http.StatusOK). - ExpectBody(func(body *bytes.Buffer) { - if expected, got := "val", body.String(); expected != got { - t.Fatalf("did not get form value; expected=%q got=%q", expected, got) - } - }) - }) - }) - - t.Run("PostJSON", func(t *testing.T) { - testttp. - PostJSON(t, "/", map[string]string{"k": "v"}). - Do(svr). - ExpectStatus(http.StatusCreated). 
- ExpectBody(assertBody(t, http.MethodPost)) - }) - - t.Run("Put", func(t *testing.T) { - testttp. - Put(t, "/", nil). - Do(svr). - ExpectStatus(http.StatusAccepted). - ExpectBody(assertBody(t, http.MethodPut)) - }) - - t.Run("PutJSON", func(t *testing.T) { - testttp. - PutJSON(t, "/", map[string]string{"k": "t"}). - Do(svr). - ExpectStatus(http.StatusAccepted). - ExpectBody(assertBody(t, http.MethodPut)) - }) - - t.Run("Headers", func(t *testing.T) { - testttp. - Post(t, "/", strings.NewReader(`a: foo`)). - Headers("Content-Type", "text/yml"). - Do(svr). - Expect(func(resp *testttp.Resp) { - equals(t, "text/yml", resp.Req.Header.Get("Content-Type")) - }) - }) -} - -type foo struct { - Name, Thing, Method string -} - -func newMux() http.Handler { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - switch req.Method { - case http.MethodGet: - writeFn(w, req.Method, http.StatusOK) - case http.MethodPost: - writeFn(w, req.Method, http.StatusCreated) - case http.MethodPut: - writeFn(w, req.Method, http.StatusAccepted) - case http.MethodPatch: - writeFn(w, req.Method, http.StatusPartialContent) - case http.MethodDelete: - w.WriteHeader(http.StatusNoContent) - } - }) - return mux -} - -func assertBody(t *testing.T, method string) func(*bytes.Buffer) { - return func(buf *bytes.Buffer) { - var f foo - if err := json.NewDecoder(buf).Decode(&f); err != nil { - t.Fatal(err) - } - expected := foo{Name: "name", Thing: "thing", Method: method} - equals(t, expected, f) - } -} - -func writeFn(w http.ResponseWriter, method string, statusCode int) { - f := foo{Name: "name", Thing: "thing", Method: method} - r, err := encodeBuf(f) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - return - } - w.WriteHeader(statusCode) - if _, err := io.Copy(w, r); err != nil { - w.WriteHeader(http.StatusInternalServerError) - return - } -} - -func equals(t *testing.T, expected, actual interface{}) { - t.Helper() - if expected == actual { - return - } - t.Errorf("expected: %v\tactual: %v", expected, actual) -} - -func encodeBuf(v interface{}) (io.Reader, error) { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(v); err != nil { - return nil, err - } - return &buf, nil -} diff --git a/pkg/tracing/context.go b/pkg/tracing/context.go deleted file mode 100644 index a85ffae58c0..00000000000 --- a/pkg/tracing/context.go +++ /dev/null @@ -1,30 +0,0 @@ -package tracing - -import "context" - -type ( - spanContextKey struct{} - traceContextKey struct{} -) - -// NewContextWithSpan returns a new context with the given Span added. -func NewContextWithSpan(ctx context.Context, c *Span) context.Context { - return context.WithValue(ctx, spanContextKey{}, c) -} - -// SpanFromContext returns the Span associated with ctx or nil if no Span has been assigned. -func SpanFromContext(ctx context.Context) *Span { - c, _ := ctx.Value(spanContextKey{}).(*Span) - return c -} - -// NewContextWithTrace returns a new context with the given Trace added. -func NewContextWithTrace(ctx context.Context, t *Trace) context.Context { - return context.WithValue(ctx, traceContextKey{}, t) -} - -// TraceFromContext returns the Trace associated with ctx or nil if no Trace has been assigned. 
-func TraceFromContext(ctx context.Context) *Trace { - c, _ := ctx.Value(traceContextKey{}).(*Trace) - return c -} diff --git a/pkg/tracing/doc.go b/pkg/tracing/doc.go deleted file mode 100644 index 36bf889a14a..00000000000 --- a/pkg/tracing/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Package tracing provides a way for capturing hierarchical traces. - -To start a new trace with a root span named select - - trace, span := tracing.NewTrace("select") - -It is recommended that a span be forwarded to callees using the -context package. Firstly, create a new context with the span associated -as follows - - ctx = tracing.NewContextWithSpan(ctx, span) - -followed by calling the API with the new context - - SomeAPI(ctx, ...) - -Once the trace is complete, it may be converted to a graph with the Tree method. - - tree := t.Tree() - -The tree is intended to be used with the Walk function in order to generate -different presentations. The default Tree#String method returns a tree. -*/ -package tracing diff --git a/pkg/tracing/fields/field.go b/pkg/tracing/fields/field.go deleted file mode 100644 index bc96be1fbfa..00000000000 --- a/pkg/tracing/fields/field.go +++ /dev/null @@ -1,117 +0,0 @@ -package fields - -import ( - "fmt" - "math" - "time" -) - -type fieldType int - -const ( - stringType fieldType = iota - boolType - int64Type - uint64Type - durationType - float64Type -) - -// Field instances are constructed via Bool, String, and so on. -// -// "heavily influenced by" (i.e., partially stolen from) -// https://github.com/opentracing/opentracing-go/log -type Field struct { - key string - fieldType fieldType - numericVal int64 - stringVal string -} - -// String adds a string-valued key:value pair to a Span.LogFields() record -func String(key, val string) Field { - return Field{ - key: key, - fieldType: stringType, - stringVal: val, - } -} - -// Bool adds a bool-valued key:value pair to a Span.LogFields() record -func Bool(key string, val bool) Field { - var numericVal int64 - if val { - numericVal = 1 - } - return Field{ - key: key, - fieldType: boolType, - numericVal: numericVal, - } -} - -// / Int64 adds an int64-valued key:value pair to a Span.LogFields() record -func Int64(key string, val int64) Field { - return Field{ - key: key, - fieldType: int64Type, - numericVal: val, - } -} - -// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record -func Uint64(key string, val uint64) Field { - return Field{ - key: key, - fieldType: uint64Type, - numericVal: int64(val), - } -} - -// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record -func Duration(key string, val time.Duration) Field { - return Field{ - key: key, - fieldType: durationType, - numericVal: int64(val), - } -} - -// Float64 adds a float64-valued key:value pair to a Span.LogFields() record -func Float64(key string, val float64) Field { - return Field{ - key: key, - fieldType: float64Type, - numericVal: int64(math.Float64bits(val)), - } -} - -// Key returns the field's key. -func (lf Field) Key() string { - return lf.key -} - -// Value returns the field's value as interface{}. 
-func (lf Field) Value() interface{} { - switch lf.fieldType { - case stringType: - return lf.stringVal - case boolType: - return lf.numericVal != 0 - case int64Type: - return int64(lf.numericVal) - case uint64Type: - return uint64(lf.numericVal) - case durationType: - return time.Duration(lf.numericVal) - case float64Type: - return math.Float64frombits(uint64(lf.numericVal)) - default: - return nil - } -} - -// String returns a string representation of the key and value. -func (lf Field) String() string { - return fmt.Sprint(lf.key, ": ", lf.Value()) -} diff --git a/pkg/tracing/fields/fields.go b/pkg/tracing/fields/fields.go deleted file mode 100644 index 825cf255095..00000000000 --- a/pkg/tracing/fields/fields.go +++ /dev/null @@ -1,61 +0,0 @@ -package fields - -import "sort" - -type Fields []Field - -// Merge merges other with the current set, replacing any matching keys from other. -func (fs *Fields) Merge(other Fields) { - var list []Field - i, j := 0, 0 - for i < len(*fs) && j < len(other) { - if (*fs)[i].key < other[j].key { - list = append(list, (*fs)[i]) - i++ - } else if (*fs)[i].key > other[j].key { - list = append(list, other[j]) - j++ - } else { - // equal, then "other" replaces existing key - list = append(list, other[j]) - i++ - j++ - } - } - - if i < len(*fs) { - list = append(list, (*fs)[i:]...) - } else if j < len(other) { - list = append(list, other[j:]...) - } - - *fs = list -} - -// New creates a new set of fields, sorted by Key. -// Duplicate keys are removed. -func New(args ...Field) Fields { - fields := Fields(args) - sort.Slice(fields, func(i, j int) bool { - return fields[i].key < fields[j].key - }) - - // deduplicate - // loop invariant: fields[:i] has no duplicates - for i := 0; i < len(fields)-1; i++ { - j := i + 1 - // find all duplicate keys - for j < len(fields) && fields[i].key == fields[j].key { - j++ - } - - d := (j - 1) - i // number of duplicate keys - if d > 0 { - // copy over duplicate keys in order to maintain loop invariant - copy(fields[i+1:], fields[j:]) - fields = fields[:len(fields)-d] - } - } - - return fields -} diff --git a/pkg/tracing/fields/fields_test.go b/pkg/tracing/fields/fields_test.go deleted file mode 100644 index c15b1aec958..00000000000 --- a/pkg/tracing/fields/fields_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package fields - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/pkg/testing/assert" -) - -func makeFields(args ...string) Fields { - if len(args)%2 != 0 { - panic("uneven number of arguments") - } - - var f Fields - for i := 0; i+1 < len(args); i += 2 { - f = append(f, String(args[i], args[i+1])) - } - return f -} - -func TestNew(t *testing.T) { - cases := []struct { - n string - l []string - exp Fields - }{ - { - n: "empty", - l: nil, - exp: makeFields(), - }, - { - n: "not duplicates", - l: []string{"k01", "v01", "k03", "v03", "k02", "v02"}, - exp: makeFields("k01", "v01", "k02", "v02", "k03", "v03"), - }, - { - n: "duplicates at end", - l: []string{"k01", "v01", "k02", "v02", "k02", "v02"}, - exp: makeFields("k01", "v01", "k02", "v02"), - }, - { - n: "duplicates at start", - l: []string{"k01", "v01", "k02", "v02", "k01", "v01"}, - exp: makeFields("k01", "v01", "k02", "v02"), - }, - { - n: "duplicates in middle", - l: []string{"k01", "v01", "k02", "v02", "k03", "v03", "k02", "v02", "k02", "v02"}, - exp: makeFields("k01", "v01", "k02", "v02", "k03", "v03"), - }, - } - - for _, tc := range cases { - t.Run(tc.n, func(t *testing.T) { - l := New(makeFields(tc.l...)...) 
- assert.Equal(t, tc.exp, l) - }) - } -} - -func TestFields_Merge(t *testing.T) { - cases := []struct { - n string - l, r Fields - exp Fields - }{ - { - n: "no matching keys", - l: New(String("k05", "v05"), String("k03", "v03"), String("k01", "v01")), - r: New(String("k02", "v02"), String("k04", "v04"), String("k00", "v00")), - exp: New(String("k05", "v05"), String("k03", "v03"), String("k01", "v01"), String("k02", "v02"), String("k04", "v04"), String("k00", "v00")), - }, - { - n: "multiple matching keys", - l: New(String("k05", "v05"), String("k03", "v03"), String("k01", "v01")), - r: New(String("k02", "v02"), String("k03", "v03a"), String("k05", "v05a")), - exp: New(String("k05", "v05a"), String("k03", "v03a"), String("k01", "v01"), String("k02", "v02")), - }, - { - n: "source empty", - l: New(), - r: New(String("k02", "v02"), String("k04", "v04"), String("k00", "v00")), - exp: New(String("k02", "v02"), String("k04", "v04"), String("k00", "v00")), - }, - { - n: "other empty", - l: New(String("k02", "v02"), String("k04", "v04"), String("k00", "v00")), - r: New(), - exp: New(String("k02", "v02"), String("k04", "v04"), String("k00", "v00")), - }, - } - - for _, tc := range cases { - t.Run(tc.n, func(t *testing.T) { - l := tc.l - l.Merge(tc.r) - assert.Equal(t, tc.exp, l) - }) - } -} diff --git a/pkg/tracing/labels/labels.go b/pkg/tracing/labels/labels.go deleted file mode 100644 index 90afda7dabf..00000000000 --- a/pkg/tracing/labels/labels.go +++ /dev/null @@ -1,74 +0,0 @@ -package labels - -import "sort" - -type Label struct { - Key, Value string -} - -// The Labels type represents a set of labels, sorted by Key. -type Labels []Label - -// Merge merges other with the current set, replacing any matching keys from other. -func (ls *Labels) Merge(other Labels) { - var list []Label - i, j := 0, 0 - for i < len(*ls) && j < len(other) { - if (*ls)[i].Key < other[j].Key { - list = append(list, (*ls)[i]) - i++ - } else if (*ls)[i].Key > other[j].Key { - list = append(list, other[j]) - j++ - } else { - // equal, then "other" replaces existing key - list = append(list, other[j]) - i++ - j++ - } - } - - if i < len(*ls) { - list = append(list, (*ls)[i:]...) - } else if j < len(other) { - list = append(list, other[j:]...) - } - - *ls = list -} - -// New takes an even number of strings representing key-value pairs -// and creates a new slice of Labels. 
Duplicates are removed, however, -// there is no guarantee which will be removed -func New(args ...string) Labels { - if len(args)%2 != 0 { - panic("uneven number of arguments to label.Labels") - } - var labels Labels - for i := 0; i+1 < len(args); i += 2 { - labels = append(labels, Label{Key: args[i], Value: args[i+1]}) - } - - sort.Slice(labels, func(i, j int) bool { - return labels[i].Key < labels[j].Key - }) - - // deduplicate - // loop invariant: labels[:i] has no duplicates - for i := 0; i < len(labels)-1; i++ { - j := i + 1 - // find all duplicate keys - for j < len(labels) && labels[i].Key == labels[j].Key { - j++ - } - - d := (j - 1) - i // number of duplicate keys - if d > 0 { - // copy over duplicate keys in order to maintain loop invariant - copy(labels[i+1:], labels[j:]) - labels = labels[:len(labels)-d] - } - } - - return labels -} diff --git a/pkg/tracing/labels/labels_test.go b/pkg/tracing/labels/labels_test.go deleted file mode 100644 index f92bbf728a2..00000000000 --- a/pkg/tracing/labels/labels_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package labels - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/pkg/testing/assert" -) - -func makeLabels(args ...string) Labels { - if len(args)%2 != 0 { - panic("uneven number of arguments") - } - - var l Labels - for i := 0; i+1 < len(args); i += 2 { - l = append(l, Label{Key: args[i], Value: args[i+1]}) - } - return l -} - -func TestNew(t *testing.T) { - cases := []struct { - n string - l []string - exp Labels - }{ - { - n: "empty", - l: nil, - exp: makeLabels(), - }, - { - n: "not duplicates", - l: []string{"k01", "v01", "k03", "v03", "k02", "v02"}, - exp: makeLabels("k01", "v01", "k02", "v02", "k03", "v03"), - }, - { - n: "duplicates at end", - l: []string{"k01", "v01", "k02", "v02", "k02", "v02"}, - exp: makeLabels("k01", "v01", "k02", "v02"), - }, - { - n: "duplicates at start", - l: []string{"k01", "v01", "k02", "v02", "k01", "v01"}, - exp: makeLabels("k01", "v01", "k02", "v02"), - }, - { - n: "duplicates in middle", - l: []string{"k01", "v01", "k02", "v02", "k03", "v03", "k02", "v02", "k02", "v02"}, - exp: makeLabels("k01", "v01", "k02", "v02", "k03", "v03"), - }, - } - - for _, tc := range cases { - t.Run(tc.n, func(t *testing.T) { - l := New(tc.l...) 
- assert.Equal(t, l, tc.exp) - }) - } -} - -func TestLabels_Merge(t *testing.T) { - cases := []struct { - n string - l, r Labels - exp Labels - }{ - { - n: "no matching keys", - l: New("k05", "v05", "k03", "v03", "k01", "v01"), - r: New("k02", "v02", "k04", "v04", "k00", "v00"), - exp: New("k05", "v05", "k03", "v03", "k01", "v01", "k02", "v02", "k04", "v04", "k00", "v00"), - }, - { - n: "multiple matching keys", - l: New("k05", "v05", "k03", "v03", "k01", "v01"), - r: New("k02", "v02", "k03", "v03a", "k05", "v05a"), - exp: New("k05", "v05a", "k03", "v03a", "k01", "v01", "k02", "v02"), - }, - { - n: "source empty", - l: New(), - r: New("k02", "v02", "k04", "v04", "k00", "v00"), - exp: New("k02", "v02", "k04", "v04", "k00", "v00"), - }, - { - n: "other empty", - l: New("k02", "v02", "k04", "v04", "k00", "v00"), - r: New(), - exp: New("k02", "v02", "k04", "v04", "k00", "v00"), - }, - } - - for _, tc := range cases { - t.Run(tc.n, func(t *testing.T) { - l := tc.l - l.Merge(tc.r) - assert.Equal(t, l, tc.exp) - }) - } -} diff --git a/pkg/tracing/rawspan.go b/pkg/tracing/rawspan.go deleted file mode 100644 index cf10e75f51e..00000000000 --- a/pkg/tracing/rawspan.go +++ /dev/null @@ -1,18 +0,0 @@ -package tracing - -import ( - "time" - - "github.com/influxdata/influxdb/v2/pkg/tracing/fields" - "github.com/influxdata/influxdb/v2/pkg/tracing/labels" -) - -// RawSpan represents the data associated with a span. -type RawSpan struct { - Context SpanContext - ParentSpanID uint64 // ParentSpanID identifies the parent of this span or 0 if this is the root span. - Name string // Name is the operation name given to this span. - Start time.Time // Start identifies the start time of the span. - Labels labels.Labels // Labels contains additional metadata about this span. - Fields fields.Fields // Fields contains typed values associated with this span. -} diff --git a/pkg/tracing/span.go b/pkg/tracing/span.go deleted file mode 100644 index 892d4fcad66..00000000000 --- a/pkg/tracing/span.go +++ /dev/null @@ -1,84 +0,0 @@ -package tracing - -import ( - "sync" - "time" - - "github.com/influxdata/influxdb/v2/pkg/tracing/fields" - "github.com/influxdata/influxdb/v2/pkg/tracing/labels" -) - -// The Span type denotes a specific operation for a Trace. -// A Span may have one or more children, identifying additional -// details about a trace. -type Span struct { - tracer *Trace - mu sync.Mutex - raw RawSpan -} - -type StartSpanOption interface { - applyStart(*Span) -} - -// The StartTime start span option specifies the start time of -// the new span rather than using now. -type StartTime time.Time - -func (t StartTime) applyStart(s *Span) { - s.raw.Start = time.Time(t) -} - -// StartSpan creates a new child span using time.Now as the start time. -func (s *Span) StartSpan(name string, opt ...StartSpanOption) *Span { - return s.tracer.startSpan(name, s.raw.Context, opt) -} - -// Context returns a SpanContext that can be serialized and passed to a remote node to continue a trace. -func (s *Span) Context() SpanContext { - return s.raw.Context -} - -// SetLabels replaces any existing labels for the Span with args. -func (s *Span) SetLabels(args ...string) { - s.mu.Lock() - s.raw.Labels = labels.New(args...) - s.mu.Unlock() -} - -// MergeLabels merges args with any existing labels defined -// for the Span. -func (s *Span) MergeLabels(args ...string) { - ls := labels.New(args...) 
- s.mu.Lock() - s.raw.Labels.Merge(ls) - s.mu.Unlock() -} - -// SetFields replaces any existing fields for the Span with args. -func (s *Span) SetFields(set fields.Fields) { - s.mu.Lock() - s.raw.Fields = set - s.mu.Unlock() -} - -// MergeFields merges the provides args with any existing fields defined -// for the Span. -func (s *Span) MergeFields(args ...fields.Field) { - set := fields.New(args...) - s.mu.Lock() - s.raw.Fields.Merge(set) - s.mu.Unlock() -} - -// Finish marks the end of the span and records it to the associated Trace. -// If Finish is not called, the span will not appear in the trace. -func (s *Span) Finish() { - s.mu.Lock() - s.tracer.addRawSpan(s.raw) - s.mu.Unlock() -} - -func (s *Span) Tree() *TreeNode { - return s.tracer.TreeFrom(s.raw.Context.SpanID) -} diff --git a/pkg/tracing/spancontext.go b/pkg/tracing/spancontext.go deleted file mode 100644 index 2ba8cf9f377..00000000000 --- a/pkg/tracing/spancontext.go +++ /dev/null @@ -1,32 +0,0 @@ -package tracing - -import ( - "github.com/influxdata/influxdb/v2/pkg/tracing/wire" - "google.golang.org/protobuf/proto" -) - -// A SpanContext represents the minimal information to identify a span in a trace. -// This is typically serialized to continue a trace on a remote node. -type SpanContext struct { - TraceID uint64 // TraceID is assigned a random number to this trace. - SpanID uint64 // SpanID is assigned a random number to identify this span. -} - -func (s SpanContext) MarshalBinary() ([]byte, error) { - return proto.Marshal(&wire.SpanContext{ - TraceID: s.TraceID, - SpanID: s.SpanID, - }) -} - -func (s *SpanContext) UnmarshalBinary(data []byte) error { - var ws wire.SpanContext - err := proto.Unmarshal(data, &ws) - if err == nil { - *s = SpanContext{ - TraceID: ws.TraceID, - SpanID: ws.SpanID, - } - } - return err -} diff --git a/pkg/tracing/trace.go b/pkg/tracing/trace.go deleted file mode 100644 index 4beb7a5e708..00000000000 --- a/pkg/tracing/trace.go +++ /dev/null @@ -1,138 +0,0 @@ -package tracing - -import ( - "sort" - "sync" - "time" -) - -// The Trace type functions as a container for capturing Spans used to -// trace the execution of a request. -type Trace struct { - mu sync.Mutex - spans map[uint64]RawSpan -} - -// NewTrace starts a new trace and returns a root span identified by the provided name. -// -// Additional options may be specified to override the default behavior when creating the span. -func NewTrace(name string, opt ...StartSpanOption) (*Trace, *Span) { - t := &Trace{spans: make(map[uint64]RawSpan)} - s := &Span{tracer: t} - s.raw.Name = name - s.raw.Context.TraceID, s.raw.Context.SpanID = randomID2() - setOptions(s, opt) - - return t, s -} - -// NewTraceFromSpan starts a new trace and returns the associated span, which is a child of the -// parent span context. 
-func NewTraceFromSpan(name string, parent SpanContext, opt ...StartSpanOption) (*Trace, *Span) { - t := &Trace{spans: make(map[uint64]RawSpan)} - s := &Span{tracer: t} - s.raw.Name = name - s.raw.ParentSpanID = parent.SpanID - s.raw.Context.TraceID = parent.TraceID - s.raw.Context.SpanID = randomID() - setOptions(s, opt) - - return t, s -} - -func (t *Trace) startSpan(name string, sc SpanContext, opt []StartSpanOption) *Span { - s := &Span{tracer: t} - s.raw.Name = name - s.raw.Context.SpanID = randomID() - s.raw.Context.TraceID = sc.TraceID - s.raw.ParentSpanID = sc.SpanID - setOptions(s, opt) - - return s -} - -func setOptions(s *Span, opt []StartSpanOption) { - for _, o := range opt { - o.applyStart(s) - } - - if s.raw.Start.IsZero() { - s.raw.Start = time.Now() - } -} - -func (t *Trace) addRawSpan(raw RawSpan) { - t.mu.Lock() - t.spans[raw.Context.SpanID] = raw - t.mu.Unlock() -} - -// Tree returns a graph of the current trace. -func (t *Trace) Tree() *TreeNode { - t.mu.Lock() - defer t.mu.Unlock() - - for _, s := range t.spans { - if s.ParentSpanID == 0 { - return t.treeFrom(s.Context.SpanID) - } - } - return nil -} - -// Merge combines other with the current trace. This is -// typically necessary when traces are transferred from a remote. -func (t *Trace) Merge(other *Trace) { - for k, s := range other.spans { - t.spans[k] = s - } -} - -func (t *Trace) TreeFrom(root uint64) *TreeNode { - t.mu.Lock() - defer t.mu.Unlock() - return t.treeFrom(root) -} - -func (t *Trace) treeFrom(root uint64) *TreeNode { - c := map[uint64]*TreeNode{} - - for k, s := range t.spans { - c[k] = &TreeNode{Raw: s} - } - - if _, ok := c[root]; !ok { - return nil - } - - for _, n := range c { - if n.Raw.ParentSpanID != 0 { - if pn := c[n.Raw.ParentSpanID]; pn != nil { - pn.Children = append(pn.Children, n) - } - } - } - - // sort nodes - var v treeSortVisitor - Walk(&v, c[root]) - - return c[root] -} - -type treeSortVisitor struct{} - -func (v *treeSortVisitor) Visit(node *TreeNode) Visitor { - sort.Slice(node.Children, func(i, j int) bool { - lt, rt := node.Children[i].Raw.Start.UnixNano(), node.Children[j].Raw.Start.UnixNano() - if lt < rt { - return true - } else if lt > rt { - return false - } - - ln, rn := node.Children[i].Raw.Name, node.Children[j].Raw.Name - return ln < rn - }) - return v -} diff --git a/pkg/tracing/trace_encoding.go b/pkg/tracing/trace_encoding.go deleted file mode 100644 index 06b7eb1d423..00000000000 --- a/pkg/tracing/trace_encoding.go +++ /dev/null @@ -1,137 +0,0 @@ -package tracing - -import ( - "math" - "time" - - "github.com/influxdata/influxdb/v2/pkg/tracing/fields" - "github.com/influxdata/influxdb/v2/pkg/tracing/labels" - "github.com/influxdata/influxdb/v2/pkg/tracing/wire" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" -) - -func fieldsToWire(set fields.Fields) []*wire.Field { - var r []*wire.Field - for _, f := range set { - wf := wire.Field{Key: f.Key()} - switch val := f.Value().(type) { - case string: - wf.FieldType = wire.FieldType_FieldTypeString - wf.Value = &wire.Field_StringVal{StringVal: val} - - case bool: - var numericVal int64 - if val { - numericVal = 1 - } - wf.FieldType = wire.FieldType_FieldTypeBool - wf.Value = &wire.Field_NumericVal{NumericVal: numericVal} - - case int64: - wf.FieldType = wire.FieldType_FieldTypeInt64 - wf.Value = &wire.Field_NumericVal{NumericVal: val} - - case uint64: - wf.FieldType = wire.FieldType_FieldTypeUint64 - wf.Value = &wire.Field_NumericVal{NumericVal: 
int64(val)} - - case time.Duration: - wf.FieldType = wire.FieldType_FieldTypeDuration - wf.Value = &wire.Field_NumericVal{NumericVal: int64(val)} - - case float64: - wf.FieldType = wire.FieldType_FieldTypeFloat64 - wf.Value = &wire.Field_NumericVal{NumericVal: int64(math.Float64bits(val))} - - default: - continue - } - - r = append(r, &wf) - } - return r -} - -func labelsToWire(set labels.Labels) []string { - var r []string - for i := range set { - r = append(r, set[i].Key, set[i].Value) - } - return r -} - -func (t *Trace) MarshalBinary() ([]byte, error) { - wt := wire.Trace{} - for _, sp := range t.spans { - wt.Spans = append(wt.Spans, &wire.Span{ - Context: &wire.SpanContext{ - TraceID: sp.Context.TraceID, - SpanID: sp.Context.SpanID, - }, - ParentSpanID: sp.ParentSpanID, - Name: sp.Name, - Start: timestamppb.New(sp.Start), - Labels: labelsToWire(sp.Labels), - Fields: fieldsToWire(sp.Fields), - }) - } - - return proto.Marshal(&wt) -} - -func wireToFields(wfs []*wire.Field) fields.Fields { - var fs []fields.Field - for _, wf := range wfs { - switch wf.FieldType { - case wire.FieldType_FieldTypeString: - fs = append(fs, fields.String(wf.Key, wf.GetStringVal())) - - case wire.FieldType_FieldTypeBool: - var boolVal bool - if wf.GetNumericVal() != 0 { - boolVal = true - } - fs = append(fs, fields.Bool(wf.Key, boolVal)) - - case wire.FieldType_FieldTypeInt64: - fs = append(fs, fields.Int64(wf.Key, wf.GetNumericVal())) - - case wire.FieldType_FieldTypeUint64: - fs = append(fs, fields.Uint64(wf.Key, uint64(wf.GetNumericVal()))) - - case wire.FieldType_FieldTypeDuration: - fs = append(fs, fields.Duration(wf.Key, time.Duration(wf.GetNumericVal()))) - - case wire.FieldType_FieldTypeFloat64: - fs = append(fs, fields.Float64(wf.Key, math.Float64frombits(uint64(wf.GetNumericVal())))) - } - } - - return fields.New(fs...) -} - -func (t *Trace) UnmarshalBinary(data []byte) error { - var wt wire.Trace - if err := proto.Unmarshal(data, &wt); err != nil { - return err - } - - t.spans = make(map[uint64]RawSpan) - - for _, sp := range wt.Spans { - t.spans[sp.Context.SpanID] = RawSpan{ - Context: SpanContext{ - TraceID: sp.Context.TraceID, - SpanID: sp.Context.SpanID, - }, - ParentSpanID: sp.ParentSpanID, - Name: sp.Name, - Start: sp.Start.AsTime(), - Labels: labels.New(sp.Labels...), - Fields: wireToFields(sp.Fields), - } - } - - return nil -} diff --git a/pkg/tracing/tree.go b/pkg/tracing/tree.go deleted file mode 100644 index 0321be64124..00000000000 --- a/pkg/tracing/tree.go +++ /dev/null @@ -1,74 +0,0 @@ -package tracing - -import ( - "github.com/xlab/treeprint" -) - -// A Visitor's Visit method is invoked for each node encountered by Walk. -// If the result of Visit is not nil, Walk visits each of the children. -type Visitor interface { - Visit(*TreeNode) Visitor -} - -// A TreeNode represents a single node in the graph. -type TreeNode struct { - Raw RawSpan - Children []*TreeNode -} - -// String returns the tree as a string. -func (t *TreeNode) String() string { - if t == nil { - return "" - } - tv := newTreeVisitor() - Walk(tv, t) - return tv.root.String() -} - -// Walk traverses the graph in a depth-first order, calling v.Visit -// for each node until completion or v.Visit returns nil. 
-func Walk(v Visitor, node *TreeNode) { - if v = v.Visit(node); v == nil { - return - } - - for _, c := range node.Children { - Walk(v, c) - } -} - -type treeVisitor struct { - root treeprint.Tree - trees []treeprint.Tree -} - -func newTreeVisitor() *treeVisitor { - t := treeprint.New() - return &treeVisitor{root: t, trees: []treeprint.Tree{t}} -} - -func (v *treeVisitor) Visit(n *TreeNode) Visitor { - t := v.trees[len(v.trees)-1].AddBranch(n.Raw.Name) - v.trees = append(v.trees, t) - - if labels := n.Raw.Labels; len(labels) > 0 { - l := t.AddBranch("labels") - for _, ll := range n.Raw.Labels { - l.AddNode(ll.Key + ": " + ll.Value) - } - } - - for _, k := range n.Raw.Fields { - t.AddNode(k.String()) - } - - for _, cn := range n.Children { - Walk(v, cn) - } - - v.trees[len(v.trees)-1] = nil - v.trees = v.trees[:len(v.trees)-1] - - return nil -} diff --git a/pkg/tracing/util.go b/pkg/tracing/util.go deleted file mode 100644 index f98cc776a1f..00000000000 --- a/pkg/tracing/util.go +++ /dev/null @@ -1,26 +0,0 @@ -package tracing - -import ( - "math/rand" - "sync" - "time" -) - -var ( - seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano())) - seededIDLock sync.Mutex -) - -func randomID() (n uint64) { - seededIDLock.Lock() - n = uint64(seededIDGen.Int63()) - seededIDLock.Unlock() - return -} - -func randomID2() (n uint64, m uint64) { - seededIDLock.Lock() - n, m = uint64(seededIDGen.Int63()), uint64(seededIDGen.Int63()) - seededIDLock.Unlock() - return -} diff --git a/pkg/tracing/wire/binary.go b/pkg/tracing/wire/binary.go deleted file mode 100644 index ffec1d5b825..00000000000 --- a/pkg/tracing/wire/binary.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package wire is used to serialize a trace. -package wire - -//go:generate protoc --go_out=. binary.proto diff --git a/pkg/tracing/wire/binary.pb.go b/pkg/tracing/wire/binary.pb.go deleted file mode 100644 index 666d4c8aab6..00000000000 --- a/pkg/tracing/wire/binary.pb.go +++ /dev/null @@ -1,530 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.27.1 -// protoc v3.17.3 -// source: binary.proto - -package wire - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type FieldType int32 - -const ( - FieldType_FieldTypeString FieldType = 0 - FieldType_FieldTypeBool FieldType = 1 - FieldType_FieldTypeInt64 FieldType = 2 - FieldType_FieldTypeUint64 FieldType = 3 - FieldType_FieldTypeDuration FieldType = 4 - FieldType_FieldTypeFloat64 FieldType = 6 -) - -// Enum value maps for FieldType. 
-var ( - FieldType_name = map[int32]string{ - 0: "FieldTypeString", - 1: "FieldTypeBool", - 2: "FieldTypeInt64", - 3: "FieldTypeUint64", - 4: "FieldTypeDuration", - 6: "FieldTypeFloat64", - } - FieldType_value = map[string]int32{ - "FieldTypeString": 0, - "FieldTypeBool": 1, - "FieldTypeInt64": 2, - "FieldTypeUint64": 3, - "FieldTypeDuration": 4, - "FieldTypeFloat64": 6, - } -) - -func (x FieldType) Enum() *FieldType { - p := new(FieldType) - *p = x - return p -} - -func (x FieldType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FieldType) Descriptor() protoreflect.EnumDescriptor { - return file_binary_proto_enumTypes[0].Descriptor() -} - -func (FieldType) Type() protoreflect.EnumType { - return &file_binary_proto_enumTypes[0] -} - -func (x FieldType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use FieldType.Descriptor instead. -func (FieldType) EnumDescriptor() ([]byte, []int) { - return file_binary_proto_rawDescGZIP(), []int{0} -} - -type SpanContext struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TraceID uint64 `protobuf:"varint,1,opt,name=TraceID,proto3" json:"TraceID,omitempty"` - SpanID uint64 `protobuf:"varint,2,opt,name=SpanID,proto3" json:"SpanID,omitempty"` -} - -func (x *SpanContext) Reset() { - *x = SpanContext{} - if protoimpl.UnsafeEnabled { - mi := &file_binary_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SpanContext) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SpanContext) ProtoMessage() {} - -func (x *SpanContext) ProtoReflect() protoreflect.Message { - mi := &file_binary_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SpanContext.ProtoReflect.Descriptor instead. 
-func (*SpanContext) Descriptor() ([]byte, []int) { - return file_binary_proto_rawDescGZIP(), []int{0} -} - -func (x *SpanContext) GetTraceID() uint64 { - if x != nil { - return x.TraceID - } - return 0 -} - -func (x *SpanContext) GetSpanID() uint64 { - if x != nil { - return x.SpanID - } - return 0 -} - -type Span struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Context *SpanContext `protobuf:"bytes,1,opt,name=context,proto3" json:"context,omitempty"` // [(gogoproto.nullable) = false]; - ParentSpanID uint64 `protobuf:"varint,2,opt,name=ParentSpanID,proto3" json:"ParentSpanID,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - Start *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=Start,proto3" json:"Start,omitempty"` // [(gogoproto.customname) = "Start", (gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - Labels []string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty"` - Fields []*Field `protobuf:"bytes,6,rep,name=fields,proto3" json:"fields,omitempty"` // [(gogoproto.nullable) = false]; -} - -func (x *Span) Reset() { - *x = Span{} - if protoimpl.UnsafeEnabled { - mi := &file_binary_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Span) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Span) ProtoMessage() {} - -func (x *Span) ProtoReflect() protoreflect.Message { - mi := &file_binary_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Span.ProtoReflect.Descriptor instead. -func (*Span) Descriptor() ([]byte, []int) { - return file_binary_proto_rawDescGZIP(), []int{1} -} - -func (x *Span) GetContext() *SpanContext { - if x != nil { - return x.Context - } - return nil -} - -func (x *Span) GetParentSpanID() uint64 { - if x != nil { - return x.ParentSpanID - } - return 0 -} - -func (x *Span) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Span) GetStart() *timestamppb.Timestamp { - if x != nil { - return x.Start - } - return nil -} - -func (x *Span) GetLabels() []string { - if x != nil { - return x.Labels - } - return nil -} - -func (x *Span) GetFields() []*Field { - if x != nil { - return x.Fields - } - return nil -} - -type Trace struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Spans []*Span `protobuf:"bytes,1,rep,name=spans,proto3" json:"spans,omitempty"` -} - -func (x *Trace) Reset() { - *x = Trace{} - if protoimpl.UnsafeEnabled { - mi := &file_binary_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Trace) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Trace) ProtoMessage() {} - -func (x *Trace) ProtoReflect() protoreflect.Message { - mi := &file_binary_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Trace.ProtoReflect.Descriptor instead. 
-func (*Trace) Descriptor() ([]byte, []int) { - return file_binary_proto_rawDescGZIP(), []int{2} -} - -func (x *Trace) GetSpans() []*Span { - if x != nil { - return x.Spans - } - return nil -} - -type Field struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - FieldType FieldType `protobuf:"varint,2,opt,name=FieldType,proto3,enum=wire.FieldType" json:"FieldType,omitempty"` - // Types that are assignable to Value: - // - // *Field_NumericVal - // *Field_StringVal - Value isField_Value `protobuf_oneof:"value"` -} - -func (x *Field) Reset() { - *x = Field{} - if protoimpl.UnsafeEnabled { - mi := &file_binary_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Field) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Field) ProtoMessage() {} - -func (x *Field) ProtoReflect() protoreflect.Message { - mi := &file_binary_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Field.ProtoReflect.Descriptor instead. -func (*Field) Descriptor() ([]byte, []int) { - return file_binary_proto_rawDescGZIP(), []int{3} -} - -func (x *Field) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *Field) GetFieldType() FieldType { - if x != nil { - return x.FieldType - } - return FieldType_FieldTypeString -} - -func (m *Field) GetValue() isField_Value { - if m != nil { - return m.Value - } - return nil -} - -func (x *Field) GetNumericVal() int64 { - if x, ok := x.GetValue().(*Field_NumericVal); ok { - return x.NumericVal - } - return 0 -} - -func (x *Field) GetStringVal() string { - if x, ok := x.GetValue().(*Field_StringVal); ok { - return x.StringVal - } - return "" -} - -type isField_Value interface { - isField_Value() -} - -type Field_NumericVal struct { - NumericVal int64 `protobuf:"fixed64,3,opt,name=NumericVal,proto3,oneof"` -} - -type Field_StringVal struct { - StringVal string `protobuf:"bytes,4,opt,name=StringVal,proto3,oneof"` -} - -func (*Field_NumericVal) isField_Value() {} - -func (*Field_StringVal) isField_Value() {} - -var File_binary_proto protoreflect.FileDescriptor - -var file_binary_proto_rawDesc = []byte{ - 0x0a, 0x0c, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, - 0x77, 0x69, 0x72, 0x65, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x3f, 0x0a, 0x0b, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, 0x6e, - 0x74, 0x65, 0x78, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x54, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x54, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x12, 0x16, - 0x0a, 0x06, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, - 0x53, 0x70, 0x61, 0x6e, 0x49, 0x44, 0x22, 0xda, 0x01, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, - 0x2b, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x11, 0x2e, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x22, 
0x0a, 0x0c, - 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x0c, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x44, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x23, - 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, - 0x2e, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x73, 0x22, 0x29, 0x0a, 0x05, 0x54, 0x72, 0x61, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x05, - 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x77, 0x69, - 0x72, 0x65, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x22, 0x93, - 0x01, 0x0a, 0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x09, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, - 0x77, 0x69, 0x72, 0x65, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0a, 0x4e, 0x75, 0x6d, - 0x65, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x10, 0x48, 0x00, 0x52, - 0x0a, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x12, 0x1e, 0x0a, 0x09, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, - 0x52, 0x09, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x42, 0x07, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x2a, 0x89, 0x01, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x54, 0x79, 0x70, 0x65, 0x42, 0x6f, 0x6f, 0x6c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x10, 0x02, 0x12, 0x13, - 0x0a, 0x0f, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x55, 0x69, 0x6e, 0x74, 0x36, - 0x34, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, - 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x10, 0x04, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x36, 0x34, 0x10, 0x06, - 0x42, 0x08, 0x5a, 0x06, 0x2e, 0x3b, 0x77, 0x69, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_binary_proto_rawDescOnce sync.Once - file_binary_proto_rawDescData = file_binary_proto_rawDesc -) - -func file_binary_proto_rawDescGZIP() []byte { - file_binary_proto_rawDescOnce.Do(func() { - file_binary_proto_rawDescData = protoimpl.X.CompressGZIP(file_binary_proto_rawDescData) - }) - return file_binary_proto_rawDescData -} - -var file_binary_proto_enumTypes = 
make([]protoimpl.EnumInfo, 1) -var file_binary_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_binary_proto_goTypes = []interface{}{ - (FieldType)(0), // 0: wire.FieldType - (*SpanContext)(nil), // 1: wire.SpanContext - (*Span)(nil), // 2: wire.Span - (*Trace)(nil), // 3: wire.Trace - (*Field)(nil), // 4: wire.Field - (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp -} -var file_binary_proto_depIdxs = []int32{ - 1, // 0: wire.Span.context:type_name -> wire.SpanContext - 5, // 1: wire.Span.Start:type_name -> google.protobuf.Timestamp - 4, // 2: wire.Span.fields:type_name -> wire.Field - 2, // 3: wire.Trace.spans:type_name -> wire.Span - 0, // 4: wire.Field.FieldType:type_name -> wire.FieldType - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name -} - -func init() { file_binary_proto_init() } -func file_binary_proto_init() { - if File_binary_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_binary_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SpanContext); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_binary_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Span); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_binary_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Trace); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_binary_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Field); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_binary_proto_msgTypes[3].OneofWrappers = []interface{}{ - (*Field_NumericVal)(nil), - (*Field_StringVal)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_binary_proto_rawDesc, - NumEnums: 1, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_binary_proto_goTypes, - DependencyIndexes: file_binary_proto_depIdxs, - EnumInfos: file_binary_proto_enumTypes, - MessageInfos: file_binary_proto_msgTypes, - }.Build() - File_binary_proto = out.File - file_binary_proto_rawDesc = nil - file_binary_proto_goTypes = nil - file_binary_proto_depIdxs = nil -} diff --git a/pkg/tracing/wire/binary.proto b/pkg/tracing/wire/binary.proto deleted file mode 100644 index 8b0f6b80e0c..00000000000 --- a/pkg/tracing/wire/binary.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; -package wire; -option go_package = ".;wire"; - -import "google/protobuf/timestamp.proto"; - -message SpanContext { - uint64 TraceID = 1; - uint64 SpanID = 2; -} - -message Span { - SpanContext context = 1; // [(gogoproto.nullable) = false]; - uint64 ParentSpanID = 2; - string name = 3; - google.protobuf.Timestamp Start = 4; // [(gogoproto.customname) = "Start", (gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - repeated string labels = 5; - repeated Field fields = 6; // [(gogoproto.nullable) = false]; -} 
- -message Trace { - repeated Span spans = 1; -} - -message Field { - - string key = 1; - FieldType FieldType = 2; - - oneof value { - sfixed64 NumericVal = 3; - string StringVal = 4; - } -} - -enum FieldType { - FieldTypeString = 0; - FieldTypeBool = 1; - FieldTypeInt64 = 2; - FieldTypeUint64 = 3; - FieldTypeDuration = 4; - FieldTypeFloat64 = 6; -} diff --git a/pkger/README.md b/pkger/README.md deleted file mode 100644 index c00cc1739be..00000000000 --- a/pkger/README.md +++ /dev/null @@ -1,639 +0,0 @@ -# Pkger: the What and How - -Responsibilities - -* Translating a declarative [package](#anatomy_of_a_package) file (either JSON | Yaml | Jsonnet) into resources in the platform -* Exporting existing resources in the form of a pkg (either JSON | Yaml) -* Managing the state of a pkg's side effects via a stack - -## Anatomy of a package - -A package is a collection of resource configurations. -These resource configurations can be seen in full in the [pkger/testdata](https://github.com/influxdata/influxdb/tree/master/pkger/testdata) directory. -The package itself does not have any state. -Packages may contain resources that are uniquely identifiable within the platform and some that are not. -If it is desired to use packages in a gitops scenario or in a manner that requires all resources are not duplicated, you will want to explore using a stack. - -Properties of package files: - -* A package's resources are unique by a combination `kind` and `metadata.name` fields -* A package guarantees that all resources within a package is applied consistently - * the pkger service manages all state management concerns -* A package does not have any state tracked without a stack -* A package may consist of multiple packages where all uniqueness and state guarantees apply - -### Stacks - -A stack is a stateful entity for which packages can be applied and managed. -A stack uses a combination of a resource's `kind` and `metadata.name` fields to uniquely identify a resource inside a package and map that to state in the platform. -Via this state, a stack provides the ability to apply a package idempotently. -Stack's manage the full lifecyle of a package's resources, including creating, updating, and deleting resources. - -Packages may contain resources that are not uniquely identifiable within the platform. -For instance, a dashboard resource, does not have any unique identifier within the platform beyond its UUID. -A stack uses the `metadata.name` field to uniquely identify a resource inside a package and map that to state in the platform. - -#### Stacks will manage the following use cases: - -We create a stack without any URLs to packages, henceforth identified as S1: - -```yaml -# S1 package - initial -kind: Label -metadata: - name: lucid_einstein -spec: - name: label_1 ---- -kind: Bucket -metadta: - name: pristine_noir -spec: - name: bucket_1 - association: - - kind: Label - name: lucid_einstein ---- -kind: Dashboard -metadata: - name: charmed_saratoba -spec: - name: dash_1 - association: - - kind: Label - name: lucid_einstein -``` - - -1. The S1 package (seen above) with all new resources is applied - - * Side effects: all resources are created and a record of all resources (id, res type, etc) is added to the S1 stack record - -
Stack Record - - ```json - { - "stack_id": S1_UUID, - "createdAt": CreatedAtTimestamp, - "updatedAt": CreatedAtTimestamp, - "config": {}, - "resources": [ - { - "kind": "Label", - "id": LABEL_UUID, - "pkgName": "lucid_einstein" - }, - { - "kind": "Bucket", - "id": BUCKET_UUID, - "pkgName": "pristine_noir", - "associations": [ - { - "kind": "Label", - "pkgName": "lucid_einstein" - } - ] - }, - { - "kind": "Dashboard", - "id": DASHBOARD_UUID, - "pkgName": "charmed_saratoba", - "associations": [ - { - "kind": "Label", - "pkgName": "lucid_einstein" - } - ] - } - ] - } - ``` - -
- - - -2. Same S1 package (seen above) is reapplied with no changes from step 1 - - * Side effects: nothing, no changes - -
Stack Record - - ```json - { - "stack_id": S1_UUID, - "createdAt": CreatedAtTimestamp, - "updatedAt": CreatedAtTimestamp, - "config": {}, - "resources": [ - { - "kind": "Label", - "id": LABEL_UUID, - "pkgName": "lucid_einstein" - }, - { - "kind": "Bucket", - "id": BUCKET_UUID, - "pkgName": "pristine_noir", - "associations": [ - { - "kind": "Label", - "pkgName": "lucid_einstein" - } - ] - }, - { - "kind": "Dashboard", - "id": DASHBOARD_UUID, - "pkgName": "charmed_saratoba", - "associations": [ - { - "kind": "Label", - "pkgName": "lucid_einstein" - } - ] - } - ] - } - ``` - -
- -
-
- - - - - ```yaml -# S1 package - updated label name -kind: Label -metadata: - name: lucid_einstein -spec: - name: cool label name #<<<<<< THIS NAME CHANGES ---- -kind: Bucket -metadata: - name: pristine_noir -# snip - no changes ---- -kind: Dashboard -metadata: - name: charmed_saratoba -# snip - no changes -``` - -3. The S1 package is applied with an update to the label resource - - * Side effects: platform label (LABEL_UUID) is renamed and `updatedAt` field in **S1** record is updated - -
Stack Record - - ```json - { - "stack_id": S1_UUID, - "createdAt": CreatedAtTimestamp, - "updatedAt": LABEL_UPDATE_TIMESTAMP, - "config": {}, - "resources": [ - ... snip, all resources are the same - ] - } - ``` - -
- - - -
-
- - - - ```yaml -# S1 package - new resource added -kind: Label -metadata: - name: lucid_einstein -# snip - no change ---- -kind: Bucket -metadata: - name: pristine_noir -# snip - no changes ---- -kind: Dashboard -metadata: - name: charmed_saratoba -# snip - no changes ---- -kind: Task #<<<<<< THIS RESOURCE IS ADDED -metadata: - name: alcord_mumphries -spec: - name: task_1 - association: - - kind: Label - name: lucid_einstein -``` - - - -4. The S1 package is applied with a new resource added - - * Side effects: new task is created and **S1** record is updated - -
Stack Record - - ```json - { - "stack_id": S1_UUID, - "createdAt": CreatedAtTimestamp, - "updatedAt": TASK_ADD_TIMESTAMP, - "config": {}, - "resources": [ - ... snip, all resources from before, - { - "kind": "Task", - "id": TASK_UUID, - "pkgName": "alcord_mumphries", - "associations": [ - { - "kind": "Label", - "pkgName": "lucid_einstein" - } - ] - } - ] - } - ``` - -
- - -
-
- - - ```yaml -# S1 package - task resource is removed -kind: Label -metadata: - name: lucid_einstein -# snip - no change ---- -kind: Bucket -metadata: - name: pristine_noir -# snip - no changes ---- -kind: Dashboard -metadata: - name: charmed_saratoba -# snip - no changes -``` - - - -5. The S1 package is applied with changes that remove an existing resource - * Side effects: the task is deleted from the platform and the **S1** record is updated - -
Stack Record - - ```json - { - "stack_id": S1_UUID, - "createdAt": CreatedAtTimestamp, - "updatedAt": TASK_DELETE_TIMESTAMP, - "config": {}, - "resources": [ - { - "kind": "Label", - "id": LABEL_UUID, - "pkgName": "lucid_einstein" - }, - { - "kind": "Bucket", - "id": BUCKET_UUID, - "pkgName": "pristine_noir", - "associations": [ - { - "kind": "Label", - "pkgName": "lucid_einstein" - } - ] - }, - { - "kind": "Dashboard", - "id": DASHBOARD_UUID, - "pkgName": "charmed_saratoba", - "associations": [ - { - "kind": "Label", - "pkgName": "lucid_einstein" - } - ] - } - ] - } - ``` - -
- -
-
- - - ```yaml -# S1 package - label and associations to it are removed -kind: Bucket -metadata: - name: pristine_noir -spec: - name: bucket_1 ---- -kind: Dashboard -metadata: - name: charmed_saratoba -spec: - name: dash_1 -``` - -6. The S1 package is applied with the label, and all associations to that label, removed - * Side effects: the label and all of its label associations are removed from the platform and the **S1** record is updated (a minimal reconciliation sketch follows this walkthrough) - -
Stack Record - - ```json - { - "stack_id": S1_UUID, - "createdAt": CreatedAtTimestamp, - "updatedAt": Label_DELETE_TIMESTAMP, - "config": {}, - "resources": [ - { - "kind": "Bucket", - "id": BUCKET_UUID, - "pkgName": "pristine_noir", - "associations": [] - }, - { - "kind": "Dashboard", - "id": DASHBOARD_UUID, - "pkgName": "charmed_saratoba", - "associations": [] - } - ] - } - ``` - -
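The walkthrough above is, at bottom, a reconciliation step: the stack record maps each `kind` + `metadata.name` pair to a platform ID, so applying a package creates what is new, updates what already exists, and deletes whatever the stack still tracks but the package no longer mentions. Below is a minimal, self-contained sketch of that bookkeeping only, under stated assumptions: `stackResource`, `pkgResource`, and `reconcile` are hypothetical stand-ins rather than pkger types, and a real apply would also diff resource specs so that an unchanged resource (use case 2) produces no side effects.

```go
package main

import "fmt"

// stackResource is a simplified stand-in for an entry in a stack record:
// a resource kind, its metadata.name, and the platform ID it was mapped to.
type stackResource struct {
	Kind, PkgName, ID string
}

// pkgResource is a simplified stand-in for a resource declared in a package.
type pkgResource struct {
	Kind, PkgName string
}

// action describes one side effect that applying the package would produce.
type action struct {
	Verb string // "create", "update", or "delete"
	Kind string
	Name string
}

// reconcile compares a package against the stack's recorded state, keyed by
// kind + metadata.name, and returns the side effects of applying it.
func reconcile(state []stackResource, pkg []pkgResource) []action {
	key := func(kind, name string) string { return kind + "/" + name }

	existing := make(map[string]stackResource, len(state))
	for _, r := range state {
		existing[key(r.Kind, r.PkgName)] = r
	}

	var out []action
	seen := make(map[string]bool, len(pkg))
	for _, r := range pkg {
		k := key(r.Kind, r.PkgName)
		seen[k] = true
		if _, ok := existing[k]; ok {
			// A real implementation would diff the specs here and skip
			// resources that have not changed.
			out = append(out, action{Verb: "update", Kind: r.Kind, Name: r.PkgName})
		} else {
			out = append(out, action{Verb: "create", Kind: r.Kind, Name: r.PkgName})
		}
	}
	// Anything the stack still tracks but the package no longer declares is
	// removed, as in use cases 5 and 6.
	for _, r := range state {
		if !seen[key(r.Kind, r.PkgName)] {
			out = append(out, action{Verb: "delete", Kind: r.Kind, Name: r.PkgName})
		}
	}
	return out
}

func main() {
	// State recorded by the stack before use case 6 is applied.
	state := []stackResource{
		{Kind: "Label", PkgName: "lucid_einstein", ID: "LABEL_UUID"},
		{Kind: "Bucket", PkgName: "pristine_noir", ID: "BUCKET_UUID"},
		{Kind: "Dashboard", PkgName: "charmed_saratoba", ID: "DASHBOARD_UUID"},
	}
	// The use case 6 package: the label and its associations are gone.
	pkg := []pkgResource{
		{Kind: "Bucket", PkgName: "pristine_noir"},
		{Kind: "Dashboard", PkgName: "charmed_saratoba"},
	}
	for _, a := range reconcile(state, pkg) {
		fmt.Println(a.Verb, a.Kind, a.Name)
	}
}
```

Run against the use case 6 inputs, the sketch reports an update for the bucket and the dashboard and a delete for the label, which is the same shape of side effect the stack record above ends up reflecting.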
- - -## From package to platform resources - -There are 3 main building blocks that take a package and make the declarative package a reality. The following is a quick overview of the system that manages packages. - -1. Parser - parses package - * informs the user of all validation errors in their package - * enforces `metadata.name` field uniqueness constraint -2. Service - all the business logic for managing packages - * handles all state management concerns, including making the entire package applied - * in case of failure to apply a package, the service guarantees the resoruces are returned to their existing state (if any) before the package was applied -3. HTTP API / CLI - means for user to submit packges to be applied - * provides the ability to export existing resources as a package, dry run a package, and apply a package - * all CLI calls go through the HTTP API the same way a user generated request would - - -### Parser internals - -The parser converts a package in any of the supported encoding types (JSON|Yaml|Jsonnet), and turns it into a [package model](https://github.com/influxdata/influxdb/blob/7d8bd1e055451d06dd55e6334c43d46261749ed7/pkger/parser.go#L229-L254). The parser handles the following: - -* enforces naming uniqueness by `metadata.name` -* split loop refactoring -* returns ALL errors in validation with ability to turn off error checking via validation opts -* trivial to extend to support different encoding types (JSON|Yaml|Jsonnet) - -You can explore more the goary details [here](https://github.com/influxdata/influxdb/blob/7d8bd1e055451d06dd55e6334c43d46261749ed7/pkger/parser.go). - - -### Service internals - -The service manages all intracommunication to other services and encapsulates the rules for the package domain. The pkger service depends on every service that we currently sans write and query services. Details of the service dependencies can be found [here](https://github.com/influxdata/influxdb/blob/c926accb42d87c407bcac6bbda753f9a03f9ec95/pkger/service.go#L197-L218): - -```go -type Service struct { - log *zap.Logger - - // internal dependencies - applyReqLimit int - idGen influxdb.IDGenerator - store Store - timeGen influxdb.TimeGenerator - - // external service dependencies - bucketSVC influxdb.BucketService - checkSVC influxdb.CheckService - dashSVC influxdb.DashboardService - labelSVC influxdb.LabelService - endpointSVC influxdb.NotificationEndpointService - orgSVC influxdb.OrganizationService - ruleSVC influxdb.NotificationRuleStore - secretSVC influxdb.SecretService - taskSVC influxdb.TaskService - teleSVC influxdb.TelegrafConfigStore - varSVC influxdb.VariableService -} -``` - -The behavior of the servcie includes the following: - -1. Dry run a package -2. Apply a package -3. Export a package -4. Initialize a stack - -The following sections explore this behavior further. - -#### Dry run a package - -When a package is submitted for a dry run the service takes the contents of that package and identifies the impact of its application before it is run. This is similar to `terraform plan`. - -> This command is a convenient way to check whether the package matches your expectations without making any changes to real resources. For example, a dry run might be run before committing a change to version control, to create confidence that it will behave as expected. - -A dry run requires that the package to be dry run has been parsed and graphed. If it has not, the dry run will do so before it attempts the dry run functionality. 
When a dry run is executed, the caller will have returned a summary of the package and a detailed diff of the impact of the package were it to be applied. - -The package summary is as follows: - -```go -type Summary struct { - Buckets []SummaryBucket `json:"buckets"` - Checks []SummaryCheck `json:"checks"` - Dashboards []SummaryDashboard `json:"dashboards"` - NotificationEndpoints []SummaryNotificationEndpoint `json:"notificationEndpoints"` - NotificationRules []SummaryNotificationRule `json:"notificationRules"` - Labels []SummaryLabel `json:"labels"` - LabelMappings []SummaryLabelMapping `json:"labelMappings"` - MissingEnvs []string `json:"missingEnvRefs"` - MissingSecrets []string `json:"missingSecrets"` - Tasks []SummaryTask `json:"summaryTask"` - TelegrafConfigs []SummaryTelegraf `json:"telegrafConfigs"` - Variables []SummaryVariable `json:"variables"` -} - -type SummaryBucket struct { - ID SafeID `json:"id,omitempty"` - OrgID SafeID `json:"orgID,omitempty"` - Name string `json:"name"` - Description string `json:"description"` - // TODO: return retention rules? - RetentionPeriod time.Duration `json:"retentionPeriod"` - LabelAssociations []SummaryLabel `json:"labelAssociations"` -} - -// snip other resources -``` - -The package diff is as follows: - -```go -type Diff struct { - Buckets []DiffBucket `json:"buckets"` - Checks []DiffCheck `json:"checks"` - Dashboards []DiffDashboard `json:"dashboards"` - Labels []DiffLabel `json:"labels"` - LabelMappings []DiffLabelMapping `json:"labelMappings"` - NotificationEndpoints []DiffNotificationEndpoint `json:"notificationEndpoints"` - NotificationRules []DiffNotificationRule `json:"notificationRules"` - Tasks []DiffTask `json:"tasks"` - Telegrafs []DiffTelegraf `json:"telegrafConfigs"` - Variables []DiffVariable `json:"variables"` -} - -// DiffBucketValues are the varying values for a bucket. -type DiffBucketValues struct { - Description string `json:"description"` - RetentionRules retentionRules `json:"retentionRules"` -} - -// DiffBucket is a diff of an individual bucket. -type DiffBucket struct { - ID SafeID `json:"id"` - Name string `json:"name"` - New DiffBucketValues `json:"new"` - Old *DiffBucketValues `json:"old,omitempty"` // using omitempty here to signal there was no prev state with a nil -} - -// snip other resources -``` - -If errors are encountered in the parsing, the dry run will return errors in addition to the package summary and diff. - - -#### Apply a package - -When a package is submitted to be applied, the service takes the contents of that package and identifies the impact of its application before it is run (Dry Run). It then brings the platform to the desired state of the package. - -> Apply is used to apply the changes required to reach the desired state of the package - -If a package had not been verified by a dry run when applying, it will be done to identify existing state within the platform. This existing state has to be maintained to account for an unexpected event that stops the package from being applied. The side effects created from the application will all be rolled back at this point. If a resource was newly created during the application, it will be removed. For a resource that existed in the platform, it will be returned to its state from before the application took place. The guarantee of state consistency is a best attempt. It is not bullet proof. However, a user can reapply the package and arrive at their desired state therafter. 
Upon successful application of a package, a summary will be provided to the user. - -The service takes advantage of [split loop refactoring](https://refactoring.com/catalog/splitLoop.html) to break the package up by resource. The platform requires certain dependencies to be met before a resource is created. For instance, when a label mapping is desired for a new label and bucket, we must guarantee the label exists before creating the label mapping. To accomplish this, labels are always applied first. You can see it in action [here](https://github.com/influxdata/influxdb/blob/c926accb42d87c407bcac6bbda753f9a03f9ec95/pkger/service.go#L1110-L1148): - -```go -func (s *Service) Apply(ctx context.Context, orgID, userID influxdb.ID, pkg *Pkg, opts ...ApplyOptFn) (sum Summary, e error) { - // snipping preceding code - - coordinator := &rollbackCoordinator{sem: make(chan struct{}, s.applyReqLimit)} - defer coordinator.rollback(s.log, &e, orgID) - - // each grouping here runs for its entirety, then returns an error that - // is indicative of running all appliers provided. For instance, the labels - // may have 1 variable fail and one of the buckets fails. The errors aggregate so - // the caller will be informed of both the failed label variable and the failed bucket. - // the groupings here allow for steps to occur before exiting. The first step is - // adding the dependencies, resources that are associated by other resources. Then the - // primary resources. Here we get all the errors associated with them. - // If those are all good, then we run the secondary (dependent) resources which - // rely on the primary resources having been created. - appliers := [][]applier{ - { - // adds secrets that are referenced in the pkg, this allows the user to - // provide data that does not rest in the pkg. - s.applySecrets(opt.MissingSecrets), - }, - { - // deps for primary resources - s.applyLabels(pkg.labels()), - }, - { - // primary resources, can have relationships to labels - s.applyVariables(pkg.variables()), - s.applyBuckets(pkg.buckets()), - s.applyChecks(pkg.checks()), - s.applyDashboards(pkg.dashboards()), - s.applyNotificationEndpoints(pkg.notificationEndpoints()), - s.applyTasks(pkg.tasks()), - s.applyTelegrafs(pkg.telegrafs()), - }, - } - - for _, group := range appliers { - if err := coordinator.runTilEnd(ctx, orgID, userID, group...); err != nil { - return Summary{}, internalErr(err) - } - } - - // snipping succeeding code - - return pkg.Summary(), nil -} -``` - -Looking at the above, you may have noticed we have groups of appliers. The second group contains the label resources. Each group's individual resources are applied concurrently. The `coordinator.runTilEnd(ctx, orgID, userID, group...)` call takes the group, fans out all the state changes, and processes the writes concurrently. The label resources are guaranteed to have succeeded before the primary resources, which can have relationships to labels, are processed. - -When an issue is encountered that cannot be recovered from, an error is returned, and upon seeing that error we roll back all changes. The `defer coordinator.rollback(s.log, &e, orgID)` line rolls back all resources to their preexisting state. For a more in-depth look at that, check out [here](https://github.com/influxdata/influxdb/blob/c926accb42d87c407bcac6bbda753f9a03f9ec95/pkger/service.go#L2118-L2167).
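To illustrate the coordinator pattern in isolation, here is a deliberately simplified model of what `runTilEnd` provides: fan a group of appliers out concurrently behind a semaphore sized by the apply request limit, aggregate every error, and let the deferred rollback fire when anything failed. This is not the actual `rollbackCoordinator` implementation linked above, just a sketch of the behavior (imports omitted):

```go
// runGroup runs each applier concurrently, bounded by limit, and reports all
// failures at once so the caller learns about every problem in the group.
func runGroup(ctx context.Context, limit int, appliers ...func(context.Context) error) error {
	sem := make(chan struct{}, limit)
	errCh := make(chan error, len(appliers))

	var wg sync.WaitGroup
	for _, apply := range appliers {
		wg.Add(1)
		go func(apply func(context.Context) error) {
			defer wg.Done()
			sem <- struct{}{} // respect the apply request limit
			defer func() { <-sem }()
			errCh <- apply(ctx)
		}(apply)
	}
	wg.Wait()
	close(errCh)

	var errs []string
	for err := range errCh {
		if err != nil {
			errs = append(errs, err.Error())
		}
	}
	if len(errs) > 0 {
		// a non-nil return is what triggers the deferred rollback in the real service
		return fmt.Errorf("applying group: %s", strings.Join(errs, "; "))
	}
	return nil
}
```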
- -#### Exporting existing resources as a package - -If a user has put a lot of effort into creating dashboards, notifications, and telegraf configs, we give them the ability to export that work in the shape of a package :-). This enables them to both share that work with the community or their org, and also source control the changes to dashboards. - -Resources can be exported all at once via the export by organization, by specific resource IDs, by a combination of the above, or with advanced filtering (i.e. by label name or resource type). You can read up more on the export options [here](https://github.com/influxdata/influxdb/blob/c926accb42d87c407bcac6bbda753f9a03f9ec95/pkger/service.go#L280-L330). - -Each resource that is exported is assigned a unique `metadata.name` entry. The names are generated and are not strictly required to remain in that shape. If a user decides to use `metadata.name` as the name of the resource, they are free to do so. The only requirement is that within a package every resource has a unique `metadata.name` per its kind. For example, each resource kind's `metadata.name` field should be unique amongst all resources of the same kind within a package. - -> Each label should have a unique `metadata.name` field amongst all labels in the package. - -#### Initializing a stack - -When creating a stack we create a stub stack record that contains all the metadata about that stack. Optionally, a user may set URLs in the stack config. These URLs may be used to apply packages from a remote location (i.e. an S3 bucket). A stack looks like the following: - -```go -type ( - // Stack is an identifier for stateful application of a package(s). This stack - // will map created resources from the pkg(s) to existing resources on the - // platform. This stack is updated only after side effects of applying a pkg. - // If the pkg is applied, and no changes are had, then the stack is not updated. - Stack struct { - ID influxdb.ID - OrgID influxdb.ID - Name string - Desc string - URLs []url.URL - Resources []StackResource - - influxdb.CRUDLog - } - - // StackResource is a record for an individual resource side effect generated from - // applying a pkg. - StackResource struct { - APIVersion string - ID influxdb.ID - Kind Kind - Name string - } -) -``` diff --git a/pkger/clone_resource.go b/pkger/clone_resource.go deleted file mode 100644 index e84a9b0d4f8..00000000000 --- a/pkger/clone_resource.go +++ /dev/null @@ -1,1513 +0,0 @@ -package pkger - -import ( - "context" - "errors" - "fmt" - "regexp" - "sort" - "strings" - - "github.com/influxdata/influxdb/v2" - ierrors "github.com/influxdata/influxdb/v2/kit/errors" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/notification" - icheck "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/rule" - "github.com/influxdata/influxdb/v2/pkger/internal/wordplay" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -var idGenerator = snowflake.NewDefaultIDGenerator() - -// NameGenerator generates a random name. Includes an optional fuzz option to -// further randomize the name. -type NameGenerator func() string - -// ResourceToClone is a resource that will be cloned.
-type ResourceToClone struct { - Kind Kind `json:"kind"` - ID platform.ID `json:"id,omitempty"` - Name string `json:"name"` - // note(jsteenb2): For time being we'll allow this internally, but not externally. A lot of - // issues to account for when exposing this to the outside world. Not something I'm keen - // to accommodate at this time. - MetaName string `json:"-"` -} - -// OK validates a resource clone is viable. -func (r ResourceToClone) OK() error { - if err := r.Kind.OK(); err != nil { - return err - } - if r.ID == platform.ID(0) && len(r.Name) == 0 { - return errors.New("must provide an ID or name") - } - return nil -} - -var kindPriorities = map[Kind]int{ - KindLabel: 1, - KindBucket: 2, - KindCheck: 3, - KindCheckDeadman: 4, - KindCheckThreshold: 5, - KindNotificationEndpoint: 6, - KindNotificationEndpointHTTP: 7, - KindNotificationEndpointPagerDuty: 8, - KindNotificationEndpointSlack: 9, - KindNotificationRule: 10, - KindTask: 11, - KindVariable: 12, - KindDashboard: 13, - KindTelegraf: 14, -} - -type exportKey struct { - orgID platform.ID - id platform.ID - name string - kind Kind -} - -func newExportKey(orgID, id platform.ID, k Kind, name string) exportKey { - return exportKey{ - orgID: orgID, - id: id, - name: name, - kind: k, - } -} - -type resourceExporter struct { - nameGen NameGenerator - - bucketSVC influxdb.BucketService - checkSVC influxdb.CheckService - dashSVC influxdb.DashboardService - labelSVC influxdb.LabelService - endpointSVC influxdb.NotificationEndpointService - ruleSVC influxdb.NotificationRuleStore - taskSVC taskmodel.TaskService - teleSVC influxdb.TelegrafConfigStore - varSVC influxdb.VariableService - - mObjects map[exportKey]Object - mPkgNames map[string]bool - mStackResources map[exportKey]StackResource -} - -func newResourceExporter(svc *Service) *resourceExporter { - return &resourceExporter{ - nameGen: wordplay.GetRandomName, - bucketSVC: svc.bucketSVC, - checkSVC: svc.checkSVC, - dashSVC: svc.dashSVC, - labelSVC: svc.labelSVC, - endpointSVC: svc.endpointSVC, - ruleSVC: svc.ruleSVC, - taskSVC: svc.taskSVC, - teleSVC: svc.teleSVC, - varSVC: svc.varSVC, - mObjects: make(map[exportKey]Object), - mPkgNames: make(map[string]bool), - mStackResources: make(map[exportKey]StackResource), - } -} - -func (ex *resourceExporter) Export(ctx context.Context, resourcesToClone []ResourceToClone, labelNames ...string) error { - mLabelIDsToMetaName := make(map[platform.ID]string) - for _, r := range resourcesToClone { - if !r.Kind.is(KindLabel) || r.MetaName == "" { - continue - } - mLabelIDsToMetaName[r.ID] = r.MetaName - } - - cloneAssFn, err := ex.resourceCloneAssociationsGen(ctx, mLabelIDsToMetaName, labelNames...) - if err != nil { - return err - } - - resourcesToClone = uniqResourcesToClone(resourcesToClone) - // sorting this in priority order guarantees that the dependencies/associations - // for a resource are handled prior to the resource being processed. - // i.e. if a bucket depends on a label, then labels need to be run first - // to guarantee they are available before a bucket is exported. 
- sort.Slice(resourcesToClone, func(i, j int) bool { - iName, jName := resourcesToClone[i].Name, resourcesToClone[j].Name - iKind, jKind := resourcesToClone[i].Kind, resourcesToClone[j].Kind - - if iKind.is(jKind) { - return iName < jName - } - return kindPriorities[iKind] < kindPriorities[jKind] - }) - - for _, r := range resourcesToClone { - err := ex.resourceCloneToKind(ctx, r, cloneAssFn) - if err != nil { - return internalErr(fmt.Errorf("failed to clone resource: resource_id=%s resource_kind=%s err=%q", r.ID, r.Kind, err)) - } - } - - return nil -} - -func (ex *resourceExporter) Objects() []Object { - objects := make([]Object, 0, len(ex.mObjects)) - for _, obj := range ex.mObjects { - objects = append(objects, obj) - } - - return sortObjects(objects) -} - -func (ex *resourceExporter) StackResources() []StackResource { - resources := make([]StackResource, 0, len(ex.mStackResources)) - for _, res := range ex.mStackResources { - resources = append(resources, res) - } - return resources -} - -// we only need an id when we have resources that are not unique by name via the -// metastore. resoureces that are unique by name will be provided a default stamp -// making looksup unique since each resource will be unique by name. -const uniqByNameResID = platform.ID(0) - -type cloneAssociationsFn func(context.Context, ResourceToClone) (associations []ObjectAssociation, skipResource bool, err error) - -func (ex *resourceExporter) resourceCloneToKind(ctx context.Context, r ResourceToClone, cFn cloneAssociationsFn) (e error) { - defer func() { - if e != nil { - e = ierrors.Wrap(e, "cloning resource") - } - }() - - ass, skipResource, err := cFn(ctx, r) - if err != nil { - return err - } - if skipResource { - return nil - } - - mapResource := func(orgID, uniqResID platform.ID, k Kind, object Object) { - // overwrite the default metadata.name field with export generated one here - metaName := r.MetaName - if r.MetaName == "" { - metaName = ex.uniqName() - } - - stackResource := StackResource{ - APIVersion: APIVersion, - ID: r.ID, - MetaName: metaName, - Kind: r.Kind, - } - for _, a := range ass { - stackResource.Associations = append(stackResource.Associations, StackResourceAssociation(a)) - } - - object.SetMetadataName(metaName) - object.AddAssociations(ass...) 
- key := newExportKey(orgID, uniqResID, k, object.Spec.stringShort(fieldName)) - ex.mObjects[key] = object - ex.mStackResources[key] = stackResource - } - - switch { - case r.Kind.is(KindBucket): - filter := influxdb.BucketFilter{} - if r.ID != platform.ID(0) { - filter.ID = &r.ID - } - if len(r.Name) > 0 { - filter.Name = &r.Name - } - - bkts, n, err := ex.bucketSVC.FindBuckets(ctx, filter) - if err != nil { - return err - } - if n < 1 { - return errors.New("no buckets found") - } - - for _, bkt := range bkts { - mapResource(bkt.OrgID, bkt.ID, KindBucket, BucketToObject(r.Name, *bkt)) - } - case r.Kind.is(KindCheck), r.Kind.is(KindCheckDeadman), r.Kind.is(KindCheckThreshold): - filter := influxdb.CheckFilter{} - if r.ID != platform.ID(0) { - filter.ID = &r.ID - } - if len(r.Name) > 0 { - filter.Name = &r.Name - } - chs, n, err := ex.checkSVC.FindChecks(ctx, filter) - if err != nil { - return err - } - if n < 1 { - return errors.New("no checks found") - } - - for _, ch := range chs { - mapResource(ch.GetOrgID(), ch.GetID(), KindCheck, CheckToObject(r.Name, ch)) - } - case r.Kind.is(KindDashboard): - var ( - hasID bool - filter = influxdb.DashboardFilter{} - ) - if r.ID != platform.ID(0) { - hasID = true - filter.IDs = []*platform.ID{&r.ID} - } - - dashes, _, err := ex.dashSVC.FindDashboards(ctx, filter, influxdb.DefaultDashboardFindOptions) - if err != nil { - return err - } - - var mapped bool - for _, dash := range dashes { - if (!hasID && len(r.Name) > 0 && dash.Name != r.Name) || (hasID && dash.ID != r.ID) { - continue - } - - for _, cell := range dash.Cells { - v, err := ex.dashSVC.GetDashboardCellView(ctx, dash.ID, cell.ID) - if err != nil { - continue - } - cell.View = v - } - - mapResource(dash.OrganizationID, dash.ID, KindDashboard, DashboardToObject(r.Name, *dash)) - mapped = true - } - - if !mapped { - return errors.New("no dashboards found") - } - case r.Kind.is(KindLabel): - switch { - case r.ID != platform.ID(0): - l, err := ex.labelSVC.FindLabelByID(ctx, r.ID) - if err != nil { - return err - } - - mapResource(l.OrgID, uniqByNameResID, KindLabel, LabelToObject(r.Name, *l)) - case len(r.Name) > 0: - labels, err := ex.labelSVC.FindLabels(ctx, influxdb.LabelFilter{Name: r.Name}) - if err != nil { - return err - } - - for _, l := range labels { - mapResource(l.OrgID, uniqByNameResID, KindLabel, LabelToObject(r.Name, *l)) - } - } - case r.Kind.is(KindNotificationEndpoint), - r.Kind.is(KindNotificationEndpointHTTP), - r.Kind.is(KindNotificationEndpointPagerDuty), - r.Kind.is(KindNotificationEndpointSlack): - var endpoints []influxdb.NotificationEndpoint - - switch { - case r.ID != platform.ID(0): - notifEndpoint, err := ex.endpointSVC.FindNotificationEndpointByID(ctx, r.ID) - if err != nil { - return err - } - endpoints = append(endpoints, notifEndpoint) - case len(r.Name) != 0: - allEndpoints, _, err := ex.endpointSVC.FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{}) - if err != nil { - return err - } - - for _, notifEndpoint := range allEndpoints { - if notifEndpoint.GetName() != r.Name || notifEndpoint == nil { - continue - } - endpoints = append(endpoints, notifEndpoint) - } - } - - if len(endpoints) == 0 { - return errors.New("no notification endpoints found") - } - - for _, e := range endpoints { - mapResource(e.GetOrgID(), uniqByNameResID, KindNotificationEndpoint, NotificationEndpointToObject(r.Name, e)) - } - case r.Kind.is(KindNotificationRule): - var rules []influxdb.NotificationRule - - switch { - case r.ID != platform.ID(0): - r, err := 
ex.ruleSVC.FindNotificationRuleByID(ctx, r.ID) - if err != nil { - return err - } - rules = append(rules, r) - case len(r.Name) != 0: - allRules, _, err := ex.ruleSVC.FindNotificationRules(ctx, influxdb.NotificationRuleFilter{}) - if err != nil { - return err - } - - for _, rule := range allRules { - if rule.GetName() != r.Name { - continue - } - rules = append(rules, rule) - } - } - - if len(rules) == 0 { - return errors.New("no notification rules found") - } - - for _, rule := range rules { - ruleEndpoint, err := ex.endpointSVC.FindNotificationEndpointByID(ctx, rule.GetEndpointID()) - if err != nil { - return err - } - - endpointKey := newExportKey(ruleEndpoint.GetOrgID(), uniqByNameResID, KindNotificationEndpoint, ruleEndpoint.GetName()) - object, ok := ex.mObjects[endpointKey] - if !ok { - mapResource(ruleEndpoint.GetOrgID(), uniqByNameResID, KindNotificationEndpoint, NotificationEndpointToObject("", ruleEndpoint)) - object = ex.mObjects[endpointKey] - } - endpointObjectName := object.Name() - - mapResource(rule.GetOrgID(), rule.GetID(), KindNotificationRule, NotificationRuleToObject(r.Name, endpointObjectName, rule)) - } - case r.Kind.is(KindTask): - switch { - case r.ID != platform.ID(0): - t, err := ex.taskSVC.FindTaskByID(ctx, r.ID) - if err != nil { - return err - } - mapResource(t.OrganizationID, t.ID, KindTask, TaskToObject(r.Name, *t)) - case len(r.Name) > 0: - tasks, n, err := ex.taskSVC.FindTasks(ctx, taskmodel.TaskFilter{Name: &r.Name}) - if err != nil { - return err - } - if n < 1 { - return errors.New("no tasks found") - } - - for _, t := range tasks { - mapResource(t.OrganizationID, t.ID, KindTask, TaskToObject(r.Name, *t)) - } - } - case r.Kind.is(KindTelegraf): - switch { - case r.ID != platform.ID(0): - t, err := ex.teleSVC.FindTelegrafConfigByID(ctx, r.ID) - if err != nil { - return err - } - mapResource(t.OrgID, t.ID, KindTelegraf, TelegrafToObject(r.Name, *t)) - case len(r.Name) > 0: - telegrafs, _, err := ex.teleSVC.FindTelegrafConfigs(ctx, influxdb.TelegrafConfigFilter{}) - if err != nil { - return err - } - - var mapped bool - for _, t := range telegrafs { - if t.Name != r.Name { - continue - } - - mapResource(t.OrgID, t.ID, KindTelegraf, TelegrafToObject(r.Name, *t)) - mapped = true - } - if !mapped { - return errors.New("no telegraf configs found") - } - - } - case r.Kind.is(KindVariable): - switch { - case r.ID != platform.ID(0): - v, err := ex.varSVC.FindVariableByID(ctx, r.ID) - if err != nil { - return err - } - mapResource(v.OrganizationID, uniqByNameResID, KindVariable, VariableToObject(r.Name, *v)) - case len(r.Name) > 0: - variables, err := ex.varSVC.FindVariables(ctx, influxdb.VariableFilter{}) - if err != nil { - return err - } - - var mapped bool - for _, v := range variables { - if v.Name != r.Name { - continue - } - - mapResource(v.OrganizationID, uniqByNameResID, KindVariable, VariableToObject(r.Name, *v)) - mapped = true - } - if !mapped { - return errors.New("no variables found") - } - } - default: - return errors.New("unsupported kind provided: " + string(r.Kind)) - } - - return nil -} - -func (ex *resourceExporter) resourceCloneAssociationsGen(ctx context.Context, labelIDsToMetaName map[platform.ID]string, labelNames ...string) (cloneAssociationsFn, error) { - mLabelNames := make(map[string]bool) - for _, labelName := range labelNames { - mLabelNames[labelName] = true - } - - mLabelIDs, err := getLabelIDMap(ctx, ex.labelSVC, labelNames) - if err != nil { - return nil, err - } - - cloneFn := func(ctx context.Context, r ResourceToClone) 
([]ObjectAssociation, bool, error) { - if r.Kind.is(KindUnknown) { - return nil, true, nil - } - if r.Kind.is(KindLabel) { - // check here verifies the label maps to an id of a valid label name - shouldSkip := len(mLabelIDs) > 0 && !mLabelIDs[r.ID] - return nil, shouldSkip, nil - } - - if len(r.Name) > 0 && r.ID == platform.ID(0) { - return nil, false, nil - } - - labels, err := ex.labelSVC.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ - ResourceID: r.ID, - ResourceType: r.Kind.ResourceType(), - }) - if err != nil { - return nil, false, ierrors.Wrap(err, "finding resource labels") - } - - if len(mLabelNames) > 0 { - shouldSkip := true - for _, l := range labels { - if _, ok := mLabelNames[l.Name]; ok { - shouldSkip = false - break - } - } - if shouldSkip { - return nil, true, nil - } - } - - var associations []ObjectAssociation - for _, l := range labels { - if len(mLabelNames) > 0 { - if _, ok := mLabelNames[l.Name]; !ok { - continue - } - } - - labelObject := LabelToObject("", *l) - metaName := labelIDsToMetaName[l.ID] - if metaName == "" { - metaName = ex.uniqName() - } - labelObject.Metadata[fieldName] = metaName - - k := newExportKey(l.OrgID, uniqByNameResID, KindLabel, l.Name) - existing, ok := ex.mObjects[k] - if ok { - associations = append(associations, ObjectAssociation{ - Kind: KindLabel, - MetaName: existing.Name(), - }) - continue - } - associations = append(associations, ObjectAssociation{ - Kind: KindLabel, - MetaName: labelObject.Name(), - }) - ex.mObjects[k] = labelObject - } - sort.Slice(associations, func(i, j int) bool { - return associations[i].MetaName < associations[j].MetaName - }) - return associations, false, nil - } - - return cloneFn, nil -} - -func (ex *resourceExporter) uniqName() string { - return uniqMetaName(ex.nameGen, idGenerator, ex.mPkgNames) -} - -func uniqMetaName(nameGen NameGenerator, idGen platform.IDGenerator, existingNames map[string]bool) string { - uuid := strings.ToLower(idGen.ID().String()) - name := uuid - for i := 1; i < 250; i++ { - name = fmt.Sprintf("%s-%s", nameGen(), uuid[10:]) - if !existingNames[name] { - break - } - } - return name -} - -func uniqResourcesToClone(resources []ResourceToClone) []ResourceToClone { - type key struct { - kind Kind - id platform.ID - } - m := make(map[key]ResourceToClone) - - for i := range resources { - r := resources[i] - rKey := key{kind: r.Kind, id: r.ID} - - kr, ok := m[rKey] - switch { - case ok && kr.Name == r.Name && kr.MetaName == r.MetaName: - case ok && kr.MetaName != "" && r.MetaName == "": - case ok && kr.MetaName == "" && kr.Name != "" && r.Name == "": - default: - m[rKey] = r - } - } - - out := make([]ResourceToClone, 0, len(resources)) - for _, r := range m { - out = append(out, r) - } - return out -} - -// BucketToObject converts a influxdb.Bucket into an Object. 
-func BucketToObject(name string, bkt influxdb.Bucket) Object { - if name == "" { - name = bkt.Name - } - - o := newObject(KindBucket, name) - assignNonZeroStrings(o.Spec, map[string]string{fieldDescription: bkt.Description}) - if bkt.RetentionPeriod != 0 { - o.Spec[fieldBucketRetentionRules] = retentionRules{newRetentionRule(bkt.RetentionPeriod)} - } - return o -} - -func CheckToObject(name string, ch influxdb.Check) Object { - if name == "" { - name = ch.GetName() - } - o := newObject(KindCheck, name) - assignNonZeroStrings(o.Spec, map[string]string{ - fieldDescription: ch.GetDescription(), - fieldStatus: taskmodel.TaskStatusActive, - }) - - assignBase := func(base icheck.Base) { - o.Spec[fieldQuery] = strings.TrimSpace(base.Query.Text) - o.Spec[fieldCheckStatusMessageTemplate] = base.StatusMessageTemplate - assignNonZeroFluxDurs(o.Spec, map[string]*notification.Duration{ - fieldEvery: base.Every, - fieldOffset: base.Offset, - }) - - var tags []Resource - for _, t := range base.Tags { - if t.Valid() != nil { - continue - } - tags = append(tags, Resource{ - fieldKey: t.Key, - fieldValue: t.Value, - }) - } - if len(tags) > 0 { - o.Spec[fieldCheckTags] = tags - } - } - - switch cT := ch.(type) { - case *icheck.Deadman: - o.Kind = KindCheckDeadman - assignBase(cT.Base) - assignNonZeroFluxDurs(o.Spec, map[string]*notification.Duration{ - fieldCheckTimeSince: cT.TimeSince, - fieldCheckStaleTime: cT.StaleTime, - }) - o.Spec[fieldLevel] = cT.Level.String() - assignNonZeroBools(o.Spec, map[string]bool{fieldCheckReportZero: cT.ReportZero}) - case *icheck.Threshold: - o.Kind = KindCheckThreshold - assignBase(cT.Base) - var thresholds []Resource - for _, th := range cT.Thresholds { - thresholds = append(thresholds, convertThreshold(th)) - } - o.Spec[fieldCheckThresholds] = thresholds - } - return o -} - -func convertThreshold(th icheck.ThresholdConfig) Resource { - r := Resource{fieldLevel: th.GetLevel().String()} - - assignLesser := func(threshType thresholdType, allValues bool, val float64) { - r[fieldType] = string(threshType) - assignNonZeroBools(r, map[string]bool{fieldCheckAllValues: allValues}) - r[fieldValue] = val - } - - switch realType := th.(type) { - case icheck.Lesser: - assignLesser(thresholdTypeLesser, realType.AllValues, realType.Value) - case *icheck.Lesser: - assignLesser(thresholdTypeLesser, realType.AllValues, realType.Value) - case icheck.Greater: - assignLesser(thresholdTypeGreater, realType.AllValues, realType.Value) - case *icheck.Greater: - assignLesser(thresholdTypeGreater, realType.AllValues, realType.Value) - case icheck.Range: - assignRangeThreshold(r, realType) - case *icheck.Range: - assignRangeThreshold(r, *realType) - } - - return r -} - -func assignRangeThreshold(r Resource, rangeThreshold icheck.Range) { - thType := thresholdTypeOutsideRange - if rangeThreshold.Within { - thType = thresholdTypeInsideRange - } - r[fieldType] = string(thType) - assignNonZeroBools(r, map[string]bool{fieldCheckAllValues: rangeThreshold.AllValues}) - r[fieldMax] = rangeThreshold.Max - r[fieldMin] = rangeThreshold.Min -} - -func convertCellView(cell influxdb.Cell) chart { - var name string - if cell.View != nil { - name = cell.View.Name - } - ch := chart{ - Name: name, - Height: int(cell.H), - Width: int(cell.W), - XPos: int(cell.X), - YPos: int(cell.Y), - } - - setCommon := func(k chartKind, iColors []influxdb.ViewColor, dec influxdb.DecimalPlaces, iQueries []influxdb.DashboardQuery) { - ch.Kind = k - ch.Colors = convertColors(iColors) - ch.DecimalPlaces = int(dec.Digits) - 
ch.EnforceDecimals = dec.IsEnforced - ch.Queries = convertQueries(iQueries) - } - - setNoteFixes := func(note string, noteOnEmpty bool, prefix, suffix string) { - ch.Note = note - ch.NoteOnEmpty = noteOnEmpty - ch.Prefix = prefix - ch.Suffix = suffix - } - - setStaticLegend := func(sl influxdb.StaticLegend) { - ch.StaticLegend.ColorizeRows = sl.ColorizeRows - ch.StaticLegend.HeightRatio = sl.HeightRatio - ch.StaticLegend.Show = sl.Show - ch.StaticLegend.Opacity = sl.Opacity - ch.StaticLegend.OrientationThreshold = sl.OrientationThreshold - ch.StaticLegend.ValueAxis = sl.ValueAxis - ch.StaticLegend.WidthRatio = sl.WidthRatio - } - - props := cell.View.Properties - switch p := props.(type) { - case influxdb.GaugeViewProperties: - setCommon(chartKindGauge, p.ViewColors, p.DecimalPlaces, p.Queries) - setNoteFixes(p.Note, p.ShowNoteWhenEmpty, p.Prefix, p.Suffix) - ch.TickPrefix = p.TickPrefix - ch.TickSuffix = p.TickSuffix - case influxdb.GeoViewProperties: - ch.Kind = chartKindGeo - ch.Queries = convertQueries(p.Queries) - ch.Zoom = p.Zoom - ch.Center = center{Lat: p.Center.Lat, Lon: p.Center.Lon} - ch.MapStyle = p.MapStyle - ch.AllowPanAndZoom = p.AllowPanAndZoom - ch.DetectCoordinateFields = p.DetectCoordinateFields - ch.Colors = convertColors(p.ViewColor) - ch.GeoLayers = convertGeoLayers(p.GeoLayers) - ch.Note = p.Note - ch.NoteOnEmpty = p.ShowNoteWhenEmpty - case influxdb.HeatmapViewProperties: - ch.Kind = chartKindHeatMap - ch.Queries = convertQueries(p.Queries) - ch.Colors = stringsToColors(p.ViewColors) - ch.XCol = p.XColumn - ch.GenerateXAxisTicks = p.GenerateXAxisTicks - ch.XTotalTicks = p.XTotalTicks - ch.XTickStart = p.XTickStart - ch.XTickStep = p.XTickStep - ch.YCol = p.YColumn - ch.GenerateYAxisTicks = p.GenerateYAxisTicks - ch.YTotalTicks = p.YTotalTicks - ch.YTickStart = p.YTickStart - ch.YTickStep = p.YTickStep - ch.Axes = []axis{ - {Label: p.XAxisLabel, Prefix: p.XPrefix, Suffix: p.XSuffix, Name: "x", Domain: p.XDomain}, - {Label: p.YAxisLabel, Prefix: p.YPrefix, Suffix: p.YSuffix, Name: "y", Domain: p.YDomain}, - } - ch.Note = p.Note - ch.NoteOnEmpty = p.ShowNoteWhenEmpty - ch.BinSize = int(p.BinSize) - ch.LegendColorizeRows = p.LegendColorizeRows - ch.LegendHide = p.LegendHide - ch.LegendOpacity = float64(p.LegendOpacity) - ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold) - case influxdb.HistogramViewProperties: - ch.Kind = chartKindHistogram - ch.Queries = convertQueries(p.Queries) - ch.Colors = convertColors(p.ViewColors) - ch.FillColumns = p.FillColumns - ch.XCol = p.XColumn - ch.Axes = []axis{{Label: p.XAxisLabel, Name: "x", Domain: p.XDomain}} - ch.Note = p.Note - ch.NoteOnEmpty = p.ShowNoteWhenEmpty - ch.BinCount = p.BinCount - ch.Position = p.Position - ch.LegendColorizeRows = p.LegendColorizeRows - ch.LegendHide = p.LegendHide - ch.LegendOpacity = float64(p.LegendOpacity) - ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold) - case influxdb.MarkdownViewProperties: - ch.Kind = chartKindMarkdown - ch.Note = p.Note - case influxdb.LinePlusSingleStatProperties: - setCommon(chartKindSingleStatPlusLine, p.ViewColors, p.DecimalPlaces, p.Queries) - setNoteFixes(p.Note, p.ShowNoteWhenEmpty, p.Prefix, p.Suffix) - ch.StaticLegend = StaticLegend{} - setStaticLegend(p.StaticLegend) - ch.Axes = convertAxes(p.Axes) - ch.Shade = p.ShadeBelow - ch.HoverDimension = p.HoverDimension - ch.XCol = p.XColumn - ch.GenerateXAxisTicks = p.GenerateXAxisTicks - ch.XTotalTicks = p.XTotalTicks - ch.XTickStart = p.XTickStart - ch.XTickStep = p.XTickStep - ch.YCol = 
p.YColumn - ch.GenerateYAxisTicks = p.GenerateYAxisTicks - ch.YTotalTicks = p.YTotalTicks - ch.YTickStart = p.YTickStart - ch.YTickStep = p.YTickStep - ch.Position = p.Position - ch.LegendColorizeRows = p.LegendColorizeRows - ch.LegendHide = p.LegendHide - ch.LegendOpacity = float64(p.LegendOpacity) - ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold) - case influxdb.SingleStatViewProperties: - setCommon(chartKindSingleStat, p.ViewColors, p.DecimalPlaces, p.Queries) - setNoteFixes(p.Note, p.ShowNoteWhenEmpty, p.Prefix, p.Suffix) - ch.TickPrefix = p.TickPrefix - ch.TickSuffix = p.TickSuffix - case influxdb.MosaicViewProperties: - ch.Kind = chartKindMosaic - ch.Queries = convertQueries(p.Queries) - ch.Colors = stringsToColors(p.ViewColors) - ch.HoverDimension = p.HoverDimension - ch.XCol = p.XColumn - ch.GenerateXAxisTicks = p.GenerateXAxisTicks - ch.XTotalTicks = p.XTotalTicks - ch.XTickStart = p.XTickStart - ch.XTickStep = p.XTickStep - ch.YLabelColumnSeparator = p.YLabelColumnSeparator - ch.YLabelColumns = p.YLabelColumns - ch.YSeriesColumns = p.YSeriesColumns - ch.Axes = []axis{ - {Label: p.XAxisLabel, Prefix: p.XPrefix, Suffix: p.XSuffix, Name: "x", Domain: p.XDomain}, - {Label: p.YAxisLabel, Prefix: p.YPrefix, Suffix: p.YSuffix, Name: "y", Domain: p.YDomain}, - } - ch.Note = p.Note - ch.NoteOnEmpty = p.ShowNoteWhenEmpty - ch.LegendColorizeRows = p.LegendColorizeRows - ch.LegendHide = p.LegendHide - ch.LegendOpacity = float64(p.LegendOpacity) - ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold) - case influxdb.ScatterViewProperties: - ch.Kind = chartKindScatter - ch.Queries = convertQueries(p.Queries) - ch.Colors = stringsToColors(p.ViewColors) - ch.XCol = p.XColumn - ch.GenerateXAxisTicks = p.GenerateXAxisTicks - ch.XTotalTicks = p.XTotalTicks - ch.XTickStart = p.XTickStart - ch.XTickStep = p.XTickStep - ch.YCol = p.YColumn - ch.GenerateYAxisTicks = p.GenerateYAxisTicks - ch.YTotalTicks = p.YTotalTicks - ch.YTickStart = p.YTickStart - ch.YTickStep = p.YTickStep - ch.Axes = []axis{ - {Label: p.XAxisLabel, Prefix: p.XPrefix, Suffix: p.XSuffix, Name: "x", Domain: p.XDomain}, - {Label: p.YAxisLabel, Prefix: p.YPrefix, Suffix: p.YSuffix, Name: "y", Domain: p.YDomain}, - } - ch.Note = p.Note - ch.NoteOnEmpty = p.ShowNoteWhenEmpty - ch.LegendColorizeRows = p.LegendColorizeRows - ch.LegendHide = p.LegendHide - ch.LegendOpacity = float64(p.LegendOpacity) - ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold) - case influxdb.TableViewProperties: - setCommon(chartKindTable, p.ViewColors, p.DecimalPlaces, p.Queries) - setNoteFixes(p.Note, p.ShowNoteWhenEmpty, "", "") - ch.TimeFormat = p.TimeFormat - ch.TableOptions = tableOptions{ - VerticalTimeAxis: p.TableOptions.VerticalTimeAxis, - SortByField: p.TableOptions.SortBy.InternalName, - Wrapping: p.TableOptions.Wrapping, - FixFirstColumn: p.TableOptions.FixFirstColumn, - } - for _, fieldOpt := range p.FieldOptions { - ch.FieldOptions = append(ch.FieldOptions, fieldOption{ - FieldName: fieldOpt.InternalName, - DisplayName: fieldOpt.DisplayName, - Visible: fieldOpt.Visible, - }) - } - case influxdb.BandViewProperties: - setCommon(chartKindBand, p.ViewColors, influxdb.DecimalPlaces{}, p.Queries) - setNoteFixes(p.Note, p.ShowNoteWhenEmpty, "", "") - ch.StaticLegend = StaticLegend{} - setStaticLegend(p.StaticLegend) - ch.Axes = convertAxes(p.Axes) - ch.Geom = p.Geom - ch.HoverDimension = p.HoverDimension - ch.XCol = p.XColumn - ch.GenerateXAxisTicks = p.GenerateXAxisTicks - ch.XTotalTicks = p.XTotalTicks - 
ch.XTickStart = p.XTickStart - ch.XTickStep = p.XTickStep - ch.YCol = p.YColumn - ch.GenerateYAxisTicks = p.GenerateYAxisTicks - ch.YTotalTicks = p.YTotalTicks - ch.YTickStart = p.YTickStart - ch.YTickStep = p.YTickStep - ch.UpperColumn = p.UpperColumn - ch.MainColumn = p.MainColumn - ch.LowerColumn = p.LowerColumn - ch.LegendColorizeRows = p.LegendColorizeRows - ch.LegendHide = p.LegendHide - ch.LegendOpacity = float64(p.LegendOpacity) - ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold) - case influxdb.XYViewProperties: - setCommon(chartKindXY, p.ViewColors, influxdb.DecimalPlaces{}, p.Queries) - setNoteFixes(p.Note, p.ShowNoteWhenEmpty, "", "") - ch.StaticLegend = StaticLegend{} - setStaticLegend(p.StaticLegend) - ch.Axes = convertAxes(p.Axes) - ch.Geom = p.Geom - ch.Shade = p.ShadeBelow - ch.HoverDimension = p.HoverDimension - ch.XCol = p.XColumn - ch.GenerateXAxisTicks = p.GenerateXAxisTicks - ch.XTotalTicks = p.XTotalTicks - ch.XTickStart = p.XTickStart - ch.XTickStep = p.XTickStep - ch.YCol = p.YColumn - ch.GenerateYAxisTicks = p.GenerateYAxisTicks - ch.YTotalTicks = p.YTotalTicks - ch.YTickStart = p.YTickStart - ch.YTickStep = p.YTickStep - ch.Position = p.Position - ch.LegendColorizeRows = p.LegendColorizeRows - ch.LegendHide = p.LegendHide - ch.LegendOpacity = float64(p.LegendOpacity) - ch.LegendOrientationThreshold = int(p.LegendOrientationThreshold) - } - - sort.Slice(ch.Axes, func(i, j int) bool { - return ch.Axes[i].Name < ch.Axes[j].Name - }) - return ch -} - -func convertChartToResource(ch chart) Resource { - r := Resource{ - fieldKind: ch.Kind.title(), - fieldName: ch.Name, - fieldChartHeight: ch.Height, - fieldChartWidth: ch.Width, - } - var qq []Resource - for _, q := range ch.Queries { - qq = append(qq, Resource{ - fieldQuery: q.DashboardQuery(), - }) - } - if len(qq) > 0 { - r[fieldChartQueries] = qq - } - if len(ch.Colors) > 0 { - r[fieldChartColors] = ch.Colors - } - if len(ch.Axes) > 0 { - r[fieldChartAxes] = ch.Axes - } - if len(ch.YLabelColumns) > 0 { - r[fieldChartYLabelColumns] = ch.YLabelColumns - } - if len(ch.YSeriesColumns) > 0 { - r[fieldChartYSeriesColumns] = ch.YSeriesColumns - } - if len(ch.UpperColumn) > 0 { - r[fieldChartUpperColumn] = ch.UpperColumn - } - if len(ch.MainColumn) > 0 { - r[fieldChartMainColumn] = ch.MainColumn - } - if len(ch.LowerColumn) > 0 { - r[fieldChartLowerColumn] = ch.LowerColumn - } - if ch.EnforceDecimals { - r[fieldChartDecimalPlaces] = ch.DecimalPlaces - } - - if len(ch.FillColumns) > 0 { - r[fieldChartFillColumns] = ch.FillColumns - } - - if len(ch.GenerateXAxisTicks) > 0 { - r[fieldChartGenerateXAxisTicks] = ch.GenerateXAxisTicks - } - - if len(ch.GenerateYAxisTicks) > 0 { - r[fieldChartGenerateYAxisTicks] = ch.GenerateYAxisTicks - } - - if ch.StaticLegend.HeightRatio >= 0 && ch.StaticLegend.WidthRatio >= 0 { - r[fieldChartStaticLegend] = ch.StaticLegend - } - - if len(ch.GeoLayers) > 0 { - geoLayers := make([]Resource, 0, len(ch.GeoLayers)) - for _, l := range ch.GeoLayers { - lRes := make(Resource) - geoLayers = append(geoLayers, lRes) - assignNonZeroStrings(lRes, map[string]string{ - fieldChartGeoLayerType: l.Type, - fieldChartGeoLayerRadiusField: l.RadiusField, - fieldChartGeoLayerIntensityField: l.IntensityField, - fieldChartGeoLayerColorField: l.ColorField, - }) - assignNonZeroInts(lRes, map[string]int{ - fieldChartGeoLayerRadius: int(l.Radius), - fieldChartGeoLayerBlur: int(l.Blur), - fieldChartGeoLayerSpeed: int(l.Speed), - fieldChartGeoLayerTrackWidth: int(l.TrackWidth), - }) - 
assignNonZeroBools(lRes, map[string]bool{ - fieldChartGeoLayerRandomColors: l.RandomColors, - fieldChartGeoLayerIsClustered: l.IsClustered, - fieldChartGeoLayerInterpolateColors: l.InterpolateColors, - }) - if len(l.ViewColors) > 0 { - lRes[fieldChartGeoLayerViewColors] = l.ViewColors - } - if l.RadiusDimension != nil { - lRes[fieldChartGeoLayerRadiusDimension] = l.RadiusDimension - } - if l.ColorDimension != nil { - lRes[fieldChartGeoLayerColorDimension] = l.ColorDimension - } - if l.IntensityDimension != nil { - lRes[fieldChartGeoLayerIntensityDimension] = l.IntensityDimension - } - } - r[fieldChartGeoLayers] = geoLayers - } - - if zero := new(tableOptions); ch.TableOptions != *zero { - tRes := make(Resource) - assignNonZeroBools(tRes, map[string]bool{ - fieldChartTableOptionVerticalTimeAxis: ch.TableOptions.VerticalTimeAxis, - fieldChartTableOptionFixFirstColumn: ch.TableOptions.FixFirstColumn, - }) - assignNonZeroStrings(tRes, map[string]string{ - fieldChartTableOptionSortBy: ch.TableOptions.SortByField, - fieldChartTableOptionWrapping: ch.TableOptions.Wrapping, - }) - r[fieldChartTableOptions] = tRes - } - - if len(ch.FieldOptions) > 0 { - fieldOpts := make([]Resource, 0, len(ch.FieldOptions)) - for _, fo := range ch.FieldOptions { - fRes := make(Resource) - assignNonZeroBools(fRes, map[string]bool{ - fieldChartFieldOptionVisible: fo.Visible, - }) - assignNonZeroStrings(fRes, map[string]string{ - fieldChartFieldOptionDisplayName: fo.DisplayName, - fieldChartFieldOptionFieldName: fo.FieldName, - }) - fieldOpts = append(fieldOpts, fRes) - } - r[fieldChartFieldOptions] = fieldOpts - } - - assignNonZeroBools(r, map[string]bool{ - fieldChartNoteOnEmpty: ch.NoteOnEmpty, - fieldChartShade: ch.Shade, - fieldChartLegendColorizeRows: ch.LegendColorizeRows, - fieldChartLegendHide: ch.LegendHide, - fieldChartStaticLegendColorizeRows: ch.StaticLegend.ColorizeRows, - fieldChartStaticLegendShow: ch.StaticLegend.Show, - fieldChartGeoAllowPanAndZoom: ch.AllowPanAndZoom, - fieldChartGeoDetectCoordinateFields: ch.DetectCoordinateFields, - }) - - assignNonZeroStrings(r, map[string]string{ - fieldChartNote: ch.Note, - fieldPrefix: ch.Prefix, - fieldSuffix: ch.Suffix, - fieldChartGeom: ch.Geom, - fieldChartXCol: ch.XCol, - fieldChartYCol: ch.YCol, - fieldChartPosition: ch.Position, - fieldChartTickPrefix: ch.TickPrefix, - fieldChartTickSuffix: ch.TickSuffix, - fieldChartTimeFormat: ch.TimeFormat, - fieldChartHoverDimension: ch.HoverDimension, - fieldChartYLabelColumnSeparator: ch.YLabelColumnSeparator, - fieldChartStaticLegendValueAxis: ch.StaticLegend.ValueAxis, - fieldChartGeoMapStyle: ch.MapStyle, - }) - - assignNonZeroInts(r, map[string]int{ - fieldChartXPos: ch.XPos, - fieldChartXTotalTicks: ch.XTotalTicks, - fieldChartYPos: ch.YPos, - fieldChartYTotalTicks: ch.YTotalTicks, - fieldChartBinCount: ch.BinCount, - fieldChartBinSize: ch.BinSize, - fieldChartLegendOrientationThreshold: ch.LegendOrientationThreshold, - fieldChartStaticLegendOrientationThreshold: ch.StaticLegend.OrientationThreshold, - }) - - assignNonZeroFloats(r, map[string]float64{ - fieldChartLegendOpacity: ch.LegendOpacity, - fieldChartStaticLegendOpacity: ch.StaticLegend.Opacity, - fieldChartStaticLegendHeightRatio: ch.StaticLegend.HeightRatio, - fieldChartStaticLegendWidthRatio: ch.StaticLegend.WidthRatio, - fieldChartXTickStart: ch.XTickStart, - fieldChartXTickStep: ch.XTickStep, - fieldChartYTickStart: ch.YTickStart, - fieldChartYTickStep: ch.YTickStep, - fieldChartGeoCenterLon: ch.Center.Lon, - fieldChartGeoCenterLat: ch.Center.Lat, 
- fieldChartGeoZoom: ch.Zoom, - }) - - return r -} - -func convertAxis(name string, a influxdb.Axis) *axis { - return &axis{ - Base: a.Base, - Label: a.Label, - Name: name, - Prefix: a.Prefix, - Scale: a.Scale, - Suffix: a.Suffix, - } -} - -func convertAxes(iAxes map[string]influxdb.Axis) axes { - out := make(axes, 0, len(iAxes)) - for name, a := range iAxes { - out = append(out, *convertAxis(name, a)) - } - return out -} - -func convertColors(iColors []influxdb.ViewColor) colors { - out := make(colors, 0, len(iColors)) - for _, ic := range iColors { - out = append(out, &color{ - ID: ic.ID, - Name: ic.Name, - Type: ic.Type, - Hex: ic.Hex, - Value: flt64Ptr(ic.Value), - }) - } - return out -} - -func convertQueries(iQueries []influxdb.DashboardQuery) queries { - out := make(queries, 0, len(iQueries)) - for _, iq := range iQueries { - out = append(out, query{Query: strings.TrimSpace(iq.Text)}) - } - return out -} - -func convertGeoLayers(iLayers []influxdb.GeoLayer) geoLayers { - out := make(geoLayers, 0, len(iLayers)) - for _, ic := range iLayers { - out = append(out, &geoLayer{ - Type: ic.Type, - RadiusField: ic.RadiusField, - ColorField: ic.ColorField, - IntensityField: ic.IntensityField, - ViewColors: convertColors(ic.ViewColors), - Radius: ic.Radius, - Blur: ic.Blur, - RadiusDimension: convertAxis("radius", ic.RadiusDimension), - ColorDimension: convertAxis("color", ic.ColorDimension), - IntensityDimension: convertAxis("intensity", ic.IntensityDimension), - InterpolateColors: ic.InterpolateColors, - TrackWidth: ic.TrackWidth, - Speed: ic.Speed, - RandomColors: ic.RandomColors, - IsClustered: ic.IsClustered, - }) - } - return out -} - -// DashboardToObject converts an influxdb.Dashboard to an Object. -func DashboardToObject(name string, dash influxdb.Dashboard) Object { - if name == "" { - name = dash.Name - } - - sort.Slice(dash.Cells, func(i, j int) bool { - ic, jc := dash.Cells[i], dash.Cells[j] - if ic.X == jc.X { - return ic.Y < jc.Y - } - return ic.X < jc.X - }) - - charts := make([]Resource, 0, len(dash.Cells)) - for _, cell := range dash.Cells { - if cell.View == nil { - continue - } - ch := convertCellView(*cell) - if !ch.Kind.ok() { - continue - } - charts = append(charts, convertChartToResource(ch)) - } - - o := newObject(KindDashboard, name) - assignNonZeroStrings(o.Spec, map[string]string{ - fieldDescription: dash.Description, - }) - o.Spec[fieldDashCharts] = charts - return o -} - -// LabelToObject converts an influxdb.Label to an Object. -func LabelToObject(name string, l influxdb.Label) Object { - if name == "" { - name = l.Name - } - - o := newObject(KindLabel, name) - assignNonZeroStrings(o.Spec, map[string]string{ - fieldDescription: l.Properties["description"], - fieldLabelColor: l.Properties["color"], - }) - return o -} - -// NotificationEndpointToObject converts an notification endpoint into a pkger Object. 
-func NotificationEndpointToObject(name string, e influxdb.NotificationEndpoint) Object { - if name == "" { - name = e.GetName() - } - - o := newObject(KindNotificationEndpoint, name) - assignNonZeroStrings(o.Spec, map[string]string{ - fieldDescription: e.GetDescription(), - fieldStatus: string(e.GetStatus()), - }) - - switch actual := e.(type) { - case *endpoint.HTTP: - o.Kind = KindNotificationEndpointHTTP - o.Spec[fieldNotificationEndpointHTTPMethod] = actual.Method - o.Spec[fieldNotificationEndpointURL] = actual.URL - o.Spec[fieldType] = actual.AuthMethod - assignNonZeroSecrets(o.Spec, map[string]influxdb.SecretField{ - fieldNotificationEndpointPassword: actual.Password, - fieldNotificationEndpointToken: actual.Token, - fieldNotificationEndpointUsername: actual.Username, - }) - case *endpoint.PagerDuty: - o.Kind = KindNotificationEndpointPagerDuty - o.Spec[fieldNotificationEndpointURL] = actual.ClientURL - assignNonZeroSecrets(o.Spec, map[string]influxdb.SecretField{ - fieldNotificationEndpointRoutingKey: actual.RoutingKey, - }) - case *endpoint.Slack: - o.Kind = KindNotificationEndpointSlack - o.Spec[fieldNotificationEndpointURL] = actual.URL - assignNonZeroSecrets(o.Spec, map[string]influxdb.SecretField{ - fieldNotificationEndpointToken: actual.Token, - }) - } - - return o -} - -// NotificationRuleToObject converts an notification rule into a pkger Object. -func NotificationRuleToObject(name, endpointPkgName string, iRule influxdb.NotificationRule) Object { - if name == "" { - name = iRule.GetName() - } - - o := newObject(KindNotificationRule, name) - o.Spec[fieldNotificationRuleEndpointName] = endpointPkgName - assignNonZeroStrings(o.Spec, map[string]string{ - fieldDescription: iRule.GetDescription(), - }) - - assignBase := func(base rule.Base) { - assignNonZeroFluxDurs(o.Spec, map[string]*notification.Duration{ - fieldEvery: base.Every, - fieldOffset: base.Offset, - }) - - var tagRes []Resource - for _, tRule := range base.TagRules { - tagRes = append(tagRes, Resource{ - fieldKey: tRule.Key, - fieldValue: tRule.Value, - fieldOperator: tRule.Operator.String(), - }) - } - if len(tagRes) > 0 { - o.Spec[fieldNotificationRuleTagRules] = tagRes - } - - var statusRuleRes []Resource - for _, sRule := range base.StatusRules { - sRes := Resource{ - fieldNotificationRuleCurrentLevel: sRule.CurrentLevel.String(), - } - if sRule.PreviousLevel != nil { - sRes[fieldNotificationRulePreviousLevel] = sRule.PreviousLevel.String() - } - statusRuleRes = append(statusRuleRes, sRes) - } - if len(statusRuleRes) > 0 { - o.Spec[fieldNotificationRuleStatusRules] = statusRuleRes - } - } - - switch t := iRule.(type) { - case *rule.HTTP: - assignBase(t.Base) - case *rule.PagerDuty: - assignBase(t.Base) - o.Spec[fieldNotificationRuleMessageTemplate] = t.MessageTemplate - case *rule.Slack: - assignBase(t.Base) - o.Spec[fieldNotificationRuleMessageTemplate] = t.MessageTemplate - assignNonZeroStrings(o.Spec, map[string]string{fieldNotificationRuleChannel: t.Channel}) - } - - return o -} - -// regex used to rip out the hard coded task option stuffs -var taskFluxRegex = regexp.MustCompile(`option task = {(.|\n)*?}`) - -// TaskToObject converts an influxdb.Task into a pkger.Object. 
-func TaskToObject(name string, t taskmodel.Task) Object { - if name == "" { - name = t.Name - } - - query := strings.TrimSpace(taskFluxRegex.ReplaceAllString(t.Flux, "")) - - o := newObject(KindTask, name) - assignNonZeroStrings(o.Spec, map[string]string{ - fieldTaskCron: t.Cron, - fieldDescription: t.Description, - fieldEvery: t.Every, - fieldOffset: durToStr(t.Offset), - fieldQuery: strings.TrimSpace(query), - }) - return o -} - -// TelegrafToObject converts an influxdb.TelegrafConfig into a pkger.Object. -func TelegrafToObject(name string, t influxdb.TelegrafConfig) Object { - if name == "" { - name = t.Name - } - - o := newObject(KindTelegraf, name) - assignNonZeroStrings(o.Spec, map[string]string{ - fieldTelegrafConfig: t.Config, - fieldDescription: t.Description, - }) - return o -} - -// VariableToObject converts an influxdb.Variable to a pkger.Object. -func VariableToObject(name string, v influxdb.Variable) Object { - if name == "" { - name = v.Name - } - - o := newObject(KindVariable, name) - - assignNonZeroStrings(o.Spec, map[string]string{fieldDescription: v.Description}) - - if len(v.Selected) > 0 { - o.Spec[fieldVariableSelected] = v.Selected - } - - args := v.Arguments - if args == nil { - return o - } - o.Spec[fieldType] = args.Type - - switch args.Type { - case fieldArgTypeConstant: - vals, ok := args.Values.(influxdb.VariableConstantValues) - if ok { - o.Spec[fieldValues] = []string(vals) - } - case fieldArgTypeMap: - vals, ok := args.Values.(influxdb.VariableMapValues) - if ok { - o.Spec[fieldValues] = map[string]string(vals) - } - case fieldArgTypeQuery: - vals, ok := args.Values.(influxdb.VariableQueryValues) - if ok { - o.Spec[fieldLanguage] = vals.Language - o.Spec[fieldQuery] = strings.TrimSpace(vals.Query) - } - } - - return o -} - -func newObject(kind Kind, name string) Object { - return Object{ - APIVersion: APIVersion, - Kind: kind, - Metadata: Resource{ - // this timestamp is added to make the resource unique. Should also indicate - // to the end user that this is machine readable and the spec.name field is - // the one they want to edit when a name change is desired. 
- fieldName: strings.ToLower(idGenerator.ID().String()), - }, - Spec: Resource{ - fieldName: name, - }, - } -} - -func assignNonZeroFluxDurs(r Resource, m map[string]*notification.Duration) { - for field, dur := range m { - if dur == nil { - continue - } - if dur.TimeDuration() == 0 { - continue - } - r[field] = dur.TimeDuration().String() - } -} - -func assignNonZeroBools(r Resource, m map[string]bool) { - for k, v := range m { - if v { - r[k] = v - } - } -} - -func assignNonZeroInts(r Resource, m map[string]int) { - for k, v := range m { - if v != 0 { - r[k] = v - } - } -} - -func assignNonZeroFloats(r Resource, m map[string]float64) { - for k, v := range m { - if v != 0 { - r[k] = v - } - } -} - -func assignNonZeroStrings(r Resource, m map[string]string) { - for k, v := range m { - if v != "" { - r[k] = v - } - } -} - -func assignNonZeroSecrets(r Resource, m map[string]influxdb.SecretField) { - for field, secret := range m { - if secret.Key == "" { - continue - } - r[field] = Resource{ - fieldReferencesSecret: Resource{ - fieldKey: secret.Key, - }, - } - } -} - -func stringsToColors(clrs []string) colors { - newColors := make(colors, 0) - for _, x := range clrs { - newColors = append(newColors, &color{Hex: x}) - } - return newColors -} diff --git a/pkger/doc.go b/pkger/doc.go deleted file mode 100644 index 4737fe9c3d4..00000000000 --- a/pkger/doc.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Package pkger implements a means to create and consume reusable -templates for what will eventually come to support all influxdb -resources. - -The parser supports JSON, Jsonnet, and YAML encodings as well as a number -of different ways to read the file/reader/string as it may. While the parser -supports Jsonnet, due to issues in the go-jsonnet implementation, only trusted -input should be given to the parser and therefore the EnableJsonnet() option -must be used with Parse() to enable Jsonnet support. - -As an example, you can use the following to parse and validate a YAML -file and see a summary of its contents: - - newTemplate, err := Parse(EncodingYAML, FromFile(PATH_TO_FILE)) - if err != nil { - panic(err) // handle error as you see fit - } - sum := newTemplate.Summary() - fmt.Println(sum) // do something with the summary - -The parser will validate all contents of the template and provide any -and all fields/entries that failed validation. - -If you wish to use the Template type in your transport layer and let the -transport layer manage the decoding, then you can run the following -to validate the template after the raw decoding is done: - - if err := template.Validate(); err != nil { - panic(err) // handle error as you see fit - } - -If a validation error is encountered during the validation or parsing then -the error returned will be of type *parseErr. The parseErr provides a rich -set of validations failures. There can be numerous failures in a template -and we did our best to inform the caller about them all in a single run. - -If you want to see the effects of a template before applying it to the -organization's influxdb platform, you have the flexibility to dry run the -template and see the outcome of what would happen after it were to be applied. -You may use the following to dry run a template within your organization: - - svc := NewService(serviceOpts...) 
- summary, diff, err := svc.DryRun(ctx, orgID, userID, ApplyWithTemplate(template)) - if err != nil { - panic(err) // handle error as you see fit - } - // explore the summary and diff - -The diff provided here is a diff of the existing state of the platform for -your organization and the concluding the state after the application of a -template. All buckets, labels, and variables, when given a name that already -exists, will not create a new resource, but rather, will edit the existing -resource. If this is not a desired result, then rename your bucket to something -else to avoid the imposed changes applying this template would incur. The summary -provided is a summary of the template itself. If a resource exists all IDs will -be populated for them, if they do not, then they will be zero values. Any zero -value ID is safe to assume is not populated. All influxdb.ID's must be non zero -to be in existence. - -If you would like to apply a template you may use the service to do so. The -following will apply the template in full to the provided organization. - - svc := NewService(serviceOpts...) - summary, err := svc.Apply(ctx, orgID, userID, ApplyWithTemplate(template)) - if err != nil { - panic(err) // handle error as you see fit - } - // explore the summary - -The summary will be populated with valid IDs that were created during the -application of the template. If an error is encountered during the application -of a template, then all changes that had occurred will be rolled back. However, as -a warning for buckets, changes may have incurred destructive changes. The changes -are not applied inside a large transaction, for numerous reasons, but it is -something to be considered. If you have dry run the template before it is to be -applied, then the changes should have been made known to you. If not, then there is -potential loss of data if the changes to a bucket resulted in the retention period -being shortened in the template. - -If you would like to export existing resources into the form of a template, then you -have the ability to do so using the following: - - resourcesToClone := []ResourceToClone{ - { - Kind: KindBucket, - ID: Existing_BUCKET_ID, - Name: "new bucket name" - }, - { - Kind: KindDashboard, - ID: Existing_Dashboard_ID, - }, - { - Kind: KindLabel, - ID: Existing_Label_ID, - }, - { - Kind: KindVarible, - ID: Existing_Var_ID, - }, - } - - svc := NewService(serviceOpts...) - newTemplate, err := svc.Export(ctx, ExportWithExistingResources(resourcesToClone...)) - if err != nil { - panic(err) // handle error as you see fit - } - // explore newly created and validated template - -Things to note about the behavior of exporting existing resources. All label -associations with existing resources will be included in the new template. -However, the variables that are used within a dashboard query will not be added -automatically to the template. Variables will need to be passed in alongside -the dashboard to be added to the template. 
-*/ -package pkger diff --git a/pkger/http_remote_service.go b/pkger/http_remote_service.go deleted file mode 100644 index c22e81b567f..00000000000 --- a/pkger/http_remote_service.go +++ /dev/null @@ -1,346 +0,0 @@ -package pkger - -import ( - "context" - "encoding/json" - "net/http" - - ihttp "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/pkg/httpc" -) - -// HTTPRemoteService provides an http client that is fluent in all things template. -type HTTPRemoteService struct { - Client *httpc.Client -} - -var _ SVC = (*HTTPRemoteService)(nil) - -func (s *HTTPRemoteService) InitStack(ctx context.Context, userID platform.ID, stack StackCreate) (Stack, error) { - reqBody := ReqCreateStack{ - OrgID: stack.OrgID.String(), - Name: stack.Name, - Description: stack.Description, - URLs: stack.TemplateURLs, - } - - var respBody RespStack - err := s.Client. - PostJSON(reqBody, RoutePrefixStacks). - DecodeJSON(&respBody). - Do(ctx) - if err != nil { - return Stack{}, err - } - - return convertRespStackToStack(respBody) -} - -func (s *HTTPRemoteService) UninstallStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) (Stack, error) { - var respBody RespStack - err := s.Client. - Post(httpc.BodyEmpty, RoutePrefixStacks, identifiers.StackID.String(), "/uninstall"). - QueryParams([2]string{"orgID", identifiers.OrgID.String()}). - DecodeJSON(&respBody). - Do(ctx) - if err != nil { - return Stack{}, err - } - - return convertRespStackToStack(respBody) -} - -func (s *HTTPRemoteService) DeleteStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) error { - return s.Client. - Delete(RoutePrefixStacks, identifiers.StackID.String()). - QueryParams([2]string{"orgID", identifiers.OrgID.String()}). - Do(ctx) -} - -func (s *HTTPRemoteService) ListStacks(ctx context.Context, orgID platform.ID, f ListFilter) ([]Stack, error) { - queryParams := [][2]string{{"orgID", orgID.String()}} - for _, name := range f.Names { - queryParams = append(queryParams, [2]string{"name", name}) - } - for _, stackID := range f.StackIDs { - queryParams = append(queryParams, [2]string{"stackID", stackID.String()}) - } - - var resp RespListStacks - err := s.Client. - Get(RoutePrefixStacks). - QueryParams(queryParams...). - DecodeJSON(&resp). - Do(ctx) - if err != nil { - return nil, err - } - - out := make([]Stack, 0, len(resp.Stacks)) - for _, st := range resp.Stacks { - stack, err := convertRespStackToStack(st) - if err != nil { - continue - } - out = append(out, stack) - } - return out, nil -} - -func (s *HTTPRemoteService) ReadStack(ctx context.Context, id platform.ID) (Stack, error) { - var respBody RespStack - err := s.Client. - Get(RoutePrefixStacks, id.String()). - DecodeJSON(&respBody). - Do(ctx) - if err != nil { - return Stack{}, err - } - return convertRespStackToStack(respBody) -} - -func (s *HTTPRemoteService) UpdateStack(ctx context.Context, upd StackUpdate) (Stack, error) { - reqBody := ReqUpdateStack{ - Name: upd.Name, - Description: upd.Description, - TemplateURLs: upd.TemplateURLs, - } - for _, r := range upd.AdditionalResources { - reqBody.AdditionalResources = append(reqBody.AdditionalResources, ReqUpdateStackResource{ - ID: r.ID.String(), - MetaName: r.MetaName, - Kind: r.Kind, - }) - } - - var respBody RespStack - err := s.Client. - PatchJSON(reqBody, RoutePrefixStacks, upd.ID.String()). 
- DecodeJSON(&respBody). - Do(ctx) - if err != nil { - return Stack{}, err - } - - return convertRespStackToStack(respBody) -} - -// Export will produce a template from the parameters provided. -func (s *HTTPRemoteService) Export(ctx context.Context, opts ...ExportOptFn) (*Template, error) { - opt, err := exportOptFromOptFns(opts) - if err != nil { - return nil, err - } - - var orgIDs []ReqExportOrgIDOpt - for _, org := range opt.OrgIDs { - orgIDs = append(orgIDs, ReqExportOrgIDOpt{ - OrgID: org.OrgID.String(), - Filters: struct { - ByLabel []string `json:"byLabel"` - ByResourceKind []Kind `json:"byResourceKind"` - }{ - ByLabel: org.LabelNames, - ByResourceKind: org.ResourceKinds, - }, - }) - } - - reqBody := ReqExport{ - StackID: opt.StackID.String(), - OrgIDs: orgIDs, - Resources: opt.Resources, - } - - var newTemplate *Template - err = s.Client. - PostJSON(reqBody, RoutePrefixTemplates, "/export"). - Decode(func(resp *http.Response) error { - t, err := Parse(EncodingJSON, FromReader(resp.Body, "export")) - newTemplate = t - return err - }). - Do(ctx) - if err != nil { - return nil, err - } - - if err := newTemplate.Validate(ValidWithoutResources()); err != nil { - return nil, err - } - return newTemplate, nil -} - -// DryRun provides a dry run of the template application. The template will be marked verified -// for later calls to Apply. This func will be run on an Apply if it has not been run -// already. -func (s *HTTPRemoteService) DryRun(ctx context.Context, orgID, userID platform.ID, opts ...ApplyOptFn) (ImpactSummary, error) { - return s.apply(ctx, orgID, true, opts...) -} - -// Apply will apply all the resources identified in the provided template. The entire template will be applied -// in its entirety. If a failure happens midway then the entire template will be rolled back to the state -// from before the template was applied. -func (s *HTTPRemoteService) Apply(ctx context.Context, orgID, userID platform.ID, opts ...ApplyOptFn) (ImpactSummary, error) { - return s.apply(ctx, orgID, false, opts...) -} - -func (s *HTTPRemoteService) apply(ctx context.Context, orgID platform.ID, dryRun bool, opts ...ApplyOptFn) (ImpactSummary, error) { - opt := applyOptFromOptFns(opts...) - - var rawTemplate ReqRawTemplate - for _, t := range opt.Templates { - b, err := t.Encode(EncodingJSON) - if err != nil { - return ImpactSummary{}, err - } - rawTemplate.Template = b - rawTemplate.Sources = t.sources - rawTemplate.ContentType = EncodingJSON.String() - } - - reqBody := ReqApply{ - OrgID: orgID.String(), - DryRun: dryRun, - EnvRefs: opt.EnvRefs, - Secrets: opt.MissingSecrets, - RawTemplate: rawTemplate, - } - if opt.StackID != 0 { - stackID := opt.StackID.String() - reqBody.StackID = &stackID - } - - for act := range opt.ResourcesToSkip { - b, err := json.Marshal(act) - if err != nil { - return ImpactSummary{}, influxErr(errors.EInvalid, err) - } - reqBody.RawActions = append(reqBody.RawActions, ReqRawAction{ - Action: string(ActionTypeSkipResource), - Properties: b, - }) - } - for kind := range opt.KindsToSkip { - b, err := json.Marshal(ActionSkipKind{Kind: kind}) - if err != nil { - return ImpactSummary{}, influxErr(errors.EInvalid, err) - } - reqBody.RawActions = append(reqBody.RawActions, ReqRawAction{ - Action: string(ActionTypeSkipKind), - Properties: b, - }) - } - - var resp RespApply - err := s.Client. - PostJSON(reqBody, RoutePrefixTemplates, "/apply"). - DecodeJSON(&resp). - StatusFn(func(resp *http.Response) error { - // valid response code when the template itself has parser errors. 
- // we short circuit on that and allow that response to pass through - // but consume the initial implementation if that does not hold. - if resp.StatusCode == http.StatusUnprocessableEntity { - return nil - } - return ihttp.CheckError(resp) - }). - Do(ctx) - if err != nil { - return ImpactSummary{}, err - } - - impact := ImpactSummary{ - Sources: resp.Sources, - Diff: resp.Diff, - Summary: resp.Summary, - } - - if stackID, err := platform.IDFromString(resp.StackID); err == nil { - impact.StackID = *stackID - } - - return impact, NewParseError(resp.Errors...) -} - -func convertRespStackToStack(respStack RespStack) (Stack, error) { - newStack := Stack{ - CreatedAt: respStack.CreatedAt, - } - id, err := platform.IDFromString(respStack.ID) - if err != nil { - return Stack{}, err - } - newStack.ID = *id - - orgID, err := platform.IDFromString(respStack.OrgID) - if err != nil { - return Stack{}, err - } - newStack.OrgID = *orgID - - events := respStack.Events - if len(events) == 0 && !respStack.UpdatedAt.IsZero() { - events = append(events, respStack.RespStackEvent) - } - - for _, respEv := range events { - ev, err := convertRespStackEvent(respEv) - if err != nil { - return Stack{}, err - } - newStack.Events = append(newStack.Events, ev) - } - - return newStack, nil -} - -func convertRespStackEvent(ev RespStackEvent) (StackEvent, error) { - res, err := convertRespStackResources(ev.Resources) - if err != nil { - return StackEvent{}, err - } - - eventType := StackEventCreate - switch ev.EventType { - case "uninstall", "delete": // delete is included to maintain backwards compatibility - eventType = StackEventUninstalled - case "update": - eventType = StackEventUpdate - } - - return StackEvent{ - EventType: eventType, - Name: ev.Name, - Description: ev.Description, - Resources: res, - Sources: ev.Sources, - TemplateURLs: ev.URLs, - UpdatedAt: ev.UpdatedAt, - }, nil -} - -func convertRespStackResources(resources []RespStackResource) ([]StackResource, error) { - out := make([]StackResource, 0, len(resources)) - for _, r := range resources { - sr := StackResource{ - APIVersion: r.APIVersion, - MetaName: r.MetaName, - Kind: r.Kind, - } - for _, a := range r.Associations { - sr.Associations = append(sr.Associations, StackResourceAssociation(a)) - } - - resID, err := platform.IDFromString(r.ID) - if err != nil { - return nil, influxErr(errors.EInternal, err) - } - sr.ID = *resID - - out = append(out, sr) - } - return out, nil -} diff --git a/pkger/http_server_stack.go b/pkger/http_server_stack.go deleted file mode 100644 index 45e08e2ae4b..00000000000 --- a/pkger/http_server_stack.go +++ /dev/null @@ -1,475 +0,0 @@ -package pkger - -import ( - "fmt" - "net/http" - "net/url" - "path" - "time" - - "github.com/go-chi/chi" - pctx "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -const RoutePrefixStacks = "/api/v2/stacks" - -// HTTPServerStacks is a server that manages the stacks HTTP transport. -type HTTPServerStacks struct { - chi.Router - api *kithttp.API - logger *zap.Logger - svc SVC -} - -// NewHTTPServerStacks constructs a new http server. 
-func NewHTTPServerStacks(log *zap.Logger, svc SVC) *HTTPServerStacks { - svr := &HTTPServerStacks{ - api: kithttp.NewAPI(kithttp.WithLog(log)), - logger: log, - svc: svc, - } - - r := chi.NewRouter() - { - r.Post("/", svr.createStack) - r.Get("/", svr.listStacks) - - r.Route("/{stack_id}", func(r chi.Router) { - r.Get("/", svr.readStack) - r.Delete("/", svr.deleteStack) - r.Patch("/", svr.updateStack) - r.Post("/uninstall", svr.uninstallStack) - }) - } - - svr.Router = r - return svr -} - -// Prefix provides the prefix to this route tree. -func (s *HTTPServerStacks) Prefix() string { - return RoutePrefixStacks -} - -type ( - // RespStack is the response body for a stack. - RespStack struct { - ID string `json:"id"` - OrgID string `json:"orgID"` - CreatedAt time.Time `json:"createdAt"` - Events []RespStackEvent `json:"events"` - - // maintain same interface for backward compatibility - RespStackEvent - } - - RespStackEvent struct { - EventType string `json:"eventType"` - Name string `json:"name"` - Description string `json:"description"` - Resources []RespStackResource `json:"resources"` - Sources []string `json:"sources"` - URLs []string `json:"urls"` - UpdatedAt time.Time `json:"updatedAt"` - } - - // RespStackResource is the response for a stack resource. This type exists - // to decouple the internal service implementation from the deprecates usage - // of templates in the API. We could add a custom UnmarshalJSON method, but - // I would rather keep it obvious and explicit with a separate field. - RespStackResource struct { - APIVersion string `json:"apiVersion"` - ID string `json:"resourceID"` - Kind Kind `json:"kind"` - MetaName string `json:"templateMetaName"` - Associations []RespStackResourceAssoc `json:"associations"` - Links RespStackResourceLinks `json:"links"` - } - - // RespStackResourceAssoc is the response for a stack resource's associations. - RespStackResourceAssoc struct { - Kind Kind `json:"kind"` - MetaName string `json:"metaName"` - } - - RespStackResourceLinks struct { - Self string `json:"self"` - } -) - -// RespListStacks is the HTTP response for a stack list call. -type RespListStacks struct { - Stacks []RespStack `json:"stacks"` -} - -func (s *HTTPServerStacks) listStacks(w http.ResponseWriter, r *http.Request) { - q := r.URL.Query() - - rawOrgID := q.Get("orgID") - orgID, err := platform.IDFromString(rawOrgID) - if err != nil { - s.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("organization id[%q] is invalid", rawOrgID), - Err: err, - }) - return - } - - if err := r.ParseForm(); err != nil { - s.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Msg: "failed to parse form from encoded url", - Err: err, - }) - return - } - - filter := ListFilter{ - Names: r.Form["name"], - } - - for _, idRaw := range r.Form["stackID"] { - id, err := platform.IDFromString(idRaw) - if err != nil { - s.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("stack ID[%q] provided is invalid", idRaw), - Err: err, - }) - return - } - filter.StackIDs = append(filter.StackIDs, *id) - } - - stacks, err := s.svc.ListStacks(r.Context(), *orgID, filter) - if err != nil { - s.api.Err(w, r, err) - return - } - if stacks == nil { - stacks = []Stack{} - } - - out := make([]RespStack, 0, len(stacks)) - for _, st := range stacks { - out = append(out, convertStackToRespStack(st)) - } - - s.api.Respond(w, r, http.StatusOK, RespListStacks{ - Stacks: out, - }) -} - -// ReqCreateStack is a request body for a create stack call. 
-type ReqCreateStack struct { - OrgID string `json:"orgID"` - Name string `json:"name"` - Description string `json:"description"` - URLs []string `json:"urls"` -} - -// OK validates the request body is valid. -func (r *ReqCreateStack) OK() error { - // TODO: provide multiple errors back for failing validation - if _, err := platform.IDFromString(r.OrgID); err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("provided org id[%q] is invalid", r.OrgID), - } - } - - for _, u := range r.URLs { - if _, err := url.Parse(u); err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("provided url[%q] is invalid", u), - } - } - } - return nil -} - -func (r *ReqCreateStack) orgID() platform.ID { - orgID, _ := platform.IDFromString(r.OrgID) - return *orgID -} - -func (s *HTTPServerStacks) createStack(w http.ResponseWriter, r *http.Request) { - var reqBody ReqCreateStack - if err := s.api.DecodeJSON(r.Body, &reqBody); err != nil { - s.api.Err(w, r, err) - return - } - defer r.Body.Close() - - auth, err := pctx.GetAuthorizer(r.Context()) - if err != nil { - s.api.Err(w, r, err) - return - } - - stack, err := s.svc.InitStack(r.Context(), auth.GetUserID(), StackCreate{ - OrgID: reqBody.orgID(), - Name: reqBody.Name, - Description: reqBody.Description, - TemplateURLs: reqBody.URLs, - }) - if err != nil { - s.api.Err(w, r, err) - return - } - - s.api.Respond(w, r, http.StatusCreated, convertStackToRespStack(stack)) -} - -func (s *HTTPServerStacks) deleteStack(w http.ResponseWriter, r *http.Request) { - orgID, err := getRequiredOrgIDFromQuery(r.URL.Query()) - if err != nil { - s.api.Err(w, r, err) - return - } - - stackID, err := stackIDFromReq(r) - if err != nil { - s.api.Err(w, r, err) - return - } - - auth, err := pctx.GetAuthorizer(r.Context()) - if err != nil { - s.api.Err(w, r, err) - return - } - - err = s.svc.DeleteStack(r.Context(), struct{ OrgID, UserID, StackID platform.ID }{ - OrgID: orgID, - UserID: auth.GetUserID(), - StackID: stackID, - }) - if err != nil { - s.api.Err(w, r, err) - return - } - - s.api.Respond(w, r, http.StatusNoContent, nil) -} - -func (s *HTTPServerStacks) uninstallStack(w http.ResponseWriter, r *http.Request) { - orgID, err := getRequiredOrgIDFromQuery(r.URL.Query()) - if err != nil { - s.api.Err(w, r, err) - return - } - - stackID, err := stackIDFromReq(r) - if err != nil { - s.api.Err(w, r, err) - return - } - - auth, err := pctx.GetAuthorizer(r.Context()) - if err != nil { - s.api.Err(w, r, err) - return - } - - stack, err := s.svc.UninstallStack(r.Context(), struct{ OrgID, UserID, StackID platform.ID }{ - OrgID: orgID, - UserID: auth.GetUserID(), - StackID: stackID, - }) - if err != nil { - s.api.Err(w, r, err) - return - } - - s.api.Respond(w, r, http.StatusOK, convertStackToRespStack(stack)) -} - -func (s *HTTPServerStacks) readStack(w http.ResponseWriter, r *http.Request) { - stackID, err := stackIDFromReq(r) - if err != nil { - s.api.Err(w, r, err) - return - } - - stack, err := s.svc.ReadStack(r.Context(), stackID) - if err != nil { - s.api.Err(w, r, err) - return - } - - s.api.Respond(w, r, http.StatusOK, convertStackToRespStack(stack)) -} - -type ( - // ReqUpdateStack is the request body for updating a stack. - ReqUpdateStack struct { - Name *string `json:"name"` - Description *string `json:"description"` - TemplateURLs []string `json:"templateURLs"` - AdditionalResources []ReqUpdateStackResource `json:"additionalResources"` - - // Deprecating the urls field and replacing with templateURLs field. 
- // This is remaining here for backwards compatibility. - URLs []string `json:"urls"` - } - - ReqUpdateStackResource struct { - ID string `json:"resourceID"` - Kind Kind `json:"kind"` - MetaName string `json:"templateMetaName"` - } -) - -func (s *HTTPServerStacks) updateStack(w http.ResponseWriter, r *http.Request) { - var req ReqUpdateStack - if err := s.api.DecodeJSON(r.Body, &req); err != nil { - s.api.Err(w, r, err) - return - } - - stackID, err := stackIDFromReq(r) - if err != nil { - s.api.Err(w, r, err) - return - } - - update := StackUpdate{ - ID: stackID, - Name: req.Name, - Description: req.Description, - TemplateURLs: append(req.TemplateURLs, req.URLs...), - } - for _, res := range req.AdditionalResources { - id, err := platform.IDFromString(res.ID) - if err != nil { - s.api.Err(w, r, influxErr(errors.EInvalid, err, fmt.Sprintf("stack resource id %q", res.ID))) - return - } - update.AdditionalResources = append(update.AdditionalResources, StackAdditionalResource{ - APIVersion: APIVersion, - ID: *id, - Kind: res.Kind, - MetaName: res.MetaName, - }) - } - - stack, err := s.svc.UpdateStack(r.Context(), update) - if err != nil { - s.api.Err(w, r, err) - return - } - - s.api.Respond(w, r, http.StatusOK, convertStackToRespStack(stack)) -} - -func convertStackToRespStack(st Stack) RespStack { - events := make([]RespStackEvent, 0, len(st.Events)) - for _, ev := range st.Events { - events = append(events, convertStackEvent(ev)) - } - - return RespStack{ - ID: st.ID.String(), - OrgID: st.OrgID.String(), - CreatedAt: st.CreatedAt, - RespStackEvent: convertStackEvent(st.LatestEvent()), - Events: events, - } -} - -func convertStackEvent(ev StackEvent) RespStackEvent { - resources := make([]RespStackResource, 0, len(ev.Resources)) - for _, r := range ev.Resources { - asses := make([]RespStackResourceAssoc, 0, len(r.Associations)) - for _, a := range r.Associations { - asses = append(asses, RespStackResourceAssoc(a)) - } - resources = append(resources, RespStackResource{ - APIVersion: r.APIVersion, - ID: r.ID.String(), - Kind: r.Kind, - MetaName: r.MetaName, - Links: stackResLinks(r), - Associations: asses, - }) - } - - return RespStackEvent{ - EventType: ev.EventType.String(), - Name: ev.Name, - Description: ev.Description, - Resources: resources, - Sources: append([]string{}, ev.Sources...), - URLs: append([]string{}, ev.TemplateURLs...), - UpdatedAt: ev.UpdatedAt, - } -} - -func stackResLinks(r StackResource) RespStackResourceLinks { - var linkResource string - switch r.Kind { - case KindBucket: - linkResource = "buckets" - case KindCheck, KindCheckDeadman, KindCheckThreshold: - linkResource = "checks" - case KindDashboard: - linkResource = "dashboards" - case KindLabel: - linkResource = "labels" - case KindNotificationEndpoint, - KindNotificationEndpointHTTP, - KindNotificationEndpointPagerDuty, - KindNotificationEndpointSlack: - linkResource = "notificationEndpoints" - case KindNotificationRule: - linkResource = "notificationRules" - case KindTask: - linkResource = "tasks" - case KindTelegraf: - linkResource = "telegrafs" - case KindVariable: - linkResource = "variables" - } - return RespStackResourceLinks{ - Self: path.Join("/api/v2", linkResource, r.ID.String()), - } -} - -func stackIDFromReq(r *http.Request) (platform.ID, error) { - stackID, err := platform.IDFromString(chi.URLParam(r, "stack_id")) - if err != nil { - return 0, &errors.Error{ - Code: errors.EInvalid, - Msg: "the stack id provided in the path was invalid", - Err: err, - } - } - return *stackID, nil -} - -func 
getRequiredOrgIDFromQuery(q url.Values) (platform.ID, error) { - orgIDRaw := q.Get("orgID") - if orgIDRaw == "" { - return 0, &errors.Error{ - Code: errors.EInvalid, - Msg: "the orgID query param is required", - } - } - - orgID, err := platform.IDFromString(orgIDRaw) - if err != nil { - return 0, &errors.Error{ - Code: errors.EInvalid, - Msg: "the orgID query param was invalid", - Err: err, - } - } - return *orgID, nil -} diff --git a/pkger/http_server_stack_test.go b/pkger/http_server_stack_test.go deleted file mode 100644 index cadd083654d..00000000000 --- a/pkger/http_server_stack_test.go +++ /dev/null @@ -1,1617 +0,0 @@ -package pkger_test - -import ( - "bytes" - "context" - "errors" - "fmt" - "net/http" - "net/http/httptest" - "os" - "strings" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/pkg/testttp" - "github.com/influxdata/influxdb/v2/pkger" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -func TestPkgerHTTPServerStacks(t *testing.T) { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - b, err := os.ReadFile(strings.TrimPrefix(r.URL.Path, "/")) - if err != nil { - http.Error(w, err.Error(), 500) - return - } - w.Write(b) - }) - filesvr := httptest.NewServer(mux) - defer filesvr.Close() - - strPtr := func(s string) *string { - return &s - } - - newStackEvent := func(id platform.ID, k pkger.Kind, metaName string, associations ...pkger.RespStackResourceAssoc) pkger.RespStackResource { - if associations == nil { - associations = []pkger.RespStackResourceAssoc{} - } - return pkger.RespStackResource{ - APIVersion: pkger.APIVersion, - ID: id.String(), - Kind: k, - MetaName: metaName, - Associations: associations, - Links: stackResLinks(string(k.ResourceType()), id), - } - } - - t.Run("create a stack", func(t *testing.T) { - t.Run("should successfully return with valid req body", func(t *testing.T) { - svc := &fakeSVC{ - initStackFn: func(ctx context.Context, userID platform.ID, stackCr pkger.StackCreate) (pkger.Stack, error) { - return pkger.Stack{ - ID: 2, - OrgID: stackCr.OrgID, - Events: []pkger.StackEvent{ - { - Name: stackCr.Name, - Description: stackCr.Description, - Sources: stackCr.Sources, - TemplateURLs: stackCr.TemplateURLs, - UpdatedAt: time.Now(), - }, - }, - CreatedAt: time.Now(), - }, nil - }, - } - pkgHandler := pkger.NewHTTPServerStacks(zap.NewNop(), svc) - svr := newMountedHandler(pkgHandler, 1) - - reqBody := pkger.ReqCreateStack{ - OrgID: platform.ID(3).String(), - Name: "threeve", - Description: "desc", - URLs: []string{"http://example.com"}, - } - - testttp. - PostJSON(t, "/api/v2/stacks", reqBody). - Headers("Content-Type", "application/json"). - Do(svr). - ExpectStatus(http.StatusCreated). 
- ExpectBody(func(buf *bytes.Buffer) { - var resp pkger.RespStack - decodeBody(t, buf, &resp) - - assert.NotZero(t, resp.ID) - assert.Equal(t, pkger.StackEventCreate.String(), resp.EventType) - assert.Equal(t, reqBody.OrgID, resp.OrgID) - assert.Equal(t, reqBody.Name, resp.Name) - assert.Equal(t, reqBody.Description, resp.Description) - assert.Equal(t, reqBody.URLs, resp.URLs) - assert.NotZero(t, resp.CreatedAt) - assert.NotZero(t, resp.UpdatedAt) - }) - - }) - - t.Run("error cases", func(t *testing.T) { - tests := []struct { - name string - reqBody pkger.ReqCreateStack - expectedStatus int - svc pkger.SVC - }{ - { - name: "bad org id", - reqBody: pkger.ReqCreateStack{ - OrgID: "invalid id", - }, - expectedStatus: http.StatusBadRequest, - }, - { - name: "bad url", - reqBody: pkger.ReqCreateStack{ - OrgID: platform.ID(3).String(), - URLs: []string{"invalid @% url"}, - }, - expectedStatus: http.StatusBadRequest, - }, - { - name: "translates svc conflict error", - reqBody: pkger.ReqCreateStack{OrgID: platform.ID(3).String()}, - svc: &fakeSVC{ - initStackFn: func(ctx context.Context, userID platform.ID, stack pkger.StackCreate) (pkger.Stack, error) { - return pkger.Stack{}, &errors2.Error{Code: errors2.EConflict} - }, - }, - expectedStatus: http.StatusUnprocessableEntity, - }, - { - name: "translates svc internal error", - reqBody: pkger.ReqCreateStack{OrgID: platform.ID(3).String()}, - svc: &fakeSVC{ - initStackFn: func(ctx context.Context, userID platform.ID, stack pkger.StackCreate) (pkger.Stack, error) { - return pkger.Stack{}, &errors2.Error{Code: errors2.EInternal} - }, - }, - expectedStatus: http.StatusInternalServerError, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - svc := tt.svc - if svc == nil { - svc = &fakeSVC{ - initStackFn: func(ctx context.Context, userID platform.ID, stack pkger.StackCreate) (pkger.Stack, error) { - return pkger.Stack{}, nil - }, - } - } - - pkgHandler := pkger.NewHTTPServerStacks(zap.NewNop(), svc) - svr := newMountedHandler(pkgHandler, 1) - - testttp. - PostJSON(t, "/api/v2/stacks", tt.reqBody). - Headers("Content-Type", "application/json"). - Do(svr). 
- ExpectStatus(tt.expectedStatus) - } - - t.Run(tt.name, fn) - } - }) - }) - - t.Run("list a stack", func(t *testing.T) { - t.Run("should successfully return with valid req body", func(t *testing.T) { - const expectedOrgID platform.ID = 3 - - svc := &fakeSVC{ - listStacksFn: func(ctx context.Context, orgID platform.ID, filter pkger.ListFilter) ([]pkger.Stack, error) { - if orgID != expectedOrgID { - return nil, nil - } - - if len(filter.Names) > 0 && len(filter.StackIDs) == 0 { - var stacks []pkger.Stack - for i, name := range filter.Names { - stacks = append(stacks, pkger.Stack{ - ID: platform.ID(i + 1), - OrgID: expectedOrgID, - Events: []pkger.StackEvent{ - { - Name: name, - }, - }, - }) - } - return stacks, nil - } - - if len(filter.StackIDs) > 0 && len(filter.Names) == 0 { - var stacks []pkger.Stack - for _, stackID := range filter.StackIDs { - stacks = append(stacks, pkger.Stack{ - ID: stackID, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{}}, - }) - } - return stacks, nil - } - - return []pkger.Stack{{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Name: "stack_1", - }}, - }}, nil - }, - } - - pkgHandler := pkger.NewHTTPServerStacks(zap.NewNop(), svc) - svr := newMountedHandler(pkgHandler, 1) - - tests := []struct { - name string - queryArgs string - expectedStacks []pkger.RespStack - }{ - { - name: "with org ID that has stacks", - queryArgs: "orgID=" + expectedOrgID.String(), - expectedStacks: []pkger.RespStack{{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Name: "stack_1", - Resources: []pkger.RespStackResource{}, - Sources: []string{}, - URLs: []string{}, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Name: "stack_1", - Resources: []pkger.RespStackResource{}, - Sources: []string{}, - URLs: []string{}, - }, - }, - }}, - }, - { - name: "with orgID with no stacks", - queryArgs: "orgID=" + platform.ID(9000).String(), - expectedStacks: []pkger.RespStack{}, - }, - { - name: "with names", - queryArgs: "name=name_stack&name=threeve&orgID=" + expectedOrgID.String(), - expectedStacks: []pkger.RespStack{ - { - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Name: "name_stack", - Resources: []pkger.RespStackResource{}, - Sources: []string{}, - URLs: []string{}, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Name: "name_stack", - Resources: []pkger.RespStackResource{}, - Sources: []string{}, - URLs: []string{}, - }, - }, - }, - { - ID: platform.ID(2).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Name: "threeve", - Resources: []pkger.RespStackResource{}, - Sources: []string{}, - URLs: []string{}, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Name: "threeve", - Resources: []pkger.RespStackResource{}, - Sources: []string{}, - URLs: []string{}, - }, - }, - }, - }, - }, - { - name: "with ids", - queryArgs: fmt.Sprintf("stackID=%s&stackID=%s&orgID=%s", platform.ID(1), platform.ID(2), platform.ID(expectedOrgID)), - expectedStacks: []pkger.RespStack{ - { - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Resources: []pkger.RespStackResource{}, - Sources: 
[]string{}, - URLs: []string{}, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Resources: []pkger.RespStackResource{}, - Sources: []string{}, - URLs: []string{}, - }, - }, - }, - { - ID: platform.ID(2).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Resources: []pkger.RespStackResource{}, - Sources: []string{}, - URLs: []string{}, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Resources: []pkger.RespStackResource{}, - Sources: []string{}, - URLs: []string{}, - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - testttp. - Get(t, "/api/v2/stacks?"+tt.queryArgs). - Headers("Content-Type", "application/x-www-form-urlencoded"). - Do(svr). - ExpectStatus(http.StatusOK). - ExpectBody(func(buf *bytes.Buffer) { - var resp pkger.RespListStacks - decodeBody(t, buf, &resp) - - assert.Equal(t, tt.expectedStacks, resp.Stacks) - }) - } - - t.Run(tt.name, fn) - } - }) - - t.Run("should provide all resource links for each stack resource collection", func(t *testing.T) { - const expectedOrgID platform.ID = 3 - - tests := []struct { - name string - stub pkger.Stack - expectedStack pkger.RespStack - }{ - { - name: "for stacks with associated buckets", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindBucket, - MetaName: "buck-1", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindBucket, "buck-1"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindBucket, "buck-1"), - }, - }, - }, - }, - }, - { - name: "for stacks with associated checks", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindCheckThreshold, - MetaName: "check-thresh", - }, - { - APIVersion: pkger.APIVersion, - ID: 2, - Kind: pkger.KindCheckDeadman, - MetaName: "check-deadman", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindCheckThreshold, "check-thresh"), - newStackEvent(2, pkger.KindCheckDeadman, "check-deadman"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindCheckThreshold, "check-thresh"), - newStackEvent(2, pkger.KindCheckDeadman, "check-deadman"), - }, - }, - }, - }, - }, - { - name: "for stacks with associated dashboards", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindDashboard, - MetaName: "dash", - 
}, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindDashboard, "dash"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindDashboard, "dash"), - }, - }, - }, - }, - }, - { - name: "for stacks with associated labels", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindLabel, - MetaName: "label", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindLabel, "label"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindLabel, "label"), - }, - }, - }, - }, - }, - { - name: "for stacks with associated notification endpoints", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindNotificationEndpoint, - MetaName: "end-1", - }, - { - APIVersion: pkger.APIVersion, - ID: 2, - Kind: pkger.KindNotificationEndpointHTTP, - MetaName: "end-2", - }, - { - APIVersion: pkger.APIVersion, - ID: 3, - Kind: pkger.KindNotificationEndpointPagerDuty, - MetaName: "end-3", - }, - { - APIVersion: pkger.APIVersion, - ID: 4, - Kind: pkger.KindNotificationEndpointSlack, - MetaName: "end-4", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindNotificationEndpoint, "end-1"), - newStackEvent(2, pkger.KindNotificationEndpointHTTP, "end-2"), - newStackEvent(3, pkger.KindNotificationEndpointPagerDuty, "end-3"), - newStackEvent(4, pkger.KindNotificationEndpointSlack, "end-4"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindNotificationEndpoint, "end-1"), - newStackEvent(2, pkger.KindNotificationEndpointHTTP, "end-2"), - newStackEvent(3, pkger.KindNotificationEndpointPagerDuty, "end-3"), - newStackEvent(4, pkger.KindNotificationEndpointSlack, "end-4"), - }, - }, - }, - }, - }, - { - name: "for stacks with associated notification rules", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindNotificationRule, - MetaName: "rule-1", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: 
pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindNotificationRule, "rule-1"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindNotificationRule, "rule-1"), - }, - }, - }, - }, - }, - { - name: "for stacks with associated tasks", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindTask, - MetaName: "task-1", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindTask, "task-1"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindTask, "task-1"), - }, - }, - }, - }, - }, - { - name: "for stacks with associated telegraf configs", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindTelegraf, - MetaName: "tele-1", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindTelegraf, "tele-1"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindTelegraf, "tele-1"), - }, - }, - }, - }, - }, - { - name: "for stacks with associated variables", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindVariable, - MetaName: "var-1", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindVariable, "var-1"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindVariable, "var-1"), - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - svc := &fakeSVC{ - listStacksFn: func(ctx context.Context, orgID platform.ID, filter pkger.ListFilter) ([]pkger.Stack, error) { - return []pkger.Stack{tt.stub}, nil - }, - } - pkgHandler := pkger.NewHTTPServerStacks(zap.NewNop(), svc) - svr := newMountedHandler(pkgHandler, 1) - - testttp. - Get(t, "/api/v2/stacks?orgID="+expectedOrgID.String()). - Do(svr). - ExpectStatus(http.StatusOK). 
- ExpectBody(func(buf *bytes.Buffer) { - var resp pkger.RespListStacks - decodeBody(t, buf, &resp) - - require.Len(t, resp.Stacks, 1) - assert.Equal(t, tt.expectedStack, resp.Stacks[0]) - }) - } - - t.Run(tt.name, fn) - } - }) - }) - - t.Run("read a stack", func(t *testing.T) { - t.Run("should successfully return with valid req body", func(t *testing.T) { - const expectedOrgID platform.ID = 3 - - tests := []struct { - name string - stub pkger.Stack - expectedStack pkger.RespStack - }{ - { - name: "for stack that has all fields available", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{ - { - Name: "name", - Description: "desc", - Sources: []string{"threeve"}, - TemplateURLs: []string{"http://example.com"}, - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 3, - Kind: pkger.KindBucket, - MetaName: "rucketeer", - }, - }, - }, - }, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Name: "name", - Description: "desc", - Sources: []string{"threeve"}, - URLs: []string{"http://example.com"}, - Resources: []pkger.RespStackResource{ - newStackEvent(3, pkger.KindBucket, "rucketeer"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Name: "name", - Description: "desc", - Sources: []string{"threeve"}, - URLs: []string{"http://example.com"}, - Resources: []pkger.RespStackResource{ - newStackEvent(3, pkger.KindBucket, "rucketeer"), - }, - }, - }, - }, - }, - { - name: "for stack that has missing resources urls and sources", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{ - { - Name: "name", - Description: "desc", - }, - }, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Name: "name", - Description: "desc", - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{}, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Name: "name", - Description: "desc", - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{}, - }, - }, - }, - }, - { - name: "for stack that has no set fields", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{}}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{}, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{}, - }, - }, - }, - }, - { - name: "for stacks with associated checks", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindCheckThreshold, - MetaName: "check-thresh", - }, - { - APIVersion: pkger.APIVersion, - ID: 2, - Kind: pkger.KindCheckDeadman, - MetaName: "check-deadman", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: 
pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindCheckThreshold, "check-thresh"), - newStackEvent(2, pkger.KindCheckDeadman, "check-deadman"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindCheckThreshold, "check-thresh"), - newStackEvent(2, pkger.KindCheckDeadman, "check-deadman"), - }, - }, - }, - }, - }, - { - name: "for stacks with associated dashboards", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindDashboard, - MetaName: "dash", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindDashboard, "dash"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindDashboard, "dash"), - }, - }, - }, - }, - }, - { - name: "for stacks with associated labels", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindLabel, - MetaName: "label", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindLabel, "label"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindLabel, "label"), - }, - }, - }, - }, - }, - { - name: "for stacks with associated notification endpoints", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindNotificationEndpoint, - MetaName: "end-1", - }, - { - APIVersion: pkger.APIVersion, - ID: 2, - Kind: pkger.KindNotificationEndpointHTTP, - MetaName: "end-2", - }, - { - APIVersion: pkger.APIVersion, - ID: 3, - Kind: pkger.KindNotificationEndpointPagerDuty, - MetaName: "end-3", - }, - { - APIVersion: pkger.APIVersion, - ID: 4, - Kind: pkger.KindNotificationEndpointSlack, - MetaName: "end-4", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindNotificationEndpoint, "end-1"), - newStackEvent(2, pkger.KindNotificationEndpointHTTP, "end-2"), - newStackEvent(3, pkger.KindNotificationEndpointPagerDuty, "end-3"), - newStackEvent(4, pkger.KindNotificationEndpointSlack, "end-4"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - 
Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindNotificationEndpoint, "end-1"), - newStackEvent(2, pkger.KindNotificationEndpointHTTP, "end-2"), - newStackEvent(3, pkger.KindNotificationEndpointPagerDuty, "end-3"), - newStackEvent(4, pkger.KindNotificationEndpointSlack, "end-4"), - }, - }, - }, - }, - }, - { - name: "for stacks with associated notification rules", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindNotificationRule, - MetaName: "rule-1", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindNotificationRule, "rule-1"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindNotificationRule, "rule-1"), - }, - }, - }, - }, - }, - { - name: "for stacks with associated tasks", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindTask, - MetaName: "task-1", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindTask, "task-1"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindTask, "task-1"), - }, - }, - }, - }, - }, - { - name: "for stacks with associated telegraf configs", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindTelegraf, - MetaName: "tele-1", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindTelegraf, "tele-1"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindTelegraf, "tele-1"), - }, - }, - }, - }, - }, - { - name: "for stacks with associated variables", - stub: pkger.Stack{ - ID: 1, - OrgID: expectedOrgID, - Events: []pkger.StackEvent{{ - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 1, - Kind: pkger.KindVariable, - MetaName: "var-1", - }, - }, - }}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, 
pkger.KindVariable, "var-1"), - }, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventCreate.String(), - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{ - newStackEvent(1, pkger.KindVariable, "var-1"), - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - svc := &fakeSVC{ - readStackFn: func(ctx context.Context, id platform.ID) (pkger.Stack, error) { - return tt.stub, nil - }, - } - pkgHandler := pkger.NewHTTPServerStacks(zap.NewNop(), svc) - svr := newMountedHandler(pkgHandler, 1) - - testttp. - Get(t, "/api/v2/stacks/"+tt.stub.ID.String()). - Do(svr). - ExpectStatus(http.StatusOK). - ExpectBody(func(buf *bytes.Buffer) { - var resp pkger.RespStack - decodeBody(t, buf, &resp) - - assert.Equal(t, tt.expectedStack, resp) - }) - } - - t.Run(tt.name, fn) - } - }) - - t.Run("error cases", func(t *testing.T) { - tests := []struct { - name string - stackIDPath string - expectedStatus int - svc pkger.SVC - }{ - { - name: "bad stack id path", - stackIDPath: "badID", - expectedStatus: http.StatusBadRequest, - }, - { - name: "stack not found", - stackIDPath: platform.ID(1).String(), - svc: &fakeSVC{ - readStackFn: func(ctx context.Context, id platform.ID) (pkger.Stack, error) { - return pkger.Stack{}, &errors2.Error{Code: errors2.ENotFound} - }, - }, - expectedStatus: http.StatusNotFound, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - svc := tt.svc - if svc == nil { - svc = &fakeSVC{ - initStackFn: func(ctx context.Context, userID platform.ID, stack pkger.StackCreate) (pkger.Stack, error) { - return pkger.Stack{}, nil - }, - } - } - - pkgHandler := pkger.NewHTTPServerStacks(zap.NewNop(), svc) - svr := newMountedHandler(pkgHandler, 1) - - testttp. - Get(t, "/api/v2/stacks/"+tt.stackIDPath). - Headers("Content-Type", "application/json"). - Do(svr). 
- ExpectStatus(tt.expectedStatus) - } - - t.Run(tt.name, fn) - } - }) - }) - - t.Run("update a stack", func(t *testing.T) { - t.Run("should successfully update with valid req body", func(t *testing.T) { - const expectedOrgID platform.ID = 3 - - tests := []struct { - name string - input pkger.ReqUpdateStack - expectedStack pkger.RespStack - }{ - { - name: "update name field", - input: pkger.ReqUpdateStack{ - Name: strPtr("name"), - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventUpdate.String(), - Name: "name", - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{}, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventUpdate.String(), - Name: "name", - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{}, - }, - }, - }, - }, - { - name: "update desc field", - input: pkger.ReqUpdateStack{ - Description: strPtr("desc"), - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventUpdate.String(), - Description: "desc", - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{}, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventUpdate.String(), - Description: "desc", - Sources: []string{}, - URLs: []string{}, - Resources: []pkger.RespStackResource{}, - }, - }, - }, - }, - { - name: "update urls field", - input: pkger.ReqUpdateStack{ - TemplateURLs: []string{"http://example.com"}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventUpdate.String(), - Sources: []string{}, - URLs: []string{"http://example.com"}, - Resources: []pkger.RespStackResource{}, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventUpdate.String(), - Sources: []string{}, - URLs: []string{"http://example.com"}, - Resources: []pkger.RespStackResource{}, - }, - }, - }, - }, - { - name: "update all fields", - input: pkger.ReqUpdateStack{ - Name: strPtr("name"), - Description: strPtr("desc"), - TemplateURLs: []string{"http://example.com"}, - }, - expectedStack: pkger.RespStack{ - ID: platform.ID(1).String(), - OrgID: expectedOrgID.String(), - RespStackEvent: pkger.RespStackEvent{ - EventType: pkger.StackEventUpdate.String(), - Name: "name", - Description: "desc", - Sources: []string{}, - URLs: []string{"http://example.com"}, - Resources: []pkger.RespStackResource{}, - }, - Events: []pkger.RespStackEvent{ - { - EventType: pkger.StackEventUpdate.String(), - Name: "name", - Description: "desc", - Sources: []string{}, - URLs: []string{"http://example.com"}, - Resources: []pkger.RespStackResource{}, - }, - }, - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - id, err := platform.IDFromString(tt.expectedStack.ID) - require.NoError(t, err) - - svc := &fakeSVC{ - updateStackFn: func(ctx context.Context, upd pkger.StackUpdate) (pkger.Stack, error) { - if upd.ID != *id { - return pkger.Stack{}, errors.New("unexpected stack ID: " + upd.ID.String()) - } - st := pkger.Stack{ - ID: *id, - OrgID: expectedOrgID, - } - ev := pkger.StackEvent{EventType: pkger.StackEventUpdate} - if upd.Name != nil { - ev.Name = *upd.Name - } - if upd.Description != nil { - ev.Description = *upd.Description - } - if upd.TemplateURLs != nil { - 
ev.TemplateURLs = upd.TemplateURLs - } - st.Events = append(st.Events, ev) - return st, nil - }, - } - pkgHandler := pkger.NewHTTPServerStacks(zap.NewNop(), svc) - svr := newMountedHandler(pkgHandler, 1) - - testttp. - PatchJSON(t, "/api/v2/stacks/"+tt.expectedStack.ID, tt.input). - Do(svr). - ExpectStatus(http.StatusOK). - ExpectBody(func(buf *bytes.Buffer) { - var resp pkger.RespStack - decodeBody(t, buf, &resp) - - assert.Equal(t, tt.expectedStack, resp) - }) - } - - t.Run(tt.name, fn) - } - }) - - t.Run("error cases", func(t *testing.T) { - tests := []struct { - name string - stackIDPath string - expectedStatus int - svc pkger.SVC - }{ - { - name: "bad stack id path", - stackIDPath: "badID", - expectedStatus: http.StatusBadRequest, - }, - { - name: "stack not found", - stackIDPath: platform.ID(1).String(), - svc: &fakeSVC{ - readStackFn: func(ctx context.Context, id platform.ID) (pkger.Stack, error) { - return pkger.Stack{}, &errors2.Error{Code: errors2.ENotFound} - }, - }, - expectedStatus: http.StatusNotFound, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - svc := tt.svc - if svc == nil { - svc = &fakeSVC{ - initStackFn: func(ctx context.Context, userID platform.ID, stack pkger.StackCreate) (pkger.Stack, error) { - return pkger.Stack{}, nil - }, - } - } - - pkgHandler := pkger.NewHTTPServerStacks(zap.NewNop(), svc) - svr := newMountedHandler(pkgHandler, 1) - - testttp. - Get(t, "/api/v2/stacks/"+tt.stackIDPath). - Headers("Content-Type", "application/json"). - Do(svr). - ExpectStatus(tt.expectedStatus) - } - - t.Run(tt.name, fn) - } - }) - }) -} - -type fakeSVC struct { - initStackFn func(ctx context.Context, userID platform.ID, stack pkger.StackCreate) (pkger.Stack, error) - listStacksFn func(ctx context.Context, orgID platform.ID, filter pkger.ListFilter) ([]pkger.Stack, error) - readStackFn func(ctx context.Context, id platform.ID) (pkger.Stack, error) - updateStackFn func(ctx context.Context, upd pkger.StackUpdate) (pkger.Stack, error) - dryRunFn func(ctx context.Context, orgID, userID platform.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error) - applyFn func(ctx context.Context, orgID, userID platform.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error) -} - -var _ pkger.SVC = (*fakeSVC)(nil) - -func (f *fakeSVC) InitStack(ctx context.Context, userID platform.ID, stack pkger.StackCreate) (pkger.Stack, error) { - if f.initStackFn == nil { - panic("not implemented") - } - return f.initStackFn(ctx, userID, stack) -} - -func (f *fakeSVC) UninstallStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) (pkger.Stack, error) { - panic("not implemented") -} - -func (f *fakeSVC) DeleteStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) error { - panic("not implemented yet") -} - -func (f *fakeSVC) ListStacks(ctx context.Context, orgID platform.ID, filter pkger.ListFilter) ([]pkger.Stack, error) { - if f.listStacksFn == nil { - panic("not implemented") - } - return f.listStacksFn(ctx, orgID, filter) -} - -func (f *fakeSVC) ReadStack(ctx context.Context, id platform.ID) (pkger.Stack, error) { - if f.readStackFn != nil { - return f.readStackFn(ctx, id) - } - panic("not implemented") -} - -func (f *fakeSVC) UpdateStack(ctx context.Context, upd pkger.StackUpdate) (pkger.Stack, error) { - if f.updateStackFn != nil { - return f.updateStackFn(ctx, upd) - } - panic("not implemented") -} - -func (f *fakeSVC) Export(ctx context.Context, setters ...pkger.ExportOptFn) (*pkger.Template, error) { - 
panic("not implemented") -} - -func (f *fakeSVC) DryRun(ctx context.Context, orgID, userID platform.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error) { - if f.dryRunFn == nil { - panic("not implemented") - } - - return f.dryRunFn(ctx, orgID, userID, opts...) -} - -func (f *fakeSVC) Apply(ctx context.Context, orgID, userID platform.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error) { - if f.applyFn == nil { - panic("not implemented") - } - return f.applyFn(ctx, orgID, userID, opts...) -} - -func stackResLinks(resource string, id platform.ID) pkger.RespStackResourceLinks { - return pkger.RespStackResourceLinks{ - Self: fmt.Sprintf("/api/v2/%s/%s", resource, id), - } -} diff --git a/pkger/http_server_template.go b/pkger/http_server_template.go deleted file mode 100644 index e84daea72f8..00000000000 --- a/pkger/http_server_template.go +++ /dev/null @@ -1,617 +0,0 @@ -package pkger - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "path" - "strings" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - pctx "github.com/influxdata/influxdb/v2/context" - ierrors "github.com/influxdata/influxdb/v2/kit/errors" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/pkg/jsonnet" - "go.uber.org/zap" - "gopkg.in/yaml.v3" -) - -const RoutePrefixTemplates = "/api/v2/templates" - -// HTTPServerTemplates is a server that manages the templates HTTP transport. -type HTTPServerTemplates struct { - chi.Router - api *kithttp.API - logger *zap.Logger - svc SVC - client *http.Client -} - -// NewHTTPServerTemplates constructs a new http server. -func NewHTTPServerTemplates(log *zap.Logger, svc SVC, client *http.Client) *HTTPServerTemplates { - svr := &HTTPServerTemplates{ - api: kithttp.NewAPI(kithttp.WithLog(log)), - logger: log, - svc: svc, - client: client, - } - - exportAllowContentTypes := middleware.AllowContentType("text/yml", "application/x-yaml", "application/json") - setJSONContentType := middleware.SetHeader("Content-Type", "application/json; charset=utf-8") - - r := chi.NewRouter() - { - r.With(exportAllowContentTypes).Post("/export", svr.export) - r.With(setJSONContentType).Post("/apply", svr.apply) - } - - svr.Router = r - return svr -} - -// Prefix provides the prefix to this route tree. -func (s *HTTPServerTemplates) Prefix() string { - return RoutePrefixTemplates -} - -// ReqExportOrgIDOpt provides options to export resources by organization id. -type ReqExportOrgIDOpt struct { - OrgID string `json:"orgID"` - Filters struct { - ByLabel []string `json:"byLabel"` - ByResourceKind []Kind `json:"byResourceKind"` - } `json:"resourceFilters"` -} - -// ReqExport is a request body for the export endpoint. -type ReqExport struct { - StackID string `json:"stackID"` - OrgIDs []ReqExportOrgIDOpt `json:"orgIDs"` - Resources []ResourceToClone `json:"resources"` -} - -// OK validates a create request. 
-func (r *ReqExport) OK() error { - if len(r.Resources) == 0 && len(r.OrgIDs) == 0 && r.StackID == "" { - return &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "at least 1 resource, 1 org id, or stack id must be provided", - } - } - - for _, org := range r.OrgIDs { - if _, err := platform.IDFromString(org.OrgID); err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("provided org id is invalid: %q", org.OrgID), - } - } - } - - if r.StackID != "" { - _, err := platform.IDFromString(r.StackID) - return err - } - return nil -} - -// RespExport is a response body for the create template endpoint. -type RespExport []Object - -func (s *HTTPServerTemplates) export(w http.ResponseWriter, r *http.Request) { - var reqBody ReqExport - if err := s.api.DecodeJSON(r.Body, &reqBody); err != nil { - s.api.Err(w, r, err) - return - } - defer r.Body.Close() - - opts := []ExportOptFn{ - ExportWithExistingResources(reqBody.Resources...), - } - for _, orgIDStr := range reqBody.OrgIDs { - orgID, err := platform.IDFromString(orgIDStr.OrgID) - if err != nil { - continue - } - opts = append(opts, ExportWithAllOrgResources(ExportByOrgIDOpt{ - OrgID: *orgID, - LabelNames: orgIDStr.Filters.ByLabel, - ResourceKinds: orgIDStr.Filters.ByResourceKind, - })) - } - - if reqBody.StackID != "" { - stackID, err := platform.IDFromString(reqBody.StackID) - if err != nil { - s.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("invalid stack ID provided: %q", reqBody.StackID), - }) - return - } - opts = append(opts, ExportWithStackID(*stackID)) - } - - newTemplate, err := s.svc.Export(r.Context(), opts...) - if err != nil { - s.api.Err(w, r, err) - return - } - - resp := RespExport(newTemplate.Objects) - if resp == nil { - resp = []Object{} - } - - var enc encoder - switch templateEncoding(r.Header.Get("Accept")) { - case EncodingYAML: - enc = yaml.NewEncoder(w) - w.Header().Set("Content-Type", "application/x-yaml") - default: - enc = newJSONEnc(w) - w.Header().Set("Content-Type", "application/json; charset=utf-8") - } - - s.encResp(w, r, enc, http.StatusOK, resp) -} - -// ReqTemplateRemote provides a package via a remote (i.e. a gist). If content type is not -// provided then the service will do its best to discern the content type of the -// contents. -type ReqTemplateRemote struct { - URL string `json:"url" yaml:"url"` - ContentType string `json:"contentType" yaml:"contentType"` -} - -// Encoding returns the encoding type that corresponds to the given content type. -func (p ReqTemplateRemote) Encoding() Encoding { - return convertEncoding(p.ContentType, p.URL) -} - -type ReqRawTemplate struct { - ContentType string `json:"contentType" yaml:"contentType"` - Sources []string `json:"sources" yaml:"sources"` - Template json.RawMessage `json:"contents" yaml:"contents"` -} - -func (p ReqRawTemplate) Encoding() Encoding { - var source string - if len(p.Sources) > 0 { - source = p.Sources[0] - } - return convertEncoding(p.ContentType, source) -} - -// ReqRawAction is a raw action consumers can provide to change the behavior -// of the application of a template. -type ReqRawAction struct { - Action string `json:"action"` - Properties json.RawMessage `json:"properties"` -} - -// ReqApply is the request body for a json or yaml body for the apply template endpoint. 
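// For illustration only (the org ID, bucket name, and secret below are
// hypothetical placeholders, and the apiVersion stands in for pkger.APIVersion),
// a minimal JSON body for this endpoint could look like:
//
//	{
//	  "dryRun": true,
//	  "orgID": "0000000000000001",
//	  "template": {
//	    "contentType": "json",
//	    "contents": [{"apiVersion": "<pkger.APIVersion>", "kind": "Bucket", "metadata": {"name": "example-bucket"}, "spec": {}}]
//	  },
//	  "secrets": {"secret1": "val1"}
//	}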
-type ReqApply struct { - DryRun bool `json:"dryRun" yaml:"dryRun"` - OrgID string `json:"orgID" yaml:"orgID"` - StackID *string `json:"stackID" yaml:"stackID"` // optional: non nil value signals stack should be used - Remotes []ReqTemplateRemote `json:"remotes" yaml:"remotes"` - - RawTemplates []ReqRawTemplate `json:"templates" yaml:"templates"` - RawTemplate ReqRawTemplate `json:"template" yaml:"template"` - - EnvRefs map[string]interface{} `json:"envRefs"` - Secrets map[string]string `json:"secrets"` - - RawActions []ReqRawAction `json:"actions"` -} - -// Templates returns all templates associated with the request. -func (r ReqApply) Templates(encoding Encoding, client *http.Client) (*Template, error) { - var rawTemplates []*Template - for _, rem := range r.Remotes { - if rem.URL == "" { - continue - } - template, err := Parse(rem.Encoding(), FromHTTPRequest(rem.URL, client), ValidSkipParseError()) - if err != nil { - msg := fmt.Sprintf("template from url[%s] had an issue: %s", rem.URL, err.Error()) - return nil, influxErr(errors.EUnprocessableEntity, msg) - } - rawTemplates = append(rawTemplates, template) - } - - for i, rawTmpl := range append(r.RawTemplates, r.RawTemplate) { - if rawTmpl.Template == nil { - continue - } - enc := encoding - if sourceEncoding := rawTmpl.Encoding(); sourceEncoding != EncodingSource { - enc = sourceEncoding - } - template, err := Parse(enc, FromReader(bytes.NewReader(rawTmpl.Template), rawTmpl.Sources...), ValidSkipParseError()) - if err != nil { - sources := formatSources(rawTmpl.Sources) - msg := fmt.Sprintf("template[%d] from source(s) %q had an issue: %s", i, sources, err.Error()) - return nil, influxErr(errors.EUnprocessableEntity, msg) - } - rawTemplates = append(rawTemplates, template) - } - - return Combine(rawTemplates, ValidWithoutResources(), ValidSkipParseError()) -} - -type actionType string - -// various ActionTypes the transport API speaks -const ( - ActionTypeSkipKind actionType = "skipKind" - ActionTypeSkipResource actionType = "skipResource" -) - -func (r ReqApply) validActions() (struct { - SkipKinds []ActionSkipKind - SkipResources []ActionSkipResource -}, error) { - type actions struct { - SkipKinds []ActionSkipKind - SkipResources []ActionSkipResource - } - - unmarshalErrFn := func(err error, idx int, actionType string) error { - msg := fmt.Sprintf("failed to unmarshal properties for actions[%d] %q", idx, actionType) - return ierrors.Wrap(err, msg) - } - - kindErrFn := func(err error, idx int, actionType string) error { - msg := fmt.Sprintf("invalid kind for actions[%d] %q", idx, actionType) - return ierrors.Wrap(err, msg) - } - - var out actions - for i, rawAct := range r.RawActions { - switch a := rawAct.Action; actionType(a) { - case ActionTypeSkipResource: - var asr ActionSkipResource - if err := json.Unmarshal(rawAct.Properties, &asr); err != nil { - return actions{}, influxErr(errors.EInvalid, unmarshalErrFn(err, i, a)) - } - if err := asr.Kind.OK(); err != nil { - return actions{}, influxErr(errors.EInvalid, kindErrFn(err, i, a)) - } - out.SkipResources = append(out.SkipResources, asr) - case ActionTypeSkipKind: - var ask ActionSkipKind - if err := json.Unmarshal(rawAct.Properties, &ask); err != nil { - return actions{}, influxErr(errors.EInvalid, unmarshalErrFn(err, i, a)) - } - if err := ask.Kind.OK(); err != nil { - return actions{}, influxErr(errors.EInvalid, kindErrFn(err, i, a)) - } - out.SkipKinds = append(out.SkipKinds, ask) - default: - msg := fmt.Sprintf( - "invalid action type %q provided for actions[%d] ; Must be 
one of [%s]", - a, i, ActionTypeSkipResource, - ) - return actions{}, influxErr(errors.EInvalid, msg) - } - } - - return out, nil -} - -// RespApply is the response body for the apply template endpoint. -type RespApply struct { - Sources []string `json:"sources" yaml:"sources"` - StackID string `json:"stackID" yaml:"stackID"` - Diff Diff `json:"diff" yaml:"diff"` - Summary Summary `json:"summary" yaml:"summary"` - - Errors []ValidationErr `json:"errors,omitempty" yaml:"errors,omitempty"` -} - -// RespApplyErr is the response body for a dry-run parse error in the apply template endpoint. -type RespApplyErr struct { - RespApply - - Code string `json:"code" yaml:"code"` - Message string `json:"message" yaml:"message"` -} - -func (s *HTTPServerTemplates) apply(w http.ResponseWriter, r *http.Request) { - var reqBody ReqApply - encoding, err := decodeWithEncoding(r, &reqBody) - if err != nil { - s.api.Err(w, r, newDecodeErr(encoding.String(), err)) - return - } - - orgID, err := platform.IDFromString(reqBody.OrgID) - if err != nil { - s.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("invalid organization ID provided: %q", reqBody.OrgID), - }) - return - } - - // Reject use of server-side jsonnet with /api/v2/templates/apply - if encoding == EncodingJsonnet { - s.api.Err(w, r, &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: fmt.Sprintf("template from source(s) had an issue: %s", ErrInvalidEncoding.Error()), - }) - return - } - - var remotes []string - for _, rem := range reqBody.Remotes { - remotes = append(remotes, rem.URL) - } - remotes = append(remotes, reqBody.RawTemplate.Sources...) - - for _, rem := range remotes { - // While things like '.%6Aonnet' evaluate to the default encoding (yaml), let's unescape and catch those too - decoded, err := url.QueryUnescape(rem) - if err != nil { - s.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("template from url[%q] had an issue", rem), - }) - return - } - if len(decoded) > 0 && strings.HasSuffix(strings.ToLower(decoded), "jsonnet") { - s.api.Err(w, r, &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: fmt.Sprintf("template from url[%q] had an issue: %s", rem, ErrInvalidEncoding.Error()), - }) - return - } - } - - var stackID platform.ID - if reqBody.StackID != nil { - if err := stackID.DecodeFromString(*reqBody.StackID); err != nil { - s.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("invalid stack ID provided: %q", *reqBody.StackID), - }) - return - } - } - - parsedTemplate, err := reqBody.Templates(encoding, s.client) - if err != nil { - s.api.Err(w, r, &errors.Error{ - Code: errors.EUnprocessableEntity, - Err: err, - }) - return - } - - actions, err := reqBody.validActions() - if err != nil { - s.api.Err(w, r, err) - return - } - - applyOpts := []ApplyOptFn{ - ApplyWithEnvRefs(reqBody.EnvRefs), - ApplyWithTemplate(parsedTemplate), - ApplyWithStackID(stackID), - } - for _, a := range actions.SkipResources { - applyOpts = append(applyOpts, ApplyWithResourceSkip(a)) - } - for _, a := range actions.SkipKinds { - applyOpts = append(applyOpts, ApplyWithKindSkip(a)) - } - - auth, err := pctx.GetAuthorizer(r.Context()) - if err != nil { - s.api.Err(w, r, err) - return - } - userID := auth.GetUserID() - - if reqBody.DryRun { - impact, err := s.svc.DryRun(r.Context(), *orgID, userID, applyOpts...) 
- if IsParseErr(err) { - s.api.Respond(w, r, http.StatusUnprocessableEntity, RespApplyErr{ - RespApply: impactToRespApply(impact, err), - Code: errors.EUnprocessableEntity, - Message: "unprocessable entity", - }) - return - } - if err != nil { - s.api.Err(w, r, err) - return - } - - s.api.Respond(w, r, http.StatusOK, impactToRespApply(impact, nil)) - return - } - - applyOpts = append(applyOpts, ApplyWithSecrets(reqBody.Secrets)) - - impact, err := s.svc.Apply(r.Context(), *orgID, userID, applyOpts...) - if err != nil && !IsParseErr(err) { - s.api.Err(w, r, err) - return - } - - s.api.Respond(w, r, http.StatusCreated, impactToRespApply(impact, err)) -} - -func (s *HTTPServerTemplates) encResp(w http.ResponseWriter, r *http.Request, enc encoder, code int, res interface{}) { - w.WriteHeader(code) - if err := enc.Encode(res); err != nil { - s.api.Err(w, r, &errors.Error{ - Msg: fmt.Sprintf("unable to marshal; Err: %v", err), - Code: errors.EInternal, - Err: err, - }) - } -} - -func impactToRespApply(impact ImpactSummary, err error) RespApply { - out := RespApply{ - Sources: append([]string{}, impact.Sources...), // guarantee non nil slice - StackID: impact.StackID.String(), - Diff: impact.Diff, - Summary: impact.Summary, - } - if err != nil { - out.Errors = convertParseErr(err) - } - if out.Diff.Buckets == nil { - out.Diff.Buckets = []DiffBucket{} - } - if out.Diff.Checks == nil { - out.Diff.Checks = []DiffCheck{} - } - if out.Diff.Dashboards == nil { - out.Diff.Dashboards = []DiffDashboard{} - } - if out.Diff.Labels == nil { - out.Diff.Labels = []DiffLabel{} - } - if out.Diff.LabelMappings == nil { - out.Diff.LabelMappings = []DiffLabelMapping{} - } - if out.Diff.NotificationEndpoints == nil { - out.Diff.NotificationEndpoints = []DiffNotificationEndpoint{} - } - if out.Diff.NotificationRules == nil { - out.Diff.NotificationRules = []DiffNotificationRule{} - } - if out.Diff.NotificationRules == nil { - out.Diff.NotificationRules = []DiffNotificationRule{} - } - if out.Diff.Tasks == nil { - out.Diff.Tasks = []DiffTask{} - } - if out.Diff.Telegrafs == nil { - out.Diff.Telegrafs = []DiffTelegraf{} - } - if out.Diff.Variables == nil { - out.Diff.Variables = []DiffVariable{} - } - - if out.Summary.Buckets == nil { - out.Summary.Buckets = []SummaryBucket{} - } - if out.Summary.Checks == nil { - out.Summary.Checks = []SummaryCheck{} - } - if out.Summary.Dashboards == nil { - out.Summary.Dashboards = []SummaryDashboard{} - } - if out.Summary.Labels == nil { - out.Summary.Labels = []SummaryLabel{} - } - if out.Summary.LabelMappings == nil { - out.Summary.LabelMappings = []SummaryLabelMapping{} - } - if out.Summary.NotificationEndpoints == nil { - out.Summary.NotificationEndpoints = []SummaryNotificationEndpoint{} - } - if out.Summary.NotificationRules == nil { - out.Summary.NotificationRules = []SummaryNotificationRule{} - } - if out.Summary.NotificationRules == nil { - out.Summary.NotificationRules = []SummaryNotificationRule{} - } - if out.Summary.Tasks == nil { - out.Summary.Tasks = []SummaryTask{} - } - if out.Summary.TelegrafConfigs == nil { - out.Summary.TelegrafConfigs = []SummaryTelegraf{} - } - if out.Summary.Variables == nil { - out.Summary.Variables = []SummaryVariable{} - } - - return out -} - -func formatSources(sources []string) string { - return strings.Join(sources, "; ") -} - -func decodeWithEncoding(r *http.Request, v interface{}) (Encoding, error) { - encoding := templateEncoding(r.Header.Get("Content-Type")) - - var dec interface{ Decode(interface{}) error } - switch encoding { - 
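// Select a decoder matching the declared content type; unrecognized or empty
// content types fall back to the JSON decoder.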
case EncodingJsonnet: - dec = jsonnet.NewDecoder(r.Body) - case EncodingYAML: - dec = yaml.NewDecoder(r.Body) - default: - dec = json.NewDecoder(r.Body) - } - - return encoding, dec.Decode(v) -} - -func templateEncoding(contentType string) Encoding { - switch contentType { - case "application/x-jsonnet": - return EncodingJsonnet - case "text/yml", "application/x-yaml": - return EncodingYAML - default: - return EncodingJSON - } -} - -func convertEncoding(ct, rawURL string) Encoding { - ct = strings.ToLower(ct) - urlBase := path.Ext(rawURL) - switch { - case ct == "jsonnet" || urlBase == ".jsonnet": - return EncodingJsonnet - case ct == "json" || urlBase == ".json": - return EncodingJSON - case ct == "yml" || ct == "yaml" || urlBase == ".yml" || urlBase == ".yaml": - return EncodingYAML - default: - return EncodingSource - } -} - -type encoder interface { - Encode(interface{}) error -} - -func newJSONEnc(w io.Writer) encoder { - enc := json.NewEncoder(w) - enc.SetIndent("", "\t") - return enc -} - -func convertParseErr(err error) []ValidationErr { - pErr, ok := err.(ParseError) - if !ok { - return nil - } - return pErr.ValidationErrs() -} - -func newDecodeErr(encoding string, err error) *errors.Error { - return &errors.Error{ - Msg: fmt.Sprintf("unable to unmarshal %s", encoding), - Code: errors.EInvalid, - Err: err, - } -} diff --git a/pkger/http_server_template_test.go b/pkger/http_server_template_test.go deleted file mode 100644 index a65a43923a8..00000000000 --- a/pkger/http_server_template_test.go +++ /dev/null @@ -1,1037 +0,0 @@ -package pkger_test - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "net/url" - "os" - "path" - "strings" - "testing" - - "github.com/go-chi/chi" - fluxurl "github.com/influxdata/flux/dependencies/url" - "github.com/influxdata/influxdb/v2" - pcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - influxerror "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/pkg/testttp" - "github.com/influxdata/influxdb/v2/pkger" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "go.uber.org/zap/zaptest/observer" - "gopkg.in/yaml.v3" -) - -func TestPkgerHTTPServerTemplate(t *testing.T) { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - b, err := os.ReadFile(strings.TrimPrefix(r.URL.Path, "/")) - if err != nil { - http.Error(w, err.Error(), 500) - return - } - w.Write(b) - }) - filesvr := httptest.NewServer(mux) - defer filesvr.Close() - - defaultClient := pkger.NewDefaultHTTPClient(fluxurl.PassValidator{}) - - newPkgURL := func(t *testing.T, svrURL string, pkgPath string) string { - t.Helper() - - u, err := url.Parse(svrURL) - require.NoError(t, err) - u.Path = path.Join(u.Path, pkgPath) - return u.String() - } - - strPtr := func(s string) *string { - return &s - } - - t.Run("create pkg", func(t *testing.T) { - t.Run("should successfully return with valid req body", func(t *testing.T) { - fakeLabelSVC := mock.NewLabelService() - fakeLabelSVC.FindLabelByIDFn = func(ctx context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ - ID: id, - }, nil - } - svc := 
pkger.NewService(pkger.WithLabelSVC(fakeLabelSVC)) - pkgHandler := pkger.NewHTTPServerTemplates(zap.NewNop(), svc, defaultClient) - svr := newMountedHandler(pkgHandler, 1) - - testttp. - PostJSON(t, "/api/v2/templates/export", pkger.ReqExport{ - Resources: []pkger.ResourceToClone{ - { - Kind: pkger.KindLabel, - ID: 1, - Name: "new name", - }, - }, - }). - Headers("Content-Type", "application/json"). - Do(svr). - ExpectStatus(http.StatusOK). - ExpectBody(func(buf *bytes.Buffer) { - pkg, err := pkger.Parse(pkger.EncodingJSON, pkger.FromReader(buf)) - require.NoError(t, err) - - require.NotNil(t, pkg) - require.NoError(t, pkg.Validate()) - - assert.Len(t, pkg.Objects, 1) - assert.Len(t, pkg.Summary().Labels, 1) - }) - - }) - - t.Run("should be invalid if not org ids or resources provided", func(t *testing.T) { - pkgHandler := pkger.NewHTTPServerTemplates(zap.NewNop(), nil, defaultClient) - svr := newMountedHandler(pkgHandler, 1) - - testttp. - PostJSON(t, "/api/v2/templates/export", pkger.ReqExport{}). - Headers("Content-Type", "application/json"). - Do(svr). - ExpectStatus(http.StatusUnprocessableEntity) - - }) - }) - - t.Run("dry run pkg", func(t *testing.T) { - t.Run("jsonnet disabled", func(t *testing.T) { - tests := []struct { - name string - contentType string - reqBody pkger.ReqApply - }{ - { - name: "app jsonnet disabled", - contentType: "application/x-jsonnet", - reqBody: pkger.ReqApply{ - DryRun: true, - OrgID: platform.ID(9000).String(), - RawTemplate: bucketPkgKinds(t, pkger.EncodingJsonnet), - }, - }, - { - name: "retrieves package from a URL (jsonnet disabled)", - reqBody: pkger.ReqApply{ - DryRun: true, - OrgID: platform.ID(9000).String(), - Remotes: []pkger.ReqTemplateRemote{{ - URL: newPkgURL(t, filesvr.URL, "testdata/bucket_associates_labels_one.jsonnet"), - }}, - }, - }, - { - name: "app json with jsonnet disabled remote", - contentType: "application/json", - reqBody: pkger.ReqApply{ - DryRun: true, - OrgID: platform.ID(9000).String(), - RawTemplate: bucketPkgJsonWithJsonnetRemote(t), - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - svc := &fakeSVC{ - dryRunFn: func(ctx context.Context, orgID, userID platform.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error) { - var opt pkger.ApplyOpt - for _, o := range opts { - o(&opt) - } - pkg, err := pkger.Combine(opt.Templates) - if err != nil { - return pkger.ImpactSummary{}, err - } - - if err := pkg.Validate(); err != nil { - return pkger.ImpactSummary{}, err - } - sum := pkg.Summary() - var diff pkger.Diff - for _, b := range sum.Buckets { - diff.Buckets = append(diff.Buckets, pkger.DiffBucket{ - DiffIdentifier: pkger.DiffIdentifier{ - MetaName: b.Name, - }, - }) - } - return pkger.ImpactSummary{ - Summary: sum, - Diff: diff, - }, nil - }, - } - - core, sink := observer.New(zap.InfoLevel) - pkgHandler := pkger.NewHTTPServerTemplates(zap.New(core), svc, defaultClient) - svr := newMountedHandler(pkgHandler, 1) - - ctx := context.Background() - testttp. - PostJSON(t, "/api/v2/templates/apply", tt.reqBody). - Headers("Content-Type", tt.contentType). - WithCtx(ctx). - Do(svr). - ExpectStatus(http.StatusUnprocessableEntity). 
- ExpectBody(func(buf *bytes.Buffer) { - var resp pkger.RespApply - decodeBody(t, buf, &resp) - - assert.Len(t, resp.Summary.Buckets, 0) - assert.Len(t, resp.Diff.Buckets, 0) - }) - - // Verify logging when jsonnet is disabled - entries := sink.TakeAll() // resets to 0 - if tt.contentType == "application/x-jsonnet" { - require.Equal(t, 1, len(entries)) - // message 0 - require.Equal(t, zap.ErrorLevel, entries[0].Entry.Level) - require.Equal(t, "api error encountered", entries[0].Entry.Message) - assert.ElementsMatch(t, []zap.Field{ - zap.Error(&influxerror.Error{ - Code: influxerror.EUnprocessableEntity, - Msg: "template from source(s) had an issue: invalid encoding provided", - }, - )}, entries[0].Context) - } else if len(tt.reqBody.Remotes) == 1 && strings.HasSuffix(tt.reqBody.Remotes[0].URL, "jsonnet") { - require.Equal(t, 1, len(entries)) - // message 0 - require.Equal(t, zap.ErrorLevel, entries[0].Entry.Level) - require.Equal(t, "api error encountered", entries[0].Entry.Message) - expMsg := fmt.Sprintf("template from url[\"%s\"] had an issue: invalid encoding provided", tt.reqBody.Remotes[0].URL) - assert.ElementsMatch(t, []zap.Field{ - zap.Error(&influxerror.Error{ - Code: influxerror.EUnprocessableEntity, - Msg: expMsg, - }, - )}, entries[0].Context) - } else if len(tt.reqBody.RawTemplate.Sources) == 1 && strings.HasSuffix(tt.reqBody.RawTemplate.Sources[0], "jsonnet") { - require.Equal(t, 1, len(entries)) - // message 0 - require.Equal(t, zap.ErrorLevel, entries[0].Entry.Level) - require.Equal(t, "api error encountered", entries[0].Entry.Message) - expMsg := fmt.Sprintf("template from url[\"%s\"] had an issue: invalid encoding provided", tt.reqBody.RawTemplate.Sources[0]) - assert.ElementsMatch(t, []zap.Field{ - zap.Error(&influxerror.Error{ - Code: influxerror.EUnprocessableEntity, - Msg: expMsg, - }, - )}, entries[0].Context) - } else { - require.Equal(t, 0, len(entries)) - } - } - t.Run(tt.name, fn) - } - }) - - t.Run("json", func(t *testing.T) { - tests := []struct { - name string - contentType string - reqBody pkger.ReqApply - }{ - { - name: "app json", - contentType: "application/json", - reqBody: pkger.ReqApply{ - DryRun: true, - OrgID: platform.ID(9000).String(), - RawTemplate: bucketPkgKinds(t, pkger.EncodingJSON), - }, - }, - { - name: "defaults json when no content type", - reqBody: pkger.ReqApply{ - DryRun: true, - OrgID: platform.ID(9000).String(), - RawTemplate: bucketPkgKinds(t, pkger.EncodingJSON), - }, - }, - { - name: "retrieves package from a URL (json)", - reqBody: pkger.ReqApply{ - DryRun: true, - OrgID: platform.ID(9000).String(), - Remotes: []pkger.ReqTemplateRemote{{ - URL: newPkgURL(t, filesvr.URL, "testdata/remote_bucket.json"), - }}, - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - svc := &fakeSVC{ - dryRunFn: func(ctx context.Context, orgID, userID platform.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error) { - var opt pkger.ApplyOpt - for _, o := range opts { - o(&opt) - } - pkg, err := pkger.Combine(opt.Templates) - if err != nil { - return pkger.ImpactSummary{}, err - } - - if err := pkg.Validate(); err != nil { - return pkger.ImpactSummary{}, err - } - sum := pkg.Summary() - var diff pkger.Diff - for _, b := range sum.Buckets { - diff.Buckets = append(diff.Buckets, pkger.DiffBucket{ - DiffIdentifier: pkger.DiffIdentifier{ - MetaName: b.Name, - }, - }) - } - return pkger.ImpactSummary{ - Summary: sum, - Diff: diff, - }, nil - }, - } - - core, _ := observer.New(zap.InfoLevel) - pkgHandler := 
pkger.NewHTTPServerTemplates(zap.New(core), svc, defaultClient) - svr := newMountedHandler(pkgHandler, 1) - - testttp. - PostJSON(t, "/api/v2/templates/apply", tt.reqBody). - Headers("Content-Type", tt.contentType). - Do(svr). - ExpectStatus(http.StatusOK). - ExpectBody(func(buf *bytes.Buffer) { - var resp pkger.RespApply - decodeBody(t, buf, &resp) - - assert.Len(t, resp.Summary.Buckets, 1) - assert.Len(t, resp.Diff.Buckets, 1) - }) - } - t.Run(tt.name, fn) - } - }) - - t.Run("yml", func(t *testing.T) { - tests := []struct { - name string - contentType string - }{ - { - name: "app yml", - contentType: "application/x-yaml", - }, - { - name: "text yml", - contentType: "text/yml", - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - svc := &fakeSVC{ - dryRunFn: func(ctx context.Context, orgID, userID platform.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error) { - var opt pkger.ApplyOpt - for _, o := range opts { - o(&opt) - } - pkg, err := pkger.Combine(opt.Templates) - if err != nil { - return pkger.ImpactSummary{}, err - } - - if err := pkg.Validate(); err != nil { - return pkger.ImpactSummary{}, err - } - sum := pkg.Summary() - var diff pkger.Diff - for _, b := range sum.Buckets { - diff.Buckets = append(diff.Buckets, pkger.DiffBucket{ - DiffIdentifier: pkger.DiffIdentifier{ - MetaName: b.Name, - }, - }) - } - return pkger.ImpactSummary{ - Diff: diff, - Summary: sum, - }, nil - }, - } - - pkgHandler := pkger.NewHTTPServerTemplates(zap.NewNop(), svc, defaultClient) - svr := newMountedHandler(pkgHandler, 1) - - body := newReqApplyYMLBody(t, platform.ID(9000), true) - - testttp. - Post(t, "/api/v2/templates/apply", body). - Headers("Content-Type", tt.contentType). - Do(svr). - ExpectStatus(http.StatusOK). - ExpectBody(func(buf *bytes.Buffer) { - var resp pkger.RespApply - decodeBody(t, buf, &resp) - - assert.Len(t, resp.Summary.Buckets, 1) - assert.Len(t, resp.Diff.Buckets, 1) - }) - } - - t.Run(tt.name, fn) - } - }) - - t.Run("all diff and summary resource collections are non null", func(t *testing.T) { - svc := &fakeSVC{ - dryRunFn: func(ctx context.Context, orgID, userID platform.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error) { - // return zero value pkg - return pkger.ImpactSummary{}, nil - }, - } - - pkgHandler := pkger.NewHTTPServerTemplates(zap.NewNop(), svc, defaultClient) - svr := newMountedHandler(pkgHandler, 1) - - testttp. - PostJSON(t, "/api/v2/templates/apply", pkger.ReqApply{ - DryRun: true, - OrgID: platform.ID(1).String(), - RawTemplate: bucketPkgKinds(t, pkger.EncodingJSON), - }). - Headers("Content-Type", "application/json"). - Do(svr). - ExpectStatus(http.StatusOK). 
- ExpectBody(func(buf *bytes.Buffer) { - var resp pkger.RespApply - decodeBody(t, buf, &resp) - assertNonZeroApplyResp(t, resp) - }) - }) - - t.Run("with multiple pkgs", func(t *testing.T) { - newBktPkg := func(t *testing.T, bktName string) pkger.ReqRawTemplate { - t.Helper() - - pkgStr := fmt.Sprintf(`[ - { - "apiVersion": "%[1]s", - "kind": "Bucket", - "metadata": { - "name": %q - }, - "spec": {} - } -]`, pkger.APIVersion, bktName) - - pkg, err := pkger.Parse(pkger.EncodingJSON, pkger.FromString(pkgStr)) - require.NoError(t, err) - - pkgBytes, err := pkg.Encode(pkger.EncodingJSON) - require.NoError(t, err) - return pkger.ReqRawTemplate{ - ContentType: pkger.EncodingJSON.String(), - Sources: pkg.Sources(), - Template: pkgBytes, - } - } - - tests := []struct { - name string - reqBody pkger.ReqApply - expectedBkts []string - }{ - { - name: "retrieves package from a URL and raw pkgs", - reqBody: pkger.ReqApply{ - DryRun: true, - OrgID: platform.ID(9000).String(), - Remotes: []pkger.ReqTemplateRemote{{ - ContentType: "json", - URL: newPkgURL(t, filesvr.URL, "testdata/remote_bucket.json"), - }}, - RawTemplates: []pkger.ReqRawTemplate{ - newBktPkg(t, "bkt1"), - newBktPkg(t, "bkt2"), - newBktPkg(t, "bkt3"), - }, - }, - expectedBkts: []string{"bkt1", "bkt2", "bkt3", "rucket-11"}, - }, - { - name: "retrieves packages from raw single and list", - reqBody: pkger.ReqApply{ - DryRun: true, - OrgID: platform.ID(9000).String(), - RawTemplate: newBktPkg(t, "bkt4"), - RawTemplates: []pkger.ReqRawTemplate{ - newBktPkg(t, "bkt1"), - newBktPkg(t, "bkt2"), - newBktPkg(t, "bkt3"), - }, - }, - expectedBkts: []string{"bkt1", "bkt2", "bkt3", "bkt4"}, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - svc := &fakeSVC{ - dryRunFn: func(ctx context.Context, orgID, userID platform.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error) { - var opt pkger.ApplyOpt - for _, o := range opts { - o(&opt) - } - pkg, err := pkger.Combine(opt.Templates) - if err != nil { - return pkger.ImpactSummary{}, err - } - - if err := pkg.Validate(); err != nil { - return pkger.ImpactSummary{}, err - } - sum := pkg.Summary() - var diff pkger.Diff - for _, b := range sum.Buckets { - diff.Buckets = append(diff.Buckets, pkger.DiffBucket{ - DiffIdentifier: pkger.DiffIdentifier{ - MetaName: b.Name, - }, - }) - } - - return pkger.ImpactSummary{ - Diff: diff, - Summary: sum, - }, nil - }, - } - - pkgHandler := pkger.NewHTTPServerTemplates(zap.NewNop(), svc, defaultClient) - svr := newMountedHandler(pkgHandler, 1) - - testttp. - PostJSON(t, "/api/v2/templates/apply", tt.reqBody). - Do(svr). - ExpectStatus(http.StatusOK). 
- ExpectBody(func(buf *bytes.Buffer) { - var resp pkger.RespApply - decodeBody(t, buf, &resp) - - require.Len(t, resp.Summary.Buckets, len(tt.expectedBkts)) - for i, expected := range tt.expectedBkts { - assert.Equal(t, expected, resp.Summary.Buckets[i].Name) - } - }) - } - - t.Run(tt.name, fn) - } - }) - - t.Run("validation failures", func(t *testing.T) { - tests := []struct { - name string - contentType string - reqBody pkger.ReqApply - expectedStatusCode int - }{ - { - name: "invalid org id", - contentType: "application/json", - reqBody: pkger.ReqApply{ - DryRun: true, - OrgID: "bad org id", - RawTemplate: bucketPkgKinds(t, pkger.EncodingJSON), - }, - expectedStatusCode: http.StatusBadRequest, - }, - { - name: "invalid stack id", - contentType: "application/json", - reqBody: pkger.ReqApply{ - DryRun: true, - OrgID: platform.ID(9000).String(), - StackID: strPtr("invalid stack id"), - RawTemplate: bucketPkgKinds(t, pkger.EncodingJSON), - }, - expectedStatusCode: http.StatusBadRequest, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - svc := &fakeSVC{ - dryRunFn: func(ctx context.Context, orgID, userID platform.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error) { - var opt pkger.ApplyOpt - for _, o := range opts { - o(&opt) - } - pkg, err := pkger.Combine(opt.Templates) - if err != nil { - return pkger.ImpactSummary{}, err - } - return pkger.ImpactSummary{ - Summary: pkg.Summary(), - }, nil - }, - } - - pkgHandler := pkger.NewHTTPServerTemplates(zap.NewNop(), svc, defaultClient) - svr := newMountedHandler(pkgHandler, 1) - - testttp. - PostJSON(t, "/api/v2/templates/apply", tt.reqBody). - Headers("Content-Type", tt.contentType). - Do(svr). - ExpectStatus(tt.expectedStatusCode) - } - - t.Run(tt.name, fn) - } - }) - - t.Run("resp apply err response", func(t *testing.T) { - tests := []struct { - name string - contentType string - reqBody pkger.ReqApply - }{ - { - name: "invalid json", - contentType: "application/json", - reqBody: pkger.ReqApply{ - DryRun: true, - OrgID: platform.ID(9000).String(), - RawTemplate: simpleInvalidBody(t, pkger.EncodingJSON), - }, - }, - } - for _, tt := range tests { - fn := func(t *testing.T) { - svc := &fakeSVC{ - dryRunFn: func(ctx context.Context, orgID, userID platform.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error) { - var opt pkger.ApplyOpt - for _, o := range opts { - o(&opt) - } - pkg, err := pkger.Combine(opt.Templates) - if err != nil { - return pkger.ImpactSummary{}, err - } - - if err := pkg.Validate(); err != nil { - return pkger.ImpactSummary{}, err - } - sum := pkg.Summary() - var diff pkger.Diff - for _, b := range sum.Buckets { - diff.Buckets = append(diff.Buckets, pkger.DiffBucket{ - DiffIdentifier: pkger.DiffIdentifier{ - MetaName: b.Name, - }, - }) - } - return pkger.ImpactSummary{ - Summary: sum, - Diff: diff, - }, nil - }, - } - - pkgHandler := pkger.NewHTTPServerTemplates(zap.NewNop(), svc, defaultClient) - svr := newMountedHandler(pkgHandler, 1) - - testttp. - PostJSON(t, "/api/v2/templates/apply", tt.reqBody). - Headers("Content-Type", tt.contentType). - Do(svr). - ExpectStatus(http.StatusUnprocessableEntity). 
- ExpectBody(func(buf *bytes.Buffer) { - var resp pkger.RespApplyErr - decodeBody(t, buf, &resp) - require.Equal(t, "unprocessable entity", resp.Code) - require.Greater(t, len(resp.Message), 0) - require.NotNil(t, resp.Summary) - require.NotNil(t, resp.Diff) - require.Greater(t, len(resp.Errors), 0) - }) - } - t.Run(tt.name, fn) - } - }) - }) - - t.Run("apply a pkg", func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - svc := &fakeSVC{ - applyFn: func(ctx context.Context, orgID, userID platform.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error) { - var opt pkger.ApplyOpt - for _, o := range opts { - o(&opt) - } - - pkg, err := pkger.Combine(opt.Templates) - if err != nil { - return pkger.ImpactSummary{}, err - } - - sum := pkg.Summary() - - var diff pkger.Diff - for _, b := range sum.Buckets { - diff.Buckets = append(diff.Buckets, pkger.DiffBucket{ - DiffIdentifier: pkger.DiffIdentifier{ - MetaName: b.Name, - }, - }) - } - for key := range opt.MissingSecrets { - sum.MissingSecrets = append(sum.MissingSecrets, key) - } - - return pkger.ImpactSummary{ - Diff: diff, - Summary: sum, - }, nil - }, - } - - pkgHandler := pkger.NewHTTPServerTemplates(zap.NewNop(), svc, defaultClient) - svr := newMountedHandler(pkgHandler, 1) - - testttp. - PostJSON(t, "/api/v2/templates/apply", pkger.ReqApply{ - OrgID: platform.ID(9000).String(), - Secrets: map[string]string{"secret1": "val1"}, - RawTemplate: bucketPkgKinds(t, pkger.EncodingJSON), - }). - Do(svr). - ExpectStatus(http.StatusCreated). - ExpectBody(func(buf *bytes.Buffer) { - var resp pkger.RespApply - decodeBody(t, buf, &resp) - - assert.Len(t, resp.Summary.Buckets, 1) - assert.Len(t, resp.Diff.Buckets, 1) - assert.Equal(t, []string{"secret1"}, resp.Summary.MissingSecrets) - assert.Nil(t, resp.Errors) - }) - }) - - t.Run("all diff and summary resource collections are non null", func(t *testing.T) { - svc := &fakeSVC{ - applyFn: func(ctx context.Context, orgID, userID platform.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error) { - // return zero value pkg - return pkger.ImpactSummary{}, nil - }, - } - - pkgHandler := pkger.NewHTTPServerTemplates(zap.NewNop(), svc, defaultClient) - svr := newMountedHandler(pkgHandler, 1) - - testttp. - PostJSON(t, "/api/v2/templates/apply", pkger.ReqApply{ - OrgID: platform.ID(1).String(), - RawTemplate: bucketPkgKinds(t, pkger.EncodingJSON), - }). - Headers("Content-Type", "application/json"). - Do(svr). - ExpectStatus(http.StatusCreated). 
- ExpectBody(func(buf *bytes.Buffer) { - var resp pkger.RespApply - decodeBody(t, buf, &resp) - assertNonZeroApplyResp(t, resp) - }) - }) - }) - - t.Run("Templates()", func(t *testing.T) { - tests := []struct { - name string - reqBody pkger.ReqApply - encoding pkger.Encoding - }{ - { - name: "jsonnet disabled", - reqBody: pkger.ReqApply{ - OrgID: platform.ID(9000).String(), - RawTemplate: bucketPkgKinds(t, pkger.EncodingJsonnet), - }, - encoding: pkger.EncodingJsonnet, - }, - { - name: "jsonnet remote disabled", - reqBody: pkger.ReqApply{ - OrgID: platform.ID(9000).String(), - Remotes: []pkger.ReqTemplateRemote{{ - URL: newPkgURL(t, filesvr.URL, "testdata/bucket_associates_labels.jsonnet"), - }}, - }, - encoding: pkger.EncodingJsonnet, - }, - { - name: "jsonnet disabled remote source", - reqBody: pkger.ReqApply{ - OrgID: platform.ID(9000).String(), - RawTemplate: bucketPkgJsonWithJsonnetRemote(t), - }, - encoding: pkger.EncodingJSON, - }, - } - - for _, tt := range tests { - tmpl, err := tt.reqBody.Templates(tt.encoding, defaultClient) - assert.Nil(t, tmpl) - require.Error(t, err) - assert.Equal(t, "unprocessable entity", influxerror.ErrorCode(err)) - assert.Contains(t, influxerror.ErrorMessage(err), "invalid encoding provided: jsonnet") - } - }) - - t.Run("Templates() remotes with IP validation", func(t *testing.T) { - tests := []struct { - name string - client *http.Client - expCode int - expErr string - }{ - { - name: "no filter ip", - client: pkger.NewDefaultHTTPClient(fluxurl.PassValidator{}), - expCode: http.StatusOK, - expErr: "", - }, - { - name: "filter ip", - client: pkger.NewDefaultHTTPClient(fluxurl.PrivateIPValidator{}), - expCode: http.StatusUnprocessableEntity, - expErr: "no such host", - }, - } - - svc := &fakeSVC{ - dryRunFn: func(ctx context.Context, orgID, userID platform.ID, opts ...pkger.ApplyOptFn) (pkger.ImpactSummary, error) { - return pkger.ImpactSummary{}, nil - }, - } - for _, tt := range tests { - core, sink := observer.New(zap.InfoLevel) - pkgHandler := pkger.NewHTTPServerTemplates(zap.New(core), svc, tt.client) - svr := newMountedHandler(pkgHandler, 1) - - reqBody := pkger.ReqApply{ - DryRun: true, - OrgID: platform.ID(9000).String(), - Remotes: []pkger.ReqTemplateRemote{{ - URL: newPkgURL(t, filesvr.URL, "testdata/remote_bucket.json"), - }}, - } - - ctx := context.Background() - testttp. - PostJSON(t, "/api/v2/templates/apply", reqBody). - Headers("Content-Type", "application/json"). - WithCtx(ctx). - Do(svr). - ExpectStatus(tt.expCode). 
- ExpectBody(func(buf *bytes.Buffer) { - var resp pkger.RespApply - decodeBody(t, buf, &resp) - - assert.Len(t, resp.Summary.Buckets, 0) - assert.Len(t, resp.Diff.Buckets, 0) - }) - - if tt.expErr != "" { - // Verify logging output has the expected generic flux message - entries := sink.TakeAll() // resets to 0 - fmt.Printf("%+v\n", entries) - require.Equal(t, 1, len(entries)) - assert.Contains(t, fmt.Sprintf("%s", entries[0].Context[0].Interface), tt.expErr) - } - } - }) -} - -func assertNonZeroApplyResp(t *testing.T, resp pkger.RespApply) { - t.Helper() - - assert.NotNil(t, resp.Sources) - - assert.NotNil(t, resp.Diff.Buckets) - assert.NotNil(t, resp.Diff.Checks) - assert.NotNil(t, resp.Diff.Dashboards) - assert.NotNil(t, resp.Diff.Labels) - assert.NotNil(t, resp.Diff.LabelMappings) - assert.NotNil(t, resp.Diff.NotificationEndpoints) - assert.NotNil(t, resp.Diff.NotificationRules) - assert.NotNil(t, resp.Diff.Tasks) - assert.NotNil(t, resp.Diff.Telegrafs) - assert.NotNil(t, resp.Diff.Variables) - - assert.NotNil(t, resp.Summary.Buckets) - assert.NotNil(t, resp.Summary.Checks) - assert.NotNil(t, resp.Summary.Dashboards) - assert.NotNil(t, resp.Summary.Labels) - assert.NotNil(t, resp.Summary.LabelMappings) - assert.NotNil(t, resp.Summary.NotificationEndpoints) - assert.NotNil(t, resp.Summary.NotificationRules) - assert.NotNil(t, resp.Summary.Tasks) - assert.NotNil(t, resp.Summary.TelegrafConfigs) - assert.NotNil(t, resp.Summary.Variables) -} - -func bucketPkgKinds(t *testing.T, encoding pkger.Encoding) pkger.ReqRawTemplate { - t.Helper() - - var pkgStr string - switch encoding { - case pkger.EncodingJsonnet: - pkgStr = ` -local Bucket(name, desc) = { - apiVersion: '%[1]s', - kind: 'Bucket', - metadata: { - name: name - }, - spec: { - description: desc - } -}; - -[ - Bucket(name="rucket-1", desc="bucket 1 description"), -] -` - case pkger.EncodingJSON: - pkgStr = `[ - { - "apiVersion": "%[1]s", - "kind": "Bucket", - "metadata": { - "name": "rucket-11" - }, - "spec": { - "description": "bucket 1 description" - } - } -] -` - case pkger.EncodingYAML: - pkgStr = `apiVersion: %[1]s -kind: Bucket -metadata: - name: rucket-11 -spec: - description: bucket 1 description -` - default: - require.FailNow(t, "invalid encoding provided: "+encoding.String()) - } - - pkg, err := pkger.Parse(encoding, pkger.FromString(fmt.Sprintf(pkgStr, pkger.APIVersion)), pkger.EnableJsonnet()) - require.NoError(t, err) - - b, err := pkg.Encode(encoding) - require.NoError(t, err) - return pkger.ReqRawTemplate{ - ContentType: encoding.String(), - Sources: pkg.Sources(), - Template: b, - } -} - -func bucketPkgJsonWithJsonnetRemote(t *testing.T) pkger.ReqRawTemplate { - pkgStr := `[ - { - "apiVersion": "%[1]s", - "kind": "Bucket", - "metadata": { - "name": "rucket-11" - }, - "spec": { - "description": "bucket 1 description" - } - } -] -` - // Create a json template and then add a jsonnet remote raw template - pkg, err := pkger.Parse(pkger.EncodingJSON, pkger.FromString(fmt.Sprintf(pkgStr, pkger.APIVersion))) - require.NoError(t, err) - - b, err := pkg.Encode(pkger.EncodingJSON) - require.NoError(t, err) - return pkger.ReqRawTemplate{ - ContentType: pkger.EncodingJsonnet.String(), - Sources: []string{"file:///nonexistent.jsonnet"}, - Template: b, - } -} - -func simpleInvalidBody(t *testing.T, encoding pkger.Encoding) pkger.ReqRawTemplate { - t.Helper() - b := bytes.Buffer{} - b.WriteString("[ {}, {} ]") - return pkger.ReqRawTemplate{ - ContentType: encoding.String(), - Sources: []string{"test1.json"}, - Template: b.Bytes(), - 
} -} - -func newReqApplyYMLBody(t *testing.T, orgID platform.ID, dryRun bool) *bytes.Buffer { - t.Helper() - - var buf bytes.Buffer - err := yaml.NewEncoder(&buf).Encode(pkger.ReqApply{ - DryRun: dryRun, - OrgID: orgID.String(), - RawTemplate: bucketPkgKinds(t, pkger.EncodingYAML), - }) - require.NoError(t, err) - return &buf -} - -func decodeBody(t *testing.T, r io.Reader, v interface{}) { - t.Helper() - - if err := json.NewDecoder(r).Decode(v); err != nil { - require.FailNow(t, err.Error()) - } -} - -func newMountedHandler(rh kithttp.ResourceHandler, userID platform.ID) chi.Router { - r := chi.NewRouter() - r.Mount(rh.Prefix(), authMW(userID)(rh)) - return r -} - -func authMW(userID platform.ID) func(http.Handler) http.Handler { - return func(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - r = r.WithContext(pcontext.SetAuthorizer(r.Context(), &influxdb.Session{UserID: userID})) - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) - } -} diff --git a/pkger/internal/wordplay/wordplay.go b/pkger/internal/wordplay/wordplay.go deleted file mode 100644 index b10720674cc..00000000000 --- a/pkger/internal/wordplay/wordplay.go +++ /dev/null @@ -1,887 +0,0 @@ -package wordplay - -import ( - "fmt" - "math/rand" - "time" - - rand2 "github.com/influxdata/influxdb/v2/internal/rand" -) - -var seededRand *rand.Rand - -func init() { - lockedSource := rand2.NewLockedSourceFromSeed(time.Now().UnixNano()) - seededRand = rand.New(lockedSource) -} - -var ( - left = [...]string{ - "admiring", - "adoring", - "adventuring", - "affectionate", - "agitated", - "agreeing", - "alerting", - "amazing", - "amusing", - "angry", - "annoying", - "awesome", - "beautiful", - "bettering", - "blissful", - "bold", - "boring", - "brave", - "burfect", - "busy", - "charming", - "clever", - "cool", - "compassionate", - "competent", - "condescending", - "confident", - "cranky", - "crazy", - "crumbling", - "dangerous", - "dangling", - "dazzling", - "determined", - "distracted", - "dreamy", - "eager", - "earnest", - "earning", - "ecstatic", - "eerie", - "elastic", - "elated", - "elegant", - "eloquent", - "endangered", - "epic", - "exciting", - "fasting", - "fervent", - "festive", - "flamboyant", - "focused", - "friendly", - "frosty", - "funny", - "gallant", - "gifted", - "goofy", - "gracious", - "great", - "happy", - "hardcore", - "heuristic", - "hopeful", - "hungry", - "infallible", - "inspiring", - "interesting", - "intelligent", - "jolly", - "jovial", - "keen", - "kind", - "laughing", - "loving", - "lucid", - "magical", - "mystifying", - "modest", - "musing", - "naughty", - "nervous", - "nice", - "nifty", - "noshing", - "nostalgic", - "objective", - "obstinate", - "optimistic", - "peaceful", - "pedantic", - "pensive", - "practical", - "priceless", - "quirky", - "quizzical", - "rainy", - "realistic", - "recursing", - "ridiculous", - "righteous", - "rightful", - "relaxed", - "reverent", - "romantic", - "rustic", - "rustling", - "rusty", - "sad", - "serene", - "sharp", - "shiny", - "silly", - "sleepy", - "sloppy", - "spectacular", - "stoic", - "strange", - "stubborn", - "stupefied", - "suspicious", - "sweet", - "tasty", - "tender", - "terrifying", - "thirsty", - "toasty", - "trusting", - "unbridled", - "unruffled", - "upbeat", - "vibrant", - "victorious", - "vigilant", - "vigorous", - "vivid", - "wizardly", - "wonderful", - "wondrous", - "xenodochial", - "youthful", - "zealous", - "zen", - } - - right = [...]string{ - // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding 
father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB - "albattani", - - // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen - "allen", - - // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida - "almeida", - - // Kathleen Antonelli, American computer programmer and one of the six original programmers of the ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli - "antonelli", - - // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi - "agnesi", - - // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes - "archimedes", - - // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli - "ardinghelli", - - // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata - "aryabhata", - - // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin - "austin", - - // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. - "babbage", - - // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach - "banach", - - // Buckaroo Banzai and his mentor Dr. Hikita perfected the "oscillation overthruster", a device that allows one to pass through solid matter. - https://en.wikipedia.org/wiki/The_Adventures_of_Buckaroo_Banzai_Across_the_8th_Dimension - "banzai", - - // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen - "bardeen", - - // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik - "bartik", - - // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi - "bassi", - - // Hugh Beaver, British engineer, founder of the Guinness Book of World Records https://en.wikipedia.org/wiki/Hugh_Beaver - "beaver", - - // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell - "bell", - - // Karl Friedrich Benz - a German automobile engineer. Inventor of the first practical motorcar. https://en.wikipedia.org/wiki/Karl_Benz - "benz", - - // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. 
Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha - "bhabha", - - // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus - "bhaskara", - - // Sue Black - British computer scientist and campaigner. She has been instrumental in saving Bletchley Park, the site of World War II codebreaking - https://en.wikipedia.org/wiki/Sue_Black_(computer_scientist) - "black", - - // Elizabeth Helen Blackburn - Australian-American Nobel laureate; best known for co-discovering telomerase. https://en.wikipedia.org/wiki/Elizabeth_Blackburn - "blackburn", - - // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell - "blackwell", - - // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. - "bohr", - - // Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth - "booth", - - // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg - "borg", - - // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose - "bose", - - // Katherine Louise Bouman is an imaging scientist and Assistant Professor of Computer Science at the California Institute of Technology. She researches computational methods for imaging, and developed an algorithm that made possible the picture first visualization of a black hole using the Event Horizon Telescope. - https://en.wikipedia.org/wiki/Katie_Bouman - "bouman", - - // Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville - "boyd", - - // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero - "brahmagupta", - - // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain - "brattain", - - // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) - "brown", - - // Linda Brown Buck - American biologist and Nobel laureate best known for her genetic and molecular analyses of the mechanisms of smell. https://en.wikipedia.org/wiki/Linda_B._Buck - "buck", - - // Dame Susan Jocelyn Bell Burnell - Northern Irish astrophysicist who discovered radio pulsars and was the first to analyse them. https://en.wikipedia.org/wiki/Jocelyn_Bell_Burnell - "burnell", - - // Annie Jump Cannon - pioneering female astronomer who classified hundreds of thousands of stars and created the system we use to understand stars today. https://en.wikipedia.org/wiki/Annie_Jump_Cannon - "cannon", - - // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson - "carson", - - // Dame Mary Lucy Cartwright - British mathematician who was one of the first to study what is now known as chaos theory. Also known for Cartwright's theorem which finds applications in signal processing. 
https://en.wikipedia.org/wiki/Mary_Cartwright - "cartwright", - - // George Washington Carver - American agricultural scientist and inventor. He was the most prominent black scientist of the early 20th century. https://en.wikipedia.org/wiki/George_Washington_Carver - "carver", - - // Vinton Gray Cerf - American Internet pioneer, recognised as one of "the fathers of the Internet". With Robert Elliot Kahn, he designed TCP and IP, the primary data communication protocols of the Internet and other computer networks. https://en.wikipedia.org/wiki/Vint_Cerf - "cerf", - - // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar - "chandrasekhar", - - // Sergey Alexeyevich Chaplygin (Russian: Серге́й Алексе́евич Чаплы́гин; April 5, 1869 – October 8, 1942) was a Russian and Soviet physicist, mathematician, and mechanical engineer. He is known for mathematical formulas such as Chaplygin's equation and for a hypothetical substance in cosmology called Chaplygin gas, named after him. https://en.wikipedia.org/wiki/Sergey_Chaplygin - "chaplygin", - - // Émilie du Châtelet - French natural philosopher, mathematician, physicist, and author during the early 1730s, known for her translation of and commentary on Isaac Newton's book Principia containing basic laws of physics. https://en.wikipedia.org/wiki/%C3%89milie_du_Ch%C3%A2telet - "chatelet", - - // Asima Chatterjee was an Indian organic chemist noted for her research on vinca alkaloids, development of drugs for treatment of epilepsy and malaria - https://en.wikipedia.org/wiki/Asima_Chatterjee - "chatterjee", - - // Pafnuty Chebyshev - Russian mathematician. He is known fo his works on probability, statistics, mechanics, analytical geometry and number theory https://en.wikipedia.org/wiki/Pafnuty_Chebyshev - "chebyshev", - - // Bram Cohen - American computer programmer and author of the BitTorrent peer-to-peer protocol. https://en.wikipedia.org/wiki/Bram_Cohen - "cohen", - - // David Lee Chaum - American computer scientist and cryptographer. Known for his seminal contributions in the field of anonymous communication. https://en.wikipedia.org/wiki/David_Chaum - "chaum", - - // Joan Clarke - Bletchley Park code breaker during the Second World War who pioneered techniques that remained top secret for decades. Also an accomplished numismatist https://en.wikipedia.org/wiki/Joan_Clarke - "clarke", - - // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden - "colden", - - // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori - "cori", - - // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray - "cray", - - // This entry reflects a husband and wife team who worked together: - // Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https://en.wikipedia.org/wiki/Joan_Curran - // Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. 
https://en.wikipedia.org/wiki/Samuel_Curran - "curran", - - // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. - "curie", - - // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. - "darwin", - - // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. - "davinci", - - // A. K. (Alexander Keewatin) Dewdney, Canadian mathematician, computer scientist, author and filmmaker. Contributor to Scientific American's "Computer Recreations" from 1984 to 1991. Author of Core War (program), The Planiverse, The Armchair Universe, The Magic Machine, The New Turing Omnibus, and more. https://en.wikipedia.org/wiki/Alexander_Dewdney - "dewdney", - - // Satish Dhawan - Indian mathematician and aerospace engineer, known for leading the successful and indigenous development of the Indian space programme. https://en.wikipedia.org/wiki/Satish_Dhawan - "dhawan", - - // Bailey Whitfield Diffie - American cryptographer and one of the pioneers of public-key cryptography. https://en.wikipedia.org/wiki/Whitfield_Diffie - "diffie", - - // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. - "dijkstra", - - // Paul Adrien Maurice Dirac - English theoretical physicist who made fundamental contributions to the early development of both quantum mechanics and quantum electrodynamics. https://en.wikipedia.org/wiki/Paul_Dirac - "dirac", - - // Agnes Meyer Driscoll - American cryptanalyst during World Wars I and II who successfully cryptanalysed a number of Japanese ciphers. She was also the co-developer of one of the cipher machines of the US Navy, the CM. https://en.wikipedia.org/wiki/Agnes_Meyer_Driscoll - "driscoll", - - // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky - "dubinsky", - - // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley - "easley", - - // Thomas Alva Edison, prolific inventor https://en.wikipedia.org/wiki/Thomas_Edison - "edison", - - // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein - "einstein", - - // Alexandra Asanovna Elbakyan (Russian: Алекса́ндра Аса́новна Элбакя́н) is a Kazakhstani graduate student, computer programmer, internet pirate in hiding, and the creator of the site Sci-Hub. Nature has listed her in 2016 in the top ten people that mattered in science, and Ars Technica has compared her to Aaron Swartz. - https://en.wikipedia.org/wiki/Alexandra_Elbakyan - "elbakyan", - - // Taher A. ElGamal - Egyptian cryptographer best known for the ElGamal discrete log cryptosystem and the ElGamal digital signature scheme. https://en.wikipedia.org/wiki/Taher_Elgamal - "elgamal", - - // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion - "elion", - - // James Henry Ellis - British engineer and cryptographer employed by the GCHQ. Best known for conceiving for the first time, the idea of public-key cryptography. 
https://en.wikipedia.org/wiki/James_H._Ellis - "ellis", - - // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart - "engelbart", - - // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid - "euclid", - - // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler - "euler", - - // Michael Faraday - British scientist who contributed to the study of electromagnetism and electrochemistry. https://en.wikipedia.org/wiki/Michael_Faraday - "faraday", - - // Horst Feistel - German-born American cryptographer who was one of the earliest non-government researchers to study the design and theory of block ciphers. Co-developer of DES and Lucifer. Feistel networks, a symmetric structure used in the construction of block ciphers are named after him. https://en.wikipedia.org/wiki/Horst_Feistel - "feistel", - - // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat - "fermat", - - // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. - "fermi", - - // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman - "feynman", - - // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. - "franklin", - - // Yuri Alekseyevich Gagarin - Soviet pilot and cosmonaut, best known as the first human to journey into outer space. https://en.wikipedia.org/wiki/Yuri_Gagarin - "gagarin", - - // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei - "galileo", - - // Évariste Galois - French mathematician whose work laid the foundations of Galois theory and group theory, two major branches of abstract algebra, and the subfield of Galois connections, all while still in his late teens. https://en.wikipedia.org/wiki/%C3%89variste_Galois - "galois", - - // Kadambini Ganguly - Indian physician, known for being the first South Asian female physician, trained in western medicine, to graduate in South Asia. https://en.wikipedia.org/wiki/Kadambini_Ganguly - "ganguly", - - // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates - "gates", - - // Johann Carl Friedrich Gauss - German mathematician who made significant contributions to many fields, including number theory, algebra, statistics, analysis, differential geometry, geodesy, geophysics, mechanics, electrostatics, magnetic fields, astronomy, matrix theory, and optics. https://en.wikipedia.org/wiki/Carl_Friedrich_Gauss - "gauss", - - // Marie-Sophie Germain - French mathematician, physicist and philosopher. Known for her work on elasticity theory, number theory and philosophy. https://en.wikipedia.org/wiki/Sophie_Germain - "germain", - - // Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) - "goldberg", - - // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine - "goldstine", - - // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. 
https://en.wikipedia.org/wiki/Shafi_Goldwasser - "goldwasser", - - // James Golick, all around gangster. - "golick", - - // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall - "goodall", - - // Stephen Jay Gould was was an American paleontologist, evolutionary biologist, and historian of science. He is most famous for the theory of punctuated equilibrium - https://en.wikipedia.org/wiki/Stephen_Jay_Gould - "gould", - - // Carolyn Widney Greider - American molecular biologist and joint winner of the 2009 Nobel Prize for Physiology or Medicine for the discovery of telomerase. https://en.wikipedia.org/wiki/Carol_W._Greider - "greider", - - // Alexander Grothendieck - German-born French mathematician who became a leading figure in the creation of modern algebraic geometry. https://en.wikipedia.org/wiki/Alexander_Grothendieck - "grothendieck", - - // Lois Haibt - American computer scientist, part of the team at IBM that developed FORTRAN - https://en.wikipedia.org/wiki/Lois_Haibt - "haibt", - - // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) - "hamilton", - - // Caroline Harriet Haslett - English electrical engineer, electricity industry administrator and champion of women's rights. Co-author of British Standard 1363 that specifies AC power plugs and sockets used across the United Kingdom (which is widely considered as one of the safest designs). https://en.wikipedia.org/wiki/Caroline_Haslett - "haslett", - - // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking - "hawking", - - // Martin Edward Hellman - American cryptologist, best known for his invention of public-key cryptography in co-operation with Whitfield Diffie and Ralph Merkle. https://en.wikipedia.org/wiki/Martin_Hellman - "hellman", - - // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg - "heisenberg", - - // Grete Hermann was a German philosopher noted for her philosophical work on the foundations of quantum mechanics. https://en.wikipedia.org/wiki/Grete_Hermann - "hermann", - - // Caroline Lucretia Herschel - German astronomer and discoverer of several comets. https://en.wikipedia.org/wiki/Caroline_Herschel - "herschel", - - // Heinrich Rudolf Hertz - German physicist who first conclusively proved the existence of the electromagnetic waves. https://en.wikipedia.org/wiki/Heinrich_Hertz - "hertz", - - // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD - "heyrovsky", - - // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin - "hodgkin", - - // Douglas R. Hofstadter is an American professor of cognitive science and author of the Pulitzer Prize and American Book Award-winning work Goedel, Escher, Bach: An Eternal Golden Braid in 1979. 
A mind-bending work which coined Hofstadter's Law: "It always takes longer than you expect, even when you take into account Hofstadter's Law." https://en.wikipedia.org/wiki/Douglas_Hofstadter - "hofstadter", - - // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover - "hoover", - - // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper - "hopper", - - // Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle - "hugle", - - // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia - "hypatia", - - // Teruko Ishizaka - Japanese scientist and immunologist who co-discovered the antibody class Immunoglobulin E. https://en.wikipedia.org/wiki/Teruko_Ishizaka - "ishizaka", - - // Mary Jackson, American mathematician and aerospace engineer who earned the highest title within NASA's engineering department - https://en.wikipedia.org/wiki/Mary_Jackson_(engineer) - "jackson", - - // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil - "jang", - - // Mae Carol Jemison - is an American engineer, physician, and former NASA astronaut. She became the first black woman to travel in space when she served as a mission specialist aboard the Space Shuttle Endeavour - https://en.wikipedia.org/wiki/Mae_Jemison - "jemison", - - // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik - "jennings", - - // Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen - "jepsen", - - // Katherine Coleman Goble Johnson - American physicist and mathematician contributed to the NASA. https://en.wikipedia.org/wiki/Katherine_Johnson - "johnson", - - // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie - "joliot", - - // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones - "jones", - - // A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam - "kalam", - - // Sergey Petrovich Kapitsa (Russian: Серге́й Петро́вич Капи́ца; 14 February 1928 – 14 August 2012) was a Russian physicist and demographer. He was best known as host of the popular and long-running Russian scientific TV show, Evident, but Incredible. His father was the Nobel laureate Soviet-era physicist Pyotr Kapitsa, and his brother was the geographer and Antarctic explorer Andrey Kapitsa. 
- https://en.wikipedia.org/wiki/Sergey_Kapitsa - "kapitsa", - - // Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare - "kare", - - // Mstislav Keldysh - a Soviet scientist in the field of mathematics and mechanics, academician of the USSR Academy of Sciences (1946), President of the USSR Academy of Sciences (1961–1975), three times Hero of Socialist Labor (1956, 1961, 1971), fellow of the Royal Society of Edinburgh (1968). https://en.wikipedia.org/wiki/Mstislav_Keldysh - "keldysh", - - // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller - "keller", - - // Johannes Kepler, German astronomer known for his three laws of planetary motion - https://en.wikipedia.org/wiki/Johannes_Kepler - "kepler", - - // Omar Khayyam - Persian mathematician, astronomer and poet. Known for his work on the classification and solution of cubic equations, for his contribution to the understanding of Euclid's fifth postulate and for computing the length of a year very accurately. https://en.wikipedia.org/wiki/Omar_Khayyam - "khayyam", - - // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana - "khorana", - - // Jack Kilby invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby - "kilby", - - // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch - "kirch", - - // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth - "knuth", - - // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya - "kowalevski", - - // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande - "lalande", - - // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr - "lamarr", - - // Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. https://en.wikipedia.org/wiki/Leslie_Lamport - "lamport", - - // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey - "leakey", - - // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt - "leavitt", - - // Esther Miriam Zimmer Lederberg - American microbiologist and a pioneer of bacterial genetics. https://en.wikipedia.org/wiki/Esther_Lederberg - "lederberg", - - // Inge Lehmann - Danish seismologist and geophysicist. Known for discovering in 1936 that the Earth has a solid inner core inside a molten outer core. 
https://en.wikipedia.org/wiki/Inge_Lehmann - "lehmann", - - // Daniel Lewin - Mathematician, Akamai co-founder, soldier, 9/11 victim-- Developed optimization techniques for routing traffic on the internet. Died attempting to stop the 9-11 hijackers. https://en.wikipedia.org/wiki/Daniel_Lewin - "lewin", - - // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum - "lichterman", - - // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Prize in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov - "liskov", - - // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) - "lovelace", - - // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re - "lumiere", - - // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) - "mahavira", - - // Lynn Margulis (b. Lynn Petra Alexander) - an American evolutionary theorist and biologist, science author, educator, and popularizer, and was the primary modern proponent for the significance of symbiosis in evolution. - https://en.wikipedia.org/wiki/Lynn_Margulis - "margulis", - - // Yukihiro Matsumoto - Japanese computer scientist and software programmer best known as the chief designer of the Ruby programming language. https://en.wikipedia.org/wiki/Yukihiro_Matsumoto - "matsumoto", - - // James Clerk Maxwell - Scottish physicist, best known for his formulation of electromagnetic theory. https://en.wikipedia.org/wiki/James_Clerk_Maxwell - "maxwell", - - // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer - "mayer", - - // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) - "mccarthy", - - // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock - "mcclintock", - - // Anne Laura Dorinthea McLaren - British developmental biologist whose work helped lead to human in-vitro fertilisation. https://en.wikipedia.org/wiki/Anne_McLaren - "mclaren", - - // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean - "mclean", - - // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli - "mcnulty", - - // Gregor Johann Mendel - Czech scientist and founder of genetics. https://en.wikipedia.org/wiki/Gregor_Mendel - "mendel", - - // Dmitri Mendeleev - a chemist and inventor. He formulated the Periodic Law, created a farsighted version of the periodic table of elements, and used it to correct the properties of some already discovered elements and also to predict the properties of eight elements yet to be discovered. https://en.wikipedia.org/wiki/Dmitri_Mendeleev - "mendeleev", - - // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. 
The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner - "meitner", - - // Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky - "meninsky", - - // Ralph C. Merkle - American computer scientist, known for devising Merkle's puzzles - one of the very first schemes for public-key cryptography. Also, inventor of Merkle trees and co-inventor of the Merkle-Damgård construction for building collision-resistant cryptographic hash functions and the Merkle-Hellman knapsack cryptosystem. https://en.wikipedia.org/wiki/Ralph_Merkle - "merkle", - - // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf - "mestorf", - - // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani - "mirzakhani", - - // Gordon Earle Moore - American engineer, Silicon Valley founding father, author of Moore's law. https://en.wikipedia.org/wiki/Gordon_Moore - "moore", - - // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse - "morse", - - // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock - "murdock", - - // May-Britt Moser - Nobel prize winner neuroscientist who contributed to the discovery of grid cells in the brain. https://en.wikipedia.org/wiki/May-Britt_Moser - "moser", - - // John Napier of Merchiston - Scottish landowner known as an astronomer, mathematician and physicist. Best known for his discovery of logarithms. https://en.wikipedia.org/wiki/John_Napier - "napier", - - // John Forbes Nash, Jr. - American mathematician who made fundamental contributions to game theory, differential geometry, and the study of partial differential equations. https://en.wikipedia.org/wiki/John_Forbes_Nash_Jr. - "nash", - - // John von Neumann - todays computer architectures are based on the von Neumann architecture. https://en.wikipedia.org/wiki/Von_Neumann_architecture - "neumann", - - // Isaac Newton invented classic mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton - "newton", - - // Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal Statistical Society and a pioneer in statistical graphics https://en.wikipedia.org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform - "nightingale", - - // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel - "nobel", - - // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether - "noether", - - // Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 - "northcutt", - - // Robert Noyce invented silicone integrated circuits and gave Silicon Valley its name. 
- https://en.wikipedia.org/wiki/Robert_Noyce - "noyce", - - // Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems - "panini", - - // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 - "pare", - - // Blaise Pascal, French mathematician, physicist, and inventor - https://en.wikipedia.org/wiki/Blaise_Pascal - "pascal", - - // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. - "pasteur", - - // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin - "payne", - - // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman - "perlman", - - // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike - "pike", - - // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 - "poincare", - - // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras - "poitras", - - // Tat’yana Avenirovna Proskuriakova (Russian: Татья́на Авени́ровна Проскуряко́ва) (January 23 [O.S. January 10] 1909 – August 30, 1985) was a Russian-American Mayanist scholar and archaeologist who contributed significantly to the deciphering of Maya hieroglyphs, the writing system of the pre-Columbian Maya civilization of Mesoamerica. https://en.wikipedia.org/wiki/Tatiana_Proskouriakoff - "proskuriakova", - - // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy - "ptolemy", - - // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman - "raman", - - // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan - "ramanujan", - - // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride - "ride", - - // Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) - "montalcini", - - // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie - "ritchie", - - // Ida Rhodes - American pioneer in computer programming, designed the first computer used for Social Security. 
https://en.wikipedia.org/wiki/Ida_Rhodes - "rhodes", - - // Julia Hall Bowman Robinson - American mathematician renowned for her contributions to the fields of computability theory and computational complexity theory. https://en.wikipedia.org/wiki/Julia_Robinson - "robinson", - - // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen - "roentgen", - - // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin - "rosalind", - - // Vera Rubin - American astronomer who pioneered work on galaxy rotation rates. https://en.wikipedia.org/wiki/Vera_Rubin - "rubin", - - // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha - "saha", - - // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet - "sammet", - - // Mildred Sanderson - American mathematician best known for Sanderson's theorem concerning modular invariants. https://en.wikipedia.org/wiki/Mildred_Sanderson - "sanderson", - - // Satoshi Nakamoto is the name used by the unknown person or group of people who developed bitcoin, authored the bitcoin white paper, and created and deployed bitcoin's original reference implementation. https://en.wikipedia.org/wiki/Satoshi_Nakamoto - "satoshi", - - // Adi Shamir - Israeli cryptographer whose numerous inventions and contributions to cryptography include the Ferge Fiat Shamir identification scheme, the Rivest Shamir Adleman (RSA) public-key cryptosystem, the Shamir's secret sharing scheme, the breaking of the Merkle-Hellman cryptosystem, the TWINKLE and TWIRL factoring devices and the discovery of differential cryptanalysis (with Eli Biham). https://en.wikipedia.org/wiki/Adi_Shamir - "shamir", - - // Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon) - "shannon", - - // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) - "shaw", - - // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. https://en.wikipedia.org/wiki/Steve_Shirley - "shirley", - - // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley - "shockley", - - // Lina Solomonovna Stern (or Shtern; Russian: Лина Соломоновна Штерн; 26 August 1878 – 7 March 1968) was a Soviet biochemist, physiologist and humanist whose medical discoveries saved thousands of lives at the fronts of World War II. She is best known for her pioneering work on blood–brain barrier, which she described as hemato-encephalic barrier in 1921. https://en.wikipedia.org/wiki/Lina_Stern - "shtern", - - // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi - "sinoussi", - - // Betty Snyder - one of the original programmers of the ENIAC. 
https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton - "snyder", - - // Cynthia Solomon - Pioneer in the fields of artificial intelligence, computer science and educational computing. Known for creation of Logo, an educational programming language. https://en.wikipedia.org/wiki/Cynthia_Solomon - "solomon", - - // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence - "spence", - - // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker - "stonebraker", - - // Ivan Edward Sutherland - American computer scientist and Internet pioneer, widely regarded as the father of computer graphics. https://en.wikipedia.org/wiki/Ivan_Sutherland - "sutherland", - - // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson - "swanson", - - // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz - "swartz", - - // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles - "swirles", - - // Helen Brooke Taussig - American cardiologist and founder of the field of paediatric cardiology. https://en.wikipedia.org/wiki/Helen_B._Taussig - "taussig", - - // Valentina Tereshkova is a Russian engineer, cosmonaut and politician. She was the first woman to fly to space in 1963. In 2013, at the age of 76, she offered to go on a one-way mission to Mars. https://en.wikipedia.org/wiki/Valentina_Tereshkova - "tereshkova", - - // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla - "tesla", - - // Marie Tharp - American geologist and oceanic cartographer who co-created the first scientific map of the Atlantic Ocean floor. Her work led to the acceptance of the theories of plate tectonics and continental drift. https://en.wikipedia.org/wiki/Marie_Tharp - "tharp", - - // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson - "thompson", - - // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds - "torvalds", - - // Youyou Tu - Chinese pharmaceutical chemist and educator known for discovering artemisinin and dihydroartemisinin, used to treat malaria, which has saved millions of lives. Joint winner of the 2015 Nobel Prize in Physiology or Medicine. https://en.wikipedia.org/wiki/Tu_Youyou - "tu", - - // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. - "turing", - - // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions - "varahamihira", - - // Dorothy Vaughan was a NASA mathematician and computer programmer on the SCOUT launch vehicle program that put America's first satellites into space - https://en.wikipedia.org/wiki/Dorothy_Vaughan - "vaughan", - - // Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. 
He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya - "visvesvaraya", - - // Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard - "volhard", - - // Cédric Villani - French mathematician, won Fields Medal, Fermat Prize and Poincaré Price for his work in differential geometry and statistical mechanics. https://en.wikipedia.org/wiki/C%C3%A9dric_Villani - "villani", - - // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer - "wescoff", - - // Sylvia B. Wilbur - British computer scientist who helped develop the ARPANET, was one of the first to exchange email in the UK and a leading researcher in computer-supported collaborative work. https://en.wikipedia.org/wiki/Sylvia_Wilbur - "wilbur", - - // Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem - https://en.wikipedia.org/wiki/Andrew_Wiles - "wiles", - - // Roberta Williams, did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams - "williams", - - // Malcolm John Williamson - British mathematician and cryptographer employed by the GCHQ. Developed in 1974 what is now known as Diffie-Hellman key exchange (Diffie and Hellman first published the scheme in 1976). https://en.wikipedia.org/wiki/Malcolm_J._Williamson - "williamson", - - // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson - "wilson", - - // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing - "wing", - - // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak - "wozniak", - - // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers - "wright", - - // Chien-Shiung Wu - Chinese-American experimental physicist who made significant contributions to nuclear physics. https://en.wikipedia.org/wiki/Chien-Shiung_Wu - "wu", - - // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow - "yalow", - - // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath - "yonath", - - // Nikolay Yegorovich Zhukovsky (Russian: Никола́й Его́рович Жуко́вский, January 17 1847 – March 17, 1921) was a Russian scientist, mathematician and engineer, and a founding father of modern aero- and hydrodynamics. Whereas contemporary scientists scoffed at the idea of human flight, Zhukovsky was the first to undertake the study of airflow. He is often called the Father of Russian Aviation. 
https://en.wikipedia.org/wiki/Nikolay_Yegorovich_Zhukovsky - "zhukovsky", - } -) - -// GetRandomName generates a random name from the list of adjectives and surnames in this package, -// formatted as "adjective-surname". For example 'focused-turing'. -func GetRandomName() string { - return fmt.Sprintf("%s-%s", left[seededRand.Intn(len(left))], right[seededRand.Intn(len(right))]) -} diff --git a/pkger/main_test.go b/pkger/main_test.go deleted file mode 100644 index 67711f41c92..00000000000 --- a/pkger/main_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package pkger - -import ( - "fmt" - "os" - "path" - "testing" -) - -var ( - missedTemplateCacheCounter int64 - availableTemplateFiles = map[string][]byte{} -) - -func TestMain(m *testing.M) { - // this is to prime the files so we don't have to keep reading from disk for each test - // cuts runtime of tests down by 80% on current mac - files, _ := os.ReadDir("testdata") - for _, f := range files { - relativeName := path.Join("testdata", f.Name()) - b, err := os.ReadFile(relativeName) - if err == nil { - availableTemplateFiles[relativeName] = b - } - } - exitCode := m.Run() - if missedTemplateCacheCounter > 0 { - fmt.Println("templates that missed cache: ", missedTemplateCacheCounter) - } - os.Exit(exitCode) -} diff --git a/pkger/models.go b/pkger/models.go deleted file mode 100644 index 0651e6edecc..00000000000 --- a/pkger/models.go +++ /dev/null @@ -1,710 +0,0 @@ -package pkger - -import ( - "encoding/json" - "errors" - "reflect" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - icheck "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/notification/endpoint" -) - -// Package kind types. -const ( - KindUnknown Kind = "" - KindBucket Kind = "Bucket" - KindCheck Kind = "Check" - KindCheckDeadman Kind = "CheckDeadman" - KindCheckThreshold Kind = "CheckThreshold" - KindDashboard Kind = "Dashboard" - KindLabel Kind = "Label" - KindNotificationEndpoint Kind = "NotificationEndpoint" - KindNotificationEndpointHTTP Kind = "NotificationEndpointHTTP" - KindNotificationEndpointPagerDuty Kind = "NotificationEndpointPagerDuty" - KindNotificationEndpointSlack Kind = "NotificationEndpointSlack" - KindNotificationRule Kind = "NotificationRule" - KindPackage Kind = "Package" - KindTask Kind = "Task" - KindTelegraf Kind = "Telegraf" - KindVariable Kind = "Variable" -) - -// Kinds returns the list of known pkger kinds. -func Kinds() []Kind { - var out []Kind - for k := range kinds { - out = append(out, k) - } - return out -} - -var kinds = map[Kind]bool{ - KindBucket: true, - KindCheck: true, - KindCheckDeadman: true, - KindCheckThreshold: true, - KindDashboard: true, - KindLabel: true, - KindNotificationEndpoint: true, - KindNotificationEndpointHTTP: true, - KindNotificationEndpointPagerDuty: true, - KindNotificationEndpointSlack: true, - KindNotificationRule: true, - KindTask: true, - KindTelegraf: true, - KindVariable: true, -} - -// Kind is a resource kind. -type Kind string - -// String provides the kind in human-readable form. -func (k Kind) String() string { - if kinds[k] { - return string(k) - } - if k == KindUnknown { - return "unknown" - } - return string(k) -} - -// OK validates the kind is valid.
-func (k Kind) OK() error { - if k == KindUnknown { - return errors.New("invalid kind") - } - if !kinds[k] { - return errors.New("unsupported kind provided") - } - return nil -} - -// ResourceType converts a kind to a known resource type (if applicable). -func (k Kind) ResourceType() influxdb.ResourceType { - switch k { - case KindBucket: - return influxdb.BucketsResourceType - case KindCheck, KindCheckDeadman, KindCheckThreshold: - return influxdb.ChecksResourceType - case KindDashboard: - return influxdb.DashboardsResourceType - case KindLabel: - return influxdb.LabelsResourceType - case KindNotificationEndpoint, - KindNotificationEndpointHTTP, - KindNotificationEndpointPagerDuty, - KindNotificationEndpointSlack: - return influxdb.NotificationEndpointResourceType - case KindNotificationRule: - return influxdb.NotificationRuleResourceType - case KindTask: - return influxdb.TasksResourceType - case KindTelegraf: - return influxdb.TelegrafsResourceType - case KindVariable: - return influxdb.VariablesResourceType - default: - return "" - } -} - -func (k Kind) is(comps ...Kind) bool { - for _, c := range comps { - if c == k { - return true - } - } - return false -} - -// SafeID is an equivalent influxdb.ID that encodes safely with -// zero values (influxdb.ID == 0). -type SafeID platform.ID - -// Encode will safely encode the id. -func (s SafeID) Encode() ([]byte, error) { - id := platform.ID(s) - b, _ := id.Encode() - return b, nil -} - -// String prints a encoded string representation of the id. -func (s SafeID) String() string { - return platform.ID(s).String() -} - -// DiffIdentifier are the identifying fields for any given resource. Each resource -// dictates if the resource is new, to be removed, or will remain. -type DiffIdentifier struct { - ID SafeID `json:"id"` - StateStatus StateStatus `json:"stateStatus"` - MetaName string `json:"templateMetaName"` - Kind Kind `json:"kind"` -} - -// IsNew indicates the resource is new to the platform. -func (d DiffIdentifier) IsNew() bool { - return d.ID == 0 -} - -// Diff is the result of a service DryRun call. The diff outlines -// what is new and or updated from the current state of the platform. -type Diff struct { - Buckets []DiffBucket `json:"buckets"` - Checks []DiffCheck `json:"checks"` - Dashboards []DiffDashboard `json:"dashboards"` - Labels []DiffLabel `json:"labels"` - LabelMappings []DiffLabelMapping `json:"labelMappings"` - NotificationEndpoints []DiffNotificationEndpoint `json:"notificationEndpoints"` - NotificationRules []DiffNotificationRule `json:"notificationRules"` - Tasks []DiffTask `json:"tasks"` - Telegrafs []DiffTelegraf `json:"telegrafConfigs"` - Variables []DiffVariable `json:"variables"` -} - -// HasConflicts provides a binary t/f if there are any changes within package -// after dry run is complete. -func (d Diff) HasConflicts() bool { - for _, b := range d.Buckets { - if b.hasConflict() { - return true - } - } - - for _, l := range d.Labels { - if l.hasConflict() { - return true - } - } - - for _, v := range d.Variables { - if v.hasConflict() { - return true - } - } - - return false -} - -type ( - // DiffBucket is a diff of an individual bucket. - DiffBucket struct { - DiffIdentifier - - New DiffBucketValues `json:"new"` - Old *DiffBucketValues `json:"old"` - } - - // DiffBucketValues are the varying values for a bucket. 
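For orientation, here is a minimal sketch (an editor's illustration, not part of the deleted models.go) of how the dry-run diff types above fit together with the bucket diff values defined just below: a DiffBucket whose ID is already set and whose Old and New values differ is reported as a conflict by Diff.HasConflicts. It assumes it sits in package pkger next to these declarations; only fields shown in the source are used.

```go
// Illustrative sketch only; assumes package pkger and the types declared above.
func exampleHasConflicts() bool {
	d := Diff{
		Buckets: []DiffBucket{{
			DiffIdentifier: DiffIdentifier{
				ID:       3, // non-zero, so IsNew() reports false
				MetaName: "bucket-meta",
				Kind:     KindBucket,
			},
			New: DiffBucketValues{Name: "telemetry", Description: "updated description"},
			Old: &DiffBucketValues{Name: "telemetry", Description: "original description"},
		}},
	}
	// Old is non-nil and differs from New, so the dry run reports a conflict.
	return d.HasConflicts() // true
}
```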
- DiffBucketValues struct { - Name string `json:"name"` - Description string `json:"description"` - RetentionRules retentionRules `json:"retentionRules"` - SchemaType string `json:"schemaType,omitempty"` - MeasurementSchemas measurementSchemas `json:"measurementSchemas,omitempty"` - } -) - -func (d DiffBucket) hasConflict() bool { - return !d.IsNew() && d.Old != nil && !reflect.DeepEqual(*d.Old, d.New) -} - -// DiffCheckValues are the varying values for a check. -type DiffCheckValues struct { - influxdb.Check -} - -// MarshalJSON implementation is forced by the embedded check value here. -func (d DiffCheckValues) MarshalJSON() ([]byte, error) { - if d.Check == nil { - return json.Marshal(nil) - } - return json.Marshal(d.Check) -} - -// UnmarshalJSON decodes the check values. -func (d *DiffCheckValues) UnmarshalJSON(b []byte) (err error) { - d.Check, err = icheck.UnmarshalJSON(b) - if errors2.EInternal == errors2.ErrorCode(err) { - return nil - } - return err -} - -// DiffCheck is a diff of an individual check. -type DiffCheck struct { - DiffIdentifier - - New DiffCheckValues `json:"new"` - Old *DiffCheckValues `json:"old"` -} - -type ( - // DiffDashboard is a diff of an individual dashboard. - DiffDashboard struct { - DiffIdentifier - - New DiffDashboardValues `json:"new"` - Old *DiffDashboardValues `json:"old"` - } - - // DiffDashboardValues are values for a dashboard. - DiffDashboardValues struct { - Name string `json:"name"` - Desc string `json:"description"` - Charts []DiffChart `json:"charts"` - } -) - -// DiffChart is a diff of a chart. Since all charts are new right now, -// the SummaryChart is reused here. -type DiffChart SummaryChart - -func (d *DiffChart) MarshalJSON() ([]byte, error) { - return json.Marshal((*SummaryChart)(d)) -} - -func (d *DiffChart) UnmarshalJSON(b []byte) error { - var sumChart SummaryChart - if err := json.Unmarshal(b, &sumChart); err != nil { - return err - } - *d = DiffChart(sumChart) - return nil -} - -type ( - // DiffLabel is a diff of an individual label. - DiffLabel struct { - DiffIdentifier - - New DiffLabelValues `json:"new"` - Old *DiffLabelValues `json:"old"` - } - - // DiffLabelValues are the varying values for a label. - DiffLabelValues struct { - Name string `json:"name"` - Color string `json:"color"` - Description string `json:"description"` - } -) - -func (d DiffLabel) hasConflict() bool { - return !d.IsNew() && d.Old != nil && *d.Old != d.New -} - -// StateStatus indicates the status of a diff or summary resource. -type StateStatus string - -const ( - StateStatusExists StateStatus = "exists" - StateStatusNew StateStatus = "new" - StateStatusRemove StateStatus = "remove" -) - -// DiffLabelMapping is a diff of an individual label mapping. A -// single resource may have multiple mappings to multiple labels. -// A label can have many mappings to other resources. -type DiffLabelMapping struct { - StateStatus StateStatus `json:"stateStatus"` - - ResType influxdb.ResourceType `json:"resourceType"` - ResID SafeID `json:"resourceID"` - ResName string `json:"resourceName"` - ResMetaName string `json:"resourceTemplateMetaName"` - - LabelID SafeID `json:"labelID"` - LabelName string `json:"labelName"` - LabelMetaName string `json:"labelTemplateMetaName"` -} - -//func (d DiffLabelMapping) IsNew() bool { -// return d.StateStatus == StateStatusNew -//} - -// DiffNotificationEndpointValues are the varying values for a notification endpoint.
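DiffCheckValues above and DiffNotificationEndpointValues below exist because an embedded interface cannot marshal or unmarshal itself, so the wrapper struct carries the JSON methods. Reduced to a self-contained, hypothetical example (the Payload and slackPayload names are invented for illustration and are not influxdb types), the pattern looks like this:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Payload stands in for an embedded interface such as influxdb.Check.
type Payload interface{ Kind() string }

type slackPayload struct {
	Name string `json:"name"`
	URL  string `json:"url"`
}

func (slackPayload) Kind() string { return "slack" }

// payloadValues mirrors DiffCheckValues: the wrapper, not the interface,
// owns MarshalJSON and UnmarshalJSON.
type payloadValues struct {
	Payload
}

func (p payloadValues) MarshalJSON() ([]byte, error) {
	if p.Payload == nil {
		return json.Marshal(nil) // encode "null", the same nil guard as DiffCheckValues
	}
	return json.Marshal(p.Payload)
}

func (p *payloadValues) UnmarshalJSON(b []byte) error {
	// In pkger this step is icheck.UnmarshalJSON / endpoint.UnmarshalJSON,
	// which pick the concrete type; here one concrete type suffices.
	var s slackPayload
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	p.Payload = s
	return nil
}

func main() {
	b, _ := json.Marshal(payloadValues{Payload: slackPayload{Name: "on-call", URL: "https://example.com/hook"}})
	fmt.Println(string(b)) // {"name":"on-call","url":"https://example.com/hook"}
}
```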
-type DiffNotificationEndpointValues struct { - influxdb.NotificationEndpoint -} - -// MarshalJSON implementation here is forced by the embedded check value here. -func (d DiffNotificationEndpointValues) MarshalJSON() ([]byte, error) { - if d.NotificationEndpoint == nil { - return json.Marshal(nil) - } - return json.Marshal(d.NotificationEndpoint) -} - -// UnmarshalJSON decodes the notification endpoint. This is necessary unfortunately. -func (d *DiffNotificationEndpointValues) UnmarshalJSON(b []byte) (err error) { - d.NotificationEndpoint, err = endpoint.UnmarshalJSON(b) - if errors2.EInvalid == errors2.ErrorCode(err) { - return nil - } - return -} - -// DiffNotificationEndpoint is a diff of an individual notification endpoint. -type DiffNotificationEndpoint struct { - DiffIdentifier - - New DiffNotificationEndpointValues `json:"new"` - Old *DiffNotificationEndpointValues `json:"old"` -} - -type ( - // DiffNotificationRule is a diff of an individual notification rule. - DiffNotificationRule struct { - DiffIdentifier - - New DiffNotificationRuleValues `json:"new"` - Old *DiffNotificationRuleValues `json:"old"` - } - - // DiffNotificationRuleValues are the values for an individual rule. - DiffNotificationRuleValues struct { - Name string `json:"name"` - Description string `json:"description"` - - // These 3 fields represent the relationship of the rule to the endpoint. - EndpointID SafeID `json:"endpointID"` - EndpointName string `json:"endpointName"` - EndpointType string `json:"endpointType"` - - Every string `json:"every"` - Offset string `json:"offset"` - MessageTemplate string `json:"messageTemplate"` - StatusRules []SummaryStatusRule `json:"statusRules"` - TagRules []SummaryTagRule `json:"tagRules"` - } -) - -type ( - // DiffTask is a diff of an individual task. - DiffTask struct { - DiffIdentifier - - New DiffTaskValues `json:"new"` - Old *DiffTaskValues `json:"old"` - } - - // DiffTaskValues are the values for an individual task. - DiffTaskValues struct { - Name string `json:"name"` - Cron string `json:"cron"` - Description string `json:"description"` - Every string `json:"every"` - Offset string `json:"offset"` - Query string `json:"query"` - Status influxdb.Status `json:"status"` - } -) - -// DiffTelegraf is a diff of an individual telegraf. This resource is always new. -type DiffTelegraf struct { - DiffIdentifier - - New influxdb.TelegrafConfig `json:"new"` - Old *influxdb.TelegrafConfig `json:"old"` -} - -type ( - // DiffVariable is a diff of an individual variable. - DiffVariable struct { - DiffIdentifier - - New DiffVariableValues `json:"new"` - Old *DiffVariableValues `json:"old,omitempty"` // using omitempty here to signal there was no prev state with a nil - } - - // DiffVariableValues are the varying values for a variable. - DiffVariableValues struct { - Name string `json:"name"` - Description string `json:"description"` - Args *influxdb.VariableArguments `json:"args"` - } -) - -func (d DiffVariable) hasConflict() bool { - return !d.IsNew() && d.Old != nil && !reflect.DeepEqual(*d.Old, d.New) -} - -// Summary is a definition of all the resources that have or -// will be created from a pkg. 
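One detail worth calling out from the diff values above: on DiffVariable, Old is a pointer tagged `omitempty`, which is how "no previous state" is distinguished from "previous state whose fields are zero". A small sketch (an editor's illustration, not from the original file; assumes package pkger with encoding/json and fmt imported):

```go
// Illustrative sketch only; not part of the original models.go.
func exampleOldOmitted() {
	v := DiffVariable{
		DiffIdentifier: DiffIdentifier{MetaName: "region", Kind: KindVariable},
		New:            DiffVariableValues{Name: "region", Description: "deployment region"},
		// Old stays nil: the resource is new, so the "old" key is dropped from
		// the encoded JSON entirely thanks to `json:"old,omitempty"` on a pointer.
	}
	b, _ := json.Marshal(v)
	fmt.Println(string(b)) // the encoded object contains "new" but no "old" key
}
```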
-type Summary struct { - Buckets []SummaryBucket `json:"buckets"` - Checks []SummaryCheck `json:"checks"` - Dashboards []SummaryDashboard `json:"dashboards"` - NotificationEndpoints []SummaryNotificationEndpoint `json:"notificationEndpoints"` - NotificationRules []SummaryNotificationRule `json:"notificationRules"` - Labels []SummaryLabel `json:"labels"` - LabelMappings []SummaryLabelMapping `json:"labelMappings"` - MissingEnvs []string `json:"missingEnvRefs"` - MissingSecrets []string `json:"missingSecrets"` - Tasks []SummaryTask `json:"summaryTask"` - TelegrafConfigs []SummaryTelegraf `json:"telegrafConfigs"` - Variables []SummaryVariable `json:"variables"` -} - -// SummaryIdentifier establishes the shared identifiers for a given resource -// within a template. -type SummaryIdentifier struct { - Kind Kind `json:"kind"` - MetaName string `json:"templateMetaName"` - EnvReferences []SummaryReference `json:"envReferences"` -} - -// SummaryBucket provides a summary of a pkg bucket. -type SummaryBucket struct { - SummaryIdentifier - ID SafeID `json:"id,omitempty"` - OrgID SafeID `json:"orgID,omitempty"` - Name string `json:"name"` - Description string `json:"description"` - // TODO: return retention rules? - RetentionPeriod time.Duration `json:"retentionPeriod"` - - SchemaType string `json:"schemaType,omitempty"` - MeasurementSchemas []SummaryMeasurementSchema `json:"measurementSchemas,omitempty"` - - LabelAssociations []SummaryLabel `json:"labelAssociations"` -} - -type SummaryMeasurementSchema struct { - Name string `json:"name"` - Columns []SummaryMeasurementSchemaColumn `json:"columns"` -} - -type SummaryMeasurementSchemaColumn struct { - Name string `json:"name"` - Type string `json:"type"` - DataType string `json:"dataType,omitempty"` -} - -// SummaryCheck provides a summary of a pkg check. -type SummaryCheck struct { - SummaryIdentifier - Check influxdb.Check `json:"check"` - Status influxdb.Status `json:"status"` - - LabelAssociations []SummaryLabel `json:"labelAssociations"` -} - -func (s *SummaryCheck) UnmarshalJSON(b []byte) error { - var out struct { - SummaryIdentifier - Status string `json:"status"` - LabelAssociations []SummaryLabel `json:"labelAssociations"` - Check json.RawMessage `json:"check"` - } - if err := json.Unmarshal(b, &out); err != nil { - return err - } - s.SummaryIdentifier = out.SummaryIdentifier - s.Status = influxdb.Status(out.Status) - s.LabelAssociations = out.LabelAssociations - - var err error - s.Check, err = icheck.UnmarshalJSON(out.Check) - return err -} - -// SummaryDashboard provides a summary of a pkg dashboard. -type SummaryDashboard struct { - SummaryIdentifier - ID SafeID `json:"id"` - OrgID SafeID `json:"orgID"` - Name string `json:"name"` - Description string `json:"description"` - Charts []SummaryChart `json:"charts"` - - LabelAssociations []SummaryLabel `json:"labelAssociations"` -} - -// SummaryChart provides a summary of a pkg dashboard's chart. -type SummaryChart struct { - Properties influxdb.ViewProperties `json:"-"` - - XPosition int `json:"xPos"` - YPosition int `json:"yPos"` - Height int `json:"height"` - Width int `json:"width"` -} - -// MarshalJSON marshals a summary chart. 
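The SummaryCheck decoder above uses a two-pass decode: the known fields land in an anonymous struct, while the polymorphic check is held as json.RawMessage and decoded afterwards by a type-aware helper. The same pattern, stripped to a hypothetical stand-alone form (names invented for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type envelope struct {
	Name string `json:"name"`
	// Captured verbatim on the first pass and decoded on the second, once the
	// concrete type is known (pkger delegates that step to icheck.UnmarshalJSON).
	Check json.RawMessage `json:"check"`
}

func main() {
	raw := []byte(`{"name":"cpu check","check":{"type":"threshold","level":"CRIT"}}`)

	var e envelope
	if err := json.Unmarshal(raw, &e); err != nil {
		panic(err)
	}

	// Second pass: a generic map stands in for the concrete check type.
	var check map[string]interface{}
	if err := json.Unmarshal(e.Check, &check); err != nil {
		panic(err)
	}
	fmt.Println(e.Name, check["type"]) // cpu check threshold
}
```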
-func (s *SummaryChart) MarshalJSON() ([]byte, error) { - b, err := influxdb.MarshalViewPropertiesJSON(s.Properties) - if err != nil { - return nil, err - } - - type alias SummaryChart - - out := struct { - Props json.RawMessage `json:"properties"` - alias - }{ - Props: b, - alias: alias(*s), - } - return json.Marshal(out) -} - -// UnmarshalJSON unmarshals the view properties and other data. -func (s *SummaryChart) UnmarshalJSON(b []byte) error { - type alias SummaryChart - a := (*alias)(s) - if err := json.Unmarshal(b, a); err != nil { - return err - } - s.XPosition = a.XPosition - s.YPosition = a.YPosition - s.Height = a.Height - s.Width = a.Width - - vp, err := influxdb.UnmarshalViewPropertiesJSON(b) - if err != nil { - return err - } - s.Properties = vp - return nil -} - -// SummaryNotificationEndpoint provides a summary of a pkg notification endpoint. -type SummaryNotificationEndpoint struct { - SummaryIdentifier - NotificationEndpoint influxdb.NotificationEndpoint `json:"notificationEndpoint"` - - LabelAssociations []SummaryLabel `json:"labelAssociations"` -} - -// UnmarshalJSON unmarshals the notification endpoint. This is necessary b/c -// the notification endpoint does not have a means to unmarshal itself. -func (s *SummaryNotificationEndpoint) UnmarshalJSON(b []byte) error { - var a struct { - SummaryIdentifier - NotificationEndpoint json.RawMessage `json:"notificationEndpoint"` - LabelAssociations []SummaryLabel `json:"labelAssociations"` - } - if err := json.Unmarshal(b, &a); err != nil { - return err - } - s.SummaryIdentifier = a.SummaryIdentifier - s.LabelAssociations = a.LabelAssociations - - e, err := endpoint.UnmarshalJSON(a.NotificationEndpoint) - s.NotificationEndpoint = e - return err -} - -// Summary types for NotificationRules which provide a summary of a pkg notification rule. -type ( - SummaryNotificationRule struct { - SummaryIdentifier - ID SafeID `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - - // These fields represent the relationship of the rule to the endpoint. - EndpointID SafeID `json:"endpointID"` - EndpointMetaName string `json:"endpointTemplateMetaName"` - EndpointType string `json:"endpointType"` - - Every string `json:"every"` - Offset string `json:"offset"` - MessageTemplate string `json:"messageTemplate"` - Status influxdb.Status `json:"status"` - StatusRules []SummaryStatusRule `json:"statusRules"` - TagRules []SummaryTagRule `json:"tagRules"` - - LabelAssociations []SummaryLabel `json:"labelAssociations"` - } - - SummaryStatusRule struct { - CurrentLevel string `json:"currentLevel"` - PreviousLevel string `json:"previousLevel"` - } - - SummaryTagRule struct { - Key string `json:"key"` - Value string `json:"value"` - Operator string `json:"operator"` - } -) - -// SummaryLabel provides a summary of a pkg label. -type SummaryLabel struct { - SummaryIdentifier - ID SafeID `json:"id"` - OrgID SafeID `json:"orgID"` - Name string `json:"name"` - Properties struct { - Color string `json:"color"` - Description string `json:"description"` - } `json:"properties"` -} - -// SummaryLabelMapping provides a summary of a label mapped with a single resource.
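The `type alias SummaryChart` declarations above (and the `(*alias)(s)` conversion in UnmarshalJSON) are the standard trick for avoiding infinite recursion: the local alias type has the same fields but none of the methods, so json.Marshal and json.Unmarshal on it fall back to the default encoding. A stripped-down, hypothetical illustration of the same trick (the chart type below is invented for this sketch and assumes encoding/json is imported):

```go
// Hypothetical, reduced version of the SummaryChart pattern.
type chart struct {
	Width      int             `json:"width"`
	Height     int             `json:"height"`
	Properties json.RawMessage `json:"-"` // encoded and decoded by hand below
}

func (c *chart) MarshalJSON() ([]byte, error) {
	type alias chart // same fields, no methods: json.Marshal below cannot recurse
	return json.Marshal(struct {
		Props json.RawMessage `json:"properties"`
		alias
	}{
		Props: c.Properties,
		alias: alias(*c),
	})
}

func (c *chart) UnmarshalJSON(b []byte) error {
	type alias chart
	if err := json.Unmarshal(b, (*alias)(c)); err != nil { // fills Width/Height without recursing
		return err
	}
	// Pull the raw properties back out by hand, as SummaryChart does with
	// influxdb.UnmarshalViewPropertiesJSON.
	var wrap struct {
		Props json.RawMessage `json:"properties"`
	}
	if err := json.Unmarshal(b, &wrap); err != nil {
		return err
	}
	c.Properties = wrap.Props
	return nil
}
```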
-type SummaryLabelMapping struct { - exists bool - Status StateStatus `json:"status,omitempty"` - ResourceID SafeID `json:"resourceID"` - ResourceMetaName string `json:"resourceTemplateMetaName"` - ResourceName string `json:"resourceName"` - ResourceType influxdb.ResourceType `json:"resourceType"` - LabelMetaName string `json:"labelTemplateMetaName"` - LabelName string `json:"labelName"` - LabelID SafeID `json:"labelID"` -} - -// SummaryReference informs the consumer of required references for -// this resource. -type SummaryReference struct { - Field string `json:"resourceField"` - EnvRefKey string `json:"envRefKey"` - ValType string `json:"valueType"` - Value interface{} `json:"value"` - DefaultValue interface{} `json:"defaultValue"` -} - -// SummaryTask provides a summary of a task. -type SummaryTask struct { - SummaryIdentifier - ID SafeID `json:"id"` - Name string `json:"name"` - Cron string `json:"cron"` - Description string `json:"description"` - Every string `json:"every"` - Offset string `json:"offset"` - Query string `json:"query"` - Status influxdb.Status `json:"status"` - - LabelAssociations []SummaryLabel `json:"labelAssociations"` -} - -// SummaryTelegraf provides a summary of a pkg telegraf config. -type SummaryTelegraf struct { - SummaryIdentifier - TelegrafConfig influxdb.TelegrafConfig `json:"telegrafConfig"` - - LabelAssociations []SummaryLabel `json:"labelAssociations"` -} - -// SummaryVariable provides a summary of a pkg variable. -type SummaryVariable struct { - SummaryIdentifier - ID SafeID `json:"id,omitempty"` - OrgID SafeID `json:"orgID,omitempty"` - Name string `json:"name"` - Description string `json:"description"` - Selected []string `json:"variables"` - Arguments *influxdb.VariableArguments `json:"arguments"` - - LabelAssociations []SummaryLabel `json:"labelAssociations"` -} diff --git a/pkger/models_test.go b/pkger/models_test.go deleted file mode 100644 index 15ba117702f..00000000000 --- a/pkger/models_test.go +++ /dev/null @@ -1,504 +0,0 @@ -package pkger - -import ( - "strconv" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestTemplate(t *testing.T) { - t.Run("Summary", func(t *testing.T) { - t.Run("buckets returned in asc order by name", func(t *testing.T) { - pkg := Template{ - mBuckets: map[string]*bucket{ - "buck_2": { - Description: "desc2", - identity: identity{name: &references{val: "metaName2"}, displayName: &references{val: "name2"}}, - RetentionRules: retentionRules{newRetentionRule(2 * time.Hour)}, - }, - "buck_1": { - identity: identity{name: &references{val: "metaName1"}, displayName: &references{val: "name1"}}, - Description: "desc1", - RetentionRules: retentionRules{newRetentionRule(time.Hour)}, - }, - }, - } - - summary := pkg.Summary() - require.Len(t, summary.Buckets, len(pkg.mBuckets)) - for i := 1; i <= len(summary.Buckets); i++ { - buck := summary.Buckets[i-1] - assert.Zero(t, buck.ID) - assert.Zero(t, buck.OrgID) - assert.Equal(t, "desc"+strconv.Itoa(i), buck.Description) - assert.Equal(t, "metaName"+strconv.Itoa(i), buck.MetaName) - assert.Equal(t, "name"+strconv.Itoa(i), buck.Name) - assert.Equal(t, time.Duration(i)*time.Hour, buck.RetentionPeriod) - } - }) - - t.Run("labels returned in asc order by name", func(t *testing.T) { - pkg := Template{ - mLabels: map[string]*label{ - "2": { - identity: identity{name: &references{val: "pkgName2"}, displayName: &references{val: "name2"}}, - Description: "desc2", - 
Color: "blurple", - }, - "1": { - identity: identity{name: &references{val: "pkgName1"}, displayName: &references{val: "name1"}}, - Description: "desc1", - Color: "peru", - }, - }, - } - - summary := pkg.Summary() - - require.Len(t, summary.Labels, len(pkg.mLabels)) - label1 := summary.Labels[0] - assert.Equal(t, "pkgName1", label1.MetaName) - assert.Equal(t, "name1", label1.Name) - assert.Equal(t, "desc1", label1.Properties.Description) - assert.Equal(t, "peru", label1.Properties.Color) - - label2 := summary.Labels[1] - assert.Equal(t, "pkgName2", label2.MetaName) - assert.Equal(t, "name2", label2.Name) - assert.Equal(t, "desc2", label2.Properties.Description) - assert.Equal(t, "blurple", label2.Properties.Color) - }) - - t.Run("label mappings returned in asc order by name", func(t *testing.T) { - bucket1 := &bucket{ - identity: identity{name: &references{val: "pkgBucket1"}, displayName: &references{val: "bd1"}}, - } - label1 := &label{ - identity: identity{name: &references{val: "pkgLabel2"}, displayName: &references{val: "name2"}}, - Description: "desc2", - Color: "blurple", - associationMapping: associationMapping{ - mappings: map[assocMapKey][]assocMapVal{ - { - resType: influxdb.BucketsResourceType, - name: bucket1.Name(), - }: {{ - v: bucket1, - }}, - }, - }, - } - bucket1.labels = append(bucket1.labels, label1) - - pkg := Template{ - mBuckets: map[string]*bucket{bucket1.MetaName(): bucket1}, - mLabels: map[string]*label{label1.MetaName(): label1}, - } - - summary := pkg.Summary() - - require.Len(t, summary.LabelMappings, 1) - mapping1 := summary.LabelMappings[0] - assert.Equal(t, bucket1.MetaName(), mapping1.ResourceMetaName) - assert.Equal(t, bucket1.Name(), mapping1.ResourceName) - assert.Equal(t, influxdb.BucketsResourceType, mapping1.ResourceType) - assert.Equal(t, label1.MetaName(), mapping1.LabelMetaName) - assert.Equal(t, label1.Name(), mapping1.LabelName) - }) - }) - - t.Run("Diff", func(t *testing.T) { - t.Run("hasConflict", func(t *testing.T) { - tests := []struct { - name string - resource interface { - hasConflict() bool - } - expected bool - }{ - { - name: "new bucket", - resource: DiffBucket{ - DiffIdentifier: DiffIdentifier{ - MetaName: "new bucket", - }, - New: DiffBucketValues{ - Description: "new desc", - }, - }, - expected: false, - }, - { - name: "existing bucket with no changes", - resource: DiffBucket{ - DiffIdentifier: DiffIdentifier{ - ID: 3, - MetaName: "new bucket", - }, - New: DiffBucketValues{ - Description: "new desc", - RetentionRules: retentionRules{{ - Type: "expire", - Seconds: 3600, - }}, - }, - Old: &DiffBucketValues{ - Description: "new desc", - RetentionRules: retentionRules{{ - Type: "expire", - Seconds: 3600, - }}, - }, - }, - expected: false, - }, - { - name: "existing bucket with desc changes", - resource: DiffBucket{ - DiffIdentifier: DiffIdentifier{ - ID: 3, - MetaName: "existing bucket", - }, - New: DiffBucketValues{ - Description: "new desc", - RetentionRules: retentionRules{{ - Type: "expire", - Seconds: 3600, - }}, - }, - Old: &DiffBucketValues{ - Description: "newer desc", - RetentionRules: retentionRules{{ - Type: "expire", - Seconds: 3600, - }}, - }, - }, - expected: true, - }, - { - name: "existing bucket with retention changes", - resource: DiffBucket{ - DiffIdentifier: DiffIdentifier{ - ID: 3, - MetaName: "existing bucket", - }, - New: DiffBucketValues{ - Description: "new desc", - RetentionRules: retentionRules{{ - Type: "expire", - Seconds: 3600, - }}, - }, - Old: &DiffBucketValues{ - Description: "new desc", - }, - }, - 
expected: true, - }, - { - name: "existing bucket with retention changes", - resource: DiffBucket{ - DiffIdentifier: DiffIdentifier{ - ID: 3, - MetaName: "existing bucket", - }, - New: DiffBucketValues{ - Description: "new desc", - RetentionRules: retentionRules{{ - Type: "expire", - Seconds: 3600, - }}, - }, - Old: &DiffBucketValues{ - Description: "new desc", - RetentionRules: retentionRules{{ - Type: "expire", - Seconds: 360, - }}, - }, - }, - expected: true, - }, - { - name: "existing bucket with retention changes", - resource: DiffBucket{ - DiffIdentifier: DiffIdentifier{ - ID: 3, - MetaName: "existing bucket", - }, - New: DiffBucketValues{ - Description: "new desc", - RetentionRules: retentionRules{{ - Type: "expire", - Seconds: 3600, - }}, - }, - Old: &DiffBucketValues{ - Description: "new desc", - RetentionRules: retentionRules{ - { - Type: "expire", - Seconds: 360, - }, - { - Type: "expire", - Seconds: 36000, - }, - }, - }, - }, - expected: true, - }, - { - name: "new label", - resource: DiffLabel{ - DiffIdentifier: DiffIdentifier{ - MetaName: "new label", - }, - New: DiffLabelValues{ - Name: "new label", - Color: "new color", - Description: "new desc", - }, - }, - expected: false, - }, - { - name: "existing label with no changes", - resource: DiffLabel{ - DiffIdentifier: DiffIdentifier{ - ID: 1, - MetaName: "existing label", - }, - New: DiffLabelValues{ - Name: "existing label", - Color: "color", - Description: "desc", - }, - Old: &DiffLabelValues{ - Name: "existing label", - Color: "color", - Description: "desc", - }, - }, - expected: false, - }, - { - name: "existing label with changes", - resource: DiffLabel{ - DiffIdentifier: DiffIdentifier{ - ID: 1, - MetaName: "existing label", - }, - New: DiffLabelValues{ - Name: "existing label", - Color: "color", - Description: "desc", - }, - Old: &DiffLabelValues{ - Name: "existing label", - Color: "new color", - Description: "new desc", - }, - }, - expected: true, - }, - { - name: "new variable", - resource: DiffVariable{ - DiffIdentifier: DiffIdentifier{ - MetaName: "new var", - }, - New: DiffVariableValues{ - Name: "new var", - Description: "new desc", - Args: &influxdb.VariableArguments{ - Type: "constant", - Values: &influxdb.VariableConstantValues{"1", "b"}, - }, - }, - }, - expected: false, - }, - { - name: "existing variable no changes", - resource: DiffVariable{ - DiffIdentifier: DiffIdentifier{ - ID: 2, - MetaName: "new var", - }, - New: DiffVariableValues{ - Name: "new var", - Description: "new desc", - Args: &influxdb.VariableArguments{ - Type: "constant", - Values: &influxdb.VariableConstantValues{"1", "b"}, - }, - }, - Old: &DiffVariableValues{ - Name: "new var", - Description: "new desc", - Args: &influxdb.VariableArguments{ - Type: "constant", - Values: &influxdb.VariableConstantValues{"1", "b"}, - }, - }, - }, - expected: false, - }, - { - name: "existing variable with desc changes", - resource: DiffVariable{ - DiffIdentifier: DiffIdentifier{ - ID: 3, - MetaName: "new var", - }, - New: DiffVariableValues{ - Name: "new var", - Description: "new desc", - Args: &influxdb.VariableArguments{ - Type: "constant", - Values: &influxdb.VariableConstantValues{"1", "b"}, - }, - }, - Old: &DiffVariableValues{ - Description: "newer desc", - Args: &influxdb.VariableArguments{ - Type: "constant", - Values: &influxdb.VariableConstantValues{"1", "b"}, - }, - }, - }, - expected: true, - }, - { - name: "existing variable with constant arg changes", - resource: DiffVariable{ - DiffIdentifier: DiffIdentifier{ - ID: 3, - MetaName: "new var", 
- }, - New: DiffVariableValues{ - Name: "new var", - Description: "new desc", - Args: &influxdb.VariableArguments{ - Type: "constant", - Values: &influxdb.VariableConstantValues{"1", "b"}, - }, - }, - Old: &DiffVariableValues{ - Description: "new desc", - Args: &influxdb.VariableArguments{ - Type: "constant", - Values: &influxdb.VariableConstantValues{"1", "b", "new"}, - }, - }, - }, - expected: true, - }, - { - name: "existing variable with map arg changes", - resource: DiffVariable{ - DiffIdentifier: DiffIdentifier{ - ID: 3, - MetaName: "new var", - }, - New: DiffVariableValues{ - Name: "new var", - Description: "new desc", - Args: &influxdb.VariableArguments{ - Type: "map", - Values: &influxdb.VariableMapValues{"1": "b"}, - }, - }, - Old: &DiffVariableValues{ - Description: "new desc", - Args: &influxdb.VariableArguments{ - Type: "map", - Values: &influxdb.VariableMapValues{"1": "b", "2": "new"}, - }, - }, - }, - expected: true, - }, - { - name: "existing variable with query arg changes", - resource: DiffVariable{ - DiffIdentifier: DiffIdentifier{ - ID: 3, - MetaName: "new var", - }, - New: DiffVariableValues{ - Name: "new var", - Description: "new desc", - Args: &influxdb.VariableArguments{ - Type: "query", - Values: &influxdb.VariableQueryValues{ - Query: "from(bucket: rucket)", - Language: "flux", - }, - }, - }, - Old: &DiffVariableValues{ - Name: "new var", - Description: "new desc", - Args: &influxdb.VariableArguments{ - Type: "query", - Values: &influxdb.VariableQueryValues{ - Query: "from(bucket: rucket) |> yield(name: threeve)", - Language: "flux", - }, - }, - }, - }, - expected: true, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - assert.Equal(t, tt.expected, tt.resource.hasConflict()) - } - - t.Run(tt.name, fn) - } - }) - }) - - t.Run("Contains", func(t *testing.T) { - tests := []struct { - pkgFile string - kind Kind - validName string - }{ - { - pkgFile: "testdata/label.yml", - kind: KindLabel, - validName: "label-1", - }, - { - pkgFile: "testdata/notification_rule.yml", - kind: KindNotificationRule, - validName: "rule-uuid", - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - testfileRunner(t, tt.pkgFile, func(t *testing.T, pkg *Template) { - contained := pkg.Contains(tt.kind, tt.validName) - assert.True(t, contained) - - contained = pkg.Contains(tt.kind, "RANdo Name_ not found anywhere") - assert.False(t, contained) - }) - } - t.Run(tt.kind.String(), fn) - } - }) -} diff --git a/pkger/parser.go b/pkger/parser.go deleted file mode 100644 index dce50ab9424..00000000000 --- a/pkger/parser.go +++ /dev/null @@ -1,2372 +0,0 @@ -package pkger - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "path" - "regexp" - "sort" - "strconv" - "strings" - "syscall" - "time" - - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/ast/edit" - fluxurl "github.com/influxdata/flux/dependencies/url" - "github.com/influxdata/flux/parser" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/pkg/jsonnet" - "github.com/influxdata/influxdb/v2/task/options" - "gopkg.in/yaml.v3" -) - -type ( - // ReaderFn is used for functional inputs to abstract the individual - // entrypoints for the reader itself. - ReaderFn func() (r io.Reader, source string, err error) - - // Encoder is an encodes a type. 
- Encoder interface { - Encode(v interface{}) error - } - - // Encoding describes the encoding for the raw package data. The - // encoding determines how the raw data is parsed. - Encoding int -) - -// encoding types -const ( - EncodingUnknown Encoding = iota - EncodingJSON - EncodingJsonnet - EncodingSource // EncodingSource draws the encoding type by inferring it from the source. - EncodingYAML -) - -// String provides the string representation of the encoding. -func (e Encoding) String() string { - switch e { - case EncodingJSON: - return "json" - case EncodingJsonnet: - return "jsonnet" - case EncodingSource: - return "source" - case EncodingYAML: - return "yaml" - default: - return "unknown" - } -} - -// ErrInvalidEncoding indicates the encoding is invalid type for the parser. -var ErrInvalidEncoding = errors.New("invalid encoding provided") - -// Parse parses a pkg defined by the encoding and readerFns. As of writing this -// we can parse both a YAML, JSON, and Jsonnet formats of the Template model. -func Parse(encoding Encoding, readerFn ReaderFn, opts ...ValidateOptFn) (*Template, error) { - r, source, err := readerFn() - if err != nil { - return nil, err - } - - var pkgFn func(io.Reader, ...ValidateOptFn) (*Template, error) - switch encoding { - case EncodingJSON: - pkgFn = parseJSON - case EncodingJsonnet: - pkgFn = parseJsonnet - case EncodingSource: - pkgFn = parseSource - case EncodingYAML: - pkgFn = parseYAML - default: - return nil, ErrInvalidEncoding - } - - pkg, err := pkgFn(r, opts...) - if err != nil { - return nil, err - } - pkg.sources = []string{source} - - return pkg, nil -} - -// FromFile reads a file from disk and provides a reader from it. -func FromFile(filePath string) ReaderFn { - return func() (io.Reader, string, error) { - u, err := url.Parse(filePath) - if err != nil { - return nil, filePath, &errors2.Error{ - Code: errors2.EInvalid, - Msg: "invalid filepath provided", - Err: err, - } - } - if u.Scheme == "" { - u.Scheme = "file" - } - - // not using os.Open to avoid having to deal with closing the file in here - b, err := os.ReadFile(u.Path) - if err != nil { - return nil, filePath, err - } - - return bytes.NewBuffer(b), u.String(), nil - } -} - -// FromReader simply passes the reader along. Useful when consuming -// this from an HTTP request body. There are a number of other useful -// places for this functional input. -func FromReader(r io.Reader, sources ...string) ReaderFn { - return func() (io.Reader, string, error) { - source := "byte stream" - if len(sources) > 0 { - source = formatSources(sources) - } - return r, source, nil - } -} - -// FromString parses a pkg from a raw string value. This is very useful -// in tests. -func FromString(s string) ReaderFn { - return func() (io.Reader, string, error) { - return strings.NewReader(s), "string", nil - } -} - -// NewDefaultHTTPClient creates a client with the specified flux IP validator. -// This is copied from flux/dependencies/http/http.go -func NewDefaultHTTPClient(urlValidator fluxurl.Validator) *http.Client { - // Control is called after DNS lookup, but before the network - // connection is initiated. 
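The Parse entrypoint pairs an Encoding with a ReaderFn, so the same code path serves local files (FromFile), raw strings (FromString), request bodies (FromReader), and hosted templates (FromHTTPRequest). A minimal, hedged sketch of the call pattern from client code; the import path and the template.yml file name are assumptions for illustration, not taken from this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/v2/pkger" // assumed import path for this package
)

func main() {
	// Parse a YAML template from disk and inspect the resulting summary.
	tmpl, err := pkger.Parse(pkger.EncodingYAML, pkger.FromFile("template.yml"))
	if err != nil {
		log.Fatal(err)
	}
	sum := tmpl.Summary()
	fmt.Printf("buckets=%d labels=%d\n", len(sum.Buckets), len(sum.Labels))
}
```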
- control := func(network, address string, c syscall.RawConn) error { - host, _, err := net.SplitHostPort(address) - if err != nil { - return err - } - - ip := net.ParseIP(host) - return urlValidator.ValidateIP(ip) - } - - dialer := &net.Dialer{ - Timeout: time.Minute, - Control: control, - // DualStack is deprecated - } - - return &http.Client{ - Transport: &http.Transport{ - DialContext: dialer.DialContext, - }, - } -} - -// FromHTTPRequest parses a pkg from the request body of a HTTP request. This is -// very useful when using packages that are hosted.. -func FromHTTPRequest(addr string, client *http.Client) ReaderFn { - return func() (io.Reader, string, error) { - resp, err := client.Get(normalizeGithubURLToContent(addr)) - if err != nil { - return nil, addr, err - } - defer resp.Body.Close() - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return nil, addr, err - } - - if resp.StatusCode/100 != 2 { - return nil, addr, fmt.Errorf( - "bad response: address=%s status_code=%d body=%q", - addr, resp.StatusCode, strings.TrimSpace(buf.String()), - ) - } - - return &buf, addr, nil - } -} - -const ( - githubRawContentHost = "raw.githubusercontent.com" - githubHost = "github.com" -) - -func normalizeGithubURLToContent(addr string) string { - u, err := url.Parse(addr) - if err != nil { - return addr - } - - if u.Host == githubHost { - switch path.Ext(u.Path) { - case ".yaml", ".yml", ".json", ".jsonnet": - default: - return u.String() - } - - parts := strings.Split(u.Path, "/") - if len(parts) < 4 { - return u.String() - } - u.Host = githubRawContentHost - u.Path = path.Join(append(parts[:3], parts[4:]...)...) - } - - return u.String() -} - -func parseJSON(r io.Reader, opts ...ValidateOptFn) (*Template, error) { - return parse(json.NewDecoder(r), opts...) -} - -func parseJsonnet(r io.Reader, opts ...ValidateOptFn) (*Template, error) { - opt := &validateOpt{} - for _, o := range opts { - o(opt) - } - // For security, we'll default to disabling parsing jsonnet but allow callers to override the behavior via - // EnableJsonnet(). Enabling jsonnet might be useful for client code where parsing jsonnet could be acceptable. - if opt.enableJsonnet { - return parse(jsonnet.NewDecoder(r), opts...) - } - return nil, fmt.Errorf("%s: jsonnet", ErrInvalidEncoding) -} - -func parseSource(r io.Reader, opts ...ValidateOptFn) (*Template, error) { - var b []byte - if byter, ok := r.(interface{ Bytes() []byte }); ok { - b = byter.Bytes() - } else { - bb, err := io.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("failed to decode pkg source: %s", err) - } - b = bb - } - - contentType := http.DetectContentType(b[:512]) - switch { - case strings.Contains(contentType, "jsonnet"): - // highly unlikely to fall in here with supported content type detection as is - return parseJsonnet(bytes.NewReader(b), opts...) - case strings.Contains(contentType, "json"): - return parseJSON(bytes.NewReader(b), opts...) - case strings.Contains(contentType, "yaml"), - strings.Contains(contentType, "yml"): - return parseYAML(bytes.NewReader(b), opts...) - default: - return parseYAML(bytes.NewReader(b), opts...) - } -} - -func parseYAML(r io.Reader, opts ...ValidateOptFn) (*Template, error) { - dec := yaml.NewDecoder(r) - - var pkg Template - for { - // forced to use this for loop b/c the yaml dependency does not - // decode multi documents. 
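Before fetching a hosted template, FromHTTPRequest rewrites github.com "blob" URLs for .yml/.yaml/.json/.jsonnet files to their raw.githubusercontent.com equivalents. A standalone sketch of that rewrite; the URL is illustrative, not taken from this diff:

```go
package main

import (
	"fmt"
	"net/url"
	"path"
	"strings"
)

func main() {
	// Hypothetical template URL; the rewrite swaps the host and drops the
	// "blob" path segment, mirroring normalizeGithubURLToContent above.
	u, _ := url.Parse("https://github.com/example-org/templates/blob/master/bucket.yml")
	parts := strings.Split(u.Path, "/") // ["", "example-org", "templates", "blob", "master", "bucket.yml"]
	u.Host = "raw.githubusercontent.com"
	u.Path = path.Join(append(parts[:3], parts[4:]...)...)
	fmt.Println(u.String()) // https://raw.githubusercontent.com/example-org/templates/master/bucket.yml
}
```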
- var k Object - err := dec.Decode(&k) - if err == io.EOF { - break - } - if err != nil { - return nil, err - } - pkg.Objects = append(pkg.Objects, k) - } - - if err := pkg.Validate(opts...); err != nil { - return nil, err - } - - return &pkg, nil -} - -type decoder interface { - Decode(interface{}) error -} - -func parse(dec decoder, opts ...ValidateOptFn) (*Template, error) { - var pkg Template - if err := dec.Decode(&pkg.Objects); err != nil { - return nil, err - } - - if err := pkg.Validate(opts...); err != nil { - return nil, err - } - - return &pkg, nil -} - -// Object describes the metadata and raw spec for an entity of a package kind. -type Object struct { - APIVersion string `json:"apiVersion" yaml:"apiVersion"` - Kind Kind `json:"kind" yaml:"kind"` - Metadata Resource `json:"metadata" yaml:"metadata"` - Spec Resource `json:"spec" yaml:"spec"` -} - -// Name returns the name of the kind. -func (k Object) Name() string { - return k.Metadata.references(fieldName).String() -} - -// ObjectAssociation is an association for an object. The supported types -// at this time are KindLabel. -type ObjectAssociation struct { - Kind Kind - MetaName string -} - -// AddAssociations adds an association to the object. -func (k Object) AddAssociations(associations ...ObjectAssociation) { - if len(associations) == 0 { - return - } - - if k.Spec == nil { - k.Spec = make(Resource) - } - - existingAss := k.Spec.slcResource(fieldAssociations) - for _, ass := range associations { - existingAss = append(existingAss, Resource{ - fieldKind: ass.Kind, - fieldName: ass.MetaName, - }) - } - sort.Slice(existingAss, func(i, j int) bool { - iPkgName, jPkgName := existingAss[i].Name(), existingAss[j].Name() - return iPkgName < jPkgName - }) - if existingAss == nil { - return - } - - k.Spec[fieldAssociations] = existingAss -} - -// SetMetadataName sets the metadata.name field. -func (k Object) SetMetadataName(name string) { - if k.Metadata == nil { - k.Metadata = make(Resource) - } - k.Metadata[fieldName] = name -} - -// Template is the model for a package. The resources are more generic that one might -// expect at first glance. This was done on purpose. The way json/yaml/toml or -// w/e scripting you want to use, can have very different ways of parsing. The -// different parsers are limited for the parsers that do not come from the std -// lib (looking at you yaml/v2). This allows us to parse it and leave the matching -// to another power, the graphing of the package is handled within itself. -type Template struct { - Objects []Object `json:"-" yaml:"-"` - sources []string - - mLabels map[string]*label - mBuckets map[string]*bucket - mChecks map[string]*check - mDashboards map[string]*dashboard - mNotificationEndpoints map[string]*notificationEndpoint - mNotificationRules map[string]*notificationRule - mTasks map[string]*task - mTelegrafs map[string]*telegraf - mVariables map[string]*variable - - mEnv map[string]bool - mEnvVals map[string]interface{} - mSecrets map[string]bool - - isParsed bool // indicates the pkg has been parsed and all resources graphed accordingly -} - -// Encode is a helper for encoding the pkg correctly. 
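Object is the generic apiVersion/kind/metadata/spec envelope that every template entry decodes into. A small usage sketch for the helpers above; the kind and names are illustrative, and the import path is assumed. Metadata and Spec are initialized up front because SetMetadataName and AddAssociations have value receivers, so they can only mutate maps the caller has already allocated:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/pkger" // assumed import path
)

func main() {
	// Sketch: assembling an Object programmatically.
	obj := pkger.Object{
		APIVersion: pkger.APIVersion, // version constant defined elsewhere in this package
		Kind:       pkger.KindBucket,
		Metadata:   pkger.Resource{},
		Spec:       pkger.Resource{},
	}
	obj.SetMetadataName("bucket-1")
	obj.AddAssociations(pkger.ObjectAssociation{Kind: pkger.KindLabel, MetaName: "label-1"})
	fmt.Println(obj.Name()) // "bucket-1"
}
```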
-func (p *Template) Encode(encoding Encoding) ([]byte, error) { - if p == nil { - panic("attempted to encode a nil Template") - } - - var ( - buf bytes.Buffer - err error - ) - switch encoding { - case EncodingJSON, EncodingJsonnet: - enc := json.NewEncoder(&buf) - enc.SetIndent("", "\t") - err = enc.Encode(p.Objects) - case EncodingYAML: - enc := yaml.NewEncoder(&buf) - for _, k := range p.Objects { - if err = enc.Encode(k); err != nil { - break - } - } - default: - return nil, ErrInvalidEncoding - } - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func (p *Template) Sources() []string { - // note: we prevent the internal field from being changed by enabling access - // to the sources via the exported method here. - return p.sources -} - -// Summary returns a package Summary that describes all the resources and -// associations the pkg contains. It is very useful for informing users of -// the changes that will take place when this pkg would be applied. -func (p *Template) Summary() Summary { - // ensure zero values for arrays aren't returned, but instead - // we always returning an initialized slice. - sum := Summary{ - Buckets: []SummaryBucket{}, - Checks: []SummaryCheck{}, - Dashboards: []SummaryDashboard{}, - NotificationEndpoints: []SummaryNotificationEndpoint{}, - NotificationRules: []SummaryNotificationRule{}, - Labels: []SummaryLabel{}, - MissingEnvs: p.missingEnvRefs(), - MissingSecrets: p.missingSecrets(), - Tasks: []SummaryTask{}, - TelegrafConfigs: []SummaryTelegraf{}, - Variables: []SummaryVariable{}, - } - - for _, b := range p.buckets() { - sum.Buckets = append(sum.Buckets, b.summarize()) - } - - for _, c := range p.checks() { - sum.Checks = append(sum.Checks, c.summarize()) - } - - for _, d := range p.dashboards() { - sum.Dashboards = append(sum.Dashboards, d.summarize()) - } - - for _, l := range p.labels() { - sum.Labels = append(sum.Labels, l.summarize()) - } - - sum.LabelMappings = p.labelMappings() - - for _, n := range p.notificationEndpoints() { - sum.NotificationEndpoints = append(sum.NotificationEndpoints, n.summarize()) - } - - for _, r := range p.notificationRules() { - sum.NotificationRules = append(sum.NotificationRules, r.summarize()) - } - - for _, t := range p.tasks() { - sum.Tasks = append(sum.Tasks, t.summarize()) - } - - for _, t := range p.telegrafs() { - sum.TelegrafConfigs = append(sum.TelegrafConfigs, t.summarize()) - } - - for _, v := range p.variables() { - sum.Variables = append(sum.Variables, v.summarize()) - } - - return sum -} - -func (p *Template) applyEnvRefs(envRefs map[string]interface{}) error { - if len(envRefs) == 0 { - return nil - } - - if p.mEnvVals == nil { - p.mEnvVals = make(map[string]interface{}) - } - - for k, v := range envRefs { - p.mEnvVals[k] = v - } - - return p.Validate() -} - -func (p *Template) applySecrets(secrets map[string]string) { - for k := range secrets { - p.mSecrets[k] = true - } -} - -// Contains identifies if a pkg contains a given object identified -// by its kind and metadata.Name (MetaName) field. 
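Contains is keyed by an object's metadata.name (its MetaName) rather than the display name in spec, and Encode writes the parsed objects back out in the requested encoding. A short sketch continuing from the earlier Parse example; the bucket name is illustrative:

```go
// Sketch: MetaName-based lookup followed by re-encoding to YAML.
if tmpl.Contains(pkger.KindBucket, "bucket-1") {
	out, err := tmpl.Encode(pkger.EncodingYAML)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}
```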
-func (p *Template) Contains(k Kind, pkgName string) bool { - switch k { - case KindBucket: - _, ok := p.mBuckets[pkgName] - return ok - case KindCheck, KindCheckDeadman, KindCheckThreshold: - _, ok := p.mChecks[pkgName] - return ok - case KindLabel: - _, ok := p.mLabels[pkgName] - return ok - case KindNotificationEndpoint, - KindNotificationEndpointHTTP, - KindNotificationEndpointPagerDuty, - KindNotificationEndpointSlack: - _, ok := p.mNotificationEndpoints[pkgName] - return ok - case KindNotificationRule: - _, ok := p.mNotificationRules[pkgName] - return ok - case KindTask: - _, ok := p.mTasks[pkgName] - return ok - case KindTelegraf: - _, ok := p.mTelegrafs[pkgName] - return ok - case KindVariable: - _, ok := p.mVariables[pkgName] - return ok - } - return false -} - -// Combine combines pkgs together. Is useful when you want to take multiple disparate pkgs -// and compile them into one to take advantage of the parser and service guarantees. -func Combine(pkgs []*Template, validationOpts ...ValidateOptFn) (*Template, error) { - newPkg := new(Template) - for _, p := range pkgs { - if len(p.Objects) == 0 { - continue - } - newPkg.sources = append(newPkg.sources, p.sources...) - newPkg.Objects = append(newPkg.Objects, p.Objects...) - } - - return newPkg, newPkg.Validate(validationOpts...) -} - -type ( - validateOpt struct { - minResources bool - skipValidate bool - enableJsonnet bool - } - - // ValidateOptFn provides a means to disable desired validation checks. - ValidateOptFn func(*validateOpt) -) - -// Jsonnet parsing is disabled by default. EnableJsonnet turns it back on. -func EnableJsonnet() ValidateOptFn { - return func(opt *validateOpt) { - opt.enableJsonnet = true - } -} - -// ValidWithoutResources ignores the validation check for minimum number -// of resources. This is useful for the service Create to ignore this and -// allow the creation of a pkg without resources. -func ValidWithoutResources() ValidateOptFn { - return func(opt *validateOpt) { - opt.minResources = false - } -} - -// ValidSkipParseError ignores the validation check from the of resources. This -// is useful for the service Create to ignore this and allow the creation of a -// pkg without resources. -func ValidSkipParseError() ValidateOptFn { - return func(opt *validateOpt) { - opt.skipValidate = true - } -} - -// Validate will graph all resources and validate every thing is in a useful form. -func (p *Template) Validate(opts ...ValidateOptFn) error { - opt := &validateOpt{minResources: true} - for _, o := range opts { - o(opt) - } - - var setupFns []func() error - if opt.minResources { - setupFns = append(setupFns, p.validResources) - } - setupFns = append(setupFns, p.graphResources) - - var pErr parseErr - for _, fn := range setupFns { - if err := fn(); err != nil { - if IsParseErr(err) { - pErr.append(err.(*parseErr).Resources...) 
- continue - } - return err - } - } - - if len(pErr.Resources) > 0 && !opt.skipValidate { - return &pErr - } - - p.isParsed = true - return nil -} - -func (p *Template) buckets() []*bucket { - buckets := make([]*bucket, 0, len(p.mBuckets)) - for _, b := range p.mBuckets { - buckets = append(buckets, b) - } - - sort.Slice(buckets, func(i, j int) bool { return buckets[i].MetaName() < buckets[j].MetaName() }) - - return buckets -} - -func (p *Template) checks() []*check { - checks := make([]*check, 0, len(p.mChecks)) - for _, c := range p.mChecks { - checks = append(checks, c) - } - - sort.Slice(checks, func(i, j int) bool { return checks[i].MetaName() < checks[j].MetaName() }) - - return checks -} - -func (p *Template) labels() []*label { - labels := make(sortedLabels, 0, len(p.mLabels)) - for _, l := range p.mLabels { - labels = append(labels, l) - } - - sort.Sort(labels) - - return labels -} - -func (p *Template) dashboards() []*dashboard { - dashes := make([]*dashboard, 0, len(p.mDashboards)) - for _, d := range p.mDashboards { - dashes = append(dashes, d) - } - sort.Slice(dashes, func(i, j int) bool { return dashes[i].MetaName() < dashes[j].MetaName() }) - return dashes -} - -func (p *Template) notificationEndpoints() []*notificationEndpoint { - endpoints := make([]*notificationEndpoint, 0, len(p.mNotificationEndpoints)) - for _, e := range p.mNotificationEndpoints { - endpoints = append(endpoints, e) - } - sort.Slice(endpoints, func(i, j int) bool { - ei, ej := endpoints[i], endpoints[j] - if ei.kind == ej.kind { - return ei.MetaName() < ej.MetaName() - } - return ei.kind < ej.kind - }) - return endpoints -} - -func (p *Template) notificationRules() []*notificationRule { - rules := make([]*notificationRule, 0, len(p.mNotificationRules)) - for _, r := range p.mNotificationRules { - rules = append(rules, r) - } - sort.Slice(rules, func(i, j int) bool { return rules[i].MetaName() < rules[j].MetaName() }) - return rules -} - -func (p *Template) missingEnvRefs() []string { - envRefs := make([]string, 0) - for envRef, matching := range p.mEnv { - if !matching { - envRefs = append(envRefs, envRef) - } - } - sort.Strings(envRefs) - return envRefs -} - -func (p *Template) missingSecrets() []string { - secrets := make([]string, 0, len(p.mSecrets)) - for secret, foundInPlatform := range p.mSecrets { - if foundInPlatform { - continue - } - secrets = append(secrets, secret) - } - return secrets -} - -func (p *Template) tasks() []*task { - tasks := make([]*task, 0, len(p.mTasks)) - for _, t := range p.mTasks { - tasks = append(tasks, t) - } - - sort.Slice(tasks, func(i, j int) bool { return tasks[i].MetaName() < tasks[j].MetaName() }) - - return tasks -} - -func (p *Template) telegrafs() []*telegraf { - teles := make([]*telegraf, 0, len(p.mTelegrafs)) - for _, t := range p.mTelegrafs { - t.config.Name = t.Name() - teles = append(teles, t) - } - - sort.Slice(teles, func(i, j int) bool { return teles[i].MetaName() < teles[j].MetaName() }) - - return teles -} - -func (p *Template) variables() []*variable { - vars := make([]*variable, 0, len(p.mVariables)) - for _, v := range p.mVariables { - vars = append(vars, v) - } - - sort.Slice(vars, func(i, j int) bool { return vars[i].MetaName() < vars[j].MetaName() }) - - return vars -} - -// labelMappings returns the mappings that will be created for -// valid pairs of labels and resources of which all have IDs. -// If a resource does not exist yet, a label mapping will not -// be returned for it. 
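Combine, defined above, merges the objects of several parsed templates into one and re-runs validation, while the ValidateOptFn options let callers relax that validation. A hedged sketch; tmplA and tmplB are assumed to be previously parsed *Template values:

```go
// Sketch: merge two templates, tolerating parse errors in the combined result.
combined, err := pkger.Combine(
	[]*pkger.Template{tmplA, tmplB},
	pkger.ValidSkipParseError(),
)
if err != nil {
	log.Fatal(err)
}
fmt.Println(len(combined.Summary().Buckets))
```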
-func (p *Template) labelMappings() []SummaryLabelMapping { - labels := p.mLabels - mappings := make([]SummaryLabelMapping, 0, len(labels)) - for _, l := range labels { - mappings = append(mappings, l.mappingSummary()...) - } - - // sort by res type ASC, then res name ASC, then label name ASC - sort.Slice(mappings, func(i, j int) bool { - n, m := mappings[i], mappings[j] - if n.ResourceType < m.ResourceType { - return true - } - if n.ResourceType > m.ResourceType { - return false - } - if n.ResourceName < m.ResourceName { - return true - } - if n.ResourceName > m.ResourceName { - return false - } - return n.LabelName < m.LabelName - }) - - return mappings -} - -func (p *Template) validResources() error { - if len(p.Objects) > 0 { - return nil - } - - res := resourceErr{ - Kind: KindPackage.String(), - RootErrs: []validationErr{{ - Field: "resources", - Msg: "at least 1 kind must be provided", - }}, - } - var err parseErr - err.append(res) - return &err -} - -func (p *Template) graphResources() error { - p.mEnv = make(map[string]bool) - p.mSecrets = make(map[string]bool) - - graphFns := []func() *parseErr{ - // labels are first, this is to validate associations with other resources - p.graphLabels, - p.graphVariables, - p.graphBuckets, - p.graphChecks, - p.graphDashboards, - p.graphNotificationEndpoints, - p.graphNotificationRules, - p.graphTasks, - p.graphTelegrafs, - } - - var pErr parseErr - for _, fn := range graphFns { - if err := fn(); err != nil { - pErr.append(err.Resources...) - } - } - - if len(pErr.Resources) > 0 { - sort.Slice(pErr.Resources, func(i, j int) bool { - ir, jr := pErr.Resources[i], pErr.Resources[j] - return *ir.Idx < *jr.Idx - }) - return &pErr - } - - return nil -} - -func (p *Template) graphBuckets() *parseErr { - p.mBuckets = make(map[string]*bucket) - tracker := p.trackNames(true) - return p.eachResource(KindBucket, func(o Object) []validationErr { - ident, errs := tracker(o) - if len(errs) > 0 { - return errs - } - - bkt := &bucket{ - identity: ident, - Description: o.Spec.stringShort(fieldDescription), - SchemaType: o.Spec.stringShort(fieldBucketSchemaType), - } - if rules, ok := o.Spec[fieldBucketRetentionRules].(retentionRules); ok { - bkt.RetentionRules = rules - } else { - for _, r := range o.Spec.slcResource(fieldBucketRetentionRules) { - bkt.RetentionRules = append(bkt.RetentionRules, retentionRule{ - Type: r.stringShort(fieldType), - Seconds: r.intShort(fieldRetentionRulesEverySeconds), - }) - } - } - if schemas, ok := o.Spec[fieldMeasurementSchemas].(measurementSchemas); ok { - bkt.MeasurementSchemas = schemas - } else { - for _, sr := range o.Spec.slcResource(fieldMeasurementSchemas) { - ms := measurementSchema{Name: sr.stringShort(fieldMeasurementSchemaName)} - for _, scr := range sr.slcResource(fieldMeasurementSchemaColumns) { - ms.Columns = append(ms.Columns, measurementColumn{ - Name: scr.stringShort(fieldMeasurementColumnName), - Type: scr.stringShort(fieldMeasurementColumnType), - DataType: scr.stringShort(fieldMeasurementColumnDataType), - }) - } - bkt.MeasurementSchemas = append(bkt.MeasurementSchemas, ms) - } - } - p.setRefs(bkt.name, bkt.displayName) - - failures := p.parseNestedLabels(o.Spec, func(l *label) error { - bkt.labels = append(bkt.labels, l) - p.mLabels[l.MetaName()].setMapping(bkt, false) - return nil - }) - sort.Sort(bkt.labels) - - p.mBuckets[bkt.MetaName()] = bkt - - return append(failures, bkt.valid()...) 
- }) -} - -func (p *Template) graphLabels() *parseErr { - p.mLabels = make(map[string]*label) - tracker := p.trackNames(true) - return p.eachResource(KindLabel, func(o Object) []validationErr { - ident, errs := tracker(o) - if len(errs) > 0 { - return errs - } - - l := &label{ - identity: ident, - Color: o.Spec.stringShort(fieldLabelColor), - Description: o.Spec.stringShort(fieldDescription), - } - p.mLabels[l.MetaName()] = l - p.setRefs(l.name, l.displayName) - - return l.valid() - }) -} - -func (p *Template) graphChecks() *parseErr { - p.mChecks = make(map[string]*check) - // todo: what is the business goal wrt having unique names? (currently duplicates are allowed) - tracker := p.trackNames(false) - - checkKinds := []struct { - kind Kind - checkKind checkKind - }{ - {kind: KindCheckThreshold, checkKind: checkKindThreshold}, - {kind: KindCheckDeadman, checkKind: checkKindDeadman}, - } - var pErr parseErr - for _, checkKind := range checkKinds { - err := p.eachResource(checkKind.kind, func(o Object) []validationErr { - ident, errs := tracker(o) - if len(errs) > 0 { - return errs - } - - ch := &check{ - kind: checkKind.checkKind, - identity: ident, - description: o.Spec.stringShort(fieldDescription), - every: o.Spec.durationShort(fieldEvery), - level: o.Spec.stringShort(fieldLevel), - offset: o.Spec.durationShort(fieldOffset), - query: strings.TrimSpace(o.Spec.stringShort(fieldQuery)), - reportZero: o.Spec.boolShort(fieldCheckReportZero), - staleTime: o.Spec.durationShort(fieldCheckStaleTime), - status: normStr(o.Spec.stringShort(fieldStatus)), - statusMessage: o.Spec.stringShort(fieldCheckStatusMessageTemplate), - timeSince: o.Spec.durationShort(fieldCheckTimeSince), - } - for _, tagRes := range o.Spec.slcResource(fieldCheckTags) { - ch.tags = append(ch.tags, struct{ k, v string }{ - k: tagRes.stringShort(fieldKey), - v: tagRes.stringShort(fieldValue), - }) - } - for _, th := range o.Spec.slcResource(fieldCheckThresholds) { - ch.thresholds = append(ch.thresholds, threshold{ - threshType: thresholdType(normStr(th.stringShort(fieldType))), - allVals: th.boolShort(fieldCheckAllValues), - level: strings.TrimSpace(strings.ToUpper(th.stringShort(fieldLevel))), - max: th.float64Short(fieldMax), - min: th.float64Short(fieldMin), - val: th.float64Short(fieldValue), - }) - } - - failures := p.parseNestedLabels(o.Spec, func(l *label) error { - ch.labels = append(ch.labels, l) - p.mLabels[l.MetaName()].setMapping(ch, false) - return nil - }) - sort.Sort(ch.labels) - - p.mChecks[ch.MetaName()] = ch - p.setRefs(ch.name, ch.displayName) - return append(failures, ch.valid()...) - }) - if err != nil { - pErr.append(err.Resources...) 
- } - } - if len(pErr.Resources) > 0 { - return &pErr - } - return nil -} - -func (p *Template) graphDashboards() *parseErr { - p.mDashboards = make(map[string]*dashboard) - tracker := p.trackNames(false) - return p.eachResource(KindDashboard, func(o Object) []validationErr { - ident, errs := tracker(o) - if len(errs) > 0 { - return errs - } - - dash := &dashboard{ - identity: ident, - Description: o.Spec.stringShort(fieldDescription), - } - - failures := p.parseNestedLabels(o.Spec, func(l *label) error { - dash.labels = append(dash.labels, l) - p.mLabels[l.MetaName()].setMapping(dash, false) - return nil - }) - sort.Sort(dash.labels) - - for i, cr := range o.Spec.slcResource(fieldDashCharts) { - ch, fails := p.parseChart(dash.MetaName(), i, cr) - if fails != nil { - failures = append(failures, - objectValidationErr(fieldSpec, validationErr{ - Field: fieldDashCharts, - Index: intPtr(i), - Nested: fails, - }), - ) - continue - } - dash.Charts = append(dash.Charts, ch) - } - - p.mDashboards[dash.MetaName()] = dash - p.setRefs(dash.refs()...) - - return append(failures, dash.valid()...) - }) -} - -func (p *Template) graphNotificationEndpoints() *parseErr { - p.mNotificationEndpoints = make(map[string]*notificationEndpoint) - tracker := p.trackNames(true) - - notificationKinds := []struct { - kind Kind - notificationKind notificationEndpointKind - }{ - { - kind: KindNotificationEndpointHTTP, - notificationKind: notificationKindHTTP, - }, - { - kind: KindNotificationEndpointPagerDuty, - notificationKind: notificationKindPagerDuty, - }, - { - kind: KindNotificationEndpointSlack, - notificationKind: notificationKindSlack, - }, - } - - var pErr parseErr - for _, nk := range notificationKinds { - err := p.eachResource(nk.kind, func(o Object) []validationErr { - ident, errs := tracker(o) - if len(errs) > 0 { - return errs - } - - endpoint := ¬ificationEndpoint{ - kind: nk.notificationKind, - identity: ident, - description: o.Spec.stringShort(fieldDescription), - method: strings.TrimSpace(strings.ToUpper(o.Spec.stringShort(fieldNotificationEndpointHTTPMethod))), - httpType: normStr(o.Spec.stringShort(fieldType)), - password: o.Spec.references(fieldNotificationEndpointPassword), - routingKey: o.Spec.references(fieldNotificationEndpointRoutingKey), - status: normStr(o.Spec.stringShort(fieldStatus)), - token: o.Spec.references(fieldNotificationEndpointToken), - url: o.Spec.stringShort(fieldNotificationEndpointURL), - username: o.Spec.references(fieldNotificationEndpointUsername), - } - failures := p.parseNestedLabels(o.Spec, func(l *label) error { - endpoint.labels = append(endpoint.labels, l) - p.mLabels[l.MetaName()].setMapping(endpoint, false) - return nil - }) - sort.Sort(endpoint.labels) - - p.setRefs( - endpoint.name, - endpoint.displayName, - endpoint.password, - endpoint.routingKey, - endpoint.token, - endpoint.username, - ) - - p.mNotificationEndpoints[endpoint.MetaName()] = endpoint - return append(failures, endpoint.valid()...) - }) - if err != nil { - pErr.append(err.Resources...) 
- } - } - if len(pErr.Resources) > 0 { - return &pErr - } - return nil -} - -func (p *Template) graphNotificationRules() *parseErr { - p.mNotificationRules = make(map[string]*notificationRule) - tracker := p.trackNames(false) - return p.eachResource(KindNotificationRule, func(o Object) []validationErr { - ident, errs := tracker(o) - if len(errs) > 0 { - return errs - } - - rule := ¬ificationRule{ - identity: ident, - endpointName: p.getRefWithKnownEnvs(o.Spec, fieldNotificationRuleEndpointName), - description: o.Spec.stringShort(fieldDescription), - channel: o.Spec.stringShort(fieldNotificationRuleChannel), - every: o.Spec.durationShort(fieldEvery), - msgTemplate: o.Spec.stringShort(fieldNotificationRuleMessageTemplate), - offset: o.Spec.durationShort(fieldOffset), - status: normStr(o.Spec.stringShort(fieldStatus)), - } - - for _, sRule := range o.Spec.slcResource(fieldNotificationRuleStatusRules) { - rule.statusRules = append(rule.statusRules, struct{ curLvl, prevLvl string }{ - curLvl: strings.TrimSpace(strings.ToUpper(sRule.stringShort(fieldNotificationRuleCurrentLevel))), - prevLvl: strings.TrimSpace(strings.ToUpper(sRule.stringShort(fieldNotificationRulePreviousLevel))), - }) - } - - for _, tRule := range o.Spec.slcResource(fieldNotificationRuleTagRules) { - rule.tagRules = append(rule.tagRules, struct{ k, v, op string }{ - k: tRule.stringShort(fieldKey), - v: tRule.stringShort(fieldValue), - op: normStr(tRule.stringShort(fieldOperator)), - }) - } - - rule.associatedEndpoint = p.mNotificationEndpoints[rule.endpointName.String()] - - failures := p.parseNestedLabels(o.Spec, func(l *label) error { - rule.labels = append(rule.labels, l) - p.mLabels[l.MetaName()].setMapping(rule, false) - return nil - }) - sort.Sort(rule.labels) - - p.mNotificationRules[rule.MetaName()] = rule - p.setRefs(rule.name, rule.displayName, rule.endpointName) - return append(failures, rule.valid()...) 
- }) -} - -func (p *Template) graphTasks() *parseErr { - p.mTasks = make(map[string]*task) - tracker := p.trackNames(false) - return p.eachResource(KindTask, func(o Object) []validationErr { - ident, errs := tracker(o) - if len(errs) > 0 { - return errs - } - - t := &task{ - identity: ident, - cron: o.Spec.stringShort(fieldTaskCron), - description: o.Spec.stringShort(fieldDescription), - every: o.Spec.durationShort(fieldEvery), - offset: o.Spec.durationShort(fieldOffset), - status: normStr(o.Spec.stringShort(fieldStatus)), - } - - prefix := fmt.Sprintf("tasks[%s].spec", t.MetaName()) - params := o.Spec.slcResource(fieldParams) - task := o.Spec.slcResource("task") - - var ( - err error - failures []validationErr - ) - - t.query, err = p.parseQuery(prefix, o.Spec.stringShort(fieldQuery), params, task) - if err != nil { - failures = append(failures, validationErr{ - Field: fieldQuery, - Msg: err.Error(), - }) - } - - if o.APIVersion == APIVersion2 { - for _, ref := range t.query.task { - switch ref.EnvRef { - case prefix + ".task.name", prefix + ".params.name": - t.displayName = ref - case prefix + ".task.every": - every, ok := ref.defaultVal.(time.Duration) - if ok { - t.every = every - } else { - failures = append(failures, validationErr{ - Field: fieldTask, - Msg: "field every is not duration", - }) - } - case prefix + ".task.offset": - offset, ok := ref.defaultVal.(time.Duration) - if ok { - t.offset = offset - } else { - failures = append(failures, validationErr{ - Field: fieldTask, - Msg: "field every is not duration", - }) - } - } - } - } - - failures = append(failures, p.parseNestedLabels(o.Spec, func(l *label) error { - t.labels = append(t.labels, l) - p.mLabels[l.MetaName()].setMapping(t, false) - return nil - })...) - sort.Sort(t.labels) - - p.mTasks[t.MetaName()] = t - - p.setRefs(t.refs()...) - return append(failures, t.valid()...) - }) -} - -func (p *Template) graphTelegrafs() *parseErr { - p.mTelegrafs = make(map[string]*telegraf) - tracker := p.trackNames(false) - return p.eachResource(KindTelegraf, func(o Object) []validationErr { - ident, errs := tracker(o) - if len(errs) > 0 { - return errs - } - - tele := &telegraf{ - identity: ident, - } - tele.config.Config = o.Spec.stringShort(fieldTelegrafConfig) - tele.config.Description = o.Spec.stringShort(fieldDescription) - - failures := p.parseNestedLabels(o.Spec, func(l *label) error { - tele.labels = append(tele.labels, l) - p.mLabels[l.MetaName()].setMapping(tele, false) - return nil - }) - sort.Sort(tele.labels) - - p.mTelegrafs[tele.MetaName()] = tele - p.setRefs(tele.name, tele.displayName) - - return append(failures, tele.valid()...) 
- }) -} - -func (p *Template) graphVariables() *parseErr { - p.mVariables = make(map[string]*variable) - tracker := p.trackNames(true) - return p.eachResource(KindVariable, func(o Object) []validationErr { - ident, errs := tracker(o) - if len(errs) > 0 { - return errs - } - - newVar := &variable{ - identity: ident, - Description: o.Spec.stringShort(fieldDescription), - Type: normStr(o.Spec.stringShort(fieldType)), - Query: strings.TrimSpace(o.Spec.stringShort(fieldQuery)), - Language: normStr(o.Spec.stringShort(fieldLanguage)), - ConstValues: o.Spec.slcStr(fieldValues), - MapValues: o.Spec.mapStrStr(fieldValues), - } - - if iSelected, ok := o.Spec[fieldVariableSelected].([]interface{}); ok { - for _, res := range iSelected { - newVar.selected = append(newVar.selected, ifaceToReference(res)) - } - } - - failures := p.parseNestedLabels(o.Spec, func(l *label) error { - newVar.labels = append(newVar.labels, l) - p.mLabels[l.MetaName()].setMapping(newVar, false) - return nil - }) - sort.Sort(newVar.labels) - - p.mVariables[newVar.MetaName()] = newVar - p.setRefs(newVar.name, newVar.displayName) - p.setRefs(newVar.selected...) - - return append(failures, newVar.valid()...) - }) -} - -func (p *Template) eachResource(resourceKind Kind, fn func(o Object) []validationErr) *parseErr { - var pErr parseErr - for i, k := range p.Objects { - if err := k.Kind.OK(); err != nil { - pErr.append(resourceErr{ - Kind: k.Kind.String(), - Idx: intPtr(i), - ValidationErrs: []validationErr{ - { - Field: fieldKind, - Msg: err.Error(), - }, - }, - }) - continue - } - if !k.Kind.is(resourceKind) { - continue - } - - if k.APIVersion != APIVersion && k.APIVersion != APIVersion2 { - pErr.append(resourceErr{ - Kind: k.Kind.String(), - Idx: intPtr(i), - ValidationErrs: []validationErr{ - { - Field: fieldAPIVersion, - Msg: fmt.Sprintf("invalid API version provided %q; must be 1 in [%s, %s]", k.APIVersion, APIVersion, APIVersion2), - }, - }, - }) - continue - } - - if errs := isDNS1123Label(k.Name()); len(errs) > 0 { - pErr.append(resourceErr{ - Kind: k.Kind.String(), - Idx: intPtr(i), - ValidationErrs: []validationErr{ - objectValidationErr(fieldMetadata, validationErr{ - Field: fieldName, - Msg: fmt.Sprintf("name %q is invalid; %s", k.Name(), strings.Join(errs, "; ")), - }), - }, - }) - continue - } - - if failures := fn(k); failures != nil { - err := resourceErr{ - Kind: resourceKind.String(), - Idx: intPtr(i), - } - for _, f := range failures { - vErr := validationErr{ - Field: f.Field, - Msg: f.Msg, - Index: f.Index, - Nested: f.Nested, - } - if vErr.Field == "associations" { - err.AssociationErrs = append(err.AssociationErrs, vErr) - continue - } - err.ValidationErrs = append(err.ValidationErrs, vErr) - } - pErr.append(err) - } - } - - if len(pErr.Resources) > 0 { - return &pErr - } - return nil -} - -func (p *Template) parseNestedLabels(r Resource, fn func(lb *label) error) []validationErr { - nestedLabels := make(map[string]*label) - - var failures []validationErr - for i, nr := range r.slcResource(fieldAssociations) { - fail := p.parseNestedLabel(nr, func(l *label) error { - if _, ok := nestedLabels[l.Name()]; ok { - return fmt.Errorf("duplicate nested label: %q", l.Name()) - } - nestedLabels[l.Name()] = l - - return fn(l) - }) - if fail != nil { - fail.Index = intPtr(i) - failures = append(failures, *fail) - } - } - - return failures -} - -func (p *Template) parseNestedLabel(nr Resource, fn func(lb *label) error) *validationErr { - k, err := nr.kind() - if err != nil { - return &validationErr{ - Field: 
fieldAssociations, - Nested: []validationErr{ - { - Field: fieldKind, - Msg: err.Error(), - }, - }, - } - } - if !k.is(KindLabel) { - return nil - } - - nameRef := p.getRefWithKnownEnvs(nr, fieldName) - lb, found := p.mLabels[nameRef.String()] - if !found { - return &validationErr{ - Field: fieldAssociations, - Msg: fmt.Sprintf("label %q does not exist in pkg", nr.Name()), - } - } - - if err := fn(lb); err != nil { - return &validationErr{ - Field: fieldAssociations, - Msg: err.Error(), - } - } - return nil -} - -func (p *Template) trackNames(resourceUniqueByName bool) func(Object) (identity, []validationErr) { - mPkgNames := make(map[string]bool) - uniqNames := make(map[string]bool) - return func(o Object) (identity, []validationErr) { - nameRef := p.getRefWithKnownEnvs(o.Metadata, fieldName) - if mPkgNames[nameRef.String()] { - return identity{}, []validationErr{ - objectValidationErr(fieldMetadata, validationErr{ - Field: fieldName, - Msg: "duplicate name: " + nameRef.String(), - }), - } - } - mPkgNames[nameRef.String()] = true - - displayNameRef := p.getRefWithKnownEnvs(o.Spec, fieldName) - identity := identity{ - name: nameRef, - displayName: displayNameRef, - } - if !resourceUniqueByName { - return identity, nil - } - - name := identity.Name() - if uniqNames[name] { - return identity, []validationErr{ - objectValidationErr(fieldSpec, validationErr{ - Field: fieldName, - Msg: "duplicate name: " + nameRef.String(), - }), - } - } - uniqNames[name] = true - - return identity, nil - } -} - -func (p *Template) getRefWithKnownEnvs(r Resource, field string) *references { - nameRef := r.references(field) - if v, ok := p.mEnvVals[nameRef.EnvRef]; ok { - nameRef.val = v - } - return nameRef -} - -func (p *Template) setRefs(refs ...*references) { - for _, ref := range refs { - if ref.Secret != "" { - p.mSecrets[ref.Secret] = false - } - if ref.EnvRef != "" { - p.mEnv[ref.EnvRef] = p.mEnvVals[ref.EnvRef] != nil - } - } -} - -func parseAxis(ra Resource, domain []float64) *axis { - return &axis{ - Base: ra.stringShort(fieldAxisBase), - Label: ra.stringShort(fieldAxisLabel), - Name: ra.Name(), - Prefix: ra.stringShort(fieldPrefix), - Scale: ra.stringShort(fieldAxisScale), - Suffix: ra.stringShort(fieldSuffix), - Domain: domain, - } -} - -func parseColor(rc Resource) *color { - return &color{ - ID: rc.stringShort("id"), - Name: rc.Name(), - Type: rc.stringShort(fieldType), - Hex: rc.stringShort(fieldColorHex), - Value: flt64Ptr(rc.float64Short(fieldValue)), - } -} - -func (p *Template) parseChart(dashMetaName string, chartIdx int, r Resource) (*chart, []validationErr) { - ck, err := r.chartKind() - if err != nil { - return nil, []validationErr{{ - Field: fieldKind, - Msg: err.Error(), - }} - } - - c := chart{ - Kind: ck, - Name: r.Name(), - BinSize: r.intShort(fieldChartBinSize), - BinCount: r.intShort(fieldChartBinCount), - Geom: r.stringShort(fieldChartGeom), - Height: r.intShort(fieldChartHeight), - Note: r.stringShort(fieldChartNote), - NoteOnEmpty: r.boolShort(fieldChartNoteOnEmpty), - Position: r.stringShort(fieldChartPosition), - Prefix: r.stringShort(fieldPrefix), - Shade: r.boolShort(fieldChartShade), - HoverDimension: r.stringShort(fieldChartHoverDimension), - Suffix: r.stringShort(fieldSuffix), - TickPrefix: r.stringShort(fieldChartTickPrefix), - TickSuffix: r.stringShort(fieldChartTickSuffix), - TimeFormat: r.stringShort(fieldChartTimeFormat), - Width: r.intShort(fieldChartWidth), - XCol: r.stringShort(fieldChartXCol), - GenerateXAxisTicks: r.slcStr(fieldChartGenerateXAxisTicks), - 
XTotalTicks: r.intShort(fieldChartXTotalTicks), - XTickStart: r.float64Short(fieldChartXTickStart), - XTickStep: r.float64Short(fieldChartXTickStep), - YCol: r.stringShort(fieldChartYCol), - GenerateYAxisTicks: r.slcStr(fieldChartGenerateYAxisTicks), - YTotalTicks: r.intShort(fieldChartYTotalTicks), - YTickStart: r.float64Short(fieldChartYTickStart), - YTickStep: r.float64Short(fieldChartYTickStep), - XPos: r.intShort(fieldChartXPos), - YPos: r.intShort(fieldChartYPos), - FillColumns: r.slcStr(fieldChartFillColumns), - YLabelColumnSeparator: r.stringShort(fieldChartYLabelColumnSeparator), - YLabelColumns: r.slcStr(fieldChartYLabelColumns), - YSeriesColumns: r.slcStr(fieldChartYSeriesColumns), - UpperColumn: r.stringShort(fieldChartUpperColumn), - MainColumn: r.stringShort(fieldChartMainColumn), - LowerColumn: r.stringShort(fieldChartLowerColumn), - LegendColorizeRows: r.boolShort(fieldChartLegendColorizeRows), - LegendHide: r.boolShort(fieldChartLegendHide), - LegendOpacity: r.float64Short(fieldChartLegendOpacity), - LegendOrientationThreshold: r.intShort(fieldChartLegendOrientationThreshold), - Zoom: r.float64Short(fieldChartGeoZoom), - Center: center{Lat: r.float64Short(fieldChartGeoCenterLat), Lon: r.float64Short(fieldChartGeoCenterLon)}, - MapStyle: r.stringShort(fieldChartGeoMapStyle), - AllowPanAndZoom: r.boolShort(fieldChartGeoAllowPanAndZoom), - DetectCoordinateFields: r.boolShort(fieldChartGeoDetectCoordinateFields), - } - - if presStaticLeg, ok := r[fieldChartStaticLegend].(StaticLegend); ok { - c.StaticLegend = presStaticLeg - } else { - if staticLeg, ok := ifaceToResource(r[fieldChartStaticLegend]); ok { - c.StaticLegend.ColorizeRows = staticLeg.boolShort(fieldChartStaticLegendColorizeRows) - c.StaticLegend.HeightRatio = staticLeg.float64Short(fieldChartStaticLegendHeightRatio) - c.StaticLegend.Show = staticLeg.boolShort(fieldChartStaticLegendShow) - c.StaticLegend.Opacity = staticLeg.float64Short(fieldChartStaticLegendOpacity) - c.StaticLegend.OrientationThreshold = staticLeg.intShort(fieldChartStaticLegendOrientationThreshold) - c.StaticLegend.ValueAxis = staticLeg.stringShort(fieldChartStaticLegendValueAxis) - c.StaticLegend.WidthRatio = staticLeg.float64Short(fieldChartStaticLegendWidthRatio) - } - } - - if dp, ok := r.int(fieldChartDecimalPlaces); ok { - c.EnforceDecimals = true - c.DecimalPlaces = dp - } - - var failures []validationErr - if presentQueries, ok := r[fieldChartQueries].(queries); ok { - c.Queries = presentQueries - } else { - q, vErrs := p.parseChartQueries(dashMetaName, chartIdx, r.slcResource(fieldChartQueries)) - if len(vErrs) > 0 { - failures = append(failures, validationErr{ - Field: "queries", - Nested: vErrs, - }) - } - c.Queries = q - } - - if presentColors, ok := r[fieldChartColors].(colors); ok { - c.Colors = presentColors - } else { - for _, rc := range r.slcResource(fieldChartColors) { - c.Colors = append(c.Colors, parseColor(rc)) - } - } - - if presAxes, ok := r[fieldChartAxes].(axes); ok { - c.Axes = presAxes - } else { - for _, ra := range r.slcResource(fieldChartAxes) { - domain := []float64{} - - if _, ok := ra[fieldChartDomain]; ok { - for _, str := range ra.slcStr(fieldChartDomain) { - val, err := strconv.ParseFloat(str, 64) - if err != nil { - failures = append(failures, validationErr{ - Field: "axes", - Msg: err.Error(), - }) - } - domain = append(domain, val) - } - } - - c.Axes = append(c.Axes, *parseAxis(ra, domain)) - } - } - - if presentGeoLayers, ok := r[fieldChartGeoLayers].(geoLayers); ok { - c.GeoLayers = presentGeoLayers - } 
else { - parseGeoAxis := func(r Resource, field string) *axis { - if axis, ok := r[field].(*axis); ok { - return axis - } else { - if leg, ok := ifaceToResource(r[field]); ok { - return parseAxis(leg, nil) - } - } - return nil - } - - for _, rl := range r.slcResource(fieldChartGeoLayers) { - gl := geoLayer{ - Type: rl.stringShort(fieldChartGeoLayerType), - RadiusField: rl.stringShort(fieldChartGeoLayerRadiusField), - ColorField: rl.stringShort(fieldChartGeoLayerColorField), - IntensityField: rl.stringShort(fieldChartGeoLayerIntensityField), - Radius: int32(rl.intShort(fieldChartGeoLayerRadius)), - Blur: int32(rl.intShort(fieldChartGeoLayerBlur)), - RadiusDimension: parseGeoAxis(rl, fieldChartGeoLayerRadiusDimension), - ColorDimension: parseGeoAxis(rl, fieldChartGeoLayerColorDimension), - IntensityDimension: parseGeoAxis(rl, fieldChartGeoLayerIntensityDimension), - InterpolateColors: rl.boolShort(fieldChartGeoLayerInterpolateColors), - TrackWidth: int32(rl.intShort(fieldChartGeoLayerTrackWidth)), - Speed: int32(rl.intShort(fieldChartGeoLayerSpeed)), - RandomColors: rl.boolShort(fieldChartGeoLayerRandomColors), - IsClustered: rl.boolShort(fieldChartGeoLayerIsClustered), - } - if presentColors, ok := rl[fieldChartGeoLayerViewColors].(colors); ok { - gl.ViewColors = presentColors - } else { - for _, rc := range rl.slcResource(fieldChartGeoLayerViewColors) { - gl.ViewColors = append(gl.ViewColors, parseColor(rc)) - } - } - c.GeoLayers = append(c.GeoLayers, &gl) - } - } - - if tableOptsRes, ok := ifaceToResource(r[fieldChartTableOptions]); ok { - c.TableOptions = tableOptions{ - VerticalTimeAxis: tableOptsRes.boolShort(fieldChartTableOptionVerticalTimeAxis), - SortByField: tableOptsRes.stringShort(fieldChartTableOptionSortBy), - Wrapping: tableOptsRes.stringShort(fieldChartTableOptionWrapping), - FixFirstColumn: tableOptsRes.boolShort(fieldChartTableOptionFixFirstColumn), - } - } - - for _, fieldOptRes := range r.slcResource(fieldChartFieldOptions) { - c.FieldOptions = append(c.FieldOptions, fieldOption{ - FieldName: fieldOptRes.stringShort(fieldChartFieldOptionFieldName), - DisplayName: fieldOptRes.stringShort(fieldChartFieldOptionDisplayName), - Visible: fieldOptRes.boolShort(fieldChartFieldOptionVisible), - }) - } - - if failures = append(failures, c.validProperties()...); len(failures) > 0 { - return nil, failures - } - - return &c, nil -} - -func (p *Template) parseChartQueries(dashMetaName string, chartIdx int, resources []Resource) (queries, []validationErr) { - var ( - q queries - vErrs []validationErr - ) - for i, rq := range resources { - source := rq.stringShort(fieldQuery) - if source == "" { - continue - } - prefix := fmt.Sprintf("dashboards[%s].spec.charts[%d].queries[%d]", dashMetaName, chartIdx, i) - qq, err := p.parseQuery(prefix, source, rq.slcResource(fieldParams), nil) - if err != nil { - vErrs = append(vErrs, validationErr{ - Field: "query", - Index: intPtr(i), - Msg: err.Error(), - }) - } - q = append(q, qq) - } - return q, vErrs -} - -func (p *Template) parseQuery(prefix, source string, params, task []Resource) (query, error) { - files := parser.ParseSource(source).Files - if len(files) != 1 { - return query{}, influxErr(errors2.EInvalid, "invalid query source") - } - - q := query{ - Query: strings.TrimSpace(source), - } - - mParams := make(map[string]*references) - tParams := make(map[string]*references) - - paramsOpt, paramsErr := edit.GetOption(files[0], "params") - taskOpt, taskErr := edit.GetOption(files[0], "task") - if paramsErr != nil && taskErr != nil { - return 
q, nil - } - - if paramsErr == nil { - obj, ok := paramsOpt.(*ast.ObjectExpression) - if ok { - for _, p := range obj.Properties { - sl, ok := p.Key.(*ast.Identifier) - if !ok { - continue - } - - mParams[sl.Name] = &references{ - EnvRef: sl.Name, - defaultVal: valFromExpr(p.Value), - valType: p.Value.Type(), - } - } - } - } - - if taskErr == nil { - tobj, ok := taskOpt.(*ast.ObjectExpression) - if ok { - for _, p := range tobj.Properties { - sl, ok := p.Key.(*ast.Identifier) - if !ok { - continue - } - - tParams[sl.Name] = &references{ - EnvRef: sl.Name, - defaultVal: valFromExpr(p.Value), - valType: p.Value.Type(), - } - } - } - } - - // override defaults here maybe? - for _, pr := range params { - field := pr.stringShort(fieldKey) - if field == "" { - continue - } - - if _, ok := mParams[field]; !ok { - mParams[field] = &references{EnvRef: field} - } - if def, ok := pr[fieldDefault]; ok { - mParams[field].defaultVal = def - } - if valtype, ok := pr.string(fieldType); ok { - mParams[field].valType = valtype - } - } - - var err error - for _, pr := range task { - field := pr.stringShort(fieldKey) - if field == "" { - continue - } - - if _, ok := tParams[field]; !ok { - tParams[field] = &references{EnvRef: field} - } - - if valtype, ok := pr.string(fieldType); ok { - tParams[field].valType = valtype - } - - if def, ok := pr[fieldDefault]; ok { - switch tParams[field].valType { - case "duration": - switch defDur := def.(type) { - case string: - tParams[field].defaultVal, err = time.ParseDuration(defDur) - if err != nil { - return query{}, influxErr(errors2.EInvalid, err.Error()) - } - case time.Duration: - tParams[field].defaultVal = defDur - } - default: - tParams[field].defaultVal = def - } - } - } - - for _, ref := range mParams { - envRef := fmt.Sprintf("%s.params.%s", prefix, ref.EnvRef) - q.params = append(q.params, &references{ - EnvRef: envRef, - defaultVal: ref.defaultVal, - val: p.mEnvVals[envRef], - valType: ref.valType, - }) - } - - for _, ref := range tParams { - envRef := fmt.Sprintf("%s.task.%s", prefix, ref.EnvRef) - q.task = append(q.task, &references{ - EnvRef: envRef, - defaultVal: ref.defaultVal, - val: p.mEnvVals[envRef], - valType: ref.valType, - }) - } - return q, nil -} - -func valFromExpr(p ast.Expression) interface{} { - switch literal := p.(type) { - case *ast.CallExpression: - sl, ok := literal.Callee.(*ast.Identifier) - if ok && sl.Name == "now" { - return "now()" - } - return nil - case *ast.DateTimeLiteral: - return ast.DateTimeFromLiteral(literal) - case *ast.FloatLiteral: - return ast.FloatFromLiteral(literal) - case *ast.IntegerLiteral: - return ast.IntegerFromLiteral(literal) - case *ast.DurationLiteral: - dur, _ := ast.DurationFrom(literal, time.Time{}) - return dur - case *ast.StringLiteral: - return ast.StringFromLiteral(literal) - case *ast.UnaryExpression: - // a signed duration is represented by a UnaryExpression. - // it is the only unary expression allowed. - v := valFromExpr(literal.Argument) - if dur, ok := v.(time.Duration); ok { - switch literal.Operator { - case ast.SubtractionOperator: - return "-" + dur.String() - } - } - return v - default: - return nil - } -} - -// dns1123LabelMaxLength is a label's max length in DNS (RFC 1123) -const dns1123LabelMaxLength int = 63 - -const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" 
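// Editorial note (not part of the original source): dns1123LabelFmt above accepts
// lowercase alphanumerics optionally separated by hyphens, so a value such as
// "my-bucket-01" matches while "My_Bucket" or "-leading-hyphen" does not.
// isDNS1123Label below combines this pattern with the 63-character length limit.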
-const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" - -var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") - -// isDNS1123Label tests for a string that conforms to the definition of a label in -// DNS (RFC 1123). -func isDNS1123Label(value string) []string { - var errs []string - if len(value) > dns1123LabelMaxLength { - errs = append(errs, fmt.Sprintf("must be no more than %d characters", dns1123LabelMaxLength)) - } - if !dns1123LabelRegexp.MatchString(value) { - errs = append(errs, regexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) - } - return errs -} - -// regexError returns a string explanation of a regex validation failure. -func regexError(msg string, fmt string, examples ...string) string { - if len(examples) == 0 { - return msg + " (regex used for validation is '" + fmt + "')" - } - msg += " (e.g. " - for i := range examples { - if i > 0 { - msg += " or " - } - msg += "'" + examples[i] + "', " - } - msg += "regex used for validation is '" + fmt + "')" - return msg -} - -// Resource is a pkger Resource kind. It can be one of any of -// available kinds that are supported. -type Resource map[string]interface{} - -// Name returns the name of the resource. -func (r Resource) Name() string { - return strings.TrimSpace(r.stringShort(fieldName)) -} - -func (r Resource) kind() (Kind, error) { - if k, ok := r[fieldKind].(Kind); ok { - return k, k.OK() - } - - resKind, ok := r.string(fieldKind) - if !ok { - return KindUnknown, errors.New("no kind provided") - } - - k := Kind(resKind) - return k, k.OK() -} - -func (r Resource) chartKind() (chartKind, error) { - ck, _ := r.kind() - chartKind := chartKind(normStr(string(ck))) - if !chartKind.ok() { - return chartKindUnknown, errors.New("invalid chart kind provided: " + string(chartKind)) - } - return chartKind, nil -} - -func (r Resource) bool(key string) (bool, bool) { - b, ok := r[key].(bool) - return b, ok -} - -func (r Resource) boolShort(key string) bool { - b, _ := r.bool(key) - return b -} - -func (r Resource) duration(key string) (time.Duration, bool) { - astDur, err := options.ParseSignedDuration(r.stringShort(key)) - if err != nil { - return time.Duration(0), false - } - - dur, err := ast.DurationFrom(astDur, time.Time{}) - return dur, err == nil -} - -func (r Resource) durationShort(key string) time.Duration { - dur, _ := r.duration(key) - return dur -} - -func (r Resource) float64(key string) (float64, bool) { - f, ok := r[key].(float64) - if ok { - return f, true - } - - i, ok := r[key].(int) - if ok { - return float64(i), true - } - return 0, false -} - -func (r Resource) float64Short(key string) float64 { - f, _ := r.float64(key) - return f -} - -func (r Resource) int(key string) (int, bool) { - i, ok := r[key].(int) - if ok { - return i, true - } - - f, ok := r[key].(float64) - if ok { - return int(f), true - } - return 0, false -} - -func (r Resource) intShort(key string) int { - i, _ := r.int(key) - return i -} - -func (r Resource) references(key string) *references { - v, ok := r[key] - if !ok { - return &references{} - } - return ifaceToReference(v) -} - -func (r Resource) string(key string) (string, bool) { - return ifaceToStr(r[key]) -} - -func (r Resource) stringShort(key string) string { - s, _ := r.string(key) - return s -} - -func (r Resource) slcResource(key string) []Resource { - v, ok := r[key] - if !ok { - return nil - } - - if resources, ok := v.([]Resource); 
ok { - return resources - } - - iFaceSlc, ok := v.([]interface{}) - if !ok { - return nil - } - - var newResources []Resource - for _, iFace := range iFaceSlc { - r, ok := ifaceToResource(iFace) - if !ok { - continue - } - newResources = append(newResources, r) - } - - return newResources -} - -func (r Resource) slcStr(key string) []string { - v, ok := r[key] - if !ok { - return nil - } - - if strSlc, ok := v.([]string); ok { - return strSlc - } - - iFaceSlc, ok := v.([]interface{}) - if !ok { - return nil - } - - var out []string - for _, iface := range iFaceSlc { - s, ok := ifaceToStr(iface) - if !ok { - continue - } - out = append(out, s) - } - - return out -} - -func (r Resource) mapStrStr(key string) map[string]string { - v, ok := r[key] - if !ok { - return nil - } - - if m, ok := v.(map[string]string); ok { - return m - } - - res, ok := ifaceToResource(v) - if !ok { - return nil - } - - m := make(map[string]string) - for k, v := range res { - s, ok := ifaceToStr(v) - if !ok { - continue - } - m[k] = s - } - return m -} - -func ifaceToResource(i interface{}) (Resource, bool) { - if i == nil { - return nil, false - } - - if res, ok := i.(Resource); ok { - return res, true - } - - if m, ok := i.(map[string]interface{}); ok { - return m, true - } - - m, ok := i.(map[interface{}]interface{}) - if !ok { - return nil, false - } - - newRes := make(Resource) - for k, v := range m { - s, ok := k.(string) - if !ok { - continue - } - newRes[s] = v - } - return newRes, true -} - -func ifaceToReference(i interface{}) *references { - var ref references - for _, f := range []string{fieldReferencesSecret, fieldReferencesEnv} { - resBody, ok := ifaceToResource(i) - if !ok { - continue - } - if keyRes, ok := ifaceToResource(resBody[f]); ok { - switch f { - case fieldReferencesEnv: - ref.EnvRef = keyRes.stringShort(fieldKey) - ref.defaultVal = keyRes[fieldDefault] - case fieldReferencesSecret: - ref.Secret = keyRes.stringShort(fieldKey) - } - } - } - if ref.hasValue() { - return &ref - } - - return &references{val: i} -} - -func ifaceToStr(v interface{}) (string, bool) { - if v == nil { - return "", false - } - - if s, ok := v.(string); ok { - return s, true - } - - if i, ok := v.(int); ok { - return strconv.Itoa(i), true - } - - if f, ok := v.(float64); ok { - return strconv.FormatFloat(f, 'f', -1, 64), true - } - - return "", false -} - -// ParseError is the error from parsing the given package. The ParseError -// behavior provides a list of resources that failed and all validations -// that failed for that resource. A resource can multiple errors, and -// a parseErr can have multiple resources which themselves can have -// multiple validation failures. -type ParseError interface { - ValidationErrs() []ValidationErr -} - -// NewParseError creates a new parse error from existing validation errors. -func NewParseError(errs ...ValidationErr) error { - if len(errs) == 0 { - return nil - } - return &parseErr{rawErrs: errs} -} - -type ( - parseErr struct { - Resources []resourceErr - rawErrs []ValidationErr - } - - // resourceErr describes the error for a particular resource. In - // which it may have numerous validation and association errors. - resourceErr struct { - Kind string - Idx *int - RootErrs []validationErr - AssociationErrs []validationErr - ValidationErrs []validationErr - } - - validationErr struct { - Field string - Msg string - Index *int - - Nested []validationErr - } -) - -// Error implements the error interface. 
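// Editorial note: as implemented below, duplicate failure messages are reported
// only once (tracked via seenErrs) and the remaining messages are joined with
// "\n\t", so nested validation errors print as an indented list.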
-func (e *parseErr) Error() string { - var ( - errMsg []string - seenErrs = make(map[string]bool) - ) - for _, ve := range append(e.ValidationErrs(), e.rawErrs...) { - msg := ve.Error() - if seenErrs[msg] { - continue - } - seenErrs[msg] = true - errMsg = append(errMsg, ve.Error()) - } - - return strings.Join(errMsg, "\n\t") -} - -func (e *parseErr) ValidationErrs() []ValidationErr { - errs := e.rawErrs[:] - for _, r := range e.Resources { - rootErr := ValidationErr{ - Kind: r.Kind, - } - for _, v := range r.RootErrs { - errs = append(errs, traverseErrs(rootErr, v)...) - } - - rootErr.Indexes = []*int{r.Idx} - rootErr.Fields = []string{"root"} - for _, v := range append(r.ValidationErrs, r.AssociationErrs...) { - errs = append(errs, traverseErrs(rootErr, v)...) - } - } - - // used to provide a means to == or != in the map lookup - // to remove duplicate errors - type key struct { - kind string - fields string - indexes string - reason string - } - - m := make(map[key]bool) - var out []ValidationErr - for _, verr := range errs { - k := key{ - kind: verr.Kind, - fields: strings.Join(verr.Fields, ":"), - reason: verr.Reason, - } - var indexes []string - for _, idx := range verr.Indexes { - if idx == nil { - continue - } - indexes = append(indexes, strconv.Itoa(*idx)) - } - k.indexes = strings.Join(indexes, ":") - if m[k] { - continue - } - m[k] = true - out = append(out, verr) - } - - return out -} - -// ValidationErr represents an error during the parsing of a package. -type ValidationErr struct { - Kind string `json:"kind" yaml:"kind"` - Fields []string `json:"fields" yaml:"fields"` - Indexes []*int `json:"idxs" yaml:"idxs"` - Reason string `json:"reason" yaml:"reason"` -} - -func (v ValidationErr) Error() string { - fieldPairs := make([]string, 0, len(v.Fields)) - for i, idx := range v.Indexes { - field := v.Fields[i] - if idx == nil || *idx == -1 { - fieldPairs = append(fieldPairs, field) - continue - } - fieldPairs = append(fieldPairs, fmt.Sprintf("%s[%d]", field, *idx)) - } - - return fmt.Sprintf("kind=%s field=%s reason=%q", v.Kind, strings.Join(fieldPairs, "."), v.Reason) -} - -func traverseErrs(root ValidationErr, vErr validationErr) []ValidationErr { - root.Fields = append(root.Fields, vErr.Field) - root.Indexes = append(root.Indexes, vErr.Index) - if len(vErr.Nested) == 0 { - root.Reason = vErr.Msg - return []ValidationErr{root} - } - - var errs []ValidationErr - for _, n := range vErr.Nested { - errs = append(errs, traverseErrs(root, n)...) - } - return errs -} - -func (e *parseErr) append(errs ...resourceErr) { - e.Resources = append(e.Resources, errs...) -} - -// IsParseErr inspects a given error to determine if it is -// a parseErr. If a parseErr it is, it will return it along -// with the confirmation boolean. If the error is not a parseErr -// it will return nil values for the parseErr, making it unsafe -// to use. 
-func IsParseErr(err error) bool { - if _, ok := err.(*parseErr); ok { - return true - } - - iErr, ok := err.(*errors2.Error) - if !ok { - return false - } - return IsParseErr(iErr.Err) -} - -func objectValidationErr(field string, vErrs ...validationErr) validationErr { - return validationErr{ - Field: field, - Nested: vErrs, - } -} - -func normStr(s string) string { - return strings.TrimSpace(strings.ToLower(s)) -} diff --git a/pkger/parser_models.go b/pkger/parser_models.go deleted file mode 100644 index 9661c22c7d3..00000000000 --- a/pkger/parser_models.go +++ /dev/null @@ -1,2655 +0,0 @@ -package pkger - -import ( - "fmt" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - "time" - - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/ast/astutil" - "github.com/influxdata/flux/ast/edit" - "github.com/influxdata/flux/parser" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/notification" - icheck "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/rule" - "golang.org/x/text/cases" - "golang.org/x/text/language" -) - -type identity struct { - name *references - displayName *references -} - -func (i *identity) Name() string { - if displayName := i.displayName.String(); displayName != "" { - return displayName - } - return i.name.String() -} - -func (i *identity) MetaName() string { - return i.name.String() -} - -func (i *identity) summarizeReferences() []SummaryReference { - refs := make([]SummaryReference, 0) - if i.name.hasEnvRef() { - refs = append(refs, convertRefToRefSummary("metadata.name", i.name)) - } - if i.displayName.hasEnvRef() { - refs = append(refs, convertRefToRefSummary("spec.name", i.displayName)) - } - return refs -} - -func summarizeCommonReferences(ident identity, labels sortedLabels) []SummaryReference { - return append(ident.summarizeReferences(), labels.summarizeReferences()...) 
-} - -const ( - fieldAPIVersion = "apiVersion" - fieldAssociations = "associations" - fieldDefault = "default" - fieldDescription = "description" - fieldEvery = "every" - fieldKey = "key" - fieldKind = "kind" - fieldLanguage = "language" - fieldLevel = "level" - fieldMin = "min" - fieldMax = "max" - fieldMetadata = "metadata" - fieldName = "name" - fieldOffset = "offset" - fieldOperator = "operator" - fieldParams = "params" - fieldPrefix = "prefix" - fieldQuery = "query" - fieldSuffix = "suffix" - fieldSpec = "spec" - fieldStatus = "status" - fieldType = "type" - fieldValue = "value" - fieldValues = "values" -) - -const ( - fieldBucketRetentionRules = "retentionRules" -) - -const bucketNameMinLength = 2 - -type bucket struct { - identity - - Description string - RetentionRules retentionRules - - SchemaType string - MeasurementSchemas measurementSchemas - - labels sortedLabels -} - -func (b *bucket) summarize() SummaryBucket { - return SummaryBucket{ - SummaryIdentifier: SummaryIdentifier{ - Kind: KindBucket, - MetaName: b.MetaName(), - EnvReferences: summarizeCommonReferences(b.identity, b.labels), - }, - Name: b.Name(), - Description: b.Description, - SchemaType: b.SchemaType, - MeasurementSchemas: b.MeasurementSchemas.summarize(), - RetentionPeriod: b.RetentionRules.RP(), - LabelAssociations: toSummaryLabels(b.labels...), - } -} - -func (b *bucket) ResourceType() influxdb.ResourceType { - return KindBucket.ResourceType() -} - -func (b *bucket) valid() []validationErr { - var vErrs []validationErr - if err, ok := isValidName(b.Name(), bucketNameMinLength); !ok { - vErrs = append(vErrs, err) - } - vErrs = append(vErrs, b.RetentionRules.valid()...) - vErrs = append(vErrs, b.MeasurementSchemas.valid()...) - if len(vErrs) == 0 { - return nil - } - return []validationErr{ - objectValidationErr(fieldSpec, vErrs...), - } -} - -const ( - retentionRuleTypeExpire = "expire" -) - -type retentionRule struct { - Type string `json:"type" yaml:"type"` - Seconds int `json:"everySeconds" yaml:"everySeconds"` -} - -func newRetentionRule(d time.Duration) retentionRule { - return retentionRule{ - Type: retentionRuleTypeExpire, - Seconds: int(d.Round(time.Second) / time.Second), - } -} - -func (r retentionRule) valid() []validationErr { - const hour = 3600 - var ff []validationErr - if r.Seconds < hour { - ff = append(ff, validationErr{ - Field: fieldRetentionRulesEverySeconds, - Msg: "seconds must be a minimum of " + strconv.Itoa(hour), - }) - } - if r.Type != retentionRuleTypeExpire { - ff = append(ff, validationErr{ - Field: fieldType, - Msg: `type must be "expire"`, - }) - } - return ff -} - -const ( - fieldRetentionRulesEverySeconds = "everySeconds" -) - -type retentionRules []retentionRule - -func (r retentionRules) RP() time.Duration { - // TODO: this feels very odd to me, will need to follow up with - // team to better understand this - for _, rule := range r { - return time.Duration(rule.Seconds) * time.Second - } - return 0 -} - -func (r retentionRules) valid() []validationErr { - var failures []validationErr - for i, rule := range r { - if ff := rule.valid(); len(ff) > 0 { - failures = append(failures, validationErr{ - Field: fieldBucketRetentionRules, - Index: intPtr(i), - Nested: ff, - }) - } - } - return failures -} - -type checkKind int - -const ( - checkKindDeadman checkKind = iota + 1 - checkKindThreshold -) - -const ( - fieldCheckAllValues = "allValues" - fieldCheckReportZero = "reportZero" - fieldCheckStaleTime = "staleTime" - fieldCheckStatusMessageTemplate = "statusMessageTemplate" - 
fieldCheckTags = "tags" - fieldCheckThresholds = "thresholds" - fieldCheckTimeSince = "timeSince" -) - -const checkNameMinLength = 1 - -type check struct { - identity - - kind checkKind - description string - every time.Duration - level string - offset time.Duration - query string - reportZero bool - staleTime time.Duration - status string - statusMessage string - tags []struct{ k, v string } - timeSince time.Duration - thresholds []threshold - - labels sortedLabels -} - -func (c *check) Labels() []*label { - return c.labels -} - -func (c *check) ResourceType() influxdb.ResourceType { - return KindCheck.ResourceType() -} - -func (c *check) Status() influxdb.Status { - status := influxdb.Status(c.status) - if status == "" { - status = influxdb.Active - } - return status -} - -func (c *check) summarize() SummaryCheck { - base := icheck.Base{ - Name: c.Name(), - Description: c.description, - Every: toNotificationDuration(c.every), - Offset: toNotificationDuration(c.offset), - StatusMessageTemplate: c.statusMessage, - } - base.Query.Text = c.query - for _, tag := range c.tags { - base.Tags = append(base.Tags, influxdb.Tag{Key: tag.k, Value: tag.v}) - } - - sum := SummaryCheck{ - SummaryIdentifier: SummaryIdentifier{ - MetaName: c.MetaName(), - EnvReferences: summarizeCommonReferences(c.identity, c.labels), - }, - Status: c.Status(), - LabelAssociations: toSummaryLabels(c.labels...), - } - switch c.kind { - case checkKindThreshold: - sum.Kind = KindCheckThreshold - sum.Check = &icheck.Threshold{ - Base: base, - Thresholds: toInfluxThresholds(c.thresholds...), - } - case checkKindDeadman: - sum.Kind = KindCheckDeadman - sum.Check = &icheck.Deadman{ - Base: base, - Level: notification.ParseCheckLevel(strings.ToUpper(c.level)), - ReportZero: c.reportZero, - StaleTime: toNotificationDuration(c.staleTime), - TimeSince: toNotificationDuration(c.timeSince), - } - } - return sum -} - -func (c *check) valid() []validationErr { - var vErrs []validationErr - if err, ok := isValidName(c.Name(), checkNameMinLength); !ok { - vErrs = append(vErrs, err) - } - if c.every == 0 { - vErrs = append(vErrs, validationErr{ - Field: fieldEvery, - Msg: "duration value must be provided that is >= 5s (seconds)", - }) - } - if c.query == "" { - vErrs = append(vErrs, validationErr{ - Field: fieldQuery, - Msg: "must provide a non zero value", - }) - } - if c.statusMessage == "" { - vErrs = append(vErrs, validationErr{ - Field: fieldCheckStatusMessageTemplate, - Msg: `must provide a template; ex. 
"Check: ${ r._check_name } is: ${ r._level }"`, - }) - } - if status := c.Status(); status != influxdb.Active && status != influxdb.Inactive { - vErrs = append(vErrs, validationErr{ - Field: fieldStatus, - Msg: "must be 1 of [active, inactive]", - }) - } - - switch c.kind { - case checkKindThreshold: - if len(c.thresholds) == 0 { - vErrs = append(vErrs, validationErr{ - Field: fieldCheckThresholds, - Msg: "must provide at least 1 threshold entry", - }) - } - for i, th := range c.thresholds { - for _, fail := range th.valid() { - fail.Index = intPtr(i) - vErrs = append(vErrs, fail) - } - } - } - - if len(vErrs) > 0 { - return []validationErr{ - objectValidationErr(fieldSpec, vErrs...), - } - } - - return nil -} - -type thresholdType string - -const ( - thresholdTypeGreater thresholdType = "greater" - thresholdTypeLesser thresholdType = "lesser" - thresholdTypeInsideRange thresholdType = "inside_range" - thresholdTypeOutsideRange thresholdType = "outside_range" -) - -var thresholdTypes = map[thresholdType]bool{ - thresholdTypeGreater: true, - thresholdTypeLesser: true, - thresholdTypeInsideRange: true, - thresholdTypeOutsideRange: true, -} - -type threshold struct { - threshType thresholdType - allVals bool - level string - val float64 - min, max float64 -} - -func (t threshold) valid() []validationErr { - var vErrs []validationErr - if notification.ParseCheckLevel(t.level) == notification.Unknown { - vErrs = append(vErrs, validationErr{ - Field: fieldLevel, - Msg: fmt.Sprintf("must be 1 in [CRIT, WARN, INFO, OK]; got=%q", t.level), - }) - } - if !thresholdTypes[t.threshType] { - vErrs = append(vErrs, validationErr{ - Field: fieldType, - Msg: fmt.Sprintf("must be 1 in [Lesser, Greater, Inside_Range, Outside_Range]; got=%q", t.threshType), - }) - } - if t.min > t.max { - vErrs = append(vErrs, validationErr{ - Field: fieldMin, - Msg: "min must be < max", - }) - } - return vErrs -} - -func toInfluxThresholds(thresholds ...threshold) []icheck.ThresholdConfig { - var iThresh []icheck.ThresholdConfig - for _, th := range thresholds { - base := icheck.ThresholdConfigBase{ - AllValues: th.allVals, - Level: notification.ParseCheckLevel(th.level), - } - switch th.threshType { - case thresholdTypeGreater: - iThresh = append(iThresh, icheck.Greater{ - ThresholdConfigBase: base, - Value: th.val, - }) - case thresholdTypeLesser: - iThresh = append(iThresh, icheck.Lesser{ - ThresholdConfigBase: base, - Value: th.val, - }) - case thresholdTypeInsideRange, thresholdTypeOutsideRange: - iThresh = append(iThresh, icheck.Range{ - ThresholdConfigBase: base, - Max: th.max, - Min: th.min, - Within: th.threshType == thresholdTypeInsideRange, - }) - } - } - return iThresh -} - -// chartKind identifies what kind of chart is eluded too. Each -// chart kind has their own requirements for what constitutes -// a chart. 
-type chartKind string - -// available chart kinds -const ( - chartKindUnknown chartKind = "" - chartKindGauge chartKind = "gauge" - chartKindGeo chartKind = "geo" - chartKindHeatMap chartKind = "heatmap" - chartKindHistogram chartKind = "histogram" - chartKindMarkdown chartKind = "markdown" - chartKindMosaic chartKind = "mosaic" - chartKindScatter chartKind = "scatter" - chartKindSingleStat chartKind = "single_stat" - chartKindSingleStatPlusLine chartKind = "single_stat_plus_line" - chartKindTable chartKind = "table" - chartKindXY chartKind = "xy" - chartKindBand chartKind = "band" -) - -func (c chartKind) ok() bool { - switch c { - case chartKindGauge, chartKindGeo, chartKindHeatMap, chartKindHistogram, - chartKindMarkdown, chartKindMosaic, chartKindScatter, - chartKindSingleStat, chartKindSingleStatPlusLine, chartKindTable, - chartKindXY, chartKindBand: - return true - default: - return false - } -} - -func (c chartKind) title() string { - spacedKind := strings.ReplaceAll(string(c), "_", " ") - return strings.ReplaceAll(cases.Title(language.Und).String(spacedKind), " ", "_") -} - -const ( - fieldDashCharts = "charts" -) - -const dashboardNameMinLength = 2 - -type dashboard struct { - identity - - Description string - Charts []*chart - - labels sortedLabels -} - -func (d *dashboard) Labels() []*label { - return d.labels -} - -func (d *dashboard) ResourceType() influxdb.ResourceType { - return KindDashboard.ResourceType() -} - -func (d *dashboard) refs() []*references { - var queryRefs []*references - for _, c := range d.Charts { - queryRefs = append(queryRefs, c.Queries.references()...) - } - return append([]*references{d.name, d.displayName}, queryRefs...) -} - -func (d *dashboard) summarize() SummaryDashboard { - sum := SummaryDashboard{ - SummaryIdentifier: SummaryIdentifier{ - Kind: KindDashboard, - MetaName: d.MetaName(), - EnvReferences: summarizeCommonReferences(d.identity, d.labels), - }, - Name: d.Name(), - Description: d.Description, - LabelAssociations: toSummaryLabels(d.labels...), - } - - for chartIdx, c := range d.Charts { - sum.Charts = append(sum.Charts, SummaryChart{ - Properties: c.properties(), - Height: c.Height, - Width: c.Width, - XPosition: c.XPos, - YPosition: c.YPos, - }) - for qIdx, q := range c.Queries { - for _, ref := range q.params { - parts := strings.Split(ref.EnvRef, ".") - field := fmt.Sprintf("spec.charts[%d].queries[%d].params.%s", chartIdx, qIdx, parts[len(parts)-1]) - sum.EnvReferences = append(sum.EnvReferences, convertRefToRefSummary(field, ref)) - } - } - } - sort.Slice(sum.EnvReferences, func(i, j int) bool { - return sum.EnvReferences[i].EnvRefKey < sum.EnvReferences[j].EnvRefKey - }) - return sum -} - -func (d *dashboard) valid() []validationErr { - var vErrs []validationErr - if err, ok := isValidName(d.Name(), dashboardNameMinLength); !ok { - vErrs = append(vErrs, err) - } - if len(vErrs) == 0 { - return nil - } - return []validationErr{ - objectValidationErr(fieldSpec, vErrs...), - } -} - -const ( - fieldChartAxes = "axes" - fieldChartBinCount = "binCount" - fieldChartBinSize = "binSize" - fieldChartColors = "colors" - fieldChartDecimalPlaces = "decimalPlaces" - fieldChartDomain = "domain" - fieldChartFillColumns = "fillColumns" - fieldChartGeom = "geom" - fieldChartHeight = "height" - fieldChartStaticLegend = "staticLegend" - fieldChartNote = "note" - fieldChartNoteOnEmpty = "noteOnEmpty" - fieldChartPosition = "position" - fieldChartQueries = "queries" - fieldChartShade = "shade" - fieldChartHoverDimension = "hoverDimension" - 
fieldChartFieldOptions = "fieldOptions" - fieldChartTableOptions = "tableOptions" - fieldChartTickPrefix = "tickPrefix" - fieldChartTickSuffix = "tickSuffix" - fieldChartTimeFormat = "timeFormat" - fieldChartYLabelColumnSeparator = "yLabelColumnSeparator" - fieldChartYLabelColumns = "yLabelColumns" - fieldChartYSeriesColumns = "ySeriesColumns" - fieldChartUpperColumn = "upperColumn" - fieldChartMainColumn = "mainColumn" - fieldChartLowerColumn = "lowerColumn" - fieldChartWidth = "width" - fieldChartXCol = "xCol" - fieldChartGenerateXAxisTicks = "generateXAxisTicks" - fieldChartXTotalTicks = "xTotalTicks" - fieldChartXTickStart = "xTickStart" - fieldChartXTickStep = "xTickStep" - fieldChartXPos = "xPos" - fieldChartYCol = "yCol" - fieldChartGenerateYAxisTicks = "generateYAxisTicks" - fieldChartYTotalTicks = "yTotalTicks" - fieldChartYTickStart = "yTickStart" - fieldChartYTickStep = "yTickStep" - fieldChartYPos = "yPos" - fieldChartLegendColorizeRows = "legendColorizeRows" - fieldChartLegendHide = "legendHide" - fieldChartLegendOpacity = "legendOpacity" - fieldChartLegendOrientationThreshold = "legendOrientationThreshold" - fieldChartGeoCenterLon = "lon" - fieldChartGeoCenterLat = "lat" - fieldChartGeoZoom = "zoom" - fieldChartGeoMapStyle = "mapStyle" - fieldChartGeoAllowPanAndZoom = "allowPanAndZoom" - fieldChartGeoDetectCoordinateFields = "detectCoordinateFields" - fieldChartGeoLayers = "geoLayers" -) - -type chart struct { - Kind chartKind - Name string - Prefix string - TickPrefix string - Suffix string - TickSuffix string - Note string - NoteOnEmpty bool - DecimalPlaces int - EnforceDecimals bool - Shade bool - HoverDimension string - StaticLegend StaticLegend - Colors colors - Queries queries - Axes axes - Geom string - YLabelColumnSeparator string - YLabelColumns []string - YSeriesColumns []string - XCol, YCol string - GenerateXAxisTicks []string - GenerateYAxisTicks []string - XTotalTicks, YTotalTicks int - XTickStart, YTickStart float64 - XTickStep, YTickStep float64 - UpperColumn string - MainColumn string - LowerColumn string - XPos, YPos int - Height, Width int - BinSize int - BinCount int - Position string - FieldOptions []fieldOption - FillColumns []string - TableOptions tableOptions - TimeFormat string - LegendColorizeRows bool - LegendHide bool - LegendOpacity float64 - LegendOrientationThreshold int - Zoom float64 - Center center - MapStyle string - AllowPanAndZoom bool - DetectCoordinateFields bool - GeoLayers geoLayers -} - -func (c *chart) properties() influxdb.ViewProperties { - switch c.Kind { - case chartKindGauge: - return influxdb.GaugeViewProperties{ - Type: influxdb.ViewPropertyTypeGauge, - Queries: c.Queries.influxDashQueries(), - Prefix: c.Prefix, - TickPrefix: c.TickPrefix, - Suffix: c.Suffix, - TickSuffix: c.TickSuffix, - ViewColors: c.Colors.influxViewColors(), - DecimalPlaces: influxdb.DecimalPlaces{ - IsEnforced: c.EnforceDecimals, - Digits: int32(c.DecimalPlaces), - }, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - } - case chartKindGeo: - return influxdb.GeoViewProperties{ - Type: influxdb.ViewPropertyTypeGeo, - Queries: c.Queries.influxDashQueries(), - Center: influxdb.Datum{Lat: c.Center.Lat, Lon: c.Center.Lon}, - Zoom: c.Zoom, - MapStyle: c.MapStyle, - AllowPanAndZoom: c.AllowPanAndZoom, - DetectCoordinateFields: c.DetectCoordinateFields, - ViewColor: c.Colors.influxViewColors(), - GeoLayers: c.GeoLayers.influxGeoLayers(), - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - } - case chartKindHeatMap: - return influxdb.HeatmapViewProperties{ - 
Type: influxdb.ViewPropertyTypeHeatMap, - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.strings(), - BinSize: int32(c.BinSize), - XColumn: c.XCol, - GenerateXAxisTicks: c.GenerateXAxisTicks, - XTotalTicks: c.XTotalTicks, - XTickStart: c.XTickStart, - XTickStep: c.XTickStep, - YColumn: c.YCol, - GenerateYAxisTicks: c.GenerateYAxisTicks, - YTotalTicks: c.YTotalTicks, - YTickStart: c.YTickStart, - YTickStep: c.YTickStep, - XDomain: c.Axes.get("x").Domain, - YDomain: c.Axes.get("y").Domain, - XPrefix: c.Axes.get("x").Prefix, - YPrefix: c.Axes.get("y").Prefix, - XSuffix: c.Axes.get("x").Suffix, - YSuffix: c.Axes.get("y").Suffix, - XAxisLabel: c.Axes.get("x").Label, - YAxisLabel: c.Axes.get("y").Label, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - TimeFormat: c.TimeFormat, - LegendColorizeRows: c.LegendColorizeRows, - LegendHide: c.LegendHide, - LegendOpacity: float64(c.LegendOpacity), - LegendOrientationThreshold: int(c.LegendOrientationThreshold), - } - case chartKindHistogram: - return influxdb.HistogramViewProperties{ - Type: influxdb.ViewPropertyTypeHistogram, - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.influxViewColors(), - FillColumns: c.FillColumns, - XColumn: c.XCol, - XDomain: c.Axes.get("x").Domain, - XAxisLabel: c.Axes.get("x").Label, - Position: c.Position, - BinCount: c.BinCount, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - LegendColorizeRows: c.LegendColorizeRows, - LegendHide: c.LegendHide, - LegendOpacity: float64(c.LegendOpacity), - LegendOrientationThreshold: int(c.LegendOrientationThreshold), - } - case chartKindMarkdown: - return influxdb.MarkdownViewProperties{ - Type: influxdb.ViewPropertyTypeMarkdown, - Note: c.Note, - } - case chartKindMosaic: - return influxdb.MosaicViewProperties{ - Type: influxdb.ViewPropertyTypeMosaic, - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.strings(), - HoverDimension: c.HoverDimension, - XColumn: c.XCol, - GenerateXAxisTicks: c.GenerateXAxisTicks, - XTotalTicks: c.XTotalTicks, - XTickStart: c.XTickStart, - XTickStep: c.XTickStep, - YLabelColumnSeparator: c.YLabelColumnSeparator, - YLabelColumns: c.YLabelColumns, - YSeriesColumns: c.YSeriesColumns, - XDomain: c.Axes.get("x").Domain, - YDomain: c.Axes.get("y").Domain, - XPrefix: c.Axes.get("x").Prefix, - YPrefix: c.Axes.get("y").Prefix, - XSuffix: c.Axes.get("x").Suffix, - YSuffix: c.Axes.get("y").Suffix, - XAxisLabel: c.Axes.get("x").Label, - YAxisLabel: c.Axes.get("y").Label, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - TimeFormat: c.TimeFormat, - LegendColorizeRows: c.LegendColorizeRows, - LegendHide: c.LegendHide, - LegendOpacity: float64(c.LegendOpacity), - LegendOrientationThreshold: int(c.LegendOrientationThreshold), - } - case chartKindBand: - return influxdb.BandViewProperties{ - Type: influxdb.ViewPropertyTypeBand, - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.influxViewColors(), - StaticLegend: c.StaticLegend.influxStaticLegend(), - HoverDimension: c.HoverDimension, - XColumn: c.XCol, - GenerateXAxisTicks: c.GenerateXAxisTicks, - XTotalTicks: c.XTotalTicks, - XTickStart: c.XTickStart, - XTickStep: c.XTickStep, - YColumn: c.YCol, - GenerateYAxisTicks: c.GenerateYAxisTicks, - YTotalTicks: c.YTotalTicks, - YTickStart: c.YTickStart, - YTickStep: c.YTickStep, - UpperColumn: c.UpperColumn, - MainColumn: c.MainColumn, - LowerColumn: c.LowerColumn, - Axes: c.Axes.influxAxes(), - Geom: c.Geom, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - TimeFormat: c.TimeFormat, - 
LegendColorizeRows: c.LegendColorizeRows, - LegendHide: c.LegendHide, - LegendOpacity: float64(c.LegendOpacity), - LegendOrientationThreshold: int(c.LegendOrientationThreshold), - } - case chartKindScatter: - return influxdb.ScatterViewProperties{ - Type: influxdb.ViewPropertyTypeScatter, - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.strings(), - XColumn: c.XCol, - GenerateXAxisTicks: c.GenerateXAxisTicks, - XTotalTicks: c.XTotalTicks, - XTickStart: c.XTickStart, - XTickStep: c.XTickStep, - YColumn: c.YCol, - GenerateYAxisTicks: c.GenerateYAxisTicks, - YTotalTicks: c.YTotalTicks, - YTickStart: c.YTickStart, - YTickStep: c.YTickStep, - XDomain: c.Axes.get("x").Domain, - YDomain: c.Axes.get("y").Domain, - XPrefix: c.Axes.get("x").Prefix, - YPrefix: c.Axes.get("y").Prefix, - XSuffix: c.Axes.get("x").Suffix, - YSuffix: c.Axes.get("y").Suffix, - XAxisLabel: c.Axes.get("x").Label, - YAxisLabel: c.Axes.get("y").Label, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - TimeFormat: c.TimeFormat, - LegendColorizeRows: c.LegendColorizeRows, - LegendHide: c.LegendHide, - LegendOpacity: float64(c.LegendOpacity), - LegendOrientationThreshold: int(c.LegendOrientationThreshold), - } - case chartKindSingleStat: - return influxdb.SingleStatViewProperties{ - Type: influxdb.ViewPropertyTypeSingleStat, - Prefix: c.Prefix, - TickPrefix: c.TickPrefix, - Suffix: c.Suffix, - TickSuffix: c.TickSuffix, - DecimalPlaces: influxdb.DecimalPlaces{ - IsEnforced: c.EnforceDecimals, - Digits: int32(c.DecimalPlaces), - }, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.influxViewColors(), - } - case chartKindSingleStatPlusLine: - return influxdb.LinePlusSingleStatProperties{ - Type: influxdb.ViewPropertyTypeSingleStatPlusLine, - Prefix: c.Prefix, - Suffix: c.Suffix, - DecimalPlaces: influxdb.DecimalPlaces{ - IsEnforced: c.EnforceDecimals, - Digits: int32(c.DecimalPlaces), - }, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - XColumn: c.XCol, - GenerateXAxisTicks: c.GenerateXAxisTicks, - XTotalTicks: c.XTotalTicks, - XTickStart: c.XTickStart, - XTickStep: c.XTickStep, - YColumn: c.YCol, - GenerateYAxisTicks: c.GenerateYAxisTicks, - YTotalTicks: c.YTotalTicks, - YTickStart: c.YTickStart, - YTickStep: c.YTickStep, - ShadeBelow: c.Shade, - HoverDimension: c.HoverDimension, - StaticLegend: c.StaticLegend.influxStaticLegend(), - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.influxViewColors(), - Axes: c.Axes.influxAxes(), - Position: c.Position, - LegendColorizeRows: c.LegendColorizeRows, - LegendHide: c.LegendHide, - LegendOpacity: float64(c.LegendOpacity), - LegendOrientationThreshold: int(c.LegendOrientationThreshold), - } - case chartKindTable: - fieldOptions := make([]influxdb.RenamableField, 0, len(c.FieldOptions)) - for _, fieldOpt := range c.FieldOptions { - fieldOptions = append(fieldOptions, influxdb.RenamableField{ - InternalName: fieldOpt.FieldName, - DisplayName: fieldOpt.DisplayName, - Visible: fieldOpt.Visible, - }) - } - - return influxdb.TableViewProperties{ - Type: influxdb.ViewPropertyTypeTable, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - DecimalPlaces: influxdb.DecimalPlaces{ - IsEnforced: c.EnforceDecimals, - Digits: int32(c.DecimalPlaces), - }, - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.influxViewColors(), - TableOptions: influxdb.TableOptions{ - VerticalTimeAxis: c.TableOptions.VerticalTimeAxis, - SortBy: influxdb.RenamableField{ - InternalName: 
c.TableOptions.SortByField, - }, - Wrapping: c.TableOptions.Wrapping, - FixFirstColumn: c.TableOptions.FixFirstColumn, - }, - FieldOptions: fieldOptions, - TimeFormat: c.TimeFormat, - } - case chartKindXY: - return influxdb.XYViewProperties{ - Type: influxdb.ViewPropertyTypeXY, - Note: c.Note, - ShowNoteWhenEmpty: c.NoteOnEmpty, - XColumn: c.XCol, - GenerateXAxisTicks: c.GenerateXAxisTicks, - XTotalTicks: c.XTotalTicks, - XTickStart: c.XTickStart, - XTickStep: c.XTickStep, - YColumn: c.YCol, - GenerateYAxisTicks: c.GenerateYAxisTicks, - YTotalTicks: c.YTotalTicks, - YTickStart: c.YTickStart, - YTickStep: c.YTickStep, - ShadeBelow: c.Shade, - HoverDimension: c.HoverDimension, - StaticLegend: c.StaticLegend.influxStaticLegend(), - Queries: c.Queries.influxDashQueries(), - ViewColors: c.Colors.influxViewColors(), - Axes: c.Axes.influxAxes(), - Geom: c.Geom, - Position: c.Position, - TimeFormat: c.TimeFormat, - LegendColorizeRows: c.LegendColorizeRows, - LegendHide: c.LegendHide, - LegendOpacity: float64(c.LegendOpacity), - LegendOrientationThreshold: int(c.LegendOrientationThreshold), - } - default: - return nil - } -} - -func (c *chart) validProperties() []validationErr { - if c.Kind == chartKindMarkdown { - // at the time of writing, there's nothing to validate for markdown types - return nil - } - - var fails []validationErr - - validatorFns := []func() []validationErr{ - c.validBaseProps, - c.Colors.valid, - } - for _, validatorFn := range validatorFns { - fails = append(fails, validatorFn()...) - } - - // chart kind specific validations - switch c.Kind { - case chartKindGauge: - fails = append(fails, c.Colors.hasTypes(colorTypeMin, colorTypeMax)...) - case chartKindHeatMap: - fails = append(fails, c.Axes.hasAxes("x", "y")...) - case chartKindHistogram: - fails = append(fails, c.Axes.hasAxes("x")...) - case chartKindScatter: - fails = append(fails, c.Axes.hasAxes("x", "y")...) - case chartKindSingleStat: - case chartKindSingleStatPlusLine: - fails = append(fails, c.Axes.hasAxes("x", "y")...) - fails = append(fails, validPosition(c.Position)...) - case chartKindTable: - fails = append(fails, validTableOptions(c.TableOptions)...) - case chartKindXY: - fails = append(fails, validGeometry(c.Geom)...) - fails = append(fails, c.Axes.hasAxes("x", "y")...) - fails = append(fails, validPosition(c.Position)...) 
- } - - return fails -} - -func validPosition(pos string) []validationErr { - pos = strings.ToLower(pos) - if pos != "" && pos != "overlaid" && pos != "stacked" { - return []validationErr{{ - Field: fieldChartPosition, - Msg: fmt.Sprintf("invalid position supplied %q; valid positions is one of [overlaid, stacked]", pos), - }} - } - return nil -} - -func (c *chart) validBaseProps() []validationErr { - var fails []validationErr - if c.Width <= 0 { - fails = append(fails, validationErr{ - Field: fieldChartWidth, - Msg: "must be greater than 0", - }) - } - - if c.Height <= 0 { - fails = append(fails, validationErr{ - Field: fieldChartHeight, - Msg: "must be greater than 0", - }) - } - return fails -} - -var geometryTypes = map[string]bool{ - "line": true, - "step": true, - "stacked": true, - "monotoneX": true, - "bar": true, -} - -func validGeometry(geom string) []validationErr { - if !geometryTypes[geom] { - msg := "type not found" - if geom != "" { - msg = "type provided is not supported" - } - return []validationErr{{ - Field: fieldChartGeom, - Msg: fmt.Sprintf("%s: %q", msg, geom), - }} - } - - return nil -} - -const ( - fieldChartFieldOptionDisplayName = "displayName" - fieldChartFieldOptionFieldName = "fieldName" - fieldChartFieldOptionVisible = "visible" -) - -type fieldOption struct { - FieldName string - DisplayName string - Visible bool -} - -type center struct { - Lat float64 - Lon float64 -} - -type geoLayer struct { - Type string - RadiusField string - ColorField string - IntensityField string - ViewColors colors - Radius int32 - Blur int32 - RadiusDimension *axis - ColorDimension *axis - IntensityDimension *axis - InterpolateColors bool - TrackWidth int32 - Speed int32 - RandomColors bool - IsClustered bool -} - -const ( - fieldChartGeoLayerType = "layerType" - fieldChartGeoLayerRadiusField = "radiusField" - fieldChartGeoLayerIntensityField = "intensityField" - fieldChartGeoLayerColorField = "colorField" - fieldChartGeoLayerViewColors = "viewColors" - fieldChartGeoLayerRadius = "radius" - fieldChartGeoLayerBlur = "blur" - fieldChartGeoLayerRadiusDimension = "radiusDimension" - fieldChartGeoLayerColorDimension = "colorDimension" - fieldChartGeoLayerIntensityDimension = "intensityDimension" - fieldChartGeoLayerInterpolateColors = "interpolateColors" - fieldChartGeoLayerTrackWidth = "trackWidth" - fieldChartGeoLayerSpeed = "speed" - fieldChartGeoLayerRandomColors = "randomColors" - fieldChartGeoLayerIsClustered = "isClustered" -) - -type geoLayers []*geoLayer - -func (l geoLayers) influxGeoLayers() []influxdb.GeoLayer { - var iGeoLayers []influxdb.GeoLayer - for _, ll := range l { - geoLayer := influxdb.GeoLayer{ - Type: ll.Type, - RadiusField: ll.RadiusField, - ColorField: ll.ColorField, - IntensityField: ll.IntensityField, - Radius: ll.Radius, - Blur: ll.Blur, - InterpolateColors: ll.InterpolateColors, - TrackWidth: ll.TrackWidth, - Speed: ll.Speed, - RandomColors: ll.RandomColors, - IsClustered: ll.IsClustered, - } - if ll.RadiusDimension != nil { - geoLayer.RadiusDimension = influxAxis(*ll.RadiusDimension, true) - } - if ll.ColorDimension != nil { - geoLayer.ColorDimension = influxAxis(*ll.ColorDimension, true) - } - if ll.IntensityDimension != nil { - geoLayer.IntensityDimension = influxAxis(*ll.IntensityDimension, true) - } - if ll.ViewColors != nil { - geoLayer.ViewColors = ll.ViewColors.influxViewColors() - } - iGeoLayers = append(iGeoLayers, geoLayer) - } - return iGeoLayers -} - -const ( - fieldChartTableOptionVerticalTimeAxis = "verticalTimeAxis" - 
fieldChartTableOptionSortBy = "sortBy" - fieldChartTableOptionWrapping = "wrapping" - fieldChartTableOptionFixFirstColumn = "fixFirstColumn" -) - -type tableOptions struct { - VerticalTimeAxis bool - SortByField string - Wrapping string - FixFirstColumn bool -} - -func validTableOptions(opts tableOptions) []validationErr { - var fails []validationErr - - switch opts.Wrapping { - case "", "single-line", "truncate", "wrap": - default: - fails = append(fails, validationErr{ - Field: fieldChartTableOptionWrapping, - Msg: `chart table option should 1 in ["single-line", "truncate", "wrap"]`, - }) - } - - if len(fails) == 0 { - return nil - } - - return []validationErr{ - { - Field: fieldChartTableOptions, - Nested: fails, - }, - } -} - -const ( - colorTypeBackground = "background" - colorTypeMin = "min" - colorTypeMax = "max" - colorTypeScale = "scale" - colorTypeText = "text" - colorTypeThreshold = "threshold" -) - -const ( - fieldColorHex = "hex" -) - -type color struct { - ID string `json:"id,omitempty" yaml:"id,omitempty"` - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Type string `json:"type,omitempty" yaml:"type,omitempty"` - Hex string `json:"hex,omitempty" yaml:"hex,omitempty"` - // using reference for Value here so we can set to nil and - // it will be ignored during encoding, keeps our exported pkgs - // clear of unneeded entries. - Value *float64 `json:"value,omitempty" yaml:"value,omitempty"` -} - -// TODO: -// - verify templates are desired -// - template colors so references can be shared -type colors []*color - -func (c colors) influxViewColors() []influxdb.ViewColor { - ptrToFloat64 := func(f *float64) float64 { - if f == nil { - return 0 - } - return *f - } - - var iColors []influxdb.ViewColor - for _, cc := range c { - iColors = append(iColors, influxdb.ViewColor{ - ID: cc.ID, - Type: cc.Type, - Hex: cc.Hex, - Name: cc.Name, - Value: ptrToFloat64(cc.Value), - }) - } - return iColors -} - -func (c colors) strings() []string { - clrs := []string{} - - for _, clr := range c { - clrs = append(clrs, clr.Hex) - } - - return clrs -} - -// TODO: looks like much of these are actually getting defaults in -// -// the UI. looking at system charts, seeing lots of failures for missing -// color types or no colors at all. 
-func (c colors) hasTypes(types ...string) []validationErr { - tMap := make(map[string]bool) - for _, cc := range c { - tMap[cc.Type] = true - } - - var failures []validationErr - for _, t := range types { - if !tMap[t] { - failures = append(failures, validationErr{ - Field: "colors", - Msg: fmt.Sprintf("type not found: %q", t), - }) - } - } - - return failures -} - -func (c colors) valid() []validationErr { - var fails []validationErr - for i, cc := range c { - cErr := validationErr{ - Field: fieldChartColors, - Index: intPtr(i), - } - if cc.Hex == "" { - cErr.Nested = append(cErr.Nested, validationErr{ - Field: fieldColorHex, - Msg: "a color must have a hex value provided", - }) - } - if len(cErr.Nested) > 0 { - fails = append(fails, cErr) - } - } - - return fails -} - -type query struct { - Query string `json:"query" yaml:"query"` - params []*references - task []*references -} - -func (q query) DashboardQuery() string { - if len(q.params) == 0 && len(q.task) == 0 { - return q.Query - } - - files := parser.ParseSource(q.Query).Files - if len(files) != 1 { - return q.Query - } - - paramsOpt, paramsErr := edit.GetOption(files[0], "params") - taskOpt, taskErr := edit.GetOption(files[0], "task") - if taskErr != nil && paramsErr != nil { - return q.Query - } - - if paramsErr == nil { - obj, ok := paramsOpt.(*ast.ObjectExpression) - if ok { - for _, ref := range q.params { - parts := strings.Split(ref.EnvRef, ".") - key := parts[len(parts)-1] - edit.SetProperty(obj, key, ref.expression()) - } - - edit.SetOption(files[0], "params", obj) - } - } - - if taskErr == nil { - tobj, ok := taskOpt.(*ast.ObjectExpression) - if ok { - for _, ref := range q.task { - parts := strings.Split(ref.EnvRef, ".") - key := parts[len(parts)-1] - edit.SetProperty(tobj, key, ref.expression()) - } - - edit.SetOption(files[0], "task", tobj) - } - } - // TODO(danmoran): I'm not happy about ignoring this error, but pkger doesn't have adequate error return values - // in the callstack. In most cases errors are simply ignored and the desired output of the operation is skipped. - // If I were to change the contract here, a lot of other things would need to be changed. - s, _ := astutil.Format(files[0]) - return s -} - -type queries []query - -func (q queries) influxDashQueries() []influxdb.DashboardQuery { - var iQueries []influxdb.DashboardQuery - for _, qq := range q { - iQueries = append(iQueries, influxdb.DashboardQuery{ - Text: qq.DashboardQuery(), - EditMode: "advanced", - }) - } - return iQueries -} - -func (q queries) references() []*references { - var refs []*references - for _, qq := range q { - refs = append(refs, qq.params...) 
- } - return refs -} - -const ( - fieldAxisBase = "base" - fieldAxisLabel = "label" - fieldAxisScale = "scale" -) - -type axis struct { - Base string `json:"base,omitempty" yaml:"base,omitempty"` - Label string `json:"label,omitempty" yaml:"label,omitempty"` - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Prefix string `json:"prefix,omitempty" yaml:"prefix,omitempty"` - Scale string `json:"scale,omitempty" yaml:"scale,omitempty"` - Suffix string `json:"suffix,omitempty" yaml:"suffix,omitempty"` - Domain []float64 `json:"domain,omitempty" yaml:"domain,omitempty"` -} - -type axes []axis - -func (a axes) get(name string) axis { - for _, ax := range a { - if name == ax.Name { - return ax - } - } - return axis{} -} - -func influxAxis(ax axis, nilBounds bool) influxdb.Axis { - bounds := []string{} - if nilBounds { - bounds = nil - } - return influxdb.Axis{ - Bounds: bounds, - Label: ax.Label, - Prefix: ax.Prefix, - Suffix: ax.Suffix, - Base: ax.Base, - Scale: ax.Scale, - } -} - -func (a axes) influxAxes() map[string]influxdb.Axis { - m := make(map[string]influxdb.Axis) - for _, ax := range a { - m[ax.Name] = influxAxis(ax, false) - } - return m -} - -func (a axes) hasAxes(expectedAxes ...string) []validationErr { - mAxes := make(map[string]bool) - for _, ax := range a { - mAxes[ax.Name] = true - } - - var failures []validationErr - for _, expected := range expectedAxes { - if !mAxes[expected] { - failures = append(failures, validationErr{ - Field: fieldChartAxes, - Msg: fmt.Sprintf("axis not found: %q", expected), - }) - } - } - - return failures -} - -type StaticLegend struct { - ColorizeRows bool `json:"colorizeRows,omitempty" yaml:"colorizeRows,omitempty"` - HeightRatio float64 `json:"heightRatio,omitempty" yaml:"heightRatio,omitempty"` - Show bool `json:"show,omitempty" yaml:"show,omitempty"` - Opacity float64 `json:"opacity,omitempty" yaml:"opacity,omitempty"` - OrientationThreshold int `json:"orientationThreshold,omitempty" yaml:"orientationThreshold,omitempty"` - ValueAxis string `json:"valueAxis,omitempty" yaml:"valueAxis,omitempty"` - WidthRatio float64 `json:"widthRatio,omitempty" yaml:"widthRatio,omitempty"` -} - -const ( - fieldChartStaticLegendColorizeRows = "colorizeRows" - fieldChartStaticLegendHeightRatio = "heightRatio" - fieldChartStaticLegendShow = "show" - fieldChartStaticLegendOpacity = "opacity" - fieldChartStaticLegendOrientationThreshold = "orientationThreshold" - fieldChartStaticLegendValueAxis = "valueAxis" - fieldChartStaticLegendWidthRatio = "widthRatio" -) - -func (sl StaticLegend) influxStaticLegend() influxdb.StaticLegend { - return influxdb.StaticLegend{ - ColorizeRows: sl.ColorizeRows, - HeightRatio: sl.HeightRatio, - Show: sl.Show, - Opacity: sl.Opacity, - OrientationThreshold: sl.OrientationThreshold, - ValueAxis: sl.ValueAxis, - WidthRatio: sl.WidthRatio, - } -} - -type assocMapKey struct { - resType influxdb.ResourceType - name string -} - -type assocMapVal struct { - exists bool - v interface{} -} - -func (l assocMapVal) PkgName() string { - t, ok := l.v.(interface{ MetaName() string }) - if ok { - return t.MetaName() - } - return "" -} - -type associationMapping struct { - mappings map[assocMapKey][]assocMapVal -} - -func (l *associationMapping) setMapping(v interface { - ResourceType() influxdb.ResourceType - Name() string -}, exists bool) { - if l == nil { - return - } - if l.mappings == nil { - l.mappings = make(map[assocMapKey][]assocMapVal) - } - - k := assocMapKey{ - resType: v.ResourceType(), - name: v.Name(), - } - val := assocMapVal{ 
- exists: exists, - v: v, - } - existing, ok := l.mappings[k] - if !ok { - l.mappings[k] = []assocMapVal{val} - return - } - for i, ex := range existing { - if ex.v == v { - existing[i].exists = exists - return - } - } - l.mappings[k] = append(l.mappings[k], val) -} - -const ( - fieldLabelColor = "color" -) - -const labelNameMinLength = 2 - -type label struct { - identity - - Color string - Description string - associationMapping -} - -func (l *label) summarize() SummaryLabel { - return SummaryLabel{ - SummaryIdentifier: SummaryIdentifier{ - Kind: KindLabel, - MetaName: l.MetaName(), - EnvReferences: l.identity.summarizeReferences(), - }, - Name: l.Name(), - Properties: struct { - Color string `json:"color"` - Description string `json:"description"` - }{ - Color: l.Color, - Description: l.Description, - }, - } -} - -func (l *label) mappingSummary() []SummaryLabelMapping { - var mappings []SummaryLabelMapping - for resource, vals := range l.mappings { - for _, v := range vals { - status := StateStatusNew - if v.exists { - status = StateStatusExists - } - mappings = append(mappings, SummaryLabelMapping{ - exists: v.exists, - Status: status, - ResourceMetaName: v.PkgName(), - ResourceName: resource.name, - ResourceType: resource.resType, - LabelMetaName: l.MetaName(), - LabelName: l.Name(), - }) - } - } - - return mappings -} - -func (l *label) valid() []validationErr { - var vErrs []validationErr - if err, ok := isValidName(l.Name(), labelNameMinLength); !ok { - vErrs = append(vErrs, err) - } - if len(vErrs) == 0 { - return nil - } - return []validationErr{ - objectValidationErr(fieldSpec, vErrs...), - } -} - -func toSummaryLabels(labels ...*label) []SummaryLabel { - iLabels := make([]SummaryLabel, 0, len(labels)) - for _, l := range labels { - iLabels = append(iLabels, l.summarize()) - } - return iLabels -} - -type sortedLabels []*label - -func (s sortedLabels) summarizeReferences() []SummaryReference { - refs := make([]SummaryReference, 0) - for i, l := range s { - if !l.name.hasEnvRef() { - continue - } - field := fmt.Sprintf("spec.%s[%d].name", fieldAssociations, i) - refs = append(refs, convertRefToRefSummary(field, l.name)) - } - return refs -} - -func (s sortedLabels) Len() int { - return len(s) -} - -func (s sortedLabels) Less(i, j int) bool { - return s[i].MetaName() < s[j].MetaName() -} - -func (s sortedLabels) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -type notificationEndpointKind int - -const ( - notificationKindHTTP notificationEndpointKind = iota + 1 - notificationKindPagerDuty - notificationKindSlack -) - -func (n notificationEndpointKind) String() string { - if n > 0 && n < 4 { - return [...]string{ - endpoint.HTTPType, - endpoint.PagerDutyType, - endpoint.SlackType, - }[n-1] - } - return "" -} - -const ( - notificationHTTPAuthTypeBasic = "basic" - notificationHTTPAuthTypeBearer = "bearer" - notificationHTTPAuthTypeNone = "none" -) - -const ( - fieldNotificationEndpointHTTPMethod = "method" - fieldNotificationEndpointPassword = "password" - fieldNotificationEndpointRoutingKey = "routingKey" - fieldNotificationEndpointToken = "token" - fieldNotificationEndpointURL = "url" - fieldNotificationEndpointUsername = "username" -) - -type notificationEndpoint struct { - identity - - kind notificationEndpointKind - description string - method string - password *references - routingKey *references - status string - token *references - httpType string - url string - username *references - - labels sortedLabels -} - -func (n *notificationEndpoint) Labels() []*label { - return 
n.labels -} - -func (n *notificationEndpoint) ResourceType() influxdb.ResourceType { - return KindNotificationEndpointSlack.ResourceType() -} - -func (n *notificationEndpoint) base() endpoint.Base { - return endpoint.Base{ - Name: n.Name(), - Description: n.description, - Status: n.influxStatus(), - } -} - -func (n *notificationEndpoint) summarize() SummaryNotificationEndpoint { - base := n.base() - sum := SummaryNotificationEndpoint{ - SummaryIdentifier: SummaryIdentifier{ - MetaName: n.MetaName(), - EnvReferences: summarizeCommonReferences(n.identity, n.labels), - }, - LabelAssociations: toSummaryLabels(n.labels...), - } - - switch n.kind { - case notificationKindHTTP: - sum.Kind = KindNotificationEndpointHTTP - e := &endpoint.HTTP{ - Base: base, - URL: n.url, - Method: n.method, - } - switch n.httpType { - case notificationHTTPAuthTypeBasic: - e.AuthMethod = notificationHTTPAuthTypeBasic - e.Password = n.password.SecretField() - e.Username = n.username.SecretField() - case notificationHTTPAuthTypeBearer: - e.AuthMethod = notificationHTTPAuthTypeBearer - e.Token = n.token.SecretField() - case notificationHTTPAuthTypeNone: - e.AuthMethod = notificationHTTPAuthTypeNone - } - sum.NotificationEndpoint = e - case notificationKindPagerDuty: - sum.Kind = KindNotificationEndpointPagerDuty - sum.NotificationEndpoint = &endpoint.PagerDuty{ - Base: base, - ClientURL: n.url, - RoutingKey: n.routingKey.SecretField(), - } - case notificationKindSlack: - sum.Kind = KindNotificationEndpointSlack - sum.NotificationEndpoint = &endpoint.Slack{ - Base: base, - URL: n.url, - Token: n.token.SecretField(), - } - } - return sum -} - -func (n *notificationEndpoint) influxStatus() influxdb.Status { - status := influxdb.Active - if n.status != "" { - status = influxdb.Status(n.status) - } - return status -} - -var validEndpointHTTPMethods = map[string]bool{ - "DELETE": true, - "GET": true, - "HEAD": true, - "OPTIONS": true, - "PATCH": true, - "POST": true, - "PUT": true, -} - -func (n *notificationEndpoint) valid() []validationErr { - var failures []validationErr - if err, ok := isValidName(n.Name(), 1); !ok { - failures = append(failures, err) - } - - if _, err := url.Parse(n.url); err != nil || n.url == "" { - failures = append(failures, validationErr{ - Field: fieldNotificationEndpointURL, - Msg: "must be valid url", - }) - } - - status := influxdb.Status(n.status) - if status != "" && influxdb.Inactive != status && influxdb.Active != status { - failures = append(failures, validationErr{ - Field: fieldStatus, - Msg: "not a valid status; valid statues are one of [active, inactive]", - }) - } - - switch n.kind { - case notificationKindPagerDuty: - if !n.routingKey.hasValue() { - failures = append(failures, validationErr{ - Field: fieldNotificationEndpointRoutingKey, - Msg: "must be provide", - }) - } - case notificationKindHTTP: - if !validEndpointHTTPMethods[n.method] { - failures = append(failures, validationErr{ - Field: fieldNotificationEndpointHTTPMethod, - Msg: "http method must be a valid HTTP verb", - }) - } - - switch n.httpType { - case notificationHTTPAuthTypeBasic: - if !n.password.hasValue() { - failures = append(failures, validationErr{ - Field: fieldNotificationEndpointPassword, - Msg: "must provide non empty string", - }) - } - if !n.username.hasValue() { - failures = append(failures, validationErr{ - Field: fieldNotificationEndpointUsername, - Msg: "must provide non empty string", - }) - } - case notificationHTTPAuthTypeBearer: - if !n.token.hasValue() { - failures = append(failures, 
validationErr{ - Field: fieldNotificationEndpointToken, - Msg: "must provide non empty string", - }) - } - case notificationHTTPAuthTypeNone: - default: - failures = append(failures, validationErr{ - Field: fieldType, - Msg: fmt.Sprintf( - "invalid type provided %q; valid type is 1 in [%s, %s, %s]", - n.httpType, - notificationHTTPAuthTypeBasic, - notificationHTTPAuthTypeBearer, - notificationHTTPAuthTypeNone, - ), - }) - } - } - - if len(failures) > 0 { - return []validationErr{ - objectValidationErr(fieldSpec, failures...), - } - } - - return nil -} - -const ( - fieldNotificationRuleChannel = "channel" - fieldNotificationRuleCurrentLevel = "currentLevel" - fieldNotificationRuleEndpointName = "endpointName" - fieldNotificationRuleMessageTemplate = "messageTemplate" - fieldNotificationRulePreviousLevel = "previousLevel" - fieldNotificationRuleStatusRules = "statusRules" - fieldNotificationRuleTagRules = "tagRules" -) - -type notificationRule struct { - identity - - channel string - description string - every time.Duration - msgTemplate string - offset time.Duration - status string - statusRules []struct{ curLvl, prevLvl string } - tagRules []struct{ k, v, op string } - - associatedEndpoint *notificationEndpoint - endpointName *references - - labels sortedLabels -} - -func (r *notificationRule) Labels() []*label { - return r.labels -} - -func (r *notificationRule) ResourceType() influxdb.ResourceType { - return KindNotificationRule.ResourceType() -} - -func (r *notificationRule) Status() influxdb.Status { - if r.status == "" { - return influxdb.Active - } - return influxdb.Status(r.status) -} - -func (r *notificationRule) endpointMetaName() string { - if r.associatedEndpoint != nil { - return r.associatedEndpoint.MetaName() - } - return "" -} - -func (r *notificationRule) summarize() SummaryNotificationRule { - var endpointPkgName, endpointType string - if r.associatedEndpoint != nil { - endpointPkgName = r.associatedEndpoint.MetaName() - endpointType = r.associatedEndpoint.kind.String() - } - - envRefs := summarizeCommonReferences(r.identity, r.labels) - if r.endpointName.hasEnvRef() { - envRefs = append(envRefs, convertRefToRefSummary("spec.endpointName", r.endpointName)) - } - - return SummaryNotificationRule{ - SummaryIdentifier: SummaryIdentifier{ - Kind: KindNotificationRule, - MetaName: r.MetaName(), - EnvReferences: envRefs, - }, - Name: r.Name(), - EndpointMetaName: endpointPkgName, - EndpointType: endpointType, - Description: r.description, - Every: r.every.String(), - LabelAssociations: toSummaryLabels(r.labels...), - Offset: r.offset.String(), - MessageTemplate: r.msgTemplate, - Status: r.Status(), - StatusRules: toSummaryStatusRules(r.statusRules), - TagRules: toSummaryTagRules(r.tagRules), - } -} - -func (r *notificationRule) toInfluxRule() influxdb.NotificationRule { - base := rule.Base{ - Name: r.Name(), - Description: r.description, - Every: toNotificationDuration(r.every), - Offset: toNotificationDuration(r.offset), - } - for _, sr := range r.statusRules { - var prevLvl *notification.CheckLevel - if lvl := notification.ParseCheckLevel(sr.prevLvl); lvl != notification.Unknown { - prevLvl = &lvl - } - base.StatusRules = append(base.StatusRules, notification.StatusRule{ - CurrentLevel: notification.ParseCheckLevel(sr.curLvl), - PreviousLevel: prevLvl, - }) - } - for _, tr := range r.tagRules { - op, _ := influxdb.ToOperator(tr.op) - base.TagRules = append(base.TagRules, notification.TagRule{ - Tag: influxdb.Tag{ - Key: tr.k, - Value: tr.v, - }, - Operator: op, - }) - } - - 
switch r.associatedEndpoint.kind { - case notificationKindHTTP: - return &rule.HTTP{Base: base} - case notificationKindPagerDuty: - return &rule.PagerDuty{ - Base: base, - MessageTemplate: r.msgTemplate, - } - case notificationKindSlack: - return &rule.Slack{ - Base: base, - Channel: r.channel, - MessageTemplate: r.msgTemplate, - } - } - return nil -} - -func (r *notificationRule) valid() []validationErr { - var vErrs []validationErr - if err, ok := isValidName(r.Name(), 1); !ok { - vErrs = append(vErrs, err) - } - if !r.endpointName.hasValue() { - vErrs = append(vErrs, validationErr{ - Field: fieldNotificationRuleEndpointName, - Msg: "must be provided", - }) - } else if r.associatedEndpoint == nil { - vErrs = append(vErrs, validationErr{ - Field: fieldNotificationRuleEndpointName, - Msg: fmt.Sprintf("notification endpoint %q does not exist in pkg", r.endpointName.String()), - }) - } - - if r.every == 0 { - vErrs = append(vErrs, validationErr{ - Field: fieldEvery, - Msg: "must be provided", - }) - } - if status := r.Status(); status != influxdb.Active && status != influxdb.Inactive { - vErrs = append(vErrs, validationErr{ - Field: fieldStatus, - Msg: fmt.Sprintf("must be 1 in [active, inactive]; got=%q", r.status), - }) - } - - if len(r.statusRules) == 0 { - vErrs = append(vErrs, validationErr{ - Field: fieldNotificationRuleStatusRules, - Msg: "must provide at least 1", - }) - } - - var sRuleErrs []validationErr - for i, sRule := range r.statusRules { - if notification.ParseCheckLevel(sRule.curLvl) == notification.Unknown { - sRuleErrs = append(sRuleErrs, validationErr{ - Field: fieldNotificationRuleCurrentLevel, - Msg: fmt.Sprintf("must be 1 in [CRIT, WARN, INFO, OK]; got=%q", sRule.curLvl), - Index: intPtr(i), - }) - } - if sRule.prevLvl != "" && notification.ParseCheckLevel(sRule.prevLvl) == notification.Unknown { - sRuleErrs = append(sRuleErrs, validationErr{ - Field: fieldNotificationRulePreviousLevel, - Msg: fmt.Sprintf("must be 1 in [CRIT, WARN, INFO, OK]; got=%q", sRule.prevLvl), - Index: intPtr(i), - }) - } - } - if len(sRuleErrs) > 0 { - vErrs = append(vErrs, validationErr{ - Field: fieldNotificationRuleStatusRules, - Nested: sRuleErrs, - }) - } - - var tagErrs []validationErr - for i, tRule := range r.tagRules { - if _, ok := influxdb.ToOperator(tRule.op); !ok { - tagErrs = append(tagErrs, validationErr{ - Field: fieldOperator, - Msg: fmt.Sprintf("must be 1 in [equal]; got=%q", tRule.op), - Index: intPtr(i), - }) - } - } - if len(tagErrs) > 0 { - vErrs = append(vErrs, validationErr{ - Field: fieldNotificationRuleTagRules, - Nested: tagErrs, - }) - } - - if len(vErrs) > 0 { - return []validationErr{ - objectValidationErr(fieldSpec, vErrs...), - } - } - - return nil -} - -func toSummaryStatusRules(statusRules []struct{ curLvl, prevLvl string }) []SummaryStatusRule { - out := make([]SummaryStatusRule, 0, len(statusRules)) - for _, sRule := range statusRules { - out = append(out, SummaryStatusRule{ - CurrentLevel: sRule.curLvl, - PreviousLevel: sRule.prevLvl, - }) - } - sort.Slice(out, func(i, j int) bool { - si, sj := out[i], out[j] - if si.CurrentLevel == sj.CurrentLevel { - return si.PreviousLevel < sj.PreviousLevel - } - return si.CurrentLevel < sj.CurrentLevel - }) - return out -} - -func toSummaryTagRules(tagRules []struct{ k, v, op string }) []SummaryTagRule { - out := make([]SummaryTagRule, 0, len(tagRules)) - for _, tRule := range tagRules { - out = append(out, SummaryTagRule{ - Key: tRule.k, - Value: tRule.v, - Operator: tRule.op, - }) - } - sort.Slice(out, func(i, j int) 
bool { - ti, tj := out[i], out[j] - if ti.Key == tj.Key && ti.Value == tj.Value { - return ti.Operator < tj.Operator - } - if ti.Key == tj.Key { - return ti.Value < tj.Value - } - return ti.Key < tj.Key - }) - return out -} - -const ( - fieldTaskCron = "cron" - fieldTask = "task" -) - -type task struct { - identity - - cron string - description string - every time.Duration - offset time.Duration - query query - status string - - labels sortedLabels -} - -func (t *task) Labels() []*label { - return t.labels -} - -func (t *task) ResourceType() influxdb.ResourceType { - return KindTask.ResourceType() -} - -func (t *task) Status() influxdb.Status { - if t.status == "" { - return influxdb.Active - } - return influxdb.Status(t.status) -} - -func (t *task) flux() string { - translator := taskFluxTranslation{ - name: t.Name(), - cron: t.cron, - every: t.every, - offset: t.offset, - rawQuery: t.query.DashboardQuery(), - } - return translator.flux() -} - -func (t *task) refs() []*references { - return append(t.query.params, t.name, t.displayName) -} - -func (t *task) summarize() SummaryTask { - refs := summarizeCommonReferences(t.identity, t.labels) - for _, ref := range t.query.params { - parts := strings.Split(ref.EnvRef, ".") - field := fmt.Sprintf("spec.params.%s", parts[len(parts)-1]) - refs = append(refs, convertRefToRefSummary(field, ref)) - } - for _, ref := range t.query.task { - parts := strings.Split(ref.EnvRef, ".") - field := fmt.Sprintf("spec.task.%s", parts[len(parts)-1]) - refs = append(refs, convertRefToRefSummary(field, ref)) - } - sort.Slice(refs, func(i, j int) bool { - return refs[i].EnvRefKey < refs[j].EnvRefKey - }) - - return SummaryTask{ - SummaryIdentifier: SummaryIdentifier{ - Kind: KindTask, - MetaName: t.MetaName(), - EnvReferences: refs, - }, - Name: t.Name(), - Cron: t.cron, - Description: t.description, - Every: durToStr(t.every), - Offset: durToStr(t.offset), - Query: t.query.DashboardQuery(), - Status: t.Status(), - - LabelAssociations: toSummaryLabels(t.labels...), - } -} - -func (t *task) valid() []validationErr { - var vErrs []validationErr - if err, ok := isValidName(t.Name(), 1); !ok { - vErrs = append(vErrs, err) - } - - if t.cron == "" && t.every == 0 { - vErrs = append(vErrs, - validationErr{ - Field: fieldEvery, - Msg: "must provide if cron field is not provided", - }, - validationErr{ - Field: fieldTaskCron, - Msg: "must provide if every field is not provided", - }, - ) - } - - if t.query.Query == "" { - vErrs = append(vErrs, validationErr{ - Field: fieldQuery, - Msg: "must provide a non zero value", - }) - } - - if status := t.Status(); status != influxdb.Active && status != influxdb.Inactive { - vErrs = append(vErrs, validationErr{ - Field: fieldStatus, - Msg: "must be 1 of [active, inactive]", - }) - } - - if len(vErrs) > 0 { - return []validationErr{ - objectValidationErr(fieldSpec, vErrs...), - } - } - - return nil -} - -var fluxRegex = regexp.MustCompile(`import\s+\".*\"`) - -type taskFluxTranslation struct { - name string - cron string - every time.Duration - offset time.Duration - - rawQuery string -} - -func (tft taskFluxTranslation) flux() string { - var sb strings.Builder - writeLine := func(s string) { - sb.WriteString(s + "\n") - } - - imports, queryBody := tft.separateQueryImports() - if imports != "" { - writeLine(imports + "\n") - } - - writeLine(tft.generateTaskOption()) - sb.WriteString(queryBody) - - return sb.String() -} - -func (tft taskFluxTranslation) separateQueryImports() (imports string, querySansImports string) { - if indices := 
fluxRegex.FindAllIndex([]byte(tft.rawQuery), -1); len(indices) > 0 { - lastImportIdx := indices[len(indices)-1][1] - return tft.rawQuery[:lastImportIdx], tft.rawQuery[lastImportIdx:] - } - - return "", tft.rawQuery -} - -func (tft taskFluxTranslation) generateTaskOption() string { - taskOpts := []string{fmt.Sprintf("name: %q", tft.name)} - if tft.cron != "" { - taskOpts = append(taskOpts, fmt.Sprintf("cron: %q", tft.cron)) - } - if tft.every > 0 { - taskOpts = append(taskOpts, fmt.Sprintf("every: %s", tft.every)) - } - if tft.offset > 0 { - taskOpts = append(taskOpts, fmt.Sprintf("offset: %s", tft.offset)) - } - - // this is required by the API, super nasty. Will be super challenging for - // anyone outside org to figure out how to do this within an hour of looking - // at the API :sadpanda:. Would be ideal to let the API translate the arguments - // into this required form instead of forcing that complexity on the caller. - return fmt.Sprintf("option task = { %s }", strings.Join(taskOpts, ", ")) -} - -const ( - fieldTelegrafConfig = "config" -) - -type telegraf struct { - identity - - config influxdb.TelegrafConfig - - labels sortedLabels -} - -func (t *telegraf) Labels() []*label { - return t.labels -} - -func (t *telegraf) ResourceType() influxdb.ResourceType { - return KindTelegraf.ResourceType() -} - -func (t *telegraf) summarize() SummaryTelegraf { - cfg := t.config - cfg.Name = t.Name() - return SummaryTelegraf{ - SummaryIdentifier: SummaryIdentifier{ - Kind: KindTelegraf, - MetaName: t.MetaName(), - EnvReferences: summarizeCommonReferences(t.identity, t.labels), - }, - TelegrafConfig: cfg, - LabelAssociations: toSummaryLabels(t.labels...), - } -} - -func (t *telegraf) valid() []validationErr { - var vErrs []validationErr - if err, ok := isValidName(t.Name(), 1); !ok { - vErrs = append(vErrs, err) - } - if t.config.Config == "" { - vErrs = append(vErrs, validationErr{ - Field: fieldTelegrafConfig, - Msg: "no config provided", - }) - } - - if len(vErrs) > 0 { - return []validationErr{ - objectValidationErr(fieldSpec, vErrs...), - } - } - - return nil -} - -const ( - fieldArgTypeConstant = "constant" - fieldArgTypeMap = "map" - fieldArgTypeQuery = "query" - fieldVariableSelected = "selected" -) - -type variable struct { - identity - - Description string - Type string - Query string - Language string - ConstValues []string - MapValues map[string]string - selected []*references - - labels sortedLabels -} - -func (v *variable) Labels() []*label { - return v.labels -} - -func (v *variable) ResourceType() influxdb.ResourceType { - return KindVariable.ResourceType() -} - -func (v *variable) Selected() []string { - selected := make([]string, 0, len(v.selected)) - for _, sel := range v.selected { - s := sel.String() - if s == "" { - continue - } - selected = append(selected, s) - } - return selected -} - -func (v *variable) summarize() SummaryVariable { - envRefs := summarizeCommonReferences(v.identity, v.labels) - for i, sel := range v.selected { - if sel.hasEnvRef() { - field := fmt.Sprintf("spec.%s[%d]", fieldVariableSelected, i) - envRefs = append(envRefs, convertRefToRefSummary(field, sel)) - } - } - return SummaryVariable{ - SummaryIdentifier: SummaryIdentifier{ - Kind: KindVariable, - MetaName: v.MetaName(), - EnvReferences: envRefs, - }, - Name: v.Name(), - Description: v.Description, - Selected: v.Selected(), - Arguments: v.influxVarArgs(), - LabelAssociations: toSummaryLabels(v.labels...), - } -} - -func (v *variable) influxVarArgs() *influxdb.VariableArguments { - // this zero 
value check is for situations where we want to marshal/unmarshal - // a variable and not have the invalid args blow up during unmarshalling. When - // that validation is decoupled from the unmarshalling, we can clean this up. - if v.Type == "" { - return nil - } - - args := &influxdb.VariableArguments{ - Type: v.Type, - } - switch args.Type { - case "query": - args.Values = influxdb.VariableQueryValues{ - Query: v.Query, - Language: v.Language, - } - case "constant": - args.Values = influxdb.VariableConstantValues(v.ConstValues) - case "map": - args.Values = influxdb.VariableMapValues(v.MapValues) - } - return args -} - -func (v *variable) valid() []validationErr { - var failures []validationErr - if err, ok := isValidName(v.Name(), 1); !ok { - failures = append(failures, err) - } - - switch v.Type { - case "map": - if len(v.MapValues) == 0 { - failures = append(failures, validationErr{ - Field: fieldValues, - Msg: "map variable must have at least 1 key/val pair", - }) - } - case "constant": - if len(v.ConstValues) == 0 { - failures = append(failures, validationErr{ - Field: fieldValues, - Msg: "constant variable must have a least 1 value provided", - }) - } - case "query": - if v.Query == "" { - failures = append(failures, validationErr{ - Field: fieldQuery, - Msg: "query variable must provide a query string", - }) - } - if v.Language != "influxql" && v.Language != "flux" { - failures = append(failures, validationErr{ - Field: fieldLanguage, - Msg: fmt.Sprintf(`query variable language must be either "influxql" or "flux"; got %q`, v.Language), - }) - } - } - if len(failures) > 0 { - return []validationErr{ - objectValidationErr(fieldSpec, failures...), - } - } - - return nil -} - -const ( - fieldReferencesEnv = "envRef" - fieldReferencesSecret = "secretRef" -) - -type references struct { - EnvRef string // key used to reference parameterized field - Secret string - - val interface{} - defaultVal interface{} - valType string -} - -func (r *references) hasValue() bool { - return r.EnvRef != "" || r.Secret != "" || r.val != nil -} - -func (r *references) hasEnvRef() bool { - return r != nil && r.EnvRef != "" -} - -func (r *references) expression() ast.Expression { - v := r.val - if v == nil { - v = r.defaultVal - } - if v == nil { - return nil - } - - switch strings.ToLower(r.valType) { - case "bool", "booleanliteral": - return astBoolFromIface(v) - case "duration", "durationliteral": - return astDurationFromIface(v) - case "float", "floatliteral": - return astFloatFromIface(v) - case "int", "integerliteral": - return astIntegerFromIface(v) - case "string", "stringliteral": - return astStringFromIface(v) - case "time", "datetimeliteral": - if v == "now()" { - return astNow() - } - return astTimeFromIface(v) - } - return nil -} - -func (r *references) Float64() float64 { - if r == nil || r.val == nil { - return 0 - } - i, _ := r.val.(float64) - return i -} - -func (r *references) Int64() int64 { - if r == nil || r.val == nil { - return 0 - } - i, _ := r.val.(int64) - return i -} - -func (r *references) String() string { - if r == nil { - return "" - } - if v := r.StringVal(); v != "" { - return v - } - if r.EnvRef != "" { - if s, _ := ifaceToStr(r.defaultVal); s != "" { - return s - } - return "env-" + r.EnvRef - } - return "" -} - -func (r *references) StringVal() string { - s, _ := ifaceToStr(r.val) - return s -} - -func (r *references) SecretField() influxdb.SecretField { - if secret := r.Secret; secret != "" { - return influxdb.SecretField{Key: secret} - } - if str := r.StringVal(); str != 
"" { - return influxdb.SecretField{Value: &str} - } - return influxdb.SecretField{} -} - -func convertRefToRefSummary(field string, ref *references) SummaryReference { - var valType string - switch strings.ToLower(ref.valType) { - case "bool", "booleanliteral": - valType = "bool" - case "duration", "durationliteral": - valType = "duration" - case "float", "floatliteral": - valType = "float" - case "int", "integerliteral": - valType = "integer" - case "string", "stringliteral": - valType = "string" - case "time", "datetimeliteral": - valType = "time" - } - - return SummaryReference{ - Field: field, - EnvRefKey: ref.EnvRef, - ValType: valType, - Value: ref.val, - DefaultValue: ref.defaultVal, - } -} - -func astBoolFromIface(v interface{}) *ast.BooleanLiteral { - b, _ := v.(bool) - return ast.BooleanLiteralFromValue(b) -} - -func astDurationFromIface(v interface{}) *ast.DurationLiteral { - s, ok := v.(string) - if !ok { - d, ok := v.(time.Duration) - if !ok { - return nil - } - s = d.String() - } - - dur, err := parser.ParseSignedDuration(s) - if err != nil { - dur, _ = parser.ParseSignedDuration("-0m") - } - return dur -} - -func astFloatFromIface(v interface{}) *ast.FloatLiteral { - if i, ok := v.(int); ok { - return ast.FloatLiteralFromValue(float64(i)) - } - f, _ := v.(float64) - return ast.FloatLiteralFromValue(f) -} - -func astIntegerFromIface(v interface{}) *ast.IntegerLiteral { - if f, ok := v.(float64); ok { - return ast.IntegerLiteralFromValue(int64(f)) - } - i, _ := v.(int64) - return ast.IntegerLiteralFromValue(i) -} - -func astNow() *ast.CallExpression { - return &ast.CallExpression{ - Callee: &ast.Identifier{Name: "now"}, - } -} - -func astStringFromIface(v interface{}) *ast.StringLiteral { - s, _ := v.(string) - return ast.StringLiteralFromValue(s) -} - -func astTimeFromIface(v interface{}) *ast.DateTimeLiteral { - if t, ok := v.(time.Time); ok { - return ast.DateTimeLiteralFromValue(t) - } - - s, ok := v.(string) - if !ok { - return nil - } - - t, err := parser.ParseTime(s) - if err != nil { - return ast.DateTimeLiteralFromValue(time.Now()) - } - return t -} - -func isValidName(name string, minLength int) (validationErr, bool) { - if len(name) >= minLength { - return validationErr{}, true - } - return validationErr{ - Field: fieldName, - Msg: fmt.Sprintf("must be a string of at least %d chars in length", minLength), - }, false -} - -func toNotificationDuration(dur time.Duration) *notification.Duration { - d, _ := notification.FromTimeDuration(dur) - return &d -} - -func durToStr(dur time.Duration) string { - if dur == 0 { - return "" - } - return dur.String() -} - -func flt64Ptr(f float64) *float64 { - if f != 0 { - return &f - } - return nil -} - -func intPtr(i int) *int { - return &i -} diff --git a/pkger/parser_models_schema.go b/pkger/parser_models_schema.go deleted file mode 100644 index e6694641d50..00000000000 --- a/pkger/parser_models_schema.go +++ /dev/null @@ -1,219 +0,0 @@ -package pkger - -import ( - "fmt" - "sort" - "strings" - - "github.com/influxdata/influxdb/v2" -) - -const ( - fieldBucketSchemaType = "schemaType" - fieldMeasurementSchemas = "measurementSchemas" - - // measurementSchema fields - fieldMeasurementSchemaName = "name" - fieldMeasurementSchemaColumns = "columns" - - // measurementColumn fields - fieldMeasurementColumnName = "name" - fieldMeasurementColumnType = "type" - fieldMeasurementColumnDataType = "dataType" -) - -type measurementSchemas []measurementSchema - -func (s measurementSchemas) valid() []validationErr { - var errs 
[]validationErr - - for idx, ms := range s { - if nestedErrs := ms.valid(); len(nestedErrs) > 0 { - errs = append(errs, validationErr{ - Field: fieldMeasurementSchemas, - Index: intPtr(idx), - Nested: nestedErrs, - }) - } - } - - return errs -} - -func (s measurementSchema) valid() []validationErr { - var errs []validationErr - - if err := influxdb.ValidateMeasurementSchemaName(s.Name); err != nil { - errs = append(errs, validationErr{ - Field: fieldMeasurementSchemaName, - Msg: err.Error(), - }) - } - - // validate columns - timeCount := 0 - fieldCount := 0 - names := make([]string, 0, len(s.Columns)) - - columnErrors := make([]validationErr, len(s.Columns)) - - for idx, col := range s.Columns { - colErr := &columnErrors[idx] - *colErr = validationErr{ - Field: fieldMeasurementSchemaColumns, - Index: intPtr(idx), - } - - names = append(names, col.Name) - - if err := influxdb.ValidateMeasurementSchemaName(col.Name); err != nil { - colErr.Nested = append(colErr.Nested, validationErr{ - Field: fieldMeasurementColumnName, - Msg: err.Error(), - }) - } - - colType := influxdb.SemanticColumnTypeFromString(col.Type) - if colType == nil { - colErr.Nested = append(colErr.Nested, validationErr{ - Field: fieldMeasurementColumnType, - Msg: "missing type", - }) - continue - } - - colDataType := influxdb.SchemaColumnDataTypeFromString(col.DataType) - - // all columns require a type field - if col.Name == "time" { - timeCount++ - if *colType != influxdb.SemanticColumnTypeTimestamp { - colErr.Nested = append(colErr.Nested, validationErr{ - Field: fieldMeasurementColumnType, - Msg: "\"time\" column type must be timestamp", - }) - } - - if colDataType != nil { - colErr.Nested = append(colErr.Nested, validationErr{ - Field: fieldMeasurementColumnDataType, - Msg: "unexpected dataType for time column", - }) - } - } - - // ensure no other columns have a timestamp semantic - switch *colType { - case influxdb.SemanticColumnTypeTimestamp: - if col.Name != "time" { - colErr.Nested = append(colErr.Nested, validationErr{ - Field: fieldMeasurementColumnName, - Msg: "timestamp column must be named \"time\"", - }) - } - - case influxdb.SemanticColumnTypeTag: - // ensure tag columns don't include a data type value - if colDataType != nil { - colErr.Nested = append(colErr.Nested, validationErr{ - Field: fieldMeasurementColumnDataType, - Msg: "unexpected dataType for tag column", - }) - } - - case influxdb.SemanticColumnTypeField: - if colDataType == nil { - colErr.Nested = append(colErr.Nested, validationErr{ - Field: fieldMeasurementColumnDataType, - Msg: "missing or invalid data type for field column", - }) - } - fieldCount++ - } - } - - // collect only those column errors with nested errors - for _, colErr := range columnErrors { - if len(colErr.Nested) > 0 { - errs = append(errs, colErr) - } - } - - if timeCount == 0 { - errs = append(errs, validationErr{ - Field: fieldMeasurementSchemaColumns, - Msg: "missing \"time\" column", - }) - } - - // ensure there is at least one field defined - if fieldCount == 0 { - errs = append(errs, validationErr{ - Field: fieldMeasurementSchemaColumns, - Msg: "at least one field column is required", - }) - } - - // check for duplicate columns using general UTF-8 case insensitive comparison - sort.Strings(names) - for i := 0; i < len(names)-1; i++ { - if strings.EqualFold(names[i], names[i+1]) { - errs = append(errs, validationErr{ - Field: fieldMeasurementSchemaColumns, - Msg: fmt.Sprintf("duplicate columns with name %q", names[i]), - }) - } - } - - return errs -} - -func (s 
measurementSchemas) summarize() []SummaryMeasurementSchema { - if len(s) == 0 { - return nil - } - - schemas := make([]SummaryMeasurementSchema, 0, len(s)) - for _, schema := range s { - schemas = append(schemas, schema.summarize()) - } - - // Measurements are in Name order for consistent output in summaries - sort.Slice(schemas, func(i, j int) bool { - return schemas[i].Name < schemas[j].Name - }) - - return schemas -} - -type measurementSchema struct { - Name string `json:"name" yaml:"name"` - Columns []measurementColumn `json:"columns" yaml:"columns"` -} - -func (s measurementSchema) summarize() SummaryMeasurementSchema { - var cols []SummaryMeasurementSchemaColumn - if len(s.Columns) > 0 { - cols = make([]SummaryMeasurementSchemaColumn, 0, len(s.Columns)) - for i := range s.Columns { - cols = append(cols, s.Columns[i].summarize()) - } - - // Columns are in Name order for consistent output in summaries - sort.Slice(cols, func(i, j int) bool { - return cols[i].Name < cols[j].Name - }) - } - - return SummaryMeasurementSchema{Name: s.Name, Columns: cols} -} - -type measurementColumn struct { - Name string `json:"name" yaml:"name"` - Type string `json:"type" yaml:"type"` - DataType string `json:"dataType,omitempty" yaml:"dataType,omitempty"` -} - -func (c measurementColumn) summarize() SummaryMeasurementSchemaColumn { - return SummaryMeasurementSchemaColumn(c) -} diff --git a/pkger/parser_test.go b/pkger/parser_test.go deleted file mode 100644 index 830160e7470..00000000000 --- a/pkger/parser_test.go +++ /dev/null @@ -1,5073 +0,0 @@ -package pkger - -import ( - "bytes" - "errors" - "fmt" - "net/url" - "path/filepath" - "sort" - "strconv" - "strings" - "sync/atomic" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/notification" - icheck "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestParse(t *testing.T) { - t.Run("template with a bucket", func(t *testing.T) { - t.Run("with valid bucket template should be valid", func(t *testing.T) { - testfileRunner(t, "testdata/bucket", func(t *testing.T, template *Template) { - buckets := template.Summary().Buckets - require.Len(t, buckets, 2) - - actual := buckets[0] - expectedBucket := SummaryBucket{ - SummaryIdentifier: SummaryIdentifier{ - Kind: KindBucket, - MetaName: "rucket-11", - EnvReferences: []SummaryReference{}, - }, - Name: "rucket-11", - Description: "bucket 1 description", - RetentionPeriod: time.Hour, - LabelAssociations: []SummaryLabel{}, - } - assert.Equal(t, expectedBucket, actual) - - actual = buckets[1] - expectedBucket = SummaryBucket{ - SummaryIdentifier: SummaryIdentifier{ - Kind: KindBucket, - MetaName: "rucket-22", - EnvReferences: []SummaryReference{}, - }, - Name: "display name", - Description: "bucket 2 description", - LabelAssociations: []SummaryLabel{}, - } - assert.Equal(t, expectedBucket, actual) - }) - }) - - t.Run("with valid bucket and schema should be valid", func(t *testing.T) { - template := validParsedTemplateFromFile(t, "testdata/bucket_schema.yml", EncodingYAML) - buckets := template.Summary().Buckets - require.Len(t, buckets, 1) - - exp := SummaryBucket{ - SummaryIdentifier: SummaryIdentifier{ - Kind: KindBucket, - MetaName: 
"explicit-11", - EnvReferences: []SummaryReference{}, - }, - Name: "my_explicit", - SchemaType: "explicit", - LabelAssociations: []SummaryLabel{}, - MeasurementSchemas: []SummaryMeasurementSchema{ - { - Name: "cpu", - Columns: []SummaryMeasurementSchemaColumn{ - {Name: "host", Type: "tag"}, - {Name: "time", Type: "timestamp"}, - {Name: "usage_user", Type: "field", DataType: "float"}, - }, - }, - }, - } - - assert.Equal(t, exp, buckets[0]) - }) - - t.Run("with env refs should be valid", func(t *testing.T) { - testfileRunner(t, "testdata/bucket_ref.yml", func(t *testing.T, template *Template) { - actual := template.Summary().Buckets - require.Len(t, actual, 1) - - expectedEnvRefs := []SummaryReference{ - { - Field: "metadata.name", - EnvRefKey: "meta-name", - DefaultValue: "meta", - }, - { - Field: "spec.name", - EnvRefKey: "spec-name", - DefaultValue: "spectacles", - }, - { - Field: "spec.associations[0].name", - EnvRefKey: "label-meta-name", - }, - } - assert.Equal(t, expectedEnvRefs, actual[0].EnvReferences) - }) - }) - - t.Run("should handle bad config", func(t *testing.T) { - tests := []testTemplateResourceError{ - { - name: "missing name", - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: -spec: -`, - }, - { - name: "mixed valid and missing name", - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: rucket-11 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: -spec: -`, - }, - { - name: "mixed valid and multiple bad names", - resourceErrs: 2, - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: rucket-11 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: -spec: ---- -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: -spec: -`, - }, - { - name: "duplicate bucket names", - resourceErrs: 1, - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: valid-name ---- -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: valid-name -`, - }, - { - name: "duplicate meta name and spec name", - resourceErrs: 1, - validationErrs: 1, - valFields: []string{fieldSpec, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: rucket-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: valid-name -spec: - name: rucket-1 -`, - }, - { - name: "spec name too short", - resourceErrs: 1, - validationErrs: 1, - valFields: []string{fieldSpec, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: rucket-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: invalid-name -spec: - name: f -`, - }, - { - name: "invalid measurement name", - resourceErrs: 1, - validationErrs: 1, - valFields: []string{strings.Join([]string{fieldSpec, fieldMeasurementSchemas}, ".")}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: foo-1 -spec: - name: foo - schemaType: explicit - measurementSchemas: - - name: _cpu - columns: - - name: time - type: timestamp - - name: usage_user - type: field - dataType: float -`, - }, - { - name: "invalid semantic type", - resourceErrs: 1, - validationErrs: 1, - valFields: 
[]string{strings.Join([]string{fieldSpec, fieldMeasurementSchemas, fieldMeasurementSchemaColumns}, ".")}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: foo-1 -spec: - name: foo - schemaType: explicit - measurementSchemas: - - name: _cpu - columns: - - name: time - type: field - - name: usage_user - type: field - dataType: float -`, - }, - { - name: "missing time column", - resourceErrs: 1, - validationErrs: 1, - valFields: []string{strings.Join([]string{fieldSpec, fieldMeasurementSchemas}, ".")}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: foo-1 -spec: - name: foo - schemaType: explicit - measurementSchemas: - - name: cpu - columns: - - name: usage_user - type: field - dataType: float -`, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, KindBucket, tt) - } - }) - }) - - t.Run("template with a label", func(t *testing.T) { - t.Run("with valid label template should be valid", func(t *testing.T) { - testfileRunner(t, "testdata/label", func(t *testing.T, template *Template) { - labels := template.Summary().Labels - require.Len(t, labels, 3) - - expectedLabel := sumLabelGen("label-1", "label-1", "#FFFFFF", "label 1 description") - assert.Equal(t, expectedLabel, labels[0]) - - expectedLabel = sumLabelGen("label-2", "label-2", "#000000", "label 2 description") - assert.Equal(t, expectedLabel, labels[1]) - - expectedLabel = sumLabelGen("label-3", "display name", "", "label 3 description") - assert.Equal(t, expectedLabel, labels[2]) - }) - }) - - t.Run("with env refs should be valid", func(t *testing.T) { - testfileRunner(t, "testdata/label_ref.yml", func(t *testing.T, template *Template) { - actual := template.Summary().Labels - require.Len(t, actual, 1) - - expected := sumLabelGen("env-meta-name", "env-spec-name", "", "", - SummaryReference{ - Field: "metadata.name", - EnvRefKey: "meta-name", - }, - SummaryReference{ - Field: "spec.name", - EnvRefKey: "spec-name", - }, - ) - assert.Contains(t, actual, expected) - }) - }) - - t.Run("with missing label name should error", func(t *testing.T) { - tests := []testTemplateResourceError{ - { - name: "missing name", - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: -spec: -`, - }, - { - name: "mixed valid and missing name", - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: valid-name -spec: ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: -spec: -`, - }, - { - name: "duplicate names", - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: valid-name -spec: ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: valid-name -spec: -`, - }, - { - name: "multiple labels with missing name", - resourceErrs: 2, - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Label ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -`, - }, - { - name: "duplicate meta name and spec name", - validationErrs: 1, - valFields: []string{fieldSpec, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: valid-name -spec: ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 -spec: - name: valid-name -`, - }, - { - 
name: "spec name to short", - validationErrs: 1, - valFields: []string{fieldSpec, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: valid-name -spec: ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 -spec: - name: a -`, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, KindLabel, tt) - } - }) - }) - - t.Run("template with buckets and labels associated", func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - testfileRunner(t, "testdata/bucket_associates_label", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Labels, 2) - - bkts := sum.Buckets - require.Len(t, bkts, 3) - - expectedLabels := []struct { - bktName string - labels []string - }{ - { - bktName: "rucket-1", - labels: []string{"label-1"}, - }, - { - bktName: "rucket-2", - labels: []string{"label-2"}, - }, - { - bktName: "rucket-3", - labels: []string{"label-1", "label-2"}, - }, - } - for i, expected := range expectedLabels { - bkt := bkts[i] - require.Len(t, bkt.LabelAssociations, len(expected.labels)) - - for j, label := range expected.labels { - assert.Equal(t, label, bkt.LabelAssociations[j].Name) - } - } - - expectedMappings := []SummaryLabelMapping{ - { - ResourceMetaName: "rucket-1", - ResourceName: "rucket-1", - LabelMetaName: "label-1", - LabelName: "label-1", - }, - { - ResourceMetaName: "rucket-2", - ResourceName: "rucket-2", - LabelMetaName: "label-2", - LabelName: "label-2", - }, - { - ResourceMetaName: "rucket-3", - ResourceName: "rucket-3", - LabelMetaName: "label-1", - LabelName: "label-1", - }, - { - ResourceMetaName: "rucket-3", - ResourceName: "rucket-3", - LabelMetaName: "label-2", - LabelName: "label-2", - }, - } - - for _, expectedMapping := range expectedMappings { - expectedMapping.Status = StateStatusNew - expectedMapping.ResourceType = influxdb.BucketsResourceType - assert.Contains(t, sum.LabelMappings, expectedMapping) - } - }) - }) - - t.Run("association doesn't exist then provides an error", func(t *testing.T) { - tests := []testTemplateResourceError{ - { - name: "no labels provided", - assErrs: 1, - assIdxs: []int{0}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: rucket-1 -spec: - associations: - - kind: Label - name: label-1 -`, - }, - { - name: "mixed found and not found", - assErrs: 1, - assIdxs: []int{1}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: rucket-3 -spec: - associations: - - kind: Label - name: label-1 - - kind: Label - name: NOT TO BE FOUND -`, - }, - { - name: "multiple not found", - assErrs: 1, - assIdxs: []int{0, 1}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: rucket-3 -spec: - associations: - - kind: Label - name: label-1 - - kind: Label - name: label-2 -`, - }, - { - name: "duplicate valid nested labels", - assErrs: 1, - assIdxs: []int{1}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: rucket-3 -spec: - associations: - - kind: Label - name: label-1 - - kind: Label - name: label-1 -`, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, KindBucket, tt) - } - }) - }) - - t.Run("template with checks", func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - testfileRunner(t, "testdata/checks", func(t 
*testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Checks, 2) - - check1 := sum.Checks[0] - assert.Equal(t, KindCheckThreshold, check1.Kind) - thresholdCheck, ok := check1.Check.(*icheck.Threshold) - require.Truef(t, ok, "got: %#v", check1) - - expectedBase := icheck.Base{ - Name: "check-0", - Description: "desc_0", - Every: mustDuration(t, time.Minute), - Offset: mustDuration(t, 15*time.Second), - StatusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }", - Tags: []influxdb.Tag{ - {Key: "tag_1", Value: "val_1"}, - {Key: "tag_2", Value: "val_2"}, - }, - } - expectedBase.Query.Text = "from(bucket: \"rucket_1\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"cpu\")\n |> filter(fn: (r) => r._field == \"usage_idle\")\n |> aggregateWindow(every: 1m, fn: mean)\n |> yield(name: \"mean\")" - assert.Equal(t, expectedBase, thresholdCheck.Base) - - expectedThresholds := []icheck.ThresholdConfig{ - icheck.Greater{ - ThresholdConfigBase: icheck.ThresholdConfigBase{ - AllValues: true, - Level: notification.Critical, - }, - Value: 50.0, - }, - icheck.Lesser{ - ThresholdConfigBase: icheck.ThresholdConfigBase{Level: notification.Warn}, - Value: 49.9, - }, - icheck.Range{ - ThresholdConfigBase: icheck.ThresholdConfigBase{Level: notification.Info}, - Within: true, - Min: 30.0, - Max: 45.0, - }, - icheck.Range{ - ThresholdConfigBase: icheck.ThresholdConfigBase{Level: notification.Ok}, - Min: 30.0, - Max: 35.0, - }, - } - assert.Equal(t, expectedThresholds, thresholdCheck.Thresholds) - assert.Equal(t, influxdb.Inactive, check1.Status) - assert.Len(t, check1.LabelAssociations, 1) - - check2 := sum.Checks[1] - assert.Equal(t, KindCheckDeadman, check2.Kind) - deadmanCheck, ok := check2.Check.(*icheck.Deadman) - require.Truef(t, ok, "got: %#v", check2) - - expectedBase = icheck.Base{ - Name: "display name", - Description: "desc_1", - Every: mustDuration(t, 5*time.Minute), - Offset: mustDuration(t, 10*time.Second), - StatusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }", - Tags: []influxdb.Tag{ - {Key: "tag_1", Value: "val_1"}, - {Key: "tag_2", Value: "val_2"}, - }, - } - expectedBase.Query.Text = "from(bucket: \"rucket_1\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"cpu\")\n |> filter(fn: (r) => r._field == \"usage_idle\")\n |> aggregateWindow(every: 1m, fn: mean)\n |> yield(name: \"mean\")" - assert.Equal(t, expectedBase, deadmanCheck.Base) - assert.Equal(t, influxdb.Active, check2.Status) - assert.Equal(t, mustDuration(t, 10*time.Minute), deadmanCheck.StaleTime) - assert.Equal(t, mustDuration(t, 90*time.Second), deadmanCheck.TimeSince) - assert.True(t, deadmanCheck.ReportZero) - assert.Len(t, check2.LabelAssociations, 1) - - expectedMappings := []SummaryLabelMapping{ - { - LabelMetaName: "label-1", - LabelName: "label-1", - ResourceMetaName: "check-0", - ResourceName: "check-0", - }, - { - LabelMetaName: "label-1", - LabelName: "label-1", - ResourceMetaName: "check-1", - ResourceName: "display name", - }, - } - for _, expected := range expectedMappings { - expected.Status = StateStatusNew - expected.ResourceType = influxdb.ChecksResourceType - assert.Contains(t, sum.LabelMappings, expected) - } - }) - }) - - t.Run("with env refs should be successful", func(t *testing.T) { - testfileRunner(t, "testdata/checks_ref.yml", func(t *testing.T, template *Template) { - actual := template.Summary().Checks - require.Len(t, actual, 1) - - expectedEnvRefs 
:= []SummaryReference{ - { - Field: "metadata.name", - EnvRefKey: "meta-name", - DefaultValue: "meta", - }, - { - Field: "spec.name", - EnvRefKey: "spec-name", - DefaultValue: "spectacles", - }, - { - Field: "spec.associations[0].name", - EnvRefKey: "label-meta-name", - }, - } - assert.Equal(t, expectedEnvRefs, actual[0].EnvReferences) - }) - }) - - t.Run("handles bad config", func(t *testing.T) { - tests := []struct { - kind Kind - resErr testTemplateResourceError - }{ - { - kind: KindCheckDeadman, - resErr: testTemplateResourceError{ - name: "duplicate name", - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: CheckDeadman -metadata: - name: check-1 -spec: - every: 5m - level: cRiT - query: > - from(bucket: "rucket_1") |> yield(name: "mean") - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" ---- -apiVersion: influxdata.com/v2alpha1 -kind: CheckDeadman -metadata: - name: check-1 -spec: - every: 5m - level: cRiT - query: > - from(bucket: "rucket_1") |> yield(name: "mean") - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" -`, - }, - }, - { - kind: KindCheckThreshold, - resErr: testTemplateResourceError{ - name: "missing every duration", - validationErrs: 1, - valFields: []string{fieldSpec, fieldEvery}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: CheckThreshold -metadata: - name: check-0 -spec: - query: > - from(bucket: "rucket_1") |> yield(name: "mean") - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - thresholds: - - type: outside_range - level: ok - min: 30.0 - max: 35.0 - -`, - }, - }, - { - kind: KindCheckThreshold, - resErr: testTemplateResourceError{ - name: "invalid threshold value provided", - validationErrs: 1, - valFields: []string{fieldSpec, fieldLevel}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: CheckThreshold -metadata: - name: check-0 -spec: - every: 1m - query: > - from(bucket: "rucket_1") |> yield(name: "mean") - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - thresholds: - - type: greater - level: RANDO - value: 50.0 -`, - }, - }, - { - kind: KindCheckThreshold, - resErr: testTemplateResourceError{ - name: "invalid threshold type provided", - validationErrs: 1, - valFields: []string{fieldSpec, fieldType}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: CheckThreshold -metadata: - name: check-0 -spec: - every: 1m - query: > - from(bucket: "rucket_1") |> yield(name: "mean") - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - thresholds: - - type: RANDO_TYPE - level: CRIT - value: 50.0 -`, - }, - }, - { - kind: KindCheckThreshold, - resErr: testTemplateResourceError{ - name: "invalid min for inside range", - validationErrs: 1, - valFields: []string{fieldSpec, fieldMin}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: CheckThreshold -metadata: - name: check-0 -spec: - every: 1m - query: > - from(bucket: "rucket_1") |> yield(name: "mean") - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - thresholds: - - type: inside_range - level: INfO - min: 45.0 - max: 30.0 -`, - }, - }, - { - kind: KindCheckThreshold, - resErr: testTemplateResourceError{ - name: "no threshold values provided", - validationErrs: 1, - valFields: []string{fieldSpec, fieldCheckThresholds}, - templateStr: `--- -apiVersion: influxdata.com/v2alpha1 -kind: CheckThreshold -metadata: - name: check-0 -spec: - every: 1m - query: > - from(bucket: "rucket_1") |> 
yield(name: "mean") - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - thresholds: -`, - }, - }, - { - kind: KindCheckThreshold, - resErr: testTemplateResourceError{ - name: "threshold missing query", - validationErrs: 1, - valFields: []string{fieldSpec, fieldQuery}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: CheckThreshold -metadata: - name: check-0 -spec: - every: 1m - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - thresholds: - - type: greater - level: CRIT - value: 50.0 -`, - }, - }, - { - kind: KindCheckThreshold, - resErr: testTemplateResourceError{ - name: "invalid status provided", - validationErrs: 1, - valFields: []string{fieldSpec, fieldStatus}, - templateStr: `--- -apiVersion: influxdata.com/v2alpha1 -kind: CheckThreshold -metadata: - name: check-0 -spec: - every: 1m - query: > - from(bucket: "rucket_1") |> yield(name: "mean") - status: RANDO STATUS - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - thresholds: - - type: greater - level: CRIT - value: 50.0 - allValues: true -`, - }, - }, - { - kind: KindCheckThreshold, - resErr: testTemplateResourceError{ - name: "missing status message template", - validationErrs: 1, - valFields: []string{fieldSpec, fieldCheckStatusMessageTemplate}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: CheckThreshold -metadata: - name: check-0 -spec: - every: 1m - query: > - from(bucket: "rucket_1") - thresholds: - - type: greater - level: CRIT - value: 50.0 -`, - }, - }, - { - kind: KindCheckDeadman, - resErr: testTemplateResourceError{ - name: "missing every", - validationErrs: 1, - valFields: []string{fieldSpec, fieldEvery}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: CheckDeadman -metadata: - name: check-1 -spec: - level: cRiT - query: > - from(bucket: "rucket_1") |> yield(name: "mean") - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - timeSince: 90s -`, - }, - }, - { - kind: KindCheckDeadman, - resErr: testTemplateResourceError{ - name: "deadman missing every", - validationErrs: 1, - valFields: []string{fieldSpec, fieldQuery}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: CheckDeadman -metadata: - name: check-1 -spec: - every: 5m - level: cRiT - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - timeSince: 90s -`, - }, - }, - { - kind: KindCheckDeadman, - resErr: testTemplateResourceError{ - name: "missing association label", - validationErrs: 1, - valFields: []string{fieldSpec, fieldAssociations}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: CheckDeadman -metadata: - name: check-1 -spec: - every: 5m - level: cRiT - query: > - from(bucket: "rucket_1") |> yield(name: "mean") - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - timeSince: 90s - associations: - - kind: Label - name: label-1 -`, - }, - }, - { - kind: KindCheckDeadman, - resErr: testTemplateResourceError{ - name: "duplicate association labels", - validationErrs: 1, - valFields: []string{fieldSpec, fieldAssociations}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: CheckDeadman -metadata: - name: check-1 -spec: - every: 5m - level: cRiT - query: > - from(bucket: "rucket_1") |> yield(name: "mean") - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - timeSince: 90s - associations: - - kind: Label - name: label-1 - - kind: Label - name: label-1 -`, - }, - }, - /* checks are not 
name unique - { - kind: KindCheckDeadman, - resErr: testTemplateResourceError{ - name: "duplicate meta name and spec name", - validationErrs: 1, - valFields: []string{fieldSpec, fieldAssociations}, - templateStr: ` - apiVersion: influxdata.com/v2alpha1 - kind: CheckDeadman - metadata: - name: check-1 - spec: - every: 5m - level: cRiT - query: > - from(bucket: "rucket_1") |> yield(name: "mean") - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - timeSince: 90s - --- - apiVersion: influxdata.com/v2alpha1 - kind: CheckDeadman - metadata: - name: valid-name - spec: - name: check-1 - every: 5m - level: cRiT - query: > - from(bucket: "rucket_1") |> yield(name: "mean") - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - timeSince: 90s - `, - }, - }, - */ - } - - for _, tt := range tests { - testTemplateErrors(t, tt.kind, tt.resErr) - } - }) - }) - - t.Run("template with dashboard", func(t *testing.T) { - t.Run("single chart should be successful", func(t *testing.T) { - t.Run("gauge chart", func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard_gauge", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Dashboards, 1) - - actual := sum.Dashboards[0] - assert.Equal(t, KindDashboard, actual.Kind) - assert.Equal(t, "dash-1", actual.Name) - assert.Equal(t, "desc1", actual.Description) - - require.Len(t, actual.Charts, 1) - actualChart := actual.Charts[0] - assert.Equal(t, 3, actualChart.Height) - assert.Equal(t, 6, actualChart.Width) - assert.Equal(t, 1, actualChart.XPosition) - assert.Equal(t, 2, actualChart.YPosition) - - props, ok := actualChart.Properties.(influxdb.GaugeViewProperties) - require.True(t, ok) - assert.Equal(t, "gauge", props.GetType()) - assert.Equal(t, "gauge note", props.Note) - assert.True(t, props.ShowNoteWhenEmpty) - - require.Len(t, props.Queries, 1) - q := props.Queries[0] - queryText := `from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_writes_total") |> filter(fn: (r) => r._field == "counter")` - assert.Equal(t, queryText, q.Text) - assert.Equal(t, "advanced", q.EditMode) - - require.Len(t, props.ViewColors, 3) - c := props.ViewColors[0] - assert.Equal(t, "laser", c.Name) - assert.Equal(t, "min", c.Type) - assert.Equal(t, "#8F8AF4", c.Hex) - assert.Equal(t, 0.0, c.Value) - }) - }) - - t.Run("handles invalid config", func(t *testing.T) { - tests := []testTemplateResourceError{ - { - name: "color mixing a hex value", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].colors[0].hex"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: gauge - name: gauge - note: gauge note - noteOnEmpty: true - xPos: 1 - yPos: 2 - width: 6 - height: 3 - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_writes_total") |> filter(fn: (r) => r._field == "counter") - colors: - - name: laser - type: min - value: 0 - - name: laser - type: threshold - hex: "#8F8AF4" - value: 700 - - name: laser - type: max - hex: "#8F8AF4" - value: 5000 -`, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, KindDashboard, tt) - } - }) - }) - - t.Run("heatmap chart", func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard_heatmap", func(t *testing.T, 
template *Template) { - sum := template.Summary() - require.Len(t, sum.Dashboards, 1) - - actual := sum.Dashboards[0] - assert.Equal(t, KindDashboard, actual.Kind) - assert.Equal(t, "dash-0", actual.Name) - assert.Equal(t, "a dashboard w/ heatmap chart", actual.Description) - - require.Len(t, actual.Charts, 1) - actualChart := actual.Charts[0] - assert.Equal(t, 3, actualChart.Height) - assert.Equal(t, 6, actualChart.Width) - assert.Equal(t, 1, actualChart.XPosition) - assert.Equal(t, 2, actualChart.YPosition) - - props, ok := actualChart.Properties.(influxdb.HeatmapViewProperties) - require.True(t, ok) - assert.Equal(t, "heatmap", props.GetType()) - assert.Equal(t, "heatmap note", props.Note) - assert.Equal(t, int32(10), props.BinSize) - assert.Equal(t, []string{"xTotalTicks", "xTickStart", "xTickStep"}, props.GenerateXAxisTicks) - assert.Equal(t, 15, props.XTotalTicks) - assert.Equal(t, 0.0, props.XTickStart) - assert.Equal(t, 1000.0, props.XTickStep) - assert.Equal(t, []string{"yTotalTicks", "yTickStart", "yTickStep"}, props.GenerateYAxisTicks) - assert.Equal(t, 10, props.YTotalTicks) - assert.Equal(t, 0.0, props.YTickStart) - assert.Equal(t, 100.0, props.YTickStep) - assert.Equal(t, true, props.LegendColorizeRows) - assert.Equal(t, false, props.LegendHide) - assert.Equal(t, 1.0, props.LegendOpacity) - assert.Equal(t, 5, props.LegendOrientationThreshold) - assert.True(t, props.ShowNoteWhenEmpty) - - assert.Equal(t, []float64{0, 10}, props.XDomain) - assert.Equal(t, []float64{0, 100}, props.YDomain) - - require.Len(t, props.Queries, 1) - q := props.Queries[0] - queryText := `from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean")` - assert.Equal(t, queryText, q.Text) - assert.Equal(t, "advanced", q.EditMode) - - require.Len(t, props.ViewColors, 12) - c := props.ViewColors[0] - assert.Equal(t, "#000004", c) - }) - }) - - t.Run("handles invalid config", func(t *testing.T) { - tests := []testTemplateResourceError{ - { - name: "a color is missing a hex value", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].colors[2].hex"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-0 -spec: - charts: - - kind: heatmap - name: heatmap - xPos: 1 - yPos: 2 - width: 6 - height: 3 - binSize: 10 - xCol: _time - yCol: _value - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - hex: "#fbb61a" - - hex: "#f4df53" - - hex: "" - axes: - - name: "x" - label: "x_label" - prefix: "x_prefix" - suffix: "x_suffix" - domain: - - 0 - - 10 - - name: "y" - label: "y_label" - prefix: "y_prefix" - suffix: "y_suffix" - domain: - - 0 - - 100 -`, - }, - { - name: "missing axes", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].axes"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-0 -spec: - charts: - - kind: heatmap - name: heatmap - xPos: 1 - yPos: 2 - width: 6 - height: 3 - binSize: 10 - xCol: _time - yCol: _value - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") 
|> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - hex: "#000004" -`, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, KindDashboard, tt) - } - }) - }) - - t.Run("histogram chart", func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard_histogram", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Dashboards, 1) - - actual := sum.Dashboards[0] - assert.Equal(t, KindDashboard, actual.Kind) - assert.Equal(t, "dash-0", actual.Name) - assert.Equal(t, "a dashboard w/ single histogram chart", actual.Description) - - require.Len(t, actual.Charts, 1) - actualChart := actual.Charts[0] - assert.Equal(t, 3, actualChart.Height) - assert.Equal(t, 6, actualChart.Width) - - props, ok := actualChart.Properties.(influxdb.HistogramViewProperties) - require.True(t, ok) - assert.Equal(t, "histogram", props.GetType()) - assert.Equal(t, "histogram note", props.Note) - assert.Equal(t, 30, props.BinCount) - assert.Equal(t, true, props.LegendColorizeRows) - assert.Equal(t, false, props.LegendHide) - assert.Equal(t, 1.0, props.LegendOpacity) - assert.Equal(t, 5, props.LegendOrientationThreshold) - assert.True(t, props.ShowNoteWhenEmpty) - assert.Equal(t, []float64{0, 10}, props.XDomain) - assert.Equal(t, []string{"a", "b"}, props.FillColumns) - require.Len(t, props.Queries, 1) - q := props.Queries[0] - queryText := `from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_reads_total") |> filter(fn: (r) => r._field == "counter")` - assert.Equal(t, queryText, q.Text) - assert.Equal(t, "advanced", q.EditMode) - - require.Len(t, props.ViewColors, 3) - assert.Equal(t, "#8F8AF4", props.ViewColors[0].Hex) - assert.Equal(t, "#F4CF31", props.ViewColors[1].Hex) - assert.Equal(t, "#FFFFFF", props.ViewColors[2].Hex) - }) - }) - - t.Run("handles invalid config", func(t *testing.T) { - tests := []testTemplateResourceError{ - { - name: "missing x-axis", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].axes"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-0 -spec: - description: a dashboard w/ single histogram chart - charts: - - kind: Histogram - name: histogram chart - xCol: _value - width: 6 - height: 3 - binCount: 30 - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_reads_total") |> filter(fn: (r) => r._field == "counter") - colors: - - hex: "#8F8AF4" - type: scale - value: 0 - name: mycolor - axes: -`, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, KindDashboard, tt) - } - }) - }) - - t.Run("markdown chart", func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard_markdown", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Dashboards, 1) - - actual := sum.Dashboards[0] - assert.Equal(t, KindDashboard, actual.Kind) - assert.Equal(t, "dash-0", actual.Name) - assert.Equal(t, "a dashboard w/ single markdown chart", actual.Description) - - require.Len(t, actual.Charts, 1) - actualChart := actual.Charts[0] - - props, ok := actualChart.Properties.(influxdb.MarkdownViewProperties) - require.True(t, ok) - assert.Equal(t, "markdown", props.GetType()) - assert.Equal(t, "## markdown note", props.Note) - }) - }) - }) - - t.Run("mosaic chart", 
func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard_mosaic.yml", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Dashboards, 1) - - actual := sum.Dashboards[0] - assert.Equal(t, KindDashboard, actual.Kind) - assert.Equal(t, "dash-0", actual.Name) - assert.Equal(t, "a dashboard w/ single mosaic chart", actual.Description) - - require.Len(t, actual.Charts, 1) - actualChart := actual.Charts[0] - assert.Equal(t, 3, actualChart.Height) - assert.Equal(t, 6, actualChart.Width) - assert.Equal(t, 1, actualChart.XPosition) - assert.Equal(t, 2, actualChart.YPosition) - - props, ok := actualChart.Properties.(influxdb.MosaicViewProperties) - require.True(t, ok) - assert.Equal(t, "mosaic note", props.Note) - assert.Equal(t, "y", props.HoverDimension) - assert.True(t, props.ShowNoteWhenEmpty) - - require.Len(t, props.Queries, 1) - q := props.Queries[0] - expectedQuery := `from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean")` - assert.Equal(t, expectedQuery, q.Text) - assert.Equal(t, "advanced", q.EditMode) - - assert.Equal(t, ",", props.YLabelColumnSeparator) - assert.Equal(t, []string{"foo"}, props.YLabelColumns) - assert.Equal(t, []string{"_value", "foo"}, props.YSeriesColumns) - assert.Equal(t, []float64{0, 10}, props.XDomain) - assert.Equal(t, []float64{0, 100}, props.YDomain) - assert.Equal(t, "x_label", props.XAxisLabel) - assert.Equal(t, "y_label", props.YAxisLabel) - assert.Equal(t, "x_prefix", props.XPrefix) - assert.Equal(t, "y_prefix", props.YPrefix) - assert.Equal(t, "x_suffix", props.XSuffix) - assert.Equal(t, "y_suffix", props.YSuffix) - assert.Equal(t, []string{"xTotalTicks", "xTickStart", "xTickStep"}, props.GenerateXAxisTicks) - assert.Equal(t, 15, props.XTotalTicks) - assert.Equal(t, 0.0, props.XTickStart) - assert.Equal(t, 1000.0, props.XTickStep) - assert.Equal(t, true, props.LegendColorizeRows) - assert.Equal(t, false, props.LegendHide) - assert.Equal(t, 1.0, props.LegendOpacity) - assert.Equal(t, 5, props.LegendOrientationThreshold) - }) - }) - }) - - t.Run("band chart", func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard_band.yml", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Dashboards, 1) - - actual := sum.Dashboards[0] - assert.Equal(t, KindDashboard, actual.Kind) - assert.Equal(t, "dash-1", actual.Name) - assert.Equal(t, "a dashboard w/ single band chart", actual.Description) - - require.Len(t, actual.Charts, 1) - actualChart := actual.Charts[0] - assert.Equal(t, 3, actualChart.Height) - assert.Equal(t, 6, actualChart.Width) - assert.Equal(t, 1, actualChart.XPosition) - assert.Equal(t, 2, actualChart.YPosition) - - props, ok := actualChart.Properties.(influxdb.BandViewProperties) - require.True(t, ok) - assert.Equal(t, "band note", props.Note) - assert.True(t, props.ShowNoteWhenEmpty) - assert.Equal(t, "y", props.HoverDimension) - assert.Equal(t, "foo", props.UpperColumn) - assert.Equal(t, "baz", props.MainColumn) - assert.Equal(t, "bar", props.LowerColumn) - assert.Equal(t, []string{"xTotalTicks", "xTickStart", "xTickStep"}, props.GenerateXAxisTicks) - assert.Equal(t, 15, props.XTotalTicks) - assert.Equal(t, 0.0, props.XTickStart) - assert.Equal(t, 1000.0, props.XTickStep) - assert.Equal(t, 
[]string{"yTotalTicks", "yTickStart", "yTickStep"}, props.GenerateYAxisTicks) - assert.Equal(t, 10, props.YTotalTicks) - assert.Equal(t, 0.0, props.YTickStart) - assert.Equal(t, 100.0, props.YTickStep) - assert.Equal(t, true, props.LegendColorizeRows) - assert.Equal(t, false, props.LegendHide) - assert.Equal(t, 1.0, props.LegendOpacity) - assert.Equal(t, 5, props.LegendOrientationThreshold) - assert.Equal(t, true, props.StaticLegend.ColorizeRows) - assert.Equal(t, 0.2, props.StaticLegend.HeightRatio) - assert.Equal(t, true, props.StaticLegend.Show) - assert.Equal(t, 1.0, props.StaticLegend.Opacity) - assert.Equal(t, 5, props.StaticLegend.OrientationThreshold) - assert.Equal(t, "y", props.StaticLegend.ValueAxis) - assert.Equal(t, 1.0, props.StaticLegend.WidthRatio) - - require.Len(t, props.ViewColors, 1) - c := props.ViewColors[0] - assert.Equal(t, "laser", c.Name) - assert.Equal(t, "scale", c.Type) - assert.Equal(t, "#8F8AF4", c.Hex) - assert.Equal(t, 3.0, c.Value) - - require.Len(t, props.Queries, 1) - q := props.Queries[0] - expectedQuery := `from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean")` - assert.Equal(t, expectedQuery, q.Text) - assert.Equal(t, "advanced", q.EditMode) - - for _, key := range []string{"x", "y"} { - xAxis, ok := props.Axes[key] - require.True(t, ok, "key="+key) - assert.Equal(t, key+"_label", xAxis.Label, "key="+key) - assert.Equal(t, key+"_prefix", xAxis.Prefix, "key="+key) - assert.Equal(t, key+"_suffix", xAxis.Suffix, "key="+key) - } - - }) - }) - }) - - t.Run("scatter chart", func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard_scatter", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Dashboards, 1) - - actual := sum.Dashboards[0] - assert.Equal(t, KindDashboard, actual.Kind) - assert.Equal(t, "dash-0", actual.Name) - assert.Equal(t, "a dashboard w/ single scatter chart", actual.Description) - - require.Len(t, actual.Charts, 1) - actualChart := actual.Charts[0] - assert.Equal(t, 3, actualChart.Height) - assert.Equal(t, 6, actualChart.Width) - assert.Equal(t, 1, actualChart.XPosition) - assert.Equal(t, 2, actualChart.YPosition) - - props, ok := actualChart.Properties.(influxdb.ScatterViewProperties) - require.True(t, ok) - assert.Equal(t, "scatter note", props.Note) - assert.True(t, props.ShowNoteWhenEmpty) - - require.Len(t, props.Queries, 1) - q := props.Queries[0] - expectedQuery := `from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean")` - assert.Equal(t, expectedQuery, q.Text) - assert.Equal(t, "advanced", q.EditMode) - - assert.Equal(t, []float64{0, 10}, props.XDomain) - assert.Equal(t, []float64{0, 100}, props.YDomain) - assert.Equal(t, "x_label", props.XAxisLabel) - assert.Equal(t, "y_label", props.YAxisLabel) - assert.Equal(t, "x_prefix", props.XPrefix) - assert.Equal(t, "y_prefix", props.YPrefix) - assert.Equal(t, "x_suffix", props.XSuffix) - assert.Equal(t, "y_suffix", props.YSuffix) - assert.Equal(t, []string{"xTotalTicks", "xTickStart", "xTickStep"}, props.GenerateXAxisTicks) - assert.Equal(t, 15, props.XTotalTicks) - assert.Equal(t, 0.0, props.XTickStart) - assert.Equal(t, 
1000.0, props.XTickStep) - assert.Equal(t, []string{"yTotalTicks", "yTickStart", "yTickStep"}, props.GenerateYAxisTicks) - assert.Equal(t, 10, props.YTotalTicks) - assert.Equal(t, 0.0, props.YTickStart) - assert.Equal(t, 100.0, props.YTickStep) - assert.Equal(t, true, props.LegendColorizeRows) - assert.Equal(t, false, props.LegendHide) - assert.Equal(t, 1.0, props.LegendOpacity) - assert.Equal(t, 5, props.LegendOrientationThreshold) - }) - }) - - t.Run("handles invalid config", func(t *testing.T) { - tests := []testTemplateResourceError{ - { - name: "missing axes", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].axes"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-0 -spec: - description: a dashboard w/ single scatter chart - charts: - - kind: Scatter - name: scatter chart - xPos: 1 - yPos: 2 - xCol: _time - yCol: _value - width: 6 - height: 3 - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - hex: "#8F8AF4" - - hex: "#F4CF31" -`, - }, - { - name: "no width provided", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].width"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-0 -spec: - description: a dashboard w/ single scatter chart - charts: - - kind: Scatter - name: scatter chart - xPos: 1 - yPos: 2 - xCol: _time - yCol: _value - height: 3 - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - hex: "#8F8AF4" - - hex: "#F4CF31" - - hex: "#FFFFFF" - axes: - - name : "x" - label: x_label - prefix: x_prefix - suffix: x_suffix - domain: - - 0 - - 10 - - name: "y" - label: y_label - prefix: y_prefix - suffix: y_suffix - domain: - - 0 - - 100 -`, - }, - { - name: "no height provided", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].height"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-0 -spec: - description: a dashboard w/ single scatter chart - charts: - - kind: Scatter - name: scatter chart - xPos: 1 - yPos: 2 - xCol: _time - yCol: _value - width: 6 - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - hex: "#8F8AF4" - - hex: "#F4CF31" - - hex: "#FFFFFF" - axes: - - name : "x" - label: x_label - prefix: x_prefix - suffix: x_suffix - domain: - - 0 - - 10 - - name: "y" - label: y_label - prefix: y_prefix - suffix: y_suffix - domain: - - 0 - - 100 -`, - }, - { - name: "missing hex color", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].colors[0].hex"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-0 -spec: - description: a dashboard w/ single scatter chart - charts: - - kind: Scatter - name: scatter chart - note: scatter note - noteOnEmpty: true - prefix: sumtin - suffix: days - xPos: 1 - yPos: 2 - xCol: _time - yCol: _value - width: 6 - height: 3 - queries: - 
- query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - hex: "" - axes: - - name : "x" - label: x_label - prefix: x_prefix - suffix: x_suffix - domain: - - 0 - - 10 - - name: "y" - label: y_label - prefix: y_prefix - suffix: y_suffix - domain: - - 0 - - 100 -`, - }, - { - name: "missing x axis", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].axes"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-0 -spec: - description: a dashboard w/ single scatter chart - charts: - - kind: Scatter - name: scatter chart - xPos: 1 - yPos: 2 - xCol: _time - yCol: _value - width: 6 - height: 3 - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - hex: "#8F8AF4" - - hex: "#F4CF31" - - hex: "#FFFFFF" - axes: - - name: "y" - label: y_label - prefix: y_prefix - suffix: y_suffix - domain: - - 0 - - 100 -`, - }, - { - name: "missing y axis", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].axes"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-0 -spec: - description: a dashboard w/ single scatter chart - charts: - - kind: Scatter - name: scatter chart - xPos: 1 - yPos: 2 - xCol: _time - yCol: _value - width: 6 - height: 3 - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - hex: "#8F8AF4" - - hex: "#F4CF31" - - hex: "#FFFFFF" - axes: - - name : "x" - label: x_label - prefix: x_prefix - suffix: x_suffix - domain: - - 0 - - 10 -`, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, KindDashboard, tt) - } - }) - }) - - t.Run("single stat chart", func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Dashboards, 2) - - actual := sum.Dashboards[0] - assert.Equal(t, KindDashboard, actual.Kind) - assert.Equal(t, "dash-1", actual.MetaName) - assert.Equal(t, "display name", actual.Name) - assert.Equal(t, "desc1", actual.Description) - - require.Len(t, actual.Charts, 1) - actualChart := actual.Charts[0] - assert.Equal(t, 3, actualChart.Height) - assert.Equal(t, 6, actualChart.Width) - assert.Equal(t, 1, actualChart.XPosition) - assert.Equal(t, 2, actualChart.YPosition) - - props, ok := actualChart.Properties.(influxdb.SingleStatViewProperties) - require.True(t, ok) - assert.Equal(t, "single-stat", props.GetType()) - assert.Equal(t, "single stat note", props.Note) - assert.True(t, props.ShowNoteWhenEmpty) - assert.True(t, props.DecimalPlaces.IsEnforced) - assert.Equal(t, int32(1), props.DecimalPlaces.Digits) - assert.Equal(t, "days", props.Suffix) - assert.Equal(t, "true", props.TickSuffix) - assert.Equal(t, "sumtin", props.Prefix) - assert.Equal(t, "true", props.TickPrefix) - - require.Len(t, props.Queries, 1) - q := props.Queries[0] - queryText := `from(bucket: v.bucket) |> 
range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "processes") |> filter(fn: (r) => r._field == "running" or r._field == "blocked") |> aggregateWindow(every: v.windowPeriod, fn: max) |> yield(name: "max")` - assert.Equal(t, queryText, q.Text) - assert.Equal(t, "advanced", q.EditMode) - - require.Len(t, props.ViewColors, 1) - c := props.ViewColors[0] - assert.Equal(t, "laser", c.Name) - assert.Equal(t, "text", c.Type) - assert.Equal(t, "#8F8AF4", c.Hex) - assert.Equal(t, 3.0, c.Value) - - actual2 := sum.Dashboards[1] - assert.Equal(t, "dash-2", actual2.MetaName) - assert.Equal(t, "dash-2", actual2.Name) - assert.Equal(t, "desc", actual2.Description) - }) - }) - - t.Run("handles invalid config", func(t *testing.T) { - tests := []testTemplateResourceError{ - { - name: "color missing hex value", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].colors[0].hex"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: Single_Stat - name: single stat - xPos: 1 - yPos: 2 - width: 6 - height: 3 - decimalPlaces: 1 - shade: true - hoverDimension: y - queries: - - query: "from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == \"processes\") |> filter(fn: (r) => r._field == \"running\" or r._field == \"blocked\") |> aggregateWindow(every: v.windowPeriod, fn: max) |> yield(name: \"max\")" - colors: - - name: laser - type: text - value: 3 -`, - }, - { - name: "no width provided", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].width"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: Single_Stat - name: single stat - xPos: 1 - yPos: 2 - height: 3 - queries: - - query: "from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == \"processes\") |> filter(fn: (r) => r._field == \"running\" or r._field == \"blocked\") |> aggregateWindow(every: v.windowPeriod, fn: max) |> yield(name: \"max\")" - colors: - - name: laser - type: text - hex: "#8F8AF4" -`, - }, - { - name: "no height provided", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].height"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: Single_Stat - name: single stat - xPos: 1 - yPos: 2 - width: 3 - queries: - - query: "from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == \"processes\") |> filter(fn: (r) => r._field == \"running\" or r._field == \"blocked\") |> aggregateWindow(every: v.windowPeriod, fn: max) |> yield(name: \"max\")" - colors: - - name: laser - type: text - hex: "#8F8AF4" -`, - }, - { - name: "duplicate metadata names", - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: ` -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: ---- -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: -`, - }, - { - name: "spec name too short", - validationErrs: 1, - valFields: []string{fieldSpec, fieldName}, - templateStr: ` -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - name: d -`, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, KindDashboard, tt) - } - }) - }) - - t.Run("single stat plus line chart", func(t *testing.T) { - t.Run("happy 
path", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard_single_stat_plus_line", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Dashboards, 1) - - actual := sum.Dashboards[0] - assert.Equal(t, KindDashboard, actual.Kind) - assert.Equal(t, "dash-1", actual.Name) - assert.Equal(t, "desc1", actual.Description) - - require.Len(t, actual.Charts, 1) - actualChart := actual.Charts[0] - assert.Equal(t, 3, actualChart.Height) - assert.Equal(t, 6, actualChart.Width) - assert.Equal(t, 1, actualChart.XPosition) - assert.Equal(t, 2, actualChart.YPosition) - - props, ok := actualChart.Properties.(influxdb.LinePlusSingleStatProperties) - require.True(t, ok) - assert.Equal(t, "single stat plus line note", props.Note) - assert.True(t, props.ShowNoteWhenEmpty) - assert.True(t, props.DecimalPlaces.IsEnforced) - assert.Equal(t, int32(1), props.DecimalPlaces.Digits) - assert.Equal(t, "days", props.Suffix) - assert.Equal(t, "sumtin", props.Prefix) - assert.Equal(t, "overlaid", props.Position) - assert.Equal(t, []string{"xTotalTicks", "xTickStart", "xTickStep"}, props.GenerateXAxisTicks) - assert.Equal(t, 15, props.XTotalTicks) - assert.Equal(t, 0.0, props.XTickStart) - assert.Equal(t, 1000.0, props.XTickStep) - assert.Equal(t, []string{"yTotalTicks", "yTickStart", "yTickStep"}, props.GenerateYAxisTicks) - assert.Equal(t, 10, props.YTotalTicks) - assert.Equal(t, 0.0, props.YTickStart) - assert.Equal(t, 100.0, props.YTickStep) - assert.Equal(t, true, props.LegendColorizeRows) - assert.Equal(t, false, props.LegendHide) - assert.Equal(t, 1.0, props.LegendOpacity) - assert.Equal(t, 5, props.LegendOrientationThreshold) - assert.Equal(t, true, props.StaticLegend.ColorizeRows) - assert.Equal(t, 0.2, props.StaticLegend.HeightRatio) - assert.Equal(t, true, props.StaticLegend.Show) - assert.Equal(t, 1.0, props.StaticLegend.Opacity) - assert.Equal(t, 5, props.StaticLegend.OrientationThreshold) - assert.Equal(t, "y", props.StaticLegend.ValueAxis) - assert.Equal(t, 1.0, props.StaticLegend.WidthRatio) - - require.Len(t, props.Queries, 1) - q := props.Queries[0] - expectedQuery := `from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean")` - assert.Equal(t, expectedQuery, q.Text) - assert.Equal(t, "advanced", q.EditMode) - - for _, key := range []string{"x", "y"} { - xAxis, ok := props.Axes[key] - require.True(t, ok, "key="+key) - assert.Equal(t, "10", xAxis.Base, "key="+key) - assert.Equal(t, key+"_label", xAxis.Label, "key="+key) - assert.Equal(t, key+"_prefix", xAxis.Prefix, "key="+key) - assert.Equal(t, "linear", xAxis.Scale, "key="+key) - assert.Equal(t, key+"_suffix", xAxis.Suffix, "key="+key) - } - - require.Len(t, props.ViewColors, 2) - c := props.ViewColors[0] - assert.Equal(t, "base", c.ID) - assert.Equal(t, "laser", c.Name) - assert.Equal(t, "text", c.Type) - assert.Equal(t, "#8F8AF4", c.Hex) - assert.Equal(t, 3.0, c.Value) - - c = props.ViewColors[1] - assert.Equal(t, "base", c.ID) - assert.Equal(t, "android", c.Name) - assert.Equal(t, "scale", c.Type) - assert.Equal(t, "#F4CF31", c.Hex) - assert.Equal(t, 1.0, c.Value) - }) - }) - - t.Run("handles invalid config", func(t *testing.T) { - tests := []testTemplateResourceError{ - { - name: "color missing hex value", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].colors[0].hex"}, - templateStr: `apiVersion: 
influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: Single_Stat_Plus_Line - name: single stat plus line - xPos: 1 - yPos: 2 - width: 6 - height: 3 - position: overlaid - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - name: laser - type: text - value: 3 - axes: - - name : "x" - label: x_label - prefix: x_prefix - suffix: x_suffix - base: 10 - scale: linear - - name: "y" - label: y_label - prefix: y_prefix - suffix: y_suffix - base: 10 - scale: linear -`, - }, - { - name: "no width provided", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].width"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: Single_Stat_Plus_Line - name: single stat plus line - xPos: 1 - yPos: 2 - height: 3 - shade: true - hoverDimension: "y" - position: overlaid - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - name: laser - type: text - hex: "#8F8AF4" - value: 3 - - name: android - type: scale - hex: "#F4CF31" - value: 1 - axes: - - name : "x" - label: x_label - prefix: x_prefix - suffix: x_suffix - base: 10 - scale: linear - - name: "y" - label: y_label - prefix: y_prefix - suffix: y_suffix - base: 10 - scale: linear -`, - }, - { - name: "no height provided", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].height"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: Single_Stat_Plus_Line - name: single stat plus line - xPos: 1 - yPos: 2 - width: 6 - position: overlaid - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - name: laser - type: text - hex: "#8F8AF4" - value: 3 - - name: android - type: scale - hex: "#F4CF31" - value: 1 - axes: - - name : "x" - label: x_label - prefix: x_prefix - suffix: x_suffix - base: 10 - scale: linear - - name: "y" - label: y_label - prefix: y_prefix - suffix: y_suffix - base: 10 - scale: linear -`, - }, - { - name: "missing x axis", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].axes"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: Single_Stat_Plus_Line - name: single stat plus line - xPos: 1 - yPos: 2 - width: 6 - height: 3 - position: overlaid - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - name: laser - type: text - hex: "#8F8AF4" - value: 3 - - name: android - type: scale - hex: "#F4CF31" - value: 1 - axes: - - name: "y" - label: y_label - prefix: y_prefix - 
suffix: y_suffix - base: 10 - scale: linear -`, - }, - { - name: "missing y axis", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].axes"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: Single_Stat_Plus_Line - name: single stat plus line - xPos: 1 - yPos: 2 - width: 6 - height: 3 - position: overlaid - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - name: laser - type: text - hex: "#8F8AF4" - value: 3 - - name: android - type: scale - hex: "#F4CF31" - value: 1 - axes: - - name : "x" - label: x_label - prefix: x_prefix - suffix: x_suffix - base: 10 - scale: linear -`, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, KindDashboard, tt) - } - }) - }) - - t.Run("table chart", func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard_table", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Dashboards, 1) - - actual := sum.Dashboards[0] - assert.Equal(t, KindDashboard, actual.Kind) - assert.Equal(t, "dash-1", actual.Name) - assert.Equal(t, "desc1", actual.Description) - - require.Len(t, actual.Charts, 1) - actualChart := actual.Charts[0] - assert.Equal(t, 3, actualChart.Height) - assert.Equal(t, 6, actualChart.Width) - assert.Equal(t, 1, actualChart.XPosition) - assert.Equal(t, 2, actualChart.YPosition) - - props, ok := actualChart.Properties.(influxdb.TableViewProperties) - require.True(t, ok) - assert.Equal(t, "table note", props.Note) - assert.True(t, props.ShowNoteWhenEmpty) - assert.True(t, props.DecimalPlaces.IsEnforced) - assert.Equal(t, int32(1), props.DecimalPlaces.Digits) - assert.Equal(t, "YYYY:MMMM:DD", props.TimeFormat) - - require.Len(t, props.Queries, 1) - q := props.Queries[0] - expectedQuery := `from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_writes_total") |> filter(fn: (r) => r._field == "counter")` - assert.Equal(t, expectedQuery, q.Text) - assert.Equal(t, "advanced", q.EditMode) - - require.Len(t, props.ViewColors, 1) - c := props.ViewColors[0] - assert.Equal(t, "laser", c.Name) - assert.Equal(t, "min", c.Type) - assert.Equal(t, "#8F8AF4", c.Hex) - assert.Equal(t, 3.0, c.Value) - - tableOpts := props.TableOptions - assert.True(t, tableOpts.VerticalTimeAxis) - assert.Equal(t, "_time", tableOpts.SortBy.InternalName) - assert.Equal(t, "truncate", tableOpts.Wrapping) - assert.True(t, tableOpts.FixFirstColumn) - - assert.Contains(t, props.FieldOptions, influxdb.RenamableField{ - InternalName: "_value", - DisplayName: "MB", - Visible: true, - }) - assert.Contains(t, props.FieldOptions, influxdb.RenamableField{ - InternalName: "_time", - DisplayName: "time (ms)", - Visible: true, - }) - }) - }) - - t.Run("handles invalid config", func(t *testing.T) { - tests := []testTemplateResourceError{ - { - name: "color missing hex value", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].colors[0].hex"}, - templateStr: ` -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: Table - name: table - xPos: 1 - yPos: 2 - width: 6 - height: 3 - queries: - - query: > - from(bucket: v.bucket) 
|> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_writes_total") |> filter(fn: (r) => r._field == "counter") - colors: - - name: laser - type: min - hex: - value: 3.0`, - }, - { - name: "no width provided", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].width"}, - templateStr: ` -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: Table - name: table - xPos: 1 - yPos: 2 - height: 3 - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_writes_total") |> filter(fn: (r) => r._field == "counter") - colors: - - name: laser - type: min - hex: peru - value: 3.0`, - }, - { - name: "no height provided", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].height"}, - templateStr: ` -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: Table - name: table - xPos: 1 - yPos: 2 - width: 6 - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_writes_total") |> filter(fn: (r) => r._field == "counter") - colors: - - name: laser - type: min - hex: peru - value: 3.0`, - }, - { - name: "invalid wrapping table option", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].tableOptions.wrapping"}, - templateStr: ` -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: Table - name: table - xPos: 1 - yPos: 2 - width: 6 - height: 3 - tableOptions: - sortBy: _time - wrapping: WRONGO wrapping - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_writes_total") |> filter(fn: (r) => r._field == "counter") - colors: - - name: laser - type: min - hex: "#8F8AF4" - value: 3.0 -`, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, KindDashboard, tt) - } - }) - }) - - t.Run("xy chart", func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard_xy", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Dashboards, 1) - - actual := sum.Dashboards[0] - assert.Equal(t, KindDashboard, actual.Kind) - assert.Equal(t, "dash-1", actual.Name) - assert.Equal(t, "desc1", actual.Description) - - require.Len(t, actual.Charts, 1) - actualChart := actual.Charts[0] - assert.Equal(t, 3, actualChart.Height) - assert.Equal(t, 6, actualChart.Width) - assert.Equal(t, 1, actualChart.XPosition) - assert.Equal(t, 2, actualChart.YPosition) - - props, ok := actualChart.Properties.(influxdb.XYViewProperties) - require.True(t, ok) - assert.Equal(t, "xy", props.GetType()) - assert.Equal(t, true, props.ShadeBelow) - assert.Equal(t, "y", props.HoverDimension) - assert.Equal(t, "xy chart note", props.Note) - assert.True(t, props.ShowNoteWhenEmpty) - assert.Equal(t, "stacked", props.Position) - assert.Equal(t, []string{"xTotalTicks", "xTickStart", "xTickStep"}, props.GenerateXAxisTicks) - assert.Equal(t, 15, props.XTotalTicks) - assert.Equal(t, 0.0, props.XTickStart) - assert.Equal(t, 1000.0, props.XTickStep) - assert.Equal(t, []string{"yTotalTicks", "yTickStart", "yTickStep"}, props.GenerateYAxisTicks) - assert.Equal(t, 10, props.YTotalTicks) - assert.Equal(t, 
0.0, props.YTickStart) - assert.Equal(t, 100.0, props.YTickStep) - assert.Equal(t, true, props.LegendColorizeRows) - assert.Equal(t, false, props.LegendHide) - assert.Equal(t, 1.0, props.LegendOpacity) - assert.Equal(t, 5, props.LegendOrientationThreshold) - assert.Equal(t, true, props.StaticLegend.ColorizeRows) - assert.Equal(t, 0.2, props.StaticLegend.HeightRatio) - assert.Equal(t, true, props.StaticLegend.Show) - assert.Equal(t, 1.0, props.StaticLegend.Opacity) - assert.Equal(t, 5, props.StaticLegend.OrientationThreshold) - assert.Equal(t, "y", props.StaticLegend.ValueAxis) - assert.Equal(t, 1.0, props.StaticLegend.WidthRatio) - - require.Len(t, props.Queries, 1) - q := props.Queries[0] - queryText := `from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_writes_total") |> filter(fn: (r) => r._field == "counter")` - assert.Equal(t, queryText, q.Text) - assert.Equal(t, "advanced", q.EditMode) - - require.Len(t, props.ViewColors, 1) - c := props.ViewColors[0] - assert.Equal(t, "laser", c.Name) - assert.Equal(t, "scale", c.Type) - assert.Equal(t, "#8F8AF4", c.Hex) - assert.Equal(t, 3.0, c.Value) - }) - }) - - t.Run("handles invalid config", func(t *testing.T) { - tests := []testTemplateResourceError{ - { - name: "color missing hex value", - validationErrs: 1, - valFields: []string{fieldSpec, "charts[0].colors[0].hex"}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: XY - name: xy chart - xPos: 1 - yPos: 2 - width: 6 - height: 3 - geom: line - position: stacked - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_writes_total") |> filter(fn: (r) => r._field == "counter") - colors: - - name: laser - type: scale - value: 3 - axes: - - name : "x" - label: x_label - prefix: x_prefix - suffix: x_suffix - base: 10 - scale: linear - - name: "y" - label: y_label - prefix: y_prefix - suffix: y_suffix - base: 10 - scale: linear -`, - }, - { - name: "invalid geom flag", - validationErrs: 1, - valFields: []string{fieldSpec, fieldDashCharts, fieldChartGeom}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: XY - name: xy chart - xPos: 1 - yPos: 2 - width: 6 - height: 3 - position: stacked - staticLegend: - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_writes_total") |> filter(fn: (r) => r._field == "counter") - colors: - - name: laser - type: scale - hex: "#8F8AF4" - value: 3 - axes: - - name : "x" - label: x_label - prefix: x_prefix - suffix: x_suffix - base: 10 - scale: linear - - name: "y" - label: y_label - prefix: y_prefix - suffix: y_suffix - base: 10 - scale: linear -`, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, KindDashboard, tt) - } - }) - }) - }) - - t.Run("with params option should be parameterizable", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard_params.yml", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Dashboards, 1) - - actual := sum.Dashboards[0] - assert.Equal(t, KindDashboard, actual.Kind) - assert.Equal(t, "dash-1", actual.MetaName) - - require.Len(t, actual.Charts, 1) - actualChart := actual.Charts[0] - assert.Equal(t, 3, actualChart.Height) - 
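// The rest of this case pins down the chart geometry and the single-stat view properties, then
// checks that every key in the query's `option params` block (bucket, start, stop, name, floatVal,
// minVal) surfaces as a SummaryReference: the EnvRefKey is scoped to the dashboard/chart/query
// path and the Flux literal for each param becomes the reference's default value.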
assert.Equal(t, 6, actualChart.Width) - assert.Equal(t, 1, actualChart.XPosition) - assert.Equal(t, 2, actualChart.YPosition) - - props, ok := actualChart.Properties.(influxdb.SingleStatViewProperties) - require.True(t, ok) - assert.Equal(t, "single-stat", props.GetType()) - - require.Len(t, props.Queries, 1) - - // params - queryText := `option params = { - bucket: "bar", - start: -24h0m0s, - stop: now(), - name: "max", - floatVal: 37.2, - minVal: 10, -} - -from(bucket: params.bucket) - |> range(start: params.start, stop: params.stop) - |> filter(fn: (r) => r._measurement == "processes") - |> filter(fn: (r) => r.floater == params.floatVal) - |> filter(fn: (r) => r._value > params.minVal) - |> aggregateWindow(every: v.windowPeriod, fn: max) - |> yield(name: params.name) -` - - q := props.Queries[0] - assert.Equal(t, queryText, q.Text) - assert.Equal(t, "advanced", q.EditMode) - - expectedRefs := []SummaryReference{ - { - Field: "spec.charts[0].queries[0].params.bucket", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.bucket`, - ValType: "string", - DefaultValue: "bar", - }, - { - Field: "spec.charts[0].queries[0].params.floatVal", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.floatVal`, - ValType: "float", - DefaultValue: 37.2, - }, - { - Field: "spec.charts[0].queries[0].params.minVal", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.minVal`, - ValType: "integer", - DefaultValue: int64(10), - }, - { - Field: "spec.charts[0].queries[0].params.name", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.name`, - ValType: "string", - DefaultValue: "max", - }, - { - Field: "spec.charts[0].queries[0].params.start", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.start`, - ValType: "duration", - DefaultValue: "-24h0m0s", - }, - { - Field: "spec.charts[0].queries[0].params.stop", - EnvRefKey: `dashboards[dash-1].spec.charts[0].queries[0].params.stop`, - ValType: "time", - DefaultValue: "now()", - }, - } - assert.Equal(t, expectedRefs, actual.EnvReferences) - }) - }) - - t.Run("with env refs should be valid", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard_ref.yml", func(t *testing.T, template *Template) { - actual := template.Summary().Dashboards - require.Len(t, actual, 1) - - expected := []SummaryReference{ - { - Field: "spec.associations[0].name", - EnvRefKey: "label-meta-name", - }, - { - Field: "metadata.name", - EnvRefKey: "meta-name", - DefaultValue: "meta", - }, - { - Field: "spec.name", - EnvRefKey: "spec-name", - DefaultValue: "spectacles", - }, - } - assert.Equal(t, expected, actual[0].EnvReferences) - }) - }) - - t.Run("and labels associated should be successful", func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard_associates_label", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Dashboards, 1) - - actual := sum.Dashboards[0] - assert.Equal(t, "dash-1", actual.Name) - - require.Len(t, actual.LabelAssociations, 2) - assert.Equal(t, "label-1", actual.LabelAssociations[0].Name) - assert.Equal(t, "label-2", actual.LabelAssociations[1].Name) - - expectedMappings := []SummaryLabelMapping{ - { - Status: StateStatusNew, - ResourceType: influxdb.DashboardsResourceType, - ResourceMetaName: "dash-1", - ResourceName: "dash-1", - LabelMetaName: "label-1", - LabelName: "label-1", - }, - { - Status: StateStatusNew, - ResourceType: influxdb.DashboardsResourceType, - ResourceMetaName: "dash-1", - ResourceName: 
"dash-1", - LabelMetaName: "label-2", - LabelName: "label-2", - }, - } - - for _, expectedMapping := range expectedMappings { - assert.Contains(t, sum.LabelMappings, expectedMapping) - } - }) - }) - - t.Run("association doesn't exist then provides an error", func(t *testing.T) { - tests := []testTemplateResourceError{ - { - name: "no labels provided", - assErrs: 1, - assIdxs: []int{0}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - associations: - - kind: Label - name: label-1 -`, - }, - { - name: "mixed found and not found", - assErrs: 1, - assIdxs: []int{1}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - associations: - - kind: Label - name: label-1 - - kind: Label - name: unfound label -`, - }, - { - name: "multiple not found", - assErrs: 1, - assIdxs: []int{0, 1}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - associations: - - kind: Label - name: not found 1 - - kind: Label - name: unfound label -`, - }, - { - name: "duplicate valid nested labels", - assErrs: 1, - assIdxs: []int{1}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - associations: - - kind: Label - name: label-1 - - kind: Label - name: label-1 -`, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, KindDashboard, tt) - } - }) - }) - }) - - t.Run("template with notification endpoints", func(t *testing.T) { - t.Run("and labels associated should be successful", func(t *testing.T) { - testfileRunner(t, "testdata/notification_endpoint", func(t *testing.T, template *Template) { - expectedEndpoints := []SummaryNotificationEndpoint{ - { - SummaryIdentifier: SummaryIdentifier{ - Kind: KindNotificationEndpointHTTP, - MetaName: "http-basic-auth-notification-endpoint", - }, - NotificationEndpoint: &endpoint.HTTP{ - Base: endpoint.Base{ - Name: "basic endpoint name", - Description: "http basic auth desc", - Status: taskmodel.TaskStatusInactive, - }, - URL: "https://www.example.com/endpoint/basicauth", - AuthMethod: "basic", - Method: "POST", - Username: influxdb.SecretField{Value: strPtr("secret username")}, - Password: influxdb.SecretField{Value: strPtr("secret password")}, - }, - }, - { - SummaryIdentifier: SummaryIdentifier{ - Kind: KindNotificationEndpointHTTP, - MetaName: "http-bearer-auth-notification-endpoint", - }, - NotificationEndpoint: &endpoint.HTTP{ - Base: endpoint.Base{ - Name: "http-bearer-auth-notification-endpoint", - Description: "http bearer auth desc", - Status: taskmodel.TaskStatusActive, - }, - URL: "https://www.example.com/endpoint/bearerauth", - AuthMethod: "bearer", - Method: "PUT", - Token: influxdb.SecretField{Value: strPtr("secret token")}, - }, - }, - { - SummaryIdentifier: SummaryIdentifier{ - Kind: KindNotificationEndpointHTTP, - MetaName: "http-none-auth-notification-endpoint", - }, - NotificationEndpoint: &endpoint.HTTP{ - Base: endpoint.Base{ - Name: "http-none-auth-notification-endpoint", - Description: "http none auth desc", - Status: taskmodel.TaskStatusActive, - }, - URL: "https://www.example.com/endpoint/noneauth", - AuthMethod: "none", - Method: "GET", - }, - }, - { - SummaryIdentifier: SummaryIdentifier{ - Kind: 
KindNotificationEndpointPagerDuty, - MetaName: "pager-duty-notification-endpoint", - }, - NotificationEndpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - Name: "pager duty name", - Description: "pager duty desc", - Status: taskmodel.TaskStatusActive, - }, - ClientURL: "http://localhost:8080/orgs/7167eb6719fa34e5/alert-history", - RoutingKey: influxdb.SecretField{Value: strPtr("secret routing-key")}, - }, - }, - { - SummaryIdentifier: SummaryIdentifier{ - Kind: KindNotificationEndpointHTTP, - MetaName: "slack-notification-endpoint", - }, - NotificationEndpoint: &endpoint.Slack{ - Base: endpoint.Base{ - Name: "slack name", - Description: "slack desc", - Status: taskmodel.TaskStatusActive, - }, - URL: "https://hooks.slack.com/services/bip/piddy/boppidy", - Token: influxdb.SecretField{Value: strPtr("tokenval")}, - }, - }, - } - - sum := template.Summary() - endpoints := sum.NotificationEndpoints - require.Len(t, endpoints, len(expectedEndpoints)) - require.Len(t, sum.LabelMappings, len(expectedEndpoints)) - - for i := range expectedEndpoints { - expected, actual := expectedEndpoints[i], endpoints[i] - assert.Equalf(t, expected.NotificationEndpoint, actual.NotificationEndpoint, "index=%d", i) - require.Len(t, actual.LabelAssociations, 1) - assert.Equal(t, "label-1", actual.LabelAssociations[0].Name) - - assert.Contains(t, sum.LabelMappings, SummaryLabelMapping{ - Status: StateStatusNew, - ResourceType: influxdb.NotificationEndpointResourceType, - ResourceMetaName: expected.MetaName, - ResourceName: expected.NotificationEndpoint.GetName(), - LabelMetaName: "label-1", - LabelName: "label-1", - }) - } - }) - }) - - t.Run("with env refs should be valid", func(t *testing.T) { - testfileRunner(t, "testdata/notification_endpoint_ref.yml", func(t *testing.T, template *Template) { - actual := template.Summary().NotificationEndpoints - require.Len(t, actual, 1) - - expectedEnvRefs := []SummaryReference{ - { - Field: "metadata.name", - EnvRefKey: "meta-name", - DefaultValue: "meta", - }, - { - Field: "spec.name", - EnvRefKey: "spec-name", - DefaultValue: "spectacles", - }, - { - Field: "spec.associations[0].name", - EnvRefKey: "label-meta-name", - }, - } - assert.Equal(t, expectedEnvRefs, actual[0].EnvReferences) - }) - }) - - t.Run("handles bad config", func(t *testing.T) { - tests := []struct { - kind Kind - resErr testTemplateResourceError - }{ - { - kind: KindNotificationEndpointSlack, - resErr: testTemplateResourceError{ - name: "missing slack url", - validationErrs: 1, - valFields: []string{fieldSpec, fieldNotificationEndpointURL}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointSlack -metadata: - name: slack-notification-endpoint -spec: -`, - }, - }, - { - kind: KindNotificationEndpointPagerDuty, - resErr: testTemplateResourceError{ - name: "missing pager duty url", - validationErrs: 1, - valFields: []string{fieldSpec, fieldNotificationEndpointURL}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointPagerDuty -metadata: - name: pager-duty-notification-endpoint -spec: -`, - }, - }, - { - kind: KindNotificationEndpointHTTP, - resErr: testTemplateResourceError{ - name: "missing http url", - validationErrs: 1, - valFields: []string{fieldSpec, fieldNotificationEndpointURL}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointHTTP -metadata: - name: http-none-auth-notification-endpoint -spec: - type: none - method: get -`, - }, - }, - { - kind: KindNotificationEndpointHTTP, - resErr: testTemplateResourceError{ - 
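// A URL that fails to parse is treated the same as a missing one: the template is rejected with a
// single validation error against the notification endpoint URL field.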
name: "bad url", - validationErrs: 1, - valFields: []string{fieldSpec, fieldNotificationEndpointURL}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointHTTP -metadata: - name: http-none-auth-notification-endpoint -spec: - type: none - method: get - url: d_____-_8**(*https://www.examples.coms -`, - }, - }, - { - kind: KindNotificationEndpointHTTP, - resErr: testTemplateResourceError{ - name: "missing http method", - validationErrs: 1, - valFields: []string{fieldSpec, fieldNotificationEndpointHTTPMethod}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointHTTP -metadata: - name: http-none-auth-notification-endpoint -spec: - type: none - url: https://www.example.com/endpoint/noneauth -`, - }, - }, - { - kind: KindNotificationEndpointHTTP, - resErr: testTemplateResourceError{ - name: "invalid http method", - validationErrs: 1, - valFields: []string{fieldSpec, fieldNotificationEndpointHTTPMethod}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointHTTP -metadata: - name: http-basic-auth-notification-endpoint -spec: - type: none - description: http none auth desc - method: GHOST - url: https://www.example.com/endpoint/noneauth -`, - }, - }, - { - kind: KindNotificationEndpointHTTP, - resErr: testTemplateResourceError{ - name: "missing basic username", - validationErrs: 1, - valFields: []string{fieldSpec, fieldNotificationEndpointUsername}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointHTTP -metadata: - name: http-basic-auth-notification-endpoint -spec: - type: basic - method: POST - url: https://www.example.com/endpoint/basicauth - password: "secret password" -`, - }, - }, - { - kind: KindNotificationEndpointHTTP, - resErr: testTemplateResourceError{ - name: "missing basic password", - validationErrs: 1, - valFields: []string{fieldSpec, fieldNotificationEndpointPassword}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointHTTP -metadata: - name: http-basic-auth-notification-endpoint -spec: - type: basic - method: POST - url: https://www.example.com/endpoint/basicauth - username: username -`, - }, - }, - { - kind: KindNotificationEndpointHTTP, - resErr: testTemplateResourceError{ - name: "missing basic password and username", - validationErrs: 1, - valFields: []string{fieldSpec, fieldNotificationEndpointPassword, fieldNotificationEndpointUsername}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointHTTP -metadata: - name: http-basic-auth-notification-endpoint -spec: - description: http basic auth desc - type: basic - method: pOsT - url: https://www.example.com/endpoint/basicauth -`, - }, - }, - { - kind: KindNotificationEndpointHTTP, - resErr: testTemplateResourceError{ - name: "missing bearer token", - validationErrs: 1, - valFields: []string{fieldSpec, fieldNotificationEndpointToken}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointHTTP -metadata: - name: http-bearer-auth-notification-endpoint -spec: - description: http bearer auth desc - type: bearer - method: puT - url: https://www.example.com/endpoint/bearerauth -`, - }, - }, - { - kind: KindNotificationEndpointHTTP, - resErr: testTemplateResourceError{ - name: "invalid http type", - validationErrs: 1, - valFields: []string{fieldSpec, fieldType}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointHTTP -metadata: - name: http-basic-auth-notification-endpoint -spec: - type: RANDOM WRONG TYPE - description: http 
none auth desc - method: get - url: https://www.example.com/endpoint/noneauth -`, - }, - }, - { - kind: KindNotificationEndpointSlack, - resErr: testTemplateResourceError{ - name: "duplicate endpoints", - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointSlack -metadata: - name: slack-notification-endpoint -spec: - url: https://hooks.slack.com/services/bip/piddy/boppidy ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointSlack -metadata: - name: slack_notification_endpoint -spec: - url: https://hooks.slack.com/services/bip/piddy/boppidy -`, - }, - }, - { - kind: KindNotificationEndpointSlack, - resErr: testTemplateResourceError{ - name: "invalid status", - validationErrs: 1, - valFields: []string{fieldSpec, fieldStatus}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointSlack -metadata: - name: slack-notification-endpoint -spec: - description: slack desc - url: https://hooks.slack.com/services/bip/piddy/boppidy - status: RANDO STATUS -`, - }, - }, - { - kind: KindNotificationEndpointSlack, - resErr: testTemplateResourceError{ - name: "duplicate meta name and spec name", - validationErrs: 1, - valFields: []string{fieldSpec, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointSlack -metadata: - name: slack -spec: - description: slack desc - url: https://hooks.slack.com/services/bip/piddy/boppidy ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointSlack -metadata: - name: slack-notification-endpoint -spec: - name: slack - description: slack desc - url: https://hooks.slack.com/services/bip/piddy/boppidy -`, - }, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, tt.kind, tt.resErr) - } - }) - }) - - t.Run("template with notification rules", func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - testfileRunner(t, "testdata/notification_rule", func(t *testing.T, template *Template) { - sum := template.Summary() - rules := sum.NotificationRules - require.Len(t, rules, 1) - - rule := rules[0] - assert.Equal(t, KindNotificationRule, rule.Kind) - assert.Equal(t, "rule_0", rule.Name) - assert.Equal(t, "endpoint-0", rule.EndpointMetaName) - assert.Equal(t, "desc_0", rule.Description) - assert.Equal(t, (10 * time.Minute).String(), rule.Every) - assert.Equal(t, (30 * time.Second).String(), rule.Offset) - expectedMsgTempl := "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - assert.Equal(t, expectedMsgTempl, rule.MessageTemplate) - assert.Equal(t, influxdb.Active, rule.Status) - - expectedStatusRules := []SummaryStatusRule{ - {CurrentLevel: "CRIT", PreviousLevel: "OK"}, - {CurrentLevel: "WARN"}, - } - assert.Equal(t, expectedStatusRules, rule.StatusRules) - - expectedTagRules := []SummaryTagRule{ - {Key: "k1", Value: "v1", Operator: "equal"}, - {Key: "k1", Value: "v2", Operator: "equal"}, - } - assert.Equal(t, expectedTagRules, rule.TagRules) - - require.Len(t, sum.Labels, 2) - require.Len(t, rule.LabelAssociations, 2) - assert.Equal(t, "label-1", rule.LabelAssociations[0].MetaName) - assert.Equal(t, "label-2", rule.LabelAssociations[1].MetaName) - }) - }) - - t.Run("with env refs should be valid", func(t *testing.T) { - testfileRunner(t, "testdata/notification_rule_ref.yml", func(t *testing.T, template *Template) { - actual := template.Summary().NotificationRules - require.Len(t, actual, 1) - - expectedEnvRefs := 
[]SummaryReference{ - { - Field: "metadata.name", - EnvRefKey: "meta-name", - DefaultValue: "meta", - }, - { - Field: "spec.name", - EnvRefKey: "spec-name", - DefaultValue: "spectacles", - }, - { - Field: "spec.associations[0].name", - EnvRefKey: "label-meta-name", - }, - { - Field: "spec.endpointName", - EnvRefKey: "endpoint-meta-name", - }, - } - assert.Equal(t, expectedEnvRefs, actual[0].EnvReferences) - }) - }) - - t.Run("handles bad config", func(t *testing.T) { - templateWithValidEndpint := func(resource string) string { - return fmt.Sprintf(` -apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointSlack -metadata: - name: endpoint-0 -spec: - url: https://hooks.slack.com/services/bip/piddy/boppidy ---- -%s -`, resource) - } - - tests := []struct { - kind Kind - resErr testTemplateResourceError - }{ - { - kind: KindNotificationRule, - resErr: testTemplateResourceError{ - name: "missing name", - valFields: []string{fieldMetadata, fieldName}, - templateStr: templateWithValidEndpint(`apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: -spec: - endpointName: endpoint-0 - every: 10m - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - statusRules: - - currentLevel: WARN -`), - }, - }, - { - kind: KindNotificationRule, - resErr: testTemplateResourceError{ - name: "missing endpoint name", - valFields: []string{fieldSpec, fieldNotificationRuleEndpointName}, - templateStr: templateWithValidEndpint(`apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: rule-0 -spec: - every: 10m - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - statusRules: - - currentLevel: WARN -`), - }, - }, - { - kind: KindNotificationRule, - resErr: testTemplateResourceError{ - name: "missing every", - valFields: []string{fieldSpec, fieldEvery}, - templateStr: templateWithValidEndpint(`apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: rule-0 -spec: - endpointName: endpoint-0 - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - statusRules: - - currentLevel: WARN -`), - }, - }, - { - kind: KindNotificationRule, - resErr: testTemplateResourceError{ - name: "missing status rules", - valFields: []string{fieldSpec, fieldNotificationRuleStatusRules}, - templateStr: templateWithValidEndpint(`apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: rule-0 -spec: - every: 10m - endpointName: endpoint-0 - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" -`), - }, - }, - { - kind: KindNotificationRule, - resErr: testTemplateResourceError{ - name: "bad current status rule level", - valFields: []string{fieldSpec, fieldNotificationRuleStatusRules}, - templateStr: templateWithValidEndpint(`apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: rule-0 -spec: - every: 10m - endpointName: endpoint-0 - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - statusRules: - - currentLevel: WRONGO -`), - }, - }, - { - kind: KindNotificationRule, - resErr: testTemplateResourceError{ - name: "bad previous status rule level", - valFields: []string{fieldSpec, fieldNotificationRuleStatusRules}, - templateStr: templateWithValidEndpint(`apiVersion: 
influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: rule-0 -spec: - endpointName: endpoint-0 - every: 10m - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - statusRules: - - currentLevel: CRIT - previousLevel: WRONG -`), - }, - }, - { - kind: KindNotificationRule, - resErr: testTemplateResourceError{ - name: "bad tag rule operator", - valFields: []string{fieldSpec, fieldNotificationRuleTagRules}, - templateStr: templateWithValidEndpint(`apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: rule-0 -spec: - endpointName: endpoint-0 - every: 10m - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - statusRules: - - currentLevel: WARN - tagRules: - - key: k1 - value: v2 - operator: WRONG -`), - }, - }, - { - kind: KindNotificationRule, - resErr: testTemplateResourceError{ - name: "bad status provided", - valFields: []string{fieldSpec, fieldStatus}, - templateStr: templateWithValidEndpint(`apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: rule-0 -spec: - endpointName: endpoint-0 - every: 10m - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - status: RANDO STATUS - statusRules: - - currentLevel: WARN -`), - }, - }, - { - kind: KindNotificationRule, - resErr: testTemplateResourceError{ - name: "label association does not exist", - valFields: []string{fieldSpec, fieldAssociations}, - templateStr: templateWithValidEndpint(`apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: rule-0 -spec: - endpointName: endpoint-0 - every: 10m - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - statusRules: - - currentLevel: WARN - associations: - - kind: Label - name: label-1 -`), - }, - }, - { - kind: KindNotificationRule, - resErr: testTemplateResourceError{ - name: "label association dupe", - valFields: []string{fieldSpec, fieldAssociations}, - templateStr: templateWithValidEndpint(`apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: rule-0 -spec: - endpointName: endpoint-0 - every: 10m - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - statusRules: - - currentLevel: WARN - associations: - - kind: Label - name: label-1 - - kind: Label - name: label-1 -`), - }, - }, - { - kind: KindNotificationRule, - resErr: testTemplateResourceError{ - name: "duplicate meta names", - valFields: []string{fieldMetadata, fieldName}, - templateStr: templateWithValidEndpint(` -apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: rule-0 -spec: - endpointName: endpoint-0 - every: 10m - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - statusRules: - - currentLevel: WARN ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: rule-0 -spec: - endpointName: endpoint-0 - every: 10m - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - statusRules: - - currentLevel: WARN -`), - }, - }, - { - kind: KindNotificationRule, - resErr: testTemplateResourceError{ 
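// Unlike the cases above, this template is deliberately not wrapped in templateWithValidEndpint,
// so the rule references an endpoint that is never declared in the template and validation
// reports the endpointName field.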
- name: "missing endpoint association in template", - valFields: []string{fieldSpec, fieldNotificationRuleEndpointName}, - templateStr: ` -apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: rule-0 -spec: - endpointName: RANDO_ENDPOINT_NAME - every: 10m - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - statusRules: - - currentLevel: WARN -`, - }, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, tt.kind, tt.resErr) - } - }) - }) - - t.Run("template with tasks", func(t *testing.T) { - t.Run("happy path", func(t *testing.T) { - testfileRunner(t, "testdata/tasks", func(t *testing.T, template *Template) { - sum := template.Summary() - tasks := sum.Tasks - require.Len(t, tasks, 2) - - for _, ta := range tasks { - assert.Equal(t, KindTask, ta.Kind) - } - - sort.Slice(tasks, func(i, j int) bool { - return tasks[i].MetaName < tasks[j].MetaName - }) - - baseEqual := func(t *testing.T, i int, status influxdb.Status, actual SummaryTask) { - t.Helper() - - assert.Equal(t, "task-"+strconv.Itoa(i), actual.Name) - assert.Equal(t, "desc_"+strconv.Itoa(i), actual.Description) - assert.Equal(t, status, actual.Status) - - expectedQuery := "from(bucket: \"rucket_1\")\n |> range(start: -5d, stop: -1h)\n |> filter(fn: (r) => r._measurement == \"cpu\")\n |> filter(fn: (r) => r._field == \"usage_idle\")\n |> aggregateWindow(every: 1m, fn: mean)\n |> yield(name: \"mean\")" - assert.Equal(t, expectedQuery, actual.Query) - - require.Len(t, actual.LabelAssociations, 1) - assert.Equal(t, "label-1", actual.LabelAssociations[0].Name) - } - - require.Len(t, sum.Labels, 1) - - task0 := tasks[0] - baseEqual(t, 1, influxdb.Active, task0) - assert.Equal(t, "15 * * * *", task0.Cron) - - task1 := tasks[1] - baseEqual(t, 0, influxdb.Inactive, task1) - assert.Equal(t, (25 * time.Hour).String(), task1.Every) - assert.Equal(t, (15 * time.Second).String(), task1.Offset) - }) - }) - - t.Run("with params option should be parameterizable", func(t *testing.T) { - testfileRunner(t, "testdata/tasks_params.yml", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Tasks, 1) - - actual := sum.Tasks[0] - assert.Equal(t, KindTask, actual.Kind) - assert.Equal(t, "task-uuid", actual.MetaName) - - queryText := `option params = { - bucket: "bar", - start: -24h0m0s, - stop: now(), - name: "max", - floatVal: 37.2, - minVal: 10, -} - -from(bucket: params.bucket) - |> range(start: params.start, stop: params.stop) - |> filter(fn: (r) => r._measurement == "processes") - |> filter(fn: (r) => r.floater == params.floatVal) - |> filter(fn: (r) => r._value > params.minVal) - |> aggregateWindow(every: v.windowPeriod, fn: max) - |> yield(name: params.name) -` - - assert.Equal(t, queryText, actual.Query) - - expectedRefs := []SummaryReference{ - { - Field: "spec.params.bucket", - EnvRefKey: `tasks[task-uuid].spec.params.bucket`, - ValType: "string", - DefaultValue: "bar", - }, - { - Field: "spec.params.floatVal", - EnvRefKey: `tasks[task-uuid].spec.params.floatVal`, - ValType: "float", - DefaultValue: 37.2, - }, - { - Field: "spec.params.minVal", - EnvRefKey: `tasks[task-uuid].spec.params.minVal`, - ValType: "integer", - DefaultValue: int64(10), - }, - { - Field: "spec.params.name", - EnvRefKey: `tasks[task-uuid].spec.params.name`, - ValType: "string", - DefaultValue: "max", - }, - { - Field: "spec.params.start", - EnvRefKey: `tasks[task-uuid].spec.params.start`, - ValType: "duration", - DefaultValue: 
"-24h0m0s", - }, - { - Field: "spec.params.stop", - EnvRefKey: `tasks[task-uuid].spec.params.stop`, - ValType: "time", - DefaultValue: "now()", - }, - } - assert.Equal(t, expectedRefs, actual.EnvReferences) - }) - }) - - t.Run("with task option should be valid", func(t *testing.T) { - testfileRunner(t, "testdata/task_v2.yml", func(t *testing.T, template *Template) { - actual := template.Summary().Tasks - require.Len(t, actual, 1) - - expectedEnvRefs := []SummaryReference{ - { - Field: "spec.task.every", - EnvRefKey: "tasks[task-1].spec.task.every", - ValType: "duration", - DefaultValue: time.Minute, - }, - { - Field: "spec.name", - EnvRefKey: "tasks[task-1].spec.task.name", - ValType: "string", - DefaultValue: "bar", - }, - { - Field: "spec.task.name", - EnvRefKey: "tasks[task-1].spec.task.name", - ValType: "string", - DefaultValue: "bar", - }, - { - Field: "spec.task.offset", - EnvRefKey: "tasks[task-1].spec.task.offset", - ValType: "duration", - DefaultValue: time.Minute * 3, - }, - } - assert.Equal(t, expectedEnvRefs, actual[0].EnvReferences) - }) - }) - - t.Run("with task spec should be valid", func(t *testing.T) { - testfileRunner(t, "testdata/task_v2_taskSpec.yml", func(t *testing.T, template *Template) { - actual := template.Summary().Tasks - require.Len(t, actual, 1) - - expectedEnvRefs := []SummaryReference{ - { - Field: "spec.task.every", - EnvRefKey: "tasks[task-1].spec.task.every", - ValType: "duration", - DefaultValue: time.Minute, - }, - { - Field: "spec.name", - EnvRefKey: "tasks[task-1].spec.task.name", - ValType: "string", - DefaultValue: "foo", - }, - { - Field: "spec.task.name", - EnvRefKey: "tasks[task-1].spec.task.name", - ValType: "string", - DefaultValue: "foo", - }, - { - Field: "spec.task.offset", - EnvRefKey: "tasks[task-1].spec.task.offset", - ValType: "duration", - DefaultValue: time.Minute, - }, - } - - queryText := `option task = {name: "foo", every: 1m0s, offset: 1m0s} - -from(bucket: "rucket_1") - |> range(start: -5d, stop: -1h) - |> filter(fn: (r) => r._measurement == "cpu") - |> filter(fn: (r) => r._field == "usage_idle") - |> aggregateWindow(every: 1m, fn: mean) - |> yield(name: "mean") -` - - assert.Equal(t, queryText, actual[0].Query) - - assert.Equal(t, expectedEnvRefs, actual[0].EnvReferences) - }) - }) - - t.Run("with params option should be valid", func(t *testing.T) { - testfileRunner(t, "testdata/task_v2_params.yml", func(t *testing.T, template *Template) { - actual := template.Summary().Tasks - require.Len(t, actual, 1) - - expectedEnvRefs := []SummaryReference{ - { - Field: "spec.params.this", - EnvRefKey: "tasks[task-1].spec.params.this", - ValType: "string", - DefaultValue: "foo", - }, - } - - queryText := `option params = {this: "foo"} - -from(bucket: "rucket_1") - |> range(start: -5d, stop: -1h) - |> filter(fn: (r) => r._measurement == params.this) - |> filter(fn: (r) => r._field == "usage_idle") - |> aggregateWindow(every: 1m, fn: mean) - |> yield(name: "mean") -` - - assert.Equal(t, queryText, actual[0].Query) - - assert.Equal(t, expectedEnvRefs, actual[0].EnvReferences) - }) - }) - - t.Run("with env refs should be valid", func(t *testing.T) { - testfileRunner(t, "testdata/task_ref.yml", func(t *testing.T, template *Template) { - actual := template.Summary().Tasks - require.Len(t, actual, 1) - - expectedEnvRefs := []SummaryReference{ - { - Field: "spec.associations[0].name", - EnvRefKey: "label-meta-name", - }, - { - Field: "metadata.name", - EnvRefKey: "meta-name", - DefaultValue: "meta", - }, - { - Field: "spec.name", - EnvRefKey: 
"spec-name", - DefaultValue: "spectacles", - }, - } - assert.Equal(t, expectedEnvRefs, actual[0].EnvReferences) - }) - }) - - t.Run("handles bad config", func(t *testing.T) { - tests := []struct { - kind Kind - resErr testTemplateResourceError - }{ - { - kind: KindTask, - resErr: testTemplateResourceError{ - name: "missing name", - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Task -metadata: -spec: - description: desc_1 - cron: 15 * * * * - query: > - from(bucket: "rucket_1") |> yield(name: "mean") -`, - }, - }, - { - kind: KindTask, - resErr: testTemplateResourceError{ - name: "invalid status", - validationErrs: 1, - valFields: []string{fieldSpec, fieldStatus}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Task -metadata: - name: task-0 -spec: - cron: 15 * * * * - query: > - from(bucket: "rucket_1") |> yield(name: "mean") - status: RANDO WRONGO -`, - }, - }, - { - kind: KindTask, - resErr: testTemplateResourceError{ - name: "missing query", - validationErrs: 1, - valFields: []string{fieldSpec, fieldQuery}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Task -metadata: - name: task-0 -spec: - description: desc_0 - every: 10m - offset: 15s -`, - }, - }, - { - kind: KindTask, - resErr: testTemplateResourceError{ - name: "missing every and cron fields", - validationErrs: 1, - valFields: []string{fieldSpec, fieldEvery, fieldTaskCron}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Task -metadata: - name: task-0 -spec: - description: desc_0 - offset: 15s -`, - }, - }, - { - kind: KindTask, - resErr: testTemplateResourceError{ - name: "invalid association", - validationErrs: 1, - valFields: []string{fieldSpec, fieldAssociations}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Task -metadata: - name: task-1 -spec: - cron: 15 * * * * - query: > - from(bucket: "rucket_1") |> yield(name: "mean") - associations: - - kind: Label - name: label-1 -`, - }, - }, - { - kind: KindTask, - resErr: testTemplateResourceError{ - name: "duplicate association", - validationErrs: 1, - valFields: []string{fieldSpec, fieldAssociations}, - templateStr: `--- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Task -metadata: - name: task-0 -spec: - every: 10m - offset: 15s - query: > - from(bucket: "rucket_1") |> yield(name: "mean") - status: inactive - associations: - - kind: Label - name: label-1 - - kind: Label - name: label-1 -`, - }, - }, - { - kind: KindTask, - resErr: testTemplateResourceError{ - name: "duplicate meta names", - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: ` -apiVersion: influxdata.com/v2alpha1 -kind: Task -metadata: - name: task-0 -spec: - every: 10m - query: > - from(bucket: "rucket_1") |> yield(name: "mean") ---- -apiVersion: influxdata.com/v2alpha1 -kind: Task -metadata: - name: task-0 -spec: - every: 10m - query: > - from(bucket: "rucket_1") |> yield(name: "mean") -`, - }, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, tt.kind, tt.resErr) - } - }) - }) - - t.Run("template with telegraf config", func(t *testing.T) { - t.Run("and associated labels should be successful", func(t *testing.T) { - testfileRunner(t, "testdata/telegraf", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.TelegrafConfigs, 2) - - actual := sum.TelegrafConfigs[0] - assert.Equal(t, KindTelegraf, actual.Kind) - assert.Equal(t, 
"display name", actual.TelegrafConfig.Name) - assert.Equal(t, "desc", actual.TelegrafConfig.Description) - - require.Len(t, actual.LabelAssociations, 2) - assert.Equal(t, "label-1", actual.LabelAssociations[0].Name) - assert.Equal(t, "label-2", actual.LabelAssociations[1].Name) - - actual = sum.TelegrafConfigs[1] - assert.Equal(t, "tele-2", actual.TelegrafConfig.Name) - assert.Empty(t, actual.LabelAssociations) - - require.Len(t, sum.LabelMappings, 2) - expectedMapping := SummaryLabelMapping{ - Status: StateStatusNew, - ResourceMetaName: "first-tele-config", - ResourceName: "display name", - LabelMetaName: "label-1", - LabelName: "label-1", - ResourceType: influxdb.TelegrafsResourceType, - } - assert.Equal(t, expectedMapping, sum.LabelMappings[0]) - expectedMapping.LabelMetaName = "label-2" - expectedMapping.LabelName = "label-2" - assert.Equal(t, expectedMapping, sum.LabelMappings[1]) - }) - }) - - t.Run("with env refs should be valid", func(t *testing.T) { - testfileRunner(t, "testdata/telegraf_ref.yml", func(t *testing.T, template *Template) { - actual := template.Summary().TelegrafConfigs - require.Len(t, actual, 1) - - expectedEnvRefs := []SummaryReference{ - { - Field: "metadata.name", - EnvRefKey: "meta-name", - DefaultValue: "meta", - }, - { - Field: "spec.name", - EnvRefKey: "spec-name", - DefaultValue: "spectacles", - }, - { - Field: "spec.associations[0].name", - EnvRefKey: "label-meta-name", - }, - } - assert.Equal(t, expectedEnvRefs, actual[0].EnvReferences) - }) - }) - - t.Run("handles bad config", func(t *testing.T) { - tests := []testTemplateResourceError{ - { - name: "config missing", - validationErrs: 1, - valFields: []string{fieldSpec, fieldTelegrafConfig}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Telegraf -metadata: - name: first-tele-config -spec: -`, - }, - { - name: "duplicate metadata names", - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Telegraf -metadata: - name: tele-0 -spec: - config: fake tele config ---- -apiVersion: influxdata.com/v2alpha1 -kind: Telegraf -metadata: - name: tele-0 -spec: - config: fake tele config -`, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, KindTelegraf, tt) - } - }) - }) - - t.Run("template with a variable", func(t *testing.T) { - t.Run("with valid fields should produce summary", func(t *testing.T) { - testfileRunner(t, "testdata/variables", func(t *testing.T, template *Template) { - sum := template.Summary() - - require.Len(t, sum.Variables, 4) - for _, v := range sum.Variables { - assert.Equal(t, KindVariable, v.Kind) - } - - varEquals := func(t *testing.T, name, vType string, vals interface{}, selected []string, v SummaryVariable) { - t.Helper() - - assert.Equal(t, name, v.Name) - assert.Equal(t, name+" desc", v.Description) - if selected == nil { - selected = []string{} - } - assert.Equal(t, selected, v.Selected) - require.NotNil(t, v.Arguments) - assert.Equal(t, vType, v.Arguments.Type) - assert.Equal(t, vals, v.Arguments.Values) - } - - // validates we support all known variable types - varEquals(t, - "var-const-3", - "constant", - influxdb.VariableConstantValues([]string{"first val"}), - nil, - sum.Variables[0], - ) - - varEquals(t, - "var-map-4", - "map", - influxdb.VariableMapValues{"k1": "v1"}, - nil, - sum.Variables[1], - ) - - varEquals(t, - "query var", - "query", - influxdb.VariableQueryValues{ - Query: `buckets() |> filter(fn: (r) => r.name !~ /^_/) |> rename(columns: {name: "_value"}) |> keep(columns: 
["_value"])`, - Language: "flux", - }, - []string{"rucket"}, - sum.Variables[2], - ) - - varEquals(t, - "var-query-2", - "query", - influxdb.VariableQueryValues{ - Query: "an influxql query of sorts", - Language: "influxql", - }, - nil, - sum.Variables[3], - ) - }) - }) - - t.Run("with env refs should be valid", func(t *testing.T) { - testfileRunner(t, "testdata/variable_ref.yml", func(t *testing.T, template *Template) { - actual := template.Summary().Variables - require.Len(t, actual, 1) - - expectedEnvRefs := []SummaryReference{ - { - Field: "metadata.name", - EnvRefKey: "meta-name", - DefaultValue: "meta", - }, - { - Field: "spec.name", - EnvRefKey: "spec-name", - DefaultValue: "spectacles", - }, - { - Field: "spec.associations[0].name", - EnvRefKey: "label-meta-name", - }, - { - Field: "spec.selected[0]", - EnvRefKey: "the-selected", - DefaultValue: "second val", - }, - { - Field: "spec.selected[1]", - EnvRefKey: "the-2nd", - }, - } - assert.Equal(t, expectedEnvRefs, actual[0].EnvReferences) - }) - }) - - t.Run("and labels associated", func(t *testing.T) { - testfileRunner(t, "testdata/variable_associates_label.yml", func(t *testing.T, template *Template) { - sum := template.Summary() - require.Len(t, sum.Labels, 1) - - vars := sum.Variables - require.Len(t, vars, 1) - - expectedLabelMappings := []struct { - varName string - labels []string - }{ - { - varName: "var-1", - labels: []string{"label-1"}, - }, - } - for i, expected := range expectedLabelMappings { - v := vars[i] - require.Len(t, v.LabelAssociations, len(expected.labels)) - - for j, label := range expected.labels { - assert.Equal(t, label, v.LabelAssociations[j].Name) - } - } - - expectedMappings := []SummaryLabelMapping{ - { - Status: StateStatusNew, - ResourceMetaName: "var-1", - ResourceName: "var-1", - LabelMetaName: "label-1", - LabelName: "label-1", - }, - } - - require.Len(t, sum.LabelMappings, len(expectedMappings)) - for i, expected := range expectedMappings { - expected.ResourceType = influxdb.VariablesResourceType - assert.Equal(t, expected, sum.LabelMappings[i]) - } - }) - }) - - t.Run("handles bad config", func(t *testing.T) { - tests := []testTemplateResourceError{ - { - name: "name missing", - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: -spec: - description: var-map-4 desc - type: map - values: - k1: v1 -`, - }, - { - name: "map var missing values", - validationErrs: 1, - valFields: []string{fieldSpec, fieldValues}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: var-map-4 -spec: - description: var-map-4 desc - type: map -`, - }, - { - name: "const var missing values", - validationErrs: 1, - valFields: []string{fieldSpec, fieldValues}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: var-const-3 -spec: - description: var-const-3 desc - type: constant -`, - }, - { - name: "query var missing query", - validationErrs: 1, - valFields: []string{fieldSpec, fieldQuery}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: var-query-2 -spec: - description: var-query-2 desc - type: query - language: influxql -`, - }, - { - name: "query var missing query language", - validationErrs: 1, - valFields: []string{fieldSpec, fieldLanguage}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: var-query-2 -spec: - description: var-query-2 desc - type: query - query: an influxql query 
of sorts -`, - }, - { - name: "query var provides incorrect query language", - validationErrs: 1, - valFields: []string{fieldSpec, fieldLanguage}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: var-query-2 -spec: - description: var-query-2 desc - type: query - query: an influxql query of sorts - language: wrong Language -`, - }, - { - name: "duplicate var names", - validationErrs: 1, - valFields: []string{fieldMetadata, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: var-query-2 -spec: - description: var-query-2 desc - type: query - query: an influxql query of sorts - language: influxql ---- -apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: var-query-2 -spec: - description: var-query-2 desc - type: query - query: an influxql query of sorts - language: influxql -`, - }, - { - name: "duplicate meta name and spec name", - validationErrs: 1, - valFields: []string{fieldSpec, fieldName}, - templateStr: `apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: var-query-2 -spec: - description: var-query-2 desc - type: query - query: an influxql query of sorts - language: influxql ---- -apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: valid-query -spec: - name: var-query-2 - description: var-query-2 desc - type: query - query: an influxql query of sorts - language: influxql -`, - }, - } - - for _, tt := range tests { - testTemplateErrors(t, KindVariable, tt) - } - }) - }) - - t.Run("referencing secrets", func(t *testing.T) { - hasSecret := func(t *testing.T, refs map[string]bool, key string) { - t.Helper() - b, ok := refs[key] - assert.True(t, ok) - assert.False(t, b) - } - - testfileRunner(t, "testdata/notification_endpoint_secrets.yml", func(t *testing.T, template *Template) { - sum := template.Summary() - - endpoints := sum.NotificationEndpoints - require.Len(t, endpoints, 1) - - expected := &endpoint.PagerDuty{ - Base: endpoint.Base{ - Name: "pager-duty-notification-endpoint", - Status: taskmodel.TaskStatusActive, - }, - ClientURL: "http://localhost:8080/orgs/7167eb6719fa34e5/alert-history", - RoutingKey: influxdb.SecretField{Key: "-routing-key", Value: strPtr("not empty")}, - } - actual, ok := endpoints[0].NotificationEndpoint.(*endpoint.PagerDuty) - require.True(t, ok) - assert.Equal(t, expected.Base.Name, actual.Name) - require.Nil(t, actual.RoutingKey.Value) - assert.Equal(t, "routing-key", actual.RoutingKey.Key) - - hasSecret(t, template.mSecrets, "routing-key") - }) - }) - - t.Run("referencing env", func(t *testing.T) { - hasEnv := func(t *testing.T, refs map[string]bool, key string) { - t.Helper() - _, ok := refs[key] - assert.True(t, ok) - } - - testfileRunner(t, "testdata/env_refs.yml", func(t *testing.T, template *Template) { - sum := template.Summary() - - require.Len(t, sum.Buckets, 1) - assert.Equal(t, "env-bkt-1-name-ref", sum.Buckets[0].Name) - assert.Len(t, sum.Buckets[0].LabelAssociations, 1) - hasEnv(t, template.mEnv, "bkt-1-name-ref") - - require.Len(t, sum.Checks, 1) - assert.Equal(t, "env-check-1-name-ref", sum.Checks[0].Check.GetName()) - assert.Len(t, sum.Checks[0].LabelAssociations, 1) - hasEnv(t, template.mEnv, "check-1-name-ref") - - require.Len(t, sum.Dashboards, 1) - assert.Equal(t, "env-dash-1-name-ref", sum.Dashboards[0].Name) - assert.Len(t, sum.Dashboards[0].LabelAssociations, 1) - hasEnv(t, template.mEnv, "dash-1-name-ref") - - require.Len(t, sum.NotificationEndpoints, 1) - assert.Equal(t, 
"env-endpoint-1-name-ref", sum.NotificationEndpoints[0].NotificationEndpoint.GetName()) - hasEnv(t, template.mEnv, "endpoint-1-name-ref") - - require.Len(t, sum.Labels, 1) - assert.Equal(t, "env-label-1-name-ref", sum.Labels[0].Name) - hasEnv(t, template.mEnv, "label-1-name-ref") - - require.Len(t, sum.NotificationRules, 1) - assert.Equal(t, "env-rule-1-name-ref", sum.NotificationRules[0].Name) - assert.Equal(t, "env-endpoint-1-name-ref", sum.NotificationRules[0].EndpointMetaName) - hasEnv(t, template.mEnv, "rule-1-name-ref") - - require.Len(t, sum.Tasks, 1) - assert.Equal(t, "env-task-1-name-ref", sum.Tasks[0].Name) - hasEnv(t, template.mEnv, "task-1-name-ref") - - require.Len(t, sum.TelegrafConfigs, 1) - assert.Equal(t, "env-telegraf-1-name-ref", sum.TelegrafConfigs[0].TelegrafConfig.Name) - hasEnv(t, template.mEnv, "telegraf-1-name-ref") - - require.Len(t, sum.Variables, 1) - assert.Equal(t, "env-var-1-name-ref", sum.Variables[0].Name) - hasEnv(t, template.mEnv, "var-1-name-ref") - - t.Log("applying env vars should populate env fields") - { - err := template.applyEnvRefs(map[string]interface{}{ - "bkt-1-name-ref": "bucket-1", - "label-1-name-ref": "label-1", - }) - require.NoError(t, err) - - sum := template.Summary() - - require.Len(t, sum.Buckets, 1) - assert.Equal(t, "bucket-1", sum.Buckets[0].Name) - assert.Len(t, sum.Buckets[0].LabelAssociations, 1) - hasEnv(t, template.mEnv, "bkt-1-name-ref") - - require.Len(t, sum.Labels, 1) - assert.Equal(t, "label-1", sum.Labels[0].Name) - hasEnv(t, template.mEnv, "label-1-name-ref") - } - }) - }) - - t.Run("jsonnet support disabled by default", func(t *testing.T) { - template := validParsedTemplateFromFile(t, "testdata/bucket_associates_labels.jsonnet", EncodingJsonnet) - require.Equal(t, &Template{}, template) - }) - - t.Run("jsonnet support", func(t *testing.T) { - template := validParsedTemplateFromFile(t, "testdata/bucket_associates_labels.jsonnet", EncodingJsonnet, EnableJsonnet()) - - sum := template.Summary() - - labels := []SummaryLabel{ - sumLabelGen("label-1", "label-1", "#eee888", "desc_1"), - } - assert.Equal(t, labels, sum.Labels) - - bkts := []SummaryBucket{ - { - SummaryIdentifier: SummaryIdentifier{ - Kind: KindBucket, - MetaName: "rucket-1", - EnvReferences: []SummaryReference{}, - }, - Name: "rucket-1", - Description: "desc_1", - RetentionPeriod: 10000 * time.Second, - LabelAssociations: labels, - }, - { - SummaryIdentifier: SummaryIdentifier{ - Kind: KindBucket, - MetaName: "rucket-2", - EnvReferences: []SummaryReference{}, - }, - Name: "rucket-2", - Description: "desc-2", - RetentionPeriod: 20000 * time.Second, - LabelAssociations: labels, - }, - { - SummaryIdentifier: SummaryIdentifier{ - Kind: KindBucket, - MetaName: "rucket-3", - EnvReferences: []SummaryReference{}, - }, - Name: "rucket-3", - Description: "desc_3", - RetentionPeriod: 30000 * time.Second, - LabelAssociations: labels, - }, - } - assert.Equal(t, bkts, sum.Buckets) - }) -} - -func TestCombine(t *testing.T) { - newTemplateFromYmlStr := func(t *testing.T, templateStr string) *Template { - t.Helper() - return newParsedTemplate(t, FromString(templateStr), EncodingYAML, ValidSkipParseError()) - } - - associationsEqual := func(t *testing.T, summaryLabels []SummaryLabel, names ...string) { - t.Helper() - - require.Len(t, summaryLabels, len(names)) - - m := make(map[string]bool) - for _, n := range names { - m[n] = true - } - - for _, l := range summaryLabels { - if !m[l.Name] { - assert.Fail(t, "did not find label: "+l.Name) - } - delete(m, l.Name) - } - - if 
len(m) > 0 { - var unexpectedLabels []string - for name := range m { - unexpectedLabels = append(unexpectedLabels, name) - } - assert.Failf(t, "additional labels found", "got: %v", unexpectedLabels) - } - } - - t.Run("multiple templates with associations across files", func(t *testing.T) { - var templates []*Template - numLabels := 5 - for i := 0; i < numLabels; i++ { - template := newTemplateFromYmlStr(t, fmt.Sprintf(` -apiVersion: %[1]s -kind: Label -metadata: - name: label-%d -`, APIVersion, i)) - templates = append(templates, template) - } - - templates = append(templates, newTemplateFromYmlStr(t, fmt.Sprintf(` -apiVersion: %[1]s -kind: Bucket -metadata: - name: rucket-1 -spec: - associations: - - kind: Label - name: label-1 -`, APIVersion))) - - templates = append(templates, newTemplateFromYmlStr(t, fmt.Sprintf(` -apiVersion: %[1]s -kind: Bucket -metadata: - name: rucket-2 -spec: - associations: - - kind: Label - name: label-2 -`, APIVersion))) - - templates = append(templates, newTemplateFromYmlStr(t, fmt.Sprintf(` -apiVersion: %[1]s -kind: Bucket -metadata: - name: rucket-3 -spec: - associations: - - kind: Label - name: label-1 - - kind: Label - name: label-2 -`, APIVersion))) - - combinedTemplate, err := Combine(templates) - require.NoError(t, err) - - sum := combinedTemplate.Summary() - - require.Len(t, sum.Labels, numLabels) - for i := 0; i < numLabels; i++ { - assert.Equal(t, fmt.Sprintf("label-%d", i), sum.Labels[i].Name) - } - - require.Len(t, sum.Labels, numLabels) - for i := 0; i < numLabels; i++ { - assert.Equal(t, fmt.Sprintf("label-%d", i), sum.Labels[i].Name) - } - - require.Len(t, sum.Buckets, 3) - assert.Equal(t, "rucket-1", sum.Buckets[0].Name) - associationsEqual(t, sum.Buckets[0].LabelAssociations, "label-1") - assert.Equal(t, "rucket-2", sum.Buckets[1].Name) - associationsEqual(t, sum.Buckets[1].LabelAssociations, "label-2") - assert.Equal(t, "rucket-3", sum.Buckets[2].Name) - associationsEqual(t, sum.Buckets[2].LabelAssociations, "label-1", "label-2") - }) -} - -func Test_normalizeGithubURLToContent(t *testing.T) { - tests := []struct { - name string - input string - expected string - }{ - { - name: "raw url passes untouched", - input: "https://raw.githubusercontent.com/influxdata/community-templates/master/github/github.yml", - expected: "https://raw.githubusercontent.com/influxdata/community-templates/master/github/github.yml", - }, - { - name: "URL that is to short is unchanged", - input: "https://github.com/influxdata/community-templates", - expected: "https://github.com/influxdata/community-templates", - }, - { - name: "URL that does not end in required extention is unchanged", - input: "https://github.com/influxdata/community-templates/master/github", - expected: "https://github.com/influxdata/community-templates/master/github", - }, - { - name: "converts base url with ext yaml to raw content url", - input: "https://github.com/influxdata/community-templates/blob/master/github/github.yaml", - expected: "https://raw.githubusercontent.com/influxdata/community-templates/master/github/github.yaml", - }, - { - name: "converts base url with ext yml to raw content url", - input: "https://github.com/influxdata/community-templates/blob/master/github/github.yml", - expected: "https://raw.githubusercontent.com/influxdata/community-templates/master/github/github.yml", - }, - { - name: "converts base url with ext json to raw content url", - input: 
"https://github.com/influxdata/community-templates/blob/master/github/github.json", - expected: "https://raw.githubusercontent.com/influxdata/community-templates/master/github/github.json", - }, - { - name: "converts base url with ext jsonnet to raw content url", - input: "https://github.com/influxdata/community-templates/blob/master/github/github.jsonnet", - expected: "https://raw.githubusercontent.com/influxdata/community-templates/master/github/github.jsonnet", - }, - { - name: "url with unexpected content type is unchanged 1", - input: "https://github.com/influxdata/community-templates/blob/master/github/github.jason", - expected: "https://github.com/influxdata/community-templates/blob/master/github/github.jason", - }, - { - name: "url with unexpected content type is unchanged 2", - input: "https://github.com/influxdata/community-templates/blob/master/github/github.rando", - expected: "https://github.com/influxdata/community-templates/blob/master/github/github.rando", - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - actual := normalizeGithubURLToContent(tt.input) - - assert.Equal(t, tt.expected, actual) - } - - t.Run(tt.name, fn) - } -} - -func Test_IsParseError(t *testing.T) { - tests := []struct { - name string - err error - expected bool - }{ - { - name: "base case", - err: &parseErr{}, - expected: true, - }, - { - name: "wrapped by influxdb error", - err: &errors2.Error{ - Err: &parseErr{}, - }, - expected: true, - }, - { - name: "deeply nested in influxdb error", - err: &errors2.Error{ - Err: &errors2.Error{ - Err: &errors2.Error{ - Err: &errors2.Error{ - Err: &parseErr{}, - }, - }, - }, - }, - expected: true, - }, - { - name: "influxdb error without nested parse err", - err: &errors2.Error{ - Err: errors.New("nope"), - }, - expected: false, - }, - { - name: "plain error", - err: errors.New("nope"), - expected: false, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - isParseErr := IsParseErr(tt.err) - assert.Equal(t, tt.expected, isParseErr) - } - t.Run(tt.name, fn) - } -} - -func Test_TemplateValidationErr(t *testing.T) { - iPtr := func(i int) *int { - return &i - } - - compIntSlcs := func(t *testing.T, expected []int, actuals []*int) { - t.Helper() - - if len(expected) >= len(actuals) { - require.FailNow(t, "expected array is larger than actuals") - } - - for i, actual := range actuals { - if i == len(expected) { - assert.Nil(t, actual) - continue - } - assert.Equal(t, expected[i], *actual) - } - } - - pErr := &parseErr{ - Resources: []resourceErr{ - { - Kind: KindDashboard.String(), - Idx: intPtr(0), - ValidationErrs: []validationErr{ - { - Field: "charts", - Index: iPtr(1), - Nested: []validationErr{ - { - Field: "colors", - Index: iPtr(0), - Nested: []validationErr{ - { - Field: "hex", - Msg: "hex value required", - }, - }, - }, - { - Field: "kind", - Msg: "chart kind must be provided", - }, - }, - }, - }, - }, - }, - } - - errs := pErr.ValidationErrs() - require.Len(t, errs, 2) - assert.Equal(t, KindDashboard.String(), errs[0].Kind) - assert.Equal(t, []string{"root", "charts", "colors", "hex"}, errs[0].Fields) - compIntSlcs(t, []int{0, 1, 0}, errs[0].Indexes) - assert.Equal(t, "hex value required", errs[0].Reason) - - assert.Equal(t, KindDashboard.String(), errs[1].Kind) - assert.Equal(t, []string{"root", "charts", "kind"}, errs[1].Fields) - compIntSlcs(t, []int{0, 1}, errs[1].Indexes) - assert.Equal(t, "chart kind must be provided", errs[1].Reason) -} - -func Test_validGeometry(t 
*testing.T) { - tests := []struct { - geom string - expected bool - }{ - { - geom: "line", expected: true, - }, - { - geom: "step", expected: true, - }, - { - geom: "stacked", expected: true, - }, - { - geom: "monotoneX", expected: true, - }, - { - geom: "bar", expected: true, - }, - { - geom: "rando", expected: false, - }, - { - geom: "not a valid geom", expected: false, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - isValid := len(validGeometry(tt.geom)) == 0 - assert.Equal(t, tt.expected, isValid) - } - - t.Run(tt.geom, fn) - } -} - -type testTemplateResourceError struct { - name string - encoding Encoding - templateStr string - resourceErrs int - validationErrs int - valFields []string - assErrs int - assIdxs []int -} - -// defaults to yaml encoding if encoding not provided -// defaults num resources to 1 if resource errs not provided. -func testTemplateErrors(t *testing.T, k Kind, tt testTemplateResourceError) { - t.Helper() - encoding := EncodingYAML - if tt.encoding != EncodingUnknown { - encoding = tt.encoding - } - - resErrs := 1 - if tt.resourceErrs > 0 { - resErrs = tt.resourceErrs - } - - fn := func(t *testing.T) { - t.Helper() - - _, err := Parse(encoding, FromString(tt.templateStr)) - require.Error(t, err) - - require.True(t, IsParseErr(err), err) - - pErr := err.(*parseErr) - require.Len(t, pErr.Resources, resErrs) - - defer func() { - if t.Failed() { - t.Logf("received unexpected err: %s", pErr) - } - }() - - resErr := pErr.Resources[0] - assert.Equal(t, k.String(), resErr.Kind) - - for i, vFail := range resErr.ValidationErrs { - if len(tt.valFields) == i { - break - } - expectedField := tt.valFields[i] - findErr(t, expectedField, vFail) - } - - if tt.assErrs == 0 { - return - } - - assFails := pErr.Resources[0].AssociationErrs - for i, assFail := range assFails { - if len(tt.valFields) == i { - break - } - expectedField := tt.valFields[i] - findErr(t, expectedField, assFail) - } - } - t.Run(tt.name, fn) -} - -func findErr(t *testing.T, expectedField string, vErr validationErr) validationErr { - t.Helper() - - fields := strings.Split(expectedField, ".") - if len(fields) == 1 { - require.Equal(t, expectedField, vErr.Field) - return vErr - } - - currentFieldName, idx := nextField(t, fields[0]) - if idx > -1 { - require.NotNil(t, vErr.Index) - require.Equal(t, idx, *vErr.Index) - } - require.Equal(t, currentFieldName, vErr.Field) - - next := strings.Join(fields[1:], ".") - nestedField, _ := nextField(t, next) - for _, n := range vErr.Nested { - if n.Field == nestedField { - return findErr(t, next, n) - } - } - assert.Fail(t, "did not find field: "+expectedField) - - return vErr -} - -func nextField(t *testing.T, field string) (string, int) { - t.Helper() - - fields := strings.Split(field, ".") - if len(fields) == 1 && !strings.HasSuffix(fields[0], "]") { - return field, -1 - } - parts := strings.Split(fields[0], "[") - if len(parts) == 1 { - return parts[0], -1 - } - fieldName := parts[0] - - if strIdx := strings.Index(parts[1], "]"); strIdx > -1 { - idx, err := strconv.Atoi(parts[1][:strIdx]) - require.NoError(t, err) - return fieldName, idx - } - return "", -1 -} - -func validParsedTemplateFromFile(t *testing.T, path string, encoding Encoding, opts ...ValidateOptFn) *Template { - t.Helper() - - var readFn ReaderFn - templateBytes, ok := availableTemplateFiles[path] - if ok { - readFn = FromReader(bytes.NewBuffer(templateBytes), "file://"+path) - } else { - readFn = FromFile(path) - atomic.AddInt64(&missedTemplateCacheCounter, 1) - } - - opt := 
&validateOpt{} - for _, o := range opts { - o(opt) - } - - template := newParsedTemplate(t, readFn, encoding, opts...) - if encoding == EncodingJsonnet && !opt.enableJsonnet { - require.Equal(t, &Template{}, template) - return template - } - - u := url.URL{ - Scheme: "file", - Path: path, - } - require.Equal(t, []string{u.String()}, template.Sources()) - return template -} - -func newParsedTemplate(t *testing.T, fn ReaderFn, encoding Encoding, opts ...ValidateOptFn) *Template { - t.Helper() - - opt := &validateOpt{} - for _, o := range opts { - o(opt) - } - - template, err := Parse(encoding, fn, opts...) - if encoding == EncodingJsonnet && !opt.enableJsonnet { - require.Error(t, err) - return &Template{} - } - require.NoError(t, err) - - for _, k := range template.Objects { - require.Contains(t, k.APIVersion, "influxdata.com/v2alpha") - } - - require.True(t, template.isParsed) - return template -} - -func testfileRunner(t *testing.T, path string, testFn func(t *testing.T, template *Template)) { - t.Helper() - - tests := []struct { - name string - extension string - encoding Encoding - }{ - { - name: "yaml", - extension: ".yml", - encoding: EncodingYAML, - }, - { - name: "json", - extension: ".json", - encoding: EncodingJSON, - }, - } - - ext := filepath.Ext(path) - switch ext { - case ".yml": - tests = tests[:1] - case ".json": - tests = tests[1:] - } - - path = strings.TrimSuffix(path, ext) - - for _, tt := range tests { - fn := func(t *testing.T) { - t.Helper() - - template := validParsedTemplateFromFile(t, path+tt.extension, tt.encoding) - if testFn != nil { - testFn(t, template) - } - } - t.Run(tt.name, fn) - } -} - -func sumLabelGen(metaName, name, color, desc string, envRefs ...SummaryReference) SummaryLabel { - if envRefs == nil { - envRefs = make([]SummaryReference, 0) - } - return SummaryLabel{ - SummaryIdentifier: SummaryIdentifier{ - Kind: KindLabel, - MetaName: metaName, - EnvReferences: envRefs, - }, - Name: name, - Properties: struct { - Color string `json:"color"` - Description string `json:"description"` - }{ - Color: color, - Description: desc, - }, - } -} - -func strPtr(s string) *string { - return &s -} - -func mustDuration(t *testing.T, d time.Duration) *notification.Duration { - t.Helper() - dur, err := notification.FromTimeDuration(d) - require.NoError(t, err) - return &dur -} diff --git a/pkger/service.go b/pkger/service.go deleted file mode 100644 index 838132b0390..00000000000 --- a/pkger/service.go +++ /dev/null @@ -1,3778 +0,0 @@ -package pkger - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/url" - "path" - "regexp" - "sort" - "strings" - "sync" - "time" - - "github.com/go-stack/stack" - "github.com/influxdata/influxdb/v2" - ierrors "github.com/influxdata/influxdb/v2/kit/errors" - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - icheck "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/notification/rule" - "github.com/influxdata/influxdb/v2/pkger/internal/wordplay" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/task/options" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "go.uber.org/zap" -) - -// APIVersion marks the current APIVersion for influx packages. 
-const APIVersion = "influxdata.com/v2alpha1" -const APIVersion2 = "influxdata.com/v2alpha2" - -// Stack is an identifier for stateful application of a package(s). This stack -// will map created resources from the template(s) to existing resources on the -// platform. This stack is updated only after side effects of applying a template. -// If the template is applied, and no changes are had, then the stack is not updated. -type Stack struct { - ID platform.ID - OrgID platform.ID - CreatedAt time.Time `json:"createdAt"` - Events []StackEvent -} - -func (s Stack) LatestEvent() StackEvent { - if len(s.Events) == 0 { - return StackEvent{} - } - sort.Slice(s.Events, func(i, j int) bool { - return s.Events[i].UpdatedAt.Before(s.Events[j].UpdatedAt) - }) - return s.Events[len(s.Events)-1] -} - -type ( - StackEvent struct { - EventType StackEventType - Name string - Description string - Sources []string - TemplateURLs []string - Resources []StackResource - UpdatedAt time.Time `json:"updatedAt"` - } - - StackCreate struct { - OrgID platform.ID - Name string - Description string - Sources []string - TemplateURLs []string - Resources []StackResource - } - - // StackResource is a record for an individual resource side effect generated from - // applying a template. - StackResource struct { - APIVersion string - ID platform.ID - Name string - Kind Kind - MetaName string - Associations []StackResourceAssociation - } - - // StackResourceAssociation associates a stack resource with another stack resource. - StackResourceAssociation struct { - Kind Kind - MetaName string - } - - // StackUpdate provides a means to update an existing stack. - StackUpdate struct { - ID platform.ID - Name *string - Description *string - TemplateURLs []string - AdditionalResources []StackAdditionalResource - } - - StackAdditionalResource struct { - APIVersion string - ID platform.ID - Kind Kind - MetaName string - } -) - -type StackEventType uint - -const ( - StackEventCreate StackEventType = iota - StackEventUpdate - StackEventUninstalled -) - -func (e StackEventType) String() string { - switch e { - case StackEventCreate: - return "create" - case StackEventUninstalled: - return "uninstall" - case StackEventUpdate: - return "update" - default: - return "unknown" - } -} - -const ResourceTypeStack influxdb.ResourceType = "stack" - -// SVC is the packages service interface. -type SVC interface { - InitStack(ctx context.Context, userID platform.ID, stack StackCreate) (Stack, error) - UninstallStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) (Stack, error) - DeleteStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) error - ListStacks(ctx context.Context, orgID platform.ID, filter ListFilter) ([]Stack, error) - ReadStack(ctx context.Context, id platform.ID) (Stack, error) - UpdateStack(ctx context.Context, upd StackUpdate) (Stack, error) - - Export(ctx context.Context, opts ...ExportOptFn) (*Template, error) - DryRun(ctx context.Context, orgID, userID platform.ID, opts ...ApplyOptFn) (ImpactSummary, error) - Apply(ctx context.Context, orgID, userID platform.ID, opts ...ApplyOptFn) (ImpactSummary, error) -} - -// SVCMiddleware is a service middleware func. 
-type SVCMiddleware func(SVC) SVC - -type serviceOpt struct { - logger *zap.Logger - - applyReqLimit int - client *http.Client - idGen platform.IDGenerator - nameGen NameGenerator - timeGen influxdb.TimeGenerator - store Store - - bucketSVC influxdb.BucketService - checkSVC influxdb.CheckService - dashSVC influxdb.DashboardService - labelSVC influxdb.LabelService - endpointSVC influxdb.NotificationEndpointService - orgSVC influxdb.OrganizationService - ruleSVC influxdb.NotificationRuleStore - secretSVC influxdb.SecretService - taskSVC taskmodel.TaskService - teleSVC influxdb.TelegrafConfigStore - varSVC influxdb.VariableService -} - -// ServiceSetterFn is a means of setting dependencies on the Service type. -type ServiceSetterFn func(opt *serviceOpt) - -// WithHTTPClient sets the http client for the service. -func WithHTTPClient(c *http.Client) ServiceSetterFn { - return func(o *serviceOpt) { - o.client = c - } -} - -// WithLogger sets the logger for the service. -func WithLogger(log *zap.Logger) ServiceSetterFn { - return func(o *serviceOpt) { - o.logger = log - } -} - -// WithIDGenerator sets the id generator for the service. -func WithIDGenerator(idGen platform.IDGenerator) ServiceSetterFn { - return func(opt *serviceOpt) { - opt.idGen = idGen - } -} - -// WithTimeGenerator sets the time generator for the service. -func WithTimeGenerator(timeGen influxdb.TimeGenerator) ServiceSetterFn { - return func(opt *serviceOpt) { - opt.timeGen = timeGen - } -} - -// WithStore sets the store for the service. -func WithStore(store Store) ServiceSetterFn { - return func(opt *serviceOpt) { - opt.store = store - } -} - -// WithBucketSVC sets the bucket service. -func WithBucketSVC(bktSVC influxdb.BucketService) ServiceSetterFn { - return func(opt *serviceOpt) { - opt.bucketSVC = bktSVC - } -} - -// WithCheckSVC sets the check service. -func WithCheckSVC(checkSVC influxdb.CheckService) ServiceSetterFn { - return func(opt *serviceOpt) { - opt.checkSVC = checkSVC - } -} - -// WithDashboardSVC sets the dashboard service. -func WithDashboardSVC(dashSVC influxdb.DashboardService) ServiceSetterFn { - return func(opt *serviceOpt) { - opt.dashSVC = dashSVC - } -} - -// WithLabelSVC sets the label service. -func WithLabelSVC(labelSVC influxdb.LabelService) ServiceSetterFn { - return func(opt *serviceOpt) { - opt.labelSVC = labelSVC - } -} - -func withNameGen(nameGen NameGenerator) ServiceSetterFn { - return func(opt *serviceOpt) { - opt.nameGen = nameGen - } -} - -// WithNotificationEndpointSVC sets the endpoint notification service. -func WithNotificationEndpointSVC(endpointSVC influxdb.NotificationEndpointService) ServiceSetterFn { - return func(opt *serviceOpt) { - opt.endpointSVC = endpointSVC - } -} - -// WithNotificationRuleSVC sets the endpoint rule service. -func WithNotificationRuleSVC(ruleSVC influxdb.NotificationRuleStore) ServiceSetterFn { - return func(opt *serviceOpt) { - opt.ruleSVC = ruleSVC - } -} - -// WithOrganizationService sets the organization service for the service. -func WithOrganizationService(orgSVC influxdb.OrganizationService) ServiceSetterFn { - return func(opt *serviceOpt) { - opt.orgSVC = orgSVC - } -} - -// WithSecretSVC sets the secret service. -func WithSecretSVC(secretSVC influxdb.SecretService) ServiceSetterFn { - return func(opt *serviceOpt) { - opt.secretSVC = secretSVC - } -} - -// WithTaskSVC sets the task service. 
-func WithTaskSVC(taskSVC taskmodel.TaskService) ServiceSetterFn { - return func(opt *serviceOpt) { - opt.taskSVC = taskSVC - } -} - -// WithTelegrafSVC sets the telegraf service. -func WithTelegrafSVC(telegrafSVC influxdb.TelegrafConfigStore) ServiceSetterFn { - return func(opt *serviceOpt) { - opt.teleSVC = telegrafSVC - } -} - -// WithVariableSVC sets the variable service. -func WithVariableSVC(varSVC influxdb.VariableService) ServiceSetterFn { - return func(opt *serviceOpt) { - opt.varSVC = varSVC - } -} - -// Store is the storage behavior the Service depends on. -type Store interface { - CreateStack(ctx context.Context, stack Stack) error - ListStacks(ctx context.Context, orgID platform.ID, filter ListFilter) ([]Stack, error) - ReadStackByID(ctx context.Context, id platform.ID) (Stack, error) - UpdateStack(ctx context.Context, stack Stack) error - DeleteStack(ctx context.Context, id platform.ID) error -} - -// Service provides the template business logic including all the dependencies to make -// this resource sausage. -type Service struct { - log *zap.Logger - - // internal dependencies - applyReqLimit int - client *http.Client - idGen platform.IDGenerator - nameGen NameGenerator - store Store - timeGen influxdb.TimeGenerator - - // external service dependencies - bucketSVC influxdb.BucketService - checkSVC influxdb.CheckService - dashSVC influxdb.DashboardService - labelSVC influxdb.LabelService - endpointSVC influxdb.NotificationEndpointService - orgSVC influxdb.OrganizationService - ruleSVC influxdb.NotificationRuleStore - secretSVC influxdb.SecretService - taskSVC taskmodel.TaskService - teleSVC influxdb.TelegrafConfigStore - varSVC influxdb.VariableService -} - -var _ SVC = (*Service)(nil) - -// NewService is a constructor for a template Service. -func NewService(opts ...ServiceSetterFn) *Service { - opt := &serviceOpt{ - logger: zap.NewNop(), - applyReqLimit: 5, - idGen: snowflake.NewDefaultIDGenerator(), - nameGen: wordplay.GetRandomName, - timeGen: influxdb.RealTimeGenerator{}, - } - for _, o := range opts { - o(opt) - } - - return &Service{ - log: opt.logger, - - applyReqLimit: opt.applyReqLimit, - client: opt.client, - idGen: opt.idGen, - nameGen: opt.nameGen, - store: opt.store, - timeGen: opt.timeGen, - - bucketSVC: opt.bucketSVC, - checkSVC: opt.checkSVC, - labelSVC: opt.labelSVC, - dashSVC: opt.dashSVC, - endpointSVC: opt.endpointSVC, - orgSVC: opt.orgSVC, - ruleSVC: opt.ruleSVC, - secretSVC: opt.secretSVC, - taskSVC: opt.taskSVC, - teleSVC: opt.teleSVC, - varSVC: opt.varSVC, - } -} - -// InitStack will create a new stack for the given user and its given org. The stack can be created -// with urls that point to the location of packages that are included as part of the stack when -// it is applied. 
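The With* setters above follow the functional-options pattern, so a caller wires only the dependencies it actually has; anything left unset falls back to the defaults established in NewService (a nop logger, an apply request limit of 5, snowflake IDs, wordplay-generated names, and the real time generator). A minimal construction sketch, assuming the dependency variables already exist in scope:

svc := NewService(
	WithLogger(logger),           // *zap.Logger
	WithStore(store),             // pkger Store implementation
	WithBucketSVC(bucketService), // influxdb.BucketService
	WithLabelSVC(labelService),   // influxdb.LabelService
	WithTaskSVC(taskService),     // taskmodel.TaskService
)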
-func (s *Service) InitStack(ctx context.Context, userID platform.ID, stCreate StackCreate) (Stack, error) { - if err := validURLs(stCreate.TemplateURLs); err != nil { - return Stack{}, err - } - - // Reject use of server-side jsonnet with stack templates - for _, u := range stCreate.TemplateURLs { - // While things like '.%6Aonnet' evaluate to the default encoding (yaml), let's unescape and catch those too - decoded, err := url.QueryUnescape(u) - if err != nil { - msg := fmt.Sprintf("stack template from url[%q] had an issue", u) - return Stack{}, influxErr(errors2.EInvalid, msg) - } - - if strings.HasSuffix(strings.ToLower(decoded), "jsonnet") { - msg := fmt.Sprintf("stack template from url[%q] had an issue: %s", u, ErrInvalidEncoding.Error()) - return Stack{}, influxErr(errors2.EUnprocessableEntity, msg) - } - } - - if _, err := s.orgSVC.FindOrganizationByID(ctx, stCreate.OrgID); err != nil { - if errors2.ErrorCode(err) == errors2.ENotFound { - msg := fmt.Sprintf("organization dependency does not exist for id[%q]", stCreate.OrgID.String()) - return Stack{}, influxErr(errors2.EConflict, msg) - } - return Stack{}, internalErr(err) - } - - now := s.timeGen.Now() - newStack := Stack{ - ID: s.idGen.ID(), - OrgID: stCreate.OrgID, - CreatedAt: now, - Events: []StackEvent{ - { - EventType: StackEventCreate, - Name: stCreate.Name, - Description: stCreate.Description, - Resources: stCreate.Resources, - TemplateURLs: stCreate.TemplateURLs, - UpdatedAt: now, - }, - }, - } - if err := s.store.CreateStack(ctx, newStack); err != nil { - return Stack{}, internalErr(err) - } - - return newStack, nil -} - -// UninstallStack will remove all resources associated with the stack. -func (s *Service) UninstallStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) (Stack, error) { - uninstalledStack, err := s.uninstallStack(ctx, identifiers) - if err != nil { - return Stack{}, err - } - - ev := uninstalledStack.LatestEvent() - ev.EventType = StackEventUninstalled - ev.Resources = nil - ev.UpdatedAt = s.timeGen.Now() - - uninstalledStack.Events = append(uninstalledStack.Events, ev) - if err := s.store.UpdateStack(ctx, uninstalledStack); err != nil { - s.log.Error("unable to update stack after uninstalling resources", zap.Error(err)) - } - return uninstalledStack, nil -} - -// DeleteStack removes a stack and all the resources that have are associated with the stack. 
-func (s *Service) DeleteStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) (e error) { - deletedStack, err := s.uninstallStack(ctx, identifiers) - if errors2.ErrorCode(err) == errors2.ENotFound { - return nil - } - if err != nil { - return err - } - - return s.store.DeleteStack(ctx, deletedStack.ID) -} - -func (s *Service) uninstallStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) (_ Stack, e error) { - stack, err := s.store.ReadStackByID(ctx, identifiers.StackID) - if err != nil { - return Stack{}, err - } - if stack.OrgID != identifiers.OrgID { - return Stack{}, &errors2.Error{ - Code: errors2.EConflict, - Msg: "you do not have access to given stack ID", - } - } - - // providing empty template will remove all applied resources - state, err := s.dryRun(ctx, identifiers.OrgID, new(Template), applyOptFromOptFns(ApplyWithStackID(identifiers.StackID))) - if err != nil { - return Stack{}, err - } - - coordinator := newRollbackCoordinator(s.log, s.applyReqLimit) - defer coordinator.rollback(s.log, &e, identifiers.OrgID) - - err = s.applyState(ctx, coordinator, identifiers.OrgID, identifiers.UserID, state, nil) - if err != nil { - return Stack{}, err - } - return stack, nil -} - -// ListFilter are filter options for filtering stacks from being returned. -type ListFilter struct { - StackIDs []platform.ID - Names []string -} - -// ListStacks returns a list of stacks. -func (s *Service) ListStacks(ctx context.Context, orgID platform.ID, f ListFilter) ([]Stack, error) { - return s.store.ListStacks(ctx, orgID, f) -} - -// ReadStack returns a stack that matches the given id. -func (s *Service) ReadStack(ctx context.Context, id platform.ID) (Stack, error) { - return s.store.ReadStackByID(ctx, id) -} - -// UpdateStack updates the stack by the given parameters. 
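ListFilter narrows ListStacks results by stack ID or name. A short usage sketch, with the surrounding variables assumed:

// List stacks in an org by name and print the latest event name for each.
stacks, err := svc.ListStacks(ctx, orgID, ListFilter{
	Names: []string{"monitoring-stack"},
})
if err != nil {
	return err
}
for _, st := range stacks {
	fmt.Println(st.ID, st.LatestEvent().Name)
}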
-func (s *Service) UpdateStack(ctx context.Context, upd StackUpdate) (Stack, error) { - existing, err := s.ReadStack(ctx, upd.ID) - if err != nil { - return Stack{}, err - } - - // Reject use of server-side jsonnet with stack templates - for _, u := range upd.TemplateURLs { - // While things like '.%6Aonnet' evaluate to the default encoding (yaml), let's unescape and catch those too - decoded, err := url.QueryUnescape(u) - if err != nil { - msg := fmt.Sprintf("stack template from url[%q] had an issue", u) - return Stack{}, influxErr(errors2.EInvalid, msg) - } - - if strings.HasSuffix(strings.ToLower(decoded), "jsonnet") { - msg := fmt.Sprintf("stack template from url[%q] had an issue: %s", u, ErrInvalidEncoding.Error()) - return Stack{}, influxErr(errors2.EUnprocessableEntity, msg) - } - } - - updatedStack := s.applyStackUpdate(existing, upd) - if err := s.store.UpdateStack(ctx, updatedStack); err != nil { - return Stack{}, err - } - - return updatedStack, nil -} - -func (s *Service) applyStackUpdate(existing Stack, upd StackUpdate) Stack { - ev := existing.LatestEvent() - ev.EventType = StackEventUpdate - ev.UpdatedAt = s.timeGen.Now() - if upd.Name != nil { - ev.Name = *upd.Name - } - if upd.Description != nil { - ev.Description = *upd.Description - } - if upd.TemplateURLs != nil { - ev.TemplateURLs = upd.TemplateURLs - } - - type key struct { - k Kind - id platform.ID - } - mExistingResources := make(map[key]bool) - mExistingNames := make(map[string]bool) - for _, r := range ev.Resources { - k := key{k: r.Kind, id: r.ID} - mExistingResources[k] = true - mExistingNames[r.MetaName] = true - } - - var out []StackResource - for _, r := range upd.AdditionalResources { - k := key{k: r.Kind, id: r.ID} - if mExistingResources[k] { - continue - } - - sr := StackResource{ - APIVersion: r.APIVersion, - ID: r.ID, - Kind: r.Kind, - } - - metaName := r.MetaName - if metaName == "" || mExistingNames[metaName] { - metaName = uniqMetaName(s.nameGen, s.idGen, mExistingNames) - } - mExistingNames[metaName] = true - sr.MetaName = metaName - - out = append(out, sr) - } - - ev.Resources = append(ev.Resources, out...) - sort.Slice(ev.Resources, func(i, j int) bool { - iName, jName := ev.Resources[i].MetaName, ev.Resources[j].MetaName - iKind, jKind := ev.Resources[i].Kind, ev.Resources[j].Kind - - if iKind.is(jKind) { - return iName < jName - } - return kindPriorities[iKind] > kindPriorities[jKind] - }) - - existing.Events = append(existing.Events, ev) - return existing -} - -type ( - // ExportOptFn is a functional input for setting the template fields. - ExportOptFn func(opt *ExportOpt) error - - // ExportOpt are the options for creating a new package. - ExportOpt struct { - StackID platform.ID - OrgIDs []ExportByOrgIDOpt - Resources []ResourceToClone - } - - // ExportByOrgIDOpt identifies an org to export resources for and provides - // multiple filtering options. - ExportByOrgIDOpt struct { - OrgID platform.ID - LabelNames []string - ResourceKinds []Kind - } -) - -// ExportWithExistingResources allows the create method to clone existing resources. -func ExportWithExistingResources(resources ...ResourceToClone) ExportOptFn { - return func(opt *ExportOpt) error { - for _, r := range resources { - if err := r.OK(); err != nil { - return err - } - } - opt.Resources = append(opt.Resources, resources...) - return nil - } -} - -// ExportWithAllOrgResources allows the create method to clone all existing resources -// for the given organization. 
-func ExportWithAllOrgResources(orgIDOpt ExportByOrgIDOpt) ExportOptFn { - return func(opt *ExportOpt) error { - if orgIDOpt.OrgID == 0 { - return errors.New("orgID provided must not be zero") - } - for _, k := range orgIDOpt.ResourceKinds { - if err := k.OK(); err != nil { - return err - } - } - opt.OrgIDs = append(opt.OrgIDs, orgIDOpt) - return nil - } -} - -// ExportWithStackID provides an export for the given stack ID. -func ExportWithStackID(stackID platform.ID) ExportOptFn { - return func(opt *ExportOpt) error { - opt.StackID = stackID - return nil - } -} - -func exportOptFromOptFns(opts []ExportOptFn) (ExportOpt, error) { - var opt ExportOpt - for _, setter := range opts { - if err := setter(&opt); err != nil { - return ExportOpt{}, err - } - } - return opt, nil -} - -// Export will produce a templates from the parameters provided. -func (s *Service) Export(ctx context.Context, setters ...ExportOptFn) (*Template, error) { - opt, err := exportOptFromOptFns(setters) - if err != nil { - return nil, err - } - - var stack Stack - if opt.StackID != 0 { - stack, err = s.store.ReadStackByID(ctx, opt.StackID) - if err != nil { - return nil, err - } - - var opts []ExportOptFn - for _, r := range stack.LatestEvent().Resources { - opts = append(opts, ExportWithExistingResources(ResourceToClone{ - Kind: r.Kind, - ID: r.ID, - MetaName: r.MetaName, - Name: r.Name, - })) - } - - opt, err = exportOptFromOptFns(append(setters, opts...)) - if err != nil { - return nil, err - } - } - - exporter := newResourceExporter(s) - - for _, orgIDOpt := range opt.OrgIDs { - resourcesToClone, err := s.cloneOrgResources(ctx, orgIDOpt.OrgID, orgIDOpt.ResourceKinds) - if err != nil { - return nil, internalErr(err) - } - - if err := exporter.Export(ctx, resourcesToClone, orgIDOpt.LabelNames...); err != nil { - return nil, internalErr(err) - } - } - - if err := exporter.Export(ctx, opt.Resources); err != nil { - return nil, internalErr(err) - } - - template := &Template{Objects: exporter.Objects()} - if err := template.Validate(ValidWithoutResources()); err != nil { - return nil, failedValidationErr(err) - } - - return template, nil -} - -func (s *Service) cloneOrgResources(ctx context.Context, orgID platform.ID, resourceKinds []Kind) ([]ResourceToClone, error) { - var resources []ResourceToClone - for _, resGen := range s.filterOrgResourceKinds(resourceKinds) { - existingResources, err := resGen.cloneFn(ctx, orgID) - if err != nil { - return nil, ierrors.Wrap(err, "finding "+string(resGen.resType)) - } - resources = append(resources, existingResources...) 
- } - - return resources, nil -} - -func (s *Service) cloneOrgBuckets(ctx context.Context, orgID platform.ID) ([]ResourceToClone, error) { - buckets, _, err := s.bucketSVC.FindBuckets(ctx, influxdb.BucketFilter{ - OrganizationID: &orgID, - }) - if err != nil { - return nil, err - } - - resources := make([]ResourceToClone, 0, len(buckets)) - for _, b := range buckets { - if b.Type == influxdb.BucketTypeSystem { - continue - } - resources = append(resources, ResourceToClone{ - Kind: KindBucket, - ID: b.ID, - Name: b.Name, - }) - } - return resources, nil -} - -func (s *Service) cloneOrgChecks(ctx context.Context, orgID platform.ID) ([]ResourceToClone, error) { - checks, _, err := s.checkSVC.FindChecks(ctx, influxdb.CheckFilter{ - OrgID: &orgID, - }) - if err != nil { - return nil, err - } - - resources := make([]ResourceToClone, 0, len(checks)) - for _, c := range checks { - resources = append(resources, ResourceToClone{ - Kind: KindCheck, - ID: c.GetID(), - Name: c.GetName(), - }) - } - return resources, nil -} - -func (s *Service) cloneOrgDashboards(ctx context.Context, orgID platform.ID) ([]ResourceToClone, error) { - dashs, _, err := s.dashSVC.FindDashboards(ctx, influxdb.DashboardFilter{ - OrganizationID: &orgID, - }, influxdb.FindOptions{Limit: 100}) - if err != nil { - return nil, err - } - - resources := make([]ResourceToClone, 0, len(dashs)) - for _, d := range dashs { - resources = append(resources, ResourceToClone{ - Kind: KindDashboard, - ID: d.ID, - }) - } - return resources, nil -} - -func (s *Service) cloneOrgLabels(ctx context.Context, orgID platform.ID) ([]ResourceToClone, error) { - filter := influxdb.LabelFilter{ - OrgID: &orgID, - } - - labels, err := s.labelSVC.FindLabels(ctx, filter, influxdb.FindOptions{Limit: 100}) - if err != nil { - return nil, ierrors.Wrap(err, "finding labels") - } - - resources := make([]ResourceToClone, 0, len(labels)) - for _, l := range labels { - resources = append(resources, ResourceToClone{ - Kind: KindLabel, - ID: l.ID, - Name: l.Name, - }) - } - return resources, nil -} - -func (s *Service) cloneOrgNotificationEndpoints(ctx context.Context, orgID platform.ID) ([]ResourceToClone, error) { - endpoints, _, err := s.endpointSVC.FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{ - OrgID: &orgID, - }) - if err != nil { - return nil, err - } - - resources := make([]ResourceToClone, 0, len(endpoints)) - for _, e := range endpoints { - resources = append(resources, ResourceToClone{ - Kind: KindNotificationEndpoint, - ID: e.GetID(), - Name: e.GetName(), - }) - } - return resources, nil -} - -func (s *Service) cloneOrgNotificationRules(ctx context.Context, orgID platform.ID) ([]ResourceToClone, error) { - rules, _, err := s.ruleSVC.FindNotificationRules(ctx, influxdb.NotificationRuleFilter{ - OrgID: &orgID, - }) - if err != nil { - return nil, err - } - - resources := make([]ResourceToClone, 0, len(rules)) - for _, r := range rules { - resources = append(resources, ResourceToClone{ - Kind: KindNotificationRule, - ID: r.GetID(), - Name: r.GetName(), - }) - } - return resources, nil -} - -func (s *Service) cloneOrgTasks(ctx context.Context, orgID platform.ID) ([]ResourceToClone, error) { - tasks, err := s.getAllTasks(ctx, orgID) - if err != nil { - return nil, err - } - - if len(tasks) == 0 { - return nil, nil - } - - checks, err := s.getAllChecks(ctx, orgID) - if err != nil { - return nil, err - } - - rules, err := s.getNotificationRules(ctx, orgID) - if err != nil { - return nil, err - } - - mTasks := 
make(map[platform.ID]*taskmodel.Task) - for i := range tasks { - t := tasks[i] - if t.Type != taskmodel.TaskSystemType { - continue - } - mTasks[t.ID] = t - } - for _, c := range checks { - delete(mTasks, c.GetTaskID()) - } - for _, r := range rules { - delete(mTasks, r.GetTaskID()) - } - - resources := make([]ResourceToClone, 0, len(mTasks)) - for _, t := range mTasks { - resources = append(resources, ResourceToClone{ - Kind: KindTask, - ID: t.ID, - }) - } - return resources, nil -} - -func (s *Service) cloneOrgTelegrafs(ctx context.Context, orgID platform.ID) ([]ResourceToClone, error) { - teles, _, err := s.teleSVC.FindTelegrafConfigs(ctx, influxdb.TelegrafConfigFilter{OrgID: &orgID}) - if err != nil { - return nil, err - } - - resources := make([]ResourceToClone, 0, len(teles)) - for _, t := range teles { - resources = append(resources, ResourceToClone{ - Kind: KindTelegraf, - ID: t.ID, - }) - } - return resources, nil -} - -func (s *Service) cloneOrgVariables(ctx context.Context, orgID platform.ID) ([]ResourceToClone, error) { - vars, err := s.varSVC.FindVariables(ctx, influxdb.VariableFilter{ - OrganizationID: &orgID, - }, influxdb.FindOptions{Limit: 10000}) - if err != nil { - return nil, err - } - - resources := make([]ResourceToClone, 0, len(vars)) - for _, v := range vars { - resources = append(resources, ResourceToClone{ - Kind: KindVariable, - ID: v.ID, - }) - } - - return resources, nil -} - -type ( - cloneResFn func(context.Context, platform.ID) ([]ResourceToClone, error) - resClone struct { - resType influxdb.ResourceType - cloneFn cloneResFn - } -) - -func (s *Service) filterOrgResourceKinds(resourceKindFilters []Kind) []resClone { - mKinds := map[Kind]cloneResFn{ - KindBucket: s.cloneOrgBuckets, - KindCheck: s.cloneOrgChecks, - KindDashboard: s.cloneOrgDashboards, - KindLabel: s.cloneOrgLabels, - KindNotificationEndpoint: s.cloneOrgNotificationEndpoints, - KindNotificationRule: s.cloneOrgNotificationRules, - KindTask: s.cloneOrgTasks, - KindTelegraf: s.cloneOrgTelegrafs, - KindVariable: s.cloneOrgVariables, - } - - newResGen := func(resType influxdb.ResourceType, cloneFn cloneResFn) resClone { - return resClone{ - resType: resType, - cloneFn: cloneFn, - } - } - - var resourceTypeGens []resClone - if len(resourceKindFilters) == 0 { - for k, cloneFn := range mKinds { - resourceTypeGens = append(resourceTypeGens, newResGen(k.ResourceType(), cloneFn)) - } - return resourceTypeGens - } - - seenKinds := make(map[Kind]bool) - for _, k := range resourceKindFilters { - cloneFn, ok := mKinds[k] - if !ok || seenKinds[k] { - continue - } - seenKinds[k] = true - resourceTypeGens = append(resourceTypeGens, newResGen(k.ResourceType(), cloneFn)) - } - - return resourceTypeGens -} - -// ImpactSummary represents the impact the application of a template will have on the system. 
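// The fields below are populated by DryRun and Apply: Sources records where the template(s)
// came from, StackID identifies the stack the run is associated with, Diff describes the
// planned changes, and Summary describes the resulting resources.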
-type ImpactSummary struct {
- Sources []string
- StackID platform.ID
- Diff Diff
- Summary Summary
-}
-
-var reCommunityTemplatesValidAddr = regexp.MustCompile(`(?:https://raw\.githubusercontent\.com/influxdata/community-templates/master/)(?P<name>\w+)(?:/.*)`)
-
-func (i *ImpactSummary) communityName() string {
- if len(i.Sources) == 0 {
- return "custom"
- }
-
- // pull name `name` from community url https://raw.githubusercontent.com/influxdata/community-templates/master/name/name_template.yml
- for j := range i.Sources {
- finds := reCommunityTemplatesValidAddr.FindStringSubmatch(i.Sources[j])
- if len(finds) == 2 {
- return finds[1]
- }
- }
-
- return "custom"
-}
-
-// DryRun provides a dry run of the template application. The template will be marked verified
-// for later calls to Apply. This func will be run on an Apply if it has not been run
-// already.
-func (s *Service) DryRun(ctx context.Context, orgID, userID platform.ID, opts ...ApplyOptFn) (ImpactSummary, error) {
- opt := applyOptFromOptFns(opts...)
- template, err := s.templateFromApplyOpts(ctx, opt)
- if err != nil {
- return ImpactSummary{}, err
- }
-
- state, err := s.dryRun(ctx, orgID, template, opt)
- if err != nil {
- return ImpactSummary{}, err
- }
-
- return ImpactSummary{
- Sources: template.sources,
- StackID: opt.StackID,
- Diff: state.diff(),
- Summary: newSummaryFromStateTemplate(state, template),
- }, nil
-}
-
-func (s *Service) dryRun(ctx context.Context, orgID platform.ID, template *Template, opt ApplyOpt) (*stateCoordinator, error) {
- // so here's the deal, when we have issues with the parsing validation, we
- // continue to do the diff anyhow. any resource that does not have a name
- // will be skipped, and won't bleed into the dry run here. We can now return
- // an error (parseErr) and valid diff/summary.
- var parseErr error - err := template.Validate(ValidWithoutResources()) - if err != nil && !IsParseErr(err) { - return nil, internalErr(err) - } - parseErr = err - - if len(opt.EnvRefs) > 0 { - err := template.applyEnvRefs(opt.EnvRefs) - if err != nil && !IsParseErr(err) { - return nil, internalErr(err) - } - parseErr = err - } - - state := newStateCoordinator(template, resourceActions{ - skipKinds: opt.KindsToSkip, - skipResources: opt.ResourcesToSkip, - }) - - if opt.StackID > 0 { - if err := s.addStackState(ctx, opt.StackID, state); err != nil { - return nil, internalErr(err) - } - } - - if err := s.dryRunSecrets(ctx, orgID, template); err != nil { - return nil, err - } - - s.dryRunBuckets(ctx, orgID, state.mBuckets) - s.dryRunChecks(ctx, orgID, state.mChecks) - s.dryRunDashboards(ctx, orgID, state.mDashboards) - s.dryRunLabels(ctx, orgID, state.mLabels) - s.dryRunTasks(ctx, orgID, state.mTasks) - s.dryRunTelegrafConfigs(ctx, orgID, state.mTelegrafs) - s.dryRunVariables(ctx, orgID, state.mVariables) - - err = s.dryRunNotificationEndpoints(ctx, orgID, state.mEndpoints) - if err != nil { - return nil, ierrors.Wrap(err, "failed to dry run notification endpoints") - } - - err = s.dryRunNotificationRules(ctx, orgID, state.mRules, state.mEndpoints) - if err != nil { - return nil, err - } - - stateLabelMappings, err := s.dryRunLabelMappings(ctx, state) - if err != nil { - return nil, err - } - state.labelMappings = stateLabelMappings - - return state, parseErr -} - -func (s *Service) dryRunBuckets(ctx context.Context, orgID platform.ID, bkts map[string]*stateBucket) { - for _, stateBkt := range bkts { - stateBkt.orgID = orgID - var existing *influxdb.Bucket - if stateBkt.ID() != 0 { - existing, _ = s.bucketSVC.FindBucketByID(ctx, stateBkt.ID()) - } else { - existing, _ = s.bucketSVC.FindBucketByName(ctx, orgID, stateBkt.parserBkt.Name()) - } - if IsNew(stateBkt.stateStatus) && existing != nil { - stateBkt.stateStatus = StateStatusExists - } - stateBkt.existing = existing - } -} - -func (s *Service) dryRunChecks(ctx context.Context, orgID platform.ID, checks map[string]*stateCheck) { - for _, c := range checks { - c.orgID = orgID - - var existing influxdb.Check - if c.ID() != 0 { - existing, _ = s.checkSVC.FindCheckByID(ctx, c.ID()) - } else { - name := c.parserCheck.Name() - existing, _ = s.checkSVC.FindCheck(ctx, influxdb.CheckFilter{ - Name: &name, - OrgID: &orgID, - }) - } - if IsNew(c.stateStatus) && existing != nil { - c.stateStatus = StateStatusExists - } - c.existing = existing - } -} - -func (s *Service) dryRunDashboards(ctx context.Context, orgID platform.ID, dashs map[string]*stateDashboard) { - for _, stateDash := range dashs { - stateDash.orgID = orgID - var existing *influxdb.Dashboard - if stateDash.ID() != 0 { - existing, _ = s.dashSVC.FindDashboardByID(ctx, stateDash.ID()) - } - if IsNew(stateDash.stateStatus) && existing != nil { - stateDash.stateStatus = StateStatusExists - } - stateDash.existing = existing - } -} - -func (s *Service) dryRunLabels(ctx context.Context, orgID platform.ID, labels map[string]*stateLabel) { - for _, l := range labels { - l.orgID = orgID - existingLabel, _ := s.findLabel(ctx, orgID, l) - if IsNew(l.stateStatus) && existingLabel != nil { - l.stateStatus = StateStatusExists - } - l.existing = existingLabel - } -} - -func (s *Service) dryRunNotificationEndpoints(ctx context.Context, orgID platform.ID, endpoints map[string]*stateEndpoint) error { - existingEndpoints, _, err := s.endpointSVC.FindNotificationEndpoints(ctx, 
influxdb.NotificationEndpointFilter{ - OrgID: &orgID, - }) // grab em all - if err != nil { - return internalErr(err) - } - - mExistingByName := make(map[string]influxdb.NotificationEndpoint) - mExistingByID := make(map[platform.ID]influxdb.NotificationEndpoint) - for i := range existingEndpoints { - e := existingEndpoints[i] - mExistingByName[e.GetName()] = e - mExistingByID[e.GetID()] = e - } - - findEndpoint := func(e *stateEndpoint) influxdb.NotificationEndpoint { - if iExisting, ok := mExistingByID[e.ID()]; ok { - return iExisting - } - if iExisting, ok := mExistingByName[e.parserEndpoint.Name()]; ok { - return iExisting - } - return nil - } - - for _, newEndpoint := range endpoints { - existing := findEndpoint(newEndpoint) - if IsNew(newEndpoint.stateStatus) && existing != nil { - newEndpoint.stateStatus = StateStatusExists - } - newEndpoint.existing = existing - } - - return nil -} - -func (s *Service) dryRunNotificationRules(ctx context.Context, orgID platform.ID, rules map[string]*stateRule, endpoints map[string]*stateEndpoint) error { - for _, rule := range rules { - rule.orgID = orgID - var existing influxdb.NotificationRule - if rule.ID() != 0 { - existing, _ = s.ruleSVC.FindNotificationRuleByID(ctx, rule.ID()) - } - rule.existing = existing - } - - for _, r := range rules { - if r.associatedEndpoint != nil { - continue - } - - e, ok := endpoints[r.parserRule.endpointMetaName()] - if !IsRemoval(r.stateStatus) && !ok { - err := fmt.Errorf("failed to find notification endpoint %q dependency for notification rule %q", r.parserRule.endpointName, r.parserRule.MetaName()) - return &errors2.Error{ - Code: errors2.EUnprocessableEntity, - Err: err, - } - } - r.associatedEndpoint = e - } - - return nil -} - -func (s *Service) dryRunSecrets(ctx context.Context, orgID platform.ID, template *Template) error { - templateSecrets := template.mSecrets - if len(templateSecrets) == 0 { - return nil - } - - existingSecrets, err := s.secretSVC.GetSecretKeys(ctx, orgID) - if err != nil { - return &errors2.Error{Code: errors2.EInternal, Err: err} - } - - for _, secret := range existingSecrets { - templateSecrets[secret] = true // marked true since it exists in the platform - } - - return nil -} - -func (s *Service) dryRunTasks(ctx context.Context, orgID platform.ID, tasks map[string]*stateTask) { - for _, stateTask := range tasks { - stateTask.orgID = orgID - var existing *taskmodel.Task - if stateTask.ID() != 0 { - existing, _ = s.taskSVC.FindTaskByID(ctx, stateTask.ID()) - } - if IsNew(stateTask.stateStatus) && existing != nil { - stateTask.stateStatus = StateStatusExists - } - stateTask.existing = existing - } -} - -func (s *Service) dryRunTelegrafConfigs(ctx context.Context, orgID platform.ID, teleConfigs map[string]*stateTelegraf) { - for _, stateTele := range teleConfigs { - stateTele.orgID = orgID - var existing *influxdb.TelegrafConfig - if stateTele.ID() != 0 { - existing, _ = s.teleSVC.FindTelegrafConfigByID(ctx, stateTele.ID()) - } - if IsNew(stateTele.stateStatus) && existing != nil { - stateTele.stateStatus = StateStatusExists - } - stateTele.existing = existing - } -} - -func (s *Service) dryRunVariables(ctx context.Context, orgID platform.ID, vars map[string]*stateVariable) { - existingVars, _ := s.getAllPlatformVariables(ctx, orgID) - - mIDs := make(map[platform.ID]*influxdb.Variable) - mNames := make(map[string]*influxdb.Variable) - for _, v := range existingVars { - mIDs[v.ID] = v - mNames[v.Name] = v - } - - for _, v := range vars { - existing := mNames[v.parserVar.Name()] - if 
v.ID() != 0 { - existing = mIDs[v.ID()] - } - if IsNew(v.stateStatus) && existing != nil { - v.stateStatus = StateStatusExists - } - v.existing = existing - } -} - -func (s *Service) dryRunLabelMappings(ctx context.Context, state *stateCoordinator) ([]stateLabelMapping, error) { - stateLabelsByResName := make(map[string]*stateLabel) - for _, l := range state.mLabels { - if IsRemoval(l.stateStatus) { - continue - } - stateLabelsByResName[l.parserLabel.Name()] = l - } - - var mappings []stateLabelMapping - for _, b := range state.mBuckets { - if IsRemoval(b.stateStatus) { - continue - } - mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, b) - if err != nil { - return nil, err - } - mappings = append(mappings, mm...) - } - - for _, c := range state.mChecks { - if IsRemoval(c.stateStatus) { - continue - } - mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, c) - if err != nil { - return nil, err - } - mappings = append(mappings, mm...) - } - - for _, d := range state.mDashboards { - if IsRemoval(d.stateStatus) { - continue - } - mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, d) - if err != nil { - return nil, err - } - mappings = append(mappings, mm...) - } - - for _, e := range state.mEndpoints { - if IsRemoval(e.stateStatus) { - continue - } - mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, e) - if err != nil { - return nil, err - } - mappings = append(mappings, mm...) - } - - for _, r := range state.mRules { - if IsRemoval(r.stateStatus) { - continue - } - mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, r) - if err != nil { - return nil, err - } - mappings = append(mappings, mm...) - } - - for _, t := range state.mTasks { - if IsRemoval(t.stateStatus) { - continue - } - mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, t) - if err != nil { - return nil, err - } - mappings = append(mappings, mm...) - } - - for _, t := range state.mTelegrafs { - if IsRemoval(t.stateStatus) { - continue - } - mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, t) - if err != nil { - return nil, err - } - mappings = append(mappings, mm...) - } - - for _, v := range state.mVariables { - if IsRemoval(v.stateStatus) { - continue - } - mm, err := s.dryRunResourceLabelMapping(ctx, state, stateLabelsByResName, v) - if err != nil { - return nil, err - } - mappings = append(mappings, mm...) 
- } - - return mappings, nil -} - -func (s *Service) dryRunResourceLabelMapping(ctx context.Context, state *stateCoordinator, stateLabelsByResName map[string]*stateLabel, associatedResource interface { - labels() []*stateLabel - stateIdentity() stateIdentity -}) ([]stateLabelMapping, error) { - - ident := associatedResource.stateIdentity() - templateResourceLabels := associatedResource.labels() - - var mappings []stateLabelMapping - if !ident.exists() { - for _, l := range templateResourceLabels { - mappings = append(mappings, stateLabelMapping{ - status: StateStatusNew, - resource: associatedResource, - label: l, - }) - } - return mappings, nil - } - - existingLabels, err := s.labelSVC.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ - ResourceID: ident.id, - ResourceType: ident.resourceType, - }) - if err != nil && errors2.ErrorCode(err) != errors2.ENotFound { - msgFmt := fmt.Sprintf("failed to find labels mappings for %s resource[%q]", ident.resourceType, ident.id) - return nil, ierrors.Wrap(err, msgFmt) - } - - templateLabels := labelSlcToMap(templateResourceLabels) - for _, l := range existingLabels { - // if label is found in state then we track the mapping and mark it existing - // otherwise we continue on - delete(templateLabels, l.Name) - if sLabel, ok := stateLabelsByResName[l.Name]; ok { - mappings = append(mappings, stateLabelMapping{ - status: StateStatusExists, - resource: associatedResource, - label: sLabel, - }) - } - } - - // now we add labels that do not exist - for _, l := range templateLabels { - stLabel, found := state.getLabelByMetaName(l.MetaName()) - if !found { - continue - } - mappings = append(mappings, stateLabelMapping{ - status: StateStatusNew, - resource: associatedResource, - label: stLabel, - }) - } - - return mappings, nil -} - -func (s *Service) addStackState(ctx context.Context, stackID platform.ID, state *stateCoordinator) error { - stack, err := s.store.ReadStackByID(ctx, stackID) - if err != nil { - return ierrors.Wrap(err, "reading stack") - } - - state.addStackState(stack) - return nil -} - -type ( - // ApplyOpt is an option for applying a package. - ApplyOpt struct { - Templates []*Template - EnvRefs map[string]interface{} - MissingSecrets map[string]string - StackID platform.ID - ResourcesToSkip map[ActionSkipResource]bool - KindsToSkip map[Kind]bool - } - - // ActionSkipResource provides an action from the consumer to use the template with - // modifications to the resource kind and template name that will be applied. - ActionSkipResource struct { - Kind Kind `json:"kind"` - MetaName string `json:"resourceTemplateName"` - } - - // ActionSkipKind provides an action from the consumer to use the template with - // modifications to the resource kinds will be applied. - ActionSkipKind struct { - Kind Kind `json:"kind"` - } - - // ApplyOptFn updates the ApplyOpt per the functional option. - ApplyOptFn func(opt *ApplyOpt) -) - -// ApplyWithEnvRefs provides env refs to saturate the missing reference fields in the template. -func ApplyWithEnvRefs(envRefs map[string]interface{}) ApplyOptFn { - return func(o *ApplyOpt) { - o.EnvRefs = envRefs - } -} - -// ApplyWithTemplate provides a template to the application/dry run. -func ApplyWithTemplate(template *Template) ApplyOptFn { - return func(opt *ApplyOpt) { - opt.Templates = append(opt.Templates, template) - } -} - -// ApplyWithResourceSkip provides an action skip a resource in the application of a template. 
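// Illustrative usage (not from the original file; svc, template, orgID, userID, and the meta
// name are placeholders):
//
//	impact, err := svc.DryRun(ctx, orgID, userID,
//		ApplyWithTemplate(template),
//		ApplyWithResourceSkip(ActionSkipResource{Kind: KindBucket, MetaName: "my-bucket"}),
//	)
//
// Check and notification-endpoint sub-kinds are normalized in the function below, so skipping
// KindCheckDeadman has the same effect as skipping KindCheck for that resource.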
-func ApplyWithResourceSkip(action ActionSkipResource) ApplyOptFn {
- return func(opt *ApplyOpt) {
- if opt.ResourcesToSkip == nil {
- opt.ResourcesToSkip = make(map[ActionSkipResource]bool)
- }
- switch action.Kind {
- case KindCheckDeadman, KindCheckThreshold:
- action.Kind = KindCheck
- case KindNotificationEndpointHTTP,
- KindNotificationEndpointPagerDuty,
- KindNotificationEndpointSlack:
- action.Kind = KindNotificationEndpoint
- }
- opt.ResourcesToSkip[action] = true
- }
-}
-
-// ApplyWithKindSkip provides an action to skip a kind in the application of a template.
-func ApplyWithKindSkip(action ActionSkipKind) ApplyOptFn {
- return func(opt *ApplyOpt) {
- if opt.KindsToSkip == nil {
- opt.KindsToSkip = make(map[Kind]bool)
- }
- switch action.Kind {
- case KindCheckDeadman, KindCheckThreshold:
- action.Kind = KindCheck
- case KindNotificationEndpointHTTP,
- KindNotificationEndpointPagerDuty,
- KindNotificationEndpointSlack:
- action.Kind = KindNotificationEndpoint
- }
- opt.KindsToSkip[action.Kind] = true
- }
-}
-
-// ApplyWithSecrets provides secrets to the platform that the template will need.
-func ApplyWithSecrets(secrets map[string]string) ApplyOptFn {
- return func(o *ApplyOpt) {
- o.MissingSecrets = secrets
- }
-}
-
-// ApplyWithStackID associates the application of a template with a stack.
-func ApplyWithStackID(stackID platform.ID) ApplyOptFn {
- return func(o *ApplyOpt) {
- o.StackID = stackID
- }
-}
-
-func applyOptFromOptFns(opts ...ApplyOptFn) ApplyOpt {
- var opt ApplyOpt
- for _, o := range opts {
- o(&opt)
- }
- return opt
-}
-
-// Apply will apply all the resources identified in the provided template. The entire template will be applied
-// in its entirety. If a failure happens midway then the entire template will be rolled back to the state
-// from before the template was applied.
-func (s *Service) Apply(ctx context.Context, orgID, userID platform.ID, opts ...ApplyOptFn) (impact ImpactSummary, e error) {
- opt := applyOptFromOptFns(opts...)
-
- template, err := s.templateFromApplyOpts(ctx, opt)
- if err != nil {
- return ImpactSummary{}, err
- }
-
- if err := template.Validate(ValidWithoutResources()); err != nil {
- return ImpactSummary{}, failedValidationErr(err)
- }
-
- if err := template.applyEnvRefs(opt.EnvRefs); err != nil {
- return ImpactSummary{}, failedValidationErr(err)
- }
-
- state, err := s.dryRun(ctx, orgID, template, opt)
- if err != nil {
- return ImpactSummary{}, err
- }
-
- stackID := opt.StackID
- // if stackID is not provided, a stack will be created for the application.
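// Note: if the apply later fails and the stack was auto-created here (opt.StackID == 0), the
// deferred cleanup below deletes that stack again, so a failed ad-hoc apply does not leave an
// empty stack behind.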
- if stackID == 0 {
- newStack, err := s.InitStack(ctx, userID, StackCreate{OrgID: orgID})
- if err != nil {
- return ImpactSummary{}, err
- }
- stackID = newStack.ID
- }
-
- defer func(stackID platform.ID) {
- updateStackFn := s.updateStackAfterSuccess
- if e != nil {
- updateStackFn = s.updateStackAfterRollback
- if opt.StackID == 0 {
- if err := s.store.DeleteStack(ctx, stackID); err != nil {
- s.log.Error("failed to delete created stack", zap.Error(err))
- }
- }
- }
-
- err := updateStackFn(ctx, stackID, state, template.Sources())
- if err != nil {
- s.log.Error("failed to update stack", zap.Error(err))
- }
- }(stackID)
-
- coordinator := newRollbackCoordinator(s.log, s.applyReqLimit)
- defer coordinator.rollback(s.log, &e, orgID)
-
- err = s.applyState(ctx, coordinator, orgID, userID, state, opt.MissingSecrets)
- if err != nil {
- return ImpactSummary{}, err
- }
-
- template.applySecrets(opt.MissingSecrets)
-
- return ImpactSummary{
- Sources: template.sources,
- StackID: stackID,
- Diff: state.diff(),
- Summary: newSummaryFromStateTemplate(state, template),
- }, nil
-}
-
-func (s *Service) applyState(ctx context.Context, coordinator *rollbackCoordinator, orgID, userID platform.ID, state *stateCoordinator, missingSecrets map[string]string) (e error) {
- endpointApp, ruleApp, err := s.applyNotificationGenerator(ctx, userID, state.rules(), state.endpoints())
- if err != nil {
- return ierrors.Wrap(err, "failed to setup notification generator")
- }
-
- // each grouping here runs for its entirety, then returns an error that
- // is indicative of running all appliers provided. For instance, one of the
- // labels may fail and one of the buckets may fail. The errors aggregate so
- // the caller will be informed of both the failed label and the failed bucket.
- // The groupings here allow for steps to occur before exiting. The first step is
- // adding the dependencies, resources that are associated by other resources. Then the
- // primary resources. Here we get all the errors associated with them.
- // If those are all good, then we run the secondary (dependent) resources which
- // rely on the primary resources having been created.
- appliers := [][]applier{
- {
- // adds secrets that are referenced in the template; this allows the user to
- // provide data that does not rest in the template.
- s.applySecrets(missingSecrets),
- },
- {
- // deps for primary resources
- s.applyLabels(ctx, state.labels()),
- },
- {
- // primary resources, can have relationships to labels
- s.applyVariables(ctx, state.variables()),
- s.applyBuckets(ctx, state.buckets()),
- s.applyChecks(ctx, state.checks()),
- s.applyDashboards(ctx, state.dashboards()),
- endpointApp,
- s.applyTasks(ctx, state.tasks()),
- s.applyTelegrafs(ctx, userID, state.telegrafConfigs()),
- },
- }
-
- for _, group := range appliers {
- if err := coordinator.runTilEnd(ctx, orgID, userID, group...); err != nil {
- return internalErr(err)
- }
- }
-
- // this has to be run after the above primary resources, because it relies on
- // notification endpoints already being applied.
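// Concretely, each rule needs the ID of its notification endpoint, and those IDs are only
// assigned once endpointApp has run; applyNotificationGenerator wires that dependency up and
// couples the rollback of endpoints and rules for the same reason.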
- if err := coordinator.runTilEnd(ctx, orgID, userID, ruleApp); err != nil { - return err - } - - // secondary resources - // this last grouping relies on the above 2 steps having completely successfully - secondary := []applier{ - s.applyLabelMappings(ctx, state.labelMappings), - s.removeLabelMappings(ctx, state.labelMappingsToRemove), - } - if err := coordinator.runTilEnd(ctx, orgID, userID, secondary...); err != nil { - return internalErr(err) - } - - return nil -} - -func (s *Service) applyBuckets(ctx context.Context, buckets []*stateBucket) applier { - const resource = "bucket" - - mutex := new(doMutex) - rollbackBuckets := make([]*stateBucket, 0, len(buckets)) - - createFn := func(ctx context.Context, i int, orgID, userID platform.ID) *applyErrBody { - var b *stateBucket - mutex.Do(func() { - buckets[i].orgID = orgID - b = buckets[i] - }) - if !b.shouldApply() { - return nil - } - - influxBucket, err := s.applyBucket(ctx, b) - if err != nil { - return &applyErrBody{ - name: b.parserBkt.MetaName(), - msg: err.Error(), - } - } - - mutex.Do(func() { - buckets[i].id = influxBucket.ID - rollbackBuckets = append(rollbackBuckets, buckets[i]) - }) - - return nil - } - - return applier{ - creater: creater{ - entries: len(buckets), - fn: createFn, - }, - rollbacker: rollbacker{ - resource: resource, - fn: func(_ platform.ID) error { return s.rollbackBuckets(ctx, rollbackBuckets) }, - }, - } -} - -func (s *Service) rollbackBuckets(ctx context.Context, buckets []*stateBucket) error { - rollbackFn := func(b *stateBucket) error { - if !IsNew(b.stateStatus) && b.existing == nil || isSystemBucket(b.existing) { - return nil - } - - var err error - switch { - case IsRemoval(b.stateStatus): - err = ierrors.Wrap(s.bucketSVC.CreateBucket(ctx, b.existing), "rolling back removed bucket") - case IsExisting(b.stateStatus): - _, err = s.bucketSVC.UpdateBucket(ctx, b.ID(), influxdb.BucketUpdate{ - Description: &b.existing.Description, - RetentionPeriod: &b.existing.RetentionPeriod, - }) - err = ierrors.Wrap(err, "rolling back existing bucket to previous state") - default: - err = ierrors.Wrap(s.bucketSVC.DeleteBucket(ctx, b.ID()), "rolling back new bucket") - } - return err - } - - var errs []string - for _, b := range buckets { - if err := rollbackFn(b); err != nil { - errs = append(errs, fmt.Sprintf("error for bucket[%q]: %s", b.ID(), err)) - } - } - - if len(errs) > 0 { - // TODO: fixup error - return errors.New(strings.Join(errs, ", ")) - } - - return nil -} - -func (s *Service) applyBucket(ctx context.Context, b *stateBucket) (influxdb.Bucket, error) { - if isSystemBucket(b.existing) { - return *b.existing, nil - } - switch { - case IsRemoval(b.stateStatus): - if err := s.bucketSVC.DeleteBucket(ctx, b.ID()); err != nil { - if errors2.ErrorCode(err) == errors2.ENotFound { - return influxdb.Bucket{}, nil - } - return influxdb.Bucket{}, applyFailErr("delete", b.stateIdentity(), err) - } - return *b.existing, nil - case IsExisting(b.stateStatus) && b.existing != nil: - rp := b.parserBkt.RetentionRules.RP() - newName := b.parserBkt.Name() - influxBucket, err := s.bucketSVC.UpdateBucket(ctx, b.ID(), influxdb.BucketUpdate{ - Description: &b.parserBkt.Description, - Name: &newName, - RetentionPeriod: &rp, - }) - if err != nil { - return influxdb.Bucket{}, applyFailErr("update", b.stateIdentity(), err) - } - return *influxBucket, nil - default: - rp := b.parserBkt.RetentionRules.RP() - influxBucket := influxdb.Bucket{ - OrgID: b.orgID, - Description: b.parserBkt.Description, - Name: b.parserBkt.Name(), - 
RetentionPeriod: rp, - } - err := s.bucketSVC.CreateBucket(ctx, &influxBucket) - if err != nil { - return influxdb.Bucket{}, applyFailErr("create", b.stateIdentity(), err) - } - return influxBucket, nil - } -} - -func (s *Service) applyChecks(ctx context.Context, checks []*stateCheck) applier { - const resource = "check" - - mutex := new(doMutex) - rollbackChecks := make([]*stateCheck, 0, len(checks)) - - createFn := func(ctx context.Context, i int, orgID, userID platform.ID) *applyErrBody { - var c *stateCheck - mutex.Do(func() { - checks[i].orgID = orgID - c = checks[i] - }) - - influxCheck, err := s.applyCheck(ctx, c, userID) - if err != nil { - return &applyErrBody{ - name: c.parserCheck.MetaName(), - msg: err.Error(), - } - } - - mutex.Do(func() { - checks[i].id = influxCheck.GetID() - rollbackChecks = append(rollbackChecks, checks[i]) - }) - - return nil - } - - return applier{ - creater: creater{ - entries: len(checks), - fn: createFn, - }, - rollbacker: rollbacker{ - resource: resource, - fn: func(_ platform.ID) error { return s.rollbackChecks(ctx, rollbackChecks) }, - }, - } -} - -func (s *Service) rollbackChecks(ctx context.Context, checks []*stateCheck) error { - rollbackFn := func(c *stateCheck) error { - var err error - switch { - case IsRemoval(c.stateStatus): - err = s.checkSVC.CreateCheck( - ctx, - influxdb.CheckCreate{ - Check: c.existing, - Status: c.parserCheck.Status(), - }, - c.existing.GetOwnerID(), - ) - c.id = c.existing.GetID() - case IsExisting(c.stateStatus): - if c.existing == nil { - return nil - } - _, err = s.checkSVC.UpdateCheck(ctx, c.ID(), influxdb.CheckCreate{ - Check: c.summarize().Check, - Status: influxdb.Status(c.parserCheck.status), - }) - default: - err = s.checkSVC.DeleteCheck(ctx, c.ID()) - } - return err - } - - var errs []string - for _, c := range checks { - if err := rollbackFn(c); err != nil { - errs = append(errs, fmt.Sprintf("error for check[%q]: %s", c.ID(), err)) - } - } - - if len(errs) > 0 { - return errors.New(strings.Join(errs, "; ")) - } - - return nil -} - -func (s *Service) applyCheck(ctx context.Context, c *stateCheck, userID platform.ID) (influxdb.Check, error) { - switch { - case IsRemoval(c.stateStatus): - if err := s.checkSVC.DeleteCheck(ctx, c.ID()); err != nil { - if errors2.ErrorCode(err) == errors2.ENotFound { - return &icheck.Threshold{Base: icheck.Base{ID: c.ID()}}, nil - } - return nil, applyFailErr("delete", c.stateIdentity(), err) - } - return c.existing, nil - case IsExisting(c.stateStatus) && c.existing != nil: - influxCheck, err := s.checkSVC.UpdateCheck(ctx, c.ID(), influxdb.CheckCreate{ - Check: c.summarize().Check, - Status: c.parserCheck.Status(), - }) - if err != nil { - return nil, applyFailErr("update", c.stateIdentity(), err) - } - return influxCheck, nil - default: - checkStub := influxdb.CheckCreate{ - Check: c.summarize().Check, - Status: c.parserCheck.Status(), - } - err := s.checkSVC.CreateCheck(ctx, checkStub, userID) - if err != nil { - return nil, applyFailErr("create", c.stateIdentity(), err) - } - return checkStub.Check, nil - } -} - -func (s *Service) applyDashboards(ctx context.Context, dashboards []*stateDashboard) applier { - const resource = "dashboard" - - mutex := new(doMutex) - rollbackDashboards := make([]*stateDashboard, 0, len(dashboards)) - - createFn := func(ctx context.Context, i int, orgID, userID platform.ID) *applyErrBody { - var d *stateDashboard - mutex.Do(func() { - dashboards[i].orgID = orgID - d = dashboards[i] - }) - - influxBucket, err := s.applyDashboard(ctx, d) - if err 
!= nil { - return &applyErrBody{ - name: d.parserDash.MetaName(), - msg: err.Error(), - } - } - - mutex.Do(func() { - dashboards[i].id = influxBucket.ID - rollbackDashboards = append(rollbackDashboards, dashboards[i]) - }) - return nil - } - - return applier{ - creater: creater{ - entries: len(dashboards), - fn: createFn, - }, - rollbacker: rollbacker{ - resource: resource, - fn: func(_ platform.ID) error { - return s.rollbackDashboards(ctx, rollbackDashboards) - }, - }, - } -} - -func (s *Service) applyDashboard(ctx context.Context, d *stateDashboard) (influxdb.Dashboard, error) { - switch { - case IsRemoval(d.stateStatus): - if err := s.dashSVC.DeleteDashboard(ctx, d.ID()); err != nil { - if errors2.ErrorCode(err) == errors2.ENotFound { - return influxdb.Dashboard{}, nil - } - return influxdb.Dashboard{}, applyFailErr("delete", d.stateIdentity(), err) - } - return *d.existing, nil - case IsExisting(d.stateStatus) && d.existing != nil: - name := d.parserDash.Name() - cells := convertChartsToCells(d.parserDash.Charts) - dash, err := s.dashSVC.UpdateDashboard(ctx, d.ID(), influxdb.DashboardUpdate{ - Name: &name, - Description: &d.parserDash.Description, - Cells: &cells, - }) - if err != nil { - return influxdb.Dashboard{}, applyFailErr("update", d.stateIdentity(), err) - } - return *dash, nil - default: - cells := convertChartsToCells(d.parserDash.Charts) - influxDashboard := influxdb.Dashboard{ - OrganizationID: d.orgID, - Description: d.parserDash.Description, - Name: d.parserDash.Name(), - Cells: cells, - } - err := s.dashSVC.CreateDashboard(ctx, &influxDashboard) - if err != nil { - return influxdb.Dashboard{}, applyFailErr("create", d.stateIdentity(), err) - } - return influxDashboard, nil - } -} - -func (s *Service) rollbackDashboards(ctx context.Context, dashs []*stateDashboard) error { - rollbackFn := func(d *stateDashboard) error { - if !IsNew(d.stateStatus) && d.existing == nil { - return nil - } - - var err error - switch { - case IsRemoval(d.stateStatus): - err = ierrors.Wrap(s.dashSVC.CreateDashboard(ctx, d.existing), "rolling back removed dashboard") - case IsExisting(d.stateStatus): - _, err := s.dashSVC.UpdateDashboard(ctx, d.ID(), influxdb.DashboardUpdate{ - Name: &d.existing.Name, - Description: &d.existing.Description, - Cells: &d.existing.Cells, - }) - return ierrors.Wrap(err, "failed to update dashboard") - default: - err = ierrors.Wrap(s.dashSVC.DeleteDashboard(ctx, d.ID()), "rolling back new dashboard") - } - return err - } - - var errs []string - for _, d := range dashs { - if err := rollbackFn(d); err != nil { - errs = append(errs, fmt.Sprintf("error for dashboard[%q]: %s", d.ID(), err)) - } - } - - if len(errs) > 0 { - // TODO: fixup error - return errors.New(strings.Join(errs, ", ")) - } - - return nil -} - -func convertChartsToCells(ch []*chart) []*influxdb.Cell { - icells := make([]*influxdb.Cell, 0, len(ch)) - for _, c := range ch { - icell := &influxdb.Cell{ - CellProperty: influxdb.CellProperty{ - X: int32(c.XPos), - Y: int32(c.YPos), - H: int32(c.Height), - W: int32(c.Width), - }, - View: &influxdb.View{ - ViewContents: influxdb.ViewContents{Name: c.Name}, - Properties: c.properties(), - }, - } - icells = append(icells, icell) - } - return icells -} - -func (s *Service) applyLabels(ctx context.Context, labels []*stateLabel) applier { - const resource = "label" - - mutex := new(doMutex) - rollBackLabels := make([]*stateLabel, 0, len(labels)) - - createFn := func(ctx context.Context, i int, orgID, userID platform.ID) *applyErrBody { - var l *stateLabel - 
mutex.Do(func() { - labels[i].orgID = orgID - l = labels[i] - }) - if !l.shouldApply() { - return nil - } - - influxLabel, err := s.applyLabel(ctx, l) - if err != nil { - return &applyErrBody{ - name: l.parserLabel.MetaName(), - msg: err.Error(), - } - } - - mutex.Do(func() { - labels[i].id = influxLabel.ID - rollBackLabels = append(rollBackLabels, labels[i]) - }) - - return nil - } - - return applier{ - creater: creater{ - entries: len(labels), - fn: createFn, - }, - rollbacker: rollbacker{ - resource: resource, - fn: func(_ platform.ID) error { return s.rollbackLabels(ctx, rollBackLabels) }, - }, - } -} - -func (s *Service) rollbackLabels(ctx context.Context, labels []*stateLabel) error { - rollbackFn := func(l *stateLabel) error { - if !IsNew(l.stateStatus) && l.existing == nil { - return nil - } - - var err error - switch { - case IsRemoval(l.stateStatus): - err = s.labelSVC.CreateLabel(ctx, l.existing) - case IsExisting(l.stateStatus): - _, err = s.labelSVC.UpdateLabel(ctx, l.ID(), influxdb.LabelUpdate{ - Name: l.parserLabel.Name(), - Properties: l.existing.Properties, - }) - default: - err = s.labelSVC.DeleteLabel(ctx, l.ID()) - } - return err - } - - var errs []string - for _, l := range labels { - if err := rollbackFn(l); err != nil { - errs = append(errs, fmt.Sprintf("error for label[%q]: %s", l.ID(), err)) - } - } - - if len(errs) > 0 { - return errors.New(strings.Join(errs, ", ")) - } - - return nil -} - -func (s *Service) applyLabel(ctx context.Context, l *stateLabel) (influxdb.Label, error) { - var ( - influxLabel *influxdb.Label - err error - ) - switch { - case IsRemoval(l.stateStatus): - influxLabel, err = l.existing, s.labelSVC.DeleteLabel(ctx, l.ID()) - case IsExisting(l.stateStatus) && l.existing != nil: - influxLabel, err = s.labelSVC.UpdateLabel(ctx, l.ID(), influxdb.LabelUpdate{ - Name: l.parserLabel.Name(), - Properties: l.properties(), - }) - err = ierrors.Wrap(err, "updating") - default: - creatLabel := l.toInfluxLabel() - influxLabel = &creatLabel - err = ierrors.Wrap(s.labelSVC.CreateLabel(ctx, &creatLabel), "creating") - } - if errors2.ErrorCode(err) == errors2.ENotFound { - return influxdb.Label{}, nil - } - if err != nil || influxLabel == nil { - return influxdb.Label{}, err - } - - return *influxLabel, nil -} - -func (s *Service) applyNotificationEndpoints(ctx context.Context, userID platform.ID, endpoints []*stateEndpoint) (applier, func(platform.ID) error) { - mutex := new(doMutex) - rollbackEndpoints := make([]*stateEndpoint, 0, len(endpoints)) - - createFn := func(ctx context.Context, i int, orgID, userID platform.ID) *applyErrBody { - var endpoint *stateEndpoint - mutex.Do(func() { - endpoints[i].orgID = orgID - endpoint = endpoints[i] - }) - - influxEndpoint, err := s.applyNotificationEndpoint(ctx, endpoint, userID) - if err != nil { - return &applyErrBody{ - name: endpoint.parserEndpoint.MetaName(), - msg: err.Error(), - } - } - - mutex.Do(func() { - if influxEndpoint != nil { - endpoints[i].id = influxEndpoint.GetID() - for _, secret := range influxEndpoint.SecretFields() { - switch { - case strings.HasSuffix(secret.Key, "-routing-key"): - if endpoints[i].parserEndpoint.routingKey == nil { - endpoints[i].parserEndpoint.routingKey = new(references) - } - endpoints[i].parserEndpoint.routingKey.Secret = secret.Key - case strings.HasSuffix(secret.Key, "-token"): - if endpoints[i].parserEndpoint.token == nil { - endpoints[i].parserEndpoint.token = new(references) - } - endpoints[i].parserEndpoint.token.Secret = secret.Key - case 
strings.HasSuffix(secret.Key, "-username"): - if endpoints[i].parserEndpoint.username == nil { - endpoints[i].parserEndpoint.username = new(references) - } - endpoints[i].parserEndpoint.username.Secret = secret.Key - case strings.HasSuffix(secret.Key, "-password"): - if endpoints[i].parserEndpoint.password == nil { - endpoints[i].parserEndpoint.password = new(references) - } - endpoints[i].parserEndpoint.password.Secret = secret.Key - } - } - } - rollbackEndpoints = append(rollbackEndpoints, endpoints[i]) - }) - - return nil - } - - rollbackFn := func(_ platform.ID) error { - return s.rollbackNotificationEndpoints(ctx, userID, rollbackEndpoints) - } - - return applier{ - creater: creater{ - entries: len(endpoints), - fn: createFn, - }, - rollbacker: rollbacker{ - fn: func(_ platform.ID) error { - return nil - }, - }, - }, rollbackFn -} - -func (s *Service) applyNotificationEndpoint(ctx context.Context, e *stateEndpoint, userID platform.ID) (influxdb.NotificationEndpoint, error) { - switch { - case IsRemoval(e.stateStatus): - _, _, err := s.endpointSVC.DeleteNotificationEndpoint(ctx, e.ID()) - if err != nil && errors2.ErrorCode(err) != errors2.ENotFound { - return nil, applyFailErr("delete", e.stateIdentity(), err) - } - return e.existing, nil - case IsExisting(e.stateStatus) && e.existing != nil: - // stub out userID since we're always using hte http client which will fill it in for us with the token - // feels a bit broken that is required. - // TODO: look into this userID requirement - end, err := s.endpointSVC.UpdateNotificationEndpoint( - ctx, - e.ID(), - e.summarize().NotificationEndpoint, - userID, - ) - return end, applyFailErr("update", e.stateIdentity(), err) - default: - actual := e.summarize().NotificationEndpoint - err := s.endpointSVC.CreateNotificationEndpoint(ctx, actual, userID) - if err != nil { - return nil, applyFailErr("create", e.stateIdentity(), err) - } - return actual, nil - } -} - -func (s *Service) rollbackNotificationEndpoints(ctx context.Context, userID platform.ID, endpoints []*stateEndpoint) error { - rollbackFn := func(e *stateEndpoint) error { - if !IsNew(e.stateStatus) && e.existing == nil { - return nil - } - var err error - switch e.stateStatus { - case StateStatusRemove: - err = s.endpointSVC.CreateNotificationEndpoint(ctx, e.existing, userID) - err = ierrors.Wrap(err, "failed to rollback removed endpoint") - case StateStatusExists: - _, err = s.endpointSVC.UpdateNotificationEndpoint(ctx, e.ID(), e.existing, userID) - err = ierrors.Wrap(err, "failed to rollback updated endpoint") - default: - _, _, err = s.endpointSVC.DeleteNotificationEndpoint(ctx, e.ID()) - err = ierrors.Wrap(err, "failed to rollback created endpoint") - } - return err - } - - var errs []string - for _, e := range endpoints { - if err := rollbackFn(e); err != nil { - errs = append(errs, fmt.Sprintf("error for notification endpoint[%q]: %s", e.ID(), err)) - } - } - - if len(errs) > 0 { - return errors.New(strings.Join(errs, "; ")) - } - - return nil -} - -func (s *Service) applyNotificationGenerator(ctx context.Context, userID platform.ID, rules []*stateRule, stateEndpoints []*stateEndpoint) (endpointApplier applier, ruleApplier applier, err error) { - mEndpoints := make(map[string]*stateEndpoint) - for _, e := range stateEndpoints { - mEndpoints[e.parserEndpoint.MetaName()] = e - } - - var errs applyErrs - for _, r := range rules { - if IsRemoval(r.stateStatus) { - continue - } - v, ok := mEndpoints[r.endpointTemplateName()] - if !ok { - errs = append(errs, &applyErrBody{ - name: 
r.parserRule.MetaName(), - msg: fmt.Sprintf("notification rule endpoint dependency does not exist; endpointName=%q", r.parserRule.associatedEndpoint.MetaName()), - }) - continue - } - r.associatedEndpoint = v - } - - err = errs.toError("notification_rules", "failed to find dependency") - if err != nil { - return applier{}, applier{}, err - } - - endpointApp, endpointRollbackFn := s.applyNotificationEndpoints(ctx, userID, stateEndpoints) - ruleApp, ruleRollbackFn := s.applyNotificationRules(ctx, userID, rules) - - // here we have to couple the endpoints to rules b/c of the dependency here when rolling back - // a deleted endpoint and rule. This forces the endpoints to be rolled back first so the - // reference for the rule has settled. The dependency has to be available before rolling back - // notification rules. - endpointApp.rollbacker = rollbacker{ - fn: func(orgID platform.ID) error { - if err := endpointRollbackFn(orgID); err != nil { - s.log.Error("failed to roll back endpoints", zap.Error(err)) - } - return ruleRollbackFn(orgID) - }, - } - - return endpointApp, ruleApp, nil -} - -func (s *Service) applyNotificationRules(ctx context.Context, userID platform.ID, rules []*stateRule) (applier, func(platform.ID) error) { - mutex := new(doMutex) - rollbackEndpoints := make([]*stateRule, 0, len(rules)) - - createFn := func(ctx context.Context, i int, orgID, userID platform.ID) *applyErrBody { - var rule *stateRule - mutex.Do(func() { - rules[i].orgID = orgID - rule = rules[i] - }) - - influxRule, err := s.applyNotificationRule(ctx, rule, userID) - if err != nil { - return &applyErrBody{ - name: rule.parserRule.MetaName(), - msg: err.Error(), - } - } - - mutex.Do(func() { - if influxRule != nil { - rules[i].id = influxRule.GetID() - } - rollbackEndpoints = append(rollbackEndpoints, rules[i]) - }) - - return nil - } - - rollbackFn := func(_ platform.ID) error { - return s.rollbackNotificationRules(ctx, userID, rollbackEndpoints) - } - - return applier{ - creater: creater{ - entries: len(rules), - fn: createFn, - }, - rollbacker: rollbacker{ - fn: func(_ platform.ID) error { return nil }, - }, - }, rollbackFn -} - -func (s *Service) applyNotificationRule(ctx context.Context, r *stateRule, userID platform.ID) (influxdb.NotificationRule, error) { - switch { - case IsRemoval(r.stateStatus): - if err := s.ruleSVC.DeleteNotificationRule(ctx, r.ID()); err != nil { - if errors2.ErrorCode(err) == errors2.ENotFound { - return nil, nil - } - return nil, applyFailErr("delete", r.stateIdentity(), err) - } - return r.existing, nil - case IsExisting(r.stateStatus) && r.existing != nil: - ruleCreate := influxdb.NotificationRuleCreate{ - NotificationRule: r.toInfluxRule(), - Status: r.parserRule.Status(), - } - influxRule, err := s.ruleSVC.UpdateNotificationRule(ctx, r.ID(), ruleCreate, userID) - if err != nil { - return nil, applyFailErr("update", r.stateIdentity(), err) - } - return influxRule, nil - default: - influxRule := influxdb.NotificationRuleCreate{ - NotificationRule: r.toInfluxRule(), - Status: r.parserRule.Status(), - } - err := s.ruleSVC.CreateNotificationRule(ctx, influxRule, userID) - if err != nil { - return nil, applyFailErr("create", r.stateIdentity(), err) - } - return influxRule.NotificationRule, nil - } -} - -func (s *Service) rollbackNotificationRules(ctx context.Context, userID platform.ID, rules []*stateRule) error { - rollbackFn := func(r *stateRule) error { - if !IsNew(r.stateStatus) && r.existing == nil { - return nil - } - - existingRuleFn := func(endpointID platform.ID) 
influxdb.NotificationRule { - switch rr := r.existing.(type) { - case *rule.HTTP: - rr.EndpointID = endpointID - case *rule.PagerDuty: - rr.EndpointID = endpointID - case *rule.Slack: - rr.EndpointID = endpointID - } - return r.existing - } - - // setting status to unknown b/c these resources for two reasons: - // 1. we have no ability to find status via the Service, only to set it... - // 2. we have no way of inspecting an existing rule and pulling status from it - // 3. since this is a fallback condition, we set things to inactive as a user - // is likely to follow up this failure by fixing their template up then reapplying - unknownStatus := influxdb.Inactive - - var err error - switch r.stateStatus { - case StateStatusRemove: - if r.associatedEndpoint == nil { - return internalErr(errors.New("failed to find endpoint dependency to rollback existing notification rule")) - } - influxRule := influxdb.NotificationRuleCreate{ - NotificationRule: existingRuleFn(r.endpointID()), - Status: unknownStatus, - } - err = s.ruleSVC.CreateNotificationRule(ctx, influxRule, userID) - err = ierrors.Wrap(err, "failed to rollback created notification rule") - case StateStatusExists: - if r.associatedEndpoint == nil { - return internalErr(errors.New("failed to find endpoint dependency to rollback existing notification rule")) - } - - influxRule := influxdb.NotificationRuleCreate{ - NotificationRule: existingRuleFn(r.endpointID()), - Status: unknownStatus, - } - _, err = s.ruleSVC.UpdateNotificationRule(ctx, r.ID(), influxRule, r.existing.GetOwnerID()) - err = ierrors.Wrap(err, "failed to rollback updated notification rule") - default: - err = s.ruleSVC.DeleteNotificationRule(ctx, r.ID()) - err = ierrors.Wrap(err, "failed to rollback created notification rule") - } - return err - } - - var errs []string - for _, r := range rules { - if err := rollbackFn(r); err != nil { - errs = append(errs, fmt.Sprintf("error for notification rule[%q]: %s", r.ID(), err)) - } - } - - if len(errs) > 0 { - return errors.New(strings.Join(errs, "; ")) - } - return nil -} - -func (s *Service) applySecrets(secrets map[string]string) applier { - const resource = "secrets" - - if len(secrets) == 0 { - return applier{ - rollbacker: rollbacker{fn: func(orgID platform.ID) error { return nil }}, - } - } - - mutex := new(doMutex) - rollbackSecrets := make([]string, 0) - - createFn := func(ctx context.Context, i int, orgID, userID platform.ID) *applyErrBody { - err := s.secretSVC.PutSecrets(ctx, orgID, secrets) - if err != nil { - return &applyErrBody{name: "secrets", msg: err.Error()} - } - - mutex.Do(func() { - for key := range secrets { - rollbackSecrets = append(rollbackSecrets, key) - } - }) - - return nil - } - - return applier{ - creater: creater{ - entries: 1, - fn: createFn, - }, - rollbacker: rollbacker{ - resource: resource, - fn: func(orgID platform.ID) error { - return s.secretSVC.DeleteSecret(context.Background(), orgID) - }, - }, - } -} - -func (s *Service) applyTasks(ctx context.Context, tasks []*stateTask) applier { - const resource = "tasks" - - mutex := new(doMutex) - rollbackTasks := make([]*stateTask, 0, len(tasks)) - - createFn := func(ctx context.Context, i int, orgID, userID platform.ID) *applyErrBody { - var t *stateTask - mutex.Do(func() { - tasks[i].orgID = orgID - t = tasks[i] - }) - - newTask, err := s.applyTask(ctx, userID, t) - if err != nil { - return &applyErrBody{ - name: t.parserTask.MetaName(), - msg: err.Error(), - } - } - - mutex.Do(func() { - tasks[i].id = newTask.ID - rollbackTasks = 
append(rollbackTasks, tasks[i]) - }) - - return nil - } - - return applier{ - creater: creater{ - entries: len(tasks), - fn: createFn, - }, - rollbacker: rollbacker{ - resource: resource, - fn: func(_ platform.ID) error { - return s.rollbackTasks(ctx, rollbackTasks) - }, - }, - } -} - -func (s *Service) applyTask(ctx context.Context, userID platform.ID, t *stateTask) (taskmodel.Task, error) { - if isRestrictedTask(t.existing) { - return *t.existing, nil - } - switch { - case IsRemoval(t.stateStatus): - if err := s.taskSVC.DeleteTask(ctx, t.ID()); err != nil { - if errors2.ErrorCode(err) == errors2.ENotFound { - return taskmodel.Task{}, nil - } - return taskmodel.Task{}, applyFailErr("delete", t.stateIdentity(), err) - } - return *t.existing, nil - case IsExisting(t.stateStatus) && t.existing != nil: - newFlux := t.parserTask.flux() - newStatus := string(t.parserTask.Status()) - opt := options.Options{ - Name: t.parserTask.Name(), - Cron: t.parserTask.cron, - } - if every := t.parserTask.every; every > 0 { - opt.Every.Parse(every.String()) - } - if offset := t.parserTask.offset; offset > 0 { - var off options.Duration - if err := off.Parse(offset.String()); err == nil { - opt.Offset = &off - } - } - - updatedTask, err := s.taskSVC.UpdateTask(ctx, t.ID(), taskmodel.TaskUpdate{ - Flux: &newFlux, - Status: &newStatus, - Description: &t.parserTask.description, - Options: opt, - }) - if err != nil { - return taskmodel.Task{}, applyFailErr("update", t.stateIdentity(), err) - } - return *updatedTask, nil - default: - newTask, err := s.taskSVC.CreateTask(ctx, taskmodel.TaskCreate{ - Type: taskmodel.TaskSystemType, - Flux: t.parserTask.flux(), - OwnerID: userID, - Description: t.parserTask.description, - Status: string(t.parserTask.Status()), - OrganizationID: t.orgID, - }) - if err != nil { - return taskmodel.Task{}, applyFailErr("create", t.stateIdentity(), err) - } - return *newTask, nil - } -} - -func (s *Service) rollbackTasks(ctx context.Context, tasks []*stateTask) error { - rollbackFn := func(t *stateTask) error { - if !IsNew(t.stateStatus) && t.existing == nil || isRestrictedTask(t.existing) { - return nil - } - - var err error - switch t.stateStatus { - case StateStatusRemove: - newTask, err := s.taskSVC.CreateTask(ctx, taskmodel.TaskCreate{ - Type: t.existing.Type, - Flux: t.existing.Flux, - OwnerID: t.existing.OwnerID, - Description: t.existing.Description, - Status: t.existing.Status, - OrganizationID: t.orgID, - Metadata: t.existing.Metadata, - }) - if err != nil { - return ierrors.Wrap(err, "failed to rollback removed task") - } - t.existing = newTask - case StateStatusExists: - opt := options.Options{ - Name: t.existing.Name, - Cron: t.existing.Cron, - } - if every := t.existing.Every; every != "" { - opt.Every.Parse(every) - } - if offset := t.existing.Offset; offset > 0 { - var off options.Duration - if err := off.Parse(offset.String()); err == nil { - opt.Offset = &off - } - } - - _, err = s.taskSVC.UpdateTask(ctx, t.ID(), taskmodel.TaskUpdate{ - Flux: &t.existing.Flux, - Status: &t.existing.Status, - Description: &t.existing.Description, - Metadata: t.existing.Metadata, - Options: opt, - }) - err = ierrors.Wrap(err, "failed to rollback updated task") - default: - err = s.taskSVC.DeleteTask(ctx, t.ID()) - err = ierrors.Wrap(err, "failed to rollback created task") - } - return err - } - - var errs []string - for _, d := range tasks { - if err := rollbackFn(d); err != nil { - errs = append(errs, fmt.Sprintf("error for task[%q]: %s", d.ID(), err)) - } - } - - if len(errs) > 0 { - // 
TODO: fixup error - return errors.New(strings.Join(errs, ", ")) - } - - return nil -} - -func (s *Service) applyTelegrafs(ctx context.Context, userID platform.ID, teles []*stateTelegraf) applier { - const resource = "telegrafs" - - mutex := new(doMutex) - rollbackTelegrafs := make([]*stateTelegraf, 0, len(teles)) - - createFn := func(ctx context.Context, i int, orgID, userID platform.ID) *applyErrBody { - var t *stateTelegraf - mutex.Do(func() { - teles[i].orgID = orgID - t = teles[i] - }) - - existing, err := s.applyTelegrafConfig(ctx, userID, t) - if err != nil { - return &applyErrBody{ - name: t.parserTelegraf.MetaName(), - msg: err.Error(), - } - } - - mutex.Do(func() { - teles[i].id = existing.ID - rollbackTelegrafs = append(rollbackTelegrafs, teles[i]) - }) - - return nil - } - - return applier{ - creater: creater{ - entries: len(teles), - fn: createFn, - }, - rollbacker: rollbacker{ - resource: resource, - fn: func(_ platform.ID) error { - return s.rollbackTelegrafConfigs(ctx, userID, rollbackTelegrafs) - }, - }, - } -} - -func (s *Service) applyTelegrafConfig(ctx context.Context, userID platform.ID, t *stateTelegraf) (influxdb.TelegrafConfig, error) { - switch { - case IsRemoval(t.stateStatus): - if err := s.teleSVC.DeleteTelegrafConfig(ctx, t.ID()); err != nil { - if errors2.ErrorCode(err) == errors2.ENotFound { - return influxdb.TelegrafConfig{}, nil - } - return influxdb.TelegrafConfig{}, applyFailErr("delete", t.stateIdentity(), err) - } - return *t.existing, nil - case IsExisting(t.stateStatus) && t.existing != nil: - cfg := t.summarize().TelegrafConfig - updatedConfig, err := s.teleSVC.UpdateTelegrafConfig(ctx, t.ID(), &cfg, userID) - if err != nil { - return influxdb.TelegrafConfig{}, applyFailErr("update", t.stateIdentity(), err) - } - return *updatedConfig, nil - default: - cfg := t.summarize().TelegrafConfig - err := s.teleSVC.CreateTelegrafConfig(ctx, &cfg, userID) - if err != nil { - return influxdb.TelegrafConfig{}, applyFailErr("create", t.stateIdentity(), err) - } - return cfg, nil - } -} - -func (s *Service) rollbackTelegrafConfigs(ctx context.Context, userID platform.ID, cfgs []*stateTelegraf) error { - rollbackFn := func(t *stateTelegraf) error { - if !IsNew(t.stateStatus) && t.existing == nil { - return nil - } - - var err error - switch t.stateStatus { - case StateStatusRemove: - err = ierrors.Wrap(s.teleSVC.CreateTelegrafConfig(ctx, t.existing, userID), "rolling back removed telegraf config") - case StateStatusExists: - _, err = s.teleSVC.UpdateTelegrafConfig(ctx, t.ID(), t.existing, userID) - err = ierrors.Wrap(err, "rolling back updated telegraf config") - default: - err = ierrors.Wrap(s.teleSVC.DeleteTelegrafConfig(ctx, t.ID()), "rolling back created telegraf config") - } - return err - } - - var errs []string - for _, v := range cfgs { - if err := rollbackFn(v); err != nil { - errs = append(errs, fmt.Sprintf("error for variable[%q]: %s", v.ID(), err)) - } - } - - if len(errs) > 0 { - return errors.New(strings.Join(errs, "; ")) - } - - return nil -} - -func (s *Service) applyVariables(ctx context.Context, vars []*stateVariable) applier { - const resource = "variable" - - mutex := new(doMutex) - rollBackVars := make([]*stateVariable, 0, len(vars)) - - createFn := func(ctx context.Context, i int, orgID, userID platform.ID) *applyErrBody { - var v *stateVariable - mutex.Do(func() { - vars[i].orgID = orgID - v = vars[i] - }) - if !v.shouldApply() { - return nil - } - influxVar, err := s.applyVariable(ctx, v) - if err != nil { - return &applyErrBody{ - name: 
v.parserVar.MetaName(), - msg: err.Error(), - } - } - - mutex.Do(func() { - vars[i].id = influxVar.ID - rollBackVars = append(rollBackVars, vars[i]) - }) - return nil - } - - return applier{ - creater: creater{ - entries: len(vars), - fn: createFn, - }, - rollbacker: rollbacker{ - resource: resource, - fn: func(_ platform.ID) error { return s.rollbackVariables(ctx, rollBackVars) }, - }, - } -} - -func (s *Service) rollbackVariables(ctx context.Context, variables []*stateVariable) error { - rollbackFn := func(v *stateVariable) error { - var err error - switch { - case IsRemoval(v.stateStatus): - if v.existing == nil { - return nil - } - err = ierrors.Wrap(s.varSVC.CreateVariable(ctx, v.existing), "rolling back removed variable") - case IsExisting(v.stateStatus): - if v.existing == nil { - return nil - } - _, err = s.varSVC.UpdateVariable(ctx, v.ID(), &influxdb.VariableUpdate{ - Name: v.existing.Name, - Description: v.existing.Description, - Selected: v.existing.Selected, - Arguments: v.existing.Arguments, - }) - err = ierrors.Wrap(err, "rolling back updated variable") - default: - err = ierrors.Wrap(s.varSVC.DeleteVariable(ctx, v.ID()), "rolling back created variable") - } - return err - } - - var errs []string - for _, v := range variables { - if err := rollbackFn(v); err != nil { - errs = append(errs, fmt.Sprintf("error for variable[%q]: %s", v.ID(), err)) - } - } - - if len(errs) > 0 { - return errors.New(strings.Join(errs, "; ")) - } - - return nil -} - -func (s *Service) applyVariable(ctx context.Context, v *stateVariable) (influxdb.Variable, error) { - switch { - case IsRemoval(v.stateStatus): - if err := s.varSVC.DeleteVariable(ctx, v.id); err != nil && errors2.ErrorCode(err) != errors2.ENotFound { - return influxdb.Variable{}, applyFailErr("delete", v.stateIdentity(), err) - } - if v.existing == nil { - return influxdb.Variable{}, nil - } - return *v.existing, nil - case IsExisting(v.stateStatus) && v.existing != nil: - updatedVar, err := s.varSVC.UpdateVariable(ctx, v.ID(), &influxdb.VariableUpdate{ - Name: v.parserVar.Name(), - Selected: v.parserVar.Selected(), - Description: v.parserVar.Description, - Arguments: v.parserVar.influxVarArgs(), - }) - if err != nil { - return influxdb.Variable{}, applyFailErr("update", v.stateIdentity(), err) - } - return *updatedVar, nil - default: - // when an existing variable (referenced in stack) has been deleted by a user - // then the resource is created anew to get it back to the expected state. 
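// A minimal, self-contained sketch (not from the deleted files; all names here are
// hypothetical) of the three-way dispatch that every applyX method above follows:
// removals are deleted, existing resources are updated in place, and anything else is
// created from the parsed template so a user-deleted resource comes back into the
// expected state.
type exampleResource struct{ ID string }

type exampleStore interface {
	Delete(id string) error
	Update(r exampleResource) (exampleResource, error)
	Create(r exampleResource) (exampleResource, error)
}

func applyExample(store exampleStore, status string, desired exampleResource) (exampleResource, error) {
	switch status {
	case "remove":
		// A real implementation also tolerates "not found" errors here.
		if err := store.Delete(desired.ID); err != nil {
			return exampleResource{}, err
		}
		return desired, nil
	case "exists":
		return store.Update(desired) // reconcile the live resource in place
	default:
		return store.Create(desired) // (re)create to reach the desired state
	}
}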
- influxVar := influxdb.Variable{ - OrganizationID: v.orgID, - Name: v.parserVar.Name(), - Selected: v.parserVar.Selected(), - Description: v.parserVar.Description, - Arguments: v.parserVar.influxVarArgs(), - } - err := s.varSVC.CreateVariable(ctx, &influxVar) - if err != nil { - return influxdb.Variable{}, applyFailErr("create", v.stateIdentity(), err) - } - return influxVar, nil - } -} - -func (s *Service) removeLabelMappings(ctx context.Context, labelMappings []stateLabelMappingForRemoval) applier { - const resource = "removed_label_mapping" - - var rollbackMappings []stateLabelMappingForRemoval - - mutex := new(doMutex) - createFn := func(ctx context.Context, i int, orgID, userID platform.ID) *applyErrBody { - var mapping stateLabelMappingForRemoval - mutex.Do(func() { - mapping = labelMappings[i] - }) - - err := s.labelSVC.DeleteLabelMapping(ctx, &influxdb.LabelMapping{ - LabelID: mapping.LabelID, - ResourceID: mapping.ResourceID, - ResourceType: mapping.ResourceType, - }) - if err != nil && errors2.ErrorCode(err) != errors2.ENotFound { - return &applyErrBody{ - name: fmt.Sprintf("%s:%s:%s", mapping.ResourceType, mapping.ResourceID, mapping.LabelID), - msg: err.Error(), - } - } - - mutex.Do(func() { - rollbackMappings = append(rollbackMappings, mapping) - }) - return nil - } - - return applier{ - creater: creater{ - entries: len(labelMappings), - fn: createFn, - }, - rollbacker: rollbacker{ - resource: resource, - fn: func(_ platform.ID) error { return s.rollbackRemoveLabelMappings(ctx, rollbackMappings) }, - }, - } -} - -func (s *Service) rollbackRemoveLabelMappings(ctx context.Context, mappings []stateLabelMappingForRemoval) error { - var errs []string - for _, m := range mappings { - err := s.labelSVC.CreateLabelMapping(ctx, &influxdb.LabelMapping{ - LabelID: m.LabelID, - ResourceID: m.ResourceID, - ResourceType: m.ResourceType, - }) - if err != nil { - errs = append(errs, - fmt.Sprintf( - "error for label mapping: resource_type=%s resource_id=%s label_id=%s err=%s", - m.ResourceType, - m.ResourceID, - m.LabelID, - err, - )) - } - } - - if len(errs) > 0 { - return errors.New(strings.Join(errs, "; ")) - } - - return nil -} - -func (s *Service) applyLabelMappings(ctx context.Context, labelMappings []stateLabelMapping) applier { - const resource = "label_mapping" - - mutex := new(doMutex) - rollbackMappings := make([]stateLabelMapping, 0, len(labelMappings)) - - createFn := func(ctx context.Context, i int, orgID, userID platform.ID) *applyErrBody { - var mapping stateLabelMapping - mutex.Do(func() { - mapping = labelMappings[i] - }) - - ident := mapping.resource.stateIdentity() - if IsExisting(mapping.status) || mapping.label.ID() == 0 || ident.id == 0 { - // this block here does 2 things, it does not write a - // mapping when one exists. it also avoids having to worry - // about deleting an existing mapping since it will not be - // passed to the delete function below b/c it is never added - // to the list of mappings that is referenced in the delete - // call. 
- return nil - } - - m := influxdb.LabelMapping{ - LabelID: mapping.label.ID(), - ResourceID: ident.id, - ResourceType: ident.resourceType, - } - err := s.labelSVC.CreateLabelMapping(ctx, &m) - if err != nil { - return &applyErrBody{ - name: fmt.Sprintf("%s:%s:%s", ident.resourceType, ident.id, mapping.label.ID()), - msg: err.Error(), - } - } - - mutex.Do(func() { - rollbackMappings = append(rollbackMappings, mapping) - }) - - return nil - } - - return applier{ - creater: creater{ - entries: len(labelMappings), - fn: createFn, - }, - rollbacker: rollbacker{ - resource: resource, - fn: func(_ platform.ID) error { return s.rollbackLabelMappings(ctx, rollbackMappings) }, - }, - } -} - -func (s *Service) rollbackLabelMappings(ctx context.Context, mappings []stateLabelMapping) error { - var errs []string - for _, stateMapping := range mappings { - influxMapping := stateLabelMappingToInfluxLabelMapping(stateMapping) - err := s.labelSVC.DeleteLabelMapping(ctx, &influxMapping) - if err != nil { - errs = append(errs, fmt.Sprintf("%s:%s", stateMapping.label.ID(), stateMapping.resource.stateIdentity().id)) - } - } - - if len(errs) > 0 { - return fmt.Errorf(`label_resource_id_pairs=[%s] err="unable to delete label"`, strings.Join(errs, ", ")) - } - - return nil -} - -func (s *Service) templateFromApplyOpts(ctx context.Context, opt ApplyOpt) (*Template, error) { - if opt.StackID != 0 { - remotes, err := s.getStackRemoteTemplates(ctx, opt.StackID) - if err != nil { - return nil, err - } - opt.Templates = append(opt.Templates, remotes...) - } - - return Combine(opt.Templates, ValidWithoutResources()) -} - -func (s *Service) getStackRemoteTemplates(ctx context.Context, stackID platform.ID) ([]*Template, error) { - stack, err := s.store.ReadStackByID(ctx, stackID) - if err != nil { - return nil, err - } - - lastEvent := stack.LatestEvent() - var remotes []*Template - for _, rawURL := range lastEvent.TemplateURLs { - u, err := url.Parse(rawURL) - if err != nil { - return nil, &errors2.Error{ - Code: errors2.EInternal, - Msg: "failed to parse url", - Err: err, - } - } - - encoding := EncodingSource - switch path.Ext(u.String()) { - case ".jsonnet": - encoding = EncodingJsonnet - case ".json": - encoding = EncodingJSON - case ".yaml", ".yml": - encoding = EncodingYAML - } - - readerFn := FromHTTPRequest(u.String(), s.client) - if u.Scheme == "file" { - readerFn = FromFile(u.Path) - } - - template, err := Parse(encoding, readerFn) - if err != nil { - return nil, err - } - remotes = append(remotes, template) - } - return remotes, nil -} - -func (s *Service) updateStackAfterSuccess(ctx context.Context, stackID platform.ID, state *stateCoordinator, sources []string) error { - stack, err := s.store.ReadStackByID(ctx, stackID) - if err != nil { - return err - } - - var stackResources []StackResource - for _, b := range state.mBuckets { - if IsRemoval(b.stateStatus) || isSystemBucket(b.existing) { - continue - } - stackResources = append(stackResources, StackResource{ - APIVersion: APIVersion, - ID: b.ID(), - Kind: KindBucket, - MetaName: b.parserBkt.MetaName(), - Associations: stateLabelsToStackAssociations(b.labels()), - }) - } - for _, c := range state.mChecks { - if IsRemoval(c.stateStatus) { - continue - } - stackResources = append(stackResources, StackResource{ - APIVersion: APIVersion, - ID: c.ID(), - Kind: KindCheck, - MetaName: c.parserCheck.MetaName(), - Associations: stateLabelsToStackAssociations(c.labels()), - }) - } - for _, d := range state.mDashboards { - if IsRemoval(d.stateStatus) { - continue - } 
- stackResources = append(stackResources, StackResource{ - APIVersion: APIVersion, - ID: d.ID(), - Kind: KindDashboard, - MetaName: d.parserDash.MetaName(), - Associations: stateLabelsToStackAssociations(d.labels()), - }) - } - for _, n := range state.mEndpoints { - if IsRemoval(n.stateStatus) { - continue - } - stackResources = append(stackResources, StackResource{ - APIVersion: APIVersion, - ID: n.ID(), - Kind: KindNotificationEndpoint, - MetaName: n.parserEndpoint.MetaName(), - Associations: stateLabelsToStackAssociations(n.labels()), - }) - } - for _, l := range state.mLabels { - if IsRemoval(l.stateStatus) { - continue - } - stackResources = append(stackResources, StackResource{ - APIVersion: APIVersion, - ID: l.ID(), - Kind: KindLabel, - MetaName: l.parserLabel.MetaName(), - }) - } - for _, r := range state.mRules { - if IsRemoval(r.stateStatus) { - continue - } - stackResources = append(stackResources, StackResource{ - APIVersion: APIVersion, - ID: r.ID(), - Kind: KindNotificationRule, - MetaName: r.parserRule.MetaName(), - Associations: append( - stateLabelsToStackAssociations(r.labels()), - r.endpointAssociation(), - ), - }) - } - for _, t := range state.mTasks { - if IsRemoval(t.stateStatus) || isRestrictedTask(t.existing) { - continue - } - stackResources = append(stackResources, StackResource{ - APIVersion: APIVersion, - ID: t.ID(), - Kind: KindTask, - MetaName: t.parserTask.MetaName(), - Associations: stateLabelsToStackAssociations(t.labels()), - }) - } - for _, t := range state.mTelegrafs { - if IsRemoval(t.stateStatus) { - continue - } - stackResources = append(stackResources, StackResource{ - APIVersion: APIVersion, - ID: t.ID(), - Kind: KindTelegraf, - MetaName: t.parserTelegraf.MetaName(), - Associations: stateLabelsToStackAssociations(t.labels()), - }) - } - for _, v := range state.mVariables { - if IsRemoval(v.stateStatus) { - continue - } - stackResources = append(stackResources, StackResource{ - APIVersion: APIVersion, - ID: v.ID(), - Kind: KindVariable, - MetaName: v.parserVar.MetaName(), - Associations: stateLabelsToStackAssociations(v.labels()), - }) - } - ev := stack.LatestEvent() - ev.EventType = StackEventUpdate - ev.Resources = stackResources - ev.Sources = sources - ev.UpdatedAt = s.timeGen.Now() - stack.Events = append(stack.Events, ev) - return s.store.UpdateStack(ctx, stack) -} - -func (s *Service) updateStackAfterRollback(ctx context.Context, stackID platform.ID, state *stateCoordinator, sources []string) error { - stack, err := s.store.ReadStackByID(ctx, stackID) - if err != nil { - return err - } - - type key struct { - k Kind - metaName string - } - newKey := func(k Kind, metaName string) key { - return key{k: k, metaName: metaName} - } - - latestEvent := stack.LatestEvent() - existingResources := make(map[key]*StackResource) - for i := range latestEvent.Resources { - res := latestEvent.Resources[i] - existingResources[newKey(res.Kind, res.MetaName)] = &latestEvent.Resources[i] - } - - hasChanges := false - { - // these are the case where a deletion happens and is rolled back creating a new resource. - // when resource is not to be removed this is a nothing burger, as it should be - // rolled back to previous state. 
- for _, b := range state.mBuckets { - res, ok := existingResources[newKey(KindBucket, b.parserBkt.MetaName())] - if ok && res.ID != b.ID() { - hasChanges = true - res.ID = b.existing.ID - } - } - for _, c := range state.mChecks { - res, ok := existingResources[newKey(KindCheck, c.parserCheck.MetaName())] - if ok && res.ID != c.ID() { - hasChanges = true - res.ID = c.existing.GetID() - } - } - for _, d := range state.mDashboards { - res, ok := existingResources[newKey(KindDashboard, d.parserDash.MetaName())] - if ok && res.ID != d.ID() { - hasChanges = true - res.ID = d.existing.ID - } - } - for _, e := range state.mEndpoints { - res, ok := existingResources[newKey(KindNotificationEndpoint, e.parserEndpoint.MetaName())] - if ok && res.ID != e.ID() { - hasChanges = true - res.ID = e.existing.GetID() - } - } - for _, l := range state.mLabels { - res, ok := existingResources[newKey(KindLabel, l.parserLabel.MetaName())] - if ok && res.ID != l.ID() { - hasChanges = true - res.ID = l.existing.ID - } - } - for _, r := range state.mRules { - res, ok := existingResources[newKey(KindNotificationRule, r.parserRule.MetaName())] - if !ok { - continue - } - - if res.ID != r.ID() { - hasChanges = true - res.ID = r.existing.GetID() - } - - endpointAssociation := r.endpointAssociation() - newAss := make([]StackResourceAssociation, 0, len(res.Associations)) - - var endpointAssociationChanged bool - for _, ass := range res.Associations { - if ass.Kind.is(KindNotificationEndpoint) && ass != endpointAssociation { - endpointAssociationChanged = true - ass = endpointAssociation - } - newAss = append(newAss, ass) - } - if endpointAssociationChanged { - hasChanges = true - res.Associations = newAss - } - } - for _, t := range state.mTasks { - res, ok := existingResources[newKey(KindTask, t.parserTask.MetaName())] - if ok && res.ID != t.ID() { - hasChanges = true - res.ID = t.existing.ID - } - } - for _, t := range state.mTelegrafs { - res, ok := existingResources[newKey(KindTelegraf, t.parserTelegraf.MetaName())] - if ok && res.ID != t.ID() { - hasChanges = true - res.ID = t.existing.ID - } - } - for _, v := range state.mVariables { - res, ok := existingResources[newKey(KindVariable, v.parserVar.MetaName())] - if ok && res.ID != v.ID() { - hasChanges = true - res.ID = v.existing.ID - } - } - } - if !hasChanges { - return nil - } - - latestEvent.EventType = StackEventUpdate - latestEvent.Sources = sources - latestEvent.UpdatedAt = s.timeGen.Now() - stack.Events = append(stack.Events, latestEvent) - return s.store.UpdateStack(ctx, stack) -} - -func (s *Service) findLabel(ctx context.Context, orgID platform.ID, l *stateLabel) (*influxdb.Label, error) { - if l.ID() != 0 { - return s.labelSVC.FindLabelByID(ctx, l.ID()) - } - - existingLabels, err := s.labelSVC.FindLabels(ctx, influxdb.LabelFilter{ - Name: l.parserLabel.Name(), - OrgID: &orgID, - }, influxdb.FindOptions{Limit: 1}) - if err != nil { - return nil, err - } - if len(existingLabels) == 0 { - return nil, errors.New("no labels found for name: " + l.parserLabel.Name()) - } - return existingLabels[0], nil -} - -func (s *Service) getAllPlatformVariables(ctx context.Context, orgID platform.ID) ([]*influxdb.Variable, error) { - const limit = 100 - - var ( - existingVars []*influxdb.Variable - offset int - ) - for { - vars, err := s.varSVC.FindVariables(ctx, influxdb.VariableFilter{ - OrganizationID: &orgID, - // TODO: would be ideal to extend find variables to allow for a name matcher - // since names are unique for vars within an org. 
In the meanwhile, make large - // limit returned vars, should be more than enough for the time being. - }, influxdb.FindOptions{Limit: limit, Offset: offset}) - if err != nil { - return nil, err - } - existingVars = append(existingVars, vars...) - - if len(vars) < limit { - break - } - offset += len(vars) - } - return existingVars, nil -} - -func (s *Service) getAllChecks(ctx context.Context, orgID platform.ID) ([]influxdb.Check, error) { - filter := influxdb.CheckFilter{OrgID: &orgID} - const limit = 100 - - var ( - out []influxdb.Check - offset int - ) - for { - checks, _, err := s.checkSVC.FindChecks(ctx, filter, influxdb.FindOptions{ - Limit: limit, - Offset: offset, - }) - if err != nil { - return nil, err - } - out = append(out, checks...) - if len(checks) < limit { - break - } - offset += limit - } - return out, nil -} - -func (s *Service) getNotificationRules(ctx context.Context, orgID platform.ID) ([]influxdb.NotificationRule, error) { - filter := influxdb.NotificationRuleFilter{OrgID: &orgID} - const limit = 100 - - var ( - out []influxdb.NotificationRule - offset int - ) - for { - rules, _, err := s.ruleSVC.FindNotificationRules(ctx, filter) - if err != nil { - return nil, err - } - out = append(out, rules...) - if len(rules) < limit { - break - } - offset += limit - } - return out, nil - -} - -func (s *Service) getAllTasks(ctx context.Context, orgID platform.ID) ([]*taskmodel.Task, error) { - var ( - out []*taskmodel.Task - afterID *platform.ID - ) - for { - f := taskmodel.TaskFilter{ - OrganizationID: &orgID, - Limit: taskmodel.TaskMaxPageSize, - } - if afterID != nil { - f.After = afterID - } - tasks, _, err := s.taskSVC.FindTasks(ctx, f) - if err != nil { - return nil, err - } - if len(tasks) == 0 { - break - } - out = append(out, tasks...) 
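// A minimal sketch (not from the deleted files; the fetchPage callback is hypothetical)
// of the pagination idiom the getAll* helpers above share: keep requesting pages until a
// short page signals the end, advancing either an offset (variables, checks, rules) or an
// "after" ID cursor (tasks). Note that the rules helper above computes an offset but never
// passes FindOptions to FindNotificationRules, so it relies on the short-page check alone.
func fetchAllExample(fetchPage func(limit, offset int) ([]string, error)) ([]string, error) {
	const limit = 100
	var (
		out    []string
		offset int
	)
	for {
		page, err := fetchPage(limit, offset)
		if err != nil {
			return nil, err
		}
		out = append(out, page...)
		if len(page) < limit { // a short page means nothing is left to fetch
			break
		}
		offset += len(page)
	}
	return out, nil
}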
- afterID = &tasks[len(tasks)-1].ID - } - return out, nil -} - -func newSummaryFromStateTemplate(state *stateCoordinator, template *Template) Summary { - stateSum := state.summary() - stateSum.MissingEnvs = template.missingEnvRefs() - stateSum.MissingSecrets = template.missingSecrets() - return stateSum -} - -func stateLabelsToStackAssociations(stateLabels []*stateLabel) []StackResourceAssociation { - var out []StackResourceAssociation - for _, l := range stateLabels { - out = append(out, StackResourceAssociation{ - Kind: KindLabel, - MetaName: l.parserLabel.MetaName(), - }) - } - return out -} - -func applyFailErr(method string, ident stateIdentity, err error) error { - v := ident.id.String() - if v == "" { - v = ident.metaName - } - msg := fmt.Sprintf("failed to %s %s[%q]", method, ident.resourceType, v) - return ierrors.Wrap(err, msg) -} - -func getLabelIDMap(ctx context.Context, labelSVC influxdb.LabelService, labelNames []string) (map[platform.ID]bool, error) { - mLabelIDs := make(map[platform.ID]bool) - for _, labelName := range labelNames { - iLabels, err := labelSVC.FindLabels(ctx, influxdb.LabelFilter{ - Name: labelName, - }) - if err != nil { - return nil, err - } - if len(iLabels) == 1 { - mLabelIDs[iLabels[0].ID] = true - } - } - return mLabelIDs, nil -} - -func sortObjects(objects []Object) []Object { - sort.Slice(objects, func(i, j int) bool { - iName, jName := objects[i].Name(), objects[j].Name() - iKind, jKind := objects[i].Kind, objects[j].Kind - - if iKind.is(jKind) { - return iName < jName - } - return kindPriorities[iKind] < kindPriorities[jKind] - }) - return objects -} - -type doMutex struct { - sync.Mutex -} - -func (m *doMutex) Do(fn func()) { - m.Lock() - defer m.Unlock() - fn() -} - -type ( - applier struct { - creater creater - rollbacker rollbacker - } - - rollbacker struct { - resource string - fn func(orgID platform.ID) error - } - - creater struct { - entries int - fn func(ctx context.Context, i int, orgID, userID platform.ID) *applyErrBody - } -) - -type rollbackCoordinator struct { - logger *zap.Logger - rollbacks []rollbacker - - sem chan struct{} -} - -func newRollbackCoordinator(logger *zap.Logger, reqLimit int) *rollbackCoordinator { - return &rollbackCoordinator{ - logger: logger, - sem: make(chan struct{}, reqLimit), - } -} - -func (r *rollbackCoordinator) runTilEnd(ctx context.Context, orgID, userID platform.ID, appliers ...applier) error { - errStr := newErrStream(ctx) - - wg := new(sync.WaitGroup) - for i := range appliers { - // cannot reuse the shared variable from for loop since we're using concurrency b/c - // that temp var gets recycled between iterations - app := appliers[i] - r.rollbacks = append(r.rollbacks, app.rollbacker) - for idx := range make([]struct{}, app.creater.entries) { - r.sem <- struct{}{} - wg.Add(1) - - go func(i int, resource string) { - defer func() { - wg.Done() - <-r.sem - }() - - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - defer func() { - if err := recover(); err != nil { - r.logger.Error( - "panic applying "+resource, - zap.String("stack_trace", fmt.Sprintf("%+v", stack.Trace())), - zap.Reflect("panic", err), - ) - errStr.add(errMsg{ - resource: resource, - err: applyErrBody{ - msg: fmt.Sprintf("panic: %s paniced", resource), - }, - }) - } - }() - - if err := app.creater.fn(ctx, i, orgID, userID); err != nil { - errStr.add(errMsg{resource: resource, err: *err}) - } - }(idx, app.rollbacker.resource) - } - } - wg.Wait() - - return errStr.close() -} - -func (r *rollbackCoordinator) 
rollback(l *zap.Logger, err *error, orgID platform.ID) { - if *err == nil { - return - } - - for _, r := range r.rollbacks { - if err := r.fn(orgID); err != nil { - l.Error("failed to delete "+r.resource, zap.Error(err)) - } - } -} - -type errMsg struct { - resource string - err applyErrBody -} - -type errStream struct { - msgStream chan errMsg - err chan error - done <-chan struct{} -} - -func newErrStream(ctx context.Context) *errStream { - e := &errStream{ - msgStream: make(chan errMsg), - err: make(chan error), - done: ctx.Done(), - } - e.do() - return e -} - -func (e *errStream) do() { - go func() { - mErrs := func() map[string]applyErrs { - mErrs := make(map[string]applyErrs) - for { - select { - case <-e.done: - return nil - case msg, ok := <-e.msgStream: - if !ok { - return mErrs - } - mErrs[msg.resource] = append(mErrs[msg.resource], &msg.err) - } - } - }() - - if len(mErrs) == 0 { - e.err <- nil - return - } - - var errs []string - for resource, err := range mErrs { - errs = append(errs, err.toError(resource, "failed to apply resource").Error()) - } - e.err <- errors.New(strings.Join(errs, "\n")) - }() -} - -func (e *errStream) close() error { - close(e.msgStream) - return <-e.err -} - -func (e *errStream) add(msg errMsg) { - select { - case <-e.done: - case e.msgStream <- msg: - } -} - -// TODO: clean up apply errors to inform the user in an actionable way -type applyErrBody struct { - name string - msg string -} - -type applyErrs []*applyErrBody - -func (a applyErrs) toError(resType, msg string) error { - if len(a) == 0 { - return nil - } - errMsg := fmt.Sprintf(`resource_type=%q err=%q`, resType, msg) - for _, e := range a { - errMsg += fmt.Sprintf("\n\tmetadata_name=%q err_msg=%q", e.name, e.msg) - } - return errors.New(errMsg) -} - -func validURLs(urls []string) error { - for _, u := range urls { - if _, err := url.Parse(u); err != nil { - msg := fmt.Sprintf("url invalid for entry %q", u) - return influxErr(errors2.EInvalid, msg) - } - } - return nil -} - -func isRestrictedTask(t *taskmodel.Task) bool { - return t != nil && t.Type != taskmodel.TaskSystemType -} - -func isSystemBucket(b *influxdb.Bucket) bool { - return b != nil && b.Type == influxdb.BucketTypeSystem -} - -func labelSlcToMap(labels []*stateLabel) map[string]*stateLabel { - m := make(map[string]*stateLabel) - for i := range labels { - m[labels[i].Name()] = labels[i] - } - return m -} - -func failedValidationErr(err error) error { - if err == nil { - return nil - } - return &errors2.Error{Code: errors2.EUnprocessableEntity, Err: err} -} - -func internalErr(err error) error { - if err == nil { - return nil - } - return influxErr(errors2.EInternal, err) -} - -func influxErr(code string, errArg interface{}, rest ...interface{}) *errors2.Error { - err := &errors2.Error{ - Code: code, - } - for _, a := range append(rest, errArg) { - switch v := a.(type) { - case string: - err.Msg = v - case error: - err.Err = v - case nil: - case interface{ String() string }: - err.Msg = v.String() - } - } - return err -} diff --git a/pkger/service_auth.go b/pkger/service_auth.go deleted file mode 100644 index 61b79730c16..00000000000 --- a/pkger/service_auth.go +++ /dev/null @@ -1,109 +0,0 @@ -package pkger - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -type AuthAgent interface { - IsWritable(ctx context.Context, orgID platform.ID, resType influxdb.ResourceType) error - OrgPermissions(ctx context.Context, orgID platform.ID, action 
influxdb.Action, rest ...influxdb.Action) error -} - -type authMW struct { - authAgent AuthAgent - next SVC -} - -var _ SVC = (*authMW)(nil) - -// MWAuth is an auth service middleware for the packager domain. -func MWAuth(authAgent AuthAgent) SVCMiddleware { - return func(svc SVC) SVC { - return &authMW{ - authAgent: authAgent, - next: svc, - } - } -} - -func (s *authMW) InitStack(ctx context.Context, userID platform.ID, newStack StackCreate) (Stack, error) { - err := s.authAgent.IsWritable(ctx, newStack.OrgID, ResourceTypeStack) - if err != nil { - return Stack{}, err - } - return s.next.InitStack(ctx, userID, newStack) -} - -func (s *authMW) UninstallStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) (Stack, error) { - err := s.authAgent.IsWritable(ctx, identifiers.OrgID, ResourceTypeStack) - if err != nil { - return Stack{}, err - } - return s.next.UninstallStack(ctx, identifiers) -} - -func (s *authMW) DeleteStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) error { - err := s.authAgent.IsWritable(ctx, identifiers.OrgID, ResourceTypeStack) - if err != nil { - return err - } - return s.next.DeleteStack(ctx, identifiers) -} - -func (s *authMW) ListStacks(ctx context.Context, orgID platform.ID, f ListFilter) ([]Stack, error) { - err := s.authAgent.OrgPermissions(ctx, orgID, influxdb.ReadAction) - if err != nil { - return nil, err - } - return s.next.ListStacks(ctx, orgID, f) -} - -func (s *authMW) ReadStack(ctx context.Context, id platform.ID) (Stack, error) { - st, err := s.next.ReadStack(ctx, id) - if err != nil { - return Stack{}, err - } - - err = s.authAgent.OrgPermissions(ctx, st.OrgID, influxdb.ReadAction) - if err != nil { - return Stack{}, err - } - return st, nil -} - -func (s *authMW) UpdateStack(ctx context.Context, upd StackUpdate) (Stack, error) { - stack, err := s.next.ReadStack(ctx, upd.ID) - if err != nil { - return Stack{}, err - } - - err = s.authAgent.IsWritable(ctx, stack.OrgID, ResourceTypeStack) - if err != nil { - return Stack{}, err - } - return s.next.UpdateStack(ctx, upd) -} - -func (s *authMW) Export(ctx context.Context, opts ...ExportOptFn) (*Template, error) { - opt, err := exportOptFromOptFns(opts) - if err != nil { - return nil, err - } - if opt.StackID != 0 { - if _, err := s.ReadStack(ctx, opt.StackID); err != nil { - return nil, err - } - } - return s.next.Export(ctx, opts...) -} - -func (s *authMW) DryRun(ctx context.Context, orgID, userID platform.ID, opts ...ApplyOptFn) (ImpactSummary, error) { - return s.next.DryRun(ctx, orgID, userID, opts...) -} - -func (s *authMW) Apply(ctx context.Context, orgID, userID platform.ID, opts ...ApplyOptFn) (ImpactSummary, error) { - return s.next.Apply(ctx, orgID, userID, opts...) -} diff --git a/pkger/service_logging.go b/pkger/service_logging.go deleted file mode 100644 index 5ecb47a0113..00000000000 --- a/pkger/service_logging.go +++ /dev/null @@ -1,235 +0,0 @@ -package pkger - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "go.uber.org/zap" -) - -type loggingMW struct { - logger *zap.Logger - next SVC -} - -// MWLogging adds logging functionality for the service. 
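// A minimal sketch (not from the deleted files; the one-method service and names are
// hypothetical, and it assumes the same context/zap imports as the surrounding file) of
// the decorator pattern both the auth and logging wrappers use: a middleware is a function
// from SVC to SVC, and each wrapper holds the next implementation and forwards to it.
type exampleSVC interface {
	Do(ctx context.Context) error
}

type exampleMiddleware func(exampleSVC) exampleSVC

type exampleLoggingMW struct {
	log  *zap.Logger
	next exampleSVC
}

func (m *exampleLoggingMW) Do(ctx context.Context) error {
	err := m.next.Do(ctx)
	if err != nil {
		m.log.Error("do failed", zap.Error(err))
	}
	return err
}

// wrapExample applies middlewares so the first one in the list becomes the outermost wrapper.
func wrapExample(base exampleSVC, mws ...exampleMiddleware) exampleSVC {
	for i := len(mws) - 1; i >= 0; i-- {
		base = mws[i](base)
	}
	return base
}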
-func MWLogging(log *zap.Logger) SVCMiddleware { - return func(svc SVC) SVC { - return &loggingMW{ - logger: log, - next: svc, - } - } -} - -var _ SVC = (*loggingMW)(nil) - -func (s *loggingMW) InitStack(ctx context.Context, userID platform.ID, newStack StackCreate) (stack Stack, err error) { - defer func(start time.Time) { - if err == nil { - return - } - - s.logger.Error( - "failed to init stack", - zap.Error(err), - zap.Stringer("orgID", newStack.OrgID), - zap.Stringer("userID", userID), - zap.Strings("urls", newStack.TemplateURLs), - zap.Duration("took", time.Since(start)), - ) - }(time.Now()) - return s.next.InitStack(ctx, userID, newStack) -} - -func (s *loggingMW) UninstallStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) (_ Stack, err error) { - defer func(start time.Time) { - if err == nil { - return - } - - s.logger.Error( - "failed to uninstall stack", - zap.Error(err), - zap.Stringer("orgID", identifiers.OrgID), - zap.Stringer("userID", identifiers.OrgID), - zap.Stringer("stackID", identifiers.StackID), - zap.Duration("took", time.Since(start)), - ) - }(time.Now()) - return s.next.UninstallStack(ctx, identifiers) -} - -func (s *loggingMW) DeleteStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) (err error) { - defer func(start time.Time) { - if err == nil { - return - } - - s.logger.Error( - "failed to delete stack", - zap.Error(err), - zap.Stringer("orgID", identifiers.OrgID), - zap.Stringer("userID", identifiers.OrgID), - zap.Stringer("stackID", identifiers.StackID), - zap.Duration("took", time.Since(start)), - ) - }(time.Now()) - return s.next.DeleteStack(ctx, identifiers) -} - -func (s *loggingMW) ListStacks(ctx context.Context, orgID platform.ID, f ListFilter) (stacks []Stack, err error) { - defer func(start time.Time) { - if err == nil { - return - } - - var stackIDs []string - for _, id := range f.StackIDs { - stackIDs = append(stackIDs, id.String()) - } - - s.logger.Error( - "failed to list stacks", - zap.Error(err), - zap.Stringer("orgID", orgID), - zap.Strings("stackIDs", stackIDs), - zap.Strings("names", f.Names), - zap.Duration("took", time.Since(start)), - ) - }(time.Now()) - return s.next.ListStacks(ctx, orgID, f) -} - -func (s *loggingMW) ReadStack(ctx context.Context, id platform.ID) (st Stack, err error) { - defer func(start time.Time) { - if err != nil { - s.logger.Error("failed to read stack", - zap.Error(err), - zap.String("id", id.String()), - zap.Duration("took", time.Since(start)), - ) - return - } - }(time.Now()) - return s.next.ReadStack(ctx, id) -} - -func (s *loggingMW) UpdateStack(ctx context.Context, upd StackUpdate) (_ Stack, err error) { - defer func(start time.Time) { - if err != nil { - fields := []zap.Field{ - zap.Error(err), - zap.String("id", upd.ID.String()), - } - if upd.Name != nil { - fields = append(fields, zap.String("name", *upd.Name)) - } - if upd.Description != nil { - fields = append(fields, zap.String("desc", *upd.Description)) - } - fields = append(fields, - zap.Strings("urls", upd.TemplateURLs), - zap.Duration("took", time.Since(start)), - ) - - s.logger.Error("failed to update stack", fields...) 
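// A minimal sketch (not from the deleted files; doWork is a hypothetical stand-in for the
// wrapped SVC call, and it assumes the same time/zap imports as the surrounding file) of
// the idiom the logging middleware methods above repeat: capture time.Now() as the
// deferred function's argument, name the error return, and log only when the wrapped call
// fails, including how long it took.
func timedCall(log *zap.Logger, doWork func() error) (err error) {
	defer func(start time.Time) {
		if err == nil {
			return
		}
		log.Error("operation failed",
			zap.Error(err),
			zap.Duration("took", time.Since(start)),
		)
	}(time.Now())
	return doWork()
}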
- return - } - }(time.Now()) - return s.next.UpdateStack(ctx, upd) -} - -func (s *loggingMW) Export(ctx context.Context, opts ...ExportOptFn) (template *Template, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - s.logger.Error("failed to export template", zap.Error(err), dur) - return - } - s.logger.Info("exported template", append(s.summaryLogFields(template.Summary()), dur)...) - }(time.Now()) - return s.next.Export(ctx, opts...) -} - -func (s *loggingMW) DryRun(ctx context.Context, orgID, userID platform.ID, opts ...ApplyOptFn) (impact ImpactSummary, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - s.logger.Error("failed to dry run template", - zap.String("orgID", orgID.String()), - zap.String("userID", userID.String()), - zap.Error(err), - dur, - ) - return - } - - var opt ApplyOpt - for _, o := range opts { - o(&opt) - } - - fields := s.summaryLogFields(impact.Summary) - if opt.StackID != 0 { - fields = append(fields, zap.Stringer("stackID", opt.StackID)) - } - fields = append(fields, dur) - s.logger.Info("template dry run successful", fields...) - }(time.Now()) - return s.next.DryRun(ctx, orgID, userID, opts...) -} - -func (s *loggingMW) Apply(ctx context.Context, orgID, userID platform.ID, opts ...ApplyOptFn) (impact ImpactSummary, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - s.logger.Error("failed to apply template", - zap.String("orgID", orgID.String()), - zap.String("userID", userID.String()), - zap.Error(err), - dur, - ) - return - } - - fields := s.summaryLogFields(impact.Summary) - - opt := applyOptFromOptFns(opts...) - if opt.StackID != 0 { - fields = append(fields, zap.Stringer("stackID", opt.StackID)) - } - fields = append(fields, dur) - s.logger.Info("template apply successful", fields...) - }(time.Now()) - return s.next.Apply(ctx, orgID, userID, opts...) 
-} - -func (s *loggingMW) summaryLogFields(sum Summary) []zap.Field { - potentialFields := []struct { - key string - val int - }{ - {key: "buckets", val: len(sum.Buckets)}, - {key: "checks", val: len(sum.Checks)}, - {key: "dashboards", val: len(sum.Dashboards)}, - {key: "endpoints", val: len(sum.NotificationEndpoints)}, - {key: "labels", val: len(sum.Labels)}, - {key: "label_mappings", val: len(sum.LabelMappings)}, - {key: "rules", val: len(sum.NotificationRules)}, - {key: "secrets", val: len(sum.MissingSecrets)}, - {key: "tasks", val: len(sum.Tasks)}, - {key: "telegrafs", val: len(sum.TelegrafConfigs)}, - {key: "variables", val: len(sum.Variables)}, - } - - var fields []zap.Field - for _, f := range potentialFields { - if f.val > 0 { - fields = append(fields, zap.Int("num_"+f.key, f.val)) - } - } - - return fields -} diff --git a/pkger/service_metrics.go b/pkger/service_metrics.go deleted file mode 100644 index da17c7234e4..00000000000 --- a/pkger/service_metrics.go +++ /dev/null @@ -1,244 +0,0 @@ -package pkger - -import ( - "context" - "net/url" - "path" - "strconv" - "strings" - - "github.com/influxdata/influxdb/v2/kit/metric" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/prometheus/client_golang/prometheus" -) - -type mwMetrics struct { - // RED metrics - rec *metric.REDClient - // Installed template count metrics - templateCounts *prometheus.CounterVec - - next SVC -} - -var _ SVC = (*mwMetrics)(nil) - -// MWMetrics is a metrics service middleware for the notification endpoint service. -func MWMetrics(reg *prom.Registry) SVCMiddleware { - return func(svc SVC) SVC { - m := &mwMetrics{ - rec: metric.New(reg, "pkger", metric.WithVec(templateVec()), metric.WithVec(exportVec())), - templateCounts: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "templates", - Subsystem: "installed", - Name: "count", - Help: "Total number of templates installed by name.", - }, []string{"template"}), - next: svc, - } - reg.MustRegister(m.templateCounts) - return m - } -} - -func (s *mwMetrics) InitStack(ctx context.Context, userID platform.ID, newStack StackCreate) (Stack, error) { - rec := s.rec.Record("init_stack") - stack, err := s.next.InitStack(ctx, userID, newStack) - return stack, rec(err) -} - -func (s *mwMetrics) UninstallStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) (Stack, error) { - rec := s.rec.Record("uninstall_stack") - stack, err := s.next.UninstallStack(ctx, identifiers) - return stack, rec(err) -} - -func (s *mwMetrics) DeleteStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) error { - rec := s.rec.Record("delete_stack") - return rec(s.next.DeleteStack(ctx, identifiers)) -} - -func (s *mwMetrics) ListStacks(ctx context.Context, orgID platform.ID, f ListFilter) ([]Stack, error) { - rec := s.rec.Record("list_stacks") - stacks, err := s.next.ListStacks(ctx, orgID, f) - return stacks, rec(err) -} - -func (s *mwMetrics) ReadStack(ctx context.Context, id platform.ID) (Stack, error) { - rec := s.rec.Record("read_stack") - stack, err := s.next.ReadStack(ctx, id) - return stack, rec(err) -} - -func (s *mwMetrics) UpdateStack(ctx context.Context, upd StackUpdate) (Stack, error) { - rec := s.rec.Record("update_stack") - stack, err := s.next.UpdateStack(ctx, upd) - return stack, rec(err) -} - -func (s *mwMetrics) Export(ctx context.Context, opts ...ExportOptFn) (*Template, error) { - rec := s.rec.Record("export") - opt, 
err := exportOptFromOptFns(opts) - if err != nil { - return nil, rec(err) - } - - template, err := s.next.Export(ctx, opts...) - if err != nil { - return nil, err - } - - return template, rec(err, metric.RecordAdditional(map[string]interface{}{ - "num_org_ids": len(opt.OrgIDs), - "summary": template.Summary(), - "by_stack": opt.StackID != 0, - })) -} - -func (s *mwMetrics) DryRun(ctx context.Context, orgID, userID platform.ID, opts ...ApplyOptFn) (ImpactSummary, error) { - rec := s.rec.Record("dry_run") - impact, err := s.next.DryRun(ctx, orgID, userID, opts...) - return impact, rec(err, applyMetricAdditions(orgID, userID, impact.Sources)) -} - -func (s *mwMetrics) Apply(ctx context.Context, orgID, userID platform.ID, opts ...ApplyOptFn) (ImpactSummary, error) { - rec := s.rec.Record("apply") - impact, err := s.next.Apply(ctx, orgID, userID, opts...) - if err == nil { - s.templateCounts.WithLabelValues(impact.communityName()).Inc() - } - return impact, rec(err, applyMetricAdditions(orgID, userID, impact.Sources)) -} - -func applyMetricAdditions(orgID, userID platform.ID, sources []string) func(*metric.CollectFnOpts) { - return metric.RecordAdditional(map[string]interface{}{ - "org_id": orgID.String(), - "sources": sources, - "user_id": userID.String(), - }) -} - -func exportVec() metric.VecOpts { - const ( - byStack = "by_stack" - numOrgIDs = "num_org_ids" - bkts = "buckets" - checks = "checks" - dashes = "dashboards" - endpoints = "endpoints" - labels = "labels" - labelMappings = "label_mappings" - rules = "rules" - tasks = "tasks" - telegrafConfigs = "telegraf_configs" - variables = "variables" - ) - return metric.VecOpts{ - Name: "template_export", - Help: "Metrics for resources being exported", - LabelNames: []string{ - "method", - byStack, - numOrgIDs, - bkts, - checks, - dashes, - endpoints, - labels, - labelMappings, - rules, - tasks, - telegrafConfigs, - variables, - }, - CounterFn: func(vec *prometheus.CounterVec, o metric.CollectFnOpts) { - if o.Err != nil { - return - } - - orgID, _ := o.AdditionalProps[numOrgIDs].(int) - sum, _ := o.AdditionalProps["sum"].(Summary) - st, _ := o.AdditionalProps[byStack].(bool) - - vec. - With(prometheus.Labels{ - "method": o.Method, - byStack: strconv.FormatBool(st), - numOrgIDs: strconv.Itoa(orgID), - bkts: strconv.Itoa(len(sum.Buckets)), - checks: strconv.Itoa(len(sum.Checks)), - dashes: strconv.Itoa(len(sum.Dashboards)), - endpoints: strconv.Itoa(len(sum.NotificationEndpoints)), - labels: strconv.Itoa(len(sum.Labels)), - labelMappings: strconv.Itoa(len(sum.LabelMappings)), - rules: strconv.Itoa(len(sum.NotificationRules)), - tasks: strconv.Itoa(len(sum.Tasks)), - telegrafConfigs: strconv.Itoa(len(sum.TelegrafConfigs)), - variables: strconv.Itoa(len(sum.TelegrafConfigs)), - }). - Inc() - }, - HistogramFn: nil, - } -} - -func templateVec() metric.VecOpts { - return metric.VecOpts{ - Name: "template_count", - Help: "Number of installations per template", - LabelNames: []string{"method", "source", "user_id", "org_id"}, - CounterFn: func(vec *prometheus.CounterVec, o metric.CollectFnOpts) { - if o.Err != nil { - return - } - - orgID, _ := o.AdditionalProps["org_id"].(string) - userID, _ := o.AdditionalProps["user_id"].(string) - - // safe to ignore the failed type assertion, a zero value - // provides a nil slice, so no worries. - sources, _ := o.AdditionalProps["sources"].([]string) - for _, source := range normalizeRemoteSources(sources) { - vec. 
- With(prometheus.Labels{ - "method": o.Method, - "source": source.String(), - "org_id": orgID, - "user_id": userID, - }). - Inc() - } - }, - } -} - -func normalizeRemoteSources(sources []string) []url.URL { - var out []url.URL - for _, source := range sources { - u, err := url.Parse(source) - if err != nil { - continue - } - if !strings.HasPrefix(u.Scheme, "http") { - continue - } - if u.Host == githubRawContentHost { - u.Host = githubHost - u.Path = normalizeRawGithubPath(u.Path) - } - out = append(out, *u) - } - return out -} - -func normalizeRawGithubPath(rawPath string) string { - parts := strings.Split(rawPath, "/") - if len(parts) < 4 { - return rawPath - } - // keep /account/repo as base, then append the blob to it - tail := append([]string{"blob"}, parts[3:]...) - parts = append(parts[:3], tail...) - return path.Join(parts...) -} diff --git a/pkger/service_models.go b/pkger/service_models.go deleted file mode 100644 index de134679abc..00000000000 --- a/pkger/service_models.go +++ /dev/null @@ -1,1580 +0,0 @@ -package pkger - -import ( - "reflect" - "sort" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/notification/rule" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -type stateCoordinator struct { - mBuckets map[string]*stateBucket - mChecks map[string]*stateCheck - mDashboards map[string]*stateDashboard - mEndpoints map[string]*stateEndpoint - mLabels map[string]*stateLabel - mRules map[string]*stateRule - mTasks map[string]*stateTask - mTelegrafs map[string]*stateTelegraf - mVariables map[string]*stateVariable - - labelMappings []stateLabelMapping - labelMappingsToRemove []stateLabelMappingForRemoval -} - -func newStateCoordinator(template *Template, acts resourceActions) *stateCoordinator { - state := stateCoordinator{ - mBuckets: make(map[string]*stateBucket), - mChecks: make(map[string]*stateCheck), - mDashboards: make(map[string]*stateDashboard), - mEndpoints: make(map[string]*stateEndpoint), - mLabels: make(map[string]*stateLabel), - mRules: make(map[string]*stateRule), - mTasks: make(map[string]*stateTask), - mTelegrafs: make(map[string]*stateTelegraf), - mVariables: make(map[string]*stateVariable), - } - - // labels are done first to validate dependencies are accounted for. - // when a label is skipped by an action, this will still be accurate - // for hte individual labels, and cascades to the resources that are - // associated to a label. 
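// A minimal sketch (not from the deleted files; types and names are hypothetical) of why
// the coordinator builds its label map before every other resource map: later resources
// resolve their label associations by meta name, and labels skipped by an action simply
// drop out of the association list rather than failing the lookup.
type exampleLabel struct{ MetaName string }

type exampleState struct {
	labels map[string]*exampleLabel
}

func (s *exampleState) resolveLabels(metaNames []string) []*exampleLabel {
	var out []*exampleLabel
	for _, n := range metaNames {
		if l, ok := s.labels[n]; ok { // unknown (skipped) labels are silently dropped
			out = append(out, l)
		}
	}
	return out
}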
- for _, l := range template.labels() { - if acts.skipResource(KindLabel, l.MetaName()) { - continue - } - state.mLabels[l.MetaName()] = &stateLabel{ - parserLabel: l, - stateStatus: StateStatusNew, - } - } - for _, b := range template.buckets() { - if acts.skipResource(KindBucket, b.MetaName()) { - continue - } - state.mBuckets[b.MetaName()] = &stateBucket{ - parserBkt: b, - stateStatus: StateStatusNew, - labelAssociations: state.templateToStateLabels(b.labels), - } - } - for _, c := range template.checks() { - if acts.skipResource(KindCheck, c.MetaName()) { - continue - } - state.mChecks[c.MetaName()] = &stateCheck{ - parserCheck: c, - stateStatus: StateStatusNew, - labelAssociations: state.templateToStateLabels(c.labels), - } - } - for _, d := range template.dashboards() { - if acts.skipResource(KindDashboard, d.MetaName()) { - continue - } - state.mDashboards[d.MetaName()] = &stateDashboard{ - parserDash: d, - stateStatus: StateStatusNew, - labelAssociations: state.templateToStateLabels(d.labels), - } - } - for _, e := range template.notificationEndpoints() { - if acts.skipResource(KindNotificationEndpoint, e.MetaName()) { - continue - } - state.mEndpoints[e.MetaName()] = &stateEndpoint{ - parserEndpoint: e, - stateStatus: StateStatusNew, - labelAssociations: state.templateToStateLabels(e.labels), - } - } - for _, r := range template.notificationRules() { - if acts.skipResource(KindNotificationRule, r.MetaName()) { - continue - } - state.mRules[r.MetaName()] = &stateRule{ - parserRule: r, - stateStatus: StateStatusNew, - labelAssociations: state.templateToStateLabels(r.labels), - } - } - for _, task := range template.tasks() { - if acts.skipResource(KindTask, task.MetaName()) { - continue - } - state.mTasks[task.MetaName()] = &stateTask{ - parserTask: task, - stateStatus: StateStatusNew, - labelAssociations: state.templateToStateLabels(task.labels), - } - } - for _, tele := range template.telegrafs() { - if acts.skipResource(KindTelegraf, tele.MetaName()) { - continue - } - state.mTelegrafs[tele.MetaName()] = &stateTelegraf{ - parserTelegraf: tele, - stateStatus: StateStatusNew, - labelAssociations: state.templateToStateLabels(tele.labels), - } - } - for _, v := range template.variables() { - if acts.skipResource(KindVariable, v.MetaName()) { - continue - } - state.mVariables[v.MetaName()] = &stateVariable{ - parserVar: v, - stateStatus: StateStatusNew, - labelAssociations: state.templateToStateLabels(v.labels), - } - } - - return &state -} - -func (s *stateCoordinator) buckets() []*stateBucket { - out := make([]*stateBucket, 0, len(s.mBuckets)) - for _, v := range s.mBuckets { - out = append(out, v) - } - return out -} - -func (s *stateCoordinator) checks() []*stateCheck { - out := make([]*stateCheck, 0, len(s.mChecks)) - for _, v := range s.mChecks { - out = append(out, v) - } - return out -} - -func (s *stateCoordinator) dashboards() []*stateDashboard { - out := make([]*stateDashboard, 0, len(s.mDashboards)) - for _, d := range s.mDashboards { - out = append(out, d) - } - return out -} - -func (s *stateCoordinator) endpoints() []*stateEndpoint { - out := make([]*stateEndpoint, 0, len(s.mEndpoints)) - for _, e := range s.mEndpoints { - out = append(out, e) - } - return out -} - -func (s *stateCoordinator) labels() []*stateLabel { - out := make([]*stateLabel, 0, len(s.mLabels)) - for _, v := range s.mLabels { - out = append(out, v) - } - return out -} - -func (s *stateCoordinator) rules() []*stateRule { - out := make([]*stateRule, 0, len(s.mRules)) - for _, r := range s.mRules { - out 
= append(out, r) - } - return out -} - -func (s *stateCoordinator) tasks() []*stateTask { - out := make([]*stateTask, 0, len(s.mTasks)) - for _, t := range s.mTasks { - out = append(out, t) - } - return out -} - -func (s *stateCoordinator) telegrafConfigs() []*stateTelegraf { - out := make([]*stateTelegraf, 0, len(s.mTelegrafs)) - for _, t := range s.mTelegrafs { - out = append(out, t) - } - return out -} - -func (s *stateCoordinator) variables() []*stateVariable { - out := make([]*stateVariable, 0, len(s.mVariables)) - for _, v := range s.mVariables { - out = append(out, v) - } - return out -} - -func (s *stateCoordinator) diff() Diff { - var diff Diff - for _, b := range s.mBuckets { - diff.Buckets = append(diff.Buckets, b.diffBucket()) - } - sort.Slice(diff.Buckets, func(i, j int) bool { - return diff.Buckets[i].MetaName < diff.Buckets[j].MetaName - }) - - for _, c := range s.mChecks { - diff.Checks = append(diff.Checks, c.diffCheck()) - } - sort.Slice(diff.Checks, func(i, j int) bool { - return diff.Checks[i].MetaName < diff.Checks[j].MetaName - }) - - for _, d := range s.mDashboards { - diff.Dashboards = append(diff.Dashboards, d.diffDashboard()) - } - sort.Slice(diff.Dashboards, func(i, j int) bool { - return diff.Dashboards[i].MetaName < diff.Dashboards[j].MetaName - }) - - for _, e := range s.mEndpoints { - diff.NotificationEndpoints = append(diff.NotificationEndpoints, e.diffEndpoint()) - } - sort.Slice(diff.NotificationEndpoints, func(i, j int) bool { - return diff.NotificationEndpoints[i].MetaName < diff.NotificationEndpoints[j].MetaName - }) - - for _, l := range s.mLabels { - diff.Labels = append(diff.Labels, l.diffLabel()) - } - sort.Slice(diff.Labels, func(i, j int) bool { - return diff.Labels[i].MetaName < diff.Labels[j].MetaName - }) - - for _, r := range s.mRules { - diff.NotificationRules = append(diff.NotificationRules, r.diffRule()) - } - sort.Slice(diff.NotificationRules, func(i, j int) bool { - return diff.NotificationRules[i].MetaName < diff.NotificationRules[j].MetaName - }) - - for _, t := range s.mTasks { - diff.Tasks = append(diff.Tasks, t.diffTask()) - } - sort.Slice(diff.Tasks, func(i, j int) bool { - return diff.Tasks[i].MetaName < diff.Tasks[j].MetaName - }) - - for _, t := range s.mTelegrafs { - diff.Telegrafs = append(diff.Telegrafs, t.diffTelegraf()) - } - sort.Slice(diff.Telegrafs, func(i, j int) bool { - return diff.Telegrafs[i].MetaName < diff.Telegrafs[j].MetaName - }) - - for _, v := range s.mVariables { - diff.Variables = append(diff.Variables, v.diffVariable()) - } - sort.Slice(diff.Variables, func(i, j int) bool { - return diff.Variables[i].MetaName < diff.Variables[j].MetaName - }) - - for _, m := range s.labelMappings { - diff.LabelMappings = append(diff.LabelMappings, m.diffLabelMapping()) - } - for _, m := range s.labelMappingsToRemove { - diff.LabelMappings = append(diff.LabelMappings, m.diffLabelMapping()) - } - - sort.Slice(diff.LabelMappings, func(i, j int) bool { - n, m := diff.LabelMappings[i], diff.LabelMappings[j] - if n.ResType < m.ResType { - return true - } - if n.ResType > m.ResType { - return false - } - if n.ResMetaName < m.ResMetaName { - return true - } - if n.ResMetaName > m.ResMetaName { - return false - } - return n.LabelName < m.LabelName - }) - - return diff -} - -func (s *stateCoordinator) summary() Summary { - var sum Summary - for _, v := range s.mBuckets { - if IsRemoval(v.stateStatus) { - continue - } - sum.Buckets = append(sum.Buckets, v.summarize()) - } - sort.Slice(sum.Buckets, func(i, j int) bool { - return 
sum.Buckets[i].MetaName < sum.Buckets[j].MetaName - }) - - for _, c := range s.mChecks { - if IsRemoval(c.stateStatus) { - continue - } - sum.Checks = append(sum.Checks, c.summarize()) - } - sort.Slice(sum.Checks, func(i, j int) bool { - return sum.Checks[i].MetaName < sum.Checks[j].MetaName - }) - - for _, d := range s.mDashboards { - if IsRemoval(d.stateStatus) { - continue - } - sum.Dashboards = append(sum.Dashboards, d.summarize()) - } - sort.Slice(sum.Dashboards, func(i, j int) bool { - return sum.Dashboards[i].MetaName < sum.Dashboards[j].MetaName - }) - - for _, e := range s.mEndpoints { - if IsRemoval(e.stateStatus) { - continue - } - sum.NotificationEndpoints = append(sum.NotificationEndpoints, e.summarize()) - } - sort.Slice(sum.NotificationEndpoints, func(i, j int) bool { - return sum.NotificationEndpoints[i].MetaName < sum.NotificationEndpoints[j].MetaName - }) - - for _, v := range s.mLabels { - if IsRemoval(v.stateStatus) { - continue - } - sum.Labels = append(sum.Labels, v.summarize()) - } - sort.Slice(sum.Labels, func(i, j int) bool { - return sum.Labels[i].MetaName < sum.Labels[j].MetaName - }) - - for _, v := range s.mRules { - if IsRemoval(v.stateStatus) { - continue - } - sum.NotificationRules = append(sum.NotificationRules, v.summarize()) - } - sort.Slice(sum.NotificationRules, func(i, j int) bool { - return sum.NotificationRules[i].MetaName < sum.NotificationRules[j].MetaName - }) - - for _, t := range s.mTasks { - if IsRemoval(t.stateStatus) { - continue - } - sum.Tasks = append(sum.Tasks, t.summarize()) - } - sort.Slice(sum.Tasks, func(i, j int) bool { - return sum.Tasks[i].MetaName < sum.Tasks[j].MetaName - }) - - for _, t := range s.mTelegrafs { - if IsRemoval(t.stateStatus) { - continue - } - sum.TelegrafConfigs = append(sum.TelegrafConfigs, t.summarize()) - } - sort.Slice(sum.TelegrafConfigs, func(i, j int) bool { - return sum.TelegrafConfigs[i].MetaName < sum.TelegrafConfigs[j].MetaName - }) - - for _, v := range s.mVariables { - if IsRemoval(v.stateStatus) { - continue - } - sum.Variables = append(sum.Variables, v.summarize()) - } - sort.Slice(sum.Variables, func(i, j int) bool { - return sum.Variables[i].MetaName < sum.Variables[j].MetaName - }) - - for _, v := range s.labelMappings { - sum.LabelMappings = append(sum.LabelMappings, v.summarize()) - } - sort.Slice(sum.LabelMappings, func(i, j int) bool { - n, m := sum.LabelMappings[i], sum.LabelMappings[j] - if n.ResourceType != m.ResourceType { - return n.ResourceType < m.ResourceType - } - if n.ResourceMetaName != m.ResourceMetaName { - return n.ResourceMetaName < m.ResourceMetaName - } - return n.LabelName < m.LabelName - }) - - return sum -} - -func (s *stateCoordinator) getLabelByMetaName(metaName string) (*stateLabel, bool) { - l, ok := s.mLabels[metaName] - return l, ok -} - -func (s *stateCoordinator) templateToStateLabels(labels []*label) []*stateLabel { - var out []*stateLabel - for _, l := range labels { - stLabel, found := s.getLabelByMetaName(l.MetaName()) - if !found { - continue - } - out = append(out, stLabel) - } - return out -} - -func (s *stateCoordinator) addStackState(stack Stack) { - reconcilers := []func([]StackResource){ - s.reconcileStackResources, - s.reconcileLabelMappings, - s.reconcileNotificationDependencies, - } - for _, reconcileFn := range reconcilers { - reconcileFn(stack.LatestEvent().Resources) - } -} - -func (s *stateCoordinator) reconcileStackResources(stackResources []StackResource) { - for _, r := range stackResources { - if !s.Contains(r.Kind, r.MetaName) { - 
s.addObjectForRemoval(r.Kind, r.MetaName, r.ID) - continue - } - s.setObjectID(r.Kind, r.MetaName, r.ID) - } -} - -func (s *stateCoordinator) reconcileLabelMappings(stackResources []StackResource) { - mLabelMetaNameToID := make(map[string]platform.ID) - for _, r := range stackResources { - if r.Kind.is(KindLabel) { - mLabelMetaNameToID[r.MetaName] = r.ID - } - } - - for _, r := range stackResources { - labels := s.labelAssociations(r.Kind, r.MetaName) - if len(r.Associations) == 0 { - continue - } - - // if associations agree => do nothing - // if associations are new (in state not in stack) => do nothing - // if associations are not in state and in stack => add them for removal - mStackAss := make(map[StackResourceAssociation]struct{}) - for _, ass := range r.Associations { - if ass.Kind.is(KindLabel) { - mStackAss[ass] = struct{}{} - } - } - - for _, l := range labels { - // we want to keep associations that are from previous application and are not changing - delete(mStackAss, StackResourceAssociation{ - Kind: KindLabel, - MetaName: l.parserLabel.MetaName(), - }) - } - - // all associations that are in the stack but not in the - // state fall into here and are marked for removal. - for assForRemoval := range mStackAss { - s.labelMappingsToRemove = append(s.labelMappingsToRemove, stateLabelMappingForRemoval{ - LabelMetaName: assForRemoval.MetaName, - LabelID: mLabelMetaNameToID[assForRemoval.MetaName], - ResourceID: r.ID, - ResourceMetaName: r.MetaName, - ResourceType: r.Kind.ResourceType(), - }) - } - } -} - -func (s *stateCoordinator) reconcileNotificationDependencies(stackResources []StackResource) { - for _, r := range stackResources { - if r.Kind.is(KindNotificationRule) { - for _, ass := range r.Associations { - if ass.Kind.is(KindNotificationEndpoint) { - s.mRules[r.MetaName].associatedEndpoint = s.mEndpoints[ass.MetaName] - break - } - } - } - } -} - -func (s *stateCoordinator) get(k Kind, metaName string) (interface{}, bool) { - switch k { - case KindBucket: - v, ok := s.mBuckets[metaName] - return v, ok - case KindCheck, KindCheckDeadman, KindCheckThreshold: - v, ok := s.mChecks[metaName] - return v, ok - case KindDashboard: - v, ok := s.mDashboards[metaName] - return v, ok - case KindLabel: - v, ok := s.mLabels[metaName] - return v, ok - case KindNotificationEndpoint, - KindNotificationEndpointHTTP, - KindNotificationEndpointPagerDuty, - KindNotificationEndpointSlack: - v, ok := s.mEndpoints[metaName] - return v, ok - case KindNotificationRule: - v, ok := s.mRules[metaName] - return v, ok - case KindTask: - v, ok := s.mTasks[metaName] - return v, ok - case KindTelegraf: - v, ok := s.mTelegrafs[metaName] - return v, ok - case KindVariable: - v, ok := s.mVariables[metaName] - return v, ok - default: - return nil, false - } -} - -func (s *stateCoordinator) labelAssociations(k Kind, metaName string) []*stateLabel { - v, _ := s.get(k, metaName) - labeler, ok := v.(interface { - labels() []*stateLabel - }) - if !ok { - return nil - } - - return labeler.labels() -} - -func (s *stateCoordinator) Contains(k Kind, metaName string) bool { - _, ok := s.get(k, metaName) - return ok -} - -// setObjectID sets the id for the resource graphed from the object the key identifies. -func (s *stateCoordinator) setObjectID(k Kind, metaName string, id platform.ID) { - idSetFn, ok := s.getObjectIDSetter(k, metaName) - if !ok { - return - } - idSetFn(id) -} - -// addObjectForRemoval sets the id for the resource graphed from the object the key identifies. 
-// The metaName and kind are used as the unique identifier, when calling this it will -// overwrite any existing value if one exists. If desired, check for the value by using -// the Contains method. -func (s *stateCoordinator) addObjectForRemoval(k Kind, metaName string, id platform.ID) { - newIdentity := identity{ - name: &references{val: metaName}, - } - - switch k { - case KindBucket: - s.mBuckets[metaName] = &stateBucket{ - id: id, - parserBkt: &bucket{identity: newIdentity}, - stateStatus: StateStatusRemove, - } - case KindCheck, KindCheckDeadman, KindCheckThreshold: - s.mChecks[metaName] = &stateCheck{ - id: id, - parserCheck: &check{identity: newIdentity}, - stateStatus: StateStatusRemove, - } - case KindDashboard: - s.mDashboards[metaName] = &stateDashboard{ - id: id, - parserDash: &dashboard{identity: newIdentity}, - stateStatus: StateStatusRemove, - } - case KindLabel: - s.mLabels[metaName] = &stateLabel{ - id: id, - parserLabel: &label{identity: newIdentity}, - stateStatus: StateStatusRemove, - } - case KindNotificationEndpoint, - KindNotificationEndpointHTTP, - KindNotificationEndpointPagerDuty, - KindNotificationEndpointSlack: - s.mEndpoints[metaName] = &stateEndpoint{ - id: id, - parserEndpoint: ¬ificationEndpoint{identity: newIdentity}, - stateStatus: StateStatusRemove, - } - case KindNotificationRule: - s.mRules[metaName] = &stateRule{ - id: id, - parserRule: ¬ificationRule{identity: newIdentity}, - stateStatus: StateStatusRemove, - } - case KindTask: - s.mTasks[metaName] = &stateTask{ - id: id, - parserTask: &task{identity: newIdentity}, - stateStatus: StateStatusRemove, - } - case KindTelegraf: - s.mTelegrafs[metaName] = &stateTelegraf{ - id: id, - parserTelegraf: &telegraf{identity: newIdentity}, - stateStatus: StateStatusRemove, - } - case KindVariable: - s.mVariables[metaName] = &stateVariable{ - id: id, - parserVar: &variable{identity: newIdentity}, - stateStatus: StateStatusRemove, - } - } -} - -func (s *stateCoordinator) getObjectIDSetter(k Kind, metaName string) (func(platform.ID), bool) { - switch k { - case KindBucket: - r, ok := s.mBuckets[metaName] - return func(id platform.ID) { - r.id = id - r.stateStatus = StateStatusExists - }, ok - case KindCheck, KindCheckDeadman, KindCheckThreshold: - r, ok := s.mChecks[metaName] - return func(id platform.ID) { - r.id = id - r.stateStatus = StateStatusExists - }, ok - case KindDashboard: - r, ok := s.mDashboards[metaName] - return func(id platform.ID) { - r.id = id - r.stateStatus = StateStatusExists - }, ok - case KindLabel: - r, ok := s.mLabels[metaName] - return func(id platform.ID) { - r.id = id - r.stateStatus = StateStatusExists - }, ok - case KindNotificationEndpoint, - KindNotificationEndpointHTTP, - KindNotificationEndpointPagerDuty, - KindNotificationEndpointSlack: - r, ok := s.mEndpoints[metaName] - return func(id platform.ID) { - r.id = id - r.stateStatus = StateStatusExists - }, ok - case KindNotificationRule: - r, ok := s.mRules[metaName] - return func(id platform.ID) { - r.id = id - r.stateStatus = StateStatusExists - }, ok - case KindTask: - r, ok := s.mTasks[metaName] - return func(id platform.ID) { - r.id = id - r.stateStatus = StateStatusExists - }, ok - case KindTelegraf: - r, ok := s.mTelegrafs[metaName] - return func(id platform.ID) { - r.id = id - r.stateStatus = StateStatusExists - }, ok - case KindVariable: - r, ok := s.mVariables[metaName] - return func(id platform.ID) { - r.id = id - r.stateStatus = StateStatusExists - }, ok - default: - return nil, false - } -} - -type stateIdentity struct { 
- id platform.ID - name string - metaName string - resourceType influxdb.ResourceType - stateStatus StateStatus -} - -func (s stateIdentity) exists() bool { - return IsExisting(s.stateStatus) -} - -type stateBucket struct { - id, orgID platform.ID - stateStatus StateStatus - labelAssociations []*stateLabel - - parserBkt *bucket - existing *influxdb.Bucket -} - -func (b *stateBucket) diffBucket() DiffBucket { - diff := DiffBucket{ - DiffIdentifier: DiffIdentifier{ - Kind: KindBucket, - ID: SafeID(b.ID()), - StateStatus: b.stateStatus, - MetaName: b.parserBkt.MetaName(), - }, - New: DiffBucketValues{ - Name: b.parserBkt.Name(), - Description: b.parserBkt.Description, - RetentionRules: b.parserBkt.RetentionRules, - }, - } - if e := b.existing; e != nil { - diff.Old = &DiffBucketValues{ - Name: e.Name, - Description: e.Description, - } - if e.RetentionPeriod > 0 { - diff.Old.RetentionRules = retentionRules{newRetentionRule(e.RetentionPeriod)} - } - } - return diff -} - -func stateToSummaryLabels(labels []*stateLabel) []SummaryLabel { - out := make([]SummaryLabel, 0, len(labels)) - for _, l := range labels { - out = append(out, l.summarize()) - } - return out -} - -func (b *stateBucket) summarize() SummaryBucket { - sum := b.parserBkt.summarize() - sum.ID = SafeID(b.ID()) - sum.OrgID = SafeID(b.orgID) - sum.LabelAssociations = stateToSummaryLabels(b.labelAssociations) - return sum -} - -func (b *stateBucket) ID() platform.ID { - if !IsNew(b.stateStatus) && b.existing != nil { - return b.existing.ID - } - return b.id -} - -func (b *stateBucket) resourceType() influxdb.ResourceType { - return KindBucket.ResourceType() -} - -func (b *stateBucket) labels() []*stateLabel { - return b.labelAssociations -} - -func (b *stateBucket) stateIdentity() stateIdentity { - return stateIdentity{ - id: b.ID(), - name: b.parserBkt.Name(), - metaName: b.parserBkt.MetaName(), - resourceType: b.resourceType(), - stateStatus: b.stateStatus, - } -} - -func (b *stateBucket) shouldApply() bool { - return IsRemoval(b.stateStatus) || - b.existing == nil || - b.parserBkt.Description != b.existing.Description || - b.parserBkt.Name() != b.existing.Name || - b.parserBkt.RetentionRules.RP() != b.existing.RetentionPeriod -} - -type stateCheck struct { - id, orgID platform.ID - stateStatus StateStatus - labelAssociations []*stateLabel - - parserCheck *check - existing influxdb.Check -} - -func (c *stateCheck) ID() platform.ID { - if !IsNew(c.stateStatus) && c.existing != nil { - return c.existing.GetID() - } - return c.id -} - -func (c *stateCheck) labels() []*stateLabel { - return c.labelAssociations -} - -func (c *stateCheck) resourceType() influxdb.ResourceType { - return KindCheck.ResourceType() -} - -func (c *stateCheck) stateIdentity() stateIdentity { - return stateIdentity{ - id: c.ID(), - name: c.parserCheck.Name(), - metaName: c.parserCheck.MetaName(), - resourceType: c.resourceType(), - stateStatus: c.stateStatus, - } -} - -func (c *stateCheck) diffCheck() DiffCheck { - diff := DiffCheck{ - DiffIdentifier: DiffIdentifier{ - ID: SafeID(c.ID()), - StateStatus: c.stateStatus, - MetaName: c.parserCheck.MetaName(), - }, - } - newCheck := c.summarize() - diff.Kind = newCheck.Kind - if newCheck.Check != nil { - diff.New.Check = newCheck.Check - } - if c.existing != nil { - diff.Old = &DiffCheckValues{ - Check: c.existing, - } - } - return diff -} - -func (c *stateCheck) summarize() SummaryCheck { - sum := c.parserCheck.summarize() - if sum.Check == nil { - return sum - } - sum.Check.SetID(c.id) - sum.Check.SetOrgID(c.orgID) - 
sum.LabelAssociations = stateToSummaryLabels(c.labelAssociations) - return sum -} - -type stateDashboard struct { - id, orgID platform.ID - stateStatus StateStatus - labelAssociations []*stateLabel - - parserDash *dashboard - existing *influxdb.Dashboard -} - -func (d *stateDashboard) ID() platform.ID { - if !IsNew(d.stateStatus) && d.existing != nil { - return d.existing.ID - } - return d.id -} - -func (d *stateDashboard) labels() []*stateLabel { - return d.labelAssociations -} - -func (d *stateDashboard) resourceType() influxdb.ResourceType { - return KindDashboard.ResourceType() -} - -func (d *stateDashboard) stateIdentity() stateIdentity { - return stateIdentity{ - id: d.ID(), - name: d.parserDash.Name(), - metaName: d.parserDash.MetaName(), - resourceType: d.resourceType(), - stateStatus: d.stateStatus, - } -} - -func (d *stateDashboard) diffDashboard() DiffDashboard { - diff := DiffDashboard{ - DiffIdentifier: DiffIdentifier{ - Kind: KindDashboard, - ID: SafeID(d.ID()), - StateStatus: d.stateStatus, - MetaName: d.parserDash.MetaName(), - }, - New: DiffDashboardValues{ - Name: d.parserDash.Name(), - Desc: d.parserDash.Description, - Charts: make([]DiffChart, 0, len(d.parserDash.Charts)), - }, - } - - for _, c := range d.parserDash.Charts { - diff.New.Charts = append(diff.New.Charts, DiffChart{ - Properties: c.properties(), - Height: c.Height, - Width: c.Width, - }) - } - - if d.existing == nil { - return diff - } - - oldDiff := DiffDashboardValues{ - Name: d.existing.Name, - Desc: d.existing.Description, - Charts: make([]DiffChart, 0, len(d.existing.Cells)), - } - - for _, c := range d.existing.Cells { - var props influxdb.ViewProperties - if c.View != nil { - props = c.View.Properties - } - - oldDiff.Charts = append(oldDiff.Charts, DiffChart{ - Properties: props, - XPosition: int(c.X), - YPosition: int(c.Y), - Height: int(c.H), - Width: int(c.W), - }) - } - - diff.Old = &oldDiff - - return diff -} - -func (d *stateDashboard) summarize() SummaryDashboard { - sum := d.parserDash.summarize() - sum.ID = SafeID(d.ID()) - sum.OrgID = SafeID(d.orgID) - sum.LabelAssociations = stateToSummaryLabels(d.labelAssociations) - return sum -} - -type stateLabel struct { - id, orgID platform.ID - stateStatus StateStatus - - parserLabel *label - existing *influxdb.Label -} - -func (l *stateLabel) diffLabel() DiffLabel { - diff := DiffLabel{ - DiffIdentifier: DiffIdentifier{ - Kind: KindLabel, - ID: SafeID(l.ID()), - StateStatus: l.stateStatus, - MetaName: l.parserLabel.MetaName(), - }, - New: DiffLabelValues{ - Name: l.parserLabel.Name(), - Description: l.parserLabel.Description, - Color: l.parserLabel.Color, - }, - } - if e := l.existing; e != nil { - diff.Old = &DiffLabelValues{ - Name: e.Name, - Description: e.Properties["description"], - Color: e.Properties["color"], - } - } - return diff -} - -func (l *stateLabel) summarize() SummaryLabel { - sum := l.parserLabel.summarize() - sum.ID = SafeID(l.ID()) - sum.OrgID = SafeID(l.orgID) - return sum -} - -func (l *stateLabel) ID() platform.ID { - if !IsNew(l.stateStatus) && l.existing != nil { - return l.existing.ID - } - return l.id -} - -func (l *stateLabel) Name() string { - return l.parserLabel.Name() -} - -func (l *stateLabel) MetaName() string { - return l.parserLabel.MetaName() -} - -func (l *stateLabel) shouldApply() bool { - return IsRemoval(l.stateStatus) || - l.existing == nil || - l.parserLabel.Description != l.existing.Properties["description"] || - l.parserLabel.Name() != l.existing.Name || - l.parserLabel.Color != 
l.existing.Properties["color"] -} - -func (l *stateLabel) toInfluxLabel() influxdb.Label { - return influxdb.Label{ - ID: l.ID(), - OrgID: l.orgID, - Name: l.parserLabel.Name(), - Properties: l.properties(), - } -} - -func (l *stateLabel) properties() map[string]string { - return map[string]string{ - "color": l.parserLabel.Color, - "description": l.parserLabel.Description, - } -} - -type stateLabelMapping struct { - status StateStatus - - resource interface { - stateIdentity() stateIdentity - } - - label *stateLabel -} - -func (lm stateLabelMapping) diffLabelMapping() DiffLabelMapping { - ident := lm.resource.stateIdentity() - return DiffLabelMapping{ - StateStatus: lm.status, - ResType: ident.resourceType, - ResID: SafeID(ident.id), - ResMetaName: ident.metaName, - ResName: ident.name, - LabelID: SafeID(lm.label.ID()), - LabelMetaName: lm.label.parserLabel.MetaName(), - LabelName: lm.label.parserLabel.Name(), - } -} - -func (lm stateLabelMapping) summarize() SummaryLabelMapping { - ident := lm.resource.stateIdentity() - return SummaryLabelMapping{ - Status: lm.status, - ResourceID: SafeID(ident.id), - ResourceMetaName: ident.metaName, - ResourceName: ident.name, - ResourceType: ident.resourceType, - LabelMetaName: lm.label.parserLabel.MetaName(), - LabelName: lm.label.parserLabel.Name(), - LabelID: SafeID(lm.label.ID()), - } -} - -func stateLabelMappingToInfluxLabelMapping(mapping stateLabelMapping) influxdb.LabelMapping { - ident := mapping.resource.stateIdentity() - return influxdb.LabelMapping{ - LabelID: mapping.label.ID(), - ResourceID: ident.id, - ResourceType: ident.resourceType, - } -} - -type stateLabelMappingForRemoval struct { - LabelID platform.ID - LabelMetaName string - ResourceID platform.ID - ResourceMetaName string - ResourceType influxdb.ResourceType -} - -func (m *stateLabelMappingForRemoval) diffLabelMapping() DiffLabelMapping { - return DiffLabelMapping{ - StateStatus: StateStatusRemove, - ResType: m.ResourceType, - ResID: SafeID(m.ResourceID), - ResMetaName: m.ResourceMetaName, - LabelID: SafeID(m.LabelID), - LabelMetaName: m.LabelMetaName, - } -} - -type stateEndpoint struct { - id, orgID platform.ID - stateStatus StateStatus - labelAssociations []*stateLabel - - parserEndpoint *notificationEndpoint - existing influxdb.NotificationEndpoint -} - -func (e *stateEndpoint) ID() platform.ID { - if !IsNew(e.stateStatus) && e.existing != nil { - return e.existing.GetID() - } - return e.id -} - -func (e *stateEndpoint) diffEndpoint() DiffNotificationEndpoint { - diff := DiffNotificationEndpoint{ - DiffIdentifier: DiffIdentifier{ - ID: SafeID(e.ID()), - StateStatus: e.stateStatus, - MetaName: e.parserEndpoint.MetaName(), - }, - } - sum := e.summarize() - diff.Kind = sum.Kind - if sum.NotificationEndpoint != nil { - diff.New.NotificationEndpoint = sum.NotificationEndpoint - } - if e.existing != nil { - diff.Old = &DiffNotificationEndpointValues{ - NotificationEndpoint: e.existing, - } - } - return diff -} - -func (e *stateEndpoint) labels() []*stateLabel { - return e.labelAssociations -} - -func (e *stateEndpoint) resourceType() influxdb.ResourceType { - return KindNotificationEndpoint.ResourceType() -} - -func (e *stateEndpoint) stateIdentity() stateIdentity { - return stateIdentity{ - id: e.ID(), - name: e.parserEndpoint.Name(), - metaName: e.parserEndpoint.MetaName(), - resourceType: e.resourceType(), - stateStatus: e.stateStatus, - } -} - -func (e *stateEndpoint) summarize() SummaryNotificationEndpoint { - sum := e.parserEndpoint.summarize() - if sum.NotificationEndpoint 
== nil { - return sum - } - if e.ID() != 0 { - sum.NotificationEndpoint.SetID(e.ID()) - } - if e.orgID != 0 { - sum.NotificationEndpoint.SetOrgID(e.orgID) - } - sum.LabelAssociations = stateToSummaryLabels(e.labelAssociations) - return sum -} - -type stateRule struct { - id, orgID platform.ID - stateStatus StateStatus - labelAssociations []*stateLabel - - associatedEndpoint *stateEndpoint - - parserRule *notificationRule - existing influxdb.NotificationRule -} - -func (r *stateRule) ID() platform.ID { - if !IsNew(r.stateStatus) && r.existing != nil { - return r.existing.GetID() - } - return r.id -} - -func (r *stateRule) endpointAssociation() StackResourceAssociation { - if r.associatedEndpoint == nil { - return StackResourceAssociation{} - } - return StackResourceAssociation{ - Kind: KindNotificationEndpoint, - MetaName: r.endpointTemplateName(), - } -} - -func (r *stateRule) diffRule() DiffNotificationRule { - sum := DiffNotificationRule{ - DiffIdentifier: DiffIdentifier{ - Kind: KindNotificationRule, - ID: SafeID(r.ID()), - StateStatus: r.stateStatus, - MetaName: r.parserRule.MetaName(), - }, - New: DiffNotificationRuleValues{ - Name: r.parserRule.Name(), - Description: r.parserRule.description, - EndpointName: r.endpointTemplateName(), - EndpointID: SafeID(r.endpointID()), - EndpointType: r.endpointType(), - Every: r.parserRule.every.String(), - Offset: r.parserRule.offset.String(), - MessageTemplate: r.parserRule.msgTemplate, - StatusRules: toSummaryStatusRules(r.parserRule.statusRules), - TagRules: toSummaryTagRules(r.parserRule.tagRules), - }, - } - - if r.existing == nil { - return sum - } - - sum.Old = &DiffNotificationRuleValues{ - Name: r.existing.GetName(), - Description: r.existing.GetDescription(), - EndpointName: r.existing.GetName(), - EndpointID: SafeID(r.existing.GetEndpointID()), - EndpointType: r.existing.Type(), - } - - assignBase := func(b rule.Base) { - if b.Every != nil { - sum.Old.Every = b.Every.TimeDuration().String() - } - if b.Offset != nil { - sum.Old.Offset = b.Offset.TimeDuration().String() - } - for _, tr := range b.TagRules { - sum.Old.TagRules = append(sum.Old.TagRules, SummaryTagRule{ - Key: tr.Key, - Value: tr.Value, - Operator: tr.Operator.String(), - }) - } - for _, sr := range b.StatusRules { - sRule := SummaryStatusRule{CurrentLevel: sr.CurrentLevel.String()} - if sr.PreviousLevel != nil { - sRule.PreviousLevel = sr.PreviousLevel.String() - } - sum.Old.StatusRules = append(sum.Old.StatusRules, sRule) - } - } - - switch p := r.existing.(type) { - case *rule.HTTP: - assignBase(p.Base) - case *rule.Slack: - assignBase(p.Base) - sum.Old.MessageTemplate = p.MessageTemplate - case *rule.PagerDuty: - assignBase(p.Base) - sum.Old.MessageTemplate = p.MessageTemplate - } - - return sum -} - -func (r *stateRule) endpointID() platform.ID { - if r.associatedEndpoint != nil { - return r.associatedEndpoint.ID() - } - return 0 -} - -func (r *stateRule) endpointTemplateName() string { - if r.associatedEndpoint != nil && r.associatedEndpoint.parserEndpoint != nil { - return r.associatedEndpoint.parserEndpoint.MetaName() - } - return "" -} - -func (r *stateRule) endpointType() string { - if r.associatedEndpoint != nil { - return r.associatedEndpoint.parserEndpoint.kind.String() - } - return "" -} - -func (r *stateRule) labels() []*stateLabel { - return r.labelAssociations -} - -func (r *stateRule) resourceType() influxdb.ResourceType { - return KindNotificationRule.ResourceType() -} - -func (r *stateRule) stateIdentity() stateIdentity { - return stateIdentity{ - id: 
r.ID(), - name: r.parserRule.Name(), - metaName: r.parserRule.MetaName(), - resourceType: r.resourceType(), - stateStatus: r.stateStatus, - } -} - -func (r *stateRule) summarize() SummaryNotificationRule { - sum := r.parserRule.summarize() - sum.ID = SafeID(r.id) - sum.EndpointID = SafeID(r.associatedEndpoint.ID()) - sum.EndpointMetaName = r.associatedEndpoint.parserEndpoint.MetaName() - sum.EndpointType = r.associatedEndpoint.parserEndpoint.kind.String() - sum.LabelAssociations = stateToSummaryLabels(r.labelAssociations) - return sum -} - -func (r *stateRule) toInfluxRule() influxdb.NotificationRule { - influxRule := r.parserRule.toInfluxRule() - if r.ID() > 0 { - influxRule.SetID(r.ID()) - } - if r.orgID > 0 { - influxRule.SetOrgID(r.orgID) - } - switch e := influxRule.(type) { - case *rule.HTTP: - e.EndpointID = r.associatedEndpoint.ID() - case *rule.PagerDuty: - e.EndpointID = r.associatedEndpoint.ID() - case *rule.Slack: - e.EndpointID = r.associatedEndpoint.ID() - } - - return influxRule -} - -type stateTask struct { - id, orgID platform.ID - stateStatus StateStatus - labelAssociations []*stateLabel - - parserTask *task - existing *taskmodel.Task -} - -func (t *stateTask) ID() platform.ID { - if !IsNew(t.stateStatus) && t.existing != nil { - return t.existing.ID - } - return t.id -} - -func (t *stateTask) diffTask() DiffTask { - diff := DiffTask{ - DiffIdentifier: DiffIdentifier{ - Kind: KindTask, - ID: SafeID(t.ID()), - StateStatus: t.stateStatus, - MetaName: t.parserTask.MetaName(), - }, - New: DiffTaskValues{ - Name: t.parserTask.Name(), - Cron: t.parserTask.cron, - Description: t.parserTask.description, - Every: durToStr(t.parserTask.every), - Offset: durToStr(t.parserTask.offset), - Query: t.parserTask.query.DashboardQuery(), - Status: t.parserTask.Status(), - }, - } - - if t.existing == nil { - return diff - } - - diff.Old = &DiffTaskValues{ - Name: t.existing.Name, - Cron: t.existing.Cron, - Description: t.existing.Description, - Every: t.existing.Every, - Offset: t.existing.Offset.String(), - Query: t.existing.Flux, - Status: influxdb.Status(t.existing.Status), - } - - return diff -} - -func (t *stateTask) labels() []*stateLabel { - return t.labelAssociations -} - -func (t *stateTask) resourceType() influxdb.ResourceType { - return influxdb.TasksResourceType -} - -func (t *stateTask) stateIdentity() stateIdentity { - return stateIdentity{ - id: t.ID(), - name: t.parserTask.Name(), - metaName: t.parserTask.MetaName(), - resourceType: t.resourceType(), - stateStatus: t.stateStatus, - } -} - -func (t *stateTask) summarize() SummaryTask { - sum := t.parserTask.summarize() - sum.ID = SafeID(t.id) - sum.LabelAssociations = stateToSummaryLabels(t.labelAssociations) - return sum -} - -type stateTelegraf struct { - id, orgID platform.ID - stateStatus StateStatus - labelAssociations []*stateLabel - - parserTelegraf *telegraf - existing *influxdb.TelegrafConfig -} - -func (t *stateTelegraf) ID() platform.ID { - if !IsNew(t.stateStatus) && t.existing != nil { - return t.existing.ID - } - return t.id -} - -func (t *stateTelegraf) diffTelegraf() DiffTelegraf { - return DiffTelegraf{ - DiffIdentifier: DiffIdentifier{ - Kind: KindTelegraf, - ID: SafeID(t.ID()), - StateStatus: t.stateStatus, - MetaName: t.parserTelegraf.MetaName(), - }, - New: t.parserTelegraf.config, - Old: t.existing, - } -} - -func (t *stateTelegraf) labels() []*stateLabel { - return t.labelAssociations -} - -func (t *stateTelegraf) resourceType() influxdb.ResourceType { - return influxdb.TelegrafsResourceType -} - -func 
(t *stateTelegraf) stateIdentity() stateIdentity { - return stateIdentity{ - id: t.ID(), - name: t.parserTelegraf.Name(), - metaName: t.parserTelegraf.MetaName(), - resourceType: t.resourceType(), - stateStatus: t.stateStatus, - } -} - -func (t *stateTelegraf) summarize() SummaryTelegraf { - sum := t.parserTelegraf.summarize() - sum.TelegrafConfig.ID = t.id - sum.TelegrafConfig.OrgID = t.orgID - sum.LabelAssociations = stateToSummaryLabels(t.labelAssociations) - return sum -} - -type stateVariable struct { - id, orgID platform.ID - stateStatus StateStatus - labelAssociations []*stateLabel - - parserVar *variable - existing *influxdb.Variable -} - -func (v *stateVariable) ID() platform.ID { - if !IsNew(v.stateStatus) && v.existing != nil { - return v.existing.ID - } - return v.id -} - -func (v *stateVariable) diffVariable() DiffVariable { - diff := DiffVariable{ - DiffIdentifier: DiffIdentifier{ - Kind: KindVariable, - ID: SafeID(v.ID()), - StateStatus: v.stateStatus, - MetaName: v.parserVar.MetaName(), - }, - New: DiffVariableValues{ - Name: v.parserVar.Name(), - Description: v.parserVar.Description, - Args: v.parserVar.influxVarArgs(), - }, - } - if iv := v.existing; iv != nil { - diff.Old = &DiffVariableValues{ - Name: iv.Name, - Description: iv.Description, - Args: iv.Arguments, - } - } - - return diff -} - -func (v *stateVariable) labels() []*stateLabel { - return v.labelAssociations -} - -func (v *stateVariable) resourceType() influxdb.ResourceType { - return KindVariable.ResourceType() -} - -func (v *stateVariable) shouldApply() bool { - return IsRemoval(v.stateStatus) || - v.existing == nil || - v.existing.Description != v.parserVar.Description || - !reflect.DeepEqual(v.existing.Selected, v.parserVar.Selected()) || - v.existing.Arguments == nil || - !reflect.DeepEqual(v.existing.Arguments, v.parserVar.influxVarArgs()) -} - -func (v *stateVariable) stateIdentity() stateIdentity { - return stateIdentity{ - id: v.ID(), - name: v.parserVar.Name(), - metaName: v.parserVar.MetaName(), - resourceType: v.resourceType(), - stateStatus: v.stateStatus, - } -} - -func (v *stateVariable) summarize() SummaryVariable { - sum := v.parserVar.summarize() - sum.ID = SafeID(v.ID()) - sum.OrgID = SafeID(v.orgID) - sum.LabelAssociations = stateToSummaryLabels(v.labelAssociations) - return sum -} - -// IsNew identifies state status as new to the platform. -func IsNew(status StateStatus) bool { - // defaulting zero value to identify as new - return status == StateStatusNew || status == "" -} - -// IsExisting identifies state status as existing in the platform. -func IsExisting(status StateStatus) bool { - return status == StateStatusExists -} - -// IsRemoval identifies state status as existing resource that will be removed -// from the platform. 
-func IsRemoval(status StateStatus) bool { - return status == StateStatusRemove -} - -type resourceActions struct { - skipKinds map[Kind]bool - skipResources map[ActionSkipResource]bool -} - -func (r resourceActions) skipResource(k Kind, metaName string) bool { - key := ActionSkipResource{ - Kind: k, - MetaName: metaName, - } - return r.skipResources[key] || r.skipKinds[k] -} diff --git a/pkger/service_test.go b/pkger/service_test.go deleted file mode 100644 index 5c8b94468ca..00000000000 --- a/pkger/service_test.go +++ /dev/null @@ -1,5557 +0,0 @@ -package pkger - -import ( - "bytes" - "context" - "errors" - "fmt" - "math/rand" - "net/url" - "regexp" - "sort" - "strconv" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification" - icheck "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/rule" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestService(t *testing.T) { - newTestService := func(opts ...ServiceSetterFn) *Service { - opt := serviceOpt{ - bucketSVC: mock.NewBucketService(), - checkSVC: mock.NewCheckService(), - dashSVC: mock.NewDashboardService(), - labelSVC: mock.NewLabelService(), - endpointSVC: mock.NewNotificationEndpointService(), - orgSVC: mock.NewOrganizationService(), - ruleSVC: mock.NewNotificationRuleStore(), - store: &fakeStore{ - createFn: func(ctx context.Context, stack Stack) error { - return nil - }, - deleteFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - readFn: func(ctx context.Context, id platform.ID) (Stack, error) { - return Stack{ID: id}, nil - }, - updateFn: func(ctx context.Context, stack Stack) error { - return nil - }, - }, - taskSVC: mock.NewTaskService(), - teleSVC: mock.NewTelegrafConfigStore(), - varSVC: mock.NewVariableService(), - } - for _, o := range opts { - o(&opt) - } - - applyOpts := []ServiceSetterFn{ - WithStore(opt.store), - WithBucketSVC(opt.bucketSVC), - WithCheckSVC(opt.checkSVC), - WithDashboardSVC(opt.dashSVC), - WithLabelSVC(opt.labelSVC), - WithNotificationEndpointSVC(opt.endpointSVC), - WithNotificationRuleSVC(opt.ruleSVC), - WithOrganizationService(opt.orgSVC), - WithSecretSVC(opt.secretSVC), - WithTaskSVC(opt.taskSVC), - WithTelegrafSVC(opt.teleSVC), - WithVariableSVC(opt.varSVC), - } - if opt.idGen != nil { - applyOpts = append(applyOpts, WithIDGenerator(opt.idGen)) - } - if opt.timeGen != nil { - applyOpts = append(applyOpts, WithTimeGenerator(opt.timeGen)) - } - if opt.nameGen != nil { - applyOpts = append(applyOpts, withNameGen(opt.nameGen)) - } - - return NewService(applyOpts...) 
- } - - t.Run("DryRun", func(t *testing.T) { - type dryRunTestFields struct { - path string - kinds []Kind - skipResources []ActionSkipResource - assertFn func(*testing.T, ImpactSummary) - } - - testDryRunActions := func(t *testing.T, fields dryRunTestFields) { - t.Helper() - - var skipResOpts []ApplyOptFn - for _, asr := range fields.skipResources { - skipResOpts = append(skipResOpts, ApplyWithResourceSkip(asr)) - } - - testfileRunner(t, fields.path, func(t *testing.T, template *Template) { - t.Helper() - - tests := []struct { - name string - applyOpts []ApplyOptFn - }{ - { - name: "skip resources", - applyOpts: skipResOpts, - }, - } - - for _, k := range fields.kinds { - tests = append(tests, struct { - name string - applyOpts []ApplyOptFn - }{ - name: "skip kind " + k.String(), - applyOpts: []ApplyOptFn{ - ApplyWithKindSkip(ActionSkipKind{ - Kind: k, - }), - }, - }) - } - - for _, tt := range tests { - fn := func(t *testing.T) { - t.Helper() - - svc := newTestService() - - impact, err := svc.DryRun( - context.TODO(), - platform.ID(100), - 0, - append(tt.applyOpts, ApplyWithTemplate(template))..., - ) - require.NoError(t, err) - - fields.assertFn(t, impact) - } - t.Run(tt.name, fn) - } - }) - } - - t.Run("buckets", func(t *testing.T) { - t.Run("single bucket updated", func(t *testing.T) { - testfileRunner(t, "testdata/bucket.yml", func(t *testing.T, template *Template) { - fakeBktSVC := mock.NewBucketService() - fakeBktSVC.FindBucketByNameFn = func(_ context.Context, orgID platform.ID, name string) (*influxdb.Bucket, error) { - if name != "rucket-11" { - return nil, errors.New("not found") - } - return &influxdb.Bucket{ - ID: platform.ID(1), - OrgID: orgID, - Name: name, - Description: "old desc", - RetentionPeriod: 30 * time.Hour, - }, nil - } - svc := newTestService(WithBucketSVC(fakeBktSVC)) - - impact, err := svc.DryRun(context.TODO(), platform.ID(100), 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - require.Len(t, impact.Diff.Buckets, 2) - - expected := DiffBucket{ - DiffIdentifier: DiffIdentifier{ - ID: SafeID(1), - StateStatus: StateStatusExists, - MetaName: "rucket-11", - Kind: KindBucket, - }, - - Old: &DiffBucketValues{ - Name: "rucket-11", - Description: "old desc", - RetentionRules: retentionRules{newRetentionRule(30 * time.Hour)}, - }, - New: DiffBucketValues{ - Name: "rucket-11", - Description: "bucket 1 description", - RetentionRules: retentionRules{newRetentionRule(time.Hour)}, - }, - } - assert.Contains(t, impact.Diff.Buckets, expected) - }) - }) - - t.Run("single bucket new", func(t *testing.T) { - testfileRunner(t, "testdata/bucket.json", func(t *testing.T, template *Template) { - fakeBktSVC := mock.NewBucketService() - fakeBktSVC.FindBucketByNameFn = func(_ context.Context, orgID platform.ID, name string) (*influxdb.Bucket, error) { - return nil, errors.New("not found") - } - svc := newTestService(WithBucketSVC(fakeBktSVC)) - - impact, err := svc.DryRun(context.TODO(), platform.ID(100), 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - require.Len(t, impact.Diff.Buckets, 2) - - expected := DiffBucket{ - DiffIdentifier: DiffIdentifier{ - MetaName: "rucket-11", - StateStatus: StateStatusNew, - Kind: KindBucket, - }, - New: DiffBucketValues{ - Name: "rucket-11", - Description: "bucket 1 description", - RetentionRules: retentionRules{newRetentionRule(time.Hour)}, - }, - } - assert.Contains(t, impact.Diff.Buckets, expected) - }) - }) - - t.Run("with actions applied", func(t *testing.T) { - testDryRunActions(t, dryRunTestFields{ - path: 
"testdata/bucket.yml", - kinds: []Kind{KindBucket}, - skipResources: []ActionSkipResource{ - { - Kind: KindBucket, - MetaName: "rucket-22", - }, - { - Kind: KindBucket, - MetaName: "rucket-11", - }, - }, - assertFn: func(t *testing.T, impact ImpactSummary) { - require.Empty(t, impact.Diff.Buckets) - }, - }) - }) - }) - - t.Run("checks", func(t *testing.T) { - t.Run("mixed update and creates", func(t *testing.T) { - testfileRunner(t, "testdata/checks.yml", func(t *testing.T, template *Template) { - fakeCheckSVC := mock.NewCheckService() - id := platform.ID(1) - existing := &icheck.Deadman{ - Base: icheck.Base{ - ID: id, - Name: "display name", - Description: "old desc", - }, - } - fakeCheckSVC.FindCheckFn = func(ctx context.Context, f influxdb.CheckFilter) (influxdb.Check, error) { - if f.Name != nil && *f.Name == "display name" { - return existing, nil - } - return nil, errors.New("not found") - } - - svc := newTestService(WithCheckSVC(fakeCheckSVC)) - - impact, err := svc.DryRun(context.TODO(), platform.ID(100), 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - checks := impact.Diff.Checks - require.Len(t, checks, 2) - check0 := checks[0] - assert.True(t, check0.IsNew()) - assert.Equal(t, "check-0", check0.MetaName) - assert.Zero(t, check0.ID) - assert.Nil(t, check0.Old) - - check1 := checks[1] - assert.False(t, check1.IsNew()) - assert.Equal(t, "check-1", check1.MetaName) - assert.Equal(t, "display name", check1.New.GetName()) - assert.NotZero(t, check1.ID) - assert.Equal(t, existing, check1.Old.Check) - }) - }) - - t.Run("with actions applied", func(t *testing.T) { - testDryRunActions(t, dryRunTestFields{ - path: "testdata/checks.yml", - kinds: []Kind{KindCheck, KindCheckDeadman, KindCheckThreshold}, - skipResources: []ActionSkipResource{ - { - Kind: KindCheck, - MetaName: "check-0", - }, - { - Kind: KindCheck, - MetaName: "check-1", - }, - }, - assertFn: func(t *testing.T, impact ImpactSummary) { - require.Empty(t, impact.Diff.Checks) - }, - }) - }) - }) - - t.Run("dashboards", func(t *testing.T) { - t.Run("with actions applied", func(t *testing.T) { - testDryRunActions(t, dryRunTestFields{ - path: "testdata/dashboard.yml", - kinds: []Kind{KindDashboard}, - skipResources: []ActionSkipResource{ - { - Kind: KindDashboard, - MetaName: "dash-1", - }, - { - Kind: KindDashboard, - MetaName: "dash-2", - }, - }, - assertFn: func(t *testing.T, impact ImpactSummary) { - require.Empty(t, impact.Diff.Dashboards) - }, - }) - }) - }) - - t.Run("labels", func(t *testing.T) { - t.Run("two labels updated", func(t *testing.T) { - testfileRunner(t, "testdata/label.json", func(t *testing.T, template *Template) { - fakeLabelSVC := mock.NewLabelService() - fakeLabelSVC.FindLabelsFn = func(_ context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - { - ID: platform.ID(1), - Name: filter.Name, - Properties: map[string]string{ - "color": "old color", - "description": "old description", - }, - }, - }, nil - } - svc := newTestService(WithLabelSVC(fakeLabelSVC)) - - impact, err := svc.DryRun(context.TODO(), platform.ID(100), 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - require.Len(t, impact.Diff.Labels, 3) - - expected := DiffLabel{ - DiffIdentifier: DiffIdentifier{ - ID: SafeID(1), - StateStatus: StateStatusExists, - MetaName: "label-1", - Kind: KindLabel, - }, - Old: &DiffLabelValues{ - Name: "label-1", - Color: "old color", - Description: "old description", - }, - New: DiffLabelValues{ - Name: "label-1", - Color: "#FFFFFF", - 
Description: "label 1 description", - }, - } - assert.Contains(t, impact.Diff.Labels, expected) - - expected.MetaName = "label-2" - expected.New.Name = "label-2" - expected.New.Color = "#000000" - expected.New.Description = "label 2 description" - expected.Old.Name = "label-2" - assert.Contains(t, impact.Diff.Labels, expected) - }) - }) - - t.Run("two labels created", func(t *testing.T) { - testfileRunner(t, "testdata/label.yml", func(t *testing.T, template *Template) { - fakeLabelSVC := mock.NewLabelService() - fakeLabelSVC.FindLabelsFn = func(_ context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) { - return nil, errors.New("no labels found") - } - svc := newTestService(WithLabelSVC(fakeLabelSVC)) - - impact, err := svc.DryRun(context.TODO(), platform.ID(100), 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - labels := impact.Diff.Labels - require.Len(t, labels, 3) - - expected := DiffLabel{ - DiffIdentifier: DiffIdentifier{ - MetaName: "label-1", - StateStatus: StateStatusNew, - Kind: KindLabel, - }, - New: DiffLabelValues{ - Name: "label-1", - Color: "#FFFFFF", - Description: "label 1 description", - }, - } - assert.Contains(t, labels, expected) - - expected.MetaName = "label-2" - expected.New.Name = "label-2" - expected.New.Color = "#000000" - expected.New.Description = "label 2 description" - assert.Contains(t, labels, expected) - }) - }) - - t.Run("with actions applied", func(t *testing.T) { - testDryRunActions(t, dryRunTestFields{ - path: "testdata/label.yml", - kinds: []Kind{KindLabel}, - skipResources: []ActionSkipResource{ - { - Kind: KindLabel, - MetaName: "label-1", - }, - { - Kind: KindLabel, - MetaName: "label-2", - }, - { - Kind: KindLabel, - MetaName: "label-3", - }, - }, - assertFn: func(t *testing.T, impact ImpactSummary) { - require.Empty(t, impact.Diff.Labels) - }, - }) - }) - }) - - t.Run("notification endpoints", func(t *testing.T) { - t.Run("mixed update and created", func(t *testing.T) { - testfileRunner(t, "testdata/notification_endpoint.yml", func(t *testing.T, template *Template) { - fakeEndpointSVC := mock.NewNotificationEndpointService() - id := platform.ID(1) - existing := &endpoint.HTTP{ - Base: endpoint.Base{ - ID: &id, - Name: "http-none-auth-notification-endpoint", - Description: "old desc", - Status: taskmodel.TaskStatusInactive, - }, - Method: "POST", - AuthMethod: "none", - URL: "https://www.example.com/endpoint/old", - } - fakeEndpointSVC.FindNotificationEndpointsF = func(ctx context.Context, f influxdb.NotificationEndpointFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) { - return []influxdb.NotificationEndpoint{existing}, 1, nil - } - - svc := newTestService(WithNotificationEndpointSVC(fakeEndpointSVC)) - - impact, err := svc.DryRun(context.TODO(), platform.ID(100), 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - require.Len(t, impact.Diff.NotificationEndpoints, 5) - - var ( - newEndpoints []DiffNotificationEndpoint - existingEndpoints []DiffNotificationEndpoint - ) - for _, e := range impact.Diff.NotificationEndpoints { - if e.Old != nil { - existingEndpoints = append(existingEndpoints, e) - continue - } - newEndpoints = append(newEndpoints, e) - } - require.Len(t, newEndpoints, 4) - require.Len(t, existingEndpoints, 1) - - expected := DiffNotificationEndpoint{ - DiffIdentifier: DiffIdentifier{ - ID: 1, - MetaName: "http-none-auth-notification-endpoint", - StateStatus: StateStatusExists, - Kind: KindNotificationEndpointHTTP, - }, - Old: &DiffNotificationEndpointValues{ - 
NotificationEndpoint: existing, - }, - New: DiffNotificationEndpointValues{ - NotificationEndpoint: &endpoint.HTTP{ - Base: endpoint.Base{ - ID: &id, - Name: "http-none-auth-notification-endpoint", - Description: "http none auth desc", - Status: taskmodel.TaskStatusActive, - }, - AuthMethod: "none", - Method: "GET", - URL: "https://www.example.com/endpoint/noneauth", - }, - }, - } - assert.Equal(t, expected, existingEndpoints[0]) - }) - }) - - t.Run("with actions applied", func(t *testing.T) { - testDryRunActions(t, dryRunTestFields{ - path: "testdata/notification_endpoint.yml", - kinds: []Kind{ - KindNotificationEndpoint, - KindNotificationEndpointHTTP, - KindNotificationEndpointPagerDuty, - KindNotificationEndpointSlack, - }, - skipResources: []ActionSkipResource{ - { - Kind: KindNotificationEndpoint, - MetaName: "http-none-auth-notification-endpoint", - }, - { - Kind: KindNotificationEndpoint, - MetaName: "http-bearer-auth-notification-endpoint", - }, - { - Kind: KindNotificationEndpointHTTP, - MetaName: "http-basic-auth-notification-endpoint", - }, - { - Kind: KindNotificationEndpointSlack, - MetaName: "slack-notification-endpoint", - }, - { - Kind: KindNotificationEndpointPagerDuty, - MetaName: "pager-duty-notification-endpoint", - }, - }, - assertFn: func(t *testing.T, impact ImpactSummary) { - require.Empty(t, impact.Diff.NotificationEndpoints) - }, - }) - }) - }) - - t.Run("notification rules", func(t *testing.T) { - t.Run("mixed update and created", func(t *testing.T) { - testfileRunner(t, "testdata/notification_rule.yml", func(t *testing.T, template *Template) { - fakeEndpointSVC := mock.NewNotificationEndpointService() - id := platform.ID(1) - existing := &endpoint.HTTP{ - Base: endpoint.Base{ - ID: &id, - // This name here matches the endpoint identified in the template notification rule - Name: "endpoint-0", - Description: "old desc", - Status: taskmodel.TaskStatusInactive, - }, - Method: "POST", - AuthMethod: "none", - URL: "https://www.example.com/endpoint/old", - } - fakeEndpointSVC.FindNotificationEndpointsF = func(ctx context.Context, f influxdb.NotificationEndpointFilter, opt ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) { - return []influxdb.NotificationEndpoint{existing}, 1, nil - } - - svc := newTestService(WithNotificationEndpointSVC(fakeEndpointSVC)) - - impact, err := svc.DryRun(context.TODO(), platform.ID(100), 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - require.Len(t, impact.Diff.NotificationRules, 1) - - actual := impact.Diff.NotificationRules[0].New - assert.Equal(t, "rule_0", actual.Name) - assert.Equal(t, "desc_0", actual.Description) - assert.Equal(t, "slack", actual.EndpointType) - assert.Equal(t, existing.Name, actual.EndpointName) - assert.Equal(t, SafeID(*existing.ID), actual.EndpointID) - assert.Equal(t, (10 * time.Minute).String(), actual.Every) - assert.Equal(t, (30 * time.Second).String(), actual.Offset) - - expectedStatusRules := []SummaryStatusRule{ - {CurrentLevel: "CRIT", PreviousLevel: "OK"}, - {CurrentLevel: "WARN"}, - } - assert.Equal(t, expectedStatusRules, actual.StatusRules) - - expectedTagRules := []SummaryTagRule{ - {Key: "k1", Value: "v1", Operator: "equal"}, - {Key: "k1", Value: "v2", Operator: "equal"}, - } - assert.Equal(t, expectedTagRules, actual.TagRules) - }) - }) - - t.Run("with actions applied", func(t *testing.T) { - testDryRunActions(t, dryRunTestFields{ - path: "testdata/notification_rule.yml", - kinds: []Kind{KindNotificationRule}, - skipResources: []ActionSkipResource{ - { - Kind: 
KindNotificationRule, - MetaName: "rule-uuid", - }, - }, - assertFn: func(t *testing.T, impact ImpactSummary) { - require.Empty(t, impact.Diff.NotificationRules) - }, - }) - }) - }) - - t.Run("secrets not returns missing secrets", func(t *testing.T) { - testfileRunner(t, "testdata/notification_endpoint_secrets.yml", func(t *testing.T, template *Template) { - fakeSecretSVC := mock.NewSecretService() - fakeSecretSVC.GetSecretKeysFn = func(ctx context.Context, orgID platform.ID) ([]string, error) { - return []string{"rando-1", "rando-2"}, nil - } - svc := newTestService(WithSecretSVC(fakeSecretSVC)) - - impact, err := svc.DryRun(context.TODO(), platform.ID(100), 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - assert.Equal(t, []string{"routing-key"}, impact.Summary.MissingSecrets) - }) - }) - - t.Run("tasks", func(t *testing.T) { - t.Run("with actions applied", func(t *testing.T) { - testDryRunActions(t, dryRunTestFields{ - path: "testdata/tasks.yml", - kinds: []Kind{KindTask}, - skipResources: []ActionSkipResource{ - { - Kind: KindTask, - MetaName: "task-uuid", - }, - { - Kind: KindTask, - MetaName: "task-1", - }, - }, - assertFn: func(t *testing.T, impact ImpactSummary) { - require.Empty(t, impact.Diff.Tasks) - }, - }) - }) - }) - - t.Run("telegraf configs", func(t *testing.T) { - t.Run("with actions applied", func(t *testing.T) { - testDryRunActions(t, dryRunTestFields{ - path: "testdata/telegraf.yml", - kinds: []Kind{KindTelegraf}, - skipResources: []ActionSkipResource{ - { - Kind: KindTelegraf, - MetaName: "first-tele-config", - }, - { - Kind: KindTelegraf, - MetaName: "tele-2", - }, - }, - assertFn: func(t *testing.T, impact ImpactSummary) { - require.Empty(t, impact.Diff.Telegrafs) - }, - }) - }) - }) - - t.Run("variables", func(t *testing.T) { - t.Run("mixed update and created", func(t *testing.T) { - testfileRunner(t, "testdata/variables.json", func(t *testing.T, template *Template) { - fakeVarSVC := mock.NewVariableService() - fakeVarSVC.FindVariablesF = func(_ context.Context, filter influxdb.VariableFilter, opts ...influxdb.FindOptions) ([]*influxdb.Variable, error) { - return []*influxdb.Variable{ - { - ID: platform.ID(1), - Name: "var-const-3", - Description: "old desc", - }, - }, nil - } - svc := newTestService(WithVariableSVC(fakeVarSVC)) - - impact, err := svc.DryRun(context.TODO(), platform.ID(100), 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - variables := impact.Diff.Variables - require.Len(t, variables, 4) - - expected := DiffVariable{ - DiffIdentifier: DiffIdentifier{ - ID: 1, - MetaName: "var-const-3", - StateStatus: StateStatusExists, - Kind: KindVariable, - }, - Old: &DiffVariableValues{ - Name: "var-const-3", - Description: "old desc", - }, - New: DiffVariableValues{ - Name: "var-const-3", - Description: "var-const-3 desc", - Args: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"first val"}, - }, - }, - } - assert.Equal(t, expected, variables[0]) - - expected = DiffVariable{ - DiffIdentifier: DiffIdentifier{ - // no ID here since this one would be new - MetaName: "var-map-4", - StateStatus: StateStatusNew, - Kind: KindVariable, - }, - New: DiffVariableValues{ - Name: "var-map-4", - Description: "var-map-4 desc", - Args: &influxdb.VariableArguments{ - Type: "map", - Values: influxdb.VariableMapValues{"k1": "v1"}, - }, - }, - } - assert.Equal(t, expected, variables[1]) - }) - }) - - t.Run("with actions applied", func(t *testing.T) { - testDryRunActions(t, dryRunTestFields{ - path: 
"testdata/variables.yml", - kinds: []Kind{KindVariable}, - skipResources: []ActionSkipResource{ - { - Kind: KindVariable, - MetaName: "var-query-1", - }, - { - Kind: KindVariable, - MetaName: "var-query-2", - }, - { - Kind: KindVariable, - MetaName: "var-const-3", - }, - { - Kind: KindVariable, - MetaName: "var-map-4", - }, - }, - assertFn: func(t *testing.T, impact ImpactSummary) { - require.Empty(t, impact.Diff.Variables) - }, - }) - }) - }) - }) - - t.Run("Apply", func(t *testing.T) { - t.Run("buckets", func(t *testing.T) { - t.Run("successfully creates template of buckets", func(t *testing.T) { - testfileRunner(t, "testdata/bucket.yml", func(t *testing.T, template *Template) { - fakeBktSVC := mock.NewBucketService() - fakeBktSVC.CreateBucketFn = func(_ context.Context, b *influxdb.Bucket) error { - b.ID = platform.ID(b.RetentionPeriod) - return nil - } - fakeBktSVC.FindBucketByNameFn = func(_ context.Context, id platform.ID, s string) (*influxdb.Bucket, error) { - // forces the bucket to be created a new - return nil, errors.New("an error") - } - fakeBktSVC.UpdateBucketFn = func(_ context.Context, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ID: id}, nil - } - - svc := newTestService(WithBucketSVC(fakeBktSVC)) - - orgID := platform.ID(9000) - - impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - sum := impact.Summary - require.Len(t, sum.Buckets, 2) - - expected := SummaryBucket{ - SummaryIdentifier: SummaryIdentifier{ - Kind: KindBucket, - MetaName: "rucket-11", - EnvReferences: []SummaryReference{}, - }, - ID: SafeID(time.Hour), - OrgID: SafeID(orgID), - Name: "rucket-11", - Description: "bucket 1 description", - RetentionPeriod: time.Hour, - LabelAssociations: []SummaryLabel{}, - } - assert.Contains(t, sum.Buckets, expected) - }) - }) - - t.Run("will not apply bucket if no changes to be applied", func(t *testing.T) { - testfileRunner(t, "testdata/bucket.yml", func(t *testing.T, template *Template) { - orgID := platform.ID(9000) - - fakeBktSVC := mock.NewBucketService() - fakeBktSVC.FindBucketByNameFn = func(ctx context.Context, oid platform.ID, name string) (*influxdb.Bucket, error) { - if orgID != oid { - return nil, errors.New("invalid org id") - } - - id := platform.ID(3) - if name == "display name" { - id = 4 - name = "rucket-22" - } - if bkt, ok := template.mBuckets[name]; ok { - return &influxdb.Bucket{ - ID: id, - OrgID: oid, - Name: bkt.Name(), - Description: bkt.Description, - RetentionPeriod: bkt.RetentionRules.RP(), - }, nil - } - return nil, errors.New("not found") - } - fakeBktSVC.UpdateBucketFn = func(_ context.Context, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ID: id}, nil - } - - svc := newTestService(WithBucketSVC(fakeBktSVC)) - - impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - sum := impact.Summary - require.Len(t, sum.Buckets, 2) - - expected := SummaryBucket{ - SummaryIdentifier: SummaryIdentifier{ - Kind: KindBucket, - MetaName: "rucket-11", - EnvReferences: []SummaryReference{}, - }, - ID: SafeID(3), - OrgID: SafeID(orgID), - Name: "rucket-11", - Description: "bucket 1 description", - RetentionPeriod: time.Hour, - LabelAssociations: []SummaryLabel{}, - } - assert.Contains(t, sum.Buckets, expected) - assert.Zero(t, fakeBktSVC.CreateBucketCalls.Count()) - assert.Zero(t, fakeBktSVC.UpdateBucketCalls.Count()) - }) - }) - - t.Run("rolls back 
all created buckets on an error", func(t *testing.T) { - testfileRunner(t, "testdata/bucket.yml", func(t *testing.T, template *Template) { - fakeBktSVC := mock.NewBucketService() - fakeBktSVC.FindBucketByNameFn = func(_ context.Context, id platform.ID, s string) (*influxdb.Bucket, error) { - // forces the bucket to be created a new - return nil, errors.New("an error") - } - fakeBktSVC.CreateBucketFn = func(_ context.Context, b *influxdb.Bucket) error { - if fakeBktSVC.CreateBucketCalls.Count() == 1 { - return errors.New("blowed up ") - } - return nil - } - - svc := newTestService(WithBucketSVC(fakeBktSVC)) - - orgID := platform.ID(9000) - - _, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.Error(t, err) - - assert.GreaterOrEqual(t, fakeBktSVC.DeleteBucketCalls.Count(), 1) - }) - }) - }) - - t.Run("checks", func(t *testing.T) { - t.Run("successfully creates template of checks", func(t *testing.T) { - testfileRunner(t, "testdata/checks.yml", func(t *testing.T, template *Template) { - fakeCheckSVC := mock.NewCheckService() - fakeCheckSVC.CreateCheckFn = func(ctx context.Context, c influxdb.CheckCreate, id platform.ID) error { - c.SetID(platform.ID(fakeCheckSVC.CreateCheckCalls.Count() + 1)) - return nil - } - - svc := newTestService(WithCheckSVC(fakeCheckSVC)) - - orgID := platform.ID(9000) - - impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - sum := impact.Summary - require.Len(t, sum.Checks, 2) - - containsWithID := func(t *testing.T, name string) { - t.Helper() - - for _, actualNotification := range sum.Checks { - actual := actualNotification.Check - if actual.GetID() == 0 { - assert.NotZero(t, actual.GetID()) - } - if actual.GetName() == name { - return - } - } - assert.Fail(t, "did not find notification by name: "+name) - } - - for _, expectedName := range []string{"check-0", "display name"} { - containsWithID(t, expectedName) - } - }) - }) - - t.Run("rolls back all created checks on an error", func(t *testing.T) { - testfileRunner(t, "testdata/checks.yml", func(t *testing.T, template *Template) { - fakeCheckSVC := mock.NewCheckService() - fakeCheckSVC.CreateCheckFn = func(ctx context.Context, c influxdb.CheckCreate, id platform.ID) error { - c.SetID(platform.ID(fakeCheckSVC.CreateCheckCalls.Count() + 1)) - if fakeCheckSVC.CreateCheckCalls.Count() == 1 { - return errors.New("hit that kill count") - } - return nil - } - - svc := newTestService(WithCheckSVC(fakeCheckSVC)) - - orgID := platform.ID(9000) - - _, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.Error(t, err) - - assert.GreaterOrEqual(t, fakeCheckSVC.DeleteCheckCalls.Count(), 1) - }) - }) - }) - - t.Run("labels", func(t *testing.T) { - t.Run("successfully creates template of labels", func(t *testing.T) { - testfileRunner(t, "testdata/label.json", func(t *testing.T, template *Template) { - fakeLabelSVC := mock.NewLabelService() - fakeLabelSVC.CreateLabelFn = func(_ context.Context, l *influxdb.Label) error { - i, err := strconv.Atoi(l.Name[len(l.Name)-1:]) - if err != nil { - return nil - } - l.ID = platform.ID(i) - return nil - } - - svc := newTestService(WithLabelSVC(fakeLabelSVC)) - - orgID := platform.ID(9000) - - impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - sum := impact.Summary - require.Len(t, sum.Labels, 3) - - expectedLabel := sumLabelGen("label-1", "label-1", "#FFFFFF", "label 1 description") - expectedLabel.ID = 1 - 
expectedLabel.OrgID = SafeID(orgID) - assert.Contains(t, sum.Labels, expectedLabel) - - expectedLabel = sumLabelGen("label-2", "label-2", "#000000", "label 2 description") - expectedLabel.ID = 2 - expectedLabel.OrgID = SafeID(orgID) - assert.Contains(t, sum.Labels, expectedLabel) - }) - }) - - t.Run("rolls back all created labels on an error", func(t *testing.T) { - testfileRunner(t, "testdata/label", func(t *testing.T, template *Template) { - fakeLabelSVC := mock.NewLabelService() - fakeLabelSVC.CreateLabelFn = func(_ context.Context, l *influxdb.Label) error { - // 3rd/4th label will return the error here, and 2 before should be rolled back - if fakeLabelSVC.CreateLabelCalls.Count() == 2 { - return errors.New("blowed up ") - } - return nil - } - - svc := newTestService(WithLabelSVC(fakeLabelSVC)) - - orgID := platform.ID(9000) - - _, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.Error(t, err) - - assert.GreaterOrEqual(t, fakeLabelSVC.DeleteLabelCalls.Count(), 1) - }) - }) - - t.Run("will not apply label if no changes to be applied", func(t *testing.T) { - testfileRunner(t, "testdata/label.yml", func(t *testing.T, template *Template) { - orgID := platform.ID(9000) - - stubExisting := func(name string, id platform.ID) *influxdb.Label { - templateLabel := template.mLabels[name] - return &influxdb.Label{ - // makes all template changes same as they are on the existing - ID: id, - OrgID: orgID, - Name: templateLabel.Name(), - Properties: map[string]string{ - "color": templateLabel.Color, - "description": templateLabel.Description, - }, - } - } - stubExisting("label-1", 1) - stubExisting("label-3", 3) - - fakeLabelSVC := mock.NewLabelService() - fakeLabelSVC.FindLabelsFn = func(ctx context.Context, f influxdb.LabelFilter) ([]*influxdb.Label, error) { - if f.Name != "label-1" && f.Name != "display name" { - return nil, nil - } - id := platform.ID(1) - name := f.Name - if f.Name == "display name" { - id = 3 - name = "label-3" - } - return []*influxdb.Label{stubExisting(name, id)}, nil - } - fakeLabelSVC.CreateLabelFn = func(_ context.Context, l *influxdb.Label) error { - if l.Name == "label-2" { - l.ID = 2 - } - return nil - } - fakeLabelSVC.UpdateLabelFn = func(_ context.Context, id platform.ID, l influxdb.LabelUpdate) (*influxdb.Label, error) { - if id == platform.ID(3) { - return nil, errors.New("invalid id provided") - } - return &influxdb.Label{ID: id}, nil - } - - svc := newTestService(WithLabelSVC(fakeLabelSVC)) - - impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - sum := impact.Summary - require.Len(t, sum.Labels, 3) - - expectedLabel := sumLabelGen("label-1", "label-1", "#FFFFFF", "label 1 description") - expectedLabel.ID = 1 - expectedLabel.OrgID = SafeID(orgID) - assert.Contains(t, sum.Labels, expectedLabel) - - expectedLabel = sumLabelGen("label-2", "label-2", "#000000", "label 2 description") - expectedLabel.ID = 2 - expectedLabel.OrgID = SafeID(orgID) - assert.Contains(t, sum.Labels, expectedLabel) - - assert.Equal(t, 1, fakeLabelSVC.CreateLabelCalls.Count()) // only called for second label - }) - }) - }) - - t.Run("dashboards", func(t *testing.T) { - t.Run("successfully creates a dashboard", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard.yml", func(t *testing.T, template *Template) { - fakeDashSVC := mock.NewDashboardService() - fakeDashSVC.CreateDashboardF = func(_ context.Context, d *influxdb.Dashboard) error { - d.ID = platform.ID(1) - return nil - } - 
fakeDashSVC.UpdateDashboardCellViewF = func(ctx context.Context, dID platform.ID, cID platform.ID, upd influxdb.ViewUpdate) (*influxdb.View, error) { - return &influxdb.View{}, nil - } - - svc := newTestService(WithDashboardSVC(fakeDashSVC)) - - orgID := platform.ID(9000) - - impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - sum := impact.Summary - - require.Len(t, sum.Dashboards, 2) - dash1 := sum.Dashboards[0] - assert.NotZero(t, dash1.ID) - assert.NotZero(t, dash1.OrgID) - assert.Equal(t, "dash-1", dash1.MetaName) - assert.Equal(t, "display name", dash1.Name) - require.Len(t, dash1.Charts, 1) - - dash2 := sum.Dashboards[1] - assert.NotZero(t, dash2.ID) - assert.Equal(t, "dash-2", dash2.MetaName) - assert.Equal(t, "dash-2", dash2.Name) - require.Empty(t, dash2.Charts) - }) - }) - - t.Run("rolls back created dashboard on an error", func(t *testing.T) { - testfileRunner(t, "testdata/dashboard.yml", func(t *testing.T, template *Template) { - fakeDashSVC := mock.NewDashboardService() - fakeDashSVC.CreateDashboardF = func(_ context.Context, d *influxdb.Dashboard) error { - // error out on second dashboard attempted - if fakeDashSVC.CreateDashboardCalls.Count() == 1 { - return errors.New("blowed up ") - } - d.ID = platform.ID(1) - return nil - } - deletedDashs := make(map[platform.ID]bool) - fakeDashSVC.DeleteDashboardF = func(_ context.Context, id platform.ID) error { - deletedDashs[id] = true - return nil - } - - svc := newTestService(WithDashboardSVC(fakeDashSVC)) - - orgID := platform.ID(9000) - - _, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.Error(t, err) - - assert.True(t, deletedDashs[1]) - }) - }) - }) - - t.Run("label mapping", func(t *testing.T) { - testLabelMappingApplyFn := func(t *testing.T, filename string, numExpected int, settersFn func() []ServiceSetterFn) { - t.Helper() - testfileRunner(t, filename, func(t *testing.T, template *Template) { - t.Helper() - - fakeLabelSVC := mock.NewLabelService() - fakeLabelSVC.CreateLabelFn = func(_ context.Context, l *influxdb.Label) error { - l.ID = platform.ID(rand.Int()) - return nil - } - fakeLabelSVC.CreateLabelMappingFn = func(_ context.Context, mapping *influxdb.LabelMapping) error { - if mapping.ResourceID == 0 { - return errors.New("did not get a resource ID") - } - if mapping.ResourceType == "" { - return errors.New("did not get a resource type") - } - return nil - } - svc := newTestService(append(settersFn(), - WithLabelSVC(fakeLabelSVC), - WithLogger(zaptest.NewLogger(t)), - )...) 
- - orgID := platform.ID(9000) - - _, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - assert.Equal(t, numExpected, fakeLabelSVC.CreateLabelMappingCalls.Count()) - }) - } - - testLabelMappingRollbackFn := func(t *testing.T, filename string, killCount int, settersFn func() []ServiceSetterFn) { - t.Helper() - testfileRunner(t, filename, func(t *testing.T, template *Template) { - t.Helper() - - fakeLabelSVC := mock.NewLabelService() - fakeLabelSVC.CreateLabelFn = func(_ context.Context, l *influxdb.Label) error { - l.ID = platform.ID(fakeLabelSVC.CreateLabelCalls.Count() + 1) - return nil - } - fakeLabelSVC.CreateLabelMappingFn = func(_ context.Context, mapping *influxdb.LabelMapping) error { - if mapping.ResourceID == 0 { - return errors.New("did not get a resource ID") - } - if mapping.ResourceType == "" { - return errors.New("did not get a resource type") - } - if fakeLabelSVC.CreateLabelMappingCalls.Count() == killCount { - return errors.New("hit last label") - } - return nil - } - svc := newTestService(append(settersFn(), - WithLabelSVC(fakeLabelSVC), - WithLogger(zaptest.NewLogger(t)), - )...) - - orgID := platform.ID(9000) - - _, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.Error(t, err) - - assert.GreaterOrEqual(t, fakeLabelSVC.DeleteLabelMappingCalls.Count(), killCount) - }) - } - - t.Run("maps buckets with labels", func(t *testing.T) { - bktOpt := func() []ServiceSetterFn { - fakeBktSVC := mock.NewBucketService() - fakeBktSVC.CreateBucketFn = func(_ context.Context, b *influxdb.Bucket) error { - b.ID = platform.ID(rand.Int()) - return nil - } - fakeBktSVC.FindBucketByNameFn = func(_ context.Context, id platform.ID, s string) (*influxdb.Bucket, error) { - // forces the bucket to be created a new - return nil, errors.New("an error") - } - return []ServiceSetterFn{WithBucketSVC(fakeBktSVC)} - } - - t.Run("applies successfully", func(t *testing.T) { - testLabelMappingApplyFn(t, "testdata/bucket_associates_label.yml", 4, bktOpt) - }) - - t.Run("deletes new label mappings on error", func(t *testing.T) { - testLabelMappingRollbackFn(t, "testdata/bucket_associates_label.yml", 2, bktOpt) - }) - }) - - t.Run("maps checks with labels", func(t *testing.T) { - opts := func() []ServiceSetterFn { - fakeCheckSVC := mock.NewCheckService() - fakeCheckSVC.CreateCheckFn = func(ctx context.Context, c influxdb.CheckCreate, id platform.ID) error { - c.Check.SetID(platform.ID(rand.Int())) - return nil - } - fakeCheckSVC.FindCheckFn = func(ctx context.Context, f influxdb.CheckFilter) (influxdb.Check, error) { - return nil, errors.New("check not found") - } - - return []ServiceSetterFn{WithCheckSVC(fakeCheckSVC)} - } - - t.Run("applies successfully", func(t *testing.T) { - testLabelMappingApplyFn(t, "testdata/checks.yml", 2, opts) - }) - - t.Run("deletes new label mappings on error", func(t *testing.T) { - testLabelMappingRollbackFn(t, "testdata/checks.yml", 1, opts) - }) - }) - - t.Run("maps dashboards with labels", func(t *testing.T) { - opts := func() []ServiceSetterFn { - fakeDashSVC := mock.NewDashboardService() - fakeDashSVC.CreateDashboardF = func(_ context.Context, d *influxdb.Dashboard) error { - d.ID = platform.ID(rand.Int()) - return nil - } - return []ServiceSetterFn{WithDashboardSVC(fakeDashSVC)} - } - - t.Run("applies successfully", func(t *testing.T) { - testLabelMappingApplyFn(t, "testdata/dashboard_associates_label.yml", 2, opts) - }) - - t.Run("deletes new label mappings on error", func(t *testing.T) { 
- testLabelMappingRollbackFn(t, "testdata/dashboard_associates_label.yml", 1, opts) - }) - }) - - t.Run("maps notification endpoints with labels", func(t *testing.T) { - opts := func() []ServiceSetterFn { - fakeEndpointSVC := mock.NewNotificationEndpointService() - fakeEndpointSVC.CreateNotificationEndpointF = func(ctx context.Context, nr influxdb.NotificationEndpoint, userID platform.ID) error { - nr.SetID(platform.ID(rand.Int())) - return nil - } - return []ServiceSetterFn{WithNotificationEndpointSVC(fakeEndpointSVC)} - } - - t.Run("applies successfully", func(t *testing.T) { - testLabelMappingApplyFn(t, "testdata/notification_endpoint.yml", 5, opts) - }) - - t.Run("deletes new label mappings on error", func(t *testing.T) { - testLabelMappingRollbackFn(t, "testdata/notification_endpoint.yml", 3, opts) - }) - }) - - t.Run("maps notification rules with labels", func(t *testing.T) { - opts := func() []ServiceSetterFn { - fakeRuleStore := mock.NewNotificationRuleStore() - fakeRuleStore.CreateNotificationRuleF = func(ctx context.Context, nr influxdb.NotificationRuleCreate, userID platform.ID) error { - nr.SetID(platform.ID(fakeRuleStore.CreateNotificationRuleCalls.Count() + 1)) - return nil - } - return []ServiceSetterFn{ - WithNotificationRuleSVC(fakeRuleStore), - } - } - - t.Run("applies successfully", func(t *testing.T) { - testLabelMappingApplyFn(t, "testdata/notification_rule.yml", 2, opts) - }) - - t.Run("deletes new label mappings on error", func(t *testing.T) { - testLabelMappingRollbackFn(t, "testdata/notification_rule.yml", 1, opts) - }) - }) - - t.Run("maps tasks with labels", func(t *testing.T) { - opts := func() []ServiceSetterFn { - fakeTaskSVC := mock.NewTaskService() - fakeTaskSVC.CreateTaskFn = func(ctx context.Context, tc taskmodel.TaskCreate) (*taskmodel.Task, error) { - reg := regexp.MustCompile(`name: "(.+)",`) - names := reg.FindStringSubmatch(tc.Flux) - if len(names) < 2 { - return nil, errors.New("bad flux query provided: " + tc.Flux) - } - return &taskmodel.Task{ - ID: platform.ID(rand.Int()), - Type: tc.Type, - OrganizationID: tc.OrganizationID, - OwnerID: tc.OwnerID, - Name: names[1], - Description: tc.Description, - Status: tc.Status, - Flux: tc.Flux, - }, nil - } - return []ServiceSetterFn{WithTaskSVC(fakeTaskSVC)} - } - - t.Run("applies successfully", func(t *testing.T) { - testLabelMappingApplyFn(t, "testdata/tasks.yml", 2, opts) - }) - - t.Run("deletes new label mappings on error", func(t *testing.T) { - testLabelMappingRollbackFn(t, "testdata/tasks.yml", 1, opts) - }) - }) - - t.Run("maps telegrafs with labels", func(t *testing.T) { - opts := func() []ServiceSetterFn { - fakeTeleSVC := mock.NewTelegrafConfigStore() - fakeTeleSVC.CreateTelegrafConfigF = func(_ context.Context, cfg *influxdb.TelegrafConfig, _ platform.ID) error { - cfg.ID = platform.ID(rand.Int()) - return nil - } - return []ServiceSetterFn{WithTelegrafSVC(fakeTeleSVC)} - } - - t.Run("applies successfully", func(t *testing.T) { - testLabelMappingApplyFn(t, "testdata/telegraf.yml", 2, opts) - }) - - t.Run("deletes new label mappings on error", func(t *testing.T) { - testLabelMappingRollbackFn(t, "testdata/telegraf.yml", 1, opts) - }) - }) - - t.Run("maps variables with labels", func(t *testing.T) { - opt := func() []ServiceSetterFn { - fakeVarSVC := mock.NewVariableService() - fakeVarSVC.CreateVariableF = func(_ context.Context, v *influxdb.Variable) error { - v.ID = platform.ID(rand.Int()) - return nil - } - return []ServiceSetterFn{WithVariableSVC(fakeVarSVC)} - } - - t.Run("applies 
successfully", func(t *testing.T) { - testLabelMappingApplyFn(t, "testdata/variable_associates_label.yml", 1, opt) - }) - - t.Run("deletes new label mappings on error", func(t *testing.T) { - testLabelMappingRollbackFn(t, "testdata/variable_associates_label.yml", 0, opt) - }) - }) - }) - - t.Run("notification endpoints", func(t *testing.T) { - t.Run("successfully creates template of endpoints", func(t *testing.T) { - testfileRunner(t, "testdata/notification_endpoint.yml", func(t *testing.T, template *Template) { - fakeEndpointSVC := mock.NewNotificationEndpointService() - fakeEndpointSVC.CreateNotificationEndpointF = func(ctx context.Context, nr influxdb.NotificationEndpoint, userID platform.ID) error { - nr.SetID(platform.ID(fakeEndpointSVC.CreateNotificationEndpointCalls.Count() + 1)) - return nil - } - - svc := newTestService(WithNotificationEndpointSVC(fakeEndpointSVC)) - - orgID := platform.ID(9000) - - impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - sum := impact.Summary - require.Len(t, sum.NotificationEndpoints, 5) - - containsWithID := func(t *testing.T, name string) { - var endpoints []string - for _, actualNotification := range sum.NotificationEndpoints { - actual := actualNotification.NotificationEndpoint - if actual.GetID() == 0 { - assert.NotZero(t, actual.GetID()) - } - if actual.GetName() == name { - return - } - endpoints = append(endpoints, fmt.Sprintf("%+v", actual)) - } - assert.Failf(t, "did not find notification by name: "+name, "endpoints received: %s", endpoints) - } - - expectedNames := []string{ - "basic endpoint name", - "http-bearer-auth-notification-endpoint", - "http-none-auth-notification-endpoint", - "pager duty name", - "slack name", - } - for _, expectedName := range expectedNames { - containsWithID(t, expectedName) - } - }) - }) - - t.Run("rolls back all created notifications on an error", func(t *testing.T) { - testfileRunner(t, "testdata/notification_endpoint.yml", func(t *testing.T, template *Template) { - fakeEndpointSVC := mock.NewNotificationEndpointService() - fakeEndpointSVC.CreateNotificationEndpointF = func(ctx context.Context, nr influxdb.NotificationEndpoint, userID platform.ID) error { - nr.SetID(platform.ID(fakeEndpointSVC.CreateNotificationEndpointCalls.Count() + 1)) - if fakeEndpointSVC.CreateNotificationEndpointCalls.Count() == 3 { - return errors.New("hit that kill count") - } - return nil - } - - svc := newTestService(WithNotificationEndpointSVC(fakeEndpointSVC)) - - orgID := platform.ID(9000) - - _, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.Error(t, err) - - assert.GreaterOrEqual(t, fakeEndpointSVC.DeleteNotificationEndpointCalls.Count(), 3) - }) - }) - }) - - t.Run("notification rules", func(t *testing.T) { - t.Run("successfully creates", func(t *testing.T) { - testfileRunner(t, "testdata/notification_rule.yml", func(t *testing.T, template *Template) { - fakeEndpointSVC := mock.NewNotificationEndpointService() - fakeEndpointSVC.CreateNotificationEndpointF = func(ctx context.Context, nr influxdb.NotificationEndpoint, userID platform.ID) error { - nr.SetID(platform.ID(fakeEndpointSVC.CreateNotificationEndpointCalls.Count() + 1)) - return nil - } - fakeRuleStore := mock.NewNotificationRuleStore() - fakeRuleStore.CreateNotificationRuleF = func(ctx context.Context, nr influxdb.NotificationRuleCreate, userID platform.ID) error { - nr.SetID(platform.ID(fakeRuleStore.CreateNotificationRuleCalls.Count() + 1)) - return nil - } - - svc := 
newTestService( - WithNotificationEndpointSVC(fakeEndpointSVC), - WithNotificationRuleSVC(fakeRuleStore), - ) - - orgID := platform.ID(9000) - - impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - sum := impact.Summary - require.Len(t, sum.NotificationRules, 1) - assert.Equal(t, "rule-uuid", sum.NotificationRules[0].MetaName) - assert.Equal(t, "rule_0", sum.NotificationRules[0].Name) - assert.Equal(t, "desc_0", sum.NotificationRules[0].Description) - assert.Equal(t, SafeID(1), sum.NotificationRules[0].EndpointID) - assert.Equal(t, "endpoint-0", sum.NotificationRules[0].EndpointMetaName) - assert.Equal(t, "slack", sum.NotificationRules[0].EndpointType) - }) - }) - - t.Run("rolls back all created notification rules on an error", func(t *testing.T) { - testfileRunner(t, "testdata/notification_rule.yml", func(t *testing.T, template *Template) { - fakeRuleStore := mock.NewNotificationRuleStore() - fakeRuleStore.CreateNotificationRuleF = func(ctx context.Context, nr influxdb.NotificationRuleCreate, userID platform.ID) error { - nr.SetID(platform.ID(fakeRuleStore.CreateNotificationRuleCalls.Count() + 1)) - return nil - } - fakeRuleStore.DeleteNotificationRuleF = func(ctx context.Context, id platform.ID) error { - if id != 1 { - return errors.New("wrong id here") - } - return nil - } - fakeLabelSVC := mock.NewLabelService() - fakeLabelSVC.CreateLabelFn = func(ctx context.Context, l *influxdb.Label) error { - l.ID = platform.ID(fakeLabelSVC.CreateLabelCalls.Count() + 1) - return nil - } - fakeLabelSVC.CreateLabelMappingFn = func(ctx context.Context, m *influxdb.LabelMapping) error { - return errors.New("start the rollback") - } - - svc := newTestService( - WithLabelSVC(fakeLabelSVC), - WithNotificationRuleSVC(fakeRuleStore), - ) - - orgID := platform.ID(9000) - - _, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.Error(t, err) - - assert.Equal(t, 1, fakeRuleStore.DeleteNotificationRuleCalls.Count()) - }) - }) - }) - - t.Run("tasks", func(t *testing.T) { - t.Run("successfully creates", func(t *testing.T) { - testfileRunner(t, "testdata/tasks.yml", func(t *testing.T, template *Template) { - orgID := platform.ID(9000) - - fakeTaskSVC := mock.NewTaskService() - fakeTaskSVC.CreateTaskFn = func(ctx context.Context, tc taskmodel.TaskCreate) (*taskmodel.Task, error) { - reg := regexp.MustCompile(`name: "(.+)",`) - names := reg.FindStringSubmatch(tc.Flux) - if len(names) < 2 { - return nil, errors.New("bad flux query provided: " + tc.Flux) - } - return &taskmodel.Task{ - ID: platform.ID(fakeTaskSVC.CreateTaskCalls.Count() + 1), - Type: tc.Type, - OrganizationID: tc.OrganizationID, - OwnerID: tc.OwnerID, - Name: names[1], - Description: tc.Description, - Status: tc.Status, - Flux: tc.Flux, - }, nil - } - - svc := newTestService(WithTaskSVC(fakeTaskSVC)) - - impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - sum := impact.Summary - require.Len(t, sum.Tasks, 2) - assert.NotZero(t, sum.Tasks[0].ID) - assert.Equal(t, "task-1", sum.Tasks[0].MetaName) - assert.Equal(t, "task-1", sum.Tasks[0].Name) - assert.Equal(t, "desc_1", sum.Tasks[0].Description) - - assert.NotZero(t, sum.Tasks[1].ID) - assert.Equal(t, "task-uuid", sum.Tasks[1].MetaName) - assert.Equal(t, "task-0", sum.Tasks[1].Name) - assert.Equal(t, "desc_0", sum.Tasks[1].Description) - }) - }) - - t.Run("rolls back all created tasks on an error", func(t *testing.T) { - testfileRunner(t, "testdata/tasks.yml", 
func(t *testing.T, template *Template) { - fakeTaskSVC := mock.NewTaskService() - fakeTaskSVC.CreateTaskFn = func(ctx context.Context, tc taskmodel.TaskCreate) (*taskmodel.Task, error) { - if fakeTaskSVC.CreateTaskCalls.Count() == 1 { - return nil, errors.New("expected error") - } - return &taskmodel.Task{ - ID: platform.ID(fakeTaskSVC.CreateTaskCalls.Count() + 1), - }, nil - } - - svc := newTestService(WithTaskSVC(fakeTaskSVC)) - - orgID := platform.ID(9000) - - _, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.Error(t, err) - - assert.Equal(t, 1, fakeTaskSVC.DeleteTaskCalls.Count()) - }) - }) - }) - - t.Run("telegrafs", func(t *testing.T) { - t.Run("successfully creates", func(t *testing.T) { - testfileRunner(t, "testdata/telegraf.yml", func(t *testing.T, template *Template) { - orgID := platform.ID(9000) - - fakeTeleSVC := mock.NewTelegrafConfigStore() - fakeTeleSVC.CreateTelegrafConfigF = func(_ context.Context, tc *influxdb.TelegrafConfig, userID platform.ID) error { - tc.ID = 1 - return nil - } - - svc := newTestService(WithTelegrafSVC(fakeTeleSVC)) - - impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - sum := impact.Summary - require.Len(t, sum.TelegrafConfigs, 2) - assert.Equal(t, "display name", sum.TelegrafConfigs[0].TelegrafConfig.Name) - assert.Equal(t, "desc", sum.TelegrafConfigs[0].TelegrafConfig.Description) - assert.Equal(t, "tele-2", sum.TelegrafConfigs[1].TelegrafConfig.Name) - }) - }) - - t.Run("rolls back all created telegrafs on an error", func(t *testing.T) { - testfileRunner(t, "testdata/telegraf.yml", func(t *testing.T, template *Template) { - fakeTeleSVC := mock.NewTelegrafConfigStore() - fakeTeleSVC.CreateTelegrafConfigF = func(_ context.Context, tc *influxdb.TelegrafConfig, userID platform.ID) error { - t.Log("called") - if fakeTeleSVC.CreateTelegrafConfigCalls.Count() == 1 { - return errors.New("limit hit") - } - tc.ID = platform.ID(1) - return nil - } - fakeTeleSVC.DeleteTelegrafConfigF = func(_ context.Context, id platform.ID) error { - if id != 1 { - return errors.New("wrong id here") - } - return nil - } - - svc := newTestService(WithTelegrafSVC(fakeTeleSVC)) - - orgID := platform.ID(9000) - - _, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.Error(t, err) - - assert.Equal(t, 1, fakeTeleSVC.DeleteTelegrafConfigCalls.Count()) - }) - }) - }) - - t.Run("variables", func(t *testing.T) { - t.Run("successfully creates template of variables", func(t *testing.T) { - testfileRunner(t, "testdata/variables.yml", func(t *testing.T, template *Template) { - fakeVarSVC := mock.NewVariableService() - fakeVarSVC.CreateVariableF = func(_ context.Context, v *influxdb.Variable) error { - v.ID = platform.ID(fakeVarSVC.CreateVariableCalls.Count() + 1) - return nil - } - - svc := newTestService(WithVariableSVC(fakeVarSVC)) - - orgID := platform.ID(9000) - - impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - sum := impact.Summary - require.Len(t, sum.Variables, 4) - - actual := sum.Variables[0] - assert.True(t, actual.ID > 0 && actual.ID < 5) - assert.Equal(t, SafeID(orgID), actual.OrgID) - assert.Equal(t, "var-const-3", actual.Name) - assert.Equal(t, "var-const-3 desc", actual.Description) - require.NotNil(t, actual.Arguments) - assert.Equal(t, influxdb.VariableConstantValues{"first val"}, actual.Arguments.Values) - - actual = sum.Variables[2] - assert.Equal(t, []string{"rucket"}, actual.Selected) 
- - for _, actual := range sum.Variables { - assert.Containsf(t, []SafeID{1, 2, 3, 4}, actual.ID, "actual var: %+v", actual) - } - }) - }) - - t.Run("rolls back all created variables on an error", func(t *testing.T) { - testfileRunner(t, "testdata/variables.yml", func(t *testing.T, template *Template) { - fakeVarSVC := mock.NewVariableService() - fakeVarSVC.CreateVariableF = func(_ context.Context, l *influxdb.Variable) error { - // fail partway through creation; the variables created before the failure should be rolled back - if fakeVarSVC.CreateVariableCalls.Count() == 2 { - return errors.New("blowed up ") - } - return nil - } - - svc := newTestService(WithVariableSVC(fakeVarSVC)) - - orgID := platform.ID(9000) - - _, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.Error(t, err) - - assert.GreaterOrEqual(t, fakeVarSVC.DeleteVariableCalls.Count(), 1) - }) - }) - - t.Run("will not apply variable if no changes to be applied", func(t *testing.T) { - testfileRunner(t, "testdata/variables.yml", func(t *testing.T, template *Template) { - orgID := platform.ID(9000) - - fakeVarSVC := mock.NewVariableService() - fakeVarSVC.FindVariablesF = func(ctx context.Context, f influxdb.VariableFilter, _ ...influxdb.FindOptions) ([]*influxdb.Variable, error) { - return []*influxdb.Variable{ - { - // makes all template changes same as they are on the existing - ID: platform.ID(1), - OrganizationID: orgID, - Name: template.mVariables["var-const-3"].Name(), - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"first val"}, - }, - }, - }, nil - } - fakeVarSVC.CreateVariableF = func(_ context.Context, l *influxdb.Variable) error { - if l.Name == "var_const" { - return errors.New("shouldn't get here") - } - return nil - } - fakeVarSVC.UpdateVariableF = func(_ context.Context, id platform.ID, v *influxdb.VariableUpdate) (*influxdb.Variable, error) { - if id > platform.ID(1) { - return nil, errors.New("this id should not be updated") - } - return &influxdb.Variable{ID: id}, nil - } - - svc := newTestService(WithVariableSVC(fakeVarSVC)) - - impact, err := svc.Apply(context.TODO(), orgID, 0, ApplyWithTemplate(template)) - require.NoError(t, err) - - sum := impact.Summary - require.Len(t, sum.Variables, 4) - expected := sum.Variables[0] - assert.Equal(t, SafeID(1), expected.ID) - assert.Equal(t, "var-const-3", expected.Name) - - assert.Equal(t, 3, fakeVarSVC.CreateVariableCalls.Count()) // only called for the 3 variables that don't already exist - }) - }) - }) - }) - - t.Run("Export", func(t *testing.T) { - newThresholdBase := func(i int) icheck.Base { - return icheck.Base{ - ID: platform.ID(i), - TaskID: 300, - Name: fmt.Sprintf("check_%d", i), - Description: fmt.Sprintf("desc_%d", i), - Every: mustDuration(t, time.Minute), - Offset: mustDuration(t, 15*time.Second), - Query: influxdb.DashboardQuery{ - Text: `from(bucket: "telegraf") |> range(start: -1m) |> filter(fn: (r) => r._field == "usage_user")`, - }, - StatusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }", - Tags: []influxdb.Tag{ - {Key: "key_1", Value: "val_1"}, - {Key: "key_2", Value: "val_2"}, - }, - } - } - - sortLabelsByName := func(labels []SummaryLabel) { - sort.Slice(labels, func(i, j int) bool { - return labels[i].Name < labels[j].Name - }) - } - - t.Run("with existing resources", func(t *testing.T) { - encodeAndDecode := func(t *testing.T, template *Template) *Template { - t.Helper() - - b, err := template.Encode(EncodingJSON) - require.NoError(t, err) - - newTemplate, err := Parse(EncodingJSON, 
FromReader(bytes.NewReader(b))) - require.NoError(t, err) - - return newTemplate - } - - t.Run("bucket", func(t *testing.T) { - tests := []struct { - name string - newName string - }{ - { - name: "without new name", - }, - { - name: "with new name", - newName: "new name", - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - expected := &influxdb.Bucket{ - ID: 3, - Name: "bucket name", - Description: "desc", - RetentionPeriod: time.Hour, - } - - bktSVC := mock.NewBucketService() - bktSVC.FindBucketByIDFn = func(_ context.Context, id platform.ID) (*influxdb.Bucket, error) { - if id != expected.ID { - return nil, errors.New("uh ohhh, wrong id here: " + id.String()) - } - return expected, nil - } - bktSVC.FindBucketsFn = func(_ context.Context, filter influxdb.BucketFilter, _ ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - if filter.ID != nil { - if *filter.ID != expected.ID { - return nil, 0, errors.New("uh ohhh, wrong id here: " + filter.ID.String()) - } - } else if filter.Name != nil && *filter.Name != expected.Name { - return nil, 0, errors.New("uh ohhh, wrong name here: " + *filter.Name) - } - return []*influxdb.Bucket{expected}, 1, nil - } - - svc := newTestService(WithBucketSVC(bktSVC), WithLabelSVC(mock.NewLabelService())) - - resToClone := ResourceToClone{ - Kind: KindBucket, - ID: expected.ID, - Name: tt.newName, - } - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - require.NoError(t, err) - - newTemplate := encodeAndDecode(t, template) - - bkts := newTemplate.Summary().Buckets - require.Len(t, bkts, 1) - - actual := bkts[0] - expectedName := expected.Name - if tt.newName != "" { - expectedName = tt.newName - } - assert.Equal(t, expectedName, actual.Name) - assert.Equal(t, expected.Description, actual.Description) - assert.Equal(t, expected.RetentionPeriod, actual.RetentionPeriod) - } - t.Run(tt.name, fn) - } - }) - - // todo: bucket names are unique. 
- t.Run("bucket by name", func(t *testing.T) { - knownBuckets := []*influxdb.Bucket{ - { - ID: platform.ID(1), - Name: "bucket", - Description: "desc", - RetentionPeriod: time.Hour, - }, - { - ID: platform.ID(2), - Name: "bucketCopy", - Description: "desc", - RetentionPeriod: time.Hour, - }, - { - ID: platform.ID(3), - Name: "bucket3", - Description: "desc", - RetentionPeriod: time.Hour, - }, - } - - tests := []struct { - name string - findName string - findID platform.ID - expected []*influxdb.Bucket - }{ - { - name: "find bucket with unique name", - findName: "bucket", - expected: []*influxdb.Bucket{knownBuckets[0]}, - }, - { - name: "find no buckets", - findName: "fakeBucket", - expected: nil, - }, - { - name: "find bucket by id", - findID: platform.ID(2), - expected: []*influxdb.Bucket{knownBuckets[1]}, - }, - { - // todo: verify this is intended behavior (it is in swagger) - name: "find by id, set new name", - findID: platform.ID(2), - findName: "renamedBucket", - expected: []*influxdb.Bucket{knownBuckets[1]}, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - bktSVC := mock.NewBucketService() - bktSVC.FindBucketsFn = func(_ context.Context, filter influxdb.BucketFilter, _ ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - if filter.ID != nil { - for i := range knownBuckets { - if knownBuckets[i].ID == *filter.ID { - return []*influxdb.Bucket{knownBuckets[i]}, 1, nil - } - } - return nil, 0, errors.New("uh ohhh, wrong id here: " + filter.ID.String()) - } else if filter.Name != nil { - bkts := []*influxdb.Bucket{} - - for i := range knownBuckets { - if knownBuckets[i].Name == *filter.Name { - bkts = append(bkts, knownBuckets[i]) - } - } - - if lBkts := len(bkts); lBkts > 0 { - return bkts, lBkts, nil - } - return nil, 0, errors.New("uh ohhh, wrong name here: " + *filter.Name) - } - - return knownBuckets, len(knownBuckets), nil - } - - resToClone := ResourceToClone{ - Kind: KindBucket, - } - if tt.findName != "" { - resToClone.Name = tt.findName - } - if tt.findID != platform.ID(0) { - resToClone.ID = tt.findID - } - - svc := newTestService( - WithBucketSVC(bktSVC), - WithLabelSVC(mock.NewLabelService()), - ) - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - if tt.expected == nil { - require.Error(t, err) - } else { - require.NoError(t, err) - - if tt.findName != "" && tt.findID != platform.ID(0) { - tt.expected[0].Name = tt.findName - } - - actual := template.Summary().Buckets - require.Len(t, actual, len(tt.expected)) - - for i := range actual { - // can't verify id's match due to the use of SafeID's - assert.Equal(t, tt.expected[i].Name, actual[i].Name) - assert.Equal(t, tt.expected[i].Description, actual[i].Description) - assert.Equal(t, tt.expected[i].RetentionPeriod, actual[i].RetentionPeriod) - } - - assert.True(t, encodeAndDecode(t, template) != nil) - } - } - t.Run(tt.name, fn) - } - }) - - t.Run("checks", func(t *testing.T) { - tests := []struct { - name string - newName string - expected influxdb.Check - }{ - { - name: "threshold", - expected: &icheck.Threshold{ - Base: newThresholdBase(0), - Thresholds: []icheck.ThresholdConfig{ - icheck.Lesser{ - ThresholdConfigBase: icheck.ThresholdConfigBase{ - AllValues: true, - Level: notification.Critical, - }, - Value: 20, - }, - icheck.Greater{ - ThresholdConfigBase: icheck.ThresholdConfigBase{ - AllValues: true, - Level: notification.Warn, - }, - Value: 30, - }, - icheck.Range{ - ThresholdConfigBase: icheck.ThresholdConfigBase{ - AllValues: true, - Level: 
notification.Info, - }, - Within: false, // outside_range - Min: 10, - Max: 25, - }, - icheck.Range{ - ThresholdConfigBase: icheck.ThresholdConfigBase{ - AllValues: true, - Level: notification.Ok, - }, - Within: true, // inside_range - Min: 21, - Max: 24, - }, - }, - }, - }, - { - name: "deadman", - newName: "new name", - expected: &icheck.Deadman{ - Base: newThresholdBase(1), - TimeSince: mustDuration(t, time.Hour), - StaleTime: mustDuration(t, 5*time.Hour), - ReportZero: true, - Level: notification.Critical, - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - id := platform.ID(1) - tt.expected.SetID(id) - - checkSVC := mock.NewCheckService() - checkSVC.FindCheckByIDFn = func(ctx context.Context, id platform.ID) (influxdb.Check, error) { - if id != tt.expected.GetID() { - return nil, errors.New("uh ohhh, wrong id here: " + id.String()) - } - return tt.expected, nil - } - checkSVC.FindChecksFn = func(_ context.Context, filter influxdb.CheckFilter, _ ...influxdb.FindOptions) ([]influxdb.Check, int, error) { - if filter.ID != nil { - if *filter.ID != tt.expected.GetID() { - return nil, 0, errors.New("uh ohhh, wrong id here: " + filter.ID.String()) - } - } else if filter.Name != nil && *filter.Name != tt.expected.GetName() { - return nil, 0, errors.New("uh ohhh, wrong name here: " + *filter.Name) - } - - return []influxdb.Check{tt.expected}, 1, nil - } - - svc := newTestService(WithCheckSVC(checkSVC)) - - resToClone := ResourceToClone{ - Kind: KindCheck, - ID: tt.expected.GetID(), - Name: tt.newName, - } - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - require.NoError(t, err) - - newTemplate := encodeAndDecode(t, template) - - checks := newTemplate.Summary().Checks - require.Len(t, checks, 1) - - actual := checks[0].Check - expectedName := tt.expected.GetName() - if tt.newName != "" { - expectedName = tt.newName - } - assert.Equal(t, expectedName, actual.GetName()) - } - t.Run(tt.name, fn) - } - }) - - t.Run("checks by name", func(t *testing.T) { - knownChecks := []influxdb.Check{ - &icheck.Threshold{ - Base: newThresholdBase(0), - Thresholds: []icheck.ThresholdConfig{ - icheck.Lesser{ - ThresholdConfigBase: icheck.ThresholdConfigBase{ - AllValues: true, - Level: notification.Critical, - }, - Value: 20, - }, - icheck.Greater{ - ThresholdConfigBase: icheck.ThresholdConfigBase{ - AllValues: true, - Level: notification.Warn, - }, - Value: 30, - }, - icheck.Range{ - ThresholdConfigBase: icheck.ThresholdConfigBase{ - AllValues: true, - Level: notification.Info, - }, - Within: false, // outside_range - Min: 10, - Max: 25, - }, - icheck.Range{ - ThresholdConfigBase: icheck.ThresholdConfigBase{ - AllValues: true, - Level: notification.Ok, - }, - Within: true, // inside_range - Min: 21, - Max: 24, - }, - }, - }, - &icheck.Deadman{ - Base: newThresholdBase(1), - TimeSince: mustDuration(t, time.Hour), - StaleTime: mustDuration(t, 5*time.Hour), - ReportZero: true, - Level: notification.Critical, - }, - &icheck.Deadman{ - Base: icheck.Base{ - ID: platform.ID(2), - TaskID: 300, - Name: "check_1", - Description: "desc_2", - Every: mustDuration(t, 2*time.Minute), - Offset: mustDuration(t, 30*time.Second), - Query: influxdb.DashboardQuery{ - Text: `from(bucket: "telegraf") |> range(start: -1m) |> filter(fn: (r) => r._field == "usage_user")`, - }, - StatusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }", - Tags: []influxdb.Tag{ - {Key: "key_1", Value: "val_1"}, - {Key: "key_2", Value: "val_2"}, - }, - }, - TimeSince: mustDuration(t, 
time.Hour), - StaleTime: mustDuration(t, 5*time.Hour), - ReportZero: true, - Level: notification.Critical, - }, - } - - tests := []struct { - name string - findName string - findID platform.ID - expected []influxdb.Check - }{ - { - name: "find check with unique name", - findName: "check_0", - expected: []influxdb.Check{knownChecks[0]}, - }, - { - name: "find multiple checks with same name", - findName: "check_1", - expected: []influxdb.Check{knownChecks[1], knownChecks[2]}, - }, - { - name: "find no checks", - findName: "fakeCheck", - expected: nil, - }, - { - name: "find check by id", - findID: platform.ID(1), - expected: []influxdb.Check{knownChecks[1]}, - }, - { - name: "find check by id, set new name", - findID: platform.ID(1), - findName: "chex original", - expected: []influxdb.Check{knownChecks[1]}, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - checkSVC := mock.NewCheckService() - checkSVC.FindChecksFn = func(_ context.Context, filter influxdb.CheckFilter, _ ...influxdb.FindOptions) ([]influxdb.Check, int, error) { - if filter.ID != nil { - for i := range knownChecks { - if knownChecks[i].GetID() == *filter.ID { - return []influxdb.Check{knownChecks[i]}, 1, nil - } - } - return nil, 0, errors.New("uh ohhh, wrong id here: " + filter.ID.String()) - } else if filter.Name != nil { - checks := []influxdb.Check{} - - for i := range knownChecks { - if knownChecks[i].GetName() == *filter.Name { - checks = append(checks, knownChecks[i]) - } - } - - if lChecks := len(checks); lChecks > 0 { - return checks, lChecks, nil - } - - return nil, 0, errors.New("uh ohhh, wrong name here: " + *filter.Name) - } - - return knownChecks, len(knownChecks), nil - } - - resToClone := ResourceToClone{ - Kind: KindCheck, - } - if tt.findName != "" { - resToClone.Name = tt.findName - } - if tt.findID != platform.ID(0) { - resToClone.ID = tt.findID - } - - svc := newTestService(WithCheckSVC(checkSVC)) - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - if tt.expected == nil { - require.Error(t, err) - } else { - require.NoError(t, err) - - if tt.findName != "" && tt.findID != platform.ID(0) { - tt.expected[0].SetName(tt.findName) - } - - actual := template.Summary().Checks - require.Len(t, actual, len(tt.expected)) - sort.Slice(actual, func(i, j int) bool { - return actual[i].Check.GetDescription() < actual[j].Check.GetDescription() - }) - - for i := range actual { - assert.Equal(t, tt.expected[i].GetName(), actual[i].Check.GetName()) - assert.Equal(t, tt.expected[i].GetDescription(), actual[i].Check.GetDescription()) - } - - assert.True(t, encodeAndDecode(t, template) != nil) - } - } - t.Run(tt.name, fn) - } - }) - - newQuery := func() influxdb.DashboardQuery { - return influxdb.DashboardQuery{ - Text: "from(v.bucket) |> count()", - EditMode: "advanced", - } - } - - newAxes := func() map[string]influxdb.Axis { - return map[string]influxdb.Axis{ - "x": { - Bounds: []string{}, - Label: "labx", - Prefix: "pre", - Suffix: "suf", - Base: "base", - Scale: "linear", - }, - "y": { - Bounds: []string{}, - Label: "laby", - Prefix: "pre", - Suffix: "suf", - Base: "base", - Scale: "linear", - }, - } - } - - newColors := func(types ...string) []influxdb.ViewColor { - var out []influxdb.ViewColor - for _, t := range types { - out = append(out, influxdb.ViewColor{ - Type: t, - Hex: time.Now().Format(time.RFC3339), - Name: time.Now().Format(time.RFC3339), - Value: float64(time.Now().Unix()), - }) - } - return out - } - - t.Run("dashboard", func(t *testing.T) { - 
t.Run("with single chart", func(t *testing.T) { - tests := []struct { - name string - newName string - expectedView influxdb.View - }{ - { - name: "gauge", - newName: "new name", - expectedView: influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.GaugeViewProperties{ - Type: influxdb.ViewPropertyTypeGauge, - DecimalPlaces: influxdb.DecimalPlaces{IsEnforced: true, Digits: 1}, - Note: "a note", - Prefix: "pre", - TickPrefix: "true", - Suffix: "suf", - TickSuffix: "false", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: newColors("min", "max", "threshold"), - }, - }, - }, - { - name: "geo", - newName: "new name", - expectedView: influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.GeoViewProperties{ - Type: influxdb.ViewPropertyTypeGeo, - Queries: []influxdb.DashboardQuery{newQuery()}, - Center: influxdb.Datum{Lat: 10, Lon: -4}, - Zoom: 4, - GeoLayers: []influxdb.GeoLayer{{ - Type: "circleMap", - RadiusField: "radius", - IntensityField: "count", - Radius: 5, - Blur: 1, - RadiusDimension: influxdb.Axis{ - Prefix: "$", - Suffix: "%", - }, - InterpolateColors: false, - TrackWidth: 4, - Speed: 1, - RandomColors: false, - IsClustered: false, - ViewColors: newColors("min", "max"), - }}, - Note: "a note", - ShowNoteWhenEmpty: true, - }, - }, - }, - { - name: "heatmap", - newName: "new name", - expectedView: influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.HeatmapViewProperties{ - Type: influxdb.ViewPropertyTypeHeatMap, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"}, - XColumn: "x", - GenerateXAxisTicks: []string{"xTotalTicks", "xTickStart", "xTickStep"}, - XTotalTicks: 15, - XTickStart: 0, - XTickStep: 1000, - YColumn: "y", - GenerateYAxisTicks: []string{"yTotalTicks", "yTickStart", "yTickStep"}, - YTotalTicks: 10, - YTickStart: 0, - YTickStep: 100, - XDomain: []float64{0, 10}, - YDomain: []float64{0, 100}, - XAxisLabel: "x_label", - XPrefix: "x_prefix", - XSuffix: "x_suffix", - YAxisLabel: "y_label", - YPrefix: "y_prefix", - YSuffix: "y_suffix", - BinSize: 10, - TimeFormat: "", - LegendColorizeRows: true, - LegendHide: false, - LegendOpacity: 1.0, - LegendOrientationThreshold: 5, - }, - }, - }, - { - name: "histogram", - newName: "new name", - expectedView: influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.HistogramViewProperties{ - Type: influxdb.ViewPropertyTypeHistogram, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: []influxdb.ViewColor{{Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}}, - FillColumns: []string{"a", "b"}, - XColumn: "_value", - XDomain: []float64{0, 10}, - XAxisLabel: "x_label", - BinCount: 30, - Position: "stacked", - LegendColorizeRows: true, - LegendHide: false, - LegendOpacity: 1.0, - LegendOrientationThreshold: 5, - }, - }, - }, - { - name: "scatter", - newName: "new name", - expectedView: influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.ScatterViewProperties{ - Type: influxdb.ViewPropertyTypeScatter, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: []string{"#8F8AF4", 
"#8F8AF4", "#8F8AF4"}, - XColumn: "x", - GenerateXAxisTicks: []string{"xTotalTicks", "xTickStart", "xTickStep"}, - XTotalTicks: 15, - XTickStart: 0, - XTickStep: 1000, - YColumn: "y", - GenerateYAxisTicks: []string{"yTotalTicks", "yTickStart", "yTickStep"}, - YTotalTicks: 10, - YTickStart: 0, - YTickStep: 100, - XDomain: []float64{0, 10}, - YDomain: []float64{0, 100}, - XAxisLabel: "x_label", - XPrefix: "x_prefix", - XSuffix: "x_suffix", - YAxisLabel: "y_label", - YPrefix: "y_prefix", - YSuffix: "y_suffix", - TimeFormat: "", - LegendColorizeRows: true, - LegendHide: false, - LegendOpacity: 1.0, - LegendOrientationThreshold: 5, - }, - }, - }, - { - name: "mosaic", - expectedView: influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.MosaicViewProperties{ - Type: influxdb.ViewPropertyTypeMosaic, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"}, - HoverDimension: "y", - XColumn: "x", - GenerateXAxisTicks: []string{"xTotalTicks", "xTickStart", "xTickStep"}, - XTotalTicks: 15, - XTickStart: 0, - XTickStep: 1000, - YLabelColumnSeparator: ",", - YLabelColumns: []string{"foo"}, - YSeriesColumns: []string{"y"}, - XDomain: []float64{0, 10}, - YDomain: []float64{0, 100}, - XAxisLabel: "x_label", - XPrefix: "x_prefix", - XSuffix: "x_suffix", - YAxisLabel: "y_label", - YPrefix: "y_prefix", - YSuffix: "y_suffix", - LegendColorizeRows: true, - LegendHide: false, - LegendOpacity: 1.0, - LegendOrientationThreshold: 5, - }, - }, - }, - { - name: "without new name single stat", - expectedView: influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.SingleStatViewProperties{ - Type: influxdb.ViewPropertyTypeSingleStat, - DecimalPlaces: influxdb.DecimalPlaces{IsEnforced: true, Digits: 1}, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - Prefix: "pre", - TickPrefix: "false", - ShowNoteWhenEmpty: true, - Suffix: "suf", - TickSuffix: "true", - ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, - }, - }, - }, - { - name: "with new name single stat", - newName: "new name", - expectedView: influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.SingleStatViewProperties{ - Type: influxdb.ViewPropertyTypeSingleStat, - DecimalPlaces: influxdb.DecimalPlaces{IsEnforced: true, Digits: 1}, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - Prefix: "pre", - TickPrefix: "false", - ShowNoteWhenEmpty: true, - Suffix: "suf", - TickSuffix: "true", - ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, - }, - }, - }, - { - name: "single stat plus line", - newName: "new name", - expectedView: influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.LinePlusSingleStatProperties{ - Type: influxdb.ViewPropertyTypeSingleStatPlusLine, - Axes: newAxes(), - DecimalPlaces: influxdb.DecimalPlaces{IsEnforced: true, Digits: 1}, - StaticLegend: influxdb.StaticLegend{ColorizeRows: true, HeightRatio: 0.2, Show: true, Opacity: 1.0, OrientationThreshold: 5, ValueAxis: "y", WidthRatio: 1.0}, - Note: "a note", - Prefix: "pre", - Suffix: "suf", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShadeBelow: true, - HoverDimension: "y", - ShowNoteWhenEmpty: true, - ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, - XColumn: "x", - GenerateXAxisTicks: []string{"xTotalTicks", "xTickStart", 
"xTickStep"}, - XTotalTicks: 15, - XTickStart: 0, - XTickStep: 1000, - YColumn: "y", - GenerateYAxisTicks: []string{"yTotalTicks", "yTickStart", "yTickStep"}, - YTotalTicks: 10, - YTickStart: 0, - YTickStep: 100, - Position: "stacked", - LegendColorizeRows: true, - LegendHide: false, - LegendOpacity: 1.0, - LegendOrientationThreshold: 5, - }, - }, - }, - { - name: "xy", - newName: "new name", - expectedView: influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.XYViewProperties{ - Type: influxdb.ViewPropertyTypeXY, - Axes: newAxes(), - Geom: "step", - StaticLegend: influxdb.StaticLegend{ColorizeRows: true, HeightRatio: 0.2, Show: true, Opacity: 1.0, OrientationThreshold: 5, ValueAxis: "y", WidthRatio: 1.0}, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShadeBelow: true, - HoverDimension: "y", - ShowNoteWhenEmpty: true, - ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, - XColumn: "x", - GenerateXAxisTicks: []string{"xTotalTicks", "xTickStart", "xTickStep"}, - XTotalTicks: 15, - XTickStart: 0, - XTickStep: 1000, - YColumn: "y", - GenerateYAxisTicks: []string{"yTotalTicks", "yTickStart", "yTickStep"}, - YTotalTicks: 10, - YTickStart: 0, - YTickStep: 100, - Position: "overlaid", - TimeFormat: "", - LegendColorizeRows: true, - LegendHide: false, - LegendOpacity: 1.0, - LegendOrientationThreshold: 5, - }, - }, - }, - { - name: "band", - newName: "new name", - expectedView: influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.BandViewProperties{ - Type: influxdb.ViewPropertyTypeBand, - Axes: newAxes(), - Geom: "step", - StaticLegend: influxdb.StaticLegend{ColorizeRows: true, HeightRatio: 0.2, Show: true, Opacity: 1.0, OrientationThreshold: 5, ValueAxis: "y", WidthRatio: 1.0}, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - HoverDimension: "y", - ShowNoteWhenEmpty: true, - ViewColors: []influxdb.ViewColor{{Type: "text", Hex: "red"}}, - XColumn: "x", - GenerateXAxisTicks: []string{"xTotalTicks", "xTickStart", "xTickStep"}, - XTotalTicks: 15, - XTickStart: 0, - XTickStep: 1000, - YColumn: "y", - GenerateYAxisTicks: []string{"yTotalTicks", "yTickStart", "yTickStep"}, - YTotalTicks: 10, - YTickStart: 0, - YTickStep: 100, - UpperColumn: "upper", - MainColumn: "main", - LowerColumn: "lower", - TimeFormat: "", - LegendColorizeRows: true, - LegendHide: false, - LegendOpacity: 1.0, - LegendOrientationThreshold: 5, - }, - }, - }, - { - name: "markdown", - newName: "new name", - expectedView: influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.MarkdownViewProperties{ - Type: influxdb.ViewPropertyTypeMarkdown, - Note: "a note", - }, - }, - }, - { - name: "table", - newName: "new name", - expectedView: influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.TableViewProperties{ - Type: influxdb.ViewPropertyTypeTable, - Note: "a note", - ShowNoteWhenEmpty: true, - Queries: []influxdb.DashboardQuery{newQuery()}, - ViewColors: []influxdb.ViewColor{{Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}}, - TableOptions: influxdb.TableOptions{ - VerticalTimeAxis: true, - SortBy: influxdb.RenamableField{ - InternalName: "_time", - }, - Wrapping: "truncate", - FixFirstColumn: true, - }, - FieldOptions: []influxdb.RenamableField{ - { - InternalName: "_time", - DisplayName: "time (ms)", - 
Visible: true, - }, - }, - TimeFormat: "YYYY:MM:DD", - DecimalPlaces: influxdb.DecimalPlaces{ - IsEnforced: true, - Digits: 1, - }, - }, - }, - }, - { - // validate implementation resolves: https://github.com/influxdata/influxdb/issues/17708 - name: "table converts table options correctly", - newName: "new name", - expectedView: influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.TableViewProperties{ - Type: influxdb.ViewPropertyTypeTable, - Note: "a note", - ShowNoteWhenEmpty: true, - Queries: []influxdb.DashboardQuery{newQuery()}, - ViewColors: []influxdb.ViewColor{{Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}}, - TableOptions: influxdb.TableOptions{ - VerticalTimeAxis: true, - SortBy: influxdb.RenamableField{ - InternalName: "_time", - }, - Wrapping: "truncate", - }, - FieldOptions: []influxdb.RenamableField{ - { - InternalName: "_time", - DisplayName: "time (ms)", - Visible: true, - }, - { - InternalName: "_value", - DisplayName: "bytes", - Visible: true, - }, - }, - TimeFormat: "YYYY:MM:DD", - DecimalPlaces: influxdb.DecimalPlaces{ - IsEnforced: true, - Digits: 1, - }, - }, - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - expectedCell := &influxdb.Cell{ - ID: 5, - CellProperty: influxdb.CellProperty{X: 1, Y: 2, W: 3, H: 4}, - View: &tt.expectedView, - } - expected := &influxdb.Dashboard{ - ID: 3, - Name: "bucket name", - Description: "desc", - Cells: []*influxdb.Cell{expectedCell}, - } - - dashSVC := mock.NewDashboardService() - dashSVC.FindDashboardByIDF = func(_ context.Context, id platform.ID) (*influxdb.Dashboard, error) { - if id != expected.ID { - return nil, errors.New("uh ohhh, wrong id here: " + id.String()) - } - return expected, nil - } - dashSVC.FindDashboardsF = func(_ context.Context, filter influxdb.DashboardFilter, _ influxdb.FindOptions) ([]*influxdb.Dashboard, int, error) { - if len(filter.IDs) < 1 { - return nil, 0, errors.New("uh ohhh, no id here") - } - if filter.IDs[0] != nil && *filter.IDs[0] != expected.ID { - return nil, 0, errors.New("uh ohhh, wrong id here: " + filter.IDs[0].String()) - } - return []*influxdb.Dashboard{expected}, 1, nil - } - dashSVC.GetDashboardCellViewF = func(_ context.Context, id platform.ID, cID platform.ID) (*influxdb.View, error) { - if id == expected.ID && cID == expectedCell.ID { - return &tt.expectedView, nil - } - return nil, errors.New("wrongo ids") - } - - svc := newTestService(WithDashboardSVC(dashSVC), WithLabelSVC(mock.NewLabelService())) - - resToClone := ResourceToClone{ - Kind: KindDashboard, - ID: expected.ID, - Name: tt.newName, - } - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - require.NoError(t, err) - - newTemplate := encodeAndDecode(t, template) - - dashs := newTemplate.Summary().Dashboards - require.Len(t, dashs, 1) - - actual := dashs[0] - expectedName := expected.Name - if tt.newName != "" { - expectedName = tt.newName - } - assert.Equal(t, expectedName, actual.Name) - assert.Equal(t, expected.Description, actual.Description) - - require.Len(t, actual.Charts, 1) - ch := actual.Charts[0] - assert.Equal(t, int(expectedCell.X), ch.XPosition) - assert.Equal(t, int(expectedCell.Y), ch.YPosition) - assert.Equal(t, int(expectedCell.H), ch.Height) - assert.Equal(t, int(expectedCell.W), ch.Width) - assert.Equal(t, tt.expectedView.Properties, ch.Properties) - } - t.Run(tt.name, fn) - } - }) - - t.Run("handles 
duplicate dashboard names", func(t *testing.T) { - dashSVC := mock.NewDashboardService() - dashSVC.FindDashboardByIDF = func(_ context.Context, id platform.ID) (*influxdb.Dashboard, error) { - return &influxdb.Dashboard{ - ID: id, - Name: "dash name", - Description: "desc", - }, nil - } - dashSVC.FindDashboardsF = func(_ context.Context, filter influxdb.DashboardFilter, _ influxdb.FindOptions) ([]*influxdb.Dashboard, int, error) { - if len(filter.IDs) < 1 { - return nil, 0, errors.New("uh ohhh, no id here") - } - return []*influxdb.Dashboard{{ - ID: *filter.IDs[0], - Name: "dash name", - Description: "desc", - }}, 1, nil - } - - svc := newTestService(WithDashboardSVC(dashSVC), WithLabelSVC(mock.NewLabelService())) - - resourcesToClone := []ResourceToClone{ - { - Kind: KindDashboard, - ID: 1, - }, - { - Kind: KindDashboard, - ID: 2, - }, - } - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resourcesToClone...)) - require.NoError(t, err) - - newTemplate := encodeAndDecode(t, template) - - dashs := newTemplate.Summary().Dashboards - require.Len(t, dashs, len(resourcesToClone)) - - for i := range resourcesToClone { - actual := dashs[i] - assert.Equal(t, "dash name", actual.Name) - assert.Equal(t, "desc", actual.Description) - } - }) - }) - - t.Run("dashboard by name", func(t *testing.T) { - id := 0 - newDash := func(name string, view influxdb.View) *influxdb.Dashboard { - id++ - return &influxdb.Dashboard{ - ID: platform.ID(id), - Name: name, - Description: fmt.Sprintf("desc_%d", id), - Cells: []*influxdb.Cell{ - { - ID: 0, - CellProperty: influxdb.CellProperty{X: 1, Y: 2, W: 3, H: 4}, - View: &view, - }, - }, - } - } - knownDashboards := []*influxdb.Dashboard{ - newDash("dasher", influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.GaugeViewProperties{ - Type: influxdb.ViewPropertyTypeGauge, - DecimalPlaces: influxdb.DecimalPlaces{IsEnforced: true, Digits: 1}, - Note: "a note", - Prefix: "pre", - TickPrefix: "true", - Suffix: "suf", - TickSuffix: "false", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: newColors("min", "max", "threshold"), - }, - }), - newDash("prancer", influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.HeatmapViewProperties{ - Type: influxdb.ViewPropertyTypeHeatMap, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: []string{"#8F8AF4", "#8F8AF4", "#8F8AF4"}, - XColumn: "x", - GenerateXAxisTicks: []string{"xTotalTicks", "xTickStart", "xTickStep"}, - XTotalTicks: 15, - XTickStart: 0, - XTickStep: 1000, - YColumn: "y", - GenerateYAxisTicks: []string{"yTotalTicks", "yTickStart", "yTickStep"}, - YTotalTicks: 10, - YTickStart: 0, - YTickStep: 100, - XDomain: []float64{0, 10}, - YDomain: []float64{0, 100}, - XAxisLabel: "x_label", - XPrefix: "x_prefix", - XSuffix: "x_suffix", - YAxisLabel: "y_label", - YPrefix: "y_prefix", - YSuffix: "y_suffix", - BinSize: 10, - TimeFormat: "", - }, - }), - newDash("prancer", influxdb.View{ - ViewContents: influxdb.ViewContents{ - Name: "view name", - }, - Properties: influxdb.HistogramViewProperties{ - Type: influxdb.ViewPropertyTypeHistogram, - Note: "a note", - Queries: []influxdb.DashboardQuery{newQuery()}, - ShowNoteWhenEmpty: true, - ViewColors: []influxdb.ViewColor{{Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}, {Type: "scale", Hex: "#8F8AF4", Value: 0}}, - FillColumns: 
[]string{"a", "b"}, - XColumn: "_value", - XDomain: []float64{0, 10}, - XAxisLabel: "x_label", - BinCount: 30, - Position: "stacked", - LegendColorizeRows: true, - LegendHide: false, - LegendOpacity: 1.0, - LegendOrientationThreshold: 5, - }, - }), - } - - tests := []struct { - name string - findName string - findID platform.ID - expected []*influxdb.Dashboard - }{ - { - name: "find dash with unique name", - findName: "dasher", - expected: []*influxdb.Dashboard{knownDashboards[0]}, - }, - { - name: "find multiple dash with shared name", - findName: "prancer", - expected: []*influxdb.Dashboard{knownDashboards[1], knownDashboards[2]}, - }, - { - name: "find no dash", - findName: "fakeDash", - expected: nil, - }, - { - name: "find dash by id", - findID: 1, - expected: []*influxdb.Dashboard{knownDashboards[0]}, - }, - { - name: "find dash by id, set new name", - findID: 1, - findName: "dancer", - expected: []*influxdb.Dashboard{knownDashboards[0]}, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - dashSVC := mock.NewDashboardService() - dashSVC.FindDashboardsF = func(_ context.Context, filter influxdb.DashboardFilter, _ influxdb.FindOptions) ([]*influxdb.Dashboard, int, error) { - if filter.IDs != nil && filter.IDs[0] != nil { - for i := range knownDashboards { - if knownDashboards[i].ID == *filter.IDs[0] { - return []*influxdb.Dashboard{knownDashboards[i]}, 1, nil - } - } - - return nil, 0, errors.New("uh ohhh, wrong id here: " + filter.IDs[0].String()) - } - - return knownDashboards, len(knownDashboards), nil - } - - dashSVC.GetDashboardCellViewF = func(_ context.Context, id platform.ID, cID platform.ID) (*influxdb.View, error) { - for i := range knownDashboards { - if knownDashboards[i].ID == id { - return knownDashboards[i].Cells[0].View, nil - } - } - - return nil, errors.New("wrongo ids") - } - - resToClone := ResourceToClone{ - Kind: KindDashboard, - } - if tt.findName != "" { - resToClone.Name = tt.findName - } - if tt.findID != platform.ID(0) { - resToClone.ID = tt.findID - } - - svc := newTestService( - WithDashboardSVC(dashSVC), - WithLabelSVC(mock.NewLabelService()), - ) - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - if tt.expected == nil { - require.Error(t, err) - } else { - require.NoError(t, err) - - if tt.findName != "" && tt.findID != platform.ID(0) { - tt.expected[0].Name = tt.findName - } - - actual := template.Summary().Dashboards - require.Len(t, actual, len(tt.expected)) - sort.Slice(actual, func(i, j int) bool { - return actual[i].Description < actual[j].Description - }) - - for i := range actual { - assert.Equal(t, tt.expected[i].Name, actual[i].Name) - assert.Equal(t, tt.expected[i].Description, actual[i].Description) - - require.Len(t, actual[i].Charts, 1) - ch := actual[i].Charts[0] - assert.Equal(t, int(tt.expected[i].Cells[0].CellProperty.X), ch.XPosition) - assert.Equal(t, int(tt.expected[i].Cells[0].CellProperty.Y), ch.YPosition) - assert.Equal(t, int(tt.expected[i].Cells[0].CellProperty.H), ch.Height) - assert.Equal(t, int(tt.expected[i].Cells[0].CellProperty.W), ch.Width) - assert.Equal(t, tt.expected[i].Cells[0].View.Properties, ch.Properties) - } - - assert.True(t, encodeAndDecode(t, template) != nil) - } - } - t.Run(tt.name, fn) - } - }) - - t.Run("label", func(t *testing.T) { - tests := []struct { - name string - newName string - }{ - { - name: "without new name", - }, - { - name: "with new name", - newName: "new name", - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - 
expectedLabel := &influxdb.Label{ - ID: 3, - Name: "bucket name", - Properties: map[string]string{ - "description": "desc", - "color": "red", - }, - } - - labelSVC := mock.NewLabelService() - labelSVC.FindLabelByIDFn = func(_ context.Context, id platform.ID) (*influxdb.Label, error) { - if id != expectedLabel.ID { - return nil, errors.New("uh ohhh, wrong id here: " + id.String()) - } - return expectedLabel, nil - } - labelSVC.FindLabelsFn = func(_ context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) { - if filter.Name != expectedLabel.Name { - return nil, errors.New("uh ohhh, wrong name here: " + filter.Name) - } - return []*influxdb.Label{expectedLabel}, nil - } - - svc := newTestService(WithLabelSVC(labelSVC)) - - resToClone := ResourceToClone{ - Kind: KindLabel, - ID: expectedLabel.ID, - Name: tt.newName, - } - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - require.NoError(t, err) - - newTemplate := encodeAndDecode(t, template) - - newLabels := newTemplate.Summary().Labels - require.Len(t, newLabels, 1) - - actual := newLabels[0] - expectedName := expectedLabel.Name - if tt.newName != "" { - expectedName = tt.newName - } - assert.Equal(t, expectedName, actual.Name) - assert.Equal(t, expectedLabel.Properties["color"], actual.Properties.Color) - assert.Equal(t, expectedLabel.Properties["description"], actual.Properties.Description) - } - t.Run(tt.name, fn) - } - }) - - t.Run("label by name", func(t *testing.T) { - knownLabels := []*influxdb.Label{ - { - ID: 1, - Name: "label one", - Properties: map[string]string{ - "description": "desc", - "color": "red", - }, - }, - { - ID: 2, - Name: "label two", - Properties: map[string]string{ - "description": "desc2", - "color": "green", - }, - }, - } - tests := []struct { - name string - findName string - findID platform.ID - expected []*influxdb.Label - }{ - { - name: "find label by name", - findName: "label one", - expected: []*influxdb.Label{knownLabels[0]}, - }, - { - name: "find no label", - findName: "label none", - expected: nil, - }, - { - name: "find label by id", - findID: platform.ID(2), - expected: []*influxdb.Label{knownLabels[1]}, - }, - { - name: "find label by id, set new name", - findName: "label three", - findID: platform.ID(2), - expected: []*influxdb.Label{knownLabels[1]}, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - labelSVC := mock.NewLabelService() - labelSVC.FindLabelByIDFn = func(_ context.Context, id platform.ID) (*influxdb.Label, error) { - for i := range knownLabels { - if knownLabels[i].ID == id { - return knownLabels[i], nil - } - } - - return nil, errors.New("uh ohhh, wrong id here: " + id.String()) - } - labelSVC.FindLabelsFn = func(_ context.Context, filter influxdb.LabelFilter) ([]*influxdb.Label, error) { - if filter.Name != "" { - for i := range knownLabels { - if knownLabels[i].Name == filter.Name { - return []*influxdb.Label{knownLabels[i]}, nil - } - } - - return nil, errors.New("uh ohhh, wrong name here: " + filter.Name) - } - - return knownLabels, nil - } - - resToClone := ResourceToClone{ - Kind: KindLabel, - } - if tt.findName != "" { - resToClone.Name = tt.findName - } - if tt.findID != platform.ID(0) { - resToClone.ID = tt.findID - } - - svc := newTestService(WithLabelSVC(labelSVC)) - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - if tt.expected == nil { - require.Error(t, err) - } else { - require.NoError(t, err) - - if tt.findName != "" && tt.findID != platform.ID(0) { - 
tt.expected[0].Name = tt.findName - } - - actual := template.Summary().Labels - require.Len(t, actual, len(tt.expected)) - - for i := range actual { - assert.Equal(t, tt.expected[i].Name, actual[i].Name) - assert.Equal(t, tt.expected[i].Properties["color"], actual[i].Properties.Color) - assert.Equal(t, tt.expected[i].Properties["description"], actual[i].Properties.Description) - } - - assert.True(t, encodeAndDecode(t, template) != nil) - } - } - t.Run(tt.name, fn) - } - }) - - t.Run("notification endpoints", func(t *testing.T) { - tests := []struct { - name string - newName string - expected influxdb.NotificationEndpoint - }{ - { - name: "pager duty", - expected: &endpoint.PagerDuty{ - Base: endpoint.Base{ - Name: "pd-endpoint", - Description: "desc", - Status: taskmodel.TaskStatusActive, - }, - ClientURL: "http://example.com", - RoutingKey: influxdb.SecretField{Key: "-routing-key"}, - }, - }, - { - name: "pager duty with new name", - newName: "new name", - expected: &endpoint.PagerDuty{ - Base: endpoint.Base{ - Name: "pd-endpoint", - Description: "desc", - Status: taskmodel.TaskStatusActive, - }, - ClientURL: "http://example.com", - RoutingKey: influxdb.SecretField{Key: "-routing-key"}, - }, - }, - { - name: "slack", - expected: &endpoint.Slack{ - Base: endpoint.Base{ - Name: "pd-endpoint", - Description: "desc", - Status: taskmodel.TaskStatusInactive, - }, - URL: "http://example.com", - Token: influxdb.SecretField{Key: "tokne"}, - }, - }, - { - name: "http basic", - expected: &endpoint.HTTP{ - Base: endpoint.Base{ - Name: "pd-endpoint", - Description: "desc", - Status: taskmodel.TaskStatusInactive, - }, - AuthMethod: "basic", - Method: "POST", - URL: "http://example.com", - Password: influxdb.SecretField{Key: "password"}, - Username: influxdb.SecretField{Key: "username"}, - }, - }, - { - name: "http bearer", - expected: &endpoint.HTTP{ - Base: endpoint.Base{ - Name: "pd-endpoint", - Description: "desc", - Status: taskmodel.TaskStatusInactive, - }, - AuthMethod: "bearer", - Method: "GET", - URL: "http://example.com", - Token: influxdb.SecretField{Key: "token"}, - }, - }, - { - name: "http none", - expected: &endpoint.HTTP{ - Base: endpoint.Base{ - Name: "pd-endpoint", - Description: "desc", - Status: taskmodel.TaskStatusInactive, - }, - AuthMethod: "none", - Method: "GET", - URL: "http://example.com", - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - id := platform.ID(1) - tt.expected.SetID(id) - - endpointSVC := mock.NewNotificationEndpointService() - endpointSVC.FindNotificationEndpointByIDF = func(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - if id != tt.expected.GetID() { - return nil, errors.New("uh ohhh, wrong id here: " + id.String()) - } - return tt.expected, nil - } - endpointSVC.FindNotificationEndpointsF = func(ctx context.Context, filter influxdb.NotificationEndpointFilter, _ ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) { - if filter.ID != nil && *filter.ID != tt.expected.GetID() { - return nil, 0, errors.New("uh ohhh, wrong id here: " + filter.ID.String()) - } - return []influxdb.NotificationEndpoint{tt.expected}, 1, nil - } - - svc := newTestService(WithNotificationEndpointSVC(endpointSVC)) - - resToClone := ResourceToClone{ - Kind: KindNotificationEndpoint, - ID: tt.expected.GetID(), - Name: tt.newName, - } - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - require.NoError(t, err) - - newTemplate := encodeAndDecode(t, template) - - endpoints := 
newTemplate.Summary().NotificationEndpoints - require.Len(t, endpoints, 1) - - actual := endpoints[0].NotificationEndpoint - expectedName := tt.expected.GetName() - if tt.newName != "" { - expectedName = tt.newName - } - assert.Equal(t, expectedName, actual.GetName()) - assert.Equal(t, tt.expected.GetDescription(), actual.GetDescription()) - assert.Equal(t, tt.expected.GetStatus(), actual.GetStatus()) - assert.Equal(t, tt.expected.SecretFields(), actual.SecretFields()) - } - t.Run(tt.name, fn) - } - }) - - knownEndpoints := []influxdb.NotificationEndpoint{ - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: newTestIDPtr(1), - Name: "pd endpoint", - Description: "desc", - Status: taskmodel.TaskStatusActive, - }, - ClientURL: "http://example.com", - RoutingKey: influxdb.SecretField{Key: "-routing-key"}, - }, - &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: newTestIDPtr(2), - Name: "pd-endpoint", - Description: "desc pd", - Status: taskmodel.TaskStatusActive, - }, - ClientURL: "http://example.com", - RoutingKey: influxdb.SecretField{Key: "-routing-key"}, - }, - &endpoint.Slack{ - Base: endpoint.Base{ - ID: newTestIDPtr(3), - Name: "slack endpoint", - Description: "desc slack", - Status: taskmodel.TaskStatusInactive, - }, - URL: "http://example.com", - Token: influxdb.SecretField{Key: "tokne"}, - }, - } - - t.Run("notification endpoints by name", func(t *testing.T) { - tests := []struct { - name string - findName string - findID platform.ID - expected []influxdb.NotificationEndpoint - }{ - { - name: "find notification endpoint with unique name", - findName: "pd endpoint", - expected: []influxdb.NotificationEndpoint{knownEndpoints[0]}, - }, - { - name: "find no notification endpoints", - findName: "fakeEndpoint", - expected: nil, - }, - { - name: "find notification endpoint by id", - findID: platform.ID(2), - expected: []influxdb.NotificationEndpoint{knownEndpoints[1]}, - }, - { - name: "find by id, set new name", - findID: platform.ID(3), - findName: "slack-endpoint", - expected: []influxdb.NotificationEndpoint{knownEndpoints[2]}, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - endpointSVC := mock.NewNotificationEndpointService() - endpointSVC.FindNotificationEndpointByIDF = func(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - for i := range knownEndpoints { - if knownEndpoints[i].GetID() == id { - return knownEndpoints[i], nil - } - } - - return nil, errors.New("uh ohhh, wrong endpoint id here: " + id.String()) - } - endpointSVC.FindNotificationEndpointsF = func(ctx context.Context, filter influxdb.NotificationEndpointFilter, _ ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) { - if filter.ID != nil { - for i := range knownEndpoints { - if knownEndpoints[i].GetID() == *filter.ID { - return []influxdb.NotificationEndpoint{knownEndpoints[i]}, 1, nil - } - } - - return nil, 0, errors.New("uh ohhh, wrong id here: " + filter.ID.String()) - } - - return knownEndpoints, len(knownEndpoints), nil - } - - resToClone := ResourceToClone{ - Kind: KindNotificationEndpoint, - } - if tt.findName != "" { - resToClone.Name = tt.findName - } - if tt.findID != platform.ID(0) { - resToClone.ID = tt.findID - } - - svc := newTestService(WithNotificationEndpointSVC(endpointSVC)) - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - if tt.expected == nil { - require.Error(t, err) - } else { - require.NoError(t, err) - - if tt.findName != "" && tt.findID != platform.ID(0) { - 
tt.expected[0].SetName(tt.findName) - } - - actual := template.Summary().NotificationEndpoints - require.Len(t, actual, len(tt.expected)) - - for i := range actual { - assert.Equal(t, tt.expected[i].GetName(), actual[i].NotificationEndpoint.GetName()) - assert.Equal(t, tt.expected[i].GetDescription(), actual[i].NotificationEndpoint.GetDescription()) - assert.Equal(t, tt.expected[i].GetStatus(), actual[i].NotificationEndpoint.GetStatus()) - assert.Equal(t, tt.expected[i].SecretFields(), actual[i].NotificationEndpoint.SecretFields()) - } - - assert.True(t, encodeAndDecode(t, template) != nil) - } - } - t.Run(tt.name, fn) - } - }) - - t.Run("notification rules", func(t *testing.T) { - newRuleBase := func(id int) rule.Base { - return rule.Base{ - ID: platform.ID(id), - Name: "old_name", - Description: "desc", - EndpointID: platform.ID(id), - Every: mustDuration(t, time.Hour), - Offset: mustDuration(t, time.Minute), - TagRules: []notification.TagRule{ - {Tag: influxdb.Tag{Key: "k1", Value: "v1"}}, - }, - StatusRules: []notification.StatusRule{ - {CurrentLevel: notification.Ok, PreviousLevel: levelPtr(notification.Warn)}, - {CurrentLevel: notification.Critical}, - }, - } - } - - t.Run("single rule export", func(t *testing.T) { - tests := []struct { - name string - newName string - endpoint influxdb.NotificationEndpoint - rule influxdb.NotificationRule - }{ - { - name: "pager duty", - newName: "pager_duty_name", - endpoint: &endpoint.PagerDuty{ - Base: endpoint.Base{ - ID: newTestIDPtr(13), - Name: "endpoint_0", - Description: "desc", - Status: taskmodel.TaskStatusActive, - }, - ClientURL: "http://example.com", - RoutingKey: influxdb.SecretField{Key: "-routing-key"}, - }, - rule: &rule.PagerDuty{ - Base: newRuleBase(13), - MessageTemplate: "Template", - }, - }, - { - name: "slack", - endpoint: &endpoint.Slack{ - Base: endpoint.Base{ - ID: newTestIDPtr(13), - Name: "endpoint_0", - Description: "desc", - Status: taskmodel.TaskStatusInactive, - }, - URL: "http://example.com", - Token: influxdb.SecretField{Key: "tokne"}, - }, - rule: &rule.Slack{ - Base: newRuleBase(13), - Channel: "abc", - MessageTemplate: "SLACK TEMPlate", - }, - }, - { - name: "http none", - endpoint: &endpoint.HTTP{ - Base: endpoint.Base{ - ID: newTestIDPtr(13), - Name: "endpoint_0", - Description: "desc", - Status: taskmodel.TaskStatusInactive, - }, - AuthMethod: "none", - Method: "GET", - URL: "http://example.com", - }, - rule: &rule.HTTP{ - Base: newRuleBase(13), - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - endpointSVC := mock.NewNotificationEndpointService() - endpointSVC.FindNotificationEndpointByIDF = func(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - if id != tt.endpoint.GetID() { - return nil, errors.New("uh ohhh, wrong id here: " + id.String()) - } - return tt.endpoint, nil - } - ruleSVC := mock.NewNotificationRuleStore() - ruleSVC.FindNotificationRuleByIDF = func(ctx context.Context, id platform.ID) (influxdb.NotificationRule, error) { - return tt.rule, nil - } - ruleSVC.FindNotificationRulesF = func(ctx context.Context, _ influxdb.NotificationRuleFilter, _ ...influxdb.FindOptions) ([]influxdb.NotificationRule, int, error) { - return []influxdb.NotificationRule{tt.rule}, 1, nil - } - - svc := newTestService( - WithNotificationEndpointSVC(endpointSVC), - WithNotificationRuleSVC(ruleSVC), - ) - - resToClone := ResourceToClone{ - Kind: KindNotificationRule, - ID: tt.rule.GetID(), - Name: tt.newName, - } - template, err := svc.Export(context.TODO(), 
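// Exporting a notification rule also pulls in its endpoint as a dependent resource;
// the assertions below check both the rule and the endpoint in the template summary.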
ExportWithExistingResources(resToClone)) - require.NoError(t, err) - - newTemplate := encodeAndDecode(t, template) - - sum := newTemplate.Summary() - require.Len(t, sum.NotificationRules, 1) - - actualRule := sum.NotificationRules[0] - assert.Zero(t, actualRule.ID) - assert.Zero(t, actualRule.EndpointID) - assert.NotEmpty(t, actualRule.EndpointType) - assert.NotEmpty(t, actualRule.EndpointMetaName) - - baseEqual := func(t *testing.T, base rule.Base) { - t.Helper() - expectedName := base.Name - if tt.newName != "" { - expectedName = tt.newName - } - assert.Equal(t, expectedName, actualRule.Name) - assert.Equal(t, base.Description, actualRule.Description) - assert.Equal(t, base.Every.TimeDuration().String(), actualRule.Every) - assert.Equal(t, base.Offset.TimeDuration().String(), actualRule.Offset) - - for _, sRule := range base.StatusRules { - expected := SummaryStatusRule{CurrentLevel: sRule.CurrentLevel.String()} - if sRule.PreviousLevel != nil { - expected.PreviousLevel = sRule.PreviousLevel.String() - } - assert.Contains(t, actualRule.StatusRules, expected) - } - for _, tRule := range base.TagRules { - expected := SummaryTagRule{ - Key: tRule.Key, - Value: tRule.Value, - Operator: tRule.Operator.String(), - } - assert.Contains(t, actualRule.TagRules, expected) - } - } - - switch p := tt.rule.(type) { - case *rule.HTTP: - baseEqual(t, p.Base) - case *rule.PagerDuty: - baseEqual(t, p.Base) - assert.Equal(t, p.MessageTemplate, actualRule.MessageTemplate) - case *rule.Slack: - baseEqual(t, p.Base) - assert.Equal(t, p.MessageTemplate, actualRule.MessageTemplate) - } - - require.Len(t, template.Summary().NotificationEndpoints, 1) - - actualEndpoint := template.Summary().NotificationEndpoints[0].NotificationEndpoint - assert.Equal(t, tt.endpoint.GetName(), actualEndpoint.GetName()) - assert.Equal(t, tt.endpoint.GetDescription(), actualEndpoint.GetDescription()) - assert.Equal(t, tt.endpoint.GetStatus(), actualEndpoint.GetStatus()) - } - t.Run(tt.name, fn) - } - }) - - t.Run("handles rules duplicate names", func(t *testing.T) { - endpointSVC := mock.NewNotificationEndpointService() - endpointSVC.FindNotificationEndpointByIDF = func(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - return &endpoint.HTTP{ - Base: endpoint.Base{ - ID: &id, - Name: "endpoint_0", - Description: "desc", - Status: taskmodel.TaskStatusInactive, - }, - AuthMethod: "none", - Method: "GET", - URL: "http://example.com", - }, nil - } - ruleSVC := mock.NewNotificationRuleStore() - ruleSVC.FindNotificationRuleByIDF = func(ctx context.Context, id platform.ID) (influxdb.NotificationRule, error) { - return &rule.HTTP{ - Base: newRuleBase(int(id)), - }, nil - } - - svc := newTestService( - WithNotificationEndpointSVC(endpointSVC), - WithNotificationRuleSVC(ruleSVC), - ) - - resourcesToClone := []ResourceToClone{ - { - Kind: KindNotificationRule, - ID: 1, - }, - { - Kind: KindNotificationRule, - ID: 2, - }, - } - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resourcesToClone...)) - require.NoError(t, err) - - newTemplate := encodeAndDecode(t, template) - - sum := newTemplate.Summary() - require.Len(t, sum.NotificationRules, len(resourcesToClone)) - - expectedSameEndpointName := sum.NotificationRules[0].EndpointMetaName - assert.NotZero(t, expectedSameEndpointName) - assert.NotEqual(t, "endpoint_0", expectedSameEndpointName) - - for i := range resourcesToClone { - actual := sum.NotificationRules[i] - assert.Equal(t, "old_name", actual.Name) - assert.Equal(t, "desc", 
actual.Description) - assert.Equal(t, expectedSameEndpointName, actual.EndpointMetaName) - } - - require.Len(t, sum.NotificationEndpoints, 1) - assert.Equal(t, "endpoint_0", sum.NotificationEndpoints[0].NotificationEndpoint.GetName()) - }) - }) - - t.Run("notification rules by name", func(t *testing.T) { - newRuleBase := func(id int, name string) rule.Base { - return rule.Base{ - ID: platform.ID(id), - Name: name, - Description: fmt.Sprintf("desc %d", id), - EndpointID: platform.ID(1), // todo: setting to id as well likely doesn't work due to safeID - Every: mustDuration(t, time.Hour), - Offset: mustDuration(t, time.Minute), - TagRules: []notification.TagRule{ - {Tag: influxdb.Tag{Key: "k1", Value: "v1"}}, - }, - StatusRules: []notification.StatusRule{ - {CurrentLevel: notification.Ok, PreviousLevel: levelPtr(notification.Warn)}, - {CurrentLevel: notification.Critical}, - }, - } - } - - knownRules := []influxdb.NotificationRule{ - &rule.PagerDuty{ - Base: newRuleBase(1, "pd notify"), - MessageTemplate: "Template", - }, - &rule.PagerDuty{ - Base: newRuleBase(2, "pd-notify"), - MessageTemplate: "Template2 ", - }, - &rule.Slack{ - Base: newRuleBase(3, "pd-notify"), - Channel: "abc", - MessageTemplate: "SLACK TEMPlate", - }, - } - - tests := []struct { - name string - findName string - findID platform.ID - expected []influxdb.NotificationRule - }{ - { - name: "find rule with unique name", - findName: "pd notify", - expected: []influxdb.NotificationRule{knownRules[0]}, - }, - { - name: "find multiple rules with shared name", - findName: "pd-notify", - expected: []influxdb.NotificationRule{knownRules[1], knownRules[2]}, - }, - { - name: "find no rules", - findName: "fakeRule", - expected: nil, - }, - { - name: "find rule by id", - findID: platform.ID(2), - expected: []influxdb.NotificationRule{knownRules[1]}, - }, - { - name: "find by id, set new name", - findID: platform.ID(3), - findName: "slack-notify", - expected: []influxdb.NotificationRule{knownRules[2]}, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - endpointSVC := mock.NewNotificationEndpointService() - endpointSVC.FindNotificationEndpointByIDF = func(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - for i := range knownEndpoints { - if knownEndpoints[i].GetID() == id { - return knownEndpoints[i], nil - } - } - - return nil, errors.New("uh ohhh, wrong endpoint id here: " + id.String()) - } - - ruleSVC := mock.NewNotificationRuleStore() - ruleSVC.FindNotificationRuleByIDF = func(ctx context.Context, id platform.ID) (influxdb.NotificationRule, error) { - for i := range knownRules { - if knownRules[i].GetID() == id { - return knownRules[i], nil - } - } - - return nil, errors.New("uh ohhh, wrong rule id here: " + id.String()) - } - ruleSVC.FindNotificationRulesF = func(ctx context.Context, _ influxdb.NotificationRuleFilter, _ ...influxdb.FindOptions) ([]influxdb.NotificationRule, int, error) { - return knownRules, len(knownRules), nil - } - - resToClone := ResourceToClone{ - Kind: KindNotificationRule, - } - if tt.findName != "" { - resToClone.Name = tt.findName - } - if tt.findID != platform.ID(0) { - resToClone.ID = tt.findID - } - - svc := newTestService( - WithNotificationEndpointSVC(endpointSVC), - WithNotificationRuleSVC(ruleSVC), - ) - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - if tt.expected == nil { - require.Error(t, err) - } else { - require.NoError(t, err) - - actual := template.Summary() - require.Len(t, actual.NotificationRules, 
len(tt.expected)) - require.Len(t, actual.NotificationEndpoints, 1) - sort.Slice(actual.NotificationRules, func(i, j int) bool { - return actual.NotificationRules[i].Description < actual.NotificationRules[j].Description - }) - sort.Slice(actual.NotificationEndpoints, func(i, j int) bool { - return actual.NotificationEndpoints[i].NotificationEndpoint.GetDescription() < actual.NotificationEndpoints[j].NotificationEndpoint.GetDescription() - }) - - if tt.findName != "" && tt.findID != platform.ID(0) { - tt.expected[0].SetName(tt.findName) - } - - for i := range actual.NotificationRules { - assert.Zero(t, actual.NotificationRules[i].ID) - assert.Zero(t, actual.NotificationRules[i].EndpointID) - assert.NotEmpty(t, actual.NotificationRules[i].EndpointType) - assert.NotEmpty(t, actual.NotificationRules[i].EndpointMetaName) - - baseEqual := func(t *testing.T, base rule.Base) { - assert.Equal(t, base.Name, actual.NotificationRules[i].Name) - assert.Equal(t, base.Description, actual.NotificationRules[i].Description) - assert.Equal(t, base.Every.TimeDuration().String(), actual.NotificationRules[i].Every) - assert.Equal(t, base.Offset.TimeDuration().String(), actual.NotificationRules[i].Offset) - - for _, sRule := range base.StatusRules { - expected := SummaryStatusRule{CurrentLevel: sRule.CurrentLevel.String()} - if sRule.PreviousLevel != nil { - expected.PreviousLevel = sRule.PreviousLevel.String() - } - assert.Contains(t, actual.NotificationRules[i].StatusRules, expected) - } - for _, tRule := range base.TagRules { - expected := SummaryTagRule{ - Key: tRule.Key, - Value: tRule.Value, - Operator: tRule.Operator.String(), - } - assert.Contains(t, actual.NotificationRules[i].TagRules, expected) - } - } - - switch p := tt.expected[i].(type) { - case *rule.HTTP: - baseEqual(t, p.Base) - case *rule.PagerDuty: - baseEqual(t, p.Base) - assert.Equal(t, p.MessageTemplate, actual.NotificationRules[i].MessageTemplate) - case *rule.Slack: - baseEqual(t, p.Base) - assert.Equal(t, p.MessageTemplate, actual.NotificationRules[i].MessageTemplate) - } - - for j := range actual.NotificationEndpoints { - endpoint, err := endpointSVC.FindNotificationEndpointByIDF(context.Background(), tt.expected[i].GetEndpointID()) - require.NoError(t, err) - - assert.Equal(t, endpoint.GetName(), actual.NotificationEndpoints[j].NotificationEndpoint.GetName()) - assert.Equal(t, endpoint.GetDescription(), actual.NotificationEndpoints[j].NotificationEndpoint.GetDescription()) - assert.Equal(t, endpoint.GetStatus(), actual.NotificationEndpoints[j].NotificationEndpoint.GetStatus()) - } - } - - assert.True(t, encodeAndDecode(t, template) != nil) - } - } - t.Run(tt.name, fn) - } - }) - - t.Run("tasks", func(t *testing.T) { - t.Run("single task exports", func(t *testing.T) { - tests := []struct { - name string - newName string - task taskmodel.Task - }{ - { - name: "every offset is set", - newName: "new name", - task: taskmodel.Task{ - ID: 1, - Name: "name_9000", - Every: time.Minute.String(), - Offset: 10 * time.Second, - Type: taskmodel.TaskSystemType, - Flux: `option task = { name: "larry" } from(bucket: "rucket") |> yield()`, - }, - }, - { - name: "cron is set", - task: taskmodel.Task{ - ID: 1, - Name: "name_0", - Cron: "2 * * * *", - Type: taskmodel.TaskSystemType, - Flux: `option task = { name: "larry" } from(bucket: "rucket") |> yield()`, - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - taskSVC := mock.NewTaskService() - taskSVC.FindTaskByIDFn = func(ctx context.Context, id platform.ID) (*taskmodel.Task, 
error) { - if id != tt.task.ID { - return nil, errors.New("wrong id provided: " + id.String()) - } - return &tt.task, nil - } - taskSVC.FindTasksFn = func(ctx context.Context, filter taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - return []*taskmodel.Task{&tt.task}, 1, nil - } - - svc := newTestService(WithTaskSVC(taskSVC)) - - resToClone := ResourceToClone{ - Kind: KindTask, - ID: tt.task.ID, - Name: tt.newName, - } - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - require.NoError(t, err) - - newTemplate := encodeAndDecode(t, template) - - sum := newTemplate.Summary() - - tasks := sum.Tasks - require.Len(t, tasks, 1) - - expectedName := tt.task.Name - if tt.newName != "" { - expectedName = tt.newName - } - actual := tasks[0] - assert.Equal(t, expectedName, actual.Name) - assert.Equal(t, tt.task.Cron, actual.Cron) - assert.Equal(t, tt.task.Description, actual.Description) - assert.Equal(t, tt.task.Every, actual.Every) - assert.Equal(t, durToStr(tt.task.Offset), actual.Offset) - - expectedQuery := `from(bucket: "rucket") |> yield()` - assert.Equal(t, expectedQuery, actual.Query) - } - t.Run(tt.name, fn) - } - }) - - t.Run("handles multiple tasks of same name", func(t *testing.T) { - taskSVC := mock.NewTaskService() - taskSVC.FindTaskByIDFn = func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - return &taskmodel.Task{ - ID: id, - Type: taskmodel.TaskSystemType, - Name: "same name", - Description: "desc", - Status: taskmodel.TaskStatusActive, - Flux: `from(bucket: "foo")`, - Every: "5m0s", - }, nil - } - - svc := newTestService(WithTaskSVC(taskSVC)) - - resourcesToClone := []ResourceToClone{ - { - Kind: KindTask, - ID: 1, - }, - { - Kind: KindTask, - ID: 2, - }, - } - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resourcesToClone...)) - require.NoError(t, err) - - newTemplate := encodeAndDecode(t, template) - - sum := newTemplate.Summary() - - tasks := sum.Tasks - require.Len(t, tasks, len(resourcesToClone)) - - for _, actual := range sum.Tasks { - assert.Equal(t, "same name", actual.Name) - assert.Equal(t, "desc", actual.Description) - assert.Equal(t, influxdb.Active, actual.Status) - assert.Equal(t, `from(bucket: "foo")`, actual.Query) - assert.Equal(t, "5m0s", actual.Every) - } - }) - }) - - t.Run("tasks by name", func(t *testing.T) { - knownTasks := []*taskmodel.Task{ - { - ID: 1, - Name: "task", - Description: "task 1", - Every: time.Minute.String(), - Offset: 10 * time.Second, - Type: taskmodel.TaskSystemType, - Flux: `option task = { name: "larry" } from(bucket: "rucket") |> yield()`, - }, - { - ID: 2, - Name: "taskCopy", - Description: "task 2", - Cron: "2 * * * *", - Type: taskmodel.TaskSystemType, - Flux: `option task = { name: "curly" } from(bucket: "rucket") |> yield()`, - }, - { - ID: 3, - Name: "taskCopy", - Description: "task 3", - Cron: "2 3 4 5 *", - Type: taskmodel.TaskSystemType, - Flux: `option task = { name: "moe" } from(bucket: "rucket") |> yield()`, - }, - } - - tests := []struct { - name string - findName string - findID platform.ID - expected []*taskmodel.Task - }{ - { - name: "find task with unique name", - findName: "task", - expected: []*taskmodel.Task{knownTasks[0]}, - }, - { - name: "find multiple tasks with shared name", - findName: "taskCopy", - expected: []*taskmodel.Task{knownTasks[1], knownTasks[2]}, - }, - { - name: "find no tasks", - findName: "faketask", - expected: nil, - }, - { - name: "find task by id", - findID: platform.ID(2), - expected: 
[]*taskmodel.Task{knownTasks[1]}, - }, - { - name: "find by id, set new name", - findID: platform.ID(2), - findName: "renamedTask", - expected: []*taskmodel.Task{knownTasks[1]}, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - taskSVC := mock.NewTaskService() - taskSVC.FindTaskByIDFn = func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - for i := range knownTasks { - if knownTasks[i].ID == id { - return knownTasks[i], nil - } - } - - return nil, errors.New("wrong id provided: " + id.String()) - } - taskSVC.FindTasksFn = func(ctx context.Context, filter taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - tasks := []*taskmodel.Task{} - for i := range knownTasks { - if knownTasks[i].Name == *filter.Name { - tasks = append(tasks, knownTasks[i]) - } - } - return tasks, len(tasks), nil - } - - resToClone := ResourceToClone{ - Kind: KindTask, - } - if tt.findName != "" { - resToClone.Name = tt.findName - } - if tt.findID != platform.ID(0) { - resToClone.ID = tt.findID - } - - svc := newTestService(WithTaskSVC(taskSVC)) - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - if tt.expected == nil { - require.Error(t, err) - } else { - require.NoError(t, err) - - if tt.findName != "" && tt.findID != platform.ID(0) { - tt.expected[0].Name = tt.findName - } - - actual := template.Summary().Tasks - require.Len(t, actual, len(tt.expected)) - sort.Slice(actual, func(i, j int) bool { - return actual[i].Description < actual[j].Description - }) - - for i := range actual { - assert.Equal(t, tt.expected[i].Name, actual[i].Name) - assert.Equal(t, tt.expected[i].Cron, actual[i].Cron) - assert.Equal(t, tt.expected[i].Description, actual[i].Description) - assert.Equal(t, tt.expected[i].Every, actual[i].Every) - assert.Equal(t, durToStr(tt.expected[i].Offset), actual[i].Offset) - - assert.Equal(t, `from(bucket: "rucket") |> yield()`, actual[i].Query) - } - assert.True(t, encodeAndDecode(t, template) != nil) - } - } - t.Run(tt.name, fn) - } - }) - - t.Run("telegraf configs", func(t *testing.T) { - t.Run("allows for duplicate telegraf names to be exported", func(t *testing.T) { - tConfig := &influxdb.TelegrafConfig{ - OrgID: 9000, - Name: "same name", - Description: "desc", - Config: "some config string", - } - teleStore := mock.NewTelegrafConfigStore() - teleStore.FindTelegrafConfigByIDF = func(ctx context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { - tConfig.ID = id - return tConfig, nil - } - - svc := newTestService(WithTelegrafSVC(teleStore)) - - resourcesToClone := []ResourceToClone{ - { - Kind: KindTelegraf, - ID: 1, - }, - { - Kind: KindTelegraf, - ID: 2, - }, - } - - teleStore.FindTelegrafConfigsF = func(ctx context.Context, filter influxdb.TelegrafConfigFilter, _ ...influxdb.FindOptions) ([]*influxdb.TelegrafConfig, int, error) { - tgrafs := []*influxdb.TelegrafConfig{} - for _, r := range resourcesToClone { - t := tConfig - t.ID = r.ID - tgrafs = append(tgrafs, t) - } - return tgrafs, len(tgrafs), nil - } - - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resourcesToClone...)) - require.NoError(t, err) - - newTemplate := encodeAndDecode(t, template) - - sum := newTemplate.Summary() - - teles := sum.TelegrafConfigs - sort.Slice(teles, func(i, j int) bool { - return teles[i].TelegrafConfig.Name < teles[j].TelegrafConfig.Name - }) - require.Len(t, teles, len(resourcesToClone)) - - for i := range resourcesToClone { - actual := teles[i] - assert.Equal(t, "same name", 
actual.TelegrafConfig.Name) - assert.Equal(t, "desc", actual.TelegrafConfig.Description) - assert.Equal(t, "some config string", actual.TelegrafConfig.Config) - } - }) - }) - - t.Run("telegraf configs by name", func(t *testing.T) { - knownConfigs := []*influxdb.TelegrafConfig{ - { - ID: 1, - OrgID: 9000, - Name: "my config", - Description: "desc1", - Config: "a config string", - }, - { - ID: 2, - OrgID: 9000, - Name: "telConfig", - Description: "desc2", - Config: "some config string", - }, - { - ID: 3, - OrgID: 9000, - Name: "telConfig", - Description: "desc3", - Config: "some other config string", - }, - } - - tests := []struct { - name string - findName string - findID platform.ID - expected []*influxdb.TelegrafConfig - }{ - { - name: "find telegraf with unique name", - findName: "my config", - expected: []*influxdb.TelegrafConfig{knownConfigs[0]}, - }, - { - name: "find multiple telegrafs with shared name", - findName: "telConfig", - expected: []*influxdb.TelegrafConfig{knownConfigs[1], knownConfigs[2]}, - }, - { - name: "find no telegrafs", - findName: "fakeConfig", - expected: nil, - }, - { - name: "find telegraf by id", - findID: platform.ID(2), - expected: []*influxdb.TelegrafConfig{knownConfigs[1]}, - }, - { - name: "find by id, set new name", - findID: platform.ID(2), - findName: "newConfig", - expected: []*influxdb.TelegrafConfig{knownConfigs[1]}, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - teleStore := mock.NewTelegrafConfigStore() - teleStore.FindTelegrafConfigByIDF = func(_ context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { - for i := range knownConfigs { - if knownConfigs[i].ID == id { - return knownConfigs[i], nil - } - } - return nil, errors.New("uh ohhh, wrong id here: " + id.String()) - } - teleStore.FindTelegrafConfigsF = func(_ context.Context, filter influxdb.TelegrafConfigFilter, _ ...influxdb.FindOptions) ([]*influxdb.TelegrafConfig, int, error) { - return knownConfigs, len(knownConfigs), nil - } - - resToClone := ResourceToClone{ - Kind: KindTelegraf, - } - if tt.findName != "" { - resToClone.Name = tt.findName - } - if tt.findID != platform.ID(0) { - resToClone.ID = tt.findID - } - - svc := newTestService(WithTelegrafSVC(teleStore)) - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - if tt.expected == nil { - require.Error(t, err) - } else { - require.NoError(t, err) - - if tt.findName != "" && tt.findID != platform.ID(0) { - tt.expected[0].Name = tt.findName - } - - actual := template.Summary().TelegrafConfigs - require.Len(t, actual, len(tt.expected)) - sort.Slice(actual, func(i, j int) bool { - return actual[i].TelegrafConfig.Description < actual[j].TelegrafConfig.Description - }) - - for i := range actual { - assert.Equal(t, tt.expected[i].Name, actual[i].TelegrafConfig.Name) - assert.Equal(t, tt.expected[i].Description, actual[i].TelegrafConfig.Description) - assert.Equal(t, tt.expected[i].Config, actual[i].TelegrafConfig.Config) - } - - assert.True(t, encodeAndDecode(t, template) != nil) - } - } - t.Run(tt.name, fn) - } - }) - - t.Run("variable", func(t *testing.T) { - tests := []struct { - name string - newName string - expectedVar influxdb.Variable - }{ - { - name: "without new name", - expectedVar: influxdb.Variable{ - ID: 1, - Name: "old name", - Description: "desc", - Selected: []string{"val"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"val"}, - }, - }, - }, - { - name: "with new name", - newName: "new name", - 
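// newName only affects the exported summary; the variable returned by the mock
// service below keeps its original stored name.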
expectedVar: influxdb.Variable{ - ID: 1, - Name: "old name", - Selected: []string{"val"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"val"}, - }, - }, - }, - { - name: "with map arg", - expectedVar: influxdb.Variable{ - ID: 1, - Name: "old name", - Selected: []string{"v"}, - Arguments: &influxdb.VariableArguments{ - Type: "map", - Values: influxdb.VariableMapValues{"k": "v"}, - }, - }, - }, - { - name: "with query arg", - expectedVar: influxdb.Variable{ - ID: 1, - Name: "old name", - Selected: []string{"bucket-foo"}, - Arguments: &influxdb.VariableArguments{ - Type: "query", - Values: influxdb.VariableQueryValues{ - Query: "buckets()", - Language: "flux", - }, - }, - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - varSVC := mock.NewVariableService() - varSVC.FindVariableByIDF = func(_ context.Context, id platform.ID) (*influxdb.Variable, error) { - if id != tt.expectedVar.ID { - return nil, errors.New("uh ohhh, wrong id here: " + id.String()) - } - return &tt.expectedVar, nil - } - varSVC.FindVariablesF = func(_ context.Context, filter influxdb.VariableFilter, _ ...influxdb.FindOptions) ([]*influxdb.Variable, error) { - if filter.ID != nil && *filter.ID != tt.expectedVar.ID { - return nil, errors.New("uh ohhh, wrong id here: " + fmt.Sprint(*filter.ID)) - } - return []*influxdb.Variable{&tt.expectedVar}, nil - } - - svc := newTestService(WithVariableSVC(varSVC), WithLabelSVC(mock.NewLabelService())) - - resToClone := ResourceToClone{ - Kind: KindVariable, - ID: tt.expectedVar.ID, - Name: tt.newName, - } - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - require.NoError(t, err) - - newTemplate := encodeAndDecode(t, template) - - newVars := newTemplate.Summary().Variables - require.Len(t, newVars, 1) - - actual := newVars[0] - expectedName := tt.expectedVar.Name - if tt.newName != "" { - expectedName = tt.newName - } - assert.Equal(t, expectedName, actual.Name) - assert.Equal(t, tt.expectedVar.Description, actual.Description) - assert.Equal(t, tt.expectedVar.Selected, actual.Selected) - assert.Equal(t, tt.expectedVar.Arguments, actual.Arguments) - } - t.Run(tt.name, fn) - } - }) - - t.Run("variable by name", func(t *testing.T) { - knownVariables := []*influxdb.Variable{ - { - ID: 1, - Name: "variable", - Description: "desc", - Selected: []string{"val1"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"val"}, - }, - }, - { - ID: 2, - Name: "var 2", - Selected: []string{"val2"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"val"}, - }, - }, - { - ID: 3, - Name: "var 3", - Selected: []string{"v"}, - Arguments: &influxdb.VariableArguments{ - Type: "map", - Values: influxdb.VariableMapValues{"k": "v"}, - }, - }, - } - - tests := []struct { - name string - findName string - findID platform.ID - expected []*influxdb.Variable - }{ - { - name: "find variable with unique name", - findName: "variable", - expected: []*influxdb.Variable{knownVariables[0]}, - }, - { - name: "find no variables", - findName: "fakeVariable", - expected: nil, - }, - { - name: "find variable by id", - findID: platform.ID(2), - expected: []*influxdb.Variable{knownVariables[1]}, - }, - { - name: "find by id, set new name", - findID: platform.ID(2), - findName: "useful var", - expected: []*influxdb.Variable{knownVariables[1]}, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { 
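// Each case wires a mock VariableService that resolves only the known fixtures,
// then exports a single variable either by name or by ID.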
- varSVC := mock.NewVariableService() - varSVC.FindVariableByIDF = func(_ context.Context, id platform.ID) (*influxdb.Variable, error) { - for i := range knownVariables { - if knownVariables[i].ID == id { - return knownVariables[i], nil - } - } - - return nil, errors.New("uh ohhh, wrong id here: " + id.String()) - } - varSVC.FindVariablesF = func(_ context.Context, filter influxdb.VariableFilter, _ ...influxdb.FindOptions) ([]*influxdb.Variable, error) { - return knownVariables, nil - } - - resToClone := ResourceToClone{ - Kind: KindVariable, - } - if tt.findName != "" { - resToClone.Name = tt.findName - } - if tt.findID != platform.ID(0) { - resToClone.ID = tt.findID - } - - svc := newTestService(WithVariableSVC(varSVC), WithLabelSVC(mock.NewLabelService())) - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - if tt.expected == nil { - require.Error(t, err) - } else { - require.NoError(t, err) - - if tt.findName != "" && tt.findID != platform.ID(0) { - tt.expected[0].Name = tt.findName - } - - actual := template.Summary().Variables - require.Len(t, actual, len(tt.expected)) - - for i := range actual { - assert.Equal(t, tt.expected[i].Name, actual[i].Name) - assert.Equal(t, tt.expected[i].Description, actual[i].Description) - assert.Equal(t, tt.expected[i].Arguments, actual[i].Arguments) - } - - assert.True(t, encodeAndDecode(t, template) != nil) - } - } - t.Run(tt.name, fn) - } - }) - - t.Run("includes resource associations", func(t *testing.T) { - t.Run("single resource with single association", func(t *testing.T) { - expected := &influxdb.Bucket{ - ID: 3, - Name: "bucket name", - Description: "desc", - RetentionPeriod: time.Hour, - } - - bktSVC := mock.NewBucketService() - bktSVC.FindBucketByIDFn = func(_ context.Context, id platform.ID) (*influxdb.Bucket, error) { - if id != expected.ID { - return nil, errors.New("uh ohhh, wrong id here: " + id.String()) - } - return expected, nil - } - bktSVC.FindBucketsFn = func(_ context.Context, f influxdb.BucketFilter, opts ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - if f.ID != nil && *f.ID != expected.ID { - return nil, 0, errors.New("not suppose to get here") - } - return []*influxdb.Bucket{expected}, 1, nil - } - - labelSVC := mock.NewLabelService() - labelSVC.FindResourceLabelsFn = func(_ context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - if f.ResourceID != expected.ID { - return nil, errors.New("uh ohs wrong id: " + f.ResourceID.String()) - } - return []*influxdb.Label{ - {Name: "label_1"}, - }, nil - } - - svc := newTestService(WithBucketSVC(bktSVC), WithLabelSVC(labelSVC)) - - resToClone := ResourceToClone{ - Kind: KindBucket, - ID: expected.ID, - } - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - require.NoError(t, err) - - newTemplate := encodeAndDecode(t, template) - sum := newTemplate.Summary() - - bkts := sum.Buckets - require.Len(t, bkts, 1) - - actual := bkts[0] - expectedName := expected.Name - assert.Equal(t, expectedName, actual.Name) - assert.Equal(t, expected.Description, actual.Description) - assert.Equal(t, expected.RetentionPeriod, actual.RetentionPeriod) - require.Len(t, actual.LabelAssociations, 1) - assert.Equal(t, "label_1", actual.LabelAssociations[0].Name) - - labels := sum.Labels - require.Len(t, labels, 1) - assert.Equal(t, "label_1", labels[0].Name) - }) - - t.Run("multiple resources with same associations", func(t *testing.T) { - bktSVC := mock.NewBucketService() - bktSVC.FindBucketByIDFn = 
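// The bucket mocks synthesize buckets named after their IDs; what matters in this
// case is that both buckets report the same two label associations.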
func(_ context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ID: id, Name: strconv.Itoa(int(id))}, nil - } - - labelSVC := mock.NewLabelService() - labelSVC.FindResourceLabelsFn = func(_ context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - return []*influxdb.Label{ - {Name: "label_1"}, - {Name: "label_2"}, - }, nil - } - - svc := newTestService(WithBucketSVC(bktSVC), WithLabelSVC(labelSVC)) - - resourcesToClone := []ResourceToClone{ - { - Kind: KindBucket, - ID: 10, - }, - { - Kind: KindBucket, - ID: 20, - }, - } - - bktSVC.FindBucketsFn = func(_ context.Context, f influxdb.BucketFilter, opts ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - bkts := []*influxdb.Bucket{} - for _, r := range resourcesToClone { - bkts = append(bkts, &influxdb.Bucket{ID: r.ID, Name: strconv.Itoa(int(r.ID)), Type: influxdb.BucketTypeUser}) - } - return bkts, len(bkts), nil - } - - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resourcesToClone...)) - require.NoError(t, err) - - newTemplate := encodeAndDecode(t, template) - sum := newTemplate.Summary() - - bkts := sum.Buckets - sort.Slice(bkts, func(i, j int) bool { - return bkts[i].Name < bkts[j].Name - }) - require.Len(t, bkts, 2) - - for i, actual := range bkts { - sortLabelsByName(actual.LabelAssociations) - assert.Equal(t, strconv.Itoa((i+1)*10), actual.Name) - require.Len(t, actual.LabelAssociations, 2) - assert.Equal(t, "label_1", actual.LabelAssociations[0].Name) - assert.Equal(t, "label_2", actual.LabelAssociations[1].Name) - } - - labels := sum.Labels - sortLabelsByName(labels) - require.Len(t, labels, 2) - assert.Equal(t, "label_1", labels[0].Name) - assert.Equal(t, "label_2", labels[1].Name) - }) - - t.Run("labels do not fetch associations", func(t *testing.T) { - labelSVC := mock.NewLabelService() - labelSVC.FindLabelByIDFn = func(_ context.Context, id platform.ID) (*influxdb.Label, error) { - return &influxdb.Label{ID: id, Name: "label_1"}, nil - } - labelSVC.FindResourceLabelsFn = func(_ context.Context, f influxdb.LabelMappingFilter) ([]*influxdb.Label, error) { - return nil, errors.New("should not get here") - } - - svc := newTestService(WithLabelSVC(labelSVC)) - - resToClone := ResourceToClone{ - Kind: KindLabel, - ID: 1, - } - template, err := svc.Export(context.TODO(), ExportWithExistingResources(resToClone)) - require.NoError(t, err) - - newTemplate := encodeAndDecode(t, template) - - labels := newTemplate.Summary().Labels - require.Len(t, labels, 1) - assert.Equal(t, "label_1", labels[0].Name) - }) - }) - }) - - t.Run("with org id", func(t *testing.T) { - orgID := platform.ID(9000) - bkt := &influxdb.Bucket{ID: 1, Name: "bucket"} - - bktSVC := mock.NewBucketService() - bktSVC.FindBucketsFn = func(_ context.Context, f influxdb.BucketFilter, opts ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - if (f.ID != nil && *f.ID != bkt.ID) && (f.OrganizationID == nil || *f.OrganizationID != orgID) { - return nil, 0, errors.New("not suppose to get here") - } - return []*influxdb.Bucket{bkt}, 1, nil - } - bktSVC.FindBucketByIDFn = func(_ context.Context, id platform.ID) (*influxdb.Bucket, error) { - if id != 1 { - return nil, errors.New("wrong id") - } - return bkt, nil - } - - checkSVC := mock.NewCheckService() - expectedCheck := &icheck.Deadman{ - Base: newThresholdBase(1), - TimeSince: mustDuration(t, time.Hour), - StaleTime: mustDuration(t, 5*time.Hour), - ReportZero: true, - Level: notification.Critical, - } - checkSVC.FindChecksFn = func(ctx 
context.Context, f influxdb.CheckFilter, _ ...influxdb.FindOptions) ([]influxdb.Check, int, error) { - if (f.ID != nil && *f.ID != expectedCheck.GetID()) && (f.OrgID == nil || *f.OrgID != orgID) { - return nil, 0, errors.New("not suppose to get here") - } - return []influxdb.Check{expectedCheck}, 1, nil - } - checkSVC.FindCheckByIDFn = func(ctx context.Context, id platform.ID) (influxdb.Check, error) { - return expectedCheck, nil - } - - dash := &influxdb.Dashboard{ - ID: 2, - Name: "dashboard", - Cells: []*influxdb.Cell{}, - } - dashSVC := mock.NewDashboardService() - dashSVC.FindDashboardsF = func(_ context.Context, f influxdb.DashboardFilter, _ influxdb.FindOptions) ([]*influxdb.Dashboard, int, error) { - if (f.IDs != nil && len(f.IDs) > 0 && - f.IDs[0] != nil && *f.IDs[0] != dash.ID) && - (f.OrganizationID == nil || *f.OrganizationID != orgID) { - return nil, 0, errors.New("not suppose to get here") - } - return []*influxdb.Dashboard{dash}, 1, nil - } - dashSVC.FindDashboardByIDF = func(_ context.Context, id platform.ID) (*influxdb.Dashboard, error) { - if id != 2 { - return nil, errors.New("wrong id") - } - return dash, nil - } - - notificationEndpoint := &endpoint.HTTP{ - Base: endpoint.Base{ - ID: newTestIDPtr(2), - Name: "http", - }, - URL: "http://example.com/id", - Username: influxdb.SecretField{Key: "2-username"}, - Password: influxdb.SecretField{Key: "2-password"}, - AuthMethod: "basic", - Method: "POST", - } - endpointSVC := mock.NewNotificationEndpointService() - endpointSVC.FindNotificationEndpointsF = func(ctx context.Context, f influxdb.NotificationEndpointFilter, _ ...influxdb.FindOptions) ([]influxdb.NotificationEndpoint, int, error) { - return []influxdb.NotificationEndpoint{notificationEndpoint}, 1, nil - } - endpointSVC.FindNotificationEndpointByIDF = func(ctx context.Context, id platform.ID) (influxdb.NotificationEndpoint, error) { - return notificationEndpoint, nil - } - - expectedRule := &rule.HTTP{ - Base: rule.Base{ - ID: 12, - Name: "rule_0", - EndpointID: 2, - Every: mustDuration(t, time.Minute), - StatusRules: []notification.StatusRule{{CurrentLevel: notification.Critical}}, - }, - } - ruleSVC := mock.NewNotificationRuleStore() - ruleSVC.FindNotificationRulesF = func(ctx context.Context, f influxdb.NotificationRuleFilter, _ ...influxdb.FindOptions) ([]influxdb.NotificationRule, int, error) { - out := []influxdb.NotificationRule{expectedRule} - return out, len(out), nil - } - ruleSVC.FindNotificationRuleByIDF = func(ctx context.Context, id platform.ID) (influxdb.NotificationRule, error) { - return expectedRule, nil - } - - labelSVC := mock.NewLabelService() - labelSVC.FindLabelsFn = func(_ context.Context, f influxdb.LabelFilter) ([]*influxdb.Label, error) { - if f.OrgID == nil || *f.OrgID != orgID { - return nil, errors.New("not suppose to get here") - } - return []*influxdb.Label{{ID: 3, Name: "label"}}, nil - } - labelSVC.FindLabelByIDFn = func(_ context.Context, id platform.ID) (*influxdb.Label, error) { - if id != 3 { - return nil, errors.New("wrong id") - } - return &influxdb.Label{ID: 3, Name: "label"}, nil - } - - taskSVC := mock.NewTaskService() - taskSVC.FindTasksFn = func(ctx context.Context, f taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - if f.After != nil { - return nil, 0, nil - } - return []*taskmodel.Task{ - {ID: 31, Type: taskmodel.TaskSystemType}, - {ID: expectedCheck.TaskID, Type: taskmodel.TaskSystemType}, // this one should be ignored in the return - {ID: expectedRule.TaskID, Type: taskmodel.TaskSystemType}, // this one should 
be ignored in the return as well - {ID: 99}, // this one should be skipped since it is not a system task - }, 3, nil - } - taskSVC.FindTaskByIDFn = func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - if id != 31 { - return nil, errors.New("wrong id: " + id.String()) - } - return &taskmodel.Task{ - ID: id, - Name: "task_0", - Every: time.Minute.String(), - Offset: 10 * time.Second, - Type: taskmodel.TaskSystemType, - Flux: `option task = { name: "larry" } from(bucket: "rucket") |> yield()`, - }, nil - } - - varSVC := mock.NewVariableService() - varSVC.FindVariablesF = func(_ context.Context, f influxdb.VariableFilter, _ ...influxdb.FindOptions) ([]*influxdb.Variable, error) { - if f.OrganizationID == nil || *f.OrganizationID != orgID { - return nil, errors.New("not suppose to get here") - } - return []*influxdb.Variable{{ID: 4, Name: "variable"}}, nil - } - varSVC.FindVariableByIDF = func(_ context.Context, id platform.ID) (*influxdb.Variable, error) { - if id != 4 { - return nil, errors.New("wrong id") - } - return &influxdb.Variable{ID: 4, Name: "variable"}, nil - } - - svc := newTestService( - WithBucketSVC(bktSVC), - WithCheckSVC(checkSVC), - WithDashboardSVC(dashSVC), - WithLabelSVC(labelSVC), - WithNotificationEndpointSVC(endpointSVC), - WithNotificationRuleSVC(ruleSVC), - WithTaskSVC(taskSVC), - WithVariableSVC(varSVC), - ) - - template, err := svc.Export( - context.TODO(), - ExportWithAllOrgResources(ExportByOrgIDOpt{ - OrgID: orgID, - }), - ) - require.NoError(t, err) - - summary := template.Summary() - bkts := summary.Buckets - require.Len(t, bkts, 1) - assert.Equal(t, "bucket", bkts[0].Name) - - checks := summary.Checks - require.Len(t, checks, 1) - assert.Equal(t, expectedCheck.Name, checks[0].Check.GetName()) - - dashs := summary.Dashboards - require.Len(t, dashs, 1) - assert.Equal(t, "dashboard", dashs[0].Name) - - labels := summary.Labels - require.Len(t, labels, 1) - assert.Equal(t, "label", labels[0].Name) - - endpoints := summary.NotificationEndpoints - require.Len(t, endpoints, 1) - assert.Equal(t, "http", endpoints[0].NotificationEndpoint.GetName()) - - rules := summary.NotificationRules - require.Len(t, rules, 1) - assert.Equal(t, expectedRule.Name, rules[0].Name) - assert.NotEmpty(t, rules[0].EndpointMetaName) - - require.Len(t, summary.Tasks, 1) - task1 := summary.Tasks[0] - assert.Equal(t, "task_0", task1.Name) - - vars := summary.Variables - require.Len(t, vars, 1) - assert.Equal(t, "variable", vars[0].Name) - }) - }) - - t.Run("InitStack", func(t *testing.T) { - safeCreateFn := func(ctx context.Context, stack Stack) error { - return nil - } - - type createFn func(ctx context.Context, stack Stack) error - - newFakeStore := func(fn createFn) *fakeStore { - return &fakeStore{ - createFn: fn, - } - } - - now := time.Time{}.Add(10 * 24 * time.Hour) - - t.Run("when store call is successful", func(t *testing.T) { - svc := newTestService( - WithIDGenerator(newFakeIDGen(3)), - WithTimeGenerator(newTimeGen(now)), - WithStore(newFakeStore(safeCreateFn)), - ) - - stack, err := svc.InitStack(context.Background(), 9000, StackCreate{OrgID: 3333}) - require.NoError(t, err) - - assert.Equal(t, platform.ID(3), stack.ID) - assert.Equal(t, now, stack.CreatedAt) - assert.Equal(t, now, stack.LatestEvent().UpdatedAt) - }) - - t.Run("handles unexpected error paths", func(t *testing.T) { - tests := []struct { - name string - expectedErrCode string - store func() *fakeStore - orgSVC func() influxdb.OrganizationService - }{ - { - name: "unexpected store err", - 
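// Store failures, including conflict errors returned by the store itself, surface
// as EInternal; only a missing organization maps to EConflict.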
expectedErrCode: errors2.EInternal, - store: func() *fakeStore { - return newFakeStore(func(ctx context.Context, stack Stack) error { - return errors.New("unexpected error") - }) - }, - }, - { - name: "unexpected conflict store err", - expectedErrCode: errors2.EInternal, - store: func() *fakeStore { - return newFakeStore(func(ctx context.Context, stack Stack) error { - return &errors2.Error{Code: errors2.EConflict} - }) - }, - }, - { - name: "org does not exist produces conflict error", - expectedErrCode: errors2.EConflict, - store: func() *fakeStore { - return newFakeStore(safeCreateFn) - }, - orgSVC: func() influxdb.OrganizationService { - orgSVC := mock.NewOrganizationService() - orgSVC.FindOrganizationByIDF = func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return nil, &errors2.Error{Code: errors2.ENotFound} - } - return orgSVC - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - var orgSVC influxdb.OrganizationService = mock.NewOrganizationService() - if tt.orgSVC != nil { - orgSVC = tt.orgSVC() - } - - svc := newTestService( - WithIDGenerator(newFakeIDGen(3)), - WithTimeGenerator(newTimeGen(now)), - WithStore(tt.store()), - WithOrganizationService(orgSVC), - ) - - _, err := svc.InitStack(context.Background(), 9000, StackCreate{OrgID: 3333}) - require.Error(t, err) - assert.Equal(t, tt.expectedErrCode, errors2.ErrorCode(err)) - } - t.Run(tt.name, fn) - } - }) - - t.Run("jsonnet template url", func(t *testing.T) { - tests := []struct { - name string - create StackCreate - expectedErrCode string - }{ - // always valid - { - name: "no templates", - create: StackCreate{OrgID: 3333}, - }, - { - name: "one json template", - create: StackCreate{ - OrgID: 3333, - TemplateURLs: []string{"http://fake/some.json"}, - }, - }, - { - name: "one yaml template", - create: StackCreate{ - OrgID: 3333, - TemplateURLs: []string{"http://fake/some.yaml"}, - }, - }, - { - name: "multiple templates", - create: StackCreate{ - OrgID: 3333, - TemplateURLs: []string{ - "http://fake/some.yaml", - "http://fake/some.json", - "http://fake/other.yaml", - }, - }, - }, - // invalid - { - name: "one jsonnet template", - create: StackCreate{ - OrgID: 3333, - TemplateURLs: []string{"http://fake/some.jsonnet"}, - }, - expectedErrCode: "unprocessable entity", - }, - { - name: "multiple with one jsonnet template", - create: StackCreate{ - OrgID: 3333, - TemplateURLs: []string{ - "http://fake/some.json", - "http://fake/some.jsonnet", - "http://fake/some.yaml", - }, - }, - expectedErrCode: "unprocessable entity", - }, - { - name: "one weird jsonnet template", - create: StackCreate{ - OrgID: 3333, - TemplateURLs: []string{"http://fake/some.%6asonnet"}, - }, - expectedErrCode: "unprocessable entity", - }, - } - - svc := newTestService( - WithIDGenerator(newFakeIDGen(3)), - WithTimeGenerator(newTimeGen(now)), - WithStore(newFakeStore(safeCreateFn)), - ) - - for _, tt := range tests { - ctx := context.Background() - stack, err := svc.InitStack(ctx, 9000, tt.create) - if tt.expectedErrCode == "" { - require.NoError(t, err) - assert.Equal(t, platform.ID(3), stack.ID) - assert.Equal(t, platform.ID(3333), stack.OrgID) - assert.Equal(t, now, stack.CreatedAt) - assert.Equal(t, now, stack.LatestEvent().UpdatedAt) - } else { - require.Error(t, err) - assert.Equal(t, tt.expectedErrCode, errors2.ErrorCode(err)) - assert.Equal(t, Stack{}, stack) - } - } - }) - }) - - t.Run("UpdateStack", func(t *testing.T) { - now := time.Time{}.Add(10 * 24 * time.Hour) - - t.Run("when updating valid stack", 
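// Each case applies a partial StackUpdate against a fake store and checks the
// recorded event, including the meta-name collision renaming behaviour.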
func(t *testing.T) { - tests := []struct { - name string - input StackUpdate - expected StackEvent - }{ - { - name: "update nothing", - input: StackUpdate{}, - expected: StackEvent{ - EventType: StackEventUpdate, - UpdatedAt: now, - }, - }, - { - name: "update name", - input: StackUpdate{ - Name: strPtr("name"), - }, - expected: StackEvent{ - EventType: StackEventUpdate, - Name: "name", - UpdatedAt: now, - }, - }, - { - name: "update desc", - input: StackUpdate{ - Description: strPtr("desc"), - }, - expected: StackEvent{ - EventType: StackEventUpdate, - Description: "desc", - UpdatedAt: now, - }, - }, - { - name: "update URLs", - input: StackUpdate{ - TemplateURLs: []string{"http://example.com"}, - }, - expected: StackEvent{ - EventType: StackEventUpdate, - TemplateURLs: []string{"http://example.com"}, - UpdatedAt: now, - }, - }, - { - name: "update first 3", - input: StackUpdate{ - Name: strPtr("name"), - Description: strPtr("desc"), - TemplateURLs: []string{"http://example.com"}, - }, - expected: StackEvent{ - EventType: StackEventUpdate, - Name: "name", - Description: "desc", - TemplateURLs: []string{"http://example.com"}, - UpdatedAt: now, - }, - }, - { - name: "update with metaname collisions", - input: StackUpdate{ - Name: strPtr("name"), - Description: strPtr("desc"), - TemplateURLs: []string{"http://example.com"}, - AdditionalResources: []StackAdditionalResource{ - { - APIVersion: APIVersion, - ID: 1, - Kind: KindLabel, - MetaName: "meta-label", - }, - { - APIVersion: APIVersion, - ID: 2, - Kind: KindLabel, - MetaName: "meta-label", - }, - }, - }, - expected: StackEvent{ - EventType: StackEventUpdate, - Name: "name", - Description: "desc", - TemplateURLs: []string{"http://example.com"}, - Resources: []StackResource{ - { - APIVersion: APIVersion, - ID: 2, - Kind: KindLabel, - MetaName: "collision-1-" + platform.ID(333).String()[10:], - }, - { - APIVersion: APIVersion, - ID: 1, - Kind: KindLabel, - MetaName: "meta-label", - }, - }, - UpdatedAt: now, - }, - }, - { - name: "update all", - input: StackUpdate{ - Name: strPtr("name"), - Description: strPtr("desc"), - TemplateURLs: []string{"http://example.com"}, - AdditionalResources: []StackAdditionalResource{ - { - APIVersion: APIVersion, - ID: 1, - Kind: KindLabel, - MetaName: "meta-label", - }, - }, - }, - expected: StackEvent{ - EventType: StackEventUpdate, - Name: "name", - Description: "desc", - TemplateURLs: []string{"http://example.com"}, - Resources: []StackResource{ - { - APIVersion: APIVersion, - ID: 1, - Kind: KindLabel, - MetaName: "meta-label", - }, - }, - UpdatedAt: now, - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - var collisions int - nameGenFn := func() string { - collisions++ - return "collision-" + strconv.Itoa(collisions) - } - - svc := newTestService( - WithIDGenerator(mock.IDGenerator{ - IDFn: func() platform.ID { - return 333 - }, - }), - withNameGen(nameGenFn), - WithTimeGenerator(newTimeGen(now)), - WithStore(&fakeStore{ - readFn: func(ctx context.Context, id platform.ID) (Stack, error) { - if id != 33 { - return Stack{}, errors.New("wrong id: " + id.String()) - } - return Stack{ID: id, OrgID: 3}, nil - }, - updateFn: func(ctx context.Context, stack Stack) error { - return nil - }, - }), - ) - - tt.input.ID = 33 - stack, err := svc.UpdateStack(context.Background(), tt.input) - require.NoError(t, err) - - assert.Equal(t, platform.ID(33), stack.ID) - assert.Equal(t, platform.ID(3), stack.OrgID) - assert.Zero(t, stack.CreatedAt) // should always zero value in these tests - 
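// UpdatedAt comes from the injected time generator, so the recorded event can be
// compared exactly against the expectation.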
assert.Equal(t, tt.expected, stack.LatestEvent()) - } - - t.Run(tt.name, fn) - } - }) - t.Run("jsonnet template url", func(t *testing.T) { - tests := []struct { - name string - update StackUpdate - expectedErrCode string - }{ - // always valid - { - name: "no templates", - update: StackUpdate{ID: 3333}, - }, - { - name: "one json template", - update: StackUpdate{ - ID: 3333, - TemplateURLs: []string{"http://fake/some.json"}, - }, - }, - { - name: "one yaml template", - update: StackUpdate{ - ID: 3333, - TemplateURLs: []string{"http://fake/some.yaml"}, - }, - }, - { - name: "multiple templates", - update: StackUpdate{ - ID: 3333, - TemplateURLs: []string{ - "http://fake/some.yaml", - "http://fake/some.json", - "http://fake/other.yaml", - }, - }, - }, - // invalid - { - name: "one jsonnet template", - update: StackUpdate{ - ID: 3333, - TemplateURLs: []string{"http://fake/some.jsonnet"}, - }, - expectedErrCode: "unprocessable entity", - }, - { - name: "multiple with one jsonnet template", - update: StackUpdate{ - ID: 3333, - TemplateURLs: []string{ - "http://fake/some.json", - "http://fake/some.jsonnet", - "http://fake/some.yaml", - }, - }, - expectedErrCode: "unprocessable entity", - }, - { - name: "one weird jsonnet template", - update: StackUpdate{ - ID: 3333, - TemplateURLs: []string{"http://fake/some.%6asonnet"}, - }, - expectedErrCode: "unprocessable entity", - }, - } - - for _, tt := range tests { - svc := newTestService( - WithIDGenerator(mock.IDGenerator{ - IDFn: func() platform.ID { - return 333 - }, - }), - WithTimeGenerator(newTimeGen(now)), - WithStore(&fakeStore{ - readFn: func(ctx context.Context, id platform.ID) (Stack, error) { - return Stack{ID: id, OrgID: 3}, nil - }, - updateFn: func(ctx context.Context, stack Stack) error { - return nil - }, - }), - ) - - ctx := context.Background() - stack, err := svc.UpdateStack(ctx, tt.update) - if tt.expectedErrCode == "" { - require.NoError(t, err) - assert.Equal(t, platform.ID(3333), stack.ID) - assert.Equal(t, platform.ID(3), stack.OrgID) - assert.Equal(t, now, stack.LatestEvent().UpdatedAt) - } else { - require.Error(t, err) - assert.Equal(t, tt.expectedErrCode, errors2.ErrorCode(err)) - assert.Equal(t, Stack{}, stack) - } - } - }) - }) -} - -func Test_normalizeRemoteSources(t *testing.T) { - tests := []struct { - name string - input []string - expected []url.URL - }{ - { - name: "no urls provided", - input: []string{"byte stream", "string", ""}, - expected: nil, - }, - { - name: "skips valid file url", - input: []string{"file:///example.com"}, - expected: nil, - }, - { - name: "valid http url provided", - input: []string{"http://example.com"}, - expected: []url.URL{parseURL(t, "http://example.com")}, - }, - { - name: "valid https url provided", - input: []string{"https://example.com"}, - expected: []url.URL{parseURL(t, "https://example.com")}, - }, - { - name: "converts raw github user url to base github", - input: []string{"https://raw.githubusercontent.com/influxdata/community-templates/master/github/github.yml"}, - expected: []url.URL{ - parseURL(t, "https://github.com/influxdata/community-templates/blob/master/github/github.yml"), - }, - }, - { - name: "passes base github link unchanged", - input: []string{"https://github.com/influxdata/community-templates/blob/master/github/github.yml"}, - expected: []url.URL{ - parseURL(t, "https://github.com/influxdata/community-templates/blob/master/github/github.yml"), - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - actual := 
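// normalizeRemoteSources keeps only http(s) URLs and rewrites
// raw.githubusercontent.com links to their github.com blob form.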
normalizeRemoteSources(tt.input) - require.Len(t, actual, len(tt.expected)) - for i, expected := range tt.expected { - assert.Equal(t, expected.String(), actual[i].String()) - } - } - t.Run(tt.name, fn) - } -} - -func newTestIDPtr(i int) *platform.ID { - id := platform.ID(i) - return &id -} - -func levelPtr(l notification.CheckLevel) *notification.CheckLevel { - return &l -} - -type fakeStore struct { - createFn func(ctx context.Context, stack Stack) error - deleteFn func(ctx context.Context, id platform.ID) error - readFn func(ctx context.Context, id platform.ID) (Stack, error) - updateFn func(ctx context.Context, stack Stack) error -} - -var _ Store = (*fakeStore)(nil) - -func (s *fakeStore) CreateStack(ctx context.Context, stack Stack) error { - if s.createFn != nil { - return s.createFn(ctx, stack) - } - panic("not implemented") -} - -func (s *fakeStore) ListStacks(ctx context.Context, orgID platform.ID, f ListFilter) ([]Stack, error) { - panic("not implemented") -} - -func (s *fakeStore) ReadStackByID(ctx context.Context, id platform.ID) (Stack, error) { - if s.readFn != nil { - return s.readFn(ctx, id) - } - panic("not implemented") -} - -func (s *fakeStore) UpdateStack(ctx context.Context, stack Stack) error { - if s.updateFn != nil { - return s.updateFn(ctx, stack) - } - panic("not implemented") -} - -func (s *fakeStore) DeleteStack(ctx context.Context, id platform.ID) error { - if s.deleteFn != nil { - return s.deleteFn(ctx, id) - } - panic("not implemented") -} - -type fakeIDGen func() platform.ID - -func newFakeIDGen(id platform.ID) fakeIDGen { - return func() platform.ID { - return id - } -} - -func (f fakeIDGen) ID() platform.ID { - return f() -} - -type fakeTimeGen func() time.Time - -func newTimeGen(t time.Time) fakeTimeGen { - return func() time.Time { - return t - } -} - -func (t fakeTimeGen) Now() time.Time { - return t() -} - -func parseURL(t *testing.T, rawAddr string) url.URL { - t.Helper() - u, err := url.Parse(rawAddr) - require.NoError(t, err) - return *u -} diff --git a/pkger/service_tracing.go b/pkger/service_tracing.go deleted file mode 100644 index 82cc9930b9e..00000000000 --- a/pkger/service_tracing.go +++ /dev/null @@ -1,84 +0,0 @@ -package pkger - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/opentracing/opentracing-go/log" -) - -type traceMW struct { - next SVC -} - -// MWTracing adds tracing functionality for the service. 
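// A minimal usage sketch (assuming an SVC value named svc has been constructed
// elsewhere):
//
//	svc = MWTracing()(svc)
//
// The middleware simply wraps the next SVC and forwards every call inside a
// tracing span.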
-func MWTracing() SVCMiddleware { - return func(svc SVC) SVC { - return &traceMW{next: svc} - } -} - -var _ SVC = (*traceMW)(nil) - -func (s *traceMW) InitStack(ctx context.Context, userID platform.ID, newStack StackCreate) (Stack, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - return s.next.InitStack(ctx, userID, newStack) -} - -func (s *traceMW) UninstallStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) (Stack, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - return s.next.UninstallStack(ctx, identifiers) -} - -func (s *traceMW) DeleteStack(ctx context.Context, identifiers struct{ OrgID, UserID, StackID platform.ID }) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - return s.next.DeleteStack(ctx, identifiers) -} - -func (s *traceMW) ListStacks(ctx context.Context, orgID platform.ID, f ListFilter) ([]Stack, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - stacks, err := s.next.ListStacks(ctx, orgID, f) - span.LogFields( - log.String("org_id", orgID.String()), - log.Int("num_stacks", len(stacks)), - ) - return stacks, err -} - -func (s *traceMW) ReadStack(ctx context.Context, id platform.ID) (Stack, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - return s.next.ReadStack(ctx, id) -} - -func (s *traceMW) UpdateStack(ctx context.Context, upd StackUpdate) (Stack, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - return s.next.UpdateStack(ctx, upd) -} - -func (s *traceMW) Export(ctx context.Context, opts ...ExportOptFn) (template *Template, err error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - return s.next.Export(ctx, opts...) -} - -func (s *traceMW) DryRun(ctx context.Context, orgID, userID platform.ID, opts ...ApplyOptFn) (ImpactSummary, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - span.LogKV("orgID", orgID.String(), "userID", userID.String()) - defer span.Finish() - return s.next.DryRun(ctx, orgID, userID, opts...) -} - -func (s *traceMW) Apply(ctx context.Context, orgID, userID platform.ID, opts ...ApplyOptFn) (ImpactSummary, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - span.LogKV("orgID", orgID.String(), "userID", userID.String()) - defer span.Finish() - return s.next.Apply(ctx, orgID, userID, opts...) -} diff --git a/pkger/store.go b/pkger/store.go deleted file mode 100644 index f39cfe6dc4e..00000000000 --- a/pkger/store.go +++ /dev/null @@ -1,397 +0,0 @@ -package pkger - -import ( - "bytes" - "context" - "encoding/json" - "sort" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" -) - -type ( - entStack struct { - ID []byte `json:"id"` - OrgID []byte `json:"orgID"` - CreatedAt time.Time `json:"createdAt"` - - Events []entStackEvent `json:"events"` - - // this embedding is for stacks that were - // created before events, this should stay - // for some time. 
- entStackEvent - } - - entStackEvent struct { - EventType StackEventType `json:"eventType"` - Name string `json:"name"` - Description string `json:"description"` - Sources []string `json:"sources,omitempty"` - URLs []string `json:"urls,omitempty"` - Resources []entStackResource `json:"resources,omitempty"` - UpdatedAt time.Time `json:"updatedAt"` - } - - entStackResource struct { - APIVersion string `json:"apiVersion"` - ID string `json:"id"` - Kind string `json:"kind"` - Name string `json:"name"` - Associations []entStackAssociation `json:"associations,omitempty"` - } - - entStackAssociation struct { - Kind string `json:"kind"` - Name string `json:"name"` - } -) - -// StoreKV is a store implementation that uses a kv store backing. -type StoreKV struct { - kvStore kv.Store - indexBase *kv.IndexStore -} - -var _ Store = (*StoreKV)(nil) - -// NewStoreKV creates a new StoreKV entity. This does not initialize the store. You will -// want to init it if you want to have this init donezo at startup. If not it'll lazy -// load the buckets as they are used. -func NewStoreKV(store kv.Store) *StoreKV { - const resource = "stack" - - storeKV := &StoreKV{ - kvStore: store, - } - storeKV.indexBase = &kv.IndexStore{ - Resource: resource, - EntStore: storeKV.entStoreBase(resource), - IndexStore: storeKV.indexStoreBase(resource), - } - return storeKV -} - -// CreateStack will create a new stack. If collisions are found will fail. -func (s *StoreKV) CreateStack(ctx context.Context, stack Stack) error { - return s.put(ctx, stack, kv.PutNew()) -} - -// ListStacks returns a list of stacks. -func (s *StoreKV) ListStacks(ctx context.Context, orgID platform.ID, f ListFilter) ([]Stack, error) { - if len(f.StackIDs) > 0 && len(f.Names) == 0 { - return s.listStacksByID(ctx, orgID, f.StackIDs) - } - - filterFn, err := storeListFilterFn(orgID, f) - if err != nil { - return nil, err - } - - var stacks []Stack - err = s.view(ctx, func(tx kv.Tx) error { - return s.indexBase.Find(ctx, tx, kv.FindOpts{ - CaptureFn: func(key []byte, decodedVal interface{}) error { - stack, err := convertStackEntToStack(decodedVal.(*entStack)) - if err != nil { - return err - } - stacks = append(stacks, stack) - return nil - }, - FilterEntFn: func(key []byte, decodedVal interface{}) bool { - st := decodedVal.(*entStack) - return filterFn(st) - }, - }) - }) - if err != nil { - return nil, err - } - - return stacks, nil -} - -func storeListFilterFn(orgID platform.ID, f ListFilter) (func(*entStack) bool, error) { - orgIDEncoded, err := orgID.Encode() - if err != nil { - return nil, err - } - - mIDs := make(map[string]bool) - for _, id := range f.StackIDs { - b, err := id.Encode() - if err != nil { - return nil, err - } - mIDs[string(b)] = true - } - - mNames := make(map[string]bool) - for _, name := range f.Names { - mNames[name] = true - } - - optionalFieldFilterFn := func(ent *entStack) bool { - switch { - case mIDs[string(ent.ID)]: - return true - // existing data before stacks are event sourced have - // this shape. 
- case len(mNames) > 0 && ent.Name != "": - return mNames[ent.Name] - case len(mNames) > 0 && len(ent.Events) > 0: - sort.Slice(ent.Events, func(i, j int) bool { - return ent.Events[i].UpdatedAt.After(ent.Events[j].UpdatedAt) - }) - return mNames[ent.Events[0].Name] - } - return true - } - return func(st *entStack) bool { - return bytes.Equal(orgIDEncoded, st.OrgID) && optionalFieldFilterFn(st) - }, nil -} - -func (s *StoreKV) listStacksByID(ctx context.Context, orgID platform.ID, stackIDs []platform.ID) ([]Stack, error) { - var stacks []Stack - for _, id := range stackIDs { - st, err := s.ReadStackByID(ctx, id) - if errors.ErrorCode(err) == errors.ENotFound { - // since the stackIDs are a filter, if it is not found, we just continue - // on. If the user wants to verify the existence of a particular stack - // then it would be upon them to use the ReadByID call. - continue - } - if err != nil { - return nil, err - } - if orgID != st.OrgID { - continue - } - stacks = append(stacks, st) - } - return stacks, nil -} - -// ReadStackByID reads a stack by the provided ID. -func (s *StoreKV) ReadStackByID(ctx context.Context, id platform.ID) (Stack, error) { - var stack Stack - err := s.view(ctx, func(tx kv.Tx) error { - decodedEnt, err := s.indexBase.FindEnt(ctx, tx, kv.Entity{PK: kv.EncID(id)}) - if err != nil { - return err - } - stack, err = convertStackEntToStack(decodedEnt.(*entStack)) - return err - }) - return stack, err -} - -// UpdateStack updates a stack. -func (s *StoreKV) UpdateStack(ctx context.Context, stack Stack) error { - existing, err := s.ReadStackByID(ctx, stack.ID) - if err != nil { - return err - } - - if stack.OrgID != existing.OrgID { - return &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "org id does not match", - } - } - - return s.put(ctx, stack, kv.PutUpdate()) -} - -// DeleteStack deletes a stack by id. -func (s *StoreKV) DeleteStack(ctx context.Context, id platform.ID) error { - return s.kvStore.Update(ctx, func(tx kv.Tx) error { - return s.indexBase.DeleteEnt(ctx, tx, kv.Entity{PK: kv.EncID(id)}) - }) -} - -func (s *StoreKV) put(ctx context.Context, stack Stack, opts ...kv.PutOptionFn) error { - ent, err := convertStackToEnt(stack) - if err != nil { - return influxErr(errors.EInvalid, err) - } - - return s.kvStore.Update(ctx, func(tx kv.Tx) error { - return s.indexBase.Put(ctx, tx, ent, opts...) 
- }) -} - -func (s *StoreKV) entStoreBase(resource string) *kv.StoreBase { - var decodeEntFn kv.DecodeBucketValFn = func(key, val []byte) (keyRepeat []byte, decodedVal interface{}, err error) { - var stack entStack - return key, &stack, json.Unmarshal(val, &stack) - } - - var decValToEntFn kv.ConvertValToEntFn = func(k []byte, i interface{}) (kv.Entity, error) { - s, ok := i.(*entStack) - if err := kv.IsErrUnexpectedDecodeVal(ok); err != nil { - return kv.Entity{}, err - } - - return kv.Entity{ - PK: kv.EncBytes(s.ID), - UniqueKey: kv.Encode(kv.EncBytes(s.OrgID), kv.EncBytes(s.ID)), - Body: s, - }, nil - } - - entityBucket := []byte("v1_pkger_stacks") - - return kv.NewStoreBase(resource, entityBucket, kv.EncIDKey, kv.EncBodyJSON, decodeEntFn, decValToEntFn) -} - -func (s *StoreKV) indexStoreBase(resource string) *kv.StoreBase { - var decValToEntFn kv.ConvertValToEntFn = func(k []byte, v interface{}) (kv.Entity, error) { - id, ok := v.(platform.ID) - if err := kv.IsErrUnexpectedDecodeVal(ok); err != nil { - return kv.Entity{}, err - } - - return kv.Entity{ - PK: kv.EncID(id), - UniqueKey: kv.EncBytes(k), - }, nil - } - - indexBucket := []byte("v1_pkger_stacks_index") - - return kv.NewStoreBase(resource, indexBucket, kv.EncUniqKey, kv.EncIDKey, kv.DecIndexID, decValToEntFn) -} - -func (s *StoreKV) view(ctx context.Context, fn func(tx kv.Tx) error) error { - return s.kvStore.View(ctx, fn) -} - -func convertStackToEnt(stack Stack) (kv.Entity, error) { - idBytes, err := stack.ID.Encode() - if err != nil { - return kv.Entity{}, err - } - - orgIDBytes, err := stack.OrgID.Encode() - if err != nil { - return kv.Entity{}, err - } - - stEnt := entStack{ - ID: idBytes, - OrgID: orgIDBytes, - CreatedAt: stack.CreatedAt, - } - for _, ev := range stack.Events { - var resources []entStackResource - for _, res := range ev.Resources { - var associations []entStackAssociation - for _, ass := range res.Associations { - associations = append(associations, entStackAssociation{ - Kind: ass.Kind.String(), - Name: ass.MetaName, - }) - } - resources = append(resources, entStackResource{ - APIVersion: res.APIVersion, - ID: res.ID.String(), - Kind: res.Kind.String(), - Name: res.MetaName, - Associations: associations, - }) - } - stEnt.Events = append(stEnt.Events, entStackEvent{ - EventType: ev.EventType, - Name: ev.Name, - Description: ev.Description, - Sources: ev.Sources, - URLs: ev.TemplateURLs, - Resources: resources, - UpdatedAt: ev.UpdatedAt, - }) - } - - return kv.Entity{ - PK: kv.EncBytes(stEnt.ID), - UniqueKey: kv.Encode(kv.EncBytes(stEnt.OrgID), kv.EncBytes(stEnt.ID)), - Body: stEnt, - }, nil -} - -func convertStackEntToStack(ent *entStack) (Stack, error) { - stack := Stack{ - CreatedAt: ent.CreatedAt, - } - if err := stack.ID.Decode(ent.ID); err != nil { - return Stack{}, err - } - - if err := stack.OrgID.Decode(ent.OrgID); err != nil { - return Stack{}, err - } - - entEvents := ent.Events - - // ensure backwards compatibility. 
All existing fields - // will be associated with a createEvent, regardless if - // they are or not - if !ent.UpdatedAt.IsZero() { - entEvents = append(entEvents, ent.entStackEvent) - } - - for _, entEv := range entEvents { - ev, err := convertEntStackEvent(entEv) - if err != nil { - return Stack{}, err - } - stack.Events = append(stack.Events, ev) - } - - return stack, nil -} - -func convertEntStackEvent(ent entStackEvent) (StackEvent, error) { - ev := StackEvent{ - EventType: ent.EventType, - Name: ent.Name, - Description: ent.Description, - Sources: ent.Sources, - TemplateURLs: ent.URLs, - UpdatedAt: ent.UpdatedAt, - } - out, err := convertStackEntResources(ent.Resources) - if err != nil { - return StackEvent{}, err - } - ev.Resources = out - return ev, nil -} - -func convertStackEntResources(entResources []entStackResource) ([]StackResource, error) { - var out []StackResource - for _, res := range entResources { - stackRes := StackResource{ - APIVersion: res.APIVersion, - Kind: Kind(res.Kind), - MetaName: res.Name, - } - if err := stackRes.ID.DecodeFromString(res.ID); err != nil { - return nil, err - } - - for _, ass := range res.Associations { - stackRes.Associations = append(stackRes.Associations, StackResourceAssociation{ - Kind: Kind(ass.Kind), - MetaName: ass.Name, - }) - } - out = append(out, stackRes) - } - return out, nil -} diff --git a/pkger/store_test.go b/pkger/store_test.go deleted file mode 100644 index 8db2fc882e6..00000000000 --- a/pkger/store_test.go +++ /dev/null @@ -1,385 +0,0 @@ -package pkger_test - -import ( - "context" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/pkger" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestStoreKV(t *testing.T) { - inMemStore := inmem.NewKVStore() - - // run all migrations against store - if err := all.Up(context.Background(), zaptest.NewLogger(t), inMemStore); err != nil { - t.Fatal(err) - } - - stackStub := func(id, orgID platform.ID) pkger.Stack { - now := time.Time{}.Add(10 * 365 * 24 * time.Hour) - urls := []string{ - "http://example.com", - "http://abc.gov", - } - return pkger.Stack{ - ID: id, - OrgID: orgID, - CreatedAt: now, - Events: []pkger.StackEvent{ - { - EventType: pkger.StackEventCreate, - Name: "threeve", - Description: "desc", - UpdatedAt: now.Add(time.Hour), - Sources: urls, - TemplateURLs: urls, - Resources: []pkger.StackResource{ - { - APIVersion: pkger.APIVersion, - ID: 9000, - Kind: pkger.KindBucket, - MetaName: "buzz lightyear", - Associations: []pkger.StackResourceAssociation{{ - Kind: pkger.KindLabel, - MetaName: "foo_label", - }}, - }, - { - APIVersion: pkger.APIVersion, - ID: 333, - Kind: pkger.KindBucket, - MetaName: "beyond", - }, - }, - }, - }, - } - } - - t.Run("create a stack", func(t *testing.T) { - defer inMemStore.Flush(context.Background()) - - storeKV := pkger.NewStoreKV(inMemStore) - - const orgID = 333 - seedEntities(t, storeKV, pkger.Stack{ - ID: 1, - OrgID: orgID, - }) - - t.Run("with no ID collisions creates successfully", func(t *testing.T) { - expected := stackStub(3, orgID) - - err := storeKV.CreateStack(context.Background(), expected) - require.NoError(t, err) - - readStackEqual(t, storeKV, expected) - }) - - t.Run("with ID collisions fails with 
conflict error", func(t *testing.T) { - for _, id := range []platform.ID{2, 3} { - err := storeKV.CreateStack(context.Background(), pkger.Stack{ - ID: 1, - OrgID: orgID, - }) - require.Errorf(t, err, "id=%d", id) - assert.Equalf(t, errors.EConflict, errors.ErrorCode(err), "id=%d", id) - } - }) - }) - - t.Run("list stacks", func(t *testing.T) { - defer inMemStore.Flush(context.Background()) - - storeKV := pkger.NewStoreKV(inMemStore) - - const orgID1 = 1 - const orgID2 = 2 - seedEntities(t, storeKV, - pkger.Stack{ - ID: 1, - OrgID: orgID1, - Events: []pkger.StackEvent{{ - Name: "first_name", - }}, - }, - pkger.Stack{ - ID: 2, - OrgID: orgID2, - Events: []pkger.StackEvent{{ - Name: "first_name", - }}, - }, - pkger.Stack{ - ID: 3, - OrgID: orgID1, - Events: []pkger.StackEvent{{ - Name: "second_name", - }}, - }, - pkger.Stack{ - ID: 4, - OrgID: orgID2, - Events: []pkger.StackEvent{{ - Name: "second_name", - }}, - }, - ) - - tests := []struct { - name string - orgID platform.ID - filter pkger.ListFilter - expected []pkger.Stack - }{ - { - name: "by org id", - orgID: orgID1, - expected: []pkger.Stack{ - { - ID: 1, - OrgID: orgID1, - Events: []pkger.StackEvent{{ - Name: "first_name", - }}, - }, - { - ID: 3, - OrgID: orgID1, - Events: []pkger.StackEvent{{ - Name: "second_name", - }}, - }, - }, - }, - { - name: "by stack ids", - orgID: orgID1, - filter: pkger.ListFilter{ - StackIDs: []platform.ID{1, 3}, - }, - expected: []pkger.Stack{ - { - ID: 1, - OrgID: orgID1, - Events: []pkger.StackEvent{{ - Name: "first_name", - }}, - }, - { - ID: 3, - OrgID: orgID1, - Events: []pkger.StackEvent{{ - Name: "second_name", - }}, - }, - }, - }, - { - name: "by stack ids skips ids that belong to different organization", - orgID: orgID1, - filter: pkger.ListFilter{ - StackIDs: []platform.ID{1, 2, 4}, - }, - expected: []pkger.Stack{{ - ID: 1, - OrgID: orgID1, - Events: []pkger.StackEvent{{ - Name: "first_name", - }}, - }}, - }, - { - name: "stack ids that do not exist are skipped", - orgID: orgID1, - filter: pkger.ListFilter{ - StackIDs: []platform.ID{1, 9000}, - }, - expected: []pkger.Stack{{ - ID: 1, - OrgID: orgID1, - Events: []pkger.StackEvent{{ - Name: "first_name", - }}, - }}, - }, - { - name: "by name", - orgID: orgID1, - filter: pkger.ListFilter{ - Names: []string{"first_name"}, - }, - expected: []pkger.Stack{{ - ID: 1, - OrgID: orgID1, - Events: []pkger.StackEvent{{ - Name: "first_name", - }}, - }}, - }, - { - name: "by name and id", - orgID: orgID1, - filter: pkger.ListFilter{ - StackIDs: []platform.ID{3}, - Names: []string{"first_name"}, - }, - expected: []pkger.Stack{ - { - ID: 1, - OrgID: orgID1, - Events: []pkger.StackEvent{{ - Name: "first_name", - }}, - }, - { - ID: 3, - OrgID: orgID1, - Events: []pkger.StackEvent{{ - Name: "second_name", - }}, - }, - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - stacks, err := storeKV.ListStacks(context.Background(), tt.orgID, tt.filter) - require.NoError(t, err) - assert.Equal(t, tt.expected, stacks) - } - - t.Run(tt.name, fn) - } - }) - - t.Run("read a stack", func(t *testing.T) { - defer inMemStore.Flush(context.Background()) - - storeKV := pkger.NewStoreKV(inMemStore) - - expected := stackStub(1, 3) - - seedEntities(t, storeKV, expected) - - t.Run("with valid ID returns stack successfully", func(t *testing.T) { - readStackEqual(t, storeKV, expected) - }) - - t.Run("when no match found fails with not found error", func(t *testing.T) { - unmatchedID := platform.ID(3000) - _, err := storeKV.ReadStackByID(context.Background(), 
unmatchedID) - require.Error(t, err) - assert.Equal(t, errors.ENotFound, errors.ErrorCode(err)) - }) - }) - - t.Run("update a stack", func(t *testing.T) { - defer inMemStore.Flush(context.Background()) - - storeKV := pkger.NewStoreKV(inMemStore) - - const orgID = 3 - const id = 3 - expected := stackStub(id, orgID) - - seedEntities(t, storeKV, expected) - - t.Run("with valid ID updates stack successfully", func(t *testing.T) { - expected := stackStub(id, orgID) - event := expected.LatestEvent() - event.EventType = pkger.StackEventUpdate - event.UpdatedAt = event.UpdatedAt.Add(time.Hour) - event.Resources = append(event.Resources, pkger.StackResource{ - APIVersion: pkger.APIVersion, - ID: 333, - Kind: pkger.KindBucket, - MetaName: "beyond", - }) - expected.Events = append(expected.Events, event) - - err := storeKV.UpdateStack(context.Background(), expected) - require.NoError(t, err) - - readStackEqual(t, storeKV, expected) - }) - - t.Run("when no match found fails with not found error", func(t *testing.T) { - unmatchedID := platform.ID(3000) - err := storeKV.UpdateStack(context.Background(), pkger.Stack{ - ID: unmatchedID, - OrgID: orgID, - }) - require.Error(t, err) - assert.Equalf(t, errors.ENotFound, errors.ErrorCode(err), "err: %s", err) - }) - - t.Run("when org id does not match fails with unprocessable entity error", func(t *testing.T) { - err := storeKV.UpdateStack(context.Background(), pkger.Stack{ - ID: id, - OrgID: orgID + 9000, - }) - require.Error(t, err) - assert.Equalf(t, errors.EUnprocessableEntity, errors.ErrorCode(err), "err: %s", err) - }) - }) - - t.Run("delete a stack", func(t *testing.T) { - defer inMemStore.Flush(context.Background()) - - storeKV := pkger.NewStoreKV(inMemStore) - - const orgID = 3 - expected := stackStub(1, orgID) - - seedEntities(t, storeKV, expected) - - t.Run("with valid ID deletes stack successfully", func(t *testing.T) { - err := storeKV.DeleteStack(context.Background(), expected.ID) - require.NoError(t, err) - - _, err = storeKV.ReadStackByID(context.Background(), expected.ID) - require.Error(t, err) - errCodeEqual(t, errors.ENotFound, err) - }) - - t.Run("when no match found fails with not found error", func(t *testing.T) { - unmatchedID := platform.ID(3000) - err := storeKV.DeleteStack(context.Background(), unmatchedID) - require.Error(t, err) - errCodeEqual(t, errors.ENotFound, err) - }) - }) -} - -func readStackEqual(t *testing.T, store pkger.Store, expected pkger.Stack) { - t.Helper() - - stack, err := store.ReadStackByID(context.Background(), expected.ID) - require.NoError(t, err) - assert.Equal(t, expected, stack) -} - -func errCodeEqual(t *testing.T, expected string, actual error) { - t.Helper() - - assert.Equalf(t, expected, errors.ErrorCode(actual), "err: %s", actual) -} - -func seedEntities(t *testing.T, store pkger.Store, first pkger.Stack, rest ...pkger.Stack) { - t.Helper() - - for _, st := range append(rest, first) { - err := store.CreateStack(context.Background(), st) - require.NoError(t, err) - } -} diff --git a/pkger/testdata/bucket.json b/pkger/testdata/bucket.json deleted file mode 100644 index e00dac33dd2..00000000000 --- a/pkger/testdata/bucket.json +++ /dev/null @@ -1,29 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Bucket", - "metadata": { - "name": "rucket-11" - }, - "spec": { - "description": "bucket 1 description", - "retentionRules": [ - { - "type": "expire", - "everySeconds": 3600 - } - ] - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Bucket", - "metadata": { - "name": 
"rucket-22" - }, - "spec": { - "name": "display name", - "description": "bucket 2 description" - } - } -] diff --git a/pkger/testdata/bucket.yml b/pkger/testdata/bucket.yml deleted file mode 100644 index 3ea9da8cef0..00000000000 --- a/pkger/testdata/bucket.yml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: rucket-11 -spec: - description: bucket 1 description - retentionRules: - - type: expire - everySeconds: 3600 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: rucket-22 -spec: - name: display name - description: bucket 2 description diff --git a/pkger/testdata/bucket_associates_label.json b/pkger/testdata/bucket_associates_label.json deleted file mode 100644 index 1e8f097585e..00000000000 --- a/pkger/testdata/bucket_associates_label.json +++ /dev/null @@ -1,65 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Label", - "metadata": { - "name": "label-2" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Label", - "metadata": { - "name": "label-1" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Bucket", - "metadata": { - "name": "rucket-1" - }, - "spec": { - "associations": [ - { - "kind": "Label", - "name": "label-1" - } - ] - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Bucket", - "metadata": { - "name": "rucket-2" - }, - "spec": { - "associations": [ - { - "kind": "Label", - "name": "label-2" - } - ] - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Bucket", - "metadata": { - "name": "rucket-3" - }, - "spec": { - "associations": [ - { - "kind": "Label", - "name": "label-2" - }, - { - "kind": "Label", - "name": "label-1" - } - ] - } - } -] diff --git a/pkger/testdata/bucket_associates_label.yml b/pkger/testdata/bucket_associates_label.yml deleted file mode 100644 index 47937350304..00000000000 --- a/pkger/testdata/bucket_associates_label.yml +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-2 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: rucket-1 -spec: - associations: - - kind: Label - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: rucket-2 -spec: - associations: - - kind: Label - name: label-2 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: rucket-3 -spec: - associations: - - kind: Label - name: label-1 - - kind: Label - name: label-2 diff --git a/pkger/testdata/bucket_associates_labels.jsonnet b/pkger/testdata/bucket_associates_labels.jsonnet deleted file mode 100644 index 043f4516d20..00000000000 --- a/pkger/testdata/bucket_associates_labels.jsonnet +++ /dev/null @@ -1,38 +0,0 @@ -local Label(name, desc, color) = { - apiVersion: 'influxdata.com/v2alpha1', - kind: 'Label', - metadata: { - name: name - }, - spec: { - description: desc, - color: color - } -}; - -local LabelAssociations(names=[]) = [ - {kind: 'Label', name: name} - for name in names -]; - -local Bucket(name, desc, secs, associations=LabelAssociations(['label-1'])) = { - apiVersion: 'influxdata.com/v2alpha1', - kind: 'Bucket', - metadata: { - name: name - }, - spec: { - description: desc, - retentionRules: [ - {type: 'expire', everySeconds: secs} - ], - associations: associations - } -}; - -[ - Label("label-1",desc="desc_1", color='#eee888'), - Bucket(name="rucket-1", desc="desc_1", secs=10000), - Bucket("rucket-2", 
"desc-2", 20000), - Bucket("rucket-3", "desc_3", 30000), -] diff --git a/pkger/testdata/bucket_ref.yml b/pkger/testdata/bucket_ref.yml deleted file mode 100644 index 6870fc65172..00000000000 --- a/pkger/testdata/bucket_ref.yml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: - envRef: - key: meta-name - default: meta -spec: - name: - envRef: - key: spec-name - default: spectacles - associations: - - kind: Label - name: - envRef: - key: label-meta-name ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: - envRef: - key: label-meta-name diff --git a/pkger/testdata/bucket_schema.yml b/pkger/testdata/bucket_schema.yml deleted file mode 100644 index 624be3f1373..00000000000 --- a/pkger/testdata/bucket_schema.yml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: explicit-11 -spec: - measurementSchemas: - - columns: - - name: time - type: timestamp - - name: host - type: tag - - dataType: float - name: usage_user - type: field - name: cpu - name: my_explicit - schemaType: explicit diff --git a/pkger/testdata/checks.json b/pkger/testdata/checks.json deleted file mode 100644 index 0b67dca5d5d..00000000000 --- a/pkger/testdata/checks.json +++ /dev/null @@ -1,99 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Label", - "metadata": { - "name": "label-1" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "CheckThreshold", - "metadata": { - "name": "check-0" - }, - "spec": { - "description": "desc_0", - "every": "1m", - "offset": "15s", - "query": "from(bucket: \"rucket_1\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"cpu\")\n |> filter(fn: (r) => r._field == \"usage_idle\")\n |> aggregateWindow(every: 1m, fn: mean)\n |> yield(name: \"mean\")", - "status": "inactive", - "statusMessageTemplate": "Check: ${ r._check_name } is: ${ r._level }", - "tags": [ - { - "key": "tag_1", - "value": "val_1" - }, - { - "key": "tag_2", - "value": "val_2" - } - ], - "thresholds": [ - { - "type": "greater", - "level": "CRIT", - "value": 50.0, - "allValues": true - }, - { - "type": "lesser", - "level": "warn", - "value": 49.9 - }, - { - "type": "inside_range", - "level": "INfO", - "min": 30.0, - "max": 45.0 - }, - { - "type": "outside_range", - "level": "ok", - "min": 30.0, - "max": 35.0 - } - ], - "associations": [ - { - "kind": "Label", - "name": "label-1" - } - ] - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "CheckDeadman", - "metadata": { - "name": "check-1" - }, - "spec": { - "name": "display name", - "description": "desc_1", - "every": "5m", - "offset": "10s", - "query": "from(bucket: \"rucket_1\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r._measurement == \"cpu\")\n |> filter(fn: (r) => r._field == \"usage_idle\")\n |> aggregateWindow(every: 1m, fn: mean)\n |> yield(name: \"mean\")", - "reportZero": true, - "staleTime": "10m", - "statusMessageTemplate": "Check: ${ r._check_name } is: ${ r._level }", - "tags": [ - { - "key": "tag_1", - "value": "val_1" - }, - { - "key": "tag_2", - "value": "val_2" - } - ], - "timeSince": "90s", - "associations": [ - { - "kind": "Label", - "name": "label-1" - } - ] - } - } -] diff --git a/pkger/testdata/checks.yml b/pkger/testdata/checks.yml deleted file mode 100644 index 5b5a2004b30..00000000000 --- a/pkger/testdata/checks.yml +++ /dev/null @@ -1,77 +0,0 @@ ---- -apiVersion: influxdata.com/v2alpha1 -kind: 
Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: CheckThreshold -metadata: - name: check-0 -spec: - description: desc_0 - every: 1m - offset: 15s - query: > - from(bucket: "rucket_1") - |> range(start: v.timeRangeStart, stop: v.timeRangeStop) - |> filter(fn: (r) => r._measurement == "cpu") - |> filter(fn: (r) => r._field == "usage_idle") - |> aggregateWindow(every: 1m, fn: mean) - |> yield(name: "mean") - status: inactive - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - tags: - - key: tag_1 - value: val_1 - - key: tag_2 - value: val_2 - thresholds: - - type: greater - level: CRIT - value: 50.0 - allValues: true - - type: lesser - level: warn - value: 49.9 - - type: inside_range - level: INfO - min: 30.0 - max: 45.0 - - type: outside_range - level: ok - min: 30.0 - max: 35.0 - associations: - - kind: Label - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: CheckDeadman -metadata: - name: check-1 -spec: - name: display name - description: desc_1 - every: 5m - level: cRiT - offset: 10s - query: > - from(bucket: "rucket_1") - |> range(start: v.timeRangeStart, stop: v.timeRangeStop) - |> filter(fn: (r) => r._measurement == "cpu") - |> filter(fn: (r) => r._field == "usage_idle") - |> aggregateWindow(every: 1m, fn: mean) - |> yield(name: "mean") - reportZero: true - staleTime: 10m - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - tags: - - key: tag_1 - value: val_1 - - key: tag_2 - value: val_2 - timeSince: 90s - associations: - - kind: Label - name: label-1 diff --git a/pkger/testdata/checks_ref.yml b/pkger/testdata/checks_ref.yml deleted file mode 100644 index 4e1590655ab..00000000000 --- a/pkger/testdata/checks_ref.yml +++ /dev/null @@ -1,33 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: CheckThreshold -metadata: - name: - envRef: - key: meta-name - default: meta -spec: - name: - envRef: - key: spec-name - default: spectacles - every: 1m - offset: 15s - query: | - from(bucket: "rucket_1") - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - thresholds: - - type: greater - level: CRIT - value: 50.0 - associations: - - kind: Label - name: - envRef: - key: label-meta-name ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: - envRef: - key: label-meta-name diff --git a/pkger/testdata/dashboard.json b/pkger/testdata/dashboard.json deleted file mode 100644 index 8b9f7ee256b..00000000000 --- a/pkger/testdata/dashboard.json +++ /dev/null @@ -1,57 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Dashboard", - "metadata": { - "name": "dash-1" - }, - "spec": { - "name": "display name", - "description": "desc1", - "charts": [ - { - "kind": "Single_Stat", - "name": "single stat", - "prefix": "sumtin", - "tickPrefix": "true", - "suffix": "days", - "tickSuffix": "true", - "note": "single stat note", - "noteOnEmpty": true, - "xPos": 1, - "yPos": 2, - "width": 6, - "height": 3, - "decimalPlaces": 1, - "shade": true, - "hoverDimension": "y", - "xColumn": "_time", - "yColumn": "_value", - "queries": [ - { - "query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == \"processes\") |> filter(fn: (r) => r._field == \"running\" or r._field == \"blocked\") |> aggregateWindow(every: v.windowPeriod, fn: max) |> yield(name: \"max\")" - } - ], - "colors": [ - { - "name": "laser", - "type": "text", - "hex": "#8F8AF4", - "value": 3 - } - ] - } - ] - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": 
"Dashboard", - "metadata": { - "name": "dash-2" - }, - "spec": { - "description": "desc" - } - } -] diff --git a/pkger/testdata/dashboard.yml b/pkger/testdata/dashboard.yml deleted file mode 100644 index f4b8ded2023..00000000000 --- a/pkger/testdata/dashboard.yml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - name: display name - description: desc1 - charts: - - kind: Single_Stat - name: single stat - prefix: sumtin - tickPrefix: "true" - suffix: days - tickSuffix: "true" - note: single stat note - noteOnEmpty: true - xPos: 1 - yPos: 2 - width: 6 - height: 3 - decimalPlaces: 1 - shade: true - hoverDimension: "y" - queries: - - query: 'from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "processes") |> filter(fn: (r) => r._field == "running" or r._field == "blocked") |> aggregateWindow(every: v.windowPeriod, fn: max) |> yield(name: "max")' - colors: - - name: laser - type: text - hex: "#8F8AF4" - value: 3 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-2 -spec: - description: desc diff --git a/pkger/testdata/dashboard_associates_label.json b/pkger/testdata/dashboard_associates_label.json deleted file mode 100644 index 40cc51ae8fb..00000000000 --- a/pkger/testdata/dashboard_associates_label.json +++ /dev/null @@ -1,35 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Label", - "metadata": { - "name": "label-1" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Label", - "metadata": { - "name": "label-2" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Dashboard", - "metadata": { - "name": "dash-1" - }, - "spec": { - "associations": [ - { - "kind": "Label", - "name": "label-1" - }, - { - "kind": "Label", - "name": "label-2" - } - ] - } - } -] diff --git a/pkger/testdata/dashboard_associates_label.yml b/pkger/testdata/dashboard_associates_label.yml deleted file mode 100644 index 63a8e3c4933..00000000000 --- a/pkger/testdata/dashboard_associates_label.yml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-2 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - associations: - - kind: Label - name: label-1 - - kind: Label - name: label-2 diff --git a/pkger/testdata/dashboard_band.yml b/pkger/testdata/dashboard_band.yml deleted file mode 100644 index fe13758ed21..00000000000 --- a/pkger/testdata/dashboard_band.yml +++ /dev/null @@ -1,73 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: a dashboard w/ single band chart - charts: - - kind: Band - name: band chart - note: band note - noteOnEmpty: true - prefix: sumtin - suffix: days - xPos: 1 - yPos: 2 - xCol: _time - yCol: _value - upperColumn: foo - mainColumn: baz - lowerColumn: bar - hoverDimension: "y" - geom: line - width: 6 - height: 3 - generateXAxisTicks: - - xTotalTicks - - xTickStart - - xTickStep - xTotalTicks: 15 - xTickStart: 0 - xTickStep: 1000 - generateYAxisTicks: - - yTotalTicks - - yTickStart - - yTickStep - yTotalTicks: 10 - yTickStart: 0 - yTickStep: 100 - legendColorizeRows: true - legendHide: false - legendOpacity: 1.0 - legendOrientationThreshold: 5 - staticLegend: - colorizeRows: true - heightRatio: 0.2 - show: true - opacity: 1.0 - orientationThreshold: 5 - valueAxis: "y" - widthRatio: 1.0 
- queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - name: laser - type: scale - hex: "#8F8AF4" - value: 3 - axes: - - name: "x" - label: x_label - prefix: x_prefix - suffix: x_suffix - domain: - - 0 - - 10 - - name: "y" - label: y_label - prefix: y_prefix - suffix: y_suffix - domain: - - 0 - - 100 diff --git a/pkger/testdata/dashboard_gauge.json b/pkger/testdata/dashboard_gauge.json deleted file mode 100644 index f44f1016797..00000000000 --- a/pkger/testdata/dashboard_gauge.json +++ /dev/null @@ -1,56 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Dashboard", - "metadata": { - "name": "dash-1" - }, - "spec": { - "description": "desc1", - "charts": [ - { - "kind": "gauge", - "name": "gauge", - "prefix": "prefix", - "tickPrefix": "true", - "suffix": "suffix", - "tickSuffix": "false", - "note": "gauge note", - "noteOnEmpty": true, - "xPos": 1, - "yPos": 2, - "width": 6, - "height": 3, - "decimalPlaces": 1, - "xColumn": "_time", - "yColumn": "_value", - "queries": [ - { - "query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == \"boltdb_writes_total\") |> filter(fn: (r) => r._field == \"counter\")" - } - ], - "colors": [ - { - "name": "laser", - "type": "min", - "hex": "#8F8AF4", - "value": 0 - }, - { - "name": "pool", - "type": "threshold", - "hex": "#F4CF31", - "value": 700 - }, - { - "name": "comet", - "type": "max", - "hex": "#F4CF31", - "value": 5000 - } - ] - } - ] - } - } -] diff --git a/pkger/testdata/dashboard_gauge.yml b/pkger/testdata/dashboard_gauge.yml deleted file mode 100644 index 277f48ffda3..00000000000 --- a/pkger/testdata/dashboard_gauge.yml +++ /dev/null @@ -1,33 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: gauge - name: gauge - note: gauge note - noteOnEmpty: true - xPos: 1 - yPos: 2 - width: 6 - height: 3 - tickPrefix: "true" - tickSuffix: "false" - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_writes_total") |> filter(fn: (r) => r._field == "counter") - colors: - - name: laser - type: min - hex: "#8F8AF4" - value: 0 - - name: laser - type: threshold - hex: "#8F8AF4" - value: 700 - - name: laser - type: max - hex: "#8F8AF4" - value: 5000 diff --git a/pkger/testdata/dashboard_geo.json b/pkger/testdata/dashboard_geo.json deleted file mode 100644 index 26abaae8c83..00000000000 --- a/pkger/testdata/dashboard_geo.json +++ /dev/null @@ -1,69 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Dashboard", - "metadata": { - "name": "dash-geo" - }, - "spec": { - "charts": [ - { - "geoLayers": [ - { - "layerType": "circleMap", - "blur": 1, - "colorDimension": { - "name": "color" - }, - "intensityDimension": { - "name": "intensity" - }, - "intensityField": "count", - "radius": 5, - "radiusDimension": { - "name": "radius", - "prefix": "$", - "suffix": "%" - }, - "radiusField": "radius", - "speed": 1, - "trackWidth": 4, - "viewColors": [ - { - "name": "c1", - "type": "min", - "hex": "#8F8AF4", - "value": 0 - }, - { - "name": "c2", - "type": "max", - "hex": "#8F8AF4", - "value": 1606322562 - } - ] - } - ], - "height": 4, - "kind": "Geo", - "lat": 10, - 
"lon": -4, - "name": "view name", - "note": "a note", - "noteOnEmpty": true, - "queries": [ - { - "query": "from(v.bucket) |\u003e count()" - } - ], - "width": 3, - "xPos": 1, - "yPos": 2, - "zoom": 4 - } - ], - "description": "desc", - "name": "new name" - } - } -] diff --git a/pkger/testdata/dashboard_geo.yml b/pkger/testdata/dashboard_geo.yml deleted file mode 100644 index 54f8747defb..00000000000 --- a/pkger/testdata/dashboard_geo.yml +++ /dev/null @@ -1,46 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-geo -spec: - charts: - - geoLayers: - - blur: 1 - colorDimension: - name: color - intensityDimension: - name: intensity - intensityField: count - layerType: circleMap - radius: 5 - radiusDimension: - name: radius - prefix: $ - suffix: '%' - radiusField: radius - speed: 1 - trackWidth: 4 - viewColors: - - name: "c1" - type: min - hex: "#8F8AF4" - value: 0 - - name: "c2" - type: max - hex: "#8F8AF4" - value: 1.606322693e+09 - height: 4 - kind: Geo - lat: 10 - lon: -4 - name: view name - note: a note - noteOnEmpty: true - queries: - - query: from(v.bucket) |> count() - width: 3 - xPos: 1 - yPos: 2 - zoom: 4 - description: desc - name: new name diff --git a/pkger/testdata/dashboard_heatmap.json b/pkger/testdata/dashboard_heatmap.json deleted file mode 100644 index a97f26e876b..00000000000 --- a/pkger/testdata/dashboard_heatmap.json +++ /dev/null @@ -1,104 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Dashboard", - "metadata": { - "name": "dash-0" - }, - "spec": { - "description": "a dashboard w/ heatmap chart", - "charts": [ - { - "kind": "heatmap", - "name": "heatmap chart", - "note": "heatmap note", - "noteOnEmpty": true, - "xPos": 1, - "yPos": 2, - "width": 6, - "height": 3, - "xCol": "_time", - "generateXAxisTicks": ["xTotalTicks", "xTickStart", "xTickStep"], - "xTotalTicks": 15, - "xTickStart": 0, - "xTickStep": 1000, - "yCol": "_value", - "generateYAxisTicks": ["yTotalTicks", "yTickStart", "yTickStep"], - "yTotalTicks": 10, - "yTickStart": 0, - "yTickStep": 100, - "binSize": 10, - "legendColorizeRows": true, - "legendHide": false, - "legendOpacity": 1.0, - "legendOrientationThreshold": 5, - "queries": [ - { - "query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == \"mem\") |> filter(fn: (r) => r._field == \"used_percent\") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: \"mean\")" - } - ], - "axes": [ - { - "name": "x", - "label": "x_label", - "prefix": "x_prefix", - "suffix": "x_suffix", - "domain": [ - 0, - 10 - ] - }, - { - "name": "y", - "label": "y_label", - "prefix": "y_prefix", - "suffix": "y_suffix", - "domain": [ - 0, - 100 - ] - } - ], - "colors": [ - { - "hex": "#000004" - }, - { - "hex": "#110a30" - }, - { - "hex": "#320a5e" - }, - { - "hex": "#57106e" - }, - { - "hex": "#781c6d" - }, - { - "hex": "#9a2865" - }, - { - "hex": "#bc3754" - }, - { - "hex": "#d84c3e" - }, - { - "hex": "#ed6925" - }, - { - "hex": "#f98e09" - }, - { - "hex": "#fbb61a" - }, - { - "hex": "#f4df53" - } - ] - } - ] - } - } -] \ No newline at end of file diff --git a/pkger/testdata/dashboard_heatmap.yml b/pkger/testdata/dashboard_heatmap.yml deleted file mode 100644 index 32e61212a95..00000000000 --- a/pkger/testdata/dashboard_heatmap.yml +++ /dev/null @@ -1,67 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-0 -spec: - description: a dashboard w/ heatmap chart - charts: - - kind: heatmap - name: heatmap - note: 
heatmap note - noteOnEmpty: true - xPos: 1 - yPos: 2 - width: 6 - height: 3 - binSize: 10 - legendColorizeRows: true - legendHide: false - legendOpacity: 1.0 - legendOrientationThreshold: 5 - generateXAxisTicks: - - xTotalTicks - - xTickStart - - xTickStep - xCol: _time - xTotalTicks: 15 - xTickStart: 0 - xTickStep: 1000 - yCol: _value - generateYAxisTicks: - - yTotalTicks - - yTickStart - - yTickStep - yTotalTicks: 10 - yTickStart: 0 - yTickStep: 100 - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - hex: "#000004" - - hex: "#110a30" - - hex: "#320a5e" - - hex: "#57106e" - - hex: "#781c6d" - - hex: "#9a2865" - - hex: "#bc3754" - - hex: "#d84c3e" - - hex: "#ed6925" - - hex: "#f98e09" - - hex: "#fbb61a" - - hex: "#f4df53" - axes: - - name: "x" - label: "x_label" - prefix: "x_prefix" - suffix: "x_suffix" - domain: - - 0 - - 10 - - name: "y" - label: "y_label" - prefix: "y_prefix" - suffix: "y_suffix" - domain: - - 0 - - 100 diff --git a/pkger/testdata/dashboard_histogram.json b/pkger/testdata/dashboard_histogram.json deleted file mode 100644 index b5bf158c6bf..00000000000 --- a/pkger/testdata/dashboard_histogram.json +++ /dev/null @@ -1,68 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Dashboard", - "metadata": { - "name": "dash-0" - }, - "spec": { - "description": "a dashboard w/ single histogram chart", - "charts": [ - { - "kind": "histogram", - "name": "histogram chart", - "note": "histogram note", - "noteOnEmpty": true, - "width": 6, - "height": 3, - "xCol": "_value", - "position": "stacked", - "binCount": 30, - "legendColorizeRows": true, - "legendHide": false, - "legendOpacity": 1.0, - "legendOrientationThreshold": 5, - "fillColumns": [ - "a", - "b" - ], - "queries": [ - { - "query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == \"boltdb_reads_total\") |> filter(fn: (r) => r._field == \"counter\")" - } - ], - "axes": [ - { - "name": "x", - "label": "x_label", - "domain": [ - 0, - 10 - ] - } - ], - "colors": [ - { - "name": "mycolor", - "type": "scale", - "hex": "#8F8AF4", - "value": 0 - }, - { - "name": "mycolor", - "type": "scale", - "hex": "#F4CF31", - "value": 0 - }, - { - "name": "mycolor", - "type": "scale", - "hex": "#FFFFFF", - "value": 0 - } - ] - } - ] - } - } -] \ No newline at end of file diff --git a/pkger/testdata/dashboard_histogram.yml b/pkger/testdata/dashboard_histogram.yml deleted file mode 100644 index 9ca90298c87..00000000000 --- a/pkger/testdata/dashboard_histogram.yml +++ /dev/null @@ -1,42 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-0 -spec: - description: a dashboard w/ single histogram chart - charts: - - kind: Histogram - name: histogram chart - note: histogram note - noteOnEmpty: true - xCol: _value - width: 6 - height: 3 - binCount: 30 - fillColumns: ["a", "b"] - legendColorizeRows: true - legendHide: false - legendOpacity: 1.0 - legendOrientationThreshold: 5 - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_reads_total") |> filter(fn: (r) => r._field == "counter") - colors: - - hex: "#8F8AF4" - type: scale - value: 0 - name: mycolor - - hex: "#F4CF31" - type: scale - value: 0 - name: 
mycolor - - hex: "#FFFFFF" - type: scale - value: 0 - name: mycolor - axes: - - name : "x" - label: x_label - domain: - - 0 - - 10 diff --git a/pkger/testdata/dashboard_markdown.json b/pkger/testdata/dashboard_markdown.json deleted file mode 100644 index 54613a52dcb..00000000000 --- a/pkger/testdata/dashboard_markdown.json +++ /dev/null @@ -1,19 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Dashboard", - "metadata": { - "name": "dash-0" - }, - "spec": { - "description": "a dashboard w/ single markdown chart", - "charts": [ - { - "kind": "markdown", - "name": "markdown chart", - "note": "## markdown note" - } - ] - } - } -] diff --git a/pkger/testdata/dashboard_markdown.yml b/pkger/testdata/dashboard_markdown.yml deleted file mode 100644 index afd568299f6..00000000000 --- a/pkger/testdata/dashboard_markdown.yml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-0 -spec: - description: a dashboard w/ single markdown chart - charts: - - kind: Markdown - name: markdown chart - note: "## markdown note" diff --git a/pkger/testdata/dashboard_mosaic.yml b/pkger/testdata/dashboard_mosaic.yml deleted file mode 100644 index d412aca4122..00000000000 --- a/pkger/testdata/dashboard_mosaic.yml +++ /dev/null @@ -1,59 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-0 -spec: - description: a dashboard w/ single mosaic chart - charts: - - kind: Mosaic - name: mosaic chart - note: mosaic note - noteOnEmpty: true - prefix: sumtin - suffix: days - xPos: 1 - yPos: 2 - generateXAxisTicks: - - xTotalTicks - - xTickStart - - xTickStep - xCol: _time - hoverDimension: "y" - xTotalTicks: 15 - xTickStart: 0 - xTickStep: 1000 - yCol: _value - width: 6 - height: 3 - yLabelColumnSeparator: "," - yLabelColumns: - - foo - ySeriesColumns: - - _value - - foo - legendColorizeRows: true - legendHide: false - legendOpacity: 1.0 - legendOrientationThreshold: 5 - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - hex: "#8F8AF4" - - hex: "#F4CF31" - - hex: "#FFFFFF" - axes: - - name : "x" - label: x_label - prefix: x_prefix - suffix: x_suffix - domain: - - 0 - - 10 - - name: "y" - label: y_label - prefix: y_prefix - suffix: y_suffix - domain: - - 0 - - 100 diff --git a/pkger/testdata/dashboard_params.yml b/pkger/testdata/dashboard_params.yml deleted file mode 100644 index f83a1e26185..00000000000 --- a/pkger/testdata/dashboard_params.yml +++ /dev/null @@ -1,49 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - charts: - - kind: Single_Stat - name: single stat - xPos: 1 - yPos: 2 - width: 6 - height: 3 - queries: - - query: | - option params = { - bucket: "foo", - start: -1d, - stop: now(), - name: "max", - floatVal: 1.0, - minVal: 10 - } - - from(bucket: params.bucket) - |> range(start: params.start, stop: params.stop) - |> filter(fn: (r) => r._measurement == "processes") - |> filter(fn: (r) => r.floater == params.floatVal) - |> filter(fn: (r) => r._value > params.minVal) - |> aggregateWindow(every: v.windowPeriod, fn: max) - |> yield(name: params.name) - params: - - key: bucket - default: "bar" - type: string - - key: start - type: duration - - key: stop - type: time - - key: floatVal - default: 37.2 - type: float - - key: minVal - type: 
int - - key: name # infer type - colors: - - name: laser - type: text - hex: "#8F8AF4" - value: 3 diff --git a/pkger/testdata/dashboard_ref.yml b/pkger/testdata/dashboard_ref.yml deleted file mode 100644 index 76cafe52748..00000000000 --- a/pkger/testdata/dashboard_ref.yml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: - envRef: - key: meta-name - default: meta -spec: - name: - envRef: - key: spec-name - default: spectacles - associations: - - kind: Label - name: - envRef: - key: label-meta-name ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: - envRef: - key: label-meta-name diff --git a/pkger/testdata/dashboard_scatter.json b/pkger/testdata/dashboard_scatter.json deleted file mode 100644 index 7b00fe6564b..00000000000 --- a/pkger/testdata/dashboard_scatter.json +++ /dev/null @@ -1,76 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Dashboard", - "metadata": { - "name": "dash-0" - }, - "spec": { - "description": "a dashboard w/ single scatter chart", - "charts": [ - { - "kind": "scatter", - "name": "scatter chart", - "note": "scatter note", - "noteOnEmpty": true, - "xPos": 1, - "yPos": 2, - "width": 6, - "height": 3, - "generateXAxisTicks": ["xTotalTicks", "xTickStart", "xTickStep"], - "xCol": "_time", - "xTotalTicks": 15, - "xTickStart": 0, - "xTickStep": 1000, - "yCol": "_value", - "generateYAxisTicks": ["yTotalTicks", "yTickStart", "yTickStep"], - "yTotalTicks": 10, - "yTickStart": 0, - "yTickStep": 100, - "legendColorizeRows": true, - "legendHide": false, - "legendOpacity": 1.0, - "legendOrientationThreshold": 5, - "queries": [ - { - "query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == \"mem\") |> filter(fn: (r) => r._field == \"used_percent\") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: \"mean\")" - } - ], - "axes": [ - { - "name": "x", - "label": "x_label", - "prefix": "x_prefix", - "suffix": "x_suffix", - "domain": [ - 0, - 10 - ] - }, - { - "name": "y", - "label": "y_label", - "prefix": "y_prefix", - "suffix": "y_suffix", - "domain": [ - 0, - 100 - ] - } - ], - "colors": [ - { - "hex": "#8F8AF4" - }, - { - "hex": "#F4CF31" - }, - { - "hex": "#FFFFFF" - } - ] - } - ] - } - } -] \ No newline at end of file diff --git a/pkger/testdata/dashboard_scatter.yml b/pkger/testdata/dashboard_scatter.yml deleted file mode 100644 index c7d67f791a6..00000000000 --- a/pkger/testdata/dashboard_scatter.yml +++ /dev/null @@ -1,59 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-0 -spec: - description: a dashboard w/ single scatter chart - charts: - - kind: Scatter - name: scatter chart - note: scatter note - noteOnEmpty: true - prefix: sumtin - suffix: days - xPos: 1 - yPos: 2 - generateXAxisTicks: - - xTotalTicks - - xTickStart - - xTickStep - xCol: _time - xTotalTicks: 15 - xTickStart: 0 - xTickStep: 1000 - yCol: _value - generateYAxisTicks: - - yTotalTicks - - yTickStart - - yTickStep - yTotalTicks: 10 - yTickStart: 0 - yTickStep: 100 - width: 6 - height: 3 - legendColorizeRows: true - legendHide: false - legendOpacity: 1.0 - legendOrientationThreshold: 5 - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - hex: "#8F8AF4" - - hex: 
"#F4CF31" - - hex: "#FFFFFF" - axes: - - name : "x" - label: x_label - prefix: x_prefix - suffix: x_suffix - domain: - - 0 - - 10 - - name: "y" - label: y_label - prefix: y_prefix - suffix: y_suffix - domain: - - 0 - - 100 diff --git a/pkger/testdata/dashboard_single_stat_plus_line.json b/pkger/testdata/dashboard_single_stat_plus_line.json deleted file mode 100644 index 113d68a034f..00000000000 --- a/pkger/testdata/dashboard_single_stat_plus_line.json +++ /dev/null @@ -1,92 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Dashboard", - "metadata": { - "name": "dash-1" - }, - "spec": { - "description": "desc1", - "charts": [ - { - "kind": "Single_Stat_Plus_Line", - "name": "single stat plus line", - "prefix": "sumtin", - "suffix": "days", - "note": "single stat plus line note", - "noteOnEmpty": true, - "xPos": 1, - "yPos": 2, - "width": 6, - "height": 3, - "decimalPlaces": 1, - "shade": true, - "hoverDimension": "y", - "generateXAxisTicks": ["xTotalTicks", "xTickStart", "xTickStep"], - "xColumn": "_time", - "xTotalTicks": 15, - "xTickStart": 0, - "xTickStep": 1000, - "yColumn": "_value", - "generateYAxisTicks": ["yTotalTicks", "yTickStart", "yTickStep"], - "yTotalTicks": 10, - "yTickStart": 0, - "yTickStep": 100, - "position": "overlaid", - "legendColorizeRows": true, - "legendHide": false, - "legendOpacity": 1.0, - "legendOrientationThreshold": 5, - "staticLegend": { - "colorizeRows": true, - "heightRatio": 0.2, - "show": true, - "opacity": 1.0, - "orientationThreshold": 5, - "valueAxis": "y", - "widthRatio": 1.0 - }, - "queries": [ - { - "query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == \"mem\") |> filter(fn: (r) => r._field == \"used_percent\") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: \"mean\")" - } - ], - "colors": [ - { - "id": "base", - "name": "laser", - "type": "text", - "hex": "#8F8AF4", - "value": 3 - }, - { - "id": "base", - "name": "android", - "type": "scale", - "hex": "#F4CF31", - "value": 1 - } - ], - "axes": [ - { - "name": "x", - "label": "x_label", - "prefix": "x_prefix", - "suffix": "x_suffix", - "base": "10", - "scale": "linear" - }, - { - "name": "y", - "label": "y_label", - "prefix": "y_prefix", - "suffix": "y_suffix", - "base": "10", - "scale": "linear" - } - ] - } - ] - } - } -] \ No newline at end of file diff --git a/pkger/testdata/dashboard_single_stat_plus_line.yml b/pkger/testdata/dashboard_single_stat_plus_line.yml deleted file mode 100644 index 1f8edb6ee8b..00000000000 --- a/pkger/testdata/dashboard_single_stat_plus_line.yml +++ /dev/null @@ -1,74 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: Single_Stat_Plus_Line - name: single stat plus line - note: single stat plus line note - noteOnEmpty: true - decimalPlaces: 1 - prefix: sumtin - suffix: days - xPos: 1 - yPos: 2 - width: 6 - height: 3 - shade: true - hoverDimension: "y" - position: overlaid - generateXAxisTicks: - - xTotalTicks - - xTickStart - - xTickStep - xTotalTicks: 15 - xTickStart: 0 - xTickStep: 1000 - generateYAxisTicks: - - yTotalTicks - - yTickStart - - yTickStep - yTotalTicks: 10 - yTickStart: 0 - yTickStep: 100 - legendColorizeRows: true - legendHide: false - legendOpacity: 1.0 - legendOrientationThreshold: 5 - staticLegend: - colorizeRows: true - heightRatio: 0.2 - show: true - opacity: 1.0 - orientationThreshold: 5 - valueAxis: "y" - widthRatio: 1.0 - queries: - - query: > - 
from(bucket: v.bucket) |> range(start: v.timeRangeStart) |> filter(fn: (r) => r._measurement == "mem") |> filter(fn: (r) => r._field == "used_percent") |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false) |> yield(name: "mean") - colors: - - name: laser - id: "base" - type: text - hex: "#8F8AF4" - value: 3 - - name: android - id: "base" - type: scale - hex: "#F4CF31" - value: 1 - axes: - - name: "x" - label: x_label - prefix: x_prefix - suffix: x_suffix - base: 10 - scale: linear - - name: "y" - label: y_label - prefix: y_prefix - suffix: y_suffix - base: 10 - scale: linear diff --git a/pkger/testdata/dashboard_table.json b/pkger/testdata/dashboard_table.json deleted file mode 100644 index ddb51dc533c..00000000000 --- a/pkger/testdata/dashboard_table.json +++ /dev/null @@ -1,57 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Dashboard", - "metadata": { - "name": "dash-1" - }, - "spec": { - "description": "desc1", - "charts": [ - { - "kind": "table", - "name": "table", - "note": "table note", - "noteOnEmpty": true, - "xPos": 1, - "yPos": 2, - "width": 6, - "height": 3, - "decimalPlaces": 1, - "queries": [ - { - "query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == \"boltdb_writes_total\") |> filter(fn: (r) => r._field == \"counter\")" - } - ], - "colors": [ - { - "name": "laser", - "type": "min", - "hex": "#8F8AF4", - "value": 3.0 - } - ], - "fieldOptions": [ - { - "fieldName": "_value", - "displayName": "MB", - "visible": true - }, - { - "fieldName": "_time", - "displayName": "time (ms)", - "visible": true - } - ], - "tableOptions": { - "verticalTimeAxis": true, - "sortBy": "_time", - "wrapping": "truncate", - "fixFirstColumn": true - }, - "timeFormat": "YYYY:MMMM:DD" - } - ] - } - } -] diff --git a/pkger/testdata/dashboard_table.yml b/pkger/testdata/dashboard_table.yml deleted file mode 100644 index e332146027f..00000000000 --- a/pkger/testdata/dashboard_table.yml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: Table - name: table - note: table note - noteOnEmpty: true - decimalPlaces: 1 - xPos: 1 - yPos: 2 - width: 6 - height: 3 - fieldOptions: - - fieldName: _time - displayName: time (ms) - visible: true - - fieldName: _value - displayName: MB - visible: true - tableOptions: - verticalTimeAxis: true - sortBy: _time - wrapping: truncate - fixFirstColumn: true - timeFormat: YYYY:MMMM:DD - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_writes_total") |> filter(fn: (r) => r._field == "counter") - colors: - - name: laser - type: min - hex: "#8F8AF4" - value: 3.0 diff --git a/pkger/testdata/dashboard_xy.json b/pkger/testdata/dashboard_xy.json deleted file mode 100644 index 80c84d723c2..00000000000 --- a/pkger/testdata/dashboard_xy.json +++ /dev/null @@ -1,84 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Dashboard", - "metadata": { - "name": "dash-1" - }, - "spec": { - "description": "desc1", - "charts": [ - { - "kind": "XY", - "name": "xy chart", - "prefix": "sumtin", - "note": "xy chart note", - "noteOnEmpty": true, - "xPos": 1, - "yPos": 2, - "width": 6, - "height": 3, - "decimalPlaces": 1, - "position": "stacked", - "shade": true, - "hoverDimension": "y", - "generateXAxisTicks": ["xTotalTicks", "xTickStart", "xTickStep"], - "xColumn": "_time", 
- "xTotalTicks": 15, - "xTickStart": 0, - "xTickStep": 1000, - "yColumn": "_value", - "generateYAxisTicks": ["yTotalTicks", "yTickStart", "yTickStep"], - "yTotalTicks": 10, - "yTickStart": 0, - "yTickStep": 100, - "legendColorizeRows": true, - "legendHide": false, - "legendOpacity": 1.0, - "legendOrientationThreshold": 5, - "staticLegend": { - "colorizeRows": true, - "heightRatio": 0.2, - "show": true, - "opacity": 1.0, - "orientationThreshold": 5, - "valueAxis": "y", - "widthRatio": 1.0 - }, - "queries": [ - { - "query": "from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == \"boltdb_writes_total\") |> filter(fn: (r) => r._field == \"counter\")" - } - ], - "colors": [ - { - "name": "laser", - "type": "scale", - "hex": "#8F8AF4", - "value": 3 - } - ], - "axes": [ - { - "name": "x", - "label": "x_label", - "prefix": "x_prefix", - "suffix": "x_suffix", - "base": "10", - "scale": "linear" - }, - { - "name": "y", - "label": "y_label", - "prefix": "y_prefix", - "suffix": "y_suffix", - "base": "10", - "scale": "linear" - } - ], - "geom": "line" - } - ] - } - } -] \ No newline at end of file diff --git a/pkger/testdata/dashboard_xy.yml b/pkger/testdata/dashboard_xy.yml deleted file mode 100644 index 0cb05845cb6..00000000000 --- a/pkger/testdata/dashboard_xy.yml +++ /dev/null @@ -1,66 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: dash-1 -spec: - description: desc1 - charts: - - kind: XY - name: xy chart - note: xy chart note - noteOnEmpty: true - xPos: 1 - yPos: 2 - width: 6 - height: 3 - shade: true - hoverDimension: "y" - geom: line - position: stacked - generateXAxisTicks: - - xTotalTicks - - xTickStart - - xTickStep - xTotalTicks: 15 - xTickStart: 0 - xTickStep: 1000 - generateYAxisTicks: - - yTotalTicks - - yTickStart - - yTickStep - yTotalTicks: 10 - yTickStart: 0 - yTickStep: 100 - legendColorizeRows: true - legendHide: false - legendOpacity: 1.0 - legendOrientationThreshold: 5 - staticLegend: - colorizeRows: true - heightRatio: 0.2 - show: true - opacity: 1.0 - orientationThreshold: 5 - valueAxis: "y" - widthRatio: 1.0 - queries: - - query: > - from(bucket: v.bucket) |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "boltdb_writes_total") |> filter(fn: (r) => r._field == "counter") - colors: - - name: laser - type: scale - hex: "#8F8AF4" - value: 3 - axes: - - name: "x" - label: x_label - prefix: x_prefix - suffix: x_suffix - base: 10 - scale: linear - - name: "y" - label: y_label - prefix: y_prefix - suffix: y_suffix - base: 10 - scale: linear diff --git a/pkger/testdata/env_refs.yml b/pkger/testdata/env_refs.yml deleted file mode 100644 index ac9c6355bd6..00000000000 --- a/pkger/testdata/env_refs.yml +++ /dev/null @@ -1,112 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: - envRef: - key: label-1-name-ref -spec: ---- -apiVersion: influxdata.com/v2alpha1 -kind: Bucket -metadata: - name: - envRef: - key: bkt-1-name-ref -spec: - associations: - - kind: Label - name: - envRef: - key: label-1-name-ref ---- -apiVersion: influxdata.com/v2alpha1 -kind: CheckDeadman -metadata: - name: - envRef: - key: check-1-name-ref -spec: - every: 5m - level: cRiT - query: > - from(bucket: "rucket_1") |> range(start: v.timeRangeStart, stop: v.timeRangeStop) - statusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }" - associations: - - kind: Label - name: - envRef: - key: label-1-name-ref ---- -apiVersion: 
influxdata.com/v2alpha1 -kind: Dashboard -metadata: - name: - envRef: - key: dash-1-name-ref -spec: - associations: - - kind: Label - name: - envRef: - key: label-1-name-ref ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointSlack -metadata: - name: - envRef: - key: endpoint-1-name-ref -spec: - url: https://hooks.slack.com/services/bip/piddy/boppidy ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: - envRef: - key: rule-1-name-ref -spec: - endpointName: - envRef: - key: endpoint-1-name-ref - every: 10m - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - statusRules: - - currentLevel: WARN ---- -apiVersion: influxdata.com/v2alpha1 -kind: Telegraf -metadata: - name: - envRef: - key: telegraf-1-name-ref -spec: - config: | - [agent] - interval = "10s" - [[outputs.influxdb_v2]] - urls = ["http://localhost:8086"] - token = "$INFLUX_TOKEN" - organization = "rg" - bucket = "rucket_3" ---- -apiVersion: influxdata.com/v2alpha1 -kind: Task -metadata: - name: - envRef: - key: task-1-name-ref -spec: - cron: 15 * * * * - query: > - from(bucket: "rucket_1") ---- -apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: - envRef: - key: var-1-name-ref -spec: - type: constant - values: [first val] diff --git a/pkger/testdata/label.json b/pkger/testdata/label.json deleted file mode 100644 index 5038f7e815f..00000000000 --- a/pkger/testdata/label.json +++ /dev/null @@ -1,35 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Label", - "metadata": { - "name": "label-2" - }, - "spec": { - "color": "#000000", - "description": "label 2 description" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Label", - "metadata": { - "name": "label-1" - }, - "spec": { - "color": "#FFFFFF", - "description": "label 1 description" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Label", - "metadata": { - "name": "label-3" - }, - "spec": { - "name": "display name", - "description": "label 3 description" - } - } -] diff --git a/pkger/testdata/label.yml b/pkger/testdata/label.yml deleted file mode 100644 index 80a98cb17f9..00000000000 --- a/pkger/testdata/label.yml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-2 -spec: - color: "#000000" - description: label 2 description ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 -spec: - color: "#FFFFFF" - description: label 1 description ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-3 -spec: - name: display name - description: label 3 description diff --git a/pkger/testdata/label_ref.yml b/pkger/testdata/label_ref.yml deleted file mode 100644 index 9696a24e16f..00000000000 --- a/pkger/testdata/label_ref.yml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: - envRef: - key: meta-name -spec: - name: - envRef: - key: spec-name diff --git a/pkger/testdata/notification_endpoint.json b/pkger/testdata/notification_endpoint.json deleted file mode 100644 index d1cadee1f4f..00000000000 --- a/pkger/testdata/notification_endpoint.json +++ /dev/null @@ -1,112 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Label", - "metadata": { - "name": "label-1" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "NotificationEndpointSlack", - "metadata": { - "name": "slack-notification-endpoint" - }, - "spec":{ 
- "name": "slack name", - "description": "slack desc", - "url": "https://hooks.slack.com/services/bip/piddy/boppidy", - "token": "tokenval", - "status": "active", - "associations": [ - { - "kind": "Label", - "name": "label-1" - } - ] - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "NotificationEndpointHTTP", - "metadata": { - "name": "http-none-auth-notification-endpoint" - }, - "spec":{ - "description": "http none auth desc", - "method": "GET", - "type": "none", - "url": "https://www.example.com/endpoint/noneauth", - "status": "active", - "associations": [ - { - "kind": "Label", - "name": "label-1" - } - ] - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "NotificationEndpointHTTP", - "metadata": { - "name": "http-basic-auth-notification-endpoint" - }, - "spec":{ - "name": "basic endpoint name", - "description": "http basic auth desc", - "method": "POST", - "type": "basic", - "url": "https://www.example.com/endpoint/basicauth", - "username": "secret username", - "password": "secret password", - "status": "inactive", - "associations": [ - { - "kind": "Label", - "name": "label-1" - } - ] - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "NotificationEndpointHTTP", - "metadata": { - "name": "http-bearer-auth-notification-endpoint" - }, - "spec":{ - "description": "http bearer auth desc", - "type": "bearer", - "method": "PUT", - "url": "https://www.example.com/endpoint/bearerauth", - "token": "secret token", - "associations": [ - { - "kind": "Label", - "name": "label-1" - } - ] - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "NotificationEndpointPagerDuty", - "metadata": { - "name": "pager-duty-notification-endpoint" - }, - "spec":{ - "name": "pager duty name", - "description": "pager duty desc", - "url": "http://localhost:8080/orgs/7167eb6719fa34e5/alert-history", - "routingKey": "secret routing-key", - "status": "active", - "associations": [ - { - "kind": "Label", - "name": "label-1" - } - ] - } - } -] diff --git a/pkger/testdata/notification_endpoint.yml b/pkger/testdata/notification_endpoint.yml deleted file mode 100644 index c56f2f428b1..00000000000 --- a/pkger/testdata/notification_endpoint.yml +++ /dev/null @@ -1,77 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointSlack -metadata: - name: slack-notification-endpoint -spec: - name: slack name - description: slack desc - url: https://hooks.slack.com/services/bip/piddy/boppidy - status: active - token: tokenval - associations: - - kind: Label - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointHTTP -metadata: - name: http-none-auth-notification-endpoint -spec: - type: none - description: http none auth desc - method: get - url: https://www.example.com/endpoint/noneauth - status: active - associations: - - kind: Label - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointHTTP -metadata: - name: http-basic-auth-notification-endpoint -spec: - name: basic endpoint name - description: http basic auth desc - type: basic - method: pOsT - url: https://www.example.com/endpoint/basicauth - username: "secret username" - password: "secret password" - status: inactive - associations: - - kind: Label - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointHTTP -metadata: - name: http-bearer-auth-notification-endpoint -spec: - description: http bearer auth desc - type: bearer - method: puT - 
url: https://www.example.com/endpoint/bearerauth - token: "secret token" - associations: - - kind: Label - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointPagerDuty -metadata: - name: pager-duty-notification-endpoint -spec: - name: pager duty name - description: pager duty desc - url: http://localhost:8080/orgs/7167eb6719fa34e5/alert-history - routingKey: "secret routing-key" - status: active - associations: - - kind: Label - name: label-1 diff --git a/pkger/testdata/notification_endpoint_ref.yml b/pkger/testdata/notification_endpoint_ref.yml deleted file mode 100644 index 2cd6837349a..00000000000 --- a/pkger/testdata/notification_endpoint_ref.yml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointSlack -metadata: - name: - envRef: - key: meta-name - default: meta -spec: - name: - envRef: - key: spec-name - default: spectacles - url: https://hooks.slack.com/services/bip/piddy/boppidy - token: tokenval - associations: - - kind: Label - name: - envRef: - key: label-meta-name ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: - envRef: - key: label-meta-name diff --git a/pkger/testdata/notification_endpoint_secrets.yml b/pkger/testdata/notification_endpoint_secrets.yml deleted file mode 100644 index 301a910273f..00000000000 --- a/pkger/testdata/notification_endpoint_secrets.yml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointPagerDuty -metadata: - name: pager-duty-notification-endpoint -spec: - description: pager duty desc - url: http://localhost:8080/orgs/7167eb6719fa34e5/alert-history - routingKey: - secretRef: - key: "routing-key" diff --git a/pkger/testdata/notification_rule.json b/pkger/testdata/notification_rule.json deleted file mode 100644 index 94f97ad6fbc..00000000000 --- a/pkger/testdata/notification_rule.json +++ /dev/null @@ -1,78 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Label", - "metadata": { - "name": "label-1" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Label", - "metadata": { - "name": "label-2" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "NotificationRule", - "metadata": { - "name": "rule-uuid" - }, - "spec": { - "name": "rule_0", - "description": "desc_0", - "channel": "#two-fer-one", - "endpointName": "endpoint-0", - "every": "10m", - "offset": "30s", - "messageTemplate": "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }", - "status": "active", - "statusRules": [ - { - "currentLevel": "warn" - }, - { - "currentLevel": "CRIT", - "previousLevel": "OK" - } - ], - "tagRules": [ - { - "key": "k1", - "value": "v2", - "operator": "EQUAL" - }, - { - "key": "k1", - "value": "v1", - "operator": "EQUAL" - } - ], - "associations": [ - { - "kind": "Label", - "name": "label-1" - }, - - { - "kind": "Label", - "name": "label-2" - } - ] - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "NotificationEndpointSlack", - "metadata": { - "name": "endpoint-0" - }, - "spec": { - "url": "https://hooks.slack.com/services/bip/piddy/boppidy" - } - } -] - - - diff --git a/pkger/testdata/notification_rule.yml b/pkger/testdata/notification_rule.yml deleted file mode 100644 index bf494ab9b3d..00000000000 --- a/pkger/testdata/notification_rule.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label 
-metadata: - name: label-2 ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: rule-uuid -spec: - name: rule_0 - description: desc_0 - channel: "#two-fer-one" - endpointName: endpoint-0 - every: 10m - offset: 30s - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - status: active - statusRules: - - currentLevel: WARN - - currentLevel: CRIT - previousLevel: OK - tagRules: - - key: k1 - value: v2 - operator: eQuAl - - key: k1 - value: v1 - operator: eQuAl - associations: - - kind: Label - name: label-1 - - kind: Label - name: label-2 ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointSlack -metadata: - name: endpoint-0 -spec: - url: https://hooks.slack.com/services/bip/piddy/boppidy diff --git a/pkger/testdata/notification_rule_ref.yml b/pkger/testdata/notification_rule_ref.yml deleted file mode 100644 index c32773c56ee..00000000000 --- a/pkger/testdata/notification_rule_ref.yml +++ /dev/null @@ -1,41 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: NotificationRule -metadata: - name: - envRef: - key: meta-name - default: meta -spec: - name: - envRef: - key: spec-name - default: spectacles - endpointName: - envRef: - key: endpoint-meta-name - every: 10m - offset: 30s - messageTemplate: "Notification Rule: ${ r._notification_rule_name } triggered by check: ${ r._check_name }: ${ r._message }" - statusRules: - - currentLevel: WARN - associations: - - kind: Label - name: - envRef: - key: label-meta-name ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: - envRef: - key: label-meta-name ---- -apiVersion: influxdata.com/v2alpha1 -kind: NotificationEndpointSlack -metadata: - name: - envRef: - key: endpoint-meta-name -spec: - url: https://hooks.slack.com/services/bip/piddy/boppidy diff --git a/pkger/testdata/remote_bucket.json b/pkger/testdata/remote_bucket.json deleted file mode 100644 index 6fd9a25ba3c..00000000000 --- a/pkger/testdata/remote_bucket.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Bucket", - "metadata": { - "name": "rucket-11" - }, - "spec": { - "description": "bucket 1 description" - } - } -] diff --git a/pkger/testdata/task_ref.yml b/pkger/testdata/task_ref.yml deleted file mode 100644 index ce8f0c62736..00000000000 --- a/pkger/testdata/task_ref.yml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Task -metadata: - name: - envRef: - key: meta-name - default: meta -spec: - name: - envRef: - key: spec-name - default: spectacles - every: 10m - offset: 15s - query: > - from(bucket: "rucket_1") - associations: - - kind: Label - name: - envRef: - key: label-meta-name ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: - envRef: - key: label-meta-name diff --git a/pkger/testdata/task_v2.yml b/pkger/testdata/task_v2.yml deleted file mode 100644 index 8967f17b3c5..00000000000 --- a/pkger/testdata/task_v2.yml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: influxdata.com/v2alpha2 -kind: Task -metadata: - name: task-1 -spec: - description: desc_1 - query: > - option task = { name: "bar", every: 1m, offset: 3m } - from(bucket: "rucket_1") - |> range(start: -5d, stop: -1h) - |> filter(fn: (r) => r._measurement == "cpu") - |> filter(fn: (r) => r._field == "usage_idle") - |> aggregateWindow(every: 1m, fn: mean) - |> yield(name: "mean") diff --git a/pkger/testdata/task_v2_params.yml b/pkger/testdata/task_v2_params.yml deleted file mode 100644 index 
5236bef3867..00000000000 --- a/pkger/testdata/task_v2_params.yml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: influxdata.com/v2alpha2 -kind: Task -metadata: - name: task-1 -spec: - description: desc_1 - every: 1m - query: > - option params = { this: "foo" } - from(bucket: "rucket_1") - |> range(start: -5d, stop: -1h) - |> filter(fn: (r) => r._measurement == params.this) - |> filter(fn: (r) => r._field == "usage_idle") - |> aggregateWindow(every: 1m, fn: mean) - |> yield(name: "mean") diff --git a/pkger/testdata/task_v2_taskSpec.yml b/pkger/testdata/task_v2_taskSpec.yml deleted file mode 100644 index c5564ac0542..00000000000 --- a/pkger/testdata/task_v2_taskSpec.yml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: influxdata.com/v2alpha2 -kind: Task -metadata: - name: task-1 -spec: - task: - - key: name - default: "foo" - type: string - - key: every - default: 1m0s - type: duration - - key: offset - default: 1m0s - type: duration - description: desc_1 - query: > - option task = { name: "bar", every: 1m, offset: 3m } - from(bucket: "rucket_1") - |> range(start: -5d, stop: -1h) - |> filter(fn: (r) => r._measurement == "cpu") - |> filter(fn: (r) => r._field == "usage_idle") - |> aggregateWindow(every: 1m, fn: mean) - |> yield(name: "mean") diff --git a/pkger/testdata/tasks.json b/pkger/testdata/tasks.json deleted file mode 100644 index af07a9f4f00..00000000000 --- a/pkger/testdata/tasks.json +++ /dev/null @@ -1,48 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Label", - "metadata": { - "name": "label-1" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Task", - "metadata": { - "name": "task-uuid" - }, - "spec": { - "name": "task-0", - "description": "desc_0", - "every": "1d1h", - "offset": "15s", - "query": "from(bucket: \"rucket_1\")\n |> range(start: -5d, stop: -1h)\n |> filter(fn: (r) => r._measurement == \"cpu\")\n |> filter(fn: (r) => r._field == \"usage_idle\")\n |> aggregateWindow(every: 1m, fn: mean)\n |> yield(name: \"mean\")", - "status": "inactive", - "associations": [ - { - "kind": "Label", - "name": "label-1" - } - ] - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Task", - "metadata": { - "name": "task-1" - }, - "spec": { - "description": "desc_1", - "cron": "15 * * * *", - "query": "from(bucket: \"rucket_1\")\n |> range(start: -5d, stop: -1h)\n |> filter(fn: (r) => r._measurement == \"cpu\")\n |> filter(fn: (r) => r._field == \"usage_idle\")\n |> aggregateWindow(every: 1m, fn: mean)\n |> yield(name: \"mean\")", - "associations": [ - { - "kind": "Label", - "name": "label-1" - } - ] - } - } -] diff --git a/pkger/testdata/tasks.yml b/pkger/testdata/tasks.yml deleted file mode 100644 index 6af84487167..00000000000 --- a/pkger/testdata/tasks.yml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Task -metadata: - name: task-uuid -spec: - name: task-0 - description: desc_0 - every: 1d1h - offset: 15s - query: > - from(bucket: "rucket_1") - |> range(start: -5d, stop: -1h) - |> filter(fn: (r) => r._measurement == "cpu") - |> filter(fn: (r) => r._field == "usage_idle") - |> aggregateWindow(every: 1m, fn: mean) - |> yield(name: "mean") - status: inactive - associations: - - kind: Label - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Task -metadata: - name: task-1 -spec: - description: desc_1 - cron: 15 * * * * - query: > - from(bucket: "rucket_1") - |> range(start: -5d, stop: -1h) - |> filter(fn: (r) => r._measurement 
== "cpu") - |> filter(fn: (r) => r._field == "usage_idle") - |> aggregateWindow(every: 1m, fn: mean) - |> yield(name: "mean") - associations: - - kind: Label - name: label-1 diff --git a/pkger/testdata/tasks_params.yml b/pkger/testdata/tasks_params.yml deleted file mode 100644 index 77f4e1c2bf0..00000000000 --- a/pkger/testdata/tasks_params.yml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Task -metadata: - name: task-uuid -spec: - every: 10m - query: | - option params = { - bucket: "foo", - start: -1d, - stop: now(), - name: "max", - floatVal: 1.0, - minVal: 10 - } - - from(bucket: params.bucket) - |> range(start: params.start, stop: params.stop) - |> filter(fn: (r) => r._measurement == "processes") - |> filter(fn: (r) => r.floater == params.floatVal) - |> filter(fn: (r) => r._value > params.minVal) - |> aggregateWindow(every: v.windowPeriod, fn: max) - |> yield(name: params.name) - params: - - key: bucket - default: "bar" - type: string - - key: start - type: duration - - key: stop - type: time - - key: floatVal - default: 37.2 - type: float - - key: minVal - type: int - - key: name # infer type diff --git a/pkger/testdata/telegraf.json b/pkger/testdata/telegraf.json deleted file mode 100644 index 9ef5f5d2789..00000000000 --- a/pkger/testdata/telegraf.json +++ /dev/null @@ -1,50 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Label", - "metadata": { - "name": "label-1" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Label", - "metadata": { - "name": "label-2" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Telegraf", - "metadata": { - "name": "first-tele-config" - }, - "spec": { - "name": "display name", - "description": "desc", - "associations": [ - { - "kind": "Label", - "name": "label-1" - }, - { - "kind": "Label", - "name": "label-2" - } - ], - "config": "# Configuration for telegraf agent\n [agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. 
This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n [[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:8086\n urls = [\"http://localhost:8086\"]\n\n ## Token for authentication.\n token = \"$INFLUX_TOKEN\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"rg\"\n\n ## Destination bucket to write into.\n bucket = \"rucket_3\"\n [[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n [[inputs.disk]]\n ## By default stats will be gathered for all mount points.\n ## Set mount_points will restrict the stats to only the specified mount points.\n # mount_points = [\"/\"]\n ## Ignore mount points by filesystem type.\n ignore_fs = [\"tmpfs\", \"devtmpfs\", \"devfs\", \"overlay\", \"aufs\", \"squashfs\"]\n [[inputs.diskio]]\n [[inputs.mem]]\n [[inputs.net]]\n [[inputs.processes]]\n [[inputs.swap]]\n [[inputs.system]]" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Telegraf", - "metadata": { - "name": "tele-2" - }, - "spec": { - "config": "# Configuration for telegraf agent\n [agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## For failed writes, telegraf will cache metric_buffer_limit metrics for each\n ## output, and will flush this buffer on a successful write. 
Oldest metrics\n ## are dropped first when this buffer fills.\n ## This buffer only fills when writes fail to output plugin(s).\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Logging configuration:\n ## Run telegraf with debug log messages.\n debug = false\n ## Run telegraf in quiet mode (error log messages only).\n quiet = false\n ## Specify the log file name. The empty string means to log to stderr.\n logfile = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n [[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## urls exp: http://127.0.0.1:8086\n urls = [\"http://localhost:8086\"]\n\n ## Token for authentication.\n token = \"$INFLUX_TOKEN\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"rg\"\n\n ## Destination bucket to write into.\n bucket = \"rucket_3\"\n [[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n [[inputs.disk]]\n ## By default stats will be gathered for all mount points.\n ## Set mount_points will restrict the stats to only the specified mount points.\n # mount_points = [\"/\"]\n ## Ignore mount points by filesystem type.\n ignore_fs = [\"tmpfs\", \"devtmpfs\", \"devfs\", \"overlay\", \"aufs\", \"squashfs\"]\n [[inputs.diskio]]\n [[inputs.mem]]\n [[inputs.net]]\n [[inputs.processes]]\n [[inputs.swap]]\n [[inputs.system]]" - } - } -] - - diff --git a/pkger/testdata/telegraf.yml b/pkger/testdata/telegraf.yml deleted file mode 100644 index ebf4dc5c1cf..00000000000 --- a/pkger/testdata/telegraf.yml +++ /dev/null @@ -1,171 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-2 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Telegraf -metadata: - name: first-tele-config -spec: - 
name: display name - description: desc - associations: - - kind: Label - name: label-1 - - kind: Label - name: label-2 - config: | - # Configuration for telegraf agent - [agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - precision = "" - - ## Logging configuration: - ## Run telegraf with debug log messages. - debug = false - ## Run telegraf in quiet mode (error log messages only). - quiet = false - ## Specify the log file name. The empty string means to log to stderr. - logfile = "" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. - omit_hostname = false - [[outputs.influxdb_v2]] - ## The URLs of the InfluxDB cluster nodes. - ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. - ## urls exp: http://127.0.0.1:8086 - urls = ["http://localhost:8086"] - - ## Token for authentication. - token = "$INFLUX_TOKEN" - - ## Organization is the name of the organization you wish to write to; must exist. - organization = "rg" - - ## Destination bucket to write into. - bucket = "rucket_3" - [[inputs.cpu]] - ## Whether to report per-cpu stats or not - percpu = true - ## Whether to report total system cpu stats or not - totalcpu = true - ## If true, collect raw CPU time metrics. - collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. - report_active = false - [[inputs.disk]] - ## By default stats will be gathered for all mount points. - ## Set mount_points will restrict the stats to only the specified mount points. 
- # mount_points = ["/"] - ## Ignore mount points by filesystem type. - ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] - [[inputs.diskio]] - [[inputs.mem]] - [[inputs.net]] - [[inputs.processes]] - [[inputs.swap]] - [[inputs.system]] - - ---- -apiVersion: influxdata.com/v2alpha1 -kind: Telegraf -metadata: - name: tele-2 -spec: - config: | - # Configuration for telegraf agent - [agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - precision = "" - debug = false - quiet = false - logfile = "" - - hostname = "" - omit_hostname = false - diff --git a/pkger/testdata/telegraf_ref.yml b/pkger/testdata/telegraf_ref.yml deleted file mode 100644 index ffad1dda567..00000000000 --- a/pkger/testdata/telegraf_ref.yml +++ /dev/null @@ -1,72 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Telegraf -metadata: - name: - envRef: - key: meta-name - default: meta -spec: - name: - envRef: - key: spec-name - default: spectacles - config: | - # Configuration for telegraf agent - [agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. 
- ## This buffer only fills when writes fail to output plugin(s). - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - precision = "" - debug = false - quiet = false - logfile = "" - - hostname = "" - omit_hostname = false - associations: - - kind: Label - name: - envRef: - key: label-meta-name ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: - envRef: - key: label-meta-name diff --git a/pkger/testdata/variable_associates_label.yml b/pkger/testdata/variable_associates_label.yml deleted file mode 100644 index e707eeca874..00000000000 --- a/pkger/testdata/variable_associates_label.yml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: label-1 ---- -apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: var-1 -spec: - type: constant - values: [first val] - associations: - - kind: Label - name: label-1 diff --git a/pkger/testdata/variable_ref.yml b/pkger/testdata/variable_ref.yml deleted file mode 100644 index 2427566a7b7..00000000000 --- a/pkger/testdata/variable_ref.yml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: - envRef: - key: meta-name - default: meta -spec: - name: - envRef: - key: spec-name - default: spectacles - type: constant - values: - - first val - - second val - - third val - selected: - - envRef: - key: the-selected - default: second val - - envRef: - key: the-2nd - associations: - - kind: Label - name: - envRef: - key: label-meta-name ---- -apiVersion: influxdata.com/v2alpha1 -kind: Label -metadata: - name: - envRef: - key: label-meta-name diff --git a/pkger/testdata/variables.json b/pkger/testdata/variables.json deleted file mode 100644 index 41ea2c8e8d8..00000000000 --- a/pkger/testdata/variables.json +++ /dev/null @@ -1,57 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Variable", - "metadata": { - "name": "var-query-1" - }, - "spec": { - "name": "query var", - "description": "query var desc", - "type": "query", - "query": "buckets() |> filter(fn: (r) => r.name !~ /^_/) |> rename(columns: {name: \"_value\"}) |> keep(columns: [\"_value\"])", - "selected": ["rucket"], - "language": "flux" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Variable", - "metadata": { - "name": "var-query-2" - }, - "spec": { - "description": "var-query-2 desc", 
- "type": "query", - "query": "an influxql query of sorts", - "language": "influxql" - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Variable", - "metadata": { - "name": "var-const-3" - }, - "spec": { - "description": "var-const-3 desc", - "type": "constant", - "values": ["first val"] - } - }, - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Variable", - "metadata": { - "name": "var-map-4" - }, - "spec": { - "description": "var-map-4 desc", - "type": "map", - "values": { - "k1": "v1" - } - } - } -] - diff --git a/pkger/testdata/variables.yml b/pkger/testdata/variables.yml deleted file mode 100644 index 5848f2388bf..00000000000 --- a/pkger/testdata/variables.yml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: var-query-1 -spec: - name: query var - description: query var desc - type: query - language: flux - selected: - - rucket - query: | - buckets() |> filter(fn: (r) => r.name !~ /^_/) |> rename(columns: {name: "_value"}) |> keep(columns: ["_value"]) ---- -apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: var-query-2 -spec: - description: var-query-2 desc - type: query - query: an influxql query of sorts - language: influxql ---- -apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: var-const-3 -spec: - description: var-const-3 desc - type: constant - values: - - first val ---- -apiVersion: influxdata.com/v2alpha1 -kind: Variable -metadata: - name: var-map-4 -spec: - description: var-map-4 desc - type: map - values: - k1: v1 diff --git a/pprof/http_server.go b/pprof/http_server.go deleted file mode 100644 index 09be62cc1e0..00000000000 --- a/pprof/http_server.go +++ /dev/null @@ -1,171 +0,0 @@ -package pprof - -import ( - "fmt" - "io" - "net/http" - httppprof "net/http/pprof" - "strconv" - "time" - - "github.com/go-chi/chi" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - ihttp "github.com/influxdata/influxdb/v2/kit/transport/http" -) - -type Handler struct { - chi.Router -} - -func NewHTTPHandler(profilingEnabled bool) *Handler { - r := chi.NewRouter() - r.Route("/pprof", func(r chi.Router) { - if !profilingEnabled { - r.NotFound(profilingDisabledHandler) - return - } - r.Get("/cmdline", httppprof.Cmdline) - r.Get("/profile", httppprof.Profile) - r.Get("/symbol", httppprof.Symbol) - r.Get("/trace", httppprof.Trace) - r.Get("/all", archiveProfilesHandler) - r.Mount("/", http.HandlerFunc(httppprof.Index)) - }) - - return &Handler{r} -} - -func profilingDisabledHandler(w http.ResponseWriter, r *http.Request) { - ihttp.WriteErrorResponse(r.Context(), w, errors.EForbidden, "profiling disabled") -} - -func archiveProfilesHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - // We parse the form here so that we can use the http.Request.Form map. - // - // Otherwise we'd have to use r.FormValue() which makes it impossible to - // distinguish between a form value that exists and has no value and one that - // does not exist at all. - if err := r.ParseForm(); err != nil { - ihttp.WriteErrorResponse(ctx, w, errors.EInternal, err.Error()) - return - } - - // In the following two blocks, we check if the request should include cpu - // profiles and a trace log. - // - // Since the submitted form can contain multiple version of a variable like: - // - // http://localhost:8086?cpu=1s&cpu=30s&trace=3s&cpu=5s - // - // the question arises: which value should we use? We choose to use the LAST - // value supplied. 
- // - // This is an edge case but if for some reason, for example, a url is - // programmatically built and multiple values are supplied, this will do what - // is expected. - // - var traceDuration, cpuDuration time.Duration - - // last() returns either the last item from a slice of strings or an empty - // string if the supplied slice is empty or nil. - last := func(s []string) string { - if len(s) == 0 { - return "" - } - return s[len(s)-1] - } - - // If trace exists as a form value, add it to the profiles slice with the - // decoded duration. - // - // Requests for a trace should look like: - // - // ?trace=10s - // - if vals, exists := r.Form["trace"]; exists { - // parse the duration encoded in the last "trace" value supplied. - val := last(vals) - duration, err := time.ParseDuration(val) - - // If we can't parse the duration or if the user supplies a negative - // number, return an appropriate error status and message. - // - // In this case it is a StatusBadRequest (400) since the problem is in the - // supplied form data. - if duration < 0 { - ihttp.WriteErrorResponse(ctx, w, errors.EInvalid, "negative trace durations not allowed") - return - } - - if err != nil { - ihttp.WriteErrorResponse(ctx, w, errors.EInvalid, fmt.Sprintf("could not parse supplied duration for trace %q", val)) - return - } - - // Trace files can get big. Lets clamp the maximum trace duration to 45s. - if duration > 45*time.Second { - ihttp.WriteErrorResponse(ctx, w, errors.EInvalid, "cannot trace for longer than 45s") - return - } - - traceDuration = duration - } - - // Capturing CPU profiles is a little trickier. The preferred way to send the - // cpu profile duration is via the supplied "cpu" variable's value. - // - // The duration should be encoded as a Go duration that can be parsed by - // time.ParseDuration(). - // - // In the past users were encouraged to assign any value to cpu and provide - // the duration in a separate "seconds" value. - // - // The code below handles both -- first it attempts to use the old method - // which would look like: - // - // ?cpu=foobar&seconds=10 - // - // Then it attempts to ascertain the duration provided with: - // - // ?cpu=10s - // - // This preserves backwards compatibility with any tools that have been - // written to gather profiles. - // - if vals, exists := r.Form["cpu"]; exists { - duration := time.Second * 30 - val := last(vals) - - // getDuration is a small function literal that encapsulates the logic - // for getting the duration from either the "seconds" form value or from - // the value assigned to "cpu". 
- getDuration := func() (time.Duration, error) { - if seconds, exists := r.Form["seconds"]; exists { - s, err := strconv.ParseInt(last(seconds), 10, 64) - if err != nil { - return 0, err - } - return time.Second * time.Duration(s), nil - } - // see if the value of cpu is a duration like: cpu=10s - return time.ParseDuration(val) - } - - duration, err := getDuration() - if err != nil { - ihttp.WriteErrorResponse(ctx, w, errors.EInvalid, fmt.Sprintf("could not parse supplied duration for cpu profile %q", val)) - return - } - - cpuDuration = duration - } - - tarstream, err := collectAllProfiles(ctx, traceDuration, cpuDuration) - if err != nil { - ihttp.WriteErrorResponse(ctx, w, errors.EInternal, err.Error()) - return - } - _, _ = io.Copy(w, tarstream) -} diff --git a/pprof/pprof.go b/pprof/pprof.go deleted file mode 100644 index f5149102a05..00000000000 --- a/pprof/pprof.go +++ /dev/null @@ -1,129 +0,0 @@ -package pprof - -import ( - "archive/tar" - "bytes" - "context" - "fmt" - "io" - "path" - "runtime" - "runtime/pprof" - "runtime/trace" - "time" -) - -func SetGlobalProfiling(enabled bool) { - if enabled { - // Copy the rates used in 1.x. - runtime.MemProfileRate = 4096 - runtime.SetBlockProfileRate(int(1 * time.Second)) - runtime.SetMutexProfileFraction(1) - } else { - runtime.MemProfileRate = 0 - runtime.SetBlockProfileRate(0) - runtime.SetMutexProfileFraction(0) - } -} - -// collectAllProfiles generates a tarball containing: -// - goroutine profile -// - blocking profile -// - mutex profile -// - heap profile -// - allocations profile -// - (optionally) trace profile -// - (optionally) CPU profile -// -// All information is added to a tar archive and then compressed, before being -// returned to the requester as an archive file. Where profiles support debug -// parameters, the profile is collected with debug=1. -func collectAllProfiles(ctx context.Context, traceDuration time.Duration, cpuDuration time.Duration) (io.Reader, error) { - // prof describes a profile name and a debug value, or in the case of a CPU - // profile, the number of seconds to collect the profile for. - type prof struct { - Name string // name of profile - Duration time.Duration // duration of profile if applicable. currently only used by cpu and trace - } - - var profiles = []prof{ - {Name: "goroutine"}, - {Name: "block"}, - {Name: "mutex"}, - {Name: "heap"}, - {Name: "allocs"}, - {Name: "threadcreate"}, - } - if traceDuration > 0 { - profiles = append(profiles, prof{"trace", traceDuration}) - } - if cpuDuration > 0 { - // We want to gather CPU profiles first, if enabled. - profiles = append([]prof{{"cpu", cpuDuration}}, profiles...) - } - - tarball := &bytes.Buffer{} - buf := &bytes.Buffer{} // Temporary buffer for each profile/query result. - - tw := tar.NewWriter(tarball) - // Collect and write out profiles. - for _, profile := range profiles { - switch profile.Name { - case "cpu": - if err := pprof.StartCPUProfile(buf); err != nil { - return nil, err - } - sleep(ctx, profile.Duration) - pprof.StopCPUProfile() - - case "trace": - if err := trace.Start(buf); err != nil { - return nil, err - } - sleep(ctx, profile.Duration) - trace.Stop() - - default: - prof := pprof.Lookup(profile.Name) - if prof == nil { - return nil, fmt.Errorf("unable to find profile %q", profile.Name) - } - - if err := prof.WriteTo(buf, 0); err != nil { - return nil, err - } - } - - // Write the profile file's header. 
- if err := tw.WriteHeader(&tar.Header{ - Name: path.Join("profiles", profile.Name+".pb.gz"), - Mode: 0600, - Size: int64(buf.Len()), - }); err != nil { - return nil, err - } - - // Write the profile file's data. - if _, err := tw.Write(buf.Bytes()); err != nil { - return nil, err - } - - // Reset the buffer for the next profile. - buf.Reset() - } - - // Close the tar writer. - if err := tw.Close(); err != nil { - return nil, err - } - - return tarball, nil -} - -// Adapted from net/http/pprof/pprof.go -func sleep(ctx context.Context, d time.Duration) { - select { - case <-time.After(d): - case <-ctx.Done(): - } -} diff --git a/predicate/logical.go b/predicate/logical.go deleted file mode 100644 index 21311bec8f8..00000000000 --- a/predicate/logical.go +++ /dev/null @@ -1,60 +0,0 @@ -package predicate - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" -) - -// LogicalOperator is a string type of logical operator. -type LogicalOperator int - -// LogicalOperators -var ( - LogicalAnd LogicalOperator = 1 -) - -// Value returns the node logical type. -func (op LogicalOperator) Value() (datatypes.Node_Logical, error) { - switch op { - case LogicalAnd: - return datatypes.Node_LogicalAnd, nil - default: - return 0, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("the logical operator %q is invalid", op), - } - } -} - -// LogicalNode is a node type includes a logical expression with other nodes. -type LogicalNode struct { - Operator LogicalOperator `json:"operator"` - Children [2]Node `json:"children"` -} - -// ToDataType convert a LogicalNode to datatypes.Node. -func (n LogicalNode) ToDataType() (*datatypes.Node, error) { - logicalOp, err := n.Operator.Value() - if err != nil { - return nil, err - } - children := make([]*datatypes.Node, len(n.Children)) - for k, node := range n.Children { - children[k], err = node.ToDataType() - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("Err in Child %d, err: %s", k, err.Error()), - } - } - } - return &datatypes.Node{ - NodeType: datatypes.Node_TypeLogicalExpression, - Value: &datatypes.Node_Logical_{ - Logical: logicalOp, - }, - Children: children, - }, nil -} diff --git a/predicate/parser.go b/predicate/parser.go deleted file mode 100644 index 5a9cd625fe3..00000000000 --- a/predicate/parser.go +++ /dev/null @@ -1,232 +0,0 @@ -package predicate - -import ( - "fmt" - "strings" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxql" -) - -// a fixed buffer ring -type buffer [3]struct { - tok influxql.Token // last read token - pos influxql.Pos // last read pos - lit string // last read literal -} - -// parser of the predicate will convert -// such a statement `(a = "a" or b!="b") and c ! =~/efg/` -// to the predicate node -type parser struct { - sc *influxql.Scanner - i int // buffer index - n int // buffer size - openParen int - buf buffer -} - -// scan returns the next token from the underlying scanner. -// If a token has been unscanned then read that instead. -func (p *parser) scan() (tok influxql.Token, pos influxql.Pos, lit string) { - // If we have a token on the buffer, then return it. - if p.n > 0 { - p.n-- - return p.curr() - } - - // Move buffer position forward and save the token. 
- p.i = (p.i + 1) % len(p.buf) - buf := &p.buf[p.i] - buf.tok, buf.pos, buf.lit = p.sc.Scan() - - return p.curr() -} - -func (p *parser) unscan() { - p.n++ -} - -// curr returns the last read token. -func (p *parser) curr() (tok influxql.Token, pos influxql.Pos, lit string) { - buf := &p.buf[(p.i-p.n+len(p.buf))%len(p.buf)] - return buf.tok, buf.pos, buf.lit -} - -// scanIgnoreWhitespace scans the next non-whitespace token. -func (p *parser) scanIgnoreWhitespace() (tok influxql.Token, pos influxql.Pos, lit string) { - tok, pos, lit = p.scan() - if tok == influxql.WS { - tok, pos, lit = p.scan() - } - return -} - -// Parse the predicate statement. -func Parse(sts string) (n Node, err error) { - if sts == "" { - return nil, nil - } - p := new(parser) - p.sc = influxql.NewScanner(strings.NewReader(sts)) - return p.parseLogicalNode() -} - -func (p *parser) parseLogicalNode() (Node, error) { - n := new(LogicalNode) - for { - tok, pos, _ := p.scanIgnoreWhitespace() - switch tok { - case influxql.NUMBER, influxql.INTEGER, influxql.NAME, influxql.IDENT: - p.unscan() - tr, err := p.parseTagRuleNode() - if err != nil { - return *n, err - } - if n.Children[0] == nil { - n.Children[0] = tr - } else { - n.Children[1] = tr - } - case influxql.AND: - n.Operator = LogicalAnd - if n.Children[1] == nil { - continue - } - var n1 Node - var err error - if tokNext := p.peekTok(); tokNext == influxql.LPAREN { - n1, err = p.parseLogicalNode() - } else { - n1, err = p.parseTagRuleNode() - } - if err != nil { - return *n, err - } - n = &LogicalNode{ - Children: [2]Node{*n, n1}, - Operator: LogicalAnd, - } - case influxql.OR: - return *n, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("the logical operator OR is not supported yet at position %d", pos.Char), - } - case influxql.LPAREN: - p.openParen++ - currParen := p.openParen - n1, err := p.parseLogicalNode() - if err != nil { - return *n, err - } - if p.openParen != currParen-1 { - return *n, &errors.Error{ - Code: errors.EInvalid, - Msg: "extra ( seen", - } - } - if n.Children[0] == nil { - n.Children[0] = n1 - } else { - n.Children[1] = n1 - } - case influxql.RPAREN: - p.openParen-- - fallthrough - case influxql.EOF: - if p.openParen < 0 { - return *n, &errors.Error{ - Code: errors.EInvalid, - Msg: "extra ) seen", - } - } - if n.Children[1] == nil { - return n.Children[0], nil - } - return *n, nil - default: - return *n, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("bad logical expression, at position %d", pos.Char), - } - } - } -} - -func (p *parser) parseTagRuleNode() (TagRuleNode, error) { - n := new(TagRuleNode) - // scan the key - tok, pos, lit := p.scanIgnoreWhitespace() - switch tok { - case influxql.IDENT: - n.Key = lit - case influxql.NAME: - n.Key = "name" - default: - return *n, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("bad tag key, at position %d", pos.Char), - } - } - - tok, pos, _ = p.scanIgnoreWhitespace() - switch tok { - case influxql.EQ: - n.Operator = influxdb.Equal - goto scanRegularTagValue - case influxql.NEQ: - n.Operator = influxdb.NotEqual - goto scanRegularTagValue - case influxql.EQREGEX: - fallthrough - case influxql.NEQREGEX: - return *n, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("operator: %q at position: %d is not supported yet", tok.String(), pos.Char), - } - default: - return *n, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("invalid operator %q at position: %d", tok.String(), pos.Char), - } - } - // scan the value -scanRegularTagValue: - tok, pos, lit = 
p.scanIgnoreWhitespace() - switch tok { - case influxql.SUB: - n.Value = "-" - goto scanRegularTagValue - case influxql.IDENT: - fallthrough - case influxql.DURATIONVAL: - fallthrough - case influxql.NUMBER: - fallthrough - case influxql.INTEGER: - n.Value += lit - return *n, nil - case influxql.TRUE: - n.Value = "true" - return *n, nil - case influxql.FALSE: - n.Value = "false" - return *n, nil - default: - return *n, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("bad tag value: %q, at position %d", lit, pos.Char), - } - } -} - -// peekRune returns the next rune that would be read by the scanner. -func (p *parser) peekTok() influxql.Token { - tok, _, _ := p.scanIgnoreWhitespace() - if tok != influxql.EOF { - p.unscan() - } - - return tok -} diff --git a/predicate/parser_test.go b/predicate/parser_test.go deleted file mode 100644 index 164f1bec19b..00000000000 --- a/predicate/parser_test.go +++ /dev/null @@ -1,168 +0,0 @@ -package predicate - -import ( - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - influxtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/influxdata/influxql" -) - -func TestParseNode(t *testing.T) { - cases := []struct { - str string - node Node - err error - }{ - { - str: `abc=opq`, - node: TagRuleNode{Tag: influxdb.Tag{Key: "abc", Value: "opq"}}, - }, - { - str: `abc=opq and gender="male"`, - node: LogicalNode{Operator: LogicalAnd, Children: [2]Node{ - TagRuleNode{Tag: influxdb.Tag{Key: "abc", Value: "opq"}}, - TagRuleNode{Tag: influxdb.Tag{Key: "gender", Value: "male"}}, - }}, - }, - { - str: ` abc="opq" AND gender="male" AND temp=1123`, - node: LogicalNode{Operator: LogicalAnd, Children: [2]Node{ - LogicalNode{Operator: LogicalAnd, Children: [2]Node{ - TagRuleNode{Tag: influxdb.Tag{Key: "abc", Value: "opq"}}, - TagRuleNode{Tag: influxdb.Tag{Key: "gender", Value: "male"}}, - }}, - TagRuleNode{Tag: influxdb.Tag{Key: "temp", Value: "1123"}}, - }}, - }, - { - str: ` abc="opq" Or gender="male" OR temp=1123`, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "the logical operator OR is not supported yet at position 11", - }, - }, - { - str: ` (t1="v1" and t2="v2") and (t3=v3 and (t4=v4 and t5=v5 and t6=v6))`, - node: LogicalNode{Operator: LogicalAnd, Children: [2]Node{ - LogicalNode{Operator: LogicalAnd, Children: [2]Node{ - TagRuleNode{Tag: influxdb.Tag{Key: "t1", Value: "v1"}}, - TagRuleNode{Tag: influxdb.Tag{Key: "t2", Value: "v2"}}, - }}, - LogicalNode{Operator: LogicalAnd, Children: [2]Node{ - TagRuleNode{Tag: influxdb.Tag{Key: "t3", Value: "v3"}}, - LogicalNode{Operator: LogicalAnd, Children: [2]Node{ - LogicalNode{Operator: LogicalAnd, Children: [2]Node{ - TagRuleNode{Tag: influxdb.Tag{Key: "t4", Value: "v4"}}, - TagRuleNode{Tag: influxdb.Tag{Key: "t5", Value: "v5"}}, - }}, - TagRuleNode{Tag: influxdb.Tag{Key: "t6", Value: "v6"}}, - }}, - }}, - }}, - }, - { - str: ` (t1="v1" and t2="v2") and (`, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "extra ( seen", - }, - }, - { - str: ` (t1="v1" and t2="v2"))`, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "extra ) seen", - }, - }, - } - for _, c := range cases { - node, err := Parse(c.str) - influxtesting.ErrorsEqual(t, err, c.err) - if c.err == nil { - if diff := cmp.Diff(node, c.node); diff != "" { - t.Errorf("tag rule mismatch:\n %s", diff) - } - } - } -} - -func TestParseTagRule(t *testing.T) { - cases := []struct { - str 
string - node TagRuleNode - err error - }{ - { - str: ` abc = "opq"`, - node: TagRuleNode{Tag: influxdb.Tag{Key: "abc", Value: "opq"}}, - }, - { - str: `abc=0x1231`, - node: TagRuleNode{Tag: influxdb.Tag{Key: "abc", Value: "0x1231"}}, - }, - { - str: `abc=2d`, - node: TagRuleNode{Tag: influxdb.Tag{Key: "abc", Value: "2d"}}, - }, - { - str: `abc=-5i`, - node: TagRuleNode{Tag: influxdb.Tag{Key: "abc", Value: "-5i"}}, - }, - { - str: `abc= -1221`, - node: TagRuleNode{Tag: influxdb.Tag{Key: "abc", Value: "-1221"}}, - }, - { - str: ` abc != "opq"`, - node: TagRuleNode{Tag: influxdb.Tag{Key: "abc", Value: "opq"}, Operator: influxdb.NotEqual}, - }, - { - str: `abc=123`, - node: TagRuleNode{Tag: influxdb.Tag{Key: "abc", Value: "123"}, Operator: influxdb.Equal}, - }, - { - str: `abc=true`, - node: TagRuleNode{Tag: influxdb.Tag{Key: "abc", Value: "true"}, Operator: influxdb.Equal}, - }, - { - str: `abc=false`, - node: TagRuleNode{Tag: influxdb.Tag{Key: "abc", Value: "false"}, Operator: influxdb.Equal}, - }, - { - str: `abc!~/^payments\./`, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: `operator: "!~" at position: 3 is not supported yet`, - }, - }, - { - str: `abc=~/^payments\./`, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: `operator: "=~" at position: 3 is not supported yet`, - }, - }, - { - str: `abc>1000`, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: `invalid operator ">" at position: 3`, - }, - }, - } - for _, c := range cases { - p := new(parser) - p.sc = influxql.NewScanner(strings.NewReader(c.str)) - tr, err := p.parseTagRuleNode() - influxtesting.ErrorsEqual(t, err, c.err) - if c.err == nil { - if diff := cmp.Diff(tr, c.node); diff != "" { - t.Errorf("tag rule mismatch:\n %s", diff) - } - } - } -} diff --git a/predicate/predicate.go b/predicate/predicate.go deleted file mode 100644 index a0ef42e1080..00000000000 --- a/predicate/predicate.go +++ /dev/null @@ -1,35 +0,0 @@ -package predicate - -import ( - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -// Node is a predicate node. 
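//
// A minimal usage sketch for this package (the predicate string below is an
// assumed example; error handling is elided):
//
//     node, err := Parse(`_measurement="cpu" and host="server01"`)
//     if err != nil {
//         return err
//     }
//     pred, err := New(node) // a nil node yields a nil predicate
//     if err != nil {
//         return err
//     }
//     _ = pred // hand the predicate to the delete/storage layer
//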
-type Node interface { - ToDataType() (*datatypes.Node, error) -} - -// New predicate from a node -func New(n Node) (influxdb.Predicate, error) { - if n == nil { - return nil, nil - } - - dt, err := n.ToDataType() - if err != nil { - return nil, err - } - pred, err := tsm1.NewProtobufPredicate(&datatypes.Predicate{ - Root: dt, - }) - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - return pred, nil -} diff --git a/predicate/predicate_test.go b/predicate/predicate_test.go deleted file mode 100644 index 413d1e7869d..00000000000 --- a/predicate/predicate_test.go +++ /dev/null @@ -1,357 +0,0 @@ -package predicate - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/cmputil" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - influxtesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestDataTypeConversion(t *testing.T) { - cases := []struct { - name string - node Node - err error - dataType *datatypes.Node - }{ - { - name: "empty node", - }, - { - name: "equal tag rule", - node: &TagRuleNode{ - Operator: influxdb.Equal, - Tag: influxdb.Tag{ - Key: "k1", - Value: "v1", - }, - }, - dataType: &datatypes.Node{ - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonEqual}, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{TagRefValue: "k1"}, - }, - { - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: "v1", - }, - }, - }, - }, - }, - { - name: "not equal tag rule", - node: &TagRuleNode{ - Operator: influxdb.NotEqual, - Tag: influxdb.Tag{ - Key: "k1", - Value: "v1", - }, - }, - dataType: &datatypes.Node{ - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonNotEqual}, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{TagRefValue: "k1"}, - }, - { - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: "v1", - }, - }, - }, - }, - }, - { - name: "measurement equal tag rule", - node: &TagRuleNode{ - Operator: influxdb.Equal, - Tag: influxdb.Tag{ - Key: "_measurement", - Value: "cpu", - }, - }, - dataType: &datatypes.Node{ - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonEqual}, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{TagRefValue: models.MeasurementTagKey}, - }, - { - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: "cpu", - }, - }, - }, - }, - }, - { - name: "measurement not equal tag rule", - node: &TagRuleNode{ - Operator: influxdb.NotEqual, - Tag: influxdb.Tag{ - Key: "_measurement", - Value: "cpu", - }, - }, - dataType: &datatypes.Node{ - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonNotEqual}, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{TagRefValue: models.MeasurementTagKey}, - }, - { - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: "cpu", - }, - 
}, - }, - }, - }, - { - name: "equal field tag rule", - node: &TagRuleNode{ - Operator: influxdb.Equal, - Tag: influxdb.Tag{ - Key: "_field", - Value: "cpu", - }, - }, - dataType: &datatypes.Node{ - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonEqual}, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{TagRefValue: models.FieldKeyTagKey}, - }, - { - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: "cpu", - }, - }, - }, - }, - }, - { - name: "not equal field tag rule", - node: &TagRuleNode{ - Operator: influxdb.NotEqual, - Tag: influxdb.Tag{ - Key: "_field", - Value: "cpu", - }, - }, - dataType: &datatypes.Node{ - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonNotEqual}, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{TagRefValue: models.FieldKeyTagKey}, - }, - { - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: "cpu", - }, - }, - }, - }, - }, - { - name: "logical", - node: &LogicalNode{ - Operator: LogicalAnd, - Children: [2]Node{ - &TagRuleNode{ - Operator: influxdb.Equal, - Tag: influxdb.Tag{ - Key: "k1", - Value: "v1", - }, - }, - &TagRuleNode{ - Operator: influxdb.Equal, - Tag: influxdb.Tag{ - Key: "k2", - Value: "v2", - }, - }, - }, - }, - dataType: &datatypes.Node{ - NodeType: datatypes.Node_TypeLogicalExpression, - Value: &datatypes.Node_Logical_{ - Logical: datatypes.Node_LogicalAnd, - }, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonEqual}, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{TagRefValue: "k1"}, - }, - { - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: "v1", - }, - }, - }, - }, - { - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonEqual}, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{TagRefValue: "k2"}, - }, - { - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: "v2", - }, - }, - }, - }, - }, - }, - }, - { - name: "conplex logical", - node: &LogicalNode{ - Operator: LogicalAnd, - Children: [2]Node{ - &LogicalNode{ - Operator: LogicalAnd, - Children: [2]Node{ - &TagRuleNode{ - Operator: influxdb.Equal, - Tag: influxdb.Tag{ - Key: "k3", - Value: "v3", - }, - }, - &TagRuleNode{ - Operator: influxdb.Equal, - Tag: influxdb.Tag{ - Key: "k4", - Value: "v4", - }, - }, - }, - }, - &TagRuleNode{ - Operator: influxdb.Equal, - Tag: influxdb.Tag{ - Key: "k2", - Value: "v2", - }, - }, - }, - }, - dataType: &datatypes.Node{ - NodeType: datatypes.Node_TypeLogicalExpression, - Value: &datatypes.Node_Logical_{ - Logical: datatypes.Node_LogicalAnd, - }, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeLogicalExpression, - Value: &datatypes.Node_Logical_{ - Logical: datatypes.Node_LogicalAnd, - }, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonEqual}, - Children: []*datatypes.Node{ - { - NodeType: 
datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{TagRefValue: "k3"}, - }, - { - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: "v3", - }, - }, - }, - }, - { - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonEqual}, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{TagRefValue: "k4"}, - }, - { - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: "v4", - }, - }, - }, - }, - }, - }, - { - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonEqual}, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{TagRefValue: "k2"}, - }, - { - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: "v2", - }, - }, - }, - }, - }, - }, - }, - } - for _, c := range cases { - if c.node != nil { - dataType, err := c.node.ToDataType() - influxtesting.ErrorsEqual(t, err, c.err) - if c.err != nil { - continue - } - if diff := cmp.Diff(dataType, c.dataType, cmputil.IgnoreProtobufUnexported()); diff != "" { - t.Fatalf("%s failed nodes are different, diff: %s", c.name, diff) - } - } - - if _, err := New(c.node); err != nil { - t.Fatalf("%s convert to predicate failed, err: %s", c.name, err.Error()) - } - } -} diff --git a/predicate/tag_rule.go b/predicate/tag_rule.go deleted file mode 100644 index 63a56798392..00000000000 --- a/predicate/tag_rule.go +++ /dev/null @@ -1,85 +0,0 @@ -package predicate - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" -) - -// TagRuleNode is a node type of a single tag rule. -type TagRuleNode influxdb.TagRule - -var specialKey = map[string]string{ - "_measurement": models.MeasurementTagKey, - "_field": models.FieldKeyTagKey, -} - -// NodeTypeLiteral convert a TagRuleNode to a nodeTypeLiteral. -func NodeTypeLiteral(tr TagRuleNode) *datatypes.Node { - switch tr.Operator { - case influxdb.RegexEqual: - fallthrough - case influxdb.NotRegexEqual: - return &datatypes.Node{ - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_RegexValue{ - RegexValue: tr.Value, - }, - } - default: - return &datatypes.Node{ - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: tr.Value, - }, - } - } -} - -// NodeComparison convert influxdb.Operator to Node_Comparison. -func NodeComparison(op influxdb.Operator) (datatypes.Node_Comparison, error) { - switch op { - case influxdb.Equal: - return datatypes.Node_ComparisonEqual, nil - case influxdb.NotEqual: - return datatypes.Node_ComparisonNotEqual, nil - case influxdb.RegexEqual: - fallthrough - case influxdb.NotRegexEqual: - return 0, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("Operator %s is not supported for delete predicate yet", op), - } - default: - return 0, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("Unsupported operator: %s", op), - } - } - -} - -// ToDataType convert a TagRuleNode to datatypes.Node. 
-func (n TagRuleNode) ToDataType() (*datatypes.Node, error) { - compare, err := NodeComparison(n.Operator) - if err != nil { - return nil, err - } - if special, ok := specialKey[n.Key]; ok { - n.Key = special - } - return &datatypes.Node{ - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: compare}, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{TagRefValue: n.Key}, - }, - NodeTypeLiteral(n), - }, - }, nil -} diff --git a/preview.flux b/preview.flux deleted file mode 100644 index d621500ded1..00000000000 --- a/preview.flux +++ /dev/null @@ -1,8 +0,0 @@ -import "experimental/influxdb" -import "internal/debug" - -influxdb.preview(bucket: "preview-test") -|> range(start: -1d) -|> debug.pass() -|> group() -|> aggregateWindow(every: 1m, fn: mean) diff --git a/prometheus/auth_service.go b/prometheus/auth_service.go deleted file mode 100644 index f529c9f8d02..00000000000 --- a/prometheus/auth_service.go +++ /dev/null @@ -1,133 +0,0 @@ -package prometheus - -import ( - "context" - "fmt" - "time" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/prometheus/client_golang/prometheus" -) - -// AuthorizationService manages authorizations. -type AuthorizationService struct { - requestCount *prometheus.CounterVec - requestDuration *prometheus.HistogramVec - AuthorizationService platform.AuthorizationService -} - -// NewAuthorizationService creates an instance of AuthorizationService. -func NewAuthorizationService() *AuthorizationService { - // TODO: what to make these values - namespace := "auth" - subsystem := "prometheus" - s := &AuthorizationService{ - requestCount: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "requests_total", - Help: "Number of http requests received", - }, []string{"method", "error"}), - requestDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "request_duration_seconds", - Help: "Time taken to respond to HTTP request", - // TODO(desa): determine what spacing these buckets should have. - Buckets: prometheus.ExponentialBuckets(0.001, 1.5, 25), - }, []string{"method", "error"}), - } - - return s -} - -// FindAuthorizationByID returns an authorization given a id, records function call latency, and counts function calls. -func (s *AuthorizationService) FindAuthorizationByID(ctx context.Context, id platform2.ID) (a *platform.Authorization, err error) { - defer func(start time.Time) { - labels := prometheus.Labels{ - "method": "FindAuthorizationByID", - "error": fmt.Sprint(err != nil), - } - s.requestCount.With(labels).Add(1) - s.requestDuration.With(labels).Observe(time.Since(start).Seconds()) - }(time.Now()) - return s.AuthorizationService.FindAuthorizationByID(ctx, id) -} - -// FindAuthorizationByToken returns an authorization given a token, records function call latency, and counts function calls. 
-func (s *AuthorizationService) FindAuthorizationByToken(ctx context.Context, t string) (a *platform.Authorization, err error) { - defer func(start time.Time) { - labels := prometheus.Labels{ - "method": "FindAuthorizationByToken", - "error": fmt.Sprint(err != nil), - } - s.requestCount.With(labels).Add(1) - s.requestDuration.With(labels).Observe(time.Since(start).Seconds()) - }(time.Now()) - return s.AuthorizationService.FindAuthorizationByToken(ctx, t) -} - -// FindAuthorizations returns authorizations given a filter, records function call latency, and counts function calls. -func (s *AuthorizationService) FindAuthorizations(ctx context.Context, filter platform.AuthorizationFilter, opt ...platform.FindOptions) (as []*platform.Authorization, i int, err error) { - defer func(start time.Time) { - labels := prometheus.Labels{ - "method": "FindAuthorizations", - "error": fmt.Sprint(err != nil), - } - s.requestCount.With(labels).Add(1) - s.requestDuration.With(labels).Observe(time.Since(start).Seconds()) - }(time.Now()) - - return s.AuthorizationService.FindAuthorizations(ctx, filter, opt...) -} - -// CreateAuthorization creates an authorization, records function call latency, and counts function calls. -func (s *AuthorizationService) CreateAuthorization(ctx context.Context, a *platform.Authorization) (err error) { - defer func(start time.Time) { - labels := prometheus.Labels{ - "method": "CreateAuthorization", - "error": fmt.Sprint(err != nil), - } - s.requestCount.With(labels).Add(1) - s.requestDuration.With(labels).Observe(time.Since(start).Seconds()) - }(time.Now()) - - return s.AuthorizationService.CreateAuthorization(ctx, a) -} - -// DeleteAuthorization deletes an authorization, records function call latency, and counts function calls. -func (s *AuthorizationService) DeleteAuthorization(ctx context.Context, id platform2.ID) (err error) { - defer func(start time.Time) { - labels := prometheus.Labels{ - "method": "DeleteAuthorization", - "error": fmt.Sprint(err != nil), - } - s.requestCount.With(labels).Add(1) - s.requestDuration.With(labels).Observe(time.Since(start).Seconds()) - }(time.Now()) - - return s.AuthorizationService.DeleteAuthorization(ctx, id) -} - -// UpdateAuthorization updates the status and description. -func (s *AuthorizationService) UpdateAuthorization(ctx context.Context, id platform2.ID, upd *platform.AuthorizationUpdate) (a *platform.Authorization, err error) { - defer func(start time.Time) { - labels := prometheus.Labels{ - "method": "setAuthorizationStatus", - "error": fmt.Sprint(err != nil), - } - s.requestCount.With(labels).Add(1) - s.requestDuration.With(labels).Observe(time.Since(start).Seconds()) - }(time.Now()) - - return s.AuthorizationService.UpdateAuthorization(ctx, id, upd) -} - -// PrometheusCollectors returns all authorization service prometheus collectors. 
-func (s *AuthorizationService) PrometheusCollectors() []prometheus.Collector { - return []prometheus.Collector{ - s.requestCount, - s.requestDuration, - } -} diff --git a/prometheus/auth_service_test.go b/prometheus/auth_service_test.go deleted file mode 100644 index d4da5bafcd9..00000000000 --- a/prometheus/auth_service_test.go +++ /dev/null @@ -1,151 +0,0 @@ -package prometheus_test - -import ( - "context" - "errors" - "testing" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/kit/prom/promtest" - "github.com/influxdata/influxdb/v2/prometheus" - "go.uber.org/zap" -) - -// authzSvc is a test helper that returns its Err from every method on the AuthorizationService interface. -type authzSvc struct { - Err error -} - -var _ platform.AuthorizationService = (*authzSvc)(nil) - -func (a *authzSvc) FindAuthorizationByID(context.Context, platform2.ID) (*platform.Authorization, error) { - return nil, a.Err -} - -func (a *authzSvc) FindAuthorizationByToken(context.Context, string) (*platform.Authorization, error) { - return nil, a.Err -} - -func (a *authzSvc) FindAuthorizations(context.Context, platform.AuthorizationFilter, ...platform.FindOptions) ([]*platform.Authorization, int, error) { - return nil, 0, a.Err -} - -func (a *authzSvc) CreateAuthorization(context.Context, *platform.Authorization) error { - return a.Err -} - -func (a *authzSvc) DeleteAuthorization(context.Context, platform2.ID) error { - return a.Err -} - -func (a *authzSvc) UpdateAuthorization(context.Context, platform2.ID, *platform.AuthorizationUpdate) (*platform.Authorization, error) { - return nil, a.Err -} - -func TestAuthorizationService_Metrics(t *testing.T) { - a := new(authzSvc) - - svc := prometheus.NewAuthorizationService() - svc.AuthorizationService = a - reg := prom.NewRegistry(zap.NewNop()) - reg.MustRegister(svc.PrometheusCollectors()...) 
- - ctx := context.Background() - id := platform2.ID(1) - - if _, err := svc.FindAuthorizationByID(ctx, id); err != nil { - t.Fatal(err) - } - mfs := promtest.MustGather(t, reg) - m := promtest.MustFindMetric(t, mfs, "auth_prometheus_requests_total", map[string]string{"method": "FindAuthorizationByID", "error": "false"}) - if got := m.GetCounter().GetValue(); got != 1 { - t.Fatalf("exp 1 request, got %v", got) - } - - if _, err := svc.FindAuthorizationByToken(ctx, ""); err != nil { - t.Fatal(err) - } - mfs = promtest.MustGather(t, reg) - m = promtest.MustFindMetric(t, mfs, "auth_prometheus_requests_total", map[string]string{"method": "FindAuthorizationByToken", "error": "false"}) - if got := m.GetCounter().GetValue(); got != 1 { - t.Fatalf("exp 1 request, got %v", got) - } - - if _, _, err := svc.FindAuthorizations(ctx, platform.AuthorizationFilter{}); err != nil { - t.Fatal(err) - } - mfs = promtest.MustGather(t, reg) - m = promtest.MustFindMetric(t, mfs, "auth_prometheus_requests_total", map[string]string{"method": "FindAuthorizations", "error": "false"}) - if got := m.GetCounter().GetValue(); got != 1 { - t.Fatalf("exp 1 request, got %v", got) - } - - if err := svc.CreateAuthorization(ctx, nil); err != nil { - t.Fatal(err) - } - mfs = promtest.MustGather(t, reg) - m = promtest.MustFindMetric(t, mfs, "auth_prometheus_requests_total", map[string]string{"method": "CreateAuthorization", "error": "false"}) - if got := m.GetCounter().GetValue(); got != 1 { - t.Fatalf("exp 1 request, got %v", got) - } - - var tempID platform2.ID - if err := svc.DeleteAuthorization(ctx, tempID); err != nil { - t.Fatal(err) - } - mfs = promtest.MustGather(t, reg) - m = promtest.MustFindMetric(t, mfs, "auth_prometheus_requests_total", map[string]string{"method": "DeleteAuthorization", "error": "false"}) - if got := m.GetCounter().GetValue(); got != 1 { - t.Fatalf("exp 1 request, got %v", got) - } - - forced := errors.New("forced error") - a.Err = forced - - if _, err := svc.FindAuthorizationByID(ctx, id); err != forced { - t.Fatalf("expected forced error, got %v", err) - } - mfs = promtest.MustGather(t, reg) - m = promtest.MustFindMetric(t, mfs, "auth_prometheus_requests_total", map[string]string{"method": "FindAuthorizationByID", "error": "true"}) - if got := m.GetCounter().GetValue(); got != 1 { - t.Fatalf("exp 1 request, got %v", got) - } - - if _, err := svc.FindAuthorizationByToken(ctx, ""); err != forced { - t.Fatalf("expected forced error, got %v", err) - } - mfs = promtest.MustGather(t, reg) - m = promtest.MustFindMetric(t, mfs, "auth_prometheus_requests_total", map[string]string{"method": "FindAuthorizationByToken", "error": "true"}) - if got := m.GetCounter().GetValue(); got != 1 { - t.Fatalf("exp 1 request, got %v", got) - } - - if _, _, err := svc.FindAuthorizations(ctx, platform.AuthorizationFilter{}); err != forced { - t.Fatalf("expected forced error, got %v", err) - } - mfs = promtest.MustGather(t, reg) - m = promtest.MustFindMetric(t, mfs, "auth_prometheus_requests_total", map[string]string{"method": "FindAuthorizations", "error": "true"}) - if got := m.GetCounter().GetValue(); got != 1 { - t.Fatalf("exp 1 request, got %v", got) - } - - if err := svc.CreateAuthorization(ctx, nil); err != forced { - t.Fatalf("expected forced error, got %v", err) - } - mfs = promtest.MustGather(t, reg) - m = promtest.MustFindMetric(t, mfs, "auth_prometheus_requests_total", map[string]string{"method": "CreateAuthorization", "error": "true"}) - if got := m.GetCounter().GetValue(); got != 1 { - t.Fatalf("exp 1 request, 
got %v", got) - } - - if err := svc.DeleteAuthorization(ctx, tempID); err != forced { - t.Fatalf("expected forced error, got %v", err) - } - mfs = promtest.MustGather(t, reg) - m = promtest.MustFindMetric(t, mfs, "auth_prometheus_requests_total", map[string]string{"method": "DeleteAuthorization", "error": "true"}) - if got := m.GetCounter().GetValue(); got != 1 { - t.Fatalf("exp 1 request, got %v", got) - } -} diff --git a/prometheus/codec.go b/prometheus/codec.go deleted file mode 100644 index 215342ff688..00000000000 --- a/prometheus/codec.go +++ /dev/null @@ -1,224 +0,0 @@ -package prometheus - -import ( - "bytes" - "encoding/json" - "io" - "math" - "strconv" - "time" - - "github.com/influxdata/influxdb/v2/models" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" -) - -// Encoder transforms metric families into bytes. -type Encoder interface { - // Encode encodes metrics into bytes. - Encode(mfs []*dto.MetricFamily) ([]byte, error) -} - -// Expfmt encodes metric families into prometheus exposition format. -type Expfmt struct { - Format expfmt.Format -} - -// Encode encodes metrics into prometheus exposition format bytes. -func (e *Expfmt) Encode(mfs []*dto.MetricFamily) ([]byte, error) { - return EncodeExpfmt(mfs, e.Format) -} - -// DecodeExpfmt decodes the reader of format into metric families. -func DecodeExpfmt(r io.Reader, format expfmt.Format) ([]*dto.MetricFamily, error) { - dec := expfmt.NewDecoder(r, format) - mfs := []*dto.MetricFamily{} - for { - var mf dto.MetricFamily - if err := dec.Decode(&mf); err != nil { - if err == io.EOF { - break - } - if err != nil { - return nil, err - } - } - mfs = append(mfs, &mf) - } - return mfs, nil -} - -// EncodeExpfmt encodes the metrics family (defaults to expfmt.FmtProtoDelim). -func EncodeExpfmt(mfs []*dto.MetricFamily, opts ...expfmt.Format) ([]byte, error) { - format := expfmt.FmtProtoDelim - if len(opts) != 0 && opts[0] != "" { - format = opts[0] - } - buf := &bytes.Buffer{} - enc := expfmt.NewEncoder(buf, format) - for _, mf := range mfs { - if err := enc.Encode(mf); err != nil { - return nil, err - } - } - return buf.Bytes(), nil -} - -// JSON encodes metric families into JSON. -type JSON struct{} - -// Encode encodes metrics JSON bytes. This not always works -// as some prometheus values are NaN or Inf. -func (j *JSON) Encode(mfs []*dto.MetricFamily) ([]byte, error) { - return EncodeJSON(mfs) -} - -// DecodeJSON decodes a JSON array of metrics families. -func DecodeJSON(r io.Reader) ([]*dto.MetricFamily, error) { - dec := json.NewDecoder(r) - families := []*dto.MetricFamily{} - for { - mfs := []*dto.MetricFamily{} - - if err := dec.Decode(&mfs); err == io.EOF { - break - } else if err != nil { - return nil, err - } - families = append(families, mfs...) - } - return families, nil -} - -// EncodeJSON encodes the metric families to JSON. -func EncodeJSON(mfs []*dto.MetricFamily) ([]byte, error) { - return json.Marshal(mfs) -} - -const ( - // just in case the definition of time.Nanosecond changes from 1. - nsPerMilliseconds = int64(time.Millisecond / time.Nanosecond) -) - -// LineProtocol encodes metric families into influxdb line protocol. -type LineProtocol struct{} - -// Encode encodes metrics into line protocol format bytes. -func (l *LineProtocol) Encode(mfs []*dto.MetricFamily) ([]byte, error) { - return EncodeLineProtocol(mfs) -} - -// EncodeLineProtocol converts prometheus metrics into line protocol. 
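//
// A minimal round-trip sketch as seen from a calling package (the response
// body is an assumed /metrics reader; error handling is elided):
//
//     mfs, err := prometheus.DecodeExpfmt(resp.Body, expfmt.FmtText)
//     if err != nil {
//         return err
//     }
//     lp, err := prometheus.EncodeLineProtocol(mfs)
//     if err != nil {
//         return err
//     }
//     _ = lp // line protocol bytes, one point per metric
//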
-func EncodeLineProtocol(mfs []*dto.MetricFamily) ([]byte, error) { - var b bytes.Buffer - - pts := points(mfs) - for _, p := range pts { - if _, err := b.WriteString(p.String()); err != nil { - return nil, err - } - if err := b.WriteByte('\n'); err != nil { - return nil, err - } - } - return b.Bytes(), nil -} - -func points(mfs []*dto.MetricFamily) models.Points { - pts := make(models.Points, 0, len(mfs)) - for _, mf := range mfs { - mts := make(models.Points, 0, len(mf.Metric)) - name := mf.GetName() - for _, m := range mf.Metric { - ts := tags(m.Label) - fs := fields(mf.GetType(), m) - tm := timestamp(m) - - pt, err := models.NewPoint(name, ts, fs, tm) - if err != nil { - continue - } - mts = append(mts, pt) - } - pts = append(pts, mts...) - } - - return pts -} - -func timestamp(m *dto.Metric) time.Time { - var tm time.Time - if m.GetTimestampMs() > 0 { - tm = time.Unix(0, m.GetTimestampMs()*nsPerMilliseconds) - } - return tm - -} - -func tags(labels []*dto.LabelPair) models.Tags { - ts := make(models.Tags, len(labels)) - for i, label := range labels { - ts[i] = models.NewTag([]byte(label.GetName()), []byte(label.GetValue())) - } - return ts -} - -func fields(typ dto.MetricType, m *dto.Metric) models.Fields { - switch typ { - case dto.MetricType_SUMMARY: - return summary(m.GetSummary()) - case dto.MetricType_HISTOGRAM: - return histogram(m.GetHistogram()) - case dto.MetricType_GAUGE: - return value("gauge", m.GetGauge()) - case dto.MetricType_COUNTER: - return value("counter", m.GetCounter()) - case dto.MetricType_UNTYPED: - return value("value", m.GetUntyped()) - default: - return nil - } -} - -func summary(s *dto.Summary) map[string]interface{} { - fields := make(map[string]interface{}, len(s.Quantile)+2) - for _, q := range s.Quantile { - v := q.GetValue() - if !math.IsNaN(v) { - key := strconv.FormatFloat(q.GetQuantile(), 'f', -1, 64) - fields[key] = v - } - } - - fields["count"] = float64(s.GetSampleCount()) - fields["sum"] = float64(s.GetSampleSum()) - return fields -} - -func histogram(hist *dto.Histogram) map[string]interface{} { - fields := make(map[string]interface{}, len(hist.Bucket)+2) - for _, b := range hist.Bucket { - k := strconv.FormatFloat(b.GetUpperBound(), 'f', -1, 64) - fields[k] = float64(b.GetCumulativeCount()) - } - - fields["count"] = float64(hist.GetSampleCount()) - fields["sum"] = float64(hist.GetSampleSum()) - - return fields -} - -type valuer interface { - GetValue() float64 -} - -func value(typ string, m valuer) models.Fields { - vs := make(models.Fields, 1) - - v := m.GetValue() - if !math.IsNaN(v) { - vs[typ] = v - } - - return vs -} diff --git a/prometheus/codec_test.go b/prometheus/codec_test.go deleted file mode 100644 index 8fbe13b8d73..00000000000 --- a/prometheus/codec_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package prometheus_test - -import ( - "bytes" - "testing" - - pr "github.com/influxdata/influxdb/v2/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" -) - -func Test_CodecExpfmt(t *testing.T) { - mf1 := []*dto.MetricFamily{NewCounter("mf1", 1.0, pr.L("n1", "v1"))} - mf2 := []*dto.MetricFamily{NewCounter("mf2", 1.0, pr.L("n2", "v2"))} - - b1, err := pr.EncodeExpfmt(mf1) - if err != nil { - t.Fatalf("encodeExpfmt() error = %v", err) - } - - got1, err := pr.DecodeExpfmt(bytes.NewBuffer(b1), expfmt.FmtProtoDelim) - if err != nil { - t.Fatalf("decodeExpfmt() error = %v", err) - } - - for i := range got1 { - if got1[i].String() != mf1[i].String() { - t.Errorf("codec1() = %v, want 
%v", got1[i].String(), mf1[i].String()) - } - } - - b2, err := pr.EncodeExpfmt(mf2) - if err != nil { - t.Fatalf("encodeExpfmt() error = %v", err) - } - - got2, err := pr.DecodeExpfmt(bytes.NewBuffer(b2), expfmt.FmtProtoDelim) - if err != nil { - t.Fatalf("decodeExpfmt() error = %v", err) - } - - for i := range got2 { - if got2[i].String() != mf2[i].String() { - t.Errorf("codec2() = %v, want %v", got2[i].String(), mf2[i].String()) - } - } - - b3 := append(b2, b1...) - b3 = append(b3, b2...) - - mf3 := []*dto.MetricFamily{ - NewCounter("mf2", 1.0, pr.L("n2", "v2")), - NewCounter("mf1", 1.0, pr.L("n1", "v1")), - NewCounter("mf2", 1.0, pr.L("n2", "v2")), - } - - got3, err := pr.DecodeExpfmt(bytes.NewBuffer(b3), expfmt.FmtProtoDelim) - if err != nil { - t.Fatalf("decodeExpfmt() error = %v", err) - } - - for i := range got3 { - if got3[i].String() != mf3[i].String() { - t.Errorf("codec3() = %v, want %v", got3[i].String(), mf3[i].String()) - } - } -} - -func Test_CodecJSON(t *testing.T) { - mf1 := []*dto.MetricFamily{NewCounter("mf1", 1.0, pr.L("n1", "v1")), NewCounter("mf1", 1.0, pr.L("n1", "v1"))} - mf2 := []*dto.MetricFamily{NewCounter("mf2", 1.0, pr.L("n2", "v2"))} - - b1, err := pr.EncodeJSON(mf1) - if err != nil { - t.Fatalf("encodeJSON() error = %v", err) - } - - got1, err := pr.DecodeJSON(bytes.NewBuffer(b1)) - if err != nil { - t.Fatalf("decodeJSON() error = %v", err) - } - - for i := range got1 { - if got1[i].String() != mf1[i].String() { - t.Errorf("codec1() = %v, want %v", got1[i].String(), mf1[i].String()) - } - } - - b2, err := pr.EncodeJSON(mf2) - if err != nil { - t.Fatalf("encodeJSON() error = %v", err) - } - - got2, err := pr.DecodeJSON(bytes.NewBuffer(b2)) - if err != nil { - t.Fatalf("decodeJSON() error = %v", err) - } - - for i := range got2 { - if got2[i].String() != mf2[i].String() { - t.Errorf("codec2() = %v, want %v", got2[i].String(), mf2[i].String()) - } - } - - b3 := append(b2, b1...) - b3 = append(b3, b2...) - - mf3 := []*dto.MetricFamily{ - NewCounter("mf2", 1.0, pr.L("n2", "v2")), - NewCounter("mf1", 1.0, pr.L("n1", "v1")), - NewCounter("mf1", 1.0, pr.L("n1", "v1")), - NewCounter("mf2", 1.0, pr.L("n2", "v2")), - } - - got3, err := pr.DecodeJSON(bytes.NewBuffer(b3)) - if err != nil { - t.Fatalf("decodeJSON() error = %v", err) - } - - for i := range got3 { - if got3[i].String() != mf3[i].String() { - t.Errorf("codec3() = %v, want %v", got3[i].String(), mf3[i].String()) - } - } -} diff --git a/prometheus/example_test.go b/prometheus/example_test.go deleted file mode 100644 index 205832f76cf..00000000000 --- a/prometheus/example_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package prometheus_test - -import ( - "bytes" - "fmt" - - "github.com/influxdata/influxdb/v2/prometheus" - pr "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" -) - -const metrics = ` -# HELP go_goroutines Number of goroutines that currently exist. -# TYPE go_goroutines gauge -go_goroutines 85 -# HELP go_info Information about the Go environment. -# TYPE go_info gauge -go_info{version="go1.11.4"} 1 -# HELP storage_compactions_queued Number of queued compactions. 
-# TYPE storage_compactions_queued gauge -storage_compactions_queued{level="1"} 1 -storage_compactions_queued{level="2"} 2 -` - -func ExampleFilter_Gather() { - mfs, _ := prometheus.DecodeExpfmt(bytes.NewBufferString(metrics), expfmt.FmtText) - fmt.Printf("Start with %d metric families\n", len(mfs)) - fmt.Printf("%s\n", metrics) - - filter := &prometheus.Filter{ - Gatherer: pr.GathererFunc(func() ([]*dto.MetricFamily, error) { - return mfs, nil - }), - Matcher: prometheus.NewMatcher(). - Family("go_goroutines"). - Family( - "storage_compactions_queued", - prometheus.L("level", "2"), - ), - } - - fmt.Printf("Filtering for the entire go_goroutines family and\njust the level=2 label of the storage_compactions_queued family.\n\n") - filtered, _ := filter.Gather() - b, _ := prometheus.EncodeExpfmt(filtered, expfmt.FmtText) - - fmt.Printf("After filtering:\n\n%s", string(b)) - - // Output: - // Start with 3 metric families - // - // # HELP go_goroutines Number of goroutines that currently exist. - // # TYPE go_goroutines gauge - // go_goroutines 85 - // # HELP go_info Information about the Go environment. - // # TYPE go_info gauge - // go_info{version="go1.11.4"} 1 - // # HELP storage_compactions_queued Number of queued compactions. - // # TYPE storage_compactions_queued gauge - // storage_compactions_queued{level="1"} 1 - // storage_compactions_queued{level="2"} 2 - // - // Filtering for the entire go_goroutines family and - // just the level=2 label of the storage_compactions_queued family. - // - // After filtering: - // - // # HELP go_goroutines Number of goroutines that currently exist. - // # TYPE go_goroutines gauge - // go_goroutines 85 - // # HELP storage_compactions_queued Number of queued compactions. - // # TYPE storage_compactions_queued gauge - // storage_compactions_queued{level="2"} 2 -} diff --git a/prometheus/filter.go b/prometheus/filter.go deleted file mode 100644 index 0426afb821a..00000000000 --- a/prometheus/filter.go +++ /dev/null @@ -1,128 +0,0 @@ -package prometheus - -import ( - "fmt" - "sort" - "strings" - - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" -) - -var _ prometheus.Gatherer = (*Filter)(nil) - -// Filter filters the metrics from Gather using Matcher. -type Filter struct { - Gatherer prometheus.Gatherer - Matcher Matcher -} - -// Gather filters all metrics to only those that match the Matcher. -func (f *Filter) Gather() ([]*dto.MetricFamily, error) { - mfs, err := f.Gatherer.Gather() - if err != nil { - return nil, err - } - return f.Matcher.Match(mfs), nil -} - -// Matcher is used to match families of prometheus metrics. -type Matcher map[string]Labels // family name to label/value - -// NewMatcher returns a new matcher. -func NewMatcher() Matcher { - return Matcher{} -} - -// Family helps construct match by adding a metric family to match to. -func (m Matcher) Family(name string, lps ...*dto.LabelPair) Matcher { - // prometheus metrics labels are sorted by label name. - sort.Slice(lps, func(i, j int) bool { - return lps[i].GetName() < lps[j].GetName() - }) - - pairs := &labelPairs{ - Label: lps, - } - - family, ok := m[name] - if !ok { - family = make(Labels) - } - - family[pairs.String()] = true - m[name] = family - return m -} - -// Match returns all metric families that match. 
-func (m Matcher) Match(mfs []*dto.MetricFamily) []*dto.MetricFamily { - if len(mfs) == 0 { - return mfs - } - - filteredFamilies := []*dto.MetricFamily{} - for _, mf := range mfs { - labels, ok := m[mf.GetName()] - if !ok { - continue - } - - metrics := []*dto.Metric{} - match := false - for _, metric := range mf.Metric { - if labels.Match(metric) { - match = true - metrics = append(metrics, metric) - } - } - if match { - filteredFamilies = append(filteredFamilies, &dto.MetricFamily{ - Name: mf.Name, - Help: mf.Help, - Type: mf.Type, - Metric: metrics, - }) - } - } - - sort.Sort(familySorter(filteredFamilies)) - return filteredFamilies -} - -// L is used with Family to create a series of label pairs for matching. -func L(name, value string) *dto.LabelPair { - return &dto.LabelPair{ - Name: proto.String(name), - Value: proto.String(value), - } -} - -// Labels are string representations of a set of prometheus label pairs that -// are used to match to metric. -type Labels map[string]bool - -// Match checks if the metric's labels matches this set of labels. -func (ls Labels) Match(metric *dto.Metric) bool { - lp := &labelPairs{metric.Label} - return ls[lp.String()] || ls[""] // match empty string so no labels can be matched. -} - -// labelPairs is used to serialize a portion of dto.Metric into a serializable -// string. -type labelPairs struct { - Label []*dto.LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` -} - -func (l *labelPairs) Reset() {} - -func (l *labelPairs) String() string { - var a []string - for _, lbl := range l.Label { - a = append(a, fmt.Sprintf("label:<%s> ", lbl.String())) - } - return strings.Join(a, "") -} - -func (*labelPairs) ProtoMessage() {} diff --git a/prometheus/filter_test.go b/prometheus/filter_test.go deleted file mode 100644 index be11ef1cffc..00000000000 --- a/prometheus/filter_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package prometheus_test - -import ( - "fmt" - "reflect" - "testing" - - pr "github.com/influxdata/influxdb/v2/prometheus" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" -) - -func TestFilter_Gather(t *testing.T) { - type fields struct { - Gatherer prometheus.Gatherer - Matcher pr.Matcher - } - tests := []struct { - name string - fields fields - want []*dto.MetricFamily - wantErr bool - }{ - { - name: "no metrics returns nil", - fields: fields{ - Gatherer: prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - return nil, nil - }), - Matcher: pr.NewMatcher(). - Family("http_api_requests_total", - pr.L("handler", "platform"), - pr.L("method", "GET"), - pr.L("path", "/api/v2"), - pr.L("status", "2XX"), - ), - }, - }, - { - name: "gather error returns error", - fields: fields{ - Gatherer: prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - return nil, fmt.Errorf("e1") - }), - }, - wantErr: true, - }, - { - name: "no matches returns no metric families", - fields: fields{ - Gatherer: prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - mf := &dto.MetricFamily{ - Name: proto.String("n1"), - Help: proto.String("h1"), - } - return []*dto.MetricFamily{mf}, nil - }), - Matcher: pr.NewMatcher(). 
- Family("http_api_requests_total", - pr.L("handler", "platform"), - pr.L("method", "GET"), - pr.L("path", "/api/v2"), - pr.L("status", "2XX"), - ), - }, - want: []*dto.MetricFamily{}, - }, - { - name: "matching family without metric matches nothing", - fields: fields{ - Gatherer: prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - mf := &dto.MetricFamily{ - Name: proto.String("go_memstats_frees_total"), - } - return []*dto.MetricFamily{mf}, nil - }), - Matcher: pr.NewMatcher(). - Family("go_memstats_frees_total"), - }, - want: []*dto.MetricFamily{}, - }, - { - name: "matching family with no labels matches", - fields: fields{ - Gatherer: prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - return []*dto.MetricFamily{NewCounter("go_memstats_frees_total", 1.0)}, nil - }), - Matcher: pr.NewMatcher(). - Family("go_memstats_frees_total"), - }, - want: []*dto.MetricFamily{NewCounter("go_memstats_frees_total", 1.0)}, - }, - { - name: "matching with labels a family with labels matches", - fields: fields{ - Gatherer: prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - return []*dto.MetricFamily{NewCounter("go_memstats_frees_total", 1.0, pr.L("n1", "v1"))}, nil - }), - Matcher: pr.NewMatcher(). - Family("go_memstats_frees_total", pr.L("n1", "v1")), - }, - want: []*dto.MetricFamily{NewCounter("go_memstats_frees_total", 1.0, pr.L("n1", "v1"))}, - }, - { - name: "matching a family that has no labels with labels matches", - fields: fields{ - Gatherer: prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - return []*dto.MetricFamily{NewCounter("go_memstats_frees_total", 1.0, pr.L("n1", "v1"))}, nil - }), - Matcher: pr.NewMatcher(). - Family("go_memstats_frees_total"), - }, - want: []*dto.MetricFamily{NewCounter("go_memstats_frees_total", 1.0, pr.L("n1", "v1"))}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - f := &pr.Filter{ - Gatherer: tt.fields.Gatherer, - Matcher: tt.fields.Matcher, - } - got, err := f.Gather() - if (err != nil) != tt.wantErr { - t.Errorf("Filter.Gather() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Filter.Gather() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/prometheus/influx.go b/prometheus/influx.go deleted file mode 100644 index ff14dec1aee..00000000000 --- a/prometheus/influx.go +++ /dev/null @@ -1,56 +0,0 @@ -package prometheus - -import ( - "runtime" - "strconv" - "time" - - platform "github.com/influxdata/influxdb/v2" - "github.com/prometheus/client_golang/prometheus" -) - -type influxCollector struct { - influxInfoDesc *prometheus.Desc - influxUptimeDesc *prometheus.Desc - start time.Time -} - -// NewInfluxCollector returns a collector which exports influxdb process metrics. -func NewInfluxCollector(procID string, build platform.BuildInfo) prometheus.Collector { - return &influxCollector{ - influxInfoDesc: prometheus.NewDesc( - "influxdb_info", - "Information about the influxdb environment.", - nil, prometheus.Labels{ - "version": build.Version, - "commit": build.Commit, - "build_date": build.Date, - "os": runtime.GOOS, - "arch": runtime.GOARCH, - "cpus": strconv.Itoa(runtime.NumCPU()), - }, - ), - influxUptimeDesc: prometheus.NewDesc( - "influxdb_uptime_seconds", - "influxdb process uptime in seconds", - nil, prometheus.Labels{ - "id": procID, - }, - ), - start: time.Now(), - } -} - -// Describe returns all descriptions of the collector. 
-func (c *influxCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- c.influxInfoDesc - ch <- c.influxUptimeDesc -} - -// Collect returns the current state of all metrics of the collector. -func (c *influxCollector) Collect(ch chan<- prometheus.Metric) { - ch <- prometheus.MustNewConstMetric(c.influxInfoDesc, prometheus.GaugeValue, 1) - - uptime := time.Since(c.start).Seconds() - ch <- prometheus.MustNewConstMetric(c.influxUptimeDesc, prometheus.GaugeValue, float64(uptime)) -} diff --git a/prometheus/metric_recorder.go b/prometheus/metric_recorder.go deleted file mode 100644 index e1c6dd168b6..00000000000 --- a/prometheus/metric_recorder.go +++ /dev/null @@ -1,81 +0,0 @@ -package prometheus - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/http/metric" - "github.com/prometheus/client_golang/prometheus" -) - -// EventRecorder implements http/metric.EventRecorder. It is used to collect -// http api metrics. -type EventRecorder struct { - count *prometheus.CounterVec - requestBytes *prometheus.CounterVec - responseBytes *prometheus.CounterVec -} - -// NewEventRecorder returns an instance of a metric event recorder. Subsystem is expected to be -// descriptive of the type of metric being recorded. Possible values may include write, query, -// task, dashboard, etc. -// -// # The general structure of the metrics produced from the metric recorder should be -// -// http__request_count{org_id=, status=, endpoint=} ... -// http__request_bytes{org_id=, status=, endpoint=} ... -// http__response_bytes{org_id=, status=, endpoint=} ... -func NewEventRecorder(subsystem string) *EventRecorder { - const namespace = "http" - - labels := []string{"org_id", "status", "endpoint"} - - count := prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "request_count", - Help: "Total number of query requests", - }, labels) - - requestBytes := prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "request_bytes", - Help: "Count of bytes received", - }, labels) - - responseBytes := prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "response_bytes", - Help: "Count of bytes returned", - }, labels) - - return &EventRecorder{ - count: count, - requestBytes: requestBytes, - responseBytes: responseBytes, - } -} - -// Record metric records the request count, response bytes, and request bytes with labels -// for the org, endpoint, and status. -func (r *EventRecorder) Record(ctx context.Context, e metric.Event) { - labels := prometheus.Labels{ - "org_id": e.OrgID.String(), - "endpoint": e.Endpoint, - "status": fmt.Sprintf("%d", e.Status), - } - r.count.With(labels).Inc() - r.requestBytes.With(labels).Add(float64(e.RequestBytes)) - r.responseBytes.With(labels).Add(float64(e.ResponseBytes)) -} - -// PrometheusCollectors exposes the prometheus collectors associated with a metric recorder. 
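//
// A minimal usage sketch (the subsystem name and event values are assumed
// examples):
//
//     rec := NewEventRecorder("query")
//     // register rec.PrometheusCollectors() with the metrics registry, then:
//     rec.Record(ctx, metric.Event{
//         OrgID:         orgID,
//         Endpoint:      "/api/v2/query",
//         Status:        200,
//         RequestBytes:  reqBytes,
//         ResponseBytes: respBytes,
//     })
//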
-func (r *EventRecorder) PrometheusCollectors() []prometheus.Collector { - return []prometheus.Collector{ - r.count, - r.requestBytes, - r.responseBytes, - } -} diff --git a/prometheus/prometheus_test.go b/prometheus/prometheus_test.go deleted file mode 100644 index 49b23f9af85..00000000000 --- a/prometheus/prometheus_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package prometheus_test - -import ( - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" -) - -func NewCounter(name string, v float64, ls ...*dto.LabelPair) *dto.MetricFamily { - m := &dto.Metric{ - Label: ls, - Counter: &dto.Counter{ - Value: &v, - }, - } - return &dto.MetricFamily{ - Name: proto.String(name), - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{m}, - } -} diff --git a/prometheus/sort.go b/prometheus/sort.go deleted file mode 100644 index 28789ccfe1f..00000000000 --- a/prometheus/sort.go +++ /dev/null @@ -1,35 +0,0 @@ -package prometheus - -import dto "github.com/prometheus/client_model/go" - -// labelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. -type labelPairSorter []*dto.LabelPair - -func (s labelPairSorter) Len() int { - return len(s) -} - -func (s labelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s labelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} - -// familySorter implements sort.Interface. It is used to sort a slice of -// dto.MetricFamily pointers. -type familySorter []*dto.MetricFamily - -func (s familySorter) Len() int { - return len(s) -} - -func (s familySorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s familySorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} diff --git a/prometheus/transformer.go b/prometheus/transformer.go deleted file mode 100644 index 6ca8a552d85..00000000000 --- a/prometheus/transformer.go +++ /dev/null @@ -1,90 +0,0 @@ -package prometheus - -import ( - "sort" - - dto "github.com/prometheus/client_model/go" -) - -// Transformer modifies prometheus metrics families. -type Transformer interface { - // Transform updates the metrics family - Transform(mfs []*dto.MetricFamily) []*dto.MetricFamily -} - -var _ Transformer = (*AddLabels)(nil) - -// AddLabels adds labels to all metrics. It will overwrite -// the label if it already exists. -type AddLabels struct { - Labels map[string]string -} - -// Transform adds labels to the metrics. -func (a *AddLabels) Transform(mfs []*dto.MetricFamily) []*dto.MetricFamily { - for i := range mfs { - for j, m := range mfs[i].Metric { - // Filter out labels to add - labels := m.Label[:0] - for _, l := range m.Label { - if _, ok := a.Labels[l.GetName()]; !ok { - labels = append(labels, l) - } - } - - // Add all new labels to the metric - for k, v := range a.Labels { - labels = append(labels, L(k, v)) - } - sort.Sort(labelPairSorter(labels)) - mfs[i].Metric[j].Label = labels - } - } - return mfs -} - -var _ Transformer = (*RemoveLabels)(nil) - -// RemoveLabels adds labels to all metrics. It will overwrite -// the label if it already exists. -type RemoveLabels struct { - Labels map[string]struct{} -} - -// Transform removes labels from the metrics. 
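//
// A minimal sketch of chaining transformers over gathered families (the label
// and family names are assumed examples; mfs comes from any prometheus
// Gatherer):
//
//     add := &AddLabels{Labels: map[string]string{"org": "myorg"}}
//     drop := &RemoveLabels{Labels: map[string]struct{}{"handler": {}}}
//     rename := &RenameFamilies{FromTo: map[string]string{"http_api_requests_total": "api_requests_total"}}
//     mfs = rename.Transform(drop.Transform(add.Transform(mfs)))
//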
-func (r *RemoveLabels) Transform(mfs []*dto.MetricFamily) []*dto.MetricFamily { - for i := range mfs { - for j, m := range mfs[i].Metric { - // Filter out labels - labels := m.Label[:0] - for _, l := range m.Label { - if _, ok := r.Labels[l.GetName()]; !ok { - labels = append(labels, l) - } - } - mfs[i].Metric[j].Label = labels - } - } - return mfs -} - -var _ Transformer = (*RenameFamilies)(nil) - -// RenameFamilies changes the name of families to another name -type RenameFamilies struct { - FromTo map[string]string -} - -// Transform renames metric families names. -func (r *RenameFamilies) Transform(mfs []*dto.MetricFamily) []*dto.MetricFamily { - renamed := mfs[:0] - for _, mf := range mfs { - if to, ok := r.FromTo[mf.GetName()]; ok { - mf.Name = &to - } - renamed = append(renamed, mf) - - } - sort.Sort(familySorter(renamed)) - return renamed -} diff --git a/prometheus/transformer_test.go b/prometheus/transformer_test.go deleted file mode 100644 index fc79bebc124..00000000000 --- a/prometheus/transformer_test.go +++ /dev/null @@ -1,241 +0,0 @@ -package prometheus_test - -import ( - "reflect" - "testing" - - pr "github.com/influxdata/influxdb/v2/prometheus" - dto "github.com/prometheus/client_model/go" -) - -func TestAddLabels_Transform(t *testing.T) { - type fields struct { - Labels map[string]string - } - type args struct { - mfs []*dto.MetricFamily - } - tests := []struct { - name string - fields fields - args args - want []*dto.MetricFamily - }{ - { - name: "add label from metric replaces label", - fields: fields{ - Labels: map[string]string{ - "handler": "influxdb", - }, - }, - args: args{ - mfs: []*dto.MetricFamily{ - NewCounter("http_api_requests_total", 10, - pr.L("handler", "platform"), - pr.L("method", "GET"), - pr.L("path", "/api/v2"), - pr.L("status", "2XX"), - ), - }, - }, - want: []*dto.MetricFamily{ - NewCounter("http_api_requests_total", 10, - pr.L("handler", "influxdb"), - pr.L("method", "GET"), - pr.L("path", "/api/v2"), - pr.L("status", "2XX"), - ), - }, - }, - { - name: "add label from metric replaces label", - fields: fields{ - Labels: map[string]string{ - "org": "myorg", - }, - }, - args: args{ - mfs: []*dto.MetricFamily{ - NewCounter("http_api_requests_total", 10, - pr.L("handler", "platform"), - pr.L("method", "GET"), - pr.L("path", "/api/v2"), - pr.L("status", "2XX"), - ), - }, - }, - want: []*dto.MetricFamily{ - NewCounter("http_api_requests_total", 10, - pr.L("handler", "platform"), - pr.L("method", "GET"), - pr.L("org", "myorg"), - pr.L("path", "/api/v2"), - pr.L("status", "2XX"), - ), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - a := &pr.AddLabels{ - Labels: tt.fields.Labels, - } - if got := a.Transform(tt.args.mfs); !reflect.DeepEqual(got, tt.want) { - t.Errorf("AddLabels.Transform() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestRemoveLabels_Transform(t *testing.T) { - type fields struct { - Labels map[string]struct{} - } - type args struct { - mfs []*dto.MetricFamily - } - tests := []struct { - name string - fields fields - args args - want []*dto.MetricFamily - }{ - { - name: "remove label from metric", - fields: fields{ - Labels: map[string]struct{}{ - "handler": struct{}{}, - }, - }, - args: args{ - mfs: []*dto.MetricFamily{ - NewCounter("http_api_requests_total", 10, - pr.L("handler", "platform"), - pr.L("method", "GET"), - pr.L("path", "/api/v2"), - pr.L("status", "2XX"), - ), - }, - }, - want: []*dto.MetricFamily{ - NewCounter("http_api_requests_total", 10, - pr.L("method", "GET"), - 
pr.L("path", "/api/v2"), - pr.L("status", "2XX"), - ), - }, - }, - { - name: "no match removes no labels", - fields: fields{ - Labels: map[string]struct{}{ - "handler": struct{}{}, - }, - }, - args: args{ - mfs: []*dto.MetricFamily{ - NewCounter("http_api_requests_total", 10, - pr.L("method", "GET"), - pr.L("path", "/api/v2"), - pr.L("status", "2XX"), - ), - }, - }, - want: []*dto.MetricFamily{ - NewCounter("http_api_requests_total", 10, - pr.L("method", "GET"), - pr.L("path", "/api/v2"), - pr.L("status", "2XX"), - ), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := &pr.RemoveLabels{ - Labels: tt.fields.Labels, - } - if got := r.Transform(tt.args.mfs); !reflect.DeepEqual(got, tt.want) { - t.Errorf("RemoveLabels.Transform() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestRenameFamilies_Transform(t *testing.T) { - type fields struct { - FromTo map[string]string - } - type args struct { - mfs []*dto.MetricFamily - } - tests := []struct { - name string - fields fields - args args - want []*dto.MetricFamily - }{ - { - name: "rename metric family in sort order", - fields: fields{ - FromTo: map[string]string{ - "http_api_requests_total": "api_requests_total", - }, - }, - args: args{ - mfs: []*dto.MetricFamily{ - NewCounter("handler", 10, - pr.L("handler", "platform"), - ), - NewCounter("http_api_requests_total", 10, - pr.L("handler", "platform"), - pr.L("method", "GET"), - pr.L("path", "/api/v2"), - pr.L("status", "2XX"), - ), - }, - }, - want: []*dto.MetricFamily{ - NewCounter("api_requests_total", 10, - pr.L("handler", "platform"), - pr.L("method", "GET"), - pr.L("path", "/api/v2"), - pr.L("status", "2XX"), - ), - NewCounter("handler", 10, - pr.L("handler", "platform"), - ), - }, - }, - { - name: "ignored if not found", - fields: fields{ - FromTo: map[string]string{ - "http_api_requests_total": "api_requests_total", - }, - }, - args: args{ - mfs: []*dto.MetricFamily{ - NewCounter("handler", 10, - pr.L("handler", "platform"), - ), - }, - }, - want: []*dto.MetricFamily{ - NewCounter("handler", 10, - pr.L("handler", "platform"), - ), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := &pr.RenameFamilies{ - FromTo: tt.fields.FromTo, - } - if got := r.Transform(tt.args.mfs); !reflect.DeepEqual(got, tt.want) { - t.Errorf("RenameFamilies.Transform() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/query/benchmarks/flux/README.md b/query/benchmarks/flux/README.md deleted file mode 100644 index b50f0828229..00000000000 --- a/query/benchmarks/flux/README.md +++ /dev/null @@ -1,200 +0,0 @@ -## Benchmarks - -The output generated by `curl` uses `curl-format.txt`, present along side this readme. - -Telegraf is configured with the supplied `telegraf.conf` to capture metrics from fluxd and influxdb using the -Prometheus `/metrics` HTTP endpoint and machine metrics including CPU usage and disk I/O. Note that `influxd` is running -on port `8186`, allowing a separate `influxd` on the default port to receive metrics from Telegraf. - -## Dataset #1 - -| | | -| ----- | ----- | -| series | 100,000 | -| pps | 3,000 | -| shards | 12 | -| pps / shard | 250 | -| total points | 300,000,000 | - -**pps**: points per series - - -### Hardware - -| | | -| ----- | ----- | -| AWS instance type | c3.4xlarge | - - -### Generate dataset - -1. Use [ingen][ingen] to populate a database with data. 
- - ```sh - $ ingen -p=250 -t=1000,100 -shards=12 -start-time="2017-11-01T00:00:00Z" -data-path=~/.influxdb/data -meta-path=~/.influxdb/meta - ``` - - The previous command will - - * populate a database named `db` (default), - * create 100,000 series (1000×100), - * made up of 2 tag keys (`tag0` and `tag1`) each with 1000 and 100 tag values respectively. - * 250 points per series, per shard, for a total of 3,000 points per series. - * Points will start from `2017-11-01 00:00:00 UTC` and - * span 12 shards. - - -### Flux queries - -Query #1 - -```sh -HOST=localhost:8093; curl -w "@curl-format.txt" -H 'Accept: text/plain' -o /dev/null -s http://${HOST}/query \ - --data-urlencode 'q=from(bucket:"db/autogen").range(start:2017-11-01T00:00:00Z, stop:2017-11-02T00:00:00Z).filter(exp:{"_measurement" == "m0" and "_field" == "v0" and $ > 0}).sum()' - - -time_starttransfer: 0.138 -size_download: 5800000 -time_total: 7.578 - -``` - -Query #2 - -```sh -HOST=localhost:8093; curl -w "@curl-format.txt" -H 'Accept: text/plain' -o /dev/null -s http://${HOST}/query \ - --data-urlencode 'q=from(bucket:"db/autogen").range(start:2017-11-01T00:00:00Z, stop:2017-11-05T00:00:00Z).filter(exp:{"_measurement" == "m0" and "_field" == "v0" and $ > 0}).sum()' - - -time_starttransfer: 0.305 -size_download: 5900000 -time_total: 17.909 -``` - -Query #3 - -```sh -HOST=localhost:8093; curl -w "@curl-format.txt" -H 'Accept: text/plain' -o /dev/null -s http://${HOST}/query \ - --data-urlencode 'q=from(bucket:"db/autogen").range(start:2017-11-01T00:00:00Z, stop:2017-11-05T00:00:00Z).filter(exp:{"_measurement" == "m0" and "_field" == "v0" and $ > 0}).group(by:["tag0"]).sum()' - - -time_starttransfer: 22.727 -size_download: 60000 -time_total: 22.730 -``` - -Query #4 - -```sh -HOST=localhost:8093; curl -w "@curl-format.txt" -H 'Accept: text/plain' -o /dev/null -s http://${HOST}/query \ - --data-urlencode 'q=from(bucket:"db/autogen").range(start:2017-11-01T00:00:00Z, stop:2017-11-13T00:00:00Z).filter(exp:{"_measurement" == "m0" and "_field" == "v0" and $ > 0}).sum()' - - -time_starttransfer: 0.713 -size_download: 5900000 -time_total: 44.159 -``` - -Query #5 - -```sh -HOST=localhost:8093; curl -w "@curl-format.txt" -H 'Accept: text/plain' -o /dev/null -s http://${HOST}/query \ - --data-urlencode 'q=from(bucket:"db/autogen").range(start:2017-11-01T00:00:00Z, stop:2017-11-13T00:00:00Z).filter(exp:{"_measurement" == "m0" and "_field" == "v0" and $ > 0}).group(by:["tag0"]).sum()' - - -time_starttransfer: 56.257 -size_download: 60000 -time_total: 56.261 -``` - -## Dataset #2 - -| | | -| ----- | ----- | -| series | 10,000,000 | -| pps | 1,000 | -| shards | 4 | -| pps / shard | 250 | -| total points | 10,000,000,000 | - -**pps**: points per series - - -### Hardware - -| | | -| ----- | ----- | -| AWS instance type | c5.4xlarge | - - -### Generate dataset - -1. Use [ingen][ingen] to populate a database with data. - - ```sh - $ ingen -p=250 -t=10000,100,10 -shards=4 -start-time="2017-11-01T00:00:00Z" -data-path=~/.influxdb/data -meta-path=~/.influxdb/meta - ``` - - The previous command will - - * populate a database named `db` (default), - * create 10,000,000 series (10000×100×10), - * made up of 3 tag keys (`tag0`, `tag1`, `tag2`) each with 10000, 100 and 10 tag values respectively. - * 250 points per series, per shard, for a total of 1,000 points per series. - * Points will start from `2017-11-01 00:00:00 UTC` and - * span 4 shards. 
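The series and point counts quoted in the tables for Dataset #1 and Dataset #2 follow directly from the `ingen` flags above. A minimal Go sketch of that arithmetic (not part of the original benchmark tooling; the variable names are illustrative only):

```go
package main

import "fmt"

func main() {
	// Dataset #1: ingen -p=250 -t=1000,100 -shards=12
	var d1Series int64 = 1000 * 100 // two tag keys -> 100,000 series
	var d1PPS int64 = 250 * 12      // 250 points per series per shard x 12 shards -> 3,000 pps
	fmt.Println(d1Series, d1PPS, d1Series*d1PPS) // 100000 3000 300000000

	// Dataset #2: ingen -p=250 -t=10000,100,10 -shards=4
	var d2Series int64 = 10000 * 100 * 10 // three tag keys -> 10,000,000 series
	var d2PPS int64 = 250 * 4             // 250 points per series per shard x 4 shards -> 1,000 pps
	fmt.Println(d2Series, d2PPS, d2Series*d2PPS) // 10000000 1000 10000000000
}
```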
- - -### Flux queries - -Query #1 - -```sh -HOST=localhost:8093; curl -w "@curl-format.txt" -H 'Accept: text/plain' -o /dev/null -s http://${HOST}/query \ - --data-urlencode 'q=from(bucket:"db/autogen").range(start:2017-11-01T00:00:00Z, stop:2017-11-05T00:00:00Z).filter(exp:{"_measurement" == "m0" and "_field" == "v0" and "tag1" == "value00"}).group(by:["tag0"]).sum()' - - -time_starttransfer: 0.325 -size_download: 7200000 -time_total: 11.437 -``` - -Query #2 - -```sh -HOST=localhost:8093; curl -w "@curl-format.txt" -H 'Accept: text/plain' -o /dev/null -s http://${HOST}/query \ - --data-urlencode 'q=from(bucket:"db/autogen").range(start:2017-11-01T00:00:00Z, stop:2017-11-05T00:00:00Z).filter(exp:{"_measurement" == "m0" and "_field" == "v0" and "tag1" == "value00"}).group(by:["tag0"]).sum()' - - -time_starttransfer: 13.174 -size_download: 600000 -time_total: 13.215 -``` - -Query #3 - -```sh -HOST=localhost:8093; curl -w "@curl-format.txt" -H 'Accept: text/plain' -o /dev/null -s http://${HOST}/query \ - --data-urlencode 'q=from(bucket:"db/autogen").range(start:2017-11-01T00:00:00Z, stop:2017-11-05T00:00:00Z).filter(exp:{"_measurement" == "m0" and "_field" == "v0"}).group(by:["tag0"]).sum()' - - -time_starttransfer: 1190.204 -size_download: 620000 -time_total: 1190.244 -``` - -Query #4 - -```sh -HOST=localhost:8093; curl -w "@curl-format.txt" -H 'Accept: text/plain' -o /dev/null -s http://${HOST}/query \ - --data-urlencode 'q=from(bucket:"db/autogen").range(start:2017-11-01T00:00:00Z, stop:2017-11-05T00:00:00Z).filter(exp:{"_measurement" == "m0" and "_field" == "v0"}).sum()' - - -time_starttransfer: 23.975 -size_download: 720000000 -time_total: 803.254 -``` - - - -[ingen]: https://github.com/influxdata/ingen diff --git a/query/benchmarks/flux/config.toml b/query/benchmarks/flux/config.toml deleted file mode 100644 index 4a5ce33a4ed..00000000000 --- a/query/benchmarks/flux/config.toml +++ /dev/null @@ -1,130 +0,0 @@ -reporting-disabled = false -bind-address = ":8188" - -[meta] - dir = "/home/ubuntu/.influxdb/meta" - retention-autocreate = true - logging-enabled = true - -[data] - dir = "/home/ubuntu/.influxdb/data" - index-version = "inmem" - wal-dir = "/home/ubuntu/.influxdb/wal" - wal-fsync-delay = "0s" - query-log-enabled = true - cache-max-memory-size = 1073741824 - cache-snapshot-memory-size = 26214400 - cache-snapshot-write-cold-duration = "10m0s" - compact-full-write-cold-duration = "4h0m0s" - max-concurrent-compactions = 0 - trace-logging-enabled = false - -[coordinator] - write-timeout = "30s" - max-concurrent-queries = 0 - #query-timeout = "5s" - log-queries-after = "0s" - max-select-point = 0 - max-select-series = 0 - max-select-buckets = 0 - -[retention] - enabled = true - check-interval = "30m0s" - -[shard-precreation] - enabled = true - check-interval = "10m0s" - advance-period = "30m0s" - -[monitor] - store-enabled = false - store-database = "_internal" - store-interval = "10s" - -[subscriber] - enabled = true - http-timeout = "30s" - insecure-skip-verify = false - ca-certs = "" - write-concurrency = 40 - write-buffer-size = 1000 - -[http] - enabled = true - bind-address = ":8186" - auth-enabled = false - log-enabled = false - write-tracing = false - pprof-enabled = true - https-enabled = false - https-certificate = "/etc/ssl/influxdb.pem" - https-private-key = "" - max-row-limit = 0 - max-connection-limit = 0 - shared-secret = "" - realm = "InfluxDB" - unix-socket-enabled = false - bind-socket = "/var/run/influxdb.sock" - #max-body-size = 5 - -[ifql] - enabled = true - 
log-enabled = true - bind-address = ":8082" - -[[graphite]] - enabled = false - bind-address = ":2003" - database = "graphite" - retention-policy = "" - protocol = "tcp" - batch-size = 5000 - batch-pending = 10 - batch-timeout = "1s" - consistency-level = "one" - separator = "." - udp-read-buffer = 0 - -[[collectd]] - enabled = false - bind-address = ":25826" - database = "collectd" - retention-policy = "" - batch-size = 5000 - batch-pending = 10 - batch-timeout = "10s" - read-buffer = 0 - typesdb = "/usr/share/collectd/types.db" - security-level = "none" - auth-file = "/etc/collectd/auth_file" - -[[opentsdb]] - enabled = false - bind-address = ":4242" - database = "opentsdb" - retention-policy = "" - consistency-level = "one" - tls-enabled = false - certificate = "/etc/ssl/influxdb.pem" - batch-size = 1000 - batch-pending = 5 - batch-timeout = "1s" - log-point-errors = true - -[[udp]] - enabled = false - bind-address = ":8089" - database = "udp" - retention-policy = "" - batch-size = 5000 - batch-pending = 10 - read-buffer = 0 - batch-timeout = "1s" - precision = "" - -[continuous_queries] - log-enabled = true - enabled = true - run-interval = "1s" - diff --git a/query/benchmarks/flux/curl-format.txt b/query/benchmarks/flux/curl-format.txt deleted file mode 100644 index 76dfdb0d78f..00000000000 --- a/query/benchmarks/flux/curl-format.txt +++ /dev/null @@ -1,5 +0,0 @@ -\n -time_starttransfer: %{time_starttransfer}\n -size_download: %{size_download}\n -time_total: %{time_total}\n -\n diff --git a/query/benchmarks/flux/telegraf.conf b/query/benchmarks/flux/telegraf.conf deleted file mode 100644 index 9f9ec9a2fa9..00000000000 --- a/query/benchmarks/flux/telegraf.conf +++ /dev/null @@ -1,52 +0,0 @@ - -[global_tags] - -[agent] - interval = "2s" - round_interval = true - - metric_batch_size = 1000 - metric_buffer_limit = 10000 - - collection_jitter = "10ms" - - flush_interval = "10s" - flush_jitter = "1s" - - precision = "" - debug = false - quiet = false - hostname = "stuart-bench-oss-0" - omit_hostname = false - -[[inputs.cpu]] - percpu = true - totalcpu = true - fielddrop = ["time_*"] - -[[inputs.disk]] - ignore_fs = ["tmpfs", "devtmpfs"] - -# Read metrics about disk IO by device -[[inputs.diskio]] - ## By default, telegraf will gather stats for all devices including - ## disk partitions. - ## Setting devices will restrict the stats to the specified devices. - # devices = ["sda", "sdb"] - ## Uncomment the following line if you need disk serial numbers. - # skip_serial_number = false - -[[inputs.mem]] - -[[inputs.procstat]] - pattern = "influx" - - prefix = "" - fielddrop = ["cpu_time_*"] - -[[inputs.system]] - -# Read metrics from one or many prometheus clients -[[inputs.prometheus]] - ## An array of urls to scrape metrics from. - urls = ["http://localhost:8093/metrics","http://localhost:8186/metrics"] diff --git a/query/bridges.go b/query/bridges.go deleted file mode 100644 index 73b9c3cd287..00000000000 --- a/query/bridges.go +++ /dev/null @@ -1,207 +0,0 @@ -package query - -import ( - "bufio" - "context" - "io" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/csv" - platform "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/check" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/tracing" -) - -// QueryServiceBridge implements the QueryService interface while consuming the AsyncQueryService interface. 
-type QueryServiceBridge struct { - AsyncQueryService AsyncQueryService -} - -func (b QueryServiceBridge) Query(ctx context.Context, req *Request) (flux.ResultIterator, error) { - query, err := b.AsyncQueryService.Query(ctx, req) - if err != nil { - return nil, err - } - return flux.NewResultIteratorFromQuery(query), nil -} - -// Check returns the status of this query service. Since this bridge consumes an AsyncQueryService, -// which is not available over the network, this check always passes. -func (QueryServiceBridge) Check(context.Context) check.Response { - return check.Response{Name: "Query Service", Status: check.StatusPass} -} - -// QueryServiceProxyBridge implements QueryService while consuming a ProxyQueryService interface. -type QueryServiceProxyBridge struct { - ProxyQueryService ProxyQueryService -} - -func (b QueryServiceProxyBridge) Query(ctx context.Context, req *Request) (flux.ResultIterator, error) { - d := csv.Dialect{ResultEncoderConfig: csv.DefaultEncoderConfig()} - preq := &ProxyRequest{ - Request: *req, - Dialect: d, - } - - r, w := io.Pipe() - asri := &asyncStatsResultIterator{ - r: newBufferedReadCloser(r), - statsReady: make(chan struct{}), - } - - go func() { - stats, err := b.ProxyQueryService.Query(ctx, w, preq) - _ = w.CloseWithError(err) - asri.stats = stats - close(asri.statsReady) - }() - - return asri, nil -} - -func (b QueryServiceProxyBridge) Check(ctx context.Context) check.Response { - return b.ProxyQueryService.Check(ctx) -} - -type asyncStatsResultIterator struct { - flux.ResultIterator - - // The buffered reader and any error that has been - // encountered when reading. - r *bufferedReadCloser - err error - - // Channel that is closed when stats have been written. - statsReady chan struct{} - - // Statistics gathered from calling the proxy query service. - // This field must not be read until statsReady is closed. - stats flux.Statistics -} - -func (i *asyncStatsResultIterator) More() bool { - if i.ResultIterator == nil { - // Peek into the read. If there is an error - // before reading any bytes, do not use the - // result decoder and use the error that is - // returned as the error for this result iterator. - if _, err := i.r.Peek(1); err != nil { - // Only an error if this is not an EOF. - if err != io.EOF { - i.err = err - } - return false - } - - // At least one byte could be read so create a result - // iterator using the reader. 
- dec := csv.NewMultiResultDecoder(csv.ResultDecoderConfig{}) - ri, err := dec.Decode(i.r) - if err != nil { - i.err = err - return false - } - i.ResultIterator = ri - } - return i.ResultIterator.More() -} - -func (i *asyncStatsResultIterator) Err() error { - if i.err != nil { - return i.err - } - return i.ResultIterator.Err() -} - -func (i *asyncStatsResultIterator) Release() { - if i.ResultIterator != nil { - i.ResultIterator.Release() - } -} - -func (i *asyncStatsResultIterator) Statistics() flux.Statistics { - <-i.statsReady - return i.stats -} - -// ProxyQueryServiceAsyncBridge implements ProxyQueryService while consuming an AsyncQueryService -type ProxyQueryServiceAsyncBridge struct { - AsyncQueryService AsyncQueryService -} - -func (b ProxyQueryServiceAsyncBridge) Query(ctx context.Context, w io.Writer, req *ProxyRequest) (flux.Statistics, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - q, err := b.AsyncQueryService.Query(ctx, &req.Request) - if err != nil { - return flux.Statistics{}, tracing.LogError(span, err) - } - - results := flux.NewResultIteratorFromQuery(q) - defer results.Release() - - encoder := req.Dialect.Encoder() - _, err = encoder.Encode(w, results) - // Release the results and collect the statistics regardless of the error. - results.Release() - stats := results.Statistics() - if err != nil { - return stats, tracing.LogError(span, err) - } - - if results, err := q.ProfilerResults(); err != nil { - return stats, tracing.LogError(span, err) - } else if results != nil { - _, err = encoder.Encode(w, results) - if err != nil { - return stats, tracing.LogError(span, err) - } - } - return stats, nil -} - -// Check returns the status of this query service. Since this bridge consumes an AsyncQueryService, -// which is not available over the network, this check always passes. -func (ProxyQueryServiceAsyncBridge) Check(context.Context) check.Response { - return check.Response{Name: "Query Service", Status: check.StatusPass} -} - -// REPLQuerier implements the repl.Querier interface while consuming a QueryService -type REPLQuerier struct { - // Authorization is the authorization to provide for all requests - Authorization *platform.Authorization - // OrganizationID is the ID to provide for all requests - OrganizationID platform2.ID - QueryService QueryService -} - -// Query will pack a query to be sent to a remote server for execution. deps may be safely ignored since -// they will be correctly initialized on the server side. -func (q *REPLQuerier) Query(ctx context.Context, deps flux.Dependencies, compiler flux.Compiler) (flux.ResultIterator, error) { - req := &Request{ - Authorization: q.Authorization, - OrganizationID: q.OrganizationID, - Compiler: compiler, - } - return q.QueryService.Query(ctx, req) -} - -// bufferedReadCloser is a bufio.Reader that implements io.ReadCloser. -type bufferedReadCloser struct { - *bufio.Reader - r io.ReadCloser -} - -// newBufferedReadCloser constructs a new bufferedReadCloser. 
-func newBufferedReadCloser(r io.ReadCloser) *bufferedReadCloser { - return &bufferedReadCloser{ - Reader: bufio.NewReader(r), - r: r, - } -} - -func (br *bufferedReadCloser) Close() error { - return br.r.Close() -} diff --git a/query/bridges_test.go b/query/bridges_test.go deleted file mode 100644 index 94e02c0e3c0..00000000000 --- a/query/bridges_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package query_test - -import ( - "context" - "errors" - "fmt" - "strings" - "testing" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/csv" - "github.com/influxdata/flux/execute/executetest" - "github.com/influxdata/flux/metadata" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/mock" -) - -type failWriter struct { - Err error -} - -// Write returns len(p)/2, w.Err, -// simulating a partial write with some error. -func (w failWriter) Write(p []byte) (int, error) { - return len(p) / 2, w.Err -} - -func TestProxyQueryServiceAsyncBridge_StatsOnClientDisconnect(t *testing.T) { - q := mock.NewQuery() - q.Metadata = metadata.Metadata{ - "foo": []interface{}{"bar"}, - } - r := executetest.NewResult([]*executetest.Table{ - {}, - }) - r.Nm = "a" - q.SetResults(r) - - expReq := &query.Request{OrganizationID: 0x1234} - mockAsyncSvc := &mock.AsyncQueryService{ - QueryF: func(ctx context.Context, req *query.Request) (flux.Query, error) { - if req.OrganizationID != 0x1234 { - panic(fmt.Errorf("unexpected request: %v", req)) - } - return q, nil - }, - } - - // Use an io.Writer that returns a specific error on Write. - w := failWriter{Err: errors.New("something went wrong with the write!")} - - bridge := query.ProxyQueryServiceAsyncBridge{ - AsyncQueryService: mockAsyncSvc, - } - stats, err := bridge.Query(context.Background(), w, &query.ProxyRequest{ - Request: *expReq, - Dialect: csv.DefaultDialect(), - }) - if !strings.Contains(err.Error(), w.Err.Error()) { - t.Fatalf("Query should have failed with an error wrapping failWriter.Err, got %v", err) - } - - // Even though there was an error, the statistics should be from the mock query. - md := stats.Metadata - if md["foo"] == nil || len(md["foo"]) != 1 || md["foo"][0] != "bar" { - t.Fatalf("stats were missing or had wrong metadata: exp metadata[foo]=[bar], got %v", md) - } -} diff --git a/query/control/controller.go b/query/control/controller.go deleted file mode 100644 index a461ad39635..00000000000 --- a/query/control/controller.go +++ /dev/null @@ -1,1153 +0,0 @@ -// Package control keeps track of resources and manages queries. -// -// The Controller manages the resources available to each query by -// managing the memory allocation and concurrency usage of each query. -// The Controller will compile a program by using the passed in language -// and it will start the program using the ResourceManager. -// -// It will guarantee that each program that is started has at least -// one goroutine that it can use with the dispatcher and it will -// ensure a minimum amount of memory is available before the program -// runs. -// -// Other goroutines and memory usage is at the will of the specific -// resource strategy that the Controller is using. -// -// The Controller also provides visibility into the lifetime of the query -// and its current resource usage. 
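For orientation before the package source that follows, here is a minimal, hedged usage sketch of the controller, reconstructed from the calls exercised in the deleted `controller_test.go` further down (`control.New`, `Controller.Query`, `Query.Results`/`Done`/`Err`/`Statistics`, `Controller.Shutdown`). The quotas, bucket name, and Flux script are illustrative assumptions only, and a real server would also set `OrganizationID` and `Authorization` on the request:

```go
package main

import (
	"context"
	"fmt"

	"github.com/influxdata/flux/lang"
	_ "github.com/influxdata/influxdb/v2/fluxinit/static" // Flux runtime initialization, mirroring the blank import in controller_test.go
	"github.com/influxdata/influxdb/v2/query"
	"github.com/influxdata/influxdb/v2/query/control"
	"go.uber.org/zap"
)

func main() {
	ctrl, err := control.New(control.Config{
		ConcurrencyQuota:         10,
		QueueSize:                10,
		MemoryBytesQuotaPerQuery: 1 << 30, // 1 GiB of table memory per query
	}, zap.NewNop())
	if err != nil {
		panic(err)
	}
	defer ctrl.Shutdown(context.Background())

	// Compile and enqueue a query; the controller owns its goroutine and memory budget.
	q, err := ctrl.Query(context.Background(), &query.Request{
		Compiler: lang.FluxCompiler{Query: `from(bucket:"db/autogen") |> range(start: -1h)`},
	})
	if err != nil {
		panic(err)
	}

	// Drain the results, then release the query and inspect its outcome.
	for range q.Results() {
	}
	q.Done()
	if err := q.Err(); err != nil {
		fmt.Println("query error:", err)
	}
	fmt.Printf("stats: %+v\n", q.Statistics())
}
```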
-package control - -import ( - "context" - "fmt" - "math" - "runtime/debug" - "sync" - "sync/atomic" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/codes" - "github.com/influxdata/flux/dependency" - "github.com/influxdata/flux/execute/table" - "github.com/influxdata/flux/lang" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/runtime" - "github.com/influxdata/influxdb/v2/kit/errors" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/kit/tracing" - influxlogger "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/query" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// orgLabel is the metric label to use in the controller -const orgLabel = "org" - -// Controller provides a central location to manage all incoming queries. -// The controller is responsible for compiling, queueing, and executing queries. -type Controller struct { - lastID uint64 - config Config - queriesMu sync.RWMutex - queries map[QueryID]*Query - queryQueue chan *Query - wg sync.WaitGroup - shutdown bool - done chan struct{} - abortOnce sync.Once - abort chan struct{} - memory *memoryManager - - metrics *controllerMetrics - labelKeys []string - - log *zap.Logger - - dependencies []flux.Dependency - - fluxLogEnabled bool -} - -type Config struct { - // ConcurrencyQuota is the number of queries that are allowed to execute concurrently. - // - // This value is limited to an int32 because it's used to set the initial delta on the - // controller's WaitGroup, and WG deltas have an effective limit of math.MaxInt32. - // See: https://github.com/golang/go/issues/20687 - ConcurrencyQuota int32 - - // InitialMemoryBytesQuotaPerQuery is the initial number of bytes allocated for a query - // when it is started. If this is unset, then the MemoryBytesQuotaPerQuery will be used. - InitialMemoryBytesQuotaPerQuery int64 - - // MemoryBytesQuotaPerQuery is the maximum number of bytes (in table memory) a query is allowed to use at - // any given time. - // - // A query may not be able to use its entire quota of memory if requesting more memory would conflict - // with the maximum amount of memory that the controller can request. - MemoryBytesQuotaPerQuery int64 - - // MaxMemoryBytes is the maximum amount of memory the controller is allowed to - // allocated to queries. - // - // If this is unset, then this number is ConcurrencyQuota * MemoryBytesQuotaPerQuery. - // This number must be greater than or equal to the ConcurrencyQuota * InitialMemoryBytesQuotaPerQuery. - // This number may be less than the ConcurrencyQuota * MemoryBytesQuotaPerQuery. - MaxMemoryBytes int64 - - // QueueSize is the number of queries that are allowed to be awaiting execution before new queries are rejected. - // - // This value is limited to an int32 because it's used to make(chan *Query, QueueSize) on controller startup. - // Through trial-and-error I found that make(chan *Query, N) starts to panic for N > 1<<45 - 12, so not all - // ints or int64s are safe to pass here. Using that max value still immediately crashes the program with an OOM, - // because it tries to allocate TBs of memory for the channel. - // I was able to boot influxd locally using math.MaxInt32 for this parameter. 
- // - // Less-scientifically, this was the only Config parameter other than ConcurrencyQuota to be typed as an int - // instead of an explicit int64. When ConcurrencyQuota changed to an int32, it felt like a decent idea for - // this to follow suit. - QueueSize int32 - - // MetricLabelKeys is a list of labels to add to the metrics produced by the controller. - // The value for a given key will be read off the context. - // The context value must be a string or an implementation of the Stringer interface. - MetricLabelKeys []string - - ExecutorDependencies []flux.Dependency - - // FluxLogEnabled logs any in-progress queries that get cancelled due to the server being shut down. - FluxLogEnabled bool -} - -// complete will fill in the defaults, validate the configuration, and -// return the new Config. -func (c *Config) complete(log *zap.Logger) (Config, error) { - config := *c - if config.MemoryBytesQuotaPerQuery == 0 { - // 0 means unlimited - config.MemoryBytesQuotaPerQuery = math.MaxInt64 - } - if config.InitialMemoryBytesQuotaPerQuery == 0 { - config.InitialMemoryBytesQuotaPerQuery = config.MemoryBytesQuotaPerQuery - } - if config.ConcurrencyQuota == 0 && config.QueueSize > 0 { - log.Warn("Ignoring query QueueSize > 0 when ConcurrencyQuota is 0") - config.QueueSize = 0 - } - - if err := config.validate(); err != nil { - return Config{}, err - } - return config, nil -} - -func (c *Config) validate() error { - if c.ConcurrencyQuota < 0 { - return errors.New("ConcurrencyQuota must not be negative") - } else if c.ConcurrencyQuota == 0 { - if c.QueueSize != 0 { - return errors.New("QueueSize must be unlimited when ConcurrencyQuota is unlimited") - } - if c.MaxMemoryBytes != 0 { - // This is because we have to account for the per-query reserved memory and remove it from - // the max total memory. If there is not a maximum number of queries this is not possible. 
- return errors.New("Cannot limit max memory when ConcurrencyQuota is unlimited") - } - } else { - if c.QueueSize <= 0 { - return errors.New("QueueSize must be positive when ConcurrencyQuota is limited") - } - } - if c.MemoryBytesQuotaPerQuery < 0 { - return errors.New("MemoryBytesQuotaPerQuery must be positive") - } - if c.InitialMemoryBytesQuotaPerQuery < 0 { - return errors.New("InitialMemoryBytesQuotaPerQuery must be positive") - } - if c.MaxMemoryBytes < 0 { - return errors.New("MaxMemoryBytes must be positive") - } - if c.MaxMemoryBytes != 0 { - if minMemory := int64(c.ConcurrencyQuota) * c.InitialMemoryBytesQuotaPerQuery; c.MaxMemoryBytes < minMemory { - return fmt.Errorf("MaxMemoryBytes must be greater than or equal to the ConcurrencyQuota * InitialMemoryBytesQuotaPerQuery: %d < %d (%d * %d)", c.MaxMemoryBytes, minMemory, c.ConcurrencyQuota, c.InitialMemoryBytesQuotaPerQuery) - } - } - return nil -} - -type QueryID uint64 - -func New(config Config, logger *zap.Logger) (*Controller, error) { - c, err := config.complete(logger) - if err != nil { - return nil, errors.Wrap(err, "invalid controller config") - } - metricLabelKeys := append(c.MetricLabelKeys, orgLabel) - if logger == nil { - logger = zap.NewNop() - } - logger.Info("Starting query controller", - zap.Int32("concurrency_quota", c.ConcurrencyQuota), - zap.Int64("initial_memory_bytes_quota_per_query", c.InitialMemoryBytesQuotaPerQuery), - zap.Int64("memory_bytes_quota_per_query", c.MemoryBytesQuotaPerQuery), - zap.Int64("max_memory_bytes", c.MaxMemoryBytes), - zap.Int32("queue_size", c.QueueSize)) - - mm := &memoryManager{ - initialBytesQuotaPerQuery: c.InitialMemoryBytesQuotaPerQuery, - memoryBytesQuotaPerQuery: c.MemoryBytesQuotaPerQuery, - } - if c.MaxMemoryBytes > 0 { - mm.unusedMemoryBytes = c.MaxMemoryBytes - (int64(c.ConcurrencyQuota) * c.InitialMemoryBytesQuotaPerQuery) - } else { - mm.unlimited = true - } - queryQueue := make(chan *Query, c.QueueSize) - if c.ConcurrencyQuota == 0 { - queryQueue = nil - } - ctrl := &Controller{ - config: c, - queries: make(map[QueryID]*Query), - queryQueue: queryQueue, - done: make(chan struct{}), - abort: make(chan struct{}), - memory: mm, - log: logger, - metrics: newControllerMetrics(metricLabelKeys), - labelKeys: metricLabelKeys, - dependencies: c.ExecutorDependencies, - fluxLogEnabled: config.FluxLogEnabled, - } - if c.ConcurrencyQuota != 0 { - quota := int(c.ConcurrencyQuota) - ctrl.wg.Add(quota) - for i := 0; i < quota; i++ { - go func() { - defer ctrl.wg.Done() - ctrl.processQueryQueue() - }() - } - } - return ctrl, nil -} - -// Query satisfies the AsyncQueryService while ensuring the request is propagated on the context. -func (c *Controller) Query(ctx context.Context, req *query.Request) (flux.Query, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - // Set the request on the context so platform specific Flux operations can retrieve it later. - ctx = query.ContextWithRequest(ctx, req) - // Set the org label value for controller metrics - ctx = context.WithValue(ctx, orgLabel, req.OrganizationID.String()) //lint:ignore SA1029 this is a temporary ignore until we have time to create an appropriate type - // The controller injects the dependencies for each incoming request. - ctx, deps := dependency.Inject(ctx, c.dependencies...) - q, err := c.query(ctx, req.Compiler, deps) - if err != nil { - deps.Finish() - return q, err - } - - return q, nil -} - -// query submits a query for execution returning immediately. 
-// Done must be called on any returned Query objects. -func (c *Controller) query(ctx context.Context, compiler flux.Compiler, deps *dependency.Span) (flux.Query, error) { - q, err := c.createQuery(ctx, compiler, deps) - if err != nil { - return nil, handleFluxError(err) - } - - if err := c.compileQuery(q, compiler); err != nil { - q.setErr(err) - c.finish(q) - c.countQueryRequest(q, labelCompileError) - return nil, q.Err() - } - if err := c.enqueueQuery(q); err != nil { - q.setErr(err) - c.finish(q) - c.countQueryRequest(q, labelQueueError) - return nil, q.Err() - } - return q, nil -} - -func (c *Controller) createQuery(ctx context.Context, compiler flux.Compiler, deps *dependency.Span) (*Query, error) { - c.queriesMu.RLock() - if c.shutdown { - c.queriesMu.RUnlock() - return nil, errors.New("query controller shutdown") - } - c.queriesMu.RUnlock() - - id := c.nextID() - labelValues := make([]string, len(c.labelKeys)) - compileLabelValues := make([]string, len(c.labelKeys)+1) - for i, k := range c.labelKeys { - value := ctx.Value(k) - var str string - switch v := value.(type) { - case string: - str = v - case fmt.Stringer: - str = v.String() - } - labelValues[i] = str - compileLabelValues[i] = str - } - compileLabelValues[len(compileLabelValues)-1] = string(compiler.CompilerType()) - - cctx, cancel := context.WithCancel(ctx) - parentSpan, parentCtx := tracing.StartSpanFromContextWithPromMetrics( - cctx, - "all", - c.metrics.allDur.WithLabelValues(labelValues...), - c.metrics.all.WithLabelValues(labelValues...), - ) - q := &Query{ - id: id, - labelValues: labelValues, - compileLabelValues: compileLabelValues, - state: Created, - c: c, - results: make(chan flux.Result), - parentCtx: parentCtx, - parentSpan: parentSpan, - cancel: cancel, - doneCh: make(chan struct{}), - deps: deps, - compiler: compiler, - } - - // Lock the queries mutex for the rest of this method. - c.queriesMu.Lock() - defer c.queriesMu.Unlock() - - if c.shutdown { - // Query controller was shutdown between when we started - // creating the query and ending it. - err := &flux.Error{ - Code: codes.Unavailable, - Msg: "query controller shutdown", - } - q.setErr(err) - return nil, err - } - c.queries[id] = q - return q, nil -} - -func (c *Controller) nextID() QueryID { - nextID := atomic.AddUint64(&c.lastID, 1) - return QueryID(nextID) -} - -func (c *Controller) countQueryRequest(q *Query, result requestsLabel) { - l := len(q.labelValues) - lvs := make([]string, l+1) - copy(lvs, q.labelValues) - lvs[l] = string(result) - c.metrics.requests.WithLabelValues(lvs...).Inc() -} - -func (c *Controller) compileQuery(q *Query, compiler flux.Compiler) (err error) { - log := c.log.With(influxlogger.TraceFields(q.parentCtx)...) 
- - defer func() { - if e := recover(); e != nil { - var ok bool - err, ok = e.(error) - if !ok { - err = fmt.Errorf("panic: %v", e) - } - if entry := log.Check(zapcore.InfoLevel, "panic during compile"); entry != nil { - entry.Stack = string(debug.Stack()) - entry.Write(zap.Error(err)) - } - } - }() - - ctx, ok := q.tryCompile() - if !ok { - return &flux.Error{ - Code: codes.Internal, - Msg: "failed to transition query to compiling state", - } - } - - prog, err := compiler.Compile(ctx, runtime.Default) - if err != nil { - return &flux.Error{ - Msg: "compilation failed", - Err: err, - } - } - - if p, ok := prog.(lang.LoggingProgram); ok { - p.SetLogger(log) - } - - q.program = prog - return nil -} - -func (c *Controller) enqueueQuery(q *Query) error { - if _, ok := q.tryQueue(); !ok { - return &flux.Error{ - Code: codes.Internal, - Msg: "failed to transition query to queueing state", - } - } - - if c.queryQueue == nil { - // unlimited queries case - c.queriesMu.RLock() - defer c.queriesMu.RUnlock() - if c.shutdown { - return &flux.Error{ - Code: codes.Internal, - Msg: "controller is shutting down, query not runnable", - } - } - // we can't start shutting down until unlock, so it is safe to add to the waitgroup - c.wg.Add(1) - - // unlimited queries, so start a goroutine for every query - go func() { - defer c.wg.Done() - c.executeQuery(q) - }() - } else { - select { - case c.queryQueue <- q: - default: - return &flux.Error{ - Code: codes.ResourceExhausted, - Msg: "queue length exceeded", - } - } - } - - return nil -} - -func (c *Controller) processQueryQueue() { - for { - select { - case <-c.done: - return - case q := <-c.queryQueue: - c.executeQuery(q) - } - } -} - -// executeQuery will execute a compiled program and wait for its completion. -func (c *Controller) executeQuery(q *Query) { - - defer c.waitForQuery(q) - defer func() { - if e := recover(); e != nil { - var ok bool - err, ok := e.(error) - if !ok { - err = fmt.Errorf("panic: %v", e) - } - q.setErr(err) - if entry := c.log.With(influxlogger.TraceFields(q.parentCtx)...). - Check(zapcore.InfoLevel, "panic during program start"); entry != nil { - entry.Stack = string(debug.Stack()) - entry.Write(zap.Error(err)) - } - } - }() - - ctx, ok := q.tryExec() - if !ok { - // This may happen if the query was cancelled (either because the - // client cancelled it, or because the controller is shutting down) - // In the case of cancellation, SetErr() should reset the error to an - // appropriate message. - q.setErr(&flux.Error{ - Code: codes.Internal, - Msg: "impossible state transition", - }) - - return - } - - q.c.createAllocator(q) - // Record unused memory before start. - q.recordUnusedMemory() - exec, err := q.program.Start(ctx, q.alloc) - if err != nil { - q.setErr(err) - return - } - q.exec = exec - q.pump(exec, ctx.Done()) -} - -// waitForQuery will wait until the query is done. -func (c *Controller) waitForQuery(q *Query) { - select { - case <-q.doneCh: - case <-c.done: - } -} - -func (c *Controller) finish(q *Query) { - c.queriesMu.Lock() - delete(c.queries, q.id) - if len(c.queries) == 0 && c.shutdown { - close(c.done) - } - c.queriesMu.Unlock() -} - -// Queries reports the active queries. 
-func (c *Controller) Queries() []*Query { - c.queriesMu.RLock() - defer c.queriesMu.RUnlock() - queries := make([]*Query, 0, len(c.queries)) - for _, q := range c.queries { - queries = append(queries, q) - } - return queries -} - -// Shutdown will signal to the Controller that it should not accept any -// new queries and that it should finish executing any existing queries. -// This will return once the Controller's run loop has been exited and all -// queries have been finished or until the Context has been canceled. -func (c *Controller) Shutdown(ctx context.Context) error { - // Wait for query processing goroutines to finish. - defer c.wg.Wait() - - // Mark that the controller is shutdown so it does not - // accept new queries. - func() { - c.queriesMu.Lock() - defer c.queriesMu.Unlock() - if !c.shutdown { - c.shutdown = true - if len(c.queries) == 0 { - // We hold the lock. No other queries can be spawned. - // No other queries are waiting to be finished, so we have to - // close the done channel here instead of in finish(*Query) - close(c.done) - } - } - }() - - // Cancel all of the currently active queries. - c.queriesMu.RLock() - for _, q := range c.queries { - if c.fluxLogEnabled { - var fluxScript string - fc, ok := q.compiler.(lang.FluxCompiler) - if !ok { - fluxScript = "unknown" - } else { - fluxScript = fc.Query - } - c.log.Info("Cancelling Flux query because of server shutdown", zap.String("query", fluxScript)) - } - - q.Cancel() - } - c.queriesMu.RUnlock() - - // Wait for query processing goroutines to finish. - defer c.wg.Wait() - - // Wait for all of the queries to be cleaned up or until the - // context is done. - select { - case <-c.done: - return nil - case <-ctx.Done(): - c.abortOnce.Do(func() { - close(c.abort) - }) - return ctx.Err() - } -} - -// PrometheusCollectors satisfies the prom.PrometheusCollector interface. -func (c *Controller) PrometheusCollectors() []prometheus.Collector { - collectors := c.metrics.PrometheusCollectors() - for _, dep := range c.dependencies { - if pc, ok := dep.(prom.PrometheusCollector); ok { - collectors = append(collectors, pc.PrometheusCollectors()...) - } - } - return collectors -} - -func (c *Controller) GetUnusedMemoryBytes() int64 { - return c.memory.getUnusedMemoryBytes() -} - -func (c *Controller) GetUsedMemoryBytes() int64 { - return c.config.MaxMemoryBytes - c.GetUnusedMemoryBytes() -} - -// Query represents a single request. -type Query struct { - id QueryID - - labelValues []string - compileLabelValues []string - - c *Controller - - // query state. The stateMu protects access for the group below. - stateMu sync.RWMutex - state State - err error - runtimeErrs []error - cancel func() - - parentCtx context.Context - parentSpan, currentSpan *tracing.Span - stats flux.Statistics - - done sync.Once - doneCh chan struct{} - - program flux.Program - exec flux.Query - results chan flux.Result - compiler flux.Compiler - - memoryManager *queryMemoryManager - alloc *memory.ResourceAllocator - deps *dependency.Span -} - -func (q *Query) ProfilerResults() (flux.ResultIterator, error) { - p := q.program.(*lang.AstProgram) - if len(p.Profilers) == 0 { - return nil, nil - } - tables := make([]flux.Table, 0) - for _, profiler := range p.Profilers { - if result, err := profiler.GetResult(q, q.alloc); err != nil { - return nil, err - } else { - tables = append(tables, result) - } - } - res := table.NewProfilerResult(tables...) 
- return flux.NewSliceResultIterator([]flux.Result{&res}), nil -} - -// ID reports an ephemeral unique ID for the query. -func (q *Query) ID() QueryID { - return q.id -} - -// Cancel will stop the query execution. -func (q *Query) Cancel() { - // Call the cancel function to signal that execution should - // be interrupted. - q.cancel() -} - -// Results returns a channel that will deliver the query results. -// -// It's possible that the channel is closed before any results arrive. -// In particular, if a query's context or the query itself is canceled, -// the query may close the results channel before any results are computed. -// -// The query may also have an error during execution so the Err() -// function should be used to check if an error happened. -func (q *Query) Results() <-chan flux.Result { - return q.results -} - -func (q *Query) recordUnusedMemory() { - unused := q.c.GetUnusedMemoryBytes() - q.c.metrics.memoryUnused.WithLabelValues(q.labelValues...).Set(float64(unused)) -} - -// Done signals to the Controller that this query is no longer -// being used and resources related to the query may be freed. -func (q *Query) Done() { - // This must only be invoked once. - q.done.Do(func() { - // All done calls should block until the first done call succeeds. - defer close(q.doneCh) - - // Lock the state mutex and transition to the finished state. - // Then force the query to cancel to tell it to stop executing. - // We transition to the new state first so that we do not enter - // the canceled state at any point (as we have not been canceled). - q.stateMu.Lock() - q.transitionTo(Finished) - q.cancel() - q.stateMu.Unlock() - - // Ensure that all of the results have been drained. - // It is ok to read this as the user has already indicated they don't - // care about the results. When this is closed, it tells us an error has - // been set or the results have finished being pumped. - for range q.results { - // Do nothing with the results. - } - - // No other goroutines should be modifying state at this point so we - // can do things that would be unsafe in another context. - if q.exec != nil { - // Mark the program as being done and copy out the error if it exists. - q.exec.Done() - if q.err == nil { - // TODO(jsternberg): The underlying program never returns - // this so maybe their interface should change? - q.err = q.exec.Err() - } - // Merge the metadata from the program into the controller stats. - q.mergeQueryStats(q.exec.Statistics()) - } - - // Retrieve the runtime errors that have been accumulated. - errMsgs := make([]string, 0, len(q.runtimeErrs)) - for _, e := range q.runtimeErrs { - errMsgs = append(errMsgs, e.Error()) - } - q.stats.RuntimeErrors = errMsgs - - // Clean up the dependencies. - q.deps.Finish() - - // Mark the query as finished so it is removed from the query map. - q.c.finish(q) - - // Release the additional memory associated with this query. - if q.memoryManager != nil { - q.memoryManager.Release() - // Record unused memory after finish. - q.recordUnusedMemory() - } - - // Count query request. - if q.err != nil || len(q.runtimeErrs) > 0 { - q.c.countQueryRequest(q, labelRuntimeError) - } else { - q.c.countQueryRequest(q, labelSuccess) - } - - }) - <-q.doneCh -} - -func (q *Query) mergeQueryStats(other flux.Statistics) { - // Clear out the durations and the statistics that we calculate. - // We don't want to double count things. 
- other.TotalDuration = 0 - other.CompileDuration = 0 - other.QueueDuration = 0 - other.PlanDuration = 0 - other.RequeueDuration = 0 - other.ExecuteDuration = 0 - other.Concurrency = 0 - other.MaxAllocated = 0 - other.TotalAllocated = 0 - other.RuntimeErrors = nil - - // Now use the Add method to combine the two. - // This should pick up any statistics from the other statistics. - q.stats = q.stats.Add(other) -} - -// Statistics reports the statistics for the query. -// -// This method must be called after Done. It will block until -// the query has been finalized unless a context is given. -func (q *Query) Statistics() flux.Statistics { - stats := q.stats - if q.alloc != nil { - stats.MaxAllocated = q.alloc.MaxAllocated() - } - return stats -} - -// State reports the current state of the query. -func (q *Query) State() State { - q.stateMu.RLock() - state := q.state - if !isFinishedState(state) { - // If the query is a non-finished state, check the - // context to see if we have been interrupted. - select { - case <-q.parentCtx.Done(): - // The query has been canceled so report to the - // outside world that we have been canceled. - // Do NOT attempt to change the internal state - // variable here. It is a minefield. Leave the - // normal query execution to figure that out. - state = Canceled - default: - // The context has not been canceled. - } - } - q.stateMu.RUnlock() - return state -} - -// transitionTo will transition from one state to another. If a list of current states -// is given, then the query must be in one of those states for the transition to succeed. -// This method must be called with a lock and it must be called from within the run loop. -func (q *Query) transitionTo(newState State, currentState ...State) (context.Context, bool) { - // If we are transitioning to a non-finished state, the query - // may have been canceled. If the query was canceled, then - // we need to transition to the canceled state - if !isFinishedState(newState) { - select { - case <-q.parentCtx.Done(): - // Transition to the canceled state and report that - // we failed to transition to the desired state. - _, _ = q.transitionTo(Canceled) - return nil, false - default: - } - } - - if len(currentState) > 0 { - // Find the current state in the list of current states. - for _, st := range currentState { - if q.state == st { - goto TRANSITION - } - } - return nil, false - } - -TRANSITION: - // We are transitioning to a new state. Close the current span (if it exists). - if q.currentSpan != nil { - q.currentSpan.Finish() - switch q.state { - case Compiling: - q.stats.CompileDuration += q.currentSpan.Duration - case Queueing: - q.stats.QueueDuration += q.currentSpan.Duration - case Executing: - q.stats.ExecuteDuration += q.currentSpan.Duration - } - } - q.currentSpan = nil - - if isFinishedState(newState) { - // Invoke the cancel function to ensure that we have signaled that the query should be done. - // The user is supposed to read the entirety of the tables returned before we end up in a finished - // state, but user error may have caused this not to happen so there's no harm to canceling multiple - // times. - q.cancel() - - // If we are transitioning to a finished state from a non-finished state, finish the parent span. - if q.parentSpan != nil { - q.parentSpan.Finish() - q.stats.TotalDuration = q.parentSpan.Duration - q.parentSpan = nil - } - } - - // Transition to the new state. - q.state = newState - - // Start a new span and set a new context. 
- var ( - dur *prometheus.HistogramVec - gauge *prometheus.GaugeVec - labelValues = q.labelValues - ) - switch newState { - case Compiling: - dur, gauge = q.c.metrics.compilingDur, q.c.metrics.compiling - labelValues = q.compileLabelValues - case Queueing: - dur, gauge = q.c.metrics.queueingDur, q.c.metrics.queueing - case Executing: - dur, gauge = q.c.metrics.executingDur, q.c.metrics.executing - default: - // This state is not tracked so do not create a new span or context for it. - // Use the parent context if one is needed. - return q.parentCtx, true - } - var currentCtx context.Context - q.currentSpan, currentCtx = tracing.StartSpanFromContextWithPromMetrics( - q.parentCtx, - newState.String(), - dur.WithLabelValues(labelValues...), - gauge.WithLabelValues(labelValues...), - ) - return currentCtx, true -} - -// Err reports any error the query may have encountered. -func (q *Query) Err() error { - q.stateMu.Lock() - err := q.err - q.stateMu.Unlock() - return handleFluxError(err) -} - -// setErr marks this query with an error. If the query was -// canceled, then the error is ignored. -// -// This will mark the query as ready so setResults must not -// be called if this method is invoked. -func (q *Query) setErr(err error) { - q.stateMu.Lock() - defer q.stateMu.Unlock() - - // We may have this get called when the query is canceled. - // If that is the case, transition to the canceled state - // instead and record the error from that since the error - // we received is probably wrong. - select { - case <-q.parentCtx.Done(): - q.transitionTo(Canceled) - err = q.parentCtx.Err() - default: - q.transitionTo(Errored) - } - q.err = err - - // Close the ready channel to report that no results - // will be sent. - close(q.results) -} - -func (q *Query) addRuntimeError(e error) { - q.stateMu.Lock() - defer q.stateMu.Unlock() - - q.runtimeErrs = append(q.runtimeErrs, e) -} - -// pump will read from the executing query results and pump the -// results to our destination. -// When there are no more results, then this will close our own -// results channel. -func (q *Query) pump(exec flux.Query, done <-chan struct{}) { - defer close(q.results) - - // When our context is canceled, we need to propagate that cancel - // signal down to the executing program just in case it is waiting - // for a cancel signal and is ignoring the passed in context. - // We want this signal to only be sent once and we want to continue - // draining the results until the underlying program has actually - // been finished so we copy this to a new channel and set it to - // nil when it has been closed. - signalCh := done - for { - select { - case res, ok := <-exec.Results(): - if !ok { - return - } - - // It is possible for the underlying query to misbehave. - // We have to continue pumping results even if this is the - // case, but if the query has been canceled or finished with - // done, nobody is going to read these values so we need - // to avoid blocking. - ecr := &errorCollectingResult{ - Result: res, - q: q, - } - select { - case <-done: - case q.results <- ecr: - } - case <-signalCh: - // Signal to the underlying executor that the query - // has been canceled. Usually, the signal on the context - // is likely enough, but this explicitly signals just in case. - exec.Cancel() - - // Set the done channel to nil so we don't do this again - // and we continue to drain the results. - signalCh = nil - case <-q.c.abort: - // If we get here, then any running queries should have been cancelled - // in controller.Shutdown(). 
- return - } - } -} - -// tryCompile attempts to transition the query into the Compiling state. -func (q *Query) tryCompile() (context.Context, bool) { - q.stateMu.Lock() - defer q.stateMu.Unlock() - - return q.transitionTo(Compiling, Created) -} - -// tryQueue attempts to transition the query into the Queueing state. -func (q *Query) tryQueue() (context.Context, bool) { - q.stateMu.Lock() - defer q.stateMu.Unlock() - - return q.transitionTo(Queueing, Compiling) -} - -// tryExec attempts to transition the query into the Executing state. -func (q *Query) tryExec() (context.Context, bool) { - q.stateMu.Lock() - defer q.stateMu.Unlock() - - return q.transitionTo(Executing, Queueing) -} - -type errorCollectingResult struct { - flux.Result - q *Query -} - -func (r *errorCollectingResult) Tables() flux.TableIterator { - return &errorCollectingTableIterator{ - TableIterator: r.Result.Tables(), - q: r.q, - } -} - -type errorCollectingTableIterator struct { - flux.TableIterator - q *Query -} - -func (ti *errorCollectingTableIterator) Do(f func(t flux.Table) error) error { - err := ti.TableIterator.Do(f) - if err != nil { - err = handleFluxError(err) - ti.q.addRuntimeError(err) - } - return err -} - -// State is the query state. -type State int - -const ( - // Created indicates the query has been created. - Created State = iota - - // Compiling indicates that the query is in the process - // of executing the compiler associated with the query. - Compiling - - // Queueing indicates the query is waiting inside of the - // scheduler to be executed. - Queueing - - // Executing indicates that the query is currently executing. - Executing - - // Errored indicates that there was an error when attempting - // to execute a query within any state inside of the controller. - Errored - - // Finished indicates that the query has been marked as Done - // and it is awaiting removal from the Controller or has already - // been removed. - Finished - - // Canceled indicates that the query was signaled to be - // canceled. A canceled query must still be released with Done. - Canceled -) - -func (s State) String() string { - switch s { - case Created: - return "created" - case Compiling: - return "compiling" - case Queueing: - return "queueing" - case Executing: - return "executing" - case Errored: - return "errored" - case Finished: - return "finished" - case Canceled: - return "canceled" - default: - return "unknown" - } -} - -func isFinishedState(state State) bool { - switch state { - case Canceled, Errored, Finished: - return true - default: - return false - } -} - -// handleFluxError will take a flux.Error and convert it into an influxdb.Error. -// It will match certain codes to the equivalent in influxdb. -// -// If the error is any other type of error, it will return the error untouched. -// -// TODO(jsternberg): This likely becomes a public function, but this is just an initial -// implementation so playing it safe by making it package local for now. -func handleFluxError(err error) error { - ferr, ok := err.(*flux.Error) - if !ok { - return err - } - werr := handleFluxError(ferr.Err) - - code := errors2.EInternal - switch ferr.Code { - case codes.Inherit: - // If we are inheriting the error code, influxdb doesn't - // have an equivalent of this so we need to retrieve - // the error code from the wrapped error which has already - // been translated to an influxdb error (if possible). 
- if werr != nil { - code = errors2.ErrorCode(werr) - } - case codes.NotFound: - code = errors2.ENotFound - case codes.Invalid: - code = errors2.EInvalid - // These don't really map correctly, but we want - // them to show up as 4XX so until influxdb error - // codes are updated for more types of failures, - // mapping these to invalid. - case codes.Canceled, - codes.ResourceExhausted, - codes.FailedPrecondition, - codes.Aborted, - codes.OutOfRange, - codes.Unimplemented: - code = errors2.EInvalid - case codes.PermissionDenied: - code = errors2.EForbidden - case codes.Unauthenticated: - code = errors2.EUnauthorized - default: - // Everything else is treated as an internal error - // which is set above. - } - return &errors2.Error{ - Code: code, - Msg: ferr.Msg, - Err: werr, - } -} diff --git a/query/control/controller_test.go b/query/control/controller_test.go deleted file mode 100644 index b25a9577216..00000000000 --- a/query/control/controller_test.go +++ /dev/null @@ -1,1534 +0,0 @@ -package control_test - -import ( - "context" - "errors" - "fmt" - "math" - "strings" - "sync" - "testing" - "time" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/arrow" - "github.com/influxdata/flux/codes" - "github.com/influxdata/flux/dependency" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/execute/executetest" - "github.com/influxdata/flux/lang" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/mock" - "github.com/influxdata/flux/plan" - "github.com/influxdata/flux/plan/plantest" - "github.com/influxdata/flux/stdlib/universe" - _ "github.com/influxdata/influxdb/v2/fluxinit/static" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/control" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "go.uber.org/zap/zaptest" -) - -func init() { - execute.RegisterSource(executetest.AllocatingFromTestKind, executetest.CreateAllocatingFromSource) -} - -var ( - mockCompiler = &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - prev := time.Now() - for now := time.Now(); now.Equal(prev); now = time.Now() { - time.Sleep(time.Millisecond) - } - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - prev := time.Now() - for now := time.Now(); now.Equal(prev); now = time.Now() { - time.Sleep(time.Millisecond) - } - q.ResultsCh <- &executetest.Result{} - }, - }, nil - }, - } - config = control.Config{ - MemoryBytesQuotaPerQuery: math.MaxInt64, - } - limitedConfig = control.Config{ - MemoryBytesQuotaPerQuery: math.MaxInt64, - ConcurrencyQuota: 1, - QueueSize: 1, - } - bothConfigs = map[string]control.Config{"unlimited": config, "limited": limitedConfig} -) - -func setupPromRegistry(c *control.Controller) *prometheus.Registry { - reg := prometheus.NewRegistry() - for _, col := range c.PrometheusCollectors() { - err := reg.Register(col) - if err != nil { - panic(err) - } - } - return reg -} - -func validateRequestTotals(t testing.TB, reg *prometheus.Registry, success, compile, runtime, queue int) { - t.Helper() - metrics, err := reg.Gather() - if err != nil { - t.Fatal(err) - } - validate := func(name string, want int) { - m := FindMetric( - metrics, - "qc_requests_total", - map[string]string{ - "result": name, - "org": "", - }, - ) - var got int - if m != nil { - got = 
int(*m.Counter.Value) - } - if got != want { - t.Errorf("unexpected %s total: got %d want: %d", name, got, want) - } - } - validate("success", success) - validate("compile_error", compile) - validate("runtime_error", runtime) - validate("queue_error", queue) -} - -func validateUnusedMemory(t testing.TB, reg *prometheus.Registry, c control.Config) { - t.Helper() - metrics, err := reg.Gather() - if err != nil { - t.Fatal(err) - } - m := FindMetric( - metrics, - "qc_memory_unused_bytes", - map[string]string{ - "org": "", - }, - ) - var got int64 - if m != nil { - got = int64(*m.Gauge.Value) - } - want := c.MaxMemoryBytes - (int64(c.ConcurrencyQuota) * c.InitialMemoryBytesQuotaPerQuery) - if got != want { - t.Errorf("unexpected memory unused bytes: got %d want: %d", got, want) - } -} - -func TestController_QuerySuccess(t *testing.T) { - for name, config := range bothConfigs { - t.Run(name, func(t *testing.T) { - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - reg := setupPromRegistry(ctrl) - - q, err := ctrl.Query(context.Background(), makeRequest(mockCompiler)) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - for range q.Results() { - // discard the results as we do not care. - } - q.Done() - - if err := q.Err(); err != nil { - t.Errorf("unexpected error: %s", err) - } - - stats := q.Statistics() - if stats.CompileDuration == 0 { - t.Error("expected compile duration to be above zero") - } - if stats.ExecuteDuration == 0 { - t.Error("expected execute duration to be above zero") - } - if stats.TotalDuration == 0 { - t.Error("expected total duration to be above zero") - } - validateRequestTotals(t, reg, 1, 0, 0, 0) - }) - } -} - -func TestController_QueryCompileError(t *testing.T) { - for name, config := range bothConfigs { - t.Run(name, func(t *testing.T) { - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - reg := setupPromRegistry(ctrl) - - q, err := ctrl.Query(context.Background(), makeRequest(&mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return nil, errors.New("compile error") - }, - })) - if err == nil { - t.Error("expected compiler error") - } - - if q != nil { - t.Errorf("unexpected query value: %v", q) - defer q.Done() - } - - validateRequestTotals(t, reg, 0, 1, 0, 0) - }) - } -} - -func TestController_QueryRuntimeError(t *testing.T) { - for name, config := range bothConfigs { - t.Run(name, func(t *testing.T) { - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - reg := setupPromRegistry(ctrl) - - q, err := ctrl.Query(context.Background(), makeRequest(&mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - prev := time.Now() - for now := time.Now(); now.Equal(prev); now = time.Now() { - time.Sleep(time.Millisecond) - } - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - prev := time.Now() - for now := time.Now(); now.Equal(prev); now = time.Now() { - time.Sleep(time.Millisecond) - } - q.SetErr(errors.New("runtime error")) - }, - }, nil - }, - })) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - for range q.Results() { - // discard the results as we do not care. 
- } - q.Done() - - if q.Err() == nil { - t.Error("expected runtime error") - } - - stats := q.Statistics() - if stats.CompileDuration == 0 { - t.Error("expected compile duration to be above zero") - } - if stats.ExecuteDuration == 0 { - t.Error("expected execute duration to be above zero") - } - if stats.TotalDuration == 0 { - t.Error("expected total duration to be above zero") - } - validateRequestTotals(t, reg, 0, 0, 1, 0) - }) - } -} - -func TestController_QueryQueueError(t *testing.T) { - t.Skip("This test exposed several race conditions, its not clear if the races are specific to the test case") - - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - reg := setupPromRegistry(ctrl) - - // This channel blocks program execution until we are done - // with running the test. - done := make(chan struct{}) - defer close(done) - - // Insert three queries, two that block forever and a last that does not. - // The third should error to be enqueued. - for i := 0; i < 2; i++ { - q, err := ctrl.Query(context.Background(), makeRequest(&mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - // Block until test is finished - <-done - }, - }, nil - }, - })) - if err != nil { - t.Fatal(err) - } - defer q.Done() - } - - // Third "normal" query - q, err := ctrl.Query(context.Background(), makeRequest(mockCompiler)) - if err == nil { - t.Error("expected queue error") - } - - if q != nil { - t.Errorf("unexpected query value: %v", q) - defer q.Done() - } - - validateRequestTotals(t, reg, 0, 0, 0, 1) -} - -// TODO(nathanielc): Use promtest in influxdb/kit - -// FindMetric iterates through mfs to find the first metric family matching name. -// If a metric family matches, then the metrics inside the family are searched, -// and the first metric whose labels match the given labels are returned. -// If no matches are found, FindMetric returns nil. -// -// FindMetric assumes that the labels on the metric family are well formed, -// i.e. there are no duplicate label names, and the label values are not empty strings. -func FindMetric(mfs []*dto.MetricFamily, name string, labels map[string]string) *dto.Metric { - _, m := findMetric(mfs, name, labels) - return m -} - -// findMetric is a helper that returns the matching family and the matching metric. -// The exported FindMetric function specifically only finds the metric, not the family, -// but for test it is more helpful to identify whether the family was matched. -func findMetric(mfs []*dto.MetricFamily, name string, labels map[string]string) (*dto.MetricFamily, *dto.Metric) { - var fam *dto.MetricFamily - - for _, mf := range mfs { - if mf.GetName() == name { - fam = mf - break - } - } - - if fam == nil { - // No family matching the name. - return nil, nil - } - - for _, m := range fam.Metric { - if len(m.Label) != len(labels) { - continue - } - - match := true - for _, l := range m.Label { - if labels[l.GetName()] != l.GetValue() { - match = false - break - } - } - - if !match { - continue - } - - // All labels matched. - return fam, m - } - - // Didn't find a metric whose labels all matched. 
- return fam, nil -} - -func TestController_AfterShutdown(t *testing.T) { - for name, config := range bothConfigs { - t.Run(name, func(t *testing.T) { - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - shutdown(t, ctrl) - - // No point in continuing. The shutdown didn't work - // even though there are no queries. - if t.Failed() { - return - } - - if _, err := ctrl.Query(context.Background(), makeRequest(mockCompiler)); err == nil { - t.Error("expected error") - } else if got, want := err.Error(), "query controller shutdown"; got != want { - t.Errorf("unexpected error -want/+got\n\t- %q\n\t+ %q", want, got) - } - }) - } -} - -func TestController_CompileError(t *testing.T) { - for name, config := range bothConfigs { - t.Run(name, func(t *testing.T) { - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return nil, &flux.Error{ - Code: codes.Invalid, - Msg: "expected error", - } - }, - } - if _, err := ctrl.Query(context.Background(), makeRequest(compiler)); err == nil { - t.Error("expected error") - } else if got, want := err.Error(), "compilation failed: expected error"; got != want { - t.Errorf("unexpected error -want/+got\n\t- %q\n\t+ %q", want, got) - } - }) - } -} - -func TestController_ExecuteError(t *testing.T) { - for name, config := range bothConfigs { - t.Run(name, func(t *testing.T) { - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - StartFn: func(ctx context.Context, alloc memory.Allocator) (*mock.Query, error) { - return nil, errors.New("expected error") - }, - }, nil - }, - } - - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // There should be no results. - numResults := 0 - for range q.Results() { - numResults++ - } - - if numResults != 0 { - t.Errorf("no results should have been returned, but %d were", numResults) - } - q.Done() - - if err := q.Err(); err == nil { - t.Error("expected error") - } else if got, want := err.Error(), "expected error"; got != want { - t.Errorf("unexpected error -want/+got\n\t- %q\n\t+ %q", want, got) - } - }) - } -} - -func TestController_LimitExceededError(t *testing.T) { - - const memoryBytesQuotaPerQuery = 64 - config := config - config.MemoryBytesQuotaPerQuery = memoryBytesQuotaPerQuery - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - // Return a program that will allocate one more byte than is allowed. 
- pts := plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("allocating-from-test", &executetest.AllocatingFromProcedureSpec{ - ByteCount: memoryBytesQuotaPerQuery + 1, - }), - plan.CreatePhysicalNode("yield", &universe.YieldProcedureSpec{Name: "_result"}), - }, - Edges: [][2]int{ - {0, 1}, - }, - Resources: flux.ResourceManagement{ - ConcurrencyQuota: 1, - }, - } - - ps := plantest.CreatePlanSpec(&pts) - prog := &lang.Program{ - Logger: zaptest.NewLogger(t), - PlanSpec: ps, - } - - return prog, nil - }, - } - - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - ri := flux.NewResultIteratorFromQuery(q) - defer ri.Release() - for ri.More() { - res := ri.Next() - err = res.Tables().Do(func(t flux.Table) error { - return nil - }) - if err != nil { - break - } - } - ri.Release() - - if err == nil { - t.Fatal("expected an error") - } - - if !strings.Contains(err.Error(), "memory") { - t.Fatalf("expected an error about memory limit exceeded, got %v", err) - } - - stats := ri.Statistics() - if len(stats.RuntimeErrors) != 1 { - t.Fatal("expected one runtime error reported in stats") - } - - if !strings.Contains(stats.RuntimeErrors[0], "memory") { - t.Fatalf("expected an error about memory limit exceeded, got %v", err) - } -} - -func TestController_CompilePanic(t *testing.T) { - for name, config := range bothConfigs { - t.Run(name, func(t *testing.T) { - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - panic("panic during compile step") - }, - } - - _, err = ctrl.Query(context.Background(), makeRequest(compiler)) - if err == nil { - t.Fatalf("expected error when query was compiled") - } else if !strings.Contains(err.Error(), "panic during compile step") { - t.Fatalf(`expected error to contain "panic during compile step" instead it contains "%v"`, err.Error()) - } - }) - } -} - -func TestController_StartPanic(t *testing.T) { - for name, config := range bothConfigs { - t.Run(name, func(t *testing.T) { - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - StartFn: func(ctx context.Context, alloc memory.Allocator) (i *mock.Query, e error) { - panic("panic during start step") - }, - }, nil - }, - } - - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Fatalf("unexpected error when query was compiled") - } - - for range q.Results() { - } - q.Done() - - if err = q.Err(); err == nil { - t.Fatalf("expected error after query started") - } else if !strings.Contains(err.Error(), "panic during start step") { - t.Fatalf(`expected error to contain "panic during start step" instead it contains "%v"`, err.Error()) - } - }) - } -} - -func TestController_ShutdownWithRunningQuery(t *testing.T) { - for name, config := range bothConfigs { - t.Run(name, func(t *testing.T) { - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - executing := make(chan struct{}) - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - close(executing) - 
<-ctx.Done() - - // This should still be read even if we have been canceled. - q.ResultsCh <- &executetest.Result{} - }, - }, nil - }, - } - - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Errorf("unexpected error: %s", err) - } - - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - for range q.Results() { - // discard the results - } - q.Done() - }() - - // Wait until execution has started. - <-executing - - // Shutdown should succeed and not timeout. The above blocked - // query should be canceled and then shutdown should return. - shutdown(t, ctrl) - wg.Wait() - }) - } -} - -func TestController_ShutdownWithTimeout(t *testing.T) { - for name, config := range bothConfigs { - t.Run(name, func(t *testing.T) { - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - // This channel blocks program execution until we are done - // with running the test. - done := make(chan struct{}) - defer close(done) - - executing := make(chan struct{}) - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - // This should just block until the end of the test - // when we perform cleanup. - close(executing) - <-done - }, - }, nil - }, - } - - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Errorf("unexpected error: %s", err) - } - - go func() { - for range q.Results() { - // discard the results - } - q.Done() - }() - - // Wait until execution has started. - <-executing - - // The shutdown should not succeed. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - if err := ctrl.Shutdown(ctx); err == nil { - t.Error("expected error") - } else if got, want := err.Error(), context.DeadlineExceeded.Error(); got != want { - t.Errorf("unexpected error -want/+got\n\t- %q\n\t+ %q", want, got) - } - cancel() - }) - } -} - -func TestController_PerQueryMemoryLimit(t *testing.T) { - for name, config := range bothConfigs { - t.Run(name, func(t *testing.T) { - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - defer func() { - if err, ok := recover().(error); ok && err != nil { - q.SetErr(err) - } - }() - - // This is emulating the behavior of exceeding the memory limit at runtime - mem := arrow.NewAllocator(alloc) - b := mem.Allocate(int(config.MemoryBytesQuotaPerQuery + 1)) - mem.Free(b) - }, - }, nil - }, - } - - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Fatal(err) - } - - for range q.Results() { - // discard the results - } - q.Done() - - if q.Err() == nil { - t.Fatal("expected error about memory limit exceeded") - } - }) - } -} - -func TestController_ConcurrencyQuota(t *testing.T) { - const ( - numQueries = 3 - concurrencyQuota = 2 - ) - - config := config - config.ConcurrencyQuota = concurrencyQuota - config.QueueSize = numQueries - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - executing := make(chan struct{}, numQueries) - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, 
error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - select { - case <-q.Canceled: - default: - executing <- struct{}{} - <-q.Canceled - } - }, - }, nil - }, - } - - for i := 0; i < numQueries; i++ { - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Fatal(err) - } - go func() { - for range q.Results() { - // discard the results - } - q.Done() - }() - } - - // Give 2 queries a chance to begin executing. The remaining third query should stay queued. - time.Sleep(250 * time.Millisecond) - - if err := ctrl.Shutdown(context.Background()); err != nil { - t.Error(err) - } - - // There is a chance that the remaining query managed to get executed after the executing queries - // were canceled. As a result, this test is somewhat flaky. - - close(executing) - - var count int - for range executing { - count++ - } - - if count != concurrencyQuota { - t.Fatalf("expected exactly %v queries to execute, but got: %v", concurrencyQuota, count) - } -} - -func TestController_QueueSize(t *testing.T) { - const ( - concurrencyQuota = 2 - queueSize = 3 - ) - - config := config - config.ConcurrencyQuota = concurrencyQuota - config.QueueSize = queueSize - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - // This channel blocks program execution until we are done - // with running the test. - done := make(chan struct{}) - defer close(done) - - executing := make(chan struct{}, concurrencyQuota+queueSize) - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - executing <- struct{}{} - // Block until test is finished - <-done - }, - }, nil - }, - } - - // Start as many queries as can be running at the same time - for i := 0; i < concurrencyQuota; i++ { - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Fatal(err) - } - go func() { - for range q.Results() { - // discard the results - } - q.Done() - }() - - // Wait until it's executing - <-executing - } - - // Now fill up the queue - for i := 0; i < queueSize; i++ { - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Fatal(err) - } - go func() { - for range q.Results() { - // discard the results - } - q.Done() - }() - } - - _, err = ctrl.Query(context.Background(), makeRequest(compiler)) - if err == nil { - t.Fatal("expected an error about queue length exceeded") - } -} - -// Test that rapidly starting and canceling the query and then calling done will correctly -// cancel the query and not result in a race condition. -func TestController_CancelDone_Unlimited(t *testing.T) { - config := config - - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - // Ensure the query takes a little bit of time so the cancel actually cancels something. 
- t := time.NewTimer(time.Second) - defer t.Stop() - - select { - case <-t.C: - case <-ctx.Done(): - } - }, - }, nil - }, - } - - var wg sync.WaitGroup - for i := 0; i < 100; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Errorf("unexpected error: %s", err) - return - } - q.Cancel() - q.Done() - }() - } - wg.Wait() -} - -// Test that rapidly starts and calls done on queries without reading the result. -func TestController_DoneWithoutRead_Unlimited(t *testing.T) { - config := config - - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - // Ensure the query takes a little bit of time so the cancel actually cancels something. - t := time.NewTimer(time.Second) - defer t.Stop() - - select { - case <-t.C: - q.ResultsCh <- &executetest.Result{ - Nm: "_result", - Tbls: []*executetest.Table{}, - } - case <-ctx.Done(): - } - }, - }, nil - }, - } - - var wg sync.WaitGroup - for i := 0; i < 100; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Errorf("unexpected error: %s", err) - return - } - // If we call done without reading anything it should work just fine. - q.Done() - }() - } - wg.Wait() -} - -// Test that rapidly starting and canceling the query and then calling done will correctly -// cancel the query and not result in a race condition. -func TestController_CancelDone(t *testing.T) { - config := config - config.ConcurrencyQuota = 10 - config.QueueSize = 200 - - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - // Ensure the query takes a little bit of time so the cancel actually cancels something. - t := time.NewTimer(time.Second) - defer t.Stop() - - select { - case <-t.C: - case <-ctx.Done(): - } - }, - }, nil - }, - } - - var wg sync.WaitGroup - for i := 0; i < 100; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Errorf("unexpected error: %s", err) - return - } - q.Cancel() - q.Done() - }() - } - wg.Wait() -} - -// Test that rapidly starts and calls done on queries without reading the result. -func TestController_DoneWithoutRead(t *testing.T) { - config := config - config.ConcurrencyQuota = 10 - config.QueueSize = 200 - - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - // Ensure the query takes a little bit of time so the cancel actually cancels something. 
- t := time.NewTimer(time.Second) - defer t.Stop() - - select { - case <-t.C: - q.ResultsCh <- &executetest.Result{ - Nm: "_result", - Tbls: []*executetest.Table{}, - } - case <-ctx.Done(): - } - }, - }, nil - }, - } - - var wg sync.WaitGroup - for i := 0; i < 100; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Errorf("unexpected error: %s", err) - return - } - // If we call done without reading anything it should work just fine. - q.Done() - }() - } - wg.Wait() -} - -// This tests what happens when there is memory remaining, -// but we would go above the maximum amount of available memory. -func TestController_Error_MaxMemory(t *testing.T) { - config := config - config.InitialMemoryBytesQuotaPerQuery = 512 - config.MaxMemoryBytes = 2048 - config.MemoryBytesQuotaPerQuery = 512 - config.QueueSize = 1 - config.ConcurrencyQuota = 1 - - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - reg := setupPromRegistry(ctrl) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - // Allocate memory continuously to hit the memory limit. - for i := 0; i < 16; i++ { - size := config.MemoryBytesQuotaPerQuery / 16 - if err := alloc.Account(int(size)); err != nil { - q.SetErr(err) - return - } - } - - // This final allocation should cause an error even though - // we haven't reached the maximum memory usage for the system. - if err := alloc.Account(32); err == nil { - t.Fatal("expected error") - } - }, - }, nil - }, - } - - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Errorf("unexpected error: %s", err) - return - } - consumeResults(t, q) - validateUnusedMemory(t, reg, config) -} - -// This tests that we can continuously run queries that do not use -// more than their initial memory allocation with some noisy neighbors. -// The noisy neighbors may occasionally fail because they are competing -// with each other, but they will never cause a small query to fail. -func TestController_NoisyNeighbor(t *testing.T) { - config := config - // We are fine using up to 1024 without an additional allocation. - config.InitialMemoryBytesQuotaPerQuery = 1024 - // Effectively no maximum quota per query. - config.MemoryBytesQuotaPerQuery = config.InitialMemoryBytesQuotaPerQuery * 100 - // The maximum number is about double what is needed to run - // all of the queries. - config.MaxMemoryBytes = config.InitialMemoryBytesQuotaPerQuery * 20 - // The concurrency is 10 which means at most 10 queries can run - // at any given time. - config.ConcurrencyQuota = 10 - // Set the queue length to something that can accommodate the input. - config.QueueSize = 1000 - - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - reg := setupPromRegistry(ctrl) - - wellBehavedNeighbor := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - // Allocate memory until we hit our initial memory limit so we should - // never request more memory. 
- for amount := int64(0); amount < config.InitialMemoryBytesQuotaPerQuery; amount += 16 { - if err := alloc.Account(16); err != nil { - q.SetErr(fmt.Errorf("well behaved query affected by noisy neighbor: %s", err)) - return - } - } - }, - }, nil - }, - } - - noisyNeighbor := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - // Allocate memory continuously to use up what we can and be as noisy as possible. - // Turn up the stereo and party on. - for { - if err := alloc.Account(16); err != nil { - // Whoops, party shut down. - return - } - } - }, - }, nil - }, - } - - var wg sync.WaitGroup - - // Launch 100 queriers that are well behaved. They should never fail. - errCh := make(chan error, 1) - for i := 0; i < 100; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for i := 0; i < 1000; i++ { - q, err := ctrl.Query(context.Background(), makeRequest(wellBehavedNeighbor)) - if err != nil { - select { - case errCh <- err: - default: - } - return - } - consumeResults(t, q) - } - }() - } - - // Launch 10 noisy neighbors. They will fail continuously. - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for i := 0; i < 1000; i++ { - q, err := ctrl.Query(context.Background(), makeRequest(noisyNeighbor)) - if err != nil { - select { - case errCh <- err: - default: - } - return - } - consumeResults(t, q) - } - }() - } - wg.Wait() - close(errCh) - - for err := range errCh { - t.Fatalf("unexpected error: %s", err) - } - validateUnusedMemory(t, reg, config) -} - -// This tests that a query that should be allowed is killed -// when it attempts to use more memory available than the -// system has. -func TestController_Error_NoRemainingMemory(t *testing.T) { - config := config - // We are fine using up to 1024 without an additional allocation. - config.InitialMemoryBytesQuotaPerQuery = 1024 - // Effectively no maximum quota per query. - config.MemoryBytesQuotaPerQuery = config.InitialMemoryBytesQuotaPerQuery * 100 - // The maximum memory available on the system is double the initial quota. - config.MaxMemoryBytes = config.InitialMemoryBytesQuotaPerQuery * 2 - - // Need to limit concurrency along with max memory or the config validation complains - config.ConcurrencyQuota = 1 - config.QueueSize = 1 - - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - reg := setupPromRegistry(ctrl) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - // Allocate memory continuously to use up what we can until denied. - for size := int64(0); ; size += 16 { - if err := alloc.Account(16); err != nil { - // We were not allowed to allocate more. - // Ensure that the size never exceeded the - // MaxMemoryBytes value. - if size > config.MaxMemoryBytes { - t.Errorf("query was allowed to allocate more than the maximum memory: %d > %d", size, config.MaxMemoryBytes) - } - return - } - } - }, - }, nil - }, - } - - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - consumeResults(t, q) - validateUnusedMemory(t, reg, config) -} - -// This test ensures the memory that the extra memory allocated -// for a query is properly returned when the query exits. 
-func TestController_MemoryRelease(t *testing.T) { - config := config - config.InitialMemoryBytesQuotaPerQuery = 16 - config.MemoryBytesQuotaPerQuery = 1024 - config.MaxMemoryBytes = config.MemoryBytesQuotaPerQuery * 2 - config.QueueSize = 1 - config.ConcurrencyQuota = 1 - - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - reg := setupPromRegistry(ctrl) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - // Allocate some amount of memory and never release it. - if err := alloc.Account(int(config.MemoryBytesQuotaPerQuery) / 2); err != nil { - q.SetErr(err) - return - } - }, - }, nil - }, - } - - // Run 100 queries. If we do not release the memory properly, - // this would fail. - for i := 0; i < 100; i++ { - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Errorf("unexpected error: %s", err) - return - } - consumeResults(t, q) - - if t.Failed() { - return - } - } - validateUnusedMemory(t, reg, config) -} - -// Set an irregular memory quota so that doubling the limit continuously -// would send us over the memory quota limit and make sure that -// the quota is still enforced correctly. -func TestController_IrregularMemoryQuota(t *testing.T) { - config := config - config.InitialMemoryBytesQuotaPerQuery = 64 - config.MemoryBytesQuotaPerQuery = 768 - config.MaxMemoryBytes = config.MemoryBytesQuotaPerQuery * 2 - config.QueueSize = 1 - config.ConcurrencyQuota = 1 - - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - reg := setupPromRegistry(ctrl) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - // Allocate memory continuously to hit the memory limit. - for size := 0; size < 768; size += 16 { - if err := alloc.Account(16); err != nil { - q.SetErr(err) - return - } - } - - // This final allocation should cause an error since we reached the - // memory quota. If the code for setting the limit is faulty, this - // would end up being allowed since the limit was set incorrectly. - if err := alloc.Account(16); err == nil { - t.Fatal("expected error") - } - }, - }, nil - }, - } - - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - t.Errorf("unexpected error: %s", err) - return - } - consumeResults(t, q) - validateUnusedMemory(t, reg, config) -} - -// This tests that if we run a bunch of queries that reserve memory, -// we don't encounter any race conditions that cause the currently -// in use memory from the pool to be accounted incorrectly. -func TestController_ReserveMemoryWithoutExceedingMax(t *testing.T) { - config := config - // Small initial memory bytes allocation so most of the query - // is handled by the memory pool. - config.InitialMemoryBytesQuotaPerQuery = 16 - // We will allocate 1024. This is needed to ensure the queries do not - // allocate too much. - config.MemoryBytesQuotaPerQuery = 1024 - // The maximum amount of memory. We will run with a concurrency of - // 100 and each of these queries will allocate exactly 1024. - config.MaxMemoryBytes = 1024 * 100 - // The concurrency is 100 which means at most 100 queries can run - // at any given time. 
- config.ConcurrencyQuota = 100 - // Set the queue length to something that can accommodate the input. - config.QueueSize = 1000 - - ctrl, err := control.New(config, zaptest.NewLogger(t)) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - reg := setupPromRegistry(ctrl) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{ - ExecuteFn: func(ctx context.Context, q *mock.Query, alloc memory.Allocator) { - // Allocate memory continuously to use up what we can and be as noisy as possible. - // Turn up the stereo and party on. - for size := 0; size < 1024; size += 16 { - if err := alloc.Account(16); err != nil { - q.SetErr(err) - return - } - } - }, - }, nil - }, - } - - var wg sync.WaitGroup - - // Launch double the number of running queriers to ensure saturation. - errCh := make(chan error, 1) - for i := 0; i < 300; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for i := 0; i < 1000; i++ { - q, err := ctrl.Query(context.Background(), makeRequest(compiler)) - if err != nil { - select { - case errCh <- err: - default: - } - return - } - consumeResults(t, q) - } - }() - } - wg.Wait() - close(errCh) - - for err := range errCh { - t.Fatalf("unexpected error: %s", err) - } - validateUnusedMemory(t, reg, config) -} - -func TestController_OnFinish(t *testing.T) { - closed := false - config := control.Config{ - ConcurrencyQuota: 1, - MemoryBytesQuotaPerQuery: 1024, - QueueSize: 1, - ExecutorDependencies: []flux.Dependency{ - mock.Dependency{ - InjectFn: func(ctx context.Context) context.Context { - dependency.OnFinishFunc(ctx, func() error { - closed = true - return nil - }) - return ctx - }, - }, - }, - } - - logger := zaptest.NewLogger(t) - ctrl, err := control.New(config, logger) - if err != nil { - t.Fatal(err) - } - defer shutdown(t, ctrl) - - done := make(chan struct{}) - defer close(done) - - compiler := &mock.Compiler{ - CompileFn: func(ctx context.Context) (flux.Program, error) { - return &mock.Program{}, nil - }, - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - q, err := ctrl.Query(ctx, makeRequest(compiler)) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - consumeResults(t, q) - - // The dependency should be closed. - if !closed { - t.Error("finish function was not executed") - } -} - -func consumeResults(tb testing.TB, q flux.Query) { - tb.Helper() - for res := range q.Results() { - if err := res.Tables().Do(func(table flux.Table) error { - return nil - }); err != nil { - tb.Errorf("unexpected error: %s", err) - } - } - q.Done() - - if err := q.Err(); err != nil { - tb.Errorf("unexpected error: %s", err) - } -} - -func shutdown(t *testing.T, ctrl *control.Controller) { - t.Helper() - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - if err := ctrl.Shutdown(ctx); err != nil { - t.Error(err) - } -} - -func makeRequest(c flux.Compiler) *query.Request { - return &query.Request{ - Compiler: c, - } -} diff --git a/query/control/memory.go b/query/control/memory.go deleted file mode 100644 index 3889ee9d2ff..00000000000 --- a/query/control/memory.go +++ /dev/null @@ -1,157 +0,0 @@ -package control - -import ( - "errors" - "math" - "sync/atomic" - - "github.com/influxdata/flux/memory" -) - -type memoryManager struct { - // initialBytesQuotaPerQuery is the initial amount of memory - // allocated for each query. It does not count against the - // memory pool. 
- initialBytesQuotaPerQuery int64 - - // memoryBytesQuotaPerQuery is the maximum amount of memory - // that may be allocated to each query. - memoryBytesQuotaPerQuery int64 - - // unusedMemoryBytes is the amount of memory that may be used - // when a query requests more memory. This value is only used - // when unlimited is set to false. - unusedMemoryBytes int64 - - // unlimited indicates that the memory manager should indicate - // there is an unlimited amount of free memory available. - unlimited bool -} - -func (m *memoryManager) getUnusedMemoryBytes() int64 { - return atomic.LoadInt64(&m.unusedMemoryBytes) -} - -func (m *memoryManager) trySetUnusedMemoryBytes(old, new int64) bool { - return atomic.CompareAndSwapInt64(&m.unusedMemoryBytes, old, new) -} - -func (m *memoryManager) addUnusedMemoryBytes(amount int64) int64 { - return atomic.AddInt64(&m.unusedMemoryBytes, amount) -} - -// createAllocator will construct an allocator and memory manager -// for the given query. -func (c *Controller) createAllocator(q *Query) { - q.memoryManager = &queryMemoryManager{ - m: c.memory, - limit: c.memory.initialBytesQuotaPerQuery, - } - q.alloc = &memory.ResourceAllocator{ - // Use an anonymous function to ensure the value is copied. - Limit: func(v int64) *int64 { return &v }(q.memoryManager.limit), - Manager: q.memoryManager, - } -} - -// queryMemoryManager is a memory manager for a specific query. -type queryMemoryManager struct { - m *memoryManager - limit int64 - given int64 -} - -// RequestMemory will determine if the query can be given more memory -// when it is requested. -// -// Note: This function accesses the memoryManager whose attributes -// may be modified concurrently. Atomic operations are used to keep -// it lockless. The data associated with this specific query are only -// invoked from within a lock so they are safe to modify. -// Second Note: The errors here are discarded anyway so don't worry -// too much about the specific message or structure. -func (q *queryMemoryManager) RequestMemory(want int64) (got int64, err error) { - // It can be determined statically if we are going to violate - // the memoryBytesQuotaPerQuery. - if q.limit+want > q.m.memoryBytesQuotaPerQuery { - return 0, errors.New("query hit hard limit") - } - - for { - unused := int64(math.MaxInt64) - if !q.m.unlimited { - unused = q.m.getUnusedMemoryBytes() - if unused < want { - // We do not have the capacity for this query to - // be given more memory. - return 0, errors.New("not enough capacity") - } - } - - // The memory allocator will only request the bare amount of - // memory it needs, but it will probably ask for more memory - // so, if possible, give it more so it isn't repeatedly calling - // this method. - given := q.giveMemory(want, unused) - - // Reserve this memory for our own use. - if !q.m.unlimited { - if !q.m.trySetUnusedMemoryBytes(unused, unused-given) { - // The unused value has changed so someone may have taken - // the memory that we wanted. Retry. - continue - } - } - - // Successfully reserved the memory so update our own internal - // counter for the limit. - q.limit += given - q.given += given - return given, nil - } -} - -// giveMemory will determine an appropriate amount of memory to give -// a query based on what it wants and how much it has allocated in -// the past. It will always return a number greater than or equal -// to want. -func (q *queryMemoryManager) giveMemory(want, unused int64) int64 { - // If we can safely double the limit, then just do that. 
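Since RequestMemory above does its pool bookkeeping locklessly, it may help to see the reservation step in isolation. The following is a stripped-down, self-contained model of that compare-and-swap loop against a shared budget of unused bytes; the pool/reserve names are illustrative and not part of the original package.

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// pool models the controller's shared budget of unused memory bytes.
type pool struct {
	unused int64
}

// reserve claims want bytes from the pool, retrying if another goroutine
// changed the counter between the load and the swap.
func (p *pool) reserve(want int64) (int64, error) {
	for {
		unused := atomic.LoadInt64(&p.unused)
		if unused < want {
			return 0, errors.New("not enough capacity")
		}
		if atomic.CompareAndSwapInt64(&p.unused, unused, unused-want) {
			return want, nil
		}
		// Lost the race; re-read the counter and try again.
	}
}

func main() {
	p := &pool{unused: 64}
	for i := 0; i < 5; i++ {
		got, err := p.reserve(16)
		fmt.Println(got, err)
	}
}
```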
- if q.limit > want && q.limit < unused { - if q.limit*2 <= q.m.memoryBytesQuotaPerQuery { - return q.limit - } - // Doubling the limit sends us over the quota. - // Determine what would be our maximum amount. - max := q.m.memoryBytesQuotaPerQuery - q.limit - if max > want { - return max - } - } - - // If we can't double because there isn't enough space - // in unused, maybe we can just use everything. - if unused > want && unused < q.limit { - return unused - } - - // Otherwise we have already determined we can give the - // wanted number of bytes so just give that. - return want -} - -func (q *queryMemoryManager) FreeMemory(bytes int64) { - // Not implemented. There is no problem with invoking - // this method, but the controller won't recognize that - // the memory has been declared as returned. -} - -// Release will release all of the allocated memory to the -// memory manager. -func (q *queryMemoryManager) Release() { - if !q.m.unlimited { - q.m.addUnusedMemoryBytes(q.given) - } - q.limit = q.m.initialBytesQuotaPerQuery - q.given = 0 -} diff --git a/query/control/metrics.go b/query/control/metrics.go deleted file mode 100644 index a16d7dd6b70..00000000000 --- a/query/control/metrics.go +++ /dev/null @@ -1,111 +0,0 @@ -package control - -import "github.com/prometheus/client_golang/prometheus" - -// controllerMetrics holds metrics related to the query controller. -type controllerMetrics struct { - requests *prometheus.CounterVec - functions *prometheus.CounterVec - - all *prometheus.GaugeVec - compiling *prometheus.GaugeVec - queueing *prometheus.GaugeVec - executing *prometheus.GaugeVec - memoryUnused *prometheus.GaugeVec - - allDur *prometheus.HistogramVec - compilingDur *prometheus.HistogramVec - queueingDur *prometheus.HistogramVec - executingDur *prometheus.HistogramVec -} - -type requestsLabel string - -const ( - labelSuccess = requestsLabel("success") - labelCompileError = requestsLabel("compile_error") - labelRuntimeError = requestsLabel("runtime_error") - labelQueueError = requestsLabel("queue_error") -) - -func newControllerMetrics(labels []string) *controllerMetrics { - return &controllerMetrics{ - requests: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "qc_requests_total", - Help: "Count of the query requests", - }, append(labels, "result")), - - functions: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "qc_functions_total", - Help: "Count of functions in queries", - }, append(labels, "function")), - - all: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "qc_all_active", - Help: "Number of queries in all states", - }, labels), - - compiling: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "qc_compiling_active", - Help: "Number of queries actively compiling", - }, append(labels, "compiler_type")), - - queueing: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "qc_queueing_active", - Help: "Number of queries actively queueing", - }, labels), - - executing: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "qc_executing_active", - Help: "Number of queries actively executing", - }, labels), - - memoryUnused: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "qc_memory_unused_bytes", - Help: "The free memory as seen by the internal memory manager", - }, labels), - - allDur: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Name: "qc_all_duration_seconds", - Help: "Histogram of total times spent in all query states", - Buckets: prometheus.ExponentialBuckets(1e-3, 5, 7), - }, labels), - - compilingDur: 
prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Name: "qc_compiling_duration_seconds", - Help: "Histogram of times spent compiling queries", - Buckets: prometheus.ExponentialBuckets(1e-3, 5, 7), - }, append(labels, "compiler_type")), - - queueingDur: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Name: "qc_queueing_duration_seconds", - Help: "Histogram of times spent queueing queries", - Buckets: prometheus.ExponentialBuckets(1e-3, 5, 7), - }, labels), - - executingDur: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Name: "qc_executing_duration_seconds", - Help: "Histogram of times spent executing queries", - Buckets: prometheus.ExponentialBuckets(1e-3, 5, 7), - }, labels), - } -} - -// PrometheusCollectors satisfies the prom.PrometheusCollector interface. -func (cm *controllerMetrics) PrometheusCollectors() []prometheus.Collector { - return []prometheus.Collector{ - cm.requests, - cm.functions, - - cm.all, - cm.compiling, - cm.queueing, - cm.executing, - cm.memoryUnused, - - cm.allDur, - cm.compilingDur, - cm.queueingDur, - cm.executingDur, - } -} diff --git a/query/dependency.go b/query/dependency.go deleted file mode 100644 index 566ad7b8a76..00000000000 --- a/query/dependency.go +++ /dev/null @@ -1,132 +0,0 @@ -package query - -import ( - "context" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/codes" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// FromBucketService wraps an influxdb.BucketService in the BucketLookup interface. -func FromBucketService(srv influxdb.BucketService) *BucketLookup { - return &BucketLookup{ - BucketService: srv, - } -} - -// BucketLookup converts Flux bucket lookups into influxdb.BucketService calls. -type BucketLookup struct { - BucketService influxdb.BucketService -} - -// Lookup returns the bucket id and its existence given an org id and bucket name. -func (b *BucketLookup) Lookup(ctx context.Context, orgID platform.ID, name string) (platform.ID, bool) { - filter := influxdb.BucketFilter{ - OrganizationID: &orgID, - Name: &name, - } - bucket, err := b.BucketService.FindBucket(ctx, filter) - if err != nil { - return platform.InvalidID(), false - } - return bucket.ID, true -} - -// LookupName returns an bucket name given its organization ID and its bucket ID. -func (b *BucketLookup) LookupName(ctx context.Context, orgID platform.ID, id platform.ID) string { - filter := influxdb.BucketFilter{ - OrganizationID: &orgID, - ID: &id, - } - bucket, err := b.BucketService.FindBucket(ctx, filter) - if err != nil || bucket == nil { - return "" - } - return bucket.Name -} - -func (b *BucketLookup) FindAllBuckets(ctx context.Context, orgID platform.ID) ([]*influxdb.Bucket, int) { - filter := influxdb.BucketFilter{ - OrganizationID: &orgID, - } - - var allBuckets []*influxdb.Bucket - opt := influxdb.FindOptions{Limit: 20} - for ; ; opt.Offset += opt.Limit { - buckets, _, err := b.BucketService.FindBuckets(ctx, filter, opt) - if err != nil { - return nil, len(buckets) - } - allBuckets = append(allBuckets, buckets...) - if len(buckets) < opt.Limit { - break - } - } - return allBuckets, len(allBuckets) -} - -// FromOrganizationService wraps a influxdb.OrganizationService in the OrganizationLookup interface. -func FromOrganizationService(srv influxdb.OrganizationService) *OrganizationLookup { - return &OrganizationLookup{OrganizationService: srv} -} - -// OrganizationLookup converts organization name lookups into influxdb.OrganizationService calls. 
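The BucketLookup above resolves names through whatever influxdb.BucketService it wraps, so it can be exercised with a stubbed service. The sketch below assumes the companion mock package (imported by dependency_test.go later in this diff) exposes a BucketService stub with a FindBucketFn hook, analogous to the SecretService stub used in that test; if that signature differs, the stub would need adjusting.

```go
package example

import (
	"context"
	"fmt"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/mock"
	"github.com/influxdata/influxdb/v2/query"
)

func lookupExample() {
	// Assumed mock: every FindBucket call resolves to the same bucket.
	svc := &mock.BucketService{
		FindBucketFn: func(ctx context.Context, f influxdb.BucketFilter) (*influxdb.Bucket, error) {
			return &influxdb.Bucket{ID: platform.ID(1), Name: *f.Name}, nil
		},
	}

	lookup := query.FromBucketService(svc)
	id, ok := lookup.Lookup(context.Background(), platform.ID(42), "my-bucket")
	fmt.Println(id, ok)
}
```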
-type OrganizationLookup struct { - OrganizationService influxdb.OrganizationService -} - -// Lookup returns the organization ID and its existence given an organization name. -func (o *OrganizationLookup) Lookup(ctx context.Context, name string) (platform.ID, bool) { - org, err := o.OrganizationService.FindOrganization( - ctx, - influxdb.OrganizationFilter{Name: &name}, - ) - - if err != nil { - return platform.InvalidID(), false - } - return org.ID, true -} - -// LookupName returns an organization name given its ID. -func (o *OrganizationLookup) LookupName(ctx context.Context, id platform.ID) string { - id = platform.ID(id) - org, err := o.OrganizationService.FindOrganization( - ctx, - influxdb.OrganizationFilter{ - ID: &id, - }, - ) - - if err != nil || org == nil { - return "" - } - return org.Name -} - -// SecretLookup wraps the influxdb.SecretService to perform lookups based on the organization -// in the context. -type SecretLookup struct { - SecretService influxdb.SecretService -} - -// FromSecretService wraps a influxdb.OrganizationService in the OrganizationLookup interface. -func FromSecretService(srv influxdb.SecretService) *SecretLookup { - return &SecretLookup{SecretService: srv} -} - -// LoadSecret loads the secret associated with the key in the current organization context. -func (s *SecretLookup) LoadSecret(ctx context.Context, key string) (string, error) { - req := RequestFromContext(ctx) - if req == nil { - return "", &flux.Error{ - Code: codes.Internal, - Msg: "missing request on context", - } - } - - orgID := req.OrganizationID - return s.SecretService.LoadSecret(ctx, orgID, key) -} diff --git a/query/dependency_test.go b/query/dependency_test.go deleted file mode 100644 index 7d3c64b561b..00000000000 --- a/query/dependency_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package query_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/query" -) - -func TestSecretLookup(t *testing.T) { - req := &query.Request{OrganizationID: orgID} - ctx := query.ContextWithRequest(context.Background(), req) - svc := &mock.SecretService{ - LoadSecretFn: func(ctx context.Context, orgID platform.ID, k string) (string, error) { - if want, got := req.OrganizationID, orgID; want != got { - t.Errorf("unexpected organization id -want/+got:\n\t- %v\n\t+ %v", want, got) - } - if want, got := "mysecret", k; want != got { - t.Errorf("unexpected secret key -want/+got:\n\t- %v\n\t+ %v", want, got) - } - return "mypassword", nil - }, - } - - dep := query.FromSecretService(svc) - if val, err := dep.LoadSecret(ctx, "mysecret"); err != nil { - t.Errorf("unexpected error: %s", err) - } else if want, got := "mypassword", val; want != got { - t.Errorf("unexpected secret value -want/+got:\n\t- %v\n\t+ %v", want, got) - } -} diff --git a/query/encode.go b/query/encode.go deleted file mode 100644 index 91f95ec50a9..00000000000 --- a/query/encode.go +++ /dev/null @@ -1,134 +0,0 @@ -package query - -import ( - "io" - "net/http" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/csv" -) - -const ( - NoContentDialectType = "no-content" - NoContentWErrDialectType = "no-content-with-error" -) - -// AddDialectMappings adds the mappings for the no-content dialects. 
-func AddDialectMappings(mappings flux.DialectMappings) error { - if err := mappings.Add(NoContentDialectType, func() flux.Dialect { - return NewNoContentDialect() - }); err != nil { - return err - } - return mappings.Add(NoContentWErrDialectType, func() flux.Dialect { - return NewNoContentWithErrorDialect() - }) -} - -// NoContentDialect is a dialect that provides an Encoder that discards query results. -// When invoking `dialect.Encoder().Encode(writer, results)`, `results` get consumed, -// while the `writer` is left intact. -// It is an HTTPDialect that sets the response status code to 204 NoContent. -type NoContentDialect struct{} - -func NewNoContentDialect() *NoContentDialect { - return &NoContentDialect{} -} - -func (d *NoContentDialect) Encoder() flux.MultiResultEncoder { - return &NoContentEncoder{} -} - -func (d *NoContentDialect) DialectType() flux.DialectType { - return NoContentDialectType -} - -func (d *NoContentDialect) SetHeaders(w http.ResponseWriter) { - w.WriteHeader(http.StatusNoContent) -} - -type NoContentEncoder struct{} - -func (e *NoContentEncoder) Encode(w io.Writer, results flux.ResultIterator) (int64, error) { - defer results.Release() - // Consume and discard results. - for results.More() { - if err := results.Next().Tables().Do(func(tbl flux.Table) error { - return tbl.Do(func(cr flux.ColReader) error { - return nil - }) - }); err != nil { - return 0, err - } - } - // Do not write anything. - return 0, nil -} - -// NoContentWithErrorDialect is a dialect that provides an Encoder that discards query results, -// but it encodes runtime errors from the Flux query in CSV format. -// To discover if there was any runtime error in the query, one should check the response size. -// If it is equal to zero, then no error was present. -// Otherwise one can decode the response body to get the error. For example: -// ``` -// _, err = csv.NewResultDecoder(csv.ResultDecoderConfig{}).Decode(bytes.NewReader(res)) -// -// if err != nil { -// // we got some runtime error -// } -// -// ``` -type NoContentWithErrorDialect struct { - csv.ResultEncoderConfig -} - -func NewNoContentWithErrorDialect() *NoContentWithErrorDialect { - return &NoContentWithErrorDialect{ - ResultEncoderConfig: csv.DefaultEncoderConfig(), - } -} - -func (d *NoContentWithErrorDialect) Encoder() flux.MultiResultEncoder { - return &NoContentWithErrorEncoder{ - errorEncoder: csv.NewResultEncoder(d.ResultEncoderConfig), - } -} - -func (d *NoContentWithErrorDialect) DialectType() flux.DialectType { - return NoContentWErrDialectType -} - -func (d *NoContentWithErrorDialect) SetHeaders(w http.ResponseWriter) { - w.Header().Set("Content-Type", "text/csv; charset=utf-8") - w.Header().Set("Transfer-Encoding", "chunked") -} - -type NoContentWithErrorEncoder struct { - errorEncoder *csv.ResultEncoder -} - -func (e *NoContentWithErrorEncoder) Encode(w io.Writer, results flux.ResultIterator) (int64, error) { - // Make sure we release results. - // Remember, it is safe to call `Release` multiple times. - defer results.Release() - // Consume and discard results, but keep an eye on errors. - for results.More() { - if err := results.Next().Tables().Do(func(tbl flux.Table) error { - return tbl.Do(func(cr flux.ColReader) error { - return nil - }) - }); err != nil { - // If there is an error, then encode it in the response. - if encErr := e.errorEncoder.EncodeError(w, err); encErr != nil { - return 0, encErr - } - } - } - // Now Release in order to populate the error, if present. 
- results.Release() - err := results.Err() - if err != nil { - return 0, e.errorEncoder.EncodeError(w, err) - } - return 0, nil -} diff --git a/query/encode_test.go b/query/encode_test.go deleted file mode 100644 index 7321e137999..00000000000 --- a/query/encode_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package query_test - -import ( - "bytes" - "context" - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/flux" - "github.com/influxdata/flux/csv" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/execute/executetest" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/mock" -) - -func TestReturnNoContent(t *testing.T) { - getMockResult := func() flux.Result { - // Some random data. - r := executetest.NewResult([]*executetest.Table{{ - KeyCols: []string{"t1"}, - ColMeta: []flux.ColMeta{ - {Label: "_time", Type: flux.TTime}, - {Label: "_value", Type: flux.TFloat}, - {Label: "t1", Type: flux.TString}, - {Label: "t2", Type: flux.TString}, - }, - Data: [][]interface{}{ - {execute.Time(0), 1.0, "a", "y"}, - {execute.Time(10), 2.0, "a", "x"}, - {execute.Time(20), 3.0, "a", "y"}, - {execute.Time(30), 4.0, "a", "x"}, - {execute.Time(40), 5.0, "a", "y"}, - }, - }}) - r.Nm = "foo" - return r - } - assertNoContent := func(t *testing.T, respBody []byte, stats flux.Statistics, reqErr error) { - if reqErr != nil { - t.Fatalf("unexpected error on query: %v", reqErr) - } - if body := string(respBody); len(body) > 0 { - t.Fatalf("response body should be empty, but was: %s", body) - } - } - - testCases := []struct { - name string - queryFn func(ctx context.Context, req *query.Request) (flux.Query, error) - dialect flux.Dialect - assertFn func(t *testing.T, respBody []byte, stats flux.Statistics, reqErr error) - }{ - { - name: "no-content - no error", - queryFn: func(ctx context.Context, req *query.Request) (flux.Query, error) { - q := mock.NewQuery() - q.SetResults(getMockResult()) - return q, nil - }, - dialect: query.NewNoContentDialect(), - assertFn: assertNoContent, - }, - { - name: "no-content - error", - queryFn: func(ctx context.Context, req *query.Request) (flux.Query, error) { - q := mock.NewQuery() - q.SetResults(getMockResult()) - q.SetErr(fmt.Errorf("I am a runtime error")) - return q, nil - }, - dialect: query.NewNoContentDialect(), - assertFn: assertNoContent, - }, - { - name: "no-content-with-error - no error", - queryFn: func(ctx context.Context, req *query.Request) (flux.Query, error) { - q := mock.NewQuery() - q.SetResults(getMockResult()) - return q, nil - }, - dialect: query.NewNoContentWithErrorDialect(), - assertFn: assertNoContent, - }, - { - name: "no-content-with-error - error", - queryFn: func(ctx context.Context, req *query.Request) (flux.Query, error) { - q := mock.NewQuery() - q.SetResults(getMockResult()) - q.SetErr(fmt.Errorf("I am a runtime error")) - return q, nil - }, - dialect: query.NewNoContentWithErrorDialect(), - assertFn: func(t *testing.T, respBody []byte, stats flux.Statistics, reqErr error) { - if reqErr != nil { - t.Fatalf("unexpected error on query: %v", reqErr) - } - if len(respBody) == 0 { - t.Fatalf("response body should not be empty, but it was") - } - _, err := csv.NewResultDecoder(csv.ResultDecoderConfig{}).Decode(bytes.NewReader(respBody)) - if err == nil { - t.Fatalf("expected error got none") - } else if diff := cmp.Diff(err.Error(), "I am a runtime error"); diff != "" { - t.Fatalf("unexpected error, -want/+got:\n\t%s", 
diff) - } - }, - }, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - mockAsyncSvc := &mock.AsyncQueryService{ - QueryF: tc.queryFn, - } - w := bytes.NewBuffer([]byte{}) - bridge := query.ProxyQueryServiceAsyncBridge{ - AsyncQueryService: mockAsyncSvc, - } - stats, err := bridge.Query(context.Background(), w, &query.ProxyRequest{ - Request: query.Request{}, - Dialect: tc.dialect, - }) - tc.assertFn(t, w.Bytes(), stats, err) - }) - } -} diff --git a/query/fluxlang/service.go b/query/fluxlang/service.go deleted file mode 100644 index d50f16d2963..00000000000 --- a/query/fluxlang/service.go +++ /dev/null @@ -1,63 +0,0 @@ -// Package language exposes the flux parser as an interface. -package fluxlang - -import ( - "context" - - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/ast/astutil" - "github.com/influxdata/flux/complete" - "github.com/influxdata/flux/interpreter" - "github.com/influxdata/flux/parser" - "github.com/influxdata/flux/runtime" - "github.com/influxdata/flux/values" -) - -// SourceQuery is a query for a source. -type SourceQuery struct { - Query string `json:"query"` - Type string `json:"type"` -} - -// FluxLanguageService is a service for interacting with flux code. -type FluxLanguageService interface { - // Parse will take flux source code and produce a package. - // If there are errors when parsing, the first error is returned. - // An ast.Package may be returned when a parsing error occurs, - // but it may be null if parsing didn't even occur. - Parse(source string) (*ast.Package, error) - - // Format will produce a string for the given *ast.File. - Format(f *ast.File) (string, error) - - // EvalAST will evaluate and run an AST. - EvalAST(ctx context.Context, astPkg *ast.Package) ([]interpreter.SideEffect, values.Scope, error) - - // Completer will return a flux completer. - Completer() complete.Completer -} - -// DefaultService is the default language service. -var DefaultService FluxLanguageService = defaultService{} - -type defaultService struct{} - -func (d defaultService) Parse(source string) (pkg *ast.Package, err error) { - pkg = parser.ParseSource(source) - if ast.Check(pkg) > 0 { - err = ast.GetError(pkg) - } - return pkg, err -} - -func (d defaultService) Format(f *ast.File) (string, error) { - return astutil.Format(f) -} - -func (d defaultService) EvalAST(ctx context.Context, astPkg *ast.Package) ([]interpreter.SideEffect, values.Scope, error) { - return runtime.EvalAST(ctx, astPkg) -} - -func (d defaultService) Completer() complete.Completer { - return complete.NewCompleter(runtime.Prelude()) -} diff --git a/query/logger.go b/query/logger.go deleted file mode 100644 index 7ec205cf69d..00000000000 --- a/query/logger.go +++ /dev/null @@ -1,57 +0,0 @@ -package query - -import ( - "time" - - "github.com/influxdata/flux" - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -// Logger persists metadata about executed queries. -type Logger interface { - Log(Log) error -} - -// Log captures a query and any relevant metadata for the query execution. 
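The `Log` type and its `Redact` helper follow below. As a hedged sketch of how a `Logger` implementation might use them, here is a hypothetical zap-backed logger; the logger type and the field names passed to zap are illustrative, not part of the package:

```go
package example

import (
	"github.com/influxdata/influxdb/v2/query"
	"go.uber.org/zap"
)

// zapQueryLogger is a hypothetical Logger that strips authorization tokens
// from the proxy request before emitting a structured log entry.
type zapQueryLogger struct {
	log *zap.Logger
}

var _ query.Logger = (*zapQueryLogger)(nil)

func (l *zapQueryLogger) Log(q query.Log) error {
	// Redact replaces the embedded authorization with a token-less copy,
	// leaving the caller's original request untouched.
	q.Redact()
	l.log.Info("flux query",
		zap.Time("time", q.Time),
		zap.String("org_id", q.OrganizationID.String()),
		zap.String("trace_id", q.TraceID),
		zap.Bool("sampled", q.Sampled),
		zap.Int64("response_size", q.ResponseSize),
		zap.Error(q.Error),
	)
	return nil
}
```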
-type Log struct { - // Time is the time the query was completed - Time time.Time - // OrganizationID is the ID of the organization that requested the query - OrganizationID platform2.ID - // TraceID is the ID of the trace related to this query - TraceID string - // Sampled specifies whether the trace for TraceID was chosen for permanent storage - // by the sampling mechanism of the tracer - Sampled bool - // Error is any error encountered by the query - Error error - - // ProxyRequest is the query request - ProxyRequest *ProxyRequest - // ResponseSize is the size in bytes of the query response - ResponseSize int64 - // Statistics is a set of statistics about the query execution - Statistics flux.Statistics -} - -// Redact removes any sensitive information before logging -func (q *Log) Redact() { - if q.ProxyRequest != nil && q.ProxyRequest.Request.Authorization != nil { - // Make shallow copy of request - request := new(ProxyRequest) - *request = *q.ProxyRequest - - // Make shallow copy of authorization - auth := new(platform.Authorization) - *auth = *q.ProxyRequest.Request.Authorization - // Redact authorization token - auth.Token = "" - - // Apply redacted authorization - request.Request.Authorization = auth - - // Apply redacted request - q.ProxyRequest = request - } -} diff --git a/query/logging.go b/query/logging.go deleted file mode 100644 index 7c803d514e4..00000000000 --- a/query/logging.go +++ /dev/null @@ -1,122 +0,0 @@ -package query - -import ( - "context" - "fmt" - "io" - "runtime/debug" - "time" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/iocounter" - "github.com/influxdata/influxdb/v2/kit/check" - "github.com/influxdata/influxdb/v2/kit/tracing" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// LoggingProxyQueryService wraps a ProxyQueryService and logs the queries. -type LoggingProxyQueryService struct { - proxyQueryService ProxyQueryService - queryLogger Logger - nowFunction func() time.Time - log *zap.Logger - cond func(ctx context.Context) bool - - // If this is set then logging happens only if this key is present in the - // metadata. - requireMetadataKey string -} - -// LoggingProxyQueryServiceOption provides a way to modify the -// behavior of LoggingProxyQueryService. -type LoggingProxyQueryServiceOption func(lpqs *LoggingProxyQueryService) - -// ConditionalLogging returns a LoggingProxyQueryServiceOption -// that only logs if the passed in function returns true. -// Thus logging can be controlled by a request-scoped attribute, e.g., a feature flag. -func ConditionalLogging(cond func(context.Context) bool) LoggingProxyQueryServiceOption { - return func(lpqs *LoggingProxyQueryService) { - lpqs.cond = cond - } -} - -func RequireMetadataKey(metadataKey string) LoggingProxyQueryServiceOption { - return func(lpqs *LoggingProxyQueryService) { - lpqs.requireMetadataKey = metadataKey - } -} - -func NewLoggingProxyQueryService(log *zap.Logger, queryLogger Logger, proxyQueryService ProxyQueryService, opts ...LoggingProxyQueryServiceOption) *LoggingProxyQueryService { - lpqs := &LoggingProxyQueryService{ - proxyQueryService: proxyQueryService, - queryLogger: queryLogger, - nowFunction: time.Now, - log: log, - } - - for _, o := range opts { - o(lpqs) - } - - return lpqs -} - -func (s *LoggingProxyQueryService) SetNowFunctionForTesting(nowFunction func() time.Time) { - s.nowFunction = nowFunction -} - -// Query executes and logs the query. 
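The `Query` method follows. Before it, here is a short sketch of how the constructor and its options compose; `shouldLog`, `metadataKey`, and the function name are placeholders supplied by the caller, not names from this package:

```go
package example

import (
	"context"

	"github.com/influxdata/influxdb/v2/query"
	"go.uber.org/zap"
)

// newAuditedQueryService wraps a ProxyQueryService so that queries are
// logged only when the caller-supplied predicate approves the request
// context and the query statistics carry the given metadata key.
func newAuditedQueryService(
	log *zap.Logger,
	queryLogger query.Logger,
	underlying query.ProxyQueryService,
	shouldLog func(context.Context) bool, // e.g. backed by a feature flag
	metadataKey string, // e.g. a key known to appear in flux.Statistics.Metadata
) *query.LoggingProxyQueryService {
	return query.NewLoggingProxyQueryService(
		log,
		queryLogger,
		underlying,
		query.ConditionalLogging(shouldLog),
		query.RequireMetadataKey(metadataKey),
	)
}
```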
-func (s *LoggingProxyQueryService) Query(ctx context.Context, w io.Writer, req *ProxyRequest) (stats flux.Statistics, err error) { - if s.cond != nil && !s.cond(ctx) { - // Logging is conditional, and we are not logging this request. - // Just invoke the wrapped service directly. - return s.proxyQueryService.Query(ctx, w, req) - } - - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var n int64 - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("panic: %v", r) - if entry := s.log.Check(zapcore.InfoLevel, "QueryLogging panic"); entry != nil { - entry.Stack = string(debug.Stack()) - entry.Write(zap.Error(err)) - } - } - - // Enforce requireMetadataKey, if set. - if s.requireMetadataKey != "" { - if _, ok := stats.Metadata[s.requireMetadataKey]; !ok { - return - } - } - - traceID, sampled, _ := tracing.InfoFromContext(ctx) - log := Log{ - OrganizationID: req.Request.OrganizationID, - TraceID: traceID, - Sampled: sampled, - ProxyRequest: req, - ResponseSize: n, - Time: s.nowFunction(), - Statistics: stats, - Error: err, - } - s.queryLogger.Log(log) - }() - - wc := &iocounter.Writer{Writer: w} - stats, err = s.proxyQueryService.Query(ctx, wc, req) - if err != nil { - return stats, tracing.LogError(span, err) - } - n = wc.Count() - return stats, nil -} - -func (s *LoggingProxyQueryService) Check(ctx context.Context) check.Response { - return s.proxyQueryService.Check(ctx) -} diff --git a/query/logging_test.go b/query/logging_test.go deleted file mode 100644 index b905ff892b3..00000000000 --- a/query/logging_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package query_test - -import ( - "bytes" - "context" - "io" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/flux" - "github.com/influxdata/flux/metadata" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/mock" - "github.com/opentracing/opentracing-go" - "github.com/uber/jaeger-client-go" - "go.uber.org/zap" -) - -var orgID = MustIDBase16("ba55ba55ba55ba55") - -// MustIDBase16 is an helper to ensure a correct ID is built during testing. -func MustIDBase16(s string) platform2.ID { - id, err := platform2.IDFromString(s) - if err != nil { - panic(err) - } - return *id -} - -var opts = []cmp.Option{ - cmpopts.IgnoreUnexported(query.ProxyRequest{}), - cmpopts.IgnoreUnexported(query.Request{}), -} - -type contextKey string - -const loggingCtxKey contextKey = "do-logging" - -func TestLoggingProxyQueryService(t *testing.T) { - // Set a Jaeger in-memory tracer to get span information in the query log. 
- oldTracer := opentracing.GlobalTracer() - defer opentracing.SetGlobalTracer(oldTracer) - sampler := jaeger.NewConstSampler(true) - reporter := jaeger.NewInMemoryReporter() - tracer, closer := jaeger.NewTracer(t.Name(), sampler, reporter) - defer closer.Close() - opentracing.SetGlobalTracer(tracer) - - wantStats := flux.Statistics{ - TotalDuration: time.Second, - CompileDuration: time.Second, - QueueDuration: time.Second, - PlanDuration: time.Second, - RequeueDuration: time.Second, - ExecuteDuration: time.Second, - Concurrency: 2, - MaxAllocated: 2048, - Metadata: make(metadata.Metadata), - } - wantStats.Metadata.Add("some-mock-metadata", 42) - wantBytes := 10 - pqs := &mock.ProxyQueryService{ - QueryF: func(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) { - w.Write(make([]byte, wantBytes)) - return wantStats, nil - }, - } - var logs []query.Log - logger := &mock.QueryLogger{ - LogFn: func(l query.Log) error { - logs = append(logs, l) - return nil - }, - } - - req := &query.ProxyRequest{ - Request: query.Request{ - Authorization: nil, - OrganizationID: orgID, - Compiler: nil, - }, - Dialect: nil, - } - - t.Run("log", func(t *testing.T) { - defer func() { - logs = nil - }() - wantTime := time.Now() - lpqs := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs) - lpqs.SetNowFunctionForTesting(func() time.Time { - return wantTime - }) - - var buf bytes.Buffer - stats, err := lpqs.Query(context.Background(), &buf, req) - if err != nil { - t.Fatal(err) - } - if !cmp.Equal(wantStats, stats, opts...) { - t.Errorf("unexpected query stats: -want/+got\n%s", cmp.Diff(wantStats, stats, opts...)) - } - traceID := reporter.GetSpans()[0].Context().(jaeger.SpanContext).TraceID().String() - wantLogs := []query.Log{{ - Time: wantTime, - OrganizationID: orgID, - TraceID: traceID, - Sampled: true, - Error: nil, - ProxyRequest: req, - ResponseSize: int64(wantBytes), - Statistics: wantStats, - }} - if !cmp.Equal(wantLogs, logs, opts...) 
{ - t.Errorf("unexpected query logs: -want/+got\n%s", cmp.Diff(wantLogs, logs, opts...)) - } - }) - - t.Run("conditional logging", func(t *testing.T) { - defer func() { - logs = nil - }() - - condLog := query.ConditionalLogging(func(ctx context.Context) bool { - return ctx.Value(loggingCtxKey) != nil - }) - - lpqs := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs, condLog) - _, err := lpqs.Query(context.Background(), io.Discard, req) - if err != nil { - t.Fatal(err) - } - - if len(logs) != 0 { - t.Fatal("expected query service not to log") - } - - ctx := context.WithValue(context.Background(), loggingCtxKey, true) - _, err = lpqs.Query(ctx, io.Discard, req) - if err != nil { - t.Fatal(err) - } - - if len(logs) != 1 { - t.Fatal("expected query service to log") - } - }) - - t.Run("require metadata key", func(t *testing.T) { - defer func() { - logs = nil - }() - - reqMeta1 := query.RequireMetadataKey("this-metadata-wont-be-found") - lpqs1 := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs, reqMeta1) - - _, err := lpqs1.Query(context.Background(), io.Discard, req) - if err != nil { - t.Fatal(err) - } - - if len(logs) != 0 { - t.Fatal("expected query service not to log") - } - - reqMeta2 := query.RequireMetadataKey("some-mock-metadata") - lpqs2 := query.NewLoggingProxyQueryService(zap.NewNop(), logger, pqs, reqMeta2) - - _, err = lpqs2.Query(context.Background(), io.Discard, req) - if err != nil { - t.Fatal(err) - } - - if len(logs) != 1 { - t.Fatal("expected query service to log") - } - }) -} diff --git a/query/mock/logger.go b/query/mock/logger.go deleted file mode 100644 index e1805ef5136..00000000000 --- a/query/mock/logger.go +++ /dev/null @@ -1,13 +0,0 @@ -package mock - -import "github.com/influxdata/influxdb/v2/query" - -var _ query.Logger = (*QueryLogger)(nil) - -type QueryLogger struct { - LogFn func(query.Log) error -} - -func (l *QueryLogger) Log(log query.Log) error { - return l.LogFn(log) -} diff --git a/query/mock/service.go b/query/mock/service.go deleted file mode 100644 index 9dea08940ad..00000000000 --- a/query/mock/service.go +++ /dev/null @@ -1,127 +0,0 @@ -package mock - -import ( - "context" - "io" - "sync" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/metadata" - "github.com/influxdata/influxdb/v2/kit/check" - "github.com/influxdata/influxdb/v2/query" -) - -// ProxyQueryService mocks the idpe QueryService for testing. -type ProxyQueryService struct { - QueryF func(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) -} - -// Query writes the results of the query request. -func (s *ProxyQueryService) Query(ctx context.Context, w io.Writer, req *query.ProxyRequest) (flux.Statistics, error) { - return s.QueryF(ctx, w, req) -} - -func (s *ProxyQueryService) Check(ctx context.Context) check.Response { - return check.Response{Name: "Mock Proxy Query Service", Status: check.StatusPass} -} - -// QueryService mocks the idep QueryService for testing. -type QueryService struct { - QueryF func(ctx context.Context, req *query.Request) (flux.ResultIterator, error) -} - -// Query writes the results of the query request. -func (s *QueryService) Query(ctx context.Context, req *query.Request) (flux.ResultIterator, error) { - return s.QueryF(ctx, req) -} - -func (s *QueryService) Check(ctx context.Context) check.Response { - return check.Response{Name: "Mock Query Service", Status: check.StatusPass} -} - -// AsyncQueryService mocks the idep QueryService for testing. 
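The mock `AsyncQueryService` and `Query` defined below enforce the usual `flux.Query` lifecycle. A minimal sketch of that lifecycle against the mock, seeding an empty `executetest` result so the results channel closes, might look like this:

```go
package example_test

import (
	"testing"

	"github.com/influxdata/flux/execute/executetest"
	"github.com/influxdata/influxdb/v2/query/mock"
)

func TestMockQueryLifecycle(t *testing.T) {
	q := mock.NewQuery()
	// SetResults queues a single result and closes the results channel,
	// so the range below terminates after one iteration.
	q.SetResults(executetest.NewResult(nil))

	for range q.Results() {
		// a real consumer would iterate the result's tables here
	}
	q.Done()

	if err := q.Err(); err != nil {
		t.Fatal(err)
	}
	// Statistics is only safe after Done; the mock panics otherwise.
	_ = q.Statistics()
}
```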
-type AsyncQueryService struct { - QueryF func(ctx context.Context, req *query.Request) (flux.Query, error) -} - -// Query writes the results of the query request. -func (s *AsyncQueryService) Query(ctx context.Context, req *query.Request) (flux.Query, error) { - return s.QueryF(ctx, req) -} - -// Query is a mock implementation of a flux.Query. -// It contains controls to ensure that the flux.Query object is used correctly. -// Note: Query will only return one result, specified by calling the SetResults method. -type Query struct { - Metadata metadata.Metadata - - results chan flux.Result - once sync.Once - err error - mu sync.Mutex - done bool -} - -var _ flux.Query = (*Query)(nil) - -// NewQuery constructs a new asynchronous query. -func NewQuery() *Query { - return &Query{ - Metadata: make(metadata.Metadata), - results: make(chan flux.Result, 1), - } -} - -func (q *Query) SetResults(results flux.Result) *Query { - q.results <- results - q.once.Do(func() { - close(q.results) - }) - return q -} - -func (q *Query) SetErr(err error) *Query { - q.err = err - q.Cancel() - return q -} - -func (q *Query) Results() <-chan flux.Result { - return q.results -} -func (q *Query) ProfilerResults() (flux.ResultIterator, error) { - return nil, nil -} - -func (q *Query) Done() { - q.Cancel() - - q.mu.Lock() - q.done = true - q.mu.Unlock() -} - -// Cancel closes the results channel. -func (q *Query) Cancel() { - q.once.Do(func() { - close(q.results) - }) -} - -// Err will return an error if one was set. -func (q *Query) Err() error { - return q.err -} - -// Statistics will return Statistics. Unlike the normal flux.Query, this -// will panic if it is called before Done. -func (q *Query) Statistics() flux.Statistics { - q.mu.Lock() - defer q.mu.Unlock() - if !q.done { - panic("call to query.Statistics() before the query has been finished") - } - return flux.Statistics{ - Metadata: q.Metadata, - } -} diff --git a/query/query.go b/query/query.go deleted file mode 100644 index 3779f972824..00000000000 --- a/query/query.go +++ /dev/null @@ -1,503 +0,0 @@ -package query - -import ( - "errors" - "math" - "time" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxql" -) - -var ( - // ErrQueryInterrupted is an error returned when the query is interrupted. - ErrQueryInterrupted = errors.New("query interrupted") -) - -// ZeroTime is the Unix nanosecond timestamp for no time. -// This time is not used by the query engine or the storage engine as a valid time. -const ZeroTime = int64(math.MinInt64) - -// IteratorOptions is an object passed to CreateIterator to specify creation options. -type IteratorOptions struct { - // Expression to iterate for. - // This can be VarRef or a Call. - Expr influxql.Expr - - // Auxiliary tags or values to also retrieve for the point. - Aux []influxql.VarRef - - // Data sources from which to receive data. This is only used for encoding - // measurements over RPC and is no longer used in the open source version. - Sources []influxql.Source - - // Group by interval and tags. - Interval Interval - Dimensions []string // The final dimensions of the query (stays the same even in subqueries). - GroupBy map[string]struct{} // Dimensions to group points by in intermediate iterators. - Location *time.Location - - // Fill options. - Fill influxql.FillOption - FillValue interface{} - - // Condition to filter by. - Condition influxql.Expr - - // Time range for the iterator. - StartTime int64 - EndTime int64 - - // Sorted in time ascending order if true. 
- Ascending bool - - // Limits the number of points per series. - Limit, Offset int - - // Limits the number of series. - SLimit, SOffset int - - // Removes the measurement name. Useful for meta queries. - StripName bool - - // Removes duplicate rows from raw queries. - Dedupe bool - - // Determines if this is a query for raw data or an aggregate/selector. - Ordered bool - - // Limits on the creation of iterators. - MaxSeriesN int - - // If this channel is set and is closed, the iterator should try to exit - // and close as soon as possible. - InterruptCh <-chan struct{} - - // Authorizer can limit access to data - Authorizer Authorizer -} - -// SeekTime returns the time the iterator should start from. -// For ascending iterators this is the start time, for descending iterators it's the end time. -func (opt IteratorOptions) SeekTime() int64 { - if opt.Ascending { - return opt.StartTime - } - return opt.EndTime -} - -// StopTime returns the time the iterator should end at. -// For ascending iterators this is the end time, for descending iterators it's the start time. -func (opt IteratorOptions) StopTime() int64 { - if opt.Ascending { - return opt.EndTime - } - return opt.StartTime -} - -// Interval represents a repeating interval for a query. -type Interval struct { - Duration time.Duration - Offset time.Duration -} - -// Authorizer determines if certain operations are authorized. -type Authorizer interface { - // AuthorizeDatabase indicates whether the given Privilege is authorized on the database with the given name. - AuthorizeDatabase(p influxql.Privilege, name string) bool - - // AuthorizeQuery returns an error if the query cannot be executed - AuthorizeQuery(database string, query *influxql.Query) error - - // AuthorizeSeriesRead determines if a series is authorized for reading - AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool - - // AuthorizeSeriesWrite determines if a series is authorized for writing - AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool -} - -// FloatPoint represents a point with a float64 value. -// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. -// See TestPoint_Fields in influxql/point_test.go for more details. -type FloatPoint struct { - Name string - Tags Tags - - Time int64 - Value float64 - Aux []interface{} - - // Total number of points that were combined into this point from an aggregate. - // If this is zero, the point is not the result of an aggregate function. - Aggregated uint32 - Nil bool -} - -// Tags represent a map of keys and values. -// It memorizes its key so it can be used efficiently during query execution. -type Tags struct{} - -// Iterator represents a generic interface for all Iterators. -// Most iterator operations are done on the typed sub-interfaces. -type Iterator interface { - Stats() IteratorStats - Close() error -} - -// IteratorStats represents statistics about an iterator. -// Some statistics are available immediately upon iterator creation while -// some are derived as the iterator processes data. -type IteratorStats struct { - SeriesN int // series represented - PointN int // points returned -} - -// TagSet is a fundamental concept within the query system. It represents a composite series, -// composed of multiple individual series that share a set of tag attributes. -type TagSet struct { - Tags map[string]string - Filters []influxql.Expr - SeriesKeys []string - Key []byte -} - -// AddFilter adds a series-level filter to the Tagset. 
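`AddFilter` and the rest of the `TagSet` methods continue below. First, a small sketch of the `SeekTime`/`StopTime` behavior described above, showing how the boundaries swap with the sort direction (the function name is illustrative):

```go
package example

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/v2/query"
)

// seekWindow shows that ascending iterators seek from StartTime and stop at
// EndTime, while descending iterators do the reverse.
func seekWindow() {
	opt := query.IteratorOptions{
		StartTime: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC).UnixNano(),
		EndTime:   time.Date(2021, 1, 2, 0, 0, 0, 0, time.UTC).UnixNano(),
		Ascending: true,
	}
	fmt.Println(opt.SeekTime() == opt.StartTime) // true
	fmt.Println(opt.StopTime() == opt.EndTime)   // true

	opt.Ascending = false
	fmt.Println(opt.SeekTime() == opt.EndTime)   // true
	fmt.Println(opt.StopTime() == opt.StartTime) // true
}
```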
-func (t *TagSet) AddFilter(key string, filter influxql.Expr) { - t.SeriesKeys = append(t.SeriesKeys, key) - t.Filters = append(t.Filters, filter) -} - -func (t *TagSet) Len() int { return len(t.SeriesKeys) } -func (t *TagSet) Less(i, j int) bool { return t.SeriesKeys[i] < t.SeriesKeys[j] } -func (t *TagSet) Swap(i, j int) { - t.SeriesKeys[i], t.SeriesKeys[j] = t.SeriesKeys[j], t.SeriesKeys[i] - t.Filters[i], t.Filters[j] = t.Filters[j], t.Filters[i] -} - -// Reverse reverses the order of series keys and filters in the TagSet. -func (t *TagSet) Reverse() { - for i, j := 0, len(t.Filters)-1; i < j; i, j = i+1, j-1 { - t.Filters[i], t.Filters[j] = t.Filters[j], t.Filters[i] - t.SeriesKeys[i], t.SeriesKeys[j] = t.SeriesKeys[j], t.SeriesKeys[i] - } -} - -// IteratorCost contains statistics retrieved for explaining what potential -// cost may be incurred by instantiating an iterator. -type IteratorCost struct { - // The total number of shards that are touched by this query. - NumShards int64 - - // The total number of non-unique series that are accessed by this query. - // This number matches the number of cursors created by the query since - // one cursor is created for every series. - NumSeries int64 - - // CachedValues returns the number of cached values that may be read by this - // query. - CachedValues int64 - - // The total number of non-unique files that may be accessed by this query. - // This will count the number of files accessed by each series so files - // will likely be double counted. - NumFiles int64 - - // The number of blocks that had the potential to be accessed. - BlocksRead int64 - - // The amount of data that can be potentially read. - BlockSize int64 -} - -// Combine combines the results of two IteratorCost structures into one. -func (c IteratorCost) Combine(other IteratorCost) IteratorCost { - return IteratorCost{ - NumShards: c.NumShards + other.NumShards, - NumSeries: c.NumSeries + other.NumSeries, - CachedValues: c.CachedValues + other.CachedValues, - NumFiles: c.NumFiles + other.NumFiles, - BlocksRead: c.BlocksRead + other.BlocksRead, - BlockSize: c.BlockSize + other.BlockSize, - } -} - -// FloatIterator represents a stream of float points. -type FloatIterator interface { - Iterator - Next() (*FloatPoint, error) -} - -// IntegerIterator represents a stream of integer points. -type IntegerIterator interface { - Iterator - Next() (*IntegerPoint, error) -} - -// IntegerPoint represents a point with a int64 value. -// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. -// See TestPoint_Fields in influxql/point_test.go for more details. -type IntegerPoint struct { - Name string - Tags Tags - - Time int64 - Value int64 - Aux []interface{} - - // Total number of points that were combined into this point from an aggregate. - // If this is zero, the point is not the result of an aggregate function. - Aggregated uint32 - Nil bool -} - -// UnsignedIterator represents a stream of unsigned points. -type UnsignedIterator interface { - Iterator - Next() (*UnsignedPoint, error) -} - -// UnsignedPoint represents a point with a uint64 value. -// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. -// See TestPoint_Fields in influxql/point_test.go for more details. -type UnsignedPoint struct { - Name string - Tags Tags - - Time int64 - Value uint64 - Aux []interface{} - - // Total number of points that were combined into this point from an aggregate. - // If this is zero, the point is not the result of an aggregate function. 
- Aggregated uint32 - Nil bool -} - -// StringIterator represents a stream of string points. -type StringIterator interface { - Iterator - Next() (*StringPoint, error) -} - -// StringPoint represents a point with a string value. -// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. -// See TestPoint_Fields in influxql/point_test.go for more details. -type StringPoint struct { - Name string - Tags Tags - - Time int64 - Value string - Aux []interface{} - - // Total number of points that were combined into this point from an aggregate. - // If this is zero, the point is not the result of an aggregate function. - Aggregated uint32 - Nil bool -} - -// BooleanIterator represents a stream of boolean points. -type BooleanIterator interface { - Iterator - Next() (*BooleanPoint, error) -} - -// BooleanPoint represents a point with a bool value. -// DO NOT ADD ADDITIONAL FIELDS TO THIS STRUCT. -// See TestPoint_Fields in influxql/point_test.go for more details. -type BooleanPoint struct { - Name string - Tags Tags - - Time int64 - Value bool - Aux []interface{} - - // Total number of points that were combined into this point from an aggregate. - // If this is zero, the point is not the result of an aggregate function. - Aggregated uint32 - Nil bool -} - -type MathValuer struct{} - -var _ influxql.CallValuer = MathValuer{} - -func (MathValuer) Value(key string) (interface{}, bool) { - return nil, false -} - -func (v MathValuer) Call(name string, args []interface{}) (interface{}, bool) { - if len(args) == 1 { - arg0 := args[0] - switch name { - case "abs": - switch arg0 := arg0.(type) { - case float64: - return math.Abs(arg0), true - case int64, uint64: - return arg0, true - default: - return nil, true - } - case "sin": - if arg0, ok := asFloat(arg0); ok { - return math.Sin(arg0), true - } - return nil, true - case "cos": - if arg0, ok := asFloat(arg0); ok { - return math.Cos(arg0), true - } - return nil, true - case "tan": - if arg0, ok := asFloat(arg0); ok { - return math.Tan(arg0), true - } - return nil, true - case "floor": - switch arg0 := arg0.(type) { - case float64: - return math.Floor(arg0), true - case int64, uint64: - return arg0, true - default: - return nil, true - } - case "ceil": - switch arg0 := arg0.(type) { - case float64: - return math.Ceil(arg0), true - case int64, uint64: - return arg0, true - default: - return nil, true - } - case "round": - switch arg0 := arg0.(type) { - case float64: - return round(arg0), true - case int64, uint64: - return arg0, true - default: - return nil, true - } - case "asin": - if arg0, ok := asFloat(arg0); ok { - return math.Asin(arg0), true - } - return nil, true - case "acos": - if arg0, ok := asFloat(arg0); ok { - return math.Acos(arg0), true - } - return nil, true - case "atan": - if arg0, ok := asFloat(arg0); ok { - return math.Atan(arg0), true - } - return nil, true - case "exp": - if arg0, ok := asFloat(arg0); ok { - return math.Exp(arg0), true - } - return nil, true - case "ln": - if arg0, ok := asFloat(arg0); ok { - return math.Log(arg0), true - } - return nil, true - case "log2": - if arg0, ok := asFloat(arg0); ok { - return math.Log2(arg0), true - } - return nil, true - case "log10": - if arg0, ok := asFloat(arg0); ok { - return math.Log10(arg0), true - } - return nil, true - case "sqrt": - if arg0, ok := asFloat(arg0); ok { - return math.Sqrt(arg0), true - } - return nil, true - } - } else if len(args) == 2 { - arg0, arg1 := args[0], args[1] - switch name { - case "atan2": - if arg0, arg1, ok := asFloats(arg0, arg1); ok { - return math.Atan2(arg0, 
arg1), true - } - return nil, true - case "log": - if arg0, arg1, ok := asFloats(arg0, arg1); ok { - return math.Log(arg0) / math.Log(arg1), true - } - return nil, true - case "pow": - if arg0, arg1, ok := asFloats(arg0, arg1); ok { - return math.Pow(arg0, arg1), true - } - return nil, true - } - } - return nil, false -} - -func asFloat(x interface{}) (float64, bool) { - switch arg0 := x.(type) { - case float64: - return arg0, true - case int64: - return float64(arg0), true - case uint64: - return float64(arg0), true - default: - return 0, false - } -} - -func asFloats(x, y interface{}) (float64, float64, bool) { - arg0, ok := asFloat(x) - if !ok { - return 0, 0, false - } - arg1, ok := asFloat(y) - if !ok { - return 0, 0, false - } - return arg0, arg1, true -} - -func round(x float64) float64 { - t := math.Trunc(x) - if math.Abs(x-t) >= 0.5 { - return t + math.Copysign(1, x) - } - return t -} - -// OpenAuthorizer is the Authorizer used when authorization is disabled. -// It allows all operations. -type openAuthorizer struct{} - -// OpenAuthorizer can be shared by all goroutines. -var OpenAuthorizer = openAuthorizer{} - -// AuthorizeDatabase returns true to allow any operation on a database. -func (a openAuthorizer) AuthorizeDatabase(influxql.Privilege, string) bool { return true } - -// AuthorizeSeriesRead allows access to any series. -func (a openAuthorizer) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool { - return true -} - -// AuthorizeSeriesWrite allows access to any series. -func (a openAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool { - return true -} - -// AuthorizeSeriesRead allows any query to execute. -func (a openAuthorizer) AuthorizeQuery(_ string, _ *influxql.Query) error { return nil } diff --git a/query/request.go b/query/request.go deleted file mode 100644 index d9eb8beecfd..00000000000 --- a/query/request.go +++ /dev/null @@ -1,204 +0,0 @@ -package query - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - - "github.com/influxdata/flux" - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -const ( - PreferHeaderKey = "Prefer" - PreferNoContentHeaderValue = "return-no-content" - PreferNoContentWErrHeaderValue = "return-no-content-with-error" -) - -// Request represents the query to run. -// Options to mutate the header associated to this Request can be specified -// via `WithOption` or associated methods. -// One should always `Request.ApplyOptions()` before encoding and sending the request. -type Request struct { - // Scope - Authorization *platform.Authorization `json:"authorization,omitempty"` - OrganizationID platform2.ID `json:"organization_id"` - - // Command - - // Compiler converts the query to a specification to run against the data. - Compiler flux.Compiler `json:"compiler"` - - // Source represents the ultimate source of the request. - Source string `json:"source"` - - // compilerMappings maps compiler types to creation methods - compilerMappings flux.CompilerMappings - - options []RequestHeaderOption -} - -// SetReturnNoContent sets the header for a Request to return no content. -func SetReturnNoContent(header http.Header, withError bool) { - if withError { - header.Set(PreferHeaderKey, PreferNoContentWErrHeaderValue) - } else { - header.Set(PreferHeaderKey, PreferNoContentHeaderValue) - } -} - -// RequestHeaderOption is a function that mutates the header associated to a Request. 
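The header-option helpers built on this type are defined next. As a sketch of the resulting client-side flow: options accumulate on the `Request` and only touch the header once `ApplyOptions` runs, which is why it must be called before the request is encoded and sent (the function name here is illustrative):

```go
package example

import (
	"fmt"
	"net/http"

	"github.com/influxdata/influxdb/v2/query"
)

// preferNoContent shows the Prefer header produced by WithReturnNoContent.
func preferNoContent() error {
	var req query.Request
	req.WithReturnNoContent(true) // true selects "return-no-content-with-error"

	header := make(http.Header)
	if err := req.ApplyOptions(header); err != nil {
		return err
	}
	fmt.Println(header.Get(query.PreferHeaderKey)) // "return-no-content-with-error"
	return nil
}
```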
-type RequestHeaderOption = func(header http.Header) error - -// WithOption adds a RequestHeaderOption to this Request. -func (r *Request) WithOption(option RequestHeaderOption) { - r.options = append(r.options, option) -} - -// WithReturnNoContent makes this Request return no content. -func (r *Request) WithReturnNoContent(withError bool) { - r.WithOption(func(header http.Header) error { - SetReturnNoContent(header, withError) - return nil - }) -} - -// ApplyOptions applies every option added to this Request to the given header. -func (r *Request) ApplyOptions(header http.Header) error { - for _, visitor := range r.options { - if err := visitor(header); err != nil { - return err - } - } - return nil -} - -// WithCompilerMappings sets the query type mappings on the request. -func (r *Request) WithCompilerMappings(mappings flux.CompilerMappings) { - r.compilerMappings = mappings -} - -// UnmarshalJSON populates the request from the JSON data. -// WithCompilerMappings must have been called or an error will occur. -func (r *Request) UnmarshalJSON(data []byte) error { - type Alias Request - raw := struct { - *Alias - CompilerType flux.CompilerType `json:"compiler_type"` - Compiler json.RawMessage `json:"compiler"` - }{ - Alias: (*Alias)(r), - } - if err := json.Unmarshal(data, &raw); err != nil { - return err - } - - createCompiler, ok := r.compilerMappings[raw.CompilerType] - if !ok { - return fmt.Errorf("unsupported compiler type %q", raw.CompilerType) - } - - c := createCompiler() - if err := json.Unmarshal(raw.Compiler, c); err != nil { - return err - } - r.Compiler = c - - return nil -} - -func (r Request) MarshalJSON() ([]byte, error) { - type Alias Request - raw := struct { - Alias - CompilerType flux.CompilerType `json:"compiler_type"` - }{ - Alias: (Alias)(r), - CompilerType: r.Compiler.CompilerType(), - } - return json.Marshal(raw) -} - -type contextKey struct{} - -var activeContextKey = contextKey{} - -// ContextWithRequest returns a new context with a reference to the request. -func ContextWithRequest(ctx context.Context, req *Request) context.Context { - return context.WithValue(ctx, activeContextKey, req) -} - -// RequestFromContext retrieves a *Request from a context. -// If not request exists on the context nil is returned. -func RequestFromContext(ctx context.Context) *Request { - v := ctx.Value(activeContextKey) - if v == nil { - return nil - } - return v.(*Request) -} - -// ProxyRequest specifies a query request and the dialect for the results. -type ProxyRequest struct { - // Request is the basic query request - Request Request `json:"request"` - - // Dialect is the result encoder - Dialect flux.Dialect `json:"dialect"` - - // dialectMappings maps dialect types to creation methods - dialectMappings flux.DialectMappings -} - -// WithCompilerMappings sets the compiler type mappings on the request. -func (r *ProxyRequest) WithCompilerMappings(mappings flux.CompilerMappings) { - r.Request.WithCompilerMappings(mappings) -} - -// WithDialectMappings sets the dialect type mappings on the request. -func (r *ProxyRequest) WithDialectMappings(mappings flux.DialectMappings) { - r.dialectMappings = mappings -} - -// UnmarshalJSON populates the request from the JSON data. -// WithCompilerMappings and WithDialectMappings must have been called or an error will occur. 
-func (r *ProxyRequest) UnmarshalJSON(data []byte) error { - type Alias ProxyRequest - raw := struct { - *Alias - DialectType flux.DialectType `json:"dialect_type"` - Dialect json.RawMessage `json:"dialect"` - }{ - Alias: (*Alias)(r), - } - if err := json.Unmarshal(data, &raw); err != nil { - return err - } - - createDialect, ok := r.dialectMappings[raw.DialectType] - if !ok { - return fmt.Errorf("unsupported dialect type %q", raw.DialectType) - } - - d := createDialect() - if err := json.Unmarshal(raw.Dialect, d); err != nil { - return err - } - r.Dialect = d - - return nil -} - -func (r ProxyRequest) MarshalJSON() ([]byte, error) { - type Alias ProxyRequest - raw := struct { - Alias - DialectType flux.DialectType `json:"dialect_type"` - }{ - Alias: (Alias)(r), - DialectType: r.Dialect.DialectType(), - } - return json.Marshal(raw) -} diff --git a/query/service.go b/query/service.go deleted file mode 100644 index a1c82606a81..00000000000 --- a/query/service.go +++ /dev/null @@ -1,69 +0,0 @@ -package query - -import ( - "context" - "io" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/interpreter" - "github.com/influxdata/flux/values" - "github.com/influxdata/influxdb/v2/kit/check" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/query/fluxlang" -) - -// QueryService represents a type capable of performing queries. -type QueryService interface { - check.Checker - - // Query submits a query for execution returning a results iterator. - // Cancel must be called on any returned results to free resources. - Query(ctx context.Context, req *Request) (flux.ResultIterator, error) -} - -// AsyncQueryService represents a service for performing queries where the results are delivered asynchronously. -type AsyncQueryService interface { - // Query submits a query for execution returning immediately. - // Done must be called on any returned Query objects. - Query(ctx context.Context, req *Request) (flux.Query, error) -} - -// ProxyQueryService performs queries and encodes the result into a writer. -// The results are opaque to a ProxyQueryService. -type ProxyQueryService interface { - check.Checker - - // Query performs the requested query and encodes the results into w. - // The number of bytes written to w is returned __independent__ of any error. - Query(ctx context.Context, w io.Writer, req *ProxyRequest) (flux.Statistics, error) -} - -// Parse will take flux source code and produce a package. -// If there are errors when parsing, the first error is returned. -// An ast.Package may be returned when a parsing error occurs, -// but it may be null if parsing didn't even occur. -// -// This will return an error if the FluxLanguageService is nil. -func Parse(lang fluxlang.FluxLanguageService, source string) (*ast.Package, error) { - if lang == nil { - return nil, &errors.Error{ - Code: errors.EInternal, - Msg: "flux is not configured; cannot parse", - } - } - return lang.Parse(source) -} - -// EvalAST will evaluate and run an AST. -// -// This will return an error if the FluxLanguageService is nil. 
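`EvalAST` (below) carries the same nil-service guard as `Parse`. A short sketch of that guard, and of a successful parse with the default language service (the function name is illustrative):

```go
package example

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/query"
	"github.com/influxdata/influxdb/v2/query/fluxlang"
)

// parseFlux demonstrates that a nil language service yields an error, while
// fluxlang.DefaultService parses the source into an *ast.Package.
func parseFlux(src string) error {
	if _, err := query.Parse(nil, src); err == nil {
		return fmt.Errorf("expected an error when no language service is configured")
	}

	pkg, err := query.Parse(fluxlang.DefaultService, src)
	if err != nil {
		return err
	}
	fmt.Println(len(pkg.Files)) // number of files in the parsed package
	return nil
}
```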
-func EvalAST(ctx context.Context, lang fluxlang.FluxLanguageService, astPkg *ast.Package) ([]interpreter.SideEffect, values.Scope, error) { - if lang == nil { - return nil, nil, &errors.Error{ - Code: errors.EInternal, - Msg: "flux is not configured; cannot evaluate", - } - } - return lang.EvalAST(ctx, astPkg) -} diff --git a/query/service_test.go b/query/service_test.go deleted file mode 100644 index ee7470ad07e..00000000000 --- a/query/service_test.go +++ /dev/null @@ -1,132 +0,0 @@ -package query_test - -import ( - "context" - "encoding/json" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/flux" - "github.com/influxdata/influxdb/v2/query" - platformtesting "github.com/influxdata/influxdb/v2/testing" -) - -var CmpOpts = []cmp.Option{ - cmpopts.IgnoreUnexported(query.ProxyRequest{}), - cmpopts.IgnoreUnexported(query.Request{}), -} - -type compilerA struct { - A string `json:"a"` -} - -func (c compilerA) Compile(ctx context.Context, runtime flux.Runtime) (flux.Program, error) { - panic("not implemented") -} - -func (c compilerA) CompilerType() flux.CompilerType { - return "compilerA" -} - -var compilerMappings = flux.CompilerMappings{ - "compilerA": func() flux.Compiler { return new(compilerA) }, -} - -type dialectB struct { - B int `json:"b"` -} - -func (d dialectB) Encoder() flux.MultiResultEncoder { - panic("not implemented") -} - -func (d dialectB) DialectType() flux.DialectType { - return "dialectB" -} - -var dialectMappings = flux.DialectMappings{ - "dialectB": func() flux.Dialect { return new(dialectB) }, -} - -func TestRequest_JSON(t *testing.T) { - testCases := []struct { - name string - data string - want query.Request - }{ - { - name: "simple", - data: `{"organization_id":"aaaaaaaaaaaaaaaa","compiler":{"a":"my custom compiler"},"source":"source","compiler_type":"compilerA"}`, - want: query.Request{ - OrganizationID: platformtesting.MustIDBase16("aaaaaaaaaaaaaaaa"), - Compiler: &compilerA{ - A: "my custom compiler", - }, - Source: "source", - }, - }, - } - for _, tc := range testCases { - var r query.Request - r.WithCompilerMappings(compilerMappings) - - if err := json.Unmarshal([]byte(tc.data), &r); err != nil { - t.Fatal(err) - } - if !cmp.Equal(tc.want, r, CmpOpts...) { - t.Fatalf("unexpected request: -want/+got:\n%s", cmp.Diff(tc.want, r, CmpOpts...)) - } - marshalled, err := json.Marshal(r) - if err != nil { - t.Fatal(err) - } - if got, want := string(marshalled), tc.data; got != want { - t.Fatalf("unexpected marshalled request: -want/+got:\n%s", cmp.Diff(want, got)) - } - } -} - -func TestProxyRequest_JSON(t *testing.T) { - testCases := []struct { - name string - data string - want query.ProxyRequest - }{ - { - name: "simple", - data: `{"request":{"organization_id":"aaaaaaaaaaaaaaaa","compiler":{"a":"my custom compiler"},"source":"source","compiler_type":"compilerA"},"dialect":{"b":42},"dialect_type":"dialectB"}`, - want: query.ProxyRequest{ - Request: query.Request{ - OrganizationID: platformtesting.MustIDBase16("aaaaaaaaaaaaaaaa"), - Compiler: &compilerA{ - A: "my custom compiler", - }, - Source: "source", - }, - Dialect: &dialectB{ - B: 42, - }, - }, - }, - } - for _, tc := range testCases { - var pr query.ProxyRequest - pr.WithCompilerMappings(compilerMappings) - pr.WithDialectMappings(dialectMappings) - - if err := json.Unmarshal([]byte(tc.data), &pr); err != nil { - t.Fatal(err) - } - if !cmp.Equal(tc.want, pr, CmpOpts...) 
{ - t.Fatalf("unexpected proxy request: -want/+got:\n%s", cmp.Diff(tc.want, pr, CmpOpts...)) - } - marshalled, err := json.Marshal(pr) - if err != nil { - t.Fatal(err) - } - if got, want := string(marshalled), tc.data; got != want { - t.Fatalf("unexpected marshalled proxy request: -want/+got:\n%s", cmp.Diff(want, got)) - } - } -} diff --git a/query/stdlib/fluxtest.root b/query/stdlib/fluxtest.root deleted file mode 100644 index 30775bb658f..00000000000 --- a/query/stdlib/fluxtest.root +++ /dev/null @@ -1 +0,0 @@ -influxdb diff --git a/query/stdlib/influxdata/influxdb/buckets.go b/query/stdlib/influxdata/influxdb/buckets.go deleted file mode 100644 index f1030146440..00000000000 --- a/query/stdlib/influxdata/influxdb/buckets.go +++ /dev/null @@ -1,170 +0,0 @@ -package influxdb - -import ( - "context" - "fmt" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/codes" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/plan" - "github.com/influxdata/flux/stdlib/influxdata/influxdb" - "github.com/influxdata/flux/values" - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/query" -) - -const BucketsKind = "influxdata/influxdb.localBuckets" - -func init() { - execute.RegisterSource(BucketsKind, createBucketsSource) - plan.RegisterPhysicalRules(LocalBucketsRule{}) -} - -type LocalBucketsProcedureSpec struct { - plan.DefaultCost -} - -func (s *LocalBucketsProcedureSpec) Kind() plan.ProcedureKind { - return BucketsKind -} - -func (s *LocalBucketsProcedureSpec) Copy() plan.ProcedureSpec { - return new(LocalBucketsProcedureSpec) -} - -type BucketsDecoder struct { - orgID platform2.ID - deps BucketDependencies - buckets []*platform.Bucket - alloc memory.Allocator -} - -func (bd *BucketsDecoder) Connect(ctx context.Context) error { - return nil -} - -func (bd *BucketsDecoder) Fetch(ctx context.Context) (bool, error) { - b, count := bd.deps.FindAllBuckets(ctx, bd.orgID) - if count <= 0 { - return false, &flux.Error{ - Code: codes.NotFound, - Msg: fmt.Sprintf("no buckets found in organization %v", bd.orgID), - } - } - bd.buckets = b - return false, nil -} - -func (bd *BucketsDecoder) Decode(ctx context.Context) (flux.Table, error) { - kb := execute.NewGroupKeyBuilder(nil) - kb.AddKeyValue("organizationID", values.NewString(bd.buckets[0].OrgID.String())) - gk, err := kb.Build() - if err != nil { - return nil, err - } - - b := execute.NewColListTableBuilder(gk, bd.alloc) - - if _, err := b.AddCol(flux.ColMeta{ - Label: "name", - Type: flux.TString, - }); err != nil { - return nil, err - } - if _, err := b.AddCol(flux.ColMeta{ - Label: "id", - Type: flux.TString, - }); err != nil { - return nil, err - } - if _, err := b.AddCol(flux.ColMeta{ - Label: "organizationID", - Type: flux.TString, - }); err != nil { - return nil, err - } - if _, err := b.AddCol(flux.ColMeta{ - Label: "retentionPolicy", - Type: flux.TString, - }); err != nil { - return nil, err - } - if _, err := b.AddCol(flux.ColMeta{ - Label: "retentionPeriod", - Type: flux.TInt, - }); err != nil { - return nil, err - } - - for _, bucket := range bd.buckets { - _ = b.AppendString(0, bucket.Name) - _ = b.AppendString(1, bucket.ID.String()) - _ = b.AppendString(2, bucket.OrgID.String()) - _ = b.AppendString(3, bucket.RetentionPolicyName) - _ = b.AppendInt(4, bucket.RetentionPeriod.Nanoseconds()) - } - - return 
b.Table() -} - -func (bd *BucketsDecoder) Close() error { - return nil -} - -func createBucketsSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) { - _, ok := prSpec.(*LocalBucketsProcedureSpec) - if !ok { - return nil, &flux.Error{ - Code: codes.Internal, - Msg: fmt.Sprintf("invalid spec type %T", prSpec), - } - } - - // the dependencies used for FromKind are adequate for what we need here - // so there's no need to inject custom dependencies for buckets() - deps := GetStorageDependencies(a.Context()).BucketDeps - req := query.RequestFromContext(a.Context()) - if req == nil { - return nil, &flux.Error{ - Code: codes.Internal, - Msg: "missing request on context", - } - } - orgID := req.OrganizationID - - bd := &BucketsDecoder{orgID: orgID, deps: deps, alloc: a.Allocator()} - - return execute.CreateSourceFromDecoder(bd, dsid, a) -} - -type AllBucketLookup interface { - FindAllBuckets(ctx context.Context, orgID platform2.ID) ([]*platform.Bucket, int) -} -type BucketDependencies AllBucketLookup - -type LocalBucketsRule struct{} - -func (rule LocalBucketsRule) Name() string { - return "influxdata/influxdb.LocalBucketsRule" -} - -func (rule LocalBucketsRule) Pattern() plan.Pattern { - return plan.MultiSuccessor(influxdb.BucketsKind) -} - -func (rule LocalBucketsRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) { - fromSpec := node.ProcedureSpec().(*influxdb.BucketsProcedureSpec) - if fromSpec.Host != nil { - return node, false, nil - } else if fromSpec.Org != nil { - return node, false, &flux.Error{ - Code: codes.Unimplemented, - Msg: "buckets cannot list from a separate organization; please specify a host or remove the organization", - } - } - - return plan.CreateLogicalNode("localBuckets", &LocalBucketsProcedureSpec{}), true, nil -} diff --git a/query/stdlib/influxdata/influxdb/dependencies.go b/query/stdlib/influxdata/influxdb/dependencies.go deleted file mode 100644 index 473c046c368..00000000000 --- a/query/stdlib/influxdata/influxdb/dependencies.go +++ /dev/null @@ -1,154 +0,0 @@ -package influxdb - -import ( - "context" - - "github.com/influxdata/flux" - fluxfeature "github.com/influxdata/flux/dependencies/feature" - "github.com/influxdata/flux/dependencies/http" - influxdeps "github.com/influxdata/flux/dependencies/influxdb" - "github.com/influxdata/flux/dependencies/url" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/feature" - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/storage" - "github.com/prometheus/client_golang/prometheus" -) - -type key int - -const dependenciesKey key = iota - -type StorageDependencies struct { - FromDeps FromDependencies - BucketDeps BucketDependencies - ToDeps ToDependencies -} - -func (d StorageDependencies) Inject(ctx context.Context) context.Context { - ctx = influxdeps.Dependency{ - Provider: Provider{ - Reader: d.FromDeps.Reader, - BucketLookup: d.FromDeps.BucketLookup, - }, - }.Inject(ctx) - return context.WithValue(ctx, dependenciesKey, d) -} - -func GetStorageDependencies(ctx context.Context) StorageDependencies { - if ctx.Value(dependenciesKey) == nil { - return StorageDependencies{} - } - return ctx.Value(dependenciesKey).(StorageDependencies) -} - -// PrometheusCollectors satisfies the prom.PrometheusCollector interface. 
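The `PrometheusCollectors` method follows. As a brief sketch of how these dependencies travel on the context (the function name and the import alias `qinfluxdb` are illustrative):

```go
package example

import (
	"context"

	qinfluxdb "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb"
)

// dependenciesRoundTrip shows that Inject stores the storage dependencies on
// the context (and wires the flux influxdb provider), while
// GetStorageDependencies retrieves them later, returning the zero value when
// nothing was injected.
func dependenciesRoundTrip(deps qinfluxdb.StorageDependencies) qinfluxdb.StorageDependencies {
	ctx := deps.Inject(context.Background())
	return qinfluxdb.GetStorageDependencies(ctx)
}
```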
-func (d StorageDependencies) PrometheusCollectors() []prometheus.Collector { - depS := []interface{}{ - d.FromDeps, - d.BucketDeps, - d.ToDeps, - } - collectors := make([]prometheus.Collector, 0, len(depS)) - for _, v := range depS { - if pc, ok := v.(prom.PrometheusCollector); ok { - collectors = append(collectors, pc.PrometheusCollectors()...) - } - } - return collectors -} - -type Dependencies struct { - StorageDeps StorageDependencies - FluxDeps flux.Dependencies -} - -func (d Dependencies) Inject(ctx context.Context) context.Context { - ctx = d.FluxDeps.Inject(ctx) - ctx = d.StorageDeps.Inject(ctx) - return InjectFlagsFromContext(ctx) -} - -// PrometheusCollectors satisfies the prom.PrometheusCollector interface. -func (d Dependencies) PrometheusCollectors() []prometheus.Collector { - collectors := d.StorageDeps.PrometheusCollectors() - if pc, ok := d.FluxDeps.(prom.PrometheusCollector); ok { - collectors = append(collectors, pc.PrometheusCollectors()...) - } - return collectors -} - -type FluxDepOption func(*flux.Deps) - -func WithURLValidator(v url.Validator) FluxDepOption { - return func(d *flux.Deps) { - d.Deps.URLValidator = v - d.Deps.HTTPClient = http.NewDefaultClient(d.Deps.URLValidator) - } -} - -func NewDependencies( - reader query.StorageReader, - writer storage.PointsWriter, - bucketSvc influxdb.BucketService, - orgSvc influxdb.OrganizationService, - ss influxdb.SecretService, - metricLabelKeys []string, - fluxopts ...FluxDepOption, -) (Dependencies, error) { - fdeps := flux.NewDefaultDependencies() - fdeps.Deps.HTTPClient = http.NewDefaultClient(url.PassValidator{}) - fdeps.Deps.SecretService = query.FromSecretService(ss) - // apply fluxopts before assigning fdeps to deps (ie, before casting) - for _, opt := range fluxopts { - opt(&fdeps) - } - - deps := Dependencies{FluxDeps: fdeps} - bucketLookupSvc := query.FromBucketService(bucketSvc) - orgLookupSvc := query.FromOrganizationService(orgSvc) - metrics := NewMetrics(metricLabelKeys) - deps.StorageDeps.FromDeps = FromDependencies{ - Reader: reader, - BucketLookup: bucketLookupSvc, - OrganizationLookup: orgLookupSvc, - Metrics: metrics, - } - if err := deps.StorageDeps.FromDeps.Validate(); err != nil { - return Dependencies{}, err - } - deps.StorageDeps.BucketDeps = bucketLookupSvc - deps.StorageDeps.ToDeps = ToDependencies{ - BucketLookup: bucketLookupSvc, - OrganizationLookup: orgLookupSvc, - PointsWriter: writer, - } - if err := deps.StorageDeps.ToDeps.Validate(); err != nil { - return Dependencies{}, err - } - return deps, nil -} - -type flags map[string]interface{} - -// InjectFlagsFromContext will take the idpe feature flags from -// the context and wrap them in a flux feature flagger for the -// flux engine. -func InjectFlagsFromContext(ctx context.Context) context.Context { - flagger := flags(feature.FlagsFromContext(ctx)) - return fluxfeature.Inject(ctx, flagger) -} - -func (f flags) FlagValue(ctx context.Context, flag fluxfeature.Flag) interface{} { - v, ok := f[flag.Key()] - if !ok { - v = flag.Default() - } - - // Flux uses int for intflag and influxdb uses int32 so convert here. 
- if i, ok := v.(int32); ok { - return int(i) - } - return v -} diff --git a/query/stdlib/influxdata/influxdb/filter_test.flux b/query/stdlib/influxdata/influxdb/filter_test.flux deleted file mode 100644 index 4c979364933..00000000000 --- a/query/stdlib/influxdata/influxdb/filter_test.flux +++ /dev/null @@ -1,76 +0,0 @@ -package influxdb_test - -import "csv" -import "testing" -import "testing/expect" - -option now = () => (2030-01-01T00:00:00Z) - -input = "#datatype,string,long,dateTime:RFC3339,string,string,string,double -#group,false,false,false,true,true,true,false -#default,_result,,,,,, -,result,table,_time,_measurement,host,_field,_value -,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83 -,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.63 -,,1,2018-05-22T19:53:26Z,system,host.local,load3,1.72 -,,2,2018-05-22T19:53:26Z,system,host.local,load4,1.77 -,,2,2018-05-22T19:53:36Z,system,host.local,load4,1.78 -,,2,2018-05-22T19:53:46Z,system,host.local,load4,1.77 -" - -testcase filter { - expect.planner(rules: [ - "influxdata/influxdb.FromStorageRule": 1, - "PushDownRangeRule": 1, - "PushDownFilterRule": 1, - ]) - - want = csv.from(csv: "#datatype,string,long,dateTime:RFC3339,string,string,string,double -#group,false,false,false,true,true,true,false -#default,_result,,,,,, -,result,table,_time,_measurement,host,_field,_value -,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83 -,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.63 -") - - got = csv.from(csv: input) - |> testing.load() - |> range(start: -100y) - |> filter(fn: (r) => r._measurement == "system" and r._field == "load1") - |> drop(columns: ["_start", "_stop"]) - testing.diff(want, got) -} - - -input_issue_4804 = "#datatype,string,long,dateTime:RFC3339,string,string,string,boolean -#group,false,false,false,true,true,true,false -#default,_result,,,,,, -,result,table,_time,_measurement,host,_field,_value -,,0,2018-05-22T19:53:26Z,system,host.local,load1,true -,,0,2018-05-22T19:53:36Z,system,host.local,load1,false -,,1,2018-05-22T19:53:26Z,system,host.local,load3,false -,,2,2018-05-22T19:53:26Z,system,host.local,load4,true -" - -testcase flux_issue_4804 { - expect.planner(rules: [ - "influxdata/influxdb.FromStorageRule": 1, - "PushDownRangeRule": 1, - "PushDownFilterRule": 1, - ]) - - want = csv.from(csv: "#datatype,string,long,dateTime:RFC3339,string,string,string,boolean -#group,false,false,false,true,true,true,false -#default,_result,,,,,, -,result,table,_time,_measurement,host,_field,_value -,,0,2018-05-22T19:53:26Z,system,host.local,load1,true -,,1,2018-05-22T19:53:26Z,system,host.local,load3,false -") - - got = csv.from(csv: input_issue_4804) - |> testing.load() - |> range(start: -100y) - |> filter(fn: (r) => ((r["_field"] == "load1" and r["_value"] == true) or (r["_field"] == "load3" and r["_value"] == false))) - |> drop(columns: ["_start", "_stop"]) - testing.diff(want, got) -} diff --git a/query/stdlib/influxdata/influxdb/from.go b/query/stdlib/influxdata/influxdb/from.go deleted file mode 100644 index 4e64dc879dc..00000000000 --- a/query/stdlib/influxdata/influxdb/from.go +++ /dev/null @@ -1,55 +0,0 @@ -package influxdb - -import ( - "fmt" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/codes" - "github.com/influxdata/flux/plan" - "github.com/influxdata/flux/stdlib/influxdata/influxdb" -) - -const FromKind = "influxDBFrom" - -type ( - NameOrID = influxdb.NameOrID - FromOpSpec = influxdb.FromOpSpec -) - -type FromStorageProcedureSpec struct { - Bucket influxdb.NameOrID 
-} - -func (s *FromStorageProcedureSpec) Kind() plan.ProcedureKind { - return FromKind -} - -func (s *FromStorageProcedureSpec) Copy() plan.ProcedureSpec { - ns := new(FromStorageProcedureSpec) - ns.Bucket = s.Bucket - return ns -} - -func (s *FromStorageProcedureSpec) PostPhysicalValidate(id plan.NodeID) error { - // FromStorageProcedureSpec is a logical operation representing any read - // from storage. However as a logical operation, it doesn't specify - // how data is to be read from storage. It is the query planner's - // job to determine the optimal read strategy and to convert this - // logical operation into the appropriate physical operation. - // - // Logical operations cannot be executed by the query engine. So if - // this operation is still around post physical planning, it means - // that a 'range' could not be pushed down to storage. Storage does - // not support unbounded reads, and so this query must not be - // validated. - var bucket string - if s.Bucket.Name != "" { - bucket = s.Bucket.Name - } else { - bucket = s.Bucket.ID - } - return &flux.Error{ - Code: codes.Invalid, - Msg: fmt.Sprintf("cannot submit unbounded read to %q; try bounding 'from' with a call to 'range'", bucket), - } -} diff --git a/query/stdlib/influxdata/influxdb/from_test.go b/query/stdlib/influxdata/influxdb/from_test.go deleted file mode 100644 index da0a31db899..00000000000 --- a/query/stdlib/influxdata/influxdb/from_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package influxdb_test - -import ( - "context" - "testing" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/plan" - "github.com/influxdata/flux/plan/plantest" - "github.com/influxdata/flux/stdlib/influxdata/influxdb" - "github.com/influxdata/flux/stdlib/universe" - qinfluxdb "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" -) - -func TestFromValidation(t *testing.T) { - spec := plantest.PlanSpec{ - // from |> group (cannot query an infinite time range) - Nodes: []plan.Node{ - plan.CreateLogicalNode("from", &influxdb.FromProcedureSpec{ - Bucket: influxdb.NameOrID{Name: "my-bucket"}, - }), - plan.CreatePhysicalNode("group", &universe.GroupProcedureSpec{ - GroupMode: flux.GroupModeBy, - GroupKeys: []string{"_measurement", "_field"}, - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - } - - ps := plantest.CreatePlanSpec(&spec) - pp := plan.NewPhysicalPlanner(plan.OnlyPhysicalRules( - qinfluxdb.FromStorageRule{}, - qinfluxdb.PushDownRangeRule{}, - qinfluxdb.PushDownFilterRule{}, - qinfluxdb.PushDownGroupRule{}, - )) - _, err := pp.Plan(context.Background(), ps) - if err == nil { - t.Error("Expected query with no call to range to fail physical planning") - } - want := `cannot submit unbounded read to "my-bucket"; try bounding 'from' with a call to 'range'` - got := err.Error() - if want != got { - t.Errorf("unexpected error; -want/+got\n- %s\n+ %s", want, got) - } -} diff --git a/query/stdlib/influxdata/influxdb/geo_mergefilter_test.flux b/query/stdlib/influxdata/influxdb/geo_mergefilter_test.flux deleted file mode 100644 index 7f3b2db70aa..00000000000 --- a/query/stdlib/influxdata/influxdb/geo_mergefilter_test.flux +++ /dev/null @@ -1,34 +0,0 @@ -package universe_test - -import "testing" -import "testing/expect" -import "planner" -import "csv" -import "experimental/geo" - -option now = () => 2030-01-01T00:00:00Z - -testcase geo_merge_filter { - input = " -#group,false,false,false,true,false,false,false -#datatype,string,long,dateTime:RFC3339,string,string,double,double 
-#default,_result,,,,,, -,result,table,_time,_measurement,id,lat,lon -,,0,2021-05-02T11:37:40Z,the_measurement,us7000dzhg,-30.133,-71.5399 -" - want = csv.from( - csv: " -#group,false,false,true,true,false,true,false,false,false -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,string,string,double,double -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_measurement,id,lat,lon -,,0,1930-01-01T00:00:00Z,2030-01-01T00:00:00Z,2021-05-02T11:37:40Z,the_measurement,us7000dzhg,-30.133,-71.5399 -", - ) - result = csv.from(csv: input) - |> range(start: -100y) - |> filter(fn: (r) => r["_measurement"] == "the_measurement") - |> geo.strictFilter(region: { lat: -30.000, lon: -71.0000, radius: 100.0 }) - - testing.diff(want: want, got: result) -} diff --git a/query/stdlib/influxdata/influxdb/group_agg_influxdb_test.flux b/query/stdlib/influxdata/influxdb/group_agg_influxdb_test.flux deleted file mode 100644 index 6025959783c..00000000000 --- a/query/stdlib/influxdata/influxdb/group_agg_influxdb_test.flux +++ /dev/null @@ -1,46 +0,0 @@ -package influxdb_test - - -import "testing/expect" - -testcase push_down_group_one_tag_count extends "flux/planner/group_agg_test.group_one_tag_count" { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} - -testcase push_down_group_all_filter_field_count extends "flux/planner/group_agg_test.group_all_filter_field_count" { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} - -testcase push_down_group_one_tag_filter_field_count extends "flux/planner/group_agg_test.group_one_tag_filter_field_count" -{ - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() - } - -testcase push_down_group_two_tag_filter_field_count extends "flux/planner/group_agg_test.group_two_tag_filter_field_count" -{ - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() - } - -testcase push_down_group_one_tag_sum extends "flux/planner/group_agg_test.group_one_tag_sum" { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} - -testcase push_down_group_all_filter_field_sum extends "flux/planner/group_agg_test.group_all_filter_field_sum" { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} - -testcase push_down_group_one_tag_filter_field_sum extends "flux/planner/group_agg_test.group_one_tag_filter_field_sum" { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} - -testcase push_down_group_two_tag_filter_field_sum extends "flux/planner/group_agg_test.group_two_tag_filter_field_sum" { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} diff --git a/query/stdlib/influxdata/influxdb/group_first_last_influxdb_test.flux b/query/stdlib/influxdata/influxdb/group_first_last_influxdb_test.flux deleted file mode 100644 index b09f1c95acb..00000000000 --- a/query/stdlib/influxdata/influxdb/group_first_last_influxdb_test.flux +++ /dev/null @@ -1,447 +0,0 @@ -package influxdb_test - - -import "array" -import "testing" -import "testing/expect" - -// N.b. `inData` is what this is named in the testcases using extension. -// Apparently we can't shadow the name here when extending, else we get an error -// about reassignment. 
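// For reference (a summary of the pattern used throughout these test files, not
// part of the original tests): a declaration such as
//
//   testcase push_down_group_one_tag_count extends "flux/planner/group_agg_test.group_one_tag_count" {
//       expect.planner(rules: ["PushDownGroupAggregateRule": 1])
//       super()
//   }
//
// re-runs the upstream flux testcase via super() while additionally asserting
// that the named planner rule was applied once when the query runs against this
// storage engine.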
-input = - array.from( - rows: [ - { - _field: "f0", - _measurement: "m0", - t0: "t0v0", - t1: "t1v0", - _time: 2021-07-06T23:06:30Z, - _value: 3, - }, - { - _field: "f0", - _measurement: "m0", - t0: "t0v0", - t1: "t1v0", - _time: 2021-07-06T23:06:40Z, - _value: 1, - }, - { - _field: "f0", - _measurement: "m0", - t0: "t0v0", - t1: "t1v0", - _time: 2021-07-06T23:06:50Z, - _value: 0, - }, - { - _field: "f0", - _measurement: "m0", - t0: "t0v0", - t1: "t1v1", - _time: 2021-07-06T23:06:30Z, - _value: 4, - }, - { - _field: "f0", - _measurement: "m0", - t0: "t0v0", - t1: "t1v1", - _time: 2021-07-06T23:06:40Z, - _value: 3, - }, - { - _field: "f0", - _measurement: "m0", - t0: "t0v0", - t1: "t1v1", - _time: 2021-07-06T23:06:50Z, - _value: 1, - }, - { - _field: "f0", - _measurement: "m0", - t0: "t0v1", - t1: "t1v0", - _time: 2021-07-06T23:06:30Z, - _value: 1, - }, - { - _field: "f0", - _measurement: "m0", - t0: "t0v1", - t1: "t1v0", - _time: 2021-07-06T23:06:40Z, - _value: 0, - }, - { - _field: "f0", - _measurement: "m0", - t0: "t0v1", - t1: "t1v0", - _time: 2021-07-06T23:06:50Z, - _value: 4, - }, - { - _field: "f0", - _measurement: "m0", - t0: "t0v1", - t1: "t1v1", - _time: 2021-07-06T23:06:30Z, - _value: 4, - }, - { - _field: "f0", - _measurement: "m0", - t0: "t0v1", - t1: "t1v1", - _time: 2021-07-06T23:06:40Z, - _value: 0, - }, - { - _field: "f0", - _measurement: "m0", - t0: "t0v1", - t1: "t1v1", - _time: 2021-07-06T23:06:50Z, - _value: 4, - }, - { - _field: "f1", - _measurement: "m0", - t0: "t0v0", - t1: "t1v0", - _time: 2021-07-06T23:06:30Z, - _value: 0, - }, - { - _field: "f1", - _measurement: "m0", - t0: "t0v0", - t1: "t1v0", - _time: 2021-07-06T23:06:40Z, - _value: 0, - }, - { - _field: "f1", - _measurement: "m0", - t0: "t0v0", - t1: "t1v0", - _time: 2021-07-06T23:06:50Z, - _value: 0, - }, - { - _field: "f1", - _measurement: "m0", - t0: "t0v0", - t1: "t1v1", - _time: 2021-07-06T23:06:30Z, - _value: 0, - }, - { - _field: "f1", - _measurement: "m0", - t0: "t0v0", - t1: "t1v1", - _time: 2021-07-06T23:06:40Z, - _value: 4, - }, - { - _field: "f1", - _measurement: "m0", - t0: "t0v0", - t1: "t1v1", - _time: 2021-07-06T23:06:50Z, - _value: 3, - }, - { - _field: "f1", - _measurement: "m0", - t0: "t0v1", - t1: "t1v0", - _time: 2021-07-06T23:06:30Z, - _value: 3, - }, - { - _field: "f1", - _measurement: "m0", - t0: "t0v1", - t1: "t1v0", - _time: 2021-07-06T23:06:40Z, - _value: 2, - }, - { - _field: "f1", - _measurement: "m0", - t0: "t0v1", - t1: "t1v0", - _time: 2021-07-06T23:06:50Z, - _value: 1, - }, - { - _field: "f1", - _measurement: "m0", - t0: "t0v1", - t1: "t1v1", - _time: 2021-07-06T23:06:30Z, - _value: 1, - }, - { - _field: "f1", - _measurement: "m0", - t0: "t0v1", - t1: "t1v1", - _time: 2021-07-06T23:06:40Z, - _value: 0, - }, - { - _field: "f1", - _measurement: "m0", - t0: "t0v1", - t1: "t1v1", - _time: 2021-07-06T23:06:50Z, - _value: 2, - }, - ], - ) - |> group(columns: ["_measurement", "_field", "t0", "t1"]) - -// FIXME: want to extend `flux/planner/group_first_last_test.group_one_tag_first` but can't -// A sort was added to allow the base case to pass in cloud, but it breaks the pushdown here. -// For now, the body of the testcase is included here, in full, minus the sort. 
-// Ref: https://github.com/influxdata/influxdb/issues/23757 -testcase push_down_group_one_tag_first { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - - want = - array.from( - rows: [ - { - _measurement: "m0", - _field: "f0", - "t0": "t0v0", - "t1": "t1v0", - "_value": 3, - _time: 2021-07-06T23:06:30Z, - }, - { - _measurement: "m0", - _field: "f0", - "t0": "t0v1", - "t1": "t1v0", - "_value": 1, - _time: 2021-07-06T23:06:30Z, - }, - ], - ) - |> group(columns: ["t0"]) - got = - testing.load(tables: input) - |> range(start: -100y) - |> group(columns: ["t0"]) - |> first() - |> drop(columns: ["_start", "_stop"]) - - testing.diff(got, want) |> yield() -} - -// FIXME: want to extend `flux/planner/group_first_last_test.group_all_filter_field_first` but can't -// A sort was added to allow the base case to pass in cloud, but it breaks the pushdown here. -// For now, the body of the testcase is included here, in full, minus the sort. -// Ref: https://github.com/influxdata/influxdb/issues/23757 -testcase push_down_group_all_filter_field_first { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - - want = - array.from( - rows: [ - { - _measurement: "m0", - _field: "f0", - "t0": "t0v0", - "t1": "t1v0", - "_value": 3, - _time: 2021-07-06T23:06:30Z, - }, - ], - ) - got = - testing.load(tables: input) - |> range(start: -100y) - |> filter(fn: (r) => r._field == "f0") - |> group() - |> first() - |> drop(columns: ["_start", "_stop"]) - - testing.diff(got, want) |> yield() -} - -// FIXME: want to extend `flux/planner/group_first_last_test.group_one_tag_filter_field_first` but can't -// A sort was added to allow the base case to pass in cloud, but it breaks the pushdown here. -// For now, the body of the testcase is included here, in full, minus the sort. -// Ref: https://github.com/influxdata/influxdb/issues/23757 -testcase push_down_group_one_tag_filter_field_first { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - - want = - array.from( - rows: [ - { - _measurement: "m0", - _field: "f0", - "t0": "t0v0", - "t1": "t1v0", - "_value": 3, - _time: 2021-07-06T23:06:30Z, - }, - { - _measurement: "m0", - _field: "f0", - "t0": "t0v1", - "t1": "t1v0", - "_value": 1, - _time: 2021-07-06T23:06:30Z, - }, - ], - ) - |> group(columns: ["t0"]) - got = - testing.load(tables: input) - |> range(start: -100y) - |> filter(fn: (r) => r._field == "f0") - |> group(columns: ["t0"]) - |> first() - |> drop(columns: ["_start", "_stop"]) - - testing.diff(got, want) |> yield() -} - -testcase -push_down_group_two_tag_filter_field_first -extends -"flux/planner/group_first_last_test.group_two_tag_filter_field_first" -{ - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} - -// Group + last tests -// FIXME: want to extend `flux/planner/group_first_last_test.group_one_tag_last` but can't -// A sort was added to allow the base case to pass in cloud, but it breaks the pushdown here. -// For now, the body of the testcase is included here, in full, minus the sort. 
-// Ref: https://github.com/influxdata/influxdb/issues/23757 -testcase push_down_group_one_tag_last { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - - want = - array.from( - rows: [ - { - _measurement: "m0", - _field: "f1", - "t0": "t0v0", - "t1": "t1v1", - _time: 2021-07-06T23:06:50Z, - _value: 3, - }, - { - _measurement: "m0", - _field: "f1", - "t0": "t0v1", - "t1": "t1v1", - _time: 2021-07-06T23:06:50Z, - _value: 2, - }, - ], - ) - |> group(columns: ["t0"]) - got = - testing.load(tables: input) - |> range(start: -100y) - |> group(columns: ["t0"]) - |> last() - |> drop(columns: ["_start", "_stop"]) - - testing.diff(got, want) |> yield() -} - -// FIXME: want to extend `flux/planner/group_first_last_test.group_all_filter_field_last` but can't -// A sort was added to allow the base case to pass in cloud, but it breaks the pushdown here. -// For now, the body of the testcase is included here, in full, minus the sort. -// Ref: https://github.com/influxdata/influxdb/issues/23757 -testcase push_down_group_all_filter_field_last { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - - want = - array.from( - rows: [ - { - _measurement: "m0", - _field: "f0", - "t0": "t0v1", - "t1": "t1v1", - _time: 2021-07-06T23:06:50Z, - _value: 4, - }, - ], - ) - got = - testing.load(tables: input) - |> range(start: -100y) - |> filter(fn: (r) => r._field == "f0") - |> group() - |> last() - |> drop(columns: ["_start", "_stop"]) - - testing.diff(got, want) |> yield() -} - -// FIXME: want to extend `flux/planner/group_first_last_test.group_one_tag_filter_field_last` but can't -// A sort was added to allow the base case to pass in cloud, but it breaks the pushdown here. -// For now, the body of the testcase is included here, in full, minus the sort. -// Ref: https://github.com/influxdata/influxdb/issues/23757 -testcase push_down_group_one_tag_filter_field_last { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - - want = - array.from( - rows: [ - { - _measurement: "m0", - _field: "f0", - "t0": "t0v0", - "t1": "t1v1", - _time: 2021-07-06T23:06:50Z, - _value: 1, - }, - { - _measurement: "m0", - _field: "f0", - "t0": "t0v1", - "t1": "t1v1", - _time: 2021-07-06T23:06:50Z, - _value: 4, - }, - ], - ) - |> group(columns: ["t0"]) - got = - testing.load(tables: input) - |> range(start: -100y) - |> filter(fn: (r) => r._field == "f0") - |> group(columns: ["t0"]) - |> last() - |> drop(columns: ["_start", "_stop"]) - - testing.diff(got, want) |> yield() -} - -testcase -push_down_group_two_tag_filter_field_last -extends -"flux/planner/group_first_last_test.group_two_tag_filter_field_last" -{ - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} diff --git a/query/stdlib/influxdata/influxdb/metrics.go b/query/stdlib/influxdata/influxdb/metrics.go deleted file mode 100644 index e4ea00fef55..00000000000 --- a/query/stdlib/influxdata/influxdb/metrics.go +++ /dev/null @@ -1,83 +0,0 @@ -package influxdb - -import ( - "context" - "fmt" - "time" - - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/prometheus/client_golang/prometheus" -) - -const ( - orgLabel = "org" - opLabel = "op" -) - -type metrics struct { - ctxLabelKeys []string - requestDur *prometheus.HistogramVec -} - -// NewMetrics produces a new metrics objects for an influxdb source. -// Currently it just collects the duration of read requests into a histogram. -// ctxLabelKeys is a list of labels to add to the produced metrics. 
-// The value for a given key will be read off the context. -// The context value must be a string or an implementation of the Stringer interface. -// In addition, produced metrics will be labeled with the orgID and type of operation requested. -func NewMetrics(ctxLabelKeys []string) *metrics { - labelKeys := make([]string, len(ctxLabelKeys)+2) - copy(labelKeys, ctxLabelKeys) - labelKeys[len(labelKeys)-2] = orgLabel - labelKeys[len(labelKeys)-1] = opLabel - - m := new(metrics) - m.requestDur = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "query", - Subsystem: "influxdb_source", - Name: "read_request_duration_seconds", - Help: "Histogram of times spent in read requests", - Buckets: prometheus.ExponentialBuckets(1e-3, 5, 7), - }, labelKeys) - m.ctxLabelKeys = ctxLabelKeys - return m -} - -// PrometheusCollectors satisfies the PrometheusCollector interface. -func (m *metrics) PrometheusCollectors() []prometheus.Collector { - if m == nil { - // if metrics happens to be nil here (such as for a test), then let's not panic. - return nil - } - return []prometheus.Collector{ - m.requestDur, - } -} - -func (m *metrics) getLabelValues(ctx context.Context, orgID platform2.ID, op string) []string { - if m == nil { - return nil - } - labelValues := make([]string, len(m.ctxLabelKeys)+2) - for i, k := range m.ctxLabelKeys { - value := ctx.Value(k) - var str string - switch v := value.(type) { - case string: - str = v - case fmt.Stringer: - str = v.String() - } - labelValues[i] = str - } - labelValues[len(labelValues)-2] = orgID.String() - labelValues[len(labelValues)-1] = op - return labelValues -} - -func (m *metrics) recordMetrics(labelValues []string, start time.Time) { - if m == nil { - return - } - m.requestDur.WithLabelValues(labelValues...).Observe(time.Since(start).Seconds()) -} diff --git a/query/stdlib/influxdata/influxdb/min_max_influxdb_test.flux b/query/stdlib/influxdata/influxdb/min_max_influxdb_test.flux deleted file mode 100644 index 50bb419a7a9..00000000000 --- a/query/stdlib/influxdata/influxdb/min_max_influxdb_test.flux +++ /dev/null @@ -1,44 +0,0 @@ -package influxdb_test - - -import "testing/expect" - -testcase push_down_min_bare extends "flux/planner/group_min_test.group_min_bare" { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} - -testcase push_down_min_bare_host extends "flux/planner/group_min_test.group_min_bare_host" { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} - -testcase push_down_min_bare_field extends "flux/planner/group_min_test.group_min_bare_field" { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} - -testcase push_down_max_bare extends "flux/planner/group_max_test.group_max_bare" { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} - -testcase push_down_max_bare_host extends "flux/planner/group_max_test.group_max_bare_host" { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} - -testcase push_down_max_bare_field extends "flux/planner/group_max_test.group_max_bare_field" { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} - -testcase push_down_table_test_min extends "flux/planner/group_min_max_table_test.group_min_table" { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} - -testcase push_down_table_test_max extends "flux/planner/group_min_max_table_test.group_max_table" { - expect.planner(rules: ["PushDownGroupAggregateRule": 1]) - super() -} diff --git 
a/query/stdlib/influxdata/influxdb/multi_measure_test.flux b/query/stdlib/influxdata/influxdb/multi_measure_test.flux deleted file mode 100644 index 4a0cc2a6570..00000000000 --- a/query/stdlib/influxdata/influxdb/multi_measure_test.flux +++ /dev/null @@ -1,267 +0,0 @@ -package influxdb_test - -import "csv" -import "testing" - -option now = () => 2030-01-01T00:00:00Z - -input = " -#datatype,string,long,dateTime:RFC3339,string,string,string,double -#group,false,false,false,true,true,true,false -#default,_result,,,,,, -,result,table,_time,_measurement,host,_field,_value -,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83 -,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72 -,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74 -,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63 -,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91 -,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84 - -,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98 -,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97 -,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97 -,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96 -,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98 -,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97 - -,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95 -,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92 -,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92 -,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89 -,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94 -,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93 - -,,3,2018-05-22T19:53:26Z,var,host.local,load3,91.98 -,,3,2018-05-22T19:53:36Z,var,host.local,load3,91.97 -,,3,2018-05-22T19:53:46Z,var,host.local,load3,91.97 -,,3,2018-05-22T19:53:56Z,var,host.local,load3,91.96 -,,3,2018-05-22T19:54:06Z,var,host.local,load3,91.98 -,,3,2018-05-22T19:54:16Z,var,host.local,load3,91.97 - -,,4,2018-05-22T19:53:26Z,swap,host.global,used_percent,82.98 -,,4,2018-05-22T19:53:36Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:53:46Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:53:56Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:54:06Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:54:16Z,swap,host.global,used_percent,82.64 - -#datatype,string,long,dateTime:RFC3339,string,string,string,double -#group,false,false,false,true,true,true,false -#default,_result,,,,,, -,result,table,_time,_measurement,loc,_field,_value -,,0,2018-05-22T19:53:26Z,locale,en,lat,37.09 -,,0,2018-05-22T19:53:36Z,locale,en,lat,37.10 -,,0,2018-05-22T19:53:46Z,locale,en,lat,37.08 -" - -testcase multi_measure { - got = csv.from(csv: input) - |> testing.load() - |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z) - |> filter(fn: (r) => r["_measurement"] == "system" or r["_measurement"] == "sys") - |> filter(fn: (r) => r["_field"] == "load1" or r["_field"] == "load3") - |> drop(columns: ["_start", "_stop"]) - - want = csv.from(csv: "#datatype,string,long,dateTime:RFC3339,string,string,string,double -#group,false,false,false,true,true,true,false -#default,_result,,,,,, -,result,table,_time,_measurement,host,_field,_value -,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83 -,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72 -,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74 -,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63 -,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91 -,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84 -,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98 
-,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97 -,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97 -,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96 -,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98 -,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97 -") - - testing.diff(got, want) -} - -testcase multi_measure_match_all { - got = csv.from(csv: input) - |> testing.load() - |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z) - |> filter(fn: (r) => r["_measurement"] == "system" or r["_measurement"] == "sys" or r["_measurement"] == "var" or r["_measurement"] == "swap") - |> filter(fn: (r) => r["_field"] == "load1" or r["_field"] == "load3" or r["_field"] == "load5" or r["_field"] == "used_percent") - |> drop(columns: ["_start", "_stop"]) - - want = csv.from(csv: "#datatype,string,long,dateTime:RFC3339,string,string,string,double -#group,false,false,false,true,true,true,false -#default,_result,,,,,, -,result,table,_time,_measurement,host,_field,_value -,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83 -,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72 -,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74 -,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63 -,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91 -,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84 -,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98 -,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97 -,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97 -,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96 -,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98 -,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97 -,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95 -,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92 -,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92 -,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89 -,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94 -,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93 -,,3,2018-05-22T19:53:26Z,var,host.local,load3,91.98 -,,3,2018-05-22T19:53:36Z,var,host.local,load3,91.97 -,,3,2018-05-22T19:53:46Z,var,host.local,load3,91.97 -,,3,2018-05-22T19:53:56Z,var,host.local,load3,91.96 -,,3,2018-05-22T19:54:06Z,var,host.local,load3,91.98 -,,3,2018-05-22T19:54:16Z,var,host.local,load3,91.97 -,,4,2018-05-22T19:53:26Z,swap,host.global,used_percent,82.98 -,,4,2018-05-22T19:53:36Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:53:46Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:53:56Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:54:06Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:54:16Z,swap,host.global,used_percent,82.64 -") - - testing.diff(got, want) -} - -testcase multi_measure_tag_filter { - got = csv.from(csv: input) - |> testing.load() - |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z) - |> filter(fn: (r) => r["_measurement"] == "system" or r["_measurement"] == "swap") - |> filter(fn: (r) => r["_field"] == "load1" or r["_field"] == "load3" or r["_field"] == "used_percent") - |> filter(fn: (r) => r["host"] == "host.local" or r["host"] == "host.global") - |> drop(columns: ["_start", "_stop"]) - - want = csv.from(csv: "#datatype,string,long,dateTime:RFC3339,string,string,string,double -#group,false,false,false,true,true,true,false -#default,_result,,,,,, -,result,table,_time,_measurement,host,_field,_value -,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83 -,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72 -,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74 
-,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63 -,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91 -,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84 -,,4,2018-05-22T19:53:26Z,swap,host.global,used_percent,82.98 -,,4,2018-05-22T19:53:36Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:53:46Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:53:56Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:54:06Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:54:16Z,swap,host.global,used_percent,82.64 -") - - testing.diff(got, want) -} - -testcase multi_measure_complex_or { - got = csv.from(csv: input) - |> testing.load() - |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z) - |> filter(fn: (r) => (r["_measurement"] == "system" or r["_measurement"] == "swap") or (r["_measurement"] != "var" and r["host"] == "host.local")) - |> drop(columns: ["_start", "_stop"]) - - want = csv.from(csv: "#datatype,string,long,dateTime:RFC3339,string,string,string,double -#group,false,false,false,true,true,true,false -#default,_result,,,,,, -,result,table,_time,_measurement,host,_field,_value -,,0,2018-05-22T19:53:26Z,system,host.local,load1,1.83 -,,0,2018-05-22T19:53:36Z,system,host.local,load1,1.72 -,,0,2018-05-22T19:53:46Z,system,host.local,load1,1.74 -,,0,2018-05-22T19:53:56Z,system,host.local,load1,1.63 -,,0,2018-05-22T19:54:06Z,system,host.local,load1,1.91 -,,0,2018-05-22T19:54:16Z,system,host.local,load1,1.84 -,,2,2018-05-22T19:53:26Z,system,host.local,load5,1.95 -,,2,2018-05-22T19:53:36Z,system,host.local,load5,1.92 -,,2,2018-05-22T19:53:46Z,system,host.local,load5,1.92 -,,2,2018-05-22T19:53:56Z,system,host.local,load5,1.89 -,,2,2018-05-22T19:54:06Z,system,host.local,load5,1.94 -,,2,2018-05-22T19:54:16Z,system,host.local,load5,1.93 -,,4,2018-05-22T19:53:26Z,swap,host.global,used_percent,82.98 -,,4,2018-05-22T19:53:36Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:53:46Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:53:56Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:54:06Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:54:16Z,swap,host.global,used_percent,82.64 -,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98 -,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97 -,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97 -,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96 -,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98 -,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97 -") - - testing.diff(got, want) -} - -testcase multi_measure_complex_and { - got = csv.from(csv: input) - |> testing.load() - |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z) - |> filter(fn: (r) => r["_measurement"] != "system" or r["_measurement"] == "swap") - |> filter(fn: (r) => r["_measurement"] == "swap" or r["_measurement"] == "var") - |> drop(columns: ["_start", "_stop"]) - - want = csv.from(csv: "#datatype,string,long,dateTime:RFC3339,string,string,string,double -#group,false,false,false,true,true,true,false -#default,_result,,,,,, -,result,table,_time,_measurement,host,_field,_value -,,4,2018-05-22T19:53:26Z,swap,host.global,used_percent,82.98 -,,4,2018-05-22T19:53:36Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:53:46Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:53:56Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:54:06Z,swap,host.global,used_percent,82.59 -,,4,2018-05-22T19:54:16Z,swap,host.global,used_percent,82.64 -,,3,2018-05-22T19:53:26Z,var,host.local,load3,91.98 
-,,3,2018-05-22T19:53:36Z,var,host.local,load3,91.97 -,,3,2018-05-22T19:53:46Z,var,host.local,load3,91.97 -,,3,2018-05-22T19:53:56Z,var,host.local,load3,91.96 -,,3,2018-05-22T19:54:06Z,var,host.local,load3,91.98 -,,3,2018-05-22T19:54:16Z,var,host.local,load3,91.97 -") - - testing.diff(got, want) -} - -testcase multi_measure_negation { - got = csv.from(csv: input) - |> testing.load() - |> range(start: 2018-01-01T00:00:00Z, stop: 2019-01-01T00:00:00Z) - |> filter(fn: (r) => r["_measurement"] != "system") - |> filter(fn: (r) => r["host"] == "host.local" or not exists r["host"]) - |> drop(columns: ["_start", "_stop"]) - - want = csv.from(csv: "#datatype,string,long,dateTime:RFC3339,string,string,string,double -#group,false,false,false,true,true,true,false -#default,_result,,,,,, -,result,table,_time,_measurement,host,_field,_value -,,1,2018-05-22T19:53:26Z,sys,host.local,load3,1.98 -,,1,2018-05-22T19:53:36Z,sys,host.local,load3,1.97 -,,1,2018-05-22T19:53:46Z,sys,host.local,load3,1.97 -,,1,2018-05-22T19:53:56Z,sys,host.local,load3,1.96 -,,1,2018-05-22T19:54:06Z,sys,host.local,load3,1.98 -,,1,2018-05-22T19:54:16Z,sys,host.local,load3,1.97 -,,3,2018-05-22T19:53:26Z,var,host.local,load3,91.98 -,,3,2018-05-22T19:53:36Z,var,host.local,load3,91.97 -,,3,2018-05-22T19:53:46Z,var,host.local,load3,91.97 -,,3,2018-05-22T19:53:56Z,var,host.local,load3,91.96 -,,3,2018-05-22T19:54:06Z,var,host.local,load3,91.98 -,,3,2018-05-22T19:54:16Z,var,host.local,load3,91.97 - -#datatype,string,long,dateTime:RFC3339,string,string,string,double -#group,false,false,false,true,true,true,false -#default,_result,,,,,, -,result,table,_time,_measurement,loc,_field,_value -,,0,2018-05-22T19:53:26Z,locale,en,lat,37.09 -,,0,2018-05-22T19:53:36Z,locale,en,lat,37.10 -,,0,2018-05-22T19:53:46Z,locale,en,lat,37.08 -") - - testing.diff(got, want) -} diff --git a/query/stdlib/influxdata/influxdb/operators.go b/query/stdlib/influxdata/influxdb/operators.go deleted file mode 100644 index 9eaa060ef54..00000000000 --- a/query/stdlib/influxdata/influxdb/operators.go +++ /dev/null @@ -1,172 +0,0 @@ -package influxdb - -import ( - "context" - "fmt" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/codes" - "github.com/influxdata/flux/plan" - "github.com/influxdata/flux/values" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" -) - -const ( - ReadRangePhysKind = "ReadRangePhysKind" - ReadGroupPhysKind = "ReadGroupPhysKind" - ReadWindowAggregatePhysKind = "ReadWindowAggregatePhysKind" - ReadTagKeysPhysKind = "ReadTagKeysPhysKind" - ReadTagValuesPhysKind = "ReadTagValuesPhysKind" -) - -type ReadGroupPhysSpec struct { - plan.DefaultCost - ReadRangePhysSpec - - GroupMode flux.GroupMode - GroupKeys []string - - AggregateMethod string -} - -func (s *ReadGroupPhysSpec) PlanDetails() string { - return fmt.Sprintf("GroupMode: %v, GroupKeys: %v, AggregateMethod: \"%s\"", s.GroupMode, s.GroupKeys, s.AggregateMethod) -} - -func (s *ReadGroupPhysSpec) Kind() plan.ProcedureKind { - return ReadGroupPhysKind -} - -func (s *ReadGroupPhysSpec) Copy() plan.ProcedureSpec { - ns := new(ReadGroupPhysSpec) - ns.ReadRangePhysSpec = *s.ReadRangePhysSpec.Copy().(*ReadRangePhysSpec) - - ns.GroupMode = s.GroupMode - ns.GroupKeys = s.GroupKeys - - ns.AggregateMethod = s.AggregateMethod - return ns -} - -type ReadRangePhysSpec struct { - plan.DefaultCost - - Bucket string - BucketID string - - // Filter is the filter to use when calling into - // 
storage. It must be possible to push down this - // filter. - Filter *datatypes.Predicate - - Bounds flux.Bounds -} - -func (s *ReadRangePhysSpec) Kind() plan.ProcedureKind { - return ReadRangePhysKind -} -func (s *ReadRangePhysSpec) Copy() plan.ProcedureSpec { - ns := *s - return &ns -} - -func (s *ReadRangePhysSpec) LookupBucketID(ctx context.Context, orgID platform.ID, buckets BucketLookup) (platform.ID, error) { - // Determine bucketID - switch { - case s.Bucket != "": - b, ok := buckets.Lookup(ctx, orgID, s.Bucket) - if !ok { - return 0, &flux.Error{ - Code: codes.NotFound, - Msg: fmt.Sprintf("could not find bucket %q", s.Bucket), - } - } - return b, nil - case len(s.BucketID) != 0: - var b platform.ID - if err := b.DecodeFromString(s.BucketID); err != nil { - return 0, &flux.Error{ - Code: codes.Invalid, - Msg: "invalid bucket id", - Err: err, - } - } - return b, nil - default: - return 0, &flux.Error{ - Code: codes.Invalid, - Msg: "no bucket name or id have been specified", - } - } -} - -// TimeBounds implements plan.BoundsAwareProcedureSpec. -func (s *ReadRangePhysSpec) TimeBounds(predecessorBounds *plan.Bounds) *plan.Bounds { - return &plan.Bounds{ - Start: values.ConvertTime(s.Bounds.Start.Time(s.Bounds.Now)), - Stop: values.ConvertTime(s.Bounds.Stop.Time(s.Bounds.Now)), - } -} - -type ReadWindowAggregatePhysSpec struct { - plan.DefaultCost - ReadRangePhysSpec - - WindowEvery flux.Duration - Offset flux.Duration - Aggregates []plan.ProcedureKind - CreateEmpty bool - TimeColumn string - - // ForceAggregate forces the aggregates to be treated as - // aggregates even if they are selectors. - ForceAggregate bool -} - -func (s *ReadWindowAggregatePhysSpec) PlanDetails() string { - return fmt.Sprintf("every = %v, aggregates = %v, createEmpty = %v, timeColumn = \"%s\", forceAggregate = %v", s.WindowEvery, s.Aggregates, s.CreateEmpty, s.TimeColumn, s.ForceAggregate) -} - -func (s *ReadWindowAggregatePhysSpec) Kind() plan.ProcedureKind { - return ReadWindowAggregatePhysKind -} - -func (s *ReadWindowAggregatePhysSpec) Copy() plan.ProcedureSpec { - ns := *s - ns.ReadRangePhysSpec = *s.ReadRangePhysSpec.Copy().(*ReadRangePhysSpec) - ns.Aggregates = make([]plan.ProcedureKind, len(s.Aggregates)) - copy(ns.Aggregates, s.Aggregates) - - return &ns -} - -type ReadTagKeysPhysSpec struct { - ReadRangePhysSpec -} - -func (s *ReadTagKeysPhysSpec) Kind() plan.ProcedureKind { - return ReadTagKeysPhysKind -} - -func (s *ReadTagKeysPhysSpec) Copy() plan.ProcedureSpec { - ns := new(ReadTagKeysPhysSpec) - ns.ReadRangePhysSpec = *s.ReadRangePhysSpec.Copy().(*ReadRangePhysSpec) - return ns -} - -type ReadTagValuesPhysSpec struct { - ReadRangePhysSpec - TagKey string -} - -func (s *ReadTagValuesPhysSpec) Kind() plan.ProcedureKind { - return ReadTagValuesPhysKind -} - -func (s *ReadTagValuesPhysSpec) Copy() plan.ProcedureSpec { - ns := new(ReadTagValuesPhysSpec) - ns.ReadRangePhysSpec = *s.ReadRangePhysSpec.Copy().(*ReadRangePhysSpec) - ns.TagKey = s.TagKey - return ns -} diff --git a/query/stdlib/influxdata/influxdb/provider.go b/query/stdlib/influxdata/influxdb/provider.go deleted file mode 100644 index a3c18f70eda..00000000000 --- a/query/stdlib/influxdata/influxdb/provider.go +++ /dev/null @@ -1,324 +0,0 @@ -package influxdb - -import ( - "context" - "fmt" - - arrowmemory "github.com/apache/arrow/go/v7/arrow/memory" - "github.com/influxdata/flux" - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/codes" - 
"github.com/influxdata/flux/dependencies/influxdb" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/values" - influxdb2 "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/storage" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - protocol "github.com/influxdata/line-protocol" -) - -type ( - Config = influxdb.Config - Predicate = influxdb.Predicate - PredicateSet = influxdb.PredicateSet -) - -// Provider is an implementation of influxdb.Provider that exposes the -// query.StorageReader to flux and, if a host or org were specified, it -// delegates to the influxdb.HttpProvider. -type Provider struct { - influxdb.HttpProvider - Reader query.StorageReader - BucketLookup BucketLookup -} - -func (p Provider) SeriesCardinalityReaderFor(ctx context.Context, conf influxdb.Config, bounds flux.Bounds, predicateSet influxdb.PredicateSet) (influxdb.Reader, error) { - // If an organization is specified, it must be retrieved through the http - // provider. - if conf.Org.IsValid() || conf.Host != "" { - return p.HttpProvider.SeriesCardinalityReaderFor(ctx, conf, bounds, predicateSet) - } - - if !p.Reader.SupportReadSeriesCardinality(ctx) { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "series cardinality option is not supported", - } - } - - spec, err := p.readFilterSpec(ctx, conf, bounds, predicateSet) - if err != nil { - return nil, err - } - return seriesCardinalityReader{ - reader: p.Reader, - spec: query.ReadSeriesCardinalitySpec{ - ReadFilterSpec: spec, - }, - }, nil -} - -func (p Provider) WriterFor(ctx context.Context, conf influxdb.Config) (influxdb.Writer, error) { - // If a host is specified, writes must be sent through the http provider. - if conf.Host != "" { - return p.HttpProvider.WriterFor(ctx, conf) - } - - deps := GetStorageDependencies(ctx).ToDeps - req := query.RequestFromContext(ctx) - if req == nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "missing request on context", - } - } - reqOrgID := req.OrganizationID - - { - // Check if the to() spec is pointing to an org. If so, ensure it's the same as the org executing the request. - // - // It's possible for flux to write points into an org other than the one running the query, but only via an HTTP - // request (which requires a `host` to be set). Specifying an `org` that's == to the one executing the query is - // redundant, but we allow it in order to support running the e2e tests imported from the flux codebase. 
- var toOrgID platform.ID - switch { - case conf.Org.Name != "": - var ok bool - toOrgID, ok = deps.OrganizationLookup.Lookup(ctx, conf.Org.Name) - if !ok { - return nil, &flux.Error{ - Code: codes.NotFound, - Msg: fmt.Sprintf("could not find org %q", conf.Org.Name), - } - } - case conf.Org.ID != "": - if err := toOrgID.DecodeFromString(conf.Org.ID); err != nil { - return nil, &flux.Error{ - Code: codes.Invalid, - Msg: "invalid org id", - Err: err, - } - } - default: - toOrgID = reqOrgID - } - - if toOrgID.Valid() && toOrgID != reqOrgID { - return nil, &flux.Error{ - Code: codes.Invalid, - Msg: "host must be specified when writing points to another org", - } - } - } - - bucketID, err := p.lookupBucketID(ctx, reqOrgID, conf.Bucket) - if err != nil { - return nil, err - } - - // err will be set if we are not authorized, we don't care about the other return values. - _, _, err = authorizer.AuthorizeWrite(ctx, influxdb2.BucketsResourceType, bucketID, reqOrgID) - if err != nil { - return nil, &errors.Error{ - Code: errors.EForbidden, - Msg: "user not authorized to write", - } - } - - return &localPointsWriter{ - ctx: ctx, - buf: make([]models.Point, 1<<14), - orgID: reqOrgID, - bucketID: bucketID, - wr: deps.PointsWriter, - }, nil -} - -// readFilterSpec will construct a query.ReadFilterSpec from the context and the -// configuration parameters. -func (p Provider) readFilterSpec(ctx context.Context, conf influxdb.Config, bounds flux.Bounds, predicateSet influxdb.PredicateSet) (query.ReadFilterSpec, error) { - // Retrieve the organization id from the request context. Do not use the - // configuration. - req := query.RequestFromContext(ctx) - if req == nil { - return query.ReadFilterSpec{}, &errors.Error{ - Code: errors.EInvalid, - Msg: "missing request on context", - } - } - - orgID := req.OrganizationID - bucketID, err := p.lookupBucketID(ctx, orgID, conf.Bucket) - if err != nil { - return query.ReadFilterSpec{}, err - } - - spec := query.ReadFilterSpec{ - OrganizationID: orgID, - BucketID: bucketID, - Bounds: execute.Bounds{ - Start: values.ConvertTime(bounds.Start.Time(bounds.Now)), - Stop: values.ConvertTime(bounds.Stop.Time(bounds.Now)), - }, - } - - if len(predicateSet) > 0 { - predicates := make([]*datatypes.Predicate, 0, len(predicateSet)) - for _, predicate := range predicateSet { - fn, ok := predicate.Fn.GetFunctionBodyExpression() - if !ok { - return query.ReadFilterSpec{}, &flux.Error{ - Code: codes.Invalid, - Msg: "predicate body cannot be pushed down", - } - } - - p, err := ToStoragePredicate(fn, "r") - if err != nil { - return query.ReadFilterSpec{}, err - } - predicates = append(predicates, p) - } - - mergedPredicate, err := mergePredicates(ast.AndOperator, predicates...) 
- if err != nil { - return query.ReadFilterSpec{}, err - } - spec.Predicate = mergedPredicate - } - return spec, nil -} - -func (p Provider) lookupBucketID(ctx context.Context, orgID platform.ID, bucket influxdb.NameOrID) (platform.ID, error) { - // Determine bucketID - switch { - case bucket.Name != "": - b, ok := p.BucketLookup.Lookup(ctx, orgID, bucket.Name) - if !ok { - return 0, &flux.Error{ - Code: codes.NotFound, - Msg: fmt.Sprintf("could not find bucket %q", bucket.Name), - } - } - return b, nil - case len(bucket.ID) != 0: - var b platform.ID - if err := b.DecodeFromString(bucket.ID); err != nil { - return 0, &flux.Error{ - Code: codes.Invalid, - Msg: "invalid bucket id", - Err: err, - } - } - return b, nil - default: - return 0, &flux.Error{ - Code: codes.Invalid, - Msg: "no bucket name or id have been specified", - } - } -} - -type seriesCardinalityReader struct { - reader query.StorageReader - spec query.ReadSeriesCardinalitySpec -} - -func (s seriesCardinalityReader) Read(ctx context.Context, f func(flux.Table) error, mem arrowmemory.Allocator) error { - alloc, ok := mem.(memory.Allocator) - if !ok { - alloc = &memory.ResourceAllocator{ - Allocator: mem, - } - } - - reader, err := s.reader.ReadSeriesCardinality(ctx, s.spec, alloc) - if err != nil { - return err - } - - return reader.Do(f) -} - -type localPointsWriter struct { - ctx context.Context - buf []models.Point - orgID platform.ID - bucketID platform.ID - n int - wr storage.PointsWriter - err error -} - -func (w *localPointsWriter) Write(ms ...protocol.Metric) error { - copyPoints := func() int { - n := 0 - for _, m := range ms { - if w.n+n == len(w.buf) { - break - } - mtags := m.TagList() - mfields := m.FieldList() - - tags := make(models.Tags, len(mtags)) - fields := make(models.Fields, len(mfields)) - for ti, t := range mtags { - tags[ti] = models.Tag{Key: []byte(t.Key), Value: []byte(t.Value)} - } - for _, f := range mfields { - fields[f.Key] = f.Value - } - w.buf[w.n+n], w.err = models.NewPoint(m.Name(), tags, fields, m.Time()) - if w.err != nil { - return n - } - n++ - } - return n - } - - for len(ms) > w.available() { - n := copyPoints() - if w.err != nil { - return w.err - } - w.n += n - w.err = w.flush() - if w.err != nil { - return w.err - } - ms = ms[n:] - } - w.n += copyPoints() - return w.err -} - -func (w *localPointsWriter) available() int { - return len(w.buf) - w.n -} - -func (w *localPointsWriter) flush() error { - if w.err != nil { - return w.err - } - if w.n == 0 { - return nil - } - - w.err = w.wr.WritePoints(w.ctx, w.orgID, w.bucketID, w.buf[:w.n]) - if w.err != nil { - return w.err - } - w.n = 0 - return nil -} - -func (w *localPointsWriter) Close() error { - return w.flush() -} diff --git a/query/stdlib/influxdata/influxdb/provider_test.go b/query/stdlib/influxdata/influxdb/provider_test.go deleted file mode 100644 index cff40702359..00000000000 --- a/query/stdlib/influxdata/influxdb/provider_test.go +++ /dev/null @@ -1,305 +0,0 @@ -package influxdb_test - -import ( - "context" - "testing" - "time" - - "github.com/apache/arrow/go/v7/arrow/memory" - "github.com/influxdata/flux" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/execute/table" - "github.com/influxdata/flux/execute/table/static" - influxdb2 "github.com/influxdata/influxdb/v2" - context2 "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - 
"github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" - storageflux "github.com/influxdata/influxdb/v2/storage/flux" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/tsdb/cursors" - "github.com/influxdata/influxdb/v2/v1/services/storage" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" -) - -var ( - orgID = platform.ID(10) - bucketID = platform.ID(1) // mock BucketLookup returns the name "my-bucket" for id 1 -) - -func TestProvider_SeriesCardinalityReader(t *testing.T) { - t.Parallel() - - store := &mock.ReadsStore{ - ReadSeriesCardinalityFn: func(ctx context.Context, req *datatypes.ReadSeriesCardinalityRequest) (cursors.Int64Iterator, error) { - source, err := storage.GetReadSource(req.GetReadSource()) - if err != nil { - return nil, err - } - - if want, got := orgID, platform.ID(source.GetOrgID()); want != got { - t.Errorf("unexpected org id -want/+got:\n\t- %d\n\t+ %d", want, got) - } - if want, got := bucketID, platform.ID(source.GetBucketID()); want != got { - t.Errorf("unexpected org id -want/+got:\n\t- %d\n\t+ %d", want, got) - } - - if want, got := req.Range.GetStart(), int64(1000000000); want != got { - t.Errorf("unexpected start range -want/+got:\n\t- %d\n\t+ %d", want, got) - } - if want, got := req.Range.GetEnd(), int64(2000000000); want != got { - t.Errorf("unexpected end range -want/+got:\n\t- %d\n\t+ %d", want, got) - } - - if req.Predicate != nil { - t.Error("expected predicate to be nil") - } - return cursors.NewInt64SliceIterator([]int64{4}), nil - }, - SupportReadSeriesCardinalityFn: func(ctx context.Context) bool { - return true - }, - GetSourceFn: func(orgID, bucketID uint64) proto.Message { - return &storage.ReadSource{ - BucketID: bucketID, - OrgID: orgID, - } - }, - } - - provider := influxdb.Provider{ - Reader: storageflux.NewReader(store), - BucketLookup: mock.BucketLookup{}, - } - - ctx := query.ContextWithRequest( - context.Background(), - &query.Request{ - OrganizationID: orgID, - }, - ) - - reader, err := provider.SeriesCardinalityReaderFor( - ctx, - influxdb.Config{ - Bucket: influxdb.NameOrID{ - Name: "my-bucket", - }, - }, - flux.Bounds{ - Start: flux.Time{ - Absolute: time.Unix(1, 0), - }, - Stop: flux.Time{ - Absolute: time.Unix(2, 0), - }, - }, - nil, - ) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - want := static.Table{ - static.Ints("_value", 4), - } - - got := table.Iterator{} - if err := reader.Read(ctx, func(tbl flux.Table) error { - cpy, err := execute.CopyTable(tbl) - if err != nil { - return err - } - got = append(got, cpy) - return nil - }, memory.DefaultAllocator); err != nil { - t.Errorf("unexpected error: %s", err) - } - - if diff := table.Diff(want, got); err != nil { - t.Errorf("unexpected output -want/+got:\n%s", diff) - } -} - -func TestProvider_SeriesCardinalityReader_Unsupported(t *testing.T) { - t.Parallel() - - store := &mock.ReadsStore{ - ReadSeriesCardinalityFn: func(ctx context.Context, req *datatypes.ReadSeriesCardinalityRequest) (cursors.Int64Iterator, error) { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unexpected read", - } - }, - SupportReadSeriesCardinalityFn: func(ctx context.Context) bool { - return false - }, - } - - provider := influxdb.Provider{ - Reader: storageflux.NewReader(store), - BucketLookup: mock.BucketLookup{}, - } - - ctx := 
query.ContextWithRequest( - context.Background(), - &query.Request{ - OrganizationID: orgID, - }, - ) - - wantErr := &errors.Error{ - Code: errors.EInvalid, - Msg: "series cardinality option is not supported", - } - - _, gotErr := provider.SeriesCardinalityReaderFor( - ctx, - influxdb.Config{ - Bucket: influxdb.NameOrID{ - Name: "my-bucket", - }, - }, - flux.Bounds{ - Start: flux.Time{ - Absolute: time.Unix(1, 0), - }, - Stop: flux.Time{ - Absolute: time.Unix(2, 0), - }, - }, - nil, - ) - - require.Equal(t, wantErr, gotErr) -} - -func TestProvider_SeriesCardinalityReader_MissingRequestContext(t *testing.T) { - t.Parallel() - - store := &mock.ReadsStore{ - ReadSeriesCardinalityFn: func(ctx context.Context, req *datatypes.ReadSeriesCardinalityRequest) (cursors.Int64Iterator, error) { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unexpected read", - } - }, - SupportReadSeriesCardinalityFn: func(ctx context.Context) bool { - return true - }, - } - - provider := influxdb.Provider{ - Reader: storageflux.NewReader(store), - BucketLookup: mock.BucketLookup{}, - } - - wantErr := &errors.Error{ - Code: errors.EInvalid, - Msg: "missing request on context", - } - - _, gotErr := provider.SeriesCardinalityReaderFor( - context.Background(), - influxdb.Config{ - Bucket: influxdb.NameOrID{ - Name: "my-bucket", - }, - }, - flux.Bounds{ - Start: flux.Time{ - Absolute: time.Unix(1, 0), - }, - Stop: flux.Time{ - Absolute: time.Unix(2, 0), - }, - }, - nil, - ) - - require.Equal(t, wantErr, gotErr) -} - -func TestWriterFor(t *testing.T) { - t.Parallel() - - auth := influxdb2.Authorization{ - Status: influxdb2.Active, - Permissions: []influxdb2.Permission{ - { - Action: influxdb2.WriteAction, - Resource: influxdb2.Resource{ - Type: influxdb2.BucketsResourceType, - }, - }, - }, - } - - provider := influxdb.Provider{ - Reader: storageflux.NewReader(&mock.ReadsStore{}), - BucketLookup: mock.BucketLookup{}, - } - - conf := influxdb.Config{ - Bucket: influxdb.NameOrID{ - Name: "my-bucket", - }, - } - - ctx := context.Background() - req := query.Request{ - OrganizationID: platform.ID(2), - } - ctx = query.ContextWithRequest(ctx, &req) - ctx = context2.SetAuthorizer(ctx, &auth) - - _, gotErr := provider.WriterFor(ctx, conf) - - require.Nil(t, gotErr) -} - -func TestWriterFor_Error(t *testing.T) { - t.Parallel() - - auth := influxdb2.Authorization{ - Status: influxdb2.Active, - Permissions: []influxdb2.Permission{ - { - Action: influxdb2.ReadAction, - Resource: influxdb2.Resource{ - Type: influxdb2.BucketsResourceType, - }, - }, - }, - } - - provider := influxdb.Provider{ - Reader: storageflux.NewReader(&mock.ReadsStore{}), - BucketLookup: mock.BucketLookup{}, - } - - conf := influxdb.Config{ - Bucket: influxdb.NameOrID{ - Name: "my-bucket", - }, - } - - ctx := context.Background() - req := query.Request{ - OrganizationID: platform.ID(2), - } - ctx = query.ContextWithRequest(ctx, &req) - ctx = context2.SetAuthorizer(ctx, &auth) - - _, gotErr := provider.WriterFor(ctx, conf) - - wantErr := &errors.Error{ - Code: errors.EForbidden, - Msg: "user not authorized to write", - } - - require.Equal(t, wantErr, gotErr) -} diff --git a/query/stdlib/influxdata/influxdb/rules.go b/query/stdlib/influxdata/influxdb/rules.go deleted file mode 100644 index d2f8b734786..00000000000 --- a/query/stdlib/influxdata/influxdb/rules.go +++ /dev/null @@ -1,1175 +0,0 @@ -package influxdb - -import ( - "context" - "math" - "time" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/ast" - 
"github.com/influxdata/flux/codes" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/plan" - "github.com/influxdata/flux/semantic" - "github.com/influxdata/flux/stdlib/experimental/table" - "github.com/influxdata/flux/stdlib/influxdata/influxdb" - "github.com/influxdata/flux/stdlib/universe" - "github.com/influxdata/flux/values" - "github.com/influxdata/influxdb/v2/kit/feature" -) - -func init() { - plan.RegisterPhysicalRules( - FromStorageRule{}, - PushDownRangeRule{}, - PushDownFilterRule{}, - PushDownGroupRule{}, - PushDownReadTagKeysRule{}, - PushDownReadTagValuesRule{}, - SortedPivotRule{}, - PushDownWindowAggregateRule{}, - PushDownWindowForceAggregateRule{}, - PushDownWindowAggregateByTimeRule{}, - PushDownAggregateWindowRule{}, - PushDownBareAggregateRule{}, - GroupWindowAggregateTransposeRule{}, - PushDownGroupAggregateRule{}, - ) - // TODO(lesam): re-enable MergeFilterRule once it works with complex use cases - // such as filter() |> geo.strictFilter(). See geo_merge_filter flux test. - // plan.RegisterLogicalRules( - // MergeFiltersRule{}, - // ) -} - -type FromStorageRule struct{} - -func (rule FromStorageRule) Name() string { - return "influxdata/influxdb.FromStorageRule" -} - -func (rule FromStorageRule) Pattern() plan.Pattern { - return plan.MultiSuccessor(influxdb.FromKind) -} - -func (rule FromStorageRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) { - fromSpec := node.ProcedureSpec().(*influxdb.FromProcedureSpec) - if fromSpec.Host != nil { - return node, false, nil - } else if fromSpec.Org != nil { - return node, false, &flux.Error{ - Code: codes.Unimplemented, - Msg: "reads from the storage engine cannot read from a separate organization; please specify a host or remove the organization", - } - } - - return plan.CreateLogicalNode("fromStorage", &FromStorageProcedureSpec{ - Bucket: fromSpec.Bucket, - }), true, nil -} - -// PushDownGroupRule pushes down a group operation to storage -type PushDownGroupRule struct{} - -func (rule PushDownGroupRule) Name() string { - return "PushDownGroupRule" -} - -func (rule PushDownGroupRule) Pattern() plan.Pattern { - return plan.MultiSuccessor(universe.GroupKind, plan.SingleSuccessor(ReadRangePhysKind)) -} - -func (rule PushDownGroupRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) { - src := node.Predecessors()[0].ProcedureSpec().(*ReadRangePhysSpec) - grp := node.ProcedureSpec().(*universe.GroupProcedureSpec) - - switch grp.GroupMode { - case - flux.GroupModeBy: - default: - return node, false, nil - } - - for _, col := range grp.GroupKeys { - // Storage can only group by tag keys. - // Note the columns _start and _stop are ok since all tables - // coming from storage will have the same _start and _values. 
- if col == execute.DefaultTimeColLabel || col == execute.DefaultValueColLabel { - return node, false, nil - } - } - - return plan.CreateUniquePhysicalNode(ctx, "ReadGroup", &ReadGroupPhysSpec{ - ReadRangePhysSpec: *src.Copy().(*ReadRangePhysSpec), - GroupMode: grp.GroupMode, - GroupKeys: grp.GroupKeys, - }), true, nil -} - -// PushDownRangeRule pushes down a range filter to storage -type PushDownRangeRule struct{} - -func (rule PushDownRangeRule) Name() string { - return "PushDownRangeRule" -} - -// Pattern matches 'from |> range' -func (rule PushDownRangeRule) Pattern() plan.Pattern { - return plan.MultiSuccessor(universe.RangeKind, plan.SingleSuccessor(FromKind)) -} - -// Rewrite converts 'from |> range' into 'ReadRange' -func (rule PushDownRangeRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) { - fromNode := node.Predecessors()[0] - fromSpec := fromNode.ProcedureSpec().(*FromStorageProcedureSpec) - rangeSpec := node.ProcedureSpec().(*universe.RangeProcedureSpec) - return plan.CreateUniquePhysicalNode(ctx, "ReadRange", &ReadRangePhysSpec{ - Bucket: fromSpec.Bucket.Name, - BucketID: fromSpec.Bucket.ID, - Bounds: rangeSpec.Bounds, - }), true, nil -} - -// PushDownFilterRule is a rule that pushes filters into from procedures to be evaluated in the storage layer. -// This rule is likely to be replaced by a more generic rule when we have a better -// framework for pushing filters, etc into sources. -type PushDownFilterRule struct{} - -func (PushDownFilterRule) Name() string { - return "PushDownFilterRule" -} - -func (PushDownFilterRule) Pattern() plan.Pattern { - return plan.MultiSuccessor(universe.FilterKind, plan.SingleSuccessor(ReadRangePhysKind)) -} - -func (PushDownFilterRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { - filterSpec := pn.ProcedureSpec().(*universe.FilterProcedureSpec) - fromNode := pn.Predecessors()[0] - fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec) - - // Cannot push down when keeping empty tables. - if filterSpec.KeepEmptyTables { - return pn, false, nil - } - - bodyExpr, ok := filterSpec.Fn.Fn.GetFunctionBodyExpression() - if !ok { - return pn, false, nil - } - - if len(filterSpec.Fn.Fn.Parameters.List) != 1 { - // I would expect that type checking would catch this, but just to be safe... - return pn, false, nil - } - - paramName := filterSpec.Fn.Fn.Parameters.List[0].Key.Name.Name() - - pushable, notPushable, err := semantic.PartitionPredicates(bodyExpr, func(e semantic.Expression) (bool, error) { - return isPushableExpr(paramName, e) - }) - if err != nil { - return nil, false, err - } - - if pushable == nil { - // Nothing could be pushed down, no rewrite can happen - return pn, false, nil - } - pushable, _ = rewritePushableExpr(pushable) - - // Convert the pushable expression to a storage predicate. - predicate, err := ToStoragePredicate(pushable, paramName) - if err != nil { - return nil, false, err - } - - // If the filter has already been set, then combine the existing predicate - // with the new one. - if fromSpec.Filter != nil { - mergedPredicate, err := mergePredicates(ast.AndOperator, fromSpec.Filter, predicate) - if err != nil { - return nil, false, err - } - predicate = mergedPredicate - } - - // Copy the specification and set the predicate. 
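// Illustrative sketch of what follows (the predicate is invented, not from the
// original tests): for
//	filter(fn: (r) => r._measurement == "cpu" and strings.containsStr(v: r.host, substr: "prod"))
// the measurement comparison can be pushed into the ReadRange predicate, but the
// containsStr call cannot, so the filter node is kept with only the residual,
// non-pushable half of its body; had the whole body been pushable, the filter
// node would instead be merged into the ReadRange node entirely.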
- newFromSpec := fromSpec.Copy().(*ReadRangePhysSpec) - newFromSpec.Filter = predicate - - if notPushable == nil { - // All predicates could be pushed down, so eliminate the filter - mergedNode, err := plan.MergeToPhysicalNode(pn, fromNode, newFromSpec) - if err != nil { - return nil, false, err - } - return mergedNode, true, nil - } - - err = fromNode.ReplaceSpec(newFromSpec) - if err != nil { - return nil, false, err - } - - newFilterSpec := filterSpec.Copy().(*universe.FilterProcedureSpec) - newFilterSpec.Fn.Fn.Block = &semantic.Block{ - Body: []semantic.Statement{ - &semantic.ReturnStatement{Argument: notPushable}, - }, - } - if err := pn.ReplaceSpec(newFilterSpec); err != nil { - return nil, false, err - } - - return pn, true, nil -} - -// PushDownReadTagKeysRule matches 'ReadRange |> keys() |> keep() |> distinct()'. -// The 'from()' must have already been merged with 'range' and, optionally, -// may have been merged with 'filter'. -// If any other properties have been set on the from procedure, -// this rule will not rewrite anything. -type PushDownReadTagKeysRule struct{} - -func (rule PushDownReadTagKeysRule) Name() string { - return "PushDownReadTagKeysRule" -} - -func (rule PushDownReadTagKeysRule) Pattern() plan.Pattern { - return plan.MultiSuccessor(universe.DistinctKind, - plan.SingleSuccessor(universe.SchemaMutationKind, - plan.SingleSuccessor(universe.KeysKind, - plan.SingleSuccessor(ReadRangePhysKind)))) -} - -func (rule PushDownReadTagKeysRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { - // Retrieve the nodes and specs for all of the predecessors. - distinctSpec := pn.ProcedureSpec().(*universe.DistinctProcedureSpec) - keepNode := pn.Predecessors()[0] - keepSpec := asSchemaMutationProcedureSpec(keepNode.ProcedureSpec()) - keysNode := keepNode.Predecessors()[0] - keysSpec := keysNode.ProcedureSpec().(*universe.KeysProcedureSpec) - fromNode := keysNode.Predecessors()[0] - fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec) - - // A filter spec would have already been merged into the - // from spec if it existed so we will take that one when - // constructing our own replacement. We do not care about it - // at the moment though which is why it is not in the pattern. - - // The schema mutator needs to correspond to a keep call - // on the column specified by the keys procedure. - if len(keepSpec.Mutations) != 1 { - return pn, false, nil - } else if m, ok := keepSpec.Mutations[0].(*universe.KeepOpSpec); !ok { - return pn, false, nil - } else if m.Predicate.Fn != nil || len(m.Columns) != 1 { - // We have a keep mutator, but it uses a function or - // it retains more than one column so it does not match - // what we want. - return pn, false, nil - } else if m.Columns[0] != keysSpec.Column { - // We are not keeping the value column so this optimization - // will not work. - return pn, false, nil - } - - // The distinct spec should keep only the value column. - if distinctSpec.Column != keysSpec.Column { - return pn, false, nil - } - - // We have passed all of the necessary prerequisites - // so construct the procedure spec. - return plan.CreateUniquePhysicalNode(ctx, "ReadTagKeys", &ReadTagKeysPhysSpec{ - ReadRangePhysSpec: *fromSpec.Copy().(*ReadRangePhysSpec), - }), true, nil -} - -// PushDownReadTagValuesRule matches 'ReadRange |> keep(columns: [tag]) |> group() |> distinct(column: tag)'. -// The 'from()' must have already been merged with 'range' and, optionally, -// may have been merged with 'filter'. 
-// If any other properties have been set on the from procedure, -// this rule will not rewrite anything. -type PushDownReadTagValuesRule struct{} - -func (rule PushDownReadTagValuesRule) Name() string { - return "PushDownReadTagValuesRule" -} - -func (rule PushDownReadTagValuesRule) Pattern() plan.Pattern { - return plan.MultiSuccessor(universe.DistinctKind, - plan.SingleSuccessor(universe.GroupKind, - plan.SingleSuccessor(universe.SchemaMutationKind, - plan.SingleSuccessor(ReadRangePhysKind)))) -} - -func (rule PushDownReadTagValuesRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { - // Retrieve the nodes and specs for all of the predecessors. - distinctNode := pn - distinctSpec := distinctNode.ProcedureSpec().(*universe.DistinctProcedureSpec) - groupNode := distinctNode.Predecessors()[0] - groupSpec := groupNode.ProcedureSpec().(*universe.GroupProcedureSpec) - keepNode := groupNode.Predecessors()[0] - keepSpec := asSchemaMutationProcedureSpec(keepNode.ProcedureSpec()) - fromNode := keepNode.Predecessors()[0] - fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec) - - // A filter spec would have already been merged into the - // from spec if it existed so we will take that one when - // constructing our own replacement. We do not care about it - // at the moment though which is why it is not in the pattern. - - // All of the values need to be grouped into the same table. - if groupSpec.GroupMode != flux.GroupModeBy { - return pn, false, nil - } else if len(groupSpec.GroupKeys) > 0 { - return pn, false, nil - } - - // The column that distinct is for will be the tag key. - tagKey := distinctSpec.Column - if !isValidTagKeyForTagValues(tagKey) { - return pn, false, nil - } - - // The schema mutator needs to correspond to a keep call - // on the tag key column. - if len(keepSpec.Mutations) != 1 { - return pn, false, nil - } else if m, ok := keepSpec.Mutations[0].(*universe.KeepOpSpec); !ok { - return pn, false, nil - } else if m.Predicate.Fn != nil || len(m.Columns) != 1 { - // We have a keep mutator, but it uses a function or - // it retains more than one column so it does not match - // what we want. - return pn, false, nil - } else if m.Columns[0] != tagKey { - // We are not keeping the value column so this optimization - // will not work. - return pn, false, nil - } - - // We have passed all of the necessary prerequisites - // so construct the procedure spec. - return plan.CreateUniquePhysicalNode(ctx, "ReadTagValues", &ReadTagValuesPhysSpec{ - ReadRangePhysSpec: *fromSpec.Copy().(*ReadRangePhysSpec), - TagKey: tagKey, - }), true, nil -} - -var invalidTagKeysForTagValues = []string{ - execute.DefaultTimeColLabel, - execute.DefaultValueColLabel, - execute.DefaultStartColLabel, - execute.DefaultStopColLabel, -} - -// isValidTagKeyForTagValues returns true if the given key can -// be used in a tag values call. -func isValidTagKeyForTagValues(key string) bool { - for _, k := range invalidTagKeysForTagValues { - if k == key { - return false - } - } - return true -} - -// isPushableExpr determines if a predicate expression can be pushed down into the storage layer. 
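Before the implementation, a brief orientation on what ends up being pushable in practice. The shapes below follow from the checks in the next few functions and from the PushDownFilterRule cases in rules_test.go later in this diff; the grouping into two slices is purely an illustrative sketch.

```go
// Predicate shapes the filter push-down accepts (literal on the RHS, member
// access on the filter's single parameter):
var pushableExamples = []string{
	`(r) => r._measurement == "cpu"`, // tag compared against a string literal
	`(r) => r._value > 0.5`,          // field value with a range operator
	`(r) => exists r.host`,           // rewritten below to r.host != ""
}

// Shapes that stay in the query engine:
var notPushableExamples = []string{
	`(r) => 0.5 < r._value`,           // literal on the LHS
	`(r) => r.host == ""`,             // empty-string tag equality cannot be answered by storage
	`(r) => not r.host == "server01"`, // negation of a binary expression is not rewritten yet
}
```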
-func isPushableExpr(paramName string, expr semantic.Expression) (bool, error) {
-	switch e := expr.(type) {
-	case *semantic.LogicalExpression:
-		b, err := isPushableExpr(paramName, e.Left)
-		if err != nil {
-			return false, err
-		}
-
-		if !b {
-			return false, nil
-		}
-
-		return isPushableExpr(paramName, e.Right)
-
-	case *semantic.UnaryExpression:
-		if isPushableUnaryPredicate(paramName, e) {
-			return true, nil
-		}
-
-	case *semantic.BinaryExpression:
-		if isPushableBinaryPredicate(paramName, e) {
-			return true, nil
-		}
-	}
-
-	return false, nil
-}
-
-func isPushableUnaryPredicate(paramName string, ue *semantic.UnaryExpression) bool {
-	switch ue.Operator {
-	case ast.NotOperator:
-		// TODO(jsternberg): We should be able to rewrite `not r.host == "tag"` to `r.host != "tag"`
-		// but that is beyond what we do right now.
-		arg, ok := ue.Argument.(*semantic.UnaryExpression)
-		if !ok {
-			return false
-		}
-		return isPushableUnaryPredicate(paramName, arg)
-	case ast.ExistsOperator:
-		return isTag(paramName, ue.Argument)
-	default:
-		return false
-	}
-}
-
-func isPushableBinaryPredicate(paramName string, be *semantic.BinaryExpression) bool {
-	// Manual testing seems to indicate that (at least right now) we can
-	// only handle predicates of the form <param>.<property> <op> <literal>,
-	// and the literal must be on the RHS.
-
-	if !isLiteral(be.Right) {
-		return false
-	}
-
-	// If the predicate is a string literal, we are comparing for equality,
-	// it is a tag, and it is empty, then it is not pushable.
-	//
-	// This is because the storage engine does not consider there to be a difference
-	// between a tag with an empty value and a non-existent tag. We have made
-	// the decision that a missing tag is null and not an empty string, so empty
-	// string isn't something that can be returned from the storage layer.
-	if lit, ok := be.Right.(*semantic.StringLiteral); ok {
-		if be.Operator == ast.EqualOperator && isTag(paramName, be.Left) && lit.Value == "" {
-			// The string literal is pushable if the operator is != because
-			// != "" will evaluate to true with everything that has a tag value
-			// and false when the tag value is null.
-			return false
-		}
-	}
-
-	if isField(paramName, be.Left) && isPushableFieldOperator(be.Operator) {
-		return true
-	}
-
-	if isTag(paramName, be.Left) && isPushableTagOperator(be.Operator) {
-		return true
-	}
-
-	return false
-}
-
-// rewritePushableExpr will rewrite the expression for the storage layer.
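As a concrete illustration of this rewrite, the two `exists` cases below mirror the PushDownFilterRule tests later in this diff; the struct slice is only a sketch of the before/after predicates.

```go
// exists becomes a comparison against the empty string, and a not over that
// rewritten comparison flips != to ==.
var existsRewrites = []struct{ in, out string }{
	{`(r) => exists r.host`, `(r) => r.host != ""`},
	{`(r) => not exists r.host`, `(r) => r.host == ""`},
}
```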
-func rewritePushableExpr(e semantic.Expression) (semantic.Expression, bool) { - switch e := e.(type) { - case *semantic.UnaryExpression: - var changed bool - if arg, ok := rewritePushableExpr(e.Argument); ok { - e = e.Copy().(*semantic.UnaryExpression) - e.Argument = arg - changed = true - } - - switch e.Operator { - case ast.NotOperator: - if be, ok := e.Argument.(*semantic.BinaryExpression); ok { - switch be.Operator { - case ast.EqualOperator: - be = be.Copy().(*semantic.BinaryExpression) - be.Operator = ast.NotEqualOperator - return be, true - case ast.NotEqualOperator: - be = be.Copy().(*semantic.BinaryExpression) - be.Operator = ast.EqualOperator - return be, true - } - } - case ast.ExistsOperator: - return &semantic.BinaryExpression{ - Operator: ast.NotEqualOperator, - Left: e.Argument, - Right: &semantic.StringLiteral{ - Value: "", - }, - }, true - } - return e, changed - - case *semantic.BinaryExpression: - left, lok := rewritePushableExpr(e.Left) - right, rok := rewritePushableExpr(e.Right) - if lok || rok { - e = e.Copy().(*semantic.BinaryExpression) - e.Left, e.Right = left, right - return e, true - } - - case *semantic.LogicalExpression: - left, lok := rewritePushableExpr(e.Left) - right, rok := rewritePushableExpr(e.Right) - if lok || rok { - e = e.Copy().(*semantic.LogicalExpression) - e.Left, e.Right = left, right - return e, true - } - } - return e, false -} - -func isLiteral(e semantic.Expression) bool { - switch e.(type) { - case *semantic.StringLiteral: - return true - case *semantic.IntegerLiteral: - return true - case *semantic.BooleanLiteral: - return true - case *semantic.FloatLiteral: - return true - case *semantic.RegexpLiteral: - return true - } - - return false -} - -const fieldValueProperty = "_value" - -func isTag(paramName string, e semantic.Expression) bool { - memberExpr := validateMemberExpr(paramName, e) - return memberExpr != nil && memberExpr.Property.Name() != fieldValueProperty -} - -func isField(paramName string, e semantic.Expression) bool { - memberExpr := validateMemberExpr(paramName, e) - return memberExpr != nil && memberExpr.Property.Name() == fieldValueProperty -} - -func validateMemberExpr(paramName string, e semantic.Expression) *semantic.MemberExpression { - memberExpr, ok := e.(*semantic.MemberExpression) - if !ok { - return nil - } - - idExpr, ok := memberExpr.Object.(*semantic.IdentifierExpression) - if !ok { - return nil - } - - if idExpr.Name.Name() != paramName { - return nil - } - - return memberExpr -} - -func isPushableTagOperator(kind ast.OperatorKind) bool { - pushableOperators := []ast.OperatorKind{ - ast.EqualOperator, - ast.NotEqualOperator, - ast.RegexpMatchOperator, - ast.NotRegexpMatchOperator, - } - - for _, op := range pushableOperators { - if op == kind { - return true - } - } - - return false -} - -func isPushableFieldOperator(kind ast.OperatorKind) bool { - if isPushableTagOperator(kind) { - return true - } - - // Fields can be filtered by anything that tags can be filtered by, - // plus range operators. - - moreOperators := []ast.OperatorKind{ - ast.LessThanEqualOperator, - ast.LessThanOperator, - ast.GreaterThanEqualOperator, - ast.GreaterThanOperator, - } - - for _, op := range moreOperators { - if op == kind { - return true - } - } - - return false -} - -// SortedPivotRule is a rule that optimizes a pivot when it is directly -// after an influxdb from. 
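For orientation, a sketch of the kind of query this rule targets; the bucket name is made up, and the pivot call is roughly what schema.fieldsAsCols() expands to.

```go
// Illustrative Flux whose physical plan contains ReadRange -> pivot, the shape
// SortedPivotRule annotates as already sorted by its non-value columns.
const sortedPivotExample = `
from(bucket: "example-bucket")
    |> range(start: -1h)
    |> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value")
`
```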
-type SortedPivotRule struct{} - -func (SortedPivotRule) Name() string { - return "SortedPivotRule" -} - -func (SortedPivotRule) Pattern() plan.Pattern { - return plan.MultiSuccessor(universe.PivotKind, plan.SingleSuccessor(ReadRangePhysKind)) -} - -func (SortedPivotRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { - pivotSpec := pn.ProcedureSpec().Copy().(*universe.PivotProcedureSpec) - pivotSpec.IsSortedByFunc = func(cols []string, desc bool) bool { - if desc { - return false - } - - // The only thing that disqualifies this from being - // sorted is if the _value column is mentioned or if - // the tag does not exist. - for _, label := range cols { - if label == execute.DefaultTimeColLabel { - continue - } else if label == execute.DefaultValueColLabel { - return false - } - - // Everything else is a tag. Even if the tag does not exist, - // this is still considered sorted since sorting doesn't depend - // on a tag existing. - } - - // We are already sorted. - return true - } - pivotSpec.IsKeyColumnFunc = func(label string) bool { - if label == execute.DefaultTimeColLabel || label == execute.DefaultValueColLabel { - return false - } - // Everything else would be a tag if it existed. - // The transformation itself will catch if the column does not exist. - return true - } - - if err := pn.ReplaceSpec(pivotSpec); err != nil { - return nil, false, err - } - return pn, false, nil -} - -// Push Down of window aggregates. -// ReadRangePhys |> window |> { min, max, mean, count, sum } -type PushDownWindowAggregateRule struct{} - -func (PushDownWindowAggregateRule) Name() string { - return "PushDownWindowAggregateRule" -} - -var windowPushableAggs = []plan.ProcedureKind{ - universe.CountKind, - universe.SumKind, - universe.MinKind, - universe.MaxKind, - universe.MeanKind, - universe.FirstKind, - universe.LastKind, -} - -func (rule PushDownWindowAggregateRule) Pattern() plan.Pattern { - return plan.MultiSuccessorOneOf(windowPushableAggs, - plan.SingleSuccessor(universe.WindowKind, plan.SingleSuccessor(ReadRangePhysKind))) -} - -func canPushWindowedAggregate(ctx context.Context, fnNode plan.Node) bool { - // Check the aggregate function spec. Require the operation on _value - // and check the feature flag associated with the aggregate function. 
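// For example, a bare `|> min()` selects the default "_value" column and passes
// the check below, while `|> min(column: "other")` does not, so that aggregate
// stays in the query engine and only the ReadRange source remains pushed down.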
- switch fnNode.Kind() { - case universe.MinKind: - minSpec := fnNode.ProcedureSpec().(*universe.MinProcedureSpec) - return minSpec.Column == execute.DefaultValueColLabel - case universe.MaxKind: - maxSpec := fnNode.ProcedureSpec().(*universe.MaxProcedureSpec) - return maxSpec.Column == execute.DefaultValueColLabel - case universe.MeanKind: - meanSpec := fnNode.ProcedureSpec().(*universe.MeanProcedureSpec) - return len(meanSpec.Columns) == 1 && - meanSpec.Columns[0] == execute.DefaultValueColLabel - case universe.CountKind: - countSpec := fnNode.ProcedureSpec().(*universe.CountProcedureSpec) - return len(countSpec.Columns) == 1 && - countSpec.Columns[0] == execute.DefaultValueColLabel - case universe.SumKind: - sumSpec := fnNode.ProcedureSpec().(*universe.SumProcedureSpec) - return len(sumSpec.Columns) == 1 && - sumSpec.Columns[0] == execute.DefaultValueColLabel - case universe.FirstKind: - firstSpec := fnNode.ProcedureSpec().(*universe.FirstProcedureSpec) - return firstSpec.Column == execute.DefaultValueColLabel - case universe.LastKind: - lastSpec := fnNode.ProcedureSpec().(*universe.LastProcedureSpec) - return lastSpec.Column == execute.DefaultValueColLabel - } - return true -} - -func isPushableWindow(windowSpec *universe.WindowProcedureSpec) bool { - // every and period must be equal - // every.isNegative must be false - // offset.isNegative must be false - // location must be UTC with no offset - // timeColumn: must be "_time" - // startColumn: must be "_start" - // stopColumn: must be "_stop" - // createEmpty: must be false - window := windowSpec.Window - return window.Every.Equal(window.Period) && - !window.Every.IsNegative() && - !window.Offset.IsNegative() && - window.Location.IsUTC() && - windowSpec.TimeColumn == "_time" && - windowSpec.StartColumn == "_start" && - windowSpec.StopColumn == "_stop" -} - -func (PushDownWindowAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { - fnNode := pn - if !canPushWindowedAggregate(ctx, fnNode) { - return pn, false, nil - } - - windowNode := fnNode.Predecessors()[0] - windowSpec := windowNode.ProcedureSpec().(*universe.WindowProcedureSpec) - fromNode := windowNode.Predecessors()[0] - fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec) - - if !isPushableWindow(windowSpec) { - return pn, false, nil - } - - // Rule passes. - return plan.CreateUniquePhysicalNode(ctx, "ReadWindowAggregate", &ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *fromSpec.Copy().(*ReadRangePhysSpec), - Aggregates: []plan.ProcedureKind{fnNode.Kind()}, - WindowEvery: windowSpec.Window.Every, - Offset: windowSpec.Window.Offset, - CreateEmpty: windowSpec.CreateEmpty, - }), true, nil -} - -// PushDownWindowForceAggregateRule will match the given pattern. -// ReadWindowAggregatePhys |> table.fill() -// -// If this pattern matches, then the ForceAggregate switch will be enabled -// on the ReadWindowAggregate which will force selectors to return a null value. -// -// This pattern is idempotent and may be applied multiple times with the same effect. 
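A sketch of a query that can end up in this shape once the window aggregate has been pushed down; the bucket and measurement names are made up, and whether the planner reaches exactly this pattern depends on the earlier rules.

```go
// Illustrative Flux: a windowed selector followed by experimental table.fill(),
// which is the table.FillKind successor this rule looks for.
const forceAggregateExample = `
import "experimental/table"

from(bucket: "example-bucket")
    |> range(start: -1h)
    |> filter(fn: (r) => r._measurement == "cpu")
    |> window(every: 1m)
    |> max()
    |> table.fill()
`
```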
-type PushDownWindowForceAggregateRule struct{} - -func (PushDownWindowForceAggregateRule) Name() string { - return "PushDownWindowForceAggregateRule" -} - -func (PushDownWindowForceAggregateRule) Pattern() plan.Pattern { - return plan.MultiSuccessor(table.FillKind, - plan.SingleSuccessor(ReadWindowAggregatePhysKind)) -} - -func (PushDownWindowForceAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { - windowAggregateNode := pn.Predecessors()[0] - windowAggregateSpec := windowAggregateNode.ProcedureSpec().(*ReadWindowAggregatePhysSpec) - if windowAggregateSpec.WindowEvery == flux.ConvertDuration(math.MaxInt64) { - // Do not apply this transformation to the bare aggregate case. - // There's virtually no benefit to pushing that down since there are no - // subsequent transformations to push down and I'm not actually sure the - // code works properly in that case. - return pn, false, nil - } - windowAggregateSpec.ForceAggregate = true - - newNode, err := plan.MergeToPhysicalNode(pn, windowAggregateNode, windowAggregateSpec) - if err != nil { - return pn, false, err - } - return newNode, true, nil -} - -// PushDownWindowAggregateByTimeRule will match the given pattern. -// ReadWindowAggregatePhys |> duplicate |> window(every: inf) -// -// If this pattern matches and the arguments to duplicate are -// matching time column names, it will set the time column on -// the spec. -type PushDownWindowAggregateByTimeRule struct{} - -func (PushDownWindowAggregateByTimeRule) Name() string { - return "PushDownWindowAggregateByTimeRule" -} - -func (PushDownWindowAggregateByTimeRule) Pattern() plan.Pattern { - return plan.MultiSuccessor(universe.WindowKind, - plan.SingleSuccessor(universe.SchemaMutationKind, - plan.SingleSuccessor(ReadWindowAggregatePhysKind))) -} - -func (PushDownWindowAggregateByTimeRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { - windowNode := pn - windowSpec := windowNode.ProcedureSpec().(*universe.WindowProcedureSpec) - - duplicateNode := windowNode.Predecessors()[0] - duplicateSpec, duplicateSpecOk := func() (*universe.DuplicateOpSpec, bool) { - s := asSchemaMutationProcedureSpec(duplicateNode.ProcedureSpec()) - if len(s.Mutations) != 1 { - return nil, false - } - mutator, ok := s.Mutations[0].(*universe.DuplicateOpSpec) - return mutator, ok - }() - if !duplicateSpecOk { - return pn, false, nil - } - - // The As field must be the default time value - // and the column must be start or stop. - if duplicateSpec.As != execute.DefaultTimeColLabel || - (duplicateSpec.Column != execute.DefaultStartColLabel && duplicateSpec.Column != execute.DefaultStopColLabel) { - return pn, false, nil - } - - // window(every: inf) - if windowSpec.Window.Every != values.ConvertDurationNsecs(math.MaxInt64) || - windowSpec.Window.Every != windowSpec.Window.Period || - windowSpec.TimeColumn != execute.DefaultTimeColLabel || - windowSpec.StartColumn != execute.DefaultStartColLabel || - windowSpec.StopColumn != execute.DefaultStopColLabel || - windowSpec.CreateEmpty { - return pn, false, nil - } - - // Cannot rewrite if already was rewritten. - windowAggregateNode := duplicateNode.Predecessors()[0] - windowAggregateSpec := windowAggregateNode.ProcedureSpec().(*ReadWindowAggregatePhysSpec) - if windowAggregateSpec.TimeColumn != "" { - return pn, false, nil - } - - // Rule passes. 
- windowAggregateSpec.TimeColumn = duplicateSpec.Column - return plan.CreateUniquePhysicalNode(ctx, "ReadWindowAggregateByTime", windowAggregateSpec), true, nil -} - -type PushDownAggregateWindowRule struct{} - -func (p PushDownAggregateWindowRule) Name() string { - return "PushDownAggregateWindowRule" -} - -func (p PushDownAggregateWindowRule) Pattern() plan.Pattern { - return plan.MultiSuccessor(universe.AggregateWindowKind, - plan.SingleSuccessor(ReadRangePhysKind)) -} - -func (p PushDownAggregateWindowRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { - aggregateWindowSpec := pn.ProcedureSpec().(*universe.AggregateWindowProcedureSpec) - fromNode := pn.Predecessors()[0] - fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec) - - if !isPushableWindow(aggregateWindowSpec.WindowSpec) { - return pn, false, nil - } - - if aggregateWindowSpec.ValueCol != execute.DefaultValueColLabel { - return pn, false, nil - } - - switch aggregateWindowSpec.AggregateKind { - case universe.MinKind, universe.MaxKind, - universe.MeanKind, universe.CountKind, universe.SumKind, - universe.FirstKind, universe.LastKind: - // All of these are supported. - default: - return pn, false, nil - } - - windowAggregateSpec := &ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *fromSpec.Copy().(*ReadRangePhysSpec), - Aggregates: []plan.ProcedureKind{ - aggregateWindowSpec.AggregateKind, - }, - WindowEvery: aggregateWindowSpec.WindowSpec.Window.Every, - Offset: aggregateWindowSpec.WindowSpec.Window.Offset, - CreateEmpty: aggregateWindowSpec.WindowSpec.CreateEmpty, - TimeColumn: execute.DefaultStopColLabel, - ForceAggregate: aggregateWindowSpec.ForceAggregate, - } - if aggregateWindowSpec.UseStart { - windowAggregateSpec.TimeColumn = execute.DefaultStartColLabel - } - - return plan.CreateUniquePhysicalNode(ctx, "ReadWindowAggregateByTime", windowAggregateSpec), true, nil -} - -// PushDownBareAggregateRule is a rule that allows pushing down of aggregates -// that are directly over a ReadRange source. -type PushDownBareAggregateRule struct{} - -func (p PushDownBareAggregateRule) Name() string { - return "PushDownBareAggregateRule" -} - -func (p PushDownBareAggregateRule) Pattern() plan.Pattern { - return plan.MultiSuccessorOneOf(windowPushableAggs, - plan.SingleSuccessor(ReadRangePhysKind)) -} - -func (p PushDownBareAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { - fnNode := pn - if !canPushWindowedAggregate(ctx, fnNode) { - return pn, false, nil - } - - fromNode := fnNode.Predecessors()[0] - fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec) - - return plan.CreateUniquePhysicalNode(ctx, "ReadWindowAggregate", &ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *fromSpec.Copy().(*ReadRangePhysSpec), - Aggregates: []plan.ProcedureKind{fnNode.Kind()}, - WindowEvery: flux.ConvertDuration(math.MaxInt64 * time.Nanosecond), - }), true, nil -} - -// GroupWindowAggregateTransposeRule will match the given pattern. -// ReadGroupPhys |> window |> { min, max, count, sum } -// -// This pattern will use the PushDownWindowAggregateRule to determine -// if the ReadWindowAggregatePhys operation is available before it will -// rewrite the above. This rewrites the above to: -// -// ReadWindowAggregatePhys |> group(columns: ["_start", "_stop", ...]) |> { min, max, sum } -// -// The count aggregate uses sum to merge the results. 
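As an illustration of the regrouping step described above: the "host" key is made up, the appended bounds columns come from the rewrite below, and the snippet assumes the imports already present in this file.

```go
// Illustrative only: if the original ReadGroup grouped by ["host"], the group
// node inserted after the new ReadWindowAggregate regroups by the original keys
// plus the window bounds before the (possibly replaced) aggregate runs again.
var exampleRegroup = &universe.GroupProcedureSpec{
	GroupMode: flux.GroupModeBy,
	GroupKeys: []string{"host", "_start", "_stop"},
}
```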
-type GroupWindowAggregateTransposeRule struct{} - -func (p GroupWindowAggregateTransposeRule) Name() string { - return "GroupWindowAggregateTransposeRule" -} - -var windowMergeablePushAggs = []plan.ProcedureKind{ - universe.MinKind, - universe.MaxKind, - universe.CountKind, - universe.SumKind, -} - -func (p GroupWindowAggregateTransposeRule) Pattern() plan.Pattern { - return plan.MultiSuccessorOneOf(windowMergeablePushAggs, - plan.SingleSuccessor(universe.WindowKind, plan.SingleSuccessor(ReadGroupPhysKind))) -} - -func (p GroupWindowAggregateTransposeRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { - if !feature.GroupWindowAggregateTranspose().Enabled(ctx) { - return pn, false, nil - } - - fnNode := pn - if !canPushWindowedAggregate(ctx, fnNode) { - return pn, false, nil - } - - windowNode := fnNode.Predecessors()[0] - windowSpec := windowNode.ProcedureSpec().(*universe.WindowProcedureSpec) - - if !isPushableWindow(windowSpec) { - return pn, false, nil - } - - fromNode := windowNode.Predecessors()[0] - fromSpec := fromNode.ProcedureSpec().(*ReadGroupPhysSpec) - - // This only works with GroupModeBy. It is the case - // that ReadGroup, which we depend on as a predecessor, - // only works with GroupModeBy so it should be impossible - // to fail this condition, but we add this here for extra - // protection. - if fromSpec.GroupMode != flux.GroupModeBy { - return pn, false, nil - } - - // Perform the rewrite by replacing each of the nodes. - newFromNode := plan.CreateUniquePhysicalNode(ctx, "ReadWindowAggregate", &ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *fromSpec.ReadRangePhysSpec.Copy().(*ReadRangePhysSpec), - Aggregates: []plan.ProcedureKind{fnNode.Kind()}, - WindowEvery: windowSpec.Window.Every, - Offset: windowSpec.Window.Offset, - CreateEmpty: windowSpec.CreateEmpty, - }) - - // Replace the window node with a group node first. - groupKeys := make([]string, len(fromSpec.GroupKeys), len(fromSpec.GroupKeys)+2) - copy(groupKeys, fromSpec.GroupKeys) - if !execute.ContainsStr(groupKeys, execute.DefaultStartColLabel) { - groupKeys = append(groupKeys, execute.DefaultStartColLabel) - } - if !execute.ContainsStr(groupKeys, execute.DefaultStopColLabel) { - groupKeys = append(groupKeys, execute.DefaultStopColLabel) - } - newGroupNode := plan.CreateUniquePhysicalNode(ctx, "group", &universe.GroupProcedureSpec{ - GroupMode: flux.GroupModeBy, - GroupKeys: groupKeys, - }) - newFromNode.AddSuccessors(newGroupNode) - newGroupNode.AddPredecessors(newFromNode) - - // Attach the existing function node to the new group node. - fnNode.ClearPredecessors() - newGroupNode.AddSuccessors(fnNode) - fnNode.AddPredecessors(newGroupNode) - - // Replace the spec for the function if needed. - switch spec := fnNode.ProcedureSpec().(type) { - case *universe.CountProcedureSpec: - newFnNode := plan.CreateUniquePhysicalNode(ctx, "sum", &universe.SumProcedureSpec{ - SimpleAggregateConfig: spec.SimpleAggregateConfig, - }) - plan.ReplaceNode(fnNode, newFnNode) - fnNode = newFnNode - default: - // No replacement required. The procedure is idempotent so - // we can use it over and over again and get the same result. - } - return fnNode, true, nil -} - -// Push Down of group aggregates. 
-// ReadGroupPhys |> { count } -type PushDownGroupAggregateRule struct{} - -func (PushDownGroupAggregateRule) Name() string { - return "PushDownGroupAggregateRule" -} - -func (rule PushDownGroupAggregateRule) Pattern() plan.Pattern { - return plan.MultiSuccessorOneOf( - []plan.ProcedureKind{ - universe.CountKind, - universe.SumKind, - universe.FirstKind, - universe.LastKind, - universe.MinKind, - universe.MaxKind, - }, - plan.SingleSuccessor(ReadGroupPhysKind)) -} - -func (PushDownGroupAggregateRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { - group := pn.Predecessors()[0].ProcedureSpec().(*ReadGroupPhysSpec) - // Cannot push down multiple aggregates - if len(group.AggregateMethod) > 0 { - return pn, false, nil - } - - if !canPushGroupedAggregate(ctx, pn) { - return pn, false, nil - } - - switch pn.Kind() { - case universe.CountKind: - // ReadGroup() -> count => ReadGroup(count) - node := plan.CreateUniquePhysicalNode(ctx, "ReadGroupAggregate", &ReadGroupPhysSpec{ - ReadRangePhysSpec: group.ReadRangePhysSpec, - GroupMode: group.GroupMode, - GroupKeys: group.GroupKeys, - AggregateMethod: universe.CountKind, - }) - return node, true, nil - case universe.SumKind: - // ReadGroup() -> sum => ReadGroup(sum) - node := plan.CreateUniquePhysicalNode(ctx, "ReadGroupAggregate", &ReadGroupPhysSpec{ - ReadRangePhysSpec: group.ReadRangePhysSpec, - GroupMode: group.GroupMode, - GroupKeys: group.GroupKeys, - AggregateMethod: universe.SumKind, - }) - return node, true, nil - case universe.FirstKind: - // ReadGroup() -> first => ReadGroup(first) - node := plan.CreateUniquePhysicalNode(ctx, "ReadGroupAggregate", &ReadGroupPhysSpec{ - ReadRangePhysSpec: group.ReadRangePhysSpec, - GroupMode: group.GroupMode, - GroupKeys: group.GroupKeys, - AggregateMethod: universe.FirstKind, - }) - return node, true, nil - case universe.LastKind: - // ReadGroup() -> last => ReadGroup(last) - node := plan.CreateUniquePhysicalNode(ctx, "ReadGroupAggregate", &ReadGroupPhysSpec{ - ReadRangePhysSpec: group.ReadRangePhysSpec, - GroupMode: group.GroupMode, - GroupKeys: group.GroupKeys, - AggregateMethod: universe.LastKind, - }) - return node, true, nil - case universe.MinKind: - // ReadGroup() -> min => ReadGroup(min) - node := plan.CreateUniquePhysicalNode(ctx, "ReadGroupAggregate", &ReadGroupPhysSpec{ - ReadRangePhysSpec: group.ReadRangePhysSpec, - GroupMode: group.GroupMode, - GroupKeys: group.GroupKeys, - AggregateMethod: universe.MinKind, - }) - return node, true, nil - case universe.MaxKind: - // ReadGroup() -> max => ReadGroup(max) - node := plan.CreateUniquePhysicalNode(ctx, "ReadGroupAggregate", &ReadGroupPhysSpec{ - ReadRangePhysSpec: group.ReadRangePhysSpec, - GroupMode: group.GroupMode, - GroupKeys: group.GroupKeys, - AggregateMethod: universe.MaxKind, - }) - return node, true, nil - } - return pn, false, nil -} - -func canPushGroupedAggregate(ctx context.Context, pn plan.Node) bool { - switch pn.Kind() { - case universe.CountKind: - agg := pn.ProcedureSpec().(*universe.CountProcedureSpec) - return len(agg.Columns) == 1 && agg.Columns[0] == execute.DefaultValueColLabel - case universe.SumKind: - agg := pn.ProcedureSpec().(*universe.SumProcedureSpec) - return len(agg.Columns) == 1 && agg.Columns[0] == execute.DefaultValueColLabel - case universe.FirstKind: - agg := pn.ProcedureSpec().(*universe.FirstProcedureSpec) - return agg.Column == execute.DefaultValueColLabel - case universe.LastKind: - agg := pn.ProcedureSpec().(*universe.LastProcedureSpec) - return agg.Column == 
execute.DefaultValueColLabel - case universe.MaxKind: - agg := pn.ProcedureSpec().(*universe.MaxProcedureSpec) - return agg.Column == execute.DefaultValueColLabel - case universe.MinKind: - agg := pn.ProcedureSpec().(*universe.MinProcedureSpec) - return agg.Column == execute.DefaultValueColLabel - } - return false -} - -func asSchemaMutationProcedureSpec(spec plan.ProcedureSpec) *universe.SchemaMutationProcedureSpec { - if s, ok := spec.(*universe.DualImplProcedureSpec); ok { - spec = s.ProcedureSpec - } - return spec.(*universe.SchemaMutationProcedureSpec) -} - -type MergeFiltersRule struct{} - -func (MergeFiltersRule) Name() string { - return universe.MergeFiltersRule{}.Name() -} - -func (MergeFiltersRule) Pattern() plan.Pattern { - return universe.MergeFiltersRule{}.Pattern() -} - -func (r MergeFiltersRule) Rewrite(ctx context.Context, pn plan.Node) (plan.Node, bool, error) { - return universe.MergeFiltersRule{}.Rewrite(ctx, pn) -} diff --git a/query/stdlib/influxdata/influxdb/rules_test.go b/query/stdlib/influxdata/influxdb/rules_test.go deleted file mode 100644 index a069b366896..00000000000 --- a/query/stdlib/influxdata/influxdb/rules_test.go +++ /dev/null @@ -1,3092 +0,0 @@ -package influxdb_test - -import ( - "context" - "math" - "testing" - "time" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/execute/executetest" - "github.com/influxdata/flux/interpreter" - "github.com/influxdata/flux/plan" - "github.com/influxdata/flux/plan/plantest" - "github.com/influxdata/flux/semantic" - "github.com/influxdata/flux/stdlib/experimental/table" - fluxinfluxdb "github.com/influxdata/flux/stdlib/influxdata/influxdb" - "github.com/influxdata/flux/stdlib/universe" - "github.com/influxdata/flux/values" - _ "github.com/influxdata/influxdb/v2/fluxinit/static" - "github.com/influxdata/influxdb/v2/kit/feature" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "google.golang.org/protobuf/testing/protocmp" -) - -func fluxTime(t int64) flux.Time { - return flux.Time{ - Absolute: time.Unix(0, t).UTC(), - } -} - -func TestPushDownRangeRule(t *testing.T) { - fromSpec := influxdb.FromStorageProcedureSpec{ - Bucket: influxdb.NameOrID{Name: "my-bucket"}, - } - rangeSpec := universe.RangeProcedureSpec{ - Bounds: flux.Bounds{ - Start: fluxTime(5), - Stop: fluxTime(10), - }, - } - createRangeSpec := func() *influxdb.ReadRangePhysSpec { - return &influxdb.ReadRangePhysSpec{ - Bucket: "my-bucket", - Bounds: flux.Bounds{ - Start: fluxTime(5), - Stop: fluxTime(10), - }, - } - } - - tests := []plantest.RuleTestCase{ - { - Name: "simple", - // from -> range => ReadRange - Rules: []plan.Rule{ - influxdb.FromStorageRule{}, - influxdb.PushDownRangeRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("from", &fromSpec), - plan.CreateLogicalNode("range", &rangeSpec), - }, - Edges: [][2]int{{0, 1}}, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", createRangeSpec()), - }, - }, - }, - { - Name: "with successor", - // from -> range -> count => ReadRange -> count - Rules: []plan.Rule{ - influxdb.FromStorageRule{}, - influxdb.PushDownRangeRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ 
- plan.CreateLogicalNode("from", &fromSpec), - plan.CreateLogicalNode("range", &rangeSpec), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", createRangeSpec()), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - }, - Edges: [][2]int{{0, 1}}, - }, - }, - { - Name: "with multiple successors", - // count mean - // \ / count mean - // range => \ / - // | ReadRange - // from - Rules: []plan.Rule{ - influxdb.FromStorageRule{}, - influxdb.PushDownRangeRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("from", &fromSpec), - plan.CreateLogicalNode("range", &rangeSpec), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - plan.CreatePhysicalNode("mean", &universe.MeanProcedureSpec{}), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {1, 3}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", createRangeSpec()), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - plan.CreatePhysicalNode("mean", &universe.MeanProcedureSpec{}), - }, - Edges: [][2]int{ - {0, 1}, - {0, 2}, - }, - }, - }, - } - - for _, tc := range tests { - tc := tc - t.Run(tc.Name, func(t *testing.T) { - t.Parallel() - plantest.PhysicalRuleTestHelper(t, &tc, protocmp.Transform()) - }) - } -} - -func TestPushDownFilterRule(t *testing.T) { - var ( - bounds = flux.Bounds{ - Start: fluxTime(5), - Stop: fluxTime(10), - } - - pushableFn1 = executetest.FunctionExpression(t, `(r) => r._measurement == "cpu"`) - pushableFn2 = executetest.FunctionExpression(t, `(r) => r._field == "cpu"`) - pushableFn1and2 = executetest.FunctionExpression(t, `(r) => r._measurement == "cpu" and r._field == "cpu"`) - unpushableFn = executetest.FunctionExpression(t, `(r) => 0.5 < r._value`) - pushableAndUnpushableFn = executetest.FunctionExpression(t, `(r) => r._measurement == "cpu" and 0.5 < r._value`) - ) - - makeResolvedFilterFn := func(expr *semantic.FunctionExpression) interpreter.ResolvedFunction { - return interpreter.ResolvedFunction{ - Fn: expr, - } - } - - toStoragePredicate := func(fn *semantic.FunctionExpression) *datatypes.Predicate { - body, ok := fn.GetFunctionBodyExpression() - if !ok { - panic("more than one statement in function body") - } - - predicate, err := influxdb.ToStoragePredicate(body, "r") - if err != nil { - panic(err) - } - return predicate - } - - tests := []plantest.RuleTestCase{ - { - Name: "simple", - // ReadRange -> filter => ReadRange - Rules: []plan.Rule{influxdb.PushDownFilterRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - }), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(pushableFn1), - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("merged_ReadRange_filter", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - Filter: toStoragePredicate(pushableFn1), - }), - }, - }, - }, - { - Name: "two filters", - // ReadRange -> filter -> filter => ReadRange (rule applied twice) - Rules: []plan.Rule{influxdb.PushDownFilterRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - }), - plan.CreatePhysicalNode("filter1", 
&universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(pushableFn1), - }), - plan.CreatePhysicalNode("filter2", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(pushableFn2), - }), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("merged_ReadRange_filter1_filter2", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - Filter: toStoragePredicate(pushableFn1and2), - }), - }, - }, - }, - { - Name: "partially pushable filter", - // ReadRange -> partially-pushable-filter => ReadRange -> unpushable-filter - Rules: []plan.Rule{influxdb.PushDownFilterRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - }), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(pushableAndUnpushableFn), - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - Filter: toStoragePredicate(pushableFn1), - }), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(unpushableFn), - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - }, - { - Name: "from range filter", - // from -> range -> filter => ReadRange - Rules: []plan.Rule{ - influxdb.FromStorageRule{}, - influxdb.PushDownRangeRule{}, - influxdb.PushDownFilterRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("from", &influxdb.FromStorageProcedureSpec{}), - plan.CreatePhysicalNode("range", &universe.RangeProcedureSpec{ - Bounds: bounds, - }), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(pushableFn1)}, - ), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("merged_ReadRange_filter", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - Filter: toStoragePredicate(pushableFn1), - }), - }, - }, - }, - { - Name: "unpushable filter", - // from -> filter => from -> filter (no change) - Rules: []plan.Rule{influxdb.PushDownFilterRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - }), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(unpushableFn), - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - NoChange: true, - }, - { - Name: `exists r.host`, - Rules: []plan.Rule{influxdb.PushDownFilterRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - }), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(executetest.FunctionExpression(t, `(r) => exists r.host`)), - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("merged_ReadRange_filter", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - Filter: toStoragePredicate(executetest.FunctionExpression(t, `(r) => r.host != ""`)), - }), - }, - }, - }, - { - Name: `not exists r.host`, - Rules: []plan.Rule{influxdb.PushDownFilterRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - }), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - 
Fn: makeResolvedFilterFn(executetest.FunctionExpression(t, `(r) => not exists r.host`)), - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("merged_ReadRange_filter", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - Filter: toStoragePredicate(executetest.FunctionExpression(t, `(r) => r.host == ""`)), - }), - }, - }, - }, - { - Name: `r.host == ""`, - Rules: []plan.Rule{influxdb.PushDownFilterRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - }), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(executetest.FunctionExpression(t, `(r) => r.host == ""`)), - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - NoChange: true, - }, - { - Name: `r.host != ""`, - Rules: []plan.Rule{influxdb.PushDownFilterRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - }), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(executetest.FunctionExpression(t, `(r) => r.host != ""`)), - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("merged_ReadRange_filter", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - Filter: toStoragePredicate(executetest.FunctionExpression(t, `(r) => r.host != ""`)), - }), - }, - }, - }, - { - Name: `r._value == ""`, - Rules: []plan.Rule{influxdb.PushDownFilterRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - }), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(executetest.FunctionExpression(t, `(r) => r._value == ""`)), - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("merged_ReadRange_filter", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - Filter: toStoragePredicate(executetest.FunctionExpression(t, `(r) => r._value == ""`)), - }), - }, - }, - }, - { - // TODO(jsternberg): This one should be rewritten, but is not currently. 
- Name: `not r.host == "server01"`, - Rules: []plan.Rule{influxdb.PushDownFilterRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - }), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(executetest.FunctionExpression(t, `(r) => not r.host == "server01"`)), - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - NoChange: true, - }, - { - Name: `r._measurement == "cpu" and exists r.host`, - Rules: []plan.Rule{influxdb.PushDownFilterRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - }), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(executetest.FunctionExpression(t, `(r) => r.host == "cpu" and exists r.host`)), - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("merged_ReadRange_filter", &influxdb.ReadRangePhysSpec{ - Bounds: bounds, - Filter: toStoragePredicate(executetest.FunctionExpression(t, `(r) => r.host == "cpu" and r.host != ""`)), - }), - }, - }, - }, - } - - for _, tc := range tests { - tc := tc - t.Run(tc.Name, func(t *testing.T) { - plantest.PhysicalRuleTestHelper(t, &tc, protocmp.Transform()) - }) - } -} - -func TestPushDownGroupRule(t *testing.T) { - createRangeSpec := func() *influxdb.ReadRangePhysSpec { - return &influxdb.ReadRangePhysSpec{ - Bucket: "my-bucket", - Bounds: flux.Bounds{ - Start: fluxTime(5), - Stop: fluxTime(10), - }, - } - } - - tests := []plantest.RuleTestCase{ - { - Name: "simple", - // ReadRange -> group => ReadGroup - Rules: []plan.Rule{ - influxdb.PushDownGroupRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("group", &universe.GroupProcedureSpec{ - GroupMode: flux.GroupModeBy, - GroupKeys: []string{"_measurement", "tag0", "tag1"}, - }), - }, - Edges: [][2]int{{0, 1}}, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - GroupMode: flux.GroupModeBy, - GroupKeys: []string{"_measurement", "tag0", "tag1"}, - }), - }, - }, - }, - { - Name: "with successor", - // ReadRange -> group -> count => ReadGroup -> count - Rules: []plan.Rule{ - influxdb.PushDownGroupRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("group", &universe.GroupProcedureSpec{ - GroupMode: flux.GroupModeBy, - GroupKeys: []string{"_measurement", "tag0", "tag1"}, - }), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - GroupMode: flux.GroupModeBy, - GroupKeys: []string{"_measurement", "tag0", "tag1"}, - }), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - }, - Edges: [][2]int{{0, 1}}, - }, - }, - { - Name: "with multiple successors", - // - // group count group count - // \ / => \ / - // ReadRange ReadRange - // - Rules: []plan.Rule{ - influxdb.PushDownGroupRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - 
plan.CreateLogicalNode("group", &universe.GroupProcedureSpec{ - GroupMode: flux.GroupModeBy, - GroupKeys: []string{"_measurement", "tag0", "tag1"}, - }), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - }, - Edges: [][2]int{ - {0, 1}, - {0, 2}, - }, - }, - NoChange: true, - }, - { - Name: "un-group", - // ReadRange -> group() => ReadGroup - Rules: []plan.Rule{ - influxdb.PushDownGroupRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("group", &universe.GroupProcedureSpec{ - GroupMode: flux.GroupModeBy, - GroupKeys: []string{}, - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - GroupMode: flux.GroupModeBy, - GroupKeys: []string{}, - }), - }, - }, - }, - { - Name: "group except", - // ReadRange -> group(mode: "except") => ReadRange -> group(mode: "except") - Rules: []plan.Rule{ - influxdb.PushDownGroupRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("group", &universe.GroupProcedureSpec{ - GroupMode: flux.GroupModeExcept, - GroupKeys: []string{"_measurement", "tag0", "tag1"}, - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - NoChange: true, - }, - { - Name: "group none", - Rules: []plan.Rule{ - influxdb.PushDownGroupRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("group", &universe.GroupProcedureSpec{ - GroupMode: flux.GroupModeNone, - GroupKeys: []string{}, - }), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - NoChange: true, - }, - { - Name: "cannot push down", - // ReadRange -> count -> group => ReadRange -> count -> group - Rules: []plan.Rule{ - influxdb.PushDownGroupRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - plan.CreateLogicalNode("group", &universe.GroupProcedureSpec{ - GroupMode: flux.GroupModeBy, - GroupKeys: []string{"_measurement", "tag0", "tag1"}, - }), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - NoChange: true, - }, - } - - for _, tc := range tests { - tc := tc - t.Run(tc.Name, func(t *testing.T) { - t.Parallel() - plantest.PhysicalRuleTestHelper(t, &tc, protocmp.Transform()) - }) - } -} - -func TestReadTagKeysRule(t *testing.T) { - fromSpec := influxdb.FromStorageProcedureSpec{ - Bucket: influxdb.NameOrID{Name: "my-bucket"}, - } - rangeSpec := universe.RangeProcedureSpec{ - Bounds: flux.Bounds{ - Start: fluxTime(5), - Stop: fluxTime(10), - }, - } - filterSpec := universe.FilterProcedureSpec{ - Fn: interpreter.ResolvedFunction{ - Scope: nil, - Fn: &semantic.FunctionExpression{ - Parameters: &semantic.FunctionParameters{ - List: []*semantic.FunctionParameter{{ - Key: &semantic.Identifier{ - Name: semantic.NewSymbol("r"), - }, - }}, - }, - Block: &semantic.Block{ - Body: []semantic.Statement{ - &semantic.ReturnStatement{ - Argument: &semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{ - Name: semantic.NewSymbol("r"), - }, - Property: semantic.NewSymbol("_measurement"), - }, - Right: &semantic.StringLiteral{ - Value: "cpu", - }, - }, - }, - }, - }, - }, - }, - } - keysSpec 
:= universe.KeysProcedureSpec{ - Column: execute.DefaultValueColLabel, - } - keepSpec := universe.SchemaMutationProcedureSpec{ - Mutations: []universe.SchemaMutation{ - &universe.KeepOpSpec{ - Columns: []string{ - execute.DefaultValueColLabel, - }, - }, - }, - } - distinctSpec := universe.DistinctProcedureSpec{ - Column: execute.DefaultValueColLabel, - } - readTagKeysSpec := func(filter bool) plan.PhysicalProcedureSpec { - s := influxdb.ReadTagKeysPhysSpec{ - ReadRangePhysSpec: influxdb.ReadRangePhysSpec{ - Bucket: "my-bucket", - Bounds: flux.Bounds{ - Start: fluxTime(5), - Stop: fluxTime(10), - }, - }, - } - if filter { - bodyExpr, _ := filterSpec.Fn.Fn.GetFunctionBodyExpression() - s.Filter, _ = influxdb.ToStoragePredicate(bodyExpr, "r") - } - return &s - } - - tests := []plantest.RuleTestCase{ - { - Name: "simple", - // from -> range -> keys -> keep -> distinct => ReadTagKeys - Rules: []plan.Rule{ - influxdb.PushDownRangeRule{}, - influxdb.PushDownReadTagKeysRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("from", &fromSpec), - plan.CreateLogicalNode("range", &rangeSpec), - plan.CreateLogicalNode("keys", &keysSpec), - plan.CreateLogicalNode("keep", &keepSpec), - plan.CreateLogicalNode("distinct", &distinctSpec), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {3, 4}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadTagKeys", readTagKeysSpec(false)), - }, - }, - }, - { - Name: "with filter", - // from -> range -> filter -> keys -> keep -> distinct => ReadTagKeys - Rules: []plan.Rule{ - influxdb.PushDownRangeRule{}, - influxdb.PushDownFilterRule{}, - influxdb.PushDownReadTagKeysRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("from", &fromSpec), - plan.CreateLogicalNode("range", &rangeSpec), - plan.CreateLogicalNode("filter", &filterSpec), - plan.CreateLogicalNode("keys", &keysSpec), - plan.CreateLogicalNode("keep", &keepSpec), - plan.CreateLogicalNode("distinct", &distinctSpec), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {3, 4}, - {4, 5}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadTagKeys", readTagKeysSpec(true)), - }, - }, - }, - { - Name: "with successor", - // from -> range -> keys -> keep -> distinct -> count => ReadTagKeys -> count - Rules: []plan.Rule{ - influxdb.PushDownRangeRule{}, - influxdb.PushDownReadTagKeysRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("from", &fromSpec), - plan.CreateLogicalNode("range", &rangeSpec), - plan.CreateLogicalNode("keys", &keysSpec), - plan.CreateLogicalNode("keep", &keepSpec), - plan.CreateLogicalNode("distinct", &distinctSpec), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {3, 4}, - {4, 5}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadTagKeys", readTagKeysSpec(false)), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - }, - Edges: [][2]int{{0, 1}}, - }, - }, - { - Name: "with multiple successors", - // count mean - // \ / count mean - // range => \ / - // | ReadTagKeys - // from - Rules: []plan.Rule{ - influxdb.PushDownRangeRule{}, - influxdb.PushDownReadTagKeysRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("from", &fromSpec), - plan.CreateLogicalNode("range", &rangeSpec), - plan.CreateLogicalNode("keys", &keysSpec), - 
plan.CreateLogicalNode("keep", &keepSpec), - plan.CreateLogicalNode("distinct", &distinctSpec), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - plan.CreatePhysicalNode("mean", &universe.MeanProcedureSpec{}), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {3, 4}, - {4, 5}, - {4, 6}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadTagKeys", readTagKeysSpec(false)), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - plan.CreatePhysicalNode("mean", &universe.MeanProcedureSpec{}), - }, - Edges: [][2]int{ - {0, 1}, - {0, 2}, - }, - }, - }, - } - - for _, tc := range tests { - tc := tc - t.Run(tc.Name, func(t *testing.T) { - t.Parallel() - plantest.PhysicalRuleTestHelper(t, &tc, protocmp.Transform()) - }) - } -} - -func TestReadTagValuesRule(t *testing.T) { - fromSpec := influxdb.FromStorageProcedureSpec{ - Bucket: influxdb.NameOrID{Name: "my-bucket"}, - } - rangeSpec := universe.RangeProcedureSpec{ - Bounds: flux.Bounds{ - Start: fluxTime(5), - Stop: fluxTime(10), - }, - } - filterSpec := universe.FilterProcedureSpec{ - Fn: interpreter.ResolvedFunction{ - Scope: nil, - Fn: &semantic.FunctionExpression{ - Parameters: &semantic.FunctionParameters{ - List: []*semantic.FunctionParameter{{ - Key: &semantic.Identifier{ - Name: semantic.NewSymbol("r"), - }, - }}, - }, - Block: &semantic.Block{ - Body: []semantic.Statement{ - &semantic.ReturnStatement{ - Argument: &semantic.BinaryExpression{ - Operator: ast.EqualOperator, - Left: &semantic.MemberExpression{ - Object: &semantic.IdentifierExpression{ - Name: semantic.NewSymbol("r"), - }, - Property: semantic.NewSymbol("_measurement"), - }, - Right: &semantic.StringLiteral{ - Value: "cpu", - }, - }, - }, - }, - }, - }, - }, - } - keepSpec := universe.SchemaMutationProcedureSpec{ - Mutations: []universe.SchemaMutation{ - &universe.KeepOpSpec{ - Columns: []string{ - "host", - }, - }, - }, - } - groupSpec := universe.GroupProcedureSpec{ - GroupMode: flux.GroupModeBy, - GroupKeys: []string{}, - } - distinctSpec := universe.DistinctProcedureSpec{ - Column: "host", - } - readTagValuesSpec := func(filter bool) plan.PhysicalProcedureSpec { - s := influxdb.ReadTagValuesPhysSpec{ - ReadRangePhysSpec: influxdb.ReadRangePhysSpec{ - Bucket: "my-bucket", - Bounds: flux.Bounds{ - Start: fluxTime(5), - Stop: fluxTime(10), - }, - }, - TagKey: "host", - } - if filter { - bodyExpr, _ := filterSpec.Fn.Fn.GetFunctionBodyExpression() - s.Filter, _ = influxdb.ToStoragePredicate(bodyExpr, "r") - } - return &s - } - - tests := []plantest.RuleTestCase{ - { - Name: "simple", - // from -> range -> keep -> group -> distinct => ReadTagValues - Rules: []plan.Rule{ - influxdb.PushDownRangeRule{}, - influxdb.PushDownReadTagValuesRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("from", &fromSpec), - plan.CreateLogicalNode("range", &rangeSpec), - plan.CreateLogicalNode("keep", &keepSpec), - plan.CreateLogicalNode("group", &groupSpec), - plan.CreateLogicalNode("distinct", &distinctSpec), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {3, 4}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadTagValues", readTagValuesSpec(false)), - }, - }, - }, - { - Name: "with filter", - // from -> range -> filter -> keep -> group -> distinct => ReadTagValues - Rules: []plan.Rule{ - influxdb.PushDownRangeRule{}, - influxdb.PushDownFilterRule{}, - influxdb.PushDownReadTagValuesRule{}, - }, - Before: &plantest.PlanSpec{ - 
Nodes: []plan.Node{ - plan.CreateLogicalNode("from", &fromSpec), - plan.CreateLogicalNode("range", &rangeSpec), - plan.CreateLogicalNode("filter", &filterSpec), - plan.CreateLogicalNode("keep", &keepSpec), - plan.CreateLogicalNode("group", &groupSpec), - plan.CreateLogicalNode("distinct", &distinctSpec), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {3, 4}, - {4, 5}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadTagValues", readTagValuesSpec(true)), - }, - }, - }, - { - Name: "with successor", - // from -> range -> keep -> group -> distinct -> count => ReadTagValues -> count - Rules: []plan.Rule{ - influxdb.PushDownRangeRule{}, - influxdb.PushDownReadTagValuesRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("from", &fromSpec), - plan.CreateLogicalNode("range", &rangeSpec), - plan.CreateLogicalNode("keep", &keepSpec), - plan.CreateLogicalNode("group", &groupSpec), - plan.CreateLogicalNode("distinct", &distinctSpec), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {3, 4}, - {4, 5}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadTagValues", readTagValuesSpec(false)), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - }, - Edges: [][2]int{{0, 1}}, - }, - }, - { - Name: "with multiple successors", - // count mean - // \ / count mean - // range => \ / - // | ReadTagValues - // from - Rules: []plan.Rule{ - influxdb.PushDownRangeRule{}, - influxdb.PushDownReadTagValuesRule{}, - }, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("from", &fromSpec), - plan.CreateLogicalNode("range", &rangeSpec), - plan.CreateLogicalNode("keep", &keepSpec), - plan.CreateLogicalNode("group", &groupSpec), - plan.CreateLogicalNode("distinct", &distinctSpec), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - plan.CreatePhysicalNode("mean", &universe.MeanProcedureSpec{}), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {3, 4}, - {4, 5}, - {4, 6}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadTagValues", readTagValuesSpec(false)), - plan.CreatePhysicalNode("count", &universe.CountProcedureSpec{}), - plan.CreatePhysicalNode("mean", &universe.MeanProcedureSpec{}), - }, - Edges: [][2]int{ - {0, 1}, - {0, 2}, - }, - }, - }, - } - - for _, tc := range tests { - tc := tc - t.Run(tc.Name, func(t *testing.T) { - t.Parallel() - plantest.PhysicalRuleTestHelper(t, &tc, protocmp.Transform()) - }) - } -} - -func minProcedureSpec() *universe.MinProcedureSpec { - return &universe.MinProcedureSpec{ - SelectorConfig: execute.SelectorConfig{Column: execute.DefaultValueColLabel}, - } -} -func maxProcedureSpec() *universe.MaxProcedureSpec { - return &universe.MaxProcedureSpec{ - SelectorConfig: execute.SelectorConfig{Column: execute.DefaultValueColLabel}, - } -} -func countProcedureSpec() *universe.CountProcedureSpec { - return &universe.CountProcedureSpec{ - SimpleAggregateConfig: execute.SimpleAggregateConfig{Columns: []string{execute.DefaultValueColLabel}}, - } -} -func sumProcedureSpec() *universe.SumProcedureSpec { - return &universe.SumProcedureSpec{ - SimpleAggregateConfig: execute.SimpleAggregateConfig{Columns: []string{execute.DefaultValueColLabel}}, - } -} -func firstProcedureSpec() *universe.FirstProcedureSpec { - return &universe.FirstProcedureSpec{ - SelectorConfig: execute.SelectorConfig{Column: 
execute.DefaultValueColLabel}, - } -} -func lastProcedureSpec() *universe.LastProcedureSpec { - return &universe.LastProcedureSpec{ - SelectorConfig: execute.SelectorConfig{Column: execute.DefaultValueColLabel}, - } -} -func meanProcedureSpec() *universe.MeanProcedureSpec { - return &universe.MeanProcedureSpec{ - SimpleAggregateConfig: execute.SimpleAggregateConfig{Columns: []string{execute.DefaultValueColLabel}}, - } -} - -// -// Window Aggregate Testing -// -func TestPushDownWindowAggregateRule(t *testing.T) { - rules := []plan.Rule{ - universe.AggregateWindowRule{}, - influxdb.PushDownWindowAggregateRule{}, - influxdb.PushDownWindowAggregateByTimeRule{}, - influxdb.PushDownAggregateWindowRule{}, - } - - createRangeSpec := func() *influxdb.ReadRangePhysSpec { - return &influxdb.ReadRangePhysSpec{ - Bucket: "my-bucket", - Bounds: flux.Bounds{ - Start: fluxTime(5), - Stop: fluxTime(10), - }, - } - } - - dur1m := values.ConvertDurationNsecs(60 * time.Second) - dur2m := values.ConvertDurationNsecs(120 * time.Second) - dur0 := values.ConvertDurationNsecs(0) - durNeg, _ := values.ParseDuration("-60s") - dur1mo, _ := values.ParseDuration("1mo") - dur1y, _ := values.ParseDuration("1y") - durInf := values.ConvertDurationNsecs(math.MaxInt64) - durMixed, _ := values.ParseDuration("1mo5m") - - window := func(dur values.Duration) universe.WindowProcedureSpec { - return universe.WindowProcedureSpec{ - Window: plan.WindowSpec{ - Every: dur, - Period: dur, - Offset: dur0, - Location: plan.Location{ - Name: "UTC", - }, - }, - TimeColumn: "_time", - StartColumn: "_start", - StopColumn: "_stop", - CreateEmpty: false, - } - } - - window1m := window(dur1m) - window2m := window(dur2m) - windowNeg := window(durNeg) - window1y := window(dur1y) - window1mo := window(dur1mo) - windowInf := window(durInf) - windowInfCreateEmpty := windowInf - windowInfCreateEmpty.CreateEmpty = true - - tests := make([]plantest.RuleTestCase, 0) - - // construct a simple plan with a specific window and aggregate function - simplePlanWithWindowAgg := func(window universe.WindowProcedureSpec, agg plan.NodeID, spec plan.ProcedureSpec) *plantest.PlanSpec { - return &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("window", &window), - plan.CreateLogicalNode(agg, spec), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - } - } - - // construct a simple result - simpleResult := func(proc plan.ProcedureKind, createEmpty bool, successors ...plan.Node) *plantest.PlanSpec { - spec := &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - Aggregates: []plan.ProcedureKind{proc}, - WindowEvery: flux.ConvertDuration(60000000000 * time.Nanosecond), - CreateEmpty: createEmpty, - }), - }, - } - for i, successor := range successors { - spec.Nodes = append(spec.Nodes, successor) - spec.Edges = append(spec.Edges, [2]int{i, i + 1}) - } - return spec - } - - // ReadRange -> window -> min => ReadWindowAggregate - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "SimplePassMin", - Rules: rules, - Before: simplePlanWithWindowAgg(window1m, universe.MinKind, minProcedureSpec()), - After: simpleResult(universe.MinKind, false), - }) - - // ReadRange -> window -> max => ReadWindowAggregate - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "SimplePassMax", - Rules: rules, - Before: 
simplePlanWithWindowAgg(window1m, universe.MaxKind, maxProcedureSpec()), - After: simpleResult(universe.MaxKind, false), - }) - - // ReadRange -> window -> mean => ReadWindowAggregate - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "SimplePassMean", - Rules: rules, - Before: simplePlanWithWindowAgg(window1m, universe.MeanKind, meanProcedureSpec()), - After: simpleResult(universe.MeanKind, false), - }) - - // ReadRange -> window -> count => ReadWindowAggregate - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "SimplePassCount", - Rules: rules, - Before: simplePlanWithWindowAgg(window1m, universe.CountKind, countProcedureSpec()), - After: simpleResult(universe.CountKind, false), - }) - - // ReadRange -> window -> sum => ReadWindowAggregate - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "SimplePassSum", - Rules: rules, - Before: simplePlanWithWindowAgg(window1m, universe.SumKind, sumProcedureSpec()), - After: simpleResult(universe.SumKind, false), - }) - - // ReadRange -> window -> first => ReadWindowAggregate - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "SimplePassFirst", - Rules: rules, - Before: simplePlanWithWindowAgg(window1m, universe.FirstKind, firstProcedureSpec()), - After: simpleResult(universe.FirstKind, false), - }) - - // ReadRange -> window -> last => ReadWindowAggregate - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "SimplePassLast", - Rules: rules, - Before: simplePlanWithWindowAgg(window1m, universe.LastKind, lastProcedureSpec()), - After: simpleResult(universe.LastKind, false), - }) - - // Rewrite with successors - // ReadRange -> window -> min -> count {2} => ReadWindowAggregate -> count {2} - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "WithSuccessor", - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("window", &window1m), - plan.CreateLogicalNode("min", minProcedureSpec()), - plan.CreateLogicalNode("count", countProcedureSpec()), - plan.CreateLogicalNode("count", countProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {2, 4}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - Aggregates: []plan.ProcedureKind{"min"}, - WindowEvery: flux.ConvertDuration(60000000000 * time.Nanosecond), - }), - plan.CreateLogicalNode("count", countProcedureSpec()), - plan.CreateLogicalNode("count", countProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {0, 2}, - }, - }, - }) - - // ReadRange -> window(offset: ...) 
-> last => ReadWindowAggregate - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "WindowPositiveOffset", - Rules: rules, - Before: simplePlanWithWindowAgg(universe.WindowProcedureSpec{ - Window: plan.WindowSpec{ - Every: dur2m, - Period: dur2m, - Offset: dur1m, - Location: plan.Location{ - Name: "UTC", - }, - }, - TimeColumn: "_time", - StartColumn: "_start", - StopColumn: "_stop", - }, universe.LastKind, lastProcedureSpec()), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - Aggregates: []plan.ProcedureKind{universe.LastKind}, - WindowEvery: flux.ConvertDuration(120000000000 * time.Nanosecond), - Offset: flux.ConvertDuration(60000000000 * time.Nanosecond), - }), - }, - }, - }) - - // ReadRange -> window(every: 1mo) -> last => ReadWindowAggregate - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "WindowByMonth", - Rules: rules, - Before: simplePlanWithWindowAgg(window1mo, universe.LastKind, lastProcedureSpec()), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - Aggregates: []plan.ProcedureKind{universe.LastKind}, - WindowEvery: dur1mo, - }), - }, - }, - }) - - // ReadRange -> window(every: 1y) -> last => ReadWindowAggregate - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "WindowByYear", - Rules: rules, - Before: simplePlanWithWindowAgg(window1y, universe.LastKind, lastProcedureSpec()), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - Aggregates: []plan.ProcedureKind{universe.LastKind}, - WindowEvery: dur1y, - }), - }, - }, - }) - - // ReadRange -> window(every: 1y, offset: 1mo) -> last => ReadWindowAggregate - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "WindowMonthlyOffset", - Rules: rules, - Before: simplePlanWithWindowAgg(func() universe.WindowProcedureSpec { - spec := window1y - spec.Window.Offset = dur1mo - return spec - }(), universe.LastKind, lastProcedureSpec()), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - Aggregates: []plan.ProcedureKind{universe.LastKind}, - WindowEvery: dur1y, - Offset: dur1mo, - }), - }, - }, - }) - - // ReadRange -> window(every: 1y, offset: 1mo5m) -> last => ReadWindowAggregate - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "WindowMixedOffset", - Rules: rules, - Before: simplePlanWithWindowAgg(func() universe.WindowProcedureSpec { - spec := window1y - spec.Window.Offset = durMixed - return spec - }(), universe.LastKind, lastProcedureSpec()), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - Aggregates: []plan.ProcedureKind{universe.LastKind}, - WindowEvery: dur1y, - Offset: durMixed, - }), - }, - }, - }) - - // Helper that adds a test with a simple plan that does not pass due to a - // specified bad window - simpleMinUnchanged := func(name string, window universe.WindowProcedureSpec) { - // Note: NoChange is 
not working correctly for these tests. It is - // expecting empty time, start, and stop column fields. - tests = append(tests, plantest.RuleTestCase{ - Name: name, - Context: context.Background(), - Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, - Before: simplePlanWithWindowAgg(window, "min", minProcedureSpec()), - NoChange: true, - }) - } - - // Condition not met: period not equal to every - badWindow1 := window1m - badWindow1.Window.Period = dur2m - simpleMinUnchanged("BadPeriod", badWindow1) - - // Condition not met: negative offset - badWindow2 := window1m - badWindow2.Window.Offset = durNeg - simpleMinUnchanged("NegOffset", badWindow2) - - // Condition not met: non-standard _time column - badWindow3 := window1m - badWindow3.TimeColumn = "_timmy" - simpleMinUnchanged("BadTime", badWindow3) - - // Condition not met: non-standard start column - badWindow4 := window1m - badWindow4.StartColumn = "_stooort" - simpleMinUnchanged("BadStart", badWindow4) - - // Condition not met: non-standard stop column - badWindow5 := window1m - badWindow5.StopColumn = "_stappp" - simpleMinUnchanged("BadStop", badWindow5) - - // Condition not met: non-UTC location - badWindow6 := window1m - badWindow6.Window.Location.Name = "America/Los_Angeles" - simpleMinUnchanged("BadLocation", badWindow6) - - // Condition not met: non-zero location offset - badWindow7 := window1m - badWindow7.Window.Location.Offset = values.ConvertDurationNsecs(time.Hour) - simpleMinUnchanged("BadLocationOffset", badWindow7) - - // Condition met: createEmpty is true. - windowCreateEmpty1m := window1m - windowCreateEmpty1m.CreateEmpty = true - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "CreateEmptyPassMin", - Rules: rules, - Before: simplePlanWithWindowAgg(windowCreateEmpty1m, "min", minProcedureSpec()), - After: simpleResult("min", true), - }) - - // Condition not met: neg duration. 
- simpleMinUnchanged("WindowNeg", windowNeg) - - // Bad min column - // ReadRange -> window -> min => NO-CHANGE - tests = append(tests, plantest.RuleTestCase{ - Name: "BadMinCol", - Context: context.Background(), - Rules: rules, - Before: simplePlanWithWindowAgg(window1m, "min", &universe.MinProcedureSpec{ - SelectorConfig: execute.SelectorConfig{Column: "_valmoo"}, - }), - NoChange: true, - }) - - // Bad max column - // ReadRange -> window -> max => NO-CHANGE - tests = append(tests, plantest.RuleTestCase{ - Name: "BadMaxCol", - Context: context.Background(), - Rules: rules, - Before: simplePlanWithWindowAgg(window1m, "max", &universe.MaxProcedureSpec{ - SelectorConfig: execute.SelectorConfig{Column: "_valmoo"}, - }), - NoChange: true, - }) - - // Bad mean columns - // ReadRange -> window -> mean => NO-CHANGE - tests = append(tests, plantest.RuleTestCase{ - Name: "BadMeanCol1", - Context: context.Background(), - Rules: rules, - Before: simplePlanWithWindowAgg(window1m, "mean", &universe.MeanProcedureSpec{ - SimpleAggregateConfig: execute.SimpleAggregateConfig{Columns: []string{"_valmoo"}}, - }), - NoChange: true, - }) - tests = append(tests, plantest.RuleTestCase{ - Name: "BadMeanCol2", - Context: context.Background(), - Rules: rules, - Before: simplePlanWithWindowAgg(window1m, "mean", &universe.MeanProcedureSpec{ - SimpleAggregateConfig: execute.SimpleAggregateConfig{Columns: []string{"_value", "_valmoo"}}, - }), - NoChange: true, - }) - - // No match due to a collapsed node having a successor - // ReadRange -> window -> min - // \-> min - tests = append(tests, plantest.RuleTestCase{ - Name: "CollapsedWithSuccessor1", - Context: context.Background(), - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("window", &window1m), - plan.CreateLogicalNode("min", minProcedureSpec()), - plan.CreateLogicalNode("min", minProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {1, 3}, - }, - }, - NoChange: true, - }) - - // No match due to a collapsed node having a successor - // ReadRange -> window -> min - // \-> window - tests = append(tests, plantest.RuleTestCase{ - Name: "CollapsedWithSuccessor2", - Context: context.Background(), - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("window", &window1m), - plan.CreateLogicalNode("min", minProcedureSpec()), - plan.CreateLogicalNode("window", &window2m), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {0, 3}, - }, - }, - NoChange: true, - }) - - // No pattern match - // ReadRange -> filter -> window -> min -> NO-CHANGE - pushableFn1 := executetest.FunctionExpression(t, `(r) => true`) - - makeResolvedFilterFn := func(expr *semantic.FunctionExpression) interpreter.ResolvedFunction { - return interpreter.ResolvedFunction{ - Scope: nil, - Fn: expr, - } - } - noPatternMatch1 := func() *plantest.PlanSpec { - return &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(pushableFn1), - }), - plan.CreateLogicalNode("window", &window1m), - plan.CreateLogicalNode("min", minProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - }, - } - } - tests = append(tests, plantest.RuleTestCase{ - Name: "NoPatternMatch1", - Context: context.Background(), - Rules: rules, - Before: noPatternMatch1(), - NoChange: true, 
- }) - - // No pattern match 2 - // ReadRange -> window -> filter -> min -> NO-CHANGE - noPatternMatch2 := func() *plantest.PlanSpec { - return &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("window", &window1m), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(pushableFn1), - }), - plan.CreateLogicalNode("min", minProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - }, - } - } - tests = append(tests, plantest.RuleTestCase{ - Name: "NoPatternMatch2", - Context: context.Background(), - Rules: rules, - Before: noPatternMatch2(), - NoChange: true, - }) - - duplicate := func(column, as string) *universe.SchemaMutationProcedureSpec { - return &universe.SchemaMutationProcedureSpec{ - Mutations: []universe.SchemaMutation{ - &universe.DuplicateOpSpec{ - Column: column, - As: as, - }, - }, - } - } - - aggregateWindowPlan := func(window universe.WindowProcedureSpec, agg plan.NodeID, spec plan.ProcedureSpec, timeColumn string) *plantest.PlanSpec { - return &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("window1", &window), - plan.CreateLogicalNode(agg, spec), - plan.CreateLogicalNode("duplicate", duplicate(timeColumn, "_time")), - plan.CreateLogicalNode("window2", &windowInf), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {3, 4}, - }, - } - } - - aggregateWindowResult := func(proc plan.ProcedureKind, createEmpty bool, timeColumn string, successors ...plan.Node) *plantest.PlanSpec { - spec := &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregateByTime", &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - Aggregates: []plan.ProcedureKind{proc}, - WindowEvery: flux.ConvertDuration(60000000000 * time.Nanosecond), - CreateEmpty: createEmpty, - TimeColumn: timeColumn, - }), - }, - } - for i, successor := range successors { - spec.Nodes = append(spec.Nodes, successor) - spec.Edges = append(spec.Edges, [2]int{i, i + 1}) - } - return spec - } - - // Push down the duplicate |> window(every: inf) - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "AggregateWindowCount", - Rules: rules, - Before: aggregateWindowPlan(window1m, "count", countProcedureSpec(), "_stop"), - After: aggregateWindowResult("count", false, "_stop"), - }) - - // Push down the duplicate |> window(every: inf) using _start column - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "AggregateWindowCount", - Rules: rules, - Before: aggregateWindowPlan(window1m, "count", countProcedureSpec(), "_start"), - After: aggregateWindowResult("count", false, "_start"), - }) - - // Push down duplicate |> window(every: inf) with create empty. - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "AggregateWindowCountCreateEmpty", - Rules: rules, - Before: aggregateWindowPlan(windowCreateEmpty1m, "count", countProcedureSpec(), "_stop"), - After: aggregateWindowResult("count", true, "_stop"), - }) - - // Invalid duplicate column. 
- tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "AggregateWindowCountInvalidDuplicateColumn", - Rules: rules, - Before: aggregateWindowPlan(window1m, "count", countProcedureSpec(), "_value"), - After: simpleResult("count", false, - plan.CreatePhysicalNode("duplicate", duplicate("_value", "_time")), - plan.CreatePhysicalNode("window2", &windowInf), - ), - }) - - // Invalid duplicate as. - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "AggregateWindowCountInvalidDuplicateAs", - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("window1", &window1m), - plan.CreateLogicalNode("count", countProcedureSpec()), - plan.CreateLogicalNode("duplicate", duplicate("_stop", "time")), - plan.CreateLogicalNode("window2", &windowInf), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {3, 4}, - }, - }, - After: simpleResult("count", false, - plan.CreatePhysicalNode("duplicate", duplicate("_stop", "time")), - plan.CreatePhysicalNode("window2", &windowInf), - ), - }) - - // Invalid closing window. - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "AggregateWindowCountInvalidClosingWindow", - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("window1", &window1m), - plan.CreateLogicalNode("count", countProcedureSpec()), - plan.CreateLogicalNode("duplicate", duplicate("_stop", "_time")), - plan.CreateLogicalNode("window2", &window1m), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {3, 4}, - }, - }, - After: simpleResult("count", false, - plan.CreatePhysicalNode("duplicate", duplicate("_stop", "_time")), - plan.CreatePhysicalNode("window2", &window1m), - ), - }) - - // Invalid closing window with multiple problems. - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "AggregateWindowCountInvalidClosingWindowMultiple", - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("window1", &window1m), - plan.CreateLogicalNode("count", countProcedureSpec()), - plan.CreateLogicalNode("duplicate", duplicate("_stop", "_time")), - plan.CreateLogicalNode("window2", &badWindow3), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {3, 4}, - }, - }, - After: simpleResult("count", false, - plan.CreatePhysicalNode("duplicate", duplicate("_stop", "_time")), - plan.CreatePhysicalNode("window2", &badWindow3), - ), - }) - - // Invalid closing window with multiple problems. - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "AggregateWindowCountInvalidClosingWindowCreateEmpty", - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("window1", &window1m), - plan.CreateLogicalNode("count", countProcedureSpec()), - plan.CreateLogicalNode("duplicate", duplicate("_stop", "_time")), - plan.CreateLogicalNode("window2", &windowInfCreateEmpty), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {3, 4}, - }, - }, - After: simpleResult("count", false, - plan.CreatePhysicalNode("duplicate", duplicate("_stop", "_time")), - plan.CreatePhysicalNode("window2", &windowInfCreateEmpty), - ), - }) - - // Multiple matching patterns. 
- tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "AggregateWindowCountMultipleMatches", - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("window1", &window1m), - plan.CreateLogicalNode("count", countProcedureSpec()), - plan.CreateLogicalNode("duplicate", duplicate("_stop", "_time")), - plan.CreateLogicalNode("window2", &windowInf), - plan.CreateLogicalNode("duplicate2", duplicate("_stop", "_time")), - plan.CreateLogicalNode("window3", &windowInf), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {3, 4}, - {4, 5}, - {5, 6}, - }, - }, - After: aggregateWindowResult("count", false, "_stop", - plan.CreatePhysicalNode("duplicate2", duplicate("_stop", "_time")), - plan.CreatePhysicalNode("window3", &windowInf), - ), - }) - - rename := universe.SchemaMutationProcedureSpec{ - Mutations: []universe.SchemaMutation{ - &universe.RenameOpSpec{ - Columns: map[string]string{"_time": "time"}, - }, - }, - } - - // Wrong schema mutator. - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "AggregateWindowCountWrongSchemaMutator", - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("window1", &window1m), - plan.CreateLogicalNode("count", countProcedureSpec()), - plan.CreateLogicalNode("rename", &rename), - plan.CreateLogicalNode("window2", &windowInf), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {3, 4}, - }, - }, - After: simpleResult("count", false, - plan.CreatePhysicalNode("rename", &rename), - plan.CreatePhysicalNode("window2", &windowInf), - ), - }) - - for _, tc := range tests { - tc := tc - t.Run(tc.Name, func(t *testing.T) { - t.Parallel() - plantest.PhysicalRuleTestHelper(t, &tc, protocmp.Transform()) - }) - } -} - -func TestPushDownWindowForceAggregateRule(t *testing.T) { - rules := []plan.Rule{ - influxdb.PushDownWindowAggregateRule{}, - influxdb.PushDownWindowForceAggregateRule{}, - influxdb.PushDownWindowAggregateByTimeRule{}, - influxdb.PushDownAggregateWindowRule{}, - } - - createRangeSpec := func() *influxdb.ReadRangePhysSpec { - return &influxdb.ReadRangePhysSpec{ - Bucket: "test", - Bounds: flux.Bounds{ - Start: flux.Time{ - IsRelative: true, - Relative: -time.Hour, - }, - Stop: flux.Time{ - IsRelative: true, - }, - }, - } - } - - tests := []plantest.RuleTestCase{ - { - Name: "simple", - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - WindowEvery: flux.ConvertDuration(5 * time.Minute), - Aggregates: []plan.ProcedureKind{ - universe.MaxKind, - }, - }), - plan.CreatePhysicalNode("fill", &table.FillProcedureSpec{}), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("merged_ReadWindowAggregate_fill", &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - WindowEvery: flux.ConvertDuration(5 * time.Minute), - Aggregates: []plan.ProcedureKind{ - universe.MaxKind, - }, - ForceAggregate: true, - }), - }, - }, - }, - { - Name: "idempotent", - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - WindowEvery: 
flux.ConvertDuration(5 * time.Minute), - Aggregates: []plan.ProcedureKind{ - universe.MaxKind, - }, - }), - plan.CreatePhysicalNode("fill0", &table.FillProcedureSpec{}), - plan.CreatePhysicalNode("fill1", &table.FillProcedureSpec{}), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("merged_ReadWindowAggregate_fill0_fill1", &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - WindowEvery: flux.ConvertDuration(5 * time.Minute), - Aggregates: []plan.ProcedureKind{ - universe.MaxKind, - }, - ForceAggregate: true, - }), - }, - }, - }, - { - Name: "bare", - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - WindowEvery: flux.ConvertDuration(math.MaxInt64), - Aggregates: []plan.ProcedureKind{ - universe.MaxKind, - }, - }), - plan.CreatePhysicalNode("fill", &table.FillProcedureSpec{}), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - NoChange: true, - }, - } - - for _, tc := range tests { - t.Run(tc.Name, func(t *testing.T) { - plantest.PhysicalRuleTestHelper(t, &tc, protocmp.Transform()) - }) - } -} - -func TestTransposeGroupToWindowAggregateRule(t *testing.T) { - // Turn on all variants. - flagger := mock.NewFlagger(map[feature.Flag]interface{}{ - feature.GroupWindowAggregateTranspose(): true, - }) - - rules := []plan.Rule{ - influxdb.PushDownGroupRule{}, - influxdb.PushDownWindowAggregateRule{}, - influxdb.PushDownWindowAggregateByTimeRule{}, - influxdb.GroupWindowAggregateTransposeRule{}, - } - - withFlagger, _ := feature.Annotate(context.Background(), flagger) - - haveCaps := withFlagger - noCaps := context.Background() - - createRangeSpec := func() *influxdb.ReadRangePhysSpec { - return &influxdb.ReadRangePhysSpec{ - Bucket: "my-bucket", - Bounds: flux.Bounds{ - Start: fluxTime(5), - Stop: fluxTime(10), - }, - } - } - - group := func(mode flux.GroupMode, keys ...string) *universe.GroupProcedureSpec { - return &universe.GroupProcedureSpec{ - GroupMode: mode, - GroupKeys: keys, - } - } - - groupResult := func(keys ...string) *universe.GroupProcedureSpec { - keys = append(keys, execute.DefaultStartColLabel, execute.DefaultStopColLabel) - return group(flux.GroupModeBy, keys...) 
- } - - dur1m := values.ConvertDurationNsecs(60 * time.Second) - dur2m := values.ConvertDurationNsecs(120 * time.Second) - dur0 := values.ConvertDurationNsecs(0) - durNeg, _ := values.ParseDuration("-60s") - durInf := values.ConvertDurationNsecs(math.MaxInt64) - - window := func(dur values.Duration) universe.WindowProcedureSpec { - return universe.WindowProcedureSpec{ - Window: plan.WindowSpec{ - Every: dur, - Period: dur, - Offset: dur0, - Location: plan.Location{ - Name: "UTC", - }, - }, - TimeColumn: "_time", - StartColumn: "_start", - StopColumn: "_stop", - CreateEmpty: false, - } - } - - window1m := window(dur1m) - window1mCreateEmpty := window1m - window1mCreateEmpty.CreateEmpty = true - window2m := window(dur2m) - windowNeg := window(durNeg) - windowInf := window(durInf) - windowInfCreateEmpty := windowInf - windowInfCreateEmpty.CreateEmpty = true - - tests := make([]plantest.RuleTestCase, 0) - - // construct a simple plan with a specific window and aggregate function - simplePlan := func(window universe.WindowProcedureSpec, agg plan.NodeID, spec plan.ProcedureSpec, successors ...plan.Node) *plantest.PlanSpec { - pspec := &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("group", group(flux.GroupModeBy)), - plan.CreateLogicalNode("window", &window), - plan.CreateLogicalNode(agg, spec), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - }, - } - for i, successor := range successors { - pspec.Nodes = append(pspec.Nodes, successor) - pspec.Edges = append(pspec.Edges, [2]int{i + 3, i + 4}) - } - return pspec - } - - // construct a simple result - simpleResult := func(proc plan.ProcedureKind, every values.Duration, createEmpty bool, successors ...plan.Node) *plantest.PlanSpec { - spec := &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - Aggregates: []plan.ProcedureKind{proc}, - WindowEvery: every, - CreateEmpty: createEmpty, - }), - }, - } - for i, successor := range successors { - spec.Nodes = append(spec.Nodes, successor) - spec.Edges = append(spec.Edges, [2]int{i, i + 1}) - } - return spec - } - - duplicateSpec := func(column, as string) *universe.SchemaMutationProcedureSpec { - return &universe.SchemaMutationProcedureSpec{ - Mutations: []universe.SchemaMutation{ - &universe.DuplicateOpSpec{ - Column: execute.DefaultStopColLabel, - As: execute.DefaultTimeColLabel, - }, - }, - } - } - - // ReadRange -> group -> window -> min => ReadWindowAggregate -> group -> min - tests = append(tests, plantest.RuleTestCase{ - Context: haveCaps, - Name: "SimplePassMin", - Rules: rules, - Before: simplePlan(window1m, "min", minProcedureSpec()), - After: simpleResult("min", dur1m, false, - plan.CreatePhysicalNode("group", groupResult()), - plan.CreatePhysicalNode("min", minProcedureSpec()), - ), - }) - - // ReadRange -> group -> window -> max => ReadWindowAggregate -> group -> max - tests = append(tests, plantest.RuleTestCase{ - Context: haveCaps, - Name: "SimplePassMax", - Rules: rules, - Before: simplePlan(window1m, "max", maxProcedureSpec()), - After: simpleResult("max", dur1m, false, - plan.CreatePhysicalNode("group", groupResult()), - plan.CreatePhysicalNode("max", maxProcedureSpec()), - ), - }) - - // ReadRange -> group -> window -> mean => ReadGroup -> mean - // TODO(jsternberg): When we begin pushing down mean calls, - // this test will need to be updated to the appropriate pattern. 
- // The reason why this is included is because we cannot rewrite - // a grouped mean to use read window aggregate with mean. We - // will need this plan to be something different that doesn't - // exist yet so this is testing that we don't attempt to use - // this planner rule for mean. - tests = append(tests, plantest.RuleTestCase{ - Context: haveCaps, - Name: "SimplePassMean", - Rules: rules, - Before: simplePlan(window1m, "mean", meanProcedureSpec()), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - GroupMode: flux.GroupModeBy, - }), - plan.CreatePhysicalNode("window", &window1m), - plan.CreatePhysicalNode("mean", meanProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - }) - - // ReadRange -> group -> window -> count => ReadWindowAggregate -> group -> sum - tests = append(tests, plantest.RuleTestCase{ - Context: haveCaps, - Name: "SimplePassCount", - Rules: rules, - Before: simplePlan(window1m, "count", countProcedureSpec()), - After: simpleResult("count", dur1m, false, - plan.CreatePhysicalNode("group", groupResult()), - plan.CreatePhysicalNode("sum", sumProcedureSpec()), - ), - }) - - // ReadRange -> group -> window -> sum => ReadWindowAggregate -> group -> sum - tests = append(tests, plantest.RuleTestCase{ - Context: haveCaps, - Name: "SimplePassSum", - Rules: rules, - Before: simplePlan(window1m, "sum", sumProcedureSpec()), - After: simpleResult("sum", dur1m, false, - plan.CreatePhysicalNode("group", groupResult()), - plan.CreatePhysicalNode("sum", sumProcedureSpec()), - ), - }) - - // Rewrite with aggregate window - // ReadRange -> group -> window -> min -> duplicate -> window - tests = append(tests, plantest.RuleTestCase{ - Context: haveCaps, - Name: "WithSuccessor", - Rules: rules, - Before: simplePlan(window1mCreateEmpty, "min", minProcedureSpec(), - plan.CreateLogicalNode("duplicate", duplicateSpec("_stop", "_time")), - plan.CreateLogicalNode("window", &windowInf), - ), - After: simpleResult("min", dur1m, true, - plan.CreatePhysicalNode("group", groupResult()), - plan.CreatePhysicalNode("min", minProcedureSpec()), - plan.CreatePhysicalNode("duplicate", duplicateSpec("_stop", "_time")), - plan.CreatePhysicalNode("window", &windowInf), - ), - }) - - // ReadRange -> group(host) -> window -> min => ReadWindowAggregate -> group(host, _start, _stop) -> min - tests = append(tests, plantest.RuleTestCase{ - Context: haveCaps, - Name: "GroupByHostPassMin", - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("group", group(flux.GroupModeBy, "host")), - plan.CreateLogicalNode("window", &window1m), - plan.CreateLogicalNode("min", minProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - }, - }, - After: simpleResult("min", dur1m, false, - plan.CreatePhysicalNode("group", groupResult("host")), - plan.CreatePhysicalNode("min", minProcedureSpec()), - ), - }) - - // ReadRange -> group(_start, host) -> window -> min => ReadWindowAggregate -> group(_start, host, _stop) -> min - tests = append(tests, plantest.RuleTestCase{ - Context: haveCaps, - Name: "GroupByStartPassMin", - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("group", group(flux.GroupModeBy, "_start", "host")), - plan.CreateLogicalNode("window", &window1m), - 
plan.CreateLogicalNode("min", minProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - }, - }, - After: simpleResult("min", dur1m, false, - plan.CreatePhysicalNode("group", group(flux.GroupModeBy, "_start", "host", "_stop")), - plan.CreatePhysicalNode("min", minProcedureSpec()), - ), - }) - - // ReadRange -> group(host) -> window(offset: ...) -> min => ReadWindowAggregate -> group(host, _start, _stop) -> min - tests = append(tests, plantest.RuleTestCase{ - Context: haveCaps, - Name: "PositiveOffset", - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("group", group(flux.GroupModeBy, "host")), - plan.CreateLogicalNode("window", &universe.WindowProcedureSpec{ - Window: plan.WindowSpec{ - Every: dur2m, - Period: dur2m, - Offset: dur1m, - Location: plan.Location{ - Name: "UTC", - }, - }, - TimeColumn: "_time", - StartColumn: "_start", - StopColumn: "_stop", - }), - plan.CreateLogicalNode("min", minProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - Aggregates: []plan.ProcedureKind{universe.MinKind}, - WindowEvery: dur2m, - Offset: dur1m, - }), - plan.CreatePhysicalNode("group", group(flux.GroupModeBy, "host", "_start", "_stop")), - plan.CreatePhysicalNode("min", minProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - }) - - // Helper that adds a test with a simple plan that does not pass due to a - // specified bad window - simpleMinUnchanged := func(name string, window universe.WindowProcedureSpec) { - tests = append(tests, plantest.RuleTestCase{ - Context: haveCaps, - Name: name, - Rules: rules, - Before: simplePlan(window, "min", minProcedureSpec()), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - GroupMode: flux.GroupModeBy, - }), - plan.CreatePhysicalNode("window", &window), - plan.CreatePhysicalNode("min", minProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - }) - } - - // Condition not met: period not equal to every - badWindow1 := window1m - badWindow1.Window.Period = dur2m - simpleMinUnchanged("BadPeriod", badWindow1) - - // Condition not met: non-standard _time column - badWindow3 := window1m - badWindow3.TimeColumn = "_timmy" - simpleMinUnchanged("BadTime", badWindow3) - - // Condition not met: non-standard start column - badWindow4 := window1m - badWindow4.StartColumn = "_stooort" - simpleMinUnchanged("BadStart", badWindow4) - - // Condition not met: non-standard stop column - badWindow5 := window1m - badWindow5.StopColumn = "_stappp" - simpleMinUnchanged("BadStop", badWindow5) - - // Condition not met: non-UTC location - badWindow6 := window1m - badWindow6.Window.Location.Name = "America/Los_Angeles" - simpleMinUnchanged("BadLocation", badWindow6) - - // Condition not met: non-zero location offset - badWindow7 := window1m - badWindow7.Window.Location.Offset = values.ConvertDurationNsecs(time.Hour) - simpleMinUnchanged("BadLocationOffset", badWindow7) - - // Condition met: createEmpty is true. 
- windowCreateEmpty1m := window1m - windowCreateEmpty1m.CreateEmpty = true - tests = append(tests, plantest.RuleTestCase{ - Context: haveCaps, - Name: "CreateEmptyPassMin", - Rules: rules, - Before: simplePlan(window1mCreateEmpty, "min", minProcedureSpec()), - After: simpleResult("min", dur1m, true, - plan.CreatePhysicalNode("group", groupResult()), - plan.CreatePhysicalNode("min", minProcedureSpec()), - ), - }) - - // Condition not met: neg duration. - simpleMinUnchanged("WindowNeg", windowNeg) - - // Bad min column - // ReadRange -> group -> window -> min => ReadGroup -> window -> min - badMinSpec := universe.MinProcedureSpec{ - SelectorConfig: execute.SelectorConfig{Column: "_valmoo"}, - } - tests = append(tests, plantest.RuleTestCase{ - Name: "BadMinCol", - Context: haveCaps, - Rules: rules, - Before: simplePlan(window1m, "min", &badMinSpec), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - GroupMode: flux.GroupModeBy, - }), - plan.CreatePhysicalNode("window", &window1m), - plan.CreatePhysicalNode("min", &badMinSpec), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - }) - - // Bad max column - // ReadRange -> group -> window -> max => ReadGroup -> window -> max - badMaxSpec := universe.MaxProcedureSpec{ - SelectorConfig: execute.SelectorConfig{Column: "_valmoo"}, - } - tests = append(tests, plantest.RuleTestCase{ - Name: "BadMaxCol", - Context: haveCaps, - Rules: rules, - Before: simplePlan(window1m, "max", &badMaxSpec), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - GroupMode: flux.GroupModeBy, - }), - plan.CreatePhysicalNode("window", &window1m), - plan.CreatePhysicalNode("max", &badMaxSpec), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - }) - - // No match due to a collapsed node having a successor - // ReadRange -> group -> window -> min - // \-> min - tests = append(tests, plantest.RuleTestCase{ - Name: "CollapsedWithSuccessor1", - Context: haveCaps, - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("group", group(flux.GroupModeBy)), - plan.CreateLogicalNode("window", &window1m), - plan.CreateLogicalNode("min", minProcedureSpec()), - plan.CreateLogicalNode("min", minProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {2, 3}, - {2, 4}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - GroupMode: flux.GroupModeBy, - }), - plan.CreatePhysicalNode("window", &window1m), - plan.CreatePhysicalNode("min", minProcedureSpec()), - plan.CreatePhysicalNode("min", minProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {1, 3}, - }, - }, - }) - - // No match due to a collapsed node having a successor - // ReadRange -> group -> window -> min - // \-> window - tests = append(tests, plantest.RuleTestCase{ - Name: "CollapsedWithSuccessor2", - Context: haveCaps, - Rules: rules, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadRange", createRangeSpec()), - plan.CreateLogicalNode("group", group(flux.GroupModeBy)), - plan.CreateLogicalNode("window", &window1m), - plan.CreateLogicalNode("min", minProcedureSpec()), - plan.CreateLogicalNode("window", &window2m), - }, - Edges: [][2]int{ - {0, 
1}, - {1, 2}, - {2, 3}, - {1, 4}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - GroupMode: flux.GroupModeBy, - }), - plan.CreatePhysicalNode("window", &window1m), - plan.CreatePhysicalNode("min", minProcedureSpec()), - plan.CreatePhysicalNode("window", &window2m), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {0, 3}, - }, - }, - }) - - // Fail due to no capabilities present. - tests = append(tests, plantest.RuleTestCase{ - Context: noCaps, - Name: "FailNoCaps", - Rules: rules, - Before: simplePlan(window1m, "count", countProcedureSpec()), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadGroup", &influxdb.ReadGroupPhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - GroupMode: flux.GroupModeBy, - }), - plan.CreatePhysicalNode("window", &window1m), - plan.CreatePhysicalNode("count", countProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - }) - - for _, tc := range tests { - tc := tc - t.Run(tc.Name, func(t *testing.T) { - t.Parallel() - plantest.PhysicalRuleTestHelper(t, &tc, protocmp.Transform()) - }) - } -} - -func TestPushDownBareAggregateRule(t *testing.T) { - createRangeSpec := func() *influxdb.ReadRangePhysSpec { - return &influxdb.ReadRangePhysSpec{ - Bucket: "my-bucket", - Bounds: flux.Bounds{ - Start: fluxTime(5), - Stop: fluxTime(10), - }, - } - } - - readWindowAggregate := func(proc plan.ProcedureKind) *influxdb.ReadWindowAggregatePhysSpec { - return &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: *createRangeSpec(), - WindowEvery: flux.ConvertDuration(math.MaxInt64 * time.Nanosecond), - Aggregates: []plan.ProcedureKind{proc}, - } - } - - testcases := []plantest.RuleTestCase{ - { - // ReadRange -> count => ReadWindowAggregate - Context: context.Background(), - Name: "push down count", - Rules: []plan.Rule{influxdb.PushDownBareAggregateRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", createRangeSpec()), - plan.CreatePhysicalNode("count", countProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", readWindowAggregate(universe.CountKind)), - }, - }, - }, - { - // ReadRange -> sum => ReadWindowAggregate - Context: context.Background(), - Name: "push down sum", - Rules: []plan.Rule{influxdb.PushDownBareAggregateRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", createRangeSpec()), - plan.CreatePhysicalNode("sum", sumProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", readWindowAggregate(universe.SumKind)), - }, - }, - }, - { - // ReadRange -> first => ReadWindowAggregate - Context: context.Background(), - Name: "push down first", - Rules: []plan.Rule{influxdb.PushDownBareAggregateRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", createRangeSpec()), - plan.CreatePhysicalNode("first", firstProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", readWindowAggregate(universe.FirstKind)), - }, - }, - }, - { - // ReadRange -> last => ReadWindowAggregate - Context: context.Background(), - Name: "push down last", - Rules: 
[]plan.Rule{influxdb.PushDownBareAggregateRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadRange", createRangeSpec()), - plan.CreatePhysicalNode("last", lastProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadWindowAggregate", readWindowAggregate(universe.LastKind)), - }, - }, - }, - } - - for _, tc := range testcases { - tc := tc - t.Run(tc.Name, func(t *testing.T) { - t.Parallel() - plantest.PhysicalRuleTestHelper(t, &tc, protocmp.Transform()) - }) - } -} - -// -// Group Aggregate Testing -// -func TestPushDownGroupAggregateRule(t *testing.T) { - readGroupAgg := func(aggregateMethod string) *influxdb.ReadGroupPhysSpec { - return &influxdb.ReadGroupPhysSpec{ - ReadRangePhysSpec: influxdb.ReadRangePhysSpec{ - Bucket: "my-bucket", - Bounds: flux.Bounds{ - Start: fluxTime(5), - Stop: fluxTime(10), - }, - }, - GroupMode: flux.GroupModeBy, - GroupKeys: []string{"_measurement", "tag0", "tag1"}, - AggregateMethod: aggregateMethod, - } - } - readGroup := func() *influxdb.ReadGroupPhysSpec { - return readGroupAgg("") - } - - tests := make([]plantest.RuleTestCase, 0) - - // construct a simple plan with a specific aggregate - simplePlanWithAgg := func(agg plan.NodeID, spec plan.ProcedureSpec) *plantest.PlanSpec { - return &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadGroup", readGroup()), - plan.CreateLogicalNode(agg, spec), - }, - Edges: [][2]int{ - {0, 1}, - }, - } - } - - minProcedureSpec := func() *universe.MinProcedureSpec { - return &universe.MinProcedureSpec{ - SelectorConfig: execute.SelectorConfig{ - Column: execute.DefaultTimeColLabel, - }, - } - } - minProcedureSpecVal := func() *universe.MinProcedureSpec { - return &universe.MinProcedureSpec{ - SelectorConfig: execute.SelectorConfig{ - Column: execute.DefaultValueColLabel, - }, - } - } - maxProcedureSpecVal := func() *universe.MaxProcedureSpec { - return &universe.MaxProcedureSpec{ - SelectorConfig: execute.SelectorConfig{ - Column: execute.DefaultValueColLabel, - }, - } - } - countProcedureSpec := func() *universe.CountProcedureSpec { - return &universe.CountProcedureSpec{ - SimpleAggregateConfig: execute.DefaultSimpleAggregateConfig, - } - } - sumProcedureSpec := func() *universe.SumProcedureSpec { - return &universe.SumProcedureSpec{ - SimpleAggregateConfig: execute.DefaultSimpleAggregateConfig, - } - } - firstProcedureSpec := func() *universe.FirstProcedureSpec { - return &universe.FirstProcedureSpec{ - SelectorConfig: execute.DefaultSelectorConfig, - } - } - lastProcedureSpec := func() *universe.LastProcedureSpec { - return &universe.LastProcedureSpec{ - SelectorConfig: execute.DefaultSelectorConfig, - } - } - - // ReadGroup() -> count => ReadGroup(count) - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "RewriteGroupCount", - Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, - Before: simplePlanWithAgg("count", countProcedureSpec()), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadGroupAggregate", readGroupAgg("count")), - }, - }, - }) - - // ReadGroup() -> sum => ReadGroup(sum) - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "RewriteGroupSum", - Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, - Before: simplePlanWithAgg("sum", sumProcedureSpec()), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadGroupAggregate", 
readGroupAgg("sum")), - }, - }, - }) - - // ReadGroup() -> first => ReadGroup(first) - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "RewriteGroupFirst", - Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, - Before: simplePlanWithAgg("first", firstProcedureSpec()), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadGroupAggregate", readGroupAgg("first")), - }, - }, - }) - - // ReadGroup() -> last => ReadGroup(last) - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "RewriteGroupLast", - Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, - Before: simplePlanWithAgg("last", lastProcedureSpec()), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadGroupAggregate", readGroupAgg("last")), - }, - }, - }) - - // ReadGroup() -> max => ReadGroup(max) - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "RewriteGroupMax", - Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, - Before: simplePlanWithAgg("max", maxProcedureSpecVal()), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadGroupAggregate", readGroupAgg("max")), - }, - }, - }) - - // ReadGroup() -> min => ReadGroup(min) - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "RewriteGroupMin", - Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, - Before: simplePlanWithAgg("min", minProcedureSpecVal()), - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadGroupAggregate", readGroupAgg("min")), - }, - }, - }) - - // Rewrite with successors - // ReadGroup() -> count -> sum {2} => ReadGroup(count) -> sum {2} - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "WithSuccessor1", - Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadGroup", readGroup()), - plan.CreateLogicalNode("count", countProcedureSpec()), - plan.CreateLogicalNode("sum", sumProcedureSpec()), - plan.CreateLogicalNode("sum", sumProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - {1, 3}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadGroupAggregate", readGroupAgg("count")), - plan.CreateLogicalNode("sum", sumProcedureSpec()), - plan.CreateLogicalNode("sum", sumProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - }) - - // Cannot replace a ReadGroup that already has an aggregate. This exercises - // the check that ReadGroup aggregate is not set. 
- // ReadGroup() -> count -> count => ReadGroup(count) -> count - tests = append(tests, plantest.RuleTestCase{ - Context: context.Background(), - Name: "WithSuccessor2", - Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadGroup", readGroup()), - plan.CreateLogicalNode("count", countProcedureSpec()), - plan.CreateLogicalNode("count", countProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("ReadGroupAggregate", readGroupAgg("count")), - plan.CreateLogicalNode("count", countProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - }, - }, - }) - - // Bad count column - // ReadGroup -> count => NO-CHANGE - tests = append(tests, plantest.RuleTestCase{ - Name: "BadCountCol", - Context: context.Background(), - Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, - Before: simplePlanWithAgg("count", &universe.CountProcedureSpec{ - SimpleAggregateConfig: execute.SimpleAggregateConfig{Columns: []string{"_valmoo"}}, - }), - NoChange: true, - }) - - // No match due to a collapsed node having a successor - // ReadGroup -> count - // \-> min - tests = append(tests, plantest.RuleTestCase{ - Name: "CollapsedWithSuccessor", - Context: context.Background(), - Rules: []plan.Rule{influxdb.PushDownGroupAggregateRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadGroup", readGroup()), - plan.CreateLogicalNode("count", countProcedureSpec()), - plan.CreateLogicalNode("min", minProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {0, 2}, - }, - }, - NoChange: true, - }) - - // No pattern match - // ReadGroup -> filter -> min -> NO-CHANGE - pushableFn1 := executetest.FunctionExpression(t, `(r) => true`) - - makeResolvedFilterFn := func(expr *semantic.FunctionExpression) interpreter.ResolvedFunction { - return interpreter.ResolvedFunction{ - Scope: nil, - Fn: expr, - } - } - noPatternMatch1 := func() *plantest.PlanSpec { - return &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreateLogicalNode("ReadGroup", readGroup()), - plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{ - Fn: makeResolvedFilterFn(pushableFn1), - }), - plan.CreateLogicalNode("count", countProcedureSpec()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - } - } - tests = append(tests, plantest.RuleTestCase{ - Name: "NoPatternMatch", - Context: context.Background(), - Rules: []plan.Rule{influxdb.PushDownWindowAggregateRule{}}, - Before: noPatternMatch1(), - NoChange: true, - }) - - for _, tc := range tests { - tc := tc - t.Run(tc.Name, func(t *testing.T) { - t.Parallel() - plantest.PhysicalRuleTestHelper(t, &tc, protocmp.Transform()) - }) - } -} - -func TestMergeFilterRule(t *testing.T) { - from := &fluxinfluxdb.FromProcedureSpec{} - filter0 := func() *universe.FilterProcedureSpec { - return &universe.FilterProcedureSpec{ - Fn: interpreter.ResolvedFunction{ - Fn: executetest.FunctionExpression(t, `(r) => r._field == "usage_idle"`), - }, - } - } - filter1 := func() *universe.FilterProcedureSpec { - return &universe.FilterProcedureSpec{ - Fn: interpreter.ResolvedFunction{ - Fn: executetest.FunctionExpression(t, `(r) => r._measurement == "cpu"`), - }, - } - } - filterMerge := func() *universe.FilterProcedureSpec { - return &universe.FilterProcedureSpec{ - Fn: interpreter.ResolvedFunction{ - Fn: executetest.FunctionExpression(t, `(r) => r._measurement == "cpu" and r._field == "usage_idle"`), - }, - } - } - - 
testcases := []plantest.RuleTestCase{ - { - Context: context.Background(), - Name: "merge filter on", - Rules: []plan.Rule{influxdb.MergeFiltersRule{}}, - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("from", from), - plan.CreatePhysicalNode("filter0", filter0()), - plan.CreatePhysicalNode("filter1", filter1()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - After: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("from", from), - plan.CreatePhysicalNode("filter0", filterMerge()), - }, - Edges: [][2]int{{0, 1}}, - }, - }, - { - Context: context.Background(), - Name: "merge filter off", - Before: &plantest.PlanSpec{ - Nodes: []plan.Node{ - plan.CreatePhysicalNode("from", from), - plan.CreatePhysicalNode("filter0", filter0()), - plan.CreatePhysicalNode("filter1", filter1()), - }, - Edges: [][2]int{ - {0, 1}, - {1, 2}, - }, - }, - NoChange: true, - }, - } - for _, tc := range testcases { - tc := tc - t.Run(tc.Name, func(t *testing.T) { - t.Parallel() - plantest.LogicalRuleTestHelper(t, &tc, protocmp.Transform()) - }) - } -} diff --git a/query/stdlib/influxdata/influxdb/schema_test.flux b/query/stdlib/influxdata/influxdb/schema_test.flux deleted file mode 100644 index 9052e8cbef7..00000000000 --- a/query/stdlib/influxdata/influxdb/schema_test.flux +++ /dev/null @@ -1,33 +0,0 @@ -package influxdb_test - - -import "testing/expect" - -testcase push_down_tagValues extends "flux/influxdata/influxdb/schema/schema_test.tagValues" { - expect.planner(rules: ["PushDownReadTagValuesRule": 1]) - super() -} -testcase push_down_measurementTagValues extends "flux/influxdata/influxdb/schema/schema_test.measurementTagValues" { - expect.planner(rules: ["PushDownReadTagValuesRule": 1]) - super() -} -testcase push_down_tagKeys extends "flux/influxdata/influxdb/schema/schema_test.tagKeys" { - expect.planner(rules: ["PushDownReadTagKeysRule": 1]) - super() -} -testcase push_down_measurementTagKeys extends "flux/influxdata/influxdb/schema/schema_test.measurementTagKeys" { - expect.planner(rules: ["PushDownReadTagKeysRule": 1]) - super() -} -testcase push_down_fieldKeys extends "flux/influxdata/influxdb/schema/schema_test.fieldKeys" { - expect.planner(rules: ["PushDownReadTagValuesRule": 1]) - super() -} -testcase push_down_measurementFieldKeys extends "flux/influxdata/influxdb/schema/schema_test.measurementFieldKeys" { - expect.planner(rules: ["PushDownReadTagValuesRule": 1]) - super() -} -testcase push_down_measurements extends "flux/influxdata/influxdb/schema/schema_test.measurements" { - expect.planner(rules: ["PushDownReadTagValuesRule": 1]) - super() -} diff --git a/query/stdlib/influxdata/influxdb/source.go b/query/stdlib/influxdata/influxdb/source.go deleted file mode 100644 index e0ba60d3a3e..00000000000 --- a/query/stdlib/influxdata/influxdb/source.go +++ /dev/null @@ -1,486 +0,0 @@ -package influxdb - -import ( - "context" - "errors" - "time" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/codes" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/metadata" - "github.com/influxdata/flux/plan" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -func init() { - execute.RegisterSource(ReadRangePhysKind, createReadFilterSource) - 
execute.RegisterSource(ReadGroupPhysKind, createReadGroupSource) - execute.RegisterSource(ReadWindowAggregatePhysKind, createReadWindowAggregateSource) - execute.RegisterSource(ReadTagKeysPhysKind, createReadTagKeysSource) - execute.RegisterSource(ReadTagValuesPhysKind, createReadTagValuesSource) -} - -type runner interface { - run(ctx context.Context) error -} - -type Source struct { - execute.ExecutionNode - id execute.DatasetID - ts []execute.Transformation - - alloc memory.Allocator - stats cursors.CursorStats - - runner runner - - m *metrics - orgID platform2.ID - op string -} - -func (s *Source) Run(ctx context.Context) { - labelValues := s.m.getLabelValues(ctx, s.orgID, s.op) - start := time.Now() - err := s.runner.run(ctx) - s.m.recordMetrics(labelValues, start) - for _, t := range s.ts { - t.Finish(s.id, err) - } -} - -func (s *Source) AddTransformation(t execute.Transformation) { - s.ts = append(s.ts, t) -} - -func (s *Source) Metadata() metadata.Metadata { - return metadata.Metadata{ - "influxdb/scanned-bytes": []interface{}{s.stats.ScannedBytes}, - "influxdb/scanned-values": []interface{}{s.stats.ScannedValues}, - } -} - -func (s *Source) processTables(ctx context.Context, tables query.TableIterator, watermark execute.Time) error { - err := tables.Do(func(tbl flux.Table) error { - return s.processTable(ctx, tbl) - }) - if err != nil { - return err - } - - // Track the number of bytes and values scanned. - stats := tables.Statistics() - s.stats.ScannedValues += stats.ScannedValues - s.stats.ScannedBytes += stats.ScannedBytes - - for _, t := range s.ts { - if err := t.UpdateWatermark(s.id, watermark); err != nil { - return err - } - } - return nil -} - -func (s *Source) processTable(ctx context.Context, tbl flux.Table) error { - if len(s.ts) == 0 { - tbl.Done() - return nil - } else if len(s.ts) == 1 { - return s.ts[0].Process(s.id, tbl) - } - - // There is more than one transformation so we need to - // copy the table for each transformation. 
- bufTable, err := execute.CopyTable(tbl) - if err != nil { - return err - } - defer bufTable.Done() - - for _, t := range s.ts { - if err := t.Process(s.id, bufTable.Copy()); err != nil { - return err - } - } - return nil -} - -type readFilterSource struct { - Source - reader query.StorageReader - readSpec query.ReadFilterSpec -} - -func ReadFilterSource(id execute.DatasetID, r query.StorageReader, readSpec query.ReadFilterSpec, a execute.Administration) execute.Source { - src := new(readFilterSource) - - src.id = id - src.alloc = a.Allocator() - - src.reader = r - src.readSpec = readSpec - - src.m = GetStorageDependencies(a.Context()).FromDeps.Metrics - src.orgID = readSpec.OrganizationID - src.op = "readFilter" - - src.runner = src - return src -} - -func (s *readFilterSource) run(ctx context.Context) error { - stop := s.readSpec.Bounds.Stop - tables, err := s.reader.ReadFilter( - ctx, - s.readSpec, - s.alloc, - ) - if err != nil { - return err - } - return s.processTables(ctx, tables, stop) -} - -func createReadFilterSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) { - span, ctx := tracing.StartSpanFromContext(a.Context()) - defer span.Finish() - - spec := s.(*ReadRangePhysSpec) - - bounds := a.StreamContext().Bounds() - if bounds == nil { - return nil, &flux.Error{ - Code: codes.Internal, - Msg: "nil bounds passed to from", - } - } - - deps := GetStorageDependencies(a.Context()).FromDeps - - req := query.RequestFromContext(a.Context()) - if req == nil { - return nil, &flux.Error{ - Code: codes.Internal, - Msg: "missing request on context", - } - } - - orgID := req.OrganizationID - bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup) - if err != nil { - return nil, err - } - - return ReadFilterSource( - id, - deps.Reader, - query.ReadFilterSpec{ - OrganizationID: orgID, - BucketID: bucketID, - Bounds: *bounds, - Predicate: spec.Filter, - }, - a, - ), nil -} - -type readGroupSource struct { - Source - reader query.StorageReader - readSpec query.ReadGroupSpec -} - -func ReadGroupSource(id execute.DatasetID, r query.StorageReader, readSpec query.ReadGroupSpec, a execute.Administration) execute.Source { - src := new(readGroupSource) - - src.id = id - src.alloc = a.Allocator() - - src.reader = r - src.readSpec = readSpec - - src.m = GetStorageDependencies(a.Context()).FromDeps.Metrics - src.orgID = readSpec.OrganizationID - src.op = readSpec.Name() - - src.runner = src - return src -} - -func (s *readGroupSource) run(ctx context.Context) error { - stop := s.readSpec.Bounds.Stop - tables, err := s.reader.ReadGroup( - ctx, - s.readSpec, - s.alloc, - ) - if err != nil { - return err - } - return s.processTables(ctx, tables, stop) -} - -func createReadGroupSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) { - span, ctx := tracing.StartSpanFromContext(a.Context()) - defer span.Finish() - - spec := s.(*ReadGroupPhysSpec) - - bounds := a.StreamContext().Bounds() - if bounds == nil { - return nil, errors.New("nil bounds passed to from") - } - - deps := GetStorageDependencies(a.Context()).FromDeps - - req := query.RequestFromContext(a.Context()) - if req == nil { - return nil, errors.New("missing request on context") - } - - orgID := req.OrganizationID - bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup) - if err != nil { - return nil, err - } - - return ReadGroupSource( - id, - deps.Reader, - query.ReadGroupSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - 
OrganizationID: orgID, - BucketID: bucketID, - Bounds: *bounds, - Predicate: spec.Filter, - }, - GroupMode: query.ToGroupMode(spec.GroupMode), - GroupKeys: spec.GroupKeys, - AggregateMethod: spec.AggregateMethod, - }, - a, - ), nil -} - -type readWindowAggregateSource struct { - Source - reader query.StorageReader - readSpec query.ReadWindowAggregateSpec -} - -func ReadWindowAggregateSource(id execute.DatasetID, r query.StorageReader, readSpec query.ReadWindowAggregateSpec, a execute.Administration) execute.Source { - src := new(readWindowAggregateSource) - - src.id = id - src.alloc = a.Allocator() - - src.reader = r - src.readSpec = readSpec - - src.m = GetStorageDependencies(a.Context()).FromDeps.Metrics - src.orgID = readSpec.OrganizationID - src.op = readSpec.Name() - - src.runner = src - return src -} - -func (s *readWindowAggregateSource) run(ctx context.Context) error { - stop := s.readSpec.Bounds.Stop - tables, err := s.reader.ReadWindowAggregate( - ctx, - s.readSpec, - s.alloc, - ) - if err != nil { - return err - } - return s.processTables(ctx, tables, stop) -} - -func createReadWindowAggregateSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) { - span, ctx := tracing.StartSpanFromContext(a.Context()) - defer span.Finish() - - spec := s.(*ReadWindowAggregatePhysSpec) - - bounds := a.StreamContext().Bounds() - if bounds == nil { - return nil, &flux.Error{ - Code: codes.Internal, - Msg: "nil bounds passed to from", - } - } - - deps := GetStorageDependencies(a.Context()).FromDeps - req := query.RequestFromContext(a.Context()) - if req == nil { - return nil, &flux.Error{ - Code: codes.Internal, - Msg: "missing request on context", - } - } - - orgID := req.OrganizationID - bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup) - if err != nil { - return nil, err - } - - return ReadWindowAggregateSource( - id, - deps.Reader, - query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: orgID, - BucketID: bucketID, - Bounds: *bounds, - Predicate: spec.Filter, - }, - Window: execute.Window{ - Every: spec.WindowEvery, - Period: spec.WindowEvery, - Offset: spec.Offset, - }, - Aggregates: spec.Aggregates, - CreateEmpty: spec.CreateEmpty, - TimeColumn: spec.TimeColumn, - ForceAggregate: spec.ForceAggregate, - }, - a, - ), nil -} - -func createReadTagKeysSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) { - span, ctx := tracing.StartSpanFromContext(a.Context()) - defer span.Finish() - - spec := prSpec.(*ReadTagKeysPhysSpec) - deps := GetStorageDependencies(a.Context()).FromDeps - req := query.RequestFromContext(a.Context()) - if req == nil { - return nil, errors.New("missing request on context") - } - orgID := req.OrganizationID - - bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup) - if err != nil { - return nil, err - } - - bounds := a.StreamContext().Bounds() - return ReadTagKeysSource( - dsid, - deps.Reader, - query.ReadTagKeysSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: orgID, - BucketID: bucketID, - Bounds: *bounds, - Predicate: spec.Filter, - }, - }, - a, - ), nil -} - -type readTagKeysSource struct { - Source - - reader query.StorageReader - readSpec query.ReadTagKeysSpec -} - -func ReadTagKeysSource(id execute.DatasetID, r query.StorageReader, readSpec query.ReadTagKeysSpec, a execute.Administration) execute.Source { - src := &readTagKeysSource{ - reader: r, - readSpec: readSpec, - } - src.id = id - 
src.alloc = a.Allocator() - - src.m = GetStorageDependencies(a.Context()).FromDeps.Metrics - src.orgID = readSpec.OrganizationID - src.op = "readTagKeys" - - src.runner = src - return src -} - -func (s *readTagKeysSource) run(ctx context.Context) error { - ti, err := s.reader.ReadTagKeys(ctx, s.readSpec, s.alloc) - if err != nil { - return err - } - return s.processTables(ctx, ti, execute.Now()) -} - -func createReadTagValuesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) { - span, ctx := tracing.StartSpanFromContext(a.Context()) - defer span.Finish() - - spec := prSpec.(*ReadTagValuesPhysSpec) - deps := GetStorageDependencies(a.Context()).FromDeps - req := query.RequestFromContext(a.Context()) - if req == nil { - return nil, errors.New("missing request on context") - } - orgID := req.OrganizationID - - bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup) - if err != nil { - return nil, err - } - - bounds := a.StreamContext().Bounds() - return ReadTagValuesSource( - dsid, - deps.Reader, - query.ReadTagValuesSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: orgID, - BucketID: bucketID, - Bounds: *bounds, - Predicate: spec.Filter, - }, - TagKey: spec.TagKey, - }, - a, - ), nil -} - -type readTagValuesSource struct { - Source - - reader query.StorageReader - readSpec query.ReadTagValuesSpec -} - -func ReadTagValuesSource(id execute.DatasetID, r query.StorageReader, readSpec query.ReadTagValuesSpec, a execute.Administration) execute.Source { - src := &readTagValuesSource{ - reader: r, - readSpec: readSpec, - } - src.id = id - src.alloc = a.Allocator() - - src.m = GetStorageDependencies(a.Context()).FromDeps.Metrics - src.orgID = readSpec.OrganizationID - src.op = "readTagValues" - - src.runner = src - return src -} - -func (s *readTagValuesSource) run(ctx context.Context) error { - ti, err := s.reader.ReadTagValues(ctx, s.readSpec, s.alloc) - if err != nil { - return err - } - return s.processTables(ctx, ti, execute.Now()) -} diff --git a/query/stdlib/influxdata/influxdb/source_internal_test.go b/query/stdlib/influxdata/influxdb/source_internal_test.go deleted file mode 100644 index a8afefe465a..00000000000 --- a/query/stdlib/influxdata/influxdb/source_internal_test.go +++ /dev/null @@ -1,10 +0,0 @@ -package influxdb - -import ( - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/plan" -) - -func CreateReadWindowAggregateSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) { - return createReadWindowAggregateSource(s, id, a) -} diff --git a/query/stdlib/influxdata/influxdb/source_test.go b/query/stdlib/influxdata/influxdb/source_test.go deleted file mode 100644 index e6ec6cf67be..00000000000 --- a/query/stdlib/influxdata/influxdb/source_test.go +++ /dev/null @@ -1,308 +0,0 @@ -package influxdb_test - -import ( - "context" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/flux" - "github.com/influxdata/flux/dependencies/dependenciestest" - "github.com/influxdata/flux/dependency" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/execute/executetest" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/plan" - "github.com/influxdata/flux/stdlib/universe" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/prom/promtest" - 
"github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" - "github.com/influxdata/influxdb/v2/tsdb/cursors" - "github.com/influxdata/influxdb/v2/uuid" - "github.com/prometheus/client_golang/prometheus" -) - -type mockTableIterator struct { -} - -func (mockTableIterator) Do(f func(flux.Table) error) error { - return nil -} - -func (mockTableIterator) Statistics() cursors.CursorStats { - return cursors.CursorStats{} -} - -type mockReader struct { -} - -func (mockReader) ReadFilter(ctx context.Context, spec query.ReadFilterSpec, alloc memory.Allocator) (query.TableIterator, error) { - return &mockTableIterator{}, nil -} - -func (mockReader) ReadGroup(ctx context.Context, spec query.ReadGroupSpec, alloc memory.Allocator) (query.TableIterator, error) { - return &mockTableIterator{}, nil -} - -func (mockReader) ReadTagKeys(ctx context.Context, spec query.ReadTagKeysSpec, alloc memory.Allocator) (query.TableIterator, error) { - return &mockTableIterator{}, nil -} - -func (mockReader) ReadTagValues(ctx context.Context, spec query.ReadTagValuesSpec, alloc memory.Allocator) (query.TableIterator, error) { - return &mockTableIterator{}, nil -} - -func (mockReader) ReadWindowAggregate(ctx context.Context, spec query.ReadWindowAggregateSpec, alloc memory.Allocator) (query.TableIterator, error) { - return &mockTableIterator{}, nil -} - -func (mockReader) ReadSeriesCardinality(ctx context.Context, spec query.ReadSeriesCardinalitySpec, alloc memory.Allocator) (query.TableIterator, error) { - return &mockTableIterator{}, nil -} - -func (mockReader) SupportReadSeriesCardinality(ctx context.Context) bool { - return false -} - -func (mockReader) Close() { -} - -type mockAdministration struct { - Ctx context.Context - StreamBounds *execute.Bounds -} - -func (a mockAdministration) Context() context.Context { - return a.Ctx -} - -func (mockAdministration) ResolveTime(qt flux.Time) execute.Time { - return 0 -} - -func (a mockAdministration) StreamContext() execute.StreamContext { - return a -} - -func (a mockAdministration) Bounds() *execute.Bounds { - return a.StreamBounds -} - -func (mockAdministration) Allocator() memory.Allocator { - return memory.DefaultAllocator -} - -func (mockAdministration) Parents() []execute.DatasetID { - return nil -} - -func (mockAdministration) ParallelOpts() execute.ParallelOpts { - panic("implement me") -} - -const ( - labelKey = "key1" - labelValue = "value1" -) - -// TestMetrics ensures that the metrics collected by an influxdb source are recorded. -func TestMetrics(t *testing.T) { - reg := prometheus.NewRegistry() - - orgID, err := platform2.IDFromString("deadbeefbeefdead") - if err != nil { - t.Fatal(err) - } - - deps := influxdb.Dependencies{ - FluxDeps: dependenciestest.Default(), - StorageDeps: influxdb.StorageDependencies{ - FromDeps: influxdb.FromDependencies{ - Reader: &mockReader{}, - BucketLookup: mock.BucketLookup{}, - OrganizationLookup: mock.OrganizationLookup{}, - Metrics: influxdb.NewMetrics([]string{labelKey}), - }, - }, - } - reg.MustRegister(deps.PrometheusCollectors()...) - - // This key/value pair added to the context will appear as a label in the prometheus histogram. 
- ctx := context.WithValue(context.Background(), labelKey, labelValue) //lint:ignore SA1029 this is a temporary ignore until we have time to create an appropriate type - // Injecting deps - ctx, span := dependency.Inject(ctx, deps) - defer span.Finish() - a := &mockAdministration{Ctx: ctx} - rfs := influxdb.ReadFilterSource( - execute.DatasetID(uuid.FromTime(time.Now())), - &mockReader{}, - query.ReadFilterSpec{ - OrganizationID: *orgID, - }, - a, - ) - rfs.Run(ctx) - - // Verify that we sampled the execution of the source by checking the prom registry. - mfs := promtest.MustGather(t, reg) - expectedLabels := map[string]string{ - "org": "deadbeefbeefdead", - "key1": "value1", - "op": "readFilter", - } - m := promtest.MustFindMetric(t, mfs, "query_influxdb_source_read_request_duration_seconds", expectedLabels) - if want, got := uint64(1), *(m.Histogram.SampleCount); want != got { - t.Fatalf("expected sample count of %v, got %v", want, got) - } -} - -type TableIterator struct { - Tables []*executetest.Table -} - -func (t *TableIterator) Do(f func(flux.Table) error) error { - for _, table := range t.Tables { - if err := f(table); err != nil { - return err - } - } - return nil -} - -func (t *TableIterator) Statistics() cursors.CursorStats { - return cursors.CursorStats{} -} - -func TestReadWindowAggregateSource(t *testing.T) { - t.Skip("test panics in CI; issue: https://github.com/influxdata/influxdb/issues/17847") - - orgID, bucketID := platform2.ID(1), platform2.ID(2) - executetest.RunSourceHelper(t, - context.Background(), - []*executetest.Table{ - { - ColMeta: []flux.ColMeta{ - {Label: "_time", Type: flux.TTime}, - {Label: "_measurement", Type: flux.TString}, - {Label: "_field", Type: flux.TString}, - {Label: "host", Type: flux.TString}, - {Label: "_value", Type: flux.TFloat}, - }, - KeyCols: []string{"_measurement", "_field", "host"}, - Data: [][]interface{}{ - {execute.Time(0), "cpu", "usage_user", "server01", 2.0}, - {execute.Time(10), "cpu", "usage_user", "server01", 1.5}, - {execute.Time(20), "cpu", "usage_user", "server01", 5.0}, - }, - }, - { - ColMeta: []flux.ColMeta{ - {Label: "_time", Type: flux.TTime}, - {Label: "_measurement", Type: flux.TString}, - {Label: "_field", Type: flux.TString}, - {Label: "host", Type: flux.TString}, - {Label: "_value", Type: flux.TFloat}, - }, - KeyCols: []string{"_measurement", "_field", "host"}, - Data: [][]interface{}{ - {execute.Time(0), "cpu", "usage_system", "server01", 8.0}, - {execute.Time(10), "cpu", "usage_system", "server01", 3.0}, - {execute.Time(20), "cpu", "usage_system", "server01", 6.0}, - }, - }, - }, - nil, - func(id execute.DatasetID) execute.Source { - pspec := &influxdb.ReadWindowAggregatePhysSpec{ - ReadRangePhysSpec: influxdb.ReadRangePhysSpec{ - BucketID: bucketID.String(), - }, - WindowEvery: flux.ConvertDuration(10 * time.Nanosecond), - Aggregates: []plan.ProcedureKind{ - universe.SumKind, - }, - } - reader := &mock.StorageReader{ - ReadWindowAggregateFn: func(ctx context.Context, spec query.ReadWindowAggregateSpec, alloc memory.Allocator) (query.TableIterator, error) { - if want, got := orgID, spec.OrganizationID; want != got { - t.Errorf("unexpected organization id -want/+got:\n\t- %s\n\t+ %s", want, got) - } - if want, got := bucketID, spec.BucketID; want != got { - t.Errorf("unexpected bucket id -want/+got:\n\t- %s\n\t+ %s", want, got) - } - if want, got := (execute.Bounds{Start: 0, Stop: 30}), spec.Bounds; want != got { - t.Errorf("unexpected bounds -want/+got:\n%s", cmp.Diff(want, got)) - } - if want, got := 
int64(10), spec.WindowEvery; want != got { - t.Errorf("unexpected window every value -want/+got:\n\t- %d\n\t+ %d", want, got) - } - if want, got := []plan.ProcedureKind{universe.SumKind}, spec.Aggregates; !cmp.Equal(want, got) { - t.Errorf("unexpected aggregates -want/+got:\n%s", cmp.Diff(want, got)) - } - return &TableIterator{ - Tables: []*executetest.Table{ - { - ColMeta: []flux.ColMeta{ - {Label: "_time", Type: flux.TTime}, - {Label: "_measurement", Type: flux.TString}, - {Label: "_field", Type: flux.TString}, - {Label: "host", Type: flux.TString}, - {Label: "_value", Type: flux.TFloat}, - }, - KeyCols: []string{"_measurement", "_field", "host"}, - Data: [][]interface{}{ - {execute.Time(0), "cpu", "usage_user", "server01", 2.0}, - {execute.Time(10), "cpu", "usage_user", "server01", 1.5}, - {execute.Time(20), "cpu", "usage_user", "server01", 5.0}, - }, - }, - { - ColMeta: []flux.ColMeta{ - {Label: "_time", Type: flux.TTime}, - {Label: "_measurement", Type: flux.TString}, - {Label: "_field", Type: flux.TString}, - {Label: "host", Type: flux.TString}, - {Label: "_value", Type: flux.TFloat}, - }, - KeyCols: []string{"_measurement", "_field", "host"}, - Data: [][]interface{}{ - {execute.Time(0), "cpu", "usage_system", "server01", 8.0}, - {execute.Time(10), "cpu", "usage_system", "server01", 3.0}, - {execute.Time(20), "cpu", "usage_system", "server01", 6.0}, - }, - }, - }, - }, nil - }, - } - - metrics := influxdb.NewMetrics(nil) - deps := influxdb.StorageDependencies{ - FromDeps: influxdb.FromDependencies{ - Reader: reader, - Metrics: metrics, - }, - } - ctx, span := dependency.Inject(context.Background(), deps) - defer span.Finish() - ctx = query.ContextWithRequest(ctx, &query.Request{ - OrganizationID: orgID, - }) - a := mockAdministration{ - Ctx: ctx, - StreamBounds: &execute.Bounds{ - Start: execute.Time(0), - Stop: execute.Time(30), - }, - } - - s, err := influxdb.CreateReadWindowAggregateSource(pspec, id, a) - if err != nil { - t.Fatal(err) - } - return s - }, - ) -} diff --git a/query/stdlib/influxdata/influxdb/storage.go b/query/stdlib/influxdata/influxdb/storage.go deleted file mode 100644 index a7f9b3534a5..00000000000 --- a/query/stdlib/influxdata/influxdb/storage.go +++ /dev/null @@ -1,96 +0,0 @@ -package influxdb - -import ( - "context" - - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/storage" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" -) - -type HostLookup interface { - Hosts() []string - Watch() <-chan struct{} -} - -type BucketLookup interface { - Lookup(ctx context.Context, orgID platform2.ID, name string) (platform2.ID, bool) -} - -type OrganizationLookup interface { - Lookup(ctx context.Context, name string) (platform2.ID, bool) -} - -type FromDependencies struct { - Reader query.StorageReader - BucketLookup BucketLookup - OrganizationLookup OrganizationLookup - Metrics *metrics -} - -func (d FromDependencies) Validate() error { - if d.Reader == nil { - return errors.New("missing reader dependency") - } - if d.BucketLookup == nil { - return errors.New("missing bucket lookup dependency") - } - if d.OrganizationLookup == nil { - return errors.New("missing organization lookup dependency") - } - return nil -} - -// PrometheusCollectors satisfies the PrometheusCollector interface. 
-func (d FromDependencies) PrometheusCollectors() []prometheus.Collector { - collectors := make([]prometheus.Collector, 0) - if pc, ok := d.Reader.(prom.PrometheusCollector); ok { - collectors = append(collectors, pc.PrometheusCollectors()...) - } - if d.Metrics != nil { - collectors = append(collectors, d.Metrics.PrometheusCollectors()...) - } - return collectors -} - -// ToDependencies contains the dependencies for executing the `to` function. -type ToDependencies struct { - BucketLookup BucketLookup - OrganizationLookup OrganizationLookup - PointsWriter storage.PointsWriter -} - -// Validate returns an error if any required field is unset. -func (d ToDependencies) Validate() error { - if d.BucketLookup == nil { - return errors.New("missing bucket lookup dependency") - } - if d.OrganizationLookup == nil { - return errors.New("missing organization lookup dependency") - } - if d.PointsWriter == nil { - return errors.New("missing points writer dependency") - } - return nil -} - -type StaticLookup struct { - hosts []string -} - -func NewStaticLookup(hosts []string) StaticLookup { - return StaticLookup{ - hosts: hosts, - } -} - -func (l StaticLookup) Hosts() []string { - return l.hosts -} -func (l StaticLookup) Watch() <-chan struct{} { - // A nil channel always blocks, since hosts never change this is appropriate. - return nil -} diff --git a/query/stdlib/influxdata/influxdb/storage_predicate.go b/query/stdlib/influxdata/influxdb/storage_predicate.go deleted file mode 100644 index ba8d49f3911..00000000000 --- a/query/stdlib/influxdata/influxdb/storage_predicate.go +++ /dev/null @@ -1,208 +0,0 @@ -package influxdb - -import ( - "fmt" - - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/semantic" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/pkg/errors" -) - -// ToStoragePredicate will convert a FunctionExpression into a predicate that can be -// sent down to the storage layer. -func ToStoragePredicate(n semantic.Expression, objectName string) (*datatypes.Predicate, error) { - root, err := toStoragePredicateHelper(n, objectName) - if err != nil { - return nil, err - } - - return &datatypes.Predicate{ - Root: root, - }, nil -} - -func mergePredicates(op ast.LogicalOperatorKind, predicates ...*datatypes.Predicate) (*datatypes.Predicate, error) { - if len(predicates) == 0 { - return nil, errors.New("at least one predicate is needed") - } - - var value datatypes.Node_Logical - switch op { - case ast.AndOperator: - value = datatypes.Node_LogicalAnd - case ast.OrOperator: - value = datatypes.Node_LogicalOr - default: - return nil, fmt.Errorf("unknown logical operator %v", op) - } - - // Nest the predicates backwards. 
This way we get a tree like this: - // a AND (b AND c) - root := predicates[len(predicates)-1].Root - for i := len(predicates) - 2; i >= 0; i-- { - root = &datatypes.Node{ - NodeType: datatypes.Node_TypeLogicalExpression, - Value: &datatypes.Node_Logical_{Logical: value}, - Children: []*datatypes.Node{ - predicates[i].Root, - root, - }, - } - } - return &datatypes.Predicate{ - Root: root, - }, nil -} - -func toStoragePredicateHelper(n semantic.Expression, objectName string) (*datatypes.Node, error) { - switch n := n.(type) { - case *semantic.LogicalExpression: - left, err := toStoragePredicateHelper(n.Left, objectName) - if err != nil { - return nil, errors.Wrap(err, "left hand side") - } - right, err := toStoragePredicateHelper(n.Right, objectName) - if err != nil { - return nil, errors.Wrap(err, "right hand side") - } - children := []*datatypes.Node{left, right} - switch n.Operator { - case ast.AndOperator: - return &datatypes.Node{ - NodeType: datatypes.Node_TypeLogicalExpression, - Value: &datatypes.Node_Logical_{Logical: datatypes.Node_LogicalAnd}, - Children: children, - }, nil - case ast.OrOperator: - return &datatypes.Node{ - NodeType: datatypes.Node_TypeLogicalExpression, - Value: &datatypes.Node_Logical_{Logical: datatypes.Node_LogicalOr}, - Children: children, - }, nil - default: - return nil, fmt.Errorf("unknown logical operator %v", n.Operator) - } - case *semantic.BinaryExpression: - left, err := toStoragePredicateHelper(n.Left, objectName) - if err != nil { - return nil, errors.Wrap(err, "left hand side") - } - right, err := toStoragePredicateHelper(n.Right, objectName) - if err != nil { - return nil, errors.Wrap(err, "right hand side") - } - children := []*datatypes.Node{left, right} - op, err := toComparisonOperator(n.Operator) - if err != nil { - return nil, err - } - return &datatypes.Node{ - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: op}, - Children: children, - }, nil - case *semantic.StringLiteral: - return &datatypes.Node{ - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: n.Value, - }, - }, nil - case *semantic.IntegerLiteral: - return &datatypes.Node{ - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_IntegerValue{ - IntegerValue: n.Value, - }, - }, nil - case *semantic.BooleanLiteral: - return &datatypes.Node{ - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_BooleanValue{ - BooleanValue: n.Value, - }, - }, nil - case *semantic.FloatLiteral: - return &datatypes.Node{ - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_FloatValue{ - FloatValue: n.Value, - }, - }, nil - case *semantic.RegexpLiteral: - return &datatypes.Node{ - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_RegexValue{ - RegexValue: n.Value.String(), - }, - }, nil - case *semantic.MemberExpression: - // Sanity check that the object is the objectName identifier - if ident, ok := n.Object.(*semantic.IdentifierExpression); !ok || ident.Name.Name() != objectName { - return nil, fmt.Errorf("unknown object %q", n.Object) - } - switch n.Property.Name() { - case datatypes.FieldKey: - return &datatypes.Node{ - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{ - TagRefValue: models.FieldKeyTagKey, - }, - }, nil - case datatypes.MeasurementKey: - return &datatypes.Node{ - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{ - TagRefValue: models.MeasurementTagKey, - }, - }, nil - case datatypes.ValueKey: - 
return &datatypes.Node{ - NodeType: datatypes.Node_TypeFieldRef, - Value: &datatypes.Node_FieldRefValue{ - FieldRefValue: datatypes.ValueKey, - }, - }, nil - - } - return &datatypes.Node{ - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{ - TagRefValue: n.Property.Name(), - }, - }, nil - case *semantic.DurationLiteral: - return nil, errors.New("duration literals not supported in storage predicates") - case *semantic.DateTimeLiteral: - return nil, errors.New("time literals not supported in storage predicates") - default: - return nil, fmt.Errorf("unsupported semantic expression type %T", n) - } -} - -func toComparisonOperator(o ast.OperatorKind) (datatypes.Node_Comparison, error) { - switch o { - case ast.EqualOperator: - return datatypes.Node_ComparisonEqual, nil - case ast.NotEqualOperator: - return datatypes.Node_ComparisonNotEqual, nil - case ast.RegexpMatchOperator: - return datatypes.Node_ComparisonRegex, nil - case ast.NotRegexpMatchOperator: - return datatypes.Node_ComparisonNotRegex, nil - case ast.StartsWithOperator: - return datatypes.Node_ComparisonStartsWith, nil - case ast.LessThanOperator: - return datatypes.Node_ComparisonLess, nil - case ast.LessThanEqualOperator: - return datatypes.Node_ComparisonLessEqual, nil - case ast.GreaterThanOperator: - return datatypes.Node_ComparisonGreater, nil - case ast.GreaterThanEqualOperator: - return datatypes.Node_ComparisonGreaterEqual, nil - default: - return 0, fmt.Errorf("unknown operator %v", o) - } -} diff --git a/query/stdlib/influxdata/influxdb/tag_values_measurement_or_predicate_test.flux b/query/stdlib/influxdata/influxdb/tag_values_measurement_or_predicate_test.flux deleted file mode 100644 index fbc71a53ab2..00000000000 --- a/query/stdlib/influxdata/influxdb/tag_values_measurement_or_predicate_test.flux +++ /dev/null @@ -1,135 +0,0 @@ -// TODO(whb): These tests should get ported to the flux repo and removed here -// when they are included with a flux release that InfluxDB uses to remove the -// redundancy. 
- -package influxdb_test - -import "csv" -import "testing" -import "testing/expect" - -option now = () => 2030-01-01T00:00:00Z - -input = " -#group,false,false,false,false,true,true,true,true,true,true,true -#datatype,string,long,dateTime:RFC3339,long,string,string,string,string,string,string,string -#default,_result,,,,,,,,,, -,result,table,_time,_value,_field,_measurement,device,fstype,host,mode,path -,,0,2020-10-21T20:48:30Z,4881964326,inodes_free,disk,disk1s5,apfs,euterpe.local,ro,/ -,,0,2020-10-21T20:48:40Z,4881964326,inodes_free,disk,disk1s5,apfs,euterpe.local,ro,/ -,,0,2020-10-21T20:48:50Z,4881964326,inodes_free,disk,disk1s5,apfs,euterpe.local,ro,/ -,,1,2020-10-21T20:48:30Z,4294963701,inodes_free,disk,disk2s1,hfs,euterpe.local,ro,/Volumes/IntelliJ IDEA CE -,,1,2020-10-21T20:48:40Z,4294963701,inodes_free,disk,disk2s1,hfs,euterpe.local,ro,/Volumes/IntelliJ IDEA CE -,,1,2020-10-21T20:48:50Z,4294963701,inodes_free,disk,disk2s1,hfs,euterpe.local,ro,/Volumes/IntelliJ IDEA CE -,,2,2020-10-21T20:48:30Z,488514,inodes_used,disk,disk1s5,apfs,euterpe.local,ro,/ -,,2,2020-10-21T20:48:40Z,488514,inodes_used,disk,disk1s5,apfs,euterpe.local,ro,/ -,,2,2020-10-21T20:48:50Z,488514,inodes_used,disk,disk1s5,apfs,euterpe.local,ro,/ -,,3,2020-10-21T20:48:30Z,3578,inodes_used,disk,disk2s1,hfs,euterpe.local,ro,/Volumes/IntelliJ IDEA CE -,,3,2020-10-21T20:48:40Z,3578,inodes_used,disk,disk2s1,hfs,euterpe.local,ro,/Volumes/IntelliJ IDEA CE -,,3,2020-10-21T20:48:50Z,3578,inodes_used,disk,disk2s1,hfs,euterpe.local,ro,/Volumes/IntelliJ IDEA CE - -#group,false,false,false,false,true,true,true,true,true -#datatype,string,long,dateTime:RFC3339,double,string,string,string,string,string -#default,_result,,,,,,,, -,result,table,_time,_value,_field,_measurement,cpu,host,region -,,4,2020-10-21T20:48:30Z,69.30000000167638,usage_idle,cpu,cpu0,euterpe.local,south -,,4,2020-10-21T20:48:40Z,67.36736736724372,usage_idle,cpu,cpu0,euterpe.local,south -,,4,2020-10-21T20:48:50Z,69.23076923005354,usage_idle,cpu,cpu0,euterpe.local,south -,,5,2020-10-21T20:48:30Z,96.10000000102445,usage_idle,cpu,cpu1,euterpe.local,south -,,5,2020-10-21T20:48:40Z,95.70000000055181,usage_idle,cpu,cpu1,euterpe.local,south -,,5,2020-10-21T20:48:50Z,95.89999999860534,usage_idle,cpu,cpu1,euterpe.local,south - -#group,false,false,false,false,true,true,true,true,true -#datatype,string,long,dateTime:RFC3339,double,string,string,string,string,string -#default,_result,,,,,,,, -,result,table,_time,_value,_field,_measurement,cpu,host,region -,,6,2020-10-21T20:48:30Z,69.30000000167638,usage_idle,cpu,cpu0,mnemosyne.local,east -,,6,2020-10-21T20:48:40Z,67.36736736724372,usage_idle,cpu,cpu0,mnemosyne.local,east -,,6,2020-10-21T20:48:50Z,69.23076923005354,usage_idle,cpu,cpu0,mnemosyne.local,east -,,7,2020-10-21T20:48:30Z,96.10000000102445,usage_idle,cpu,cpu1,mnemosyne.local,east -,,7,2020-10-21T20:48:40Z,95.70000000055181,usage_idle,cpu,cpu1,mnemosyne.local,east -,,7,2020-10-21T20:48:50Z,95.89999999860534,usage_idle,cpu,cpu1,mnemosyne.local,east - -#group,false,false,true,true,false,false,true,true,true -#datatype,string,long,string,string,dateTime:RFC3339,double,string,string,string -#default,_result,,,,,,,, -,result,table,_field,_measurement,_time,_value,cpu,host,region -,,8,usage_user,cpu,2020-10-21T20:48:30Z,19.30000000007567,cpu0,euterpe.local,north -,,8,usage_user,cpu,2020-10-21T20:48:40Z,20.020020020038682,cpu0,euterpe.local,north -,,8,usage_user,cpu,2020-10-21T20:48:50Z,18.581418581407107,cpu0,euterpe.local,north 
-,,9,usage_user,cpu,2020-10-21T20:48:30Z,2.3000000000138243,cpu1,euterpe.local,north -,,9,usage_user,cpu,2020-10-21T20:48:40Z,2.4000000000536965,cpu1,euterpe.local,north -,,9,usage_user,cpu,2020-10-21T20:48:50Z,2.0999999999423746,cpu1,euterpe.local,north -" - -testcase tag_values_measurement_or_predicate { - got = csv.from(csv: input) - |> testing.load() - |> range(start: -100y) - |> filter(fn: (r) => r["_measurement"] == "cpu") - |> filter(fn: (r) => r["_measurement"] == "someOtherThing" or r["host"] == "euterpe.local") - |> keep(columns: ["region"]) - |> group() - |> distinct(column: "region") - |> limit(n: 200) - |> sort() - - want = csv.from(csv: "#datatype,string,long,string -#group,false,false,false -#default,0,, -,result,table,_value -,,0,north -,,0,south -") - - expect.planner(rules: ["PushDownReadTagValuesRule": 1]) - testing.diff(got, want) -} - -testcase tag_values_measurement_or_negation { - got = csv.from(csv: input) - |> testing.load() - |> range(start: -100y) - |> filter(fn: (r) => r["_measurement"] != "cpu") - |> filter(fn: (r) => r["_measurement"] == "someOtherThing" or r["fstype"] != "apfs") - |> keep(columns: ["fstype"]) - |> group() - |> distinct(column: "fstype") - |> limit(n: 200) - |> sort() - - want = csv.from(csv: "#datatype,string,long,string -#group,false,false,false -#default,0,, -,result,table,_value -,,0,hfs -") - - expect.planner(rules: ["PushDownReadTagValuesRule": 1]) - testing.diff(got, want) -} - -testcase tag_values_measurement_or_regex { - got = csv.from(csv: input) - |> testing.load() - |> range(start: -100y) - |> filter(fn: (r) => r["_measurement"] =~ /cp.*/) - |> filter(fn: (r) => r["_measurement"] == "someOtherThing" or r["host"] !~ /mnemo.*/) - |> keep(columns: ["region"]) - |> group() - |> distinct(column: "region") - |> limit(n: 200) - |> sort() - - want = csv.from(csv: "#datatype,string,long,string -#group,false,false,false -#default,0,, -,result,table,_value -,,0,north -,,0,south -") - - expect.planner(rules: ["PushDownReadTagValuesRule": 1]) - testing.diff(got, want) -} - diff --git a/query/stdlib/influxdata/influxdb/v1/databases.go b/query/stdlib/influxdata/influxdb/v1/databases.go deleted file mode 100644 index daa726afe76..00000000000 --- a/query/stdlib/influxdata/influxdb/v1/databases.go +++ /dev/null @@ -1,218 +0,0 @@ -package v1 - -import ( - "context" - "fmt" - "time" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/codes" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/plan" - v1 "github.com/influxdata/flux/stdlib/influxdata/influxdb/v1" - "github.com/influxdata/flux/values" - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/query" - "github.com/pkg/errors" -) - -const DatabasesKind = "influxdata/influxdb/v1.localDatabases" - -func init() { - execute.RegisterSource(DatabasesKind, createDatabasesSource) - plan.RegisterPhysicalRules(LocalDatabasesRule{}) -} - -type LocalDatabasesProcedureSpec struct { - plan.DefaultCost -} - -func (s *LocalDatabasesProcedureSpec) Kind() plan.ProcedureKind { - return DatabasesKind -} - -func (s *LocalDatabasesProcedureSpec) Copy() plan.ProcedureSpec { - ns := new(LocalDatabasesProcedureSpec) - return ns -} - -type DatabasesDecoder struct { - orgID platform2.ID - deps 
*DatabasesDependencies - databases []*platform.DBRPMapping - alloc memory.Allocator -} - -func (bd *DatabasesDecoder) Connect(ctx context.Context) error { - return nil -} - -func (bd *DatabasesDecoder) Fetch(ctx context.Context) (bool, error) { - b, _, err := bd.deps.DBRP.FindMany(ctx, platform.DBRPMappingFilter{}) - if err != nil { - return false, err - } - bd.databases = b - return false, nil -} - -func (bd *DatabasesDecoder) Decode(ctx context.Context) (flux.Table, error) { - type databaseInfo struct { - *platform.DBRPMapping - RetentionPeriod time.Duration - } - - databases := make([]databaseInfo, 0, len(bd.databases)) - for _, db := range bd.databases { - bucket, err := bd.deps.BucketLookup.FindBucketByID(ctx, db.BucketID) - if err != nil { - code := errors2.ErrorCode(err) - if code == errors2.EUnauthorized || code == errors2.EForbidden { - continue - } - return nil, err - } - databases = append(databases, databaseInfo{ - DBRPMapping: db, - RetentionPeriod: bucket.RetentionPeriod, - }) - } - - if len(databases) == 0 { - return nil, &errors2.Error{ - Code: errors2.ENotFound, - Msg: "no 1.x databases found", - } - } - - kb := execute.NewGroupKeyBuilder(nil) - kb.AddKeyValue("organizationID", values.NewString(databases[0].OrganizationID.String())) - gk, err := kb.Build() - if err != nil { - return nil, err - } - - b := execute.NewColListTableBuilder(gk, bd.alloc) - if _, err := b.AddCol(flux.ColMeta{ - Label: "organizationID", - Type: flux.TString, - }); err != nil { - return nil, err - } - if _, err := b.AddCol(flux.ColMeta{ - Label: "databaseName", - Type: flux.TString, - }); err != nil { - return nil, err - } - if _, err := b.AddCol(flux.ColMeta{ - Label: "retentionPolicy", - Type: flux.TString, - }); err != nil { - return nil, err - } - if _, err := b.AddCol(flux.ColMeta{ - Label: "retentionPeriod", - Type: flux.TInt, - }); err != nil { - return nil, err - } - if _, err := b.AddCol(flux.ColMeta{ - Label: "default", - Type: flux.TBool, - }); err != nil { - return nil, err - } - if _, err := b.AddCol(flux.ColMeta{ - Label: "bucketId", - Type: flux.TString, - }); err != nil { - return nil, err - } - - for _, db := range databases { - _ = b.AppendString(0, db.OrganizationID.String()) - _ = b.AppendString(1, db.Database) - _ = b.AppendString(2, db.RetentionPolicy) - _ = b.AppendInt(3, db.RetentionPeriod.Nanoseconds()) - _ = b.AppendBool(4, db.Default) - _ = b.AppendString(5, db.BucketID.String()) - } - - return b.Table() -} - -func (bd *DatabasesDecoder) Close() error { - return nil -} - -func createDatabasesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) { - _, ok := prSpec.(*LocalDatabasesProcedureSpec) - if !ok { - return nil, fmt.Errorf("invalid spec type %T", prSpec) - } - deps := GetDatabasesDependencies(a.Context()) - req := query.RequestFromContext(a.Context()) - if req == nil { - return nil, errors.New("missing request on context") - } - orgID := req.OrganizationID - - bd := &DatabasesDecoder{orgID: orgID, deps: &deps, alloc: a.Allocator()} - - return execute.CreateSourceFromDecoder(bd, dsid, a) -} - -type key int - -const dependenciesKey key = iota - -type DatabasesDependencies struct { - DBRP platform.DBRPMappingService - BucketLookup platform.BucketService -} - -func (d DatabasesDependencies) Inject(ctx context.Context) context.Context { - return context.WithValue(ctx, dependenciesKey, d) -} - -func GetDatabasesDependencies(ctx context.Context) DatabasesDependencies { - return 
ctx.Value(dependenciesKey).(DatabasesDependencies) -} - -func (d DatabasesDependencies) Validate() error { - if d.DBRP == nil { - return errors.New("missing all databases lookup dependency") - } - if d.BucketLookup == nil { - return errors.New("missing buckets lookup dependency") - } - return nil -} - -type LocalDatabasesRule struct{} - -func (rule LocalDatabasesRule) Name() string { - return "influxdata/influxdb.LocalDatabasesRule" -} - -func (rule LocalDatabasesRule) Pattern() plan.Pattern { - return plan.MultiSuccessor(v1.DatabasesKind) -} - -func (rule LocalDatabasesRule) Rewrite(ctx context.Context, node plan.Node) (plan.Node, bool, error) { - fromSpec := node.ProcedureSpec().(*v1.DatabasesProcedureSpec) - if fromSpec.Host != nil { - return node, false, nil - } else if fromSpec.Org != nil { - return node, false, &flux.Error{ - Code: codes.Unimplemented, - Msg: "buckets cannot list from a separate organization; please specify a host or remove the organization", - } - } - - return plan.CreateLogicalNode("localDatabases", &LocalDatabasesProcedureSpec{}), true, nil -} diff --git a/query/stdlib/packages.go b/query/stdlib/packages.go deleted file mode 100644 index 24ebf478d34..00000000000 --- a/query/stdlib/packages.go +++ /dev/null @@ -1,7 +0,0 @@ -package stdlib - -// Import all stdlib packages -import ( - _ "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" - _ "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb/v1" -) diff --git a/query/stdlib/universe/last_test.flux b/query/stdlib/universe/last_test.flux deleted file mode 100644 index f4e0d731042..00000000000 --- a/query/stdlib/universe/last_test.flux +++ /dev/null @@ -1,57 +0,0 @@ -package universe_test - -import "testing" -import "testing/expect" -import "planner" -import "csv" - -testcase last_multi_shard { - expect.planner(rules: ["PushDownBareAggregateRule": 1]) - - input = " -#group,false,false,true,true,false,false,true,true,true -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string,string,string -#default,_result,,,,,,,, -,result,table,_start,_stop,_time,_value,_field,_measurement,meter -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2019-04-11T07:00:00Z,0,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2019-04-23T07:00:00Z,64,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2019-05-22T07:00:00Z,759,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2019-06-24T07:00:00Z,1234,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2019-07-24T07:00:00Z,1503,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2019-08-22T07:00:00Z,1707,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2019-09-23T07:00:00Z,1874,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2019-10-23T07:00:00Z,2086,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2019-11-21T08:00:00Z,2187,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2019-12-24T08:00:00Z,1851,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2020-01-24T08:00:00Z,1391,bank,pge_bill,35632393IN 
-,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2020-02-24T08:00:00Z,1221,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2020-03-25T07:00:00Z,0,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2020-04-23T07:00:00Z,447,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2020-05-22T07:00:00Z,868,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2020-06-23T07:00:00Z,1321,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2020-07-23T07:00:00Z,1453,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2020-08-21T07:00:00Z,1332,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2020-09-23T07:00:00Z,1312,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2020-10-22T07:00:00Z,1261,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2020-11-20T08:00:00Z,933,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2020-12-23T08:00:00Z,233,bank,pge_bill,35632393IN -,,0,2017-02-16T20:30:31.713576368Z,2021-02-16T20:30:31.713576368Z,2021-01-26T08:00:00Z,-1099,bank,pge_bill,35632393IN -" - want = csv.from( - csv: " -#group,false,false,false,false,true,true -#datatype,string,long,dateTime:RFC3339,double,string,string -#default,_result,,,,, -,result,table,_time,_value,_field,_measurement -,,0,2021-01-26T08:00:00Z,-1099,bank,pge_bill -", - ) - result = csv.from(csv: input) - |> testing.load() - |> range(start: -3y) - |> filter(fn: (r) => r._measurement == "pge_bill" and r._field == "bank") - |> last() - |> keep(columns: ["_time", "_value", "_field", "_measurement"]) - - testing.diff(want: want, got: result) -} diff --git a/query/storage.go b/query/storage.go deleted file mode 100644 index 5ec0d7f1e30..00000000000 --- a/query/storage.go +++ /dev/null @@ -1,117 +0,0 @@ -package query - -import ( - "context" - "fmt" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/plan" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -// StorageReader is an interface for reading tables from the storage subsystem. 
-type StorageReader interface { - ReadFilter(ctx context.Context, spec ReadFilterSpec, alloc memory.Allocator) (TableIterator, error) - ReadGroup(ctx context.Context, spec ReadGroupSpec, alloc memory.Allocator) (TableIterator, error) - ReadWindowAggregate(ctx context.Context, spec ReadWindowAggregateSpec, alloc memory.Allocator) (TableIterator, error) - - ReadTagKeys(ctx context.Context, spec ReadTagKeysSpec, alloc memory.Allocator) (TableIterator, error) - ReadTagValues(ctx context.Context, spec ReadTagValuesSpec, alloc memory.Allocator) (TableIterator, error) - - ReadSeriesCardinality(ctx context.Context, spec ReadSeriesCardinalitySpec, alloc memory.Allocator) (TableIterator, error) - SupportReadSeriesCardinality(ctx context.Context) bool - - Close() -} - -type ReadFilterSpec struct { - OrganizationID platform.ID - BucketID platform.ID - - Bounds execute.Bounds - Predicate *datatypes.Predicate -} - -type ReadGroupSpec struct { - ReadFilterSpec - - GroupMode GroupMode - GroupKeys []string - - AggregateMethod string -} - -func (spec *ReadGroupSpec) Name() string { - return fmt.Sprintf("readGroup(%s)", spec.AggregateMethod) -} - -type ReadTagKeysSpec struct { - ReadFilterSpec -} - -type ReadTagValuesSpec struct { - ReadFilterSpec - TagKey string -} - -type ReadSeriesCardinalitySpec struct { - ReadFilterSpec -} - -// ReadWindowAggregateSpec defines the options for WindowAggregate. -// -// Window and the WindowEvery/Offset should be mutually exclusive. If you set either the WindowEvery or Offset with -// nanosecond values, then the Window will be ignored. -type ReadWindowAggregateSpec struct { - ReadFilterSpec - WindowEvery int64 - Offset int64 - Aggregates []plan.ProcedureKind - CreateEmpty bool - TimeColumn string - Window execute.Window - - // ForceAggregate forces all aggregates to be treated as aggregates. - // This forces selectors, which normally don't return values for empty - // windows, to return a null value. - ForceAggregate bool -} - -func (spec *ReadWindowAggregateSpec) Name() string { - var agg string - if len(spec.Aggregates) > 0 { - agg = string(spec.Aggregates[0]) - } - return fmt.Sprintf("readWindow(%s)", agg) -} - -// TableIterator is a table iterator that also keeps track of cursor statistics from the storage engine. -type TableIterator interface { - flux.TableIterator - Statistics() cursors.CursorStats -} - -type GroupMode int - -const ( - // GroupModeNone merges all series into a single group. - GroupModeNone GroupMode = iota - // GroupModeBy produces a table for each unique value of the specified GroupKeys. - GroupModeBy -) - -// ToGroupMode accepts the group mode from Flux and produces the appropriate storage group mode. -func ToGroupMode(fluxMode flux.GroupMode) GroupMode { - switch fluxMode { - case flux.GroupModeNone: - return GroupModeNone - case flux.GroupModeBy: - return GroupModeBy - default: - panic(fmt.Sprint("unknown group mode: ", fluxMode)) - } -} diff --git a/rand/id.go b/rand/id.go deleted file mode 100644 index ef8b451acee..00000000000 --- a/rand/id.go +++ /dev/null @@ -1,72 +0,0 @@ -package rand - -import ( - "encoding/binary" - "math/rand" - "sync" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ platform.IDGenerator = (*OrgBucketID)(nil) - -// OrgBucketID creates an id that does not have ascii -// backslash, commas, or spaces. Used to create IDs for organizations -// and buckets. -// -// It is implemented without those characters because orgbucket -// pairs are placed in the old measurement field. 
Measurement -// was interpreted as a string delimited with commas. Therefore, -// to continue to use the underlying storage engine we need to -// sanitize ids. -// -// Safe for concurrent use by multiple goroutines. -type OrgBucketID struct { - m sync.Mutex - src *rand.Rand -} - -// NewOrgBucketID creates an influxdb.IDGenerator that creates -// random numbers seeded with seed. Ascii backslash, comma, -// and space are manipulated by incrementing. -// -// Typically, seed with `time.Now().UnixNano()` -func NewOrgBucketID(seed int64) *OrgBucketID { - return &OrgBucketID{ - src: rand.New(rand.NewSource(seed)), - } -} - -// Seed allows one to override the current seed. -// Typically, this override is done for tests. -func (r *OrgBucketID) Seed(seed int64) { - r.m.Lock() - r.src = rand.New(rand.NewSource(seed)) - r.m.Unlock() -} - -// ID generates an ID that does not have backslashes, commas, or spaces. -func (r *OrgBucketID) ID() platform.ID { - r.m.Lock() - n := r.src.Uint64() - r.m.Unlock() - - n = sanitize(n) - return platform.ID(n) -} - -func sanitize(n uint64) uint64 { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, n) - for i := range b { - switch b[i] { - // these bytes must be remove here to prevent the need - // to escape/unescape. See the models package for - // additional detail. - // \ , " " - case 0x5C, 0x2C, 0x20: - b[i] = b[i] + 1 - } - } - return binary.BigEndian.Uint64(b) -} diff --git a/rand/id_test.go b/rand/id_test.go deleted file mode 100644 index 0879b1e77cd..00000000000 --- a/rand/id_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package rand - -import ( - "encoding/binary" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -func TestOrgBucketID_ID(t *testing.T) { - tests := []struct { - name string - seed int64 - want platform.ID - }{ - { - name: "when seeded with 6 the first random number contains characters", - seed: 6, - want: platform.ID(0xaddff35d7fe88f15), - }, - { - name: "when seeded with 1234567890 we get a random number without any bad chars", - seed: 1234567890, - want: platform.ID(0x8a95c1bf40518fee), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - r := NewOrgBucketID(tt.seed) - if got := r.ID(); !reflect.DeepEqual(got, tt.want) { - t.Errorf("OrgBucketID.ID() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestOrgBucketID_ID_sanitized(t *testing.T) { - r := NewOrgBucketID(42) - b := make([]byte, 8) - for i := 0; i < 1000; i++ { - id := r.ID() - binary.LittleEndian.PutUint64(b, uint64(id)) - for j := range b { - switch b[j] { - case 0x5C, 0x2C, 0x20: - t.Fatalf("unexpected bytes found in IDs") - } - } - } -} diff --git a/rand/token.go b/rand/token.go deleted file mode 100644 index 0e84d6cb847..00000000000 --- a/rand/token.go +++ /dev/null @@ -1,41 +0,0 @@ -package rand - -import ( - "crypto/rand" - "encoding/base64" - - platform "github.com/influxdata/influxdb/v2" -) - -// TODO: rename to token.go - -// TokenGenerator implements platform.TokenGenerator. -type TokenGenerator struct { - size int -} - -// NewTokenGenerator creates an instance of an platform.TokenGenerator. -func NewTokenGenerator(n int) platform.TokenGenerator { - return &TokenGenerator{ - size: n, - } -} - -// Token returns a new string token of size t.size. 
-func (t *TokenGenerator) Token() (string, error) { - return generateRandomString(t.size) -} - -func generateRandomString(s int) (string, error) { - b, err := generateRandomBytes(s) - return base64.URLEncoding.EncodeToString(b), err -} - -func generateRandomBytes(n int) ([]byte, error) { - b := make([]byte, n) - if _, err := rand.Read(b); err != nil { - return nil, err - } - - return b, nil -} diff --git a/releng/protoc-gen b/releng/protoc-gen deleted file mode 100755 index 712220403e9..00000000000 --- a/releng/protoc-gen +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash -set -o errexit \ - -o nounset \ - -o pipefail - -ROOT="$(realpath "${BASH_SOURCE}")" # -> /releng/protoc-gen -ROOT="$(dirname "${ROOT}")" # -> /releng/ -ROOT="$(dirname "${ROOT}")" # -> / - -( - # Since this script is run outside of a docker container, it is - # possible that one (or more) of the following executables is - # not installed. - set -x - which docker - which sudo - which yq -) 1>/dev/null - -CROSS_BUILDER_VERSION="$(yq -e eval '.parameters.cross-container-tag.default' "${ROOT}/.circleci/config.yml")" - -# Updating ownership within the container requires both the "UID" and "GID" -# of the current user. Since the current user does not exist within the -# container, "${USER}:" cannot be supplied to `chown`. -USER_UID="$(id -u)" -USER_GID="$(id -g)" - -read -d '' DOCKERSCRIPT <= len(expected) { - t.Fatalf("count larger than expected len, %d > %d", count, len(expected)) - } - require.Equal(t, expected[count], string(b)) - if wg != nil { - wg.Done() - } - // only progress the "pointer" if the data is successful - // enqueueing with a returned error means the same first point is retried - if returning == nil { - count++ - } - return time.Second, returning - } - - writer := &testRemoteWriter{} - - writer.writeFn = writeFn - - return writer -} - -func getTestRemoteWriter(t *testing.T, expected string) remoteWriter { - t.Helper() - - writer := &testRemoteWriter{ - writeFn: func(b []byte, i int) (time.Duration, error) { - require.Equal(t, expected, string(b)) - return time.Second, nil - }, - } - - return writer -} - -func TestEnqueueData(t *testing.T) { - t.Parallel() - - queuePath := t.TempDir() - - logger := zaptest.NewLogger(t) - qm := NewDurableQueueManager(logger, queuePath, metrics.NewReplicationsMetrics(), replicationsMock.NewMockHttpConfigStore(nil)) - - require.NoError(t, qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)) - require.DirExists(t, filepath.Join(queuePath, id1.String())) - - sizes, err := qm.CurrentQueueSizes([]platform.ID{id1}) - require.NoError(t, err) - // Empty queues are 8 bytes for the footer. 
- require.Equal(t, map[platform.ID]int64{id1: 8}, sizes) - // Remaining queue should initially be empty: - rsizes, err := qm.RemainingQueueSizes([]platform.ID{id1}) - require.NoError(t, err) - // Empty queue = 0 bytes: - require.Equal(t, map[platform.ID]int64{id1: 0}, rsizes) - - data := "some fake data" - - // close the scanner goroutine to specifically test EnqueueData() - rq, ok := qm.replicationQueues[id1] - require.True(t, ok) - closeRq(rq) - t.Cleanup(func() { - require.NoError(t, rq.queue.Close()) - }) - go func() { <-rq.receive }() // absorb the receive to avoid testcase deadlock - - require.NoError(t, qm.EnqueueData(id1, []byte(data), 1)) - sizes, err = qm.CurrentQueueSizes([]platform.ID{id1}) - require.NoError(t, err) - require.Greater(t, sizes[id1], int64(8)) - rsizes, err = qm.RemainingQueueSizes([]platform.ID{id1}) - require.NoError(t, err) - require.Greater(t, rsizes[id1], int64(0)) - // Difference between disk size and queue should only be footer size - require.Equal(t, sizes[id1]-rsizes[id1], int64(8)) - - written, err := qm.replicationQueues[id1].queue.Current() - require.NoError(t, err) - - require.Equal(t, data, string(written)) -} - -// this test ensures that data does not get incorrectly dropped from the Queue on remote write failures -func TestSendWrite(t *testing.T) { - t.Parallel() - - // data points to test - var pointIndex int - points := []string{ - "this is some data", - "this is also some data", - "this is even more data", - } - - path, qm := initQueueManager(t) - require.NoError(t, qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)) - require.DirExists(t, filepath.Join(path, id1.String())) - - // close the scanner goroutine to test SendWrite() with more granularity - rq, ok := qm.replicationQueues[id1] - require.True(t, ok) - closeRq(rq) - t.Cleanup(func() { - require.NoError(t, rq.queue.Close()) - }) - go func() { <-rq.receive }() // absorb the receive to avoid testcase deadlock - - // Create custom remote writer that does some expected behavior - // Will periodically fail to simulate a timeout - shouldFailThisWrite := false - writer := &testRemoteWriter{} - writer.writeFn = func(data []byte, attempt int) (time.Duration, error) { - require.Equal(t, []byte(points[pointIndex]), data) - if shouldFailThisWrite { - return 100, errors.New("remote timeout") - } - return 0, nil // current "success" return values - } - rq.remoteWriter = writer - - // Write first point - require.NoError(t, qm.EnqueueData(id1, []byte(points[pointIndex]), 1)) - // Make sure the data is in the queue - scan, err := rq.queue.NewScanner() - require.NoError(t, err) - require.True(t, scan.Next()) - require.Equal(t, []byte(points[pointIndex]), scan.Bytes()) - require.NoError(t, scan.Err()) - // Initial Queue size should be size of data + footer - rsizesI, err := qm.RemainingQueueSizes([]platform.ID{id1}) - require.NoError(t, err) - require.Equal(t, rsizesI[id1], int64(8+len(points[pointIndex]))) - // Send the write to the "remote" with a success - rq.SendWrite() - // Queue becomes empty after write: - rsizesJ, err := qm.RemainingQueueSizes([]platform.ID{id1}) - require.NoError(t, err) - require.Equal(t, rsizesJ[id1], int64(0)) - - // Make sure the data is no longer in the queue - _, err = rq.queue.NewScanner() - require.Equal(t, io.EOF, err) - - // Write second point - pointIndex++ - require.NoError(t, qm.EnqueueData(id1, []byte(points[pointIndex]), 1)) - // Make sure the data is in the queue - scan, err = rq.queue.NewScanner() - require.NoError(t, err) - require.True(t, 
scan.Next()) - require.Equal(t, []byte(points[pointIndex]), scan.Bytes()) - require.NoError(t, scan.Err()) - rsizesI, err = qm.RemainingQueueSizes([]platform.ID{id1}) - require.NoError(t, err) - // Send the write to the "remote" with a FAILURE - shouldFailThisWrite = true - rq.SendWrite() - // Queue size should not have decreased if write has failed: - rsizesJ, err = qm.RemainingQueueSizes([]platform.ID{id1}) - require.NoError(t, err) - require.Equal(t, rsizesJ[id1], rsizesI[id1]) - // Make sure the data is still in the queue - scan, err = rq.queue.NewScanner() - require.NoError(t, err) - require.True(t, scan.Next()) - require.Equal(t, []byte(points[pointIndex]), scan.Bytes()) - require.NoError(t, scan.Err()) - // Send the write to the "remote" again, with a SUCCESS - shouldFailThisWrite = false - rq.SendWrite() - // Queue Becomes empty after a successful write - rsizesJ, err = qm.RemainingQueueSizes([]platform.ID{id1}) - require.NoError(t, err) - require.Equal(t, rsizesJ[id1], int64(0)) - - // Make sure the data is no longer in the queue - _, err = rq.queue.NewScanner() - require.Equal(t, io.EOF, err) -} - -func TestEnqueueData_WithMetrics(t *testing.T) { - t.Parallel() - - path, qm := initQueueManager(t) - require.NoError(t, qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)) - require.DirExists(t, filepath.Join(path, id1.String())) - - // close the scanner goroutine to specifically test EnqueueData() - rq, ok := qm.replicationQueues[id1] - require.True(t, ok) - closeRq(rq) - t.Cleanup(func() { - require.NoError(t, rq.queue.Close()) - }) - - reg := prom.NewRegistry(zaptest.NewLogger(t)) - reg.MustRegister(qm.metrics.PrometheusCollectors()...) - - data := "some fake data" - numPointsPerData := 3 - numDataToAdd := 4 - rq.remoteWriter = getTestRemoteWriter(t, data) - - for i := 1; i <= numDataToAdd; i++ { - go func() { <-rq.receive }() // absorb the receive to avoid testcase deadlock - require.NoError(t, qm.EnqueueData(id1, []byte(data), numPointsPerData)) - - pointCount := getPromMetric(t, "replications_queue_total_points_queued", reg) - require.Equal(t, i*numPointsPerData, int(pointCount.Counter.GetValue())) - - totalBytesQueued := getPromMetric(t, "replications_queue_total_bytes_queued", reg) - require.Equal(t, i*len(data), int(totalBytesQueued.Counter.GetValue())) - - currentBytesQueued := getPromMetric(t, "replications_queue_current_bytes_queued", reg) - // 8 extra bytes for each byte slice appended to the queue - require.Equal(t, i*(8+len(data)), int(currentBytesQueued.Gauge.GetValue())) - } - - // Queue size should be 0 after SendWrite completes - rq.SendWrite() - currentBytesQueued := getPromMetric(t, "replications_queue_current_bytes_queued", reg) - require.Equal(t, float64(0), currentBytesQueued.Gauge.GetValue()) -} - -func TestEnqueueData_EnqueueFailure(t *testing.T) { - t.Parallel() - - path, qm := initQueueManager(t) - require.NoError(t, qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)) - require.DirExists(t, filepath.Join(path, id1.String())) - - rq, ok := qm.replicationQueues[id1] - require.True(t, ok) - // Close the underlying queue so an error is generated if we try to append to it - require.NoError(t, rq.queue.Close()) - - reg := prom.NewRegistry(zaptest.NewLogger(t)) - reg.MustRegister(qm.metrics.PrometheusCollectors()...) 
- - data := "some fake data" - numPointsPerData := 3 - require.Error(t, qm.EnqueueData(id1, []byte(data), numPointsPerData)) // this will generate an error because of the closed queue - - droppedPoints := getPromMetric(t, "replications_queue_points_failed_to_queue", reg) - require.Equal(t, numPointsPerData, int(droppedPoints.Counter.GetValue())) - droppedBytes := getPromMetric(t, "replications_queue_bytes_failed_to_queue", reg) - require.Equal(t, len(data), int(droppedBytes.Counter.GetValue())) -} - -func getPromMetric(t *testing.T, name string, reg *prom.Registry) *dto.Metric { - mfs := promtest.MustGather(t, reg) - return promtest.FindMetric(mfs, name, map[string]string{ - "replicationID": id1.String(), - }) -} - -func TestGoroutineReceives(t *testing.T) { - t.Parallel() - - path, qm := initQueueManager(t) - require.NoError(t, qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)) - require.DirExists(t, filepath.Join(path, id1.String())) - - rq, ok := qm.replicationQueues[id1] - require.True(t, ok) - require.NotNil(t, rq) - closeRq(rq) // atypical from normal behavior, but lets us receive channels to test - t.Cleanup(func() { - require.NoError(t, rq.queue.Close()) - }) - - go func() { require.NoError(t, qm.EnqueueData(id1, []byte("1234"), 1)) }() - select { - case <-rq.receive: - return - case <-time.After(time.Second): - t.Fatal("Test timed out") - return - } -} - -func TestGoroutineCloses(t *testing.T) { - t.Parallel() - - path, qm := initQueueManager(t) - require.NoError(t, qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)) - require.DirExists(t, filepath.Join(path, id1.String())) - - rq, ok := qm.replicationQueues[id1] - require.True(t, ok) - require.NotNil(t, rq) - require.NoError(t, qm.CloseAll()) - - // wg should be zero here, indicating that the goroutine has closed - // if this does not panic, then the routine is still active - require.Panics(t, func() { rq.wg.Add(-1) }) -} - -// closeRq closes the done channel of a replication queue so that the run() function returns, but keeps the underlying -// queue open for testing purposes. 
-func closeRq(rq *replicationQueue) { - close(rq.done) - rq.wg.Wait() // wait for run() function to return -} - -func TestGetReplications(t *testing.T) { - t.Parallel() - - path, qm := initQueueManager(t) - t.Cleanup(func() { - shutdown(t, qm) - }) - - // Initialize 3 queues (2nd and 3rd share the same orgID and localBucket) - require.NoError(t, qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)) - require.DirExists(t, filepath.Join(path, id1.String())) - - require.NoError(t, qm.InitializeQueue(id2, maxQueueSizeBytes, orgID2, localBucketID2, 0)) - require.DirExists(t, filepath.Join(path, id1.String())) - - require.NoError(t, qm.InitializeQueue(id3, maxQueueSizeBytes, orgID2, localBucketID2, 0)) - require.DirExists(t, filepath.Join(path, id1.String())) - - // Should return one matching replication queue (repl ID 1) - expectedRepls := []platform.ID{id1} - repls := qm.GetReplications(orgID1, localBucketID1) - require.ElementsMatch(t, expectedRepls, repls) - - // Should return no matching replication queues - require.Equal(t, 0, len(qm.GetReplications(orgID1, localBucketID2))) - - // Should return two matching replication queues (repl IDs 2 and 3) - expectedRepls = []platform.ID{id2, id3} - repls = qm.GetReplications(orgID2, localBucketID2) - require.ElementsMatch(t, expectedRepls, repls) -} - -func TestReplicationStartMissingQueue(t *testing.T) { - t.Parallel() - - queuePath, qm := initQueueManager(t) - - // Create new queue - err := qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0) - require.NoError(t, err) - require.DirExists(t, filepath.Join(queuePath, id1.String())) - - // Represents the replications tracked in sqlite, this one is tracked - trackedReplications := make(map[platform.ID]*influxdb.TrackedReplication) - trackedReplications[id1] = &influxdb.TrackedReplication{ - MaxQueueSizeBytes: maxQueueSizeBytes, - MaxAgeSeconds: 0, - OrgID: orgID1, - LocalBucketID: localBucketID1, - } - - // Simulate server shutdown by closing all queues and clearing replicationQueues map - shutdown(t, qm) - - // Delete the queue to simulate restoring from a backup - err = os.RemoveAll(filepath.Join(queuePath)) - require.NoError(t, err) - - // Call startup function - err = qm.StartReplicationQueues(trackedReplications) - require.NoError(t, err) - t.Cleanup(func() { - shutdown(t, qm) - }) - - // Make sure queue is stored in map - require.NotNil(t, qm.replicationQueues[id1]) - - // Ensure queue is open by trying to remove, will error if open - err = qm.replicationQueues[id1].queue.Remove() - require.Errorf(t, err, "queue is open") -} diff --git a/replications/internal/store.go b/replications/internal/store.go deleted file mode 100644 index 24ef62836b0..00000000000 --- a/replications/internal/store.go +++ /dev/null @@ -1,305 +0,0 @@ -package internal - -import ( - "context" - "database/sql" - "errors" - "fmt" - - sq "github.com/Masterminds/squirrel" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - ierrors "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/sqlite" - "github.com/mattn/go-sqlite3" -) - -var errReplicationNotFound = &ierrors.Error{ - Code: ierrors.ENotFound, - Msg: "replication not found", -} - -var errMissingIDName = &ierrors.Error{ - Code: ierrors.EUnprocessableEntity, - Msg: "one of remote_bucket_id, remote_bucket_name should be provided", -} - -func errRemoteNotFound(id platform.ID, cause error) error { - return 
&ierrors.Error{ - Code: ierrors.EInvalid, - Msg: fmt.Sprintf("remote %q not found", id), - Err: cause, - } -} - -type Store struct { - sqlStore *sqlite.SqlStore -} - -func NewStore(sqlStore *sqlite.SqlStore) *Store { - return &Store{ - sqlStore: sqlStore, - } -} - -func (s *Store) Lock() { - s.sqlStore.Mu.Lock() -} - -func (s *Store) Unlock() { - s.sqlStore.Mu.Unlock() -} - -// ListReplications returns a list of replications matching the provided filter. -func (s *Store) ListReplications(ctx context.Context, filter influxdb.ReplicationListFilter) (*influxdb.Replications, error) { - q := sq.Select( - "id", "org_id", "name", "description", "remote_id", "local_bucket_id", "remote_bucket_id", "remote_bucket_name", - "max_queue_size_bytes", "latest_response_code", "latest_error_message", "drop_non_retryable_data", - "max_age_seconds"). - From("replications") - - if filter.OrgID.Valid() { - q = q.Where(sq.Eq{"org_id": filter.OrgID}) - } - if filter.Name != nil { - q = q.Where(sq.Eq{"name": *filter.Name}) - } - if filter.RemoteID != nil { - q = q.Where(sq.Eq{"remote_id": *filter.RemoteID}) - } - if filter.LocalBucketID != nil { - q = q.Where(sq.Eq{"local_bucket_id": *filter.LocalBucketID}) - } - - query, args, err := q.ToSql() - if err != nil { - return nil, err - } - - var rs influxdb.Replications - if err := s.sqlStore.DB.SelectContext(ctx, &rs.Replications, query, args...); err != nil { - return nil, err - } - - return &rs, nil -} - -// CreateReplication persists a new replication in the database. Caller is responsible for managing locks. -func (s *Store) CreateReplication(ctx context.Context, newID platform.ID, request influxdb.CreateReplicationRequest) (*influxdb.Replication, error) { - fields := sq.Eq{ - "id": newID, - "org_id": request.OrgID, - "name": request.Name, - "description": request.Description, - "remote_id": request.RemoteID, - "local_bucket_id": request.LocalBucketID, - "max_queue_size_bytes": request.MaxQueueSizeBytes, - "drop_non_retryable_data": request.DropNonRetryableData, - "max_age_seconds": request.MaxAgeSeconds, - "created_at": "datetime('now')", - "updated_at": "datetime('now')", - } - - if request.RemoteBucketID != platform.ID(0) { - fields["remote_bucket_id"] = request.RemoteBucketID - fields["remote_bucket_name"] = "" - } else if request.RemoteBucketName != "" { - fields["remote_bucket_id"] = nil - fields["remote_bucket_name"] = request.RemoteBucketName - } else { - return nil, errMissingIDName - } - - q := sq.Insert("replications"). - SetMap(fields). - Suffix("RETURNING id, org_id, name, description, remote_id, local_bucket_id, remote_bucket_id, remote_bucket_name, max_queue_size_bytes, drop_non_retryable_data, max_age_seconds") - - query, args, err := q.ToSql() - if err != nil { - return nil, err - } - - var r influxdb.Replication - - if err := s.sqlStore.DB.GetContext(ctx, &r, query, args...); err != nil { - if sqlErr, ok := err.(sqlite3.Error); ok && sqlErr.ExtendedCode == sqlite3.ErrConstraintForeignKey { - return nil, errRemoteNotFound(request.RemoteID, err) - } - return nil, err - } - - return &r, nil -} - -// GetReplication gets a replication by ID from the database. -func (s *Store) GetReplication(ctx context.Context, id platform.ID) (*influxdb.Replication, error) { - q := sq.Select( - "id", "org_id", "name", "description", "remote_id", "local_bucket_id", "remote_bucket_id", "remote_bucket_name", - "max_queue_size_bytes", "latest_response_code", "latest_error_message", "drop_non_retryable_data", - "max_age_seconds"). - From("replications"). 
- Where(sq.Eq{"id": id}) - - query, args, err := q.ToSql() - if err != nil { - return nil, err - } - - var r influxdb.Replication - if err := s.sqlStore.DB.GetContext(ctx, &r, query, args...); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, errReplicationNotFound - } - return nil, err - } - - return &r, nil -} - -// UpdateReplication updates a replication by ID. Caller is responsible for managing locks. -func (s *Store) UpdateReplication(ctx context.Context, id platform.ID, request influxdb.UpdateReplicationRequest) (*influxdb.Replication, error) { - updates := sq.Eq{"updated_at": sq.Expr("datetime('now')")} - if request.Name != nil { - updates["name"] = *request.Name - } - if request.Description != nil { - updates["description"] = *request.Description - } - if request.RemoteID != nil { - updates["remote_id"] = *request.RemoteID - } - if request.RemoteBucketID != nil { - updates["remote_bucket_id"] = *request.RemoteBucketID - } - if request.RemoteBucketName != nil { - updates["remote_bucket_name"] = *request.RemoteBucketName - } - if request.MaxQueueSizeBytes != nil { - updates["max_queue_size_bytes"] = *request.MaxQueueSizeBytes - } - if request.DropNonRetryableData != nil { - updates["drop_non_retryable_data"] = *request.DropNonRetryableData - } - if request.MaxAgeSeconds != nil { - updates["max_age_seconds"] = *request.MaxAgeSeconds - } - - q := sq.Update("replications").SetMap(updates).Where(sq.Eq{"id": id}). - Suffix("RETURNING id, org_id, name, description, remote_id, local_bucket_id, remote_bucket_id, remote_bucket_name, max_queue_size_bytes, drop_non_retryable_data, max_age_seconds") - - query, args, err := q.ToSql() - if err != nil { - return nil, err - } - - var r influxdb.Replication - if err := s.sqlStore.DB.GetContext(ctx, &r, query, args...); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, errReplicationNotFound - } - if sqlErr, ok := err.(sqlite3.Error); ok && request.RemoteID != nil && sqlErr.ExtendedCode == sqlite3.ErrConstraintForeignKey { - return nil, errRemoteNotFound(*request.RemoteID, err) - } - return nil, err - } - - return &r, nil -} - -// UpdateResponseInfo sets the most recent HTTP status code and error message received for a replication remote write. -func (s *Store) UpdateResponseInfo(ctx context.Context, id platform.ID, code int, message string) error { - updates := sq.Eq{ - "latest_response_code": code, - "latest_error_message": message, - } - - q := sq.Update("replications").SetMap(updates).Where(sq.Eq{"id": id}).Suffix("RETURNING id") - - query, args, err := q.ToSql() - if err != nil { - return err - } - - var d platform.ID - if err := s.sqlStore.DB.GetContext(ctx, &d, query, args...); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return errReplicationNotFound - } - return err - } - - return nil -} - -// DeleteReplication deletes a replication by ID from the database. Caller is responsible for managing locks. -func (s *Store) DeleteReplication(ctx context.Context, id platform.ID) error { - q := sq.Delete("replications").Where(sq.Eq{"id": id}).Suffix("RETURNING id") - query, args, err := q.ToSql() - if err != nil { - return err - } - - var d platform.ID - if err := s.sqlStore.DB.GetContext(ctx, &d, query, args...); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return errReplicationNotFound - } - return err - } - - return nil -} - -// DeleteBucketReplications deletes the replications for the provided localBucketID from the database. Caller is -// responsible for managing locks. 
A list of deleted IDs is returned for further processing by the caller. -func (s *Store) DeleteBucketReplications(ctx context.Context, localBucketID platform.ID) ([]platform.ID, error) { - q := sq.Delete("replications").Where(sq.Eq{"local_bucket_id": localBucketID}).Suffix("RETURNING id") - query, args, err := q.ToSql() - if err != nil { - return nil, err - } - - var deleted []platform.ID - if err := s.sqlStore.DB.SelectContext(ctx, &deleted, query, args...); err != nil { - return nil, err - } - - return deleted, nil -} - -func (s *Store) GetFullHTTPConfig(ctx context.Context, id platform.ID) (*influxdb.ReplicationHTTPConfig, error) { - q := sq.Select("c.remote_url", "c.remote_api_token", "c.remote_org_id", "c.allow_insecure_tls", "r.remote_bucket_id", "r.remote_bucket_name", "r.drop_non_retryable_data"). - From("replications r").InnerJoin("remotes c ON r.remote_id = c.id AND r.id = ?", id) - - query, args, err := q.ToSql() - if err != nil { - return nil, err - } - - var rc influxdb.ReplicationHTTPConfig - if err := s.sqlStore.DB.GetContext(ctx, &rc, query, args...); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, errReplicationNotFound - } - return nil, err - } - return &rc, nil -} - -func (s *Store) PopulateRemoteHTTPConfig(ctx context.Context, id platform.ID, target *influxdb.ReplicationHTTPConfig) error { - q := sq.Select("remote_url", "remote_api_token", "remote_org_id", "allow_insecure_tls"). - From("remotes").Where(sq.Eq{"id": id}) - query, args, err := q.ToSql() - if err != nil { - return err - } - - if err := s.sqlStore.DB.GetContext(ctx, target, query, args...); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return errRemoteNotFound(id, nil) - } - return err - } - - return nil -} diff --git a/replications/internal/store_test.go b/replications/internal/store_test.go deleted file mode 100644 index ee438ebc6ea..00000000000 --- a/replications/internal/store_test.go +++ /dev/null @@ -1,627 +0,0 @@ -package internal - -import ( - "context" - "fmt" - "net/http" - "sort" - "testing" - - sq "github.com/Masterminds/squirrel" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/sqlite" - "github.com/influxdata/influxdb/v2/sqlite/migrations" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -var ( - ctx = context.Background() - initID = platform.ID(1) - desc = "testing testing" - replication = influxdb.Replication{ - ID: initID, - OrgID: platform.ID(10), - Name: "test", - Description: &desc, - RemoteID: platform.ID(100), - LocalBucketID: platform.ID(1000), - RemoteBucketID: idPointer(99999), - MaxQueueSizeBytes: 3 * influxdb.DefaultReplicationMaxQueueSizeBytes, - MaxAgeSeconds: 0, - } - createReq = influxdb.CreateReplicationRequest{ - OrgID: replication.OrgID, - Name: replication.Name, - Description: replication.Description, - RemoteID: replication.RemoteID, - LocalBucketID: replication.LocalBucketID, - RemoteBucketID: *replication.RemoteBucketID, - MaxQueueSizeBytes: replication.MaxQueueSizeBytes, - MaxAgeSeconds: replication.MaxAgeSeconds, - } - httpConfig = influxdb.ReplicationHTTPConfig{ - RemoteURL: fmt.Sprintf("http://%s.cloud", replication.RemoteID), - RemoteToken: replication.RemoteID.String(), - RemoteOrgID: idPointer(888888), - AllowInsecureTLS: true, - RemoteBucketID: replication.RemoteBucketID, - } - newQueueSize = influxdb.MinReplicationMaxQueueSizeBytes - 
updateReq = influxdb.UpdateReplicationRequest{ - RemoteID: idPointer(200), - MaxQueueSizeBytes: &newQueueSize, - DropNonRetryableData: boolPointer(true), - } - updatedReplication = influxdb.Replication{ - ID: replication.ID, - OrgID: replication.OrgID, - Name: replication.Name, - Description: replication.Description, - RemoteID: *updateReq.RemoteID, - LocalBucketID: replication.LocalBucketID, - RemoteBucketID: replication.RemoteBucketID, - MaxQueueSizeBytes: *updateReq.MaxQueueSizeBytes, - DropNonRetryableData: true, - MaxAgeSeconds: replication.MaxAgeSeconds, - } -) - -func idPointer(id int) *platform.ID { - p := platform.ID(id) - return &p -} - -func TestCreateAndGetReplication(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - - insertRemote(t, testStore, replication.RemoteID) - - // Getting an invalid ID should return an error. - got, err := testStore.GetReplication(ctx, initID) - require.Equal(t, errReplicationNotFound, err) - require.Nil(t, got) - - // Create a replication, check the results. - created, err := testStore.CreateReplication(ctx, initID, createReq) - require.NoError(t, err) - require.Equal(t, replication, *created) - - // Read the created replication and assert it matches the creation response. - got, err = testStore.GetReplication(ctx, created.ID) - require.NoError(t, err) - require.Equal(t, replication, *got) -} - -func TestCreateAndGetReplicationName(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - - insertRemote(t, testStore, replication.RemoteID) - - // Getting an invalid ID should return an error. - got, err := testStore.GetReplication(ctx, initID) - require.Equal(t, errReplicationNotFound, err) - require.Nil(t, got) - - req := createReq - req.RemoteBucketID = platform.ID(0) - req.RemoteBucketName = "testbucket" - expected := replication - expected.RemoteBucketName = "testbucket" - expected.RemoteBucketID = nil - - // Create a replication, check the results. - created, err := testStore.CreateReplication(ctx, initID, req) - require.NoError(t, err) - require.Equal(t, expected, *created) - - // Read the created replication and assert it matches the creation response. - got, err = testStore.GetReplication(ctx, created.ID) - require.NoError(t, err) - require.Equal(t, expected, *got) -} - -func TestCreateAndGetReplicationNameAndID(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - - insertRemote(t, testStore, replication.RemoteID) - - // Getting an invalid ID should return an error. - got, err := testStore.GetReplication(ctx, initID) - require.Equal(t, errReplicationNotFound, err) - require.Nil(t, got) - - req := createReq - req.RemoteBucketID = platform.ID(100) - req.RemoteBucketName = "testbucket" - expected := replication - expected.RemoteBucketName = "" - expected.RemoteBucketID = idPointer(100) - - // Create a replication, check the results. - created, err := testStore.CreateReplication(ctx, initID, req) - require.NoError(t, err) - require.Equal(t, expected, *created) - - // Read the created replication and assert it matches the creation response. - got, err = testStore.GetReplication(ctx, created.ID) - require.NoError(t, err) - require.Equal(t, expected, *got) -} - -func TestCreateAndGetReplicationNameError(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - - insertRemote(t, testStore, replication.RemoteID) - - // Getting an invalid ID should return an error. 
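The Store exercised by these tests composes its SQL with Masterminds/squirrel rather than hand-written strings. A minimal sketch of that pattern follows; the column list is trimmed and the filter value is invented for illustration, but the table and column names mirror the replications schema used above.

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	// Build the query incrementally, the way ListReplications does,
	// adding WHERE clauses only for filters that were actually supplied.
	q := sq.Select("id", "name").From("replications")

	orgID := 10 // hypothetical filter value
	q = q.Where(sq.Eq{"org_id": orgID})

	query, args, err := q.ToSql()
	if err != nil {
		panic(err)
	}

	// e.g. "SELECT id, name FROM replications WHERE org_id = ?" [10]
	fmt.Println(query, args)
}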
- got, err := testStore.GetReplication(ctx, initID) - require.Equal(t, errReplicationNotFound, err) - require.Nil(t, got) - - req := createReq - req.RemoteBucketID = platform.ID(0) - req.RemoteBucketName = "" - - // Create a replication, should fail due to missing params - created, err := testStore.CreateReplication(ctx, initID, req) - require.Equal(t, errMissingIDName, err) - require.Nil(t, created) -} - -func TestCreateMissingRemote(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - - created, err := testStore.CreateReplication(ctx, initID, createReq) - require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("remote %q not found", createReq.RemoteID)) - require.Nil(t, created) - - // Make sure nothing was persisted. - got, err := testStore.GetReplication(ctx, initID) - require.Equal(t, errReplicationNotFound, err) - require.Nil(t, got) -} - -func TestUpdateAndGetReplication(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - - insertRemote(t, testStore, replication.RemoteID) - insertRemote(t, testStore, updatedReplication.RemoteID) - - // Updating a nonexistent ID fails. - updated, err := testStore.UpdateReplication(ctx, initID, updateReq) - require.Equal(t, errReplicationNotFound, err) - require.Nil(t, updated) - - // Create a replication. - created, err := testStore.CreateReplication(ctx, initID, createReq) - require.NoError(t, err) - require.Equal(t, replication, *created) - - // Update the replication. - updated, err = testStore.UpdateReplication(ctx, created.ID, updateReq) - require.NoError(t, err) - require.Equal(t, updatedReplication, *updated) -} - -func TestUpdateResponseInfo(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - - insertRemote(t, testStore, replication.RemoteID) - insertRemote(t, testStore, updatedReplication.RemoteID) - - testCode := http.StatusBadRequest - testMsg := "some error message" - - // Updating a nonexistent ID fails. - err := testStore.UpdateResponseInfo(ctx, initID, testCode, testMsg) - require.Equal(t, errReplicationNotFound, err) - - // Create a replication. - created, err := testStore.CreateReplication(ctx, initID, createReq) - require.NoError(t, err) - require.Equal(t, replication, *created) - - // Update the replication response info. - err = testStore.UpdateResponseInfo(ctx, initID, testCode, testMsg) - require.NoError(t, err) - - // Check the updated response code and error message. - got, err := testStore.GetReplication(ctx, initID) - require.NoError(t, err) - require.Equal(t, int32(testCode), *got.LatestResponseCode) - require.Equal(t, testMsg, *got.LatestErrorMessage) -} - -func TestUpdateMissingRemote(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - - insertRemote(t, testStore, replication.RemoteID) - - // Create a replication. - created, err := testStore.CreateReplication(ctx, initID, createReq) - require.NoError(t, err) - require.Equal(t, replication, *created) - - // Attempt to update the replication to point at a nonexistent remote. - updated, err := testStore.UpdateReplication(ctx, created.ID, updateReq) - require.Error(t, err) - require.Contains(t, err.Error(), fmt.Sprintf("remote %q not found", *updateReq.RemoteID)) - require.Nil(t, updated) - - // Make sure nothing changed in the DB. 
- got, err := testStore.GetReplication(ctx, created.ID) - require.NoError(t, err) - require.Equal(t, replication, *got) -} - -func TestUpdateNoop(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - - insertRemote(t, testStore, replication.RemoteID) - - // Create a replication. - created, err := testStore.CreateReplication(ctx, initID, createReq) - require.NoError(t, err) - require.Equal(t, replication, *created) - - // Send a no-op update, assert nothing changed. - updated, err := testStore.UpdateReplication(ctx, created.ID, influxdb.UpdateReplicationRequest{}) - require.NoError(t, err) - require.Equal(t, replication, *updated) -} - -func TestDeleteReplication(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - - insertRemote(t, testStore, replication.RemoteID) - - // Deleting a nonexistent ID should return an error. - require.Equal(t, errReplicationNotFound, testStore.DeleteReplication(ctx, initID)) - - // Create a replication, then delete it. - created, err := testStore.CreateReplication(ctx, initID, createReq) - require.NoError(t, err) - require.Equal(t, replication, *created) - require.NoError(t, testStore.DeleteReplication(ctx, created.ID)) - - // Looking up the ID should again produce an error. - got, err := testStore.GetReplication(ctx, created.ID) - require.Equal(t, errReplicationNotFound, err) - require.Nil(t, got) -} - -func TestDeleteReplications(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - - // Deleting when there is no bucket is OK. - _, err := testStore.DeleteBucketReplications(ctx, replication.LocalBucketID) - require.NoError(t, err) - - // Register a handful of replications. - createReq2, createReq3 := createReq, createReq - createReq2.Name, createReq3.Name = "test2", "test3" - createReq2.LocalBucketID = platform.ID(77777) - createReq3.RemoteID = updatedReplication.RemoteID - insertRemote(t, testStore, createReq.RemoteID) - insertRemote(t, testStore, createReq3.RemoteID) - - for _, req := range []influxdb.CreateReplicationRequest{createReq, createReq2, createReq3} { - _, err := testStore.CreateReplication(ctx, snowflake.NewIDGenerator().ID(), req) - require.NoError(t, err) - } - - listed, err := testStore.ListReplications(ctx, influxdb.ReplicationListFilter{OrgID: replication.OrgID}) - require.NoError(t, err) - require.Len(t, listed.Replications, 3) - - // Delete 2/3 by bucket ID. - deleted, err := testStore.DeleteBucketReplications(ctx, createReq.LocalBucketID) - require.NoError(t, err) - require.Len(t, deleted, 2) - - // Ensure they were deleted. 
- listed, err = testStore.ListReplications(ctx, influxdb.ReplicationListFilter{OrgID: replication.OrgID}) - require.NoError(t, err) - require.Len(t, listed.Replications, 1) - require.Equal(t, createReq2.LocalBucketID, listed.Replications[0].LocalBucketID) -} - -func TestListReplications(t *testing.T) { - t.Parallel() - - createReq2, createReq3 := createReq, createReq - createReq2.Name, createReq3.Name = "test2", "test3" - createReq2.LocalBucketID = platform.ID(77777) - createReq3.RemoteID = updatedReplication.RemoteID - - setup := func(t *testing.T, testStore *Store) []influxdb.Replication { - insertRemote(t, testStore, createReq.RemoteID) - insertRemote(t, testStore, createReq3.RemoteID) - - var allReplications []influxdb.Replication - for _, req := range []influxdb.CreateReplicationRequest{createReq, createReq2, createReq3} { - created, err := testStore.CreateReplication(ctx, snowflake.NewIDGenerator().ID(), req) - require.NoError(t, err) - allReplications = append(allReplications, *created) - } - return allReplications - } - - t.Run("list all for org", func(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - allRepls := setup(t, testStore) - - listed, err := testStore.ListReplications(ctx, influxdb.ReplicationListFilter{OrgID: createReq.OrgID}) - require.NoError(t, err) - - // The order from sqlite is not the same, so for simplicity we sort both lists before comparing. - sort.Slice(allRepls, func(i int, j int) bool { - return allRepls[i].ID < allRepls[j].ID - }) - sort.Slice(listed.Replications, func(i int, j int) bool { - return listed.Replications[i].ID < listed.Replications[j].ID - }) - require.Equal(t, influxdb.Replications{Replications: allRepls}, *listed) - }) - - t.Run("list all with empty filter", func(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - allRepls := setup(t, testStore) - - otherOrgReq := createReq - otherOrgReq.OrgID = platform.ID(12345) - created, err := testStore.CreateReplication(ctx, snowflake.NewIDGenerator().ID(), otherOrgReq) - require.NoError(t, err) - allRepls = append(allRepls, *created) - - listed, err := testStore.ListReplications(ctx, influxdb.ReplicationListFilter{}) - require.NoError(t, err) - require.Equal(t, influxdb.Replications{Replications: allRepls}, *listed) - }) - - t.Run("list by name", func(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - allRepls := setup(t, testStore) - - listed, err := testStore.ListReplications(ctx, influxdb.ReplicationListFilter{ - OrgID: createReq.OrgID, - Name: &createReq2.Name, - }) - require.NoError(t, err) - require.Equal(t, influxdb.Replications{Replications: allRepls[1:2]}, *listed) - }) - - t.Run("list by remote ID", func(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - allRepls := setup(t, testStore) - - listed, err := testStore.ListReplications(ctx, influxdb.ReplicationListFilter{ - OrgID: createReq.OrgID, - RemoteID: &createReq.RemoteID, - }) - require.NoError(t, err) - require.Equal(t, influxdb.Replications{Replications: allRepls[0:2]}, *listed) - }) - - t.Run("list by bucket ID", func(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - allRepls := setup(t, testStore) - - listed, err := testStore.ListReplications(ctx, influxdb.ReplicationListFilter{ - OrgID: createReq.OrgID, - LocalBucketID: &createReq.LocalBucketID, - }) - require.NoError(t, err) - require.Equal(t, influxdb.Replications{Replications: append(allRepls[0:1], allRepls[2:]...)}, *listed) - }) - - t.Run("list by other org ID", func(t *testing.T) { - t.Parallel() - - 
testStore := newTestStore(t) - - listed, err := testStore.ListReplications(ctx, influxdb.ReplicationListFilter{OrgID: platform.ID(2)}) - require.NoError(t, err) - require.Equal(t, influxdb.Replications{}, *listed) - }) -} - -func TestMigrateDownFromReplicationsWithName(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - - insertRemote(t, testStore, replication.RemoteID) - - req := createReq - req.RemoteBucketID = platform.ID(100) - _, err := testStore.CreateReplication(ctx, platform.ID(10), req) - require.NoError(t, err) - - req.RemoteBucketID = platform.ID(0) - req.RemoteBucketName = "testbucket" - req.Name = "namedrepl" - _, err = testStore.CreateReplication(ctx, platform.ID(20), req) - require.NoError(t, err) - - replications, err := testStore.ListReplications(context.Background(), influxdb.ReplicationListFilter{OrgID: replication.OrgID}) - require.NoError(t, err) - require.Equal(t, 2, len(replications.Replications)) - - logger := zaptest.NewLogger(t) - sqliteMigrator := sqlite.NewMigrator(testStore.sqlStore, logger) - require.NoError(t, sqliteMigrator.Down(ctx, 5, migrations.AllDown)) - - // Can't use ListReplications because it expects the `remote_bucket_name` column to be there in this version of influx. - q := sq.Select( - "id", "org_id", "name", "description", "remote_id", "local_bucket_id", "remote_bucket_id", - "max_queue_size_bytes", "latest_response_code", "latest_error_message", "drop_non_retryable_data", - "max_age_seconds"). - From("replications") - - q = q.Where(sq.Eq{"org_id": replication.OrgID}) - - query, args, err := q.ToSql() - require.NoError(t, err) - var rs influxdb.Replications - if err := testStore.sqlStore.DB.SelectContext(ctx, &rs.Replications, query, args...); err != nil { - require.NoError(t, err) - } - require.Equal(t, 1, len(rs.Replications)) - require.Equal(t, platform.ID(10), rs.Replications[0].ID) -} - -func TestMigrateUpToRemotesNullRemoteOrg(t *testing.T) { - sqlStore := sqlite.NewTestStore(t) - logger := zaptest.NewLogger(t) - sqliteMigrator := sqlite.NewMigrator(sqlStore, logger) - require.NoError(t, sqliteMigrator.UpUntil(ctx, 7, migrations.AllUp)) - - // Make sure foreign-key checking is enabled. 
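SQLite leaves foreign-key enforcement off by default, so the tests switch it on per connection with the PRAGMA shown on the next line. A self-contained illustration of why that matters, using database/sql and mattn/go-sqlite3 directly rather than the repo's sqlite wrapper; the table shapes loosely mirror the remotes/replications tables above.

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	// Each connection gets its own in-memory database, so pin the pool to
	// one connection or the PRAGMA and the tables may land in different DBs.
	db.SetMaxOpenConns(1)

	if _, err := db.Exec("PRAGMA foreign_keys = ON;"); err != nil {
		panic(err)
	}
	if _, err := db.Exec("CREATE TABLE remotes (id INTEGER PRIMARY KEY)"); err != nil {
		panic(err)
	}
	if _, err := db.Exec("CREATE TABLE replications (id INTEGER PRIMARY KEY, remote_id INTEGER NOT NULL REFERENCES remotes (id))"); err != nil {
		panic(err)
	}

	// No remote with id 42 exists, so this insert is rejected.
	_, err = db.Exec("INSERT INTO replications (id, remote_id) VALUES (1, 42)")
	fmt.Println(err) // FOREIGN KEY constraint failed
}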
- _, err := sqlStore.DB.Exec("PRAGMA foreign_keys = ON;") - require.NoError(t, err) - - testStore := NewStore(sqlStore) - - insertRemote(t, testStore, replication.RemoteID) - - req := createReq - req.RemoteBucketID = platform.ID(100) - _, err = testStore.CreateReplication(ctx, platform.ID(10), req) - require.NoError(t, err) - - req.RemoteBucketID = platform.ID(0) - req.RemoteBucketName = "testbucket" - req.Name = "namedrepl" - _, err = testStore.CreateReplication(ctx, platform.ID(20), req) - require.NoError(t, err) - - replications, err := testStore.ListReplications(context.Background(), influxdb.ReplicationListFilter{OrgID: replication.OrgID}) - require.NoError(t, err) - require.Equal(t, 2, len(replications.Replications)) - - require.NoError(t, sqliteMigrator.UpUntil(ctx, 8, migrations.AllUp)) -} - -func TestGetFullHTTPConfig(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - - // Does not exist returns the appropriate error - _, err := testStore.GetFullHTTPConfig(ctx, initID) - require.Equal(t, errReplicationNotFound, err) - - // Valid result - insertRemote(t, testStore, replication.RemoteID) - created, err := testStore.CreateReplication(ctx, initID, createReq) - require.NoError(t, err) - require.Equal(t, replication, *created) - - conf, err := testStore.GetFullHTTPConfig(ctx, initID) - require.NoError(t, err) - require.Equal(t, httpConfig, *conf) -} - -func TestPopulateRemoteHTTPConfig(t *testing.T) { - t.Parallel() - - testStore := newTestStore(t) - - emptyConfig := &influxdb.ReplicationHTTPConfig{RemoteOrgID: idPointer(0)} - - // Remote not found returns the appropriate error - target := &influxdb.ReplicationHTTPConfig{} - err := testStore.PopulateRemoteHTTPConfig(ctx, replication.RemoteID, target) - require.Equal(t, errRemoteNotFound(replication.RemoteID, nil), err) - require.Equal(t, emptyConfig, target) - - // Valid result - want := influxdb.ReplicationHTTPConfig{ - RemoteURL: httpConfig.RemoteURL, - RemoteToken: httpConfig.RemoteToken, - RemoteOrgID: httpConfig.RemoteOrgID, - AllowInsecureTLS: httpConfig.AllowInsecureTLS, - } - insertRemote(t, testStore, replication.RemoteID) - err = testStore.PopulateRemoteHTTPConfig(ctx, replication.RemoteID, target) - require.NoError(t, err) - require.Equal(t, want, *target) -} - -func newTestStore(t *testing.T) *Store { - sqlStore := sqlite.NewTestStore(t) - logger := zaptest.NewLogger(t) - sqliteMigrator := sqlite.NewMigrator(sqlStore, logger) - require.NoError(t, sqliteMigrator.Up(ctx, migrations.AllUp)) - - // Make sure foreign-key checking is enabled. - _, err := sqlStore.DB.Exec("PRAGMA foreign_keys = ON;") - require.NoError(t, err) - - return NewStore(sqlStore) -} - -func insertRemote(t *testing.T, store *Store, id platform.ID) { - sqlStore := store.sqlStore - - sqlStore.Mu.Lock() - defer sqlStore.Mu.Unlock() - - q := sq.Insert("remotes").SetMap(sq.Eq{ - "id": id, - "org_id": replication.OrgID, - "name": fmt.Sprintf("foo-%s", id), - "remote_url": fmt.Sprintf("http://%s.cloud", id), - "remote_api_token": id.String(), - "remote_org_id": platform.ID(888888), - "allow_insecure_tls": true, - "created_at": "datetime('now')", - "updated_at": "datetime('now')", - }) - query, args, err := q.ToSql() - require.NoError(t, err) - - _, err = sqlStore.DB.Exec(query, args...) 
- require.NoError(t, err) -} - -func boolPointer(b bool) *bool { - return &b -} diff --git a/replications/internal/validator.go b/replications/internal/validator.go deleted file mode 100644 index 77487e2f49d..00000000000 --- a/replications/internal/validator.go +++ /dev/null @@ -1,21 +0,0 @@ -package internal - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/replications/remotewrite" -) - -func NewValidator() *noopWriteValidator { - return &noopWriteValidator{} -} - -// noopWriteValidator checks if replication parameters are valid by attempting to write an empty payload -// to the remote host using the configured information. -type noopWriteValidator struct{} - -func (s noopWriteValidator) ValidateReplication(ctx context.Context, config *influxdb.ReplicationHTTPConfig) error { - _, err := remotewrite.PostWrite(ctx, config, []byte{}, remotewrite.DefaultTimeout) - return err -} diff --git a/replications/internal/validator_test.go b/replications/internal/validator_test.go deleted file mode 100644 index afdd011d558..00000000000 --- a/replications/internal/validator_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package internal - -import ( - "context" - "fmt" - "net/http" - "net/http/httptest" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/stretchr/testify/require" -) - -func TestValidateReplication(t *testing.T) { - tests := []struct { - status int - valid bool - }{ - {http.StatusNoContent, true}, - {http.StatusOK, false}, - {http.StatusBadRequest, false}, - {http.StatusTeapot, false}, - {http.StatusInternalServerError, false}, - } - - for _, tt := range tests { - t.Run(fmt.Sprintf("status code %d", tt.status), func(t *testing.T) { - svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(tt.status) - })) - defer svr.Close() - - validator := noopWriteValidator{} - - config := &influxdb.ReplicationHTTPConfig{ - RemoteURL: svr.URL, - } - - err := validator.ValidateReplication(context.Background(), config) - if tt.valid { - require.NoError(t, err) - return - } - - require.Error(t, err) - }) - } -} diff --git a/replications/metrics/replications_metrics.go b/replications/metrics/replications_metrics.go deleted file mode 100644 index a15f1918392..00000000000 --- a/replications/metrics/replications_metrics.go +++ /dev/null @@ -1,122 +0,0 @@ -package metrics - -import ( - "strconv" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/prometheus/client_golang/prometheus" -) - -type ReplicationsMetrics struct { - TotalPointsQueued *prometheus.CounterVec - TotalBytesQueued *prometheus.CounterVec - CurrentBytesQueued *prometheus.GaugeVec - RemoteWriteErrors *prometheus.CounterVec - RemoteWriteBytesSent *prometheus.CounterVec - RemoteWriteBytesDropped *prometheus.CounterVec - PointsFailedToQueue *prometheus.CounterVec - BytesFailedToQueue *prometheus.CounterVec -} - -func NewReplicationsMetrics() *ReplicationsMetrics { - const namespace = "replications" - const subsystem = "queue" - - return &ReplicationsMetrics{ - TotalPointsQueued: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "total_points_queued", - Help: "Sum of all points that have been successfully added to the replication stream queue", - }, []string{"replicationID"}), - TotalBytesQueued: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "total_bytes_queued", - 
Help: "Sum of all bytes that have been successfully added to the replication stream queue", - }, []string{"replicationID"}), - CurrentBytesQueued: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "current_bytes_queued", - Help: "Current number of bytes in the replication stream queue remaining to be processed", - }, []string{"replicationID"}), - RemoteWriteErrors: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "remote_write_errors", - Help: "Error codes returned from attempted remote writes", - }, []string{"replicationID", "code"}), - RemoteWriteBytesSent: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "remote_write_bytes_sent", - Help: "Bytes of data successfully sent to the remote by the replication stream", - }, []string{"replicationID"}), - RemoteWriteBytesDropped: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "remote_write_bytes_dropped", - Help: "Bytes of data dropped due to remote write failures", - }, []string{"replicationID"}), - PointsFailedToQueue: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "points_failed_to_queue", - Help: "Sum of all points that could not be added to the local replication queue", - }, []string{"replicationID"}), - BytesFailedToQueue: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "bytes_failed_to_queue", - Help: "Sum of all bytes that could not be added to the local replication queue", - }, []string{"replicationID"}), - } -} - -// PrometheusCollectors satisfies the prom.PrometheusCollector interface. -func (rm *ReplicationsMetrics) PrometheusCollectors() []prometheus.Collector { - return []prometheus.Collector{ - rm.TotalPointsQueued, - rm.TotalBytesQueued, - rm.CurrentBytesQueued, - rm.RemoteWriteErrors, - rm.RemoteWriteBytesSent, - rm.RemoteWriteBytesDropped, - rm.PointsFailedToQueue, - rm.BytesFailedToQueue, - } -} - -// EnqueueData updates the metrics when adding new data to a replication queue. -func (rm *ReplicationsMetrics) EnqueueData(replicationID platform.ID, numBytes, numPoints int, queueSize int64) { - rm.TotalPointsQueued.WithLabelValues(replicationID.String()).Add(float64(numPoints)) - rm.TotalBytesQueued.WithLabelValues(replicationID.String()).Add(float64(numBytes)) - rm.CurrentBytesQueued.WithLabelValues(replicationID.String()).Set(float64(queueSize)) -} - -// Dequeue updates the metrics when data has been removed from the queue. -func (rm *ReplicationsMetrics) Dequeue(replicationID platform.ID, queueSize int64) { - rm.CurrentBytesQueued.WithLabelValues(replicationID.String()).Set(float64(queueSize)) -} - -// EnqueueError updates the metrics when data fails to be added to the replication queue. -func (rm *ReplicationsMetrics) EnqueueError(replicationID platform.ID, numBytes, numPoints int) { - rm.PointsFailedToQueue.WithLabelValues(replicationID.String()).Add(float64(numPoints)) - rm.BytesFailedToQueue.WithLabelValues(replicationID.String()).Add(float64(numBytes)) -} - -// RemoteWriteError increments the error code counter for the replication. 
-func (rm *ReplicationsMetrics) RemoteWriteError(replicationID platform.ID, errorCode int) { - rm.RemoteWriteErrors.WithLabelValues(replicationID.String(), strconv.Itoa(errorCode)).Inc() -} - -// RemoteWriteSent increases the total count of bytes sent following a successful remote write -func (rm *ReplicationsMetrics) RemoteWriteSent(replicationID platform.ID, bytes int) { - rm.RemoteWriteBytesSent.WithLabelValues(replicationID.String()).Add(float64(bytes)) -} - -// RemoteWriteDropped increases the total count of bytes dropped when data is dropped -func (rm *ReplicationsMetrics) RemoteWriteDropped(replicationID platform.ID, bytes int) { - rm.RemoteWriteBytesDropped.WithLabelValues(replicationID.String()).Add(float64(bytes)) -} diff --git a/replications/mock/bucket_service.go b/replications/mock/bucket_service.go deleted file mode 100644 index e87ffdab817..00000000000 --- a/replications/mock/bucket_service.go +++ /dev/null @@ -1,76 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/replications (interfaces: BucketService) - -// Package mock is a generated GoMock package. -package mock - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - platform "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockBucketService is a mock of BucketService interface. -type MockBucketService struct { - ctrl *gomock.Controller - recorder *MockBucketServiceMockRecorder -} - -// MockBucketServiceMockRecorder is the mock recorder for MockBucketService. -type MockBucketServiceMockRecorder struct { - mock *MockBucketService -} - -// NewMockBucketService creates a new mock instance. -func NewMockBucketService(ctrl *gomock.Controller) *MockBucketService { - mock := &MockBucketService{ctrl: ctrl} - mock.recorder = &MockBucketServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockBucketService) EXPECT() *MockBucketServiceMockRecorder { - return m.recorder -} - -// FindBucketByID mocks base method. -func (m *MockBucketService) FindBucketByID(arg0 context.Context, arg1 platform.ID) (*influxdb.Bucket, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FindBucketByID", arg0, arg1) - ret0, _ := ret[0].(*influxdb.Bucket) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FindBucketByID indicates an expected call of FindBucketByID. -func (mr *MockBucketServiceMockRecorder) FindBucketByID(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBucketByID", reflect.TypeOf((*MockBucketService)(nil).FindBucketByID), arg0, arg1) -} - -// RLock mocks base method. -func (m *MockBucketService) RLock() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RLock") -} - -// RLock indicates an expected call of RLock. -func (mr *MockBucketServiceMockRecorder) RLock() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RLock", reflect.TypeOf((*MockBucketService)(nil).RLock)) -} - -// RUnlock mocks base method. -func (m *MockBucketService) RUnlock() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "RUnlock") -} - -// RUnlock indicates an expected call of RUnlock. 
-func (mr *MockBucketServiceMockRecorder) RUnlock() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RUnlock", reflect.TypeOf((*MockBucketService)(nil).RUnlock)) -} diff --git a/replications/mock/http_config_store.go b/replications/mock/http_config_store.go deleted file mode 100644 index e3f85379829..00000000000 --- a/replications/mock/http_config_store.go +++ /dev/null @@ -1,66 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/replications/remotewrite (interfaces: HttpConfigStore) - -// Package mock is a generated GoMock package. -package mock - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - platform "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockHttpConfigStore is a mock of HttpConfigStore interface. -type MockHttpConfigStore struct { - ctrl *gomock.Controller - recorder *MockHttpConfigStoreMockRecorder -} - -// MockHttpConfigStoreMockRecorder is the mock recorder for MockHttpConfigStore. -type MockHttpConfigStoreMockRecorder struct { - mock *MockHttpConfigStore -} - -// NewMockHttpConfigStore creates a new mock instance. -func NewMockHttpConfigStore(ctrl *gomock.Controller) *MockHttpConfigStore { - mock := &MockHttpConfigStore{ctrl: ctrl} - mock.recorder = &MockHttpConfigStoreMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockHttpConfigStore) EXPECT() *MockHttpConfigStoreMockRecorder { - return m.recorder -} - -// GetFullHTTPConfig mocks base method. -func (m *MockHttpConfigStore) GetFullHTTPConfig(arg0 context.Context, arg1 platform.ID) (*influxdb.ReplicationHTTPConfig, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFullHTTPConfig", arg0, arg1) - ret0, _ := ret[0].(*influxdb.ReplicationHTTPConfig) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetFullHTTPConfig indicates an expected call of GetFullHTTPConfig. -func (mr *MockHttpConfigStoreMockRecorder) GetFullHTTPConfig(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFullHTTPConfig", reflect.TypeOf((*MockHttpConfigStore)(nil).GetFullHTTPConfig), arg0, arg1) -} - -// UpdateResponseInfo mocks base method. -func (m *MockHttpConfigStore) UpdateResponseInfo(arg0 context.Context, arg1 platform.ID, arg2 int, arg3 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateResponseInfo", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpdateResponseInfo indicates an expected call of UpdateResponseInfo. -func (mr *MockHttpConfigStoreMockRecorder) UpdateResponseInfo(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateResponseInfo", reflect.TypeOf((*MockHttpConfigStore)(nil).UpdateResponseInfo), arg0, arg1, arg2, arg3) -} diff --git a/replications/mock/points_writer.go b/replications/mock/points_writer.go deleted file mode 100644 index 6397c1bd967..00000000000 --- a/replications/mock/points_writer.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/storage (interfaces: PointsWriter) - -// Package mock is a generated GoMock package. 
-package mock - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - platform "github.com/influxdata/influxdb/v2/kit/platform" - models "github.com/influxdata/influxdb/v2/models" -) - -// MockPointsWriter is a mock of PointsWriter interface. -type MockPointsWriter struct { - ctrl *gomock.Controller - recorder *MockPointsWriterMockRecorder -} - -// MockPointsWriterMockRecorder is the mock recorder for MockPointsWriter. -type MockPointsWriterMockRecorder struct { - mock *MockPointsWriter -} - -// NewMockPointsWriter creates a new mock instance. -func NewMockPointsWriter(ctrl *gomock.Controller) *MockPointsWriter { - mock := &MockPointsWriter{ctrl: ctrl} - mock.recorder = &MockPointsWriterMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockPointsWriter) EXPECT() *MockPointsWriterMockRecorder { - return m.recorder -} - -// WritePoints mocks base method. -func (m *MockPointsWriter) WritePoints(arg0 context.Context, arg1, arg2 platform.ID, arg3 []models.Point) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WritePoints", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(error) - return ret0 -} - -// WritePoints indicates an expected call of WritePoints. -func (mr *MockPointsWriterMockRecorder) WritePoints(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WritePoints", reflect.TypeOf((*MockPointsWriter)(nil).WritePoints), arg0, arg1, arg2, arg3) -} diff --git a/replications/mock/queue_management.go b/replications/mock/queue_management.go deleted file mode 100644 index 2e8c01d30e8..00000000000 --- a/replications/mock/queue_management.go +++ /dev/null @@ -1,164 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/replications (interfaces: DurableQueueManager) - -// Package mock is a generated GoMock package. -package mock - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - platform "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockDurableQueueManager is a mock of DurableQueueManager interface. -type MockDurableQueueManager struct { - ctrl *gomock.Controller - recorder *MockDurableQueueManagerMockRecorder -} - -// MockDurableQueueManagerMockRecorder is the mock recorder for MockDurableQueueManager. -type MockDurableQueueManagerMockRecorder struct { - mock *MockDurableQueueManager -} - -// NewMockDurableQueueManager creates a new mock instance. -func NewMockDurableQueueManager(ctrl *gomock.Controller) *MockDurableQueueManager { - mock := &MockDurableQueueManager{ctrl: ctrl} - mock.recorder = &MockDurableQueueManagerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDurableQueueManager) EXPECT() *MockDurableQueueManagerMockRecorder { - return m.recorder -} - -// CloseAll mocks base method. -func (m *MockDurableQueueManager) CloseAll() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloseAll") - ret0, _ := ret[0].(error) - return ret0 -} - -// CloseAll indicates an expected call of CloseAll. 
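All of the files in this part of the diff follow the same MockGen layout: a `Mock<Interface>` struct bound to a `gomock.Controller`, a paired `MockRecorder`, and one `EXPECT()`-driven recorder method per interface method. As a reference for how such a generated mock is consumed, here is a stripped-down sketch in the style of the tests deleted later in this diff; it assumes the canonical `github.com/influxdata/influxdb/v2` module path and exercises the mock directly just to show the mechanics.

```go
package replications_test

import (
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/influxdata/influxdb/v2/kit/platform"
	replicationsMock "github.com/influxdata/influxdb/v2/replications/mock"
	"github.com/stretchr/testify/require"
)

func TestDurableQueueManagerMockSketch(t *testing.T) {
	// Bind a controller to the test, build the generated mock, and register
	// an expectation before calling the method under test.
	ctrl := gomock.NewController(t)
	mgr := replicationsMock.NewMockDurableQueueManager(ctrl)

	id := platform.ID(1)
	// Expect exactly one DeleteQueue call for this ID, returning nil.
	mgr.EXPECT().DeleteQueue(id).Return(nil)

	require.NoError(t, mgr.DeleteQueue(id))
	// With gomock >= 1.5 the controller verifies expectations via t.Cleanup,
	// so an explicit ctrl.Finish() is optional here.
}
```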
-func (mr *MockDurableQueueManagerMockRecorder) CloseAll() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseAll", reflect.TypeOf((*MockDurableQueueManager)(nil).CloseAll)) -} - -// CurrentQueueSizes mocks base method. -func (m *MockDurableQueueManager) CurrentQueueSizes(arg0 []platform.ID) (map[platform.ID]int64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CurrentQueueSizes", arg0) - ret0, _ := ret[0].(map[platform.ID]int64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CurrentQueueSizes indicates an expected call of CurrentQueueSizes. -func (mr *MockDurableQueueManagerMockRecorder) CurrentQueueSizes(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentQueueSizes", reflect.TypeOf((*MockDurableQueueManager)(nil).CurrentQueueSizes), arg0) -} - -// DeleteQueue mocks base method. -func (m *MockDurableQueueManager) DeleteQueue(arg0 platform.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteQueue", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteQueue indicates an expected call of DeleteQueue. -func (mr *MockDurableQueueManagerMockRecorder) DeleteQueue(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteQueue", reflect.TypeOf((*MockDurableQueueManager)(nil).DeleteQueue), arg0) -} - -// EnqueueData mocks base method. -func (m *MockDurableQueueManager) EnqueueData(arg0 platform.ID, arg1 []byte, arg2 int) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "EnqueueData", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// EnqueueData indicates an expected call of EnqueueData. -func (mr *MockDurableQueueManagerMockRecorder) EnqueueData(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnqueueData", reflect.TypeOf((*MockDurableQueueManager)(nil).EnqueueData), arg0, arg1, arg2) -} - -// GetReplications mocks base method. -func (m *MockDurableQueueManager) GetReplications(arg0, arg1 platform.ID) []platform.ID { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetReplications", arg0, arg1) - ret0, _ := ret[0].([]platform.ID) - return ret0 -} - -// GetReplications indicates an expected call of GetReplications. -func (mr *MockDurableQueueManagerMockRecorder) GetReplications(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReplications", reflect.TypeOf((*MockDurableQueueManager)(nil).GetReplications), arg0, arg1) -} - -// InitializeQueue mocks base method. -func (m *MockDurableQueueManager) InitializeQueue(arg0 platform.ID, arg1 int64, arg2, arg3 platform.ID, arg4 int64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "InitializeQueue", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(error) - return ret0 -} - -// InitializeQueue indicates an expected call of InitializeQueue. -func (mr *MockDurableQueueManagerMockRecorder) InitializeQueue(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitializeQueue", reflect.TypeOf((*MockDurableQueueManager)(nil).InitializeQueue), arg0, arg1, arg2, arg3, arg4) -} - -// RemainingQueueSizes mocks base method. 
-func (m *MockDurableQueueManager) RemainingQueueSizes(arg0 []platform.ID) (map[platform.ID]int64, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RemainingQueueSizes", arg0) - ret0, _ := ret[0].(map[platform.ID]int64) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RemainingQueueSizes indicates an expected call of RemainingQueueSizes. -func (mr *MockDurableQueueManagerMockRecorder) RemainingQueueSizes(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemainingQueueSizes", reflect.TypeOf((*MockDurableQueueManager)(nil).RemainingQueueSizes), arg0) -} - -// StartReplicationQueues mocks base method. -func (m *MockDurableQueueManager) StartReplicationQueues(arg0 map[platform.ID]*influxdb.TrackedReplication) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StartReplicationQueues", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// StartReplicationQueues indicates an expected call of StartReplicationQueues. -func (mr *MockDurableQueueManagerMockRecorder) StartReplicationQueues(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartReplicationQueues", reflect.TypeOf((*MockDurableQueueManager)(nil).StartReplicationQueues), arg0) -} - -// UpdateMaxQueueSize mocks base method. -func (m *MockDurableQueueManager) UpdateMaxQueueSize(arg0 platform.ID, arg1 int64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateMaxQueueSize", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpdateMaxQueueSize indicates an expected call of UpdateMaxQueueSize. -func (mr *MockDurableQueueManagerMockRecorder) UpdateMaxQueueSize(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateMaxQueueSize", reflect.TypeOf((*MockDurableQueueManager)(nil).UpdateMaxQueueSize), arg0, arg1) -} diff --git a/replications/mock/service.go b/replications/mock/service.go deleted file mode 100644 index 001edb3fd68..00000000000 --- a/replications/mock/service.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/replications/transport (interfaces: ReplicationService) - -// Package mock is a generated GoMock package. -package mock - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - platform "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockReplicationService is a mock of ReplicationService interface. -type MockReplicationService struct { - ctrl *gomock.Controller - recorder *MockReplicationServiceMockRecorder -} - -// MockReplicationServiceMockRecorder is the mock recorder for MockReplicationService. -type MockReplicationServiceMockRecorder struct { - mock *MockReplicationService -} - -// NewMockReplicationService creates a new mock instance. -func NewMockReplicationService(ctrl *gomock.Controller) *MockReplicationService { - mock := &MockReplicationService{ctrl: ctrl} - mock.recorder = &MockReplicationServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockReplicationService) EXPECT() *MockReplicationServiceMockRecorder { - return m.recorder -} - -// CreateReplication mocks base method. 
-func (m *MockReplicationService) CreateReplication(arg0 context.Context, arg1 influxdb.CreateReplicationRequest) (*influxdb.Replication, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateReplication", arg0, arg1) - ret0, _ := ret[0].(*influxdb.Replication) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateReplication indicates an expected call of CreateReplication. -func (mr *MockReplicationServiceMockRecorder) CreateReplication(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateReplication", reflect.TypeOf((*MockReplicationService)(nil).CreateReplication), arg0, arg1) -} - -// DeleteReplication mocks base method. -func (m *MockReplicationService) DeleteReplication(arg0 context.Context, arg1 platform.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteReplication", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteReplication indicates an expected call of DeleteReplication. -func (mr *MockReplicationServiceMockRecorder) DeleteReplication(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteReplication", reflect.TypeOf((*MockReplicationService)(nil).DeleteReplication), arg0, arg1) -} - -// GetReplication mocks base method. -func (m *MockReplicationService) GetReplication(arg0 context.Context, arg1 platform.ID) (*influxdb.Replication, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetReplication", arg0, arg1) - ret0, _ := ret[0].(*influxdb.Replication) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetReplication indicates an expected call of GetReplication. -func (mr *MockReplicationServiceMockRecorder) GetReplication(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReplication", reflect.TypeOf((*MockReplicationService)(nil).GetReplication), arg0, arg1) -} - -// ListReplications mocks base method. -func (m *MockReplicationService) ListReplications(arg0 context.Context, arg1 influxdb.ReplicationListFilter) (*influxdb.Replications, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListReplications", arg0, arg1) - ret0, _ := ret[0].(*influxdb.Replications) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListReplications indicates an expected call of ListReplications. -func (mr *MockReplicationServiceMockRecorder) ListReplications(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListReplications", reflect.TypeOf((*MockReplicationService)(nil).ListReplications), arg0, arg1) -} - -// UpdateReplication mocks base method. -func (m *MockReplicationService) UpdateReplication(arg0 context.Context, arg1 platform.ID, arg2 influxdb.UpdateReplicationRequest) (*influxdb.Replication, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateReplication", arg0, arg1, arg2) - ret0, _ := ret[0].(*influxdb.Replication) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateReplication indicates an expected call of UpdateReplication. -func (mr *MockReplicationServiceMockRecorder) UpdateReplication(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateReplication", reflect.TypeOf((*MockReplicationService)(nil).UpdateReplication), arg0, arg1, arg2) -} - -// ValidateNewReplication mocks base method. 
-func (m *MockReplicationService) ValidateNewReplication(arg0 context.Context, arg1 influxdb.CreateReplicationRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateNewReplication", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ValidateNewReplication indicates an expected call of ValidateNewReplication. -func (mr *MockReplicationServiceMockRecorder) ValidateNewReplication(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateNewReplication", reflect.TypeOf((*MockReplicationService)(nil).ValidateNewReplication), arg0, arg1) -} - -// ValidateReplication mocks base method. -func (m *MockReplicationService) ValidateReplication(arg0 context.Context, arg1 platform.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateReplication", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ValidateReplication indicates an expected call of ValidateReplication. -func (mr *MockReplicationServiceMockRecorder) ValidateReplication(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateReplication", reflect.TypeOf((*MockReplicationService)(nil).ValidateReplication), arg0, arg1) -} - -// ValidateUpdatedReplication mocks base method. -func (m *MockReplicationService) ValidateUpdatedReplication(arg0 context.Context, arg1 platform.ID, arg2 influxdb.UpdateReplicationRequest) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateUpdatedReplication", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ValidateUpdatedReplication indicates an expected call of ValidateUpdatedReplication. -func (mr *MockReplicationServiceMockRecorder) ValidateUpdatedReplication(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateUpdatedReplication", reflect.TypeOf((*MockReplicationService)(nil).ValidateUpdatedReplication), arg0, arg1, arg2) -} diff --git a/replications/mock/service_store.go b/replications/mock/service_store.go deleted file mode 100644 index e070f451dd4..00000000000 --- a/replications/mock/service_store.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/replications (interfaces: ServiceStore) - -// Package mock is a generated GoMock package. -package mock - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - platform "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockServiceStore is a mock of ServiceStore interface. -type MockServiceStore struct { - ctrl *gomock.Controller - recorder *MockServiceStoreMockRecorder -} - -// MockServiceStoreMockRecorder is the mock recorder for MockServiceStore. -type MockServiceStoreMockRecorder struct { - mock *MockServiceStore -} - -// NewMockServiceStore creates a new mock instance. -func NewMockServiceStore(ctrl *gomock.Controller) *MockServiceStore { - mock := &MockServiceStore{ctrl: ctrl} - mock.recorder = &MockServiceStoreMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockServiceStore) EXPECT() *MockServiceStoreMockRecorder { - return m.recorder -} - -// CreateReplication mocks base method. 
-func (m *MockServiceStore) CreateReplication(arg0 context.Context, arg1 platform.ID, arg2 influxdb.CreateReplicationRequest) (*influxdb.Replication, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateReplication", arg0, arg1, arg2) - ret0, _ := ret[0].(*influxdb.Replication) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateReplication indicates an expected call of CreateReplication. -func (mr *MockServiceStoreMockRecorder) CreateReplication(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateReplication", reflect.TypeOf((*MockServiceStore)(nil).CreateReplication), arg0, arg1, arg2) -} - -// DeleteBucketReplications mocks base method. -func (m *MockServiceStore) DeleteBucketReplications(arg0 context.Context, arg1 platform.ID) ([]platform.ID, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketReplications", arg0, arg1) - ret0, _ := ret[0].([]platform.ID) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketReplications indicates an expected call of DeleteBucketReplications. -func (mr *MockServiceStoreMockRecorder) DeleteBucketReplications(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketReplications", reflect.TypeOf((*MockServiceStore)(nil).DeleteBucketReplications), arg0, arg1) -} - -// DeleteReplication mocks base method. -func (m *MockServiceStore) DeleteReplication(arg0 context.Context, arg1 platform.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteReplication", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteReplication indicates an expected call of DeleteReplication. -func (mr *MockServiceStoreMockRecorder) DeleteReplication(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteReplication", reflect.TypeOf((*MockServiceStore)(nil).DeleteReplication), arg0, arg1) -} - -// GetFullHTTPConfig mocks base method. -func (m *MockServiceStore) GetFullHTTPConfig(arg0 context.Context, arg1 platform.ID) (*influxdb.ReplicationHTTPConfig, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFullHTTPConfig", arg0, arg1) - ret0, _ := ret[0].(*influxdb.ReplicationHTTPConfig) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetFullHTTPConfig indicates an expected call of GetFullHTTPConfig. -func (mr *MockServiceStoreMockRecorder) GetFullHTTPConfig(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFullHTTPConfig", reflect.TypeOf((*MockServiceStore)(nil).GetFullHTTPConfig), arg0, arg1) -} - -// GetReplication mocks base method. -func (m *MockServiceStore) GetReplication(arg0 context.Context, arg1 platform.ID) (*influxdb.Replication, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetReplication", arg0, arg1) - ret0, _ := ret[0].(*influxdb.Replication) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetReplication indicates an expected call of GetReplication. -func (mr *MockServiceStoreMockRecorder) GetReplication(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReplication", reflect.TypeOf((*MockServiceStore)(nil).GetReplication), arg0, arg1) -} - -// ListReplications mocks base method. 
-func (m *MockServiceStore) ListReplications(arg0 context.Context, arg1 influxdb.ReplicationListFilter) (*influxdb.Replications, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListReplications", arg0, arg1) - ret0, _ := ret[0].(*influxdb.Replications) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListReplications indicates an expected call of ListReplications. -func (mr *MockServiceStoreMockRecorder) ListReplications(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListReplications", reflect.TypeOf((*MockServiceStore)(nil).ListReplications), arg0, arg1) -} - -// Lock mocks base method. -func (m *MockServiceStore) Lock() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Lock") -} - -// Lock indicates an expected call of Lock. -func (mr *MockServiceStoreMockRecorder) Lock() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lock", reflect.TypeOf((*MockServiceStore)(nil).Lock)) -} - -// PopulateRemoteHTTPConfig mocks base method. -func (m *MockServiceStore) PopulateRemoteHTTPConfig(arg0 context.Context, arg1 platform.ID, arg2 *influxdb.ReplicationHTTPConfig) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PopulateRemoteHTTPConfig", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// PopulateRemoteHTTPConfig indicates an expected call of PopulateRemoteHTTPConfig. -func (mr *MockServiceStoreMockRecorder) PopulateRemoteHTTPConfig(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PopulateRemoteHTTPConfig", reflect.TypeOf((*MockServiceStore)(nil).PopulateRemoteHTTPConfig), arg0, arg1, arg2) -} - -// Unlock mocks base method. -func (m *MockServiceStore) Unlock() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Unlock") -} - -// Unlock indicates an expected call of Unlock. -func (mr *MockServiceStoreMockRecorder) Unlock() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unlock", reflect.TypeOf((*MockServiceStore)(nil).Unlock)) -} - -// UpdateReplication mocks base method. -func (m *MockServiceStore) UpdateReplication(arg0 context.Context, arg1 platform.ID, arg2 influxdb.UpdateReplicationRequest) (*influxdb.Replication, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateReplication", arg0, arg1, arg2) - ret0, _ := ret[0].(*influxdb.Replication) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateReplication indicates an expected call of UpdateReplication. -func (mr *MockServiceStoreMockRecorder) UpdateReplication(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateReplication", reflect.TypeOf((*MockServiceStore)(nil).UpdateReplication), arg0, arg1, arg2) -} diff --git a/replications/mock/validator.go b/replications/mock/validator.go deleted file mode 100644 index e2138b2cad0..00000000000 --- a/replications/mock/validator.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/replications (interfaces: ReplicationValidator) - -// Package mock is a generated GoMock package. -package mock - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" -) - -// MockReplicationValidator is a mock of ReplicationValidator interface. 
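Each of these mock files is produced by MockGen in reflect mode from a `//go:generate` directive that names the source package and interface, as the directives later in `writer_test.go` and `service_test.go` show. A representative directive is sketched below; the destination path and its placement are illustrative, since the diff does not show where this particular mock's directive lived.

```go
package replications

// Regenerating the ReplicationValidator mock: mockgen in reflect mode takes
// the import path of the package and the interface name, and -destination
// controls where the generated file is written.
//go:generate go run github.com/golang/mock/mockgen -package mock -destination mock/validator.go github.com/influxdata/influxdb/v2/replications ReplicationValidator
```

Running `go generate ./...` from the module root would then rewrite the files under `replications/mock/`.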
-type MockReplicationValidator struct { - ctrl *gomock.Controller - recorder *MockReplicationValidatorMockRecorder -} - -// MockReplicationValidatorMockRecorder is the mock recorder for MockReplicationValidator. -type MockReplicationValidatorMockRecorder struct { - mock *MockReplicationValidator -} - -// NewMockReplicationValidator creates a new mock instance. -func NewMockReplicationValidator(ctrl *gomock.Controller) *MockReplicationValidator { - mock := &MockReplicationValidator{ctrl: ctrl} - mock.recorder = &MockReplicationValidatorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockReplicationValidator) EXPECT() *MockReplicationValidatorMockRecorder { - return m.recorder -} - -// ValidateReplication mocks base method. -func (m *MockReplicationValidator) ValidateReplication(arg0 context.Context, arg1 *influxdb.ReplicationHTTPConfig) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateReplication", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ValidateReplication indicates an expected call of ValidateReplication. -func (mr *MockReplicationValidatorMockRecorder) ValidateReplication(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateReplication", reflect.TypeOf((*MockReplicationValidator)(nil).ValidateReplication), arg0, arg1) -} diff --git a/replications/remotewrite/writer.go b/replications/remotewrite/writer.go deleted file mode 100644 index 39a29b21be5..00000000000 --- a/replications/remotewrite/writer.go +++ /dev/null @@ -1,282 +0,0 @@ -package remotewrite - -import ( - "context" - "fmt" - "math" - "net" - "net/http" - "net/url" - "runtime" - "strconv" - "sync" - "time" - - "github.com/influxdata/influx-cli/v2/api" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - ierrors "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/replications/metrics" - "go.uber.org/zap" -) - -const ( - retryAfterHeaderKey = "Retry-After" - maximumBackoffTime = 15 * time.Minute - maximumAttempts = 10 // After this many attempts, wait maximumBackoffTime - DefaultTimeout = 2 * time.Minute -) - -var ( - userAgent = fmt.Sprintf( - "influxdb-oss-replication/%s (%s) Sha/%s Date/%s", - influxdb.GetBuildInfo().Version, - runtime.GOOS, - influxdb.GetBuildInfo().Commit, - influxdb.GetBuildInfo().Date) -) - -func invalidRemoteUrl(remoteUrl string, err error) *ierrors.Error { - return &ierrors.Error{ - Code: ierrors.EInvalid, - Msg: fmt.Sprintf("host URL %q is invalid", remoteUrl), - Err: err, - } -} - -func invalidResponseCode(code int) *ierrors.Error { - return &ierrors.Error{ - Code: ierrors.EInvalid, - Msg: fmt.Sprintf("invalid response code %d, must be %d", code, http.StatusNoContent), - } -} - -type HttpConfigStore interface { - GetFullHTTPConfig(context.Context, platform.ID) (*influxdb.ReplicationHTTPConfig, error) - UpdateResponseInfo(context.Context, platform.ID, int, string) error -} - -type waitFunc func(time.Duration) <-chan time.Time - -type writer struct { - replicationID platform.ID - configStore HttpConfigStore - metrics *metrics.ReplicationsMetrics - logger *zap.Logger - maximumBackoffTime time.Duration - maximumAttemptsForBackoffTime int - clientTimeout time.Duration - done chan struct{} - waitFunc waitFunc // used for testing -} - -func NewWriter(replicationID platform.ID, store HttpConfigStore, metrics 
*metrics.ReplicationsMetrics, logger *zap.Logger, done chan struct{}) *writer { - return &writer{ - replicationID: replicationID, - configStore: store, - metrics: metrics, - logger: logger, - maximumBackoffTime: maximumBackoffTime, - maximumAttemptsForBackoffTime: maximumAttempts, - clientTimeout: DefaultTimeout, - done: done, - waitFunc: func(t time.Duration) <-chan time.Time { - return time.After(t) - }, - } -} - -func (w *writer) Write(data []byte, attempts int) (backoff time.Duration, err error) { - cancelOnce := &sync.Once{} - // Cancel any outstanding HTTP requests if the replicationQueue is closed. - ctx, cancel := context.WithCancel(context.Background()) - - defer func() { - cancelOnce.Do(cancel) - }() - - go func() { - select { - case <-w.done: - cancelOnce.Do(cancel) - case <-ctx.Done(): - // context is cancelled already - } - }() - - // Get the most recent config on every attempt, in case the user has updated the config to correct errors. - conf, err := w.configStore.GetFullHTTPConfig(ctx, w.replicationID) - if err != nil { - return w.backoff(attempts), err - } - - res, postWriteErr := PostWrite(ctx, conf, data, w.clientTimeout) - res, msg, ok := normalizeResponse(res, postWriteErr) - if !ok { - // Update Response info: - if err := w.configStore.UpdateResponseInfo(ctx, w.replicationID, res.StatusCode, msg); err != nil { - w.logger.Debug("failed to update config store with latest remote write response info", zap.Error(err)) - return w.backoff(attempts), err - } - // bail out - return w.backoff(attempts), postWriteErr - } - - // Update metrics and most recent error diagnostic information. - if err := w.configStore.UpdateResponseInfo(ctx, w.replicationID, res.StatusCode, msg); err != nil { - // TODO: We shouldn't fail/retry a successful remote write for not successfully writing to the config store - // we should only log instead of returning, like: - w.logger.Debug("failed to update config store with latest remote write response info", zap.Error(err)) - // Unfortunately this will mess up a lot of tests that are using UpdateResponseInfo failures as a proxy for - // write failures. - return w.backoff(attempts), err - } - - if postWriteErr == nil { - // Successful write - w.metrics.RemoteWriteSent(w.replicationID, len(data)) - w.logger.Debug("remote write successful", zap.Int("attempt", attempts), zap.Int("bytes", len(data))) - return 0, nil - } - - w.metrics.RemoteWriteError(w.replicationID, res.StatusCode) - w.logger.Debug("remote write error", zap.Int("attempt", attempts), zap.String("error message", "msg"), zap.Int("status code", res.StatusCode)) - - var waitTime time.Duration - hasSetWaitTime := false - - switch res.StatusCode { - case http.StatusBadRequest: - if conf.DropNonRetryableData { - var errBody []byte - res.Body.Read(errBody) - w.logger.Warn("dropped data", zap.Int("bytes", len(data)), zap.String("reason", string(errBody))) - w.metrics.RemoteWriteDropped(w.replicationID, len(data)) - return 0, nil - } - case http.StatusTooManyRequests: - headerTime := w.waitTimeFromHeader(res) - if headerTime != 0 { - waitTime = headerTime - hasSetWaitTime = true - } - } - - if !hasSetWaitTime { - waitTime = w.backoff(attempts) - } - - return waitTime, postWriteErr -} - -// normalizeResponse returns a guaranteed non-nil value for *http.Response, and an extracted error message string for use -// in logging. The returned bool indicates if the response is a time-out - false means that the write request should be -// aborted due to a malformed request. 
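`Write` ties the lifetime of each HTTP attempt to the queue's `done` channel: a helper goroutine waits on either `done` or the request context, and cancellation goes through a `sync.Once` so the deferred cancel and the goroutine do not both race to call it. Here is a stripped-down sketch of that pattern, with a slow operation standing in for `PostWrite`; the helper name is illustrative.

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// doWithShutdown runs op with a context that is cancelled either when the
// caller returns or when the done channel is closed, whichever happens first.
func doWithShutdown(done <-chan struct{}, op func(context.Context) error) error {
	ctx, cancel := context.WithCancel(context.Background())
	var once sync.Once
	defer once.Do(cancel)

	go func() {
		select {
		case <-done:
			once.Do(cancel) // external shutdown: abort the in-flight operation
		case <-ctx.Done():
			// normal completion: nothing to do
		}
	}()

	return op(ctx)
}

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(50 * time.Millisecond)
		close(done) // simulate the replication queue shutting down
	}()

	err := doWithShutdown(done, func(ctx context.Context) error {
		select {
		case <-time.After(time.Second): // stand-in for a slow remote write
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	})
	fmt.Println("result:", err) // expected: context canceled
}
```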
-func normalizeResponse(r *http.Response, err error) (*http.Response, string, bool) { - var errMsg string - if err != nil { - errMsg = err.Error() - } - - if r == nil { - if errorIsTimeout(err) { - return &http.Response{}, errMsg, true - } - - return &http.Response{}, errMsg, false - } - - return r, errMsg, true -} - -func errorIsTimeout(err error) bool { - if err, ok := err.(net.Error); ok && err.Timeout() { - return true - } - - return false -} - -func PostWrite(ctx context.Context, config *influxdb.ReplicationHTTPConfig, data []byte, timeout time.Duration) (*http.Response, error) { - u, err := url.Parse(config.RemoteURL) - if err != nil { - return nil, invalidRemoteUrl(config.RemoteURL, err) - } - - params := api.ConfigParams{ - Host: u, - UserAgent: userAgent, - Token: &config.RemoteToken, - AllowInsecureTLS: config.AllowInsecureTLS, - } - conf := api.NewAPIConfig(params) - conf.HTTPClient.Timeout = timeout - client := api.NewAPIClient(conf).WriteApi - - var bucket string - if config.RemoteBucketID == nil || config.RemoteBucketName != "" { - bucket = config.RemoteBucketName - } else { - bucket = config.RemoteBucketID.String() - } - - var org string - if config.RemoteOrgID != nil { - org = config.RemoteOrgID.String() - } else { - // We need to provide something here for the write api to be happy - org = platform.InvalidID().String() - } - - req := client.PostWrite(ctx). - Bucket(bucket). - Body(data). - Org(org) - - // Don't set the encoding header for empty bodies, like those used for validation. - if len(data) > 0 { - req = req.ContentEncoding("gzip") - } - - res, err := req.ExecuteWithHttpInfo() - if res == nil { - return nil, err - } - - // Only a response of 204 is valid for a successful write - if res.StatusCode != http.StatusNoContent { - err = invalidResponseCode(res.StatusCode) - } - - // Must return the response so that the status code and headers can be inspected by the caller, even if the response - // was not 204. - return res, err -} - -func (w *writer) backoff(numAttempts int) time.Duration { - if numAttempts > w.maximumAttemptsForBackoffTime { - return w.maximumBackoffTime - } - - s := 0.5 * math.Pow(2, float64(numAttempts-1)) - return time.Duration(s * float64(time.Second)) -} - -func (w *writer) waitTimeFromHeader(r *http.Response) time.Duration { - str := r.Header.Get(retryAfterHeaderKey) - if str == "" { - return 0 - } - - // Use a minimal backoff time if the header is set to 0 for some reason, maybe due to rounding. 
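The `backoff` method above implements capped exponential growth: 0.5 s · 2^(n−1) for attempt n, until the attempt count passes `maximumAttempts` (10), after which every retry waits the full `maximumBackoffTime` of 15 minutes. A small worked check of that schedule, reusing the same constants in a standalone sketch:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

const (
	maximumBackoffTime = 15 * time.Minute
	maximumAttempts    = 10
)

// backoff mirrors writer.backoff: exponential growth capped once the attempt
// count exceeds maximumAttempts.
func backoff(numAttempts int) time.Duration {
	if numAttempts > maximumAttempts {
		return maximumBackoffTime
	}
	s := 0.5 * math.Pow(2, float64(numAttempts-1))
	return time.Duration(s * float64(time.Second))
}

func main() {
	for _, n := range []int{1, 2, 3, 5, 10, 11} {
		fmt.Printf("attempt %2d -> wait %v\n", n, backoff(n))
	}
	// attempt  1 -> 500ms, 2 -> 1s, 3 -> 2s, 5 -> 8s,
	// attempt 10 -> 4m16s, 11 -> 15m0s (capped)
}
```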
- if str == "0" { - return w.backoff(1) - } - - rtr, err := strconv.Atoi(str) - if err != nil { - return 0 - } - - return time.Duration(rtr * int(time.Second)) -} diff --git a/replications/remotewrite/writer_test.go b/replications/remotewrite/writer_test.go deleted file mode 100644 index e5362dc79fa..00000000000 --- a/replications/remotewrite/writer_test.go +++ /dev/null @@ -1,484 +0,0 @@ -package remotewrite - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "net/http/httptest" - "strconv" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/kit/prom/promtest" - "github.com/influxdata/influxdb/v2/replications/metrics" - replicationsMock "github.com/influxdata/influxdb/v2/replications/mock" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -//go:generate go run github.com/golang/mock/mockgen -package mock -destination ../mock/http_config_store.go github.com/influxdata/influxdb/v2/replications/remotewrite HttpConfigStore - -var ( - testID = platform.ID(1) -) - -func testWriter(t *testing.T) (*writer, *replicationsMock.MockHttpConfigStore, chan struct{}) { - ctrl := gomock.NewController(t) - configStore := replicationsMock.NewMockHttpConfigStore(ctrl) - done := make(chan struct{}) - w := NewWriter(testID, configStore, metrics.NewReplicationsMetrics(), zaptest.NewLogger(t), done) - return w, configStore, done -} - -func constantStatus(i int) func(int) int { - return func(int) int { - return i - } -} - -func testServer(t *testing.T, statusForCount func(int) int, wantData []byte) *httptest.Server { - count := 0 - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - gotData, err := io.ReadAll(r.Body) - require.NoError(t, err) - require.Equal(t, wantData, gotData) - w.WriteHeader(statusForCount(count)) - count++ - })) -} - -func instaWait() waitFunc { - return func(t time.Duration) <-chan time.Time { - out := make(chan time.Time) - close(out) - return out - } -} - -func TestWrite(t *testing.T) { - t.Parallel() - - testData := []byte("some data") - - t.Run("error getting config", func(t *testing.T) { - wantErr := errors.New("uh oh") - - w, configStore, _ := testWriter(t) - - configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(nil, wantErr) - _, actualErr := w.Write([]byte{}, 1) - require.Equal(t, wantErr, actualErr) - }) - - t.Run("nil response from PostWrite", func(t *testing.T) { - testConfig := &influxdb.ReplicationHTTPConfig{ - RemoteURL: "not a good URL", - } - w, configStore, _ := testWriter(t) - configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil) - configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, int(0), gomock.Any()) - _, actualErr := w.Write([]byte{}, 1) - require.Error(t, actualErr) - }) - - t.Run("immediate good response", func(t *testing.T) { - svr := testServer(t, constantStatus(http.StatusNoContent), testData) - defer svr.Close() - - testConfig := &influxdb.ReplicationHTTPConfig{ - RemoteURL: svr.URL, - } - - w, configStore, _ := testWriter(t) - - configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil) - configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusNoContent, "").Return(nil) - _, actualErr := w.Write(testData, 0) - require.NoError(t, 
actualErr) - }) - - t.Run("error updating response info", func(t *testing.T) { - wantErr := errors.New("o no") - - svr := testServer(t, constantStatus(http.StatusNoContent), testData) - defer svr.Close() - - testConfig := &influxdb.ReplicationHTTPConfig{ - RemoteURL: svr.URL, - } - - w, configStore, _ := testWriter(t) - - configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil) - configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusNoContent, "").Return(wantErr) - _, actualErr := w.Write(testData, 1) - require.Equal(t, wantErr, actualErr) - }) - - t.Run("bad server responses that never succeed", func(t *testing.T) { - testAttempts := 3 - - for _, status := range []int{http.StatusOK, http.StatusTeapot, http.StatusInternalServerError} { - t.Run(fmt.Sprintf("status code %d", status), func(t *testing.T) { - svr := testServer(t, constantStatus(status), testData) - defer svr.Close() - - testConfig := &influxdb.ReplicationHTTPConfig{ - RemoteURL: svr.URL, - } - - w, configStore, _ := testWriter(t) - w.waitFunc = instaWait() - - configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil) - configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, status, invalidResponseCode(status).Error()).Return(nil) - _, actualErr := w.Write(testData, testAttempts) - require.NotNil(t, actualErr) - require.Contains(t, actualErr.Error(), fmt.Sprintf("invalid response code %d", status)) - }) - } - }) - - t.Run("drops bad data after config is updated", func(t *testing.T) { - testAttempts := 5 - - svr := testServer(t, constantStatus(http.StatusBadRequest), testData) - defer svr.Close() - - testConfig := &influxdb.ReplicationHTTPConfig{ - RemoteURL: svr.URL, - } - - updatedConfig := &influxdb.ReplicationHTTPConfig{ - RemoteURL: svr.URL, - DropNonRetryableData: true, - } - - w, configStore, _ := testWriter(t) - w.waitFunc = instaWait() - - configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil).Times(testAttempts - 1) - configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(updatedConfig, nil) - configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusBadRequest, invalidResponseCode(http.StatusBadRequest).Error()).Return(nil).Times(testAttempts) - for i := 1; i <= testAttempts; i++ { - _, actualErr := w.Write(testData, i) - if testAttempts == i { - require.NoError(t, actualErr) - } else { - require.Error(t, actualErr) - } - } - }) - - t.Run("gives backoff time on write response", func(t *testing.T) { - svr := testServer(t, constantStatus(http.StatusBadRequest), testData) - defer svr.Close() - - testConfig := &influxdb.ReplicationHTTPConfig{ - RemoteURL: svr.URL, - } - - w, configStore, _ := testWriter(t) - - configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil) - configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusBadRequest, gomock.Any()).Return(nil) - backoff, actualErr := w.Write(testData, 1) - require.Equal(t, backoff, w.backoff(1)) - require.Equal(t, invalidResponseCode(http.StatusBadRequest), actualErr) - }) - - t.Run("uses wait time from response header if present", func(t *testing.T) { - numSeconds := 5 - waitTimeFromHeader := 5 * time.Second - - svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - gotData, err := io.ReadAll(r.Body) - require.NoError(t, err) - require.Equal(t, testData, gotData) - w.Header().Set(retryAfterHeaderKey, strconv.Itoa(numSeconds)) - 
w.WriteHeader(http.StatusTooManyRequests) - })) - defer svr.Close() - - testConfig := &influxdb.ReplicationHTTPConfig{ - RemoteURL: svr.URL, - } - - w, configStore, done := testWriter(t) - w.waitFunc = func(dur time.Duration) <-chan time.Time { - require.Equal(t, waitTimeFromHeader, dur) - close(done) - return instaWait()(dur) - } - - configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil) - configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusTooManyRequests, invalidResponseCode(http.StatusTooManyRequests).Error()).Return(nil) - _, actualErr := w.Write(testData, 1) - require.Equal(t, invalidResponseCode(http.StatusTooManyRequests), actualErr) - }) - - t.Run("can cancel with done channel", func(t *testing.T) { - svr := testServer(t, constantStatus(http.StatusInternalServerError), testData) - defer svr.Close() - - testConfig := &influxdb.ReplicationHTTPConfig{ - RemoteURL: svr.URL, - } - - w, configStore, _ := testWriter(t) - - configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil) - configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusInternalServerError, invalidResponseCode(http.StatusInternalServerError).Error()).Return(nil) - _, actualErr := w.Write(testData, 1) - require.Equal(t, invalidResponseCode(http.StatusInternalServerError), actualErr) - }) - - t.Run("writes resume after temporary remote disconnect", func(t *testing.T) { - // Attempt to write data a total of 5 times. - // Succeed on the first point, writing point 1. (baseline test) - // Fail on the second and third, then succeed on the fourth, writing point 2. - // Fail on the fifth, sixth and seventh, then succeed on the eighth, writing point 3. - attemptMap := make([]bool, 8) - attemptMap[0] = true - attemptMap[3] = true - attemptMap[7] = true - var attempt uint8 - - var currentWrite int - testWrites := []string{ - "this is some data", - "this is also some data", - "this is even more data", - } - - svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if attemptMap[attempt] { - gotData, err := io.ReadAll(r.Body) - require.NoError(t, err) - require.Equal(t, []byte(testWrites[currentWrite]), gotData) - w.WriteHeader(http.StatusNoContent) - } else { - // Simulate a timeout, as if the remote connection were offline - w.WriteHeader(http.StatusGatewayTimeout) - } - attempt++ - })) - defer svr.Close() - - testConfig := &influxdb.ReplicationHTTPConfig{ - RemoteURL: svr.URL, - } - w, configStore, _ := testWriter(t) - - numAttempts := 0 - for i := 0; i < len(testWrites); i++ { - currentWrite = i - configStore.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(testConfig, nil) - if attemptMap[attempt] { - // should succeed - configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusNoContent, gomock.Any()).Return(nil) - _, err := w.Write([]byte(testWrites[i]), numAttempts) - require.NoError(t, err) - numAttempts = 0 - } else { - // should fail - configStore.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusGatewayTimeout, invalidResponseCode(http.StatusGatewayTimeout).Error()).Return(nil) - _, err := w.Write([]byte(testWrites[i]), numAttempts) - require.Error(t, err) - numAttempts++ - i-- // decrement so that we retry this same data point in the next loop iteration - } - } - }) -} - -func TestWrite_Metrics(t *testing.T) { - testData := []byte("this is some data") - - tests := []struct { - name string - status func(int) int - expectedErr error - data []byte - 
registerExpectations func(*testing.T, *replicationsMock.MockHttpConfigStore, *influxdb.ReplicationHTTPConfig) - checkMetrics func(*testing.T, *prom.Registry) - }{ - { - name: "server errors", - status: constantStatus(http.StatusTeapot), - expectedErr: invalidResponseCode(http.StatusTeapot), - data: []byte{}, - registerExpectations: func(t *testing.T, store *replicationsMock.MockHttpConfigStore, conf *influxdb.ReplicationHTTPConfig) { - store.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(conf, nil) - store.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusTeapot, invalidResponseCode(http.StatusTeapot).Error()).Return(nil) - }, - checkMetrics: func(t *testing.T, reg *prom.Registry) { - mfs := promtest.MustGather(t, reg) - errorCodes := promtest.FindMetric(mfs, "replications_queue_remote_write_errors", map[string]string{ - "replicationID": testID.String(), - "code": strconv.Itoa(http.StatusTeapot), - }) - require.NotNil(t, errorCodes) - }, - }, - { - name: "successful write", - status: constantStatus(http.StatusNoContent), - data: testData, - registerExpectations: func(t *testing.T, store *replicationsMock.MockHttpConfigStore, conf *influxdb.ReplicationHTTPConfig) { - store.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(conf, nil) - store.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusNoContent, "").Return(nil) - }, - checkMetrics: func(t *testing.T, reg *prom.Registry) { - mfs := promtest.MustGather(t, reg) - - bytesSent := promtest.FindMetric(mfs, "replications_queue_remote_write_bytes_sent", map[string]string{ - "replicationID": testID.String(), - }) - require.NotNil(t, bytesSent) - require.Equal(t, float64(len(testData)), bytesSent.Counter.GetValue()) - }, - }, - { - name: "dropped data", - status: constantStatus(http.StatusBadRequest), - data: testData, - registerExpectations: func(t *testing.T, store *replicationsMock.MockHttpConfigStore, conf *influxdb.ReplicationHTTPConfig) { - store.EXPECT().GetFullHTTPConfig(gomock.Any(), testID).Return(conf, nil) - store.EXPECT().UpdateResponseInfo(gomock.Any(), testID, http.StatusBadRequest, invalidResponseCode(http.StatusBadRequest).Error()).Return(nil) - }, - checkMetrics: func(t *testing.T, reg *prom.Registry) { - mfs := promtest.MustGather(t, reg) - - bytesDropped := promtest.FindMetric(mfs, "replications_queue_remote_write_bytes_dropped", map[string]string{ - "replicationID": testID.String(), - }) - require.NotNil(t, bytesDropped) - require.Equal(t, float64(len(testData)), bytesDropped.Counter.GetValue()) - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svr := testServer(t, tt.status, tt.data) - defer svr.Close() - - testConfig := &influxdb.ReplicationHTTPConfig{ - RemoteURL: svr.URL, - DropNonRetryableData: true, - } - - w, configStore, _ := testWriter(t) - w.waitFunc = instaWait() - reg := prom.NewRegistry(zaptest.NewLogger(t)) - reg.MustRegister(w.metrics.PrometheusCollectors()...) 
- - tt.registerExpectations(t, configStore, testConfig) - _, actualErr := w.Write(tt.data, 1) - require.Equal(t, tt.expectedErr, actualErr) - tt.checkMetrics(t, reg) - }) - } -} - -func TestPostWrite(t *testing.T) { - testData := []byte("some data") - - tests := []struct { - status int - wantErr bool - }{ - { - status: http.StatusOK, - wantErr: true, - }, - { - status: http.StatusNoContent, - wantErr: false, - }, - { - status: http.StatusBadRequest, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(fmt.Sprintf("status code %d", tt.status), func(t *testing.T) { - svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - recData, err := io.ReadAll(r.Body) - require.NoError(t, err) - require.Equal(t, testData, recData) - - w.WriteHeader(tt.status) - })) - defer svr.Close() - - config := &influxdb.ReplicationHTTPConfig{ - RemoteURL: svr.URL, - } - - res, err := PostWrite(context.Background(), config, testData, time.Second) - if tt.wantErr { - require.Error(t, err) - return - } else { - require.Nil(t, err) - } - - require.Equal(t, tt.status, res.StatusCode) - }) - } -} - -func TestWaitTimeFromHeader(t *testing.T) { - w := &writer{ - maximumAttemptsForBackoffTime: maximumAttempts, - } - - tests := []struct { - headerKey string - headerVal string - want time.Duration - }{ - { - headerKey: retryAfterHeaderKey, - headerVal: "30", - want: 30 * time.Second, - }, - { - headerKey: retryAfterHeaderKey, - headerVal: "0", - want: w.backoff(1), - }, - { - headerKey: retryAfterHeaderKey, - headerVal: "not a number", - want: 0, - }, - { - headerKey: "some other thing", - headerVal: "not a number", - want: 0, - }, - } - - for _, tt := range tests { - t.Run(fmt.Sprintf("%q - %q", tt.headerKey, tt.headerVal), func(t *testing.T) { - r := &http.Response{ - Header: http.Header{ - tt.headerKey: []string{tt.headerVal}, - }, - } - - got := w.waitTimeFromHeader(r) - require.Equal(t, tt.want, got) - }) - } -} diff --git a/replications/service.go b/replications/service.go deleted file mode 100644 index 460f2240a4d..00000000000 --- a/replications/service.go +++ /dev/null @@ -1,460 +0,0 @@ -package replications - -import ( - "bytes" - "compress/gzip" - "context" - "fmt" - "path/filepath" - "sync" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - ierrors "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/replications/internal" - "github.com/influxdata/influxdb/v2/replications/metrics" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/sqlite" - "github.com/influxdata/influxdb/v2/storage" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -// InfluxDB docs suggest a batch size of 5000 lines for optimal write performance. -// https://docs.influxdata.com/influxdb/v2.1/write-data/best-practices/optimize-writes/ -const maxRemoteWritePointSize = 5000 - -// Uncompressed size (bytes) is used as a secondary limit to prevent network issues and stay below cloud maximum payload -// limitations. 2.5 MB is about 50% of the limit on a basic cloud plan. 
-// https://docs.influxdata.com/influxdb/cloud/account-management/pricing-plans/#data-limits -const maxRemoteWriteBatchSize = 2500000 - -func errLocalBucketNotFound(id platform.ID, cause error) error { - return &ierrors.Error{ - Code: ierrors.EInvalid, - Msg: fmt.Sprintf("local bucket %q not found", id), - Err: cause, - } -} - -func NewService(sqlStore *sqlite.SqlStore, bktSvc BucketService, localWriter storage.PointsWriter, log *zap.Logger, enginePath string, instanceID string) (*service, *metrics.ReplicationsMetrics) { - metrs := metrics.NewReplicationsMetrics() - store := internal.NewStore(sqlStore) - - return &service{ - store: store, - idGenerator: snowflake.NewIDGenerator(), - bucketService: bktSvc, - localWriter: localWriter, - validator: internal.NewValidator(), - log: log, - durableQueueManager: internal.NewDurableQueueManager( - log, - filepath.Join(enginePath, "replicationq"), - metrs, - store, - ), - maxRemoteWriteBatchSize: maxRemoteWriteBatchSize, - maxRemoteWritePointSize: maxRemoteWritePointSize, - instanceID: instanceID, - }, metrs -} - -type ReplicationValidator interface { - ValidateReplication(context.Context, *influxdb.ReplicationHTTPConfig) error -} - -type BucketService interface { - RLock() - RUnlock() - FindBucketByID(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) -} - -type DurableQueueManager interface { - InitializeQueue(replicationID platform.ID, maxQueueSizeBytes int64, orgID platform.ID, localBucketID platform.ID, maxAge int64) error - DeleteQueue(replicationID platform.ID) error - UpdateMaxQueueSize(replicationID platform.ID, maxQueueSizeBytes int64) error - CurrentQueueSizes(ids []platform.ID) (map[platform.ID]int64, error) - RemainingQueueSizes(ids []platform.ID) (map[platform.ID]int64, error) - StartReplicationQueues(trackedReplications map[platform.ID]*influxdb.TrackedReplication) error - CloseAll() error - EnqueueData(replicationID platform.ID, data []byte, numPoints int) error - GetReplications(orgId platform.ID, localBucketID platform.ID) []platform.ID -} - -type ServiceStore interface { - Lock() - Unlock() - ListReplications(context.Context, influxdb.ReplicationListFilter) (*influxdb.Replications, error) - CreateReplication(context.Context, platform.ID, influxdb.CreateReplicationRequest) (*influxdb.Replication, error) - GetReplication(context.Context, platform.ID) (*influxdb.Replication, error) - UpdateReplication(context.Context, platform.ID, influxdb.UpdateReplicationRequest) (*influxdb.Replication, error) - DeleteReplication(context.Context, platform.ID) error - PopulateRemoteHTTPConfig(context.Context, platform.ID, *influxdb.ReplicationHTTPConfig) error - GetFullHTTPConfig(context.Context, platform.ID) (*influxdb.ReplicationHTTPConfig, error) - DeleteBucketReplications(context.Context, platform.ID) ([]platform.ID, error) -} - -type service struct { - store ServiceStore - idGenerator platform.IDGenerator - bucketService BucketService - validator ReplicationValidator - durableQueueManager DurableQueueManager - localWriter storage.PointsWriter - log *zap.Logger - maxRemoteWriteBatchSize int - maxRemoteWritePointSize int - instanceID string -} - -func (s *service) ListReplications(ctx context.Context, filter influxdb.ReplicationListFilter) (*influxdb.Replications, error) { - rs, err := s.store.ListReplications(ctx, filter) - if err != nil { - return nil, err - } - - if len(rs.Replications) == 0 { - return rs, nil - } - - ids := make([]platform.ID, len(rs.Replications)) - for i := range rs.Replications { - ids[i] = 
rs.Replications[i].ID - } - sizes, err := s.durableQueueManager.CurrentQueueSizes(ids) - if err != nil { - return nil, err - } - for i := range rs.Replications { - rs.Replications[i].CurrentQueueSizeBytes = sizes[rs.Replications[i].ID] - } - rsizes, err := s.durableQueueManager.RemainingQueueSizes(ids) - if err != nil { - return nil, err - } - for i := range rs.Replications { - rs.Replications[i].RemainingBytesToBeSynced = rsizes[rs.Replications[i].ID] - } - - return rs, nil -} - -func (s *service) CreateReplication(ctx context.Context, request influxdb.CreateReplicationRequest) (*influxdb.Replication, error) { - s.bucketService.RLock() - defer s.bucketService.RUnlock() - - s.store.Lock() - defer s.store.Unlock() - - if request.RemoteID == platform.ID(0) && request.RemoteBucketName == "" { - return nil, fmt.Errorf("please supply one of: remoteBucketID, remoteBucketName") - } - - if _, err := s.bucketService.FindBucketByID(ctx, request.LocalBucketID); err != nil { - return nil, errLocalBucketNotFound(request.LocalBucketID, err) - } - - newID := s.idGenerator.ID() - if err := s.durableQueueManager.InitializeQueue(newID, request.MaxQueueSizeBytes, request.OrgID, request.LocalBucketID, request.MaxAgeSeconds); err != nil { - return nil, err - } - - r, err := s.store.CreateReplication(ctx, newID, request) - if err != nil { - if cleanupErr := s.durableQueueManager.DeleteQueue(newID); cleanupErr != nil { - s.log.Warn("durable queue remaining on disk after initialization failure", zap.Error(cleanupErr), zap.String("id", newID.String())) - } - - return nil, err - } - - return r, nil -} - -func (s *service) ValidateNewReplication(ctx context.Context, request influxdb.CreateReplicationRequest) error { - if _, err := s.bucketService.FindBucketByID(ctx, request.LocalBucketID); err != nil { - return errLocalBucketNotFound(request.LocalBucketID, err) - } - - config := influxdb.ReplicationHTTPConfig{RemoteBucketID: &request.RemoteBucketID} - if err := s.store.PopulateRemoteHTTPConfig(ctx, request.RemoteID, &config); err != nil { - return err - } - - if err := s.validator.ValidateReplication(ctx, &config); err != nil { - return &ierrors.Error{ - Code: ierrors.EInvalid, - Msg: "replication parameters fail validation", - Err: err, - } - } - return nil -} - -func (s *service) GetReplication(ctx context.Context, id platform.ID) (*influxdb.Replication, error) { - r, err := s.store.GetReplication(ctx, id) - if err != nil { - return nil, err - } - - sizes, err := s.durableQueueManager.CurrentQueueSizes([]platform.ID{r.ID}) - if err != nil { - return nil, err - } - r.CurrentQueueSizeBytes = sizes[r.ID] - rsizes, err := s.durableQueueManager.RemainingQueueSizes([]platform.ID{r.ID}) - if err != nil { - return nil, err - } - r.RemainingBytesToBeSynced = rsizes[r.ID] - - return r, nil -} - -func (s *service) UpdateReplication(ctx context.Context, id platform.ID, request influxdb.UpdateReplicationRequest) (*influxdb.Replication, error) { - s.store.Lock() - defer s.store.Unlock() - - r, err := s.store.UpdateReplication(ctx, id, request) - if err != nil { - return nil, err - } - - if request.MaxQueueSizeBytes != nil { - if err := s.durableQueueManager.UpdateMaxQueueSize(id, *request.MaxQueueSizeBytes); err != nil { - s.log.Warn("actual max queue size does not match the max queue size recorded in database", zap.String("id", id.String())) - return nil, err - } - } - - sizes, err := s.durableQueueManager.CurrentQueueSizes([]platform.ID{r.ID}) - if err != nil { - return nil, err - } - r.CurrentQueueSizeBytes = sizes[r.ID] - 
rsizes, err := s.durableQueueManager.RemainingQueueSizes([]platform.ID{r.ID}) - if err != nil { - return nil, err - } - r.RemainingBytesToBeSynced = rsizes[r.ID] - - return r, nil -} - -func (s *service) ValidateUpdatedReplication(ctx context.Context, id platform.ID, request influxdb.UpdateReplicationRequest) error { - baseConfig, err := s.store.GetFullHTTPConfig(ctx, id) - if err != nil { - return err - } - if request.RemoteBucketID != nil { - baseConfig.RemoteBucketID = request.RemoteBucketID - } - - if request.RemoteID != nil { - if err := s.store.PopulateRemoteHTTPConfig(ctx, *request.RemoteID, baseConfig); err != nil { - return err - } - } - - if err := s.validator.ValidateReplication(ctx, baseConfig); err != nil { - return &ierrors.Error{ - Code: ierrors.EInvalid, - Msg: "validation fails after applying update", - Err: err, - } - } - return nil -} - -func (s *service) DeleteReplication(ctx context.Context, id platform.ID) error { - s.store.Lock() - defer s.store.Unlock() - - if err := s.store.DeleteReplication(ctx, id); err != nil { - return err - } - - if err := s.durableQueueManager.DeleteQueue(id); err != nil { - return err - } - - return nil -} - -func (s *service) DeleteBucketReplications(ctx context.Context, localBucketID platform.ID) error { - s.store.Lock() - defer s.store.Unlock() - - deletedIDs, err := s.store.DeleteBucketReplications(ctx, localBucketID) - if err != nil { - return err - } - - errOccurred := false - deletedStrings := make([]string, 0, len(deletedIDs)) - for _, id := range deletedIDs { - if err := s.durableQueueManager.DeleteQueue(id); err != nil { - s.log.Error("durable queue remaining on disk after deletion failure", zap.Error(err), zap.String("id", id.String())) - errOccurred = true - } - - deletedStrings = append(deletedStrings, id.String()) - } - - s.log.Debug("deleted replications for local bucket", - zap.String("bucket_id", localBucketID.String()), zap.Strings("ids", deletedStrings)) - - if errOccurred { - return fmt.Errorf("deleting replications for bucket %q failed, see server logs for details", localBucketID) - } - - return nil -} - -func (s *service) ValidateReplication(ctx context.Context, id platform.ID) error { - config, err := s.store.GetFullHTTPConfig(ctx, id) - if err != nil { - return err - } - if err := s.validator.ValidateReplication(ctx, config); err != nil { - return &ierrors.Error{ - Code: ierrors.EInvalid, - Msg: "replication failed validation", - Err: err, - } - } - return nil -} - -type batch struct { - data *bytes.Buffer - numPoints int -} - -func (s *service) WritePoints(ctx context.Context, orgID platform.ID, bucketID platform.ID, points []models.Point) error { - replications := s.durableQueueManager.GetReplications(orgID, bucketID) - - // If there are no registered replications, all we need to do is a local write. - if len(replications) == 0 { - return s.localWriter.WritePoints(ctx, orgID, bucketID, points) - } - - if s.instanceID != "" { - for i := range points { - points[i].AddTag("_instance_id", s.instanceID) - } - } - - // Concurrently... - var egroup errgroup.Group - var batches []*batch - - // 1. Write points to local TSM - egroup.Go(func() error { - return s.localWriter.WritePoints(ctx, orgID, bucketID, points) - }) - // 2. Serialize points to gzipped line protocol, to be enqueued for replication if the local write succeeds. - // We gzip the LP to take up less room on disk. On the other end of the queue, we can send the gzip data - // directly to the remote API without needing to decompress it. 
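// --- Illustrative sketch (editor's addition, not part of the deleted file) ---
// The comment above describes the batching scheme used by WritePoints: points are
// serialized to line protocol, gzip-compressed, and split into a new batch whenever
// the next point would push the current batch past the byte budget
// (maxRemoteWriteBatchSize) or after a fixed number of points. The standalone
// program below applies the same split rule to plain strings so the behaviour can
// be seen in isolation; the tiny thresholds and helper names are invented for the
// demonstration and are not the values used by the service.
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

// startNewBatch mirrors the service's size/count check: split when the next line
// would exceed the byte budget, or after every maxPoints lines.
func startNewBatch(currentSize, nextSize, pointCount, maxBytes, maxPoints int) bool {
	return currentSize+nextSize > maxBytes || (pointCount > 0 && pointCount%maxPoints == 0)
}

func main() {
	lines := []string{
		"cpu,host=A value=1.2 2000000000",
		"cpu,host=B value=1.3 4000000000",
		"mem,host=C value=1.3 1000000000",
	}

	const maxBytes, maxPoints = 64, 2 // deliberately tiny limits, for demonstration only

	batches := []*bytes.Buffer{new(bytes.Buffer)}
	gzw := gzip.NewWriter(batches[0])
	currentSize := 0

	for i, lp := range lines {
		if startNewBatch(currentSize, len(lp), i, maxBytes, maxPoints) {
			_ = gzw.Close() // flush the finished batch before starting the next one
			batches = append(batches, &bytes.Buffer{})
			gzw = gzip.NewWriter(batches[len(batches)-1])
			currentSize = 0
		}
		_, _ = gzw.Write([]byte(lp + "\n"))
		currentSize += len(lp)
	}
	_ = gzw.Close()

	for i, b := range batches {
		fmt.Printf("batch %d: %d gzipped bytes\n", i, b.Len())
	}
}
// --- end of illustrative sketch ---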
- egroup.Go(func() error { - // Set up an initial batch - batches = append(batches, &batch{ - data: &bytes.Buffer{}, - numPoints: 0, - }) - - currentBatchSize := 0 - gzw := gzip.NewWriter(batches[0].data) - - // Iterate through points and compress in batches - for count, p := range points { - // If current point will cause this batch to exceed max size, start a new batch for it first - if s.startNewBatch(currentBatchSize, p.StringSize(), count) { - batches = append(batches, &batch{ - data: &bytes.Buffer{}, - numPoints: 0, - }) - - if err := gzw.Close(); err != nil { - return err - } - currentBatchSize = 0 - gzw = gzip.NewWriter(batches[len(batches)-1].data) - } - - // Compress point and append to buffer - if _, err := gzw.Write(append([]byte(p.PrecisionString("ns")), '\n')); err != nil { - _ = gzw.Close() - return fmt.Errorf("failed to serialize points for replication: %w", err) - } - - batches[len(batches)-1].numPoints += 1 - currentBatchSize += p.StringSize() - } - if err := gzw.Close(); err != nil { - return err - } - return nil - }) - - if err := egroup.Wait(); err != nil { - return err - } - - // Enqueue the data into all registered replications. - var wg sync.WaitGroup - wg.Add(len(replications)) - - for _, id := range replications { - go func(id platform.ID) { - defer wg.Done() - - // Iterate through batches and enqueue each - for _, batch := range batches { - if err := s.durableQueueManager.EnqueueData(id, batch.data.Bytes(), batch.numPoints); err != nil { - s.log.Error("Failed to enqueue points for replication", zap.String("id", id.String()), zap.Error(err)) - } - } - }(id) - } - wg.Wait() - - return nil -} - -func (s *service) Open(ctx context.Context) error { - trackedReplications, err := s.store.ListReplications(ctx, influxdb.ReplicationListFilter{}) - if err != nil { - return err - } - - trackedReplicationsMap := make(map[platform.ID]*influxdb.TrackedReplication) - for _, r := range trackedReplications.Replications { - trackedReplicationsMap[r.ID] = &influxdb.TrackedReplication{ - MaxQueueSizeBytes: r.MaxQueueSizeBytes, - MaxAgeSeconds: r.MaxAgeSeconds, - OrgID: r.OrgID, - LocalBucketID: r.LocalBucketID, - } - } - - // Queue manager completes startup tasks - if err := s.durableQueueManager.StartReplicationQueues(trackedReplicationsMap); err != nil { - return err - } - return nil -} - -func (s *service) Close() error { - if err := s.durableQueueManager.CloseAll(); err != nil { - return err - } - return nil -} - -func (s *service) startNewBatch(currentSize, nextSize, pointCount int) bool { - return currentSize+nextSize > s.maxRemoteWriteBatchSize || - pointCount > 0 && pointCount%s.maxRemoteWritePointSize == 0 -} diff --git a/replications/service_test.go b/replications/service_test.go deleted file mode 100644 index 9b821bf1cbd..00000000000 --- a/replications/service_test.go +++ /dev/null @@ -1,1040 +0,0 @@ -package replications - -import ( - "bytes" - "compress/gzip" - "context" - "errors" - "fmt" - "testing" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - ierrors "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/models" - replicationsMock "github.com/influxdata/influxdb/v2/replications/mock" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -//go:generate go run github.com/golang/mock/mockgen -package mock -destination 
./mock/validator.go github.com/influxdata/influxdb/v2/replications ReplicationValidator -//go:generate go run github.com/golang/mock/mockgen -package mock -destination ./mock/bucket_service.go github.com/influxdata/influxdb/v2/replications BucketService -//go:generate go run github.com/golang/mock/mockgen -package mock -destination ./mock/queue_management.go github.com/influxdata/influxdb/v2/replications DurableQueueManager -//go:generate go run github.com/golang/mock/mockgen -package mock -destination ./mock/points_writer.go github.com/influxdata/influxdb/v2/storage PointsWriter -//go:generate go run github.com/golang/mock/mockgen -package mock -destination ./mock/service_store.go github.com/influxdata/influxdb/v2/replications ServiceStore - -var ( - ctx = context.Background() - orgID = platform.ID(10) - id1 = platform.ID(1) - id2 = platform.ID(2) - desc = "testing testing" - replication1 = influxdb.Replication{ - ID: id1, - OrgID: orgID, - Name: "test", - Description: &desc, - RemoteID: platform.ID(100), - LocalBucketID: platform.ID(1000), - RemoteBucketID: idPointer(99999), - MaxQueueSizeBytes: 3 * influxdb.DefaultReplicationMaxQueueSizeBytes, - } - replication2 = influxdb.Replication{ - ID: id2, - OrgID: orgID, - Name: "test", - Description: &desc, - RemoteID: platform.ID(100), - LocalBucketID: platform.ID(1000), - RemoteBucketID: idPointer(99999), - MaxQueueSizeBytes: 3 * influxdb.DefaultReplicationMaxQueueSizeBytes, - } - createReq = influxdb.CreateReplicationRequest{ - OrgID: replication1.OrgID, - Name: replication1.Name, - Description: replication1.Description, - RemoteID: replication1.RemoteID, - LocalBucketID: replication1.LocalBucketID, - RemoteBucketID: *replication1.RemoteBucketID, - MaxQueueSizeBytes: replication1.MaxQueueSizeBytes, - } - newRemoteID = platform.ID(200) - newQueueSize = influxdb.MinReplicationMaxQueueSizeBytes - updateReqWithNewSize = influxdb.UpdateReplicationRequest{ - RemoteID: &newRemoteID, - MaxQueueSizeBytes: &newQueueSize, - } - updatedReplicationWithNewSize = influxdb.Replication{ - ID: replication1.ID, - OrgID: replication1.OrgID, - Name: replication1.Name, - Description: replication1.Description, - RemoteID: *updateReqWithNewSize.RemoteID, - LocalBucketID: replication1.LocalBucketID, - RemoteBucketID: replication1.RemoteBucketID, - MaxQueueSizeBytes: *updateReqWithNewSize.MaxQueueSizeBytes, - } - updateReqWithNoNewSize = influxdb.UpdateReplicationRequest{ - RemoteID: &newRemoteID, - } - updatedReplicationWithNoNewSize = influxdb.Replication{ - ID: replication1.ID, - OrgID: replication1.OrgID, - Name: replication1.Name, - Description: replication1.Description, - RemoteID: *updateReqWithNewSize.RemoteID, - LocalBucketID: replication1.LocalBucketID, - RemoteBucketID: replication1.RemoteBucketID, - MaxQueueSizeBytes: replication1.MaxQueueSizeBytes, - } - remoteID = platform.ID(888888) - httpConfig = influxdb.ReplicationHTTPConfig{ - RemoteURL: fmt.Sprintf("http://%s.cloud", replication1.RemoteID), - RemoteToken: replication1.RemoteID.String(), - RemoteOrgID: &remoteID, - AllowInsecureTLS: true, - RemoteBucketID: replication1.RemoteBucketID, - } -) - -func idPointer(id int) *platform.ID { - p := platform.ID(id) - return &p -} - -func TestListReplications(t *testing.T) { - t.Parallel() - - filter := influxdb.ReplicationListFilter{} - - tests := []struct { - name string - list influxdb.Replications - ids []platform.ID - sizes map[platform.ID]int64 - rsizes map[platform.ID]int64 - 
storeErr error - queueManagerErr error - queueManagerRemainingSizesErr error - }{ - { - name: "matches multiple", - list: influxdb.Replications{ - Replications: []influxdb.Replication{replication1, replication2}, - }, - ids: []platform.ID{replication1.ID, replication2.ID}, - sizes: map[platform.ID]int64{replication1.ID: 1000, replication2.ID: 2000}, - }, - { - name: "matches one", - list: influxdb.Replications{ - Replications: []influxdb.Replication{replication1}, - }, - ids: []platform.ID{replication1.ID}, - sizes: map[platform.ID]int64{replication1.ID: 1000}, - }, - { - name: "matches none", - list: influxdb.Replications{}, - }, - { - name: "store error", - storeErr: errors.New("error from store"), - }, - { - name: "queue manager error", - list: influxdb.Replications{ - Replications: []influxdb.Replication{replication1}, - }, - ids: []platform.ID{replication1.ID}, - queueManagerErr: errors.New("error from queue manager"), - }, - { - name: "queue manager error - remaining queue size", - list: influxdb.Replications{ - Replications: []influxdb.Replication{replication1}, - }, - ids: []platform.ID{replication1.ID}, - queueManagerRemainingSizesErr: errors.New("Remaining Queue Size erro"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svc, mocks := newTestService(t) - - mocks.serviceStore.EXPECT().ListReplications(gomock.Any(), filter).Return(&tt.list, tt.storeErr) - - if tt.storeErr == nil && len(tt.list.Replications) > 0 { - mocks.durableQueueManager.EXPECT().CurrentQueueSizes(tt.ids).Return(tt.sizes, tt.queueManagerErr) - } - - if tt.storeErr == nil && tt.queueManagerErr == nil && len(tt.list.Replications) > 0 { - mocks.durableQueueManager.EXPECT().RemainingQueueSizes(tt.ids).Return(tt.rsizes, tt.queueManagerRemainingSizesErr) - } - got, err := svc.ListReplications(ctx, filter) - - var wantErr error - if tt.storeErr != nil { - wantErr = tt.storeErr - } else if tt.queueManagerErr != nil { - wantErr = tt.queueManagerErr - } else if tt.queueManagerRemainingSizesErr != nil { - wantErr = tt.queueManagerRemainingSizesErr - } - - require.Equal(t, wantErr, err) - - if wantErr != nil { - require.Nil(t, got) - return - } - - for _, r := range got.Replications { - require.Equal(t, tt.sizes[r.ID], r.CurrentQueueSizeBytes) - require.Equal(t, tt.rsizes[r.ID], r.RemainingBytesToBeSynced) - } - }) - } -} - -func TestCreateReplication(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - create influxdb.CreateReplicationRequest - storeErr error - bucketErr error - queueManagerErr error - want *influxdb.Replication - wantErr error - }{ - { - name: "success", - create: createReq, - want: &replication1, - }, - { - name: "bucket service error", - create: createReq, - bucketErr: errors.New("bucket service error"), - wantErr: errLocalBucketNotFound(createReq.LocalBucketID, errors.New("bucket service error")), - }, - { - name: "initialize queue error", - create: createReq, - queueManagerErr: errors.New("queue manager error"), - wantErr: errors.New("queue manager error"), - }, - { - name: "store create error", - create: createReq, - storeErr: errors.New("store create error"), - wantErr: errors.New("store create error"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svc, mocks := newTestService(t) - - mocks.bucketSvc.EXPECT().RLock() - mocks.bucketSvc.EXPECT().RUnlock() - mocks.serviceStore.EXPECT().Lock() - mocks.serviceStore.EXPECT().Unlock() - - mocks.bucketSvc.EXPECT().FindBucketByID(gomock.Any(), 
tt.create.LocalBucketID).Return(nil, tt.bucketErr) - - if tt.bucketErr == nil { - mocks.durableQueueManager.EXPECT().InitializeQueue(id1, tt.create.MaxQueueSizeBytes, tt.create.OrgID, tt.create.LocalBucketID, tt.create.MaxAgeSeconds).Return(tt.queueManagerErr) - } - - if tt.queueManagerErr == nil && tt.bucketErr == nil { - mocks.serviceStore.EXPECT().CreateReplication(gomock.Any(), id1, tt.create).Return(tt.want, tt.storeErr) - } - - if tt.storeErr != nil { - mocks.durableQueueManager.EXPECT().DeleteQueue(id1).Return(nil) - } - - got, err := svc.CreateReplication(ctx, tt.create) - require.Equal(t, tt.want, got) - require.Equal(t, tt.wantErr, err) - }) - } -} - -func TestValidateNewReplication(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - req influxdb.CreateReplicationRequest - storeErr error - bucketErr error - validatorErr error - wantErr error - }{ - { - name: "valid", - req: createReq, - }, - { - name: "bucket service error", - req: createReq, - bucketErr: errors.New("bucket service error"), - wantErr: errLocalBucketNotFound(createReq.LocalBucketID, errors.New("bucket service error")), - }, - { - name: "store populate error", - req: createReq, - storeErr: errors.New("store populate error"), - wantErr: errors.New("store populate error"), - }, - { - name: "validation error - invalid replication", - req: createReq, - validatorErr: errors.New("validation error"), - wantErr: &ierrors.Error{ - Code: ierrors.EInvalid, - Msg: "replication parameters fail validation", - Err: errors.New("validation error"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svc, mocks := newTestService(t) - - mocks.bucketSvc.EXPECT().FindBucketByID(gomock.Any(), tt.req.LocalBucketID).Return(nil, tt.bucketErr) - - testConfig := &influxdb.ReplicationHTTPConfig{RemoteBucketID: &tt.req.RemoteBucketID} - if tt.bucketErr == nil { - mocks.serviceStore.EXPECT().PopulateRemoteHTTPConfig(gomock.Any(), tt.req.RemoteID, testConfig).Return(tt.storeErr) - } - - if tt.bucketErr == nil && tt.storeErr == nil { - mocks.validator.EXPECT().ValidateReplication(gomock.Any(), testConfig).Return(tt.validatorErr) - } - - err := svc.ValidateNewReplication(ctx, tt.req) - require.Equal(t, tt.wantErr, err) - }) - } -} - -func TestGetReplication(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - sizes map[platform.ID]int64 - rsizes map[platform.ID]int64 - storeErr error - queueManagerErr error - queueManagerRemainingSizesErr error - storeWant influxdb.Replication - want influxdb.Replication - }{ - { - name: "success", - sizes: map[platform.ID]int64{replication1.ID: 1000}, - storeWant: replication1, - want: replication1, - }, - { - name: "store error", - storeErr: errors.New("store error"), - }, - { - name: "queue manager error", - storeWant: replication1, - queueManagerErr: errors.New("queue manager error"), - }, - { - name: "queue manager error - remaining queue size", - storeWant: replication1, - queueManagerRemainingSizesErr: errors.New("queue manager error"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svc, mocks := newTestService(t) - - mocks.serviceStore.EXPECT().GetReplication(gomock.Any(), id1).Return(&tt.storeWant, tt.storeErr) - - if tt.storeErr == nil { - mocks.durableQueueManager.EXPECT().CurrentQueueSizes([]platform.ID{id1}).Return(tt.sizes, tt.queueManagerErr) - } - if tt.storeErr == nil && tt.queueManagerErr == nil { - mocks.durableQueueManager.EXPECT().RemainingQueueSizes([]platform.ID{id1}).Return(tt.rsizes, 
tt.queueManagerRemainingSizesErr) - } - - got, err := svc.GetReplication(ctx, id1) - - var wantErr error - if tt.storeErr != nil { - wantErr = tt.storeErr - } else if tt.queueManagerErr != nil { - wantErr = tt.queueManagerErr - } else if tt.queueManagerRemainingSizesErr != nil { - wantErr = tt.queueManagerRemainingSizesErr - } - - require.Equal(t, wantErr, err) - - if wantErr != nil { - require.Nil(t, got) - return - } - - require.Equal(t, tt.sizes[got.ID], got.CurrentQueueSizeBytes) - require.Equal(t, tt.rsizes[got.ID], got.RemainingBytesToBeSynced) - - }) - } -} - -func TestUpdateReplication(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - request influxdb.UpdateReplicationRequest - sizes map[platform.ID]int64 - rsizes map[platform.ID]int64 - storeErr error - queueManagerUpdateSizeErr error - queueManagerCurrentSizesErr error - queueManagerRemainingSizesErr error - storeUpdate *influxdb.Replication - want *influxdb.Replication - wantErr error - }{ - { - name: "success with new max queue size", - request: updateReqWithNewSize, - sizes: map[platform.ID]int64{replication1.ID: *updateReqWithNewSize.MaxQueueSizeBytes}, - storeUpdate: &updatedReplicationWithNewSize, - want: &updatedReplicationWithNewSize, - }, - { - name: "success with no new max queue size", - request: updateReqWithNoNewSize, - sizes: map[platform.ID]int64{replication1.ID: updatedReplicationWithNoNewSize.MaxQueueSizeBytes}, - storeUpdate: &updatedReplicationWithNoNewSize, - want: &updatedReplicationWithNoNewSize, - }, - { - name: "store error", - request: updateReqWithNoNewSize, - storeErr: errors.New("store error"), - wantErr: errors.New("store error"), - }, - { - name: "queue manager error - update max queue size", - request: updateReqWithNewSize, - queueManagerUpdateSizeErr: errors.New("update max size err"), - wantErr: errors.New("update max size err"), - }, - { - name: "queue manager error - current queue size", - request: updateReqWithNoNewSize, - queueManagerCurrentSizesErr: errors.New("current size err"), - storeUpdate: &updatedReplicationWithNoNewSize, - wantErr: errors.New("current size err"), - }, - { - name: "queue manager error - remaining queue size", - request: updateReqWithNoNewSize, - queueManagerRemainingSizesErr: errors.New("remaining queue size err"), - storeUpdate: &updatedReplicationWithNoNewSize, - wantErr: errors.New("remaining queue size err"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svc, mocks := newTestService(t) - - mocks.serviceStore.EXPECT().Lock() - mocks.serviceStore.EXPECT().Unlock() - - mocks.serviceStore.EXPECT().UpdateReplication(gomock.Any(), id1, tt.request).Return(tt.storeUpdate, tt.storeErr) - - if tt.storeErr == nil && tt.request.MaxQueueSizeBytes != nil { - mocks.durableQueueManager.EXPECT().UpdateMaxQueueSize(id1, *tt.request.MaxQueueSizeBytes).Return(tt.queueManagerUpdateSizeErr) - } - - if tt.storeErr == nil && tt.queueManagerUpdateSizeErr == nil { - mocks.durableQueueManager.EXPECT().CurrentQueueSizes([]platform.ID{id1}).Return(tt.sizes, tt.queueManagerCurrentSizesErr) - } - - if tt.storeErr == nil && tt.queueManagerUpdateSizeErr == nil && tt.queueManagerCurrentSizesErr == nil { - mocks.durableQueueManager.EXPECT().RemainingQueueSizes([]platform.ID{id1}).Return(tt.rsizes, tt.queueManagerRemainingSizesErr) - } - - got, err := svc.UpdateReplication(ctx, id1, tt.request) - require.Equal(t, tt.want, got) - require.Equal(t, tt.wantErr, err) - }) - } -} - -func TestValidateUpdatedReplication(t *testing.T) { - t.Parallel() - - 
tests := []struct { - name string - request influxdb.UpdateReplicationRequest - baseConfig *influxdb.ReplicationHTTPConfig - storeGetConfigErr error - storePopulateConfigErr error - validatorErr error - want error - }{ - { - name: "success", - request: updateReqWithNoNewSize, - baseConfig: &httpConfig, - }, - { - name: "store get full http config error", - storeGetConfigErr: errors.New("store get full http config error"), - want: errors.New("store get full http config error"), - }, - { - name: "store get populate remote config error", - request: updateReqWithNoNewSize, - storePopulateConfigErr: errors.New("store populate http config error"), - want: errors.New("store populate http config error"), - }, - { - name: "invalid update", - request: updateReqWithNoNewSize, - validatorErr: errors.New("invalid"), - want: &ierrors.Error{ - Code: ierrors.EInvalid, - Msg: "validation fails after applying update", - Err: errors.New("invalid"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svc, mocks := newTestService(t) - - mocks.serviceStore.EXPECT().GetFullHTTPConfig(gomock.Any(), id1).Return(tt.baseConfig, tt.storeGetConfigErr) - - if tt.storeGetConfigErr == nil { - mocks.serviceStore.EXPECT().PopulateRemoteHTTPConfig(gomock.Any(), *tt.request.RemoteID, tt.baseConfig).Return(tt.storePopulateConfigErr) - } - - if tt.storeGetConfigErr == nil && tt.storePopulateConfigErr == nil { - mocks.validator.EXPECT().ValidateReplication(gomock.Any(), tt.baseConfig).Return(tt.validatorErr) - } - - err := svc.ValidateUpdatedReplication(ctx, id1, tt.request) - require.Equal(t, tt.want, err) - }) - } -} - -func TestDeleteReplication(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - storeErr error - queueManagerErr error - }{ - { - name: "success", - }, - { - name: "store error", - storeErr: errors.New("store error"), - }, - { - name: "queue manager error", - queueManagerErr: errors.New("queue manager error"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svc, mocks := newTestService(t) - - mocks.serviceStore.EXPECT().Lock() - mocks.serviceStore.EXPECT().Unlock() - - mocks.serviceStore.EXPECT().DeleteReplication(gomock.Any(), id1).Return(tt.storeErr) - - if tt.storeErr == nil { - mocks.durableQueueManager.EXPECT().DeleteQueue(id1).Return(tt.queueManagerErr) - } - - err := svc.DeleteReplication(ctx, id1) - - var wantErr error - if tt.storeErr != nil { - wantErr = tt.storeErr - } else if tt.queueManagerErr != nil { - wantErr = tt.queueManagerErr - } - - require.Equal(t, wantErr, err) - }) - } -} - -func TestDeleteBucketReplications(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - storeErr error - storeIDs []platform.ID - queueManagerErr error - wantErr error - }{ - { - name: "success - single replication IDs match bucket ID", - storeIDs: []platform.ID{id1}, - }, - { - name: "success - multiple replication IDs match bucket ID", - storeIDs: []platform.ID{id1, id2}, - }, - { - name: "zero replication IDs match bucket ID", - storeIDs: []platform.ID{}, - }, - { - name: "store error", - storeErr: errors.New("store error"), - wantErr: errors.New("store error"), - }, - { - name: "queue manager delete queue error", - storeIDs: []platform.ID{id1}, - queueManagerErr: errors.New("queue manager error"), - wantErr: fmt.Errorf("deleting replications for bucket %q failed, see server logs for details", id1), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svc, mocks := newTestService(t) - - 
mocks.serviceStore.EXPECT().Lock() - mocks.serviceStore.EXPECT().Unlock() - - mocks.serviceStore.EXPECT().DeleteBucketReplications(gomock.Any(), id1).Return(tt.storeIDs, tt.storeErr) - - if tt.storeErr == nil { - for _, id := range tt.storeIDs { - mocks.durableQueueManager.EXPECT().DeleteQueue(id).Return(tt.queueManagerErr) - } - } - - err := svc.DeleteBucketReplications(ctx, id1) - require.Equal(t, tt.wantErr, err) - }) - } -} - -func TestValidateReplication(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - storeErr error - validatorErr error - wantErr error - }{ - { - name: "valid", - }, - { - name: "store error", - storeErr: errors.New("store error"), - wantErr: errors.New("store error"), - }, - { - name: "validation error - invalid replication", - validatorErr: errors.New("validation error"), - wantErr: &ierrors.Error{ - Code: ierrors.EInvalid, - Msg: "replication failed validation", - Err: errors.New("validation error"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svc, mocks := newTestService(t) - - mocks.serviceStore.EXPECT().GetFullHTTPConfig(gomock.Any(), id1).Return(&httpConfig, tt.storeErr) - if tt.storeErr == nil { - mocks.validator.EXPECT().ValidateReplication(gomock.Any(), &httpConfig).Return(tt.validatorErr) - } - - err := svc.ValidateReplication(ctx, id1) - require.Equal(t, tt.wantErr, err) - }) - } -} - -func TestWritePoints(t *testing.T) { - t.Parallel() - - svc, mocks := newTestService(t) - - replications := make([]platform.ID, 2) - replications[0] = replication1.ID - replications[1] = replication2.ID - - mocks.durableQueueManager.EXPECT().GetReplications(orgID, id1).Return(replications) - - points, err := models.ParsePointsString(` -cpu,host=0 value=1.1 6000000000 -cpu,host=A value=1.2 2000000000 -cpu,host=A value=1.3 3000000000 -cpu,host=B value=1.3 4000000000 -cpu,host=B value=1.3 5000000000 -cpu,host=C value=1.3 1000000000 -mem,host=C value=1.3 1000000000 -disk,host=C value=1.3 1000000000`) - require.NoError(t, err) - - // Points should successfully write to local TSM. - mocks.pointWriter.EXPECT().WritePoints(gomock.Any(), orgID, id1, points).Return(nil) - - // Points should successfully be enqueued in the 2 replications associated with the local bucket. - for _, id := range replications { - mocks.durableQueueManager.EXPECT(). - EnqueueData(id, gomock.Any(), len(points)). 
- DoAndReturn(func(_ platform.ID, data []byte, numPoints int) error { - require.Equal(t, len(points), numPoints) - checkCompressedData(t, data, points) - return nil - }) - } - - require.NoError(t, svc.WritePoints(ctx, orgID, id1, points)) -} - -func TestWritePointsBatches(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - setupFn func(*testing.T, *service) - }{ - { - name: "batch bytes size", - setupFn: func(t *testing.T, svc *service) { - t.Helper() - // Set batch size to smaller size for testing (should result in 3 batches sized 93, 93, and 63 - total size 249) - svc.maxRemoteWriteBatchSize = 100 - }, - }, - { - name: "batch point size", - setupFn: func(t *testing.T, svc *service) { - t.Helper() - // Set point size to smaller size for testing (should result in 3 batches with 3 points, 3 points, and 2 points) - svc.maxRemoteWritePointSize = 3 - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svc, mocks := newTestService(t) - - tt.setupFn(t, svc) - - // Define metadata for two replications - replications := make([]platform.ID, 2) - replications[0] = replication1.ID - replications[1] = replication2.ID - - mocks.durableQueueManager.EXPECT().GetReplications(orgID, id1).Return(replications) - - // Define some points of line protocol, parse string --> []Point - points, err := models.ParsePointsString(` -cpu,host=0 value=1.1 6000000000 -cpu,host=A value=1.2 2000000000 -cpu,host=A value=1.3 3000000000 -cpu,host=B value=1.3 4000000000 -cpu,host=B value=1.3 5000000000 -cpu,host=C value=1.3 1000000000 -mem,host=C value=1.3 1000000000 -disk,host=C value=1.3 1000000000`) - require.NoError(t, err) - - // Points should successfully write to local TSM. - mocks.pointWriter.EXPECT().WritePoints(gomock.Any(), orgID, id1, points).Return(nil) - - // Points should successfully be enqueued in the 2 replications associated with the local bucket. - for _, id := range replications { - // Check batch 1 - mocks.durableQueueManager.EXPECT(). - EnqueueData(id, gomock.Any(), 3). - DoAndReturn(func(_ platform.ID, data []byte, numPoints int) error { - require.Equal(t, 3, numPoints) - checkCompressedData(t, data, points[:3]) - return nil - }) - - // Check batch 2 - mocks.durableQueueManager.EXPECT(). - EnqueueData(id, gomock.Any(), 3). - DoAndReturn(func(_ platform.ID, data []byte, numPoints int) error { - require.Equal(t, 3, numPoints) - checkCompressedData(t, data, points[3:6]) - return nil - }) - - // Check batch 3 - mocks.durableQueueManager.EXPECT(). - EnqueueData(id, gomock.Any(), 2). 
- DoAndReturn(func(_ platform.ID, data []byte, numPoints int) error { - require.Equal(t, 2, numPoints) - checkCompressedData(t, data, points[6:]) - return nil - }) - } - - require.NoError(t, svc.WritePoints(ctx, orgID, id1, points)) - }) - } -} - -func TestWritePointsInstanceID(t *testing.T) { - t.Parallel() - - svc, mocks := newTestService(t) - svc.instanceID = "hello-edge" - replications := make([]platform.ID, 2) - replications[0] = replication1.ID - replications[1] = replication2.ID - - mocks.durableQueueManager.EXPECT().GetReplications(orgID, id1).Return(replications) - - writePoints, err := models.ParsePointsString(` -cpu,host=0 value=1.1 6000000000 -cpu,host=A value=1.2 2000000000 -cpu,host=A value=1.3 3000000000 -cpu,host=B value=1.3 4000000000 -cpu,host=B value=1.3 5000000000 -cpu,host=C value=1.3 1000000000 -mem,host=C value=1.3 1000000000 -disk,host=C value=1.3 1000000000`) - require.NoError(t, err) - - expectedPoints, err := models.ParsePointsString(` -cpu,host=0,_instance_id=hello-edge value=1.1 6000000000 -cpu,host=A,_instance_id=hello-edge value=1.2 2000000000 -cpu,host=A,_instance_id=hello-edge value=1.3 3000000000 -cpu,host=B,_instance_id=hello-edge value=1.3 4000000000 -cpu,host=B,_instance_id=hello-edge value=1.3 5000000000 -cpu,host=C,_instance_id=hello-edge value=1.3 1000000000 -mem,host=C,_instance_id=hello-edge value=1.3 1000000000 -disk,host=C,_instance_id=hello-edge value=1.3 1000000000`) - require.NoError(t, err) - - // Points should successfully write to local TSM. - mocks.pointWriter.EXPECT().WritePoints(gomock.Any(), orgID, id1, writePoints).Return(nil) - - // Points should successfully be enqueued in the 2 replications associated with the local bucket. - for _, id := range replications { - mocks.durableQueueManager.EXPECT(). - EnqueueData(id, gomock.Any(), len(writePoints)). - DoAndReturn(func(_ platform.ID, data []byte, numPoints int) error { - require.Equal(t, len(writePoints), numPoints) - checkCompressedData(t, data, expectedPoints) - return nil - }) - } - - require.NoError(t, svc.WritePoints(ctx, orgID, id1, writePoints)) - -} - -func TestWritePoints_LocalFailure(t *testing.T) { - t.Parallel() - - svc, mocks := newTestService(t) - - replications := make([]platform.ID, 2) - replications[0] = replication1.ID - replications[1] = replication2.ID - - mocks.durableQueueManager.EXPECT().GetReplications(orgID, id1).Return(replications) - - points, err := models.ParsePointsString(` -cpu,host=0 value=1.1 6000000000 -cpu,host=A value=1.2 2000000000 -cpu,host=A value=1.3 3000000000 -cpu,host=B value=1.3 4000000000 -cpu,host=B value=1.3 5000000000 -cpu,host=C value=1.3 1000000000 -mem,host=C value=1.3 1000000000 -disk,host=C value=1.3 1000000000`) - require.NoError(t, err) - - // Points should fail to write to local TSM. - writeErr := errors.New("O NO") - mocks.pointWriter.EXPECT().WritePoints(gomock.Any(), orgID, id1, points).Return(writeErr) - // Don't expect any calls to enqueue points. 
- require.Equal(t, writeErr, svc.WritePoints(ctx, orgID, id1, points)) -} - -func TestOpen(t *testing.T) { - t.Parallel() - - filter := influxdb.ReplicationListFilter{} - - tests := []struct { - name string - storeErr error - queueManagerErr error - replicationsMap map[platform.ID]*influxdb.TrackedReplication - list *influxdb.Replications - }{ - { - name: "no error, multiple replications from storage", - replicationsMap: map[platform.ID]*influxdb.TrackedReplication{ - replication1.ID: { - MaxQueueSizeBytes: replication1.MaxQueueSizeBytes, - MaxAgeSeconds: replication1.MaxAgeSeconds, - OrgID: replication1.OrgID, - LocalBucketID: replication1.LocalBucketID, - }, - replication2.ID: { - MaxQueueSizeBytes: replication2.MaxQueueSizeBytes, - MaxAgeSeconds: replication2.MaxAgeSeconds, - OrgID: replication2.OrgID, - LocalBucketID: replication2.LocalBucketID, - }, - }, - list: &influxdb.Replications{ - Replications: []influxdb.Replication{replication1, replication2}, - }, - }, - { - name: "no error, one stored replication", - replicationsMap: map[platform.ID]*influxdb.TrackedReplication{ - replication1.ID: { - MaxQueueSizeBytes: replication1.MaxQueueSizeBytes, - MaxAgeSeconds: replication1.MaxAgeSeconds, - OrgID: replication1.OrgID, - LocalBucketID: replication1.LocalBucketID, - }, - }, - list: &influxdb.Replications{ - Replications: []influxdb.Replication{replication1}, - }, - }, - { - name: "store error", - storeErr: errors.New("store error"), - }, - { - name: "queue manager error", - replicationsMap: map[platform.ID]*influxdb.TrackedReplication{ - replication1.ID: { - MaxQueueSizeBytes: replication1.MaxQueueSizeBytes, - MaxAgeSeconds: replication1.MaxAgeSeconds, - OrgID: replication1.OrgID, - LocalBucketID: replication1.LocalBucketID, - }, - }, - list: &influxdb.Replications{ - Replications: []influxdb.Replication{replication1}, - }, - queueManagerErr: errors.New("queue manager error"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svc, mocks := newTestService(t) - - mocks.serviceStore.EXPECT().ListReplications(gomock.Any(), filter).Return(tt.list, tt.storeErr) - if tt.storeErr == nil { - mocks.durableQueueManager.EXPECT().StartReplicationQueues(tt.replicationsMap).Return(tt.queueManagerErr) - } - - var wantErr error - if tt.storeErr != nil { - wantErr = tt.storeErr - } else if tt.queueManagerErr != nil { - wantErr = tt.queueManagerErr - } - - err := svc.Open(ctx) - require.Equal(t, wantErr, err) - }) - } -} - -type mocks struct { - bucketSvc *replicationsMock.MockBucketService - validator *replicationsMock.MockReplicationValidator - durableQueueManager *replicationsMock.MockDurableQueueManager - pointWriter *replicationsMock.MockPointsWriter - serviceStore *replicationsMock.MockServiceStore -} - -func checkCompressedData(t *testing.T, data []byte, expectedPoints []models.Point) { - gzBuf := bytes.NewBuffer(data) - gzr, err := gzip.NewReader(gzBuf) - require.NoError(t, err) - defer gzr.Close() - - var buf bytes.Buffer - _, err = buf.ReadFrom(gzr) - require.NoError(t, err) - require.NoError(t, gzr.Close()) - - writtenPoints, err := models.ParsePoints(buf.Bytes()) - require.NoError(t, err) - require.ElementsMatch(t, writtenPoints, expectedPoints) -} - -func newTestService(t *testing.T) (*service, mocks) { - logger := zaptest.NewLogger(t) - - ctrl := gomock.NewController(t) - mocks := mocks{ - bucketSvc: replicationsMock.NewMockBucketService(ctrl), - validator: replicationsMock.NewMockReplicationValidator(ctrl), - durableQueueManager: 
replicationsMock.NewMockDurableQueueManager(ctrl), - pointWriter: replicationsMock.NewMockPointsWriter(ctrl), - serviceStore: replicationsMock.NewMockServiceStore(ctrl), - } - svc := service{ - store: mocks.serviceStore, - idGenerator: mock.NewIncrementingIDGenerator(id1), - bucketService: mocks.bucketSvc, - validator: mocks.validator, - log: logger, - durableQueueManager: mocks.durableQueueManager, - localWriter: mocks.pointWriter, - maxRemoteWriteBatchSize: maxRemoteWriteBatchSize, - maxRemoteWritePointSize: maxRemoteWritePointSize, - } - - return &svc, mocks -} diff --git a/replications/transport/http.go b/replications/transport/http.go deleted file mode 100644 index 14e99a86dab..00000000000 --- a/replications/transport/http.go +++ /dev/null @@ -1,277 +0,0 @@ -package transport - -import ( - "context" - "net/http" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/kv" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -const ( - prefixReplications = "/api/v2/replications" -) - -var ( - errBadOrg = &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid or missing org ID", - } - - errBadRemoteID = &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid remote ID", - } - - errBadLocalBucketID = &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid local bucket ID", - } - - errBadId = &errors.Error{ - Code: errors.EInvalid, - Msg: "replication ID is invalid", - } -) - -type ReplicationService interface { - // ListReplications returns all info about registered replications matching a filter. - ListReplications(context.Context, influxdb.ReplicationListFilter) (*influxdb.Replications, error) - - // CreateReplication registers a new replication stream. - CreateReplication(context.Context, influxdb.CreateReplicationRequest) (*influxdb.Replication, error) - - // ValidateNewReplication validates that the given settings for a replication are usable, - // without persisting the configuration. - ValidateNewReplication(context.Context, influxdb.CreateReplicationRequest) error - - // GetReplication returns metadata about the replication with the given ID. - GetReplication(context.Context, platform.ID) (*influxdb.Replication, error) - - // UpdateReplication updates the settings for the replication with the given ID. - UpdateReplication(context.Context, platform.ID, influxdb.UpdateReplicationRequest) (*influxdb.Replication, error) - - // ValidateUpdatedReplication valdiates that a replication is still usable after applying the - // given update, without persisting the new configuration. - ValidateUpdatedReplication(context.Context, platform.ID, influxdb.UpdateReplicationRequest) error - - // DeleteReplication deletes all info for the replication with the given ID. - DeleteReplication(context.Context, platform.ID) error - - // ValidateReplication checks that the replication with the given ID is still usable with its - // persisted settings. 
- ValidateReplication(context.Context, platform.ID) error -} - -type ReplicationHandler struct { - chi.Router - - log *zap.Logger - api *kithttp.API - - replicationsService ReplicationService -} - -func NewInstrumentedReplicationHandler(log *zap.Logger, reg prometheus.Registerer, kv kv.Store, svc ReplicationService) *ReplicationHandler { - // Collect telemetry - svc = newTelemetryCollectingService(kv, svc) - // Collect metrics. - svc = newMetricCollectingService(reg, svc) - // Wrap logging. - svc = newLoggingService(log, svc) - // Wrap authz. - svc = newAuthCheckingService(svc) - - return newReplicationHandler(log, svc) -} - -func newReplicationHandler(log *zap.Logger, svc ReplicationService) *ReplicationHandler { - h := &ReplicationHandler{ - log: log, - api: kithttp.NewAPI(kithttp.WithLog(log)), - replicationsService: svc, - } - - r := chi.NewRouter() - r.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - - r.Route("/", func(r chi.Router) { - r.Get("/", h.handleGetReplications) - r.Post("/", h.handlePostReplication) - - r.Route("/{id}", func(r chi.Router) { - r.Get("/", h.handleGetReplication) - r.Patch("/", h.handlePatchReplication) - r.Delete("/", h.handleDeleteReplication) - r.Post("/validate", h.handleValidateReplication) - }) - }) - - h.Router = r - return h -} - -func (h *ReplicationHandler) Prefix() string { - return prefixReplications -} - -func (h *ReplicationHandler) handleGetReplications(w http.ResponseWriter, r *http.Request) { - q := r.URL.Query() - - // orgID is required for listing replications. - orgID := q.Get("orgID") - o, err := platform.IDFromString(orgID) - if err != nil { - h.api.Err(w, r, errBadOrg) - return - } - - // name, remoteID, and localBucketID are optional additional filters. - name := q.Get("name") - remoteID := q.Get("remoteID") - localBucketID := q.Get("localBucketID") - - filters := influxdb.ReplicationListFilter{OrgID: *o} - if name != "" { - filters.Name = &name - } - if remoteID != "" { - i, err := platform.IDFromString(remoteID) - if err != nil { - h.api.Err(w, r, errBadRemoteID) - return - } - filters.RemoteID = i - } - if localBucketID != "" { - i, err := platform.IDFromString(localBucketID) - if err != nil { - h.api.Err(w, r, errBadLocalBucketID) - return - } - filters.LocalBucketID = i - } - - rs, err := h.replicationsService.ListReplications(r.Context(), filters) - if err != nil { - h.api.Err(w, r, err) - return - } - h.api.Respond(w, r, http.StatusOK, rs) -} - -func (h *ReplicationHandler) handlePostReplication(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - q := r.URL.Query() - - validate := q.Get("validate") == "true" - req := influxdb.CreateReplicationRequest{MaxQueueSizeBytes: influxdb.DefaultReplicationMaxQueueSizeBytes} - if err := h.api.DecodeJSON(r.Body, &req); err != nil { - h.api.Err(w, r, err) - return - } - - if validate { - if err := h.replicationsService.ValidateNewReplication(ctx, req); err != nil { - h.api.Err(w, r, err) - return - } - h.api.Respond(w, r, http.StatusNoContent, nil) - return - } - - replication, err := h.replicationsService.CreateReplication(ctx, req) - if err != nil { - h.api.Err(w, r, err) - return - } - h.api.Respond(w, r, http.StatusCreated, replication) -} - -func (h *ReplicationHandler) handleGetReplication(w http.ResponseWriter, r *http.Request) { - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, errBadId) - return - } - - replication, err := h.replicationsService.GetReplication(r.Context(), *id) - if err != nil { - 
h.api.Err(w, r, err) - return - } - h.api.Respond(w, r, http.StatusOK, replication) -} - -func (h *ReplicationHandler) handlePatchReplication(w http.ResponseWriter, r *http.Request) { - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, errBadId) - return - } - - ctx := r.Context() - q := r.URL.Query() - - validate := q.Get("validate") == "true" - var req influxdb.UpdateReplicationRequest - if err := h.api.DecodeJSON(r.Body, &req); err != nil { - h.api.Err(w, r, err) - return - } - - if validate { - if err := h.replicationsService.ValidateUpdatedReplication(ctx, *id, req); err != nil { - h.api.Err(w, r, err) - return - } - h.api.Respond(w, r, http.StatusNoContent, nil) - return - } - - replication, err := h.replicationsService.UpdateReplication(ctx, *id, req) - if err != nil { - h.api.Err(w, r, err) - return - } - h.api.Respond(w, r, http.StatusOK, replication) -} - -func (h *ReplicationHandler) handleDeleteReplication(w http.ResponseWriter, r *http.Request) { - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, errBadId) - return - } - - if err := h.replicationsService.DeleteReplication(r.Context(), *id); err != nil { - h.api.Err(w, r, err) - return - } - h.api.Respond(w, r, http.StatusNoContent, nil) -} - -func (h *ReplicationHandler) handleValidateReplication(w http.ResponseWriter, r *http.Request) { - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, errBadId) - return - } - - if err := h.replicationsService.ValidateReplication(r.Context(), *id); err != nil { - h.api.Err(w, r, err) - return - } - h.api.Respond(w, r, http.StatusNoContent, nil) -} diff --git a/replications/transport/http_test.go b/replications/transport/http_test.go deleted file mode 100644 index 6406298820a..00000000000 --- a/replications/transport/http_test.go +++ /dev/null @@ -1,341 +0,0 @@ -package transport - -import ( - "bytes" - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/replications/mock" - "github.com/stretchr/testify/assert" - tmock "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -//go:generate go run github.com/golang/mock/mockgen -package mock -destination ../mock/service.go github.com/influxdata/influxdb/v2/replications/transport ReplicationService - -var ( - orgStr = "1234123412341234" - orgID, _ = platform.IDFromString(orgStr) - remoteStr = "9876987698769876" - remoteID, _ = platform.IDFromString(remoteStr) - idStr = "4321432143214321" - id, _ = platform.IDFromString(idStr) - localBucketStr = "1111111111111111" - localBucketId, _ = platform.IDFromString(localBucketStr) - remoteBucketStr = "1234567887654321" - remoteBucketID, _ = platform.IDFromString(remoteBucketStr) - testReplication = influxdb.Replication{ - ID: *id, - OrgID: *orgID, - RemoteID: *remoteID, - LocalBucketID: *localBucketId, - RemoteBucketID: remoteBucketID, - Name: "example", - MaxQueueSizeBytes: influxdb.DefaultReplicationMaxQueueSizeBytes, - } -) - -func TestReplicationHandler(t *testing.T) { - t.Run("get replications happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "GET", ts.URL, nil) - - q := req.URL.Query() - q.Add("orgID", orgStr) - 
q.Add("name", testReplication.Name) - q.Add("remoteID", remoteStr) - q.Add("localBucketID", localBucketStr) - req.URL.RawQuery = q.Encode() - - expected := influxdb.Replications{Replications: []influxdb.Replication{testReplication}} - - svc.EXPECT(). - ListReplications(gomock.Any(), tmock.MatchedBy(func(in influxdb.ReplicationListFilter) bool { - return assert.Equal(t, *orgID, in.OrgID) && - assert.Equal(t, testReplication.Name, *in.Name) && - assert.Equal(t, testReplication.RemoteID, *in.RemoteID) && - assert.Equal(t, testReplication.LocalBucketID, *in.LocalBucketID) - })).Return(&expected, nil) - - res := doTestRequest(t, req, http.StatusOK, true) - - var got influxdb.Replications - require.NoError(t, json.NewDecoder(res.Body).Decode(&got)) - require.Equal(t, expected, got) - }) - - t.Run("create replication happy path", func(t *testing.T) { - - body := influxdb.CreateReplicationRequest{ - OrgID: testReplication.OrgID, - Name: testReplication.Name, - RemoteID: testReplication.RemoteID, - LocalBucketID: testReplication.LocalBucketID, - RemoteBucketID: *testReplication.RemoteBucketID, - } - - t.Run("with explicit queue size", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - body := body - body.MaxQueueSizeBytes = 2 * influxdb.DefaultReplicationMaxQueueSizeBytes - - req := newTestRequest(t, "POST", ts.URL, &body) - - svc.EXPECT().CreateReplication(gomock.Any(), body).Return(&testReplication, nil) - - res := doTestRequest(t, req, http.StatusCreated, true) - - var got influxdb.Replication - require.NoError(t, json.NewDecoder(res.Body).Decode(&got)) - require.Equal(t, testReplication, got) - }) - - t.Run("with default queue size", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "POST", ts.URL, &body) - - expectedBody := body - expectedBody.MaxQueueSizeBytes = influxdb.DefaultReplicationMaxQueueSizeBytes - - svc.EXPECT().CreateReplication(gomock.Any(), expectedBody).Return(&testReplication, nil) - - res := doTestRequest(t, req, http.StatusCreated, true) - - var got influxdb.Replication - require.NoError(t, json.NewDecoder(res.Body).Decode(&got)) - require.Equal(t, testReplication, got) - }) - }) - - t.Run("dry-run create happy path", func(t *testing.T) { - - body := influxdb.CreateReplicationRequest{ - OrgID: testReplication.OrgID, - Name: testReplication.Name, - RemoteID: testReplication.RemoteID, - LocalBucketID: testReplication.LocalBucketID, - RemoteBucketID: *testReplication.RemoteBucketID, - } - - t.Run("with explicit queue size", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - body := body - body.MaxQueueSizeBytes = 2 * influxdb.DefaultReplicationMaxQueueSizeBytes - - req := newTestRequest(t, "POST", ts.URL, &body) - q := req.URL.Query() - q.Add("validate", "true") - req.URL.RawQuery = q.Encode() - - svc.EXPECT().ValidateNewReplication(gomock.Any(), body).Return(nil) - - doTestRequest(t, req, http.StatusNoContent, false) - }) - - t.Run("with default queue size", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "POST", ts.URL, &body) - q := req.URL.Query() - q.Add("validate", "true") - req.URL.RawQuery = q.Encode() - - expectedBody := body - expectedBody.MaxQueueSizeBytes = influxdb.DefaultReplicationMaxQueueSizeBytes - - svc.EXPECT().ValidateNewReplication(gomock.Any(), expectedBody).Return(nil) - - doTestRequest(t, req, http.StatusNoContent, false) - }) - }) - - t.Run("get replication happy path", func(t *testing.T) { - ts, svc := 
newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "GET", ts.URL+"/"+id.String(), nil) - - svc.EXPECT().GetReplication(gomock.Any(), *id).Return(&testReplication, nil) - - res := doTestRequest(t, req, http.StatusOK, true) - - var got influxdb.Replication - require.NoError(t, json.NewDecoder(res.Body).Decode(&got)) - require.Equal(t, testReplication, got) - }) - - t.Run("delete replication happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "DELETE", ts.URL+"/"+id.String(), nil) - - svc.EXPECT().DeleteReplication(gomock.Any(), *id).Return(nil) - - doTestRequest(t, req, http.StatusNoContent, false) - }) - - t.Run("update replication happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - newDescription := "my cool replication" - newQueueSize := 3 * influxdb.DefaultReplicationMaxQueueSizeBytes - body := influxdb.UpdateReplicationRequest{Description: &newDescription, MaxQueueSizeBytes: &newQueueSize} - - req := newTestRequest(t, "PATCH", ts.URL+"/"+id.String(), body) - - svc.EXPECT().UpdateReplication(gomock.Any(), *id, body).Return(&testReplication, nil) - - res := doTestRequest(t, req, http.StatusOK, true) - - var got influxdb.Replication - require.NoError(t, json.NewDecoder(res.Body).Decode(&got)) - require.Equal(t, testReplication, got) - }) - - t.Run("dry-run update happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - newDescription := "my cool replication" - newQueueSize := 3 * influxdb.DefaultReplicationMaxQueueSizeBytes - body := influxdb.UpdateReplicationRequest{Description: &newDescription, MaxQueueSizeBytes: &newQueueSize} - - req := newTestRequest(t, "PATCH", ts.URL+"/"+id.String(), body) - q := req.URL.Query() - q.Add("validate", "true") - req.URL.RawQuery = q.Encode() - - svc.EXPECT().ValidateUpdatedReplication(gomock.Any(), *id, body).Return(nil) - - doTestRequest(t, req, http.StatusNoContent, false) - }) - - t.Run("validate replication happy path", func(t *testing.T) { - ts, svc := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "POST", ts.URL+"/"+id.String()+"/validate", nil) - - svc.EXPECT().ValidateReplication(gomock.Any(), *id).Return(nil) - - doTestRequest(t, req, http.StatusNoContent, false) - }) - - t.Run("invalid replication IDs return 400", func(t *testing.T) { - ts, _ := newTestServer(t) - defer ts.Close() - - req1 := newTestRequest(t, "GET", ts.URL+"/foo", nil) - req2 := newTestRequest(t, "PATCH", ts.URL+"/foo", &influxdb.UpdateReplicationRequest{}) - req3 := newTestRequest(t, "DELETE", ts.URL+"/foo", nil) - - for _, req := range []*http.Request{req1, req2, req3} { - t.Run(req.Method, func(t *testing.T) { - doTestRequest(t, req, http.StatusBadRequest, true) - }) - } - }) - - t.Run("invalid org ID to GET /replications returns 400", func(t *testing.T) { - ts, _ := newTestServer(t) - defer ts.Close() - - req := newTestRequest(t, "GET", ts.URL, nil) - q := req.URL.Query() - q.Add("orgID", "foo") - req.URL.RawQuery = q.Encode() - - doTestRequest(t, req, http.StatusBadRequest, true) - }) - - t.Run("invalid request bodies return 400", func(t *testing.T) { - ts, _ := newTestServer(t) - defer ts.Close() - - body := "o no not an object" - req1 := newTestRequest(t, "POST", ts.URL, &body) - req2 := newTestRequest(t, "PATCH", ts.URL+"/"+id.String(), &body) - - for _, req := range []*http.Request{req1, req2} { - t.Run(req.Method, func(t *testing.T) { - doTestRequest(t, req, http.StatusBadRequest, true) - }) - } - }) - - 
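// --- Illustrative sketch (editor's addition, not part of the deleted file) ---
// The handler under test accepts a JSON CreateReplicationRequest on
// POST /api/v2/replications and, when the request carries ?validate=true, only
// dry-run-validates the configuration (204 No Content on success) instead of
// persisting it. The snippet below shows roughly what such a client call could
// look like against a live server; the host/port, token, and ID values are
// placeholders, the JSON field names are assumed to mirror
// influxdb.CreateReplicationRequest, and only the standard library
// (bytes, fmt, net/http) is used.
func exampleDryRunCreateReplication() error {
	body := []byte(`{
	  "orgID": "1234123412341234",
	  "name": "example",
	  "remoteID": "9876987698769876",
	  "localBucketID": "1111111111111111",
	  "remoteBucketID": "1234567887654321"
	}`)

	req, err := http.NewRequest("POST",
		"http://localhost:8086/api/v2/replications?validate=true",
		bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Token my-secret-token") // placeholder token

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("dry-run validation failed: %s", resp.Status)
	}
	return nil
}
// --- end of illustrative sketch ---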
t.Run("too-small queue size on create is rejected", func(t *testing.T) { - ts, _ := newTestServer(t) - defer ts.Close() - - body := influxdb.CreateReplicationRequest{ - OrgID: testReplication.OrgID, - Name: testReplication.Name, - RemoteID: testReplication.RemoteID, - LocalBucketID: testReplication.LocalBucketID, - RemoteBucketID: *testReplication.RemoteBucketID, - MaxQueueSizeBytes: influxdb.MinReplicationMaxQueueSizeBytes / 2, - } - - req := newTestRequest(t, "POST", ts.URL, &body) - - doTestRequest(t, req, http.StatusBadRequest, true) - }) - - t.Run("too-small queue size on update is rejected", func(t *testing.T) { - ts, _ := newTestServer(t) - defer ts.Close() - - newSize := influxdb.MinReplicationMaxQueueSizeBytes / 2 - body := influxdb.UpdateReplicationRequest{MaxQueueSizeBytes: &newSize} - - req := newTestRequest(t, "PATCH", ts.URL+"/"+id.String(), &body) - - doTestRequest(t, req, http.StatusBadRequest, true) - }) -} - -func newTestServer(t *testing.T) (*httptest.Server, *mock.MockReplicationService) { - ctrl := gomock.NewController(t) - svc := mock.NewMockReplicationService(ctrl) - server := newReplicationHandler(zaptest.NewLogger(t), svc) - return httptest.NewServer(server), svc -} - -func newTestRequest(t *testing.T, method, path string, body interface{}) *http.Request { - dat, err := json.Marshal(body) - require.NoError(t, err) - - req, err := http.NewRequest(method, path, bytes.NewBuffer(dat)) - require.NoError(t, err) - - req.Header.Add("Content-Type", "application/json") - - return req -} - -func doTestRequest(t *testing.T, req *http.Request, wantCode int, needJSON bool) *http.Response { - res, err := http.DefaultClient.Do(req) - require.NoError(t, err) - require.Equal(t, wantCode, res.StatusCode) - if needJSON { - require.Equal(t, "application/json; charset=utf-8", res.Header.Get("Content-Type")) - } - return res -} diff --git a/replications/transport/middleware_auth.go b/replications/transport/middleware_auth.go deleted file mode 100644 index cfb386c2089..00000000000 --- a/replications/transport/middleware_auth.go +++ /dev/null @@ -1,131 +0,0 @@ -package transport - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -func newAuthCheckingService(underlying ReplicationService) *authCheckingService { - return &authCheckingService{underlying} -} - -type authCheckingService struct { - underlying ReplicationService -} - -var _ ReplicationService = (*authCheckingService)(nil) - -func (a authCheckingService) ListReplications(ctx context.Context, filter influxdb.ReplicationListFilter) (*influxdb.Replications, error) { - rs, err := a.underlying.ListReplications(ctx, filter) - if err != nil { - return nil, err - } - - rrs := rs.Replications[:0] - for _, r := range rs.Replications { - _, _, err := authorizer.AuthorizeRead(ctx, influxdb.ReplicationsResourceType, r.ID, r.OrgID) - if err != nil && errors.ErrorCode(err) != errors.EUnauthorized { - return nil, err - } - if errors.ErrorCode(err) == errors.EUnauthorized { - continue - } - rrs = append(rrs, r) - } - return &influxdb.Replications{Replications: rrs}, nil -} - -func (a authCheckingService) CreateReplication(ctx context.Context, request influxdb.CreateReplicationRequest) (*influxdb.Replication, error) { - if err := a.authCreateReplication(ctx, request); err != nil { - return nil, err - } - return 
a.underlying.CreateReplication(ctx, request) -} - -func (a authCheckingService) ValidateNewReplication(ctx context.Context, request influxdb.CreateReplicationRequest) error { - if err := a.authCreateReplication(ctx, request); err != nil { - return err - } - return a.underlying.ValidateNewReplication(ctx, request) -} - -func (a authCheckingService) authCreateReplication(ctx context.Context, request influxdb.CreateReplicationRequest) error { - if _, _, err := authorizer.AuthorizeCreate(ctx, influxdb.ReplicationsResourceType, request.OrgID); err != nil { - return err - } - // N.B. creating a replication requires read-access to both the source bucket and the target remote. - if _, _, err := authorizer.AuthorizeRead(ctx, influxdb.BucketsResourceType, request.LocalBucketID, request.OrgID); err != nil { - return err - } - if _, _, err := authorizer.AuthorizeRead(ctx, influxdb.RemotesResourceType, request.RemoteID, request.OrgID); err != nil { - return err - } - return nil -} - -func (a authCheckingService) GetReplication(ctx context.Context, id platform.ID) (*influxdb.Replication, error) { - r, err := a.underlying.GetReplication(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeRead(ctx, influxdb.ReplicationsResourceType, id, r.OrgID); err != nil { - return nil, err - } - return r, nil -} - -func (a authCheckingService) UpdateReplication(ctx context.Context, id platform.ID, request influxdb.UpdateReplicationRequest) (*influxdb.Replication, error) { - if err := a.authUpdateReplication(ctx, id, request); err != nil { - return nil, err - } - return a.underlying.UpdateReplication(ctx, id, request) -} - -func (a authCheckingService) ValidateUpdatedReplication(ctx context.Context, id platform.ID, request influxdb.UpdateReplicationRequest) error { - if err := a.authUpdateReplication(ctx, id, request); err != nil { - return err - } - return a.underlying.ValidateUpdatedReplication(ctx, id, request) -} - -func (a authCheckingService) authUpdateReplication(ctx context.Context, id platform.ID, request influxdb.UpdateReplicationRequest) error { - r, err := a.underlying.GetReplication(ctx, id) - if err != nil { - return err - } - if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.ReplicationsResourceType, id, r.OrgID); err != nil { - return err - } - if request.RemoteID != nil { - if _, _, err := authorizer.AuthorizeRead(ctx, influxdb.RemotesResourceType, *request.RemoteID, r.OrgID); err != nil { - return err - } - } - return nil -} - -func (a authCheckingService) DeleteReplication(ctx context.Context, id platform.ID) error { - r, err := a.underlying.GetReplication(ctx, id) - if err != nil { - return err - } - if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.ReplicationsResourceType, id, r.OrgID); err != nil { - return err - } - return a.underlying.DeleteReplication(ctx, id) -} - -func (a authCheckingService) ValidateReplication(ctx context.Context, id platform.ID) error { - r, err := a.underlying.GetReplication(ctx, id) - if err != nil { - return err - } - if _, _, err := authorizer.AuthorizeRead(ctx, influxdb.ReplicationsResourceType, id, r.OrgID); err != nil { - return err - } - return a.underlying.ValidateReplication(ctx, id) -} diff --git a/replications/transport/middleware_kv.go b/replications/transport/middleware_kv.go deleted file mode 100644 index 8dd8d35f064..00000000000 --- a/replications/transport/middleware_kv.go +++ /dev/null @@ -1,109 +0,0 @@ -package transport - -import ( - "bytes" - "context" - "encoding/binary" - "fmt" - - 
"github.com/influxdata/influxdb/v2/kit/platform" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kv" -) - -var replicationsBucket = []byte("replicationsv2") - -func newTelemetryCollectingService(kv kv.Store, underlying ReplicationService) *telemetryService { - return &telemetryService{ - kv: kv, - underlying: underlying, - } -} - -type telemetryService struct { - kv kv.Store - underlying ReplicationService -} - -func (t telemetryService) ListReplications(ctx context.Context, filter influxdb.ReplicationListFilter) (*influxdb.Replications, error) { - return t.underlying.ListReplications(ctx, filter) -} - -func (t telemetryService) GetReplication(ctx context.Context, id platform.ID) (*influxdb.Replication, error) { - return t.underlying.GetReplication(ctx, id) -} - -func (t telemetryService) UpdateReplication(ctx context.Context, id platform.ID, request influxdb.UpdateReplicationRequest) (*influxdb.Replication, error) { - return t.underlying.UpdateReplication(ctx, id, request) -} - -func (t telemetryService) ValidateNewReplication(ctx context.Context, request influxdb.CreateReplicationRequest) error { - return t.underlying.ValidateNewReplication(ctx, request) -} - -func (t telemetryService) ValidateUpdatedReplication(ctx context.Context, id platform.ID, request influxdb.UpdateReplicationRequest) error { - return t.underlying.ValidateUpdatedReplication(ctx, id, request) -} - -func (t telemetryService) ValidateReplication(ctx context.Context, id platform.ID) error { - return t.underlying.ValidateReplication(ctx, id) -} - -func (t telemetryService) CreateReplication(ctx context.Context, request influxdb.CreateReplicationRequest) (*influxdb.Replication, error) { - conn, err := t.underlying.CreateReplication(ctx, request) - if err != nil { - return conn, err - } - err = t.storeReplicationMetrics(ctx, request.OrgID) - return conn, err -} - -func (t telemetryService) DeleteReplication(ctx context.Context, id platform.ID) error { - rc, err := t.underlying.GetReplication(ctx, id) - if err != nil { - return err - } - - err = t.underlying.DeleteReplication(ctx, id) - if err != nil { - return err - } - return t.storeReplicationMetrics(ctx, rc.OrgID) -} - -func (t telemetryService) storeReplicationMetrics(ctx context.Context, orgID platform.ID) error { - if err := t.kv.Update(ctx, func(tx kv.Tx) error { - encodedID, err := orgID.Encode() - if err != nil { - return platform.ErrInvalidID - } - bucket, err := tx.Bucket(replicationsBucket) - if err != nil { - return err - } - count, err := t.countReplications(ctx, orgID) - if err != nil { - return err - } - return bucket.Put(encodedID, count) - }); err != nil { - return fmt.Errorf("updating telemetry failed: %v", err) - } - return nil -} - -func (t telemetryService) countReplications(ctx context.Context, orgID platform.ID) ([]byte, error) { - req := influxdb.ReplicationListFilter{ - OrgID: orgID, - } - list, err := t.underlying.ListReplications(ctx, req) - if err != nil { - return nil, err - } - - b := make([]byte, 0, 8) - buf := bytes.NewBuffer(b) - err = binary.Write(buf, binary.BigEndian, int64(len(list.Replications))) - return buf.Bytes(), err -} diff --git a/replications/transport/middleware_kv_test.go b/replications/transport/middleware_kv_test.go deleted file mode 100644 index acdc4bd8064..00000000000 --- a/replications/transport/middleware_kv_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package transport - -import ( - "context" - "encoding/binary" - "testing" - - "github.com/golang/mock/gomock" 
- "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/replications/mock" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -func getCount(kvStore kv.Store, orgID platform.ID) (uint64, error) { - var count uint64 - if err := kvStore.Update(context.Background(), func(tx kv.Tx) error { - encodedID, err := orgID.Encode() - if err != nil { - return err - } - bucket, err := tx.Bucket([]byte("replicationsv2")) - if err != nil { - return err - } - c, err := bucket.Get(encodedID) - if err != nil { - return err - } - - count = binary.BigEndian.Uint64(c) - return nil - }); err != nil { - return 0, err - } - - return count, nil -} - -func TestReplicationCreateKVUpdate(t *testing.T) { - kvStore := inmem.NewKVStore() - gmock := gomock.NewController(t) - defer gmock.Finish() - mockRemote := mock.NewMockReplicationService(gmock) - telemetry := newTelemetryCollectingService(kvStore, mockRemote) - - kvMigrator, err := migration.NewMigrator( - zap.L(), - kvStore, - all.Migrations[:]..., - ) - require.NoError(t, err) - require.NoError(t, kvMigrator.Up(context.Background())) - - ctx := context.Background() - req := influxdb.CreateReplicationRequest{ - OrgID: platform.ID(1), - Name: "test1", - RemoteBucketID: platform.ID(11), - LocalBucketID: platform.ID(22), - } - - replication := influxdb.Replication{ - OrgID: platform.ID(1), - } - replications := influxdb.Replications{ - Replications: []influxdb.Replication{replication}, - } - - mockRemote.EXPECT().CreateReplication(ctx, req).Return(&replication, nil).Times(1) - mockRemote.EXPECT().ListReplications(ctx, influxdb.ReplicationListFilter{OrgID: req.OrgID}).Return(&replications, nil).Times(1) - - repl, err := telemetry.CreateReplication(ctx, req) - require.NoError(t, err) - - count, err := getCount(kvStore, repl.OrgID) - require.NoError(t, err) - require.Equal(t, int64(1), int64(count)) -} - -func TestReplicationDeleteKVUpdate(t *testing.T) { - kvStore := inmem.NewKVStore() - gmock := gomock.NewController(t) - defer gmock.Finish() - mockRemote := mock.NewMockReplicationService(gmock) - telemetry := newTelemetryCollectingService(kvStore, mockRemote) - - ctx := context.Background() - - kvMigrator, err := migration.NewMigrator( - zap.L(), - kvStore, - all.Migrations[:]..., - ) - require.NoError(t, err) - require.NoError(t, kvMigrator.Up(ctx)) - - req := influxdb.CreateReplicationRequest{ - OrgID: platform.ID(1), - Name: "test1", - RemoteBucketID: platform.ID(11), - LocalBucketID: platform.ID(22), - } - req2 := req - req2.Name = "test2" - - replication1 := influxdb.Replication{ - ID: platform.ID(1), - OrgID: platform.ID(1), - } - replication2 := replication1 - replication2.ID = platform.ID(2) - - remoteConnectionsPreDelete := influxdb.Replications{ - Replications: []influxdb.Replication{replication1, replication2}, - } - - remoteConnectionsPostDelete := influxdb.Replications{ - Replications: []influxdb.Replication{replication1}, - } - - mockRemote.EXPECT().CreateReplication(ctx, req).Return(&replication1, nil).Times(1) - mockRemote.EXPECT().CreateReplication(ctx, req2).Return(&replication2, nil).Times(1) - mockRemote.EXPECT().ListReplications(ctx, influxdb.ReplicationListFilter{OrgID: req.OrgID}).Return(&remoteConnectionsPreDelete, 
nil).Times(2) - - mockRemote.EXPECT().DeleteReplication(ctx, replication1.ID).Return(nil).Times(1) - mockRemote.EXPECT().GetReplication(ctx, replication1.ID).Return(&replication1, nil).Times(1) - mockRemote.EXPECT().ListReplications(ctx, influxdb.ReplicationListFilter{OrgID: req.OrgID}).Return(&remoteConnectionsPostDelete, nil).Times(1) - - _, err = telemetry.CreateReplication(ctx, req) - require.NoError(t, err) - - repl, err := telemetry.CreateReplication(ctx, req2) - require.NoError(t, err) - - err = telemetry.DeleteReplication(ctx, replication1.ID) - require.NoError(t, err) - - count, err := getCount(kvStore, repl.OrgID) - require.NoError(t, err) - require.Equal(t, int64(1), int64(count)) -} diff --git a/replications/transport/middleware_logging.go b/replications/transport/middleware_logging.go deleted file mode 100644 index d924a5349f8..00000000000 --- a/replications/transport/middleware_logging.go +++ /dev/null @@ -1,120 +0,0 @@ -package transport - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "go.uber.org/zap" -) - -func newLoggingService(logger *zap.Logger, underlying ReplicationService) *loggingService { - return &loggingService{ - logger: logger, - underlying: underlying, - } -} - -type loggingService struct { - logger *zap.Logger - underlying ReplicationService -} - -var _ ReplicationService = (*loggingService)(nil) - -func (l loggingService) ListReplications(ctx context.Context, filter influxdb.ReplicationListFilter) (rs *influxdb.Replications, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find replications", zap.Error(err), dur) - return - } - l.logger.Debug("replications find", dur) - }(time.Now()) - return l.underlying.ListReplications(ctx, filter) -} - -func (l loggingService) CreateReplication(ctx context.Context, request influxdb.CreateReplicationRequest) (r *influxdb.Replication, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to create replication", zap.Error(err), dur) - return - } - l.logger.Debug("replication create", dur) - }(time.Now()) - return l.underlying.CreateReplication(ctx, request) -} - -func (l loggingService) ValidateNewReplication(ctx context.Context, request influxdb.CreateReplicationRequest) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to validate replication create", zap.Error(err), dur) - return - } - l.logger.Debug("replication validate create", dur) - }(time.Now()) - return l.underlying.ValidateNewReplication(ctx, request) -} - -func (l loggingService) GetReplication(ctx context.Context, id platform.ID) (r *influxdb.Replication, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find replication by ID", zap.Error(err), dur) - return - } - l.logger.Debug("replication find by ID", dur) - }(time.Now()) - return l.underlying.GetReplication(ctx, id) -} - -func (l loggingService) UpdateReplication(ctx context.Context, id platform.ID, request influxdb.UpdateReplicationRequest) (r *influxdb.Replication, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to update replication", zap.Error(err), dur) - return - } - 
l.logger.Debug("replication update", dur) - }(time.Now()) - return l.underlying.UpdateReplication(ctx, id, request) -} - -func (l loggingService) ValidateUpdatedReplication(ctx context.Context, id platform.ID, request influxdb.UpdateReplicationRequest) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to validate replication update", zap.Error(err), dur) - return - } - l.logger.Debug("replication validate update", dur) - }(time.Now()) - return l.underlying.ValidateUpdatedReplication(ctx, id, request) -} - -func (l loggingService) DeleteReplication(ctx context.Context, id platform.ID) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to delete replication", zap.Error(err), dur) - return - } - l.logger.Debug("replication delete", dur) - }(time.Now()) - return l.underlying.DeleteReplication(ctx, id) -} - -func (l loggingService) ValidateReplication(ctx context.Context, id platform.ID) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to validate replication", zap.Error(err), dur) - return - } - l.logger.Debug("replication validate", dur) - }(time.Now()) - return l.underlying.ValidateReplication(ctx, id) -} diff --git a/replications/transport/middleware_metrics.go b/replications/transport/middleware_metrics.go deleted file mode 100644 index 16cfe4fad02..00000000000 --- a/replications/transport/middleware_metrics.go +++ /dev/null @@ -1,69 +0,0 @@ -package transport - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/metric" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/prometheus/client_golang/prometheus" -) - -func newMetricCollectingService(reg prometheus.Registerer, underlying ReplicationService, opts ...metric.ClientOptFn) *metricsService { - o := metric.ApplyMetricOpts(opts...) 
- return &metricsService{ - rec: metric.New(reg, o.ApplySuffix("replication")), - underlying: underlying, - } -} - -type metricsService struct { - rec *metric.REDClient - underlying ReplicationService -} - -var _ ReplicationService = (*metricsService)(nil) - -func (m metricsService) ListReplications(ctx context.Context, filter influxdb.ReplicationListFilter) (*influxdb.Replications, error) { - rec := m.rec.Record("find_replications") - rcs, err := m.underlying.ListReplications(ctx, filter) - return rcs, rec(err) -} - -func (m metricsService) CreateReplication(ctx context.Context, request influxdb.CreateReplicationRequest) (*influxdb.Replication, error) { - rec := m.rec.Record("create_replication") - r, err := m.underlying.CreateReplication(ctx, request) - return r, rec(err) -} - -func (m metricsService) ValidateNewReplication(ctx context.Context, request influxdb.CreateReplicationRequest) error { - rec := m.rec.Record("validate_create_replication") - return rec(m.underlying.ValidateNewReplication(ctx, request)) -} - -func (m metricsService) GetReplication(ctx context.Context, id platform.ID) (*influxdb.Replication, error) { - rec := m.rec.Record("find_replication_by_id") - r, err := m.underlying.GetReplication(ctx, id) - return r, rec(err) -} - -func (m metricsService) UpdateReplication(ctx context.Context, id platform.ID, request influxdb.UpdateReplicationRequest) (*influxdb.Replication, error) { - rec := m.rec.Record("update_replication") - r, err := m.underlying.UpdateReplication(ctx, id, request) - return r, rec(err) -} - -func (m metricsService) ValidateUpdatedReplication(ctx context.Context, id platform.ID, request influxdb.UpdateReplicationRequest) error { - rec := m.rec.Record("validate_update_replication") - return rec(m.underlying.ValidateUpdatedReplication(ctx, id, request)) -} - -func (m metricsService) DeleteReplication(ctx context.Context, id platform.ID) error { - rec := m.rec.Record("delete_replication") - return rec(m.underlying.DeleteReplication(ctx, id)) -} - -func (m metricsService) ValidateReplication(ctx context.Context, id platform.ID) error { - rec := m.rec.Record("validate_replication") - return rec(m.underlying.ValidateReplication(ctx, id)) -} diff --git a/resource/noop/resource_logger.go b/resource/noop/resource_logger.go deleted file mode 100644 index 1bc0eef0916..00000000000 --- a/resource/noop/resource_logger.go +++ /dev/null @@ -1,9 +0,0 @@ -package noop - -import "github.com/influxdata/influxdb/v2/resource" - -type ResourceLogger struct{} - -func (ResourceLogger) Log(resource.Change) error { - return nil -} diff --git a/resource/resource.go b/resource/resource.go deleted file mode 100644 index 9a4fb378013..00000000000 --- a/resource/resource.go +++ /dev/null @@ -1,50 +0,0 @@ -// Package resource defines an interface for recording changes to InfluxDB resources. -// -// A resource is an entity in our system, e.g. an organization, task or bucket. -// A change includes the creation, update or deletion of a resource. -package resource - -import ( - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// Logger records changes to resources. -type Logger interface { - // Log a change to a resource. - Log(Change) error -} - -// Change to a resource. -type Change struct { - // Type of change. - Type ChangeType - // ResourceID of the changed resource. - ResourceID platform.ID - // ResourceType that was changed. 
- ResourceType influxdb.ResourceType - // OrganizationID of the organization owning the changed resource. - OrganizationID platform.ID - // UserID of the user changing the resource. - UserID platform.ID - // ResourceBody after the change. - ResourceBody []byte - // Time when the resource was changed. - Time time.Time -} - -// Type of change. -type ChangeType string - -const ( - // Create a resource. - Create ChangeType = "create" - // Put a resource. - Put = "put" - // Update a resource. - Update = "update" - // Delete a resource - Delete = "delete" -) diff --git a/scraper.go b/scraper.go deleted file mode 100644 index 71fcd7cd888..00000000000 --- a/scraper.go +++ /dev/null @@ -1,66 +0,0 @@ -package influxdb - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// ErrScraperTargetNotFound is the error msg for a missing scraper target. -const ErrScraperTargetNotFound = "scraper target not found" - -// ops for ScraperTarget Store -const ( - OpListTargets = "ListTargets" - OpAddTarget = "AddTarget" - OpGetTargetByID = "GetTargetByID" - OpRemoveTarget = "RemoveTarget" - OpUpdateTarget = "UpdateTarget" -) - -// ScraperTarget is a target to scrape -type ScraperTarget struct { - ID platform.ID `json:"id,omitempty"` - Name string `json:"name"` - Type ScraperType `json:"type"` - URL string `json:"url"` - OrgID platform.ID `json:"orgID,omitempty"` - BucketID platform.ID `json:"bucketID,omitempty"` - AllowInsecure bool `json:"allowInsecure,omitempty"` -} - -// ScraperTargetStoreService defines the crud service for ScraperTarget. -type ScraperTargetStoreService interface { - ListTargets(ctx context.Context, filter ScraperTargetFilter) ([]ScraperTarget, error) - AddTarget(ctx context.Context, t *ScraperTarget, userID platform.ID) error - GetTargetByID(ctx context.Context, id platform.ID) (*ScraperTarget, error) - RemoveTarget(ctx context.Context, id platform.ID) error - UpdateTarget(ctx context.Context, t *ScraperTarget, userID platform.ID) (*ScraperTarget, error) -} - -// ScraperTargetFilter represents a set of filters that restrict the returned results. -type ScraperTargetFilter struct { - IDs map[platform.ID]bool `json:"ids"` - Name *string `json:"name"` - OrgID *platform.ID `json:"orgID"` - Org *string `json:"org"` -} - -// ScraperType defines the scraper methods. -type ScraperType string - -// Scraper types -const ( - // PrometheusScraperType parses metrics from a prometheus endpoint. - PrometheusScraperType = "prometheus" -) - -// ValidScraperType returns true if the type string is valid -func ValidScraperType(s string) bool { - switch s { - case PrometheusScraperType: - return true - default: - return false - } -} diff --git a/scripts/ci/CHANGELOG_frozen.md b/scripts/ci/CHANGELOG_frozen.md deleted file mode 100644 index 56eec394d27..00000000000 --- a/scripts/ci/CHANGELOG_frozen.md +++ /dev/null @@ -1,1805 +0,0 @@ -## v2.5.1 [2022-11-02] ---------------------- - -### Other - -1. [5b6fdbf](https://github.com/influxdata/influxdb/commit/5b6fdbf): Revert "fix: set limited permissions on package installs - -## v2.5.0 [2022-11-01] ---------------------- - -### Bug Fixes - -1. [daaf866](https://github.com/influxdata/influxdb/commit/daaf866): Several minor quality issues -1. [ee8ca45](https://github.com/influxdata/influxdb/commit/ee8ca45): Use copy of loop variable in parallel test -1. [11019d2](https://github.com/influxdata/influxdb/commit/11019d2): Check that user IDs are not in use in user create -1.
[b87deb4](https://github.com/influxdata/influxdb/commit/b87deb4): Don't allow creating an auth with instance resources -1. [b51fefd](https://github.com/influxdata/influxdb/commit/b51fefd): Set limited permissions on package installs -1. [663d43d](https://github.com/influxdata/influxdb/commit/663d43d): Allow backup of all buckets -1. [a0f1184](https://github.com/influxdata/influxdb/commit/a0f1184): Manually scheduled task runs now run when expected -1. [4ed184d](https://github.com/influxdata/influxdb/commit/4ed184d): Fixes an error querying virtual dbrps -1. [2ad8995](https://github.com/influxdata/influxdb/commit/2ad8995): Improve delete speed when a measurement is part of the predicate -1. [3ac7a10](https://github.com/influxdata/influxdb/commit/3ac7a10): Downgrading to 2.3 was broken -1. [55b7d29](https://github.com/influxdata/influxdb/commit/55b7d29): Sql scan error on remote bucket id when replication to 1.x -1. [81e2ec6](https://github.com/influxdata/influxdb/commit/81e2ec6): Enable gzipped responses with the legacy handler -1. [e61485a](https://github.com/influxdata/influxdb/commit/e61485a): Only the latest scraper being run -1. [9582826](https://github.com/influxdata/influxdb/commit/9582826): Handle a potential nil iterator leading to a panic -1. [6fc66ac](https://github.com/influxdata/influxdb/commit/6fc66ac): Do not require remoteOrgID in remote config/creation request - -### Features - -1. [485968c](https://github.com/influxdata/influxdb/commit/485968c): Unpin ui to point at latest -1. [b72848d](https://github.com/influxdata/influxdb/commit/b72848d): Optimize saving changes to fields.idx -1. [f36646d](https://github.com/influxdata/influxdb/commit/f36646d): Bump to latest UI - -### Other - -1. [accce86](https://github.com/influxdata/influxdb/commit/accce86): Chore: update CHANGELOG_frozen.md for 2.4 -1. [785a465](https://github.com/influxdata/influxdb/commit/785a465): Refactor: remove reference to flux.Spec in query tests -1. [728070e](https://github.com/influxdata/influxdb/commit/728070e): Chore: upgrade Rust to 1.63.0 -1. [aa9c49e](https://github.com/influxdata/influxdb/commit/aa9c49e): Build(flux): update flux to v0.180.1 -1. [8f15620](https://github.com/influxdata/influxdb/commit/8f15620): Build(flux): update flux to v0.181.0 -1. [1c6fbf9](https://github.com/influxdata/influxdb/commit/1c6fbf9): Chore: add protoc-gen script to releng (2.x) -1. [c433342](https://github.com/influxdata/influxdb/commit/c433342): Chore: remove duplicate word in comments -1. [91623dd](https://github.com/influxdata/influxdb/commit/91623dd): Docs(logger): fix incorrect doc string -1. [c40ad64](https://github.com/influxdata/influxdb/commit/c40ad64): Feat(security): set SameSite=strict on session cookie -1. [43c2e08](https://github.com/influxdata/influxdb/commit/43c2e08): Chore: upgrade to Go 1.18.6 -1. [635f8d8](https://github.com/influxdata/influxdb/commit/635f8d8): Build(flux): update flux to v0.184.2 -1. [eada36b](https://github.com/influxdata/influxdb/commit/eada36b): Test: remove group skips -1. [aa5c1c0](https://github.com/influxdata/influxdb/commit/aa5c1c0): Docs: cleanup CONTRIBUTING.md - clarify instructions and output. -1. [a321e72](https://github.com/influxdata/influxdb/commit/a321e72): Build(flux): update flux to v0.185.0 -1. 
[d8553c0](https://github.com/influxdata/influxdb/commit/d8553c0): Test(flux): use vanilla flagger for fluxtest -1. [34254ee](https://github.com/influxdata/influxdb/commit/34254ee): Build(flux): update flux to v0.186.0 -1. [a0c3703](https://github.com/influxdata/influxdb/commit/a0c3703): Build(flux): update flux to v0.187.0 -1. [89d9207](https://github.com/influxdata/influxdb/commit/89d9207): Chore: update to use scheduled pipeline (2.x) -1. [0389d51](https://github.com/influxdata/influxdb/commit/0389d51): Chore: upgrade to Go 1.18.7 -1. [fa393cc](https://github.com/influxdata/influxdb/commit/fa393cc): Chore(readme): add resource links and logo -1. [1033334](https://github.com/influxdata/influxdb/commit/1033334): Build(flux): update flux to v0.188.0 -1. [8c23f92](https://github.com/influxdata/influxdb/commit/8c23f92): Build(flux): update flux to v0.188.1 - -## v2.4.0 [2022-08-18] ----------------------- - -### Bug Fixes - -1. [21885a7](https://github.com/influxdata/influxdb/commit/21885a7): Log the log level at startup -1. [76cfddb](https://github.com/influxdata/influxdb/commit/76cfddb): Emit zipfile for windows -1. [69a95dc](https://github.com/influxdata/influxdb/commit/69a95dc): Update the condition when reseting cursor -1. [4789d54](https://github.com/influxdata/influxdb/commit/4789d54): Improve error messages opening index partitions -1. [00edb77](https://github.com/influxdata/influxdb/commit/00edb77): Create TSI MANIFEST files atomically -1. [f762346](https://github.com/influxdata/influxdb/commit/f762346): Add paths to tsi log and index file errors -1. [619eb1c](https://github.com/influxdata/influxdb/commit/619eb1c): Restore in-memory Manifest on write error -1. [f7b1905](https://github.com/influxdata/influxdb/commit/f7b1905): Do not delete replication on remote config delete -1. [afbbfac](https://github.com/influxdata/influxdb/commit/afbbfac): Fix virtual DBRP FindMany, make virtual bucket default if not overridden -1. [187f991](https://github.com/influxdata/influxdb/commit/187f991): Improve virtual DBRP default handling - -### Features - -1. [bf5e6eb](https://github.com/influxdata/influxdb/commit/bf5e6eb): Update Contributing.md to be more accurate for a clean checkout -1. [e7cf522](https://github.com/influxdata/influxdb/commit/e7cf522): Implement nightly docker builds without goreleaser -1. [67ccbae](https://github.com/influxdata/influxdb/commit/67ccbae): Add the concept of an instance owner -1. [adeac8b](https://github.com/influxdata/influxdb/commit/adeac8b): Add virtual DBRP mappings based on bucket name -1. [90d45e8](https://github.com/influxdata/influxdb/commit/90d45e8): Enable static-pie builds (2.x) -1. [6f50e70](https://github.com/influxdata/influxdb/commit/6f50e70): Replicate based on bucket name rather than id - -### Other - -1. [83bb8ed](https://github.com/influxdata/influxdb/commit/83bb8ed): Build: update frozen changelog -1. [85e4e63](https://github.com/influxdata/influxdb/commit/85e4e63): Build: fix release workflow -1. [cbbf4b2](https://github.com/influxdata/influxdb/commit/cbbf4b2): Build(flux): update flux to v0.172.0 -1. [3fcc085](https://github.com/influxdata/influxdb/commit/3fcc085): Chore: Fix link in the README -1. [07bab31](https://github.com/influxdata/influxdb/commit/07bab31): Build(flux): update flux to v0.173.0 -1. 
[4d33c70](https://github.com/influxdata/influxdb/commit/4d33c70): Build(flux): update flux to v0.174.0 -1. [4da4d03](https://github.com/influxdata/influxdb/commit/4da4d03): Build(flux): update flux to v0.174.1 -1. [4b2949a](https://github.com/influxdata/influxdb/commit/4b2949a): Build: upload release and nightly CHANGELOG.md -1. [33a7add](https://github.com/influxdata/influxdb/commit/33a7add): Test(label): Invalid closure capture -1. [91a83ba](https://github.com/influxdata/influxdb/commit/91a83ba): Chore: Update PULL_REQUEST_TEMPLATE.md -1. [85dc158](https://github.com/influxdata/influxdb/commit/85dc158): Chore: upgrade CircleCI Mac OSX image -1. [37562c7](https://github.com/influxdata/influxdb/commit/37562c7): Build: upgrade to Go 1.18.4 -1. [a9f751f](https://github.com/influxdata/influxdb/commit/a9f751f): Feat(query): add planner rule for converting aggregate window to a push down -1. [c58bbab](https://github.com/influxdata/influxdb/commit/c58bbab): Build(flux): update flux to v0.176.0 -1. [f0072ef](https://github.com/influxdata/influxdb/commit/f0072ef): Chore(pkger): fix typo in README.md -1. [7e7d1db](https://github.com/influxdata/influxdb/commit/7e7d1db): Build(flux): update flux to v0.177.0 -1. [cd4f93b](https://github.com/influxdata/influxdb/commit/cd4f93b): Build(flux): update flux to v0.177.1 -1. [78c969e](https://github.com/influxdata/influxdb/commit/78c969e): Build(flux): update flux to v0.178.0 -1. [c2c9d17](https://github.com/influxdata/influxdb/commit/c2c9d17): Build(flux): update flux to v0.179.0 -1. [48fb5ce](https://github.com/influxdata/influxdb/commit/48fb5ce): Chore: update fluxtest skip list -1. [de247ba](https://github.com/influxdata/influxdb/commit/de247ba): Chore: use 22.04 image instead of 21.10 for perf test - -## v2.3.0 [2022-06-16] ----------------------- - -### Bug Fixes - -1. [c535994](https://github.com/influxdata//commit/c535994): Remove controller 64bit misalignment -1. [30a9fd4](https://github.com/influxdata//commit/30a9fd4): MeasurementsCardinality should not be less than 0 -1. [9c33764](https://github.com/influxdata//commit/9c33764): Do not panic on cleaning up failed iterators -1. [8c9768c](https://github.com/influxdata//commit/8c9768c): Replace unprintable and invalid characters in errors -1. [a9df3f8](https://github.com/influxdata//commit/a9df3f8): Fully clean up partially opened TSI -1. [53580ea](https://github.com/influxdata//commit/53580ea): Remember shards that fail Open(), avoid repeated attempts -1. [9e55686](https://github.com/influxdata//commit/9e55686): Replications remote write failure can deadlock remote writer -1. [8bd4fc5](https://github.com/influxdata//commit/8bd4fc5): Lost TSI reference / close TagValueSeriesIDIterator in error case - -### Features - -1. [9e20f9f](https://github.com/influxdata//commit/9e20f9f): Add signifier to replication user agent -1. [a10adf6](https://github.com/influxdata//commit/a10adf6): Add fields to tasks bucket to match cloud -1. [d705841](https://github.com/influxdata//commit/d705841): Error when creating v1 auth with a nonexistent bucket id -1. [692b0d5](https://github.com/influxdata//commit/692b0d5): Add instance-id flag for identifying edge nodes -1. [090f681](https://github.com/influxdata//commit/090f681): Add remotes and replications to telemetry - -### Other - -1. 
[72c4c55](https://github.com/influxdata//commit/72c4c55): Build(flux): update flux to v0.162.0 -1. [bb84905](https://github.com/influxdata//commit/bb84905): Build(flux): update flux to v0.163.0 -1. [cf1f2e2](https://github.com/influxdata//commit/cf1f2e2): Build(flux): update flux to v0.164.0 -1. [82d1123](https://github.com/influxdata//commit/82d1123): Build: upgrade to Go 1.18.1 -1. [24f64a7](https://github.com/influxdata//commit/24f64a7): Fix(annotations): skip flaky annotation tests -1. [05840ce](https://github.com/influxdata//commit/05840ce): Fix(systemd): fix operator in host detection -1. [1805f4b](https://github.com/influxdata//commit/1805f4b): Build(flux): update flux to v0.164.1 -1. [438eadc](https://github.com/influxdata//commit/438eadc): Chore: use common semantic PR and commit message checks -1. [71a02c2](https://github.com/influxdata//commit/71a02c2): Chore: remove previous semantic pull request config -1. [d906507](https://github.com/influxdata//commit/d906507): Chore: upgrade flux to v0.167.0 -1. [6222ea2](https://github.com/influxdata//commit/6222ea2): Build(flux): update flux to v0.168.0 -1. [afd0b99](https://github.com/influxdata//commit/afd0b99): Build: update changelogger to ignore case when parsing verbs -1. [2e9e174](https://github.com/influxdata//commit/2e9e174): Fix(query/control): add all of the statistics from flux statistics instead of only metadata -1. [8f54774](https://github.com/influxdata//commit/8f54774): Build(flux): update flux to v0.169.0 -1. [ab8be80](https://github.com/influxdata//commit/ab8be80): Chore: update jsonparser to 1.1.1 and yaml.v3 to 3.0.1 -1. [910d5a2](https://github.com/influxdata//commit/910d5a2): Build: upgrade Go to 1.18.3 -1. [a8054f8](https://github.com/influxdata//commit/a8054f8): Test: add openapi spec validation to OSS grace tests -1. [841be3c](https://github.com/influxdata//commit/841be3c): Build: remove goreleaser for build workflow -1. [a492993](https://github.com/influxdata//commit/a492993): Build(flux): update flux to v0.171.0 - -## v2.2.0 [2022-03-29] ----------------------- - -### Bug Fixes - -1. [84776d7](https://github.com/influxdata//commit/84776d7): Manual task runs are scheduled asyncronously -1. [5e6b0d5](https://github.com/influxdata//commit/5e6b0d5): Extend snapshot copy to filesystems that cannot link -1. [88afa92](https://github.com/influxdata//commit/88afa92): Detect misquoted tag values and return an error -1. [2bace77](https://github.com/influxdata//commit/2bace77): Unhandled errors returned by Sketch.Merge -1. [fa9ba8e](https://github.com/influxdata//commit/fa9ba8e): Duplicated X-version and X-Build headers for /ping endpoint -1. [8aa3a8f](https://github.com/influxdata//commit/8aa3a8f): Add causal error when meta.db is missing -1. [5ce164f](https://github.com/influxdata//commit/5ce164f): Remove influx CLI output from CONTRIBUTING -1. [e4e1633](https://github.com/influxdata//commit/e4e1633): Replications remote writes do not block server shutdown -1. [e5cbd27](https://github.com/influxdata//commit/e5cbd27): Advance replications queue after successful remote writes -1. [4fd4bd0](https://github.com/influxdata//commit/4fd4bd0): Use copy when a rename spans volumes -1. [0c30afd](https://github.com/influxdata//commit/0c30afd): Updating a check does not require an owner id -1. 
[b8ccf5b](https://github.com/influxdata//commit/b8ccf5b): Correctly handle PartialWriteError -1. [e20b5e9](https://github.com/influxdata//commit/e20b5e9): Remove nats for scraper processing -1. [0bd28f6](https://github.com/influxdata//commit/0bd28f6): Update 422 dry-run response to conform to API spec -1. [e5ccbb8](https://github.com/influxdata//commit/e5ccbb8): Forbid reading OSS buckets for a token with only write permissions -1. [49ce57c](https://github.com/influxdata//commit/49ce57c): Remove telegraf endpoint pagination -1. [7c0ec4d](https://github.com/influxdata//commit/7c0ec4d): Replications replicates flux to() writes -1. [df01d93](https://github.com/influxdata//commit/df01d93): Allow flux http calls to be unlimited -1. [3ec5a57](https://github.com/influxdata//commit/3ec5a57): Tell browser about cookie expiry -1. [e304ef9](https://github.com/influxdata//commit/e304ef9): Add write permissions check for DELETE and DROP MEASUREMENT -1. [a2f8538](https://github.com/influxdata//commit/a2f8538): Pin UI to OSS-2.1.2 so tokens can be accessed - -### Features - -1. [504f0e4](https://github.com/influxdata//commit/504f0e4): Passing `type=basic` returns task metadata without query text -1. [58139c4](https://github.com/influxdata//commit/58139c4): Add auth to remotes & replications APIs -1. [8825cd5](https://github.com/influxdata//commit/8825cd5): Replication apis durable queue management -1. [cd0243d](https://github.com/influxdata//commit/cd0243d): Added replications queue management to launcher tasks -1. [6b56af3](https://github.com/influxdata//commit/6b56af3): Mirror writes to registered replications -1. [40d9587](https://github.com/influxdata//commit/40d9587): Add replications queue scanner -1. [ad52815](https://github.com/influxdata//commit/ad52815): Add field for dropping data resulting in non-retryable errors to individual replications -1. [fea3037](https://github.com/influxdata//commit/fea3037): Configure perf tests with yaml -1. [dece95d](https://github.com/influxdata//commit/dece95d): Tsm compaction metrics via prometheus -1. [3a81166](https://github.com/influxdata//commit/3a81166): Added metrics collection for replications -1. [0a74085](https://github.com/influxdata//commit/0a74085): Point write requests have metrics -1. [a74e051](https://github.com/influxdata//commit/a74e051): Disk size metrics per shard -1. [feb459c](https://github.com/influxdata//commit/feb459c): Metrics for cache subsystem -1. [edb21ab](https://github.com/influxdata//commit/edb21ab): Metrics for wal subsystem -1. [9873ccd](https://github.com/influxdata//commit/9873ccd): Remote write function for replications -1. [f05d013](https://github.com/influxdata//commit/f05d013): Metrics collection for replications remote writes -1. [3460f1c](https://github.com/influxdata//commit/3460f1c): Replication remote writes do not block local writes -1. [b970e35](https://github.com/influxdata//commit/b970e35): Remaining storage metrics from OSS engine -1. [28bcd41](https://github.com/influxdata//commit/28bcd41): Batch replications remote writes to avoid payload limit errors -1. [6096ee2](https://github.com/influxdata//commit/6096ee2): Replications metrics include failure to enqueue -1. 
[a7a5233](https://github.com/influxdata//commit/a7a5233): Advance queue scanner periodically instead of every remote write -1. [5a919b6](https://github.com/influxdata//commit/5a919b6): Enable remotes and replication streams feature -1. [c51a0df](https://github.com/influxdata//commit/c51a0df): Error out when config file contains 1.x config values -1. [afb167a](https://github.com/influxdata//commit/afb167a): `query-memory-bytes` zero-value is unlimited -1. [f78f9ed](https://github.com/influxdata//commit/f78f9ed): Api/v2/config endpoint displays runtime configuration -1. [4f74049](https://github.com/influxdata//commit/4f74049): Add downgrade target for 2.1 -1. [b02c89e](https://github.com/influxdata//commit/b02c89e): Option to log flux queries cancelled because of server shutdown -1. [4e08604](https://github.com/influxdata//commit/4e08604): Add MeasurementNames method to MeasurementFieldSet -1. [a40e12b](https://github.com/influxdata//commit/a40e12b): Allow changing a password with `influxd recovery user update` -1. [2c930fd](https://github.com/influxdata//commit/2c930fd): Add --hardening-enabled option to limit flux/pkger HTTP requests -1. [359fcc4](https://github.com/influxdata//commit/359fcc4): Add maximum age to replication queues - -### Other - -1. [05e6dc6](https://github.com/influxdata//commit/05e6dc6): Build(flux): update flux to v0.135.0 -1. [de7f052](https://github.com/influxdata//commit/de7f052): Chore: fix `dump_tsi` deadlock -1. [fba7fac](https://github.com/influxdata//commit/fba7fac): Build(flux): update flux to v0.136.0 -1. [b93f3a3](https://github.com/influxdata//commit/b93f3a3): Build(flux): update flux to v0.137.0 -1. [f4e9ae9](https://github.com/influxdata//commit/f4e9ae9): Build: upgrade protobuf library -1. [ca633cd](https://github.com/influxdata//commit/ca633cd): Chore: Remove outdated query readme -1. [a7f3b67](https://github.com/influxdata//commit/a7f3b67): Chore: clean up protobuf loose ends -1. [1aac92c](https://github.com/influxdata//commit/1aac92c): Refactor: remove replications.current_queue_size_bytes from sqlite -1. [f6568a7](https://github.com/influxdata//commit/f6568a7): Build(flux): update flux to v0.140.0 -1. [5a0051a](https://github.com/influxdata//commit/5a0051a): Build: Remove additional commit file -1. [0ecde93](https://github.com/influxdata//commit/0ecde93): Build: stop building & pushing images to quay.io -1. [11f6052](https://github.com/influxdata//commit/11f6052): Build: better versioning and verification for releases -1. [16e3b16](https://github.com/influxdata//commit/16e3b16): Chore: refactor performance test to generate queries and data together -1. [0572ae0](https://github.com/influxdata//commit/0572ae0): Build: replace cross_build job with parallelized build matrix -1. [e7a77f0](https://github.com/influxdata//commit/e7a77f0): Build: follow-up fixes after CI refactor -1. [a5f6431](https://github.com/influxdata//commit/a5f6431): Build: pull SHA-specific image before tagging it as latest -1. [9d8173c](https://github.com/influxdata//commit/9d8173c): Chore: delete dead pprof related code -1. [90baa80](https://github.com/influxdata//commit/90baa80): Chore: use tagged version of pkg-config -1. [b9b86a1](https://github.com/influxdata//commit/b9b86a1): Chore: remove remote validation code -1. 
[6ee4727](https://github.com/influxdata//commit/6ee4727): Refactor: use remote write func in NewDurableQueueManager -1. [5a1e375](https://github.com/influxdata//commit/5a1e375): Build: allow to build on FreeBSD -1. [f47d514](https://github.com/influxdata//commit/f47d514): Refactor: move replications store functionality to separate package -1. [4ee93a9](https://github.com/influxdata//commit/4ee93a9): Chore: fix up message when build fails due to OS -1. [9308b65](https://github.com/influxdata//commit/9308b65): Chore: remove unused member from TagSet -1. [799d349](https://github.com/influxdata//commit/799d349): Fix(tsi): sync index file before close -1. [9060150](https://github.com/influxdata//commit/9060150): Test: add e2e test for replication streams -1. [39eeb3e](https://github.com/influxdata//commit/39eeb3e): Fix(restore): fix race condition which could cause restore command to fail -1. [e3ff434](https://github.com/influxdata//commit/e3ff434): Test: fix flaky replications tests -1. [0e5b14f](https://github.com/influxdata//commit/0e5b14f): Chore: increase replications batch size limits -1. [f78c189](https://github.com/influxdata//commit/f78c189): Build(flux): update flux to v0.146.0 -1. [6023496](https://github.com/influxdata//commit/6023496): Refactor: replications local write optimization -1. [b0a0e73](https://github.com/influxdata//commit/b0a0e73): Chore: remove unused user http code -1. [a8ca413](https://github.com/influxdata//commit/a8ca413): Chore: add deprecation notice for print-config command -1. [c889d0f](https://github.com/influxdata//commit/c889d0f): Chore: remove outdated `/docs` folder -1. [11c0081](https://github.com/influxdata//commit/11c0081): Fix(templates): disable use of jsonnet with `/api/v2/templates/apply` -1. [c1d384d](https://github.com/influxdata//commit/c1d384d): Test: fix flaky enqueue test -1. [a812d8b](https://github.com/influxdata//commit/a812d8b): Build(flux): update flux to v0.148.0 -1. [888f82c](https://github.com/influxdata//commit/888f82c): Build(flux): update flux to v0.150.0 -1. [2a957c9](https://github.com/influxdata//commit/2a957c9): Chore(query/influxql): remove the influxql transpiler -1. [64615f4](https://github.com/influxdata//commit/64615f4): Chore: remove unused dockerfile -1. [f68758b](https://github.com/influxdata//commit/f68758b): Build(flux): update flux to v0.152.0 -1. [34c150f](https://github.com/influxdata//commit/34c150f): Build(flux): update flux to v0.154.0 -1. [c4717ef](https://github.com/influxdata//commit/c4717ef): Refactor(query): remove detailed query traces -1. [afb9733](https://github.com/influxdata//commit/afb9733): Build(flux): update flux to v0.156.0 -1. [adf29df](https://github.com/influxdata//commit/adf29df): Feat(kit/feature): allow influxdb to set flux feature flags -1. [0c5bedf](https://github.com/influxdata//commit/0c5bedf): Refactor: improve logging during influxd upgrade to give mapping metadata at info level -1. [355d32b](https://github.com/influxdata//commit/355d32b): Chore: update OSS ci pipeline to match the updates to monitor-ci -1. [36df687](https://github.com/influxdata//commit/36df687): Build(flux): update flux to v0.158.0 -1. [7d310c2](https://github.com/influxdata//commit/7d310c2): Build(flux): update flux to v0.159.0 -1. 
[253451b](https://github.com/influxdata//commit/253451b): Test: unskip flux group tests -1. [dedaa7f](https://github.com/influxdata//commit/dedaa7f): Chore: upgrade to go1.17.8 -1. [5e3ea7b](https://github.com/influxdata//commit/5e3ea7b): Refactor(flux): convert the allocator into an interface -1. [0504498](https://github.com/influxdata//commit/0504498): Fix(models): reset provided slice correctly -1. [5231d2d](https://github.com/influxdata//commit/5231d2d): Feat(query): enable the mqtt pool dialer by default -1. [89916ec](https://github.com/influxdata//commit/89916ec): Build(flux): update flux to v0.161.0 - - -## v2.1.1 [2021-11-08] ----------------------- - -### Other - -1. [688b8c9](https://github.com/influxdata//commit/688b8c9): Build: updated changelog automation -1. [657e183](https://github.com/influxdata//commit/657e183): Chore: don't look for non-existent changelog file in release job - -## v2.1.0 [2021-11-04] - -### `influx` CLI moved to separate repository - -The `influx` CLI has been moved to its [own GitHub repository](https://github.com/influxdata/influx-cli/). Release artifacts -produced by `influxdb` are impacted as follows: - -* Release archives (`.tar.gz` and `.zip`) no longer contain the `influx` binary. -* The `influxdb2` package (`.deb` and `.rpm`) no longer contains the `influx` binary. Instead, it declares a recommended - dependency on the new `influx-cli` package. -* The `quay.io/influxdb/influxdb` image no longer contains the `influx` binary. Users are recommended to migrate to the - `influxdb` image hosted in DockerHub. - -With this change, versions of the `influx` CLI and `influxd` server are not guaranteed to exactly match. Please use -`influxd version` or `curl /health` when checking the version of the installed/running server. - -### Notebooks and Annotations - -Support for Notebooks and Annotations is included with this release. - -### Features - -1. [21218](https://github.com/influxdata/influxdb/pull/21218): Add the properties of a static legend for line graphs and band plots -1. [21367](https://github.com/influxdata/influxdb/pull/21367): List users via the API now supports pagination -1. [21543](https://github.com/influxdata/influxdb/pull/21543): Added `influxd` configuration flag `--sqlite-path` for specifying a user-defined path to the SQLite database file -1. [21543](https://github.com/influxdata/influxdb/pull/21543): Updated `influxd` configuration flag `--store` to work with string values `disk` or `memory`. Memory continues to store metadata in-memory for testing; `disk` will persist metadata to disk via bolt and SQLite -1. [21547](https://github.com/influxdata/influxdb/pull/21547): Allow hiding the tooltip independently of the static legend -1. [21584](https://github.com/influxdata/influxdb/pull/21584): Added the `api/v2/backup/metadata` endpoint for backing up both KV and SQL metadata, and the `api/v2/restore/sql` for restoring SQL metadata -1. [21635](https://github.com/influxdata/influxdb/pull/21635): Port `influxd inspect verify-seriesfile` to 2.x -1. [21621](https://github.com/influxdata/influxdb/pull/21621): Add `storage-wal-max-concurrent-writes` config option to `influxd` to enable tuning memory pressure under heavy write load -1. 
[21621](https://github.com/influxdata/influxdb/pull/21621): Add `storage-wal-max-write-delay` config option to `influxd` to prevent deadlocks when the WAL is overloaded with concurrent writes -1. [21615](https://github.com/influxdata/influxdb/pull/21615): Ported the `influxd inspect verify-tsm` command from 1.x -1. [21646](https://github.com/influxdata/influxdb/pull/21646): Ported the `influxd inspect verify-tombstone` command from 1.x -1. [21761](https://github.com/influxdata/influxdb/pull/21761): Ported the `influxd inspect dump-tsm` command from 1.x -1. [21788](https://github.com/influxdata/influxdb/pull/21788): Ported the `influxd inspect report-tsi` command from 1.x -1. [21784](https://github.com/influxdata/influxdb/pull/21784): Ported the `influxd inspect dumptsi` command from 1.x -1. [21786](https://github.com/influxdata/influxdb/pull/21786): Ported the `influxd inspect deletetsm` command from 1.x -1. [21888](https://github.com/influxdata/influxdb/pull/21888): Ported the `influxd inspect dump-wal` command from 1.x -1. [21828](https://github.com/influxdata/influxdb/pull/21828): Added the command `influx inspect verify-wal` -1. [21814](https://github.com/influxdata/influxdb/pull/21814): Ported the `influxd inspect report-tsm` command from 1.x -1. [21936](https://github.com/influxdata/influxdb/pull/21936): Ported the `influxd inspect build-tsi` command from 1.x -1. [21938](https://github.com/influxdata/influxdb/pull/21938): Added route to delete individual secret -1. [21972](https://github.com/influxdata/influxdb/pull/21972): Added support for notebooks and annotations -1. [22311](https://github.com/influxdata/influxdb/pull/22311): Add `storage-no-validate-field-size` config to `influxd` to disable enforcement of max field size -1. [22322](https://github.com/influxdata/influxdb/pull/22322): Add support for `merge_hll`, `sum_hll`, and `count_hll` in InfluxQL -1. [22476](https://github.com/influxdata/influxdb/pull/22476): Allow new telegraf input plugins and update toml -1. [22607](https://github.com/influxdata/influxdb/pull/22607): Update push down window logic for location option -1. [22617](https://github.com/influxdata/influxdb/pull/22617): Add `--storage-write-timeout` flag to set write request timeouts -1. [22396](https://github.com/influxdata/influxdb/pull/22396): Show measurement database and retention policy wildcards -1. [22590](https://github.com/influxdata/influxdb/pull/22590): New recovery subcommand allows creating recovery user/token -1. [22629](https://github.com/influxdata/influxdb/pull/22629): Return new operator token during backup overwrite -1. [22635](https://github.com/influxdata/influxdb/pull/22635): update window planner rules for location changes to support fixed offsets -1. [22634](https://github.com/influxdata/influxdb/pull/22634): enable writing to remote hosts via `to()` and `experimental.to()` -1. [22498](https://github.com/influxdata/influxdb/pull/22498): Add Bearer token auth -1. [22669](https://github.com/influxdata/influxdb/pull/22669): Enable new dashboard autorefresh -1. [22674](https://github.com/influxdata/influxdb/pull/22674): list-bucket API supports pagination when filtering by org -1. [22810](https://github.com/influxdata/influxdb/pull/22810): Recommend `influxd downgrade` when encountering an unknown migration during startup -1. 
[22816](https://github.com/influxdata/influxdb/pull/22816): Update flux to `v0.139.0` -1. [22818](https://github.com/influxdata/influxdb/pull/22818): Add `influxd downgrade` command for downgrading metadata stores be compatible with previous versions of InfluxDB -1. [22819](https://github.com/influxdata/influxdb/pull/22819): Update UI to `OSS-2.1.2` - -### Bug Fixes - -1. [21648](https://github.com/influxdata/influxdb/pull/21648): Change static legend's `hide` to `show` to let users decide if they want it -1. [22448](https://github.com/influxdata/influxdb/pull/22448): Log API errors to server logs and tell clients to check the server logs for the error message -1. [22545](https://github.com/influxdata/influxdb/pull/22545): Sync series segment to disk after writing -1. [22604](https://github.com/influxdata/influxdb/pull/22604): Do not allow shard creation to create overlapping shards -1. [22650](https://github.com/influxdata/influxdb/pull/22650): Don't drop shard-group durations when upgrading DBs - -## v2.0.9 [2021-09-27] - -### Features - -1. [22346](https://github.com/influxdata/influxdb/pull/22346): Add `--flux-log-enabled` flag for detailed flux logs -1. [22370](https://github.com/influxdata/influxdb/pull/22370): Add additional log to flux e2e tests (#22366) -1. [22464](https://github.com/influxdata/influxdb/pull/22464): Multi-measurement query optimization -1. [22466](https://github.com/influxdata/influxdb/pull/22466): Support for flux cardinality query -1. [22519](https://github.com/influxdata/influxdb/pull/22519): Optimize series iteration -1. [22531](https://github.com/influxdata/influxdb/pull/22531): Update ui to v2.0.9 -1. [22530](https://github.com/influxdata/influxdb/pull/22530): Update flux to v0.131.0 -1. [22547](https://github.com/influxdata/influxdb/pull/22547): Set x-influxdb-version and x-influxdb-build headers -1. [22579](https://github.com/influxdata/influxdb/pull/22579): Add route to return platform known resources (#22135) - -### Bug Fixes - -1. [22242](https://github.com/influxdata/influxdb/pull/22242): Switch flux formatter to one that preserves comments -1. [22236](https://github.com/influxdata/influxdb/pull/22236): Influxdb2 packages should depend on curl -1. [22243](https://github.com/influxdata/influxdb/pull/22243): Inactive task runs when updated -1. [22245](https://github.com/influxdata/influxdb/pull/22245): Avoid compaction queue stats flutter -1. [22278](https://github.com/influxdata/influxdb/pull/22278): Auth requests use org and user names if present -1. [22325](https://github.com/influxdata/influxdb/pull/22325): Change build type to 'oss', use correct version -1. [22355](https://github.com/influxdata/influxdb/pull/22355): Repair bad port dropping return value names (#22307) -1. [22397](https://github.com/influxdata/influxdb/pull/22397): Discard excessive errors (#22379) -1. [22504](https://github.com/influxdata/influxdb/pull/22504): Upgrade influxql to latest version & fix predicate handling for show tag values metaqueries -1. [22517](https://github.com/influxdata/influxdb/pull/22517): Use consistent path separator in permission string representation -1. [22523](https://github.com/influxdata/influxdb/pull/22523): Upgrade golang.org/x/sys to avoid panics on macs -1. 
[22520](https://github.com/influxdata/influxdb/pull/22520): Make tsi index compact old and too-large log files -1. [22525](https://github.com/influxdata/influxdb/pull/22525): Hard limit on field size while parsing line protocol -1. [22548](https://github.com/influxdata/influxdb/pull/22548): Suggest setting flux content-type when query fails to parse as json -1. [22563](https://github.com/influxdata/influxdb/pull/22563): For windows, copy snapshot files being backed up (#22551) (#22562) -1. [22578](https://github.com/influxdata/influxdb/pull/22578): Allow empty request bodies to write api - -## v2.0.8 [2021-08-13] - -### WARNING: Upcoming changes to CLI packaging - -Beginning with the next minor version, the `influx` CLI will no longer be packaged in releases from `influxdb`. -Future versions of the CLI will instead be released from the [`influx-cli`](https://github.com/influxdata/influx-cli) -repository. - -Users who wish to adopt the new CLI can download its latest release from [GitHub](https://github.com/influxdata/influx-cli/releases/latest) -or from the InfluxData [Downloads Portal](https://portal.influxdata.com/downloads/). - -### Go Version - -This release upgrades the project to `go` version 1.16. - -#### Minimum macOS Version - -Because of the version bump to `go`, the macOS build for this release requires at least version 10.12 Sierra to run. - -### Features - -1. [21922](https://github.com/influxdata/influxdb/pull/21922): Add `--ui-disabled` option to `influxd` to allow for running with the UI disabled. -1. [21969](https://github.com/influxdata/influxdb/pull/21969): Telemetry improvements: Do not record telemetry data for non-existant paths; replace invalid static asset paths with a slug. -1. [22098](https://github.com/influxdata/influxdb/pull/22098): Upgrade Flux to v0.124.0. -1. [22101](https://github.com/influxdata/influxdb/pull/22101): Upgrade UI to [v2.0.8](https://github.com/influxdata/ui/releases/tag/OSS-v2.0.8). -1. [22101](https://github.com/influxdata/influxdb/pull/22101): Upgrade `flux-lsp-browser` to v0.5.53. - -### Bug Fixes - -1. [21748](https://github.com/influxdata/influxdb/pull/21748): Rename arm rpms with yum-compatible names. -1. [21851](https://github.com/influxdata/influxdb/pull/21851): Upgrade to latest version of `influxdata/cron` so that tasks can be created with interval of `every: 1w`. -1. [21859](https://github.com/influxdata/influxdb/pull/21859): Avoid rewriting `fields.idx` unnecessarily. -1. [21860](https://github.com/influxdata/influxdb/pull/21860): Do not close connection twice in DigestWithOptions. -1. [21866](https://github.com/influxdata/influxdb/pull/21866): Remove incorrect optimization for group-by. -1. [21867](https://github.com/influxdata/influxdb/pull/21867): Return an error instead of panicking when InfluxQL statement rewrites fail. -1. [21868](https://github.com/influxdata/influxdb/pull/21868): Migrate restored KV snapshots to latest schema before using them. -1. [21869](https://github.com/influxdata/influxdb/pull/21869): Specify which fields are missing when rejecting an incomplete onboarding request. -1. [21864](https://github.com/influxdata/influxdb/pull/21864): Systemd unit should block on startup until http endpoint is ready -1. 
[21839](https://github.com/influxdata/influxdb/pull/21839): Fix display and parsing of `influxd upgrade` CLI prompts in PowerShell. -1. [21898](https://github.com/influxdata/influxdb/pull/21898): Removed unused `chronograf-migrator` package & chronograf API service, and updated various "chronograf" references. -1. [21919](https://github.com/influxdata/influxdb/pull/21919): Fix display and parsing of interactive `influx` CLI prompts in PowerShell. -1. [21941](https://github.com/influxdata/influxdb/pull/21941): Upgrade to golang-jwt 3.2.1. -1. [21951](https://github.com/influxdata/influxdb/pull/21951): Prevent silently dropped writes when there are overlapping shards. -1. [21955](https://github.com/influxdata/influxdb/pull/21955): Invalid requests to `/api/v2` subroutes now return 404 instead of a list of links. -1. [21977](https://github.com/influxdata/influxdb/pull/21977): Flux metaqueries for `_field` take fast path if `_measurement` is the only predicate. -1. [22060](https://github.com/influxdata/influxdb/pull/22060): Copy names from mmapped memory before closing iterator -1. [22052](https://github.com/influxdata/influxdb/pull/22052): systemd service -- handle 40x and block indefinitely - -## v2.0.7 [2021-06-04] - -### Features - -1. [21519](https://github.com/influxdata/influxdb/pull/21519): Upgrade Flux to v0.117.0 -1. [21519](https://github.com/influxdata/influxdb/pull/21519): Optimize `table.fill()` execution within Flux aggregate windows. -1. [21564](https://github.com/influxdata/influxdb/pull/21564): Upgrade UI to [v2.0.7](https://github.com/influxdata/ui/releases/tag/OSS-v2.0.7) - -### Bug Fixes - -1. [21349](https://github.com/influxdata/influxdb/pull/21349): Fix off-by-one error in query range calculation over partially compacted data. -1. [21350](https://github.com/influxdata/influxdb/pull/21350): Deprecate the unsupported `PostSetupUser` API. -1. [21376](https://github.com/influxdata/influxdb/pull/21376): Add limits to the `/api/v2/delete` endpoint for start and stop times with error messages. -1. [21379](https://github.com/influxdata/influxdb/pull/21379): Add logging to NATS streaming server to help debug startup failures. -1. [21479](https://github.com/influxdata/influxdb/pull/21477): Accept `--input` instead of a positional arg in `influx restore`. -1. [21479](https://github.com/influxdata/influxdb/pull/21477): Print error instead of panicking when `influx restore` fails to find backup manifests. -1. [21485](https://github.com/influxdata/influxdb/pull/21485): Set last-modified time of empty shard directory to the directory's mod time instead of Unix epoch. -1. [21499](https://github.com/influxdata/influxdb/pull/21499): Remove erroneous dependency on istio. -1. [21501](https://github.com/influxdata/influxdb/pull/21501): Don't deadlock in `influx org members list` when an org has > 10 members. -1. [21524](https://github.com/influxdata/influxdb/pull/21524): Replace telemetry file name with slug for `ttf`, `woff`, and `eot` files. -1. [21549](https://github.com/influxdata/influxdb/pull/21549): Enable use of absolute path for `--upgrade-log` when running `influxd upgrade` on Windows. -1. [21548](https://github.com/influxdata/influxdb/pull/21548): Make InfluxQL meta queries respect query timeouts. - -## v2.0.6 [2021-04-29] - -### Bug Fixes - -1. 
[21325](https://github.com/influxdata/influxdb/pull/21325): Ensure query config written by influxd upgrade is valid. -1. [21325](https://github.com/influxdata/influxdb/pull/21325): Revert to nonzero defaults for `query-concurrency` and `query-queue-size` to avoid validation failures for upgrading users. -1. [21325](https://github.com/influxdata/influxdb/pull/21325): Don't fail validation when `query-concurrency` is 0 and `query-queue-size` is > 0. -1. [21361](https://github.com/influxdata/influxdb/pull/21361): Disable MergeFiltersRule until it is more stable. - -## v2.0.5 [2021-04-27] - -### Windows Support - -This release includes our initial Windows preview build. - -### Breaking Changes - -#### /debug/vars removed - -Prior to this release, the `influxd` server would always expose profiling information over `/debug/vars`. -This endpoint was unauthenticated, and not used by InfluxDB systems to report diagnostics. For security and clarity, -the endpoint has been removed. Use the `/metrics` endpoint to collect system statistics. - -#### `influx transpile` removed - -The `transpile` command has been retired. Users can send InfluxQL directly to the server via the `/api/v2/query` -or `/query` HTTP endpoints. - -#### Default query concurrency changed - -The default setting for the max number of concurrent Flux queries has been changed from 10 to unlimited. Set the -`query-concurrency` config parameter to > 0 when running `influxd` to re-limit the maximum running query count, -and the `query-queue-size` config parameter to > 0 to set the max number of queries that can be queued before the -server starts rejecting requests. - -#### Prefix for query-controller metrics changed - -The prefix used for Prometheus metrics from the query controller has changed from `query_control_` to `qc_`. - -### Features - -1. [20860](https://github.com/influxdata/influxdb/pull/20860): Add `--pprof-disabled` option to `influxd` to disable exposing profiling information over HTTP. -1. [20860](https://github.com/influxdata/influxdb/pull/20860): Add `/debug/pprof/all` HTTP endpoint to gather all profiles at once. -1. [20860](https://github.com/influxdata/influxdb/pull/20860): Upgrade `http.pprof-enabled` config in `influxd upgrade`. -1. [20846](https://github.com/influxdata/influxdb/pull/20846): Add `--compression` option to `influx write` to support GZIP inputs. -1. [20845](https://github.com/influxdata/influxdb/pull/20845): Add `influx task retry-failed` command to rerun failed runs. -1. [20965](https://github.com/influxdata/influxdb/pull/20965): Add `--metrics-disabled` option to `influxd` to disable exposing Prometheus metrics over HTTP. -1. [20962](https://github.com/influxdata/influxdb/pull/20962): Rewrite regex conditions in InfluxQL subqueries for performance. Thanks @yujiahaol68! -1. [20988](https://github.com/influxdata/influxdb/pull/20988): Add `--http-read-header-timeout`, `--http-read-timeout`, `--http-write-timeout`, and `--http-idle-timeout` options to `influxd`. -1. [20988](https://github.com/influxdata/influxdb/pull/20988): Set a default `--http-read-header-timeout` of 10s in `influxd`. -1. [20988](https://github.com/influxdata/influxdb/pull/20988): Set a default `--http-idle-timeout` of 3m in `influxd`. -1. [20949](https://github.com/influxdata/influxdb/pull/20949): Add support for explicitly setting shard-group durations on buckets. Thanks @hinst!
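
A minimal sketch of the re-limiting described under "Default query concurrency changed" above; the values `10` and `50` are arbitrary examples, and the same keys can also be set in the `influxd` configuration file instead of being passed as flags.

```sh
# Example values only: cap concurrent Flux queries again after the default
# changed from 10 to unlimited.
# query-concurrency > 0 limits how many queries run at once;
# query-queue-size > 0 limits how many may wait before requests are rejected.
influxd --query-concurrency=10 --query-queue-size=50
```

-1. 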
[20838](https://github.com/influxdata/influxdb/pull/20838): Add Swift client library to the data loading section of the UI -1. [21032](https://github.com/influxdata/influxdb/pull/21032): Display task IDs in the UI. -1. [21030](https://github.com/influxdata/influxdb/pull/21030): Update Telegraf plugins in UI to include additions and changes in 1.18 release. -1. [21049](https://github.com/influxdata/influxdb/pull/21049): Write to standard out when `--output-path -` is passed to `influxd inspect export-lp`. -1. [21050](https://github.com/influxdata/influxdb/pull/21050): Add `-p, --profilers` flag to `influx query` command. -1. [21126](https://github.com/influxdata/influxdb/pull/21126): Update UI to match InfluxDB Cloud. -1. [21144](https://github.com/influxdata/influxdb/pull/21144): Allow for disabling concurrency-limits in Flux controller. -1. [21166](https://github.com/influxdata/influxdb/pull/21166): Replace unique resource IDs (UI assets, backup shards) with slugs to reduce cardinality of telemetry data. -1. [21181](https://github.com/influxdata/influxdb/pull/21181): Enabled several UI features: Band & mosaic plot types, axis tick mark configuration, CSV file uploader, editable telegraf configurations, legend orientation options, and dashboard single cell refresh. -1. [21241](https://github.com/influxdata/influxdb/pull/21241): HTTP server errors output logs following the standard format. -1. [21227](https://github.com/influxdata/influxdb/pull/21268): Upgrade Flux to v0.113.0. - -### Bug Fixes - -1. [20886](https://github.com/influxdata/influxdb/pull/20886): Prevent "do not have an execution context" error when parsing Flux options in tasks. -1. [20872](https://github.com/influxdata/influxdb/pull/20872): Respect 24 hour clock formats in the UI and allow more choices -1. [20860](https://github.com/influxdata/influxdb/pull/20860): Remove unauthenticated, unsupported `/debug/vars` HTTP endpoint. -1. [20839](https://github.com/influxdata/influxdb/pull/20839): Fix TSM WAL segment size check. Thanks @foobar! -1. [20841](https://github.com/influxdata/influxdb/pull/20841): Update references to docs site to use current URLs. -1. [20837](https://github.com/influxdata/influxdb/pull/20837): Fix use-after-free bug in series ID iterator. Thanks @foobar! -1. [20834](https://github.com/influxdata/influxdb/pull/20834): Fix InfluxDB port in Flux function UI examples. Thanks @sunjincheng121! -1. [20833](https://github.com/influxdata/influxdb/pull/20833): Fix Single Stat graphs with thresholds crashing on negative values. -1. [20843](https://github.com/influxdata/influxdb/pull/20843): Fix data race in TSM cache. Thanks @StoneYunZhao! -1. [20967](https://github.com/influxdata/influxdb/pull/20967): Log error details when `influxd upgrade` fails to migrate databases. -1. [20966](https://github.com/influxdata/influxdb/pull/20966): Prevent time field names from being formatted in the Table visualization. -1. [20918](https://github.com/influxdata/influxdb/pull/20918): Deprecate misleading `retentionPeriodHrs` key in onboarding API. -1. [20851](https://github.com/influxdata/influxdb/pull/20851): Fix TSM WAL segment size computing. Thanks @StoneYunZhao! -1. [20844](https://github.com/influxdata/influxdb/pull/20844): Repair swagger to match implementation of DBRPs type. -1. 
[20987](https://github.com/influxdata/influxdb/pull/20987): Fix the cipher suite used when TLS strict ciphers are enabled in `influxd`. -1. [21031](https://github.com/influxdata/influxdb/pull/21031): Fix parse error in UI for tag filters containing regex meta characters. -1. [20836](https://github.com/influxdata/influxdb/pull/20836): Fix data race in TSM engine when inspecting tombstone stats. -1. [21048](https://github.com/influxdata/influxdb/pull/21048): Prevent concurrent access panic when gathering bolt metrics. -1. [21144](https://github.com/influxdata/influxdb/pull/21144): Fix race condition in Flux controller shutdown. -1. [21151](https://github.com/influxdata/influxdb/pull/21151): Use descending cursor when needed in window aggregate Flux queries. -1. [21230](https://github.com/influxdata/influxdb/pull/21230): Reduce lock contention when adding new fields and measurements. -1. [21232](https://github.com/influxdata/influxdb/pull/21232): Escape dots in community templates hostname regex. - -## v2.0.4 [2021-02-08] - -### Docker - -#### ARM64 -This release extends the Docker builds hosted in `quay.io` to support the `linux/arm64` platform. - -#### 2.x nightly images -Prior to this release, competing nightly builds caused the `nightly` Docker tag to contain outdated -binaries. This conflict has been fixed, and the image tagged with `nightly` will now contain `2.x` -binaries built from the `HEAD` of the `master` branch. - -### Breaking Changes - -#### inmem index option removed -This release fully removes the `inmem` indexing option, along with the associated config options: -* `max-series-per-database` -* `max-values-per-tag` - -Replacement `tsi1` indexes will be automatically generated on startup for shards that need it. - -#### Artifact naming conventions - -The names of artifacts produced by our nightly & release builds have been updated according to the -[Google developer guidelines](https://developers.google.com/style/filenames). Underscores (`_`) have -been replaced by hyphens (`-`) in nearly all cases; the one exception is the use of `x86_64` in our -RPM packages, which has been left unchanged. - -### Features - -1. [20537](https://github.com/influxdata/influxdb/pull/20537): Add `--overwrite-existing-v2` flag to `influxd upgrade` to overwrite existing files at output paths (instead of aborting). -1. [20616](https://github.com/influxdata/influxdb/pull/20616): Update telegraf plugins list in UI to include Beat, Intel PowerStats, and Riemann. -1. [20550](https://github.com/influxdata/influxdb/pull/20550): Add `influxd print-config` command to support automated config inspection. -1. [20591](https://github.com/influxdata/influxdb/pull/20591): Add `nats-port` config option for `influxd` server. -1. [20591](https://github.com/influxdata/influxdb/pull/20591): Add `nats-max-payload-bytes` config option for `influxd` server. -1. [20608](https://github.com/influxdata/influxdb/pull/20608): Add `influxd inspect export-lp` command to extract data in line-protocol format. -1. [20650](https://github.com/influxdata/influxdb/pull/20650): Promote schema and fill query optimizations to default behavior. -1. [20688](https://github.com/influxdata/influxdb/pull/20688): Upgrade Flux to v0.104.0. -1. [20688](https://github.com/influxdata/influxdb/pull/20688): UI: Upgrade flux-lsp-browser to v0.5.31. - -### Bug Fixes - -1. 
[20351](https://github.com/influxdata/influxdb/pull/20351): Ensure `influxdb` service sees default env variables when running under `init.d`. -1. [20350](https://github.com/influxdata/influxdb/pull/20350): Don't show the upgrade notice on fresh `influxdb2` installs. -1. [20350](https://github.com/influxdata/influxdb/pull/20350): Ensure `config.toml` is initialized on fresh `influxdb2` installs. -1. [20347](https://github.com/influxdata/influxdb/pull/20347): Include upgrade helper script in goreleaser manifest. -1. [20376](https://github.com/influxdata/influxdb/pull/20376): Don't overwrite stack name/description on `influx stack update`. -1. [20375](https://github.com/influxdata/influxdb/pull/20375): Fix timeout setup for `influxd` graceful shutdown. -1. [20354](https://github.com/influxdata/influxdb/pull/20354): Don't ignore failures to set password during initial user onboarding. -1. [20402](https://github.com/influxdata/influxdb/pull/20402): Remove duplication from task error messages. -1. [20403](https://github.com/influxdata/influxdb/pull/20403): Improve error message shown when `influx` CLI can't find an org by name. -1. [20411](https://github.com/influxdata/influxdb/pull/20411): Fix logging initialization for storage engine. -1. [20456](https://github.com/influxdata/influxdb/pull/20456): Automatically build `tsi1` indexes for shards that need it instead of falling back to `inmem`. -1. [20455](https://github.com/influxdata/influxdb/pull/20455): Don't return 500 codes for partial write failures. -1. [20471](https://github.com/influxdata/influxdb/pull/20471): Improve messages in DBRP API validation errors. -1. [20472](https://github.com/influxdata/influxdb/pull/20472): Add confirmation step w/ file sizes before copying data files in `influxd upgrade`. -1. [20538](https://github.com/influxdata/influxdb/pull/20538): Don't leak .tmp files while backing up shards. -1. [20538](https://github.com/influxdata/influxdb/pull/20538): Allow backups to complete while a snapshot is in progress. -1. [20536](https://github.com/influxdata/influxdb/pull/20536): Fix silent failure to register CLI args as required. -1. [20534](https://github.com/influxdata/influxdb/pull/20534): Fix loading config when INFLUXD_CONFIG_PATH points to a `.yml` file. -1. [20535](https://github.com/influxdata/influxdb/pull/20535): Improve error message when opening BoltDB with unsupported file system options. -1. [20542](https://github.com/influxdata/influxdb/pull/20542): Prevent extra output row from GROUP BY crossing DST boundary. -1. [20615](https://github.com/influxdata/influxdb/pull/20615): Update Flux functions list in UI to reflect that `v1` package was renamed to `schema`. -1. [20558](https://github.com/influxdata/influxdb/pull/20558): Prevent panic in `influxd upgrade` when V1 users exist and no V1 config is given. -1. [20592](https://github.com/influxdata/influxdb/pull/20592): Set correct Content-Type on v1 query responses. -1. [20592](https://github.com/influxdata/influxdb/pull/20592): Update V1 API spec to document all valid Accept headers and matching Content-Types. -1. [20611](https://github.com/influxdata/influxdb/pull/20611): Respect the --skip-verify flag when running `influx query`. -1. [20671](https://github.com/influxdata/influxdb/pull/20671): Remove blank lines from payloads sent by `influx write`. -1. 
[20688](https://github.com/influxdata/influxdb/pull/20688): Fix infinite loop in Flux parser caused by invalid array expressions. -1. [20672](https://github.com/influxdata/influxdb/pull/20672): Allow for creating users without initial passwords in `influx user create`. -1. [20689](https://github.com/influxdata/influxdb/pull/20689): Fix incorrect "bucket not found" errors when passing `--bucket-id` to `influx write`. -1. [20710](https://github.com/influxdata/influxdb/pull/20710): Fix loading config when `INFLUXD_CONFIG_PATH` points to a directory with `.` in its name. -1. [20708](https://github.com/influxdata/influxdb/pull/20708): Update API spec to document Flux dictionary features. - -## v2.0.3 [2020-12-14] - -### ARM Support - -This release includes our initial ARM64 "preview" build. - -### Breaking Changes - -#### influxd upgrade -Previously, `influxd upgrade` would attempt to write upgraded `config.toml` files into the same directory as the source -`influxdb.conf` file. If this failed, a warning would be logged and `config.toml` would be written into the `HOME` directory. - -This release breaks this behavior in two ways: -1. By default, `config.toml` is now written into the same directory as the Bolt DB and engine files (`~/.influxdbv2/`) -2. If writing upgraded config fails, the `upgrade` process exits with an error instead of falling back to the `HOME` directory - -Users can use the new `--v2-config-path` option to override the output path for upgraded config if they can't or don't -want to use the default. - -#### v2 packaging -Based on community feedback, the v2 deb and rpm packaging has been improved to avoid confusion between versions. The package -name is now influxdb2 and conflicts with any previous influxdb package (including initial 2.0.0, 2.0.1, and 2.0.2 packages). -Additionally, v2 specific path defaults are now defined and helper scripts are provided for `influxd upgrade` and cleanup cases. - -### Features - -1. [20128](https://github.com/influxdata/influxdb/pull/20128): Allow password to be specified as a CLI option in `influx v1 auth create`. -1. [20128](https://github.com/influxdata/influxdb/pull/20128): Allow password to be specified as a CLI option in `influx v1 auth set-password`. -1. [20146](https://github.com/influxdata/influxdb/pull/20146): Allow for users to specify where V2 config should be written in `influxd upgrade`. -1. [20243](https://github.com/influxdata/influxdb/pull/20243): Implement delete with predicate. -1. [20204](https://github.com/influxdata/influxdb/pull/20204): Improve ID-related error messages for `influx v1 dbrp` commands. -1. [20328](https://github.com/influxdata/influxdb/pull/20328): Upgrade Flux to v0.99.0 -1. [20328](https://github.com/influxdata/influxdb/pull/20328): UI: Upgrade flux-lsp-browser to v0.5.26 - -### Bug Fixes - -1. [20146](https://github.com/influxdata/influxdb/pull/20146): Use V2 directory for default V2 config path in `influxd upgrade`. -1. [20153](https://github.com/influxdata/influxdb/pull/20153): Don't log bodies of V1 write requests. -1. [20154](https://github.com/influxdata/influxdb/pull/20154): Fix panic when writing a point with 100 tags. Thanks @foobar! -1. [20160](https://github.com/influxdata/influxdb/pull/20160): Ensure KV index walks only select exactly-matched keys.
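
The `--v2-config-path` override described in the `influxd upgrade` breaking change above can be used roughly as sketched below; the target path is only an example, and any writable location works.

```sh
# Example only: write the upgraded v2 config somewhere other than the default
# location (the directory holding the Bolt DB and engine files, ~/.influxdbv2/).
influxd upgrade --v2-config-path /etc/influxdb2/config.toml
```

-1. 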
[20166](https://github.com/influxdata/influxdb/pull/20166): Enforce max value of 2147483647 on query concurrency to avoid startup panic. -1. [20166](https://github.com/influxdata/influxdb/pull/20166): Enforce max value of 2147483647 on query queue size to avoid startup panic. -1. [20182](https://github.com/influxdata/influxdb/pull/20182): Auto-migrate existing DBRP mappings from old schema to avoid panic. -1. [20202](https://github.com/influxdata/influxdb/pull/20202): Optimize shard lookup in groups containing only one shard. Thanks @StoneYunZhao! -1. [20235](https://github.com/influxdata/influxdb/pull/20235): Respect the `--name` option in `influx setup` whether configs already exist or not. -1. [20235](https://github.com/influxdata/influxdb/pull/20235): Allow for 0 (infinite) values for `--retention` in `influx setup`. -1. [20329](https://github.com/influxdata/influxdb/pull/20329): Set v2 default paths and provide upgrade helper scripts in release packages. - -## v2.0.2 [2020-11-18] - -### Features - -1. [20072](https://github.com/influxdata/influxdb/pull/20072): Warn if V1 users are upgraded, but V1 auth wasn't enabled. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): CLI: Export 1.x CQs as part of `influxd upgrade`. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): Upgrade Flux to v0.95.0. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): CLI: Add DBRP CLI commands as `influx v1 dbrp`. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): UI: Upgrade flux-lsp-browser to v0.5.23. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): Added functionality to filter task runs by time. - -### Bug Fixes - -1. [19992](https://github.com/influxdata/influxdb/pull/19992): Fix various typos. Thanks @kumakichi! -1. [19999](https://github.com/influxdata/influxdb/pull/19999): Use --skip-verify flag for backup/restore CLI command. -1. [19999](https://github.com/influxdata/influxdb/pull/19999): Suggest running with -h on error instead of printing usage when launching `influxd`. -1. [20047](https://github.com/influxdata/influxdb/pull/20072): Allow self signed certificates for scraper targets. Thanks @cmackenzie1! -1. [20072](https://github.com/influxdata/influxdb/pull/20072): Add locking during TSI iterator creation. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): Do not use global viper APIs, which breaks testing. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): Remove fragile NATS port assignment loop. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): Add same site strict flag to session cookie. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): CLI: Validate all input paths to `upgrade` up-front. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): Delete deprecated kv service code. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): Reinstate minimal read-only document store for dashboard template. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): UI: Skip dashboard index CRUD case. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): Task: Fixed logic checking time filter exists. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): Task: Fixed error message semantic. -1. 
[20072](https://github.com/influxdata/influxdb/pull/20072): Track seen databases in map and skip duplicates. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): Build: Remove lint-feature-flag job from OSS. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): CLI: Don't validate unused paths in `upgrade`. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): Continue reading until itrs is empty, even for nil cursors. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): CLI: Remove internal `influxd upgrade` subcommands from help text. -1. [20072](https://github.com/influxdata/influxdb/pull/20072): Use default DBRP mapping on V1 write when no RP is specified. -1. [20072](https://github.com/influxdata/influxdb/pull/20077): UI: Bump version in package.json so it displays correctly. -1. [20089](https://github.com/influxdata/influxdb/pull/20089): UI: UX improvements and bug fixes to dbrp commands. -1. [20089](https://github.com/influxdata/influxdb/pull/20089): API: Make the dbrp api match the swagger spec. -1. [20089](https://github.com/influxdata/influxdb/pull/20089): Revert changes to API page-sizes. -1. [20089](https://github.com/influxdata/influxdb/pull/20089): Exclude pkger\_test.go from linting -1. [20091](https://github.com/influxdata/influxdb/pull/20091): Make the DBRP http API match the swagger spec. - -## v2.0.1 [2020-11-10] - -### Bug Fixes - -1. [19918](https://github.com/influxdata/influxdb/pull/19918): Swagger: add operationId to /delete -1. [19967](https://github.com/influxdata/influxdb/pull/19967): Upgrade: add log-level option -1. [19969](https://github.com/influxdata/influxdb/pull/19969): Check for existing 2.x CLI configs file -1. [19971](https://github.com/influxdata/influxdb/pull/19971): Swagger: remove Invites from swagger -1. [19972](https://github.com/influxdata/influxdb/pull/19972): Remove obsolete unused option (influx-command-path) -1. [19980](https://github.com/influxdata/influxdb/pull/19980): check write permission in legacy write path - -## v2.0.0 [2020-11-09] - -### Features - -1. [19935](https://github.com/influxdata/influxdb/pull/19935): Improve the UI for the influx v1 auth commands -1. [19940](https://github.com/influxdata/influxdb/pull/19940): Update Flux to v0.94.0 -1. [19943](https://github.com/influxdata/influxdb/pull/19943): Upgrade flux-lsp-browser to v0.5.22 -1. [19946](https://github.com/influxdata/influxdb/pull/19946): Adding RAS telegraf input - -### Bug Fixes - -1. [19924](https://github.com/influxdata/influxdb/pull/19924): Remove unused 'security-script' option from upgrade command -1. [19925](https://github.com/influxdata/influxdb/pull/19937): Create CLI configs in `influxd upgrade` -1. [19928](https://github.com/influxdata/influxdb/pull/19928): Fix parsing of retention policy CLI args in `influx setup` and `influxd upgrade` -1. [19930](https://github.com/influxdata/influxdb/pull/19930): Replace 0 with MaxInt when upgrading query-concurrency -1. [19937](https://github.com/influxdata/influxdb/pull/19937): Create CLI configs -1. [19939](https://github.com/influxdata/influxdb/pull/19939): Make influxd help more specific -1. [19945](https://github.com/influxdata/influxdb/pull/19945): Allow write-only V1 tokens to find DBRPs -1. 
[19947](https://github.com/influxdata/influxdb/pull/19947): Updating v1 auth description -1. [19952](https://github.com/influxdata/influxdb/pull/19952): Use `db`/`rp` naming convention when migrating DBs to buckets -1. [19956](https://github.com/influxdata/influxdb/pull/19956): Improve help for --no-password switch -1. [19959](https://github.com/influxdata/influxdb/pull/19959): Use 10 instead of MaxInt when rewriting query-concurrency -1. [19960](https://github.com/influxdata/influxdb/pull/19960): Remove bucket and mapping auto-creation from v1 /write API -1. [19885](https://github.com/influxdata/influxdb/pull/19875): Misuse of reflect.SliceHeader - -## v2.0.0-rc.4 [2020-11-05] - -### Features - -1. [19854](https://github.com/influxdata/influxdb/pull/19854): Use v1 authorization for users upgrade -1. [19855](https://github.com/influxdata/influxdb/pull/19855): Enable window pushdowns -1. [19864](https://github.com/influxdata/influxdb/pull/19864): Implement backup/restore CLI subcommands -1. [19865](https://github.com/influxdata/influxdb/pull/19865): Implementation of v1 authorization -1. [19879](https://github.com/influxdata/influxdb/pull/19879): Make sure the query plan nodes have unique ids -1. [19881](https://github.com/influxdata/influxdb/pull/19881): Update Flux to v0.93.0 - -### Bug Fixes - -1. [19685](https://github.com/influxdata/influxdb/pull/19685): Cloning tasks makes actions shared in task list view -1. [19712](https://github.com/influxdata/influxdb/pull/19712): Reduce filesize of influx binary -1. [19819](https://github.com/influxdata/influxdb/pull/19819): Isolate telegraf config service and remove URM interactions -1. [19853](https://github.com/influxdata/influxdb/pull/19853): Use updated HTTP client for authorization service -1. [19856](https://github.com/influxdata/influxdb/pull/19856): Make tagKeys and tagValues work for edge cases involving fields -1. [19870](https://github.com/influxdata/influxdb/pull/19870): Correctly parse float as 64-bits -1. [19873](https://github.com/influxdata/influxdb/pull/19873): Add simple metrics related to installed templates -1. [19885](https://github.com/influxdata/influxdb/pull/19885): Remove extra multiplication of retention policies in onboarding -1. [19887](https://github.com/influxdata/influxdb/pull/19887): Use fluxinit package to init flux library instead of builtin -1. [19886](https://github.com/influxdata/influxdb/pull/19886): Add Logger to constructor function to ensure log field is initialized -1. [19894](https://github.com/influxdata/influxdb/pull/19894): Return empty iterator instead of null in tagValues -1. [19899](https://github.com/influxdata/influxdb/pull/19899): Docs: flux 0.92 functions -1. [19908](https://github.com/influxdata/influxdb/pull/19908): Fix /ready response content type - -## v2.0.0-rc.3 [2020-10-29] - -### Features - -1. [19807](https://github.com/influxdata/influxdb/pull/19807): Enable window agg mean pushdown -1. [19813](https://github.com/influxdata/influxdb/pull/19813): Aggregate array cursors -1. [19815](https://github.com/influxdata/influxdb/pull/19815): Create a v1 authorization service -1. [19826](https://github.com/influxdata/influxdb/pull/19826): Update Flux to v0.91.0 -1. 
[19829](https://github.com/influxdata/influxdb/pull/19829): Extend CLI with v1 authorization commands -1. [19839](https://github.com/influxdata/influxdb/pull/19839): Add tick generation properties and legendColorizeRows -1. [19840](https://github.com/influxdata/influxdb/pull/19840): Add bcrypt password support to v1 authorizations -1. [19850](https://github.com/influxdata/influxdb/pull/19850): Update generate ticks into an array of properties for each axis - -### Bug Fixes - -1. [19784](https://github.com/influxdata/influxdb/pull/19784): UI: bump papaparse from 4.6.3 to 5.2.0 -1. [19802](https://github.com/influxdata/influxdb/pull/19802): Docs: update PostDBRP docs to reflect mutual exclusive requirement of org vs orgID -1. [19804](https://github.com/influxdata/influxdb/pull/19804): Notifications: move rule service into own package -1. [19816](https://github.com/influxdata/influxdb/pull/19816): Type-convert fs.Bavail for portability -1. [19818](https://github.com/influxdata/influxdb/pull/19818): Notifications: isolate endpoint service -1. [19823](https://github.com/influxdata/influxdb/pull/19823): Clear Logout -1. [19825](https://github.com/influxdata/influxdb/pull/19825): Docs: Update FUZZ.md -1. [19828](https://github.com/influxdata/influxdb/pull/19828): Add 1.x compatible endpoints to swagger -1. [19833](https://github.com/influxdata/influxdb/pull/19833): allow newIndexSeriesCursor() to accept an influxql.Expr -1. [19834](https://github.com/influxdata/influxdb/pull/19834): Docs: Fix typos in http/swagger.yml -1. [19836](https://github.com/influxdata/influxdb/pull/19836): UI: import flux-lsp v0.5.21 -1. [19846](https://github.com/influxdata/influxdb/pull/19846): prune some unreferenced packages - -## v2.0.0-rc.2 [2020-10-21] - -### Features - -1. [19725](https://github.com/influxdata/influxdb/pull/19725): Add window agg result set -1. [19740](https://github.com/influxdata/influxdb/pull/19740): Provide means to remove stack without confirmation -1. [19750](https://github.com/influxdata/influxdb/pull/19750): Return error on failed resource addition -1. [19774](https://github.com/influxdata/influxdb/pull/19774): Update Flux to v0.90.0 - -### Bug Fixes - -1. [19465](https://github.com/influxdata/influxdb/pull/19465): Use valid flux in pkger test templates -1. [19773](https://github.com/influxdata/influxdb/pull/19773): Upgrade: fallback to user's home when saving upgraded config -1. [19775](https://github.com/influxdata/influxdb/pull/19775): Telegraf plugin updates (remove RAS for now) -1. [19776](https://github.com/influxdata/influxdb/pull/19776): TimeMachine: change derivative to 1s -1. [19789](https://github.com/influxdata/influxdb/pull/19789): Launcher: Switch to AuthorizationService from authorization package -1. [19780](https://github.com/influxdata/influxdb/pull/19780): Upgrade: proper default 2.x config filename -1. [19781](https://github.com/influxdata/influxdb/pull/19781): Upgrade: fixing typos and grammar errors - -## v2.0.0-rc.1 [2020-10-14] - -### Features - -1. [19641](https://github.com/influxdata/influxdb/pull/19641): Added `influx upgrade` command for upgrading from 1.x to 2.0 -1. [19746](https://github.com/influxdata/influxdb/pull/19746): Added Intel RDT and RAS Daemon telegraf plugins -1. 
[19731](https://github.com/influxdata/influxdb/pull/19731): Upgraded Flux to v0.89.0 - -### Bug Fixes - -1. [19708](https://github.com/influxdata/influxdb/pull/19708): Scrapers not working in RC0 -1. [19732](https://github.com/influxdata/influxdb/pull/19732): Update default value of list tasks influx CLI command to 100 -1. [19710](https://github.com/influxdata/influxdb/pull/19710): InfluxDB Templates: allow same duration unit identifiers that the tasks api allows -1. [19700](https://github.com/influxdata/influxdb/pull/19700): InfluxDB Templates: preserve cell colors on export/import -1. [19695](https://github.com/influxdata/influxdb/pull/19695): Influx CLI fix an issue where a single telegraf config was not being returned -1. [19593](https://github.com/influxdata/influxdb/pull/19593): Don't allow short passwords in `influx setup` - -## v2.0.0-rc.0 [2020-09-29] - -### Breaking Changes - -In the interests of simplifying the migration for existing users of InfluxDB 1.x, this -release includes significant breaking changes. - -**Upgrading from previous beta builds of `influxd` is not supported** - -In order to continue using `influxd` betas, users will be required to move all existing -data out of their `~/.influxdbv2` (or equivalent) path, including `influxd.bolt`. This -means all existing dashboards, tasks, integrations, alerts, users and tokens will need to -be recreated. The `influx export all` command may be used to export and re-import most -of this data. - -At this time, there is no tooling to convert existing time series data from previous -beta releases. If data from a prior beta release is found, `influxd` will refuse to start. - -We have also changed the default port of InfluxDB from 9999 back to 8086. If you still would like -to run on port 9999, you can start influxd with the `--http-bind-address` option. You will also -need to update any InfluxDB CLI config profiles with the new port number. - -1. [19446](https://github.com/influxdata/influxdb/pull/19446): Port TSM1 storage engine -1. [19494](https://github.com/influxdata/influxdb/pull/19494): Changing the default port from 9999 to 8086 -1. [19636](https://github.com/influxdata/influxdb/pull/19636): Disable unimplemented delete with predicate API - -### Features - -1. [18779](https://github.com/influxdata/influxdb/pull/18779): Add new processing options and enhancements to influx write. -1. [19246](https://github.com/influxdata/influxdb/pull/19246): Redesign load data page to increase discovery and ease of use -1. [19334](https://github.com/influxdata/influxdb/pull/19334): Add --active-config flag to influx to set config for single command -1. [19219](https://github.com/influxdata/influxdb/pull/19219): List buckets via the API now supports after (ID) parameter as an alternative to offset. -1. [19390](https://github.com/influxdata/influxdb/pull/19390): Record last success and failure run times in the Task -1. [19402](https://github.com/influxdata/influxdb/pull/19402): Inject Task's LatestSuccess Timestamp In Flux Extern -1. [19433](https://github.com/influxdata/influxdb/pull/19433): Add option to dump raw query results in CLI -1. [19506](https://github.com/influxdata/influxdb/pull/19506): Add TSM 1.x storage options as flags -1. [19508](https://github.com/influxdata/influxdb/pull/19508): Add subset of InfluxQL coordinator options as flags
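
A hedged illustration of the port change described in the rc.0 breaking-changes note above: the default HTTP port is now 8086, and keeping the old beta port means both rebinding the server and repointing existing CLI config profiles (the profile name `default` and the local URL below are only examples).

```sh
# Example only: keep serving on the old beta port after upgrading to rc.0.
influxd --http-bind-address=:9999

# Point an existing CLI config profile at whichever port the server is using.
influx config set --config-name default --host-url http://localhost:9999
```

-1. 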
[19457](https://github.com/influxdata/influxdb/pull/19457): Add ability to export resources by name via the CLI -1. [19640](https://github.com/influxdata/influxdb/pull/19640): Turn on Community Templates -1. [19663](https://github.com/influxdata/influxdb/pull/19663): Added InfluxDB v2 Listener, NSD, OPC-UA, and Windows Event Log to the sources page -1. [19662](https://github.com/influxdata/influxdb/pull/19662): Add `max-line-length` switch to `influx write` command to address `token too long` errors for large inputs -1. [19660](https://github.com/influxdata/influxdb/pull/19660): Add --rate-limit option to `influx write`. -1. [19740](https://github.com/influxdata/influxdb/pull/19740): Add `--force` option to `influx stack rm` to skip confirmation - -### Bug Fixes - -1. [19331](https://github.com/influxdata/influxdb/pull/19331): Add description to auth influx command outputs. -1. [19392](https://github.com/influxdata/influxdb/pull/19392): Include the edge of the boundary we are observing. -1. [19453](https://github.com/influxdata/influxdb/pull/19453): Warn about duplicate tag names during influx write csv. -1. [19466](https://github.com/influxdata/influxdb/pull/19466): Do not override existing line part in group annotation. -1. [19637](https://github.com/influxdata/influxdb/pull/19637): Added PATCH to the list of allowed methods - -## v2.0.0-beta.16 [2020-08-07] - -### Breaking - -1. [19066](https://github.com/influxdata/influxdb/pull/19066): Drop deprecated /packages route tree -1. [19116](https://github.com/influxdata/influxdb/pull/19116): Support more types for template envRef default value and require explicit default values -1. [19104](https://github.com/influxdata/influxdb/pull/19104): Remove orgs/labels nested routes from the API. -1. [19653](https://github.com/influxdata/influxdb/pull/19653): Remove PointBatcher from tsdb package API - -### Features - -1. [19075](https://github.com/influxdata/influxdb/pull/19075): Add resource links to a stack's resources from public HTTP API list/read calls -1. [19103](https://github.com/influxdata/influxdb/pull/19103): Enhance resource creation experience when limits are reached -1. [19223](https://github.com/influxdata/influxdb/pull/19223): Add dashboards command to influx CLI -1. [19225](https://github.com/influxdata/influxdb/pull/19225): Allow user onboarding to optionally set passwords -1. [18841](https://github.com/influxdata/influxdb/pull/18841): Limit query response sizes for queries built in QueryBuilder by requiring an aggregate window -1. [19135](https://github.com/influxdata/influxdb/pull/19135): Add telegram notification. - -### Bug Fixes - -1. [19043](https://github.com/influxdata/influxdb/pull/19043): Enforce all influx CLI flag args are valid -1. [19188](https://github.com/influxdata/influxdb/pull/19188): Dashboard cells correctly map results when multiple queries exist -1. [19146](https://github.com/influxdata/influxdb/pull/19146): Dashboard cells and overlay use UTC as query time when toggling to UTC timezone -1. [19222](https://github.com/influxdata/influxdb/pull/19222): Bucket names may not include quotation marks -1. [19317](https://github.com/influxdata/influxdb/pull/19317): Add validation to Variable name creation for valid Flux identifiers. - -### UI Improvements -1. 
[19231](https://github.com/influxdata/influxdb/pull/19231): Alerts page filter inputs now have tab indices for keyboard navigation -1. [19364](https://github.com/influxdata/influxdb/pull/19364): Errors in OSS are now properly printed to the console - -## v2.0.0-beta.15 [2020-07-23] - -### Breaking - -1. [19004](https://github.com/influxdata/influxdb/pull/19004): Removed the `migrate` command from the `influxd` binary. -1. [18921](https://github.com/influxdata/influxdb/pull/18921): Restricted UI variable names to not clash with Flux reserved words - -### Features - -1. [18888](https://github.com/influxdata/influxdb/pull/18888): Add event source to influx stack operations -1. [18910](https://github.com/influxdata/influxdb/pull/18910): Add uninstall functionality for stacks -1. [18912](https://github.com/influxdata/influxdb/pull/18912): Drop deprecated influx pkg command tree -1. [18997](https://github.com/influxdata/influxdb/pull/18997): Add telegraf management commands to influx CLI -1. [19030](https://github.com/influxdata/influxdb/pull/19030): Enable dynamic destination for the influx CLI configs file -1. [19029](https://github.com/influxdata/influxdb/pull/19029): Navigating away from a dashboard cancels all pending queries -1. [19003](https://github.com/influxdata/influxdb/pull/19003): Upgrade to Flux v0.74.0 -1. [19040](https://github.com/influxdata/influxdb/pull/19040): Drop the REPL command from influx CLI -1. [19032](https://github.com/influxdata/influxdb/pull/19032): Redesign asset & rate limit alerts - -### Bug Fixes - -1. [18891](https://github.com/influxdata/influxdb/pull/18891): Allow 0 to be the custom set minimum value for Y Domain -1. [18969](https://github.com/influxdata/influxdb/pull/18969): Single Stat cells should render properly in Safari again -1. [18974](https://github.com/influxdata/influxdb/pull/18974): Limit variable querying when submitting queries to used variables -1. [19039](https://github.com/influxdata/influxdb/pull/19039): Fix an issue where switching orgs was not redirecting correctly -1. [18989](https://github.com/influxdata/influxdb/pull/18989): Stopped fetching tags in the advanced builder -1. [19044](https://github.com/influxdata/influxdb/pull/19044): Graph customization: X and Y axis properly accept values - -## v2.0.0-beta.14 [2020-07-08] - -### Features - -1. [18758](https://github.com/influxdata/influxdb/pull/18758): Extend influx stacks update cmd with ability to add resources without apply template -1. [18793](https://github.com/influxdata/influxdb/pull/18793): Normalize InfluxDB templates under new /api/v2/templates and /api/v2/stacks public API -1. [18818](https://github.com/influxdata/influxdb/pull/18818): Extend template Summary and Diff nested types with kind identifiers -1. [18857](https://github.com/influxdata/influxdb/pull/18857): Flux updated to v0.71.1 -1. [18805](https://github.com/influxdata/influxdb/pull/18805): Added static builds for Linux - -### Bug Fixes - -1. [18878](https://github.com/influxdata/influxdb/pull/18878): Don't overwrite build date set via ldflags -1. [18842](https://github.com/influxdata/influxdb/pull/18842): Fixed an issue where define query was unusable after importing a Check -1. 
[18845](https://github.com/influxdata/influxdb/pull/18845): Update documentation links - -## v2.0.0-beta.13 [2020-06-25] - -### Features - -1. [18387](https://github.com/influxdata/influxdb/pull/18387): Integrate query cancellation after queries have been submitted -1. [18515](https://github.com/influxdata/influxdb/pull/18515): Extend templates with the source file|url|reader. -1. [18539](https://github.com/influxdata/influxdb/pull/18539): Collect stats on installed influxdata community template usage. -1. [18541](https://github.com/influxdata/influxdb/pull/18541): Pkger allow raw github.com host URLs for yaml|json|jsonnet URLs -1. [18546](https://github.com/influxdata/influxdb/pull/18546): Influx allow for files to be remotes for all template commands -1. [18560](https://github.com/influxdata/influxdb/pull/18560): Extend stacks API with update capability -1. [18568](https://github.com/influxdata/influxdb/pull/18568): Add support for config files to influxd and any cli.NewCommand use case -1. [18573](https://github.com/influxdata/influxdb/pull/18573): Extend influx stacks cmd with new influx stacks update cmd -1. [18595](https://github.com/influxdata/influxdb/pull/18595): Add ability to skip resources in a template by kind or by metadata.name -1. [18600](https://github.com/influxdata/influxdb/pull/18600): Extend influx apply with resource filter capabilities -1. [18601](https://github.com/influxdata/influxdb/pull/18601): Provide active config running influx config without args -1. [18606](https://github.com/influxdata/influxdb/pull/18606): Enable influxd binary to look for a config file on startup -1. [18647](https://github.com/influxdata/influxdb/pull/18647): Add support for env ref default values to the template parser -1. [18655](https://github.com/influxdata/influxdb/pull/18655): Add support for platform variable selected field to templates - -### Bug Fixes - -1. [18602](https://github.com/influxdata/influxdb/pull/18602): Fix uint overflow during setup on 32bit systems -1. [18623](https://github.com/influxdata/influxdb/pull/18623): Drop support for --local flag within influx CLI -1. [18632](https://github.com/influxdata/influxdb/pull/18632): Prevents undefined queries in cells from erroring out in dashboards -1. [18649](https://github.com/influxdata/influxdb/pull/18649): Fixes bucket selection issue and query builder state -1. [18658](https://github.com/influxdata/influxdb/pull/18658): Add support for 'd' day and 'w' week time identifiers in the CLI for bucket and setup commands -1. [18581](https://github.com/influxdata/influxdb/pull/18581): Cache dashboard cell query results to use as a reference for cell configurations -1. [18707](https://github.com/influxdata/influxdb/pull/18707): Validate host-url for influx config create/set commands -1. [18713](https://github.com/influxdata/influxdb/pull/18713): Fix influx CLI flags to accurately depict flags for all commands - -## v2.0.0-beta.12 [2020-06-12] - -### Features - -1. [18279](https://github.com/influxdata/influxdb/pull/18279): Make all pkg applications stateful via stacks -1. [18322](https://github.com/influxdata/influxdb/pull/18322): Add ability to export a stack's existing (as they are in the platform) resource state as a pkg -1. 
[18334](https://github.com/influxdata/influxdb/pull/18334): Update influx pkg commands with improved usage and examples in long form. -1. [18344](https://github.com/influxdata/influxdb/pull/18344): Extend influx CLI with version and User-Agent. -1. [18355](https://github.com/influxdata/influxdb/pull/18355): Integrate RedirectTo functionality so CLOUD users now get navigated back to the originally linked page after login -1. [18392](https://github.com/influxdata/influxdb/pull/18392): Consolidate pkg influx commands under templates. This removes some nesting of the CLI commands as part of that. -1. [18400](https://github.com/influxdata/influxdb/pull/18400): Dashboards maintain sort order after navigating away -1. [18480](https://github.com/influxdata/influxdb/pull/18480): Allows tasks to open in new tabs -1. [18553](https://github.com/influxdata/influxdb/pull/18553): Update usage and soften comparisons for kind matching on 'influx export --resourceType' cmd - -### Bug Fixes - -1. [18331](https://github.com/influxdata/influxdb/pull/18331): Support organization name in addition to ID in DBRP operations -1. [18335](https://github.com/influxdata/influxdb/pull/18335): Disable failing when providing an unexpected error to influx CLI -1. [18345](https://github.com/influxdata/influxdb/pull/18345): Have influx delete cmd respect the config -1. [18385](https://github.com/influxdata/influxdb/pull/18385): Store initialization for pkger enforced on reads -1. [18434](https://github.com/influxdata/influxdb/pull/18434): Backfill missing fillColumns field for histograms in pkger -1. [18471](https://github.com/influxdata/influxdb/pull/18471): Notifies the user how to escape presentation mode when the feature is toggled - -### UI Improvements - -1. [18319](https://github.com/influxdata/influxdb/pull/18319): Display bucket ID in bucket list and enable 1 click copying -1. [18361](https://github.com/influxdata/influxdb/pull/18361): Tokens list is now consistent with the other resource lists -1. [18346](https://github.com/influxdata/influxdb/pull/18346): Reduce the number of variables being hydrated when toggling variables -1. [18447](https://github.com/influxdata/influxdb/pull/18447): Redesign dashboard cell loading indicator to be more obvious -1. [18593](https://github.com/influxdata/influxdb/pull/18593): Add copyable User and Organization Ids to About page - -## v2.0.0-beta.11 [2020-05-26] - -### Features - -1. [18011](https://github.com/influxdata/influxdb/pull/18011): Integrate UTC dropdown when making custom time range query -1. [18040](https://github.com/influxdata/influxdb/pull/18040): Allow for min OR max y-axis visualization settings rather than min AND max -1. [17764](https://github.com/influxdata/influxdb/pull/17764): Add CSV to line protocol conversion library -1. [18059](https://github.com/influxdata/influxdb/pull/18059): Make the dropdown width adjustable -1. [18173](https://github.com/influxdata/influxdb/pull/18173): Add version to /health response - -### Bug Fixes - -1. [18066](https://github.com/influxdata/influxdb/pull/18066): Fixed bug that wasn't persisting timeFormat for Graph + Single Stat selections -1. [17959](https://github.com/influxdata/influxdb/pull/17959): Authorizer now exposes full permission set -1. 
[18071](https://github.com/influxdata/influxdb/pull/18071): Fixed issue that was causing variable selections to hydrate all variable values -1. [18016](https://github.com/influxdata/influxdb/pull/18016): Remove the fancy scrollbars -1. [18171](https://github.com/influxdata/influxdb/pull/18171): Check status now displaying warning if loading a large amount - -## v2.0.0-beta.10 [2020-05-07] - -### Features - -1. [17934](https://github.com/influxdata/influxdb/pull/17934): Add ability to delete a stack and all the resources associated with it -1. [17941](https://github.com/influxdata/influxdb/pull/17941): Enforce DNS name compliance on all pkger resources' metadata.name field -1. [17989](https://github.com/influxdata/influxdb/pull/17989): Add stateful pkg management with stacks -1. [18007](https://github.com/influxdata/influxdb/pull/18007): Add remove and list pkger stack commands to influx CLI -1. [18017](https://github.com/influxdata/influxdb/pull/18017): Fixup display message for interactive influx setup cmd - -### Bug Fixes - -1. [17906](https://github.com/influxdata/influxdb/pull/17906): Ensure UpdateUser cleans up the index when updating names -1. [17933](https://github.com/influxdata/influxdb/pull/17933): Ensure Checks can be set for zero values - -### UI Improvements - -1. [17860](https://github.com/influxdata/influxdb/pull/17860): Allow bucket creation from the Data Explorer and Cell Editor - -## v2.0.0-beta.9 [2020-04-23] - -### Features - -1. [17851](https://github.com/influxdata/influxdb/pull/17851): Add feature flag package capability and flags endpoint - -### Bug Fixes - -1. [17618](https://github.com/influxdata/influxdb/pull/17618): Add index for URM by user ID to improve lookup performance -1. [17751](https://github.com/influxdata/influxdb/pull/17751): Existing session expiration time is respected on session renewal -1. [17817](https://github.com/influxdata/influxdb/pull/17817): Make CLI respect env vars and flags in addition to the configs and extend support for config orgs to all commands - -### UI Improvements - -1. [17714](https://github.com/influxdata/influxdb/pull/17714): Cloud environments no longer render markdown images, for security reasons. -1. [17321](https://github.com/influxdata/influxdb/pull/17321): Improve UI for sorting resources -1. [17740](https://github.com/influxdata/influxdb/pull/17740): Add single-color color schemes for visualizations -1. [17849](https://github.com/influxdata/influxdb/pull/17849): Move Organization navigation items to user menu. - -## v2.0.0-beta.8 [2020-04-10] - -### Features - -1. [17490](https://github.com/influxdata/influxdb/pull/17490): `influx config -`, to switch back to previous activated configuration -1. [17581](https://github.com/influxdata/influxdb/pull/17581): Introduce new navigation menu -1. [17595](https://github.com/influxdata/influxdb/pull/17595): Add -f (--file) option to `influx query` and `influx task` commands -1. [17498](https://github.com/influxdata/influxdb/pull/17498): Added support for command line options to limit memory for queries - -### Bug Fixes - -1. [17257](https://github.com/influxdata/influxdb/pull/17769): Fix retention policy after bucket is migrated -1. [17612](https://github.com/influxdata/influxdb/pull/17612): Fix card size and layout jank in dashboards index view -1. 
[17651](https://github.com/influxdata/influxdb/pull/17651): Fix check graph font and lines defaulting to black causing graph to be unreadable -1. [17660](https://github.com/influxdata/influxdb/pull/17660): Fix text wrapping display issue and popover sizing bug when adding labels to a resource -1. [17670](https://github.com/influxdata/influxdb/pull/17670): Respect the now-time of the compiled query if it's provided -1. [17692](https://github.com/influxdata/influxdb/pull/17692): Update giraffe to fix spacing between ticks -1. [17694](https://github.com/influxdata/influxdb/pull/17694): Fixed typos in the Flux functions list -1. [17701](https://github.com/influxdata/influxdb/pull/17701): Allow mouse cursor inside Script Editor for Safari -1. [17609](https://github.com/influxdata/influxdb/pull/17609): Fixed an issue where Variables could not use other Variables -1. [17754](https://github.com/influxdata/influxdb/pull/17754): Adds error messaging for Cells in Dashboard View - -### UI Improvements - -1. [17583](https://github.com/influxdata/influxdb/pull/17583): Update layout of Alerts page to work on all screen sizes -1. [17657](https://github.com/influxdata/influxdb/pull/17657): Sort dashboards on Getting Started page by recently modified - -## v2.0.0-beta.7 [2020-03-27] - -### Features - -1. [17232](https://github.com/influxdata/influxdb/pull/17232): Allow dashboards to optionally be displayed in light mode -1. [17273](https://github.com/influxdata/influxdb/pull/17273): Add shell completions command for the influx cli -1. [17353](https://github.com/influxdata/influxdb/pull/17353): Make all pkg resources unique by metadata.name field -1. [17363](https://github.com/influxdata/influxdb/pull/17363): Telegraf config tokens can no longer be retrieved after creation, but new tokens can be created after a telegraf has been setup -1. [17400](https://github.com/influxdata/influxdb/pull/17400): Be able to delete bucket by name via cli -1. [17396](https://github.com/influxdata/influxdb/pull/17396): Add module to write line data to specified url, org, and bucket -1. [17398](https://github.com/influxdata/influxdb/pull/17398): Extend influx cli write command with ability to process CSV data -1. [17448](https://github.com/influxdata/influxdb/pull/17448): Add foundation for pkger stacks, stateful package management -1. [17462](https://github.com/influxdata/influxdb/pull/17462): Flag to disable scheduling of tasks -1. [17470](https://github.com/influxdata/influxdb/pull/17470): Add ability to output cli output as json and hide table headers -1. [17472](https://github.com/influxdata/influxdb/pull/17472): Add an easy way to switch config via cli - -### Bug Fixes - -1. [17240](https://github.com/influxdata/influxdb/pull/17240): NodeJS logo displays properly in Firefox -1. [17363](https://github.com/influxdata/influxdb/pull/17363): Fixed telegraf configuration bugs where system buckets were appearing in the buckets dropdown -1. [17391](https://github.com/influxdata/influxdb/pull/17391): Fixed threshold check bug where checks could not be created when a field had a space in the name -1. [17384](https://github.com/influxdata/influxdb/pull/17384): Reuse slices built by iterator to reduce allocations -1. 
[17404](https://github.com/influxdata/influxdb/pull/17404): Updated duplicate check error message to be more explicit and actionable -1. [17515](https://github.com/influxdata/influxdb/pull/17515): Editing a table cell shows the proper values and respects changes -1. [17521](https://github.com/influxdata/influxdb/pull/17521): Table view scrolling should be slightly smoother -1. [17601](https://github.com/influxdata/influxdb/pull/17601): URL table values on single columns are being correctly parsed -1. [17552](https://github.com/influxdata/influxdb/pull/17552): Fixed a regression bug that inserted aggregate functions where the cursor is rather than on a new line - -### UI Improvements - -1. [17291](https://github.com/influxdata/influxdb/pull/17291): Redesign OSS Login page -1. [17297](https://github.com/influxdata/influxdb/pull/17297): Display graphic when a dashboard has no cells - -## v2.0.0-beta.6 [2020-03-12] - -### Features - -1. [17085](https://github.com/influxdata/influxdb/pull/17085): Clicking on bucket name takes user to Data Explorer with bucket selected -1. [17095](https://github.com/influxdata/influxdb/pull/17095): Extend pkger dashboards with table view support -1. [17114](https://github.com/influxdata/influxdb/pull/17114): Allow for retention to be provided to influx setup command as a duration -1. [17138](https://github.com/influxdata/influxdb/pull/17138): Extend pkger export all capabilities to support filtering by label name and resource type -1. [17049](https://github.com/influxdata/influxdb/pull/17049): Added new login and sign-up screen for cloud users that allows direct login from their region -1. [17170](https://github.com/influxdata/influxdb/pull/17170): Added new cli multiple profiles management tool -1. [17145](https://github.com/influxdata/influxdb/pull/17145): Update kv.Store to define schema changes via new kv.Migrator types - -### Bug Fixes - -1. [17039](https://github.com/influxdata/influxdb/pull/17039): Fixed issue where tasks are exported for notification rules -1. [17042](https://github.com/influxdata/influxdb/pull/17042): Fixed issue where tasks are not exported when exporting by org id -1. [17070](https://github.com/influxdata/influxdb/pull/17070): Fixed issue where tasks with imports in query break in pkger -1. [17028](https://github.com/influxdata/influxdb/pull/17028): Fixed issue where selecting an aggregate function in the script editor was not adding the function to a new line -1. [17072](https://github.com/influxdata/influxdb/pull/17072): Fixed issue where creating a variable of type map was piping the incorrect value when map variables were used in queries -1. [17050](https://github.com/influxdata/influxdb/pull/17050): Added missing user names to auth CLI commands -1. [17113](https://github.com/influxdata/influxdb/pull/17113): Disabled group functionality for check query builder -1. [17120](https://github.com/influxdata/influxdb/pull/17120): Fixed cell configuration error that was popping up when users created a dashboard and accessed the disk usage cell for the first time -1. [17097](https://github.com/influxdata/influxdb/pull/17097): Listing all the default variables in the VariableTab of the script editor -1. 
[17049](https://github.com/influxdata/influxdb/pull/17049): Fixed bug that was preventing the interval status on the dashboard header from refreshing on selections -1. [17161](https://github.com/influxdata/influxdb/pull/17161): Update table custom decimal feature for tables to update table onFocus -1. [17168](https://github.com/influxdata/influxdb/pull/17168): Fixed UI bug that was setting Telegraf config buttons off-center and was resizing config selections when filtering through the data -1. [17208](https://github.com/influxdata/influxdb/pull/17208): Fixed UI bug that was causing dashboard cells to error when a v.bucket was being used and was being configured for the first time -1. [17214](https://github.com/influxdata/influxdb/pull/17214): Fix appearance of client library logos in Safari -1. [17202](https://github.com/influxdata/influxdb/pull/17202): Fixed UI bug that was preventing checks created with the query builder from updating. Also fixed a bug that was preventing dashboard cell queries from working properly when creating group queries using the query builder - -## v2.0.0-beta.5 [2020-02-27] - -### Features - -1. [16991](https://github.com/influxdata/influxdb/pull/16991): Update Flux functions list for v0.61 -1. [16574](https://github.com/influxdata/influxdb/pull/16574): Add secure flag to session cookie - -### Bug Fixes - -1. [16919](https://github.com/influxdata/influxdb/pull/16919): Sort dashboards on homepage alphabetically -1. [16934](https://github.com/influxdata/influxdb/pull/16934): Tokens page now sorts by status -1. [16931](https://github.com/influxdata/influxdb/pull/16931): Set the default value of tags in a Check -1. [16935](https://github.com/influxdata/influxdb/pull/16935): Fix sort by variable type -1. [16973](https://github.com/influxdata/influxdb/pull/16973): Calculate correct stacked line cumulative when lines are different lengths -1. [17010](https://github.com/influxdata/influxdb/pull/17010): Fixed scrollbar issue where resource cards would overflow the parent container rather than be hidden and scrollable -1. [16992](https://github.com/influxdata/influxdb/pull/16992): Query Builder now groups on column values, not tag values -1. [17013](https://github.com/influxdata/influxdb/pull/17013): Scatterplots can once again render the tooltip correctly -1. [17027](https://github.com/influxdata/influxdb/pull/17027): Drop pkger gauge chart requirement for color threshold type -1. [17040](https://github.com/influxdata/influxdb/pull/17040): Fixed bug that was preventing the interval status on the dashboard header from refreshing on selections -1. [16961](https://github.com/influxdata/influxdb/pull/16961): Remove cli confirmation of secret, add an optional parameter of secret value - -## v2.0.0-beta.4 [2020-02-14] - -### Features - -1. [16855](https://github.com/influxdata/influxdb/pull/16855): Added labels to buckets in UI -1. [16842](https://github.com/influxdata/influxdb/pull/16842): Connect monaco editor to Flux LSP server -1. [16856](https://github.com/influxdata/influxdb/pull/16856): Update Flux to v0.59.6 - -### Bug Fixes - -1. [16852](https://github.com/influxdata/influxdb/pull/16852): Revert for bad indexing of UserResourceMappings and Authorizations -1. [15911](https://github.com/influxdata/influxdb/pull/15911): Gauge no longer allowed to become too small -1. 
[16878](https://github.com/influxdata/influxdb/pull/16878): Fix issue with INFLUX_TOKEN env vars being overridden by default token - -## v2.0.0-beta.3 [2020-02-11] - -### Features - -1. [16765](https://github.com/influxdata/influxdb/pull/16765): Extend influx cli pkg command with ability to take multiple files and directories -1. [16767](https://github.com/influxdata/influxdb/pull/16767): Extend influx cli pkg command with ability to take multiple urls, files, directories, and stdin at the same time -1. [16786](https://github.com/influxdata/influxdb/pull/16786): influx cli can manage secrets. - -### Bug Fixes - -1. [16733](https://github.com/influxdata/influxdb/pull/16733): Fix notification rule renaming panics from UI -1. [16769](https://github.com/influxdata/influxdb/pull/16769): Fix the tooltip for stacked line graphs -1. [16825](https://github.com/influxdata/influxdb/pull/16825): Fixed false success notification for read-only users creating dashboards -1. [16822](https://github.com/influxdata/influxdb/pull/16822): Fix issue with pkger/http stack crashing on dupe content type - -## v2.0.0-beta.2 [2020-01-24] - -### Features - -1. [16711](https://github.com/influxdata/influxdb/pull/16711): Query Builder supports group() function (change the dropdown from filter to group) -1. [16523](https://github.com/influxdata/influxdb/pull/16523): Change influx packages to be CRD compliant -1. [16547](https://github.com/influxdata/influxdb/pull/16547): Allow trailing newline in credentials file and CLI integration -1. [16545](https://github.com/influxdata/influxdb/pull/16545): Add support for prefixed cursor search to ForwardCursor types -1. [16504](https://github.com/influxdata/influxdb/pull/16504): Add backup and restore -1. [16522](https://github.com/influxdata/influxdb/pull/16522): Introduce resource logger to tasks, buckets and organizations - -### Bug Fixes - -1. [16656](https://github.com/influxdata/influxdb/pull/16656): Check engine closed before collecting index metrics -1. [16412](https://github.com/influxdata/influxdb/pull/16412): Reject writes which use any of the reserved tag keys -1. [16715](https://github.com/influxdata/influxdb/pull/16715): Fixed dashboard mapping for getDashboards to map correct prop -1. [16716](https://github.com/influxdata/influxdb/pull/16716): Improve the lacking error responses for unmarshal errors in org service - -### Bug Fixes - -1. [16527](https://github.com/influxdata/influxdb/pull/16527): fix /telegrafs panics when using org=org_name parameter - -### UI Improvements - -1. [16575](https://github.com/influxdata/influxdb/pull/16575): Swap billingURL with checkoutURL -1. [16203](https://github.com/influxdata/influxdb/pull/16203): Move cloud navigation to top of page instead of within left side navigation -1. [16536](https://github.com/influxdata/influxdb/pull/16536): Adjust aggregate window periods to be more "reasonable". Use duration input with validation. - -## v2.0.0-beta.1 [2020-01-08] - -### Features - -1. [16234](https://github.com/influxdata/influxdb/pull/16234): Add support for notification endpoints to influx templates/pkgs. -1. [16242](https://github.com/influxdata/influxdb/pull/16242): Drop id prefix for secret key requirement for notification endpoints -1. 
[16259](https://github.com/influxdata/influxdb/pull/16259): Add support for check resource to pkger parser -1. [16262](https://github.com/influxdata/influxdb/pull/16262): Add support for check resource pkger dry run functionality -1. [16275](https://github.com/influxdata/influxdb/pull/16275): Add support for check resource pkger apply functionality -1. [16283](https://github.com/influxdata/influxdb/pull/16283): Add support for check resource pkger export functionality -1. [16212](https://github.com/influxdata/influxdb/pull/16212): Add new kv.ForwardCursor interface -1. [16297](https://github.com/influxdata/influxdb/pull/16297): Add support for notification rule to pkger parser -1. [16298](https://github.com/influxdata/influxdb/pull/16298): Add support for notification rule pkger dry run functionality -1. [16305](https://github.com/influxdata/influxdb/pull/16305): Add support for notification rule pkger apply functionality -1. [16312](https://github.com/influxdata/influxdb/pull/16312): Add support for notification rule pkger export functionality -1. [16320](https://github.com/influxdata/influxdb/pull/16320): Add support for tasks to pkger parser -1. [16322](https://github.com/influxdata/influxdb/pull/16322): Add support for tasks to pkger dry run functionality -1. [16323](https://github.com/influxdata/influxdb/pull/16323): Add support for tasks to pkger apply functionality -1. [16324](https://github.com/influxdata/influxdb/pull/16324): Add support for tasks to pkger export functionality -1. [16226](https://github.com/influxdata/influxdb/pull/16226): Add group() to Query Builder -1. [16338](https://github.com/influxdata/influxdb/pull/16338): Add last run status to check and notification rules -1. [16340](https://github.com/influxdata/influxdb/pull/16340): Add last run status to tasks -1. [16341](https://github.com/influxdata/influxdb/pull/16341): Extend pkger apply functionality with ability to provide secrets outside of pkg -1. [16345](https://github.com/influxdata/influxdb/pull/16345): Add hide headers flag to influx cli task find cmd -1. [16336](https://github.com/influxdata/influxdb/pull/16336): Manual Overrides for Readiness Endpoint -1. [16347](https://github.com/influxdata/influxdb/pull/16347): Drop legacy inmem service implementation in favor of kv service with inmem dependency -1. [16348](https://github.com/influxdata/influxdb/pull/16348): Drop legacy bolt service implementation in favor of kv service with bolt dependency -1. [16014](https://github.com/influxdata/influxdb/pull/16014): While creating check, also display notification rules that would match check based on tag rules -1. [16389](https://github.com/influxdata/influxdb/pull/16389): Increase default bucket retention period to 30 days -1. [16430](https://github.com/influxdata/influxdb/pull/16430): Added toggle to table thresholds to allow users to choose between setting threshold colors to text or background -1. [16418](https://github.com/influxdata/influxdb/pull/16418): Add Developer Documentation -1. [16260](https://github.com/influxdata/influxdb/pull/16260): Capture User-Agent header as query source for logging purposes -1. [16469](https://github.com/influxdata/influxdb/pull/16469): Add support for configurable max batch size in points write handler -1. 
[16509](https://github.com/influxdata/influxdb/pull/16509): Add support for applying an influx package via a public facing URL -1. [16511](https://github.com/influxdata/influxdb/pull/16511): Add jsonnet support for influx packages -1. [14782](https://github.com/influxdata/influxdb/pull/16336): Add view page for Check -1. [16537](https://github.com/influxdata/influxdb/pull/16537): Add update password for CLI - -### Bug Fixes - -1. [16225](https://github.com/influxdata/influxdb/pull/16225): Ensures env vars are applied consistently across cmd, and fixes issue where INFLUX\_ env var prefix was not set globally. -1. [16235](https://github.com/influxdata/influxdb/pull/16235): Removed default frontend sorting when flux queries specify sorting -1. [16238](https://github.com/influxdata/influxdb/pull/16238): Store canceled task runs in the correct bucket -1. [16237](https://github.com/influxdata/influxdb/pull/16237): Updated Sortby functionality for table frontend sorts to sort numbers correctly -1. [16249](https://github.com/influxdata/influxdb/pull/16249): Prevent potential infinite loop when finding tasks by organization. -1. [16255](https://github.com/influxdata/influxdb/pull/16255): Retain user input when parsing invalid JSON during import -1. [16268](https://github.com/influxdata/influxdb/pull/16268): Fixed test flakiness that stemmed from multiple flush/signins being called in the same test suite -1. [16346](https://github.com/influxdata/influxdb/pull/16346): Update pkger task export to only trim out option task and not all vars provided -1. [16374](https://github.com/influxdata/influxdb/pull/16374): Update influx CLI, only show "see help" message, instead of the whole usage. -1. [16380](https://github.com/influxdata/influxdb/pull/16380): Fix notification tag matching rules and enable tests to verify -1. [16376](https://github.com/influxdata/influxdb/pull/16376): Extend the y-axis when stacked graph is selected -1. [16404](https://github.com/influxdata/influxdb/pull/16404): Fixed query reset bug that was resetting query in script editor whenever dates were changed -1. [16430](https://github.com/influxdata/influxdb/pull/16430): Fixed table threshold bug that was defaulting set colors to the background. -1. [16435](https://github.com/influxdata/influxdb/pull/16435): Time labels are no longer squished to the left -1. [16427](https://github.com/influxdata/influxdb/pull/16427): Fixed underlying issue with disappearing queries made in Advanced Mode -1. [16439](https://github.com/influxdata/influxdb/pull/16439): Prevent negative zero and allow zero to have decimal places -1. [16376](https://github.com/influxdata/influxdb/pull/16413): Limit data loader bucket selection to non system buckets -1. [16458](https://github.com/influxdata/influxdb/pull/16458): Fix EOF error when manually running tasks from the Task Page. -1. [16491](https://github.com/influxdata/influxdb/pull/16491): Add missing env vals to influx cli usage and fixes precedence of flag/env var priority - -### UI Improvements - -1. [16444](https://github.com/influxdata/influxdb/pull/16444): Add honeybadger reporting to create checks - -## v2.0.0-alpha.21 [2019-12-13] - -### Features - -1. [15836](https://github.com/influxdata/influxdb/pull/16077): Add stacked line layer option to graphs -1. 
[16094](https://github.com/influxdata/influxdb/pull/16094): Annotate log messages with trace ID, if available -1. [16187](https://github.com/influxdata/influxdb/pull/16187): Bucket create to accept an org name flag -1. [16158](https://github.com/influxdata/influxdb/pull/16158): Add trace ID response header to query endpoint - -### Bug Fixes - -1. [15655](https://github.com/influxdata/influxdb/pull/15655): Allow table columns to be draggable in table settings -1. [15757](https://github.com/influxdata/influxdb/pull/15757): Light up the home page icon when active -1. [15797](https://github.com/influxdata/influxdb/pull/15797): Make numeric inputs first class citizens -1. [15853](https://github.com/influxdata/influxdb/pull/15853): Prompt users to make a dashboard when dashboards are empty -1. [15884](https://github.com/influxdata/influxdb/pull/15884): Remove name editing from query definition during threshold check creation -1. [15975](https://github.com/influxdata/influxdb/pull/15975): Wait until user stops dragging and releases marker before zooming in after threshold changes -1. [16057](https://github.com/influxdata/influxdb/pull/16057): Adds `properties` to each cell on GET /dashboards/{dashboardID} -1. [16101](https://github.com/influxdata/influxdb/pull/16101): Gracefully handle invalid user-supplied JSON -1. [16105](https://github.com/influxdata/influxdb/pull/16105): Fix crash when loading queries built using Query Builder -1. [16112](https://github.com/influxdata/influxdb/pull/16112): Create cell view properties on dashboard creation -1. [16144](https://github.com/influxdata/influxdb/pull/16144): Scrollbars are dapper and proper -1. [16172](https://github.com/influxdata/influxdb/pull/16172): Fixed table ui threshold colorization issue where setting thresholds would not change table UI -1. [16194](https://github.com/influxdata/influxdb/pull/16194): Fixed windowPeriod issue that stemmed from webpack rules -1. [16175](https://github.com/influxdata/influxdb/pull/16175): Added delete functionality to note cells so that they can be deleted -1. [16204](https://github.com/influxdata/influxdb/pull/16204): Fix failure to create labels when creating telegraf configs -1. [16207](https://github.com/influxdata/influxdb/pull/16207): Fix crash when editing a Telegraf config -1. [16201](https://github.com/influxdata/influxdb/pull/16201): Updated start/endtime functionality so that custom script timeranges overwrite dropdown selections -1. [16217](https://github.com/influxdata/influxdb/pull/16217): Fix 12-hour time format to use consistent formatting and number of time ticks - -### UI Improvements - -## v2.0.0-alpha.20 [2019-11-20] - -### Features - -1. [15805](https://github.com/influxdata/influxdb/pull/15924): Add tls insecure skip verify to influx CLI. -1. [15981](https://github.com/influxdata/influxdb/pull/15981): Extend influx cli user create to allow for organization ID and user passwords to be set on user. -1. [15983](https://github.com/influxdata/influxdb/pull/15983): Autopopulate organization ids in the code samples -1. [15749](https://github.com/influxdata/influxdb/pull/15749): Expose bundle analysis tools for frontend resources -1. [15674](https://github.com/influxdata/influxdb/pull/15674): Allow users to view just the output section of a telegraf config -1. 
[15923](https://github.com/influxdata/influxdb/pull/15923): Allow the users to see string data in the single stat graph type - -### Bug Fixes - -1. [15777](https://github.com/influxdata/influxdb/pull/15777): Fix long startup when running 'influx help' -1. [15713](https://github.com/influxdata/influxdb/pull/15713): Mock missing Flux dependencies when creating tasks -1. [15731](https://github.com/influxdata/influxdb/pull/15731): Ensure array cursor iterator stats accumulate all cursor stats -1. [15866](https://github.com/influxdata/influxdb/pull/15866): Do not show Members section in Cloud environments -1. [15801](https://github.com/influxdata/influxdb/pull/15801): Change how cloud mode is enabled -1. [15820](https://github.com/influxdata/influxdb/pull/15820): Merge frontend development environments -1. [15944](https://github.com/influxdata/influxdb/pull/15944): Refactor table state logic on the frontend -1. [15920](https://github.com/influxdata/influxdb/pull/15920): Arrows in tables now show data in ascending and descending order -1. [15728](https://github.com/influxdata/influxdb/pull/15728): Sort by retention rules now sorts by seconds -1. [15628](https://github.com/influxdata/influxdb/pull/15628): Horizontal scrollbar no longer covering data - -### UI Improvements - -1. [15809](https://github.com/influxdata/influxdb/pull/15809): Redesign cards and animations on getting started page -1. [15787](https://github.com/influxdata/influxdb/pull/15787): Allow the users to filter with labels in telegraf input search - -## v2.0.0-alpha.19 [2019-10-30] - -### Features - -1. [15313](https://github.com/influxdata/influxdb/pull/15313): Add shortcut for toggling comments in script editor -1. [15650](https://github.com/influxdata/influxdb/pull/15650): Expose last run status and last run error in task API - -### UI Improvements - -1. [15503](https://github.com/influxdata/influxdb/pull/15503): Redesign page headers to be more space efficient -1. [15426](https://github.com/influxdata/influxdb/pull/15426): Add 403 handler that redirects back to the sign-in page on oats-generated routes. -1. [15710](https://github.com/influxdata/influxdb/pull/15710): Add button to nginx and redis configuration sections to make interaction more clear - -### Bug Fixes - -1. [15295](https://github.com/influxdata/influxdb/pull/15295): Ensures users are created with an active status -1. [15306](https://github.com/influxdata/influxdb/pull/15306): Added missing string values for CacheStatus type -1. [15348](https://github.com/influxdata/influxdb/pull/15348): Disable saving for threshold check if no threshold selected -1. [15354](https://github.com/influxdata/influxdb/pull/15354): Query variable selector shows variable keys, not values -1. [15246](https://github.com/influxdata/influxdb/pull/15427): UI/Telegraf filter functionality shows results based on input name -1. [13940](https://github.com/influxdata/influxdb/pull/15443): Create Label Overlay UI will disable the submit button and return a UI error if the name field is empty -1. [15452](https://github.com/influxdata/influxdb/pull/15452): Log error as info message on unauthorized API call attempts -1. [15504](https://github.com/influxdata/influxdb/pull/15504): Ensure members&owners eps 404 when /org resource does not exist -1. 
[15510](https://github.com/influxdata/influxdb/pull/15510): UI/Telegraf sort functionality fixed -1. [15549](https://github.com/influxdata/influxdb/pull/15549): UI/Task edit functionality fixed -1. [15559](https://github.com/influxdata/influxdb/pull/15559): Exiting a configuration of a dashboard cell now properly renders the cell content -1. [15556](https://github.com/influxdata/influxdb/pull/15556): Creating a check now displays on the checklist -1. [15592](https://github.com/influxdata/influxdb/pull/15592): Changed task runs success status code from 200 to 201 to match Swagger documentation. -1. [15634](https://github.com/influxdata/influxdb/pull/15634): TextAreas have the correct height -1. [15647](https://github.com/influxdata/influxdb/pull/15647): Ensures labels are unique by organization in the kv store -1. [15695](https://github.com/influxdata/influxdb/pull/15695): Ensures variable names are unique by organization - -## v2.0.0-alpha.18 [2019-09-26] - -### Features - -1. [15151](https://github.com/influxdata/influxdb/pull/15151): Add jsonweb package for future JWT support -1. [15168](https://github.com/influxdata/influxdb/pull/15168): Added the JMeter Template dashboard -1. [15152](https://github.com/influxdata/influxdb/pull/15152): Add JWT support to http auth middleware - -### UI Improvements - -1. [15211](https://github.com/influxdata/influxdb/pull/15211): Display dashboards index as a grid -1. [15099](https://github.com/influxdata/influxdb/pull/15099): Add viewport scaling to html meta for responsive mobile scaling -1. [15056](https://github.com/influxdata/influxdb/pull/15056): Remove rename and delete functionality from system buckets -1. [15056](https://github.com/influxdata/influxdb/pull/15056): Prevent new buckets from being named with the reserved "\_" prefix -1. [15056](https://github.com/influxdata/influxdb/pull/15056): Prevent user from selecting system buckets when creating Scrapers, Telegraf configurations, read/write tokens, and when saving as a task -1. [15056](https://github.com/influxdata/influxdb/pull/15056): Limit values from draggable threshold handles to 2 decimal places -1. [15040](https://github.com/influxdata/influxdb/pull/15040): Redesign check builder UI to fill the screen and make more room for composing message templates -1. [14990](https://github.com/influxdata/influxdb/pull/14990): Move Tokens tab from Settings to Load Data page -1. [14990](https://github.com/influxdata/influxdb/pull/14990): Expose all Settings tabs in navigation menu -1. [15289](https://github.com/influxdata/influxdb/pull/15289): Added Stream and table functions to query builder - -### Bug Fixes - -1. [14931](https://github.com/influxdata/influxdb/pull/14931): Remove scrollbars blocking onboarding UI step. - -## v2.0.0-alpha.17 [2019-08-14] - -### Features - -1. [14809](https://github.com/influxdata/influxdb/pull/14809): Add task middleware's for checks and notifications -1. [14495](https://github.com/influxdata/influxdb/pull/14495): optional gzip compression of the query CSV response. -1. [14567](https://github.com/influxdata/influxdb/pull/14567): Add task types. -1. [14604](https://github.com/influxdata/influxdb/pull/14604): When getting task runs from the API, runs will be returned in order of most recently scheduled first. -1. 
[14631](https://github.com/influxdata/influxdb/pull/14631): Added GitHub and Apache templates -1. [14631](https://github.com/influxdata/influxdb/pull/14631): Updated name of Local Metrics template -1. [14631](https://github.com/influxdata/influxdb/pull/14631): Dashboards for all Telegraf config bundles now created -1. [14694](https://github.com/influxdata/influxdb/pull/14694): Add ability to find tasks by name. -1. [14901](https://github.com/influxdata/influxdb/pull/14901): Add ability to Peek() on reads package StreamReader types. - -### UI Improvements - -1. [14917](https://github.com/influxdata/influxdb/pull/14917): Make first steps in Monitoring & Alerting more obvious -1. [14889](https://github.com/influxdata/influxdb/pull/14889): Make adding data to buckets more discoverable -1. [14709](https://github.com/influxdata/influxdb/pull/14709): Move Buckets, Telegrafs, and Scrapers pages into a tab called "Load Data" for ease of discovery -1. [14846](https://github.com/influxdata/influxdb/pull/14846): Standardize formatting of "updated at" timestamp in all resource cards -1. [14887](https://github.com/influxdata/influxdb/pull/14887): Move no buckets warning in telegraf tab above the search box - -### Bug Fixes - -1. [14480](https://github.com/influxdata/influxdb/pull/14480): Fix authentication when updating a task with invalid org or bucket. -1. [14497](https://github.com/influxdata/influxdb/pull/14497): Update the documentation link for Telegraf. -1. [14492](https://github.com/influxdata/influxdb/pull/14492): Fix to surface errors properly as task notifications on create. -1. [14569](https://github.com/influxdata/influxdb/pull/14569): Fix limiting of get runs for task. -1. [14779](https://github.com/influxdata/influxdb/pull/14779): Refactor tasks coordinator. -1. [14846](https://github.com/influxdata/influxdb/pull/14846): Ensure onboarding "advanced" button goes to correct location - -## v2.0.0-alpha.16 [2019-07-25] - -### Bug Fixes - -1. [14385](https://github.com/influxdata/influxdb/pull/14385): Add link to Documentation text in line protocol upload overlay -1. [14344](https://github.com/influxdata/influxdb/pull/14344): Fix issue in Authorization API, can't create auth for another user. -1. [14352](https://github.com/influxdata/influxdb/pull/14352): Fix Influx CLI ignored user flag for auth creation. -1. [14379](https://github.com/influxdata/influxdb/pull/14379): Fix the map example in the documentation -1. [14423](https://github.com/influxdata/influxdb/pull/14423): Ignore null/empty Flux rows which prevents a single stat/gauge crash. -1. [14434](https://github.com/influxdata/influxdb/pull/14434): Fixes an issue where clicking on a dashboard name caused an incorrect redirect. -1. [14441](https://github.com/influxdata/influxdb/pull/14441): Upgrade templates lib to 0.5.0 -1. [14453](https://github.com/influxdata/influxdb/pull/14453): Upgrade giraffe lib to 0.16.1 -1. [14412](https://github.com/influxdata/influxdb/pull/14412): Fix incorrect notification type for manually running a Task -1. [14356](https://github.com/influxdata/influxdb/pull/14356): Fix an issue where canceled tasks did not resume. - -## v2.0.0-alpha.15 [2019-07-11] - -### Features - -1. [14256](https://github.com/influxdata/influxdb/pull/14256): Add time zone support to UI -2. 
[14243](https://github.com/influxdata/influxdb/pull/14243): Added new storage inspection tool to verify tsm files -3. [14353](https://github.com/influxdata/influxdb/pull/14353): Require a token to be supplied for all task creation - -### Bug Fixes - -1. [14287](https://github.com/influxdata/influxdb/pull/14287): Fix incorrect reporting of task as successful when error occurs during result iteration -1. [14412](https://github.com/influxdata/influxdb/pull/14412): Fix incorrect notification type for manually running a Task - -### Known Issues - -1. [influxdata/flux#1492](https://github.com/influxdata/flux/issues/1492): Null support in Flux was introduced in Alpha 14. Several null issues were fixed in this release, but one known issue remains - Users may hit a panic if the first record processed by a map function has a null value. - -## v2.0.0-alpha.14 [2019-06-28] - -### Features - -1. [14221](https://github.com/influxdata/influxdb/pull/14221): Add influxd inspect verify-wal tool -1. [14218](https://github.com/influxdata/influxdb/commit/4faf2a24def4f351aef5b3c0f2907c385f82fdb9): Move to Flux .34.2 - which includes new string functions and initial multi-datasource support with Sql.from() -1. [14164](https://github.com/influxdata/influxdb/pull/14164): Only click save once to save cell -1. [14188](https://github.com/influxdata/influxdb/pull/14188): Enable selecting more columns for line visualizations - -### UI Improvements - -1. [14194](https://github.com/influxdata/influxdb/pull/14194): Draw gauges correctly on HiDPI displays -1. [14194](https://github.com/influxdata/influxdb/pull/14194): Clamp gauge position to gauge domain -1. [14168](https://github.com/influxdata/influxdb/pull/14168): Improve display of error messages -1. [14157](https://github.com/influxdata/influxdb/pull/14157): Remove rendering bottleneck when streaming Flux responses -1. [14165](https://github.com/influxdata/influxdb/pull/14165): Prevent variable dropdown from clipping - -## v2.0.0-alpha.13 [2019-06-13] - -### Features - -1. [14130](https://github.com/influxdata/influxdb/pull/14130): Add static templates for system, docker, redis, kubernetes -1. [14189](https://github.com/influxdata/influxdb/pull/14189): Add option to select a token when creating a task -1. [14200](https://github.com/influxdata/influxdb/pull/14200): Add the ability to update a token when updating a task - -## v2.0.0-alpha.12 [2019-06-13] - -### Features - -1. [14059](https://github.com/influxdata/influxdb/pull/14059): Enable formatting line graph y ticks with binary prefix -1. [14052](https://github.com/influxdata/influxdb/pull/14052): Add x and y column pickers to graph types -1. [14128](https://github.com/influxdata/influxdb/pull/14128): Add option to shade area below line graphs - -### Bug Fixes - -1. [14085](https://github.com/influxdata/influxdb/pull/14085): Fix performance regression in graph tooltips - -### UI Improvements - -## v2.0.0-alpha.11 [2019-05-31] - -1. [14031](https://github.com/influxdata/influxdb/pull/14031): Correctly check if columnKeys include xColumn in heatmap - -## v2.0.0-alpha.10 [2019-05-30] - -### Features - -1. [13945](https://github.com/influxdata/influxdb/pull/13945): Add heatmap visualization type -1. [13961](https://github.com/influxdata/influxdb/pull/13961): Add scatter graph visualization type -1. 
[13850](https://github.com/influxdata/influxdb/pull/13850): Add description field to Tasks -1. [13924](https://github.com/influxdata/influxdb/pull/13924): Add CLI arguments for configuring session length and renewal -1. [13961](https://github.com/influxdata/influxdb/pull/13961): Add smooth interpolation option to line graphs - -### Bug Fixes - -1. [13753](https://github.com/influxdata/influxdb/pull/13753): Removed hardcoded bucket for Getting Started with Flux dashboard -1. [13783](https://github.com/influxdata/influxdb/pull/13783): Ensure map type variables allow for selecting values -1. [13800](https://github.com/influxdata/influxdb/pull/13800): Generate more idiomatic Flux in query builder -1. [13797](https://github.com/influxdata/influxdb/pull/13797): Expand tab key presses to 2 spaces in the Flux editor -1. [13823](https://github.com/influxdata/influxdb/pull/13823): Prevent dragging of Variable Dropdowns when dragging a scrollbar inside the dropdown -1. [13853](https://github.com/influxdata/influxdb/pull/13853): Improve single stat computation -1. [13945](https://github.com/influxdata/influxdb/pull/13945): Fix crash when opening histogram settings with no data - -### UI Improvements - -1. [#13835](https://github.com/influxdata/influxdb/pull/13835): Render checkboxes in query builder tag selection lists -1. [#13856](https://github.com/influxdata/influxdb/pull/13856): Fix jumbled card text in Telegraf configuration wizard -1. [#13888](https://github.com/influxdata/influxdb/pull/13888): Change scrapers in scrapers list to be resource cards -1. [#13925](https://github.com/influxdata/influxdb/pull/13925): Export and download resource with formatted resource name with no spaces - -## v2.0.0-alpha.9 [2019-05-01] - -**NOTE: This will remove all tasks from your InfluxDB v2.0 instance.** - -### Features - -1. [13423](https://github.com/influxdata/influxdb/pull/13423): Set autorefresh of dashboard to pause if absolute time range is selected -1. [13473](https://github.com/influxdata/influxdb/pull/13473): Switch task back end to a more modular and flexible system -1. [13493](https://github.com/influxdata/influxdb/pull/13493): Add org profile tab with ability to edit organization name -1. [13510](https://github.com/influxdata/influxdb/pull/13510): Add org name to dashboard page title -1. [13520](https://github.com/influxdata/influxdb/pull/13520): Add cautioning to bucket renaming -1. [13560](https://github.com/influxdata/influxdb/pull/13560): Add option to generate all access token in tokens tab -1. [13601](https://github.com/influxdata/influxdb/pull/13601): Add option to generate read/write token in tokens tab -1. [13715](https://github.com/influxdata/influxdb/pull/13715): Added a new Local Metrics Dashboard template that is created during Quick Start - -### Bug Fixes - -1. [13584](https://github.com/influxdata/influxdb/pull/13584): Fixed scroll clipping found in label editing flow -1. [13585](https://github.com/influxdata/influxdb/pull/13585): Prevent overlapping text and dot in time range dropdown -1. [13602](https://github.com/influxdata/influxdb/pull/13602): Updated link in notes cell to a more useful site -1. [13618](https://github.com/influxdata/influxdb/pull/13618): Show error message when adding line protocol -1. 
[13657](https://github.com/influxdata/influxdb/pull/13657): Update UI Flux function documentation -1. [13718](https://github.com/influxdata/influxdb/pull/13718): Updated System template to support math with floats -1. [13732](https://github.com/influxdata/influxdb/pull/13732): Fixed the window function documentation -1. [13738](https://github.com/influxdata/influxdb/pull/13738): Fixed typo in the `range` Flux function example -1. [13742](https://github.com/influxdata/influxdb/pull/13742): Updated the `systemTime` function to use `system.time` - -### UI Improvements - -1. [13424](https://github.com/influxdata/influxdb/pull/13424): Add general polish and empty states to Create Dashboard from Template overlay - -## v2.0.0-alpha.8 [2019-04-12] - -### Features - -1. [13024](https://github.com/influxdata/influxdb/pull/13024): Add the ability to edit token's description -1. [13078](https://github.com/influxdata/influxdb/pull/13078): Add the option to create a Dashboard from a Template. -1. [13161](https://github.com/influxdata/influxdb/pull/13161): Add the ability to add labels on variables -1. [13171](https://github.com/influxdata/influxdb/pull/13171): Add switch organizations dropdown to home navigation menu item. -1. [13173](https://github.com/influxdata/influxdb/pull/13173): Add create org to side nav -1. [13345](https://github.com/influxdata/influxdb/pull/13345): Added a new Getting Started with Flux Template - -### Bug Fixes - -1. [13284](https://github.com/influxdata/influxdb/pull/13284): Update shift to timeShift in the flux functions side bar - -### UI Improvements - -1. [13287](https://github.com/influxdata/influxdb/pull/13287): Update cursor to grab when hovering draggable areas -1. [13311](https://github.com/influxdata/influxdb/pull/13311): Sync note editor text and preview scrolling -1. [13249](https://github.com/influxdata/influxdb/pull/13249): Add the ability to create a bucket when creating an organization - -## v2.0.0-alpha.7 [2019-03-28] - -### Features - -1. [12663](https://github.com/influxdata/influxdb/pull/12663): Insert flux function near cursor in flux editor -1. [12678](https://github.com/influxdata/influxdb/pull/12678): Enable the use of variables in the Data Explorer and Cell Editor Overlay -1. [12655](https://github.com/influxdata/influxdb/pull/12655): Add a variable control bar to dashboards to select values for variables. -1. [12706](https://github.com/influxdata/influxdb/pull/12706): Add ability to add variable to script from the side menu. -1. [12791](https://github.com/influxdata/influxdb/pull/12791): Use time range for metaqueries in Data Explorer and Cell Editor Overlay -1. [12827](https://github.com/influxdata/influxdb/pull/12827): Fix screen tearing bug in Raw Data View -1. [12843](https://github.com/influxdata/influxdb/pull/12843): Add copy to clipboard button to export overlays -1. [12826](https://github.com/influxdata/influxdb/pull/12826): Enable copying error messages to the clipboard from dashboard cells -1. [12876](https://github.com/influxdata/influxdb/pull/12876): Add the ability to update token's status in Token list -1. [12821](https://github.com/influxdata/influxdb/pull/12821): Allow variables to be re-ordered within control bar on a dashboard. -1. 
[12888](https://github.com/influxdata/influxdb/pull/12888): Add the ability to delete a template -1. [12901](https://github.com/influxdata/influxdb/pull/12901): Save user preference for variable control bar visibility and default to visible -1. [12910](https://github.com/influxdata/influxdb/pull/12910): Add the ability to clone a template -1. [12958](https://github.com/influxdata/influxdb/pull/12958): Add the ability to import a variable - -### Bug Fixes - -1. [12684](https://github.com/influxdata/influxdb/pull/12684): Fix mismatch in bucket row and header -1. [12703](https://github.com/influxdata/influxdb/pull/12703): Allows user to edit note on cell -1. [12764](https://github.com/influxdata/influxdb/pull/12764): Fix empty state styles in scrapers in org view -1. [12790](https://github.com/influxdata/influxdb/pull/12790): Fix bucket creation error when changing retention rule types. -1. [12793](https://github.com/influxdata/influxdb/pull/12793): Fix task creation error when switching schedule types. -1. [12805](https://github.com/influxdata/influxdb/pull/12805): Fix hidden horizontal scrollbars in flux raw data view -1. [12827](https://github.com/influxdata/influxdb/pull/12827): Fix screen tearing bug in Raw Data View -1. [12961](https://github.com/influxdata/influxdb/pull/12961): Fix scroll clipping in graph legends & dropdown menus -1. [12959](https://github.com/influxdata/influxdb/pull/12959): Fix routing loop - -### UI Improvements - -1. [12782](https://github.com/influxdata/influxdb/pull/12782): Move bucket selection in the query builder to the first card in the list -1. [12850](https://github.com/influxdata/influxdb/pull/12850): Ensure editor is automatically focused in note editor -1. [12915](https://github.com/influxdata/influxdb/pull/12915): Add ability to edit a template's name. - -## v2.0.0-alpha.6 [2019-03-15] - -### Release Notes - -We have updated the way we do predefined dashboards to [include Templates](https://github.com/influxdata/influxdb/pull/12532) in this release, which will cause existing Organizations to not have a System dashboard created when they build a new Telegraf configuration. In order to get this functionality, remove your existing data and start from scratch. - -**NOTE: This will remove all data from your InfluxDB v2.0 instance including timeseries data.** - -On most `linux` systems and on `macOS`: - -```sh -$ rm -r ~/.influxdbv2 -``` - -Once completed, `v2.0.0-alpha.6` can be started. - -### Features - -1. [12496](https://github.com/influxdata/influxdb/pull/12496): Add ability to import a dashboard -1. [12524](https://github.com/influxdata/influxdb/pull/12524): Add ability to import a dashboard from org view -1. [12531](https://github.com/influxdata/influxdb/pull/12531): Add ability to export a dashboard and a task -1. [12615](https://github.com/influxdata/influxdb/pull/12615): Add `run` subcommand to influxd binary. This is also the default when no subcommand is specified. -1. [12523](https://github.com/influxdata/influxdb/pull/12523): Add ability to save a query as a variable from the Data Explorer. -1. [12532](https://github.com/influxdata/influxdb/pull/12532): Add System template on onboarding - -### Bug Fixes - -1. [12641](https://github.com/influxdata/influxdb/pull/12641): Stop scrollbars from covering text in flux editor - -### UI Improvements - -1. 
[12610](https://github.com/influxdata/influxdb/pull/12610): Fine tune keyboard interactions for managing labels from a resource card - -## v2.0.0-alpha.5 [2019-03-08] - -### Release Notes - -This release includes a [breaking change](https://github.com/influxdata/influxdb/pull/12391) to the format that TSM and index data are stored on disk. -Any existing local data will not be queryable once InfluxDB is upgraded to this release. -Prior to installing this release we recommend all storage-engine data is removed from your local InfluxDB `2.x` installation; this can be achieved without losing any of your other InfluxDB `2.x` data (settings etc). -To remove only local storage data, run the following in a terminal. - -On most `linux` systems: - -```sh - -# Replace with your actual username. - -$ rm -r /home//.influxdbv2/engine -``` - -On `macOS`: - -```sh -# Replace with your actual username. - -$ rm -r /Users//.influxdbv2/engine -``` - -Once completed, `v2.0.0-alpha.5` can be started. - -### Features - -1. [12096](https://github.com/influxdata/influxdb/pull/12096): Add labels to cloned tasks -1. [12111](https://github.com/influxdata/influxdb/pull/12111): Add ability to filter resources by clicking a label -1. [12401](https://github.com/influxdata/influxdb/pull/12401): Add ability to add a member to org -1. [12391](https://github.com/influxdata/influxdb/pull/12391): Improve representation of TSM tagsets on disk -1. [12437](https://github.com/influxdata/influxdb/pull/12437): Add ability to remove a member from org - -### Bug Fixes - -1. [12302](https://github.com/influxdata/influxdb/pull/12302): Prevent clipping of code snippets in Firefox -1. [12379](https://github.com/influxdata/influxdb/pull/12379): Prevent clipping of cell edit menus in dashboards - -### UI Improvements - -1. [12302](https://github.com/influxdata/influxdb/pull/12302): Make code snippet copy functionality easier to use -1. [12304](https://github.com/influxdata/influxdb/pull/12304): Always show live preview in Note Cell editor -1. [12317](https://github.com/influxdata/influxdb/pull/12317): Redesign Create Scraper workflow -1. [12317](https://github.com/influxdata/influxdb/pull/12317): Show warning in Telegrafs and Scrapers lists when user has no buckets -1. [12384](https://github.com/influxdata/influxdb/pull/12384): Streamline label addition, removal, and creation from the dashboards list -1. [12464](https://github.com/influxdata/influxdb/pull/12464): Improve label color selection - -## v2.0.0-alpha.4 [2019-02-21] - -### Features - -1. [11954](https://github.com/influxdata/influxdb/pull/11954): Add the ability to run a task manually from tasks page -1. [11990](https://github.com/influxdata/influxdb/pull/11990): Add the ability to select a custom time range in explorer and dashboard -1. [12009](https://github.com/influxdata/influxdb/pull/12009): Display the version information on the login page -1. [12011](https://github.com/influxdata/influxdb/pull/12011): Add the ability to update a Variable's name and query. -1. [12026](https://github.com/influxdata/influxdb/pull/12026): Add labels to cloned dashboard -1. [12018](https://github.com/influxdata/influxdb/pull/12057): Add ability filter resources by label name -1. 
[11973](https://github.com/influxdata/influxdb/pull/11973): Add ability to create or add labels to a resource from labels editor - -### Bug Fixes - -1. [11997](https://github.com/influxdata/influxdb/pull/11997): Update the bucket retention policy to update the time in seconds - -### UI Improvements - -1. [12016](https://github.com/influxdata/influxdb/pull/12016): Update the preview in the label overlays to be shorter -1. [12012](https://github.com/influxdata/influxdb/pull/12012): Add notifications to scrapers page for created/deleted/updated scrapers -1. [12023](https://github.com/influxdata/influxdb/pull/12023): Add notifications to buckets page for created/deleted/updated buckets -1. [12072](https://github.com/influxdata/influxdb/pull/12072): Update the admin page to display error for password length - -## v2.0.0-alpha.3 [2019-02-15] - -### Features - -1. [11809](https://github.com/influxdata/influxdb/pull/11809): Add the ability to name a scraper target -1. [11821](https://github.com/influxdata/influxdb/pull/11821): Display scraper name as the first and only updatable column in scrapers list -1. [11804](https://github.com/influxdata/influxdb/pull/11804): Add the ability to view runs for a task -1. [11824](https://github.com/influxdata/influxdb/pull/11824): Display last completed run for tasks list -1. [11836](https://github.com/influxdata/influxdb/pull/11836): Add the ability to view the logs for a specific task run - -### Bug Fixes - -1. [11819](https://github.com/influxdata/influxdb/pull/11819): Update the inline edit for resource names to guard for empty strings -1. [11852](https://github.com/influxdata/influxdb/pull/11852): Prevent a new template dashboard from being created on every telegraf config update -1. [11848](https://github.com/influxdata/influxdb/pull/11848): Fix overlapping buttons in the telegrafs verify data step - -### UI Improvements - -1. [11764](https://github.com/influxdata/influxdb/pull/11764): Move the download telegraf config button to view config overlay -1. [11879](https://github.com/influxdata/influxdb/pull/11879): Combine permissions for user by type -1. [11938](https://github.com/influxdata/influxdb/pull/11938): Add ordering to UI list items - -## v2.0.0-alpha.2 [2019-02-07] - -### Features - -1. [11677](https://github.com/influxdata/influxdb/pull/11677): Add instructions button to view `$INFLUX_TOKEN` setup for telegraf configs -1. [11693](https://github.com/influxdata/influxdb/pull/11693): Save the \$INFLUX_TOKEN environmental variable in telegraf configs -1. [11700](https://github.com/influxdata/influxdb/pull/11700): Update Tasks tab on Org page to look like Tasks Page -1. [11740](https://github.com/influxdata/influxdb/pull/11740): Add view button to view the telegraf config toml -1. [11522](https://github.com/influxdata/influxdb/pull/11522): Add plugin information step to allow for config naming and configure one plugin at a time -1. [11758](https://github.com/influxdata/influxdb/pull/11758): Update Dashboards tab on Org page to look like Dashboards Page -1. [11810](https://github.com/influxdata/influxdb/pull/11810): Add tab for template variables under organizations page - -## Bug Fixes - -1. [11678](https://github.com/influxdata/influxdb/pull/11678): Update the System Telegraf Plugin bundle to include the swap plugin -1. 
[11722](https://github.com/influxdata/influxdb/pull/11722): Revert behavior allowing users to create authorizations on behalf of another user - -### UI Improvements - -1. [11683](https://github.com/influxdata/influxdb/pull/11683): Change the wording for the plugin config form button to Done -1. [11689](https://github.com/influxdata/influxdb/pull/11689): Change the wording for the Collectors configure step button to Create and Verify -1. [11697](https://github.com/influxdata/influxdb/pull/11697): Standardize page loading spinner styles -1. [11711](https://github.com/influxdata/influxdb/pull/11711): Show checkbox on Save As button in data explorer -1. [11705](https://github.com/influxdata/influxdb/pull/11705): Make collectors plugins side bar visible in only the configure step -1. [11745](https://github.com/influxdata/influxdb/pull/11745): Swap retention policies on Create bucket page - -## v2.0.0-alpha.1 [2019-01-23] - -### Release Notes - -This is the initial alpha release of InfluxDB 2.0. diff --git a/scripts/ci/build-tests.sh b/scripts/ci/build-tests.sh deleted file mode 100755 index e983a10d480..00000000000 --- a/scripts/ci/build-tests.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env bash -set -exo pipefail - -function build_linux () { - local tags=osusergo,netgo,sqlite_foreign_keys,sqlite_json,static_build - local cc - case $(go env GOARCH) in - amd64) - cc=$(xcc linux x86_64) - ;; - arm64) - cc=$(xcc linux aarch64) - tags="$tags,noasm" - ;; - *) - >&2 echo Error: Unknown arch $(go env GOARCH) - exit 1 - ;; - esac - - local -r extld="-fno-PIC -static -Wl,-z,stack-size=8388608" - CGO_ENABLED=1 PKG_CONFIG=$(which pkg-config) CC="${cc}" go-test-compile \ - -tags "$tags" -o "${1}/" -ldflags "-extldflags '$extld'" ./... -} - -function build_mac () { - CGO_ENABLED=1 PKG_CONFIG=$(which pkg-config) CC="$(xcc darwin)" go-test-compile \ - -tags sqlite_foreign_keys,sqlite_json -o "${1}/" ./... -} - -function build_windows () { - CGO_ENABLED=1 PKG_CONFIG=$(which pkg-config) CC="$(xcc windows)" go-test-compile \ - -tags sqlite_foreign_keys,sqlite_json,timetzdata -o "${1}/" ./... -} - -function build_test_tools () { - # Copy pre-built gotestsum out of the cross-builder. - local ext="" - if [ "$(go env GOOS)" = windows ]; then - ext=".exe" - fi - cp "/usr/local/bin/gotestsum_$(go env GOOS)_$(go env GOARCH)${ext}" "$1/gotestsum${ext}" - - # Build test2json from the installed Go distribution. - CGO_ENABLED=0 go build -o "${1}/" -ldflags="-s -w" cmd/test2json -} - -function write_test_metadata () { - # Write version that should be reported in test results. - echo "$(go env GOVERSION) $(go env GOOS)/$(go env GOARCH)" > "${1}/go.version" - - # Write list of all packages. - go list ./... > "${1}/tests.list" -} - -function main () { - if [[ $# != 1 ]]; then - >&2 echo Usage: $0 '' - exit 1 - fi - local -r out_dir="$1" - - mkdir -p "$out_dir" - case $(go env GOOS) in - linux) - build_linux "$out_dir" - ;; - darwin) - build_mac "$out_dir" - ;; - windows) - build_windows "$out_dir" - ;; - *) - >&2 echo Error: unknown OS $(go env GOOS) - exit 1 - ;; - esac - - # Build gotestsum and test2json so downstream jobs can use it without needing `go`. - build_test_tools "$out_dir" - # Write other metadata needed for testing. 
- write_test_metadata "$out_dir" -} - -main ${@} diff --git a/scripts/ci/build.sh b/scripts/ci/build.sh deleted file mode 100755 index 48b84fdcb08..00000000000 --- a/scripts/ci/build.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env bash -set -exo pipefail - -declare -r LINUX_EXTLD="-fno-PIC -static-pie -Wl,-z,stack-size=8388608" - -function main () { - if [[ $# != 3 ]]; then - >&2 echo Usage: $0 '' '' '' - exit 1 - fi - local -r out_dir=$1 build_type=$2 pkg=$3 - local -r version="$(build-version.sh "$build_type")" - - mkdir -p "$out_dir" - - local -r commit=$(git rev-parse --short HEAD) - local -r build_date=$(date -u +'%Y-%m-%dT%H:%M:%SZ') - - # NOTE: This code is purposefully repetitive, to enable easier copy-paste of individual build commands. - local -r os_arch="$(go env GOOS)_$(go env GOARCH)" - case "$os_arch" in - linux_amd64) - export CC="$(xcc linux x86_64)" - CGO_ENABLED=1 PKG_CONFIG=$(which pkg-config) go build \ - -tags assets,sqlite_foreign_keys,sqlite_json,static_build,noasm \ - -buildmode=pie \ - -ldflags "-s -w -X main.version=${version} -X main.commit=${commit} -X main.date=${build_date} -linkmode=external -extld=${CC} -extldflags '${LINUX_EXTLD}'" \ - -o "$out_dir/" \ - "$pkg" - ;; - linux_arm64) - export CC="$(xcc linux aarch64)" - CGO_ENABLED=1 PKG_CONFIG=$(which pkg-config) go build \ - -tags assets,sqlite_foreign_keys,sqlite_json,static_build,noasm \ - -buildmode=pie \ - -ldflags "-s -w -X main.version=${version} -X main.commit=${commit} -X main.date=${build_date} -linkmode=external -extld=${CC} -extldflags '${LINUX_EXTLD}'" \ - -o "$out_dir/" \ - "$pkg" - ;; - darwin_amd64) - export CC="$(xcc darwin)" - CGO_ENABLED=1 PKG_CONFIG=$(which pkg-config) go build \ - -tags assets,sqlite_foreign_keys,sqlite_json \ - -buildmode pie \ - -ldflags "-s -w -X main.version=${version} -X main.commit=${commit} -X main.date=${build_date}" \ - -o "$out_dir/" \ - "$pkg" - ;; - windows_amd64) - export CC="$(xcc windows)" - CGO_ENABLED=1 PKG_CONFIG=$(which pkg-config) go build \ - -tags assets,sqlite_foreign_keys,sqlite_json,timetzdata \ - -buildmode exe \ - -ldflags "-s -w -X main.version=${version} -X main.commit=${commit} -X main.date=${build_date}" \ - -o "$out_dir/" \ - "$pkg" - ;; - *) - >&2 echo Error: unsupported OS_ARCH pair "'$os_arch'" - exit 1 - ;; - esac -} - -main ${@} diff --git a/scripts/ci/check-system-go-matches-go-mod.sh b/scripts/ci/check-system-go-matches-go-mod.sh deleted file mode 100755 index 53ef90ae980..00000000000 --- a/scripts/ci/check-system-go-matches-go-mod.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -# Check that the same major/minor version of go is used in the mod file as on CI to prevent -# the two from accidentally getting out-of-sync. 
-function main () { - local -r version_diff=$(go mod edit -go=$(go version | sed -n 's/^.*go\([0-9]*.[0-9]*\).*$/\1/p') -print | diff - go.mod) - if [ -n "$version_diff" ]; then - >&2 echo Error: unexpected difference in go version: - >&2 echo "$version_diff" - exit 1 - fi -} - -main diff --git a/scripts/ci/install-aws-cli.sh b/scripts/ci/install-aws-cli.sh deleted file mode 100755 index 8d2d7ea7b0c..00000000000 --- a/scripts/ci/install-aws-cli.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -ex - - -cat << EOF > aws_pub_key ------BEGIN PGP PUBLIC KEY BLOCK----- - -mQINBF2Cr7UBEADJZHcgusOJl7ENSyumXh85z0TRV0xJorM2B/JL0kHOyigQluUG -ZMLhENaG0bYatdrKP+3H91lvK050pXwnO/R7fB/FSTouki4ciIx5OuLlnJZIxSzx -PqGl0mkxImLNbGWoi6Lto0LYxqHN2iQtzlwTVmq9733zd3XfcXrZ3+LblHAgEt5G -TfNxEKJ8soPLyWmwDH6HWCnjZ/aIQRBTIQ05uVeEoYxSh6wOai7ss/KveoSNBbYz -gbdzoqI2Y8cgH2nbfgp3DSasaLZEdCSsIsK1u05CinE7k2qZ7KgKAUIcT/cR/grk -C6VwsnDU0OUCideXcQ8WeHutqvgZH1JgKDbznoIzeQHJD238GEu+eKhRHcz8/jeG -94zkcgJOz3KbZGYMiTh277Fvj9zzvZsbMBCedV1BTg3TqgvdX4bdkhf5cH+7NtWO -lrFj6UwAsGukBTAOxC0l/dnSmZhJ7Z1KmEWilro/gOrjtOxqRQutlIqG22TaqoPG -fYVN+en3Zwbt97kcgZDwqbuykNt64oZWc4XKCa3mprEGC3IbJTBFqglXmZ7l9ywG -EEUJYOlb2XrSuPWml39beWdKM8kzr1OjnlOm6+lpTRCBfo0wa9F8YZRhHPAkwKkX -XDeOGpWRj4ohOx0d2GWkyV5xyN14p2tQOCdOODmz80yUTgRpPVQUtOEhXQARAQAB -tCFBV1MgQ0xJIFRlYW0gPGF3cy1jbGlAYW1hem9uLmNvbT6JAlQEEwEIAD4WIQT7 -Xbd/1cEYuAURraimMQrMRnJHXAUCXYKvtQIbAwUJB4TOAAULCQgHAgYVCgkICwIE -FgIDAQIeAQIXgAAKCRCmMQrMRnJHXJIXEAChLUIkg80uPUkGjE3jejvQSA1aWuAM -yzy6fdpdlRUz6M6nmsUhOExjVIvibEJpzK5mhuSZ4lb0vJ2ZUPgCv4zs2nBd7BGJ -MxKiWgBReGvTdqZ0SzyYH4PYCJSE732x/Fw9hfnh1dMTXNcrQXzwOmmFNNegG0Ox -au+VnpcR5Kz3smiTrIwZbRudo1ijhCYPQ7t5CMp9kjC6bObvy1hSIg2xNbMAN/Do -ikebAl36uA6Y/Uczjj3GxZW4ZWeFirMidKbtqvUz2y0UFszobjiBSqZZHCreC34B -hw9bFNpuWC/0SrXgohdsc6vK50pDGdV5kM2qo9tMQ/izsAwTh/d/GzZv8H4lV9eO -tEis+EpR497PaxKKh9tJf0N6Q1YLRHof5xePZtOIlS3gfvsH5hXA3HJ9yIxb8T0H -QYmVr3aIUes20i6meI3fuV36VFupwfrTKaL7VXnsrK2fq5cRvyJLNzXucg0WAjPF -RrAGLzY7nP1xeg1a0aeP+pdsqjqlPJom8OCWc1+6DWbg0jsC74WoesAqgBItODMB -rsal1y/q+bPzpsnWjzHV8+1/EtZmSc8ZUGSJOPkfC7hObnfkl18h+1QtKTjZme4d -H17gsBJr+opwJw/Zio2LMjQBOqlm3K1A4zFTh7wBC7He6KPQea1p2XAMgtvATtNe -YLZATHZKTJyiqA== -=vYOk ------END PGP PUBLIC KEY BLOCK----- -EOF - -gpg --import aws_pub_key - -curl -o awscliv2.sig https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip.sig -curl -o awscliv2.zip https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip -gpg --verify awscliv2.sig awscliv2.zip - -unzip awscliv2.zip -sudo ./aws/install diff --git a/scripts/ci/lint/flags.bash b/scripts/ci/lint/flags.bash deleted file mode 100755 index f96ef88389c..00000000000 --- a/scripts/ci/lint/flags.bash +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -set -e - -# This script regenerates the flag list and checks for differences to ensure flags -# have been regenerated in case of changes to flags.yml. - -make flags - -if ! git --no-pager diff --exit-code -- ./kit/feature/list.go -then - echo "Differences detected! Run 'make flags' to regenerate feature flag list." 
- exit 1 -fi diff --git a/scripts/ci/main.tf b/scripts/ci/main.tf deleted file mode 100644 index 07081964894..00000000000 --- a/scripts/ci/main.tf +++ /dev/null @@ -1,152 +0,0 @@ -terraform { - required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 2.70" - } - } -} - -#################### -# Declare variables - -# Variables without default values -# these variables need to be changed - see terraform.tfvars -variable "key_name" { } -variable "package_path" { } -variable "instance_name" { } - -# Variables with default values -variable "additional_files_dir" { - type = string - default = "" -} - -variable "instance_type" { - type = string - default = "t3.micro" -} - -variable "region" { - type = string - default = "us-west-2" -} - -#################### -# Declare data -locals { - additional_files_dest = "/home/ubuntu/files" - package_path = "/tmp/workspace/packages" - ubuntu_home = "/home/ubuntu" - ubuntu_user = "ubuntu" -} - -data "aws_ami" "ubuntu" { - most_recent = true - - filter { - name = "name" - values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"] - } - - filter { - name = "virtualization-type" - values = ["hvm"] - } - - owners = ["099720109477"] # Canonical -} - -#################### -# Declare resources -provider "aws" { - profile = "default" - region = var.region -} - -# The security group defines access restrictions -resource "aws_security_group" "influxdb_test_sg" { - ingress { - description = "Allow ssh connection" - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - description = "Allow all egress" - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } -} - -# The data node for the cluster -resource "aws_instance" "test_node" { - count = 1 - - ami = data.aws_ami.ubuntu.id - instance_type = var.instance_type - key_name = var.key_name - vpc_security_group_ids = [aws_security_group.influxdb_test_sg.id] - - tags = { - Name = var.instance_name - } - - provisioner "file" { - source = var.package_path - destination = "${local.ubuntu_home}/influxdb.deb" - - connection { - type = "ssh" - user = local.ubuntu_user - host = self.public_dns - agent = true - } - } - - provisioner "remote-exec" { - inline = [ - "mkdir -p ${local.additional_files_dest}", - ] - - connection { - type = "ssh" - user = local.ubuntu_user - host = self.public_dns - agent = true - } - } - - provisioner "file" { - source = var.additional_files_dir - destination = "${local.additional_files_dest}" - - connection { - type = "ssh" - user = local.ubuntu_user - host = self.public_dns - agent = true - } - } - - provisioner "remote-exec" { - inline = [ - "chmod +x ${local.additional_files_dest}/*.sh", - ] - - connection { - type = "ssh" - user = local.ubuntu_user - host = self.public_dns - agent = true - } - } -} - -#################### -# Declare outputs -output "test_node_ssh" { value = aws_instance.test_node.0.public_dns } diff --git a/scripts/ci/perf-tests/iot.yaml b/scripts/ci/perf-tests/iot.yaml deleted file mode 100644 index a20e8d63384..00000000000 --- a/scripts/ci/perf-tests/iot.yaml +++ /dev/null @@ -1,94 +0,0 @@ ---- -name: iot -start_time: "2018-01-01T00:00:00Z" -end_time: "2018-01-01T12:00:00Z" -data: - type: bulk_data_file_loader -query_tests: - # influxql versions (http means influxql) - - {"type": "build_query_file", "format": "http", "use_case": "window-agg", "query_type": "min"} - - {"type": "build_query_file", "format": "http", "use_case": "window-agg", "query_type": "mean"} - - {"type": 
"build_query_file", "format": "http", "use_case": "window-agg", "query_type": "max"} - - {"type": "build_query_file", "format": "http", "use_case": "window-agg", "query_type": "first"} - - {"type": "build_query_file", "format": "http", "use_case": "window-agg", "query_type": "last"} - - {"type": "build_query_file", "format": "http", "use_case": "window-agg", "query_type": "count"} - - {"type": "build_query_file", "format": "http", "use_case": "window-agg", "query_type": "sum"} - - {"type": "build_query_file", "format": "http", "use_case": "group-agg", "query_type": "min"} - - {"type": "build_query_file", "format": "http", "use_case": "group-agg", "query_type": "mean"} - - {"type": "build_query_file", "format": "http", "use_case": "group-agg", "query_type": "max"} - - {"type": "build_query_file", "format": "http", "use_case": "group-agg", "query_type": "first"} - - {"type": "build_query_file", "format": "http", "use_case": "group-agg", "query_type": "last"} - - {"type": "build_query_file", "format": "http", "use_case": "group-agg", "query_type": "count"} - - {"type": "build_query_file", "format": "http", "use_case": "group-agg", "query_type": "sum"} - - {"type": "build_query_file", "format": "http", "use_case": "bare-agg", "query_type": "min"} - - {"type": "build_query_file", "format": "http", "use_case": "bare-agg", "query_type": "mean"} - - {"type": "build_query_file", "format": "http", "use_case": "bare-agg", "query_type": "max"} - - {"type": "build_query_file", "format": "http", "use_case": "bare-agg", "query_type": "first"} - - {"type": "build_query_file", "format": "http", "use_case": "bare-agg", "query_type": "last"} - - {"type": "build_query_file", "format": "http", "use_case": "bare-agg", "query_type": "count"} - - {"type": "build_query_file", "format": "http", "use_case": "bare-agg", "query_type": "sum"} - - {"type": "build_query_file", "format": "http", "use_case": "ungrouped-agg", "query_type": "min"} - - {"type": "build_query_file", "format": "http", "use_case": "ungrouped-agg", "query_type": "mean"} - - {"type": "build_query_file", "format": "http", "use_case": "ungrouped-agg", "query_type": "max"} - - {"type": "build_query_file", "format": "http", "use_case": "ungrouped-agg", "query_type": "first"} - - {"type": "build_query_file", "format": "http", "use_case": "ungrouped-agg", "query_type": "last"} - - {"type": "build_query_file", "format": "http", "use_case": "ungrouped-agg", "query_type": "count"} - - {"type": "build_query_file", "format": "http", "use_case": "ungrouped-agg", "query_type": "sum"} - - - {"type": "build_query_file", "format": "http", "use_case": "iot", "query_type": "fast-query-small-data"} - - {"type": "build_query_file", "format": "http", "use_case": "iot", "query_type": "standalone-filter"} - - {"type": "build_query_file", "format": "http", "use_case": "iot", "query_type": "aggregate-keep"} - - {"type": "build_query_file", "format": "http", "use_case": "iot", "query_type": "aggregate-drop"} - - {"type": "build_query_file", "format": "http", "use_case": "iot", "query_type": "sorted-pivot"} - - - {"type": "build_query_file", "format": "http", "use_case": "group-window-transpose-low-card", "query_type": "min"} - - {"type": "build_query_file", "format": "http", "use_case": "group-window-transpose-low-card", "query_type": "mean"} - - {"type": "build_query_file", "format": "http", "use_case": "group-window-transpose-low-card", "query_type": "max"} - - {"type": "build_query_file", "format": "http", "use_case": "group-window-transpose-low-card", "query_type": 
"first"} - - {"type": "build_query_file", "format": "http", "use_case": "group-window-transpose-low-card", "query_type": "last"} - - {"type": "build_query_file", "format": "http", "use_case": "group-window-transpose-low-card", "query_type": "count"} - - {"type": "build_query_file", "format": "http", "use_case": "group-window-transpose-low-card", "query_type": "sum"} - - # flux versions - - {"type": "build_query_file", "format": "flux-http", "use_case": "window-agg", "query_type": "min"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "window-agg", "query_type": "mean"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "window-agg", "query_type": "max"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "window-agg", "query_type": "first"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "window-agg", "query_type": "last"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "window-agg", "query_type": "count"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "window-agg", "query_type": "sum"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-agg", "query_type": "min"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-agg", "query_type": "mean"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-agg", "query_type": "max"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-agg", "query_type": "first"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-agg", "query_type": "last"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-agg", "query_type": "count"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-agg", "query_type": "sum"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "bare-agg", "query_type": "min"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "bare-agg", "query_type": "mean"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "bare-agg", "query_type": "max"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "bare-agg", "query_type": "first"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "bare-agg", "query_type": "last"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "bare-agg", "query_type": "count"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "bare-agg", "query_type": "sum"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "ungrouped-agg", "query_type": "min"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "ungrouped-agg", "query_type": "mean"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "ungrouped-agg", "query_type": "max"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "ungrouped-agg", "query_type": "first"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "ungrouped-agg", "query_type": "last"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "ungrouped-agg", "query_type": "count"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "ungrouped-agg", "query_type": "sum"} - - - {"type": "build_query_file", "format": "flux-http", "use_case": "iot", "query_type": "fast-query-small-data"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "iot", "query_type": "standalone-filter"} - - {"type": 
"build_query_file", "format": "flux-http", "use_case": "iot", "query_type": "aggregate-keep"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "iot", "query_type": "aggregate-drop"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "iot", "query_type": "sorted-pivot"} - - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-window-transpose-low-card", "query_type": "min"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-window-transpose-low-card", "query_type": "mean"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-window-transpose-low-card", "query_type": "max"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-window-transpose-low-card", "query_type": "first"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-window-transpose-low-card", "query_type": "last"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-window-transpose-low-card", "query_type": "count"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-window-transpose-low-card", "query_type": "sum"} diff --git a/scripts/ci/perf-tests/metaquery.yaml b/scripts/ci/perf-tests/metaquery.yaml deleted file mode 100644 index 527ecf91b21..00000000000 --- a/scripts/ci/perf-tests/metaquery.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- -name: metaquery -start_time: "2018-01-01T00:00:00Z" -end_time: "2019-01-01T00:00:00Z" -data: - type: bulk_data_file_loader -query_tests: - - {"type": "build_query_file", "format": "http", "use_case": "metaquery", "query_type": "field-keys"} - - {"type": "build_query_file", "format": "http", "use_case": "metaquery", "query_type": "tag-values"} - - {"type": "build_query_file", "format": "http", "use_case": "metaquery", "query_type": "cardinality"} - - - {"type": "build_query_file", "format": "http", "use_case": "group-window-transpose-high-card", "query_type": "min"} - - {"type": "build_query_file", "format": "http", "use_case": "group-window-transpose-high-card", "query_type": "mean"} - - {"type": "build_query_file", "format": "http", "use_case": "group-window-transpose-high-card", "query_type": "max"} - - {"type": "build_query_file", "format": "http", "use_case": "group-window-transpose-high-card", "query_type": "first"} - - {"type": "build_query_file", "format": "http", "use_case": "group-window-transpose-high-card", "query_type": "last"} - - {"type": "build_query_file", "format": "http", "use_case": "group-window-transpose-high-card", "query_type": "count"} - - {"type": "build_query_file", "format": "http", "use_case": "group-window-transpose-high-card", "query_type": "sum"} - - - {"type": "build_query_file", "format": "flux-http", "use_case": "metaquery", "query_type": "field-keys"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "metaquery", "query_type": "tag-values"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "metaquery", "query_type": "cardinality"} - - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-window-transpose-high-card", "query_type": "min"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-window-transpose-high-card", "query_type": "mean"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-window-transpose-high-card", "query_type": "max"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-window-transpose-high-card", "query_type": "first"} - - {"type": 
"build_query_file", "format": "flux-http", "use_case": "group-window-transpose-high-card", "query_type": "last"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-window-transpose-high-card", "query_type": "count"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "group-window-transpose-high-card", "query_type": "sum"} diff --git a/scripts/ci/perf-tests/multi-measurement.yaml b/scripts/ci/perf-tests/multi-measurement.yaml deleted file mode 100644 index 4771ada5279..00000000000 --- a/scripts/ci/perf-tests/multi-measurement.yaml +++ /dev/null @@ -1,9 +0,0 @@ ---- -name: multi-measurement -start_time: "2018-01-01T00:00:00Z" -end_time: "2019-01-01T00:00:00Z" -data: - type: bulk_data_file_loader -query_tests: - - {"type": "build_query_file", "format": "http", "use_case": "multi-measurement", "query_type": "multi-measurement-or"} - - {"type": "build_query_file", "format": "flux-http", "use_case": "multi-measurement", "query_type": "multi-measurement-or"} diff --git a/scripts/ci/perf_test.sh b/scripts/ci/perf_test.sh deleted file mode 100755 index 26a11a9ce05..00000000000 --- a/scripts/ci/perf_test.sh +++ /dev/null @@ -1,59 +0,0 @@ -set -ex -o pipefail - -source vars.sh - -# get latest ubuntu 21.10 ami for us-west-2 -ami_id=$(aws --region us-west-2 ssm get-parameters --names /aws/service/canonical/ubuntu/server/22.04/stable/current/amd64/hvm/ebs-gp2/ami-id --query 'Parameters[0].[Value]' --output text) - -# launch ec2 instance -datestring=$(date +%Y%m%d) -instance_info=$(aws --region us-west-2 ec2 run-instances \ - --image-id $ami_id \ - --instance-type $DATA_I_TYPE \ - --block-device-mappings DeviceName=/dev/sda1,Ebs={VolumeSize=200} \ - --key-name circleci-oss-test \ - --security-group-ids sg-03004366a38eccc97 \ - --subnet-id subnet-0c079d746f27ede5e \ - --tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=oss-perftest-$datestring-${CIRCLE_BRANCH}-${CIRCLE_SHA1}}]") - -# get instance info -ec2_instance_id=$(echo $instance_info | jq -r .Instances[].InstanceId) -echo "export EC2_INSTANCE_ID=$ec2_instance_id" >> vars.sh - -ec2_ip="" -while [ -z $ec2_ip ]; do - sleep 5 - ec2_ip=$(aws \ - --region us-west-2 \ - ec2 describe-instances \ - --instance-ids $ec2_instance_id \ - --query "Reservations[].Instances[].PublicIpAddress" \ - --output text) -done -echo "export EC2_IP=$ec2_ip" >> vars.sh - -# push binary and script to instance -debname=$(find /tmp/workspace/artifacts/influxdb2*amd64.deb) -base_debname=$(basename $debname) -source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" - -# On the first try, add the host key to the list of known hosts -until ssh -o StrictHostKeyChecking=no ubuntu@$ec2_ip echo Connected ; do - echo Tried to ssh to ec2 instance, will retry - sleep 5 -done - -scp $debname ubuntu@$ec2_ip:/home/ubuntu/$base_debname -scp ${source_dir}/run_perftest.sh ubuntu@$ec2_ip:/home/ubuntu/run_perftest.sh -scp -r ${source_dir}/perf-tests ubuntu@$ec2_ip:/home/ubuntu/perf-tests - -echo "export TEST_COMMIT_TIME=$(git show -s --format=%ct)" >> vars.sh -echo "export CIRCLE_TEARDOWN=true" >> vars.sh -echo "export CIRCLE_TOKEN=${CIRCLE_API_CALLBACK_TOKEN}" >> vars.sh -echo "export CLOUD2_BUCKET=${CLOUD2_PERFTEST_BUCKET}" >> vars.sh -echo "export CLOUD2_ORG=${CLOUD2_PERFTEST_ORG}" >> vars.sh -echo "export DB_TOKEN=${CLOUD2_PERFTEST_TOKEN}" >> vars.sh -echo "export INFLUXDB_VERSION=${CIRCLE_BRANCH}" >> vars.sh -echo "export NGINX_HOST=localhost" >> vars.sh -echo "export TEST_COMMIT=${CIRCLE_SHA1}" >> vars.sh -scp vars.sh 
ubuntu@$ec2_ip:/home/ubuntu/vars.sh diff --git a/scripts/ci/run-monitor-ci-tests.bash b/scripts/ci/run-monitor-ci-tests.bash deleted file mode 100755 index 12963c42a80..00000000000 --- a/scripts/ci/run-monitor-ci-tests.bash +++ /dev/null @@ -1,325 +0,0 @@ -#!/bin/bash - -set -eu -o pipefail - -######################## -# --- Script Summary --- -# This script is the junction between the CIs of the public UI and influxdb OSS repos and the monitor-ci CI (private). -# When the public CI is started, this script kicks off the private CI and waits for it to complete. -# This script uses the CircleCI APIs to make this magic happen. -# -# If the private CI fails, this script will collect the names and artifacts of the failed jobs and report them. -# This script should support multiple workflows if more are added, although it has not been tested. -# This script waits 50 minutes for the private CI to complete otherwise it fails. -# -# **For Running from the UI Repository:** -# If you want to retry failed jobs in the private CI, simply retry this job from the public CI. -# - This script uses your commit SHA to search for a failed pipeline to retry before starting a new one. -# -# If you retry a failing job in the private CI and it passes, you can safely rerun this job in the public CI. -# - This script uses your commit SHA to search for a passing pipeline before starting a new one. -# - If you rerun the private CI and it passes, this script will find that pipeline and will not start a new one. -# - In this situation the script will exit quickly with success. -# -# Pipeline Workflow options: -# - RUN_WORKFLOW Env Var required to determine which workflow to run. -# - enum options: 'build_oss', 'build_oss_embedded' -# - e.g. RUN_WORKFLOW='build_oss_embedded' -# -# Required Env Vars for all workflows: -# - RUN_WORKFLOW: enum for which pipeline workflow -# - API_KEY: the CircleCI API access key -# - MONITOR_CI_BRANCH: the branch of the monitor-ci repo to start a pipeline with (usually 'master') -# -# **For OSS-specific testing:** -# Since the OSS private CI is very simple, retrying a failing job in the private CI is not supported. -# OSS-specific testing can include evaluating changes on the OSS master branch against the latest UI acceptance image -# to make sure OSS API changes don't break the UI, and evaluating changes to an OSS binary with embedded UI assets with -# a specified UI commit that the UI is from (like a tagged release commit). This allows for the master branches of -# both the UI and influxdb OSS respositories to always stay compatible, and for OSS release builds to be e2e tested -# without needing to duplicate the entire private test infrastructure provided in monitor-ci. 
-# -# Required Env Vars for Testing Changes to OSS Master with the Latest Image Published from UI Master: -# - RUN_WORKFLOW='build_oss' -# - OSS_SHA: the influxdb repo commit SHA we're running against -# -# Required Env Vars for Testing Changes to an OSS Image with Embedded UI with e2e tests from a Specific UI Commit: -# - RUN_WORKFLOW='build_oss_embedded' -# - UI_SHA: the UI repo commit SHA we want to build and run e2e tests from -# - UI_BRANCH: the UI branch where the commit exists -# - OSS_SHA: the influxdb repo commit SHA we're running against - -######################## - -# starts a new monitor-ci pipeline with provided parameters -startNewPipeline() { - pipelineStartMsg=$1 - reqData=$2 - - printf "\n${pipelineStartMsg}\n" - pipeline=$(curl -s --fail --request POST \ - --url https://circleci.com/api/v2/project/gh/influxdata/monitor-ci/pipeline \ - --header "Circle-Token: ${API_KEY}" \ - --header 'content-type: application/json' \ - --header 'Accept: application/json' \ - --data "${reqData}") - - if [ $? != 0 ]; then - echo "failed to start the monitor-ci pipeline, quitting" - exit 1 - fi - - # set variables to identify pipeline to watch - pipeline_id=$(echo ${pipeline} | jq -r '.id') - pipeline_number=$(echo ${pipeline} | jq -r '.number') - - printf "\nwaiting for monitor-ci pipeline to begin...\n" - sleep 1m - printf "\nmonitor-ci pipeline has begun. Running pipeline number ${pipeline_number} with id ${pipeline_id}\n" -} - -# retries all failed jobs from a previously failed monitor-ci pipeline -retryFailedPipeline() { - failed_pipeline_workflow_id=$1 - failed_pipeline_id=$2 - failed_pipeline_number=$3 - - pipeline=$(curl -s --fail --request POST \ - --url https://circleci.com/api/v2/workflow/${failed_pipeline_workflow_id}/rerun \ - --header "Circle-Token: ${API_KEY}" \ - --header 'content-type: application/json' \ - --header 'Accept: application/json' \ - --data "{ \"from_failed\": true }") - - if [ $? != 0 ]; then - echo "failed to re-run the monitor-ci pipeline, quitting" - exit 1 - fi - - # set variables to identify pipeline to watch - pipeline_id=$failed_pipeline_id - pipeline_number=$failed_pipeline_number - - printf "\nwaiting for monitor-ci pipeline to begin the re-run...\n" - sleep 1m - printf "\nmonitor-ci pipeline re-run has begun. 
Running pipeline number ${pipeline_number} with id ${pipeline_id}\n" -} - -# cancel if already have a passing pipeline for a given SHA -earlyTermination() { - local current_sha=$1 - local regex_line=$2 - local regex_exclusion=$3 - - all_pipelines=$(curl -s --request GET \ - --url "https://circleci.com/api/v2/project/gh/influxdata/monitor-ci/pipeline" \ - --header "Circle-Token: ${API_KEY}" \ - --header 'content-type: application/json' \ - --header 'Accept: application/json') - - # check the status of the workflows for each of these pipelines - all_pipelines_ids=( $(echo ${all_pipelines} | jq -r '.items | .[].id') ) - for pipeline_id in "${all_pipelines_ids[@]}"; do - - config=$(curl -s --request GET \ - --url "https://circleci.com/api/v2/pipeline/${pipeline_id}/config" \ - --header "Circle-Token: ${API_KEY}" \ - --header 'content-type: application/json' \ - --header 'Accept: application/json') - - # finds the SHA parameter used in this pipeline by hunting for a specific line - pipeline_sha=$(echo ${config} | jq '.compiled' | grep -o ${regex_line} | grep -v ${regex_exclusion} | head -1 | sed 's/=/\n/g' | tail -1 || true) - - if [[ "${current_sha}" == "${pipeline_sha}" ]]; then - # check if this pipeline's 'build' workflow is passing - workflows=$(curl -s --request GET \ - --url "https://circleci.com/api/v2/pipeline/${pipeline_id}/workflow" \ - --header "Circle-Token: ${API_KEY}" \ - --header 'content-type: application/json' \ - --header 'Accept: application/json') - - number_build_success_workflows=$(echo ${workflows} | jq '.items | map(select(.name == "build" and .status == "success")) | length') - if [ $number_build_success_workflows -gt 0 ]; then - # we've found a successful run - found_passing_pipeline=1 - break - fi - - number_build_failed_workflows=$(echo ${workflows} | jq '.items | map(select(.name == "build" and .status == "failed")) | length') - if [ $number_build_failed_workflows -gt 0 ]; then - # there's a failed run, let's retry it - found_failed_pipeline=1 - failed_pipeline_workflow_id=$(echo ${workflows} | jq -r '.items | .[0] | .id') - failed_pipeline_id=$pipeline_id - failed_pipeline_number=$(echo ${all_pipelines} | jq -r --arg pipeline_id "${pipeline_id}" '.items | map(select(.id == $pipeline_id)) | .[0] | .number') - break - fi - fi - done - - # terminate early if we found a passing pipeline for this SHA - if [ $found_passing_pipeline -eq 1 ]; then - printf "\nSUCCESS: Found a passing monitor-ci pipeline for this SHA, will not re-run these tests\n" - exit 0 - elif [ $found_failed_pipeline -eq 1 ]; then - printf "\nfound a failed monitor-ci pipeline for this SHA, will retry the failed jobs\n" - else - printf "\nno passing monitor-ci pipelines found for this SHA, starting a new one\n" - fi -} - -# make dir for artifacts -mkdir -p monitor-ci/test-artifacts/results/{build-oss-image,oss-e2e,build-image,cloud-e2e,cloud-lighthouse,smoke,build-prod-image,deploy}/{shared,oss,cloud} - -# get monitor-ci pipelines we've already run on this SHA -found_passing_pipeline=0 -found_failed_pipeline=0 - -if [[ -z "${API_KEY:-}" ]] || [[ -z "${MONITOR_CI_BRANCH:-}" ]]; then - printf "\nERROR: monitor-ci pipeline missing required env vars. Must set API_KEY and MONITOR_CI_BRANCH.\n" - exit 1 -fi - -if [[ "${RUN_WORKFLOW}" == "build_oss" ]]; then - required_workflows=( "build_oss" ) - if [[ -z "${OSS_SHA:-}" ]]; then - printf "\nERROR: monitor-ci pipeline missing required env vars. 
Must set OSS_SHA.\n" - exit 1 - fi -elif [[ "${RUN_WORKFLOW}" == "build_oss_embedded" ]]; then - required_workflows=( "build_oss_embedded" ) - if [[ -z "${UI_SHA:-}" ]] || [[ -z "${UI_BRANCH:-}" ]] || [[ -z "${OSS_SHA:-}" ]]; then - printf "\nERROR: monitor-ci pipeline missing required env vars. Must set UI_SHA, UI_BRANCH, and OSS_SHA.\n" - exit 1 - fi -else - printf "\nERROR: monitor-ci pipeline missing env var RUN_WORKFLOW.\nMust choose one of: 'build_oss', 'build_oss_embedded'\n" - exit 1 -fi - -pipelineStartMsg="starting monitor-ci pipeline targeting monitor-ci branch ${MONITOR_CI_BRANCH}, UI branch ${UI_BRANCH:-master} and using UI SHA ${UI_SHA:-latest}, using OSS SHA ${OSS_SHA:-latest}." - -reqData="{\"branch\":\"${MONITOR_CI_BRANCH}\", \"parameters\":{ \"run-workflow\":\"${RUN_WORKFLOW}\", \"ui-sha\":\"${UI_SHA:-not-a-real-sha}\", \"ui-branch\":\"${UI_BRANCH:-master}\", \"oss-sha\":\"${OSS_SHA:-}\"}}" - -# start a new pipeline if we didn't find an existing one to retry -if [ $found_failed_pipeline -eq 0 ]; then - startNewPipeline "${pipelineStartMsg}" "${reqData}" -else - retryFailedPipeline ${failed_pipeline_workflow_id} ${failed_pipeline_id} ${failed_pipeline_number} -fi - -# poll the status of the monitor-ci pipeline -is_failure=0 -attempts=0 -max_attempts=30 # minutes -while [ $attempts -le $max_attempts ]; -do - - workflows=$(curl -s --request GET \ - --url "https://circleci.com/api/v2/pipeline/${pipeline_id}/workflow" \ - --header "Circle-Token: ${API_KEY}" \ - --header 'content-type: application/json' \ - --header 'Accept: application/json') - - - number_running_workflows=$(echo ${workflows} | jq -r '.items | map(select(.status == "running" or .status == "failing")) | length') - - # when the pipeline has finished - if [ ${number_running_workflows} -eq 0 ]; then - # report failed jobs per required workflow - for required_workflow_name in "${required_workflows[@]}"; do - workflow_id=$(echo ${workflows} | jq -r --arg name "${required_workflow_name}" '.items | map(select(.name == $name and .status == "success")) | .[].id') - - if [ -n "${workflow_id}" ]; then - printf "\nSUCCESS: monitor-ci workflow with id ${workflow_id} passed: https://app.circleci.com/pipelines/github/influxdata/monitor-ci/${pipeline_number}/workflows/${workflow_id} \n" - else - # set job failure - is_failure=1 - - # get the workflow_id of this failed required workflow (if there are multiple, get the most recent one) - workflow_id=$(echo ${workflows} | jq -r --arg name "${required_workflow_name}" '.items |= sort_by(.created_at) | .items | map(select(.name == $name and .status == "failed")) | .[-1].id') - - # get the jobs that failed for this workflow - jobs=$(curl -s --request GET \ - --url "https://circleci.com/api/v2/workflow/${workflow_id}/job" \ - --header "Circle-Token: ${API_KEY}" \ - --header 'content-type: application/json' \ - --header 'Accept: application/json') - - # print the names of the failed jobs - printf "\nFailed jobs:\n" - failed_jobs=$(echo ${jobs} | jq '.items | map(select(.status == "failed"))') - failed_jobs_names=( $(echo ${failed_jobs} | jq -r '.[].name') ) - for name in "${failed_jobs_names[@]}"; do - printf " - ${name}\n" - done - - # get the artifacts for each failed job - printf "\nArtifacts from failed jobs:\n" - for name in "${failed_jobs_names[@]}"; do - printf "\n===== ${name} =====\n" - job_number=$(echo ${failed_jobs} | jq -r --arg name "${name}" 'map(select(.name == $name)) | .[].job_number') - artifacts=$(curl -s --request GET \ - --url 
"https://circleci.com/api/v1.1/project/github/influxdata/monitor-ci/${job_number}/artifacts" \ - --header "Circle-Token: ${API_KEY}" \ - --header 'content-type: application/json' \ - --header 'Accept: application/json') - - artifacts_length=$(echo ${artifacts} | jq -r 'length') - if [ ${artifacts_length} -eq 0 ]; then - printf "\n No artifacts for this failed job.\n" - else - artifacts_urls=( $(echo ${artifacts} | jq -r '.[].url') ) - # download each artifact - for url in "${artifacts_urls[@]}"; do - path=$(echo ${artifacts} | jq --arg url "${url}" 'map(select(.url == $url)) | .[].pretty_path') - - # download artifact - filename=$(basename "${path}") - filename="${filename::-1}" # removes extra " from end - # put shared artifacts in the shared folder - if [[ "${path}" == *"shared"* ]] ; then - subdirectory="shared" - else - if [[ "${path}" == *"cloud"* ]] ; then - subdirectory="cloud" - else - subdirectory="oss" - fi - fi - safeName="${name//\//-}" - if [[ "${safeName}" == *"remocal"* ]]; then - # put all remocal artifacts in the same parent directory - safeName="remocal/${safeName}" - fi - mkdir -p "monitor-ci/test-artifacts/results/${safeName}/${subdirectory}" - output="monitor-ci/test-artifacts/results/${safeName}/${subdirectory}/${filename}" - curl -L -s --request GET \ - --output "${output}" \ - --url "${url}" \ - --header "Circle-Token: ${API_KEY}" - done - printf "\n ${artifacts_length} artifacts successfully downloaded for this failed job.\n" - fi - done - - printf "\n\nFAILURE: monitor-ci workflow with id ${workflow_id} failed.\n" - printf "\n********************************************************\n" - printf "monitor-ci pipeline link: \nhttps://app.circleci.com/pipelines/github/influxdata/monitor-ci/${pipeline_number}/workflows/${workflow_id}\n" - printf "\n********************************************************\n" - fi - done - - exit $is_failure - fi - - # sleep 1 minute and poll the status again - attempts=$(($attempts+1)) - remaining_attempts=$(($max_attempts-$attempts)) - printf "\nmonitor-ci pipeline ${pipeline_number} isn't finished yet, waiting another minute... ($remaining_attempts minutes left)\n" - sleep 1m - -done - -printf "\nmonitor-ci pipeline did not finish in time, quitting\n" -exit 1 diff --git a/scripts/ci/run-prebuilt-tests.sh b/scripts/ci/run-prebuilt-tests.sh deleted file mode 100755 index a65df8e14bf..00000000000 --- a/scripts/ci/run-prebuilt-tests.sh +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env bash -set -exo pipefail - -function test_package () { - local -r pkg="$1" bin_dir="$2" result_dir="$3" - - local -r test_file="${bin_dir}/${pkg}.test" - if [ ! -f "$test_file" ]; then - return - fi - - out_dir="${result_dir}/${pkg}" - mkdir -p "${out_dir}" - - # Run test files from within their original packages so any relative references - # to data files resolve properly. - local source_dir="${pkg##github.com/influxdata/influxdb/v2}" - source_dir="${source_dir##/}" - if [ -z "$source_dir" ]; then - source_dir="." - fi - ( - set +e - cd "$source_dir" - GOVERSION="$(cat ${bin_dir}/go.version)" "${bin_dir}/gotestsum" --junitfile "${out_dir}/report.xml" --raw-command -- \ - "${bin_dir}/test2json" -t -p "$pkg" "$test_file" -test.v - if [ $? 
!= 0 ]; then - echo 1 > "${result_dir}/rc" - fi - ) -} - -function main () { - if [[ $# != 2 ]]; then - >&2 echo Usage: $0 '' '' - exit 1 - fi - local -r bin_dir="$1" result_dir="$2" - - mkdir -p "$result_dir" - - local -r test_packages="$(cat "${bin_dir}/tests.list" | circleci tests split --split-by=timings --timings-type=classname)" - - echo 0 > "${result_dir}/rc" - for pkg in ${test_packages[@]}; do - test_package "$pkg" "$bin_dir" "$result_dir" - done - - exit $(cat "${result_dir}/rc") -} - -main ${@} diff --git a/scripts/ci/run-race-tests.sh b/scripts/ci/run-race-tests.sh deleted file mode 100755 index 171ba5abc1b..00000000000 --- a/scripts/ci/run-race-tests.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash -set -exo pipefail - -function main () { - if [[ $# != 1 ]]; then - >&2 echo Usage: $0 '' - exit 1 - fi - if [[ $(go env GOOS) != linux || $(go env GOARCH) != amd64 ]]; then - >&2 echo Race tests only supported on linux/amd64 - exit 1 - fi - - local -r out_dir="$1" - mkdir -p "$out_dir" - - # Get list of packages to test on this node according to Circle's timings. - local -r test_packages="$(go list ./... | circleci tests split --split-by=timings --timings-type=classname)" - - # Run tests - local -r tags=osuergo,netgo,sqlite_foreign_keys,sqlite_json - gotestsum --junitfile "${out_dir}/report.xml" -- -tags "$tags" -race ${test_packages[@]} -} - -main ${@} diff --git a/scripts/ci/run_perftest.sh b/scripts/ci/run_perftest.sh deleted file mode 100755 index 2eab5b0c9b6..00000000000 --- a/scripts/ci/run_perftest.sh +++ /dev/null @@ -1,332 +0,0 @@ -#!/bin/bash -ex - -echo "Running as user: $(whoami)" - -SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" - -# Source env variables -if [[ -f /home/ubuntu/vars.sh ]] ; then - . 
/home/ubuntu/vars.sh -fi - -install_influxdb() { - # Install influxdb - DEBIAN_FRONTEND=noninteractive apt-get install --assume-yes /home/ubuntu/influxdb2*amd64.deb - systemctl start influxdb - - # set up influxdb - export INFLUXDB2=true - export TEST_ORG=example_org - export TEST_TOKEN=token - result="$(curl -s -o /dev/null -H "Content-Type: application/json" -XPOST -d '{"username": "default", "password": "thisisnotused", "retentionPeriodSeconds": 0, "org": "'"$TEST_ORG"'", "bucket": "unused_bucket", "token": "'"$TEST_TOKEN"'"}' http://localhost:8086/api/v2/setup -w %{http_code})" - if [[ "$result" != "201" ]] ; then - echo "Influxdb2 failed to setup correctly" - exit 1 - fi -} - -install_telegraf() { - # Install Telegraf - wget -qO- https://repos.influxdata.com/influxdata-archive_compat.key - echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | sha256sum -c && cat influxdata-archive_compat.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null - echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list - - DEBIAN_FRONTEND=noninteractive apt-get update - DEBIAN_FRONTEND=noninteractive apt-get install -y git jq telegraf awscli - - # Install influx_tools - aws --region us-west-2 s3 cp s3://perftest-binaries-influxdb/influx_tools/influx_tools-d3be25b251256755d622792ec91826c5670c6106 ./influx_tools - mv ./influx_tools /usr/bin/influx_tools - chmod 755 /usr/bin/influx_tools - - root_branch="$(echo "${INFLUXDB_VERSION}" | rev | cut -d '-' -f1 | rev)" - log_date=$(date +%Y%m%d%H%M%S) - - mkdir -p /etc/telegraf -cat << EOF > /etc/telegraf/telegraf.conf -[[outputs.influxdb_v2]] - urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] - token = "${DB_TOKEN}" - organization = "${CLOUD2_ORG}" - bucket = "${CLOUD2_BUCKET}" - -[[inputs.file]] - name_override = "ingest" - files = ["$working_dir/test-ingest-*.json"] - data_format = "json" - json_strict = true - json_string_fields = [ - "branch", - "commit", - "i_type", - "time", - "use_case" - ] - tagexclude = ["host"] - json_time_key = "time" - json_time_format = "unix" - tag_keys = [ - "i_type", - "use_case", - "branch" - ] - -[[inputs.file]] - name_override = "query" - files = ["$working_dir/test-query-*.json"] - data_format = "json" - json_strict = true - json_string_fields = [ - "branch", - "commit", - "i_type", - "query_format", - "query_type", - "time", - "use_case" - ] - tagexclude = ["host"] - json_time_key = "time" - json_time_format = "unix" - tag_keys = [ - "i_type", - "query_format", - "use_case", - "query_type", - "branch" - ] -EOF - systemctl restart telegraf -} - -install_go() { - # install golang latest version - go_endpoint="go1.17.3.linux-amd64.tar.gz" - - wget "https://dl.google.com/go/$go_endpoint" -O "$working_dir/$go_endpoint" - rm -rf /usr/local/go - tar -C /usr/local -xzf "$working_dir/$go_endpoint" - - # set env variables necessary for go to work during cloud-init - if [[ `whoami` = root ]] ; then - mkdir -p /root/go/bin - export HOME=/root - export GOPATH=/root/go - export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin - fi - go version -} - -install_go_bins() { - # install influxdb-comparisons cmds - go get \ - github.com/influxdata/influxdb-comparisons/cmd/bulk_data_gen \ - github.com/influxdata/influxdb-comparisons/cmd/bulk_load_influx \ - github.com/influxdata/influxdb-comparisons/cmd/bulk_query_gen 
\ - github.com/influxdata/influxdb-comparisons/cmd/query_benchmarker_influxdb - # install yq - go get github.com/mikefarah/yq/v4 -} - -# Helper functions containing common logic -bucket_id() { - bucket_id=$(curl -H "Authorization: Token $TEST_TOKEN" "http://${NGINX_HOST}:8086/api/v2/buckets?org=$TEST_ORG" | jq -r ".buckets[] | select(.name | contains(\"$db_name\")).id") - echo $bucket_id -} - -force_compaction() { - # id of the bucket that will be compacted - b_id=$(bucket_id) - - # stop daemon and force compaction - systemctl stop influxdb - set +e - shards=$(find /var/lib/influxdb/engine/data/$b_id/autogen/ -maxdepth 1 -mindepth 1) - - set -e - for shard in $shards; do - if [[ -n "$(find $shard -name *.tsm)" ]]; then - # compact as the influxdb user in order to keep file permissions correct - sudo -u influxdb influx_tools compact-shard -force -verbose -path $shard - fi - done - - # restart daemon - systemctl start influxdb -} - -org_flag() { - case $1 in - flux-http) - echo -organization=$TEST_ORG - ;; - http) - echo -use-compatibility=true - ;; - *) - echo echo "unknown query format: $1" - exit 1 - ;; - esac -} - -create_dbrp() { -curl -XPOST -H "Authorization: Token ${TEST_TOKEN}" \ - -d "{\"org\":\"${TEST_ORG}\",\"bucketID\":\"$(bucket_id)\",\"database\":\"$db_name\",\"retention_policy\":\"autogen\"}" \ - http://${NGINX_HOST}:8086/api/v2/dbrps -} - -bulk_data_file_loader() { - local data_fname="influx-bulk-records-usecase-$test_name" - $GOPATH/bin/bulk_data_gen \ - -seed=$TEST_COMMIT_TIME \ - -use-case=$test_name \ - -scale-var=1000 \ - -timestamp-start="$start_time" \ - -timestamp-end="$end_time" > \ - ${USECASE_DIR}/$data_fname - - influxdb2_opts= - if [[ -z $INFLUXDB2 || $INFLUXDB2 = true ]] ; then - influxdb2_opts="-organization=$TEST_ORG -token=$TEST_TOKEN" - fi - - $GOPATH/bin/bulk_load_influx \ - -file=${USECASE_DIR}/$data_fname \ - -batch-size=5000 \ - -workers=4 \ - -urls=http://${NGINX_HOST}:8086 \ - -do-abort-on-exist=false \ - -do-db-create=true \ - -backoff=1s \ - -backoff-timeout=300m0s \ - $influxdb2_opts | \ - jq ". += {branch: \"$INFLUXDB_VERSION\", commit: \"$TEST_COMMIT\", time: \"$TEST_COMMIT_TIME\", i_type: \"$DATA_I_TYPE\", use_case: \"$test_name\"}" > "$working_dir/test-ingest-$test_name.json" - - # Cleanup from the data generation and loading. - force_compaction - - # Generate a DBRP mapping for use by InfluxQL queries. - create_dbrp - - rm ${USECASE_DIR}/$data_fname -} - -build_query_file() { - i=$1 - local query_usecase="$( yq e ".query_tests[$i].use_case" "$yaml_file")" - local type="$( yq e ".query_tests[$i].query_type" "$yaml_file")" - local format="$( yq e ".query_tests[$i].format" "$yaml_file")" - - local query_file="${format}_${query_usecase}_${type}" - local scale_var=1000 - $GOPATH/bin/bulk_query_gen \ - -use-case=$query_usecase \ - -query-type=$type \ - -format=influx-"$format" \ - -timestamp-start="$start_time" \ - -timestamp-end="$end_time" \ - -queries=500 \ - -scale-var=$scale_var > \ - ${USECASE_DIR}/$query_file - - # How long to run each set of query tests. Specify a duration to limit the maximum amount of time the queries can run, - # since individual queries can take a long time. - duration=30s - - ${GOPATH}/bin/query_benchmarker_influxdb \ - -file=${USECASE_DIR}/$query_file \ - -urls=http://${NGINX_HOST}:8086 \ - -debug=0 \ - -print-interval=0 \ - -json=true \ - $(org_flag $format) \ - -token=$TEST_TOKEN \ - -workers=4 \ - -benchmark-duration=$duration | \ - jq '."all queries"' | \ - jq -s '.[-1]' | \ - jq ". 
+= {use_case: \"$query_usecase\", query_type: \"$type\", branch: \"$INFLUXDB_VERSION\", commit: \"$TEST_COMMIT\", time: \"$TEST_COMMIT_TIME\", i_type: \"$DATA_I_TYPE\", query_format: \"$format\"}" > \ - $working_dir/test-query-$format-$query_usecase-$type.json - - rm ${USECASE_DIR}/$query_file - - # Restart daemon between query tests. - systemctl restart influxdb -} - -run_dataset() { - yaml_file="$1" - - test_name="$( yq e '.name' "$yaml_file" )" - USECASE_DIR="${DATASET_DIR}/$test_name" - mkdir "$USECASE_DIR" - - start_time="$( yq e '.start_time' "$yaml_file" )" - end_time="$( yq e '.end_time' "$yaml_file" )" - - data_loader_type="$( yq e '.data.type' "$yaml_file" )" - case "$data_loader_type" in - bulk_data_file_loader) - bulk_data_file_loader - ;; - *) - echo "ERROR: unknown data loader type $data_loader_type" - exit 1 - ;; - esac - - num_query_tests="$( yq e '.query_tests | length' "$yaml_file" )" - for (( i=0; i<$num_query_tests; i++ )) ; do - local query_runner="$( yq e ".query_tests[$i].type" "$yaml_file" )" - case "$query_runner" in - build_query_file) - build_query_file $i - ;; - *) - echo "ERROR: unknown data loader type $data_loader_type" - exit 1 - ;; - esac - done - - # Delete DB to start anew. - curl -X DELETE -H "Authorization: Token ${TEST_TOKEN}" http://${NGINX_HOST}:8086/api/v2/buckets/$(bucket_id) - rm -rf "$USECASE_DIR" -} - -########################## -## Setup for perf tests ## -########################## - -working_dir=$(mktemp -d) - -DATASET_DIR=/mnt/ramdisk -mkdir -p "$DATASET_DIR" -mount -t tmpfs -o size=32G tmpfs "$DATASET_DIR" - -install_influxdb -install_telegraf -install_go -install_go_bins - -# Common variables used across all tests -db_name="benchmark_db" - -########################## -## Run and record tests ## -########################## - -# Generate and ingest bulk data. Record the time spent as an ingest test if -# specified, and run the query performance tests for each dataset. 
-for file in "$SCRIPT_DIR"/perf-tests/* ; do - run_dataset $file -done - -echo "Using Telegraph to report results from the following files:" -ls $working_dir -if [[ "${TEST_RECORD_RESULTS}" = "true" ]] ; then - telegraf --debug --once -else - telegraf --debug --test -fi diff --git a/scripts/ci/terraform.tfvars b/scripts/ci/terraform.tfvars deleted file mode 100644 index 93b7b5597b5..00000000000 --- a/scripts/ci/terraform.tfvars +++ /dev/null @@ -1,6 +0,0 @@ -################################## -# YOU MUST CHANGE THESE VARIABLES - -# find your key pair id (or create one) at https://console.aws.amazon.com/ec2/v2/home#KeyPairs -# You will need your private key to ssh to your instances -key_name = "circleci-oss-test" diff --git a/scripts/ci/test-downgrade.sh b/scripts/ci/test-downgrade.sh deleted file mode 100755 index 6b12eef1980..00000000000 --- a/scripts/ci/test-downgrade.sh +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env bash -set -exo pipefail - -declare -a DOWNGRADE_TARGETS=('2.0' '2.1') - -declare -r TEST_ORG=testorg -declare -r TEST_TOKEN=supersecretfaketoken -declare -r INIT_PING_ATTEMPTS=600 - -function download_older_binary () { - local -r target_version=$1 dl_dir=$2 - - local dl_url - local dl_sha - case ${target_version} in - 2.0) - dl_url=https://dl.influxdata.com/influxdb/releases/influxdb2-2.0.9-linux-amd64.tar.gz - dl_sha=64b9cfea1b5ca07479a16332056f9e7f806ad71df9c6cc0ec70c4333372b9d26 - ;; - 2.1) - dl_url=https://dl.influxdata.com/influxdb/releases/influxdb2-2.1.1-linux-amd64.tar.gz - dl_sha=1688e3afa7f875d472768e4f4f5a909b357287a45a8f28287021e4184a185927 - ;; - *) - >&2 echo Error: Unknown downgrade target "'$target_version'" - exit 1 - ;; - esac - - local -r archive="influxd-${target_version}.tar.gz" - curl -sL -o "${dl_dir}/${archive}" "$dl_url" - echo "${dl_sha} ${dl_dir}/${archive}" | sha256sum --check -- - tar xzf "${dl_dir}/${archive}" -C "$dl_dir" --strip-components=1 - rm "${dl_dir}/${archive}" - mv "${dl_dir}/influxd" "${dl_dir}/influxd-${target_version}" -} - -function test_downgrade_target () { - local -r influxd_path=$1 target_version=$2 tmp=$3 - - download_older_binary "$target_version" "$tmp" - - local -r bolt_path="${tmp}/influxd-${target_version}.bolt" - local -r sqlite_path="${tmp}/influxd-${target_version}.sqlite" - - cp "${tmp}/influxd.bolt" "$bolt_path" - cp "${tmp}/influxd.sqlite" "$sqlite_path" - - "$influxd_path" downgrade \ - --bolt-path "$bolt_path" \ - --sqlite-path "$sqlite_path" \ - "$target_version" - - INFLUXD_BOLT_PATH="$bolt_path" INFLUXD_SQLITE_PATH="$sqlite_path" INFLUXD_ENGINE_PATH="${tmp}/engine" \ - "${tmp}/influxd-${target_version}" & - local -r influxd_pid="$!" 
- - wait_for_influxd "$influxd_pid" - - if [[ "$(curl -s -o /dev/null -H "Authorization: Token $TEST_TOKEN" "http://localhost:8086/api/v2/me" -w "%{http_code}")" != "200" ]]; then - >&2 echo Error: "Downgraded DB doesn't recognize auth token" - exit 1 - fi - - kill -TERM "$influxd_pid" - wait "$influxd_pid" || true -} - -function wait_for_influxd () { - local -r influxd_pid=$1 - local ping_count=0 - while kill -0 "${influxd_pid}" && [ ${ping_count} -lt ${INIT_PING_ATTEMPTS} ]; do - sleep 1 - ping_count=$((ping_count+1)) - if [[ "$(curl -s -o /dev/null "http://localhost:8086/health" -w "%{http_code}")" = "200" ]]; then - return - fi - done - if [ ${ping_count} -eq ${INIT_PING_ATTEMPTS} ]; then - >&2 echo influxd took too long to start up - else - >&2 echo influxd crashed during startup - fi - return 1 -} - -function setup_influxd () { - local -r influxd_path=$1 tmp=$2 - INFLUXD_BOLT_PATH="${tmp}/influxd.bolt" INFLUXD_SQLITE_PATH="${tmp}/influxd.sqlite" INFLUXD_ENGINE_PATH="${tmp}/engine" \ - "$influxd_path" & - local -r influxd_pid="$!" - - wait_for_influxd "$influxd_pid" - curl -s -o /dev/null -XPOST \ - -d '{"username":"default","password":"fakepassword","org":"'$TEST_ORG'","bucket":"unused","token":"'$TEST_TOKEN'"}' \ - http://localhost:8086/api/v2/setup - - kill -TERM "$influxd_pid" - wait "$influxd_pid" || true -} - -function main () { - if [[ $# != 1 ]]; then - >&2 echo Usage: $0 '' - exit 1 - fi - local -r influxd_path=$1 - - local -r tmp="$(mktemp -d -t "test-downgrade-${target_version}-XXXXXX")" - trap "rm -rf ${tmp}" EXIT - - setup_influxd "$influxd_path" "$tmp" - - for target in ${DOWNGRADE_TARGETS[@]}; do - test_downgrade_target "$influxd_path" "$target" "$tmp" - done -} - -main "${@}" diff --git a/scripts/ci/tests/01-default-config.sh b/scripts/ci/tests/01-default-config.sh deleted file mode 100755 index 2a742b0371b..00000000000 --- a/scripts/ci/tests/01-default-config.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -eux - -service influxdb start -service influxdb stop diff --git a/scripts/ci/tests/02-self-signed-certs.sh b/scripts/ci/tests/02-self-signed-certs.sh deleted file mode 100755 index 5a6675970d6..00000000000 --- a/scripts/ci/tests/02-self-signed-certs.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -eux - -echo 'tls-cert = "/etc/ssl/influxdb.crt"' >> /etc/influxdb/config.toml -echo 'tls-key = "/etc/ssl/influxdb.key"' >> /etc/influxdb/config.toml -openssl req -x509 -nodes -newkey rsa:2048 -keyout /etc/ssl/influxdb.key -out /etc/ssl/influxdb.crt -days 365 -subj /C=US/ST=CA/L=sanfrancisco/O=influxdata/OU=edgeteam/CN=localhost -chown influxdb:influxdb /etc/ssl/influxdb.* -service influxdb start -service influxdb stop -contents="$(head -n -2 /etc/influxdb/config.toml)" -echo "$contents" > /etc/influxdb/config.toml diff --git a/scripts/ci/tests/03-auth-enabled.sh b/scripts/ci/tests/03-auth-enabled.sh deleted file mode 100755 index 2a0a8e29e9f..00000000000 --- a/scripts/ci/tests/03-auth-enabled.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -eux - -service influxdb start -result=$(curl -s -o /dev/null -H "Content-Type: application/json" -XPOST -d '{"username": "default", "password": "thisisnotused", "retentionPeriodSeconds": 0, "org": "testorg", "bucket": "unusedbucket", "token": "thisisatesttoken"}' http://localhost:8086/api/v2/setup -w %{http_code}) -if [ "$result" != "201" ]; then - exit 1 -fi -service influxdb stop -service influxdb start -service influxdb stop diff --git a/scripts/fetch-swagger.sh b/scripts/fetch-swagger.sh deleted file mode 100755 index 
08f202a9d60..00000000000 --- a/scripts/fetch-swagger.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash - -# This script clones the openapi repo and extracts the OSS swagger.json for the -# specified commit. - -set -e - -declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd) -declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR}) -declare -r STATIC_DIR="$ROOT_DIR/static" - -# Pins the swagger that will be downloaded to a specific commit -declare -r OPENAPI_SHA=8b5f1bbb2cd388eb454dc9da19e3d2c4061cdf5f - -# Don't do a shallow clone since the commit we want might be several commits -# back; but do only clone the main branch. -git clone https://github.com/influxdata/openapi.git --single-branch -mkdir -p "$STATIC_DIR/data" -cd openapi && git checkout ${OPENAPI_SHA} --quiet && cp contracts/oss.json "$STATIC_DIR/data/swagger.json" -cd ../ && rm -rf openapi diff --git a/scripts/fetch-ui-assets.sh b/scripts/fetch-ui-assets.sh deleted file mode 100755 index 4628bb96d11..00000000000 --- a/scripts/fetch-ui-assets.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash - -# This script is used to download built UI assets from the "influxdata/ui" -# repository. The built UI assets are attached to a release in "influxdata/ui", -# which is linked here. -# -# The master branch of "influxdata/influxdb" (this repository) downloads from the -# release tagged at the latest released version of influxdb. -# For example, if master is tracking slightly ahead of 2.6.1, then the tag would be OSS-v2.6.1. -# -# Feature branches of "influxdata/influxdb" (2.0, 2.1, etc) download from their -# respective releases in "influxdata/ui" (OSS-2.0, OSS-2.1, etc). Those releases -# are updated only when a bug fix needs included for the UI of that OSS release. - -set -e - -declare -r SCRIPT_DIR=$(cd $(dirname ${0}) >/dev/null 2>&1 && pwd) -declare -r ROOT_DIR=$(dirname ${SCRIPT_DIR}) -declare -r STATIC_DIR="$ROOT_DIR/static" - -UI_RELEASE="OSS-v2.7.1" - -# Download the SHA256 checksum attached to the release. To verify the integrity -# of the download, this checksum will be used to check the download tar file -# containing the built UI assets. -curl -Ls https://github.com/influxdata/ui/releases/download/$UI_RELEASE/sha256.txt --output sha256.txt - -# Download the tar file containing the built UI assets. -curl -L https://github.com/influxdata/ui/releases/download/$UI_RELEASE/build.tar.gz --output build.tar.gz - -# Verify the checksums match; exit if they don't. -case "$(uname -s)" in - FreeBSD | Darwin) - echo "$(cat sha256.txt)" | shasum --algorithm 256 --check \ - || { echo "Checksums did not match for downloaded UI assets!"; exit 1; } ;; - Linux) - echo "$(cat sha256.txt)" | sha256sum --check -- \ - || { echo "Checksums did not match for downloaded UI assets!"; exit 1; } ;; - *) - echo "The '$(uname -s)' operating system is not supported as a build host for the UI" >&2 - exit 1 -esac - -# Extract the assets and clean up. -mkdir -p "$STATIC_DIR/data" -tar -xzf build.tar.gz -C "$STATIC_DIR/data" -rm sha256.txt -rm build.tar.gz diff --git a/scripts/pkg-config.sh b/scripts/pkg-config.sh deleted file mode 100755 index 76ae2d2be52..00000000000 --- a/scripts/pkg-config.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -tmpdir=$(mktemp -d) -trap "{ rm -rf ${tmpdir}; }" EXIT - -# "go build" can be noisy, and when Go invokes pkg-config (by calling this script) it will merge stdout and stderr. -# Discard any output unless "go build" terminates with an error. 
-go build -o ${tmpdir}/pkg-config github.com/influxdata/pkg-config &> ${tmpdir}/go_build_output -if [ "$?" -ne 0 ]; then - cat ${tmpdir}/go_build_output 1>&2 - exit 1 -fi - -${tmpdir}/pkg-config "$@" diff --git a/secret.go b/secret.go deleted file mode 100644 index 719408bc3b8..00000000000 --- a/secret.go +++ /dev/null @@ -1,76 +0,0 @@ -package influxdb - -import ( - "context" - "encoding/json" - "strings" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// ErrSecretNotFound is the error msg for a missing secret. -const ErrSecretNotFound = "secret not found" - -// SecretService a service for storing and retrieving secrets. -type SecretService interface { - // LoadSecret retrieves the secret value v found at key k for organization orgID. - LoadSecret(ctx context.Context, orgID platform.ID, k string) (string, error) - - // GetSecretKeys retrieves all secret keys that are stored for the organization orgID. - GetSecretKeys(ctx context.Context, orgID platform.ID) ([]string, error) - - // PutSecret stores the secret pair (k,v) for the organization orgID. - PutSecret(ctx context.Context, orgID platform.ID, k string, v string) error - - // PutSecrets puts all provided secrets and overwrites any previous values. - PutSecrets(ctx context.Context, orgID platform.ID, m map[string]string) error - - // PatchSecrets patches all provided secrets and updates any previous values. - PatchSecrets(ctx context.Context, orgID platform.ID, m map[string]string) error - - // DeleteSecret removes a single secret from the secret store. - DeleteSecret(ctx context.Context, orgID platform.ID, ks ...string) error -} - -// SecretField contains a key string, and value pointer. -type SecretField struct { - Key string `json:"key"` - Value *string `json:"value,omitempty"` -} - -// String returns the key of the secret. -func (s SecretField) String() string { - if s.Key == "" { - return "" - } - return "secret: " + s.Key -} - -// MarshalJSON implement the json marshaler interface. -func (s SecretField) MarshalJSON() ([]byte, error) { - return json.Marshal(s.String()) -} - -// UnmarshalJSON implement the json unmarshaler interface. -func (s *SecretField) UnmarshalJSON(b []byte) error { - var ss string - if err := json.Unmarshal(b, &ss); err != nil { - return err - } - if ss == "" { - s.Key = "" - return nil - } - if strings.HasPrefix(ss, "secret: ") { - s.Key = ss[len("secret: "):] - } else { - s.Value = strPtr(ss) - } - return nil -} - -func strPtr(s string) *string { - ss := new(string) - *ss = s - return ss -} diff --git a/secret/http_client.go b/secret/http_client.go deleted file mode 100644 index 0880e9e4b5b..00000000000 --- a/secret/http_client.go +++ /dev/null @@ -1,89 +0,0 @@ -package secret - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/pkg/httpc" -) - -type Client struct { - Client *httpc.Client -} - -// LoadSecret is not implemented for http -func (s *Client) LoadSecret(ctx context.Context, orgID platform.ID, k string) (string, error) { - return "", &errors.Error{ - Code: errors.EMethodNotAllowed, - Msg: "load secret is not implemented for http", - } -} - -// PutSecret is not implemented for http. 
-func (s *Client) PutSecret(ctx context.Context, orgID platform.ID, k string, v string) error { - return &errors.Error{ - Code: errors.EMethodNotAllowed, - Msg: "put secret is not implemented for http", - } -} - -// GetSecretKeys get all secret keys mathing an org ID via HTTP. -func (s *Client) GetSecretKeys(ctx context.Context, orgID platform.ID) ([]string, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - span.LogKV("org-id", orgID) - - path := fmt.Sprintf("/api/v2/orgs/%s/secrets", orgID.String()) - - var ss secretsResponse - err := s.Client. - Get(path). - DecodeJSON(&ss). - Do(ctx) - if err != nil { - return nil, err - } - - return ss.Secrets, nil -} - -// PutSecrets is not implemented for http. -func (s *Client) PutSecrets(ctx context.Context, orgID platform.ID, m map[string]string) error { - return &errors.Error{ - Code: errors.EMethodNotAllowed, - Msg: "put secrets is not implemented for http", - } -} - -// PatchSecrets will update the existing secret with new via http. -func (s *Client) PatchSecrets(ctx context.Context, orgID platform.ID, m map[string]string) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if orgID != 0 { - span.LogKV("org-id", orgID) - } - - path := fmt.Sprintf("/api/v2/orgs/%s/secrets", orgID.String()) - - return s.Client. - PatchJSON(m, path). - Do(ctx) -} - -// DeleteSecret removes a single secret via HTTP. -func (s *Client) DeleteSecret(ctx context.Context, orgID platform.ID, ks ...string) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - path := fmt.Sprintf("/api/v2/orgs/%s/secrets/delete", orgID.String()) - return s.Client. - PostJSON(secretsDeleteBody{ - Secrets: ks, - }, path). - Do(ctx) -} diff --git a/secret/http_server.go b/secret/http_server.go deleted file mode 100644 index 0679e86744a..00000000000 --- a/secret/http_server.go +++ /dev/null @@ -1,152 +0,0 @@ -package secret - -import ( - "fmt" - "net/http" - - "github.com/go-chi/chi" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -type handler struct { - log *zap.Logger - svc influxdb.SecretService - api *kithttp.API - - idLookupKey string -} - -// NewHandler creates a new handler for the secret service -func NewHandler(log *zap.Logger, idLookupKey string, svc influxdb.SecretService) http.Handler { - h := &handler{ - log: log, - svc: svc, - api: kithttp.NewAPI(kithttp.WithLog(log)), - - idLookupKey: idLookupKey, - } - - r := chi.NewRouter() - - r.Get("/", h.handleGetSecrets) - r.Patch("/", h.handlePatchSecrets) - r.Delete("/{secretID}", h.handleDeleteSecret) - r.Post("/delete", h.handleDeleteSecrets) // deprecated - return r -} - -// handleGetSecrets is the HTTP handler for the GET /api/v2/orgs/:id/secrets route. 
-func (h *handler) handleGetSecrets(w http.ResponseWriter, r *http.Request) { - orgID, err := h.decodeOrgID(r) - if err != nil { - h.api.Err(w, r, err) - } - - ks, err := h.svc.GetSecretKeys(r.Context(), orgID) - if err != nil && errors.ErrorCode(err) != errors.ENotFound { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, newSecretsResponse(orgID, ks)) -} - -type secretsResponse struct { - Links map[string]string `json:"links"` - Secrets []string `json:"secrets"` -} - -func newSecretsResponse(orgID platform.ID, ks []string) *secretsResponse { - if ks == nil { - ks = []string{} - } - return &secretsResponse{ - Links: map[string]string{ - "org": fmt.Sprintf("/api/v2/orgs/%s", orgID), - "self": fmt.Sprintf("/api/v2/orgs/%s/secrets", orgID), - }, - Secrets: ks, - } -} - -// handleGetPatchSecrets is the HTTP handler for the PATCH /api/v2/orgs/:id/secrets route. -func (h *handler) handlePatchSecrets(w http.ResponseWriter, r *http.Request) { - orgID, err := h.decodeOrgID(r) - if err != nil { - h.api.Err(w, r, err) - } - - var secrets map[string]string - if err := h.api.DecodeJSON(r.Body, &secrets); err != nil { - h.api.Err(w, r, err) - return - } - - if err := h.svc.PatchSecrets(r.Context(), orgID, secrets); err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusNoContent, nil) -} - -type secretsDeleteBody struct { - Secrets []string `json:"secrets"` -} - -// handleDeleteSecrets is the HTTP handler for the POST /api/v2/orgs/:id/secrets/delete route. -// deprecated. -func (h *handler) handleDeleteSecrets(w http.ResponseWriter, r *http.Request) { - orgID, err := h.decodeOrgID(r) - if err != nil { - h.api.Err(w, r, err) - } - - var reqBody secretsDeleteBody - - if err := h.api.DecodeJSON(r.Body, &reqBody); err != nil { - h.api.Err(w, r, err) - return - } - - if err := h.svc.DeleteSecret(r.Context(), orgID, reqBody.Secrets...); err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusNoContent, nil) -} - -// handleDeleteSecret is the HTTP handler for the DELETE /api/v2/orgs/:id/secrets/:id route. 
-func (h *handler) handleDeleteSecret(w http.ResponseWriter, r *http.Request) { - orgID, err := h.decodeOrgID(r) - if err != nil { - h.api.Err(w, r, err) - } - - if err := h.svc.DeleteSecret(r.Context(), orgID, chi.URLParam(r, "secretID")); err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusNoContent, nil) -} - -func (h *handler) decodeOrgID(r *http.Request) (platform.ID, error) { - org := chi.URLParam(r, h.idLookupKey) - if org == "" { - return platform.InvalidID(), &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - id, err := platform.IDFromString(org) - if err != nil { - return platform.InvalidID(), err - } - return *id, nil -} diff --git a/secret/http_server_test.go b/secret/http_server_test.go deleted file mode 100644 index 90fc9bd3371..00000000000 --- a/secret/http_server_test.go +++ /dev/null @@ -1,414 +0,0 @@ -package secret - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/go-chi/chi" - "github.com/influxdata/influxdb/v2" - influxdbhttp "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func initSecretService(f influxdbtesting.SecretServiceFields, t *testing.T) (influxdb.SecretService, func()) { - t.Helper() - s := inmem.NewKVStore() - - ctx := context.Background() - if err := all.Up(ctx, zaptest.NewLogger(t), s); err != nil { - t.Fatal(err) - } - - storage, err := NewStore(s) - if err != nil { - t.Fatal(err) - } - svc := NewService(storage) - - for _, s := range f.Secrets { - if err := svc.PutSecrets(context.Background(), s.OrganizationID, s.Env); err != nil { - t.Fatalf("failed to populate users") - } - } - - for _, ss := range f.Secrets { - if err := svc.PutSecrets(ctx, ss.OrganizationID, ss.Env); err != nil { - t.Fatalf("failed to populate secrets") - } - } - - handler := NewHandler(zaptest.NewLogger(t), "id", svc) - router := chi.NewRouter() - router.Mount("/api/v2/orgs/{id}/secrets", handler) - server := httptest.NewServer(router) - httpClient, err := influxdbhttp.NewHTTPClient(server.URL, "", false) - if err != nil { - t.Fatal(err) - } - client := Client{ - Client: httpClient, - } - return &client, server.Close -} - -func TestSecretService(t *testing.T) { - t.Parallel() - influxdbtesting.GetSecretKeys(initSecretService, t) - influxdbtesting.PatchSecrets(initSecretService, t) - influxdbtesting.DeleteSecrets(initSecretService, t) -} - -func TestSecretService_handleGetSecrets(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - type args struct { - orgID platform.ID - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get basic secrets", - fields: fields{ - &mock.SecretService{ - GetSecretKeysFn: func(ctx context.Context, orgID platform.ID) ([]string, error) { - return []string{"hello", "world"}, nil - }, - }, - }, - args: args{ - orgID: 1, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: "{\n\t\"links\": {\n\t\t\"org\": 
\"/api/v2/orgs/0000000000000001\",\n\t\t\"self\": \"/api/v2/orgs/0000000000000001/secrets\"\n\t},\n\t\"secrets\": [\n\t\t\"hello\",\n\t\t\"world\"\n\t]\n}", - }, - }, - { - name: "get secrets when there are none", - fields: fields{ - &mock.SecretService{ - GetSecretKeysFn: func(ctx context.Context, orgID platform.ID) ([]string, error) { - return []string{}, nil - }, - }, - }, - args: args{ - orgID: 1, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: "{\n\t\"links\": {\n\t\t\"org\": \"/api/v2/orgs/0000000000000001\",\n\t\t\"self\": \"/api/v2/orgs/0000000000000001/secrets\"\n\t},\n\t\"secrets\": []\n}", - }, - }, - { - name: "get secrets when organization has no secret keys", - fields: fields{ - &mock.SecretService{ - GetSecretKeysFn: func(ctx context.Context, orgID platform.ID) ([]string, error) { - return []string{}, &errors.Error{ - Code: errors.ENotFound, - Msg: "organization has no secret keys", - } - - }, - }, - }, - args: args{ - orgID: 1, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: "{\n\t\"links\": {\n\t\t\"org\": \"/api/v2/orgs/0000000000000001\",\n\t\t\"self\": \"/api/v2/orgs/0000000000000001/secrets\"\n\t},\n\t\"secrets\": []\n}", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := NewHandler(zaptest.NewLogger(t), "id", tt.fields.SecretService) - router := chi.NewRouter() - router.Mount("/api/v2/orgs/{id}/secrets", h) - - u := fmt.Sprintf("http://any.url/api/v2/orgs/%s/secrets", tt.args.orgID) - r := httptest.NewRequest("GET", u, nil) - w := httptest.NewRecorder() - - router.ServeHTTP(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("handleGetSecrets() = %v, want %v", res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("handleGetSecrets() = %v, want %v", content, tt.wants.contentType) - } - if tt.wants.body != "" { - if string(body) != tt.wants.body { - t.Errorf("%q. 
handleGetSecrets() invalid body: %q", tt.name, body) - } - } - }) - } -} - -func TestSecretService_handlePatchSecrets(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - type args struct { - orgID platform.ID - secrets map[string]string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get basic secrets", - fields: fields{ - &mock.SecretService{ - PatchSecretsFn: func(ctx context.Context, orgID platform.ID, s map[string]string) error { - return nil - }, - }, - }, - args: args{ - orgID: 1, - secrets: map[string]string{ - "abc": "123", - }, - }, - wants: wants{ - statusCode: http.StatusNoContent, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := NewHandler(zaptest.NewLogger(t), "id", tt.fields.SecretService) - router := chi.NewRouter() - router.Mount("/api/v2/orgs/{id}/secrets", h) - - b, err := json.Marshal(tt.args.secrets) - if err != nil { - t.Fatalf("failed to marshal secrets: %v", err) - } - - buf := bytes.NewReader(b) - u := fmt.Sprintf("http://any.url/api/v2/orgs/%s/secrets", tt.args.orgID) - r := httptest.NewRequest("PATCH", u, buf) - w := httptest.NewRecorder() - - router.ServeHTTP(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("handlePatchSecrets() = %v, want %v", res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("handlePatchSecrets() = %v, want %v", content, tt.wants.contentType) - } - if tt.wants.body != "" { - if string(body) != tt.wants.body { - t.Errorf("%q. 
handlePatchSecrets() invalid body", tt.name) - } - } - - }) - } -} - -func TestSecretService_handleDeleteSecrets(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - type args struct { - orgID platform.ID - secrets []string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get basic secrets", - fields: fields{ - &mock.SecretService{ - DeleteSecretFn: func(ctx context.Context, orgID platform.ID, s ...string) error { - return nil - }, - }, - }, - args: args{ - orgID: 1, - secrets: []string{ - "abc", - }, - }, - wants: wants{ - statusCode: http.StatusNoContent, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := NewHandler(zaptest.NewLogger(t), "id", tt.fields.SecretService) - router := chi.NewRouter() - router.Mount("/api/v2/orgs/{id}/secrets", h) - - b, err := json.Marshal(struct { - Secrets []string `json:"secrets"` - }{ - Secrets: tt.args.secrets, - }) - if err != nil { - t.Fatalf("failed to marshal secrets: %v", err) - } - - buf := bytes.NewReader(b) - u := fmt.Sprintf("http://any.url/api/v2/orgs/%s/secrets/delete", tt.args.orgID) - r := httptest.NewRequest("POST", u, buf) - w := httptest.NewRecorder() - - router.ServeHTTP(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("handleDeleteSecrets() = %v, want %v", res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("handleDeleteSecrets() = %v, want %v", content, tt.wants.contentType) - } - if tt.wants.body != "" { - if string(body) != tt.wants.body { - t.Errorf("%q. handleDeleteSecrets() invalid body", tt.name) - } - } - - }) - } -} - -func TestSecretService_handleDeleteSecret(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - type args struct { - orgID platform.ID - secretID string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "delete secret", - fields: fields{ - &mock.SecretService{ - DeleteSecretFn: func(ctx context.Context, orgID platform.ID, s ...string) error { - return nil - }, - }, - }, - args: args{ - orgID: 1, - secretID: "abc", - }, - wants: wants{ - statusCode: http.StatusNoContent, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := NewHandler(zaptest.NewLogger(t), "id", tt.fields.SecretService) - router := chi.NewRouter() - router.Mount("/api/v2/orgs/{id}/secrets", h) - - u := fmt.Sprintf("http://any.url/api/v2/orgs/%s/secrets/%s", tt.args.orgID, tt.args.secretID) - r := httptest.NewRequest("DELETE", u, nil) - w := httptest.NewRecorder() - - router.ServeHTTP(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("handleDeleteSecrets() = %v, want %v", res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("handleDeleteSecrets() = %v, want %v", content, tt.wants.contentType) - } - if tt.wants.body != "" { - if string(body) != tt.wants.body { - t.Errorf("%q. 
handleDeleteSecrets() invalid body", tt.name) - } - } - - }) - } -} diff --git a/secret/middleware_auth.go b/secret/middleware_auth.go deleted file mode 100644 index efafb8190ee..00000000000 --- a/secret/middleware_auth.go +++ /dev/null @@ -1,101 +0,0 @@ -package secret - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.SecretService = (*AuthedSvc)(nil) - -// AuthedSvc wraps a influxdb.AuthedSvc and authorizes actions -// against it appropriately. -type AuthedSvc struct { - s influxdb.SecretService -} - -// NewAuthedService constructs an instance of an authorizing secret service. -func NewAuthedService(s influxdb.SecretService) *AuthedSvc { - return &AuthedSvc{ - s: s, - } -} - -// LoadSecret checks to see if the authorizer on context has read access to the secret key provided. -func (s *AuthedSvc) LoadSecret(ctx context.Context, orgID platform.ID, key string) (string, error) { - if _, _, err := authorizer.AuthorizeOrgReadResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { - return "", err - } - secret, err := s.s.LoadSecret(ctx, orgID, key) - if err != nil { - return "", err - } - return secret, nil -} - -// GetSecretKeys checks to see if the authorizer on context has read access to all the secrets belonging to orgID. -func (s *AuthedSvc) GetSecretKeys(ctx context.Context, orgID platform.ID) ([]string, error) { - if _, _, err := authorizer.AuthorizeOrgReadResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { - return []string{}, err - } - secrets, err := s.s.GetSecretKeys(ctx, orgID) - if err != nil { - return []string{}, err - } - return secrets, nil -} - -// PutSecret checks to see if the authorizer on context has write access to the secret key provided. -func (s *AuthedSvc) PutSecret(ctx context.Context, orgID platform.ID, key string, val string) error { - if _, _, err := authorizer.AuthorizeCreate(ctx, influxdb.SecretsResourceType, orgID); err != nil { - return err - } - err := s.s.PutSecret(ctx, orgID, key, val) - if err != nil { - return err - } - return nil -} - -// PutSecrets checks to see if the authorizer on context has read and write access to the secret keys provided. -func (s *AuthedSvc) PutSecrets(ctx context.Context, orgID platform.ID, m map[string]string) error { - // PutSecrets operates on intersection between m and keys beloging to orgID. - // We need to have read access to those secrets since it deletes the secrets (within the intersection) that have not be overridden. - if _, _, err := authorizer.AuthorizeOrgReadResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { - return err - } - if _, _, err := authorizer.AuthorizeOrgWriteResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { - return err - } - err := s.s.PutSecrets(ctx, orgID, m) - if err != nil { - return err - } - return nil -} - -// PatchSecrets checks to see if the authorizer on context has write access to the secret keys provided. -func (s *AuthedSvc) PatchSecrets(ctx context.Context, orgID platform.ID, m map[string]string) error { - if _, _, err := authorizer.AuthorizeOrgWriteResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { - return err - } - err := s.s.PatchSecrets(ctx, orgID, m) - if err != nil { - return err - } - return nil -} - -// DeleteSecret checks to see if the authorizer on context has write access to the secret keys provided. 
-func (s *AuthedSvc) DeleteSecret(ctx context.Context, orgID platform.ID, keys ...string) error { - if _, _, err := authorizer.AuthorizeOrgWriteResource(ctx, influxdb.SecretsResourceType, orgID); err != nil { - return err - } - err := s.s.DeleteSecret(ctx, orgID, keys...) - if err != nil { - return err - } - return nil -} diff --git a/secret/middleware_auth_test.go b/secret/middleware_auth_test.go deleted file mode 100644 index e45c2ba01c5..00000000000 --- a/secret/middleware_auth_test.go +++ /dev/null @@ -1,700 +0,0 @@ -package secret_test - -import ( - "bytes" - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/secret" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var secretCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), -} - -func TestSecretService_LoadSecret(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - type args struct { - permission influxdb.Permission - org platform.ID - key string - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access secret within org", - fields: fields{ - SecretService: &mock.SecretService{ - LoadSecretFn: func(ctx context.Context, orgID platform.ID, k string) (string, error) { - if k == "key" { - return "val", nil - } - return "", &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrSecretNotFound, - } - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - key: "key", - org: platform.ID(10), - }, - wants: wants{ - err: nil, - }, - }, - { - name: "cannot access not existing secret", - fields: fields{ - SecretService: &mock.SecretService{ - LoadSecretFn: func(ctx context.Context, orgID platform.ID, k string) (string, error) { - if k == "key" { - return "val", nil - } - return "", &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrSecretNotFound, - } - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - key: "not existing", - org: platform.ID(10), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrSecretNotFound, - }, - }, - }, - { - name: "unauthorized to access secret within org", - fields: fields{ - SecretService: &mock.SecretService{ - LoadSecretFn: func(ctx context.Context, orgID platform.ID, k string) (string, error) { - if k == "key" { - return "val", nil - } - return "", &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrSecretNotFound, - } - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - ID: influxdbtesting.IDPtr(10), - }, - }, - org: platform.ID(2), - key: "key", - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/0000000000000002/secrets is unauthorized", - Code: errors.EUnauthorized, - }, - }, 
- }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := secret.NewAuthedService(tt.fields.SecretService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.LoadSecret(ctx, tt.args.org, tt.args.key) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestSecretService_GetSecretKeys(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - type args struct { - permission influxdb.Permission - org platform.ID - } - type wants struct { - err error - secrets []string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all secrets within an org", - fields: fields{ - SecretService: &mock.SecretService{ - GetSecretKeysFn: func(ctx context.Context, orgID platform.ID) ([]string, error) { - return []string{ - "0000000000000001secret1", - "0000000000000001secret2", - "0000000000000001secret3", - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - org: platform.ID(1), - }, - wants: wants{ - secrets: []string{ - "0000000000000001secret1", - "0000000000000001secret2", - "0000000000000001secret3", - }, - }, - }, - { - name: "unauthorized to see all secrets within an org", - fields: fields{ - SecretService: &mock.SecretService{ - GetSecretKeysFn: func(ctx context.Context, orgID platform.ID) ([]string, error) { - return []string{ - "0000000000000002secret1", - "0000000000000002secret2", - "0000000000000002secret3", - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - org: platform.ID(2), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EUnauthorized, - Msg: "read:orgs/0000000000000002/secrets is unauthorized", - }, - secrets: []string{}, - }, - }, - { - name: "errors when there are not secret into an org", - fields: fields{ - SecretService: &mock.SecretService{ - GetSecretKeysFn: func(ctx context.Context, orgID platform.ID) ([]string, error) { - return []string(nil), &errors.Error{ - Code: errors.ENotFound, - Msg: "organization has no secret keys", - } - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - org: platform.ID(10), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "organization has no secret keys", - }, - secrets: []string{}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := secret.NewAuthedService(tt.fields.SecretService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - secrets, err := s.GetSecretKeys(ctx, tt.args.org) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(secrets, tt.wants.secrets, secretCmpOptions...); diff != "" { - t.Errorf("secrets are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestSecretService_PatchSecrets(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - type args struct { - org 
platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to patch secrets", - fields: fields{ - SecretService: &mock.SecretService{ - PatchSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { - return nil - }, - }, - }, - args: args{ - org: platform.ID(1), - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update secret", - fields: fields{ - SecretService: &mock.SecretService{ - PatchSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { - return nil - }, - }, - }, - args: args{ - org: platform.ID(1), - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/0000000000000001/secrets is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := secret.NewAuthedService(tt.fields.SecretService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - patches := make(map[string]string) - err := s.PatchSecrets(ctx, tt.args.org, patches) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestSecretService_DeleteSecret(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - type args struct { - org platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete secret", - fields: fields{ - SecretService: &mock.SecretService{ - DeleteSecretFn: func(ctx context.Context, orgID platform.ID, keys ...string) error { - return nil - }, - }, - }, - args: args{ - org: platform.ID(1), - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete secret", - fields: fields{ - SecretService: &mock.SecretService{ - DeleteSecretFn: func(ctx context.Context, orgID platform.ID, keys ...string) error { - return nil - }, - }, - }, - args: args{ - org: 10, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/secrets is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := secret.NewAuthedService(tt.fields.SecretService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.DeleteSecret(ctx, tt.args.org) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestSecretService_PutSecret(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - 
type args struct { - permission influxdb.Permission - orgID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to put a secret", - fields: fields{ - SecretService: &mock.SecretService{ - PutSecretFn: func(ctx context.Context, orgID platform.ID, key string, val string) error { - return nil - }, - }, - }, - args: args{ - orgID: platform.ID(10), - permission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to put a secret", - fields: fields{ - SecretService: &mock.SecretService{ - PutSecretFn: func(ctx context.Context, orgID platform.ID, key string, val string) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/secrets is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := secret.NewAuthedService(tt.fields.SecretService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.PutSecret(ctx, tt.args.orgID, "", "") - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestSecretService_PutSecrets(t *testing.T) { - type fields struct { - SecretService influxdb.SecretService - } - type args struct { - permissions []influxdb.Permission - orgID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to put secrets", - fields: fields{ - SecretService: &mock.SecretService{ - PutSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { - return nil - }, - }, - }, - args: args{ - orgID: platform.ID(10), - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to put secrets", - fields: fields{ - SecretService: &mock.SecretService{ - PutSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { - return nil - }, - }, - }, - args: args{ - orgID: platform.ID(2), - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(2), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/0000000000000002/secrets is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - { - name: "unauthorized to put secrets without read access to their org", - fields: fields{ - SecretService: &mock.SecretService{ - PutSecretFn: func(ctx context.Context, orgID platform.ID, 
key string, val string) error { - return nil - }, - PutSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/secrets is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - { - name: "unauthorized to put secrets without write access to their org", - fields: fields{ - SecretService: &mock.SecretService{ - PutSecretFn: func(ctx context.Context, orgID platform.ID, key string, val string) error { - return nil - }, - PutSecretsFn: func(ctx context.Context, orgID platform.ID, m map[string]string) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.SecretsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/secrets is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := secret.NewAuthedService(tt.fields.SecretService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - secrets := make(map[string]string) - err := s.PutSecrets(ctx, tt.args.orgID, secrets) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/secret/middleware_logging.go b/secret/middleware_logging.go deleted file mode 100644 index 5c42c90fcec..00000000000 --- a/secret/middleware_logging.go +++ /dev/null @@ -1,110 +0,0 @@ -package secret - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "go.uber.org/zap" -) - -// Logger is a logger service middleware for secrets -type Logger struct { - logger *zap.Logger - secretService influxdb.SecretService -} - -var _ influxdb.SecretService = (*Logger)(nil) - -// NewLogger returns a logging service middleware for the User Service. -func NewLogger(log *zap.Logger, s influxdb.SecretService) *Logger { - return &Logger{ - logger: log, - secretService: s, - } -} - -// LoadSecret retrieves the secret value v found at key k for organization orgID. -func (l *Logger) LoadSecret(ctx context.Context, orgID platform.ID, key string) (str string, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to load secret", zap.Error(err), dur) - return - } - l.logger.Debug("secret load", dur) - }(time.Now()) - return l.secretService.LoadSecret(ctx, orgID, key) - -} - -// GetSecretKeys retrieves all secret keys that are stored for the organization orgID. -func (l *Logger) GetSecretKeys(ctx context.Context, orgID platform.ID) (strs []string, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to get secret keys", zap.Error(err), dur) - return - } - l.logger.Debug("secret get keys", dur) - }(time.Now()) - return l.secretService.GetSecretKeys(ctx, orgID) - -} - -// PutSecret stores the secret pair (k,v) for the organization orgID. 
-func (l *Logger) PutSecret(ctx context.Context, orgID platform.ID, key string, val string) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to put secret", zap.Error(err), dur) - return - } - l.logger.Debug("secret put", dur) - }(time.Now()) - return l.secretService.PutSecret(ctx, orgID, key, val) - -} - -// PutSecrets puts all provided secrets and overwrites any previous values. -func (l *Logger) PutSecrets(ctx context.Context, orgID platform.ID, m map[string]string) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to put secrets", zap.Error(err), dur) - return - } - l.logger.Debug("secret puts", dur) - }(time.Now()) - return l.secretService.PutSecrets(ctx, orgID, m) - -} - -// PatchSecrets patches all provided secrets and updates any previous values. -func (l *Logger) PatchSecrets(ctx context.Context, orgID platform.ID, m map[string]string) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to patch secret", zap.Error(err), dur) - return - } - l.logger.Debug("secret patch", dur) - }(time.Now()) - return l.secretService.PatchSecrets(ctx, orgID, m) - -} - -// DeleteSecret removes a single secret from the secret store. -func (l *Logger) DeleteSecret(ctx context.Context, orgID platform.ID, keys ...string) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to delete secret", zap.Error(err), dur) - return - } - l.logger.Debug("secret delete", dur) - }(time.Now()) - return l.secretService.DeleteSecret(ctx, orgID, keys...) - -} diff --git a/secret/middleware_metrics.go b/secret/middleware_metrics.go deleted file mode 100644 index c510a72f619..00000000000 --- a/secret/middleware_metrics.go +++ /dev/null @@ -1,70 +0,0 @@ -package secret - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/metric" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/prometheus/client_golang/prometheus" -) - -// SecreteService is a metrics middleware system for the secret service -type SecreteService struct { - // RED metrics - rec *metric.REDClient - - secretSvc influxdb.SecretService -} - -var _ influxdb.SecretService = (*SecreteService)(nil) - -// NewMetricService creates a new secret metrics middleware -func NewMetricService(reg prometheus.Registerer, s influxdb.SecretService) *SecreteService { - return &SecreteService{ - rec: metric.New(reg, "secret"), - secretSvc: s, - } -} - -// LoadSecret retrieves the secret value v found at key k for organization orgID. -func (ms *SecreteService) LoadSecret(ctx context.Context, orgID platform.ID, key string) (string, error) { - rec := ms.rec.Record("load_secret") - secret, err := ms.secretSvc.LoadSecret(ctx, orgID, key) - return secret, rec(err) -} - -// GetSecretKeys retrieves all secret keys that are stored for the organization orgID. -func (ms *SecreteService) GetSecretKeys(ctx context.Context, orgID platform.ID) ([]string, error) { - rec := ms.rec.Record("get_secret_keys") - secrets, err := ms.secretSvc.GetSecretKeys(ctx, orgID) - return secrets, rec(err) -} - -// PutSecret stores the secret pair (k,v) for the organization orgID. 
-func (ms *SecreteService) PutSecret(ctx context.Context, orgID platform.ID, key string, val string) error { - rec := ms.rec.Record("put_secret") - err := ms.secretSvc.PutSecret(ctx, orgID, key, val) - return rec(err) -} - -// PutSecrets puts all provided secrets and overwrites any previous values. -func (ms *SecreteService) PutSecrets(ctx context.Context, orgID platform.ID, m map[string]string) error { - rec := ms.rec.Record("put_secrets") - err := ms.secretSvc.PutSecrets(ctx, orgID, m) - return rec(err) -} - -// PatchSecrets patches all provided secrets and updates any previous values. -func (ms *SecreteService) PatchSecrets(ctx context.Context, orgID platform.ID, m map[string]string) error { - rec := ms.rec.Record("patch_secrets") - err := ms.secretSvc.PatchSecrets(ctx, orgID, m) - return rec(err) -} - -// DeleteSecret removes a single secret from the secret store. -func (ms *SecreteService) DeleteSecret(ctx context.Context, orgID platform.ID, keys ...string) error { - rec := ms.rec.Record("delete_secret") - err := ms.secretSvc.DeleteSecret(ctx, orgID, keys...) - return rec(err) -} diff --git a/secret/service.go b/secret/service.go deleted file mode 100644 index 7407cbd602d..00000000000 --- a/secret/service.go +++ /dev/null @@ -1,89 +0,0 @@ -package secret - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" -) - -type Service struct { - s *Storage -} - -// NewService creates a new service implementation for secrets -func NewService(s *Storage) *Service { - return &Service{s} -} - -// LoadSecret retrieves the secret value v found at key k for organization orgID. -func (s *Service) LoadSecret(ctx context.Context, orgID platform.ID, k string) (string, error) { - var v string - err := s.s.View(ctx, func(tx kv.Tx) error { - var err error - v, err = s.s.GetSecret(ctx, tx, orgID, k) - return err - }) - return v, err -} - -// GetSecretKeys retrieves all secret keys that are stored for the organization orgID. -func (s *Service) GetSecretKeys(ctx context.Context, orgID platform.ID) ([]string, error) { - var v []string - err := s.s.View(ctx, func(tx kv.Tx) error { - var err error - v, err = s.s.ListSecret(ctx, tx, orgID) - return err - }) - return v, err -} - -// PutSecret stores the secret pair (k,v) for the organization orgID. -func (s *Service) PutSecret(ctx context.Context, orgID platform.ID, k, v string) error { - err := s.s.Update(ctx, func(tx kv.Tx) error { - return s.s.PutSecret(ctx, tx, orgID, k, v) - }) - return err -} - -// PutSecrets puts all provided secrets and overwrites any previous values. -func (s *Service) PutSecrets(ctx context.Context, orgID platform.ID, m map[string]string) error { - // put secretes expects to replace all existing secretes - keys, err := s.GetSecretKeys(ctx, orgID) - if err != nil { - return err - } - if err := s.DeleteSecret(ctx, orgID, keys...); err != nil { - return err - } - - return s.PatchSecrets(ctx, orgID, m) -} - -// PatchSecrets patches all provided secrets and updates any previous values. -func (s *Service) PatchSecrets(ctx context.Context, orgID platform.ID, m map[string]string) error { - err := s.s.Update(ctx, func(tx kv.Tx) error { - for k, v := range m { - err := s.s.PutSecret(ctx, tx, orgID, k, v) - if err != nil { - return err - } - } - return nil - }) - return err -} - -// DeleteSecret removes a single secret from the secret store. 
-func (s *Service) DeleteSecret(ctx context.Context, orgID platform.ID, ks ...string) error { - err := s.s.Update(ctx, func(tx kv.Tx) error { - for _, k := range ks { - err := s.s.DeleteSecret(ctx, tx, orgID, k) - if err != nil { - return err - } - } - return nil - }) - return err -} diff --git a/secret/service_test.go b/secret/service_test.go deleted file mode 100644 index 0f9bf0ff026..00000000000 --- a/secret/service_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package secret_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/secret" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestBoltSecretService(t *testing.T) { - influxdbtesting.SecretService(initSvc, t) -} - -func initSvc(f influxdbtesting.SecretServiceFields, t *testing.T) (influxdb.SecretService, func()) { - t.Helper() - - s := inmem.NewKVStore() - - ctx := context.Background() - if err := all.Up(ctx, zaptest.NewLogger(t), s); err != nil { - t.Fatal(err) - } - - storage, err := secret.NewStore(s) - if err != nil { - t.Fatal(err) - } - - svc := secret.NewService(storage) - - for _, s := range f.Secrets { - if err := svc.PutSecrets(ctx, s.OrganizationID, s.Env); err != nil { - t.Fatalf("failed to populate users: %q", err) - } - } - - return svc, func() {} -} diff --git a/secret/storage.go b/secret/storage.go deleted file mode 100644 index b46d940cadd..00000000000 --- a/secret/storage.go +++ /dev/null @@ -1,187 +0,0 @@ -package secret - -import ( - "context" - "encoding/base64" - "errors" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" -) - -var secretBucket = []byte("secretsv1") - -// Storage is a store translation layer between the data storage unit and the -// service layer. 
-type Storage struct { - store kv.Store -} - -// NewStore creates a new storage system -func NewStore(s kv.Store) (*Storage, error) { - return &Storage{s}, nil -} - -func (s *Storage) View(ctx context.Context, fn func(kv.Tx) error) error { - return s.store.View(ctx, fn) -} - -func (s *Storage) Update(ctx context.Context, fn func(kv.Tx) error) error { - return s.store.Update(ctx, fn) -} - -// GetSecret Returns the value of a secret -func (s *Storage) GetSecret(ctx context.Context, tx kv.Tx, orgID platform.ID, k string) (string, error) { - key, err := encodeSecretKey(orgID, k) - if err != nil { - return "", err - } - - b, err := tx.Bucket(secretBucket) - if err != nil { - return "", err - } - - val, err := b.Get(key) - if kv.IsNotFound(err) { - return "", &errors2.Error{ - Code: errors2.ENotFound, - Msg: influxdb.ErrSecretNotFound, - } - } - - if err != nil { - return "", err - } - - v, err := decodeSecretValue(val) - if err != nil { - return "", err - } - - return v, nil -} - -// ListSecrets returns a list of secret keys -func (s *Storage) ListSecret(ctx context.Context, tx kv.Tx, orgID platform.ID) ([]string, error) { - b, err := tx.Bucket(secretBucket) - if err != nil { - return nil, err - } - - prefix, err := orgID.Encode() - if err != nil { - return nil, err - } - - cur, err := b.ForwardCursor(prefix, kv.WithCursorPrefix(prefix)) - if err != nil { - return nil, err - } - - keys := []string{} - - err = kv.WalkCursor(ctx, cur, func(k, v []byte) (bool, error) { - id, key, err := decodeSecretKey(k) - if err != nil { - return false, err - } - - if id != orgID { - // We've reached the end of the keyspace for the provided orgID - return false, nil - } - - keys = append(keys, key) - - return true, nil - }) - if err != nil { - return nil, err - } - - return keys, nil -} - -// PutSecret sets a secret in the db. -func (s *Storage) PutSecret(ctx context.Context, tx kv.Tx, orgID platform.ID, k, v string) error { - key, err := encodeSecretKey(orgID, k) - if err != nil { - return err - } - - val := encodeSecretValue(v) - - b, err := tx.Bucket(secretBucket) - if err != nil { - return err - } - - if err := b.Put(key, val); err != nil { - return err - } - - return nil -} - -// DeleteSecret removes a secret for the db -func (s *Storage) DeleteSecret(ctx context.Context, tx kv.Tx, orgID platform.ID, k string) error { - key, err := encodeSecretKey(orgID, k) - if err != nil { - return err - } - - b, err := tx.Bucket(secretBucket) - if err != nil { - return err - } - - return b.Delete(key) -} - -func encodeSecretKey(orgID platform.ID, k string) ([]byte, error) { - buf, err := orgID.Encode() - if err != nil { - return nil, err - } - - key := make([]byte, 0, platform.IDLength+len(k)) - key = append(key, buf...) - key = append(key, k...) - - return key, nil -} - -func decodeSecretKey(key []byte) (platform.ID, string, error) { - if len(key) < platform.IDLength { - // This should not happen. 
- return platform.InvalidID(), "", errors.New("provided key is too short to contain an ID (please report this error)") - } - - var id platform.ID - if err := id.Decode(key[:platform.IDLength]); err != nil { - return platform.InvalidID(), "", err - } - - k := string(key[platform.IDLength:]) - - return id, k, nil -} - -func decodeSecretValue(val []byte) (string, error) { - // store the secret value base64 encoded so that it's marginally better than plaintext - v, err := base64.StdEncoding.DecodeString(string(val)) - if err != nil { - return "", err - } - - return string(v), nil -} - -func encodeSecretValue(v string) []byte { - val := make([]byte, base64.StdEncoding.EncodedLen(len(v))) - base64.StdEncoding.Encode(val, []byte(v)) - return val -} diff --git a/secret_test.go b/secret_test.go deleted file mode 100644 index ad9bd6cdf6e..00000000000 --- a/secret_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package influxdb - -import ( - "encoding/json" - "testing" - - "github.com/google/go-cmp/cmp" -) - -func TestSecretFieldJSON(t *testing.T) { - cases := []struct { - name string - fld *SecretField - json string - target SecretField - }{ - { - name: "regular", - fld: &SecretField{Key: "some key"}, - json: `"secret: some key"`, - target: SecretField{Key: "some key"}, - }, - {name: "blank", fld: &SecretField{}, json: `""`}, - { - name: "with value", - fld: &SecretField{ - Key: "some key", - Value: strPtr("some value"), - }, - json: `"secret: some key"`, - target: SecretField{ - Key: "some key", - }, - }, - { - name: "unmarshal a post", - json: `"some value"`, - target: SecretField{ - Value: strPtr("some value"), - }, - }, - } - for _, c := range cases { - if c.fld != nil { - serialized, err := json.Marshal(c.fld) - if err != nil { - t.Fatalf("%s failed, secret key marshal err: %q", c.name, err.Error()) - } - if string(serialized) != c.json { - t.Fatalf("%s failed, secret key marshal result is unexpected, got %q, want %q", c.name, string(serialized), c.json) - } - } - var deserialized SecretField - if err := json.Unmarshal([]byte(c.json), &deserialized); err != nil { - t.Fatalf("%s failed, secret key unmarshal err: %q", c.name, err.Error()) - } - if diff := cmp.Diff(deserialized, c.target); diff != "" { - t.Fatalf("%s failed, secret key unmarshal result is unexpected, diff %s", c.name, diff) - } - } -} diff --git a/seed.flux b/seed.flux deleted file mode 100644 index 5cd80bde1e3..00000000000 --- a/seed.flux +++ /dev/null @@ -1,4 +0,0 @@ -import "internal/gen" - -gen.tables(n: 50000, tags: [{name: "_measurement", cardinality: 1}, {name: "_field", cardinality: 1}, {name: "t0", cardinality: 10}, {name: "t1", cardinality: 5}]) - |> to(bucket: "preview-test") diff --git a/semaphore.go b/semaphore.go deleted file mode 100644 index 8131dfaaae3..00000000000 --- a/semaphore.go +++ /dev/null @@ -1,56 +0,0 @@ -package influxdb - -import ( - "context" - "errors" - "time" -) - -// ErrNoAcquire is returned when it was not possible to acquire ownership of the -// semaphore. -var ErrNoAcquire = errors.New("ownership not acquired") - -// DefaultLeaseTTL is used when a specific lease TTL is not requested. -const DefaultLeaseTTL = time.Minute - -// A Semaphore provides an API for requesting ownership of an expirable semaphore. -// -// Acquired semaphores have an expiration. If they're not released or kept alive -// during this period then they will expire and ownership of the semaphore will -// be lost. -// -// TODO(edd): add AcquireTTL when needed. It should block. 
-type Semaphore interface { - // TryAcquire attempts to acquire ownership of the semaphore. TryAcquire - // must not block. Failure to get ownership of the semaphore should be - // signalled to the caller via the return of the ErrNoAcquire error. - TryAcquire(ctx context.Context, ttl time.Duration) (Lease, error) -} - -// A Lease represents ownership over a semaphore. It gives the owner the ability -// to extend ownership over the semaphore or release ownership of the semaphore. -type Lease interface { - // TTL returns the duration of time remaining before the lease expires. - TTL(context.Context) (time.Duration, error) - - // Release terminates ownership of the semaphore by revoking the lease. - Release(context.Context) error - - // KeepAlive extends the lease back to the original TTL. - KeepAlive(context.Context) error -} - -// NopSemaphore is a Semaphore that always hands out leases. -var NopSemaphore Semaphore = nopSemaphore{} - -type nopSemaphore struct{} - -func (nopSemaphore) TryAcquire(ctx context.Context, ttl time.Duration) (Lease, error) { - return nopLease{}, nil -} - -type nopLease struct{} - -func (nopLease) TTL(context.Context) (time.Duration, error) { return DefaultLeaseTTL, nil } -func (nopLease) Release(context.Context) error { return nil } -func (nopLease) KeepAlive(context.Context) error { return nil } diff --git a/session.go b/session.go deleted file mode 100644 index e506ceb43ed..00000000000 --- a/session.go +++ /dev/null @@ -1,103 +0,0 @@ -package influxdb - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// ErrSessionNotFound is the error messages for a missing sessions. -const ErrSessionNotFound = "session not found" - -// ErrSessionExpired is the error message for expired sessions. -const ErrSessionExpired = "session has expired" - -// RenewSessionTime is the time to extend session, currently set to 5min. -var RenewSessionTime = time.Duration(time.Second * 300) - -// DefaultSessionLength is the default session length on initial creation. -var DefaultSessionLength = time.Hour - -var ( - // OpFindSession represents the operation that looks for sessions. - OpFindSession = "FindSession" - // OpExpireSession represents the operation that expires sessions. - OpExpireSession = "ExpireSession" - // OpCreateSession represents the operation that creates a session for a given user. - OpCreateSession = "CreateSession" - // OpRenewSession = "RenewSession" - OpRenewSession = "RenewSession" -) - -// SessionAuthorizationKind defines the type of authorizer -const SessionAuthorizationKind = "session" - -// Session is a user session. -type Session struct { - // ID is only required for auditing purposes. - ID platform.ID `json:"id"` - Key string `json:"key"` - CreatedAt time.Time `json:"createdAt"` - ExpiresAt time.Time `json:"expiresAt"` - UserID platform.ID `json:"userID,omitempty"` - Permissions []Permission `json:"permissions,omitempty"` -} - -// Expired returns an error if the session is expired. -func (s *Session) Expired() error { - if time.Now().After(s.ExpiresAt) { - return &errors.Error{ - Code: errors.EForbidden, - Msg: ErrSessionExpired, - } - } - - return nil -} - -// PermissionSet returns the set of permissions associated with the session. 
-func (s *Session) PermissionSet() (PermissionSet, error) { - if err := s.Expired(); err != nil { - return nil, &errors.Error{ - Code: errors.EUnauthorized, - Err: err, - } - } - - return s.Permissions, nil -} - -// Kind returns session and is used for auditing. -func (s *Session) Kind() string { return SessionAuthorizationKind } - -// Identifier returns the sessions ID and is used for auditing. -func (s *Session) Identifier() platform.ID { return s.ID } - -// GetUserID returns the user id. -func (s *Session) GetUserID() platform.ID { - return s.UserID -} - -// EphemeralAuth generates an Authorization that is not stored -// but at the user's max privs. -func (s *Session) EphemeralAuth(orgID platform.ID) *Authorization { - return &Authorization{ - ID: s.ID, - OrgID: orgID, - Status: Active, - UserID: s.UserID, - Permissions: s.Permissions, - } -} - -// SessionService represents a service for managing user sessions. -type SessionService interface { - FindSession(ctx context.Context, key string) (*Session, error) - ExpireSession(ctx context.Context, key string) error - CreateSession(ctx context.Context, user string) (*Session, error) - // TODO: update RenewSession to take a ID instead of a session. - // By taking a session object it could be confused to update more things about the session - RenewSession(ctx context.Context, session *Session, newExpiration time.Time) error -} diff --git a/session/errors.go b/session/errors.go deleted file mode 100644 index b13557d16cd..00000000000 --- a/session/errors.go +++ /dev/null @@ -1,14 +0,0 @@ -package session - -import ( - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - // ErrUnauthorized when a session request is unauthorized - // usually due to password mismatch - ErrUnauthorized = &errors.Error{ - Code: errors.EUnauthorized, - Msg: "unauthorized access", - } -) diff --git a/session/http_server.go b/session/http_server.go deleted file mode 100644 index 98a07ffe8ee..00000000000 --- a/session/http_server.go +++ /dev/null @@ -1,237 +0,0 @@ -package session - -import ( - "context" - "net/http" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -const ( - prefixSignIn = "/api/v2/signin" - prefixSignOut = "/api/v2/signout" -) - -// SessionHandler represents an HTTP API handler for authorizations. -type SessionHandler struct { - chi.Router - api *kithttp.API - log *zap.Logger - - sessionSvc influxdb.SessionService - passSvc influxdb.PasswordsService - userSvc influxdb.UserService -} - -// NewSessionHandler returns a new instance of SessionHandler. 
-func NewSessionHandler(log *zap.Logger, sessionSvc influxdb.SessionService, userSvc influxdb.UserService, passwordsSvc influxdb.PasswordsService) *SessionHandler { - svr := &SessionHandler{ - api: kithttp.NewAPI(kithttp.WithLog(log)), - log: log, - - passSvc: passwordsSvc, - sessionSvc: sessionSvc, - userSvc: userSvc, - } - - return svr -} - -type resourceHandler struct { - prefix string - *SessionHandler -} - -// Prefix is necessary to mount the router as a resource handler -func (r resourceHandler) Prefix() string { return r.prefix } - -// SignInResourceHandler allows us to return 2 different resource handler -// for the appropriate mounting location -func (h SessionHandler) SignInResourceHandler() *resourceHandler { - h.Router = chi.NewRouter() - h.Router.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - h.Router.Post("/", h.handleSignin) - return &resourceHandler{prefix: prefixSignIn, SessionHandler: &h} -} - -// SignOutResourceHandler allows us to return 2 different resource handler -// for the appropriate mounting location -func (h SessionHandler) SignOutResourceHandler() *resourceHandler { - h.Router = chi.NewRouter() - h.Router.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - h.Router.Post("/", h.handleSignout) - return &resourceHandler{prefix: prefixSignOut, SessionHandler: &h} -} - -// handleSignin is the HTTP handler for the POST /signin route. -func (h *SessionHandler) handleSignin(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - req, decErr := decodeSigninRequest(ctx, r) - if decErr != nil { - h.api.Err(w, r, ErrUnauthorized) - return - } - - u, err := h.userSvc.FindUser(ctx, influxdb.UserFilter{ - Name: &req.Username, - }) - if err != nil { - h.api.Err(w, r, ErrUnauthorized) - return - } - - if err := h.passSvc.ComparePassword(ctx, u.ID, req.Password); err != nil { - h.api.Err(w, r, ErrUnauthorized) - return - } - - s, e := h.sessionSvc.CreateSession(ctx, req.Username) - if e != nil { - h.api.Err(w, r, ErrUnauthorized) - return - } - - encodeCookieSession(w, s) - w.WriteHeader(http.StatusNoContent) -} - -type signinRequest struct { - Username string - Password string -} - -func decodeSigninRequest(ctx context.Context, r *http.Request) (*signinRequest, *errors.Error) { - u, p, ok := r.BasicAuth() - if !ok { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid basic auth", - } - } - - return &signinRequest{ - Username: u, - Password: p, - }, nil -} - -// handleSignout is the HTTP handler for the POST /signout route. -func (h *SessionHandler) handleSignout(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - req, err := decodeSignoutRequest(ctx, r) - if err != nil { - h.api.Err(w, r, ErrUnauthorized) - return - } - - if err := h.sessionSvc.ExpireSession(ctx, req.Key); err != nil { - h.api.Err(w, r, ErrUnauthorized) - return - } - - w.WriteHeader(http.StatusNoContent) -} - -type signoutRequest struct { - Key string -} - -func decodeSignoutRequest(ctx context.Context, r *http.Request) (*signoutRequest, error) { - key, err := DecodeCookieSession(ctx, r) - if err != nil { - return nil, err - } - return &signoutRequest{ - Key: key, - }, nil -} - -const cookieSessionName = "influxdb-oss-session" - -func encodeCookieSession(w http.ResponseWriter, s *influxdb.Session) { - // We only need the session cookie for accesses to "/api/...", so limit - // it to that using "Path". - // - // Since the cookie is limited to "/api/..." 
and we don't expect any - // links directly into /api/..., use SameSite=Strict as a hardening - // measure. This works because external links into the UI have the form - // https:///orgs//..., https:///signin, etc and don't - // need the cookie sent. By the time the UI itself calls out to - // /api/..., the location bar matches the cookie's domain and - // everything is 1st party and Strict's restriction work fine. - // - // SameSite=Lax would also be safe to use (modern browser's default if - // unset) since it only sends the cookie with GET (and other safe HTTP - // methods like HEAD and OPTIONS as defined in RFC6264) requests when - // the location bar matches the domain of the cookie and we know that - // our APIs do not perform state-changing actions with GET and other - // safe methods. Using SameSite=Strict helps future-proof us against - // that changing (ie, we add a state-changing GET API). - // - // Note: it's generally recommended that SameSite should not be relied - // upon (particularly Lax) because: - // a) SameSite doesn't work with (cookie-less) Basic Auth. We don't - // share browser session BasicAuth with accesses to to /api/... so - // this isn't a problem - // b) SameSite=lax allows GET (and other safe HTTP methods) and some - // services might allow state-changing requests via GET. Our API - // doesn't support state-changing GETs and SameSite=strict doesn't - // allow GETs from 3rd party sites at all, so this isn't a problem - // c) similar to 'b', some frameworks will accept HTTP methods for - // other handlers. Eg, the application is designed for POST but it - // will accept requests converted to the GET method. Golang does not - // do this itself and our route mounts explicitly map the HTTP - // method to the specific handler and thus we are not susceptible to - // this - // d) SameSite could be bypassed if the attacker were able to - // manipulate the location bar in the browser (a serious browser - // bug; it is reasonable for us to expect browsers to enforce their - // SameSite restrictions) - c := &http.Cookie{ - Name: cookieSessionName, - Value: s.Key, - Path: "/api/", // since UI doesn't need it, limit cookie usage to API requests - Expires: s.ExpiresAt, - SameSite: http.SameSiteStrictMode, - } - - http.SetCookie(w, c) -} - -func DecodeCookieSession(ctx context.Context, r *http.Request) (string, error) { - c, err := r.Cookie(cookieSessionName) - if err != nil { - return "", &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - return c.Value, nil -} - -// SetCookieSession adds a cookie for the session to an http request -func SetCookieSession(key string, r *http.Request) { - c := &http.Cookie{ - Name: cookieSessionName, - Value: key, - Secure: true, - SameSite: http.SameSiteStrictMode, - } - - r.AddCookie(c) -} diff --git a/session/http_server_test.go b/session/http_server_test.go deleted file mode 100644 index b22e4c357e4..00000000000 --- a/session/http_server_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package session - -import ( - "context" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - "go.uber.org/zap/zaptest" -) - -func TestSessionHandler_handleSignin(t *testing.T) { - type fields struct { - PasswordsService influxdb.PasswordsService - SessionService influxdb.SessionService - } - type args struct { - user string - password string - } - type wants struct { - cookie string - code int - 
} - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "successful compare password", - fields: fields{ - SessionService: &mock.SessionService{ - CreateSessionFn: func(context.Context, string) (*influxdb.Session, error) { - return &influxdb.Session{ - ID: platform.ID(0), - Key: "abc123xyz", - CreatedAt: time.Date(2018, 9, 26, 0, 0, 0, 0, time.UTC), - ExpiresAt: time.Date(2030, 9, 26, 0, 0, 0, 0, time.UTC), - UserID: platform.ID(1), - }, nil - }, - }, - PasswordsService: &mock.PasswordsService{ - ComparePasswordFn: func(context.Context, platform.ID, string) error { - return nil - }, - }, - }, - args: args{ - user: "user1", - password: "supersecret", - }, - wants: wants{ - cookie: "influxdb-oss-session=abc123xyz; Path=/api/; Expires=Thu, 26 Sep 2030 00:00:00 GMT; SameSite=Strict", - code: http.StatusNoContent, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - userSVC := mock.NewUserService() - userSVC.FindUserFn = func(_ context.Context, f influxdb.UserFilter) (*influxdb.User, error) { - return &influxdb.User{ID: 1}, nil - } - h := NewSessionHandler(zaptest.NewLogger(t), tt.fields.SessionService, userSVC, tt.fields.PasswordsService) - - server := httptest.NewServer(h.SignInResourceHandler()) - client := server.Client() - - r, err := http.NewRequest("POST", server.URL, nil) - if err != nil { - t.Fatal(err) - } - r.SetBasicAuth(tt.args.user, tt.args.password) - - resp, err := client.Do(r) - if err != nil { - t.Fatal(err) - } - - if got, want := resp.StatusCode, tt.wants.code; got != want { - t.Errorf("bad status code: got %d want %d", got, want) - } - - cookie := resp.Header.Get("Set-Cookie") - if got, want := cookie, tt.wants.cookie; got != want { - t.Errorf("expected session cookie to be set: got %q want %q", got, want) - } - }) - } -} diff --git a/session/middleware_logging.go b/session/middleware_logging.go deleted file mode 100644 index 0466e670af6..00000000000 --- a/session/middleware_logging.go +++ /dev/null @@ -1,81 +0,0 @@ -package session - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2" - "go.uber.org/zap" -) - -// SessionLogger is a logger service middleware for sessions -type SessionLogger struct { - logger *zap.Logger - sessionService influxdb.SessionService -} - -var _ influxdb.SessionService = (*SessionLogger)(nil) - -// NewSessionLogger returns a logging service middleware for the User Service. 
-func NewSessionLogger(log *zap.Logger, s influxdb.SessionService) *SessionLogger { - return &SessionLogger{ - logger: log, - sessionService: s, - } -} - -// FindSession calls the underlying session service and logs the results of the request -func (l *SessionLogger) FindSession(ctx context.Context, key string) (session *influxdb.Session, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to session find", zap.Error(err), dur) - return - } - l.logger.Debug("session find", dur) - }(time.Now()) - return l.sessionService.FindSession(ctx, key) - -} - -// ExpireSession calls the underlying session service and logs the results of the request -func (l *SessionLogger) ExpireSession(ctx context.Context, key string) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to session expire", zap.Error(err), dur) - return - } - l.logger.Debug("session expire", dur) - }(time.Now()) - return l.sessionService.ExpireSession(ctx, key) - -} - -// CreateSession calls the underlying session service and logs the results of the request -func (l *SessionLogger) CreateSession(ctx context.Context, user string) (s *influxdb.Session, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to session create", zap.Error(err), dur) - return - } - l.logger.Debug("session create", dur) - }(time.Now()) - return l.sessionService.CreateSession(ctx, user) - -} - -// RenewSession calls the underlying session service and logs the results of the request -func (l *SessionLogger) RenewSession(ctx context.Context, session *influxdb.Session, newExpiration time.Time) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to session renew", zap.Error(err), dur) - return - } - l.logger.Debug("session renew", dur) - }(time.Now()) - return l.sessionService.RenewSession(ctx, session, newExpiration) - -} diff --git a/session/middleware_metrics.go b/session/middleware_metrics.go deleted file mode 100644 index 6e16d027993..00000000000 --- a/session/middleware_metrics.go +++ /dev/null @@ -1,56 +0,0 @@ -package session - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/metric" - "github.com/prometheus/client_golang/prometheus" -) - -// SessionMetrics is a metrics middleware system for the session service -type SessionMetrics struct { - // RED metrics - rec *metric.REDClient - - sessionSvc influxdb.SessionService -} - -var _ influxdb.SessionService = (*SessionMetrics)(nil) - -// NewSessionMetrics creates a new session metrics middleware -func NewSessionMetrics(reg prometheus.Registerer, s influxdb.SessionService) *SessionMetrics { - return &SessionMetrics{ - rec: metric.New(reg, "session"), - sessionSvc: s, - } -} - -// FindSession calls the underlying session service and tracks RED metrics for the call -func (m *SessionMetrics) FindSession(ctx context.Context, key string) (session *influxdb.Session, err error) { - rec := m.rec.Record("find_session") - session, err = m.sessionSvc.FindSession(ctx, key) - return session, rec(err) -} - -// ExpireSession calls the underlying session service and tracks RED metrics for the call -func (m *SessionMetrics) ExpireSession(ctx context.Context, key string) (err error) { - rec := 
m.rec.Record("expire_session") - err = m.sessionSvc.ExpireSession(ctx, key) - return rec(err) -} - -// CreateSession calls the underlying session service and tracks RED metrics for the call -func (m *SessionMetrics) CreateSession(ctx context.Context, user string) (s *influxdb.Session, err error) { - rec := m.rec.Record("create_session") - s, err = m.sessionSvc.CreateSession(ctx, user) - return s, rec(err) -} - -// RenewSession calls the underlying session service and tracks RED metrics for the call -func (m *SessionMetrics) RenewSession(ctx context.Context, session *influxdb.Session, newExpiration time.Time) (err error) { - rec := m.rec.Record("renew_session") - err = m.sessionSvc.RenewSession(ctx, session, newExpiration) - return rec(err) -} diff --git a/session/service.go b/session/service.go deleted file mode 100644 index 0b734d842cc..00000000000 --- a/session/service.go +++ /dev/null @@ -1,206 +0,0 @@ -package session - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/rand" - "github.com/influxdata/influxdb/v2/snowflake" -) - -// Service implements the influxdb.SessionService interface and -// handles communication between session and the necessary user and urm services -type Service struct { - store *Storage - userService influxdb.UserService - urmService influxdb.UserResourceMappingService - authService influxdb.AuthorizationService - sessionLength time.Duration - - idGen platform.IDGenerator - tokenGen influxdb.TokenGenerator - - disableAuthorizationsForMaxPermissions func(context.Context) bool -} - -// ServiceOption is a functional option for configuring a *Service -type ServiceOption func(*Service) - -// WithSessionLength configures the length of the session with the provided -// duration when the resulting option is called on a *Service. -func WithSessionLength(length time.Duration) ServiceOption { - return func(s *Service) { - s.sessionLength = length - } -} - -// WithIDGenerator overrides the default ID generator with the one -// provided to this function when called on a *Service -func WithIDGenerator(gen platform.IDGenerator) ServiceOption { - return func(s *Service) { - s.idGen = gen - } -} - -// WithTokenGenerator overrides the default token generator with the one -// provided to this function when called on a *Service -func WithTokenGenerator(gen influxdb.TokenGenerator) ServiceOption { - return func(s *Service) { - s.tokenGen = gen - } -} - -// NewService creates a new session service -func NewService(store *Storage, userService influxdb.UserService, urmService influxdb.UserResourceMappingService, authSvc influxdb.AuthorizationService, opts ...ServiceOption) *Service { - service := &Service{ - store: store, - userService: userService, - urmService: urmService, - authService: authSvc, - sessionLength: time.Hour, - idGen: snowflake.NewIDGenerator(), - tokenGen: rand.NewTokenGenerator(64), - disableAuthorizationsForMaxPermissions: func(context.Context) bool { - return false - }, - } - - for _, opt := range opts { - opt(service) - } - - return service -} - -// WithMaxPermissionFunc sets the useAuthorizationsForMaxPermissions function -// which can trigger whether or not max permissions uses the users authorizations -// to derive maximum permissions. 
-func (s *Service) WithMaxPermissionFunc(fn func(context.Context) bool) { - s.disableAuthorizationsForMaxPermissions = fn -} - -// FindSession finds a session based on the session key -func (s *Service) FindSession(ctx context.Context, key string) (*influxdb.Session, error) { - session, err := s.store.FindSessionByKey(ctx, key) - if err != nil { - return nil, err - } - - // TODO: We want to be able to store permissions in the session - // but the contract provided by urm's doesn't give us enough information to quickly repopulate our - // session permissions on updates so we are required to pull the permissions every time we find the session. - permissions, err := s.getPermissionSet(ctx, session.UserID) - if err != nil { - return nil, err - } - - session.Permissions = permissions - return session, nil -} - -// ExpireSession removes a session from the system -func (s *Service) ExpireSession(ctx context.Context, key string) error { - session, err := s.store.FindSessionByKey(ctx, key) - if err != nil { - return err - } - return s.store.DeleteSession(ctx, session.ID) -} - -// CreateSession -func (s *Service) CreateSession(ctx context.Context, user string) (*influxdb.Session, error) { - u, err := s.userService.FindUser(ctx, influxdb.UserFilter{ - Name: &user, - }) - if err != nil { - return nil, err - } - - token, err := s.tokenGen.Token() - if err != nil { - return nil, err - } - - // for now we are not storing the permissions because we need to pull them every time we find - // so we might as well keep the session stored small - now := time.Now() - session := &influxdb.Session{ - ID: s.idGen.ID(), - Key: token, - CreatedAt: now, - ExpiresAt: now.Add(s.sessionLength), - UserID: u.ID, - } - - return session, s.store.CreateSession(ctx, session) -} - -// RenewSession update the sessions expiration time -func (s *Service) RenewSession(ctx context.Context, session *influxdb.Session, newExpiration time.Time) error { - if session == nil { - return &errors.Error{ - Msg: "session is nil", - } - } - return s.store.RefreshSession(ctx, session.ID, newExpiration) -} - -func (s *Service) getPermissionSet(ctx context.Context, uid platform.ID) ([]influxdb.Permission, error) { - mappings, _, err := s.urmService.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{UserID: uid}, influxdb.FindOptions{Limit: 100}) - if err != nil { - return nil, err - } - - permissions, err := permissionFromMapping(mappings) - if err != nil { - return nil, err - } - - if len(mappings) == 100 { - // if we got 100 mappings we probably need to pull more pages - // account for paginated results - for i := len(mappings); len(mappings) > 0; i += len(mappings) { - mappings, _, err = s.urmService.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{UserID: uid}, influxdb.FindOptions{Offset: i, Limit: 100}) - if err != nil { - return nil, err - } - pms, err := permissionFromMapping(mappings) - if err != nil { - return nil, err - } - permissions = append(permissions, pms...) - } - } - - if !s.disableAuthorizationsForMaxPermissions(ctx) { - as, _, err := s.authService.FindAuthorizations(ctx, influxdb.AuthorizationFilter{UserID: &uid}) - if err != nil { - return nil, err - } - for _, a := range as { - permissions = append(permissions, a.Permissions...) - } - } - - permissions = append(permissions, influxdb.MePermissions(uid)...) 
- return permissions, nil -} - -func permissionFromMapping(mappings []*influxdb.UserResourceMapping) ([]influxdb.Permission, error) { - ps := make([]influxdb.Permission, 0, len(mappings)) - for _, m := range mappings { - p, err := m.ToPermissions() - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - ps = append(ps, p...) - } - - return ps, nil -} diff --git a/session/service_test.go b/session/service_test.go deleted file mode 100644 index 937d909b914..00000000000 --- a/session/service_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package session - -import ( - "context" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestSessionService(t *testing.T) { - influxdbtesting.SessionService(initSessionService, t) -} - -func initSessionService(f influxdbtesting.SessionFields, t *testing.T) (influxdb.SessionService, string, func()) { - ss := NewStorage(inmem.NewSessionStore()) - - kvStore := inmem.NewKVStore() - - ctx := context.Background() - if err := all.Up(ctx, zaptest.NewLogger(t), kvStore); err != nil { - t.Fatal(err) - } - - ten := tenant.NewService(tenant.NewStore(kvStore)) - - svc := NewService(ss, ten, ten, &mock.AuthorizationService{ - FindAuthorizationsFn: func(context.Context, influxdb.AuthorizationFilter, ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - return []*influxdb.Authorization{}, 0, nil - }, - }, WithSessionLength(time.Minute)) - - if f.IDGenerator != nil { - WithIDGenerator(f.IDGenerator)(svc) - } - - if f.TokenGenerator != nil { - WithTokenGenerator(f.TokenGenerator)(svc) - } - - for _, u := range f.Users { - if err := ten.CreateUser(ctx, u); err != nil { - t.Fatalf("failed to populate users") - } - } - for _, s := range f.Sessions { - if err := ss.CreateSession(ctx, s); err != nil { - t.Fatalf("failed to populate sessions") - } - } - return svc, "session", func() {} -} diff --git a/session/storage.go b/session/storage.go deleted file mode 100644 index 958a5a265b6..00000000000 --- a/session/storage.go +++ /dev/null @@ -1,138 +0,0 @@ -package session - -import ( - "context" - "encoding/json" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -type Store interface { - Set(key, val string, expireAt time.Time) error - Get(key string) (string, error) - Delete(key string) error - ExpireAt(key string, expireAt time.Time) error -} - -var storePrefix = "sessionsv2/" -var storeIndex = "sessionsindexv2/" - -// Storage is a store translation layer between the data storage unit and the -// service layer. 
-type Storage struct { - store Store -} - -// NewStorage creates a new storage system -func NewStorage(s Store) *Storage { - return &Storage{s} -} - -// FindSessionByKey use a given key to retrieve the stored session -func (s *Storage) FindSessionByKey(ctx context.Context, key string) (*influxdb.Session, error) { - val, err := s.store.Get(sessionIndexKey(key)) - if err != nil { - return nil, err - } - - if val == "" { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrSessionNotFound, - } - } - - id, err := platform.IDFromString(val) - if err != nil { - return nil, err - } - return s.FindSessionByID(ctx, *id) -} - -// FindSessionByID use a provided id to retrieve the stored session -func (s *Storage) FindSessionByID(ctx context.Context, id platform.ID) (*influxdb.Session, error) { - val, err := s.store.Get(storePrefix + id.String()) - if err != nil { - return nil, err - } - if val == "" { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: influxdb.ErrSessionNotFound, - } - } - - session := &influxdb.Session{} - return session, json.Unmarshal([]byte(val), session) -} - -// CreateSession creates a new session -func (s *Storage) CreateSession(ctx context.Context, session *influxdb.Session) error { - // create session - sessionBytes, err := json.Marshal(session) - if err != nil { - return err - } - - // use a minute time just so the session will expire if we fail to set the expiration later - sessionID := sessionID(session.ID) - if err := s.store.Set(sessionID, string(sessionBytes), session.ExpiresAt); err != nil { - return err - } - - // create index - indexKey := sessionIndexKey(session.Key) - if err := s.store.Set(indexKey, session.ID.String(), session.ExpiresAt); err != nil { - return err - } - - return nil -} - -// RefreshSession updates the expiration time of a session. 
-func (s *Storage) RefreshSession(ctx context.Context, id platform.ID, expireAt time.Time) error { - session, err := s.FindSessionByID(ctx, id) - if err != nil { - return err - } - - if expireAt.Before(session.ExpiresAt) { - // no need to recreate the session if we aren't extending the expiration - return nil - } - - session.ExpiresAt = expireAt - return s.CreateSession(ctx, session) -} - -// DeleteSession removes the session and index from storage -func (s *Storage) DeleteSession(ctx context.Context, id platform.ID) error { - session, err := s.FindSessionByID(ctx, id) - if err != nil { - return err - } - if session == nil { - return nil - } - - if err := s.store.Delete(sessionID(session.ID)); err != nil { - return err - } - - if err := s.store.Delete(sessionIndexKey(session.Key)); err != nil { - return err - } - - return nil -} - -func sessionID(id platform.ID) string { - return storePrefix + id.String() -} - -func sessionIndexKey(key string) string { - return storeIndex + key -} diff --git a/session/storage_test.go b/session/storage_test.go deleted file mode 100644 index 3e208c65e3b..00000000000 --- a/session/storage_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package session_test - -import ( - "context" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/session" -) - -func TestSessionStore(t *testing.T) { - driver := func() session.Store { - return inmem.NewSessionStore() - } - - expected := &influxdb.Session{ - ID: 1, - Key: "2", - CreatedAt: time.Now(), - ExpiresAt: time.Now().Add(time.Hour), - } - - simpleSetup := func(t *testing.T, store *session.Storage) { - err := store.CreateSession( - context.Background(), - expected, - ) - if err != nil { - t.Fatal(err) - } - } - - st := []struct { - name string - setup func(*testing.T, *session.Storage) - update func(*testing.T, *session.Storage) - results func(*testing.T, *session.Storage) - }{ - { - name: "create", - setup: simpleSetup, - results: func(t *testing.T, store *session.Storage) { - session, err := store.FindSessionByID(context.Background(), 1) - if err != nil { - t.Fatal(err) - } - - if !cmp.Equal(session, expected) { - t.Fatalf("expected identical sessions: \n%+v\n%+v", session, expected) - } - }, - }, - { - name: "get", - setup: simpleSetup, - results: func(t *testing.T, store *session.Storage) { - session, err := store.FindSessionByID(context.Background(), 1) - if err != nil { - t.Fatal(err) - } - - if !cmp.Equal(session, expected) { - t.Fatalf("expected identical sessions: \n%+v\n%+v", session, expected) - } - - session, err = store.FindSessionByKey(context.Background(), "2") - if err != nil { - t.Fatal(err) - } - - if !cmp.Equal(session, expected) { - t.Fatalf("expected identical sessions: \n%+v\n%+v", session, expected) - } - }, - }, - { - name: "delete", - setup: simpleSetup, - update: func(t *testing.T, store *session.Storage) { - err := store.DeleteSession(context.Background(), 1) - if err != nil { - t.Fatal(err) - } - }, - results: func(t *testing.T, store *session.Storage) { - session, err := store.FindSessionByID(context.Background(), 1) - if err == nil { - t.Fatal("expected error on deleted session but got none") - } - - if session != nil { - t.Fatal("got a session when none should have existed") - } - }, - }, - } - for _, testScenario := range st { - t.Run(testScenario.name, func(t *testing.T) { - ss := session.NewStorage(driver()) - - // setup - if 
testScenario.setup != nil { - testScenario.setup(t, ss) - } - - // update - if testScenario.update != nil { - testScenario.update(t, ss) - } - - // results - if testScenario.results != nil { - testScenario.results(t, ss) - } - }) - } -} diff --git a/snowflake/id.go b/snowflake/id.go deleted file mode 100644 index d72eb57c8cf..00000000000 --- a/snowflake/id.go +++ /dev/null @@ -1,99 +0,0 @@ -package snowflake - -import ( - "errors" - "math/rand" - "sync" - "time" - - rand2 "github.com/influxdata/influxdb/v2/internal/rand" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/pkg/snowflake" -) - -var seededRand *rand.Rand - -func init() { - lockedSource := rand2.NewLockedSourceFromSeed(time.Now().UnixNano()) - seededRand = rand.New(lockedSource) - SetGlobalMachineID(seededRand.Intn(1023)) -} - -var globalmachineID struct { - id int - set bool - sync.RWMutex -} - -// ErrGlobalIDBadVal means that the global machine id value wasn't properly set. -var ErrGlobalIDBadVal = errors.New("globalID must be a number between (inclusive) 0 and 1023") - -// SetGlobalMachineID returns the global machine id. This number is limited to a number between 0 and 1023 inclusive. -func SetGlobalMachineID(id int) error { - if id > 1023 || id < 0 { - return ErrGlobalIDBadVal - } - globalmachineID.Lock() - globalmachineID.id = id - globalmachineID.set = true - globalmachineID.Unlock() - return nil -} - -// GlobalMachineID returns the global machine id. This number is limited to a number between 0 and 1023 inclusive. -func GlobalMachineID() int { - var id int - globalmachineID.RLock() - id = int(globalmachineID.id) - globalmachineID.RUnlock() - return id -} - -// NewDefaultIDGenerator returns an *IDGenerator that uses the currently set global machine ID. -// If you change the global machine id, it will not change the id in any generators that have already been created. -func NewDefaultIDGenerator() *IDGenerator { - globalmachineID.RLock() - defer globalmachineID.RUnlock() - if globalmachineID.set { - return NewIDGenerator(WithMachineID(globalmachineID.id)) - } - return NewIDGenerator() -} - -// IDGenerator holds the ID generator. -type IDGenerator struct { - Generator *snowflake.Generator -} - -// IDGeneratorOp is an option for an IDGenerator. -type IDGeneratorOp func(*IDGenerator) - -// WithMachineID uses the low 12 bits of machineID to set the machine ID for the snowflake ID. -func WithMachineID(machineID int) IDGeneratorOp { - return func(g *IDGenerator) { - g.Generator = snowflake.New(machineID & 1023) - } -} - -// NewIDGenerator returns a new IDGenerator. Optionally you can use an IDGeneratorOp. -// to use a specific Generator -func NewIDGenerator(opts ...IDGeneratorOp) *IDGenerator { - gen := &IDGenerator{} - for _, f := range opts { - f(gen) - } - if gen.Generator == nil { - machineId := seededRand.Intn(1023) - gen.Generator = snowflake.New(machineId) - } - return gen -} - -// ID returns the next platform.ID from an IDGenerator. 
-func (g *IDGenerator) ID() platform2.ID { - var id platform2.ID - for !id.Valid() { - id = platform2.ID(g.Generator.Next()) - } - return id -} diff --git a/snowflake/id_test.go b/snowflake/id_test.go deleted file mode 100644 index 3498294681f..00000000000 --- a/snowflake/id_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package snowflake - -import ( - "testing" - - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -func TestIDLength(t *testing.T) { - gen := NewIDGenerator() - id := gen.ID() - if !id.Valid() { - t.Fail() - } - enc, _ := id.Encode() - if len(enc) != platform2.IDLength { - t.Fail() - } -} - -func TestToFromString(t *testing.T) { - gen := NewIDGenerator() - id := gen.ID() - var clone platform2.ID - if err := clone.DecodeFromString(id.String()); err != nil { - t.Error(err) - } else if id != clone { - t.Errorf("id started as %x but got back %x", id, clone) - } -} - -func TestWithMachineID(t *testing.T) { - gen := NewIDGenerator(WithMachineID(1023)) - if gen.Generator.MachineID() != 1023 { - t.Errorf("expected machineID of %d but got %d", 1023, gen.Generator.MachineID()) - } - gen = NewIDGenerator(WithMachineID(1023)) - if gen.Generator.MachineID() != 1023 { - t.Errorf("expected machineID of %d but got %d", 1023, gen.Generator.MachineID()) - } - gen = NewIDGenerator(WithMachineID(99)) - if gen.Generator.MachineID() != 99 { - t.Errorf("expected machineID of %d but got %d", 99, gen.Generator.MachineID()) - } - gen = NewIDGenerator(WithMachineID(101376)) - if gen.Generator.MachineID() != 0 { - t.Errorf("expected machineID of %d but got %d", 0, gen.Generator.MachineID()) - } - gen = NewIDGenerator(WithMachineID(102399)) - if gen.Generator.MachineID() != 1023 { - t.Errorf("expected machineID of %d but got %d", 1023, gen.Generator.MachineID()) - } -} - -func TestGlobalMachineID(t *testing.T) { - if !globalmachineID.set { - t.Error("expected global machine ID to be set") - } - if GlobalMachineID() < 0 || GlobalMachineID() > 1023 { - t.Error("expected global machine ID to be between 0 and 1023 inclusive") - } -} diff --git a/source.go b/source.go deleted file mode 100644 index bcf4d92c4c1..00000000000 --- a/source.go +++ /dev/null @@ -1,138 +0,0 @@ -package influxdb - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -const ( - // ErrSourceNotFound is an error message when a source does not exist. - ErrSourceNotFound = "source not found" -) - -// SourceType is a string for types of sources. -type SourceType string - -const ( - // V2SourceType is an InfluxDBv2 type. - V2SourceType = "v2" - // V1SourceType is an InfluxDBv1 type. - V1SourceType = "v1" - // SelfSourceType is the source hosting the UI. - SelfSourceType = "self" -) - -// Source is an external Influx with time series data. -// TODO(desa): do we still need default? 
-// TODO(desa): do sources belong -type Source struct { - ID platform.ID `json:"id,omitempty"` // ID is the unique ID of the source - OrganizationID platform.ID `json:"orgID"` // OrganizationID is the organization ID that resource belongs to - Default bool `json:"default"` // Default specifies the default source for the application - Name string `json:"name"` // Name is the user-defined name for the source - Type SourceType `json:"type,omitempty"` // Type specifies which kinds of source (enterprise vs oss vs 2.0) - URL string `json:"url"` // URL are the connections to the source - InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` // InsecureSkipVerify as true means any certificate presented by the source is accepted - Telegraf string `json:"telegraf"` // Telegraf is the db telegraf is written to. By default it is "telegraf" - SourceFields - V1SourceFields -} - -// V1SourceFields are the fields for connecting to a 1.0 source (oss or enterprise) -type V1SourceFields struct { - Username string `json:"username,omitempty"` // Username is the username to connect to the source - Password string `json:"password,omitempty"` // Password is in CLEARTEXT - SharedSecret string `json:"sharedSecret,omitempty"` // ShareSecret is the optional signing secret for Influx JWT authorization - MetaURL string `json:"metaUrl,omitempty"` // MetaURL is the url for the meta node - DefaultRP string `json:"defaultRP"` // DefaultRP is the default retention policy used in database queries to this source -} - -// SourceFields is used to authorize against an influx 2.0 source. -type SourceFields struct { - Token string `json:"token"` // Token is the 2.0 authorization token associated with a source -} - -// ops for sources. -const ( - OpDefaultSource = "DefaultSource" - OpFindSourceByID = "FindSourceByID" - OpFindSources = "FindSources" - OpCreateSource = "CreateSource" - OpUpdateSource = "UpdateSource" - OpDeleteSource = "DeleteSource" -) - -// SourceService is a service for managing sources. -type SourceService interface { - // DefaultSource retrieves the default source. - DefaultSource(ctx context.Context) (*Source, error) - // FindSourceByID retrieves a source by its ID. - FindSourceByID(ctx context.Context, id platform.ID) (*Source, error) - // FindSources returns a list of all sources. - FindSources(ctx context.Context, opts FindOptions) ([]*Source, int, error) - // CreateSource sets the sources ID and stores it. - CreateSource(ctx context.Context, s *Source) error - // UpdateSource updates the source. - UpdateSource(ctx context.Context, id platform.ID, upd SourceUpdate) (*Source, error) - // DeleteSource removes the source. - DeleteSource(ctx context.Context, id platform.ID) error -} - -// DefaultSourceFindOptions are the default find options for sources -var DefaultSourceFindOptions = FindOptions{} - -// SourceUpdate represents updates to a source. -type SourceUpdate struct { - Name *string `json:"name"` - Type *SourceType `json:"type,omitempty"` - Token *string `json:"token"` - URL *string `json:"url"` - InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty"` - Telegraf *string `json:"telegraf"` - Username *string `json:"username,omitempty"` - Password *string `json:"password,omitempty"` - SharedSecret *string `json:"sharedSecret,omitempty"` - MetaURL *string `json:"metaURL,omitempty"` - Role *string `json:"role,omitempty"` - DefaultRP *string `json:"defaultRP"` -} - -// Apply applies an update to a source. 
-func (u SourceUpdate) Apply(s *Source) error { - if u.Name != nil { - s.Name = *u.Name - } - if u.Type != nil { - s.Type = *u.Type - } - if u.Token != nil { - s.Token = *u.Token - } - if u.URL != nil { - s.URL = *u.URL - } - if u.InsecureSkipVerify != nil { - s.InsecureSkipVerify = *u.InsecureSkipVerify - } - if u.Telegraf != nil { - s.Telegraf = *u.Telegraf - } - if u.Username != nil { - s.Username = *u.Username - } - if u.Password != nil { - s.Password = *u.Password - } - if u.SharedSecret != nil { - s.SharedSecret = *u.SharedSecret - } - if u.MetaURL != nil { - s.MetaURL = *u.MetaURL - } - if u.DefaultRP != nil { - s.DefaultRP = *u.DefaultRP - } - - return nil -} diff --git a/source/query.go b/source/query.go deleted file mode 100644 index 37869214b34..00000000000 --- a/source/query.go +++ /dev/null @@ -1,39 +0,0 @@ -package source - -import ( - "fmt" - - platform "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/http/influxdb" - "github.com/influxdata/influxdb/v2/query" -) - -// NewQueryService creates a bucket service from a source. -func NewQueryService(s *platform.Source) (query.ProxyQueryService, error) { - switch s.Type { - case platform.SelfSourceType: - // TODO(fntlnz): this is supposed to call a query service directly locally, - // we are letting it err for now since we have some refactoring to do on - // how services are instantiated - return nil, fmt.Errorf("self source type not implemented") - case platform.V2SourceType: - // This is an influxd that calls another influxd, the query path is /v1/query - in future /v2/query - // it basically is the same as Self but on an external influxd. - return &http.SourceProxyQueryService{ - InsecureSkipVerify: s.InsecureSkipVerify, - Addr: s.URL, - SourceFields: s.SourceFields, - }, nil - case platform.V1SourceType: - // This is an InfluxDB 1.7 source, which supports both InfluxQL and Flux queries - return &influxdb.SourceProxyQueryService{ - InsecureSkipVerify: s.InsecureSkipVerify, - URL: s.URL, - SourceFields: s.SourceFields, - V1SourceFields: s.V1SourceFields, - OrganizationID: s.OrganizationID, - }, nil - } - return nil, fmt.Errorf("unsupported source type %s", s.Type) -} diff --git a/sqlite/README.md b/sqlite/README.md deleted file mode 100644 index c75ad931595..00000000000 --- a/sqlite/README.md +++ /dev/null @@ -1,83 +0,0 @@ -## SQlite - -### Purpose - -This `sqlite` package provides a basic interface for interacting with the -embedded sqlite database used by various InfluxDB services which require storing -relational data. - -The actual sqlite driver is provided by -[`mattn/go-sqlite3`](https://github.com/mattn/go-sqlite3). - -### Usage - -A single instance of `SqlStore` should be created using the `NewSqlStore` -function. Currently, this is done in the top-level `launcher` package, and a -pointer to the `SqlStore` instance is passed to services which require it as -part of their initialization. - -The [`jmoiron/sqlx`](https://github.com/jmoiron/sqlx) package provides a -convenient and lightweight means to write and read structs into and out of the -database and is sufficient for performing simple, static queries. For more -complicated & dynamically constructed queries, the -[`Masterminds/squirrel`](https://github.com/Masterminds/squirrel) package can be -used as a query builder. 
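As a quick illustration of that combination, a read of the `notebooks` table (created by the migrations further down) might look roughly like the sketch below. This is illustrative only: the `notebook` struct, the `notebooksByOrg` helper, and the database path are invented for the example and are not the actual service code.

```go
package main

import (
	"context"
	"fmt"

	sq "github.com/Masterminds/squirrel"
	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver
)

// notebook mirrors a subset of the columns created by
// 0002_create_notebooks_table.up.sql.
type notebook struct {
	ID    string `db:"id"`
	OrgID string `db:"org_id"`
	Name  string `db:"name"`
	Spec  string `db:"spec"`
}

// notebooksByOrg builds the query with squirrel and scans the rows with sqlx.
func notebooksByOrg(ctx context.Context, db *sqlx.DB, orgID string) ([]notebook, error) {
	query, args, err := sq.
		Select("id", "org_id", "name", "spec").
		From("notebooks").
		Where(sq.Eq{"org_id": orgID}).
		OrderBy("created_at DESC").
		ToSql()
	if err != nil {
		return nil, err
	}

	var nbs []notebook
	if err := db.SelectContext(ctx, &nbs, query, args...); err != nil {
		return nil, err
	}
	return nbs, nil
}

func main() {
	db, err := sqlx.Open("sqlite3", "influxd.sqlite") // path is illustrative
	if err != nil {
		panic(err)
	}
	defer db.Close()

	fmt.Println(notebooksByOrg(context.Background(), db, "some-org-id"))
}
```

For simple, static statements the squirrel builder can be skipped entirely and a literal SQL string handed straight to `sqlx`.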
- -### Concurrent Access - -An interesting aspect of using the file-based sqlite database is that while it -can support multiple concurrent read requests, only a single write request can -be processed at a time. A traditional RDBMS would manage concurrent write -requests on the database server, but for this sqlite implementation write -requests need to be managed in the application code. - -In practice, this means that code intended to mutate the database needs to -obtain a write lock prior to making queries that would result in a change to the -data. If locks are not obtained in the application code, it is possible that -errors will be encountered if concurrent write requests hit the database file at -the same time. - -### Migrations - -A simple migration system is implemented in `migrator.go`. When starting the -influx daemon, the migrator runs migrations defined in `.sql` files using -sqlite-compatible sql scripts. Records of these migrations are maintained in a -table called "migrations". If records of migrations exist in the "migrations" -table that are not embedded in the binary, an error will be raised on startup. - -When creating new migrations, follow the file naming convention established by -existing migration scripts, which should look like `00XX_script_name.up.sql` & -`00xx_script_name.down.sql` for the "up" and "down" migration, where `XX` is the -version number. New scripts should have the version number incremented by 1. - -The "up" migrations are run when starting the influx daemon and when metadata -backups are restored. The "down" migrations are run with the `influxd downgrade` -command. - -### In-Memory Database - -When running `influxd` with the `--store=memory` flag, the database will be -opened using the `:memory:` path, and the maximum number of open database -connections is set to 1. Because of the way in-memory databases work with -sqlite, each connection would see a completely new database, so using only a -single connection will ensure that requests to `influxd` will return a -consistent set of data. - -### Backup & Restore - -Methods for backing up and restoring the sqlite database are available on the -`SqlStore` struct. These operations make use of the [sqlite backup -API](https://www.sqlite.org/backup.html) made available by the `go-sqlite3` -driver. It is possible to restore and backup into sqlite databases either stored -in memory or on disk. - -### Sqlite Features / Extensions - -There are many additional features and extensions available, see [the go-sqlite3 -package docs](https://github.com/mattn/go-sqlite3#feature--extension-list) for -the full list. - -We currently use the `sqlite_foreign_keys` and `sqlite_json` extensions for -foreign key support & JSON query support. These features are enabled using -build tags defined in the `Makefile` and `.goreleaser` config for use in -local builds & CI builds respectively. 
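As a concrete (and purely illustrative) sketch of the locking pattern described in the Concurrent Access section above, a store might wrap the database handle together with a `sync.RWMutex`; the `store` type and the `notebooks` statements are assumptions for the example, not the actual `SqlStore` implementation.

```go
package sqlite

import (
	"context"
	"sync"

	"github.com/jmoiron/sqlx"
)

// store serializes writes in application code, since the file-based sqlite
// database only processes a single write request at a time.
type store struct {
	db *sqlx.DB
	mu sync.RWMutex // writers take Lock, readers take RLock
}

// updateName mutates the database, so it holds the exclusive lock.
func (s *store) updateName(ctx context.Context, id, name string) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	_, err := s.db.ExecContext(ctx, `UPDATE notebooks SET name = ? WHERE id = ?`, name, id)
	return err
}

// name only reads, so concurrent callers may share the read lock.
func (s *store) name(ctx context.Context, id string) (string, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	var n string
	err := s.db.GetContext(ctx, &n, `SELECT name FROM notebooks WHERE id = ?`, id)
	return n, err
}
```

The same single-writer reasoning is why the in-memory mode described above holds the connection count at one: opening the `:memory:` path and calling `db.SetMaxOpenConns(1)` keeps every request on the same connection, and therefore on the same database.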
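For the Migrations section above, the sketch below shows one way embedded `.up.sql` scripts could be discovered, applied, and recorded in the `migrations` table. It is a rough illustration under stated assumptions: the `runUp` helper, the embed pattern, and the use of `sqlx` are invented here and do not mirror the real `migrator.go`.

```go
package migrations

import (
	"context"
	"embed"
	"io/fs"
	"sort"

	"github.com/jmoiron/sqlx"
)

//go:embed *.up.sql
var upFS embed.FS

// runUp applies every embedded "up" script that is not yet recorded in the
// migrations table, in version (lexical) order.
func runUp(ctx context.Context, db *sqlx.DB) error {
	// 0001_create_migrations_table.up.sql normally creates this table; the
	// guard only keeps the sketch self-contained.
	if _, err := db.ExecContext(ctx,
		`CREATE TABLE IF NOT EXISTS migrations (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT UNIQUE NOT NULL)`); err != nil {
		return err
	}

	names, err := fs.Glob(upFS, "*.up.sql")
	if err != nil {
		return err
	}
	sort.Strings(names) // 0001_..., 0002_..., ...

	for _, name := range names {
		var applied int
		if err := db.GetContext(ctx, &applied,
			`SELECT COUNT(1) FROM migrations WHERE name = ?`, name); err != nil {
			return err
		}
		if applied > 0 {
			continue // already run on a previous startup
		}

		script, err := fs.ReadFile(upFS, name)
		if err != nil {
			return err
		}
		// go-sqlite3 executes multi-statement scripts in a single Exec call.
		if _, err := db.ExecContext(ctx, string(script)); err != nil {
			return err
		}
		if _, err := db.ExecContext(ctx,
			`INSERT INTO migrations (name) VALUES (?)`, name); err != nil {
			return err
		}
	}
	return nil
}
```

The zero-padded version prefix on the file names is what makes the lexical sort equivalent to applying migrations in numeric order.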
diff --git a/sqlite/migrations/0001_create_migrations_table.down.sql b/sqlite/migrations/0001_create_migrations_table.down.sql deleted file mode 100644 index 594ffc9fb78..00000000000 --- a/sqlite/migrations/0001_create_migrations_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE migrations; diff --git a/sqlite/migrations/0001_create_migrations_table.up.sql b/sqlite/migrations/0001_create_migrations_table.up.sql deleted file mode 100644 index 2422a9acf9e..00000000000 --- a/sqlite/migrations/0001_create_migrations_table.up.sql +++ /dev/null @@ -1,4 +0,0 @@ -CREATE TABLE migrations ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT UNIQUE NOT NULL -); diff --git a/sqlite/migrations/0002_create_notebooks_table.down.sql b/sqlite/migrations/0002_create_notebooks_table.down.sql deleted file mode 100644 index 07fbfee1ef0..00000000000 --- a/sqlite/migrations/0002_create_notebooks_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE notebooks; diff --git a/sqlite/migrations/0002_create_notebooks_table.up.sql b/sqlite/migrations/0002_create_notebooks_table.up.sql deleted file mode 100644 index e746184478f..00000000000 --- a/sqlite/migrations/0002_create_notebooks_table.up.sql +++ /dev/null @@ -1,8 +0,0 @@ -CREATE TABLE notebooks ( - id TEXT NOT NULL PRIMARY KEY, - org_id TEXT NOT NULL, - name TEXT NOT NULL, - spec TEXT NOT NULL, - created_at TIMESTAMP, - updated_at TIMESTAMP -); diff --git a/sqlite/migrations/0003_create_annotations_tables.down.sql b/sqlite/migrations/0003_create_annotations_tables.down.sql deleted file mode 100644 index 1c9015ce865..00000000000 --- a/sqlite/migrations/0003_create_annotations_tables.down.sql +++ /dev/null @@ -1,2 +0,0 @@ -DROP TABLE streams; -DROP TABLE annotations; diff --git a/sqlite/migrations/0003_create_annotations_tables.up.sql b/sqlite/migrations/0003_create_annotations_tables.up.sql deleted file mode 100644 index 1218a917850..00000000000 --- a/sqlite/migrations/0003_create_annotations_tables.up.sql +++ /dev/null @@ -1,30 +0,0 @@ --- Create the initial table to store streams -CREATE TABLE streams ( - id VARCHAR(16) PRIMARY KEY, - org_id VARCHAR(16) NOT NULL, - name TEXT NOT NULL, - description TEXT NOT NULL, - created_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - - CONSTRAINT streams_uniq_orgid_name UNIQUE (org_id, name) -); - --- Create the initial table to store annotations -CREATE TABLE annotations ( - id VARCHAR(16) PRIMARY KEY, - org_id VARCHAR(16) NOT NULL, - stream_id VARCHAR(16) NOT NULL, - summary TEXT NOT NULL, - message TEXT NOT NULL, - stickers TEXT NOT NULL, - duration TEXT NOT NULL, - lower TIMESTAMP NOT NULL, - upper TIMESTAMP NOT NULL, - - FOREIGN KEY (stream_id) REFERENCES streams(id) ON DELETE CASCADE -); - --- Create indexes for stream_id and stickers to support fast queries -CREATE INDEX idx_annotations_stream ON annotations (stream_id); -CREATE INDEX idx_annotations_stickers ON annotations (stickers); diff --git a/sqlite/migrations/0004_create_remotes_table.down.sql b/sqlite/migrations/0004_create_remotes_table.down.sql deleted file mode 100644 index ea7aebac63d..00000000000 --- a/sqlite/migrations/0004_create_remotes_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE remotes; diff --git a/sqlite/migrations/0004_create_remotes_table.up.sql b/sqlite/migrations/0004_create_remotes_table.up.sql deleted file mode 100644 index 59b20351ae7..00000000000 --- a/sqlite/migrations/0004_create_remotes_table.up.sql +++ /dev/null @@ -1,17 +0,0 @@ -CREATE TABLE remotes ( - id VARCHAR(16) NOT NULL PRIMARY KEY, - org_id 
VARCHAR(16) NOT NULL, - name TEXT NOT NULL, - description TEXT, - remote_url TEXT NOT NULL, - remote_api_token TEXT NOT NULL, - remote_org_id VARCHAR(16) NOT NULL, - allow_insecure_tls BOOLEAN NOT NULL, - created_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - - CONSTRAINT remotes_uniq_orgid_name UNIQUE (org_id, name) -); - --- Create indexes on lookup patterns we expect to be common -CREATE INDEX idx_remote_url_per_org ON remotes (org_id, remote_url); diff --git a/sqlite/migrations/0005_create_replications_table.down.sql b/sqlite/migrations/0005_create_replications_table.down.sql deleted file mode 100644 index 3bcc2cf06df..00000000000 --- a/sqlite/migrations/0005_create_replications_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE replications; diff --git a/sqlite/migrations/0005_create_replications_table.up.sql b/sqlite/migrations/0005_create_replications_table.up.sql deleted file mode 100644 index e6d278b95e3..00000000000 --- a/sqlite/migrations/0005_create_replications_table.up.sql +++ /dev/null @@ -1,24 +0,0 @@ -CREATE TABLE replications -( - id VARCHAR(16) NOT NULL PRIMARY KEY, - org_id VARCHAR(16) NOT NULL, - name TEXT NOT NULL, - description TEXT, - remote_id VARCHAR(16) NOT NULL, - local_bucket_id VARCHAR(16) NOT NULL, - remote_bucket_id VARCHAR(16) NOT NULL, - max_queue_size_bytes INTEGER NOT NULL, - max_age_seconds INTEGER NOT NULL, - latest_response_code INTEGER, - latest_error_message TEXT, - drop_non_retryable_data BOOLEAN NOT NULL, - created_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - - CONSTRAINT replications_uniq_orgid_name UNIQUE (org_id, name), - FOREIGN KEY (remote_id) REFERENCES remotes (id) ON DELETE CASCADE -); - --- Create indexes on lookup patterns we expect to be common -CREATE INDEX idx_local_bucket_id_per_org ON replications (org_id, local_bucket_id); -CREATE INDEX idx_remote_id_per_org ON replications (org_id, remote_id); diff --git a/sqlite/migrations/0006_migrate_replications_foreign_key.down.sql b/sqlite/migrations/0006_migrate_replications_foreign_key.down.sql deleted file mode 100644 index 78d04e57f0b..00000000000 --- a/sqlite/migrations/0006_migrate_replications_foreign_key.down.sql +++ /dev/null @@ -1,33 +0,0 @@ -PRAGMA foreign_keys=off; - -ALTER TABLE replications RENAME TO _replications_old; - -CREATE TABLE replications -( - id VARCHAR(16) NOT NULL PRIMARY KEY, - org_id VARCHAR(16) NOT NULL, - name TEXT NOT NULL, - description TEXT, - remote_id VARCHAR(16) NOT NULL, - local_bucket_id VARCHAR(16) NOT NULL, - remote_bucket_id VARCHAR(16) NOT NULL, - max_queue_size_bytes INTEGER NOT NULL, - max_age_seconds INTEGER NOT NULL, - latest_response_code INTEGER, - latest_error_message TEXT, - drop_non_retryable_data BOOLEAN NOT NULL, - created_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - - CONSTRAINT replications_uniq_orgid_name UNIQUE (org_id, name), - FOREIGN KEY (remote_id) REFERENCES remotes (id) ON DELETE CASCADE -); - -INSERT INTO replications SELECT * FROM _replications_old; -DROP TABLE _replications_old; - --- Create indexes on lookup patterns we expect to be common -CREATE INDEX idx_local_bucket_id_per_org ON replications (org_id, local_bucket_id); -CREATE INDEX idx_remote_id_per_org ON replications (org_id, remote_id); - -PRAGMA foreign_keys=on; \ No newline at end of file diff --git a/sqlite/migrations/0006_migrate_replications_foreign_key.up.sql b/sqlite/migrations/0006_migrate_replications_foreign_key.up.sql deleted file mode 100644 index 2a127e6c70d..00000000000 --- 
a/sqlite/migrations/0006_migrate_replications_foreign_key.up.sql +++ /dev/null @@ -1,34 +0,0 @@ -PRAGMA foreign_keys=off; - --- Removes the "ON DELETE CASCADE" from the foreign key constraint -ALTER TABLE replications RENAME TO _replications_old; - -CREATE TABLE replications -( - id VARCHAR(16) NOT NULL PRIMARY KEY, - org_id VARCHAR(16) NOT NULL, - name TEXT NOT NULL, - description TEXT, - remote_id VARCHAR(16) NOT NULL, - local_bucket_id VARCHAR(16) NOT NULL, - remote_bucket_id VARCHAR(16) NOT NULL, - max_queue_size_bytes INTEGER NOT NULL, - max_age_seconds INTEGER NOT NULL, - latest_response_code INTEGER, - latest_error_message TEXT, - drop_non_retryable_data BOOLEAN NOT NULL, - created_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - - CONSTRAINT replications_uniq_orgid_name UNIQUE (org_id, name), - FOREIGN KEY (remote_id) REFERENCES remotes (id) -); - -INSERT INTO replications SELECT * FROM _replications_old; -DROP TABLE _replications_old; - --- Create indexes on lookup patterns we expect to be common -CREATE INDEX idx_local_bucket_id_per_org ON replications (org_id, local_bucket_id); -CREATE INDEX idx_remote_id_per_org ON replications (org_id, remote_id); - -PRAGMA foreign_keys=on; diff --git a/sqlite/migrations/0007_migrate_replications_add_bucket_name.down.sql b/sqlite/migrations/0007_migrate_replications_add_bucket_name.down.sql deleted file mode 100644 index 375e6e540c7..00000000000 --- a/sqlite/migrations/0007_migrate_replications_add_bucket_name.down.sql +++ /dev/null @@ -1,43 +0,0 @@ --- Adds the "NOT NULL" to `remote_bucket_id` and removes `remote_bucket_name`. -ALTER TABLE replications RENAME TO _replications_old; - -CREATE TABLE replications -( - id VARCHAR(16) NOT NULL PRIMARY KEY, - org_id VARCHAR(16) NOT NULL, - name TEXT NOT NULL, - description TEXT, - remote_id VARCHAR(16) NOT NULL, - local_bucket_id VARCHAR(16) NOT NULL, - remote_bucket_id VARCHAR(16) NOT NULL, - max_queue_size_bytes INTEGER NOT NULL, - max_age_seconds INTEGER NOT NULL, - latest_response_code INTEGER, - latest_error_message TEXT, - drop_non_retryable_data BOOLEAN NOT NULL, - created_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - - CONSTRAINT replications_uniq_orgid_name UNIQUE (org_id, name), - FOREIGN KEY (remote_id) REFERENCES remotes (id) -); - -INSERT INTO replications SELECT - id, - org_id, - name, - description, - remote_id, - local_bucket_id, - remote_bucket_id, - max_queue_size_bytes, - max_age_seconds, - latest_response_code, - latest_error_message, - drop_non_retryable_data, - created_at,updated_at FROM _replications_old WHERE remote_bucket_name = ''; -DROP TABLE _replications_old; - --- Create indexes on lookup patterns we expect to be common -CREATE INDEX idx_local_bucket_id_per_org ON replications (org_id, local_bucket_id); -CREATE INDEX idx_remote_id_per_org ON replications (org_id, remote_id); \ No newline at end of file diff --git a/sqlite/migrations/0007_migrate_replications_add_bucket_name.up.sql b/sqlite/migrations/0007_migrate_replications_add_bucket_name.up.sql deleted file mode 100644 index ba4f3020445..00000000000 --- a/sqlite/migrations/0007_migrate_replications_add_bucket_name.up.sql +++ /dev/null @@ -1,46 +0,0 @@ --- Removes the "NOT NULL" from `remote_bucket_id` and adds `remote_bucket_name`. 
-ALTER TABLE replications RENAME TO _replications_old; - -CREATE TABLE replications -( - id VARCHAR(16) NOT NULL PRIMARY KEY, - org_id VARCHAR(16) NOT NULL, - name TEXT NOT NULL, - description TEXT, - remote_id VARCHAR(16) NOT NULL, - local_bucket_id VARCHAR(16) NOT NULL, - remote_bucket_id VARCHAR(16), - remote_bucket_name TEXT DEFAULT '', - max_queue_size_bytes INTEGER NOT NULL, - max_age_seconds INTEGER NOT NULL, - latest_response_code INTEGER, - latest_error_message TEXT, - drop_non_retryable_data BOOLEAN NOT NULL, - created_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - - CONSTRAINT replications_uniq_orgid_name UNIQUE (org_id, name), - CONSTRAINT replications_one_of_id_name CHECK (remote_bucket_id IS NOT NULL OR remote_bucket_name != ''), - FOREIGN KEY (remote_id) REFERENCES remotes (id) -); - -INSERT INTO replications ( - id, - org_id, - name, - description, - remote_id, - local_bucket_id, - remote_bucket_id, - max_queue_size_bytes, - max_age_seconds, - latest_response_code, - latest_error_message, - drop_non_retryable_data, - created_at,updated_at -) SELECT * FROM _replications_old; -DROP TABLE _replications_old; - --- Create indexes on lookup patterns we expect to be common -CREATE INDEX idx_local_bucket_id_per_org ON replications (org_id, local_bucket_id); -CREATE INDEX idx_remote_id_per_org ON replications (org_id, remote_id); \ No newline at end of file diff --git a/sqlite/migrations/0008_migrate_remotes_null_remote_org.down.sql b/sqlite/migrations/0008_migrate_remotes_null_remote_org.down.sql deleted file mode 100644 index c5266263c63..00000000000 --- a/sqlite/migrations/0008_migrate_remotes_null_remote_org.down.sql +++ /dev/null @@ -1,84 +0,0 @@ --- Adds the "NOT NULL" to remote_org_id -ALTER TABLE remotes RENAME TO _remotes_old; - -CREATE TABLE remotes ( - id VARCHAR(16) NOT NULL PRIMARY KEY, - org_id VARCHAR(16) NOT NULL, - name TEXT NOT NULL, - description TEXT, - remote_url TEXT NOT NULL, - remote_api_token TEXT NOT NULL, - remote_org_id VARCHAR(16) NOT NULL, - allow_insecure_tls BOOLEAN NOT NULL, - created_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - - CONSTRAINT remotes_uniq_orgid_name UNIQUE (org_id, name) -); - -INSERT INTO remotes ( - id, - org_id, - name, - description, - remote_url, - remote_api_token, - remote_org_id, - allow_insecure_tls, - created_at, - updated_at -) SELECT * FROM _remotes_old WHERE remote_org_id IS NOT NULL; - --- Edit the replications table as the remotes table key has changed -ALTER TABLE replications RENAME TO _replications_old; - -CREATE TABLE replications -( - id VARCHAR(16) NOT NULL PRIMARY KEY, - org_id VARCHAR(16) NOT NULL, - name TEXT NOT NULL, - description TEXT, - remote_id VARCHAR(16) NOT NULL, - local_bucket_id VARCHAR(16) NOT NULL, - remote_bucket_id VARCHAR(16), - remote_bucket_name TEXT DEFAULT '', - max_queue_size_bytes INTEGER NOT NULL, - max_age_seconds INTEGER NOT NULL, - latest_response_code INTEGER, - latest_error_message TEXT, - drop_non_retryable_data BOOLEAN NOT NULL, - created_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - - CONSTRAINT replications_uniq_orgid_name UNIQUE (org_id, name), - CONSTRAINT replications_one_of_id_name CHECK (remote_bucket_id IS NOT NULL OR remote_bucket_name != '') - ); - -INSERT INTO replications ( - id, - org_id, - name, - description, - remote_id, - local_bucket_id, - remote_bucket_id, - remote_bucket_name, - max_queue_size_bytes, - max_age_seconds, - latest_response_code, - latest_error_message, - drop_non_retryable_data, - created_at, - updated_at -) 
SELECT * FROM _replications_old; -DROP TABLE _replications_old; -DROP TABLE _remotes_old; - --- The DROP _remotes has to be at the end due to the FK from replications remote_id to remotes id. --- The replications table will follow the ALTER TABLE and FK to _remotes until we --- reinsert. By putting the DROP after all the data is re-entered, it will stay consistent throughout the process. - --- Create indexes on lookup patterns we expect to be common -CREATE INDEX idx_local_bucket_id_per_org ON replications (org_id, local_bucket_id); --- Create indexes on lookup patterns we expect to be common -CREATE INDEX idx_remote_url_per_org ON remotes (org_id, remote_url); diff --git a/sqlite/migrations/0008_migrate_remotes_null_remote_org.up.sql b/sqlite/migrations/0008_migrate_remotes_null_remote_org.up.sql deleted file mode 100644 index e74f5714993..00000000000 --- a/sqlite/migrations/0008_migrate_remotes_null_remote_org.up.sql +++ /dev/null @@ -1,82 +0,0 @@ --- Removes the "NOT NULL" from remote_org_id -ALTER TABLE remotes RENAME TO _remotes_old; -DROP INDEX idx_remote_url_per_org; - -CREATE TABLE remotes ( - id VARCHAR(16) NOT NULL PRIMARY KEY, - org_id VARCHAR(16) NOT NULL, - name TEXT NOT NULL, - description TEXT, - remote_url TEXT NOT NULL, - remote_api_token TEXT NOT NULL, - remote_org_id VARCHAR(16), - allow_insecure_tls BOOLEAN NOT NULL, - created_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - - CONSTRAINT remotes_uniq_orgid_name UNIQUE (org_id, name) -); - -INSERT INTO remotes ( - id, - org_id, - name, - description, - remote_url, - remote_api_token, - remote_org_id, - allow_insecure_tls, - created_at, - updated_at -) SELECT * FROM _remotes_old; --- Create indexes on lookup patterns we expect to be common -CREATE INDEX idx_remote_url_per_org ON remotes (org_id, remote_url); - --- Edit the replications table as the remotes table key has changed -ALTER TABLE replications RENAME TO _replications_old; - -CREATE TABLE replications -( - id VARCHAR(16) NOT NULL PRIMARY KEY, - org_id VARCHAR(16) NOT NULL, - name TEXT NOT NULL, - description TEXT, - remote_id VARCHAR(16) NOT NULL, - local_bucket_id VARCHAR(16) NOT NULL, - remote_bucket_id VARCHAR(16), - remote_bucket_name TEXT DEFAULT '', - max_queue_size_bytes INTEGER NOT NULL, - max_age_seconds INTEGER NOT NULL, - latest_response_code INTEGER, - latest_error_message TEXT, - drop_non_retryable_data BOOLEAN NOT NULL, - created_at TIMESTAMP NOT NULL, - updated_at TIMESTAMP NOT NULL, - - CONSTRAINT replications_uniq_orgid_name UNIQUE (org_id, name), - CONSTRAINT replications_one_of_id_name CHECK (remote_bucket_id IS NOT NULL OR remote_bucket_name != ''), - FOREIGN KEY (remote_id) REFERENCES remotes (id) - ); - -INSERT INTO replications ( - id, - org_id, - name, - description, - remote_id, - local_bucket_id, - remote_bucket_id, - remote_bucket_name, - max_queue_size_bytes, - max_age_seconds, - latest_response_code, - latest_error_message, - drop_non_retryable_data, - created_at, - updated_at -) SELECT * FROM _replications_old; -DROP TABLE _replications_old; -DROP TABLE _remotes_old; - --- Create indexes on lookup patterns we expect to be common -CREATE INDEX idx_local_bucket_id_per_org ON replications (org_id, local_bucket_id); diff --git a/sqlite/migrations/all.go b/sqlite/migrations/all.go deleted file mode 100644 index 591a9a0668f..00000000000 --- a/sqlite/migrations/all.go +++ /dev/null @@ -1,9 +0,0 @@ -package migrations - -import "embed" - -//go:embed *up.sql -var AllUp embed.FS - -//go:embed *down.sql -var AllDown embed.FS diff --git 
a/sqlite/migrator.go b/sqlite/migrator.go deleted file mode 100644 index 893b8fe60be..00000000000 --- a/sqlite/migrator.go +++ /dev/null @@ -1,211 +0,0 @@ -package sqlite - -import ( - "context" - "embed" - "fmt" - "os" - "sort" - "strconv" - "strings" - - "github.com/influxdata/influxdb/v2/kit/migration" - "go.uber.org/zap" -) - -type Migrator struct { - store *SqlStore - log *zap.Logger - - backupPath string -} - -func NewMigrator(store *SqlStore, log *zap.Logger) *Migrator { - return &Migrator{ - store: store, - log: log, - } -} - -// SetBackupPath records the filepath where pre-migration state should be written prior to running migrations. -func (m *Migrator) SetBackupPath(path string) { - m.backupPath = path -} - -func (m *Migrator) Up(ctx context.Context, source embed.FS) error { - return m.UpUntil(ctx, -1, source) -} - -// UpUntil migrates until a specific migration. -// -1 or 0 will run all migrations, any other number will run up until that. -// Returns no error untilMigration is less than the already run migrations. -func (m *Migrator) UpUntil(ctx context.Context, untilMigration int, source embed.FS) error { - knownMigrations, err := source.ReadDir(".") - if err != nil { - return err - } - - // sort the list according to the version number to ensure the migrations are applied in the correct order - sort.Slice(knownMigrations, func(i, j int) bool { - return knownMigrations[i].Name() < knownMigrations[j].Name() - }) - - executedMigrations, err := m.store.allMigrationNames() - if err != nil { - return err - } - - var lastMigration int - for idx := range executedMigrations { - if idx > len(knownMigrations)-1 || executedMigrations[idx] != dropExtension(knownMigrations[idx].Name()) { - return migration.ErrInvalidMigration(executedMigrations[idx]) - } - - lastMigration, err = scriptVersion(executedMigrations[idx]) - if err != nil { - return err - } - } - - var migrationsToDo int - if untilMigration < 1 { - migrationsToDo = len(knownMigrations[lastMigration:]) - untilMigration = len(knownMigrations) - } else if untilMigration >= lastMigration { - migrationsToDo = len(knownMigrations[lastMigration:untilMigration]) - } else { - return nil - } - - if migrationsToDo == 0 { - return nil - } - - if m.backupPath != "" && lastMigration != 0 { - m.log.Info("Backing up pre-migration metadata", zap.String("backup_path", m.backupPath)) - if err := func() error { - out, err := os.Create(m.backupPath) - if err != nil { - return err - } - defer out.Close() - - if err := m.store.BackupSqlStore(ctx, out); err != nil { - return err - } - return nil - }(); err != nil { - return fmt.Errorf("failed to back up pre-migration metadata: %w", err) - } - } - - m.log.Info("Bringing up metadata migrations", zap.Int("migration_count", migrationsToDo)) - - for _, f := range knownMigrations[lastMigration:untilMigration] { - n := f.Name() - - m.log.Debug("Executing metadata migration", zap.String("migration_name", n)) - mBytes, err := source.ReadFile(n) - if err != nil { - return err - } - - recordStmt := fmt.Sprintf(`INSERT INTO %s (name) VALUES (%q);`, migrationsTableName, dropExtension(n)) - - if err := m.store.execTrans(ctx, string(mBytes)+recordStmt); err != nil { - return err - } - - } - - return nil -} - -// Down applies the "down" migrations until the SQL database has migrations only >= untilMigration. Use untilMigration = 0 to apply all -// down migrations, which will delete all data from the database. 
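As a rough usage sketch of the migrator defined in this file (illustrative only: the import paths follow the repository layout shown in this diff, and the backup filename is a stand-in):

package main

import (
	"context"
	"log"

	"github.com/influxdata/influxdb/v2/sqlite"
	sqliteMigrations "github.com/influxdata/influxdb/v2/sqlite/migrations"
	"go.uber.org/zap"
)

func main() {
	// Open (or create) the embedded SQLite metadata store.
	store, err := sqlite.NewSqlStore(sqlite.DefaultFilename, zap.NewNop())
	if err != nil {
		log.Fatal(err)
	}
	defer store.Close()

	// If earlier migrations have already been applied, Up first writes a
	// pre-migration backup to the configured path, then runs every embedded
	// *.up.sql script that is not yet recorded in the migrations table.
	m := sqlite.NewMigrator(store, zap.NewNop())
	m.SetBackupPath(sqlite.DefaultFilename + ".bak")
	if err := m.Up(context.Background(), sqliteMigrations.AllUp); err != nil {
		log.Fatal(err)
	}
}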
-func (m *Migrator) Down(ctx context.Context, untilMigration int, source embed.FS) error { - knownMigrations, err := source.ReadDir(".") - if err != nil { - return err - } - - // sort the list according to the version number to ensure the migrations are applied in the correct order - sort.Slice(knownMigrations, func(i, j int) bool { - return knownMigrations[i].Name() < knownMigrations[j].Name() - }) - - executedMigrations, err := m.store.allMigrationNames() - if err != nil { - return err - } - - migrationsToDo := len(executedMigrations) - untilMigration - if migrationsToDo == 0 { - return nil - } - - if migrationsToDo < 0 { - m.log.Warn("SQL metadata is already on a schema older than target, nothing to do") - return nil - } - - if m.backupPath != "" { - m.log.Info("Backing up pre-migration metadata", zap.String("backup_path", m.backupPath)) - if err := func() error { - out, err := os.Create(m.backupPath) - if err != nil { - return err - } - defer out.Close() - - if err := m.store.BackupSqlStore(ctx, out); err != nil { - return err - } - return nil - }(); err != nil { - return fmt.Errorf("failed to back up pre-migration metadata: %w", err) - } - } - - m.log.Info("Tearing down metadata migrations", zap.Int("migration_count", migrationsToDo)) - - for i := len(executedMigrations) - 1; i >= untilMigration; i-- { - downName := knownMigrations[i].Name() - downNameNoExtension := dropExtension(downName) - - m.log.Debug("Executing metadata migration", zap.String("migration_name", downName)) - mBytes, err := source.ReadFile(downName) - if err != nil { - return err - } - - deleteStmt := fmt.Sprintf(`DELETE FROM %s WHERE name = %q;`, migrationsTableName, downNameNoExtension) - - if err := m.store.execTrans(ctx, deleteStmt+string(mBytes)); err != nil { - return err - } - } - - return nil -} - -// extract the version number as an integer from a file named like "0002_migration_name.sql" -func scriptVersion(filename string) (int, error) { - vString := strings.Split(filename, "_")[0] - vInt, err := strconv.Atoi(vString) - if err != nil { - return 0, err - } - - return vInt, nil -} - -// dropExtension returns the filename excluding anything after the first "." 
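Concretely, for the "NNNN_description" naming convention the embedded scripts use, an in-package sketch (the values mirror TestScriptVersion and TestDropExtension further down in this diff):

v, _ := scriptVersion("0006_migrate_replications_foreign_key.up.sql")
// v == 6: the zero-padded prefix gives the order the scripts are applied in.
name := dropExtension("0006_migrate_replications_foreign_key.up.sql")
// name == "0006_migrate_replications_foreign_key": the value recorded in the
// migrations table on the way up and deleted from it on the way down.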
-func dropExtension(filename string) string { - idx := strings.Index(filename, ".") - if idx == -1 { - return filename - } - - return filename[:idx] -} diff --git a/sqlite/migrator_test.go b/sqlite/migrator_test.go deleted file mode 100644 index a7592627bce..00000000000 --- a/sqlite/migrator_test.go +++ /dev/null @@ -1,269 +0,0 @@ -package sqlite - -import ( - "context" - "embed" - "fmt" - "io/fs" - "os" - "testing" - - "github.com/influxdata/influxdb/v2/kit/errors" - "github.com/influxdata/influxdb/v2/kit/migration" - "github.com/influxdata/influxdb/v2/sqlite/test_migrations" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" -) - -type tableInfo struct { - Cid int `db:"cid"` - Name string `db:"name"` - Db_type string `db:"type"` - Notnull int `db:"notnull"` - Dflt_value interface{} `db:"dflt_value"` - Pk int `db:"pk"` -} - -func TestUp(t *testing.T) { - t.Parallel() - - store := NewTestStore(t) - - upsOnlyAll, err := test_migrations.AllUp.ReadDir(".") - require.NoError(t, err) - - upsOnlyFirst, err := test_migrations.FirstUp.ReadDir(".") - require.NoError(t, err) - - migrator := NewMigrator(store, zaptest.NewLogger(t)) - - // empty db contains no migrations - names, err := store.allMigrationNames() - require.NoError(t, err) - require.Equal(t, []string(nil), names) - - // run the first migrations - migrateUpAndCheck(t, migrator, store, test_migrations.FirstUp, upsOnlyFirst) - - // run the rest of the migrations - migrateUpAndCheck(t, migrator, store, test_migrations.AllUp, upsOnlyAll) - - // test_table_1 had the "id" column renamed to "org_id" - var table1Info []*tableInfo - err = store.DB.Select(&table1Info, "PRAGMA table_info(test_table_1)") - require.NoError(t, err) - require.Len(t, table1Info, 3) - require.Equal(t, "org_id", table1Info[0].Name) - - // test_table_2 was created correctly - var table2Info []*tableInfo - err = store.DB.Select(&table2Info, "PRAGMA table_info(test_table_2)") - require.NoError(t, err) - require.Len(t, table2Info, 3) - require.Equal(t, "user_id", table2Info[0].Name) -} - -func TestUpErrors(t *testing.T) { - t.Parallel() - - t.Run("only unknown migration exists", func(t *testing.T) { - store := NewTestStore(t) - ctx := context.Background() - - migrator := NewMigrator(store, zaptest.NewLogger(t)) - require.NoError(t, migrator.Up(ctx, test_migrations.MigrationTable)) - require.NoError(t, store.execTrans(ctx, `INSERT INTO migrations (name) VALUES ("0010_some_bad_migration")`)) - require.Equal(t, migration.ErrInvalidMigration("0010_some_bad_migration"), migrator.Up(ctx, test_migrations.AllUp)) - }) - - t.Run("known + unknown migrations exist", func(t *testing.T) { - store := NewTestStore(t) - ctx := context.Background() - - migrator := NewMigrator(store, zaptest.NewLogger(t)) - require.NoError(t, migrator.Up(ctx, test_migrations.FirstUp)) - require.NoError(t, store.execTrans(ctx, `INSERT INTO migrations (name) VALUES ("0010_some_bad_migration")`)) - require.Equal(t, migration.ErrInvalidMigration("0010_some_bad_migration"), migrator.Up(ctx, test_migrations.AllUp)) - }) -} - -func TestUpWithBackups(t *testing.T) { - t.Parallel() - - store := NewTestStore(t) - - logger := zaptest.NewLogger(t) - migrator := NewMigrator(store, logger) - backupPath := fmt.Sprintf("%s.bak", store.path) - migrator.SetBackupPath(backupPath) - - upsOnlyAll, err := test_migrations.AllUp.ReadDir(".") - require.NoError(t, err) - - upsOnlyFirst, err := test_migrations.FirstUp.ReadDir(".") - require.NoError(t, err) - - // Run 
the first migrations. - migrateUpAndCheck(t, migrator, store, test_migrations.FirstUp, upsOnlyFirst) - - // Backup file shouldn't exist, because there was nothing to back up. - _, err = os.Stat(backupPath) - require.True(t, os.IsNotExist(err)) - - // Run the remaining migrations. - migrateUpAndCheck(t, migrator, store, test_migrations.AllUp, upsOnlyAll) - - // Backup file should now exist. - _, err = os.Stat(backupPath) - require.NoError(t, err) - - // Open a 2nd store using the backup file. - backupStore, err := NewSqlStore(backupPath, zap.NewNop()) - require.NoError(t, err) - defer backupStore.Close() - - // Backup store contains the first migrations records. - backupNames, err := backupStore.allMigrationNames() - require.NoError(t, err) - migrationNamesMatch(t, backupNames, upsOnlyFirst) - - // Run the remaining migrations on the backup and verify that it now contains the rest of the migration records. - backupMigrator := NewMigrator(backupStore, logger) - migrateUpAndCheck(t, backupMigrator, store, test_migrations.AllUp, upsOnlyAll) -} - -func TestDown(t *testing.T) { - t.Parallel() - - store := NewTestStore(t) - - upsOnlyAll, err := test_migrations.AllUp.ReadDir(".") - require.NoError(t, err) - - upsOnlyFirst, err := test_migrations.FirstUp.ReadDir(".") - require.NoError(t, err) - - migrator := NewMigrator(store, zaptest.NewLogger(t)) - - // no up migrations, then some down migrations - migrateDownAndCheck(t, migrator, store, test_migrations.FirstDown, []fs.DirEntry{}, 0) - - // all up migrations, then all down migrations - migrateUpAndCheck(t, migrator, store, test_migrations.AllUp, upsOnlyAll) - migrateDownAndCheck(t, migrator, store, test_migrations.AllDown, []fs.DirEntry{}, 0) - - // first of the up migrations, then first of the down migrations - migrateUpAndCheck(t, migrator, store, test_migrations.FirstUp, upsOnlyFirst) - migrateDownAndCheck(t, migrator, store, test_migrations.FirstDown, []fs.DirEntry{}, 0) - - // first of the up migrations, then all of the down migrations - migrateUpAndCheck(t, migrator, store, test_migrations.FirstUp, upsOnlyFirst) - migrateDownAndCheck(t, migrator, store, test_migrations.AllDown, []fs.DirEntry{}, 0) - - // all up migrations, then some of the down migrations (using untilMigration) - migrateUpAndCheck(t, migrator, store, test_migrations.AllUp, upsOnlyAll) - migrateDownAndCheck(t, migrator, store, test_migrations.AllDown, upsOnlyFirst, 2) -} - -func TestScriptVersion(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - filename string - want int - wantErr error - }{ - { - "single digit number", - "0001_some_file_name.sql", - 1, - nil, - }, - { - "larger number", - "0921_another_file.sql", - 921, - nil, - }, - { - "bad name", - "not_numbered_correctly.sql", - 0, - &errors.Error{}, - }, - } - - for _, tt := range tests { - tt := tt // capture range variable - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - got, err := scriptVersion(tt.filename) - require.Equal(t, tt.want, got) - if tt.wantErr == nil { - require.NoError(t, err) - } else { - require.Error(t, err) - } - }) - } -} - -func TestDropExtension(t *testing.T) { - tests := []struct { - input string - want string - }{ - { - input: "0001_some_migration", - want: "0001_some_migration", - }, - { - input: "0001_some_migration.sql", - want: "0001_some_migration", - }, - { - input: "0001_some_migration.down.sql", - want: "0001_some_migration", - }, - { - input: "0001_some_migration.something.anything.else", - want: "0001_some_migration", - }, - } - - for _, tt := range 
tests { - got := dropExtension(tt.input) - require.Equal(t, tt.want, got) - } -} - -func migrateUpAndCheck(t *testing.T, m *Migrator, s *SqlStore, source embed.FS, expected []fs.DirEntry) { - t.Helper() - - require.NoError(t, m.Up(context.Background(), source)) - names, err := s.allMigrationNames() - require.NoError(t, err) - migrationNamesMatch(t, names, expected) -} - -func migrateDownAndCheck(t *testing.T, m *Migrator, s *SqlStore, source embed.FS, expected []fs.DirEntry, untilMigration int) { - t.Helper() - - require.NoError(t, m.Down(context.Background(), untilMigration, source)) - names, err := s.allMigrationNames() - require.NoError(t, err) - migrationNamesMatch(t, names, expected) -} - -func migrationNamesMatch(t *testing.T, names []string, files []fs.DirEntry) { - t.Helper() - - require.Equal(t, len(names), len(files)) - - for idx := range files { - require.Equal(t, dropExtension(files[idx].Name()), names[idx]) - } -} diff --git a/sqlite/sqlite.go b/sqlite/sqlite.go deleted file mode 100644 index 8176b9fd77b..00000000000 --- a/sqlite/sqlite.go +++ /dev/null @@ -1,379 +0,0 @@ -package sqlite - -import ( - "context" - "database/sql" - "fmt" - "io" - "os" - "path/filepath" - "sync" - - errors2 "github.com/influxdata/influxdb/v2/pkg/errors" - - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/pkg/fs" - sqliteMigrations "github.com/influxdata/influxdb/v2/sqlite/migrations" - "github.com/jmoiron/sqlx" - "github.com/mattn/go-sqlite3" - "go.uber.org/zap" -) - -const ( - DefaultFilename = "influxd.sqlite" - InmemPath = ":memory:" - migrationsTableName = "migrations" -) - -// SqlStore is a wrapper around the db and provides basic functionality for maintaining the db -// including flushing the data from the db during end-to-end testing. -type SqlStore struct { - Mu sync.RWMutex - DB *sqlx.DB - log *zap.Logger - path string -} - -func NewSqlStore(path string, log *zap.Logger) (*SqlStore, error) { - s := &SqlStore{ - log: log, - path: path, - } - - if err := s.openDB(); err != nil { - return nil, err - } - - return s, nil -} - -// open the file at the specified path -func (s *SqlStore) openDB() error { - db, err := sqlx.Open("sqlite3", s.path) - if err != nil { - return err - } - s.log.Info("Resources opened", zap.String("path", s.path)) - - // If using an in-memory database, don't allow more than 1 connection. Each connection - // is given a "new" database. We can't use a shared cache in-memory database because - // parallel tests that run multiple launchers in the same process will have issues doing - // concurrent writes to the database. See: https://sqlite.org/inmemorydb.html - if s.path == InmemPath { - db.SetMaxOpenConns(1) - } - - s.DB = db - - return nil -} - -// Close the connection to the sqlite database -func (s *SqlStore) Close() error { - err := s.DB.Close() - if err != nil { - return err - } - - return nil -} - -// RLockSqlStore locks the database using the mutex. This is intended to lock the database for writes. -// It is the responsibilty of implementing service code to manage locks for write operations. -func (s *SqlStore) RLockSqlStore() { - s.Mu.RLock() -} - -// RUnlockSqlStore unlocks the database. -func (s *SqlStore) RUnlockSqlStore() { - s.Mu.RUnlock() -} - -// Flush deletes all records for all tables in the database except for the migration table. This method should only be -// used during end-to-end testing. 
-func (s *SqlStore) Flush(ctx context.Context) { - tables, err := s.tableNames() - if err != nil { - s.log.Fatal("unable to flush sqlite", zap.Error(err)) - } - - for _, t := range tables { - if t == migrationsTableName { - continue - } - - stmt := fmt.Sprintf("DELETE FROM %s", t) - err := s.execTrans(ctx, stmt) - if err != nil { - s.log.Fatal("unable to flush sqlite", zap.Error(err)) - } - } - s.log.Debug("sqlite data flushed successfully") -} - -// BackupSqlStore creates a new temporary database and uses the sqlite backup API -// to back the database up into the temporary database. It then writes the temporary -// database file to the writer. Using the sqlite backup API allows the backup to be -// performed without needing to lock the database, and also allows it to work with -// in-memory databases. See: https://www.sqlite.org/backup.html -// -// The backup works by copying the SOURCE database to the DESTINATION database. -// The SOURCE is the running database that needs to be backed up, and the DESTINATION -// is the resulting backup. The underlying sqlite connection is needed for both -// SOURCE and DESTINATION databases to use the sqlite backup API made available by the -// go-sqlite3 driver. -func (s *SqlStore) BackupSqlStore(ctx context.Context, w io.Writer) (rErr error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - // create a destination db in a temporary directory to hold the backup. - tempDir, err := os.MkdirTemp("", "") - if err != nil { - return err - } - defer os.RemoveAll(tempDir) - - destPath := filepath.Join(tempDir, DefaultFilename) - dest, err := NewSqlStore(destPath, zap.NewNop()) - if err != nil { - return err - } - defer errors2.Capture(&rErr, dest.Close) - - if err := backup(ctx, dest, s); err != nil { - return err - } - - // open the backup file so it can be copied to the destination writer - f, err := os.Open(destPath) - if err != nil { - return err - } - defer f.Close() - - // copy the backup to the destination writer - _, err = io.Copy(w, f) - return err -} - -func backup(ctx context.Context, dest, src *SqlStore) error { - // get the connection for the destination so we can get the underlying sqlite connection - destConn, err := dest.DB.Conn(ctx) - if err != nil { - return err - } - defer destConn.Close() - - // get the sqlite connection for the destination to access the sqlite backup API - destSqliteConn, err := sqliteFromSqlConn(destConn) - if err != nil { - return err - } - - // get the connection for the source database so we can get the underlying sqlite connection - srcConn, err := src.DB.Conn(ctx) - if err != nil { - return err - } - defer srcConn.Close() - - // get the sqlite connection for the source to access the sqlite backup API - srcSqliteConn, err := sqliteFromSqlConn(srcConn) - if err != nil { - return err - } - - // call Backup on the destination sqlite connection - which initializes the backup - bk, err := destSqliteConn.Backup("main", srcSqliteConn, "main") - if err != nil { - return err - } - - // perform the backup - _, err = bk.Step(-1) - if err != nil { - return err - } - - // close the backup once it's done - return bk.Finish() -} - -// sqliteFromSqlConn returns the underlying sqlite3 connection from an sql connection -func sqliteFromSqlConn(c *sql.Conn) (*sqlite3.SQLiteConn, error) { - var sqliteConn *sqlite3.SQLiteConn - err := c.Raw(func(driverConn interface{}) error { - sqliteConn = driverConn.(*sqlite3.SQLiteConn) - return nil - }) - if err != nil { - return nil, err - } - - return sqliteConn, nil -} - -// 
RestoreSqlStore replaces the underlying database with the data from r. -func (s *SqlStore) RestoreSqlStore(ctx context.Context, r io.Reader) (rErr error) { - tempDir, err := os.MkdirTemp("", "") - if err != nil { - return err - } - defer os.RemoveAll(tempDir) - - tempFileName := filepath.Join(tempDir, DefaultFilename) - - f, err := os.Create(tempFileName) - if err != nil { - return err - } - defer errors2.Capture(&rErr, f.Close) - - // Copy the contents of r to the temporary file - if _, err := io.Copy(f, r); err != nil { - return err - } - if err := f.Sync(); err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - - // Run the migrations on the restored database prior to swapping it in. - if err := s.migrateRestored(ctx, tempFileName); err != nil { - return err - } - - // Use a lock while swapping over to the temporary database. - s.Mu.Lock() - defer s.Mu.Unlock() - - // Close the current DB. - if err := s.Close(); err != nil { - return err - } - - // If we're using a :memory: database, we need to open a new DB (which will be completely empty), - // and then use the sqlite backup API to copy the data from the restored db file into the database. - // Otherwise, we can just atomically swap the file and re-open the DB. - if s.path == InmemPath { - if err := s.openDB(); err != nil { - return err - } - // Open the temporary file - this is the "source" DB for doing the restore - tempDB, err := NewSqlStore(tempFileName, s.log.With(zap.String("service", "temp backup sqlite"))) - if err != nil { - return err - } - defer tempDB.Close() - - // Copy the data from the temporary restored DB into the currently open DB - return backup(ctx, s, tempDB) - } - - // Atomically swap the temporary file with the current DB file. - if err := fs.RenameFileWithReplacement(tempFileName, s.path); err != nil { - return err - } - - // Reopen the new database file - return s.openDB() -} - -// migrateRestored opens the database at the temporary path and applies the -// migrations to it. The database at the temporary path is closed after the -// migrations are complete. This should be used as part of the restore -// operation, prior to swapping the restored database (or its contents) with the -// active database. 
-func (s *SqlStore) migrateRestored(ctx context.Context, tempFileName string) error { - restoredSql, err := NewSqlStore(tempFileName, s.log.With(zap.String("service", "restored sqlite"))) - if err != nil { - return err - } - defer restoredSql.Close() - - restoreMigrator := NewMigrator( - restoredSql, - s.log.With(zap.String("service", "sqlite restore migrations")), - ) - - return restoreMigrator.Up(ctx, sqliteMigrations.AllUp) -} - -func (s *SqlStore) execTrans(ctx context.Context, stmt string) error { - // use a lock to prevent two potential simultaneous write operations to the database, - // which would throw an error - s.Mu.Lock() - defer s.Mu.Unlock() - - tx, err := s.DB.BeginTx(ctx, nil) - if err != nil { - return err - } - - _, err = tx.ExecContext(ctx, stmt) - if err != nil { - tx.Rollback() - return err - } - - err = tx.Commit() - if err != nil { - return err - } - - return nil -} - -func (s *SqlStore) allMigrationNames() ([]string, error) { - checkStmt := fmt.Sprintf(`SELECT name FROM sqlite_master WHERE type='table' AND name='%s'`, migrationsTableName) - tbls, err := s.queryToStrings(checkStmt) - if err != nil { - return nil, err - } - - if len(tbls) == 0 { - return nil, nil - } - - migrStmt := fmt.Sprintf(`SELECT name FROM %s ORDER BY name`, migrationsTableName) - migr, err := s.queryToStrings(migrStmt) - if err != nil { - return nil, err - } - - if len(migr) == 0 { - return nil, nil - } - - return migr, nil -} - -func (s *SqlStore) tableNames() ([]string, error) { - stmt := `SELECT name FROM sqlite_master WHERE type='table'` - return s.queryToStrings(stmt) -} - -// helper function for running a read-only query resulting in a slice of strings from -// an arbitrary statement. -func (s *SqlStore) queryToStrings(stmt string) ([]string, error) { - var output []string - - rows, err := s.DB.Query(stmt) - if err != nil { - return nil, err - } - - for rows.Next() { - var i string - err = rows.Scan(&i) - if err != nil { - return nil, err - } - - output = append(output, i) - } - - return output, nil -} diff --git a/sqlite/sqlite_helpers.go b/sqlite/sqlite_helpers.go deleted file mode 100644 index 1bde3f677cb..00000000000 --- a/sqlite/sqlite_helpers.go +++ /dev/null @@ -1,21 +0,0 @@ -package sqlite - -import ( - "testing" - - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -func NewTestStore(t *testing.T) *SqlStore { - tempDir := t.TempDir() - - s, err := NewSqlStore(tempDir+"/"+DefaultFilename, zap.NewNop()) - require.NoError(t, err, "unable to open testing database") - - t.Cleanup(func() { - require.NoError(t, s.Close(), "failed to close testing database") - }) - - return s -} diff --git a/sqlite/sqlite_test.go b/sqlite/sqlite_test.go deleted file mode 100644 index f165a8095db..00000000000 --- a/sqlite/sqlite_test.go +++ /dev/null @@ -1,250 +0,0 @@ -package sqlite - -import ( - "context" - "fmt" - "os" - "testing" - - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -func TestFlush(t *testing.T) { - t.Parallel() - - ctx := context.Background() - store := NewTestStore(t) - - err := store.execTrans(ctx, `CREATE TABLE test_table_1 (id TEXT NOT NULL PRIMARY KEY)`) - require.NoError(t, err) - - err = store.execTrans(ctx, `INSERT INTO test_table_1 (id) VALUES ("one"), ("two"), ("three")`) - require.NoError(t, err) - - vals, err := store.queryToStrings(`SELECT * FROM test_table_1`) - require.NoError(t, err) - require.Equal(t, 3, len(vals)) - - store.Flush(context.Background()) - - vals, err = store.queryToStrings(`SELECT * FROM test_table_1`) - 
require.NoError(t, err) - require.Equal(t, 0, len(vals)) -} - -func TestFlushMigrationsTable(t *testing.T) { - t.Parallel() - - ctx := context.Background() - store := NewTestStore(t) - - require.NoError(t, store.execTrans(ctx, fmt.Sprintf(`CREATE TABLE %s (id TEXT NOT NULL PRIMARY KEY)`, migrationsTableName))) - require.NoError(t, store.execTrans(ctx, fmt.Sprintf(`INSERT INTO %s (id) VALUES ("one"), ("two"), ("three")`, migrationsTableName))) - store.Flush(context.Background()) - - got, err := store.queryToStrings(fmt.Sprintf(`SELECT * FROM %s`, migrationsTableName)) - require.NoError(t, err) - want := []string{"one", "two", "three"} - require.Equal(t, want, got) -} - -func TestBackupSqlStore(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - // this temporary dir/file is is used as the source db path for testing a bacup - // from a non-memory database. each individual test also creates a separate temporary dir/file - // to backup into. - td := t.TempDir() - tf := fmt.Sprintf("%s/%s", td, DefaultFilename) - - tests := []struct { - name string - dbPath string - }{ - { - "in-memory db", - ":memory:", - }, - { - "file-based db", - tf, - }, - } - - for _, tt := range tests { - store, err := NewSqlStore(tt.dbPath, zap.NewNop()) - require.NoError(t, err) - defer store.Close() - - _, err = store.DB.Exec(`CREATE TABLE test_table_1 (id TEXT NOT NULL PRIMARY KEY)`) - require.NoError(t, err) - _, err = store.DB.Exec(`INSERT INTO test_table_1 (id) VALUES ("one"), ("two"), ("three")`) - require.NoError(t, err) - - _, err = store.DB.Exec(`CREATE TABLE test_table_2 (id TEXT NOT NULL PRIMARY KEY)`) - require.NoError(t, err) - _, err = store.DB.Exec(`INSERT INTO test_table_2 (id) VALUES ("four"), ("five"), ("six")`) - require.NoError(t, err) - - // create a file to write the backup to. - tempDir := t.TempDir() - - // open the file to use as a writer for BackupSqlStore - backupPath := tempDir + "/db.sqlite" - dest, err := os.Create(backupPath) - require.NoError(t, err) - defer dest.Close() - - // run the backup - err = store.BackupSqlStore(ctx, dest) - require.NoError(t, err) - - // open the backup file as a database - backup, err := NewSqlStore(backupPath, zap.NewNop()) - require.NoError(t, err) - defer backup.Close() - - // perform a query to verify that the database has been backed up properly - var res1, res2 []string - err = backup.DB.Select(&res1, `SELECT * FROM test_table_1`) - require.NoError(t, err) - err = backup.DB.Select(&res2, `SELECT * FROM test_table_2`) - require.NoError(t, err) - - require.Equal(t, []string{"one", "two", "three"}, res1) - require.Equal(t, []string{"four", "five", "six"}, res2) - } -} - -func TestRestoreSqlStore(t *testing.T) { - t.Parallel() - - // this temporary dir/file is is used as the destination db path for testing a restore - // into a non-memory database. each individual test also creates a separate temporary dir/file - // to hold a test db to restore from. 
- td := t.TempDir() - tf := fmt.Sprintf("%s/%s", td, DefaultFilename) - - tests := []struct { - name string - dbPath string - }{ - { - "in-memory db", - ":memory:", - }, - { - "file-based db", - tf, - }, - } - - for _, tt := range tests { - ctx := context.Background() - - // create the test db to restore from - tempDir := t.TempDir() - tempFileName := fmt.Sprintf("%s/%s", tempDir, DefaultFilename) - - restoreDB, err := NewSqlStore(tempFileName, zap.NewNop()) - require.NoError(t, err) - t.Cleanup(func() { restoreDB.Close() }) - - // add some data to the test db - _, err = restoreDB.DB.Exec(`CREATE TABLE test_table_1 (id TEXT NOT NULL PRIMARY KEY)`) - require.NoError(t, err) - _, err = restoreDB.DB.Exec(`INSERT INTO test_table_1 (id) VALUES ("one"), ("two"), ("three")`) - require.NoError(t, err) - - _, err = restoreDB.DB.Exec(`CREATE TABLE test_table_2 (id TEXT NOT NULL PRIMARY KEY)`) - require.NoError(t, err) - _, err = restoreDB.DB.Exec(`INSERT INTO test_table_2 (id) VALUES ("four"), ("five"), ("six")`) - require.NoError(t, err) - - // we're done using the restore db as a database, so close it now - err = restoreDB.Close() - require.NoError(t, err) - - // open the test "restore-from" db file as a reader - f, err := os.Open(tempFileName) - require.NoError(t, err) - t.Cleanup(func() { f.Close() }) - - // open a db to restore into. it will be empty to begin with. - restore, err := NewSqlStore(tt.dbPath, zap.NewNop()) - require.NoError(t, err) - t.Cleanup(func() { restore.Close() }) - - // run the restore - err = restore.RestoreSqlStore(ctx, f) - require.NoError(t, err) - - // perform a query to verify that the database has been restored up properly - var res1, res2 []string - err = restore.DB.Select(&res1, `SELECT * FROM test_table_1`) - require.NoError(t, err) - err = restore.DB.Select(&res2, `SELECT * FROM test_table_2`) - require.NoError(t, err) - - require.Equal(t, []string{"one", "two", "three"}, res1) - require.Equal(t, []string{"four", "five", "six"}, res2) - - require.NoError(t, f.Close()) - } -} - -func TestTableNames(t *testing.T) { - t.Parallel() - - store := NewTestStore(t) - ctx := context.Background() - - err := store.execTrans(ctx, `CREATE TABLE test_table_1 (id TEXT NOT NULL PRIMARY KEY); - CREATE TABLE test_table_3 (id TEXT NOT NULL PRIMARY KEY); - CREATE TABLE test_table_2 (id TEXT NOT NULL PRIMARY KEY);`) - require.NoError(t, err) - - got, err := store.tableNames() - require.NoError(t, err) - require.Equal(t, []string{"test_table_1", "test_table_3", "test_table_2"}, got) -} - -func TestAllMigrationNames(t *testing.T) { - t.Parallel() - - store := NewTestStore(t) - ctx := context.Background() - - // Empty db, returns nil slice and no error - got, err := store.allMigrationNames() - require.NoError(t, err) - require.Equal(t, []string(nil), got) - - // DB contains migrations table but no migrations - err = store.execTrans(ctx, `CREATE TABLE migrations ( - id TEXT NOT NULL PRIMARY KEY, - name TEXT NOT NULL)`) - require.NoError(t, err) - got, err = store.allMigrationNames() - require.NoError(t, err) - require.Equal(t, []string(nil), got) - - // DB contains one migration - err = store.execTrans(ctx, `INSERT INTO migrations (id, name) VALUES ("1", "0000_create_migrations_table.sql")`) - require.NoError(t, err) - got, err = store.allMigrationNames() - require.NoError(t, err) - require.Equal(t, []string{"0000_create_migrations_table.sql"}, got) - - // DB contains multiple migrations - they are returned sorted by name - err = store.execTrans(ctx, `INSERT INTO migrations (id, name) 
VALUES ("3", "0001_first_migration.sql")`) - require.NoError(t, err) - err = store.execTrans(ctx, `INSERT INTO migrations (id, name) VALUES ("2", "0002_second_migration.sql")`) - require.NoError(t, err) - got, err = store.allMigrationNames() - require.NoError(t, err) - require.Equal(t, []string{"0000_create_migrations_table.sql", "0001_first_migration.sql", "0002_second_migration.sql"}, got) -} diff --git a/sqlite/test_migrations/0001_create_migrations_table.down.sql b/sqlite/test_migrations/0001_create_migrations_table.down.sql deleted file mode 100644 index 594ffc9fb78..00000000000 --- a/sqlite/test_migrations/0001_create_migrations_table.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE migrations; diff --git a/sqlite/test_migrations/0001_create_migrations_table.up.sql b/sqlite/test_migrations/0001_create_migrations_table.up.sql deleted file mode 100644 index 2422a9acf9e..00000000000 --- a/sqlite/test_migrations/0001_create_migrations_table.up.sql +++ /dev/null @@ -1,4 +0,0 @@ -CREATE TABLE migrations ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT UNIQUE NOT NULL -); diff --git a/sqlite/test_migrations/0002_create_test_table_1.down.sql b/sqlite/test_migrations/0002_create_test_table_1.down.sql deleted file mode 100644 index dc282e65f10..00000000000 --- a/sqlite/test_migrations/0002_create_test_table_1.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE test_table_1; diff --git a/sqlite/test_migrations/0002_create_test_table_1.up.sql b/sqlite/test_migrations/0002_create_test_table_1.up.sql deleted file mode 100644 index 8a81a6058d5..00000000000 --- a/sqlite/test_migrations/0002_create_test_table_1.up.sql +++ /dev/null @@ -1,5 +0,0 @@ -CREATE TABLE test_table_1 ( - id TEXT NOT NULL PRIMARY KEY, - created_at TIMESTAMP, - updated_at TIMESTAMP -); diff --git a/sqlite/test_migrations/0003_rename_test_table_id_1.down.sql b/sqlite/test_migrations/0003_rename_test_table_id_1.down.sql deleted file mode 100644 index 600dcbd2f92..00000000000 --- a/sqlite/test_migrations/0003_rename_test_table_id_1.down.sql +++ /dev/null @@ -1,13 +0,0 @@ -ALTER TABLE test_table_1 RENAME TO _test_table_1_old; - -CREATE TABLE test_table_1 ( - id TEXT NOT NULL PRIMARY KEY, - created_at TIMESTAMP, - updated_at TIMESTAMP -); - -INSERT INTO test_table_1 (id, updated_at, created_at) - SELECT org_id, updated_at, created_at - FROM _test_table_1_old; - -DROP TABLE _test_table_1_old; diff --git a/sqlite/test_migrations/0003_rename_test_table_id_1.up.sql b/sqlite/test_migrations/0003_rename_test_table_id_1.up.sql deleted file mode 100644 index c766ea25d17..00000000000 --- a/sqlite/test_migrations/0003_rename_test_table_id_1.up.sql +++ /dev/null @@ -1,13 +0,0 @@ -ALTER TABLE test_table_1 RENAME TO _test_table_1_old; - -CREATE TABLE test_table_1 ( - org_id TEXT NOT NULL PRIMARY KEY, - created_at TIMESTAMP, - updated_at TIMESTAMP -); - -INSERT INTO test_table_1 (org_id, updated_at, created_at) - SELECT id, updated_at, created_at - FROM _test_table_1_old; - -DROP TABLE _test_table_1_old; diff --git a/sqlite/test_migrations/0004_create_test_table_2.down.sql b/sqlite/test_migrations/0004_create_test_table_2.down.sql deleted file mode 100644 index de650778655..00000000000 --- a/sqlite/test_migrations/0004_create_test_table_2.down.sql +++ /dev/null @@ -1 +0,0 @@ -DROP TABLE test_table_2; diff --git a/sqlite/test_migrations/0004_create_test_table_2.up.sql b/sqlite/test_migrations/0004_create_test_table_2.up.sql deleted file mode 100644 index 40fc822ce78..00000000000 --- a/sqlite/test_migrations/0004_create_test_table_2.up.sql +++ 
/dev/null @@ -1,5 +0,0 @@ -CREATE TABLE test_table_2 ( - user_id TEXT NOT NULL PRIMARY KEY, - created_at TIMESTAMP, - updated_at TIMESTAMP -); diff --git a/sqlite/test_migrations/all.go b/sqlite/test_migrations/all.go deleted file mode 100644 index e733e5cbe61..00000000000 --- a/sqlite/test_migrations/all.go +++ /dev/null @@ -1,18 +0,0 @@ -package test_migrations - -import "embed" - -//go:embed *.up.sql -var AllUp embed.FS - -//go:embed *.down.sql -var AllDown embed.FS - -//go:embed 0001_create_migrations_table.up.sql -var MigrationTable embed.FS - -//go:embed 0001_create_migrations_table.up.sql 0002_create_test_table_1.up.sql -var FirstUp embed.FS - -//go:embed 0001_create_migrations_table.down.sql 0002_create_test_table_1.down.sql -var FirstDown embed.FS diff --git a/static/TODO.go b/static/TODO.go deleted file mode 100644 index 62c2a5353df..00000000000 --- a/static/TODO.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build !assets - -package static - -import ( - "errors" - "os" -) - -// The functions defined in this file are placeholders for when the binary is -// built without assets. - -var errNoAssets = errors.New("no assets included in binary") - -// Asset returns an error stating no assets were included in the binary. -func Asset(string) ([]byte, error) { - return nil, errNoAssets -} - -// AssetInfo returns an error stating no assets were included in the binary. -func AssetInfo(name string) (os.FileInfo, error) { - return nil, errNoAssets -} - -// AssetDir returns nil because there are no assets included in the binary. -func AssetDir(name string) ([]string, error) { - return nil, errNoAssets -} diff --git a/static/static.go b/static/static.go deleted file mode 100644 index ec8c67d8ae6..00000000000 --- a/static/static.go +++ /dev/null @@ -1,214 +0,0 @@ -//go:generate env GO111MODULE=on go run github.com/kevinburke/go-bindata/go-bindata -o static_gen.go -ignore 'map|go' -tags assets -pkg static data/... - -package static - -import ( - "fmt" - "io" - "io/fs" - "net/http" - "os" - "path" - "path/filepath" - "strings" - "time" - - assetfs "github.com/elazarl/go-bindata-assetfs" - platform "github.com/influxdata/influxdb/v2" -) - -const ( - // defaultFile is the default UI asset file that will be served if no other - // static asset matches. This is particularly useful for serving content - // related to a SPA with client-side routing. - defaultFile = "index.html" - - // embedBaseDir is the prefix for files in the bundle with the binary. - embedBaseDir = "data" - - // uiBaseDir is the directory in embedBaseDir where the built UI assets - // reside. - uiBaseDir = "build" - - // swaggerFile is the name of the swagger JSON. - swaggerFile = "swagger.json" - - // fallbackPathSlug is the path to re-write on the request if the requested - // path does not match a file and the default file is served. For telemetry - // and metrics reporting purposes. - fallbackPathSlug = "/:fallback_path" -) - -// NewAssetHandler returns an http.Handler to serve files from the provided -// path. If no --assets-path flag is used when starting influxd, the path will -// be empty and files are served from the embedded filesystem. 
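A rough mounting sketch for the two handlers defined in this file (the wrapper function name and the swagger route are stand-ins; it assumes net/http and the github.com/influxdata/influxdb/v2/static package are imported):

// newUIRoutes registers the UI and swagger handlers on a fresh mux.
func newUIRoutes(assetsPath string) *http.ServeMux {
	mux := http.NewServeMux()
	// An empty assetsPath serves the UI build embedded in the binary; a
	// directory path serves assets from disk instead.
	mux.Handle("/", static.NewAssetHandler(assetsPath))
	// swagger.json is served from the embedded data directory, or a 404 is
	// returned if the binary was built without assets.
	mux.Handle("/api/v2/swagger.json", static.NewSwaggerHandler())
	return mux
}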
-func NewAssetHandler(assetsPath string) http.Handler { - var fileOpener http.FileSystem - - if assetsPath == "" { - fileOpener = &assetfs.AssetFS{ - Asset: Asset, - AssetDir: AssetDir, - AssetInfo: AssetInfo, - Prefix: filepath.Join(embedBaseDir, uiBaseDir), - } - } else { - fileOpener = http.FS(os.DirFS(assetsPath)) - } - - return mwSetCacheControl(assetHandler(fileOpener)) -} - -// NewSwaggerHandler returns an http.Handler to serve the swaggerFile from the -// embedBaseDir. If the swaggerFile is not found, returns a 404. -func NewSwaggerHandler() http.Handler { - fileOpener := &assetfs.AssetFS{ - Asset: Asset, - AssetDir: AssetDir, - AssetInfo: AssetInfo, - Prefix: embedBaseDir, - } - - return mwSetCacheControl(swaggerHandler(fileOpener)) -} - -// mwSetCacheControl sets a default cache control header. -func mwSetCacheControl(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Cache-Control", "public, max-age=3600") - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) -} - -// swaggerHandler returns a handler that serves the swaggerFile or returns a 404 -// if the swaggerFile is not present. -func swaggerHandler(fileOpener http.FileSystem) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - f, err := fileOpener.Open(swaggerFile) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - defer f.Close() - - staticFileHandler(f).ServeHTTP(w, r) - } - - return http.HandlerFunc(fn) -} - -// assetHandler returns a handler that either serves the file at that path, or -// the default file if a file cannot be found at that path. If the default file -// is served, the request path is re-written to the root path to simplify -// metrics reporting. -func assetHandler(fileOpener http.FileSystem) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - name := strings.TrimPrefix(path.Clean(r.URL.Path), "/") - // If the root directory is being requested, respond with the default file. - if name == "" { - name = defaultFile - r.URL.Path = "/" + defaultFile - } - - // Try to open the file requested by name, falling back to the default file. - // If even the default file can't be found, the binary must not have been - // built with assets, so respond with not found. - f, fallback, err := openAsset(fileOpener, name) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - defer f.Close() - - // If the default file will be served because the requested path didn't - // match any existing files, re-write the request path a placeholder value. - // This is to ensure that metrics do not get collected for an arbitrarily - // large range of incorrect paths. - if fallback { - r.URL.Path = fallbackPathSlug - } - - staticFileHandler(f).ServeHTTP(w, r) - } - - return http.HandlerFunc(fn) -} - -// staticFileHandler sets the ETag header prior to calling http.ServeContent -// with the contents of the file. 
-func staticFileHandler(f fs.File) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - content, ok := f.(io.ReadSeeker) - if !ok { - err := fmt.Errorf("could not open file for reading") - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - i, err := f.Stat() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - modTime, err := modTimeFromInfo(i, buildTime) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - w.Header().Set("ETag", etag(i.Size(), modTime)) - - // ServeContent will automatically set the content-type header for files - // from the extension of "name", and will also set the Last-Modified header - // from the provided time. - http.ServeContent(w, r, i.Name(), modTime, content) - } - - return http.HandlerFunc(fn) -} - -// openAsset attempts to open the asset by name in the given directory, falling -// back to the default file if the named asset can't be found. Returns an error -// if even the default asset can't be opened. -func openAsset(fileOpener http.FileSystem, name string) (fs.File, bool, error) { - var fallback bool - - f, err := fileOpener.Open(name) - if err != nil { - if os.IsNotExist(err) { - fallback = true - f, err = fileOpener.Open(defaultFile) - } - if err != nil { - return nil, fallback, err - } - } - - return f, fallback, nil -} - -// modTimeFromInfo gets the modification time from an fs.FileInfo. If this -// modification time is time.Time{}, it falls back to the time returned by -// timeFunc. The modification time will only be time.Time{} if using assets -// embedded with go:embed. -func modTimeFromInfo(i fs.FileInfo, timeFunc func() (time.Time, error)) (time.Time, error) { - modTime := i.ModTime() - if modTime.IsZero() { - return timeFunc() - } - - return modTime, nil -} - -// etag calculates an etag string from the provided file size and modification -// time. 
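For reference, the same values TestEtag checks later in this diff, worked through the format string below (an in-package sketch):

// size 1500 bytes, modification time 0001-01-02 02:15:20 UTC
// => "1500" + "2" (day) + "2" (hour) + "15" (minute) + "20" (second)
tag := etag(1500, time.Time{}.Add(26*time.Hour+15*time.Minute+20*time.Second))
// tag == `"1500221520"`; the surrounding quotes are part of the header value.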
-func etag(s int64, mt time.Time) string { - hour, minute, second := mt.Clock() - return fmt.Sprintf(`"%d%d%d%d%d"`, s, mt.Day(), hour, minute, second) -} - -func buildTime() (time.Time, error) { - return time.Parse(time.RFC3339, platform.GetBuildInfo().Date) -} diff --git a/static/static_test.go b/static/static_test.go deleted file mode 100644 index 139da2d9baf..00000000000 --- a/static/static_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package static - -import ( - "io" - "io/fs" - "net/http" - "net/http/httptest" - "testing" - "testing/fstest" - "time" - - "github.com/stretchr/testify/require" -) - -func TestAssetHandler(t *testing.T) { - t.Parallel() - - defaultData := []byte("this is the default file") - otherData := []byte("this is a different file") - - m := http.FS(fstest.MapFS{ - defaultFile: { - Data: defaultData, - ModTime: time.Now(), - }, - "somethingElse.js": { - Data: otherData, - ModTime: time.Now(), - }, - }) - - tests := []struct { - name string - reqPath string - newPath string - wantData []byte - }{ - { - name: "path matches default", - reqPath: "/" + defaultFile, - newPath: "/" + defaultFile, - wantData: defaultData, - }, - { - name: "root path", - reqPath: "/", - newPath: "/" + defaultFile, - wantData: defaultData, - }, - { - name: "path matches a file", - reqPath: "/somethingElse.js", - newPath: "/somethingElse.js", - wantData: otherData, - }, - { - name: "path matches nothing", - reqPath: "/something_random", - newPath: fallbackPathSlug, - wantData: defaultData, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - h := assetHandler(m) - r := httptest.NewRequest("GET", tt.reqPath, nil) - w := httptest.NewRecorder() - h.ServeHTTP(w, r) - - b, err := io.ReadAll(w.Result().Body) - require.NoError(t, err) - - require.Equal(t, http.StatusOK, w.Result().StatusCode) - require.Equal(t, tt.wantData, b) - require.Equal(t, tt.newPath, r.URL.Path) - }) - } -} - -func TestModTimeFromInfo(t *testing.T) { - t.Parallel() - - nowTime := time.Now() - - timeFunc := func() (time.Time, error) { - return nowTime, nil - } - - fsys := fstest.MapFS{ - "zeroTime.file": { - ModTime: time.Time{}, - }, - "notZeroTime.file": { - ModTime: nowTime, - }, - } - - info1, err := fsys.Stat("zeroTime.file") - require.NoError(t, err) - - info2, err := fsys.Stat("notZeroTime.file") - require.NoError(t, err) - - tests := []struct { - name string - info fs.FileInfo - want time.Time - }{ - { - "zero time returns fallback time", - info1, - nowTime, - }, - { - "non-zero time returns same time", - info2, - nowTime, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := modTimeFromInfo(tt.info, timeFunc) - - require.NoError(t, err) - require.Equal(t, tt.want, got) - }) - } -} - -func TestOpenAsset(t *testing.T) { - t.Parallel() - - defaultData := []byte("this is the default file") - otherData := []byte("this is a different file") - - m := http.FS(fstest.MapFS{ - defaultFile: { - Data: defaultData, - }, - "somethingElse.js": { - Data: otherData, - }, - }) - - tests := []struct { - name string - file string - fallback bool - want []byte - }{ - { - "default file by name", - defaultFile, - false, - defaultData, - }, - { - "other file by name", - "somethingElse.js", - false, - otherData, - }, - { - "falls back to default if can't find", - "badFile.exe", - true, - defaultData, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotFile, fallback, err := openAsset(m, tt.file) - require.NoError(t, err) - require.Equal(t, 
tt.fallback, fallback) - - got, err := io.ReadAll(gotFile) - require.NoError(t, err) - - require.Equal(t, tt.want, got) - }) - } -} - -func TestEtag(t *testing.T) { - t.Parallel() - - testTime := time.Time{} - - testTime = testTime.Add(26 * time.Hour) - testTime = testTime.Add(15 * time.Minute) - testTime = testTime.Add(20 * time.Second) - - testSize := int64(1500) - - got := etag(testSize, testTime) - want := `"1500221520"` - - require.Equal(t, got, want) -} diff --git a/status.go b/status.go deleted file mode 100644 index fc113acbd38..00000000000 --- a/status.go +++ /dev/null @@ -1,35 +0,0 @@ -package influxdb - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// Status defines if a resource is active or inactive. -type Status string - -const ( - // Active status means that the resource can be used. - Active Status = "active" - // Inactive status means that the resource cannot be used. - Inactive Status = "inactive" -) - -// Valid determines if a Status value matches the enum. -func (s Status) Valid() error { - switch s { - case Active, Inactive: - return nil - default: - return &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("invalid status: must be %v or %v", Active, Inactive), - } - } -} - -// Ptr returns the pointer of that status. -func (s Status) Ptr() *Status { - return &s -} diff --git a/storage/bucket_service.go b/storage/bucket_service.go deleted file mode 100644 index 2c4b6a65739..00000000000 --- a/storage/bucket_service.go +++ /dev/null @@ -1,98 +0,0 @@ -package storage - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "go.uber.org/zap" -) - -type EngineSchema interface { - CreateBucket(context.Context, *influxdb.Bucket) error - UpdateBucketRetentionPolicy(context.Context, platform.ID, *influxdb.BucketUpdate) error - DeleteBucket(context.Context, platform.ID, platform.ID) error -} - -// BucketService wraps an existing influxdb.BucketService implementation. -// -// BucketService ensures that when a bucket is deleted, all stored data -// associated with the bucket is either removed, or marked to be removed via a -// future compaction. -type BucketService struct { - influxdb.BucketService - log *zap.Logger - engine EngineSchema -} - -// NewBucketService returns a new BucketService for the provided EngineSchema, -// which typically will be an Engine. 
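A rough wiring sketch (only storage.NewBucketService, its parameters, and EngineSchema come from the code below; the wrapper function name is a stand-in):

// newStorageBuckets wraps a metadata-backed bucket service so that create,
// update, and delete operations are mirrored into the storage engine schema.
func newStorageBuckets(log *zap.Logger, meta influxdb.BucketService, engine storage.EngineSchema) influxdb.BucketService {
	return storage.NewBucketService(log, meta, engine)
}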
-func NewBucketService(log *zap.Logger, s influxdb.BucketService, engine EngineSchema) *BucketService { - return &BucketService{ - BucketService: s, - log: log, - engine: engine, - } -} - -func (s *BucketService) CreateBucket(ctx context.Context, b *influxdb.Bucket) (err error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - defer func() { - if err == nil { - return - } - - if b.ID.Valid() { - if err := s.BucketService.DeleteBucket(ctx, b.ID); err != nil { - s.log.Error("Unable to cleanup bucket after create failed", zap.Error(err)) - } - } - }() - - // Normalize the bucket's shard-group - b.ShardGroupDuration = meta.NormalisedShardDuration(b.ShardGroupDuration, b.RetentionPeriod) - - if err = s.BucketService.CreateBucket(ctx, b); err != nil { - return err - } - - if err = s.engine.CreateBucket(ctx, b); err != nil { - return err - } - - return nil -} - -func (s *BucketService) UpdateBucket(ctx context.Context, id platform.ID, upd influxdb.BucketUpdate) (b *influxdb.Bucket, err error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if err = s.engine.UpdateBucketRetentionPolicy(ctx, id, &upd); err != nil { - return nil, err - } - - return s.BucketService.UpdateBucket(ctx, id, upd) -} - -// DeleteBucket removes a bucket by ID. -func (s *BucketService) DeleteBucket(ctx context.Context, bucketID platform.ID) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - bucket, err := s.FindBucketByID(ctx, bucketID) - if err != nil { - return err - } - - // The data is dropped first from the storage engine. If this fails for any - // reason, then the bucket will still be available in the future to retrieve - // the orgID, which is needed for the engine. - if err := s.engine.DeleteBucket(ctx, bucket.OrgID, bucketID); err != nil { - return err - } - return s.BucketService.DeleteBucket(ctx, bucketID) -} diff --git a/storage/bucket_service_test.go b/storage/bucket_service_test.go deleted file mode 100644 index c19c1f600e5..00000000000 --- a/storage/bucket_service_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package storage_test - -import ( - "context" - "testing" - "time" - - "github.com/dustin/go-humanize" - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/storage" - "github.com/influxdata/influxdb/v2/storage/mocks" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" -) - -func TestBucketService_CreateBucket(t *testing.T) { - testCases := []struct { - name string - rp time.Duration - inSg time.Duration - outSg time.Duration - }{ - { - name: "infinite RP, derived SGD", - rp: influxdb.InfiniteRetention, - inSg: 0, - outSg: humanize.Week, - }, - { - name: "infinite RP, pinned SGD", - rp: influxdb.InfiniteRetention, - inSg: time.Hour, - outSg: time.Hour, - }, - { - name: "large RP, derived SGD", - rp: humanize.Week, - inSg: 0, - outSg: humanize.Day, - }, - { - name: "large RP, pinned SGD", - rp: humanize.Week, - inSg: 5 * time.Hour, - outSg: 5 * time.Hour, - }, - { - name: "small RP, derived SGD", - rp: humanize.Day, - inSg: 0, - outSg: time.Hour, - }, - { - name: "small RP, pinned SGD", - rp: humanize.Day, - inSg: 5 * time.Hour, - outSg: 5 * time.Hour, - }, - } - - 
for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - engine := mocks.NewMockEngineSchema(ctrl) - logger := zaptest.NewLogger(t) - inmemService := newTenantService(t, logger) - service := storage.NewBucketService(logger, inmemService, engine) - ctx := context.Background() - - org := &influxdb.Organization{Name: "org1"} - require.NoError(t, inmemService.CreateOrganization(ctx, org)) - - bucket := &influxdb.Bucket{OrgID: org.ID, RetentionPeriod: tc.rp, ShardGroupDuration: tc.inSg} - - // Test creating a bucket calls into the creator. - engine.EXPECT().CreateBucket(gomock.Any(), bucket) - require.NoError(t, service.CreateBucket(ctx, bucket)) - - // Test that a shard-group duration was created for the bucket - require.Equal(t, tc.outSg, bucket.ShardGroupDuration) - - // Test that the shard-group duration was recorded in the KV store. - kvBucket, err := inmemService.FindBucketByID(ctx, bucket.ID) - require.NoError(t, err) - require.Equal(t, tc.outSg, kvBucket.ShardGroupDuration) - }) - } -} - -func TestBucketService_DeleteBucket(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - engine := mocks.NewMockEngineSchema(ctrl) - logger := zaptest.NewLogger(t) - inmemService := newTenantService(t, logger) - service := storage.NewBucketService(logger, inmemService, engine) - ctx := context.Background() - - org := &influxdb.Organization{Name: "org1"} - require.NoError(t, inmemService.CreateOrganization(ctx, org)) - - bucket := &influxdb.Bucket{OrgID: org.ID} - require.NoError(t, inmemService.CreateBucket(ctx, bucket)) - - // Test deleting a bucket calls into the deleter. - engine.EXPECT().DeleteBucket(gomock.Any(), org.ID, bucket.ID) - require.NoError(t, service.DeleteBucket(ctx, bucket.ID)) -} - -func TestBucketService_DeleteNonexistentBucket(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - i, err := platform.IDFromString("2222222222222222") - require.NoError(t, err) - - engine := mocks.NewMockEngineSchema(ctrl) - logger := zaptest.NewLogger(t) - inmemService := newTenantService(t, logger) - service := storage.NewBucketService(logger, inmemService, engine) - ctx := context.Background() - - require.Error(t, service.DeleteBucket(ctx, *i)) -} - -func newTenantService(t *testing.T, logger *zap.Logger) *tenant.Service { - t.Helper() - - store := inmem.NewKVStore() - if err := all.Up(context.Background(), logger, store); err != nil { - t.Fatal(err) - } - - return tenant.NewService(tenant.NewStore(store)) -} diff --git a/storage/config.go b/storage/config.go deleted file mode 100644 index 23320e4fcd9..00000000000 --- a/storage/config.go +++ /dev/null @@ -1,31 +0,0 @@ -package storage - -import ( - "time" - - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/v1/services/precreator" - "github.com/influxdata/influxdb/v2/v1/services/retention" -) - -// DefaultWriteTimeout is the default timeout for a complete write to succeed. -const DefaultWriteTimeout = 10 * time.Second - -// Config holds the configuration for an Engine. -type Config struct { - Data tsdb.Config - WriteTimeout time.Duration - - RetentionService retention.Config - PrecreatorConfig precreator.Config -} - -// NewConfig initialises a new config for an Engine. 
-func NewConfig() Config { - return Config{ - Data: tsdb.NewConfig(), - WriteTimeout: DefaultWriteTimeout, - RetentionService: retention.NewConfig(), - PrecreatorConfig: precreator.NewConfig(), - } -} diff --git a/storage/engine.go b/storage/engine.go deleted file mode 100644 index 9b9142f3f94..00000000000 --- a/storage/engine.go +++ /dev/null @@ -1,521 +0,0 @@ -package storage - -import ( - "context" - "fmt" - "io" - "path/filepath" - "sync" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" - _ "github.com/influxdata/influxdb/v2/tsdb/engine" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - _ "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" - "github.com/influxdata/influxdb/v2/v1/coordinator" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/influxdata/influxdb/v2/v1/services/precreator" - "github.com/influxdata/influxdb/v2/v1/services/retention" - "github.com/influxdata/influxql" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/multierr" - "go.uber.org/zap" -) - -var ( - // ErrEngineClosed is returned when a caller attempts to use the engine while - // it's closed. - ErrEngineClosed = errors.New("engine is closed") - - // ErrNotImplemented is returned for APIs that are temporarily not implemented. - ErrNotImplemented = errors.New("not implemented") -) - -type Engine struct { - config Config - path string - - mu sync.RWMutex - closing chan struct{} // closing returns the zero value when the engine is shutting down. 
- tsdbStore *tsdb.Store - metaClient MetaClient - pointsWriter interface { - WritePoints(ctx context.Context, database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error - Close() error - } - - retentionService *retention.Service - precreatorService *precreator.Service - - writePointsValidationEnabled bool - - logger *zap.Logger - metricsDisabled bool -} - -// Option provides a set -type Option func(*Engine) - -func WithMetaClient(c MetaClient) Option { - return func(e *Engine) { - e.metaClient = c - } -} - -func WithMetricsDisabled(m bool) Option { - return func(e *Engine) { - e.metricsDisabled = m - } -} - -type MetaClient interface { - CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) - DropDatabase(name string) error - CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) - Database(name string) (di *meta.DatabaseInfo) - Databases() []meta.DatabaseInfo - DeleteShardGroup(database, policy string, id uint64) error - PrecreateShardGroups(now, cutoff time.Time) error - PruneShardGroups() error - RetentionPolicy(database, policy string) (*meta.RetentionPolicyInfo, error) - ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) - UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error - RLock() - RUnlock() - Backup(ctx context.Context, w io.Writer) error - Restore(ctx context.Context, r io.Reader) error - Data() meta.Data - SetData(data *meta.Data) error -} - -type TSDBStore interface { - DeleteMeasurement(ctx context.Context, database, name string) error - DeleteSeries(ctx context.Context, database string, sources []influxql.Source, condition influxql.Expr) error - MeasurementNames(ctx context.Context, auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) - ShardGroup(ids []uint64) tsdb.ShardGroup - Shards(ids []uint64) []*tsdb.Shard - TagKeys(ctx context.Context, auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) - TagValues(ctx context.Context, auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) - SeriesCardinality(ctx context.Context, database string) (int64, error) - SeriesCardinalityFromShards(ctx context.Context, shards []*tsdb.Shard) (*tsdb.SeriesIDSet, error) - SeriesFile(database string) *tsdb.SeriesFile -} - -// NewEngine initialises a new storage engine, including a series file, index and -// TSM engine. -func NewEngine(path string, c Config, options ...Option) *Engine { - c.Data.Dir = filepath.Join(path, "data") - c.Data.WALDir = filepath.Join(path, "wal") - - e := &Engine{ - config: c, - path: path, - tsdbStore: tsdb.NewStore(c.Data.Dir), - logger: zap.NewNop(), - - writePointsValidationEnabled: true, - } - - for _, opt := range options { - opt(e) - } - - e.tsdbStore.EngineOptions.Config = c.Data - - // Copy TSDB configuration. 
- e.tsdbStore.EngineOptions.EngineVersion = c.Data.Engine - e.tsdbStore.EngineOptions.IndexVersion = c.Data.Index - e.tsdbStore.EngineOptions.MetricsDisabled = e.metricsDisabled - - pw := coordinator.NewPointsWriter(c.WriteTimeout, path) - pw.TSDBStore = e.tsdbStore - pw.MetaClient = e.metaClient - e.pointsWriter = pw - - e.retentionService = retention.NewService(c.RetentionService) - e.retentionService.TSDBStore = e.tsdbStore - e.retentionService.MetaClient = e.metaClient - - e.precreatorService = precreator.NewService(c.PrecreatorConfig) - e.precreatorService.MetaClient = e.metaClient - - return e -} - -// WithLogger sets the logger on the Store. It must be called before Open. -func (e *Engine) WithLogger(log *zap.Logger) { - e.logger = log.With(zap.String("service", "storage-engine")) - - e.tsdbStore.WithLogger(e.logger) - if pw, ok := e.pointsWriter.(*coordinator.PointsWriter); ok { - pw.WithLogger(e.logger) - } - - if e.retentionService != nil { - e.retentionService.WithLogger(log) - } - - if e.precreatorService != nil { - e.precreatorService.WithLogger(log) - } -} - -// PrometheusCollectors returns all the prometheus collectors associated with -// the engine and its components. -func (e *Engine) PrometheusCollectors() []prometheus.Collector { - var metrics []prometheus.Collector - metrics = append(metrics, tsm1.PrometheusCollectors()...) - metrics = append(metrics, coordinator.PrometheusCollectors()...) - metrics = append(metrics, tsdb.ShardCollectors()...) - metrics = append(metrics, tsdb.BucketCollectors()...) - metrics = append(metrics, retention.PrometheusCollectors()...) - return metrics -} - -// Open opens the store and all underlying resources. It returns an error if -// any of the underlying systems fail to open. -func (e *Engine) Open(ctx context.Context) (err error) { - e.mu.Lock() - defer e.mu.Unlock() - - if e.closing != nil { - return nil // Already open - } - - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if err := e.tsdbStore.Open(ctx); err != nil { - return err - } - - if err := e.retentionService.Open(ctx); err != nil { - return err - } - - if err := e.precreatorService.Open(ctx); err != nil { - return err - } - - e.closing = make(chan struct{}) - - return nil -} - -// EnableCompactions allows the series file, index, & underlying engine to compact. -func (e *Engine) EnableCompactions() { -} - -// DisableCompactions disables compactions in the series file, index, & engine. -func (e *Engine) DisableCompactions() { -} - -// Close closes the store and all underlying resources. It returns an error if -// any of the underlying systems fail to close. -func (e *Engine) Close() error { - e.mu.RLock() - if e.closing == nil { - e.mu.RUnlock() - // Unusual if an engine is closed more than once, so note it. 
- e.logger.Info("Close() called on already-closed engine") - return nil // Already closed - } - - close(e.closing) - e.mu.RUnlock() - - e.mu.Lock() - defer e.mu.Unlock() - e.closing = nil - - var retErr error - if err := e.precreatorService.Close(); err != nil { - retErr = multierr.Append(retErr, fmt.Errorf("error closing shard precreator service: %w", err)) - } - - if err := e.retentionService.Close(); err != nil { - retErr = multierr.Append(retErr, fmt.Errorf("error closing retention service: %w", err)) - } - - if err := e.tsdbStore.Close(); err != nil { - retErr = multierr.Append(retErr, fmt.Errorf("error closing TSDB store: %w", err)) - } - - if err := e.pointsWriter.Close(); err != nil { - retErr = multierr.Append(retErr, fmt.Errorf("error closing points writer: %w", err)) - } - return retErr -} - -// WritePoints writes the provided points to the engine. -// -// The Engine expects all points to have been correctly validated by the caller. -// However, WritePoints will determine if any tag key-pairs are missing, or if -// there are any field type conflicts. -// Rosalie was here lockdown 2020 -// -// Appropriate errors are returned in those cases. -func (e *Engine) WritePoints(ctx context.Context, orgID platform.ID, bucketID platform.ID, points []models.Point) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - //TODO - remember to add back unicode validation... - - e.mu.RLock() - defer e.mu.RUnlock() - - if e.closing == nil { - return ErrEngineClosed - } - - return e.pointsWriter.WritePoints(ctx, bucketID.String(), meta.DefaultRetentionPolicyName, models.ConsistencyLevelAll, &meta.UserInfo{}, points) -} - -func (e *Engine) CreateBucket(ctx context.Context, b *influxdb.Bucket) (err error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - spec := meta.RetentionPolicySpec{ - Name: meta.DefaultRetentionPolicyName, - Duration: &b.RetentionPeriod, - ShardGroupDuration: b.ShardGroupDuration, - } - - if _, err = e.metaClient.CreateDatabaseWithRetentionPolicy(b.ID.String(), &spec); err != nil { - return err - } - - return nil -} - -func (e *Engine) UpdateBucketRetentionPolicy(ctx context.Context, bucketID platform.ID, upd *influxdb.BucketUpdate) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - rpu := meta.RetentionPolicyUpdate{ - Duration: upd.RetentionPeriod, - ShardGroupDuration: upd.ShardGroupDuration, - } - - err := e.metaClient.UpdateRetentionPolicy(bucketID.String(), meta.DefaultRetentionPolicyName, &rpu, true) - if err == meta.ErrIncompatibleDurations { - err = &errors2.Error{ - Code: errors2.EUnprocessableEntity, - Msg: "shard-group duration must also be updated to be smaller than new retention duration", - } - } - return err -} - -// DeleteBucket deletes an entire bucket from the storage engine. -func (e *Engine) DeleteBucket(ctx context.Context, orgID, bucketID platform.ID) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - err := e.tsdbStore.DeleteDatabase(bucketID.String()) - if err != nil { - return err - } - return e.metaClient.DropDatabase(bucketID.String()) -} - -// DeleteBucketRangePredicate deletes data within a bucket from the storage engine. Any data -// deleted must be in [min, max], and the key must match the predicate if provided. 
-func (e *Engine) DeleteBucketRangePredicate(ctx context.Context, orgID, bucketID platform.ID, min, max int64, pred influxdb.Predicate, measurement influxql.Expr) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - e.mu.RLock() - defer e.mu.RUnlock() - if e.closing == nil { - return ErrEngineClosed - } - return e.tsdbStore.DeleteSeriesWithPredicate(ctx, bucketID.String(), min, max, pred, measurement) -} - -// RLockKVStore locks the KV store as well as the engine in preparation for doing a backup. -func (e *Engine) RLockKVStore() { - e.mu.RLock() - e.metaClient.RLock() -} - -// RUnlockKVStore unlocks the KV store & engine, intended to be used after a backup is complete. -func (e *Engine) RUnlockKVStore() { - e.mu.RUnlock() - e.metaClient.RUnlock() -} - -func (e *Engine) BackupKVStore(ctx context.Context, w io.Writer) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if e.closing == nil { - return ErrEngineClosed - } - - return e.metaClient.Backup(ctx, w) -} - -func (e *Engine) BackupShard(ctx context.Context, w io.Writer, shardID uint64, since time.Time) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - e.mu.RLock() - defer e.mu.RUnlock() - - if e.closing == nil { - return ErrEngineClosed - } - - return e.tsdbStore.BackupShard(shardID, since, w) -} - -func (e *Engine) RestoreKVStore(ctx context.Context, r io.Reader) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - e.mu.RLock() - defer e.mu.RUnlock() - - if e.closing == nil { - return ErrEngineClosed - } - - // Replace KV store data and remove all existing shard data. - if err := e.metaClient.Restore(ctx, r); err != nil { - return err - } else if err := e.tsdbStore.DeleteShards(); err != nil { - return err - } - - // Create new shards based on the restored KV data. - data := e.metaClient.Data() - for _, dbi := range data.Databases { - for _, rpi := range dbi.RetentionPolicies { - for _, sgi := range rpi.ShardGroups { - if sgi.Deleted() { - continue - } - - for _, sh := range sgi.Shards { - if err := e.tsdbStore.CreateShard(ctx, dbi.Name, rpi.Name, sh.ID, true); err != nil { - return err - } - } - } - } - } - - return nil -} - -func (e *Engine) RestoreBucket(ctx context.Context, id platform.ID, buf []byte) (map[uint64]uint64, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - e.mu.RLock() - defer e.mu.RUnlock() - - if e.closing == nil { - return nil, ErrEngineClosed - } - - var newDBI meta.DatabaseInfo - if err := newDBI.UnmarshalBinary(buf); err != nil { - return nil, err - } - - data := e.metaClient.Data() - dbi := data.Database(id.String()) - if dbi == nil { - return nil, fmt.Errorf("bucket dbi for %q not found during restore", newDBI.Name) - } else if len(newDBI.RetentionPolicies) != 1 { - return nil, fmt.Errorf("bucket must have 1 retention policy; attempting to restore %d retention policies", len(newDBI.RetentionPolicies)) - } - - dbi.RetentionPolicies = newDBI.RetentionPolicies - dbi.ContinuousQueries = newDBI.ContinuousQueries - - // Generate shard ID mapping. - shardIDMap := make(map[uint64]uint64) - rpi := newDBI.RetentionPolicies[0] - for j, sgi := range rpi.ShardGroups { - data.MaxShardGroupID++ - rpi.ShardGroups[j].ID = data.MaxShardGroupID - - for k := range sgi.Shards { - data.MaxShardID++ - shardIDMap[sgi.Shards[k].ID] = data.MaxShardID - sgi.Shards[k].ID = data.MaxShardID - sgi.Shards[k].Owners = []meta.ShardOwner{} - } - } - - // Update data. 
- if err := e.metaClient.SetData(&data); err != nil { - return nil, err - } - - // Create shards. - for _, sgi := range rpi.ShardGroups { - if sgi.Deleted() { - continue - } - - for _, sh := range sgi.Shards { - if err := e.tsdbStore.CreateShard(ctx, dbi.Name, rpi.Name, sh.ID, true); err != nil { - return nil, err - } - } - } - - return shardIDMap, nil -} - -func (e *Engine) RestoreShard(ctx context.Context, shardID uint64, r io.Reader) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - e.mu.RLock() - defer e.mu.RUnlock() - - if e.closing == nil { - return ErrEngineClosed - } - - return e.tsdbStore.RestoreShard(ctx, shardID, r) -} - -// SeriesCardinality returns the number of series in the engine. -func (e *Engine) SeriesCardinality(ctx context.Context, bucketID platform.ID) int64 { - e.mu.RLock() - defer e.mu.RUnlock() - if e.closing == nil { - return 0 - } - - n, err := e.tsdbStore.SeriesCardinality(ctx, bucketID.String()) - if err != nil { - return 0 - } - return n -} - -// Path returns the path of the engine's base directory. -func (e *Engine) Path() string { - return e.path -} - -func (e *Engine) TSDBStore() TSDBStore { - return e.tsdbStore -} - -func (e *Engine) MetaClient() MetaClient { - return e.metaClient -} diff --git a/storage/flux/reader.go b/storage/flux/reader.go deleted file mode 100644 index e7eeaec6b5c..00000000000 --- a/storage/flux/reader.go +++ /dev/null @@ -1,1092 +0,0 @@ -package storageflux - -import ( - "context" - "fmt" - "strings" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/interval" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/plan" - "github.com/influxdata/flux/values" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/query" - storage "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/tsdb/cursors" - "google.golang.org/protobuf/types/known/anypb" -) - -// GroupCursorError is returned when two different cursor types -// are read for the same table. 
-type GroupCursorError struct { - typ string - cursor cursors.Cursor -} - -func (err *GroupCursorError) Error() string { - var got string - switch err.cursor.(type) { - case cursors.FloatArrayCursor: - got = "float" - case cursors.IntegerArrayCursor: - got = "integer" - case cursors.UnsignedArrayCursor: - got = "unsigned" - case cursors.StringArrayCursor: - got = "string" - case cursors.BooleanArrayCursor: - got = "boolean" - default: - got = "invalid" - } - return fmt.Sprintf("schema collision: cannot group %s and %s types together", err.typ, got) -} - -type storageTable interface { - flux.Table - Close() - Cancel() - Statistics() cursors.CursorStats -} - -type storeReader struct { - s storage.Store -} - -// NewReader returns a new storageflux reader -func NewReader(s storage.Store) query.StorageReader { - return &storeReader{s: s} -} - -func (r *storeReader) ReadFilter(ctx context.Context, spec query.ReadFilterSpec, alloc memory.Allocator) (query.TableIterator, error) { - return &filterIterator{ - ctx: ctx, - s: r.s, - spec: spec, - cache: newTagsCache(0), - alloc: alloc, - }, nil -} - -func (r *storeReader) ReadGroup(ctx context.Context, spec query.ReadGroupSpec, alloc memory.Allocator) (query.TableIterator, error) { - return &groupIterator{ - ctx: ctx, - s: r.s, - spec: spec, - cache: newTagsCache(0), - alloc: alloc, - }, nil -} - -func (r *storeReader) ReadWindowAggregate(ctx context.Context, spec query.ReadWindowAggregateSpec, alloc memory.Allocator) (query.TableIterator, error) { - return &windowAggregateIterator{ - ctx: ctx, - s: r.s, - spec: spec, - cache: newTagsCache(0), - alloc: alloc, - }, nil -} - -func (r *storeReader) ReadTagKeys(ctx context.Context, spec query.ReadTagKeysSpec, alloc memory.Allocator) (query.TableIterator, error) { - return &tagKeysIterator{ - ctx: ctx, - bounds: spec.Bounds, - s: r.s, - readSpec: spec, - predicate: spec.Predicate, - alloc: alloc, - }, nil -} - -func (r *storeReader) ReadTagValues(ctx context.Context, spec query.ReadTagValuesSpec, alloc memory.Allocator) (query.TableIterator, error) { - return &tagValuesIterator{ - ctx: ctx, - bounds: spec.Bounds, - s: r.s, - readSpec: spec, - predicate: spec.Predicate, - alloc: alloc, - }, nil -} - -func (r *storeReader) ReadSeriesCardinality(ctx context.Context, spec query.ReadSeriesCardinalitySpec, alloc memory.Allocator) (query.TableIterator, error) { - return &seriesCardinalityIterator{ - ctx: ctx, - bounds: spec.Bounds, - s: r.s, - readSpec: spec, - predicate: spec.Predicate, - alloc: alloc, - }, nil -} - -func (r *storeReader) SupportReadSeriesCardinality(ctx context.Context) bool { - return r.s.SupportReadSeriesCardinality(ctx) -} - -func (r *storeReader) Close() {} - -type filterIterator struct { - ctx context.Context - s storage.Store - spec query.ReadFilterSpec - stats cursors.CursorStats - cache *tagsCache - alloc memory.Allocator -} - -func (fi *filterIterator) Statistics() cursors.CursorStats { return fi.stats } - -func (fi *filterIterator) Do(f func(flux.Table) error) error { - src := fi.s.GetSource( - uint64(fi.spec.OrganizationID), - uint64(fi.spec.BucketID), - ) - - // Setup read request - any, err := anypb.New(src) - if err != nil { - return err - } - - var req datatypes.ReadFilterRequest - req.ReadSource = any - req.Predicate = fi.spec.Predicate - req.Range = &datatypes.TimestampRange{ - Start: int64(fi.spec.Bounds.Start), - End: int64(fi.spec.Bounds.Stop), - } - - rs, err := fi.s.ReadFilter(fi.ctx, &req) - if err != nil { - return err - } - - if rs == nil { - return nil - } - - return 
fi.handleRead(f, rs) -} - -func (fi *filterIterator) handleRead(f func(flux.Table) error, rs storage.ResultSet) error { - // these resources must be closed if not nil on return - var ( - cur cursors.Cursor - table storageTable - ) - - defer func() { - if table != nil { - table.Close() - } - if cur != nil { - cur.Close() - } - rs.Close() - fi.cache.Release() - }() - -READ: - for rs.Next() { - cur = rs.Cursor() - if cur == nil { - // no data for series key + field combination - continue - } - - bnds := fi.spec.Bounds - key := defaultGroupKeyForSeries(rs.Tags(), bnds) - done := make(chan struct{}) - switch typedCur := cur.(type) { - case cursors.IntegerArrayCursor: - cols, defs := determineTableColsForSeries(rs.Tags(), flux.TInt) - table = newIntegerTable(done, typedCur, bnds, key, cols, rs.Tags(), defs, fi.cache, fi.alloc) - case cursors.FloatArrayCursor: - cols, defs := determineTableColsForSeries(rs.Tags(), flux.TFloat) - table = newFloatTable(done, typedCur, bnds, key, cols, rs.Tags(), defs, fi.cache, fi.alloc) - case cursors.UnsignedArrayCursor: - cols, defs := determineTableColsForSeries(rs.Tags(), flux.TUInt) - table = newUnsignedTable(done, typedCur, bnds, key, cols, rs.Tags(), defs, fi.cache, fi.alloc) - case cursors.BooleanArrayCursor: - cols, defs := determineTableColsForSeries(rs.Tags(), flux.TBool) - table = newBooleanTable(done, typedCur, bnds, key, cols, rs.Tags(), defs, fi.cache, fi.alloc) - case cursors.StringArrayCursor: - cols, defs := determineTableColsForSeries(rs.Tags(), flux.TString) - table = newStringTable(done, typedCur, bnds, key, cols, rs.Tags(), defs, fi.cache, fi.alloc) - default: - panic(fmt.Sprintf("unreachable: %T", typedCur)) - } - - cur = nil - - if !table.Empty() { - if err := f(table); err != nil { - table.Close() - table = nil - return err - } - select { - case <-done: - case <-fi.ctx.Done(): - table.Cancel() - break READ - } - } - - stats := table.Statistics() - fi.stats.ScannedValues += stats.ScannedValues - fi.stats.ScannedBytes += stats.ScannedBytes - table.Close() - table = nil - } - return rs.Err() -} - -type groupIterator struct { - ctx context.Context - s storage.Store - spec query.ReadGroupSpec - stats cursors.CursorStats - cache *tagsCache - alloc memory.Allocator -} - -func (gi *groupIterator) Statistics() cursors.CursorStats { return gi.stats } - -func (gi *groupIterator) Do(f func(flux.Table) error) error { - src := gi.s.GetSource( - uint64(gi.spec.OrganizationID), - uint64(gi.spec.BucketID), - ) - - // Setup read request - any, err := anypb.New(src) - if err != nil { - return err - } - - var req datatypes.ReadGroupRequest - req.ReadSource = any - req.Predicate = gi.spec.Predicate - req.Range = &datatypes.TimestampRange{ - Start: int64(gi.spec.Bounds.Start), - End: int64(gi.spec.Bounds.Stop), - } - - if len(gi.spec.GroupKeys) > 0 && gi.spec.GroupMode == query.GroupModeNone { - return &errors.Error{ - Code: errors.EInternal, - Msg: "cannot have group mode none with group key values", - } - } - req.Group = convertGroupMode(gi.spec.GroupMode) - req.GroupKeys = gi.spec.GroupKeys - - if agg, err := determineAggregateMethod(gi.spec.AggregateMethod); err != nil { - return err - } else if agg != datatypes.Aggregate_AggregateTypeNone { - req.Aggregate = &datatypes.Aggregate{Type: agg} - } - - rs, err := gi.s.ReadGroup(gi.ctx, &req) - if err != nil { - return err - } - - if rs == nil { - return nil - } - return gi.handleRead(f, rs) -} - -func (gi *groupIterator) handleRead(f func(flux.Table) error, rs storage.GroupResultSet) error { - // these 
resources must be closed if not nil on return - var ( - gc storage.GroupCursor - cur cursors.Cursor - table storageTable - ) - - defer func() { - if table != nil { - table.Close() - } - if cur != nil { - cur.Close() - } - if gc != nil { - gc.Close() - } - rs.Close() - gi.cache.Release() - }() - - gc = rs.Next() -READ: - for gc != nil { - for gc.Next() { - cur = gc.Cursor() - if cur != nil { - break - } - } - - if cur == nil { - gc.Close() - gc = rs.Next() - continue - } - - bnds := gi.spec.Bounds - key := groupKeyForGroup(gc.PartitionKeyVals(), &gi.spec, bnds) - done := make(chan struct{}) - switch typedCur := cur.(type) { - case cursors.IntegerArrayCursor: - cols, defs := determineTableColsForGroup(gc.Keys(), flux.TInt, gc.Aggregate(), key) - table = newIntegerGroupTable(done, gc, typedCur, bnds, key, cols, gc.Tags(), defs, gi.cache, gi.alloc) - case cursors.FloatArrayCursor: - cols, defs := determineTableColsForGroup(gc.Keys(), flux.TFloat, gc.Aggregate(), key) - table = newFloatGroupTable(done, gc, typedCur, bnds, key, cols, gc.Tags(), defs, gi.cache, gi.alloc) - case cursors.UnsignedArrayCursor: - cols, defs := determineTableColsForGroup(gc.Keys(), flux.TUInt, gc.Aggregate(), key) - table = newUnsignedGroupTable(done, gc, typedCur, bnds, key, cols, gc.Tags(), defs, gi.cache, gi.alloc) - case cursors.BooleanArrayCursor: - cols, defs := determineTableColsForGroup(gc.Keys(), flux.TBool, gc.Aggregate(), key) - table = newBooleanGroupTable(done, gc, typedCur, bnds, key, cols, gc.Tags(), defs, gi.cache, gi.alloc) - case cursors.StringArrayCursor: - cols, defs := determineTableColsForGroup(gc.Keys(), flux.TString, gc.Aggregate(), key) - table = newStringGroupTable(done, gc, typedCur, bnds, key, cols, gc.Tags(), defs, gi.cache, gi.alloc) - default: - panic(fmt.Sprintf("unreachable: %T", typedCur)) - } - - // table owns these resources and is responsible for closing them - cur = nil - gc = nil - - if err := f(table); err != nil { - table.Close() - table = nil - return err - } - select { - case <-done: - case <-gi.ctx.Done(): - table.Cancel() - break READ - } - - stats := table.Statistics() - gi.stats.ScannedValues += stats.ScannedValues - gi.stats.ScannedBytes += stats.ScannedBytes - table.Close() - table = nil - - gc = rs.Next() - } - return rs.Err() -} - -func determineAggregateMethod(agg string) (datatypes.Aggregate_AggregateType, error) { - if agg == "" { - return datatypes.Aggregate_AggregateTypeNone, nil - } - - if t, ok := datatypes.AggregateNameMap[strings.ToUpper(agg)]; ok { - return datatypes.Aggregate_AggregateType(t), nil - } - return 0, fmt.Errorf("unknown aggregate type %q", agg) -} - -func convertGroupMode(m query.GroupMode) datatypes.ReadGroupRequest_Group { - switch m { - case query.GroupModeNone: - return datatypes.ReadGroupRequest_GroupNone - case query.GroupModeBy: - return datatypes.ReadGroupRequest_GroupBy - } - panic(fmt.Sprint("invalid group mode: ", m)) -} - -const ( - startColIdx = 0 - stopColIdx = 1 - timeColIdx = 2 - valueColIdxWithoutTime = 2 - valueColIdx = 3 -) - -func determineTableColsForWindowAggregate(tags models.Tags, typ flux.ColType, hasTimeCol bool) ([]flux.ColMeta, [][]byte) { - var cols []flux.ColMeta - var defs [][]byte - - // aggregates remove the _time column - size := 3 - if hasTimeCol { - size++ - } - cols = make([]flux.ColMeta, size+len(tags)) - defs = make([][]byte, size+len(tags)) - cols[startColIdx] = flux.ColMeta{ - Label: execute.DefaultStartColLabel, - Type: flux.TTime, - } - cols[stopColIdx] = flux.ColMeta{ - Label: 
execute.DefaultStopColLabel, - Type: flux.TTime, - } - if hasTimeCol { - cols[timeColIdx] = flux.ColMeta{ - Label: execute.DefaultTimeColLabel, - Type: flux.TTime, - } - cols[valueColIdx] = flux.ColMeta{ - Label: execute.DefaultValueColLabel, - Type: typ, - } - } else { - cols[valueColIdxWithoutTime] = flux.ColMeta{ - Label: execute.DefaultValueColLabel, - Type: typ, - } - } - for j, tag := range tags { - cols[size+j] = flux.ColMeta{ - Label: string(tag.Key), - Type: flux.TString, - } - defs[size+j] = []byte("") - } - return cols, defs -} - -func determineTableColsForSeries(tags models.Tags, typ flux.ColType) ([]flux.ColMeta, [][]byte) { - cols := make([]flux.ColMeta, 4+len(tags)) - defs := make([][]byte, 4+len(tags)) - cols[startColIdx] = flux.ColMeta{ - Label: execute.DefaultStartColLabel, - Type: flux.TTime, - } - cols[stopColIdx] = flux.ColMeta{ - Label: execute.DefaultStopColLabel, - Type: flux.TTime, - } - cols[timeColIdx] = flux.ColMeta{ - Label: execute.DefaultTimeColLabel, - Type: flux.TTime, - } - cols[valueColIdx] = flux.ColMeta{ - Label: execute.DefaultValueColLabel, - Type: typ, - } - for j, tag := range tags { - cols[4+j] = flux.ColMeta{ - Label: string(tag.Key), - Type: flux.TString, - } - defs[4+j] = []byte("") - } - return cols, defs -} - -func defaultGroupKeyForSeries(tags models.Tags, bnds execute.Bounds) flux.GroupKey { - cols := make([]flux.ColMeta, 2, len(tags)+2) - vs := make([]values.Value, 2, len(tags)+2) - cols[startColIdx] = flux.ColMeta{ - Label: execute.DefaultStartColLabel, - Type: flux.TTime, - } - vs[startColIdx] = values.NewTime(bnds.Start) - cols[stopColIdx] = flux.ColMeta{ - Label: execute.DefaultStopColLabel, - Type: flux.TTime, - } - vs[stopColIdx] = values.NewTime(bnds.Stop) - for i := range tags { - cols = append(cols, flux.ColMeta{ - Label: string(tags[i].Key), - Type: flux.TString, - }) - vs = append(vs, values.NewString(string(tags[i].Value))) - } - return execute.NewGroupKey(cols, vs) -} - -func IsSelector(agg *datatypes.Aggregate) bool { - if agg == nil { - return false - } - return agg.Type == datatypes.Aggregate_AggregateTypeMin || - agg.Type == datatypes.Aggregate_AggregateTypeMax || - agg.Type == datatypes.Aggregate_AggregateTypeFirst || - agg.Type == datatypes.Aggregate_AggregateTypeLast -} - -func determineTableColsForGroup(tagKeys [][]byte, typ flux.ColType, agg *datatypes.Aggregate, groupKey flux.GroupKey) ([]flux.ColMeta, [][]byte) { - var colSize int - if agg == nil || IsSelector(agg) { - // The group without aggregate or with selector (min, max, first, last) case: - // _start, _stop, _time, _value + tags - colSize += 4 + len(tagKeys) - } else { - // The group aggregate case: - // Only the group keys + _value are needed. - // Note that `groupKey` will contain _start, _stop, plus any group columns specified. - // _start and _stop will always be in the first two slots, see: groupKeyForGroup() - // For the group aggregate case the output does not contain a _time column. - - // Also note that if in the future we will add support for mean, then it should also fall onto this branch. 
- - colSize = len(groupKey.Cols()) + 1 - } - - cols := make([]flux.ColMeta, colSize) - defs := make([][]byte, colSize) - // No matter this has aggregate, selector, or neither, the first two columns are always _start and _stop - cols[startColIdx] = flux.ColMeta{ - Label: execute.DefaultStartColLabel, - Type: flux.TTime, - } - cols[stopColIdx] = flux.ColMeta{ - Label: execute.DefaultStopColLabel, - Type: flux.TTime, - } - - if agg == nil || IsSelector(agg) { - // For the group without aggregate or with selector case: - cols[timeColIdx] = flux.ColMeta{ - Label: execute.DefaultTimeColLabel, - Type: flux.TTime, - } - cols[valueColIdx] = flux.ColMeta{ - Label: execute.DefaultValueColLabel, - Type: typ, - } - for j, tag := range tagKeys { - cols[4+j] = flux.ColMeta{ - Label: string(tag), - Type: flux.TString, - } - defs[4+j] = []byte("") - } - } else { - // Aggregate has no _time - cols[valueColIdxWithoutTime] = flux.ColMeta{ - Label: execute.DefaultValueColLabel, - Type: typ, - } - // From now on, only include group keys that are not _start and _stop. - // which are already included as the first two columns - // This highly depends on the implementation of groupKeyForGroup() which - // put _start and _stop into the first two slots. - for j := 2; j < len(groupKey.Cols()); j++ { - // the starting columns index for other group key columns is 3 (1+j) - cols[1+j] = flux.ColMeta{ - Label: groupKey.Cols()[j].Label, - Type: groupKey.Cols()[j].Type, - } - defs[1+j] = []byte("") - } - } - return cols, defs -} - -func groupKeyForGroup(kv [][]byte, spec *query.ReadGroupSpec, bnds execute.Bounds) flux.GroupKey { - cols := make([]flux.ColMeta, 2, len(spec.GroupKeys)+2) - vs := make([]values.Value, 2, len(spec.GroupKeys)+2) - cols[startColIdx] = flux.ColMeta{ - Label: execute.DefaultStartColLabel, - Type: flux.TTime, - } - vs[startColIdx] = values.NewTime(bnds.Start) - cols[stopColIdx] = flux.ColMeta{ - Label: execute.DefaultStopColLabel, - Type: flux.TTime, - } - vs[stopColIdx] = values.NewTime(bnds.Stop) - for i := range spec.GroupKeys { - if spec.GroupKeys[i] == execute.DefaultStartColLabel || spec.GroupKeys[i] == execute.DefaultStopColLabel { - continue - } - cols = append(cols, flux.ColMeta{ - Label: spec.GroupKeys[i], - Type: flux.TString, - }) - vs = append(vs, values.NewString(string(kv[i]))) - } - return execute.NewGroupKey(cols, vs) -} - -type windowAggregateIterator struct { - ctx context.Context - s storage.Store - spec query.ReadWindowAggregateSpec - stats cursors.CursorStats - cache *tagsCache - alloc memory.Allocator -} - -func (wai *windowAggregateIterator) Statistics() cursors.CursorStats { return wai.stats } - -func (wai *windowAggregateIterator) Do(f func(flux.Table) error) error { - src := wai.s.GetSource( - uint64(wai.spec.OrganizationID), - uint64(wai.spec.BucketID), - ) - - // Setup read request - any, err := anypb.New(src) - if err != nil { - return err - } - - var req datatypes.ReadWindowAggregateRequest - req.ReadSource = any - req.Predicate = wai.spec.Predicate - req.Range = &datatypes.TimestampRange{ - Start: int64(wai.spec.Bounds.Start), - End: int64(wai.spec.Bounds.Stop), - } - - req.Window = &datatypes.Window{ - Every: &datatypes.Duration{ - Nsecs: wai.spec.Window.Every.Nanoseconds(), - Months: wai.spec.Window.Every.Months(), - Negative: wai.spec.Window.Every.IsNegative(), - }, - Offset: &datatypes.Duration{ - Nsecs: wai.spec.Window.Offset.Nanoseconds(), - Months: wai.spec.Window.Offset.Months(), - Negative: wai.spec.Window.Offset.IsNegative(), - }, - } - - req.Aggregate = 
make([]*datatypes.Aggregate, len(wai.spec.Aggregates)) - - for i, aggKind := range wai.spec.Aggregates { - if agg, err := determineAggregateMethod(string(aggKind)); err != nil { - return err - } else if agg != datatypes.Aggregate_AggregateTypeNone { - req.Aggregate[i] = &datatypes.Aggregate{Type: agg} - } - } - - rs, err := wai.s.WindowAggregate(wai.ctx, &req) - if err != nil { - return err - } - - if rs == nil { - return nil - } - return wai.handleRead(f, rs) -} - -const ( - CountKind = "count" - SumKind = "sum" - FirstKind = "first" - LastKind = "last" - MinKind = "min" - MaxKind = "max" - MeanKind = "mean" -) - -// isSelector returns true if given a procedure kind that represents a selector operator. -func isSelector(kind plan.ProcedureKind) bool { - return kind == FirstKind || kind == LastKind || kind == MinKind || kind == MaxKind -} - -func (wai *windowAggregateIterator) handleRead(f func(flux.Table) error, rs storage.ResultSet) error { - createEmpty := wai.spec.CreateEmpty - - selector := len(wai.spec.Aggregates) > 0 && isSelector(wai.spec.Aggregates[0]) - - timeColumn := wai.spec.TimeColumn - if timeColumn == "" { - tableFn := f - f = func(table flux.Table) error { - return splitWindows(wai.ctx, wai.alloc, table, selector, tableFn) - } - } - - window, err := interval.NewWindow(wai.spec.Window.Every, wai.spec.Window.Period, wai.spec.Window.Offset) - if err != nil { - return err - } - - // these resources must be closed if not nil on return - var ( - cur cursors.Cursor - table storageTable - ) - - defer func() { - if table != nil { - table.Close() - } - if cur != nil { - cur.Close() - } - rs.Close() - wai.cache.Release() - }() - -READ: - for rs.Next() { - cur = rs.Cursor() - if cur == nil { - // no data for series key + field combination - continue - } - - bnds := wai.spec.Bounds - key := defaultGroupKeyForSeries(rs.Tags(), bnds) - done := make(chan struct{}) - hasTimeCol := timeColumn != "" - switch typedCur := cur.(type) { - case cursors.IntegerArrayCursor: - if !selector || wai.spec.ForceAggregate { - var fillValue *int64 - if isAggregateCount(wai.spec.Aggregates[0]) { - fillValue = func(v int64) *int64 { return &v }(0) - } - cols, defs := determineTableColsForWindowAggregate(rs.Tags(), flux.TInt, hasTimeCol) - table = newIntegerWindowTable(done, typedCur, bnds, window, createEmpty, timeColumn, !selector, fillValue, key, cols, rs.Tags(), defs, wai.cache, wai.alloc) - } else if createEmpty && !hasTimeCol { - cols, defs := determineTableColsForSeries(rs.Tags(), flux.TInt) - table = newIntegerEmptyWindowSelectorTable(done, typedCur, bnds, window, timeColumn, key, cols, rs.Tags(), defs, wai.cache, wai.alloc) - } else { - // Note hasTimeCol == true means that aggregateWindow() was called. - // Because aggregateWindow() ultimately removes empty tables we - // don't bother creating them here. 
- cols, defs := determineTableColsForSeries(rs.Tags(), flux.TInt) - table = newIntegerWindowSelectorTable(done, typedCur, bnds, window, timeColumn, key, cols, rs.Tags(), defs, wai.cache, wai.alloc) - } - case cursors.FloatArrayCursor: - if !selector || wai.spec.ForceAggregate { - cols, defs := determineTableColsForWindowAggregate(rs.Tags(), flux.TFloat, hasTimeCol) - table = newFloatWindowTable(done, typedCur, bnds, window, createEmpty, timeColumn, !selector, key, cols, rs.Tags(), defs, wai.cache, wai.alloc) - } else if createEmpty && !hasTimeCol { - cols, defs := determineTableColsForSeries(rs.Tags(), flux.TFloat) - table = newFloatEmptyWindowSelectorTable(done, typedCur, bnds, window, timeColumn, key, cols, rs.Tags(), defs, wai.cache, wai.alloc) - } else { - // Note hasTimeCol == true means that aggregateWindow() was called. - // Because aggregateWindow() ultimately removes empty tables we - // don't bother creating them here. - cols, defs := determineTableColsForSeries(rs.Tags(), flux.TFloat) - table = newFloatWindowSelectorTable(done, typedCur, bnds, window, timeColumn, key, cols, rs.Tags(), defs, wai.cache, wai.alloc) - } - case cursors.UnsignedArrayCursor: - if !selector || wai.spec.ForceAggregate { - cols, defs := determineTableColsForWindowAggregate(rs.Tags(), flux.TUInt, hasTimeCol) - table = newUnsignedWindowTable(done, typedCur, bnds, window, createEmpty, timeColumn, !selector, key, cols, rs.Tags(), defs, wai.cache, wai.alloc) - } else if createEmpty && !hasTimeCol { - cols, defs := determineTableColsForSeries(rs.Tags(), flux.TUInt) - table = newUnsignedEmptyWindowSelectorTable(done, typedCur, bnds, window, timeColumn, key, cols, rs.Tags(), defs, wai.cache, wai.alloc) - } else { - // Note hasTimeCol == true means that aggregateWindow() was called. - // Because aggregateWindow() ultimately removes empty tables we - // don't bother creating them here. - cols, defs := determineTableColsForSeries(rs.Tags(), flux.TUInt) - table = newUnsignedWindowSelectorTable(done, typedCur, bnds, window, timeColumn, key, cols, rs.Tags(), defs, wai.cache, wai.alloc) - } - case cursors.BooleanArrayCursor: - if !selector || wai.spec.ForceAggregate { - cols, defs := determineTableColsForWindowAggregate(rs.Tags(), flux.TBool, hasTimeCol) - table = newBooleanWindowTable(done, typedCur, bnds, window, createEmpty, timeColumn, !selector, key, cols, rs.Tags(), defs, wai.cache, wai.alloc) - } else if createEmpty && !hasTimeCol { - cols, defs := determineTableColsForSeries(rs.Tags(), flux.TBool) - table = newBooleanEmptyWindowSelectorTable(done, typedCur, bnds, window, timeColumn, key, cols, rs.Tags(), defs, wai.cache, wai.alloc) - } else { - // Note hasTimeCol == true means that aggregateWindow() was called. - // Because aggregateWindow() ultimately removes empty tables we - // don't bother creating them here. 
- cols, defs := determineTableColsForSeries(rs.Tags(), flux.TBool) - table = newBooleanWindowSelectorTable(done, typedCur, bnds, window, timeColumn, key, cols, rs.Tags(), defs, wai.cache, wai.alloc) - } - case cursors.StringArrayCursor: - if !selector || wai.spec.ForceAggregate { - cols, defs := determineTableColsForWindowAggregate(rs.Tags(), flux.TString, hasTimeCol) - table = newStringWindowTable(done, typedCur, bnds, window, createEmpty, timeColumn, !selector, key, cols, rs.Tags(), defs, wai.cache, wai.alloc) - } else if createEmpty && !hasTimeCol { - cols, defs := determineTableColsForSeries(rs.Tags(), flux.TString) - table = newStringEmptyWindowSelectorTable(done, typedCur, bnds, window, timeColumn, key, cols, rs.Tags(), defs, wai.cache, wai.alloc) - } else { - // Note hasTimeCol == true means that aggregateWindow() was called. - // Because aggregateWindow() ultimately removes empty tables we - // don't bother creating them here. - cols, defs := determineTableColsForSeries(rs.Tags(), flux.TString) - table = newStringWindowSelectorTable(done, typedCur, bnds, window, timeColumn, key, cols, rs.Tags(), defs, wai.cache, wai.alloc) - } - default: - panic(fmt.Sprintf("unreachable: %T", typedCur)) - } - - cur = nil - - if !table.Empty() { - if err := f(table); err != nil { - table.Close() - table = nil - return err - } - select { - case <-done: - case <-wai.ctx.Done(): - table.Cancel() - break READ - } - } - - stats := table.Statistics() - wai.stats.ScannedValues += stats.ScannedValues - wai.stats.ScannedBytes += stats.ScannedBytes - table.Close() - table = nil - } - return rs.Err() -} - -func isAggregateCount(kind plan.ProcedureKind) bool { - return kind == CountKind -} - -type tagKeysIterator struct { - ctx context.Context - bounds execute.Bounds - s storage.Store - readSpec query.ReadTagKeysSpec - predicate *datatypes.Predicate - alloc memory.Allocator -} - -func (ti *tagKeysIterator) Do(f func(flux.Table) error) error { - src := ti.s.GetSource( - uint64(ti.readSpec.OrganizationID), - uint64(ti.readSpec.BucketID), - ) - - var req datatypes.TagKeysRequest - any, err := anypb.New(src) - if err != nil { - return err - } - - req.TagsSource = any - req.Predicate = ti.predicate - req.Range = &datatypes.TimestampRange{ - Start: int64(ti.bounds.Start), - End: int64(ti.bounds.Stop), - } - - rs, err := ti.s.TagKeys(ti.ctx, &req) - if err != nil { - return err - } - return ti.handleRead(f, rs) -} - -func (ti *tagKeysIterator) handleRead(f func(flux.Table) error, rs cursors.StringIterator) error { - key := execute.NewGroupKey(nil, nil) - builder := execute.NewColListTableBuilder(key, ti.alloc) - valueIdx, err := builder.AddCol(flux.ColMeta{ - Label: execute.DefaultValueColLabel, - Type: flux.TString, - }) - if err != nil { - return err - } - defer builder.ClearData() - - // Add the _start and _stop columns that come from storage. - if err := builder.AppendString(valueIdx, "_start"); err != nil { - return err - } - if err := builder.AppendString(valueIdx, "_stop"); err != nil { - return err - } - - for rs.Next() { - v := rs.Value() - switch v { - case models.MeasurementTagKey: - v = "_measurement" - case models.FieldKeyTagKey: - v = "_field" - } - - if err := builder.AppendString(valueIdx, v); err != nil { - return err - } - } - - // Construct the table and add to the reference count - // so we can free the table later. - tbl, err := builder.Table() - if err != nil { - return err - } - - // Release the references to the arrays held by the builder. 
- builder.ClearData() - return f(tbl) -} - -func (ti *tagKeysIterator) Statistics() cursors.CursorStats { - return cursors.CursorStats{} -} - -type tagValuesIterator struct { - ctx context.Context - bounds execute.Bounds - s storage.Store - readSpec query.ReadTagValuesSpec - predicate *datatypes.Predicate - alloc memory.Allocator -} - -func (ti *tagValuesIterator) Do(f func(flux.Table) error) error { - src := ti.s.GetSource( - uint64(ti.readSpec.OrganizationID), - uint64(ti.readSpec.BucketID), - ) - - var req datatypes.TagValuesRequest - any, err := anypb.New(src) - if err != nil { - return err - } - req.TagsSource = any - - switch ti.readSpec.TagKey { - case "_measurement": - req.TagKey = models.MeasurementTagKey - case "_field": - req.TagKey = models.FieldKeyTagKey - default: - req.TagKey = ti.readSpec.TagKey - } - req.Predicate = ti.predicate - req.Range = &datatypes.TimestampRange{ - Start: int64(ti.bounds.Start), - End: int64(ti.bounds.Stop), - } - - rs, err := ti.s.TagValues(ti.ctx, &req) - if err != nil { - return err - } - return ti.handleRead(f, rs) -} - -func (ti *tagValuesIterator) handleRead(f func(flux.Table) error, rs cursors.StringIterator) error { - key := execute.NewGroupKey(nil, nil) - builder := execute.NewColListTableBuilder(key, ti.alloc) - valueIdx, err := builder.AddCol(flux.ColMeta{ - Label: execute.DefaultValueColLabel, - Type: flux.TString, - }) - if err != nil { - return err - } - defer builder.ClearData() - - for rs.Next() { - if err := builder.AppendString(valueIdx, rs.Value()); err != nil { - return err - } - } - - // Construct the table and add to the reference count - // so we can free the table later. - tbl, err := builder.Table() - if err != nil { - return err - } - - // Release the references to the arrays held by the builder. - builder.ClearData() - return f(tbl) -} - -func (ti *tagValuesIterator) Statistics() cursors.CursorStats { - return cursors.CursorStats{} -} - -type seriesCardinalityIterator struct { - ctx context.Context - bounds execute.Bounds - s storage.Store - readSpec query.ReadSeriesCardinalitySpec - predicate *datatypes.Predicate - alloc memory.Allocator - stats cursors.CursorStats -} - -func (si *seriesCardinalityIterator) Do(f func(flux.Table) error) error { - src := si.s.GetSource( - uint64(si.readSpec.OrganizationID), - uint64(si.readSpec.BucketID), - ) - - var req datatypes.ReadSeriesCardinalityRequest - any, err := anypb.New(src) - if err != nil { - return err - } - req.ReadSource = any - - req.Predicate = si.predicate - req.Range = &datatypes.TimestampRange{ - Start: int64(si.bounds.Start), - End: int64(si.bounds.Stop), - } - - rs, err := si.s.ReadSeriesCardinality(si.ctx, &req) - if err != nil { - return err - } - si.stats.Add(rs.Stats()) - return si.handleRead(f, rs) -} - -func (si *seriesCardinalityIterator) handleRead(f func(flux.Table) error, rs cursors.Int64Iterator) error { - key := execute.NewGroupKey(nil, nil) - builder := execute.NewColListTableBuilder(key, si.alloc) - valueIdx, err := builder.AddCol(flux.ColMeta{ - Label: execute.DefaultValueColLabel, - Type: flux.TInt, - }) - if err != nil { - return err - } - defer builder.ClearData() - - for rs.Next() { - if err := builder.AppendInt(valueIdx, rs.Value()); err != nil { - return err - } - } - - // Construct the table and add to the reference count so we can free the table - // later. - tbl, err := builder.Table() - if err != nil { - return err - } - - // Release the references to the arrays held by the builder. 
- builder.ClearData() - return f(tbl) -} - -func (si *seriesCardinalityIterator) Statistics() cursors.CursorStats { - return si.stats -} diff --git a/storage/flux/table.gen.go b/storage/flux/table.gen.go deleted file mode 100644 index 7e2ab6deb46..00000000000 --- a/storage/flux/table.gen.go +++ /dev/null @@ -1,4822 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! -// Source: table.gen.go.tmpl - -package storageflux - -import ( - "fmt" - "math" - "sync" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/array" - "github.com/influxdata/flux/arrow" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/interval" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/values" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/models" - storage "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -// -// *********** Float *********** -// - -type floatTable struct { - table - mu sync.Mutex - cur cursors.FloatArrayCursor - alloc memory.Allocator -} - -func newFloatTable( - done chan struct{}, - cur cursors.FloatArrayCursor, - bounds execute.Bounds, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *floatTable { - t := &floatTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *floatTable) Close() { - t.mu.Lock() - if t.cur != nil { - t.cur.Close() - t.cur = nil - } - t.mu.Unlock() -} - -func (t *floatTable) Statistics() cursors.CursorStats { - t.mu.Lock() - defer t.mu.Unlock() - cur := t.cur - if cur == nil { - return cursors.CursorStats{} - } - cs := cur.Stats() - return cursors.CursorStats{ - ScannedValues: cs.ScannedValues, - ScannedBytes: cs.ScannedBytes, - } -} - -func (t *floatTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *floatTable) advance() bool { - a := t.cur.Next() - l := a.Len() - if l == 0 { - return false - } - - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. 
- cr := t.allocateBuffer(l) - cr.cols[timeColIdx] = arrow.NewInt(a.Timestamps, t.alloc) - cr.cols[valueColIdx] = t.toArrowBuffer(a.Values) - t.appendTags(cr) - t.appendBounds(cr) - return true -} - -// window table -type floatWindowTable struct { - floatTable - arr *cursors.FloatArray - windowBounds interval.Bounds - idxInArr int - createEmpty bool - timeColumn string - isAggregate bool - window interval.Window -} - -func newFloatWindowTable( - done chan struct{}, - cur cursors.FloatArrayCursor, - bounds execute.Bounds, - window interval.Window, - createEmpty bool, - timeColumn string, - isAggregate bool, - - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *floatWindowTable { - t := &floatWindowTable{ - floatTable: floatTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - window: window, - createEmpty: createEmpty, - timeColumn: timeColumn, - isAggregate: isAggregate, - } - if t.createEmpty { - start := int64(bounds.Start) - t.windowBounds = window.GetLatestBounds(values.Time(start)) - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *floatWindowTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -// createNextBufferTimes will read the timestamps from the array -// cursor and construct the values for the next buffer. -func (t *floatWindowTable) createNextBufferTimes() (start, stop *array.Int, ok bool) { - startB := arrow.NewIntBuilder(t.alloc) - stopB := arrow.NewIntBuilder(t.alloc) - - if t.createEmpty { - // There are no more windows when the start time is greater - // than or equal to the stop time. - if startT := int64(t.windowBounds.Start()); startT >= int64(t.bounds.Stop) { - return nil, nil, false - } - - // Create a buffer with the buffer size. - // TODO(jsternberg): Calculate the exact size with max points as the maximum. - startB.Resize(storage.MaxPointsPerBlock) - stopB.Resize(storage.MaxPointsPerBlock) - for ; ; t.windowBounds = t.window.NextBounds(t.windowBounds) { - startT, stopT := t.getWindowBoundsFor(t.windowBounds) - if startT >= int64(t.bounds.Stop) { - break - } - startB.Append(startT) - stopB.Append(stopT) - } - start = startB.NewIntArray() - stop = stopB.NewIntArray() - return start, stop, true - } - - // Retrieve the next buffer so we can copy the timestamps. - if !t.nextBuffer() { - return nil, nil, false - } - - // Copy over the timestamps from the next buffer and adjust - // times for the boundaries. - startB.Resize(len(t.arr.Timestamps)) - stopB.Resize(len(t.arr.Timestamps)) - for _, stopT := range t.arr.Timestamps { - bounds := t.window.PrevBounds(t.window.GetLatestBounds(values.Time(stopT))) - startT, stopT := t.getWindowBoundsFor(bounds) - startB.Append(startT) - stopB.Append(stopT) - } - start = startB.NewIntArray() - stop = stopB.NewIntArray() - return start, stop, true -} - -func (t *floatWindowTable) getWindowBoundsFor(bounds interval.Bounds) (int64, int64) { - beg := int64(bounds.Start()) - end := int64(bounds.Stop()) - if beg < int64(t.bounds.Start) { - beg = int64(t.bounds.Start) - } - if end > int64(t.bounds.Stop) { - end = int64(t.bounds.Stop) - } - return beg, end -} - -// nextAt will retrieve the next value that can be used with -// the given stop timestamp. If no values can be used with the timestamp, -// it will return the default value and false. 
-func (t *floatWindowTable) nextAt(stop int64) (v float64, ok bool) {
-	if !t.nextBuffer() {
-		return
-	} else if !t.isInWindow(stop, t.arr.Timestamps[t.idxInArr]) {
-		return
-	}
-	v, ok = t.arr.Values[t.idxInArr], true
-	t.idxInArr++
-	return v, ok
-}
-
-// isInWindow will check if the given time may be used within the window
-// denoted by the stop timestamp. The stop may be a truncated stop time
-// because of a restricted boundary.
-//
-// When used with an aggregate, ts will be the true stop time returned
-// by storage. When used with a selector, it will be the real time
-// for the point.
-func (t *floatWindowTable) isInWindow(stop int64, ts int64) bool {
-	// Retrieve the boundary associated with this stop time.
-	// This will be the boundary for the previous nanosecond.
-	bounds := t.window.GetLatestBounds(values.Time(stop - 1))
-	start, stop := int64(bounds.Start()), int64(bounds.Stop())
-
-	// For an aggregate, the timestamp will be the stop time of the boundary.
-	if t.isAggregate {
-		return start < ts && ts <= stop
-	}
-
-	// For a selector, the timestamp should be within the boundary.
-	return start <= ts && ts < stop
-}
-
-// nextBuffer will ensure the array cursor is filled
-// and will return true if there is at least one value
-// that can be read from it.
-func (t *floatWindowTable) nextBuffer() bool {
-	// Discard the current array cursor if we have
-	// exceeded it.
-	if t.arr != nil && t.idxInArr >= t.arr.Len() {
-		t.arr = nil
-	}
-
-	// Retrieve the next array cursor if needed.
-	if t.arr == nil {
-		arr := t.cur.Next()
-		if arr.Len() == 0 {
-			return false
-		}
-		t.arr, t.idxInArr = arr, 0
-	}
-	return true
-}
-
-// appendValues will scan the timestamps and append values
-// that match those timestamps from the buffer.
-func (t *floatWindowTable) appendValues(intervals []int64, appendValue func(v float64), appendNull func()) {
-	for i := 0; i < len(intervals); i++ {
-		if v, ok := t.nextAt(intervals[i]); ok {
-			appendValue(v)
-			continue
-		}
-		appendNull()
-	}
-}
-
-func (t *floatWindowTable) advance() bool {
-	if !t.nextBuffer() {
-		return false
-	}
-	// Create the timestamps for the next window.
-	start, stop, ok := t.createNextBufferTimes()
-	if !ok {
-		return false
-	}
-	values := t.mergeValues(stop.Int64Values())
-
-	// Retrieve the buffer for the data to avoid allocating
-	// additional slices. If the buffer is still being used
-	// because the references were retained, then we will
-	// allocate a new buffer.
-	cr := t.allocateBuffer(stop.Len())
-	if t.timeColumn != "" {
-		switch t.timeColumn {
-		case execute.DefaultStopColLabel:
-			cr.cols[timeColIdx] = stop
-			start.Release()
-		case execute.DefaultStartColLabel:
-			cr.cols[timeColIdx] = start
-			stop.Release()
-		}
-		cr.cols[valueColIdx] = values
-		t.appendBounds(cr)
-	} else {
-		cr.cols[startColIdx] = start
-		cr.cols[stopColIdx] = stop
-		cr.cols[valueColIdxWithoutTime] = values
-	}
-	t.appendTags(cr)
-	return true
-}
-
-// This table implementation will not have any empty windows.
-type floatWindowSelectorTable struct { - floatTable - timeColumn string - window interval.Window -} - -func newFloatWindowSelectorTable( - done chan struct{}, - cur cursors.FloatArrayCursor, - bounds execute.Bounds, - window interval.Window, - timeColumn string, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *floatWindowSelectorTable { - t := &floatWindowSelectorTable{ - floatTable: floatTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - window: window, - timeColumn: timeColumn, - } - t.readTags(tags) - t.init(t.advance) - return t -} - -func (t *floatWindowSelectorTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *floatWindowSelectorTable) advance() bool { - arr := t.cur.Next() - if arr.Len() == 0 { - return false - } - - cr := t.allocateBuffer(arr.Len()) - - switch t.timeColumn { - case execute.DefaultStartColLabel: - cr.cols[timeColIdx] = t.startTimes(arr) - t.appendBounds(cr) - case execute.DefaultStopColLabel: - cr.cols[timeColIdx] = t.stopTimes(arr) - t.appendBounds(cr) - default: - cr.cols[startColIdx] = t.startTimes(arr) - cr.cols[stopColIdx] = t.stopTimes(arr) - cr.cols[timeColIdx] = arrow.NewInt(arr.Timestamps, t.alloc) - } - - cr.cols[valueColIdx] = t.toArrowBuffer(arr.Values) - t.appendTags(cr) - return true -} - -func (t *floatWindowSelectorTable) startTimes(arr *cursors.FloatArray) *array.Int { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(arr.Len()) - - rangeStart := int64(t.bounds.Start) - - for _, v := range arr.Timestamps { - if windowStart := int64(t.window.GetLatestBounds(values.Time(v)).Start()); windowStart < rangeStart { - start.Append(rangeStart) - } else { - start.Append(windowStart) - } - } - return start.NewIntArray() -} - -func (t *floatWindowSelectorTable) stopTimes(arr *cursors.FloatArray) *array.Int { - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(arr.Len()) - - rangeStop := int64(t.bounds.Stop) - - for _, v := range arr.Timestamps { - if windowStop := int64(t.window.GetLatestBounds(values.Time(v)).Stop()); windowStop > rangeStop { - stop.Append(rangeStop) - } else { - stop.Append(windowStop) - } - } - return stop.NewIntArray() -} - -// This table implementation may contain empty windows -// in addition to non-empty windows. 
-type floatEmptyWindowSelectorTable struct { - floatTable - arr *cursors.FloatArray - idx int - rangeStart int64 - rangeStop int64 - windowBounds interval.Bounds - timeColumn string - window interval.Window -} - -func newFloatEmptyWindowSelectorTable( - done chan struct{}, - cur cursors.FloatArrayCursor, - bounds execute.Bounds, - window interval.Window, - timeColumn string, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *floatEmptyWindowSelectorTable { - rangeStart := int64(bounds.Start) - rangeStop := int64(bounds.Stop) - t := &floatEmptyWindowSelectorTable{ - floatTable: floatTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - arr: cur.Next(), - rangeStart: rangeStart, - rangeStop: rangeStop, - windowBounds: window.GetLatestBounds(values.Time(rangeStart)), - window: window, - timeColumn: timeColumn, - } - t.readTags(tags) - t.init(t.advance) - return t -} - -func (t *floatEmptyWindowSelectorTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *floatEmptyWindowSelectorTable) advance() bool { - if t.arr.Len() == 0 { - return false - } - - values := t.arrowBuilder() - values.Resize(storage.MaxPointsPerBlock) - - var cr *colReader - - switch t.timeColumn { - case execute.DefaultStartColLabel: - start := t.startTimes(values) - cr = t.allocateBuffer(start.Len()) - cr.cols[timeColIdx] = start - t.appendBounds(cr) - case execute.DefaultStopColLabel: - stop := t.stopTimes(values) - cr = t.allocateBuffer(stop.Len()) - cr.cols[timeColIdx] = stop - t.appendBounds(cr) - default: - start, stop, time := t.startStopTimes(values) - cr = t.allocateBuffer(time.Len()) - cr.cols[startColIdx] = start - cr.cols[stopColIdx] = stop - cr.cols[timeColIdx] = time - } - - cr.cols[valueColIdx] = values.NewFloatArray() - t.appendTags(cr) - return true -} - -func (t *floatEmptyWindowSelectorTable) startTimes(builder *array.FloatBuilder) *array.Int { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - // The first window should start at the - // beginning of the time range. - if int64(t.windowBounds.Start()) < t.rangeStart { - start.Append(t.rangeStart) - } else { - start.Append(int64(t.windowBounds.Start())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). - if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if start.Len() == storage.MaxPointsPerBlock { - break - } - } - return start.NewIntArray() -} - -func (t *floatEmptyWindowSelectorTable) stopTimes(builder *array.FloatBuilder) *array.Int { - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - // The last window should stop at the end of - // the time range. 
- if int64(t.windowBounds.Stop()) > t.rangeStop { - stop.Append(t.rangeStop) - } else { - stop.Append(int64(t.windowBounds.Stop())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). - if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if stop.Len() == storage.MaxPointsPerBlock { - break - } - } - return stop.NewIntArray() -} - -func (t *floatEmptyWindowSelectorTable) startStopTimes(builder *array.FloatBuilder) (*array.Int, *array.Int, *array.Int) { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(storage.MaxPointsPerBlock) - - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(storage.MaxPointsPerBlock) - - time := arrow.NewIntBuilder(t.alloc) - time.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - - // The first window should start at the - // beginning of the time range. - if int64(t.windowBounds.Start()) < t.rangeStart { - start.Append(t.rangeStart) - } else { - start.Append(int64(t.windowBounds.Start())) - } - - // The last window should stop at the end of - // the time range. - if int64(t.windowBounds.Stop()) > t.rangeStop { - stop.Append(t.rangeStop) - } else { - stop.Append(int64(t.windowBounds.Stop())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - time.Append(v) - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - time.AppendNull() - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). 
- if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if time.Len() == storage.MaxPointsPerBlock { - break - } - } - return start.NewIntArray(), stop.NewIntArray(), time.NewIntArray() -} - -// group table - -type floatGroupTable struct { - table - mu sync.Mutex - gc storage.GroupCursor - cur cursors.FloatArrayCursor -} - -func newFloatGroupTable( - done chan struct{}, - gc storage.GroupCursor, - cur cursors.FloatArrayCursor, - bounds execute.Bounds, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *floatGroupTable { - t := &floatGroupTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - gc: gc, - cur: cur, - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *floatGroupTable) Close() { - t.mu.Lock() - if t.cur != nil { - t.cur.Close() - t.cur = nil - } - if t.gc != nil { - t.gc.Close() - t.gc = nil - } - t.mu.Unlock() -} - -func (t *floatGroupTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *floatGroupTable) advance() bool { - if t.cur == nil { - // For group aggregates, we will try to get all the series and all table buffers within those series - // all at once and merge them into one row when this advance() function is first called. - // At the end of this process, t.advanceCursor() already returns false and t.cur becomes nil. - // But we still need to return true to indicate that there is data to be returned. - // The second time when we call this advance(), t.cur is already nil, so we directly return false. - return false - } - var arr *cursors.FloatArray - var len int - for { - arr = t.cur.Next() - len = arr.Len() - if len > 0 { - break - } - if !t.advanceCursor() { - return false - } - } - - // handle the group without aggregate case - if t.gc.Aggregate() == nil { - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. - colReader := t.allocateBuffer(len) - colReader.cols[timeColIdx] = arrow.NewInt(arr.Timestamps, t.alloc) - colReader.cols[valueColIdx] = t.toArrowBuffer(arr.Values) - t.appendTags(colReader) - t.appendBounds(colReader) - return true - } - - aggregate, err := makeFloatAggregateAccumulator(t.gc.Aggregate().Type) - if err != nil { - t.err = err - return false - } - - aggregate.AccumulateFirst(arr.Timestamps, arr.Values, t.tags) - for { - arr = t.cur.Next() - if arr.Len() > 0 { - aggregate.AccumulateMore(arr.Timestamps, arr.Values, t.tags) - continue - } - - if !t.advanceCursor() { - break - } - } - timestamp, value, tags := aggregate.Result() - - colReader := t.allocateBuffer(1) - if IsSelector(t.gc.Aggregate()) { - colReader.cols[timeColIdx] = arrow.NewInt([]int64{timestamp}, t.alloc) - colReader.cols[valueColIdx] = t.toArrowBuffer([]float64{value}) - } else { - colReader.cols[valueColIdxWithoutTime] = t.toArrowBuffer([]float64{value}) - } - t.appendTheseTags(colReader, tags) - t.appendBounds(colReader) - return true -} - -type FloatAggregateAccumulator interface { - // AccumulateFirst receives an initial array of items to select from. - // It selects an item and stores the state. Afterwards, more data can - // be supplied with AccumulateMore and the results can be requested at - // any time. Without a call to AccumulateFirst the results are not - // defined. 
- AccumulateFirst(timestamps []int64, values []float64, tags [][]byte) - - // AccumulateMore receives additional array elements to select from. - AccumulateMore(timestamps []int64, values []float64, tags [][]byte) - - // Result returns the item selected from the data received so far. - Result() (int64, float64, [][]byte) -} - -// The selector method takes a ( timestamp, value ) pair, a -// ( []timestamp, []value ) pair, and a starting index. It applies the selector -// to the single value and the array, starting at the supplied index. It -// returns -1 if the single value is selected and a non-negative value if an -// item from the array is selected. -type floatSelectorMethod func(int64, float64, []int64, []float64, int) int - -// The selector accumulator tracks currently-selected item. -type floatSelectorAccumulator struct { - selector floatSelectorMethod - - ts int64 - v float64 - tags [][]byte -} - -func (a *floatSelectorAccumulator) AccumulateFirst(timestamps []int64, values []float64, tags [][]byte) { - index := a.selector(timestamps[0], values[0], timestamps, values, 1) - if index < 0 { - a.ts = timestamps[0] - a.v = values[0] - } else { - a.ts = timestamps[index] - a.v = values[index] - } - a.tags = make([][]byte, len(tags)) - copy(a.tags, tags) -} - -func (a *floatSelectorAccumulator) AccumulateMore(timestamps []int64, values []float64, tags [][]byte) { - index := a.selector(a.ts, a.v, timestamps, values, 0) - if index >= 0 { - a.ts = timestamps[index] - a.v = values[index] - - if len(tags) > cap(a.tags) { - a.tags = make([][]byte, len(tags)) - } else { - a.tags = a.tags[:len(tags)] - } - copy(a.tags, tags) - } -} - -func (a *floatSelectorAccumulator) Result() (int64, float64, [][]byte) { - return a.ts, a.v, a.tags -} - -// The aggregate method takes a value, an array of values, and a starting -// index, applies an aggregate operation over the value and the array, starting -// at the given index, and returns the result. -type floatAggregateMethod func(float64, []float64, int) float64 - -type floatAggregateAccumulator struct { - aggregate floatAggregateMethod - accum float64 - - // For pure aggregates it doesn't matter what we return for tags, but - // we need to satisfy the interface. We will just return the most - // recently seen tags. - tags [][]byte -} - -func (a *floatAggregateAccumulator) AccumulateFirst(timestamps []int64, values []float64, tags [][]byte) { - a.accum = a.aggregate(values[0], values, 1) - a.tags = tags -} - -func (a *floatAggregateAccumulator) AccumulateMore(timestamps []int64, values []float64, tags [][]byte) { - a.accum = a.aggregate(a.accum, values, 0) - a.tags = tags -} - -// For group aggregates (non-selectors), the timestamp is always math.MaxInt64. -// their final result does not contain _time, so this timestamp value can be -// anything and it won't matter. -func (a *floatAggregateAccumulator) Result() (int64, float64, [][]byte) { - return math.MaxInt64, a.accum, a.tags -} - -// makeFloatAggregateAccumulator returns the interface implementation for -// aggregating returned points within the same group. The incoming points are -// the ones returned for each series and the struct returned here will -// aggregate the aggregates. 
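
(Editorial sketch, not part of the deleted file: how a group table drives one of these accumulators while merging per-series batches, mirroring the loop in `advance` above. `exampleSumAcrossSeries`, `seriesBatch`, and its fields are hypothetical names; the accumulator constructor and aggregate type come from this file.)

type seriesBatch struct {
	timestamps []int64
	values     []float64
	tags       [][]byte
}

func exampleSumAcrossSeries(batches []seriesBatch) (float64, error) {
	if len(batches) == 0 {
		return 0, nil
	}
	acc, err := makeFloatAggregateAccumulator(datatypes.Aggregate_AggregateTypeSum)
	if err != nil {
		return 0, err
	}
	// Seed the accumulator with the first batch, then fold in the rest.
	acc.AccumulateFirst(batches[0].timestamps, batches[0].values, batches[0].tags)
	for _, b := range batches[1:] {
		acc.AccumulateMore(b.timestamps, b.values, b.tags)
	}
	// For a non-selector aggregate the returned timestamp is meaningless.
	_, total, _ := acc.Result()
	return total, nil
}
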
-func makeFloatAggregateAccumulator(agg datatypes.Aggregate_AggregateType) (FloatAggregateAccumulator, error) { - switch agg { - case datatypes.Aggregate_AggregateTypeFirst: - return &floatSelectorAccumulator{selector: selectorFirstGroupsFloat}, nil - case datatypes.Aggregate_AggregateTypeLast: - return &floatSelectorAccumulator{selector: selectorLastGroupsFloat}, nil - case datatypes.Aggregate_AggregateTypeCount: - - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unsupported for aggregate count: Float", - } - - case datatypes.Aggregate_AggregateTypeSum: - - return &floatAggregateAccumulator{aggregate: aggregateSumGroupsFloat}, nil - - case datatypes.Aggregate_AggregateTypeMin: - - return &floatSelectorAccumulator{selector: selectorMinGroupsFloat}, nil - - case datatypes.Aggregate_AggregateTypeMax: - - return &floatSelectorAccumulator{selector: selectorMaxGroupsFloat}, nil - - default: - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("unknown/unimplemented aggregate type: %v", agg), - } - } -} - -func selectorMinGroupsFloat(ts int64, v float64, timestamps []int64, values []float64, i int) int { - index := -1 - - for ; i < len(values); i++ { - if v > values[i] { - index = i - v = values[i] - } - } - - return index -} - -func selectorMaxGroupsFloat(ts int64, v float64, timestamps []int64, values []float64, i int) int { - index := -1 - - for ; i < len(values); i++ { - if v < values[i] { - index = i - v = values[i] - } - } - - return index -} - -func aggregateSumGroupsFloat(sum float64, values []float64, i int) float64 { - for ; i < len(values); i++ { - sum += values[i] - } - return sum -} - -func selectorFirstGroupsFloat(ts int64, v float64, timestamps []int64, values []float64, i int) int { - index := -1 - - for ; i < len(values); i++ { - if ts > timestamps[i] { - index = i - ts = timestamps[i] - } - } - - return index -} - -func selectorLastGroupsFloat(ts int64, v float64, timestamps []int64, values []float64, i int) int { - index := -1 - - for ; i < len(values); i++ { - if ts <= timestamps[i] { - index = i - ts = timestamps[i] - } - } - - return index -} - -func (t *floatGroupTable) advanceCursor() bool { - t.cur.Close() - t.cur = nil - for t.gc.Next() { - cur := t.gc.Cursor() - if cur == nil { - continue - } - - if typedCur, ok := cur.(cursors.FloatArrayCursor); !ok { - // TODO(sgc): error or skip? 
- cur.Close() - t.err = &errors.Error{ - Code: errors.EInvalid, - Err: &GroupCursorError{ - typ: "float", - cursor: cur, - }, - } - return false - } else { - t.readTags(t.gc.Tags()) - t.cur = typedCur - return true - } - } - return false -} - -func (t *floatGroupTable) Statistics() cursors.CursorStats { - if t.cur == nil { - return cursors.CursorStats{} - } - cs := t.cur.Stats() - return cursors.CursorStats{ - ScannedValues: cs.ScannedValues, - ScannedBytes: cs.ScannedBytes, - } -} - -// -// *********** Integer *********** -// - -type integerTable struct { - table - mu sync.Mutex - cur cursors.IntegerArrayCursor - alloc memory.Allocator -} - -func newIntegerTable( - done chan struct{}, - cur cursors.IntegerArrayCursor, - bounds execute.Bounds, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *integerTable { - t := &integerTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *integerTable) Close() { - t.mu.Lock() - if t.cur != nil { - t.cur.Close() - t.cur = nil - } - t.mu.Unlock() -} - -func (t *integerTable) Statistics() cursors.CursorStats { - t.mu.Lock() - defer t.mu.Unlock() - cur := t.cur - if cur == nil { - return cursors.CursorStats{} - } - cs := cur.Stats() - return cursors.CursorStats{ - ScannedValues: cs.ScannedValues, - ScannedBytes: cs.ScannedBytes, - } -} - -func (t *integerTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *integerTable) advance() bool { - a := t.cur.Next() - l := a.Len() - if l == 0 { - return false - } - - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. - cr := t.allocateBuffer(l) - cr.cols[timeColIdx] = arrow.NewInt(a.Timestamps, t.alloc) - cr.cols[valueColIdx] = t.toArrowBuffer(a.Values) - t.appendTags(cr) - t.appendBounds(cr) - return true -} - -// window table -type integerWindowTable struct { - integerTable - arr *cursors.IntegerArray - windowBounds interval.Bounds - idxInArr int - createEmpty bool - timeColumn string - isAggregate bool - window interval.Window - fillValue *int64 -} - -func newIntegerWindowTable( - done chan struct{}, - cur cursors.IntegerArrayCursor, - bounds execute.Bounds, - window interval.Window, - createEmpty bool, - timeColumn string, - isAggregate bool, - fillValue *int64, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *integerWindowTable { - t := &integerWindowTable{ - integerTable: integerTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - window: window, - createEmpty: createEmpty, - timeColumn: timeColumn, - isAggregate: isAggregate, - fillValue: fillValue, - } - if t.createEmpty { - start := int64(bounds.Start) - t.windowBounds = window.GetLatestBounds(values.Time(start)) - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *integerWindowTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -// createNextBufferTimes will read the timestamps from the array -// cursor and construct the values for the next buffer. 
-func (t *integerWindowTable) createNextBufferTimes() (start, stop *array.Int, ok bool) {
-	startB := arrow.NewIntBuilder(t.alloc)
-	stopB := arrow.NewIntBuilder(t.alloc)
-
-	if t.createEmpty {
-		// There are no more windows when the start time is greater
-		// than or equal to the stop time.
-		if startT := int64(t.windowBounds.Start()); startT >= int64(t.bounds.Stop) {
-			return nil, nil, false
-		}
-
-		// Create a buffer with the buffer size.
-		// TODO(jsternberg): Calculate the exact size with max points as the maximum.
-		startB.Resize(storage.MaxPointsPerBlock)
-		stopB.Resize(storage.MaxPointsPerBlock)
-		for ; ; t.windowBounds = t.window.NextBounds(t.windowBounds) {
-			startT, stopT := t.getWindowBoundsFor(t.windowBounds)
-			if startT >= int64(t.bounds.Stop) {
-				break
-			}
-			startB.Append(startT)
-			stopB.Append(stopT)
-		}
-		start = startB.NewIntArray()
-		stop = stopB.NewIntArray()
-		return start, stop, true
-	}
-
-	// Retrieve the next buffer so we can copy the timestamps.
-	if !t.nextBuffer() {
-		return nil, nil, false
-	}
-
-	// Copy over the timestamps from the next buffer and adjust
-	// times for the boundaries.
-	startB.Resize(len(t.arr.Timestamps))
-	stopB.Resize(len(t.arr.Timestamps))
-	for _, stopT := range t.arr.Timestamps {
-		bounds := t.window.PrevBounds(t.window.GetLatestBounds(values.Time(stopT)))
-		startT, stopT := t.getWindowBoundsFor(bounds)
-		startB.Append(startT)
-		stopB.Append(stopT)
-	}
-	start = startB.NewIntArray()
-	stop = stopB.NewIntArray()
-	return start, stop, true
-}
-
-func (t *integerWindowTable) getWindowBoundsFor(bounds interval.Bounds) (int64, int64) {
-	beg := int64(bounds.Start())
-	end := int64(bounds.Stop())
-	if beg < int64(t.bounds.Start) {
-		beg = int64(t.bounds.Start)
-	}
-	if end > int64(t.bounds.Stop) {
-		end = int64(t.bounds.Stop)
-	}
-	return beg, end
-}
-
-// nextAt will retrieve the next value that can be used with
-// the given stop timestamp. If no values can be used with the timestamp,
-// it will return the default value and false.
-func (t *integerWindowTable) nextAt(stop int64) (v int64, ok bool) {
-	if !t.nextBuffer() {
-		return
-	} else if !t.isInWindow(stop, t.arr.Timestamps[t.idxInArr]) {
-		return
-	}
-	v, ok = t.arr.Values[t.idxInArr], true
-	t.idxInArr++
-	return v, ok
-}
-
-// isInWindow will check if the given time may be used within the window
-// denoted by the stop timestamp. The stop may be a truncated stop time
-// because of a restricted boundary.
-//
-// When used with an aggregate, ts will be the true stop time returned
-// by storage. When used with a selector, it will be the real time
-// for the point.
-func (t *integerWindowTable) isInWindow(stop int64, ts int64) bool {
-	// Retrieve the boundary associated with this stop time.
-	// This will be the boundary for the previous nanosecond.
-	bounds := t.window.GetLatestBounds(values.Time(stop - 1))
-	start, stop := int64(bounds.Start()), int64(bounds.Stop())
-
-	// For an aggregate, the timestamp will be the stop time of the boundary.
-	if t.isAggregate {
-		return start < ts && ts <= stop
-	}
-
-	// For a selector, the timestamp should be within the boundary.
-	return start <= ts && ts < stop
-}
-
-// nextBuffer will ensure the array cursor is filled
-// and will return true if there is at least one value
-// that can be read from it.
-func (t *integerWindowTable) nextBuffer() bool {
-	// Discard the current array cursor if we have
-	// exceeded it.
-	if t.arr != nil && t.idxInArr >= t.arr.Len() {
-		t.arr = nil
-	}
-
-	// Retrieve the next array cursor if needed.
- if t.arr == nil { - arr := t.cur.Next() - if arr.Len() == 0 { - return false - } - t.arr, t.idxInArr = arr, 0 - } - return true -} - -// appendValues will scan the timestamps and append values -// that match those timestamps from the buffer. -func (t *integerWindowTable) appendValues(intervals []int64, appendValue func(v int64), appendNull func()) { - for i := 0; i < len(intervals); i++ { - if v, ok := t.nextAt(intervals[i]); ok { - appendValue(v) - continue - } - appendNull() - } -} - -func (t *integerWindowTable) advance() bool { - if !t.nextBuffer() { - return false - } - // Create the timestamps for the next window. - start, stop, ok := t.createNextBufferTimes() - if !ok { - return false - } - values := t.mergeValues(stop.Int64Values()) - - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. - cr := t.allocateBuffer(stop.Len()) - if t.timeColumn != "" { - switch t.timeColumn { - case execute.DefaultStopColLabel: - cr.cols[timeColIdx] = stop - start.Release() - case execute.DefaultStartColLabel: - cr.cols[timeColIdx] = start - stop.Release() - } - cr.cols[valueColIdx] = values - t.appendBounds(cr) - } else { - cr.cols[startColIdx] = start - cr.cols[stopColIdx] = stop - cr.cols[valueColIdxWithoutTime] = values - } - t.appendTags(cr) - return true -} - -// This table implementation will not have any empty windows. -type integerWindowSelectorTable struct { - integerTable - timeColumn string - window interval.Window -} - -func newIntegerWindowSelectorTable( - done chan struct{}, - cur cursors.IntegerArrayCursor, - bounds execute.Bounds, - window interval.Window, - timeColumn string, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *integerWindowSelectorTable { - t := &integerWindowSelectorTable{ - integerTable: integerTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - window: window, - timeColumn: timeColumn, - } - t.readTags(tags) - t.init(t.advance) - return t -} - -func (t *integerWindowSelectorTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *integerWindowSelectorTable) advance() bool { - arr := t.cur.Next() - if arr.Len() == 0 { - return false - } - - cr := t.allocateBuffer(arr.Len()) - - switch t.timeColumn { - case execute.DefaultStartColLabel: - cr.cols[timeColIdx] = t.startTimes(arr) - t.appendBounds(cr) - case execute.DefaultStopColLabel: - cr.cols[timeColIdx] = t.stopTimes(arr) - t.appendBounds(cr) - default: - cr.cols[startColIdx] = t.startTimes(arr) - cr.cols[stopColIdx] = t.stopTimes(arr) - cr.cols[timeColIdx] = arrow.NewInt(arr.Timestamps, t.alloc) - } - - cr.cols[valueColIdx] = t.toArrowBuffer(arr.Values) - t.appendTags(cr) - return true -} - -func (t *integerWindowSelectorTable) startTimes(arr *cursors.IntegerArray) *array.Int { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(arr.Len()) - - rangeStart := int64(t.bounds.Start) - - for _, v := range arr.Timestamps { - if windowStart := int64(t.window.GetLatestBounds(values.Time(v)).Start()); windowStart < rangeStart { - start.Append(rangeStart) - } else { - start.Append(windowStart) - } - } - return start.NewIntArray() -} - -func (t *integerWindowSelectorTable) stopTimes(arr *cursors.IntegerArray) *array.Int { - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(arr.Len()) - - rangeStop := int64(t.bounds.Stop) - - 
for _, v := range arr.Timestamps { - if windowStop := int64(t.window.GetLatestBounds(values.Time(v)).Stop()); windowStop > rangeStop { - stop.Append(rangeStop) - } else { - stop.Append(windowStop) - } - } - return stop.NewIntArray() -} - -// This table implementation may contain empty windows -// in addition to non-empty windows. -type integerEmptyWindowSelectorTable struct { - integerTable - arr *cursors.IntegerArray - idx int - rangeStart int64 - rangeStop int64 - windowBounds interval.Bounds - timeColumn string - window interval.Window -} - -func newIntegerEmptyWindowSelectorTable( - done chan struct{}, - cur cursors.IntegerArrayCursor, - bounds execute.Bounds, - window interval.Window, - timeColumn string, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *integerEmptyWindowSelectorTable { - rangeStart := int64(bounds.Start) - rangeStop := int64(bounds.Stop) - t := &integerEmptyWindowSelectorTable{ - integerTable: integerTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - arr: cur.Next(), - rangeStart: rangeStart, - rangeStop: rangeStop, - windowBounds: window.GetLatestBounds(values.Time(rangeStart)), - window: window, - timeColumn: timeColumn, - } - t.readTags(tags) - t.init(t.advance) - return t -} - -func (t *integerEmptyWindowSelectorTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *integerEmptyWindowSelectorTable) advance() bool { - if t.arr.Len() == 0 { - return false - } - - values := t.arrowBuilder() - values.Resize(storage.MaxPointsPerBlock) - - var cr *colReader - - switch t.timeColumn { - case execute.DefaultStartColLabel: - start := t.startTimes(values) - cr = t.allocateBuffer(start.Len()) - cr.cols[timeColIdx] = start - t.appendBounds(cr) - case execute.DefaultStopColLabel: - stop := t.stopTimes(values) - cr = t.allocateBuffer(stop.Len()) - cr.cols[timeColIdx] = stop - t.appendBounds(cr) - default: - start, stop, time := t.startStopTimes(values) - cr = t.allocateBuffer(time.Len()) - cr.cols[startColIdx] = start - cr.cols[stopColIdx] = stop - cr.cols[timeColIdx] = time - } - - cr.cols[valueColIdx] = values.NewIntArray() - t.appendTags(cr) - return true -} - -func (t *integerEmptyWindowSelectorTable) startTimes(builder *array.IntBuilder) *array.Int { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - // The first window should start at the - // beginning of the time range. - if int64(t.windowBounds.Start()) < t.rangeStart { - start.Append(t.rangeStart) - } else { - start.Append(int64(t.windowBounds.Start())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). 
- if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if start.Len() == storage.MaxPointsPerBlock { - break - } - } - return start.NewIntArray() -} - -func (t *integerEmptyWindowSelectorTable) stopTimes(builder *array.IntBuilder) *array.Int { - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - // The last window should stop at the end of - // the time range. - if int64(t.windowBounds.Stop()) > t.rangeStop { - stop.Append(t.rangeStop) - } else { - stop.Append(int64(t.windowBounds.Stop())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). - if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if stop.Len() == storage.MaxPointsPerBlock { - break - } - } - return stop.NewIntArray() -} - -func (t *integerEmptyWindowSelectorTable) startStopTimes(builder *array.IntBuilder) (*array.Int, *array.Int, *array.Int) { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(storage.MaxPointsPerBlock) - - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(storage.MaxPointsPerBlock) - - time := arrow.NewIntBuilder(t.alloc) - time.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - - // The first window should start at the - // beginning of the time range. - if int64(t.windowBounds.Start()) < t.rangeStart { - start.Append(t.rangeStart) - } else { - start.Append(int64(t.windowBounds.Start())) - } - - // The last window should stop at the end of - // the time range. - if int64(t.windowBounds.Stop()) > t.rangeStop { - stop.Append(t.rangeStop) - } else { - stop.Append(int64(t.windowBounds.Stop())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - time.Append(v) - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - time.AppendNull() - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). 
- if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if time.Len() == storage.MaxPointsPerBlock { - break - } - } - return start.NewIntArray(), stop.NewIntArray(), time.NewIntArray() -} - -// group table - -type integerGroupTable struct { - table - mu sync.Mutex - gc storage.GroupCursor - cur cursors.IntegerArrayCursor -} - -func newIntegerGroupTable( - done chan struct{}, - gc storage.GroupCursor, - cur cursors.IntegerArrayCursor, - bounds execute.Bounds, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *integerGroupTable { - t := &integerGroupTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - gc: gc, - cur: cur, - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *integerGroupTable) Close() { - t.mu.Lock() - if t.cur != nil { - t.cur.Close() - t.cur = nil - } - if t.gc != nil { - t.gc.Close() - t.gc = nil - } - t.mu.Unlock() -} - -func (t *integerGroupTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *integerGroupTable) advance() bool { - if t.cur == nil { - // For group aggregates, we will try to get all the series and all table buffers within those series - // all at once and merge them into one row when this advance() function is first called. - // At the end of this process, t.advanceCursor() already returns false and t.cur becomes nil. - // But we still need to return true to indicate that there is data to be returned. - // The second time when we call this advance(), t.cur is already nil, so we directly return false. - return false - } - var arr *cursors.IntegerArray - var len int - for { - arr = t.cur.Next() - len = arr.Len() - if len > 0 { - break - } - if !t.advanceCursor() { - return false - } - } - - // handle the group without aggregate case - if t.gc.Aggregate() == nil { - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. - colReader := t.allocateBuffer(len) - colReader.cols[timeColIdx] = arrow.NewInt(arr.Timestamps, t.alloc) - colReader.cols[valueColIdx] = t.toArrowBuffer(arr.Values) - t.appendTags(colReader) - t.appendBounds(colReader) - return true - } - - aggregate, err := makeIntegerAggregateAccumulator(t.gc.Aggregate().Type) - if err != nil { - t.err = err - return false - } - - aggregate.AccumulateFirst(arr.Timestamps, arr.Values, t.tags) - for { - arr = t.cur.Next() - if arr.Len() > 0 { - aggregate.AccumulateMore(arr.Timestamps, arr.Values, t.tags) - continue - } - - if !t.advanceCursor() { - break - } - } - timestamp, value, tags := aggregate.Result() - - colReader := t.allocateBuffer(1) - if IsSelector(t.gc.Aggregate()) { - colReader.cols[timeColIdx] = arrow.NewInt([]int64{timestamp}, t.alloc) - colReader.cols[valueColIdx] = t.toArrowBuffer([]int64{value}) - } else { - colReader.cols[valueColIdxWithoutTime] = t.toArrowBuffer([]int64{value}) - } - t.appendTheseTags(colReader, tags) - t.appendBounds(colReader) - return true -} - -type IntegerAggregateAccumulator interface { - // AccumulateFirst receives an initial array of items to select from. - // It selects an item and stores the state. Afterwards, more data can - // be supplied with AccumulateMore and the results can be requested at - // any time. Without a call to AccumulateFirst the results are not - // defined. 
- AccumulateFirst(timestamps []int64, values []int64, tags [][]byte) - - // AccumulateMore receives additional array elements to select from. - AccumulateMore(timestamps []int64, values []int64, tags [][]byte) - - // Result returns the item selected from the data received so far. - Result() (int64, int64, [][]byte) -} - -// The selector method takes a ( timestamp, value ) pair, a -// ( []timestamp, []value ) pair, and a starting index. It applies the selector -// to the single value and the array, starting at the supplied index. It -// returns -1 if the single value is selected and a non-negative value if an -// item from the array is selected. -type integerSelectorMethod func(int64, int64, []int64, []int64, int) int - -// The selector accumulator tracks currently-selected item. -type integerSelectorAccumulator struct { - selector integerSelectorMethod - - ts int64 - v int64 - tags [][]byte -} - -func (a *integerSelectorAccumulator) AccumulateFirst(timestamps []int64, values []int64, tags [][]byte) { - index := a.selector(timestamps[0], values[0], timestamps, values, 1) - if index < 0 { - a.ts = timestamps[0] - a.v = values[0] - } else { - a.ts = timestamps[index] - a.v = values[index] - } - a.tags = make([][]byte, len(tags)) - copy(a.tags, tags) -} - -func (a *integerSelectorAccumulator) AccumulateMore(timestamps []int64, values []int64, tags [][]byte) { - index := a.selector(a.ts, a.v, timestamps, values, 0) - if index >= 0 { - a.ts = timestamps[index] - a.v = values[index] - - if len(tags) > cap(a.tags) { - a.tags = make([][]byte, len(tags)) - } else { - a.tags = a.tags[:len(tags)] - } - copy(a.tags, tags) - } -} - -func (a *integerSelectorAccumulator) Result() (int64, int64, [][]byte) { - return a.ts, a.v, a.tags -} - -// The aggregate method takes a value, an array of values, and a starting -// index, applies an aggregate operation over the value and the array, starting -// at the given index, and returns the result. -type integerAggregateMethod func(int64, []int64, int) int64 - -type integerAggregateAccumulator struct { - aggregate integerAggregateMethod - accum int64 - - // For pure aggregates it doesn't matter what we return for tags, but - // we need to satisfy the interface. We will just return the most - // recently seen tags. - tags [][]byte -} - -func (a *integerAggregateAccumulator) AccumulateFirst(timestamps []int64, values []int64, tags [][]byte) { - a.accum = a.aggregate(values[0], values, 1) - a.tags = tags -} - -func (a *integerAggregateAccumulator) AccumulateMore(timestamps []int64, values []int64, tags [][]byte) { - a.accum = a.aggregate(a.accum, values, 0) - a.tags = tags -} - -// For group aggregates (non-selectors), the timestamp is always math.MaxInt64. -// their final result does not contain _time, so this timestamp value can be -// anything and it won't matter. -func (a *integerAggregateAccumulator) Result() (int64, int64, [][]byte) { - return math.MaxInt64, a.accum, a.tags -} - -// makeIntegerAggregateAccumulator returns the interface implementation for -// aggregating returned points within the same group. The incoming points are -// the ones returned for each series and the struct returned here will -// aggregate the aggregates. 
-func makeIntegerAggregateAccumulator(agg datatypes.Aggregate_AggregateType) (IntegerAggregateAccumulator, error) { - switch agg { - case datatypes.Aggregate_AggregateTypeFirst: - return &integerSelectorAccumulator{selector: selectorFirstGroupsInteger}, nil - case datatypes.Aggregate_AggregateTypeLast: - return &integerSelectorAccumulator{selector: selectorLastGroupsInteger}, nil - case datatypes.Aggregate_AggregateTypeCount: - - return &integerAggregateAccumulator{aggregate: aggregateCountGroupsInteger}, nil - - case datatypes.Aggregate_AggregateTypeSum: - - return &integerAggregateAccumulator{aggregate: aggregateSumGroupsInteger}, nil - - case datatypes.Aggregate_AggregateTypeMin: - - return &integerSelectorAccumulator{selector: selectorMinGroupsInteger}, nil - - case datatypes.Aggregate_AggregateTypeMax: - - return &integerSelectorAccumulator{selector: selectorMaxGroupsInteger}, nil - - default: - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("unknown/unimplemented aggregate type: %v", agg), - } - } -} - -func selectorMinGroupsInteger(ts int64, v int64, timestamps []int64, values []int64, i int) int { - index := -1 - - for ; i < len(values); i++ { - if v > values[i] { - index = i - v = values[i] - } - } - - return index -} - -func selectorMaxGroupsInteger(ts int64, v int64, timestamps []int64, values []int64, i int) int { - index := -1 - - for ; i < len(values); i++ { - if v < values[i] { - index = i - v = values[i] - } - } - - return index -} - -func aggregateCountGroupsInteger(accum int64, values []int64, i int) int64 { - return aggregateSumGroupsInteger(accum, values, i) -} - -func aggregateSumGroupsInteger(sum int64, values []int64, i int) int64 { - for ; i < len(values); i++ { - sum += values[i] - } - return sum -} - -func selectorFirstGroupsInteger(ts int64, v int64, timestamps []int64, values []int64, i int) int { - index := -1 - - for ; i < len(values); i++ { - if ts > timestamps[i] { - index = i - ts = timestamps[i] - } - } - - return index -} - -func selectorLastGroupsInteger(ts int64, v int64, timestamps []int64, values []int64, i int) int { - index := -1 - - for ; i < len(values); i++ { - if ts <= timestamps[i] { - index = i - ts = timestamps[i] - } - } - - return index -} - -func (t *integerGroupTable) advanceCursor() bool { - t.cur.Close() - t.cur = nil - for t.gc.Next() { - cur := t.gc.Cursor() - if cur == nil { - continue - } - - if typedCur, ok := cur.(cursors.IntegerArrayCursor); !ok { - // TODO(sgc): error or skip? 
- cur.Close() - t.err = &errors.Error{ - Code: errors.EInvalid, - Err: &GroupCursorError{ - typ: "integer", - cursor: cur, - }, - } - return false - } else { - t.readTags(t.gc.Tags()) - t.cur = typedCur - return true - } - } - return false -} - -func (t *integerGroupTable) Statistics() cursors.CursorStats { - if t.cur == nil { - return cursors.CursorStats{} - } - cs := t.cur.Stats() - return cursors.CursorStats{ - ScannedValues: cs.ScannedValues, - ScannedBytes: cs.ScannedBytes, - } -} - -// -// *********** Unsigned *********** -// - -type unsignedTable struct { - table - mu sync.Mutex - cur cursors.UnsignedArrayCursor - alloc memory.Allocator -} - -func newUnsignedTable( - done chan struct{}, - cur cursors.UnsignedArrayCursor, - bounds execute.Bounds, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *unsignedTable { - t := &unsignedTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *unsignedTable) Close() { - t.mu.Lock() - if t.cur != nil { - t.cur.Close() - t.cur = nil - } - t.mu.Unlock() -} - -func (t *unsignedTable) Statistics() cursors.CursorStats { - t.mu.Lock() - defer t.mu.Unlock() - cur := t.cur - if cur == nil { - return cursors.CursorStats{} - } - cs := cur.Stats() - return cursors.CursorStats{ - ScannedValues: cs.ScannedValues, - ScannedBytes: cs.ScannedBytes, - } -} - -func (t *unsignedTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *unsignedTable) advance() bool { - a := t.cur.Next() - l := a.Len() - if l == 0 { - return false - } - - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. - cr := t.allocateBuffer(l) - cr.cols[timeColIdx] = arrow.NewInt(a.Timestamps, t.alloc) - cr.cols[valueColIdx] = t.toArrowBuffer(a.Values) - t.appendTags(cr) - t.appendBounds(cr) - return true -} - -// window table -type unsignedWindowTable struct { - unsignedTable - arr *cursors.UnsignedArray - windowBounds interval.Bounds - idxInArr int - createEmpty bool - timeColumn string - isAggregate bool - window interval.Window -} - -func newUnsignedWindowTable( - done chan struct{}, - cur cursors.UnsignedArrayCursor, - bounds execute.Bounds, - window interval.Window, - createEmpty bool, - timeColumn string, - isAggregate bool, - - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *unsignedWindowTable { - t := &unsignedWindowTable{ - unsignedTable: unsignedTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - window: window, - createEmpty: createEmpty, - timeColumn: timeColumn, - isAggregate: isAggregate, - } - if t.createEmpty { - start := int64(bounds.Start) - t.windowBounds = window.GetLatestBounds(values.Time(start)) - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *unsignedWindowTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -// createNextBufferTimes will read the timestamps from the array -// cursor and construct the values for the next buffer. 
-func (t *unsignedWindowTable) createNextBufferTimes() (start, stop *array.Int, ok bool) {
-	startB := arrow.NewIntBuilder(t.alloc)
-	stopB := arrow.NewIntBuilder(t.alloc)
-
-	if t.createEmpty {
-		// There are no more windows when the start time is greater
-		// than or equal to the stop time.
-		if startT := int64(t.windowBounds.Start()); startT >= int64(t.bounds.Stop) {
-			return nil, nil, false
-		}
-
-		// Create a buffer with the buffer size.
-		// TODO(jsternberg): Calculate the exact size with max points as the maximum.
-		startB.Resize(storage.MaxPointsPerBlock)
-		stopB.Resize(storage.MaxPointsPerBlock)
-		for ; ; t.windowBounds = t.window.NextBounds(t.windowBounds) {
-			startT, stopT := t.getWindowBoundsFor(t.windowBounds)
-			if startT >= int64(t.bounds.Stop) {
-				break
-			}
-			startB.Append(startT)
-			stopB.Append(stopT)
-		}
-		start = startB.NewIntArray()
-		stop = stopB.NewIntArray()
-		return start, stop, true
-	}
-
-	// Retrieve the next buffer so we can copy the timestamps.
-	if !t.nextBuffer() {
-		return nil, nil, false
-	}
-
-	// Copy over the timestamps from the next buffer and adjust
-	// times for the boundaries.
-	startB.Resize(len(t.arr.Timestamps))
-	stopB.Resize(len(t.arr.Timestamps))
-	for _, stopT := range t.arr.Timestamps {
-		bounds := t.window.PrevBounds(t.window.GetLatestBounds(values.Time(stopT)))
-		startT, stopT := t.getWindowBoundsFor(bounds)
-		startB.Append(startT)
-		stopB.Append(stopT)
-	}
-	start = startB.NewIntArray()
-	stop = stopB.NewIntArray()
-	return start, stop, true
-}
-
-func (t *unsignedWindowTable) getWindowBoundsFor(bounds interval.Bounds) (int64, int64) {
-	beg := int64(bounds.Start())
-	end := int64(bounds.Stop())
-	if beg < int64(t.bounds.Start) {
-		beg = int64(t.bounds.Start)
-	}
-	if end > int64(t.bounds.Stop) {
-		end = int64(t.bounds.Stop)
-	}
-	return beg, end
-}
-
-// nextAt will retrieve the next value that can be used with
-// the given stop timestamp. If no values can be used with the timestamp,
-// it will return the default value and false.
-func (t *unsignedWindowTable) nextAt(stop int64) (v uint64, ok bool) {
-	if !t.nextBuffer() {
-		return
-	} else if !t.isInWindow(stop, t.arr.Timestamps[t.idxInArr]) {
-		return
-	}
-	v, ok = t.arr.Values[t.idxInArr], true
-	t.idxInArr++
-	return v, ok
-}
-
-// isInWindow will check if the given time may be used within the window
-// denoted by the stop timestamp. The stop may be a truncated stop time
-// because of a restricted boundary.
-//
-// When used with an aggregate, ts will be the true stop time returned
-// by storage. When used with a selector, it will be the real time
-// for the point.
-func (t *unsignedWindowTable) isInWindow(stop int64, ts int64) bool {
-	// Retrieve the boundary associated with this stop time.
-	// This will be the boundary for the previous nanosecond.
-	bounds := t.window.GetLatestBounds(values.Time(stop - 1))
-	start, stop := int64(bounds.Start()), int64(bounds.Stop())
-
-	// For an aggregate, the timestamp will be the stop time of the boundary.
-	if t.isAggregate {
-		return start < ts && ts <= stop
-	}
-
-	// For a selector, the timestamp should be within the boundary.
-	return start <= ts && ts < stop
-}
-
-// nextBuffer will ensure the array cursor is filled
-// and will return true if there is at least one value
-// that can be read from it.
-func (t *unsignedWindowTable) nextBuffer() bool {
-	// Discard the current array cursor if we have
-	// exceeded it.
-	if t.arr != nil && t.idxInArr >= t.arr.Len() {
-		t.arr = nil
-	}
-
-	// Retrieve the next array cursor if needed.
- if t.arr == nil { - arr := t.cur.Next() - if arr.Len() == 0 { - return false - } - t.arr, t.idxInArr = arr, 0 - } - return true -} - -// appendValues will scan the timestamps and append values -// that match those timestamps from the buffer. -func (t *unsignedWindowTable) appendValues(intervals []int64, appendValue func(v uint64), appendNull func()) { - for i := 0; i < len(intervals); i++ { - if v, ok := t.nextAt(intervals[i]); ok { - appendValue(v) - continue - } - appendNull() - } -} - -func (t *unsignedWindowTable) advance() bool { - if !t.nextBuffer() { - return false - } - // Create the timestamps for the next window. - start, stop, ok := t.createNextBufferTimes() - if !ok { - return false - } - values := t.mergeValues(stop.Int64Values()) - - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. - cr := t.allocateBuffer(stop.Len()) - if t.timeColumn != "" { - switch t.timeColumn { - case execute.DefaultStopColLabel: - cr.cols[timeColIdx] = stop - start.Release() - case execute.DefaultStartColLabel: - cr.cols[timeColIdx] = start - stop.Release() - } - cr.cols[valueColIdx] = values - t.appendBounds(cr) - } else { - cr.cols[startColIdx] = start - cr.cols[stopColIdx] = stop - cr.cols[valueColIdxWithoutTime] = values - } - t.appendTags(cr) - return true -} - -// This table implementation will not have any empty windows. -type unsignedWindowSelectorTable struct { - unsignedTable - timeColumn string - window interval.Window -} - -func newUnsignedWindowSelectorTable( - done chan struct{}, - cur cursors.UnsignedArrayCursor, - bounds execute.Bounds, - window interval.Window, - timeColumn string, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *unsignedWindowSelectorTable { - t := &unsignedWindowSelectorTable{ - unsignedTable: unsignedTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - window: window, - timeColumn: timeColumn, - } - t.readTags(tags) - t.init(t.advance) - return t -} - -func (t *unsignedWindowSelectorTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *unsignedWindowSelectorTable) advance() bool { - arr := t.cur.Next() - if arr.Len() == 0 { - return false - } - - cr := t.allocateBuffer(arr.Len()) - - switch t.timeColumn { - case execute.DefaultStartColLabel: - cr.cols[timeColIdx] = t.startTimes(arr) - t.appendBounds(cr) - case execute.DefaultStopColLabel: - cr.cols[timeColIdx] = t.stopTimes(arr) - t.appendBounds(cr) - default: - cr.cols[startColIdx] = t.startTimes(arr) - cr.cols[stopColIdx] = t.stopTimes(arr) - cr.cols[timeColIdx] = arrow.NewInt(arr.Timestamps, t.alloc) - } - - cr.cols[valueColIdx] = t.toArrowBuffer(arr.Values) - t.appendTags(cr) - return true -} - -func (t *unsignedWindowSelectorTable) startTimes(arr *cursors.UnsignedArray) *array.Int { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(arr.Len()) - - rangeStart := int64(t.bounds.Start) - - for _, v := range arr.Timestamps { - if windowStart := int64(t.window.GetLatestBounds(values.Time(v)).Start()); windowStart < rangeStart { - start.Append(rangeStart) - } else { - start.Append(windowStart) - } - } - return start.NewIntArray() -} - -func (t *unsignedWindowSelectorTable) stopTimes(arr *cursors.UnsignedArray) *array.Int { - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(arr.Len()) - - rangeStop := 
int64(t.bounds.Stop) - - for _, v := range arr.Timestamps { - if windowStop := int64(t.window.GetLatestBounds(values.Time(v)).Stop()); windowStop > rangeStop { - stop.Append(rangeStop) - } else { - stop.Append(windowStop) - } - } - return stop.NewIntArray() -} - -// This table implementation may contain empty windows -// in addition to non-empty windows. -type unsignedEmptyWindowSelectorTable struct { - unsignedTable - arr *cursors.UnsignedArray - idx int - rangeStart int64 - rangeStop int64 - windowBounds interval.Bounds - timeColumn string - window interval.Window -} - -func newUnsignedEmptyWindowSelectorTable( - done chan struct{}, - cur cursors.UnsignedArrayCursor, - bounds execute.Bounds, - window interval.Window, - timeColumn string, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *unsignedEmptyWindowSelectorTable { - rangeStart := int64(bounds.Start) - rangeStop := int64(bounds.Stop) - t := &unsignedEmptyWindowSelectorTable{ - unsignedTable: unsignedTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - arr: cur.Next(), - rangeStart: rangeStart, - rangeStop: rangeStop, - windowBounds: window.GetLatestBounds(values.Time(rangeStart)), - window: window, - timeColumn: timeColumn, - } - t.readTags(tags) - t.init(t.advance) - return t -} - -func (t *unsignedEmptyWindowSelectorTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *unsignedEmptyWindowSelectorTable) advance() bool { - if t.arr.Len() == 0 { - return false - } - - values := t.arrowBuilder() - values.Resize(storage.MaxPointsPerBlock) - - var cr *colReader - - switch t.timeColumn { - case execute.DefaultStartColLabel: - start := t.startTimes(values) - cr = t.allocateBuffer(start.Len()) - cr.cols[timeColIdx] = start - t.appendBounds(cr) - case execute.DefaultStopColLabel: - stop := t.stopTimes(values) - cr = t.allocateBuffer(stop.Len()) - cr.cols[timeColIdx] = stop - t.appendBounds(cr) - default: - start, stop, time := t.startStopTimes(values) - cr = t.allocateBuffer(time.Len()) - cr.cols[startColIdx] = start - cr.cols[stopColIdx] = stop - cr.cols[timeColIdx] = time - } - - cr.cols[valueColIdx] = values.NewUintArray() - t.appendTags(cr) - return true -} - -func (t *unsignedEmptyWindowSelectorTable) startTimes(builder *array.UintBuilder) *array.Int { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - // The first window should start at the - // beginning of the time range. - if int64(t.windowBounds.Start()) < t.rangeStart { - start.Append(t.rangeStart) - } else { - start.Append(int64(t.windowBounds.Start())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). 
- if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if start.Len() == storage.MaxPointsPerBlock { - break - } - } - return start.NewIntArray() -} - -func (t *unsignedEmptyWindowSelectorTable) stopTimes(builder *array.UintBuilder) *array.Int { - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - // The last window should stop at the end of - // the time range. - if int64(t.windowBounds.Stop()) > t.rangeStop { - stop.Append(t.rangeStop) - } else { - stop.Append(int64(t.windowBounds.Stop())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). - if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if stop.Len() == storage.MaxPointsPerBlock { - break - } - } - return stop.NewIntArray() -} - -func (t *unsignedEmptyWindowSelectorTable) startStopTimes(builder *array.UintBuilder) (*array.Int, *array.Int, *array.Int) { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(storage.MaxPointsPerBlock) - - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(storage.MaxPointsPerBlock) - - time := arrow.NewIntBuilder(t.alloc) - time.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - - // The first window should start at the - // beginning of the time range. - if int64(t.windowBounds.Start()) < t.rangeStart { - start.Append(t.rangeStart) - } else { - start.Append(int64(t.windowBounds.Start())) - } - - // The last window should stop at the end of - // the time range. - if int64(t.windowBounds.Stop()) > t.rangeStop { - stop.Append(t.rangeStop) - } else { - stop.Append(int64(t.windowBounds.Stop())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - time.Append(v) - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - time.AppendNull() - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). 
- if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if time.Len() == storage.MaxPointsPerBlock { - break - } - } - return start.NewIntArray(), stop.NewIntArray(), time.NewIntArray() -} - -// group table - -type unsignedGroupTable struct { - table - mu sync.Mutex - gc storage.GroupCursor - cur cursors.UnsignedArrayCursor -} - -func newUnsignedGroupTable( - done chan struct{}, - gc storage.GroupCursor, - cur cursors.UnsignedArrayCursor, - bounds execute.Bounds, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *unsignedGroupTable { - t := &unsignedGroupTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - gc: gc, - cur: cur, - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *unsignedGroupTable) Close() { - t.mu.Lock() - if t.cur != nil { - t.cur.Close() - t.cur = nil - } - if t.gc != nil { - t.gc.Close() - t.gc = nil - } - t.mu.Unlock() -} - -func (t *unsignedGroupTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *unsignedGroupTable) advance() bool { - if t.cur == nil { - // For group aggregates, we will try to get all the series and all table buffers within those series - // all at once and merge them into one row when this advance() function is first called. - // At the end of this process, t.advanceCursor() already returns false and t.cur becomes nil. - // But we still need to return true to indicate that there is data to be returned. - // The second time when we call this advance(), t.cur is already nil, so we directly return false. - return false - } - var arr *cursors.UnsignedArray - var len int - for { - arr = t.cur.Next() - len = arr.Len() - if len > 0 { - break - } - if !t.advanceCursor() { - return false - } - } - - // handle the group without aggregate case - if t.gc.Aggregate() == nil { - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. - colReader := t.allocateBuffer(len) - colReader.cols[timeColIdx] = arrow.NewInt(arr.Timestamps, t.alloc) - colReader.cols[valueColIdx] = t.toArrowBuffer(arr.Values) - t.appendTags(colReader) - t.appendBounds(colReader) - return true - } - - aggregate, err := makeUnsignedAggregateAccumulator(t.gc.Aggregate().Type) - if err != nil { - t.err = err - return false - } - - aggregate.AccumulateFirst(arr.Timestamps, arr.Values, t.tags) - for { - arr = t.cur.Next() - if arr.Len() > 0 { - aggregate.AccumulateMore(arr.Timestamps, arr.Values, t.tags) - continue - } - - if !t.advanceCursor() { - break - } - } - timestamp, value, tags := aggregate.Result() - - colReader := t.allocateBuffer(1) - if IsSelector(t.gc.Aggregate()) { - colReader.cols[timeColIdx] = arrow.NewInt([]int64{timestamp}, t.alloc) - colReader.cols[valueColIdx] = t.toArrowBuffer([]uint64{value}) - } else { - colReader.cols[valueColIdxWithoutTime] = t.toArrowBuffer([]uint64{value}) - } - t.appendTheseTags(colReader, tags) - t.appendBounds(colReader) - return true -} - -type UnsignedAggregateAccumulator interface { - // AccumulateFirst receives an initial array of items to select from. - // It selects an item and stores the state. Afterwards, more data can - // be supplied with AccumulateMore and the results can be requested at - // any time. Without a call to AccumulateFirst the results are not - // defined. 
- AccumulateFirst(timestamps []int64, values []uint64, tags [][]byte) - - // AccumulateMore receives additional array elements to select from. - AccumulateMore(timestamps []int64, values []uint64, tags [][]byte) - - // Result returns the item selected from the data received so far. - Result() (int64, uint64, [][]byte) -} - -// The selector method takes a ( timestamp, value ) pair, a -// ( []timestamp, []value ) pair, and a starting index. It applies the selector -// to the single value and the array, starting at the supplied index. It -// returns -1 if the single value is selected and a non-negative value if an -// item from the array is selected. -type unsignedSelectorMethod func(int64, uint64, []int64, []uint64, int) int - -// The selector accumulator tracks currently-selected item. -type unsignedSelectorAccumulator struct { - selector unsignedSelectorMethod - - ts int64 - v uint64 - tags [][]byte -} - -func (a *unsignedSelectorAccumulator) AccumulateFirst(timestamps []int64, values []uint64, tags [][]byte) { - index := a.selector(timestamps[0], values[0], timestamps, values, 1) - if index < 0 { - a.ts = timestamps[0] - a.v = values[0] - } else { - a.ts = timestamps[index] - a.v = values[index] - } - a.tags = make([][]byte, len(tags)) - copy(a.tags, tags) -} - -func (a *unsignedSelectorAccumulator) AccumulateMore(timestamps []int64, values []uint64, tags [][]byte) { - index := a.selector(a.ts, a.v, timestamps, values, 0) - if index >= 0 { - a.ts = timestamps[index] - a.v = values[index] - - if len(tags) > cap(a.tags) { - a.tags = make([][]byte, len(tags)) - } else { - a.tags = a.tags[:len(tags)] - } - copy(a.tags, tags) - } -} - -func (a *unsignedSelectorAccumulator) Result() (int64, uint64, [][]byte) { - return a.ts, a.v, a.tags -} - -// The aggregate method takes a value, an array of values, and a starting -// index, applies an aggregate operation over the value and the array, starting -// at the given index, and returns the result. -type unsignedAggregateMethod func(uint64, []uint64, int) uint64 - -type unsignedAggregateAccumulator struct { - aggregate unsignedAggregateMethod - accum uint64 - - // For pure aggregates it doesn't matter what we return for tags, but - // we need to satisfy the interface. We will just return the most - // recently seen tags. - tags [][]byte -} - -func (a *unsignedAggregateAccumulator) AccumulateFirst(timestamps []int64, values []uint64, tags [][]byte) { - a.accum = a.aggregate(values[0], values, 1) - a.tags = tags -} - -func (a *unsignedAggregateAccumulator) AccumulateMore(timestamps []int64, values []uint64, tags [][]byte) { - a.accum = a.aggregate(a.accum, values, 0) - a.tags = tags -} - -// For group aggregates (non-selectors), the timestamp is always math.MaxInt64. -// their final result does not contain _time, so this timestamp value can be -// anything and it won't matter. -func (a *unsignedAggregateAccumulator) Result() (int64, uint64, [][]byte) { - return math.MaxInt64, a.accum, a.tags -} - -// makeUnsignedAggregateAccumulator returns the interface implementation for -// aggregating returned points within the same group. The incoming points are -// the ones returned for each series and the struct returned here will -// aggregate the aggregates. 
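// For example, with a max aggregate the accumulator keeps whichever
// ( timestamp, value ) pair wins across successive per-series buffers.
// A minimal, purely illustrative sketch (the buffers and the tagsA/tagsB
// tag slices here are made up, not taken from the generated code):
//
//	acc, _ := makeUnsignedAggregateAccumulator(datatypes.Aggregate_AggregateTypeMax)
//	acc.AccumulateFirst([]int64{1, 2}, []uint64{10, 30}, tagsA)
//	acc.AccumulateMore([]int64{3, 4}, []uint64{25, 40}, tagsB)
//	ts, v, tags := acc.Result() // ts == 4, v == 40, tags copied from tagsB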
-func makeUnsignedAggregateAccumulator(agg datatypes.Aggregate_AggregateType) (UnsignedAggregateAccumulator, error) { - switch agg { - case datatypes.Aggregate_AggregateTypeFirst: - return &unsignedSelectorAccumulator{selector: selectorFirstGroupsUnsigned}, nil - case datatypes.Aggregate_AggregateTypeLast: - return &unsignedSelectorAccumulator{selector: selectorLastGroupsUnsigned}, nil - case datatypes.Aggregate_AggregateTypeCount: - - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unsupported for aggregate count: Unsigned", - } - - case datatypes.Aggregate_AggregateTypeSum: - - return &unsignedAggregateAccumulator{aggregate: aggregateSumGroupsUnsigned}, nil - - case datatypes.Aggregate_AggregateTypeMin: - - return &unsignedSelectorAccumulator{selector: selectorMinGroupsUnsigned}, nil - - case datatypes.Aggregate_AggregateTypeMax: - - return &unsignedSelectorAccumulator{selector: selectorMaxGroupsUnsigned}, nil - - default: - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("unknown/unimplemented aggregate type: %v", agg), - } - } -} - -func selectorMinGroupsUnsigned(ts int64, v uint64, timestamps []int64, values []uint64, i int) int { - index := -1 - - for ; i < len(values); i++ { - if v > values[i] { - index = i - v = values[i] - } - } - - return index -} - -func selectorMaxGroupsUnsigned(ts int64, v uint64, timestamps []int64, values []uint64, i int) int { - index := -1 - - for ; i < len(values); i++ { - if v < values[i] { - index = i - v = values[i] - } - } - - return index -} - -func aggregateSumGroupsUnsigned(sum uint64, values []uint64, i int) uint64 { - for ; i < len(values); i++ { - sum += values[i] - } - return sum -} - -func selectorFirstGroupsUnsigned(ts int64, v uint64, timestamps []int64, values []uint64, i int) int { - index := -1 - - for ; i < len(values); i++ { - if ts > timestamps[i] { - index = i - ts = timestamps[i] - } - } - - return index -} - -func selectorLastGroupsUnsigned(ts int64, v uint64, timestamps []int64, values []uint64, i int) int { - index := -1 - - for ; i < len(values); i++ { - if ts <= timestamps[i] { - index = i - ts = timestamps[i] - } - } - - return index -} - -func (t *unsignedGroupTable) advanceCursor() bool { - t.cur.Close() - t.cur = nil - for t.gc.Next() { - cur := t.gc.Cursor() - if cur == nil { - continue - } - - if typedCur, ok := cur.(cursors.UnsignedArrayCursor); !ok { - // TODO(sgc): error or skip? 
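		// As written, a cursor of the wrong type is treated as a hard
		// error: the cursor is closed and an EInvalid GroupCursorError is
		// recorded, which aborts the whole group table rather than
		// skipping the series.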
- cur.Close() - t.err = &errors.Error{ - Code: errors.EInvalid, - Err: &GroupCursorError{ - typ: "unsigned", - cursor: cur, - }, - } - return false - } else { - t.readTags(t.gc.Tags()) - t.cur = typedCur - return true - } - } - return false -} - -func (t *unsignedGroupTable) Statistics() cursors.CursorStats { - if t.cur == nil { - return cursors.CursorStats{} - } - cs := t.cur.Stats() - return cursors.CursorStats{ - ScannedValues: cs.ScannedValues, - ScannedBytes: cs.ScannedBytes, - } -} - -// -// *********** String *********** -// - -type stringTable struct { - table - mu sync.Mutex - cur cursors.StringArrayCursor - alloc memory.Allocator -} - -func newStringTable( - done chan struct{}, - cur cursors.StringArrayCursor, - bounds execute.Bounds, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *stringTable { - t := &stringTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *stringTable) Close() { - t.mu.Lock() - if t.cur != nil { - t.cur.Close() - t.cur = nil - } - t.mu.Unlock() -} - -func (t *stringTable) Statistics() cursors.CursorStats { - t.mu.Lock() - defer t.mu.Unlock() - cur := t.cur - if cur == nil { - return cursors.CursorStats{} - } - cs := cur.Stats() - return cursors.CursorStats{ - ScannedValues: cs.ScannedValues, - ScannedBytes: cs.ScannedBytes, - } -} - -func (t *stringTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *stringTable) advance() bool { - a := t.cur.Next() - l := a.Len() - if l == 0 { - return false - } - - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. - cr := t.allocateBuffer(l) - cr.cols[timeColIdx] = arrow.NewInt(a.Timestamps, t.alloc) - cr.cols[valueColIdx] = t.toArrowBuffer(a.Values) - t.appendTags(cr) - t.appendBounds(cr) - return true -} - -// window table -type stringWindowTable struct { - stringTable - arr *cursors.StringArray - windowBounds interval.Bounds - idxInArr int - createEmpty bool - timeColumn string - isAggregate bool - window interval.Window -} - -func newStringWindowTable( - done chan struct{}, - cur cursors.StringArrayCursor, - bounds execute.Bounds, - window interval.Window, - createEmpty bool, - timeColumn string, - isAggregate bool, - - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *stringWindowTable { - t := &stringWindowTable{ - stringTable: stringTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - window: window, - createEmpty: createEmpty, - timeColumn: timeColumn, - isAggregate: isAggregate, - } - if t.createEmpty { - start := int64(bounds.Start) - t.windowBounds = window.GetLatestBounds(values.Time(start)) - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *stringWindowTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -// createNextBufferTimes will read the timestamps from the array -// cursor and construct the values for the next buffer. 
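// When createEmpty is set, the window bounds themselves drive the output:
// every window between the query start and stop is materialized, clamped to
// the query range. With a plain fixed window of 30ns aligned at zero and a
// range of [0, 100), for instance (an illustrative assumption, not a value
// used by this code), the emitted pairs would be
// (0, 30), (30, 60), (60, 90), (90, 100).
// Otherwise the stored timestamps are used directly: each one is treated as
// the stop time of a populated window, so PrevBounds(GetLatestBounds(ts))
// recovers the window that ends at that timestamp before it is clamped in
// the same way.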
-func (t *stringWindowTable) createNextBufferTimes() (start, stop *array.Int, ok bool) { - startB := arrow.NewIntBuilder(t.alloc) - stopB := arrow.NewIntBuilder(t.alloc) - - if t.createEmpty { - // There are no more windows when the start time is greater - // than or equal to the stop time. - if startT := int64(t.windowBounds.Start()); startT >= int64(t.bounds.Stop) { - return nil, nil, false - } - - // Create a buffer with the buffer size. - // TODO(jsternberg): Calculate the exact size with max points as the maximum. - startB.Resize(storage.MaxPointsPerBlock) - stopB.Resize(storage.MaxPointsPerBlock) - for ; ; t.windowBounds = t.window.NextBounds(t.windowBounds) { - startT, stopT := t.getWindowBoundsFor(t.windowBounds) - if startT >= int64(t.bounds.Stop) { - break - } - startB.Append(startT) - stopB.Append(stopT) - } - start = startB.NewIntArray() - stop = stopB.NewIntArray() - return start, stop, true - } - - // Retrieve the next buffer so we can copy the timestamps. - if !t.nextBuffer() { - return nil, nil, false - } - - // Copy over the timestamps from the next buffer and adjust - // times for the boundaries. - startB.Resize(len(t.arr.Timestamps)) - stopB.Resize(len(t.arr.Timestamps)) - for _, stopT := range t.arr.Timestamps { - bounds := t.window.PrevBounds(t.window.GetLatestBounds(values.Time(stopT))) - startT, stopT := t.getWindowBoundsFor(bounds) - startB.Append(startT) - stopB.Append(stopT) - } - start = startB.NewIntArray() - stop = stopB.NewIntArray() - return start, stop, true -} - -func (t *stringWindowTable) getWindowBoundsFor(bounds interval.Bounds) (int64, int64) { - beg := int64(bounds.Start()) - end := int64(bounds.Stop()) - if beg < int64(t.bounds.Start) { - beg = int64(t.bounds.Start) - } - if end > int64(t.bounds.Stop) { - end = int64(t.bounds.Stop) - } - return beg, end -} - -// nextAt will retrieve the next value that can be used with -// the given stop timestamp. If no values can be used with the timestamp, -// it will return the default value and false. -func (t *stringWindowTable) nextAt(stop int64) (v string, ok bool) { - if !t.nextBuffer() { - return - } else if !t.isInWindow(stop, t.arr.Timestamps[t.idxInArr]) { - return - } - v, ok = t.arr.Values[t.idxInArr], true - t.idxInArr++ - return v, ok -} - -// isInWindow will check if the given time may be used within the window -// denoted by the stop timestamp. The stop may be a truncated stop time -// because of a restricted boundary. -// -// When used with an aggregate, ts will be the true stop time returned -// by storage. When used with an aggregate, it will be the real time -// for the point. -func (t *stringWindowTable) isInWindow(stop int64, ts int64) bool { - // Retrieve the boundary associated with this stop time. - // This will be the boundary for the previous nanosecond. - bounds := t.window.GetLatestBounds(values.Time(stop - 1)) - start, stop := int64(bounds.Start()), int64(bounds.Stop()) - - // For an aggregate, the timestamp will be the stop time of the boundary. - if t.isAggregate { - return start < ts && ts <= stop - } - - // For a selector, the timestamp should be within the boundary. - return start <= ts && ts < stop -} - -// nextBuffer will ensure the array cursor is filled -// and will return true if there is at least one value -// that can be read from it. -func (t *stringWindowTable) nextBuffer() bool { - // Discard the current array cursor if we have - // exceeded it. - if t.arr != nil && t.idxInArr >= t.arr.Len() { - t.arr = nil - } - - // Retrieve the next array cursor if needed. 
- if t.arr == nil { - arr := t.cur.Next() - if arr.Len() == 0 { - return false - } - t.arr, t.idxInArr = arr, 0 - } - return true -} - -// appendValues will scan the timestamps and append values -// that match those timestamps from the buffer. -func (t *stringWindowTable) appendValues(intervals []int64, appendValue func(v string), appendNull func()) { - for i := 0; i < len(intervals); i++ { - if v, ok := t.nextAt(intervals[i]); ok { - appendValue(v) - continue - } - appendNull() - } -} - -func (t *stringWindowTable) advance() bool { - if !t.nextBuffer() { - return false - } - // Create the timestamps for the next window. - start, stop, ok := t.createNextBufferTimes() - if !ok { - return false - } - values := t.mergeValues(stop.Int64Values()) - - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. - cr := t.allocateBuffer(stop.Len()) - if t.timeColumn != "" { - switch t.timeColumn { - case execute.DefaultStopColLabel: - cr.cols[timeColIdx] = stop - start.Release() - case execute.DefaultStartColLabel: - cr.cols[timeColIdx] = start - stop.Release() - } - cr.cols[valueColIdx] = values - t.appendBounds(cr) - } else { - cr.cols[startColIdx] = start - cr.cols[stopColIdx] = stop - cr.cols[valueColIdxWithoutTime] = values - } - t.appendTags(cr) - return true -} - -// This table implementation will not have any empty windows. -type stringWindowSelectorTable struct { - stringTable - timeColumn string - window interval.Window -} - -func newStringWindowSelectorTable( - done chan struct{}, - cur cursors.StringArrayCursor, - bounds execute.Bounds, - window interval.Window, - timeColumn string, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *stringWindowSelectorTable { - t := &stringWindowSelectorTable{ - stringTable: stringTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - window: window, - timeColumn: timeColumn, - } - t.readTags(tags) - t.init(t.advance) - return t -} - -func (t *stringWindowSelectorTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *stringWindowSelectorTable) advance() bool { - arr := t.cur.Next() - if arr.Len() == 0 { - return false - } - - cr := t.allocateBuffer(arr.Len()) - - switch t.timeColumn { - case execute.DefaultStartColLabel: - cr.cols[timeColIdx] = t.startTimes(arr) - t.appendBounds(cr) - case execute.DefaultStopColLabel: - cr.cols[timeColIdx] = t.stopTimes(arr) - t.appendBounds(cr) - default: - cr.cols[startColIdx] = t.startTimes(arr) - cr.cols[stopColIdx] = t.stopTimes(arr) - cr.cols[timeColIdx] = arrow.NewInt(arr.Timestamps, t.alloc) - } - - cr.cols[valueColIdx] = t.toArrowBuffer(arr.Values) - t.appendTags(cr) - return true -} - -func (t *stringWindowSelectorTable) startTimes(arr *cursors.StringArray) *array.Int { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(arr.Len()) - - rangeStart := int64(t.bounds.Start) - - for _, v := range arr.Timestamps { - if windowStart := int64(t.window.GetLatestBounds(values.Time(v)).Start()); windowStart < rangeStart { - start.Append(rangeStart) - } else { - start.Append(windowStart) - } - } - return start.NewIntArray() -} - -func (t *stringWindowSelectorTable) stopTimes(arr *cursors.StringArray) *array.Int { - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(arr.Len()) - - rangeStop := int64(t.bounds.Stop) - - for _, v := 
range arr.Timestamps { - if windowStop := int64(t.window.GetLatestBounds(values.Time(v)).Stop()); windowStop > rangeStop { - stop.Append(rangeStop) - } else { - stop.Append(windowStop) - } - } - return stop.NewIntArray() -} - -// This table implementation may contain empty windows -// in addition to non-empty windows. -type stringEmptyWindowSelectorTable struct { - stringTable - arr *cursors.StringArray - idx int - rangeStart int64 - rangeStop int64 - windowBounds interval.Bounds - timeColumn string - window interval.Window -} - -func newStringEmptyWindowSelectorTable( - done chan struct{}, - cur cursors.StringArrayCursor, - bounds execute.Bounds, - window interval.Window, - timeColumn string, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *stringEmptyWindowSelectorTable { - rangeStart := int64(bounds.Start) - rangeStop := int64(bounds.Stop) - t := &stringEmptyWindowSelectorTable{ - stringTable: stringTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - arr: cur.Next(), - rangeStart: rangeStart, - rangeStop: rangeStop, - windowBounds: window.GetLatestBounds(values.Time(rangeStart)), - window: window, - timeColumn: timeColumn, - } - t.readTags(tags) - t.init(t.advance) - return t -} - -func (t *stringEmptyWindowSelectorTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *stringEmptyWindowSelectorTable) advance() bool { - if t.arr.Len() == 0 { - return false - } - - values := t.arrowBuilder() - values.Resize(storage.MaxPointsPerBlock) - - var cr *colReader - - switch t.timeColumn { - case execute.DefaultStartColLabel: - start := t.startTimes(values) - cr = t.allocateBuffer(start.Len()) - cr.cols[timeColIdx] = start - t.appendBounds(cr) - case execute.DefaultStopColLabel: - stop := t.stopTimes(values) - cr = t.allocateBuffer(stop.Len()) - cr.cols[timeColIdx] = stop - t.appendBounds(cr) - default: - start, stop, time := t.startStopTimes(values) - cr = t.allocateBuffer(time.Len()) - cr.cols[startColIdx] = start - cr.cols[stopColIdx] = stop - cr.cols[timeColIdx] = time - } - - cr.cols[valueColIdx] = values.NewStringArray() - t.appendTags(cr) - return true -} - -func (t *stringEmptyWindowSelectorTable) startTimes(builder *array.StringBuilder) *array.Int { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - // The first window should start at the - // beginning of the time range. - if int64(t.windowBounds.Start()) < t.rangeStart { - start.Append(t.rangeStart) - } else { - start.Append(int64(t.windowBounds.Start())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). 
- if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if start.Len() == storage.MaxPointsPerBlock { - break - } - } - return start.NewIntArray() -} - -func (t *stringEmptyWindowSelectorTable) stopTimes(builder *array.StringBuilder) *array.Int { - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - // The last window should stop at the end of - // the time range. - if int64(t.windowBounds.Stop()) > t.rangeStop { - stop.Append(t.rangeStop) - } else { - stop.Append(int64(t.windowBounds.Stop())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). - if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if stop.Len() == storage.MaxPointsPerBlock { - break - } - } - return stop.NewIntArray() -} - -func (t *stringEmptyWindowSelectorTable) startStopTimes(builder *array.StringBuilder) (*array.Int, *array.Int, *array.Int) { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(storage.MaxPointsPerBlock) - - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(storage.MaxPointsPerBlock) - - time := arrow.NewIntBuilder(t.alloc) - time.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - - // The first window should start at the - // beginning of the time range. - if int64(t.windowBounds.Start()) < t.rangeStart { - start.Append(t.rangeStart) - } else { - start.Append(int64(t.windowBounds.Start())) - } - - // The last window should stop at the end of - // the time range. - if int64(t.windowBounds.Stop()) > t.rangeStop { - stop.Append(t.rangeStop) - } else { - stop.Append(int64(t.windowBounds.Stop())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - time.Append(v) - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - time.AppendNull() - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). 
- if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if time.Len() == storage.MaxPointsPerBlock { - break - } - } - return start.NewIntArray(), stop.NewIntArray(), time.NewIntArray() -} - -// group table - -type stringGroupTable struct { - table - mu sync.Mutex - gc storage.GroupCursor - cur cursors.StringArrayCursor -} - -func newStringGroupTable( - done chan struct{}, - gc storage.GroupCursor, - cur cursors.StringArrayCursor, - bounds execute.Bounds, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *stringGroupTable { - t := &stringGroupTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - gc: gc, - cur: cur, - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *stringGroupTable) Close() { - t.mu.Lock() - if t.cur != nil { - t.cur.Close() - t.cur = nil - } - if t.gc != nil { - t.gc.Close() - t.gc = nil - } - t.mu.Unlock() -} - -func (t *stringGroupTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *stringGroupTable) advance() bool { - if t.cur == nil { - // For group aggregates, we will try to get all the series and all table buffers within those series - // all at once and merge them into one row when this advance() function is first called. - // At the end of this process, t.advanceCursor() already returns false and t.cur becomes nil. - // But we still need to return true to indicate that there is data to be returned. - // The second time when we call this advance(), t.cur is already nil, so we directly return false. - return false - } - var arr *cursors.StringArray - var len int - for { - arr = t.cur.Next() - len = arr.Len() - if len > 0 { - break - } - if !t.advanceCursor() { - return false - } - } - - // handle the group without aggregate case - if t.gc.Aggregate() == nil { - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. - colReader := t.allocateBuffer(len) - colReader.cols[timeColIdx] = arrow.NewInt(arr.Timestamps, t.alloc) - colReader.cols[valueColIdx] = t.toArrowBuffer(arr.Values) - t.appendTags(colReader) - t.appendBounds(colReader) - return true - } - - aggregate, err := makeStringAggregateAccumulator(t.gc.Aggregate().Type) - if err != nil { - t.err = err - return false - } - - aggregate.AccumulateFirst(arr.Timestamps, arr.Values, t.tags) - for { - arr = t.cur.Next() - if arr.Len() > 0 { - aggregate.AccumulateMore(arr.Timestamps, arr.Values, t.tags) - continue - } - - if !t.advanceCursor() { - break - } - } - timestamp, value, tags := aggregate.Result() - - colReader := t.allocateBuffer(1) - if IsSelector(t.gc.Aggregate()) { - colReader.cols[timeColIdx] = arrow.NewInt([]int64{timestamp}, t.alloc) - colReader.cols[valueColIdx] = t.toArrowBuffer([]string{value}) - } else { - colReader.cols[valueColIdxWithoutTime] = t.toArrowBuffer([]string{value}) - } - t.appendTheseTags(colReader, tags) - t.appendBounds(colReader) - return true -} - -type StringAggregateAccumulator interface { - // AccumulateFirst receives an initial array of items to select from. - // It selects an item and stores the state. Afterwards, more data can - // be supplied with AccumulateMore and the results can be requested at - // any time. Without a call to AccumulateFirst the results are not - // defined. 
- AccumulateFirst(timestamps []int64, values []string, tags [][]byte) - - // AccumulateMore receives additional array elements to select from. - AccumulateMore(timestamps []int64, values []string, tags [][]byte) - - // Result returns the item selected from the data received so far. - Result() (int64, string, [][]byte) -} - -// The selector method takes a ( timestamp, value ) pair, a -// ( []timestamp, []value ) pair, and a starting index. It applies the selector -// to the single value and the array, starting at the supplied index. It -// returns -1 if the single value is selected and a non-negative value if an -// item from the array is selected. -type stringSelectorMethod func(int64, string, []int64, []string, int) int - -// The selector accumulator tracks currently-selected item. -type stringSelectorAccumulator struct { - selector stringSelectorMethod - - ts int64 - v string - tags [][]byte -} - -func (a *stringSelectorAccumulator) AccumulateFirst(timestamps []int64, values []string, tags [][]byte) { - index := a.selector(timestamps[0], values[0], timestamps, values, 1) - if index < 0 { - a.ts = timestamps[0] - a.v = values[0] - } else { - a.ts = timestamps[index] - a.v = values[index] - } - a.tags = make([][]byte, len(tags)) - copy(a.tags, tags) -} - -func (a *stringSelectorAccumulator) AccumulateMore(timestamps []int64, values []string, tags [][]byte) { - index := a.selector(a.ts, a.v, timestamps, values, 0) - if index >= 0 { - a.ts = timestamps[index] - a.v = values[index] - - if len(tags) > cap(a.tags) { - a.tags = make([][]byte, len(tags)) - } else { - a.tags = a.tags[:len(tags)] - } - copy(a.tags, tags) - } -} - -func (a *stringSelectorAccumulator) Result() (int64, string, [][]byte) { - return a.ts, a.v, a.tags -} - -// makeStringAggregateAccumulator returns the interface implementation for -// aggregating returned points within the same group. The incoming points are -// the ones returned for each series and the struct returned here will -// aggregate the aggregates. 
-func makeStringAggregateAccumulator(agg datatypes.Aggregate_AggregateType) (StringAggregateAccumulator, error) { - switch agg { - case datatypes.Aggregate_AggregateTypeFirst: - return &stringSelectorAccumulator{selector: selectorFirstGroupsString}, nil - case datatypes.Aggregate_AggregateTypeLast: - return &stringSelectorAccumulator{selector: selectorLastGroupsString}, nil - case datatypes.Aggregate_AggregateTypeCount: - - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unsupported for aggregate count: String", - } - - case datatypes.Aggregate_AggregateTypeSum: - - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unsupported for aggregate sum: String", - } - - case datatypes.Aggregate_AggregateTypeMin: - - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unsupported for aggregate min: String", - } - - case datatypes.Aggregate_AggregateTypeMax: - - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unsupported for aggregate max: String", - } - - default: - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("unknown/unimplemented aggregate type: %v", agg), - } - } -} - -func selectorFirstGroupsString(ts int64, v string, timestamps []int64, values []string, i int) int { - index := -1 - - for ; i < len(values); i++ { - if ts > timestamps[i] { - index = i - ts = timestamps[i] - } - } - - return index -} - -func selectorLastGroupsString(ts int64, v string, timestamps []int64, values []string, i int) int { - index := -1 - - for ; i < len(values); i++ { - if ts <= timestamps[i] { - index = i - ts = timestamps[i] - } - } - - return index -} - -func (t *stringGroupTable) advanceCursor() bool { - t.cur.Close() - t.cur = nil - for t.gc.Next() { - cur := t.gc.Cursor() - if cur == nil { - continue - } - - if typedCur, ok := cur.(cursors.StringArrayCursor); !ok { - // TODO(sgc): error or skip? 
- cur.Close() - t.err = &errors.Error{ - Code: errors.EInvalid, - Err: &GroupCursorError{ - typ: "string", - cursor: cur, - }, - } - return false - } else { - t.readTags(t.gc.Tags()) - t.cur = typedCur - return true - } - } - return false -} - -func (t *stringGroupTable) Statistics() cursors.CursorStats { - if t.cur == nil { - return cursors.CursorStats{} - } - cs := t.cur.Stats() - return cursors.CursorStats{ - ScannedValues: cs.ScannedValues, - ScannedBytes: cs.ScannedBytes, - } -} - -// -// *********** Boolean *********** -// - -type booleanTable struct { - table - mu sync.Mutex - cur cursors.BooleanArrayCursor - alloc memory.Allocator -} - -func newBooleanTable( - done chan struct{}, - cur cursors.BooleanArrayCursor, - bounds execute.Bounds, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *booleanTable { - t := &booleanTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *booleanTable) Close() { - t.mu.Lock() - if t.cur != nil { - t.cur.Close() - t.cur = nil - } - t.mu.Unlock() -} - -func (t *booleanTable) Statistics() cursors.CursorStats { - t.mu.Lock() - defer t.mu.Unlock() - cur := t.cur - if cur == nil { - return cursors.CursorStats{} - } - cs := cur.Stats() - return cursors.CursorStats{ - ScannedValues: cs.ScannedValues, - ScannedBytes: cs.ScannedBytes, - } -} - -func (t *booleanTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *booleanTable) advance() bool { - a := t.cur.Next() - l := a.Len() - if l == 0 { - return false - } - - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. - cr := t.allocateBuffer(l) - cr.cols[timeColIdx] = arrow.NewInt(a.Timestamps, t.alloc) - cr.cols[valueColIdx] = t.toArrowBuffer(a.Values) - t.appendTags(cr) - t.appendBounds(cr) - return true -} - -// window table -type booleanWindowTable struct { - booleanTable - arr *cursors.BooleanArray - windowBounds interval.Bounds - idxInArr int - createEmpty bool - timeColumn string - isAggregate bool - window interval.Window -} - -func newBooleanWindowTable( - done chan struct{}, - cur cursors.BooleanArrayCursor, - bounds execute.Bounds, - window interval.Window, - createEmpty bool, - timeColumn string, - isAggregate bool, - - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *booleanWindowTable { - t := &booleanWindowTable{ - booleanTable: booleanTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - window: window, - createEmpty: createEmpty, - timeColumn: timeColumn, - isAggregate: isAggregate, - } - if t.createEmpty { - start := int64(bounds.Start) - t.windowBounds = window.GetLatestBounds(values.Time(start)) - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *booleanWindowTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -// createNextBufferTimes will read the timestamps from the array -// cursor and construct the values for the next buffer. 
-func (t *booleanWindowTable) createNextBufferTimes() (start, stop *array.Int, ok bool) { - startB := arrow.NewIntBuilder(t.alloc) - stopB := arrow.NewIntBuilder(t.alloc) - - if t.createEmpty { - // There are no more windows when the start time is greater - // than or equal to the stop time. - if startT := int64(t.windowBounds.Start()); startT >= int64(t.bounds.Stop) { - return nil, nil, false - } - - // Create a buffer with the buffer size. - // TODO(jsternberg): Calculate the exact size with max points as the maximum. - startB.Resize(storage.MaxPointsPerBlock) - stopB.Resize(storage.MaxPointsPerBlock) - for ; ; t.windowBounds = t.window.NextBounds(t.windowBounds) { - startT, stopT := t.getWindowBoundsFor(t.windowBounds) - if startT >= int64(t.bounds.Stop) { - break - } - startB.Append(startT) - stopB.Append(stopT) - } - start = startB.NewIntArray() - stop = stopB.NewIntArray() - return start, stop, true - } - - // Retrieve the next buffer so we can copy the timestamps. - if !t.nextBuffer() { - return nil, nil, false - } - - // Copy over the timestamps from the next buffer and adjust - // times for the boundaries. - startB.Resize(len(t.arr.Timestamps)) - stopB.Resize(len(t.arr.Timestamps)) - for _, stopT := range t.arr.Timestamps { - bounds := t.window.PrevBounds(t.window.GetLatestBounds(values.Time(stopT))) - startT, stopT := t.getWindowBoundsFor(bounds) - startB.Append(startT) - stopB.Append(stopT) - } - start = startB.NewIntArray() - stop = stopB.NewIntArray() - return start, stop, true -} - -func (t *booleanWindowTable) getWindowBoundsFor(bounds interval.Bounds) (int64, int64) { - beg := int64(bounds.Start()) - end := int64(bounds.Stop()) - if beg < int64(t.bounds.Start) { - beg = int64(t.bounds.Start) - } - if end > int64(t.bounds.Stop) { - end = int64(t.bounds.Stop) - } - return beg, end -} - -// nextAt will retrieve the next value that can be used with -// the given stop timestamp. If no values can be used with the timestamp, -// it will return the default value and false. -func (t *booleanWindowTable) nextAt(stop int64) (v bool, ok bool) { - if !t.nextBuffer() { - return - } else if !t.isInWindow(stop, t.arr.Timestamps[t.idxInArr]) { - return - } - v, ok = t.arr.Values[t.idxInArr], true - t.idxInArr++ - return v, ok -} - -// isInWindow will check if the given time may be used within the window -// denoted by the stop timestamp. The stop may be a truncated stop time -// because of a restricted boundary. -// -// When used with an aggregate, ts will be the true stop time returned -// by storage. When used with an aggregate, it will be the real time -// for the point. -func (t *booleanWindowTable) isInWindow(stop int64, ts int64) bool { - // Retrieve the boundary associated with this stop time. - // This will be the boundary for the previous nanosecond. - bounds := t.window.GetLatestBounds(values.Time(stop - 1)) - start, stop := int64(bounds.Start()), int64(bounds.Stop()) - - // For an aggregate, the timestamp will be the stop time of the boundary. - if t.isAggregate { - return start < ts && ts <= stop - } - - // For a selector, the timestamp should be within the boundary. - return start <= ts && ts < stop -} - -// nextBuffer will ensure the array cursor is filled -// and will return true if there is at least one value -// that can be read from it. -func (t *booleanWindowTable) nextBuffer() bool { - // Discard the current array cursor if we have - // exceeded it. - if t.arr != nil && t.idxInArr >= t.arr.Len() { - t.arr = nil - } - - // Retrieve the next array cursor if needed. 
- if t.arr == nil { - arr := t.cur.Next() - if arr.Len() == 0 { - return false - } - t.arr, t.idxInArr = arr, 0 - } - return true -} - -// appendValues will scan the timestamps and append values -// that match those timestamps from the buffer. -func (t *booleanWindowTable) appendValues(intervals []int64, appendValue func(v bool), appendNull func()) { - for i := 0; i < len(intervals); i++ { - if v, ok := t.nextAt(intervals[i]); ok { - appendValue(v) - continue - } - appendNull() - } -} - -func (t *booleanWindowTable) advance() bool { - if !t.nextBuffer() { - return false - } - // Create the timestamps for the next window. - start, stop, ok := t.createNextBufferTimes() - if !ok { - return false - } - values := t.mergeValues(stop.Int64Values()) - - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. - cr := t.allocateBuffer(stop.Len()) - if t.timeColumn != "" { - switch t.timeColumn { - case execute.DefaultStopColLabel: - cr.cols[timeColIdx] = stop - start.Release() - case execute.DefaultStartColLabel: - cr.cols[timeColIdx] = start - stop.Release() - } - cr.cols[valueColIdx] = values - t.appendBounds(cr) - } else { - cr.cols[startColIdx] = start - cr.cols[stopColIdx] = stop - cr.cols[valueColIdxWithoutTime] = values - } - t.appendTags(cr) - return true -} - -// This table implementation will not have any empty windows. -type booleanWindowSelectorTable struct { - booleanTable - timeColumn string - window interval.Window -} - -func newBooleanWindowSelectorTable( - done chan struct{}, - cur cursors.BooleanArrayCursor, - bounds execute.Bounds, - window interval.Window, - timeColumn string, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *booleanWindowSelectorTable { - t := &booleanWindowSelectorTable{ - booleanTable: booleanTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - window: window, - timeColumn: timeColumn, - } - t.readTags(tags) - t.init(t.advance) - return t -} - -func (t *booleanWindowSelectorTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *booleanWindowSelectorTable) advance() bool { - arr := t.cur.Next() - if arr.Len() == 0 { - return false - } - - cr := t.allocateBuffer(arr.Len()) - - switch t.timeColumn { - case execute.DefaultStartColLabel: - cr.cols[timeColIdx] = t.startTimes(arr) - t.appendBounds(cr) - case execute.DefaultStopColLabel: - cr.cols[timeColIdx] = t.stopTimes(arr) - t.appendBounds(cr) - default: - cr.cols[startColIdx] = t.startTimes(arr) - cr.cols[stopColIdx] = t.stopTimes(arr) - cr.cols[timeColIdx] = arrow.NewInt(arr.Timestamps, t.alloc) - } - - cr.cols[valueColIdx] = t.toArrowBuffer(arr.Values) - t.appendTags(cr) - return true -} - -func (t *booleanWindowSelectorTable) startTimes(arr *cursors.BooleanArray) *array.Int { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(arr.Len()) - - rangeStart := int64(t.bounds.Start) - - for _, v := range arr.Timestamps { - if windowStart := int64(t.window.GetLatestBounds(values.Time(v)).Start()); windowStart < rangeStart { - start.Append(rangeStart) - } else { - start.Append(windowStart) - } - } - return start.NewIntArray() -} - -func (t *booleanWindowSelectorTable) stopTimes(arr *cursors.BooleanArray) *array.Int { - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(arr.Len()) - - rangeStop := int64(t.bounds.Stop) - - 
for _, v := range arr.Timestamps { - if windowStop := int64(t.window.GetLatestBounds(values.Time(v)).Stop()); windowStop > rangeStop { - stop.Append(rangeStop) - } else { - stop.Append(windowStop) - } - } - return stop.NewIntArray() -} - -// This table implementation may contain empty windows -// in addition to non-empty windows. -type booleanEmptyWindowSelectorTable struct { - booleanTable - arr *cursors.BooleanArray - idx int - rangeStart int64 - rangeStop int64 - windowBounds interval.Bounds - timeColumn string - window interval.Window -} - -func newBooleanEmptyWindowSelectorTable( - done chan struct{}, - cur cursors.BooleanArrayCursor, - bounds execute.Bounds, - window interval.Window, - timeColumn string, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *booleanEmptyWindowSelectorTable { - rangeStart := int64(bounds.Start) - rangeStop := int64(bounds.Stop) - t := &booleanEmptyWindowSelectorTable{ - booleanTable: booleanTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - arr: cur.Next(), - rangeStart: rangeStart, - rangeStop: rangeStop, - windowBounds: window.GetLatestBounds(values.Time(rangeStart)), - window: window, - timeColumn: timeColumn, - } - t.readTags(tags) - t.init(t.advance) - return t -} - -func (t *booleanEmptyWindowSelectorTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *booleanEmptyWindowSelectorTable) advance() bool { - if t.arr.Len() == 0 { - return false - } - - values := t.arrowBuilder() - values.Resize(storage.MaxPointsPerBlock) - - var cr *colReader - - switch t.timeColumn { - case execute.DefaultStartColLabel: - start := t.startTimes(values) - cr = t.allocateBuffer(start.Len()) - cr.cols[timeColIdx] = start - t.appendBounds(cr) - case execute.DefaultStopColLabel: - stop := t.stopTimes(values) - cr = t.allocateBuffer(stop.Len()) - cr.cols[timeColIdx] = stop - t.appendBounds(cr) - default: - start, stop, time := t.startStopTimes(values) - cr = t.allocateBuffer(time.Len()) - cr.cols[startColIdx] = start - cr.cols[stopColIdx] = stop - cr.cols[timeColIdx] = time - } - - cr.cols[valueColIdx] = values.NewBooleanArray() - t.appendTags(cr) - return true -} - -func (t *booleanEmptyWindowSelectorTable) startTimes(builder *array.BooleanBuilder) *array.Int { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - // The first window should start at the - // beginning of the time range. - if int64(t.windowBounds.Start()) < t.rangeStart { - start.Append(t.rangeStart) - } else { - start.Append(int64(t.windowBounds.Start())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). 
- if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if start.Len() == storage.MaxPointsPerBlock { - break - } - } - return start.NewIntArray() -} - -func (t *booleanEmptyWindowSelectorTable) stopTimes(builder *array.BooleanBuilder) *array.Int { - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - // The last window should stop at the end of - // the time range. - if int64(t.windowBounds.Stop()) > t.rangeStop { - stop.Append(t.rangeStop) - } else { - stop.Append(int64(t.windowBounds.Stop())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). - if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if stop.Len() == storage.MaxPointsPerBlock { - break - } - } - return stop.NewIntArray() -} - -func (t *booleanEmptyWindowSelectorTable) startStopTimes(builder *array.BooleanBuilder) (*array.Int, *array.Int, *array.Int) { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(storage.MaxPointsPerBlock) - - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(storage.MaxPointsPerBlock) - - time := arrow.NewIntBuilder(t.alloc) - time.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - - // The first window should start at the - // beginning of the time range. - if int64(t.windowBounds.Start()) < t.rangeStart { - start.Append(t.rangeStart) - } else { - start.Append(int64(t.windowBounds.Start())) - } - - // The last window should stop at the end of - // the time range. - if int64(t.windowBounds.Stop()) > t.rangeStop { - stop.Append(t.rangeStop) - } else { - stop.Append(int64(t.windowBounds.Stop())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - time.Append(v) - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - time.AppendNull() - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). 
- if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if time.Len() == storage.MaxPointsPerBlock { - break - } - } - return start.NewIntArray(), stop.NewIntArray(), time.NewIntArray() -} - -// group table - -type booleanGroupTable struct { - table - mu sync.Mutex - gc storage.GroupCursor - cur cursors.BooleanArrayCursor -} - -func newBooleanGroupTable( - done chan struct{}, - gc storage.GroupCursor, - cur cursors.BooleanArrayCursor, - bounds execute.Bounds, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *booleanGroupTable { - t := &booleanGroupTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - gc: gc, - cur: cur, - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *booleanGroupTable) Close() { - t.mu.Lock() - if t.cur != nil { - t.cur.Close() - t.cur = nil - } - if t.gc != nil { - t.gc.Close() - t.gc = nil - } - t.mu.Unlock() -} - -func (t *booleanGroupTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *booleanGroupTable) advance() bool { - if t.cur == nil { - // For group aggregates, we will try to get all the series and all table buffers within those series - // all at once and merge them into one row when this advance() function is first called. - // At the end of this process, t.advanceCursor() already returns false and t.cur becomes nil. - // But we still need to return true to indicate that there is data to be returned. - // The second time when we call this advance(), t.cur is already nil, so we directly return false. - return false - } - var arr *cursors.BooleanArray - var len int - for { - arr = t.cur.Next() - len = arr.Len() - if len > 0 { - break - } - if !t.advanceCursor() { - return false - } - } - - // handle the group without aggregate case - if t.gc.Aggregate() == nil { - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. - colReader := t.allocateBuffer(len) - colReader.cols[timeColIdx] = arrow.NewInt(arr.Timestamps, t.alloc) - colReader.cols[valueColIdx] = t.toArrowBuffer(arr.Values) - t.appendTags(colReader) - t.appendBounds(colReader) - return true - } - - aggregate, err := makeBooleanAggregateAccumulator(t.gc.Aggregate().Type) - if err != nil { - t.err = err - return false - } - - aggregate.AccumulateFirst(arr.Timestamps, arr.Values, t.tags) - for { - arr = t.cur.Next() - if arr.Len() > 0 { - aggregate.AccumulateMore(arr.Timestamps, arr.Values, t.tags) - continue - } - - if !t.advanceCursor() { - break - } - } - timestamp, value, tags := aggregate.Result() - - colReader := t.allocateBuffer(1) - if IsSelector(t.gc.Aggregate()) { - colReader.cols[timeColIdx] = arrow.NewInt([]int64{timestamp}, t.alloc) - colReader.cols[valueColIdx] = t.toArrowBuffer([]bool{value}) - } else { - colReader.cols[valueColIdxWithoutTime] = t.toArrowBuffer([]bool{value}) - } - t.appendTheseTags(colReader, tags) - t.appendBounds(colReader) - return true -} - -type BooleanAggregateAccumulator interface { - // AccumulateFirst receives an initial array of items to select from. - // It selects an item and stores the state. Afterwards, more data can - // be supplied with AccumulateMore and the results can be requested at - // any time. Without a call to AccumulateFirst the results are not - // defined. 
- AccumulateFirst(timestamps []int64, values []bool, tags [][]byte) - - // AccumulateMore receives additional array elements to select from. - AccumulateMore(timestamps []int64, values []bool, tags [][]byte) - - // Result returns the item selected from the data received so far. - Result() (int64, bool, [][]byte) -} - -// The selector method takes a ( timestamp, value ) pair, a -// ( []timestamp, []value ) pair, and a starting index. It applies the selector -// to the single value and the array, starting at the supplied index. It -// returns -1 if the single value is selected and a non-negative value if an -// item from the array is selected. -type booleanSelectorMethod func(int64, bool, []int64, []bool, int) int - -// The selector accumulator tracks currently-selected item. -type booleanSelectorAccumulator struct { - selector booleanSelectorMethod - - ts int64 - v bool - tags [][]byte -} - -func (a *booleanSelectorAccumulator) AccumulateFirst(timestamps []int64, values []bool, tags [][]byte) { - index := a.selector(timestamps[0], values[0], timestamps, values, 1) - if index < 0 { - a.ts = timestamps[0] - a.v = values[0] - } else { - a.ts = timestamps[index] - a.v = values[index] - } - a.tags = make([][]byte, len(tags)) - copy(a.tags, tags) -} - -func (a *booleanSelectorAccumulator) AccumulateMore(timestamps []int64, values []bool, tags [][]byte) { - index := a.selector(a.ts, a.v, timestamps, values, 0) - if index >= 0 { - a.ts = timestamps[index] - a.v = values[index] - - if len(tags) > cap(a.tags) { - a.tags = make([][]byte, len(tags)) - } else { - a.tags = a.tags[:len(tags)] - } - copy(a.tags, tags) - } -} - -func (a *booleanSelectorAccumulator) Result() (int64, bool, [][]byte) { - return a.ts, a.v, a.tags -} - -// makeBooleanAggregateAccumulator returns the interface implementation for -// aggregating returned points within the same group. The incoming points are -// the ones returned for each series and the struct returned here will -// aggregate the aggregates. 
-func makeBooleanAggregateAccumulator(agg datatypes.Aggregate_AggregateType) (BooleanAggregateAccumulator, error) { - switch agg { - case datatypes.Aggregate_AggregateTypeFirst: - return &booleanSelectorAccumulator{selector: selectorFirstGroupsBoolean}, nil - case datatypes.Aggregate_AggregateTypeLast: - return &booleanSelectorAccumulator{selector: selectorLastGroupsBoolean}, nil - case datatypes.Aggregate_AggregateTypeCount: - - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unsupported for aggregate count: Boolean", - } - - case datatypes.Aggregate_AggregateTypeSum: - - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unsupported for aggregate sum: Boolean", - } - - case datatypes.Aggregate_AggregateTypeMin: - - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unsupported for aggregate min: Boolean", - } - - case datatypes.Aggregate_AggregateTypeMax: - - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unsupported for aggregate max: Boolean", - } - - default: - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("unknown/unimplemented aggregate type: %v", agg), - } - } -} - -func selectorFirstGroupsBoolean(ts int64, v bool, timestamps []int64, values []bool, i int) int { - index := -1 - - for ; i < len(values); i++ { - if ts > timestamps[i] { - index = i - ts = timestamps[i] - } - } - - return index -} - -func selectorLastGroupsBoolean(ts int64, v bool, timestamps []int64, values []bool, i int) int { - index := -1 - - for ; i < len(values); i++ { - if ts <= timestamps[i] { - index = i - ts = timestamps[i] - } - } - - return index -} - -func (t *booleanGroupTable) advanceCursor() bool { - t.cur.Close() - t.cur = nil - for t.gc.Next() { - cur := t.gc.Cursor() - if cur == nil { - continue - } - - if typedCur, ok := cur.(cursors.BooleanArrayCursor); !ok { - // TODO(sgc): error or skip? 
- cur.Close() - t.err = &errors.Error{ - Code: errors.EInvalid, - Err: &GroupCursorError{ - typ: "boolean", - cursor: cur, - }, - } - return false - } else { - t.readTags(t.gc.Tags()) - t.cur = typedCur - return true - } - } - return false -} - -func (t *booleanGroupTable) Statistics() cursors.CursorStats { - if t.cur == nil { - return cursors.CursorStats{} - } - cs := t.cur.Stats() - return cursors.CursorStats{ - ScannedValues: cs.ScannedValues, - ScannedBytes: cs.ScannedBytes, - } -} diff --git a/storage/flux/table.gen.go.tmpl b/storage/flux/table.gen.go.tmpl deleted file mode 100644 index a55116fb84c..00000000000 --- a/storage/flux/table.gen.go.tmpl +++ /dev/null @@ -1,1039 +0,0 @@ -package storageflux - -import ( - "fmt" - "math" - "sync" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/array" - "github.com/influxdata/flux/arrow" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/interval" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/values" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - storage "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/tsdb/cursors" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" -) -{{range .}} -// -// *********** {{.Name}} *********** -// - -type {{.name}}Table struct { - table - mu sync.Mutex - cur cursors.{{.Name}}ArrayCursor - alloc memory.Allocator -} - -func new{{.Name}}Table( - done chan struct{}, - cur cursors.{{.Name}}ArrayCursor, - bounds execute.Bounds, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *{{.name}}Table { - t := &{{.name}}Table{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *{{.name}}Table) Close() { - t.mu.Lock() - if t.cur != nil { - t.cur.Close() - t.cur = nil - } - t.mu.Unlock() -} - -func (t *{{.name}}Table) Statistics() cursors.CursorStats { - t.mu.Lock() - defer t.mu.Unlock() - cur := t.cur - if cur == nil { - return cursors.CursorStats{} - } - cs := cur.Stats() - return cursors.CursorStats{ - ScannedValues: cs.ScannedValues, - ScannedBytes: cs.ScannedBytes, - } -} - -func (t *{{.name}}Table) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *{{.name}}Table) advance() bool { - a := t.cur.Next() - l := a.Len() - if l == 0 { - return false - } - - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. 
- cr := t.allocateBuffer(l) - cr.cols[timeColIdx] = arrow.NewInt(a.Timestamps, t.alloc) - cr.cols[valueColIdx] = t.toArrowBuffer(a.Values) - t.appendTags(cr) - t.appendBounds(cr) - return true -} - -// window table -type {{.name}}WindowTable struct { - {{.name}}Table - arr *cursors.{{.Name}}Array - windowBounds interval.Bounds - idxInArr int - createEmpty bool - timeColumn string - isAggregate bool - window interval.Window - {{if eq .Name "Integer"}}fillValue *{{.Type}}{{end}} -} - -func new{{.Name}}WindowTable( - done chan struct{}, - cur cursors.{{.Name}}ArrayCursor, - bounds execute.Bounds, - window interval.Window, - createEmpty bool, - timeColumn string, - isAggregate bool, - {{if eq .Name "Integer"}}fillValue *{{.Type}},{{end}} - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *{{.name}}WindowTable { - t := &{{.name}}WindowTable{ - {{.name}}Table: {{.name}}Table{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - window: window, - createEmpty: createEmpty, - timeColumn: timeColumn, - isAggregate: isAggregate, - {{if eq .Name "Integer"}}fillValue: fillValue,{{end}} - } - if t.createEmpty { - start := int64(bounds.Start) - t.windowBounds = window.GetLatestBounds(values.Time(start)) - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *{{.name}}WindowTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -// createNextBufferTimes will read the timestamps from the array -// cursor and construct the values for the next buffer. -func (t *{{.name}}WindowTable) createNextBufferTimes() (start, stop *array.Int, ok bool) { - startB := arrow.NewIntBuilder(t.alloc) - stopB := arrow.NewIntBuilder(t.alloc) - - if t.createEmpty { - // There are no more windows when the start time is greater - // than or equal to the stop time. - if startT := int64(t.windowBounds.Start()); startT >= int64(t.bounds.Stop) { - return nil, nil, false - } - - // Create a buffer with the buffer size. - // TODO(jsternberg): Calculate the exact size with max points as the maximum. - startB.Resize(storage.MaxPointsPerBlock) - stopB.Resize(storage.MaxPointsPerBlock) - for ; ; t.windowBounds = t.window.NextBounds(t.windowBounds) { - startT, stopT := t.getWindowBoundsFor(t.windowBounds) - if startT >= int64(t.bounds.Stop) { - break - } - startB.Append(startT) - stopB.Append(stopT) - } - start = startB.NewIntArray() - stop = stopB.NewIntArray() - return start, stop, true - } - - // Retrieve the next buffer so we can copy the timestamps. - if !t.nextBuffer() { - return nil, nil, false - } - - // Copy over the timestamps from the next buffer and adjust - // times for the boundaries. - startB.Resize(len(t.arr.Timestamps)) - stopB.Resize(len(t.arr.Timestamps)) - for _, stopT := range t.arr.Timestamps { - bounds := t.window.PrevBounds(t.window.GetLatestBounds(values.Time(stopT))) - startT, stopT := t.getWindowBoundsFor(bounds) - startB.Append(startT) - stopB.Append(stopT) - } - start = startB.NewIntArray() - stop = stopB.NewIntArray() - return start, stop, true -} - -func (t *{{.name}}WindowTable) getWindowBoundsFor(bounds interval.Bounds) (int64, int64) { - beg := int64(bounds.Start()) - end := int64(bounds.Stop()) - if beg < int64(t.bounds.Start) { - beg = int64(t.bounds.Start) - } - if end > int64(t.bounds.Stop) { - end = int64(t.bounds.Stop) - } - return beg, end -} - -// nextAt will retrieve the next value that can be used with -// the given stop timestamp. 
If no values can be used with the timestamp, -// it will return the default value and false. -func (t *{{.name}}WindowTable) nextAt(stop int64) (v {{.Type}}, ok bool) { - if !t.nextBuffer() { - return - } else if !t.isInWindow(stop, t.arr.Timestamps[t.idxInArr]) { - return - } - v, ok = t.arr.Values[t.idxInArr], true - t.idxInArr++ - return v, ok -} - -// isInWindow will check if the given time may be used within the window -// denoted by the stop timestamp. The stop may be a truncated stop time -// because of a restricted boundary. -// -// When used with an aggregate, ts will be the true stop time returned -// by storage. When used with a selector, it will be the real time -// for the point. -func (t *{{.name}}WindowTable) isInWindow(stop int64, ts int64) bool { - // Retrieve the boundary associated with this stop time. - // This will be the boundary for the previous nanosecond. - bounds := t.window.GetLatestBounds(values.Time(stop - 1)) - start, stop := int64(bounds.Start()), int64(bounds.Stop()) - - // For an aggregate, the timestamp will be the stop time of the boundary. - if t.isAggregate { - return start < ts && ts <= stop - } - - // For a selector, the timestamp should be within the boundary. - return start <= ts && ts < stop -} - -// nextBuffer will ensure the array cursor is filled -// and will return true if there is at least one value -// that can be read from it. -func (t *{{.name}}WindowTable) nextBuffer() bool { - // Discard the current array cursor if we have - // exceeded it. - if t.arr != nil && t.idxInArr >= t.arr.Len() { - t.arr = nil - } - - // Retrieve the next array cursor if needed. - if t.arr == nil { - arr := t.cur.Next() - if arr.Len() == 0 { - return false - } - t.arr, t.idxInArr = arr, 0 - } - return true -} - -// appendValues will scan the timestamps and append values -// that match those timestamps from the buffer. -func (t *{{.name}}WindowTable) appendValues(intervals []int64, appendValue func(v {{.Type}}), appendNull func()) { - for i := 0; i < len(intervals); i++ { - if v, ok := t.nextAt(intervals[i]); ok { - appendValue(v) - continue - } - appendNull() - } -} - -func (t *{{.name}}WindowTable) advance() bool { - if !t.nextBuffer() { - return false - } - // Create the timestamps for the next window. - start, stop, ok := t.createNextBufferTimes() - if !ok { - return false - } - values := t.mergeValues(stop.Int64Values()) - - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. - cr := t.allocateBuffer(stop.Len()) - if t.timeColumn != "" { - switch t.timeColumn { - case execute.DefaultStopColLabel: - cr.cols[timeColIdx] = stop - start.Release() - case execute.DefaultStartColLabel: - cr.cols[timeColIdx] = start - stop.Release() - } - cr.cols[valueColIdx] = values - t.appendBounds(cr) - } else { - cr.cols[startColIdx] = start - cr.cols[stopColIdx] = stop - cr.cols[valueColIdxWithoutTime] = values - } - t.appendTags(cr) - return true -} - -// This table implementation will not have any empty windows.
-type {{.name}}WindowSelectorTable struct { - {{.name}}Table - timeColumn string - window interval.Window -} - -func new{{.Name}}WindowSelectorTable( - done chan struct{}, - cur cursors.{{.Name}}ArrayCursor, - bounds execute.Bounds, - window interval.Window, - timeColumn string, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *{{.name}}WindowSelectorTable { - t := &{{.name}}WindowSelectorTable{ - {{.name}}Table: {{.name}}Table{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - window: window, - timeColumn: timeColumn, - } - t.readTags(tags) - t.init(t.advance) - return t -} - -func (t *{{.name}}WindowSelectorTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *{{.name}}WindowSelectorTable) advance() bool { - arr := t.cur.Next() - if arr.Len() == 0 { - return false - } - - cr := t.allocateBuffer(arr.Len()) - - switch t.timeColumn { - case execute.DefaultStartColLabel: - cr.cols[timeColIdx] = t.startTimes(arr) - t.appendBounds(cr) - case execute.DefaultStopColLabel: - cr.cols[timeColIdx] = t.stopTimes(arr) - t.appendBounds(cr) - default: - cr.cols[startColIdx] = t.startTimes(arr) - cr.cols[stopColIdx] = t.stopTimes(arr) - cr.cols[timeColIdx] = arrow.NewInt(arr.Timestamps, t.alloc) - } - - cr.cols[valueColIdx] = t.toArrowBuffer(arr.Values) - t.appendTags(cr) - return true -} - -func (t *{{.name}}WindowSelectorTable) startTimes(arr *cursors.{{.Name}}Array) *array.Int { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(arr.Len()) - - rangeStart := int64(t.bounds.Start) - - for _, v := range arr.Timestamps { - if windowStart := int64(t.window.GetLatestBounds(values.Time(v)).Start()); windowStart < rangeStart { - start.Append(rangeStart) - } else { - start.Append(windowStart) - } - } - return start.NewIntArray() -} - -func (t *{{.name}}WindowSelectorTable) stopTimes(arr *cursors.{{.Name}}Array) *array.Int { - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(arr.Len()) - - rangeStop := int64(t.bounds.Stop) - - for _, v := range arr.Timestamps { - if windowStop := int64(t.window.GetLatestBounds(values.Time(v)).Stop()); windowStop > rangeStop { - stop.Append(rangeStop) - } else { - stop.Append(windowStop) - } - } - return stop.NewIntArray() -} - -// This table implementation may contain empty windows -// in addition to non-empty windows. 
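// Editorial note (not from the original source): unlike the selector table above,
// this variant materializes every window in the queried range. A window that
// contains no stored point still produces a row: the value builder (and, when the
// plain _time column is requested, the time builder) gets an explicit null via
// AppendNull(), so empty windows are reported rather than silently dropped. For
// example, 15s-spaced points bucketed into 10s windows leave every third window
// with a null value.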
-type {{.name}}EmptyWindowSelectorTable struct { - {{.name}}Table - arr *cursors.{{.Name}}Array - idx int - rangeStart int64 - rangeStop int64 - windowBounds interval.Bounds - timeColumn string - window interval.Window -} - -func new{{.Name}}EmptyWindowSelectorTable( - done chan struct{}, - cur cursors.{{.Name}}ArrayCursor, - bounds execute.Bounds, - window interval.Window, - timeColumn string, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *{{.name}}EmptyWindowSelectorTable { - rangeStart := int64(bounds.Start) - rangeStop := int64(bounds.Stop) - t := &{{.name}}EmptyWindowSelectorTable{ - {{.name}}Table: {{.name}}Table{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - cur: cur, - }, - arr: cur.Next(), - rangeStart: rangeStart, - rangeStop: rangeStop, - windowBounds: window.GetLatestBounds(values.Time(rangeStart)), - window: window, - timeColumn: timeColumn, - } - t.readTags(tags) - t.init(t.advance) - return t -} - -func (t *{{.name}}EmptyWindowSelectorTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *{{.name}}EmptyWindowSelectorTable) advance() bool { - if t.arr.Len() == 0 { - return false - } - - values := t.arrowBuilder() - values.Resize(storage.MaxPointsPerBlock) - - var cr *colReader - - switch t.timeColumn { - case execute.DefaultStartColLabel: - start := t.startTimes(values) - cr = t.allocateBuffer(start.Len()) - cr.cols[timeColIdx] = start - t.appendBounds(cr) - case execute.DefaultStopColLabel: - stop := t.stopTimes(values) - cr = t.allocateBuffer(stop.Len()) - cr.cols[timeColIdx] = stop - t.appendBounds(cr) - default: - start, stop, time := t.startStopTimes(values) - cr = t.allocateBuffer(time.Len()) - cr.cols[startColIdx] = start - cr.cols[stopColIdx] = stop - cr.cols[timeColIdx] = time - } - - cr.cols[valueColIdx] = values.New{{.ArrowType}}Array() - t.appendTags(cr) - return true -} - -func (t *{{.name}}EmptyWindowSelectorTable) startTimes(builder *array.{{.ArrowType}}Builder) *array.Int { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - // The first window should start at the - // beginning of the time range. - if int64(t.windowBounds.Start()) < t.rangeStart { - start.Append(t.rangeStart) - } else { - start.Append(int64(t.windowBounds.Start())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). - if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if start.Len() == storage.MaxPointsPerBlock { - break - } - } - return start.NewIntArray() -} - -func (t *{{.name}}EmptyWindowSelectorTable) stopTimes(builder *array.{{.ArrowType}}Builder) *array.Int { - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - // The last window should stop at the end of - // the time range. 
- if int64(t.windowBounds.Stop()) > t.rangeStop { - stop.Append(t.rangeStop) - } else { - stop.Append(int64(t.windowBounds.Stop())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). - if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if stop.Len() == storage.MaxPointsPerBlock { - break - } - } - return stop.NewIntArray() -} - -func (t *{{.name}}EmptyWindowSelectorTable) startStopTimes(builder *array.{{.ArrowType}}Builder) (*array.Int, *array.Int, *array.Int) { - start := arrow.NewIntBuilder(t.alloc) - start.Resize(storage.MaxPointsPerBlock) - - stop := arrow.NewIntBuilder(t.alloc) - stop.Resize(storage.MaxPointsPerBlock) - - time := arrow.NewIntBuilder(t.alloc) - time.Resize(storage.MaxPointsPerBlock) - - for int64(t.windowBounds.Start()) < t.rangeStop { - - // The first window should start at the - // beginning of the time range. - if int64(t.windowBounds.Start()) < t.rangeStart { - start.Append(t.rangeStart) - } else { - start.Append(int64(t.windowBounds.Start())) - } - - // The last window should stop at the end of - // the time range. - if int64(t.windowBounds.Stop()) > t.rangeStop { - stop.Append(t.rangeStop) - } else { - stop.Append(int64(t.windowBounds.Stop())) - } - - var v int64 - - if t.arr.Len() == 0 { - v = math.MaxInt64 - } else { - v = t.arr.Timestamps[t.idx] - } - - // If the current timestamp falls within the - // current window, append the value to the - // builder, otherwise append a null value. - if int64(t.windowBounds.Start()) <= v && v < int64(t.windowBounds.Stop()) { - time.Append(v) - t.append(builder, t.arr.Values[t.idx]) - t.idx++ - } else { - time.AppendNull() - builder.AppendNull() - } - - t.windowBounds = t.window.NextBounds(t.windowBounds) - - // If the current array is non-empty and has - // been read in its entirety, call Next(). 
- if t.arr.Len() > 0 && t.idx == t.arr.Len() { - t.arr = t.cur.Next() - t.idx = 0 - } - - if time.Len() == storage.MaxPointsPerBlock { - break - } - } - return start.NewIntArray(), stop.NewIntArray(), time.NewIntArray() -} - -// group table - -type {{.name}}GroupTable struct { - table - mu sync.Mutex - gc storage.GroupCursor - cur cursors.{{.Name}}ArrayCursor -} - -func new{{.Name}}GroupTable( - done chan struct{}, - gc storage.GroupCursor, - cur cursors.{{.Name}}ArrayCursor, - bounds execute.Bounds, - key flux.GroupKey, - cols []flux.ColMeta, - tags models.Tags, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) *{{.name}}GroupTable { - t := &{{.name}}GroupTable{ - table: newTable(done, bounds, key, cols, defs, cache, alloc), - gc: gc, - cur: cur, - } - t.readTags(tags) - t.init(t.advance) - - return t -} - -func (t *{{.name}}GroupTable) Close() { - t.mu.Lock() - if t.cur != nil { - t.cur.Close() - t.cur = nil - } - if t.gc != nil { - t.gc.Close() - t.gc = nil - } - t.mu.Unlock() -} - -func (t *{{.name}}GroupTable) Do(f func(flux.ColReader) error) error { - return t.do(f, t.advance) -} - -func (t *{{.name}}GroupTable) advance() bool { - if t.cur == nil { - // For group aggregates, we will try to get all the series and all table buffers within those series - // all at once and merge them into one row when this advance() function is first called. - // At the end of this process, t.advanceCursor() already returns false and t.cur becomes nil. - // But we still need to return true to indicate that there is data to be returned. - // The second time when we call this advance(), t.cur is already nil, so we directly return false. - return false - } - var arr *cursors.{{.Name}}Array - var len int - for { - arr = t.cur.Next() - len = arr.Len() - if len > 0 { - break - } - if !t.advanceCursor() { - return false - } - } - - // handle the group without aggregate case - if t.gc.Aggregate() == nil { - // Retrieve the buffer for the data to avoid allocating - // additional slices. If the buffer is still being used - // because the references were retained, then we will - // allocate a new buffer. - colReader := t.allocateBuffer(len) - colReader.cols[timeColIdx] = arrow.NewInt(arr.Timestamps, t.alloc) - colReader.cols[valueColIdx] = t.toArrowBuffer(arr.Values) - t.appendTags(colReader) - t.appendBounds(colReader) - return true - } - - aggregate, err := make{{.Name}}AggregateAccumulator(t.gc.Aggregate().Type) - if err != nil { - t.err = err - return false - } - - aggregate.AccumulateFirst(arr.Timestamps, arr.Values, t.tags) - for { - arr = t.cur.Next() - if arr.Len() > 0 { - aggregate.AccumulateMore(arr.Timestamps, arr.Values, t.tags) - continue - } - - if !t.advanceCursor() { - break - } - } - timestamp, value, tags := aggregate.Result() - - colReader := t.allocateBuffer(1) - if IsSelector(t.gc.Aggregate()) { - colReader.cols[timeColIdx] = arrow.NewInt([]int64{timestamp}, t.alloc) - colReader.cols[valueColIdx] = t.toArrowBuffer([]{{.Type}}{value}) - } else { - colReader.cols[valueColIdxWithoutTime] = t.toArrowBuffer([]{{.Type}}{value}) - } - t.appendTheseTags(colReader, tags) - t.appendBounds(colReader) - return true -} - -type {{.Name}}AggregateAccumulator interface { - // AccumulateFirst receives an initial array of items to select from. - // It selects an item and stores the state. Afterwards, more data can - // be supplied with AccumulateMore and the results can be requested at - // any time. Without a call to AccumulateFirst the results are not - // defined. 
- AccumulateFirst(timestamps []int64, values []{{.Type}}, tags [][]byte) - - // AccumulateMore receives additional array elements to select from. - AccumulateMore(timestamps []int64, values []{{.Type}}, tags [][]byte) - - // Result returns the item selected from the data received so far. - Result() (int64, {{.Type}}, [][]byte) -} - -// The selector method takes a ( timestamp, value ) pair, a -// ( []timestamp, []value ) pair, and a starting index. It applies the selector -// to the single value and the array, starting at the supplied index. It -// returns -1 if the single value is selected and a non-negative value if an -// item from the array is selected. -type {{.name}}SelectorMethod func(int64, {{.Type}}, []int64, []{{.Type}}, int) (int) - -// The selector accumulator tracks currently-selected item. -type {{.name}}SelectorAccumulator struct { - selector {{.name}}SelectorMethod - - ts int64 - v {{.Type}} - tags [][]byte -} - -func (a *{{.name}}SelectorAccumulator) AccumulateFirst(timestamps []int64, values []{{.Type}}, tags [][]byte) { - index := a.selector(timestamps[0], values[0], timestamps, values, 1) - if index < 0 { - a.ts = timestamps[0] - a.v = values[0] - } else { - a.ts = timestamps[index] - a.v = values[index] - } - a.tags = make([][]byte, len(tags)) - copy(a.tags, tags) -} - -func (a *{{.name}}SelectorAccumulator) AccumulateMore(timestamps []int64, values []{{.Type}}, tags [][]byte) { - index := a.selector(a.ts, a.v, timestamps, values, 0) - if index >= 0 { - a.ts = timestamps[index] - a.v = values[index] - - if len(tags) > cap(a.tags) { - a.tags = make([][]byte, len(tags)) - } else { - a.tags = a.tags[:len(tags)] - } - copy(a.tags, tags) - } -} - -func (a *{{.name}}SelectorAccumulator) Result() (int64, {{.Type}}, [][]byte) { - return a.ts, a.v, a.tags -} - -{{if and (ne .Name "Boolean") (ne .Name "String")}} - -// The aggregate method takes a value, an array of values, and a starting -// index, applies an aggregate operation over the value and the array, starting -// at the given index, and returns the result. -type {{.name}}AggregateMethod func({{.Type}}, []{{.Type}}, int) ({{.Type}}) - -type {{.name}}AggregateAccumulator struct { - aggregate {{.name}}AggregateMethod - accum {{.Type}} - - // For pure aggregates it doesn't matter what we return for tags, but - // we need to satisfy the interface. We will just return the most - // recently seen tags. - tags [][]byte -} - -func (a *{{.name}}AggregateAccumulator) AccumulateFirst(timestamps []int64, values []{{.Type}}, tags [][]byte) { - a.accum = a.aggregate(values[0], values, 1) - a.tags = tags -} - -func (a *{{.name}}AggregateAccumulator) AccumulateMore(timestamps []int64, values []{{.Type}}, tags [][]byte) { - a.accum = a.aggregate(a.accum, values, 0) - a.tags = tags -} - -// For group aggregates (non-selectors), the timestamp is always math.MaxInt64. -// their final result does not contain _time, so this timestamp value can be -// anything and it won't matter. -func (a *{{.name}}AggregateAccumulator) Result() (int64, {{.Type}}, [][]byte) { - return math.MaxInt64, a.accum, a.tags -} - -{{end}} - -// make{{.Name}}AggregateAccumulator returns the interface implementation for -// aggregating returned points within the same group. The incoming points are -// the ones returned for each series and the struct returned here will -// aggregate the aggregates. 
-func make{{.Name}}AggregateAccumulator(agg datatypes.Aggregate_AggregateType) ({{.Name}}AggregateAccumulator, error){ - switch agg { - case datatypes.Aggregate_AggregateTypeFirst: - return &{{.name}}SelectorAccumulator{selector: selectorFirstGroups{{.Name}}}, nil - case datatypes.Aggregate_AggregateTypeLast: - return &{{.name}}SelectorAccumulator{selector: selectorLastGroups{{.Name}}}, nil - case datatypes.Aggregate_AggregateTypeCount: - {{if eq .Name "Integer"}} - return &{{.name}}AggregateAccumulator{aggregate: aggregateCountGroups{{.Name}}}, nil - {{else}} - return nil, &errors.Error { - Code: errors.EInvalid, - Msg: "unsupported for aggregate count: {{.Name}}", - } - {{end}} - case datatypes.Aggregate_AggregateTypeSum: - {{if and (ne .Name "Boolean") (ne .Name "String")}} - return &{{.name}}AggregateAccumulator{aggregate: aggregateSumGroups{{.Name}}}, nil - {{else}} - return nil, &errors.Error { - Code: errors.EInvalid, - Msg: "unsupported for aggregate sum: {{.Name}}", - } - {{end}} - case datatypes.Aggregate_AggregateTypeMin: - {{if and (ne .Name "Boolean") (ne .Name "String")}} - return &{{.name}}SelectorAccumulator{selector: selectorMinGroups{{.Name}}}, nil - {{else}} - return nil, &errors.Error { - Code: errors.EInvalid, - Msg: "unsupported for aggregate min: {{.Name}}", - } - {{end}} - case datatypes.Aggregate_AggregateTypeMax: - {{if and (ne .Name "Boolean") (ne .Name "String")}} - return &{{.name}}SelectorAccumulator{selector: selectorMaxGroups{{.Name}}}, nil - {{else}} - return nil, &errors.Error { - Code: errors.EInvalid, - Msg: "unsupported for aggregate max: {{.Name}}", - } - {{end}} - default: - return nil, &errors.Error { - Code: errors.EInvalid, - Msg: fmt.Sprintf("unknown/unimplemented aggregate type: %v", agg), - } - } -} - -{{if and (ne .Name "Boolean") (ne .Name "String")}} -func selectorMinGroups{{.Name}}(ts int64, v {{.Type}}, timestamps []int64, values []{{.Type}}, i int) (int) { - index := -1 - - for ; i < len(values); i++ { - if v > values[i] { - index = i - v = values[i] - } - } - - return index -} -{{end}} - -{{if and (ne .Name "Boolean") (ne .Name "String")}} -func selectorMaxGroups{{.Name}}(ts int64, v {{.Type}}, timestamps []int64, values []{{.Type}}, i int) (int) { - index := -1 - - for ; i < len(values); i++ { - if v < values[i] { - index = i - v = values[i] - } - } - - return index -} -{{end}} - -{{if eq .Name "Integer"}} -func aggregateCountGroups{{.Name}}(accum {{.Type}}, values []{{.Type}}, i int) ({{.Type}}) { - return aggregateSumGroups{{.Name}}(accum, values, i) -} -{{end}} - -{{if and (ne .Name "Boolean") (ne .Name "String")}} -func aggregateSumGroups{{.Name}}(sum {{.Type}}, values []{{.Type}}, i int) ({{.Type}}) { - for ; i< len(values); i++ { - sum += values[i] - } - return sum -} -{{end}} - -func selectorFirstGroups{{.Name}}(ts int64, v {{.Type}}, timestamps []int64, values []{{.Type}}, i int) (int) { - index := -1 - - for ; i < len(values); i++ { - if ts > timestamps[i] { - index = i - ts = timestamps[i] - } - } - - return index -} - -func selectorLastGroups{{.Name}}(ts int64, v {{.Type}}, timestamps []int64, values []{{.Type}}, i int) (int) { - index := -1 - - for ; i < len(values); i++ { - if ts <= timestamps[i] { - index = i - ts = timestamps[i] - } - } - - return index -} - -func (t *{{.name}}GroupTable) advanceCursor() bool { - t.cur.Close() - t.cur = nil - for t.gc.Next() { - cur := t.gc.Cursor() - if cur == nil { - continue - } - - if typedCur, ok := cur.(cursors.{{.Name}}ArrayCursor); !ok { - // TODO(sgc): error or skip? 
- cur.Close() - t.err = &errors.Error { - Code: errors.EInvalid, - Err: &GroupCursorError { - typ: "{{.name}}", - cursor: cur, - }, - } - return false - } else { - t.readTags(t.gc.Tags()) - t.cur = typedCur - return true - } - } - return false -} - -func (t *{{.name}}GroupTable) Statistics() cursors.CursorStats { - if t.cur == nil { - return cursors.CursorStats{} - } - cs := t.cur.Stats() - return cursors.CursorStats{ - ScannedValues: cs.ScannedValues, - ScannedBytes: cs.ScannedBytes, - } -} - -{{end}} diff --git a/storage/flux/table.go b/storage/flux/table.go deleted file mode 100644 index aaa2df54142..00000000000 --- a/storage/flux/table.go +++ /dev/null @@ -1,350 +0,0 @@ -package storageflux - -//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@types.tmpldata table.gen.go.tmpl - -import ( - "errors" - "sync/atomic" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/array" - "github.com/influxdata/flux/arrow" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/memory" - "github.com/influxdata/influxdb/v2/models" -) - -type table struct { - bounds execute.Bounds - key flux.GroupKey - cols []flux.ColMeta - - // cache of the tags on the current series. - // len(tags) == len(colMeta) - tags [][]byte - defs [][]byte - - done chan struct{} - - colBufs *colReader - empty bool - - err error - - cancelled, used int32 - cache *tagsCache - alloc memory.Allocator -} - -func newTable( - done chan struct{}, - bounds execute.Bounds, - key flux.GroupKey, - cols []flux.ColMeta, - defs [][]byte, - cache *tagsCache, - alloc memory.Allocator, -) table { - return table{ - done: done, - bounds: bounds, - key: key, - tags: make([][]byte, len(cols)), - defs: defs, - cols: cols, - cache: cache, - alloc: alloc, - } -} - -func (t *table) Key() flux.GroupKey { return t.key } -func (t *table) Cols() []flux.ColMeta { return t.cols } -func (t *table) Err() error { return t.err } -func (t *table) Empty() bool { return t.empty } - -func (t *table) Cancel() { - atomic.StoreInt32(&t.cancelled, 1) -} - -func (t *table) isCancelled() bool { - return atomic.LoadInt32(&t.cancelled) != 0 -} - -func (t *table) init(advance func() bool) { - t.empty = !advance() && t.err == nil -} - -func (t *table) do(f func(flux.ColReader) error, advance func() bool) error { - // Mark this table as having been used. If this doesn't - // succeed, then this has already been invoked somewhere else. - if !atomic.CompareAndSwapInt32(&t.used, 0, 1) { - return errors.New("table already used") - } - defer t.closeDone() - - // If an error occurred during initialization, that is - // returned here. - if t.err != nil { - return t.err - } - - if !t.Empty() { - t.err = f(t.colBufs) - t.colBufs.Release() - - for !t.isCancelled() && t.err == nil && advance() { - t.err = f(t.colBufs) - t.colBufs.Release() - } - t.colBufs = nil - } - - return t.err -} - -func (t *table) Done() { - // Mark the table as having been used. If this has already - // been done, then nothing needs to be done. - if atomic.CompareAndSwapInt32(&t.used, 0, 1) { - defer t.closeDone() - } - - if t.colBufs != nil { - t.colBufs.Release() - t.colBufs = nil - } -} - -// allocateBuffer will allocate a suitable buffer for the -// table implementations to use. If the existing buffer -// is not used anymore, then it may be reused. -// -// The allocated buffer can be accessed at colBufs or -// through the returned colReader. 
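// Editorial sketch (not in the original file): whether the buffer is reused depends
// on the colReader's reference count. A caller that retains the reader past its
// callback forces the next call to build a fresh buffer, for example:
//
//   cr := t.allocateBuffer(10) // refCount set to 1
//   cr.Retain()                // refCount now 2
//   cr.Release()               // back to 1, so the columns are not freed yet
//   _ = t.allocateBuffer(10)   // refCount still > 0: a new colReader is allocated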
-func (t *table) allocateBuffer(l int) *colReader { - if t.colBufs == nil || atomic.LoadInt64(&t.colBufs.refCount) > 0 { - // The current buffer is still being used so we should - // generate a new one. - t.colBufs = &colReader{ - key: t.key, - colMeta: t.cols, - cols: make([]array.Array, len(t.cols)), - } - } - t.colBufs.refCount = 1 - t.colBufs.l = l - return t.colBufs -} - -type colReader struct { - refCount int64 - - key flux.GroupKey - colMeta []flux.ColMeta - cols []array.Array - l int -} - -func (cr *colReader) Retain() { - atomic.AddInt64(&cr.refCount, 1) -} -func (cr *colReader) Release() { - if atomic.AddInt64(&cr.refCount, -1) == 0 { - for _, col := range cr.cols { - col.Release() - } - } -} - -func (cr *colReader) Key() flux.GroupKey { return cr.key } -func (cr *colReader) Cols() []flux.ColMeta { return cr.colMeta } -func (cr *colReader) Len() int { return cr.l } - -func (cr *colReader) Bools(j int) *array.Boolean { - execute.CheckColType(cr.colMeta[j], flux.TBool) - return cr.cols[j].(*array.Boolean) -} - -func (cr *colReader) Ints(j int) *array.Int { - execute.CheckColType(cr.colMeta[j], flux.TInt) - return cr.cols[j].(*array.Int) -} - -func (cr *colReader) UInts(j int) *array.Uint { - execute.CheckColType(cr.colMeta[j], flux.TUInt) - return cr.cols[j].(*array.Uint) -} - -func (cr *colReader) Floats(j int) *array.Float { - execute.CheckColType(cr.colMeta[j], flux.TFloat) - return cr.cols[j].(*array.Float) -} - -func (cr *colReader) Strings(j int) *array.String { - execute.CheckColType(cr.colMeta[j], flux.TString) - return cr.cols[j].(*array.String) -} - -func (cr *colReader) Times(j int) *array.Int { - execute.CheckColType(cr.colMeta[j], flux.TTime) - return cr.cols[j].(*array.Int) -} - -// readTags populates t.tags with the provided tags -func (t *table) readTags(tags models.Tags) { - for j := range t.tags { - t.tags[j] = t.defs[j] - } - - if len(tags) == 0 { - return - } - - for _, tag := range tags { - j := execute.ColIdx(string(tag.Key), t.cols) - // In the case of a group aggregate, tags that are not referenced in group() are not included in the result, but - // readTags() still receives the complete tag list, so we skip any tag that should not appear in the result. - if j < 0 { - continue - } - t.tags[j] = tag.Value - } -} - -// appendTheseTags fills the colBufs for the tag columns with the given tag values. -func (t *table) appendTheseTags(cr *colReader, tags [][]byte) { - for j := range t.cols { - v := tags[j] - if v != nil { - cr.cols[j] = t.cache.GetTag(string(v), cr.l, t.alloc) - } - } - } - -// appendTags fills the colBufs for the tag columns with the tag values from the table structure.
-func (t *table) appendTags(cr *colReader) { - t.appendTheseTags(cr, t.tags) -} - -// appendBounds fills the colBufs for the time bounds -func (t *table) appendBounds(cr *colReader) { - start, stop := t.cache.GetBounds(t.bounds, cr.l, t.alloc) - cr.cols[startColIdx], cr.cols[stopColIdx] = start, stop -} - -func (t *table) closeDone() { - if t.done != nil { - close(t.done) - t.done = nil - } -} - -func (t *floatTable) toArrowBuffer(vs []float64) *array.Float { - return arrow.NewFloat(vs, t.alloc) -} -func (t *floatGroupTable) toArrowBuffer(vs []float64) *array.Float { - return arrow.NewFloat(vs, t.alloc) -} -func (t *floatWindowSelectorTable) toArrowBuffer(vs []float64) *array.Float { - return arrow.NewFloat(vs, t.alloc) -} -func (t *floatWindowTable) mergeValues(intervals []int64) *array.Float { - b := arrow.NewFloatBuilder(t.alloc) - b.Resize(len(intervals)) - t.appendValues(intervals, b.Append, b.AppendNull) - return b.NewFloatArray() -} -func (t *floatEmptyWindowSelectorTable) arrowBuilder() *array.FloatBuilder { - return arrow.NewFloatBuilder(t.alloc) -} -func (t *floatEmptyWindowSelectorTable) append(builder *array.FloatBuilder, v float64) { - builder.Append(v) -} -func (t *integerTable) toArrowBuffer(vs []int64) *array.Int { - return arrow.NewInt(vs, t.alloc) -} -func (t *integerWindowSelectorTable) toArrowBuffer(vs []int64) *array.Int { - return arrow.NewInt(vs, t.alloc) -} -func (t *integerGroupTable) toArrowBuffer(vs []int64) *array.Int { - return arrow.NewInt(vs, t.alloc) -} -func (t *integerWindowTable) mergeValues(intervals []int64) *array.Int { - b := arrow.NewIntBuilder(t.alloc) - b.Resize(len(intervals)) - appendNull := b.AppendNull - if t.fillValue != nil { - appendNull = func() { b.Append(*t.fillValue) } - } - t.appendValues(intervals, b.Append, appendNull) - return b.NewIntArray() -} -func (t *integerEmptyWindowSelectorTable) arrowBuilder() *array.IntBuilder { - return arrow.NewIntBuilder(t.alloc) -} -func (t *integerEmptyWindowSelectorTable) append(builder *array.IntBuilder, v int64) { - builder.Append(v) -} -func (t *unsignedTable) toArrowBuffer(vs []uint64) *array.Uint { - return arrow.NewUint(vs, t.alloc) -} -func (t *unsignedGroupTable) toArrowBuffer(vs []uint64) *array.Uint { - return arrow.NewUint(vs, t.alloc) -} -func (t *unsignedWindowSelectorTable) toArrowBuffer(vs []uint64) *array.Uint { - return arrow.NewUint(vs, t.alloc) -} -func (t *unsignedWindowTable) mergeValues(intervals []int64) *array.Uint { - b := arrow.NewUintBuilder(t.alloc) - b.Resize(len(intervals)) - t.appendValues(intervals, b.Append, b.AppendNull) - return b.NewUintArray() -} -func (t *unsignedEmptyWindowSelectorTable) arrowBuilder() *array.UintBuilder { - return arrow.NewUintBuilder(t.alloc) -} -func (t *unsignedEmptyWindowSelectorTable) append(builder *array.UintBuilder, v uint64) { - builder.Append(v) -} -func (t *stringTable) toArrowBuffer(vs []string) *array.String { - return arrow.NewString(vs, t.alloc) -} -func (t *stringGroupTable) toArrowBuffer(vs []string) *array.String { - return arrow.NewString(vs, t.alloc) -} -func (t *stringWindowSelectorTable) toArrowBuffer(vs []string) *array.String { - return arrow.NewString(vs, t.alloc) -} -func (t *stringWindowTable) mergeValues(intervals []int64) *array.String { - b := arrow.NewStringBuilder(t.alloc) - b.Resize(len(intervals)) - t.appendValues(intervals, b.Append, b.AppendNull) - return b.NewStringArray() -} -func (t *stringEmptyWindowSelectorTable) arrowBuilder() *array.StringBuilder { - return arrow.NewStringBuilder(t.alloc) -} -func (t 
*stringEmptyWindowSelectorTable) append(builder *array.StringBuilder, v string) { - builder.Append(v) -} -func (t *booleanTable) toArrowBuffer(vs []bool) *array.Boolean { - return arrow.NewBool(vs, t.alloc) -} -func (t *booleanGroupTable) toArrowBuffer(vs []bool) *array.Boolean { - return arrow.NewBool(vs, t.alloc) -} -func (t *booleanWindowSelectorTable) toArrowBuffer(vs []bool) *array.Boolean { - return arrow.NewBool(vs, t.alloc) -} -func (t *booleanWindowTable) mergeValues(intervals []int64) *array.Boolean { - b := arrow.NewBoolBuilder(t.alloc) - b.Resize(len(intervals)) - t.appendValues(intervals, b.Append, b.AppendNull) - return b.NewBooleanArray() -} -func (t *booleanEmptyWindowSelectorTable) arrowBuilder() *array.BooleanBuilder { - return arrow.NewBoolBuilder(t.alloc) -} -func (t *booleanEmptyWindowSelectorTable) append(builder *array.BooleanBuilder, v bool) { - builder.Append(v) -} diff --git a/storage/flux/table_internal_test.go b/storage/flux/table_internal_test.go deleted file mode 100644 index 182a78387d7..00000000000 --- a/storage/flux/table_internal_test.go +++ /dev/null @@ -1,7 +0,0 @@ -package storageflux - -import "sync/atomic" - -func (t *table) IsDone() bool { - return atomic.LoadInt32(&t.used) != 0 -} diff --git a/storage/flux/table_test.go b/storage/flux/table_test.go deleted file mode 100644 index d71240f0a56..00000000000 --- a/storage/flux/table_test.go +++ /dev/null @@ -1,3895 +0,0 @@ -package storageflux_test - -import ( - "context" - "io" - "math" - "math/rand" - "os" - "path/filepath" - "sort" - "strconv" - "sync" - "testing" - "time" - - arrowmem "github.com/apache/arrow/go/v7/arrow/memory" - "github.com/google/go-cmp/cmp" - "github.com/influxdata/flux" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/execute/executetest" - "github.com/influxdata/flux/execute/table" - "github.com/influxdata/flux/execute/table/static" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/plan" - "github.com/influxdata/flux/values" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/internal/shard" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/models" - datagen "github.com/influxdata/influxdb/v2/pkg/data/gen" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/storage" - storageflux "github.com/influxdata/influxdb/v2/storage/flux" - storageproto "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/influxdata/influxdb/v2/v1/services/meta" - storagev1 "github.com/influxdata/influxdb/v2/v1/services/storage" -) - -type SetupFunc func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) - -type StorageReader struct { - Org platform.ID - Bucket platform.ID - Bounds execute.Bounds - Close func() - query.StorageReader -} - -func NewStorageReader(tb testing.TB, setupFn SetupFunc) *StorageReader { - rootDir := tb.TempDir() - - var closers []io.Closer - close := func() { - for _, c := range closers { - if err := c.Close(); err != nil { - tb.Errorf("close error: %s", err) - } - } - } - - // Create an underlying kv store. We use the inmem version to speed - // up test runs. 
- kvStore := inmem.NewKVStore() - - // Manually create the meta bucket. - // This seems to be the only bucket used for the read path. - // If, in the future, there are any "bucket not found" errors due to - // a change in the storage code, then this section of code will need - // to be changed to correctly configure the kv store. - // We do this abbreviated setup instead of a full migration because - // the full migration is both unnecessary and long. - if err := kvStore.CreateBucket(context.Background(), meta.BucketName); err != nil { - close() - tb.Fatalf("failed to create meta bucket: %s", err) - } - - // Use this kv store for the meta client. The storage reader - // uses the meta client for shard information. - metaClient := meta.NewClient(meta.NewConfig(), kvStore) - if err := metaClient.Open(); err != nil { - close() - tb.Fatalf("failed to open meta client: %s", err) - } - closers = append(closers, metaClient) - - // Create the organization and the bucket. - idgen := mock.NewMockIDGenerator() - org, bucket := idgen.ID(), idgen.ID() - - // Run the setup function to create the series generator. - sg, tr := setupFn(org, bucket) - - // Construct a database with a retention policy. - // This would normally be done by the storage bucket service, but the storage - // bucket service requires us to already have a storage engine and a fully migrated - // kv store. Since we don't have either of those, we add the metadata necessary - // for the storage reader to function. - // We construct the database with a retention policy that is a year long - // so that we do not have to generate more than one shard which can get complicated. - rp := &meta.RetentionPolicySpec{ - Name: meta.DefaultRetentionPolicyName, - ShardGroupDuration: 24 * 7 * time.Hour * 52, - } - if _, err := metaClient.CreateDatabaseWithRetentionPolicy(bucket.String(), rp); err != nil { - close() - tb.Fatalf("failed to create database: %s", err) - } - - // Create the shard group for the data. There should only be one and - // it should include the entire time range. - sgi, err := metaClient.CreateShardGroup(bucket.String(), rp.Name, tr.Start) - if err != nil { - close() - tb.Fatalf("failed to create shard group: %s", err) - } else if sgi.StartTime.After(tr.Start) || sgi.EndTime.Before(tr.End) { - close() - tb.Fatal("shard data range exceeded the shard group range; please use a range for data that is within the same year") - } - - // Open the series file and prepare the directory for the shard writer. - enginePath := filepath.Join(rootDir, "engine") - dbPath := filepath.Join(enginePath, "data", bucket.String()) - if err := os.MkdirAll(dbPath, 0700); err != nil { - close() - tb.Fatalf("failed to create data directory: %s", err) - } - - sfile := tsdb.NewSeriesFile(filepath.Join(dbPath, tsdb.SeriesFileDirectory)) - if err := sfile.Open(); err != nil { - close() - tb.Fatalf("failed to open series file: %s", err) - } - // Ensure the series file is closed in case of failure. - defer sfile.Close() - // Disable compactions to speed up the shard writer. - sfile.DisableCompactions() - - // Write the shard data. - shardPath := filepath.Join(dbPath, rp.Name) - if err := os.MkdirAll(filepath.Join(shardPath, strconv.FormatUint(sgi.Shards[0].ID, 10)), 0700); err != nil { - close() - tb.Fatalf("failed to create shard directory: %s", err) - } - if err := writeShard(sfile, sg, sgi.Shards[0].ID, shardPath); err != nil { - close() - tb.Fatalf("failed to write shard: %s", err) - } - - // Run the partition compactor on the series file. 
- for i, p := range sfile.Partitions() { - c := tsdb.NewSeriesPartitionCompactor() - if err := c.Compact(p); err != nil { - close() - tb.Fatalf("failed to compact series file %d: %s", i, err) - } - } - - // Close the series file as it will be opened by the storage engine. - if err := sfile.Close(); err != nil { - close() - tb.Fatalf("failed to close series file: %s", err) - } - - // Now load the engine. - engine := storage.NewEngine( - enginePath, - storage.NewConfig(), - storage.WithMetaClient(metaClient), - ) - if err := engine.Open(context.Background()); err != nil { - close() - tb.Fatalf("failed to open storage engine: %s", err) - } - closers = append(closers, engine) - - store := storagev1.NewStore(engine.TSDBStore(), engine.MetaClient()) - reader := storageflux.NewReader(store) - return &StorageReader{ - Org: org, - Bucket: bucket, - Bounds: execute.Bounds{ - Start: values.ConvertTime(tr.Start), - Stop: values.ConvertTime(tr.End), - }, - Close: close, - StorageReader: reader, - } -} - -func (r *StorageReader) ReadWindowAggregate(ctx context.Context, spec query.ReadWindowAggregateSpec, alloc memory.Allocator) (query.TableIterator, error) { - return r.StorageReader.ReadWindowAggregate(ctx, spec, alloc) -} - -func TestStorageReader_ReadFilter(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 10*time.Second, []float64{1.0, 2.0, 3.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - ), - ) - tr := TimeRange("2019-11-25T00:00:00Z", "2019-11-25T00:00:30Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - mem := arrowmem.NewCheckedAllocator(arrowmem.DefaultAllocator) - defer mem.AssertSize(t, 0) - - alloc := &memory.ResourceAllocator{ - Allocator: mem, - } - ti, err := reader.ReadFilter(context.Background(), query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, alloc) - if err != nil { - t.Fatal(err) - } - - makeTable := func(t0 string) *executetest.Table { - start, stop := reader.Bounds.Start, reader.Bounds.Stop - return &executetest.Table{ - KeyCols: []string{"_start", "_stop", "_field", "_measurement", "t0"}, - ColMeta: []flux.ColMeta{ - {Label: "_start", Type: flux.TTime}, - {Label: "_stop", Type: flux.TTime}, - {Label: "_time", Type: flux.TTime}, - {Label: "_value", Type: flux.TFloat}, - {Label: "_field", Type: flux.TString}, - {Label: "_measurement", Type: flux.TString}, - {Label: "t0", Type: flux.TString}, - }, - Data: [][]interface{}{ - {start, stop, Time("2019-11-25T00:00:00Z"), 1.0, "f0", "m0", t0}, - {start, stop, Time("2019-11-25T00:00:10Z"), 2.0, "f0", "m0", t0}, - {start, stop, Time("2019-11-25T00:00:20Z"), 3.0, "f0", "m0", t0}, - }, - } - } - - want := []*executetest.Table{ - makeTable("a-0"), - makeTable("a-1"), - makeTable("a-2"), - } - executetest.NormalizeTables(want) - sort.Sort(executetest.SortedTables(want)) - - var got []*executetest.Table - if err := ti.Do(func(table flux.Table) error { - t, err := executetest.ConvertTable(table) - if err != nil { - return err - } - got = append(got, t) - return nil - }); err != nil { - t.Fatal(err) - } - executetest.NormalizeTables(got) - sort.Sort(executetest.SortedTables(got)) - - // compare these two - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } -} - -func TestStorageReader_Table(t *testing.T) { - reader := 
NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 10*time.Second, []float64{1.0, 2.0, 3.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - ), - ) - tr := TimeRange("2019-11-25T00:00:00Z", "2019-11-25T00:00:30Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - for _, tc := range []struct { - name string - newFn func(ctx context.Context, alloc memory.Allocator) flux.TableIterator - }{ - { - name: "ReadFilter", - newFn: func(ctx context.Context, alloc memory.Allocator) flux.TableIterator { - ti, err := reader.ReadFilter(context.Background(), query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, alloc) - if err != nil { - t.Fatal(err) - } - return ti - }, - }, - } { - t.Run(tc.name, func(t *testing.T) { - executetest.RunTableTests(t, executetest.TableTest{ - NewFn: tc.newFn, - IsDone: func(table flux.Table) bool { - return table.(interface { - IsDone() bool - }).IsDone() - }, - }) - }) - } -} - -func TestStorageReader_ReadWindowAggregate(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 10*time.Second, []float64{1.0, 2.0, 3.0, 4.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - ), - ) - tr := TimeRange("2019-11-25T00:00:00Z", "2019-11-25T00:02:00Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - for _, tt := range []struct { - aggregate plan.ProcedureKind - want flux.TableIterator - }{ - { - aggregate: storageflux.CountKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:00:30Z"), - static.Ints("_value", 3), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:30Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.Ints("_value", 3), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:01:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:30Z"), - static.Ints("_value", 3), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:01:30Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.Ints("_value", 3), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MinKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:00:30Z"), - static.Times("_time", "2019-11-25T00:00:00Z"), - static.Floats("_value", 1), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:30Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.Times("_time", "2019-11-25T00:00:40Z"), - static.Floats("_value", 1), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:01:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:30Z"), - static.Times("_time", "2019-11-25T00:01:20Z"), - static.Floats("_value", 1), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:01:30Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.Times("_time", 
"2019-11-25T00:01:30Z"), - static.Floats("_value", 2), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MaxKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:00:30Z"), - static.Times("_time", "2019-11-25T00:00:20Z"), - static.Floats("_value", 3), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:30Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.Times("_time", "2019-11-25T00:00:30Z"), - static.Floats("_value", 4), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:01:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:30Z"), - static.Times("_time", "2019-11-25T00:01:10Z"), - static.Floats("_value", 4), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:01:30Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.Times("_time", "2019-11-25T00:01:50Z"), - static.Floats("_value", 4), - }, - }, - }, - }, - }, - } { - t.Run(string(tt.aggregate), func(t *testing.T) { - mem := arrowmem.NewCheckedAllocator(arrowmem.DefaultAllocator) - defer mem.AssertSize(t, 0) - - alloc := &memory.ResourceAllocator{ - Allocator: mem, - } - got, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(30 * time.Second), - Period: flux.ConvertDuration(30 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - tt.aggregate, - }, - }, alloc) - if err != nil { - t.Fatal(err) - } - - if diff := table.Diff(tt.want, got); diff != "" { - t.Fatalf("unexpected output -want/+got:\n%s", diff) - } - }) - } -} - -func TestStorageReader_ReadWindowAggregate_ByStopTime(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 10*time.Second, []float64{1.0, 2.0, 3.0, 4.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - ), - ) - tr := TimeRange("2019-11-25T00:00:00Z", "2019-11-25T00:02:00Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - for _, tt := range []struct { - aggregate plan.ProcedureKind - want flux.TableIterator - }{ - { - aggregate: storageflux.CountKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:30Z", 30, 60, 90), - static.Ints("_value", 3, 3, 3, 3), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MinKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:30Z", 30, 60, 90), - static.Floats("_value", 1, 1, 1, 2), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MaxKind, - want: static.TableGroup{ - 
static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:30Z", 30, 60, 90), - static.Floats("_value", 3, 4, 4, 4), - }, - }, - }, - }, - }, - } { - got, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - TimeColumn: execute.DefaultStopColLabel, - Window: execute.Window{ - Every: flux.ConvertDuration(30 * time.Second), - Period: flux.ConvertDuration(30 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - tt.aggregate, - }, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - if diff := table.Diff(tt.want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } - } -} - -func TestStorageReader_ReadWindowAggregate_ByStartTime(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 10*time.Second, []float64{1.0, 2.0, 3.0, 4.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - ), - ) - tr := TimeRange("2019-11-25T00:00:00Z", "2019-11-25T00:02:00Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - for _, tt := range []struct { - aggregate plan.ProcedureKind - want flux.TableIterator - }{ - { - aggregate: storageflux.CountKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:00Z", 30, 60, 90), - static.Ints("_value", 3, 3, 3, 3), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MinKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:00Z", 30, 60, 90), - static.Floats("_value", 1, 1, 1, 2), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MaxKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:00Z", 30, 60, 90), - static.Floats("_value", 3, 4, 4, 4), - }, - }, - }, - }, - }, - } { - t.Run(string(tt.aggregate), func(t *testing.T) { - got, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - TimeColumn: execute.DefaultStartColLabel, - Window: execute.Window{ - Every: flux.ConvertDuration(30 * time.Second), - Period: flux.ConvertDuration(30 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - tt.aggregate, - }, - }, 
memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - if diff := table.Diff(tt.want, got); diff != "" { - t.Fatalf("unexpected output -want/+got:\n%s", diff) - } - }) - } -} - -func TestStorageReader_ReadWindowAggregate_CreateEmpty(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 15*time.Second, []float64{1.0, 2.0, 3.0, 4.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - ), - ) - tr := TimeRange("2019-11-25T00:00:00Z", "2019-11-25T00:01:00Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - for _, tt := range []struct { - aggregate plan.ProcedureKind - want flux.TableIterator - }{ - { - aggregate: storageflux.CountKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:00:10Z"), - static.Ints("_value", 1), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:10Z"), - static.TimeKey("_stop", "2019-11-25T00:00:20Z"), - static.Ints("_value", 1), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:20Z"), - static.TimeKey("_stop", "2019-11-25T00:00:30Z"), - static.Ints("_value", 0), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:30Z"), - static.TimeKey("_stop", "2019-11-25T00:00:40Z"), - static.Ints("_value", 1), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:40Z"), - static.TimeKey("_stop", "2019-11-25T00:00:50Z"), - static.Ints("_value", 1), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:50Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.Ints("_value", 0), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MinKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:00:10Z"), - static.Times("_time", "2019-11-25T00:00:00Z"), - static.Floats("_value", 1), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:10Z"), - static.TimeKey("_stop", "2019-11-25T00:00:20Z"), - static.Times("_time", "2019-11-25T00:00:15Z"), - static.Floats("_value", 2), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:20Z"), - static.TimeKey("_stop", "2019-11-25T00:00:30Z"), - static.Times("_time"), - static.Floats("_value"), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:30Z"), - static.TimeKey("_stop", "2019-11-25T00:00:40Z"), - static.Times("_time", "2019-11-25T00:00:30Z"), - static.Floats("_value", 3), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:40Z"), - static.TimeKey("_stop", "2019-11-25T00:00:50Z"), - static.Times("_time", "2019-11-25T00:00:45Z"), - static.Floats("_value", 4), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:50Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.Times("_time"), - static.Floats("_value"), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MaxKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", 
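// CreateEmpty is set for this test: points arrive every 15s but the windows
// are 10s wide, so every third window contains no data. Count reports 0 for
// those windows, while min/max emit a table with the window's group key and
// empty _time/_value columns, as in the expectations above and below.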
"a-1", "a-2"), - { - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:00:10Z"), - static.Times("_time", "2019-11-25T00:00:00Z"), - static.Floats("_value", 1), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:10Z"), - static.TimeKey("_stop", "2019-11-25T00:00:20Z"), - static.Times("_time", "2019-11-25T00:00:15Z"), - static.Floats("_value", 2), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:20Z"), - static.TimeKey("_stop", "2019-11-25T00:00:30Z"), - static.Times("_time"), - static.Floats("_value"), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:30Z"), - static.TimeKey("_stop", "2019-11-25T00:00:40Z"), - static.Times("_time", "2019-11-25T00:00:30Z"), - static.Floats("_value", 3), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:40Z"), - static.TimeKey("_stop", "2019-11-25T00:00:50Z"), - static.Times("_time", "2019-11-25T00:00:45Z"), - static.Floats("_value", 4), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:50Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.Times("_time"), - static.Floats("_value"), - }, - }, - }, - }, - }, - } { - t.Run(string(tt.aggregate), func(t *testing.T) { - got, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - tt.aggregate, - }, - CreateEmpty: true, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - if diff := table.Diff(tt.want, got); diff != "" { - t.Fatalf("unexpected output -want/+got:\n%s", diff) - } - }) - } -} - -func TestStorageReader_ReadWindowAggregate_CreateEmptyByStopTime(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 15*time.Second, []float64{1.0, 2.0, 3.0, 4.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - ), - ) - tr := TimeRange("2019-11-25T00:00:00Z", "2019-11-25T00:01:00Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - for _, tt := range []struct { - aggregate plan.ProcedureKind - want flux.TableIterator - }{ - { - aggregate: storageflux.CountKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:10Z", 10, 20, 30, 40, 50), - static.Ints("_value", 1, 1, 0, 1, 1, 0), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MinKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:10Z", 10, 30, 40), - static.Floats("_value", 1, 2, 3, 4), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MaxKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - 
static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:10Z", 10, 30, 40), - static.Floats("_value", 1, 2, 3, 4), - }, - }, - }, - }, - }, - } { - t.Run(string(tt.aggregate), func(t *testing.T) { - got, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - TimeColumn: execute.DefaultStopColLabel, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - tt.aggregate, - }, - CreateEmpty: true, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - if diff := table.Diff(tt.want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } - }) - } -} - -func TestStorageReader_ReadWindowAggregate_CreateEmptyByStartTime(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 15*time.Second, []float64{1.0, 2.0, 3.0, 4.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - ), - ) - tr := TimeRange("2019-11-25T00:00:00Z", "2019-11-25T00:01:00Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - for _, tt := range []struct { - aggregate plan.ProcedureKind - want flux.TableIterator - }{ - { - aggregate: storageflux.CountKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:00Z", 10, 20, 30, 40, 50), - static.Ints("_value", 1, 1, 0, 1, 1, 0), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MinKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:00Z", 10, 30, 40), - static.Floats("_value", 1, 2, 3, 4), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MaxKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:00Z", 10, 30, 40), - static.Floats("_value", 1, 2, 3, 4), - }, - }, - }, - }, - }, - } { - t.Run(string(tt.aggregate), func(t *testing.T) { - got, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - TimeColumn: execute.DefaultStartColLabel, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - }, - 
Aggregates: []plan.ProcedureKind{ - tt.aggregate, - }, - CreateEmpty: true, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - if diff := table.Diff(tt.want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } - }) - } -} - -func TestStorageReader_ReadWindowAggregate_CreateEmptyAggregateByStopTime(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 15*time.Second, []float64{1.0, 2.0, 3.0, 4.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - ), - ) - tr := TimeRange("2019-11-25T00:00:00Z", "2019-11-25T00:01:00Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - for _, tt := range []struct { - aggregate plan.ProcedureKind - want flux.TableIterator - }{ - { - aggregate: storageflux.CountKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:10Z", 10, 20, 30, 40, 50), - static.Ints("_value", 1, 1, 0, 1, 1, 0), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MinKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:10Z", 10, 20, 30, 40, 50), - static.Floats("_value", 1, 2, nil, 3, 4, nil), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MaxKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:10Z", 10, 20, 30, 40, 50), - static.Floats("_value", 1, 2, nil, 3, 4, nil), - }, - }, - }, - }, - }, - } { - t.Run(string(tt.aggregate), func(t *testing.T) { - got, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - TimeColumn: execute.DefaultStopColLabel, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - tt.aggregate, - }, - CreateEmpty: true, - ForceAggregate: true, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - if diff := table.Diff(tt.want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } - }) - } -} - -func TestStorageReader_ReadWindowAggregate_CreateEmptyAggregateByStartTime(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 15*time.Second, []float64{1.0, 2.0, 3.0, 4.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - ), - ) - tr := TimeRange("2019-11-25T00:00:00Z", "2019-11-25T00:01:00Z") - return 
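// The two "CreateEmptyAggregate" tests (ByStopTime above, ByStartTime below)
// also set ForceAggregate, which appears to make min/max behave like plain
// aggregates here: empty windows are kept and reported as null values
// (1, 2, nil, 3, 4, nil) instead of being dropped.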
datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - for _, tt := range []struct { - aggregate plan.ProcedureKind - want flux.TableIterator - }{ - { - aggregate: storageflux.CountKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:00Z", 10, 20, 30, 40, 50), - static.Ints("_value", 1, 1, 0, 1, 1, 0), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MinKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:00Z", 10, 20, 30, 40, 50), - static.Floats("_value", 1, 2, nil, 3, 4, nil), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MaxKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:00Z", 10, 20, 30, 40, 50), - static.Floats("_value", 1, 2, nil, 3, 4, nil), - }, - }, - }, - }, - }, - } { - t.Run(string(tt.aggregate), func(t *testing.T) { - got, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - TimeColumn: execute.DefaultStartColLabel, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - tt.aggregate, - }, - CreateEmpty: true, - ForceAggregate: true, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - if diff := table.Diff(tt.want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } - }) - } -} - -func TestStorageReader_ReadWindowAggregate_TruncatedBounds(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 5*time.Second, []float64{1.0, 2.0, 3.0, 4.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - ), - ) - tr := TimeRange("2019-11-25T00:00:00Z", "2019-11-25T00:01:00Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - for _, tt := range []struct { - aggregate plan.ProcedureKind - want flux.TableIterator - }{ - { - aggregate: storageflux.CountKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:05Z"), - static.TimeKey("_stop", "2019-11-25T00:00:10Z"), - static.Ints("_value", 1), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:10Z"), - static.TimeKey("_stop", "2019-11-25T00:00:20Z"), - static.Ints("_value", 2), - }, - static.Table{ - static.TimeKey("_start", 
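// This test queries bounds 00:00:05-00:00:25 that do not line up with the 10s
// windows, so the first and last windows are clipped to the bounds: the
// expected tables run [00:00:05, 00:00:10), [00:00:10, 00:00:20) and, below,
// [00:00:20, 00:00:25).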
"2019-11-25T00:00:20Z"), - static.TimeKey("_stop", "2019-11-25T00:00:25Z"), - static.Ints("_value", 1), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MinKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:05Z"), - static.TimeKey("_stop", "2019-11-25T00:00:10Z"), - static.Times("_time", "2019-11-25T00:00:05Z"), - static.Floats("_value", 2), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:10Z"), - static.TimeKey("_stop", "2019-11-25T00:00:20Z"), - static.Times("_time", "2019-11-25T00:00:10Z"), - static.Floats("_value", 3), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:20Z"), - static.TimeKey("_stop", "2019-11-25T00:00:25Z"), - static.Times("_time", "2019-11-25T00:00:20Z"), - static.Floats("_value", 1), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MaxKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:05Z"), - static.TimeKey("_stop", "2019-11-25T00:00:10Z"), - static.Times("_time", "2019-11-25T00:00:05Z"), - static.Floats("_value", 2), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:10Z"), - static.TimeKey("_stop", "2019-11-25T00:00:20Z"), - static.Times("_time", "2019-11-25T00:00:15Z"), - static.Floats("_value", 4), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:20Z"), - static.TimeKey("_stop", "2019-11-25T00:00:25Z"), - static.Times("_time", "2019-11-25T00:00:20Z"), - static.Floats("_value", 1), - }, - }, - }, - }, - }, - } { - t.Run(string(tt.aggregate), func(t *testing.T) { - got, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: execute.Bounds{ - Start: values.ConvertTime(mustParseTime("2019-11-25T00:00:05Z")), - Stop: values.ConvertTime(mustParseTime("2019-11-25T00:00:25Z")), - }, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - tt.aggregate, - }, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - if diff := table.Diff(tt.want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } - }) - } -} - -func TestStorageReader_ReadWindowAggregate_TruncatedBoundsCreateEmpty(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 15*time.Second, []float64{1.0, 2.0, 3.0, 4.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - ), - ) - tr := TimeRange("2019-11-25T00:00:00Z", "2019-11-25T00:01:00Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - for _, tt := range []struct { - aggregate plan.ProcedureKind - want flux.TableIterator - }{ - { - aggregate: storageflux.CountKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:05Z"), - 
static.TimeKey("_stop", "2019-11-25T00:00:10Z"), - static.Ints("_value", 0), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:10Z"), - static.TimeKey("_stop", "2019-11-25T00:00:20Z"), - static.Ints("_value", 1), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:20Z"), - static.TimeKey("_stop", "2019-11-25T00:00:25Z"), - static.Ints("_value", 0), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MinKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:05Z"), - static.TimeKey("_stop", "2019-11-25T00:00:10Z"), - static.Times("_time"), - static.Floats("_value"), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:10Z"), - static.TimeKey("_stop", "2019-11-25T00:00:20Z"), - static.Times("_time", "2019-11-25T00:00:15Z"), - static.Floats("_value", 2), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:20Z"), - static.TimeKey("_stop", "2019-11-25T00:00:25Z"), - static.Times("_time"), - static.Floats("_value"), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MaxKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:05Z"), - static.TimeKey("_stop", "2019-11-25T00:00:10Z"), - static.Times("_time"), - static.Floats("_value"), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:10Z"), - static.TimeKey("_stop", "2019-11-25T00:00:20Z"), - static.Times("_time", "2019-11-25T00:00:15Z"), - static.Floats("_value", 2), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:20Z"), - static.TimeKey("_stop", "2019-11-25T00:00:25Z"), - static.Times("_time"), - static.Floats("_value"), - }, - }, - }, - }, - }, - } { - t.Run(string(tt.aggregate), func(t *testing.T) { - got, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: execute.Bounds{ - Start: values.ConvertTime(mustParseTime("2019-11-25T00:00:05Z")), - Stop: values.ConvertTime(mustParseTime("2019-11-25T00:00:25Z")), - }, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - tt.aggregate, - }, - CreateEmpty: true, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - if diff := table.Diff(tt.want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } - }) - } -} - -func TestStorageReader_ReadWindowAggregate_Mean(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - tagsSpec := &datagen.TagsSpec{ - Tags: []*datagen.TagValuesSpec{ - { - TagKey: "t0", - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence("a%s", 0, 1) - }, - }, - }, - } - spec := datagen.Spec{ - Measurements: []datagen.MeasurementSpec{ - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f0", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: 5 * time.Second, - }, - DataType: models.Integer, - Values: func(spec datagen.TimeSequenceSpec) 
datagen.TimeValuesSequence { - return datagen.NewTimeIntegerValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewIntegerArrayValuesSequence([]int64{1, 2, 3, 4}), - ) - }, - }, - }, - }, - } - tr := datagen.TimeRange{ - Start: mustParseTime("2019-11-25T00:00:00Z"), - End: mustParseTime("2019-11-25T00:01:00Z"), - } - return datagen.NewSeriesGeneratorFromSpec(&spec, tr), tr - }) - defer reader.Close() - - t.Run("unwindowed mean", func(t *testing.T) { - ti, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(math.MaxInt64 * time.Nanosecond), - Period: flux.ConvertDuration(math.MaxInt64 * time.Nanosecond), - }, - Aggregates: []plan.ProcedureKind{ - storageflux.MeanKind, - }, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - want := static.Table{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.StringKey("t0", "a0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.Floats("_value", 2.5), - } - if diff := table.Diff(want, ti); diff != "" { - t.Fatalf("table iterators do not match; -want/+got:\n%s", diff) - } - }) - - t.Run("windowed mean", func(t *testing.T) { - ti, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - storageflux.MeanKind, - }, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - want := static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.StringKey("t0", "a0"), - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:00:10Z"), - static.Floats("_value", 1.5), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:10Z"), - static.TimeKey("_stop", "2019-11-25T00:00:20Z"), - static.Floats("_value", 3.5), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:20Z"), - static.TimeKey("_stop", "2019-11-25T00:00:30Z"), - static.Floats("_value", 1.5), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:30Z"), - static.TimeKey("_stop", "2019-11-25T00:00:40Z"), - static.Floats("_value", 3.5), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:40Z"), - static.TimeKey("_stop", "2019-11-25T00:00:50Z"), - static.Floats("_value", 1.5), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:50Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.Floats("_value", 3.5), - }, - } - if diff := table.Diff(want, ti); diff != "" { - t.Fatalf("table iterators do not match; -want/+got:\n%s", diff) - } - }) - - t.Run("windowed mean with offset", func(t *testing.T) { - ti, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - Offset: 
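// A 2s offset shifts the 10s window boundaries to :02, :12, :22, ...; the
// leading window [00:00:00, 00:00:02) therefore contains only the point at
// :00 (mean 1.0), each interior window holds two consecutive values of the
// repeating 1, 2, 3, 4 sequence (mean 2.5), and the trailing window
// [00:00:52, 00:01:00) holds only the value 4.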
flux.ConvertDuration(2 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - storageflux.MeanKind, - }, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - want := static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.StringKey("t0", "a0"), - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:00:02Z"), - static.Floats("_value", 1.0), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:02Z"), - static.TimeKey("_stop", "2019-11-25T00:00:12Z"), - static.Floats("_value", 2.5), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:12Z"), - static.TimeKey("_stop", "2019-11-25T00:00:22Z"), - static.Floats("_value", 2.5), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:22Z"), - static.TimeKey("_stop", "2019-11-25T00:00:32Z"), - static.Floats("_value", 2.5), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:32Z"), - static.TimeKey("_stop", "2019-11-25T00:00:42Z"), - static.Floats("_value", 2.5), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:42Z"), - static.TimeKey("_stop", "2019-11-25T00:00:52Z"), - static.Floats("_value", 2.5), - }, - static.Table{ - static.TimeKey("_start", "2019-11-25T00:00:52Z"), - static.TimeKey("_stop", "2019-11-25T00:01:00Z"), - static.Floats("_value", 4), - }, - } - if diff := table.Diff(want, ti); diff != "" { - t.Fatalf("table iterators do not match; -want/+got:\n%s", diff) - } - }) -} - -func TestStorageReader_ReadWindowFirst(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - tagsSpec := &datagen.TagsSpec{ - Tags: []*datagen.TagValuesSpec{ - { - TagKey: "t0", - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence("a%s", 0, 1) - }, - }, - }, - } - spec := datagen.Spec{ - Measurements: []datagen.MeasurementSpec{ - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f0", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: 5 * time.Second, - }, - DataType: models.Integer, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - return datagen.NewTimeIntegerValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewIntegerArrayValuesSequence([]int64{1, 2, 3, 4}), - ) - }, - }, - }, - }, - } - tr := datagen.TimeRange{ - Start: mustParseTime("2019-11-25T00:00:00Z"), - End: mustParseTime("2019-11-25T00:01:00Z"), - } - return datagen.NewSeriesGeneratorFromSpec(&spec, tr), tr - }) - defer reader.Close() - - ti, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - storageflux.FirstKind, - }, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - makeWindowTable := func(start, stop, time values.Time, v int64) *executetest.Table { - return &executetest.Table{ - KeyCols: []string{"_start", "_stop", "_field", "_measurement", "t0"}, - ColMeta: []flux.ColMeta{ - {Label: "_start", Type: flux.TTime}, - {Label: "_stop", Type: flux.TTime}, - {Label: "_time", Type: flux.TTime}, - {Label: "_value", Type: flux.TInt}, - {Label: 
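// This test checks first() over 10s windows: with integer values repeating
// 1, 2, 3, 4 every 5s, each window's first point alternates between 1 and 3.
// Unlike the earlier tests it builds expectations with executetest.Table and
// compares via cmp.Diff after normalizing and sorting both sides.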
"_field", Type: flux.TString}, - {Label: "_measurement", Type: flux.TString}, - {Label: "t0", Type: flux.TString}, - }, - Data: [][]interface{}{ - {start, stop, time, v, "f0", "m0", "a0"}, - }, - } - } - want := []*executetest.Table{ - makeWindowTable(Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:00:10Z"), Time("2019-11-25T00:00:00Z"), 1), - makeWindowTable(Time("2019-11-25T00:00:10Z"), Time("2019-11-25T00:00:20Z"), Time("2019-11-25T00:00:10Z"), 3), - makeWindowTable(Time("2019-11-25T00:00:20Z"), Time("2019-11-25T00:00:30Z"), Time("2019-11-25T00:00:20Z"), 1), - makeWindowTable(Time("2019-11-25T00:00:30Z"), Time("2019-11-25T00:00:40Z"), Time("2019-11-25T00:00:30Z"), 3), - makeWindowTable(Time("2019-11-25T00:00:40Z"), Time("2019-11-25T00:00:50Z"), Time("2019-11-25T00:00:40Z"), 1), - makeWindowTable(Time("2019-11-25T00:00:50Z"), Time("2019-11-25T00:01:00Z"), Time("2019-11-25T00:00:50Z"), 3), - } - - executetest.NormalizeTables(want) - sort.Sort(executetest.SortedTables(want)) - - var got []*executetest.Table - if err := ti.Do(func(table flux.Table) error { - t, err := executetest.ConvertTable(table) - if err != nil { - return err - } - got = append(got, t) - return nil - }); err != nil { - t.Fatal(err) - } - executetest.NormalizeTables(got) - sort.Sort(executetest.SortedTables(got)) - - // compare these two - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } -} - -func TestStorageReader_WindowFirstOffset(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - tagsSpec := &datagen.TagsSpec{ - Tags: []*datagen.TagValuesSpec{ - { - TagKey: "t0", - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence("a%s", 0, 1) - }, - }, - }, - } - spec := datagen.Spec{ - Measurements: []datagen.MeasurementSpec{ - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f0", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: 5 * time.Second, - }, - DataType: models.Integer, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - return datagen.NewTimeIntegerValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewIntegerArrayValuesSequence([]int64{1, 2, 3, 4}), - ) - }, - }, - }, - }, - } - tr := datagen.TimeRange{ - Start: mustParseTime("2019-11-25T00:00:00Z"), - End: mustParseTime("2019-11-25T00:01:00Z"), - } - return datagen.NewSeriesGeneratorFromSpec(&spec, tr), tr - }) - defer reader.Close() - - ti, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - Offset: flux.ConvertDuration(5 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - storageflux.FirstKind, - }, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - makeWindowTable := func(start, stop, time values.Time, v int64) *executetest.Table { - return &executetest.Table{ - KeyCols: []string{"_start", "_stop", "_field", "_measurement", "t0"}, - ColMeta: []flux.ColMeta{ - {Label: "_start", Type: flux.TTime}, - {Label: "_stop", Type: flux.TTime}, - {Label: "_time", Type: flux.TTime}, - {Label: "_value", Type: flux.TInt}, - {Label: "_field", Type: flux.TString}, - {Label: 
"_measurement", Type: flux.TString}, - {Label: "t0", Type: flux.TString}, - }, - Data: [][]interface{}{ - {start, stop, time, v, "f0", "m0", "a0"}, - }, - } - } - want := []*executetest.Table{ - makeWindowTable(Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:00:05Z"), Time("2019-11-25T00:00:00Z"), 1), - makeWindowTable(Time("2019-11-25T00:00:05Z"), Time("2019-11-25T00:00:15Z"), Time("2019-11-25T00:00:05Z"), 2), - makeWindowTable(Time("2019-11-25T00:00:15Z"), Time("2019-11-25T00:00:25Z"), Time("2019-11-25T00:00:15Z"), 4), - makeWindowTable(Time("2019-11-25T00:00:25Z"), Time("2019-11-25T00:00:35Z"), Time("2019-11-25T00:00:25Z"), 2), - makeWindowTable(Time("2019-11-25T00:00:35Z"), Time("2019-11-25T00:00:45Z"), Time("2019-11-25T00:00:35Z"), 4), - makeWindowTable(Time("2019-11-25T00:00:45Z"), Time("2019-11-25T00:00:55Z"), Time("2019-11-25T00:00:45Z"), 2), - makeWindowTable(Time("2019-11-25T00:00:55Z"), Time("2019-11-25T00:01:00Z"), Time("2019-11-25T00:00:55Z"), 4), - } - - executetest.NormalizeTables(want) - sort.Sort(executetest.SortedTables(want)) - - var got []*executetest.Table - if err := ti.Do(func(table flux.Table) error { - t, err := executetest.ConvertTable(table) - if err != nil { - return err - } - got = append(got, t) - return nil - }); err != nil { - t.Fatal(err) - } - executetest.NormalizeTables(got) - sort.Sort(executetest.SortedTables(got)) - - // compare these two - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } -} - -func TestStorageReader_WindowSumOffset(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - tagsSpec := &datagen.TagsSpec{ - Tags: []*datagen.TagValuesSpec{ - { - TagKey: "t0", - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence("a%s", 0, 1) - }, - }, - }, - } - spec := datagen.Spec{ - Measurements: []datagen.MeasurementSpec{ - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f0", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: 5 * time.Second, - }, - DataType: models.Integer, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - return datagen.NewTimeIntegerValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewIntegerArrayValuesSequence([]int64{1, 2, 3, 4}), - ) - }, - }, - }, - }, - } - tr := datagen.TimeRange{ - Start: mustParseTime("2019-11-25T00:00:00Z"), - End: mustParseTime("2019-11-25T00:01:00Z"), - } - return datagen.NewSeriesGeneratorFromSpec(&spec, tr), tr - }) - defer reader.Close() - - ti, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - Offset: flux.ConvertDuration(2 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - storageflux.SumKind, - }, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - makeWindowTable := func(start, stop values.Time, v int64) *executetest.Table { - return &executetest.Table{ - KeyCols: []string{"_start", "_stop", "_field", "_measurement", "t0"}, - ColMeta: []flux.ColMeta{ - {Label: "_start", Type: flux.TTime}, - {Label: "_stop", Type: flux.TTime}, - {Label: "_value", Type: flux.TInt}, - {Label: "_field", Type: 
flux.TString}, - {Label: "_measurement", Type: flux.TString}, - {Label: "t0", Type: flux.TString}, - }, - Data: [][]interface{}{ - {start, stop, v, "f0", "m0", "a0"}, - }, - } - } - want := []*executetest.Table{ - makeWindowTable(Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:00:02Z"), 1), - makeWindowTable(Time("2019-11-25T00:00:02Z"), Time("2019-11-25T00:00:12Z"), 5), - makeWindowTable(Time("2019-11-25T00:00:12Z"), Time("2019-11-25T00:00:22Z"), 5), - makeWindowTable(Time("2019-11-25T00:00:22Z"), Time("2019-11-25T00:00:32Z"), 5), - makeWindowTable(Time("2019-11-25T00:00:32Z"), Time("2019-11-25T00:00:42Z"), 5), - makeWindowTable(Time("2019-11-25T00:00:42Z"), Time("2019-11-25T00:00:52Z"), 5), - makeWindowTable(Time("2019-11-25T00:00:52Z"), Time("2019-11-25T00:01:00Z"), 4), - } - - executetest.NormalizeTables(want) - sort.Sort(executetest.SortedTables(want)) - - var got []*executetest.Table - if err := ti.Do(func(table flux.Table) error { - t, err := executetest.ConvertTable(table) - if err != nil { - return err - } - got = append(got, t) - return nil - }); err != nil { - t.Fatal(err) - } - executetest.NormalizeTables(got) - sort.Sort(executetest.SortedTables(got)) - - // compare these two - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } -} - -func TestStorageReader_ReadWindowFirstCreateEmpty(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - tagsSpec := &datagen.TagsSpec{ - Tags: []*datagen.TagValuesSpec{ - { - TagKey: "t0", - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence("a%s", 0, 1) - }, - }, - }, - } - spec := datagen.Spec{ - Measurements: []datagen.MeasurementSpec{ - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f0", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: 20 * time.Second, - }, - DataType: models.Integer, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - return datagen.NewTimeIntegerValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewIntegerArrayValuesSequence([]int64{1, 2}), - ) - }, - }, - }, - }, - } - tr := datagen.TimeRange{ - Start: mustParseTime("2019-11-25T00:00:00Z"), - End: mustParseTime("2019-11-25T00:01:00Z"), - } - return datagen.NewSeriesGeneratorFromSpec(&spec, tr), tr - }) - defer reader.Close() - - ti, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - storageflux.FirstKind, - }, - CreateEmpty: true, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - makeEmptyTable := func(start, stop values.Time) *executetest.Table { - return &executetest.Table{ - KeyCols: []string{"_start", "_stop", "_field", "_measurement", "t0"}, - KeyValues: []interface{}{start, stop, "f0", "m0", "a0"}, - ColMeta: []flux.ColMeta{ - {Label: "_start", Type: flux.TTime}, - {Label: "_stop", Type: flux.TTime}, - {Label: "_time", Type: flux.TTime}, - {Label: "_value", Type: flux.TInt}, - {Label: "_field", Type: flux.TString}, - {Label: "_measurement", Type: flux.TString}, - {Label: "t0", Type: flux.TString}, - }, - Data: nil, - } - } - 
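// Points are generated only every 20s, so with 10s windows and CreateEmpty
// set every other window is empty. For a selector like first() an empty
// window is represented as a table that carries the full group key
// (KeyValues) but no rows, which is what makeEmptyTable above builds;
// makeWindowTable below builds the populated windows.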
makeWindowTable := func(start, stop, time values.Time, v int64) *executetest.Table { - return &executetest.Table{ - KeyCols: []string{"_start", "_stop", "_field", "_measurement", "t0"}, - ColMeta: []flux.ColMeta{ - {Label: "_start", Type: flux.TTime}, - {Label: "_stop", Type: flux.TTime}, - {Label: "_time", Type: flux.TTime}, - {Label: "_value", Type: flux.TInt}, - {Label: "_field", Type: flux.TString}, - {Label: "_measurement", Type: flux.TString}, - {Label: "t0", Type: flux.TString}, - }, - Data: [][]interface{}{ - {start, stop, time, v, "f0", "m0", "a0"}, - }, - } - } - want := []*executetest.Table{ - makeWindowTable( - Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:00:10Z"), Time("2019-11-25T00:00:00Z"), 1, - ), - makeEmptyTable( - Time("2019-11-25T00:00:10Z"), Time("2019-11-25T00:00:20Z"), - ), - makeWindowTable( - Time("2019-11-25T00:00:20Z"), Time("2019-11-25T00:00:30Z"), Time("2019-11-25T00:00:20Z"), 2, - ), - makeEmptyTable( - Time("2019-11-25T00:00:30Z"), Time("2019-11-25T00:00:40Z"), - ), - makeWindowTable( - Time("2019-11-25T00:00:40Z"), Time("2019-11-25T00:00:50Z"), Time("2019-11-25T00:00:40Z"), 1, - ), - makeEmptyTable( - Time("2019-11-25T00:00:50Z"), Time("2019-11-25T00:01:00Z"), - ), - } - - executetest.NormalizeTables(want) - sort.Sort(executetest.SortedTables(want)) - - var got []*executetest.Table - if err := ti.Do(func(table flux.Table) error { - t, err := executetest.ConvertTable(table) - if err != nil { - return err - } - got = append(got, t) - return nil - }); err != nil { - t.Fatal(err) - } - executetest.NormalizeTables(got) - sort.Sort(executetest.SortedTables(got)) - - // compare these two - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } -} - -func TestStorageReader_WindowFirstOffsetCreateEmpty(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - tagsSpec := &datagen.TagsSpec{ - Tags: []*datagen.TagValuesSpec{ - { - TagKey: "t0", - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence("a%s", 0, 1) - }, - }, - }, - } - spec := datagen.Spec{ - Measurements: []datagen.MeasurementSpec{ - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f0", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: 20 * time.Second, - }, - DataType: models.Integer, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - return datagen.NewTimeIntegerValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewIntegerArrayValuesSequence([]int64{1, 2}), - ) - }, - }, - }, - }, - } - tr := datagen.TimeRange{ - Start: mustParseTime("2019-11-25T00:00:00Z"), - End: mustParseTime("2019-11-25T00:01:00Z"), - } - return datagen.NewSeriesGeneratorFromSpec(&spec, tr), tr - }) - defer reader.Close() - - ti, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - Offset: flux.ConvertDuration(5 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - storageflux.FirstKind, - }, - CreateEmpty: true, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - makeEmptyTable := func(start, stop values.Time) *executetest.Table { - return 
&executetest.Table{ - KeyCols: []string{"_start", "_stop", "_field", "_measurement", "t0"}, - KeyValues: []interface{}{start, stop, "f0", "m0", "a0"}, - ColMeta: []flux.ColMeta{ - {Label: "_start", Type: flux.TTime}, - {Label: "_stop", Type: flux.TTime}, - {Label: "_time", Type: flux.TTime}, - {Label: "_value", Type: flux.TInt}, - {Label: "_field", Type: flux.TString}, - {Label: "_measurement", Type: flux.TString}, - {Label: "t0", Type: flux.TString}, - }, - Data: nil, - } - } - makeWindowTable := func(start, stop, time values.Time, v int64) *executetest.Table { - return &executetest.Table{ - KeyCols: []string{"_start", "_stop", "_field", "_measurement", "t0"}, - ColMeta: []flux.ColMeta{ - {Label: "_start", Type: flux.TTime}, - {Label: "_stop", Type: flux.TTime}, - {Label: "_time", Type: flux.TTime}, - {Label: "_value", Type: flux.TInt}, - {Label: "_field", Type: flux.TString}, - {Label: "_measurement", Type: flux.TString}, - {Label: "t0", Type: flux.TString}, - }, - Data: [][]interface{}{ - {start, stop, time, v, "f0", "m0", "a0"}, - }, - } - } - want := []*executetest.Table{ - makeWindowTable( - Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:00:05Z"), Time("2019-11-25T00:00:00Z"), 1, - ), - makeEmptyTable( - Time("2019-11-25T00:00:05Z"), Time("2019-11-25T00:00:15Z"), - ), - makeWindowTable( - Time("2019-11-25T00:00:15Z"), Time("2019-11-25T00:00:25Z"), Time("2019-11-25T00:00:20Z"), 2, - ), - makeEmptyTable( - Time("2019-11-25T00:00:25Z"), Time("2019-11-25T00:00:35Z"), - ), - makeWindowTable( - Time("2019-11-25T00:00:35Z"), Time("2019-11-25T00:00:45Z"), Time("2019-11-25T00:00:40Z"), 1, - ), - makeEmptyTable( - Time("2019-11-25T00:00:45Z"), Time("2019-11-25T00:00:55Z"), - ), - makeEmptyTable( - Time("2019-11-25T00:00:55Z"), Time("2019-11-25T00:01:00Z"), - ), - } - - executetest.NormalizeTables(want) - sort.Sort(executetest.SortedTables(want)) - - var got []*executetest.Table - if err := ti.Do(func(table flux.Table) error { - t, err := executetest.ConvertTable(table) - if err != nil { - return err - } - got = append(got, t) - return nil - }); err != nil { - t.Fatal(err) - } - executetest.NormalizeTables(got) - sort.Sort(executetest.SortedTables(got)) - - // compare these two - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } -} - -func TestStorageReader_WindowSumOffsetCreateEmpty(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - tagsSpec := &datagen.TagsSpec{ - Tags: []*datagen.TagValuesSpec{ - { - TagKey: "t0", - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence("a%s", 0, 1) - }, - }, - }, - } - spec := datagen.Spec{ - Measurements: []datagen.MeasurementSpec{ - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f0", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: 20 * time.Second, - }, - DataType: models.Integer, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - return datagen.NewTimeIntegerValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewIntegerArrayValuesSequence([]int64{1, 2}), - ) - }, - }, - }, - }, - } - tr := datagen.TimeRange{ - Start: mustParseTime("2019-11-25T00:00:00Z"), - End: mustParseTime("2019-11-25T00:01:00Z"), - } - return datagen.NewSeriesGeneratorFromSpec(&spec, tr), tr - }) - defer reader.Close() - - ti, err := reader.ReadWindowAggregate(context.Background(), 
query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - Offset: flux.ConvertDuration(5 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - storageflux.SumKind, - }, - CreateEmpty: true, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - makeEmptyTable := func(start, stop values.Time) *executetest.Table { - return &executetest.Table{ - KeyCols: []string{"_start", "_stop", "_field", "_measurement", "t0"}, - KeyValues: []interface{}{start, stop, "f0", "m0", "a0"}, - ColMeta: []flux.ColMeta{ - {Label: "_start", Type: flux.TTime}, - {Label: "_stop", Type: flux.TTime}, - {Label: "_value", Type: flux.TInt}, - {Label: "_field", Type: flux.TString}, - {Label: "_measurement", Type: flux.TString}, - {Label: "t0", Type: flux.TString}, - }, - Data: [][]interface{}{ - {start, stop, nil, "f0", "m0", "a0"}, - }, - } - } - makeWindowTable := func(start, stop values.Time, v int64) *executetest.Table { - return &executetest.Table{ - KeyCols: []string{"_start", "_stop", "_field", "_measurement", "t0"}, - ColMeta: []flux.ColMeta{ - {Label: "_start", Type: flux.TTime}, - {Label: "_stop", Type: flux.TTime}, - {Label: "_value", Type: flux.TInt}, - {Label: "_field", Type: flux.TString}, - {Label: "_measurement", Type: flux.TString}, - {Label: "t0", Type: flux.TString}, - }, - Data: [][]interface{}{ - {start, stop, v, "f0", "m0", "a0"}, - }, - } - } - want := []*executetest.Table{ - makeWindowTable( - Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:00:05Z"), 1, - ), - makeEmptyTable( - Time("2019-11-25T00:00:05Z"), Time("2019-11-25T00:00:15Z"), - ), - makeWindowTable( - Time("2019-11-25T00:00:15Z"), Time("2019-11-25T00:00:25Z"), 2, - ), - makeEmptyTable( - Time("2019-11-25T00:00:25Z"), Time("2019-11-25T00:00:35Z"), - ), - makeWindowTable( - Time("2019-11-25T00:00:35Z"), Time("2019-11-25T00:00:45Z"), 1, - ), - makeEmptyTable( - Time("2019-11-25T00:00:45Z"), Time("2019-11-25T00:00:55Z"), - ), - makeEmptyTable( - Time("2019-11-25T00:00:55Z"), Time("2019-11-25T00:01:00Z"), - ), - } - - executetest.NormalizeTables(want) - sort.Sort(executetest.SortedTables(want)) - - var got []*executetest.Table - if err := ti.Do(func(table flux.Table) error { - t, err := executetest.ConvertTable(table) - if err != nil { - return err - } - got = append(got, t) - return nil - }); err != nil { - t.Fatal(err) - } - executetest.NormalizeTables(got) - sort.Sort(executetest.SortedTables(got)) - - // compare these two - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } -} - -func TestStorageReader_ReadWindowFirstTimeColumn(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - tagsSpec := &datagen.TagsSpec{ - Tags: []*datagen.TagValuesSpec{ - { - TagKey: "t0", - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence("a%s", 0, 1) - }, - }, - }, - } - spec := datagen.Spec{ - Measurements: []datagen.MeasurementSpec{ - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f0", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: 20 * time.Second, - }, - DataType: models.Integer, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - return 
datagen.NewTimeIntegerValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewIntegerArrayValuesSequence([]int64{1, 2}), - ) - }, - }, - }, - }, - } - tr := datagen.TimeRange{ - Start: mustParseTime("2019-11-25T00:00:00Z"), - End: mustParseTime("2019-11-25T00:01:00Z"), - } - return datagen.NewSeriesGeneratorFromSpec(&spec, tr), tr - }) - defer reader.Close() - - ti, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - storageflux.FirstKind, - }, - CreateEmpty: true, - TimeColumn: execute.DefaultStopColLabel, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - want := []*executetest.Table{{ - KeyCols: []string{"_start", "_stop", "_field", "_measurement", "t0"}, - ColMeta: []flux.ColMeta{ - {Label: "_start", Type: flux.TTime}, - {Label: "_stop", Type: flux.TTime}, - {Label: "_time", Type: flux.TTime}, - {Label: "_value", Type: flux.TInt}, - {Label: "_field", Type: flux.TString}, - {Label: "_measurement", Type: flux.TString}, - {Label: "t0", Type: flux.TString}, - }, - Data: [][]interface{}{ - {Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:01:00Z"), Time("2019-11-25T00:00:10Z"), int64(1), "f0", "m0", "a0"}, - {Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:01:00Z"), Time("2019-11-25T00:00:30Z"), int64(2), "f0", "m0", "a0"}, - {Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:01:00Z"), Time("2019-11-25T00:00:50Z"), int64(1), "f0", "m0", "a0"}, - }, - }} - - executetest.NormalizeTables(want) - sort.Sort(executetest.SortedTables(want)) - - var got []*executetest.Table - if err := ti.Do(func(table flux.Table) error { - t, err := executetest.ConvertTable(table) - if err != nil { - return err - } - got = append(got, t) - return nil - }); err != nil { - t.Fatal(err) - } - executetest.NormalizeTables(got) - sort.Sort(executetest.SortedTables(got)) - - // compare these two - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } -} - -func TestStorageReader_WindowFirstOffsetTimeColumn(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - tagsSpec := &datagen.TagsSpec{ - Tags: []*datagen.TagValuesSpec{ - { - TagKey: "t0", - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence("a%s", 0, 1) - }, - }, - }, - } - spec := datagen.Spec{ - Measurements: []datagen.MeasurementSpec{ - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f0", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: 20 * time.Second, - }, - DataType: models.Integer, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - return datagen.NewTimeIntegerValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewIntegerArrayValuesSequence([]int64{1, 2}), - ) - }, - }, - }, - }, - } - tr := datagen.TimeRange{ - Start: mustParseTime("2019-11-25T00:00:00Z"), - End: mustParseTime("2019-11-25T00:01:00Z"), - } - return datagen.NewSeriesGeneratorFromSpec(&spec, tr), tr - }) - defer reader.Close() - - ti, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - 
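// The 18s offset places window stops at :08, :18, :28, :38, :48, :58. With
// points only at :00, :20 and :40, first() finds data in the windows ending
// :08, :28 and :48; since first is a selector, the empty windows contribute
// no rows even with CreateEmpty set, leaving the three rows expected below.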
ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - Offset: flux.ConvertDuration(18 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - storageflux.FirstKind, - }, - CreateEmpty: true, - TimeColumn: execute.DefaultStopColLabel, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - want := []*executetest.Table{{ - KeyCols: []string{"_start", "_stop", "_field", "_measurement", "t0"}, - ColMeta: []flux.ColMeta{ - {Label: "_start", Type: flux.TTime}, - {Label: "_stop", Type: flux.TTime}, - {Label: "_time", Type: flux.TTime}, - {Label: "_value", Type: flux.TInt}, - {Label: "_field", Type: flux.TString}, - {Label: "_measurement", Type: flux.TString}, - {Label: "t0", Type: flux.TString}, - }, - Data: [][]interface{}{ - {Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:01:00Z"), Time("2019-11-25T00:00:08Z"), int64(1), "f0", "m0", "a0"}, - {Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:01:00Z"), Time("2019-11-25T00:00:28Z"), int64(2), "f0", "m0", "a0"}, - {Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:01:00Z"), Time("2019-11-25T00:00:48Z"), int64(1), "f0", "m0", "a0"}, - }, - }} - - executetest.NormalizeTables(want) - sort.Sort(executetest.SortedTables(want)) - - var got []*executetest.Table - if err := ti.Do(func(table flux.Table) error { - t, err := executetest.ConvertTable(table) - if err != nil { - return err - } - got = append(got, t) - return nil - }); err != nil { - t.Fatal(err) - } - executetest.NormalizeTables(got) - sort.Sort(executetest.SortedTables(got)) - - // compare these two - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } -} - -func TestStorageReader_WindowSumOffsetTimeColumn(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - tagsSpec := &datagen.TagsSpec{ - Tags: []*datagen.TagValuesSpec{ - { - TagKey: "t0", - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence("a%s", 0, 1) - }, - }, - }, - } - spec := datagen.Spec{ - Measurements: []datagen.MeasurementSpec{ - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f0", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: 20 * time.Second, - }, - DataType: models.Integer, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - return datagen.NewTimeIntegerValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewIntegerArrayValuesSequence([]int64{1, 2}), - ) - }, - }, - }, - }, - } - tr := datagen.TimeRange{ - Start: mustParseTime("2019-11-25T00:00:00Z"), - End: mustParseTime("2019-11-25T00:01:00Z"), - } - return datagen.NewSeriesGeneratorFromSpec(&spec, tr), tr - }) - defer reader.Close() - - ti, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - Offset: flux.ConvertDuration(18 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - storageflux.SumKind, - }, - CreateEmpty: true, - TimeColumn: execute.DefaultStopColLabel, - }, 
memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - want := []*executetest.Table{{ - KeyCols: []string{"_start", "_stop", "_field", "_measurement", "t0"}, - ColMeta: []flux.ColMeta{ - {Label: "_start", Type: flux.TTime}, - {Label: "_stop", Type: flux.TTime}, - {Label: "_time", Type: flux.TTime}, - {Label: "_value", Type: flux.TInt}, - {Label: "_field", Type: flux.TString}, - {Label: "_measurement", Type: flux.TString}, - {Label: "t0", Type: flux.TString}, - }, - Data: [][]interface{}{ - {Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:01:00Z"), Time("2019-11-25T00:00:08Z"), int64(1), "f0", "m0", "a0"}, - {Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:01:00Z"), Time("2019-11-25T00:00:18Z"), nil, "f0", "m0", "a0"}, - {Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:01:00Z"), Time("2019-11-25T00:00:28Z"), int64(2), "f0", "m0", "a0"}, - {Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:01:00Z"), Time("2019-11-25T00:00:38Z"), nil, "f0", "m0", "a0"}, - {Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:01:00Z"), Time("2019-11-25T00:00:48Z"), int64(1), "f0", "m0", "a0"}, - {Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:01:00Z"), Time("2019-11-25T00:00:58Z"), nil, "f0", "m0", "a0"}, - {Time("2019-11-25T00:00:00Z"), Time("2019-11-25T00:01:00Z"), Time("2019-11-25T00:01:00Z"), nil, "f0", "m0", "a0"}, - }, - }} - - executetest.NormalizeTables(want) - sort.Sort(executetest.SortedTables(want)) - - var got []*executetest.Table - if err := ti.Do(func(table flux.Table) error { - t, err := executetest.ConvertTable(table) - if err != nil { - return err - } - got = append(got, t) - return nil - }); err != nil { - t.Fatal(err) - } - executetest.NormalizeTables(got) - sort.Sort(executetest.SortedTables(got)) - - // compare these two - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } -} - -func TestStorageReader_EmptyTableNoEmptyWindows(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - tagsSpec := &datagen.TagsSpec{ - Tags: []*datagen.TagValuesSpec{ - { - TagKey: "t0", - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence("a%s", 0, 1) - }, - }, - }, - } - spec := datagen.Spec{ - Measurements: []datagen.MeasurementSpec{ - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f0", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: 10 * time.Second, - }, - DataType: models.Integer, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - return datagen.NewTimeIntegerValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewIntegerArrayValuesSequence([]int64{1}), - ) - }, - }, - }, - }, - } - tr := datagen.TimeRange{ - Start: mustParseTime("2019-11-25T00:00:10Z"), - End: mustParseTime("2019-11-25T00:00:30Z"), - } - return datagen.NewSeriesGeneratorFromSpec(&spec, tr), tr - }) - defer reader.Close() - - ti, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: flux.ConvertDuration(10 * time.Second), - Period: flux.ConvertDuration(10 * time.Second), - }, - Aggregates: []plan.ProcedureKind{ - storageflux.FirstKind, - }, - CreateEmpty: true, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - makeWindowTable 
:= func(start, stop, time values.Time, v int64) *executetest.Table { - return &executetest.Table{ - KeyCols: []string{"_start", "_stop", "_field", "_measurement", "t0"}, - ColMeta: []flux.ColMeta{ - {Label: "_start", Type: flux.TTime}, - {Label: "_stop", Type: flux.TTime}, - {Label: "_time", Type: flux.TTime}, - {Label: "_value", Type: flux.TInt}, - {Label: "_field", Type: flux.TString}, - {Label: "_measurement", Type: flux.TString}, - {Label: "t0", Type: flux.TString}, - }, - Data: [][]interface{}{ - {start, stop, time, v, "f0", "m0", "a0"}, - }, - } - } - want := []*executetest.Table{ - makeWindowTable( - Time("2019-11-25T00:00:10Z"), Time("2019-11-25T00:00:20Z"), Time("2019-11-25T00:00:10Z"), 1, - ), - makeWindowTable( - Time("2019-11-25T00:00:20Z"), Time("2019-11-25T00:00:30Z"), Time("2019-11-25T00:00:20Z"), 1, - ), - } - - executetest.NormalizeTables(want) - sort.Sort(executetest.SortedTables(want)) - - var got []*executetest.Table - if err := ti.Do(func(table flux.Table) error { - t, err := executetest.ConvertTable(table) - if err != nil { - return err - } - got = append(got, t) - return nil - }); err != nil { - t.Fatal(err) - } - executetest.NormalizeTables(got) - sort.Sort(executetest.SortedTables(got)) - - // compare these two - if diff := cmp.Diff(want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } -} - -func getStorageEqPred(lhsTagKey, rhsTagValue string) *storageproto.Predicate { - return &storageproto.Predicate{ - Root: &storageproto.Node{ - NodeType: storageproto.Node_TypeComparisonExpression, - Value: &storageproto.Node_Comparison_{ - Comparison: storageproto.Node_ComparisonEqual, - }, - Children: []*storageproto.Node{ - { - NodeType: storageproto.Node_TypeTagRef, - Value: &storageproto.Node_TagRefValue{ - TagRefValue: lhsTagKey, - }, - }, - { - NodeType: storageproto.Node_TypeLiteral, - Value: &storageproto.Node_StringValue{ - StringValue: rhsTagValue, - }, - }, - }, - }, - } -} - -func TestStorageReader_ReadGroup(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 10*time.Second, []float64{1.0, 2.0, 3.0, 4.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - ), - ) - tr := TimeRange("2019-11-25T00:00:00Z", "2019-11-25T00:02:00Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - for _, tt := range []struct { - aggregate string - filter *storageproto.Predicate - want flux.TableIterator - }{ - { - aggregate: storageflux.CountKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Ints("_value", 12), - }, - }, - }, - }, - }, - { - aggregate: storageflux.SumKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Floats("_value", 30), - }, - }, - }, - }, - }, - { - aggregate: storageflux.SumKind, - filter: getStorageEqPred("t0", "z-9"), - want: static.TableGroup{}, - }, - { - aggregate: storageflux.MinKind, - want: static.TableGroup{ - 
static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:00Z"), - static.Floats("_value", 1), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MaxKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:30Z"), - static.Floats("_value", 4), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MaxKind, - filter: getStorageEqPred("t0", "z-9"), - want: static.TableGroup{}, - }, - { - aggregate: storageflux.FirstKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:00:00Z"), - static.Floats("_value", 1), - }, - }, - }, - }, - }, - { - aggregate: storageflux.LastKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Times("_time", "2019-11-25T00:01:50Z"), - static.Floats("_value", 4), - }, - }, - }, - }, - }, - } { - t.Run(tt.aggregate, func(t *testing.T) { - mem := arrowmem.NewCheckedAllocator(arrowmem.DefaultAllocator) - defer mem.AssertSize(t, 0) - - alloc := &memory.ResourceAllocator{ - Allocator: mem, - } - got, err := reader.ReadGroup(context.Background(), query.ReadGroupSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - Predicate: tt.filter, - }, - GroupMode: query.GroupModeBy, - GroupKeys: []string{"_measurement", "_field", "t0"}, - AggregateMethod: tt.aggregate, - }, alloc) - if err != nil { - t.Fatal(err) - } - - if diff := table.Diff(tt.want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } - }) - } -} - -// TestStorageReader_ReadGroupSelectTags exercises group-selects where the tag -// values vary among the candidate items for select and the read-group -// operation must track and return the correct set of tags. 
-func TestStorageReader_ReadGroupSelectTags(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 10*time.Second, []float64{1.0, 2.0, 3.0, 4.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - TagValuesSequence("t1", "b-%s", 0, 1), - ), - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 10*time.Second, []float64{5.0, 6.0, 7.0, 8.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - TagValuesSequence("t1", "b-%s", 1, 2), - ), - ) - tr := TimeRange("2019-11-25T00:00:00Z", "2019-11-25T00:02:00Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - cases := []struct { - aggregate string - want flux.TableIterator - }{ - { - aggregate: storageflux.MinKind, - want: static.TableGroup{ - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Strings("t1", "b-0"), - static.Strings("_measurement", "m0"), - static.Strings("_field", "f0"), - static.Times("_time", "2019-11-25T00:00:00Z"), - static.Floats("_value", 1.0), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MaxKind, - want: static.TableGroup{ - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Strings("t1", "b-1"), - static.Strings("_measurement", "m0"), - static.Strings("_field", "f0"), - static.Times("_time", "2019-11-25T00:00:30Z"), - static.Floats("_value", 8.0), - }, - }, - }, - }, - }, - { - aggregate: storageflux.FirstKind, - want: static.TableGroup{ - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Strings("t1", "b-0"), - static.Strings("_measurement", "m0"), - static.Strings("_field", "f0"), - static.Times("_time", "2019-11-25T00:00:00Z"), - static.Floats("_value", 1.0), - }, - }, - }, - }, - }, - { - aggregate: storageflux.LastKind, - want: static.TableGroup{ - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:02:00Z"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.Strings("t1", "b-1"), - static.Strings("_measurement", "m0"), - static.Strings("_field", "f0"), - static.Times("_time", "2019-11-25T00:01:50Z"), - static.Floats("_value", 8.0), - }, - }, - }, - }, - }, - } - - for _, tt := range cases { - t.Run(tt.aggregate, func(t *testing.T) { - got, err := reader.ReadGroup(context.Background(), query.ReadGroupSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - GroupMode: query.GroupModeBy, - GroupKeys: []string{"t0"}, - AggregateMethod: tt.aggregate, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - if diff := table.Diff(tt.want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } - }) - } -} - -// TestStorageReader_ReadGroupNoAgg exercises the path where no aggregate is specified -func TestStorageReader_ReadGroupNoAgg(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - 
MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 10*time.Second, []float64{1.0, 2.0, 3.0, 4.0}), - TagValuesSequence("t1", "b-%s", 0, 2), - ), - ) - tr := TimeRange("2019-11-25T00:00:00Z", "2019-11-25T00:00:40Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - cases := []struct { - aggregate string - want flux.TableIterator - }{ - { - want: static.TableGroup{ - static.TableMatrix{ - { - static.Table{ - static.StringKey("t1", "b-0"), - static.Strings("_measurement", "m0", "m0", "m0", "m0"), - static.Strings("_field", "f0", "f0", "f0", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:00:40Z"), - static.Times("_time", "2019-11-25T00:00:00Z", "2019-11-25T00:00:10Z", "2019-11-25T00:00:20Z", "2019-11-25T00:00:30Z"), - static.Floats("_value", 1.0, 2.0, 3.0, 4.0), - }, - }, - { - static.Table{ - static.StringKey("t1", "b-1"), - static.Strings("_measurement", "m0", "m0", "m0", "m0"), - static.Strings("_field", "f0", "f0", "f0", "f0"), - static.TimeKey("_start", "2019-11-25T00:00:00Z"), - static.TimeKey("_stop", "2019-11-25T00:00:40Z"), - static.Times("_time", "2019-11-25T00:00:00Z", "2019-11-25T00:00:10Z", "2019-11-25T00:00:20Z", "2019-11-25T00:00:30Z"), - static.Floats("_value", 1.0, 2.0, 3.0, 4.0), - }, - }, - }, - }, - }, - } - - for _, tt := range cases { - t.Run("", func(t *testing.T) { - got, err := reader.ReadGroup(context.Background(), query.ReadGroupSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - GroupMode: query.GroupModeBy, - GroupKeys: []string{"t1"}, - }, memory.DefaultAllocator) - if err != nil { - t.Fatal(err) - } - - if diff := table.Diff(tt.want, got); diff != "" { - t.Errorf("unexpected results -want/+got:\n%s", diff) - } - }) - } -} - -func TestStorageReader_ReadWindowAggregateMonths(t *testing.T) { - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 24*time.Hour, []float64{1.0, 2.0, 3.0, 4.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - ), - ) - tr := TimeRange("2019-09-01T00:00:00Z", "2019-12-01T00:00:00Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - for _, tt := range []struct { - aggregate plan.ProcedureKind - want flux.TableIterator - }{ - { - aggregate: storageflux.CountKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.TimeKey("_start", "2019-09-01T00:00:00Z"), - static.TimeKey("_stop", "2019-10-01T00:00:00Z"), - static.Ints("_value", 30), - }, - }, - { - static.Table{ - static.TimeKey("_start", "2019-10-01T00:00:00Z"), - static.TimeKey("_stop", "2019-11-01T00:00:00Z"), - static.Ints("_value", 31), - }, - }, - { - static.Table{ - static.TimeKey("_start", "2019-11-01T00:00:00Z"), - static.TimeKey("_stop", "2019-12-01T00:00:00Z"), - static.Ints("_value", 30), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MinKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.TimeKey("_start", "2019-09-01T00:00:00Z"), - static.TimeKey("_stop", "2019-10-01T00:00:00Z"), - static.Times("_time", 
"2019-09-01T00:00:00Z"), - static.Floats("_value", 1), - }, - static.Table{ - static.TimeKey("_start", "2019-10-01T00:00:00Z"), - static.TimeKey("_stop", "2019-11-01T00:00:00Z"), - static.Times("_time", "2019-10-03T00:00:00Z"), - static.Floats("_value", 1), - }, - static.Table{ - static.TimeKey("_start", "2019-11-01T00:00:00Z"), - static.TimeKey("_stop", "2019-12-01T00:00:00Z"), - static.Times("_time", "2019-11-04T00:00:00Z"), - static.Floats("_value", 1), - }, - }, - }, - }, - }, - { - aggregate: storageflux.MaxKind, - want: static.TableGroup{ - static.StringKey("_measurement", "m0"), - static.StringKey("_field", "f0"), - static.TableMatrix{ - static.StringKeys("t0", "a-0", "a-1", "a-2"), - { - static.Table{ - static.TimeKey("_start", "2019-09-01T00:00:00Z"), - static.TimeKey("_stop", "2019-10-01T00:00:00Z"), - static.Times("_time", "2019-09-04T00:00:00Z"), - static.Floats("_value", 4), - }, - static.Table{ - static.TimeKey("_start", "2019-10-01T00:00:00Z"), - static.TimeKey("_stop", "2019-11-01T00:00:00Z"), - static.Times("_time", "2019-10-02T00:00:00Z"), - static.Floats("_value", 4), - }, - static.Table{ - static.TimeKey("_start", "2019-11-01T00:00:00Z"), - static.TimeKey("_stop", "2019-12-01T00:00:00Z"), - static.Times("_time", "2019-11-03T00:00:00Z"), - static.Floats("_value", 4), - }, - }, - }, - }, - }, - } { - mem := arrowmem.NewCheckedAllocator(arrowmem.DefaultAllocator) - defer mem.AssertSize(t, 0) - - alloc := &memory.ResourceAllocator{ - Allocator: mem, - } - got, err := reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Window: execute.Window{ - Every: values.MakeDuration(0, 1, false), - Period: values.MakeDuration(0, 1, false), - }, - Aggregates: []plan.ProcedureKind{ - tt.aggregate, - }, - }, alloc) - - if err != nil { - t.Fatal(err) - } - - if diff := table.Diff(tt.want, got); diff != "" { - t.Errorf("unexpected results for %v aggregate -want/+got:\n%s", tt.aggregate, diff) - } - } -} - -// TestStorageReader_Backoff will invoke the read function -// and then send the table to a separate goroutine so it doesn't -// block the table iterator. The table iterator should be blocked -// until it is read by the other goroutine. 
-func TestStorageReader_Backoff(t *testing.T) { - t.Skip("memory allocations are not tracked properly") - reader := NewStorageReader(t, func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - spec := Spec(org, bucket, - MeasurementSpec("m0", - FloatArrayValuesSequence("f0", 10*time.Second, []float64{1.0, 2.0, 3.0, 4.0}), - TagValuesSequence("t0", "a-%s", 0, 3), - ), - ) - tr := TimeRange("2019-09-01T00:00:00Z", "2019-09-02T00:00:00Z") - return datagen.NewSeriesGeneratorFromSpec(spec, tr), tr - }) - defer reader.Close() - - for _, tt := range []struct { - name string - read func(reader *StorageReader, mem memory.Allocator) (flux.TableIterator, error) - }{ - { - name: "ReadFilter", - read: func(reader *StorageReader, mem memory.Allocator) (flux.TableIterator, error) { - return reader.ReadFilter(context.Background(), query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, mem) - }, - }, - { - name: "ReadGroup", - read: func(reader *StorageReader, mem memory.Allocator) (flux.TableIterator, error) { - return reader.ReadGroup(context.Background(), query.ReadGroupSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - GroupMode: query.GroupModeBy, - GroupKeys: []string{"_measurement", "_field"}, - }, mem) - }, - }, - { - name: "ReadWindowAggregate", - read: func(reader *StorageReader, mem memory.Allocator) (flux.TableIterator, error) { - return reader.ReadWindowAggregate(context.Background(), query.ReadWindowAggregateSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: reader.Org, - BucketID: reader.Bucket, - Bounds: reader.Bounds, - }, - Aggregates: []plan.ProcedureKind{ - storageflux.MeanKind, - }, - TimeColumn: execute.DefaultStopColLabel, - Window: execute.Window{ - Every: values.ConvertDurationNsecs(20 * time.Second), - Period: values.ConvertDurationNsecs(20 * time.Second), - }, - }, mem) - }, - }, - } { - t.Run(tt.name, func(t *testing.T) { - // Read the table and learn what the maximum allocated - // value is. We don't want to exceed this. - mem := &memory.ResourceAllocator{} - tables, err := tt.read(reader, mem) - if err != nil { - t.Fatal(err) - } - - if err := tables.Do(func(t flux.Table) error { - return t.Do(func(cr flux.ColReader) error { - return nil - }) - }); err != nil { - t.Fatal(err) - } - - // The total allocated should not be the same - // as the max allocated. If this is the case, we - // either had one buffer or did not correctly - // release memory for each buffer. - if mem.MaxAllocated() == mem.TotalAllocated() { - t.Fatal("max allocated is the same as total allocated, they must be different for this test to be meaningful") - } - - // Recreate the memory allocator and set the limit - // to the max allocated. This will cause a panic - // if the next buffer attempts to be allocated - // before the first. 
- limit := mem.MaxAllocated() - mem = &memory.ResourceAllocator{Limit: &limit} - tables, err = tt.read(reader, mem) - if err != nil { - t.Fatal(err) - } - - var wg sync.WaitGroup - _ = tables.Do(func(t flux.Table) error { - wg.Add(1) - go func() { - defer wg.Done() - _ = t.Do(func(cr flux.ColReader) error { - return nil - }) - }() - return nil - }) - wg.Wait() - }) - } -} - -func BenchmarkReadFilter(b *testing.B) { - setupFn := func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - tagsSpec := &datagen.TagsSpec{ - Tags: []*datagen.TagValuesSpec{ - { - TagKey: "t0", - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence("a-%s", 0, 5) - }, - }, - { - TagKey: "t1", - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence("b-%s", 0, 1000) - }, - }, - }, - } - spec := datagen.Spec{ - Measurements: []datagen.MeasurementSpec{ - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f0", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: time.Minute, - }, - DataType: models.Float, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - r := rand.New(rand.NewSource(10)) - return datagen.NewTimeFloatValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewFloatRandomValuesSequence(0, 90, r), - ) - }, - }, - }, - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f1", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: time.Minute, - }, - DataType: models.Float, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - r := rand.New(rand.NewSource(11)) - return datagen.NewTimeFloatValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewFloatRandomValuesSequence(0, 180, r), - ) - }, - }, - }, - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f1", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: time.Minute, - }, - DataType: models.Float, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - r := rand.New(rand.NewSource(12)) - return datagen.NewTimeFloatValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewFloatRandomValuesSequence(10, 10000, r), - ) - }, - }, - }, - }, - } - tr := datagen.TimeRange{ - Start: mustParseTime("2019-11-25T00:00:00Z"), - End: mustParseTime("2019-11-26T00:00:00Z"), - } - return datagen.NewSeriesGeneratorFromSpec(&spec, tr), tr - } - benchmarkRead(b, setupFn, func(r *StorageReader) error { - tables, err := r.ReadFilter(context.Background(), query.ReadFilterSpec{ - OrganizationID: r.Org, - BucketID: r.Bucket, - Bounds: r.Bounds, - }, memory.DefaultAllocator) - if err != nil { - return err - } - return tables.Do(func(table flux.Table) error { - table.Done() - return nil - }) - }) -} - -func BenchmarkReadGroup(b *testing.B) { - setupFn := func(org, bucket platform.ID) (datagen.SeriesGenerator, datagen.TimeRange) { - tagsSpec := &datagen.TagsSpec{ - Tags: []*datagen.TagValuesSpec{ - { - TagKey: "t0", - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence("a-%s", 0, 5) - }, - }, - { - TagKey: "t1", - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence("b-%s", 0, 1000) - }, - }, - }, - } - spec := datagen.Spec{ - Measurements: []datagen.MeasurementSpec{ - { - Name: "m0", - 
TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f0", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: time.Minute, - }, - DataType: models.Float, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - r := rand.New(rand.NewSource(10)) - return datagen.NewTimeFloatValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewFloatRandomValuesSequence(0, 90, r), - ) - }, - }, - }, - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f1", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: time.Minute, - }, - DataType: models.Float, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - r := rand.New(rand.NewSource(11)) - return datagen.NewTimeFloatValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewFloatRandomValuesSequence(0, 180, r), - ) - }, - }, - }, - { - Name: "m0", - TagsSpec: tagsSpec, - FieldValuesSpec: &datagen.FieldValuesSpec{ - Name: "f1", - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: time.Minute, - }, - DataType: models.Float, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - r := rand.New(rand.NewSource(12)) - return datagen.NewTimeFloatValuesSequence( - spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewFloatRandomValuesSequence(10, 100, r), - ) - }, - }, - }, - }, - } - tr := datagen.TimeRange{ - Start: mustParseTime("2019-11-25T00:00:00Z"), - End: mustParseTime("2019-11-25T00:10:00Z"), - } - return datagen.NewSeriesGeneratorFromSpec(&spec, tr), tr - } - benchmarkRead(b, setupFn, func(r *StorageReader) error { - tables, err := r.ReadGroup(context.Background(), query.ReadGroupSpec{ - ReadFilterSpec: query.ReadFilterSpec{ - OrganizationID: r.Org, - BucketID: r.Bucket, - Bounds: r.Bounds, - }, - GroupMode: query.GroupModeBy, - GroupKeys: []string{"_start", "_stop", "t0"}, - AggregateMethod: storageflux.MinKind, - }, memory.DefaultAllocator) - if err != nil { - return err - } - - err = tables.Do(func(table flux.Table) error { - table.Done() - return nil - }) - - return err - }) -} - -func benchmarkRead(b *testing.B, setupFn SetupFunc, f func(r *StorageReader) error) { - reader := NewStorageReader(b, setupFn) - defer reader.Close() - - b.ResetTimer() - b.ReportAllocs() - for i := 0; i < b.N; i++ { - if err := f(reader); err != nil { - b.Fatal(err) - } - } -} - -func Time(s string) execute.Time { - ts := mustParseTime(s) - return execute.Time(ts.UnixNano()) -} - -func mustParseTime(s string) time.Time { - ts, err := time.Parse(time.RFC3339, s) - if err != nil { - panic(err) - } - return ts -} - -func Spec(org, bucket platform.ID, measurements ...datagen.MeasurementSpec) *datagen.Spec { - return &datagen.Spec{ - Measurements: measurements, - } -} - -func MeasurementSpec(name string, field *datagen.FieldValuesSpec, tags ...*datagen.TagValuesSpec) datagen.MeasurementSpec { - return datagen.MeasurementSpec{ - Name: name, - TagsSpec: TagsSpec(tags...), - FieldValuesSpec: field, - } -} - -func FloatArrayValuesSequence(name string, delta time.Duration, values []float64) *datagen.FieldValuesSpec { - return &datagen.FieldValuesSpec{ - Name: name, - TimeSequenceSpec: datagen.TimeSequenceSpec{ - Count: math.MaxInt32, - Delta: delta, - }, - DataType: models.Float, - Values: func(spec datagen.TimeSequenceSpec) datagen.TimeValuesSequence { - return datagen.NewTimeFloatValuesSequence( - 
spec.Count, - datagen.NewTimestampSequenceFromSpec(spec), - datagen.NewFloatArrayValuesSequence(values), - ) - }, - } -} - -func TagsSpec(specs ...*datagen.TagValuesSpec) *datagen.TagsSpec { - return &datagen.TagsSpec{Tags: specs} -} - -func TagValuesSequence(key, format string, start, stop int) *datagen.TagValuesSpec { - return &datagen.TagValuesSpec{ - TagKey: key, - Values: func() datagen.CountableSequence { - return datagen.NewCounterByteSequence(format, start, stop) - }, - } -} - -func TimeRange(start, end string) datagen.TimeRange { - return datagen.TimeRange{ - Start: mustParseTime(start), - End: mustParseTime(end), - } -} - -// seriesBatchSize specifies the number of series keys passed to the index. -const seriesBatchSize = 1000 - -func writeShard(sfile *tsdb.SeriesFile, sg datagen.SeriesGenerator, id uint64, path string) error { - sw := shard.NewWriter(id, path) - defer sw.Close() - - var ( - keys [][]byte - names [][]byte - tags []models.Tags - ) - - for sg.Next() { - seriesKey := sg.Key() - keys = append(keys, seriesKey) - names = append(names, sg.Name()) - tags = append(tags, sg.Tags()) - - if len(keys) == seriesBatchSize { - if _, err := sfile.CreateSeriesListIfNotExists(names, tags); err != nil { - return err - } - keys = keys[:0] - names = names[:0] - tags = tags[:0] - } - - vg := sg.TimeValuesGenerator() - - key := tsm1.SeriesFieldKeyBytes(string(seriesKey), string(sg.Field())) - for vg.Next() { - sw.WriteV(key, vg.Values()) - } - - if err := sw.Err(); err != nil { - return err - } - } - - if len(keys) > seriesBatchSize { - if _, err := sfile.CreateSeriesListIfNotExists(names, tags); err != nil { - return err - } - } - return nil -} diff --git a/storage/flux/tags_cache.go b/storage/flux/tags_cache.go deleted file mode 100644 index e24ccf9b69a..00000000000 --- a/storage/flux/tags_cache.go +++ /dev/null @@ -1,286 +0,0 @@ -package storageflux - -import ( - "container/list" - "sync" - - "github.com/apache/arrow/go/v7/arrow/memory" - "github.com/influxdata/flux/array" - "github.com/influxdata/flux/execute" -) - -// defaultMaxLengthForTagsCache is the default maximum number of -// tags that will be memoized when retrieving tags from the tags -// cache. -const defaultMaxLengthForTagsCache = 100 - -type tagsCache struct { - // startColumn is a special slot for holding the start column. - startColumn *array.Int - - // stopColumn is a special slot for holding the stop column. - stopColumn *array.Int - - // tags holds cached arrays for various tag values. - // An lru is used to keep track of the least recently used - // item in the cache so that it can be ejected. An lru is used - // here because we cannot be certain if tag values are going to - // be used again and we do not want to retain a reference - // that may have already been released. This makes an lru a good - // fit since it will be more likely to eject a value that is not - // going to be used again than another data structure. - // - // The increase in performance by reusing arrays for tag values - // is dependent on the order of the tags coming out of storage. - // It is possible that a value will be reused but could get - // ejected from the cache before it would be reused. - // - // The map contains the tag **values** and not the tag keys. - // An array can get shared among two different tag keys that - // have the same value. - tags map[string]*list.Element - mu sync.RWMutex - lru *list.List - maxLength int -} - -// newTagsCache will create a tags cache that will retain -// the last sz entries. 
If zero, the default will be used. -func newTagsCache(sz int) *tagsCache { - return &tagsCache{maxLength: sz} -} - -// GetBounds will return arrays that match with the bounds. -// If an array that is within the cache works with the bounds -// and can be sliced to the length, a reference to it will be -// returned. -func (c *tagsCache) GetBounds(b execute.Bounds, l int, mem memory.Allocator) (start *array.Int, stop *array.Int) { - if c == nil { - start = c.createBounds(b.Start, l, mem) - stop = c.createBounds(b.Stop, l, mem) - return start, stop - } - - // Retrieve the columns from the cache if they exist. - c.mu.RLock() - start, _ = c.getBoundsFromCache(c.startColumn, l) - stop, _ = c.getBoundsFromCache(c.stopColumn, l) - c.mu.RUnlock() - - // If we could not retrieve an array from the cache, - // create one here outside of the lock. - // Record that we will need to replace the values in - // the cache. - replace := false - if start == nil { - start, replace = c.createBounds(b.Start, l, mem), true - } - if stop == nil { - stop, replace = c.createBounds(b.Stop, l, mem), true - } - - if !replace { - // No need to retrieve the write lock. - // Return now since we retrieved all values from - // the cache. - return start, stop - } - - c.mu.Lock() - c.replaceBounds(&c.startColumn, start) - c.replaceBounds(&c.stopColumn, stop) - c.mu.Unlock() - return start, stop -} - -// getBoundsFromCache will return an array of values -// if the array in the cache is of the appropriate size. -// This must be called from inside of a lock. -func (c *tagsCache) getBoundsFromCache(arr *array.Int, l int) (*array.Int, bool) { - if arr == nil || arr.Len() < l { - return nil, false - } else if arr.Len() == l { - arr.Retain() - return arr, true - } - - // If the lengths do not match, but the cached array is less - // than the desired array, then we can use slice. - // NewSlice will automatically create a new reference to the - // passed in array so we do not need to manually retain. - vs := array.Slice(arr, 0, l) - return vs.(*array.Int), true -} - -// replaceBounds will examine the array and replace it if -// the length of the array is greater than the current array -// or if there isn't an array in the cache. -// This must be called from inside of a write lock. -func (c *tagsCache) replaceBounds(cache **array.Int, arr *array.Int) { - if *cache != nil { - if (*cache).Len() >= arr.Len() { - // The cached value is longer so just keep it. - return - } - (*cache).Release() - } - arr.Retain() - *cache = arr -} - -// createBounds will create an array of times for the given time with -// the given length. -// -// DO NOT CALL THIS METHOD IN A LOCK. It is slow and will probably -// cause lock contention. -func (c *tagsCache) createBounds(ts execute.Time, l int, mem memory.Allocator) *array.Int { - b := array.NewIntBuilder(mem) - b.Resize(l) - for i := 0; i < l; i++ { - b.Append(int64(ts)) - } - return b.NewIntArray() -} - -// GetTag returns a binary arrow array that contains the value -// repeated l times. If an array with a length greater than or -// equal to the length and with the same value exists in the cache, -// a reference to the data will be retained and returned. -// Otherwise, the allocator will be used to construct a new column. -func (c *tagsCache) GetTag(value string, l int, mem memory.Allocator) *array.String { - if l == 0 || c == nil { - return c.createTag(value, l, mem) - } - - // Attempt to retrieve the array from the cache. 
- arr, ok := c.getTagFromCache(value, l) - if !ok { - // The array is not in the cache so create it. - arr = c.createTag(value, l, mem) - } - c.touchOrReplaceTag(arr) - return arr -} - -// getTagFromCache will return an array of values with the -// specified value at the specified length. If there is no -// cache entry or the entry is not large enough for the -// specified length, then this returns false. -func (c *tagsCache) getTagFromCache(value string, l int) (*array.String, bool) { - c.mu.RLock() - defer c.mu.RUnlock() - - elem, ok := c.tags[value] - if !ok { - return nil, false - } - - arr := elem.Value.(*array.String) - if arr.Len() == l { - arr.Retain() - return arr, true - } else if arr.Len() < l { - return nil, false - } - - // If the lengths do not match, but the cached array is less - // than the desired array, then we can use slice. - // Slice will automatically create a new reference to the - // passed in array so we do not need to manually retain. - vs := array.Slice(arr, 0, l) - return vs.(*array.String), true -} - -// touchOrReplaceTag will update the LRU cache to have -// the value specified by the array as the most recently -// used entry. If the cache entry does not exist or the -// current array in the cache is shorter than this one, -// it will replace the array. -func (c *tagsCache) touchOrReplaceTag(arr *array.String) { - c.mu.Lock() - defer c.mu.Unlock() - - if c.lru == nil { - c.lru = list.New() - } - if c.tags == nil { - c.tags = make(map[string]*list.Element) - } - - value := arr.Value(0) - if elem, ok := c.tags[value]; ok { - // If the array in the cache is longer to or - // equal to the current tag, then do not touch it. - carr := elem.Value.(*array.String) - if carr.Len() < arr.Len() { - // Retain this array again and release our - // previous reference to the other array. - arr.Retain() - elem.Value = arr - carr.Release() - } - - // Move this element to the front of the lru. - c.lru.MoveBefore(elem, c.lru.Front()) - } else { - arr.Retain() - c.tags[value] = c.lru.PushFront(arr) - } - c.maintainLRU() -} - -// maintainLRU will ensure the lru cache maintains the appropriate -// length by ejecting the least recently used value from the cache -// until the cache is the appropriate size. -// -// This function must be called from inside of a lock. -func (c *tagsCache) maintainLRU() { - max := c.maxLength - if max == 0 { - max = defaultMaxLengthForTagsCache - } - if c.lru.Len() <= max { - return - } - arr := c.lru.Remove(c.lru.Back()).(*array.String) - value := arr.Value(0) - delete(c.tags, value) - arr.Release() -} - -// createTag will create a new array for a tag with the given -// length. -// -// DO NOT CALL THIS METHOD IN A LOCK. It is slow and will probably -// cause lock contention. -func (c *tagsCache) createTag(value string, l int, mem memory.Allocator) *array.String { - b := array.NewStringBuilder(mem) - b.Resize(l) - b.ReserveData(l * len(value)) - for i := 0; i < l; i++ { - b.Append(value) - } - return b.NewStringArray() -} - -// Release will release all references to cached tag columns. 
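(Editorial aside, not part of the patch.) Putting the pieces of the cache together, a caller inside this package would drive it roughly as below; the tag value, column length, and bounds are illustrative, and every returned array must be released by the caller because the cache keeps its own reference.

// Hypothetical usage of tagsCache from within package storageflux.
func exampleTagsCacheUsage(mem memory.Allocator) {
	cache := newTagsCache(0) // zero falls back to defaultMaxLengthForTagsCache
	defer cache.Release()

	// Repeated string column for one tag value; identical requests are served
	// from the LRU when a long-enough array is still cached.
	tagCol := cache.GetTag("a0", 1024, mem)
	defer tagCol.Release()

	// Start and stop columns for the current window bounds.
	bounds := execute.Bounds{
		Start: execute.Time(0),
		Stop:  execute.Time(time.Minute),
	}
	start, stop := cache.GetBounds(bounds, 1024, mem)
	defer start.Release()
	defer stop.Release()
}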
-func (c *tagsCache) Release() { - c.mu.Lock() - defer c.mu.Unlock() - - if c.startColumn != nil { - c.startColumn.Release() - c.startColumn = nil - } - - if c.stopColumn != nil { - c.stopColumn.Release() - c.stopColumn = nil - } - - for _, elem := range c.tags { - elem.Value.(*array.String).Release() - } - c.tags = nil - c.lru = nil -} diff --git a/storage/flux/tags_cache_test.go b/storage/flux/tags_cache_test.go deleted file mode 100644 index 25d735920d5..00000000000 --- a/storage/flux/tags_cache_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package storageflux - -import ( - "fmt" - "math/rand" - "sync" - "testing" - "time" - - "github.com/apache/arrow/go/v7/arrow/memory" - "github.com/influxdata/flux/execute" -) - -func TestTagsCache_GetBounds_Concurrency(t *testing.T) { - // Concurrently use the tags cache by retrieving - // a tag of random sizes and then iterating over the - // retrieved tag. The test should exceed the cache's - // size so we get values being evicted. - cache := newTagsCache(4) - bounds := execute.Bounds{ - Start: execute.Time(time.Second), - Stop: execute.Time(2 * time.Second), - } - mem := NewCheckedAllocator(memory.DefaultAllocator) - defer mem.AssertSize(t, 0) - - var wg sync.WaitGroup - for i := 0; i < 32; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - - for j := 0; j < 128; j++ { - l := rand.Intn(128) + 1 - start, stop := cache.GetBounds(bounds, l, mem) - for i := 0; i < l; i++ { - if want, got := int64(bounds.Start), start.Value(i); want != got { - t.Errorf("unexpected value in start array: %d != %d", want, got) - start.Release() - stop.Release() - return - } - if want, got := int64(bounds.Stop), stop.Value(i); want != got { - t.Errorf("unexpected value in stop array: %d != %d", want, got) - start.Release() - stop.Release() - return - } - } - start.Release() - stop.Release() - } - }(i) - } - - wg.Wait() - cache.Release() -} - -func TestTagsCache_GetTags_Concurrency(t *testing.T) { - // Concurrently use the tags cache by retrieving - // a tag of random sizes and then iterating over the - // retrieved tag. The test should exceed the cache's - // size so we get values being evicted. - cache := newTagsCache(4) - mem := NewCheckedAllocator(memory.DefaultAllocator) - defer mem.AssertSize(t, 0) - - var wg sync.WaitGroup - for i := 0; i < 32; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - - // Cardinality of 8 so it exceeds the cache size - // but we also reuse tags across different goroutines. 
- value := fmt.Sprintf("t%d", i%8) - for j := 0; j < 128; j++ { - l := rand.Intn(128) + 1 - vs := cache.GetTag(value, l, mem) - for i := 0; i < l; i++ { - if want, got := value, vs.Value(i); want != got { - t.Errorf("unexpected value in array: %s != %s", want, got) - vs.Release() - return - } - } - vs.Release() - } - }(i) - } - - wg.Wait() - cache.Release() -} - -type CheckedAllocator struct { - mem *memory.CheckedAllocator - mu sync.Mutex -} - -func NewCheckedAllocator(mem memory.Allocator) *CheckedAllocator { - return &CheckedAllocator{ - mem: memory.NewCheckedAllocator(mem), - } -} - -func (c *CheckedAllocator) Allocate(size int) []byte { - c.mu.Lock() - defer c.mu.Unlock() - return c.mem.Allocate(size) -} - -func (c *CheckedAllocator) Reallocate(size int, b []byte) []byte { - c.mu.Lock() - defer c.mu.Unlock() - return c.mem.Reallocate(size, b) -} - -func (c *CheckedAllocator) Free(b []byte) { - c.mu.Lock() - defer c.mu.Unlock() - c.mem.Free(b) -} - -func (c *CheckedAllocator) AssertSize(t memory.TestingT, sz int) { - c.mu.Lock() - defer c.mu.Unlock() - c.mem.AssertSize(t, sz) -} diff --git a/storage/flux/types.tmpldata b/storage/flux/types.tmpldata deleted file mode 100644 index 6d07b7dbde5..00000000000 --- a/storage/flux/types.tmpldata +++ /dev/null @@ -1,32 +0,0 @@ -[ - { - "Name":"Float", - "name":"float", - "Type":"float64", - "ArrowType":"Float" - }, - { - "Name":"Integer", - "name":"integer", - "Type":"int64", - "ArrowType":"Int" - }, - { - "Name":"Unsigned", - "name":"unsigned", - "Type":"uint64", - "ArrowType":"Uint" - }, - { - "Name":"String", - "name":"string", - "Type":"string", - "ArrowType":"String" - }, - { - "Name":"Boolean", - "name":"boolean", - "Type":"bool", - "ArrowType":"Boolean" - } -] diff --git a/storage/flux/window.go b/storage/flux/window.go deleted file mode 100644 index 881d1cbb81c..00000000000 --- a/storage/flux/window.go +++ /dev/null @@ -1,199 +0,0 @@ -package storageflux - -import ( - "context" - "fmt" - "sync/atomic" - - "github.com/apache/arrow/go/v7/arrow/memory" - "github.com/influxdata/flux" - "github.com/influxdata/flux/array" - "github.com/influxdata/flux/arrow" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/values" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// splitWindows will split a windowTable by creating a new table from each -// row and modifying the group key to use the start and stop values from -// that row. -func splitWindows(ctx context.Context, alloc memory.Allocator, in flux.Table, selector bool, f func(t flux.Table) error) error { - wts := &windowTableSplitter{ - ctx: ctx, - in: in, - alloc: alloc, - selector: selector, - } - return wts.Do(f) -} - -type windowTableSplitter struct { - ctx context.Context - in flux.Table - alloc memory.Allocator - selector bool -} - -func (w *windowTableSplitter) Do(f func(flux.Table) error) error { - defer w.in.Done() - - startIdx, err := w.getTimeColumnIndex(execute.DefaultStartColLabel) - if err != nil { - return err - } - - stopIdx, err := w.getTimeColumnIndex(execute.DefaultStopColLabel) - if err != nil { - return err - } - - return w.in.Do(func(cr flux.ColReader) error { - // Retrieve the start and stop columns for splitting - // the windows. - start := cr.Times(startIdx) - stop := cr.Times(stopIdx) - - // Iterate through each time to produce a table - // using the start and stop values. 
- arrs := make([]array.Array, len(cr.Cols())) - for j := range cr.Cols() { - arrs[j] = getColumnValues(cr, j) - } - - values := arrs[valueColIdx] - - for i, n := 0, cr.Len(); i < n; i++ { - startT, stopT := start.Value(i), stop.Value(i) - - // Rewrite the group key using the new time. - key := groupKeyForWindow(cr.Key(), startT, stopT) - if w.selector && values.IsNull(i) { - // Produce an empty table if the value is null - // and this is a selector. - table := execute.NewEmptyTable(key, cr.Cols()) - if err := f(table); err != nil { - return err - } - continue - } - - // Produce a slice for each column into a new - // table buffer. - buffer := arrow.TableBuffer{ - GroupKey: key, - Columns: cr.Cols(), - Values: make([]array.Array, len(cr.Cols())), - } - for j, arr := range arrs { - buffer.Values[j] = arrow.Slice(arr, int64(i), int64(i+1)) - } - - // Wrap these into a single table and execute. - done := make(chan struct{}) - table := &windowTableRow{ - buffer: buffer, - done: done, - } - if err := f(table); err != nil { - return err - } - - select { - case <-done: - case <-w.ctx.Done(): - return w.ctx.Err() - } - } - return nil - }) -} - -func (w *windowTableSplitter) getTimeColumnIndex(label string) (int, error) { - j := execute.ColIdx(label, w.in.Cols()) - if j < 0 { - return -1, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("missing %q column from window splitter", label), - } - } else if c := w.in.Cols()[j]; c.Type != flux.TTime { - return -1, &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("%q column must be of type time", label), - } - } - return j, nil -} - -type windowTableRow struct { - used int32 - buffer arrow.TableBuffer - done chan struct{} -} - -func (w *windowTableRow) Key() flux.GroupKey { - return w.buffer.GroupKey -} - -func (w *windowTableRow) Cols() []flux.ColMeta { - return w.buffer.Columns -} - -func (w *windowTableRow) Do(f func(flux.ColReader) error) error { - if !atomic.CompareAndSwapInt32(&w.used, 0, 1) { - return &errors.Error{ - Code: errors.EInternal, - Msg: "table already read", - } - } - defer close(w.done) - - err := f(&w.buffer) - w.buffer.Release() - return err -} - -func (w *windowTableRow) Done() { - if atomic.CompareAndSwapInt32(&w.used, 0, 1) { - w.buffer.Release() - close(w.done) - } -} - -func (w *windowTableRow) Empty() bool { - return false -} - -func groupKeyForWindow(key flux.GroupKey, start, stop int64) flux.GroupKey { - cols := key.Cols() - vs := make([]values.Value, len(cols)) - for j, c := range cols { - if c.Label == execute.DefaultStartColLabel { - vs[j] = values.NewTime(values.Time(start)) - } else if c.Label == execute.DefaultStopColLabel { - vs[j] = values.NewTime(values.Time(stop)) - } else { - vs[j] = key.Value(j) - } - } - return execute.NewGroupKey(cols, vs) -} - -// getColumnValues returns the array from the column reader as an array.Array. -func getColumnValues(cr flux.ColReader, j int) array.Array { - switch typ := cr.Cols()[j].Type; typ { - case flux.TInt: - return cr.Ints(j) - case flux.TUInt: - return cr.UInts(j) - case flux.TFloat: - return cr.Floats(j) - case flux.TString: - return cr.Strings(j) - case flux.TBool: - return cr.Bools(j) - case flux.TTime: - return cr.Times(j) - default: - panic(fmt.Errorf("unimplemented column type: %s", typ)) - } -} diff --git a/storage/mocks/EngineSchema.go b/storage/mocks/EngineSchema.go deleted file mode 100644 index 8778114f0f4..00000000000 --- a/storage/mocks/EngineSchema.go +++ /dev/null @@ -1,79 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. 
-// Source: github.com/influxdata/influxdb/v2/storage (interfaces: EngineSchema) - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockEngineSchema is a mock of EngineSchema interface. -type MockEngineSchema struct { - ctrl *gomock.Controller - recorder *MockEngineSchemaMockRecorder -} - -// MockEngineSchemaMockRecorder is the mock recorder for MockEngineSchema. -type MockEngineSchemaMockRecorder struct { - mock *MockEngineSchema -} - -// NewMockEngineSchema creates a new mock instance. -func NewMockEngineSchema(ctrl *gomock.Controller) *MockEngineSchema { - mock := &MockEngineSchema{ctrl: ctrl} - mock.recorder = &MockEngineSchemaMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockEngineSchema) EXPECT() *MockEngineSchemaMockRecorder { - return m.recorder -} - -// CreateBucket mocks base method. -func (m *MockEngineSchema) CreateBucket(arg0 context.Context, arg1 *influxdb.Bucket) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateBucket", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// CreateBucket indicates an expected call of CreateBucket. -func (mr *MockEngineSchemaMockRecorder) CreateBucket(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucket", reflect.TypeOf((*MockEngineSchema)(nil).CreateBucket), arg0, arg1) -} - -// DeleteBucket mocks base method. -func (m *MockEngineSchema) DeleteBucket(arg0 context.Context, arg1, arg2 platform.ID) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucket", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteBucket indicates an expected call of DeleteBucket. -func (mr *MockEngineSchemaMockRecorder) DeleteBucket(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucket", reflect.TypeOf((*MockEngineSchema)(nil).DeleteBucket), arg0, arg1, arg2) -} - -// UpdateBucketRetentionPolicy mocks base method. -func (m *MockEngineSchema) UpdateBucketRetentionPolicy(arg0 context.Context, arg1 platform.ID, arg2 *influxdb.BucketUpdate) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateBucketRetentionPolicy", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// UpdateBucketRetentionPolicy indicates an expected call of UpdateBucketRetentionPolicy. -func (mr *MockEngineSchemaMockRecorder) UpdateBucketRetentionPolicy(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateBucketRetentionPolicy", reflect.TypeOf((*MockEngineSchema)(nil).UpdateBucketRetentionPolicy), arg0, arg1, arg2) -} diff --git a/storage/points_writer.go b/storage/points_writer.go deleted file mode 100644 index f60f55127bd..00000000000 --- a/storage/points_writer.go +++ /dev/null @@ -1,69 +0,0 @@ -package storage - -import ( - "context" - "fmt" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/models" -) - -// PointsWriter describes the ability to write points into a storage engine. 
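(Editorial aside, not part of the patch.) The LoggingPointsWriter defined below is meant to be composed around a concrete engine writer. A sketch of the wiring, where the engine value, the bucket service, the constructor name, and the log bucket name are all placeholders (BucketFinder is the bucket-lookup interface defined elsewhere in this package):

// Hypothetical construction of a LoggingPointsWriter.
func newLoggingPointsWriter(engine storage.PointsWriter, buckets storage.BucketFinder) *storage.LoggingPointsWriter {
	return &storage.LoggingPointsWriter{
		Underlying:    engine,         // errored writes to this writer are logged
		BucketFinder:  buckets,        // resolves the log bucket by name within the org
		LogBucketName: "write-errors", // hypothetical log bucket name
	}
}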
-type PointsWriter interface { - WritePoints(ctx context.Context, orgID platform.ID, bucketID platform.ID, points []models.Point) error -} - -// LoggingPointsWriter wraps an underlying points writer but writes logs to -// another bucket when an error occurs. -type LoggingPointsWriter struct { - // Wrapped points writer. Errored writes from here will be logged. - Underlying PointsWriter - - // Service used to look up logging bucket. - BucketFinder BucketFinder - - // Name of the bucket to log to. - LogBucketName string -} - -// WritePoints writes points to the underlying PointsWriter. Logs on error. -func (w *LoggingPointsWriter) WritePoints(ctx context.Context, orgID platform.ID, bucketID platform.ID, p []models.Point) error { - if len(p) == 0 { - return nil - } - - // Write to underlying writer and exit immediately if successful. - err := w.Underlying.WritePoints(ctx, orgID, bucketID, p) - if err == nil { - return nil - } - - // Attempt to lookup log bucket. - bkts, n, e := w.BucketFinder.FindBuckets(ctx, influxdb.BucketFilter{ - OrganizationID: &orgID, - Name: &w.LogBucketName, - }) - if e != nil { - return e - } else if n == 0 { - return fmt.Errorf("logging bucket not found: %q", w.LogBucketName) - } - - // Log error to bucket. - pt, e := models.NewPoint( - "write_errors", - nil, - models.Fields{"error": err.Error()}, - time.Now(), - ) - if e != nil { - return e - } - if e := w.Underlying.WritePoints(ctx, orgID, bkts[0].ID, []models.Point{pt}); e != nil { - return e - } - - return err -} diff --git a/storage/reads/Makefile b/storage/reads/Makefile deleted file mode 100644 index 1e08553daef..00000000000 --- a/storage/reads/Makefile +++ /dev/null @@ -1,33 +0,0 @@ -# List any generated files here -TARGETS = array_cursor.gen.go \ - array_cursor_test.gen.go - -# List any source files used to generate the targets here -SOURCES = gen.go \ - array_cursor.gen.go.tmpl \ - array_cursor_test.gen.go.tmpl \ - array_cursor.gen.go.tmpldata \ - types.tmpldata - -# List any directories that have their own Makefile here -SUBDIRS = datatypes - -# Default target -all: $(SUBDIRS) $(TARGETS) - -# Recurse into subdirs for same make goal -$(SUBDIRS): - $(MAKE) -C $@ $(MAKECMDGOALS) - -# Clean all targets recursively -clean: $(SUBDIRS) - rm -f $(TARGETS) - -# Define go generate if not already defined -GO_GENERATE := go generate - -# Run go generate for the targets -$(TARGETS): $(SOURCES) - $(GO_GENERATE) -x - -.PHONY: all clean $(SUBDIRS) diff --git a/storage/reads/aggregate_resultset.go b/storage/reads/aggregate_resultset.go deleted file mode 100644 index 92023f64795..00000000000 --- a/storage/reads/aggregate_resultset.go +++ /dev/null @@ -1,159 +0,0 @@ -package reads - -import ( - "context" - "math" - - "github.com/influxdata/flux/interval" - "github.com/influxdata/flux/values" - "github.com/influxdata/influxdb/v2/kit/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -type windowAggregateResultSet struct { - ctx context.Context - req *datatypes.ReadWindowAggregateRequest - seriesCursor SeriesCursor - seriesRow SeriesRow - arrayCursors multiShardCursors - cursor cursors.Cursor - err error -} - -// IsLastDescendingAggregateOptimization checks two things: If the request passed in -// is using the `last` aggregate type, and if it doesn't have a window. 
If both -// conditions are met, it returns true, otherwise, it returns false. -func IsLastDescendingAggregateOptimization(req *datatypes.ReadWindowAggregateRequest) bool { - if len(req.Aggregate) != 1 { - // Descending optimization for last only applies when it is the only aggregate. - return false - } - - // The following is an optimization where in the case of a single window, - // the selector `last` is implemented as a descending array cursor followed - // by a limit array cursor that selects only the first point, i.e. the point - // with the largest timestamp, from the descending array cursor. - if req.Aggregate[0].Type == datatypes.Aggregate_AggregateTypeLast { - if req.Window == nil { - if req.WindowEvery == 0 || req.WindowEvery == math.MaxInt64 { - return true - } - } else if (req.Window.Every.Nsecs == 0 && req.Window.Every.Months == 0) || req.Window.Every.Nsecs == math.MaxInt64 { - return true - } - } - return false -} - -func NewWindowAggregateResultSet(ctx context.Context, req *datatypes.ReadWindowAggregateRequest, cursor SeriesCursor) (ResultSet, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - span.LogKV("aggregate_window_every", req.WindowEvery) - for _, aggregate := range req.Aggregate { - span.LogKV("aggregate_type", aggregate.String()) - } - - if nAggs := len(req.Aggregate); nAggs != 1 { - return nil, errors.Errorf(errors.InternalError, "attempt to create a windowAggregateResultSet with %v aggregate functions", nAggs) - } - - ascending := !IsLastDescendingAggregateOptimization(req) - results := &windowAggregateResultSet{ - ctx: ctx, - req: req, - seriesCursor: cursor, - arrayCursors: newMultiShardArrayCursors(ctx, req.Range.GetStart(), req.Range.GetEnd(), ascending), - } - return results, nil -} - -func (r *windowAggregateResultSet) Next() bool { - if r == nil || r.err != nil { - return false - } - - seriesRow := r.seriesCursor.Next() - if seriesRow == nil { - return false - } - r.seriesRow = *seriesRow - r.cursor, r.err = r.createCursor(r.seriesRow) - return r.err == nil -} - -func convertNsecs(nsecs int64) values.Duration { - negative := false - if nsecs < 0 { - negative, nsecs = true, -nsecs - } - return values.MakeDuration(nsecs, 0, negative) -} - -func (r *windowAggregateResultSet) createCursor(seriesRow SeriesRow) (cursors.Cursor, error) { - agg := r.req.Aggregate[0] - every := r.req.WindowEvery - offset := r.req.Offset - cursor := r.arrayCursors.createCursor(seriesRow) - - var everyDur values.Duration - var offsetDur values.Duration - var periodDur values.Duration - - if r.req.Window != nil { - // assume window was passed in and translate protobuf window to execute.Window - everyDur = values.MakeDuration(r.req.Window.Every.Nsecs, r.req.Window.Every.Months, r.req.Window.Every.Negative) - periodDur = values.MakeDuration(r.req.Window.Every.Nsecs, r.req.Window.Every.Months, r.req.Window.Every.Negative) - if r.req.Window.Offset != nil { - offsetDur = values.MakeDuration(r.req.Window.Offset.Nsecs, r.req.Window.Offset.Months, r.req.Window.Offset.Negative) - } else { - offsetDur = values.MakeDuration(0, 0, false) - } - } else { - // nanosecond values were passed in and need to be converted to windows - everyDur = convertNsecs(every) - periodDur = convertNsecs(every) - offsetDur = convertNsecs(offset) - } - - window, err := interval.NewWindow(everyDur, periodDur, offsetDur) - if err != nil { - return nil, err - } - - if everyDur.Nanoseconds() == math.MaxInt64 { - // This means to aggregate over whole series for the query's time range - return 
newAggregateArrayCursor(r.ctx, agg, cursor) - } else { - return newWindowAggregateArrayCursor(r.ctx, agg, window, cursor) - } -} - -func (r *windowAggregateResultSet) Cursor() cursors.Cursor { - return r.cursor -} - -func (r *windowAggregateResultSet) Close() { - if r == nil { - return - } - r.seriesRow.Query = nil - r.seriesCursor.Close() -} - -func (r *windowAggregateResultSet) Err() error { return r.err } - -func (r *windowAggregateResultSet) Stats() cursors.CursorStats { - if r.seriesRow.Query == nil { - return cursors.CursorStats{} - } - // See the equivalent method in *resultSet.Stats. - return r.seriesRow.Query.Stats() -} - -func (r *windowAggregateResultSet) Tags() models.Tags { - return r.seriesRow.Tags -} diff --git a/storage/reads/aggregate_resultset_test.go b/storage/reads/aggregate_resultset_test.go deleted file mode 100644 index ac86137eef4..00000000000 --- a/storage/reads/aggregate_resultset_test.go +++ /dev/null @@ -1,335 +0,0 @@ -package reads_test - -import ( - "context" - "reflect" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -func TestNewWindowAggregateResultSet_Tags(t *testing.T) { - - newCursor := sliceSeriesCursor{ - rows: newSeriesRows( - "clicks click=1 1", - )} - - request := datatypes.ReadWindowAggregateRequest{ - Aggregate: []*datatypes.Aggregate{ - { - Type: datatypes.Aggregate_AggregateTypeMean, - }, - }, - } - resultSet, err := reads.NewWindowAggregateResultSet(context.Background(), &request, &newCursor) - - if err != nil { - t.Fatalf("error creating WindowAggregateResultSet: %s", err) - } - - // If .Next() was never called, seriesRow is nil and tags are empty. 
- expectedTags := "[]" - if resultSet.Tags().String() != expectedTags { - t.Errorf("expected tags: %s got: %s", expectedTags, resultSet.Tags().String()) - } - - resultSet.Next() - expectedTags = "[{_m clicks}]" - if resultSet.Tags().String() != expectedTags { - t.Errorf("expected tags: %s got: %s", expectedTags, resultSet.Tags().String()) - } -} - -type mockIntegerArrayCursor struct { - callCount int -} - -func (i *mockIntegerArrayCursor) Close() {} -func (i *mockIntegerArrayCursor) Err() error { return nil } -func (i *mockIntegerArrayCursor) Stats() cursors.CursorStats { return cursors.CursorStats{} } -func (i *mockIntegerArrayCursor) Next() *cursors.IntegerArray { - if i.callCount == 1 { - return &cursors.IntegerArray{} - } - i.callCount++ - return &cursors.IntegerArray{ - Timestamps: []int64{ - 1000000000, - 1000000005, - 1000000010, - 1000000011, - 1000000012, - 1000000013, - 1000000014, - 1000000020, - 2678400000000000, - 5000000000000000, - 5097600000000001, - }, - Values: []int64{100, 55, 256, 83, 99, 124, 1979, 4, 67, 49929, 51000}, - } -} - -type mockStringArrayCursor struct{} - -func (i *mockStringArrayCursor) Close() {} -func (i *mockStringArrayCursor) Err() error { return nil } -func (i *mockStringArrayCursor) Stats() cursors.CursorStats { return cursors.CursorStats{} } -func (i *mockStringArrayCursor) Next() *cursors.StringArray { - return &cursors.StringArray{ - Timestamps: []int64{1000000000}, - Values: []string{"a"}, - } -} - -type mockCursorIterator struct { - newCursorFn func(req *cursors.CursorRequest) cursors.Cursor - statsFn func() cursors.CursorStats -} - -func (i *mockCursorIterator) Next(ctx context.Context, req *cursors.CursorRequest) (cursors.Cursor, error) { - return i.newCursorFn(req), nil -} -func (i *mockCursorIterator) Stats() cursors.CursorStats { - if i.statsFn == nil { - return cursors.CursorStats{} - } - return i.statsFn() -} - -type mockReadCursor struct { - rows []reads.SeriesRow - index int64 -} - -func newMockReadCursor(keys ...string) mockReadCursor { - rows := make([]reads.SeriesRow, len(keys)) - for i := range keys { - rows[i].Name, rows[i].SeriesTags = models.ParseKeyBytes([]byte(keys[i])) - rows[i].Tags = rows[i].SeriesTags.Clone() - var itrs cursors.CursorIterators - cur := &mockCursorIterator{ - newCursorFn: func(req *cursors.CursorRequest) cursors.Cursor { - return &mockIntegerArrayCursor{} - }, - statsFn: func() cursors.CursorStats { - return cursors.CursorStats{ScannedBytes: 500, ScannedValues: 10} - }, - } - itrs = append(itrs, cur) - rows[i].Query = itrs - } - return mockReadCursor{rows: rows} -} - -func (c *mockReadCursor) Next() *reads.SeriesRow { - if c.index == int64(len(c.rows)) { - return nil - } - row := c.rows[c.index] - c.index++ - return &row -} -func (c *mockReadCursor) Close() {} -func (c *mockReadCursor) Err() error { return nil } - -// The stats from a WindowAggregateResultSet are retrieved from the cursor. -func TestNewWindowAggregateResultSet_Stats(t *testing.T) { - - newCursor := newMockReadCursor( - "clicks click=1 1", - ) - - request := datatypes.ReadWindowAggregateRequest{ - Aggregate: []*datatypes.Aggregate{ - { - Type: datatypes.Aggregate_AggregateTypeMean, - }, - }, - } - resultSet, err := reads.NewWindowAggregateResultSet(context.Background(), &request, &newCursor) - - if err != nil { - t.Fatalf("error creating WindowAggregateResultSet: %s", err) - } - - // If .Next() was never called, seriesRow is nil and stats are empty. 
- stats := resultSet.Stats() - if stats.ScannedBytes != 0 || stats.ScannedValues != 0 { - t.Errorf("expected statistics to be empty") - } - - resultSet.Next() - stats = resultSet.Stats() - if stats.ScannedBytes != 500 { - t.Errorf("Expected scanned bytes: %d got: %d", 500, stats.ScannedBytes) - } - if stats.ScannedValues != 10 { - t.Errorf("Expected scanned values: %d got: %d", 10, stats.ScannedValues) - } -} - -// A mean window aggregate is supported -func TestNewWindowAggregateResultSet_Mean(t *testing.T) { - - newCursor := newMockReadCursor( - "clicks click=1 1", - ) - - request := datatypes.ReadWindowAggregateRequest{ - Aggregate: []*datatypes.Aggregate{ - &datatypes.Aggregate{Type: datatypes.Aggregate_AggregateTypeMean}, - }, - WindowEvery: 10, - } - resultSet, err := reads.NewWindowAggregateResultSet(context.Background(), &request, &newCursor) - - if err != nil { - t.Fatalf("error creating WindowAggregateResultSet: %s", err) - } - - if !resultSet.Next() { - t.Fatalf("unexpected: resultSet could not advance") - } - cursor := resultSet.Cursor() - if cursor == nil { - t.Fatalf("unexpected: cursor was nil") - } - floatArrayCursor := cursor.(cursors.FloatArrayCursor) - floatArray := floatArrayCursor.Next() - - if !reflect.DeepEqual(floatArray.Timestamps, []int64{1000000010, 1000000020, 1000000030, 2678400000000010, 5000000000000010, 5097600000000010}) { - t.Log(time.Unix(0, floatArray.Timestamps[0])) - t.Errorf("unexpected mean timestamps: %v", floatArray.Timestamps) - } - if !reflect.DeepEqual(floatArray.Values, []float64{77.5, 508.2, 4, 67, 49929, 51000}) { - t.Errorf("unexpected mean values: %v", floatArray.Values) - } -} - -func TestNewWindowAggregateResultSet_Months(t *testing.T) { - - newCursor := newMockReadCursor( - "clicks click=1 1", - ) - request := datatypes.ReadWindowAggregateRequest{ - Aggregate: []*datatypes.Aggregate{ - &datatypes.Aggregate{Type: datatypes.Aggregate_AggregateTypeMean}, - }, - Window: &datatypes.Window{ - Every: &datatypes.Duration{ - Nsecs: 0, - Months: 1, - Negative: false, - }, - }, - } - resultSet, err := reads.NewWindowAggregateResultSet(context.Background(), &request, &newCursor) - - if err != nil { - t.Fatalf("error creating WindowAggregateResultSet: %s", err) - } - - if !resultSet.Next() { - t.Fatalf("unexpected: resultSet could not advance") - } - cursor := resultSet.Cursor() - if cursor == nil { - t.Fatalf("unexpected: cursor was nil") - } - floatArrayCursor := cursor.(cursors.FloatArrayCursor) - floatArray := floatArrayCursor.Next() - - if !reflect.DeepEqual(floatArray.Timestamps, []int64{2678400000000000, 5097600000000000, 7776000000000000}) { - t.Log(time.Unix(0, floatArray.Timestamps[0])) - t.Errorf("unexpected month timestamps: %v", floatArray.Timestamps) - } - if !reflect.DeepEqual(floatArray.Values, []float64{337.5, 24998, 51000}) { - t.Errorf("unexpected month values: %v", floatArray.Values) - } -} - -func TestNewWindowAggregateResultSet_UnsupportedTyped(t *testing.T) { - newCursor := newMockReadCursor( - "clicks click=1 1", - ) - for i := range newCursor.rows[0].Query { - newCursor.rows[0].Query[i] = &mockCursorIterator{ - newCursorFn: func(req *cursors.CursorRequest) cursors.Cursor { - return &mockStringArrayCursor{} - }, - } - } - - request := datatypes.ReadWindowAggregateRequest{ - Aggregate: []*datatypes.Aggregate{ - {Type: datatypes.Aggregate_AggregateTypeMean}, - }, - WindowEvery: 10, - } - resultSet, err := reads.NewWindowAggregateResultSet(context.Background(), &request, &newCursor) - - if err != nil { - t.Fatalf("error creating 
WindowAggregateResultSet: %s", err) - } - - if resultSet.Next() { - t.Fatal("unexpected: resultSet should not have advanced") - } - err = resultSet.Err() - if err == nil { - t.Fatal("expected error") - } - if want, got := "unsupported input type for mean aggregate: string", err.Error(); want != got { - t.Fatalf("unexpected error:\n\t- %q\n\t+ %q", want, got) - } -} - -func TestNewWindowAggregateResultSet_TimeRange(t *testing.T) { - newCursor := newMockReadCursor( - "clicks click=1 1", - ) - for i := range newCursor.rows[0].Query { - newCursor.rows[0].Query[i] = &mockCursorIterator{ - newCursorFn: func(req *cursors.CursorRequest) cursors.Cursor { - if want, got := int64(0), req.StartTime; want != got { - t.Errorf("unexpected start time -want/+got:\n\t- %d\n\t+ %d", want, got) - } - if want, got := int64(29), req.EndTime; want != got { - t.Errorf("unexpected end time -want/+got:\n\t- %d\n\t+ %d", want, got) - } - return &mockIntegerArrayCursor{} - }, - } - } - - ctx := context.Background() - req := datatypes.ReadWindowAggregateRequest{ - Range: &datatypes.TimestampRange{ - Start: 0, - End: 30, - }, - Aggregate: []*datatypes.Aggregate{ - { - Type: datatypes.Aggregate_AggregateTypeCount, - }, - }, - Window: &datatypes.Window{ - Every: &datatypes.Duration{Nsecs: int64(time.Minute)}, - }, - } - - resultSet, err := reads.NewWindowAggregateResultSet(ctx, &req, &newCursor) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if !resultSet.Next() { - t.Fatal("expected result") - } -} diff --git a/storage/reads/array_cursor.gen.go b/storage/reads/array_cursor.gen.go deleted file mode 100644 index 63b45cda913..00000000000 --- a/storage/reads/array_cursor.gen.go +++ /dev/null @@ -1,3715 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! -// Source: array_cursor.gen.go.tmpl - -package reads - -import ( - "errors" - "fmt" - "math" - - "github.com/influxdata/flux/interval" - "github.com/influxdata/flux/values" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -const ( - // MaxPointsPerBlock is the maximum number of points in an encoded - // block in a TSM file. It should match the value in the tsm1 - // package, but we don't want to import it. 
- MaxPointsPerBlock = 1000 -) - -func newLimitArrayCursor(cur cursors.Cursor) cursors.Cursor { - switch cur := cur.(type) { - - case cursors.FloatArrayCursor: - return newFloatLimitArrayCursor(cur) - - case cursors.IntegerArrayCursor: - return newIntegerLimitArrayCursor(cur) - - case cursors.UnsignedArrayCursor: - return newUnsignedLimitArrayCursor(cur) - - case cursors.StringArrayCursor: - return newStringLimitArrayCursor(cur) - - case cursors.BooleanArrayCursor: - return newBooleanLimitArrayCursor(cur) - - default: - panic(fmt.Sprintf("unreachable: %T", cur)) - } -} - -func newWindowFirstArrayCursor(cur cursors.Cursor, window interval.Window) cursors.Cursor { - if window.IsZero() { - return newLimitArrayCursor(cur) - } - switch cur := cur.(type) { - - case cursors.FloatArrayCursor: - return newFloatWindowFirstArrayCursor(cur, window) - - case cursors.IntegerArrayCursor: - return newIntegerWindowFirstArrayCursor(cur, window) - - case cursors.UnsignedArrayCursor: - return newUnsignedWindowFirstArrayCursor(cur, window) - - case cursors.StringArrayCursor: - return newStringWindowFirstArrayCursor(cur, window) - - case cursors.BooleanArrayCursor: - return newBooleanWindowFirstArrayCursor(cur, window) - - default: - panic(fmt.Sprintf("unreachable: %T", cur)) - } -} - -func newWindowLastArrayCursor(cur cursors.Cursor, window interval.Window) cursors.Cursor { - if window.IsZero() { - return newLimitArrayCursor(cur) - } - switch cur := cur.(type) { - - case cursors.FloatArrayCursor: - return newFloatWindowLastArrayCursor(cur, window) - - case cursors.IntegerArrayCursor: - return newIntegerWindowLastArrayCursor(cur, window) - - case cursors.UnsignedArrayCursor: - return newUnsignedWindowLastArrayCursor(cur, window) - - case cursors.StringArrayCursor: - return newStringWindowLastArrayCursor(cur, window) - - case cursors.BooleanArrayCursor: - return newBooleanWindowLastArrayCursor(cur, window) - - default: - panic(fmt.Sprintf("unreachable: %T", cur)) - } -} - -func newWindowCountArrayCursor(cur cursors.Cursor, window interval.Window) cursors.Cursor { - switch cur := cur.(type) { - - case cursors.FloatArrayCursor: - return newFloatWindowCountArrayCursor(cur, window) - - case cursors.IntegerArrayCursor: - return newIntegerWindowCountArrayCursor(cur, window) - - case cursors.UnsignedArrayCursor: - return newUnsignedWindowCountArrayCursor(cur, window) - - case cursors.StringArrayCursor: - return newStringWindowCountArrayCursor(cur, window) - - case cursors.BooleanArrayCursor: - return newBooleanWindowCountArrayCursor(cur, window) - - default: - panic(fmt.Sprintf("unreachable: %T", cur)) - } -} - -func newWindowSumArrayCursor(cur cursors.Cursor, window interval.Window) (cursors.Cursor, error) { - switch cur := cur.(type) { - - case cursors.FloatArrayCursor: - return newFloatWindowSumArrayCursor(cur, window), nil - - case cursors.IntegerArrayCursor: - return newIntegerWindowSumArrayCursor(cur, window), nil - - case cursors.UnsignedArrayCursor: - return newUnsignedWindowSumArrayCursor(cur, window), nil - - default: - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: fmt.Sprintf("unsupported input type for sum aggregate: %s", arrayCursorType(cur)), - } - } -} - -func newWindowMinArrayCursor(cur cursors.Cursor, window interval.Window) cursors.Cursor { - switch cur := cur.(type) { - - case cursors.FloatArrayCursor: - return newFloatWindowMinArrayCursor(cur, window) - - case cursors.IntegerArrayCursor: - return newIntegerWindowMinArrayCursor(cur, window) - - case cursors.UnsignedArrayCursor: - return 
newUnsignedWindowMinArrayCursor(cur, window) - - default: - panic(fmt.Sprintf("unsupported for aggregate min: %T", cur)) - } -} - -func newWindowMaxArrayCursor(cur cursors.Cursor, window interval.Window) cursors.Cursor { - switch cur := cur.(type) { - - case cursors.FloatArrayCursor: - return newFloatWindowMaxArrayCursor(cur, window) - - case cursors.IntegerArrayCursor: - return newIntegerWindowMaxArrayCursor(cur, window) - - case cursors.UnsignedArrayCursor: - return newUnsignedWindowMaxArrayCursor(cur, window) - - default: - panic(fmt.Sprintf("unsupported for aggregate max: %T", cur)) - } -} - -func newWindowMeanArrayCursor(cur cursors.Cursor, window interval.Window) (cursors.Cursor, error) { - switch cur := cur.(type) { - - case cursors.FloatArrayCursor: - return newFloatWindowMeanArrayCursor(cur, window), nil - - case cursors.IntegerArrayCursor: - return newIntegerWindowMeanArrayCursor(cur, window), nil - - case cursors.UnsignedArrayCursor: - return newUnsignedWindowMeanArrayCursor(cur, window), nil - - default: - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: fmt.Sprintf("unsupported input type for mean aggregate: %s", arrayCursorType(cur)), - } - } -} - -// ******************** -// Float Array Cursor - -type floatArrayFilterCursor struct { - cursors.FloatArrayCursor - cond expression - m *singleValue - res *cursors.FloatArray - tmp *cursors.FloatArray -} - -func newFloatFilterArrayCursor(cond expression) *floatArrayFilterCursor { - return &floatArrayFilterCursor{ - cond: cond, - m: &singleValue{}, - res: cursors.NewFloatArrayLen(MaxPointsPerBlock), - tmp: &cursors.FloatArray{}, - } -} - -func (c *floatArrayFilterCursor) reset(cur cursors.FloatArrayCursor) { - c.FloatArrayCursor = cur - c.tmp.Timestamps, c.tmp.Values = nil, nil -} - -func (c *floatArrayFilterCursor) Stats() cursors.CursorStats { return c.FloatArrayCursor.Stats() } - -func (c *floatArrayFilterCursor) Next() *cursors.FloatArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.FloatArray - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.FloatArrayCursor.Next() - } - -LOOP: - for len(a.Timestamps) > 0 { - for i, v := range a.Values { - c.m.v = v - if c.cond.EvalBool(c.m) { - c.res.Timestamps[pos] = a.Timestamps[i] - c.res.Values[pos] = v - pos++ - if pos >= MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i+1:] - c.tmp.Values = a.Values[i+1:] - break LOOP - } - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. 
- c.tmp.Timestamps = nil - c.tmp.Values = nil - - a = c.FloatArrayCursor.Next() - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type floatMultiShardArrayCursor struct { - cursors.FloatArrayCursor - cursorContext - filter *floatArrayFilterCursor -} - -func (c *floatMultiShardArrayCursor) reset(cur cursors.FloatArrayCursor, itrs cursors.CursorIterators, cond expression) { - if cond != nil { - if c.filter == nil { - c.filter = newFloatFilterArrayCursor(cond) - } else { - c.filter.cond = cond - } - c.filter.reset(cur) - cur = c.filter - } - - c.FloatArrayCursor = cur - c.itrs = itrs - c.err = nil -} - -func (c *floatMultiShardArrayCursor) Err() error { return c.err } - -func (c *floatMultiShardArrayCursor) Stats() cursors.CursorStats { - return c.FloatArrayCursor.Stats() -} - -func (c *floatMultiShardArrayCursor) Next() *cursors.FloatArray { - for { - a := c.FloatArrayCursor.Next() - if a.Len() == 0 { - if c.nextArrayCursor() { - continue - } - } - return a - } -} - -func (c *floatMultiShardArrayCursor) nextArrayCursor() bool { - if len(c.itrs) == 0 { - return false - } - - c.FloatArrayCursor.Close() - - var itr cursors.CursorIterator - var cur cursors.Cursor - for cur == nil && len(c.itrs) > 0 { - itr, c.itrs = c.itrs[0], c.itrs[1:] - cur, _ = itr.Next(c.ctx, c.req) - } - - var ok bool - if cur != nil { - var next cursors.FloatArrayCursor - next, ok = cur.(cursors.FloatArrayCursor) - if !ok { - cur.Close() - next = FloatEmptyArrayCursor - c.err = errors.New("expected float cursor") - } else { - if c.filter != nil { - c.filter.reset(next) - next = c.filter - } - } - c.FloatArrayCursor = next - } else { - c.FloatArrayCursor = FloatEmptyArrayCursor - } - - return ok -} - -type floatLimitArrayCursor struct { - cursors.FloatArrayCursor - res *cursors.FloatArray - done bool -} - -func newFloatLimitArrayCursor(cur cursors.FloatArrayCursor) *floatLimitArrayCursor { - return &floatLimitArrayCursor{ - FloatArrayCursor: cur, - res: cursors.NewFloatArrayLen(1), - } -} - -func (c *floatLimitArrayCursor) Stats() cursors.CursorStats { return c.FloatArrayCursor.Stats() } - -func (c *floatLimitArrayCursor) Next() *cursors.FloatArray { - if c.done { - return &cursors.FloatArray{} - } - a := c.FloatArrayCursor.Next() - if len(a.Timestamps) == 0 { - return a - } - c.done = true - c.res.Timestamps[0] = a.Timestamps[0] - c.res.Values[0] = a.Values[0] - return c.res -} - -type floatWindowLastArrayCursor struct { - cursors.FloatArrayCursor - windowEnd int64 - res *cursors.FloatArray - tmp *cursors.FloatArray - window interval.Window -} - -// Window array cursors assume that every != 0 && every != MaxInt64. -// Such a cursor will panic in the first case and possibly overflow in the second. 
-func newFloatWindowLastArrayCursor(cur cursors.FloatArrayCursor, window interval.Window) *floatWindowLastArrayCursor { - return &floatWindowLastArrayCursor{ - FloatArrayCursor: cur, - windowEnd: math.MinInt64, - res: cursors.NewFloatArrayLen(MaxPointsPerBlock), - tmp: &cursors.FloatArray{}, - window: window, - } -} - -func (c *floatWindowLastArrayCursor) Stats() cursors.CursorStats { - return c.FloatArrayCursor.Stats() -} - -func (c *floatWindowLastArrayCursor) Next() *cursors.FloatArray { - cur := -1 - -NEXT: - var a *cursors.FloatArray - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.FloatArrayCursor.Next() - } - - if a.Len() == 0 { - c.res.Timestamps = c.res.Timestamps[:cur+1] - c.res.Values = c.res.Values[:cur+1] - return c.res - } - - for i, t := range a.Timestamps { - if t >= c.windowEnd { - cur++ - } - - if cur == MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i:] - c.tmp.Values = a.Values[i:] - return c.res - } - - c.res.Timestamps[cur] = t - c.res.Values[cur] = a.Values[i] - - c.windowEnd = int64(c.window.GetLatestBounds(values.Time(t)).Stop()) - } - - c.tmp.Timestamps = nil - c.tmp.Values = nil - - goto NEXT -} - -type floatWindowFirstArrayCursor struct { - cursors.FloatArrayCursor - windowEnd int64 - res *cursors.FloatArray - tmp *cursors.FloatArray - window interval.Window -} - -// Window array cursors assume that every != 0 && every != MaxInt64. -// Such a cursor will panic in the first case and possibly overflow in the second. -func newFloatWindowFirstArrayCursor(cur cursors.FloatArrayCursor, window interval.Window) *floatWindowFirstArrayCursor { - return &floatWindowFirstArrayCursor{ - FloatArrayCursor: cur, - windowEnd: math.MinInt64, - res: cursors.NewFloatArrayLen(MaxPointsPerBlock), - tmp: &cursors.FloatArray{}, - window: window, - } -} - -func (c *floatWindowFirstArrayCursor) Stats() cursors.CursorStats { - return c.FloatArrayCursor.Stats() -} - -func (c *floatWindowFirstArrayCursor) Next() *cursors.FloatArray { - c.res.Timestamps = c.res.Timestamps[:0] - c.res.Values = c.res.Values[:0] - -NEXT: - var a *cursors.FloatArray - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.FloatArrayCursor.Next() - } - - if a.Len() == 0 { - return c.res - } - - for i, t := range a.Timestamps { - if t < c.windowEnd { - continue - } - - c.windowEnd = int64(c.window.GetLatestBounds(values.Time(t)).Stop()) - - c.res.Timestamps = append(c.res.Timestamps, t) - c.res.Values = append(c.res.Values, a.Values[i]) - - if c.res.Len() == MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i+1:] - c.tmp.Values = a.Values[i+1:] - return c.res - } - } - - c.tmp.Timestamps = nil - c.tmp.Values = nil - - goto NEXT -} - -type floatWindowCountArrayCursor struct { - cursors.FloatArrayCursor - res *cursors.IntegerArray - tmp *cursors.FloatArray - window interval.Window -} - -func newFloatWindowCountArrayCursor(cur cursors.FloatArrayCursor, window interval.Window) *floatWindowCountArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &floatWindowCountArrayCursor{ - FloatArrayCursor: cur, - res: cursors.NewIntegerArrayLen(resLen), - tmp: &cursors.FloatArray{}, - window: window, - } -} - -func (c *floatWindowCountArrayCursor) Stats() cursors.CursorStats { - return c.FloatArrayCursor.Stats() -} - -func (c *floatWindowCountArrayCursor) Next() *cursors.IntegerArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.FloatArray - if c.tmp.Len() > 0 { - a = c.tmp - } 
else { - a = c.FloatArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.IntegerArray{} - } - - rowIdx := 0 - var acc int64 = 0 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - acc = 0 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - acc++ - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. - c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.FloatArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type floatWindowSumArrayCursor struct { - cursors.FloatArrayCursor - res *cursors.FloatArray - tmp *cursors.FloatArray - window interval.Window -} - -func newFloatWindowSumArrayCursor(cur cursors.FloatArrayCursor, window interval.Window) *floatWindowSumArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &floatWindowSumArrayCursor{ - FloatArrayCursor: cur, - res: cursors.NewFloatArrayLen(resLen), - tmp: &cursors.FloatArray{}, - window: window, - } -} - -func (c *floatWindowSumArrayCursor) Stats() cursors.CursorStats { - return c.FloatArrayCursor.Stats() -} - -func (c *floatWindowSumArrayCursor) Next() *cursors.FloatArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.FloatArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.FloatArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.FloatArray{} - } - - rowIdx := 0 - var acc float64 = 0 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. 
- // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - acc = 0 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - acc += a.Values[rowIdx] - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. - c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.FloatArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type floatWindowMinArrayCursor struct { - cursors.FloatArrayCursor - res *cursors.FloatArray - tmp *cursors.FloatArray - window interval.Window -} - -func newFloatWindowMinArrayCursor(cur cursors.FloatArrayCursor, window interval.Window) *floatWindowMinArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &floatWindowMinArrayCursor{ - FloatArrayCursor: cur, - res: cursors.NewFloatArrayLen(resLen), - tmp: &cursors.FloatArray{}, - window: window, - } -} - -func (c *floatWindowMinArrayCursor) Stats() cursors.CursorStats { - return c.FloatArrayCursor.Stats() -} - -func (c *floatWindowMinArrayCursor) Next() *cursors.FloatArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.FloatArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.FloatArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.FloatArray{} - } - - rowIdx := 0 - var acc float64 = math.MaxFloat64 - var tsAcc int64 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = tsAcc - c.res.Values[pos] = acc - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - acc = math.MaxFloat64 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - if !windowHasPoints || a.Values[rowIdx] < acc { - acc = a.Values[rowIdx] - tsAcc = a.Timestamps[rowIdx] - } - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. 
- c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.FloatArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = tsAcc - c.res.Values[pos] = acc - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type floatWindowMaxArrayCursor struct { - cursors.FloatArrayCursor - res *cursors.FloatArray - tmp *cursors.FloatArray - window interval.Window -} - -func newFloatWindowMaxArrayCursor(cur cursors.FloatArrayCursor, window interval.Window) *floatWindowMaxArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &floatWindowMaxArrayCursor{ - FloatArrayCursor: cur, - res: cursors.NewFloatArrayLen(resLen), - tmp: &cursors.FloatArray{}, - window: window, - } -} - -func (c *floatWindowMaxArrayCursor) Stats() cursors.CursorStats { - return c.FloatArrayCursor.Stats() -} - -func (c *floatWindowMaxArrayCursor) Next() *cursors.FloatArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.FloatArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.FloatArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.FloatArray{} - } - - rowIdx := 0 - var acc float64 = -math.MaxFloat64 - var tsAcc int64 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = tsAcc - c.res.Values[pos] = acc - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - acc = -math.MaxFloat64 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - if !windowHasPoints || a.Values[rowIdx] > acc { - acc = a.Values[rowIdx] - tsAcc = a.Timestamps[rowIdx] - } - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. 
- c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.FloatArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = tsAcc - c.res.Values[pos] = acc - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type floatWindowMeanArrayCursor struct { - cursors.FloatArrayCursor - res *cursors.FloatArray - tmp *cursors.FloatArray - window interval.Window -} - -func newFloatWindowMeanArrayCursor(cur cursors.FloatArrayCursor, window interval.Window) *floatWindowMeanArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &floatWindowMeanArrayCursor{ - FloatArrayCursor: cur, - res: cursors.NewFloatArrayLen(resLen), - tmp: &cursors.FloatArray{}, - window: window, - } -} - -func (c *floatWindowMeanArrayCursor) Stats() cursors.CursorStats { - return c.FloatArrayCursor.Stats() -} - -func (c *floatWindowMeanArrayCursor) Next() *cursors.FloatArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.FloatArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.FloatArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.FloatArray{} - } - - rowIdx := 0 - var sum float64 - var count int64 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = sum / float64(count) - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - sum = 0 - count = 0 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - sum += a.Values[rowIdx] - count++ - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. 
- c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.FloatArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = sum / float64(count) - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type floatEmptyArrayCursor struct { - res cursors.FloatArray -} - -var FloatEmptyArrayCursor cursors.FloatArrayCursor = &floatEmptyArrayCursor{} - -func (c *floatEmptyArrayCursor) Err() error { return nil } -func (c *floatEmptyArrayCursor) Close() {} -func (c *floatEmptyArrayCursor) Stats() cursors.CursorStats { return cursors.CursorStats{} } -func (c *floatEmptyArrayCursor) Next() *cursors.FloatArray { return &c.res } - -// ******************** -// Integer Array Cursor - -type integerArrayFilterCursor struct { - cursors.IntegerArrayCursor - cond expression - m *singleValue - res *cursors.IntegerArray - tmp *cursors.IntegerArray -} - -func newIntegerFilterArrayCursor(cond expression) *integerArrayFilterCursor { - return &integerArrayFilterCursor{ - cond: cond, - m: &singleValue{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.IntegerArray{}, - } -} - -func (c *integerArrayFilterCursor) reset(cur cursors.IntegerArrayCursor) { - c.IntegerArrayCursor = cur - c.tmp.Timestamps, c.tmp.Values = nil, nil -} - -func (c *integerArrayFilterCursor) Stats() cursors.CursorStats { return c.IntegerArrayCursor.Stats() } - -func (c *integerArrayFilterCursor) Next() *cursors.IntegerArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.IntegerArray - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.IntegerArrayCursor.Next() - } - -LOOP: - for len(a.Timestamps) > 0 { - for i, v := range a.Values { - c.m.v = v - if c.cond.EvalBool(c.m) { - c.res.Timestamps[pos] = a.Timestamps[i] - c.res.Values[pos] = v - pos++ - if pos >= MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i+1:] - c.tmp.Values = a.Values[i+1:] - break LOOP - } - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. 
- c.tmp.Timestamps = nil - c.tmp.Values = nil - - a = c.IntegerArrayCursor.Next() - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type integerMultiShardArrayCursor struct { - cursors.IntegerArrayCursor - cursorContext - filter *integerArrayFilterCursor -} - -func (c *integerMultiShardArrayCursor) reset(cur cursors.IntegerArrayCursor, itrs cursors.CursorIterators, cond expression) { - if cond != nil { - if c.filter == nil { - c.filter = newIntegerFilterArrayCursor(cond) - } else { - c.filter.cond = cond - } - c.filter.reset(cur) - cur = c.filter - } - - c.IntegerArrayCursor = cur - c.itrs = itrs - c.err = nil -} - -func (c *integerMultiShardArrayCursor) Err() error { return c.err } - -func (c *integerMultiShardArrayCursor) Stats() cursors.CursorStats { - return c.IntegerArrayCursor.Stats() -} - -func (c *integerMultiShardArrayCursor) Next() *cursors.IntegerArray { - for { - a := c.IntegerArrayCursor.Next() - if a.Len() == 0 { - if c.nextArrayCursor() { - continue - } - } - return a - } -} - -func (c *integerMultiShardArrayCursor) nextArrayCursor() bool { - if len(c.itrs) == 0 { - return false - } - - c.IntegerArrayCursor.Close() - - var itr cursors.CursorIterator - var cur cursors.Cursor - for cur == nil && len(c.itrs) > 0 { - itr, c.itrs = c.itrs[0], c.itrs[1:] - cur, _ = itr.Next(c.ctx, c.req) - } - - var ok bool - if cur != nil { - var next cursors.IntegerArrayCursor - next, ok = cur.(cursors.IntegerArrayCursor) - if !ok { - cur.Close() - next = IntegerEmptyArrayCursor - c.err = errors.New("expected integer cursor") - } else { - if c.filter != nil { - c.filter.reset(next) - next = c.filter - } - } - c.IntegerArrayCursor = next - } else { - c.IntegerArrayCursor = IntegerEmptyArrayCursor - } - - return ok -} - -type integerLimitArrayCursor struct { - cursors.IntegerArrayCursor - res *cursors.IntegerArray - done bool -} - -func newIntegerLimitArrayCursor(cur cursors.IntegerArrayCursor) *integerLimitArrayCursor { - return &integerLimitArrayCursor{ - IntegerArrayCursor: cur, - res: cursors.NewIntegerArrayLen(1), - } -} - -func (c *integerLimitArrayCursor) Stats() cursors.CursorStats { return c.IntegerArrayCursor.Stats() } - -func (c *integerLimitArrayCursor) Next() *cursors.IntegerArray { - if c.done { - return &cursors.IntegerArray{} - } - a := c.IntegerArrayCursor.Next() - if len(a.Timestamps) == 0 { - return a - } - c.done = true - c.res.Timestamps[0] = a.Timestamps[0] - c.res.Values[0] = a.Values[0] - return c.res -} - -type integerWindowLastArrayCursor struct { - cursors.IntegerArrayCursor - windowEnd int64 - res *cursors.IntegerArray - tmp *cursors.IntegerArray - window interval.Window -} - -// Window array cursors assume that every != 0 && every != MaxInt64. -// Such a cursor will panic in the first case and possibly overflow in the second. 
-func newIntegerWindowLastArrayCursor(cur cursors.IntegerArrayCursor, window interval.Window) *integerWindowLastArrayCursor { - return &integerWindowLastArrayCursor{ - IntegerArrayCursor: cur, - windowEnd: math.MinInt64, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.IntegerArray{}, - window: window, - } -} - -func (c *integerWindowLastArrayCursor) Stats() cursors.CursorStats { - return c.IntegerArrayCursor.Stats() -} - -func (c *integerWindowLastArrayCursor) Next() *cursors.IntegerArray { - cur := -1 - -NEXT: - var a *cursors.IntegerArray - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.IntegerArrayCursor.Next() - } - - if a.Len() == 0 { - c.res.Timestamps = c.res.Timestamps[:cur+1] - c.res.Values = c.res.Values[:cur+1] - return c.res - } - - for i, t := range a.Timestamps { - if t >= c.windowEnd { - cur++ - } - - if cur == MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i:] - c.tmp.Values = a.Values[i:] - return c.res - } - - c.res.Timestamps[cur] = t - c.res.Values[cur] = a.Values[i] - - c.windowEnd = int64(c.window.GetLatestBounds(values.Time(t)).Stop()) - } - - c.tmp.Timestamps = nil - c.tmp.Values = nil - - goto NEXT -} - -type integerWindowFirstArrayCursor struct { - cursors.IntegerArrayCursor - windowEnd int64 - res *cursors.IntegerArray - tmp *cursors.IntegerArray - window interval.Window -} - -// Window array cursors assume that every != 0 && every != MaxInt64. -// Such a cursor will panic in the first case and possibly overflow in the second. -func newIntegerWindowFirstArrayCursor(cur cursors.IntegerArrayCursor, window interval.Window) *integerWindowFirstArrayCursor { - return &integerWindowFirstArrayCursor{ - IntegerArrayCursor: cur, - windowEnd: math.MinInt64, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.IntegerArray{}, - window: window, - } -} - -func (c *integerWindowFirstArrayCursor) Stats() cursors.CursorStats { - return c.IntegerArrayCursor.Stats() -} - -func (c *integerWindowFirstArrayCursor) Next() *cursors.IntegerArray { - c.res.Timestamps = c.res.Timestamps[:0] - c.res.Values = c.res.Values[:0] - -NEXT: - var a *cursors.IntegerArray - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.IntegerArrayCursor.Next() - } - - if a.Len() == 0 { - return c.res - } - - for i, t := range a.Timestamps { - if t < c.windowEnd { - continue - } - - c.windowEnd = int64(c.window.GetLatestBounds(values.Time(t)).Stop()) - - c.res.Timestamps = append(c.res.Timestamps, t) - c.res.Values = append(c.res.Values, a.Values[i]) - - if c.res.Len() == MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i+1:] - c.tmp.Values = a.Values[i+1:] - return c.res - } - } - - c.tmp.Timestamps = nil - c.tmp.Values = nil - - goto NEXT -} - -type integerWindowCountArrayCursor struct { - cursors.IntegerArrayCursor - res *cursors.IntegerArray - tmp *cursors.IntegerArray - window interval.Window -} - -func newIntegerWindowCountArrayCursor(cur cursors.IntegerArrayCursor, window interval.Window) *integerWindowCountArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &integerWindowCountArrayCursor{ - IntegerArrayCursor: cur, - res: cursors.NewIntegerArrayLen(resLen), - tmp: &cursors.IntegerArray{}, - window: window, - } -} - -func (c *integerWindowCountArrayCursor) Stats() cursors.CursorStats { - return c.IntegerArrayCursor.Stats() -} - -func (c *integerWindowCountArrayCursor) Next() *cursors.IntegerArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = 
c.res.Values[:cap(c.res.Values)] - - var a *cursors.IntegerArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.IntegerArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.IntegerArray{} - } - - rowIdx := 0 - var acc int64 = 0 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - acc = 0 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - acc++ - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. - c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.IntegerArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type integerWindowSumArrayCursor struct { - cursors.IntegerArrayCursor - res *cursors.IntegerArray - tmp *cursors.IntegerArray - window interval.Window -} - -func newIntegerWindowSumArrayCursor(cur cursors.IntegerArrayCursor, window interval.Window) *integerWindowSumArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &integerWindowSumArrayCursor{ - IntegerArrayCursor: cur, - res: cursors.NewIntegerArrayLen(resLen), - tmp: &cursors.IntegerArray{}, - window: window, - } -} - -func (c *integerWindowSumArrayCursor) Stats() cursors.CursorStats { - return c.IntegerArrayCursor.Stats() -} - -func (c *integerWindowSumArrayCursor) Next() *cursors.IntegerArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.IntegerArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.IntegerArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.IntegerArray{} - } - - rowIdx := 0 - var acc int64 = 0 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the 
input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - acc = 0 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - acc += a.Values[rowIdx] - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. - c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.IntegerArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type integerWindowMinArrayCursor struct { - cursors.IntegerArrayCursor - res *cursors.IntegerArray - tmp *cursors.IntegerArray - window interval.Window -} - -func newIntegerWindowMinArrayCursor(cur cursors.IntegerArrayCursor, window interval.Window) *integerWindowMinArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &integerWindowMinArrayCursor{ - IntegerArrayCursor: cur, - res: cursors.NewIntegerArrayLen(resLen), - tmp: &cursors.IntegerArray{}, - window: window, - } -} - -func (c *integerWindowMinArrayCursor) Stats() cursors.CursorStats { - return c.IntegerArrayCursor.Stats() -} - -func (c *integerWindowMinArrayCursor) Next() *cursors.IntegerArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.IntegerArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.IntegerArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.IntegerArray{} - } - - rowIdx := 0 - var acc int64 = math.MaxInt64 - var tsAcc int64 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = tsAcc - c.res.Values[pos] = acc - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - acc = math.MaxInt64 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - if !windowHasPoints || a.Values[rowIdx] < acc { - acc = a.Values[rowIdx] - tsAcc = a.Timestamps[rowIdx] - } - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. 
- c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.IntegerArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = tsAcc - c.res.Values[pos] = acc - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type integerWindowMaxArrayCursor struct { - cursors.IntegerArrayCursor - res *cursors.IntegerArray - tmp *cursors.IntegerArray - window interval.Window -} - -func newIntegerWindowMaxArrayCursor(cur cursors.IntegerArrayCursor, window interval.Window) *integerWindowMaxArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &integerWindowMaxArrayCursor{ - IntegerArrayCursor: cur, - res: cursors.NewIntegerArrayLen(resLen), - tmp: &cursors.IntegerArray{}, - window: window, - } -} - -func (c *integerWindowMaxArrayCursor) Stats() cursors.CursorStats { - return c.IntegerArrayCursor.Stats() -} - -func (c *integerWindowMaxArrayCursor) Next() *cursors.IntegerArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.IntegerArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.IntegerArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.IntegerArray{} - } - - rowIdx := 0 - var acc int64 = math.MinInt64 - var tsAcc int64 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = tsAcc - c.res.Values[pos] = acc - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - acc = math.MinInt64 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - if !windowHasPoints || a.Values[rowIdx] > acc { - acc = a.Values[rowIdx] - tsAcc = a.Timestamps[rowIdx] - } - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. 
- c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.IntegerArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = tsAcc - c.res.Values[pos] = acc - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type integerWindowMeanArrayCursor struct { - cursors.IntegerArrayCursor - res *cursors.FloatArray - tmp *cursors.IntegerArray - window interval.Window -} - -func newIntegerWindowMeanArrayCursor(cur cursors.IntegerArrayCursor, window interval.Window) *integerWindowMeanArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &integerWindowMeanArrayCursor{ - IntegerArrayCursor: cur, - res: cursors.NewFloatArrayLen(resLen), - tmp: &cursors.IntegerArray{}, - window: window, - } -} - -func (c *integerWindowMeanArrayCursor) Stats() cursors.CursorStats { - return c.IntegerArrayCursor.Stats() -} - -func (c *integerWindowMeanArrayCursor) Next() *cursors.FloatArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.IntegerArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.IntegerArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.FloatArray{} - } - - rowIdx := 0 - var sum int64 - var count int64 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = float64(sum) / float64(count) - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - sum = 0 - count = 0 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - sum += a.Values[rowIdx] - count++ - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. 
- c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.IntegerArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = float64(sum) / float64(count) - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type integerEmptyArrayCursor struct { - res cursors.IntegerArray -} - -var IntegerEmptyArrayCursor cursors.IntegerArrayCursor = &integerEmptyArrayCursor{} - -func (c *integerEmptyArrayCursor) Err() error { return nil } -func (c *integerEmptyArrayCursor) Close() {} -func (c *integerEmptyArrayCursor) Stats() cursors.CursorStats { return cursors.CursorStats{} } -func (c *integerEmptyArrayCursor) Next() *cursors.IntegerArray { return &c.res } - -// ******************** -// Unsigned Array Cursor - -type unsignedArrayFilterCursor struct { - cursors.UnsignedArrayCursor - cond expression - m *singleValue - res *cursors.UnsignedArray - tmp *cursors.UnsignedArray -} - -func newUnsignedFilterArrayCursor(cond expression) *unsignedArrayFilterCursor { - return &unsignedArrayFilterCursor{ - cond: cond, - m: &singleValue{}, - res: cursors.NewUnsignedArrayLen(MaxPointsPerBlock), - tmp: &cursors.UnsignedArray{}, - } -} - -func (c *unsignedArrayFilterCursor) reset(cur cursors.UnsignedArrayCursor) { - c.UnsignedArrayCursor = cur - c.tmp.Timestamps, c.tmp.Values = nil, nil -} - -func (c *unsignedArrayFilterCursor) Stats() cursors.CursorStats { return c.UnsignedArrayCursor.Stats() } - -func (c *unsignedArrayFilterCursor) Next() *cursors.UnsignedArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.UnsignedArray - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.UnsignedArrayCursor.Next() - } - -LOOP: - for len(a.Timestamps) > 0 { - for i, v := range a.Values { - c.m.v = v - if c.cond.EvalBool(c.m) { - c.res.Timestamps[pos] = a.Timestamps[i] - c.res.Values[pos] = v - pos++ - if pos >= MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i+1:] - c.tmp.Values = a.Values[i+1:] - break LOOP - } - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. 
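// The filter and window cursors in this file all spill the same way: once res reaches
// MaxPointsPerBlock, the unread tail of the current input chunk is parked in tmp and
// consumed first on the next call to Next(). A standalone sketch of that carry pattern,
// with pull() standing in for the wrapped cursor and a single values slice instead of
// the paired Timestamps/Values arrays (illustrative only):
func nextChunkSketch(pull func() []uint64, tmp *[]uint64, max int) []uint64 {
	out := make([]uint64, 0, max)
	a := *tmp
	*tmp = nil
	if len(a) == 0 {
		a = pull()
	}
	for len(a) > 0 {
		if len(out)+len(a) <= max {
			out = append(out, a...) // whole chunk fits; fetch another
			a = pull()
			continue
		}
		n := max - len(out)
		out = append(out, a[:n]...)
		*tmp = a[n:] // park the unread tail for the next call
		break
	}
	return out
}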
- c.tmp.Timestamps = nil - c.tmp.Values = nil - - a = c.UnsignedArrayCursor.Next() - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type unsignedMultiShardArrayCursor struct { - cursors.UnsignedArrayCursor - cursorContext - filter *unsignedArrayFilterCursor -} - -func (c *unsignedMultiShardArrayCursor) reset(cur cursors.UnsignedArrayCursor, itrs cursors.CursorIterators, cond expression) { - if cond != nil { - if c.filter == nil { - c.filter = newUnsignedFilterArrayCursor(cond) - } else { - c.filter.cond = cond - } - c.filter.reset(cur) - cur = c.filter - } - - c.UnsignedArrayCursor = cur - c.itrs = itrs - c.err = nil -} - -func (c *unsignedMultiShardArrayCursor) Err() error { return c.err } - -func (c *unsignedMultiShardArrayCursor) Stats() cursors.CursorStats { - return c.UnsignedArrayCursor.Stats() -} - -func (c *unsignedMultiShardArrayCursor) Next() *cursors.UnsignedArray { - for { - a := c.UnsignedArrayCursor.Next() - if a.Len() == 0 { - if c.nextArrayCursor() { - continue - } - } - return a - } -} - -func (c *unsignedMultiShardArrayCursor) nextArrayCursor() bool { - if len(c.itrs) == 0 { - return false - } - - c.UnsignedArrayCursor.Close() - - var itr cursors.CursorIterator - var cur cursors.Cursor - for cur == nil && len(c.itrs) > 0 { - itr, c.itrs = c.itrs[0], c.itrs[1:] - cur, _ = itr.Next(c.ctx, c.req) - } - - var ok bool - if cur != nil { - var next cursors.UnsignedArrayCursor - next, ok = cur.(cursors.UnsignedArrayCursor) - if !ok { - cur.Close() - next = UnsignedEmptyArrayCursor - c.err = errors.New("expected unsigned cursor") - } else { - if c.filter != nil { - c.filter.reset(next) - next = c.filter - } - } - c.UnsignedArrayCursor = next - } else { - c.UnsignedArrayCursor = UnsignedEmptyArrayCursor - } - - return ok -} - -type unsignedLimitArrayCursor struct { - cursors.UnsignedArrayCursor - res *cursors.UnsignedArray - done bool -} - -func newUnsignedLimitArrayCursor(cur cursors.UnsignedArrayCursor) *unsignedLimitArrayCursor { - return &unsignedLimitArrayCursor{ - UnsignedArrayCursor: cur, - res: cursors.NewUnsignedArrayLen(1), - } -} - -func (c *unsignedLimitArrayCursor) Stats() cursors.CursorStats { return c.UnsignedArrayCursor.Stats() } - -func (c *unsignedLimitArrayCursor) Next() *cursors.UnsignedArray { - if c.done { - return &cursors.UnsignedArray{} - } - a := c.UnsignedArrayCursor.Next() - if len(a.Timestamps) == 0 { - return a - } - c.done = true - c.res.Timestamps[0] = a.Timestamps[0] - c.res.Values[0] = a.Values[0] - return c.res -} - -type unsignedWindowLastArrayCursor struct { - cursors.UnsignedArrayCursor - windowEnd int64 - res *cursors.UnsignedArray - tmp *cursors.UnsignedArray - window interval.Window -} - -// Window array cursors assume that every != 0 && every != MaxInt64. -// Such a cursor will panic in the first case and possibly overflow in the second. 
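// ("every" above refers to the window spacing used to build the interval.Window. The
// constructors in the template further below avoid the degenerate first/last cases by
// falling back to the plain limit cursors whenever window.IsZero().)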
-func newUnsignedWindowLastArrayCursor(cur cursors.UnsignedArrayCursor, window interval.Window) *unsignedWindowLastArrayCursor { - return &unsignedWindowLastArrayCursor{ - UnsignedArrayCursor: cur, - windowEnd: math.MinInt64, - res: cursors.NewUnsignedArrayLen(MaxPointsPerBlock), - tmp: &cursors.UnsignedArray{}, - window: window, - } -} - -func (c *unsignedWindowLastArrayCursor) Stats() cursors.CursorStats { - return c.UnsignedArrayCursor.Stats() -} - -func (c *unsignedWindowLastArrayCursor) Next() *cursors.UnsignedArray { - cur := -1 - -NEXT: - var a *cursors.UnsignedArray - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.UnsignedArrayCursor.Next() - } - - if a.Len() == 0 { - c.res.Timestamps = c.res.Timestamps[:cur+1] - c.res.Values = c.res.Values[:cur+1] - return c.res - } - - for i, t := range a.Timestamps { - if t >= c.windowEnd { - cur++ - } - - if cur == MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i:] - c.tmp.Values = a.Values[i:] - return c.res - } - - c.res.Timestamps[cur] = t - c.res.Values[cur] = a.Values[i] - - c.windowEnd = int64(c.window.GetLatestBounds(values.Time(t)).Stop()) - } - - c.tmp.Timestamps = nil - c.tmp.Values = nil - - goto NEXT -} - -type unsignedWindowFirstArrayCursor struct { - cursors.UnsignedArrayCursor - windowEnd int64 - res *cursors.UnsignedArray - tmp *cursors.UnsignedArray - window interval.Window -} - -// Window array cursors assume that every != 0 && every != MaxInt64. -// Such a cursor will panic in the first case and possibly overflow in the second. -func newUnsignedWindowFirstArrayCursor(cur cursors.UnsignedArrayCursor, window interval.Window) *unsignedWindowFirstArrayCursor { - return &unsignedWindowFirstArrayCursor{ - UnsignedArrayCursor: cur, - windowEnd: math.MinInt64, - res: cursors.NewUnsignedArrayLen(MaxPointsPerBlock), - tmp: &cursors.UnsignedArray{}, - window: window, - } -} - -func (c *unsignedWindowFirstArrayCursor) Stats() cursors.CursorStats { - return c.UnsignedArrayCursor.Stats() -} - -func (c *unsignedWindowFirstArrayCursor) Next() *cursors.UnsignedArray { - c.res.Timestamps = c.res.Timestamps[:0] - c.res.Values = c.res.Values[:0] - -NEXT: - var a *cursors.UnsignedArray - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.UnsignedArrayCursor.Next() - } - - if a.Len() == 0 { - return c.res - } - - for i, t := range a.Timestamps { - if t < c.windowEnd { - continue - } - - c.windowEnd = int64(c.window.GetLatestBounds(values.Time(t)).Stop()) - - c.res.Timestamps = append(c.res.Timestamps, t) - c.res.Values = append(c.res.Values, a.Values[i]) - - if c.res.Len() == MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i+1:] - c.tmp.Values = a.Values[i+1:] - return c.res - } - } - - c.tmp.Timestamps = nil - c.tmp.Values = nil - - goto NEXT -} - -type unsignedWindowCountArrayCursor struct { - cursors.UnsignedArrayCursor - res *cursors.IntegerArray - tmp *cursors.UnsignedArray - window interval.Window -} - -func newUnsignedWindowCountArrayCursor(cur cursors.UnsignedArrayCursor, window interval.Window) *unsignedWindowCountArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &unsignedWindowCountArrayCursor{ - UnsignedArrayCursor: cur, - res: cursors.NewIntegerArrayLen(resLen), - tmp: &cursors.UnsignedArray{}, - window: window, - } -} - -func (c *unsignedWindowCountArrayCursor) Stats() cursors.CursorStats { - return c.UnsignedArrayCursor.Stats() -} - -func (c *unsignedWindowCountArrayCursor) Next() *cursors.IntegerArray { - pos := 0 - c.res.Timestamps = 
c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.UnsignedArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.UnsignedArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.IntegerArray{} - } - - rowIdx := 0 - var acc int64 = 0 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - acc = 0 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - acc++ - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. - c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.UnsignedArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type unsignedWindowSumArrayCursor struct { - cursors.UnsignedArrayCursor - res *cursors.UnsignedArray - tmp *cursors.UnsignedArray - window interval.Window -} - -func newUnsignedWindowSumArrayCursor(cur cursors.UnsignedArrayCursor, window interval.Window) *unsignedWindowSumArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &unsignedWindowSumArrayCursor{ - UnsignedArrayCursor: cur, - res: cursors.NewUnsignedArrayLen(resLen), - tmp: &cursors.UnsignedArray{}, - window: window, - } -} - -func (c *unsignedWindowSumArrayCursor) Stats() cursors.CursorStats { - return c.UnsignedArrayCursor.Stats() -} - -func (c *unsignedWindowSumArrayCursor) Next() *cursors.UnsignedArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.UnsignedArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.UnsignedArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.UnsignedArray{} - } - - rowIdx := 0 - var acc uint64 = 0 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - if pos >= 
MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - acc = 0 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - acc += a.Values[rowIdx] - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. - c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.UnsignedArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type unsignedWindowMinArrayCursor struct { - cursors.UnsignedArrayCursor - res *cursors.UnsignedArray - tmp *cursors.UnsignedArray - window interval.Window -} - -func newUnsignedWindowMinArrayCursor(cur cursors.UnsignedArrayCursor, window interval.Window) *unsignedWindowMinArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &unsignedWindowMinArrayCursor{ - UnsignedArrayCursor: cur, - res: cursors.NewUnsignedArrayLen(resLen), - tmp: &cursors.UnsignedArray{}, - window: window, - } -} - -func (c *unsignedWindowMinArrayCursor) Stats() cursors.CursorStats { - return c.UnsignedArrayCursor.Stats() -} - -func (c *unsignedWindowMinArrayCursor) Next() *cursors.UnsignedArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.UnsignedArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.UnsignedArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.UnsignedArray{} - } - - rowIdx := 0 - var acc uint64 = math.MaxUint64 - var tsAcc int64 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = tsAcc - c.res.Values[pos] = acc - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - acc = math.MaxUint64 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - if !windowHasPoints || a.Values[rowIdx] < acc { - acc = a.Values[rowIdx] - tsAcc = a.Timestamps[rowIdx] - } - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. 
- c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.UnsignedArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = tsAcc - c.res.Values[pos] = acc - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type unsignedWindowMaxArrayCursor struct { - cursors.UnsignedArrayCursor - res *cursors.UnsignedArray - tmp *cursors.UnsignedArray - window interval.Window -} - -func newUnsignedWindowMaxArrayCursor(cur cursors.UnsignedArrayCursor, window interval.Window) *unsignedWindowMaxArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &unsignedWindowMaxArrayCursor{ - UnsignedArrayCursor: cur, - res: cursors.NewUnsignedArrayLen(resLen), - tmp: &cursors.UnsignedArray{}, - window: window, - } -} - -func (c *unsignedWindowMaxArrayCursor) Stats() cursors.CursorStats { - return c.UnsignedArrayCursor.Stats() -} - -func (c *unsignedWindowMaxArrayCursor) Next() *cursors.UnsignedArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.UnsignedArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.UnsignedArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.UnsignedArray{} - } - - rowIdx := 0 - var acc uint64 = 0 - var tsAcc int64 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = tsAcc - c.res.Values[pos] = acc - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - acc = 0 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - if !windowHasPoints || a.Values[rowIdx] > acc { - acc = a.Values[rowIdx] - tsAcc = a.Timestamps[rowIdx] - } - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. 
- c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.UnsignedArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = tsAcc - c.res.Values[pos] = acc - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type unsignedWindowMeanArrayCursor struct { - cursors.UnsignedArrayCursor - res *cursors.FloatArray - tmp *cursors.UnsignedArray - window interval.Window -} - -func newUnsignedWindowMeanArrayCursor(cur cursors.UnsignedArrayCursor, window interval.Window) *unsignedWindowMeanArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &unsignedWindowMeanArrayCursor{ - UnsignedArrayCursor: cur, - res: cursors.NewFloatArrayLen(resLen), - tmp: &cursors.UnsignedArray{}, - window: window, - } -} - -func (c *unsignedWindowMeanArrayCursor) Stats() cursors.CursorStats { - return c.UnsignedArrayCursor.Stats() -} - -func (c *unsignedWindowMeanArrayCursor) Next() *cursors.FloatArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.UnsignedArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.UnsignedArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.FloatArray{} - } - - rowIdx := 0 - var sum uint64 - var count int64 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = float64(sum) / float64(count) - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - sum = 0 - count = 0 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - sum += a.Values[rowIdx] - count++ - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. 
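// The mean cursors keep two running accumulators per window, sum and count, and emit
// float64(sum)/float64(count) at the window end, which is why res is a FloatArray even
// for integer and unsigned inputs. A standalone sketch of that per-window arithmetic
// (illustrative only; the real cursor skips empty windows instead of returning 0):
func windowMeanSketch(vs []uint64) float64 {
	var sum uint64
	var count int64
	for _, v := range vs {
		sum += v // uint64 addition wraps on overflow, as in the generated code
		count++
	}
	if count == 0 {
		return 0
	}
	return float64(sum) / float64(count)
}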
- c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.UnsignedArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = float64(sum) / float64(count) - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type unsignedEmptyArrayCursor struct { - res cursors.UnsignedArray -} - -var UnsignedEmptyArrayCursor cursors.UnsignedArrayCursor = &unsignedEmptyArrayCursor{} - -func (c *unsignedEmptyArrayCursor) Err() error { return nil } -func (c *unsignedEmptyArrayCursor) Close() {} -func (c *unsignedEmptyArrayCursor) Stats() cursors.CursorStats { return cursors.CursorStats{} } -func (c *unsignedEmptyArrayCursor) Next() *cursors.UnsignedArray { return &c.res } - -// ******************** -// String Array Cursor - -type stringArrayFilterCursor struct { - cursors.StringArrayCursor - cond expression - m *singleValue - res *cursors.StringArray - tmp *cursors.StringArray -} - -func newStringFilterArrayCursor(cond expression) *stringArrayFilterCursor { - return &stringArrayFilterCursor{ - cond: cond, - m: &singleValue{}, - res: cursors.NewStringArrayLen(MaxPointsPerBlock), - tmp: &cursors.StringArray{}, - } -} - -func (c *stringArrayFilterCursor) reset(cur cursors.StringArrayCursor) { - c.StringArrayCursor = cur - c.tmp.Timestamps, c.tmp.Values = nil, nil -} - -func (c *stringArrayFilterCursor) Stats() cursors.CursorStats { return c.StringArrayCursor.Stats() } - -func (c *stringArrayFilterCursor) Next() *cursors.StringArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.StringArray - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.StringArrayCursor.Next() - } - -LOOP: - for len(a.Timestamps) > 0 { - for i, v := range a.Values { - c.m.v = v - if c.cond.EvalBool(c.m) { - c.res.Timestamps[pos] = a.Timestamps[i] - c.res.Values[pos] = v - pos++ - if pos >= MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i+1:] - c.tmp.Values = a.Values[i+1:] - break LOOP - } - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. 
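// The filter cursors all share this shape: wrap the source cursor, evaluate the pushed
// down condition one value at a time through the reused singleValue holder, and pack
// matching points into res until it is full. A standalone sketch with a plain predicate
// standing in for the cond.EvalBool call above (illustrative only):
func filterChunkSketch(ts []int64, vs []string, keep func(string) bool, max int) (outTs []int64, outVs []string, restTs []int64, restVs []string) {
	for i, v := range vs {
		if !keep(v) {
			continue
		}
		outTs = append(outTs, ts[i])
		outVs = append(outVs, v)
		if len(outVs) >= max {
			// output full: hand back the unread tail so the caller can resume here
			return outTs, outVs, ts[i+1:], vs[i+1:]
		}
	}
	return outTs, outVs, nil, nil
}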
- c.tmp.Timestamps = nil - c.tmp.Values = nil - - a = c.StringArrayCursor.Next() - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type stringMultiShardArrayCursor struct { - cursors.StringArrayCursor - cursorContext - filter *stringArrayFilterCursor -} - -func (c *stringMultiShardArrayCursor) reset(cur cursors.StringArrayCursor, itrs cursors.CursorIterators, cond expression) { - if cond != nil { - if c.filter == nil { - c.filter = newStringFilterArrayCursor(cond) - } else { - c.filter.cond = cond - } - c.filter.reset(cur) - cur = c.filter - } - - c.StringArrayCursor = cur - c.itrs = itrs - c.err = nil -} - -func (c *stringMultiShardArrayCursor) Err() error { return c.err } - -func (c *stringMultiShardArrayCursor) Stats() cursors.CursorStats { - return c.StringArrayCursor.Stats() -} - -func (c *stringMultiShardArrayCursor) Next() *cursors.StringArray { - for { - a := c.StringArrayCursor.Next() - if a.Len() == 0 { - if c.nextArrayCursor() { - continue - } - } - return a - } -} - -func (c *stringMultiShardArrayCursor) nextArrayCursor() bool { - if len(c.itrs) == 0 { - return false - } - - c.StringArrayCursor.Close() - - var itr cursors.CursorIterator - var cur cursors.Cursor - for cur == nil && len(c.itrs) > 0 { - itr, c.itrs = c.itrs[0], c.itrs[1:] - cur, _ = itr.Next(c.ctx, c.req) - } - - var ok bool - if cur != nil { - var next cursors.StringArrayCursor - next, ok = cur.(cursors.StringArrayCursor) - if !ok { - cur.Close() - next = StringEmptyArrayCursor - c.err = errors.New("expected string cursor") - } else { - if c.filter != nil { - c.filter.reset(next) - next = c.filter - } - } - c.StringArrayCursor = next - } else { - c.StringArrayCursor = StringEmptyArrayCursor - } - - return ok -} - -type stringLimitArrayCursor struct { - cursors.StringArrayCursor - res *cursors.StringArray - done bool -} - -func newStringLimitArrayCursor(cur cursors.StringArrayCursor) *stringLimitArrayCursor { - return &stringLimitArrayCursor{ - StringArrayCursor: cur, - res: cursors.NewStringArrayLen(1), - } -} - -func (c *stringLimitArrayCursor) Stats() cursors.CursorStats { return c.StringArrayCursor.Stats() } - -func (c *stringLimitArrayCursor) Next() *cursors.StringArray { - if c.done { - return &cursors.StringArray{} - } - a := c.StringArrayCursor.Next() - if len(a.Timestamps) == 0 { - return a - } - c.done = true - c.res.Timestamps[0] = a.Timestamps[0] - c.res.Values[0] = a.Values[0] - return c.res -} - -type stringWindowLastArrayCursor struct { - cursors.StringArrayCursor - windowEnd int64 - res *cursors.StringArray - tmp *cursors.StringArray - window interval.Window -} - -// Window array cursors assume that every != 0 && every != MaxInt64. -// Such a cursor will panic in the first case and possibly overflow in the second. 
-func newStringWindowLastArrayCursor(cur cursors.StringArrayCursor, window interval.Window) *stringWindowLastArrayCursor { - return &stringWindowLastArrayCursor{ - StringArrayCursor: cur, - windowEnd: math.MinInt64, - res: cursors.NewStringArrayLen(MaxPointsPerBlock), - tmp: &cursors.StringArray{}, - window: window, - } -} - -func (c *stringWindowLastArrayCursor) Stats() cursors.CursorStats { - return c.StringArrayCursor.Stats() -} - -func (c *stringWindowLastArrayCursor) Next() *cursors.StringArray { - cur := -1 - -NEXT: - var a *cursors.StringArray - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.StringArrayCursor.Next() - } - - if a.Len() == 0 { - c.res.Timestamps = c.res.Timestamps[:cur+1] - c.res.Values = c.res.Values[:cur+1] - return c.res - } - - for i, t := range a.Timestamps { - if t >= c.windowEnd { - cur++ - } - - if cur == MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i:] - c.tmp.Values = a.Values[i:] - return c.res - } - - c.res.Timestamps[cur] = t - c.res.Values[cur] = a.Values[i] - - c.windowEnd = int64(c.window.GetLatestBounds(values.Time(t)).Stop()) - } - - c.tmp.Timestamps = nil - c.tmp.Values = nil - - goto NEXT -} - -type stringWindowFirstArrayCursor struct { - cursors.StringArrayCursor - windowEnd int64 - res *cursors.StringArray - tmp *cursors.StringArray - window interval.Window -} - -// Window array cursors assume that every != 0 && every != MaxInt64. -// Such a cursor will panic in the first case and possibly overflow in the second. -func newStringWindowFirstArrayCursor(cur cursors.StringArrayCursor, window interval.Window) *stringWindowFirstArrayCursor { - return &stringWindowFirstArrayCursor{ - StringArrayCursor: cur, - windowEnd: math.MinInt64, - res: cursors.NewStringArrayLen(MaxPointsPerBlock), - tmp: &cursors.StringArray{}, - window: window, - } -} - -func (c *stringWindowFirstArrayCursor) Stats() cursors.CursorStats { - return c.StringArrayCursor.Stats() -} - -func (c *stringWindowFirstArrayCursor) Next() *cursors.StringArray { - c.res.Timestamps = c.res.Timestamps[:0] - c.res.Values = c.res.Values[:0] - -NEXT: - var a *cursors.StringArray - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.StringArrayCursor.Next() - } - - if a.Len() == 0 { - return c.res - } - - for i, t := range a.Timestamps { - if t < c.windowEnd { - continue - } - - c.windowEnd = int64(c.window.GetLatestBounds(values.Time(t)).Stop()) - - c.res.Timestamps = append(c.res.Timestamps, t) - c.res.Values = append(c.res.Values, a.Values[i]) - - if c.res.Len() == MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i+1:] - c.tmp.Values = a.Values[i+1:] - return c.res - } - } - - c.tmp.Timestamps = nil - c.tmp.Values = nil - - goto NEXT -} - -type stringWindowCountArrayCursor struct { - cursors.StringArrayCursor - res *cursors.IntegerArray - tmp *cursors.StringArray - window interval.Window -} - -func newStringWindowCountArrayCursor(cur cursors.StringArrayCursor, window interval.Window) *stringWindowCountArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &stringWindowCountArrayCursor{ - StringArrayCursor: cur, - res: cursors.NewIntegerArrayLen(resLen), - tmp: &cursors.StringArray{}, - window: window, - } -} - -func (c *stringWindowCountArrayCursor) Stats() cursors.CursorStats { - return c.StringArrayCursor.Stats() -} - -func (c *stringWindowCountArrayCursor) Next() *cursors.IntegerArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a 
*cursors.StringArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.StringArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.IntegerArray{} - } - - rowIdx := 0 - var acc int64 = 0 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - acc = 0 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - acc++ - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. - c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.StringArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type stringEmptyArrayCursor struct { - res cursors.StringArray -} - -var StringEmptyArrayCursor cursors.StringArrayCursor = &stringEmptyArrayCursor{} - -func (c *stringEmptyArrayCursor) Err() error { return nil } -func (c *stringEmptyArrayCursor) Close() {} -func (c *stringEmptyArrayCursor) Stats() cursors.CursorStats { return cursors.CursorStats{} } -func (c *stringEmptyArrayCursor) Next() *cursors.StringArray { return &c.res } - -// ******************** -// Boolean Array Cursor - -type booleanArrayFilterCursor struct { - cursors.BooleanArrayCursor - cond expression - m *singleValue - res *cursors.BooleanArray - tmp *cursors.BooleanArray -} - -func newBooleanFilterArrayCursor(cond expression) *booleanArrayFilterCursor { - return &booleanArrayFilterCursor{ - cond: cond, - m: &singleValue{}, - res: cursors.NewBooleanArrayLen(MaxPointsPerBlock), - tmp: &cursors.BooleanArray{}, - } -} - -func (c *booleanArrayFilterCursor) reset(cur cursors.BooleanArrayCursor) { - c.BooleanArrayCursor = cur - c.tmp.Timestamps, c.tmp.Values = nil, nil -} - -func (c *booleanArrayFilterCursor) Stats() cursors.CursorStats { return c.BooleanArrayCursor.Stats() } - -func (c *booleanArrayFilterCursor) Next() *cursors.BooleanArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.BooleanArray - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.BooleanArrayCursor.Next() - } - -LOOP: - for len(a.Timestamps) > 0 { - for i, v := range a.Values { - c.m.v = v - if c.cond.EvalBool(c.m) { - c.res.Timestamps[pos] = a.Timestamps[i] - c.res.Values[pos] = v - pos++ - if pos >= MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i+1:] - c.tmp.Values = 
a.Values[i+1:] - break LOOP - } - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. - c.tmp.Timestamps = nil - c.tmp.Values = nil - - a = c.BooleanArrayCursor.Next() - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type booleanMultiShardArrayCursor struct { - cursors.BooleanArrayCursor - cursorContext - filter *booleanArrayFilterCursor -} - -func (c *booleanMultiShardArrayCursor) reset(cur cursors.BooleanArrayCursor, itrs cursors.CursorIterators, cond expression) { - if cond != nil { - if c.filter == nil { - c.filter = newBooleanFilterArrayCursor(cond) - } else { - c.filter.cond = cond - } - c.filter.reset(cur) - cur = c.filter - } - - c.BooleanArrayCursor = cur - c.itrs = itrs - c.err = nil -} - -func (c *booleanMultiShardArrayCursor) Err() error { return c.err } - -func (c *booleanMultiShardArrayCursor) Stats() cursors.CursorStats { - return c.BooleanArrayCursor.Stats() -} - -func (c *booleanMultiShardArrayCursor) Next() *cursors.BooleanArray { - for { - a := c.BooleanArrayCursor.Next() - if a.Len() == 0 { - if c.nextArrayCursor() { - continue - } - } - return a - } -} - -func (c *booleanMultiShardArrayCursor) nextArrayCursor() bool { - if len(c.itrs) == 0 { - return false - } - - c.BooleanArrayCursor.Close() - - var itr cursors.CursorIterator - var cur cursors.Cursor - for cur == nil && len(c.itrs) > 0 { - itr, c.itrs = c.itrs[0], c.itrs[1:] - cur, _ = itr.Next(c.ctx, c.req) - } - - var ok bool - if cur != nil { - var next cursors.BooleanArrayCursor - next, ok = cur.(cursors.BooleanArrayCursor) - if !ok { - cur.Close() - next = BooleanEmptyArrayCursor - c.err = errors.New("expected boolean cursor") - } else { - if c.filter != nil { - c.filter.reset(next) - next = c.filter - } - } - c.BooleanArrayCursor = next - } else { - c.BooleanArrayCursor = BooleanEmptyArrayCursor - } - - return ok -} - -type booleanLimitArrayCursor struct { - cursors.BooleanArrayCursor - res *cursors.BooleanArray - done bool -} - -func newBooleanLimitArrayCursor(cur cursors.BooleanArrayCursor) *booleanLimitArrayCursor { - return &booleanLimitArrayCursor{ - BooleanArrayCursor: cur, - res: cursors.NewBooleanArrayLen(1), - } -} - -func (c *booleanLimitArrayCursor) Stats() cursors.CursorStats { return c.BooleanArrayCursor.Stats() } - -func (c *booleanLimitArrayCursor) Next() *cursors.BooleanArray { - if c.done { - return &cursors.BooleanArray{} - } - a := c.BooleanArrayCursor.Next() - if len(a.Timestamps) == 0 { - return a - } - c.done = true - c.res.Timestamps[0] = a.Timestamps[0] - c.res.Values[0] = a.Values[0] - return c.res -} - -type booleanWindowLastArrayCursor struct { - cursors.BooleanArrayCursor - windowEnd int64 - res *cursors.BooleanArray - tmp *cursors.BooleanArray - window interval.Window -} - -// Window array cursors assume that every != 0 && every != MaxInt64. -// Such a cursor will panic in the first case and possibly overflow in the second. 
-func newBooleanWindowLastArrayCursor(cur cursors.BooleanArrayCursor, window interval.Window) *booleanWindowLastArrayCursor { - return &booleanWindowLastArrayCursor{ - BooleanArrayCursor: cur, - windowEnd: math.MinInt64, - res: cursors.NewBooleanArrayLen(MaxPointsPerBlock), - tmp: &cursors.BooleanArray{}, - window: window, - } -} - -func (c *booleanWindowLastArrayCursor) Stats() cursors.CursorStats { - return c.BooleanArrayCursor.Stats() -} - -func (c *booleanWindowLastArrayCursor) Next() *cursors.BooleanArray { - cur := -1 - -NEXT: - var a *cursors.BooleanArray - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.BooleanArrayCursor.Next() - } - - if a.Len() == 0 { - c.res.Timestamps = c.res.Timestamps[:cur+1] - c.res.Values = c.res.Values[:cur+1] - return c.res - } - - for i, t := range a.Timestamps { - if t >= c.windowEnd { - cur++ - } - - if cur == MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i:] - c.tmp.Values = a.Values[i:] - return c.res - } - - c.res.Timestamps[cur] = t - c.res.Values[cur] = a.Values[i] - - c.windowEnd = int64(c.window.GetLatestBounds(values.Time(t)).Stop()) - } - - c.tmp.Timestamps = nil - c.tmp.Values = nil - - goto NEXT -} - -type booleanWindowFirstArrayCursor struct { - cursors.BooleanArrayCursor - windowEnd int64 - res *cursors.BooleanArray - tmp *cursors.BooleanArray - window interval.Window -} - -// Window array cursors assume that every != 0 && every != MaxInt64. -// Such a cursor will panic in the first case and possibly overflow in the second. -func newBooleanWindowFirstArrayCursor(cur cursors.BooleanArrayCursor, window interval.Window) *booleanWindowFirstArrayCursor { - return &booleanWindowFirstArrayCursor{ - BooleanArrayCursor: cur, - windowEnd: math.MinInt64, - res: cursors.NewBooleanArrayLen(MaxPointsPerBlock), - tmp: &cursors.BooleanArray{}, - window: window, - } -} - -func (c *booleanWindowFirstArrayCursor) Stats() cursors.CursorStats { - return c.BooleanArrayCursor.Stats() -} - -func (c *booleanWindowFirstArrayCursor) Next() *cursors.BooleanArray { - c.res.Timestamps = c.res.Timestamps[:0] - c.res.Values = c.res.Values[:0] - -NEXT: - var a *cursors.BooleanArray - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.BooleanArrayCursor.Next() - } - - if a.Len() == 0 { - return c.res - } - - for i, t := range a.Timestamps { - if t < c.windowEnd { - continue - } - - c.windowEnd = int64(c.window.GetLatestBounds(values.Time(t)).Stop()) - - c.res.Timestamps = append(c.res.Timestamps, t) - c.res.Values = append(c.res.Values, a.Values[i]) - - if c.res.Len() == MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i+1:] - c.tmp.Values = a.Values[i+1:] - return c.res - } - } - - c.tmp.Timestamps = nil - c.tmp.Values = nil - - goto NEXT -} - -type booleanWindowCountArrayCursor struct { - cursors.BooleanArrayCursor - res *cursors.IntegerArray - tmp *cursors.BooleanArray - window interval.Window -} - -func newBooleanWindowCountArrayCursor(cur cursors.BooleanArrayCursor, window interval.Window) *booleanWindowCountArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &booleanWindowCountArrayCursor{ - BooleanArrayCursor: cur, - res: cursors.NewIntegerArrayLen(resLen), - tmp: &cursors.BooleanArray{}, - window: window, - } -} - -func (c *booleanWindowCountArrayCursor) Stats() cursors.CursorStats { - return c.BooleanArrayCursor.Stats() -} - -func (c *booleanWindowCountArrayCursor) Next() *cursors.IntegerArray { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = 
c.res.Values[:cap(c.res.Values)] - - var a *cursors.BooleanArray - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.BooleanArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.IntegerArray{} - } - - rowIdx := 0 - var acc int64 = 0 - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. - // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - acc = 0 - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - acc++ - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. - c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.BooleanArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - c.res.Timestamps[pos] = windowEnd - c.res.Values[pos] = acc - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type booleanEmptyArrayCursor struct { - res cursors.BooleanArray -} - -var BooleanEmptyArrayCursor cursors.BooleanArrayCursor = &booleanEmptyArrayCursor{} - -func (c *booleanEmptyArrayCursor) Err() error { return nil } -func (c *booleanEmptyArrayCursor) Close() {} -func (c *booleanEmptyArrayCursor) Stats() cursors.CursorStats { return cursors.CursorStats{} } -func (c *booleanEmptyArrayCursor) Next() *cursors.BooleanArray { return &c.res } - -func arrayCursorType(cur cursors.Cursor) string { - switch cur.(type) { - - case cursors.FloatArrayCursor: - return "float" - - case cursors.IntegerArrayCursor: - return "integer" - - case cursors.UnsignedArrayCursor: - return "unsigned" - - case cursors.StringArrayCursor: - return "string" - - case cursors.BooleanArrayCursor: - return "boolean" - - default: - return "unknown" - } -} diff --git a/storage/reads/array_cursor.gen.go.tmpl b/storage/reads/array_cursor.gen.go.tmpl deleted file mode 100644 index e41ab7e9748..00000000000 --- a/storage/reads/array_cursor.gen.go.tmpl +++ /dev/null @@ -1,590 +0,0 @@ -package reads - -import ( - "errors" - "fmt" - "math" - - "github.com/influxdata/flux/interval" - "github.com/influxdata/flux/values" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -const ( - // MaxPointsPerBlock is the maximum number of points in an encoded - // block in a TSM file. It should match the value in the tsm1 - // package, but we don't want to import it. 
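// (tsm1's default block size is 1000 points, hence the literal below; the full-size
// res buffers in these cursors are allocated with New*ArrayLen(MaxPointsPerBlock).)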
- MaxPointsPerBlock = 1000 -) - -func newLimitArrayCursor(cur cursors.Cursor) cursors.Cursor { - switch cur := cur.(type) { -{{range .}}{{/* every type supports limit */}} - case cursors.{{.Name}}ArrayCursor: - return new{{.Name}}LimitArrayCursor(cur) -{{end}} - default: - panic(fmt.Sprintf("unreachable: %T", cur)) - } -} - -func newWindowFirstArrayCursor(cur cursors.Cursor, window interval.Window) cursors.Cursor { - if window.IsZero() { - return newLimitArrayCursor(cur) - } - switch cur := cur.(type) { -{{range .}}{{/* every type supports first */}} - case cursors.{{.Name}}ArrayCursor: - return new{{.Name}}WindowFirstArrayCursor(cur, window) -{{end}} - default: - panic(fmt.Sprintf("unreachable: %T", cur)) - } -} - -func newWindowLastArrayCursor(cur cursors.Cursor, window interval.Window) cursors.Cursor { - if window.IsZero() { - return newLimitArrayCursor(cur) - } - switch cur := cur.(type) { -{{range .}}{{/* every type supports last */}} - case cursors.{{.Name}}ArrayCursor: - return new{{.Name}}WindowLastArrayCursor(cur, window) -{{end}} - default: - panic(fmt.Sprintf("unreachable: %T", cur)) - } -} - -func newWindowCountArrayCursor(cur cursors.Cursor, window interval.Window) cursors.Cursor { - switch cur := cur.(type) { -{{range .}}{{/* every type supports count */}} - case cursors.{{.Name}}ArrayCursor: - return new{{.Name}}WindowCountArrayCursor(cur, window) -{{end}} - default: - panic(fmt.Sprintf("unreachable: %T", cur)) - } -} - -func newWindowSumArrayCursor(cur cursors.Cursor, window interval.Window) (cursors.Cursor, error) { - switch cur := cur.(type) { -{{range .}} -{{$Type := .Name}} -{{range .Aggs}} -{{if eq .Name "Sum"}} - case cursors.{{$Type}}ArrayCursor: - return new{{$Type}}WindowSumArrayCursor(cur, window), nil -{{end}} -{{end}}{{/* for each supported agg fn */}} -{{end}}{{/* for each field type */}} - default: - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: fmt.Sprintf("unsupported input type for sum aggregate: %s", arrayCursorType(cur)), - } - } -} - -func newWindowMinArrayCursor(cur cursors.Cursor, window interval.Window) cursors.Cursor { - switch cur := cur.(type) { -{{range .}} -{{$Type := .Name}} -{{range .Aggs}} -{{if eq .Name "Min"}} - case cursors.{{$Type}}ArrayCursor: - return new{{$Type}}WindowMinArrayCursor(cur, window) -{{end}} -{{end}}{{/* for each supported agg fn */}} -{{end}}{{/* for each field type */}} - default: - panic(fmt.Sprintf("unsupported for aggregate min: %T", cur)) - } -} - -func newWindowMaxArrayCursor(cur cursors.Cursor, window interval.Window) cursors.Cursor { - switch cur := cur.(type) { -{{range .}} -{{$Type := .Name}} -{{range .Aggs}} -{{if eq .Name "Max"}} - case cursors.{{$Type}}ArrayCursor: - return new{{$Type}}WindowMaxArrayCursor(cur, window) -{{end}} -{{end}}{{/* for each supported agg fn */}} -{{end}}{{/* for each field type */}} - default: - panic(fmt.Sprintf("unsupported for aggregate max: %T", cur)) - } -} - -func newWindowMeanArrayCursor(cur cursors.Cursor, window interval.Window) (cursors.Cursor, error) { - switch cur := cur.(type) { -{{range .}} -{{$Type := .Name}} -{{range .Aggs}} -{{if eq .Name "Mean"}} - case cursors.{{$Type}}ArrayCursor: - return new{{$Type}}WindowMeanArrayCursor(cur, window), nil -{{end}} -{{end}}{{/* for each supported agg fn */}} -{{end}}{{/* for each field type */}} - default: - return nil, &errors2.Error{ - Code: errors2.EInvalid, - Msg: fmt.Sprintf("unsupported input type for mean aggregate: %s", arrayCursorType(cur)), - } - } -} -{{range .}} -{{$arrayType := print "*cursors." 
.Name "Array"}} -{{$type := print .name "ArrayFilterCursor"}} -{{$Type := print .Name "ArrayFilterCursor"}} - -// ******************** -// {{.Name}} Array Cursor - -type {{$type}} struct { - cursors.{{.Name}}ArrayCursor - cond expression - m *singleValue - res {{$arrayType}} - tmp {{$arrayType}} -} - -func new{{.Name}}FilterArrayCursor(cond expression) *{{$type}} { - return &{{$type}}{ - cond: cond, - m: &singleValue{}, - res: cursors.New{{.Name}}ArrayLen(MaxPointsPerBlock), - tmp: &cursors.{{.Name}}Array{}, - } -} - -func (c *{{$type}}) reset(cur cursors.{{.Name}}ArrayCursor) { - c.{{.Name}}ArrayCursor = cur - c.tmp.Timestamps, c.tmp.Values = nil, nil -} - -func (c *{{$type}}) Stats() cursors.CursorStats { return c.{{.Name}}ArrayCursor.Stats() } - -func (c *{{$type}}) Next() {{$arrayType}} { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a {{$arrayType}} - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.{{.Name}}ArrayCursor.Next() - } - -LOOP: - for len(a.Timestamps) > 0 { - for i, v := range a.Values { - c.m.v = v - if c.cond.EvalBool(c.m) { - c.res.Timestamps[pos] = a.Timestamps[i] - c.res.Values[pos] = v - pos++ - if pos >= MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i+1:] - c.tmp.Values = a.Values[i+1:] - break LOOP - } - } - } - - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. - c.tmp.Timestamps = nil - c.tmp.Values = nil - - a = c.{{.Name}}ArrayCursor.Next() - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -type {{.name}}MultiShardArrayCursor struct { - cursors.{{.Name}}ArrayCursor - cursorContext - filter *{{$type}} -} - -func (c *{{.name}}MultiShardArrayCursor) reset(cur cursors.{{.Name}}ArrayCursor, itrs cursors.CursorIterators, cond expression) { - if cond != nil { - if c.filter == nil { - c.filter = new{{.Name}}FilterArrayCursor(cond) - } else { - c.filter.cond = cond - } - c.filter.reset(cur) - cur = c.filter - } - - c.{{.Name}}ArrayCursor = cur - c.itrs = itrs - c.err = nil -} - - -func (c *{{.name}}MultiShardArrayCursor) Err() error { return c.err } - -func (c *{{.name}}MultiShardArrayCursor) Stats() cursors.CursorStats { - return c.{{.Name}}ArrayCursor.Stats() -} - -func (c *{{.name}}MultiShardArrayCursor) Next() {{$arrayType}} { - for { - a := c.{{.Name}}ArrayCursor.Next() - if a.Len() == 0 { - if c.nextArrayCursor() { - continue - } - } - return a - } -} - -func (c *{{.name}}MultiShardArrayCursor) nextArrayCursor() bool { - if len(c.itrs) == 0 { - return false - } - - c.{{.Name}}ArrayCursor.Close() - - var itr cursors.CursorIterator - var cur cursors.Cursor - for cur == nil && len(c.itrs) > 0 { - itr, c.itrs = c.itrs[0], c.itrs[1:] - cur, _ = itr.Next(c.ctx, c.req) - } - - var ok bool - if cur != nil { - var next cursors.{{.Name}}ArrayCursor - next, ok = cur.(cursors.{{.Name}}ArrayCursor) - if !ok { - cur.Close() - next = {{.Name}}EmptyArrayCursor - c.err = errors.New("expected {{.name}} cursor") - } else { - if c.filter != nil { - c.filter.reset(next) - next = c.filter - } - } - c.{{.Name}}ArrayCursor = next - } else { - c.{{.Name}}ArrayCursor = {{.Name}}EmptyArrayCursor - } - - return ok -} - -type {{.name}}LimitArrayCursor struct { - cursors.{{.Name}}ArrayCursor - res {{$arrayType}} - done bool -} - -func new{{.Name}}LimitArrayCursor(cur cursors.{{.Name}}ArrayCursor) *{{.name}}LimitArrayCursor { - return 
&{{.name}}LimitArrayCursor{ - {{.Name}}ArrayCursor: cur, - res: cursors.New{{.Name}}ArrayLen(1), - } -} - -func (c *{{.name}}LimitArrayCursor) Stats() cursors.CursorStats { return c.{{.Name}}ArrayCursor.Stats() } - -func (c *{{.name}}LimitArrayCursor) Next() {{$arrayType}} { - if c.done { - return &cursors.{{.Name}}Array{} - } - a := c.{{.Name}}ArrayCursor.Next() - if len(a.Timestamps) == 0 { - return a - } - c.done = true - c.res.Timestamps[0] = a.Timestamps[0] - c.res.Values[0] = a.Values[0] - return c.res -} - -type {{.name}}WindowLastArrayCursor struct { - cursors.{{.Name}}ArrayCursor - windowEnd int64 - res {{$arrayType}} - tmp {{$arrayType}} - window interval.Window -} - -// Window array cursors assume that every != 0 && every != MaxInt64. -// Such a cursor will panic in the first case and possibly overflow in the second. -func new{{.Name}}WindowLastArrayCursor(cur cursors.{{.Name}}ArrayCursor, window interval.Window) *{{.name}}WindowLastArrayCursor { - return &{{.name}}WindowLastArrayCursor{ - {{.Name}}ArrayCursor: cur, - windowEnd: math.MinInt64, - res: cursors.New{{.Name}}ArrayLen(MaxPointsPerBlock), - tmp: &cursors.{{.Name}}Array{}, - window: window, - } -} - -func (c *{{.name}}WindowLastArrayCursor) Stats() cursors.CursorStats { - return c.{{.Name}}ArrayCursor.Stats() -} - -func (c *{{.name}}WindowLastArrayCursor) Next() *cursors.{{.Name}}Array { - cur := -1 - -NEXT: - var a *cursors.{{.Name}}Array - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.{{.Name}}ArrayCursor.Next() - } - - if a.Len() == 0 { - c.res.Timestamps = c.res.Timestamps[:cur+1] - c.res.Values = c.res.Values[:cur+1] - return c.res - } - - for i, t := range a.Timestamps { - if t >= c.windowEnd { - cur++ - } - - if cur == MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i:] - c.tmp.Values = a.Values[i:] - return c.res - } - - c.res.Timestamps[cur] = t - c.res.Values[cur] = a.Values[i] - - c.windowEnd = int64(c.window.GetLatestBounds(values.Time(t)).Stop()) - } - - c.tmp.Timestamps = nil - c.tmp.Values = nil - - goto NEXT -} - -type {{.name}}WindowFirstArrayCursor struct { - cursors.{{.Name}}ArrayCursor - windowEnd int64 - res {{$arrayType}} - tmp {{$arrayType}} - window interval.Window -} - -// Window array cursors assume that every != 0 && every != MaxInt64. -// Such a cursor will panic in the first case and possibly overflow in the second. 
-func new{{.Name}}WindowFirstArrayCursor(cur cursors.{{.Name}}ArrayCursor, window interval.Window) *{{.name}}WindowFirstArrayCursor { - return &{{.name}}WindowFirstArrayCursor{ - {{.Name}}ArrayCursor: cur, - windowEnd: math.MinInt64, - res: cursors.New{{.Name}}ArrayLen(MaxPointsPerBlock), - tmp: &cursors.{{.Name}}Array{}, - window: window, - } -} - -func (c *{{.name}}WindowFirstArrayCursor) Stats() cursors.CursorStats { - return c.{{.Name}}ArrayCursor.Stats() -} - -func (c *{{.name}}WindowFirstArrayCursor) Next() *cursors.{{.Name}}Array { - c.res.Timestamps = c.res.Timestamps[:0] - c.res.Values = c.res.Values[:0] - -NEXT: - var a *cursors.{{.Name}}Array - - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.{{.Name}}ArrayCursor.Next() - } - - if a.Len() == 0 { - return c.res - } - - for i, t := range a.Timestamps { - if t < c.windowEnd { - continue - } - - c.windowEnd = int64(c.window.GetLatestBounds(values.Time(t)).Stop()) - - c.res.Timestamps = append(c.res.Timestamps, t) - c.res.Values = append(c.res.Values, a.Values[i]) - - if c.res.Len() == MaxPointsPerBlock { - c.tmp.Timestamps = a.Timestamps[i+1:] - c.tmp.Values = a.Values[i+1:] - return c.res - } - } - - c.tmp.Timestamps = nil - c.tmp.Values = nil - - goto NEXT -} - -{{/* create an aggregate cursor for each aggregate function supported by the type */}} -{{$Name := .Name}} -{{$name := .name}} -{{range .Aggs}} -{{$aggName := .Name}} - -type {{$name}}Window{{$aggName}}ArrayCursor struct { - cursors.{{$Name}}ArrayCursor - res *cursors.{{.OutputTypeName}}Array - tmp {{$arrayType}} - window interval.Window -} - -func new{{$Name}}Window{{$aggName}}ArrayCursor(cur cursors.{{$Name}}ArrayCursor, window interval.Window) *{{$name}}Window{{$aggName}}ArrayCursor { - resLen := MaxPointsPerBlock - if window.IsZero() { - resLen = 1 - } - return &{{$name}}Window{{$aggName}}ArrayCursor{ - {{$Name}}ArrayCursor: cur, - res: cursors.New{{.OutputTypeName}}ArrayLen(resLen), - tmp: &cursors.{{$Name}}Array{}, - window: window, - } -} - -func (c *{{$name}}Window{{$aggName}}ArrayCursor) Stats() cursors.CursorStats { - return c.{{$Name}}ArrayCursor.Stats() -} - -func (c *{{$name}}Window{{$aggName}}ArrayCursor) Next() *cursors.{{.OutputTypeName}}Array { - pos := 0 - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - var a *cursors.{{$Name}}Array - if c.tmp.Len() > 0 { - a = c.tmp - } else { - a = c.{{$Name}}ArrayCursor.Next() - } - - if a.Len() == 0 { - return &cursors.{{.OutputTypeName}}Array{} - } - - rowIdx := 0 - {{.AccDecls}} - - var windowEnd int64 - if !c.window.IsZero() { - windowEnd = int64(c.window.GetLatestBounds(values.Time(a.Timestamps[rowIdx])).Stop()) - } else { - windowEnd = math.MaxInt64 - } - windowHasPoints := false - - // enumerate windows -WINDOWS: - for { - for ; rowIdx < a.Len(); rowIdx++ { - ts := a.Timestamps[rowIdx] - if !c.window.IsZero() && ts >= windowEnd { - // new window detected, close the current window - // do not generate a point for empty windows - if windowHasPoints { - {{.AccEmit}} - pos++ - if pos >= MaxPointsPerBlock { - // the output array is full, - // save the remaining points in the input array in tmp. 
- // they will be processed in the next call to Next() - c.tmp.Timestamps = a.Timestamps[rowIdx:] - c.tmp.Values = a.Values[rowIdx:] - break WINDOWS - } - } - - // start the new window - {{.AccReset}} - windowEnd = int64(c.window.GetLatestBounds(values.Time(ts)).Stop()) - windowHasPoints = false - - continue WINDOWS - } else { - {{.Accumulate}} - windowHasPoints = true - } - } - - // Clear buffered timestamps & values if we make it through a cursor. - // The break above will skip this if a cursor is partially read. - c.tmp.Timestamps = nil - c.tmp.Values = nil - - // get the next chunk - a = c.{{$Name}}ArrayCursor.Next() - if a.Len() == 0 { - // write the final point - // do not generate a point for empty windows - if windowHasPoints { - {{.AccEmit}} - pos++ - } - break WINDOWS - } - rowIdx = 0 - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -{{end}}{{/* range .Aggs */}} - -type {{.name}}EmptyArrayCursor struct { - res cursors.{{.Name}}Array -} - -var {{.Name}}EmptyArrayCursor cursors.{{.Name}}ArrayCursor = &{{.name}}EmptyArrayCursor{} - -func (c *{{.name}}EmptyArrayCursor) Err() error { return nil } -func (c *{{.name}}EmptyArrayCursor) Close() {} -func (c *{{.name}}EmptyArrayCursor) Stats() cursors.CursorStats { return cursors.CursorStats{} } -func (c *{{.name}}EmptyArrayCursor) Next() {{$arrayType}} { return &c.res } - -{{end}}{{/* range . */}} - -func arrayCursorType(cur cursors.Cursor) string { - switch cur.(type) { - {{range .}} - case cursors.{{.Name}}ArrayCursor: - return "{{.name}}" - {{end}}{{/* range . */}} - default: - return "unknown" - } -} diff --git a/storage/reads/array_cursor.gen.go.tmpldata b/storage/reads/array_cursor.gen.go.tmpldata deleted file mode 100644 index 86a1ae86193..00000000000 --- a/storage/reads/array_cursor.gen.go.tmpldata +++ /dev/null @@ -1,173 +0,0 @@ -[ - { - "Name":"Float", - "name":"float", - "Type":"float64", - "Aggs": [ - { - "Name":"Count", - "OutputTypeName":"Integer", - "AccDecls":"var acc int64 = 0", - "Accumulate":"acc++", - "AccEmit": "c.res.Timestamps[pos] = windowEnd; c.res.Values[pos] = acc", - "AccReset":"acc = 0" - }, - { - "Name":"Sum", - "OutputTypeName":"Float", - "AccDecls":"var acc float64 = 0", - "Accumulate":"acc += a.Values[rowIdx]", - "AccEmit":"c.res.Timestamps[pos] = windowEnd; c.res.Values[pos] = acc", - "AccReset":"acc = 0" - }, - { - "Name":"Min", - "OutputTypeName":"Float", - "AccDecls":"var acc float64 = math.MaxFloat64; var tsAcc int64", - "Accumulate":"if !windowHasPoints || a.Values[rowIdx] < acc { acc = a.Values[rowIdx]; tsAcc = a.Timestamps[rowIdx] }", - "AccEmit":"c.res.Timestamps[pos] = tsAcc; c.res.Values[pos] = acc", - "AccReset":"acc = math.MaxFloat64" - }, - { - "Name":"Max", - "OutputTypeName":"Float", - "AccDecls":"var acc float64 = -math.MaxFloat64; var tsAcc int64", - "Accumulate":"if !windowHasPoints || a.Values[rowIdx] > acc { acc = a.Values[rowIdx]; tsAcc = a.Timestamps[rowIdx] }", - "AccEmit":"c.res.Timestamps[pos] = tsAcc; c.res.Values[pos] = acc", - "AccReset":"acc = -math.MaxFloat64" - }, - { - "Name":"Mean", - "OutputTypeName":"Float", - "AccDecls":"var sum float64; var count int64", - "Accumulate":"sum += a.Values[rowIdx]; count++", - "AccEmit":"c.res.Timestamps[pos] = windowEnd; c.res.Values[pos] = sum / float64(count)", - "AccReset":"sum = 0; count = 0" - } - ] - }, - { - "Name":"Integer", - "name":"integer", - "Type":"int64", - "Aggs": [ - { - "Name":"Count", - "OutputTypeName":"Integer", - "AccDecls":"var acc int64 = 0", - 
"Accumulate":"acc++", - "AccEmit": "c.res.Timestamps[pos] = windowEnd; c.res.Values[pos] = acc", - "AccReset":"acc = 0" - }, - { - "Name":"Sum", - "OutputTypeName":"Integer", - "AccDecls":"var acc int64 = 0", - "Accumulate":"acc += a.Values[rowIdx]", - "AccEmit":"c.res.Timestamps[pos] = windowEnd; c.res.Values[pos] = acc", - "AccReset":"acc = 0" - }, - { - "Name":"Min", - "OutputTypeName":"Integer", - "AccDecls":"var acc int64 = math.MaxInt64; var tsAcc int64", - "Accumulate":"if !windowHasPoints || a.Values[rowIdx] < acc { acc = a.Values[rowIdx]; tsAcc = a.Timestamps[rowIdx] }", - "AccEmit":"c.res.Timestamps[pos] = tsAcc; c.res.Values[pos] = acc", - "AccReset":"acc = math.MaxInt64" - }, - { - "Name":"Max", - "OutputTypeName":"Integer", - "AccDecls":"var acc int64 = math.MinInt64; var tsAcc int64", - "Accumulate":"if !windowHasPoints || a.Values[rowIdx] > acc { acc = a.Values[rowIdx]; tsAcc = a.Timestamps[rowIdx] }", - "AccEmit":"c.res.Timestamps[pos] = tsAcc; c.res.Values[pos] = acc", - "AccReset":"acc = math.MinInt64" - }, - { - "Name":"Mean", - "OutputTypeName":"Float", - "AccDecls":"var sum int64; var count int64", - "Accumulate":"sum += a.Values[rowIdx]; count++", - "AccEmit":"c.res.Timestamps[pos] = windowEnd; c.res.Values[pos] = float64(sum) / float64(count)", - "AccReset":"sum = 0; count = 0" - } - ] - }, - { - "Name":"Unsigned", - "name":"unsigned", - "Type":"uint64", - "Aggs": [ - { - "Name":"Count", - "OutputTypeName":"Integer", - "AccDecls":"var acc int64 = 0", - "Accumulate":"acc++", - "AccEmit": "c.res.Timestamps[pos] = windowEnd; c.res.Values[pos] = acc", - "AccReset":"acc = 0" - }, - { - "Name":"Sum", - "OutputTypeName":"Unsigned", - "AccDecls":"var acc uint64 = 0", - "Accumulate":"acc += a.Values[rowIdx]", - "AccEmit":"c.res.Timestamps[pos] = windowEnd; c.res.Values[pos] = acc", - "AccReset":"acc = 0" - }, - { - "Name":"Min", - "OutputTypeName":"Unsigned", - "AccDecls":"var acc uint64 = math.MaxUint64; var tsAcc int64", - "Accumulate":"if !windowHasPoints || a.Values[rowIdx] < acc { acc = a.Values[rowIdx]; tsAcc = a.Timestamps[rowIdx] }", - "AccEmit":"c.res.Timestamps[pos] = tsAcc; c.res.Values[pos] = acc", - "AccReset":"acc = math.MaxUint64" - }, - { - "Name":"Max", - "OutputTypeName":"Unsigned", - "AccDecls":"var acc uint64 = 0; var tsAcc int64", - "Accumulate":"if !windowHasPoints || a.Values[rowIdx] > acc { acc = a.Values[rowIdx]; tsAcc = a.Timestamps[rowIdx] }", - "AccEmit":"c.res.Timestamps[pos] = tsAcc; c.res.Values[pos] = acc", - "AccReset":"acc = 0" - }, - { - "Name":"Mean", - "OutputTypeName":"Float", - "AccDecls":"var sum uint64; var count int64", - "Accumulate":"sum += a.Values[rowIdx]; count++", - "AccEmit":"c.res.Timestamps[pos] = windowEnd; c.res.Values[pos] = float64(sum) / float64(count)", - "AccReset":"sum = 0; count = 0" - } - ] - }, - { - "Name":"String", - "name":"string", - "Type":"string", - "Aggs": [ - { - "Name":"Count", - "OutputTypeName":"Integer", - "AccDecls":"var acc int64 = 0", - "Accumulate":"acc++", - "AccEmit": "c.res.Timestamps[pos] = windowEnd; c.res.Values[pos] = acc", - "AccReset":"acc = 0" - } - ] - }, - { - "Name":"Boolean", - "name":"boolean", - "Type":"bool", - "Aggs": [ - { - "Name":"Count", - "OutputTypeName":"Integer", - "AccDecls":"var acc int64 = 0", - "Accumulate":"acc++", - "AccEmit": "c.res.Timestamps[pos] = windowEnd; c.res.Values[pos] = acc", - "AccReset":"acc = 0" - } - ] - } -] diff --git a/storage/reads/array_cursor.go b/storage/reads/array_cursor.go deleted file mode 100644 index 5ae1f1eb8de..00000000000 --- 
a/storage/reads/array_cursor.go +++ /dev/null @@ -1,144 +0,0 @@ -package reads - -import ( - "context" - "fmt" - - "github.com/influxdata/flux/interval" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -type singleValue struct { - v interface{} -} - -func (v *singleValue) Value(key string) (interface{}, bool) { - return v.v, true -} - -func newAggregateArrayCursor(ctx context.Context, agg *datatypes.Aggregate, cursor cursors.Cursor) (cursors.Cursor, error) { - switch agg.Type { - case datatypes.Aggregate_AggregateTypeFirst, datatypes.Aggregate_AggregateTypeLast: - return newLimitArrayCursor(cursor), nil - } - return newWindowAggregateArrayCursor(ctx, agg, interval.Window{}, cursor) -} - -func newWindowAggregateArrayCursor(ctx context.Context, agg *datatypes.Aggregate, window interval.Window, cursor cursors.Cursor) (cursors.Cursor, error) { - if cursor == nil { - return nil, nil - } - - switch agg.Type { - case datatypes.Aggregate_AggregateTypeCount: - return newWindowCountArrayCursor(cursor, window), nil - case datatypes.Aggregate_AggregateTypeSum: - return newWindowSumArrayCursor(cursor, window) - case datatypes.Aggregate_AggregateTypeFirst: - return newWindowFirstArrayCursor(cursor, window), nil - case datatypes.Aggregate_AggregateTypeLast: - return newWindowLastArrayCursor(cursor, window), nil - case datatypes.Aggregate_AggregateTypeMin: - return newWindowMinArrayCursor(cursor, window), nil - case datatypes.Aggregate_AggregateTypeMax: - return newWindowMaxArrayCursor(cursor, window), nil - case datatypes.Aggregate_AggregateTypeMean: - return newWindowMeanArrayCursor(cursor, window) - default: - // TODO(sgc): should be validated higher up - panic("invalid aggregate") - } -} - -type cursorContext struct { - ctx context.Context - req *cursors.CursorRequest - itrs cursors.CursorIterators - err error -} - -type multiShardArrayCursors struct { - ctx context.Context - req cursors.CursorRequest - - cursors struct { - i integerMultiShardArrayCursor - f floatMultiShardArrayCursor - u unsignedMultiShardArrayCursor - b booleanMultiShardArrayCursor - s stringMultiShardArrayCursor - } -} - -// newMultiShardArrayCursors is a factory for creating cursors for each series key. -// The range of the cursor is [start, end). The start time is the lower absolute time -// and the end time is the higher absolute time regardless of ascending or descending order. -func newMultiShardArrayCursors(ctx context.Context, start, end int64, asc bool) *multiShardArrayCursors { - // When we construct the CursorRequest, we translate the time range - // from [start, stop) to [start, stop]. The cursor readers from storage are - // inclusive on both ends and we perform that conversion here. 
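// Illustrative note on the comment above: with start=10 and end=20 (the half-open
// range [10, 20)), the request constructed below is issued with StartTime: 10 and
// EndTime: 19, because the storage cursor readers treat both bounds as inclusive.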
- m := &multiShardArrayCursors{ - ctx: ctx, - req: cursors.CursorRequest{ - Ascending: asc, - StartTime: start, - EndTime: end - 1, - }, - } - - cc := cursorContext{ - ctx: ctx, - req: &m.req, - } - - m.cursors.i.cursorContext = cc - m.cursors.f.cursorContext = cc - m.cursors.u.cursorContext = cc - m.cursors.b.cursorContext = cc - m.cursors.s.cursorContext = cc - - return m -} - -func (m *multiShardArrayCursors) createCursor(row SeriesRow) cursors.Cursor { - m.req.Name = row.Name - m.req.Tags = row.SeriesTags - m.req.Field = row.Field - - var cond expression - if row.ValueCond != nil { - cond = &astExpr{row.ValueCond} - } - - var shard cursors.CursorIterator - var cur cursors.Cursor - for cur == nil && len(row.Query) > 0 { - shard, row.Query = row.Query[0], row.Query[1:] - cur, _ = shard.Next(m.ctx, &m.req) - } - - if cur == nil { - return nil - } - - switch c := cur.(type) { - case cursors.IntegerArrayCursor: - m.cursors.i.reset(c, row.Query, cond) - return &m.cursors.i - case cursors.FloatArrayCursor: - m.cursors.f.reset(c, row.Query, cond) - return &m.cursors.f - case cursors.UnsignedArrayCursor: - m.cursors.u.reset(c, row.Query, cond) - return &m.cursors.u - case cursors.StringArrayCursor: - m.cursors.s.reset(c, row.Query, cond) - return &m.cursors.s - case cursors.BooleanArrayCursor: - m.cursors.b.reset(c, row.Query, cond) - return &m.cursors.b - default: - panic(fmt.Sprintf("unreachable: %T", cur)) - } -} diff --git a/storage/reads/array_cursor_gen_test.go b/storage/reads/array_cursor_gen_test.go deleted file mode 100644 index 917f5f6cf11..00000000000 --- a/storage/reads/array_cursor_gen_test.go +++ /dev/null @@ -1,1297 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! -// Source: array_cursor_test.gen.go.tmpl - -package reads - -import ( - "context" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/flux/interval" - "github.com/influxdata/flux/values" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -var cmpOptions = cmp.AllowUnexported(interval.Window{}) - -type MockFloatArrayCursor struct { - CloseFunc func() - ErrFunc func() error - StatsFunc func() cursors.CursorStats - NextFunc func() *cursors.FloatArray -} - -func (c *MockFloatArrayCursor) Close() { c.CloseFunc() } -func (c *MockFloatArrayCursor) Err() error { return c.ErrFunc() } -func (c *MockFloatArrayCursor) Stats() cursors.CursorStats { return c.StatsFunc() } -func (c *MockFloatArrayCursor) Next() *cursors.FloatArray { return c.NextFunc() } - -func TestNewAggregateArrayCursor_Float(t *testing.T) { - - t.Run("Count", func(t *testing.T) { - want := &floatWindowCountArrayCursor{ - FloatArrayCursor: &MockFloatArrayCursor{}, - res: cursors.NewIntegerArrayLen(1), - tmp: &cursors.FloatArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeCount, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockFloatArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(floatWindowCountArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Sum", func(t *testing.T) { - want := &floatWindowSumArrayCursor{ - FloatArrayCursor: &MockFloatArrayCursor{}, - res: cursors.NewFloatArrayLen(1), - tmp: &cursors.FloatArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeSum, - } - - got, _ := 
newAggregateArrayCursor(context.Background(), agg, &MockFloatArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(floatWindowSumArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Min", func(t *testing.T) { - want := &floatWindowMinArrayCursor{ - FloatArrayCursor: &MockFloatArrayCursor{}, - res: cursors.NewFloatArrayLen(1), - tmp: &cursors.FloatArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMin, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockFloatArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(floatWindowMinArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Max", func(t *testing.T) { - want := &floatWindowMaxArrayCursor{ - FloatArrayCursor: &MockFloatArrayCursor{}, - res: cursors.NewFloatArrayLen(1), - tmp: &cursors.FloatArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMax, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockFloatArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(floatWindowMaxArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Mean", func(t *testing.T) { - want := &floatWindowMeanArrayCursor{ - FloatArrayCursor: &MockFloatArrayCursor{}, - res: cursors.NewFloatArrayLen(1), - tmp: &cursors.FloatArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMean, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockFloatArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(floatWindowMeanArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - -} - -func TestNewWindowAggregateArrayCursorMonths_Float(t *testing.T) { - - t.Run("Count", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &floatWindowCountArrayCursor{ - FloatArrayCursor: &MockFloatArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.FloatArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeCount, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockFloatArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(floatWindowCountArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Sum", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &floatWindowSumArrayCursor{ - FloatArrayCursor: &MockFloatArrayCursor{}, - res: cursors.NewFloatArrayLen(MaxPointsPerBlock), - tmp: &cursors.FloatArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeSum, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockFloatArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(floatWindowSumArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - 
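// The window fixtures in these window-aggregate subtests are all built the same way:
// interval.NewWindow(every, period, offset) takes three flux durations, and
// values.MakeDuration(nsecs, months, negative) encodes each one. A minimal sketch of
// that recurring setup (assuming the flux interval/values packages imported above):
//
//	window, err := interval.NewWindow(
//		values.MakeDuration(int64(time.Hour), 0, false), // every: 1h in nanoseconds, 0 months
//		values.MakeDuration(int64(time.Hour), 0, false), // period: 1h
//		values.MakeDuration(0, 0, false),                // offset: none
//	)
//	if err != nil {
//		t.Fatal(err) // the subtests here discard this error; a real caller should check it
//	}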
t.Run("Min", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &floatWindowMinArrayCursor{ - FloatArrayCursor: &MockFloatArrayCursor{}, - res: cursors.NewFloatArrayLen(MaxPointsPerBlock), - tmp: &cursors.FloatArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMin, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockFloatArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(floatWindowMinArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Max", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &floatWindowMaxArrayCursor{ - FloatArrayCursor: &MockFloatArrayCursor{}, - res: cursors.NewFloatArrayLen(MaxPointsPerBlock), - tmp: &cursors.FloatArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMax, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockFloatArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(floatWindowMaxArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Mean", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &floatWindowMeanArrayCursor{ - FloatArrayCursor: &MockFloatArrayCursor{}, - res: cursors.NewFloatArrayLen(MaxPointsPerBlock), - tmp: &cursors.FloatArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMean, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockFloatArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(floatWindowMeanArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - -} - -func TestNewWindowAggregateArrayCursor_Float(t *testing.T) { - - t.Run("Count", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &floatWindowCountArrayCursor{ - FloatArrayCursor: &MockFloatArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.FloatArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeCount, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockFloatArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(floatWindowCountArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Sum", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &floatWindowSumArrayCursor{ - FloatArrayCursor: &MockFloatArrayCursor{}, - res: cursors.NewFloatArrayLen(MaxPointsPerBlock), - tmp: &cursors.FloatArray{}, - window: window, - } - - agg := 
&datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeSum, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockFloatArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(floatWindowSumArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Min", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &floatWindowMinArrayCursor{ - FloatArrayCursor: &MockFloatArrayCursor{}, - res: cursors.NewFloatArrayLen(MaxPointsPerBlock), - tmp: &cursors.FloatArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMin, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockFloatArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(floatWindowMinArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Max", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &floatWindowMaxArrayCursor{ - FloatArrayCursor: &MockFloatArrayCursor{}, - res: cursors.NewFloatArrayLen(MaxPointsPerBlock), - tmp: &cursors.FloatArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMax, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockFloatArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(floatWindowMaxArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Mean", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &floatWindowMeanArrayCursor{ - FloatArrayCursor: &MockFloatArrayCursor{}, - res: cursors.NewFloatArrayLen(MaxPointsPerBlock), - tmp: &cursors.FloatArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMean, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockFloatArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(floatWindowMeanArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - -} - -type MockIntegerArrayCursor struct { - CloseFunc func() - ErrFunc func() error - StatsFunc func() cursors.CursorStats - NextFunc func() *cursors.IntegerArray -} - -func (c *MockIntegerArrayCursor) Close() { c.CloseFunc() } -func (c *MockIntegerArrayCursor) Err() error { return c.ErrFunc() } -func (c *MockIntegerArrayCursor) Stats() cursors.CursorStats { return c.StatsFunc() } -func (c *MockIntegerArrayCursor) Next() *cursors.IntegerArray { return c.NextFunc() } - -func TestNewAggregateArrayCursor_Integer(t *testing.T) { - - t.Run("Count", func(t *testing.T) { - want := &integerWindowCountArrayCursor{ - IntegerArrayCursor: &MockIntegerArrayCursor{}, - res: cursors.NewIntegerArrayLen(1), - tmp: &cursors.IntegerArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeCount, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockIntegerArrayCursor{}) - - if diff := cmp.Diff(got, want, 
cmp.AllowUnexported(integerWindowCountArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Sum", func(t *testing.T) { - want := &integerWindowSumArrayCursor{ - IntegerArrayCursor: &MockIntegerArrayCursor{}, - res: cursors.NewIntegerArrayLen(1), - tmp: &cursors.IntegerArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeSum, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockIntegerArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(integerWindowSumArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Min", func(t *testing.T) { - want := &integerWindowMinArrayCursor{ - IntegerArrayCursor: &MockIntegerArrayCursor{}, - res: cursors.NewIntegerArrayLen(1), - tmp: &cursors.IntegerArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMin, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockIntegerArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(integerWindowMinArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Max", func(t *testing.T) { - want := &integerWindowMaxArrayCursor{ - IntegerArrayCursor: &MockIntegerArrayCursor{}, - res: cursors.NewIntegerArrayLen(1), - tmp: &cursors.IntegerArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMax, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockIntegerArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(integerWindowMaxArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Mean", func(t *testing.T) { - want := &integerWindowMeanArrayCursor{ - IntegerArrayCursor: &MockIntegerArrayCursor{}, - res: cursors.NewFloatArrayLen(1), - tmp: &cursors.IntegerArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMean, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockIntegerArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(integerWindowMeanArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - -} - -func TestNewWindowAggregateArrayCursorMonths_Integer(t *testing.T) { - - t.Run("Count", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &integerWindowCountArrayCursor{ - IntegerArrayCursor: &MockIntegerArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.IntegerArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeCount, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockIntegerArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(integerWindowCountArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Sum", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &integerWindowSumArrayCursor{ - 
IntegerArrayCursor: &MockIntegerArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.IntegerArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeSum, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockIntegerArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(integerWindowSumArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Min", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &integerWindowMinArrayCursor{ - IntegerArrayCursor: &MockIntegerArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.IntegerArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMin, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockIntegerArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(integerWindowMinArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Max", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &integerWindowMaxArrayCursor{ - IntegerArrayCursor: &MockIntegerArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.IntegerArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMax, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockIntegerArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(integerWindowMaxArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Mean", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &integerWindowMeanArrayCursor{ - IntegerArrayCursor: &MockIntegerArrayCursor{}, - res: cursors.NewFloatArrayLen(MaxPointsPerBlock), - tmp: &cursors.IntegerArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMean, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockIntegerArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(integerWindowMeanArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - -} - -func TestNewWindowAggregateArrayCursor_Integer(t *testing.T) { - - t.Run("Count", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &integerWindowCountArrayCursor{ - IntegerArrayCursor: &MockIntegerArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.IntegerArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeCount, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockIntegerArrayCursor{}) - 
- if diff := cmp.Diff(got, want, cmp.AllowUnexported(integerWindowCountArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Sum", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &integerWindowSumArrayCursor{ - IntegerArrayCursor: &MockIntegerArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.IntegerArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeSum, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockIntegerArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(integerWindowSumArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Min", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &integerWindowMinArrayCursor{ - IntegerArrayCursor: &MockIntegerArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.IntegerArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMin, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockIntegerArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(integerWindowMinArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Max", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &integerWindowMaxArrayCursor{ - IntegerArrayCursor: &MockIntegerArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.IntegerArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMax, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockIntegerArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(integerWindowMaxArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Mean", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &integerWindowMeanArrayCursor{ - IntegerArrayCursor: &MockIntegerArrayCursor{}, - res: cursors.NewFloatArrayLen(MaxPointsPerBlock), - tmp: &cursors.IntegerArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMean, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockIntegerArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(integerWindowMeanArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - -} - -type MockUnsignedArrayCursor struct { - CloseFunc func() - ErrFunc func() error - StatsFunc func() cursors.CursorStats - NextFunc func() *cursors.UnsignedArray -} - -func (c *MockUnsignedArrayCursor) Close() { c.CloseFunc() } -func (c *MockUnsignedArrayCursor) Err() error { return c.ErrFunc() } -func (c 
*MockUnsignedArrayCursor) Stats() cursors.CursorStats { return c.StatsFunc() } -func (c *MockUnsignedArrayCursor) Next() *cursors.UnsignedArray { return c.NextFunc() } - -func TestNewAggregateArrayCursor_Unsigned(t *testing.T) { - - t.Run("Count", func(t *testing.T) { - want := &unsignedWindowCountArrayCursor{ - UnsignedArrayCursor: &MockUnsignedArrayCursor{}, - res: cursors.NewIntegerArrayLen(1), - tmp: &cursors.UnsignedArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeCount, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockUnsignedArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(unsignedWindowCountArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Sum", func(t *testing.T) { - want := &unsignedWindowSumArrayCursor{ - UnsignedArrayCursor: &MockUnsignedArrayCursor{}, - res: cursors.NewUnsignedArrayLen(1), - tmp: &cursors.UnsignedArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeSum, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockUnsignedArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(unsignedWindowSumArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Min", func(t *testing.T) { - want := &unsignedWindowMinArrayCursor{ - UnsignedArrayCursor: &MockUnsignedArrayCursor{}, - res: cursors.NewUnsignedArrayLen(1), - tmp: &cursors.UnsignedArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMin, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockUnsignedArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(unsignedWindowMinArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Max", func(t *testing.T) { - want := &unsignedWindowMaxArrayCursor{ - UnsignedArrayCursor: &MockUnsignedArrayCursor{}, - res: cursors.NewUnsignedArrayLen(1), - tmp: &cursors.UnsignedArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMax, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockUnsignedArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(unsignedWindowMaxArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Mean", func(t *testing.T) { - want := &unsignedWindowMeanArrayCursor{ - UnsignedArrayCursor: &MockUnsignedArrayCursor{}, - res: cursors.NewFloatArrayLen(1), - tmp: &cursors.UnsignedArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMean, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockUnsignedArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(unsignedWindowMeanArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - -} - -func TestNewWindowAggregateArrayCursorMonths_Unsigned(t *testing.T) { - - t.Run("Count", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &unsignedWindowCountArrayCursor{ - UnsignedArrayCursor: &MockUnsignedArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: 
&cursors.UnsignedArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeCount, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockUnsignedArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(unsignedWindowCountArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Sum", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &unsignedWindowSumArrayCursor{ - UnsignedArrayCursor: &MockUnsignedArrayCursor{}, - res: cursors.NewUnsignedArrayLen(MaxPointsPerBlock), - tmp: &cursors.UnsignedArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeSum, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockUnsignedArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(unsignedWindowSumArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Min", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &unsignedWindowMinArrayCursor{ - UnsignedArrayCursor: &MockUnsignedArrayCursor{}, - res: cursors.NewUnsignedArrayLen(MaxPointsPerBlock), - tmp: &cursors.UnsignedArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMin, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockUnsignedArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(unsignedWindowMinArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Max", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &unsignedWindowMaxArrayCursor{ - UnsignedArrayCursor: &MockUnsignedArrayCursor{}, - res: cursors.NewUnsignedArrayLen(MaxPointsPerBlock), - tmp: &cursors.UnsignedArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMax, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockUnsignedArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(unsignedWindowMaxArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Mean", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &unsignedWindowMeanArrayCursor{ - UnsignedArrayCursor: &MockUnsignedArrayCursor{}, - res: cursors.NewFloatArrayLen(MaxPointsPerBlock), - tmp: &cursors.UnsignedArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMean, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockUnsignedArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(unsignedWindowMeanArrayCursor{}), cmpOptions); diff != "" { - 
t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - -} - -func TestNewWindowAggregateArrayCursor_Unsigned(t *testing.T) { - - t.Run("Count", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &unsignedWindowCountArrayCursor{ - UnsignedArrayCursor: &MockUnsignedArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.UnsignedArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeCount, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockUnsignedArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(unsignedWindowCountArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Sum", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &unsignedWindowSumArrayCursor{ - UnsignedArrayCursor: &MockUnsignedArrayCursor{}, - res: cursors.NewUnsignedArrayLen(MaxPointsPerBlock), - tmp: &cursors.UnsignedArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeSum, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockUnsignedArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(unsignedWindowSumArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Min", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &unsignedWindowMinArrayCursor{ - UnsignedArrayCursor: &MockUnsignedArrayCursor{}, - res: cursors.NewUnsignedArrayLen(MaxPointsPerBlock), - tmp: &cursors.UnsignedArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMin, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockUnsignedArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(unsignedWindowMinArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Max", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &unsignedWindowMaxArrayCursor{ - UnsignedArrayCursor: &MockUnsignedArrayCursor{}, - res: cursors.NewUnsignedArrayLen(MaxPointsPerBlock), - tmp: &cursors.UnsignedArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMax, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockUnsignedArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(unsignedWindowMaxArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - - t.Run("Mean", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &unsignedWindowMeanArrayCursor{ - UnsignedArrayCursor: &MockUnsignedArrayCursor{}, - res: cursors.NewFloatArrayLen(MaxPointsPerBlock), - 
tmp: &cursors.UnsignedArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeMean, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockUnsignedArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(unsignedWindowMeanArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - -} - -type MockStringArrayCursor struct { - CloseFunc func() - ErrFunc func() error - StatsFunc func() cursors.CursorStats - NextFunc func() *cursors.StringArray -} - -func (c *MockStringArrayCursor) Close() { c.CloseFunc() } -func (c *MockStringArrayCursor) Err() error { return c.ErrFunc() } -func (c *MockStringArrayCursor) Stats() cursors.CursorStats { return c.StatsFunc() } -func (c *MockStringArrayCursor) Next() *cursors.StringArray { return c.NextFunc() } - -func TestNewAggregateArrayCursor_String(t *testing.T) { - - t.Run("Count", func(t *testing.T) { - want := &stringWindowCountArrayCursor{ - StringArrayCursor: &MockStringArrayCursor{}, - res: cursors.NewIntegerArrayLen(1), - tmp: &cursors.StringArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeCount, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockStringArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(stringWindowCountArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - -} - -func TestNewWindowAggregateArrayCursorMonths_String(t *testing.T) { - - t.Run("Count", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &stringWindowCountArrayCursor{ - StringArrayCursor: &MockStringArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.StringArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeCount, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockStringArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(stringWindowCountArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - -} - -func TestNewWindowAggregateArrayCursor_String(t *testing.T) { - - t.Run("Count", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &stringWindowCountArrayCursor{ - StringArrayCursor: &MockStringArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.StringArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeCount, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockStringArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(stringWindowCountArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - -} - -type MockBooleanArrayCursor struct { - CloseFunc func() - ErrFunc func() error - StatsFunc func() cursors.CursorStats - NextFunc func() *cursors.BooleanArray -} - -func (c *MockBooleanArrayCursor) Close() { c.CloseFunc() } -func (c *MockBooleanArrayCursor) Err() error { return c.ErrFunc() } -func (c 
*MockBooleanArrayCursor) Stats() cursors.CursorStats { return c.StatsFunc() } -func (c *MockBooleanArrayCursor) Next() *cursors.BooleanArray { return c.NextFunc() } - -func TestNewAggregateArrayCursor_Boolean(t *testing.T) { - - t.Run("Count", func(t *testing.T) { - want := &booleanWindowCountArrayCursor{ - BooleanArrayCursor: &MockBooleanArrayCursor{}, - res: cursors.NewIntegerArrayLen(1), - tmp: &cursors.BooleanArray{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeCount, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &MockBooleanArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(booleanWindowCountArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - -} - -func TestNewWindowAggregateArrayCursorMonths_Boolean(t *testing.T) { - - t.Run("Count", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &booleanWindowCountArrayCursor{ - BooleanArrayCursor: &MockBooleanArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.BooleanArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeCount, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockBooleanArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(booleanWindowCountArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - -} - -func TestNewWindowAggregateArrayCursor_Boolean(t *testing.T) { - - t.Run("Count", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &booleanWindowCountArrayCursor{ - BooleanArrayCursor: &MockBooleanArrayCursor{}, - res: cursors.NewIntegerArrayLen(MaxPointsPerBlock), - tmp: &cursors.BooleanArray{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateTypeCount, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &MockBooleanArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported(booleanWindowCountArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) - -} diff --git a/storage/reads/array_cursor_test.gen.go.tmpl b/storage/reads/array_cursor_test.gen.go.tmpl deleted file mode 100644 index a6872992f2a..00000000000 --- a/storage/reads/array_cursor_test.gen.go.tmpl +++ /dev/null @@ -1,115 +0,0 @@ -package reads - -import ( - "context" - "testing" - "time" - - "github.com/influxdata/flux/interval" - "github.com/influxdata/flux/values" - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/tsdb/cursors" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" -) - -var cmpOptions = cmp.AllowUnexported(interval.Window{}) - -{{range .}} -{{$ColType := .Name}} -{{$colType := .name}} - -type Mock{{$ColType}}ArrayCursor struct { - CloseFunc func() - ErrFunc func() error - StatsFunc func() cursors.CursorStats - NextFunc func() *cursors.{{$ColType}}Array -} - -func (c *Mock{{$ColType}}ArrayCursor) Close() { c.CloseFunc() } -func (c *Mock{{$ColType}}ArrayCursor) Err() error { return c.ErrFunc() } -func (c 
*Mock{{$ColType}}ArrayCursor) Stats() cursors.CursorStats { return c.StatsFunc() } -func (c *Mock{{$ColType}}ArrayCursor) Next() *cursors.{{$ColType}}Array { return c.NextFunc() } - -func TestNewAggregateArrayCursor_{{$ColType}}(t *testing.T) { -{{range .Aggs}} -{{$Agg := .Name}} - t.Run("{{$Agg}}", func(t *testing.T) { - want := &{{$colType}}Window{{$Agg}}ArrayCursor{ - {{$ColType}}ArrayCursor: &Mock{{$ColType}}ArrayCursor{}, - res: cursors.New{{.OutputTypeName}}ArrayLen(1), - tmp: &cursors.{{$ColType}}Array{}, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateType{{$Agg}}, - } - - got, _ := newAggregateArrayCursor(context.Background(), agg, &Mock{{$ColType}}ArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported({{$colType}}Window{{$Agg}}ArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) -{{end}} -} - -func TestNewWindowAggregateArrayCursorMonths_{{$ColType}}(t *testing.T) { -{{range .Aggs}} -{{$Agg := .Name}} - t.Run("{{$Agg}}", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(int64(time.Hour), 0, false), - values.MakeDuration(0, 0, false), - ) - - want := &{{$colType}}Window{{$Agg}}ArrayCursor{ - {{$ColType}}ArrayCursor: &Mock{{$ColType}}ArrayCursor{}, - res: cursors.New{{.OutputTypeName}}ArrayLen(MaxPointsPerBlock), - tmp: &cursors.{{$ColType}}Array{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateType{{$Agg}}, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &Mock{{$ColType}}ArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported({{$colType}}Window{{$Agg}}ArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) -{{end}} -} - -func TestNewWindowAggregateArrayCursor_{{$ColType}}(t *testing.T) { -{{range .Aggs}} -{{$Agg := .Name}} - t.Run("{{$Agg}}", func(t *testing.T) { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - - want := &{{$colType}}Window{{$Agg}}ArrayCursor{ - {{$ColType}}ArrayCursor: &Mock{{$ColType}}ArrayCursor{}, - res: cursors.New{{.OutputTypeName}}ArrayLen(MaxPointsPerBlock), - tmp: &cursors.{{$ColType}}Array{}, - window: window, - } - - agg := &datatypes.Aggregate{ - Type: datatypes.Aggregate_AggregateType{{$Agg}}, - } - - got, _ := newWindowAggregateArrayCursor(context.Background(), agg, window, &Mock{{$ColType}}ArrayCursor{}) - - if diff := cmp.Diff(got, want, cmp.AllowUnexported({{$colType}}Window{{$Agg}}ArrayCursor{}), cmpOptions); diff != "" { - t.Fatalf("did not get expected cursor; -got/+want:\n%v", diff) - } - }) -{{end}} -} -{{end}}{{/* range over each supported field type */}} diff --git a/storage/reads/array_cursor_test.go b/storage/reads/array_cursor_test.go deleted file mode 100644 index 3f4389f94e9..00000000000 --- a/storage/reads/array_cursor_test.go +++ /dev/null @@ -1,2251 +0,0 @@ -package reads - -import ( - "context" - "math" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/google/go-cmp/cmp" - "github.com/influxdata/flux/interval" - "github.com/influxdata/flux/values" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb/cursors" - "github.com/influxdata/influxdb/v2/tsdb/cursors/mock" - 
"github.com/stretchr/testify/require" -) - -func TestIntegerFilterArrayCursor(t *testing.T) { - var i int - expr := MockExpression{ - EvalBoolFunc: func(v Valuer) bool { - i++ - return i%2 == 0 - }, - } - - var resultN int - ac := MockIntegerArrayCursor{ - CloseFunc: func() {}, - ErrFunc: func() error { return nil }, - StatsFunc: func() cursors.CursorStats { return cursors.CursorStats{} }, - NextFunc: func() *cursors.IntegerArray { - resultN++ - if resultN == 4 { - return cursors.NewIntegerArrayLen(0) - } - return cursors.NewIntegerArrayLen(900) - }, - } - - c := newIntegerFilterArrayCursor(&expr) - c.reset(&ac) - - if got, want := len(c.Next().Timestamps), 1000; got != want { - t.Fatalf("len(Next())=%d, want %d", got, want) - } else if got, want := len(c.Next().Timestamps), 350; got != want { - t.Fatalf("len(Next())=%d, want %d", got, want) - } -} - -func makeIntegerArray(n int, tsStart time.Time, tsStep time.Duration, valueFn func(i int64) int64) *cursors.IntegerArray { - ia := &cursors.IntegerArray{ - Timestamps: make([]int64, n), - Values: make([]int64, n), - } - - for i := 0; i < n; i++ { - ia.Timestamps[i] = tsStart.UnixNano() + int64(i)*int64(tsStep) - ia.Values[i] = valueFn(int64(i)) - } - - return ia -} - -func makeFloatArray(n int, tsStart time.Time, tsStep time.Duration, valueFn func(i int64) float64) *cursors.FloatArray { - fa := &cursors.FloatArray{ - Timestamps: make([]int64, n), - Values: make([]float64, n), - } - - for i := 0; i < n; i++ { - fa.Timestamps[i] = tsStart.UnixNano() + int64(i)*int64(tsStep) - fa.Values[i] = valueFn(int64(i)) - } - - return fa -} - -func mustParseTime(ts string) time.Time { - t, err := time.Parse(time.RFC3339, ts) - if err != nil { - panic(err) - } - return t -} - -func copyIntegerArray(src *cursors.IntegerArray) *cursors.IntegerArray { - dst := cursors.NewIntegerArrayLen(src.Len()) - copy(dst.Timestamps, src.Timestamps) - copy(dst.Values, src.Values) - return dst -} - -func copyFloatArray(src *cursors.FloatArray) *cursors.FloatArray { - dst := cursors.NewFloatArrayLen(src.Len()) - copy(dst.Timestamps, src.Timestamps) - copy(dst.Values, src.Values) - return dst -} - -type aggArrayCursorTest struct { - name string - createCursorFn func(cur cursors.IntegerArrayCursor, every, offset int64, window interval.Window) cursors.Cursor - every time.Duration - offset time.Duration - inputArrays []*cursors.IntegerArray - wantIntegers []*cursors.IntegerArray - wantFloats []*cursors.FloatArray - window interval.Window -} - -func (a *aggArrayCursorTest) run(t *testing.T) { - t.Helper() - t.Run(a.name, func(t *testing.T) { - var resultN int - mc := &MockIntegerArrayCursor{ - CloseFunc: func() {}, - ErrFunc: func() error { return nil }, - StatsFunc: func() cursors.CursorStats { return cursors.CursorStats{} }, - NextFunc: func() *cursors.IntegerArray { - if resultN < len(a.inputArrays) { - a := a.inputArrays[resultN] - resultN++ - return a - } - return &cursors.IntegerArray{} - }, - } - c := a.createCursorFn(mc, int64(a.every), int64(a.offset), a.window) - switch cursor := c.(type) { - case cursors.IntegerArrayCursor: - got := make([]*cursors.IntegerArray, 0, len(a.wantIntegers)) - for a := cursor.Next(); a.Len() != 0; a = cursor.Next() { - got = append(got, copyIntegerArray(a)) - } - - if diff := cmp.Diff(got, a.wantIntegers); diff != "" { - t.Fatalf("did not get expected result from count array cursor; -got/+want:\n%v", diff) - } - case cursors.FloatArrayCursor: - got := make([]*cursors.FloatArray, 0, len(a.wantFloats)) - for a := cursor.Next(); a.Len() 
!= 0; a = cursor.Next() { - got = append(got, copyFloatArray(a)) - } - - if diff := cmp.Diff(got, a.wantFloats); diff != "" { - t.Fatalf("did not get expected result from count array cursor; -got/+want:\n%v", diff) - } - default: - t.Fatalf("unsupported cursor type: %T", cursor) - } - }) -} - -func TestLimitArrayCursor(t *testing.T) { - arr := []*cursors.IntegerArray{ - makeIntegerArray( - 1000, - mustParseTime("1970-01-01T00:00:01Z"), time.Millisecond, - func(i int64) int64 { return 3 + i }, - ), - makeIntegerArray( - 1000, - mustParseTime("1970-01-01T00:00:02Z"), time.Millisecond, - func(i int64) int64 { return 1003 + i }, - ), - } - idx := -1 - cur := &MockIntegerArrayCursor{ - CloseFunc: func() {}, - ErrFunc: func() error { return nil }, - StatsFunc: func() cursors.CursorStats { return cursors.CursorStats{} }, - NextFunc: func() *cursors.IntegerArray { - if idx++; idx < len(arr) { - return arr[idx] - } - return &cursors.IntegerArray{} - }, - } - aggCursor := newIntegerLimitArrayCursor(cur) - want := []*cursors.IntegerArray{ - { - Timestamps: []int64{mustParseTime("1970-01-01T00:00:01Z").UnixNano()}, - Values: []int64{3}, - }, - } - got := []*cursors.IntegerArray{} - for a := aggCursor.Next(); a.Len() != 0; a = aggCursor.Next() { - got = append(got, a) - } - if !cmp.Equal(want, got) { - t.Fatalf("unexpected result; -want/+got:\n%v", cmp.Diff(want, got)) - } -} - -func TestWindowFirstArrayCursor(t *testing.T) { - testcases := []aggArrayCursorTest{ - { - name: "window", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { return 15 * i }, - ), - }, - }, - { - name: "offset window", - every: 15 * time.Minute, - offset: time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - { - Timestamps: []int64{ - mustParseTime("2010-01-01T00:00:00Z").UnixNano(), - mustParseTime("2010-01-01T00:01:00Z").UnixNano(), - mustParseTime("2010-01-01T00:16:00Z").UnixNano(), - mustParseTime("2010-01-01T00:31:00Z").UnixNano(), - mustParseTime("2010-01-01T00:46:00Z").UnixNano(), - }, - Values: []int64{0, 1, 16, 31, 46}, - }, - }, - }, - { - name: "empty windows", - every: time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { return i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { return i }, - ), - }, - }, - { - name: "empty offset windows", - every: time.Minute, - offset: time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { return i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { return i }, - ), - }, - }, - { - name: "unaligned window", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:30Z"), time.Minute, - func(i int64) int64 { return i }, - ), - }, - wantIntegers: 
[]*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:00:30Z"), 15*time.Minute, - func(i int64) int64 { return 15 * i }, - ), - }, - }, - { - name: "unaligned offset window", - every: 15 * time.Minute, - offset: 45 * time.Second, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:30Z"), time.Minute, - func(i int64) int64 { return i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - { - Timestamps: []int64{ - mustParseTime("2010-01-01T00:00:30Z").UnixNano(), - mustParseTime("2010-01-01T00:01:30Z").UnixNano(), - mustParseTime("2010-01-01T00:16:30Z").UnixNano(), - mustParseTime("2010-01-01T00:31:30Z").UnixNano(), - mustParseTime("2010-01-01T00:46:30Z").UnixNano(), - }, - Values: []int64{0, 1, 16, 31, 46}, - }, - }, - }, - { - name: "more unaligned window", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:01:30Z"), time.Minute, - func(i int64) int64 { return i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - { - Timestamps: []int64{ - mustParseTime("2010-01-01T00:01:30Z").UnixNano(), - mustParseTime("2010-01-01T00:15:30Z").UnixNano(), - mustParseTime("2010-01-01T00:30:30Z").UnixNano(), - mustParseTime("2010-01-01T00:45:30Z").UnixNano(), - mustParseTime("2010-01-01T01:00:30Z").UnixNano(), - }, - Values: []int64{0, 14, 29, 44, 59}, - }, - }, - }, - { - name: "window two input arrays", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return i }, - ), - makeIntegerArray( - 60, - mustParseTime("2010-01-01T01:00:00Z"), time.Minute, - func(i int64) int64 { return 60 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 8, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { return 15 * i }, - ), - }, - }, - { - name: "offset window two input arrays", - every: 30 * time.Minute, - offset: 27 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return i }, - ), - makeIntegerArray( - 60, - mustParseTime("2010-01-01T01:00:00Z"), time.Minute, - func(i int64) int64 { return 60 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - { - Timestamps: []int64{ - mustParseTime("2010-01-01T00:00:00Z").UnixNano(), - mustParseTime("2010-01-01T00:27:00Z").UnixNano(), - mustParseTime("2010-01-01T00:57:00Z").UnixNano(), - mustParseTime("2010-01-01T01:27:00Z").UnixNano(), - mustParseTime("2010-01-01T01:57:00Z").UnixNano(), - }, - Values: []int64{0, 27, 57, 87, 117}, - }, - }, - }, - { - name: "window spans input arrays", - every: 40 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return i }, - ), - makeIntegerArray( - 60, - mustParseTime("2010-01-01T01:00:00Z"), time.Minute, - func(i int64) int64 { return 60 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 3, - mustParseTime("2010-01-01T00:00:00Z"), 40*time.Minute, - func(i int64) int64 { return 40 * i }, - ), - }, - }, - { - name: "offset window spans input arrays", - every: 40 * time.Minute, - offset: 10 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return i }, - ), - 
makeIntegerArray( - 60, - mustParseTime("2010-01-01T01:00:00Z"), time.Minute, - func(i int64) int64 { return 60 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - { - Timestamps: []int64{ - mustParseTime("2010-01-01T00:00:00Z").UnixNano(), - mustParseTime("2010-01-01T00:10:00Z").UnixNano(), - mustParseTime("2010-01-01T00:50:00Z").UnixNano(), - mustParseTime("2010-01-01T01:30:00Z").UnixNano(), - }, - Values: []int64{0, 10, 50, 90}, - }, - }, - }, - { - name: "more windows than MaxPointsPerBlock", - every: 2 * time.Millisecond, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:00Z"), time.Millisecond, - func(i int64) int64 { return i }, - ), - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:01Z"), time.Millisecond, - func(i int64) int64 { return 1000 + i }, - ), - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:02Z"), time.Millisecond, - func(i int64) int64 { return 2000 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 1000, - mustParseTime("2010-01-01T00:00:00.000Z"), 2*time.Millisecond, - func(i int64) int64 { return 2 * i }, - ), - makeIntegerArray( - 500, - mustParseTime("2010-01-01T00:00:02.000Z"), 2*time.Millisecond, - func(i int64) int64 { return 2000 + 2*i }, - ), - }, - }, - { - name: "more offset windows than MaxPointsPerBlock", - every: 2 * time.Millisecond, - offset: 1 * time.Millisecond, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:00Z"), time.Millisecond, - func(i int64) int64 { return i }, - ), - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:01Z"), time.Millisecond, - func(i int64) int64 { return 1000 + i }, - ), - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:02Z"), time.Millisecond, - func(i int64) int64 { return 2000 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - func() *cursors.IntegerArray { - arr := makeIntegerArray( - 999, - mustParseTime("2010-01-01T00:00:00.001Z"), 2*time.Millisecond, - func(i int64) int64 { return 1 + 2*i }, - ) - return &cursors.IntegerArray{ - Timestamps: append([]int64{mustParseTime("2010-01-01T00:00:00.000Z").UnixNano()}, arr.Timestamps...), - Values: append([]int64{0}, arr.Values...), - } - }(), - makeIntegerArray( - 501, - mustParseTime("2010-01-01T00:00:01.999Z"), 2*time.Millisecond, - func(i int64) int64 { return 1999 + 2*i }, - ), - }, - }, - { - name: "whole series", - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 1, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(int64) int64 { return 100 }, - ), - }, - }, - { - name: "whole series no points", - inputArrays: []*cursors.IntegerArray{{}}, - wantIntegers: []*cursors.IntegerArray{}, - }, - { - name: "whole series two arrays", - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 10 + i }, - ), - makeIntegerArray( - 60, - mustParseTime("2010-01-01T01:00:00Z"), time.Minute, - func(i int64) int64 { return 70 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 1, - 
mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(int64) int64 { return 10 }, - ), - }, - }, - { - name: "whole series span epoch", - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 120, - mustParseTime("1969-12-31T23:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 1, - mustParseTime("1969-12-31T23:00:00Z"), time.Minute, - func(int64) int64 { return 100 }, - ), - }, - }, - { - name: "whole series span epoch two arrays", - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("1969-12-31T23:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - makeIntegerArray( - 60, - mustParseTime("1970-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 160 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 1, - mustParseTime("1969-12-31T23:00:00Z"), time.Minute, - func(int64) int64 { return 100 }, - ), - }, - }, - { - name: "whole series, with max int64 timestamp", - inputArrays: []*cursors.IntegerArray{ - { - Timestamps: []int64{math.MaxInt64}, - Values: []int64{12}, - }, - }, - wantIntegers: []*cursors.IntegerArray{ - { - Timestamps: []int64{math.MaxInt64}, - Values: []int64{12}, - }, - }, - }, - } - for _, tc := range testcases { - tc.createCursorFn = func(cur cursors.IntegerArrayCursor, every, offset int64, window interval.Window) cursors.Cursor { - if every == 0 { - if window.IsZero() { - return newIntegerLimitArrayCursor(cur) - } - } - // if either the every or offset are set, then create a window for nsec values - // every and window.Every should never BOTH be zero here - if every != 0 || offset != 0 { - window, _ = interval.NewWindow( - values.MakeDuration(every, 0, false), - values.MakeDuration(every, 0, false), - values.MakeDuration(offset, 0, false), - ) - } - - // otherwise just use the window that was passed in - return newIntegerWindowFirstArrayCursor(cur, window) - } - tc.run(t) - } -} - -func TestWindowLastArrayCursor(t *testing.T) { - testcases := []aggArrayCursorTest{ - { - name: "window", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:14:00Z"), 15*time.Minute, - func(i int64) int64 { return 14 + 15*i }, - ), - }, - }, - { - name: "empty windows", - every: time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { return i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { return i }, - ), - }, - }, - { - name: "unaligned window", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:30Z"), time.Minute, - func(i int64) int64 { return i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:14:30Z"), 15*time.Minute, - func(i int64) int64 { return 14 + 15*i }, - ), - }, - }, - { - name: "more unaligned window", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:01:30Z"), time.Minute, - func(i int64) int64 { return i }, - ), - }, - wantIntegers: 
[]*cursors.IntegerArray{ - { - Timestamps: []int64{ - mustParseTime("2010-01-01T00:14:30Z").UnixNano(), - mustParseTime("2010-01-01T00:29:30Z").UnixNano(), - mustParseTime("2010-01-01T00:44:30Z").UnixNano(), - mustParseTime("2010-01-01T00:59:30Z").UnixNano(), - mustParseTime("2010-01-01T01:00:30Z").UnixNano(), - }, - Values: []int64{13, 28, 43, 58, 59}, - }, - }, - }, - { - name: "window two input arrays", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return i }, - ), - makeIntegerArray( - 60, - mustParseTime("2010-01-01T01:00:00Z"), time.Minute, - func(i int64) int64 { return 60 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 8, - mustParseTime("2010-01-01T00:14:00Z"), 15*time.Minute, - func(i int64) int64 { return 14 + 15*i }, - ), - }, - }, - { - name: "window spans input arrays", - every: 40 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return i }, - ), - makeIntegerArray( - 60, - mustParseTime("2010-01-01T01:00:00Z"), time.Minute, - func(i int64) int64 { return 60 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 3, - mustParseTime("2010-01-01T00:39:00Z"), 40*time.Minute, - func(i int64) int64 { return 39 + 40*i }, - ), - }, - }, - { - name: "more windows than MaxPointsPerBlock", - every: 2 * time.Millisecond, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:00Z"), time.Millisecond, - func(i int64) int64 { return i }, - ), - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:01Z"), time.Millisecond, - func(i int64) int64 { return 1000 + i }, - ), - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:02Z"), time.Millisecond, - func(i int64) int64 { return 2000 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 1000, - mustParseTime("2010-01-01T00:00:00.001Z"), 2*time.Millisecond, - func(i int64) int64 { return 1 + 2*i }, - ), - makeIntegerArray( - 500, - mustParseTime("2010-01-01T00:00:02.001Z"), 2*time.Millisecond, - func(i int64) int64 { return 2001 + 2*i }, - ), - }, - }, - { - name: "MaxPointsPerBlock", - every: time.Millisecond, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:00Z"), time.Millisecond, - func(i int64) int64 { return i }, - ), - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:01Z"), time.Millisecond, - func(i int64) int64 { return 1000 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 1000, - mustParseTime("2010-01-01T00:00:00Z"), time.Millisecond, - func(i int64) int64 { return i }, - ), - makeIntegerArray( - 1000, - mustParseTime("2010-01-01T00:00:01Z"), time.Millisecond, - func(i int64) int64 { return 1000 + i }, - ), - }, - }, - } - for _, tc := range testcases { - tc.createCursorFn = func(cur cursors.IntegerArrayCursor, every, offset int64, window interval.Window) cursors.Cursor { - if every != 0 || offset != 0 { - window, _ = interval.NewWindow( - values.MakeDuration(every, 0, false), - values.MakeDuration(every, 0, false), - values.MakeDuration(offset, 0, false), - ) - } - return newIntegerWindowLastArrayCursor(cur, window) - } 
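		// Note on the construction shared by the window-aggregate tests in this
		// file: when a case specifies `every`/`offset` as plain durations,
		// createCursorFn turns them into a fixed-width interval.Window built from
		// nanosecond values, while cases that leave both at zero fall back to the
		// interval.Window stored on the test case itself (e.g. the month-based
		// windows). A minimal sketch of that conversion, using the same interval
		// and values packages these tests already rely on:
		//
		//	every := int64(15 * time.Minute) // window width in nanoseconds
		//	window, _ := interval.NewWindow(
		//		values.MakeDuration(every, 0, false),              // every
		//		values.MakeDuration(every, 0, false),              // period
		//		values.MakeDuration(int64(time.Minute), 0, false), // offset
		//	)
		//	// window is then handed to newIntegerWindowLastArrayCursor (or any of
		//	// the other window cursor constructors), exactly as createCursorFn
		//	// does above.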
- tc.run(t) - } -} - -func TestIntegerCountArrayCursor(t *testing.T) { - maxTimestamp := time.Unix(0, math.MaxInt64) - - testcases := []aggArrayCursorTest{ - { - name: "window", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(4, mustParseTime("2010-01-01T00:15:00Z"), 15*time.Minute, func(int64) int64 { return 15 }), - }, - }, - { - name: "offset window", - every: 15 * time.Minute, - offset: time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(5, mustParseTime("2010-01-01T00:01:00Z"), 15*time.Minute, func(i int64) int64 { - switch i { - case 0: - return 1 - case 4: - return 14 - default: - return 15 - } - }), - }, - }, - { - name: "empty windows", - every: time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:01:00Z"), 15*time.Minute, - func(i int64) int64 { return 1 }, - ), - }, - }, - { - name: "empty offset windows", - every: time.Minute, - offset: time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:01:00Z"), 15*time.Minute, - func(int64) int64 { return 1 }, - ), - }, - }, - { - name: "unaligned window", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:30Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:15:00Z"), 15*time.Minute, - func(i int64) int64 { - return 15 - }), - }, - }, - { - name: "unaligned offset window", - every: 15 * time.Minute, - offset: 45 * time.Second, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:30Z"), time.Minute, - func(i int64) int64 { return i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 5, - mustParseTime("2010-01-01T00:00:45Z"), 15*time.Minute, - func(i int64) int64 { - switch i { - case 0: - return 1 - case 4: - return 14 - default: - return 15 - } - }), - }, - }, - { - name: "more unaligned window", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:01:30Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 5, - mustParseTime("2010-01-01T00:15:00Z"), 15*time.Minute, - func(i int64) int64 { - switch i { - case 0: - return 14 - case 4: - return 1 - default: - return 15 - } - }), - }, - }, - { - name: "window two input arrays", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - makeIntegerArray( - 60, - mustParseTime("2010-01-01T01:00:00Z"), 
time.Minute, - func(i int64) int64 { return 200 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(8, mustParseTime("2010-01-01T00:15:00Z"), 15*time.Minute, func(int64) int64 { return 15 }), - }, - }, - { - name: "offset window two input arrays", - every: 30 * time.Minute, - offset: 27 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return i }, - ), - makeIntegerArray( - 60, - mustParseTime("2010-01-01T01:00:00Z"), time.Minute, - func(i int64) int64 { return 60 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(5, mustParseTime("2010-01-01T00:27:00Z"), 30*time.Minute, func(i int64) int64 { - switch i { - case 0: - return 27 - case 4: - return 3 - default: - return 30 - } - }), - }, - }, - { - name: "window spans input arrays", - every: 40 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - makeIntegerArray( - 60, - mustParseTime("2010-01-01T01:00:00Z"), time.Minute, - func(i int64) int64 { return 200 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(3, mustParseTime("2010-01-01T00:40:00Z"), 40*time.Minute, func(int64) int64 { return 40 }), - }, - }, - { - name: "offset window spans input arrays", - every: 40 * time.Minute, - offset: 10 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return i }, - ), - makeIntegerArray( - 60, - mustParseTime("2010-01-01T01:00:00Z"), time.Minute, - func(i int64) int64 { return 60 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(4, mustParseTime("2010-01-01T00:10:00Z"), 40*time.Minute, func(i int64) int64 { - switch i { - case 0: - return 10 - case 3: - return 30 - default: - return 40 - } - }), - }, - }, - { - name: "more windows than MaxPointsPerBlock", - every: 2 * time.Millisecond, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:00Z"), time.Millisecond, - func(i int64) int64 { return i }, - ), - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:01Z"), time.Millisecond, - func(i int64) int64 { return i }, - ), - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:02Z"), time.Millisecond, - func(i int64) int64 { return i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 1000, - mustParseTime("2010-01-01T00:00:00.002Z"), 2*time.Millisecond, - func(i int64) int64 { return 2 }, - ), - makeIntegerArray( - 500, - mustParseTime("2010-01-01T00:00:02.002Z"), 2*time.Millisecond, - func(i int64) int64 { return 2 }, - ), - }, - }, - { - name: "more offset windows than MaxPointsPerBlock", - every: 2 * time.Millisecond, - offset: 1 * time.Millisecond, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:00Z"), time.Millisecond, - func(i int64) int64 { return i }, - ), - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:01Z"), time.Millisecond, - func(i int64) int64 { return 1000 + i }, - ), - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:02Z"), time.Millisecond, - func(i 
int64) int64 { return 2000 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 1000, - mustParseTime("2010-01-01T00:00:00.001Z"), 2*time.Millisecond, - func(i int64) int64 { - switch i { - case 0: - return 1 - default: - return 2 - } - }, - ), - makeIntegerArray( - 501, - mustParseTime("2010-01-01T00:00:02.001Z"), 2*time.Millisecond, - func(i int64) int64 { - switch i { - case 500: - return 1 - default: - return 2 - } - }, - ), - }, - }, - { - name: "whole series", - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, maxTimestamp, 40*time.Minute, func(i int64) int64 { return 60 }), - }, - }, - { - name: "whole series no points", - inputArrays: []*cursors.IntegerArray{{}}, - wantIntegers: []*cursors.IntegerArray{}, - }, - { - name: "whole series two arrays", - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - makeIntegerArray( - 60, - mustParseTime("2010-01-01T01:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, maxTimestamp, 40*time.Minute, func(int64) int64 { return 120 }), - }, - }, - { - name: "whole series span epoch", - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 120, - mustParseTime("1969-12-31T23:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, maxTimestamp, 40*time.Minute, func(int64) int64 { return 120 }), - }, - }, - { - name: "whole series span epoch two arrays", - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("1969-12-31T23:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - makeIntegerArray( - 60, - mustParseTime("1970-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, maxTimestamp, 40*time.Minute, func(int64) int64 { return 120 }), - }, - }, - { - name: "whole series, with max int64 timestamp", - inputArrays: []*cursors.IntegerArray{ - { - Timestamps: []int64{math.MaxInt64}, - Values: []int64{0}, - }, - }, - wantIntegers: []*cursors.IntegerArray{ - { - Timestamps: []int64{math.MaxInt64}, - Values: []int64{1}, - }, - }, - }, - { - name: "monthly spans multiple periods", - window: func() interval.Window { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - return window - }(), - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - makeIntegerArray( - 60, - mustParseTime("2010-02-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(2, mustParseTime("2010-02-01T00:00:00Z"), 2419200000000000, func(int64) int64 { return 60 }), - }, - }, - { - name: "monthly window w/ offset", - window: func() interval.Window { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(1209600000000000, 0, false), - ) - return window - }(), - inputArrays: []*cursors.IntegerArray{ 
- makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, mustParseTime("2010-01-15T00:00:00Z"), 0, func(int64) int64 { return 60 }), - }, - }, - { - name: "monthly windows", - window: func() interval.Window { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - return window - }(), - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, mustParseTime("2010-02-01T00:00:00Z"), 0, func(int64) int64 { return 60 }), - }, - }, - } - for _, tc := range testcases { - tc.createCursorFn = func(cur cursors.IntegerArrayCursor, every, offset int64, window interval.Window) cursors.Cursor { - if every != 0 || offset != 0 { - window, _ = interval.NewWindow( - values.MakeDuration(every, 0, false), - values.MakeDuration(every, 0, false), - values.MakeDuration(offset, 0, false), - ) - } - return newIntegerWindowCountArrayCursor(cur, window) - } - tc.run(t) - } -} - -func TestIntegerSumArrayCursor(t *testing.T) { - maxTimestamp := time.Unix(0, math.MaxInt64) - - testcases := []aggArrayCursorTest{ - { - name: "window", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 2 }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(4, mustParseTime("2010-01-01T00:15:00Z"), 15*time.Minute, func(int64) int64 { return 30 }), - }, - }, - { - name: "empty windows", - every: time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:01:00Z"), 15*time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - }, - { - name: "unaligned window", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:30Z"), time.Minute, - func(i int64) int64 { return 2 }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 4, - mustParseTime("2010-01-01T00:15:00Z"), 15*time.Minute, - func(i int64) int64 { - return 30 - }), - }, - }, - { - name: "more unaligned window", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:01:30Z"), time.Minute, - func(i int64) int64 { return 2 }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 5, - mustParseTime("2010-01-01T00:15:00Z"), 15*time.Minute, - func(i int64) int64 { - switch i { - case 0: - return 28 - case 4: - return 2 - default: - return 30 - } - }), - }, - }, - { - name: "window two input arrays", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 2 }, - ), - makeIntegerArray( - 60, - mustParseTime("2010-01-01T01:00:00Z"), time.Minute, - func(i int64) int64 { return 3 }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(8, mustParseTime("2010-01-01T00:15:00Z"), 15*time.Minute, - func(i int64) 
int64 { - if i < 4 { - return 30 - } else { - return 45 - } - }), - }, - }, - { - name: "window spans input arrays", - every: 40 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 2 }, - ), - makeIntegerArray( - 60, - mustParseTime("2010-01-01T01:00:00Z"), time.Minute, - func(i int64) int64 { return 3 }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(3, mustParseTime("2010-01-01T00:40:00Z"), 40*time.Minute, - func(i int64) int64 { - switch i { - case 0: - return 80 - case 1: - return 100 - case 2: - return 120 - } - return -1 - }), - }, - }, - { - name: "more windows than MaxPointsPerBlock", - every: 2 * time.Millisecond, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:00Z"), time.Millisecond, - func(i int64) int64 { return 2 }, - ), - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:01Z"), time.Millisecond, - func(i int64) int64 { return 3 }, - ), - makeIntegerArray( // 1 second, one point per ms - 1000, - mustParseTime("2010-01-01T00:00:02Z"), time.Millisecond, - func(i int64) int64 { return 4 }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray( - 1000, - mustParseTime("2010-01-01T00:00:00.002Z"), 2*time.Millisecond, - func(i int64) int64 { - if i < 500 { - return 4 - } else { - return 6 - } - }, - ), - makeIntegerArray( - 500, - mustParseTime("2010-01-01T00:00:02.002Z"), 2*time.Millisecond, - func(i int64) int64 { return 8 }, - ), - }, - }, - { - name: "whole series", - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 2 }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, maxTimestamp, 40*time.Minute, func(i int64) int64 { return 120 }), - }, - }, - { - name: "whole series no points", - inputArrays: []*cursors.IntegerArray{{}}, - wantIntegers: []*cursors.IntegerArray{}, - }, - { - name: "whole series two arrays", - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 2 }, - ), - makeIntegerArray( - 60, - mustParseTime("2010-01-01T01:00:00Z"), time.Minute, - func(i int64) int64 { return 3 }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, maxTimestamp, 40*time.Minute, - func(int64) int64 { - return 300 - }), - }, - }, - { - name: "whole series span epoch", - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 120, - mustParseTime("1969-12-31T23:00:00Z"), time.Minute, - func(i int64) int64 { return 2 }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, maxTimestamp, 40*time.Minute, func(int64) int64 { return 240 }), - }, - }, - { - name: "whole series span epoch two arrays", - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("1969-12-31T23:00:00Z"), time.Minute, - func(i int64) int64 { return 2 }, - ), - makeIntegerArray( - 60, - mustParseTime("1970-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 3 }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, maxTimestamp, 40*time.Minute, func(int64) int64 { return 300 }), - }, - }, - { - name: "whole series, with max int64 timestamp", - inputArrays: []*cursors.IntegerArray{ - { - Timestamps: []int64{math.MaxInt64}, - Values: 
[]int64{100}, - }, - }, - wantIntegers: []*cursors.IntegerArray{ - { - Timestamps: []int64{math.MaxInt64}, - Values: []int64{100}, - }, - }, - }, - } - for _, tc := range testcases { - tc.createCursorFn = func(cur cursors.IntegerArrayCursor, every, offset int64, window interval.Window) cursors.Cursor { - if every != 0 || offset != 0 { - window, _ = interval.NewWindow( - values.MakeDuration(every, 0, false), - values.MakeDuration(every, 0, false), - values.MakeDuration(offset, 0, false), - ) - } - return newIntegerWindowSumArrayCursor(cur, window) - } - tc.run(t) - } -} - -func TestWindowMinArrayCursor(t *testing.T) { - testcases := []aggArrayCursorTest{ - { - name: "no window", - every: 0, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, mustParseTime("2010-01-01T00:00:00Z"), 0, func(int64) int64 { return 100 }), - }, - }, - { - name: "no window min int", - every: 0, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { - if i%2 == 0 { - return math.MinInt64 - } - return 0 - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, mustParseTime("2010-01-01T00:00:00Z"), 0, func(int64) int64 { return math.MinInt64 }), - }, - }, - { - name: "no window max int", - every: 0, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { - if i%2 == 0 { - return math.MaxInt64 - } - return 0 - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, mustParseTime("2010-01-01T00:01:00Z"), 0, func(int64) int64 { return 0 }), - }, - }, - { - name: "window", - every: time.Hour, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 16, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { - base := (i / 4) * 100 - m := (i % 4) * 15 - return base + m - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(4, mustParseTime("2010-01-01T00:00:00Z"), time.Hour, - func(i int64) int64 { return i * 100 }), - }, - }, - { - name: "window offset", - every: time.Hour, - offset: 30 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 16, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { - base := (i / 4) * 100 - m := (i % 4) * 15 - return base + m - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - { - Timestamps: []int64{ - mustParseTime("2010-01-01T00:00:00Z").UnixNano(), - mustParseTime("2010-01-01T00:30:00Z").UnixNano(), - mustParseTime("2010-01-01T01:30:00Z").UnixNano(), - mustParseTime("2010-01-01T02:30:00Z").UnixNano(), - mustParseTime("2010-01-01T03:30:00Z").UnixNano(), - }, - Values: []int64{0, 30, 130, 230, 330}, - }, - }, - }, - { - name: "window desc values", - every: time.Hour, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 16, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { - base := (i / 4) * 100 - m := 60 - (i%4)*15 - return base + m - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(4, mustParseTime("2010-01-01T00:45:00Z"), time.Hour, - func(i int64) int64 { return i*100 + 15 }), - }, - }, - { - name: "window offset desc values", - every: time.Hour, - offset: 30 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - 
makeIntegerArray( - 16, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { - base := (i / 4) * 100 - m := 60 - (i%4)*15 - return base + m - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - { - Timestamps: []int64{ - mustParseTime("2010-01-01T00:15:00Z").UnixNano(), - mustParseTime("2010-01-01T00:45:00Z").UnixNano(), - mustParseTime("2010-01-01T01:45:00Z").UnixNano(), - mustParseTime("2010-01-01T02:45:00Z").UnixNano(), - mustParseTime("2010-01-01T03:45:00Z").UnixNano(), - }, - Values: []int64{45, 15, 115, 215, 315}, - }, - }, - }, - { - name: "window min int", - every: time.Hour, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 16, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { - return math.MinInt64 - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(4, mustParseTime("2010-01-01T00:00:00Z"), time.Hour, - func(i int64) int64 { return math.MinInt64 }), - }, - }, - { - name: "window max int", - every: time.Hour, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 16, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { - return math.MaxInt64 - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(4, mustParseTime("2010-01-01T00:00:00Z"), time.Hour, - func(i int64) int64 { return math.MaxInt64 }), - }, - }, - { - name: "empty window", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 2, - mustParseTime("2010-01-01T00:05:00Z"), 30*time.Minute, - func(i int64) int64 { - return 100 + i - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(2, mustParseTime("2010-01-01T00:05:00Z"), 30*time.Minute, - func(i int64) int64 { return 100 + i }), - }, - }, - { - name: "monthly windows", - window: func() interval.Window { - window, _ := interval.NewWindow( - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 1, false), - values.MakeDuration(0, 0, false), - ) - return window - }(), - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 1, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, mustParseTime("2010-01-01T00:00:00Z"), 0, func(int64) int64 { return 100 }), - }, - }, - } - for _, tc := range testcases { - tc.createCursorFn = func(cur cursors.IntegerArrayCursor, every, offset int64, window interval.Window) cursors.Cursor { - if every != 0 || offset != 0 { - window, _ = interval.NewWindow( - values.MakeDuration(every, 0, false), - values.MakeDuration(every, 0, false), - values.MakeDuration(offset, 0, false), - ) - } - return newIntegerWindowMinArrayCursor(cur, window) - } - tc.run(t) - } -} - -func TestWindowMaxArrayCursor(t *testing.T) { - testcases := []aggArrayCursorTest{ - { - name: "no window", - every: 0, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return 100 + i }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, mustParseTime("2010-01-01T00:59:00Z"), 0, func(int64) int64 { return 159 }), - }, - }, - { - name: "no window min int", - every: 0, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { - if i%2 == 0 { - return math.MinInt64 - } - return 0 - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, 
mustParseTime("2010-01-01T00:01:00Z"), 0, func(int64) int64 { return 0 }), - }, - }, - { - name: "no window max int", - every: 0, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 60, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { - if i%2 == 0 { - return math.MaxInt64 - } - return 0 - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(1, mustParseTime("2010-01-01T00:00:00Z"), 0, func(int64) int64 { return math.MaxInt64 }), - }, - }, - { - name: "window", - every: time.Hour, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 16, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { - base := (i / 4) * 100 - m := (i % 4) * 15 - return base + m - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(4, mustParseTime("2010-01-01T00:45:00Z"), time.Hour, - func(i int64) int64 { return i*100 + 45 }), - }, - }, - { - name: "window offset", - every: time.Hour, - offset: 30 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 16, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { - base := (i / 4) * 100 - m := (i % 4) * 15 - return base + m - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - { - Timestamps: []int64{ - mustParseTime("2010-01-01T00:15:00Z").UnixNano(), - mustParseTime("2010-01-01T01:15:00Z").UnixNano(), - mustParseTime("2010-01-01T02:15:00Z").UnixNano(), - mustParseTime("2010-01-01T03:15:00Z").UnixNano(), - mustParseTime("2010-01-01T03:45:00Z").UnixNano(), - }, - Values: []int64{15, 115, 215, 315, 345}, - }, - }, - }, - { - name: "window desc values", - every: time.Hour, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 16, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { - base := (i / 4) * 100 - m := 60 - (i%4)*15 - return base + m - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(4, mustParseTime("2010-01-01T00:00:00Z"), time.Hour, - func(i int64) int64 { return i*100 + 60 }), - }, - }, - { - name: "window offset desc values", - every: time.Hour, - offset: 30 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 16, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { - base := (i / 4) * 100 - m := 60 - (i%4)*15 - return base + m - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - { - Timestamps: []int64{ - mustParseTime("2010-01-01T00:00:00Z").UnixNano(), - mustParseTime("2010-01-01T01:00:00Z").UnixNano(), - mustParseTime("2010-01-01T02:00:00Z").UnixNano(), - mustParseTime("2010-01-01T03:00:00Z").UnixNano(), - mustParseTime("2010-01-01T03:30:00Z").UnixNano(), - }, - Values: []int64{60, 160, 260, 360, 330}, - }, - }, - }, - { - name: "window min int", - every: time.Hour, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 16, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { - return math.MinInt64 - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(4, mustParseTime("2010-01-01T00:00:00Z"), time.Hour, - func(i int64) int64 { return math.MinInt64 }), - }, - }, - { - name: "window max int", - every: time.Hour, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 16, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { - return math.MaxInt64 - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(4, mustParseTime("2010-01-01T00:00:00Z"), time.Hour, - func(i int64) 
int64 { return math.MaxInt64 }), - }, - }, - { - name: "empty window", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 2, - mustParseTime("2010-01-01T00:05:00Z"), 30*time.Minute, - func(i int64) int64 { - return 100 + i - }, - ), - }, - wantIntegers: []*cursors.IntegerArray{ - makeIntegerArray(2, mustParseTime("2010-01-01T00:05:00Z"), 30*time.Minute, - func(i int64) int64 { return 100 + i }), - }, - }, - } - for _, tc := range testcases { - tc.createCursorFn = func(cur cursors.IntegerArrayCursor, every, offset int64, window interval.Window) cursors.Cursor { - if every != 0 || offset != 0 { - window, _ = interval.NewWindow( - values.MakeDuration(every, 0, false), - values.MakeDuration(every, 0, false), - values.MakeDuration(offset, 0, false), - ) - } - return newIntegerWindowMaxArrayCursor(cur, window) - } - tc.run(t) - } -} - -func TestWindowMeanArrayCursor(t *testing.T) { - maxTimestamp := time.Unix(0, math.MaxInt64) - - testcases := []aggArrayCursorTest{ - { - name: "no window", - every: 0, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 5, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return i + 1 }, - ), - }, - wantFloats: []*cursors.FloatArray{ - makeFloatArray(1, maxTimestamp, 0, func(int64) float64 { return 3.0 }), - }, - }, - { - name: "no window fraction result", - every: 0, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 6, - mustParseTime("2010-01-01T00:00:00Z"), time.Minute, - func(i int64) int64 { return i + 1 }, - ), - }, - wantFloats: []*cursors.FloatArray{ - makeFloatArray(1, maxTimestamp, 0, func(int64) float64 { return 3.5 }), - }, - }, - { - name: "no window empty", - every: 0, - inputArrays: []*cursors.IntegerArray{}, - wantFloats: []*cursors.FloatArray{}, - }, - { - name: "window", - every: 30 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 8, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { - return i - }, - ), - }, - wantFloats: []*cursors.FloatArray{ - makeFloatArray(4, mustParseTime("2010-01-01T00:30:00Z"), 30*time.Minute, - func(i int64) float64 { return 0.5 + float64(i)*2 }), - }, - }, - { - name: "window offset", - every: 30 * time.Minute, - offset: 5 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 8, - mustParseTime("2010-01-01T00:00:00Z"), 15*time.Minute, - func(i int64) int64 { - return i - }, - ), - }, - wantFloats: []*cursors.FloatArray{ - makeFloatArray(5, mustParseTime("2010-01-01T00:05:00Z"), 30*time.Minute, - func(i int64) float64 { return []float64{0, 1.5, 3.5, 5.5, 7}[i] }), - }, - }, - { - name: "empty window", - every: 15 * time.Minute, - inputArrays: []*cursors.IntegerArray{ - makeIntegerArray( - 2, - mustParseTime("2010-01-01T00:05:00Z"), 30*time.Minute, - func(i int64) int64 { - return 100 + i - }, - ), - }, - wantFloats: []*cursors.FloatArray{ - makeFloatArray(2, mustParseTime("2010-01-01T00:15:00Z"), 30*time.Minute, - func(i int64) float64 { return 100 + float64(i) }), - }, - }, - } - for _, tc := range testcases { - tc.createCursorFn = func(cur cursors.IntegerArrayCursor, every, offset int64, window interval.Window) cursors.Cursor { - if every != 0 || offset != 0 { - window, _ = interval.NewWindow( - values.MakeDuration(every, 0, false), - values.MakeDuration(every, 0, false), - values.MakeDuration(offset, 0, false), - ) - } - return newIntegerWindowMeanArrayCursor(cur, window) - } - tc.run(t) - } -} - -// This test replicates GitHub issue -// 
https://github.com/influxdata/influxdb/issues/20035 -func TestMultiShardArrayCursor(t *testing.T) { - t.Run("should drain all CursorIterators", func(t *testing.T) { - ctrl := gomock.NewController(t) - t.Cleanup(ctrl.Finish) - - var ( - emptyArray = cursors.NewIntegerArrayLen(0) - oneElementArray = cursors.NewIntegerArrayLen(1) - iter cursors.CursorIterators - ) - - { - mc := mock.NewMockIntegerArrayCursor(ctrl) - mc.EXPECT(). - Next(). - Return(oneElementArray) - mc.EXPECT(). - Next(). - Return(emptyArray) - mc.EXPECT(). - Close() - - ci := mock.NewMockCursorIterator(ctrl) - ci.EXPECT(). - Next(gomock.Any(), gomock.Any()). - Return(mc, nil) - iter = append(iter, ci) - } - - // return an empty cursor, which should be skipped - { - ci := mock.NewMockCursorIterator(ctrl) - ci.EXPECT(). - Next(gomock.Any(), gomock.Any()). - Return(nil, nil) - iter = append(iter, ci) - } - { - mc := mock.NewMockIntegerArrayCursor(ctrl) - mc.EXPECT(). - Next(). - Return(oneElementArray) - mc.EXPECT(). - Next(). - Return(emptyArray) - - ci := mock.NewMockCursorIterator(ctrl) - ci.EXPECT(). - Next(gomock.Any(), gomock.Any()). - Return(mc, nil) - iter = append(iter, ci) - } - - row := SeriesRow{Query: iter} - ctx := context.Background() - msac := newMultiShardArrayCursors(ctx, models.MinNanoTime, models.MaxNanoTime, true) - cur, ok := msac.createCursor(row).(cursors.IntegerArrayCursor) - require.Truef(t, ok, "Expected IntegerArrayCursor") - - ia := cur.Next() - require.NotNil(t, ia) - require.Equal(t, 1, ia.Len()) - - ia = cur.Next() - require.NotNil(t, ia) - require.Equal(t, 1, ia.Len()) - - ia = cur.Next() - require.NotNil(t, ia) - require.Equal(t, 0, ia.Len()) - }) -} - -type MockExpression struct { - EvalBoolFunc func(v Valuer) bool -} - -func (e *MockExpression) EvalBool(v Valuer) bool { return e.EvalBoolFunc(v) } diff --git a/storage/reads/datatypes/Makefile b/storage/reads/datatypes/Makefile deleted file mode 100644 index 9ec158ee621..00000000000 --- a/storage/reads/datatypes/Makefile +++ /dev/null @@ -1,30 +0,0 @@ -# List any generated files here -TARGETS = predicate.pb.go \ - storage_common.pb.go - -# List any source files used to generate the targets here -SOURCES = gen.go \ - predicate.proto \ - storage_common.proto - -# List any directories that have their own Makefile here -SUBDIRS = - -# Default target -all: $(SUBDIRS) $(TARGETS) - -# Recurse into subdirs for same make goal -$(SUBDIRS): - $(MAKE) -C $@ $(MAKECMDGOALS) - -# Clean all targets recursively -clean: $(SUBDIRS) - rm -f $(TARGETS) - -# Define go generate if not already defined -GO_GENERATE := go generate - -$(TARGETS): $(SOURCES) - $(GO_GENERATE) -x - -.PHONY: all clean $(SUBDIRS) diff --git a/storage/reads/datatypes/gen.go b/storage/reads/datatypes/gen.go deleted file mode 100644 index d58781c23ae..00000000000 --- a/storage/reads/datatypes/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -package datatypes - -//go:generate protoc --go_out=. 
predicate.proto storage_common.proto diff --git a/storage/reads/datatypes/hintflags.go b/storage/reads/datatypes/hintflags.go deleted file mode 100644 index 303823ed8d4..00000000000 --- a/storage/reads/datatypes/hintflags.go +++ /dev/null @@ -1,50 +0,0 @@ -package datatypes - -import "strings" - -type HintFlags uint32 - -func (h HintFlags) NoPoints() bool { - return uint32(h)&uint32(ReadGroupRequest_HintNoPoints) != 0 -} - -func (h *HintFlags) SetNoPoints() { - *h |= HintFlags(ReadGroupRequest_HintNoPoints) -} - -func (h HintFlags) NoSeries() bool { - return uint32(h)&uint32(ReadGroupRequest_HintNoSeries) != 0 -} - -func (h *HintFlags) SetNoSeries() { - *h |= HintFlags(ReadGroupRequest_HintNoSeries) -} - -func (h HintFlags) HintSchemaAllTime() bool { - return uint32(h)&uint32(ReadGroupRequest_HintSchemaAllTime) != 0 -} - -func (h *HintFlags) SetHintSchemaAllTime() { - *h |= HintFlags(ReadGroupRequest_HintSchemaAllTime) -} - -func (h HintFlags) String() string { - f := uint32(h) - - var s []string - if h == 0 { - return "HINT_NONE" - } - - for k, v := range ReadGroupRequest_HintFlags_value { - if v == 0 { - continue - } - v := uint32(v) - if f&v == v { - s = append(s, k) - } - } - - return strings.Join(s, ",") -} diff --git a/storage/reads/datatypes/key_types.go b/storage/reads/datatypes/key_types.go deleted file mode 100644 index 9e4584cdb22..00000000000 --- a/storage/reads/datatypes/key_types.go +++ /dev/null @@ -1,7 +0,0 @@ -package datatypes - -const ( - FieldKey = "_field" - MeasurementKey = "_measurement" - ValueKey = "_value" -) diff --git a/storage/reads/datatypes/predicate.pb.go b/storage/reads/datatypes/predicate.pb.go deleted file mode 100644 index 1206e1f398d..00000000000 --- a/storage/reads/datatypes/predicate.pb.go +++ /dev/null @@ -1,624 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.27.1 -// protoc v3.17.3 -// source: predicate.proto - -package datatypes - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Node_Type int32 - -const ( - Node_TypeLogicalExpression Node_Type = 0 - Node_TypeComparisonExpression Node_Type = 1 - Node_TypeParenExpression Node_Type = 2 - Node_TypeTagRef Node_Type = 3 - Node_TypeLiteral Node_Type = 4 - Node_TypeFieldRef Node_Type = 5 -) - -// Enum value maps for Node_Type. 
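// (These name/value lookup tables are part of the protoc-gen-go output noted in
// the file header: they let the generated code translate between an enum's
// numeric wire value and its textual name, which is what backs
// (Node_Type).String() below. Regenerating via `go generate` rewrites them, so
// they are not meant to be edited by hand.)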
-var ( - Node_Type_name = map[int32]string{ - 0: "TypeLogicalExpression", - 1: "TypeComparisonExpression", - 2: "TypeParenExpression", - 3: "TypeTagRef", - 4: "TypeLiteral", - 5: "TypeFieldRef", - } - Node_Type_value = map[string]int32{ - "TypeLogicalExpression": 0, - "TypeComparisonExpression": 1, - "TypeParenExpression": 2, - "TypeTagRef": 3, - "TypeLiteral": 4, - "TypeFieldRef": 5, - } -) - -func (x Node_Type) Enum() *Node_Type { - p := new(Node_Type) - *p = x - return p -} - -func (x Node_Type) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Node_Type) Descriptor() protoreflect.EnumDescriptor { - return file_predicate_proto_enumTypes[0].Descriptor() -} - -func (Node_Type) Type() protoreflect.EnumType { - return &file_predicate_proto_enumTypes[0] -} - -func (x Node_Type) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Node_Type.Descriptor instead. -func (Node_Type) EnumDescriptor() ([]byte, []int) { - return file_predicate_proto_rawDescGZIP(), []int{0, 0} -} - -type Node_Comparison int32 - -const ( - Node_ComparisonEqual Node_Comparison = 0 - Node_ComparisonNotEqual Node_Comparison = 1 - Node_ComparisonStartsWith Node_Comparison = 2 - Node_ComparisonRegex Node_Comparison = 3 - Node_ComparisonNotRegex Node_Comparison = 4 - Node_ComparisonLess Node_Comparison = 5 - Node_ComparisonLessEqual Node_Comparison = 6 - Node_ComparisonGreater Node_Comparison = 7 - Node_ComparisonGreaterEqual Node_Comparison = 8 -) - -// Enum value maps for Node_Comparison. -var ( - Node_Comparison_name = map[int32]string{ - 0: "ComparisonEqual", - 1: "ComparisonNotEqual", - 2: "ComparisonStartsWith", - 3: "ComparisonRegex", - 4: "ComparisonNotRegex", - 5: "ComparisonLess", - 6: "ComparisonLessEqual", - 7: "ComparisonGreater", - 8: "ComparisonGreaterEqual", - } - Node_Comparison_value = map[string]int32{ - "ComparisonEqual": 0, - "ComparisonNotEqual": 1, - "ComparisonStartsWith": 2, - "ComparisonRegex": 3, - "ComparisonNotRegex": 4, - "ComparisonLess": 5, - "ComparisonLessEqual": 6, - "ComparisonGreater": 7, - "ComparisonGreaterEqual": 8, - } -) - -func (x Node_Comparison) Enum() *Node_Comparison { - p := new(Node_Comparison) - *p = x - return p -} - -func (x Node_Comparison) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Node_Comparison) Descriptor() protoreflect.EnumDescriptor { - return file_predicate_proto_enumTypes[1].Descriptor() -} - -func (Node_Comparison) Type() protoreflect.EnumType { - return &file_predicate_proto_enumTypes[1] -} - -func (x Node_Comparison) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Node_Comparison.Descriptor instead. -func (Node_Comparison) EnumDescriptor() ([]byte, []int) { - return file_predicate_proto_rawDescGZIP(), []int{0, 1} -} - -// Logical operators apply to boolean values and combine to produce a single boolean result. -type Node_Logical int32 - -const ( - Node_LogicalAnd Node_Logical = 0 - Node_LogicalOr Node_Logical = 1 -) - -// Enum value maps for Node_Logical. 
-var ( - Node_Logical_name = map[int32]string{ - 0: "LogicalAnd", - 1: "LogicalOr", - } - Node_Logical_value = map[string]int32{ - "LogicalAnd": 0, - "LogicalOr": 1, - } -) - -func (x Node_Logical) Enum() *Node_Logical { - p := new(Node_Logical) - *p = x - return p -} - -func (x Node_Logical) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Node_Logical) Descriptor() protoreflect.EnumDescriptor { - return file_predicate_proto_enumTypes[2].Descriptor() -} - -func (Node_Logical) Type() protoreflect.EnumType { - return &file_predicate_proto_enumTypes[2] -} - -func (x Node_Logical) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Node_Logical.Descriptor instead. -func (Node_Logical) EnumDescriptor() ([]byte, []int) { - return file_predicate_proto_rawDescGZIP(), []int{0, 2} -} - -type Node struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NodeType Node_Type `protobuf:"varint,1,opt,name=node_type,json=nodeType,proto3,enum=influxdata.platform.storage.Node_Type" json:"node_type,omitempty"` // [(gogoproto.customname) = "NodeType", (gogoproto.jsontag) = "nodeType"]; - Children []*Node `protobuf:"bytes,2,rep,name=children,proto3" json:"children,omitempty"` - // Types that are assignable to Value: - // - // *Node_StringValue - // *Node_BooleanValue - // *Node_IntegerValue - // *Node_UnsignedValue - // *Node_FloatValue - // *Node_RegexValue - // *Node_TagRefValue - // *Node_FieldRefValue - // *Node_Logical_ - // *Node_Comparison_ - Value isNode_Value `protobuf_oneof:"value"` -} - -func (x *Node) Reset() { - *x = Node{} - if protoimpl.UnsafeEnabled { - mi := &file_predicate_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Node) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Node) ProtoMessage() {} - -func (x *Node) ProtoReflect() protoreflect.Message { - mi := &file_predicate_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Node.ProtoReflect.Descriptor instead. 
-func (*Node) Descriptor() ([]byte, []int) { - return file_predicate_proto_rawDescGZIP(), []int{0} -} - -func (x *Node) GetNodeType() Node_Type { - if x != nil { - return x.NodeType - } - return Node_TypeLogicalExpression -} - -func (x *Node) GetChildren() []*Node { - if x != nil { - return x.Children - } - return nil -} - -func (m *Node) GetValue() isNode_Value { - if m != nil { - return m.Value - } - return nil -} - -func (x *Node) GetStringValue() string { - if x, ok := x.GetValue().(*Node_StringValue); ok { - return x.StringValue - } - return "" -} - -func (x *Node) GetBooleanValue() bool { - if x, ok := x.GetValue().(*Node_BooleanValue); ok { - return x.BooleanValue - } - return false -} - -func (x *Node) GetIntegerValue() int64 { - if x, ok := x.GetValue().(*Node_IntegerValue); ok { - return x.IntegerValue - } - return 0 -} - -func (x *Node) GetUnsignedValue() uint64 { - if x, ok := x.GetValue().(*Node_UnsignedValue); ok { - return x.UnsignedValue - } - return 0 -} - -func (x *Node) GetFloatValue() float64 { - if x, ok := x.GetValue().(*Node_FloatValue); ok { - return x.FloatValue - } - return 0 -} - -func (x *Node) GetRegexValue() string { - if x, ok := x.GetValue().(*Node_RegexValue); ok { - return x.RegexValue - } - return "" -} - -func (x *Node) GetTagRefValue() string { - if x, ok := x.GetValue().(*Node_TagRefValue); ok { - return x.TagRefValue - } - return "" -} - -func (x *Node) GetFieldRefValue() string { - if x, ok := x.GetValue().(*Node_FieldRefValue); ok { - return x.FieldRefValue - } - return "" -} - -func (x *Node) GetLogical() Node_Logical { - if x, ok := x.GetValue().(*Node_Logical_); ok { - return x.Logical - } - return Node_LogicalAnd -} - -func (x *Node) GetComparison() Node_Comparison { - if x, ok := x.GetValue().(*Node_Comparison_); ok { - return x.Comparison - } - return Node_ComparisonEqual -} - -type isNode_Value interface { - isNode_Value() -} - -type Node_StringValue struct { - StringValue string `protobuf:"bytes,3,opt,name=StringValue,proto3,oneof"` -} - -type Node_BooleanValue struct { - BooleanValue bool `protobuf:"varint,4,opt,name=BooleanValue,proto3,oneof"` -} - -type Node_IntegerValue struct { - IntegerValue int64 `protobuf:"varint,5,opt,name=IntegerValue,proto3,oneof"` -} - -type Node_UnsignedValue struct { - UnsignedValue uint64 `protobuf:"varint,6,opt,name=UnsignedValue,proto3,oneof"` -} - -type Node_FloatValue struct { - FloatValue float64 `protobuf:"fixed64,7,opt,name=FloatValue,proto3,oneof"` -} - -type Node_RegexValue struct { - RegexValue string `protobuf:"bytes,8,opt,name=RegexValue,proto3,oneof"` -} - -type Node_TagRefValue struct { - TagRefValue string `protobuf:"bytes,9,opt,name=TagRefValue,proto3,oneof"` -} - -type Node_FieldRefValue struct { - FieldRefValue string `protobuf:"bytes,10,opt,name=FieldRefValue,proto3,oneof"` -} - -type Node_Logical_ struct { - Logical Node_Logical `protobuf:"varint,11,opt,name=logical,proto3,enum=influxdata.platform.storage.Node_Logical,oneof"` -} - -type Node_Comparison_ struct { - Comparison Node_Comparison `protobuf:"varint,12,opt,name=comparison,proto3,enum=influxdata.platform.storage.Node_Comparison,oneof"` -} - -func (*Node_StringValue) isNode_Value() {} - -func (*Node_BooleanValue) isNode_Value() {} - -func (*Node_IntegerValue) isNode_Value() {} - -func (*Node_UnsignedValue) isNode_Value() {} - -func (*Node_FloatValue) isNode_Value() {} - -func (*Node_RegexValue) isNode_Value() {} - -func (*Node_TagRefValue) isNode_Value() {} - -func (*Node_FieldRefValue) isNode_Value() {} - -func (*Node_Logical_) 
isNode_Value() {} - -func (*Node_Comparison_) isNode_Value() {} - -type Predicate struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Root *Node `protobuf:"bytes,1,opt,name=root,proto3" json:"root,omitempty"` -} - -func (x *Predicate) Reset() { - *x = Predicate{} - if protoimpl.UnsafeEnabled { - mi := &file_predicate_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Predicate) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Predicate) ProtoMessage() {} - -func (x *Predicate) ProtoReflect() protoreflect.Message { - mi := &file_predicate_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Predicate.ProtoReflect.Descriptor instead. -func (*Predicate) Descriptor() ([]byte, []int) { - return file_predicate_proto_rawDescGZIP(), []int{1} -} - -func (x *Predicate) GetRoot() *Node { - if x != nil { - return x.Root - } - return nil -} - -var File_predicate_proto protoreflect.FileDescriptor - -var file_predicate_proto_rawDesc = []byte{ - 0x0a, 0x0f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x1b, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, - 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0xed, - 0x07, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x69, 0x6e, 0x66, - 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3d, 0x0a, 0x08, - 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, - 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, - 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x4e, 0x6f, 0x64, - 0x65, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x12, 0x22, 0x0a, 0x0b, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x24, 0x0a, 0x0c, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0c, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x24, 0x0a, 0x0c, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0c, 0x49, - 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x26, 0x0a, 0x0d, 0x55, - 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x04, 0x48, 0x00, 0x52, 0x0d, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x20, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, - 0x56, 
0x61, 0x6c, 0x75, 0x65, 0x12, 0x20, 0x0a, 0x0a, 0x52, 0x65, 0x67, 0x65, 0x78, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0a, 0x52, 0x65, 0x67, - 0x65, 0x78, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x22, 0x0a, 0x0b, 0x54, 0x61, 0x67, 0x52, 0x65, - 0x66, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, - 0x54, 0x61, 0x67, 0x52, 0x65, 0x66, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x26, 0x0a, 0x0d, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x52, 0x65, 0x66, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x65, 0x66, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x45, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x48, - 0x00, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x12, 0x4e, 0x0a, 0x0a, 0x63, 0x6f, - 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, - 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, - 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x4e, 0x6f, 0x64, - 0x65, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0a, - 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x22, 0x8b, 0x01, 0x0a, 0x04, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x15, 0x54, 0x79, 0x70, 0x65, 0x4c, 0x6f, 0x67, 0x69, 0x63, - 0x61, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x10, 0x00, 0x12, 0x1c, - 0x0a, 0x18, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, - 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, - 0x54, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x79, 0x70, 0x65, 0x54, 0x61, 0x67, - 0x52, 0x65, 0x66, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x79, 0x70, 0x65, 0x4c, 0x69, 0x74, - 0x65, 0x72, 0x61, 0x6c, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x52, 0x65, 0x66, 0x10, 0x05, 0x22, 0xe0, 0x01, 0x0a, 0x0a, 0x43, 0x6f, 0x6d, - 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x6f, 0x6d, 0x70, 0x61, - 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, - 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x45, 0x71, 0x75, - 0x61, 0x6c, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x73, 0x57, 0x69, 0x74, 0x68, 0x10, 0x02, 0x12, 0x13, - 0x0a, 0x0f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x52, 0x65, 0x67, 0x65, - 0x78, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, - 0x6e, 0x4e, 0x6f, 0x74, 0x52, 0x65, 0x67, 0x65, 0x78, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x43, - 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x4c, 0x65, 0x73, 0x73, 0x10, 0x05, 0x12, - 0x17, 0x0a, 0x13, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x4c, 0x65, 0x73, - 0x73, 0x45, 0x71, 0x75, 0x61, 
0x6c, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x70, - 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x47, 0x72, 0x65, 0x61, 0x74, 0x65, 0x72, 0x10, 0x07, 0x12, - 0x1a, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x47, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x72, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x10, 0x08, 0x22, 0x28, 0x0a, 0x07, 0x4c, - 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x12, 0x0e, 0x0a, 0x0a, 0x4c, 0x6f, 0x67, 0x69, 0x63, 0x61, - 0x6c, 0x41, 0x6e, 0x64, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x69, 0x63, 0x61, - 0x6c, 0x4f, 0x72, 0x10, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x42, - 0x0a, 0x09, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x72, - 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x6e, 0x66, 0x6c, - 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x72, 0x6f, - 0x6f, 0x74, 0x42, 0x0d, 0x5a, 0x0b, 0x2e, 0x3b, 0x64, 0x61, 0x74, 0x61, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_predicate_proto_rawDescOnce sync.Once - file_predicate_proto_rawDescData = file_predicate_proto_rawDesc -) - -func file_predicate_proto_rawDescGZIP() []byte { - file_predicate_proto_rawDescOnce.Do(func() { - file_predicate_proto_rawDescData = protoimpl.X.CompressGZIP(file_predicate_proto_rawDescData) - }) - return file_predicate_proto_rawDescData -} - -var file_predicate_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_predicate_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_predicate_proto_goTypes = []interface{}{ - (Node_Type)(0), // 0: influxdata.platform.storage.Node.Type - (Node_Comparison)(0), // 1: influxdata.platform.storage.Node.Comparison - (Node_Logical)(0), // 2: influxdata.platform.storage.Node.Logical - (*Node)(nil), // 3: influxdata.platform.storage.Node - (*Predicate)(nil), // 4: influxdata.platform.storage.Predicate -} -var file_predicate_proto_depIdxs = []int32{ - 0, // 0: influxdata.platform.storage.Node.node_type:type_name -> influxdata.platform.storage.Node.Type - 3, // 1: influxdata.platform.storage.Node.children:type_name -> influxdata.platform.storage.Node - 2, // 2: influxdata.platform.storage.Node.logical:type_name -> influxdata.platform.storage.Node.Logical - 1, // 3: influxdata.platform.storage.Node.comparison:type_name -> influxdata.platform.storage.Node.Comparison - 3, // 4: influxdata.platform.storage.Predicate.root:type_name -> influxdata.platform.storage.Node - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name -} - -func init() { file_predicate_proto_init() } -func file_predicate_proto_init() { - if File_predicate_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_predicate_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Node); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_predicate_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Predicate); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - 
default: - return nil - } - } - } - file_predicate_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*Node_StringValue)(nil), - (*Node_BooleanValue)(nil), - (*Node_IntegerValue)(nil), - (*Node_UnsignedValue)(nil), - (*Node_FloatValue)(nil), - (*Node_RegexValue)(nil), - (*Node_TagRefValue)(nil), - (*Node_FieldRefValue)(nil), - (*Node_Logical_)(nil), - (*Node_Comparison_)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_predicate_proto_rawDesc, - NumEnums: 3, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_predicate_proto_goTypes, - DependencyIndexes: file_predicate_proto_depIdxs, - EnumInfos: file_predicate_proto_enumTypes, - MessageInfos: file_predicate_proto_msgTypes, - }.Build() - File_predicate_proto = out.File - file_predicate_proto_rawDesc = nil - file_predicate_proto_goTypes = nil - file_predicate_proto_depIdxs = nil -} diff --git a/storage/reads/datatypes/predicate.proto b/storage/reads/datatypes/predicate.proto deleted file mode 100644 index 7bcae7e9d78..00000000000 --- a/storage/reads/datatypes/predicate.proto +++ /dev/null @@ -1,53 +0,0 @@ -syntax = "proto3"; -package influxdata.platform.storage; -option go_package = ".;datatypes"; - -message Node { - enum Type { - TypeLogicalExpression = 0; - TypeComparisonExpression = 1; - TypeParenExpression = 2; - TypeTagRef = 3; - TypeLiteral = 4; - TypeFieldRef = 5; - } - - enum Comparison { - ComparisonEqual = 0; - ComparisonNotEqual = 1; - ComparisonStartsWith = 2; - ComparisonRegex = 3; - ComparisonNotRegex = 4; - ComparisonLess = 5; - ComparisonLessEqual = 6; - ComparisonGreater = 7; - ComparisonGreaterEqual = 8; - } - - // Logical operators apply to boolean values and combine to produce a single boolean result. - enum Logical { - LogicalAnd = 0; - LogicalOr = 1; - } - - - Type node_type = 1; // [(gogoproto.customname) = "NodeType", (gogoproto.jsontag) = "nodeType"]; - repeated Node children = 2; - - oneof value { - string StringValue = 3; - bool BooleanValue = 4; - int64 IntegerValue = 5; - uint64 UnsignedValue = 6; - double FloatValue = 7; - string RegexValue = 8; - string TagRefValue = 9; - string FieldRefValue = 10; - Logical logical = 11; - Comparison comparison = 12; - } -} - -message Predicate { - Node root = 1; -} diff --git a/storage/reads/datatypes/storage_common.go b/storage/reads/datatypes/storage_common.go deleted file mode 100644 index e9eaff83f35..00000000000 --- a/storage/reads/datatypes/storage_common.go +++ /dev/null @@ -1,13 +0,0 @@ -package datatypes - -import "strings" - -// AggregateNameMap is a map of uppercase aggregate names. -var AggregateNameMap = make(map[string]int32) - -func init() { - for k, v := range Aggregate_AggregateType_value { - name := strings.ToUpper(strings.TrimPrefix(k, "AggregateType")) - AggregateNameMap[name] = v - } -} diff --git a/storage/reads/datatypes/storage_common.pb.go b/storage/reads/datatypes/storage_common.pb.go deleted file mode 100644 index 72b45e82385..00000000000 --- a/storage/reads/datatypes/storage_common.pb.go +++ /dev/null @@ -1,3084 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.27.1 -// protoc v3.17.3 -// source: storage_common.proto - -package datatypes - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type ReadGroupRequest_Group int32 - -const ( - // GroupNone returns all series as a single group. - // The single GroupFrame.TagKeys will be the union of all tag keys. - ReadGroupRequest_GroupNone ReadGroupRequest_Group = 0 - // GroupBy returns a group for each unique value of the specified GroupKeys. - ReadGroupRequest_GroupBy ReadGroupRequest_Group = 2 -) - -// Enum value maps for ReadGroupRequest_Group. -var ( - ReadGroupRequest_Group_name = map[int32]string{ - 0: "GroupNone", - 2: "GroupBy", - } - ReadGroupRequest_Group_value = map[string]int32{ - "GroupNone": 0, - "GroupBy": 2, - } -) - -func (x ReadGroupRequest_Group) Enum() *ReadGroupRequest_Group { - p := new(ReadGroupRequest_Group) - *p = x - return p -} - -func (x ReadGroupRequest_Group) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ReadGroupRequest_Group) Descriptor() protoreflect.EnumDescriptor { - return file_storage_common_proto_enumTypes[0].Descriptor() -} - -func (ReadGroupRequest_Group) Type() protoreflect.EnumType { - return &file_storage_common_proto_enumTypes[0] -} - -func (x ReadGroupRequest_Group) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ReadGroupRequest_Group.Descriptor instead. -func (ReadGroupRequest_Group) EnumDescriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{1, 0} -} - -// TODO(jlapacik): This field is only used in unit tests. -// Specifically the two tests in group_resultset_test.go. -// This field should be removed and the tests that depend -// on it refactored. -type ReadGroupRequest_HintFlags int32 - -const ( - ReadGroupRequest_HintNone ReadGroupRequest_HintFlags = 0 - ReadGroupRequest_HintNoPoints ReadGroupRequest_HintFlags = 1 - ReadGroupRequest_HintNoSeries ReadGroupRequest_HintFlags = 2 - // HintSchemaAllTime performs schema queries without using time ranges - ReadGroupRequest_HintSchemaAllTime ReadGroupRequest_HintFlags = 4 -) - -// Enum value maps for ReadGroupRequest_HintFlags. 
-var ( - ReadGroupRequest_HintFlags_name = map[int32]string{ - 0: "HintNone", - 1: "HintNoPoints", - 2: "HintNoSeries", - 4: "HintSchemaAllTime", - } - ReadGroupRequest_HintFlags_value = map[string]int32{ - "HintNone": 0, - "HintNoPoints": 1, - "HintNoSeries": 2, - "HintSchemaAllTime": 4, - } -) - -func (x ReadGroupRequest_HintFlags) Enum() *ReadGroupRequest_HintFlags { - p := new(ReadGroupRequest_HintFlags) - *p = x - return p -} - -func (x ReadGroupRequest_HintFlags) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ReadGroupRequest_HintFlags) Descriptor() protoreflect.EnumDescriptor { - return file_storage_common_proto_enumTypes[1].Descriptor() -} - -func (ReadGroupRequest_HintFlags) Type() protoreflect.EnumType { - return &file_storage_common_proto_enumTypes[1] -} - -func (x ReadGroupRequest_HintFlags) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ReadGroupRequest_HintFlags.Descriptor instead. -func (ReadGroupRequest_HintFlags) EnumDescriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{1, 1} -} - -type Aggregate_AggregateType int32 - -const ( - Aggregate_AggregateTypeNone Aggregate_AggregateType = 0 - Aggregate_AggregateTypeSum Aggregate_AggregateType = 1 - Aggregate_AggregateTypeCount Aggregate_AggregateType = 2 - Aggregate_AggregateTypeMin Aggregate_AggregateType = 3 - Aggregate_AggregateTypeMax Aggregate_AggregateType = 4 - Aggregate_AggregateTypeFirst Aggregate_AggregateType = 5 - Aggregate_AggregateTypeLast Aggregate_AggregateType = 6 - Aggregate_AggregateTypeMean Aggregate_AggregateType = 7 -) - -// Enum value maps for Aggregate_AggregateType. -var ( - Aggregate_AggregateType_name = map[int32]string{ - 0: "AggregateTypeNone", - 1: "AggregateTypeSum", - 2: "AggregateTypeCount", - 3: "AggregateTypeMin", - 4: "AggregateTypeMax", - 5: "AggregateTypeFirst", - 6: "AggregateTypeLast", - 7: "AggregateTypeMean", - } - Aggregate_AggregateType_value = map[string]int32{ - "AggregateTypeNone": 0, - "AggregateTypeSum": 1, - "AggregateTypeCount": 2, - "AggregateTypeMin": 3, - "AggregateTypeMax": 4, - "AggregateTypeFirst": 5, - "AggregateTypeLast": 6, - "AggregateTypeMean": 7, - } -) - -func (x Aggregate_AggregateType) Enum() *Aggregate_AggregateType { - p := new(Aggregate_AggregateType) - *p = x - return p -} - -func (x Aggregate_AggregateType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Aggregate_AggregateType) Descriptor() protoreflect.EnumDescriptor { - return file_storage_common_proto_enumTypes[2].Descriptor() -} - -func (Aggregate_AggregateType) Type() protoreflect.EnumType { - return &file_storage_common_proto_enumTypes[2] -} - -func (x Aggregate_AggregateType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Aggregate_AggregateType.Descriptor instead. -func (Aggregate_AggregateType) EnumDescriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{2, 0} -} - -type ReadResponse_FrameType int32 - -const ( - ReadResponse_FrameTypeSeries ReadResponse_FrameType = 0 - ReadResponse_FrameTypePoints ReadResponse_FrameType = 1 -) - -// Enum value maps for ReadResponse_FrameType. 
-var ( - ReadResponse_FrameType_name = map[int32]string{ - 0: "FrameTypeSeries", - 1: "FrameTypePoints", - } - ReadResponse_FrameType_value = map[string]int32{ - "FrameTypeSeries": 0, - "FrameTypePoints": 1, - } -) - -func (x ReadResponse_FrameType) Enum() *ReadResponse_FrameType { - p := new(ReadResponse_FrameType) - *p = x - return p -} - -func (x ReadResponse_FrameType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ReadResponse_FrameType) Descriptor() protoreflect.EnumDescriptor { - return file_storage_common_proto_enumTypes[3].Descriptor() -} - -func (ReadResponse_FrameType) Type() protoreflect.EnumType { - return &file_storage_common_proto_enumTypes[3] -} - -func (x ReadResponse_FrameType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ReadResponse_FrameType.Descriptor instead. -func (ReadResponse_FrameType) EnumDescriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{4, 0} -} - -type ReadResponse_DataType int32 - -const ( - ReadResponse_DataTypeFloat ReadResponse_DataType = 0 - ReadResponse_DataTypeInteger ReadResponse_DataType = 1 - ReadResponse_DataTypeUnsigned ReadResponse_DataType = 2 - ReadResponse_DataTypeBoolean ReadResponse_DataType = 3 - ReadResponse_DataTypeString ReadResponse_DataType = 4 -) - -// Enum value maps for ReadResponse_DataType. -var ( - ReadResponse_DataType_name = map[int32]string{ - 0: "DataTypeFloat", - 1: "DataTypeInteger", - 2: "DataTypeUnsigned", - 3: "DataTypeBoolean", - 4: "DataTypeString", - } - ReadResponse_DataType_value = map[string]int32{ - "DataTypeFloat": 0, - "DataTypeInteger": 1, - "DataTypeUnsigned": 2, - "DataTypeBoolean": 3, - "DataTypeString": 4, - } -) - -func (x ReadResponse_DataType) Enum() *ReadResponse_DataType { - p := new(ReadResponse_DataType) - *p = x - return p -} - -func (x ReadResponse_DataType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ReadResponse_DataType) Descriptor() protoreflect.EnumDescriptor { - return file_storage_common_proto_enumTypes[4].Descriptor() -} - -func (ReadResponse_DataType) Type() protoreflect.EnumType { - return &file_storage_common_proto_enumTypes[4] -} - -func (x ReadResponse_DataType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ReadResponse_DataType.Descriptor instead. -func (ReadResponse_DataType) EnumDescriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{4, 1} -} - -type MeasurementFieldsResponse_FieldType int32 - -const ( - MeasurementFieldsResponse_FieldTypeFloat MeasurementFieldsResponse_FieldType = 0 - MeasurementFieldsResponse_FieldTypeInteger MeasurementFieldsResponse_FieldType = 1 - MeasurementFieldsResponse_FieldTypeUnsigned MeasurementFieldsResponse_FieldType = 2 - MeasurementFieldsResponse_FieldTypeString MeasurementFieldsResponse_FieldType = 3 - MeasurementFieldsResponse_FieldTypeBoolean MeasurementFieldsResponse_FieldType = 4 - MeasurementFieldsResponse_FieldTypeUndefined MeasurementFieldsResponse_FieldType = 5 -) - -// Enum value maps for MeasurementFieldsResponse_FieldType. 
-var ( - MeasurementFieldsResponse_FieldType_name = map[int32]string{ - 0: "FieldTypeFloat", - 1: "FieldTypeInteger", - 2: "FieldTypeUnsigned", - 3: "FieldTypeString", - 4: "FieldTypeBoolean", - 5: "FieldTypeUndefined", - } - MeasurementFieldsResponse_FieldType_value = map[string]int32{ - "FieldTypeFloat": 0, - "FieldTypeInteger": 1, - "FieldTypeUnsigned": 2, - "FieldTypeString": 3, - "FieldTypeBoolean": 4, - "FieldTypeUndefined": 5, - } -) - -func (x MeasurementFieldsResponse_FieldType) Enum() *MeasurementFieldsResponse_FieldType { - p := new(MeasurementFieldsResponse_FieldType) - *p = x - return p -} - -func (x MeasurementFieldsResponse_FieldType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (MeasurementFieldsResponse_FieldType) Descriptor() protoreflect.EnumDescriptor { - return file_storage_common_proto_enumTypes[5].Descriptor() -} - -func (MeasurementFieldsResponse_FieldType) Type() protoreflect.EnumType { - return &file_storage_common_proto_enumTypes[5] -} - -func (x MeasurementFieldsResponse_FieldType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use MeasurementFieldsResponse_FieldType.Descriptor instead. -func (MeasurementFieldsResponse_FieldType) EnumDescriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{16, 0} -} - -type ReadFilterRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ReadSource *anypb.Any `protobuf:"bytes,1,opt,name=ReadSource,proto3" json:"ReadSource,omitempty"` - Range *TimestampRange `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"` - Predicate *Predicate `protobuf:"bytes,3,opt,name=predicate,proto3" json:"predicate,omitempty"` -} - -func (x *ReadFilterRequest) Reset() { - *x = ReadFilterRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadFilterRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadFilterRequest) ProtoMessage() {} - -func (x *ReadFilterRequest) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadFilterRequest.ProtoReflect.Descriptor instead. -func (*ReadFilterRequest) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{0} -} - -func (x *ReadFilterRequest) GetReadSource() *anypb.Any { - if x != nil { - return x.ReadSource - } - return nil -} - -func (x *ReadFilterRequest) GetRange() *TimestampRange { - if x != nil { - return x.Range - } - return nil -} - -func (x *ReadFilterRequest) GetPredicate() *Predicate { - if x != nil { - return x.Predicate - } - return nil -} - -type ReadGroupRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ReadSource *anypb.Any `protobuf:"bytes,1,opt,name=ReadSource,proto3" json:"ReadSource,omitempty"` - Range *TimestampRange `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"` - Predicate *Predicate `protobuf:"bytes,3,opt,name=predicate,proto3" json:"predicate,omitempty"` - // GroupKeys specifies a list of tag keys used to order the data. 
- // It is dependent on the Group property to determine its behavior. - GroupKeys []string `protobuf:"bytes,4,rep,name=GroupKeys,proto3" json:"GroupKeys,omitempty"` - Group ReadGroupRequest_Group `protobuf:"varint,5,opt,name=group,proto3,enum=influxdata.platform.storage.ReadGroupRequest_Group" json:"group,omitempty"` - Aggregate *Aggregate `protobuf:"bytes,6,opt,name=aggregate,proto3" json:"aggregate,omitempty"` - Hints uint32 `protobuf:"fixed32,7,opt,name=Hints,proto3" json:"Hints,omitempty"` -} - -func (x *ReadGroupRequest) Reset() { - *x = ReadGroupRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadGroupRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadGroupRequest) ProtoMessage() {} - -func (x *ReadGroupRequest) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadGroupRequest.ProtoReflect.Descriptor instead. -func (*ReadGroupRequest) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{1} -} - -func (x *ReadGroupRequest) GetReadSource() *anypb.Any { - if x != nil { - return x.ReadSource - } - return nil -} - -func (x *ReadGroupRequest) GetRange() *TimestampRange { - if x != nil { - return x.Range - } - return nil -} - -func (x *ReadGroupRequest) GetPredicate() *Predicate { - if x != nil { - return x.Predicate - } - return nil -} - -func (x *ReadGroupRequest) GetGroupKeys() []string { - if x != nil { - return x.GroupKeys - } - return nil -} - -func (x *ReadGroupRequest) GetGroup() ReadGroupRequest_Group { - if x != nil { - return x.Group - } - return ReadGroupRequest_GroupNone -} - -func (x *ReadGroupRequest) GetAggregate() *Aggregate { - if x != nil { - return x.Aggregate - } - return nil -} - -func (x *ReadGroupRequest) GetHints() uint32 { - if x != nil { - return x.Hints - } - return 0 -} - -type Aggregate struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type Aggregate_AggregateType `protobuf:"varint,1,opt,name=type,proto3,enum=influxdata.platform.storage.Aggregate_AggregateType" json:"type,omitempty"` -} - -func (x *Aggregate) Reset() { - *x = Aggregate{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Aggregate) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Aggregate) ProtoMessage() {} - -func (x *Aggregate) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Aggregate.ProtoReflect.Descriptor instead. 
-func (*Aggregate) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{2} -} - -func (x *Aggregate) GetType() Aggregate_AggregateType { - if x != nil { - return x.Type - } - return Aggregate_AggregateTypeNone -} - -type Tag struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *Tag) Reset() { - *x = Tag{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Tag) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Tag) ProtoMessage() {} - -func (x *Tag) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Tag.ProtoReflect.Descriptor instead. -func (*Tag) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{3} -} - -func (x *Tag) GetKey() []byte { - if x != nil { - return x.Key - } - return nil -} - -func (x *Tag) GetValue() []byte { - if x != nil { - return x.Value - } - return nil -} - -// Response message for ReadFilter and ReadGroup -type ReadResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Frames []*ReadResponse_Frame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"` // [(gogoproto.nullable) = false]; -} - -func (x *ReadResponse) Reset() { - *x = ReadResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadResponse) ProtoMessage() {} - -func (x *ReadResponse) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadResponse.ProtoReflect.Descriptor instead. -func (*ReadResponse) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{4} -} - -func (x *ReadResponse) GetFrames() []*ReadResponse_Frame { - if x != nil { - return x.Frames - } - return nil -} - -type Capability struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Features contains the specific features supported - // by this capability. 
- Features []string `protobuf:"bytes,1,rep,name=features,proto3" json:"features,omitempty"` -} - -func (x *Capability) Reset() { - *x = Capability{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Capability) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Capability) ProtoMessage() {} - -func (x *Capability) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Capability.ProtoReflect.Descriptor instead. -func (*Capability) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{5} -} - -func (x *Capability) GetFeatures() []string { - if x != nil { - return x.Features - } - return nil -} - -type CapabilitiesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Capabilities contains the set of capabilities supported - // by the storage engine. It is a map of method names to - // the detailed capability information for the method. - Caps map[string]*Capability `protobuf:"bytes,1,rep,name=caps,proto3" json:"caps,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *CapabilitiesResponse) Reset() { - *x = CapabilitiesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CapabilitiesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CapabilitiesResponse) ProtoMessage() {} - -func (x *CapabilitiesResponse) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CapabilitiesResponse.ProtoReflect.Descriptor instead. -func (*CapabilitiesResponse) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{6} -} - -func (x *CapabilitiesResponse) GetCaps() map[string]*Capability { - if x != nil { - return x.Caps - } - return nil -} - -// Specifies a continuous range of nanosecond timestamps. -type TimestampRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Start defines the inclusive lower bound. - Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` - // End defines the exclusive upper bound. 
- End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` -} - -func (x *TimestampRange) Reset() { - *x = TimestampRange{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TimestampRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TimestampRange) ProtoMessage() {} - -func (x *TimestampRange) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TimestampRange.ProtoReflect.Descriptor instead. -func (*TimestampRange) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{7} -} - -func (x *TimestampRange) GetStart() int64 { - if x != nil { - return x.Start - } - return 0 -} - -func (x *TimestampRange) GetEnd() int64 { - if x != nil { - return x.End - } - return 0 -} - -// TagKeysRequest is the request message for Storage.TagKeys. -type TagKeysRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TagsSource *anypb.Any `protobuf:"bytes,1,opt,name=TagsSource,proto3" json:"TagsSource,omitempty"` - Range *TimestampRange `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"` - Predicate *Predicate `protobuf:"bytes,3,opt,name=predicate,proto3" json:"predicate,omitempty"` -} - -func (x *TagKeysRequest) Reset() { - *x = TagKeysRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TagKeysRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TagKeysRequest) ProtoMessage() {} - -func (x *TagKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TagKeysRequest.ProtoReflect.Descriptor instead. -func (*TagKeysRequest) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{8} -} - -func (x *TagKeysRequest) GetTagsSource() *anypb.Any { - if x != nil { - return x.TagsSource - } - return nil -} - -func (x *TagKeysRequest) GetRange() *TimestampRange { - if x != nil { - return x.Range - } - return nil -} - -func (x *TagKeysRequest) GetPredicate() *Predicate { - if x != nil { - return x.Predicate - } - return nil -} - -// TagValuesRequest is the request message for Storage.TagValues. 
-type TagValuesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TagsSource *anypb.Any `protobuf:"bytes,1,opt,name=TagsSource,proto3" json:"TagsSource,omitempty"` - Range *TimestampRange `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"` - Predicate *Predicate `protobuf:"bytes,3,opt,name=predicate,proto3" json:"predicate,omitempty"` - TagKey string `protobuf:"bytes,4,opt,name=tag_key,json=tagKey,proto3" json:"tag_key,omitempty"` -} - -func (x *TagValuesRequest) Reset() { - *x = TagValuesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TagValuesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TagValuesRequest) ProtoMessage() {} - -func (x *TagValuesRequest) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TagValuesRequest.ProtoReflect.Descriptor instead. -func (*TagValuesRequest) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{9} -} - -func (x *TagValuesRequest) GetTagsSource() *anypb.Any { - if x != nil { - return x.TagsSource - } - return nil -} - -func (x *TagValuesRequest) GetRange() *TimestampRange { - if x != nil { - return x.Range - } - return nil -} - -func (x *TagValuesRequest) GetPredicate() *Predicate { - if x != nil { - return x.Predicate - } - return nil -} - -func (x *TagValuesRequest) GetTagKey() string { - if x != nil { - return x.TagKey - } - return "" -} - -type ReadSeriesCardinalityRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ReadSource *anypb.Any `protobuf:"bytes,1,opt,name=ReadSource,proto3" json:"ReadSource,omitempty"` - Range *TimestampRange `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"` - Predicate *Predicate `protobuf:"bytes,3,opt,name=predicate,proto3" json:"predicate,omitempty"` -} - -func (x *ReadSeriesCardinalityRequest) Reset() { - *x = ReadSeriesCardinalityRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadSeriesCardinalityRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadSeriesCardinalityRequest) ProtoMessage() {} - -func (x *ReadSeriesCardinalityRequest) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadSeriesCardinalityRequest.ProtoReflect.Descriptor instead. 
-func (*ReadSeriesCardinalityRequest) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{10} -} - -func (x *ReadSeriesCardinalityRequest) GetReadSource() *anypb.Any { - if x != nil { - return x.ReadSource - } - return nil -} - -func (x *ReadSeriesCardinalityRequest) GetRange() *TimestampRange { - if x != nil { - return x.Range - } - return nil -} - -func (x *ReadSeriesCardinalityRequest) GetPredicate() *Predicate { - if x != nil { - return x.Predicate - } - return nil -} - -// Response message for Storage.TagKeys, Storage.TagValues Storage.MeasurementNames, -// Storage.MeasurementTagKeys and Storage.MeasurementTagValues. -type StringValuesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Values [][]byte `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` -} - -func (x *StringValuesResponse) Reset() { - *x = StringValuesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StringValuesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StringValuesResponse) ProtoMessage() {} - -func (x *StringValuesResponse) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StringValuesResponse.ProtoReflect.Descriptor instead. -func (*StringValuesResponse) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{11} -} - -func (x *StringValuesResponse) GetValues() [][]byte { - if x != nil { - return x.Values - } - return nil -} - -// MeasurementNamesRequest is the request message for Storage.MeasurementNames. -type MeasurementNamesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Source *anypb.Any `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` - Range *TimestampRange `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"` - Predicate *Predicate `protobuf:"bytes,3,opt,name=predicate,proto3" json:"predicate,omitempty"` -} - -func (x *MeasurementNamesRequest) Reset() { - *x = MeasurementNamesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MeasurementNamesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MeasurementNamesRequest) ProtoMessage() {} - -func (x *MeasurementNamesRequest) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MeasurementNamesRequest.ProtoReflect.Descriptor instead. 
-func (*MeasurementNamesRequest) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{12} -} - -func (x *MeasurementNamesRequest) GetSource() *anypb.Any { - if x != nil { - return x.Source - } - return nil -} - -func (x *MeasurementNamesRequest) GetRange() *TimestampRange { - if x != nil { - return x.Range - } - return nil -} - -func (x *MeasurementNamesRequest) GetPredicate() *Predicate { - if x != nil { - return x.Predicate - } - return nil -} - -// MeasurementTagKeysRequest is the request message for Storage.MeasurementTagKeys. -type MeasurementTagKeysRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Source *anypb.Any `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` - Measurement string `protobuf:"bytes,2,opt,name=measurement,proto3" json:"measurement,omitempty"` - Range *TimestampRange `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"` - Predicate *Predicate `protobuf:"bytes,4,opt,name=predicate,proto3" json:"predicate,omitempty"` -} - -func (x *MeasurementTagKeysRequest) Reset() { - *x = MeasurementTagKeysRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MeasurementTagKeysRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MeasurementTagKeysRequest) ProtoMessage() {} - -func (x *MeasurementTagKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MeasurementTagKeysRequest.ProtoReflect.Descriptor instead. -func (*MeasurementTagKeysRequest) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{13} -} - -func (x *MeasurementTagKeysRequest) GetSource() *anypb.Any { - if x != nil { - return x.Source - } - return nil -} - -func (x *MeasurementTagKeysRequest) GetMeasurement() string { - if x != nil { - return x.Measurement - } - return "" -} - -func (x *MeasurementTagKeysRequest) GetRange() *TimestampRange { - if x != nil { - return x.Range - } - return nil -} - -func (x *MeasurementTagKeysRequest) GetPredicate() *Predicate { - if x != nil { - return x.Predicate - } - return nil -} - -// MeasurementTagValuesRequest is the request message for Storage.MeasurementTagValues. 
-type MeasurementTagValuesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Source *anypb.Any `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` - Measurement string `protobuf:"bytes,2,opt,name=measurement,proto3" json:"measurement,omitempty"` - TagKey string `protobuf:"bytes,3,opt,name=tag_key,json=tagKey,proto3" json:"tag_key,omitempty"` - Range *TimestampRange `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"` - Predicate *Predicate `protobuf:"bytes,5,opt,name=predicate,proto3" json:"predicate,omitempty"` -} - -func (x *MeasurementTagValuesRequest) Reset() { - *x = MeasurementTagValuesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MeasurementTagValuesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MeasurementTagValuesRequest) ProtoMessage() {} - -func (x *MeasurementTagValuesRequest) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MeasurementTagValuesRequest.ProtoReflect.Descriptor instead. -func (*MeasurementTagValuesRequest) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{14} -} - -func (x *MeasurementTagValuesRequest) GetSource() *anypb.Any { - if x != nil { - return x.Source - } - return nil -} - -func (x *MeasurementTagValuesRequest) GetMeasurement() string { - if x != nil { - return x.Measurement - } - return "" -} - -func (x *MeasurementTagValuesRequest) GetTagKey() string { - if x != nil { - return x.TagKey - } - return "" -} - -func (x *MeasurementTagValuesRequest) GetRange() *TimestampRange { - if x != nil { - return x.Range - } - return nil -} - -func (x *MeasurementTagValuesRequest) GetPredicate() *Predicate { - if x != nil { - return x.Predicate - } - return nil -} - -// MeasurementFieldsRequest is the request message for Storage.MeasurementFields. 
-type MeasurementFieldsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Source *anypb.Any `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` - Measurement string `protobuf:"bytes,2,opt,name=measurement,proto3" json:"measurement,omitempty"` - Range *TimestampRange `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"` - Predicate *Predicate `protobuf:"bytes,4,opt,name=predicate,proto3" json:"predicate,omitempty"` -} - -func (x *MeasurementFieldsRequest) Reset() { - *x = MeasurementFieldsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MeasurementFieldsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MeasurementFieldsRequest) ProtoMessage() {} - -func (x *MeasurementFieldsRequest) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MeasurementFieldsRequest.ProtoReflect.Descriptor instead. -func (*MeasurementFieldsRequest) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{15} -} - -func (x *MeasurementFieldsRequest) GetSource() *anypb.Any { - if x != nil { - return x.Source - } - return nil -} - -func (x *MeasurementFieldsRequest) GetMeasurement() string { - if x != nil { - return x.Measurement - } - return "" -} - -func (x *MeasurementFieldsRequest) GetRange() *TimestampRange { - if x != nil { - return x.Range - } - return nil -} - -func (x *MeasurementFieldsRequest) GetPredicate() *Predicate { - if x != nil { - return x.Predicate - } - return nil -} - -// MeasurementFieldsResponse is the response message for Storage.MeasurementFields. -type MeasurementFieldsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Fields []*MeasurementFieldsResponse_MessageField `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"` // [(gogoproto.nullable) = false]; -} - -func (x *MeasurementFieldsResponse) Reset() { - *x = MeasurementFieldsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MeasurementFieldsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MeasurementFieldsResponse) ProtoMessage() {} - -func (x *MeasurementFieldsResponse) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MeasurementFieldsResponse.ProtoReflect.Descriptor instead. 
-func (*MeasurementFieldsResponse) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{16} -} - -func (x *MeasurementFieldsResponse) GetFields() []*MeasurementFieldsResponse_MessageField { - if x != nil { - return x.Fields - } - return nil -} - -type ReadWindowAggregateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ReadSource *anypb.Any `protobuf:"bytes,1,opt,name=ReadSource,proto3" json:"ReadSource,omitempty"` - Range *TimestampRange `protobuf:"bytes,2,opt,name=range,proto3" json:"range,omitempty"` - Predicate *Predicate `protobuf:"bytes,3,opt,name=predicate,proto3" json:"predicate,omitempty"` - WindowEvery int64 `protobuf:"varint,4,opt,name=WindowEvery,proto3" json:"WindowEvery,omitempty"` - Offset int64 `protobuf:"varint,6,opt,name=Offset,proto3" json:"Offset,omitempty"` - Aggregate []*Aggregate `protobuf:"bytes,5,rep,name=aggregate,proto3" json:"aggregate,omitempty"` - Window *Window `protobuf:"bytes,7,opt,name=window,proto3" json:"window,omitempty"` -} - -func (x *ReadWindowAggregateRequest) Reset() { - *x = ReadWindowAggregateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadWindowAggregateRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadWindowAggregateRequest) ProtoMessage() {} - -func (x *ReadWindowAggregateRequest) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadWindowAggregateRequest.ProtoReflect.Descriptor instead. 
-func (*ReadWindowAggregateRequest) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{17} -} - -func (x *ReadWindowAggregateRequest) GetReadSource() *anypb.Any { - if x != nil { - return x.ReadSource - } - return nil -} - -func (x *ReadWindowAggregateRequest) GetRange() *TimestampRange { - if x != nil { - return x.Range - } - return nil -} - -func (x *ReadWindowAggregateRequest) GetPredicate() *Predicate { - if x != nil { - return x.Predicate - } - return nil -} - -func (x *ReadWindowAggregateRequest) GetWindowEvery() int64 { - if x != nil { - return x.WindowEvery - } - return 0 -} - -func (x *ReadWindowAggregateRequest) GetOffset() int64 { - if x != nil { - return x.Offset - } - return 0 -} - -func (x *ReadWindowAggregateRequest) GetAggregate() []*Aggregate { - if x != nil { - return x.Aggregate - } - return nil -} - -func (x *ReadWindowAggregateRequest) GetWindow() *Window { - if x != nil { - return x.Window - } - return nil -} - -type Window struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Every *Duration `protobuf:"bytes,1,opt,name=every,proto3" json:"every,omitempty"` - Offset *Duration `protobuf:"bytes,2,opt,name=offset,proto3" json:"offset,omitempty"` -} - -func (x *Window) Reset() { - *x = Window{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Window) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Window) ProtoMessage() {} - -func (x *Window) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Window.ProtoReflect.Descriptor instead. -func (*Window) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{18} -} - -func (x *Window) GetEvery() *Duration { - if x != nil { - return x.Every - } - return nil -} - -func (x *Window) GetOffset() *Duration { - if x != nil { - return x.Offset - } - return nil -} - -type Duration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Nsecs int64 `protobuf:"varint,1,opt,name=nsecs,proto3" json:"nsecs,omitempty"` - Months int64 `protobuf:"varint,2,opt,name=months,proto3" json:"months,omitempty"` - Negative bool `protobuf:"varint,3,opt,name=negative,proto3" json:"negative,omitempty"` -} - -func (x *Duration) Reset() { - *x = Duration{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Duration) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Duration) ProtoMessage() {} - -func (x *Duration) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Duration.ProtoReflect.Descriptor instead. 
-func (*Duration) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{19} -} - -func (x *Duration) GetNsecs() int64 { - if x != nil { - return x.Nsecs - } - return 0 -} - -func (x *Duration) GetMonths() int64 { - if x != nil { - return x.Months - } - return 0 -} - -func (x *Duration) GetNegative() bool { - if x != nil { - return x.Negative - } - return false -} - -type ReadResponse_Frame struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Data: - // - // *ReadResponse_Frame_Group - // *ReadResponse_Frame_Series - // *ReadResponse_Frame_FloatPoints - // *ReadResponse_Frame_IntegerPoints - // *ReadResponse_Frame_UnsignedPoints - // *ReadResponse_Frame_BooleanPoints - // *ReadResponse_Frame_StringPoints - Data isReadResponse_Frame_Data `protobuf_oneof:"data"` -} - -func (x *ReadResponse_Frame) Reset() { - *x = ReadResponse_Frame{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadResponse_Frame) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadResponse_Frame) ProtoMessage() {} - -func (x *ReadResponse_Frame) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadResponse_Frame.ProtoReflect.Descriptor instead. -func (*ReadResponse_Frame) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{4, 0} -} - -func (m *ReadResponse_Frame) GetData() isReadResponse_Frame_Data { - if m != nil { - return m.Data - } - return nil -} - -func (x *ReadResponse_Frame) GetGroup() *ReadResponse_GroupFrame { - if x, ok := x.GetData().(*ReadResponse_Frame_Group); ok { - return x.Group - } - return nil -} - -func (x *ReadResponse_Frame) GetSeries() *ReadResponse_SeriesFrame { - if x, ok := x.GetData().(*ReadResponse_Frame_Series); ok { - return x.Series - } - return nil -} - -func (x *ReadResponse_Frame) GetFloatPoints() *ReadResponse_FloatPointsFrame { - if x, ok := x.GetData().(*ReadResponse_Frame_FloatPoints); ok { - return x.FloatPoints - } - return nil -} - -func (x *ReadResponse_Frame) GetIntegerPoints() *ReadResponse_IntegerPointsFrame { - if x, ok := x.GetData().(*ReadResponse_Frame_IntegerPoints); ok { - return x.IntegerPoints - } - return nil -} - -func (x *ReadResponse_Frame) GetUnsignedPoints() *ReadResponse_UnsignedPointsFrame { - if x, ok := x.GetData().(*ReadResponse_Frame_UnsignedPoints); ok { - return x.UnsignedPoints - } - return nil -} - -func (x *ReadResponse_Frame) GetBooleanPoints() *ReadResponse_BooleanPointsFrame { - if x, ok := x.GetData().(*ReadResponse_Frame_BooleanPoints); ok { - return x.BooleanPoints - } - return nil -} - -func (x *ReadResponse_Frame) GetStringPoints() *ReadResponse_StringPointsFrame { - if x, ok := x.GetData().(*ReadResponse_Frame_StringPoints); ok { - return x.StringPoints - } - return nil -} - -type isReadResponse_Frame_Data interface { - isReadResponse_Frame_Data() -} - -type ReadResponse_Frame_Group struct { - Group *ReadResponse_GroupFrame `protobuf:"bytes,7,opt,name=group,proto3,oneof"` -} - -type ReadResponse_Frame_Series struct { - Series *ReadResponse_SeriesFrame 
`protobuf:"bytes,1,opt,name=series,proto3,oneof"` -} - -type ReadResponse_Frame_FloatPoints struct { - FloatPoints *ReadResponse_FloatPointsFrame `protobuf:"bytes,2,opt,name=FloatPoints,proto3,oneof"` -} - -type ReadResponse_Frame_IntegerPoints struct { - IntegerPoints *ReadResponse_IntegerPointsFrame `protobuf:"bytes,3,opt,name=IntegerPoints,proto3,oneof"` -} - -type ReadResponse_Frame_UnsignedPoints struct { - UnsignedPoints *ReadResponse_UnsignedPointsFrame `protobuf:"bytes,4,opt,name=UnsignedPoints,proto3,oneof"` -} - -type ReadResponse_Frame_BooleanPoints struct { - BooleanPoints *ReadResponse_BooleanPointsFrame `protobuf:"bytes,5,opt,name=BooleanPoints,proto3,oneof"` -} - -type ReadResponse_Frame_StringPoints struct { - StringPoints *ReadResponse_StringPointsFrame `protobuf:"bytes,6,opt,name=StringPoints,proto3,oneof"` -} - -func (*ReadResponse_Frame_Group) isReadResponse_Frame_Data() {} - -func (*ReadResponse_Frame_Series) isReadResponse_Frame_Data() {} - -func (*ReadResponse_Frame_FloatPoints) isReadResponse_Frame_Data() {} - -func (*ReadResponse_Frame_IntegerPoints) isReadResponse_Frame_Data() {} - -func (*ReadResponse_Frame_UnsignedPoints) isReadResponse_Frame_Data() {} - -func (*ReadResponse_Frame_BooleanPoints) isReadResponse_Frame_Data() {} - -func (*ReadResponse_Frame_StringPoints) isReadResponse_Frame_Data() {} - -type ReadResponse_GroupFrame struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // TagKeys - TagKeys [][]byte `protobuf:"bytes,1,rep,name=TagKeys,proto3" json:"TagKeys,omitempty"` - // PartitionKeyVals is the values of the partition key for this group, order matching ReadGroupRequest.GroupKeys - PartitionKeyVals [][]byte `protobuf:"bytes,2,rep,name=PartitionKeyVals,proto3" json:"PartitionKeyVals,omitempty"` -} - -func (x *ReadResponse_GroupFrame) Reset() { - *x = ReadResponse_GroupFrame{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadResponse_GroupFrame) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadResponse_GroupFrame) ProtoMessage() {} - -func (x *ReadResponse_GroupFrame) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadResponse_GroupFrame.ProtoReflect.Descriptor instead. 
-func (*ReadResponse_GroupFrame) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{4, 1} -} - -func (x *ReadResponse_GroupFrame) GetTagKeys() [][]byte { - if x != nil { - return x.TagKeys - } - return nil -} - -func (x *ReadResponse_GroupFrame) GetPartitionKeyVals() [][]byte { - if x != nil { - return x.PartitionKeyVals - } - return nil -} - -type ReadResponse_SeriesFrame struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Tags []*Tag `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` // [(gogoproto.nullable) = false]; - DataType ReadResponse_DataType `protobuf:"varint,2,opt,name=data_type,json=dataType,proto3,enum=influxdata.platform.storage.ReadResponse_DataType" json:"data_type,omitempty"` -} - -func (x *ReadResponse_SeriesFrame) Reset() { - *x = ReadResponse_SeriesFrame{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadResponse_SeriesFrame) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadResponse_SeriesFrame) ProtoMessage() {} - -func (x *ReadResponse_SeriesFrame) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadResponse_SeriesFrame.ProtoReflect.Descriptor instead. -func (*ReadResponse_SeriesFrame) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{4, 2} -} - -func (x *ReadResponse_SeriesFrame) GetTags() []*Tag { - if x != nil { - return x.Tags - } - return nil -} - -func (x *ReadResponse_SeriesFrame) GetDataType() ReadResponse_DataType { - if x != nil { - return x.DataType - } - return ReadResponse_DataTypeFloat -} - -type ReadResponse_FloatPointsFrame struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Timestamps []int64 `protobuf:"fixed64,1,rep,packed,name=timestamps,proto3" json:"timestamps,omitempty"` - Values []float64 `protobuf:"fixed64,2,rep,packed,name=values,proto3" json:"values,omitempty"` -} - -func (x *ReadResponse_FloatPointsFrame) Reset() { - *x = ReadResponse_FloatPointsFrame{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadResponse_FloatPointsFrame) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadResponse_FloatPointsFrame) ProtoMessage() {} - -func (x *ReadResponse_FloatPointsFrame) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadResponse_FloatPointsFrame.ProtoReflect.Descriptor instead. 
-func (*ReadResponse_FloatPointsFrame) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{4, 3} -} - -func (x *ReadResponse_FloatPointsFrame) GetTimestamps() []int64 { - if x != nil { - return x.Timestamps - } - return nil -} - -func (x *ReadResponse_FloatPointsFrame) GetValues() []float64 { - if x != nil { - return x.Values - } - return nil -} - -type ReadResponse_IntegerPointsFrame struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Timestamps []int64 `protobuf:"fixed64,1,rep,packed,name=timestamps,proto3" json:"timestamps,omitempty"` - Values []int64 `protobuf:"varint,2,rep,packed,name=values,proto3" json:"values,omitempty"` -} - -func (x *ReadResponse_IntegerPointsFrame) Reset() { - *x = ReadResponse_IntegerPointsFrame{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadResponse_IntegerPointsFrame) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadResponse_IntegerPointsFrame) ProtoMessage() {} - -func (x *ReadResponse_IntegerPointsFrame) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadResponse_IntegerPointsFrame.ProtoReflect.Descriptor instead. -func (*ReadResponse_IntegerPointsFrame) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{4, 4} -} - -func (x *ReadResponse_IntegerPointsFrame) GetTimestamps() []int64 { - if x != nil { - return x.Timestamps - } - return nil -} - -func (x *ReadResponse_IntegerPointsFrame) GetValues() []int64 { - if x != nil { - return x.Values - } - return nil -} - -type ReadResponse_UnsignedPointsFrame struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Timestamps []int64 `protobuf:"fixed64,1,rep,packed,name=timestamps,proto3" json:"timestamps,omitempty"` - Values []uint64 `protobuf:"varint,2,rep,packed,name=values,proto3" json:"values,omitempty"` -} - -func (x *ReadResponse_UnsignedPointsFrame) Reset() { - *x = ReadResponse_UnsignedPointsFrame{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadResponse_UnsignedPointsFrame) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadResponse_UnsignedPointsFrame) ProtoMessage() {} - -func (x *ReadResponse_UnsignedPointsFrame) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadResponse_UnsignedPointsFrame.ProtoReflect.Descriptor instead. 
-func (*ReadResponse_UnsignedPointsFrame) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{4, 5} -} - -func (x *ReadResponse_UnsignedPointsFrame) GetTimestamps() []int64 { - if x != nil { - return x.Timestamps - } - return nil -} - -func (x *ReadResponse_UnsignedPointsFrame) GetValues() []uint64 { - if x != nil { - return x.Values - } - return nil -} - -type ReadResponse_BooleanPointsFrame struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Timestamps []int64 `protobuf:"fixed64,1,rep,packed,name=timestamps,proto3" json:"timestamps,omitempty"` - Values []bool `protobuf:"varint,2,rep,packed,name=values,proto3" json:"values,omitempty"` -} - -func (x *ReadResponse_BooleanPointsFrame) Reset() { - *x = ReadResponse_BooleanPointsFrame{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadResponse_BooleanPointsFrame) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadResponse_BooleanPointsFrame) ProtoMessage() {} - -func (x *ReadResponse_BooleanPointsFrame) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadResponse_BooleanPointsFrame.ProtoReflect.Descriptor instead. -func (*ReadResponse_BooleanPointsFrame) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{4, 6} -} - -func (x *ReadResponse_BooleanPointsFrame) GetTimestamps() []int64 { - if x != nil { - return x.Timestamps - } - return nil -} - -func (x *ReadResponse_BooleanPointsFrame) GetValues() []bool { - if x != nil { - return x.Values - } - return nil -} - -type ReadResponse_StringPointsFrame struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Timestamps []int64 `protobuf:"fixed64,1,rep,packed,name=timestamps,proto3" json:"timestamps,omitempty"` - Values []string `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"` -} - -func (x *ReadResponse_StringPointsFrame) Reset() { - *x = ReadResponse_StringPointsFrame{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadResponse_StringPointsFrame) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadResponse_StringPointsFrame) ProtoMessage() {} - -func (x *ReadResponse_StringPointsFrame) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadResponse_StringPointsFrame.ProtoReflect.Descriptor instead. 
-func (*ReadResponse_StringPointsFrame) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{4, 7} -} - -func (x *ReadResponse_StringPointsFrame) GetTimestamps() []int64 { - if x != nil { - return x.Timestamps - } - return nil -} - -func (x *ReadResponse_StringPointsFrame) GetValues() []string { - if x != nil { - return x.Values - } - return nil -} - -type MeasurementFieldsResponse_MessageField struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Type MeasurementFieldsResponse_FieldType `protobuf:"varint,2,opt,name=type,proto3,enum=influxdata.platform.storage.MeasurementFieldsResponse_FieldType" json:"type,omitempty"` - Timestamp int64 `protobuf:"fixed64,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` -} - -func (x *MeasurementFieldsResponse_MessageField) Reset() { - *x = MeasurementFieldsResponse_MessageField{} - if protoimpl.UnsafeEnabled { - mi := &file_storage_common_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MeasurementFieldsResponse_MessageField) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MeasurementFieldsResponse_MessageField) ProtoMessage() {} - -func (x *MeasurementFieldsResponse_MessageField) ProtoReflect() protoreflect.Message { - mi := &file_storage_common_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MeasurementFieldsResponse_MessageField.ProtoReflect.Descriptor instead. 
-func (*MeasurementFieldsResponse_MessageField) Descriptor() ([]byte, []int) { - return file_storage_common_proto_rawDescGZIP(), []int{16, 0} -} - -func (x *MeasurementFieldsResponse_MessageField) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *MeasurementFieldsResponse_MessageField) GetType() MeasurementFieldsResponse_FieldType { - if x != nil { - return x.Type - } - return MeasurementFieldsResponse_FieldTypeFloat -} - -func (x *MeasurementFieldsResponse_MessageField) GetTimestamp() int64 { - if x != nil { - return x.Timestamp - } - return 0 -} - -var File_storage_common_proto protoreflect.FileDescriptor - -var file_storage_common_proto_rawDesc = []byte{ - 0x0a, 0x14, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1b, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0f, - 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xd2, 0x01, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0a, 0x52, 0x65, 0x61, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x72, - 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x69, 0x6e, 0x66, - 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, - 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x26, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, - 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x22, 0x91, 0x04, 0x0a, 0x10, 0x52, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, - 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x0a, 0x52, 0x65, 0x61, - 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x41, 0x6e, 0x79, 0x52, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, - 0x41, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, - 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, - 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, - 0x67, 0x65, 0x12, 0x44, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, - 0x03, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x09, 0x70, - 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x47, 0x72, 0x6f, 0x75, - 0x70, 0x4b, 0x65, 0x79, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x47, 0x72, 0x6f, - 0x75, 0x70, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x49, 0x0a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x12, 0x44, 0x0a, 0x09, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x52, 0x09, 0x61, 0x67, - 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x48, 0x69, 0x6e, 0x74, 0x73, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x48, 0x69, 0x6e, 0x74, 0x73, 0x22, 0x23, 0x0a, - 0x05, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4e, - 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x79, - 0x10, 0x02, 0x22, 0x54, 0x0a, 0x09, 0x48, 0x69, 0x6e, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, - 0x0c, 0x0a, 0x08, 0x48, 0x69, 0x6e, 0x74, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x10, 0x0a, - 0x0c, 0x48, 0x69, 0x6e, 0x74, 0x4e, 0x6f, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x10, 0x01, 0x12, - 0x10, 0x0a, 0x0c, 0x48, 0x69, 0x6e, 0x74, 0x4e, 0x6f, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x10, - 0x02, 0x12, 0x15, 0x0a, 0x11, 0x48, 0x69, 0x6e, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x41, - 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x10, 0x04, 0x22, 0x9e, 0x02, 0x0a, 0x09, 0x41, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x12, 0x48, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x41, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x22, 0xc6, 0x01, 0x0a, 0x0d, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x53, 0x75, 0x6d, 0x10, 0x01, 0x12, - 0x16, 0x0a, 0x12, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x67, 0x67, 0x72, 0x65, - 0x67, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x69, 0x6e, 0x10, 0x03, 0x12, 0x14, 0x0a, - 0x10, 0x41, 0x67, 0x67, 0x72, 0x65, 
0x67, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, - 0x78, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x72, 0x73, 0x74, 0x10, 0x05, 0x12, 0x15, 0x0a, 0x11, 0x41, - 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4c, 0x61, 0x73, 0x74, - 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x4d, 0x65, 0x61, 0x6e, 0x10, 0x07, 0x22, 0x2d, 0x0a, 0x03, 0x54, 0x61, 0x67, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x99, 0x0c, 0x0a, 0x0c, 0x52, 0x65, 0x61, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x66, 0x72, 0x61, - 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x69, 0x6e, 0x66, 0x6c, - 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x06, 0x66, 0x72, 0x61, 0x6d, - 0x65, 0x73, 0x1a, 0xa6, 0x05, 0x0a, 0x05, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x05, - 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x69, 0x6e, - 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, - 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x46, 0x72, 0x61, 0x6d, - 0x65, 0x48, 0x00, 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x4f, 0x0a, 0x06, 0x73, 0x65, - 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x69, 0x6e, 0x66, - 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x46, 0x72, 0x61, 0x6d, - 0x65, 0x48, 0x00, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x5e, 0x0a, 0x0b, 0x46, - 0x6c, 0x6f, 0x61, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x3a, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, - 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x52, - 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x61, - 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x48, 0x00, 0x52, 0x0b, - 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x64, 0x0a, 0x0d, 0x49, - 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x49, 0x6e, - 0x74, 0x65, 0x67, 0x65, 0x72, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x46, 0x72, 0x61, 0x6d, 0x65, - 0x48, 0x00, 0x52, 0x0d, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x65, 
0x72, 0x50, 0x6f, 0x69, 0x6e, 0x74, - 0x73, 0x12, 0x67, 0x0a, 0x0e, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x50, 0x6f, 0x69, - 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x69, 0x6e, 0x66, 0x6c, - 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x50, 0x6f, 0x69, - 0x6e, 0x74, 0x73, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x48, 0x00, 0x52, 0x0e, 0x55, 0x6e, 0x73, 0x69, - 0x67, 0x6e, 0x65, 0x64, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x64, 0x0a, 0x0d, 0x42, 0x6f, - 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x3c, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, - 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x6f, 0x6f, - 0x6c, 0x65, 0x61, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x48, - 0x00, 0x52, 0x0d, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, - 0x12, 0x61, 0x0a, 0x0c, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x46, 0x72, - 0x61, 0x6d, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, - 0x6e, 0x74, 0x73, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x52, 0x0a, 0x0a, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x54, 0x61, 0x67, - 0x4b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x54, 0x61, 0x67, 0x4b, - 0x65, 0x79, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x50, - 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x73, 0x1a, - 0x94, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, - 0x34, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, - 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, - 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x61, 0x67, 0x52, - 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x4f, 0x0a, 0x09, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, - 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x64, 0x61, - 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x4a, 0x0a, 0x10, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x50, - 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 
0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x10, 0x52, 0x0a, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x01, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x73, 0x1a, 0x4c, 0x0a, 0x12, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x50, 0x6f, 0x69, - 0x6e, 0x74, 0x73, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x10, 0x52, 0x0a, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x03, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, - 0x1a, 0x4d, 0x0a, 0x13, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x50, 0x6f, 0x69, 0x6e, - 0x74, 0x73, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x10, 0x52, 0x0a, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x1a, - 0x4c, 0x0a, 0x12, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, - 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x10, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x08, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x1a, 0x4b, 0x0a, - 0x11, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x10, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x35, 0x0a, 0x09, 0x46, 0x72, - 0x61, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x46, 0x72, 0x61, 0x6d, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, - 0x46, 0x72, 0x61, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x10, - 0x01, 0x22, 0x71, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x11, 0x0a, - 0x0d, 0x44, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x10, 0x00, - 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x49, 0x6e, 0x74, 0x65, - 0x67, 0x65, 0x72, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, - 0x65, 0x55, 0x6e, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x44, - 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x10, 0x03, - 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x53, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x10, 0x04, 0x22, 0x28, 0x0a, 0x0a, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xc9, - 0x01, 0x0a, 
0x14, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x04, 0x63, 0x61, 0x70, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x61, 0x70, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x04, 0x63, 0x61, 0x70, 0x73, 0x1a, 0x60, 0x0a, 0x09, 0x43, 0x61, 0x70, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x38, 0x0a, 0x0e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcf, 0x01, 0x0a, 0x0e, 0x54, 0x61, 0x67, 0x4b, 0x65, 0x79, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x0a, 0x54, 0x61, 0x67, 0x73, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, - 0x79, 0x52, 0x0a, 0x54, 0x61, 0x67, 0x73, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x41, 0x0a, - 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x69, - 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, - 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x44, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, - 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0xea, 0x01, 0x0a, 0x10, 0x54, 0x61, 0x67, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x0a, 0x54, - 0x61, 0x67, 0x73, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a, 0x54, 0x61, 0x67, 0x73, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x12, 0x41, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2b, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, - 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 
0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, - 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, - 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x67, - 0x4b, 0x65, 0x79, 0x22, 0xdd, 0x01, 0x0a, 0x1c, 0x52, 0x65, 0x61, 0x64, 0x53, 0x65, 0x72, 0x69, - 0x65, 0x73, 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a, - 0x52, 0x65, 0x61, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x72, 0x61, - 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x69, 0x6e, 0x66, 0x6c, - 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, - 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x26, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, - 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x50, - 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x22, 0x2e, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x73, 0x22, 0xd0, 0x01, 0x0a, 0x17, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x2c, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x41, 0x0a, - 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x69, - 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, - 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x44, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 
0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, - 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0xf4, 0x01, 0x0a, 0x19, 0x4d, 0x65, 0x61, 0x73, 0x75, - 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x41, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x69, 0x6e, 0x66, - 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x8f, 0x02, - 0x0a, 0x1b, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x67, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, - 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x41, 0x6e, 0x79, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x6d, - 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x17, 0x0a, - 0x07, 0x74, 0x61, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x74, 0x61, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x09, 0x70, 0x72, 0x65, - 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x69, - 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, - 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, - 0xf3, 0x01, 0x0a, 0x18, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 
0x0a, 0x06, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, - 0x6e, 0x79, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x65, - 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x6d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x41, 0x0a, 0x05, - 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x69, 0x6e, - 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, - 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x44, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0xa1, 0x03, 0x0a, 0x19, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x5b, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, - 0x1a, 0x94, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x40, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, - 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x10, 0x52, 0x09, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x8f, 0x01, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x54, 0x79, - 0x70, 0x65, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x10, 0x01, 0x12, - 0x15, 0x0a, 0x11, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x55, 0x6e, 0x73, 0x69, - 0x67, 0x6e, 0x65, 0x64, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x54, - 0x79, 0x70, 
0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x10, - 0x04, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x55, 0x6e, - 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x10, 0x05, 0x22, 0x98, 0x03, 0x0a, 0x1a, 0x52, 0x65, - 0x61, 0x64, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, - 0x6e, 0x79, 0x52, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x41, - 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, - 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x44, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x09, 0x70, 0x72, - 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x57, 0x69, 0x6e, 0x64, 0x6f, - 0x77, 0x45, 0x76, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x57, 0x69, - 0x6e, 0x64, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x4f, 0x66, 0x66, - 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4f, 0x66, 0x66, 0x73, 0x65, - 0x74, 0x12, 0x44, 0x0a, 0x09, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x52, 0x09, 0x61, 0x67, - 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, - 0x77, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x52, 0x06, 0x77, 0x69, - 0x6e, 0x64, 0x6f, 0x77, 0x22, 0x84, 0x01, 0x0a, 0x06, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, - 0x3b, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, - 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, - 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x65, 0x76, 0x65, 0x72, 0x79, 0x12, 0x3d, 0x0a, 0x06, - 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x69, - 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, - 0x72, 0x6d, 0x2e, 0x73, 0x74, 0x6f, 
0x72, 0x61, 0x67, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x22, 0x54, 0x0a, 0x08, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x73, 0x65, 0x63, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6e, 0x73, 0x65, 0x63, 0x73, 0x12, 0x16, 0x0a, - 0x06, 0x6d, 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6d, - 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, - 0x65, 0x42, 0x0d, 0x5a, 0x0b, 0x2e, 0x3b, 0x64, 0x61, 0x74, 0x61, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_storage_common_proto_rawDescOnce sync.Once - file_storage_common_proto_rawDescData = file_storage_common_proto_rawDesc -) - -func file_storage_common_proto_rawDescGZIP() []byte { - file_storage_common_proto_rawDescOnce.Do(func() { - file_storage_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_storage_common_proto_rawDescData) - }) - return file_storage_common_proto_rawDescData -} - -var file_storage_common_proto_enumTypes = make([]protoimpl.EnumInfo, 6) -var file_storage_common_proto_msgTypes = make([]protoimpl.MessageInfo, 30) -var file_storage_common_proto_goTypes = []interface{}{ - (ReadGroupRequest_Group)(0), // 0: influxdata.platform.storage.ReadGroupRequest.Group - (ReadGroupRequest_HintFlags)(0), // 1: influxdata.platform.storage.ReadGroupRequest.HintFlags - (Aggregate_AggregateType)(0), // 2: influxdata.platform.storage.Aggregate.AggregateType - (ReadResponse_FrameType)(0), // 3: influxdata.platform.storage.ReadResponse.FrameType - (ReadResponse_DataType)(0), // 4: influxdata.platform.storage.ReadResponse.DataType - (MeasurementFieldsResponse_FieldType)(0), // 5: influxdata.platform.storage.MeasurementFieldsResponse.FieldType - (*ReadFilterRequest)(nil), // 6: influxdata.platform.storage.ReadFilterRequest - (*ReadGroupRequest)(nil), // 7: influxdata.platform.storage.ReadGroupRequest - (*Aggregate)(nil), // 8: influxdata.platform.storage.Aggregate - (*Tag)(nil), // 9: influxdata.platform.storage.Tag - (*ReadResponse)(nil), // 10: influxdata.platform.storage.ReadResponse - (*Capability)(nil), // 11: influxdata.platform.storage.Capability - (*CapabilitiesResponse)(nil), // 12: influxdata.platform.storage.CapabilitiesResponse - (*TimestampRange)(nil), // 13: influxdata.platform.storage.TimestampRange - (*TagKeysRequest)(nil), // 14: influxdata.platform.storage.TagKeysRequest - (*TagValuesRequest)(nil), // 15: influxdata.platform.storage.TagValuesRequest - (*ReadSeriesCardinalityRequest)(nil), // 16: influxdata.platform.storage.ReadSeriesCardinalityRequest - (*StringValuesResponse)(nil), // 17: influxdata.platform.storage.StringValuesResponse - (*MeasurementNamesRequest)(nil), // 18: influxdata.platform.storage.MeasurementNamesRequest - (*MeasurementTagKeysRequest)(nil), // 19: influxdata.platform.storage.MeasurementTagKeysRequest - (*MeasurementTagValuesRequest)(nil), // 20: influxdata.platform.storage.MeasurementTagValuesRequest - (*MeasurementFieldsRequest)(nil), // 21: influxdata.platform.storage.MeasurementFieldsRequest - (*MeasurementFieldsResponse)(nil), // 22: influxdata.platform.storage.MeasurementFieldsResponse - (*ReadWindowAggregateRequest)(nil), // 23: influxdata.platform.storage.ReadWindowAggregateRequest - (*Window)(nil), // 24: influxdata.platform.storage.Window 
- (*Duration)(nil), // 25: influxdata.platform.storage.Duration - (*ReadResponse_Frame)(nil), // 26: influxdata.platform.storage.ReadResponse.Frame - (*ReadResponse_GroupFrame)(nil), // 27: influxdata.platform.storage.ReadResponse.GroupFrame - (*ReadResponse_SeriesFrame)(nil), // 28: influxdata.platform.storage.ReadResponse.SeriesFrame - (*ReadResponse_FloatPointsFrame)(nil), // 29: influxdata.platform.storage.ReadResponse.FloatPointsFrame - (*ReadResponse_IntegerPointsFrame)(nil), // 30: influxdata.platform.storage.ReadResponse.IntegerPointsFrame - (*ReadResponse_UnsignedPointsFrame)(nil), // 31: influxdata.platform.storage.ReadResponse.UnsignedPointsFrame - (*ReadResponse_BooleanPointsFrame)(nil), // 32: influxdata.platform.storage.ReadResponse.BooleanPointsFrame - (*ReadResponse_StringPointsFrame)(nil), // 33: influxdata.platform.storage.ReadResponse.StringPointsFrame - nil, // 34: influxdata.platform.storage.CapabilitiesResponse.CapsEntry - (*MeasurementFieldsResponse_MessageField)(nil), // 35: influxdata.platform.storage.MeasurementFieldsResponse.MessageField - (*anypb.Any)(nil), // 36: google.protobuf.Any - (*Predicate)(nil), // 37: influxdata.platform.storage.Predicate -} -var file_storage_common_proto_depIdxs = []int32{ - 36, // 0: influxdata.platform.storage.ReadFilterRequest.ReadSource:type_name -> google.protobuf.Any - 13, // 1: influxdata.platform.storage.ReadFilterRequest.range:type_name -> influxdata.platform.storage.TimestampRange - 37, // 2: influxdata.platform.storage.ReadFilterRequest.predicate:type_name -> influxdata.platform.storage.Predicate - 36, // 3: influxdata.platform.storage.ReadGroupRequest.ReadSource:type_name -> google.protobuf.Any - 13, // 4: influxdata.platform.storage.ReadGroupRequest.range:type_name -> influxdata.platform.storage.TimestampRange - 37, // 5: influxdata.platform.storage.ReadGroupRequest.predicate:type_name -> influxdata.platform.storage.Predicate - 0, // 6: influxdata.platform.storage.ReadGroupRequest.group:type_name -> influxdata.platform.storage.ReadGroupRequest.Group - 8, // 7: influxdata.platform.storage.ReadGroupRequest.aggregate:type_name -> influxdata.platform.storage.Aggregate - 2, // 8: influxdata.platform.storage.Aggregate.type:type_name -> influxdata.platform.storage.Aggregate.AggregateType - 26, // 9: influxdata.platform.storage.ReadResponse.frames:type_name -> influxdata.platform.storage.ReadResponse.Frame - 34, // 10: influxdata.platform.storage.CapabilitiesResponse.caps:type_name -> influxdata.platform.storage.CapabilitiesResponse.CapsEntry - 36, // 11: influxdata.platform.storage.TagKeysRequest.TagsSource:type_name -> google.protobuf.Any - 13, // 12: influxdata.platform.storage.TagKeysRequest.range:type_name -> influxdata.platform.storage.TimestampRange - 37, // 13: influxdata.platform.storage.TagKeysRequest.predicate:type_name -> influxdata.platform.storage.Predicate - 36, // 14: influxdata.platform.storage.TagValuesRequest.TagsSource:type_name -> google.protobuf.Any - 13, // 15: influxdata.platform.storage.TagValuesRequest.range:type_name -> influxdata.platform.storage.TimestampRange - 37, // 16: influxdata.platform.storage.TagValuesRequest.predicate:type_name -> influxdata.platform.storage.Predicate - 36, // 17: influxdata.platform.storage.ReadSeriesCardinalityRequest.ReadSource:type_name -> google.protobuf.Any - 13, // 18: influxdata.platform.storage.ReadSeriesCardinalityRequest.range:type_name -> influxdata.platform.storage.TimestampRange - 37, // 19: 
influxdata.platform.storage.ReadSeriesCardinalityRequest.predicate:type_name -> influxdata.platform.storage.Predicate - 36, // 20: influxdata.platform.storage.MeasurementNamesRequest.source:type_name -> google.protobuf.Any - 13, // 21: influxdata.platform.storage.MeasurementNamesRequest.range:type_name -> influxdata.platform.storage.TimestampRange - 37, // 22: influxdata.platform.storage.MeasurementNamesRequest.predicate:type_name -> influxdata.platform.storage.Predicate - 36, // 23: influxdata.platform.storage.MeasurementTagKeysRequest.source:type_name -> google.protobuf.Any - 13, // 24: influxdata.platform.storage.MeasurementTagKeysRequest.range:type_name -> influxdata.platform.storage.TimestampRange - 37, // 25: influxdata.platform.storage.MeasurementTagKeysRequest.predicate:type_name -> influxdata.platform.storage.Predicate - 36, // 26: influxdata.platform.storage.MeasurementTagValuesRequest.source:type_name -> google.protobuf.Any - 13, // 27: influxdata.platform.storage.MeasurementTagValuesRequest.range:type_name -> influxdata.platform.storage.TimestampRange - 37, // 28: influxdata.platform.storage.MeasurementTagValuesRequest.predicate:type_name -> influxdata.platform.storage.Predicate - 36, // 29: influxdata.platform.storage.MeasurementFieldsRequest.source:type_name -> google.protobuf.Any - 13, // 30: influxdata.platform.storage.MeasurementFieldsRequest.range:type_name -> influxdata.platform.storage.TimestampRange - 37, // 31: influxdata.platform.storage.MeasurementFieldsRequest.predicate:type_name -> influxdata.platform.storage.Predicate - 35, // 32: influxdata.platform.storage.MeasurementFieldsResponse.fields:type_name -> influxdata.platform.storage.MeasurementFieldsResponse.MessageField - 36, // 33: influxdata.platform.storage.ReadWindowAggregateRequest.ReadSource:type_name -> google.protobuf.Any - 13, // 34: influxdata.platform.storage.ReadWindowAggregateRequest.range:type_name -> influxdata.platform.storage.TimestampRange - 37, // 35: influxdata.platform.storage.ReadWindowAggregateRequest.predicate:type_name -> influxdata.platform.storage.Predicate - 8, // 36: influxdata.platform.storage.ReadWindowAggregateRequest.aggregate:type_name -> influxdata.platform.storage.Aggregate - 24, // 37: influxdata.platform.storage.ReadWindowAggregateRequest.window:type_name -> influxdata.platform.storage.Window - 25, // 38: influxdata.platform.storage.Window.every:type_name -> influxdata.platform.storage.Duration - 25, // 39: influxdata.platform.storage.Window.offset:type_name -> influxdata.platform.storage.Duration - 27, // 40: influxdata.platform.storage.ReadResponse.Frame.group:type_name -> influxdata.platform.storage.ReadResponse.GroupFrame - 28, // 41: influxdata.platform.storage.ReadResponse.Frame.series:type_name -> influxdata.platform.storage.ReadResponse.SeriesFrame - 29, // 42: influxdata.platform.storage.ReadResponse.Frame.FloatPoints:type_name -> influxdata.platform.storage.ReadResponse.FloatPointsFrame - 30, // 43: influxdata.platform.storage.ReadResponse.Frame.IntegerPoints:type_name -> influxdata.platform.storage.ReadResponse.IntegerPointsFrame - 31, // 44: influxdata.platform.storage.ReadResponse.Frame.UnsignedPoints:type_name -> influxdata.platform.storage.ReadResponse.UnsignedPointsFrame - 32, // 45: influxdata.platform.storage.ReadResponse.Frame.BooleanPoints:type_name -> influxdata.platform.storage.ReadResponse.BooleanPointsFrame - 33, // 46: influxdata.platform.storage.ReadResponse.Frame.StringPoints:type_name -> influxdata.platform.storage.ReadResponse.StringPointsFrame - 
9, // 47: influxdata.platform.storage.ReadResponse.SeriesFrame.tags:type_name -> influxdata.platform.storage.Tag - 4, // 48: influxdata.platform.storage.ReadResponse.SeriesFrame.data_type:type_name -> influxdata.platform.storage.ReadResponse.DataType - 11, // 49: influxdata.platform.storage.CapabilitiesResponse.CapsEntry.value:type_name -> influxdata.platform.storage.Capability - 5, // 50: influxdata.platform.storage.MeasurementFieldsResponse.MessageField.type:type_name -> influxdata.platform.storage.MeasurementFieldsResponse.FieldType - 51, // [51:51] is the sub-list for method output_type - 51, // [51:51] is the sub-list for method input_type - 51, // [51:51] is the sub-list for extension type_name - 51, // [51:51] is the sub-list for extension extendee - 0, // [0:51] is the sub-list for field type_name -} - -func init() { file_storage_common_proto_init() } -func file_storage_common_proto_init() { - if File_storage_common_proto != nil { - return - } - file_predicate_proto_init() - if !protoimpl.UnsafeEnabled { - file_storage_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadFilterRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadGroupRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Aggregate); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Tag); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Capability); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CapabilitiesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TimestampRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TagKeysRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TagValuesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - 
return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadSeriesCardinalityRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StringValuesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MeasurementNamesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MeasurementTagKeysRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MeasurementTagValuesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MeasurementFieldsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MeasurementFieldsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadWindowAggregateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Window); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Duration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResponse_Frame); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResponse_GroupFrame); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResponse_SeriesFrame); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_storage_common_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResponse_FloatPointsFrame); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResponse_IntegerPointsFrame); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResponse_UnsignedPointsFrame); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResponse_BooleanPointsFrame); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResponse_StringPointsFrame); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_storage_common_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MeasurementFieldsResponse_MessageField); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_storage_common_proto_msgTypes[20].OneofWrappers = []interface{}{ - (*ReadResponse_Frame_Group)(nil), - (*ReadResponse_Frame_Series)(nil), - (*ReadResponse_Frame_FloatPoints)(nil), - (*ReadResponse_Frame_IntegerPoints)(nil), - (*ReadResponse_Frame_UnsignedPoints)(nil), - (*ReadResponse_Frame_BooleanPoints)(nil), - (*ReadResponse_Frame_StringPoints)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_storage_common_proto_rawDesc, - NumEnums: 6, - NumMessages: 30, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_storage_common_proto_goTypes, - DependencyIndexes: file_storage_common_proto_depIdxs, - EnumInfos: file_storage_common_proto_enumTypes, - MessageInfos: file_storage_common_proto_msgTypes, - }.Build() - File_storage_common_proto = out.File - file_storage_common_proto_rawDesc = nil - file_storage_common_proto_goTypes = nil - file_storage_common_proto_depIdxs = nil -} diff --git a/storage/reads/datatypes/storage_common.proto b/storage/reads/datatypes/storage_common.proto deleted file mode 100644 index 5c34c640ec6..00000000000 --- a/storage/reads/datatypes/storage_common.proto +++ /dev/null @@ -1,259 +0,0 @@ -syntax = "proto3"; -package influxdata.platform.storage; -option go_package = ".;datatypes"; - -import "google/protobuf/any.proto"; -import "predicate.proto"; - - -message ReadFilterRequest { - google.protobuf.Any ReadSource = 1; - TimestampRange range = 2; - Predicate predicate = 3; -} - -message ReadGroupRequest { - google.protobuf.Any ReadSource = 1; - TimestampRange range = 2; - Predicate predicate = 3; - - enum Group { - // GroupNone returns all series as a single group. - // The single GroupFrame.TagKeys will be the union of all tag keys. 
- GroupNone = 0; - - // GroupBy returns a group for each unique value of the specified GroupKeys. - GroupBy = 2; - } - - // GroupKeys specifies a list of tag keys used to order the data. - // It is dependent on the Group property to determine its behavior. - repeated string GroupKeys = 4; - - Group group = 5; - Aggregate aggregate = 6; - - // TODO(jlapacik): This field is only used in unit tests. - // Specifically the two tests in group_resultset_test.go. - // This field should be removed and the tests that depend - // on it refactored. - enum HintFlags { - HintNone = 0x00; - HintNoPoints = 0x01; - HintNoSeries = 0x02; - // HintSchemaAllTime performs schema queries without using time ranges - HintSchemaAllTime = 0x04; - } - fixed32 Hints = 7; -} - -message Aggregate { - enum AggregateType { - AggregateTypeNone = 0; - AggregateTypeSum = 1; - AggregateTypeCount = 2; - AggregateTypeMin = 3; - AggregateTypeMax = 4; - AggregateTypeFirst = 5; - AggregateTypeLast = 6; - AggregateTypeMean = 7; - } - - AggregateType type = 1; - - // additional arguments? -} - -message Tag { - bytes key = 1; - bytes value = 2; -} - -// Response message for ReadFilter and ReadGroup -message ReadResponse { - enum FrameType { - FrameTypeSeries = 0; - FrameTypePoints = 1; - } - - enum DataType { - DataTypeFloat = 0; - DataTypeInteger = 1; - DataTypeUnsigned = 2; - DataTypeBoolean = 3; - DataTypeString = 4; - } - - message Frame { - oneof data { - GroupFrame group = 7; - SeriesFrame series = 1; - FloatPointsFrame FloatPoints = 2; - IntegerPointsFrame IntegerPoints = 3; - UnsignedPointsFrame UnsignedPoints = 4; - BooleanPointsFrame BooleanPoints = 5; - StringPointsFrame StringPoints = 6; - } - } - - message GroupFrame { - // TagKeys - repeated bytes TagKeys = 1; - // PartitionKeyVals is the values of the partition key for this group, order matching ReadGroupRequest.GroupKeys - repeated bytes PartitionKeyVals = 2; - } - - message SeriesFrame { - repeated Tag tags = 1; // [(gogoproto.nullable) = false]; - DataType data_type = 2; - } - - message FloatPointsFrame { - repeated sfixed64 timestamps = 1; - repeated double values = 2; - } - - message IntegerPointsFrame { - repeated sfixed64 timestamps = 1; - repeated int64 values = 2; - } - - message UnsignedPointsFrame { - repeated sfixed64 timestamps = 1; - repeated uint64 values = 2; - } - - message BooleanPointsFrame { - repeated sfixed64 timestamps = 1; - repeated bool values = 2; - } - - message StringPointsFrame { - repeated sfixed64 timestamps = 1; - repeated string values = 2; - } - - repeated Frame frames = 1; // [(gogoproto.nullable) = false]; -} - -message Capability { - // Features contains the specific features supported - // by this capability. - repeated string features = 1; -} - -message CapabilitiesResponse { - // Capabilities contains the set of capabilities supported - // by the storage engine. It is a map of method names to - // the detailed capability information for the method. - map<string, Capability> caps = 1; -} - -// Specifies a continuous range of nanosecond timestamps. -message TimestampRange { - // Start defines the inclusive lower bound. - int64 start = 1; - - // End defines the exclusive upper bound. - int64 end = 2; -} - -// TagKeysRequest is the request message for Storage.TagKeys. -message TagKeysRequest { - google.protobuf.Any TagsSource = 1; - TimestampRange range = 2; - Predicate predicate = 3; -} - -// TagValuesRequest is the request message for Storage.TagValues.
-message TagValuesRequest { - google.protobuf.Any TagsSource = 1; - TimestampRange range = 2; - Predicate predicate = 3; - string tag_key = 4; -} - -message ReadSeriesCardinalityRequest { - google.protobuf.Any ReadSource = 1; - TimestampRange range = 2; - Predicate predicate = 3; -} - -// Response message for Storage.TagKeys, Storage.TagValues Storage.MeasurementNames, -// Storage.MeasurementTagKeys and Storage.MeasurementTagValues. -message StringValuesResponse { - repeated bytes values = 1; -} - -// MeasurementNamesRequest is the request message for Storage.MeasurementNames. -message MeasurementNamesRequest { - google.protobuf.Any source = 1; - TimestampRange range = 2; - Predicate predicate = 3; -} - -// MeasurementTagKeysRequest is the request message for Storage.MeasurementTagKeys. -message MeasurementTagKeysRequest { - google.protobuf.Any source = 1; - string measurement = 2; - TimestampRange range = 3; - Predicate predicate = 4; -} - -// MeasurementTagValuesRequest is the request message for Storage.MeasurementTagValues. -message MeasurementTagValuesRequest { - google.protobuf.Any source = 1; - string measurement = 2; - string tag_key = 3; - TimestampRange range = 4; - Predicate predicate = 5; -} - -// MeasurementFieldsRequest is the request message for Storage.MeasurementFields. -message MeasurementFieldsRequest { - google.protobuf.Any source = 1; - string measurement = 2; - TimestampRange range = 3; - Predicate predicate = 4; -} - -// MeasurementFieldsResponse is the response message for Storage.MeasurementFields. -message MeasurementFieldsResponse { - enum FieldType { - FieldTypeFloat = 0; - FieldTypeInteger = 1; - FieldTypeUnsigned = 2; - FieldTypeString = 3; - FieldTypeBoolean = 4; - FieldTypeUndefined = 5; - } - - message MessageField { - string key = 1; - FieldType type = 2; - sfixed64 timestamp = 3; - } - - repeated MessageField fields = 1; // [(gogoproto.nullable) = false]; -} - -message ReadWindowAggregateRequest { - google.protobuf.Any ReadSource = 1; - TimestampRange range = 2; - Predicate predicate = 3; - int64 WindowEvery = 4; - int64 Offset = 6; - repeated Aggregate aggregate = 5; - Window window = 7; -} - -message Window { - Duration every = 1; - Duration offset = 2; -} - -message Duration { - int64 nsecs = 1; - int64 months = 2; - bool negative = 3; -} diff --git a/storage/reads/gen.go b/storage/reads/gen.go deleted file mode 100644 index c31fcc3ed47..00000000000 --- a/storage/reads/gen.go +++ /dev/null @@ -1,4 +0,0 @@ -package reads - -//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@array_cursor.gen.go.tmpldata array_cursor.gen.go.tmpl -//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@array_cursor.gen.go.tmpldata -o=array_cursor_gen_test.go array_cursor_test.gen.go.tmpl diff --git a/storage/reads/group_resultset.go b/storage/reads/group_resultset.go deleted file mode 100644 index 04cbfa3c7fe..00000000000 --- a/storage/reads/group_resultset.go +++ /dev/null @@ -1,370 +0,0 @@ -package reads - -import ( - "bytes" - "context" - "fmt" - "sort" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -type groupResultSet struct { - ctx context.Context - req *datatypes.ReadGroupRequest - agg *datatypes.Aggregate - arrayCursors multiShardCursors - - i int - seriesRows []*SeriesRow - keys [][]byte - nilSort []byte - groupByCursor groupByCursor - km KeyMerger - - 
newSeriesCursorFn func() (SeriesCursor, error) - nextGroupFn func(c *groupResultSet) GroupCursor - - eof bool -} - -type GroupOption func(g *groupResultSet) - -// GroupOptionNilSortLo configures nil values to be sorted lower than any -// other value -func GroupOptionNilSortLo() GroupOption { - return func(g *groupResultSet) { - g.nilSort = NilSortLo - } -} - -// IsLastDescendingGroupOptimization checks if this request is using the `last` aggregate type. -// It returns true if an ascending cursor should be used (all other conditions) -// or a descending cursor (when `last` is used). -func IsLastDescendingGroupOptimization(req *datatypes.ReadGroupRequest) bool { - return req.Aggregate != nil && req.Aggregate.Type == datatypes.Aggregate_AggregateTypeLast -} - -func NewGroupResultSet(ctx context.Context, req *datatypes.ReadGroupRequest, newSeriesCursorFn func() (SeriesCursor, error), opts ...GroupOption) GroupResultSet { - g := &groupResultSet{ - ctx: ctx, - req: req, - agg: req.Aggregate, - keys: make([][]byte, len(req.GroupKeys)), - nilSort: NilSortHi, - newSeriesCursorFn: newSeriesCursorFn, - } - - for _, o := range opts { - o(g) - } - - ascending := !IsLastDescendingGroupOptimization(req) - g.arrayCursors = newMultiShardArrayCursors(ctx, req.Range.GetStart(), req.Range.GetEnd(), ascending) - - for i, k := range req.GroupKeys { - g.keys[i] = []byte(k) - } - - switch req.Group { - case datatypes.ReadGroupRequest_GroupBy: - g.nextGroupFn = groupByNextGroup - g.groupByCursor = groupByCursor{ - ctx: ctx, - arrayCursors: g.arrayCursors, - agg: req.Aggregate, - vals: make([][]byte, len(req.GroupKeys)), - } - - if n, err := g.groupBySort(); n == 0 || err != nil { - return nil - } - - case datatypes.ReadGroupRequest_GroupNone: - g.nextGroupFn = groupNoneNextGroup - - if n, err := g.groupNoneSort(); n == 0 || err != nil { - return nil - } - - default: - panic("not implemented") - } - - return g -} - -// NilSort values determine the lexicographical order of nil values in the -// partition key -var ( - // nil sorts lowest - NilSortLo = []byte{0x00} - // nil sorts highest - NilSortHi = []byte{0xff} -) - -func (g *groupResultSet) Err() error { return nil } - -func (g *groupResultSet) Close() {} - -func (g *groupResultSet) Next() GroupCursor { - if g.eof { - return nil - } - - return g.nextGroupFn(g) -} - -// seriesHasPoints reads the first block of TSM data to verify the series has points for -// the time range of the query. -func (g *groupResultSet) seriesHasPoints(row *SeriesRow) bool { - // TODO(sgc): this is expensive. Storage engine must provide efficient time range queries of series keys. 
- cur := g.arrayCursors.createCursor(*row) - var ts []int64 - switch c := cur.(type) { - case cursors.IntegerArrayCursor: - a := c.Next() - ts = a.Timestamps - case cursors.FloatArrayCursor: - a := c.Next() - ts = a.Timestamps - case cursors.UnsignedArrayCursor: - a := c.Next() - ts = a.Timestamps - case cursors.BooleanArrayCursor: - a := c.Next() - ts = a.Timestamps - case cursors.StringArrayCursor: - a := c.Next() - ts = a.Timestamps - case nil: - return false - default: - panic(fmt.Sprintf("unreachable: %T", c)) - } - cur.Close() - return len(ts) > 0 -} - -func groupNoneNextGroup(g *groupResultSet) GroupCursor { - seriesCursor, err := g.newSeriesCursorFn() - if err != nil { - // TODO(sgc): store error - return nil - } else if seriesCursor == nil { - return nil - } - - g.eof = true - return &groupNoneCursor{ - ctx: g.ctx, - arrayCursors: g.arrayCursors, - agg: g.agg, - cur: seriesCursor, - keys: g.km.Get(), - } -} - -func (g *groupResultSet) groupNoneSort() (int, error) { - seriesCursor, err := g.newSeriesCursorFn() - if err != nil { - return 0, err - } else if seriesCursor == nil { - return 0, nil - } - - allTime := datatypes.HintFlags(g.req.Hints).HintSchemaAllTime() - g.km.Clear() - n := 0 - seriesRow := seriesCursor.Next() - for seriesRow != nil { - if allTime || g.seriesHasPoints(seriesRow) { - n++ - g.km.MergeTagKeys(seriesRow.Tags) - } - seriesRow = seriesCursor.Next() - } - - seriesCursor.Close() - return n, nil -} - -func groupByNextGroup(g *groupResultSet) GroupCursor { - row := g.seriesRows[g.i] - for i := range g.keys { - g.groupByCursor.vals[i] = row.Tags.Get(g.keys[i]) - } - - g.km.Clear() - rowKey := row.SortKey - j := g.i - for j < len(g.seriesRows) && bytes.Equal(rowKey, g.seriesRows[j].SortKey) { - g.km.MergeTagKeys(g.seriesRows[j].Tags) - j++ - } - - g.groupByCursor.reset(g.seriesRows[g.i:j]) - g.groupByCursor.keys = g.km.Get() - - g.i = j - if j == len(g.seriesRows) { - g.eof = true - } - - return &g.groupByCursor -} - -func (g *groupResultSet) groupBySort() (int, error) { - seriesCursor, err := g.newSeriesCursorFn() - if err != nil { - return 0, err - } else if seriesCursor == nil { - return 0, nil - } - - var seriesRows []*SeriesRow - vals := make([][]byte, len(g.keys)) - tagsBuf := &tagsBuffer{sz: 4096} - allTime := datatypes.HintFlags(g.req.Hints).HintSchemaAllTime() - - seriesRow := seriesCursor.Next() - for seriesRow != nil { - if allTime || g.seriesHasPoints(seriesRow) { - nr := *seriesRow - nr.SeriesTags = tagsBuf.copyTags(nr.SeriesTags) - nr.Tags = tagsBuf.copyTags(nr.Tags) - - l := len(g.keys) // for sort key separators - for i, k := range g.keys { - vals[i] = nr.Tags.Get(k) - if len(vals[i]) == 0 { - vals[i] = g.nilSort - } - l += len(vals[i]) - } - - nr.SortKey = make([]byte, 0, l) - for _, v := range vals { - nr.SortKey = append(nr.SortKey, v...) 
- // separate sort key values with ascii null character - nr.SortKey = append(nr.SortKey, '\000') - } - - seriesRows = append(seriesRows, &nr) - } - seriesRow = seriesCursor.Next() - } - - sort.Slice(seriesRows, func(i, j int) bool { - return bytes.Compare(seriesRows[i].SortKey, seriesRows[j].SortKey) == -1 - }) - - g.seriesRows = seriesRows - - seriesCursor.Close() - return len(seriesRows), nil -} - -type groupNoneCursor struct { - ctx context.Context - arrayCursors multiShardCursors - agg *datatypes.Aggregate - cur SeriesCursor - row SeriesRow - keys [][]byte - cursor cursors.Cursor - err error -} - -func (c *groupNoneCursor) Err() error { return nil } -func (c *groupNoneCursor) Tags() models.Tags { return c.row.Tags } -func (c *groupNoneCursor) Keys() [][]byte { return c.keys } -func (c *groupNoneCursor) PartitionKeyVals() [][]byte { return nil } -func (c *groupNoneCursor) Close() { c.cur.Close() } -func (c *groupNoneCursor) Stats() cursors.CursorStats { return c.row.Query.Stats() } - -func (c *groupNoneCursor) Aggregate() *datatypes.Aggregate { - return c.agg -} - -func (c *groupNoneCursor) Next() bool { - row := c.cur.Next() - if row == nil { - return false - } - - c.row = *row - - c.cursor, c.err = c.createCursor(c.row) - return c.err == nil -} - -func (c *groupNoneCursor) createCursor(seriesRow SeriesRow) (cur cursors.Cursor, err error) { - cur = c.arrayCursors.createCursor(c.row) - if c.agg != nil { - cur, err = newAggregateArrayCursor(c.ctx, c.agg, cur) - } - return cur, err -} - -func (c *groupNoneCursor) Cursor() cursors.Cursor { - return c.cursor -} - -type groupByCursor struct { - ctx context.Context - arrayCursors multiShardCursors - agg *datatypes.Aggregate - i int - seriesRows []*SeriesRow - keys [][]byte - vals [][]byte - cursor cursors.Cursor - err error -} - -func (c *groupByCursor) reset(seriesRows []*SeriesRow) { - c.i = 0 - c.seriesRows = seriesRows -} - -func (c *groupByCursor) Err() error { return nil } -func (c *groupByCursor) Keys() [][]byte { return c.keys } -func (c *groupByCursor) PartitionKeyVals() [][]byte { return c.vals } -func (c *groupByCursor) Tags() models.Tags { return c.seriesRows[c.i-1].Tags } -func (c *groupByCursor) Close() {} - -func (c *groupByCursor) Aggregate() *datatypes.Aggregate { - return c.agg -} - -func (c *groupByCursor) Next() bool { - if c.i < len(c.seriesRows) { - c.i++ - c.cursor, c.err = c.createCursor(*c.seriesRows[c.i-1]) - return c.err == nil - } - return false -} - -func (c *groupByCursor) createCursor(seriesRow SeriesRow) (cur cursors.Cursor, err error) { - cur = c.arrayCursors.createCursor(seriesRow) - if c.agg != nil { - cur, err = newAggregateArrayCursor(c.ctx, c.agg, cur) - } - return cur, err -} - -func (c *groupByCursor) Cursor() cursors.Cursor { - return c.cursor -} - -func (c *groupByCursor) Stats() cursors.CursorStats { - var stats cursors.CursorStats - for _, seriesRow := range c.seriesRows { - stats.Add(seriesRow.Query.Stats()) - } - return stats -} diff --git a/storage/reads/group_resultset_test.go b/storage/reads/group_resultset_test.go deleted file mode 100644 index 52b78f647c8..00000000000 --- a/storage/reads/group_resultset_test.go +++ /dev/null @@ -1,503 +0,0 @@ -package reads_test - -import ( - "context" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/data/gen" - "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - 
"github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -func TestNewGroupResultSet_Sorting(t *testing.T) { - tests := []struct { - name string - cur reads.SeriesCursor - group datatypes.ReadGroupRequest_Group - keys []string - exp string - }{ - { - name: "group by tag1 in all series", - cur: &sliceSeriesCursor{ - rows: newSeriesRows( - "cpu,tag0=val00,tag1=val10", - "cpu,tag0=val00,tag1=val11", - "cpu,tag0=val00,tag1=val12", - "cpu,tag0=val01,tag1=val10", - "cpu,tag0=val01,tag1=val11", - "cpu,tag0=val01,tag1=val12", - )}, - group: datatypes.ReadGroupRequest_GroupBy, - keys: []string{"tag1"}, - exp: `group: - tag key : _m,tag0,tag1 - partition key: val10 - series: _m=cpu,tag0=val00,tag1=val10 - series: _m=cpu,tag0=val01,tag1=val10 -group: - tag key : _m,tag0,tag1 - partition key: val11 - series: _m=cpu,tag0=val00,tag1=val11 - series: _m=cpu,tag0=val01,tag1=val11 -group: - tag key : _m,tag0,tag1 - partition key: val12 - series: _m=cpu,tag0=val00,tag1=val12 - series: _m=cpu,tag0=val01,tag1=val12 -`, - }, - { - name: "group by tags key collision", - cur: &sliceSeriesCursor{ - rows: newSeriesRows( - "cpu,tag0=000,tag1=111", - "cpu,tag0=00,tag1=0111", - "cpu,tag0=0,tag1=00111", - "cpu,tag0=0001,tag1=11", - "cpu,tag0=00011,tag1=1", - )}, - group: datatypes.ReadGroupRequest_GroupBy, - keys: []string{"tag0", "tag1"}, - exp: `group: - tag key : _m,tag0,tag1 - partition key: 0,00111 - series: _m=cpu,tag0=0,tag1=00111 -group: - tag key : _m,tag0,tag1 - partition key: 00,0111 - series: _m=cpu,tag0=00,tag1=0111 -group: - tag key : _m,tag0,tag1 - partition key: 000,111 - series: _m=cpu,tag0=000,tag1=111 -group: - tag key : _m,tag0,tag1 - partition key: 0001,11 - series: _m=cpu,tag0=0001,tag1=11 -group: - tag key : _m,tag0,tag1 - partition key: 00011,1 - series: _m=cpu,tag0=00011,tag1=1 -`, - }, - { - name: "group by tags key sort collision", - cur: &sliceSeriesCursor{ - rows: newSeriesRows( - "cpu,tag0=a,tag1=b", - "cpu,tag0=a*,tag1=b", - "cpu,tag0=a*", - )}, - group: datatypes.ReadGroupRequest_GroupBy, - keys: []string{"tag0", "tag1"}, - exp: `group: - tag key : _m,tag0,tag1 - partition key: a,b - series: _m=cpu,tag0=a,tag1=b -group: - tag key : _m,tag0,tag1 - partition key: a*,b - series: _m=cpu,tag0=a*,tag1=b -group: - tag key : _m,tag0 - partition key: a*, - series: _m=cpu,tag0=a* -`, - }, - { - name: "group by tags missing tag", - cur: &sliceSeriesCursor{ - rows: newSeriesRows( - "cpu,tag0=a,tag1=b", - "cpu,tag1=b", - )}, - group: datatypes.ReadGroupRequest_GroupBy, - keys: []string{"tag0", "tag1"}, - exp: `group: - tag key : _m,tag0,tag1 - partition key: a,b - series: _m=cpu,tag0=a,tag1=b -group: - tag key : _m,tag1 - partition key: ,b - series: _m=cpu,tag1=b -`, - }, - { - name: "group by tag1 in partial series", - cur: &sliceSeriesCursor{ - rows: newSeriesRows( - "aaa,tag0=val00", - "aaa,tag0=val01", - "cpu,tag0=val00,tag1=val10", - "cpu,tag0=val00,tag1=val11", - "cpu,tag0=val00,tag1=val12", - "cpu,tag0=val01,tag1=val10", - "cpu,tag0=val01,tag1=val11", - "cpu,tag0=val01,tag1=val12", - )}, - group: datatypes.ReadGroupRequest_GroupBy, - keys: []string{"tag1"}, - exp: `group: - tag key : _m,tag0,tag1 - partition key: val10 - series: _m=cpu,tag0=val00,tag1=val10 - series: _m=cpu,tag0=val01,tag1=val10 -group: - tag key : _m,tag0,tag1 - partition key: val11 - series: _m=cpu,tag0=val00,tag1=val11 - series: _m=cpu,tag0=val01,tag1=val11 -group: - tag key : _m,tag0,tag1 - partition key: val12 - series: _m=cpu,tag0=val00,tag1=val12 - series: _m=cpu,tag0=val01,tag1=val12 -group: - tag key : _m,tag0 - 
partition key: - series: _m=aaa,tag0=val00 - series: _m=aaa,tag0=val01 -`, - }, - { - name: "group by tag2,tag1 with partial series", - cur: &sliceSeriesCursor{ - rows: newSeriesRows( - "aaa,tag0=val00", - "aaa,tag0=val01", - "cpu,tag0=val00,tag1=val10", - "cpu,tag0=val00,tag1=val11", - "cpu,tag0=val00,tag1=val12", - "mem,tag1=val10,tag2=val20", - "mem,tag1=val11,tag2=val20", - "mem,tag1=val11,tag2=val21", - )}, - group: datatypes.ReadGroupRequest_GroupBy, - keys: []string{"tag2", "tag1"}, - exp: `group: - tag key : _m,tag1,tag2 - partition key: val20,val10 - series: _m=mem,tag1=val10,tag2=val20 -group: - tag key : _m,tag1,tag2 - partition key: val20,val11 - series: _m=mem,tag1=val11,tag2=val20 -group: - tag key : _m,tag1,tag2 - partition key: val21,val11 - series: _m=mem,tag1=val11,tag2=val21 -group: - tag key : _m,tag0,tag1 - partition key: ,val10 - series: _m=cpu,tag0=val00,tag1=val10 -group: - tag key : _m,tag0,tag1 - partition key: ,val11 - series: _m=cpu,tag0=val00,tag1=val11 -group: - tag key : _m,tag0,tag1 - partition key: ,val12 - series: _m=cpu,tag0=val00,tag1=val12 -group: - tag key : _m,tag0 - partition key: , - series: _m=aaa,tag0=val00 - series: _m=aaa,tag0=val01 -`, - }, - { - name: "group by tag0,tag2 with partial series", - cur: &sliceSeriesCursor{ - rows: newSeriesRows( - "aaa,tag0=val00", - "aaa,tag0=val01", - "cpu,tag0=val00,tag1=val10", - "cpu,tag0=val00,tag1=val11", - "cpu,tag0=val00,tag1=val12", - "mem,tag1=val10,tag2=val20", - "mem,tag1=val11,tag2=val20", - "mem,tag1=val11,tag2=val21", - )}, - group: datatypes.ReadGroupRequest_GroupBy, - keys: []string{"tag0", "tag2"}, - exp: `group: - tag key : _m,tag0,tag1 - partition key: val00, - series: _m=aaa,tag0=val00 - series: _m=cpu,tag0=val00,tag1=val10 - series: _m=cpu,tag0=val00,tag1=val11 - series: _m=cpu,tag0=val00,tag1=val12 -group: - tag key : _m,tag0 - partition key: val01, - series: _m=aaa,tag0=val01 -group: - tag key : _m,tag1,tag2 - partition key: ,val20 - series: _m=mem,tag1=val10,tag2=val20 - series: _m=mem,tag1=val11,tag2=val20 -group: - tag key : _m,tag1,tag2 - partition key: ,val21 - series: _m=mem,tag1=val11,tag2=val21 -`, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - newCursor := func() (reads.SeriesCursor, error) { - return tt.cur, nil - } - - var hints datatypes.HintFlags - hints.SetHintSchemaAllTime() - rs := reads.NewGroupResultSet(context.Background(), &datatypes.ReadGroupRequest{ - Group: tt.group, - GroupKeys: tt.keys, - // TODO(jlapacik): - // Hints is not used except for the tests in this file. - // Eventually this field should be removed entirely. 
- Hints: uint32(hints), - }, newCursor) - - sb := new(strings.Builder) - GroupResultSetToString(sb, rs, SkipNilCursor()) - - if got := sb.String(); !cmp.Equal(got, tt.exp) { - t.Errorf("unexpected value; -got/+exp\n%s", cmp.Diff(strings.Split(got, "\n"), strings.Split(tt.exp, "\n"))) - } - }) - } -} - -func TestNewGroupResultSet_GroupNone_NoDataReturnsNil(t *testing.T) { - newCursor := func() (reads.SeriesCursor, error) { - return &sliceSeriesCursor{ - rows: newSeriesRows( - "aaa,tag0=val00", - "aaa,tag0=val01", - )}, nil - } - - rs := reads.NewGroupResultSet(context.Background(), &datatypes.ReadGroupRequest{Group: datatypes.ReadGroupRequest_GroupNone}, newCursor) - if rs != nil { - t.Errorf("expected nil cursor") - } -} - -func TestNewGroupResultSet_GroupBy_NoDataReturnsNil(t *testing.T) { - newCursor := func() (reads.SeriesCursor, error) { - return &sliceSeriesCursor{ - rows: newSeriesRows( - "aaa,tag0=val00", - "aaa,tag0=val01", - )}, nil - } - - rs := reads.NewGroupResultSet(context.Background(), &datatypes.ReadGroupRequest{Group: datatypes.ReadGroupRequest_GroupBy, GroupKeys: []string{"tag0"}}, newCursor) - if rs != nil { - t.Errorf("expected nil cursor") - } -} - -func TestNewGroupResultSet_SortOrder(t *testing.T) { - tests := []struct { - name string - keys []string - opts []reads.GroupOption - exp string - }{ - { - name: "nil hi", - keys: []string{"tag0", "tag2"}, - exp: `group: - tag key : _m,tag0,tag1 - partition key: val00, - series: _m=aaa,tag0=val00 - series: _m=cpu,tag0=val00,tag1=val10 - series: _m=cpu,tag0=val00,tag1=val11 - series: _m=cpu,tag0=val00,tag1=val12 -group: - tag key : _m,tag0 - partition key: val01, - series: _m=aaa,tag0=val01 -group: - tag key : _m,tag1,tag2 - partition key: ,val20 - series: _m=mem,tag1=val10,tag2=val20 - series: _m=mem,tag1=val11,tag2=val20 -group: - tag key : _m,tag1,tag2 - partition key: ,val21 - series: _m=mem,tag1=val11,tag2=val21 -`, - }, - { - name: "nil lo", - keys: []string{"tag0", "tag2"}, - opts: []reads.GroupOption{reads.GroupOptionNilSortLo()}, - exp: `group: - tag key : _m,tag1,tag2 - partition key: ,val20 - series: _m=mem,tag1=val10,tag2=val20 - series: _m=mem,tag1=val11,tag2=val20 -group: - tag key : _m,tag1,tag2 - partition key: ,val21 - series: _m=mem,tag1=val11,tag2=val21 -group: - tag key : _m,tag0,tag1 - partition key: val00, - series: _m=aaa,tag0=val00 - series: _m=cpu,tag0=val00,tag1=val10 - series: _m=cpu,tag0=val00,tag1=val11 - series: _m=cpu,tag0=val00,tag1=val12 -group: - tag key : _m,tag0 - partition key: val01, - series: _m=aaa,tag0=val01 -`, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - newCursor := func() (reads.SeriesCursor, error) { - return &sliceSeriesCursor{ - rows: newSeriesRows( - "aaa,tag0=val00", - "aaa,tag0=val01", - "cpu,tag0=val00,tag1=val10", - "cpu,tag0=val00,tag1=val11", - "cpu,tag0=val00,tag1=val12", - "mem,tag1=val10,tag2=val20", - "mem,tag1=val11,tag2=val20", - "mem,tag1=val11,tag2=val21", - )}, nil - } - - var hints datatypes.HintFlags - hints.SetHintSchemaAllTime() - rs := reads.NewGroupResultSet(context.Background(), &datatypes.ReadGroupRequest{ - Group: datatypes.ReadGroupRequest_GroupBy, - GroupKeys: tt.keys, - // TODO(jlapacik): - // Hints is not used except for the tests in this file. - // Eventually this field should be removed entirely. - Hints: uint32(hints), - }, newCursor, tt.opts...) 
- - sb := new(strings.Builder) - GroupResultSetToString(sb, rs, SkipNilCursor()) - - if got := sb.String(); !cmp.Equal(got, tt.exp) { - t.Errorf("unexpected value; -got/+exp\n%s", cmp.Diff(strings.Split(got, "\n"), strings.Split(tt.exp, "\n"))) - } - }) - } -} - -type sliceSeriesCursor struct { - rows []reads.SeriesRow - i int -} - -func newSeriesRows(keys ...string) []reads.SeriesRow { - rows := make([]reads.SeriesRow, len(keys)) - for i := range keys { - rows[i].Name, rows[i].SeriesTags = models.ParseKeyBytes([]byte(keys[i])) - rows[i].Tags = rows[i].SeriesTags.Clone() - rows[i].Tags.Set([]byte("_m"), rows[i].Name) - } - return rows -} - -func (s *sliceSeriesCursor) Close() {} -func (s *sliceSeriesCursor) Err() error { return nil } - -func (s *sliceSeriesCursor) Next() *reads.SeriesRow { - if s.i < len(s.rows) { - s.i++ - return &s.rows[s.i-1] - } - return nil -} - -func BenchmarkNewGroupResultSet_GroupBy(b *testing.B) { - card := []int{10, 10, 10} - vals := make([]gen.CountableSequence, len(card)) - for i := range card { - vals[i] = gen.NewCounterByteSequenceCount(card[i]) - } - - tags := gen.NewTagsValuesSequenceValues("tag", vals) - rows := make([]reads.SeriesRow, tags.Count()) - for i := range rows { - tags.Next() - t := tags.Value().Clone() - rows[i].SeriesTags = t - rows[i].Tags = t - rows[i].Name = []byte("m0") - } - - cur := &sliceSeriesCursor{rows: rows} - newCursor := func() (reads.SeriesCursor, error) { - cur.i = 0 - return cur, nil - } - - b.ResetTimer() - b.ReportAllocs() - var hints datatypes.HintFlags - hints.SetHintSchemaAllTime() - - for i := 0; i < b.N; i++ { - rs := reads.NewGroupResultSet(context.Background(), &datatypes.ReadGroupRequest{Group: datatypes.ReadGroupRequest_GroupBy, GroupKeys: []string{"tag2"}, Hints: uint32(hints)}, newCursor) - rs.Close() - } -} - -func TestNewGroupResultSet_TimeRange(t *testing.T) { - newCursor := newMockReadCursor( - "clicks click=1 1", - ) - for i := range newCursor.rows { - newCursor.rows[0].Query[i] = &mockCursorIterator{ - newCursorFn: func(req *cursors.CursorRequest) cursors.Cursor { - if want, got := int64(0), req.StartTime; want != got { - t.Errorf("unexpected start time -want/+got:\n\t- %d\n\t+ %d", want, got) - } - if want, got := int64(29), req.EndTime; want != got { - t.Errorf("unexpected end time -want/+got:\n\t- %d\n\t+ %d", want, got) - } - return &mockIntegerArrayCursor{} - }, - } - } - - ctx := context.Background() - req := datatypes.ReadGroupRequest{ - Range: &datatypes.TimestampRange{ - Start: 0, - End: 30, - }, - } - - resultSet := reads.NewGroupResultSet(ctx, &req, func() (reads.SeriesCursor, error) { - return &newCursor, nil - }) - groupByCursor := resultSet.Next() - if groupByCursor == nil { - t.Fatal("unexpected: groupByCursor was nil") - } - if groupByCursor.Next() { - t.Fatal("unexpected: groupByCursor.Next should not have advanced") - } -} diff --git a/storage/reads/helpers_test.go b/storage/reads/helpers_test.go deleted file mode 100644 index ff8698b8939..00000000000 --- a/storage/reads/helpers_test.go +++ /dev/null @@ -1 +0,0 @@ -package reads_test diff --git a/storage/reads/influxql_eval.go b/storage/reads/influxql_eval.go deleted file mode 100644 index 73679a4772a..00000000000 --- a/storage/reads/influxql_eval.go +++ /dev/null @@ -1,284 +0,0 @@ -package reads - -import ( - "math" - "regexp" - - "github.com/influxdata/influxql" -) - -// evalExpr evaluates expr against a map. 
-func evalExpr(expr influxql.Expr, m Valuer) interface{} { - if expr == nil { - return nil - } - - switch expr := expr.(type) { - case *influxql.BinaryExpr: - return evalBinaryExpr(expr, m) - case *influxql.BooleanLiteral: - return expr.Val - case *influxql.IntegerLiteral: - return expr.Val - case *influxql.UnsignedLiteral: - return expr.Val - case *influxql.NumberLiteral: - return expr.Val - case *influxql.ParenExpr: - return evalExpr(expr.Expr, m) - case *influxql.RegexLiteral: - return expr.Val - case *influxql.StringLiteral: - return expr.Val - case *influxql.VarRef: - v, _ := m.Value(expr.Val) - return v - default: - return nil - } -} - -func evalBinaryExpr(expr *influxql.BinaryExpr, m Valuer) interface{} { - lhs := evalExpr(expr.LHS, m) - rhs := evalExpr(expr.RHS, m) - if lhs == nil && rhs != nil { - // When the LHS is nil and the RHS is a boolean, implicitly cast the - // nil to false. - if _, ok := rhs.(bool); ok { - lhs = false - } - } else if lhs != nil && rhs == nil { - // Implicit cast of the RHS nil to false when the LHS is a boolean. - if _, ok := lhs.(bool); ok { - rhs = false - } - } - - // Evaluate if both sides are simple types. - switch lhs := lhs.(type) { - case bool: - rhs, ok := rhs.(bool) - switch expr.Op { - case influxql.AND: - return ok && (lhs && rhs) - case influxql.OR: - return ok && (lhs || rhs) - case influxql.BITWISE_AND: - return ok && (lhs && rhs) - case influxql.BITWISE_OR: - return ok && (lhs || rhs) - case influxql.BITWISE_XOR: - return ok && (lhs != rhs) - case influxql.EQ: - return ok && (lhs == rhs) - case influxql.NEQ: - return ok && (lhs != rhs) - } - case float64: - // Try the rhs as a float64 or int64 - rhsf, ok := rhs.(float64) - if !ok { - var rhsi int64 - if rhsi, ok = rhs.(int64); ok { - rhsf = float64(rhsi) - } - } - - rhs := rhsf - switch expr.Op { - case influxql.EQ: - return ok && (lhs == rhs) - case influxql.NEQ: - return ok && (lhs != rhs) - case influxql.LT: - return ok && (lhs < rhs) - case influxql.LTE: - return ok && (lhs <= rhs) - case influxql.GT: - return ok && (lhs > rhs) - case influxql.GTE: - return ok && (lhs >= rhs) - case influxql.ADD: - if !ok { - return nil - } - return lhs + rhs - case influxql.SUB: - if !ok { - return nil - } - return lhs - rhs - case influxql.MUL: - if !ok { - return nil - } - return lhs * rhs - case influxql.DIV: - if !ok { - return nil - } else if rhs == 0 { - return float64(0) - } - return lhs / rhs - case influxql.MOD: - if !ok { - return nil - } - return math.Mod(lhs, rhs) - } - case int64: - // Try as a float64 to see if a float cast is required. 
- rhsf, ok := rhs.(float64) - if ok { - lhs := float64(lhs) - rhs := rhsf - switch expr.Op { - case influxql.EQ: - return lhs == rhs - case influxql.NEQ: - return lhs != rhs - case influxql.LT: - return lhs < rhs - case influxql.LTE: - return lhs <= rhs - case influxql.GT: - return lhs > rhs - case influxql.GTE: - return lhs >= rhs - case influxql.ADD: - return lhs + rhs - case influxql.SUB: - return lhs - rhs - case influxql.MUL: - return lhs * rhs - case influxql.DIV: - if rhs == 0 { - return float64(0) - } - return lhs / rhs - case influxql.MOD: - return math.Mod(lhs, rhs) - } - } else { - rhs, ok := rhs.(int64) - switch expr.Op { - case influxql.EQ: - return ok && (lhs == rhs) - case influxql.NEQ: - return ok && (lhs != rhs) - case influxql.LT: - return ok && (lhs < rhs) - case influxql.LTE: - return ok && (lhs <= rhs) - case influxql.GT: - return ok && (lhs > rhs) - case influxql.GTE: - return ok && (lhs >= rhs) - case influxql.ADD: - if !ok { - return nil - } - return lhs + rhs - case influxql.SUB: - if !ok { - return nil - } - return lhs - rhs - case influxql.MUL: - if !ok { - return nil - } - return lhs * rhs - case influxql.DIV: - if !ok { - return nil - } else if rhs == 0 { - return float64(0) - } - return lhs / rhs - case influxql.MOD: - if !ok { - return nil - } else if rhs == 0 { - return int64(0) - } - return lhs % rhs - case influxql.BITWISE_AND: - if !ok { - return nil - } - return lhs & rhs - case influxql.BITWISE_OR: - if !ok { - return nil - } - return lhs | rhs - case influxql.BITWISE_XOR: - if !ok { - return nil - } - return lhs ^ rhs - } - } - case string: - switch expr.Op { - case influxql.EQ: - rhs, ok := rhs.(string) - if !ok { - return nil - } - return lhs == rhs - case influxql.NEQ: - rhs, ok := rhs.(string) - if !ok { - return nil - } - return lhs != rhs - case influxql.EQREGEX: - rhs, ok := rhs.(*regexp.Regexp) - if !ok { - return nil - } - return rhs.MatchString(lhs) - case influxql.NEQREGEX: - rhs, ok := rhs.(*regexp.Regexp) - if !ok { - return nil - } - return !rhs.MatchString(lhs) - } - case []byte: - switch expr.Op { - case influxql.EQ: - rhs, ok := rhs.(string) - if !ok { - return nil - } - return string(lhs) == rhs - case influxql.NEQ: - rhs, ok := rhs.(string) - if !ok { - return nil - } - return string(lhs) != rhs - case influxql.EQREGEX: - rhs, ok := rhs.(*regexp.Regexp) - if !ok { - return nil - } - return rhs.Match(lhs) - case influxql.NEQREGEX: - rhs, ok := rhs.(*regexp.Regexp) - if !ok { - return nil - } - return !rhs.Match(lhs) - } - } - return nil -} - -func EvalExprBool(expr influxql.Expr, m Valuer) bool { - v, _ := evalExpr(expr, m).(bool) - return v -} diff --git a/storage/reads/influxql_expr.go b/storage/reads/influxql_expr.go deleted file mode 100644 index 941d0e141ca..00000000000 --- a/storage/reads/influxql_expr.go +++ /dev/null @@ -1,25 +0,0 @@ -package reads - -import ( - "github.com/influxdata/influxql" -) - -// TODO(sgc): build expression evaluator that does not use influxql AST - -type expression interface { - EvalBool(v Valuer) bool -} - -type astExpr struct { - expr influxql.Expr -} - -func (e *astExpr) EvalBool(v Valuer) bool { - return EvalExprBool(e.expr, v) -} - -// Valuer is the interface that wraps the Value() method. -type Valuer interface { - // Value returns the value and existence flag for a given key. 
- Value(key string) (interface{}, bool) -} diff --git a/storage/reads/influxql_predicate.go b/storage/reads/influxql_predicate.go deleted file mode 100644 index 1b56ab85c78..00000000000 --- a/storage/reads/influxql_predicate.go +++ /dev/null @@ -1,286 +0,0 @@ -package reads - -import ( - "regexp" - - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxql" - "github.com/pkg/errors" -) - -const ( - fieldRef = "$" -) - -// NodeToExpr transforms a predicate node to an influxql.Expr. -func NodeToExpr(node *datatypes.Node, remap map[string]string) (influxql.Expr, error) { - v := &nodeToExprVisitor{remap: remap} - WalkNode(v, node) - if err := v.Err(); err != nil { - return nil, err - } - - if len(v.exprs) > 1 { - return nil, errors.New("invalid expression") - } - - if len(v.exprs) == 0 { - return nil, nil - } - - // TODO(edd): It would be preferable if RewriteRegexConditions was a - // package level function in influxql. - stmt := &influxql.SelectStatement{ - Condition: v.exprs[0], - } - stmt.RewriteRegexConditions() - return stmt.Condition, nil -} - -type nodeToExprVisitor struct { - remap map[string]string - exprs []influxql.Expr - err error -} - -func (v *nodeToExprVisitor) Visit(n *datatypes.Node) NodeVisitor { - if v.err != nil { - return nil - } - - switch n.NodeType { - case datatypes.Node_TypeLogicalExpression: - if len(n.Children) > 1 { - op := influxql.AND - if n.GetLogical() == datatypes.Node_LogicalOr { - op = influxql.OR - } - - WalkNode(v, n.Children[0]) - if v.err != nil { - return nil - } - - for i := 1; i < len(n.Children); i++ { - WalkNode(v, n.Children[i]) - if v.err != nil { - return nil - } - - if len(v.exprs) >= 2 { - lhs, rhs := v.pop2() - v.exprs = append(v.exprs, &influxql.BinaryExpr{LHS: lhs, Op: op, RHS: rhs}) - } - } - - return nil - } - - case datatypes.Node_TypeParenExpression: - if len(n.Children) != 1 { - v.err = errors.New("parenExpression expects one child") - return nil - } - - WalkNode(v, n.Children[0]) - if v.err != nil { - return nil - } - - if len(v.exprs) > 0 { - v.exprs = append(v.exprs, &influxql.ParenExpr{Expr: v.pop()}) - } - - return nil - - case datatypes.Node_TypeComparisonExpression: - WalkChildren(v, n) - - if len(v.exprs) < 2 { - v.err = errors.New("comparisonExpression expects two children") - return nil - } - - lhs, rhs := v.pop2() - - be := &influxql.BinaryExpr{LHS: lhs, RHS: rhs} - switch n.GetComparison() { - case datatypes.Node_ComparisonEqual: - be.Op = influxql.EQ - case datatypes.Node_ComparisonNotEqual: - be.Op = influxql.NEQ - case datatypes.Node_ComparisonStartsWith: - // TODO(sgc): rewrite to anchored RE, as index does not support startsWith yet - v.err = errors.New("startsWith not implemented") - return nil - case datatypes.Node_ComparisonRegex: - be.Op = influxql.EQREGEX - case datatypes.Node_ComparisonNotRegex: - be.Op = influxql.NEQREGEX - case datatypes.Node_ComparisonLess: - be.Op = influxql.LT - case datatypes.Node_ComparisonLessEqual: - be.Op = influxql.LTE - case datatypes.Node_ComparisonGreater: - be.Op = influxql.GT - case datatypes.Node_ComparisonGreaterEqual: - be.Op = influxql.GTE - default: - v.err = errors.New("invalid comparison operator") - return nil - } - - v.exprs = append(v.exprs, be) - - return nil - - case datatypes.Node_TypeTagRef: - ref := n.GetTagRefValue() - if v.remap != nil { - if nk, ok := v.remap[ref]; ok { - ref = nk - } - } - - v.exprs = append(v.exprs, &influxql.VarRef{Val: ref, Type: influxql.Tag}) - return nil - - case 
datatypes.Node_TypeFieldRef: - v.exprs = append(v.exprs, &influxql.VarRef{Val: fieldRef}) - return nil - - case datatypes.Node_TypeLiteral: - switch val := n.Value.(type) { - case *datatypes.Node_StringValue: - v.exprs = append(v.exprs, &influxql.StringLiteral{Val: val.StringValue}) - - case *datatypes.Node_RegexValue: - // TODO(sgc): consider hashing the RegexValue and cache compiled version - re, err := regexp.Compile(val.RegexValue) - if err != nil { - v.err = err - } - v.exprs = append(v.exprs, &influxql.RegexLiteral{Val: re}) - return nil - - case *datatypes.Node_IntegerValue: - v.exprs = append(v.exprs, &influxql.IntegerLiteral{Val: val.IntegerValue}) - - case *datatypes.Node_UnsignedValue: - v.exprs = append(v.exprs, &influxql.UnsignedLiteral{Val: val.UnsignedValue}) - - case *datatypes.Node_FloatValue: - v.exprs = append(v.exprs, &influxql.NumberLiteral{Val: val.FloatValue}) - - case *datatypes.Node_BooleanValue: - v.exprs = append(v.exprs, &influxql.BooleanLiteral{Val: val.BooleanValue}) - - default: - v.err = errors.New("unexpected literal type") - return nil - } - - return nil - - default: - return v - } - return nil -} - -func (v *nodeToExprVisitor) Err() error { - return v.err -} - -func (v *nodeToExprVisitor) pop() influxql.Expr { - if len(v.exprs) == 0 { - panic("stack empty") - } - - var top influxql.Expr - top, v.exprs = v.exprs[len(v.exprs)-1], v.exprs[:len(v.exprs)-1] - return top -} - -func (v *nodeToExprVisitor) pop2() (influxql.Expr, influxql.Expr) { - if len(v.exprs) < 2 { - panic("stack empty") - } - - rhs := v.exprs[len(v.exprs)-1] - lhs := v.exprs[len(v.exprs)-2] - v.exprs = v.exprs[:len(v.exprs)-2] - return lhs, rhs -} - -func IsTrueBooleanLiteral(expr influxql.Expr) bool { - b, ok := expr.(*influxql.BooleanLiteral) - if ok { - return b.Val - } - return false -} - -func IsFalseBooleanLiteral(expr influxql.Expr) bool { - b, ok := expr.(*influxql.BooleanLiteral) - if ok { - return !b.Val - } - return false -} - -func RewriteExprRemoveFieldValue(expr influxql.Expr) influxql.Expr { - return influxql.RewriteExpr(expr, func(expr influxql.Expr) influxql.Expr { - if be, ok := expr.(*influxql.BinaryExpr); ok { - if ref, ok := be.LHS.(*influxql.VarRef); ok { - if ref.Val == fieldRef { - return &influxql.BooleanLiteral{Val: true} - } - } - } - - return expr - }) -} - -type hasRefs struct { - refs []string - found []bool -} - -func (v *hasRefs) allFound() bool { - for _, val := range v.found { - if !val { - return false - } - } - return true -} - -func (v *hasRefs) Visit(node influxql.Node) influxql.Visitor { - if v.allFound() { - return nil - } - - if n, ok := node.(*influxql.VarRef); ok { - for i, r := range v.refs { - if !v.found[i] && r == n.Val { - v.found[i] = true - if v.allFound() { - return nil - } - } - } - } - return v -} - -func ExprHasKey(expr influxql.Expr, key string) bool { - refs := hasRefs{refs: []string{key}, found: make([]bool, 1)} - influxql.Walk(&refs, expr) - return refs.found[0] -} - -func HasFieldValueKey(expr influxql.Expr) bool { - return ExprHasKey(expr, fieldRef) -} diff --git a/storage/reads/influxql_predicate_test.go b/storage/reads/influxql_predicate_test.go deleted file mode 100644 index 078b1062579..00000000000 --- a/storage/reads/influxql_predicate_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package reads_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" -) - -func TestHasFieldValueKey(t *testing.T) { - predicates := []*datatypes.Node{ 
- { - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{ - Comparison: datatypes.Node_ComparisonLess, - }, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeFieldRef, - Value: &datatypes.Node_FieldRefValue{ - FieldRefValue: "_value", - }, - }, - { - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_IntegerValue{ - IntegerValue: 3000, - }, - }, - }, - }, - { - NodeType: datatypes.Node_TypeLogicalExpression, - Value: &datatypes.Node_Logical_{ - Logical: datatypes.Node_LogicalAnd, - }, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{ - Comparison: datatypes.Node_ComparisonEqual, - }, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{ - TagRefValue: "_measurement", - }, - }, - { - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: "cpu", - }, - }, - }, - }, - { - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{ - Comparison: datatypes.Node_ComparisonLess, - }, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeFieldRef, - Value: &datatypes.Node_FieldRefValue{ - FieldRefValue: "_value", - }, - }, - { - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_IntegerValue{ - IntegerValue: 3000, - }, - }, - }, - }, - }, - }, - } - for _, predicate := range predicates { - t.Run("", func(t *testing.T) { - expr, err := reads.NodeToExpr(predicate, nil) - if err != nil { - t.Fatalf("unexpected error converting predicate to InfluxQL expression: %v", err) - } - if !reads.HasFieldValueKey(expr) { - t.Fatalf("did not find a field reference in %v", expr) - } - }) - } -} diff --git a/storage/reads/keymerger.go b/storage/reads/keymerger.go deleted file mode 100644 index da47ee2c1b1..00000000000 --- a/storage/reads/keymerger.go +++ /dev/null @@ -1,109 +0,0 @@ -package reads - -import ( - "bytes" - "strings" - - "github.com/influxdata/influxdb/v2/models" -) - -// tagsKeyMerger is responsible for determining a merged set of tag keys -type KeyMerger struct { - i int - tmp [][]byte - keys [2][][]byte -} - -func (km *KeyMerger) Clear() { - km.i = 0 - km.keys[0] = km.keys[0][:0] - if km.tmp != nil { - tmp := km.tmp[:cap(km.tmp)] - for i := range tmp { - tmp[i] = nil - } - } -} - -func (km *KeyMerger) Get() [][]byte { return km.keys[km.i&1] } - -func (km *KeyMerger) String() string { - var s []string - for _, k := range km.Get() { - s = append(s, string(k)) - } - return strings.Join(s, ",") -} - -func (km *KeyMerger) MergeTagKeys(tags models.Tags) { - if cap(km.tmp) < len(tags) { - km.tmp = make([][]byte, len(tags)) - } else { - km.tmp = km.tmp[:len(tags)] - } - - for i := range tags { - km.tmp[i] = tags[i].Key - } - - km.MergeKeys(km.tmp) -} - -func (km *KeyMerger) MergeKeys(in [][]byte) { - keys := km.keys[km.i&1] - i, j := 0, 0 - for i < len(keys) && j < len(in) && bytes.Equal(keys[i], in[j]) { - i++ - j++ - } - - if j == len(in) { - // no new tags - return - } - - km.i = (km.i + 1) & 1 - l := len(keys) + len(in) - if cap(km.keys[km.i]) < l { - km.keys[km.i] = make([][]byte, l) - } else { - km.keys[km.i] = km.keys[km.i][:l] - } - - keya := km.keys[km.i] - - // back up the pointers - if i > 0 { - i-- - j-- - } - - k := i - copy(keya[:k], keys[:k]) - - for i < len(keys) && j < len(in) { - cmp := bytes.Compare(keys[i], in[j]) - if cmp < 0 { - keya[k] = keys[i] - i++ - } else if cmp > 0 { - keya[k] = in[j] - 
j++ - } else { - keya[k] = keys[i] - i++ - j++ - } - k++ - } - - if i < len(keys) { - k += copy(keya[k:], keys[i:]) - } - - if j < len(in) { - k += copy(keya[k:], in[j:]) - } - - km.keys[km.i] = keya[:k] -} diff --git a/storage/reads/keymerger_test.go b/storage/reads/keymerger_test.go deleted file mode 100644 index 07034f31ecd..00000000000 --- a/storage/reads/keymerger_test.go +++ /dev/null @@ -1,204 +0,0 @@ -package reads - -import ( - "bytes" - "math/rand" - "strconv" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/models" -) - -func TestKeyMerger_MergeTagKeys(t *testing.T) { - tests := []struct { - name string - tags []models.Tags - exp string - }{ - { - name: "mixed", - tags: []models.Tags{ - models.ParseTags([]byte("foo,tag0=v0,tag1=v0,tag2=v0")), - models.ParseTags([]byte("foo,tag0=v0,tag1=v0,tag2=v1")), - models.ParseTags([]byte("foo,tag0=v0")), - models.ParseTags([]byte("foo,tag0=v0,tag3=v0")), - }, - exp: "tag0,tag1,tag2,tag3", - }, - { - name: "mixed 2", - tags: []models.Tags{ - models.ParseTags([]byte("foo,tag0=v0")), - models.ParseTags([]byte("foo,tag0=v0,tag3=v0")), - models.ParseTags([]byte("foo,tag0=v0,tag1=v0,tag2=v0")), - models.ParseTags([]byte("foo,tag0=v0,tag1=v0,tag2=v1")), - }, - exp: "tag0,tag1,tag2,tag3", - }, - { - name: "all different", - tags: []models.Tags{ - models.ParseTags([]byte("foo,tag0=v0")), - models.ParseTags([]byte("foo,tag1=v0")), - models.ParseTags([]byte("foo,tag2=v1")), - models.ParseTags([]byte("foo,tag3=v0")), - }, - exp: "tag0,tag1,tag2,tag3", - }, - { - name: "new tags,verify clear", - tags: []models.Tags{ - models.ParseTags([]byte("foo,tag9=v0")), - models.ParseTags([]byte("foo,tag8=v0")), - }, - exp: "tag8,tag9", - }, - } - - var km KeyMerger - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - km.Clear() - for _, tags := range tt.tags { - km.MergeTagKeys(tags) - } - - if got := km.String(); !cmp.Equal(got, tt.exp) { - t.Errorf("unexpected keys -got/+exp\n%s", cmp.Diff(got, tt.exp)) - } - }) - } -} - -var commaB = []byte(",") - -func TestKeyMerger_MergeKeys(t *testing.T) { - - tests := []struct { - name string - keys [][][]byte - exp string - }{ - { - name: "mixed", - keys: [][][]byte{ - bytes.Split([]byte("tag0,tag1,tag2"), commaB), - bytes.Split([]byte("tag0,tag1,tag2"), commaB), - bytes.Split([]byte("tag0"), commaB), - bytes.Split([]byte("tag0,tag3"), commaB), - }, - exp: "tag0,tag1,tag2,tag3", - }, - { - name: "mixed 2", - keys: [][][]byte{ - bytes.Split([]byte("tag0"), commaB), - bytes.Split([]byte("tag0,tag3"), commaB), - bytes.Split([]byte("tag0,tag1,tag2"), commaB), - bytes.Split([]byte("tag0,tag1,tag2"), commaB), - }, - exp: "tag0,tag1,tag2,tag3", - }, - { - name: "all different", - keys: [][][]byte{ - bytes.Split([]byte("tag0"), commaB), - bytes.Split([]byte("tag3"), commaB), - bytes.Split([]byte("tag1"), commaB), - bytes.Split([]byte("tag2"), commaB), - }, - exp: "tag0,tag1,tag2,tag3", - }, - { - name: "new tags,verify clear", - keys: [][][]byte{ - bytes.Split([]byte("tag9"), commaB), - bytes.Split([]byte("tag8"), commaB), - }, - exp: "tag8,tag9", - }, - } - - var km KeyMerger - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - km.Clear() - for _, keys := range tt.keys { - km.MergeKeys(keys) - } - - if got := km.String(); !cmp.Equal(got, tt.exp) { - t.Errorf("unexpected keys -got/+exp\n%s", cmp.Diff(got, tt.exp)) - } - }) - } -} - -func BenchmarkKeyMerger_MergeKeys(b *testing.B) { - keys := [][][]byte{ - bytes.Split([]byte("tag00,tag01,tag02"), 
commaB), - bytes.Split([]byte("tag00,tag01,tag02"), commaB), - bytes.Split([]byte("tag00,tag01,tag05,tag06,tag10,tag11,tag12,tag13,tag14,tag15"), commaB), - bytes.Split([]byte("tag00"), commaB), - bytes.Split([]byte("tag00,tag03"), commaB), - bytes.Split([]byte("tag01,tag03,tag13,tag14,tag15"), commaB), - bytes.Split([]byte("tag04,tag05"), commaB), - } - - seededRand := rand.New(rand.NewSource(20040409)) - - tests := []int{ - 10, - 1000, - 1000000, - } - - for _, n := range tests { - b.Run(strconv.Itoa(n), func(b *testing.B) { - b.ResetTimer() - - var km KeyMerger - for i := 0; i < b.N; i++ { - for j := 0; j < n; j++ { - km.MergeKeys(keys[seededRand.Int()%len(keys)]) - } - km.Clear() - } - }) - } -} - -func BenchmarkKeyMerger_MergeTagKeys(b *testing.B) { - tags := []models.Tags{ - models.ParseTags([]byte("foo,tag00=v0,tag01=v0,tag02=v0")), - models.ParseTags([]byte("foo,tag00=v0,tag01=v0,tag02=v0")), - models.ParseTags([]byte("foo,tag00=v0,tag01=v0,tag05=v0,tag06=v0,tag10=v0,tag11=v0,tag12=v0,tag13=v0,tag14=v0,tag15=v0")), - models.ParseTags([]byte("foo,tag00=v0")), - models.ParseTags([]byte("foo,tag00=v0,tag03=v0")), - models.ParseTags([]byte("foo,tag01=v0,tag03=v0,tag13=v0,tag14=v0,tag15=v0")), - models.ParseTags([]byte("foo,tag04=v0,tag05=v0")), - } - - seededRand := rand.New(rand.NewSource(20040409)) - - tests := []int{ - 10, - 1000, - 1000000, - } - - for _, n := range tests { - b.Run(strconv.Itoa(n), func(b *testing.B) { - b.ResetTimer() - - var km KeyMerger - for i := 0; i < b.N; i++ { - for j := 0; j < n; j++ { - km.MergeTagKeys(tags[seededRand.Int()%len(tags)]) - } - km.Clear() - } - }) - } -} diff --git a/storage/reads/modulo.go b/storage/reads/modulo.go deleted file mode 100644 index 7bbdd6c0a2c..00000000000 --- a/storage/reads/modulo.go +++ /dev/null @@ -1,43 +0,0 @@ -package reads - -func Modulo(dividend, modulus int64) int64 { - r := dividend % modulus - if r < 0 { - r += modulus - } - return r -} - -// WindowStart calculates the start time of a window given a timestamp, -// the window period (every), and the offset starting from the epoch. -// -// Note that the normalized offset value can fall on either side of the -// normalized timestamp. If it lies to the left we know it represents -// the start time. Otherwise it represents the stop time, in which case -// we decrement by the window period to get the start time. -func WindowStart(t, every, offset int64) int64 { - mod := Modulo(t, every) - off := Modulo(offset, every) - beg := t - mod + off - if mod < off { - beg -= every - } - return beg -} - -// WindowStop calculates the stop time of a window given a timestamp, -// the window period (every), and the offset starting from the epoch. -// -// Note that the normalized offset value can fall on either side of the -// normalized timestamp. If it lies to the right we know it represents -// the stop time. Otherwise it represents the start time, in which case -// we increment by the window period to get the stop time. -func WindowStop(t, every, offset int64) int64 { - mod := Modulo(t, every) - off := Modulo(offset, every) - end := t - mod + off - if mod >= off { - end += every - } - return end -} diff --git a/storage/reads/predicate.go b/storage/reads/predicate.go deleted file mode 100644 index c33b8ec09e2..00000000000 --- a/storage/reads/predicate.go +++ /dev/null @@ -1,142 +0,0 @@ -package reads - -import ( - "bytes" - "strconv" - - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" -) - -// NodeVisitor can be called by Walk to traverse the Node hierarchy. 
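For reference, the window helpers being removed above in storage/reads/modulo.go compute half-open [start, stop) window boundaries for a period (`every`) shifted by `offset` from the epoch. The sketch below is a minimal standalone program: the three helper bodies are copied from the deleted file (doc comments shortened), while the `main` driver and its sample values are illustrative only.

```go
package main

import "fmt"

// Modulo returns the Euclidean remainder, always in [0, modulus).
func Modulo(dividend, modulus int64) int64 {
	r := dividend % modulus
	if r < 0 {
		r += modulus
	}
	return r
}

// WindowStart returns the inclusive start of the window containing t.
func WindowStart(t, every, offset int64) int64 {
	mod := Modulo(t, every)
	off := Modulo(offset, every)
	beg := t - mod + off
	if mod < off {
		beg -= every
	}
	return beg
}

// WindowStop returns the exclusive stop of the window containing t.
func WindowStop(t, every, offset int64) int64 {
	mod := Modulo(t, every)
	off := Modulo(offset, every)
	end := t - mod + off
	if mod >= off {
		end += every
	}
	return end
}

func main() {
	// Windows of 60 units offset by 30 from the epoch: ..., [30, 90), [90, 150), ...
	t, every, offset := int64(130), int64(60), int64(30)
	fmt.Println(WindowStart(t, every, offset), WindowStop(t, every, offset)) // 90 150
}
```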
-// The Visit() function is called once per node. -type NodeVisitor interface { - Visit(*datatypes.Node) NodeVisitor -} - -func WalkChildren(v NodeVisitor, node *datatypes.Node) { - for _, n := range node.Children { - WalkNode(v, n) - } -} - -func WalkNode(v NodeVisitor, node *datatypes.Node) { - if v = v.Visit(node); v == nil { - return - } - - WalkChildren(v, node) -} - -func PredicateToExprString(p *datatypes.Predicate) string { - if p == nil { - return "[none]" - } - - var v predicateExpressionPrinter - WalkNode(&v, p.Root) - return v.Buffer.String() -} - -type predicateExpressionPrinter struct { - bytes.Buffer -} - -func (v *predicateExpressionPrinter) Visit(n *datatypes.Node) NodeVisitor { - switch n.NodeType { - case datatypes.Node_TypeLogicalExpression: - if len(n.Children) > 0 { - var op string - if n.GetLogical() == datatypes.Node_LogicalAnd { - op = " AND " - } else { - op = " OR " - } - WalkNode(v, n.Children[0]) - for _, e := range n.Children[1:] { - v.Buffer.WriteString(op) - WalkNode(v, e) - } - } - - return nil - - case datatypes.Node_TypeParenExpression: - if len(n.Children) == 1 { - v.Buffer.WriteString("( ") - WalkNode(v, n.Children[0]) - v.Buffer.WriteString(" )") - } - - return nil - - case datatypes.Node_TypeComparisonExpression: - WalkNode(v, n.Children[0]) - v.Buffer.WriteByte(' ') - switch n.GetComparison() { - case datatypes.Node_ComparisonEqual: - v.Buffer.WriteByte('=') - case datatypes.Node_ComparisonNotEqual: - v.Buffer.WriteString("!=") - case datatypes.Node_ComparisonStartsWith: - v.Buffer.WriteString("startsWith") - case datatypes.Node_ComparisonRegex: - v.Buffer.WriteString("=~") - case datatypes.Node_ComparisonNotRegex: - v.Buffer.WriteString("!~") - case datatypes.Node_ComparisonLess: - v.Buffer.WriteByte('<') - case datatypes.Node_ComparisonLessEqual: - v.Buffer.WriteString("<=") - case datatypes.Node_ComparisonGreater: - v.Buffer.WriteByte('>') - case datatypes.Node_ComparisonGreaterEqual: - v.Buffer.WriteString(">=") - } - - v.Buffer.WriteByte(' ') - WalkNode(v, n.Children[1]) - return nil - - case datatypes.Node_TypeTagRef: - v.Buffer.WriteByte('\'') - v.Buffer.WriteString(n.GetTagRefValue()) - v.Buffer.WriteByte('\'') - return nil - - case datatypes.Node_TypeFieldRef: - v.Buffer.WriteByte('$') - return nil - - case datatypes.Node_TypeLiteral: - switch val := n.Value.(type) { - case *datatypes.Node_StringValue: - v.Buffer.WriteString(strconv.Quote(val.StringValue)) - - case *datatypes.Node_RegexValue: - v.Buffer.WriteByte('/') - v.Buffer.WriteString(val.RegexValue) - v.Buffer.WriteByte('/') - - case *datatypes.Node_IntegerValue: - v.Buffer.WriteString(strconv.FormatInt(val.IntegerValue, 10)) - - case *datatypes.Node_UnsignedValue: - v.Buffer.WriteString(strconv.FormatUint(val.UnsignedValue, 10)) - - case *datatypes.Node_FloatValue: - v.Buffer.WriteString(strconv.FormatFloat(val.FloatValue, 'f', 10, 64)) - - case *datatypes.Node_BooleanValue: - if val.BooleanValue { - v.Buffer.WriteString("true") - } else { - v.Buffer.WriteString("false") - } - } - - return nil - - default: - return v - } -} diff --git a/storage/reads/predicate_test.go b/storage/reads/predicate_test.go deleted file mode 100644 index 9bd39569beb..00000000000 --- a/storage/reads/predicate_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package reads_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" -) - -func TestPredicateToExprString(t *testing.T) { - cases := []struct { - n string - 
r *datatypes.Predicate - e string - }{ - { - n: "returns [none] for nil", - r: nil, - e: "[none]", - }, - { - n: "logical AND", - r: &datatypes.Predicate{ - Root: &datatypes.Node{ - NodeType: datatypes.Node_TypeLogicalExpression, - Value: &datatypes.Node_Logical_{Logical: datatypes.Node_LogicalAnd}, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonEqual}, - Children: []*datatypes.Node{ - {NodeType: datatypes.Node_TypeTagRef, Value: &datatypes.Node_TagRefValue{TagRefValue: "host"}}, - {NodeType: datatypes.Node_TypeLiteral, Value: &datatypes.Node_StringValue{StringValue: "host1"}}, - }, - }, - { - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonRegex}, - Children: []*datatypes.Node{ - {NodeType: datatypes.Node_TypeTagRef, Value: &datatypes.Node_TagRefValue{TagRefValue: "region"}}, - {NodeType: datatypes.Node_TypeLiteral, Value: &datatypes.Node_RegexValue{RegexValue: "^us-west"}}, - }, - }, - }, - }, - }, - e: `'host' = "host1" AND 'region' =~ /^us-west/`, - }, - } - - for _, tc := range cases { - t.Run(tc.n, func(t *testing.T) { - if got, wanted := reads.PredicateToExprString(tc.r), tc.e; got != wanted { - t.Fatal("got:", got, "wanted:", wanted) - } - }) - } -} diff --git a/storage/reads/resultset.go b/storage/reads/resultset.go deleted file mode 100644 index 775d56417e2..00000000000 --- a/storage/reads/resultset.go +++ /dev/null @@ -1,70 +0,0 @@ -package reads - -import ( - "context" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -type multiShardCursors interface { - createCursor(row SeriesRow) cursors.Cursor -} - -type resultSet struct { - ctx context.Context - seriesCursor SeriesCursor - seriesRow SeriesRow - arrayCursors multiShardCursors -} - -// TODO(jsternberg): The range is [start, end) for this function which is consistent -// with the documented interface for datatypes.ReadFilterRequest. This function should -// be refactored to take in a datatypes.ReadFilterRequest similar to the other -// ResultSet functions. -func NewFilteredResultSet(ctx context.Context, start, end int64, seriesCursor SeriesCursor) ResultSet { - return &resultSet{ - ctx: ctx, - seriesCursor: seriesCursor, - arrayCursors: newMultiShardArrayCursors(ctx, start, end, true), - } -} - -func (r *resultSet) Err() error { return nil } - -// Close closes the result set. Close is idempotent. -func (r *resultSet) Close() { - if r == nil { - return // Nothing to do. - } - r.seriesRow.Query = nil - r.seriesCursor.Close() -} - -// Next returns true if there are more results available. -func (r *resultSet) Next() bool { - if r == nil { - return false - } - - seriesRow := r.seriesCursor.Next() - if seriesRow == nil { - return false - } - - r.seriesRow = *seriesRow - - return true -} - -func (r *resultSet) Cursor() cursors.Cursor { - return r.arrayCursors.createCursor(r.seriesRow) -} - -func (r *resultSet) Tags() models.Tags { - return r.seriesRow.Tags -} - -// Stats returns the stats for the underlying cursors. -// Available after resultset has been scanned. 
-func (r *resultSet) Stats() cursors.CursorStats { return r.seriesRow.Query.Stats() } diff --git a/storage/reads/resultset_lineprotocol.go b/storage/reads/resultset_lineprotocol.go deleted file mode 100644 index 3de073b522c..00000000000 --- a/storage/reads/resultset_lineprotocol.go +++ /dev/null @@ -1,129 +0,0 @@ -package reads - -import ( - "errors" - "io" - "strconv" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -// ResultSetToLineProtocol transforms rs to line protocol and writes the -// output to wr. -func ResultSetToLineProtocol(wr io.Writer, rs ResultSet) (err error) { - defer rs.Close() - - line := make([]byte, 0, 4096) - for rs.Next() { - tags := rs.Tags() - name := tags.Get(models.MeasurementTagKeyBytes) - field := tags.Get(models.FieldKeyTagKeyBytes) - if len(name) == 0 || len(field) == 0 { - return errors.New("missing measurement / field") - } - - line = append(line[:0], name...) - if tags.Len() > 2 { - tags = tags[1 : len(tags)-1] // take first and last elements which are measurement and field keys - line = tags.AppendHashKey(line) - } - - line = append(line, ' ') - line = append(line, field...) - line = append(line, '=') - err = cursorToLineProtocol(wr, line, rs.Cursor()) - if err != nil { - return err - } - } - - return rs.Err() -} - -func cursorToLineProtocol(wr io.Writer, line []byte, cur cursors.Cursor) error { - var newLine = []byte{'\n'} - - switch ccur := cur.(type) { - case cursors.IntegerArrayCursor: - for { - a := ccur.Next() - if a.Len() > 0 { - for i := range a.Timestamps { - buf := strconv.AppendInt(line, a.Values[i], 10) - buf = append(buf, 'i', ' ') - buf = strconv.AppendInt(buf, a.Timestamps[i], 10) - wr.Write(buf) - wr.Write(newLine) - } - } else { - break - } - } - case cursors.FloatArrayCursor: - for { - a := ccur.Next() - if a.Len() > 0 { - for i := range a.Timestamps { - buf := strconv.AppendFloat(line, a.Values[i], 'f', -1, 64) - buf = append(buf, ' ') - buf = strconv.AppendInt(buf, a.Timestamps[i], 10) - wr.Write(buf) - wr.Write(newLine) - } - } else { - break - } - } - case cursors.UnsignedArrayCursor: - for { - a := ccur.Next() - if a.Len() > 0 { - for i := range a.Timestamps { - buf := strconv.AppendUint(line, a.Values[i], 10) - buf = append(buf, 'u', ' ') - buf = strconv.AppendInt(buf, a.Timestamps[i], 10) - wr.Write(buf) - wr.Write(newLine) - } - } else { - break - } - } - case cursors.BooleanArrayCursor: - for { - a := ccur.Next() - if a.Len() > 0 { - for i := range a.Timestamps { - buf := strconv.AppendBool(line, a.Values[i]) - buf = append(buf, ' ') - buf = strconv.AppendInt(buf, a.Timestamps[i], 10) - wr.Write(buf) - wr.Write(newLine) - } - } else { - break - } - } - case cursors.StringArrayCursor: - for { - a := ccur.Next() - if a.Len() > 0 { - for i := range a.Timestamps { - buf := strconv.AppendQuote(line, a.Values[i]) - buf = append(buf, ' ') - buf = strconv.AppendInt(buf, a.Timestamps[i], 10) - wr.Write(buf) - wr.Write(newLine) - } - } else { - break - } - } - default: - panic("unreachable") - } - - cur.Close() - return cur.Err() -} diff --git a/storage/reads/resultset_test.go b/storage/reads/resultset_test.go deleted file mode 100644 index eacd0161532..00000000000 --- a/storage/reads/resultset_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package reads_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - 
"github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -func TestNewFilteredResultSet_TimeRange(t *testing.T) { - newCursor := newMockReadCursor( - "clicks click=1 1", - ) - for i := range newCursor.rows { - newCursor.rows[0].Query[i] = &mockCursorIterator{ - newCursorFn: func(req *cursors.CursorRequest) cursors.Cursor { - if want, got := int64(0), req.StartTime; want != got { - t.Errorf("unexpected start time -want/+got:\n\t- %d\n\t+ %d", want, got) - } - if want, got := int64(29), req.EndTime; want != got { - t.Errorf("unexpected end time -want/+got:\n\t- %d\n\t+ %d", want, got) - } - return &mockIntegerArrayCursor{} - }, - } - } - - ctx := context.Background() - req := datatypes.ReadFilterRequest{ - Range: &datatypes.TimestampRange{ - Start: 0, - End: 30, - }, - } - - resultSet := reads.NewFilteredResultSet(ctx, req.Range.GetStart(), req.Range.GetEnd(), &newCursor) - if !resultSet.Next() { - t.Fatal("expected result") - } -} diff --git a/storage/reads/series_cursor.go b/storage/reads/series_cursor.go deleted file mode 100644 index cc5ae61b16b..00000000000 --- a/storage/reads/series_cursor.go +++ /dev/null @@ -1,51 +0,0 @@ -package reads - -import ( - "context" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb/cursors" - "github.com/influxdata/influxql" -) - -type SeriesCursor interface { - Close() - Next() *SeriesRow - Err() error -} - -type SeriesRow struct { - SortKey []byte - Name []byte // measurement name - SeriesTags models.Tags // unmodified series tags - Tags models.Tags - Field string - Query cursors.CursorIterators - ValueCond influxql.Expr -} - -type limitSeriesCursor struct { - SeriesCursor - n, o, c int64 -} - -func NewLimitSeriesCursor(ctx context.Context, cur SeriesCursor, n, o int64) SeriesCursor { - return &limitSeriesCursor{SeriesCursor: cur, o: o, n: n} -} - -func (c *limitSeriesCursor) Next() *SeriesRow { - if c.o > 0 { - for i := int64(0); i < c.o; i++ { - if c.SeriesCursor.Next() == nil { - break - } - } - c.o = 0 - } - - if c.c >= c.n { - return nil - } - c.c++ - return c.SeriesCursor.Next() -} diff --git a/storage/reads/series_cursor_test.go b/storage/reads/series_cursor_test.go deleted file mode 100644 index 8eee6fe0b5c..00000000000 --- a/storage/reads/series_cursor_test.go +++ /dev/null @@ -1 +0,0 @@ -package reads diff --git a/storage/reads/store.go b/storage/reads/store.go deleted file mode 100644 index 72db72d130b..00000000000 --- a/storage/reads/store.go +++ /dev/null @@ -1,91 +0,0 @@ -package reads - -import ( - "context" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/tsdb/cursors" - "google.golang.org/protobuf/proto" -) - -type ResultSet interface { - // Next advances the ResultSet to the next cursor. It returns false - // when there are no more cursors. - Next() bool - - // Cursor returns the most recent cursor after a call to Next. - Cursor() cursors.Cursor - - // Tags returns the tags for the most recent cursor after a call to Next. - Tags() models.Tags - - // Close releases any resources allocated by the ResultSet. - Close() - - // Err returns the first error encountered by the ResultSet. - Err() error - - Stats() cursors.CursorStats -} - -type GroupResultSet interface { - // Next advances the GroupResultSet and returns the next GroupCursor. It - // returns nil if there are no more groups. 
- Next() GroupCursor - - // Close releases any resources allocated by the GroupResultSet. - Close() - - // Err returns the first error encountered by the GroupResultSet. - Err() error -} - -type GroupCursor interface { - // Next advances to the next cursor. Next will return false when there are no - // more cursors in the current group. - Next() bool - - // Cursor returns the most recent cursor after a call to Next. - Cursor() cursors.Cursor - - // Tags returns the tags for the most recent cursor after a call to Next. - Tags() models.Tags - - // Keys returns the union of all tag key names for all series produced by - // this GroupCursor. - Keys() [][]byte - - // PartitionKeyVals returns the values of all tags identified by the - // keys specified in ReadRequest#GroupKeys. The tag values values will - // appear in the same order as the GroupKeys. - // - // When the datatypes.GroupNone strategy is specified, PartitionKeyVals will - // be nil. - PartitionKeyVals() [][]byte - - // Close releases any resources allocated by the GroupCursor. - Close() - - // Err returns the first error encountered by the GroupCursor. - Err() error - - Stats() cursors.CursorStats - - Aggregate() *datatypes.Aggregate -} - -type Store interface { - ReadFilter(ctx context.Context, req *datatypes.ReadFilterRequest) (ResultSet, error) - ReadGroup(ctx context.Context, req *datatypes.ReadGroupRequest) (GroupResultSet, error) - // WindowAggregate will invoke a ReadWindowAggregateRequest against the Store. - WindowAggregate(ctx context.Context, req *datatypes.ReadWindowAggregateRequest) (ResultSet, error) - - TagKeys(ctx context.Context, req *datatypes.TagKeysRequest) (cursors.StringIterator, error) - TagValues(ctx context.Context, req *datatypes.TagValuesRequest) (cursors.StringIterator, error) - - ReadSeriesCardinality(ctx context.Context, req *datatypes.ReadSeriesCardinalityRequest) (cursors.Int64Iterator, error) - SupportReadSeriesCardinality(ctx context.Context) bool - - GetSource(orgID, bucketID uint64) proto.Message -} diff --git a/storage/reads/store_test.go b/storage/reads/store_test.go deleted file mode 100644 index 304557a93a4..00000000000 --- a/storage/reads/store_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package reads_test - -import ( - "bytes" - "fmt" - "io" - "strings" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -func cursorToString(wr io.Writer, cur cursors.Cursor) { - switch ccur := cur.(type) { - case cursors.IntegerArrayCursor: - fmt.Fprintln(wr, "Integer") - for { - a := ccur.Next() - if a.Len() > 0 { - for i := range a.Timestamps { - fmt.Fprintf(wr, "%20d | %20d\n", a.Timestamps[i], a.Values[i]) - } - } else { - break - } - } - case cursors.FloatArrayCursor: - fmt.Fprintln(wr, "Float") - for { - a := ccur.Next() - if a.Len() > 0 { - for i := range a.Timestamps { - fmt.Fprintf(wr, "%20d | %18.2f\n", a.Timestamps[i], a.Values[i]) - } - } else { - break - } - } - case cursors.UnsignedArrayCursor: - fmt.Fprintln(wr, "Unsigned") - for { - a := ccur.Next() - if a.Len() > 0 { - for i := range a.Timestamps { - fmt.Fprintf(wr, "%20d | %20d\n", a.Timestamps[i], a.Values[i]) - } - } else { - break - } - } - case cursors.BooleanArrayCursor: - fmt.Fprintln(wr, "Boolean") - for { - a := ccur.Next() - if a.Len() > 0 { - for i := range a.Timestamps { - fmt.Fprintf(wr, "%20d | %t\n", a.Timestamps[i], a.Values[i]) - } - } else { - break - } - } - case cursors.StringArrayCursor: - 
fmt.Fprintln(wr, "String") - for { - a := ccur.Next() - if a.Len() > 0 { - for i := range a.Timestamps { - fmt.Fprintf(wr, "%20d | %20s\n", a.Timestamps[i], a.Values[i]) - } - } else { - break - } - } - default: - fmt.Fprintln(wr, "Invalid") - fmt.Fprintf(wr, "unreachable: %T\n", cur) - } - - if err := cur.Err(); err != nil && err != io.EOF { - fmt.Fprintf(wr, "cursor err: %s\n", cur.Err().Error()) - } - - cur.Close() -} - -const nilVal = "" - -func joinString(b [][]byte) string { - s := make([]string, len(b)) - for i := range b { - v := b[i] - if len(v) == 0 { - s[i] = nilVal - } else { - s[i] = string(v) - } - } - return strings.Join(s, ",") -} - -func tagsToString(wr io.Writer, tags models.Tags, opts ...optionFn) { - if k := tags.HashKey(); len(k) > 0 { - fmt.Fprintf(wr, "%s", string(k[1:])) - } - fmt.Fprintln(wr) -} - -func resultSetToString(wr io.Writer, rs reads.ResultSet, opts ...optionFn) { - var po PrintOptions - for _, o := range opts { - o(&po) - } - - iw := ensureIndentWriter(wr) - wr = iw - - for rs.Next() { - fmt.Fprint(wr, "series: ") - tagsToString(wr, rs.Tags()) - cur := rs.Cursor() - - if po.SkipNilCursor && cur == nil { - continue - } - - iw.Indent(2) - - fmt.Fprint(wr, "cursor:") - if cur == nil { - fmt.Fprintln(wr, nilVal) - goto LOOP - } - - cursorToString(wr, cur) - LOOP: - iw.Indent(-2) - } -} - -func GroupResultSetToString(wr io.Writer, rs reads.GroupResultSet, opts ...optionFn) { - iw := ensureIndentWriter(wr) - wr = iw - - gc := rs.Next() - for gc != nil { - fmt.Fprintln(wr, "group:") - iw.Indent(2) - fmt.Fprintf(wr, "tag key : %s\n", joinString(gc.Keys())) - fmt.Fprintf(wr, "partition key: %s\n", joinString(gc.PartitionKeyVals())) - iw.Indent(2) - resultSetToString(wr, gc, opts...) - iw.Indent(-4) - gc = rs.Next() - } -} - -type PrintOptions struct { - SkipNilCursor bool -} - -type optionFn func(o *PrintOptions) - -func SkipNilCursor() optionFn { - return func(o *PrintOptions) { - o.SkipNilCursor = true - } -} - -type indentWriter struct { - l int - p []byte - wr io.Writer - bol bool -} - -func ensureIndentWriter(wr io.Writer) *indentWriter { - if iw, ok := wr.(*indentWriter); ok { - return iw - } else { - return newIndentWriter(wr) - } -} - -func newIndentWriter(wr io.Writer) *indentWriter { - return &indentWriter{ - wr: wr, - bol: true, - } -} - -func (w *indentWriter) Indent(n int) { - w.l += n - if w.l < 0 { - panic("negative indent") - } - w.p = bytes.Repeat([]byte(" "), w.l) -} - -func (w *indentWriter) Write(p []byte) (n int, err error) { - for _, c := range p { - if w.bol { - _, err = w.wr.Write(w.p) - if err != nil { - break - } - w.bol = false - } - _, err = w.wr.Write([]byte{c}) - if err != nil { - break - } - n++ - w.bol = c == '\n' - } - - return n, err -} diff --git a/storage/reads/tagsbuffer.go b/storage/reads/tagsbuffer.go deleted file mode 100644 index e0982b0b277..00000000000 --- a/storage/reads/tagsbuffer.go +++ /dev/null @@ -1,30 +0,0 @@ -package reads - -import ( - "github.com/influxdata/influxdb/v2/models" -) - -type tagsBuffer struct { - sz int - i int - buf models.Tags -} - -func (tb *tagsBuffer) copyTags(src models.Tags) models.Tags { - var buf models.Tags - if len(src) > tb.sz { - buf = make(models.Tags, len(src)) - } else { - if tb.i+len(src) > len(tb.buf) { - tb.buf = make(models.Tags, tb.sz) - tb.i = 0 - } - - buf = tb.buf[tb.i : tb.i+len(src)] - tb.i += len(src) - } - - copy(buf, src) - - return buf -} diff --git a/storage/reads/types.tmpldata b/storage/reads/types.tmpldata deleted file mode 100644 index 
b15cb586d5d..00000000000 --- a/storage/reads/types.tmpldata +++ /dev/null @@ -1,27 +0,0 @@ -[ - { - "Name":"Float", - "name":"float", - "Type":"float64" - }, - { - "Name":"Integer", - "name":"integer", - "Type":"int64" - }, - { - "Name":"Unsigned", - "name":"unsigned", - "Type":"uint64" - }, - { - "Name":"String", - "name":"string", - "Type":"string" - }, - { - "Name":"Boolean", - "name":"boolean", - "Type":"bool" - } -] diff --git a/storage/readservice/service.go b/storage/readservice/service.go deleted file mode 100644 index 9ac62c13c79..00000000000 --- a/storage/readservice/service.go +++ /dev/null @@ -1,14 +0,0 @@ -package readservice - -import ( - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/control" -) - -// NewProxyQueryService returns a proxy query service based on the given queryController -// suitable for the storage read service. -func NewProxyQueryService(queryController *control.Controller) query.ProxyQueryService { - return query.ProxyQueryServiceAsyncBridge{ - AsyncQueryService: queryController, - } -} diff --git a/storage/retention.go b/storage/retention.go deleted file mode 100644 index d7a3f43e3f8..00000000000 --- a/storage/retention.go +++ /dev/null @@ -1,12 +0,0 @@ -package storage - -import ( - "context" - - "github.com/influxdata/influxdb/v2" -) - -// A BucketFinder is responsible for providing access to buckets via a filter. -type BucketFinder interface { - FindBuckets(context.Context, influxdb.BucketFilter, ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) -} diff --git a/tag.go b/tag.go deleted file mode 100644 index 22fe7475a2c..00000000000 --- a/tag.go +++ /dev/null @@ -1,139 +0,0 @@ -package influxdb - -import ( - "encoding/json" - "regexp" - "strings" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// Operator is an Enum value of operators. -type Operator int - -// Valid returns invalid error if the operator is invalid. -func (op Operator) Valid() error { - if op < Equal || op > NotRegexEqual { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "Operator is invalid", - } - } - return nil -} - -// operators -const ( - Equal Operator = iota - NotEqual - RegexEqual - NotRegexEqual -) - -var opStr = []string{ - "equal", - "notequal", - "equalregex", - "notequalregex", -} - -var opStrMap = map[string]Operator{ - "equal": Equal, - "notequal": NotEqual, - "equalregex": RegexEqual, - "notequalregex": NotRegexEqual, -} - -// ToOperator converts a string into its equivalent Operator. -func ToOperator(s string) (Operator, bool) { - s = strings.ToLower(s) - if op, ok := opStrMap[s]; ok { - return op, true - } - return -1, false -} - -// String returns the string value of the operator. -func (op Operator) String() string { - if err := op.Valid(); err != nil { - return "" - } - return opStr[op] -} - -// MarshalJSON implements json.Marshal interface. -func (op Operator) MarshalJSON() ([]byte, error) { - return json.Marshal(op.String()) -} - -// UnmarshalJSON implements json.Unmarshaler interface. -func (op *Operator) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - var ok bool - if *op, ok = opStrMap[s]; !ok { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "unrecognized operator", - } - } - return nil -} - -// Tag is a tag key-value pair. 
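The Operator enum removed above (tag.go) serializes to the lower-case strings "equal", "notequal", "equalregex", and "notequalregex" rather than to its numeric values. A minimal standalone round-trip sketch follows; the enum, name tables, and marshaling logic are copied from the deleted file with error handling simplified, and `main` is only an illustrative driver.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Operator int

const (
	Equal Operator = iota
	NotEqual
	RegexEqual
	NotRegexEqual
)

var opStr = []string{"equal", "notequal", "equalregex", "notequalregex"}

var opStrMap = map[string]Operator{
	"equal":         Equal,
	"notequal":      NotEqual,
	"equalregex":    RegexEqual,
	"notequalregex": NotRegexEqual,
}

// MarshalJSON encodes the operator as its lower-case name.
func (op Operator) MarshalJSON() ([]byte, error) {
	return json.Marshal(opStr[op])
}

// UnmarshalJSON decodes a lower-case operator name back into an Operator.
func (op *Operator) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	v, ok := opStrMap[s]
	if !ok {
		return fmt.Errorf("unrecognized operator %q", s)
	}
	*op = v
	return nil
}

func main() {
	b, _ := json.Marshal(RegexEqual)
	fmt.Println(string(b)) // "equalregex"

	var op Operator
	_ = json.Unmarshal([]byte(`"notequal"`), &op)
	fmt.Println(op == NotEqual) // true
}
```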
-type Tag struct { - Key string `json:"key"` - Value string `json:"value"` -} - -// NewTag generates a tag pair from a string in the format key:value. -func NewTag(s string) (Tag, error) { - var tagPair Tag - - matched, err := regexp.MatchString(`^[a-zA-Z0-9_]+:[a-zA-Z0-9_]+$`, s) - if !matched || err != nil { - return tagPair, &errors.Error{ - Code: errors.EInvalid, - Msg: `tag must be in form key:value`, - } - } - - tagPair.Key, tagPair.Value, _ = strings.Cut(s, ":") - return tagPair, nil -} - -// Valid returns an error if the tagpair is missing fields -func (t Tag) Valid() error { - if t.Key == "" || t.Value == "" { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "tag must contain a key and a value", - } - } - return nil -} - -// QueryParam converts a Tag to a string query parameter -func (t *Tag) QueryParam() string { - return strings.Join([]string{t.Key, t.Value}, ":") -} - -// TagRule is the struct of tag rule. -type TagRule struct { - Tag - Operator Operator `json:"operator"` -} - -// Valid returns error for invalid operators. -func (tr TagRule) Valid() error { - if err := tr.Tag.Valid(); err != nil { - return err - } - - return tr.Operator.Valid() -} diff --git a/tag_test.go b/tag_test.go deleted file mode 100644 index 919331405d1..00000000000 --- a/tag_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package influxdb_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - influxTesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestTagValid(t *testing.T) { - cases := []struct { - name string - src influxdb.TagRule - err error - }{ - { - name: "regular status rule", - src: influxdb.TagRule{ - Tag: influxdb.Tag{Key: "k1", Value: "v1"}, - Operator: influxdb.Equal, - }, - }, - { - name: "empty", - src: influxdb.TagRule{}, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "tag must contain a key and a value", - }, - }, - { - name: "empty key", - src: influxdb.TagRule{ - Tag: influxdb.Tag{Value: "v1"}, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "tag must contain a key and a value", - }, - }, - { - name: "empty value", - src: influxdb.TagRule{ - Tag: influxdb.Tag{Key: "k1"}, - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "tag must contain a key and a value", - }, - }, - { - name: "invalid operator", - src: influxdb.TagRule{ - Tag: influxdb.Tag{Key: "k1", Value: "v1"}, - Operator: influxdb.Operator(-1), - }, - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "Operator is invalid", - }, - }, - } - for _, c := range cases { - err := c.src.Valid() - influxTesting.ErrorsEqual(t, err, c.err) - } -} diff --git a/task/backend/analytical_storage.go b/task/backend/analytical_storage.go deleted file mode 100644 index 3c41170c2de..00000000000 --- a/task/backend/analytical_storage.go +++ /dev/null @@ -1,454 +0,0 @@ -package backend - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "time" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/lang" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/errors" - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/storage" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "go.uber.org/zap" -) - -const ( - traceIDField = "ot_trace_id" - traceSampledTag 
= "ot_trace_sampled" - - runIDField = "runID" - nameField = "name" - scheduledForField = "scheduledFor" - startedAtField = "startedAt" - finishedAtField = "finishedAt" - requestedAtField = "requestedAt" - logField = "logs" - fluxField = "flux" - - taskIDTag = "taskID" - statusTag = "status" -) - -// RunRecorder is a type which records runs into an influxdb -// backed storage mechanism -type RunRecorder interface { - Record(ctx context.Context, bucketID platform.ID, bucket string, task *taskmodel.Task, run *taskmodel.Run) error -} - -// NewAnalyticalStorage creates a new analytical store with access to the necessary systems for storing data and to act as a middleware (deprecated) -func NewAnalyticalStorage(log *zap.Logger, ts taskmodel.TaskService, bs influxdb.BucketService, tcs TaskControlService, pw storage.PointsWriter, qs query.QueryService) *AnalyticalStorage { - return &AnalyticalStorage{ - log: log, - TaskService: ts, - BucketService: bs, - TaskControlService: tcs, - rr: NewStoragePointsWriterRecorder(log, pw), - qs: qs, - } -} - -type AnalyticalStorage struct { - taskmodel.TaskService - influxdb.BucketService - TaskControlService - - rr RunRecorder - qs query.QueryService - log *zap.Logger -} - -func (as *AnalyticalStorage) FinishRun(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - run, err := as.TaskControlService.FinishRun(ctx, taskID, runID) - if run != nil && run.ID.String() != "" { - task, err := as.TaskService.FindTaskByID(ctx, run.TaskID) - if err != nil { - return run, err - } - - sb, err := as.BucketService.FindBucketByName(ctx, task.OrganizationID, influxdb.TasksSystemBucketName) - if err != nil { - return run, err - } - - return run, as.rr.Record(ctx, sb.ID, influxdb.TasksSystemBucketName, task, run) - } - - return run, err -} - -// FindLogs returns logs for a run. -// First attempt to use the TaskService, then append additional analytical's logs to the list -func (as *AnalyticalStorage) FindLogs(ctx context.Context, filter taskmodel.LogFilter) ([]*taskmodel.Log, int, error) { - var logs []*taskmodel.Log - if filter.Run != nil { - run, err := as.FindRunByID(ctx, filter.Task, *filter.Run) - if err != nil { - return nil, 0, err - } - for i := 0; i < len(run.Log); i++ { - logs = append(logs, &run.Log[i]) - } - return logs, len(logs), nil - } - - // add historical logs to the transactional logs. - runs, n, err := as.FindRuns(ctx, taskmodel.RunFilter{Task: filter.Task}) - if err != nil { - return nil, 0, err - } - - for _, run := range runs { - for i := 0; i < len(run.Log); i++ { - logs = append(logs, &run.Log[i]) - } - } - - return logs, n, err -} - -// FindRuns returns a list of runs that match a filter and the total count of returned runs. 
-// First attempt to use the TaskService, then append additional analytical's runs to the list -func (as *AnalyticalStorage) FindRuns(ctx context.Context, filter taskmodel.RunFilter) ([]*taskmodel.Run, int, error) { - if filter.Limit == 0 { - filter.Limit = taskmodel.TaskDefaultPageSize - } - - if filter.Limit < 0 || filter.Limit > taskmodel.TaskMaxPageSize { - return nil, 0, taskmodel.ErrOutOfBoundsLimit - } - - runs, n, err := as.TaskService.FindRuns(ctx, filter) - if err != nil { - return runs, n, err - } - - // if we reached the limit lets stop here - if len(runs) >= filter.Limit { - return runs, n, err - } - - task, err := as.TaskService.FindTaskByID(ctx, filter.Task) - if err != nil { - return runs, n, err - } - - sb, err := as.BucketService.FindBucketByName(ctx, task.OrganizationID, influxdb.TasksSystemBucketName) - if err != nil { - return runs, n, err - } - - filterPart := "" - if filter.After != nil { - filterPart = fmt.Sprintf(`|> filter(fn: (r) => r.runID > %q)`, filter.After.String()) - } - - // creates flux script to filter based on time, if given - constructedTimeFilter := "" - if len(filter.AfterTime) > 0 || len(filter.BeforeTime) > 0 { - parsedAfterTime := time.Time{} - parsedBeforeTime := time.Now() - if len(filter.AfterTime) > 0 { - parsedAfterTime, err = time.Parse(time.RFC3339, filter.AfterTime) - if err != nil { - return nil, 0, fmt.Errorf("failed parsing after time: %s", err.Error()) - } - } - if len(filter.BeforeTime) > 0 { - parsedBeforeTime, err = time.Parse(time.RFC3339, filter.BeforeTime) - if err != nil { - return nil, 0, fmt.Errorf("failed parsing before time: %s", err.Error()) - } - - } - if !parsedBeforeTime.After(parsedAfterTime) { - return nil, 0, errors.New("given after time must be prior to before time") - } - - constructedTimeFilter = fmt.Sprintf( - `|> filter(fn: (r) =>time(v: r["scheduledFor"]) > %s and time(v: r["scheduledFor"]) < %s)`, - parsedAfterTime.Format(time.RFC3339), - parsedBeforeTime.Format(time.RFC3339)) - } - - // the data will be stored for 7 days in the system bucket so pulling 14d's is sufficient. 
- runsScript := fmt.Sprintf(`from(bucketID: %q) - |> range(start: -14d) - |> filter(fn: (r) => r._field != "status") - |> filter(fn: (r) => r._measurement == "runs" and r.taskID == %q) - %s - |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") - %s - |> group(columns: ["taskID"]) - |> sort(columns:["scheduledFor"], desc: true) - |> limit(n:%d) - - `, sb.ID.String(), filter.Task.String(), filterPart, constructedTimeFilter, filter.Limit-len(runs)) - - // At this point we are behind authorization - // so we are faking a read only permission to the org's system bucket - runSystemBucketID := sb.ID - runAuth := &influxdb.Authorization{ - Status: influxdb.Active, - ID: sb.ID, - OrgID: task.OrganizationID, - Permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: &task.OrganizationID, - ID: &runSystemBucketID, - }, - }, - }, - } - request := &query.Request{Authorization: runAuth, OrganizationID: task.OrganizationID, Compiler: lang.FluxCompiler{Query: runsScript}} - - ittr, err := as.qs.Query(ctx, request) - if err != nil { - return nil, 0, err - } - defer ittr.Release() - - re := &runReader{log: as.log.With(zap.String("component", "run-reader"), zap.String("taskID", filter.Task.String()))} - for ittr.More() { - err := ittr.Next().Tables().Do(re.readTable) - if err != nil { - return runs, n, err - } - } - - if err := ittr.Err(); err != nil { - return nil, 0, fmt.Errorf("unexpected internal error while decoding run response: %v", err) - } - runs = as.combineRuns(runs, re.runs) - - return runs, len(runs), err - -} - -// remove any kv runs that exist in the list of completed runs -func (as *AnalyticalStorage) combineRuns(currentRuns, completeRuns []*taskmodel.Run) []*taskmodel.Run { - crMap := map[platform.ID]int{} - - // track the current runs - for i, cr := range currentRuns { - crMap[cr.ID] = i - } - - // if we find a complete run that matches a current run the current run is out dated and - // should be removed. - for _, completeRun := range completeRuns { - if i, ok := crMap[completeRun.ID]; ok { - currentRuns = append(currentRuns[:i], currentRuns[i+1:]...) - } - } - - return append(currentRuns, completeRuns...) -} - -// FindRunByID returns a single run. -// First see if it is in the existing TaskService. If not pull it from analytical storage. -func (as *AnalyticalStorage) FindRunByID(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - // check the taskService to see if the run is on its list - run, err := as.TaskService.FindRunByID(ctx, taskID, runID) - if err != nil { - if err, ok := err.(*errors2.Error); !ok || err.Msg != "run not found" { - return run, err - } - } - if run != nil { - return run, err - } - - task, err := as.TaskService.FindTaskByID(ctx, taskID) - if err != nil { - return run, err - } - - sb, err := as.BucketService.FindBucketByName(ctx, task.OrganizationID, influxdb.TasksSystemBucketName) - if err != nil { - return run, err - } - - // the data will be stored for 7 days in the system bucket so pulling 14d's is sufficient. 
- findRunScript := fmt.Sprintf(`from(bucketID: %q) - |> range(start: -14d) - |> filter(fn: (r) => r._field != "status") - |> filter(fn: (r) => r._measurement == "runs" and r.taskID == %q) - |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") - |> group(columns: ["taskID"]) - |> filter(fn: (r) => r.runID == %q) - `, sb.ID.String(), taskID.String(), runID.String()) - - // At this point we are behind authorization - // so we are faking a read only permission to the org's system bucket - runSystemBucketID := sb.ID - runAuth := &influxdb.Authorization{ - ID: sb.ID, - Status: influxdb.Active, - OrgID: task.OrganizationID, - Permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: &task.OrganizationID, - ID: &runSystemBucketID, - }, - }, - }, - } - request := &query.Request{Authorization: runAuth, OrganizationID: task.OrganizationID, Compiler: lang.FluxCompiler{Query: findRunScript}} - - ittr, err := as.qs.Query(ctx, request) - if err != nil { - return nil, err - } - defer ittr.Release() - - re := &runReader{} - for ittr.More() { - err := ittr.Next().Tables().Do(re.readTable) - if err != nil { - return nil, err - } - } - - if err := ittr.Err(); err != nil { - return nil, fmt.Errorf("unexpected internal error while decoding run response: %v", err) - } - - if len(re.runs) == 0 { - return nil, taskmodel.ErrRunNotFound - - } - - if len(re.runs) != 1 { - return nil, &errors2.Error{ - Msg: "found multiple runs with id " + runID.String(), - Code: errors2.EInternal, - } - } - - return re.runs[0], err -} - -func (as *AnalyticalStorage) RetryRun(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - run, err := as.TaskService.RetryRun(ctx, taskID, runID) - if err != nil { - if err, ok := err.(*errors2.Error); !ok || err.Msg != "run not found" { - return run, err - } - } - - if run != nil { - return run, err - } - - // try finding the run (in our system or underlying) - run, err = as.FindRunByID(ctx, taskID, runID) - if err != nil { - return run, err - } - - sf := run.ScheduledFor - - return as.ForceRun(ctx, taskID, sf.Unix()) -} - -type runReader struct { - runs []*taskmodel.Run - log *zap.Logger -} - -func (re *runReader) readTable(tbl flux.Table) error { - return tbl.Do(re.readRuns) -} - -func (re *runReader) readRuns(cr flux.ColReader) error { - for i := 0; i < cr.Len(); i++ { - var r taskmodel.Run - for j, col := range cr.Cols() { - switch col.Label { - case runIDField: - if cr.Strings(j).Value(i) != "" { - id, err := platform.IDFromString(cr.Strings(j).Value(i)) - if err != nil { - re.log.Info("Failed to parse runID", zap.Error(err)) - continue - } - r.ID = *id - } - case taskIDTag: - if cr.Strings(j).Value(i) != "" { - id, err := platform.IDFromString(cr.Strings(j).Value(i)) - if err != nil { - re.log.Info("Failed to parse taskID", zap.Error(err)) - continue - } - r.TaskID = *id - } - case startedAtField: - started, err := time.Parse(time.RFC3339Nano, cr.Strings(j).Value(i)) - if err != nil { - re.log.Info("Failed to parse startedAt time", zap.Error(err)) - continue - } - r.StartedAt = started.UTC() - case requestedAtField: - requested, err := time.Parse(time.RFC3339Nano, cr.Strings(j).Value(i)) - if err != nil { - re.log.Info("Failed to parse requestedAt time", zap.Error(err)) - continue - } - r.RequestedAt = requested.UTC() - case scheduledForField: - scheduled, err := time.Parse(time.RFC3339, cr.Strings(j).Value(i)) - if err != nil { - re.log.Info("Failed to parse 
scheduledFor time", zap.Error(err)) - continue - } - r.ScheduledFor = scheduled.UTC() - case statusTag: - r.Status = cr.Strings(j).Value(i) - case fluxField: - r.Flux = cr.Strings(j).Value(i) - case traceIDField: - r.TraceID = cr.Strings(j).Value(i) - case traceSampledTag: - r.IsSampled = cr.Bools(j).Value(i) - case finishedAtField: - finished, err := time.Parse(time.RFC3339Nano, cr.Strings(j).Value(i)) - if err != nil { - re.log.Info("Failed to parse finishedAt time", zap.Error(err)) - continue - } - r.FinishedAt = finished.UTC() - case logField: - logBytes := bytes.TrimSpace([]byte(cr.Strings(j).Value(i))) - if len(logBytes) != 0 { - err := json.Unmarshal(logBytes, &r.Log) - if err != nil { - re.log.Info("Failed to parse log data", zap.Error(err), zap.ByteString("log_bytes", logBytes)) - } - } - } - } - - // if we dont have a full enough data set we fail here. - if r.ID.Valid() { - re.runs = append(re.runs, &r) - } - - } - - return nil -} diff --git a/task/backend/analytical_storage_test.go b/task/backend/analytical_storage_test.go deleted file mode 100644 index e9bdfd15310..00000000000 --- a/task/backend/analytical_storage_test.go +++ /dev/null @@ -1,228 +0,0 @@ -package backend_test - -import ( - "context" - "os" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/influxdata/flux" - "github.com/influxdata/flux/dependencies/url" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - icontext "github.com/influxdata/influxdb/v2/context" - _ "github.com/influxdata/influxdb/v2/fluxinit/static" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/control" - "github.com/influxdata/influxdb/v2/query/fluxlang" - stdlib "github.com/influxdata/influxdb/v2/query/stdlib/influxdata/influxdb" - "github.com/influxdata/influxdb/v2/storage" - storageflux "github.com/influxdata/influxdb/v2/storage/flux" - "github.com/influxdata/influxdb/v2/task/backend" - "github.com/influxdata/influxdb/v2/task/servicetest" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/influxdata/influxdb/v2/v1/services/meta" - storage2 "github.com/influxdata/influxdb/v2/v1/services/storage" - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" -) - -func TestAnalyticalStore(t *testing.T) { - t.Skip("https://github.com/influxdata/influxdb/issues/22920") - servicetest.TestTaskService( - t, - func(t *testing.T) (*servicetest.System, context.CancelFunc) { - ctx, cancelFunc := context.WithCancel(context.Background()) - logger := zaptest.NewLogger(t) - store := inmem.NewKVStore() - if err := all.Up(ctx, logger, store); err != nil { - t.Fatal(err) - } - - tenantStore := tenant.NewStore(store) - ts := tenant.NewService(tenantStore) - - authStore, err := authorization.NewStore(store) - require.NoError(t, err) - authSvc := authorization.NewService(authStore, ts) - - svc := kv.NewService(logger, store, ts, kv.ServiceConfig{ - FluxLanguageService: fluxlang.DefaultService, - }) - - metaClient := meta.NewClient(meta.NewConfig(), 
store) - require.NoError(t, metaClient.Open()) - - var ( - ab = newAnalyticalBackend(t, ts.OrganizationService, ts.BucketService, metaClient) - svcStack = backend.NewAnalyticalStorage(logger, svc, ts.BucketService, svc, ab.PointsWriter(), ab.QueryService()) - ) - - ts.BucketService = storage.NewBucketService(logger, ts.BucketService, ab.storageEngine) - - authCtx := icontext.SetAuthorizer(ctx, &influxdb.Authorization{ - Permissions: influxdb.OperPermissions(), - }) - - return &servicetest.System{ - TaskControlService: svcStack, - TaskService: svcStack, - OrganizationService: ts.OrganizationService, - UserService: ts.UserService, - UserResourceMappingService: ts.UserResourceMappingService, - AuthorizationService: authSvc, - Ctx: authCtx, - CallFinishRun: true, - }, func() { - cancelFunc() - ab.Close(t) - } - }, - ) -} - -func TestDeduplicateRuns(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - logger := zaptest.NewLogger(t) - store := inmem.NewKVStore() - if err := all.Up(context.Background(), logger, store); err != nil { - t.Fatal(err) - } - - tenantStore := tenant.NewStore(store) - ts := tenant.NewService(tenantStore) - - metaClient := meta.NewClient(meta.NewConfig(), store) - require.NoError(t, metaClient.Open()) - - _, err := metaClient.CreateDatabase(platform.ID(10).String()) - require.NoError(t, err) - - ab := newAnalyticalBackend(t, ts.OrganizationService, ts.BucketService, metaClient) - defer ab.Close(t) - - mockTS := &mock.TaskService{ - FindTaskByIDFn: func(context.Context, platform.ID) (*taskmodel.Task, error) { - return &taskmodel.Task{ID: 1, OrganizationID: 20}, nil - }, - FindRunsFn: func(context.Context, taskmodel.RunFilter) ([]*taskmodel.Run, int, error) { - return []*taskmodel.Run{ - {ID: 2, Status: "started"}, - }, 1, nil - }, - } - mockTCS := &mock.TaskControlService{ - FinishRunFn: func(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - return &taskmodel.Run{ID: 2, TaskID: 1, Status: "success", ScheduledFor: time.Now(), StartedAt: time.Now().Add(1), FinishedAt: time.Now().Add(2)}, nil - }, - } - mockBS := mock.NewBucketService() - - svcStack := backend.NewAnalyticalStorage(zaptest.NewLogger(t), mockTS, mockBS, mockTCS, ab.PointsWriter(), ab.QueryService()) - - _, err = svcStack.FinishRun(context.Background(), 1, 2) - if err != nil { - t.Fatal(err) - } - - runs, _, err := svcStack.FindRuns(context.Background(), taskmodel.RunFilter{Task: 1}) - if err != nil { - t.Fatal(err) - } - - if len(runs) != 1 { - t.Fatalf("expected 1 run but got %d", len(runs)) - } - - if runs[0].Status != "success" { - t.Fatalf("expected the deduped run to be 'success', got: %s", runs[0].Status) - } -} - -type analyticalBackend struct { - queryController *control.Controller - rootDir string - storageEngine *storage.Engine -} - -func (ab *analyticalBackend) PointsWriter() storage.PointsWriter { - return ab.storageEngine -} - -func (ab *analyticalBackend) QueryService() query.QueryService { - return query.QueryServiceBridge{AsyncQueryService: ab.queryController} -} - -func (ab *analyticalBackend) Close(t *testing.T) { - if err := ab.queryController.Shutdown(context.Background()); err != nil { - t.Error(err) - } - if err := ab.storageEngine.Close(); err != nil { - t.Error(err) - } - if err := os.RemoveAll(ab.rootDir); err != nil { - t.Error(err) - } -} - -func newAnalyticalBackend(t *testing.T, orgSvc influxdb.OrganizationService, bucketSvc influxdb.BucketService, metaClient storage.MetaClient) *analyticalBackend { - // Mostly copied out of 
cmd/influxd/main.go. - logger := zaptest.NewLogger(t) - - rootDir := t.TempDir() - - engine := storage.NewEngine(rootDir, storage.NewConfig(), storage.WithMetaClient(metaClient)) - engine.WithLogger(logger) - - if err := engine.Open(context.Background()); err != nil { - t.Fatal(err) - } - - defer func() { - if t.Failed() { - engine.Close() - } - }() - - const ( - concurrencyQuota = 10 - memoryBytesQuotaPerQuery = 1e6 - queueSize = 10 - ) - - // TODO(adam): do we need a proper secret service here? - storageStore := storage2.NewStore(engine.TSDBStore(), engine.MetaClient()) - readsReader := storageflux.NewReader(storageStore) - - deps, err := stdlib.NewDependencies(readsReader, engine, bucketSvc, orgSvc, nil, nil, stdlib.WithURLValidator(url.PassValidator{})) - if err != nil { - t.Fatal(err) - } - cc := control.Config{ - ExecutorDependencies: []flux.Dependency{deps}, - ConcurrencyQuota: concurrencyQuota, - MemoryBytesQuotaPerQuery: int64(memoryBytesQuotaPerQuery), - QueueSize: queueSize, - } - - queryController, err := control.New(cc, logger.With(zap.String("service", "storage-reads"))) - if err != nil { - t.Fatal(err) - } - - return &analyticalBackend{ - queryController: queryController, - rootDir: rootDir, - storageEngine: engine, - } -} diff --git a/task/backend/check_task_error.go b/task/backend/check_task_error.go deleted file mode 100644 index dc9571c0556..00000000000 --- a/task/backend/check_task_error.go +++ /dev/null @@ -1,31 +0,0 @@ -package backend - -import ( - "strings" -) - -// IsUnrecoverable takes in an error and determines if it is permanent (requiring user intervention to fix) -func IsUnrecoverable(err error) bool { - if err == nil { - return false - } - - errString := err.Error() - - // missing bucket requires user intervention to resolve - if strings.Contains(errString, "could not find bucket") { - return true - } - - // unparsable Flux requires user intervention to resolve - if strings.Contains(errString, "could not parse Flux script") { - return true - } - - // Flux script uses an API that attempts to read the filesystem - if strings.Contains(errString, "filesystem service uninitialized") { - return true - } - - return false -} diff --git a/task/backend/coordinator.go b/task/backend/coordinator.go deleted file mode 100644 index 7989206612b..00000000000 --- a/task/backend/coordinator.go +++ /dev/null @@ -1,116 +0,0 @@ -package backend - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "go.uber.org/zap" -) - -var now = func() time.Time { - return time.Now().UTC() -} - -// TaskService is a type on which tasks can be listed -type TaskService interface { - FindTasks(context.Context, taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) - UpdateTask(context.Context, platform.ID, taskmodel.TaskUpdate) (*taskmodel.Task, error) -} - -// Coordinator is a type with a single method which -// is called when a task has been created -type Coordinator interface { - TaskCreated(context.Context, *taskmodel.Task) error -} - -// NotifyCoordinatorOfExisting lists all tasks by the provided task service and for -// each task it calls the provided coordinators task created method -func NotifyCoordinatorOfExisting(ctx context.Context, log *zap.Logger, ts TaskService, coord Coordinator) error { - // If we missed a Create Action - tasks, _, err := ts.FindTasks(ctx, taskmodel.TaskFilter{}) - if err != nil { - return err - } - - latestCompleted := now() - for len(tasks) > 0 { - for _, task := 
range tasks { - if task.Status != string(taskmodel.TaskActive) { - continue - } - - task, err := ts.UpdateTask(context.Background(), task.ID, taskmodel.TaskUpdate{ - LatestCompleted: &latestCompleted, - LatestScheduled: &latestCompleted, - }) - if err != nil { - log.Error("Failed to set latestCompleted", zap.Error(err)) - continue - } - - coord.TaskCreated(ctx, task) - } - - tasks, _, err = ts.FindTasks(ctx, taskmodel.TaskFilter{ - After: &tasks[len(tasks)-1].ID, - }) - if err != nil { - return err - } - } - - return nil -} - -type TaskResumer func(ctx context.Context, id platform.ID, runID platform.ID) error - -// TaskNotifyCoordinatorOfExisting lists all tasks by the provided task service and for -// each task it calls the provided coordinators task created method -// TODO(docmerlin): this is temporary untill the executor queue is persistent -func TaskNotifyCoordinatorOfExisting(ctx context.Context, ts TaskService, tcs TaskControlService, coord Coordinator, exec TaskResumer, log *zap.Logger) error { - // If we missed a Create Action - tasks, _, err := ts.FindTasks(ctx, taskmodel.TaskFilter{}) - if err != nil { - return err - } - - latestCompleted := now() - for len(tasks) > 0 { - for _, task := range tasks { - if task.Status != string(taskmodel.TaskActive) { - continue - } - - task, err := ts.UpdateTask(context.Background(), task.ID, taskmodel.TaskUpdate{ - LatestCompleted: &latestCompleted, - LatestScheduled: &latestCompleted, - }) - if err != nil { - log.Error("Failed to set latestCompleted", zap.Error(err)) - continue - } - - coord.TaskCreated(ctx, task) - runs, err := tcs.CurrentlyRunning(ctx, task.ID) - if err != nil { - return err - } - for i := range runs { - if err := exec(ctx, runs[i].TaskID, runs[i].ID); err != nil { - return err - } - } - } - - tasks, _, err = ts.FindTasks(ctx, taskmodel.TaskFilter{ - After: &tasks[len(tasks)-1].ID, - }) - if err != nil { - return err - } - } - - return nil -} diff --git a/task/backend/coordinator/coordinator.go b/task/backend/coordinator/coordinator.go deleted file mode 100644 index 47476fc9eb9..00000000000 --- a/task/backend/coordinator/coordinator.go +++ /dev/null @@ -1,185 +0,0 @@ -package coordinator - -import ( - "context" - "errors" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/task/backend/executor" - "github.com/influxdata/influxdb/v2/task/backend/middleware" - "github.com/influxdata/influxdb/v2/task/backend/scheduler" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "go.uber.org/zap" -) - -var _ middleware.Coordinator = (*Coordinator)(nil) -var _ Executor = (*executor.Executor)(nil) - -// DefaultLimit is the maximum number of tasks that a given taskd server can own -const DefaultLimit = 1000 - -// Executor is an abstraction of the task executor with only the functions needed by the coordinator -type Executor interface { - ManualRun(ctx context.Context, id platform.ID, runID platform.ID) (executor.Promise, error) - ScheduleManualRun(ctx context.Context, id platform.ID, runID platform.ID) error - Cancel(ctx context.Context, runID platform.ID) error -} - -// Coordinator is the intermediary between the scheduling/executing system and the rest of the task system -type Coordinator struct { - log *zap.Logger - sch scheduler.Scheduler - ex Executor - - limit int -} - -type CoordinatorOption func(*Coordinator) - -// SchedulableTask is a wrapper around the Task struct, giving it methods to make it compatible with the Scheduler -type 
SchedulableTask struct { - *taskmodel.Task - sch scheduler.Schedule - lsc time.Time -} - -func (t SchedulableTask) ID() scheduler.ID { - return scheduler.ID(t.Task.ID) -} - -// Schedule takes the time a Task is scheduled for and returns a Schedule object -func (t SchedulableTask) Schedule() scheduler.Schedule { - return t.sch -} - -// Offset returns a time.Duration for the Task's offset property -func (t SchedulableTask) Offset() time.Duration { - return t.Task.Offset -} - -// LastScheduled parses the task's LatestCompleted value as a Time object -func (t SchedulableTask) LastScheduled() time.Time { - return t.lsc -} - -func WithLimitOpt(i int) CoordinatorOption { - return func(c *Coordinator) { - c.limit = i - } -} - -// NewSchedulableTask transforms an influxdb task to a schedulable task type -func NewSchedulableTask(task *taskmodel.Task) (SchedulableTask, error) { - - if task.Cron == "" && task.Every == "" { - return SchedulableTask{}, errors.New("invalid cron or every") - } - effCron := task.EffectiveCron() - ts := task.CreatedAt - if task.LatestScheduled.IsZero() || task.LatestScheduled.Before(task.LatestCompleted) { - ts = task.LatestCompleted - } else if !task.LatestScheduled.IsZero() { - ts = task.LatestScheduled - } - - var sch scheduler.Schedule - var err error - sch, ts, err = scheduler.NewSchedule(effCron, ts) - if err != nil { - return SchedulableTask{}, err - } - return SchedulableTask{Task: task, sch: sch, lsc: ts}, nil -} - -func NewCoordinator(log *zap.Logger, scheduler scheduler.Scheduler, executor Executor, opts ...CoordinatorOption) *Coordinator { - c := &Coordinator{ - log: log, - sch: scheduler, - ex: executor, - limit: DefaultLimit, - } - - for _, opt := range opts { - opt(c) - } - - return c -} - -// TaskCreated asks the Scheduler to schedule the newly created task -func (c *Coordinator) TaskCreated(ctx context.Context, task *taskmodel.Task) error { - t, err := NewSchedulableTask(task) - - if err != nil { - return err - } - // func new schedulable task - // catch errors from offset and last scheduled - if err = c.sch.Schedule(t); err != nil { - return err - } - - return nil -} - -// TaskUpdated releases the task if it is being disabled, and schedules it otherwise -func (c *Coordinator) TaskUpdated(ctx context.Context, from, to *taskmodel.Task) error { - sid := scheduler.ID(to.ID) - t, err := NewSchedulableTask(to) - if err != nil { - return err - } - - // if the tasks is already inactive, we don't do anything - if to.Status == from.Status && to.Status == string(taskmodel.TaskInactive) { - return nil - } - - // if disabling the task, release it before schedule update - if to.Status != from.Status && to.Status == string(taskmodel.TaskInactive) { - if err := c.sch.Release(sid); err != nil && err != taskmodel.ErrTaskNotClaimed { - return err - } - } else { - if err := c.sch.Schedule(t); err != nil { - return err - } - } - - return nil -} - -// TaskDeleted asks the Scheduler to release the deleted task -func (c *Coordinator) TaskDeleted(ctx context.Context, id platform.ID) error { - tid := scheduler.ID(id) - if err := c.sch.Release(tid); err != nil && err != taskmodel.ErrTaskNotClaimed { - return err - } - - return nil -} - -// RunCancelled speaks directly to the executor to cancel a task run -func (c *Coordinator) RunCancelled(ctx context.Context, runID platform.ID) error { - err := c.ex.Cancel(ctx, runID) - - return err -} - -// RunForced speaks directly to the Executor to run a task immediately, or schedule the run if `scheduledFor` is set. 
-func (c *Coordinator) RunForced(ctx context.Context, task *taskmodel.Task, run *taskmodel.Run) error { - var err error - if !run.ScheduledFor.IsZero() { - err = c.ex.ScheduleManualRun(ctx, task.ID, run.ID) - } else { - // the returned promise is not used, since clients expect the HTTP server to return immediately after scheduling the - // task rather than waiting for the task to finish - _, err = c.ex.ManualRun(ctx, task.ID, run.ID) - } - - if err != nil { - return taskmodel.ErrRunExecutionError(err) - } - return nil -} diff --git a/task/backend/coordinator/coordinator_test.go b/task/backend/coordinator/coordinator_test.go deleted file mode 100644 index ccc9f068a20..00000000000 --- a/task/backend/coordinator/coordinator_test.go +++ /dev/null @@ -1,274 +0,0 @@ -package coordinator - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/task/backend/scheduler" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "go.uber.org/zap/zaptest" -) - -func Test_Coordinator_Executor_Methods(t *testing.T) { - var ( - one = platform.ID(1) - taskOne = &taskmodel.Task{ID: one} - - runOne = &taskmodel.Run{ - ID: one, - TaskID: one, - } - - allowUnexported = cmp.AllowUnexported(executorE{}, schedulerC{}, SchedulableTask{}) - - scheduledTime = time.Now() - ) - - for _, test := range []struct { - name string - claimErr error - updateErr error - releaseErr error - call func(*testing.T, *Coordinator) - executor *executorE - }{ - { - name: "RunForced", - call: func(t *testing.T, c *Coordinator) { - if err := c.RunForced(context.Background(), taskOne, runOne); err != nil { - t.Errorf("expected nil error found %q", err) - } - }, - executor: &executorE{ - calls: []interface{}{ - manualRunCall{taskOne.ID, runOne.ID, false}, - }, - }, - }, - { - name: "RunForcedScheduled", - call: func(t *testing.T, c *Coordinator) { - rr := runOne - rr.ScheduledFor = scheduledTime - if err := c.RunForced(context.Background(), taskOne, runOne); err != nil { - t.Errorf("expected nil error found %q", err) - } - }, - executor: &executorE{ - calls: []interface{}{ - manualRunCall{taskOne.ID, runOne.ID, true}, - }, - }, - }, - { - name: "RunCancelled", - call: func(t *testing.T, c *Coordinator) { - if err := c.RunCancelled(context.Background(), runOne.ID); err != nil { - t.Errorf("expected nil error found %q", err) - } - }, - executor: &executorE{ - calls: []interface{}{ - cancelCallC{runOne.ID}, - }, - }, - }, - } { - t.Run(test.name, func(t *testing.T) { - var ( - executor = &executorE{} - scheduler = &schedulerC{} - coord = NewCoordinator(zaptest.NewLogger(t), scheduler, executor) - ) - - test.call(t, coord) - - if diff := cmp.Diff( - test.executor.calls, - executor.calls, - allowUnexported); diff != "" { - t.Errorf("unexpected executor contents %s", diff) - } - }) - } -} - -func TestNewSchedulableTask(t *testing.T) { - now := time.Now().UTC() - one := platform.ID(1) - taskOne := &taskmodel.Task{ID: one, CreatedAt: now, Cron: "* * * * *", LatestCompleted: now} - schedulableT, err := NewSchedulableTask(taskOne) - if err != nil { - t.Fatal(err) - } - if !schedulableT.LastScheduled().Truncate(time.Second).Equal(now.Truncate(time.Second)) { - fmt.Println(schedulableT.LastScheduled()) - t.Fatalf("expected SchedulableTask's LatestScheduled to equal %s but it was %s", now.Truncate(time.Second), schedulableT.LastScheduled()) - } - - taskTwo := 
&taskmodel.Task{ID: one, CreatedAt: now, Cron: "* * * * *", LatestCompleted: now, LatestScheduled: now.Add(-10 * time.Second)} - schedulableT, err = NewSchedulableTask(taskTwo) - if err != nil { - t.Fatal(err) - } - if !schedulableT.LastScheduled().Truncate(time.Second).Equal(now.Truncate(time.Second)) { - t.Fatalf("expected SchedulableTask's LatestScheduled to equal %s but it was %s", now.Truncate(time.Second), schedulableT.LastScheduled()) - } - -} - -func Test_Coordinator_Scheduler_Methods(t *testing.T) { - - var ( - one = platform.ID(1) - two = platform.ID(2) - three = platform.ID(3) - now = time.Now().UTC() - - taskOne = &taskmodel.Task{ID: one, CreatedAt: now, Cron: "* * * * *"} - taskTwo = &taskmodel.Task{ID: two, Status: "active", CreatedAt: now, Cron: "* * * * *"} - taskTwoInactive = &taskmodel.Task{ID: two, Status: "inactive", CreatedAt: now, Cron: "* * * * *"} - taskThreeOriginal = &taskmodel.Task{ - ID: three, - Status: "active", - Name: "Previous", - CreatedAt: now, - Cron: "* * * * *", - } - taskThreeNew = &taskmodel.Task{ - ID: three, - Status: "active", - Name: "Renamed", - CreatedAt: now, - Cron: "* * * * *", - } - ) - - schedulableT, err := NewSchedulableTask(taskOne) - if err != nil { - t.Fatal(err) - } - schedulableTaskTwo, err := NewSchedulableTask(taskTwo) - if err != nil { - t.Fatal(err) - } - - schedulableTaskThree, err := NewSchedulableTask(taskThreeNew) - if err != nil { - t.Fatal(err) - } - - runOne := &taskmodel.Run{ - ID: one, - TaskID: one, - ScheduledFor: time.Now().UTC(), - } - - for _, test := range []struct { - name string - claimErr error - updateErr error - releaseErr error - call func(*testing.T, *Coordinator) - scheduler *schedulerC - }{ - { - name: "TaskCreated", - call: func(t *testing.T, c *Coordinator) { - if err := c.TaskCreated(context.Background(), taskOne); err != nil { - t.Errorf("expected nil error found %q", err) - } - }, - scheduler: &schedulerC{ - calls: []interface{}{ - scheduleCall{schedulableT}, - }, - }, - }, - { - name: "TaskUpdated - deactivate task", - call: func(t *testing.T, c *Coordinator) { - if err := c.TaskUpdated(context.Background(), taskTwo, taskTwoInactive); err != nil { - t.Errorf("expected nil error found %q", err) - } - }, - scheduler: &schedulerC{ - calls: []interface{}{ - releaseCallC{scheduler.ID(taskTwo.ID)}, - }, - }, - }, - { - name: "TaskUpdated - activate task", - call: func(t *testing.T, c *Coordinator) { - if err := c.TaskUpdated(context.Background(), taskTwoInactive, taskTwo); err != nil { - t.Errorf("expected nil error found %q", err) - } - }, - scheduler: &schedulerC{ - calls: []interface{}{ - scheduleCall{schedulableTaskTwo}, - }, - }, - }, - { - name: "TaskUpdated - change name", - call: func(t *testing.T, c *Coordinator) { - if err := c.TaskUpdated(context.Background(), taskThreeOriginal, taskThreeNew); err != nil { - t.Errorf("expected nil error found %q", err) - } - }, - scheduler: &schedulerC{ - calls: []interface{}{ - scheduleCall{schedulableTaskThree}, - }, - }, - }, - { - name: "TaskUpdated - inactive task is not scheduled", - call: func(t *testing.T, c *Coordinator) { - if err := c.TaskUpdated(context.Background(), taskTwoInactive, taskTwoInactive); err != nil { - t.Errorf("expected nil error found %q", err) - } - }, - scheduler: &schedulerC{}, - }, - { - name: "TaskDeleted", - call: func(t *testing.T, c *Coordinator) { - if err := c.TaskDeleted(context.Background(), runOne.ID); err != nil { - t.Errorf("expected nil error found %q", err) - } - }, - scheduler: &schedulerC{ - calls: []interface{}{ - 
releaseCallC{scheduler.ID(taskOne.ID)}, - }, - }, - }, - } { - t.Run(test.name, func(t *testing.T) { - var ( - executor = &executorE{} - sch = &schedulerC{} - coord = NewCoordinator(zaptest.NewLogger(t), sch, executor) - ) - - test.call(t, coord) - - if diff := cmp.Diff( - test.scheduler.calls, - sch.calls, - cmp.AllowUnexported(executorE{}, schedulerC{}, SchedulableTask{}, *coord), - cmpopts.IgnoreUnexported(scheduler.Schedule{}), - ); diff != "" { - t.Errorf("unexpected scheduler contents %s", diff) - } - }) - } -} diff --git a/task/backend/coordinator/support_test.go b/task/backend/coordinator/support_test.go deleted file mode 100644 index 6ee351a5eef..00000000000 --- a/task/backend/coordinator/support_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package coordinator - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/task/backend/executor" - "github.com/influxdata/influxdb/v2/task/backend/scheduler" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -var _ Executor = (*executorE)(nil) - -type ( - executorE struct { - calls []interface{} - } - - manualRunCall struct { - TaskID platform.ID - RunID platform.ID - WasScheduled bool - } - - cancelCallC struct { - RunID platform.ID - } -) - -type ( - schedulerC struct { - scheduler.Scheduler - - calls []interface{} - } - - scheduleCall struct { - Task scheduler.Schedulable - } - - releaseCallC struct { - TaskID scheduler.ID - } -) - -type ( - promise struct { - run *taskmodel.Run - - done chan struct{} - err error - - ctx context.Context - cancelFunc context.CancelFunc - } -) - -// ID is the id of the run that was created -func (p *promise) ID() platform.ID { - return p.run.ID -} - -// Cancel is used to cancel a executing query -func (p *promise) Cancel(ctx context.Context) { - // call cancelfunc - p.cancelFunc() - - // wait for ctx.Done or p.Done - select { - case <-p.Done(): - case <-ctx.Done(): - } -} - -// Done provides a channel that closes on completion of a promise -func (p *promise) Done() <-chan struct{} { - return p.done -} - -// Error returns the error resulting from a run execution. -// If the execution is not complete error waits on Done(). 
-func (p *promise) Error() error { - <-p.done - return p.err -} - -func (s *schedulerC) Schedule(task scheduler.Schedulable) error { - s.calls = append(s.calls, scheduleCall{task}) - - return nil -} - -func (s *schedulerC) Release(taskID scheduler.ID) error { - s.calls = append(s.calls, releaseCallC{taskID}) - - return nil -} - -func (e *executorE) ManualRun(ctx context.Context, id platform.ID, runID platform.ID) (executor.Promise, error) { - e.calls = append(e.calls, manualRunCall{id, runID, false}) - ctx, cancel := context.WithCancel(ctx) - p := promise{ - done: make(chan struct{}), - ctx: ctx, - cancelFunc: cancel, - } - close(p.done) - - err := p.Error() - return &p, err -} - -func (e *executorE) ScheduleManualRun(ctx context.Context, id platform.ID, runID platform.ID) error { - e.calls = append(e.calls, manualRunCall{id, runID, true}) - return nil -} - -func (e *executorE) Cancel(ctx context.Context, runID platform.ID) error { - e.calls = append(e.calls, cancelCallC{runID}) - return nil -} diff --git a/task/backend/coordinator_test.go b/task/backend/coordinator_test.go deleted file mode 100644 index be3e60901a3..00000000000 --- a/task/backend/coordinator_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package backend - -import ( - "context" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "go.uber.org/zap/zaptest" -) - -var ( - one = platform.ID(1) - two = platform.ID(2) - three = platform.ID(3) - four = platform.ID(4) - - aTime = time.Now().UTC() - - taskOne = &taskmodel.Task{ID: one} - taskTwo = &taskmodel.Task{ID: two, Status: "active"} - taskThree = &taskmodel.Task{ID: three, Status: "inactive"} - taskFour = &taskmodel.Task{ID: four} - - allTasks = map[platform.ID]*taskmodel.Task{ - one: taskOne, - two: taskTwo, - three: taskThree, - four: taskFour, - } -) - -func Test_NotifyCoordinatorOfCreated(t *testing.T) { - var ( - coordinator = &coordinator{} - tasks = &taskService{ - // paginated responses - pageOne: []*taskmodel.Task{taskOne}, - otherPages: map[platform.ID][]*taskmodel.Task{ - one: {taskTwo, taskThree}, - three: {taskFour}, - }, - } - ) - - defer func(old func() time.Time) { - now = old - }(now) - - now = func() time.Time { return aTime } - - if err := NotifyCoordinatorOfExisting(context.Background(), zaptest.NewLogger(t), tasks, coordinator); err != nil { - t.Errorf("expected nil, found %q", err) - } - - if diff := cmp.Diff([]update{ - {two, taskmodel.TaskUpdate{LatestCompleted: &aTime, LatestScheduled: &aTime}}, - }, tasks.updates); diff != "" { - t.Errorf("unexpected updates to task service %v", diff) - } - - if diff := cmp.Diff([]*taskmodel.Task{ - taskTwo, - }, coordinator.tasks); diff != "" { - t.Errorf("unexpected tasks sent to coordinator %v", diff) - } -} - -type coordinator struct { - tasks []*taskmodel.Task -} - -func (c *coordinator) TaskCreated(_ context.Context, task *taskmodel.Task) error { - c.tasks = append(c.tasks, task) - - return nil -} - -// TasksService mocking -type taskService struct { - // paginated tasks - pageOne []*taskmodel.Task - otherPages map[platform.ID][]*taskmodel.Task - - // find tasks call - filter taskmodel.TaskFilter - // update call - updates []update -} - -type update struct { - ID platform.ID - Update taskmodel.TaskUpdate -} - -func (t *taskService) UpdateTask(_ context.Context, id platform.ID, upd taskmodel.TaskUpdate) (*taskmodel.Task, error) { - t.updates = append(t.updates, update{id, upd}) - - 
return allTasks[id], nil -} - -func (t *taskService) FindTasks(_ context.Context, filter taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - t.filter = filter - - if filter.After == nil { - return t.pageOne, len(t.pageOne), nil - } - - tasks := t.otherPages[*filter.After] - return tasks, len(tasks), nil -} diff --git a/task/backend/executor/executor.go b/task/backend/executor/executor.go deleted file mode 100644 index 1ebfaa83033..00000000000 --- a/task/backend/executor/executor.go +++ /dev/null @@ -1,824 +0,0 @@ -package executor - -import ( - "context" - "encoding/json" - "fmt" - "sync" - "time" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/lang" - "github.com/influxdata/flux/runtime" - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/feature" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/task/backend" - "github.com/influxdata/influxdb/v2/task/backend/scheduler" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "go.uber.org/zap" -) - -const ( - maxPromises = 1000 - defaultMaxWorkers = 100 - - lastSuccessOption = "tasks.lastSuccessTime" -) - -var _ scheduler.Executor = (*Executor)(nil) - -type PermissionService interface { - FindPermissionForUser(ctx context.Context, UserID platform.ID) (influxdb.PermissionSet, error) -} - -type Promise interface { - ID() platform.ID - Cancel(ctx context.Context) - Done() <-chan struct{} - Error() error -} - -// MultiLimit allows us to create a single limit func that applies more then one limit. -func MultiLimit(limits ...LimitFunc) LimitFunc { - return func(task *taskmodel.Task, run *taskmodel.Run) error { - for _, lf := range limits { - if err := lf(task, run); err != nil { - return err - } - } - return nil - } -} - -// LimitFunc is a function the executor will use to -type LimitFunc func(*taskmodel.Task, *taskmodel.Run) error - -type executorConfig struct { - maxWorkers int - systemBuildCompiler CompilerBuilderFunc - nonSystemBuildCompiler CompilerBuilderFunc - flagger feature.Flagger -} - -type executorOption func(*executorConfig) - -// WithMaxWorkers specifies the number of workers used by the Executor. -func WithMaxWorkers(n int) executorOption { - return func(o *executorConfig) { - o.maxWorkers = n - } -} - -// CompilerBuilderFunc is a function that yields a new flux.Compiler. The -// context.Context provided can be assumed to be an authorized context. -type CompilerBuilderFunc func(ctx context.Context, query string, ts CompilerBuilderTimestamps) (flux.Compiler, error) - -// CompilerBuilderTimestamps contains timestamps which should be provided along -// with a Task query. -type CompilerBuilderTimestamps struct { - Now time.Time - LatestSuccess time.Time -} - -func (ts CompilerBuilderTimestamps) Extern() *ast.File { - var body []ast.Statement - - if !ts.LatestSuccess.IsZero() { - body = append(body, &ast.OptionStatement{ - Assignment: &ast.VariableAssignment{ - ID: &ast.Identifier{Name: lastSuccessOption}, - Init: &ast.DateTimeLiteral{ - Value: ts.LatestSuccess, - }, - }, - }) - } - - return &ast.File{Body: body} -} - -// WithSystemCompilerBuilder is an Executor option that configures a -// CompilerBuilderFunc to be used when compiling queries for System Tasks. 
-func WithSystemCompilerBuilder(builder CompilerBuilderFunc) executorOption { - return func(o *executorConfig) { - o.systemBuildCompiler = builder - } -} - -// WithNonSystemCompilerBuilder is an Executor option that configures a -// CompilerBuilderFunc to be used when compiling queries for non-System Tasks -// (Checks and Notifications). -func WithNonSystemCompilerBuilder(builder CompilerBuilderFunc) executorOption { - return func(o *executorConfig) { - o.nonSystemBuildCompiler = builder - } -} - -// WithFlagger is an Executor option that allows us to use a feature flagger in the executor -func WithFlagger(flagger feature.Flagger) executorOption { - return func(o *executorConfig) { - o.flagger = flagger - } -} - -// NewExecutor creates a new task executor -func NewExecutor(log *zap.Logger, qs query.QueryService, us PermissionService, ts taskmodel.TaskService, tcs backend.TaskControlService, opts ...executorOption) (*Executor, *ExecutorMetrics) { - cfg := &executorConfig{ - maxWorkers: defaultMaxWorkers, - systemBuildCompiler: NewASTCompiler, - nonSystemBuildCompiler: NewASTCompiler, - } - for _, opt := range opts { - opt(cfg) - } - - e := &Executor{ - log: log, - ts: ts, - tcs: tcs, - qs: qs, - ps: us, - - currentPromises: sync.Map{}, - futurePromises: sync.Map{}, - promiseQueue: make(chan *promise, maxPromises), - workerLimit: make(chan struct{}, cfg.maxWorkers), - limitFunc: func(*taskmodel.Task, *taskmodel.Run) error { return nil }, // noop - systemBuildCompiler: cfg.systemBuildCompiler, - nonSystemBuildCompiler: cfg.nonSystemBuildCompiler, - flagger: cfg.flagger, - } - - e.metrics = NewExecutorMetrics(e) - - wm := &workerMaker{ - e: e, - } - - go e.processScheduledTasks() - - e.workerPool = sync.Pool{New: wm.new} - return e, e.metrics -} - -// Executor is a task-specific executor that works with the new scheduler system. -type Executor struct { - log *zap.Logger - ts taskmodel.TaskService - tcs backend.TaskControlService - - qs query.QueryService - ps PermissionService - - metrics *ExecutorMetrics - - // currentPromises are all the promises we have made that have not yet been fulfilled - currentPromises sync.Map - - // futurePromises are promises that are scheduled to be executed in the future - futurePromises sync.Map - - // keep a queue of promises we have pending - promiseQueue chan *promise - - limitFunc LimitFunc - - // keep a pool of execution workers. 
- workerPool sync.Pool - workerLimit chan struct{} - - nonSystemBuildCompiler CompilerBuilderFunc - systemBuildCompiler CompilerBuilderFunc - flagger feature.Flagger -} - -func (e *Executor) LoadExistingScheduleRuns(ctx context.Context) error { - tasks, _, err := e.ts.FindTasks(ctx, taskmodel.TaskFilter{}) - if err != nil { - e.log.Error("err finding tasks:", zap.Error(err)) - return err - } - for _, t := range tasks { - beforeTime := time.Now().Add(time.Hour * 24 * 365).Format(time.RFC3339) - runs, _, err := e.ts.FindRuns(ctx, taskmodel.RunFilter{Task: t.ID, BeforeTime: beforeTime}) - if err != nil { - e.log.Error("err finding runs:", zap.Error(err)) - return err - } - for _, run := range runs { - if run.ScheduledFor.After(time.Now()) { - perm, err := e.ps.FindPermissionForUser(ctx, t.OwnerID) - if err != nil { - e.log.Error("err finding perms:", zap.Error(err)) - return err - } - - ctx, cancel := context.WithCancel(ctx) - // create promise - p := &promise{ - run: run, - task: t, - auth: &influxdb.Authorization{ - Status: influxdb.Active, - UserID: t.OwnerID, - ID: platform.ID(1), - OrgID: t.OrganizationID, - Permissions: perm, - }, - createdAt: time.Now().UTC(), - done: make(chan struct{}), - ctx: ctx, - cancelFunc: cancel, - } - e.futurePromises.Store(run.ID, p) - } - } - } - - return nil -} - -// SetLimitFunc sets the limit func for this task executor -func (e *Executor) SetLimitFunc(l LimitFunc) { - e.limitFunc = l -} - -// Execute is an executor method that satisfies the needs of tasks -func (e *Executor) Execute(ctx context.Context, id scheduler.ID, scheduledFor time.Time, runAt time.Time) error { - _, err := e.PromisedExecute(ctx, id, scheduledFor, runAt) - return err -} - -// PromisedExecute begins execution for the task's ID with a specific scheduledFor time. -// When we execute we will first build a run for the scheduledFor time. -// We then want to add to the queue anything that was manually queued to run. -// If the queue is full the call to execute should hang and apply back pressure to the caller. -// We then start a worker to work the newly queued jobs. 
-func (e *Executor) PromisedExecute(ctx context.Context, id scheduler.ID, scheduledFor time.Time, runAt time.Time) (Promise, error) { - iid := platform.ID(id) - // create a run - p, err := e.createRun(ctx, iid, scheduledFor, runAt) - if err != nil { - return nil, err - } - - e.startWorker() - return p, nil -} - -func (e *Executor) ManualRun(ctx context.Context, id platform.ID, runID platform.ID) (Promise, error) { - // create promises for any manual runs - r, err := e.tcs.StartManualRun(ctx, id, runID) - if err != nil { - return nil, err - } - - auth, err := icontext.GetAuthorizer(ctx) - if err != nil { - return nil, err - } - - // create a new context for running the task in the background so that returning the HTTP response does not cancel the - // context of the task to be run - ctx = icontext.SetAuthorizer(context.Background(), auth) - p, err := e.createPromise(ctx, r) - - e.startWorker() - e.metrics.manualRunsCounter.WithLabelValues(id.String()).Inc() - return p, err -} - -func (e *Executor) ScheduleManualRun(ctx context.Context, id platform.ID, runID platform.ID) error { - // create promises for any manual runs - r, err := e.tcs.StartManualRun(ctx, id, runID) - if err != nil { - return err - } - - auth, err := icontext.GetAuthorizer(ctx) - if err != nil { - return err - } - - // create a new context for running the task in the background so that returning the HTTP response does not cancel the - // context of the task to be run - ctx = icontext.SetAuthorizer(context.Background(), auth) - - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - t, err := e.ts.FindTaskByID(ctx, r.TaskID) - if err != nil { - return err - } - - perm, err := e.ps.FindPermissionForUser(ctx, t.OwnerID) - if err != nil { - return err - } - - ctx, cancel := context.WithCancel(ctx) - // create promise - p := &promise{ - run: r, - task: t, - auth: &influxdb.Authorization{ - Status: influxdb.Active, - UserID: t.OwnerID, - ID: platform.ID(1), - OrgID: t.OrganizationID, - Permissions: perm, - }, - createdAt: time.Now().UTC(), - done: make(chan struct{}), - ctx: ctx, - cancelFunc: cancel, - } - e.metrics.manualRunsCounter.WithLabelValues(id.String()).Inc() - - e.futurePromises.Store(runID, p) - return nil -} - -func (e *Executor) ResumeCurrentRun(ctx context.Context, id platform.ID, runID platform.ID) (Promise, error) { - cr, err := e.tcs.CurrentlyRunning(ctx, id) - if err != nil { - return nil, err - } - - for _, run := range cr { - if run.ID == runID { - if _, ok := e.currentPromises.Load(run.ID); ok { - continue - } - - p, err := e.createPromise(ctx, run) - - e.startWorker() - e.metrics.resumeRunsCounter.WithLabelValues(id.String()).Inc() - return p, err - } - } - return nil, taskmodel.ErrRunNotFound -} - -func (e *Executor) createRun(ctx context.Context, id platform.ID, scheduledFor time.Time, runAt time.Time) (*promise, error) { - r, err := e.tcs.CreateRun(ctx, id, scheduledFor.UTC(), runAt.UTC()) - if err != nil { - return nil, err - } - p, err := e.createPromise(ctx, r) - if err != nil { - if err := e.tcs.AddRunLog(ctx, id, r.ID, time.Now().UTC(), fmt.Sprintf("Failed to enqueue run: %s", err.Error())); err != nil { - e.log.Error("failed to fail create run: AddRunLog:", zap.Error(err)) - } - if err := e.tcs.UpdateRunState(ctx, id, r.ID, time.Now().UTC(), taskmodel.RunFail); err != nil { - e.log.Error("failed to fail create run: UpdateRunState:", zap.Error(err)) - } - if _, err := e.tcs.FinishRun(ctx, id, r.ID); err != nil { - e.log.Error("failed to fail create run: FinishRun:", 
zap.Error(err)) - } - } - - return p, err -} - -func (e *Executor) startWorker() { - // see if we have available workers - select { - case e.workerLimit <- struct{}{}: - default: - // we have reached our worker limit and we cannot start any more. - return - } - // fire up some workers - worker := e.workerPool.Get().(*worker) - if worker != nil { - // if the worker is nil, all the workers are busy and one of them will pick up the work we enqueued. - go func() { - // don't forget to put the worker back when we are done - defer e.workerPool.Put(worker) - worker.work() - - // remove a struct from the worker limit to allow another worker to work - <-e.workerLimit - }() - } -} - -// Cancel a run of a specific task. -func (e *Executor) Cancel(ctx context.Context, runID platform.ID) error { - // find the promise - val, ok := e.currentPromises.Load(runID) - if !ok { - return nil - } - promise := val.(*promise) - - // call cancel on it. - promise.Cancel(ctx) - - return nil -} - -func (e *Executor) createPromise(ctx context.Context, run *taskmodel.Run) (*promise, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - t, err := e.ts.FindTaskByID(ctx, run.TaskID) - if err != nil { - return nil, err - } - - perm, err := e.ps.FindPermissionForUser(ctx, t.OwnerID) - if err != nil { - return nil, err - } - - ctx, cancel := context.WithCancel(ctx) - // create promise - p := &promise{ - run: run, - task: t, - auth: &influxdb.Authorization{ - Status: influxdb.Active, - UserID: t.OwnerID, - ID: platform.ID(1), - OrgID: t.OrganizationID, - Permissions: perm, - }, - createdAt: time.Now().UTC(), - done: make(chan struct{}), - ctx: ctx, - cancelFunc: cancel, - } - - // insert promise into queue to be worked - // when the queue gets full we will hang and apply back pressure to the scheduler - e.promiseQueue <- p - - // insert the promise into the registry - e.currentPromises.Store(run.ID, p) - return p, nil -} - -func (e *Executor) processScheduledTasks() { - t := time.Tick(1 * time.Second) - for range t { - e.futurePromises.Range(func(k any, v any) bool { - vv := v.(*promise) - if vv.run.ScheduledFor.Equal(time.Now()) || vv.run.ScheduledFor.Before(time.Now()) { - if vv.run.RunAt.IsZero() { - e.promiseQueue <- vv - e.futurePromises.Delete(k) - e.startWorker() - } - } - return true - }) - } -} - -type workerMaker struct { - e *Executor -} - -func (wm *workerMaker) new() interface{} { - return &worker{ - e: wm.e, - exhaustResultIterators: exhaustResultIterators, - systemBuildCompiler: wm.e.systemBuildCompiler, - nonSystemBuildCompiler: wm.e.nonSystemBuildCompiler, - } -} - -type worker struct { - e *Executor - - // exhaustResultIterators is used to exhaust the result - // of a flux query - exhaustResultIterators func(res flux.Result) error - - systemBuildCompiler CompilerBuilderFunc - nonSystemBuildCompiler CompilerBuilderFunc -} - -func (w *worker) work() { - // loop until we have no more work to do in the promise queue - for { - var prom *promise - // check to see if we can execute - select { - case p, ok := <-w.e.promiseQueue: - - if !ok { - // the promiseQueue has been closed - return - } - prom = p - default: - // if nothing is left in the queue we are done - return - } - - // check to make sure we are below the limits. 
- for { - err := w.e.limitFunc(prom.task, prom.run) - if err == nil { - break - } - - // add to the run log - w.e.tcs.AddRunLog(prom.ctx, prom.task.ID, prom.run.ID, time.Now().UTC(), fmt.Sprintf("Task limit reached: %s", err.Error())) - - // sleep - select { - // If done the promise was canceled - case <-prom.ctx.Done(): - w.e.tcs.AddRunLog(prom.ctx, prom.task.ID, prom.run.ID, time.Now().UTC(), "Run canceled") - w.e.tcs.UpdateRunState(prom.ctx, prom.task.ID, prom.run.ID, time.Now().UTC(), taskmodel.RunCanceled) - prom.err = taskmodel.ErrRunCanceled - close(prom.done) - return - case <-time.After(time.Second): - } - } - - // execute the promise - w.executeQuery(prom) - - // close promise done channel and set appropriate error - close(prom.done) - - // remove promise from registry - w.e.currentPromises.Delete(prom.run.ID) - } -} - -func (w *worker) start(p *promise) { - // trace - span, ctx := tracing.StartSpanFromContext(p.ctx) - defer span.Finish() - - // add to run log - if err := w.e.tcs.AddRunLog(p.ctx, p.task.ID, p.run.ID, time.Now().UTC(), fmt.Sprintf("Started task from script: %q", p.task.Flux)); err != nil { - tid := zap.String("taskID", p.task.ID.String()) - rid := zap.String("runID", p.run.ID.String()) - w.e.log.With(zap.Error(err)).With(tid).With(rid).Warn("error adding run log: ") - } - - // update run status - if err := w.e.tcs.UpdateRunState(ctx, p.task.ID, p.run.ID, time.Now().UTC(), taskmodel.RunStarted); err != nil { - tid := zap.String("taskID", p.task.ID.String()) - rid := zap.String("runID", p.run.ID.String()) - w.e.log.With(zap.Error(err)).With(tid).With(rid).Warn("error updating run state: ") - } - - // add to metrics - w.e.metrics.StartRun(p.task, time.Since(p.createdAt), time.Since(p.run.RunAt)) - p.startedAt = time.Now() -} - -func (w *worker) finish(p *promise, rs taskmodel.RunStatus, err error) { - span, ctx := tracing.StartSpanFromContext(p.ctx) - defer span.Finish() - - // add to run log - w.e.tcs.AddRunLog(p.ctx, p.task.ID, p.run.ID, time.Now().UTC(), fmt.Sprintf("Completed(%s)", rs.String())) - // update run status - w.e.tcs.UpdateRunState(ctx, p.task.ID, p.run.ID, time.Now().UTC(), rs) - - // add to metrics - rd := time.Since(p.startedAt) - w.e.metrics.FinishRun(p.task, rs, rd) - - // log error - if err != nil { - w.e.tcs.AddRunLog(p.ctx, p.task.ID, p.run.ID, time.Now().UTC(), err.Error()) - w.e.log.Debug("Execution failed", zap.Error(err), zap.String("taskID", p.task.ID.String())) - w.e.metrics.LogError(p.task.Type, err) - - if backend.IsUnrecoverable(err) { - // TODO (al): once user notification system is put in place, this code should be uncommented - // if we get an error that requires user intervention to fix, deactivate the task and alert the user - // inactive := string(backend.TaskInactive) - // w.te.ts.UpdateTask(p.ctx, p.task.ID, influxdb.TaskUpdate{Status: &inactive}) - - // and add to run logs - w.e.tcs.AddRunLog(p.ctx, p.task.ID, p.run.ID, time.Now().UTC(), fmt.Sprintf("Task encountered unrecoverable error, requires admin action: %v", err.Error())) - // add to metrics - w.e.metrics.LogUnrecoverableError(p.task.ID, err) - } - - p.err = err - } else { - w.e.log.Debug("Completed successfully", zap.String("taskID", p.task.ID.String())) - } - - if _, err := w.e.tcs.FinishRun(p.ctx, p.task.ID, p.run.ID); err != nil { - w.e.log.Error("Failed to finish run", zap.String("taskID", p.task.ID.String()), zap.String("runID", p.run.ID.String()), zap.Error(err)) - } -} - -func (w *worker) executeQuery(p *promise) { - span, ctx := 
tracing.StartSpanFromContext(p.ctx) - defer span.Finish() - - // start - w.start(p) - - ctx = icontext.SetAuthorizer(ctx, p.auth) - - buildCompiler := w.systemBuildCompiler - if p.task.Type != taskmodel.TaskSystemType { - buildCompiler = w.nonSystemBuildCompiler - } - compiler, err := buildCompiler(ctx, p.task.Flux, CompilerBuilderTimestamps{ - Now: p.run.ScheduledFor, - LatestSuccess: p.task.LatestSuccess, - }) - if err != nil { - w.finish(p, taskmodel.RunFail, taskmodel.ErrFluxParseError(err)) - return - } - - req := &query.Request{ - Authorization: p.auth, - OrganizationID: p.task.OrganizationID, - Compiler: compiler, - } - req.WithReturnNoContent(true) - it, err := w.e.qs.Query(ctx, req) - if err != nil { - // Assume the error should not be part of the runResult. - w.finish(p, taskmodel.RunFail, taskmodel.ErrQueryError(err)) - return - } - - var runErr error - // Drain the result iterator. - for it.More() { - // Consume the full iterator so that we don't leak outstanding iterators. - res := it.Next() - if runErr = w.exhaustResultIterators(res); runErr != nil { - w.e.log.Info("Error exhausting result iterator", zap.Error(runErr), zap.String("name", res.Name())) - } - } - - it.Release() - - // log the trace id and whether or not it was sampled into the run log - if traceID, isSampled, ok := tracing.InfoFromSpan(span); ok { - msg := fmt.Sprintf("trace_id=%s is_sampled=%t", traceID, isSampled) - w.e.tcs.AddRunLog(p.ctx, p.task.ID, p.run.ID, time.Now().UTC(), msg) - } - - if runErr != nil { - w.finish(p, taskmodel.RunFail, taskmodel.ErrRunExecutionError(runErr)) - return - } - - if it.Err() != nil { - w.finish(p, taskmodel.RunFail, taskmodel.ErrResultIteratorError(it.Err())) - return - } - - w.finish(p, taskmodel.RunSuccess, nil) -} - -// RunsActive returns the current number of workers, which is equivalent to -// the number of runs actively running -func (e *Executor) RunsActive() int { - return len(e.workerLimit) -} - -// WorkersBusy returns the percent of total workers that are busy -func (e *Executor) WorkersBusy() float64 { - return float64(len(e.workerLimit)) / float64(cap(e.workerLimit)) -} - -// PromiseQueueUsage returns the percent of the Promise Queue that is currently filled -func (e *Executor) PromiseQueueUsage() float64 { - return float64(len(e.promiseQueue)) / float64(cap(e.promiseQueue)) -} - -// promise represents a promise the executor makes to finish a run's execution asynchronously. -type promise struct { - run *taskmodel.Run - task *taskmodel.Task - auth *influxdb.Authorization - - done chan struct{} - err error - - createdAt time.Time - startedAt time.Time - - ctx context.Context - cancelFunc context.CancelFunc -} - -// ID is the id of the run that was created -func (p *promise) ID() platform.ID { - return p.run.ID -} - -// Cancel is used to cancel a executing query -func (p *promise) Cancel(ctx context.Context) { - // call cancelfunc - p.cancelFunc() - - // wait for ctx.Done or p.Done - select { - case <-p.Done(): - case <-ctx.Done(): - } -} - -// Done provides a channel that closes on completion of a promise -func (p *promise) Done() <-chan struct{} { - return p.done -} - -// Error returns the error resulting from a run execution. -// If the execution is not complete error waits on Done(). -func (p *promise) Error() error { - <-p.done - return p.err -} - -// exhaustResultIterators drains all the iterators from a flux query Result. 
-func exhaustResultIterators(res flux.Result) error { - return res.Tables().Do(func(tbl flux.Table) error { - return tbl.Do(func(flux.ColReader) error { - return nil - }) - }) -} - -// NewASTCompiler parses a Flux query string into an AST representation. -func NewASTCompiler(ctx context.Context, query string, ts CompilerBuilderTimestamps) (flux.Compiler, error) { - pkg, err := runtime.ParseToJSON(ctx, query) - if err != nil { - return nil, err - } - var externBytes []byte - if feature.InjectLatestSuccessTime().Enabled(ctx) { - extern := ts.Extern() - if len(extern.Body) > 0 { - var err error - externBytes, err = json.Marshal(extern) - if err != nil { - return nil, err - } - } - } - return lang.ASTCompiler{ - AST: pkg, - Now: ts.Now, - Extern: externBytes, - }, nil -} - -// NewFluxCompiler wraps a Flux query string in a raw-query representation. -func NewFluxCompiler(ctx context.Context, query string, ts CompilerBuilderTimestamps) (flux.Compiler, error) { - var externBytes []byte - if feature.InjectLatestSuccessTime().Enabled(ctx) { - extern := ts.Extern() - if len(extern.Body) > 0 { - var err error - externBytes, err = json.Marshal(extern) - if err != nil { - return nil, err - } - } - } - return lang.FluxCompiler{ - Query: query, - Extern: externBytes, - // TODO(brett): This mitigates an immediate problem where - // Checks/Notifications breaks when sending Now, and system Tasks do not - // break when sending Now. We are currently sending C+N through using - // Flux Compiler and Tasks as AST Compiler until we come to the root - // cause. - // - // Removing Now here will keep the system humming along normally until - // we are able to locate the root cause and use Flux Compiler for all - // Task types. - // - // It turns out this is due to the exclusive nature of the stop time in - // Flux "from" and that we weren't including the left-hand boundary of - // the range check for notifications. We're shipping a fix soon in - // - // https://github.com/influxdata/influxdb/pull/19392 - // - // Once this has merged, we can send Now again. 
- // - // Now: now, - }, nil -} diff --git a/task/backend/executor/executor_metrics.go b/task/backend/executor/executor_metrics.go deleted file mode 100644 index 2de1a5914df..00000000000 --- a/task/backend/executor/executor_metrics.go +++ /dev/null @@ -1,191 +0,0 @@ -package executor - -import ( - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "github.com/prometheus/client_golang/prometheus" -) - -type ExecutorMetrics struct { - totalRunsComplete *prometheus.CounterVec - activeRuns prometheus.Collector - queueDelta *prometheus.SummaryVec - runDuration *prometheus.SummaryVec - errorsCounter *prometheus.CounterVec - manualRunsCounter *prometheus.CounterVec - resumeRunsCounter *prometheus.CounterVec - unrecoverableCounter *prometheus.CounterVec - runLatency *prometheus.HistogramVec -} - -type runCollector struct { - totalRunsActive *prometheus.Desc - workersBusy *prometheus.Desc - promiseQueueUsage *prometheus.Desc - ex *Executor -} - -func NewExecutorMetrics(ex *Executor) *ExecutorMetrics { - const namespace = "task" - const subsystem = "executor" - - return &ExecutorMetrics{ - totalRunsComplete: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "total_runs_complete", - Help: "Total number of runs completed across all tasks, split out by success or failure.", - }, []string{"task_type", "status"}), - - activeRuns: NewRunCollector(ex), - - queueDelta: prometheus.NewSummaryVec(prometheus.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "run_queue_delta", - Help: "The duration in seconds between a run being due to start and actually starting.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, []string{"task_type", "taskID"}), - - runDuration: prometheus.NewSummaryVec(prometheus.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "run_duration", - Help: "The duration in seconds between a run starting and finishing.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }, []string{"task_type", "taskID"}), - - errorsCounter: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "errors_counter", - Help: "The number of errors thrown by the executor with the type of error (ex. 
Invalid, Internal, etc.)", - }, []string{"task_type", "errorType"}), - - unrecoverableCounter: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "unrecoverable_counter", - Help: "The number of errors by taskID that must be manually resolved or have the task deactivated.", - }, []string{"taskID", "errorType"}), - - manualRunsCounter: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "manual_runs_counter", - Help: "Total number of manual runs scheduled to run by task ID", - }, []string{"taskID"}), - - resumeRunsCounter: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "resume_runs_counter", - Help: "Total number of runs resumed by task ID", - }, []string{"taskID"}), - - runLatency: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "run_latency_seconds", - Help: "Records the latency between the time the run was due to run and the time the task started execution, by task type", - }, []string{"task_type"}), - } -} - -// NewRunCollector returns a collector which exports influxdb process metrics. -func NewRunCollector(ex *Executor) prometheus.Collector { - return &runCollector{ - workersBusy: prometheus.NewDesc( - "task_executor_workers_busy", - "Percent of total available workers that are currently busy", - nil, - prometheus.Labels{}, - ), - totalRunsActive: prometheus.NewDesc( - "task_executor_total_runs_active", - "Total number of workers currently running tasks", - nil, - prometheus.Labels{}, - ), - promiseQueueUsage: prometheus.NewDesc( - "task_executor_promise_queue_usage", - "Percent of the promise queue that is currently full", - nil, - prometheus.Labels{}, - ), - ex: ex, - } -} - -// PrometheusCollectors satisfies the prom.PrometheusCollector interface. -func (em *ExecutorMetrics) PrometheusCollectors() []prometheus.Collector { - return []prometheus.Collector{ - em.totalRunsComplete, - em.activeRuns, - em.queueDelta, - em.errorsCounter, - em.runDuration, - em.manualRunsCounter, - em.resumeRunsCounter, - em.unrecoverableCounter, - em.runLatency, - } -} - -// StartRun store the delta time between when a run is due to start and actually starting. -func (em *ExecutorMetrics) StartRun(task *taskmodel.Task, queueDelta time.Duration, runLatency time.Duration) { - em.queueDelta.WithLabelValues(task.Type, "all").Observe(queueDelta.Seconds()) - em.queueDelta.WithLabelValues("", task.ID.String()).Observe(queueDelta.Seconds()) - - // schedule interval duration = (time task was scheduled to run) - (time it actually ran) - em.runLatency.WithLabelValues(task.Type).Observe(runLatency.Seconds()) -} - -// FinishRun adjusts the metrics to indicate a run is no longer in progress for the given task ID. -func (em *ExecutorMetrics) FinishRun(task *taskmodel.Task, status taskmodel.RunStatus, runDuration time.Duration) { - em.totalRunsComplete.WithLabelValues(task.Type, status.String()).Inc() - - em.runDuration.WithLabelValues(task.Type, "all").Observe(runDuration.Seconds()) - em.runDuration.WithLabelValues("", task.ID.String()).Observe(runDuration.Seconds()) -} - -// LogError increments the count of errors by error code. 
-func (em *ExecutorMetrics) LogError(taskType string, err error) { - switch e := err.(type) { - case *errors.Error: - em.errorsCounter.WithLabelValues(taskType, e.Code).Inc() - default: - em.errorsCounter.WithLabelValues(taskType, "unknown").Inc() - } -} - -// LogUnrecoverableError increments the count of unrecoverable errors, which require admin intervention to resolve or deactivate -// This count is separate from the errors count so that the errors metric can be used to identify only internal, rather than user errors -// and so that unrecoverable errors can be quickly identified for deactivation -func (em *ExecutorMetrics) LogUnrecoverableError(taskID platform.ID, err error) { - switch e := err.(type) { - case *errors.Error: - em.unrecoverableCounter.WithLabelValues(taskID.String(), e.Code).Inc() - default: - em.unrecoverableCounter.WithLabelValues(taskID.String(), "unknown").Inc() - } -} - -// Describe returns all descriptions associated with the run collector. -func (r *runCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- r.workersBusy - ch <- r.promiseQueueUsage - ch <- r.totalRunsActive -} - -// Collect returns the current state of all metrics of the run collector. -func (r *runCollector) Collect(ch chan<- prometheus.Metric) { - ch <- prometheus.MustNewConstMetric(r.workersBusy, prometheus.GaugeValue, r.ex.WorkersBusy()) - - ch <- prometheus.MustNewConstMetric(r.promiseQueueUsage, prometheus.GaugeValue, r.ex.PromiseQueueUsage()) - - ch <- prometheus.MustNewConstMetric(r.totalRunsActive, prometheus.GaugeValue, float64(r.ex.RunsActive())) -} diff --git a/task/backend/executor/executor_test.go b/task/backend/executor/executor_test.go deleted file mode 100644 index 2c62e1f19c7..00000000000 --- a/task/backend/executor/executor_test.go +++ /dev/null @@ -1,602 +0,0 @@ -package executor - -import ( - "context" - "errors" - "fmt" - "os" - "strings" - "sync" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/influxdata/flux" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/prom" - "github.com/influxdata/influxdb/v2/kit/prom/promtest" - tracetest "github.com/influxdata/influxdb/v2/kit/tracing/testing" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/query" - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/influxdata/influxdb/v2/task/backend" - "github.com/influxdata/influxdb/v2/task/backend/executor/mock" - "github.com/influxdata/influxdb/v2/task/backend/scheduler" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/opentracing/opentracing-go" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/uber/jaeger-client-go" - "go.uber.org/zap/zaptest" -) - -func TestMain(m *testing.M) { - var code int - func() { - defer tracetest.SetupInMemoryTracing("task_backend_tests")() - - code = m.Run() - }() - - os.Exit(code) -} - -type tes struct { - svc *fakeQueryService - ex *Executor - metrics *ExecutorMetrics - i *kv.Service - tcs *taskControlService - tc testCreds -} - 
-func taskExecutorSystem(t *testing.T) tes { - var ( - aqs = newFakeQueryService() - qs = query.QueryServiceBridge{ - AsyncQueryService: aqs, - } - ctx = context.Background() - logger = zaptest.NewLogger(t) - store = inmem.NewKVStore() - ) - - if err := all.Up(ctx, logger, store); err != nil { - t.Fatal(err) - } - ctrl := gomock.NewController(t) - ps := mock.NewMockPermissionService(ctrl) - ps.EXPECT().FindPermissionForUser(gomock.Any(), gomock.Any()).Return(influxdb.PermissionSet{}, nil).AnyTimes() - - tenantStore := tenant.NewStore(store) - tenantSvc := tenant.NewService(tenantStore) - - authStore, err := authorization.NewStore(store) - require.NoError(t, err) - authSvc := authorization.NewService(authStore, tenantSvc) - - var ( - svc = kv.NewService(logger, store, tenantSvc, kv.ServiceConfig{ - FluxLanguageService: fluxlang.DefaultService, - }) - - tcs = &taskControlService{TaskControlService: svc} - ex, metrics = NewExecutor(zaptest.NewLogger(t), qs, ps, svc, tcs) - ) - return tes{ - svc: aqs, - ex: ex, - metrics: metrics, - i: svc, - tcs: tcs, - tc: createCreds(t, tenantSvc, tenantSvc, authSvc), - } -} - -func TestTaskExecutor(t *testing.T) { - t.Run("QuerySuccess", testQuerySuccess) - t.Run("QueryFailure", testQueryFailure) - t.Run("ManualRun", testManualRun) - t.Run("ResumeRun", testResumingRun) - t.Run("WorkerLimit", testWorkerLimit) - t.Run("LimitFunc", testLimitFunc) - t.Run("Metrics", testMetrics) - t.Run("IteratorFailure", testIteratorFailure) - t.Run("ErrorHandling", testErrorHandling) -} - -func testQuerySuccess(t *testing.T) { - t.Parallel() - - tes := taskExecutorSystem(t) - - var ( - script = fmt.Sprintf(fmtTestScript, t.Name()) - ctx = icontext.SetAuthorizer(context.Background(), tes.tc.Auth) - span = opentracing.GlobalTracer().StartSpan("test-span") - ) - ctx = opentracing.ContextWithSpan(ctx, span) - - task, err := tes.i.CreateTask(ctx, taskmodel.TaskCreate{OrganizationID: tes.tc.OrgID, OwnerID: tes.tc.Auth.GetUserID(), Flux: script}) - if err != nil { - t.Fatal(err) - } - - promise, err := tes.ex.PromisedExecute(ctx, scheduler.ID(task.ID), time.Unix(123, 0), time.Unix(126, 0)) - if err != nil { - t.Fatal(err) - } - promiseID := platform.ID(promise.ID()) - - run, err := tes.i.FindRunByID(context.Background(), task.ID, promiseID) - if err != nil { - t.Fatal(err) - } - - if run.ID != promiseID { - t.Fatal("promise and run dont match") - } - - if run.RunAt != time.Unix(126, 0).UTC() { - t.Fatalf("did not correctly set RunAt value, got: %v", run.RunAt) - } - - tes.svc.WaitForQueryLive(t, script) - tes.svc.SucceedQuery(script) - - <-promise.Done() - - if got := promise.Error(); got != nil { - t.Fatal(got) - } - - // confirm run is removed from in-mem store - run, err = tes.i.FindRunByID(context.Background(), task.ID, run.ID) - if run != nil || err == nil || !strings.Contains(err.Error(), "run not found") { - t.Fatal("run was returned when it should have been removed from kv") - } - - // ensure the run returned by TaskControlService.FinishRun(...) 
- // has run logs formatted as expected - if run = tes.tcs.run; run == nil { - t.Fatal("expected run returned by FinishRun to not be nil") - } - - if len(run.Log) < 3 { - t.Fatalf("expected 3 run logs, found %d", len(run.Log)) - } - - sctx := span.Context().(jaeger.SpanContext) - expectedMessage := fmt.Sprintf("trace_id=%s is_sampled=true", sctx.TraceID()) - if expectedMessage != run.Log[1].Message { - t.Errorf("expected %q, found %q", expectedMessage, run.Log[1].Message) - } -} - -func testQueryFailure(t *testing.T) { - t.Parallel() - tes := taskExecutorSystem(t) - - script := fmt.Sprintf(fmtTestScript, t.Name()) - ctx := icontext.SetAuthorizer(context.Background(), tes.tc.Auth) - task, err := tes.i.CreateTask(ctx, taskmodel.TaskCreate{OrganizationID: tes.tc.OrgID, OwnerID: tes.tc.Auth.GetUserID(), Flux: script}) - if err != nil { - t.Fatal(err) - } - - promise, err := tes.ex.PromisedExecute(ctx, scheduler.ID(task.ID), time.Unix(123, 0), time.Unix(126, 0)) - if err != nil { - t.Fatal(err) - } - promiseID := platform.ID(promise.ID()) - - run, err := tes.i.FindRunByID(context.Background(), task.ID, promiseID) - if err != nil { - t.Fatal(err) - } - - if run.ID != promiseID { - t.Fatal("promise and run dont match") - } - - tes.svc.WaitForQueryLive(t, script) - tes.svc.FailQuery(script, errors.New("blargyblargblarg")) - - <-promise.Done() - - if got := promise.Error(); got == nil { - t.Fatal("got no error when I should have") - } -} - -func testManualRun(t *testing.T) { - t.Parallel() - tes := taskExecutorSystem(t) - - script := fmt.Sprintf(fmtTestScript, t.Name()) - ctx := icontext.SetAuthorizer(context.Background(), tes.tc.Auth) - task, err := tes.i.CreateTask(ctx, taskmodel.TaskCreate{OrganizationID: tes.tc.OrgID, OwnerID: tes.tc.Auth.GetUserID(), Flux: script}) - if err != nil { - t.Fatal(err) - } - - manualRun, err := tes.i.ForceRun(ctx, task.ID, 123) - if err != nil { - t.Fatal(err) - } - - mrs, err := tes.i.ManualRuns(ctx, task.ID) - if err != nil { - t.Fatal(err) - } - - if len(mrs) != 1 { - t.Fatal("manual run not created by force run") - } - - promise, err := tes.ex.ManualRun(ctx, task.ID, manualRun.ID) - if err != nil { - t.Fatal(err) - } - - run, err := tes.i.FindRunByID(context.Background(), task.ID, promise.ID()) - if err != nil { - t.Fatal(err) - } - - if run.ID != promise.ID() || manualRun.ID != promise.ID() { - t.Fatal("promise and run and manual run dont match") - } - - tes.svc.WaitForQueryLive(t, script) - tes.svc.SucceedQuery(script) - - if got := promise.Error(); got != nil { - t.Fatal(got) - } -} - -func testResumingRun(t *testing.T) { - t.Parallel() - tes := taskExecutorSystem(t) - - script := fmt.Sprintf(fmtTestScript, t.Name()) - ctx := icontext.SetAuthorizer(context.Background(), tes.tc.Auth) - task, err := tes.i.CreateTask(ctx, taskmodel.TaskCreate{OrganizationID: tes.tc.OrgID, OwnerID: tes.tc.Auth.GetUserID(), Flux: script}) - if err != nil { - t.Fatal(err) - } - - stalledRun, err := tes.i.CreateRun(ctx, task.ID, time.Unix(123, 0), time.Unix(126, 0)) - if err != nil { - t.Fatal(err) - } - - promise, err := tes.ex.ResumeCurrentRun(ctx, task.ID, stalledRun.ID) - if err != nil { - t.Fatal(err) - } - - // ensure that it doesn't recreate a promise - if _, err := tes.ex.ResumeCurrentRun(ctx, task.ID, stalledRun.ID); err != taskmodel.ErrRunNotFound { - t.Fatal("failed to error when run has already been resumed") - } - - run, err := tes.i.FindRunByID(context.Background(), task.ID, promise.ID()) - if err != nil { - t.Fatal(err) - } - - if run.ID != promise.ID() || 
stalledRun.ID != promise.ID() { - t.Fatal("promise and run and manual run dont match") - } - - tes.svc.WaitForQueryLive(t, script) - tes.svc.SucceedQuery(script) - - if got := promise.Error(); got != nil { - t.Fatal(got) - } -} - -func testWorkerLimit(t *testing.T) { - t.Parallel() - tes := taskExecutorSystem(t) - - script := fmt.Sprintf(fmtTestScript, t.Name()) - ctx := icontext.SetAuthorizer(context.Background(), tes.tc.Auth) - task, err := tes.i.CreateTask(ctx, taskmodel.TaskCreate{OrganizationID: tes.tc.OrgID, OwnerID: tes.tc.Auth.GetUserID(), Flux: script}) - if err != nil { - t.Fatal(err) - } - - promise, err := tes.ex.PromisedExecute(ctx, scheduler.ID(task.ID), time.Unix(123, 0), time.Unix(126, 0)) - if err != nil { - t.Fatal(err) - } - - if len(tes.ex.workerLimit) != 1 { - t.Fatal("expected a worker to be started") - } - - tes.svc.WaitForQueryLive(t, script) - tes.svc.FailQuery(script, errors.New("blargyblargblarg")) - - <-promise.Done() - - if got := promise.Error(); got == nil { - t.Fatal("got no error when I should have") - } -} - -func testLimitFunc(t *testing.T) { - t.Parallel() - tes := taskExecutorSystem(t) - - script := fmt.Sprintf(fmtTestScript, t.Name()) - ctx := icontext.SetAuthorizer(context.Background(), tes.tc.Auth) - task, err := tes.i.CreateTask(ctx, taskmodel.TaskCreate{OrganizationID: tes.tc.OrgID, OwnerID: tes.tc.Auth.GetUserID(), Flux: script}) - if err != nil { - t.Fatal(err) - } - forcedErr := errors.New("forced") - forcedQueryErr := taskmodel.ErrQueryError(forcedErr) - tes.svc.FailNextQuery(forcedErr) - - count := 0 - tes.ex.SetLimitFunc(func(*taskmodel.Task, *taskmodel.Run) error { - count++ - if count < 2 { - return errors.New("not there yet") - } - return nil - }) - - promise, err := tes.ex.PromisedExecute(ctx, scheduler.ID(task.ID), time.Unix(123, 0), time.Unix(126, 0)) - if err != nil { - t.Fatal(err) - } - - <-promise.Done() - - if got := promise.Error(); got.Error() != forcedQueryErr.Error() { - t.Fatal("failed to get failure from forced error") - } - - if count != 2 { - t.Fatalf("failed to call limitFunc enough times: %d", count) - } -} - -func testMetrics(t *testing.T) { - t.Parallel() - tes := taskExecutorSystem(t) - metrics := tes.metrics - reg := prom.NewRegistry(zaptest.NewLogger(t)) - reg.MustRegister(metrics.PrometheusCollectors()...) - - mg := promtest.MustGather(t, reg) - m := promtest.MustFindMetric(t, mg, "task_executor_total_runs_active", nil) - assert.EqualValues(t, 0, *m.Gauge.Value, "unexpected number of active runs") - - script := fmt.Sprintf(fmtTestScript, t.Name()) - ctx := icontext.SetAuthorizer(context.Background(), tes.tc.Auth) - task, err := tes.i.CreateTask(ctx, taskmodel.TaskCreate{OrganizationID: tes.tc.OrgID, OwnerID: tes.tc.Auth.GetUserID(), Flux: script}) - assert.NoError(t, err) - - promise, err := tes.ex.PromisedExecute(ctx, scheduler.ID(task.ID), time.Unix(123, 0), time.Unix(126, 0)) - assert.NoError(t, err) - promiseID := promise.ID() - - run, err := tes.i.FindRunByID(context.Background(), task.ID, promiseID) - assert.NoError(t, err) - assert.EqualValues(t, promiseID, run.ID, "promise and run dont match") - - tes.svc.WaitForQueryLive(t, script) - - mg = promtest.MustGather(t, reg) - m = promtest.MustFindMetric(t, mg, "task_executor_total_runs_active", nil) - assert.EqualValues(t, 1, *m.Gauge.Value, "unexpected number of active runs") - - tes.svc.SucceedQuery(script) - <-promise.Done() - - // N.B. You might think the _runs_complete and _runs_active metrics are updated atomically, - // but that's not the case. 
As a task run completes and is being cleaned up, there's a small - // window where it can be counted under both metrics. - // - // Our CI is very good at hitting this window, causing failures when we assert on the metric - // values below. We sleep a small amount before gathering metrics to avoid flaky errors. - time.Sleep(500 * time.Millisecond) - - mg = promtest.MustGather(t, reg) - m = promtest.MustFindMetric(t, mg, "task_executor_total_runs_complete", map[string]string{"task_type": "", "status": "success"}) - assert.EqualValues(t, 1, *m.Counter.Value, "unexpected number of successful runs") - - m = promtest.MustFindMetric(t, mg, "task_executor_total_runs_active", nil) - assert.EqualValues(t, 0, *m.Gauge.Value, "unexpected number of active runs") - - assert.NoError(t, promise.Error()) - - // manual runs metrics - mt, err := tes.i.CreateTask(ctx, taskmodel.TaskCreate{OrganizationID: tes.tc.OrgID, OwnerID: tes.tc.Auth.GetUserID(), Flux: script}) - assert.NoError(t, err) - - scheduledFor := int64(123) - - r, err := tes.i.ForceRun(ctx, mt.ID, scheduledFor) - assert.NoError(t, err) - - _, err = tes.ex.ManualRun(ctx, mt.ID, r.ID) - assert.NoError(t, err) - - mg = promtest.MustGather(t, reg) - m = promtest.MustFindMetric(t, mg, "task_executor_manual_runs_counter", map[string]string{"taskID": mt.ID.String()}) - assert.EqualValues(t, 1, *m.Counter.Value, "unexpected number of manual runs") - - m = promtest.MustFindMetric(t, mg, "task_executor_run_latency_seconds", map[string]string{"task_type": ""}) - assert.GreaterOrEqual(t, *m.Histogram.SampleCount, uint64(1), "run latency metric not found") - assert.Greater(t, *m.Histogram.SampleSum, float64(100), "run latency metric unexpectedly small") -} - -func testIteratorFailure(t *testing.T) { - t.Parallel() - tes := taskExecutorSystem(t) - - // replace iterator exhaust function with one which errors - tes.ex.workerPool = sync.Pool{New: func() interface{} { - return &worker{ - e: tes.ex, - exhaustResultIterators: func(flux.Result) error { - return errors.New("something went wrong exhausting iterator") - }, - systemBuildCompiler: NewASTCompiler, - nonSystemBuildCompiler: NewASTCompiler, - } - }} - - script := fmt.Sprintf(fmtTestScript, t.Name()) - ctx := icontext.SetAuthorizer(context.Background(), tes.tc.Auth) - task, err := tes.i.CreateTask(ctx, taskmodel.TaskCreate{OrganizationID: tes.tc.OrgID, OwnerID: tes.tc.Auth.GetUserID(), Flux: script}) - if err != nil { - t.Fatal(err) - } - - promise, err := tes.ex.PromisedExecute(ctx, scheduler.ID(task.ID), time.Unix(123, 0), time.Unix(126, 0)) - if err != nil { - t.Fatal(err) - } - promiseID := platform.ID(promise.ID()) - - run, err := tes.i.FindRunByID(context.Background(), task.ID, promiseID) - if err != nil { - t.Fatal(err) - } - - if run.ID != promiseID { - t.Fatal("promise and run dont match") - } - - tes.svc.WaitForQueryLive(t, script) - tes.svc.SucceedQuery(script) - - <-promise.Done() - - if got := promise.Error(); got == nil { - t.Fatal("got no error when I should have") - } -} - -func testErrorHandling(t *testing.T) { - t.Parallel() - tes := taskExecutorSystem(t) - - metrics := tes.metrics - reg := prom.NewRegistry(zaptest.NewLogger(t)) - reg.MustRegister(metrics.PrometheusCollectors()...) 
- - script := fmt.Sprintf(fmtTestScript, t.Name()) - ctx := icontext.SetAuthorizer(context.Background(), tes.tc.Auth) - task, err := tes.i.CreateTask(ctx, taskmodel.TaskCreate{OrganizationID: tes.tc.OrgID, OwnerID: tes.tc.Auth.GetUserID(), Flux: script, Status: "active"}) - if err != nil { - t.Fatal(err) - } - - // encountering a bucket not found error should log an unrecoverable error in the metrics - forcedErr := errors.New("could not find bucket") - tes.svc.FailNextQuery(forcedErr) - - promise, err := tes.ex.PromisedExecute(ctx, scheduler.ID(task.ID), time.Unix(123, 0), time.Unix(126, 0)) - if err != nil { - t.Fatal(err) - } - - <-promise.Done() - - mg := promtest.MustGather(t, reg) - - m := promtest.MustFindMetric(t, mg, "task_executor_unrecoverable_counter", map[string]string{"taskID": task.ID.String(), "errorType": "internal error"}) - if got := *m.Counter.Value; got != 1 { - t.Fatalf("expected 1 unrecoverable error, got %v", got) - } - - // TODO (al): once user notification system is put in place, this code should be uncommented - // encountering a bucket not found error should deactivate the task - /* - inactive, err := tes.i.FindTaskByID(context.Background(), task.ID) - if err != nil { - t.Fatal(err) - } - if inactive.Status != "inactive" { - t.Fatal("expected task to be deactivated after permanent error") - } - */ -} - -func TestPromiseFailure(t *testing.T) { - t.Parallel() - - tes := taskExecutorSystem(t) - - var ( - script = fmt.Sprintf(fmtTestScript, t.Name()) - ctx = icontext.SetAuthorizer(context.Background(), tes.tc.Auth) - span = opentracing.GlobalTracer().StartSpan("test-span") - ) - ctx = opentracing.ContextWithSpan(ctx, span) - - task, err := tes.i.CreateTask(ctx, taskmodel.TaskCreate{OrganizationID: tes.tc.OrgID, OwnerID: tes.tc.Auth.GetUserID(), Flux: script}) - if err != nil { - t.Fatal(err) - } - - if err := tes.i.DeleteTask(ctx, task.ID); err != nil { - t.Fatal(err) - } - - promise, err := tes.ex.PromisedExecute(ctx, scheduler.ID(task.ID), time.Unix(123, 0), time.Unix(126, 0)) - if err == nil { - t.Fatal("failed to error on promise create") - } - - if promise != nil { - t.Fatalf("expected no promise but received one: %+v", promise) - } - - runs, _, err := tes.i.FindRuns(context.Background(), taskmodel.RunFilter{Task: task.ID}) - if err != nil { - t.Fatal(err) - } - - if len(runs) != 1 { - t.Fatalf("expected 1 runs on failed promise: got: %d, %#v", len(runs), runs[0]) - } - - if runs[0].Status != "failed" { - t.Fatal("failed to set failed state") - } - -} - -type taskControlService struct { - backend.TaskControlService - - run *taskmodel.Run -} - -func (t *taskControlService) FinishRun(ctx context.Context, taskID platform.ID, runID platform.ID) (*taskmodel.Run, error) { - // ensure auth set on context - _, err := icontext.GetAuthorizer(ctx) - if err != nil { - panic(err) - } - - t.run, err = t.TaskControlService.FinishRun(ctx, taskID, runID) - return t.run, err -} diff --git a/task/backend/executor/limits.go b/task/backend/executor/limits.go deleted file mode 100644 index 121a0aa033b..00000000000 --- a/task/backend/executor/limits.go +++ /dev/null @@ -1,53 +0,0 @@ -package executor - -import ( - "context" - "sort" - - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/influxdata/influxdb/v2/task/options" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -// ConcurrencyLimit creates a concurrency limit func that uses the executor to determine -// if the task has exceeded the concurrency limit. 
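ConcurrencyLimit, defined below, parses the task's Flux options, fetches the task's currently running runs, sorts them by ScheduledFor (oldest first), and only lets the run under consideration through if its position in that ordering falls within the declared concurrency. A reduced, self-contained illustration of that positional check follows; it uses hypothetical stand-in types rather than the real taskmodel ones and is not the deleted implementation:

    package concurrencyexample

    import (
        "fmt"
        "sort"
        "time"
    )

    // run is a stand-in for a currently running task run and its scheduled-for time.
    type run struct {
        id           int
        scheduledFor time.Time
    }

    // allow reports whether the run with the given id may proceed when at most
    // limit runs of the same task may execute concurrently. Runs scheduled
    // earlier take priority, mirroring the sort in ConcurrencyLimit.
    func allow(running []run, id, limit int) error {
        sort.SliceStable(running, func(i, j int) bool {
            return running[i].scheduledFor.Before(running[j].scheduledFor)
        })
        if len(running) <= limit {
            return nil // under the limit, nothing more to check
        }
        for i, r := range running {
            if r.id == id {
                if i >= limit {
                    return fmt.Errorf("concurrency limit reached, over by %d", i-limit)
                }
                return nil // this run is among the oldest runs allowed by the limit
            }
        }
        // The run is not in the currently-running set, but the task is already over its limit.
        return fmt.Errorf("concurrency limit reached, over by %d", len(running)-limit)
    }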
-func ConcurrencyLimit(exec *Executor, lang fluxlang.FluxLanguageService) LimitFunc { - return func(t *taskmodel.Task, r *taskmodel.Run) error { - o, err := options.FromScriptAST(lang, t.Flux) - if err != nil { - return err - } - if o.Concurrency == nil { - return nil - } - - runs, err := exec.tcs.CurrentlyRunning(context.Background(), t.ID) - if err != nil { - return err - } - - // sort by scheduledFor time because we want to make sure older scheduled for times - // are higher priority - sort.SliceStable(runs, func(i, j int) bool { - runi := runs[i].ScheduledFor - - runj := runs[j].ScheduledFor - - return runi.Before(runj) - }) - - if len(runs) > int(*o.Concurrency) { - for i, run := range runs { - if run.ID == r.ID { - if i >= int(*o.Concurrency) { - return taskmodel.ErrTaskConcurrencyLimitReached(i - int(*o.Concurrency)) - } - return nil // no need to keep looping. - } - } - // this run isn't currently running. but we have more run's then the concurrency allows - return taskmodel.ErrTaskConcurrencyLimitReached(len(runs) - int(*o.Concurrency)) - } - return nil - } -} diff --git a/task/backend/executor/limits_test.go b/task/backend/executor/limits_test.go deleted file mode 100644 index e8f512a37c6..00000000000 --- a/task/backend/executor/limits_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package executor - -import ( - "context" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -var ( - taskWith1Concurrency = &taskmodel.Task{ID: 1, Flux: `option task = {concurrency: 1, name:"x", every:1m} from(bucket:"b-src") |> range(start:-1m) |> to(bucket:"b-dst", org:"o")`} - taskWith10Concurrency = &taskmodel.Task{ID: 1, Flux: `option task = {concurrency: 10, name:"x", every:1m} from(bucket:"b-src") |> range(start:-1m) |> to(bucket:"b-dst", org:"o")`} -) - -func TestTaskConcurrency(t *testing.T) { - tes := taskExecutorSystem(t) - te := tes.ex - r1, err := te.tcs.CreateRun(context.Background(), taskWith1Concurrency.ID, time.Now().Add(-4*time.Second), time.Now()) - if err != nil { - t.Fatal(err) - } - r2, err := te.tcs.CreateRun(context.Background(), taskWith1Concurrency.ID, time.Now().Add(-3*time.Second), time.Now()) - if err != nil { - t.Fatal(err) - } - r3, err := te.tcs.CreateRun(context.Background(), taskWith1Concurrency.ID, time.Now().Add(-2*time.Second), time.Now()) - if err != nil { - t.Fatal(err) - } - - r4 := &taskmodel.Run{ - ID: 3, - ScheduledFor: time.Now(), - } - - clFunc := ConcurrencyLimit(te, fluxlang.DefaultService) - if err := clFunc(taskWith1Concurrency, r1); err != nil { - t.Fatal(err) - } - if err := clFunc(taskWith1Concurrency, r2); err == nil { - t.Fatal("failed to error when exceeding limit by 1") - } - if err := clFunc(taskWith1Concurrency, r3); err == nil { - t.Fatal("failed to error when exceeding limit by 2") - } - if err := clFunc(taskWith1Concurrency, r4); err == nil { - t.Fatal("failed to error when exceeding limit before saving run") - } - - if err := clFunc(taskWith10Concurrency, r4); err != nil { - t.Fatal(err) - } -} diff --git a/task/backend/executor/mock/permission_service.go b/task/backend/executor/mock/permission_service.go deleted file mode 100644 index a904c0f97bf..00000000000 --- a/task/backend/executor/mock/permission_service.go +++ /dev/null @@ -1,129 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: executor.go - -// Package mock is a generated GoMock package. 
-package mock - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockPermissionService is a mock of PermissionService interface -type MockPermissionService struct { - ctrl *gomock.Controller - recorder *MockPermissionServiceMockRecorder -} - -// MockPermissionServiceMockRecorder is the mock recorder for MockPermissionService -type MockPermissionServiceMockRecorder struct { - mock *MockPermissionService -} - -// NewMockPermissionService creates a new mock instance -func NewMockPermissionService(ctrl *gomock.Controller) *MockPermissionService { - mock := &MockPermissionService{ctrl: ctrl} - mock.recorder = &MockPermissionServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockPermissionService) EXPECT() *MockPermissionServiceMockRecorder { - return m.recorder -} - -// FindPermissionForUser mocks base method -func (m *MockPermissionService) FindPermissionForUser(ctx context.Context, UserID platform.ID) (influxdb.PermissionSet, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FindPermissionForUser", ctx, UserID) - ret0, _ := ret[0].(influxdb.PermissionSet) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FindPermissionForUser indicates an expected call of FindPermissionForUser -func (mr *MockPermissionServiceMockRecorder) FindPermissionForUser(ctx, UserID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindPermissionForUser", reflect.TypeOf((*MockPermissionService)(nil).FindPermissionForUser), ctx, UserID) -} - -// MockPromise is a mock of Promise interface -type MockPromise struct { - ctrl *gomock.Controller - recorder *MockPromiseMockRecorder -} - -// MockPromiseMockRecorder is the mock recorder for MockPromise -type MockPromiseMockRecorder struct { - mock *MockPromise -} - -// NewMockPromise creates a new mock instance -func NewMockPromise(ctrl *gomock.Controller) *MockPromise { - mock := &MockPromise{ctrl: ctrl} - mock.recorder = &MockPromiseMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockPromise) EXPECT() *MockPromiseMockRecorder { - return m.recorder -} - -// ID mocks base method -func (m *MockPromise) ID() platform.ID { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ID") - ret0, _ := ret[0].(platform.ID) - return ret0 -} - -// ID indicates an expected call of ID -func (mr *MockPromiseMockRecorder) ID() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockPromise)(nil).ID)) -} - -// Cancel mocks base method -func (m *MockPromise) Cancel(ctx context.Context) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Cancel", ctx) -} - -// Cancel indicates an expected call of Cancel -func (mr *MockPromiseMockRecorder) Cancel(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cancel", reflect.TypeOf((*MockPromise)(nil).Cancel), ctx) -} - -// Done mocks base method -func (m *MockPromise) Done() <-chan struct{} { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Done") - ret0, _ := ret[0].(<-chan struct{}) - return ret0 -} - -// Done indicates an expected call of Done -func (mr *MockPromiseMockRecorder) Done() *gomock.Call { - mr.mock.ctrl.T.Helper() - return 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Done", reflect.TypeOf((*MockPromise)(nil).Done)) -} - -// Error mocks base method -func (m *MockPromise) Error() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Error") - ret0, _ := ret[0].(error) - return ret0 -} - -// Error indicates an expected call of Error -func (mr *MockPromiseMockRecorder) Error() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockPromise)(nil).Error)) -} diff --git a/task/backend/executor/support_test.go b/task/backend/executor/support_test.go deleted file mode 100644 index 9fd6fb4dffd..00000000000 --- a/task/backend/executor/support_test.go +++ /dev/null @@ -1,298 +0,0 @@ -package executor - -import ( - "context" - "encoding/json" - "fmt" - "sync" - "testing" - "time" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/lang" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/runtime" - "github.com/influxdata/flux/values" - "github.com/influxdata/influxdb/v2" - _ "github.com/influxdata/influxdb/v2/fluxinit/static" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/query" -) - -type fakeQueryService struct { - mu sync.Mutex - queries map[string]*fakeQuery - queryErr error - // The most recent ctx received in the Query method. - // Used to validate that the executor applied the correct authorizer. - mostRecentCtx context.Context -} - -var _ query.AsyncQueryService = (*fakeQueryService)(nil) - -func makeAST(q string) lang.ASTCompiler { - pkg, err := runtime.ParseToJSON(context.Background(), q) - if err != nil { - panic(err) - } - return lang.ASTCompiler{ - AST: pkg, - Now: time.Unix(123, 0), - } -} - -func makeASTString(q lang.ASTCompiler) string { - b, err := json.Marshal(q) - if err != nil { - panic(err) - } - return string(b) -} - -func newFakeQueryService() *fakeQueryService { - return &fakeQueryService{queries: make(map[string]*fakeQuery)} -} - -func (s *fakeQueryService) Query(ctx context.Context, req *query.Request) (flux.Query, error) { - if req.Authorization == nil { - panic("authorization required") - } - - s.mu.Lock() - defer s.mu.Unlock() - s.mostRecentCtx = ctx - if s.queryErr != nil { - err := s.queryErr - s.queryErr = nil - return nil, err - } - - astc, ok := req.Compiler.(lang.ASTCompiler) - if !ok { - return nil, fmt.Errorf("fakeQueryService only supports the ASTCompiler, got %T", req.Compiler) - } - - fq := &fakeQuery{ - wait: make(chan struct{}), - results: make(chan flux.Result), - } - s.queries[makeASTString(astc)] = fq - - go fq.run(ctx) - - return fq, nil -} - -// SucceedQuery allows the running query matching the given script to return on its Ready channel. -func (s *fakeQueryService) SucceedQuery(script string) { - s.mu.Lock() - defer s.mu.Unlock() - - // Unblock the flux. - ast := makeAST(script) - spec := makeASTString(ast) - fq, ok := s.queries[spec] - if !ok { - ast.Now = ast.Now.UTC() - spec = makeASTString(ast) - fq = s.queries[spec] - } - close(fq.wait) - delete(s.queries, spec) -} - -// FailQuery closes the running query's Ready channel and sets its error to the given value. -func (s *fakeQueryService) FailQuery(script string, forced error) { - s.mu.Lock() - defer s.mu.Unlock() - - // Unblock the flux. 
- ast := makeAST(script) - spec := makeASTString(ast) - fq, ok := s.queries[spec] - if !ok { - ast.Now = ast.Now.UTC() - spec = makeASTString(ast) - fq = s.queries[spec] - } - fq.forcedError = forced - close(fq.wait) - delete(s.queries, spec) -} - -// FailNextQuery causes the next call to QueryWithCompile to return the given error. -func (s *fakeQueryService) FailNextQuery(forced error) { - s.queryErr = forced -} - -// WaitForQueryLive ensures that the query has made it into the service. -// This is particularly useful for the synchronous executor, -// because the execution starts on a separate goroutine. -func (s *fakeQueryService) WaitForQueryLive(t *testing.T, script string) { - t.Helper() - - const attempts = 100 - ast := makeAST(script) - astUTC := makeAST(script) - astUTC.Now = ast.Now.UTC() - spec := makeASTString(ast) - specUTC := makeASTString(astUTC) - for i := 0; i < attempts; i++ { - if i != 0 { - time.Sleep(10 * time.Millisecond) - } - - s.mu.Lock() - _, ok := s.queries[spec] - s.mu.Unlock() - if ok { - return - } - s.mu.Lock() - _, ok = s.queries[specUTC] - s.mu.Unlock() - if ok { - return - } - - } - - t.Fatalf("Did not see live query %q in time", script) -} - -type fakeQuery struct { - results chan flux.Result - wait chan struct{} // Blocks Ready from returning. - forcedError error // Value to return from Err() method. - - ctxErr error // Error from ctx.Done. -} - -var _ flux.Query = (*fakeQuery)(nil) - -func (q *fakeQuery) Done() {} -func (q *fakeQuery) Cancel() { close(q.results) } -func (q *fakeQuery) Statistics() flux.Statistics { return flux.Statistics{} } -func (q *fakeQuery) Results() <-chan flux.Result { return q.results } -func (q *fakeQuery) ProfilerResults() (flux.ResultIterator, error) { return nil, nil } - -func (q *fakeQuery) Err() error { - if q.ctxErr != nil { - return q.ctxErr - } - return q.forcedError -} - -// run is intended to be run on its own goroutine. -// It blocks until q.wait is closed, then sends a fake result on the q.results channel. -func (q *fakeQuery) run(ctx context.Context) { - defer close(q.results) - - // Wait for call to set query success/fail. - select { - case <-ctx.Done(): - q.ctxErr = ctx.Err() - return - case <-q.wait: - // Normal case. - } - - if q.forcedError == nil { - res := newFakeResult() - q.results <- res - } -} - -// fakeResult is a dumb implementation of flux.Result that always returns the same values. -type fakeResult struct { - name string - table flux.Table -} - -var _ flux.Result = (*fakeResult)(nil) - -func newFakeResult() *fakeResult { - meta := []flux.ColMeta{{Label: "x", Type: flux.TInt}} - vals := []values.Value{values.NewInt(int64(1))} - gk := execute.NewGroupKey(meta, vals) - b := execute.NewColListTableBuilder(gk, memory.DefaultAllocator) - i, _ := b.AddCol(meta[0]) - b.AppendInt(i, int64(1)) - t, err := b.Table() - if err != nil { - panic(err) - } - return &fakeResult{name: "res", table: t} -} - -func (r *fakeResult) Statistics() flux.Statistics { - return flux.Statistics{} -} - -func (r *fakeResult) Name() string { return r.name } -func (r *fakeResult) Tables() flux.TableIterator { return tables{r.table} } - -// tables makes a TableIterator out of a slice of Tables. 
-type tables []flux.Table - -var _ flux.TableIterator = tables(nil) - -func (ts tables) Do(f func(flux.Table) error) error { - for _, t := range ts { - if err := f(t); err != nil { - return err - } - } - return nil -} - -func (ts tables) Statistics() flux.Statistics { return flux.Statistics{} } - -type testCreds struct { - OrgID, UserID platform.ID - Auth *influxdb.Authorization -} - -func createCreds(t *testing.T, orgSvc influxdb.OrganizationService, userSvc influxdb.UserService, authSvc influxdb.AuthorizationService) testCreds { - t.Helper() - - org := &influxdb.Organization{Name: t.Name() + "-org"} - if err := orgSvc.CreateOrganization(context.Background(), org); err != nil { - t.Fatal(err) - } - - user := &influxdb.User{Name: t.Name() + "-user"} - if err := userSvc.CreateUser(context.Background(), user); err != nil { - t.Fatal(err) - } - - readPerm, err := influxdb.NewGlobalPermission(influxdb.ReadAction, influxdb.BucketsResourceType) - if err != nil { - t.Fatal(err) - } - writePerm, err := influxdb.NewGlobalPermission(influxdb.WriteAction, influxdb.BucketsResourceType) - if err != nil { - t.Fatal(err) - } - auth := &influxdb.Authorization{ - OrgID: org.ID, - UserID: user.ID, - Token: "hifriend!", - Permissions: []influxdb.Permission{*readPerm, *writePerm}, - } - if err := authSvc.CreateAuthorization(context.Background(), auth); err != nil { - t.Fatal(err) - } - - return testCreds{OrgID: org.ID, Auth: auth} -} - -// Some tests use t.Parallel, and the fake query service depends on unique scripts, -// so format a new script with the test name in each test. -const fmtTestScript = ` -option task = { - name: %q, - every: 1m, -} -from(bucket: "one") |> to(bucket: "two", orgID: "0000000000000000")` diff --git a/task/backend/middleware/check_middleware.go b/task/backend/middleware/check_middleware.go deleted file mode 100644 index ec37e6b7ee0..00000000000 --- a/task/backend/middleware/check_middleware.go +++ /dev/null @@ -1,139 +0,0 @@ -package middleware - -import ( - "context" - "fmt" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -// CoordinatingCheckService acts as a CheckService decorator that handles coordinating the api request -// with the required task control actions asynchronously via a message dispatcher -type CoordinatingCheckService struct { - influxdb.CheckService - coordinator Coordinator - taskService taskmodel.TaskService - Now func() time.Time -} - -// NewCheckService constructs a new coordinating check service -func NewCheckService(cs influxdb.CheckService, ts taskmodel.TaskService, coordinator Coordinator) *CoordinatingCheckService { - c := &CoordinatingCheckService{ - CheckService: cs, - taskService: ts, - coordinator: coordinator, - Now: func() time.Time { - return time.Now().UTC() - }, - } - - return c -} - -// CreateCheck Creates a check and Publishes the change it can be scheduled. 
-func (cs *CoordinatingCheckService) CreateCheck(ctx context.Context, c influxdb.CheckCreate, userID platform.ID) error { - - if err := cs.CheckService.CreateCheck(ctx, c, userID); err != nil { - return err - } - - t, err := cs.taskService.FindTaskByID(ctx, c.GetTaskID()) - if err != nil { - return err - } - - if err := cs.coordinator.TaskCreated(ctx, t); err != nil { - if derr := cs.CheckService.DeleteCheck(ctx, c.GetID()); derr != nil { - return fmt.Errorf("schedule task failed: %s\n\tcleanup also failed: %s", err, derr) - } - - return err - } - - return nil -} - -// UpdateCheck Updates a check and publishes the change so the task owner can act on the update -func (cs *CoordinatingCheckService) UpdateCheck(ctx context.Context, id platform.ID, c influxdb.CheckCreate) (influxdb.Check, error) { - from, err := cs.CheckService.FindCheckByID(ctx, id) - if err != nil { - return nil, err - } - - fromTask, err := cs.taskService.FindTaskByID(ctx, from.GetTaskID()) - if err != nil { - return nil, err - } - - to, err := cs.CheckService.UpdateCheck(ctx, id, c) - if err != nil { - return to, err - } - - toTask, err := cs.taskService.FindTaskByID(ctx, to.GetTaskID()) - if err != nil { - return nil, err - } - - // if the update is to activate and the previous task was inactive we should add a "latest completed" update - // this allows us to see not run the task for inactive time - if fromTask.Status == string(taskmodel.TaskInactive) && toTask.Status == string(taskmodel.TaskActive) { - toTask.LatestCompleted = cs.Now() - } - - return to, cs.coordinator.TaskUpdated(ctx, fromTask, toTask) -} - -// PatchCheck Updates a check and publishes the change so the task owner can act on the update -func (cs *CoordinatingCheckService) PatchCheck(ctx context.Context, id platform.ID, upd influxdb.CheckUpdate) (influxdb.Check, error) { - from, err := cs.CheckService.FindCheckByID(ctx, id) - if err != nil { - return nil, err - } - - fromTask, err := cs.taskService.FindTaskByID(ctx, from.GetTaskID()) - if err != nil { - return nil, err - } - - to, err := cs.CheckService.PatchCheck(ctx, id, upd) - if err != nil { - return to, err - } - - toTask, err := cs.taskService.FindTaskByID(ctx, to.GetTaskID()) - if err != nil { - return nil, err - } - - // if the update is to activate and the previous task was inactive we should add a "latest completed" update - // this allows us to see not run the task for inactive time - if fromTask.Status == string(taskmodel.TaskInactive) && toTask.Status == string(taskmodel.TaskActive) { - toTask.LatestCompleted = cs.Now() - } - - return to, cs.coordinator.TaskUpdated(ctx, fromTask, toTask) - -} - -// DeleteCheck delete the check and publishes the change, to allow the task owner to find out about this change faster. 
-func (cs *CoordinatingCheckService) DeleteCheck(ctx context.Context, id platform.ID) error { - check, err := cs.CheckService.FindCheckByID(ctx, id) - if err != nil { - return err - } - - t, err := cs.taskService.FindTaskByID(ctx, check.GetTaskID()) - if err != nil { - return err - } - - if err := cs.coordinator.TaskDeleted(ctx, t.ID); err != nil { - return err - } - - return cs.CheckService.DeleteCheck(ctx, id) -} diff --git a/task/backend/middleware/check_middleware_test.go b/task/backend/middleware/check_middleware_test.go deleted file mode 100644 index 4b0513887b3..00000000000 --- a/task/backend/middleware/check_middleware_test.go +++ /dev/null @@ -1,329 +0,0 @@ -package middleware_test - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/notification/rule" - "github.com/influxdata/influxdb/v2/task/backend/middleware" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -type pipingCoordinator struct { - err error - taskCreatedPipe chan *taskmodel.Task - taskUpdatedPipe chan *taskmodel.Task - taskDeletedPipe chan platform.ID -} - -func (p *pipingCoordinator) taskCreatedChan() <-chan *taskmodel.Task { - if p.taskCreatedPipe == nil { - p.taskCreatedPipe = make(chan *taskmodel.Task, 1) - } - return p.taskCreatedPipe -} -func (p *pipingCoordinator) taskUpdatedChan() <-chan *taskmodel.Task { - if p.taskUpdatedPipe == nil { - p.taskUpdatedPipe = make(chan *taskmodel.Task, 1) - } - return p.taskUpdatedPipe -} -func (p *pipingCoordinator) taskDeletedChan() <-chan platform.ID { - if p.taskDeletedPipe == nil { - p.taskDeletedPipe = make(chan platform.ID, 1) - } - return p.taskDeletedPipe -} - -func (p *pipingCoordinator) TaskCreated(_ context.Context, t *taskmodel.Task) error { - if p.taskCreatedPipe != nil { - p.taskCreatedPipe <- t - } - return p.err -} -func (p *pipingCoordinator) TaskUpdated(_ context.Context, from, to *taskmodel.Task) error { - if p.taskUpdatedPipe != nil { - p.taskUpdatedPipe <- to - } - return p.err -} -func (p *pipingCoordinator) TaskDeleted(_ context.Context, id platform.ID) error { - if p.taskDeletedPipe != nil { - p.taskDeletedPipe <- id - } - return p.err -} -func (p *pipingCoordinator) RunCancelled(ctx context.Context, runID platform.ID) error { - return p.err -} -func (p *pipingCoordinator) RunRetried(ctx context.Context, task *taskmodel.Task, run *taskmodel.Run) error { - return p.err -} -func (p *pipingCoordinator) RunForced(ctx context.Context, task *taskmodel.Task, run *taskmodel.Run) error { - return p.err -} - -type mockedSvc struct { - taskSvc *mock.TaskService - checkSvc *mock.CheckService - notificationSvc *mock.NotificationRuleStore - pipingCoordinator *pipingCoordinator -} - -func newMockServices() mockedSvc { - return mockedSvc{ - taskSvc: &mock.TaskService{ - FindTaskByIDFn: func(_ context.Context, id platform.ID) (*taskmodel.Task, error) { return &taskmodel.Task{ID: id}, nil }, - CreateTaskFn: func(context.Context, taskmodel.TaskCreate) (*taskmodel.Task, error) { - return &taskmodel.Task{ID: 1}, nil - }, - UpdateTaskFn: func(_ context.Context, id platform.ID, _ taskmodel.TaskUpdate) (*taskmodel.Task, error) { - return &taskmodel.Task{ID: id}, nil - }, - DeleteTaskFn: func(context.Context, platform.ID) error { return nil }, - }, - checkSvc: &mock.CheckService{ - 
FindCheckByIDFn: func(_ context.Context, id platform.ID) (influxdb.Check, error) { - c := &check.Deadman{} - c.SetID(id) - return c, nil - }, - CreateCheckFn: func(context.Context, influxdb.CheckCreate, platform.ID) error { return nil }, - UpdateCheckFn: func(_ context.Context, _ platform.ID, c influxdb.CheckCreate) (influxdb.Check, error) { return c, nil }, - PatchCheckFn: func(_ context.Context, id platform.ID, _ influxdb.CheckUpdate) (influxdb.Check, error) { - c := &check.Deadman{} - c.SetID(id) - return c, nil - }, - DeleteCheckFn: func(context.Context, platform.ID) error { return nil }, - }, - notificationSvc: &mock.NotificationRuleStore{ - FindNotificationRuleByIDF: func(_ context.Context, id platform.ID) (influxdb.NotificationRule, error) { - c := &rule.HTTP{} - c.SetID(id) - return c, nil - }, - CreateNotificationRuleF: func(context.Context, influxdb.NotificationRuleCreate, platform.ID) error { return nil }, - UpdateNotificationRuleF: func(_ context.Context, _ platform.ID, c influxdb.NotificationRuleCreate, _ platform.ID) (influxdb.NotificationRule, error) { - return c, nil - }, - PatchNotificationRuleF: func(_ context.Context, id platform.ID, _ influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) { - c := &rule.HTTP{} - c.SetID(id) - return c, nil - }, - DeleteNotificationRuleF: func(context.Context, platform.ID) error { return nil }, - }, - pipingCoordinator: &pipingCoordinator{}, - } -} - -func newCheckSvcStack() (mockedSvc, *middleware.CoordinatingCheckService) { - msvcs := newMockServices() - return msvcs, middleware.NewCheckService(msvcs.checkSvc, msvcs.taskSvc, msvcs.pipingCoordinator) -} - -func TestCheckCreate(t *testing.T) { - mocks, checkService := newCheckSvcStack() - ch := mocks.pipingCoordinator.taskCreatedChan() - - check := &check.Deadman{} - check.SetTaskID(4) - - cc := influxdb.CheckCreate{ - Check: check, - Status: influxdb.Active, - } - - err := checkService.CreateCheck(context.Background(), cc, 1) - if err != nil { - t.Fatal(err) - } - - select { - case task := <-ch: - if task.ID != check.GetTaskID() { - t.Fatalf("task sent to coordinator doesn't match expected") - } - default: - t.Fatal("didn't receive task") - } - - mocks.pipingCoordinator.err = fmt.Errorf("bad") - mocks.checkSvc.DeleteCheckFn = func(context.Context, platform.ID) error { return fmt.Errorf("AARGH") } - - err = checkService.CreateCheck(context.Background(), cc, 1) - if err.Error() != "schedule task failed: bad\n\tcleanup also failed: AARGH" { - t.Fatal(err) - } -} - -func TestCheckUpdateFromInactive(t *testing.T) { - mocks, checkService := newCheckSvcStack() - latest := time.Now().UTC() - checkService.Now = func() time.Time { - return latest - } - ch := mocks.pipingCoordinator.taskUpdatedChan() - - mocks.checkSvc.UpdateCheckFn = func(_ context.Context, _ platform.ID, c influxdb.CheckCreate) (influxdb.Check, error) { - c.SetTaskID(10) - c.SetUpdatedAt(latest.Add(-20 * time.Hour)) - return c, nil - } - - mocks.checkSvc.PatchCheckFn = func(_ context.Context, _ platform.ID, c influxdb.CheckUpdate) (influxdb.Check, error) { - ic := &check.Deadman{} - ic.SetTaskID(10) - ic.SetUpdatedAt(latest.Add(-20 * time.Hour)) - return ic, nil - } - - mocks.checkSvc.FindCheckByIDFn = func(_ context.Context, id platform.ID) (influxdb.Check, error) { - c := &check.Deadman{} - c.SetID(id) - c.SetTaskID(1) - return c, nil - } - - mocks.taskSvc.FindTaskByIDFn = func(_ context.Context, id platform.ID) (*taskmodel.Task, error) { - if id == 1 { - return &taskmodel.Task{ID: id, Status: 
string(taskmodel.TaskInactive)}, nil - } else if id == 10 { - return &taskmodel.Task{ID: id, Status: string(taskmodel.TaskActive)}, nil - } - return &taskmodel.Task{ID: id}, nil - } - - deadman := &check.Deadman{} - deadman.SetTaskID(10) - - cc := influxdb.CheckCreate{ - Check: deadman, - Status: influxdb.Active, - } - - thecheck, err := checkService.UpdateCheck(context.Background(), 1, cc) - if err != nil { - t.Fatal(err) - } - select { - case task := <-ch: - if task.ID != thecheck.GetTaskID() { - t.Fatalf("task sent to coordinator doesn't match expected") - } - if task.LatestCompleted != latest { - t.Fatalf("update returned incorrect LatestCompleted, expected %s got %s, or ", latest.Format(time.RFC3339), task.LatestCompleted) - } - default: - t.Fatal("didn't receive task") - } - - action := influxdb.Active - thecheck, err = checkService.PatchCheck(context.Background(), 1, influxdb.CheckUpdate{Status: &action}) - if err != nil { - t.Fatal(err) - } - select { - case task := <-ch: - if task.ID != thecheck.GetTaskID() { - t.Fatalf("task sent to coordinator doesn't match expected") - } - if task.LatestCompleted != latest { - t.Fatalf("update returned incorrect LatestCompleted, expected %s got %s, or ", latest.Format(time.RFC3339), task.LatestCompleted) - } - default: - t.Fatal("didn't receive task") - } - -} - -func TestCheckUpdate(t *testing.T) { - mocks, checkService := newCheckSvcStack() - ch := mocks.pipingCoordinator.taskUpdatedChan() - - mocks.checkSvc.UpdateCheckFn = func(_ context.Context, _ platform.ID, c influxdb.CheckCreate) (influxdb.Check, error) { - c.SetTaskID(10) - return c, nil - } - - deadman := &check.Deadman{} - deadman.SetTaskID(4) - - cc := influxdb.CheckCreate{ - Check: deadman, - Status: influxdb.Active, - } - - check, err := checkService.UpdateCheck(context.Background(), 1, cc) - if err != nil { - t.Fatal(err) - } - - select { - case task := <-ch: - if task.ID != check.GetTaskID() { - t.Fatalf("task sent to coordinator doesn't match expected") - } - default: - t.Fatal("didn't receive task") - } -} - -func TestCheckPatch(t *testing.T) { - mocks, checkService := newCheckSvcStack() - ch := mocks.pipingCoordinator.taskUpdatedChan() - - deadman := &check.Deadman{} - deadman.SetTaskID(4) - - mocks.checkSvc.PatchCheckFn = func(context.Context, platform.ID, influxdb.CheckUpdate) (influxdb.Check, error) { - return deadman, nil - } - - check, err := checkService.PatchCheck(context.Background(), 1, influxdb.CheckUpdate{}) - if err != nil { - t.Fatal(err) - } - - select { - case task := <-ch: - if task.ID != check.GetTaskID() { - t.Fatalf("task sent to coordinator doesn't match expected") - } - default: - t.Fatal("didn't receive task") - } -} - -func TestCheckDelete(t *testing.T) { - mocks, checkService := newCheckSvcStack() - ch := mocks.pipingCoordinator.taskDeletedChan() - - mocks.checkSvc.FindCheckByIDFn = func(_ context.Context, id platform.ID) (influxdb.Check, error) { - c := &check.Deadman{} - c.SetID(id) - c.SetTaskID(21) - return c, nil - } - - err := checkService.DeleteCheck(context.Background(), 1) - if err != nil { - t.Fatal(err) - } - - select { - case id := <-ch: - if id != platform.ID(21) { - t.Fatalf("task sent to coordinator doesn't match expected") - } - default: - t.Fatal("didn't receive task") - } -} diff --git a/task/backend/middleware/middleware.go b/task/backend/middleware/middleware.go deleted file mode 100644 index 1ecb5c5cb58..00000000000 --- a/task/backend/middleware/middleware.go +++ /dev/null @@ -1,127 +0,0 @@ -package middleware - -import ( - 
"context" - "fmt" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -// Coordinator is a type which is used to react to -// task related actions -type Coordinator interface { - TaskCreated(context.Context, *taskmodel.Task) error - TaskUpdated(ctx context.Context, from, to *taskmodel.Task) error - TaskDeleted(context.Context, platform.ID) error - RunCancelled(ctx context.Context, runID platform.ID) error - RunForced(ctx context.Context, task *taskmodel.Task, run *taskmodel.Run) error -} - -// CoordinatingTaskService acts as a TaskService decorator that handles coordinating the api request -// with the required task control actions asynchronously via a message dispatcher -type CoordinatingTaskService struct { - taskmodel.TaskService - coordinator Coordinator - - now func() time.Time -} - -// New constructs a new coordinating task service -func New(service taskmodel.TaskService, coordinator Coordinator, opts ...Option) *CoordinatingTaskService { - c := &CoordinatingTaskService{ - TaskService: service, - coordinator: coordinator, - now: func() time.Time { - return time.Now().UTC() - }, - } - - for _, opt := range opts { - opt(c) - } - - return c -} - -// CreateTask Creates a task in the existing task service and Publishes the change so any TaskD service can lease it. -func (s *CoordinatingTaskService) CreateTask(ctx context.Context, tc taskmodel.TaskCreate) (*taskmodel.Task, error) { - t, err := s.TaskService.CreateTask(ctx, tc) - if err != nil { - return t, err - } - - if err := s.coordinator.TaskCreated(ctx, t); err != nil { - if derr := s.TaskService.DeleteTask(ctx, t.ID); derr != nil { - return t, fmt.Errorf("schedule task failed: %s\n\tcleanup also failed: %s", err, derr) - } - - return t, err - } - - return t, nil -} - -// UpdateTask Updates a task and publishes the change so the task owner can act on the update -func (s *CoordinatingTaskService) UpdateTask(ctx context.Context, id platform.ID, upd taskmodel.TaskUpdate) (*taskmodel.Task, error) { - from, err := s.TaskService.FindTaskByID(ctx, id) - if err != nil { - return nil, err - } - - to, err := s.TaskService.UpdateTask(ctx, id, upd) - if err != nil { - return to, err - } - - return to, s.coordinator.TaskUpdated(ctx, from, to) -} - -// DeleteTask delete the task and publishes the change, to allow the task owner to find out about this change faster. -func (s *CoordinatingTaskService) DeleteTask(ctx context.Context, id platform.ID) error { - if err := s.coordinator.TaskDeleted(ctx, id); err != nil { - return err - } - - return s.TaskService.DeleteTask(ctx, id) -} - -// CancelRun Cancel the run and publish the cancellation. -func (s *CoordinatingTaskService) CancelRun(ctx context.Context, taskID, runID platform.ID) error { - if err := s.TaskService.CancelRun(ctx, taskID, runID); err != nil { - return err - } - - return s.coordinator.RunCancelled(ctx, runID) -} - -// RetryRun calls retry on the task service and publishes the retry. -func (s *CoordinatingTaskService) RetryRun(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - t, err := s.TaskService.FindTaskByID(ctx, taskID) - if err != nil { - return nil, err - } - - r, err := s.TaskService.RetryRun(ctx, taskID, runID) - if err != nil { - return r, err - } - - return r, s.coordinator.RunForced(ctx, t, r) -} - -// ForceRun create the forced run in the task system and publish to the pubSub. 
-func (s *CoordinatingTaskService) ForceRun(ctx context.Context, taskID platform.ID, scheduledFor int64) (*taskmodel.Run, error) { - t, err := s.TaskService.FindTaskByID(ctx, taskID) - if err != nil { - return nil, err - } - - r, err := s.TaskService.ForceRun(ctx, taskID, scheduledFor) - if err != nil { - return r, err - } - - return r, s.coordinator.RunForced(ctx, t, r) -} diff --git a/task/backend/middleware/middleware_test.go b/task/backend/middleware/middleware_test.go deleted file mode 100644 index bb4993b8212..00000000000 --- a/task/backend/middleware/middleware_test.go +++ /dev/null @@ -1,228 +0,0 @@ -package middleware_test - -import ( - "context" - "errors" - "sync" - "testing" - "time" - - _ "github.com/influxdata/influxdb/v2/fluxinit/static" - "github.com/influxdata/influxdb/v2/kit/platform" - pmock "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/task/backend" - "github.com/influxdata/influxdb/v2/task/backend/coordinator" - "github.com/influxdata/influxdb/v2/task/backend/middleware" - "github.com/influxdata/influxdb/v2/task/backend/scheduler" - "github.com/influxdata/influxdb/v2/task/mock" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "go.uber.org/zap/zaptest" -) - -func timeoutSelector(ch <-chan scheduler.ID) (scheduler.ID, error) { - select { - case id := <-ch: - return id, nil - case <-time.After(10 * time.Second): - return 0, errors.New("timeout on select") - } -} - -const script = `option task = {name: "a task",cron: "* * * * *"} from(bucket:"test") |> range(start:-1h)` - -func inmemTaskService() taskmodel.TaskService { - gen := snowflake.NewDefaultIDGenerator() - tasks := map[platform.ID]*taskmodel.Task{} - mu := sync.Mutex{} - - ts := &pmock.TaskService{ - CreateTaskFn: func(ctx context.Context, tc taskmodel.TaskCreate) (*taskmodel.Task, error) { - mu.Lock() - defer mu.Unlock() - id := gen.ID() - task := &taskmodel.Task{ID: id, Flux: tc.Flux, Cron: "* * * * *", Status: tc.Status, OrganizationID: tc.OrganizationID, Organization: tc.Organization} - if task.Status == "" { - task.Status = string(taskmodel.TaskActive) - } - tasks[id] = task - - return tasks[id], nil - }, - DeleteTaskFn: func(ctx context.Context, id platform.ID) error { - mu.Lock() - defer mu.Unlock() - delete(tasks, id) - return nil - }, - UpdateTaskFn: func(ctx context.Context, id platform.ID, upd taskmodel.TaskUpdate) (*taskmodel.Task, error) { - mu.Lock() - defer mu.Unlock() - t, ok := tasks[id] - if !ok { - return nil, taskmodel.ErrTaskNotFound - } - if upd.Flux != nil { - t.Flux = *upd.Flux - - } - if upd.Status != nil { - t.Status = *upd.Status - } - if upd.LatestCompleted != nil { - t.LatestCompleted = *upd.LatestCompleted - } - - return t, nil - }, - FindTaskByIDFn: func(ctx context.Context, id platform.ID) (*taskmodel.Task, error) { - mu.Lock() - defer mu.Unlock() - t, ok := tasks[id] - if !ok { - return nil, taskmodel.ErrTaskNotFound - } - newt := *t - return &newt, nil - }, - FindTasksFn: func(ctx context.Context, tf taskmodel.TaskFilter) ([]*taskmodel.Task, int, error) { - mu.Lock() - defer mu.Unlock() - if tf.After != nil { - return []*taskmodel.Task{}, 0, nil - } - rtn := []*taskmodel.Task{} - for _, task := range tasks { - rtn = append(rtn, task) - } - return rtn, len(rtn), nil - }, - ForceRunFn: func(ctx context.Context, id platform.ID, scheduledFor int64) (*taskmodel.Run, error) { - mu.Lock() - defer mu.Unlock() - t, ok := 
tasks[id] - if !ok { - return nil, taskmodel.ErrTaskNotFound - } - - return &taskmodel.Run{ID: id, TaskID: t.ID, ScheduledFor: time.Unix(scheduledFor, 0)}, nil - }, - } - return ts - -} - -func TestCoordinatingTaskService(t *testing.T) { - var ( - ts = inmemTaskService() - ex = mock.NewExecutor() - sch, _, _ = scheduler.NewScheduler(ex, backend.NewSchedulableTaskService(ts)) - coord = coordinator.NewCoordinator(zaptest.NewLogger(t), sch, ex) - middleware = middleware.New(ts, coord) - ) - - task, err := middleware.CreateTask(context.Background(), taskmodel.TaskCreate{OrganizationID: 1, Flux: script}) - if err != nil { - t.Fatal(err) - } - - id, err := timeoutSelector(ex.ExecutedChan) - if err != nil { - t.Fatal(err) - } - - if id != scheduler.ID(task.ID) { - t.Fatalf("task given to scheduler not the same as task created. expected: %v, got: %v", task.ID, id) - } - - if task.Flux != script { - t.Fatal("task sent to scheduler doesnt match task created") - } - - if err := middleware.DeleteTask(context.Background(), task.ID); err != nil { - t.Fatal(err) - } - - if task.Flux != script { - t.Fatal("task sent to scheduler doesn't match task created") - } - - task, err = middleware.CreateTask(context.Background(), taskmodel.TaskCreate{OrganizationID: 1, Flux: script}) - if err != nil { - t.Fatal(err) - } - - inactive := string(taskmodel.TaskInactive) - res, err := middleware.UpdateTask(context.Background(), task.ID, taskmodel.TaskUpdate{Status: &inactive}) - if err != nil { - t.Fatal(err) - } - // Only validating res on the first update. - if res.ID != task.ID { - t.Fatalf("unexpected ID on update result: got %s, want %s", res.ID.String(), task.ID.String()) - } - if res.Flux != task.Flux { - t.Fatalf("unexpected script on update result: got %q, want %q", res.Flux, task.Flux) - } - if res.Status != inactive { - t.Fatalf("unexpected meta status on update result: got %q, want %q", res.Status, inactive) - } - - if task.Flux != script { - t.Fatal("task sent to scheduler doesnt match task created") - } - - active := string(taskmodel.TaskActive) - if _, err := middleware.UpdateTask(context.Background(), task.ID, taskmodel.TaskUpdate{Status: &active}); err != nil { - t.Fatal(err) - } - - if task.Flux != script { - t.Fatal("task sent to scheduler doesnt match task created") - } - - newScript := `option task = {name: "a task",cron: "1 * * * *"} from(bucket:"test") |> range(start:-2h)` - if _, err := middleware.UpdateTask(context.Background(), task.ID, taskmodel.TaskUpdate{Flux: &newScript}); err != nil { - t.Fatal(err) - } - - if task.Flux != newScript { - t.Fatal("task sent to scheduler doesnt match task created") - } -} - -func TestCoordinatingTaskService_ForceRun(t *testing.T) { - var ( - ts = inmemTaskService() - ex = mock.NewExecutor() - sch, _, _ = scheduler.NewScheduler(ex, backend.NewSchedulableTaskService(ts)) - coord = coordinator.NewCoordinator(zaptest.NewLogger(t), sch, ex) - middleware = middleware.New(ts, coord) - ) - - // Create an isolated task directly through the store so the coordinator doesn't know about it. 
- task, err := middleware.CreateTask(context.Background(), taskmodel.TaskCreate{OrganizationID: 1, Flux: script}) - if err != nil { - t.Fatal(err) - } - - id, err := timeoutSelector(ex.ExecutedChan) - if err != nil { - t.Fatal(err) - } - - task, err = middleware.FindTaskByID(context.Background(), task.ID) - if err != nil { - t.Fatal(err) - } - - manualRunTime := time.Now().Unix() - if _, err = middleware.ForceRun(context.Background(), task.ID, manualRunTime); err != nil { - t.Fatal(err) - } - - if platform.ID(id) != task.ID { - t.Fatalf("expected task ID passed to scheduler to match create task ID %v, got: %v", task.ID, id) - } - -} diff --git a/task/backend/middleware/notification_middleware.go b/task/backend/middleware/notification_middleware.go deleted file mode 100644 index e32f6cc8b45..00000000000 --- a/task/backend/middleware/notification_middleware.go +++ /dev/null @@ -1,138 +0,0 @@ -package middleware - -import ( - "context" - "fmt" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -// CoordinatingNotificationRuleStore acts as a NotificationRuleStore decorator that handles coordinating the api request -// with the required task control actions asynchronously via a message dispatcher -type CoordinatingNotificationRuleStore struct { - influxdb.NotificationRuleStore - coordinator Coordinator - taskService taskmodel.TaskService - Now func() time.Time -} - -// NewNotificationRuleStore constructs a new coordinating notification service -func NewNotificationRuleStore(ns influxdb.NotificationRuleStore, ts taskmodel.TaskService, coordinator Coordinator) *CoordinatingNotificationRuleStore { - c := &CoordinatingNotificationRuleStore{ - NotificationRuleStore: ns, - taskService: ts, - coordinator: coordinator, - Now: func() time.Time { - return time.Now().UTC() - }, - } - - return c -} - -// CreateNotificationRule Creates a notification and Publishes the change it can be scheduled. 
-func (ns *CoordinatingNotificationRuleStore) CreateNotificationRule(ctx context.Context, nr influxdb.NotificationRuleCreate, userID platform.ID) error { - - if err := ns.NotificationRuleStore.CreateNotificationRule(ctx, nr, userID); err != nil { - return err - } - - t, err := ns.taskService.FindTaskByID(ctx, nr.GetTaskID()) - if err != nil { - return err - } - - if err := ns.coordinator.TaskCreated(ctx, t); err != nil { - if derr := ns.NotificationRuleStore.DeleteNotificationRule(ctx, nr.GetID()); derr != nil { - return fmt.Errorf("schedule task failed: %s\n\tcleanup also failed: %s", err, derr) - } - - return err - } - - return nil -} - -// UpdateNotificationRule Updates a notification and publishes the change so the task owner can act on the update -func (ns *CoordinatingNotificationRuleStore) UpdateNotificationRule(ctx context.Context, id platform.ID, nr influxdb.NotificationRuleCreate, uid platform.ID) (influxdb.NotificationRule, error) { - from, err := ns.NotificationRuleStore.FindNotificationRuleByID(ctx, id) - if err != nil { - return nil, err - } - - fromTask, err := ns.taskService.FindTaskByID(ctx, from.GetTaskID()) - if err != nil { - return nil, err - } - - to, err := ns.NotificationRuleStore.UpdateNotificationRule(ctx, id, nr, uid) - if err != nil { - return to, err - } - - toTask, err := ns.taskService.FindTaskByID(ctx, to.GetTaskID()) - if err != nil { - return nil, err - } - // if the update is to activate and the previous task was inactive we should add a "latest completed" update - // this allows us to see not run the task for inactive time - if fromTask.Status == string(taskmodel.TaskInactive) && toTask.Status == string(taskmodel.TaskActive) { - toTask.LatestCompleted = ns.Now() - } - - return to, ns.coordinator.TaskUpdated(ctx, fromTask, toTask) -} - -// PatchNotificationRule Updates a notification and publishes the change so the task owner can act on the update -func (ns *CoordinatingNotificationRuleStore) PatchNotificationRule(ctx context.Context, id platform.ID, upd influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) { - from, err := ns.NotificationRuleStore.FindNotificationRuleByID(ctx, id) - if err != nil { - return nil, err - } - - fromTask, err := ns.taskService.FindTaskByID(ctx, from.GetTaskID()) - if err != nil { - return nil, err - } - - to, err := ns.NotificationRuleStore.PatchNotificationRule(ctx, id, upd) - if err != nil { - return to, err - } - - toTask, err := ns.taskService.FindTaskByID(ctx, to.GetTaskID()) - if err != nil { - return nil, err - } - - // if the update is to activate and the previous task was inactive we should add a "latest completed" update - // this allows us to see not run the task for inactive time - if fromTask.Status == string(taskmodel.TaskInactive) && toTask.Status == string(taskmodel.TaskActive) { - toTask.LatestCompleted = ns.Now() - } - - return to, ns.coordinator.TaskUpdated(ctx, fromTask, toTask) - -} - -// DeleteNotificationRule delete the notification and publishes the change, to allow the task owner to find out about this change faster. 
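
[Editorial aside] The create path above writes the rule to the store first, then registers the backing task with the coordinator, and compensates with a delete if that registration fails. The following is a minimal, self-contained sketch of that rollback shape only; every type and function name in it (store, coordinator, createWithRollback) is hypothetical and not part of the influxdb API.

package main

import (
	"errors"
	"fmt"
)

// store and coordinator are stand-ins for NotificationRuleStore and Coordinator.
type store struct{ rules map[int]string }

func (s *store) create(id int, rule string) error { s.rules[id] = rule; return nil }
func (s *store) remove(id int) error              { delete(s.rules, id); return nil }

type coordinator struct{ fail bool }

func (c *coordinator) taskCreated(id int) error {
	if c.fail {
		return errors.New("scheduler unavailable")
	}
	return nil
}

// createWithRollback writes to the store, then registers with the coordinator;
// if registration fails it deletes the stored rule so the two stay consistent.
func createWithRollback(s *store, c *coordinator, id int, rule string) error {
	if err := s.create(id, rule); err != nil {
		return err
	}
	if err := c.taskCreated(id); err != nil {
		if derr := s.remove(id); derr != nil {
			return fmt.Errorf("schedule task failed: %s\n\tcleanup also failed: %s", err, derr)
		}
		return err
	}
	return nil
}

func main() {
	s := &store{rules: map[int]string{}}
	fmt.Println(createWithRollback(s, &coordinator{fail: true}, 1, "deadman"))
	fmt.Println(len(s.rules)) // 0: the failed create was rolled back
}
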
-func (ns *CoordinatingNotificationRuleStore) DeleteNotificationRule(ctx context.Context, id platform.ID) error { - notification, err := ns.NotificationRuleStore.FindNotificationRuleByID(ctx, id) - if err != nil { - return err - } - - t, err := ns.taskService.FindTaskByID(ctx, notification.GetTaskID()) - if err != nil { - return err - } - - if err := ns.coordinator.TaskDeleted(ctx, t.ID); err != nil { - return err - } - - return ns.NotificationRuleStore.DeleteNotificationRule(ctx, id) -} diff --git a/task/backend/middleware/notification_middleware_test.go b/task/backend/middleware/notification_middleware_test.go deleted file mode 100644 index e687c4b94b6..00000000000 --- a/task/backend/middleware/notification_middleware_test.go +++ /dev/null @@ -1,217 +0,0 @@ -package middleware_test - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/notification/rule" - "github.com/influxdata/influxdb/v2/task/backend/middleware" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -func newNotificationRuleSvcStack() (mockedSvc, *middleware.CoordinatingNotificationRuleStore) { - msvcs := newMockServices() - return msvcs, middleware.NewNotificationRuleStore(msvcs.notificationSvc, msvcs.taskSvc, msvcs.pipingCoordinator) -} - -func TestNotificationRuleCreate(t *testing.T) { - mocks, nrService := newNotificationRuleSvcStack() - ch := mocks.pipingCoordinator.taskCreatedChan() - - nr := &rule.HTTP{} - nr.SetTaskID(4) - - nrc := influxdb.NotificationRuleCreate{ - NotificationRule: nr, - Status: influxdb.Active, - } - - err := nrService.CreateNotificationRule(context.Background(), nrc, 1) - if err != nil { - t.Fatal(err) - } - - select { - case task := <-ch: - if task.ID != nr.GetTaskID() { - t.Fatalf("task sent to coordinator doesn't match expected") - } - default: - t.Fatal("didn't receive task") - } - - mocks.pipingCoordinator.err = fmt.Errorf("bad") - mocks.notificationSvc.DeleteNotificationRuleF = func(context.Context, platform.ID) error { return fmt.Errorf("AARGH") } - - err = nrService.CreateNotificationRule(context.Background(), nrc, 1) - if err.Error() != "schedule task failed: bad\n\tcleanup also failed: AARGH" { - t.Fatal(err) - } -} - -func TestNotificationRuleUpdateFromInactive(t *testing.T) { - mocks, nrService := newNotificationRuleSvcStack() - latest := time.Now().UTC() - nrService.Now = func() time.Time { - return latest - } - ch := mocks.pipingCoordinator.taskUpdatedChan() - - mocks.notificationSvc.UpdateNotificationRuleF = func(_ context.Context, _ platform.ID, c influxdb.NotificationRuleCreate, _ platform.ID) (influxdb.NotificationRule, error) { - c.SetTaskID(10) - c.SetUpdatedAt(latest.Add(-20 * time.Hour)) - return c, nil - } - - mocks.notificationSvc.PatchNotificationRuleF = func(_ context.Context, id platform.ID, _ influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) { - ic := &rule.HTTP{} - ic.SetTaskID(10) - ic.SetUpdatedAt(latest.Add(-20 * time.Hour)) - return ic, nil - } - - mocks.notificationSvc.FindNotificationRuleByIDF = func(_ context.Context, id platform.ID) (influxdb.NotificationRule, error) { - c := &rule.HTTP{} - c.SetID(id) - c.SetTaskID(1) - return c, nil - } - - mocks.taskSvc.FindTaskByIDFn = func(_ context.Context, id platform.ID) (*taskmodel.Task, error) { - if id == 1 { - return &taskmodel.Task{ID: id, Status: string(taskmodel.TaskInactive)}, nil - } else if id == 10 { - return 
&taskmodel.Task{ID: id, Status: string(taskmodel.TaskActive)}, nil - } - return &taskmodel.Task{ID: id}, nil - } - - deadman := &rule.HTTP{} - deadman.SetTaskID(10) - - nrc := influxdb.NotificationRuleCreate{ - NotificationRule: deadman, - Status: influxdb.Active, - } - - therule, err := nrService.UpdateNotificationRule(context.Background(), 1, nrc, 11) - if err != nil { - t.Fatal(err) - } - select { - case task := <-ch: - if task.ID != therule.GetTaskID() { - t.Fatalf("task sent to coordinator doesn't match expected") - } - if task.LatestCompleted != latest { - t.Fatalf("update returned incorrect LatestCompleted, expected %s got %s, or ", latest.Format(time.RFC3339), task.LatestCompleted) - } - default: - t.Fatal("didn't receive task") - } - - action := influxdb.Active - therule, err = nrService.PatchNotificationRule(context.Background(), 1, influxdb.NotificationRuleUpdate{Status: &action}) - if err != nil { - t.Fatal(err) - } - select { - case task := <-ch: - if task.ID != therule.GetTaskID() { - t.Fatalf("task sent to coordinator doesn't match expected") - } - if task.LatestCompleted != latest { - t.Fatalf("update returned incorrect LatestCompleted, expected %s got %s, or ", latest.Format(time.RFC3339), task.LatestCompleted) - } - default: - t.Fatal("didn't receive task") - } - -} -func TestNotificationRuleUpdate(t *testing.T) { - mocks, nrService := newNotificationRuleSvcStack() - ch := mocks.pipingCoordinator.taskUpdatedChan() - - mocks.notificationSvc.UpdateNotificationRuleF = func(_ context.Context, _ platform.ID, c influxdb.NotificationRuleCreate, _ platform.ID) (influxdb.NotificationRule, error) { - c.SetTaskID(10) - return c, nil - } - - deadman := &rule.HTTP{} - deadman.SetTaskID(4) - - nrc := influxdb.NotificationRuleCreate{ - NotificationRule: deadman, - Status: influxdb.Active, - } - - nr, err := nrService.UpdateNotificationRule(context.Background(), 1, nrc, 2) - if err != nil { - t.Fatal(err) - } - - select { - case task := <-ch: - if task.ID != nr.GetTaskID() { - t.Fatalf("task sent to coordinator doesn't match expected") - } - default: - t.Fatal("didn't receive task") - } -} - -func TestNotificationRulePatch(t *testing.T) { - mocks, nrService := newNotificationRuleSvcStack() - ch := mocks.pipingCoordinator.taskUpdatedChan() - - deadman := &rule.HTTP{} - deadman.SetTaskID(4) - - mocks.notificationSvc.PatchNotificationRuleF = func(context.Context, platform.ID, influxdb.NotificationRuleUpdate) (influxdb.NotificationRule, error) { - return deadman, nil - } - - nr, err := nrService.PatchNotificationRule(context.Background(), 1, influxdb.NotificationRuleUpdate{}) - if err != nil { - t.Fatal(err) - } - - select { - case task := <-ch: - if task.ID != nr.GetTaskID() { - t.Fatalf("task sent to coordinator doesn't match expected") - } - default: - t.Fatal("didn't receive task") - } -} - -func TestNotificationRuleDelete(t *testing.T) { - mocks, nrService := newNotificationRuleSvcStack() - ch := mocks.pipingCoordinator.taskDeletedChan() - - mocks.notificationSvc.FindNotificationRuleByIDF = func(_ context.Context, id platform.ID) (influxdb.NotificationRule, error) { - c := &rule.HTTP{} - c.SetID(id) - c.SetTaskID(21) - return c, nil - } - - err := nrService.DeleteNotificationRule(context.Background(), 1) - if err != nil { - t.Fatal(err) - } - - select { - case id := <-ch: - if id != platform.ID(21) { - t.Fatalf("task sent to coordinator doesn't match expected") - } - default: - t.Fatal("didn't receive task") - } -} diff --git a/task/backend/middleware/options.go 
b/task/backend/middleware/options.go deleted file mode 100644 index 50db20d3390..00000000000 --- a/task/backend/middleware/options.go +++ /dev/null @@ -1,13 +0,0 @@ -package middleware - -import "time" - -// Option is a functional option for the coordinating task service -type Option func(*CoordinatingTaskService) - -// WithNowFunc sets the now func used to derive time -func WithNowFunc(fn func() time.Time) Option { - return func(c *CoordinatingTaskService) { - c.now = fn - } -} diff --git a/task/backend/read_table_test.go b/task/backend/read_table_test.go deleted file mode 100644 index 4361f956d04..00000000000 --- a/task/backend/read_table_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package backend - -import ( - "bytes" - "io" - "testing" - - "github.com/influxdata/flux/csv" - "go.uber.org/zap/zaptest" -) - -func TestReadTable(t *testing.T) { - encoded := []byte(`group,false,false,true,true,false,true,false,false,false,false,false,false -#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,string,string,string -#default,_result,,,,,,,,,,, -,result,table,_start,_stop,_time,taskID,finishedAt,logs,runID,scheduledFor,startedAt,status -,,0,2019-07-23T20:06:24.369913228Z,2019-07-23T20:11:24.369913228Z,2019-07-23T20:06:30.232988837Z,0432e57782b51000,2019-07-23T20:06:30.300005674Z,"[{""runID"":""04341baa937a1000"",""time"":""2019-07-23T20:06:30.223615661Z"",""message"":""Started task from script: \""option v = {timeRangeStart: -1h, timeRangeStop: now(), windowPeriod: 15000ms}\\noption task = {name: \\\""howdy\\\"", every: 10s}\\n\\nfrom(bucket: \\\""goller+acc1's Bucket\\\"")\\n\\t|\u003e range(start: v.timeRangeStart, stop: v.timeRangeStop)\\n\\t|\u003e filter(fn: (r) =\u003e\\n\\t\\t(r._measurement == \\\""disk\\\""))\\n\\t|\u003e filter(fn: (r) =\u003e\\n\\t\\t(r._field == \\\""free\\\""))\\n\\t|\u003e filter(fn: (r) =\u003e\\n\\t\\t(r.device == \\\""disk1s1\\\""))\\n\\t|\u003e aggregateWindow(every: v.windowPeriod, fn: mean)\\n\\t|\u003e yield(name: \\\""mean\\\"")\\n\\t|\u003e to(bucket: \\\""goller+acc1's Bucket\\\"", org: \\\""goller+acc1@gmail.com\\\"")\""""},{""runID"":""04341baa937a1000"",""time"":""2019-07-23T20:06:30.291420936Z"",""message"":""Run failed to execute: panic: column _value:float is not of type int""},{""runID"":""04341baa937a1000"",""time"":""2019-07-23T20:06:30.295269151Z"",""message"":""Failed""}]",04341baa937a1000,2019-07-23T20:06:30Z,2019-07-23T20:06:30.232988837Z,failed -,,0,2019-07-23T20:06:24.369913228Z,2019-07-23T20:11:24.369913228Z,2019-07-23T20:06:40.215226536Z,0432e57782b51000,2019-07-23T20:06:40.284116882Z,"[{""runID"":""04341bb4543a1000"",""time"":""2019-07-23T20:06:40.210364486Z"",""message"":""Started task from script: \""option v = {timeRangeStart: -1h, timeRangeStop: now(), windowPeriod: 15000ms}\\noption task = {name: \\\""howdy\\\"", every: 10s}\\n\\nfrom(bucket: \\\""goller+acc1's Bucket\\\"")\\n\\t|\u003e range(start: v.timeRangeStart, stop: v.timeRangeStop)\\n\\t|\u003e filter(fn: (r) =\u003e\\n\\t\\t(r._measurement == \\\""disk\\\""))\\n\\t|\u003e filter(fn: (r) =\u003e\\n\\t\\t(r._field == \\\""free\\\""))\\n\\t|\u003e filter(fn: (r) =\u003e\\n\\t\\t(r.device == \\\""disk1s1\\\""))\\n\\t|\u003e aggregateWindow(every: v.windowPeriod, fn: mean)\\n\\t|\u003e yield(name: \\\""mean\\\"")\\n\\t|\u003e to(bucket: \\\""goller+acc1's Bucket\\\"", org: \\\""goller+acc1@gmail.com\\\"")\""""},{""runID"":""04341bb4543a1000"",""time"":""2019-07-23T20:06:40.277078243Z"",""message"":""Run failed to execute: 
panic: column _value:float is not of type int""},{""runID"":""04341bb4543a1000"",""time"":""2019-07-23T20:06:40.280158006Z"",""message"":""Failed""}]",04341bb4543a1000,2019-07-23T20:06:40Z,2019-07-23T20:06:40.215226536Z,failed`) - - decoder := csv.NewMultiResultDecoder(csv.ResultDecoderConfig{}) - itr, err := decoder.Decode(io.NopCloser(bytes.NewReader(encoded))) - if err != nil { - t.Fatalf("got error decoding csv: %v", err) - } - - defer itr.Release() - re := &runReader{log: zaptest.NewLogger(t)} - - for itr.More() { - err := itr.Next().Tables().Do(re.readTable) - if err != nil { - t.Fatalf("received error in runs table: %v", err) - } - } - - if itr.Err() != nil { - t.Fatalf("got error from iterator %v", itr.Err()) - } -} diff --git a/task/backend/run_recorder.go b/task/backend/run_recorder.go deleted file mode 100644 index f1f31ca2c9c..00000000000 --- a/task/backend/run_recorder.go +++ /dev/null @@ -1,75 +0,0 @@ -package backend - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/storage" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "go.uber.org/zap" -) - -// StoragePointsWriterRecorder is an implementation of RunRecorder which -// writes runs via an implementation of storage PointsWriter -type StoragePointsWriterRecorder struct { - pw storage.PointsWriter - - log *zap.Logger -} - -// NewStoragePointsWriterRecorder configures and returns a new *StoragePointsWriterRecorder -func NewStoragePointsWriterRecorder(log *zap.Logger, pw storage.PointsWriter) *StoragePointsWriterRecorder { - return &StoragePointsWriterRecorder{pw, log} -} - -// Record formats the provided run as a models.Point and writes the resulting -// point to an underlying storage.PointsWriter -func (s *StoragePointsWriterRecorder) Record(ctx context.Context, bucketID platform.ID, bucket string, task *taskmodel.Task, run *taskmodel.Run) error { - tags := models.NewTags(map[string]string{ - statusTag: run.Status, - taskIDTag: run.TaskID.String(), - }) - - // log an error if we have incomplete data on finish - if !run.ID.Valid() || - run.ScheduledFor.IsZero() || - run.StartedAt.IsZero() || - run.FinishedAt.IsZero() || - run.Status == "" { - s.log.Error("Run missing critical fields", zap.String("run", fmt.Sprintf("%+v", run)), zap.String("runID", run.ID.String())) - } - - fields := map[string]interface{}{} - fields[runIDField] = run.ID.String() - fields[nameField] = task.Name - fields[startedAtField] = run.StartedAt.Format(time.RFC3339Nano) - fields[finishedAtField] = run.FinishedAt.Format(time.RFC3339Nano) - fields[scheduledForField] = run.ScheduledFor.Format(time.RFC3339) - fields[requestedAtField] = run.RequestedAt.Format(time.RFC3339) - fields[fluxField] = run.Flux - fields[traceIDField] = run.TraceID - fields[traceSampledTag] = run.IsSampled - - startedAt := run.StartedAt - if startedAt.IsZero() { - startedAt = time.Now().UTC() - } - - logBytes, err := json.Marshal(run.Log) - if err != nil { - return err - } - fields[logField] = string(logBytes) - - point, err := models.NewPoint("runs", tags, fields, startedAt) - if err != nil { - return err - } - - // TODO - fix - return s.pw.WritePoints(ctx, task.OrganizationID, bucketID, models.Points{point}) -} diff --git a/task/backend/schedulable_task_service.go b/task/backend/schedulable_task_service.go deleted file mode 100644 index 81d496de302..00000000000 --- 
a/task/backend/schedulable_task_service.go +++ /dev/null @@ -1,40 +0,0 @@ -package backend - -import ( - "context" - "fmt" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/task/backend/scheduler" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -var _ scheduler.SchedulableService = (*SchedulableTaskService)(nil) - -// UpdateTaskService provides an API to update the LatestScheduled time of a task -type UpdateTaskService interface { - UpdateTask(ctx context.Context, id platform.ID, upd taskmodel.TaskUpdate) (*taskmodel.Task, error) -} - -// SchedulableTaskService implements the SchedulableService interface -type SchedulableTaskService struct { - UpdateTaskService -} - -// NewSchedulableTaskService initializes a new SchedulableTaskService given an UpdateTaskService -func NewSchedulableTaskService(ts UpdateTaskService) SchedulableTaskService { - return SchedulableTaskService{ts} -} - -// UpdateLastScheduled uses the task service to store the latest time a task was scheduled to run -func (s SchedulableTaskService) UpdateLastScheduled(ctx context.Context, id scheduler.ID, t time.Time) error { - _, err := s.UpdateTask(ctx, platform.ID(id), taskmodel.TaskUpdate{ - LatestScheduled: &t, - }) - - if err != nil { - return fmt.Errorf("could not update last scheduled for task; Err: %v", err) - } - return nil -} diff --git a/task/backend/schedulable_task_service_test.go b/task/backend/schedulable_task_service_test.go deleted file mode 100644 index 3120c08d678..00000000000 --- a/task/backend/schedulable_task_service_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package backend - -import ( - "context" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/task/backend/scheduler" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -var ( - mockTaskID = platform.ID(1) - mockTimeNow = time.Now() -) - -func (m MockTaskService) UpdateTask(_ context.Context, id platform.ID, _ taskmodel.TaskUpdate) (*taskmodel.Task, error) { - return &taskmodel.Task{ID: id, UpdatedAt: mockTimeNow}, nil -} - -type MockTaskService struct{} - -func Test_Schedulable_Task_Service(t *testing.T) { - - for _, test := range []struct { - name string - task *taskmodel.Task - }{ - { - name: "Create New Schedulable Task Service", - task: taskOne, - }, - } { - t.Run(test.name, func(t *testing.T) { - ts := MockTaskService{} - - schedulableService := NewSchedulableTaskService(ts) - - err := schedulableService.UpdateLastScheduled(context.Background(), scheduler.ID(mockTaskID), mockTimeNow) - if err != nil { - t.Fatalf("expected nil error, got: %v", err) - } - }) - } -} diff --git a/task/backend/scheduler/noop_scheduler.go b/task/backend/scheduler/noop_scheduler.go deleted file mode 100644 index b83b1bb2f5d..00000000000 --- a/task/backend/scheduler/noop_scheduler.go +++ /dev/null @@ -1,18 +0,0 @@ -package scheduler - -// NoopScheduler is a no-op scheduler. It is used when we don't want the -// standard scheduler to run (e.g. when "--no-tasks" flag is present). -type NoopScheduler struct{} - -// Schedule is a mocked Scheduler.Schedule method. -func (n *NoopScheduler) Schedule(task Schedulable) error { - return nil -} - -// Release is a mocked Scheduler.Release method. -func (n *NoopScheduler) Release(taskID ID) error { - return nil -} - -// Stop is a mocked stop method. 
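
[Editorial aside] The NoopScheduler above exists so task execution can be switched off (the comment mentions the "--no-tasks" mode). A hedged sketch of how a caller might pick between a real scheduler and the no-op one behind a shared interface; the Scheduler interface here is deliberately simplified to IDs, and the newScheduler wiring and noTasks flag plumbing are assumptions for illustration, not code from this repository.

package main

import "fmt"

type ID uint64

// Scheduler is a simplified version of the interface both implementations satisfy.
type Scheduler interface {
	Schedule(taskID ID) error
	Release(taskID ID) error
}

// NoopScheduler ignores every request; used when task execution is turned off.
type NoopScheduler struct{}

func (NoopScheduler) Schedule(ID) error { return nil }
func (NoopScheduler) Release(ID) error  { return nil }

// treeScheduler is a stand-in for the real scheduler.
type treeScheduler struct{}

func (treeScheduler) Schedule(id ID) error { fmt.Println("scheduled", id); return nil }
func (treeScheduler) Release(id ID) error  { fmt.Println("released", id); return nil }

// newScheduler picks the implementation from a hypothetical noTasks flag.
func newScheduler(noTasks bool) Scheduler {
	if noTasks {
		return NoopScheduler{}
	}
	return treeScheduler{}
}

func main() {
	sch := newScheduler(true) // e.g. when tasks are disabled
	_ = sch.Schedule(42)      // accepted silently, nothing ever runs
}
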
-func (n *NoopScheduler) Stop() {} diff --git a/task/backend/scheduler/scheduler.go b/task/backend/scheduler/scheduler.go deleted file mode 100644 index 801070472c5..00000000000 --- a/task/backend/scheduler/scheduler.go +++ /dev/null @@ -1,128 +0,0 @@ -package scheduler - -import ( - "context" - "strings" - "time" - - "github.com/influxdata/cron" - "github.com/influxdata/influxdb/v2/task/options" -) - -// ID duplicates the influxdb ID so users of the scheduler don't have to -// import influxdb for the ID. -type ID uint64 - -// Executor is a system used by the scheduler to actually execute the scheduleable item. -type Executor interface { - // Execute is used to execute run's for any schedulable object. - // the executor can go through manual runs, clean currently running, and then create a new run based on `now`. - // if Now is zero we can just do the first 2 steps (This is how we would trigger manual runs). - // Errors returned from the execute request imply that this attempt has failed and - // should be put back in scheduler and re executed at a alter time. We will add scheduler specific errors - // so the time can be configurable. - Execute(ctx context.Context, id ID, scheduledFor time.Time, runAt time.Time) error -} - -// Schedulable is the interface that encapsulates work that -// is to be executed on a specified schedule. -type Schedulable interface { - // ID is the unique identifier for this Schedulable - ID() ID - - // Schedule defines the frequency for which this Schedulable should be - // queued for execution. - Schedule() Schedule - - // Offset defines a negative or positive duration that should be added - // to the scheduled time, resulting in the instance running earlier or later - // than the scheduled time. - Offset() time.Duration - - // LastScheduled specifies last time this Schedulable was queued - // for execution. - LastScheduled() time.Time -} - -// SchedulableService encapsulates the work necessary to schedule a job -type SchedulableService interface { - - // UpdateLastScheduled notifies the instance that it was scheduled for - // execution at the specified time - UpdateLastScheduled(ctx context.Context, id ID, t time.Time) error -} - -func NewSchedule(unparsed string, lastScheduledAt time.Time) (Schedule, time.Time, error) { - lastScheduledAt = lastScheduledAt.UTC().Truncate(time.Second) - c, err := cron.ParseUTC(unparsed) - if err != nil { - return Schedule{}, lastScheduledAt, err - } - - unparsed = strings.TrimSpace(unparsed) - - // Align create to the hour/minute - if strings.HasPrefix(unparsed, "@every ") { - everyString := strings.TrimSpace(strings.TrimPrefix(unparsed, "@every ")) - every := options.Duration{} - err := every.Parse(everyString) - if err != nil { - // We cannot align a invalid time - return Schedule{c}, lastScheduledAt, nil - } - - // drop nanoseconds - lastScheduledAt = time.Unix(lastScheduledAt.UTC().Unix(), 0).UTC() - everyDur, err := every.DurationFrom(lastScheduledAt) - if err != nil { - return Schedule{c}, lastScheduledAt, nil - } - - // and align - lastScheduledAt = lastScheduledAt.Truncate(everyDur).Truncate(time.Second) - } - - return Schedule{c}, lastScheduledAt, err -} - -// Schedule is an object a valid schedule of runs -type Schedule struct { - cron cron.Parsed -} - -// Next returns the next time after from that a schedule should trigger on. -func (s Schedule) Next(from time.Time) (time.Time, error) { - return cron.Parsed(s.cron).Next(from) -} - -// ValidSchedule returns an error if the cron string is invalid. 
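
[Editorial aside] NewSchedule above aligns lastScheduledAt for "@every ..." schedules by truncating it to the every-interval, so runs land on round boundaries (an hourly task fires at :00 rather than at whatever second it was created). A small standalone illustration of that truncation arithmetic, using plain time.Duration values instead of the options.Duration type used in the real code.

package main

import (
	"fmt"
	"time"
)

func main() {
	// A task created at 01:10:23.001234567 with "@every 1h".
	created := time.Date(2016, 1, 1, 1, 10, 23, 1234567, time.UTC)

	// Drop sub-second precision first, then truncate to the every-interval,
	// mirroring the alignment done in NewSchedule for "@every" schedules.
	every := time.Hour
	aligned := created.Truncate(time.Second).Truncate(every)

	fmt.Println(aligned) // 2016-01-01 01:00:00 +0000 UTC
}
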
-func ValidateSchedule(c string) error { - _, err := cron.ParseUTC(c) - return err -} - -// Scheduler is a example interface of a Scheduler. -// // todo(lh): remove this once we start building the actual scheduler -type Scheduler interface { - - // Schedule adds the specified task to the scheduler. - Schedule(task Schedulable) error - - // Release removes the specified task from the scheduler. - Release(taskID ID) error -} - -type ErrUnrecoverable struct { - error -} - -func (e *ErrUnrecoverable) Error() string { - if e.error != nil { - return "error unrecoverable error on task run " + e.error.Error() - } - return "error unrecoverable error on task run" -} - -func (e *ErrUnrecoverable) Unwrap() error { - return e.error -} diff --git a/task/backend/scheduler/scheduler_metrics.go b/task/backend/scheduler/scheduler_metrics.go deleted file mode 100644 index 52eff0511c3..00000000000 --- a/task/backend/scheduler/scheduler_metrics.go +++ /dev/null @@ -1,141 +0,0 @@ -package scheduler - -import ( - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -type SchedulerMetrics struct { - totalExecuteCalls prometheus.Counter - totalExecuteFailure prometheus.Counter - scheduleCalls prometheus.Counter - scheduleFails prometheus.Counter - releaseCalls prometheus.Counter - - executingTasks *executingTasks - scheduleDelay prometheus.Summary - executeDelta prometheus.Summary -} - -type executingTasks struct { - desc *prometheus.Desc - ts *TreeScheduler -} - -func NewSchedulerMetrics(te *TreeScheduler) *SchedulerMetrics { - const namespace = "task" - const subsystem = "scheduler" - - return &SchedulerMetrics{ - totalExecuteCalls: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "total_execution_calls", - Help: "Total number of executions across all tasks.", - }), - scheduleCalls: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "total_schedule_calls", - Help: "Total number of schedule requests.", - }), - scheduleFails: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "total_schedule_fails", - Help: "Total number of schedule requests that fail to schedule.", - }), - - totalExecuteFailure: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "total_execute_failure", - Help: "Total number of times an execution has failed.", - }), - - releaseCalls: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "total_release_calls", - Help: "Total number of release requests.", - }), - executingTasks: newExecutingTasks(te), - scheduleDelay: prometheus.NewSummary(prometheus.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "schedule_delay", - Help: "The duration between when a Item should be scheduled and when it is told to execute.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }), - - executeDelta: prometheus.NewSummary(prometheus.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "execute_delta", - Help: "The duration in seconds between a run starting and finishing.", - Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, - }), - } -} - -// PrometheusCollectors satisfies the prom.PrometheusCollector interface. 
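
[Editorial aside] The scheduler metrics above are ordinary prometheus counters and summaries plus one custom collector, and the PrometheusCollectors method defined just below returns them as a slice so a caller can register the whole set at once. A minimal sketch of registering such a slice with a registry follows; the registry setup and the two stand-in counters are assumptions for illustration, not code from this repository.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Stand-ins for two of the counters built in NewSchedulerMetrics.
	scheduleCalls := prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "task", Subsystem: "scheduler", Name: "total_schedule_calls",
		Help: "Total number of schedule requests.",
	})
	releaseCalls := prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "task", Subsystem: "scheduler", Name: "total_release_calls",
		Help: "Total number of release requests.",
	})

	// Registering the []prometheus.Collector slice with a registry is all
	// that is needed to expose the metrics for scraping.
	collectors := []prometheus.Collector{scheduleCalls, releaseCalls}

	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors...)

	scheduleCalls.Inc()
	mfs, _ := reg.Gather()
	fmt.Println(len(mfs), "metric families gathered")
}
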
-func (em *SchedulerMetrics) PrometheusCollectors() []prometheus.Collector { - return []prometheus.Collector{ - em.totalExecuteCalls, - em.totalExecuteFailure, - em.scheduleCalls, - em.scheduleFails, - em.releaseCalls, - em.executingTasks, - em.scheduleDelay, - em.executeDelta, - } -} - -func (em *SchedulerMetrics) schedule(taskID ID) { - em.scheduleCalls.Inc() -} - -func (em *SchedulerMetrics) scheduleFail(taskID ID) { - em.scheduleFails.Inc() -} - -func (em *SchedulerMetrics) release(taskID ID) { - em.releaseCalls.Inc() -} - -func (em *SchedulerMetrics) reportScheduleDelay(d time.Duration) { - em.scheduleDelay.Observe(d.Seconds()) -} - -func (em *SchedulerMetrics) reportExecution(err error, d time.Duration) { - em.totalExecuteCalls.Inc() - em.executeDelta.Observe(d.Seconds()) - if err != nil { - em.totalExecuteFailure.Inc() - } -} - -func newExecutingTasks(ts *TreeScheduler) *executingTasks { - return &executingTasks{ - desc: prometheus.NewDesc( - "task_scheduler_current_execution", - "Number of tasks currently being executed", - nil, - prometheus.Labels{}, - ), - ts: ts, - } -} - -// Describe returns all descriptions associated with the run collector. -func (r *executingTasks) Describe(ch chan<- *prometheus.Desc) { - ch <- r.desc -} - -// Collect returns the current state of all metrics of the run collector. -func (r *executingTasks) Collect(ch chan<- prometheus.Metric) { - // TODO(docmerlin): fix this metric - ch <- prometheus.MustNewConstMetric(r.desc, prometheus.GaugeValue, float64(len(r.ts.workchans))) -} diff --git a/task/backend/scheduler/scheduler_test.go b/task/backend/scheduler/scheduler_test.go deleted file mode 100644 index 88161c53adf..00000000000 --- a/task/backend/scheduler/scheduler_test.go +++ /dev/null @@ -1,643 +0,0 @@ -package scheduler - -import ( - "context" - "reflect" - "sync" - "testing" - "time" - - "github.com/benbjohnson/clock" - "github.com/influxdata/cron" -) - -type mockExecutor struct { - sync.Mutex - fn func(l *sync.Mutex, ctx context.Context, id ID, scheduledFor time.Time) - Err error -} - -type mockSchedulable struct { - id ID - schedule Schedule - offset time.Duration - lastScheduled time.Time -} - -func (s mockSchedulable) ID() ID { - return s.id -} - -func (s mockSchedulable) Schedule() Schedule { - return s.schedule -} -func (s mockSchedulable) Offset() time.Duration { - return s.offset -} -func (s mockSchedulable) LastScheduled() time.Time { - return s.lastScheduled -} - -func (e *mockExecutor) Execute(ctx context.Context, id ID, scheduledFor time.Time, runAt time.Time) error { - done := make(chan struct{}, 1) - select { - case <-ctx.Done(): - default: - e.fn(&sync.Mutex{}, ctx, id, scheduledFor) - done <- struct{}{} - } - return nil -} - -type mockSchedulableService struct { - fn func(ctx context.Context, id ID, t time.Time) error -} - -func (m *mockSchedulableService) UpdateLastScheduled(ctx context.Context, id ID, t time.Time) error { - - return nil -} - -func TestSchedule_Next(t *testing.T) { - t.Run("@every fires on appropriate boundaries", func(t *testing.T) { - // For these tests, the "timeElapsed" is the amount of time that is - // simulated to pass for the purposes of verifying that the task fires the - // correct amount of times. It is multiplied by a factor within the tests to - // simulated firing multiple times. 
- tests := []struct { - name string // also used as the cron time string - timeElapsed time.Duration - }{ - { - name: "@every 1m", - timeElapsed: 1 * time.Minute, - }, - { - name: "@every 1h", - timeElapsed: 1 * time.Hour, - }, - { - name: "@every 1w", // regression test for https://github.com/influxdata/influxdb/issues/21842 - timeElapsed: 7 * 24 * time.Hour, // 1 week - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := make(chan time.Time, 100) - exe := &mockExecutor{fn: func(l *sync.Mutex, ctx context.Context, id ID, scheduledAt time.Time) { - select { - case <-ctx.Done(): - t.Log("ctx done") - case c <- scheduledAt: - } - }} - mockTime := clock.NewMock() - // need to run on a time window which does not include daylight savings - // time for testing on systems which do not default to UTC. - testTime, err := time.Parse(time.RFC3339, "2020-04-01T00:00:00Z") - if err != nil { - t.Fatal(err) - } - mockTime.Set(testTime) - sch, _, err := NewScheduler( - exe, - &mockSchedulableService{fn: func(ctx context.Context, id ID, t time.Time) error { - return nil - }}, - WithTime(mockTime), - WithMaxConcurrentWorkers(20)) - if err != nil { - t.Fatal(err) - } - defer sch.Stop() - schedule, ts, err := NewSchedule(tt.name, mockTime.Now().UTC()) - if err != nil { - t.Fatal(err) - } - - err = sch.Schedule(mockSchedulable{id: 1, schedule: schedule, offset: time.Second, lastScheduled: ts}) - if err != nil { - t.Fatal(err) - } - go func() { - sch.mu.Lock() - mockTime.Set(mockTime.Now().UTC().Add(17 * tt.timeElapsed)) - sch.mu.Unlock() - }() - - after := time.After(6 * time.Second) - oldCheckC := ts - for i := 0; i < 16; i++ { - select { - case checkC := <-c: - if checkC.Sub(oldCheckC) != tt.timeElapsed { - t.Fatalf("task didn't fire on correct interval fired on %s interval", checkC.Sub(oldCheckC)) - } - if !checkC.Truncate(tt.timeElapsed).Equal(checkC) { - t.Fatalf("task didn't fire at the correct time boundary") - } - oldCheckC = checkC - case <-after: - t.Fatalf("test timed out, only fired %d times but should have fired 16 times", i) - } - } - }) - } - }) - t.Run("fires properly with non-mocked time", func(t *testing.T) { - now := time.Now() - c := make(chan time.Time, 100) - exe := &mockExecutor{fn: func(l *sync.Mutex, ctx context.Context, id ID, scheduledFor time.Time) { - select { - case <-ctx.Done(): - t.Log("ctx done") - case c <- scheduledFor: - default: - t.Errorf("called the executor too many times") - } - }} - sch, _, err := NewScheduler( - exe, - &mockSchedulableService{fn: func(ctx context.Context, id ID, t time.Time) error { - return nil - }}, - WithMaxConcurrentWorkers(2)) - if err != nil { - t.Fatal(err) - } - defer sch.Stop() - schedule, _, err := NewSchedule("* * * * * * *", time.Time{}) - if err != nil { - t.Fatal(err) - } - - err = sch.Schedule(mockSchedulable{id: 1, schedule: schedule, offset: time.Second, lastScheduled: now.Add(-20 * time.Second)}) - if err != nil { - t.Fatal(err) - } - - select { - case <-c: - case <-time.After(10 * time.Second): - t.Fatal("test timed out") - } - }) - t.Run("doesn't fire when the task isn't ready", func(t *testing.T) { - mockTime := clock.NewMock() - mockTime.Set(time.Now()) - c := make(chan time.Time, 100) - exe := &mockExecutor{fn: func(l *sync.Mutex, ctx context.Context, id ID, scheduledFor time.Time) { - select { - case <-ctx.Done(): - t.Log("ctx done") - case c <- scheduledFor: - default: - t.Errorf("called the executor too many times") - } - }} - sch, _, err := NewScheduler( - exe, - 
&mockSchedulableService{fn: func(ctx context.Context, id ID, t time.Time) error { - return nil - }}, - WithTime(mockTime), - WithMaxConcurrentWorkers(2)) - if err != nil { - t.Fatal(err) - } - defer sch.Stop() - schedule, _, err := NewSchedule("* * * * * * *", time.Time{}) - if err != nil { - t.Fatal(err) - } - - err = sch.Schedule(mockSchedulable{id: 1, schedule: schedule, offset: time.Second, lastScheduled: mockTime.Now().UTC().Add(time.Second)}) - if err != nil { - t.Fatal(err) - } - go func() { - sch.mu.Lock() - mockTime.Set(mockTime.Now().Add(2 * time.Second)) - sch.mu.Unlock() - }() - - select { - case <-c: - t.Fatal("test timed out") - case <-time.After(2 * time.Second): - } - - }) - - t.Run("fires the correct number of times for the interval with a single schedulable", func(t *testing.T) { - c := make(chan time.Time, 100) - exe := &mockExecutor{fn: func(l *sync.Mutex, ctx context.Context, id ID, scheduledFor time.Time) { - select { - case <-ctx.Done(): - t.Log("ctx done") - case c <- scheduledFor: - } - }} - mockTime := clock.NewMock() - mockTime.Set(time.Now()) - sch, _, err := NewScheduler( - exe, - &mockSchedulableService{fn: func(ctx context.Context, id ID, t time.Time) error { - return nil - }}, - WithTime(mockTime), - WithMaxConcurrentWorkers(20)) - if err != nil { - t.Fatal(err) - } - defer sch.Stop() - schedule, _, err := NewSchedule("* * * * * * *", time.Time{}) - if err != nil { - t.Fatal(err) - } - - err = sch.Schedule(mockSchedulable{id: 1, schedule: schedule, offset: time.Second, lastScheduled: mockTime.Now().UTC()}) - if err != nil { - t.Fatal(err) - } - go func() { - sch.mu.Lock() - mockTime.Set(mockTime.Now().UTC().Add(17 * time.Second)) - sch.mu.Unlock() - }() - - after := time.After(6 * time.Second) - for i := 0; i < 16; i++ { - select { - case <-c: - case <-after: - t.Fatalf("test timed out, only fired %d times but should have fired 16 times", i) - } - } - go func() { - sch.mu.Lock() - mockTime.Set(mockTime.Now().UTC().Add(2 * time.Second)) - sch.mu.Unlock() - }() - - after = time.After(6 * time.Second) - - for i := 0; i < 2; i++ { - select { - case <-c: - case <-after: - t.Fatalf("test timed out, only fired %d times but should have fired 2 times", i) - } - } - - select { - case <-c: - t.Fatalf("test scheduler fired too many times") - case <-time.After(2 * time.Second): - } - }) - - t.Run("fires the correct number of times for the interval with multiple schedulables", func(t *testing.T) { - now := time.Date(2016, 0, 0, 0, 1, 1, 0, time.UTC) - c := make(chan struct { - ts time.Time - id ID - }, 100) - exe := &mockExecutor{fn: func(l *sync.Mutex, ctx context.Context, id ID, scheduledFor time.Time) { - select { - case <-ctx.Done(): - t.Log("ctx done") - case c <- struct { - ts time.Time - id ID - }{ - ts: scheduledFor, - id: id, - }: - } - }} - mockTime := clock.NewMock() - mockTime.Set(now) - sch, _, err := NewScheduler( - exe, - &mockSchedulableService{fn: func(ctx context.Context, id ID, t time.Time) error { - return nil - }}, - WithTime(mockTime), - WithMaxConcurrentWorkers(20)) - if err != nil { - t.Fatal(err) - } - defer sch.Stop() - schedule, _, err := NewSchedule("* * * * * * *", time.Time{}) - if err != nil { - t.Fatal(err) - } - - err = sch.Schedule(mockSchedulable{id: 1, schedule: schedule, offset: time.Second, lastScheduled: now}) - if err != nil { - t.Fatal(err) - } - - schedule2, _, err := NewSchedule("*/2 * * * * * *", time.Time{}) - if err != nil { - t.Fatal(err) - } - - err = sch.Schedule(mockSchedulable{id: 2, schedule: schedule2, offset: 
time.Second, lastScheduled: now}) - if err != nil { - t.Fatal(err) - } - go func() { - sch.mu.Lock() - mockTime.Set(mockTime.Now().Add(17 * time.Second)) - sch.mu.Unlock() - }() - - after := time.After(6 * time.Second) - for i := 0; i < 24; i++ { - select { - case <-c: - case <-after: - t.Fatalf("test timed out, only fired %d times but should have fired 24 times", i) - } - } - - go func() { - sch.mu.Lock() - mockTime.Set(mockTime.Now().Add(2 * time.Second)) - sch.mu.Unlock() - }() - - after = time.After(6 * time.Second) - - for i := 0; i < 3; i++ { - select { - case <-c: - case <-after: - t.Fatalf("test timed out, only fired %d times but should have fired 3 times", i) - } - } - - select { - case <-c: - t.Fatalf("test scheduler fired too many times") - case <-time.After(2 * time.Second): - } - }) -} - -func TestTreeScheduler_Stop(t *testing.T) { - now := time.Now().Add(-20 * time.Second) - mockTime := clock.NewMock() - mockTime.Set(now) - exe := &mockExecutor{fn: func(l *sync.Mutex, ctx context.Context, id ID, scheduledFor time.Time) {}} - sch, _, err := NewScheduler(exe, &mockSchedulableService{fn: func(ctx context.Context, id ID, t time.Time) error { - return nil - }}, - WithTime(mockTime)) - if err != nil { - t.Fatal(err) - } - sch.Stop() -} - -func TestSchedule_panic(t *testing.T) { - // panics in the executor should be treated as errors - now := time.Now().UTC() - c := make(chan struct { - ts time.Time - err error - }, 1) - - exe := &mockExecutor{fn: func(l *sync.Mutex, ctx context.Context, id ID, scheduledFor time.Time) { - panic("yikes oh no!") - }} - - sch, _, err := NewScheduler( - exe, - &mockSchedulableService{fn: func(ctx context.Context, id ID, t time.Time) error { - return nil - }}, - WithMaxConcurrentWorkers(1), // to make debugging easier - WithOnErrorFn(func(_ context.Context, _ ID, ts time.Time, err error) { - c <- struct { - ts time.Time - err error - }{ - ts: ts, - err: err, - } - })) - if err != nil { - t.Fatal(err) - } - - schedule, _, err := NewSchedule("* * * * * * *", time.Time{}) - if err != nil { - t.Fatal(err) - } - - err = sch.Schedule(mockSchedulable{id: 1, schedule: schedule, offset: time.Second, lastScheduled: now.Add(-20 * time.Second)}) - if err != nil { - t.Fatal(err) - } - - select { - case <-c: // panic was caught and error handler used - case <-time.After(10 * time.Second): - t.Fatal("test timed out", now.UTC().Unix()) - } -} - -func TestTreeScheduler_LongPanicTest(t *testing.T) { - // This test is to catch one specifgic type of race condition that can occur and isn't caught by race test, but causes a panic - // in the google btree library - now := time.Date(2096, time.December, 30, 0, 0, 0, 0, time.UTC) - - mockTime := clock.NewMock() - mockTime.Set(now) - - exe := &mockExecutor{fn: func(l *sync.Mutex, ctx context.Context, id ID, scheduledFor time.Time) { - select { - case <-ctx.Done(): - t.Log("ctx done") - default: - } - }} - - sch, _, err := NewScheduler( - exe, - &mockSchedulableService{fn: func(ctx context.Context, id ID, t time.Time) error { - return nil - }}, - WithTime(mockTime), - WithMaxConcurrentWorkers(20)) - if err != nil { - t.Fatal(err) - } - defer sch.Stop() - - // this tests for a race condition in the btree that isn't normally caught by the race detector - schedule, _, err := NewSchedule("* * * * * * *", now.Add(-1*time.Second)) - if err != nil { - t.Fatal(err) - } - badSchedule, ts, err := NewSchedule("0 0 1 12 *", now.Add(-1*time.Second)) - if err != nil { - t.Fatal(err) - } - - for i := ID(1); i <= 2000; i++ { // since a valid ID 
probably shouldn't be zero - if i%100 == 0 { - err = sch.Schedule(mockSchedulable{id: i, schedule: badSchedule, offset: 0, lastScheduled: ts}) - if err != nil { - t.Fatal(err) - } - } else { - err = sch.Schedule(mockSchedulable{id: i, schedule: schedule, offset: 0, lastScheduled: ts}) - if err != nil { - t.Fatal(err) - } - } - } - time.Sleep(2 * time.Second) - sch.mu.Lock() - mockTime.Set(mockTime.Now().UTC().Add(99 * time.Second)) - sch.mu.Unlock() - time.Sleep(5 * time.Second) - -} - -func TestTreeScheduler_Release(t *testing.T) { - c := make(chan time.Time, 100) - exe := &mockExecutor{fn: func(l *sync.Mutex, ctx context.Context, id ID, scheduledFor time.Time) { - select { - case <-ctx.Done(): - t.Log("ctx done") - case c <- scheduledFor: - } - }} - mockTime := clock.NewMock() - mockTime.Set(time.Now()) - sch, _, err := NewScheduler( - exe, - &mockSchedulableService{fn: func(ctx context.Context, id ID, t time.Time) error { - return nil - }}, - WithTime(mockTime), - WithMaxConcurrentWorkers(20)) - if err != nil { - t.Fatal(err) - } - defer sch.Stop() - schedule, ts, err := NewSchedule("* * * * * * *", mockTime.Now().UTC()) - if err != nil { - t.Fatal(err) - } - - err = sch.Schedule(mockSchedulable{id: 1, schedule: schedule, offset: time.Second, lastScheduled: ts.UTC()}) - if err != nil { - t.Fatal(err) - } - go func() { - sch.mu.Lock() - mockTime.Set(mockTime.Now().UTC().Add(2 * time.Second)) - sch.mu.Unlock() - }() - - select { - case <-c: - case <-time.After(6 * time.Second): - t.Fatalf("test timed out, it should have fired but didn't") - } - if err := sch.Release(1); err != nil { - t.Error(err) - } - - go func() { - sch.mu.Lock() - mockTime.Set(mockTime.Now().UTC().Add(6 * time.Second)) - sch.mu.Unlock() - }() - - select { - case <-c: - t.Fatal("expected test not to fire here, because task was released, but it did anyway") - case <-time.After(2 * time.Second): - } -} - -func mustCron(s string) Schedule { - cr, err := cron.ParseUTC(s) - if err != nil { - panic(err) - } - return Schedule{cron: cr} -} - -func TestNewSchedule(t *testing.T) { - tests := []struct { - name string - unparsed string - lastScheduledAt time.Time - want Schedule - want1 time.Time - wantErr bool - }{ - { - name: "bad cron", - unparsed: "this is not a cron string", - lastScheduledAt: time.Now(), - wantErr: true, - }, - { - name: "align to minute", - unparsed: "@every 1m", - lastScheduledAt: time.Date(2016, 01, 01, 01, 10, 23, 1234567, time.UTC), - want: mustCron("@every 1m"), - want1: time.Date(2016, 01, 01, 01, 10, 0, 0, time.UTC), - }, - { - name: "align to minute with @every 7m", - unparsed: "@every 7m", - lastScheduledAt: time.Date(2016, 01, 01, 01, 10, 23, 1234567, time.UTC), - want: mustCron("@every 7m"), - want1: time.Date(2016, 01, 01, 01, 4, 0, 0, time.UTC), - }, - - { - name: "align to hour", - unparsed: "@every 1h", - lastScheduledAt: time.Date(2016, 01, 01, 01, 10, 23, 1234567, time.UTC), - want: mustCron("@every 1h"), - want1: time.Date(2016, 01, 01, 01, 0, 0, 0, time.UTC), - }, - { - name: "align to hour @every 3h", - unparsed: "@every 3h", - lastScheduledAt: time.Date(2016, 01, 01, 01, 10, 23, 1234567, time.UTC), - want: mustCron("@every 3h"), - want1: time.Date(2016, 01, 01, 00, 0, 0, 0, time.UTC), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, got1, err := NewSchedule(tt.unparsed, tt.lastScheduledAt) - if (err != nil) != tt.wantErr { - t.Errorf("NewSchedule() error = %v, wantErr %v", err, tt.wantErr) - return - } - if err != nil { - return - } - if 
!reflect.DeepEqual(got, tt.want) { - t.Errorf("NewSchedule() got = %v, want %v", got, tt.want) - } - if !reflect.DeepEqual(got1, tt.want1) { - t.Errorf("NewSchedule() got1 = %v, want %v", got1, tt.want1) - } - }) - } -} diff --git a/task/backend/scheduler/treescheduler.go b/task/backend/scheduler/treescheduler.go deleted file mode 100644 index 31f02fde050..00000000000 --- a/task/backend/scheduler/treescheduler.go +++ /dev/null @@ -1,418 +0,0 @@ -package scheduler - -import ( - "context" - "encoding/binary" - "errors" - "sync" - "time" - - "github.com/benbjohnson/clock" - "github.com/cespare/xxhash" - "github.com/google/btree" -) - -const ( - // degreeBtreeScheduled is the btree degree for the btree internal to the tree scheduler. - // it is purely a performance tuning parameter, but required by github.com/google/btree - degreeBtreeScheduled = 3 // TODO(docmerlin): find the best number for this, its purely a perf optimization - - // defaultMaxWorkers is a constant that sets the default number of maximum workers for a TreeScheduler - defaultMaxWorkers = 128 -) - -// TreeScheduler is a Scheduler based on a btree. -// It calls Executor in-order per ID. That means you are guaranteed that for a specific ID, -// -// - The scheduler should, after creation, automatically call ExecutorFunc, when a task should run as defined by its Schedulable. -// -// - the scheduler's should not be able to get into a state where blocks Release and Schedule indefinitely. -// -// - Schedule should add a Schedulable to being scheduled, and Release should remove a task from being scheduled. -// -// - Calling of ExecutorFunc should be serial in time on a per taskID basis. I.E.: the run at 12:00 will go before the run at 12:01. -// -// Design: -// -// The core of the scheduler is a btree keyed by time, a nonce, and a task ID, and a map keyed by task ID and containing a -// nonce and a time (called a uniqueness index from now on). -// The map is to ensure task uniqueness in the tree, so we can replace or delete tasks in the tree. -// -// Scheduling in the tree consists of a main loop that feeds a fixed set of workers, each with their own communication channel. -// Distribution is handled by hashing the TaskID (to ensure uniform distribution) and then distributing over those channels -// evenly based on the hashed ID. This is to ensure that all tasks of the same ID go to the same worker. -// -// The workers call ExecutorFunc handle any errors and update the LastScheduled time internally and also via the Checkpointer. -// -// The main loop: -// -// The main loop waits on a time.Timer to grab the task with the minimum time. Once it successfully grabs a task ready -// to trigger, it will start walking the btree from the item nearest -// -// Putting a task into the scheduler: -// -// Adding a task to the scheduler acquires a write lock, grabs the task from the uniqueness map, and replaces the item -// in the uniqueness index and btree. If new task would trigger sooner than the current soonest triggering task, it -// replaces the Timer when added to the scheduler. Finally it releases the write lock. -// -// Removing a task from the scheduler: -// -// Removing a task from the scheduler acquires a write lock, deletes the task from the uniqueness index and from the -// btree, then releases the lock. We do not have to readjust the time on delete, because, if the minimum task isn't -// ready yet, the main loop just resets the timer and keeps going. 
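
[Editorial aside] The design notes above describe distributing work by hashing the task ID and using the hash to pick a worker channel, so every run of a given task lands on the same worker and stays ordered. The sketch below isolates that selection step with the same xxhash-over-little-endian-ID idea; the worker count is arbitrary and the upstream import path github.com/cespare/xxhash is used here rather than the vendored path in the diff.

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/cespare/xxhash"
)

// workerFor maps a task ID onto one of n worker channels; the same ID always
// maps to the same index, which is what keeps per-task execution in order.
func workerFor(id uint64, n int) int {
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], id)
	return int(xxhash.Sum64(buf[:]) % uint64(n))
}

func main() {
	const workers = 8
	for _, id := range []uint64{1, 2, 3, 1} {
		fmt.Printf("task %d -> worker %d\n", id, workerFor(id, workers))
	}
	// task 1 prints the same worker index both times.
}
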
-type TreeScheduler struct { - mu sync.RWMutex - priorityQueue *btree.BTree - nextTime map[ID]int64 // we need this index so we can delete items from the scheduled - when time.Time - executor Executor - onErr ErrorFunc - time clock.Clock - timer *clock.Timer - done chan struct{} - workchans []chan Item - wg sync.WaitGroup - checkpointer SchedulableService - items *itemList - - sm *SchedulerMetrics -} - -// ErrorFunc is a function for error handling. It is a good way to inject logging into a TreeScheduler. -type ErrorFunc func(ctx context.Context, taskID ID, scheduledFor time.Time, err error) - -type treeSchedulerOptFunc func(t *TreeScheduler) error - -// WithOnErrorFn is an option that sets the error function that gets called when there is an error in a TreeScheduler. -// its useful for injecting logging or special error handling. -func WithOnErrorFn(fn ErrorFunc) treeSchedulerOptFunc { - return func(t *TreeScheduler) error { - t.onErr = fn - return nil - } -} - -// WithMaxConcurrentWorkers is an option that sets the max number of concurrent workers that a TreeScheduler will use. -func WithMaxConcurrentWorkers(n int) treeSchedulerOptFunc { - return func(t *TreeScheduler) error { - t.workchans = make([]chan Item, n) - return nil - } -} - -// WithTime is an option for NewScheduler that allows you to inject a clock.Clock from ben johnson's github.com/benbjohnson/clock library, for testing purposes. -func WithTime(t clock.Clock) treeSchedulerOptFunc { - return func(sch *TreeScheduler) error { - sch.time = t - return nil - } -} - -// NewScheduler gives us a new TreeScheduler and SchedulerMetrics when given an Executor, a SchedulableService, and zero or more options. -// Schedulers should be initialized with this function. -func NewScheduler(executor Executor, checkpointer SchedulableService, opts ...treeSchedulerOptFunc) (*TreeScheduler, *SchedulerMetrics, error) { - s := &TreeScheduler{ - executor: executor, - priorityQueue: btree.New(degreeBtreeScheduled), - nextTime: map[ID]int64{}, - onErr: func(_ context.Context, _ ID, _ time.Time, _ error) {}, - time: clock.New(), - done: make(chan struct{}, 1), - checkpointer: checkpointer, - items: &itemList{}, - } - - // apply options - for i := range opts { - if err := opts[i](s); err != nil { - return nil, nil, err - } - } - if s.workchans == nil { - s.workchans = make([]chan Item, defaultMaxWorkers) - - } - - s.wg.Add(len(s.workchans)) - for i := 0; i < len(s.workchans); i++ { - s.workchans[i] = make(chan Item) - go s.work(context.Background(), s.workchans[i]) - } - - s.sm = NewSchedulerMetrics(s) - s.when = time.Time{} - s.timer = s.time.Timer(0) - s.timer.Stop() - // because a stopped timer will wait forever, this allows us to wait for items to be added before triggering. 
- - if executor == nil { - return nil, nil, errors.New("executor must be a non-nil function") - } - s.wg.Add(1) - go func() { - defer s.wg.Done() - schedulerLoop: - for { - select { - case <-s.done: - s.mu.Lock() - s.timer.Stop() - // close workchans - for i := range s.workchans { - close(s.workchans[i]) - } - s.mu.Unlock() - return - case <-s.timer.C: - for { // this for loop is a work around to the way clock's mock works when you reset duration 0 in a different thread than you are calling your clock.Set - s.mu.Lock() - min := s.priorityQueue.Min() - if min == nil { // grab a new item, because there could be a different item at the top of the queue - s.when = time.Time{} - s.mu.Unlock() - continue schedulerLoop - } - it := min.(Item) - if ts := s.time.Now().UTC(); it.When().After(ts) { - s.timer.Reset(ts.Sub(it.When())) - s.mu.Unlock() - continue schedulerLoop - } - s.process() - min = s.priorityQueue.Min() - if min == nil { // grab a new item, because there could be a different item at the top of the queue after processing - s.when = time.Time{} - s.mu.Unlock() - continue schedulerLoop - } - it = min.(Item) - s.when = it.When() - until := s.when.Sub(s.time.Now()) - - if until > 0 { - s.resetTimer(until) // we can reset without a stop because we know it is fired here - s.mu.Unlock() - continue schedulerLoop - } - s.mu.Unlock() - } - } - } - }() - return s, s.sm, nil -} - -func (s *TreeScheduler) Stop() { - s.mu.Lock() - close(s.done) - s.mu.Unlock() - s.wg.Wait() -} - -// itemList is a list of items for deleting and inserting. We have to do them separately instead of just a re-add, -// because usually the items key must be changed between the delete and insert -type itemList struct { - toInsert []Item - toDelete []Item -} - -func (s *TreeScheduler) process() { - // Reset the length of the slice in preparation of the next iterator. - s.items.toDelete = s.items.toDelete[:0] - s.items.toInsert = s.items.toInsert[:0] - - toReAdd := s.items - iter := s.iterator(s.time.Now()) - s.priorityQueue.Ascend(iter) - for i := range toReAdd.toDelete { - delete(s.nextTime, toReAdd.toDelete[i].id) - s.priorityQueue.Delete(toReAdd.toDelete[i]) - } - for i := range toReAdd.toInsert { - s.nextTime[toReAdd.toInsert[i].id] = toReAdd.toInsert[i].when - s.priorityQueue.ReplaceOrInsert(toReAdd.toInsert[i]) - } -} - -func (s *TreeScheduler) resetTimer(whenFromNow time.Duration) { - s.when = s.time.Now().Add(whenFromNow) - s.timer.Reset(whenFromNow) -} - -func (s *TreeScheduler) iterator(ts time.Time) btree.ItemIterator { - return func(i btree.Item) bool { - if i == nil { - return false - } - it := i.(Item) // we want it to panic if things other than Items are populating the scheduler, as it is something we can't recover from. - if time.Unix(it.next+it.Offset, 0).After(ts) { - return false - } - // distribute to the right worker. - { - buf := [8]byte{} - binary.LittleEndian.PutUint64(buf[:], uint64(it.id)) - wc := xxhash.Sum64(buf[:]) % uint64(len(s.workchans)) // we just hash so that the number is uniformly distributed - select { - case s.workchans[wc] <- it: - s.items.toDelete = append(s.items.toDelete, it) - if err := it.updateNext(); err != nil { - // in this error case we can't schedule next, so we have to drop the task - s.onErr(context.Background(), it.id, it.Next(), &ErrUnrecoverable{err}) - return true - } - s.items.toInsert = append(s.items.toInsert, it) - - case <-s.done: - return false - default: - return true - } - } - return true - } -} - -// When gives us the next time the scheduler will run a task. 
-func (s *TreeScheduler) When() time.Time { - s.mu.RLock() - w := s.when - s.mu.RUnlock() - return w -} - -func (s *TreeScheduler) release(taskID ID) { - when, ok := s.nextTime[taskID] - if !ok { - return - } - - // delete the old task run time - s.priorityQueue.Delete(Item{id: taskID, when: when}) - delete(s.nextTime, taskID) -} - -// Release releases a task. -// Release also cancels the running task. -// Task deletion would be faster if the tree supported deleting ranges. -func (s *TreeScheduler) Release(taskID ID) error { - s.sm.release(taskID) - s.mu.Lock() - s.release(taskID) - s.mu.Unlock() - return nil -} - -// work does work from the channel and checkpoints it. -func (s *TreeScheduler) work(ctx context.Context, ch chan Item) { - var it Item - defer func() { - s.wg.Done() - }() - for it = range ch { - t := time.Unix(it.next, 0) - err := func() (err error) { - defer func() { - if r := recover(); r != nil { - err = &ErrUnrecoverable{errors.New("executor panicked")} - } - }() - // report the difference between when the item was supposed to be scheduled and now - s.sm.reportScheduleDelay(time.Since(it.Next())) - preExec := time.Now() - // execute - err = s.executor.Execute(ctx, it.id, t, it.When()) - // report how long execution took - s.sm.reportExecution(err, time.Since(preExec)) - return err - }() - if err != nil { - s.onErr(ctx, it.id, it.Next(), err) - } - // TODO(docmerlin): we can increase performance by making the call to UpdateLastScheduled async - if err := s.checkpointer.UpdateLastScheduled(ctx, it.id, t); err != nil { - s.onErr(ctx, it.id, it.Next(), err) - } - } -} - -// Schedule put puts a Schedulable on the TreeScheduler. -func (s *TreeScheduler) Schedule(sch Schedulable) error { - s.sm.schedule(sch.ID()) - it := Item{ - cron: sch.Schedule(), - id: sch.ID(), - Offset: int64(sch.Offset().Seconds()), - //last: sch.LastScheduled().Unix(), - } - nt, err := it.cron.Next(sch.LastScheduled()) - if err != nil { - s.sm.scheduleFail(it.id) - s.onErr(context.Background(), it.id, time.Time{}, err) - return err - } - it.next = nt.UTC().Unix() - it.when = it.next + it.Offset - - s.mu.Lock() - defer s.mu.Unlock() - - nt = nt.Add(sch.Offset()) - if s.when.IsZero() || s.when.After(nt) { - s.when = nt - s.timer.Stop() - until := s.when.Sub(s.time.Now()) - if until <= 0 { - s.timer.Reset(0) - } else { - s.timer.Reset(s.when.Sub(s.time.Now())) - } - } - nextTime, ok := s.nextTime[it.id] - - if ok { - // delete the old task run time - s.priorityQueue.Delete(Item{ - when: nextTime, - id: it.id, - }) - } - s.nextTime[it.id] = it.next + it.Offset - - // insert the new task run time - s.priorityQueue.ReplaceOrInsert(it) - return nil -} - -// Item is a task in the scheduler. 
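Putting the public methods above together, a caller drives the scheduler roughly as in the sketch below; `task` is a placeholder for any value satisfying the package's `Schedulable` interface (whose `ID`, `Schedule`, `Offset`, and `LastScheduled` methods are what `Schedule` consults).

```go
// Usage sketch; "task" is an assumed Schedulable implementation.
if err := sch.Schedule(task); err != nil {
	// Schedule fails when the cron expression cannot produce a next run time.
	log.Fatalf("could not schedule task: %v", err)
}

fmt.Println("next scheduled run:", sch.When()) // zero time when nothing is queued

// Remove the task from the scheduler when it is no longer needed.
if err := sch.Release(task.ID()); err != nil {
	log.Printf("release failed: %v", err)
}

sch.Stop() // closes the worker channels and waits for in-flight work to drain
```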
-type Item struct { - when int64 - id ID - cron Schedule - next int64 - Offset int64 -} - -func (it Item) Next() time.Time { - return time.Unix(it.next, 0) -} - -func (it Item) When() time.Time { - return time.Unix(it.when, 0) -} - -// Less tells us if one Item is less than another -func (it Item) Less(bItem btree.Item) bool { - it2 := bItem.(Item) - return it.when < it2.when || ((it.when == it2.when) && it.id < it2.id) -} - -func (it *Item) updateNext() error { - newNext, err := it.cron.Next(time.Unix(it.next, 0)) - if err != nil { - return err - } - it.next = newNext.UTC().Unix() - it.when = it.next + it.Offset - return nil -} diff --git a/task/backend/task.go b/task/backend/task.go deleted file mode 100644 index 67e7fb18ea2..00000000000 --- a/task/backend/task.go +++ /dev/null @@ -1,32 +0,0 @@ -package backend - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -// TaskControlService is a low-level controller interface, intended to be passed to -// task executors and schedulers, which allows creation, completion, and status updates of runs. -type TaskControlService interface { - - // CreateRun creates a run with a scheduled for time. - CreateRun(ctx context.Context, taskID platform.ID, scheduledFor time.Time, runAt time.Time) (*taskmodel.Run, error) - - CurrentlyRunning(ctx context.Context, taskID platform.ID) ([]*taskmodel.Run, error) - ManualRuns(ctx context.Context, taskID platform.ID) ([]*taskmodel.Run, error) - - // StartManualRun pulls a manual run from the list and moves it to currently running. - StartManualRun(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) - - // FinishRun removes runID from the list of running tasks and if its `ScheduledFor` is later then last completed update it. - FinishRun(ctx context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) - - // UpdateRunState sets the run state at the respective time. - UpdateRunState(ctx context.Context, taskID, runID platform.ID, when time.Time, state taskmodel.RunStatus) error - - // AddRunLog adds a log line to the run. - AddRunLog(ctx context.Context, taskID, runID platform.ID, when time.Time, log string) error -} diff --git a/task/mock/executor.go b/task/mock/executor.go deleted file mode 100644 index f1eb25a9c01..00000000000 --- a/task/mock/executor.go +++ /dev/null @@ -1,128 +0,0 @@ -// Package mock contains mock implementations of different task interfaces. -package mock - -import ( - "context" - "errors" - "sync" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/task/backend/executor" - "github.com/influxdata/influxdb/v2/task/backend/scheduler" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -type promise struct { - run *taskmodel.Run - hangingFor time.Duration - - done chan struct{} - err error - - ctx context.Context - cancelFunc context.CancelFunc -} - -// ID is the id of the run that was created -func (p *promise) ID() platform.ID { - return p.run.ID -} - -// Cancel is used to cancel a executing query -func (p *promise) Cancel(ctx context.Context) { - // call cancelfunc - p.cancelFunc() - - // wait for ctx.Done or p.Done - select { - case <-p.Done(): - case <-ctx.Done(): - } -} - -// Done provides a channel that closes on completion of a promise -func (p *promise) Done() <-chan struct{} { - return p.done -} - -// Error returns the error resulting from a run execution. 
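For orientation, the `TaskControlService` interface above is driven through a create, update-state, finish sequence. The sketch below shows the expected call order for one successful run; `tcs`, `taskID`, and the timestamps are placeholders, and the sequence mirrors how the service tests later in this diff exercise the interface.

```go
// Sketch of a single run's lifecycle against a TaskControlService implementation.
run, err := tcs.CreateRun(ctx, taskID, scheduledFor, runAt)
if err != nil {
	return err
}

// Mark the run as started and attach log lines as it progresses.
if err := tcs.UpdateRunState(ctx, taskID, run.ID, time.Now(), taskmodel.RunStarted); err != nil {
	return err
}
if err := tcs.AddRunLog(ctx, taskID, run.ID, time.Now(), "run started"); err != nil {
	return err
}

// Record the terminal state, then finish the run so the task's
// latest-completed time can advance.
if err := tcs.UpdateRunState(ctx, taskID, run.ID, time.Now(), taskmodel.RunSuccess); err != nil {
	return err
}
_, err = tcs.FinishRun(ctx, taskID, run.ID)
return err
```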
-// If the execution is not complete error waits on Done(). -func (p *promise) Error() error { - <-p.done - return p.err -} - -func (e *Executor) createPromise(ctx context.Context, run *taskmodel.Run) (*promise, error) { - ctx, cancel := context.WithCancel(ctx) - p := &promise{ - run: run, - done: make(chan struct{}), - ctx: ctx, - cancelFunc: cancel, - hangingFor: e.hangingFor, - } - - go func() { - time.Sleep(p.hangingFor) - close(p.done) - }() - - return p, nil -} - -type Executor struct { - mu sync.Mutex - wg sync.WaitGroup - hangingFor time.Duration - - // Forced error for next call to Execute. - nextExecuteErr error - - ExecutedChan chan scheduler.ID -} - -var _ scheduler.Executor = (*Executor)(nil) - -func NewExecutor() *Executor { - return &Executor{ - hangingFor: time.Second, - ExecutedChan: make(chan scheduler.ID, 10), - } -} - -func (e *Executor) Execute(ctx context.Context, id scheduler.ID, scheduledAt time.Time, runAt time.Time) error { - - select { - case e.ExecutedChan <- scheduler.ID(id): - default: - return errors.New("could not add task ID to executedChan") - } - - return nil -} - -func (e *Executor) ManualRun(ctx context.Context, id platform.ID, runID platform.ID) (executor.Promise, error) { - run := &taskmodel.Run{ID: runID, TaskID: id, StartedAt: time.Now().UTC()} - p, err := e.createPromise(ctx, run) - return p, err -} - -func (e *Executor) ScheduleManualRun(ctx context.Context, id platform.ID, runID platform.ID) error { - return nil -} - -func (e *Executor) Wait() { - e.wg.Wait() -} - -func (e *Executor) Cancel(context.Context, platform.ID) error { - return nil -} - -// FailNextCallToExecute causes the next call to e.Execute to unconditionally return err. -func (e *Executor) FailNextCallToExecute(err error) { - e.mu.Lock() - e.nextExecuteErr = err - e.mu.Unlock() -} diff --git a/task/mock/task_control_service.go b/task/mock/task_control_service.go deleted file mode 100644 index 98662ef5dc4..00000000000 --- a/task/mock/task_control_service.go +++ /dev/null @@ -1,225 +0,0 @@ -package mock - -import ( - "context" - "fmt" - "sort" - "sync" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/task/backend" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -var idgen = snowflake.NewDefaultIDGenerator() - -// TaskControlService is a mock implementation of TaskControlService (used by NewScheduler). -type TaskControlService struct { - mu sync.Mutex - // Map of stringified task ID to last ID used for run. - runs map[platform.ID]map[platform.ID]*taskmodel.Run - - // Map of stringified, concatenated task and platform ID, to runs that have been created. - created map[string]*taskmodel.Run - - // Map of stringified task ID to task meta. - tasks map[platform.ID]*taskmodel.Task - manualRuns []*taskmodel.Run - // Map of task ID to total number of runs created for that task. - totalRunsCreated map[platform.ID]int - finishedRuns map[platform.ID]*taskmodel.Run -} - -var _ backend.TaskControlService = (*TaskControlService)(nil) - -func NewTaskControlService() *TaskControlService { - return &TaskControlService{ - runs: make(map[platform.ID]map[platform.ID]*taskmodel.Run), - finishedRuns: make(map[platform.ID]*taskmodel.Run), - tasks: make(map[platform.ID]*taskmodel.Task), - created: make(map[string]*taskmodel.Run), - totalRunsCreated: make(map[platform.ID]int), - } -} - -// SetTask sets the task. 
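A test might drive the mock `Executor` above as in the sketch below; the task ID and assertions are illustrative.

```go
// Sketch: exercising the mock Executor in a test.
e := mock.NewExecutor()

if err := e.Execute(context.Background(), scheduler.ID(1), time.Now(), time.Now()); err != nil {
	t.Fatal(err)
}

// Each successful Execute call records the task ID on ExecutedChan (buffered to 10).
select {
case id := <-e.ExecutedChan:
	if id != scheduler.ID(1) {
		t.Fatalf("executed unexpected task %v", id)
	}
default:
	t.Fatal("expected an execution to be recorded")
}
```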
-// SetTask must be called before CreateNextRun, for a given task ID. -func (d *TaskControlService) SetTask(task *taskmodel.Task) { - d.mu.Lock() - defer d.mu.Unlock() - - d.tasks[task.ID] = task -} - -func (d *TaskControlService) SetManualRuns(runs []*taskmodel.Run) { - d.manualRuns = runs -} - -func (t *TaskControlService) CreateRun(_ context.Context, taskID platform.ID, scheduledFor time.Time, runAt time.Time) (*taskmodel.Run, error) { - t.mu.Lock() - defer t.mu.Unlock() - - runID := idgen.ID() - runs, ok := t.runs[taskID] - if !ok { - runs = make(map[platform.ID]*taskmodel.Run) - } - runs[runID] = &taskmodel.Run{ - ID: runID, - ScheduledFor: scheduledFor, - } - t.runs[taskID] = runs - return runs[runID], nil -} - -func (t *TaskControlService) StartManualRun(_ context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - t.mu.Lock() - defer t.mu.Unlock() - - var run *taskmodel.Run - for i, r := range t.manualRuns { - if r.ID == runID { - run = r - t.manualRuns = append(t.manualRuns[:i], t.manualRuns[i+1:]...) - } - } - if run == nil { - return nil, taskmodel.ErrRunNotFound - } - return run, nil -} - -func (d *TaskControlService) FinishRun(_ context.Context, taskID, runID platform.ID) (*taskmodel.Run, error) { - d.mu.Lock() - defer d.mu.Unlock() - - tid := taskID - rid := runID - r := d.runs[tid][rid] - delete(d.runs[tid], rid) - t := d.tasks[tid] - - if r.ScheduledFor.After(t.LatestCompleted) { - t.LatestCompleted = r.ScheduledFor - } - - d.finishedRuns[rid] = r - delete(d.created, tid.String()+rid.String()) - return r, nil -} - -func (t *TaskControlService) CurrentlyRunning(ctx context.Context, taskID platform.ID) ([]*taskmodel.Run, error) { - t.mu.Lock() - defer t.mu.Unlock() - rtn := []*taskmodel.Run{} - for _, run := range t.runs[taskID] { - rtn = append(rtn, run) - } - return rtn, nil -} - -func (t *TaskControlService) ManualRuns(ctx context.Context, taskID platform.ID) ([]*taskmodel.Run, error) { - t.mu.Lock() - defer t.mu.Unlock() - - if t.manualRuns != nil { - return t.manualRuns, nil - } - return []*taskmodel.Run{}, nil -} - -// UpdateRunState sets the run state at the respective time. -func (d *TaskControlService) UpdateRunState(ctx context.Context, taskID, runID platform.ID, when time.Time, state taskmodel.RunStatus) error { - d.mu.Lock() - defer d.mu.Unlock() - - run, ok := d.runs[taskID][runID] - if !ok { - panic("run state called without a run") - } - switch state { - case taskmodel.RunStarted: - run.StartedAt = when - case taskmodel.RunSuccess, taskmodel.RunFail, taskmodel.RunCanceled: - run.FinishedAt = when - case taskmodel.RunScheduled: - // nothing - default: - panic("invalid status") - } - run.Status = state.String() - return nil -} - -// AddRunLog adds a log line to the run. -func (d *TaskControlService) AddRunLog(ctx context.Context, taskID, runID platform.ID, when time.Time, log string) error { - d.mu.Lock() - defer d.mu.Unlock() - - run := d.runs[taskID][runID] - if run == nil { - panic("cannot add a log to a non existent run") - } - run.Log = append(run.Log, taskmodel.Log{RunID: runID, Time: when.Format(time.RFC3339Nano), Message: log}) - return nil -} - -func (d *TaskControlService) CreatedFor(taskID platform.ID) []*taskmodel.Run { - d.mu.Lock() - defer d.mu.Unlock() - - var qrs []*taskmodel.Run - for _, qr := range d.created { - if qr.TaskID == taskID { - qrs = append(qrs, qr) - } - } - - return qrs -} - -// TotalRunsCreatedForTask returns the number of runs created for taskID. 
-func (d *TaskControlService) TotalRunsCreatedForTask(taskID platform.ID) int { - d.mu.Lock() - defer d.mu.Unlock() - - return d.totalRunsCreated[taskID] -} - -// PollForNumberCreated blocks for a small amount of time waiting for exactly the given count of created and unfinished runs for the given task ID. -// If the expected number isn't found in time, it returns an error. -// -// Because the scheduler and executor do a lot of state changes asynchronously, this is useful in test. -func (d *TaskControlService) PollForNumberCreated(taskID platform.ID, count int) ([]*taskmodel.Run, error) { - const numAttempts = 50 - actualCount := 0 - var created []*taskmodel.Run - for i := 0; i < numAttempts; i++ { - time.Sleep(2 * time.Millisecond) // we sleep even on first so it becomes more likely that we catch when too many are produced. - created = d.CreatedFor(taskID) - actualCount = len(created) - if actualCount == count { - return created, nil - } - } - return created, fmt.Errorf("did not see count of %d created run(s) for task with ID %s in time, instead saw %d", count, taskID, actualCount) // we return created anyways, to make it easier to debug -} - -func (d *TaskControlService) FinishedRun(runID platform.ID) *taskmodel.Run { - d.mu.Lock() - defer d.mu.Unlock() - - return d.finishedRuns[runID] -} - -func (d *TaskControlService) FinishedRuns() []*taskmodel.Run { - rtn := []*taskmodel.Run{} - for _, run := range d.finishedRuns { - rtn = append(rtn, run) - } - - sort.Slice(rtn, func(i, j int) bool { return rtn[i].ScheduledFor.Before(rtn[j].ScheduledFor) }) - return rtn -} diff --git a/task/options/options.go b/task/options/options.go deleted file mode 100644 index 4d29e3d8c9e..00000000000 --- a/task/options/options.go +++ /dev/null @@ -1,418 +0,0 @@ -// Package options provides ways to extract the task-related options from a Flux script. -package options - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - "github.com/influxdata/cron" - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/ast/edit" - "github.com/influxdata/flux/interpreter" - "github.com/influxdata/flux/parser" - "github.com/influxdata/flux/values" - "github.com/influxdata/influxdb/v2/pkg/pointer" -) - -const maxConcurrency = 100 -const maxRetry = 10 - -// Options are the task-related options that can be specified in a Flux script. -type Options struct { - // Name is a non optional name designator for each task. - Name string `json:"name,omitempty"` - - // Cron is a cron style time schedule that can be used in place of Every. - Cron string `json:"cron,omitempty"` - - // Every represents a fixed period to repeat execution. - // this can be unmarshaled from json as a string i.e.: "1d" will unmarshal as 1 day - Every Duration `json:"every,omitempty"` - - // Offset represents a delay before execution. - // this can be unmarshaled from json as a string i.e.: "1d" will unmarshal as 1 day - Offset *Duration `json:"offset,omitempty"` - - Concurrency *int64 `json:"concurrency,omitempty"` - - Retry *int64 `json:"retry,omitempty"` -} - -// Duration is a time span that supports the same units as the flux parser's time duration, as well as negative length time spans. -type Duration struct { - Node ast.DurationLiteral -} - -func (a Duration) String() string { - return parser.FormatDuration(&a.Node) -} - -// Parse parses a string into a Duration. 
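Because `Every` and `Offset` implement text (un)marshalling (shown below), an `Options` value round-trips through JSON with durations written as strings such as "1d" or "1h"; the small sketch that follows uses illustrative values only.

```go
// Sketch: Options durations are encoded as strings in JSON.
var opts options.Options
data := []byte(`{"name":"cpu","every":"1h","offset":"5m","concurrency":2}`)
if err := json.Unmarshal(data, &opts); err != nil {
	log.Fatal(err)
}
fmt.Println(opts.Name, opts.Every.String(), opts.Offset.String(), *opts.Concurrency)
// cpu 1h 5m 2
```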
-func (a *Duration) Parse(s string) error { - q, err := ParseSignedDuration(s) - if err != nil { - return errTaskInvalidDuration(err) - } - a.Node = *q - return nil -} - -// MustParseDuration parses a string and returns a duration. It panics if there is an error. -func MustParseDuration(s string) (dur *Duration) { - dur = &Duration{} - if err := dur.Parse(s); err != nil { - panic(err) - } - return dur -} - -// UnmarshalText unmarshals text into a Duration. -func (a *Duration) UnmarshalText(text []byte) error { - q, err := ParseSignedDuration(string(text)) - if err != nil { - return err - } - a.Node = *q - return nil -} - -// MarshalText marshals text into a Duration. -func (a Duration) MarshalText() ([]byte, error) { - return []byte(a.String()), nil -} - -// IsZero checks if each segment of the duration is zero, it doesn't check if the Duration sums to zero, just if each internal duration is zero. -func (a *Duration) IsZero() bool { - for i := range a.Node.Values { - if a.Node.Values[i].Magnitude != 0 { - return false - } - } - return true -} - -// DurationFrom gives us a time.Duration from a time. -// Currently because of how flux works, this is just an approfimation for any time unit larger than hours. -func (a *Duration) DurationFrom(t time.Time) (time.Duration, error) { - return ast.DurationFrom(&a.Node, t) -} - -// Add adds the duration to a time. -func (a *Duration) Add(t time.Time) (time.Time, error) { - d, err := ast.DurationFrom(&a.Node, t) - if err != nil { - return time.Time{}, err - } - return t.Add(d), nil -} - -// Clear clears out all options in the options struct, it us useful if you wish to reuse it. -func (o *Options) Clear() { - o.Name = "" - o.Cron = "" - o.Every = Duration{} - o.Offset = nil - o.Concurrency = nil - o.Retry = nil -} - -// IsZero tells us if the options has been zeroed out. -func (o *Options) IsZero() bool { - return o.Name == "" && - o.Cron == "" && - o.Every.IsZero() && - (o.Offset == nil || o.Offset.IsZero()) && - o.Concurrency == nil && - o.Retry == nil -} - -// All the task option names we accept. -const ( - optName = "name" - optCron = "cron" - optEvery = "every" - optOffset = "offset" - optConcurrency = "concurrency" - optRetry = "retry" -) - -// FluxLanguageService is a service for interacting with flux code. -type FluxLanguageService interface { - // Parse will take flux source code and produce a package. - // If there are errors when parsing, the first error is returned. - // An ast.Package may be returned when a parsing error occurs, - // but it may be null if parsing didn't even occur. - Parse(source string) (*ast.Package, error) - - // EvalAST will evaluate and run an AST. - EvalAST(ctx context.Context, astPkg *ast.Package) ([]interpreter.SideEffect, values.Scope, error) -} - -// FromScriptAST extracts Task options from a Flux script using only the AST (no -// evaluation of the script). Using AST here allows us to avoid having to -// contend with functions that aren't available in some parsing contexts (within -// Gateway for example). 
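The sketch below exercises the Duration helpers above: parsing, calendar-aware conversion to a `time.Duration` relative to a point in time, and the negative durations supported by `ParseSignedDuration`; the values are illustrative.

```go
// Sketch: working with the Duration wrapper.
every := options.MustParseDuration("1d1h") // panics on invalid input, handy in tests

now := time.Now().UTC()
d, err := every.DurationFrom(now) // calendar-aware: the length of "1d" depends on "now"
if err != nil {
	log.Fatal(err)
}
fmt.Println(d) // 25h0m0s when the day is 24 hours long

next, err := every.Add(now) // now + 1d1h
if err != nil {
	log.Fatal(err)
}
fmt.Println(next.Sub(now) == d) // true

neg := options.MustParseDuration("-1m")
nd, _ := neg.DurationFrom(now)
fmt.Println(nd) // -1m0s
```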
-func FromScriptAST(lang FluxLanguageService, script string) (Options, error) { - opts := Options{ - Retry: pointer.Int64(1), - Concurrency: pointer.Int64(1), - } - - fluxAST, err := parse(lang, script) - if err != nil { - return opts, err - } - - if len(fluxAST.Files) == 0 { - return opts, ErrNoASTFile - } - - file := fluxAST.Files[0] - if hasDuplicateOptions(file, "task") { - return opts, ErrMultipleTaskOptionsDefined - } - - obj, err := edit.GetOption(file, "task") - if err != nil { - return opts, ErrNoTaskOptionsDefined - } - - objExpr, ok := obj.(*ast.ObjectExpression) - if !ok { - return opts, errTaskOptionNotObjectExpression(objExpr.Type()) - } - - for _, fn := range taskOptionExtractors { - if err := fn(&opts, objExpr); err != nil { - return opts, err - } - } - - if err := opts.Validate(); err != nil { - return opts, err - } - - return opts, nil -} - -// hasDuplicateOptions determines whether or not there are multiple assignments -// to the same option variable. -// -// TODO(brett): This will be superceded by edit.HasDuplicateOptions once its available. -func hasDuplicateOptions(file *ast.File, name string) bool { - var n int - for _, st := range file.Body { - if val, ok := st.(*ast.OptionStatement); ok { - assign := val.Assignment - if va, ok := assign.(*ast.VariableAssignment); ok { - if va.ID.Name == name { - n++ - } - } - } - } - return n > 1 -} - -type extractFn func(*Options, *ast.ObjectExpression) error - -var taskOptionExtractors = []extractFn{ - extractNameOption, - extractScheduleOptions, - extractOffsetOption, - extractConcurrencyOption, - extractRetryOption, -} - -func extractNameOption(opts *Options, objExpr *ast.ObjectExpression) error { - nameExpr, err := edit.GetProperty(objExpr, optName) - if err != nil { - return errMissingRequiredTaskOption(optName) - } - nameStr, ok := nameExpr.(*ast.StringLiteral) - if !ok { - return errParseTaskOptionField(optName) - } - opts.Name = ast.StringFromLiteral(nameStr) - - return nil -} - -func extractScheduleOptions(opts *Options, objExpr *ast.ObjectExpression) error { - cronExpr, cronErr := edit.GetProperty(objExpr, optCron) - everyExpr, everyErr := edit.GetProperty(objExpr, optEvery) - if cronErr == nil && everyErr == nil { - return ErrDuplicateIntervalField - } - if cronErr != nil && everyErr != nil { - return errMissingRequiredTaskOption("cron or every") - } - - if cronErr == nil { - cronExprStr, ok := cronExpr.(*ast.StringLiteral) - if !ok { - return errParseTaskOptionField(optCron) - } - opts.Cron = ast.StringFromLiteral(cronExprStr) - } - - if everyErr == nil { - everyDur, ok := everyExpr.(*ast.DurationLiteral) - if !ok { - return errParseTaskOptionField(optEvery) - } - opts.Every = Duration{Node: *everyDur} - } - - return nil -} - -func extractOffsetOption(opts *Options, objExpr *ast.ObjectExpression) error { - offsetExpr, offsetErr := edit.GetProperty(objExpr, optOffset) - if offsetErr != nil { - return nil - } - - switch offsetExprV := offsetExpr.(type) { - case *ast.UnaryExpression: - offsetDur, err := ParseSignedDuration(offsetExprV.Loc.Source) - if err != nil { - return err - } - opts.Offset = &Duration{Node: *offsetDur} - case *ast.DurationLiteral: - opts.Offset = &Duration{Node: *offsetExprV} - default: - return errParseTaskOptionField(optOffset) - } - - return nil -} - -func extractConcurrencyOption(opts *Options, objExpr *ast.ObjectExpression) error { - concurExpr, err := edit.GetProperty(objExpr, optConcurrency) - if err != nil { - return nil - } - - concurInt, ok := concurExpr.(*ast.IntegerLiteral) - if !ok { - 
return errParseTaskOptionField(optConcurrency) - } - val := ast.IntegerFromLiteral(concurInt) - opts.Concurrency = &val - - return nil -} - -func extractRetryOption(opts *Options, objExpr *ast.ObjectExpression) error { - retryExpr, err := edit.GetProperty(objExpr, optRetry) - if err != nil { - return nil - } - - retryInt, ok := retryExpr.(*ast.IntegerLiteral) - if !ok { - return errParseTaskOptionField(optRetry) - } - val := ast.IntegerFromLiteral(retryInt) - opts.Retry = &val - - return nil -} - -// Validate returns an error if the options aren't valid. -func (o *Options) Validate() error { - now := time.Now() - var errs []string - if o.Name == "" { - errs = append(errs, "name required") - } - - cronPresent := o.Cron != "" - everyPresent := !o.Every.IsZero() - if cronPresent == everyPresent { - // They're both present or both missing. - errs = append(errs, "must specify exactly one of either cron or every") - } else if cronPresent { - _, err := cron.ParseUTC(o.Cron) - if err != nil { - errs = append(errs, "cron invalid: "+err.Error()) - } - } else if everyPresent { - every, err := o.Every.DurationFrom(now) - if err != nil { - return err - } - if every < time.Second { - errs = append(errs, "every option must be at least 1 second") - } else if every.Truncate(time.Second) != every { - errs = append(errs, "every option must be expressible as whole seconds") - } - } - if o.Offset != nil { - offset, err := o.Offset.DurationFrom(now) - if err != nil { - return err - } - if offset.Truncate(time.Second) != offset { - // For now, allowing negative offset delays. Maybe they're useful for forecasting? - errs = append(errs, "offset option must be expressible as whole seconds") - } - } - if o.Concurrency != nil { - if *o.Concurrency < 1 { - errs = append(errs, "concurrency must be at least 1") - } else if *o.Concurrency > maxConcurrency { - errs = append(errs, fmt.Sprintf("concurrency exceeded max of %d", maxConcurrency)) - } - } - if o.Retry != nil { - if *o.Retry < 1 { - errs = append(errs, "retry must be at least 1") - } else if *o.Retry > maxRetry { - errs = append(errs, fmt.Sprintf("retry exceeded max of %d", maxRetry)) - } - } - - if len(errs) == 0 { - return nil - } - - return fmt.Errorf("invalid options: %s", strings.Join(errs, ", ")) -} - -// EffectiveCronString returns the effective cron string of the options. -// If the cron option was specified, it is returned. -// If the every option was specified, it is converted into a cron string using "@every". -// Otherwise, the empty string is returned. -// The value of the offset option is not considered. -// TODO(docmerlin): create an EffectiveCronStringFrom(t time.Time) string, -// that works from a unit of time. -// Do not use this if you haven't checked for validity already. -func (o *Options) EffectiveCronString() string { - if o.Cron != "" { - return o.Cron - } - every, _ := o.Every.DurationFrom(time.Now()) // we can ignore errors here because we have already checked for validity. - if every > 0 { - return "@every " + o.Every.String() - } - return "" -} - -// parse will take flux source code and produce a package. -// If there are errors when parsing, the first error is returned. -// An ast.Package may be returned when a parsing error occurs, -// but it may be null if parsing didn't even occur. -// -// This will return an error if the FluxLanguageService is nil. 
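End to end, a task's options are pulled out of the Flux script, validated, and reduced to a cron expression for the scheduler. The sketch below strings the calls above together; the script and expected values mirror the patterns used by the tests later in this diff.

```go
// Sketch: extracting, validating, and normalizing task options from a Flux script.
script := `option task = {name: "cpu", every: 1h, offset: 5m}

from(bucket: "b")
    |> range(start: -1h)`

opts, err := options.FromScriptAST(fluxlang.DefaultService, script)
if err != nil {
	log.Fatal(err)
}
// Name: "cpu", Every: 1h, Offset: 5m; Concurrency and Retry default to 1.

// FromScriptAST has already validated, but Validate can also be called on
// hand-built Options values.
if err := opts.Validate(); err != nil {
	log.Fatal(err)
}

fmt.Println(opts.EffectiveCronString()) // "@every 1h" (a cron option would be returned verbatim)
```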
-func parse(lang FluxLanguageService, source string) (*ast.Package, error) { - if lang == nil { - return nil, errors.New("flux is not configured; cannot parse") - } - return lang.Parse(source) -} diff --git a/task/options/options_errors.go b/task/options/options_errors.go deleted file mode 100644 index f6306759449..00000000000 --- a/task/options/options_errors.go +++ /dev/null @@ -1,36 +0,0 @@ -package options - -import ( - "errors" - "fmt" -) - -// errParseTaskOptionField is returned when we fail to parse a single field in -// task options. -func errParseTaskOptionField(opt string) error { - return fmt.Errorf("failed to parse field '%s' in task options", opt) -} - -// errMissingRequiredTaskOption is returned when we a required option is -// missing. -func errMissingRequiredTaskOption(opt string) error { - return fmt.Errorf("missing required option: %s", opt) -} - -// errTaskInvalidDuration is returned when an "every" or "offset" option is invalid in a task. -func errTaskInvalidDuration(err error) error { - return fmt.Errorf("invalid duration in task %s", err) -} - -// errTaskOptionNotObjectExpression is returned when the type of an task option -// value is not an object literal expression. -func errTaskOptionNotObjectExpression(actualType string) error { - return fmt.Errorf("task option expected to be object literal, but found %q", actualType) -} - -var ( - ErrDuplicateIntervalField = errors.New("cannot use both cron and every in task options") - ErrNoTaskOptionsDefined = errors.New("no task options defined") - ErrMultipleTaskOptionsDefined = errors.New("multiple task options defined") - ErrNoASTFile = errors.New("expected parsed file, but found none") -) diff --git a/task/options/options_test.go b/task/options/options_test.go deleted file mode 100644 index f5445c55785..00000000000 --- a/task/options/options_test.go +++ /dev/null @@ -1,287 +0,0 @@ -package options_test - -import ( - "fmt" - "math" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/flux/ast" - _ "github.com/influxdata/influxdb/v2/fluxinit/static" - "github.com/influxdata/influxdb/v2/pkg/pointer" - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/influxdata/influxdb/v2/task/options" -) - -func scriptGenerator(opt options.Options, body string) string { - taskData := "" - if opt.Name != "" { - taskData = fmt.Sprintf("%s name: %q,\n", taskData, opt.Name) - } - if opt.Cron != "" { - taskData = fmt.Sprintf("%s cron: %q,\n", taskData, opt.Cron) - } - if !opt.Every.IsZero() { - taskData = fmt.Sprintf("%s every: %s,\n", taskData, opt.Every.String()) - } - if opt.Offset != nil && !(*opt.Offset).IsZero() { - taskData = fmt.Sprintf("%s offset: %s,\n", taskData, opt.Offset.String()) - } - if opt.Concurrency != nil && *opt.Concurrency != 0 { - taskData = fmt.Sprintf("%s concurrency: %d,\n", taskData, *opt.Concurrency) - } - if opt.Retry != nil && *opt.Retry != 0 { - taskData = fmt.Sprintf("%s retry: %d,\n", taskData, *opt.Retry) - } - if body == "" { - body = `from(bucket: "test") - |> range(start:-1h)` - } - - return fmt.Sprintf(`option task = { -%s -} - -%s`, taskData, body) -} - -func TestNegDurations(t *testing.T) { - dur := options.MustParseDuration("-1m") - d, err := dur.DurationFrom(time.Now()) - if err != nil { - t.Fatal(err) - } - if d != -time.Minute { - t.Fatalf("expected duration to be -1m but was %s", d) - } -} - -func TestFromScriptAST(t *testing.T) { - for _, c := range []struct { 
- script string - exp options.Options - shouldErr bool - }{ - {script: scriptGenerator(options.Options{Name: "name0", Cron: "* * * * *", Concurrency: pointer.Int64(2), Retry: pointer.Int64(3), Offset: options.MustParseDuration("-1m")}, ""), - exp: options.Options{Name: "name0", - Cron: "* * * * *", - Concurrency: pointer.Int64(2), - Retry: pointer.Int64(3), - Offset: options.MustParseDuration("-1m")}}, - {script: scriptGenerator(options.Options{Name: "name1", Every: *(options.MustParseDuration("5s"))}, ""), exp: options.Options{Name: "name1", Every: *(options.MustParseDuration("5s")), Concurrency: pointer.Int64(1), Retry: pointer.Int64(1)}}, - {script: scriptGenerator(options.Options{Name: "name2", Cron: "* * * * *"}, ""), exp: options.Options{Name: "name2", Cron: "* * * * *", Concurrency: pointer.Int64(1), Retry: pointer.Int64(1)}}, - {script: scriptGenerator(options.Options{Name: "name3", Every: *(options.MustParseDuration("1h")), Cron: "* * * * *"}, ""), shouldErr: true}, - {script: scriptGenerator(options.Options{Name: "name4", Concurrency: pointer.Int64(1000), Every: *(options.MustParseDuration("1h"))}, ""), shouldErr: true}, - {script: "option task = {\n name: \"name5\",\n concurrency: 0,\n every: 1m0s,\n\n}\n\nfrom(bucket: \"test\")\n |> range(start:-1h)", shouldErr: true}, - {script: "option task = {\n name: \"name6\",\n concurrency: 1,\n every: 1,\n\n}\n\nfrom(bucket: \"test\")\n |> range(start:-1h)", shouldErr: true}, - {script: scriptGenerator(options.Options{Name: "name7", Retry: pointer.Int64(20), Every: *(options.MustParseDuration("1h"))}, ""), shouldErr: true}, - {script: "option task = {\n name: \"name8\",\n retry: 0,\n every: 1m0s,\n\n}\n\nfrom(bucket: \"test\")\n |> range(start:-1h)", shouldErr: true}, - {script: scriptGenerator(options.Options{Name: "name9"}, ""), shouldErr: true}, - {script: scriptGenerator(options.Options{}, ""), shouldErr: true}, - {script: `option task = { - name: "name10", - every: 1d, - offset: 1m, - } - from(bucket: "metrics") - |> range(start: now(), stop: 8w) - `, - exp: options.Options{Name: "name10", Every: *(options.MustParseDuration("1d")), Concurrency: pointer.Int64(1), Retry: pointer.Int64(1), Offset: options.MustParseDuration("1m")}, - }, - {script: `option task = { - name: "name11", - every: 1m, - offset: 1d, - } - from(bucket: "metrics") - |> range(start: now(), stop: 8w) - - `, - exp: options.Options{Name: "name11", Every: *(options.MustParseDuration("1m")), Concurrency: pointer.Int64(1), Retry: pointer.Int64(1), Offset: options.MustParseDuration("1d")}, - }, - {script: "option task = {name:\"test_task_smoke_name\", every:30s} from(bucket:\"test_tasks_smoke_bucket_source\") |> range(start: -1h) |> map(fn: (r) => ({r with _time: r._time, _value:r._value, t : \"quality_rocks\"}))|> to(bucket:\"test_tasks_smoke_bucket_dest\", orgID:\"3e73e749495d37d5\")", - exp: options.Options{Name: "test_task_smoke_name", Every: *(options.MustParseDuration("30s")), Retry: pointer.Int64(1), Concurrency: pointer.Int64(1)}, shouldErr: false}, // TODO(docmerlin): remove this once tasks fully supports all flux duration units. 
- - } { - o, err := options.FromScriptAST(fluxlang.DefaultService, c.script) - if c.shouldErr && err == nil { - t.Fatalf("script %q should have errored but didn't", c.script) - } else if !c.shouldErr && err != nil { - t.Fatalf("script %q should not have errored, but got %v", c.script, err) - } - - if err != nil { - continue - } - - ignoreLocation := cmpopts.IgnoreFields(ast.BaseNode{}, "Loc") - - if !cmp.Equal(o, c.exp, ignoreLocation) { - t.Fatalf("script %q got unexpected result -got/+exp\n%s", c.script, cmp.Diff(o, c.exp)) - } - } -} - -func TestValidate(t *testing.T) { - good := options.Options{Name: "x", Cron: "* * * * *", Concurrency: pointer.Int64(1), Retry: pointer.Int64(1)} - if err := good.Validate(); err != nil { - t.Fatal(err) - } - - bad := new(options.Options) - *bad = good - bad.Name = "" - if err := bad.Validate(); err == nil { - t.Error("expected error for options without name") - } - - *bad = good - bad.Cron = "" - if err := bad.Validate(); err == nil { - t.Error("expected error for options without cron or every") - } - - *bad = good - bad.Every = *options.MustParseDuration("1m") - if err := bad.Validate(); err == nil { - t.Error("expected error for options with both cron and every") - } - - *bad = good - bad.Cron = "not a cron string" - if err := bad.Validate(); err == nil { - t.Error("expected error for options with invalid cron") - } - - *bad = good - bad.Cron = "" - bad.Every = *options.MustParseDuration("-1m") - if err := bad.Validate(); err == nil { - t.Error("expected error for negative every") - } - - *bad = good - bad.Offset = options.MustParseDuration("1500ms") - if err := bad.Validate(); err == nil { - t.Error("expected error for sub-second delay resolution") - } - - *bad = good - bad.Concurrency = pointer.Int64(0) - if err := bad.Validate(); err == nil { - t.Error("expected error for 0 concurrency") - } - - *bad = good - bad.Concurrency = pointer.Int64(math.MaxInt64) - if err := bad.Validate(); err == nil { - t.Error("expected error for concurrency too large") - } - - *bad = good - bad.Retry = pointer.Int64(0) - if err := bad.Validate(); err == nil { - t.Error("expected error for 0 retry") - } - - *bad = good - bad.Retry = pointer.Int64(math.MaxInt64) - if err := bad.Validate(); err == nil { - t.Error("expected error for retry too large") - } - - notbad := new(options.Options) - *notbad = good - notbad.Cron = "" - notbad.Every = *options.MustParseDuration("22d") - if err := notbad.Validate(); err != nil { - t.Error("expected no error for days every") - } - -} - -func TestEffectiveCronString(t *testing.T) { - for _, c := range []struct { - c string - e options.Duration - exp string - }{ - {c: "10 * * * *", exp: "10 * * * *"}, - {e: *(options.MustParseDuration("10s")), exp: "@every 10s"}, - {exp: ""}, - {e: *(options.MustParseDuration("10d")), exp: "@every 10d"}, - } { - o := options.Options{Cron: c.c, Every: c.e} - got := o.EffectiveCronString() - if got != c.exp { - t.Fatalf("exp cron string %q, got %q for %v", c.exp, got, o) - } - } -} - -func TestDurationMarshaling(t *testing.T) { - t.Run("unmarshalling", func(t *testing.T) { - now := time.Now().UTC() /* to guarantee 24 hour days*/ - dur1 := options.Duration{} - if err := dur1.UnmarshalText([]byte("1d1h10m3s")); err != nil { - t.Fatal(err) - } - d1, err1 := dur1.DurationFrom(now) - if err1 != nil { - t.Fatal(err1) - } - - dur2 := options.Duration{} - if err := dur2.Parse("1d1h10m3s"); err != nil { - t.Fatal(err) - } - d2, err2 := dur2.DurationFrom(now) - if err2 != nil { - t.Fatal(err2) - } - - if d1 != d2 
|| d1 != 25*time.Hour+10*time.Minute+3*time.Second /* we know that this day is 24 hours long because its UTC and go ignores leap seconds*/ { - t.Fatal("Parse and Marshaling do not give us the same result") - } - }) - - t.Run("marshaling", func(t *testing.T) { - dur := options.Duration{} - if err := dur.UnmarshalText([]byte("1h10m3s")); err != nil { - t.Fatal(err) - } - if dur.String() != "1h10m3s" { - t.Fatalf("duration string should be \"1h10m3s\" but was %s", dur.String()) - } - text, err := dur.MarshalText() - if err != nil { - t.Fatal(err) - } - if string(text) != "1h10m3s" { - t.Fatalf("duration text should be \"1h10m3s\" but was %s", text) - } - }) - - t.Run("parse zero", func(t *testing.T) { - dur := options.Duration{} - if err := dur.UnmarshalText([]byte("0h0s")); err != nil { - t.Fatal(err) - } - if !dur.IsZero() { - t.Fatalf("expected duration \"0s\" to be zero but was %s", dur.String()) - } - }) -} - -func TestDurationMath(t *testing.T) { - dur := options.MustParseDuration("10s") - d, err := dur.DurationFrom(time.Now()) - if err != nil { - t.Fatal(err) - } - if d != 10*time.Second { - t.Fatalf("expected duration to be 10s but it was %s", d) - } -} diff --git a/task/options/strconv.go b/task/options/strconv.go deleted file mode 100644 index 7730fee4577..00000000000 --- a/task/options/strconv.go +++ /dev/null @@ -1,108 +0,0 @@ -package options - -import ( - "fmt" - "strconv" - "unicode" - "unicode/utf8" - - "github.com/influxdata/flux" - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/codes" -) - -// TODO(jsternberg): This file copies over code from an internal package -// because we need them from an internal package and the only way they -// are exposed is through a package that depends on the core flux parser. -// We want to avoid a dependency on the core parser so we copy these -// implementations. -// -// In the future, we should consider exposing these functions from flux -// in a non-internal package outside of the parser package. - -// ParseSignedDuration is a helper wrapper around parser.ParseSignedDuration. -// We use it because we need to clear the basenode, but flux does not. -func ParseSignedDuration(text string) (*ast.DurationLiteral, error) { - // TODO(jsternberg): This is copied from an internal package in flux to break a dependency - // on the parser package where this method is exposed. - // Consider exposing this properly in flux. - if r, s := utf8.DecodeRuneInString(text); r == '-' { - d, err := parseDuration(text[s:]) - if err != nil { - return nil, err - } - for i := range d { - d[i].Magnitude = -d[i].Magnitude - } - return &ast.DurationLiteral{Values: d}, nil - } - - d, err := parseDuration(text) - if err != nil { - return nil, err - } - return &ast.DurationLiteral{Values: d}, nil -} - -// parseDuration will convert a string into components of the duration. 
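A couple of worked examples of the helper above: the parser splits the literal into magnitude/unit pairs (normalizing µs to us), and a leading minus sign negates every component, as `parseDuration` below implements.

```go
// Sketch: what ParseSignedDuration produces for a few inputs.
lit, err := options.ParseSignedDuration("1h30m")
if err != nil {
	log.Fatal(err)
}
// lit.Values == []ast.Duration{{Magnitude: 1, Unit: "h"}, {Magnitude: 30, Unit: "m"}}

neg, _ := options.ParseSignedDuration("-1d2h")
// The leading '-' negates every component:
// neg.Values == []ast.Duration{{Magnitude: -1, Unit: "d"}, {Magnitude: -2, Unit: "h"}}

_ = lit
_ = neg
```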
-func parseDuration(lit string) ([]ast.Duration, error) { - var values []ast.Duration - for len(lit) > 0 { - n := 0 - for n < len(lit) { - ch, size := utf8.DecodeRuneInString(lit[n:]) - if size == 0 { - panic("invalid rune in duration") - } - - if !unicode.IsDigit(ch) { - break - } - n += size - } - - if n == 0 { - return nil, &flux.Error{ - Code: codes.Invalid, - Msg: fmt.Sprintf("invalid duration %s", lit), - } - } - - magnitude, err := strconv.ParseInt(lit[:n], 10, 64) - if err != nil { - return nil, err - } - lit = lit[n:] - - n = 0 - for n < len(lit) { - ch, size := utf8.DecodeRuneInString(lit[n:]) - if size == 0 { - panic("invalid rune in duration") - } - - if !unicode.IsLetter(ch) { - break - } - n += size - } - - if n == 0 { - return nil, &flux.Error{ - Code: codes.Invalid, - Msg: fmt.Sprintf("duration is missing a unit: %s", lit), - } - } - - unit := lit[:n] - if unit == "µs" { - unit = "us" - } - values = append(values, ast.Duration{ - Magnitude: magnitude, - Unit: unit, - }) - lit = lit[n:] - } - return values, nil -} diff --git a/task/servicetest/servicetest.go b/task/servicetest/servicetest.go deleted file mode 100644 index 671fc6d7ef5..00000000000 --- a/task/servicetest/servicetest.go +++ /dev/null @@ -1,1924 +0,0 @@ -// Package servicetest provides tests to ensure that implementations of -// platform/task/backend.Store and platform/task/backend.LogReader meet the requirements of influxdb.TaskService. -// -// Consumers of this package must import query/builtin. -// This package does not import it directly, to avoid requiring it too early. -package servicetest - -import ( - "context" - "fmt" - "math" - "reflect" - "runtime" - "strings" - "sync" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/feature" - "github.com/influxdata/influxdb/v2/kit/platform" - influxdbmock "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/task/backend" - "github.com/influxdata/influxdb/v2/task/options" - "github.com/influxdata/influxdb/v2/task/taskmodel" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// BackendComponentFactory is supplied by consumers of the adaptertest package, -// to provide the values required to constitute a PlatformAdapter. -// The provided context.CancelFunc is called after the test, -// and it is the implementer's responsibility to clean up after that is called. -// -// If creating the System fails, the implementer should call t.Fatal. -type BackendComponentFactory func(t *testing.T) (*System, context.CancelFunc) - -// TestTaskService should be called by consumers of the servicetest package. -// This will call fn once to create a single influxdb.TaskService -// used across all subtests in TestTaskService. -func TestTaskService(t *testing.T, fn BackendComponentFactory, testCategory ...string) { - sys, cancel := fn(t) - defer cancel() - - if len(testCategory) == 0 { - testCategory = []string{"transactional", "analytical"} - } - - for _, category := range testCategory { - switch category { - case "transactional": - t.Run("TransactionalTaskService", func(t *testing.T) { - // We're running the subtests in parallel, but if we don't use this wrapper, - // the defer cancel() call above would return before the parallel subtests completed. 
- // - // Running the subtests in parallel might make them slightly faster, - // but more importantly, it should exercise concurrency to catch data races. - - t.Run("Task CRUD", func(t *testing.T) { - t.Parallel() - testTaskCRUD(t, sys) - }) - - t.Run("FindTasks basic", func(t *testing.T) { - testTaskFindTasksBasic(t, sys) - }) - - t.Run("FindTasks paging", func(t *testing.T) { - testTaskFindTasksPaging(t, sys) - }) - - t.Run("FindTasks after paging", func(t *testing.T) { - testTaskFindTasksAfterPaging(t, sys) - }) - - t.Run("Task Update Options Full", func(t *testing.T) { - t.Parallel() - testTaskOptionsUpdateFull(t, sys) - }) - - t.Run("Task Runs", func(t *testing.T) { - t.Parallel() - testTaskRuns(t, sys) - }) - - t.Run("Task Concurrency", func(t *testing.T) { - if testing.Short() { - t.Skip("skipping in short mode") - } - t.Parallel() - testTaskConcurrency(t, sys) - }) - - t.Run("Task Updates", func(t *testing.T) { - t.Parallel() - testUpdate(t, sys) - }) - - t.Run("Task Manual Run", func(t *testing.T) { - t.Parallel() - testManualRun(t, sys) - }) - - t.Run("Task Type", func(t *testing.T) { - t.Parallel() - testTaskType(t, sys) - }) - - }) - case "analytical": - t.Run("AnalyticalTaskService", func(t *testing.T) { - t.Run("Task Run Storage", func(t *testing.T) { - t.Parallel() - testRunStorage(t, sys) - }) - t.Run("Task RetryRun", func(t *testing.T) { - t.Parallel() - testRetryAcrossStorage(t, sys) - }) - t.Run("task Log Storage", func(t *testing.T) { - t.Parallel() - testLogsAcrossStorage(t, sys) - }) - }) - } - } - -} - -// TestCreds encapsulates credentials needed for a system to properly work with tasks. -type TestCreds struct { - OrgID, UserID, AuthorizationID platform.ID - Org string - Token string -} - -// Authorizer returns an authorizer for the credentials in the struct -func (tc TestCreds) Authorizer() influxdb.Authorizer { - return &influxdb.Authorization{ - ID: tc.AuthorizationID, - OrgID: tc.OrgID, - UserID: tc.UserID, - Token: tc.Token, - } -} - -type OrganizationService interface { - CreateOrganization(ctx context.Context, b *influxdb.Organization) error -} - -type UserService interface { - CreateUser(ctx context.Context, u *influxdb.User) error -} - -type UserResourceMappingService interface { - CreateUserResourceMapping(ctx context.Context, m *influxdb.UserResourceMapping) error -} - -type AuthorizationService interface { - CreateAuthorization(ctx context.Context, a *influxdb.Authorization) error -} - -// System as in "system under test" encapsulates the required parts of a influxdb.TaskAdapter -type System struct { - TaskControlService backend.TaskControlService - - // Used in the Creds function to create valid organizations, users, tokens, etc. - OrganizationService OrganizationService - UserService UserService - UserResourceMappingService UserResourceMappingService - AuthorizationService AuthorizationService - - // Set this context, to be used in tests, so that any spawned goroutines watching Ctx.Done() - // will clean up after themselves. - Ctx context.Context - - // TaskService is the task service we would like to test - TaskService taskmodel.TaskService - - // Override for accessing credentials for an individual test. - // Callers can leave this nil and the test will create its own random IDs for each test. - // However, if the system needs to verify credentials, - // the caller should set this value and return valid IDs and a valid token. - // It is safe if this returns the same values every time it is called. 
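A consumer of this package supplies the factory roughly as sketched below; `newTestSystem` is a hypothetical helper standing in for whatever wiring builds the stores and services listed in the `System` struct, and the optional category argument restricts which suites run.

```go
// Sketch: how a backend implementation plugs itself into the shared task-service tests.
func TestMyBackendTaskService(t *testing.T) {
	servicetest.TestTaskService(t, func(t *testing.T) (*servicetest.System, context.CancelFunc) {
		ctx, cancel := context.WithCancel(context.Background())
		sys := newTestSystem(t, ctx) // hypothetical: builds TaskService, TaskControlService, org/user/auth services
		sys.Ctx = ctx
		return sys, cancel
	}, "transactional") // omit the category list to run both "transactional" and "analytical"
}
```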
- CredsFunc func(*testing.T) (TestCreds, error) - - // Toggles behavior between KV and archive storage because FinishRun() deletes runs after completion - CallFinishRun bool -} - -func testTaskCRUD(t *testing.T, sys *System) { - cr := creds(t, sys) - - // Create a task. - tc := taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - Flux: fmt.Sprintf(scriptFmt, 0), - OwnerID: cr.UserID, - Type: taskmodel.TaskSystemType, - } - - authorizedCtx := icontext.SetAuthorizer(sys.Ctx, cr.Authorizer()) - - tsk, err := sys.TaskService.CreateTask(authorizedCtx, tc) - if err != nil { - t.Fatal(err) - } - if !tsk.ID.Valid() { - t.Fatal("no task ID set") - } - - findTask := func(tasks []*taskmodel.Task, id platform.ID) (*taskmodel.Task, error) { - for _, t := range tasks { - if t.ID == id { - return t, nil - } - } - return nil, fmt.Errorf("failed to find task by id %s", id) - } - - findTasksByStatus := func(tasks []*taskmodel.Task, status string) []*taskmodel.Task { - var foundTasks = []*taskmodel.Task{} - for _, t := range tasks { - if t.Status == status { - foundTasks = append(foundTasks, t) - } - } - return foundTasks - } - - // TODO: replace with ErrMissingOwner test - // // should not be able to create a task without a token - // noToken := influxdb.TaskCreate{ - // OrganizationID: cr.OrgID, - // Flux: fmt.Sprintf(scriptFmt, 0), - // // OwnerID: cr.UserID, // should fail - // } - // _, err = sys.TaskService.CreateTask(authorizedCtx, noToken) - - // if err != influxdb.ErrMissingToken { - // t.Fatalf("expected error missing token, got: %v", err) - // } - - // Look up a task the different ways we can. - // Map of method name to found task. - found := map[string]*taskmodel.Task{ - "Created": tsk, - } - - // Find by ID should return the right task. - f, err := sys.TaskService.FindTaskByID(sys.Ctx, tsk.ID) - if err != nil { - t.Fatal(err) - } - found["FindTaskByID"] = f - - fs, _, err := sys.TaskService.FindTasks(sys.Ctx, taskmodel.TaskFilter{OrganizationID: &cr.OrgID}) - if err != nil { - t.Fatal(err) - } - f, err = findTask(fs, tsk.ID) - if err != nil { - t.Fatal(err) - } - found["FindTasks with Organization filter"] = f - - fs, _, err = sys.TaskService.FindTasks(sys.Ctx, taskmodel.TaskFilter{Organization: cr.Org}) - if err != nil { - t.Fatal(err) - } - f, err = findTask(fs, tsk.ID) - if err != nil { - t.Fatal(err) - } - found["FindTasks with Organization name filter"] = f - - fs, _, err = sys.TaskService.FindTasks(sys.Ctx, taskmodel.TaskFilter{User: &cr.UserID}) - if err != nil { - t.Fatal(err) - } - f, err = findTask(fs, tsk.ID) - if err != nil { - t.Fatal(err) - } - found["FindTasks with User filter"] = f - - want := &taskmodel.Task{ - ID: tsk.ID, - CreatedAt: tsk.CreatedAt, - LatestCompleted: tsk.LatestCompleted, - LatestScheduled: tsk.LatestScheduled, - OrganizationID: cr.OrgID, - Organization: cr.Org, - OwnerID: tsk.OwnerID, - Name: "task #0", - Cron: "* * * * *", - Offset: 5 * time.Second, - Status: string(taskmodel.DefaultTaskStatus), - Flux: fmt.Sprintf(scriptFmt, 0), - Type: taskmodel.TaskSystemType, - } - - for fn, f := range found { - if diff := cmp.Diff(f, want); diff != "" { - t.Logf("got: %+#v", f) - t.Errorf("expected %s task to be consistant: -got/+want: %s", fn, diff) - } - } - - // Check limits - tc2 := taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - Flux: fmt.Sprintf(scriptFmt, 1), - OwnerID: cr.UserID, - Status: string(taskmodel.TaskInactive), - } - - if _, err := sys.TaskService.CreateTask(authorizedCtx, tc2); err != nil { - t.Fatal(err) - } - if !tsk.ID.Valid() { - t.Fatal("no 
task ID set") - } - tasks, _, err := sys.TaskService.FindTasks(sys.Ctx, taskmodel.TaskFilter{OrganizationID: &cr.OrgID, Limit: 1}) - if err != nil { - t.Fatal(err) - } - if len(tasks) > 1 { - t.Fatalf("failed to limit tasks: expected: 1, got : %d", len(tasks)) - } - - // Check after - first := tasks[0] - tasks, _, err = sys.TaskService.FindTasks(sys.Ctx, taskmodel.TaskFilter{OrganizationID: &cr.OrgID, After: &first.ID}) - if err != nil { - t.Fatal(err) - } - // because this test runs concurrently we can only guarantee we at least 2 tasks - // when using after we can check to make sure the after is not in the list - if len(tasks) == 0 { - t.Fatalf("expected at least 1 task: got 0") - } - for _, task := range tasks { - if first.ID == task.ID { - t.Fatalf("after task included in task list") - } - } - - // Check task status filter - active := string(taskmodel.TaskActive) - fs, _, err = sys.TaskService.FindTasks(sys.Ctx, taskmodel.TaskFilter{Status: &active}) - if err != nil { - t.Fatal(err) - } - - activeTasks := findTasksByStatus(fs, string(taskmodel.TaskActive)) - if len(fs) != len(activeTasks) { - t.Fatalf("expected to find %d active tasks, found: %d", len(activeTasks), len(fs)) - } - - inactive := string(taskmodel.TaskInactive) - fs, _, err = sys.TaskService.FindTasks(sys.Ctx, taskmodel.TaskFilter{Status: &inactive}) - if err != nil { - t.Fatal(err) - } - - inactiveTasks := findTasksByStatus(fs, string(taskmodel.TaskInactive)) - if len(fs) != len(inactiveTasks) { - t.Fatalf("expected to find %d inactive tasks, found: %d", len(inactiveTasks), len(fs)) - } - - // Update task: script only. - newFlux := fmt.Sprintf(scriptFmt, 99) - origID := f.ID - f, err = sys.TaskService.UpdateTask(authorizedCtx, origID, taskmodel.TaskUpdate{Flux: &newFlux}) - if err != nil { - t.Fatal(err) - } - - if origID != f.ID { - t.Fatalf("task ID unexpectedly changed during update, from %s to %s", origID.String(), f.ID.String()) - } - - if f.Flux != newFlux { - t.Fatalf("wrong flux from update; want %q, got %q", newFlux, f.Flux) - } - if f.Status != string(taskmodel.TaskActive) { - t.Fatalf("expected task to be created active, got %q", f.Status) - } - - // Update task: status only. - newStatus := string(taskmodel.TaskInactive) - f, err = sys.TaskService.UpdateTask(authorizedCtx, origID, taskmodel.TaskUpdate{Status: &newStatus}) - if err != nil { - t.Fatal(err) - } - if f.Flux != newFlux { - t.Fatalf("flux unexpected updated: %s", f.Flux) - } - if f.Status != newStatus { - t.Fatalf("expected task status to be inactive, got %q", f.Status) - } - - // Update task: reactivate status and update script. - newStatus = string(taskmodel.TaskActive) - newFlux = fmt.Sprintf(scriptFmt, 98) - f, err = sys.TaskService.UpdateTask(authorizedCtx, origID, taskmodel.TaskUpdate{Flux: &newFlux, Status: &newStatus}) - if err != nil { - t.Fatal(err) - } - if f.Flux != newFlux { - t.Fatalf("flux unexpected updated: %s", f.Flux) - } - if f.Status != newStatus { - t.Fatalf("expected task status to be inactive, got %q", f.Status) - } - - // Update task: just update an option. 
- newStatus = string(taskmodel.TaskActive) - newFlux = "option task = {name: \"task-changed #98\", cron: \"* * * * *\", offset: 5s, concurrency: 100}\n\n// This comment should persist.\nfrom(bucket: \"b\")\n |> to(bucket: \"two\", orgID: \"000000000000000\")\n" - f, err = sys.TaskService.UpdateTask(authorizedCtx, origID, taskmodel.TaskUpdate{Options: options.Options{Name: "task-changed #98"}}) - if err != nil { - t.Fatal(err) - } - if f.Flux != newFlux { - diff := cmp.Diff(f.Flux, newFlux) - t.Fatalf("flux unexpected updated: %s", diff) - } - if f.Status != newStatus { - t.Fatalf("expected task status to be active, got %q", f.Status) - } - - // Update task: switch to every. - newStatus = string(taskmodel.TaskActive) - newFlux = "option task = {name: \"task-changed #98\", every: 30s, offset: 5s, concurrency: 100}\n\n// This comment should persist.\nfrom(bucket: \"b\")\n |> to(bucket: \"two\", orgID: \"000000000000000\")\n" - f, err = sys.TaskService.UpdateTask(authorizedCtx, origID, taskmodel.TaskUpdate{Options: options.Options{Every: *(options.MustParseDuration("30s"))}}) - if err != nil { - t.Fatal(err) - } - if f.Flux != newFlux { - diff := cmp.Diff(f.Flux, newFlux) - t.Fatalf("flux unexpected updated: %s", diff) - } - if f.Status != newStatus { - t.Fatalf("expected task status to be active, got %q", f.Status) - } - - // Update task: just cron. - newStatus = string(taskmodel.TaskActive) - newFlux = fmt.Sprintf(scriptDifferentName, 98) - f, err = sys.TaskService.UpdateTask(authorizedCtx, origID, taskmodel.TaskUpdate{Options: options.Options{Cron: "* * * * *"}}) - if err != nil { - t.Fatal(err) - } - if f.Flux != newFlux { - diff := cmp.Diff(f.Flux, newFlux) - t.Fatalf("flux unexpected updated: %s", diff) - } - if f.Status != newStatus { - t.Fatalf("expected task status to be active, got %q", f.Status) - } - - // // Update task: use a new token on the context and modify some other option. - // // Ensure the authorization doesn't change -- it did change at one time, which was bug https://github.com/influxdata/influxdb/issues/12218. - // newAuthz := &influxdb.Authorization{OrgID: cr.OrgID, UserID: cr.UserID, Permissions: influxdb.OperPermissions()} - // if err := sys.I.CreateAuthorization(sys.Ctx, newAuthz); err != nil { - // t.Fatal(err) - // } - // newAuthorizedCtx := icontext.SetAuthorizer(sys.Ctx, newAuthz) - // f, err = sys.TaskService.UpdateTask(newAuthorizedCtx, origID, influxdb.TaskUpdate{Options: options.Options{Name: "foo"}}) - // if err != nil { - // t.Fatal(err) - // } - // if f.Name != "foo" { - // t.Fatalf("expected name to update to foo, got %s", f.Name) - // } - // if f.AuthorizationID != authzID { - // t.Fatalf("expected authorization ID to remain %v, got %v", authzID, f.AuthorizationID) - // } - - // // Now actually update to use the new token, from the original authorization context. - // f, err = sys.TaskService.UpdateTask(authorizedCtx, origID, influxdb.TaskUpdate{Token: newAuthz.Token}) - // if err != nil { - // t.Fatal(err) - // } - // if f.AuthorizationID != newAuthz.ID { - // t.Fatalf("expected authorization ID %v, got %v", newAuthz.ID, f.AuthorizationID) - // } - - // Delete task. - if err := sys.TaskService.DeleteTask(sys.Ctx, origID); err != nil { - t.Fatal(err) - } - - // Task should not be returned. 
- if _, err := sys.TaskService.FindTaskByID(sys.Ctx, origID); err != taskmodel.ErrTaskNotFound { - t.Fatalf("expected %v, got %v", taskmodel.ErrTaskNotFound, err) - } -} - -func testTaskFindTasksBasic(t *testing.T, sys *System) { - script := `option task = {name: "Task %03d", cron: "* * * * *", concurrency: 100, offset: 10s} - -from(bucket: "b") - |> to(bucket: "two", orgID: "000000000000000")` - - cr := creds(t, sys) - - tc := taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - OwnerID: cr.UserID, - Type: taskmodel.TaskSystemType, - } - - authorizedCtx := icontext.SetAuthorizer(sys.Ctx, cr.Authorizer()) - - created := make([]*taskmodel.Task, 50) - for i := 0; i < 50; i++ { - tc.Flux = fmt.Sprintf(script, i/10) - tc.Description = fmt.Sprintf("Task %d", i) - tsk, err := sys.TaskService.CreateTask(authorizedCtx, tc) - require.NoError(t, err) - require.True(t, tsk.ID.Valid(), "no task ID set") - created[i] = tsk - } - - tasks, _, err := sys.TaskService.FindTasks(sys.Ctx, taskmodel.TaskFilter{Type: &taskmodel.TaskBasicType}) - require.NoError(t, err) - require.Equal(t, 50, len(tasks)) - - // Basic results should exclude query text, but include other metdata like description. - for i, tsk := range tasks { - require.Empty(t, tsk.Flux) - require.Equal(t, fmt.Sprintf("Task %d", i), tsk.Description) - } -} - -func testTaskFindTasksPaging(t *testing.T, sys *System) { - script := `option task = {name: "Task %03d", cron: "* * * * *", concurrency: 100, offset: 10s} - -from(bucket: "b") - |> to(bucket: "two", orgID: "000000000000000")` - - cr := creds(t, sys) - - tc := taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - OwnerID: cr.UserID, - Type: taskmodel.TaskSystemType, - } - - authorizedCtx := icontext.SetAuthorizer(sys.Ctx, cr.Authorizer()) - - created := make([]*taskmodel.Task, 50) - for i := 0; i < 50; i++ { - tc.Flux = fmt.Sprintf(script, i/10) - tsk, err := sys.TaskService.CreateTask(authorizedCtx, tc) - if err != nil { - t.Fatal(err) - } - if !tsk.ID.Valid() { - t.Fatal("no task ID set") - } - - created[i] = tsk - } - - tasks, _, err := sys.TaskService.FindTasks(sys.Ctx, taskmodel.TaskFilter{Limit: 5}) - if err != nil { - t.Fatalf("FindTasks: %v", err) - } - - if got, exp := len(tasks), 5; got != exp { - t.Fatalf("unexpected len(taksks), -got/+exp: %v", cmp.Diff(got, exp)) - } - - // find tasks using name which are after first 10 - name := "Task 004" - tasks, _, err = sys.TaskService.FindTasks(sys.Ctx, taskmodel.TaskFilter{Limit: 5, Name: &name}) - if err != nil { - t.Fatalf("FindTasks: %v", err) - } - - if got, exp := len(tasks), 5; got != exp { - t.Fatalf("unexpected len(taksks), -got/+exp: %v", cmp.Diff(got, exp)) - } -} - -func testTaskFindTasksAfterPaging(t *testing.T, sys *System) { - var ( - script = `option task = {name: "some-unique-task-name", cron: "* * * * *", concurrency: 100, offset: 10s} - -from(bucket: "b") - |> to(bucket: "two", orgID: "000000000000000")` - cr = creds(t, sys) - tc = taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - OwnerID: cr.UserID, - Type: taskmodel.TaskSystemType, - Flux: script, - } - authorizedCtx = icontext.SetAuthorizer(sys.Ctx, cr.Authorizer()) - created = make([]*taskmodel.Task, 10) - taskName = "some-unique-task-name" - ) - - for i := 0; i < len(created); i++ { - tsk, err := sys.TaskService.CreateTask(authorizedCtx, tc) - if err != nil { - t.Fatal(err) - } - if !tsk.ID.Valid() { - t.Fatal("no task ID set") - } - - created[i] = tsk - } - - var ( - expected = [][]platform.ID{ - {created[0].ID, created[1].ID}, - {created[2].ID, created[3].ID}, - 
{created[4].ID, created[5].ID}, - {created[6].ID, created[7].ID}, - {created[8].ID, created[9].ID}, - // last page should be empty - nil, - } - found = make([][]platform.ID, 0, 6) - after *platform.ID - ) - - // one more than expected pages - for i := 0; i < 6; i++ { - tasks, _, err := sys.TaskService.FindTasks(sys.Ctx, taskmodel.TaskFilter{ - Limit: 2, - After: after, - Name: &taskName, - }) - if err != nil { - t.Fatalf("FindTasks: %v", err) - } - - var page []platform.ID - for _, task := range tasks { - page = append(page, task.ID) - } - - found = append(found, page) - - if len(tasks) == 0 { - break - } - - after = &tasks[len(tasks)-1].ID - } - - if !reflect.DeepEqual(expected, found) { - t.Errorf("expected %#v, found %#v", expected, found) - } -} - -// Create a new task with a Cron and Offset option -// Update the task to remove the Offset option, and change Cron to Every -// Retrieve the task again to ensure the options are now Every, without Cron or Offset -func testTaskOptionsUpdateFull(t *testing.T, sys *System) { - - script := `option task = {name: "task-Options-Update", cron: "* * * * *", concurrency: 100, offset: 10s} - -from(bucket: "b") - |> to(bucket: "two", orgID: "000000000000000") -` - - cr := creds(t, sys) - - ct := taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - Flux: script, - OwnerID: cr.UserID, - } - authorizedCtx := icontext.SetAuthorizer(sys.Ctx, cr.Authorizer()) - task, err := sys.TaskService.CreateTask(authorizedCtx, ct) - if err != nil { - t.Fatal(err) - } - t.Run("update task and delete offset", func(t *testing.T) { - expectedFlux := `option task = {name: "task-Options-Update", every: 10s, concurrency: 100} - -from(bucket: "b") - |> to(bucket: "two", orgID: "000000000000000") -` - f, err := sys.TaskService.UpdateTask(authorizedCtx, task.ID, taskmodel.TaskUpdate{Options: options.Options{Offset: &options.Duration{}, Every: *(options.MustParseDuration("10s"))}}) - if err != nil { - t.Fatal(err) - } - savedTask, err := sys.TaskService.FindTaskByID(sys.Ctx, f.ID) - if err != nil { - t.Fatal(err) - } - if savedTask.Flux != expectedFlux { - diff := cmp.Diff(savedTask.Flux, expectedFlux) - t.Fatalf("flux unexpected updated: %s", diff) - } - }) - t.Run("update task with different offset option", func(t *testing.T) { - expectedFlux := `option task = {name: "task-Options-Update", every: 10s, concurrency: 100, offset: 10s} - -from(bucket: "b") - |> to(bucket: "two", orgID: "000000000000000") -` - f, err := sys.TaskService.UpdateTask(authorizedCtx, task.ID, taskmodel.TaskUpdate{Options: options.Options{Offset: options.MustParseDuration("10s")}}) - if err != nil { - t.Fatal(err) - } - savedTask, err := sys.TaskService.FindTaskByID(sys.Ctx, f.ID) - if err != nil { - t.Fatal(err) - } - if savedTask.Flux != expectedFlux { - diff := cmp.Diff(savedTask.Flux, expectedFlux) - t.Fatalf("flux unexpected updated: %s", diff) - } - - withoutOffset := `option task = {name: "task-Options-Update", every: 10s, concurrency: 100} - -from(bucket: "b") - |> to(bucket: "two", orgID: "000000000000000") -` - fNoOffset, err := sys.TaskService.UpdateTask(authorizedCtx, task.ID, taskmodel.TaskUpdate{Flux: &withoutOffset}) - if err != nil { - t.Fatal(err) - } - var zero time.Duration - if fNoOffset.Offset != zero { - t.Fatal("removing offset failed") - } - }) - -} - -func testUpdate(t *testing.T, sys *System) { - cr := creds(t, sys) - - now := time.Now() - earliestCA := now.Add(-time.Second) - - ct := taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - Flux: fmt.Sprintf(scriptFmt, 0), - OwnerID: 
cr.UserID, - } - authorizedCtx := icontext.SetAuthorizer(sys.Ctx, cr.Authorizer()) - task, err := sys.TaskService.CreateTask(authorizedCtx, ct) - if err != nil { - t.Fatal(err) - } - - if task.LatestScheduled.IsZero() { - t.Fatal("expected a non-zero LatestScheduled on created task") - } - - st, err := sys.TaskService.FindTaskByID(sys.Ctx, task.ID) - if err != nil { - t.Fatal(err) - } - - after := time.Now() - latestCA := after.Add(time.Second) - - ca := st.CreatedAt - - if earliestCA.After(ca) || latestCA.Before(ca) { - t.Fatalf("createdAt not accurate, expected %s to be between %s and %s", ca, earliestCA, latestCA) - } - - ti := st.LatestCompleted - - if now.Sub(ti) > 10*time.Second { - t.Fatalf("latest completed not accurate, expected: ~%s, got %s", now, ti) - } - - requestedAt := time.Now().Add(5 * time.Minute).UTC() - - rc, err := sys.TaskControlService.CreateRun(sys.Ctx, task.ID, requestedAt, requestedAt.Add(time.Second)) - if err != nil { - t.Fatal(err) - } - - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc.ID, time.Now(), taskmodel.RunStarted); err != nil { - t.Fatal(err) - } - - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc.ID, time.Now(), taskmodel.RunSuccess); err != nil { - t.Fatal(err) - } - - if _, err := sys.TaskControlService.FinishRun(sys.Ctx, task.ID, rc.ID); err != nil { - t.Fatal(err) - } - - st2, err := sys.TaskService.FindTaskByID(sys.Ctx, task.ID) - if err != nil { - t.Fatal(err) - } - - if st2.LatestCompleted.Before(st.LatestCompleted) { - t.Fatalf("executed task has not updated latest complete: expected %s > %s", st2.LatestCompleted, st.LatestCompleted) - } - - if st2.LastRunStatus != "success" { - t.Fatal("executed task has not updated last run status") - } - - if st2.LastRunError != "" { - t.Fatal("executed task has updated last run error on success") - } - - rc2, err := sys.TaskControlService.CreateRun(sys.Ctx, task.ID, requestedAt, requestedAt) - if err != nil { - t.Fatal(err) - } - - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc2.ID, time.Now(), taskmodel.RunStarted); err != nil { - t.Fatal(err) - } - - if err := sys.TaskControlService.AddRunLog(sys.Ctx, task.ID, rc2.ID, time.Now(), "error message"); err != nil { - t.Fatal(err) - } - - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc2.ID, time.Now(), taskmodel.RunFail); err != nil { - t.Fatal(err) - } - - if err := sys.TaskControlService.AddRunLog(sys.Ctx, task.ID, rc2.ID, time.Now(), "last message"); err != nil { - t.Fatal(err) - } - - if _, err := sys.TaskControlService.FinishRun(sys.Ctx, task.ID, rc2.ID); err != nil { - t.Fatal(err) - } - - st3, err := sys.TaskService.FindTaskByID(sys.Ctx, task.ID) - if err != nil { - t.Fatal(err) - } - - if st3.LatestCompleted.Before(st2.LatestCompleted) { - t.Fatalf("executed task has not updated latest complete: expected %s > %s", st3.LatestCompleted, st2.LatestCompleted) - } - - if st3.LastRunStatus != "failed" { - t.Fatal("executed task has not updated last run status") - } - - if st3.LastRunError != "error message" { - t.Fatal("executed task has not updated last run error on failed") - } - - now = time.Now() - flux := fmt.Sprintf(scriptFmt, 1) - task, err = sys.TaskService.UpdateTask(authorizedCtx, task.ID, taskmodel.TaskUpdate{Flux: &flux}) - if err != nil { - t.Fatal(err) - } - after = time.Now() - - earliestUA := now.Add(-time.Second) - latestUA := after.Add(time.Second) - - ua := task.UpdatedAt - - if earliestUA.After(ua) || latestUA.Before(ua) { - t.Fatalf("updatedAt not 
accurate, expected %s to be between %s and %s", ua, earliestUA, latestUA) - } - - st, err = sys.TaskService.FindTaskByID(sys.Ctx, task.ID) - if err != nil { - t.Fatal(err) - } - - ua = st.UpdatedAt - - if earliestUA.After(ua) || latestUA.Before(ua) { - t.Fatalf("updatedAt not accurate after pulling new task, expected %s to be between %s and %s", ua, earliestUA, latestUA) - } - - ls := time.Now().Round(time.Second) // round to remove monotonic clock - task, err = sys.TaskService.UpdateTask(authorizedCtx, task.ID, taskmodel.TaskUpdate{LatestScheduled: &ls}) - if err != nil { - t.Fatal(err) - } - - st, err = sys.TaskService.FindTaskByID(sys.Ctx, task.ID) - if err != nil { - t.Fatal(err) - } - if !st.LatestScheduled.Equal(ls) { - t.Fatalf("expected latest scheduled to update, expected: %v, got: %v", ls, st.LatestScheduled) - } - -} - -func testTaskRuns(t *testing.T, sys *System) { - cr := creds(t, sys) - - t.Run("FindRuns and FindRunByID", func(t *testing.T) { - t.Parallel() - - // Script is set to run every minute. The platform adapter is currently hardcoded to schedule after "now", - // which makes timing of runs somewhat difficult. - ct := taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - Flux: fmt.Sprintf(scriptFmt, 0), - OwnerID: cr.UserID, - } - task, err := sys.TaskService.CreateTask(icontext.SetAuthorizer(sys.Ctx, cr.Authorizer()), ct) - if err != nil { - t.Fatal(err) - } - - // check run filter errors - _, _, err0 := sys.TaskService.FindRuns(sys.Ctx, taskmodel.RunFilter{Task: task.ID, Limit: -1}) - if err0 != taskmodel.ErrOutOfBoundsLimit { - t.Fatalf("failed to error with out of bounds run limit: %d", -1) - } - - _, _, err1 := sys.TaskService.FindRuns(sys.Ctx, taskmodel.RunFilter{Task: task.ID, Limit: taskmodel.TaskMaxPageSize + 1}) - if err1 != taskmodel.ErrOutOfBoundsLimit { - t.Fatalf("failed to error with out of bounds run limit: %d", taskmodel.TaskMaxPageSize+1) - } - - requestedAt := time.Now().Add(time.Hour * -1).UTC() // This should guarantee we can make two runs. - - rc0, err := sys.TaskControlService.CreateRun(sys.Ctx, task.ID, requestedAt, requestedAt.Add(time.Second)) - if err != nil { - t.Fatal(err) - } - if rc0.TaskID != task.ID { - t.Fatalf("wrong task ID on created task: got %s, want %s", rc0.TaskID, task.ID) - } - - startedAt := time.Now().UTC() - - // Update the run state to Started; normally the scheduler would do this. - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc0.ID, startedAt, taskmodel.RunStarted); err != nil { - t.Fatal(err) - } - - rc1, err := sys.TaskControlService.CreateRun(sys.Ctx, task.ID, requestedAt, requestedAt.Add(time.Second)) - if err != nil { - t.Fatal(err) - } - if rc1.TaskID != task.ID { - t.Fatalf("wrong task ID on created task run: got %s, want %s", rc1.TaskID, task.ID) - } - - // Update the run state to Started; normally the scheduler would do this. - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc1.ID, startedAt, taskmodel.RunStarted); err != nil { - t.Fatal(err) - } - - runs, _, err := sys.TaskService.FindRuns(sys.Ctx, taskmodel.RunFilter{Task: task.ID, Limit: 1}) - if err != nil { - t.Fatal(err) - } - - if len(runs) != 1 { - t.Fatalf("expected 1 run, got %#v", runs) - } - - // Mark the second run finished. 
- if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc1.ID, startedAt.Add(time.Second), taskmodel.RunSuccess); err != nil { - t.Fatal(err) - } - - if _, err := sys.TaskControlService.FinishRun(sys.Ctx, task.ID, rc1.ID); err != nil { - t.Fatal(err) - } - - // Limit 1 should only return the earlier run. - runs, _, err = sys.TaskService.FindRuns(sys.Ctx, taskmodel.RunFilter{Task: task.ID, Limit: 1}) - if err != nil { - t.Fatal(err) - } - if len(runs) != 1 { - t.Fatalf("expected 1 run, got %v", runs) - } - if runs[0].ID != rc0.ID { - t.Fatalf("retrieved wrong run ID; want %s, got %s", rc0.ID, runs[0].ID) - } - if runs[0].StartedAt != startedAt { - t.Fatalf("unexpectedStartedAt; want %s, got %s", startedAt, runs[0].StartedAt) - } - if runs[0].Status != taskmodel.RunStarted.String() { - t.Fatalf("unexpected run status; want %s, got %s", taskmodel.RunStarted.String(), runs[0].Status) - } - - if !runs[0].FinishedAt.IsZero() { - t.Fatalf("expected empty FinishedAt, got %q", runs[0].FinishedAt) - } - - // Look for a run that doesn't exist. - _, err = sys.TaskService.FindRunByID(sys.Ctx, task.ID, platform.ID(math.MaxUint64)) - if err == nil { - t.Fatalf("expected %s but got %s instead", taskmodel.ErrRunNotFound, err) - } - - // look for a taskID that doesn't exist. - _, err = sys.TaskService.FindRunByID(sys.Ctx, platform.ID(math.MaxUint64), runs[0].ID) - if err == nil { - t.Fatalf("expected %s but got %s instead", taskmodel.ErrRunNotFound, err) - } - - foundRun0, err := sys.TaskService.FindRunByID(sys.Ctx, task.ID, runs[0].ID) - if err != nil { - t.Fatal(err) - } - - if diff := cmp.Diff(foundRun0, runs[0]); diff != "" { - t.Fatalf("difference between listed run and found run: %s", diff) - } - }) - - t.Run("FindRunsByTime", func(t *testing.T) { - - t.Parallel() - ctx := icontext.SetAuthorizer(sys.Ctx, cr.Authorizer()) - ctx, err := feature.Annotate(ctx, influxdbmock.NewFlagger(map[feature.Flag]interface{}{ - feature.TimeFilterFlags(): true, - })) - require.NoError(t, err) - - // Script is set to run every minute. The platform adapter is currently hardcoded to schedule after "now", - // which makes timing of runs somewhat difficult. 
- ct := taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - Flux: fmt.Sprintf(scriptFmt, 0), - OwnerID: cr.UserID, - } - task, err := sys.TaskService.CreateTask(ctx, ct) - if err != nil { - t.Fatal(err) - } - - // set to one hour before now because of bucket retention policy - scheduledFor := time.Now().Add(time.Hour * -1).UTC() - runs := make([]*taskmodel.Run, 0, 5) - // create runs to put into Context - for i := 5; i > 0; i-- { - run, err := sys.TaskControlService.CreateRun(ctx, task.ID, scheduledFor.Add(time.Second*time.Duration(i)), scheduledFor.Add(time.Second*time.Duration(i))) - if err != nil { - t.Fatal(err) - } - err = sys.TaskControlService.UpdateRunState(ctx, task.ID, run.ID, scheduledFor.Add(time.Second*time.Duration(i+1)), taskmodel.RunStarted) - if err != nil { - t.Fatal(err) - } - err = sys.TaskControlService.UpdateRunState(ctx, task.ID, run.ID, scheduledFor.Add(time.Second*time.Duration(i+2)), taskmodel.RunSuccess) - if err != nil { - t.Fatal(err) - } - // setting run in memory to match the fields in Context - run.StartedAt = scheduledFor.Add(time.Second * time.Duration(i+1)) - run.FinishedAt = scheduledFor.Add(time.Second * time.Duration(i+2)) - run.RunAt = scheduledFor.Add(time.Second * time.Duration(i)) - run.Status = taskmodel.RunSuccess.String() - run.Log = nil - - if sys.CallFinishRun { - run, err = sys.TaskControlService.FinishRun(ctx, task.ID, run.ID) - if err != nil { - t.Fatal(err) - } - // Analytical storage does not store run at - run.RunAt = time.Time{} - } - - runs = append(runs, run) - } - - found, _, err := sys.TaskService.FindRuns(ctx, - taskmodel.RunFilter{ - Task: task.ID, - Limit: 2, - AfterTime: scheduledFor.Add(time.Second * time.Duration(1)).Format(time.RFC3339), - BeforeTime: scheduledFor.Add(time.Second * time.Duration(4)).Format(time.RFC3339), - }) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, runs[2:4], found) - - }) - - t.Run("ForceRun", func(t *testing.T) { - t.Parallel() - - ct := taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - Flux: fmt.Sprintf(scriptFmt, 0), - OwnerID: cr.UserID, - } - task, err := sys.TaskService.CreateTask(icontext.SetAuthorizer(sys.Ctx, cr.Authorizer()), ct) - if err != nil { - t.Fatal(err) - } - - const scheduledFor = 77 - r, err := sys.TaskService.ForceRun(sys.Ctx, task.ID, scheduledFor) - if err != nil { - t.Fatal(err) - } - exp, _ := time.Parse(time.RFC3339, "1970-01-01T00:01:17Z") - if r.ScheduledFor != exp { - t.Fatalf("expected: 1970-01-01T00:01:17Z, got %s", r.ScheduledFor) - } - - // Forcing the same run before it's executed should be rejected. - if _, err = sys.TaskService.ForceRun(sys.Ctx, task.ID, scheduledFor); err == nil { - t.Fatalf("subsequent force should have been rejected; failed to error: %s", task.ID) - } - }) - - t.Run("FindLogs", func(t *testing.T) { - t.Parallel() - - ct := taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - Flux: fmt.Sprintf(scriptFmt, 0), - OwnerID: cr.UserID, - } - task, err := sys.TaskService.CreateTask(icontext.SetAuthorizer(sys.Ctx, cr.Authorizer()), ct) - if err != nil { - t.Fatal(err) - } - - requestedAt := time.Now().Add(time.Hour * -1).UTC() // This should guarantee we can make a run. - - // Create two runs. 
- rc1, err := sys.TaskControlService.CreateRun(sys.Ctx, task.ID, requestedAt, requestedAt.Add(time.Second)) - if err != nil { - t.Fatal(err) - } - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc1.ID, time.Now(), taskmodel.RunStarted); err != nil { - t.Fatal(err) - } - - rc2, err := sys.TaskControlService.CreateRun(sys.Ctx, task.ID, requestedAt, requestedAt.Add(time.Second)) - if err != nil { - t.Fatal(err) - } - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc2.ID, time.Now(), taskmodel.RunStarted); err != nil { - t.Fatal(err) - } - // Add a log for the first run. - log1Time := time.Now().UTC() - if err := sys.TaskControlService.AddRunLog(sys.Ctx, task.ID, rc1.ID, log1Time, "entry 1"); err != nil { - t.Fatal(err) - } - - // Ensure it is returned when filtering logs by run ID. - logs, _, err := sys.TaskService.FindLogs(sys.Ctx, taskmodel.LogFilter{ - Task: task.ID, - Run: &rc1.ID, - }) - if err != nil { - t.Fatal(err) - } - - expLine1 := &taskmodel.Log{RunID: rc1.ID, Time: log1Time.Format(time.RFC3339Nano), Message: "entry 1"} - exp := []*taskmodel.Log{expLine1} - if diff := cmp.Diff(logs, exp); diff != "" { - t.Fatalf("unexpected log: -got/+want: %s", diff) - } - - // Add a log for the second run. - log2Time := time.Now().UTC() - if err := sys.TaskControlService.AddRunLog(sys.Ctx, task.ID, rc2.ID, log2Time, "entry 2"); err != nil { - t.Fatal(err) - } - - // Ensure both returned when filtering logs by task ID. - logs, _, err = sys.TaskService.FindLogs(sys.Ctx, taskmodel.LogFilter{ - Task: task.ID, - }) - if err != nil { - t.Fatal(err) - } - expLine2 := &taskmodel.Log{RunID: rc2.ID, Time: log2Time.Format(time.RFC3339Nano), Message: "entry 2"} - exp = []*taskmodel.Log{expLine1, expLine2} - if diff := cmp.Diff(logs, exp); diff != "" { - t.Fatalf("unexpected log: -got/+want: %s", diff) - } - }) -} - -func testTaskConcurrency(t *testing.T, sys *System) { - cr := creds(t, sys) - - const numTasks = 450 // Arbitrarily chosen to get a reasonable count of concurrent creates and deletes. - createTaskCh := make(chan taskmodel.TaskCreate, numTasks) - - // Since this test is run in parallel with other tests, - // we need to keep a whitelist of IDs that are okay to delete. - // This only matters when the creds function returns an identical user/org from another test. - var idMu sync.Mutex - taskIDs := make(map[platform.ID]struct{}) - - var createWg sync.WaitGroup - for i := 0; i < runtime.GOMAXPROCS(0); i++ { - createWg.Add(1) - go func() { - defer createWg.Done() - aCtx := icontext.SetAuthorizer(sys.Ctx, cr.Authorizer()) - for ct := range createTaskCh { - task, err := sys.TaskService.CreateTask(aCtx, ct) - if err != nil { - t.Errorf("error creating task: %v", err) - continue - } - idMu.Lock() - taskIDs[task.ID] = struct{}{} - idMu.Unlock() - } - }() - } - - // Signal for non-creator goroutines to stop. - quitCh := make(chan struct{}) - go func() { - createWg.Wait() - close(quitCh) - }() - - var extraWg sync.WaitGroup - // Get all the tasks, and delete the first one we find. - extraWg.Add(1) - go func() { - defer extraWg.Done() - - deleted := 0 - defer func() { - t.Logf("Concurrently deleted %d tasks", deleted) - }() - for { - // Check if we need to quit. - select { - case <-quitCh: - return - default: - } - - // Get all the tasks. 
- tasks, _, err := sys.TaskService.FindTasks(sys.Ctx, taskmodel.TaskFilter{OrganizationID: &cr.OrgID}) - if err != nil { - t.Errorf("error finding tasks: %v", err) - return - } - if len(tasks) == 0 { - continue - } - - // Check again if we need to quit. - select { - case <-quitCh: - return - default: - } - - for _, tsk := range tasks { - // Was the retrieved task an ID we're allowed to delete? - idMu.Lock() - _, ok := taskIDs[tsk.ID] - idMu.Unlock() - if !ok { - continue - } - - // Task was in whitelist. Delete it from the TaskService. - // We could remove it from the taskIDs map, but this test is short-lived enough - // that clearing out the map isn't really worth taking the lock again. - if err := sys.TaskService.DeleteTask(sys.Ctx, tsk.ID); err != nil { - t.Errorf("error deleting task: %v", err) - return - } - deleted++ - - // Wait just a tiny bit. - time.Sleep(time.Millisecond) - break - } - } - }() - - extraWg.Add(1) - go func() { - defer extraWg.Done() - - runsCreated := 0 - defer func() { - t.Logf("Concurrently created %d runs", runsCreated) - }() - for { - // Check if we need to quit. - select { - case <-quitCh: - return - default: - } - - // Get all the tasks. - tasks, _, err := sys.TaskService.FindTasks(sys.Ctx, taskmodel.TaskFilter{OrganizationID: &cr.OrgID}) - if err != nil { - t.Errorf("error finding tasks: %v", err) - return - } - if len(tasks) == 0 { - continue - } - - // Check again if we need to quit. - select { - case <-quitCh: - return - default: - } - - // Create a run for the last task we found. - // The script should run every minute, so use max now. - var tid platform.ID - idMu.Lock() - for i := len(tasks) - 1; i >= 0; i-- { - _, ok := taskIDs[tasks[i].ID] - if ok { - tid = tasks[i].ID - break - } - } - idMu.Unlock() - if !tid.Valid() { - continue - } - if _, err := sys.TaskControlService.CreateRun(sys.Ctx, tid, time.Unix(253339232461, 0), time.Unix(253339232469, 1)); err != nil { - // This may have errored due to the task being deleted. Check if the task still exists. - - if _, err2 := sys.TaskService.FindTaskByID(sys.Ctx, tid); err2 == taskmodel.ErrTaskNotFound { - // It was deleted. Just continue. - continue - } - // Otherwise, we were able to find the task, so something went wrong here. - t.Errorf("error creating next run: %v", err) - return - } - runsCreated++ - - // Wait just a tiny bit. - time.Sleep(time.Millisecond) - } - }() - - // Start adding tasks. - for i := 0; i < numTasks; i++ { - createTaskCh <- taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - Flux: fmt.Sprintf(scriptFmt, i), - OwnerID: cr.UserID, - } - } - - // Done adding. Wait for cleanup. - close(createTaskCh) - createWg.Wait() - extraWg.Wait() -} - -func testManualRun(t *testing.T, s *System) { - cr := creds(t, s) - - // Create a task. 
- tc := taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - Flux: fmt.Sprintf(scriptFmt, 0), - OwnerID: cr.UserID, - } - - authorizedCtx := icontext.SetAuthorizer(s.Ctx, cr.Authorizer()) - - tsk, err := s.TaskService.CreateTask(authorizedCtx, tc) - if err != nil { - t.Fatal(err) - } - if !tsk.ID.Valid() { - t.Fatal("no task ID set") - } - - scheduledFor := int64(77) - run, err := s.TaskService.ForceRun(authorizedCtx, tsk.ID, scheduledFor) - if err != nil { - t.Fatal(err) - } - - exp, _ := time.Parse(time.RFC3339, "1970-01-01T00:01:17Z") - if run.ScheduledFor != exp { - t.Fatalf("force run returned a different scheduled for time expected: %s, got %s", exp, run.ScheduledFor) - } - - runs, err := s.TaskControlService.ManualRuns(authorizedCtx, tsk.ID) - if err != nil { - t.Fatal(err) - } - if len(runs) != 1 { - t.Fatalf("expected 1 manual run: got %d", len(runs)) - } - if runs[0].ID != run.ID { - diff := cmp.Diff(runs[0], run) - t.Fatalf("manual run missmatch: %s", diff) - } -} - -func testRunStorage(t *testing.T, sys *System) { - cr := creds(t, sys) - - // Script is set to run every minute. The platform adapter is currently hardcoded to schedule after "now", - // which makes timing of runs somewhat difficult. - ct := taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - Flux: fmt.Sprintf(scriptFmt, 0), - OwnerID: cr.UserID, - } - task, err := sys.TaskService.CreateTask(icontext.SetAuthorizer(sys.Ctx, cr.Authorizer()), ct) - if err != nil { - t.Fatal(err) - } - - // check run filter errors - _, _, err0 := sys.TaskService.FindRuns(sys.Ctx, taskmodel.RunFilter{Task: task.ID, Limit: -1}) - if err0 != taskmodel.ErrOutOfBoundsLimit { - t.Fatalf("failed to error with out of bounds run limit: %d", -1) - } - - _, _, err1 := sys.TaskService.FindRuns(sys.Ctx, taskmodel.RunFilter{Task: task.ID, Limit: taskmodel.TaskMaxPageSize + 1}) - if err1 != taskmodel.ErrOutOfBoundsLimit { - t.Fatalf("failed to error with out of bounds run limit: %d", taskmodel.TaskMaxPageSize+1) - } - - requestedAt := time.Now().Add(time.Hour * -1).UTC() // This should guarantee we can make two runs. - - rc0, err := sys.TaskControlService.CreateRun(sys.Ctx, task.ID, requestedAt, requestedAt.Add(time.Second)) - if err != nil { - t.Fatal(err) - } - if rc0.TaskID != task.ID { - t.Fatalf("wrong task ID on created task: got %s, want %s", rc0.TaskID, task.ID) - } - - startedAt := time.Now().UTC().Add(time.Second * -10) - - // Update the run state to Started; normally the scheduler would do this. - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc0.ID, startedAt, taskmodel.RunStarted); err != nil { - t.Fatal(err) - } - - rc1, err := sys.TaskControlService.CreateRun(sys.Ctx, task.ID, requestedAt, requestedAt.Add(time.Second)) - if err != nil { - t.Fatal(err) - } - if rc1.TaskID != task.ID { - t.Fatalf("wrong task ID on created task run: got %s, want %s", rc1.TaskID, task.ID) - } - - // Update the run state to Started; normally the scheduler would do this. - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc1.ID, startedAt.Add(time.Second), taskmodel.RunStarted); err != nil { - t.Fatal(err) - } - - // Mark the second run finished. - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc1.ID, startedAt.Add(time.Second*2), taskmodel.RunFail); err != nil { - t.Fatal(err) - } - - if _, err := sys.TaskControlService.FinishRun(sys.Ctx, task.ID, rc1.ID); err != nil { - t.Fatal(err) - } - - // Limit 1 should only return the earlier run. 
- runs, _, err := sys.TaskService.FindRuns(sys.Ctx, taskmodel.RunFilter{Task: task.ID, Limit: 1})
- if err != nil {
- t.Fatal(err)
- }
- if len(runs) != 1 {
- t.Fatalf("expected 1 run, got %v", runs)
- }
- if runs[0].ID != rc0.ID {
- t.Fatalf("retrieved wrong run ID; want %s, got %s", rc0.ID, runs[0].ID)
- }
- if runs[0].StartedAt != startedAt {
- t.Fatalf("unexpected StartedAt; want %s, got %s", startedAt, runs[0].StartedAt)
- }
- if runs[0].Status != taskmodel.RunStarted.String() {
- t.Fatalf("unexpected run status; want %s, got %s", taskmodel.RunStarted.String(), runs[0].Status)
- }
-
- if !runs[0].FinishedAt.IsZero() {
- t.Fatalf("expected empty FinishedAt, got %q", runs[0].FinishedAt)
- }
-
- // Create 3rd run and test limiting to 2 runs
- rc2, err := sys.TaskControlService.CreateRun(sys.Ctx, task.ID, requestedAt, requestedAt.Add(time.Second))
- if err != nil {
- t.Fatal(err)
- }
- if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc2.ID, startedAt.Add(time.Second*3), taskmodel.RunStarted); err != nil {
- t.Fatal(err)
- }
-
- if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc2.ID, startedAt.Add(time.Second*4), taskmodel.RunSuccess); err != nil {
- t.Fatal(err)
- }
- if _, err := sys.TaskControlService.FinishRun(sys.Ctx, task.ID, rc2.ID); err != nil {
- t.Fatal(err)
- }
-
- runsLimit2, _, err := sys.TaskService.FindRuns(sys.Ctx, taskmodel.RunFilter{Task: task.ID, Limit: 2})
- if err != nil {
- t.Fatal(err)
- }
- if len(runsLimit2) != 2 {
- t.Fatalf("expected 2 runs, got %v", runsLimit2)
- }
- if runsLimit2[0].ID != rc0.ID {
- t.Fatalf("retrieved wrong run ID; want %s, got %s", rc0.ID, runsLimit2[0].ID)
- }
-
- // Unspecified limit returns all three runs, sorted by most recently scheduled first.
- runs, _, err = sys.TaskService.FindRuns(sys.Ctx, taskmodel.RunFilter{Task: task.ID})
-
- if err != nil {
- t.Fatal(err)
- }
- if len(runs) != 3 {
- t.Fatalf("expected 3 runs, got %v", runs)
- }
- if runs[0].ID != rc0.ID {
- t.Fatalf("retrieved wrong run ID; want %s, got %s", rc0.ID, runs[0].ID)
- }
- if runs[0].StartedAt != startedAt {
- t.Fatalf("unexpected StartedAt; want %s, got %s", startedAt, runs[0].StartedAt)
- }
- if runs[0].Status != taskmodel.RunStarted.String() {
- t.Fatalf("unexpected run status; want %s, got %s", taskmodel.RunStarted.String(), runs[0].Status)
- }
- // TODO (al): handle empty finishedAt
- // if runs[0].FinishedAt != "" {
- // t.Fatalf("expected empty FinishedAt, got %q", runs[0].FinishedAt)
- // }
-
- if runs[2].ID != rc1.ID {
- t.Fatalf("retrieved wrong run ID; want %s, got %s", rc1.ID, runs[2].ID)
- }
-
- if exp := startedAt.Add(time.Second); runs[2].StartedAt != exp {
- t.Fatalf("unexpected StartedAt; want %s, got %s", exp, runs[2].StartedAt)
- }
- if runs[2].Status != taskmodel.RunFail.String() {
- t.Fatalf("unexpected run status; want %s, got %s", taskmodel.RunFail.String(), runs[2].Status)
- }
- if exp := startedAt.Add(time.Second * 2); runs[2].FinishedAt != exp {
- t.Fatalf("unexpected FinishedAt; want %s, got %s", exp, runs[2].FinishedAt)
- }
-
- // Look for a run that doesn't exist.
- _, err = sys.TaskService.FindRunByID(sys.Ctx, task.ID, platform.ID(math.MaxUint64))
- if err == nil {
- t.Fatalf("expected %s but got %s instead", taskmodel.ErrRunNotFound, err)
- }
-
- // look for a taskID that doesn't exist. 
- _, err = sys.TaskService.FindRunByID(sys.Ctx, platform.ID(math.MaxUint64), runs[0].ID) - if err == nil { - t.Fatalf("expected %s but got %s instead", taskmodel.ErrRunNotFound, err) - } - - foundRun0, err := sys.TaskService.FindRunByID(sys.Ctx, task.ID, runs[0].ID) - if err != nil { - t.Fatal(err) - } - - if diff := cmp.Diff(foundRun0, runs[0]); diff != "" { - t.Fatalf("difference between listed run and found run: %s", diff) - } - - foundRun1, err := sys.TaskService.FindRunByID(sys.Ctx, task.ID, runs[1].ID) - if err != nil { - t.Fatal(err) - } - if diff := cmp.Diff(foundRun1, runs[1]); diff != "" { - t.Fatalf("difference between listed run and found run: %s", diff) - } -} - -func testRetryAcrossStorage(t *testing.T, sys *System) { - cr := creds(t, sys) - - // Script is set to run every minute. - ct := taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - Flux: fmt.Sprintf(scriptFmt, 0), - OwnerID: cr.UserID, - } - task, err := sys.TaskService.CreateTask(icontext.SetAuthorizer(sys.Ctx, cr.Authorizer()), ct) - if err != nil { - t.Fatal(err) - } - // Non-existent ID should return the right error. - _, err = sys.TaskService.RetryRun(sys.Ctx, task.ID, platform.ID(math.MaxUint64)) - if !strings.Contains(err.Error(), "run not found") { - t.Errorf("expected retrying run that doesn't exist to return %v, got %v", taskmodel.ErrRunNotFound, err) - } - - requestedAt := time.Now().Add(time.Hour * -1).UTC() // This should guarantee we can make a run. - - rc, err := sys.TaskControlService.CreateRun(sys.Ctx, task.ID, requestedAt, requestedAt.Add(time.Second)) - if err != nil { - t.Fatal(err) - } - if rc.TaskID != task.ID { - t.Fatalf("wrong task ID on created task: got %s, want %s", rc.TaskID, task.ID) - } - - startedAt := time.Now().UTC() - - // Update the run state to Started then Failed; normally the scheduler would do this. - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc.ID, startedAt, taskmodel.RunStarted); err != nil { - t.Fatal(err) - } - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc.ID, startedAt.Add(time.Second), taskmodel.RunFail); err != nil { - t.Fatal(err) - } - if _, err := sys.TaskControlService.FinishRun(sys.Ctx, task.ID, rc.ID); err != nil { - t.Fatal(err) - } - - // Now retry the run. - m, err := sys.TaskService.RetryRun(sys.Ctx, task.ID, rc.ID) - if err != nil { - t.Fatal(err) - } - if m.TaskID != task.ID { - t.Fatalf("wrong task ID on retried run: got %s, want %s", m.TaskID, task.ID) - } - if m.Status != "scheduled" { - t.Fatal("expected new retried run to have status of scheduled") - } - - if m.ScheduledFor != rc.ScheduledFor { - t.Fatalf("wrong scheduledFor on task: got %s, want %s", m.ScheduledFor, rc.ScheduledFor) - } - - exp := taskmodel.RequestStillQueuedError{Start: rc.ScheduledFor.Unix(), End: rc.ScheduledFor.Unix()} - - // Retrying a run which has been queued but not started, should be rejected. - if _, err = sys.TaskService.RetryRun(sys.Ctx, task.ID, rc.ID); err != exp && err.Error() != "run already queued" { - t.Fatalf("subsequent retry should have been rejected with %v; got %v", exp, err) - } -} - -func testLogsAcrossStorage(t *testing.T, sys *System) { - cr := creds(t, sys) - - // Script is set to run every minute. The platform adapter is currently hardcoded to schedule after "now", - // which makes timing of runs somewhat difficult. 
- ct := taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - Flux: fmt.Sprintf(scriptFmt, 0), - OwnerID: cr.UserID, - } - task, err := sys.TaskService.CreateTask(icontext.SetAuthorizer(sys.Ctx, cr.Authorizer()), ct) - if err != nil { - t.Fatal(err) - } - - requestedAt := time.Now().Add(time.Hour * -1).UTC() // This should guarantee we can make two runs. - - rc0, err := sys.TaskControlService.CreateRun(sys.Ctx, task.ID, requestedAt, requestedAt.Add(time.Second)) - if err != nil { - t.Fatal(err) - } - if rc0.TaskID != task.ID { - t.Fatalf("wrong task ID on created task: got %s, want %s", rc0.TaskID, task.ID) - } - - startedAt := time.Now().UTC() - - // Update the run state to Started; normally the scheduler would do this. - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc0.ID, startedAt, taskmodel.RunStarted); err != nil { - t.Fatal(err) - } - - rc1, err := sys.TaskControlService.CreateRun(sys.Ctx, task.ID, requestedAt, requestedAt.Add(time.Second)) - if err != nil { - t.Fatal(err) - } - if rc1.TaskID != task.ID { - t.Fatalf("wrong task ID on created task run: got %s, want %s", rc1.TaskID, task.ID) - } - - // Update the run state to Started; normally the scheduler would do this. - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc1.ID, startedAt, taskmodel.RunStarted); err != nil { - t.Fatal(err) - } - - // Mark the second run finished. - if err := sys.TaskControlService.UpdateRunState(sys.Ctx, task.ID, rc1.ID, startedAt.Add(time.Second), taskmodel.RunSuccess); err != nil { - t.Fatal(err) - } - - // Create several run logs in both rc0 and rc1 - // We can then finalize rc1 and ensure that both the transactional (currently running logs) can be found with analytical (completed) logs. - sys.TaskControlService.AddRunLog(sys.Ctx, task.ID, rc0.ID, time.Now(), "0-0") - sys.TaskControlService.AddRunLog(sys.Ctx, task.ID, rc0.ID, time.Now(), "0-1") - sys.TaskControlService.AddRunLog(sys.Ctx, task.ID, rc0.ID, time.Now(), "0-2") - sys.TaskControlService.AddRunLog(sys.Ctx, task.ID, rc1.ID, time.Now(), "1-0") - sys.TaskControlService.AddRunLog(sys.Ctx, task.ID, rc1.ID, time.Now(), "1-1") - sys.TaskControlService.AddRunLog(sys.Ctx, task.ID, rc1.ID, time.Now(), "1-2") - sys.TaskControlService.AddRunLog(sys.Ctx, task.ID, rc1.ID, time.Now(), "1-3") - if _, err := sys.TaskControlService.FinishRun(sys.Ctx, task.ID, rc1.ID); err != nil { - t.Fatal(err) - } - - logs, _, err := sys.TaskService.FindLogs(sys.Ctx, taskmodel.LogFilter{Task: task.ID}) - if err != nil { - t.Fatal(err) - } - if len(logs) != 7 { - for _, log := range logs { - t.Logf("log: %+v\n", log) - } - t.Fatalf("failed to get all logs: expected: 7 got: %d", len(logs)) - } - smash := func(logs []*taskmodel.Log) string { - smashed := "" - for _, log := range logs { - smashed = smashed + log.Message - } - return smashed - } - if smash(logs) != "0-00-10-21-01-11-21-3" { - t.Fatalf("log contents not acceptable, expected: %q, got: %q", "0-00-10-21-01-11-21-3", smash(logs)) - } - - logs, _, err = sys.TaskService.FindLogs(sys.Ctx, taskmodel.LogFilter{Task: task.ID, Run: &rc1.ID}) - if err != nil { - t.Fatal(err) - } - if len(logs) != 4 { - t.Fatalf("failed to get all logs: expected: 4 got: %d", len(logs)) - } - - if smash(logs) != "1-01-11-21-3" { - t.Fatalf("log contents not acceptable, expected: %q, got: %q", "1-01-11-21-3", smash(logs)) - } - - logs, _, err = sys.TaskService.FindLogs(sys.Ctx, taskmodel.LogFilter{Task: task.ID, Run: &rc0.ID}) - if err != nil { - t.Fatal(err) - } - if len(logs) != 3 { - t.Fatalf("failed 
to get all logs: expected: 3 got: %d", len(logs)) - } - - if smash(logs) != "0-00-10-2" { - t.Fatalf("log contents not acceptable, expected: %q, got: %q", "0-00-10-2", smash(logs)) - } - -} - -func creds(t *testing.T, s *System) TestCreds { - // t.Helper() - - if s.CredsFunc == nil { - u := &influxdb.User{Name: t.Name() + "-user"} - if err := s.UserService.CreateUser(s.Ctx, u); err != nil { - t.Fatal(err) - } - o := &influxdb.Organization{Name: t.Name() + "-org"} - if err := s.OrganizationService.CreateOrganization(s.Ctx, o); err != nil { - t.Fatal(err) - } - - if err := s.UserResourceMappingService.CreateUserResourceMapping(s.Ctx, &influxdb.UserResourceMapping{ - ResourceType: influxdb.OrgsResourceType, - ResourceID: o.ID, - UserID: u.ID, - UserType: influxdb.Owner, - }); err != nil { - t.Fatal(err) - } - - authz := influxdb.Authorization{ - OrgID: o.ID, - UserID: u.ID, - Permissions: influxdb.OperPermissions(), - } - if err := s.AuthorizationService.CreateAuthorization(context.Background(), &authz); err != nil { - t.Fatal(err) - } - return TestCreds{ - OrgID: o.ID, - Org: o.Name, - UserID: u.ID, - AuthorizationID: authz.ID, - Token: authz.Token, - } - } - - c, err := s.CredsFunc(t) - if err != nil { - t.Fatal(err) - } - return c -} - -const ( - scriptFmt = `option task = {name: "task #%d", cron: "* * * * *", offset: 5s, concurrency: 100} - -// This comment should persist. -from(bucket: "b") - |> to(bucket: "two", orgID: "000000000000000") -` - - scriptDifferentName = `option task = {name: "task-changed #%d", cron: "* * * * *", offset: 5s, concurrency: 100} - -// This comment should persist. -from(bucket: "b") - |> to(bucket: "two", orgID: "000000000000000") -` -) - -func testTaskType(t *testing.T, sys *System) { - cr := creds(t, sys) - authorizedCtx := icontext.SetAuthorizer(sys.Ctx, cr.Authorizer()) - - // Create a tasks - ts := taskmodel.TaskCreate{ - OrganizationID: cr.OrgID, - Flux: fmt.Sprintf(scriptFmt, 0), - OwnerID: cr.UserID, - } - - tsk, err := sys.TaskService.CreateTask(authorizedCtx, ts) - if err != nil { - t.Fatal(err) - } - if !tsk.ID.Valid() { - t.Fatal("no task ID set") - } - - tc := taskmodel.TaskCreate{ - Type: "cows", - OrganizationID: cr.OrgID, - Flux: fmt.Sprintf(scriptFmt, 0), - OwnerID: cr.UserID, - } - - tskCow, err := sys.TaskService.CreateTask(authorizedCtx, tc) - if err != nil { - t.Fatal(err) - } - if !tskCow.ID.Valid() { - t.Fatal("no task ID set") - } - - tp := taskmodel.TaskCreate{ - Type: "pigs", - OrganizationID: cr.OrgID, - Flux: fmt.Sprintf(scriptFmt, 0), - OwnerID: cr.UserID, - } - - tskPig, err := sys.TaskService.CreateTask(authorizedCtx, tp) - if err != nil { - t.Fatal(err) - } - if !tskPig.ID.Valid() { - t.Fatal("no task ID set") - } - - // get system tasks (or task's with no type) - tasks, _, err := sys.TaskService.FindTasks(sys.Ctx, taskmodel.TaskFilter{OrganizationID: &cr.OrgID, Type: &taskmodel.TaskSystemType}) - if err != nil { - t.Fatal(err) - } - - for _, task := range tasks { - if task.Type != "" && task.Type != taskmodel.TaskSystemType { - t.Fatal("received a task with a type when sending no type restriction") - } - } - - // get filtered tasks - tasks, _, err = sys.TaskService.FindTasks(sys.Ctx, taskmodel.TaskFilter{OrganizationID: &cr.OrgID, Type: &tc.Type}) - if err != nil { - t.Fatal(err) - } - - if len(tasks) != 1 { - fmt.Printf("tasks: %+v\n", tasks) - t.Fatalf("failed to return tasks by type, expected 1, got %d", len(tasks)) - } - - // get all tasks - tasks, _, err = sys.TaskService.FindTasks(sys.Ctx, 
taskmodel.TaskFilter{OrganizationID: &cr.OrgID})
- if err != nil {
- t.Fatal(err)
- }
-
- if len(tasks) != 3 {
- t.Fatalf("failed to return tasks with wildcard, expected 3, got %d", len(tasks))
- }
-}
diff --git a/task/taskmodel/task.go b/task/taskmodel/task.go
deleted file mode 100644
index c1c88ddf407..00000000000
--- a/task/taskmodel/task.go
+++ /dev/null
@@ -1,548 +0,0 @@
-package taskmodel
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "strconv"
- "time"
-
- "github.com/influxdata/flux/ast"
- "github.com/influxdata/flux/ast/astutil"
- "github.com/influxdata/flux/ast/edit"
- "github.com/influxdata/influxdb/v2/kit/platform"
- errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors"
- "github.com/influxdata/influxdb/v2/query/fluxlang"
- "github.com/influxdata/influxdb/v2/task/options"
-)
-
-const (
- TaskDefaultPageSize = 100
- TaskMaxPageSize = 500
-
- // TODO(jsteenb2): make these constants of type Status
-
- TaskStatusActive = "active"
- TaskStatusInactive = "inactive"
-)
-
-var (
- // TaskSystemType is the type set on tasks for all CRUD requests
- TaskSystemType = "system"
- // TaskBasicType is short-hand used by the UI to request a minimal subset of system task metadata
- TaskBasicType = "basic"
-)
-
-// Task is a task. 🎊
-type Task struct {
- ID platform.ID `json:"id"`
- Type string `json:"type,omitempty"`
- OrganizationID platform.ID `json:"orgID"`
- Organization string `json:"org"`
- OwnerID platform.ID `json:"ownerID"`
- Name string `json:"name"`
- Description string `json:"description,omitempty"`
- Status string `json:"status"`
- Flux string `json:"flux"`
- Every string `json:"every,omitempty"`
- Cron string `json:"cron,omitempty"`
- Offset time.Duration `json:"offset,omitempty"`
- LatestCompleted time.Time `json:"latestCompleted,omitempty"`
- LatestScheduled time.Time `json:"latestScheduled,omitempty"`
- LatestSuccess time.Time `json:"latestSuccess,omitempty"`
- LatestFailure time.Time `json:"latestFailure,omitempty"`
- LastRunStatus string `json:"lastRunStatus,omitempty"`
- LastRunError string `json:"lastRunError,omitempty"`
- CreatedAt time.Time `json:"createdAt,omitempty"`
- UpdatedAt time.Time `json:"updatedAt,omitempty"`
- Metadata map[string]interface{} `json:"metadata,omitempty"`
-}
-
-// EffectiveCron returns the effective cron string of the options.
-// If the cron option was specified, it is returned.
-// If the every option was specified, it is converted into a cron string using "@every".
-// Otherwise, the empty string is returned.
-// The value of the offset option is not considered.
-func (t *Task) EffectiveCron() string {
- if t.Cron != "" {
- return t.Cron
- }
- if t.Every != "" {
- return "@every " + t.Every
- }
- return ""
-}
-
-// Run is a record created when a run of a task is scheduled. 
-type Run struct { - ID platform.ID `json:"id,omitempty"` - TaskID platform.ID `json:"taskID"` - Status string `json:"status"` - ScheduledFor time.Time `json:"scheduledFor"` // ScheduledFor is the Now time used in the task's query - RunAt time.Time `json:"runAt"` // RunAt is the time the task is scheduled to be run, which is ScheduledFor + Offset - Flux string `json:"flux"` // Flux used in run - StartedAt time.Time `json:"startedAt,omitempty"` // StartedAt is the time the executor begins running the task - FinishedAt time.Time `json:"finishedAt,omitempty"` // FinishedAt is the time the executor finishes running the task - RequestedAt time.Time `json:"requestedAt,omitempty"` // RequestedAt is the time the coordinator told the scheduler to schedule the task - Log []Log `json:"log,omitempty"` - - TraceID string `json:"traceID"` // TraceID preserves the trace id - IsSampled bool `json:"isSampled"` // IsSampled preserves whether this run was sampled -} - -// Log represents a link to a log resource -type Log struct { - RunID platform.ID `json:"runID,omitempty"` - Time string `json:"time"` - Message string `json:"message"` -} - -func (l Log) String() string { - return l.Time + ": " + l.Message -} - -// TaskService represents a service for managing one-off and recurring tasks. -type TaskService interface { - // FindTaskByID returns a single task - FindTaskByID(ctx context.Context, id platform.ID) (*Task, error) - - // FindTasks returns a list of tasks that match a filter (limit 100) and the total count - // of matching tasks. - FindTasks(ctx context.Context, filter TaskFilter) ([]*Task, int, error) - - // CreateTask creates a new task. - // The owner of the task is inferred from the authorizer associated with ctx. - CreateTask(ctx context.Context, t TaskCreate) (*Task, error) - - // UpdateTask updates a single task with changeset. - UpdateTask(ctx context.Context, id platform.ID, upd TaskUpdate) (*Task, error) - - // DeleteTask removes a task by ID and purges all associated data and scheduled runs. - DeleteTask(ctx context.Context, id platform.ID) error - - // FindLogs returns logs for a run. - FindLogs(ctx context.Context, filter LogFilter) ([]*Log, int, error) - - // FindRuns returns a list of runs that match a filter and the total count of returned runs. - FindRuns(ctx context.Context, filter RunFilter) ([]*Run, int, error) - - // FindRunByID returns a single run. - FindRunByID(ctx context.Context, taskID, runID platform.ID) (*Run, error) - - // CancelRun cancels a currently running run. - CancelRun(ctx context.Context, taskID, runID platform.ID) error - - // RetryRun creates and returns a new run (which is a retry of another run). - RetryRun(ctx context.Context, taskID, runID platform.ID) (*Run, error) - - // ForceRun forces a run to occur with unix timestamp scheduledFor, to be executed as soon as possible. - // The value of scheduledFor may or may not align with the task's schedule. - ForceRun(ctx context.Context, taskID platform.ID, scheduledFor int64) (*Run, error) -} - -// TaskCreate is the set of values to create a task. -type TaskCreate struct { - Type string `json:"type,omitempty"` - Flux string `json:"flux"` - Description string `json:"description,omitempty"` - Status string `json:"status,omitempty"` - OrganizationID platform.ID `json:"orgID,omitempty"` - Organization string `json:"org,omitempty"` - OwnerID platform.ID `json:"-"` - Metadata map[string]interface{} `json:"-"` // not to be set through a web request but rather used by a http service using tasks backend. 
-} - -func (t TaskCreate) Validate() error { - switch { - case t.Flux == "": - return errors.New("missing flux") - case !t.OrganizationID.Valid() && t.Organization == "": - return errors.New("missing orgID and org") - case t.Status != "" && t.Status != TaskStatusActive && t.Status != TaskStatusInactive: - return fmt.Errorf("invalid task status: %q", t.Status) - } - return nil -} - -// TaskUpdate represents updates to a task. Options updates override any options set in the Flux field. -type TaskUpdate struct { - Flux *string `json:"flux,omitempty"` - Status *string `json:"status,omitempty"` - Description *string `json:"description,omitempty"` - - // LatestCompleted us to set latest completed on startup to skip task catchup - LatestCompleted *time.Time `json:"-"` - LatestScheduled *time.Time `json:"-"` - LatestSuccess *time.Time `json:"-"` - LatestFailure *time.Time `json:"-"` - LastRunStatus *string `json:"-"` - LastRunError *string `json:"-"` - Metadata map[string]interface{} `json:"-"` // not to be set through a web request but rather used by a http service using tasks backend. - - // Options gets unmarshalled from json as if it was flat, with the same level as Flux and Status. - Options options.Options // when we unmarshal this gets unmarshalled from flat key-values -} - -func (t *TaskUpdate) UnmarshalJSON(data []byte) error { - // this is a type so we can marshal string into durations nicely - jo := struct { - Flux *string `json:"flux,omitempty"` - Status *string `json:"status,omitempty"` - Name string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - - // Cron is a cron style time schedule that can be used in place of Every. - Cron string `json:"cron,omitempty"` - - // Every represents a fixed period to repeat execution. - // It gets marshalled from a string duration, i.e.: "10s" is 10 seconds - Every options.Duration `json:"every,omitempty"` - - // Offset represents a delay before execution. - // It gets marshalled from a string duration, i.e.: "10s" is 10 seconds - Offset *options.Duration `json:"offset,omitempty"` - - Concurrency *int64 `json:"concurrency,omitempty"` - - Retry *int64 `json:"retry,omitempty"` - }{} - - if err := json.Unmarshal(data, &jo); err != nil { - return err - } - t.Options.Name = jo.Name - t.Description = jo.Description - t.Options.Cron = jo.Cron - t.Options.Every = jo.Every - if jo.Offset != nil { - offset := *jo.Offset - t.Options.Offset = &offset - } - t.Options.Concurrency = jo.Concurrency - t.Options.Retry = jo.Retry - t.Flux = jo.Flux - t.Status = jo.Status - return nil -} - -func (t *TaskUpdate) MarshalJSON() ([]byte, error) { - jo := struct { - Flux *string `json:"flux,omitempty"` - Status *string `json:"status,omitempty"` - Name string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - - // Cron is a cron style time schedule that can be used in place of Every. - Cron string `json:"cron,omitempty"` - - // Every represents a fixed period to repeat execution. - Every options.Duration `json:"every,omitempty"` - - // Offset represents a delay before execution. 
- Offset *options.Duration `json:"offset,omitempty"` - - Concurrency *int64 `json:"concurrency,omitempty"` - - Retry *int64 `json:"retry,omitempty"` - }{} - jo.Name = t.Options.Name - jo.Cron = t.Options.Cron - jo.Every = t.Options.Every - jo.Description = t.Description - if t.Options.Offset != nil { - offset := *t.Options.Offset - jo.Offset = &offset - } - jo.Concurrency = t.Options.Concurrency - jo.Retry = t.Options.Retry - jo.Flux = t.Flux - jo.Status = t.Status - return json.Marshal(jo) -} - -func (t *TaskUpdate) Validate() error { - switch { - case !t.Options.Every.IsZero() && t.Options.Cron != "": - return errors.New("cannot specify both every and cron") - case !t.Options.Every.IsZero(): - if _, err := options.ParseSignedDuration(t.Options.Every.String()); err != nil { - return fmt.Errorf("every: %s is invalid", err) - } - case t.Options.Offset != nil && !t.Options.Offset.IsZero(): - if _, err := time.ParseDuration(t.Options.Offset.String()); err != nil { - return fmt.Errorf("offset: %s, %s is invalid, the largest unit supported is h", t.Options.Offset.String(), err) - } - case t.Flux == nil && t.Status == nil && t.Options.IsZero(): - return errors.New("cannot update task without content") - case t.Status != nil && *t.Status != TaskStatusActive && *t.Status != TaskStatusInactive: - return fmt.Errorf("invalid task status: %q", *t.Status) - } - return nil -} - -// safeParseSource calls the Flux parser.ParseSource function -// and is guaranteed not to panic. -func safeParseSource(parser fluxlang.FluxLanguageService, f string) (pkg *ast.Package, err error) { - if parser == nil { - return nil, &errors2.Error{ - Code: errors2.EInternal, - Msg: "flux parser is not configured; updating a task requires the flux parser to be set", - } - } - defer func() { - if r := recover(); r != nil { - err = &errors2.Error{ - Code: errors2.EInternal, - Msg: "internal error in flux engine; unable to parse", - } - } - }() - - return parser.Parse(f) -} - -// UpdateFlux updates the TaskUpdate to go from updating options to updating a -// flux string, that now has those updated options in it. It zeros the options -// in the TaskUpdate. 
-func (t *TaskUpdate) UpdateFlux(parser fluxlang.FluxLanguageService, oldFlux string) error { - return t.updateFlux(parser, oldFlux) -} - -func (t *TaskUpdate) updateFlux(parser fluxlang.FluxLanguageService, oldFlux string) error { - if t.Flux != nil && *t.Flux != "" { - oldFlux = *t.Flux - } - toDelete := map[string]struct{}{} - parsedPKG, err := safeParseSource(parser, oldFlux) - if err != nil { - return err - } - - parsed := parsedPKG.Files[0] - if !t.Options.Every.IsZero() && t.Options.Cron != "" { - return errors.New("cannot specify both cron and every") - } - op := make(map[string]ast.Expression, 4) - - if t.Options.Name != "" { - op["name"] = &ast.StringLiteral{Value: t.Options.Name} - } - if !t.Options.Every.IsZero() { - op["every"] = &t.Options.Every.Node - } - if t.Options.Cron != "" { - op["cron"] = &ast.StringLiteral{Value: t.Options.Cron} - } - if t.Options.Offset != nil { - if !t.Options.Offset.IsZero() { - op["offset"] = &t.Options.Offset.Node - } else { - toDelete["offset"] = struct{}{} - } - } - if len(op) > 0 || len(toDelete) > 0 { - editFunc := func(opt *ast.OptionStatement) (ast.Expression, error) { - a, ok := opt.Assignment.(*ast.VariableAssignment) - if !ok { - return nil, errors.New("option assignment must be variable assignment") - } - obj, ok := a.Init.(*ast.ObjectExpression) - if !ok { - return nil, fmt.Errorf("value is is %s, not an object expression", a.Init.Type()) - } - // modify in the keys and values that already are in the ast - for i, p := range obj.Properties { - k := p.Key.Key() - if _, ok := toDelete[k]; ok { - obj.Properties = append(obj.Properties[:i], obj.Properties[i+1:]...) - } - switch k { - case "name": - if name, ok := op["name"]; ok && t.Options.Name != "" { - delete(op, "name") - p.Value = name - } - case "offset": - if offset, ok := op["offset"]; ok && t.Options.Offset != nil { - delete(op, "offset") - p.Value = offset.Copy().(*ast.DurationLiteral) - } - case "every": - if every, ok := op["every"]; ok && !t.Options.Every.IsZero() { - p.Value = every.Copy().(*ast.DurationLiteral) - delete(op, "every") - } else if cron, ok := op["cron"]; ok && t.Options.Cron != "" { - delete(op, "cron") - p.Value = cron - p.Key = &ast.Identifier{Name: "cron"} - } - case "cron": - if cron, ok := op["cron"]; ok && t.Options.Cron != "" { - delete(op, "cron") - p.Value = cron - } else if every, ok := op["every"]; ok && !t.Options.Every.IsZero() { - delete(op, "every") - p.Key = &ast.Identifier{Name: "every"} - p.Value = every.Copy().(*ast.DurationLiteral) - } - } - } - // add in new keys and values to the ast - for k := range op { - obj.Properties = append(obj.Properties, &ast.Property{ - Key: &ast.Identifier{Name: k}, - Value: op[k], - }) - } - return nil, nil - } - - ok, err := edit.Option(parsed, "task", editFunc) - - if err != nil { - return err - } - if !ok { - return errors.New("unable to edit option") - } - - t.Options.Clear() - s, err := astutil.Format(parsed) - if err != nil { - return err - } - t.Flux = &s - } - return nil -} - -// TaskFilter represents a set of filters that restrict the returned results -type TaskFilter struct { - Type *string - Name *string - After *platform.ID - OrganizationID *platform.ID - Organization string - User *platform.ID - Limit int - Status *string -} - -// QueryParams Converts TaskFilter fields to url query params. 
-func (f TaskFilter) QueryParams() map[string][]string { - qp := map[string][]string{} - if f.After != nil { - qp["after"] = []string{f.After.String()} - } - - if f.OrganizationID != nil { - qp["orgID"] = []string{f.OrganizationID.String()} - } - - if f.Organization != "" { - qp["org"] = []string{f.Organization} - } - - if f.User != nil { - qp["user"] = []string{f.User.String()} - } - - if f.Limit > 0 { - qp["limit"] = []string{strconv.Itoa(f.Limit)} - } - - return qp -} - -// RunFilter represents a set of filters that restrict the returned results -type RunFilter struct { - // Task ID is required for listing runs. - Task platform.ID - - After *platform.ID - Limit int - AfterTime string - BeforeTime string -} - -// LogFilter represents a set of filters that restrict the returned log results. -type LogFilter struct { - // Task ID is required. - Task platform.ID - - // The optional Run ID limits logs to a single run. - Run *platform.ID -} - -type TaskStatus string - -const ( - TaskActive TaskStatus = "active" - TaskInactive TaskStatus = "inactive" - - DefaultTaskStatus TaskStatus = TaskActive -) - -type RunStatus int - -const ( - RunStarted RunStatus = iota - RunSuccess - RunFail - RunCanceled - RunScheduled -) - -func (r RunStatus) String() string { - switch r { - case RunStarted: - return "started" - case RunSuccess: - return "success" - case RunFail: - return "failed" - case RunCanceled: - return "canceled" - case RunScheduled: - return "scheduled" - } - panic(fmt.Sprintf("unknown RunStatus: %d", r)) -} - -// RequestStillQueuedError is returned when attempting to retry a run which has not yet completed. -type RequestStillQueuedError struct { - // Unix timestamps matching existing request's start and end. - Start, End int64 -} - -const fmtRequestStillQueued = "previous retry for start=%s end=%s has not yet finished" - -func (e RequestStillQueuedError) Error() string { - return fmt.Sprintf(fmtRequestStillQueued, - time.Unix(e.Start, 0).UTC().Format(time.RFC3339), - time.Unix(e.End, 0).UTC().Format(time.RFC3339), - ) -} - -// ParseRequestStillQueuedError attempts to parse a RequestStillQueuedError from msg. -// If msg is formatted correctly, the resultant error is returned; otherwise it returns nil. -func ParseRequestStillQueuedError(msg string) *RequestStillQueuedError { - var s, e string - n, err := fmt.Sscanf(msg, fmtRequestStillQueued, &s, &e) - if err != nil || n != 2 { - return nil - } - - start, err := time.Parse(time.RFC3339, s) - if err != nil { - return nil - } - - end, err := time.Parse(time.RFC3339, e) - if err != nil { - return nil - } - - return &RequestStillQueuedError{Start: start.Unix(), End: end.Unix()} -} diff --git a/task/taskmodel/task_errors.go b/task/taskmodel/task_errors.go deleted file mode 100644 index 09bf0a00b47..00000000000 --- a/task/taskmodel/task_errors.go +++ /dev/null @@ -1,178 +0,0 @@ -package taskmodel - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - // ErrRunCanceled is returned from the RunResult when a Run is Canceled. It is used mostly internally. - ErrRunCanceled = &errors.Error{ - Code: errors.EInternal, - Msg: "run canceled", - } - - // ErrTaskNotClaimed is returned when attempting to operate against a task that must be claimed but is not. - ErrTaskNotClaimed = &errors.Error{ - Code: errors.EConflict, - Msg: "task not claimed", - } - - // ErrTaskAlreadyClaimed is returned when attempting to operate against a task that must not be claimed but is. 
- ErrTaskAlreadyClaimed = &errors.Error{ - Code: errors.EConflict, - Msg: "task already claimed", - } - - // ErrNoRunsFound is returned when searching for a range of runs, but none are found. - ErrNoRunsFound = &errors.Error{ - Code: errors.ENotFound, - Msg: "no matching runs found", - } - - // ErrInvalidTaskID error object for bad id's - ErrInvalidTaskID = &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid id", - } - - // ErrTaskNotFound indicates no task could be found for given parameters. - ErrTaskNotFound = &errors.Error{ - Code: errors.ENotFound, - Msg: "task not found", - } - - // ErrRunNotFound is returned when searching for a single run that doesn't exist. - ErrRunNotFound = &errors.Error{ - Code: errors.ENotFound, - Msg: "run not found", - } - - ErrRunKeyNotFound = &errors.Error{ - Code: errors.ENotFound, - Msg: "run key not found", - } - - ErrPageSizeTooSmall = &errors.Error{ - Msg: "cannot have negative page limit", - Code: errors.EInvalid, - } - - // ErrPageSizeTooLarge indicates the page size is too large. This error is only - // used in the kv task service implementation. The name of this error may lead it - // to be used in a place that is not useful. The TaskMaxPageSize is the only one - // at 500, the rest at 100. This would likely benefit from a more specific name - // since those limits aren't shared globally. - ErrPageSizeTooLarge = &errors.Error{ - Msg: fmt.Sprintf("cannot use page size larger then %d", TaskMaxPageSize), - Code: errors.EInvalid, - } - - ErrOrgNotFound = &errors.Error{ - Msg: "organization not found", - Code: errors.ENotFound, - } - - ErrTaskRunAlreadyQueued = &errors.Error{ - Msg: "run already queued", - Code: errors.EConflict, - } - - // ErrOutOfBoundsLimit is returned with FindRuns is called with an invalid filter limit. 
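These sentinels are shared `*errors.Error` values from the kit platform errors package, so callers can compare against them directly or inspect the attached code. A brief sketch, with a hypothetical `lookupRun` helper standing in for a real service call:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/kit/platform/errors"
	"github.com/influxdata/influxdb/v2/task/taskmodel"
)

// lookupRun is a hypothetical helper used only to produce one of the
// sentinel errors defined in this file.
func lookupRun() error {
	return taskmodel.ErrRunNotFound
}

func main() {
	err := lookupRun()

	// Direct sentinel comparison works because the package exports shared values.
	if err == taskmodel.ErrRunNotFound {
		fmt.Println("run not found")
	}

	// Callers can also branch on the platform error code carried by *errors.Error.
	if e, ok := err.(*errors.Error); ok && e.Code == errors.ENotFound {
		fmt.Println("code:", e.Code)
	}
}
```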
- ErrOutOfBoundsLimit = &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "run limit is out of bounds, must be between 1 and 500", - } - - // ErrInvalidOwnerID is called when trying to create a task with out a valid ownerID - ErrInvalidOwnerID = &errors.Error{ - Code: errors.EInvalid, - Msg: "cannot create task with invalid ownerID", - } -) - -// ErrFluxParseError is returned when an error is thrown by Flux.Parse in the task executor -func ErrFluxParseError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "could not parse Flux script", - Op: "taskExecutor", - Err: err, - } -} - -// ErrQueryError is returned when an error is thrown by Query service in the task executor -func ErrQueryError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: "unexpected error from queryd", - Op: "taskExecutor", - Err: err, - } -} - -// ErrResultIteratorError is returned when an error is thrown by exhaustResultIterators in the executor -func ErrResultIteratorError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "error exhausting result iterator", - Op: "taskExecutor", - Err: err, - } -} - -func ErrInternalTaskServiceError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: "unexpected error in tasks", - Op: "task", - Err: err, - } -} - -// ErrUnexpectedTaskBucketErr a generic error we can use when we rail to retrieve a bucket -func ErrUnexpectedTaskBucketErr(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: "unexpected error retrieving task bucket", - Op: "taskBucket", - Err: err, - } -} - -// ErrTaskTimeParse an error for time parsing errors -func ErrTaskTimeParse(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: "unexpected error parsing time", - Op: "taskCron", - Err: err, - } -} - -func ErrTaskOptionParse(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid options", - Op: "taskOptions", - Err: err, - } -} - -func ErrRunExecutionError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: "could not execute task run", - Op: "taskExecutor", - Err: err, - } -} - -func ErrTaskConcurrencyLimitReached(runsInFront int) *errors.Error { - return &errors.Error{ - Code: errors.ETooManyRequests, - Msg: fmt.Sprintf("could not execute task, concurrency limit reached, runs in front: %d", runsInFront), - Op: "taskExecutor", - } -} diff --git a/task/taskmodel/task_test.go b/task/taskmodel/task_test.go deleted file mode 100644 index bb3c58ba9bb..00000000000 --- a/task/taskmodel/task_test.go +++ /dev/null @@ -1,168 +0,0 @@ -package taskmodel_test - -import ( - "encoding/json" - "testing" - - "github.com/google/go-cmp/cmp" - _ "github.com/influxdata/influxdb/v2/fluxinit/static" - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/influxdata/influxdb/v2/task/options" - "github.com/influxdata/influxdb/v2/task/taskmodel" -) - -func TestUpdateValidate(t *testing.T) { - tu := &taskmodel.TaskUpdate{} - // this is to make sure that string durations are properly marshaled into durations - if err := json.Unmarshal([]byte(`{"every":"3d2h", "offset":"1h"}`), tu); err != nil { - t.Fatal(err) - } - if tu.Options.Every.String() != "3d2h" { - t.Fatalf("option.every not properly unmarshaled, expected 10s got %s", tu.Options.Every) - } - if tu.Options.Offset.String() != "1h" { - t.Fatalf("option.every not properly 
unmarshaled, expected 1h got %s", tu.Options.Offset) - } - if err := tu.Validate(); err != nil { - t.Fatalf("expected task update to be valid but it was not: %s", err) - } - -} - -func TestOptionsMarshal(t *testing.T) { - tu := &taskmodel.TaskUpdate{} - // this is to make sure that string durations are properly marshaled into durations - if err := json.Unmarshal([]byte(`{"every":"10s", "offset":"1h"}`), tu); err != nil { - t.Fatal(err) - } - if tu.Options.Every.String() != "10s" { - t.Fatalf("option.every not properly unmarshaled, expected 10s got %s", tu.Options.Every) - } - if tu.Options.Offset.String() != "1h" { - t.Fatalf("option.every not properly unmarshaled, expected 1h got %s", tu.Options.Offset) - } - - tu = &taskmodel.TaskUpdate{} - // this is to make sure that string durations are properly marshaled into durations - if err := json.Unmarshal([]byte(`{"flux":"option task = {\n\tname: \"task #99\",\n\tcron: \"* * * * *\",\n\toffset: 5s,\n\tconcurrency: 100,\n}\nfrom(bucket:\"b\") |\u003e toHTTP(url:\"http://example.com\")"}`), tu); err != nil { - t.Fatal(err) - } - - if tu.Flux == nil { - t.Fatalf("flux not properly unmarshaled, expected not nil but got nil") - } -} - -func TestOptionsEditWithAST(t *testing.T) { - tu := &taskmodel.TaskUpdate{} - tu.Options.Every = *(options.MustParseDuration("10s")) - if err := tu.UpdateFlux(fluxlang.DefaultService, `option task = {every: 20s, name: "foo"} from(bucket:"x") |> range(start:-1h)`); err != nil { - t.Fatal(err) - } - t.Run("zeroing", func(t *testing.T) { - if !tu.Options.Every.IsZero() { - t.Errorf("expected Every to be zeroed but it was not") - } - }) - t.Run("fmt string", func(t *testing.T) { - expected := `option task = {every: 10s, name: "foo"} - -from(bucket: "x") |> range(start: -1h) -` - if *tu.Flux != expected { - t.Errorf("got the wrong task back, expected %s,\n got %s\n diff: %s", expected, *tu.Flux, cmp.Diff(expected, *tu.Flux)) - } - }) - t.Run("replacement", func(t *testing.T) { - op, err := options.FromScriptAST(fluxlang.DefaultService, *tu.Flux) - if err != nil { - t.Error(err) - } - if op.Every.String() != "10s" { - t.Logf("expected every to be 10s but was %s", op.Every) - t.Fail() - } - }) - t.Run("add new option", func(t *testing.T) { - tu := &taskmodel.TaskUpdate{} - tu.Options.Offset = options.MustParseDuration("30s") - if err := tu.UpdateFlux(fluxlang.DefaultService, `option task = {every: 20s, name: "foo"} from(bucket:"x") |> range(start:-1h)`); err != nil { - t.Fatal(err) - } - op, err := options.FromScriptAST(fluxlang.DefaultService, *tu.Flux) - if err != nil { - t.Error(err) - } - if op.Offset == nil || op.Offset.String() != "30s" { - t.Fatalf("expected offset to be 30s but was %s", op.Offset) - } - }) - t.Run("switching from every to cron", func(t *testing.T) { - tu := &taskmodel.TaskUpdate{} - tu.Options.Cron = "* * * * *" - if err := tu.UpdateFlux(fluxlang.DefaultService, `option task = {every: 20s, name: "foo"} from(bucket:"x") |> range(start:-1h)`); err != nil { - t.Fatal(err) - } - op, err := options.FromScriptAST(fluxlang.DefaultService, *tu.Flux) - if err != nil { - t.Error(err) - } - if !op.Every.IsZero() { - t.Fatalf("expected every to be 0 but was %s", op.Every) - } - if op.Cron != "* * * * *" { - t.Fatalf("expected Cron to be \"* * * * *\" but was %s", op.Cron) - } - }) - t.Run("switching from cron to every", func(t *testing.T) { - tu := &taskmodel.TaskUpdate{} - tu.Options.Every = *(options.MustParseDuration("10s")) - if err := tu.UpdateFlux(fluxlang.DefaultService, `option task = {cron: "* * * * 
*", name: "foo"} from(bucket:"x") |> range(start:-1h)`); err != nil { - t.Fatal(err) - } - op, err := options.FromScriptAST(fluxlang.DefaultService, *tu.Flux) - if err != nil { - t.Error(err) - } - if op.Every.String() != "10s" { - t.Fatalf("expected every to be 10s but was %s", op.Every) - } - if op.Cron != "" { - t.Fatalf("expected Cron to be \"\" but was %s", op.Cron) - } - }) - t.Run("delete deletable option", func(t *testing.T) { - tu := &taskmodel.TaskUpdate{} - tu.Options.Offset = &options.Duration{} - expscript := `option task = {cron: "* * * * *", name: "foo"} - -from(bucket: "x") |> range(start: -1h) -` - if err := tu.UpdateFlux(fluxlang.DefaultService, `option task = {cron: "* * * * *", name: "foo", offset: 10s} from(bucket:"x") |> range(start:-1h)`); err != nil { - t.Fatal(err) - } - op, err := options.FromScriptAST(fluxlang.DefaultService, *tu.Flux) - if err != nil { - t.Error(err) - } - if !op.Every.IsZero() { - t.Fatalf("expected every to be 0s but was %s", op.Every) - } - if op.Cron != "* * * * *" { - t.Fatalf("expected Cron to be \"\" but was %s", op.Cron) - } - if !cmp.Equal(*tu.Flux, expscript) { - t.Fatalf(cmp.Diff(*tu.Flux, expscript)) - } - }) - -} - -func TestParseRequestStillQueuedError(t *testing.T) { - e := taskmodel.RequestStillQueuedError{Start: 1000, End: 2000} - validMsg := e.Error() - - if err := taskmodel.ParseRequestStillQueuedError(validMsg); err == nil || *err != e { - t.Fatalf("%q should have parsed to %v, but got %v", validMsg, e, err) - } -} diff --git a/telegraf.go b/telegraf.go deleted file mode 100644 index c439239168d..00000000000 --- a/telegraf.go +++ /dev/null @@ -1,322 +0,0 @@ -package influxdb - -import ( - "context" - "encoding/json" - "fmt" - "regexp" - - "github.com/BurntSushi/toml" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/telegraf/plugins" - "github.com/influxdata/influxdb/v2/telegraf/plugins/inputs" - "github.com/influxdata/influxdb/v2/telegraf/plugins/outputs" -) - -const ( - ErrTelegrafConfigInvalidOrgID = "invalid org ID" // ErrTelegrafConfigInvalidOrgID is the error message for a missing or invalid organization ID. - ErrTelegrafConfigNotFound = "telegraf configuration not found" // ErrTelegrafConfigNotFound is the error message for a missing telegraf config. - ErrTelegrafPluginNameUnmatch = "the telegraf plugin is name %s doesn't match the config %s" - ErrNoTelegrafPlugins = "there is no telegraf plugin in the config" - ErrUnsupportTelegrafPluginType = "unsupported telegraf plugin type %s" - ErrUnsupportTelegrafPluginName = "unsupported telegraf plugin %s, type %s" -) - -// ops for buckets error and buckets op logs. -var ( - OpFindTelegrafConfigByID = "FindTelegrafConfigByID" - OpFindTelegrafConfigs = "FindTelegrafConfigs" - OpCreateTelegrafConfig = "CreateTelegrafConfig" - OpUpdateTelegrafConfig = "UpdateTelegrafConfig" - OpDeleteTelegrafConfig = "DeleteTelegrafConfig" -) - -// TelegrafConfigStore represents a service for managing telegraf config data. -type TelegrafConfigStore interface { - // FindTelegrafConfigByID returns a single telegraf config by ID. - FindTelegrafConfigByID(ctx context.Context, id platform.ID) (*TelegrafConfig, error) - - // FindTelegrafConfigs returns a list of telegraf configs that match filter and the total count of matching telegraf configs. - // Additional options provide pagination & sorting. 
- FindTelegrafConfigs(ctx context.Context, filter TelegrafConfigFilter, opt ...FindOptions) ([]*TelegrafConfig, int, error) - - // CreateTelegrafConfig creates a new telegraf config and sets b.ID with the new identifier. - CreateTelegrafConfig(ctx context.Context, tc *TelegrafConfig, userID platform.ID) error - - // UpdateTelegrafConfig updates a single telegraf config. - // Returns the new telegraf config after update. - UpdateTelegrafConfig(ctx context.Context, id platform.ID, tc *TelegrafConfig, userID platform.ID) (*TelegrafConfig, error) - - // DeleteTelegrafConfig removes a telegraf config by ID. - DeleteTelegrafConfig(ctx context.Context, id platform.ID) error -} - -// TelegrafConfigFilter represents a set of filter that restrict the returned telegraf configs. -type TelegrafConfigFilter struct { - OrgID *platform.ID - Organization *string -} - -// TelegrafConfig stores telegraf config for one telegraf instance. -type TelegrafConfig struct { - ID platform.ID `json:"id,omitempty"` // ID of this config object. - OrgID platform.ID `json:"orgID,omitempty"` // OrgID is the id of the owning organization. - Name string `json:"name,omitempty"` // Name of this config object. - Description string `json:"description,omitempty"` // Decription of this config object. - Config string `json:"config,omitempty"` // ConfigTOML contains the raw toml config. - Metadata map[string]interface{} `json:"metadata,omitempty"` // Metadata for the config. -} - -var pluginCount = regexp.MustCompilePOSIX(`\[\[(inputs\..*|outputs\..*|aggregators\..*|processors\..*)\]\]`) - -// CountPlugins returns a map of the number of times each plugin is used. -func (tc *TelegrafConfig) CountPlugins() map[string]float64 { - plugins := map[string]float64{} - founds := pluginCount.FindAllStringSubmatch(tc.Config, -1) - - for _, v := range founds { - if len(v) < 2 { - continue - } - plugins[v[1]]++ - } - - return plugins -} - -// UnmarshalJSON implement the json.Unmarshaler interface. -// Gets called when reading from the kv db. mostly legacy so loading old/stored configs still work. -// May not remove for a while. Primarily will get hit when user views/downloads config. -func (tc *TelegrafConfig) UnmarshalJSON(b []byte) error { - tcd := new(telegrafConfigDecode) - - if err := json.Unmarshal(b, tcd); err != nil { - return err - } - - orgID := tcd.OrgID - if orgID == nil || !orgID.Valid() { - orgID = tcd.OrganizationID - } - - if tcd.ID != nil { - tc.ID = *tcd.ID - } - - if orgID != nil { - tc.OrgID = *orgID - } - - tc.Name = tcd.Name - tc.Description = tcd.Description - - // Prefer new structure; use full toml config. - tc.Config = tcd.Config - tc.Metadata = tcd.Metadata - - if tcd.Plugins != nil { - // legacy, remove after some moons. or a migration. - if len(tcd.Plugins) > 0 { - bkts, conf, err := decodePluginRaw(tcd) - if err != nil { - return err - } - tc.Config = plugins.AgentConfig + conf - tc.Metadata = map[string]interface{}{"buckets": bkts} - } else if c, ok := plugins.GetPlugin("output", "influxdb_v2"); ok { - // Handles legacy adding of default plugins (agent and output). - tc.Config = plugins.AgentConfig + c.Config - tc.Metadata = map[string]interface{}{ - "buckets": []string{}, - } - } - } else if tcd.Metadata == nil || len(tcd.Metadata) == 0 { - // Get buckets from the config. 
- m, err := parseMetadata(tc.Config) - if err != nil { - return err - } - - tc.Metadata = m - } - - return nil -} - -type buckets []string - -func (t *buckets) UnmarshalTOML(data interface{}) error { - dataOk, ok := data.(map[string]interface{}) - if !ok { - return &errors.Error{ - Code: errors.EEmptyValue, - Msg: "no config to get buckets", - } - } - bkts := []string{} - for tp, ps := range dataOk { - if tp != "outputs" { - continue - } - plugins, ok := ps.(map[string]interface{}) - if !ok { - return &errors.Error{ - Code: errors.EEmptyValue, - Msg: "no plugins in config to get buckets", - } - } - for name, configDataArray := range plugins { - if name != "influxdb_v2" { - continue - } - config, ok := configDataArray.([]map[string]interface{}) - if !ok { - return &errors.Error{ - Code: errors.EEmptyValue, - Msg: "influxdb_v2 output has no config", - } - } - for i := range config { - if b, ok := config[i]["bucket"]; ok { - bkts = append(bkts, b.(string)) - } - } - } - } - - *t = bkts - - return nil -} - -func parseMetadata(cfg string) (map[string]interface{}, error) { - bs := []string{} - - this := &buckets{} - _, err := toml.Decode(cfg, this) - if err != nil { - return nil, err - } - - for _, i := range *this { - if i != "" { - bs = append(bs, i) - } - } - - return map[string]interface{}{"buckets": bs}, nil -} - -// return bucket, config, error -func decodePluginRaw(tcd *telegrafConfigDecode) ([]string, string, error) { - op := "unmarshal telegraf config raw plugin" - ps := "" - bucket := []string{} - - for _, pr := range tcd.Plugins { - var tpFn func() plugins.Config - var ok bool - - switch pr.Type { - case "input": - tpFn, ok = availableInputPlugins[pr.Name] - case "output": - tpFn, ok = availableOutputPlugins[pr.Name] - default: - return nil, "", &errors.Error{ - Code: errors.EInvalid, - Op: op, - Msg: fmt.Sprintf(ErrUnsupportTelegrafPluginType, pr.Type), - } - } - - if !ok { - // This removes the validation (and does not create toml) for new "input" plugins - // but keeps in place the existing behavior for certain "input" plugins - if pr.Type == "output" { - return nil, "", &errors.Error{ - Code: errors.EInvalid, - Op: op, - Msg: fmt.Sprintf(ErrUnsupportTelegrafPluginName, pr.Name, pr.Type), - } - } - continue - } - - config := tpFn() - // if pr.Config if empty, make it a blank obj, - // so it will still go to the unmarshalling process to validate. - if pr.Config == nil || len(string(pr.Config)) == 0 { - pr.Config = []byte("{}") - } - - if err := json.Unmarshal(pr.Config, config); err != nil { - return nil, "", &errors.Error{ - Code: errors.EInvalid, - Err: err, - Op: op, - } - } - - if pr.Name == "influxdb_v2" { - if b := config.(*outputs.InfluxDBV2).Bucket; b != "" { - bucket = []string{b} - } - } - - ps += config.TOML() - - } - - return bucket, ps, nil -} - -// telegrafConfigDecode is the helper struct for json decoding. legacy. -type telegrafConfigDecode struct { - ID *platform.ID `json:"id,omitempty"` - OrganizationID *platform.ID `json:"organizationID,omitempty"` - OrgID *platform.ID `json:"orgID,omitempty"` - Name string `json:"name,omitempty"` - Description string `json:"description,omitempty"` - Config string `json:"config,omitempty"` - Plugins []telegrafPluginDecode `json:"plugins,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` -} - -// telegrafPluginDecode is the helper struct for json decoding. legacy. -type telegrafPluginDecode struct { - Type string `json:"type,omitempty"` // Type of the plugin. 
- Name string `json:"name,omitempty"` // Name of the plugin. - Alias string `json:"alias,omitempty"` // Alias of the plugin. - Description string `json:"description,omitempty"` // Description of the plugin. - Config json.RawMessage `json:"config,omitempty"` // Config is the currently stored plugin configuration. -} - -var availableInputPlugins = map[string](func() plugins.Config){ - "cpu": func() plugins.Config { return &inputs.CPUStats{} }, - "disk": func() plugins.Config { return &inputs.DiskStats{} }, - "diskio": func() plugins.Config { return &inputs.DiskIO{} }, - "docker": func() plugins.Config { return &inputs.Docker{} }, - "file": func() plugins.Config { return &inputs.File{} }, - "kernel": func() plugins.Config { return &inputs.Kernel{} }, - "kubernetes": func() plugins.Config { return &inputs.Kubernetes{} }, - "logparser": func() plugins.Config { return &inputs.LogParserPlugin{} }, - "mem": func() plugins.Config { return &inputs.MemStats{} }, - "net_response": func() plugins.Config { return &inputs.NetResponse{} }, - "net": func() plugins.Config { return &inputs.NetIOStats{} }, - "nginx": func() plugins.Config { return &inputs.Nginx{} }, - "processes": func() plugins.Config { return &inputs.Processes{} }, - "procstat": func() plugins.Config { return &inputs.Procstat{} }, - "prometheus": func() plugins.Config { return &inputs.Prometheus{} }, - "redis": func() plugins.Config { return &inputs.Redis{} }, - "swap": func() plugins.Config { return &inputs.SwapStats{} }, - "syslog": func() plugins.Config { return &inputs.Syslog{} }, - "system": func() plugins.Config { return &inputs.SystemStats{} }, - "tail": func() plugins.Config { return &inputs.Tail{} }, -} - -var availableOutputPlugins = map[string](func() plugins.Config){ - "file": func() plugins.Config { return &outputs.File{} }, - "influxdb_v2": func() plugins.Config { return &outputs.InfluxDBV2{} }, -} diff --git a/telegraf/index.go b/telegraf/index.go deleted file mode 100644 index c22d153fb1d..00000000000 --- a/telegraf/index.go +++ /dev/null @@ -1,26 +0,0 @@ -package telegraf - -import ( - "encoding/json" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kv" -) - -var ( - // ByOrganizationIndexMapping is the mapping definition for fetching - // telegrafs by organization ID. - ByOrganizationIndexMapping = kv.NewIndexMapping( - []byte("telegrafv1"), - []byte("telegrafbyorgindexv1"), - func(v []byte) ([]byte, error) { - var telegraf influxdb.TelegrafConfig - if err := json.Unmarshal(v, &telegraf); err != nil { - return nil, err - } - - id, _ := telegraf.OrgID.Encode() - return id, nil - }, - ) -) diff --git a/telegraf/plugins/inputs/base.go b/telegraf/plugins/inputs/base.go deleted file mode 100644 index 136adb3ffef..00000000000 --- a/telegraf/plugins/inputs/base.go +++ /dev/null @@ -1,9 +0,0 @@ -package inputs - -import "github.com/influxdata/influxdb/v2/telegraf/plugins" - -type baseInput int - -func (b baseInput) Type() plugins.Type { - return plugins.Input -} diff --git a/telegraf/plugins/inputs/cpu.go b/telegraf/plugins/inputs/cpu.go deleted file mode 100644 index 228564257da..00000000000 --- a/telegraf/plugins/inputs/cpu.go +++ /dev/null @@ -1,34 +0,0 @@ -package inputs - -import ( - "fmt" -) - -// CPUStats is based on telegraf CPUStats. -type CPUStats struct { - baseInput -} - -// PluginName is based on telegraf plugin name. 
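The `availableInputPlugins` and `availableOutputPlugins` maps above are how the legacy decoder resolves a stored plugin name to a constructor; `decodePluginRaw` then concatenates each plugin's `TOML()` stanza into the stored config string. A minimal sketch of that flow (the `registry` map here is a trimmed, illustrative copy of those maps):

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/telegraf/plugins"
	"github.com/influxdata/influxdb/v2/telegraf/plugins/inputs"
	"github.com/influxdata/influxdb/v2/telegraf/plugins/outputs"
)

// registry is a trimmed, illustrative copy of the availableInputPlugins and
// availableOutputPlugins maps above.
var registry = map[string]func() plugins.Config{
	"cpu":         func() plugins.Config { return &inputs.CPUStats{} },
	"influxdb_v2": func() plugins.Config { return &outputs.InfluxDBV2{} },
}

func main() {
	var cfg string
	for _, name := range []string{"cpu", "influxdb_v2"} {
		if newPlugin, ok := registry[name]; ok {
			cfg += newPlugin().TOML() // each plugin renders its own [[...]] stanza
		}
	}
	fmt.Print(cfg)
}
```

The rendered stanzas are exactly the ones asserted verbatim in `inputs_test.go` below, such as the `[[inputs.cpu]]` block produced by `CPUStats.TOML()`.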
-func (c *CPUStats) PluginName() string { - return "cpu" -} - -// UnmarshalTOML decodes the parsed data to the object -func (c *CPUStats) UnmarshalTOML(data interface{}) error { - return nil -} - -// TOML encodes to toml string -func (c *CPUStats) TOML() string { - return fmt.Sprintf(`[[inputs.%s]] - ## Whether to report per-cpu stats or not - percpu = true - ## Whether to report total system cpu stats or not - totalcpu = true - ## If true, collect raw CPU time metrics - collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states - report_active = false -`, c.PluginName()) -} diff --git a/telegraf/plugins/inputs/disk.go b/telegraf/plugins/inputs/disk.go deleted file mode 100644 index 4ba3ba74201..00000000000 --- a/telegraf/plugins/inputs/disk.go +++ /dev/null @@ -1,31 +0,0 @@ -package inputs - -import ( - "fmt" -) - -// DiskStats is based on telegraf DiskStats. -type DiskStats struct { - baseInput -} - -// PluginName is based on telegraf plugin name. -func (d *DiskStats) PluginName() string { - return "disk" -} - -// UnmarshalTOML decodes the parsed data to the object -func (d *DiskStats) UnmarshalTOML(data interface{}) error { - return nil -} - -// TOML encodes to toml string -func (d *DiskStats) TOML() string { - return fmt.Sprintf(`[[inputs.%s]] - ## By default stats will be gathered for all mount points. - ## Set mount_points will restrict the stats to only the specified mount points. - # mount_points = ["/"] - ## Ignore mount points by filesystem type. - ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] -`, d.PluginName()) -} diff --git a/telegraf/plugins/inputs/diskio.go b/telegraf/plugins/inputs/diskio.go deleted file mode 100644 index 195fcefbd08..00000000000 --- a/telegraf/plugins/inputs/diskio.go +++ /dev/null @@ -1,51 +0,0 @@ -package inputs - -import ( - "fmt" -) - -// DiskIO is based on telegraf DiskIO. -type DiskIO struct { - baseInput -} - -// PluginName is based on telegraf plugin name. -func (d *DiskIO) PluginName() string { - return "diskio" -} - -// UnmarshalTOML decodes the parsed data to the object -func (d *DiskIO) UnmarshalTOML(data interface{}) error { - return nil -} - -// TOML encodes to toml string. -func (d *DiskIO) TOML() string { - return fmt.Sprintf(`[[inputs.%s]] - ## By default, telegraf will gather stats for all devices including - ## disk partitions. - ## Setting devices will restrict the stats to the specified devices. - # devices = ["sda", "sdb", "vd*"] - ## Uncomment the following line if you need disk serial numbers. - # skip_serial_number = false - # - ## On systems which support it, device metadata can be added in the form of - ## tags. - ## Currently only Linux is supported via udev properties. You can view - ## available properties for a device by running: - ## 'udevadm info -q property -n /dev/sda' - ## Note: Most, but not all, udev properties can be accessed this way. Properties - ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. - # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] - # - ## Using the same metadata source as device_tags, you can also customize the - ## name of the device via templates. - ## The 'name_templates' parameter is a list of templates to try and apply to - ## the device. The template may contain variables in the form of '$PROPERTY' or - ## '${PROPERTY}'. The first template which does not contain any variables not - ## present for the device is used as the device name tag. 
- ## The typical use case is for LVM volumes, to get the VG/LV name instead of - ## the near-meaningless DM-0 name. - # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] -`, d.PluginName()) -} diff --git a/telegraf/plugins/inputs/docker.go b/telegraf/plugins/inputs/docker.go deleted file mode 100644 index ecbb5d586a6..00000000000 --- a/telegraf/plugins/inputs/docker.go +++ /dev/null @@ -1,102 +0,0 @@ -package inputs - -import ( - "errors" - "fmt" -) - -// Docker is based on telegraf Docker plugin. -type Docker struct { - baseInput - Endpoint string `json:"endpoint"` -} - -// PluginName is based on telegraf plugin name. -func (d *Docker) PluginName() string { - return "docker" -} - -// UnmarshalTOML decodes the parsed data to the object -func (d *Docker) UnmarshalTOML(data interface{}) error { - dataOK, ok := data.(map[string]interface{}) - if !ok { - return errors.New("bad endpoint for docker input plugin") - } - if d.Endpoint, ok = dataOK["endpoint"].(string); !ok { - return errors.New("endpoint is not a string value") - } - return nil -} - -// TOML encodes to toml string -func (d *Docker) TOML() string { - return fmt.Sprintf(`[[inputs.%s]] - ## Docker Endpoint - ## To use TCP, set endpoint = "tcp://[ip]:[port]" - ## To use environment variables (ie, docker-machine), set endpoint = "ENV" - endpoint = "%s" - # - ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) - gather_services = false - # - ## Only collect metrics for these containers, collect all if empty - container_names = [] - # - ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars - source_tag = false - # - ## Containers to include and exclude. Globs accepted. - ## Note that an empty array for both will include all containers - container_name_include = [] - container_name_exclude = [] - # - ## Container states to include and exclude. Globs accepted. - ## When empty only containers in the "running" state will be captured. - ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] - ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] - # container_state_include = [] - # container_state_exclude = [] - # - ## Timeout for docker list, info, and stats commands - timeout = "5s" - # - ## Whether to report for each container per-device blkio (8:0, 8:1...), - ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. - ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. - ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting - ## is honored. - perdevice = true - # - ## Specifies for which classes a per-device metric should be issued - ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) - ## Please note that this setting has no effect if 'perdevice' is set to 'true' - # perdevice_include = ["cpu"] - # - ## Whether to report for each container total blkio and network stats or not. - ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. - ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting - ## is honored. - total = false - # - ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. 
- ## Possible values are 'cpu', 'blkio' and 'network' - ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. - ## Please note that this setting has no effect if 'total' is set to 'false' - # total_include = ["cpu", "blkio", "network"] - # - ## Which environment variables should we use as a tag - ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] - # - ## docker labels to include and exclude as tags. Globs accepted. - ## Note that an empty array for both will include all labels as tags - docker_label_include = [] - docker_label_exclude = [] - # - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -`, d.PluginName(), d.Endpoint) -} diff --git a/telegraf/plugins/inputs/file.go b/telegraf/plugins/inputs/file.go deleted file mode 100644 index 00090d0a996..00000000000 --- a/telegraf/plugins/inputs/file.go +++ /dev/null @@ -1,67 +0,0 @@ -package inputs - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -// File is based on telegraf input File plugin. -type File struct { - baseInput - Files []string `json:"files"` -} - -// PluginName is based on telegraf plugin name. -func (f *File) PluginName() string { - return "file" -} - -// UnmarshalTOML decodes the parsed data to the object -func (f *File) UnmarshalTOML(data interface{}) error { - dataOK, ok := data.(map[string]interface{}) - if !ok { - return errors.New("bad files for file input plugin") - } - files, ok := dataOK["files"].([]interface{}) - if !ok { - return errors.New("not an array for file input plugin") - } - for _, fl := range files { - f.Files = append(f.Files, fl.(string)) - } - return nil -} - -// TOML encodes to toml string -func (f *File) TOML() string { - s := make([]string, len(f.Files)) - for k, v := range f.Files { - s[k] = strconv.Quote(v) - } - return fmt.Sprintf(`[[inputs.%s]] - ## Files to parse each interval. Accept standard unix glob matching rules, - ## as well as ** to match recursive files and directories. - files = [%s] - - ## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. - # file_tag = "" - - ## Character encoding to use when interpreting the file contents. Invalid - ## characters are replaced using the unicode replacement character. When set - ## to the empty string the data is not decoded to text. 
- ## ex: character_encoding = "utf-8" - ## character_encoding = "utf-16le" - ## character_encoding = "utf-16be" - ## character_encoding = "" - # character_encoding = "" - - ## The dataformat to be read from files - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -`, f.PluginName(), strings.Join(s, ", ")) -} diff --git a/telegraf/plugins/inputs/inputs_test.go b/telegraf/plugins/inputs/inputs_test.go deleted file mode 100644 index 452ed15fe6d..00000000000 --- a/telegraf/plugins/inputs/inputs_test.go +++ /dev/null @@ -1,1409 +0,0 @@ -package inputs - -import ( - "errors" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2/telegraf/plugins" -) - -// local plugin -type telegrafPluginConfig interface { - TOML() string - Type() plugins.Type - PluginName() string - UnmarshalTOML(data interface{}) error -} - -func TestType(t *testing.T) { - b := baseInput(0) - if b.Type() != plugins.Input { - t.Fatalf("input plugins type should be input, got %s", b.Type()) - } -} - -func TestEncodeTOML(t *testing.T) { - cases := []struct { - name string - plugins map[telegrafPluginConfig]string - }{ - { - name: "test empty plugins", - plugins: map[telegrafPluginConfig]string{ - &CPUStats{}: `[[inputs.cpu]] - ## Whether to report per-cpu stats or not - percpu = true - ## Whether to report total system cpu stats or not - totalcpu = true - ## If true, collect raw CPU time metrics - collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states - report_active = false -`, - &DiskStats{}: `[[inputs.disk]] - ## By default stats will be gathered for all mount points. - ## Set mount_points will restrict the stats to only the specified mount points. - # mount_points = ["/"] - ## Ignore mount points by filesystem type. - ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] -`, - &DiskIO{}: `[[inputs.diskio]] - ## By default, telegraf will gather stats for all devices including - ## disk partitions. - ## Setting devices will restrict the stats to the specified devices. - # devices = ["sda", "sdb", "vd*"] - ## Uncomment the following line if you need disk serial numbers. - # skip_serial_number = false - # - ## On systems which support it, device metadata can be added in the form of - ## tags. - ## Currently only Linux is supported via udev properties. You can view - ## available properties for a device by running: - ## 'udevadm info -q property -n /dev/sda' - ## Note: Most, but not all, udev properties can be accessed this way. Properties - ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. - # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] - # - ## Using the same metadata source as device_tags, you can also customize the - ## name of the device via templates. - ## The 'name_templates' parameter is a list of templates to try and apply to - ## the device. The template may contain variables in the form of '$PROPERTY' or - ## '${PROPERTY}'. The first template which does not contain any variables not - ## present for the device is used as the device name tag. - ## The typical use case is for LVM volumes, to get the VG/LV name instead of - ## the near-meaningless DM-0 name. 
- # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] -`, - &Docker{}: `[[inputs.docker]] - ## Docker Endpoint - ## To use TCP, set endpoint = "tcp://[ip]:[port]" - ## To use environment variables (ie, docker-machine), set endpoint = "ENV" - endpoint = "" - # - ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) - gather_services = false - # - ## Only collect metrics for these containers, collect all if empty - container_names = [] - # - ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars - source_tag = false - # - ## Containers to include and exclude. Globs accepted. - ## Note that an empty array for both will include all containers - container_name_include = [] - container_name_exclude = [] - # - ## Container states to include and exclude. Globs accepted. - ## When empty only containers in the "running" state will be captured. - ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] - ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] - # container_state_include = [] - # container_state_exclude = [] - # - ## Timeout for docker list, info, and stats commands - timeout = "5s" - # - ## Whether to report for each container per-device blkio (8:0, 8:1...), - ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. - ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. - ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting - ## is honored. - perdevice = true - # - ## Specifies for which classes a per-device metric should be issued - ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) - ## Please note that this setting has no effect if 'perdevice' is set to 'true' - # perdevice_include = ["cpu"] - # - ## Whether to report for each container total blkio and network stats or not. - ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. - ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting - ## is honored. - total = false - # - ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. - ## Possible values are 'cpu', 'blkio' and 'network' - ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. - ## Please note that this setting has no effect if 'total' is set to 'false' - # total_include = ["cpu", "blkio", "network"] - # - ## Which environment variables should we use as a tag - ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] - # - ## docker labels to include and exclude as tags. Globs accepted. - ## Note that an empty array for both will include all labels as tags - docker_label_include = [] - docker_label_exclude = [] - # - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -`, - &File{}: `[[inputs.file]] - ## Files to parse each interval. Accept standard unix glob matching rules, - ## as well as ** to match recursive files and directories. - files = [] - - ## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. 
- # file_tag = "" - - ## Character encoding to use when interpreting the file contents. Invalid - ## characters are replaced using the unicode replacement character. When set - ## to the empty string the data is not decoded to text. - ## ex: character_encoding = "utf-8" - ## character_encoding = "utf-16le" - ## character_encoding = "utf-16be" - ## character_encoding = "" - # character_encoding = "" - - ## The dataformat to be read from files - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -`, - &Kernel{}: `[[inputs.kernel]] - # no configuration -`, - &Kubernetes{}: `[[inputs.kubernetes]] - ## URL for the kubelet - url = "" - - ## Use bearer token for authorization. ('bearer_token' takes priority) - ## If both of these are empty, we'll use the default serviceaccount: - ## at: /run/secrets/kubernetes.io/serviceaccount/token - # bearer_token = "/path/to/bearer/token" - ## OR - # bearer_token_string = "abc_123" - - ## Pod labels to be added as tags. An empty array for both include and - ## exclude will include all labels. - # label_include = [] - # label_exclude = ["*"] - - ## Set response_timeout (default 5 seconds) - # response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = /path/to/cafile - # tls_cert = /path/to/certfile - # tls_key = /path/to/keyfile - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -`, - &LogParserPlugin{}: `[[inputs.logparser]] - ## Log files to parse. - ## These accept standard unix glob matching rules, but with the addition of - ## ** as a "super asterisk". ie: - ## /var/log/**.log -> recursively find all .log files in /var/log - ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log - ## /var/log/apache.log -> only tail the apache log file - files = [] - - ## Read files that currently exist from the beginning. Files that are created - ## while telegraf is running (and that match the "files" globs) will always - ## be read from the beginning. - from_beginning = false - - ## Method used to watch for file updates. Can be either "inotify" or "poll". - # watch_method = "inotify" - - ## Parse logstash-style "grok" patterns: - [inputs.logparser.grok] - ## This is a list of patterns to check the given log file(s) for. - ## Note that adding patterns here increases processing time. The most - ## efficient configuration is to have one pattern per logparser. - ## Other common built-in patterns are: - ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) - ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) - patterns = ["%{COMBINED_LOG_FORMAT}"] - - ## Name of the outputted measurement name. - measurement = "apache_access_log" - - ## Full path(s) to custom pattern files. - custom_pattern_files = [] - - ## Custom patterns can also be defined here. Put one pattern per line. - custom_patterns = ''' - ''' - - ## Timezone allows you to provide an override for timestamps that - ## don't already include an offset - ## e.g. 04/06/2016 12:41:45 data one two 5.43µs - ## - ## Default: "" which renders UTC - ## Options are as follows: - ## 1. Local -- interpret based on machine localtime - ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones - ## 3. 
UTC -- or blank/unspecified, will return timestamp in UTC - # timezone = "Canada/Eastern" - - ## When set to "disable", timestamp will not incremented if there is a - ## duplicate. - # unique_timestamp = "auto" -`, - &MemStats{}: `[[inputs.mem]] - # no configuration -`, - &NetIOStats{}: `[[inputs.net]] - ## By default, telegraf gathers stats from any up interface (excluding loopback) - ## Setting interfaces will tell it to gather these explicit interfaces, - ## regardless of status. - ## - # interfaces = ["eth0"] - ## - ## On linux systems telegraf also collects protocol stats. - ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. - ## - # ignore_protocol_stats = false - ## -`, - &NetResponse{}: `[[inputs.net_response]] - ## Protocol, must be "tcp" or "udp" - ## NOTE: because the "udp" protocol does not respond to requests, it requires - ## a send/expect string pair (see below). - protocol = "tcp" - ## Server address (default localhost) - address = "localhost:80" - - ## Set timeout - # timeout = "1s" - - ## Set read timeout (only used if expecting a response) - # read_timeout = "1s" - - ## The following options are required for UDP checks. For TCP, they are - ## optional. The plugin will send the given string to the server and then - ## expect to receive the given 'expect' string back. - ## string sent to the server - # send = "ssh" - ## expected string in answer - # expect = "ssh" - - ## Uncomment to remove deprecated fields - # fielddrop = ["result_type", "string_found"] -`, - &Nginx{}: `[[inputs.nginx]] - # An array of Nginx stub_status URI to gather stats. - urls = [] - - ## Optional TLS Config - tls_ca = "/etc/telegraf/ca.pem" - tls_cert = "/etc/telegraf/cert.cer" - tls_key = "/etc/telegraf/key.key" - ## Use TLS but skip chain & host verification - insecure_skip_verify = false - - # HTTP response timeout (default: 5s) - response_timeout = "5s" -`, - &Processes{}: `[[inputs.processes]] - # no configuration -`, - &Procstat{}: `[[inputs.procstat]] - ## PID file to monitor process - pid_file = "/var/run/nginx.pid" - ## executable name (ie, pgrep ) - # exe = "" - ## pattern as argument for pgrep (ie, pgrep -f ) - # pattern = "nginx" - ## user as argument for pgrep (ie, pgrep -u ) - # user = "nginx" - ## Systemd unit name - # systemd_unit = "nginx.service" - ## CGroup name or path - # cgroup = "systemd/system.slice/nginx.service" - - ## Windows service name - # win_service = "" - - ## override for process_name - ## This is optional; default is sourced from /proc//status - # process_name = "bar" - - ## Field name prefix - # prefix = "" - - ## When true add the full cmdline as a tag. - # cmdline_tag = false - - ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. - # mode = "irix" - - ## Add the PID as a tag instead of as a field. When collecting multiple - ## processes with otherwise matching tags this setting should be enabled to - ## ensure each process has a unique identity. - ## - ## Enabling this option may result in a large number of series, especially - ## when processes have a short lifetime. - # pid_tag = false - - ## Method to use when finding process IDs. Can be one of 'pgrep', or - ## 'native'. The pgrep finder calls the pgrep executable in the PATH while - ## the native finder performs the search directly in a manor dependent on the - ## platform. Default is 'pgrep' - # pid_finder = "pgrep" -`, - &Prometheus{}: `[[inputs.prometheus]] - ## An array of urls to scrape metrics from. 
- urls = [] - - ## Metric version controls the mapping from Prometheus metrics into - ## Telegraf metrics. When using the prometheus_client output, use the same - ## value in both plugins to ensure metrics are round-tripped without - ## modification. - ## - ## example: metric_version = 1; - ## metric_version = 2; recommended version - # metric_version = 1 - - ## Url tag name (tag containing scrapped url. optional, default is "url") - # url_tag = "url" - - ## An array of Kubernetes services to scrape metrics from. - # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] - - ## Kubernetes config file to create client from. - # kube_config = "/path/to/kubernetes.config" - - ## Scrape Kubernetes pods for the following prometheus annotations: - ## - prometheus.io/scrape: Enable scraping for this pod - ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to - ## set this to 'https' & most likely set the tls config. - ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. - ## - prometheus.io/port: If port is not 9102 use this annotation - # monitor_kubernetes_pods = true - ## Get the list of pods to scrape with either the scope of - ## - cluster: the kubernetes watch api (default, no need to specify) - ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. - # pod_scrape_scope = "cluster" - ## Only for node scrape scope: node IP of the node that telegraf is running on. - ## Either this config or the environment variable NODE_IP must be set. - # node_ip = "10.180.1.1" - # ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. - # ## Default is 60 seconds. - # # pod_scrape_interval = 60 - ## Restricts Kubernetes monitoring to a single namespace - ## ex: monitor_kubernetes_pods_namespace = "default" - # monitor_kubernetes_pods_namespace = "" - # label selector to target pods which have the label - # kubernetes_label_selector = "env=dev,app=nginx" - # field selector to target pods - # eg. To scrape pods on a specific node - # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" - - ## Use bearer token for authorization. ('bearer_token' takes priority) - # bearer_token = "/path/to/bearer/token" - ## OR - # bearer_token_string = "abc_123" - - ## HTTP Basic Authentication username and password. ('bearer_token' and - ## 'bearer_token_string' take priority) - # username = "" - # password = "" - - ## Specify timeout duration for slower prometheus clients (default is 3s) - # response_timeout = "3s" - - ## Optional TLS Config - # tls_ca = /path/to/cafile - # tls_cert = /path/to/certfile - # tls_key = /path/to/keyfile - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -`, - &Redis{}: `[[inputs.redis]] - ## specify servers via a url matching: - ## [protocol://][:password]@address[:port] - ## e.g. - ## tcp://localhost:6379 - ## tcp://:password@192.168.99.100 - ## unix:///var/run/redis.sock - ## - ## If no servers are specified, then localhost is used as the host. - ## If no port is specified, 6379 is used - servers = [] - - ## Optional. 
Specify redis commands to retrieve values - # [[inputs.redis.commands]] - # # The command to run where each argument is a separate element - # command = ["get", "sample-key"] - # # The field to store the result in - # field = "sample-key-value" - # # The type of the result - # # Can be "string", "integer", or "float" - # type = "string" - - ## specify server password - # password = "" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = true -`, - &SwapStats{}: `[[inputs.swap]] - # no configuration -`, - &Syslog{}: `[[inputs.syslog]] - ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 - ## Protocol, address and port to host the syslog receiver. - ## If no host is specified, then localhost is used. - ## If no port is specified, 6514 is used (RFC5425#section-4.1). - server = "" - - ## TLS Config - # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"] - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - - ## Period between keep alive probes. - ## 0 disables keep alive probes. - ## Defaults to the OS configuration. - ## Only applies to stream sockets (e.g. TCP). - # keep_alive_period = "5m" - - ## Maximum number of concurrent connections (default = 0). - ## 0 means unlimited. - ## Only applies to stream sockets (e.g. TCP). - # max_connections = 1024 - - ## Read timeout is the maximum time allowed for reading a single message (default = 5s). - ## 0 means unlimited. - # read_timeout = "5s" - - ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). - ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), - ## or the non-transparent framing technique (RFC6587#section-3.4.2). - ## Must be one of "octet-counting", "non-transparent". - # framing = "octet-counting" - - ## The trailer to be expected in case of non-transparent framing (default = "LF"). - ## Must be one of "LF", or "NUL". - # trailer = "LF" - - ## Whether to parse in best effort mode or not (default = false). - ## By default best effort parsing is off. - # best_effort = false - - ## Character to prepend to SD-PARAMs (default = "_"). - ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. - ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] - ## For each combination a field is created. - ## Its name is created concatenating identifier, sdparam_separator, and parameter name. - # sdparam_separator = "_" -`, - &SystemStats{}: `[[inputs.system]] - ## Uncomment to remove deprecated metrics. - # fielddrop = ["uptime_format"] -`, - &Tail{}: `[[inputs.tail]] - ## File names or a pattern to tail. - ## These accept standard unix glob matching rules, but with the addition of - ## ** as a "super asterisk". ie: - ## "/var/log/**.log" -> recursively find all .log files in /var/log - ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log - ## "/var/log/apache.log" -> just tail the apache log file - ## "/var/log/log[!1-2]* -> tail files without 1-2 - ## "/var/log/log[^1-2]* -> identical behavior as above - ## See https://github.com/gobwas/glob for more examples - ## - files = [] - - ## Read file from beginning. 
- # from_beginning = false - - ## Whether file is a named pipe - # pipe = false - - ## Method used to watch for file updates. Can be either "inotify" or "poll". - # watch_method = "inotify" - - ## Maximum lines of the file to process that have not yet be written by the - ## output. For best throughput set based on the number of metrics on each - ## line and the size of the output's metric_batch_size. - # max_undelivered_lines = 1000 - - ## Character encoding to use when interpreting the file contents. Invalid - ## characters are replaced using the unicode replacement character. When set - ## to the empty string the data is not decoded to text. - ## ex: character_encoding = "utf-8" - ## character_encoding = "utf-16le" - ## character_encoding = "utf-16be" - ## character_encoding = "" - # character_encoding = "" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" - - ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string. - # path_tag = "path" - - ## multiline parser/codec - ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html - #[inputs.tail.multiline] - ## The pattern should be a regexp which matches what you believe to be an - ## indicator that the field is part of an event consisting of multiple lines of log data. - #pattern = "^\s" - - ## This field must be either "previous" or "next". - ## If a line matches the pattern, "previous" indicates that it belongs to the previous line, - ## whereas "next" indicates that the line belongs to the next one. - #match_which_line = "previous" - - ## The invert_match field can be true or false (defaults to false). - ## If true, a message not matching the pattern will constitute a match of the multiline - ## filter and the what will be applied. (vice-versa is also true) - #invert_match = false - - ## After the specified timeout, this plugin sends a multiline event even if no new pattern - ## is found to start a new event. The default timeout is 5s. - #timeout = 5s -`, - }, - }, - { - name: "standard testing", - plugins: map[telegrafPluginConfig]string{ - &Docker{ - Endpoint: "unix:///var/run/docker.sock", - }: `[[inputs.docker]] - ## Docker Endpoint - ## To use TCP, set endpoint = "tcp://[ip]:[port]" - ## To use environment variables (ie, docker-machine), set endpoint = "ENV" - endpoint = "unix:///var/run/docker.sock" - # - ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) - gather_services = false - # - ## Only collect metrics for these containers, collect all if empty - container_names = [] - # - ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars - source_tag = false - # - ## Containers to include and exclude. Globs accepted. - ## Note that an empty array for both will include all containers - container_name_include = [] - container_name_exclude = [] - # - ## Container states to include and exclude. Globs accepted. - ## When empty only containers in the "running" state will be captured. 
- ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] - ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] - # container_state_include = [] - # container_state_exclude = [] - # - ## Timeout for docker list, info, and stats commands - timeout = "5s" - # - ## Whether to report for each container per-device blkio (8:0, 8:1...), - ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. - ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. - ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting - ## is honored. - perdevice = true - # - ## Specifies for which classes a per-device metric should be issued - ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) - ## Please note that this setting has no effect if 'perdevice' is set to 'true' - # perdevice_include = ["cpu"] - # - ## Whether to report for each container total blkio and network stats or not. - ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. - ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting - ## is honored. - total = false - # - ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. - ## Possible values are 'cpu', 'blkio' and 'network' - ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. - ## Please note that this setting has no effect if 'total' is set to 'false' - # total_include = ["cpu", "blkio", "network"] - # - ## Which environment variables should we use as a tag - ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] - # - ## docker labels to include and exclude as tags. Globs accepted. - ## Note that an empty array for both will include all labels as tags - docker_label_include = [] - docker_label_exclude = [] - # - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -`, - &File{ - Files: []string{ - "/var/log/**.log", - "/var/log/apache.log", - }, - }: `[[inputs.file]] - ## Files to parse each interval. Accept standard unix glob matching rules, - ## as well as ** to match recursive files and directories. - files = ["/var/log/**.log", "/var/log/apache.log"] - - ## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. - # file_tag = "" - - ## Character encoding to use when interpreting the file contents. Invalid - ## characters are replaced using the unicode replacement character. When set - ## to the empty string the data is not decoded to text. 
- ## ex: character_encoding = "utf-8" - ## character_encoding = "utf-16le" - ## character_encoding = "utf-16be" - ## character_encoding = "" - # character_encoding = "" - - ## The dataformat to be read from files - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -`, - &Kubernetes{URL: "http://1.1.1.1:10255"}: `[[inputs.kubernetes]] - ## URL for the kubelet - url = "http://1.1.1.1:10255" - - ## Use bearer token for authorization. ('bearer_token' takes priority) - ## If both of these are empty, we'll use the default serviceaccount: - ## at: /run/secrets/kubernetes.io/serviceaccount/token - # bearer_token = "/path/to/bearer/token" - ## OR - # bearer_token_string = "abc_123" - - ## Pod labels to be added as tags. An empty array for both include and - ## exclude will include all labels. - # label_include = [] - # label_exclude = ["*"] - - ## Set response_timeout (default 5 seconds) - # response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = /path/to/cafile - # tls_cert = /path/to/certfile - # tls_key = /path/to/keyfile - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -`, - &LogParserPlugin{ - Files: []string{ - "/var/log/**.log", - "/var/log/apache.log", - }, - }: `[[inputs.logparser]] - ## Log files to parse. - ## These accept standard unix glob matching rules, but with the addition of - ## ** as a "super asterisk". ie: - ## /var/log/**.log -> recursively find all .log files in /var/log - ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log - ## /var/log/apache.log -> only tail the apache log file - files = ["/var/log/**.log", "/var/log/apache.log"] - - ## Read files that currently exist from the beginning. Files that are created - ## while telegraf is running (and that match the "files" globs) will always - ## be read from the beginning. - from_beginning = false - - ## Method used to watch for file updates. Can be either "inotify" or "poll". - # watch_method = "inotify" - - ## Parse logstash-style "grok" patterns: - [inputs.logparser.grok] - ## This is a list of patterns to check the given log file(s) for. - ## Note that adding patterns here increases processing time. The most - ## efficient configuration is to have one pattern per logparser. - ## Other common built-in patterns are: - ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) - ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) - patterns = ["%{COMBINED_LOG_FORMAT}"] - - ## Name of the outputted measurement name. - measurement = "apache_access_log" - - ## Full path(s) to custom pattern files. - custom_pattern_files = [] - - ## Custom patterns can also be defined here. Put one pattern per line. - custom_patterns = ''' - ''' - - ## Timezone allows you to provide an override for timestamps that - ## don't already include an offset - ## e.g. 04/06/2016 12:41:45 data one two 5.43µs - ## - ## Default: "" which renders UTC - ## Options are as follows: - ## 1. Local -- interpret based on machine localtime - ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones - ## 3. UTC -- or blank/unspecified, will return timestamp in UTC - # timezone = "Canada/Eastern" - - ## When set to "disable", timestamp will not incremented if there is a - ## duplicate. 
- # unique_timestamp = "auto" -`, - &Nginx{ - URLs: []string{ - "http://localhost/server_status", - "http://192.168.1.1/server_status", - }, - }: `[[inputs.nginx]] - # An array of Nginx stub_status URI to gather stats. - urls = ["http://localhost/server_status", "http://192.168.1.1/server_status"] - - ## Optional TLS Config - tls_ca = "/etc/telegraf/ca.pem" - tls_cert = "/etc/telegraf/cert.cer" - tls_key = "/etc/telegraf/key.key" - ## Use TLS but skip chain & host verification - insecure_skip_verify = false - - # HTTP response timeout (default: 5s) - response_timeout = "5s" -`, - &Procstat{ - Exe: "finder", - }: `[[inputs.procstat]] - ## PID file to monitor process - pid_file = "/var/run/nginx.pid" - ## executable name (ie, pgrep ) - # exe = "finder" - ## pattern as argument for pgrep (ie, pgrep -f ) - # pattern = "nginx" - ## user as argument for pgrep (ie, pgrep -u ) - # user = "nginx" - ## Systemd unit name - # systemd_unit = "nginx.service" - ## CGroup name or path - # cgroup = "systemd/system.slice/nginx.service" - - ## Windows service name - # win_service = "" - - ## override for process_name - ## This is optional; default is sourced from /proc//status - # process_name = "bar" - - ## Field name prefix - # prefix = "" - - ## When true add the full cmdline as a tag. - # cmdline_tag = false - - ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. - # mode = "irix" - - ## Add the PID as a tag instead of as a field. When collecting multiple - ## processes with otherwise matching tags this setting should be enabled to - ## ensure each process has a unique identity. - ## - ## Enabling this option may result in a large number of series, especially - ## when processes have a short lifetime. - # pid_tag = false - - ## Method to use when finding process IDs. Can be one of 'pgrep', or - ## 'native'. The pgrep finder calls the pgrep executable in the PATH while - ## the native finder performs the search directly in a manor dependent on the - ## platform. Default is 'pgrep' - # pid_finder = "pgrep" -`, - &Prometheus{ - URLs: []string{ - "http://192.168.2.1:9090", - "http://192.168.2.2:9090", - }, - }: `[[inputs.prometheus]] - ## An array of urls to scrape metrics from. - urls = ["http://192.168.2.1:9090", "http://192.168.2.2:9090"] - - ## Metric version controls the mapping from Prometheus metrics into - ## Telegraf metrics. When using the prometheus_client output, use the same - ## value in both plugins to ensure metrics are round-tripped without - ## modification. - ## - ## example: metric_version = 1; - ## metric_version = 2; recommended version - # metric_version = 1 - - ## Url tag name (tag containing scrapped url. optional, default is "url") - # url_tag = "url" - - ## An array of Kubernetes services to scrape metrics from. - # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] - - ## Kubernetes config file to create client from. - # kube_config = "/path/to/kubernetes.config" - - ## Scrape Kubernetes pods for the following prometheus annotations: - ## - prometheus.io/scrape: Enable scraping for this pod - ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to - ## set this to 'https' & most likely set the tls config. - ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. 
- ## - prometheus.io/port: If port is not 9102 use this annotation - # monitor_kubernetes_pods = true - ## Get the list of pods to scrape with either the scope of - ## - cluster: the kubernetes watch api (default, no need to specify) - ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. - # pod_scrape_scope = "cluster" - ## Only for node scrape scope: node IP of the node that telegraf is running on. - ## Either this config or the environment variable NODE_IP must be set. - # node_ip = "10.180.1.1" - # ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. - # ## Default is 60 seconds. - # # pod_scrape_interval = 60 - ## Restricts Kubernetes monitoring to a single namespace - ## ex: monitor_kubernetes_pods_namespace = "default" - # monitor_kubernetes_pods_namespace = "" - # label selector to target pods which have the label - # kubernetes_label_selector = "env=dev,app=nginx" - # field selector to target pods - # eg. To scrape pods on a specific node - # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" - - ## Use bearer token for authorization. ('bearer_token' takes priority) - # bearer_token = "/path/to/bearer/token" - ## OR - # bearer_token_string = "abc_123" - - ## HTTP Basic Authentication username and password. ('bearer_token' and - ## 'bearer_token_string' take priority) - # username = "" - # password = "" - - ## Specify timeout duration for slower prometheus clients (default is 3s) - # response_timeout = "3s" - - ## Optional TLS Config - # tls_ca = /path/to/cafile - # tls_cert = /path/to/certfile - # tls_key = /path/to/keyfile - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -`, - &Redis{ - Servers: []string{ - "tcp://localhost:6379", - "unix:///var/run/redis.sock", - }, - Password: "somepassword123", - }: `[[inputs.redis]] - ## specify servers via a url matching: - ## [protocol://][:password]@address[:port] - ## e.g. - ## tcp://localhost:6379 - ## tcp://:password@192.168.99.100 - ## unix:///var/run/redis.sock - ## - ## If no servers are specified, then localhost is used as the host. - ## If no port is specified, 6379 is used - servers = ["tcp://localhost:6379", "unix:///var/run/redis.sock"] - - ## Optional. Specify redis commands to retrieve values - # [[inputs.redis.commands]] - # # The command to run where each argument is a separate element - # command = ["get", "sample-key"] - # # The field to store the result in - # field = "sample-key-value" - # # The type of the result - # # Can be "string", "integer", or "float" - # type = "string" - - ## specify server password - # password = "somepassword123" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = true -`, - &Syslog{ - Address: "tcp://10.0.0.1:6514", - }: `[[inputs.syslog]] - ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 - ## Protocol, address and port to host the syslog receiver. - ## If no host is specified, then localhost is used. - ## If no port is specified, 6514 is used (RFC5425#section-4.1). - server = "tcp://10.0.0.1:6514" - - ## TLS Config - # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"] - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - - ## Period between keep alive probes. - ## 0 disables keep alive probes. 
- ## Defaults to the OS configuration. - ## Only applies to stream sockets (e.g. TCP). - # keep_alive_period = "5m" - - ## Maximum number of concurrent connections (default = 0). - ## 0 means unlimited. - ## Only applies to stream sockets (e.g. TCP). - # max_connections = 1024 - - ## Read timeout is the maximum time allowed for reading a single message (default = 5s). - ## 0 means unlimited. - # read_timeout = "5s" - - ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). - ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), - ## or the non-transparent framing technique (RFC6587#section-3.4.2). - ## Must be one of "octet-counting", "non-transparent". - # framing = "octet-counting" - - ## The trailer to be expected in case of non-transparent framing (default = "LF"). - ## Must be one of "LF", or "NUL". - # trailer = "LF" - - ## Whether to parse in best effort mode or not (default = false). - ## By default best effort parsing is off. - # best_effort = false - - ## Character to prepend to SD-PARAMs (default = "_"). - ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. - ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] - ## For each combination a field is created. - ## Its name is created concatenating identifier, sdparam_separator, and parameter name. - # sdparam_separator = "_" -`, - &Tail{ - Files: []string{"/var/log/**.log", "/var/log/apache.log"}, - }: `[[inputs.tail]] - ## File names or a pattern to tail. - ## These accept standard unix glob matching rules, but with the addition of - ## ** as a "super asterisk". ie: - ## "/var/log/**.log" -> recursively find all .log files in /var/log - ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log - ## "/var/log/apache.log" -> just tail the apache log file - ## "/var/log/log[!1-2]* -> tail files without 1-2 - ## "/var/log/log[^1-2]* -> identical behavior as above - ## See https://github.com/gobwas/glob for more examples - ## - files = ["/var/log/**.log", "/var/log/apache.log"] - - ## Read file from beginning. - # from_beginning = false - - ## Whether file is a named pipe - # pipe = false - - ## Method used to watch for file updates. Can be either "inotify" or "poll". - # watch_method = "inotify" - - ## Maximum lines of the file to process that have not yet be written by the - ## output. For best throughput set based on the number of metrics on each - ## line and the size of the output's metric_batch_size. - # max_undelivered_lines = 1000 - - ## Character encoding to use when interpreting the file contents. Invalid - ## characters are replaced using the unicode replacement character. When set - ## to the empty string the data is not decoded to text. - ## ex: character_encoding = "utf-8" - ## character_encoding = "utf-16le" - ## character_encoding = "utf-16be" - ## character_encoding = "" - # character_encoding = "" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" - - ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string. 
- # path_tag = "path" - - ## multiline parser/codec - ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html - #[inputs.tail.multiline] - ## The pattern should be a regexp which matches what you believe to be an - ## indicator that the field is part of an event consisting of multiple lines of log data. - #pattern = "^\s" - - ## This field must be either "previous" or "next". - ## If a line matches the pattern, "previous" indicates that it belongs to the previous line, - ## whereas "next" indicates that the line belongs to the next one. - #match_which_line = "previous" - - ## The invert_match field can be true or false (defaults to false). - ## If true, a message not matching the pattern will constitute a match of the multiline - ## filter and the what will be applied. (vice-versa is also true) - #invert_match = false - - ## After the specified timeout, this plugin sends a multiline event even if no new pattern - ## is found to start a new event. The default timeout is 5s. - #timeout = 5s -`, - }, - }, - } - for _, c := range cases { - for input, toml := range c.plugins { - if toml != input.TOML() { - t.Fatalf("%s failed want %s, got %v", c.name, toml, input.TOML()) - } - } - } -} - -func TestDecodeTOML(t *testing.T) { - cases := []struct { - name string - want telegrafPluginConfig - wantErr error - input telegrafPluginConfig - data interface{} - }{ - { - name: "cpu", - want: &CPUStats{}, - input: &CPUStats{}, - }, - { - name: "disk", - want: &DiskStats{}, - input: &DiskStats{}, - }, - { - name: "diskio", - want: &DiskIO{}, - input: &DiskIO{}, - }, - { - name: "docker bad data", - want: &Docker{}, - wantErr: errors.New("bad endpoint for docker input plugin"), - input: &Docker{}, - data: map[string]int{}, - }, - { - name: "docker", - want: &Docker{ - Endpoint: "unix:///var/run/docker.sock", - }, - input: &Docker{}, - data: map[string]interface{}{ - "endpoint": "unix:///var/run/docker.sock", - }, - }, - { - name: "file empty", - want: &File{}, - wantErr: errors.New("bad files for file input plugin"), - input: &File{}, - }, - { - name: "file bad data not array", - want: &File{}, - wantErr: errors.New("not an array for file input plugin"), - input: &File{}, - data: map[string]interface{}{ - "files": "", - }, - }, - { - name: "file", - want: &File{ - Files: []string{ - "/var/log/**.log", - "/var/log/apache.log", - }, - }, - input: &File{}, - data: map[string]interface{}{ - "files": []interface{}{ - "/var/log/**.log", - "/var/log/apache.log", - }, - }, - }, - { - name: "kernel", - want: &Kernel{}, - input: &Kernel{}, - }, - { - name: "kubernetes empty", - want: &Kubernetes{}, - wantErr: errors.New("bad url for kubernetes input plugin"), - input: &Kubernetes{}, - }, - { - name: "kubernetes", - want: &Kubernetes{ - URL: "http://1.1.1.1:10255", - }, - input: &Kubernetes{}, - data: map[string]interface{}{ - "url": "http://1.1.1.1:10255", - }, - }, - { - name: "logparser empty", - want: &LogParserPlugin{}, - wantErr: errors.New("bad files for logparser input plugin"), - input: &LogParserPlugin{}, - }, - { - name: "logparser file not array", - want: &LogParserPlugin{}, - wantErr: errors.New("files is not an array for logparser input plugin"), - input: &LogParserPlugin{}, - data: map[string]interface{}{ - "files": "ok", - }, - }, - { - name: "logparser", - want: &LogParserPlugin{ - Files: []string{ - "/var/log/**.log", - "/var/log/apache.log", - }, - }, - input: &LogParserPlugin{}, - data: map[string]interface{}{ - "files": []interface{}{ - "/var/log/**.log", - 
"/var/log/apache.log", - }, - }, - }, - { - name: "mem", - want: &MemStats{}, - input: &MemStats{}, - }, - { - name: "net_response", - want: &NetResponse{}, - input: &NetResponse{}, - }, - { - name: "net", - want: &NetIOStats{}, - input: &NetIOStats{}, - }, - { - name: "nginx empty", - want: &Nginx{}, - wantErr: errors.New("bad urls for nginx input plugin"), - input: &Nginx{}, - }, - { - name: "nginx bad data not array", - want: &Nginx{}, - wantErr: errors.New("urls is not an array for nginx input plugin"), - input: &Nginx{}, - data: map[string]interface{}{ - "urls": "", - }, - }, - { - name: "nginx", - want: &Nginx{ - URLs: []string{ - "http://localhost/server_status", - "http://192.168.1.1/server_status", - }, - }, - input: &Nginx{}, - data: map[string]interface{}{ - "urls": []interface{}{ - "http://localhost/server_status", - "http://192.168.1.1/server_status", - }, - }, - }, - { - name: "processes", - want: &Processes{}, - input: &Processes{}, - }, - { - name: "procstat empty", - want: &Procstat{}, - wantErr: errors.New("bad exe for procstat input plugin"), - input: &Procstat{}, - }, - { - name: "procstat", - want: &Procstat{ - Exe: "finder", - }, - input: &Procstat{}, - data: map[string]interface{}{ - "exe": "finder", - }, - }, - { - name: "prometheus empty", - want: &Prometheus{}, - wantErr: errors.New("bad urls for prometheus input plugin"), - input: &Prometheus{}, - }, - { - name: "prometheus bad data not array", - want: &Prometheus{}, - wantErr: errors.New("urls is not an array for prometheus input plugin"), - input: &Prometheus{}, - data: map[string]interface{}{ - "urls": "", - }, - }, - { - name: "prometheus", - want: &Prometheus{ - URLs: []string{ - "http://192.168.2.1:9090", - "http://192.168.2.2:9090", - }, - }, - input: &Prometheus{}, - data: map[string]interface{}{ - "urls": []interface{}{ - "http://192.168.2.1:9090", - "http://192.168.2.2:9090", - }, - }, - }, - { - name: "redis empty", - want: &Redis{}, - wantErr: errors.New("bad servers for redis input plugin"), - input: &Redis{}, - }, - { - name: "redis bad data not array", - want: &Redis{}, - wantErr: errors.New("servers is not an array for redis input plugin"), - input: &Redis{}, - data: map[string]interface{}{ - "servers": "", - }, - }, - { - name: "redis without password", - want: &Redis{ - Servers: []string{ - "http://192.168.2.1:9090", - "http://192.168.2.2:9090", - }, - }, - input: &Redis{}, - data: map[string]interface{}{ - "servers": []interface{}{ - "http://192.168.2.1:9090", - "http://192.168.2.2:9090", - }, - }, - }, - { - name: "redis with password", - want: &Redis{ - Servers: []string{ - "http://192.168.2.1:9090", - "http://192.168.2.2:9090", - }, - Password: "pass1", - }, - input: &Redis{}, - data: map[string]interface{}{ - "servers": []interface{}{ - "http://192.168.2.1:9090", - "http://192.168.2.2:9090", - }, - "password": "pass1", - }, - }, - { - name: "swap", - want: &SwapStats{}, - input: &SwapStats{}, - }, - { - name: "syslog empty", - want: &Syslog{}, - wantErr: errors.New("bad server for syslog input plugin"), - input: &Syslog{}, - }, - { - name: "syslog", - want: &Syslog{ - Address: "http://1.1.1.1:10255", - }, - input: &Syslog{}, - data: map[string]interface{}{ - "server": "http://1.1.1.1:10255", - }, - }, - { - name: "system", - want: &SystemStats{}, - input: &SystemStats{}, - }, - { - name: "tail empty", - want: &Tail{}, - wantErr: errors.New("bad files for tail input plugin"), - input: &Tail{}, - }, - { - name: "tail bad data not array", - want: &Tail{}, - wantErr: errors.New("not an array 
for tail input plugin"), - input: &Tail{}, - data: map[string]interface{}{ - "files": "", - }, - }, - { - name: "tail", - want: &Tail{ - Files: []string{ - "http://192.168.2.1:9090", - "http://192.168.2.2:9090", - }, - }, - input: &Tail{}, - data: map[string]interface{}{ - "files": []interface{}{ - "http://192.168.2.1:9090", - "http://192.168.2.2:9090", - }, - }, - }, - } - for _, c := range cases { - err := c.input.UnmarshalTOML(c.data) - if c.wantErr != nil && (err == nil || err.Error() != c.wantErr.Error()) { - t.Fatalf("%s failed want err %s, got %v", c.name, c.wantErr.Error(), err) - } - if c.wantErr == nil && err != nil { - t.Fatalf("%s failed want err nil, got %v", c.name, err) - } - if !reflect.DeepEqual(c.input, c.want) { - t.Fatalf("%s failed want %v, got %v", c.name, c.want, c.input) - } - } -} diff --git a/telegraf/plugins/inputs/kernel.go b/telegraf/plugins/inputs/kernel.go deleted file mode 100644 index 663337f49b9..00000000000 --- a/telegraf/plugins/inputs/kernel.go +++ /dev/null @@ -1,27 +0,0 @@ -package inputs - -import ( - "fmt" -) - -// Kernel is based on telegraf Kernel. -type Kernel struct { - baseInput -} - -// PluginName is based on telegraf plugin name. -func (k *Kernel) PluginName() string { - return "kernel" -} - -// TOML encodes to toml string -func (k *Kernel) TOML() string { - return fmt.Sprintf(`[[inputs.%s]] - # no configuration -`, k.PluginName()) -} - -// UnmarshalTOML decodes the parsed data to the object -func (k *Kernel) UnmarshalTOML(data interface{}) error { - return nil -} diff --git a/telegraf/plugins/inputs/kubernetes.go b/telegraf/plugins/inputs/kubernetes.go deleted file mode 100644 index 542543ff8e4..00000000000 --- a/telegraf/plugins/inputs/kubernetes.go +++ /dev/null @@ -1,59 +0,0 @@ -package inputs - -import ( - "errors" - "fmt" -) - -// Kubernetes is based on telegraf Kubernetes plugin -type Kubernetes struct { - baseInput - URL string `json:"url"` -} - -// PluginName is based on telegraf plugin name. -func (k *Kubernetes) PluginName() string { - return "kubernetes" -} - -// TOML encodes to toml string. -func (k *Kubernetes) TOML() string { - return fmt.Sprintf(`[[inputs.%s]] - ## URL for the kubelet - url = "%s" - - ## Use bearer token for authorization. ('bearer_token' takes priority) - ## If both of these are empty, we'll use the default serviceaccount: - ## at: /run/secrets/kubernetes.io/serviceaccount/token - # bearer_token = "/path/to/bearer/token" - ## OR - # bearer_token_string = "abc_123" - - ## Pod labels to be added as tags. An empty array for both include and - ## exclude will include all labels. 
- # label_include = [] - # label_exclude = ["*"] - - ## Set response_timeout (default 5 seconds) - # response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = /path/to/cafile - # tls_cert = /path/to/certfile - # tls_key = /path/to/keyfile - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -`, k.PluginName(), k.URL) -} - -// UnmarshalTOML decodes the parsed data to the object -func (k *Kubernetes) UnmarshalTOML(data interface{}) error { - dataOK, ok := data.(map[string]interface{}) - if !ok { - return errors.New("bad url for kubernetes input plugin") - } - if k.URL, ok = dataOK["url"].(string); !ok { - return errors.New("url is not a string value") - } - return nil -} diff --git a/telegraf/plugins/inputs/logparser.go b/telegraf/plugins/inputs/logparser.go deleted file mode 100644 index 367057650a3..00000000000 --- a/telegraf/plugins/inputs/logparser.go +++ /dev/null @@ -1,95 +0,0 @@ -package inputs - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -// LogParserPlugin is based on telegraf LogParserPlugin. -type LogParserPlugin struct { - baseInput - Files []string `json:"files"` -} - -// PluginName is based on telegraf plugin name. -func (l *LogParserPlugin) PluginName() string { - return "logparser" -} - -// TOML encodes to toml string -func (l *LogParserPlugin) TOML() string { - s := make([]string, len(l.Files)) - for k, v := range l.Files { - s[k] = strconv.Quote(v) - } - return fmt.Sprintf(`[[inputs.%s]] - ## Log files to parse. - ## These accept standard unix glob matching rules, but with the addition of - ## ** as a "super asterisk". ie: - ## /var/log/**.log -> recursively find all .log files in /var/log - ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log - ## /var/log/apache.log -> only tail the apache log file - files = [%s] - - ## Read files that currently exist from the beginning. Files that are created - ## while telegraf is running (and that match the "files" globs) will always - ## be read from the beginning. - from_beginning = false - - ## Method used to watch for file updates. Can be either "inotify" or "poll". - # watch_method = "inotify" - - ## Parse logstash-style "grok" patterns: - [inputs.logparser.grok] - ## This is a list of patterns to check the given log file(s) for. - ## Note that adding patterns here increases processing time. The most - ## efficient configuration is to have one pattern per logparser. - ## Other common built-in patterns are: - ## %%{COMMON_LOG_FORMAT} (plain apache & nginx access logs) - ## %%{COMBINED_LOG_FORMAT} (access logs + referrer & agent) - patterns = ["%%{COMBINED_LOG_FORMAT}"] - - ## Name of the outputted measurement name. - measurement = "apache_access_log" - - ## Full path(s) to custom pattern files. - custom_pattern_files = [] - - ## Custom patterns can also be defined here. Put one pattern per line. - custom_patterns = ''' - ''' - - ## Timezone allows you to provide an override for timestamps that - ## don't already include an offset - ## e.g. 04/06/2016 12:41:45 data one two 5.43µs - ## - ## Default: "" which renders UTC - ## Options are as follows: - ## 1. Local -- interpret based on machine localtime - ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones - ## 3. UTC -- or blank/unspecified, will return timestamp in UTC - # timezone = "Canada/Eastern" - - ## When set to "disable", timestamp will not incremented if there is a - ## duplicate. 
- # unique_timestamp = "auto" -`, l.PluginName(), strings.Join(s, ", ")) -} - -// UnmarshalTOML decodes the parsed data to the object -func (l *LogParserPlugin) UnmarshalTOML(data interface{}) error { - dataOK, ok := data.(map[string]interface{}) - if !ok { - return errors.New("bad files for logparser input plugin") - } - files, ok := dataOK["files"].([]interface{}) - if !ok { - return errors.New("files is not an array for logparser input plugin") - } - for _, fi := range files { - l.Files = append(l.Files, fi.(string)) - } - return nil -} diff --git a/telegraf/plugins/inputs/mem.go b/telegraf/plugins/inputs/mem.go deleted file mode 100644 index 2e1620df7ed..00000000000 --- a/telegraf/plugins/inputs/mem.go +++ /dev/null @@ -1,27 +0,0 @@ -package inputs - -import ( - "fmt" -) - -// MemStats is based on telegraf MemStats. -type MemStats struct { - baseInput -} - -// PluginName is based on telegraf plugin name. -func (m *MemStats) PluginName() string { - return "mem" -} - -// TOML encodes to toml string -func (m *MemStats) TOML() string { - return fmt.Sprintf(`[[inputs.%s]] - # no configuration -`, m.PluginName()) -} - -// UnmarshalTOML decodes the parsed data to the object -func (m *MemStats) UnmarshalTOML(data interface{}) error { - return nil -} diff --git a/telegraf/plugins/inputs/net.go b/telegraf/plugins/inputs/net.go deleted file mode 100644 index 65f9a8c6e6f..00000000000 --- a/telegraf/plugins/inputs/net.go +++ /dev/null @@ -1,37 +0,0 @@ -package inputs - -import ( - "fmt" -) - -// NetIOStats is based on telegraf NetIOStats. -type NetIOStats struct { - baseInput -} - -// PluginName is based on telegraf plugin name. -func (n *NetIOStats) PluginName() string { - return "net" -} - -// TOML encodes to toml string -func (n *NetIOStats) TOML() string { - return fmt.Sprintf(`[[inputs.%s]] - ## By default, telegraf gathers stats from any up interface (excluding loopback) - ## Setting interfaces will tell it to gather these explicit interfaces, - ## regardless of status. - ## - # interfaces = ["eth0"] - ## - ## On linux systems telegraf also collects protocol stats. - ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. - ## - # ignore_protocol_stats = false - ## -`, n.PluginName()) -} - -// UnmarshalTOML decodes the parsed data to the object -func (n *NetIOStats) UnmarshalTOML(data interface{}) error { - return nil -} diff --git a/telegraf/plugins/inputs/net_response.go b/telegraf/plugins/inputs/net_response.go deleted file mode 100644 index cdf42602b9c..00000000000 --- a/telegraf/plugins/inputs/net_response.go +++ /dev/null @@ -1,49 +0,0 @@ -package inputs - -import ( - "fmt" -) - -// NetResponse is based on telegraf NetResponse. -type NetResponse struct { - baseInput -} - -// PluginName is based on telegraf plugin name. -func (n *NetResponse) PluginName() string { - return "net_response" -} - -// TOML encodes to toml string -func (n *NetResponse) TOML() string { - return fmt.Sprintf(`[[inputs.%s]] - ## Protocol, must be "tcp" or "udp" - ## NOTE: because the "udp" protocol does not respond to requests, it requires - ## a send/expect string pair (see below). - protocol = "tcp" - ## Server address (default localhost) - address = "localhost:80" - - ## Set timeout - # timeout = "1s" - - ## Set read timeout (only used if expecting a response) - # read_timeout = "1s" - - ## The following options are required for UDP checks. For TCP, they are - ## optional. 
The plugin will send the given string to the server and then - ## expect to receive the given 'expect' string back. - ## string sent to the server - # send = "ssh" - ## expected string in answer - # expect = "ssh" - - ## Uncomment to remove deprecated fields - # fielddrop = ["result_type", "string_found"] -`, n.PluginName()) -} - -// UnmarshalTOML decodes the parsed data to the object -func (n *NetResponse) UnmarshalTOML(data interface{}) error { - return nil -} diff --git a/telegraf/plugins/inputs/nginx.go b/telegraf/plugins/inputs/nginx.go deleted file mode 100644 index ea8a2dc44c8..00000000000 --- a/telegraf/plugins/inputs/nginx.go +++ /dev/null @@ -1,57 +0,0 @@ -package inputs - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -// Nginx is based on telegraf nginx plugin. -type Nginx struct { - baseInput - URLs []string `json:"urls"` -} - -// PluginName is based on telegraf plugin name. -func (n *Nginx) PluginName() string { - return "nginx" -} - -// TOML encodes to toml string -func (n *Nginx) TOML() string { - s := make([]string, len(n.URLs)) - for k, v := range n.URLs { - s[k] = strconv.Quote(v) - } - return fmt.Sprintf(`[[inputs.%s]] - # An array of Nginx stub_status URI to gather stats. - urls = [%s] - - ## Optional TLS Config - tls_ca = "/etc/telegraf/ca.pem" - tls_cert = "/etc/telegraf/cert.cer" - tls_key = "/etc/telegraf/key.key" - ## Use TLS but skip chain & host verification - insecure_skip_verify = false - - # HTTP response timeout (default: 5s) - response_timeout = "5s" -`, n.PluginName(), strings.Join(s, ", ")) -} - -// UnmarshalTOML decodes the parsed data to the object -func (n *Nginx) UnmarshalTOML(data interface{}) error { - dataOK, ok := data.(map[string]interface{}) - if !ok { - return errors.New("bad urls for nginx input plugin") - } - urls, ok := dataOK["urls"].([]interface{}) - if !ok { - return errors.New("urls is not an array for nginx input plugin") - } - for _, url := range urls { - n.URLs = append(n.URLs, url.(string)) - } - return nil -} diff --git a/telegraf/plugins/inputs/processes.go b/telegraf/plugins/inputs/processes.go deleted file mode 100644 index f097fa7fab7..00000000000 --- a/telegraf/plugins/inputs/processes.go +++ /dev/null @@ -1,27 +0,0 @@ -package inputs - -import ( - "fmt" -) - -// Processes is based on telegraf Processes. -type Processes struct { - baseInput -} - -// PluginName is based on telegraf plugin name. -func (p *Processes) PluginName() string { - return "processes" -} - -// TOML encodes to toml string -func (p *Processes) TOML() string { - return fmt.Sprintf(`[[inputs.%s]] - # no configuration -`, p.PluginName()) -} - -// UnmarshalTOML decodes the parsed data to the object -func (p *Processes) UnmarshalTOML(data interface{}) error { - return nil -} diff --git a/telegraf/plugins/inputs/procstat.go b/telegraf/plugins/inputs/procstat.go deleted file mode 100644 index 6570b4e2cdd..00000000000 --- a/telegraf/plugins/inputs/procstat.go +++ /dev/null @@ -1,77 +0,0 @@ -package inputs - -import ( - "errors" - "fmt" -) - -// Procstat is based on telegraf procstat input plugin. -type Procstat struct { - baseInput - Exe string `json:"exe"` -} - -// PluginName is based on telegraf plugin name. -func (p *Procstat) PluginName() string { - return "procstat" -} - -// TOML encodes to toml string. 
-func (p *Procstat) TOML() string { - return fmt.Sprintf(`[[inputs.%s]] - ## PID file to monitor process - pid_file = "/var/run/nginx.pid" - ## executable name (ie, pgrep ) - # exe = "%s" - ## pattern as argument for pgrep (ie, pgrep -f ) - # pattern = "nginx" - ## user as argument for pgrep (ie, pgrep -u ) - # user = "nginx" - ## Systemd unit name - # systemd_unit = "nginx.service" - ## CGroup name or path - # cgroup = "systemd/system.slice/nginx.service" - - ## Windows service name - # win_service = "" - - ## override for process_name - ## This is optional; default is sourced from /proc//status - # process_name = "bar" - - ## Field name prefix - # prefix = "" - - ## When true add the full cmdline as a tag. - # cmdline_tag = false - - ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. - # mode = "irix" - - ## Add the PID as a tag instead of as a field. When collecting multiple - ## processes with otherwise matching tags this setting should be enabled to - ## ensure each process has a unique identity. - ## - ## Enabling this option may result in a large number of series, especially - ## when processes have a short lifetime. - # pid_tag = false - - ## Method to use when finding process IDs. Can be one of 'pgrep', or - ## 'native'. The pgrep finder calls the pgrep executable in the PATH while - ## the native finder performs the search directly in a manor dependent on the - ## platform. Default is 'pgrep' - # pid_finder = "pgrep" -`, p.PluginName(), p.Exe) -} - -// UnmarshalTOML decodes the parsed data to the object -func (p *Procstat) UnmarshalTOML(data interface{}) error { - dataOK, ok := data.(map[string]interface{}) - if !ok { - return errors.New("bad exe for procstat input plugin") - } - if p.Exe, ok = dataOK["exe"].(string); !ok { - return errors.New("exe is not a string value") - } - return nil -} diff --git a/telegraf/plugins/inputs/prometheus.go b/telegraf/plugins/inputs/prometheus.go deleted file mode 100644 index c00721c1b37..00000000000 --- a/telegraf/plugins/inputs/prometheus.go +++ /dev/null @@ -1,111 +0,0 @@ -package inputs - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -// Prometheus is based on telegraf Prometheus plugin. -type Prometheus struct { - baseInput - URLs []string `json:"urls"` -} - -// PluginName is based on telegraf plugin name. -func (p *Prometheus) PluginName() string { - return "prometheus" -} - -// TOML encodes to toml string -func (p *Prometheus) TOML() string { - s := make([]string, len(p.URLs)) - for k, v := range p.URLs { - s[k] = strconv.Quote(v) - } - return fmt.Sprintf(`[[inputs.%s]] - ## An array of urls to scrape metrics from. - urls = [%s] - - ## Metric version controls the mapping from Prometheus metrics into - ## Telegraf metrics. When using the prometheus_client output, use the same - ## value in both plugins to ensure metrics are round-tripped without - ## modification. - ## - ## example: metric_version = 1; - ## metric_version = 2; recommended version - # metric_version = 1 - - ## Url tag name (tag containing scrapped url. optional, default is "url") - # url_tag = "url" - - ## An array of Kubernetes services to scrape metrics from. - # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] - - ## Kubernetes config file to create client from. 
- # kube_config = "/path/to/kubernetes.config" - - ## Scrape Kubernetes pods for the following prometheus annotations: - ## - prometheus.io/scrape: Enable scraping for this pod - ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to - ## set this to 'https' & most likely set the tls config. - ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. - ## - prometheus.io/port: If port is not 9102 use this annotation - # monitor_kubernetes_pods = true - ## Get the list of pods to scrape with either the scope of - ## - cluster: the kubernetes watch api (default, no need to specify) - ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. - # pod_scrape_scope = "cluster" - ## Only for node scrape scope: node IP of the node that telegraf is running on. - ## Either this config or the environment variable NODE_IP must be set. - # node_ip = "10.180.1.1" - # ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. - # ## Default is 60 seconds. - # # pod_scrape_interval = 60 - ## Restricts Kubernetes monitoring to a single namespace - ## ex: monitor_kubernetes_pods_namespace = "default" - # monitor_kubernetes_pods_namespace = "" - # label selector to target pods which have the label - # kubernetes_label_selector = "env=dev,app=nginx" - # field selector to target pods - # eg. To scrape pods on a specific node - # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" - - ## Use bearer token for authorization. ('bearer_token' takes priority) - # bearer_token = "/path/to/bearer/token" - ## OR - # bearer_token_string = "abc_123" - - ## HTTP Basic Authentication username and password. ('bearer_token' and - ## 'bearer_token_string' take priority) - # username = "" - # password = "" - - ## Specify timeout duration for slower prometheus clients (default is 3s) - # response_timeout = "3s" - - ## Optional TLS Config - # tls_ca = /path/to/cafile - # tls_cert = /path/to/certfile - # tls_key = /path/to/keyfile - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -`, p.PluginName(), strings.Join(s, ", ")) -} - -// UnmarshalTOML decodes the parsed data to the object -func (p *Prometheus) UnmarshalTOML(data interface{}) error { - dataOK, ok := data.(map[string]interface{}) - if !ok { - return errors.New("bad urls for prometheus input plugin") - } - urls, ok := dataOK["urls"].([]interface{}) - if !ok { - return errors.New("urls is not an array for prometheus input plugin") - } - for _, url := range urls { - p.URLs = append(p.URLs, url.(string)) - } - return nil -} diff --git a/telegraf/plugins/inputs/redis.go b/telegraf/plugins/inputs/redis.go deleted file mode 100644 index 3c148a6510d..00000000000 --- a/telegraf/plugins/inputs/redis.go +++ /dev/null @@ -1,83 +0,0 @@ -package inputs - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -// Redis is based on telegraf Redis plugin. -type Redis struct { - baseInput - Servers []string `json:"servers"` - Password string `json:"password"` -} - -// PluginName is based on telegraf plugin name. 
-func (r *Redis) PluginName() string { - return "redis" -} - -// TOML encodes to toml string -func (r *Redis) TOML() string { - s := make([]string, len(r.Servers)) - for k, v := range r.Servers { - s[k] = strconv.Quote(v) - } - password := ` # password = ""` - if r.Password != "" { - password = fmt.Sprintf(` # password = "%s"`, r.Password) - } - return fmt.Sprintf(`[[inputs.%s]] - ## specify servers via a url matching: - ## [protocol://][:password]@address[:port] - ## e.g. - ## tcp://localhost:6379 - ## tcp://:password@192.168.99.100 - ## unix:///var/run/redis.sock - ## - ## If no servers are specified, then localhost is used as the host. - ## If no port is specified, 6379 is used - servers = [%s] - - ## Optional. Specify redis commands to retrieve values - # [[inputs.redis.commands]] - # # The command to run where each argument is a separate element - # command = ["get", "sample-key"] - # # The field to store the result in - # field = "sample-key-value" - # # The type of the result - # # Can be "string", "integer", or "float" - # type = "string" - - ## specify server password -%s - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = true -`, r.PluginName(), strings.Join(s, ", "), password) -} - -// UnmarshalTOML decodes the parsed data to the object -func (r *Redis) UnmarshalTOML(data interface{}) error { - dataOK, ok := data.(map[string]interface{}) - if !ok { - return errors.New("bad servers for redis input plugin") - } - servers, ok := dataOK["servers"].([]interface{}) - if !ok { - return errors.New("servers is not an array for redis input plugin") - } - for _, server := range servers { - r.Servers = append(r.Servers, server.(string)) - } - - r.Password, _ = dataOK["password"].(string) - - return nil -} diff --git a/telegraf/plugins/inputs/swap.go b/telegraf/plugins/inputs/swap.go deleted file mode 100644 index 2e704188b2f..00000000000 --- a/telegraf/plugins/inputs/swap.go +++ /dev/null @@ -1,27 +0,0 @@ -package inputs - -import ( - "fmt" -) - -// SwapStats is based on telegraf SwapStats. -type SwapStats struct { - baseInput -} - -// PluginName is based on telegraf plugin name. -func (s *SwapStats) PluginName() string { - return "swap" -} - -// TOML encodes to toml string. -func (s *SwapStats) TOML() string { - return fmt.Sprintf(`[[inputs.%s]] - # no configuration -`, s.PluginName()) -} - -// UnmarshalTOML decodes the parsed data to the object -func (s *SwapStats) UnmarshalTOML(data interface{}) error { - return nil -} diff --git a/telegraf/plugins/inputs/syslog.go b/telegraf/plugins/inputs/syslog.go deleted file mode 100644 index 4b4e4c5fd0f..00000000000 --- a/telegraf/plugins/inputs/syslog.go +++ /dev/null @@ -1,79 +0,0 @@ -package inputs - -import ( - "errors" - "fmt" -) - -// Syslog is based on telegraf Syslog plugin. -type Syslog struct { - baseInput - Address string `json:"server"` -} - -// PluginName is based on telegraf plugin name. -func (s *Syslog) PluginName() string { - return "syslog" -} - -// TOML encodes to toml string -func (s *Syslog) TOML() string { - return fmt.Sprintf(`[[inputs.%s]] - ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 - ## Protocol, address and port to host the syslog receiver. - ## If no host is specified, then localhost is used. - ## If no port is specified, 6514 is used (RFC5425#section-4.1). 
- server = "%s" - - ## TLS Config - # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"] - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - - ## Period between keep alive probes. - ## 0 disables keep alive probes. - ## Defaults to the OS configuration. - ## Only applies to stream sockets (e.g. TCP). - # keep_alive_period = "5m" - - ## Maximum number of concurrent connections (default = 0). - ## 0 means unlimited. - ## Only applies to stream sockets (e.g. TCP). - # max_connections = 1024 - - ## Read timeout is the maximum time allowed for reading a single message (default = 5s). - ## 0 means unlimited. - # read_timeout = "5s" - - ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). - ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), - ## or the non-transparent framing technique (RFC6587#section-3.4.2). - ## Must be one of "octet-counting", "non-transparent". - # framing = "octet-counting" - - ## The trailer to be expected in case of non-transparent framing (default = "LF"). - ## Must be one of "LF", or "NUL". - # trailer = "LF" - - ## Whether to parse in best effort mode or not (default = false). - ## By default best effort parsing is off. - # best_effort = false - - ## Character to prepend to SD-PARAMs (default = "_"). - ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. - ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] - ## For each combination a field is created. - ## Its name is created concatenating identifier, sdparam_separator, and parameter name. - # sdparam_separator = "_" -`, s.PluginName(), s.Address) -} - -// UnmarshalTOML decodes the parsed data to the object -func (s *Syslog) UnmarshalTOML(data interface{}) error { - dataOK, ok := data.(map[string]interface{}) - if !ok { - return errors.New("bad server for syslog input plugin") - } - s.Address, _ = dataOK["server"].(string) - return nil -} diff --git a/telegraf/plugins/inputs/system.go b/telegraf/plugins/inputs/system.go deleted file mode 100644 index cfecc8aaccf..00000000000 --- a/telegraf/plugins/inputs/system.go +++ /dev/null @@ -1,28 +0,0 @@ -package inputs - -import ( - "fmt" -) - -// SystemStats is based on telegraf SystemStats. -type SystemStats struct { - baseInput -} - -// PluginName is based on telegraf plugin name. -func (s *SystemStats) PluginName() string { - return "system" -} - -// TOML encodes to toml string -func (s *SystemStats) TOML() string { - return fmt.Sprintf(`[[inputs.%s]] - ## Uncomment to remove deprecated metrics. - # fielddrop = ["uptime_format"] -`, s.PluginName()) -} - -// UnmarshalTOML decodes the parsed data to the object -func (s *SystemStats) UnmarshalTOML(data interface{}) error { - return nil -} diff --git a/telegraf/plugins/inputs/tail.go b/telegraf/plugins/inputs/tail.go deleted file mode 100644 index b91f1bb28c1..00000000000 --- a/telegraf/plugins/inputs/tail.go +++ /dev/null @@ -1,109 +0,0 @@ -package inputs - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -// Tail is based on telegraf Tail plugin. -type Tail struct { - baseInput - Files []string `json:"files"` -} - -// PluginName is based on telegraf plugin name. 
-func (t *Tail) PluginName() string { - return "tail" -} - -// TOML encodes to toml string -func (t *Tail) TOML() string { - s := make([]string, len(t.Files)) - for k, v := range t.Files { - s[k] = strconv.Quote(v) - } - return fmt.Sprintf(`[[inputs.%s]] - ## File names or a pattern to tail. - ## These accept standard unix glob matching rules, but with the addition of - ## ** as a "super asterisk". ie: - ## "/var/log/**.log" -> recursively find all .log files in /var/log - ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log - ## "/var/log/apache.log" -> just tail the apache log file - ## "/var/log/log[!1-2]* -> tail files without 1-2 - ## "/var/log/log[^1-2]* -> identical behavior as above - ## See https://github.com/gobwas/glob for more examples - ## - files = [%s] - - ## Read file from beginning. - # from_beginning = false - - ## Whether file is a named pipe - # pipe = false - - ## Method used to watch for file updates. Can be either "inotify" or "poll". - # watch_method = "inotify" - - ## Maximum lines of the file to process that have not yet be written by the - ## output. For best throughput set based on the number of metrics on each - ## line and the size of the output's metric_batch_size. - # max_undelivered_lines = 1000 - - ## Character encoding to use when interpreting the file contents. Invalid - ## characters are replaced using the unicode replacement character. When set - ## to the empty string the data is not decoded to text. - ## ex: character_encoding = "utf-8" - ## character_encoding = "utf-16le" - ## character_encoding = "utf-16be" - ## character_encoding = "" - # character_encoding = "" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" - - ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string. - # path_tag = "path" - - ## multiline parser/codec - ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html - #[inputs.tail.multiline] - ## The pattern should be a regexp which matches what you believe to be an - ## indicator that the field is part of an event consisting of multiple lines of log data. - #pattern = "^\s" - - ## This field must be either "previous" or "next". - ## If a line matches the pattern, "previous" indicates that it belongs to the previous line, - ## whereas "next" indicates that the line belongs to the next one. - #match_which_line = "previous" - - ## The invert_match field can be true or false (defaults to false). - ## If true, a message not matching the pattern will constitute a match of the multiline - ## filter and the what will be applied. (vice-versa is also true) - #invert_match = false - - ## After the specified timeout, this plugin sends a multiline event even if no new pattern - ## is found to start a new event. The default timeout is 5s. 
- #timeout = 5s -`, t.PluginName(), strings.Join(s, ", ")) -} - -// UnmarshalTOML decodes the parsed data to the object -func (t *Tail) UnmarshalTOML(data interface{}) error { - dataOK, ok := data.(map[string]interface{}) - if !ok { - return errors.New("bad files for tail input plugin") - } - files, ok := dataOK["files"].([]interface{}) - if !ok { - return errors.New("not an array for tail input plugin") - } - for _, fi := range files { - t.Files = append(t.Files, fi.(string)) - } - return nil -} diff --git a/telegraf/plugins/outputs/base.go b/telegraf/plugins/outputs/base.go deleted file mode 100644 index 0c1e7196acc..00000000000 --- a/telegraf/plugins/outputs/base.go +++ /dev/null @@ -1,9 +0,0 @@ -package outputs - -import "github.com/influxdata/influxdb/v2/telegraf/plugins" - -type baseOutput int - -func (b baseOutput) Type() plugins.Type { - return plugins.Output -} diff --git a/telegraf/plugins/outputs/file.go b/telegraf/plugins/outputs/file.go deleted file mode 100644 index 426323ae715..00000000000 --- a/telegraf/plugins/outputs/file.go +++ /dev/null @@ -1,89 +0,0 @@ -package outputs - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -// File is based on telegraf file output plugin. -type File struct { - baseOutput - Files []FileConfig `json:"files"` -} - -// FileConfig is the config settings of outpu file plugin. -type FileConfig struct { - Typ string `json:"type"` - Path string `json:"path"` -} - -// PluginName is based on telegraf plugin name. -func (f *File) PluginName() string { - return "file" -} - -// TOML encodes to toml string. -func (f *File) TOML() string { - s := make([]string, len(f.Files)) - for k, v := range f.Files { - if v.Typ == "stdout" { - s[k] = strconv.Quote(v.Typ) - continue - } - s[k] = strconv.Quote(v.Path) - } - return fmt.Sprintf(`[[outputs.%s]] - ## Files to write to, "stdout" is a specially handled file. - files = [%s] - - ## Use batch serialization format instead of line based delimiting. The - ## batch format allows for the production of non line based output formats and - ## may more efficiently encode metric groups. - # use_batch_format = false - - ## The file will be rotated after the time interval specified. When set - ## to 0 no time based rotation is performed. - # rotation_interval = "0d" - - ## The logfile will be rotated when it becomes larger than the specified - ## size. When set to 0 no size based rotation is performed. - # rotation_max_size = "0MB" - - ## Maximum number of rotated archives to keep, any older logs are deleted. - ## If set to -1, no archives are removed. - # rotation_max_archives = 5 - - ## Data format to output. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "influx" -`, f.PluginName(), strings.Join(s, ", ")) -} - -// UnmarshalTOML decodes the parsed data to the object -func (f *File) UnmarshalTOML(data interface{}) error { - dataOK, ok := data.(map[string]interface{}) - if !ok { - return errors.New("bad files for file output plugin") - } - files, ok := dataOK["files"].([]interface{}) - if !ok { - return errors.New("not an array for file output plugin") - } - for _, fi := range files { - fl := fi.(string) - if fl == "stdout" { - f.Files = append(f.Files, FileConfig{ - Typ: "stdout", - }) - continue - } - f.Files = append(f.Files, FileConfig{ - Path: fl, - }) - } - return nil -} diff --git a/telegraf/plugins/outputs/influxdb_v2.go b/telegraf/plugins/outputs/influxdb_v2.go deleted file mode 100644 index c4985a8cc10..00000000000 --- a/telegraf/plugins/outputs/influxdb_v2.go +++ /dev/null @@ -1,112 +0,0 @@ -package outputs - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -// InfluxDBV2 is based on telegraf influxdb_v2 output plugin. -type InfluxDBV2 struct { - baseOutput - URLs []string `json:"urls"` - Token string `json:"token"` - Organization string `json:"organization"` - Bucket string `json:"bucket"` -} - -// PluginName is based on telegraf plugin name. -func (i *InfluxDBV2) PluginName() string { - return "influxdb_v2" -} - -// TOML encodes to toml string. -func (i *InfluxDBV2) TOML() string { - s := make([]string, len(i.URLs)) - for k, v := range i.URLs { - s[k] = strconv.Quote(v) - } - return fmt.Sprintf(`[[outputs.%s]] - ## The URLs of the InfluxDB cluster nodes. - ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. - ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] - urls = [%s] - - ## Token for authentication. - token = "%s" - - ## Organization is the name of the organization you wish to write to; must exist. - organization = "%s" - - ## Destination bucket to write into. - bucket = "%s" - - ## The value of this tag will be used to determine the bucket. If this - ## tag is not set the 'bucket' option is used as the default. - # bucket_tag = "" - - ## If true, the bucket tag will not be added to the metric. - # exclude_bucket_tag = false - - ## Timeout for HTTP messages. - # timeout = "5s" - - ## Additional HTTP headers - # http_headers = {"X-Special-Header" = "Special-Value"} - - ## HTTP Proxy override, if unset values the standard proxy environment - ## variables are consulted to determine which proxy, if any, should be used. - # http_proxy = "http://corporate.proxy:3128" - - ## HTTP User-Agent - # user_agent = "telegraf" - - ## Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. - # content_encoding = "gzip" - - ## Enable or disable uint support for writing uints influxdb 2.0. - # influx_uint_support = false - - ## Optional TLS Config for use on HTTP connections. 
- # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -`, i.PluginName(), strings.Join(s, ", "), i.Token, i.Organization, i.Bucket) -} - -// UnmarshalTOML decodes the parsed data to the object -func (i *InfluxDBV2) UnmarshalTOML(data interface{}) error { - dataOK, ok := data.(map[string]interface{}) - if !ok { - return errors.New("bad urls for influxdb_v2 output plugin") - } - urls, ok := dataOK["urls"].([]interface{}) - if !ok { - return errors.New("urls is not an array for influxdb_v2 output plugin") - } - for _, url := range urls { - i.URLs = append(i.URLs, url.(string)) - } - - i.Token, ok = dataOK["token"].(string) - if !ok { - return errors.New("token is missing for influxdb_v2 output plugin") - } - - i.Organization, ok = dataOK["organization"].(string) - if !ok { - return errors.New("organization is missing for influxdb_v2 output plugin") - } - - i.Bucket, ok = dataOK["bucket"].(string) - if !ok { - return errors.New("bucket is missing for influxdb_v2 output plugin") - } - return nil -} diff --git a/telegraf/plugins/outputs/outputs_test.go b/telegraf/plugins/outputs/outputs_test.go deleted file mode 100644 index 92a44d5795b..00000000000 --- a/telegraf/plugins/outputs/outputs_test.go +++ /dev/null @@ -1,366 +0,0 @@ -package outputs - -import ( - "errors" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2/telegraf/plugins" -) - -// local plugin -type telegrafPluginConfig interface { - TOML() string - Type() plugins.Type - PluginName() string - UnmarshalTOML(data interface{}) error -} - -func TestType(t *testing.T) { - b := baseOutput(0) - if b.Type() != plugins.Output { - t.Fatalf("output plugins type should be output, got %s", b.Type()) - } -} - -func TestTOML(t *testing.T) { - cases := []struct { - name string - plugins map[telegrafPluginConfig]string - }{ - { - name: "test empty plugins", - plugins: map[telegrafPluginConfig]string{ - &File{}: `[[outputs.file]] - ## Files to write to, "stdout" is a specially handled file. - files = [] - - ## Use batch serialization format instead of line based delimiting. The - ## batch format allows for the production of non line based output formats and - ## may more efficiently encode metric groups. - # use_batch_format = false - - ## The file will be rotated after the time interval specified. When set - ## to 0 no time based rotation is performed. - # rotation_interval = "0d" - - ## The logfile will be rotated when it becomes larger than the specified - ## size. When set to 0 no size based rotation is performed. - # rotation_max_size = "0MB" - - ## Maximum number of rotated archives to keep, any older logs are deleted. - ## If set to -1, no archives are removed. - # rotation_max_archives = 5 - - ## Data format to output. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "influx" -`, - &InfluxDBV2{}: `[[outputs.influxdb_v2]] - ## The URLs of the InfluxDB cluster nodes. - ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. - ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] - urls = [] - - ## Token for authentication. - token = "" - - ## Organization is the name of the organization you wish to write to; must exist. 
- organization = "" - - ## Destination bucket to write into. - bucket = "" - - ## The value of this tag will be used to determine the bucket. If this - ## tag is not set the 'bucket' option is used as the default. - # bucket_tag = "" - - ## If true, the bucket tag will not be added to the metric. - # exclude_bucket_tag = false - - ## Timeout for HTTP messages. - # timeout = "5s" - - ## Additional HTTP headers - # http_headers = {"X-Special-Header" = "Special-Value"} - - ## HTTP Proxy override, if unset values the standard proxy environment - ## variables are consulted to determine which proxy, if any, should be used. - # http_proxy = "http://corporate.proxy:3128" - - ## HTTP User-Agent - # user_agent = "telegraf" - - ## Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. - # content_encoding = "gzip" - - ## Enable or disable uint support for writing uints influxdb 2.0. - # influx_uint_support = false - - ## Optional TLS Config for use on HTTP connections. - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -`, - }, - }, - { - name: "standard testing", - plugins: map[telegrafPluginConfig]string{ - &File{ - Files: []FileConfig{ - {Typ: "stdout"}, - {Path: "/tmp/out.txt"}, - }, - }: `[[outputs.file]] - ## Files to write to, "stdout" is a specially handled file. - files = ["stdout", "/tmp/out.txt"] - - ## Use batch serialization format instead of line based delimiting. The - ## batch format allows for the production of non line based output formats and - ## may more efficiently encode metric groups. - # use_batch_format = false - - ## The file will be rotated after the time interval specified. When set - ## to 0 no time based rotation is performed. - # rotation_interval = "0d" - - ## The logfile will be rotated when it becomes larger than the specified - ## size. When set to 0 no size based rotation is performed. - # rotation_max_size = "0MB" - - ## Maximum number of rotated archives to keep, any older logs are deleted. - ## If set to -1, no archives are removed. - # rotation_max_archives = 5 - - ## Data format to output. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "influx" -`, - &InfluxDBV2{ - URLs: []string{ - "http://192.168.1.10:9999", - "http://192.168.1.11:9999", - }, - Token: "tok1", - Organization: "org1", - Bucket: "bucket1", - }: `[[outputs.influxdb_v2]] - ## The URLs of the InfluxDB cluster nodes. - ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. - ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] - urls = ["http://192.168.1.10:9999", "http://192.168.1.11:9999"] - - ## Token for authentication. - token = "tok1" - - ## Organization is the name of the organization you wish to write to; must exist. - organization = "org1" - - ## Destination bucket to write into. - bucket = "bucket1" - - ## The value of this tag will be used to determine the bucket. If this - ## tag is not set the 'bucket' option is used as the default. - # bucket_tag = "" - - ## If true, the bucket tag will not be added to the metric. - # exclude_bucket_tag = false - - ## Timeout for HTTP messages. 
- # timeout = "5s" - - ## Additional HTTP headers - # http_headers = {"X-Special-Header" = "Special-Value"} - - ## HTTP Proxy override, if unset values the standard proxy environment - ## variables are consulted to determine which proxy, if any, should be used. - # http_proxy = "http://corporate.proxy:3128" - - ## HTTP User-Agent - # user_agent = "telegraf" - - ## Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. - # content_encoding = "gzip" - - ## Enable or disable uint support for writing uints influxdb 2.0. - # influx_uint_support = false - - ## Optional TLS Config for use on HTTP connections. - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -`, - }, - }, - } - for _, c := range cases { - for output, toml := range c.plugins { - if toml != output.TOML() { - t.Fatalf("%s failed want %s, got %v", c.name, toml, output.TOML()) - } - } - } -} - -func TestDecodeTOML(t *testing.T) { - cases := []struct { - name string - want telegrafPluginConfig - wantErr error - output telegrafPluginConfig - data interface{} - }{ - { - name: "file empty", - want: &File{}, - wantErr: errors.New("bad files for file output plugin"), - output: &File{}, - }, - { - name: "file bad data not array", - want: &File{}, - wantErr: errors.New("not an array for file output plugin"), - output: &File{}, - data: map[string]interface{}{ - "files": "", - }, - }, - { - name: "file", - want: &File{ - Files: []FileConfig{ - {Path: "/tmp/out.txt"}, - {Typ: "stdout"}, - }, - }, - output: &File{}, - data: map[string]interface{}{ - "files": []interface{}{ - "/tmp/out.txt", - "stdout", - }, - }, - }, - { - name: "influxdb_v2 empty", - want: &InfluxDBV2{}, - wantErr: errors.New("bad urls for influxdb_v2 output plugin"), - output: &InfluxDBV2{}, - }, - { - name: "influxdb_v2 bad urls", - want: &InfluxDBV2{}, - wantErr: errors.New("urls is not an array for influxdb_v2 output plugin"), - output: &InfluxDBV2{}, - data: map[string]interface{}{ - "urls": "", - }, - }, - { - name: "influxdb_v2 missing token", - want: &InfluxDBV2{ - URLs: []string{ - "http://localhost:9999", - "http://192.168.0.1:9999", - }, - }, - wantErr: errors.New("token is missing for influxdb_v2 output plugin"), - output: &InfluxDBV2{}, - data: map[string]interface{}{ - "urls": []interface{}{ - "http://localhost:9999", - "http://192.168.0.1:9999", - }, - }, - }, - { - name: "influxdb_v2 missing org", - want: &InfluxDBV2{ - URLs: []string{ - "http://localhost:9999", - "http://192.168.0.1:9999", - }, - Token: "token1", - }, - wantErr: errors.New("organization is missing for influxdb_v2 output plugin"), - output: &InfluxDBV2{}, - data: map[string]interface{}{ - "urls": []interface{}{ - "http://localhost:9999", - "http://192.168.0.1:9999", - }, - "token": "token1", - }, - }, - { - name: "influxdb_v2 missing bucket", - want: &InfluxDBV2{ - URLs: []string{ - "http://localhost:9999", - "http://192.168.0.1:9999", - }, - Token: "token1", - Organization: "org1", - }, - wantErr: errors.New("bucket is missing for influxdb_v2 output plugin"), - output: &InfluxDBV2{}, - data: map[string]interface{}{ - "urls": []interface{}{ - "http://localhost:9999", - "http://192.168.0.1:9999", - }, - "token": "token1", - "organization": "org1", - }, - }, - { - name: "influxdb_v2", - want: &InfluxDBV2{ - URLs: []string{ - "http://localhost:9999", - "http://192.168.0.1:9999", - }, - Token: 
"token1", - Organization: "org1", - Bucket: "bucket1", - }, - output: &InfluxDBV2{}, - data: map[string]interface{}{ - "urls": []interface{}{ - "http://localhost:9999", - "http://192.168.0.1:9999", - }, - "token": "token1", - "organization": "org1", - "bucket": "bucket1", - }, - }, - } - for _, c := range cases { - err := c.output.UnmarshalTOML(c.data) - if c.wantErr != nil && (err == nil || err.Error() != c.wantErr.Error()) { - t.Fatalf("%s failed want err %s, got %v", c.name, c.wantErr.Error(), err) - } - if c.wantErr == nil && err != nil { - t.Fatalf("%s failed want err nil, got %v", c.name, err) - } - if !reflect.DeepEqual(c.output, c.want) { - t.Fatalf("%s failed want %v, got %v", c.name, c.want, c.output) - } - } -} diff --git a/telegraf/plugins/plugins.go b/telegraf/plugins/plugins.go deleted file mode 100644 index 647797ba9df..00000000000 --- a/telegraf/plugins/plugins.go +++ /dev/null @@ -1,1630 +0,0 @@ -package plugins - -import ( - "encoding/json" - "fmt" - "sort" -) - -// Plugin defines a Telegraf plugin. -type Plugin struct { - Type string `json:"type,omitempty"` // Type of the plugin. - Name string `json:"name,omitempty"` // Name of the plugin. - Description string `json:"description,omitempty"` // Description of the plugin. - Config string `json:"config,omitempty"` // Config contains the toml config of the plugin. -} - -// TelegrafPlugins defines a Telegraf version's collection of plugins. -type TelegrafPlugins struct { - Version string `json:"version,omitempty"` // Version of telegraf plugins are for. - OS string `json:"os,omitempty"` // OS the plugins apply to. - Plugins []Plugin `json:"plugins,omitempty"` // Plugins this version of telegraf supports. -} - -// ListAvailablePlugins lists available plugins based on type. -func ListAvailablePlugins(t string) (*TelegrafPlugins, error) { - switch t { - case "input": - return AvailableInputs() - case "output": - return AvailableOutputs() - case "processor": - return AvailableProcessors() - case "aggregator": - return AvailableAggregators() - case "bundle": - return AvailableBundles() - default: - return nil, fmt.Errorf("unknown plugin type '%s'", t) - } -} - -// GetPlugin returns the plugin's sample config, if available. -func GetPlugin(t, name string) (*Plugin, bool) { - var p *TelegrafPlugins - var err error - - switch t { - case "input": - p, err = AvailableInputs() - - case "output": - p, err = AvailableOutputs() - - case "processor": - p, err = AvailableProcessors() - - case "aggregator": - p, err = AvailableAggregators() - - case "bundle": - p, err = AvailableBundles() - - default: - return nil, false - } - - if err != nil { - return nil, false - } - - return p.findPluginByName(name) -} - -// findPluginByName returns a plugin named "name". This should only be run on -// TelegrafPlugins containing the same type of plugin. -func (t *TelegrafPlugins) findPluginByName(name string) (*Plugin, bool) { - for i := range t.Plugins { - if t.Plugins[i].Name == name { - return &t.Plugins[i], true - } - } - - return nil, false -} - -// AvailablePlugins returns the base list of available plugins. -func AvailablePlugins() (*TelegrafPlugins, error) { - all := &TelegrafPlugins{} - - t, err := AvailableInputs() - if err != nil { - return nil, err - } - all.Version = t.Version - all.Plugins = append(all.Plugins, t.Plugins...) - - t, err = AvailableOutputs() - if err != nil { - return nil, err - } - all.Plugins = append(all.Plugins, t.Plugins...) 
- - t, err = AvailableProcessors() - if err != nil { - return nil, err - } - all.Plugins = append(all.Plugins, t.Plugins...) - - t, err = AvailableAggregators() - if err != nil { - return nil, err - } - all.Plugins = append(all.Plugins, t.Plugins...) - - return all, nil -} - -func sortPlugins(t *TelegrafPlugins) *TelegrafPlugins { - sort.Slice(t.Plugins, func(i, j int) bool { - return t.Plugins[i].Name < t.Plugins[j].Name - }) - - return t -} - -// AvailableInputs returns the base list of available input plugins. -func AvailableInputs() (*TelegrafPlugins, error) { - t := &TelegrafPlugins{} - err := json.Unmarshal([]byte(availableInputs), t) - if err != nil { - return nil, err - } - return sortPlugins(t), nil -} - -// AvailableOutputs returns the base list of available output plugins. -func AvailableOutputs() (*TelegrafPlugins, error) { - t := &TelegrafPlugins{} - err := json.Unmarshal([]byte(availableOutputs), t) - if err != nil { - return nil, err - } - return sortPlugins(t), nil -} - -// AvailableProcessors returns the base list of available processor plugins. -func AvailableProcessors() (*TelegrafPlugins, error) { - t := &TelegrafPlugins{} - err := json.Unmarshal([]byte(availableProcessors), t) - if err != nil { - return nil, err - } - return sortPlugins(t), nil -} - -// AvailableAggregators returns the base list of available aggregator plugins. -func AvailableAggregators() (*TelegrafPlugins, error) { - t := &TelegrafPlugins{} - err := json.Unmarshal([]byte(availableAggregators), t) - if err != nil { - return nil, err - } - return sortPlugins(t), nil -} - -// AvailableBundles returns the base list of available bundled plugins. -func AvailableBundles() (*TelegrafPlugins, error) { - return &TelegrafPlugins{ - Version: "1.13.0", - OS: "unix", - Plugins: []Plugin{ - { - Type: "bundle", - Name: "System Bundle", - Description: "Collection of system related inputs", - Config: "" + - "# Read metrics about cpu usage\n[[inputs.cpu]]\n # alias=\"cpu\"\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n" + - "# Read metrics about swap memory usage\n[[inputs.swap]]\n # alias=\"swap\"\n" + - "# Read metrics about disk usage by mount point\n[[inputs.disk]]\n # alias=\"disk\"\n ## By default stats will be gathered for all mount points.\n ## Set mount_points will restrict the stats to only the specified mount points.\n # mount_points = [\"/\"]\n\n ## Ignore mount points by filesystem type.\n ignore_fs = [\"tmpfs\", \"devtmpfs\", \"devfs\", \"iso9660\", \"overlay\", \"aufs\", \"squashfs\"]\n" + - "# Read metrics about memory usage\n[[inputs.mem]]\n # alias=\"mem\"\n", - }, - }, - }, nil -} - -// AgentConfig contains the default agent config. -var AgentConfig = `# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## Maximum number of unwritten metrics per output. 
Increasing this value
 - ## allows for longer periods of output downtime without dropping metrics at the
 - ## cost of higher maximum memory usage.
 - metric_buffer_limit = 10000
 -
 - ## Collection jitter is used to jitter the collection by a random amount.
 - ## Each plugin will sleep for a random time within jitter before collecting.
 - ## This can be used to avoid many plugins querying things like sysfs at the
 - ## same time, which can have a measurable effect on the system.
 - collection_jitter = "0s"
 -
 - ## Default flushing interval for all outputs. Maximum flush_interval will be
 - ## flush_interval + flush_jitter
 - flush_interval = "10s"
 - ## Jitter the flush interval by a random amount. This is primarily to avoid
 - ## large write spikes for users running a large number of telegraf instances.
 - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
 - flush_jitter = "0s"
 -
 - ## By default or when set to "0s", precision will be set to the same
 - ## timestamp order as the collection interval, with the maximum being 1s.
 - ## ie, when interval = "10s", precision will be "1s"
 - ## when interval = "250ms", precision will be "1ms"
 - ## Precision will NOT be used for service inputs. It is up to each individual
 - ## service input to set the timestamp at the appropriate precision.
 - ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
 - precision = ""
 -
 - ## Log at debug level.
 - # debug = false
 - ## Log only error level messages.
 - # quiet = false
 -
 - ## Log target controls the destination for logs and can be one of "file",
 - ## "stderr" or, on Windows, "eventlog". When set to "file", the output file
 - ## is determined by the "logfile" setting.
 - # logtarget = "file"
 -
 - ## Name of the file to be logged to when using the "file" logtarget. If set to
 - ## the empty string then logs are written to stderr.
 - # logfile = ""
 -
 - ## The logfile will be rotated after the time interval specified. When set
 - ## to 0 no time based rotation is performed. Logs are rotated only when
 - ## written to, if there is no log activity rotation may be delayed.
 - # logfile_rotation_interval = "0d"
 -
 - ## The logfile will be rotated when it becomes larger than the specified
 - ## size. When set to 0 no size based rotation is performed.
 - # logfile_rotation_max_size = "0MB"
 -
 - ## Maximum number of rotated archives to keep, any older logs are deleted.
 - ## If set to -1, no archives are removed.
 - # logfile_rotation_max_archives = 5
 -
 - ## Pick a timezone to use when logging or type 'local' for local time.
 - ## Example: America/Chicago
 - # log_with_timezone = ""
 -
 - ## Override default hostname, if empty use os.Hostname()
 - hostname = ""
 - ## If set to true, do not set the "host" tag in the telegraf agent.
- omit_hostname = false -` - -var availableInputs = `{ - "version": "1.13.0", - "os": "linux", - "plugins": [ - { - "type": "input", - "name": "tcp_listener", - "description": "Generic TCP listener", - "config": "# Generic TCP listener\n[[inputs.tcp_listener]]\n # alias=\"tcp_listener\"\n # DEPRECATED: the TCP listener plugin has been deprecated in favor of the\n # socket_listener plugin\n # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener\n\n" - }, - { - "type": "input", - "name": "kernel", - "description": "Get kernel statistics from /proc/stat", - "config": "# Get kernel statistics from /proc/stat\n[[inputs.kernel]]\n # alias=\"kernel\"\n" - }, - { - "type": "input", - "name": "powerdns", - "description": "Read metrics from one or many PowerDNS servers", - "config": "# Read metrics from one or many PowerDNS servers\n[[inputs.powerdns]]\n # alias=\"powerdns\"\n ## An array of sockets to gather stats about.\n ## Specify a path to unix socket.\n unix_sockets = [\"/var/run/pdns.controlsocket\"]\n\n" - }, - { - "type": "input", - "name": "processes", - "description": "Get the number of processes and group them by status", - "config": "# Get the number of processes and group them by status\n[[inputs.processes]]\n # alias=\"processes\"\n" - }, - { - "type": "input", - "name": "snmp_legacy", - "description": "DEPRECATED! PLEASE USE inputs.snmp INSTEAD.", - "config": "# DEPRECATED! PLEASE USE inputs.snmp INSTEAD.\n[[inputs.snmp_legacy]]\n # alias=\"snmp_legacy\"\n ## Use 'oids.txt' file to translate oids to names\n ## To generate 'oids.txt' you need to run:\n ## snmptranslate -m all -Tz -On | sed -e 's/\"//g' \u003e /tmp/oids.txt\n ## Or if you have an other MIB folder with custom MIBs\n ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/\"//g' \u003e oids.txt\n snmptranslate_file = \"/tmp/oids.txt\"\n [[inputs.snmp.host]]\n address = \"192.168.2.2:161\"\n # SNMP community\n community = \"public\" # default public\n # SNMP version (1, 2 or 3)\n # Version 3 not supported yet\n version = 2 # default 2\n # SNMP response timeout\n timeout = 2.0 # default 2.0\n # SNMP request retries\n retries = 2 # default 2\n # Which get/bulk do you want to collect for this host\n collect = [\"mybulk\", \"sysservices\", \"sysdescr\"]\n # Simple list of OIDs to get, in addition to \"collect\"\n get_oids = []\n\n [[inputs.snmp.host]]\n address = \"192.168.2.3:161\"\n community = \"public\"\n version = 2\n timeout = 2.0\n retries = 2\n collect = [\"mybulk\"]\n get_oids = [\n \"ifNumber\",\n \".1.3.6.1.2.1.1.3.0\",\n ]\n\n [[inputs.snmp.get]]\n name = \"ifnumber\"\n oid = \"ifNumber\"\n\n [[inputs.snmp.get]]\n name = \"interface_speed\"\n oid = \"ifSpeed\"\n instance = \"0\"\n\n [[inputs.snmp.get]]\n name = \"sysuptime\"\n oid = \".1.3.6.1.2.1.1.3.0\"\n unit = \"second\"\n\n [[inputs.snmp.bulk]]\n name = \"mybulk\"\n max_repetition = 127\n oid = \".1.3.6.1.2.1.1\"\n\n [[inputs.snmp.bulk]]\n name = \"ifoutoctets\"\n max_repetition = 127\n oid = \"ifOutOctets\"\n\n [[inputs.snmp.host]]\n address = \"192.168.2.13:161\"\n #address = \"127.0.0.1:161\"\n community = \"public\"\n version = 2\n timeout = 2.0\n retries = 2\n #collect = [\"mybulk\", \"sysservices\", \"sysdescr\", \"systype\"]\n collect = [\"sysuptime\" ]\n [[inputs.snmp.host.table]]\n name = \"iftable3\"\n include_instances = [\"enp5s0\", \"eth1\"]\n\n # SNMP TABLEs\n # table without mapping neither subtables\n [[inputs.snmp.table]]\n name = \"iftable1\"\n oid = \".1.3.6.1.2.1.31.1.1.1\"\n\n # table 
without mapping but with subtables\n [[inputs.snmp.table]]\n name = \"iftable2\"\n oid = \".1.3.6.1.2.1.31.1.1.1\"\n sub_tables = [\".1.3.6.1.2.1.2.2.1.13\"]\n\n # table with mapping but without subtables\n [[inputs.snmp.table]]\n name = \"iftable3\"\n oid = \".1.3.6.1.2.1.31.1.1.1\"\n # if empty. get all instances\n mapping_table = \".1.3.6.1.2.1.31.1.1.1.1\"\n # if empty, get all subtables\n\n # table with both mapping and subtables\n [[inputs.snmp.table]]\n name = \"iftable4\"\n oid = \".1.3.6.1.2.1.31.1.1.1\"\n # if empty get all instances\n mapping_table = \".1.3.6.1.2.1.31.1.1.1.1\"\n # if empty get all subtables\n # sub_tables could be not \"real subtables\"\n sub_tables=[\".1.3.6.1.2.1.2.2.1.13\", \"bytes_recv\", \"bytes_send\"]\n\n" - }, - { - "type": "input", - "name": "statsd", - "description": "Statsd UDP/TCP Server", - "config": "# Statsd UDP/TCP Server\n[[inputs.statsd]]\n # alias=\"statsd\"\n ## Protocol, must be \"tcp\", \"udp\", \"udp4\" or \"udp6\" (default=udp)\n protocol = \"udp\"\n\n ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)\n max_tcp_connections = 250\n\n ## Enable TCP keep alive probes (default=false)\n tcp_keep_alive = false\n\n ## Specifies the keep-alive period for an active network connection.\n ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.\n ## Defaults to the OS configuration.\n # tcp_keep_alive_period = \"2h\"\n\n ## Address and port to host UDP listener on\n service_address = \":8125\"\n\n ## The following configuration options control when telegraf clears it's cache\n ## of previous values. If set to false, then telegraf will only clear it's\n ## cache when the daemon is restarted.\n ## Reset gauges every interval (default=true)\n delete_gauges = true\n ## Reset counters every interval (default=true)\n delete_counters = true\n ## Reset sets every interval (default=true)\n delete_sets = true\n ## Reset timings \u0026 histograms every interval (default=true)\n delete_timings = true\n\n ## Percentiles to calculate for timing \u0026 histogram stats\n percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0]\n\n ## separator to use between elements of a statsd metric\n metric_separator = \"_\"\n\n ## Parses tags in the datadog statsd format\n ## http://docs.datadoghq.com/guides/dogstatsd/\n parse_data_dog_tags = false\n\n ## Parses datadog extensions to the statsd format\n datadog_extensions = false\n\n ## Statsd data translation templates, more info can be read here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md\n # templates = [\n # \"cpu.* measurement*\"\n # ]\n\n ## Number of UDP messages allowed to queue up, once filled,\n ## the statsd server will start dropping packets\n allowed_pending_messages = 10000\n\n ## Number of timing/histogram values to track per-measurement in the\n ## calculation of percentiles. 
Raising this limit increases the accuracy\n ## of percentiles but also increases the memory usage and cpu time.\n percentile_limit = 1000\n\n" - }, - { - "type": "input", - "name": "bcache", - "description": "Read metrics of bcache from stats_total and dirty_data", - "config": "# Read metrics of bcache from stats_total and dirty_data\n[[inputs.bcache]]\n # alias=\"bcache\"\n ## Bcache sets path\n ## If not specified, then default is:\n bcachePath = \"/sys/fs/bcache\"\n\n ## By default, telegraf gather stats for all bcache devices\n ## Setting devices will restrict the stats to the specified\n ## bcache devices.\n bcacheDevs = [\"bcache0\"]\n\n" - }, - { - "type": "input", - "name": "mesos", - "description": "Telegraf plugin for gathering metrics from N Mesos masters", - "config": "# Telegraf plugin for gathering metrics from N Mesos masters\n[[inputs.mesos]]\n # alias=\"mesos\"\n ## Timeout, in ms.\n timeout = 100\n\n ## A list of Mesos masters.\n masters = [\"http://localhost:5050\"]\n\n ## Master metrics groups to be collected, by default, all enabled.\n master_collections = [\n \"resources\",\n \"master\",\n \"system\",\n \"agents\",\n \"frameworks\",\n \"framework_offers\",\n \"tasks\",\n \"messages\",\n \"evqueue\",\n \"registrar\",\n \"allocator\",\n ]\n\n ## A list of Mesos slaves, default is []\n # slaves = []\n\n ## Slave metrics groups to be collected, by default, all enabled.\n # slave_collections = [\n # \"resources\",\n # \"agent\",\n # \"system\",\n # \"executors\",\n # \"tasks\",\n # \"messages\",\n # ]\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "pf", - "description": "Gather counters from PF", - "config": "# Gather counters from PF\n[[inputs.pf]]\n # alias=\"pf\"\n ## PF require root access on most systems.\n ## Setting 'use_sudo' to true will make use of sudo to run pfctl.\n ## Users must configure sudo to allow telegraf user to run pfctl with no password.\n ## pfctl can be restricted to only list command \"pfctl -s info\".\n use_sudo = false\n\n" - }, - { - "type": "input", - "name": "webhooks", - "description": "A Webhooks Event collector", - "config": "# A Webhooks Event collector\n[[inputs.webhooks]]\n # alias=\"webhooks\"\n ## Address and port to host Webhook listener on\n service_address = \":1619\"\n\n [inputs.webhooks.filestack]\n path = \"/filestack\"\n\n [inputs.webhooks.github]\n path = \"/github\"\n # secret = \"\"\n\n [inputs.webhooks.mandrill]\n path = \"/mandrill\"\n\n [inputs.webhooks.rollbar]\n path = \"/rollbar\"\n\n [inputs.webhooks.papertrail]\n path = \"/papertrail\"\n\n [inputs.webhooks.particle]\n path = \"/particle\"\n\n" - }, - { - "type": "input", - "name": "http_listener_v2", - "description": "Generic HTTP write listener", - "config": "# Generic HTTP write listener\n[[inputs.http_listener_v2]]\n # alias=\"http_listener_v2\"\n ## Address and port to host HTTP listener on\n service_address = \":8080\"\n\n ## Path to listen to.\n # path = \"/telegraf\"\n\n ## HTTP methods to accept.\n # methods = [\"POST\", \"PUT\"]\n\n ## maximum duration before timing out read of the request\n # read_timeout = \"10s\"\n ## maximum duration before timing out write of the response\n # write_timeout = \"10s\"\n\n ## Maximum allowed http request body size in bytes.\n ## 0 means to use the default of 524,288,00 bytes (500 mebibytes)\n # 
max_body_size = \"500MB\"\n\n ## Part of the request to consume. Available options are \"body\" and\n ## \"query\".\n # data_source = \"body\"\n\n ## Set one or more allowed client CA certificate file names to\n ## enable mutually authenticated TLS connections\n # tls_allowed_cacerts = [\"/etc/telegraf/clientca.pem\"]\n\n ## Add service certificate and key\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n\n ## Optional username and password to accept for HTTP basic authentication.\n ## You probably want to make sure you have TLS configured above for this.\n # basic_username = \"foobar\"\n # basic_password = \"barfoo\"\n\n ## Data format to consume.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n\n" - }, - { - "type": "input", - "name": "http_listener", - "description": "Influx HTTP write listener", - "config": "# Influx HTTP write listener\n[[inputs.http_listener]]\n # alias=\"http_listener\"\n ## Address and port to host HTTP listener on\n service_address = \":8186\"\n\n ## maximum duration before timing out read of the request\n read_timeout = \"10s\"\n ## maximum duration before timing out write of the response\n write_timeout = \"10s\"\n\n ## Maximum allowed http request body size in bytes.\n ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)\n max_body_size = \"500MiB\"\n\n ## Maximum line size allowed to be sent in bytes.\n ## 0 means to use the default of 65536 bytes (64 kibibytes)\n max_line_size = \"64KiB\"\n \n\n ## Optional tag name used to store the database. \n ## If the write has a database in the query string then it will be kept in this tag name.\n ## This tag can be used in downstream outputs.\n ## The default value of nothing means it will be off and the database will not be recorded.\n # database_tag = \"\"\n\n ## Set one or more allowed client CA certificate file names to\n ## enable mutually authenticated TLS connections\n tls_allowed_cacerts = [\"/etc/telegraf/clientca.pem\"]\n\n ## Add service certificate and key\n tls_cert = \"/etc/telegraf/cert.pem\"\n tls_key = \"/etc/telegraf/key.pem\"\n\n ## Optional username and password to accept for HTTP basic authentication.\n ## You probably want to make sure you have TLS configured above for this.\n # basic_username = \"foobar\"\n # basic_password = \"barfoo\"\n\n" - }, - { - "type": "input", - "name": "sysstat", - "description": "Sysstat metrics collector", - "config": "# Sysstat metrics collector\n[[inputs.sysstat]]\n # alias=\"sysstat\"\n ## Path to the sadc command.\n #\n ## Common Defaults:\n ## Debian/Ubuntu: /usr/lib/sysstat/sadc\n ## Arch: /usr/lib/sa/sadc\n ## RHEL/CentOS: /usr/lib64/sa/sadc\n sadc_path = \"/usr/lib/sa/sadc\" # required\n\n ## Path to the sadf command, if it is not in PATH\n # sadf_path = \"/usr/bin/sadf\"\n\n ## Activities is a list of activities, that are passed as argument to the\n ## sadc collector utility (e.g: DISK, SNMP etc...)\n ## The more activities that are added, the more data is collected.\n # activities = [\"DISK\"]\n\n ## Group metrics to measurements.\n ##\n ## If group is false each metric will be prefixed with a description\n ## and represents itself a measurement.\n ##\n ## If Group is true, corresponding metrics are grouped to a single measurement.\n # group = true\n\n ## Options for the sadf command. 
The values on the left represent the sadf\n ## options and the values on the right their description (which are used for\n ## grouping and prefixing metrics).\n ##\n ## Run 'sar -h' or 'man sar' to find out the supported options for your\n ## sysstat version.\n [inputs.sysstat.options]\n -C = \"cpu\"\n -B = \"paging\"\n -b = \"io\"\n -d = \"disk\" # requires DISK activity\n \"-n ALL\" = \"network\"\n \"-P ALL\" = \"per_cpu\"\n -q = \"queue\"\n -R = \"mem\"\n -r = \"mem_util\"\n -S = \"swap_util\"\n -u = \"cpu_util\"\n -v = \"inode\"\n -W = \"swap\"\n -w = \"task\"\n # -H = \"hugepages\" # only available for newer linux distributions\n # \"-I ALL\" = \"interrupts\" # requires INT activity\n\n ## Device tags can be used to add additional tags for devices.\n ## For example the configuration below adds a tag vg with value rootvg for\n ## all metrics with sda devices.\n # [[inputs.sysstat.device_tags.sda]]\n # vg = \"rootvg\"\n\n" - }, - { - "type": "input", - "name": "systemd_units", - "description": "Gather systemd units state", - "config": "# Gather systemd units state\n[[inputs.systemd_units]]\n # alias=\"systemd_units\"\n ## Set timeout for systemctl execution\n # timeout = \"1s\"\n #\n ## Filter for a specific unit type, default is \"service\", other possible\n ## values are \"socket\", \"target\", \"device\", \"mount\", \"automount\", \"swap\",\n ## \"timer\", \"path\", \"slice\" and \"scope \":\n # unittype = \"service\"\n\n" - }, - { - "type": "input", - "name": "temp", - "description": "Read metrics about temperature", - "config": "# Read metrics about temperature\n[[inputs.temp]]\n # alias=\"temp\"\n" - }, - { - "type": "input", - "name": "cgroup", - "description": "Read specific statistics per cgroup", - "config": "# Read specific statistics per cgroup\n[[inputs.cgroup]]\n # alias=\"cgroup\"\n ## Directories in which to look for files, globs are supported.\n ## Consider restricting paths to the set of cgroups you really\n ## want to monitor if you have a large number of cgroups, to avoid\n ## any cardinality issues.\n # paths = [\n # \"/cgroup/memory\",\n # \"/cgroup/memory/child1\",\n # \"/cgroup/memory/child2/*\",\n # ]\n ## cgroup stat fields, as file names, globs are supported.\n ## these file names are appended to each path from above.\n # files = [\"memory.*usage*\", \"memory.limit_in_bytes\"]\n\n" - }, - { - "type": "input", - "name": "mysql", - "description": "Read metrics from one or many mysql servers", - "config": "# Read metrics from one or many mysql servers\n[[inputs.mysql]]\n # alias=\"mysql\"\n ## specify servers via a url matching:\n ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]\n ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name\n ## e.g.\n ## servers = [\"user:passwd@tcp(127.0.0.1:3306)/?tls=false\"]\n ## servers = [\"user@tcp(127.0.0.1:3306)/?tls=false\"]\n #\n ## If no servers are specified, then localhost is used as the host.\n servers = [\"tcp(127.0.0.1:3306)/\"]\n\n ## Selects the metric output format.\n ##\n ## This option exists to maintain backwards compatibility, if you have\n ## existing metrics do not set or change this value until you are ready to\n ## migrate to the new format.\n ##\n ## If you do not have existing metrics from this plugin set to the latest\n ## version.\n ##\n ## Telegraf \u003e=1.6: metric_version = 2\n ## \u003c1.6: metric_version = 1 (or unset)\n metric_version = 2\n\n ## if the list is empty, then metrics are gathered from all databasee tables\n # 
table_schema_databases = []\n\n ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list\n # gather_table_schema = false\n\n ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST\n # gather_process_list = false\n\n ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS\n # gather_user_statistics = false\n\n ## gather auto_increment columns and max values from information schema\n # gather_info_schema_auto_inc = false\n\n ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS\n # gather_innodb_metrics = false\n\n ## gather metrics from SHOW SLAVE STATUS command output\n # gather_slave_status = false\n\n ## gather metrics from SHOW BINARY LOGS command output\n # gather_binary_logs = false\n\n ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE\n # gather_table_io_waits = false\n\n ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS\n # gather_table_lock_waits = false\n\n ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE\n # gather_index_io_waits = false\n\n ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS\n # gather_event_waits = false\n\n ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME\n # gather_file_events_stats = false\n\n ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST\n # gather_perf_events_statements = false\n\n ## the limits for metrics form perf_events_statements\n # perf_events_statements_digest_text_limit = 120\n # perf_events_statements_limit = 250\n # perf_events_statements_time_limit = 86400\n\n ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)\n ## example: interval_slow = \"30m\"\n # interval_slow = \"\"\n\n ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "redis", - "description": "Read metrics from one or many redis servers", - "config": "# Read metrics from one or many redis servers\n[[inputs.redis]]\n # alias=\"redis\"\n ## specify servers via a url matching:\n ## [protocol://][:password]@address[:port]\n ## e.g.\n ## tcp://localhost:6379\n ## tcp://:password@192.168.99.100\n ## unix:///var/run/redis.sock\n ##\n ## If no servers are specified, then localhost is used as the host.\n ## If no port is specified, 6379 is used\n servers = [\"tcp://localhost:6379\"]\n\n ## specify server password\n # password = \"s#cr@t%\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = true\n\n" - }, - { - "type": "input", - "name": "couchbase", - "description": "Read metrics from one or many couchbase clusters", - "config": "# Read metrics from one or many couchbase clusters\n[[inputs.couchbase]]\n # alias=\"couchbase\"\n ## specify servers via a url matching:\n ## [protocol://][:password]@address[:port]\n ## e.g.\n ## http://couchbase-0.example.com/\n ## http://admin:secret@couchbase-0.example.com:8091/\n ##\n ## If no servers are specified, then localhost is used as the host.\n ## If no protocol is specified, HTTP is used.\n ## If no port is specified, 8091 is used.\n servers = [\"http://localhost:8091\"]\n\n" - }, - { - "type": "input", - 
"name": "file", - "description": "Reload and gather from file[s] on telegraf's interval.", - "config": "# Reload and gather from file[s] on telegraf's interval.\n[[inputs.file]]\n # alias=\"file\"\n ## Files to parse each interval.\n ## These accept standard unix glob matching rules, but with the addition of\n ## ** as a \"super asterisk\". ie:\n ## /var/log/**.log -\u003e recursively find all .log files in /var/log\n ## /var/log/*/*.log -\u003e find all .log files with a parent dir in /var/log\n ## /var/log/apache.log -\u003e only read the apache log file\n files = [\"/var/log/apache/access.log\"]\n\n ## The dataformat to be read from files\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n\n ## Name a tag containing the name of the file the data was parsed from. Leave empty\n ## to disable.\n # file_tag = \"\"\n\n" - }, - { - "type": "input", - "name": "kube_inventory", - "description": "Read metrics from the Kubernetes api", - "config": "# Read metrics from the Kubernetes api\n[[inputs.kube_inventory]]\n # alias=\"kube_inventory\"\n ## URL for the Kubernetes API\n url = \"https://127.0.0.1\"\n\n ## Namespace to use. Set to \"\" to use all namespaces.\n # namespace = \"default\"\n\n ## Use bearer token for authorization. ('bearer_token' takes priority)\n ## If both of these are empty, we'll use the default serviceaccount:\n ## at: /run/secrets/kubernetes.io/serviceaccount/token\n # bearer_token = \"/path/to/bearer/token\"\n ## OR\n # bearer_token_string = \"abc_123\"\n\n ## Set response_timeout (default 5 seconds)\n # response_timeout = \"5s\"\n\n ## Optional Resources to exclude from gathering\n ## Leave them with blank with try to gather everything available.\n ## Values can be - \"daemonsets\", deployments\", \"endpoints\", \"ingress\", \"nodes\",\n ## \"persistentvolumes\", \"persistentvolumeclaims\", \"pods\", \"services\", \"statefulsets\"\n # resource_exclude = [ \"deployments\", \"nodes\", \"statefulsets\" ]\n\n ## Optional Resources to include when gathering\n ## Overrides resource_exclude if both set.\n # resource_include = [ \"deployments\", \"nodes\", \"statefulsets\" ]\n\n ## Optional TLS Config\n # tls_ca = \"/path/to/cafile\"\n # tls_cert = \"/path/to/certfile\"\n # tls_key = \"/path/to/keyfile\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "neptune_apex", - "description": "Neptune Apex data collector", - "config": "# Neptune Apex data collector\n[[inputs.neptune_apex]]\n # alias=\"neptune_apex\"\n ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex.\n ## Measurements will be logged under \"apex\".\n\n ## The base URL of the local Apex(es). 
If you specify more than one server, they will\n ## be differentiated by the \"source\" tag.\n servers = [\n \"http://apex.local\",\n ]\n\n ## The response_timeout specifies how long to wait for a reply from the Apex.\n #response_timeout = \"5s\"\n\n" - }, - { - "type": "input", - "name": "openntpd", - "description": "Get standard NTP query metrics from OpenNTPD.", - "config": "# Get standard NTP query metrics from OpenNTPD.\n[[inputs.openntpd]]\n # alias=\"openntpd\"\n ## Run ntpctl binary with sudo.\n # use_sudo = false\n\n ## Location of the ntpctl binary.\n # binary = \"/usr/sbin/ntpctl\"\n\n ## Maximum time the ntpctl binary is allowed to run.\n # timeout = \"5ms\"\n \n" - }, - { - "type": "input", - "name": "ipset", - "description": "Gather packets and bytes counters from Linux ipsets", - "config": "# Gather packets and bytes counters from Linux ipsets\n[[inputs.ipset]]\n # alias=\"ipset\"\n ## By default, we only show sets which have already matched at least 1 packet.\n ## set include_unmatched_sets = true to gather them all.\n include_unmatched_sets = false\n ## Adjust your sudo settings appropriately if using this option (\"sudo ipset save\")\n use_sudo = false\n ## The default timeout of 1s for ipset execution can be overridden here:\n # timeout = \"1s\"\n\n" - }, - { - "type": "input", - "name": "tengine", - "description": "Read Tengine's basic status information (ngx_http_reqstat_module)", - "config": "# Read Tengine's basic status information (ngx_http_reqstat_module)\n[[inputs.tengine]]\n # alias=\"tengine\"\n # An array of Tengine reqstat module URI to gather stats.\n urls = [\"http://127.0.0.1/us\"]\n\n # HTTP response timeout (default: 5s)\n # response_timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.cer\"\n # tls_key = \"/etc/telegraf/key.key\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "vsphere", - "description": "Read metrics from VMware vCenter", - "config": "# Read metrics from VMware vCenter\n[[inputs.vsphere]]\n # alias=\"vsphere\"\n ## List of vCenter URLs to be monitored. 
These three lines must be uncommented\n ## and edited for the plugin to work.\n vcenters = [ \"https://vcenter.local/sdk\" ]\n username = \"user@corp.local\"\n password = \"secret\"\n\n ## VMs\n ## Typical VM metrics (if omitted or empty, all metrics are collected)\n vm_metric_include = [\n \"cpu.demand.average\",\n \"cpu.idle.summation\",\n \"cpu.latency.average\",\n \"cpu.readiness.average\",\n \"cpu.ready.summation\",\n \"cpu.run.summation\",\n \"cpu.usagemhz.average\",\n \"cpu.used.summation\",\n \"cpu.wait.summation\",\n \"mem.active.average\",\n \"mem.granted.average\",\n \"mem.latency.average\",\n \"mem.swapin.average\",\n \"mem.swapinRate.average\",\n \"mem.swapout.average\",\n \"mem.swapoutRate.average\",\n \"mem.usage.average\",\n \"mem.vmmemctl.average\",\n \"net.bytesRx.average\",\n \"net.bytesTx.average\",\n \"net.droppedRx.summation\",\n \"net.droppedTx.summation\",\n \"net.usage.average\",\n \"power.power.average\",\n \"virtualDisk.numberReadAveraged.average\",\n \"virtualDisk.numberWriteAveraged.average\",\n \"virtualDisk.read.average\",\n \"virtualDisk.readOIO.latest\",\n \"virtualDisk.throughput.usage.average\",\n \"virtualDisk.totalReadLatency.average\",\n \"virtualDisk.totalWriteLatency.average\",\n \"virtualDisk.write.average\",\n \"virtualDisk.writeOIO.latest\",\n \"sys.uptime.latest\",\n ]\n # vm_metric_exclude = [] ## Nothing is excluded by default\n # vm_instances = true ## true by default\n\n ## Hosts\n ## Typical host metrics (if omitted or empty, all metrics are collected)\n host_metric_include = [\n \"cpu.coreUtilization.average\",\n \"cpu.costop.summation\",\n \"cpu.demand.average\",\n \"cpu.idle.summation\",\n \"cpu.latency.average\",\n \"cpu.readiness.average\",\n \"cpu.ready.summation\",\n \"cpu.swapwait.summation\",\n \"cpu.usage.average\",\n \"cpu.usagemhz.average\",\n \"cpu.used.summation\",\n \"cpu.utilization.average\",\n \"cpu.wait.summation\",\n \"disk.deviceReadLatency.average\",\n \"disk.deviceWriteLatency.average\",\n \"disk.kernelReadLatency.average\",\n \"disk.kernelWriteLatency.average\",\n \"disk.numberReadAveraged.average\",\n \"disk.numberWriteAveraged.average\",\n \"disk.read.average\",\n \"disk.totalReadLatency.average\",\n \"disk.totalWriteLatency.average\",\n \"disk.write.average\",\n \"mem.active.average\",\n \"mem.latency.average\",\n \"mem.state.latest\",\n \"mem.swapin.average\",\n \"mem.swapinRate.average\",\n \"mem.swapout.average\",\n \"mem.swapoutRate.average\",\n \"mem.totalCapacity.average\",\n \"mem.usage.average\",\n \"mem.vmmemctl.average\",\n \"net.bytesRx.average\",\n \"net.bytesTx.average\",\n \"net.droppedRx.summation\",\n \"net.droppedTx.summation\",\n \"net.errorsRx.summation\",\n \"net.errorsTx.summation\",\n \"net.usage.average\",\n \"power.power.average\",\n \"storageAdapter.numberReadAveraged.average\",\n \"storageAdapter.numberWriteAveraged.average\",\n \"storageAdapter.read.average\",\n \"storageAdapter.write.average\",\n \"sys.uptime.latest\",\n ]\n ## Collect IP addresses? 
Valid values are \"ipv4\" and \"ipv6\"\n # ip_addresses = [\"ipv6\", \"ipv4\" ]\n # host_metric_exclude = [] ## Nothing excluded by default\n # host_instances = true ## true by default\n\n ## Clusters\n # cluster_metric_include = [] ## if omitted or empty, all metrics are collected\n # cluster_metric_exclude = [] ## Nothing excluded by default\n # cluster_instances = false ## false by default\n\n ## Datastores\n # datastore_metric_include = [] ## if omitted or empty, all metrics are collected\n # datastore_metric_exclude = [] ## Nothing excluded by default\n # datastore_instances = false ## false by default for Datastores only\n\n ## Datacenters\n datacenter_metric_include = [] ## if omitted or empty, all metrics are collected\n datacenter_metric_exclude = [ \"*\" ] ## Datacenters are not collected by default.\n # datacenter_instances = false ## false by default for Datastores only\n\n ## Plugin Settings \n ## separator character to use for measurement and field names (default: \"_\")\n # separator = \"_\"\n\n ## number of objects to retreive per query for realtime resources (vms and hosts)\n ## set to 64 for vCenter 5.5 and 6.0 (default: 256)\n # max_query_objects = 256\n\n ## number of metrics to retreive per query for non-realtime resources (clusters and datastores)\n ## set to 64 for vCenter 5.5 and 6.0 (default: 256)\n # max_query_metrics = 256\n\n ## number of go routines to use for collection and discovery of objects and metrics\n # collect_concurrency = 1\n # discover_concurrency = 1\n\n ## whether or not to force discovery of new objects on initial gather call before collecting metrics\n ## when true for large environments this may cause errors for time elapsed while collecting metrics\n ## when false (default) the first collection cycle may result in no or limited metrics while objects are discovered\n # force_discover_on_init = false\n\n ## the interval before (re)discovering objects subject to metrics collection (default: 300s)\n # object_discovery_interval = \"300s\"\n\n ## timeout applies to any of the api request made to vcenter\n # timeout = \"60s\"\n\n ## When set to true, all samples are sent as integers. This makes the output\n ## data types backwards compatible with Telegraf 1.9 or lower. Normally all\n ## samples from vCenter, with the exception of percentages, are integer\n ## values, but under some conditions, some averaging takes place internally in\n ## the plugin. Setting this flag to \"false\" will send values as floats to\n ## preserve the full precision when averaging takes place.\n # use_int_samples = true\n\n ## Custom attributes from vCenter can be very useful for queries in order to slice the\n ## metrics along different dimension and for forming ad-hoc relationships. They are disabled\n ## by default, since they can add a considerable amount of tags to the resulting metrics. 
To\n ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include\n ## to select the attributes you want to include.\n # custom_attribute_include = []\n # custom_attribute_exclude = [\"*\"] \n\n ## Optional SSL Config\n # ssl_ca = \"/path/to/cafile\"\n # ssl_cert = \"/path/to/certfile\"\n # ssl_key = \"/path/to/keyfile\"\n ## Use SSL but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "aurora", - "description": "Gather metrics from Apache Aurora schedulers", - "config": "# Gather metrics from Apache Aurora schedulers\n[[inputs.aurora]]\n # alias=\"aurora\"\n ## Schedulers are the base addresses of your Aurora Schedulers\n schedulers = [\"http://127.0.0.1:8081\"]\n\n ## Set of role types to collect metrics from.\n ##\n ## The scheduler roles are checked each interval by contacting the\n ## scheduler nodes; zookeeper is not contacted.\n # roles = [\"leader\", \"follower\"]\n\n ## Timeout is the max time for total network operations.\n # timeout = \"5s\"\n\n ## Username and password are sent using HTTP Basic Auth.\n # username = \"username\"\n # password = \"pa$$word\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "burrow", - "description": "Collect Kafka topics and consumers status from Burrow HTTP API.", - "config": "# Collect Kafka topics and consumers status from Burrow HTTP API.\n[[inputs.burrow]]\n # alias=\"burrow\"\n ## Burrow API endpoints in format \"schema://host:port\".\n ## Default is \"http://localhost:8000\".\n servers = [\"http://localhost:8000\"]\n\n ## Override Burrow API prefix.\n ## Useful when Burrow is behind reverse-proxy.\n # api_prefix = \"/v3/kafka\"\n\n ## Maximum time to receive response.\n # response_timeout = \"5s\"\n\n ## Limit per-server concurrent connections.\n ## Useful in case of large number of topics or consumer groups.\n # concurrent_connections = 20\n\n ## Filter clusters, default is no filtering.\n ## Values can be specified as glob patterns.\n # clusters_include = []\n # clusters_exclude = []\n\n ## Filter consumer groups, default is no filtering.\n ## Values can be specified as glob patterns.\n # groups_include = []\n # groups_exclude = []\n\n ## Filter topics, default is no filtering.\n ## Values can be specified as glob patterns.\n # topics_include = []\n # topics_exclude = []\n\n ## Credentials for basic HTTP authentication.\n # username = \"\"\n # password = \"\"\n\n ## Optional SSL config\n # ssl_ca = \"/etc/telegraf/ca.pem\"\n # ssl_cert = \"/etc/telegraf/cert.pem\"\n # ssl_key = \"/etc/telegraf/key.pem\"\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "consul", - "description": "Gather health check statuses from services registered in Consul", - "config": "# Gather health check statuses from services registered in Consul\n[[inputs.consul]]\n # alias=\"consul\"\n ## Consul server address\n # address = \"localhost\"\n\n ## URI scheme for the Consul server, one of \"http\", \"https\"\n # scheme = \"http\"\n\n ## ACL token used in every request\n # token = \"\"\n\n ## HTTP Basic Authentication username and password.\n # username = \"\"\n # password = \"\"\n\n ## Data center to query the health checks from\n # datacenter = \"\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # 
tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = true\n\n ## Consul checks' tag splitting\n # When tags are formatted like \"key:value\" with \":\" as a delimiter then\n # they will be splitted and reported as proper key:value in Telegraf\n # tag_delimiter = \":\"\n\n" - }, - { - "type": "input", - "name": "dovecot", - "description": "Read statistics from one or many dovecot servers", - "config": "# Read statistics from one or many dovecot servers\n[[inputs.dovecot]]\n # alias=\"dovecot\"\n ## specify dovecot servers via an address:port list\n ## e.g.\n ## localhost:24242\n ##\n ## If no servers are specified, then localhost is used as the host.\n servers = [\"localhost:24242\"]\n\n ## Type is one of \"user\", \"domain\", \"ip\", or \"global\"\n type = \"global\"\n\n ## Wildcard matches like \"*.com\". An empty string \"\" is same as \"*\"\n ## If type = \"ip\" filters should be \u003cIP/network\u003e\n filters = [\"\"]\n\n" - }, - { - "type": "input", - "name": "fireboard", - "description": "Read real time temps from fireboard.io servers", - "config": "# Read real time temps from fireboard.io servers\n[[inputs.fireboard]]\n # alias=\"fireboard\"\n ## Specify auth token for your account\n auth_token = \"invalidAuthToken\"\n ## You can override the fireboard server URL if necessary\n # url = https://fireboard.io/api/v1/devices.json\n ## You can set a different http_timeout if you need to\n ## You should set a string using an number and time indicator\n ## for example \"12s\" for 12 seconds.\n # http_timeout = \"4s\"\n\n" - }, - { - "type": "input", - "name": "ecs", - "description": "Read metrics about docker containers from Fargate/ECS v2 meta endpoints.", - "config": "# Read metrics about docker containers from Fargate/ECS v2 meta endpoints.\n[[inputs.ecs]]\n # alias=\"ecs\"\n ## ECS metadata url\n # endpoint_url = \"http://169.254.170.2\"\n\n ## Containers to include and exclude. Globs accepted.\n ## Note that an empty array for both will include all containers\n # container_name_include = []\n # container_name_exclude = []\n\n ## Container states to include and exclude. Globs accepted.\n ## When empty only containers in the \"RUNNING\" state will be captured.\n ## Possible values are \"NONE\", \"PULLED\", \"CREATED\", \"RUNNING\",\n ## \"RESOURCES_PROVISIONED\", \"STOPPED\".\n # container_status_include = []\n # container_status_exclude = []\n\n ## ecs labels to include and exclude as tags. 
Globs accepted.\n ## Note that an empty array for both will include all labels as tags\n ecs_label_include = [ \"com.amazonaws.ecs.*\" ]\n ecs_label_exclude = []\n\n ## Timeout for queries.\n # timeout = \"5s\"\n\n" - }, - { - "type": "input", - "name": "icinga2", - "description": "Gather Icinga2 status", - "config": "# Gather Icinga2 status\n[[inputs.icinga2]]\n # alias=\"icinga2\"\n ## Required Icinga2 server address\n # server = \"https://localhost:5665\"\n \n ## Required Icinga2 object type (\"services\" or \"hosts\")\n # object_type = \"services\"\n\n ## Credentials for basic HTTP authentication\n # username = \"admin\"\n # password = \"admin\"\n\n ## Maximum time to receive response.\n # response_timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = true\n \n" - }, - { - "type": "input", - "name": "diskio", - "description": "Read metrics about disk IO by device", - "config": "# Read metrics about disk IO by device\n[[inputs.diskio]]\n # alias=\"diskio\"\n ## By default, telegraf will gather stats for all devices including\n ## disk partitions.\n ## Setting devices will restrict the stats to the specified devices.\n # devices = [\"sda\", \"sdb\", \"vd*\"]\n ## Uncomment the following line if you need disk serial numbers.\n # skip_serial_number = false\n #\n ## On systems which support it, device metadata can be added in the form of\n ## tags.\n ## Currently only Linux is supported via udev properties. You can view\n ## available properties for a device by running:\n ## 'udevadm info -q property -n /dev/sda'\n ## Note: Most, but not all, udev properties can be accessed this way. Properties\n ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH.\n # device_tags = [\"ID_FS_TYPE\", \"ID_FS_USAGE\"]\n #\n ## Using the same metadata source as device_tags, you can also customize the\n ## name of the device via templates.\n ## The 'name_templates' parameter is a list of templates to try and apply to\n ## the device. The template may contain variables in the form of '$PROPERTY' or\n ## '${PROPERTY}'. 
The first template which does not contain any variables not\n ## present for the device is used as the device name tag.\n ## The typical use case is for LVM volumes, to get the VG/LV name instead of\n ## the near-meaningless DM-0 name.\n # name_templates = [\"$ID_FS_LABEL\",\"$DM_VG_NAME/$DM_LV_NAME\"]\n\n" - }, - { - "type": "input", - "name": "http", - "description": "Read formatted metrics from one or more HTTP endpoints", - "config": "# Read formatted metrics from one or more HTTP endpoints\n[[inputs.http]]\n # alias=\"http\"\n ## One or more URLs from which to read formatted metrics\n urls = [\n \"http://localhost/metrics\"\n ]\n\n ## HTTP method\n # method = \"GET\"\n\n ## Optional HTTP headers\n # headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## Optional HTTP Basic Auth Credentials\n # username = \"username\"\n # password = \"pa$$word\"\n\n ## HTTP entity-body to send with POST/PUT requests.\n # body = \"\"\n\n ## HTTP Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"identity\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## Amount of time allowed to complete the HTTP request\n # timeout = \"5s\"\n\n ## List of success status codes\n # success_status_codes = [200]\n\n ## Data format to consume.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n # data_format = \"influx\"\n\n" - }, - { - "type": "input", - "name": "uwsgi", - "description": "Read uWSGI metrics.", - "config": "# Read uWSGI metrics.\n[[inputs.uwsgi]]\n # alias=\"uwsgi\"\n ## List with urls of uWSGI Stats servers. 
URL must match pattern:\n ## scheme://address[:port]\n ##\n ## For example:\n ## servers = [\"tcp://localhost:5050\", \"http://localhost:1717\", \"unix:///tmp/statsock\"]\n servers = [\"tcp://127.0.0.1:1717\"]\n\n ## General connection timout\n # timeout = \"5s\"\n\n" - }, - { - "type": "input", - "name": "chrony", - "description": "Get standard chrony metrics, requires chronyc executable.", - "config": "# Get standard chrony metrics, requires chronyc executable.\n[[inputs.chrony]]\n # alias=\"chrony\"\n ## If true, chronyc tries to perform a DNS lookup for the time server.\n # dns_lookup = false\n \n" - }, - { - "type": "input", - "name": "elasticsearch", - "description": "Read stats from one or more Elasticsearch servers or clusters", - "config": "# Read stats from one or more Elasticsearch servers or clusters\n[[inputs.elasticsearch]]\n # alias=\"elasticsearch\"\n ## specify a list of one or more Elasticsearch servers\n # you can add username and password to your url to use basic authentication:\n # servers = [\"http://user:pass@localhost:9200\"]\n servers = [\"http://localhost:9200\"]\n\n ## Timeout for HTTP requests to the elastic search server(s)\n http_timeout = \"5s\"\n\n ## When local is true (the default), the node will read only its own stats.\n ## Set local to false when you want to read the node stats from all nodes\n ## of the cluster.\n local = true\n\n ## Set cluster_health to true when you want to also obtain cluster health stats\n cluster_health = false\n\n ## Adjust cluster_health_level when you want to also obtain detailed health stats\n ## The options are\n ## - indices (default)\n ## - cluster\n # cluster_health_level = \"indices\"\n\n ## Set cluster_stats to true when you want to also obtain cluster stats.\n cluster_stats = false\n\n ## Only gather cluster_stats from the master node. To work this require local = true\n cluster_stats_only_from_master = true\n\n ## Indices to collect; can be one or more indices names or _all\n indices_include = [\"_all\"]\n\n ## One of \"shards\", \"cluster\", \"indices\"\n indices_level = \"shards\"\n\n ## node_stats is a list of sub-stats that you want to have gathered. Valid options\n ## are \"indices\", \"os\", \"process\", \"jvm\", \"thread_pool\", \"fs\", \"transport\", \"http\",\n ## \"breaker\". Per default, all stats are gathered.\n # node_stats = [\"jvm\", \"http\"]\n\n ## HTTP Basic Authentication username and password.\n # username = \"\"\n # password = \"\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "kafka_consumer", - "description": "Read metrics from Kafka topics", - "config": "# Read metrics from Kafka topics\n[[inputs.kafka_consumer]]\n # alias=\"kafka_consumer\"\n ## Kafka brokers.\n brokers = [\"localhost:9092\"]\n\n ## Topics to consume.\n topics = [\"telegraf\"]\n\n ## When set this tag will be added to all metrics with the topic as the value.\n # topic_tag = \"\"\n\n ## Optional Client id\n # client_id = \"Telegraf\"\n\n ## Set the minimal supported Kafka version. Setting this enables the use of new\n ## Kafka features and APIs. 
Must be 0.10.2.0 or greater.\n ## ex: version = \"1.1.0\"\n # version = \"\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## Optional SASL Config\n # sasl_username = \"kafka\"\n # sasl_password = \"secret\"\n\n ## Name of the consumer group.\n # consumer_group = \"telegraf_metrics_consumers\"\n\n ## Initial offset position; one of \"oldest\" or \"newest\".\n # offset = \"oldest\"\n\n ## Consumer group partition assignment strategy; one of \"range\", \"roundrobin\" or \"sticky\".\n # balance_strategy = \"range\"\n\n ## Maximum length of a message to consume, in bytes (default 0/unlimited);\n ## larger messages are dropped\n max_message_len = 1000000\n\n ## Maximum messages to read from the broker that have not been written by an\n ## output. For best throughput set based on the number of metrics within\n ## each message and the size of the output's metric_batch_size.\n ##\n ## For example, if each message from the queue contains 10 metrics and the\n ## output metric_batch_size is 1000, setting this to 100 will ensure that a\n ## full batch is collected and the write is triggered immediately without\n ## waiting until the next flush_interval.\n # max_undelivered_messages = 1000\n\n ## Data format to consume.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n\n" - }, - { - "type": "input", - "name": "tail", - "description": "Stream a log file, like the tail -f command", - "config": "# Stream a log file, like the tail -f command\n[[inputs.tail]]\n # alias=\"tail\"\n ## files to tail.\n ## These accept standard unix glob matching rules, but with the addition of\n ## ** as a \"super asterisk\". ie:\n ## \"/var/log/**.log\" -\u003e recursively find all .log files in /var/log\n ## \"/var/log/*/*.log\" -\u003e find all .log files with a parent dir in /var/log\n ## \"/var/log/apache.log\" -\u003e just tail the apache log file\n ##\n ## See https://github.com/gobwas/glob for more examples\n ##\n files = [\"/var/mymetrics.out\"]\n ## Read file from beginning.\n from_beginning = false\n ## Whether file is a named pipe\n pipe = false\n\n ## Method used to watch for file updates. 
Can be either \"inotify\" or \"poll\".\n # watch_method = \"inotify\"\n\n ## Data format to consume.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n\n" - }, - { - "type": "input", - "name": "udp_listener", - "description": "Generic UDP listener", - "config": "# Generic UDP listener\n[[inputs.udp_listener]]\n # alias=\"udp_listener\"\n # DEPRECATED: the TCP listener plugin has been deprecated in favor of the\n # socket_listener plugin\n # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener\n\n" - }, - { - "type": "input", - "name": "beanstalkd", - "description": "Collects Beanstalkd server and tubes stats", - "config": "# Collects Beanstalkd server and tubes stats\n[[inputs.beanstalkd]]\n # alias=\"beanstalkd\"\n ## Server to collect data from\n server = \"localhost:11300\"\n\n ## List of tubes to gather stats about.\n ## If no tubes specified then data gathered for each tube on server reported by list-tubes command\n tubes = [\"notifications\"]\n\n" - }, - { - "type": "input", - "name": "github", - "description": "Gather repository information from GitHub hosted repositories.", - "config": "# Gather repository information from GitHub hosted repositories.\n[[inputs.github]]\n # alias=\"github\"\n ## List of repositories to monitor.\n repositories = [\n\t \"influxdata/telegraf\",\n\t \"influxdata/influxdb\"\n ]\n\n ## Github API access token. Unauthenticated requests are limited to 60 per hour.\n # access_token = \"\"\n\n ## Github API enterprise url. Github Enterprise accounts must specify their base url.\n # enterprise_base_url = \"\"\n\n ## Timeout for HTTP requests.\n # http_timeout = \"5s\"\n\n" - }, - { - "type": "input", - "name": "logparser", - "description": "Stream and parse log file(s).", - "config": "# Stream and parse log file(s).\n[[inputs.logparser]]\n # alias=\"logparser\"\n ## Log files to parse.\n ## These accept standard unix glob matching rules, but with the addition of\n ## ** as a \"super asterisk\". ie:\n ## /var/log/**.log -\u003e recursively find all .log files in /var/log\n ## /var/log/*/*.log -\u003e find all .log files with a parent dir in /var/log\n ## /var/log/apache.log -\u003e only tail the apache log file\n files = [\"/var/log/apache/access.log\"]\n\n ## Read files that currently exist from the beginning. Files that are created\n ## while telegraf is running (and that match the \"files\" globs) will always\n ## be read from the beginning.\n from_beginning = false\n\n ## Method used to watch for file updates. Can be either \"inotify\" or \"poll\".\n # watch_method = \"inotify\"\n\n ## Parse logstash-style \"grok\" patterns:\n [inputs.logparser.grok]\n ## This is a list of patterns to check the given log file(s) for.\n ## Note that adding patterns here increases processing time. The most\n ## efficient configuration is to have one pattern per logparser.\n ## Other common built-in patterns are:\n ## %{COMMON_LOG_FORMAT} (plain apache \u0026 nginx access logs)\n ## %{COMBINED_LOG_FORMAT} (access logs + referrer \u0026 agent)\n patterns = [\"%{COMBINED_LOG_FORMAT}\"]\n\n ## Name of the outputted measurement name.\n measurement = \"apache_access_log\"\n\n ## Full path(s) to custom pattern files.\n custom_pattern_files = []\n\n ## Custom patterns can also be defined here. 
Put one pattern per line.\n custom_patterns = '''\n '''\n\n ## Timezone allows you to provide an override for timestamps that\n ## don't already include an offset\n ## e.g. 04/06/2016 12:41:45 data one two 5.43µs\n ##\n ## Default: \"\" which renders UTC\n ## Options are as follows:\n ## 1. Local -- interpret based on machine localtime\n ## 2. \"Canada/Eastern\" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones\n ## 3. UTC -- or blank/unspecified, will return timestamp in UTC\n # timezone = \"Canada/Eastern\"\n\n\t## When set to \"disable\", timestamp will not incremented if there is a\n\t## duplicate.\n # unique_timestamp = \"auto\"\n\n" - }, - { - "type": "input", - "name": "tomcat", - "description": "Gather metrics from the Tomcat server status page.", - "config": "# Gather metrics from the Tomcat server status page.\n[[inputs.tomcat]]\n # alias=\"tomcat\"\n ## URL of the Tomcat server status\n # url = \"http://127.0.0.1:8080/manager/status/all?XML=true\"\n\n ## HTTP Basic Auth Credentials\n # username = \"tomcat\"\n # password = \"s3cret\"\n\n ## Request timeout\n # timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "twemproxy", - "description": "Read Twemproxy stats data", - "config": "# Read Twemproxy stats data\n[[inputs.twemproxy]]\n # alias=\"twemproxy\"\n ## Twemproxy stats address and port (no scheme)\n addr = \"localhost:22222\"\n ## Monitor pool name\n pools = [\"redis_pool\", \"mc_pool\"]\n\n" - }, - { - "type": "input", - "name": "influxdb_listener", - "description": "Influx HTTP write listener", - "config": "# Influx HTTP write listener\n[[inputs.influxdb_listener]]\n # alias=\"influxdb_listener\"\n ## Address and port to host HTTP listener on\n service_address = \":8186\"\n\n ## maximum duration before timing out read of the request\n read_timeout = \"10s\"\n ## maximum duration before timing out write of the response\n write_timeout = \"10s\"\n\n ## Maximum allowed http request body size in bytes.\n ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)\n max_body_size = \"500MiB\"\n\n ## Maximum line size allowed to be sent in bytes.\n ## 0 means to use the default of 65536 bytes (64 kibibytes)\n max_line_size = \"64KiB\"\n \n\n ## Optional tag name used to store the database. 
\n ## If the write has a database in the query string then it will be kept in this tag name.\n ## This tag can be used in downstream outputs.\n ## The default value of nothing means it will be off and the database will not be recorded.\n # database_tag = \"\"\n\n ## Set one or more allowed client CA certificate file names to\n ## enable mutually authenticated TLS connections\n tls_allowed_cacerts = [\"/etc/telegraf/clientca.pem\"]\n\n ## Add service certificate and key\n tls_cert = \"/etc/telegraf/cert.pem\"\n tls_key = \"/etc/telegraf/key.pem\"\n\n ## Optional username and password to accept for HTTP basic authentication.\n ## You probably want to make sure you have TLS configured above for this.\n # basic_username = \"foobar\"\n # basic_password = \"barfoo\"\n\n" - }, - { - "type": "input", - "name": "jti_openconfig_telemetry", - "description": "Read JTI OpenConfig Telemetry from listed sensors", - "config": "# Read JTI OpenConfig Telemetry from listed sensors\n[[inputs.jti_openconfig_telemetry]]\n # alias=\"jti_openconfig_telemetry\"\n ## List of device addresses to collect telemetry from\n servers = [\"localhost:1883\"]\n\n ## Authentication details. Username and password are must if device expects\n ## authentication. Client ID must be unique when connecting from multiple instances\n ## of telegraf to the same device\n username = \"user\"\n password = \"pass\"\n client_id = \"telegraf\"\n\n ## Frequency to get data\n sample_frequency = \"1000ms\"\n\n ## Sensors to subscribe for\n ## A identifier for each sensor can be provided in path by separating with space\n ## Else sensor path will be used as identifier\n ## When identifier is used, we can provide a list of space separated sensors.\n ## A single subscription will be created with all these sensors and data will\n ## be saved to measurement with this identifier name\n sensors = [\n \"/interfaces/\",\n \"collection /components/ /lldp\",\n ]\n\n ## We allow specifying sensor group level reporting rate. To do this, specify the\n ## reporting rate in Duration at the beginning of sensor paths / collection\n ## name. For entries without reporting rate, we use configured sample frequency\n sensors = [\n \"1000ms customReporting /interfaces /lldp\",\n \"2000ms collection /components\",\n \"/interfaces\",\n ]\n\n ## Optional TLS Config\n # enable_tls = true\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## Delay between retry attempts of failed RPC calls or streams. 
Defaults to 1000ms.\n ## Failed streams/calls will not be retried if 0 is provided\n retry_delay = \"1000ms\"\n\n ## To treat all string values as tags, set this to true\n str_as_tags = false\n\n" - }, - { - "type": "input", - "name": "kinesis_consumer", - "description": "Configuration for the AWS Kinesis input.", - "config": "# Configuration for the AWS Kinesis input.\n[[inputs.kinesis_consumer]]\n # alias=\"kinesis_consumer\"\n ## Amazon REGION of kinesis endpoint.\n region = \"ap-southeast-2\"\n\n ## Amazon Credentials\n ## Credentials are loaded in the following order\n ## 1) Assumed credentials via STS if role_arn is specified\n ## 2) explicit credentials from 'access_key' and 'secret_key'\n ## 3) shared profile from 'profile'\n ## 4) environment variables\n ## 5) shared credentials file\n ## 6) EC2 Instance Profile\n # access_key = \"\"\n # secret_key = \"\"\n # token = \"\"\n # role_arn = \"\"\n # profile = \"\"\n # shared_credential_file = \"\"\n\n ## Endpoint to make request against, the correct endpoint is automatically\n ## determined and this option should only be set if you wish to override the\n ## default.\n ## ex: endpoint_url = \"http://localhost:8000\"\n # endpoint_url = \"\"\n\n ## Kinesis StreamName must exist prior to starting telegraf.\n streamname = \"StreamName\"\n\n ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported)\n # shard_iterator_type = \"TRIM_HORIZON\"\n\n ## Maximum messages to read from the broker that have not been written by an\n ## output. For best throughput set based on the number of metrics within\n ## each message and the size of the output's metric_batch_size.\n ##\n ## For example, if each message from the queue contains 10 metrics and the\n ## output metric_batch_size is 1000, setting this to 100 will ensure that a\n ## full batch is collected and the write is triggered immediately without\n ## waiting until the next flush_interval.\n # max_undelivered_messages = 1000\n\n ## Data format to consume.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n\n ## Optional\n ## Configuration for a dynamodb checkpoint\n [inputs.kinesis_consumer.checkpoint_dynamodb]\n\t## unique name for this consumer\n\tapp_name = \"default\"\n\ttable_name = \"default\"\n\n" - }, - { - "type": "input", - "name": "pgbouncer", - "description": "Read metrics from one or many pgbouncer servers", - "config": "# Read metrics from one or many pgbouncer servers\n[[inputs.pgbouncer]]\n # alias=\"pgbouncer\"\n ## specify address via a url matching:\n ## postgres://[pqgotest[:password]]@localhost[/dbname]\\\n ## ?sslmode=[disable|verify-ca|verify-full]\n ## or a simple string:\n ## host=localhost user=pqotest password=... sslmode=... 
dbname=app_production\n ##\n ## All connection parameters are optional.\n ##\n address = \"host=localhost user=pgbouncer sslmode=disable\"\n\n" - }, - { - "type": "input", - "name": "internal", - "description": "Collect statistics about itself", - "config": "# Collect statistics about itself\n[[inputs.internal]]\n # alias=\"internal\"\n ## If true, collect telegraf memory stats.\n # collect_memstats = true\n\n" - }, - { - "type": "input", - "name": "mcrouter", - "description": "Read metrics from one or many mcrouter servers", - "config": "# Read metrics from one or many mcrouter servers\n[[inputs.mcrouter]]\n # alias=\"mcrouter\"\n ## An array of address to gather stats about. Specify an ip or hostname\n ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc.\n\tservers = [\"tcp://localhost:11211\", \"unix:///var/run/mcrouter.sock\"]\n\n\t## Timeout for metric collections from all servers. Minimum timeout is \"1s\".\n # timeout = \"5s\"\n\n" - }, - { - "type": "input", - "name": "postgresql_extensible", - "description": "Read metrics from one or many postgresql servers", - "config": "# Read metrics from one or many postgresql servers\n[[inputs.postgresql_extensible]]\n # alias=\"postgresql_extensible\"\n ## specify address via a url matching:\n ## postgres://[pqgotest[:password]]@localhost[/dbname]\\\n ## ?sslmode=[disable|verify-ca|verify-full]\n ## or a simple string:\n ## host=localhost user=pqotest password=... sslmode=... dbname=app_production\n #\n ## All connection parameters are optional. #\n ## Without the dbname parameter, the driver will default to a database\n ## with the same name as the user. This dbname is just for instantiating a\n ## connection with the server and doesn't restrict the databases we are trying\n ## to grab metrics for.\n #\n address = \"host=localhost user=postgres sslmode=disable\"\n\n ## connection configuration.\n ## maxlifetime - specify the maximum lifetime of a connection.\n ## default is forever (0s)\n max_lifetime = \"0s\"\n\n ## A list of databases to pull metrics about. If not specified, metrics for all\n ## databases are gathered.\n ## databases = [\"app_production\", \"testing\"]\n #\n ## A custom name for the database that will be used as the \"server\" tag in the\n ## measurement output. If not specified, a default one generated from\n ## the connection address is used.\n # outputaddress = \"db01\"\n #\n ## Define the toml config where the sql queries are stored\n ## New queries can be added, if the withdbname is set to true and there is no\n ## databases defined in the 'databases field', the sql query is ended by a\n ## 'is not null' in order to make the query succeed.\n ## Example :\n ## The sqlquery : \"SELECT * FROM pg_stat_database where datname\" become\n ## \"SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')\"\n ## because the databases variable was set to ['postgres', 'pgbench' ] and the\n ## withdbname was true. 
Be careful that if the withdbname is set to false you\n ## don't have to define the where clause (aka with the dbname) the tagvalue\n ## field is used to define custom tags (separated by commas)\n ## The optional \"measurement\" value can be used to override the default\n ## output measurement name (\"postgresql\").\n ##\n ## The script option can be used to specify the .sql file path.\n ## If script and sqlquery options specified at same time, sqlquery will be used \n ##\n ## Structure :\n ## [[inputs.postgresql_extensible.query]]\n ## sqlquery string\n ## version string\n ## withdbname boolean\n ## tagvalue string (comma separated)\n ## measurement string\n [[inputs.postgresql_extensible.query]]\n sqlquery=\"SELECT * FROM pg_stat_database\"\n version=901\n withdbname=false\n tagvalue=\"\"\n measurement=\"\"\n [[inputs.postgresql_extensible.query]]\n sqlquery=\"SELECT * FROM pg_stat_bgwriter\"\n version=901\n withdbname=false\n tagvalue=\"postgresql.stats\"\n\n" - }, - { - "type": "input", - "name": "varnish", - "description": "A plugin to collect stats from Varnish HTTP Cache", - "config": "# A plugin to collect stats from Varnish HTTP Cache\n[[inputs.varnish]]\n # alias=\"varnish\"\n ## If running as a restricted user you can prepend sudo for additional access:\n #use_sudo = false\n\n ## The default location of the varnishstat binary can be overridden with:\n binary = \"/usr/bin/varnishstat\"\n\n ## By default, telegraf gather stats for 3 metric points.\n ## Setting stats will override the defaults shown below.\n ## Glob matching can be used, ie, stats = [\"MAIN.*\"]\n ## stats may also be set to [\"*\"], which will collect all stats\n stats = [\"MAIN.cache_hit\", \"MAIN.cache_miss\", \"MAIN.uptime\"]\n\n ## Optional name for the varnish instance (or working directory) to query\n ## Usually appened after -n in varnish cli\n # instance_name = instanceName\n\n ## Timeout for varnishstat command\n # timeout = \"1s\"\n\n" - }, - { - "type": "input", - "name": "wireless", - "description": "Monitor wifi signal strength and quality", - "config": "# Monitor wifi signal strength and quality\n[[inputs.wireless]]\n # alias=\"wireless\"\n ## Sets 'proc' directory path\n ## If not specified, then default is /proc\n # host_proc = \"/proc\"\n\n" - }, - { - "type": "input", - "name": "rabbitmq", - "description": "Reads metrics from RabbitMQ servers via the Management Plugin", - "config": "# Reads metrics from RabbitMQ servers via the Management Plugin\n[[inputs.rabbitmq]]\n # alias=\"rabbitmq\"\n ## Management Plugin url. (default: http://localhost:15672)\n # url = \"http://localhost:15672\"\n ## Tag added to rabbitmq_overview series; deprecated: use tags\n # name = \"rmq-server-1\"\n ## Credentials\n # username = \"guest\"\n # password = \"guest\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## Optional request timeouts\n ##\n ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait\n ## for a server's response headers after fully writing the request.\n # header_timeout = \"3s\"\n ##\n ## client_timeout specifies a time limit for requests made by this client.\n ## Includes connection time, any redirects, and reading the response body.\n # client_timeout = \"4s\"\n\n ## A list of nodes to gather as the rabbitmq_node measurement. 
If not\n ## specified, metrics for all nodes are gathered.\n # nodes = [\"rabbit@node1\", \"rabbit@node2\"]\n\n ## A list of queues to gather as the rabbitmq_queue measurement. If not\n ## specified, metrics for all queues are gathered.\n # queues = [\"telegraf\"]\n\n ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not\n ## specified, metrics for all exchanges are gathered.\n # exchanges = [\"telegraf\"]\n\n ## Queues to include and exclude. Globs accepted.\n ## Note that an empty array for both will include all queues\n queue_name_include = []\n queue_name_exclude = []\n\n ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement.\n ## If neither are specified, metrics for all federation upstreams are gathered.\n ## Federation link metrics will only be gathered for queues and exchanges\n ## whose non-federation metrics will be collected (e.g a queue excluded\n ## by the 'queue_name_exclude' option will also be excluded from federation).\n ## Globs accepted.\n # federation_upstream_include = [\"dataCentre-*\"]\n # federation_upstream_exclude = []\n\n" - }, - { - "type": "input", - "name": "x509_cert", - "description": "Reads metrics from a SSL certificate", - "config": "# Reads metrics from a SSL certificate\n[[inputs.x509_cert]]\n # alias=\"x509_cert\"\n ## List certificate sources\n sources = [\"/etc/ssl/certs/ssl-cert-snakeoil.pem\", \"tcp://example.org:443\"]\n\n ## Timeout for SSL connection\n # timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n\n" - }, - { - "type": "input", - "name": "cassandra", - "description": "Read Cassandra metrics through Jolokia", - "config": "# Read Cassandra metrics through Jolokia\n[[inputs.cassandra]]\n # alias=\"cassandra\"\n ## DEPRECATED: The cassandra plugin has been deprecated. Please use the\n ## jolokia2 plugin instead.\n ##\n ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2\n\n context = \"/jolokia/read\"\n ## List of cassandra servers exposing jolokia read service\n servers = [\"myuser:mypassword@10.10.10.1:8778\",\"10.10.10.2:8778\",\":8778\"]\n ## List of metrics collected on above servers\n ## Each metric consists of a jmx path.\n ## This will collect all heap memory usage metrics from the jvm and\n ## ReadLatency metrics for all keyspaces and tables.\n ## \"type=Table\" in the query works with Cassandra3.0. Older versions might\n ## need to use \"type=ColumnFamily\"\n metrics = [\n \"/java.lang:type=Memory/HeapMemoryUsage\",\n \"/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency\"\n ]\n\n" - }, - { - "type": "input", - "name": "cloud_pubsub", - "description": "Read metrics from Google PubSub", - "config": "# Read metrics from Google PubSub\n[[inputs.cloud_pubsub]]\n # alias=\"cloud_pubsub\"\n ## Required. Name of Google Cloud Platform (GCP) Project that owns\n ## the given PubSub subscription.\n project = \"my-project\"\n\n ## Required. Name of PubSub subscription to ingest metrics from.\n subscription = \"my-subscription\"\n\n ## Required. Data format to consume.\n ## Each data format has its own unique set of configuration options.\n ## Read more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n\n ## Optional. Filepath for GCP credentials JSON file to authorize calls to\n ## PubSub APIs. 
If not set explicitly, Telegraf will attempt to use\n ## Application Default Credentials, which is preferred.\n # credentials_file = \"path/to/my/creds.json\"\n\n ## Optional. Number of seconds to wait before attempting to restart the \n ## PubSub subscription receiver after an unexpected error. \n ## If the streaming pull for a PubSub Subscription fails (receiver),\n ## the agent attempts to restart receiving messages after this many seconds.\n # retry_delay_seconds = 5\n\n ## Optional. Maximum byte length of a message to consume.\n ## Larger messages are dropped with an error. If less than 0 or unspecified,\n ## treated as no limit.\n # max_message_len = 1000000\n\n ## Optional. Maximum messages to read from PubSub that have not been written\n ## to an output. Defaults to 1000.\n ## For best throughput set based on the number of metrics within\n ## each message and the size of the output's metric_batch_size.\n ##\n ## For example, if each message contains 10 metrics and the output\n ## metric_batch_size is 1000, setting this to 100 will ensure that a\n ## full batch is collected and the write is triggered immediately without\n ## waiting until the next flush_interval.\n # max_undelivered_messages = 1000\n\n ## The following are optional Subscription ReceiveSettings in PubSub.\n ## Read more about these values:\n ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings\n\n ## Optional. Maximum number of seconds for which a PubSub subscription\n ## should auto-extend the PubSub ACK deadline for each message. If less than\n ## 0, auto-extension is disabled.\n # max_extension = 0\n\n ## Optional. Maximum number of unprocessed messages in PubSub\n ## (unacknowledged but not yet expired in PubSub).\n ## A value of 0 is treated as the default PubSub value.\n ## Negative values will be treated as unlimited.\n # max_outstanding_messages = 0\n\n ## Optional. Maximum size in bytes of unprocessed messages in PubSub\n ## (unacknowledged but not yet expired in PubSub).\n ## A value of 0 is treated as the default PubSub value.\n ## Negative values will be treated as unlimited.\n # max_outstanding_bytes = 0\n\n ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn\n ## to pull messages from PubSub concurrently. This limit applies to each\n ## subscription separately and is treated as the PubSub default if less than\n ## 1. Note this setting does not limit the number of messages that can be\n ## processed concurrently (use \"max_outstanding_messages\" instead).\n # max_receiver_go_routines = 0\n\n ## Optional. If true, Telegraf will attempt to base64 decode the \n ## PubSub message data before parsing\n # base64_data = false\n\n" - }, - { - "type": "input", - "name": "ipmi_sensor", - "description": "Read metrics from the bare metal servers via IPMI", - "config": "# Read metrics from the bare metal servers via IPMI\n[[inputs.ipmi_sensor]]\n # alias=\"ipmi_sensor\"\n ## optionally specify the path to the ipmitool executable\n # path = \"/usr/bin/ipmitool\"\n ##\n ## optionally force session privilege level. 
Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR\n # privilege = \"ADMINISTRATOR\"\n ##\n ## optionally specify one or more servers via a url matching\n ## [username[:password]@][protocol[(address)]]\n ## e.g.\n ## root:passwd@lan(127.0.0.1)\n ##\n ## if no servers are specified, local machine sensor stats will be queried\n ##\n # servers = [\"USERID:PASSW0RD@lan(192.168.1.1)\"]\n\n ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid\n ## gaps or overlap in pulled data\n interval = \"30s\"\n\n ## Timeout for the ipmitool command to complete\n timeout = \"20s\"\n\n ## Schema Version: (Optional, defaults to version 1)\n metric_version = 2\n\n" - }, - { - "type": "input", - "name": "jolokia", - "description": "Read JMX metrics through Jolokia", - "config": "# Read JMX metrics through Jolokia\n[[inputs.jolokia]]\n # alias=\"jolokia\"\n # DEPRECATED: the jolokia plugin has been deprecated in favor of the\n # jolokia2 plugin\n # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2\n\n ## This is the context root used to compose the jolokia url\n ## NOTE that Jolokia requires a trailing slash at the end of the context root\n ## NOTE that your jolokia security policy must allow for POST requests.\n context = \"/jolokia/\"\n\n ## This specifies the mode used\n # mode = \"proxy\"\n #\n ## When in proxy mode this section is used to specify further\n ## proxy address configurations.\n ## Remember to change host address to fit your environment.\n # [inputs.jolokia.proxy]\n # host = \"127.0.0.1\"\n # port = \"8080\"\n\n ## Optional http timeouts\n ##\n ## response_header_timeout, if non-zero, specifies the amount of time to wait\n ## for a server's response headers after fully writing the request.\n # response_header_timeout = \"3s\"\n ##\n ## client_timeout specifies a time limit for requests made by this client.\n ## Includes connection time, any redirects, and reading the response body.\n # client_timeout = \"4s\"\n\n ## Attribute delimiter\n ##\n ## When multiple attributes are returned for a single\n ## [inputs.jolokia.metrics], the field name is a concatenation of the metric\n ## name, and the attribute name, separated by the given delimiter.\n # delimiter = \"_\"\n\n ## List of servers exposing jolokia read service\n [[inputs.jolokia.servers]]\n name = \"as-server-01\"\n host = \"127.0.0.1\"\n port = \"8080\"\n # username = \"myuser\"\n # password = \"mypassword\"\n\n ## List of metrics collected on above servers\n ## Each metric consists in a name, a jmx path and either\n ## a pass or drop slice attribute.\n ## This collect all heap memory usage metrics.\n [[inputs.jolokia.metrics]]\n name = \"heap_memory_usage\"\n mbean = \"java.lang:type=Memory\"\n attribute = \"HeapMemoryUsage\"\n\n ## This collect thread counts metrics.\n [[inputs.jolokia.metrics]]\n name = \"thread_count\"\n mbean = \"java.lang:type=Threading\"\n attribute = \"TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount\"\n\n ## This collect number of class loaded/unloaded counts metrics.\n [[inputs.jolokia.metrics]]\n name = \"class_count\"\n mbean = \"java.lang:type=ClassLoading\"\n attribute = \"LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount\"\n\n" - }, - { - "type": "input", - "name": "mem", - "description": "Read metrics about memory usage", - "config": "# Read metrics about memory usage\n[[inputs.mem]]\n # alias=\"mem\"\n" - }, - { - "type": "input", - "name": "filecount", - "description": "Count files in a directory", - "config": "# Count 
files in a directory\n[[inputs.filecount]]\n # alias=\"filecount\"\n ## Directory to gather stats about.\n ## deprecated in 1.9; use the directories option\n # directory = \"/var/cache/apt/archives\"\n\n ## Directories to gather stats about.\n ## This accept standard unit glob matching rules, but with the addition of\n ## ** as a \"super asterisk\". ie:\n ## /var/log/** -\u003e recursively find all directories in /var/log and count files in each directories\n ## /var/log/*/* -\u003e find all directories with a parent dir in /var/log and count files in each directories\n ## /var/log -\u003e count all files in /var/log and all of its subdirectories\n directories = [\"/var/cache/apt/archives\"]\n\n ## Only count files that match the name pattern. Defaults to \"*\".\n name = \"*.deb\"\n\n ## Count files in subdirectories. Defaults to true.\n recursive = false\n\n ## Only count regular files. Defaults to true.\n regular_only = true\n\n ## Follow all symlinks while walking the directory tree. Defaults to false.\n follow_symlinks = false\n\n ## Only count files that are at least this size. If size is\n ## a negative number, only count files that are smaller than the\n ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...\n ## Without quotes and units, interpreted as size in bytes.\n size = \"0B\"\n\n ## Only count files that have not been touched for at least this\n ## duration. If mtime is negative, only count files that have been\n ## touched in this duration. Defaults to \"0s\".\n mtime = \"0s\"\n\n" - }, - { - "type": "input", - "name": "kafka_consumer_legacy", - "description": "Read metrics from Kafka topic(s)", - "config": "# Read metrics from Kafka topic(s)\n[[inputs.kafka_consumer_legacy]]\n # alias=\"kafka_consumer_legacy\"\n ## topic(s) to consume\n topics = [\"telegraf\"]\n\n ## an array of Zookeeper connection strings\n zookeeper_peers = [\"localhost:2181\"]\n\n ## Zookeeper Chroot\n zookeeper_chroot = \"\"\n\n ## the name of the consumer group\n consumer_group = \"telegraf_metrics_consumers\"\n\n ## Offset (must be either \"oldest\" or \"newest\")\n offset = \"oldest\"\n\n ## Data format to consume.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n\n ## Maximum length of a message to consume, in bytes (default 0/unlimited);\n ## larger messages are dropped\n max_message_len = 65536\n\n" - }, - { - "type": "input", - "name": "net", - "description": "Read metrics about network interface usage", - "config": "# Read metrics about network interface usage\n[[inputs.net]]\n # alias=\"net\"\n ## By default, telegraf gathers stats from any up interface (excluding loopback)\n ## Setting interfaces will tell it to gather these explicit interfaces,\n ## regardless of status.\n ##\n # interfaces = [\"eth0\"]\n ##\n ## On linux systems telegraf also collects protocol stats.\n ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.\n ##\n # ignore_protocol_stats = false\n ##\n\n" - }, - { - "type": "input", - "name": "nsq", - "description": "Read NSQ topic and channel statistics.", - "config": "# Read NSQ topic and channel statistics.\n[[inputs.nsq]]\n # alias=\"nsq\"\n ## An array of NSQD HTTP API endpoints\n endpoints = [\"http://localhost:4151\"]\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = 
\"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "conntrack", - "description": "Collects conntrack stats from the configured directories and files.", - "config": "# Collects conntrack stats from the configured directories and files.\n[[inputs.conntrack]]\n # alias=\"conntrack\"\n ## The following defaults would work with multiple versions of conntrack.\n ## Note the nf_ and ip_ filename prefixes are mutually exclusive across\n ## kernel versions, as are the directory locations.\n\n ## Superset of filenames to look for within the conntrack dirs.\n ## Missing files will be ignored.\n files = [\"ip_conntrack_count\",\"ip_conntrack_max\",\n \"nf_conntrack_count\",\"nf_conntrack_max\"]\n\n ## Directories to search within for the conntrack files above.\n ## Missing directrories will be ignored.\n dirs = [\"/proc/sys/net/ipv4/netfilter\",\"/proc/sys/net/netfilter\"]\n\n" - }, - { - "type": "input", - "name": "iptables", - "description": "Gather packets and bytes throughput from iptables", - "config": "# Gather packets and bytes throughput from iptables\n[[inputs.iptables]]\n # alias=\"iptables\"\n ## iptables require root access on most systems.\n ## Setting 'use_sudo' to true will make use of sudo to run iptables.\n ## Users must configure sudo to allow telegraf user to run iptables with no password.\n ## iptables can be restricted to only list command \"iptables -nvL\".\n use_sudo = false\n ## Setting 'use_lock' to true runs iptables with the \"-w\" option.\n ## Adjust your sudo settings appropriately if using this option (\"iptables -w 5 -nvl\")\n use_lock = false\n ## Define an alternate executable, such as \"ip6tables\". Default is \"iptables\".\n # binary = \"ip6tables\"\n ## defines the table to monitor:\n table = \"filter\"\n ## defines the chains to monitor.\n ## NOTE: iptables rules without a comment will not be monitored.\n ## Read the plugin documentation for more information.\n chains = [ \"INPUT\" ]\n\n" - }, - { - "type": "input", - "name": "memcached", - "description": "Read metrics from one or many memcached servers", - "config": "# Read metrics from one or many memcached servers\n[[inputs.memcached]]\n # alias=\"memcached\"\n ## An array of address to gather stats about. Specify an ip on hostname\n ## with optional port. ie localhost, 10.0.0.1:11211, etc.\n servers = [\"localhost:11211\"]\n # unix_sockets = [\"/var/run/memcached.sock\"]\n\n" - }, - { - "type": "input", - "name": "snmp_trap", - "description": "Receive SNMP traps", - "config": "# Receive SNMP traps\n[[inputs.snmp_trap]]\n # alias=\"snmp_trap\"\n ## Transport, local address, and port to listen on. Transport must\n ## be \"udp://\". 
Omit local address to listen on all interfaces.\n ## example: \"udp://127.0.0.1:1234\"\n # service_address = udp://:162\n ## Timeout running snmptranslate command\n # timeout = \"5s\"\n\n" - }, - { - "type": "input", - "name": "dns_query", - "description": "Query given DNS server and gives statistics", - "config": "# Query given DNS server and gives statistics\n[[inputs.dns_query]]\n # alias=\"dns_query\"\n ## servers to query\n servers = [\"8.8.8.8\"]\n\n ## Network is the network protocol name.\n # network = \"udp\"\n\n ## Domains or subdomains to query.\n # domains = [\".\"]\n\n ## Query record type.\n ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.\n # record_type = \"A\"\n\n ## Dns server port.\n # port = 53\n\n ## Query timeout in seconds.\n # timeout = 2\n\n" - }, - { - "type": "input", - "name": "linux_sysctl_fs", - "description": "Provides Linux sysctl fs metrics", - "config": "# Provides Linux sysctl fs metrics\n[[inputs.linux_sysctl_fs]]\n # alias=\"linux_sysctl_fs\"\n" - }, - { - "type": "input", - "name": "netstat", - "description": "Read TCP metrics such as established, time wait and sockets counts.", - "config": "# Read TCP metrics such as established, time wait and sockets counts.\n[[inputs.netstat]]\n # alias=\"netstat\"\n" - }, - { - "type": "input", - "name": "postfix", - "description": "Measure postfix queue statistics", - "config": "# Measure postfix queue statistics\n[[inputs.postfix]]\n # alias=\"postfix\"\n ## Postfix queue directory. If not provided, telegraf will try to use\n ## 'postconf -h queue_directory' to determine it.\n # queue_directory = \"/var/spool/postfix\"\n\n" - }, - { - "type": "input", - "name": "rethinkdb", - "description": "Read metrics from one or many RethinkDB servers", - "config": "# Read metrics from one or many RethinkDB servers\n[[inputs.rethinkdb]]\n # alias=\"rethinkdb\"\n ## An array of URI to gather stats about. Specify an ip or hostname\n ## with optional port add password. 
ie,\n ## rethinkdb://user:auth_key@10.10.3.30:28105,\n ## rethinkdb://10.10.3.33:18832,\n ## 10.0.0.1:10000, etc.\n servers = [\"127.0.0.1:28015\"]\n ##\n ## If you use actual rethinkdb of \u003e 2.3.0 with username/password authorization,\n ## protocol have to be named \"rethinkdb2\" - it will use 1_0 H.\n # servers = [\"rethinkdb2://username:password@127.0.0.1:28015\"]\n ##\n ## If you use older versions of rethinkdb (\u003c2.2) with auth_key, protocol\n ## have to be named \"rethinkdb\".\n # servers = [\"rethinkdb://username:auth_key@127.0.0.1:28015\"]\n\n" - }, - { - "type": "input", - "name": "bond", - "description": "Collect bond interface status, slaves statuses and failures count", - "config": "# Collect bond interface status, slaves statuses and failures count\n[[inputs.bond]]\n # alias=\"bond\"\n ## Sets 'proc' directory path\n ## If not specified, then default is /proc\n # host_proc = \"/proc\"\n\n ## By default, telegraf gather stats for all bond interfaces\n ## Setting interfaces will restrict the stats to the specified\n ## bond interfaces.\n # bond_interfaces = [\"bond0\"]\n\n" - }, - { - "type": "input", - "name": "couchdb", - "description": "Read CouchDB Stats from one or more servers", - "config": "# Read CouchDB Stats from one or more servers\n[[inputs.couchdb]]\n # alias=\"couchdb\"\n ## Works with CouchDB stats endpoints out of the box\n ## Multiple Hosts from which to read CouchDB stats:\n hosts = [\"http://localhost:8086/_stats\"]\n\n ## Use HTTP Basic Authentication.\n # basic_username = \"telegraf\"\n # basic_password = \"p@ssw0rd\"\n\n" - }, - { - "type": "input", - "name": "kibana", - "description": "Read status information from one or more Kibana servers", - "config": "# Read status information from one or more Kibana servers\n[[inputs.kibana]]\n # alias=\"kibana\"\n ## specify a list of one or more Kibana servers\n servers = [\"http://localhost:5601\"]\n\n ## Timeout for HTTP requests\n timeout = \"5s\"\n\n ## HTTP Basic Auth credentials\n # username = \"username\"\n # password = \"pa$$word\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "sensors", - "description": "Monitor sensors, requires lm-sensors package", - "config": "# Monitor sensors, requires lm-sensors package\n[[inputs.sensors]]\n # alias=\"sensors\"\n ## Remove numbers from field names.\n ## If true, a field name like 'temp1_input' will be changed to 'temp_input'.\n # remove_numbers = true\n\n ## Timeout is the maximum amount of time that the sensors command can run.\n # timeout = \"5s\"\n\n" - }, - { - "type": "input", - "name": "synproxy", - "description": "Get synproxy counter statistics from procfs", - "config": "# Get synproxy counter statistics from procfs\n[[inputs.synproxy]]\n # alias=\"synproxy\"\n" - }, - { - "type": "input", - "name": "prometheus", - "description": "Read metrics from one or many prometheus clients", - "config": "# Read metrics from one or many prometheus clients\n[[inputs.prometheus]]\n # alias=\"prometheus\"\n ## An array of urls to scrape metrics from.\n urls = [\"http://localhost:9100/metrics\"]\n\n ## Metric version controls the mapping from Prometheus metrics into\n ## Telegraf metrics. 
When using the prometheus_client output, use the same\n ## value in both plugins to ensure metrics are round-tripped without\n ## modification.\n ##\n ## example: metric_version = 1; deprecated in 1.13\n ## metric_version = 2; recommended version\n # metric_version = 1\n\n ## Url tag name (tag containing scrapped url. optional, default is \"url\")\n # url_tag = \"scrapeUrl\"\n\n ## An array of Kubernetes services to scrape metrics from.\n # kubernetes_services = [\"http://my-service-dns.my-namespace:9100/metrics\"]\n\n ## Kubernetes config file to create client from.\n # kube_config = \"/path/to/kubernetes.config\"\n\n ## Scrape Kubernetes pods for the following prometheus annotations:\n ## - prometheus.io/scrape: Enable scraping for this pod\n ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to\n ## set this to 'https' \u0026 most likely set the tls config.\n ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.\n ## - prometheus.io/port: If port is not 9102 use this annotation\n # monitor_kubernetes_pods = true\n ## Restricts Kubernetes monitoring to a single namespace\n ## ex: monitor_kubernetes_pods_namespace = \"default\"\n # monitor_kubernetes_pods_namespace = \"\"\n\n ## Use bearer token for authorization. ('bearer_token' takes priority)\n # bearer_token = \"/path/to/bearer/token\"\n ## OR\n # bearer_token_string = \"abc_123\"\n\n ## HTTP Basic Authentication username and password. ('bearer_token' and\n ## 'bearer_token_string' take priority)\n # username = \"\"\n # password = \"\"\n\n ## Specify timeout duration for slower prometheus clients (default is 3s)\n # response_timeout = \"3s\"\n\n ## Optional TLS Config\n # tls_ca = /path/to/cafile\n # tls_cert = /path/to/certfile\n # tls_key = /path/to/keyfile\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "cisco_telemetry_mdt", - "description": "Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms", - "config": "# Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms\n[[inputs.cisco_telemetry_mdt]]\n # alias=\"cisco_telemetry_mdt\"\n ## Telemetry transport can be \"tcp\" or \"grpc\". 
TLS is only supported when\n ## using the grpc transport.\n transport = \"grpc\"\n\n ## Address and port to host telemetry listener\n service_address = \":57000\"\n\n ## Enable TLS; grpc transport only.\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n\n ## Enable TLS client authentication and define allowed CA certificates; grpc\n ## transport only.\n # tls_allowed_cacerts = [\"/etc/telegraf/clientca.pem\"]\n\n ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags\n # embedded_tags = [\"Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name\"]\n\n ## Define aliases to map telemetry encoding paths to simple measurement names\n [inputs.cisco_telemetry_mdt.aliases]\n ifstats = \"ietf-interfaces:interfaces-state/interface/statistics\"\n\n" - }, - { - "type": "input", - "name": "fail2ban", - "description": "Read metrics from fail2ban.", - "config": "# Read metrics from fail2ban.\n[[inputs.fail2ban]]\n # alias=\"fail2ban\"\n ## Use sudo to run fail2ban-client\n use_sudo = false\n\n" - }, - { - "type": "input", - "name": "nsq_consumer", - "description": "Read NSQ topic for metrics.", - "config": "# Read NSQ topic for metrics.\n[[inputs.nsq_consumer]]\n # alias=\"nsq_consumer\"\n ## Server option still works but is deprecated, we just prepend it to the nsqd array.\n # server = \"localhost:4150\"\n\n ## An array representing the NSQD TCP HTTP Endpoints\n nsqd = [\"localhost:4150\"]\n\n ## An array representing the NSQLookupd HTTP Endpoints\n nsqlookupd = [\"localhost:4161\"]\n topic = \"telegraf\"\n channel = \"consumer\"\n max_in_flight = 100\n\n ## Maximum messages to read from the broker that have not been written by an\n ## output. 
For best throughput set based on the number of metrics within\n ## each message and the size of the output's metric_batch_size.\n ##\n ## For example, if each message from the queue contains 10 metrics and the\n ## output metric_batch_size is 1000, setting this to 100 will ensure that a\n ## full batch is collected and the write is triggered immediately without\n ## waiting until the next flush_interval.\n # max_undelivered_messages = 1000\n\n ## Data format to consume.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n\n" - }, - { - "type": "input", - "name": "opensmtpd", - "description": "A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver ", - "config": "# A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver \n[[inputs.opensmtpd]]\n # alias=\"opensmtpd\"\n ## If running as a restricted user you can prepend sudo for additional access:\n #use_sudo = false\n\n ## The default location of the smtpctl binary can be overridden with:\n binary = \"/usr/sbin/smtpctl\"\n\n ## The default timeout of 1000ms can be overriden with (in milliseconds):\n timeout = 1000\n\n" - }, - { - "type": "input", - "name": "postgresql", - "description": "Read metrics from one or many postgresql servers", - "config": "# Read metrics from one or many postgresql servers\n[[inputs.postgresql]]\n # alias=\"postgresql\"\n ## specify address via a url matching:\n ## postgres://[pqgotest[:password]]@localhost[/dbname]\\\n ## ?sslmode=[disable|verify-ca|verify-full]\n ## or a simple string:\n ## host=localhost user=pqotest password=... sslmode=... dbname=app_production\n ##\n ## All connection parameters are optional.\n ##\n ## Without the dbname parameter, the driver will default to a database\n ## with the same name as the user. This dbname is just for instantiating a\n ## connection with the server and doesn't restrict the databases we are trying\n ## to grab metrics for.\n ##\n address = \"host=localhost user=postgres sslmode=disable\"\n ## A custom name for the database that will be used as the \"server\" tag in the\n ## measurement output. If not specified, a default one generated from\n ## the connection address is used.\n # outputaddress = \"db01\"\n\n ## connection configuration.\n ## maxlifetime - specify the maximum lifetime of a connection.\n ## default is forever (0s)\n max_lifetime = \"0s\"\n\n ## A list of databases to explicitly ignore. If not specified, metrics for all\n ## databases are gathered. Do NOT use with the 'databases' option.\n # ignored_databases = [\"postgres\", \"template0\", \"template1\"]\n\n ## A list of databases to pull metrics about. If not specified, metrics for all\n ## databases are gathered. 
Do NOT use with the 'ignored_databases' option.\n # databases = [\"app_production\", \"testing\"]\n\n" - }, - { - "type": "input", - "name": "apcupsd", - "description": "Monitor APC UPSes connected to apcupsd", - "config": "# Monitor APC UPSes connected to apcupsd\n[[inputs.apcupsd]]\n # alias=\"apcupsd\"\n # A list of running apcupsd server to connect to.\n # If not provided will default to tcp://127.0.0.1:3551\n servers = [\"tcp://127.0.0.1:3551\"]\n\n ## Timeout for dialing server.\n timeout = \"5s\"\n\n" - }, - { - "type": "input", - "name": "phpfpm", - "description": "Read metrics of phpfpm, via HTTP status page or socket", - "config": "# Read metrics of phpfpm, via HTTP status page or socket\n[[inputs.phpfpm]]\n # alias=\"phpfpm\"\n ## An array of addresses to gather stats about. Specify an ip or hostname\n ## with optional port and path\n ##\n ## Plugin can be configured in three modes (either can be used):\n ## - http: the URL must start with http:// or https://, ie:\n ## \"http://localhost/status\"\n ## \"http://192.168.130.1/status?full\"\n ##\n ## - unixsocket: path to fpm socket, ie:\n ## \"/var/run/php5-fpm.sock\"\n ## or using a custom fpm status path:\n ## \"/var/run/php5-fpm.sock:fpm-custom-status-path\"\n ##\n ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:\n ## \"fcgi://10.0.0.12:9000/status\"\n ## \"cgi://10.0.10.12:9001/status\"\n ##\n ## Example of multiple gathering from local socket and remote host\n ## urls = [\"http://192.168.1.20/status\", \"/tmp/fpm.sock\"]\n urls = [\"http://localhost/status\"]\n\n ## Duration allowed to complete HTTP requests.\n # timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "smart", - "description": "Read metrics from storage devices supporting S.M.A.R.T.", - "config": "# Read metrics from storage devices supporting S.M.A.R.T.\n[[inputs.smart]]\n # alias=\"smart\"\n ## Optionally specify the path to the smartctl executable\n # path = \"/usr/bin/smartctl\"\n\n ## On most platforms smartctl requires root access.\n ## Setting 'use_sudo' to true will make use of sudo to run smartctl.\n ## Sudo must be configured to to allow the telegraf user to run smartctl\n ## without a password.\n # use_sudo = false\n\n ## Skip checking disks in this power mode. Defaults to\n ## \"standby\" to not wake up disks that have stoped rotating.\n ## See --nocheck in the man pages for smartctl.\n ## smartctl version 5.41 and 5.42 have faulty detection of\n ## power mode and might require changing this value to\n ## \"never\" depending on your disks.\n # nocheck = \"standby\"\n\n ## Gather all returned S.M.A.R.T. attribute metrics and the detailed\n ## information from each drive into the 'smart_attribute' measurement.\n # attributes = false\n\n ## Optionally specify devices to exclude from reporting.\n # excludes = [ \"/dev/pass6\" ]\n\n ## Optionally specify devices and device type, if unset\n ## a scan (smartctl --scan) for S.M.A.R.T. 
devices will\n ## done and all found will be included except for the\n ## excluded in excludes.\n # devices = [ \"/dev/ada0 -d atacam\" ]\n\n ## Timeout for the smartctl command to complete.\n # timeout = \"30s\"\n\n" - }, - { - "type": "input", - "name": "swap", - "description": "Read metrics about swap memory usage", - "config": "# Read metrics about swap memory usage\n[[inputs.swap]]\n # alias=\"swap\"\n" - }, - { - "type": "input", - "name": "zookeeper", - "description": "Reads 'mntr' stats from one or many zookeeper servers", - "config": "# Reads 'mntr' stats from one or many zookeeper servers\n[[inputs.zookeeper]]\n # alias=\"zookeeper\"\n ## An array of address to gather stats about. Specify an ip or hostname\n ## with port. ie localhost:2181, 10.0.0.1:2181, etc.\n\n ## If no servers are specified, then localhost is used as the host.\n ## If no port is specified, 2181 is used\n servers = [\":2181\"]\n\n ## Timeout for metric collections from all servers. Minimum timeout is \"1s\".\n # timeout = \"5s\"\n\n ## Optional TLS Config\n # enable_tls = true\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## If false, skip chain \u0026 host verification\n # insecure_skip_verify = true\n\n" - }, - { - "type": "input", - "name": "disque", - "description": "Read metrics from one or many disque servers", - "config": "# Read metrics from one or many disque servers\n[[inputs.disque]]\n # alias=\"disque\"\n ## An array of URI to gather stats about. Specify an ip or hostname\n ## with optional port and password.\n ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.\n ## If no servers are specified, then localhost is used as the host.\n servers = [\"localhost\"]\n\n" - }, - { - "type": "input", - "name": "hddtemp", - "description": "Monitor disks' temperatures using hddtemp", - "config": "# Monitor disks' temperatures using hddtemp\n[[inputs.hddtemp]]\n # alias=\"hddtemp\"\n ## By default, telegraf gathers temps data from all disks detected by the\n ## hddtemp.\n ##\n ## Only collect temps from the selected disks.\n ##\n ## A * as the device name will return the temperature values of all disks.\n ##\n # address = \"127.0.0.1:7634\"\n # devices = [\"sda\", \"*\"]\n\n" - }, - { - "type": "input", - "name": "interrupts", - "description": "This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.", - "config": "# This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.\n[[inputs.interrupts]]\n # alias=\"interrupts\"\n ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is\n ## stored as a field.\n ##\n ## The default is false for backwards compatibility, and will be changed to\n ## true in a future version. 
It is recommended to set to true on new\n ## deployments.\n # cpu_as_tag = false\n\n ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.\n # [inputs.interrupts.tagdrop]\n # irq = [ \"NET_RX\", \"TASKLET\" ]\n\n" - }, - { - "type": "input", - "name": "jenkins", - "description": "Read jobs and cluster metrics from Jenkins instances", - "config": "# Read jobs and cluster metrics from Jenkins instances\n[[inputs.jenkins]]\n # alias=\"jenkins\"\n ## The Jenkins URL in the format \"schema://host:port\"\n url = \"http://my-jenkins-instance:8080\"\n # username = \"admin\"\n # password = \"admin\"\n\n ## Set response_timeout\n response_timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use SSL but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## Optional Max Job Build Age filter\n ## Default 1 hour, ignore builds older than max_build_age\n # max_build_age = \"1h\"\n\n ## Optional Sub Job Depth filter\n ## Jenkins can have unlimited layer of sub jobs\n ## This config will limit the layers of pulling, default value 0 means\n ## unlimited pulling until no more sub jobs\n # max_subjob_depth = 0\n\n ## Optional Sub Job Per Layer\n ## In workflow-multibranch-plugin, each branch will be created as a sub job.\n ## This config will limit to call only the lasted branches in each layer, \n ## empty will use default value 10\n # max_subjob_per_layer = 10\n\n ## Jobs to exclude from gathering\n # job_exclude = [ \"job1\", \"job2/subjob1/subjob2\", \"job3/*\"]\n\n ## Nodes to exclude from gathering\n # node_exclude = [ \"node1\", \"node2\" ]\n\n ## Worker pool for jenkins plugin only\n ## Empty this field will use default value 5\n # max_connections = 5\n\n" - }, - { - "type": "input", - "name": "nvidia_smi", - "description": "Pulls statistics from nvidia GPUs attached to the host", - "config": "# Pulls statistics from nvidia GPUs attached to the host\n[[inputs.nvidia_smi]]\n # alias=\"nvidia_smi\"\n ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath\n # bin_path = \"/usr/bin/nvidia-smi\"\n\n ## Optional: timeout for GPU polling\n # timeout = \"5s\"\n\n" - }, - { - "type": "input", - "name": "ceph", - "description": "Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.", - "config": "# Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.\n[[inputs.ceph]]\n # alias=\"ceph\"\n ## This is the recommended interval to poll. 
Too frequent and you will lose\n ## data points due to timeouts during rebalancing and recovery\n interval = '1m'\n\n ## All configuration values are optional, defaults are shown below\n\n ## location of ceph binary\n ceph_binary = \"/usr/bin/ceph\"\n\n ## directory in which to look for socket files\n socket_dir = \"/var/run/ceph\"\n\n ## prefix of MON and OSD socket files, used to determine socket type\n mon_prefix = \"ceph-mon\"\n osd_prefix = \"ceph-osd\"\n\n ## suffix used to identify socket files\n socket_suffix = \"asok\"\n\n ## Ceph user to authenticate as\n ceph_user = \"client.admin\"\n\n ## Ceph configuration to use to locate the cluster\n ceph_config = \"/etc/ceph/ceph.conf\"\n\n ## Whether to gather statistics via the admin socket\n gather_admin_socket_stats = true\n\n ## Whether to gather statistics via ceph commands\n gather_cluster_stats = false\n\n" - }, - { - "type": "input", - "name": "dmcache", - "description": "Provide a native collection for dmsetup based statistics for dm-cache", - "config": "# Provide a native collection for dmsetup based statistics for dm-cache\n[[inputs.dmcache]]\n # alias=\"dmcache\"\n ## Whether to report per-device stats or not\n per_device = true\n\n" - }, - { - "type": "input", - "name": "net_response", - "description": "Collect response time of a TCP or UDP connection", - "config": "# Collect response time of a TCP or UDP connection\n[[inputs.net_response]]\n # alias=\"net_response\"\n ## Protocol, must be \"tcp\" or \"udp\"\n ## NOTE: because the \"udp\" protocol does not respond to requests, it requires\n ## a send/expect string pair (see below).\n protocol = \"tcp\"\n ## Server address (default localhost)\n address = \"localhost:80\"\n\n ## Set timeout\n # timeout = \"1s\"\n\n ## Set read timeout (only used if expecting a response)\n # read_timeout = \"1s\"\n\n ## The following options are required for UDP checks. For TCP, they are\n ## optional. The plugin will send the given string to the server and then\n ## expect to receive the given 'expect' string back.\n ## string sent to the server\n # send = \"ssh\"\n ## expected string in answer\n # expect = \"ssh\"\n\n ## Uncomment to remove deprecated fields\n # fielddrop = [\"result_type\", \"string_found\"]\n\n" - }, - { - "type": "input", - "name": "puppetagent", - "description": "Reads last_run_summary.yaml file and converts to measurements", - "config": "# Reads last_run_summary.yaml file and converts to measurements\n[[inputs.puppetagent]]\n # alias=\"puppetagent\"\n ## Location of puppet last run summary file\n location = \"/var/lib/puppet/state/last_run_summary.yaml\"\n\n" - }, - { - "type": "input", - "name": "zfs", - "description": "Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools", - "config": "# Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools\n[[inputs.zfs]]\n # alias=\"zfs\"\n ## ZFS kstat path. 
Ignored on FreeBSD\n ## If not specified, then default is:\n # kstatPath = \"/proc/spl/kstat/zfs\"\n\n ## By default, telegraf gather all zfs stats\n ## If not specified, then default is:\n # kstatMetrics = [\"arcstats\", \"zfetchstats\", \"vdev_cache_stats\"]\n ## For Linux, the default is:\n # kstatMetrics = [\"abdstats\", \"arcstats\", \"dnodestats\", \"dbufcachestats\",\n # \"dmu_tx\", \"fm\", \"vdev_mirror_stats\", \"zfetchstats\", \"zil\"]\n ## By default, don't gather zpool stats\n # poolMetrics = false\n\n" - }, - { - "type": "input", - "name": "aerospike", - "description": "Read stats from aerospike server(s)", - "config": "# Read stats from aerospike server(s)\n[[inputs.aerospike]]\n # alias=\"aerospike\"\n ## Aerospike servers to connect to (with port)\n ## This plugin will query all namespaces the aerospike\n ## server has configured and get stats for them.\n servers = [\"localhost:3000\"]\n\n # username = \"telegraf\"\n # password = \"pa$$word\"\n\n ## Optional TLS Config\n # enable_tls = false\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## If false, skip chain \u0026 host verification\n # insecure_skip_verify = true\n \n" - }, - { - "type": "input", - "name": "exec", - "description": "Read metrics from one or more commands that can output to stdout", - "config": "# Read metrics from one or more commands that can output to stdout\n[[inputs.exec]]\n # alias=\"exec\"\n ## Commands array\n commands = [\n \"/tmp/test.sh\",\n \"/usr/bin/mycollector --foo=bar\",\n \"/tmp/collect_*.sh\"\n ]\n\n ## Timeout for each command to complete.\n timeout = \"5s\"\n\n ## measurement name suffix (for separating different commands)\n name_suffix = \"_mycollector\"\n\n ## Data format to consume.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n\n" - }, - { - "type": "input", - "name": "influxdb", - "description": "Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints", - "config": "# Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints\n[[inputs.influxdb]]\n # alias=\"influxdb\"\n ## Works with InfluxDB debug endpoints out of the box,\n ## but other services can use this format too.\n ## See the influxdb plugin's README for more details.\n\n ## Multiple URLs from which to read InfluxDB-formatted JSON\n ## Default is \"http://localhost:8086/debug/vars\".\n urls = [\n \"http://localhost:8086/debug/vars\"\n ]\n\n ## Username and password to send using HTTP Basic Authentication.\n # username = \"\"\n # password = \"\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## http request \u0026 header timeout\n timeout = \"5s\"\n\n" - }, - { - "type": "input", - "name": "nginx", - "description": "Read Nginx's basic status information (ngx_http_stub_status_module)", - "config": "# Read Nginx's basic status information (ngx_http_stub_status_module)\n[[inputs.nginx]]\n # alias=\"nginx\"\n # An array of Nginx stub_status URI to gather stats.\n urls = [\"http://localhost/server_status\"]\n\n ## Optional TLS Config\n tls_ca = \"/etc/telegraf/ca.pem\"\n tls_cert = \"/etc/telegraf/cert.cer\"\n tls_key = \"/etc/telegraf/key.key\"\n ## Use TLS but skip 
chain \u0026 host verification\n insecure_skip_verify = false\n\n # HTTP response timeout (default: 5s)\n response_timeout = \"5s\"\n\n" - }, - { - "type": "input", - "name": "ping", - "description": "Ping given url(s) and return statistics", - "config": "# Ping given url(s) and return statistics\n[[inputs.ping]]\n # alias=\"ping\"\n ## Hosts to send ping packets to.\n urls = [\"example.org\"]\n\n ## Method used for sending pings, can be either \"exec\" or \"native\". When set\n ## to \"exec\" the systems ping command will be executed. When set to \"native\"\n ## the plugin will send pings directly.\n ##\n ## While the default is \"exec\" for backwards compatibility, new deployments\n ## are encouraged to use the \"native\" method for improved compatibility and\n ## performance.\n # method = \"exec\"\n\n ## Number of ping packets to send per interval. Corresponds to the \"-c\"\n ## option of the ping command.\n # count = 1\n\n ## Time to wait between sending ping packets in seconds. Operates like the\n ## \"-i\" option of the ping command.\n # ping_interval = 1.0\n\n ## If set, the time to wait for a ping response in seconds. Operates like\n ## the \"-W\" option of the ping command.\n # timeout = 1.0\n\n ## If set, the total ping deadline, in seconds. Operates like the -w option\n ## of the ping command.\n # deadline = 10\n\n ## Interface or source address to send ping from. Operates like the -I or -S\n ## option of the ping command.\n # interface = \"\"\n\n ## Specify the ping executable binary.\n # binary = \"ping\"\n\n ## Arguments for ping command. When arguments is not empty, the command from\n ## the binary option will be used and other options (ping_interval, timeout,\n ## etc) will be ignored.\n # arguments = [\"-c\", \"3\"]\n\n ## Use only IPv6 addresses when resolving a hostname.\n # ipv6 = false\n\n" - }, - { - "type": "input", - "name": "stackdriver", - "description": "Gather timeseries from Google Cloud Platform v3 monitoring API", - "config": "# Gather timeseries from Google Cloud Platform v3 monitoring API\n[[inputs.stackdriver]]\n # alias=\"stackdriver\"\n ## GCP Project\n project = \"erudite-bloom-151019\"\n\n ## Include timeseries that start with the given metric type.\n metric_type_prefix_include = [\n \"compute.googleapis.com/\",\n ]\n\n ## Exclude timeseries that start with the given metric type.\n # metric_type_prefix_exclude = []\n\n ## Many metrics are updated once per minute; it is recommended to override\n ## the agent level interval with a value of 1m or greater.\n interval = \"1m\"\n\n ## Maximum number of API calls to make per second. The quota for accounts\n ## varies, it can be viewed on the API dashboard:\n ## https://cloud.google.com/monitoring/quotas#quotas_and_limits\n # rate_limit = 14\n\n ## The delay and window options control the number of points selected on\n ## each gather. When set, metrics are gathered between:\n ## start: now() - delay - window\n ## end: now() - delay\n #\n ## Collection delay; if set too low metrics may not yet be available.\n # delay = \"5m\"\n #\n ## If unset, the window will start at 1m and be updated dynamically to span\n ## the time between calls (approximately the length of the plugin interval).\n # window = \"1m\"\n\n ## TTL for cached list of metric types. 
This is the maximum amount of time\n ## it may take to discover new metrics.\n # cache_ttl = \"1h\"\n\n ## If true, raw bucket counts are collected for distribution value types.\n ## For a more lightweight collection, you may wish to disable and use\n ## distribution_aggregation_aligners instead.\n # gather_raw_distribution_buckets = true\n\n ## Aggregate functions to be used for metrics whose value type is\n ## distribution. These aggregate values are recorded in in addition to raw\n ## bucket counts; if they are enabled.\n ##\n ## For a list of aligner strings see:\n ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner\n # distribution_aggregation_aligners = [\n # \t\"ALIGN_PERCENTILE_99\",\n # \t\"ALIGN_PERCENTILE_95\",\n # \t\"ALIGN_PERCENTILE_50\",\n # ]\n\n ## Filters can be added to reduce the number of time series matched. All\n ## functions are supported: starts_with, ends_with, has_substring, and\n ## one_of. Only the '=' operator is supported.\n ##\n ## The logical operators when combining filters are defined statically using\n ## the following values:\n ## filter ::= \u003cresource_labels\u003e {AND \u003cmetric_labels\u003e}\n ## resource_labels ::= \u003cresource_labels\u003e {OR \u003cresource_label\u003e}\n ## metric_labels ::= \u003cmetric_labels\u003e {OR \u003cmetric_label\u003e}\n ##\n ## For more details, see https://cloud.google.com/monitoring/api/v3/filters\n #\n ## Resource labels refine the time series selection with the following expression:\n ## resource.labels.\u003ckey\u003e = \u003cvalue\u003e\n # [[inputs.stackdriver.filter.resource_labels]]\n # key = \"instance_name\"\n # value = 'starts_with(\"localhost\")'\n #\n ## Metric labels refine the time series selection with the following expression:\n ## metric.labels.\u003ckey\u003e = \u003cvalue\u003e\n # [[inputs.stackdriver.filter.metric_labels]]\n # \t key = \"device_name\"\n # \t value = 'one_of(\"sda\", \"sdb\")'\n\n" - }, - { - "type": "input", - "name": "syslog", - "description": "Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587", - "config": "# Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587\n[[inputs.syslog]]\n # alias=\"syslog\"\n ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514\n ## Protocol, address and port to host the syslog receiver.\n ## If no host is specified, then localhost is used.\n ## If no port is specified, 6514 is used (RFC5425#section-4.1).\n server = \"tcp://:6514\"\n\n ## TLS Config\n # tls_allowed_cacerts = [\"/etc/telegraf/ca.pem\"]\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n\n ## Period between keep alive probes.\n ## 0 disables keep alive probes.\n ## Defaults to the OS configuration.\n ## Only applies to stream sockets (e.g. TCP).\n # keep_alive_period = \"5m\"\n\n ## Maximum number of concurrent connections (default = 0).\n ## 0 means unlimited.\n ## Only applies to stream sockets (e.g. 
TCP).\n # max_connections = 1024\n\n ## Read timeout is the maximum time allowed for reading a single message (default = 5s).\n ## 0 means unlimited.\n # read_timeout = \"5s\"\n\n ## The framing technique with which it is expected that messages are transported (default = \"octet-counting\").\n ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),\n ## or the non-transparent framing technique (RFC6587#section-3.4.2).\n ## Must be one of \"octet-counting\", \"non-transparent\".\n # framing = \"octet-counting\"\n\n ## The trailer to be expected in case of non-transparent framing (default = \"LF\").\n ## Must be one of \"LF\", or \"NUL\".\n # trailer = \"LF\"\n\n ## Whether to parse in best effort mode or not (default = false).\n ## By default best effort parsing is off.\n # best_effort = false\n\n ## Character to prepend to SD-PARAMs (default = \"_\").\n ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.\n ## Eg., [id1 name1=\"val1\" name2=\"val2\"][id2 name1=\"val1\" nameA=\"valA\"]\n ## For each combination a field is created.\n ## Its name is created concatenating identifier, sdparam_separator, and parameter name.\n # sdparam_separator = \"_\"\n\n" - }, - { - "type": "input", - "name": "activemq", - "description": "Gather ActiveMQ metrics", - "config": "# Gather ActiveMQ metrics\n[[inputs.activemq]]\n # alias=\"activemq\"\n ## ActiveMQ WebConsole URL\n url = \"http://127.0.0.1:8161\"\n\n ## Required ActiveMQ Endpoint\n ## deprecated in 1.11; use the url option\n # server = \"127.0.0.1\"\n # port = 8161\n\n ## Credentials for basic HTTP authentication\n # username = \"admin\"\n # password = \"admin\"\n\n ## Required ActiveMQ webadmin root path\n # webadmin = \"admin\"\n\n ## Maximum time to receive response.\n # response_timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n \n" - }, - { - "type": "input", - "name": "bind", - "description": "Read BIND nameserver XML statistics", - "config": "# Read BIND nameserver XML statistics\n[[inputs.bind]]\n # alias=\"bind\"\n ## An array of BIND XML statistics URI to gather stats.\n ## Default is \"http://localhost:8053/xml/v3\".\n # urls = [\"http://localhost:8053/xml/v3\"]\n # gather_memory_contexts = false\n # gather_views = false\n\n" - }, - { - "type": "input", - "name": "httpjson", - "description": "Read flattened metrics from one or more JSON HTTP endpoints", - "config": "# Read flattened metrics from one or more JSON HTTP endpoints\n[[inputs.httpjson]]\n # alias=\"httpjson\"\n ## NOTE This plugin only reads numerical measurements, strings and booleans\n ## will be ignored.\n\n ## Name for the service being polled. Will be appended to the name of the\n ## measurement e.g. 
httpjson_webserver_stats\n ##\n ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead.\n name = \"webserver_stats\"\n\n ## URL of each server in the service's cluster\n servers = [\n \"http://localhost:9999/stats/\",\n \"http://localhost:9998/stats/\",\n ]\n ## Set response_timeout (default 5 seconds)\n response_timeout = \"5s\"\n\n ## HTTP method to use: GET or POST (case-sensitive)\n method = \"GET\"\n\n ## List of tag names to extract from top-level of JSON server response\n # tag_keys = [\n # \"my_tag_1\",\n # \"my_tag_2\"\n # ]\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## HTTP parameters (all values must be strings). For \"GET\" requests, data\n ## will be included in the query. For \"POST\" requests, data will be included\n ## in the request body as \"x-www-form-urlencoded\".\n # [inputs.httpjson.parameters]\n # event_type = \"cpu_spike\"\n # threshold = \"0.75\"\n\n ## HTTP Headers (all values must be strings)\n # [inputs.httpjson.headers]\n # X-Auth-Token = \"my-xauth-token\"\n # apiVersion = \"v1\"\n\n" - }, - { - "type": "input", - "name": "kapacitor", - "description": "Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints", - "config": "# Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints\n[[inputs.kapacitor]]\n # alias=\"kapacitor\"\n ## Multiple URLs from which to read Kapacitor-formatted JSON\n ## Default is \"http://localhost:9092/kapacitor/v1/debug/vars\".\n urls = [\n \"http://localhost:9092/kapacitor/v1/debug/vars\"\n ]\n\n ## Time limit for http requests\n timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "multifile", - "description": "Aggregates the contents of multiple files into a single point", - "config": "# Aggregates the contents of multiple files into a single point\n[[inputs.multifile]]\n # alias=\"multifile\"\n ## Base directory where telegraf will look for files.\n ## Omit this option to use absolute paths.\n base_dir = \"/sys/bus/i2c/devices/1-0076/iio:device0\"\n\n ## If true, Telegraf discard all data when a single file can't be read.\n ## Else, Telegraf omits the field generated from this file.\n # fail_early = true\n\n ## Files to parse each interval.\n [[inputs.multifile.file]]\n file = \"in_pressure_input\"\n dest = \"pressure\"\n conversion = \"float\"\n [[inputs.multifile.file]]\n file = \"in_temp_input\"\n dest = \"temperature\"\n conversion = \"float(3)\"\n [[inputs.multifile.file]]\n file = \"in_humidityrelative_input\"\n dest = \"humidityrelative\"\n conversion = \"float(3)\"\n\n" - }, - { - "type": "input", - "name": "raindrops", - "description": "Read raindrops stats (raindrops - real-time stats for preforking Rack servers)", - "config": "# Read raindrops stats (raindrops - real-time stats for preforking Rack servers)\n[[inputs.raindrops]]\n # alias=\"raindrops\"\n ## An array of raindrops middleware URI to gather stats.\n urls = [\"http://localhost:8080/_raindrops\"]\n\n" - }, - { - "type": "input", - "name": "riak", - "description": "Read metrics one or many Riak servers", - "config": "# Read metrics one or many Riak servers\n[[inputs.riak]]\n # 
alias=\"riak\"\n # Specify a list of one or more riak http servers\n servers = [\"http://localhost:8098\"]\n\n" - }, - { - "type": "input", - "name": "socket_listener", - "description": "Generic socket listener capable of handling multiple socket types.", - "config": "# Generic socket listener capable of handling multiple socket types.\n[[inputs.socket_listener]]\n # alias=\"socket_listener\"\n ## URL to listen on\n # service_address = \"tcp://:8094\"\n # service_address = \"tcp://127.0.0.1:http\"\n # service_address = \"tcp4://:8094\"\n # service_address = \"tcp6://:8094\"\n # service_address = \"tcp6://[2001:db8::1]:8094\"\n # service_address = \"udp://:8094\"\n # service_address = \"udp4://:8094\"\n # service_address = \"udp6://:8094\"\n # service_address = \"unix:///tmp/telegraf.sock\"\n # service_address = \"unixgram:///tmp/telegraf.sock\"\n\n ## Change the file mode bits on unix sockets. These permissions may not be\n ## respected by some platforms, to safely restrict write permissions it is best\n ## to place the socket into a directory that has previously been created\n ## with the desired permissions.\n ## ex: socket_mode = \"777\"\n # socket_mode = \"\"\n\n ## Maximum number of concurrent connections.\n ## Only applies to stream sockets (e.g. TCP).\n ## 0 (default) is unlimited.\n # max_connections = 1024\n\n ## Read timeout.\n ## Only applies to stream sockets (e.g. TCP).\n ## 0 (default) is unlimited.\n # read_timeout = \"30s\"\n\n ## Optional TLS configuration.\n ## Only applies to stream sockets (e.g. TCP).\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Enables client authentication if set.\n # tls_allowed_cacerts = [\"/etc/telegraf/clientca.pem\"]\n\n ## Maximum socket buffer size (in bytes when no unit specified).\n ## For stream sockets, once the buffer fills up, the sender will start backing up.\n ## For datagram sockets, once the buffer fills up, metrics will start dropping.\n ## Defaults to the OS default.\n # read_buffer_size = \"64KiB\"\n\n ## Period between keep alive probes.\n ## Only applies to TCP sockets.\n ## 0 disables keep alive probes.\n ## Defaults to the OS configuration.\n # keep_alive_period = \"5m\"\n\n ## Data format to consume.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n # data_format = \"influx\"\n\n ## Content encoding for message payloads, can be set to \"gzip\" to or\n ## \"identity\" to apply no encoding.\n # content_encoding = \"identity\"\n\n" - }, - { - "type": "input", - "name": "cisco_telemetry_gnmi", - "description": "Cisco GNMI telemetry input plugin based on GNMI telemetry data produced in IOS XR", - "config": "# Cisco GNMI telemetry input plugin based on GNMI telemetry data produced in IOS XR\n[[inputs.cisco_telemetry_gnmi]]\n # alias=\"cisco_telemetry_gnmi\"\n ## Address and port of the GNMI GRPC server\n addresses = [\"10.49.234.114:57777\"]\n\n ## define credentials\n username = \"cisco\"\n password = \"cisco\"\n\n ## GNMI encoding requested (one of: \"proto\", \"json\", \"json_ietf\")\n # encoding = \"proto\"\n\n ## redial in case of failures after\n redial = \"10s\"\n\n ## enable client-side TLS and define CA to authenticate the device\n # enable_tls = true\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # insecure_skip_verify = true\n\n ## define client-side TLS certificate \u0026 key to authenticate to the device\n # tls_cert = 
\"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n\n ## GNMI subscription prefix (optional, can usually be left empty)\n ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths\n # origin = \"\"\n # prefix = \"\"\n # target = \"\"\n\n ## Define additional aliases to map telemetry encoding paths to simple measurement names\n #[inputs.cisco_telemetry_gnmi.aliases]\n # ifcounters = \"openconfig:/interfaces/interface/state/counters\"\n\n [[inputs.cisco_telemetry_gnmi.subscription]]\n ## Name of the measurement that will be emitted\n name = \"ifcounters\"\n\n ## Origin and path of the subscription\n ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths\n ##\n ## origin usually refers to a (YANG) data model implemented by the device\n ## and path to a specific substructe inside it that should be subscribed to (similar to an XPath)\n ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr\n origin = \"openconfig-interfaces\"\n path = \"/interfaces/interface/state/counters\"\n\n # Subscription mode (one of: \"target_defined\", \"sample\", \"on_change\") and interval\n subscription_mode = \"sample\"\n sample_interval = \"10s\"\n\n ## Suppress redundant transmissions when measured values are unchanged\n # suppress_redundant = false\n\n ## If suppression is enabled, send updates at least every X seconds anyway\n # heartbeat_interval = \"60s\"\n\n" - }, - { - "type": "input", - "name": "haproxy", - "description": "Read metrics of haproxy, via socket or csv stats page", - "config": "# Read metrics of haproxy, via socket or csv stats page\n[[inputs.haproxy]]\n # alias=\"haproxy\"\n ## An array of address to gather stats about. Specify an ip on hostname\n ## with optional port. ie localhost, 10.10.3.33:1936, etc.\n ## Make sure you specify the complete path to the stats endpoint\n ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats\n\n ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats\n servers = [\"http://myhaproxy.com:1936/haproxy?stats\"]\n\n ## Credentials for basic HTTP authentication\n # username = \"admin\"\n # password = \"admin\"\n\n ## You can also use local socket with standard wildcard globbing.\n ## Server address not starting with 'http' will be treated as a possible\n ## socket, so both examples below are valid.\n # servers = [\"socket:/run/haproxy/admin.sock\", \"/run/haproxy/*.sock\"]\n\n ## By default, some of the fields are renamed from what haproxy calls them.\n ## Setting this option to true results in the plugin keeping the original\n ## field names.\n # keep_field_names = false\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "kubernetes", - "description": "Read metrics from the kubernetes kubelet api", - "config": "# Read metrics from the kubernetes kubelet api\n[[inputs.kubernetes]]\n # alias=\"kubernetes\"\n ## URL for the kubelet\n url = \"http://127.0.0.1:10255\"\n\n ## Use bearer token for authorization. 
('bearer_token' takes priority)\n ## If both of these are empty, we'll use the default serviceaccount:\n ## at: /run/secrets/kubernetes.io/serviceaccount/token\n # bearer_token = \"/path/to/bearer/token\"\n ## OR\n # bearer_token_string = \"abc_123\"\n\n ## Set response_timeout (default 5 seconds)\n # response_timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = /path/to/cafile\n # tls_cert = /path/to/certfile\n # tls_key = /path/to/keyfile\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "logstash", - "description": "Read metrics exposed by Logstash", - "config": "# Read metrics exposed by Logstash\n[[inputs.logstash]]\n # alias=\"logstash\"\n ## The URL of the exposed Logstash API endpoint.\n url = \"http://127.0.0.1:9600\"\n\n ## Use Logstash 5 single pipeline API, set to true when monitoring\n ## Logstash 5.\n # single_pipeline = false\n\n ## Enable optional collection components. Can contain\n ## \"pipelines\", \"process\", and \"jvm\".\n # collect = [\"pipelines\", \"process\", \"jvm\"]\n\n ## Timeout for HTTP requests.\n # timeout = \"5s\"\n\n ## Optional HTTP Basic Auth credentials.\n # username = \"username\"\n # password = \"pa$$word\"\n\n ## Optional TLS Config.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n\n ## Use TLS but skip chain \u0026 host verification.\n # insecure_skip_verify = false\n\n ## Optional HTTP headers.\n # [inputs.logstash.headers]\n # \"X-Special-Header\" = \"Special-Value\"\n\n" - }, - { - "type": "input", - "name": "nats_consumer", - "description": "Read metrics from NATS subject(s)", - "config": "# Read metrics from NATS subject(s)\n[[inputs.nats_consumer]]\n # alias=\"nats_consumer\"\n ## urls of NATS servers\n servers = [\"nats://localhost:4222\"]\n\n ## subject(s) to consume\n subjects = [\"telegraf\"]\n\n ## name a queue group\n queue_group = \"telegraf_consumers\"\n\n ## Optional credentials\n # username = \"\"\n # password = \"\"\n\n ## Use Transport Layer Security\n # secure = false\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## Sets the limits for pending msgs and bytes for each subscription\n ## These shouldn't need to be adjusted except in very high throughput scenarios\n # pending_message_limit = 65536\n # pending_bytes_limit = 67108864\n\n ## Maximum messages to read from the broker that have not been written by an\n ## output. 
For best throughput set based on the number of metrics within\n ## each message and the size of the output's metric_batch_size.\n ##\n ## For example, if each message from the queue contains 10 metrics and the\n ## output metric_batch_size is 1000, setting this to 100 will ensure that a\n ## full batch is collected and the write is triggered immediately without\n ## waiting until the next flush_interval.\n # max_undelivered_messages = 1000\n\n ## Data format to consume.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n\n" - }, - { - "type": "input", - "name": "trig", - "description": "Inserts sine and cosine waves for demonstration purposes", - "config": "# Inserts sine and cosine waves for demonstration purposes\n[[inputs.trig]]\n # alias=\"trig\"\n ## Set the amplitude\n amplitude = 10.0\n\n" - }, - { - "type": "input", - "name": "mqtt_consumer", - "description": "Read metrics from MQTT topic(s)", - "config": "# Read metrics from MQTT topic(s)\n[[inputs.mqtt_consumer]]\n # alias=\"mqtt_consumer\"\n ## MQTT broker URLs to be used. The format should be scheme://host:port,\n ## schema can be tcp, ssl, or ws.\n servers = [\"tcp://127.0.0.1:1883\"]\n\n ## Topics that will be subscribed to.\n topics = [\n \"telegraf/host01/cpu\",\n \"telegraf/+/mem\",\n \"sensors/#\",\n ]\n\n ## The message topic will be stored in a tag specified by this value. If set\n ## to the empty string no topic tag will be created.\n # topic_tag = \"topic\"\n\n ## QoS policy for messages\n ## 0 = at most once\n ## 1 = at least once\n ## 2 = exactly once\n ##\n ## When using a QoS of 1 or 2, you should enable persistent_session to allow\n ## resuming unacknowledged messages.\n # qos = 0\n\n ## Connection timeout for initial connection in seconds\n # connection_timeout = \"30s\"\n\n ## Maximum messages to read from the broker that have not been written by an\n ## output. For best throughput set based on the number of metrics within\n ## each message and the size of the output's metric_batch_size.\n ##\n ## For example, if each message from the queue contains 10 metrics and the\n ## output metric_batch_size is 1000, setting this to 100 will ensure that a\n ## full batch is collected and the write is triggered immediately without\n ## waiting until the next flush_interval.\n # max_undelivered_messages = 1000\n\n ## Persistent session disables clearing of the client session on connection.\n ## In order for this option to work you must also set client_id to identify\n ## the client. 
To receive messages that arrived while the client is offline,\n ## also set the qos option to 1 or 2 and don't forget to also set the QoS when\n ## publishing.\n # persistent_session = false\n\n ## If unset, a random client ID will be generated.\n # client_id = \"\"\n\n ## Username and password to connect MQTT server.\n # username = \"telegraf\"\n # password = \"metricsmetricsmetricsmetrics\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## Data format to consume.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n\n" - }, - { - "type": "input", - "name": "snmp", - "description": "Retrieves SNMP values from remote agents", - "config": "# Retrieves SNMP values from remote agents\n[[inputs.snmp]]\n # alias=\"snmp\"\n agents = [ \"127.0.0.1:161\" ]\n ## Timeout for each SNMP query.\n timeout = \"5s\"\n ## Number of retries to attempt within timeout.\n retries = 3\n ## SNMP version, values can be 1, 2, or 3\n version = 2\n\n ## SNMP community string.\n community = \"public\"\n\n ## The GETBULK max-repetitions parameter\n max_repetitions = 10\n\n ## SNMPv3 auth parameters\n #sec_name = \"myuser\"\n #auth_protocol = \"md5\" # Values: \"MD5\", \"SHA\", \"\"\n #auth_password = \"pass\"\n #sec_level = \"authNoPriv\" # Values: \"noAuthNoPriv\", \"authNoPriv\", \"authPriv\"\n #context_name = \"\"\n #priv_protocol = \"\" # Values: \"DES\", \"AES\", \"\"\n #priv_password = \"\"\n\n ## measurement name\n name = \"system\"\n [[inputs.snmp.field]]\n name = \"hostname\"\n oid = \".1.0.0.1.1\"\n [[inputs.snmp.field]]\n name = \"uptime\"\n oid = \".1.0.0.1.2\"\n [[inputs.snmp.field]]\n name = \"load\"\n oid = \".1.0.0.1.3\"\n [[inputs.snmp.field]]\n oid = \"HOST-RESOURCES-MIB::hrMemorySize\"\n\n [[inputs.snmp.table]]\n ## measurement name\n name = \"remote_servers\"\n inherit_tags = [ \"hostname\" ]\n [[inputs.snmp.table.field]]\n name = \"server\"\n oid = \".1.0.0.0.1.0\"\n is_tag = true\n [[inputs.snmp.table.field]]\n name = \"connections\"\n oid = \".1.0.0.0.1.1\"\n [[inputs.snmp.table.field]]\n name = \"latency\"\n oid = \".1.0.0.0.1.2\"\n\n [[inputs.snmp.table]]\n ## auto populate table's fields using the MIB\n oid = \"HOST-RESOURCES-MIB::hrNetworkTable\"\n\n" - }, - { - "type": "input", - "name": "teamspeak", - "description": "Reads metrics from a Teamspeak 3 Server via ServerQuery", - "config": "# Reads metrics from a Teamspeak 3 Server via ServerQuery\n[[inputs.teamspeak]]\n # alias=\"teamspeak\"\n ## Server address for Teamspeak 3 ServerQuery\n # server = \"127.0.0.1:10011\"\n ## Username for ServerQuery\n username = \"serverqueryuser\"\n ## Password for ServerQuery\n password = \"secret\"\n ## Array of virtual servers\n # virtual_servers = [1]\n\n" - }, - { - "type": "input", - "name": "azure_storage_queue", - "description": "Gather Azure Storage Queue metrics", - "config": "# Gather Azure Storage Queue metrics\n[[inputs.azure_storage_queue]]\n # alias=\"azure_storage_queue\"\n ## Required Azure Storage Account name\n account_name = \"mystorageaccount\"\n\n ## Required Azure Storage Account access key\n account_key = \"storageaccountaccesskey\"\n\n ## Set to false to disable peeking age of oldest message (executes faster)\n # 
peek_oldest_message_age = true\n \n" - }, - { - "type": "input", - "name": "cpu", - "description": "Read metrics about cpu usage", - "config": "# Read metrics about cpu usage\n[[inputs.cpu]]\n # alias=\"cpu\"\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n\n" - }, - { - "type": "input", - "name": "dcos", - "description": "Input plugin for DC/OS metrics", - "config": "# Input plugin for DC/OS metrics\n[[inputs.dcos]]\n # alias=\"dcos\"\n ## The DC/OS cluster URL.\n cluster_url = \"https://dcos-ee-master-1\"\n\n ## The ID of the service account.\n service_account_id = \"telegraf\"\n ## The private key file for the service account.\n service_account_private_key = \"/etc/telegraf/telegraf-sa-key.pem\"\n\n ## Path containing login token. If set, will read on every gather.\n # token_file = \"/home/dcos/.dcos/token\"\n\n ## In all filter options if both include and exclude are empty all items\n ## will be collected. Arrays may contain glob patterns.\n ##\n ## Node IDs to collect metrics from. If a node is excluded, no metrics will\n ## be collected for its containers or apps.\n # node_include = []\n # node_exclude = []\n ## Container IDs to collect container metrics from.\n # container_include = []\n # container_exclude = []\n ## Container IDs to collect app metrics from.\n # app_include = []\n # app_exclude = []\n\n ## Maximum concurrent connections to the cluster.\n # max_connections = 10\n ## Maximum time to receive a response from cluster.\n # response_timeout = \"20s\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## If false, skip chain \u0026 host verification\n # insecure_skip_verify = true\n\n ## Recommended filtering to reduce series cardinality.\n # [inputs.dcos.tagdrop]\n # path = [\"/var/lib/mesos/slave/slaves/*\"]\n\n" - }, - { - "type": "input", - "name": "http_response", - "description": "HTTP/HTTPS request given an address a method and a timeout", - "config": "# HTTP/HTTPS request given an address a method and a timeout\n[[inputs.http_response]]\n # alias=\"http_response\"\n ## Deprecated in 1.12, use 'urls'\n ## Server address (default http://localhost)\n # address = \"http://localhost\"\n\n ## List of urls to query.\n # urls = [\"http://localhost\"]\n\n ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set)\n # http_proxy = \"http://localhost:8888\"\n\n ## Set response_timeout (default 5 seconds)\n # response_timeout = \"5s\"\n\n ## HTTP Request Method\n # method = \"GET\"\n\n ## Whether to follow redirects from the server (defaults to false)\n # follow_redirects = false\n\n ## Optional HTTP Request Body\n # body = '''\n # {'fake':'data'}\n # '''\n\n ## Optional substring or regex match in body of the response\n # response_string_match = \"\\\"service_status\\\": \\\"up\\\"\"\n # response_string_match = \"ok\"\n # response_string_match = \"\\\".*_status\\\".?:.?\\\"up\\\"\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## HTTP Request Headers (all values must be strings)\n # [inputs.http_response.headers]\n # Host = 
\"github.com\"\n\n ## Interface to use when dialing an address\n # interface = \"eth0\"\n\n" - }, - { - "type": "input", - "name": "mongodb", - "description": "Read metrics from one or many MongoDB servers", - "config": "# Read metrics from one or many MongoDB servers\n[[inputs.mongodb]]\n # alias=\"mongodb\"\n ## An array of URLs of the form:\n ## \"mongodb://\" [user \":\" pass \"@\"] host [ \":\" port]\n ## For example:\n ## mongodb://user:auth_key@10.10.3.30:27017,\n ## mongodb://10.10.3.33:18832,\n servers = [\"mongodb://127.0.0.1:27017\"]\n\n ## When true, collect per database stats\n # gather_perdb_stats = false\n\n ## When true, collect per collection stats\n # gather_col_stats = false\n\n ## List of db where collections stats are collected\n ## If empty, all db are concerned\n # col_stats_dbs = [\"local\"]\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "unbound", - "description": "A plugin to collect stats from the Unbound DNS resolver", - "config": "# A plugin to collect stats from the Unbound DNS resolver\n[[inputs.unbound]]\n # alias=\"unbound\"\n ## Address of server to connect to, read from unbound conf default, optionally ':port'\n ## Will lookup IP if given a hostname\n server = \"127.0.0.1:8953\"\n\n ## If running as a restricted user you can prepend sudo for additional access:\n # use_sudo = false\n\n ## The default location of the unbound-control binary can be overridden with:\n # binary = \"/usr/sbin/unbound-control\"\n\n ## The default timeout of 1s can be overriden with:\n # timeout = \"1s\"\n\n ## When set to true, thread metrics are tagged with the thread id.\n ##\n ## The default is false for backwards compatibility, and will be changed to\n ## true in a future version. 
It is recommended to set to true on new\n ## deployments.\n thread_as_tag = false\n\n" - }, - { - "type": "input", - "name": "jolokia2_agent", - "description": "Read JMX metrics from a Jolokia REST agent endpoint", - "config": "# Read JMX metrics from a Jolokia REST agent endpoint\n[[inputs.jolokia2_agent]]\n # alias=\"jolokia2_agent\"\n # default_tag_prefix = \"\"\n # default_field_prefix = \"\"\n # default_field_separator = \".\"\n\n # Add agents URLs to query\n urls = [\"http://localhost:8080/jolokia\"]\n # username = \"\"\n # password = \"\"\n # response_timeout = \"5s\"\n\n ## Optional TLS config\n # tls_ca = \"/var/private/ca.pem\"\n # tls_cert = \"/var/private/client.pem\"\n # tls_key = \"/var/private/client-key.pem\"\n # insecure_skip_verify = false\n\n ## Add metrics to read\n [[inputs.jolokia2_agent.metric]]\n name = \"java_runtime\"\n mbean = \"java.lang:type=Runtime\"\n paths = [\"Uptime\"]\n\n" - }, - { - "type": "input", - "name": "jolokia2_proxy", - "description": "Read JMX metrics from a Jolokia REST proxy endpoint", - "config": "# Read JMX metrics from a Jolokia REST proxy endpoint\n[[inputs.jolokia2_proxy]]\n # alias=\"jolokia2_proxy\"\n # default_tag_prefix = \"\"\n # default_field_prefix = \"\"\n # default_field_separator = \".\"\n\n ## Proxy agent\n url = \"http://localhost:8080/jolokia\"\n # username = \"\"\n # password = \"\"\n # response_timeout = \"5s\"\n\n ## Optional TLS config\n # tls_ca = \"/var/private/ca.pem\"\n # tls_cert = \"/var/private/client.pem\"\n # tls_key = \"/var/private/client-key.pem\"\n # insecure_skip_verify = false\n\n ## Add proxy targets to query\n # default_target_username = \"\"\n # default_target_password = \"\"\n [[inputs.jolokia2_proxy.target]]\n url = \"service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi\"\n # username = \"\"\n # password = \"\"\n\n ## Add metrics to read\n [[inputs.jolokia2_proxy.metric]]\n name = \"java_runtime\"\n mbean = \"java.lang:type=Runtime\"\n paths = [\"Uptime\"]\n\n" - }, - { - "type": "input", - "name": "mailchimp", - "description": "Gathers metrics from the /3.0/reports MailChimp API", - "config": "# Gathers metrics from the /3.0/reports MailChimp API\n[[inputs.mailchimp]]\n # alias=\"mailchimp\"\n ## MailChimp API key\n ## get from https://admin.mailchimp.com/account/api/\n api_key = \"\" # required\n ## Reports for campaigns sent more than days_old ago will not be collected.\n ## 0 means collect all.\n days_old = 0\n ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old\n # campaign_id = \"\"\n\n" - }, - { - "type": "input", - "name": "minecraft", - "description": "Collects scores from a Minecraft server's scoreboard using the RCON protocol", - "config": "# Collects scores from a Minecraft server's scoreboard using the RCON protocol\n[[inputs.minecraft]]\n # alias=\"minecraft\"\n ## Address of the Minecraft server.\n # server = \"localhost\"\n\n ## Server RCON Port.\n # port = \"25575\"\n\n ## Server RCON Password.\n password = \"\"\n\n ## Uncomment to remove deprecated metric components.\n # tagdrop = [\"server\"]\n\n" - }, - { - "type": "input", - "name": "solr", - "description": "Read stats from one or more Solr servers or cores", - "config": "# Read stats from one or more Solr servers or cores\n[[inputs.solr]]\n # alias=\"solr\"\n ## specify a list of one or more Solr servers\n servers = [\"http://localhost:8983\"]\n\n ## specify a list of one or more Solr cores (default - all)\n # cores = [\"main\"]\n\n ## Optional HTTP Basic Auth Credentials\n # username = 
\"username\"\n # password = \"pa$$word\"\n\n" - }, - { - "type": "input", - "name": "nginx_plus_api", - "description": "Read Nginx Plus Api documentation", - "config": "# Read Nginx Plus Api documentation\n[[inputs.nginx_plus_api]]\n # alias=\"nginx_plus_api\"\n ## An array of API URI to gather stats.\n urls = [\"http://localhost/api\"]\n\n # Nginx API version, default: 3\n # api_version = 3\n\n # HTTP response timeout (default: 5s)\n response_timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "nstat", - "description": "Collect kernel snmp counters and network interface statistics", - "config": "# Collect kernel snmp counters and network interface statistics\n[[inputs.nstat]]\n # alias=\"nstat\"\n ## file paths for proc files. If empty default paths will be used:\n ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6\n ## These can also be overridden with env variables, see README.\n proc_net_netstat = \"/proc/net/netstat\"\n proc_net_snmp = \"/proc/net/snmp\"\n proc_net_snmp6 = \"/proc/net/snmp6\"\n ## dump metrics with 0 values too\n dump_zeros = true\n\n" - }, - { - "type": "input", - "name": "openweathermap", - "description": "Read current weather and forecasts data from openweathermap.org", - "config": "# Read current weather and forecasts data from openweathermap.org\n[[inputs.openweathermap]]\n # alias=\"openweathermap\"\n ## OpenWeatherMap API key.\n app_id = \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\n\n ## City ID's to collect weather data from.\n city_id = [\"5391959\"]\n\n ## Language of the description field. Can be one of \"ar\", \"bg\",\n ## \"ca\", \"cz\", \"de\", \"el\", \"en\", \"fa\", \"fi\", \"fr\", \"gl\", \"hr\", \"hu\",\n ## \"it\", \"ja\", \"kr\", \"la\", \"lt\", \"mk\", \"nl\", \"pl\", \"pt\", \"ro\", \"ru\",\n ## \"se\", \"sk\", \"sl\", \"es\", \"tr\", \"ua\", \"vi\", \"zh_cn\", \"zh_tw\"\n # lang = \"en\"\n\n ## APIs to fetch; can contain \"weather\" or \"forecast\".\n fetch = [\"weather\", \"forecast\"]\n\n ## OpenWeatherMap base URL\n # base_url = \"https://api.openweathermap.org/\"\n\n ## Timeout for HTTP response.\n # response_timeout = \"5s\"\n\n ## Preferred unit system for temperature and wind speed. Can be one of\n ## \"metric\", \"imperial\", or \"standard\".\n # units = \"metric\"\n\n ## Query interval; OpenWeatherMap updates their weather data every 10\n ## minutes.\n interval = \"10m\"\n\n" - }, - { - "type": "input", - "name": "amqp_consumer", - "description": "AMQP consumer plugin", - "config": "# AMQP consumer plugin\n[[inputs.amqp_consumer]]\n # alias=\"amqp_consumer\"\n ## Broker to consume from.\n ## deprecated in 1.7; use the brokers option\n # url = \"amqp://localhost:5672/influxdb\"\n\n ## Brokers to consume from. If multiple brokers are specified a random broker\n ## will be selected anytime a connection is established. This can be\n ## helpful for load balancing when not using a dedicated load balancer.\n brokers = [\"amqp://localhost:5672/influxdb\"]\n\n ## Authentication credentials for the PLAIN auth_method.\n # username = \"\"\n # password = \"\"\n\n ## Name of the exchange to declare. 
If unset, no exchange will be declared.\n exchange = \"telegraf\"\n\n ## Exchange type; common types are \"direct\", \"fanout\", \"topic\", \"header\", \"x-consistent-hash\".\n # exchange_type = \"topic\"\n\n ## If true, exchange will be passively declared.\n # exchange_passive = false\n\n ## Exchange durability can be either \"transient\" or \"durable\".\n # exchange_durability = \"durable\"\n\n ## Additional exchange arguments.\n # exchange_arguments = { }\n # exchange_arguments = {\"hash_propery\" = \"timestamp\"}\n\n ## AMQP queue name.\n queue = \"telegraf\"\n\n ## AMQP queue durability can be \"transient\" or \"durable\".\n queue_durability = \"durable\"\n\n ## If true, queue will be passively declared.\n # queue_passive = false\n\n ## A binding between the exchange and queue using this binding key is\n ## created. If unset, no binding is created.\n binding_key = \"#\"\n\n ## Maximum number of messages server should give to the worker.\n # prefetch_count = 50\n\n ## Maximum messages to read from the broker that have not been written by an\n ## output. For best throughput set based on the number of metrics within\n ## each message and the size of the output's metric_batch_size.\n ##\n ## For example, if each message from the queue contains 10 metrics and the\n ## output metric_batch_size is 1000, setting this to 100 will ensure that a\n ## full batch is collected and the write is triggered immediately without\n ## waiting until the next flush_interval.\n # max_undelivered_messages = 1000\n\n ## Auth method. PLAIN and EXTERNAL are supported\n ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as\n ## described here: https://www.rabbitmq.com/plugins.html\n # auth_method = \"PLAIN\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## Content encoding for message payloads, can be set to \"gzip\" to or\n ## \"identity\" to apply no encoding.\n # content_encoding = \"identity\"\n\n ## Data format to consume.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n\n" - }, - { - "type": "input", - "name": "ethtool", - "description": "Returns ethtool statistics for given interfaces", - "config": "# Returns ethtool statistics for given interfaces\n[[inputs.ethtool]]\n # alias=\"ethtool\"\n ## List of interfaces to pull metrics for\n # interface_include = [\"eth0\"]\n\n ## List of interfaces to ignore when pulling metrics.\n # interface_exclude = [\"eth1\"]\n\n" - }, - { - "type": "input", - "name": "filestat", - "description": "Read stats about given file(s)", - "config": "# Read stats about given file(s)\n[[inputs.filestat]]\n # alias=\"filestat\"\n ## Files to gather stats about.\n ## These accept standard unix glob matching rules, but with the addition of\n ## ** as a \"super asterisk\". 
ie:\n ## \"/var/log/**.log\" -\u003e recursively find all .log files in /var/log\n ## \"/var/log/*/*.log\" -\u003e find all .log files with a parent dir in /var/log\n ## \"/var/log/apache.log\" -\u003e just tail the apache log file\n ##\n ## See https://github.com/gobwas/glob for more examples\n ##\n files = [\"/var/log/**.log\"]\n\n ## If true, read the entire file and calculate an md5 checksum.\n md5 = false\n\n" - }, - { - "type": "input", - "name": "kernel_vmstat", - "description": "Get kernel statistics from /proc/vmstat", - "config": "# Get kernel statistics from /proc/vmstat\n[[inputs.kernel_vmstat]]\n # alias=\"kernel_vmstat\"\n" - }, - { - "type": "input", - "name": "nginx_plus", - "description": "Read Nginx Plus' full status information (ngx_http_status_module)", - "config": "# Read Nginx Plus' full status information (ngx_http_status_module)\n[[inputs.nginx_plus]]\n # alias=\"nginx_plus\"\n ## An array of ngx_http_status_module or status URI to gather stats.\n urls = [\"http://localhost/status\"]\n\n # HTTP response timeout (default: 5s)\n response_timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "powerdns_recursor", - "description": "Read metrics from one or many PowerDNS Recursor servers", - "config": "# Read metrics from one or many PowerDNS Recursor servers\n[[inputs.powerdns_recursor]]\n # alias=\"powerdns_recursor\"\n ## Path to the Recursor control socket.\n unix_sockets = [\"/var/run/pdns_recursor.controlsocket\"]\n\n ## Directory to create receive socket. This default is likely not writable,\n ## please reference the full plugin documentation for a recommended setup.\n # socket_dir = \"/var/run/\"\n ## Socket permissions for the receive socket.\n # socket_mode = \"0666\"\n\n" - }, - { - "type": "input", - "name": "sqlserver", - "description": "Read metrics from Microsoft SQL Server", - "config": "# Read metrics from Microsoft SQL Server\n[[inputs.sqlserver]]\n # alias=\"sqlserver\"\n ## Specify instances to monitor with a list of connection strings.\n ## All connection parameters are optional.\n ## By default, the host is localhost, listening on default port, TCP 1433.\n ## for Windows, the user is the currently running AD user (SSO).\n ## See https://github.com/denisenkom/go-mssqldb for detailed connection\n ## parameters, in particular, tls connections can be created like so:\n ## \"encrypt=true;certificate=\u003ccert\u003e;hostNameInCertificate=\u003cSqlServer host fqdn\u003e\"\n # servers = [\n # \"Server=192.168.1.10;Port=1433;User Id=\u003cuser\u003e;Password=\u003cpw\u003e;app name=telegraf;log=1;\",\n # ]\n\n ## Optional parameter, setting this to 2 will use a new version\n ## of the collection queries that break compatibility with the original\n ## dashboards.\n query_version = 2\n\n ## If you are using AzureDB, setting this to true will gather resource utilization metrics\n # azuredb = false\n\n ## If you would like to exclude some of the metrics queries, list them here\n ## Possible choices:\n ## - PerformanceCounters\n ## - WaitStatsCategorized\n ## - DatabaseIO\n ## - DatabaseProperties\n ## - CPUHistory\n ## - DatabaseSize\n ## - DatabaseStats\n ## - MemoryClerk\n ## - VolumeSpace\n ## - PerformanceMetrics\n ## - Schedulers\n ## - AzureDBResourceStats\n ## - AzureDBResourceGovernance\n ## - 
SqlRequests\n ## - ServerProperties\n exclude_query = [ 'Schedulers' ]\n\n" - }, - { - "type": "input", - "name": "disk", - "description": "Read metrics about disk usage by mount point", - "config": "# Read metrics about disk usage by mount point\n[[inputs.disk]]\n # alias=\"disk\"\n ## By default stats will be gathered for all mount points.\n ## Set mount_points will restrict the stats to only the specified mount points.\n # mount_points = [\"/\"]\n\n ## Ignore mount points by filesystem type.\n ignore_fs = [\"tmpfs\", \"devtmpfs\", \"devfs\", \"iso9660\", \"overlay\", \"aufs\", \"squashfs\"]\n\n" - }, - { - "type": "input", - "name": "fibaro", - "description": "Read devices value(s) from a Fibaro controller", - "config": "# Read devices value(s) from a Fibaro controller\n[[inputs.fibaro]]\n # alias=\"fibaro\"\n ## Required Fibaro controller address/hostname.\n ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available\n url = \"http://\u003ccontroller\u003e:80\"\n\n ## Required credentials to access the API (http://\u003ccontroller/api/\u003ccomponent\u003e)\n username = \"\u003cusername\u003e\"\n password = \"\u003cpassword\u003e\"\n\n ## Amount of time allowed to complete the HTTP request\n # timeout = \"5s\"\n\n" - }, - { - "type": "input", - "name": "graylog", - "description": "Read flattened metrics from one or more GrayLog HTTP endpoints", - "config": "# Read flattened metrics from one or more GrayLog HTTP endpoints\n[[inputs.graylog]]\n # alias=\"graylog\"\n ## API endpoint, currently supported API:\n ##\n ## - multiple (Ex http://\u003chost\u003e:12900/system/metrics/multiple)\n ## - namespace (Ex http://\u003chost\u003e:12900/system/metrics/namespace/{namespace})\n ##\n ## For namespace endpoint, the metrics array will be ignored for that call.\n ## Endpoint can contain namespace and multiple type calls.\n ##\n ## Please check http://[graylog-server-ip]:12900/api-browser for full list\n ## of endpoints\n servers = [\n \"http://[graylog-server-ip]:12900/system/metrics/multiple\",\n ]\n\n ## Metrics list\n ## List of metrics can be found on Graylog webservice documentation.\n ## Or by hitting the web service api at:\n ## http://[graylog-host]:12900/system/metrics\n metrics = [\n \"jvm.cl.loaded\",\n \"jvm.memory.pools.Metaspace.committed\"\n ]\n\n ## Username and password\n username = \"\"\n password = \"\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "lustre2", - "description": "Read metrics from local Lustre service on OST, MDS", - "config": "# Read metrics from local Lustre service on OST, MDS\n[[inputs.lustre2]]\n # alias=\"lustre2\"\n ## An array of /proc globs to search for Lustre stats\n ## If not specified, the default will work on Lustre 2.5.x\n ##\n # ost_procfiles = [\n # \"/proc/fs/lustre/obdfilter/*/stats\",\n # \"/proc/fs/lustre/osd-ldiskfs/*/stats\",\n # \"/proc/fs/lustre/obdfilter/*/job_stats\",\n # ]\n # mds_procfiles = [\n # \"/proc/fs/lustre/mdt/*/md_stats\",\n # \"/proc/fs/lustre/mdt/*/job_stats\",\n # ]\n\n" - }, - { - "type": "input", - "name": "nginx_upstream_check", - "description": "Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)", - "config": "# Read nginx_upstream_check module status information 
(https://github.com/yaoweibin/nginx_upstream_check_module)\n[[inputs.nginx_upstream_check]]\n # alias=\"nginx_upstream_check\"\n ## An URL where Nginx Upstream check module is enabled\n ## It should be set to return a JSON formatted response\n url = \"http://127.0.0.1/status?format=json\"\n\n ## HTTP method\n # method = \"GET\"\n\n ## Optional HTTP headers\n # headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## Override HTTP \"Host\" header\n # host_header = \"check.example.com\"\n\n ## Timeout for HTTP requests\n timeout = \"5s\"\n\n ## Optional HTTP Basic Auth credentials\n # username = \"username\"\n # password = \"pa$$word\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "apache", - "description": "Read Apache status information (mod_status)", - "config": "# Read Apache status information (mod_status)\n[[inputs.apache]]\n # alias=\"apache\"\n ## An array of URLs to gather from, must be directed at the machine\n ## readable version of the mod_status page including the auto query string.\n ## Default is \"http://localhost/server-status?auto\".\n urls = [\"http://localhost/server-status?auto\"]\n\n ## Credentials for basic HTTP authentication.\n # username = \"myuser\"\n # password = \"mypassword\"\n\n ## Maximum time to receive response.\n # response_timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "passenger", - "description": "Read metrics of passenger using passenger-status", - "config": "# Read metrics of passenger using passenger-status\n[[inputs.passenger]]\n # alias=\"passenger\"\n ## Path of passenger-status.\n ##\n ## Plugin gather metric via parsing XML output of passenger-status\n ## More information about the tool:\n ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html\n ##\n ## If no path is specified, then the plugin simply execute passenger-status\n ## hopefully it can be found in your PATH\n command = \"passenger-status -v --show=xml\"\n\n" - }, - { - "type": "input", - "name": "suricata", - "description": "Suricata stats plugin", - "config": "# Suricata stats plugin\n[[inputs.suricata]]\n # alias=\"suricata\"\n ## Data sink for Suricata stats log\n # This is expected to be a filename of a\n # unix socket to be created for listening.\n source = \"/var/run/suricata-stats.sock\"\n\n # Delimiter for flattening field keys, e.g. 
subitem \"alert\" of \"detect\"\n # becomes \"detect_alert\" when delimiter is \"_\".\n delimiter = \"_\"\n\n" - }, - { - "type": "input", - "name": "zipkin", - "description": "This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.", - "config": "# This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.\n[[inputs.zipkin]]\n # alias=\"zipkin\"\n # path = \"/api/v1/spans\" # URL path for span data\n # port = 9411 # Port on which Telegraf listens\n\n" - }, - { - "type": "input", - "name": "marklogic", - "description": "Retrieves information on a specific host in a MarkLogic Cluster", - "config": "# Retrieves information on a specific host in a MarkLogic Cluster\n[[inputs.marklogic]]\n # alias=\"marklogic\"\n ## Base URL of the MarkLogic HTTP Server.\n url = \"http://localhost:8002\"\n\n ## List of specific hostnames to retrieve information. At least (1) required.\n # hosts = [\"hostname1\", \"hostname2\"]\n\n ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges\n # username = \"myuser\"\n # password = \"mypassword\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "cloudwatch", - "description": "Pull Metric Statistics from Amazon CloudWatch", - "config": "# Pull Metric Statistics from Amazon CloudWatch\n[[inputs.cloudwatch]]\n # alias=\"cloudwatch\"\n ## Amazon Region\n region = \"us-east-1\"\n\n ## Amazon Credentials\n ## Credentials are loaded in the following order\n ## 1) Assumed credentials via STS if role_arn is specified\n ## 2) explicit credentials from 'access_key' and 'secret_key'\n ## 3) shared profile from 'profile'\n ## 4) environment variables\n ## 5) shared credentials file\n ## 6) EC2 Instance Profile\n # access_key = \"\"\n # secret_key = \"\"\n # token = \"\"\n # role_arn = \"\"\n # profile = \"\"\n # shared_credential_file = \"\"\n\n ## Endpoint to make request against, the correct endpoint is automatically\n ## determined and this option should only be set if you wish to override the\n ## default.\n ## ex: endpoint_url = \"http://localhost:8000\"\n # endpoint_url = \"\"\n\n # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all\n # metrics are made available to the 1 minute period. Some are collected at\n # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring.\n # Note that if a period is configured that is smaller than the minimum for a\n # particular metric, that metric will not be returned by the Cloudwatch API\n # and will not be collected by Telegraf.\n #\n ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)\n period = \"5m\"\n\n ## Collection Delay (required - must account for metrics availability via CloudWatch API)\n delay = \"5m\"\n\n ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid\n ## gaps or overlap in pulled data\n interval = \"5m\"\n\n ## Configure the TTL for the internal cache of metrics.\n # cache_ttl = \"1h\"\n\n ## Metric Statistic Namespace (required)\n namespace = \"AWS/ELB\"\n\n ## Maximum requests per second. 
Note that the global default AWS rate limit is\n ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a\n ## maximum of 50.\n ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html\n # ratelimit = 25\n\n ## Timeout for http requests made by the cloudwatch client.\n # timeout = \"5s\"\n\n ## Namespace-wide statistic filters. These allow fewer queries to be made to\n ## cloudwatch.\n # statistic_include = [ \"average\", \"sum\", \"minimum\", \"maximum\", \"sample_count\" ]\n # statistic_exclude = []\n\n ## Metrics to Pull\n ## Defaults to all Metrics in Namespace if nothing is provided\n ## Refreshes Namespace available metrics every 1h\n #[[inputs.cloudwatch.metrics]]\n # names = [\"Latency\", \"RequestCount\"]\n #\n # ## Statistic filters for Metric. These allow for retrieving specific\n # ## statistics for an individual metric.\n # # statistic_include = [ \"average\", \"sum\", \"minimum\", \"maximum\", \"sample_count\" ]\n # # statistic_exclude = []\n #\n # ## Dimension filters for Metric. All dimensions defined for the metric names\n # ## must be specified in order to retrieve the metric statistics.\n # [[inputs.cloudwatch.metrics.dimensions]]\n # name = \"LoadBalancerName\"\n # value = \"p-example\"\n\n" - }, - { - "type": "input", - "name": "system", - "description": "Read metrics about system load \u0026 uptime", - "config": "# Read metrics about system load \u0026 uptime\n[[inputs.system]]\n # alias=\"system\"\n ## Uncomment to remove deprecated metrics.\n # fielddrop = [\"uptime_format\"]\n\n" - }, - { - "type": "input", - "name": "docker", - "description": "Read metrics about docker containers", - "config": "# Read metrics about docker containers\n[[inputs.docker]]\n # alias=\"docker\"\n ## Docker Endpoint\n ## To use TCP, set endpoint = \"tcp://[ip]:[port]\"\n ## To use environment variables (ie, docker-machine), set endpoint = \"ENV\"\n endpoint = \"unix:///var/run/docker.sock\"\n\n ## Set to true to collect Swarm metrics(desired_replicas, running_replicas)\n gather_services = false\n\n ## Only collect metrics for these containers, collect all if empty\n container_names = []\n\n ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars\n source_tag = false\n\n ## Containers to include and exclude. Globs accepted.\n ## Note that an empty array for both will include all containers\n container_name_include = []\n container_name_exclude = []\n\n ## Container states to include and exclude. Globs accepted.\n ## When empty only containers in the \"running\" state will be captured.\n ## example: container_state_include = [\"created\", \"restarting\", \"running\", \"removing\", \"paused\", \"exited\", \"dead\"]\n ## example: container_state_exclude = [\"created\", \"restarting\", \"running\", \"removing\", \"paused\", \"exited\", \"dead\"]\n # container_state_include = []\n # container_state_exclude = []\n\n ## Timeout for docker list, info, and stats commands\n timeout = \"5s\"\n\n ## Whether to report for each container per-device blkio (8:0, 8:1...) and\n ## network (eth0, eth1, ...) stats or not\n perdevice = true\n\n ## Whether to report for each container total blkio and network stats or not\n total = false\n\n ## Which environment variables should we use as a tag\n ##tag_env = [\"JAVA_HOME\", \"HEAP_SIZE\"]\n\n ## docker labels to include and exclude as tags. 
Globs accepted.\n ## Note that an empty array for both will include all labels as tags\n docker_label_include = []\n docker_label_exclude = []\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "docker_log", - "description": "Read logging output from the Docker engine", - "config": "# Read logging output from the Docker engine\n[[inputs.docker_log]]\n # alias=\"docker_log\"\n ## Docker Endpoint\n ## To use TCP, set endpoint = \"tcp://[ip]:[port]\"\n ## To use environment variables (ie, docker-machine), set endpoint = \"ENV\"\n # endpoint = \"unix:///var/run/docker.sock\"\n\n ## When true, container logs are read from the beginning; otherwise\n ## reading begins at the end of the log.\n # from_beginning = false\n\n ## Timeout for Docker API calls.\n # timeout = \"5s\"\n\n ## Containers to include and exclude. Globs accepted.\n ## Note that an empty array for both will include all containers\n # container_name_include = []\n # container_name_exclude = []\n\n ## Container states to include and exclude. Globs accepted.\n ## When empty only containers in the \"running\" state will be captured.\n # container_state_include = []\n # container_state_exclude = []\n\n ## docker labels to include and exclude as tags. Globs accepted.\n ## Note that an empty array for both will include all labels as tags\n # docker_label_include = []\n # docker_label_exclude = []\n\n ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars\n source_tag = false\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "leofs", - "description": "Read metrics from a LeoFS Server via SNMP", - "config": "# Read metrics from a LeoFS Server via SNMP\n[[inputs.leofs]]\n # alias=\"leofs\"\n ## An array of URLs of the form:\n ## host [ \":\" port]\n servers = [\"127.0.0.1:4020\"]\n\n" - }, - { - "type": "input", - "name": "procstat", - "description": "Monitor process cpu and memory usage", - "config": "# Monitor process cpu and memory usage\n[[inputs.procstat]]\n # alias=\"procstat\"\n ## PID file to monitor process\n pid_file = \"/var/run/nginx.pid\"\n ## executable name (ie, pgrep \u003cexe\u003e)\n # exe = \"nginx\"\n ## pattern as argument for pgrep (ie, pgrep -f \u003cpattern\u003e)\n # pattern = \"nginx\"\n ## user as argument for pgrep (ie, pgrep -u \u003cuser\u003e)\n # user = \"nginx\"\n ## Systemd unit name\n # systemd_unit = \"nginx.service\"\n ## CGroup name or path\n # cgroup = \"systemd/system.slice/nginx.service\"\n\n ## Windows service name\n # win_service = \"\"\n\n ## override for process_name\n ## This is optional; default is sourced from /proc/\u003cpid\u003e/status\n # process_name = \"bar\"\n\n ## Field name prefix\n # prefix = \"\"\n\n ## When true add the full cmdline as a tag.\n # cmdline_tag = false\n\n ## Add PID as a tag instead of a field; useful to differentiate between\n ## processes whose tags are otherwise the same. Can create a large number\n ## of series, use judiciously.\n # pid_tag = false\n\n ## Method to use when finding process IDs. Can be one of 'pgrep', or\n ## 'native'. 
The pgrep finder calls the pgrep executable in the PATH while\n ## the native finder performs the search directly in a manner dependent on the\n ## platform. Default is 'pgrep'\n # pid_finder = \"pgrep\"\n\n" - }, - { - "type": "input", - "name": "salesforce", - "description": "Read API usage and limits for a Salesforce organisation", - "config": "# Read API usage and limits for a Salesforce organisation\n[[inputs.salesforce]]\n # alias=\"salesforce\"\n ## specify your credentials\n ##\n username = \"your_username\"\n password = \"your_password\"\n ##\n ## (optional) security token\n # security_token = \"your_security_token\"\n ##\n ## (optional) environment type (sandbox or production)\n ## default is: production\n ##\n # environment = \"production\"\n ##\n ## (optional) API version (default: \"39.0\")\n ##\n # version = \"39.0\"\n\n" - }, - { - "type": "input", - "name": "cloud_pubsub_push", - "description": "Google Cloud Pub/Sub Push HTTP listener", - "config": "# Google Cloud Pub/Sub Push HTTP listener\n[[inputs.cloud_pubsub_push]]\n # alias=\"cloud_pubsub_push\"\n ## Address and port to host HTTP listener on\n service_address = \":8080\"\n\n ## Application secret to verify messages originate from Cloud Pub/Sub\n # token = \"\"\n\n ## Path to listen to.\n # path = \"/\"\n\n ## Maximum duration before timing out read of the request\n # read_timeout = \"10s\"\n ## Maximum duration before timing out write of the response. This should be set to a value\n ## large enough that you can send at least 'metric_batch_size' number of messages within the\n ## duration.\n # write_timeout = \"10s\"\n\n ## Maximum allowed http request body size in bytes.\n ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)\n # max_body_size = \"500MB\"\n\n ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.\n # add_meta = false\n\n ## Optional. Maximum messages to read from PubSub that have not been written\n ## to an output. 
Defaults to 1000.\n ## For best throughput set based on the number of metrics within\n ## each message and the size of the output's metric_batch_size.\n ##\n ## For example, if each message contains 10 metrics and the output\n ## metric_batch_size is 1000, setting this to 100 will ensure that a\n ## full batch is collected and the write is triggered immediately without\n ## waiting until the next flush_interval.\n # max_undelivered_messages = 1000\n\n ## Set one or more allowed client CA certificate file names to\n ## enable mutually authenticated TLS connections\n # tls_allowed_cacerts = [\"/etc/telegraf/clientca.pem\"]\n\n ## Add service certificate and key\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n\n ## Data format to consume.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n\n" - }, - { - "type": "input", - "name": "ipvs", - "description": "Collect virtual and real server stats from Linux IPVS", - "config": "# Collect virtual and real server stats from Linux IPVS\n[[inputs.ipvs]]\n # alias=\"ipvs\"\n" - }, - { - "type": "input", - "name": "nginx_vts", - "description": "Read Nginx virtual host traffic status module information (nginx-module-vts)", - "config": "# Read Nginx virtual host traffic status module information (nginx-module-vts)\n[[inputs.nginx_vts]]\n # alias=\"nginx_vts\"\n ## An array of ngx_http_status_module or status URI to gather stats.\n urls = [\"http://localhost/status\"]\n\n ## HTTP response timeout (default: 5s)\n response_timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "input", - "name": "ntpq", - "description": "Get standard NTP query metrics, requires ntpq executable.", - "config": "# Get standard NTP query metrics, requires ntpq executable.\n[[inputs.ntpq]]\n # alias=\"ntpq\"\n ## If false, set the -n ntpq flag. Can reduce metric gather time.\n dns_lookup = true\n\n" - }, - { - "type": "input", - "name": "openldap", - "description": "OpenLDAP cn=Monitor plugin", - "config": "# OpenLDAP cn=Monitor plugin\n[[inputs.openldap]]\n # alias=\"openldap\"\n host = \"localhost\"\n port = 389\n\n # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.\n # note that port will likely need to be changed to 636 for ldaps\n # valid options: \"\" | \"starttls\" | \"ldaps\"\n tls = \"\"\n\n # skip peer certificate verification. Default is false.\n insecure_skip_verify = false\n\n # Path to PEM-encoded Root certificate to use to verify server certificate\n tls_ca = \"/etc/ssl/certs.pem\"\n\n # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.\n bind_dn = \"\"\n bind_password = \"\"\n\n # Reverse metric names so they sort more naturally. 
Recommended.\n # This defaults to false if unset, but is set to true when generating a new config\n reverse_metric_names = true\n\n" - }, - { - "type": "input", - "name": "fluentd", - "description": "Read metrics exposed by fluentd in_monitor plugin", - "config": "# Read metrics exposed by fluentd in_monitor plugin\n[[inputs.fluentd]]\n # alias=\"fluentd\"\n ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).\n ##\n ## Endpoint:\n ## - only one URI is allowed\n ## - https is not supported\n endpoint = \"http://localhost:24220/api/plugins.json\"\n\n ## Define which plugins have to be excluded (based on \"type\" field - e.g. monitor_agent)\n exclude = [\n\t \"monitor_agent\",\n\t \"dummy\",\n ]\n\n" - }, - { - "type": "input", - "name": "nats", - "description": "Provides metrics about the state of a NATS server", - "config": "# Provides metrics about the state of a NATS server\n[[inputs.nats]]\n # alias=\"nats\"\n ## The address of the monitoring endpoint of the NATS server\n server = \"http://localhost:8222\"\n\n ## Maximum time to receive response\n # response_timeout = \"5s\"\n\n" - } - ] -} -` -var availableOutputs = `{ - "version": "1.13.0", - "os": "linux", - "plugins": [ - { - "type": "output", - "name": "http", - "description": "A plugin that can transmit metrics over HTTP", - "config": "# A plugin that can transmit metrics over HTTP\n[[outputs.http]]\n # alias=\"http\"\n ## URL is the address to send metrics to\n url = \"http://127.0.0.1:8080/telegraf\"\n\n ## Timeout for HTTP message\n # timeout = \"5s\"\n\n ## HTTP method, one of: \"POST\" or \"PUT\"\n # method = \"POST\"\n\n ## HTTP Basic Auth credentials\n # username = \"username\"\n # password = \"pa$$word\"\n\n ## OAuth2 Client Credentials Grant\n # client_id = \"clientid\"\n # client_secret = \"secret\"\n # token_url = \"https://indentityprovider/oauth2/v1/token\"\n # scopes = [\"urn:opc:idm:__myscopes__\"]\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## Data format to output.\n ## Each data format has it's own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md\n # data_format = \"influx\"\n\n ## HTTP Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"identity\"\n\n ## Additional HTTP headers\n # [outputs.http.headers]\n # # Should be set manually to \"application/json\" for json data_format\n # Content-Type = \"text/plain; charset=utf-8\"\n\n" - }, - { - "type": "output", - "name": "influxdb", - "description": "Configuration for sending metrics to InfluxDB", - "config": "# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb]]\n # alias=\"influxdb\"\n ## The full HTTP or UDP URL for your InfluxDB instance.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n # urls = [\"unix:///var/run/influxdb.sock\"]\n # urls = [\"udp://127.0.0.1:8089\"]\n # urls = [\"http://127.0.0.1:8086\"]\n\n ## The target database for metrics; will be created as needed.\n ## For UDP url endpoint database needs to be configured on server side.\n # database = \"telegraf\"\n\n ## The value of this tag will be used to determine the database. 
If this\n ## tag is not set the 'database' option is used as the default.\n # database_tag = \"\"\n\n ## If true, the database tag will not be added to the metric.\n # exclude_database_tag = false\n\n ## If true, no CREATE DATABASE queries will be sent. Set to true when using\n ## Telegraf with a user without permissions to create databases or when the\n ## database already exists.\n # skip_database_creation = false\n\n ## Name of existing retention policy to write to. Empty string writes to\n ## the default retention policy. Only takes effect when using HTTP.\n # retention_policy = \"\"\n\n ## Write consistency (clusters only), can be: \"any\", \"one\", \"quorum\", \"all\".\n ## Only takes effect when using HTTP.\n # write_consistency = \"any\"\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## HTTP Basic Auth\n # username = \"telegraf\"\n # password = \"metricsmetricsmetricsmetrics\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## UDP payload size is the maximum packet size to send.\n # udp_payload = \"512B\"\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"identity\"\n\n ## When true, Telegraf will output unsigned integers as unsigned values,\n ## i.e.: \"42u\". You will need a version of InfluxDB supporting unsigned\n ## integer values. 
Enabling this option will result in field type errors if\n ## existing data has been written.\n # influx_uint_support = false\n\n" - }, - { - "type": "output", - "name": "exec", - "description": "Send metrics to command as input over stdin", - "config": "# Send metrics to command as input over stdin\n[[outputs.exec]]\n # alias=\"exec\"\n ## Command to injest metrics via stdin.\n command = [\"tee\", \"-a\", \"/dev/null\"]\n\n ## Timeout for command to complete.\n # timeout = \"5s\"\n\n ## Data format to output.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md\n # data_format = \"influx\"\n\n" - }, - { - "type": "output", - "name": "graphite", - "description": "Configuration for Graphite server to send metrics to", - "config": "# Configuration for Graphite server to send metrics to\n[[outputs.graphite]]\n # alias=\"graphite\"\n ## TCP endpoint for your graphite instance.\n ## If multiple endpoints are configured, output will be load balanced.\n ## Only one of the endpoints will be written to with each iteration.\n servers = [\"localhost:2003\"]\n ## Prefix metrics name\n prefix = \"\"\n ## Graphite output template\n ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md\n template = \"host.tags.measurement.field\"\n\n ## Enable Graphite tags support\n # graphite_tag_support = false\n\n ## timeout in seconds for the write connection to graphite\n timeout = 2\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "output", - "name": "graylog", - "description": "Send telegraf metrics to graylog(s)", - "config": "# Send telegraf metrics to graylog(s)\n[[outputs.graylog]]\n # alias=\"graylog\"\n ## UDP endpoint for your graylog instance.\n servers = [\"127.0.0.1:12201\", \"192.168.1.1:12201\"]\n\n" - }, - { - "type": "output", - "name": "nats", - "description": "Send telegraf measurements to NATS", - "config": "# Send telegraf measurements to NATS\n[[outputs.nats]]\n # alias=\"nats\"\n ## URLs of NATS servers\n servers = [\"nats://localhost:4222\"]\n ## Optional credentials\n # username = \"\"\n # password = \"\"\n ## NATS subject for producer messages\n subject = \"telegraf\"\n\n ## Use Transport Layer Security\n # secure = false\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## Data format to output.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md\n data_format = \"influx\"\n\n" - }, - { - "type": "output", - "name": "prometheus_client", - "description": "Configuration for the Prometheus client to spawn", - "config": "# Configuration for the Prometheus client to spawn\n[[outputs.prometheus_client]]\n # alias=\"prometheus_client\"\n ## Address to listen on\n listen = \":9273\"\n\n ## Metric version controls the mapping from Telegraf metrics into\n ## Prometheus format. 
When using the prometheus input, use the same value in\n ## both plugins to ensure metrics are round-tripped without modification.\n ##\n ## example: metric_version = 1; deprecated in 1.13\n ## metric_version = 2; recommended version\n # metric_version = 1\n\n ## Use HTTP Basic Authentication.\n # basic_username = \"Foo\"\n # basic_password = \"Bar\"\n\n ## If set, the IP Ranges which are allowed to access metrics.\n ## ex: ip_range = [\"192.168.0.0/24\", \"192.168.1.0/30\"]\n # ip_range = []\n\n ## Path to publish the metrics on.\n # path = \"/metrics\"\n\n ## Expiration interval for each metric. 0 == no expiration\n # expiration_interval = \"60s\"\n\n ## Collectors to enable, valid entries are \"gocollector\" and \"process\".\n ## If unset, both are enabled.\n # collectors_exclude = [\"gocollector\", \"process\"]\n\n ## Send string metrics as Prometheus labels.\n ## Unless set to false all string metrics will be sent as labels.\n # string_as_label = true\n\n ## If set, enable TLS with the given certificate.\n # tls_cert = \"/etc/ssl/telegraf.crt\"\n # tls_key = \"/etc/ssl/telegraf.key\"\n\n ## Set one or more allowed client CA certificate file names to\n ## enable mutually authenticated TLS connections\n # tls_allowed_cacerts = [\"/etc/telegraf/clientca.pem\"]\n\n ## Export metric collection time.\n # export_timestamp = false\n\n" - }, - { - "type": "output", - "name": "riemann", - "description": "Configuration for the Riemann server to send metrics to", - "config": "# Configuration for the Riemann server to send metrics to\n[[outputs.riemann]]\n # alias=\"riemann\"\n ## The full TCP or UDP URL of the Riemann server\n url = \"tcp://localhost:5555\"\n\n ## Riemann event TTL, floating-point time in seconds.\n ## Defines how long that an event is considered valid for in Riemann\n # ttl = 30.0\n\n ## Separator to use between measurement and field name in Riemann service name\n ## This does not have any effect if 'measurement_as_attribute' is set to 'true'\n separator = \"/\"\n\n ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name\n # measurement_as_attribute = false\n\n ## Send string metrics as Riemann event states.\n ## Unless enabled all string metrics will be ignored\n # string_as_state = false\n\n ## A list of tag keys whose values get sent as Riemann tags.\n ## If empty, all Telegraf tag values will be sent as tags\n # tag_keys = [\"telegraf\",\"custom_tag\"]\n\n ## Additional Riemann tags to send.\n # tags = [\"telegraf-output\"]\n\n ## Description for Riemann event\n # description_text = \"metrics collected from telegraf\"\n\n ## Riemann client write timeout, defaults to \"5s\" if not set.\n # timeout = \"5s\"\n\n" - }, - { - "type": "output", - "name": "wavefront", - "description": "Configuration for Wavefront server to send metrics to", - "config": "# Configuration for Wavefront server to send metrics to\n[[outputs.wavefront]]\n # alias=\"wavefront\"\n ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy\n ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878\n url = \"https://metrics.wavefront.com\"\n\n ## Authentication Token for Wavefront. Only required if using Direct Ingestion\n #token = \"DUMMY_TOKEN\" \n \n ## DNS name of the wavefront proxy server. Do not use if url is specified\n #host = \"wavefront.example.com\"\n\n ## Port that the Wavefront proxy server listens on. 
Do not use if url is specified\n #port = 2878\n\n ## prefix for metrics keys\n #prefix = \"my.specific.prefix.\"\n\n ## whether to use \"value\" for name of simple fields. default is false\n #simple_fields = false\n\n ## character to use between metric and field name. default is . (dot)\n #metric_separator = \".\"\n\n ## Convert metric name paths to use metricSeparator character\n ## When true will convert all _ (underscore) characters in final metric name. default is true\n #convert_paths = true\n\n ## Use Strict rules to sanitize metric and tag names from invalid characters\n ## When enabled forward slash (/) and comma (,) will be accpeted\n #use_strict = false\n\n ## Use Regex to sanitize metric and tag names from invalid characters\n ## Regex is more thorough, but significantly slower. default is false\n #use_regex = false\n\n ## point tags to use as the source name for Wavefront (if none found, host will be used)\n #source_override = [\"hostname\", \"address\", \"agent_host\", \"node_host\"]\n\n ## whether to convert boolean values to numeric values, with false -\u003e 0.0 and true -\u003e 1.0. default is true\n #convert_bool = true\n\n ## Define a mapping, namespaced by metric prefix, from string values to numeric values\n ## deprecated in 1.9; use the enum processor plugin\n #[[outputs.wavefront.string_to_number.elasticsearch]]\n # green = 1.0\n # yellow = 0.5\n # red = 0.0\n\n" - }, - { - "type": "output", - "name": "cloudwatch", - "description": "Configuration for AWS CloudWatch output.", - "config": "# Configuration for AWS CloudWatch output.\n[[outputs.cloudwatch]]\n # alias=\"cloudwatch\"\n ## Amazon REGION\n region = \"us-east-1\"\n\n ## Amazon Credentials\n ## Credentials are loaded in the following order\n ## 1) Assumed credentials via STS if role_arn is specified\n ## 2) explicit credentials from 'access_key' and 'secret_key'\n ## 3) shared profile from 'profile'\n ## 4) environment variables\n ## 5) shared credentials file\n ## 6) EC2 Instance Profile\n #access_key = \"\"\n #secret_key = \"\"\n #token = \"\"\n #role_arn = \"\"\n #profile = \"\"\n #shared_credential_file = \"\"\n\n ## Endpoint to make request against, the correct endpoint is automatically\n ## determined and this option should only be set if you wish to override the\n ## default.\n ## ex: endpoint_url = \"http://localhost:8000\"\n # endpoint_url = \"\"\n\n ## Namespace for the CloudWatch MetricDatums\n namespace = \"InfluxData/Telegraf\"\n\n ## If you have a large amount of metrics, you should consider to send statistic \n ## values instead of raw metrics which could not only improve performance but \n ## also save AWS API cost. If enable this flag, this plugin would parse the required \n ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. \n ## You could use basicstats aggregator to calculate those fields. If not all statistic \n ## fields are available, all fields would still be sent as raw metrics. 
\n # write_statistics = false\n\n ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision)\n # high_resolution_metrics = false\n\n" - }, - { - "type": "output", - "name": "datadog", - "description": "Configuration for DataDog API to send metrics to.", - "config": "# Configuration for DataDog API to send metrics to.\n[[outputs.datadog]]\n # alias=\"datadog\"\n ## Datadog API key\n apikey = \"my-secret-key\" # required.\n\n # The base endpoint URL can optionally be specified but it defaults to:\n #url = \"https://app.datadoghq.com/api/v1/series\"\n\n ## Connection timeout.\n # timeout = \"5s\"\n\n" - }, - { - "type": "output", - "name": "discard", - "description": "Send metrics to nowhere at all", - "config": "# Send metrics to nowhere at all\n[[outputs.discard]]\n # alias=\"discard\"\n" - }, - { - "type": "output", - "name": "health", - "description": "Configurable HTTP health check resource based on metrics", - "config": "# Configurable HTTP health check resource based on metrics\n[[outputs.health]]\n # alias=\"health\"\n ## Address and port to listen on.\n ## ex: service_address = \"http://localhost:8080\"\n ## service_address = \"unix:///var/run/telegraf-health.sock\"\n # service_address = \"http://:8080\"\n\n ## The maximum duration for reading the entire request.\n # read_timeout = \"5s\"\n ## The maximum duration for writing the entire response.\n # write_timeout = \"5s\"\n\n ## Username and password to accept for HTTP basic authentication.\n # basic_username = \"user1\"\n # basic_password = \"secret\"\n\n ## Allowed CA certificates for client certificates.\n # tls_allowed_cacerts = [\"/etc/telegraf/clientca.pem\"]\n\n ## TLS server certificate and private key.\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n\n ## One or more check sub-tables should be defined, it is also recommended to\n ## use metric filtering to limit the metrics that flow into this output.\n ##\n ## When using the default buffer sizes, this example will fail when the\n ## metric buffer is half full.\n ##\n ## namepass = [\"internal_write\"]\n ## tagpass = { output = [\"influxdb\"] }\n ##\n ## [[outputs.health.compares]]\n ## field = \"buffer_size\"\n ## lt = 5000.0\n ##\n ## [[outputs.health.contains]]\n ## field = \"buffer_size\"\n\n" - }, - { - "type": "output", - "name": "kinesis", - "description": "Configuration for the AWS Kinesis output.", - "config": "# Configuration for the AWS Kinesis output.\n[[outputs.kinesis]]\n # alias=\"kinesis\"\n ## Amazon REGION of kinesis endpoint.\n region = \"ap-southeast-2\"\n\n ## Amazon Credentials\n ## Credentials are loaded in the following order\n ## 1) Assumed credentials via STS if role_arn is specified\n ## 2) explicit credentials from 'access_key' and 'secret_key'\n ## 3) shared profile from 'profile'\n ## 4) environment variables\n ## 5) shared credentials file\n ## 6) EC2 Instance Profile\n #access_key = \"\"\n #secret_key = \"\"\n #token = \"\"\n #role_arn = \"\"\n #profile = \"\"\n #shared_credential_file = \"\"\n\n ## Endpoint to make request against, the correct endpoint is automatically\n ## determined and this option should only be set if you wish to override the\n ## default.\n ## ex: endpoint_url = \"http://localhost:8000\"\n # endpoint_url = \"\"\n\n ## Kinesis StreamName must exist prior to starting telegraf.\n streamname = \"StreamName\"\n ## DEPRECATED: PartitionKey as used for sharding data.\n partitionkey = \"PartitionKey\"\n ## DEPRECATED: If set the paritionKey 
will be a random UUID on every put.\n ## This allows for scaling across multiple shards in a stream.\n ## This will cause issues with ordering.\n use_random_partitionkey = false\n ## The partition key can be calculated using one of several methods:\n ##\n ## Use a static value for all writes:\n # [outputs.kinesis.partition]\n # method = \"static\"\n # key = \"howdy\"\n #\n ## Use a random partition key on each write:\n # [outputs.kinesis.partition]\n # method = \"random\"\n #\n ## Use the measurement name as the partition key:\n # [outputs.kinesis.partition]\n # method = \"measurement\"\n #\n ## Use the value of a tag for all writes, if the tag is not set the empty\n ## default option will be used. When no default, defaults to \"telegraf\"\n # [outputs.kinesis.partition]\n # method = \"tag\"\n # key = \"host\"\n # default = \"mykey\"\n\n\n ## Data format to output.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md\n data_format = \"influx\"\n\n ## debug will show upstream aws messages.\n debug = false\n\n" - }, - { - "type": "output", - "name": "riemann_legacy", - "description": "Configuration for the Riemann server to send metrics to", - "config": "# Configuration for the Riemann server to send metrics to\n[[outputs.riemann_legacy]]\n # alias=\"riemann_legacy\"\n ## URL of server\n url = \"localhost:5555\"\n ## transport protocol to use either tcp or udp\n transport = \"tcp\"\n ## separator to use between input name and field name in Riemann service name\n separator = \" \"\n\n" - }, - { - "type": "output", - "name": "stackdriver", - "description": "Configuration for Google Cloud Stackdriver to send metrics to", - "config": "# Configuration for Google Cloud Stackdriver to send metrics to\n[[outputs.stackdriver]]\n # alias=\"stackdriver\"\n ## GCP Project\n project = \"erudite-bloom-151019\"\n\n ## The namespace for the metric descriptor\n namespace = \"telegraf\"\n\n ## Custom resource type\n # resource_type = \"generic_node\"\n\n ## Additonal resource labels\n # [outputs.stackdriver.resource_labels]\n # node_id = \"$HOSTNAME\"\n # namespace = \"myapp\"\n # location = \"eu-north0\"\n\n" - }, - { - "type": "output", - "name": "amon", - "description": "Configuration for Amon Server to send metrics to.", - "config": "# Configuration for Amon Server to send metrics to.\n[[outputs.amon]]\n # alias=\"amon\"\n ## Amon Server Key\n server_key = \"my-server-key\" # required.\n\n ## Amon Instance URL\n amon_instance = \"https://youramoninstance\" # required\n\n ## Connection timeout.\n # timeout = \"5s\"\n\n" - }, - { - "type": "output", - "name": "application_insights", - "description": "Send metrics to Azure Application Insights", - "config": "# Send metrics to Azure Application Insights\n[[outputs.application_insights]]\n # alias=\"application_insights\"\n ## Instrumentation key of the Application Insights resource.\n instrumentation_key = \"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx\"\n\n ## Timeout for closing (default: 5s).\n # timeout = \"5s\"\n\n ## Enable additional diagnostic logging.\n # enable_diagnostic_logging = false\n\n ## Context Tag Sources add Application Insights context tags to a tag value.\n ##\n ## For list of allowed context tag keys see:\n ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go\n # [outputs.application_insights.context_tag_sources]\n # \"ai.cloud.role\" = 
\"kubernetes_container_name\"\n # \"ai.cloud.roleInstance\" = \"kubernetes_pod_name\"\n\n" - }, - { - "type": "output", - "name": "file", - "description": "Send telegraf metrics to file(s)", - "config": "# Send telegraf metrics to file(s)\n[[outputs.file]]\n # alias=\"file\"\n ## Files to write to, \"stdout\" is a specially handled file.\n files = [\"stdout\", \"/tmp/metrics.out\"]\n\n ## Use batch serialization format instead of line based delimiting. The\n ## batch format allows for the production of non line based output formats and\n ## may more effiently encode metric groups.\n # use_batch_format = false\n\n ## The file will be rotated after the time interval specified. When set\n ## to 0 no time based rotation is performed.\n # rotation_interval = \"0d\"\n\n ## The logfile will be rotated when it becomes larger than the specified\n ## size. When set to 0 no size based rotation is performed.\n # rotation_max_size = \"0MB\"\n\n ## Maximum number of rotated archives to keep, any older logs are deleted.\n ## If set to -1, no archives are removed.\n # rotation_max_archives = 5\n\n ## Data format to output.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md\n data_format = \"influx\"\n\n" - }, - { - "type": "output", - "name": "opentsdb", - "description": "Configuration for OpenTSDB server to send metrics to", - "config": "# Configuration for OpenTSDB server to send metrics to\n[[outputs.opentsdb]]\n # alias=\"opentsdb\"\n ## prefix for metrics keys\n prefix = \"my.specific.prefix.\"\n\n ## DNS name of the OpenTSDB server\n ## Using \"opentsdb.example.com\" or \"tcp://opentsdb.example.com\" will use the\n ## telnet API. \"http://opentsdb.example.com\" will use the Http API.\n host = \"opentsdb.example.com\"\n\n ## Port of the OpenTSDB server\n port = 4242\n\n ## Number of data points to send to OpenTSDB in Http requests.\n ## Not used with telnet API.\n http_batch_size = 50\n\n ## URI Path for Http requests to OpenTSDB.\n ## Used in cases where OpenTSDB is located behind a reverse proxy.\n http_path = \"/api/put\"\n\n ## Debug true - Prints OpenTSDB communication\n debug = false\n\n ## Separator separates measurement name from field\n separator = \"_\"\n\n" - }, - { - "type": "output", - "name": "amqp", - "description": "Publishes metrics to an AMQP broker", - "config": "# Publishes metrics to an AMQP broker\n[[outputs.amqp]]\n # alias=\"amqp\"\n ## Broker to publish to.\n ## deprecated in 1.7; use the brokers option\n # url = \"amqp://localhost:5672/influxdb\"\n\n ## Brokers to publish to. If multiple brokers are specified a random broker\n ## will be selected anytime a connection is established. This can be\n ## helpful for load balancing when not using a dedicated load balancer.\n brokers = [\"amqp://localhost:5672/influxdb\"]\n\n ## Maximum messages to send over a connection. Once this is reached, the\n ## connection is closed and a new connection is made. 
This can be helpful for\n ## load balancing when not using a dedicated load balancer.\n # max_messages = 0\n\n ## Exchange to declare and publish to.\n exchange = \"telegraf\"\n\n ## Exchange type; common types are \"direct\", \"fanout\", \"topic\", \"header\", \"x-consistent-hash\".\n # exchange_type = \"topic\"\n\n ## If true, exchange will be passively declared.\n # exchange_passive = false\n\n ## Exchange durability can be either \"transient\" or \"durable\".\n # exchange_durability = \"durable\"\n\n ## Additional exchange arguments.\n # exchange_arguments = { }\n # exchange_arguments = {\"hash_propery\" = \"timestamp\"}\n\n ## Authentication credentials for the PLAIN auth_method.\n # username = \"\"\n # password = \"\"\n\n ## Auth method. PLAIN and EXTERNAL are supported\n ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as\n ## described here: https://www.rabbitmq.com/plugins.html\n # auth_method = \"PLAIN\"\n\n ## Metric tag to use as a routing key.\n ## ie, if this tag exists, its value will be used as the routing key\n # routing_tag = \"host\"\n\n ## Static routing key. Used when no routing_tag is set or as a fallback\n ## when the tag specified in routing tag is not found.\n # routing_key = \"\"\n # routing_key = \"telegraf\"\n\n ## Delivery Mode controls if a published message is persistent.\n ## One of \"transient\" or \"persistent\".\n # delivery_mode = \"transient\"\n\n ## InfluxDB database added as a message header.\n ## deprecated in 1.7; use the headers option\n # database = \"telegraf\"\n\n ## InfluxDB retention policy added as a message header\n ## deprecated in 1.7; use the headers option\n # retention_policy = \"default\"\n\n ## Static headers added to each published message.\n # headers = { }\n # headers = {\"database\" = \"telegraf\", \"retention_policy\" = \"default\"}\n\n ## Connection timeout. If not provided, will default to 5s. 0s means no\n ## timeout (not recommended).\n # timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## If true use batch serialization format instead of line based delimiting.\n ## Only applies to data formats which are not line based such as JSON.\n ## Recommended to set to true.\n # use_batch_format = false\n\n ## Content encoding for message payloads, can be set to \"gzip\" to or\n ## \"identity\" to apply no encoding.\n ##\n ## Please note that when use_batch_format = false each amqp message contains only\n ## a single metric, it is recommended to use compression with batch format\n ## for best results.\n # content_encoding = \"identity\"\n\n ## Data format to output.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md\n # data_format = \"influx\"\n\n" - }, - { - "type": "output", - "name": "azure_monitor", - "description": "Send aggregate metrics to Azure Monitor", - "config": "# Send aggregate metrics to Azure Monitor\n[[outputs.azure_monitor]]\n # alias=\"azure_monitor\"\n ## Timeout for HTTP writes.\n # timeout = \"20s\"\n\n ## Set the namespace prefix, defaults to \"Telegraf/\u003cinput-name\u003e\".\n # namespace_prefix = \"Telegraf/\"\n\n ## Azure Monitor doesn't have a string value type, so convert string\n ## fields to dimensions (a.k.a. tags) if enabled. 
Azure Monitor allows\n ## a maximum of 10 dimensions so Telegraf will only send the first 10\n ## alphanumeric dimensions.\n # strings_as_dimensions = false\n\n ## Both region and resource_id must be set or be available via the\n ## Instance Metadata service on Azure Virtual Machines.\n #\n ## Azure Region to publish metrics against.\n ## ex: region = \"southcentralus\"\n # region = \"\"\n #\n ## The Azure Resource ID against which metric will be logged, e.g.\n ## ex: resource_id = \"/subscriptions/\u003csubscription_id\u003e/resourceGroups/\u003cresource_group\u003e/providers/Microsoft.Compute/virtualMachines/\u003cvm_name\u003e\"\n # resource_id = \"\"\n\n ## Optionally, if in Azure US Government, China or other sovereign\n ## cloud environment, set appropriate REST endpoint for receiving\n ## metrics. (Note: region may be unused in this context)\n # endpoint_url = \"https://monitoring.core.usgovcloudapi.net\"\n\n" - }, - { - "type": "output", - "name": "syslog", - "description": "Configuration for Syslog server to send metrics to", - "config": "# Configuration for Syslog server to send metrics to\n[[outputs.syslog]]\n # alias=\"syslog\"\n ## URL to connect to\n ## ex: address = \"tcp://127.0.0.1:8094\"\n ## ex: address = \"tcp4://127.0.0.1:8094\"\n ## ex: address = \"tcp6://127.0.0.1:8094\"\n ## ex: address = \"tcp6://[2001:db8::1]:8094\"\n ## ex: address = \"udp://127.0.0.1:8094\"\n ## ex: address = \"udp4://127.0.0.1:8094\"\n ## ex: address = \"udp6://127.0.0.1:8094\"\n address = \"tcp://127.0.0.1:8094\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## Period between keep alive probes.\n ## Only applies to TCP sockets.\n ## 0 disables keep alive probes.\n ## Defaults to the OS configuration.\n # keep_alive_period = \"5m\"\n\n ## The framing technique with which it is expected that messages are\n ## transported (default = \"octet-counting\"). Whether the messages come\n ## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),\n ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must\n ## be one of \"octet-counting\", \"non-transparent\".\n # framing = \"octet-counting\"\n\n ## The trailer to be expected in case of non-trasparent framing (default = \"LF\").\n ## Must be one of \"LF\", or \"NUL\".\n # trailer = \"LF\"\n\n ## SD-PARAMs settings\n ## Syslog messages can contain key/value pairs within zero or more\n ## structured data sections. 
For each unrecognised metric tag/field a\n ## SD-PARAMS is created.\n ##\n ## Example:\n ## [[outputs.syslog]]\n ## sdparam_separator = \"_\"\n ## default_sdid = \"default@32473\"\n ## sdids = [\"foo@123\", \"bar@456\"]\n ##\n ## input =\u003e xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1\n ## output (structured data only) =\u003e [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y]\n\n ## SD-PARAMs separator between the sdid and tag/field key (default = \"_\")\n # sdparam_separator = \"_\"\n\n ## Default sdid used for tags/fields that don't contain a prefix defined in\n ## the explict sdids setting below If no default is specified, no SD-PARAMs\n ## will be used for unrecognised field.\n # default_sdid = \"default@32473\"\n\n ## List of explicit prefixes to extract from tag/field keys and use as the\n ## SDID, if they match (see above example for more details):\n # sdids = [\"foo@123\", \"bar@456\"]\n\n ## Default severity value. Severity and Facility are used to calculate the\n ## message PRI value (RFC5424#section-6.2.1). Used when no metric field\n ## with key \"severity_code\" is defined. If unset, 5 (notice) is the default\n # default_severity_code = 5\n\n ## Default facility value. Facility and Severity are used to calculate the\n ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with\n ## key \"facility_code\" is defined. If unset, 1 (user-level) is the default\n # default_facility_code = 1\n\n ## Default APP-NAME value (RFC5424#section-6.2.5)\n ## Used when no metric tag with key \"appname\" is defined.\n ## If unset, \"Telegraf\" is the default\n # default_appname = \"Telegraf\"\n\n" - }, - { - "type": "output", - "name": "nsq", - "description": "Send telegraf measurements to NSQD", - "config": "# Send telegraf measurements to NSQD\n[[outputs.nsq]]\n # alias=\"nsq\"\n ## Location of nsqd instance listening on TCP\n server = \"localhost:4150\"\n ## NSQ topic for producer messages\n topic = \"telegraf\"\n\n ## Data format to output.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md\n data_format = \"influx\"\n\n" - }, - { - "type": "output", - "name": "socket_writer", - "description": "Generic socket writer capable of handling multiple socket types.", - "config": "# Generic socket writer capable of handling multiple socket types.\n[[outputs.socket_writer]]\n # alias=\"socket_writer\"\n ## URL to connect to\n # address = \"tcp://127.0.0.1:8094\"\n # address = \"tcp://example.com:http\"\n # address = \"tcp4://127.0.0.1:8094\"\n # address = \"tcp6://127.0.0.1:8094\"\n # address = \"tcp6://[2001:db8::1]:8094\"\n # address = \"udp://127.0.0.1:8094\"\n # address = \"udp4://127.0.0.1:8094\"\n # address = \"udp6://127.0.0.1:8094\"\n # address = \"unix:///tmp/telegraf.sock\"\n # address = \"unixgram:///tmp/telegraf.sock\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## Period between keep alive probes.\n ## Only applies to TCP sockets.\n ## 0 disables keep alive probes.\n ## Defaults to the OS configuration.\n # keep_alive_period = \"5m\"\n\n ## Data format to generate.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## 
https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n # data_format = \"influx\"\n\n" - }, - { - "type": "output", - "name": "mqtt", - "description": "Configuration for MQTT server to send metrics to", - "config": "# Configuration for MQTT server to send metrics to\n[[outputs.mqtt]]\n # alias=\"mqtt\"\n servers = [\"localhost:1883\"] # required.\n\n ## MQTT outputs send metrics to this topic format\n ## \"\u003ctopic_prefix\u003e/\u003chostname\u003e/\u003cpluginname\u003e/\"\n ## ex: prefix/web01.example.com/mem\n topic_prefix = \"telegraf\"\n\n ## QoS policy for messages\n ## 0 = at most once\n ## 1 = at least once\n ## 2 = exactly once\n # qos = 2\n\n ## username and password to connect MQTT server.\n # username = \"telegraf\"\n # password = \"metricsmetricsmetricsmetrics\"\n\n ## client ID, if not set a random ID is generated\n # client_id = \"\"\n\n ## Timeout for write operations. default: 5s\n # timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## When true, metrics will be sent in one MQTT message per flush. Otherwise,\n ## metrics are written one metric per MQTT message.\n # batch = false\n\n ## When true, metric will have RETAIN flag set, making broker cache entries until someone\n ## actually reads it\n # retain = false\n\n ## Data format to output.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md\n data_format = \"influx\"\n\n" - }, - { - "type": "output", - "name": "cloud_pubsub", - "description": "Publish Telegraf metrics to a Google Cloud PubSub topic", - "config": "# Publish Telegraf metrics to a Google Cloud PubSub topic\n[[outputs.cloud_pubsub]]\n # alias=\"cloud_pubsub\"\n ## Required. Name of Google Cloud Platform (GCP) Project that owns\n ## the given PubSub topic.\n project = \"my-project\"\n\n ## Required. Name of PubSub topic to publish metrics to.\n topic = \"my-topic\"\n\n ## Required. Data format to consume.\n ## Each data format has its own unique set of configuration options.\n ## Read more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n\n ## Optional. Filepath for GCP credentials JSON file to authorize calls to\n ## PubSub APIs. If not set explicitly, Telegraf will attempt to use\n ## Application Default Credentials, which is preferred.\n # credentials_file = \"path/to/my/creds.json\"\n\n ## Optional. If true, will send all metrics per write in one PubSub message.\n # send_batched = true\n\n ## The following publish_* parameters specifically configures batching\n ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read\n ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings\n\n ## Optional. Send a request to PubSub (i.e. actually publish a batch)\n ## when it has this many PubSub messages. If send_batched is true,\n ## this is ignored and treated as if it were 1.\n # publish_count_threshold = 1000\n\n ## Optional. Send a request to PubSub (i.e. actually publish a batch)\n ## when it has this many PubSub messages. If send_batched is true,\n ## this is ignored and treated as if it were 1\n # publish_byte_threshold = 1000000\n\n ## Optional. 
Specifically configures requests made to the PubSub API.\n # publish_num_go_routines = 2\n\n ## Optional. Specifies a timeout for requests to the PubSub API.\n # publish_timeout = \"30s\"\n\n ## Optional. If true, published PubSub message data will be base64-encoded.\n # base64_data = false\n\n ## Optional. PubSub attributes to add to metrics.\n # [[inputs.pubsub.attributes]]\n # my_attr = \"tag_value\"\n\n" - }, - { - "type": "output", - "name": "influxdb_v2", - "description": "Configuration for sending metrics to InfluxDB", - "config": "# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:9999\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n" - }, - { - "type": "output", - "name": "cratedb", - "description": "Configuration for CrateDB to send metrics to.", - "config": "# Configuration for CrateDB to send metrics to.\n[[outputs.cratedb]]\n # alias=\"cratedb\"\n # A github.com/jackc/pgx connection string.\n # See https://godoc.org/github.com/jackc/pgx#ParseDSN\n url = \"postgres://user:password@localhost/schema?sslmode=disable\"\n # Timeout for all CrateDB queries.\n timeout = \"5s\"\n # Name of the table to store metrics in.\n table = \"metrics\"\n # If true, and the metrics table does not exist, create it automatically.\n table_create = true\n\n" - }, - { - "type": "output", - "name": "kafka", - "description": "Configuration for the Kafka server to send metrics to", - "config": "# Configuration for the Kafka server to send metrics to\n[[outputs.kafka]]\n # alias=\"kafka\"\n ## URLs of kafka brokers\n brokers = [\"localhost:9092\"]\n ## Kafka topic for producer messages\n topic = \"telegraf\"\n\n ## Optional Client id\n # client_id = \"Telegraf\"\n\n ## Set the minimal supported Kafka version. Setting this enables the use of new\n ## Kafka features and APIs. 
Of particular interest, lz4 compression\n ## requires at least version 0.10.0.0.\n ## ex: version = \"1.1.0\"\n # version = \"\"\n\n ## Optional topic suffix configuration.\n ## If the section is omitted, no suffix is used.\n ## Following topic suffix methods are supported:\n ## measurement - suffix equals to separator + measurement's name\n ## tags - suffix equals to separator + specified tags' values\n ## interleaved with separator\n\n ## Suffix equals to \"_\" + measurement name\n # [outputs.kafka.topic_suffix]\n # method = \"measurement\"\n # separator = \"_\"\n\n ## Suffix equals to \"__\" + measurement's \"foo\" tag value.\n ## If there's no such a tag, suffix equals to an empty string\n # [outputs.kafka.topic_suffix]\n # method = \"tags\"\n # keys = [\"foo\"]\n # separator = \"__\"\n\n ## Suffix equals to \"_\" + measurement's \"foo\" and \"bar\"\n ## tag values, separated by \"_\". If there is no such tags,\n ## their values treated as empty strings.\n # [outputs.kafka.topic_suffix]\n # method = \"tags\"\n # keys = [\"foo\", \"bar\"]\n # separator = \"_\"\n\n ## Telegraf tag to use as a routing key\n ## ie, if this tag exists, its value will be used as the routing key\n routing_tag = \"host\"\n\n ## Static routing key. Used when no routing_tag is set or as a fallback\n ## when the tag specified in routing tag is not found. If set to \"random\",\n ## a random value will be generated for each message.\n ## ex: routing_key = \"random\"\n ## routing_key = \"telegraf\"\n # routing_key = \"\"\n\n ## CompressionCodec represents the various compression codecs recognized by\n ## Kafka in messages.\n ## 0 : No compression\n ## 1 : Gzip compression\n ## 2 : Snappy compression\n ## 3 : LZ4 compression\n # compression_codec = 0\n\n ## RequiredAcks is used in Produce Requests to tell the broker how many\n ## replica acknowledgements it must see before responding\n ## 0 : the producer never waits for an acknowledgement from the broker.\n ## This option provides the lowest latency but the weakest durability\n ## guarantees (some data will be lost when a server fails).\n ## 1 : the producer gets an acknowledgement after the leader replica has\n ## received the data. This option provides better durability as the\n ## client waits until the server acknowledges the request as successful\n ## (only messages that were written to the now-dead leader but not yet\n ## replicated will be lost).\n ## -1: the producer gets an acknowledgement after all in-sync replicas have\n ## received the data. This option provides the best durability, we\n ## guarantee that no messages will be lost as long as at least one in\n ## sync replica remains.\n # required_acks = -1\n\n ## The maximum number of times to retry sending a metric before failing\n ## until the next flush.\n # max_retry = 3\n\n ## The maximum permitted size of a message. 
Should be set equal to or\n ## smaller than the broker's 'message.max.bytes'.\n # max_message_bytes = 1000000\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## Optional SASL Config\n # sasl_username = \"kafka\"\n # sasl_password = \"secret\"\n\n ## Data format to output.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md\n # data_format = \"influx\"\n\n" - }, - { - "type": "output", - "name": "librato", - "description": "Configuration for Librato API to send metrics to.", - "config": "# Configuration for Librato API to send metrics to.\n[[outputs.librato]]\n # alias=\"librato\"\n ## Librator API Docs\n ## http://dev.librato.com/v1/metrics-authentication\n ## Librato API user\n api_user = \"telegraf@influxdb.com\" # required.\n ## Librato API token\n api_token = \"my-secret-token\" # required.\n ## Debug\n # debug = false\n ## Connection timeout.\n # timeout = \"5s\"\n ## Output source Template (same as graphite buckets)\n ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite\n ## This template is used in librato's source (not metric's name)\n template = \"host\"\n\n\n" - }, - { - "type": "output", - "name": "elasticsearch", - "description": "Configuration for Elasticsearch to send metrics to.", - "config": "# Configuration for Elasticsearch to send metrics to.\n[[outputs.elasticsearch]]\n # alias=\"elasticsearch\"\n ## The full HTTP endpoint URL for your Elasticsearch instance\n ## Multiple urls can be specified as part of the same cluster,\n ## this means that only ONE of the urls will be written to each interval.\n urls = [ \"http://node1.es.example.com:9200\" ] # required.\n ## Elasticsearch client timeout, defaults to \"5s\" if not set.\n timeout = \"5s\"\n ## Set to true to ask Elasticsearch a list of all cluster nodes,\n ## thus it is not necessary to list all nodes in the urls config option.\n enable_sniffer = false\n ## Set the interval to check if the Elasticsearch nodes are available\n ## Setting to \"0s\" will disable the health check (not recommended in production)\n health_check_interval = \"10s\"\n ## HTTP basic authentication details\n # username = \"telegraf\"\n # password = \"mypassword\"\n\n ## Index Config\n ## The target index for metrics (Elasticsearch will create if it not exists).\n ## You can use the date specifiers below to create indexes per time frame.\n ## The metric timestamp will be used to decide the destination index name\n # %Y - year (2016)\n # %y - last two digits of year (00..99)\n # %m - month (01..12)\n # %d - day of month (e.g., 01)\n # %H - hour (00..23)\n # %V - week of the year (ISO week) (01..53)\n ## Additionally, you can specify a tag name using the notation {{tag_name}}\n ## which will be used as part of the index name. 
If the tag does not exist,\n ## the default tag value will be used.\n # index_name = \"telegraf-{{host}}-%Y.%m.%d\"\n # default_tag_value = \"none\"\n index_name = \"telegraf-%Y.%m.%d\" # required.\n\n ## Optional TLS Config\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain \u0026 host verification\n # insecure_skip_verify = false\n\n ## Template Config\n ## Set to true if you want telegraf to manage its index template.\n ## If enabled it will create a recommended index template for telegraf indexes\n manage_template = true\n ## The template name used for telegraf indexes\n template_name = \"telegraf\"\n ## Set to true if you want telegraf to overwrite an existing template\n overwrite_template = false\n\n" - }, - { - "type": "output", - "name": "instrumental", - "description": "Configuration for sending metrics to an Instrumental project", - "config": "# Configuration for sending metrics to an Instrumental project\n[[outputs.instrumental]]\n # alias=\"instrumental\"\n ## Project API Token (required)\n api_token = \"API Token\" # required\n ## Prefix the metrics with a given name\n prefix = \"\"\n ## Stats output template (Graphite formatting)\n ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite\n template = \"host.tags.measurement.field\"\n ## Timeout in seconds to connect\n timeout = \"2s\"\n ## Display Communcation to Instrumental\n debug = false\n\n" - } - ] -} -` -var availableProcessors = `{ - "version": "1.13.0", - "os": "linux", - "plugins": [ - { - "type": "processor", - "name": "converter", - "description": "Convert values to another metric value type", - "config": "# Convert values to another metric value type\n[[processors.converter]]\n # alias=\"converter\"\n ## Tags to convert\n ##\n ## The table key determines the target type, and the array of key-values\n ## select the keys to convert. The array may contain globs.\n ## \u003ctarget-type\u003e = [\u003ctag-key\u003e...]\n [processors.converter.tags]\n string = []\n integer = []\n unsigned = []\n boolean = []\n float = []\n\n ## Fields to convert\n ##\n ## The table key determines the target type, and the array of key-values\n ## select the keys to convert. 
The array may contain globs.\n ## \u003ctarget-type\u003e = [\u003cfield-key\u003e...]\n [processors.converter.fields]\n tag = []\n string = []\n integer = []\n unsigned = []\n boolean = []\n float = []\n\n" - }, - { - "type": "processor", - "name": "override", - "description": "Apply metric modifications using override semantics.", - "config": "# Apply metric modifications using override semantics.\n[[processors.override]]\n # alias=\"override\"\n ## All modifications on inputs and aggregators can be overridden:\n # name_override = \"new_name\"\n # name_prefix = \"new_name_prefix\"\n # name_suffix = \"new_name_suffix\"\n\n ## Tags to be added (all values must be strings)\n # [processors.override.tags]\n # additional_tag = \"tag_value\"\n\n" - }, - { - "type": "processor", - "name": "strings", - "description": "Perform string processing on tags, fields, and measurements", - "config": "# Perform string processing on tags, fields, and measurements\n[[processors.strings]]\n # alias=\"strings\"\n ## Convert a tag value to uppercase\n # [[processors.strings.uppercase]]\n # tag = \"method\"\n\n ## Convert a field value to lowercase and store in a new field\n # [[processors.strings.lowercase]]\n # field = \"uri_stem\"\n # dest = \"uri_stem_normalised\"\n\n ## Trim leading and trailing whitespace using the default cutset\n # [[processors.strings.trim]]\n # field = \"message\"\n\n ## Trim leading characters in cutset\n # [[processors.strings.trim_left]]\n # field = \"message\"\n # cutset = \"\\t\"\n\n ## Trim trailing characters in cutset\n # [[processors.strings.trim_right]]\n # field = \"message\"\n # cutset = \"\\r\\n\"\n\n ## Trim the given prefix from the field\n # [[processors.strings.trim_prefix]]\n # field = \"my_value\"\n # prefix = \"my_\"\n\n ## Trim the given suffix from the field\n # [[processors.strings.trim_suffix]]\n # field = \"read_count\"\n # suffix = \"_count\"\n\n ## Replace all non-overlapping instances of old with new\n # [[processors.strings.replace]]\n # measurement = \"*\"\n # old = \":\"\n # new = \"_\"\n\n ## Trims strings based on width\n # [[processors.strings.left]]\n # field = \"message\"\n # width = 10\n\n ## Decode a base64 encoded utf-8 string\n # [[processors.strings.base64decode]]\n # field = \"message\"\n\n" - }, - { - "type": "processor", - "name": "tag_limit", - "description": "Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.", - "config": "# Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.\n[[processors.tag_limit]]\n # alias=\"tag_limit\"\n ## Maximum number of tags to preserve\n limit = 10\n\n ## List of tags to preferentially preserve\n keep = [\"foo\", \"bar\", \"baz\"]\n\n" - }, - { - "type": "processor", - "name": "date", - "description": "Dates measurements, tags, and fields that pass through this filter.", - "config": "# Dates measurements, tags, and fields that pass through this filter.\n[[processors.date]]\n # alias=\"date\"\n ## New tag to create\n tag_key = \"month\"\n\n ## Date format string, must be a representation of the Go \"reference time\"\n ## which is \"Mon Jan 2 15:04:05 -0700 MST 2006\".\n date_format = \"Jan\"\n\n" - }, - { - "type": "processor", - "name": "parser", - "description": "Parse a value in a specified field/tag(s) and add the result in a new metric", - "config": "# Parse a value in a specified field/tag(s) and add the result in a new metric\n[[processors.parser]]\n # alias=\"parser\"\n 
## The name of the fields whose value will be parsed.\n parse_fields = []\n\n ## If true, incoming metrics are not emitted.\n drop_original = false\n\n ## If set to override, emitted metrics will be merged by overriding the\n ## original metric using the newly parsed metrics.\n merge = \"override\"\n\n ## The dataformat to be read from files\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n\n" - }, - { - "type": "processor", - "name": "pivot", - "description": "Rotate a single valued metric into a multi field metric", - "config": "# Rotate a single valued metric into a multi field metric\n[[processors.pivot]]\n # alias=\"pivot\"\n ## Tag to use for naming the new field.\n tag_key = \"name\"\n ## Field to use as the value of the new field.\n value_key = \"value\"\n\n" - }, - { - "type": "processor", - "name": "printer", - "description": "Print all metrics that pass through this filter.", - "config": "# Print all metrics that pass through this filter.\n[[processors.printer]]\n # alias=\"printer\"\n\n" - }, - { - "type": "processor", - "name": "clone", - "description": "Clone metrics and apply modifications.", - "config": "# Clone metrics and apply modifications.\n[[processors.clone]]\n # alias=\"clone\"\n ## All modifications on inputs and aggregators can be overridden:\n # name_override = \"new_name\"\n # name_prefix = \"new_name_prefix\"\n # name_suffix = \"new_name_suffix\"\n\n ## Tags to be added (all values must be strings)\n # [processors.clone.tags]\n # additional_tag = \"tag_value\"\n\n" - }, - { - "type": "processor", - "name": "enum", - "description": "Map enum values according to given table.", - "config": "# Map enum values according to given table.\n[[processors.enum]]\n # alias=\"enum\"\n [[processors.enum.mapping]]\n ## Name of the field to map\n field = \"status\"\n\n ## Name of the tag to map\n # tag = \"status\"\n\n ## Destination tag or field to be used for the mapped value. By default the\n ## source tag or field is used, overwriting the original value.\n dest = \"status_code\"\n\n ## Default value to be used for all values not contained in the mapping\n ## table. When unset, the unmodified value for the field will be used if no\n ## match is found.\n # default = 0\n\n ## Table of mappings\n [processors.enum.mapping.value_mappings]\n green = 1\n amber = 2\n red = 3\n\n" - }, - { - "type": "processor", - "name": "rename", - "description": "Rename measurements, tags, and fields that pass through this filter.", - "config": "# Rename measurements, tags, and fields that pass through this filter.\n[[processors.rename]]\n # alias=\"rename\"\n\n" - }, - { - "type": "processor", - "name": "topk", - "description": "Print all metrics that pass through this filter.", - "config": "# Print all metrics that pass through this filter.\n[[processors.topk]]\n # alias=\"topk\"\n ## How many seconds between aggregations\n # period = 10\n\n ## How many top metrics to return\n # k = 10\n\n ## Over which tags should the aggregation be done. Globs can be specified, in\n ## which case any tag matching the glob will aggregated over. If set to an\n ## empty list is no aggregation over tags is done\n # group_by = ['*']\n\n ## Over which fields are the top k are calculated\n # fields = [\"value\"]\n\n ## What aggregation to use. 
Options: sum, mean, min, max\n # aggregation = \"mean\"\n\n ## Instead of the top k largest metrics, return the bottom k lowest metrics\n # bottomk = false\n\n ## The plugin assigns each metric a GroupBy tag generated from its name and\n ## tags. If this setting is different than \"\" the plugin will add a\n ## tag (which name will be the value of this setting) to each metric with\n ## the value of the calculated GroupBy tag. Useful for debugging\n # add_groupby_tag = \"\"\n\n ## These settings provide a way to know the position of each metric in\n ## the top k. The 'add_rank_field' setting allows to specify for which\n ## fields the position is required. If the list is non empty, then a field\n ## will be added to each and every metric for each string present in this\n ## setting. This field will contain the ranking of the group that\n ## the metric belonged to when aggregated over that field.\n ## The name of the field will be set to the name of the aggregation field,\n ## suffixed with the string '_topk_rank'\n # add_rank_fields = []\n\n ## These settings provide a way to know what values the plugin is generating\n ## when aggregating metrics. The 'add_agregate_field' setting allows to\n ## specify for which fields the final aggregation value is required. If the\n ## list is non empty, then a field will be added to each every metric for\n ## each field present in this setting. This field will contain\n ## the computed aggregation for the group that the metric belonged to when\n ## aggregated over that field.\n ## The name of the field will be set to the name of the aggregation field,\n ## suffixed with the string '_topk_aggregate'\n # add_aggregate_fields = []\n\n" - }, - { - "type": "processor", - "name": "regex", - "description": "Transforms tag and field values with regex pattern", - "config": "# Transforms tag and field values with regex pattern\n[[processors.regex]]\n # alias=\"regex\"\n ## Tag and field conversions defined in a separate sub-tables\n # [[processors.regex.tags]]\n # ## Tag to change\n # key = \"resp_code\"\n # ## Regular expression to match on a tag value\n # pattern = \"^(\\\\d)\\\\d\\\\d$\"\n # ## Matches of the pattern will be replaced with this string. 
Use ${1}\n # ## notation to use the text of the first submatch.\n # replacement = \"${1}xx\"\n\n # [[processors.regex.fields]]\n # ## Field to change\n # key = \"request\"\n # ## All the power of the Go regular expressions available here\n # ## For example, named subgroups\n # pattern = \"^/api(?P\u003cmethod\u003e/[\\\\w/]+)\\\\S*\"\n # replacement = \"${method}\"\n # ## If result_key is present, a new field will be created\n # ## instead of changing existing field\n # result_key = \"method\"\n\n ## Multiple conversions may be applied for one field sequentially\n ## Let's extract one more value\n # [[processors.regex.fields]]\n # key = \"request\"\n # pattern = \".*category=(\\\\w+).*\"\n # replacement = \"${1}\"\n # result_key = \"search_category\"\n\n" - }, - { - "type": "processor", - "name": "unpivot", - "description": "Rotate multi field metric into several single field metrics", - "config": "# Rotate multi field metric into several single field metrics\n[[processors.unpivot]]\n # alias=\"unpivot\"\n ## Tag to use for the name.\n tag_key = \"name\"\n ## Field to use for the name of the value.\n value_key = \"value\"\n\n" - } - ] -} -` -var availableAggregators = `{ - "version": "1.13.0", - "os": "linux", - "plugins": [ - { - "type": "aggregator", - "name": "merge", - "description": "Merge metrics into multifield metrics by series key", - "config": "# Merge metrics into multifield metrics by series key\n[[aggregators.merge]]\n # alias=\"merge\"\n" - }, - { - "type": "aggregator", - "name": "minmax", - "description": "Keep the aggregate min/max of each metric passing through.", - "config": "# Keep the aggregate min/max of each metric passing through.\n[[aggregators.minmax]]\n # alias=\"minmax\"\n ## General Aggregator Arguments:\n ## The period on which to flush \u0026 clear the aggregator.\n period = \"30s\"\n ## If true, the original metric will be dropped by the\n ## aggregator and will not get sent to the output plugins.\n drop_original = false\n\n" - }, - { - "type": "aggregator", - "name": "valuecounter", - "description": "Count the occurrence of values in fields.", - "config": "# Count the occurrence of values in fields.\n[[aggregators.valuecounter]]\n # alias=\"valuecounter\"\n ## General Aggregator Arguments:\n ## The period on which to flush \u0026 clear the aggregator.\n period = \"30s\"\n ## If true, the original metric will be dropped by the\n ## aggregator and will not get sent to the output plugins.\n drop_original = false\n ## The fields for which the values will be counted\n fields = []\n\n" - }, - { - "type": "aggregator", - "name": "basicstats", - "description": "Keep the aggregate basicstats of each metric passing through.", - "config": "# Keep the aggregate basicstats of each metric passing through.\n[[aggregators.basicstats]]\n # alias=\"basicstats\"\n ## The period on which to flush \u0026 clear the aggregator.\n period = \"30s\"\n\n ## If true, the original metric will be dropped by the\n ## aggregator and will not get sent to the output plugins.\n drop_original = false\n\n ## Configures which basic stats to push as fields\n # stats = [\"count\", \"min\", \"max\", \"mean\", \"stdev\", \"s2\", \"sum\"]\n\n" - }, - { - "type": "aggregator", - "name": "final", - "description": "Report the final metric of a series", - "config": "# Report the final metric of a series\n[[aggregators.final]]\n # alias=\"final\"\n ## The period on which to flush \u0026 clear the aggregator.\n period = \"30s\"\n ## If true, the original metric will be dropped by the\n ## aggregator and 
will not get sent to the output plugins.\n drop_original = false\n\n ## The time that a series is not updated until considering it final.\n series_timeout = \"5m\"\n\n" - }, - { - "type": "aggregator", - "name": "histogram", - "description": "Create aggregate histograms.", - "config": "# Create aggregate histograms.\n[[aggregators.histogram]]\n # alias=\"histogram\"\n ## The period in which to flush the aggregator.\n period = \"30s\"\n\n ## If true, the original metric will be dropped by the\n ## aggregator and will not get sent to the output plugins.\n drop_original = false\n\n ## If true, the histogram will be reset on flush instead\n ## of accumulating the results.\n reset = false\n\n ## Example config that aggregates all fields of the metric.\n # [[aggregators.histogram.config]]\n # ## The set of buckets.\n # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]\n # ## The name of metric.\n # measurement_name = \"cpu\"\n\n ## Example config that aggregates only specific fields of the metric.\n # [[aggregators.histogram.config]]\n # ## The set of buckets.\n # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]\n # ## The name of metric.\n # measurement_name = \"diskio\"\n # ## The concrete fields of metric\n # fields = [\"io_time\", \"read_time\", \"write_time\"]\n\n" - } - ] -} -` diff --git a/telegraf/plugins/plugins_test.go b/telegraf/plugins/plugins_test.go deleted file mode 100644 index 10910d3ce8e..00000000000 --- a/telegraf/plugins/plugins_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package plugins - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestAvailablePlugins(t *testing.T) { - cases, err := AvailablePlugins() - require.NoError(t, err) - require.Equal(t, 223, len(cases.Plugins)) -} - -func TestAvailableBundles(t *testing.T) { - cases, err := AvailableBundles() - require.NoError(t, err) - require.Equal(t, 1, len(cases.Plugins)) -} - -func TestGetPlugin(t *testing.T) { - tests := []struct { - typ string - name string - expected string - ok bool - }{ - { - typ: "input", - name: "cpu", - expected: "# Read metrics about cpu usage\n[[inputs.cpu]]\n # alias=\"cpu\"\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n\n", - ok: true, - }, - { - typ: "output", - name: "file", - expected: "# Send telegraf metrics to file(s)\n[[outputs.file]]\n # alias=\"file\"\n ## Files to write to, \"stdout\" is a specially handled file.\n files = [\"stdout\", \"/tmp/metrics.out\"]\n\n ## Use batch serialization format instead of line based delimiting. The\n ## batch format allows for the production of non line based output formats and\n ## may more effiently encode metric groups.\n # use_batch_format = false\n\n ## The file will be rotated after the time interval specified. When set\n ## to 0 no time based rotation is performed.\n # rotation_interval = \"0d\"\n\n ## The logfile will be rotated when it becomes larger than the specified\n ## size. 
When set to 0 no size based rotation is performed.\n # rotation_max_size = \"0MB\"\n\n ## Maximum number of rotated archives to keep, any older logs are deleted.\n ## If set to -1, no archives are removed.\n # rotation_max_archives = 5\n\n ## Data format to output.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md\n data_format = \"influx\"\n\n", - ok: true, - }, - { - typ: "processor", - name: "converter", - expected: "# Convert values to another metric value type\n[[processors.converter]]\n # alias=\"converter\"\n ## Tags to convert\n ##\n ## The table key determines the target type, and the array of key-values\n ## select the keys to convert. The array may contain globs.\n ## = [...]\n [processors.converter.tags]\n string = []\n integer = []\n unsigned = []\n boolean = []\n float = []\n\n ## Fields to convert\n ##\n ## The table key determines the target type, and the array of key-values\n ## select the keys to convert. The array may contain globs.\n ## = [...]\n [processors.converter.fields]\n tag = []\n string = []\n integer = []\n unsigned = []\n boolean = []\n float = []\n\n", - ok: true, - }, - { - typ: "aggregator", - name: "merge", - expected: "# Merge metrics into multifield metrics by series key\n[[aggregators.merge]]\n # alias=\"merge\"\n", - ok: true, - }, - { - typ: "bundle", - name: "System Bundle", - expected: "# Read metrics about cpu usage\n[[inputs.cpu]]\n # alias=\"cpu\"\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics.\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states.\n report_active = false\n# Read metrics about swap memory usage\n[[inputs.swap]]\n # alias=\"swap\"\n# Read metrics about disk usage by mount point\n[[inputs.disk]]\n # alias=\"disk\"\n ## By default stats will be gathered for all mount points.\n ## Set mount_points will restrict the stats to only the specified mount points.\n # mount_points = [\"/\"]\n\n ## Ignore mount points by filesystem type.\n ignore_fs = [\"tmpfs\", \"devtmpfs\", \"devfs\", \"iso9660\", \"overlay\", \"aufs\", \"squashfs\"]\n# Read metrics about memory usage\n[[inputs.mem]]\n # alias=\"mem\"\n", - ok: true, - }, - { - typ: "input", - name: "not-a-real-plugin", - expected: "", - ok: false, - }, - } - for _, test := range tests { - p, ok := GetPlugin(test.typ, test.name) - require.Equal(t, test.ok, ok) - if ok { - require.Equal(t, test.expected, p.Config) - } - } -} - -func TestListAvailablePlugins(t *testing.T) { - tests := []struct { - typ string - expected int - err string - }{ - { - typ: "input", - expected: 170, - }, - { - typ: "output", - expected: 33, - }, - { - typ: "processor", - expected: 14, - }, - { - typ: "aggregator", - expected: 6, - }, - { - typ: "bundle", - expected: 1, - }, - { - typ: "other", - expected: 0, - err: "unknown plugin type 'other'", - }, - } - for _, test := range tests { - p, err := ListAvailablePlugins(test.typ) - if err != nil { - require.Equal(t, test.err, err.Error()) - } else { - require.Equal(t, test.expected, len(p.Plugins)) - } - } -} diff --git a/telegraf/plugins/type.go b/telegraf/plugins/type.go deleted file mode 100644 index 58874ac1195..00000000000 --- a/telegraf/plugins/type.go +++ /dev/null @@ -1,24 +0,0 @@ -package plugins - -// Type is a telegraf plugin type. 
-type Type string - -// available types. -const ( - Input Type = "input" // Input is an input plugin. - Output Type = "output" // Output is an output plugin. - Processor Type = "processor" // Processor is a processor plugin. - Aggregator Type = "aggregator" // Aggregator is an aggregator plugin. -) - -// Config interface for all plugins. -type Config interface { - // TOML encodes to toml string - TOML() string - // UnmarshalTOML decodes the parsed data to the object - UnmarshalTOML(data interface{}) error - // Type is the plugin type - Type() Type - // PluginName is the string value of telegraf plugin package name. - PluginName() string -} diff --git a/telegraf/service/telegraf.go b/telegraf/service/telegraf.go deleted file mode 100644 index 75f5085e9ef..00000000000 --- a/telegraf/service/telegraf.go +++ /dev/null @@ -1,400 +0,0 @@ -package service - -import ( - "context" - "encoding/json" - "fmt" - - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/telegraf" -) - -var ( - // ErrTelegrafNotFound is used when the telegraf configuration is not found. - ErrTelegrafNotFound = &errors.Error{ - Msg: "telegraf configuration not found", - Code: errors.ENotFound, - } - - // ErrInvalidTelegrafID is used when the service was provided - // an invalid ID format. - ErrInvalidTelegrafID = &errors.Error{ - Code: errors.EInvalid, - Msg: "provided telegraf configuration ID has invalid format", - } - - // ErrInvalidTelegrafOrgID is the error message for a missing or invalid organization ID. - ErrInvalidTelegrafOrgID = &errors.Error{ - Code: errors.EEmptyValue, - Msg: "provided telegraf configuration organization ID is missing or invalid", - } -) - -// UnavailableTelegrafServiceError is used if we aren't able to interact with the -// store, it means the store is not available at the moment (e.g. network). -func UnavailableTelegrafServiceError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("Unable to connect to telegraf service. Please try again; Err: %v", err), - Op: "kv/telegraf", - } -} - -// InternalTelegrafServiceError is used when the error comes from an -// internal system. -func InternalTelegrafServiceError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("Unknown internal telegraf data error; Err: %v", err), - Op: "kv/telegraf", - } -} - -// CorruptTelegrafError is used when the config cannot be unmarshalled from the -// bytes stored in the kv. -func CorruptTelegrafError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("Unknown internal telegraf data error; Err: %v", err), - Op: "kv/telegraf", - } -} - -// ErrUnprocessableTelegraf is used when a telegraf is not able to be converted to JSON. -func ErrUnprocessableTelegraf(err error) *errors.Error { - return &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: fmt.Sprintf("unable to convert telegraf configuration into JSON; Err %v", err), - } -} - -var ( - telegrafBucket = []byte("telegrafv1") - telegrafPluginsBucket = []byte("telegrafPluginsv1") -) - -var _ influxdb.TelegrafConfigStore = (*Service)(nil) - -// Service is a telegraf config service. 
-type Service struct { - kv kv.Store - - byOrganisationIndex *kv.Index - - IDGenerator platform.IDGenerator -} - -// New constructs and configures a new telegraf config service. -func New(store kv.Store) *Service { - return &Service{ - kv: store, - byOrganisationIndex: kv.NewIndex( - telegraf.ByOrganizationIndexMapping, - kv.WithIndexReadPathEnabled, - ), - IDGenerator: snowflake.NewIDGenerator(), - } -} - -func (s *Service) telegrafBucket(tx kv.Tx) (kv.Bucket, error) { - b, err := tx.Bucket(telegrafBucket) - if err != nil { - return nil, UnavailableTelegrafServiceError(err) - } - return b, nil -} - -func (s *Service) telegrafPluginsBucket(tx kv.Tx) (kv.Bucket, error) { - b, err := tx.Bucket(telegrafPluginsBucket) - if err != nil { - return nil, UnavailableTelegrafServiceError(err) - } - return b, nil -} - -// FindTelegrafConfigByID returns a single telegraf config by ID. -func (s *Service) FindTelegrafConfigByID(ctx context.Context, id platform.ID) (*influxdb.TelegrafConfig, error) { - var ( - tc *influxdb.TelegrafConfig - err error - ) - - err = s.kv.View(ctx, func(tx kv.Tx) error { - tc, err = s.findTelegrafConfigByID(ctx, tx, id) - return err - }) - - return tc, err -} - -func (s *Service) findTelegrafConfigByID(ctx context.Context, tx kv.Tx, id platform.ID) (*influxdb.TelegrafConfig, error) { - encID, err := id.Encode() - if err != nil { - return nil, ErrInvalidTelegrafID - } - - bucket, err := s.telegrafBucket(tx) - if err != nil { - return nil, err - } - - v, err := bucket.Get(encID) - if kv.IsNotFound(err) { - return nil, ErrTelegrafNotFound - } - if err != nil { - return nil, InternalTelegrafServiceError(err) - } - - return unmarshalTelegraf(v) -} - -// FindTelegrafConfigs returns a list of telegraf configs that match filter and the total count of matching telegraf configs. -// FindOptions are ignored. -func (s *Service) FindTelegrafConfigs(ctx context.Context, filter influxdb.TelegrafConfigFilter, opt ...influxdb.FindOptions) (tcs []*influxdb.TelegrafConfig, n int, err error) { - err = s.kv.View(ctx, func(tx kv.Tx) error { - tcs, n, err = s.findTelegrafConfigs(ctx, tx, filter) - return err - }) - return tcs, n, err -} - -func (s *Service) findTelegrafConfigs(ctx context.Context, tx kv.Tx, filter influxdb.TelegrafConfigFilter) ([]*influxdb.TelegrafConfig, int, error) { - var ( - tcs = make([]*influxdb.TelegrafConfig, 0) - ) - - visit := func(k, v []byte) (bool, error) { - var tc influxdb.TelegrafConfig - if err := json.Unmarshal(v, &tc); err != nil { - return false, err - } - - tcs = append(tcs, &tc) - - // stop cursing when limit is reached - return true, nil - } - - if filter.OrgID == nil { - // forward cursor entire bucket - bucket, err := s.telegrafBucket(tx) - if err != nil { - return nil, 0, err - } - - // cursors do not support numeric offset - // but we can at least constrain the response - // size by the offset + limit since we are - // not doing any other filtering - // REMOVE this cursor option if you do any - // other filtering - - cursor, err := bucket.ForwardCursor(nil) - if err != nil { - return nil, 0, err - } - - return tcs, len(tcs), kv.WalkCursor(ctx, cursor, visit) - } - - orgID, err := filter.OrgID.Encode() - if err != nil { - return nil, 0, err - } - - return tcs, len(tcs), s.byOrganisationIndex.Walk(ctx, tx, orgID, visit) -} - -// PutTelegrafConfig put a telegraf config to storage. 
-func (s *Service) PutTelegrafConfig(ctx context.Context, tc *influxdb.TelegrafConfig) error { - return s.kv.Update(ctx, func(tx kv.Tx) (err error) { - return s.putTelegrafConfig(ctx, tx, tc) - }) -} - -func (s *Service) putTelegrafConfig(ctx context.Context, tx kv.Tx, tc *influxdb.TelegrafConfig) error { - encodedID, err := tc.ID.Encode() - if err != nil { - return ErrInvalidTelegrafID - } - - if !tc.OrgID.Valid() { - return ErrInvalidTelegrafOrgID - } - - orgID, err := tc.OrgID.Encode() - if err != nil { - return err - } - - // insert index entry for orgID -> id - if err := s.byOrganisationIndex.Insert(tx, orgID, encodedID); err != nil { - return err - } - - v, err := marshalTelegraf(tc) - if err != nil { - return err - } - - bucket, err := s.telegrafBucket(tx) - if err != nil { - return err - } - - if err := bucket.Put(encodedID, v); err != nil { - return UnavailableTelegrafServiceError(err) - } - - return s.putTelegrafConfigStats(encodedID, tx, tc) -} - -func (s *Service) putTelegrafConfigStats(encodedID []byte, tx kv.Tx, tc *influxdb.TelegrafConfig) error { - bucket, err := s.telegrafPluginsBucket(tx) - if err != nil { - return err - } - - v, err := marshalTelegrafPlugins(tc.CountPlugins()) - if err != nil { - return err - } - - if err := bucket.Put(encodedID, v); err != nil { - return UnavailableTelegrafServiceError(err) - } - - return nil -} - -// CreateTelegrafConfig creates a new telegraf config and sets b.ID with the new identifier. -func (s *Service) CreateTelegrafConfig(ctx context.Context, tc *influxdb.TelegrafConfig, userID platform.ID) error { - return s.kv.Update(ctx, func(tx kv.Tx) error { - return s.createTelegrafConfig(ctx, tx, tc, userID) - }) -} - -func (s *Service) createTelegrafConfig(ctx context.Context, tx kv.Tx, tc *influxdb.TelegrafConfig, userID platform.ID) error { - tc.ID = s.IDGenerator.ID() - - return s.putTelegrafConfig(ctx, tx, tc) -} - -// UpdateTelegrafConfig updates a single telegraf config. -// Returns the new telegraf config after update. -func (s *Service) UpdateTelegrafConfig(ctx context.Context, id platform.ID, tc *influxdb.TelegrafConfig, userID platform.ID) (*influxdb.TelegrafConfig, error) { - var err error - err = s.kv.Update(ctx, func(tx kv.Tx) error { - tc, err = s.updateTelegrafConfig(ctx, tx, id, tc, userID) - return err - }) - return tc, err -} - -func (s *Service) updateTelegrafConfig(ctx context.Context, tx kv.Tx, id platform.ID, tc *influxdb.TelegrafConfig, userID platform.ID) (*influxdb.TelegrafConfig, error) { - current, err := s.findTelegrafConfigByID(ctx, tx, id) - if err != nil { - return nil, err - } - - // ID and OrganizationID can not be updated - tc.ID = current.ID - tc.OrgID = current.OrgID - err = s.putTelegrafConfig(ctx, tx, tc) - return tc, err -} - -// DeleteTelegrafConfig removes a telegraf config by ID. 
-func (s *Service) DeleteTelegrafConfig(ctx context.Context, id platform.ID) error { - return s.kv.Update(ctx, func(tx kv.Tx) error { - return s.deleteTelegrafConfig(ctx, tx, id) - }) -} - -func (s *Service) deleteTelegrafConfig(ctx context.Context, tx kv.Tx, id platform.ID) error { - tc, err := s.findTelegrafConfigByID(ctx, tx, id) - if err != nil { - return err - } - - encodedID, err := tc.ID.Encode() - if err != nil { - return ErrInvalidTelegrafID - } - - orgID, err := tc.OrgID.Encode() - if err != nil { - return err - } - - // removing index entry for orgID -> id - if err := s.byOrganisationIndex.Delete(tx, orgID, encodedID); err != nil { - return err - } - - bucket, err := s.telegrafBucket(tx) - if err != nil { - return err - } - - _, err = bucket.Get(encodedID) - if kv.IsNotFound(err) { - return ErrTelegrafNotFound - } - if err != nil { - return InternalTelegrafServiceError(err) - } - - if err := bucket.Delete(encodedID); err != nil { - return UnavailableTelegrafServiceError(err) - } - - return s.deleteTelegrafConfigStats(encodedID, tx) -} - -func (s *Service) deleteTelegrafConfigStats(encodedID []byte, tx kv.Tx) error { - bucket, err := s.telegrafPluginsBucket(tx) - if err != nil { - return err - } - - if err := bucket.Delete(encodedID); err != nil { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("Unable to connect to telegraf config stats service. Please try again; Err: %v", err), - Op: "kv/telegraf", - } - } - - return nil -} - -// unmarshalTelegraf turns the stored byte slice in the kv into a *influxdb.TelegrafConfig. -func unmarshalTelegraf(v []byte) (*influxdb.TelegrafConfig, error) { - t := &influxdb.TelegrafConfig{} - if err := json.Unmarshal(v, t); err != nil { - return nil, CorruptTelegrafError(err) - } - return t, nil -} - -func marshalTelegraf(tc *influxdb.TelegrafConfig) ([]byte, error) { - v, err := json.Marshal(tc) - if err != nil { - return nil, ErrUnprocessableTelegraf(err) - } - return v, nil -} - -func marshalTelegrafPlugins(plugins map[string]float64) ([]byte, error) { - v, err := json.Marshal(plugins) - if err != nil { - return nil, ErrUnprocessableTelegraf(err) - } - return v, nil -} diff --git a/telegraf/service/telegraf_test.go b/telegraf/service/telegraf_test.go deleted file mode 100644 index f64e7cb4142..00000000000 --- a/telegraf/service/telegraf_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package service_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kv" - telegrafservice "github.com/influxdata/influxdb/v2/telegraf/service" - telegraftesting "github.com/influxdata/influxdb/v2/telegraf/service/testing" - itesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestBoltTelegrafService(t *testing.T) { - telegraftesting.TelegrafConfigStore(initBoltTelegrafService, t) -} - -func initBoltTelegrafService(f telegraftesting.TelegrafConfigFields, t *testing.T) (influxdb.TelegrafConfigStore, func()) { - s, closeBolt := itesting.NewTestBoltStore(t) - svc, closeSvc := initTelegrafService(s, f, t) - return svc, func() { - closeSvc() - closeBolt() - } -} - -func initTelegrafService(s kv.SchemaStore, f telegraftesting.TelegrafConfigFields, t *testing.T) (influxdb.TelegrafConfigStore, func()) { - ctx := context.Background() - - svc := telegrafservice.New(s) - svc.IDGenerator = f.IDGenerator - - for _, tc := range f.TelegrafConfigs { - if err := svc.PutTelegrafConfig(ctx, tc); err != nil { - t.Fatalf("failed to populate telegraf config: 
%v", err) - } - } - - return svc, func() { - for _, tc := range f.TelegrafConfigs { - if err := svc.DeleteTelegrafConfig(ctx, tc.ID); err != nil { - t.Logf("failed to remove telegraf config: %v", err) - } - } - } -} diff --git a/telegraf/service/testing/testing.go b/telegraf/service/testing/testing.go deleted file mode 100644 index e33bf72a0ce..00000000000 --- a/telegraf/service/testing/testing.go +++ /dev/null @@ -1,859 +0,0 @@ -package testing - -import ( - "context" - "fmt" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/telegraf/plugins/inputs" - "github.com/influxdata/influxdb/v2/telegraf/plugins/outputs" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ( - oneID = platform.ID(1) - twoID = platform.ID(2) - threeID = platform.ID(3) - fourID = platform.ID(4) -) - -// TelegrafConfigFields includes prepopulated data for mapping tests. -type TelegrafConfigFields struct { - IDGenerator platform.IDGenerator - TelegrafConfigs []*influxdb.TelegrafConfig -} - -var telegrafCmpOptions = cmp.Options{ - cmpopts.IgnoreUnexported( - inputs.CPUStats{}, - inputs.MemStats{}, - inputs.Kubernetes{}, - inputs.File{}, - outputs.File{}, - outputs.InfluxDBV2{}, - ), - cmp.Transformer("Sort", func(in []*influxdb.TelegrafConfig) []*influxdb.TelegrafConfig { - out := append([]*influxdb.TelegrafConfig(nil), in...) - sort.Slice(out, func(i, j int) bool { - return out[i].ID > out[j].ID - }) - return out - }), -} - -type telegrafTestFactoryFunc func(TelegrafConfigFields, *testing.T) (influxdb.TelegrafConfigStore, func()) - -// TelegrafConfigStore tests all the service functions. -func TelegrafConfigStore( - init telegrafTestFactoryFunc, t *testing.T, -) { - tests := []struct { - name string - fn func(init telegrafTestFactoryFunc, - t *testing.T) - }{ - { - name: "CreateTelegrafConfig", - fn: CreateTelegrafConfig, - }, - { - name: "FindTelegrafConfigByID", - fn: FindTelegrafConfigByID, - }, - { - name: "FindTelegrafConfigs", - fn: FindTelegrafConfigs, - }, - { - name: "UpdateTelegrafConfig", - fn: UpdateTelegrafConfig, - }, - { - name: "DeleteTelegrafConfig", - fn: DeleteTelegrafConfig, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -// CreateTelegrafConfig testing. 
-func CreateTelegrafConfig( - init telegrafTestFactoryFunc, - t *testing.T, -) { - type args struct { - telegrafConfig *influxdb.TelegrafConfig - userID platform.ID - } - type wants struct { - err error - telegrafs []*influxdb.TelegrafConfig - } - - tests := []struct { - name string - fields TelegrafConfigFields - args args - wants wants - }{ - { - name: "create telegraf config without organization ID should error", - fields: TelegrafConfigFields{ - IDGenerator: mock.NewStaticIDGenerator(oneID), - TelegrafConfigs: []*influxdb.TelegrafConfig{}, - }, - args: args{ - telegrafConfig: &influxdb.TelegrafConfig{}, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EEmptyValue, - Msg: influxdb.ErrTelegrafConfigInvalidOrgID, - }, - }, - }, - { - name: "create telegraf config with empty set", - fields: TelegrafConfigFields{ - IDGenerator: mock.NewStaticIDGenerator(oneID), - TelegrafConfigs: []*influxdb.TelegrafConfig{}, - }, - args: args{ - userID: threeID, - telegrafConfig: &influxdb.TelegrafConfig{ - OrgID: twoID, - Name: "name1", - Config: "[[inputs.cpu]]\n[[outputs.influxdb_v2]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - wants: wants{ - telegrafs: []*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: twoID, - Name: "name1", - Config: "[[inputs.cpu]]\n[[outputs.influxdb_v2]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - }, - { - name: "basic create telegraf config", - fields: TelegrafConfigFields{ - IDGenerator: mock.NewStaticIDGenerator(twoID), - TelegrafConfigs: []*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: twoID, - Name: "tc1", - Config: "[[inputs.mem_stats]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - args: args{ - userID: threeID, - telegrafConfig: &influxdb.TelegrafConfig{ - OrgID: twoID, - Name: "name2", - Config: "[[inputs.cpu]]\n[[outputs.influxdb_v2]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, // for inmem test as it doesn't unmarshal.. - }, - }, - wants: wants{ - telegrafs: []*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: twoID, - Name: "tc1", - Config: "[[inputs.mem_stats]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - { - ID: twoID, - OrgID: twoID, - Name: "name2", - Config: "[[inputs.cpu]]\n[[outputs.influxdb_v2]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.CreateTelegrafConfig(ctx, tt.args.telegrafConfig, tt.args.userID) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - if tt.wants.err == nil && !tt.args.telegrafConfig.ID.Valid() { - t.Fatalf("telegraf config ID not set from CreateTelegrafConfig") - } - - if err != nil && tt.wants.err != nil { - if errors.ErrorCode(err) != errors.ErrorCode(tt.wants.err) { - t.Fatalf("expected error messages to match '%v' got '%v'", errors.ErrorCode(tt.wants.err), errors.ErrorCode(err)) - } - } - - filter := influxdb.TelegrafConfigFilter{} - tcs, _, err := s.FindTelegrafConfigs(ctx, filter) - if err != nil { - t.Fatalf("failed to retrieve telegraf configs: %v", err) - } - if diff := cmp.Diff(tcs, tt.wants.telegrafs, telegrafCmpOptions...); diff != "" { - t.Errorf("telegraf configs are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindTelegrafConfigByID testing. 
-func FindTelegrafConfigByID( - init telegrafTestFactoryFunc, - t *testing.T, -) { - type args struct { - id platform.ID - } - type wants struct { - err error - telegrafConfig *influxdb.TelegrafConfig - } - - tests := []struct { - name string - fields TelegrafConfigFields - args args - wants wants - }{ - { - name: "bad id", - fields: TelegrafConfigFields{ - TelegrafConfigs: []*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: twoID, - Name: "tc1", - Config: "[[inputs.cpu]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - { - ID: twoID, - OrgID: twoID, - Name: "tc2", - Config: "[[inputs.file]]\n[[inputs.mem]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - args: args{ - id: platform.ID(0), - }, - wants: wants{ - err: fmt.Errorf("provided telegraf configuration ID has invalid format"), - }, - }, - { - name: "not found", - fields: TelegrafConfigFields{ - TelegrafConfigs: []*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: twoID, - Name: "tc1", - Config: "[[inputs.cpu]]\n", - }, - { - ID: twoID, - OrgID: twoID, - Name: "tc2", - Config: "[[inputs.file]]\n[[inputs.mem]]\n", - }, - }, - }, - args: args{ - id: threeID, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "telegraf configuration not found", - }, - }, - }, - { - name: "basic find telegraf config by id", - fields: TelegrafConfigFields{ - TelegrafConfigs: []*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: threeID, - Name: "tc1", - Config: "[[inputs.cpu]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - { - ID: twoID, - OrgID: threeID, - Name: "tc2", - Config: "[[inputs.file]]\n[[inputs.mem]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - args: args{ - id: twoID, - }, - wants: wants{ - telegrafConfig: &influxdb.TelegrafConfig{ - ID: twoID, - OrgID: threeID, - Name: "tc2", - Config: "[[inputs.file]]\n[[inputs.mem]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - tc, err := s.FindTelegrafConfigByID(ctx, tt.args.id) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected errors to be equal '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if want, got := tt.wants.err.Error(), err.Error(); want != got { - t.Fatalf("expected error '%s' got '%s'", want, got) - } - } - if diff := cmp.Diff(tc, tt.wants.telegrafConfig, telegrafCmpOptions...); diff != "" { - t.Errorf("telegraf configs are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindTelegrafConfigs testing -func FindTelegrafConfigs( - init telegrafTestFactoryFunc, - t *testing.T, -) { - type args struct { - filter influxdb.TelegrafConfigFilter - opts []influxdb.FindOptions - } - - type wants struct { - telegrafConfigs []*influxdb.TelegrafConfig - err error - } - tests := []struct { - name string - fields TelegrafConfigFields - args args - wants wants - }{ - { - name: "find nothing (empty set)", - fields: TelegrafConfigFields{ - TelegrafConfigs: []*influxdb.TelegrafConfig{}, - }, - args: args{ - filter: influxdb.TelegrafConfigFilter{}, - }, - wants: wants{ - telegrafConfigs: []*influxdb.TelegrafConfig{}, - }, - }, - { - name: "find all telegraf configs (across orgs)", - fields: TelegrafConfigFields{ - IDGenerator: mock.NewIncrementingIDGenerator(oneID), - TelegrafConfigs: 
[]*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: twoID, - Name: "tc1", - Config: "[[inputs.cpu]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - { - ID: twoID, - OrgID: threeID, - Name: "tc2", - Config: "[[inputs.file]]\n[[inputs.mem]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - args: args{ - filter: influxdb.TelegrafConfigFilter{}, - }, - wants: wants{ - telegrafConfigs: []*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: twoID, - Name: "tc1", - Config: "[[inputs.cpu]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - { - ID: twoID, - OrgID: threeID, - Name: "tc2", - Config: "[[inputs.file]]\n[[inputs.mem]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - }, - { - name: "filter by organization only", - fields: TelegrafConfigFields{ - IDGenerator: mock.NewIncrementingIDGenerator(oneID), - TelegrafConfigs: []*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: fourID, - Name: "tc1", - Config: "[[inputs.cpu]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - { - ID: twoID, - OrgID: fourID, - Name: "tc2", - Config: "[[inputs.file]]\n[[inputs.mem]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - { - ID: threeID, - OrgID: oneID, - Name: "tc3", - Config: "[[inputs.cpu]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - { - ID: fourID, - OrgID: oneID, - Name: "tc4", - Config: "[[inputs.cpu]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - args: args{ - filter: influxdb.TelegrafConfigFilter{ - OrgID: &oneID, - }, - }, - wants: wants{ - telegrafConfigs: []*influxdb.TelegrafConfig{ - { - ID: threeID, - OrgID: oneID, - Name: "tc3", - Config: "[[inputs.cpu]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - { - ID: fourID, - OrgID: oneID, - Name: "tc4", - Config: "[[inputs.cpu]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - }, - { - name: "empty for provided org", - fields: TelegrafConfigFields{ - IDGenerator: mock.NewIncrementingIDGenerator(oneID), - TelegrafConfigs: []*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: threeID, - Name: "tc1", - Config: "[[inputs.cpu]]\n", - }, - { - ID: twoID, - OrgID: threeID, - Name: "tc2", - Config: "[[inputs.file]]\n[[inputs.mem]]\n", - }, - }, - }, - args: args{ - filter: influxdb.TelegrafConfigFilter{ - OrgID: &oneID, - }, - }, - wants: wants{ - telegrafConfigs: []*influxdb.TelegrafConfig{}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - tcs, _, err := s.FindTelegrafConfigs(ctx, tt.args.filter, tt.args.opts...) - if err != nil && tt.wants.err == nil { - t.Fatalf("expected errors to be nil got '%v'", err) - } - - require.Equal(t, tt.wants.err, err) - assert.Equal(t, tt.wants.telegrafConfigs, tcs) - }) - } -} - -// UpdateTelegrafConfig testing. 
-func UpdateTelegrafConfig(
- init telegrafTestFactoryFunc,
- t *testing.T,
-) {
- type args struct {
- userID platform.ID
- id platform.ID
- telegrafConfig *influxdb.TelegrafConfig
- }
-
- type wants struct {
- telegrafConfig *influxdb.TelegrafConfig
- err error
- }
- tests := []struct {
- name string
- fields TelegrafConfigFields
- args args
- wants wants
- }{
- {
- name: "can't find the id",
- fields: TelegrafConfigFields{
- TelegrafConfigs: []*influxdb.TelegrafConfig{
- {
- ID: oneID,
- OrgID: fourID,
- Name: "tc1",
- Config: "[[inputs.cpu]]\n",
- },
- {
- ID: twoID,
- OrgID: fourID,
- Name: "tc2",
- Config: "[[inputs.file]]\n[[inputs.mem]]\n",
- },
- },
- },
- args: args{
- userID: threeID,
- id: fourID,
- telegrafConfig: &influxdb.TelegrafConfig{
- Name: "tc2",
- Config: "[[inputs.file]]\n[[inputs.mem]]\n",
- },
- },
- wants: wants{
- err: &errors.Error{
- Code: errors.ENotFound,
- Msg: fmt.Sprintf("telegraf config with ID %v not found", fourID),
- },
- },
- },
- {
- name: "update ignores org id",
- fields: TelegrafConfigFields{
- TelegrafConfigs: []*influxdb.TelegrafConfig{
- {
- ID: oneID,
- OrgID: fourID,
- Name: "tc1",
- Config: "[[inputs.cpu]]\n",
- Metadata: map[string]interface{}{"buckets": []interface{}{}},
- },
- {
- ID: twoID,
- OrgID: fourID,
- Name: "tc2",
- Config: "[[inputs.file]]\n[[inputs.mem]]\n",
- Metadata: map[string]interface{}{"buckets": []interface{}{}},
- },
- },
- },
- args: args{
- userID: fourID,
- id: twoID,
- telegrafConfig: &influxdb.TelegrafConfig{
- OrgID: oneID, // notice this gets ignored, i.e., the resulting TelegrafConfig will have OrgID equal to fourID
- Name: "tc2",
- Config: "[[inputs.file]]\n[[inputs.mem]]\n",
- },
- },
- wants: wants{
- telegrafConfig: &influxdb.TelegrafConfig{
- ID: twoID,
- OrgID: fourID,
- Name: "tc2",
- Config: "[[inputs.file]]\n[[inputs.mem]]\n",
- },
- },
- },
- {
- name: "config update",
- fields: TelegrafConfigFields{
- TelegrafConfigs: []*influxdb.TelegrafConfig{
- {
- ID: oneID,
- OrgID: oneID,
- Name: "tc1",
- Config: "[[inputs.cpu]]\n",
- },
- {
- ID: twoID,
- OrgID: oneID,
- Name: "tc2",
- Config: "[[inputs.file]]\n[[inputs.kubernetes]]\n[[inputs.kubernetes]]\n",
- },
- },
- },
- args: args{
- userID: fourID,
- id: twoID,
- telegrafConfig: &influxdb.TelegrafConfig{
- Name: "tc2",
- Config: "[[inputs.file]]\n[[inputs.kubernetes]]\n[[inputs.kubernetes]]\n",
- },
- },
- wants: wants{
- telegrafConfig: &influxdb.TelegrafConfig{
- ID: twoID,
- OrgID: oneID,
- Name: "tc2",
- Config: "[[inputs.file]]\n[[inputs.kubernetes]]\n[[inputs.kubernetes]]\n",
- },
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- s, done := init(tt.fields, t)
- defer done()
- ctx := context.Background()
-
- tc, err := s.UpdateTelegrafConfig(ctx, tt.args.id,
- tt.args.telegrafConfig, tt.args.userID)
- if err != nil && tt.wants.err == nil {
- t.Fatalf("expected errors to be nil got '%v'", err)
- }
- if err != nil && tt.wants.err != nil {
- if errors.ErrorCode(err) != errors.ErrorCode(tt.wants.err) {
- t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err)
- }
- }
- if diff := cmp.Diff(tc, tt.wants.telegrafConfig, telegrafCmpOptions...); tt.wants.err == nil && diff != "" {
- t.Errorf("telegraf configs are different -got/+want\ndiff %s", diff)
- }
- })
- }
-}
-
-// DeleteTelegrafConfig testing.
-func DeleteTelegrafConfig( - init telegrafTestFactoryFunc, - t *testing.T, -) { - type args struct { - id platform.ID - } - - type wants struct { - telegrafConfigs []*influxdb.TelegrafConfig - err error - } - tests := []struct { - name string - fields TelegrafConfigFields - args args - wants wants - }{ - { - name: "bad id", - fields: TelegrafConfigFields{ - IDGenerator: mock.NewIncrementingIDGenerator(oneID), - TelegrafConfigs: []*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: fourID, - Name: "tc1", - Config: "[[inputs.cpu]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - { - ID: twoID, - OrgID: fourID, - Name: "tc2", - Config: "[[inputs.file]]\n[[inputs.mem]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - args: args{ - id: platform.ID(0), - }, - wants: wants{ - err: fmt.Errorf("provided telegraf configuration ID has invalid format"), - telegrafConfigs: []*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: fourID, - Name: "tc1", - Config: "[[inputs.cpu]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - { - ID: twoID, - OrgID: fourID, - Name: "tc2", - Config: "[[inputs.file]]\n[[inputs.mem]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - }, - { - name: "none existing config", - fields: TelegrafConfigFields{ - IDGenerator: mock.NewIncrementingIDGenerator(oneID), - TelegrafConfigs: []*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: threeID, - Name: "tc1", - Config: "[[inputs.cpu]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - { - ID: twoID, - OrgID: threeID, - Name: "tc2", - Config: "[[inputs.file]]\n[[inputs.mem]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - args: args{ - id: fourID, - }, - wants: wants{ - err: fmt.Errorf("telegraf configuration not found"), - telegrafConfigs: []*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: threeID, - Name: "tc1", - Config: "[[inputs.cpu]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - { - ID: twoID, - OrgID: threeID, - Name: "tc2", - Config: "[[inputs.file]]\n[[inputs.mem]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - }, - { - name: "regular delete", - fields: TelegrafConfigFields{ - IDGenerator: mock.NewIncrementingIDGenerator(oneID), - TelegrafConfigs: []*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: twoID, - Name: "tc1", - Config: "[[inputs.cpu]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - { - ID: twoID, - OrgID: twoID, - Name: "tc2", - Config: "[[inputs.file]]\n[[inputs.mem]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - args: args{ - id: twoID, - }, - wants: wants{ - telegrafConfigs: []*influxdb.TelegrafConfig{ - { - ID: oneID, - OrgID: twoID, - Name: "tc1", - Config: "[[inputs.cpu]]\n", - Metadata: map[string]interface{}{"buckets": []interface{}{}}, - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.DeleteTelegrafConfig(ctx, tt.args.id) - if err != nil && tt.wants.err == nil { - t.Fatalf("expected errors to be nil got '%v'", err) - } - - if err != nil && tt.wants.err != nil { - if want, got := tt.wants.err.Error(), err.Error(); want != got { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - } - - tcs, _, err := s.FindTelegrafConfigs(ctx, 
influxdb.TelegrafConfigFilter{}) - require.NoError(t, err) - assert.Equal(t, tt.wants.telegrafConfigs, tcs) - }) - } -} diff --git a/telegraf_test.go b/telegraf_test.go deleted file mode 100644 index 47729ef9783..00000000000 --- a/telegraf_test.go +++ /dev/null @@ -1,890 +0,0 @@ -package influxdb - -import ( - "encoding/json" - "fmt" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/telegraf/plugins" - "github.com/influxdata/influxdb/v2/telegraf/plugins/inputs" - "github.com/influxdata/influxdb/v2/telegraf/plugins/outputs" - "github.com/stretchr/testify/require" -) - -var telegrafCmpOptions = cmp.Options{ - cmpopts.IgnoreUnexported( - inputs.CPUStats{}, - inputs.Kernel{}, - inputs.Kubernetes{}, - inputs.File{}, - outputs.File{}, - outputs.InfluxDBV2{}, - ), - cmp.Transformer("Sort", func(in []*TelegrafConfig) []*TelegrafConfig { - out := append([]*TelegrafConfig(nil), in...) - sort.Slice(out, func(i, j int) bool { - return out[i].ID > out[j].ID - }) - return out - }), -} - -// tests backwards compatibillity with the current plugin aware system. -func TestTelegrafConfigJSONDecodeWithoutID(t *testing.T) { - s := `{ - "name": "config 2", - "agent": { - "collectionInterval": 120000 - }, - "plugins": [ - { - "name": "cpu", - "type": "input", - "comment": "cpu collect cpu metrics", - "config": {} - }, - { - "name": "kernel", - "type": "input" - }, - { - "name": "kubernetes", - "type": "input", - "config":{ - "url": "http://1.1.1.1:12" - } - }, - { - "name": "influxdb_v2", - "type": "output", - "comment": "3", - "config": { - "urls": [ - "http://127.0.0.1:9999" - ], - "token": "token1", - "organization": "org", - "bucket": "bucket" - } - } - ] - }` - want := &TelegrafConfig{ - Name: "config 2", - Config: `# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## Maximum number of unwritten metrics per output. Increasing this value - ## allows for longer periods of output downtime without dropping metrics at the - ## cost of higher maximum memory usage. - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. 
- ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - precision = "" - - ## Log at debug level. - # debug = false - ## Log only error level messages. - # quiet = false - - ## Log target controls the destination for logs and can be one of "file", - ## "stderr" or, on Windows, "eventlog". When set to "file", the output file - ## is determined by the "logfile" setting. - # logtarget = "file" - - ## Name of the file to be logged to when using the "file" logtarget. If set to - ## the empty string then logs are written to stderr. - # logfile = "" - - ## The logfile will be rotated after the time interval specified. When set - ## to 0 no time based rotation is performed. Logs are rotated only when - ## written to, if there is no log activity rotation may be delayed. - # logfile_rotation_interval = "0d" - - ## The logfile will be rotated when it becomes larger than the specified - ## size. When set to 0 no size based rotation is performed. - # logfile_rotation_max_size = "0MB" - - ## Maximum number of rotated archives to keep, any older logs are deleted. - ## If set to -1, no archives are removed. - # logfile_rotation_max_archives = 5 - - ## Pick a timezone to use when logging or type 'local' for local time. - ## Example: America/Chicago - # log_with_timezone = "" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. - omit_hostname = false -[[inputs.cpu]] - ## Whether to report per-cpu stats or not - percpu = true - ## Whether to report total system cpu stats or not - totalcpu = true - ## If true, collect raw CPU time metrics - collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states - report_active = false -[[inputs.kernel]] - # no configuration -[[inputs.kubernetes]] - ## URL for the kubelet - url = "http://1.1.1.1:12" - - ## Use bearer token for authorization. ('bearer_token' takes priority) - ## If both of these are empty, we'll use the default serviceaccount: - ## at: /run/secrets/kubernetes.io/serviceaccount/token - # bearer_token = "/path/to/bearer/token" - ## OR - # bearer_token_string = "abc_123" - - ## Pod labels to be added as tags. An empty array for both include and - ## exclude will include all labels. - # label_include = [] - # label_exclude = ["*"] - - ## Set response_timeout (default 5 seconds) - # response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = /path/to/cafile - # tls_cert = /path/to/certfile - # tls_key = /path/to/keyfile - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -[[outputs.influxdb_v2]] - ## The URLs of the InfluxDB cluster nodes. - ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. - ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] - urls = ["http://127.0.0.1:9999"] - - ## Token for authentication. - token = "token1" - - ## Organization is the name of the organization you wish to write to; must exist. - organization = "org" - - ## Destination bucket to write into. - bucket = "bucket" - - ## The value of this tag will be used to determine the bucket. If this - ## tag is not set the 'bucket' option is used as the default. 
- # bucket_tag = "" - - ## If true, the bucket tag will not be added to the metric. - # exclude_bucket_tag = false - - ## Timeout for HTTP messages. - # timeout = "5s" - - ## Additional HTTP headers - # http_headers = {"X-Special-Header" = "Special-Value"} - - ## HTTP Proxy override, if unset values the standard proxy environment - ## variables are consulted to determine which proxy, if any, should be used. - # http_proxy = "http://corporate.proxy:3128" - - ## HTTP User-Agent - # user_agent = "telegraf" - - ## Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. - # content_encoding = "gzip" - - ## Enable or disable uint support for writing uints influxdb 2.0. - # influx_uint_support = false - - ## Optional TLS Config for use on HTTP connections. - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -`, - Metadata: map[string]interface{}{"buckets": []string{"bucket"}}, - } - got := new(TelegrafConfig) - err := json.Unmarshal([]byte(s), got) - if err != nil { - t.Fatal("json decode error", err.Error()) - } - if diff := cmp.Diff(got, want, telegrafCmpOptions...); diff != "" { - t.Errorf("telegraf configs are different -got/+want\ndiff %s", diff) - } -} - -// tests forwards compatibillity with the new plugin unaware system. -func TestTelegrafConfigJSONDecodeTOML(t *testing.T) { - s := `{ - "name": "config 2", - "config": "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## Maximum number of unwritten metrics per output. Increasing this value\n ## allows for longer periods of output downtime without dropping metrics at the\n ## cost of higher maximum memory usage.\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. 
It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Log at debug level.\n # debug = false\n ## Log only error level messages.\n # quiet = false\n\n ## Log target controls the destination for logs and can be one of \"file\",\n ## \"stderr\" or, on Windows, \"eventlog\". When set to \"file\", the output file\n ## is determined by the \"logfile\" setting.\n # logtarget = \"file\"\n\n ## Name of the file to be logged to when using the \"file\" logtarget. If set to\n ## the empty string then logs are written to stderr.\n # logfile = \"\"\n\n ## The logfile will be rotated after the time interval specified. When set\n ## to 0 no time based rotation is performed. Logs are rotated only when\n ## written to, if there is no log activity rotation may be delayed.\n # logfile_rotation_interval = \"0d\"\n\n ## The logfile will be rotated when it becomes larger than the specified\n ## size. When set to 0 no size based rotation is performed.\n # logfile_rotation_max_size = \"0MB\"\n\n ## Maximum number of rotated archives to keep, any older logs are deleted.\n ## If set to -1, no archives are removed.\n # logfile_rotation_max_archives = 5\n\n ## Pick a timezone to use when logging or type 'local' for local time.\n ## Example: America/Chicago\n # log_with_timezone = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n[[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states\n report_active = false\n[[inputs.kernel]]\n # no configuration\n[[inputs.kubernetes]]\n ## URL for the kubelet\n url = \"http://1.1.1.1:12\"\n\n ## Use bearer token for authorization. ('bearer_token' takes priority)\n ## If both of these are empty, we'll use the default serviceaccount:\n ## at: /run/secrets/kubernetes.io/serviceaccount/token\n # bearer_token = \"/path/to/bearer/token\"\n ## OR\n # bearer_token_string = \"abc_123\"\n\n ## Pod labels to be added as tags. An empty array for both include and\n ## exclude will include all labels.\n # label_include = []\n # label_exclude = [\"*\"]\n\n ## Set response_timeout (default 5 seconds)\n # response_timeout = \"5s\"\n\n ## Optional TLS Config\n # tls_ca = /path/to/cafile\n # tls_cert = /path/to/certfile\n # tls_key = /path/to/keyfile\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n[[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"url1\", \"url2\"]\n\n ## Token for authentication.\n token = \"token1\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"org1\"\n\n ## Destination bucket to write into.\n bucket = \"bucket1\"\n\n ## The value of this tag will be used to determine the bucket. 
If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n" - }` - - want := &TelegrafConfig{ - Name: "config 2", - Config: `# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## Maximum number of unwritten metrics per output. Increasing this value - ## allows for longer periods of output downtime without dropping metrics at the - ## cost of higher maximum memory usage. - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - precision = "" - - ## Log at debug level. - # debug = false - ## Log only error level messages. - # quiet = false - - ## Log target controls the destination for logs and can be one of "file", - ## "stderr" or, on Windows, "eventlog". When set to "file", the output file - ## is determined by the "logfile" setting. - # logtarget = "file" - - ## Name of the file to be logged to when using the "file" logtarget. If set to - ## the empty string then logs are written to stderr. 
- # logfile = "" - - ## The logfile will be rotated after the time interval specified. When set - ## to 0 no time based rotation is performed. Logs are rotated only when - ## written to, if there is no log activity rotation may be delayed. - # logfile_rotation_interval = "0d" - - ## The logfile will be rotated when it becomes larger than the specified - ## size. When set to 0 no size based rotation is performed. - # logfile_rotation_max_size = "0MB" - - ## Maximum number of rotated archives to keep, any older logs are deleted. - ## If set to -1, no archives are removed. - # logfile_rotation_max_archives = 5 - - ## Pick a timezone to use when logging or type 'local' for local time. - ## Example: America/Chicago - # log_with_timezone = "" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. - omit_hostname = false -[[inputs.cpu]] - ## Whether to report per-cpu stats or not - percpu = true - ## Whether to report total system cpu stats or not - totalcpu = true - ## If true, collect raw CPU time metrics - collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states - report_active = false -[[inputs.kernel]] - # no configuration -[[inputs.kubernetes]] - ## URL for the kubelet - url = "http://1.1.1.1:12" - - ## Use bearer token for authorization. ('bearer_token' takes priority) - ## If both of these are empty, we'll use the default serviceaccount: - ## at: /run/secrets/kubernetes.io/serviceaccount/token - # bearer_token = "/path/to/bearer/token" - ## OR - # bearer_token_string = "abc_123" - - ## Pod labels to be added as tags. An empty array for both include and - ## exclude will include all labels. - # label_include = [] - # label_exclude = ["*"] - - ## Set response_timeout (default 5 seconds) - # response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = /path/to/cafile - # tls_cert = /path/to/certfile - # tls_key = /path/to/keyfile - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -[[outputs.influxdb_v2]] - ## The URLs of the InfluxDB cluster nodes. - ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. - ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] - urls = ["url1", "url2"] - - ## Token for authentication. - token = "token1" - - ## Organization is the name of the organization you wish to write to; must exist. - organization = "org1" - - ## Destination bucket to write into. - bucket = "bucket1" - - ## The value of this tag will be used to determine the bucket. If this - ## tag is not set the 'bucket' option is used as the default. - # bucket_tag = "" - - ## If true, the bucket tag will not be added to the metric. - # exclude_bucket_tag = false - - ## Timeout for HTTP messages. - # timeout = "5s" - - ## Additional HTTP headers - # http_headers = {"X-Special-Header" = "Special-Value"} - - ## HTTP Proxy override, if unset values the standard proxy environment - ## variables are consulted to determine which proxy, if any, should be used. - # http_proxy = "http://corporate.proxy:3128" - - ## HTTP User-Agent - # user_agent = "telegraf" - - ## Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. - # content_encoding = "gzip" - - ## Enable or disable uint support for writing uints influxdb 2.0. - # influx_uint_support = false - - ## Optional TLS Config for use on HTTP connections. 
- # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -`, - Metadata: map[string]interface{}{"buckets": []string{"bucket1"}}, - } - got := new(TelegrafConfig) - err := json.Unmarshal([]byte(s), got) - if err != nil { - t.Fatal("json decode error", err.Error()) - } - if diff := cmp.Diff(got, want, telegrafCmpOptions...); diff != "" { - t.Errorf("telegraf configs are different -got/+want\ndiff %s", diff) - } -} - -func TestTelegrafConfigJSONCompatibleMode(t *testing.T) { - id1, _ := platform.IDFromString("020f755c3c082000") - id2, _ := platform.IDFromString("020f755c3c082222") - id3, _ := platform.IDFromString("020f755c3c082223") - cases := []struct { - name string - src []byte - cfg *TelegrafConfig - expMeta map[string]interface{} - err error - }{ - { - name: "newest", - src: []byte(`{"id":"020f755c3c082000","orgID":"020f755c3c082222","plugins":[]}`), - cfg: &TelegrafConfig{ - ID: *id1, - OrgID: *id2, - Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## Maximum number of unwritten metrics per output. Increasing this value\n ## allows for longer periods of output downtime without dropping metrics at the\n ## cost of higher maximum memory usage.\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Log at debug level.\n # debug = false\n ## Log only error level messages.\n # quiet = false\n\n ## Log target controls the destination for logs and can be one of \"file\",\n ## \"stderr\" or, on Windows, \"eventlog\". When set to \"file\", the output file\n ## is determined by the \"logfile\" setting.\n # logtarget = \"file\"\n\n ## Name of the file to be logged to when using the \"file\" logtarget. 
If set to\n ## the empty string then logs are written to stderr.\n # logfile = \"\"\n\n ## The logfile will be rotated after the time interval specified. When set\n ## to 0 no time based rotation is performed. Logs are rotated only when\n ## written to, if there is no log activity rotation may be delayed.\n # logfile_rotation_interval = \"0d\"\n\n ## The logfile will be rotated when it becomes larger than the specified\n ## size. When set to 0 no size based rotation is performed.\n # logfile_rotation_max_size = \"0MB\"\n\n ## Maximum number of rotated archives to keep, any older logs are deleted.\n ## If set to -1, no archives are removed.\n # logfile_rotation_max_archives = 5\n\n ## Pick a timezone to use when logging or type 'local' for local time.\n ## Example: America/Chicago\n # log_with_timezone = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:9999\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n\n", - }, - expMeta: map[string]interface{}{"buckets": []string{}}, - }, - { - name: "old", - src: []byte(`{"id":"020f755c3c082000","organizationID":"020f755c3c082222","plugins":[]}`), - cfg: &TelegrafConfig{ - ID: *id1, - OrgID: *id2, - Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## Maximum number of unwritten metrics per output. 
Increasing this value\n ## allows for longer periods of output downtime without dropping metrics at the\n ## cost of higher maximum memory usage.\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Log at debug level.\n # debug = false\n ## Log only error level messages.\n # quiet = false\n\n ## Log target controls the destination for logs and can be one of \"file\",\n ## \"stderr\" or, on Windows, \"eventlog\". When set to \"file\", the output file\n ## is determined by the \"logfile\" setting.\n # logtarget = \"file\"\n\n ## Name of the file to be logged to when using the \"file\" logtarget. If set to\n ## the empty string then logs are written to stderr.\n # logfile = \"\"\n\n ## The logfile will be rotated after the time interval specified. When set\n ## to 0 no time based rotation is performed. Logs are rotated only when\n ## written to, if there is no log activity rotation may be delayed.\n # logfile_rotation_interval = \"0d\"\n\n ## The logfile will be rotated when it becomes larger than the specified\n ## size. When set to 0 no size based rotation is performed.\n # logfile_rotation_max_size = \"0MB\"\n\n ## Maximum number of rotated archives to keep, any older logs are deleted.\n ## If set to -1, no archives are removed.\n # logfile_rotation_max_archives = 5\n\n ## Pick a timezone to use when logging or type 'local' for local time.\n ## Example: America/Chicago\n # log_with_timezone = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:9999\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. 
If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n\n", - }, - expMeta: map[string]interface{}{"buckets": []string{}}, - }, - { - name: "conflict", - src: []byte(`{"id":"020f755c3c082000","organizationID":"020f755c3c082222","orgID":"020f755c3c082223","plugins":[]}`), - cfg: &TelegrafConfig{ - ID: *id1, - OrgID: *id3, - Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## Maximum number of unwritten metrics per output. Increasing this value\n ## allows for longer periods of output downtime without dropping metrics at the\n ## cost of higher maximum memory usage.\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Log at debug level.\n # debug = false\n ## Log only error level messages.\n # quiet = false\n\n ## Log target controls the destination for logs and can be one of \"file\",\n ## \"stderr\" or, on Windows, \"eventlog\". 
When set to \"file\", the output file\n ## is determined by the \"logfile\" setting.\n # logtarget = \"file\"\n\n ## Name of the file to be logged to when using the \"file\" logtarget. If set to\n ## the empty string then logs are written to stderr.\n # logfile = \"\"\n\n ## The logfile will be rotated after the time interval specified. When set\n ## to 0 no time based rotation is performed. Logs are rotated only when\n ## written to, if there is no log activity rotation may be delayed.\n # logfile_rotation_interval = \"0d\"\n\n ## The logfile will be rotated when it becomes larger than the specified\n ## size. When set to 0 no size based rotation is performed.\n # logfile_rotation_max_size = \"0MB\"\n\n ## Maximum number of rotated archives to keep, any older logs are deleted.\n ## If set to -1, no archives are removed.\n # logfile_rotation_max_archives = 5\n\n ## Pick a timezone to use when logging or type 'local' for local time.\n ## Example: America/Chicago\n # log_with_timezone = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n# Configuration for sending metrics to InfluxDB\n[[outputs.influxdb_v2]]\n # alias=\"influxdb_v2\"\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"http://127.0.0.1:9999\"]\n\n ## Token for authentication.\n token = \"\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. 
If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n\n", - }, - expMeta: map[string]interface{}{"buckets": []string{}}, - }, - } - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - got := new(TelegrafConfig) - err := json.Unmarshal(c.src, got) - if diff := cmp.Diff(err, c.err); diff != "" { - t.Fatalf("%s decode failed, got err: %v, should be %v", c.name, err, c.err) - } - - c.cfg.Metadata = c.expMeta - // todo:this might not set buckets but should - if diff := cmp.Diff(got, c.cfg, telegrafCmpOptions...); c.err == nil && diff != "" { - t.Errorf("failed %s, telegraf configs are different -got/+want\ndiff %s", c.name, diff) - } - }) - } -} - -func TestTelegrafConfigJSON(t *testing.T) { - id1, _ := platform.IDFromString("020f755c3c082000") - id2, _ := platform.IDFromString("020f755c3c082222") - cases := []struct { - name string - expect *TelegrafConfig - cfg string - err error - }{ - { - name: "regular config", - cfg: fmt.Sprintf(`{ - "ID": "%v", - "OrgID": "%v", - "Name": "n1", - "Agent": { - "Interval": 4000 - }, - "Plugins": [ - { - "name": "file", - "type": "input", - "comment": "comment1", - "config": { - "files": ["f1", "f2"] - } - }, - { - "name": "cpu", - "type": "input", - "comment": "comment2", - "config": {} - }, - { - "name": "file", - "type": "output", - "comment": "comment3", - "config": { - "files": [ - { - "type": "stdout" - } - ] - } - }, - { - "name": "influxdb_v2", - "type": "output", - "comment": "comment4", - "config": { - "URLs": ["url1", "url2"], - "Token": "tok1" - } - } - ] - }`, id1, id2), - expect: &TelegrafConfig{ - ID: *id1, - OrgID: *id2, - Name: "n1", - Config: "# Configuration for telegraf agent\n[agent]\n ## Default data collection interval for all inputs\n interval = \"10s\"\n ## Rounds collection interval to 'interval'\n ## ie, if interval=\"10s\" then always collect on :00, :10, :20, etc.\n round_interval = true\n\n ## Telegraf will send metrics to outputs in batches of at most\n ## metric_batch_size metrics.\n ## This controls the size of writes that Telegraf sends to output plugins.\n metric_batch_size = 1000\n\n ## Maximum number of unwritten metrics per output. 
Increasing this value\n ## allows for longer periods of output downtime without dropping metrics at the\n ## cost of higher maximum memory usage.\n metric_buffer_limit = 10000\n\n ## Collection jitter is used to jitter the collection by a random amount.\n ## Each plugin will sleep for a random time within jitter before collecting.\n ## This can be used to avoid many plugins querying things like sysfs at the\n ## same time, which can have a measurable effect on the system.\n collection_jitter = \"0s\"\n\n ## Default flushing interval for all outputs. Maximum flush_interval will be\n ## flush_interval + flush_jitter\n flush_interval = \"10s\"\n ## Jitter the flush interval by a random amount. This is primarily to avoid\n ## large write spikes for users running a large number of telegraf instances.\n ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s\n flush_jitter = \"0s\"\n\n ## By default or when set to \"0s\", precision will be set to the same\n ## timestamp order as the collection interval, with the maximum being 1s.\n ## ie, when interval = \"10s\", precision will be \"1s\"\n ## when interval = \"250ms\", precision will be \"1ms\"\n ## Precision will NOT be used for service inputs. It is up to each individual\n ## service input to set the timestamp at the appropriate precision.\n ## Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\".\n precision = \"\"\n\n ## Log at debug level.\n # debug = false\n ## Log only error level messages.\n # quiet = false\n\n ## Log target controls the destination for logs and can be one of \"file\",\n ## \"stderr\" or, on Windows, \"eventlog\". When set to \"file\", the output file\n ## is determined by the \"logfile\" setting.\n # logtarget = \"file\"\n\n ## Name of the file to be logged to when using the \"file\" logtarget. If set to\n ## the empty string then logs are written to stderr.\n # logfile = \"\"\n\n ## The logfile will be rotated after the time interval specified. When set\n ## to 0 no time based rotation is performed. Logs are rotated only when\n ## written to, if there is no log activity rotation may be delayed.\n # logfile_rotation_interval = \"0d\"\n\n ## The logfile will be rotated when it becomes larger than the specified\n ## size. When set to 0 no size based rotation is performed.\n # logfile_rotation_max_size = \"0MB\"\n\n ## Maximum number of rotated archives to keep, any older logs are deleted.\n ## If set to -1, no archives are removed.\n # logfile_rotation_max_archives = 5\n\n ## Pick a timezone to use when logging or type 'local' for local time.\n ## Example: America/Chicago\n # log_with_timezone = \"\"\n\n ## Override default hostname, if empty use os.Hostname()\n hostname = \"\"\n ## If set to true, do no set the \"host\" tag in the telegraf agent.\n omit_hostname = false\n[[inputs.file]]\n ## Files to parse each interval. Accept standard unix glob matching rules,\n ## as well as ** to match recursive files and directories.\n files = [\"f1\", \"f2\"]\n\n ## Name a tag containing the name of the file the data was parsed from. Leave empty\n ## to disable.\n # file_tag = \"\"\n\n ## Character encoding to use when interpreting the file contents. Invalid\n ## characters are replaced using the unicode replacement character. 
When set\n ## to the empty string the data is not decoded to text.\n ## ex: character_encoding = \"utf-8\"\n ## character_encoding = \"utf-16le\"\n ## character_encoding = \"utf-16be\"\n ## character_encoding = \"\"\n # character_encoding = \"\"\n\n ## The dataformat to be read from files\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md\n data_format = \"influx\"\n[[inputs.cpu]]\n ## Whether to report per-cpu stats or not\n percpu = true\n ## Whether to report total system cpu stats or not\n totalcpu = true\n ## If true, collect raw CPU time metrics\n collect_cpu_time = false\n ## If true, compute and report the sum of all non-idle CPU states\n report_active = false\n[[outputs.file]]\n ## Files to write to, \"stdout\" is a specially handled file.\n files = [\"stdout\"]\n\n ## Use batch serialization format instead of line based delimiting. The\n ## batch format allows for the production of non line based output formats and\n ## may more efficiently encode metric groups.\n # use_batch_format = false\n\n ## The file will be rotated after the time interval specified. When set\n ## to 0 no time based rotation is performed.\n # rotation_interval = \"0d\"\n\n ## The logfile will be rotated when it becomes larger than the specified\n ## size. When set to 0 no size based rotation is performed.\n # rotation_max_size = \"0MB\"\n\n ## Maximum number of rotated archives to keep, any older logs are deleted.\n ## If set to -1, no archives are removed.\n # rotation_max_archives = 5\n\n ## Data format to output.\n ## Each data format has its own unique set of configuration options, read\n ## more about them here:\n ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md\n data_format = \"influx\"\n[[outputs.influxdb_v2]]\n ## The URLs of the InfluxDB cluster nodes.\n ##\n ## Multiple URLs can be specified for a single cluster, only ONE of the\n ## urls will be written to each interval.\n ## ex: urls = [\"https://us-west-2-1.aws.cloud2.influxdata.com\"]\n urls = [\"url1\", \"url2\"]\n\n ## Token for authentication.\n token = \"tok1\"\n\n ## Organization is the name of the organization you wish to write to; must exist.\n organization = \"\"\n\n ## Destination bucket to write into.\n bucket = \"\"\n\n ## The value of this tag will be used to determine the bucket. 
If this\n ## tag is not set the 'bucket' option is used as the default.\n # bucket_tag = \"\"\n\n ## If true, the bucket tag will not be added to the metric.\n # exclude_bucket_tag = false\n\n ## Timeout for HTTP messages.\n # timeout = \"5s\"\n\n ## Additional HTTP headers\n # http_headers = {\"X-Special-Header\" = \"Special-Value\"}\n\n ## HTTP Proxy override, if unset values the standard proxy environment\n ## variables are consulted to determine which proxy, if any, should be used.\n # http_proxy = \"http://corporate.proxy:3128\"\n\n ## HTTP User-Agent\n # user_agent = \"telegraf\"\n\n ## Content-Encoding for write request body, can be set to \"gzip\" to\n ## compress body or \"identity\" to apply no encoding.\n # content_encoding = \"gzip\"\n\n ## Enable or disable uint support for writing uints influxdb 2.0.\n # influx_uint_support = false\n\n ## Optional TLS Config for use on HTTP connections.\n # tls_ca = \"/etc/telegraf/ca.pem\"\n # tls_cert = \"/etc/telegraf/cert.pem\"\n # tls_key = \"/etc/telegraf/key.pem\"\n ## Use TLS but skip chain & host verification\n # insecure_skip_verify = false\n", - Metadata: map[string]interface{}{"buckets": []string{}}, - }, - }, - { - name: "unsupported plugin type", - cfg: fmt.Sprintf(`{ - "ID": "%v", - "OrgID": "%v", - "Name": "n1", - "Plugins": [ - { - "name": "bad_type", - "type": "aggregator", - "Comment": "comment3", - "Config": { - "Field": "f1" - } - } - ] - }`, id1, id2), - err: &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf(ErrUnsupportTelegrafPluginType, "aggregator"), - Op: "unmarshal telegraf config raw plugin", - }, - }, - { - name: "unsupported plugin", - cfg: fmt.Sprintf(`{ - "ID": "%v", - "OrgID": "%v", - "Name": "n1", - "Plugins": [ - { - "name": "kafka", - "type": "output", - "Config": { - "Field": "f2" - } - } - ] - }`, id1, id2), - err: &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf(ErrUnsupportTelegrafPluginName, "kafka", plugins.Output), - Op: "unmarshal telegraf config raw plugin", - }, - }, - } - for _, c := range cases { - got := new(TelegrafConfig) - err := json.Unmarshal([]byte(c.cfg), got) - if diff := cmp.Diff(err, c.err); diff != "" { - t.Fatalf("%s decode failed, got err: %v, should be %v", c.name, err, c.err) - } - - if diff := cmp.Diff(got, c.expect, telegrafCmpOptions...); c.err == nil && diff != "" { - t.Errorf("failed %s, telegraf configs are different -got/+want\ndiff %s", c.name, diff) - } - } -} - -func TestLegacyStruct(t *testing.T) { - id1, _ := platform.IDFromString("020f755c3c082000") - - telConfOld := fmt.Sprintf(`{ - "id": "%v", - "name": "n1", - "agent": { - "interval": 10000 - }, - "plugins": [ - { - "name": "file", - "type": "input", - "comment": "comment1", - "config": { - "files": ["f1", "f2"] - } - }, - { - "name": "cpu", - "type": "input", - "comment": "comment2" - }, - { - "name": "file", - "type": "output", - "comment": "comment3", - "config": {"files": [ - {"type": "stdout"} - ]} - }, - { - "name": "influxdb_v2", - "type": "output", - "comment": "comment4", - "config": { - "urls": [ - "url1", - "url2" - ], - "token": "token1", - "organization": "org1", - "bucket": "bucket1" - } - } - ] - }`, id1) - want := `# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. 
- ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## Maximum number of unwritten metrics per output. Increasing this value - ## allows for longer periods of output downtime without dropping metrics at the - ## cost of higher maximum memory usage. - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - precision = "" - - ## Log at debug level. - # debug = false - ## Log only error level messages. - # quiet = false - - ## Log target controls the destination for logs and can be one of "file", - ## "stderr" or, on Windows, "eventlog". When set to "file", the output file - ## is determined by the "logfile" setting. - # logtarget = "file" - - ## Name of the file to be logged to when using the "file" logtarget. If set to - ## the empty string then logs are written to stderr. - # logfile = "" - - ## The logfile will be rotated after the time interval specified. When set - ## to 0 no time based rotation is performed. Logs are rotated only when - ## written to, if there is no log activity rotation may be delayed. - # logfile_rotation_interval = "0d" - - ## The logfile will be rotated when it becomes larger than the specified - ## size. When set to 0 no size based rotation is performed. - # logfile_rotation_max_size = "0MB" - - ## Maximum number of rotated archives to keep, any older logs are deleted. - ## If set to -1, no archives are removed. - # logfile_rotation_max_archives = 5 - - ## Pick a timezone to use when logging or type 'local' for local time. - ## Example: America/Chicago - # log_with_timezone = "" - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. - omit_hostname = false -[[inputs.file]] - ## Files to parse each interval. Accept standard unix glob matching rules, - ## as well as ** to match recursive files and directories. - files = ["f1", "f2"] - - ## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. - # file_tag = "" - - ## Character encoding to use when interpreting the file contents. Invalid - ## characters are replaced using the unicode replacement character. When set - ## to the empty string the data is not decoded to text. 
- ## ex: character_encoding = "utf-8" - ## character_encoding = "utf-16le" - ## character_encoding = "utf-16be" - ## character_encoding = "" - # character_encoding = "" - - ## The dataformat to be read from files - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -[[inputs.cpu]] - ## Whether to report per-cpu stats or not - percpu = true - ## Whether to report total system cpu stats or not - totalcpu = true - ## If true, collect raw CPU time metrics - collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states - report_active = false -[[outputs.file]] - ## Files to write to, "stdout" is a specially handled file. - files = ["stdout"] - - ## Use batch serialization format instead of line based delimiting. The - ## batch format allows for the production of non line based output formats and - ## may more efficiently encode metric groups. - # use_batch_format = false - - ## The file will be rotated after the time interval specified. When set - ## to 0 no time based rotation is performed. - # rotation_interval = "0d" - - ## The logfile will be rotated when it becomes larger than the specified - ## size. When set to 0 no size based rotation is performed. - # rotation_max_size = "0MB" - - ## Maximum number of rotated archives to keep, any older logs are deleted. - ## If set to -1, no archives are removed. - # rotation_max_archives = 5 - - ## Data format to output. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "influx" -[[outputs.influxdb_v2]] - ## The URLs of the InfluxDB cluster nodes. - ## - ## Multiple URLs can be specified for a single cluster, only ONE of the - ## urls will be written to each interval. - ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] - urls = ["url1", "url2"] - - ## Token for authentication. - token = "token1" - - ## Organization is the name of the organization you wish to write to; must exist. - organization = "org1" - - ## Destination bucket to write into. - bucket = "bucket1" - - ## The value of this tag will be used to determine the bucket. If this - ## tag is not set the 'bucket' option is used as the default. - # bucket_tag = "" - - ## If true, the bucket tag will not be added to the metric. - # exclude_bucket_tag = false - - ## Timeout for HTTP messages. - # timeout = "5s" - - ## Additional HTTP headers - # http_headers = {"X-Special-Header" = "Special-Value"} - - ## HTTP Proxy override, if unset values the standard proxy environment - ## variables are consulted to determine which proxy, if any, should be used. - # http_proxy = "http://corporate.proxy:3128" - - ## HTTP User-Agent - # user_agent = "telegraf" - - ## Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. - # content_encoding = "gzip" - - ## Enable or disable uint support for writing uints influxdb 2.0. - # influx_uint_support = false - - ## Optional TLS Config for use on HTTP connections. 
- # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - tc := &TelegrafConfig{} - require.NoError(t, json.Unmarshal([]byte(telConfOld), tc)) - - if tc.Config != want { - t.Fatalf("telegraf config's toml is incorrect, got %+v", tc.Config) - } -} - -func TestCountPlugins(t *testing.T) { - tc := TelegrafConfig{ - Name: "test", - Config: ` -[[inputs.file]] - some = "config" -[[inputs.file]] - some = "config" -[[outputs.influxdb_v2]] - some = "config" -[[inputs.cpu]] - some = "config" -[[outputs.stuff]] - some = "config" -[[aggregators.thing]] - some = "config" -[[processors.thing]] - some = "config" -[[serializers.thing]] - some = "config" -[[inputs.file]] - some = "config" -`, - } - - pCount := tc.CountPlugins() - - require.Equal(t, 6, len(pCount)) - - require.Equal(t, float64(3), pCount["inputs.file"]) -} diff --git a/telemetry/README.md b/telemetry/README.md deleted file mode 100644 index c14a2c1efd6..00000000000 --- a/telemetry/README.md +++ /dev/null @@ -1,8 +0,0 @@ -## Telemetry Data - -Telemetry is first collected by retrieving prometheus data from a Gatherer. -Next, the collected data is filtered by matching a subset of prometheus families. -Finally, the data is transmitted to a prometheus push gateway handler. - -The handler enriches the metrics with the timestamp when the data is -received. diff --git a/telemetry/handler.go b/telemetry/handler.go deleted file mode 100644 index e4f14697573..00000000000 --- a/telemetry/handler.go +++ /dev/null @@ -1,168 +0,0 @@ -package telemetry - -import ( - "context" - "fmt" - "io" - "net/http" - "time" - - "github.com/influxdata/influxdb/v2/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" - "go.uber.org/zap" -) - -const ( - // DefaultTimeout is the length of time servicing the metrics before canceling. - DefaultTimeout = 10 * time.Second - // DefaultMaxBytes is the largest request body read. - DefaultMaxBytes = 1024000 -) - -var ( - // ErrMetricsTimestampPresent is returned when the prometheus metrics has timestamps set. - // Not sure why, but, pushgateway does not allow timestamps. - ErrMetricsTimestampPresent = fmt.Errorf("pushed metrics must not have timestamp") -) - -// PushGateway handles receiving prometheus push metrics and forwards them to the Store. -// If Format is not set, the format of the inbound metrics are used. -type PushGateway struct { - Timeout time.Duration // handler returns after this duration with an error; defaults to 5 seconds - MaxBytes int64 // maximum number of bytes to read from the body; defaults to 1024000 - log *zap.Logger - - Store Store - Transformers []prometheus.Transformer - - Encoder prometheus.Encoder -} - -// NewPushGateway constructs the PushGateway. -func NewPushGateway(log *zap.Logger, store Store, xforms ...prometheus.Transformer) *PushGateway { - if len(xforms) == 0 { - xforms = append(xforms, &AddTimestamps{}) - } - return &PushGateway{ - Store: store, - Transformers: xforms, - log: log, - Timeout: DefaultTimeout, - MaxBytes: DefaultMaxBytes, - } -} - -// Handler accepts prometheus metrics send via the Push client and sends those -// metrics into the store. -func (p *PushGateway) Handler(w http.ResponseWriter, r *http.Request) { - // redirect to agreement to give our users information about - // this collected data. 
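Taken together, the deleted telemetry/README.md above describes a small pipeline: gather Prometheus metrics, filter them to an allow-list of families, push them to a gateway handler, and stamp them with the time of receipt. A minimal sketch (not part of the original change) of wiring the receiving side from the pieces in this package; the route and listen address are arbitrary choices for the example:

```go
package main

import (
	"net/http"

	"github.com/influxdata/influxdb/v2/telemetry"
	"go.uber.org/zap"
)

func main() {
	log := zap.NewExample()

	// A Store that just logs incoming payloads; any telemetry.Store works here.
	store := telemetry.NewLogStore(log)

	// With no explicit transformers, NewPushGateway installs AddTimestamps,
	// which stamps each metric with the time the gateway received it.
	gw := telemetry.NewPushGateway(log, store)

	// The route is an assumption for this sketch; Handler also redirects
	// GET/HEAD requests to the telemetry information page.
	http.HandleFunc("/metrics/job/influxdb", gw.Handler)
	_ = http.ListenAndServe(":8080", nil)
}
```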
- switch r.Method { - case http.MethodGet, http.MethodHead: - http.Redirect(w, r, "https://www.influxdata.com/telemetry", http.StatusSeeOther) - return - case http.MethodPost, http.MethodPut: - default: - w.Header().Set("Allow", "GET, HEAD, PUT, POST") - http.Error(w, - http.StatusText(http.StatusMethodNotAllowed), - http.StatusMethodNotAllowed, - ) - return - } - - if p.Timeout == 0 { - p.Timeout = DefaultTimeout - } - - if p.MaxBytes == 0 { - p.MaxBytes = DefaultMaxBytes - } - - if p.Encoder == nil { - p.Encoder = &prometheus.Expfmt{ - Format: expfmt.FmtText, - } - } - - ctx, cancel := context.WithTimeout( - r.Context(), - p.Timeout, - ) - defer cancel() - - r = r.WithContext(ctx) - defer r.Body.Close() - - format, err := metricsFormat(r.Header) - if err != nil { - p.log.Error("Metrics format not support", zap.Error(err)) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - mfs, err := decodePostMetricsRequest(r.Body, format, p.MaxBytes) - if err != nil { - p.log.Error("Unable to decode metrics", zap.Error(err)) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - if err := valid(mfs); err != nil { - p.log.Error("Invalid metrics", zap.Error(err)) - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - for _, transformer := range p.Transformers { - mfs = transformer.Transform(mfs) - } - - data, err := p.Encoder.Encode(mfs) - if err != nil { - p.log.Error("Unable to encode metric families", zap.Error(err)) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - if err := p.Store.WriteMessage(ctx, data); err != nil { - p.log.Error("Unable to write to store", zap.Error(err)) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - w.WriteHeader(http.StatusAccepted) -} - -func metricsFormat(headers http.Header) (expfmt.Format, error) { - format := expfmt.ResponseFormat(headers) - if format == expfmt.FmtUnknown { - return "", fmt.Errorf("unknown format metrics format") - } - return format, nil -} - -func decodePostMetricsRequest(body io.Reader, format expfmt.Format, maxBytes int64) ([]*dto.MetricFamily, error) { - // protect against reading too many bytes - r := io.LimitReader(body, maxBytes) - - mfs, err := prometheus.DecodeExpfmt(r, format) - if err != nil { - return nil, err - } - return mfs, nil -} - -// prom's pushgateway does not allow timestamps for some reason. -func valid(mfs []*dto.MetricFamily) error { - // Checks if any timestamps have been specified. 
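The handler infers the wire format purely from the request's Content-Type header via expfmt.ResponseFormat; anything it cannot map becomes FmtUnknown and is rejected with 400 Bad Request. A small standalone illustration of that mapping (assumed behavior of the expfmt package, not taken from the original code):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/common/expfmt"
)

func main() {
	h := http.Header{}

	// Delimited protobuf, the format the Pusher in this package sends.
	h.Set("Content-Type", string(expfmt.FmtProtoDelim))
	fmt.Println(expfmt.ResponseFormat(h) == expfmt.FmtProtoDelim) // true

	// An unrecognized media type maps to FmtUnknown, which the handler
	// turns into a 400 Bad Request.
	h.Set("Content-Type", "application/json")
	fmt.Println(expfmt.ResponseFormat(h) == expfmt.FmtUnknown) // true
}
```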
- for i := range mfs { - for j := range mfs[i].Metric { - if mfs[i].Metric[j].TimestampMs != nil { - return ErrMetricsTimestampPresent - } - } - } - return nil -} diff --git a/telemetry/handler_test.go b/telemetry/handler_test.go deleted file mode 100644 index 33512bc21c0..00000000000 --- a/telemetry/handler_test.go +++ /dev/null @@ -1,305 +0,0 @@ -package telemetry - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "net/http/httptest" - "reflect" - "testing" - "time" - - pr "github.com/influxdata/influxdb/v2/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" - "go.uber.org/zap/zaptest" - "google.golang.org/protobuf/proto" -) - -func TestPushGateway_Handler(t *testing.T) { - type fields struct { - Store *mockStore - now func() time.Time - } - type args struct { - w *httptest.ResponseRecorder - r *http.Request - } - tests := []struct { - name string - fields fields - args args - contentType string - wantStatus int - want []byte - }{ - - { - name: "unknown content-type is a bad request", - fields: fields{ - Store: &mockStore{}, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("POST", "/", nil), - }, - wantStatus: http.StatusBadRequest, - }, - - { - name: "bad metric with timestamp is a bad request", - fields: fields{ - Store: &mockStore{}, - now: func() time.Time { return time.Unix(0, 0) }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("POST", "/", - mustEncode(t, - []*dto.MetricFamily{badMetric()}, - ), - ), - }, - contentType: string(expfmt.FmtProtoDelim), - wantStatus: http.StatusBadRequest, - }, - { - name: "store error is an internal server error", - fields: fields{ - Store: &mockStore{ - err: fmt.Errorf("e1"), - }, - now: func() time.Time { return time.Unix(0, 0) }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("POST", "/", - mustEncode(t, - []*dto.MetricFamily{NewCounter("mf1", 1.0, pr.L("n1", "v1"))}, - ), - ), - }, - contentType: string(expfmt.FmtProtoDelim), - wantStatus: http.StatusInternalServerError, - want: []byte(`[{"name":"mf1","type":0,"metric":[{"label":[{"name":"n1","value":"v1"}],"counter":{"value":1},"timestamp_ms":0}]}]`), - }, - { - name: "metric store in store", - fields: fields{ - Store: &mockStore{}, - now: func() time.Time { return time.Unix(0, 0) }, - }, - args: args{ - w: httptest.NewRecorder(), - r: httptest.NewRequest("POST", "/", - mustEncode(t, - []*dto.MetricFamily{NewCounter("mf1", 1.0, pr.L("n1", "v1"))}, - ), - ), - }, - contentType: string(expfmt.FmtProtoDelim), - wantStatus: http.StatusAccepted, - want: []byte(`[{"name":"mf1","type":0,"metric":[{"label":[{"name":"n1","value":"v1"}],"counter":{"value":1},"timestamp_ms":0}]}]`), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p := NewPushGateway( - zaptest.NewLogger(t), - tt.fields.Store, - &AddTimestamps{ - now: tt.fields.now, - }, - ) - p.Encoder = &pr.JSON{} - tt.args.r.Header.Set("Content-Type", tt.contentType) - p.Handler(tt.args.w, tt.args.r) - - if tt.args.w.Code != http.StatusAccepted { - t.Logf("Body: %s", tt.args.w.Body.String()) - } - if got, want := tt.args.w.Code, tt.wantStatus; got != want { - t.Errorf("PushGateway.Handler() StatusCode = %v, want %v", got, want) - } - - if got, want := tt.fields.Store.data, tt.want; string(got) != string(want) { - t.Errorf("PushGateway.Handler() Data = %s, want %s", got, want) - } - }) - } -} - -func Test_decodePostMetricsRequest(t *testing.T) { - type args struct { - 
req *http.Request - maxBytes int64 - } - tests := []struct { - name string - args args - contentType string - want []*dto.MetricFamily - wantErr bool - }{ - { - name: "bad body returns no metrics", - args: args{ - req: httptest.NewRequest("POST", "/", bytes.NewBuffer([]byte{0x10})), - maxBytes: 10, - }, - contentType: string(expfmt.FmtProtoDelim), - want: []*dto.MetricFamily{}, - }, - { - name: "no body returns no metrics", - args: args{ - req: httptest.NewRequest("POST", "/", nil), - maxBytes: 10, - }, - contentType: string(expfmt.FmtProtoDelim), - want: []*dto.MetricFamily{}, - }, - { - name: "metrics are returned from POST", - args: args{ - req: httptest.NewRequest("POST", "/", - mustEncode(t, - []*dto.MetricFamily{NewCounter("mf1", 1.0, pr.L("n1", "v1"))}, - ), - ), - maxBytes: 31, - }, - contentType: string(expfmt.FmtProtoDelim), - want: []*dto.MetricFamily{NewCounter("mf1", 1.0, pr.L("n1", "v1"))}, - }, - { - name: "max bytes limits on record boundary returns a single record", - args: args{ - req: httptest.NewRequest("POST", "/", - mustEncode(t, - []*dto.MetricFamily{ - NewCounter("mf1", 1.0, pr.L("n1", "v1")), - NewCounter("mf2", 1.0, pr.L("n2", "v2")), - }, - ), - ), - maxBytes: 31, - }, - contentType: string(expfmt.FmtProtoDelim), - want: []*dto.MetricFamily{NewCounter("mf1", 1.0, pr.L("n1", "v1"))}, - }, - { - name: "exceeding max bytes returns an error", - args: args{ - req: httptest.NewRequest("POST", "/", - mustEncode(t, - []*dto.MetricFamily{ - NewCounter("mf1", 1.0, pr.L("n1", "v1")), - NewCounter("mf2", 1.0, pr.L("n2", "v2")), - }, - ), - ), - maxBytes: 33, - }, - contentType: string(expfmt.FmtProtoDelim), - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.args.req.Header.Set("Content-Type", tt.contentType) - got, err := decodePostMetricsRequest(tt.args.req.Body, expfmt.Format(tt.contentType), tt.args.maxBytes) - if (err != nil) != tt.wantErr { - t.Errorf("decodePostMetricsRequest() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("decodePostMetricsRequest() = %v, want %v", got, tt.want) - } - }) - } -} - -func badMetric() *dto.MetricFamily { - return &dto.MetricFamily{ - Name: proto.String("bad"), - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{ - { - Label: []*dto.LabelPair{pr.L("n1", "v1")}, - Counter: &dto.Counter{ - Value: proto.Float64(1.0), - }, - TimestampMs: proto.Int64(1), - }, - }, - } -} - -func goodMetric() *dto.MetricFamily { - return &dto.MetricFamily{ - Name: proto.String("good"), - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{ - { - Label: []*dto.LabelPair{pr.L("n1", "v1")}, - Counter: &dto.Counter{ - Value: proto.Float64(1.0), - }, - }, - }, - } -} - -func Test_valid(t *testing.T) { - type args struct { - mfs []*dto.MetricFamily - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - name: "metric with timestamp is invalid", - args: args{ - mfs: []*dto.MetricFamily{badMetric()}, - }, - wantErr: true, - }, - { - name: "metric without timestamp is valid", - args: args{ - mfs: []*dto.MetricFamily{goodMetric()}, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := valid(tt.args.mfs); (err != nil) != tt.wantErr { - t.Errorf("valid() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -type mockStore struct { - data []byte - err error -} - -func (m *mockStore) WriteMessage(ctx context.Context, data []byte) error { - m.data = data - return m.err - -} - -func 
mustEncode(t *testing.T, mfs []*dto.MetricFamily) io.Reader { - b, err := pr.EncodeExpfmt(mfs) - if err != nil { - t.Fatalf("unable to encode %v", err) - } - return bytes.NewBuffer(b) -} diff --git a/telemetry/metrics.go b/telemetry/metrics.go deleted file mode 100644 index 47dd4674982..00000000000 --- a/telemetry/metrics.go +++ /dev/null @@ -1,56 +0,0 @@ -package telemetry - -import ( - pr "github.com/influxdata/influxdb/v2/prometheus" -) - -var telemetryMatcher = pr.NewMatcher(). - /* - * Runtime stats - */ - Family("influxdb_info"). // includes version, os, etc. - Family("influxdb_uptime_seconds"). - /* - * Resource Counts - */ - Family("influxdb_organizations_total"). - Family("influxdb_buckets_total"). - Family("influxdb_users_total"). - Family("influxdb_tokens_total"). - Family("influxdb_dashboards_total"). - Family("influxdb_scrapers_total"). - Family("influxdb_telegrafs_total"). - Family("influxdb_telegraf_plugins_count"). - Family("influxdb_remotes_total"). - Family("influxdb_replications_total"). - Family("task_scheduler_claims_active"). // Count of currently active tasks - /* - * Count of API requests including success and failure - */ - Family("http_api_requests_total"). - /* - * Count of writes and queries - */ - Family("storage_wal_writes_total"). - Family("query_control_requests_total"). - /* - * Query analysis - */ - Family("query_control_functions_total"). // Count of functions in queries (e.g. mean, median) - Family("query_control_all_duration_seconds"). // Total query duration per org. - /* - * Write analysis - */ - Family("http_api_request_duration_seconds_bucket", - pr.L("path", "/api/v2/write"), // Count only the durations of the /write endpoint. - ). - /* - * Storage cardinality - */ - Family("storage_tsi_index_series_total"). - /* - * Storage disk usage - */ - Family("storage_series_file_disk_bytes"). // All families need to be aggregated to - Family("storage_wal_current_segment_bytes"). // get a true idea of disk usage. - Family("storage_tsm_files_disk_bytes") diff --git a/telemetry/push.go b/telemetry/push.go deleted file mode 100644 index 495d4782ee5..00000000000 --- a/telemetry/push.go +++ /dev/null @@ -1,114 +0,0 @@ -package telemetry - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "time" - - pr "github.com/influxdata/influxdb/v2/prometheus" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/expfmt" -) - -// Pusher pushes metrics to a prometheus push gateway. -type Pusher struct { - URL string - Gather prometheus.Gatherer - Client *http.Client - PushFormat expfmt.Format -} - -// NewPusher sends usage metrics to a prometheus push gateway. -func NewPusher(g prometheus.Gatherer) *Pusher { - return &Pusher{ - URL: "https://telemetry.influxdata.com/metrics/job/influxdb", - Gather: &pr.Filter{ - Gatherer: g, - Matcher: telemetryMatcher, - }, - Client: &http.Client{ - Transport: http.DefaultTransport, - Timeout: 10 * time.Second, - }, - PushFormat: expfmt.FmtText, - } -} - -// Push POSTs prometheus metrics in protobuf delimited format to a push gateway. 
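On the sending side, NewPusher wraps whatever Gatherer it is given in a Filter built from the telemetryMatcher allow-list above, so only those families ever leave the process. A rough usage sketch, assuming the process's default Prometheus registry as the source and an arbitrary timeout:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/influxdata/influxdb/v2/telemetry"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// NewPusher filters the gatherer with telemetryMatcher internally, so the
	// caller only decides where the metrics come from.
	p := telemetry.NewPusher(prometheus.DefaultGatherer)

	// Push honors context cancellation; the deadline here is an arbitrary choice.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	if err := p.Push(ctx); err != nil {
		// Gathering zero metrics is not an error; network or encoding
		// failures are.
		fmt.Println("push failed:", err)
	}
}
```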
-func (p *Pusher) Push(ctx context.Context) error { - if p.PushFormat == "" { - p.PushFormat = expfmt.FmtText - } - - resps := make(chan (error)) - go func() { - resps <- p.push(ctx) - }() - - select { - case err := <-resps: - return err - case <-ctx.Done(): - return ctx.Err() - } -} - -func (p *Pusher) push(ctx context.Context) error { - r, err := p.encode() - if err != nil { - return err - } - - // when there are no metrics to send, then, no need to POST. - if r == nil { - return nil - } - - req, err := http.NewRequest(http.MethodPost, p.URL, r) - if err != nil { - return err - } - - req = req.WithContext(ctx) - - req.Header.Set("Content-Type", string(p.PushFormat)) - - res, err := p.Client.Do(req) - - // FIXME: consider why we're checking for cancellation here. - if err := ctx.Err(); err != nil { - return err - } - - if err != nil { - return err - } - - defer res.Body.Close() - if res.StatusCode != http.StatusAccepted { - body, _ := io.ReadAll(res.Body) - return fmt.Errorf("unable to POST metrics; received status %s: %s", http.StatusText(res.StatusCode), body) - } - return nil -} - -func (p *Pusher) encode() (io.Reader, error) { - mfs, err := p.Gather.Gather() - if err != nil { - return nil, err - } - - if len(mfs) == 0 { - return nil, nil - } - - b, err := pr.EncodeExpfmt(mfs, p.PushFormat) - if err != nil { - return nil, err - } - - return bytes.NewBuffer(b), nil -} diff --git a/telemetry/push_test.go b/telemetry/push_test.go deleted file mode 100644 index 4a7f26b77ee..00000000000 --- a/telemetry/push_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package telemetry - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" - "google.golang.org/protobuf/proto" -) - -func TestPusher_Push(t *testing.T) { - type check struct { - Method string - Body []byte - } - tests := []struct { - name string - gather prometheus.Gatherer - timeout time.Duration - status int - - want check - wantErr bool - }{ - { - name: "no metrics no push", - gather: prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - return nil, nil - }), - }, - { - name: "timeout while gathering data returns error", - gather: prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - time.Sleep(time.Hour) - return nil, nil - }), - timeout: time.Millisecond, - wantErr: true, - }, - { - name: "timeout server timeout data returns error", - gather: prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - mf := &dto.MetricFamily{} - return []*dto.MetricFamily{mf}, nil - }), - timeout: time.Millisecond, - wantErr: true, - }, - { - name: "error gathering metrics returns error", - gather: prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - return nil, fmt.Errorf("e1") - }), - wantErr: true, - }, - { - name: "status code that is not Accepted (202) is an error", - gather: prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - mf := &dto.MetricFamily{} - return []*dto.MetricFamily{mf}, nil - }), - status: http.StatusInternalServerError, - want: check{ - Method: http.MethodPost, - Body: []byte{0x00}, - }, - wantErr: true, - }, - { - name: "sending metric are marshalled into delimited protobufs", - gather: prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - mf := &dto.MetricFamily{ - 
Name: proto.String("n1"), - Help: proto.String("h1"), - } - return []*dto.MetricFamily{mf}, nil - }), - status: http.StatusAccepted, - want: check{ - Method: http.MethodPost, - Body: MustMarshal(&dto.MetricFamily{ - Name: proto.String("n1"), - Help: proto.String("h1"), - }), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - if tt.timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, tt.timeout) - defer cancel() - } - - var got check - srv := httptest.NewServer( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if tt.timeout > 0 { // testing server timeouts - r = r.WithContext(ctx) - _ = r - <-ctx.Done() - return - } - got.Method = r.Method - got.Body, _ = io.ReadAll(r.Body) - w.WriteHeader(tt.status) - }), - ) - defer srv.Close() - - url := srv.URL - client := srv.Client() - p := &Pusher{ - URL: url, - Gather: tt.gather, - Client: client, - PushFormat: expfmt.FmtProtoDelim, - } - if err := p.Push(ctx); (err != nil) != tt.wantErr { - t.Errorf("Pusher.Push() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !cmp.Equal(got, tt.want) { - t.Errorf("%q. Pusher.Push() = -got/+want %s", tt.name, cmp.Diff(got, tt.want)) - t.Logf("%v\n%v", got.Body, tt.want.Body) - } - }) - } -} - -func MustMarshal(mf *dto.MetricFamily) []byte { - buf := &bytes.Buffer{} - _, err := pbutil.WriteDelimited(buf, mf) - if err != nil { - panic(err) - } - return buf.Bytes() -} diff --git a/telemetry/reporter.go b/telemetry/reporter.go deleted file mode 100644 index 5429f9f395b..00000000000 --- a/telemetry/reporter.go +++ /dev/null @@ -1,55 +0,0 @@ -package telemetry - -import ( - "context" - "time" - - influxlogger "github.com/influxdata/influxdb/v2/logger" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -// Reporter reports telemetry metrics to a prometheus push -// gateway every interval. -type Reporter struct { - Pusher *Pusher - log *zap.Logger - Interval time.Duration -} - -// NewReporter reports telemetry every 24 hours. -func NewReporter(log *zap.Logger, g prometheus.Gatherer) *Reporter { - return &Reporter{ - Pusher: NewPusher(g), - log: log, - Interval: 24 * time.Hour, - } -} - -// Report starts periodic telemetry reporting each interval. 
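A sketch of how a server might start the Reporter defined here; the interval and local push URL are placeholder values for the example, not the package defaults:

```go
package main

import (
	"context"
	"time"

	"github.com/influxdata/influxdb/v2/telemetry"
	"github.com/prometheus/client_golang/prometheus"
	"go.uber.org/zap"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r := telemetry.NewReporter(zap.NewExample(), prometheus.DefaultGatherer)

	// NewReporter defaults to pushing every 24h against the hosted endpoint;
	// both knobs can be overridden, e.g. for testing against a local gateway.
	r.Interval = time.Hour
	r.Pusher.URL = "http://localhost:9091/metrics/job/influxdb" // placeholder

	// Report pushes once immediately and then on every tick until ctx is
	// canceled (e.g. on shutdown).
	r.Report(ctx)
}
```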
-func (r *Reporter) Report(ctx context.Context) { - logger := r.log.With( - zap.String("service", "telemetry"), - influxlogger.DurationLiteral("interval", r.Interval), - ) - - logger.Info("Starting") - if err := r.Pusher.Push(ctx); err != nil { - logger.Debug("Failure reporting telemetry metrics", zap.Error(err)) - } - - ticker := time.NewTicker(r.Interval) - defer ticker.Stop() - for { - select { - case <-ticker.C: - logger.Debug("Reporting") - if err := r.Pusher.Push(ctx); err != nil { - logger.Debug("Failure reporting telemetry metrics", zap.Error(err)) - } - case <-ctx.Done(): - logger.Info("Stopping") - return - } - } -} diff --git a/telemetry/reporter_test.go b/telemetry/reporter_test.go deleted file mode 100644 index ea130569ce9..00000000000 --- a/telemetry/reporter_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package telemetry - -import ( - "context" - "net/http" - "net/http/httptest" - "reflect" - "sync" - "testing" - "time" - - pr "github.com/influxdata/influxdb/v2/prometheus" - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "go.uber.org/zap/zaptest" -) - -func TestReport(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - - logger := zaptest.NewLogger(t) - store := newReportingStore() - timestamps := &AddTimestamps{ - now: func() time.Time { - return time.Unix(0, 0) - }, - } - - gw := NewPushGateway(logger, store, timestamps) - gw.Encoder = &pr.JSON{} - - ts := httptest.NewServer(http.HandlerFunc(gw.Handler)) - defer ts.Close() - - mfs := []*dto.MetricFamily{NewCounter("influxdb_buckets_total", 1.0)} - gatherer := prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { - return mfs, nil - }) - - reporter := NewReporter(logger, gatherer) - reporter.Pusher.URL = ts.URL - reporter.Interval = 30 * time.Second - - var wg sync.WaitGroup - wg.Add(1) - defer wg.Wait() - go func() { - defer wg.Done() - reporter.Report(ctx) - }() - - got := <-store.ch - - // Encode to JSON to make it easier to compare - want, _ := pr.EncodeJSON(timestamps.Transform(mfs)) - if !reflect.DeepEqual(got, want) { - t.Errorf("Reporter.Report() = %s, want %s", got, want) - } - - cancel() -} - -func newReportingStore() *reportingStore { - return &reportingStore{ - ch: make(chan []byte, 1), - } -} - -type reportingStore struct { - ch chan []byte -} - -func (s *reportingStore) WriteMessage(ctx context.Context, data []byte) error { - s.ch <- data - return nil -} diff --git a/telemetry/store.go b/telemetry/store.go deleted file mode 100644 index 6d6ad3165cd..00000000000 --- a/telemetry/store.go +++ /dev/null @@ -1,32 +0,0 @@ -package telemetry - -import ( - "context" - - "go.uber.org/zap" -) - -// Store records usage data. -type Store interface { - // WriteMessage stores data into the store. - WriteMessage(ctx context.Context, data []byte) error -} - -var _ Store = (*LogStore)(nil) - -// LogStore logs data written to the store. -type LogStore struct { - log *zap.Logger -} - -func NewLogStore(log *zap.Logger) *LogStore { - return &LogStore{ - log: log, - } -} - -// WriteMessage logs data at Info level. 
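The Store interface below is the only seam between the push gateway and persistence, so swapping the destination means implementing a single method. An illustrative in-memory implementation (invented for this sketch, not part of the original code):

```go
package telemetryutil

import (
	"context"
	"sync"

	"github.com/influxdata/influxdb/v2/telemetry"
)

// memStore keeps every pushed payload in memory; handy for tests or local inspection.
type memStore struct {
	mu       sync.Mutex
	payloads [][]byte
}

// Compile-time check that memStore satisfies telemetry.Store.
var _ telemetry.Store = (*memStore)(nil)

func (m *memStore) WriteMessage(_ context.Context, data []byte) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	// Copy the payload; the caller may reuse the buffer after WriteMessage returns.
	m.payloads = append(m.payloads, append([]byte(nil), data...))
	return nil
}
```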
-func (s *LogStore) WriteMessage(ctx context.Context, data []byte) error { - s.log.Info("Write", zap.String("data", string(data))) - return nil -} diff --git a/telemetry/telemetry_test.go b/telemetry/telemetry_test.go deleted file mode 100644 index c8702873f2b..00000000000 --- a/telemetry/telemetry_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package telemetry - -import ( - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" -) - -func NewCounter(name string, v float64, ls ...*dto.LabelPair) *dto.MetricFamily { - m := &dto.Metric{ - Label: ls, - Counter: &dto.Counter{ - Value: &v, - }, - } - return &dto.MetricFamily{ - Name: proto.String(name), - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{m}, - } -} diff --git a/telemetry/timestamps.go b/telemetry/timestamps.go deleted file mode 100644 index 7a32f0948f1..00000000000 --- a/telemetry/timestamps.go +++ /dev/null @@ -1,36 +0,0 @@ -package telemetry - -import ( - "time" - - "github.com/influxdata/influxdb/v2/prometheus" - dto "github.com/prometheus/client_model/go" -) - -const ( - // just in case the definition of time.Nanosecond changes from 1. - nsPerMillisecond = int64(time.Millisecond / time.Nanosecond) -) - -var _ prometheus.Transformer = (*AddTimestamps)(nil) - -// AddTimestamps enriches prometheus metrics by adding timestamps. -type AddTimestamps struct { - now func() time.Time -} - -// Transform adds now as a timestamp to all metrics. -func (a *AddTimestamps) Transform(mfs []*dto.MetricFamily) []*dto.MetricFamily { - now := a.now - if now == nil { - now = time.Now - } - nowMilliseconds := now().UnixNano() / nsPerMillisecond - - for i := range mfs { - for j := range mfs[i].Metric { - mfs[i].Metric[j].TimestampMs = &nowMilliseconds - } - } - return mfs -} diff --git a/telemetry/timestamps_test.go b/telemetry/timestamps_test.go deleted file mode 100644 index ae49bca5820..00000000000 --- a/telemetry/timestamps_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package telemetry - -import ( - "reflect" - "testing" - "time" - - pr "github.com/influxdata/influxdb/v2/prometheus" - dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" -) - -func goodMetricWithTime() *dto.MetricFamily { - return &dto.MetricFamily{ - Name: proto.String("good"), - Type: dto.MetricType_COUNTER.Enum(), - Metric: []*dto.Metric{ - { - Label: []*dto.LabelPair{pr.L("n1", "v1")}, - Counter: &dto.Counter{ - Value: proto.Float64(1.0), - }, - TimestampMs: proto.Int64(1), - }, - }, - } -} - -func TestAddTimestamps(t *testing.T) { - type args struct { - mfs []*dto.MetricFamily - now func() time.Time - } - tests := []struct { - name string - args args - }{ - { - args: args{ - mfs: []*dto.MetricFamily{goodMetric()}, - now: func() time.Time { return time.Unix(0, int64(time.Millisecond)) }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ts := AddTimestamps{ - now: tt.args.now, - } - got := ts.Transform(tt.args.mfs) - want := []*dto.MetricFamily{goodMetricWithTime()} - if !reflect.DeepEqual(got, want) { - t.Errorf("AddTimestamps.Transform() = %v, want %v", got, want) - } - }) - } -} diff --git a/tenant/doc.go b/tenant/doc.go deleted file mode 100644 index e3596aa3c0c..00000000000 --- a/tenant/doc.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -The tenant domain encapsulates all the storage critical metadata services: -User -Organization -Bucket -URM's - -These services are the cornerstone of all other metadata services. 
The intent is to have -a single location for all tenant related code. This should facilitate faster bug resolution and -allow us to make changes to this service without affecting any dependent services. - -When a new request for the tenant service comes in it should follow this pattern: -1 http_server_resource - this is where the request is parsed and rejected if the client didn't send - - the right information - -2 middleware_resource_auth - We now confirm the user that generated the request has sufficient permission - - to accomplish this task, in some cases we adjust the request if the user is without the correct permissions - -3 middleware_resource_metrics - Track RED metrics for this request -4 middleware_resource_logging - add logging around request duration and status. -5 service_resource - When a request reaches the service we verify the content for compatibility with the existing dataset, - - for instance if a resource has an "orgID" we will ensure the organization exists - -6 storage_resource - Basic CRUD actions for the system. - -This pattern of api -> middleware -> service -> basic crud helps us to break down the responsibilities into digestible -chunks and allows us to swap in or out any pieces we need depending on the situation. Currently the storage layer is using -a kv store but by breaking the crud actions into their own independent set of concerns we allow ourselves to move away from kv -if the need arises without having to be concerned about messing up some other pieces of logic. -*/ -package tenant diff --git a/tenant/error.go b/tenant/error.go deleted file mode 100644 index 90ec96cb036..00000000000 --- a/tenant/error.go +++ /dev/null @@ -1,93 +0,0 @@ -package tenant - -import ( - "fmt" - "strings" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - // ErrNameisEmpty is when a name is empty - ErrNameisEmpty = &errors.Error{ - Code: errors.EInvalid, - Msg: "name is empty", - } - - // ErrIDNotUnique is used when attempting to create an org or bucket that already - // exists. - ErrIDNotUnique = &errors.Error{ - Code: errors.EConflict, - Msg: "ID already exists", - } - - // ErrFailureGeneratingID occurs only when the random number generator - // cannot generate an ID in MaxIDGenerationN times. - ErrFailureGeneratingID = &errors.Error{ - Code: errors.EInternal, - Msg: "unable to generate valid id", - } - - // ErrOnboardingNotAllowed occurs when a request to onboard comes in and we are not allowing this request - ErrOnboardingNotAllowed = &errors.Error{ - Code: errors.EConflict, - Msg: "onboarding has already been completed", - } - - ErrNotFound = &errors.Error{ - Code: errors.ENotFound, - Msg: "not found", - } -) - -// ErrInternalServiceError is used when the error comes from an internal system. -func ErrInternalServiceError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Err: err, - } -} - -type errSlice []error - -func (e errSlice) Error() string { - l := len(e) - sb := strings.Builder{} - for i, err := range e { - if i > 0 { - sb.WriteRune('\n') - } - sb.WriteString(fmt.Sprintf("error %d/%d: %s", i+1, l, err.Error())) - } - return sb.String() -} - -// AggregateError enables composing multiple errors. -// This is ideal in the case that you are applying functions with side effects to a slice of elements. -// E.g., deleting/updating a slice of resources. -type AggregateError struct { - errs errSlice -} - -// NewAggregateError returns a new AggregateError. 
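The numbered steps in tenant/doc.go above describe a decorator chain: each concern wraps the service beneath it and exposes the same interface. A self-contained toy version of that layering (all names are invented for illustration; these are not the tenant package's real constructors):

```go
package main

import (
	"context"
	"fmt"
)

// BucketFinder is a stand-in for a tenant service interface.
type BucketFinder interface {
	FindBucket(ctx context.Context, name string) (string, error)
}

// storageLayer plays the role of step 6: plain CRUD against the backing store.
type storageLayer struct{}

func (storageLayer) FindBucket(_ context.Context, name string) (string, error) {
	return "id-for-" + name, nil
}

// loggingLayer plays the role of step 4: it wraps another BucketFinder and
// records the outcome of each call.
type loggingLayer struct{ next BucketFinder }

func (l loggingLayer) FindBucket(ctx context.Context, name string) (string, error) {
	id, err := l.next.FindBucket(ctx, name)
	fmt.Printf("FindBucket(%q) -> %q err=%v\n", name, id, err)
	return id, err
}

func main() {
	// Layers compose bottom-up; auth, metrics, and the HTTP server would wrap
	// the chain in exactly the same way.
	var svc BucketFinder = storageLayer{}
	svc = loggingLayer{next: svc}
	_, _ = svc.FindBucket(context.Background(), "telegraf")
}
```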
-func NewAggregateError() *AggregateError { - return &AggregateError{ - errs: make([]error, 0), - } -} - -// Add adds an error to the aggregate. -func (e *AggregateError) Add(err error) { - if err == nil { - return - } - e.errs = append(e.errs, err) -} - -// Err returns a proper error from this aggregate error. -func (e *AggregateError) Err() error { - if len(e.errs) > 0 { - return e.errs - } - return nil -} diff --git a/tenant/error_bucket.go b/tenant/error_bucket.go deleted file mode 100644 index 4c7c61beb5b..00000000000 --- a/tenant/error_bucket.go +++ /dev/null @@ -1,73 +0,0 @@ -package tenant - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - invalidBucketListRequest = &errors.Error{ - Code: errors.EInternal, - Msg: "invalid bucket list action, call should be GetBucketByName", - Op: "kv/listBucket", - } - - errRenameSystemBucket = &errors.Error{ - Code: errors.EInvalid, - Msg: "system buckets cannot be renamed", - } - - errDeleteSystemBucket = &errors.Error{ - Code: errors.EInvalid, - Msg: "system buckets cannot be deleted", - } - - ErrBucketNotFound = &errors.Error{ - Code: errors.ENotFound, - Msg: "bucket not found", - } - - ErrBucketNameNotUnique = &errors.Error{ - Code: errors.EConflict, - Msg: "bucket name is not unique", - } -) - -// ErrBucketNotFoundByName is used when the user is not found. -func ErrBucketNotFoundByName(n string) *errors.Error { - return &errors.Error{ - Msg: fmt.Sprintf("bucket %q not found", n), - Code: errors.ENotFound, - } -} - -// ErrCorruptBucket is used when the user cannot be unmarshalled from the bytes -// stored in the kv. -func ErrCorruptBucket(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: "user could not be unmarshalled", - Err: err, - Op: "kv/UnmarshalBucket", - } -} - -// BucketAlreadyExistsError is used when attempting to create a user with a name -// that already exists. -func BucketAlreadyExistsError(n string) *errors.Error { - return &errors.Error{ - Code: errors.EConflict, - Msg: fmt.Sprintf("bucket with name %s already exists", n), - } -} - -// ErrUnprocessableBucket is used when a org is not able to be processed. -func ErrUnprocessableBucket(err error) *errors.Error { - return &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "user could not be marshalled", - Err: err, - Op: "kv/MarshalBucket", - } -} diff --git a/tenant/error_org.go b/tenant/error_org.go deleted file mode 100644 index 3fab66b09db..00000000000 --- a/tenant/error_org.go +++ /dev/null @@ -1,64 +0,0 @@ -package tenant - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - // ErrOrgNotFound is used when the user is not found. - ErrOrgNotFound = &errors.Error{ - Msg: "organization not found", - Code: errors.ENotFound, - } -) - -// OrgAlreadyExistsError is used when creating a new organization with -// a name that has already been used. Organization names must be unique. -func OrgAlreadyExistsError(name string) error { - return &errors.Error{ - Code: errors.EConflict, - Msg: fmt.Sprintf("organization with name %s already exists", name), - } -} - -func OrgNotFoundByName(name string) error { - return &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindOrganizations, - Msg: fmt.Sprintf("organization name \"%s\" not found", name), - } -} - -// ErrCorruptOrg is used when the user cannot be unmarshalled from the bytes -// stored in the kv. 
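AggregateError, defined just above, targets exactly the "apply a side-effecting call across a slice" case mentioned in its comment. A hedged usage sketch; the deleteBuckets helper and the BucketService parameter are assumptions made for this example:

```go
package tenantutil

import (
	"context"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/tenant"
)

// deleteBuckets deletes every bucket in ids, collecting failures instead of
// stopping at the first one.
func deleteBuckets(ctx context.Context, svc influxdb.BucketService, ids []platform.ID) error {
	agg := tenant.NewAggregateError()
	for _, id := range ids {
		agg.Add(svc.DeleteBucket(ctx, id)) // Add is a no-op for nil errors
	}
	// Err returns nil when nothing was added; otherwise an error whose message
	// lists each failure as "error i/n: ...".
	return agg.Err()
}
```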
-func ErrCorruptOrg(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: "user could not be unmarshalled", - Err: err, - Op: "kv/UnmarshalOrg", - } -} - -// ErrUnprocessableOrg is used when a org is not able to be processed. -func ErrUnprocessableOrg(err error) *errors.Error { - return &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "user could not be marshalled", - Err: err, - Op: "kv/MarshalOrg", - } -} - -// InvalidOrgIDError is used when a service was provided an invalid ID. -// This is some sort of internal server error. -func InvalidOrgIDError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "org id provided is invalid", - Err: err, - } -} diff --git a/tenant/error_urm.go b/tenant/error_urm.go deleted file mode 100644 index 6da953fa081..00000000000 --- a/tenant/error_urm.go +++ /dev/null @@ -1,60 +0,0 @@ -package tenant - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - // ErrInvalidURMID is used when the service was provided - // an invalid ID format. - ErrInvalidURMID = &errors.Error{ - Code: errors.EInvalid, - Msg: "provided user resource mapping ID has invalid format", - } - - // ErrURMNotFound is used when the user resource mapping is not found. - ErrURMNotFound = &errors.Error{ - Msg: "user to resource mapping not found", - Code: errors.ENotFound, - } -) - -// UnavailableURMServiceError is used if we aren't able to interact with the -// store, it means the store is not available at the moment (e.g. network). -func UnavailableURMServiceError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("Unable to connect to resource mapping service. Please try again; Err: %v", err), - Op: "kv/userResourceMapping", - } -} - -// CorruptURMError is used when the config cannot be unmarshalled from the -// bytes stored in the kv. -func CorruptURMError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("Unknown internal user resource mapping data error; Err: %v", err), - Op: "kv/userResourceMapping", - } -} - -// ErrUnprocessableMapping is used when a user resource mapping is not able to be converted to JSON. -func ErrUnprocessableMapping(err error) *errors.Error { - return &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: fmt.Sprintf("unable to convert mapping of user to resource into JSON; Err %v", err), - } -} - -// NonUniqueMappingError is an internal error when a user already has -// been mapped to a resource -func NonUniqueMappingError(userID platform.ID) error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("Unexpected error when assigning user to a resource: mapping for user %s already exists", userID.String()), - } -} diff --git a/tenant/error_user.go b/tenant/error_user.go deleted file mode 100644 index c564e37b040..00000000000 --- a/tenant/error_user.go +++ /dev/null @@ -1,116 +0,0 @@ -package tenant - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -const MinPasswordLen int = 8 - -var ( - // ErrUserNotFound is used when the user is not found. - ErrUserNotFound = &errors.Error{ - Msg: "user not found", - Code: errors.ENotFound, - } - - // EIncorrectPassword is returned when any password operation fails in which - // we do not want to leak information. 
- EIncorrectPassword = &errors.Error{ - Code: errors.EForbidden, - Msg: "your username or password is incorrect", - } - - // EIncorrectUser is returned when any user is failed to be found which indicates - // the userID provided is for a user that does not exist. - EIncorrectUser = &errors.Error{ - Code: errors.EForbidden, - Msg: "your userID is incorrect", - } - - // EShortPassword is used when a password is less than the minimum - // acceptable password length. - EShortPassword = &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("passwords must be at least %d characters long", MinPasswordLen), - } -) - -// UserAlreadyExistsError is used when attempting to create a user with a name -// that already exists. -func UserAlreadyExistsError(n string) *errors.Error { - return &errors.Error{ - Code: errors.EConflict, - Msg: fmt.Sprintf("user with name %s already exists", n), - } -} - -// UserIDAlreadyExistsError is used when attempting to create a user with an ID -// that already exists. -func UserIDAlreadyExistsError(id string) *errors.Error { - return &errors.Error{ - Code: errors.EConflict, - Msg: fmt.Sprintf("user with ID %s already exists", id), - } -} - -// UnexpectedUserBucketError is used when the error comes from an internal system. -func UnexpectedUserBucketError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("unexpected error retrieving user bucket; Err: %v", err), - Op: "kv/userBucket", - } -} - -// UnexpectedUserIndexError is used when the error comes from an internal system. -func UnexpectedUserIndexError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("unexpected error retrieving user index; Err: %v", err), - Op: "kv/userIndex", - } -} - -// InvalidUserIDError is used when a service was provided an invalid ID. -// This is some sort of internal server error. -func InvalidUserIDError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "user id provided is invalid", - Err: err, - } -} - -// ErrCorruptUser is used when the user cannot be unmarshalled from the bytes -// stored in the kv. -func ErrCorruptUser(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: "user could not be unmarshalled", - Err: err, - Op: "kv/UnmarshalUser", - } -} - -// ErrUnprocessableUser is used when a user is not able to be processed. -func ErrUnprocessableUser(err error) *errors.Error { - return &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "user could not be marshalled", - Err: err, - Op: "kv/MarshalUser", - } -} - -// UnavailablePasswordServiceError is used if we aren't able to add the -// password to the store, it means the store is not available at the moment -// (e.g. network). -func UnavailablePasswordServiceError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EUnavailable, - Msg: fmt.Sprintf("Unable to connect to password service. 
Please try again; Err: %v", err), - Op: "kv/setPassword", - } -} diff --git a/tenant/http_client_bucket.go b/tenant/http_client_bucket.go deleted file mode 100644 index bca3e65137e..00000000000 --- a/tenant/http_client_bucket.go +++ /dev/null @@ -1,174 +0,0 @@ -package tenant - -import ( - "context" - "fmt" - "path" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/pkg/httpc" -) - -// BucketClientService connects to Influx via HTTP using tokens to manage buckets -type BucketClientService struct { - Client *httpc.Client - // OpPrefix is an additional property for error - // find bucket service, when finds nothing. - OpPrefix string -} - -// FindBucketByName returns a single bucket by name -func (s *BucketClientService) FindBucketByName(ctx context.Context, orgID platform.ID, name string) (*influxdb.Bucket, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if name == "" { - return nil, &errors.Error{ - Code: errors.EUnprocessableEntity, - Op: s.OpPrefix + influxdb.OpFindBuckets, - Msg: "bucket name is required", - } - } - - bkts, n, err := s.FindBuckets(ctx, influxdb.BucketFilter{ - Name: &name, - OrganizationID: &orgID, - }) - if err != nil { - return nil, err - } - if n == 0 || len(bkts) == 0 { - return nil, &errors.Error{ - Code: errors.ENotFound, - Op: s.OpPrefix + influxdb.OpFindBucket, - Msg: fmt.Sprintf("bucket %q not found", name), - } - } - - return bkts[0], nil -} - -// FindBucketByID returns a single bucket by ID. -func (s *BucketClientService) FindBucketByID(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - // TODO(@jsteenb2): are tracing - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var br bucketResponse - err := s.Client. - Get(path.Join(prefixBuckets, id.String())). - DecodeJSON(&br). - Do(ctx) - if err != nil { - return nil, err - } - return br.toInfluxDB(), nil -} - -// FindBucket returns the first bucket that matches filter. -func (s *BucketClientService) FindBucket(ctx context.Context, filter influxdb.BucketFilter) (*influxdb.Bucket, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - bs, n, err := s.FindBuckets(ctx, filter) - if err != nil { - return nil, err - } - - if n == 0 && filter.Name != nil { - return nil, &errors.Error{ - Code: errors.ENotFound, - Op: s.OpPrefix + influxdb.OpFindBucket, - Msg: fmt.Sprintf("bucket %q not found", *filter.Name), - } - } else if n == 0 { - return nil, &errors.Error{ - Code: errors.ENotFound, - Op: s.OpPrefix + influxdb.OpFindBucket, - Msg: "bucket not found", - } - } - - return bs[0], nil -} - -// FindBuckets returns a list of buckets that match filter and the total count of matching buckets. -// Additional options provide pagination & sorting. -func (s *BucketClientService) FindBuckets(ctx context.Context, filter influxdb.BucketFilter, opt ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - params := influxdb.FindOptionParams(opt...) 
- if filter.OrganizationID != nil { - params = append(params, [2]string{"orgID", filter.OrganizationID.String()}) - } - if filter.Org != nil { - params = append(params, [2]string{"org", *filter.Org}) - } - if filter.ID != nil { - params = append(params, [2]string{"id", filter.ID.String()}) - } - if filter.Name != nil { - params = append(params, [2]string{"name", (*filter.Name)}) - } - - var bs bucketsResponse - err := s.Client. - Get(prefixBuckets). - QueryParams(params...). - DecodeJSON(&bs). - Do(ctx) - if err != nil { - return nil, 0, err - } - buckets := make([]*influxdb.Bucket, 0, len(bs.Buckets)) - for _, b := range bs.Buckets { - pb := b.bucket.toInfluxDB() - buckets = append(buckets, pb) - } - - return buckets, len(buckets), nil -} - -// CreateBucket creates a new bucket and sets b.ID with the new identifier. -func (s *BucketClientService) CreateBucket(ctx context.Context, b *influxdb.Bucket) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - var br bucketResponse - err := s.Client. - PostJSON(newBucket(b), prefixBuckets). - DecodeJSON(&br). - Do(ctx) - if err != nil { - return err - } - - pb := br.toInfluxDB() - *b = *pb - return nil -} - -// UpdateBucket updates a single bucket with changeset. -// Returns the new bucket state after update. -func (s *BucketClientService) UpdateBucket(ctx context.Context, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { - var br bucketResponse - err := s.Client. - PatchJSON(newBucketUpdate(&upd), path.Join(prefixBuckets, id.String())). - DecodeJSON(&br). - Do(ctx) - if err != nil { - return nil, err - } - return br.toInfluxDB(), nil -} - -// DeleteBucket removes a bucket by ID. -func (s *BucketClientService) DeleteBucket(ctx context.Context, id platform.ID) error { - return s.Client. - Delete(path.Join(prefixBuckets, id.String())). - Do(ctx) -} diff --git a/tenant/http_client_onboarding.go b/tenant/http_client_onboarding.go deleted file mode 100644 index 623fa9e28c1..00000000000 --- a/tenant/http_client_onboarding.go +++ /dev/null @@ -1,48 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/pkg/httpc" -) - -// OnboardClientService connects to Influx via HTTP to perform onboarding operations -type OnboardClientService struct { - Client *httpc.Client -} - -// IsOnboarding determine if onboarding request is allowed. -func (s *OnboardClientService) IsOnboarding(ctx context.Context) (bool, error) { - var resp isOnboardingResponse - err := s.Client. - Get(prefixOnboard). - DecodeJSON(&resp). - Do(ctx) - - if err != nil { - return false, err - } - return resp.Allowed, nil -} - -// OnboardInitialUser OnboardingResults. -func (s *OnboardClientService) OnboardInitialUser(ctx context.Context, or *influxdb.OnboardingRequest) (*influxdb.OnboardingResults, error) { - res := &onboardingResponse{} - - err := s.Client. - PostJSON(or, prefixOnboard). - DecodeJSON(res). 
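A sketch of driving the HTTP bucket client above; building the underlying *httpc.Client (base URL, auth token) is environment-specific and omitted here, and the find-then-create flow is only an illustration:

```go
package tenantutil

import (
	"context"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/platform"
	"github.com/influxdata/influxdb/v2/pkg/httpc"
	"github.com/influxdata/influxdb/v2/tenant"
)

// findOrCreateBucket looks a bucket up by name and creates it when missing.
func findOrCreateBucket(ctx context.Context, c *httpc.Client, orgID platform.ID, name string) (*influxdb.Bucket, error) {
	svc := &tenant.BucketClientService{Client: c}

	b, err := svc.FindBucketByName(ctx, orgID, name)
	if err == nil {
		return b, nil
	}

	// For the sake of the sketch, treat any lookup failure as "missing"; real
	// code would inspect the error code first.
	b = &influxdb.Bucket{OrgID: orgID, Name: name}
	if err := svc.CreateBucket(ctx, b); err != nil {
		return nil, err
	}
	return b, nil // CreateBucket fills in b.ID from the response
}
```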
- Do(ctx) - - if err != nil { - return nil, err - } - - return &influxdb.OnboardingResults{ - Org: &res.Organization.Organization, - User: &res.User.User, - Auth: res.Auth.toPlatform(), - Bucket: res.Bucket.toInfluxDB(), - }, nil -} diff --git a/tenant/http_client_org.go b/tenant/http_client_org.go deleted file mode 100644 index 846aa5ec6fc..00000000000 --- a/tenant/http_client_org.go +++ /dev/null @@ -1,152 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/pkg/httpc" -) - -// OrgClientService connects to Influx via HTTP using tokens to manage organizations -type OrgClientService struct { - Client *httpc.Client - // OpPrefix is for not found errors. - OpPrefix string -} - -func (o orgsResponse) toInfluxdb() []*influxdb.Organization { - orgs := make([]*influxdb.Organization, len(o.Organizations)) - for i := range o.Organizations { - orgs[i] = &o.Organizations[i].Organization - } - return orgs -} - -// FindOrganizationByID gets a single organization with a given id using HTTP. -func (s *OrgClientService) FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - filter := influxdb.OrganizationFilter{ID: &id} - o, err := s.FindOrganization(ctx, filter) - if err != nil { - return nil, &errors.Error{ - Err: err, - Op: s.OpPrefix + influxdb.OpFindOrganizationByID, - } - } - return o, nil -} - -// FindOrganization gets a single organization matching the filter using HTTP. -func (s *OrgClientService) FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - if filter.ID == nil && filter.Name == nil { - return nil, influxdb.ErrInvalidOrgFilter - } - os, n, err := s.FindOrganizations(ctx, filter) - if err != nil { - return nil, &errors.Error{ - Err: err, - Op: s.OpPrefix + influxdb.OpFindOrganization, - } - } - - if n == 0 { - return nil, &errors.Error{ - Code: errors.ENotFound, - Op: s.OpPrefix + influxdb.OpFindOrganization, - Msg: "organization not found", - } - } - - return os[0], nil -} - -// FindOrganizations returns all organizations that match the filter via HTTP. -func (s *OrgClientService) FindOrganizations(ctx context.Context, filter influxdb.OrganizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - params := influxdb.FindOptionParams(opt...) - if filter.Name != nil { - span.LogKV("org", *filter.Name) - params = append(params, [2]string{"org", *filter.Name}) - } - if filter.ID != nil { - span.LogKV("org-id", *filter.ID) - params = append(params, [2]string{"orgID", filter.ID.String()}) - } - for _, o := range opt { - if o.Offset != 0 { - span.LogKV("offset", o.Offset) - } - span.LogKV("descending", o.Descending) - if o.Limit > 0 { - span.LogKV("limit", o.Limit) - } - if o.SortBy != "" { - span.LogKV("sortBy", o.SortBy) - } - } - - var os orgsResponse - err := s.Client. - Get(prefixOrganizations). - QueryParams(params...). - DecodeJSON(&os). - Do(ctx) - if err != nil { - return nil, 0, err - } - - orgs := os.toInfluxdb() - return orgs, len(orgs), nil -} - -// CreateOrganization creates an organization. 
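A typical first-run flow against the onboarding client above; the onboarding request itself (initial user, password, org, bucket) is assumed to be built elsewhere:

```go
package tenantutil

import (
	"context"
	"errors"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/tenant"
)

// onboardIfNeeded runs initial setup exactly once per fresh instance.
func onboardIfNeeded(ctx context.Context, svc *tenant.OnboardClientService, req *influxdb.OnboardingRequest) (*influxdb.OnboardingResults, error) {
	allowed, err := svc.IsOnboarding(ctx)
	if err != nil {
		return nil, err
	}
	if !allowed {
		// The server reports onboarding as already completed.
		return nil, errors.New("onboarding is not allowed on this instance")
	}
	// The results carry the created org, user, bucket, and an auth token.
	return svc.OnboardInitialUser(ctx, req)
}
```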
-func (s *OrgClientService) CreateOrganization(ctx context.Context, o *influxdb.Organization) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if o.Name != "" { - span.LogKV("org", o.Name) - } - if o.ID != 0 { - span.LogKV("org-id", o.ID) - } - - return s.Client. - PostJSON(o, prefixOrganizations). - DecodeJSON(o). - Do(ctx) -} - -// UpdateOrganization updates the organization over HTTP. -func (s *OrgClientService) UpdateOrganization(ctx context.Context, id platform.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - span.LogKV("org-id", id) - span.LogKV("name", upd.Name) - - var o influxdb.Organization - err := s.Client. - PatchJSON(upd, prefixOrganizations, id.String()). - DecodeJSON(&o). - Do(ctx) - if err != nil { - return nil, tracing.LogError(span, err) - } - - return &o, nil -} - -// DeleteOrganization removes organization id over HTTP. -func (s *OrgClientService) DeleteOrganization(ctx context.Context, id platform.ID) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - return s.Client. - Delete(prefixOrganizations, id.String()). - Do(ctx) -} diff --git a/tenant/http_client_urm.go b/tenant/http_client_urm.go deleted file mode 100644 index 679972e1934..00000000000 --- a/tenant/http_client_urm.go +++ /dev/null @@ -1,128 +0,0 @@ -package tenant - -import ( - "context" - "path" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/pkg/httpc" -) - -type UserResourceMappingClient struct { - Client *httpc.Client -} - -// CreateUserResourceMapping will create a user resource mapping -func (s *UserResourceMappingClient) CreateUserResourceMapping(ctx context.Context, m *influxdb.UserResourceMapping) error { - if err := m.Validate(); err != nil { - return err - } - - urlPath := resourceIDPath(m.ResourceType, m.ResourceID, string(m.UserType)+"s") - return s.Client. - PostJSON(influxdb.User{ID: m.UserID}, urlPath). - DecodeJSON(m). - Do(ctx) -} - -// FindUserResourceMappings returns the user resource mappings -func (s *UserResourceMappingClient) FindUserResourceMappings(ctx context.Context, f influxdb.UserResourceMappingFilter, opt ...influxdb.FindOptions) ([]*influxdb.UserResourceMapping, int, error) { - var results resourceUsersResponse - err := s.Client. - Get(resourceIDPath(f.ResourceType, f.ResourceID, string(f.UserType)+"s")). - DecodeJSON(&results). - Do(ctx) - if err != nil { - return nil, 0, err - } - - urs := make([]*influxdb.UserResourceMapping, len(results.Users)) - for k, item := range results.Users { - urs[k] = &influxdb.UserResourceMapping{ - ResourceID: f.ResourceID, - ResourceType: f.ResourceType, - UserID: item.User.ID, - UserType: item.Role, - } - } - return urs, len(urs), nil -} - -// DeleteUserResourceMapping will delete user resource mapping based in criteria. -func (s *UserResourceMappingClient) DeleteUserResourceMapping(ctx context.Context, resourceID platform.ID, userID platform.ID) error { - urlPath := resourceIDUserPath(influxdb.OrgsResourceType, resourceID, influxdb.Member, userID) - return s.Client. - Delete(urlPath). - Do(ctx) -} - -// SpecificURMSvc returns a urm service with specific resource and user types. 
-// this will help us stay compatible with the existing service contract but also allow for urm deletes to go through the correct -// api -func (s *UserResourceMappingClient) SpecificURMSvc(rt influxdb.ResourceType, ut influxdb.UserType) *SpecificURMSvc { - return &SpecificURMSvc{ - Client: s.Client, - rt: rt, - ut: ut, - } -} - -// SpecificURMSvc is a URM client that speaks to a specific resource with a specified user type -type SpecificURMSvc struct { - Client *httpc.Client - rt influxdb.ResourceType - ut influxdb.UserType -} - -// FindUserResourceMappings returns the user resource mappings -func (s *SpecificURMSvc) FindUserResourceMappings(ctx context.Context, f influxdb.UserResourceMappingFilter, opt ...influxdb.FindOptions) ([]*influxdb.UserResourceMapping, int, error) { - var results resourceUsersResponse - err := s.Client. - Get(resourceIDPath(s.rt, f.ResourceID, string(s.ut)+"s")). - DecodeJSON(&results). - Do(ctx) - if err != nil { - return nil, 0, err - } - - urs := make([]*influxdb.UserResourceMapping, len(results.Users)) - for k, item := range results.Users { - urs[k] = &influxdb.UserResourceMapping{ - ResourceID: f.ResourceID, - ResourceType: f.ResourceType, - UserID: item.User.ID, - UserType: item.Role, - } - } - return urs, len(urs), nil -} - -// CreateUserResourceMapping will create a user resource mapping -func (s *SpecificURMSvc) CreateUserResourceMapping(ctx context.Context, m *influxdb.UserResourceMapping) error { - if err := m.Validate(); err != nil { - return err - } - - urlPath := resourceIDPath(s.rt, m.ResourceID, string(s.ut)+"s") - return s.Client. - PostJSON(influxdb.User{ID: m.UserID}, urlPath). - DecodeJSON(m). - Do(ctx) -} - -// DeleteUserResourceMapping will delete user resource mapping based in criteria. -func (s *SpecificURMSvc) DeleteUserResourceMapping(ctx context.Context, resourceID platform.ID, userID platform.ID) error { - urlPath := resourceIDUserPath(s.rt, resourceID, s.ut, userID) - return s.Client. - Delete(urlPath). - Do(ctx) -} - -func resourceIDPath(resourceType influxdb.ResourceType, resourceID platform.ID, p string) string { - return path.Join("/api/v2/", string(resourceType), resourceID.String(), p) -} - -func resourceIDUserPath(resourceType influxdb.ResourceType, resourceID platform.ID, userType influxdb.UserType, userID platform.ID) string { - return path.Join("/api/v2/", string(resourceType), resourceID.String(), string(userType)+"s", userID.String()) -} diff --git a/tenant/http_client_user.go b/tenant/http_client_user.go deleted file mode 100644 index 2127576a534..00000000000 --- a/tenant/http_client_user.go +++ /dev/null @@ -1,169 +0,0 @@ -package tenant - -import ( - "context" - "net/http" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - khttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/pkg/httpc" -) - -// UserService connects to Influx via HTTP using tokens to manage users -type UserClientService struct { - Client *httpc.Client - // OpPrefix is the ops of not found error. - OpPrefix string -} - -// FindMe returns user information about the owner of the token -func (s *UserClientService) FindMe(ctx context.Context, id platform.ID) (*influxdb.User, error) { - var res influxdb.UserResponse - err := s.Client. - Get(prefixMe). - DecodeJSON(&res). 
- Do(ctx) - if err != nil { - return nil, err - } - return &res.User, nil -} - -// FindUserByID returns a single user by ID. -func (s *UserClientService) FindUserByID(ctx context.Context, id platform.ID) (*influxdb.User, error) { - var res influxdb.UserResponse - err := s.Client. - Get(prefixUsers, id.String()). - DecodeJSON(&res). - Do(ctx) - if err != nil { - return nil, err - } - return &res.User, nil -} - -// FindUser returns the first user that matches filter. -func (s *UserClientService) FindUser(ctx context.Context, filter influxdb.UserFilter) (*influxdb.User, error) { - if filter.ID == nil && filter.Name == nil { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "user not found", - } - } - users, n, err := s.FindUsers(ctx, filter) - if err != nil { - return nil, &errors.Error{ - Op: s.OpPrefix + influxdb.OpFindUser, - Err: err, - } - } - - if n == 0 { - return nil, &errors.Error{ - Code: errors.ENotFound, - Op: s.OpPrefix + influxdb.OpFindUser, - Msg: "no results found", - } - } - - return users[0], nil -} - -// FindUsers returns a list of users that match filter and the total count of matching users. -// Additional options provide pagination & sorting. -func (s *UserClientService) FindUsers(ctx context.Context, filter influxdb.UserFilter, opt ...influxdb.FindOptions) ([]*influxdb.User, int, error) { - params := influxdb.FindOptionParams(opt...) - if filter.ID != nil { - params = append(params, [2]string{"id", filter.ID.String()}) - } - if filter.Name != nil { - params = append(params, [2]string{"name", *filter.Name}) - } - - var r usersResponse - err := s.Client. - Get(prefixUsers). - QueryParams(params...). - DecodeJSON(&r). - Do(ctx) - if err != nil { - return nil, 0, err - } - - us := r.ToInfluxdb() - return us, len(us), nil -} - -// CreateUser creates a new user and sets u.ID with the new identifier. -func (s *UserClientService) CreateUser(ctx context.Context, u *influxdb.User) error { - return s.Client. - PostJSON(u, prefixUsers). - DecodeJSON(u). - Do(ctx) -} - -// UpdateUser updates a single user with changeset. -// Returns the new user state after update. -func (s *UserClientService) UpdateUser(ctx context.Context, id platform.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { - var res influxdb.UserResponse - err := s.Client. - PatchJSON(upd, prefixUsers, id.String()). - DecodeJSON(&res). - Do(ctx) - if err != nil { - return nil, err - } - return &res.User, nil -} - -// DeleteUser removes a user by ID. -func (s *UserClientService) DeleteUser(ctx context.Context, id platform.ID) error { - return s.Client. - Delete(prefixUsers, id.String()). - StatusFn(func(resp *http.Response) error { - return khttp.CheckErrorStatus(http.StatusNoContent, resp) - }). - Do(ctx) -} - -// FindUserByID returns a single user by ID. -func (s *UserClientService) FindPermissionForUser(ctx context.Context, id platform.ID) (influxdb.PermissionSet, error) { - var ps influxdb.PermissionSet - err := s.Client. - Get(prefixUsers, id.String(), "permissions"). - DecodeJSON(&ps). - Do(ctx) - if err != nil { - return nil, err - } - return ps, nil -} - -// PasswordClientService is an http client to speak to the password service. -type PasswordClientService struct { - Client *httpc.Client -} - -var _ influxdb.PasswordsService = (*PasswordClientService)(nil) - -// SetPassword sets the user's password. -func (s *PasswordClientService) SetPassword(ctx context.Context, userID platform.ID, password string) error { - return s.Client. 
- PostJSON(passwordSetRequest{ - Password: password, - }, prefixUsers, userID.String(), "password"). - Do(ctx) -} - -// ComparePassword compares the user new password with existing. Note: is not implemented. -func (s *PasswordClientService) ComparePassword(ctx context.Context, userID platform.ID, password string) error { - panic("not implemented") -} - -// CompareAndSetPassword compares the old and new password and submits the new password if possible. -// Note: is not implemented. -func (s *PasswordClientService) CompareAndSetPassword(ctx context.Context, userID platform.ID, old string, new string) error { - panic("not implemented") -} diff --git a/tenant/http_handler_urm.go b/tenant/http_handler_urm.go deleted file mode 100644 index aff0cb43dd8..00000000000 --- a/tenant/http_handler_urm.go +++ /dev/null @@ -1,279 +0,0 @@ -package tenant - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "path" - - "github.com/go-chi/chi" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -type urmHandler struct { - log *zap.Logger - svc influxdb.UserResourceMappingService - userSvc influxdb.UserService - api *kithttp.API - - rt influxdb.ResourceType - idLookupKey string -} - -// NewURMHandler generates a mountable handler for URMs. It needs to know how it will be looking up your resource id -// this system assumes you are using chi syntax for query string params `/orgs/{id}/` so it can use chi.URLParam(). -func NewURMHandler(log *zap.Logger, rt influxdb.ResourceType, idLookupKey string, uSvc influxdb.UserService, urmSvc influxdb.UserResourceMappingService) http.Handler { - h := &urmHandler{ - log: log, - svc: urmSvc, - userSvc: uSvc, - api: kithttp.NewAPI(kithttp.WithLog(log)), - - rt: rt, - idLookupKey: idLookupKey, - } - - r := chi.NewRouter() - r.Get("/", h.getURMsByType) - r.Post("/", h.postURMByType) - r.Delete("/{userID}", h.deleteURM) - return r -} - -func (h *urmHandler) getURMsByType(w http.ResponseWriter, r *http.Request) { - userType := userTypeFromPath(r.URL.Path) - ctx := r.Context() - req, err := h.decodeGetRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - filter := influxdb.UserResourceMappingFilter{ - ResourceID: req.ResourceID, - ResourceType: h.rt, - UserType: userType, - } - mappings, _, err := h.svc.FindUserResourceMappings(ctx, filter) - if err != nil { - h.api.Err(w, r, err) - return - } - - users := make([]*influxdb.User, 0, len(mappings)) - for _, m := range mappings { - if m.MappingType == influxdb.OrgMappingType { - continue - } - user, err := h.userSvc.FindUserByID(ctx, m.UserID) - if err != nil { - h.api.Err(w, r, err) - return - } - - users = append(users, user) - } - h.log.Debug("Members/owners retrieved", zap.String("users", fmt.Sprint(users))) - - h.api.Respond(w, r, http.StatusOK, newResourceUsersResponse(filter, users)) - -} - -type getRequest struct { - ResourceID platform.ID -} - -func (h *urmHandler) decodeGetRequest(ctx context.Context, r *http.Request) (*getRequest, error) { - id := chi.URLParam(r, h.idLookupKey) - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - req := &getRequest{ - ResourceID: i, - } - - return req, nil -} - -func (h *urmHandler) 
postURMByType(w http.ResponseWriter, r *http.Request) { - userType := userTypeFromPath(r.URL.Path) - ctx := r.Context() - req, err := h.decodePostRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - user, err := h.userSvc.FindUserByID(ctx, req.UserID) - if err != nil { - h.api.Err(w, r, err) - return - } - - mapping := &influxdb.UserResourceMapping{ - ResourceID: req.ResourceID, - ResourceType: h.rt, - UserID: req.UserID, - UserType: userType, - } - if err := h.svc.CreateUserResourceMapping(ctx, mapping); err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Member/owner created", zap.String("mapping", fmt.Sprint(mapping))) - - h.api.Respond(w, r, http.StatusCreated, newResourceUserResponse(user, userType)) -} - -type postRequest struct { - UserID platform.ID - ResourceID platform.ID -} - -func (h urmHandler) decodePostRequest(ctx context.Context, r *http.Request) (*postRequest, error) { - id := chi.URLParam(r, h.idLookupKey) - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var rid platform.ID - if err := rid.DecodeFromString(id); err != nil { - return nil, err - } - - u := &influxdb.User{} - if err := json.NewDecoder(r.Body).Decode(u); err != nil { - return nil, err - } - - if !u.ID.Valid() { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "user id missing or invalid", - } - } - - return &postRequest{ - UserID: u.ID, - ResourceID: rid, - }, nil -} - -func (h *urmHandler) deleteURM(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := h.decodeDeleteRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - if err := h.svc.DeleteUserResourceMapping(ctx, req.resourceID, req.userID); err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Member deleted", zap.String("resourceID", req.resourceID.String()), zap.String("memberID", req.userID.String())) - - w.WriteHeader(http.StatusNoContent) -} - -type deleteRequest struct { - userID platform.ID - resourceID platform.ID -} - -func (h *urmHandler) decodeDeleteRequest(ctx context.Context, r *http.Request) (*deleteRequest, error) { - id := chi.URLParam(r, h.idLookupKey) - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var rid platform.ID - if err := rid.DecodeFromString(id); err != nil { - return nil, err - } - - id = chi.URLParam(r, "userID") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing member id", - } - } - - var uid platform.ID - if err := uid.DecodeFromString(id); err != nil { - return nil, err - } - - return &deleteRequest{ - userID: uid, - resourceID: rid, - }, nil -} - -type resourceUserResponse struct { - Role influxdb.UserType `json:"role"` - *influxdb.UserResponse -} - -func newResourceUserResponse(u *influxdb.User, userType influxdb.UserType) *resourceUserResponse { - return &resourceUserResponse{ - Role: userType, - UserResponse: newUserResponse(u), - } -} - -type resourceUsersResponse struct { - Links map[string]string `json:"links"` - Users []*resourceUserResponse `json:"users"` -} - -func newResourceUsersResponse(f influxdb.UserResourceMappingFilter, users []*influxdb.User) *resourceUsersResponse { - rs := resourceUsersResponse{ - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/%s/%s/%ss", f.ResourceType, f.ResourceID, f.UserType), - }, - Users: make([]*resourceUserResponse, 0, len(users)), - } - - for _, user := range users { - rs.Users = append(rs.Users, 
newResourceUserResponse(user, f.UserType)) - } - return &rs -} - -// determine the type of request from the path. -func userTypeFromPath(p string) influxdb.UserType { - if p == "" { - return influxdb.Member - } - - switch path.Base(p) { - case "members": - return influxdb.Member - case "owners": - return influxdb.Owner - default: - return userTypeFromPath(path.Dir(p)) - } -} diff --git a/tenant/http_handler_urm_test.go b/tenant/http_handler_urm_test.go deleted file mode 100644 index 36040dadd23..00000000000 --- a/tenant/http_handler_urm_test.go +++ /dev/null @@ -1,494 +0,0 @@ -package tenant_test - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/go-chi/chi" - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - ihttp "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestUserResourceMappingService_GetMembersHandler(t *testing.T) { - type fields struct { - userService influxdb.UserService - userResourceMappingService influxdb.UserResourceMappingService - } - type args struct { - resourceID string - userType influxdb.UserType - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get members", - fields: fields{ - userService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ID: id, Name: fmt.Sprintf("user%s", id), Status: influxdb.Active}, nil - }, - }, - userResourceMappingService: &mock.UserResourceMappingService{ - FindMappingsFn: func(ctx context.Context, filter influxdb.UserResourceMappingFilter) ([]*influxdb.UserResourceMapping, int, error) { - ms := []*influxdb.UserResourceMapping{ - { - ResourceID: filter.ResourceID, - ResourceType: filter.ResourceType, - UserType: filter.UserType, - UserID: 1, - }, - { - ResourceID: filter.ResourceID, - ResourceType: filter.ResourceType, - UserType: filter.UserType, - UserID: 2, - }, - } - return ms, len(ms), nil - }, - }, - }, - args: args{ - resourceID: "0000000000000099", - userType: influxdb.Member, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: `{ - "links": { - "self": "/api/v2/%s/0000000000000099/members" - }, - "users": [ - { - "role": "member", - "links": { - "self": "/api/v2/users/0000000000000001" - }, - "id": "0000000000000001", - "name": "user0000000000000001", - "status": "active" - }, - { - "role": "member", - "links": { - "self": "/api/v2/users/0000000000000002" - }, - "id": "0000000000000002", - "name": "user0000000000000002", - "status": "active" - } - ] -}`, - }, - }, - - { - name: "get owners", - fields: fields{ - userService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ID: id, Name: fmt.Sprintf("user%s", id), Status: influxdb.Active}, nil - }, - }, - userResourceMappingService: &mock.UserResourceMappingService{ - FindMappingsFn: func(ctx context.Context, filter influxdb.UserResourceMappingFilter) ([]*influxdb.UserResourceMapping, int, error) { - ms := []*influxdb.UserResourceMapping{ - { - ResourceID: 
filter.ResourceID, - ResourceType: filter.ResourceType, - UserType: filter.UserType, - UserID: 1, - }, - { - ResourceID: filter.ResourceID, - ResourceType: filter.ResourceType, - UserType: filter.UserType, - UserID: 2, - }, - } - return ms, len(ms), nil - }, - }, - }, - args: args{ - resourceID: "0000000000000099", - userType: influxdb.Owner, - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: `{ - "links": { - "self": "/api/v2/%s/0000000000000099/owners" - }, - "users": [ - { - "role": "owner", - "links": { - "self": "/api/v2/users/0000000000000001" - }, - "id": "0000000000000001", - "name": "user0000000000000001", - "status": "active" - }, - { - "role": "owner", - "links": { - "self": "/api/v2/users/0000000000000002" - }, - "id": "0000000000000002", - "name": "user0000000000000002", - "status": "active" - } - ] -}`, - }, - }, - } - - for _, tt := range tests { - resourceTypes := []influxdb.ResourceType{ - influxdb.BucketsResourceType, - influxdb.DashboardsResourceType, - influxdb.OrgsResourceType, - influxdb.SourcesResourceType, - influxdb.TasksResourceType, - influxdb.TelegrafsResourceType, - influxdb.UsersResourceType, - } - - for _, resourceType := range resourceTypes { - t.Run(tt.name+"_"+string(resourceType), func(t *testing.T) { - // create server - h := tenant.NewURMHandler(zaptest.NewLogger(t), resourceType, "id", tt.fields.userService, tt.fields.userResourceMappingService) - router := chi.NewRouter() - router.Mount(fmt.Sprintf("/api/v2/%s/{id}/members", resourceType), h) - router.Mount(fmt.Sprintf("/api/v2/%s/{id}/owners", resourceType), h) - s := httptest.NewServer(router) - defer s.Close() - - // craft request - r, err := http.NewRequest("GET", fmt.Sprintf("%s/api/v2/%s/%s/%ss", s.URL, resourceType, tt.args.resourceID, tt.args.userType), nil) - if err != nil { - t.Fatal(err) - } - - c := s.Client() - res, err := c.Do(r) - if err != nil { - t.Fatal(err) - } - // check response - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. GetMembersHandler() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. GetMembersHandler() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if diff := cmp.Diff(string(body), fmt.Sprintf(tt.wants.body, resourceType)); tt.wants.body != "" && diff != "" { - t.Errorf("%q. 
GetMembersHandler() = ***%s***", tt.name, diff) - } - }) - } - } -} - -func TestUserResourceMappingService_PostMembersHandler(t *testing.T) { - type fields struct { - userService influxdb.UserService - userResourceMappingService influxdb.UserResourceMappingService - } - type args struct { - resourceID string - userType influxdb.UserType - user influxdb.User - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "post members", - fields: fields{ - userService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ID: id, Name: fmt.Sprintf("user%s", id), Status: influxdb.Active}, nil - }, - }, - userResourceMappingService: &mock.UserResourceMappingService{ - CreateMappingFn: func(ctx context.Context, m *influxdb.UserResourceMapping) error { - return nil - }, - }, - }, - args: args{ - resourceID: "0000000000000099", - user: influxdb.User{ - ID: 1, - Name: "user0000000000000001", - Status: influxdb.Active, - }, - userType: influxdb.Member, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: `{ - "role": "member", - "links": { - "self": "/api/v2/users/0000000000000001" - }, - "id": "0000000000000001", - "name": "user0000000000000001", - "status": "active" -}`, - }, - }, - - { - name: "post owners", - fields: fields{ - userService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ID: id, Name: fmt.Sprintf("user%s", id), Status: influxdb.Active}, nil - }, - }, - userResourceMappingService: &mock.UserResourceMappingService{ - CreateMappingFn: func(ctx context.Context, m *influxdb.UserResourceMapping) error { - return nil - }, - }, - }, - args: args{ - resourceID: "0000000000000099", - user: influxdb.User{ - ID: 2, - Name: "user0000000000000002", - Status: influxdb.Active, - }, - userType: influxdb.Owner, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: `{ - "role": "owner", - "links": { - "self": "/api/v2/users/0000000000000002" - }, - "id": "0000000000000002", - "name": "user0000000000000002", - "status": "active" -}`, - }, - }, - } - - for _, tt := range tests { - resourceTypes := []influxdb.ResourceType{ - influxdb.BucketsResourceType, - influxdb.DashboardsResourceType, - influxdb.OrgsResourceType, - influxdb.SourcesResourceType, - influxdb.TasksResourceType, - influxdb.TelegrafsResourceType, - influxdb.UsersResourceType, - } - - for _, resourceType := range resourceTypes { - t.Run(tt.name+"_"+string(resourceType), func(t *testing.T) { - // create server - h := tenant.NewURMHandler(zaptest.NewLogger(t), resourceType, "id", tt.fields.userService, tt.fields.userResourceMappingService) - router := chi.NewRouter() - router.Mount(fmt.Sprintf("/api/v2/%s/{id}/members", resourceType), h) - router.Mount(fmt.Sprintf("/api/v2/%s/{id}/owners", resourceType), h) - s := httptest.NewServer(router) - defer s.Close() - - // craft request - b, err := json.Marshal(tt.args.user) - if err != nil { - t.Fatalf("failed to unmarshal user: %v", err) - } - - r, err := http.NewRequest("POST", fmt.Sprintf("%s/api/v2/%s/%s/%ss", s.URL, resourceType, tt.args.resourceID, tt.args.userType), bytes.NewReader(b)) - if err != nil { - t.Fatal(err) - } - - c := s.Client() - res, err := c.Do(r) - if err != nil { - t.Fatal(err) - } - 
content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. PostMembersHandler() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. PostMembersHandler() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if diff := cmp.Diff(string(body), tt.wants.body); diff != "" { - t.Errorf("%q. PostMembersHandler() = ***%s***", tt.name, diff) - } - } - }) - } - } -} - -func TestUserResourceMappingService_Client(t *testing.T) { - type fields struct { - userService influxdb.UserService - userResourceMappingService influxdb.UserResourceMappingService - } - type args struct { - resourceID string - userType influxdb.UserType - user influxdb.User - } - tests := []struct { - name string - fields fields - args args - }{ - { - name: "post members", - fields: fields{ - userService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ID: id, Name: fmt.Sprintf("user%s", id), Status: influxdb.Active}, nil - }, - }, - userResourceMappingService: &mock.UserResourceMappingService{ - CreateMappingFn: func(ctx context.Context, m *influxdb.UserResourceMapping) error { - return nil - }, - FindMappingsFn: func(ctx context.Context, f influxdb.UserResourceMappingFilter) ([]*influxdb.UserResourceMapping, int, error) { - return []*influxdb.UserResourceMapping{&influxdb.UserResourceMapping{}}, 1, nil - }, - }, - }, - args: args{ - resourceID: "0000000000000099", - user: influxdb.User{ - ID: 1, - Name: "user0000000000000001", - Status: influxdb.Active, - }, - userType: influxdb.Member, - }, - }, - - { - name: "post owners", - fields: fields{ - userService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ID: id, Name: fmt.Sprintf("user%s", id), Status: influxdb.Active}, nil - }, - }, - userResourceMappingService: &mock.UserResourceMappingService{ - CreateMappingFn: func(ctx context.Context, m *influxdb.UserResourceMapping) error { - return nil - }, - FindMappingsFn: func(ctx context.Context, f influxdb.UserResourceMappingFilter) ([]*influxdb.UserResourceMapping, int, error) { - return []*influxdb.UserResourceMapping{&influxdb.UserResourceMapping{}}, 1, nil - }, - }, - }, - args: args{ - resourceID: "0000000000000099", - user: influxdb.User{ - ID: 2, - Name: "user0000000000000002", - Status: influxdb.Active, - }, - userType: influxdb.Owner, - }, - }, - } - - for _, tt := range tests { - resourceTypes := []influxdb.ResourceType{ - influxdb.BucketsResourceType, - influxdb.DashboardsResourceType, - influxdb.OrgsResourceType, - influxdb.SourcesResourceType, - influxdb.TasksResourceType, - influxdb.TelegrafsResourceType, - influxdb.UsersResourceType, - } - - for _, resourceType := range resourceTypes { - t.Run(tt.name+"_"+string(resourceType), func(t *testing.T) { - // create server - h := tenant.NewURMHandler(zaptest.NewLogger(t), resourceType, "id", tt.fields.userService, tt.fields.userResourceMappingService) - router := chi.NewRouter() - router.Mount(fmt.Sprintf("/api/v2/%s/{id}/members", resourceType), h) - router.Mount(fmt.Sprintf("/api/v2/%s/{id}/owners", resourceType), h) - s := httptest.NewServer(router) - defer s.Close() - ctx := context.Background() - - resourceID := itesting.MustIDBase16(tt.args.resourceID) - urm := 
&influxdb.UserResourceMapping{ResourceType: resourceType, ResourceID: resourceID, UserType: tt.args.userType, UserID: tt.args.user.ID} - - httpClient, err := ihttp.NewHTTPClient(s.URL, "", false) - if err != nil { - t.Fatal(err) - } - c := tenant.UserResourceMappingClient{Client: httpClient} - err = c.CreateUserResourceMapping(ctx, urm) - - if err != nil { - t.Fatal(err) - } - - _, n, err := c.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{ResourceID: resourceID, ResourceType: resourceType, UserType: tt.args.userType}) - if err != nil { - t.Fatal(err) - } - if n != 1 { - t.Fatalf("expected 1 urm to be created, got: %d", n) - } - }) - } - } -} diff --git a/tenant/http_server_bucket.go b/tenant/http_server_bucket.go deleted file mode 100644 index 960410c2d42..00000000000 --- a/tenant/http_server_bucket.go +++ /dev/null @@ -1,525 +0,0 @@ -package tenant - -import ( - "context" - "fmt" - "net/http" - "time" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -// BucketHandler represents an HTTP API handler for users. -type BucketHandler struct { - chi.Router - api *kithttp.API - log *zap.Logger - bucketSvc influxdb.BucketService - labelSvc influxdb.LabelService // we may need this for now but we dont want it permanently -} - -const ( - prefixBuckets = "/api/v2/buckets" -) - -// NewHTTPBucketHandler constructs a new http server. -func NewHTTPBucketHandler(log *zap.Logger, bucketSvc influxdb.BucketService, labelSvc influxdb.LabelService, urmHandler, labelHandler http.Handler) *BucketHandler { - svr := &BucketHandler{ - api: kithttp.NewAPI(kithttp.WithLog(log)), - log: log, - bucketSvc: bucketSvc, - labelSvc: labelSvc, - } - - r := chi.NewRouter() - r.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - - // RESTy routes for "articles" resource - r.Route("/", func(r chi.Router) { - r.Post("/", svr.handlePostBucket) - r.Get("/", svr.handleGetBuckets) - - r.Route("/{id}", func(r chi.Router) { - r.Get("/", svr.handleGetBucket) - r.Patch("/", svr.handlePatchBucket) - r.Delete("/", svr.handleDeleteBucket) - - // mount embedded resources - mountableRouter := r.With(kithttp.ValidResource(svr.api, svr.lookupOrgByBucketID)) - mountableRouter.Mount("/members", urmHandler) - mountableRouter.Mount("/owners", urmHandler) - mountableRouter.Mount("/labels", labelHandler) - }) - }) - - svr.Router = r - return svr -} - -func (h *BucketHandler) Prefix() string { - return prefixBuckets -} - -// bucket is used for serialization/deserialization with duration string syntax. -type bucket struct { - ID platform.ID `json:"id,omitempty"` - OrgID platform.ID `json:"orgID,omitempty"` - Type string `json:"type"` - Description string `json:"description,omitempty"` - Name string `json:"name"` - RetentionPolicyName string `json:"rp,omitempty"` // This to support v1 sources - RetentionRules []retentionRule `json:"retentionRules"` - influxdb.CRUDLog -} - -// retentionRule is the retention rule action for a bucket. 
-type retentionRule struct { - Type string `json:"type"` - EverySeconds int64 `json:"everySeconds"` - ShardGroupDurationSeconds int64 `json:"shardGroupDurationSeconds"` -} - -func (b *bucket) toInfluxDB() *influxdb.Bucket { - if b == nil { - return nil - } - - var rpDuration time.Duration // zero value implies infinite retention policy - var sgDuration time.Duration // zero value implies the server should pick a value - - // Only support a single retention period for the moment - if len(b.RetentionRules) > 0 { - rpDuration = time.Duration(b.RetentionRules[0].EverySeconds) * time.Second - sgDuration = time.Duration(b.RetentionRules[0].ShardGroupDurationSeconds) * time.Second - } - - return &influxdb.Bucket{ - ID: b.ID, - OrgID: b.OrgID, - Type: influxdb.ParseBucketType(b.Type), - Description: b.Description, - Name: b.Name, - RetentionPolicyName: b.RetentionPolicyName, - RetentionPeriod: rpDuration, - ShardGroupDuration: sgDuration, - CRUDLog: b.CRUDLog, - } -} - -func newBucket(pb *influxdb.Bucket) *bucket { - if pb == nil { - return nil - } - - bkt := bucket{ - ID: pb.ID, - OrgID: pb.OrgID, - Type: pb.Type.String(), - Name: pb.Name, - Description: pb.Description, - RetentionPolicyName: pb.RetentionPolicyName, - RetentionRules: []retentionRule{}, - CRUDLog: pb.CRUDLog, - } - - // Only append a retention rule if the user wants to explicitly set - // a parameter on the rule. - // - // This is for backwards-compatibility with older versions of the API, - // which didn't support setting shard-group durations and used an empty - // array of rules to represent infinite retention. - if pb.RetentionPeriod > 0 || pb.ShardGroupDuration > 0 { - bkt.RetentionRules = append(bkt.RetentionRules, retentionRule{ - Type: "expire", - EverySeconds: int64(pb.RetentionPeriod.Round(time.Second) / time.Second), - ShardGroupDurationSeconds: int64(pb.ShardGroupDuration.Round(time.Second) / time.Second), - }) - } - - return &bkt -} - -type retentionRuleUpdate struct { - Type string `json:"type"` - EverySeconds *int64 `json:"everySeconds"` - ShardGroupDurationSeconds *int64 `json:"shardGroupDurationSeconds"` -} - -// bucketUpdate is used for serialization/deserialization with retention rules. -type bucketUpdate struct { - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - RetentionRules []retentionRuleUpdate `json:"retentionRules,omitempty"` -} - -func (b *bucketUpdate) OK() error { - if len(b.RetentionRules) > 1 { - return &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "buckets cannot have more than one retention rule at this time", - } - } - - if len(b.RetentionRules) > 0 { - rule := b.RetentionRules[0] - if rule.EverySeconds != nil && *rule.EverySeconds < 0 { - return &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "expiration seconds cannot be negative", - } - } - if rule.ShardGroupDurationSeconds != nil && *rule.ShardGroupDurationSeconds < 0 { - return &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "shard-group duration seconds cannot be negative", - } - } - } - - return nil -} - -func (b *bucketUpdate) toInfluxDB() *influxdb.BucketUpdate { - if b == nil { - return nil - } - - upd := influxdb.BucketUpdate{ - Name: b.Name, - Description: b.Description, - } - - // For now, only use a single retention rule. 
- if len(b.RetentionRules) > 0 { - rule := b.RetentionRules[0] - if rule.EverySeconds != nil { - rp := time.Duration(*rule.EverySeconds) * time.Second - upd.RetentionPeriod = &rp - } - if rule.ShardGroupDurationSeconds != nil { - sgd := time.Duration(*rule.ShardGroupDurationSeconds) * time.Second - upd.ShardGroupDuration = &sgd - } - } - - return &upd -} - -func newBucketUpdate(pb *influxdb.BucketUpdate) *bucketUpdate { - if pb == nil { - return nil - } - - up := &bucketUpdate{ - Name: pb.Name, - Description: pb.Description, - RetentionRules: []retentionRuleUpdate{}, - } - - if pb.RetentionPeriod == nil && pb.ShardGroupDuration == nil { - return up - } - - rule := retentionRuleUpdate{Type: "expire"} - - if pb.RetentionPeriod != nil { - rp := int64((*pb.RetentionPeriod).Round(time.Second) / time.Second) - rule.EverySeconds = &rp - } - if pb.ShardGroupDuration != nil { - sgd := int64((*pb.ShardGroupDuration).Round(time.Second) / time.Second) - rule.ShardGroupDurationSeconds = &sgd - } - - up.RetentionRules = append(up.RetentionRules, rule) - return up -} - -type bucketResponse struct { - bucket - Links map[string]string `json:"links"` - Labels []influxdb.Label `json:"labels"` -} - -func NewBucketResponse(b *influxdb.Bucket, labels ...*influxdb.Label) *bucketResponse { - res := &bucketResponse{ - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/buckets/%s", b.ID), - "org": fmt.Sprintf("/api/v2/orgs/%s", b.OrgID), - "members": fmt.Sprintf("/api/v2/buckets/%s/members", b.ID), - "owners": fmt.Sprintf("/api/v2/buckets/%s/owners", b.ID), - "labels": fmt.Sprintf("/api/v2/buckets/%s/labels", b.ID), - "write": fmt.Sprintf("/api/v2/write?org=%s&bucket=%s", b.OrgID, b.ID), - }, - bucket: *newBucket(b), - Labels: []influxdb.Label{}, - } - for _, l := range labels { - res.Labels = append(res.Labels, *l) - } - - return res -} - -type bucketsResponse struct { - Links *influxdb.PagingLinks `json:"links"` - Buckets []*bucketResponse `json:"buckets"` -} - -func newBucketsResponse(ctx context.Context, opts influxdb.FindOptions, f influxdb.BucketFilter, bs []*influxdb.Bucket, labelSvc influxdb.LabelService) *bucketsResponse { - rs := make([]*bucketResponse, 0, len(bs)) - for _, b := range bs { - var labels []*influxdb.Label - if labelSvc != nil { // allow for no label svc - labels, _ = labelSvc.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: b.ID, ResourceType: influxdb.BucketsResourceType}) - } - rs = append(rs, NewBucketResponse(b, labels...)) - } - return &bucketsResponse{ - Links: influxdb.NewPagingLinks(prefixBuckets, opts, f, len(bs)), - Buckets: rs, - } -} - -// handlePostBucket is the HTTP handler for the POST /api/v2/buckets route. 
-func (h *BucketHandler) handlePostBucket(w http.ResponseWriter, r *http.Request) { - var b postBucketRequest - if err := h.api.DecodeJSON(r.Body, &b); err != nil { - h.api.Err(w, r, err) - return - } - if err := b.OK(); err != nil { - h.api.Err(w, r, err) - return - } - - bucket := b.toInfluxDB() - - if err := h.bucketSvc.CreateBucket(r.Context(), bucket); err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Bucket created", zap.String("bucket", fmt.Sprint(bucket))) - - h.api.Respond(w, r, http.StatusCreated, NewBucketResponse(bucket)) -} - -type postBucketRequest struct { - OrgID platform.ID `json:"orgID,omitempty"` - Name string `json:"name"` - Description string `json:"description"` - RetentionPolicyName string `json:"rp,omitempty"` // This to support v1 sources - RetentionRules []retentionRule `json:"retentionRules"` -} - -func (b *postBucketRequest) OK() error { - if !b.OrgID.Valid() { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "organization id must be provided", - } - } - - if len(b.RetentionRules) > 1 { - return &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "buckets cannot have more than one retention rule at this time", - } - } - - if len(b.RetentionRules) > 0 { - rule := b.RetentionRules[0] - - if rule.EverySeconds < 0 { - return &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "expiration seconds cannot be negative", - } - } - if rule.ShardGroupDurationSeconds < 0 { - return &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "shard-group duration seconds cannot be negative", - } - } - } - - return nil -} - -func (b postBucketRequest) toInfluxDB() *influxdb.Bucket { - // Only support a single retention period for the moment - var rpDur time.Duration - var sgDur time.Duration - if len(b.RetentionRules) > 0 { - rule := b.RetentionRules[0] - rpDur = time.Duration(rule.EverySeconds) * time.Second - sgDur = time.Duration(rule.ShardGroupDurationSeconds) * time.Second - } - - return &influxdb.Bucket{ - OrgID: b.OrgID, - Description: b.Description, - Name: b.Name, - Type: influxdb.BucketTypeUser, - RetentionPolicyName: b.RetentionPolicyName, - RetentionPeriod: rpDur, - ShardGroupDuration: sgDur, - } -} - -// handleGetBucket is the HTTP handler for the GET /api/v2/buckets/:id route. -func (h *BucketHandler) handleGetBucket(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, err) - return - } - - b, err := h.bucketSvc.FindBucketByID(ctx, *id) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.log.Debug("Bucket retrieved", zap.String("bucket", fmt.Sprint(b))) - var labels []*influxdb.Label - if h.labelSvc != nil { // allow for no label svc - labels, _ = h.labelSvc.FindResourceLabels(ctx, influxdb.LabelMappingFilter{ResourceID: b.ID, ResourceType: influxdb.BucketsResourceType}) - } - - h.api.Respond(w, r, http.StatusOK, NewBucketResponse(b, labels...)) -} - -// handleDeleteBucket is the HTTP handler for the DELETE /api/v2/buckets/:id route. 
-func (h *BucketHandler) handleDeleteBucket(w http.ResponseWriter, r *http.Request) { - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, err) - return - } - - if err := h.bucketSvc.DeleteBucket(r.Context(), *id); err != nil { - h.api.Err(w, r, err) - return - } - - h.log.Debug("Bucket deleted", zap.String("bucketID", id.String())) - - h.api.Respond(w, r, http.StatusNoContent, nil) -} - -// handleGetBuckets is the HTTP handler for the GET /api/v2/buckets route. -func (h *BucketHandler) handleGetBuckets(w http.ResponseWriter, r *http.Request) { - bucketsRequest, err := decodeGetBucketsRequest(r) - if err != nil { - h.api.Err(w, r, err) - return - } - - bs, _, err := h.bucketSvc.FindBuckets(r.Context(), bucketsRequest.filter, bucketsRequest.opts) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Buckets retrieved", zap.String("buckets", fmt.Sprint(bs))) - - h.api.Respond(w, r, http.StatusOK, newBucketsResponse(r.Context(), bucketsRequest.opts, bucketsRequest.filter, bs, h.labelSvc)) -} - -type getBucketsRequest struct { - filter influxdb.BucketFilter - opts influxdb.FindOptions -} - -func decodeGetBucketsRequest(r *http.Request) (*getBucketsRequest, error) { - qp := r.URL.Query() - req := &getBucketsRequest{} - - opts, err := influxdb.DecodeFindOptions(r) - if err != nil { - return nil, err - } - - req.opts = *opts - - if orgID := qp.Get("orgID"); orgID != "" { - id, err := platform.IDFromString(orgID) - if err != nil { - return nil, err - } - req.filter.OrganizationID = id - } - - if org := qp.Get("org"); org != "" { - req.filter.Org = &org - } - - if name := qp.Get("name"); name != "" { - req.filter.Name = &name - } - - if bucketID := qp.Get("id"); bucketID != "" { - id, err := platform.IDFromString(bucketID) - if err != nil { - return nil, err - } - req.filter.ID = id - } - - return req, nil -} - -// handlePatchBucket is the HTTP handler for the PATCH /api/v2/buckets route. 
-func (h *BucketHandler) handlePatchBucket(w http.ResponseWriter, r *http.Request) { - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, err) - return - } - - var reqBody bucketUpdate - if err := h.api.DecodeJSON(r.Body, &reqBody); err != nil { - h.api.Err(w, r, err) - return - } - - if reqBody.Name != nil { - b, err := h.bucketSvc.FindBucketByID(r.Context(), *id) - if err != nil { - h.api.Err(w, r, err) - return - } - b.Name = *reqBody.Name - } - - b, err := h.bucketSvc.UpdateBucket(r.Context(), *id, *reqBody.toInfluxDB()) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.log.Debug("Bucket updated", zap.String("bucket", fmt.Sprint(b))) - - h.api.Respond(w, r, http.StatusOK, NewBucketResponse(b)) -} - -func (h *BucketHandler) lookupOrgByBucketID(ctx context.Context, id platform.ID) (platform.ID, error) { - b, err := h.bucketSvc.FindBucketByID(ctx, id) - if err != nil { - return 0, err - } - return b.OrgID, nil -} diff --git a/tenant/http_server_bucket_test.go b/tenant/http_server_bucket_test.go deleted file mode 100644 index 5cf8628d7ea..00000000000 --- a/tenant/http_server_bucket_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package tenant_test - -import ( - "context" - "net/http/httptest" - "testing" - "time" - - "github.com/dustin/go-humanize" - "github.com/go-chi/chi" - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - ihttp "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func initBucketHttpService(f itesting.BucketFields, t *testing.T) (influxdb.BucketService, string, func()) { - t.Helper() - - s := itesting.NewTestInmemStore(t) - - store := tenant.NewStore(s) - if f.IDGenerator != nil { - store.IDGen = f.IDGenerator - } - - if f.OrgIDs != nil { - store.OrgIDGen = f.OrgIDs - } - - if f.BucketIDs != nil { - store.BucketIDGen = f.BucketIDs - } - - ctx := context.Background() - - // go direct to storage for test data - if err := s.Update(ctx, func(tx kv.Tx) error { - for _, o := range f.Organizations { - if err := store.CreateOrg(tx.Context(), tx, o); err != nil { - return err - } - } - - for _, b := range f.Buckets { - if err := store.CreateBucket(tx.Context(), tx, b); err != nil { - return err - } - } - - return nil - }); err != nil { - t.Fatalf("failed to seed data: %s", err) - } - - handler := tenant.NewHTTPBucketHandler(zaptest.NewLogger(t), tenant.NewService(store), nil, nil, nil) - r := chi.NewRouter() - r.Mount(handler.Prefix(), handler) - server := httptest.NewServer(r) - httpClient, err := ihttp.NewHTTPClient(server.URL, "", false) - if err != nil { - t.Fatal(err) - } - - client := tenant.BucketClientService{ - Client: httpClient, - } - - return &client, "http_tenant", server.Close -} - -func TestHTTPBucketService(t *testing.T) { - itesting.BucketService(initBucketHttpService, t) -} - -const idOne = platform.ID(iota + 1) - -func TestHTTPBucketService_InvalidRetention(t *testing.T) { - type args struct { - name string - id platform.ID - retention int - shardDuration int - description *string - } - type wants struct { - err error - bucket *influxdb.Bucket - } - - tests := []struct { - name string - fields 
itesting.BucketFields - args args - wants wants - }{ - { - name: "update with negative retention", - fields: itesting.BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "bucket1", - RetentionPeriod: humanize.Day, - ShardGroupDuration: time.Hour, - }, - { - // ID(2) - OrgID: idOne, - Name: "bucket2", - }, - }, - }, - args: args{ - id: idOne, - retention: -1, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "expiration seconds cannot be negative", - }, - }, - }, - { - name: "update with negative shard-group duration", - fields: itesting.BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "bucket1", - RetentionPeriod: humanize.Day, - ShardGroupDuration: time.Hour, - }, - { - // ID(2) - OrgID: idOne, - Name: "bucket2", - }, - }, - }, - args: args{ - id: idOne, - shardDuration: -1, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: "shard-group duration seconds cannot be negative", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := initBucketHttpService(tt.fields, t) - defer done() - ctx := context.Background() - - upd := influxdb.BucketUpdate{} - if tt.args.name != "" { - upd.Name = &tt.args.name - } - if tt.args.retention != 0 { - d := time.Duration(tt.args.retention) * time.Minute - upd.RetentionPeriod = &d - } - if tt.args.shardDuration != 0 { - d := time.Duration(tt.args.shardDuration) * time.Minute - upd.ShardGroupDuration = &d - } - - upd.Description = tt.args.description - - bucket, err := s.UpdateBucket(ctx, tt.args.id, upd) - itesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(bucket, tt.wants.bucket, bucketCmpOptions...); diff != "" { - t.Errorf("bucket is different -got/+want\ndiff %s", diff) - } - }) - } -} diff --git a/tenant/http_server_onboarding.go b/tenant/http_server_onboarding.go deleted file mode 100644 index b6dce718d96..00000000000 --- a/tenant/http_server_onboarding.go +++ /dev/null @@ -1,145 +0,0 @@ -package tenant - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/influxdata/influxdb/v2" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -// OnboardHandler represents an HTTP API handler for users. -type OnboardHandler struct { - chi.Router - api *kithttp.API - log *zap.Logger - onboardingSvc influxdb.OnboardingService -} - -const ( - prefixOnboard = "/api/v2/setup" -) - -// NewHTTPOnboardHandler constructs a new http server. 
-func NewHTTPOnboardHandler(log *zap.Logger, onboardSvc influxdb.OnboardingService) *OnboardHandler { - svr := &OnboardHandler{ - api: kithttp.NewAPI(kithttp.WithLog(log)), - log: log, - onboardingSvc: onboardSvc, - } - - r := chi.NewRouter() - r.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - - // RESTy routes for "articles" resource - r.Route("/", func(r chi.Router) { - r.Post("/", svr.handleInitialOnboardRequest) - r.Get("/", svr.handleIsOnboarding) - - }) - - svr.Router = r - return svr -} - -func (h *OnboardHandler) Prefix() string { - return prefixOnboard -} - -type isOnboardingResponse struct { - Allowed bool `json:"allowed"` -} - -// isOnboarding is the HTTP handler for the POST /api/v2/setup route. -func (h *OnboardHandler) handleIsOnboarding(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - result, err := h.onboardingSvc.IsOnboarding(ctx) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Onboarding eligibility check finished", zap.String("result", fmt.Sprint(result))) - - h.api.Respond(w, r, http.StatusOK, isOnboardingResponse{result}) -} - -// handleInitialOnboardRequest is the HTTP handler for the GET /api/v2/setup route. -func (h *OnboardHandler) handleInitialOnboardRequest(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req := &influxdb.OnboardingRequest{} - if err := json.NewDecoder(r.Body).Decode(req); err != nil { - h.api.Err(w, r, err) - return - } - results, err := h.onboardingSvc.OnboardInitialUser(ctx, req) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Onboarding setup completed", zap.String("results", fmt.Sprint(results))) - - h.api.Respond(w, r, http.StatusCreated, NewOnboardingResponse(results)) -} - -type onboardingResponse struct { - User *influxdb.UserResponse `json:"user"` - Bucket *bucketResponse `json:"bucket"` - Organization orgResponse `json:"org"` - Auth *authResponse `json:"auth"` -} - -func NewOnboardingResponse(results *influxdb.OnboardingResults) *onboardingResponse { - return &onboardingResponse{ - User: newUserResponse(results.User), - Bucket: NewBucketResponse(results.Bucket), - Organization: newOrgResponse(*results.Org), - Auth: newAuthResponse(results.Auth), - } -} - -type authResponse struct { - influxdb.Authorization - Links map[string]string `json:"links"` -} - -func newAuthResponse(a *influxdb.Authorization) *authResponse { - if a == nil { - return nil - } - - res := &authResponse{ - Authorization: *a, - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/authorizations/%s", a.ID), - "user": fmt.Sprintf("/api/v2/users/%s", a.UserID), - }, - } - return res -} - -func (a *authResponse) toPlatform() *influxdb.Authorization { - res := &influxdb.Authorization{ - ID: a.ID, - Token: a.Token, - Status: a.Status, - Description: a.Description, - OrgID: a.OrgID, - UserID: a.UserID, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: a.CreatedAt, - UpdatedAt: a.UpdatedAt, - }, - } - for _, p := range a.Permissions { - res.Permissions = append(res.Permissions, influxdb.Permission{Action: p.Action, Resource: p.Resource}) - } - return res -} diff --git a/tenant/http_server_onboarding_test.go b/tenant/http_server_onboarding_test.go deleted file mode 100644 index 7269140c5df..00000000000 --- a/tenant/http_server_onboarding_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package tenant_test - -import ( - "context" - "net/http/httptest" - "testing" - - "github.com/go-chi/chi" - "github.com/influxdata/influxdb/v2" - 
"github.com/influxdata/influxdb/v2/authorization" - ihttp "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func initOnboardHttpService(f itesting.OnboardingFields, t *testing.T) (influxdb.OnboardingService, func()) { - t.Helper() - - s := itesting.NewTestInmemStore(t) - storage := tenant.NewStore(s) - - ten := tenant.NewService(storage) - - authStore, err := authorization.NewStore(s) - require.NoError(t, err) - authSvc := authorization.NewService(authStore, ten) - - svc := tenant.NewOnboardService(ten, authSvc) - - ctx := context.Background() - if !f.IsOnboarding { - // create a dummy so so we can no longer onboard - err := ten.CreateUser(ctx, &influxdb.User{Name: "dummy", Status: influxdb.Active}) - if err != nil { - t.Fatal(err) - } - } - - handler := tenant.NewHTTPOnboardHandler(zaptest.NewLogger(t), svc) - r := chi.NewRouter() - r.Mount(handler.Prefix(), handler) - server := httptest.NewServer(r) - httpClient, err := ihttp.NewHTTPClient(server.URL, "", false) - if err != nil { - t.Fatal(err) - } - - client := tenant.OnboardClientService{ - Client: httpClient, - } - - return &client, server.Close -} - -func TestOnboardService(t *testing.T) { - itesting.OnboardInitialUser(initOnboardHttpService, t) -} diff --git a/tenant/http_server_org.go b/tenant/http_server_org.go deleted file mode 100644 index 20cfeed259f..00000000000 --- a/tenant/http_server_org.go +++ /dev/null @@ -1,231 +0,0 @@ -package tenant - -import ( - "context" - "fmt" - "net/http" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -// OrgHandler represents an HTTP API handler for organizations. -type OrgHandler struct { - chi.Router - api *kithttp.API - log *zap.Logger - orgSvc influxdb.OrganizationService -} - -const ( - prefixOrganizations = "/api/v2/orgs" -) - -func (h *OrgHandler) Prefix() string { - return prefixOrganizations -} - -// NewHTTPOrgHandler constructs a new http server. 
-func NewHTTPOrgHandler(log *zap.Logger, orgService influxdb.OrganizationService, urm http.Handler, secretHandler http.Handler) *OrgHandler { - svr := &OrgHandler{ - api: kithttp.NewAPI(kithttp.WithLog(log)), - log: log, - orgSvc: orgService, - } - - r := chi.NewRouter() - r.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - - r.Route("/", func(r chi.Router) { - r.Post("/", svr.handlePostOrg) - r.Get("/", svr.handleGetOrgs) - - r.Route("/{id}", func(r chi.Router) { - r.Get("/", svr.handleGetOrg) - r.Patch("/", svr.handlePatchOrg) - r.Delete("/", svr.handleDeleteOrg) - - // mount embedded resources - mountableRouter := r.With(kithttp.ValidResource(svr.api, svr.lookupOrgByID)) - mountableRouter.Mount("/members", urm) - mountableRouter.Mount("/owners", urm) - mountableRouter.Mount("/secrets", secretHandler) - }) - }) - svr.Router = r - return svr -} - -type orgResponse struct { - Links map[string]string `json:"links"` - influxdb.Organization -} - -func newOrgResponse(o influxdb.Organization) orgResponse { - return orgResponse{ - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/orgs/%s", o.ID), - "logs": fmt.Sprintf("/api/v2/orgs/%s/logs", o.ID), - "members": fmt.Sprintf("/api/v2/orgs/%s/members", o.ID), - "owners": fmt.Sprintf("/api/v2/orgs/%s/owners", o.ID), - "secrets": fmt.Sprintf("/api/v2/orgs/%s/secrets", o.ID), - "labels": fmt.Sprintf("/api/v2/orgs/%s/labels", o.ID), - "buckets": fmt.Sprintf("/api/v2/buckets?org=%s", o.Name), - "tasks": fmt.Sprintf("/api/v2/tasks?org=%s", o.Name), - "dashboards": fmt.Sprintf("/api/v2/dashboards?org=%s", o.Name), - }, - Organization: o, - } -} - -type orgsResponse struct { - Links map[string]string `json:"links"` - Organizations []orgResponse `json:"orgs"` -} - -func newOrgsResponse(orgs []*influxdb.Organization) *orgsResponse { - res := orgsResponse{ - Links: map[string]string{ - "self": "/api/v2/orgs", - }, - Organizations: []orgResponse{}, - } - for _, org := range orgs { - res.Organizations = append(res.Organizations, newOrgResponse(*org)) - } - return &res -} - -// handlePostOrg is the HTTP handler for the POST /api/v2/orgs route. -func (h *OrgHandler) handlePostOrg(w http.ResponseWriter, r *http.Request) { - var org influxdb.Organization - if err := h.api.DecodeJSON(r.Body, &org); err != nil { - h.api.Err(w, r, err) - return - } - - if err := h.orgSvc.CreateOrganization(r.Context(), &org); err != nil { - h.api.Err(w, r, err) - return - } - - h.log.Debug("Org created", zap.String("org", fmt.Sprint(org))) - - h.api.Respond(w, r, http.StatusCreated, newOrgResponse(org)) -} - -// handleGetOrg is the HTTP handler for the GET /api/v2/orgs/:id route. -func (h *OrgHandler) handleGetOrg(w http.ResponseWriter, r *http.Request) { - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, err) - return - } - - org, err := h.orgSvc.FindOrganizationByID(r.Context(), *id) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Org retrieved", zap.String("org", fmt.Sprint(org))) - - h.api.Respond(w, r, http.StatusOK, newOrgResponse(*org)) -} - -// handleGetOrgs is the HTTP handler for the GET /api/v2/orgs route. 
-func (h *OrgHandler) handleGetOrgs(w http.ResponseWriter, r *http.Request) { - qp := r.URL.Query() - - opts, err := influxdb.DecodeFindOptions(r) - if err != nil { - h.api.Err(w, r, err) - return - } - - var filter influxdb.OrganizationFilter - if name := qp.Get("org"); name != "" { - filter.Name = &name - } - - if id := qp.Get("orgID"); id != "" { - i, err := platform.IDFromString(id) - if err == nil { - filter.ID = i - } - } - - if id := qp.Get("userID"); id != "" { - i, err := platform.IDFromString(id) - if err == nil { - filter.UserID = i - } - } - - orgs, _, err := h.orgSvc.FindOrganizations(r.Context(), filter, *opts) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Orgs retrieved", zap.String("org", fmt.Sprint(orgs))) - - h.api.Respond(w, r, http.StatusOK, newOrgsResponse(orgs)) -} - -// handlePatchOrg is the HTTP handler for the PATH /api/v2/orgs route. -func (h *OrgHandler) handlePatchOrg(w http.ResponseWriter, r *http.Request) { - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, err) - return - } - - var upd influxdb.OrganizationUpdate - if err := h.api.DecodeJSON(r.Body, &upd); err != nil { - h.api.Err(w, r, err) - return - } - - org, err := h.orgSvc.UpdateOrganization(r.Context(), *id, upd) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Org updated", zap.String("org", fmt.Sprint(org))) - - h.api.Respond(w, r, http.StatusOK, newOrgResponse(*org)) -} - -// handleDeleteOrganization is the HTTP handler for the DELETE /api/v2/orgs/:id route. -func (h *OrgHandler) handleDeleteOrg(w http.ResponseWriter, r *http.Request) { - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.api.Err(w, r, err) - return - } - - ctx := r.Context() - if err := h.orgSvc.DeleteOrganization(ctx, *id); err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Org deleted", zap.String("orgID", fmt.Sprint(id))) - - h.api.Respond(w, r, http.StatusNoContent, nil) -} - -func (h *OrgHandler) lookupOrgByID(ctx context.Context, id platform.ID) (platform.ID, error) { - _, err := h.orgSvc.FindOrganizationByID(ctx, id) - if err != nil { - return 0, err - } - - return id, nil -} diff --git a/tenant/http_server_org_test.go b/tenant/http_server_org_test.go deleted file mode 100644 index af56ab9e8b5..00000000000 --- a/tenant/http_server_org_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package tenant_test - -import ( - "context" - "net/http/httptest" - "testing" - - "github.com/go-chi/chi" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func initHttpOrgService(f itesting.OrganizationFields, t *testing.T) (influxdb.OrganizationService, string, func()) { - t.Helper() - - s := itesting.NewTestInmemStore(t) - storage := tenant.NewStore(s) - - if f.OrgBucketIDs != nil { - storage.OrgIDGen = f.OrgBucketIDs - storage.BucketIDGen = f.OrgBucketIDs - } - - // go direct to storage for test data - if err := s.Update(context.Background(), func(tx kv.Tx) error { - for _, o := range f.Organizations { - if err := storage.CreateOrg(tx.Context(), tx, o); err != nil { - return err - } - } - - return nil - }); err != nil { - t.Fatalf("failed to populate organizations: %s", err) - } - - handler := tenant.NewHTTPOrgHandler(zaptest.NewLogger(t), tenant.NewService(storage), nil, nil) - 
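// The lines below finish the harness these handler tests share: the handler is mounted
// at its Prefix() on a fresh chi router, wrapped in an httptest server, and driven
// through NewHTTPClient from influxdb's http package plus the tenant.OrgClientService
// adapter, so the shared itesting suite exercises the service over real HTTP.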
r := chi.NewRouter() - r.Mount(handler.Prefix(), handler) - server := httptest.NewServer(r) - httpClient, err := http.NewHTTPClient(server.URL, "", false) - if err != nil { - t.Fatal(err) - } - - orgClient := tenant.OrgClientService{ - Client: httpClient, - } - - return &orgClient, "http_tenant", server.Close -} - -func TestHTTPOrgService(t *testing.T) { - itesting.OrganizationService(initHttpOrgService, t) -} diff --git a/tenant/http_server_user.go b/tenant/http_server_user.go deleted file mode 100644 index ca45e3b096e..00000000000 --- a/tenant/http_server_user.go +++ /dev/null @@ -1,537 +0,0 @@ -package tenant - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -// UserHandler represents an HTTP API handler for users. -type UserHandler struct { - chi.Router - api *kithttp.API - log *zap.Logger - userSvc influxdb.UserService - passwordSvc influxdb.PasswordsService -} - -const ( - prefixUsers = "/api/v2/users" - prefixMe = "/api/v2/me" -) - -// NewHTTPUserHandler constructs a new http server. -func NewHTTPUserHandler(log *zap.Logger, userService influxdb.UserService, passwordService influxdb.PasswordsService) *UserHandler { - svr := &UserHandler{ - api: kithttp.NewAPI(kithttp.WithLog(log)), - log: log, - userSvc: userService, - passwordSvc: passwordService, - } - - r := chi.NewRouter() - r.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - - // RESTy routes for "articles" resource - r.Route("/", func(r chi.Router) { - r.Post("/", svr.handlePostUser) - r.Get("/", svr.handleGetUsers) - r.Put("/password", svr.handlePutUserPassword) - - r.Route("/{id}", func(r chi.Router) { - r.Get("/", svr.handleGetUser) - r.Patch("/", svr.handlePatchUser) - r.Delete("/", svr.handleDeleteUser) - r.Get("/permissions", svr.handleGetPermissions) - r.Put("/password", svr.handlePutUserPassword) - r.Post("/password", svr.handlePostUserPassword) - }) - }) - - svr.Router = r - return svr -} - -func (h *UserHandler) Prefix() string { - return prefixUsers -} - -type passwordSetRequest struct { - Password string `json:"password"` -} - -// handlePutPassword is the HTTP handler for the PUT /api/v2/users/:id/password -func (h *UserHandler) handlePostUserPassword(w http.ResponseWriter, r *http.Request) { - var body passwordSetRequest - err := json.NewDecoder(r.Body).Decode(&body) - if err != nil { - h.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Err: err, - }) - return - } - - param := chi.URLParam(r, "id") - userID, err := platform.IDFromString(param) - if err != nil { - h.api.Err(w, r, &errors.Error{ - Msg: "invalid user ID provided in route", - }) - return - } - - err = h.passwordSvc.SetPassword(r.Context(), *userID, body.Password) - if err != nil { - h.api.Err(w, r, err) - return - } - - w.WriteHeader(http.StatusNoContent) -} - -// handlePutPassword is the HTTP handler for the PUT /api/v2/users/:id/password -func (h *UserHandler) handlePutUserPassword(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePasswordResetRequest(r) - if err != nil { - h.api.Err(w, r, &errors.Error{ - Msg: fmt.Sprintf("error decoding password reset request: 
%s", err), - }) - return - } - - param := chi.URLParam(r, "id") - userID, err := platform.IDFromString(param) - if err != nil { - h.api.Err(w, r, &errors.Error{ - Msg: "invalid user ID provided in route", - }) - return - } - err = h.passwordSvc.CompareAndSetPassword(ctx, *userID, req.PasswordOld, req.PasswordNew) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("User password updated") - w.WriteHeader(http.StatusNoContent) -} - -type passwordResetRequest struct { - Username string - PasswordOld string - PasswordNew string -} - -type passwordResetRequestBody struct { - Password string `json:"password"` -} - -func decodePasswordResetRequest(r *http.Request) (*passwordResetRequest, error) { - u, o, ok := r.BasicAuth() - if !ok { - return nil, fmt.Errorf("invalid basic auth") - } - - pr := new(passwordResetRequestBody) - err := json.NewDecoder(r.Body).Decode(pr) - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - return &passwordResetRequest{ - Username: u, - PasswordOld: o, - PasswordNew: pr.Password, - }, nil -} - -// handlePostUser is the HTTP handler for the POST /api/v2/users route. -func (h *UserHandler) handlePostUser(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePostUserRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - if req.User.Status == "" { - req.User.Status = influxdb.Active - } - - if err := h.userSvc.CreateUser(ctx, req.User); err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("User created", zap.String("user", fmt.Sprint(req.User))) - - h.api.Respond(w, r, http.StatusCreated, newUserResponse(req.User)) -} - -type postUserRequest struct { - User *influxdb.User -} - -func decodePostUserRequest(ctx context.Context, r *http.Request) (*postUserRequest, error) { - b := &influxdb.User{} - if err := json.NewDecoder(r.Body).Decode(b); err != nil { - return nil, err - } - - return &postUserRequest{ - User: b, - }, nil -} - -// handleGetUser is the HTTP handler for the GET /api/v2/users/:id route. -func (h *UserHandler) handleGetUser(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetUserRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - b, err := h.userSvc.FindUserByID(ctx, req.UserID) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("User retrieved", zap.String("user", fmt.Sprint(b))) - - h.api.Respond(w, r, http.StatusOK, newUserResponse(b)) -} - -func (h *UserHandler) handleGetPermissions(w http.ResponseWriter, r *http.Request) { - id := chi.URLParam(r, "id") - if id == "" { - err := &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - h.api.Err(w, r, err) - return - } - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - h.api.Err(w, r, err) - return - } - - ps, err := h.userSvc.FindPermissionForUser(r.Context(), i) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, ps) -} - -type getUserRequest struct { - UserID platform.ID -} - -func decodeGetUserRequest(ctx context.Context, r *http.Request) (*getUserRequest, error) { - id := chi.URLParam(r, "id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - req := &getUserRequest{ - UserID: i, - } - - return req, nil -} - -// handleDeleteUser is the HTTP handler for the DELETE /api/v2/users/:id route. 
-func (h *UserHandler) handleDeleteUser(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeDeleteUserRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - if err := h.userSvc.DeleteUser(ctx, req.UserID); err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("User deleted", zap.String("userID", fmt.Sprint(req.UserID))) - - w.WriteHeader(http.StatusNoContent) -} - -type deleteUserRequest struct { - UserID platform.ID -} - -func decodeDeleteUserRequest(ctx context.Context, r *http.Request) (*deleteUserRequest, error) { - id := chi.URLParam(r, "id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - return &deleteUserRequest{ - UserID: i, - }, nil -} - -type usersResponse struct { - Links map[string]string `json:"links"` - Users []*influxdb.UserResponse `json:"users"` -} - -func (us usersResponse) ToInfluxdb() []*influxdb.User { - users := make([]*influxdb.User, len(us.Users)) - for i := range us.Users { - users[i] = &us.Users[i].User - } - return users -} - -func newUsersResponse(users []*influxdb.User) *usersResponse { - res := usersResponse{ - Links: map[string]string{ - "self": "/api/v2/users", - }, - Users: []*influxdb.UserResponse{}, - } - for _, user := range users { - res.Users = append(res.Users, newUserResponse(user)) - } - return &res -} - -func newUserResponse(u *influxdb.User) *influxdb.UserResponse { - return &influxdb.UserResponse{ - Links: map[string]string{ - "self": fmt.Sprintf("/api/v2/users/%s", u.ID), - }, - User: *u, - } -} - -// handleGetUsers is the HTTP handler for the GET /api/v2/users route. -func (h *UserHandler) handleGetUsers(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetUsersRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - users, _, err := h.userSvc.FindUsers(ctx, req.filter, req.opts) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Users retrieved", zap.String("users", fmt.Sprint(users))) - - h.api.Respond(w, r, http.StatusOK, newUsersResponse(users)) -} - -type getUsersRequest struct { - filter influxdb.UserFilter - opts influxdb.FindOptions -} - -func decodeGetUsersRequest(ctx context.Context, r *http.Request) (*getUsersRequest, error) { - opts, err := influxdb.DecodeFindOptions(r) - if err != nil { - return nil, err - } - - qp := r.URL.Query() - req := &getUsersRequest{ - opts: *opts, - } - - if userID := qp.Get("id"); userID != "" { - id, err := platform.IDFromString(userID) - if err != nil { - return nil, err - } - req.filter.ID = id - } - - if name := qp.Get("name"); name != "" { - req.filter.Name = &name - } - - return req, nil -} - -// handlePatchUser is the HTTP handler for the PATCH /api/v2/users/:id route. 
-func (h *UserHandler) handlePatchUser(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodePatchUserRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - b, err := h.userSvc.UpdateUser(ctx, req.UserID, req.Update) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Users updated", zap.String("user", fmt.Sprint(b))) - - h.api.Respond(w, r, http.StatusOK, newUserResponse(b)) -} - -type patchUserRequest struct { - Update influxdb.UserUpdate - UserID platform.ID -} - -func decodePatchUserRequest(ctx context.Context, r *http.Request) (*patchUserRequest, error) { - id := chi.URLParam(r, "id") - if id == "" { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "url missing id", - } - } - - var i platform.ID - if err := i.DecodeFromString(id); err != nil { - return nil, err - } - - var upd influxdb.UserUpdate - if err := json.NewDecoder(r.Body).Decode(&upd); err != nil { - return nil, err - } - - if err := upd.Valid(); err != nil { - return nil, err - } - - return &patchUserRequest{ - Update: upd, - UserID: i, - }, nil -} - -// MeHandler represents an HTTP API handler for /me routes. -type MeHandler struct { - chi.Router - api *kithttp.API - log *zap.Logger - userSvc influxdb.UserService - passwordSvc influxdb.PasswordsService -} - -func (h *MeHandler) Prefix() string { - return prefixMe -} - -func NewHTTPMeHandler(log *zap.Logger, userService influxdb.UserService, passwordService influxdb.PasswordsService) *MeHandler { - svr := &MeHandler{ - api: kithttp.NewAPI(kithttp.WithLog(log)), - log: log, - userSvc: userService, - passwordSvc: passwordService, - } - - r := chi.NewRouter() - r.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - - // RESTy routes for "articles" resource - r.Route("/", func(r chi.Router) { - r.Get("/", svr.handleMe) - r.Put("/password", svr.handlePutMePassword) - }) - - svr.Router = r - return svr -} - -func (h *MeHandler) getUserID(ctx context.Context) (*platform.ID, error) { - a, err := icontext.GetAuthorizer(ctx) - if err != nil { - return nil, err - } - - id := a.GetUserID() - return &id, nil -} - -func (h *MeHandler) handleMe(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - userID, err := h.getUserID(ctx) - if err != nil { - h.api.Err(w, r, err) - return - } - user, err := h.userSvc.FindUserByID(ctx, *userID) - if err != nil { - h.api.Err(w, r, err) - return - } - h.api.Respond(w, r, http.StatusOK, newUserResponse(user)) -} - -func (h *MeHandler) handlePutMePassword(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - userID, err := h.getUserID(ctx) - if err != nil { - h.api.Err(w, r, err) - return - } - - req, err := decodePasswordResetRequest(r) - if err != nil { - h.api.Err(w, r, &errors.Error{ - Msg: fmt.Sprintf("error decoding password reset request: %s", err), - }) - } - - err = h.passwordSvc.CompareAndSetPassword(ctx, *userID, req.PasswordOld, req.PasswordNew) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("User password updated") - w.WriteHeader(http.StatusNoContent) -} diff --git a/tenant/http_server_user_test.go b/tenant/http_server_user_test.go deleted file mode 100644 index 325395d534a..00000000000 --- a/tenant/http_server_user_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package tenant_test - -import ( - "context" - "net/http/httptest" - "testing" - - "github.com/go-chi/chi" - platform "github.com/influxdata/influxdb/v2" - ihttp "github.com/influxdata/influxdb/v2/http" - 
"github.com/influxdata/influxdb/v2/tenant" - platformtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func initHttpUserService(f platformtesting.UserFields, t *testing.T) (platform.UserService, string, func()) { - t.Helper() - - s := platformtesting.NewTestInmemStore(t) - - storage := tenant.NewStore(s) - svc := tenant.NewService(storage) - - ctx := context.Background() - for _, u := range f.Users { - if err := svc.CreateUser(ctx, u); err != nil { - t.Fatalf("failed to populate users") - } - } - - handler := tenant.NewHTTPUserHandler(zaptest.NewLogger(t), svc, svc) - r := chi.NewRouter() - r.Mount("/api/v2/users", handler) - r.Mount("/api/v2/me", handler) - server := httptest.NewServer(r) - - httpClient, err := ihttp.NewHTTPClient(server.URL, "", false) - if err != nil { - t.Fatal(err) - } - - client := tenant.UserClientService{ - Client: httpClient, - } - - return &client, "http_tenant", server.Close -} - -func TestUserService(t *testing.T) { - t.Parallel() - platformtesting.UserService(initHttpUserService, t) -} diff --git a/tenant/index/index.go b/tenant/index/index.go deleted file mode 100644 index 1c1fee95a52..00000000000 --- a/tenant/index/index.go +++ /dev/null @@ -1,24 +0,0 @@ -package index - -import ( - "encoding/json" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kv" -) - -// URMByUserIndeMappingx is the mapping description of an index -// between a user and a URM -var URMByUserIndexMapping = kv.NewIndexMapping( - []byte("userresourcemappingsv1"), - []byte("userresourcemappingsbyuserindexv1"), - func(v []byte) ([]byte, error) { - var urm influxdb.UserResourceMapping - if err := json.Unmarshal(v, &urm); err != nil { - return nil, err - } - - id, _ := urm.UserID.Encode() - return id, nil - }, -) diff --git a/tenant/middleware_bucket_auth.go b/tenant/middleware_bucket_auth.go deleted file mode 100644 index 4fbffa7d19e..00000000000 --- a/tenant/middleware_bucket_auth.go +++ /dev/null @@ -1,121 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/tracing" -) - -var _ influxdb.BucketService = (*AuthedBucketService)(nil) - -// TODO (al): remove authorizer/bucket when the bucket service moves to tenant - -// AuthedBucketService wraps a influxdb.BucketService and authorizes actions -// against it appropriately. -type AuthedBucketService struct { - s influxdb.BucketService -} - -// NewAuthedBucketService constructs an instance of an authorizing bucket service. -func NewAuthedBucketService(s influxdb.BucketService) *AuthedBucketService { - return &AuthedBucketService{ - s: s, - } -} - -// FindBucketByID checks to see if the authorizer on context has read access to the id provided. -func (s *AuthedBucketService) FindBucketByID(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - b, err := s.s.FindBucketByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeReadBucket(ctx, b.Type, b.ID, b.OrgID); err != nil { - return nil, err - } - return b, nil -} - -// FindBucketByName returns a bucket by name for a particular organization. 
-func (s *AuthedBucketService) FindBucketByName(ctx context.Context, orgID platform.ID, n string) (*influxdb.Bucket, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - b, err := s.s.FindBucketByName(ctx, orgID, n) - if err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeReadBucket(ctx, b.Type, b.ID, b.OrgID); err != nil { - return nil, err - } - return b, nil -} - -// FindBucket retrieves the bucket and checks to see if the authorizer on context has read access to the bucket. -func (s *AuthedBucketService) FindBucket(ctx context.Context, filter influxdb.BucketFilter) (*influxdb.Bucket, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - b, err := s.s.FindBucket(ctx, filter) - if err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeReadBucket(ctx, b.Type, b.ID, b.OrgID); err != nil { - return nil, err - } - return b, nil -} - -// FindBuckets retrieves all buckets that match the provided filter and then filters the list down to only the resources that are authorized. -func (s *AuthedBucketService) FindBuckets(ctx context.Context, filter influxdb.BucketFilter, opt ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - // TODO: we'll likely want to push this operation into the database eventually since fetching the whole list of data - // will likely be expensive. - bs, _, err := s.s.FindBuckets(ctx, filter, opt...) - if err != nil { - return nil, 0, err - } - return authorizer.AuthorizeFindBuckets(ctx, bs) -} - -// CreateBucket checks to see if the authorizer on context has write access to the global buckets resource. -func (s *AuthedBucketService) CreateBucket(ctx context.Context, b *influxdb.Bucket) error { - span, ctx := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - if _, _, err := authorizer.AuthorizeCreate(ctx, influxdb.BucketsResourceType, b.OrgID); err != nil { - return err - } - return s.s.CreateBucket(ctx, b) -} - -// UpdateBucket checks to see if the authorizer on context has write access to the bucket provided. -func (s *AuthedBucketService) UpdateBucket(ctx context.Context, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { - b, err := s.s.FindBucketByID(ctx, id) - if err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.BucketsResourceType, id, b.OrgID); err != nil { - return nil, err - } - return s.s.UpdateBucket(ctx, id, upd) -} - -// DeleteBucket checks to see if the authorizer on context has write access to the bucket provided. 
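// A generic, self-contained sketch of the pattern AuthedBucketService follows above:
// wrap the real service in a type exposing the same interface, authorize first, then
// delegate. The thingService interface and the canRead callback are illustrative and
// not part of the influxdb codebase.
package sketch

import (
	"context"
	"errors"
)

type thing struct{ ID, OrgID uint64 }

type thingService interface {
	FindThingByID(ctx context.Context, id uint64) (*thing, error)
}

type authedThingService struct {
	underlying thingService
	canRead    func(ctx context.Context, orgID, id uint64) bool
}

func (s *authedThingService) FindThingByID(ctx context.Context, id uint64) (*thing, error) {
	t, err := s.underlying.FindThingByID(ctx, id)
	if err != nil {
		return nil, err
	}
	// same order as the bucket service above: fetch first, then check the caller's
	// permission against the resource's own org before returning it
	if !s.canRead(ctx, t.OrgID, t.ID) {
		return nil, errors.New("unauthorized")
	}
	return t, nil
}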
-func (s *AuthedBucketService) DeleteBucket(ctx context.Context, id platform.ID) error { - b, err := s.s.FindBucketByID(ctx, id) - if err != nil { - return err - } - if _, _, err := authorizer.AuthorizeWrite(ctx, influxdb.BucketsResourceType, id, b.OrgID); err != nil { - return err - } - return s.s.DeleteBucket(ctx, id) -} diff --git a/tenant/middleware_bucket_auth_test.go b/tenant/middleware_bucket_auth_test.go deleted file mode 100644 index 2f7ed918d99..00000000000 --- a/tenant/middleware_bucket_auth_test.go +++ /dev/null @@ -1,630 +0,0 @@ -package tenant_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var bucketCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Bucket) []*influxdb.Bucket { - out := append([]*influxdb.Bucket(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func TestBucketService_FindBucketByID(t *testing.T) { - type fields struct { - BucketService influxdb.BucketService - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: id, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: id, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/buckets/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedBucketService(tt.fields.BucketService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindBucketByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestBucketService_FindBucket(t *testing.T) { - type fields struct { - BucketService influxdb.BucketService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields 
fields - args args - wants wants - }{ - { - name: "authorized to access bucket", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketFn: func(ctx context.Context, filter influxdb.BucketFilter) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access bucket", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketFn: func(ctx context.Context, filter influxdb.BucketFilter) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/000000000000000a/buckets/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedBucketService(tt.fields.BucketService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindBucket(ctx, influxdb.BucketFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestBucketService_FindBuckets(t *testing.T) { - type fields struct { - BucketService influxdb.BucketService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - buckets []*influxdb.Bucket - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all buckets", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketsFn: func(ctx context.Context, filter influxdb.BucketFilter, opt ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - return []*influxdb.Bucket{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - { - ID: 3, - OrgID: 11, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - }, - }, - }, - wants: wants{ - buckets: []*influxdb.Bucket{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - { - ID: 3, - OrgID: 11, - }, - }, - }, - }, - { - name: "authorized to access a single orgs buckets", - fields: fields{ - BucketService: &mock.BucketService{ - - FindBucketsFn: func(ctx context.Context, filter influxdb.BucketFilter, opt ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - return []*influxdb.Bucket{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - { - ID: 3, - OrgID: 11, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - buckets: []*influxdb.Bucket{ - { - ID: 1, - OrgID: 10, - }, - { - ID: 2, - OrgID: 10, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedBucketService(tt.fields.BucketService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, 
[]influxdb.Permission{tt.args.permission})) - - buckets, _, err := s.FindBuckets(ctx, influxdb.BucketFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(buckets, tt.wants.buckets, bucketCmpOptions...); diff != "" { - t.Errorf("buckets are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestBucketService_UpdateBucket(t *testing.T) { - type fields struct { - BucketService influxdb.BucketService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update bucket", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - UpdateBucketFn: func(ctx context.Context, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update bucket", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - UpdateBucketFn: func(ctx context.Context, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/buckets/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedBucketService(tt.fields.BucketService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - _, err := s.UpdateBucket(ctx, tt.args.id, influxdb.BucketUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestBucketService_DeleteBucket(t *testing.T) { - type fields struct { - BucketService influxdb.BucketService - } - type args struct { - id platform.ID - permissions []influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete bucket", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - DeleteBucketFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - { - Action: 
"read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete bucket", - fields: fields{ - BucketService: &mock.BucketService{ - FindBucketByIDFn: func(ctc context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: 1, - OrgID: 10, - }, nil - }, - DeleteBucketFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/buckets/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedBucketService(tt.fields.BucketService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - err := s.DeleteBucket(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestBucketService_CreateBucket(t *testing.T) { - type fields struct { - BucketService influxdb.BucketService - } - type args struct { - permission influxdb.Permission - orgID platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create bucket", - fields: fields{ - BucketService: &mock.BucketService{ - CreateBucketFn: func(ctx context.Context, b *influxdb.Bucket) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create bucket", - fields: fields{ - BucketService: &mock.BucketService{ - CreateBucketFn: func(ctx context.Context, b *influxdb.Bucket) error { - return nil - }, - }, - }, - args: args{ - orgID: 10, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/000000000000000a/buckets is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedBucketService(tt.fields.BucketService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.CreateBucket(ctx, &influxdb.Bucket{OrgID: tt.args.orgID}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/tenant/middleware_bucket_logging.go b/tenant/middleware_bucket_logging.go deleted file mode 100644 index ea4a057f564..00000000000 --- a/tenant/middleware_bucket_logging.go +++ /dev/null @@ -1,113 +0,0 @@ -package tenant - -import ( - "context" - "fmt" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "go.uber.org/zap" -) - -type BucketLogger struct { - logger *zap.Logger - bucketService influxdb.BucketService -} - -// NewBucketLogger returns a logging service middleware for the Bucket Service. 
-func NewBucketLogger(log *zap.Logger, s influxdb.BucketService) *BucketLogger { - return &BucketLogger{ - logger: log, - bucketService: s, - } -} - -var _ influxdb.BucketService = (*BucketLogger)(nil) - -func (l *BucketLogger) CreateBucket(ctx context.Context, u *influxdb.Bucket) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to create bucket", zap.Error(err), dur) - return - } - l.logger.Debug("bucket create", dur) - }(time.Now()) - return l.bucketService.CreateBucket(ctx, u) -} - -func (l *BucketLogger) FindBucketByID(ctx context.Context, id platform.ID) (u *influxdb.Bucket, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to find bucket with ID %v", id) - l.logger.Debug(msg, zap.Error(err), dur) - return - } - l.logger.Debug("bucket find by ID", dur) - }(time.Now()) - return l.bucketService.FindBucketByID(ctx, id) -} - -func (l *BucketLogger) FindBucketByName(ctx context.Context, orgID platform.ID, name string) (u *influxdb.Bucket, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to find bucket with name %v in org %v", name, orgID) - l.logger.Debug(msg, zap.Error(err), dur) - return - } - l.logger.Debug("bucket find by name", dur) - }(time.Now()) - return l.bucketService.FindBucketByName(ctx, orgID, name) -} - -func (l *BucketLogger) FindBucket(ctx context.Context, filter influxdb.BucketFilter) (u *influxdb.Bucket, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find bucket matching the given filter", zap.Error(err), dur) - return - } - l.logger.Debug("bucket find", dur) - }(time.Now()) - return l.bucketService.FindBucket(ctx, filter) -} - -func (l *BucketLogger) FindBuckets(ctx context.Context, filter influxdb.BucketFilter, opt ...influxdb.FindOptions) (buckets []*influxdb.Bucket, n int, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find bucket matching the given filter", zap.Error(err), dur) - return - } - l.logger.Debug("buckets find", dur) - }(time.Now()) - return l.bucketService.FindBuckets(ctx, filter, opt...) 
-} - -func (l *BucketLogger) UpdateBucket(ctx context.Context, id platform.ID, upd influxdb.BucketUpdate) (u *influxdb.Bucket, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to update bucket", zap.Error(err), dur) - return - } - l.logger.Debug("bucket update", dur) - }(time.Now()) - return l.bucketService.UpdateBucket(ctx, id, upd) -} - -func (l *BucketLogger) DeleteBucket(ctx context.Context, id platform.ID) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to delete bucket with ID %v", id) - l.logger.Debug(msg, zap.Error(err), dur) - return - } - l.logger.Debug("bucket delete", dur) - }(time.Now()) - return l.bucketService.DeleteBucket(ctx, id) -} diff --git a/tenant/middleware_bucket_logging_test.go b/tenant/middleware_bucket_logging_test.go deleted file mode 100644 index c1f2000c768..00000000000 --- a/tenant/middleware_bucket_logging_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package tenant_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestBucketLoggingService(t *testing.T) { - influxdbtesting.BucketService(initInmemBucketLoggingService, t) -} - -func initInmemBucketLoggingService(f influxdbtesting.BucketFields, t *testing.T) (influxdb.BucketService, string, func()) { - svc, s, closer := initInmemBucketService(f, t) - return tenant.NewBucketLogger(zaptest.NewLogger(t), svc), s, closer -} diff --git a/tenant/middleware_bucket_metrics.go b/tenant/middleware_bucket_metrics.go deleted file mode 100644 index 0e1c77b0e8f..00000000000 --- a/tenant/middleware_bucket_metrics.go +++ /dev/null @@ -1,77 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/metric" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/prometheus/client_golang/prometheus" -) - -type BucketMetrics struct { - // RED metrics - rec *metric.REDClient - - bucketService influxdb.BucketService -} - -var _ influxdb.BucketService = (*BucketMetrics)(nil) - -// NewBucketMetrics returns a metrics service middleware for the Bucket Service. -func NewBucketMetrics(reg prometheus.Registerer, s influxdb.BucketService, opts ...metric.ClientOptFn) *BucketMetrics { - o := metric.ApplyMetricOpts(opts...) - return &BucketMetrics{ - rec: metric.New(reg, o.ApplySuffix("bucket")), - bucketService: s, - } -} - -// Returns a single bucket by ID. -func (m *BucketMetrics) FindBucketByID(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - rec := m.rec.Record("find_bucket_by_id") - bucket, err := m.bucketService.FindBucketByID(ctx, id) - return bucket, rec(err) -} - -// Returns the first bucket that matches filter. -func (m *BucketMetrics) FindBucket(ctx context.Context, filter influxdb.BucketFilter) (*influxdb.Bucket, error) { - rec := m.rec.Record("find_bucket") - bucket, err := m.bucketService.FindBucket(ctx, filter) - return bucket, rec(err) -} - -// FindBuckets returns a list of buckets that match filter and the total count of matching buckets. 
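// A compact sketch of the logging-decorator idiom used throughout
// middleware_bucket_logging.go above: capture the start time in a deferred closure,
// then log the duration plus the error (if any) once the wrapped call has returned
// into the named result. doWork is a stand-in for the delegated service call.
package sketch

import (
	"time"

	"go.uber.org/zap"
)

func timedCall(logger *zap.Logger, doWork func() error) (err error) {
	defer func(start time.Time) {
		dur := zap.Duration("took", time.Since(start))
		if err != nil {
			logger.Debug("call failed", zap.Error(err), dur)
			return
		}
		logger.Debug("call succeeded", dur)
	}(time.Now())
	return doWork()
}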
-func (m *BucketMetrics) FindBuckets(ctx context.Context, filter influxdb.BucketFilter, opt ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - rec := m.rec.Record("find_buckets") - buckets, n, err := m.bucketService.FindBuckets(ctx, filter, opt...) - return buckets, n, rec(err) -} - -// Creates a new bucket and sets b.ID with the new identifier. -func (m *BucketMetrics) CreateBucket(ctx context.Context, b *influxdb.Bucket) error { - rec := m.rec.Record("create_bucket") - err := m.bucketService.CreateBucket(ctx, b) - return rec(err) -} - -// Updates a single bucket with changeset and returns the new bucket state after update. -func (m *BucketMetrics) UpdateBucket(ctx context.Context, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { - rec := m.rec.Record("update_bucket") - updatedBucket, err := m.bucketService.UpdateBucket(ctx, id, upd) - return updatedBucket, rec(err) -} - -// Removes a bucket by ID. -func (m *BucketMetrics) DeleteBucket(ctx context.Context, id platform.ID) error { - rec := m.rec.Record("delete_bucket") - err := m.bucketService.DeleteBucket(ctx, id) - return rec(err) -} - -// FindBucketByName finds a Bucket given its name and Organization ID -func (m *BucketMetrics) FindBucketByName(ctx context.Context, orgID platform.ID, name string) (*influxdb.Bucket, error) { - rec := m.rec.Record("find_bucket_by_name") - bucket, err := m.bucketService.FindBucketByName(ctx, orgID, name) - return bucket, rec(err) -} diff --git a/tenant/middleware_onboarding_auth.go b/tenant/middleware_onboarding_auth.go deleted file mode 100644 index bbe2c3677d5..00000000000 --- a/tenant/middleware_onboarding_auth.go +++ /dev/null @@ -1,34 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" -) - -var _ influxdb.OnboardingService = (*AuthedOnboardSvc)(nil) - -// TODO (al): remove authorizer/org when the org service moves to tenant - -// AuthedOnboardSvc wraps a influxdb.OnboardingService and authorizes actions -// against it appropriately. -type AuthedOnboardSvc struct { - s influxdb.OnboardingService -} - -// NewAuthedOnboardSvc constructs an instance of an authorizing org service. -func NewAuthedOnboardSvc(s influxdb.OnboardingService) *AuthedOnboardSvc { - return &AuthedOnboardSvc{ - s: s, - } -} - -// IsOnboarding pass through. this is handled by the underlying service layer -func (s *AuthedOnboardSvc) IsOnboarding(ctx context.Context) (bool, error) { - return s.s.IsOnboarding(ctx) -} - -// OnboardInitialUser pass through. this is handled by the underlying service layer -func (s *AuthedOnboardSvc) OnboardInitialUser(ctx context.Context, req *influxdb.OnboardingRequest) (*influxdb.OnboardingResults, error) { - return s.s.OnboardInitialUser(ctx, req) -} diff --git a/tenant/middleware_onboarding_logging.go b/tenant/middleware_onboarding_logging.go deleted file mode 100644 index 8c2826ef210..00000000000 --- a/tenant/middleware_onboarding_logging.go +++ /dev/null @@ -1,50 +0,0 @@ -package tenant - -import ( - "context" - "fmt" - "time" - - "github.com/influxdata/influxdb/v2" - "go.uber.org/zap" -) - -type OnboardingLogger struct { - logger *zap.Logger - onboardingService influxdb.OnboardingService -} - -// NewOnboardingLogger returns a logging service middleware for the Bucket Service. 
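// A dependency-free sketch of the `rec := m.rec.Record(op); ...; rec(err)` idiom the
// metrics middlewares above rely on: Record captures a start time and returns a closure
// that, when invoked with the call's error, reports duration and outcome and hands the
// error back so it can be returned inline. The observe callback is an illustrative
// stand-in for kit/metric's RED client, not its real API.
package sketch

import "time"

type recorder struct {
	observe func(op string, d time.Duration, err error)
}

// Record returns a function intended to be called exactly once, with the call's error.
func (r *recorder) Record(op string) func(error) error {
	start := time.Now()
	return func(err error) error {
		r.observe(op, time.Since(start), err)
		return err
	}
}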
-func NewOnboardingLogger(log *zap.Logger, s influxdb.OnboardingService) *OnboardingLogger { - return &OnboardingLogger{ - logger: log, - onboardingService: s, - } -} - -var _ influxdb.OnboardingService = (*OnboardingLogger)(nil) - -func (l *OnboardingLogger) IsOnboarding(ctx context.Context) (available bool, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Error("failed to check onboarding", zap.Error(err), dur) - return - } - l.logger.Debug("is onboarding", dur) - }(time.Now()) - return l.onboardingService.IsOnboarding(ctx) -} - -func (l *OnboardingLogger) OnboardInitialUser(ctx context.Context, req *influxdb.OnboardingRequest) (res *influxdb.OnboardingResults, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to onboard user %s", req.User) - l.logger.Error(msg, zap.Error(err), dur) - return - } - l.logger.Debug("onboard initial user", dur) - }(time.Now()) - return l.onboardingService.OnboardInitialUser(ctx, req) -} diff --git a/tenant/middleware_onboarding_metrics.go b/tenant/middleware_onboarding_metrics.go deleted file mode 100644 index e60a9fa1771..00000000000 --- a/tenant/middleware_onboarding_metrics.go +++ /dev/null @@ -1,39 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/metric" - "github.com/prometheus/client_golang/prometheus" -) - -var _ influxdb.OnboardingService = (*OnboardingMetrics)(nil) - -type OnboardingMetrics struct { - // RED metrics - rec *metric.REDClient - - onboardingService influxdb.OnboardingService -} - -// NewOnboardingMetrics returns a metrics service middleware for the User Service. -func NewOnboardingMetrics(reg prometheus.Registerer, s influxdb.OnboardingService, opts ...metric.ClientOptFn) *OnboardingMetrics { - o := metric.ApplyMetricOpts(opts...) - return &OnboardingMetrics{ - rec: metric.New(reg, o.ApplySuffix("onboard")), - onboardingService: s, - } -} - -func (m *OnboardingMetrics) IsOnboarding(ctx context.Context) (bool, error) { - rec := m.rec.Record("is_onboarding") - available, err := m.onboardingService.IsOnboarding(ctx) - return available, rec(err) -} - -func (m *OnboardingMetrics) OnboardInitialUser(ctx context.Context, req *influxdb.OnboardingRequest) (*influxdb.OnboardingResults, error) { - rec := m.rec.Record("onboard_initial_user") - res, err := m.onboardingService.OnboardInitialUser(ctx, req) - return res, rec(err) -} diff --git a/tenant/middleware_org_auth.go b/tenant/middleware_org_auth.go deleted file mode 100644 index ce9a33befbb..00000000000 --- a/tenant/middleware_org_auth.go +++ /dev/null @@ -1,92 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.OrganizationService = (*AuthedOrgService)(nil) - -// TODO (al): remove authorizer/org when the org service moves to tenant - -// AuthedOrgService wraps a influxdb.OrganizationService and authorizes actions -// against it appropriately. -type AuthedOrgService struct { - s influxdb.OrganizationService -} - -// NewAuthedOrgService constructs an instance of an authorizing org service. 
-func NewAuthedOrgService(s influxdb.OrganizationService) *AuthedOrgService { - return &AuthedOrgService{ - s: s, - } -} - -// FindOrganizationByID checks to see if the authorizer on context has read access to the id provided. -func (s *AuthedOrgService) FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - if _, _, err := authorizer.AuthorizeReadOrg(ctx, id); err != nil { - return nil, err - } - return s.s.FindOrganizationByID(ctx, id) -} - -// FindOrganization retrieves the organization and checks to see if the authorizer on context has read access to the org. -func (s *AuthedOrgService) FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - o, err := s.s.FindOrganization(ctx, filter) - if err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeReadOrg(ctx, o.ID); err != nil { - return nil, err - } - return o, nil -} - -// FindOrganizations retrieves all organizations that match the provided filter and then filters the list down to only the resources that are authorized. -func (s *AuthedOrgService) FindOrganizations(ctx context.Context, filter influxdb.OrganizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) { - if filter.Name == nil && filter.ID == nil && filter.UserID == nil { - // if the user doesnt have permission to look up all orgs we need to add this users id to the filter to save lookup time - auth, err := icontext.GetAuthorizer(ctx) - if err != nil { - return nil, 0, err - } - if _, _, err := authorizer.AuthorizeReadGlobal(ctx, influxdb.OrgsResourceType); err != nil { - userid := auth.GetUserID() - filter.UserID = &userid - } - } - - os, _, err := s.s.FindOrganizations(ctx, filter, opt...) - if err != nil { - return nil, 0, err - } - return authorizer.AuthorizeFindOrganizations(ctx, os) -} - -// CreateOrganization checks to see if the authorizer on context has write access to the global orgs resource. -func (s *AuthedOrgService) CreateOrganization(ctx context.Context, o *influxdb.Organization) error { - if _, _, err := authorizer.AuthorizeWriteGlobal(ctx, influxdb.OrgsResourceType); err != nil { - return err - } - return s.s.CreateOrganization(ctx, o) -} - -// UpdateOrganization checks to see if the authorizer on context has write access to the organization provided. -func (s *AuthedOrgService) UpdateOrganization(ctx context.Context, id platform.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { - if _, _, err := authorizer.AuthorizeWriteOrg(ctx, id); err != nil { - return nil, err - } - return s.s.UpdateOrganization(ctx, id, upd) -} - -// DeleteOrganization checks to see if the authorizer on context has write access to the organization provided. 
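// A simplified sketch of the scoping decision FindOrganizations makes above: when the
// filter is empty and the caller lacks global read access to orgs, the caller's own user
// ID is injected into the filter before delegating, so only organizations that user
// belongs to are listed. hasGlobalRead and callerID are illustrative stand-ins for the
// authorizer checks and icontext lookup used in the real code.
package sketch

import "context"

type orgFilter struct{ UserID *uint64 }

func scopeFilter(ctx context.Context, f orgFilter,
	hasGlobalRead func(context.Context) bool,
	callerID func(context.Context) uint64) orgFilter {

	if f.UserID == nil && !hasGlobalRead(ctx) {
		id := callerID(ctx)
		f.UserID = &id // restrict the listing to the caller's own organizations
	}
	return f
}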
-func (s *AuthedOrgService) DeleteOrganization(ctx context.Context, id platform.ID) error { - if _, _, err := authorizer.AuthorizeWriteOrg(ctx, id); err != nil { - return err - } - return s.s.DeleteOrganization(ctx, id) -} diff --git a/tenant/middleware_org_auth_test.go b/tenant/middleware_org_auth_test.go deleted file mode 100644 index 5907c6b8f03..00000000000 --- a/tenant/middleware_org_auth_test.go +++ /dev/null @@ -1,559 +0,0 @@ -package tenant_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var orgCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Organization) []*influxdb.Organization { - out := append([]*influxdb.Organization(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func TestOrgService_FindOrganizationByID(t *testing.T) { - type fields struct { - OrgService influxdb.OrganizationService - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - OrgService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: id, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - OrgService: &mock.OrganizationService{ - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: id, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedOrgService(tt.fields.OrgService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindOrganizationByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestOrgService_FindOrganization(t *testing.T) { - type fields struct { - OrgService influxdb.OrganizationService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access org", - fields: fields{ - 
OrgService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: 1, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access org", - fields: fields{ - OrgService: &mock.OrganizationService{ - FindOrganizationF: func(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: 1, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:orgs/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedOrgService(tt.fields.OrgService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindOrganization(ctx, influxdb.OrganizationFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestOrgService_FindOrganizations(t *testing.T) { - type fields struct { - OrgService influxdb.OrganizationService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - orgs []*influxdb.Organization - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all orgs", - fields: fields{ - OrgService: &mock.OrganizationService{ - FindOrganizationsF: func(ctx context.Context, filter influxdb.OrganizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) { - return []*influxdb.Organization{ - { - ID: 1, - }, - { - ID: 2, - }, - { - ID: 3, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - }, - }, - }, - wants: wants{ - orgs: []*influxdb.Organization{ - { - ID: 1, - }, - { - ID: 2, - }, - { - ID: 3, - }, - }, - }, - }, - { - name: "authorized to access a single org", - fields: fields{ - OrgService: &mock.OrganizationService{ - FindOrganizationsF: func(ctx context.Context, filter influxdb.OrganizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) { - return []*influxdb.Organization{ - { - ID: 1, - }, - { - ID: 2, - }, - { - ID: 3, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - wants: wants{ - orgs: []*influxdb.Organization{ - { - ID: 2, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedOrgService(tt.fields.OrgService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - orgs, _, err := s.FindOrganizations(ctx, influxdb.OrganizationFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(orgs, tt.wants.orgs, orgCmpOptions...); diff != "" { - 
t.Errorf("organizations are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestOrgService_UpdateOrganization(t *testing.T) { - type fields struct { - OrgService influxdb.OrganizationService - } - type args struct { - id platform.ID - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update org", - fields: fields{ - OrgService: &mock.OrganizationService{ - UpdateOrganizationF: func(ctx context.Context, id platform.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: 1, - }, nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update org", - fields: fields{ - OrgService: &mock.OrganizationService{ - UpdateOrganizationF: func(ctx context.Context, id platform.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: 1, - }, nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedOrgService(tt.fields.OrgService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.UpdateOrganization(ctx, tt.args.id, influxdb.OrganizationUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestOrgService_DeleteOrganization(t *testing.T) { - type fields struct { - OrgService influxdb.OrganizationService - } - type args struct { - id platform.ID - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete org", - fields: fields{ - OrgService: &mock.OrganizationService{ - DeleteOrganizationF: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete org", - fields: fields{ - OrgService: &mock.OrganizationService{ - DeleteOrganizationF: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedOrgService(tt.fields.OrgService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := 
s.DeleteOrganization(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestOrgService_CreateOrganization(t *testing.T) { - type fields struct { - OrgService influxdb.OrganizationService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create org", - fields: fields{ - OrgService: &mock.OrganizationService{ - CreateOrganizationF: func(ctx context.Context, o *influxdb.Organization) error { - return nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create org", - fields: fields{ - OrgService: &mock.OrganizationService{ - CreateOrganizationF: func(ctx context.Context, o *influxdb.Organization) error { - return nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:orgs is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedOrgService(tt.fields.OrgService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.CreateOrganization(ctx, &influxdb.Organization{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} diff --git a/tenant/middleware_org_logging.go b/tenant/middleware_org_logging.go deleted file mode 100644 index d28d55782a4..00000000000 --- a/tenant/middleware_org_logging.go +++ /dev/null @@ -1,101 +0,0 @@ -package tenant - -import ( - "context" - "fmt" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "go.uber.org/zap" -) - -type OrgLogger struct { - logger *zap.Logger - orgService influxdb.OrganizationService -} - -// NewOrgLogger returns a logging service middleware for the Organization Service. 
-func NewOrgLogger(log *zap.Logger, s influxdb.OrganizationService) *OrgLogger { - return &OrgLogger{ - logger: log, - orgService: s, - } -} - -var _ influxdb.OrganizationService = (*OrgLogger)(nil) - -func (l *OrgLogger) CreateOrganization(ctx context.Context, u *influxdb.Organization) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to create org", zap.Error(err), dur) - return - } - l.logger.Debug("org create", dur) - }(time.Now()) - return l.orgService.CreateOrganization(ctx, u) -} - -func (l *OrgLogger) FindOrganizationByID(ctx context.Context, id platform.ID) (u *influxdb.Organization, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to find org with ID %v", id) - l.logger.Debug(msg, zap.Error(err), dur) - return - } - l.logger.Debug("org find by ID", dur) - }(time.Now()) - return l.orgService.FindOrganizationByID(ctx, id) -} - -func (l *OrgLogger) FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (u *influxdb.Organization, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find org matching the given filter", zap.Error(err), dur) - return - } - l.logger.Debug("org find", dur) - - }(time.Now()) - return l.orgService.FindOrganization(ctx, filter) -} - -func (l *OrgLogger) FindOrganizations(ctx context.Context, filter influxdb.OrganizationFilter, opt ...influxdb.FindOptions) (orgs []*influxdb.Organization, n int, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find org matching the given filter", zap.Error(err), dur) - return - } - l.logger.Debug("orgs find", dur) - }(time.Now()) - return l.orgService.FindOrganizations(ctx, filter, opt...) 
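// [Editorial sketch, not part of the deleted file] Every OrgLogger method
// above follows the same decorator pattern: record the start time, defer a
// closure that logs the elapsed duration plus the error (if any), then
// delegate to the wrapped OrganizationService. The named return value is what
// lets the deferred closure observe the delegate's error. A minimal,
// self-contained version of that pattern, using hypothetical names:

package sketch

import (
	"context"
	"time"

	"go.uber.org/zap"
)

// finder stands in for the wrapped service interface.
type finder interface {
	Find(ctx context.Context, id string) (string, error)
}

// loggingFinder decorates a finder with duration and error logging.
type loggingFinder struct {
	logger *zap.Logger
	next   finder
}

// Find delegates to the wrapped service; the deferred closure reads the
// named return value err only after the delegate has run.
func (l *loggingFinder) Find(ctx context.Context, id string) (name string, err error) {
	defer func(start time.Time) {
		dur := zap.Duration("took", time.Since(start))
		if err != nil {
			l.logger.Debug("failed to find", zap.Error(err), dur)
			return
		}
		l.logger.Debug("find", dur)
	}(time.Now())
	return l.next.Find(ctx, id)
}

// Note the design choice mirrored from the file above: time.Now() is evaluated
// when the defer statement executes, so the measured duration covers only the
// delegated call, and logging failures at Debug level keeps the middleware
// cheap in the happy path.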
-} - -func (l *OrgLogger) UpdateOrganization(ctx context.Context, id platform.ID, upd influxdb.OrganizationUpdate) (u *influxdb.Organization, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to update org", zap.Error(err), dur) - return - } - l.logger.Debug("org update", dur) - }(time.Now()) - return l.orgService.UpdateOrganization(ctx, id, upd) -} - -func (l *OrgLogger) DeleteOrganization(ctx context.Context, id platform.ID) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to delete org with ID %v", id) - l.logger.Debug(msg, zap.Error(err), dur) - return - } - l.logger.Debug("org delete", dur) - }(time.Now()) - return l.orgService.DeleteOrganization(ctx, id) -} diff --git a/tenant/middleware_org_logging_test.go b/tenant/middleware_org_logging_test.go deleted file mode 100644 index ede755bf0ff..00000000000 --- a/tenant/middleware_org_logging_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package tenant_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestOrganizationLoggingService(t *testing.T) { - influxdbtesting.OrganizationService(initBoltOrganizationLoggingService, t) -} - -func initBoltOrganizationLoggingService(f influxdbtesting.OrganizationFields, t *testing.T) (influxdb.OrganizationService, string, func()) { - orgSvc, s, closer := initBoltOrganizationService(f, t) - return tenant.NewOrgLogger(zaptest.NewLogger(t), orgSvc), s, closer -} diff --git a/tenant/middleware_org_metrics.go b/tenant/middleware_org_metrics.go deleted file mode 100644 index 15bf8b9eada..00000000000 --- a/tenant/middleware_org_metrics.go +++ /dev/null @@ -1,64 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/metric" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/prometheus/client_golang/prometheus" -) - -type OrgMetrics struct { - // RED metrics - rec *metric.REDClient - - orgService influxdb.OrganizationService -} - -var _ influxdb.OrganizationService = (*OrgMetrics)(nil) - -// NewOrgMetrics returns a metrics service middleware for the Organization Service. -func NewOrgMetrics(reg prometheus.Registerer, s influxdb.OrganizationService, opts ...metric.ClientOptFn) *OrgMetrics { - o := metric.ApplyMetricOpts(opts...) - return &OrgMetrics{ - rec: metric.New(reg, o.ApplySuffix("org")), - orgService: s, - } -} - -func (m *OrgMetrics) FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - rec := m.rec.Record("find_org_by_id") - org, err := m.orgService.FindOrganizationByID(ctx, id) - return org, rec(err) -} - -func (m *OrgMetrics) FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - rec := m.rec.Record("find_org") - org, err := m.orgService.FindOrganization(ctx, filter) - return org, rec(err) -} - -func (m *OrgMetrics) FindOrganizations(ctx context.Context, filter influxdb.OrganizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) { - rec := m.rec.Record("find_orgs") - orgs, n, err := m.orgService.FindOrganizations(ctx, filter, opt...) 
- return orgs, n, rec(err) -} - -func (m *OrgMetrics) CreateOrganization(ctx context.Context, b *influxdb.Organization) error { - rec := m.rec.Record("create_org") - err := m.orgService.CreateOrganization(ctx, b) - return rec(err) -} - -func (m *OrgMetrics) UpdateOrganization(ctx context.Context, id platform.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { - rec := m.rec.Record("update_org") - updatedOrg, err := m.orgService.UpdateOrganization(ctx, id, upd) - return updatedOrg, rec(err) -} - -func (m *OrgMetrics) DeleteOrganization(ctx context.Context, id platform.ID) error { - rec := m.rec.Record("delete_org") - err := m.orgService.DeleteOrganization(ctx, id) - return rec(err) -} diff --git a/tenant/middleware_urm_auth.go b/tenant/middleware_urm_auth.go deleted file mode 100644 index 5a9be361670..00000000000 --- a/tenant/middleware_urm_auth.go +++ /dev/null @@ -1,100 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - "github.com/influxdata/influxdb/v2/kit/platform" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" -) - -type AuthedURMService struct { - s influxdb.UserResourceMappingService - orgService influxdb.OrganizationService -} - -func NewAuthedURMService(orgSvc influxdb.OrganizationService, s influxdb.UserResourceMappingService) *AuthedURMService { - return &AuthedURMService{ - s: s, - orgService: orgSvc, - } -} - -func (s *AuthedURMService) FindUserResourceMappings(ctx context.Context, filter influxdb.UserResourceMappingFilter, opt ...influxdb.FindOptions) ([]*influxdb.UserResourceMapping, int, error) { - orgID := kithttp.OrgIDFromContext(ctx) // resource's orgID - - // Check if user making request has read access to organization prior to listing URMs. - if orgID != nil { - if _, _, err := authorizer.AuthorizeReadResource(ctx, influxdb.OrgsResourceType, *orgID); err != nil { - return nil, 0, ErrNotFound - } - } - - urms, _, err := s.s.FindUserResourceMappings(ctx, filter, opt...) 
- if err != nil { - return nil, 0, err - } - - authedUrms := urms[:0] - for _, urm := range urms { - if orgID != nil { - if _, _, err := authorizer.AuthorizeRead(ctx, urm.ResourceType, urm.ResourceID, *orgID); err != nil { - continue - } - } else { - if _, _, err := authorizer.AuthorizeReadResource(ctx, urm.ResourceType, urm.ResourceID); err != nil { - continue - } - } - authedUrms = append(authedUrms, urm) - } - - return authedUrms, len(authedUrms), nil -} - -func (s *AuthedURMService) CreateUserResourceMapping(ctx context.Context, m *influxdb.UserResourceMapping) error { - orgID := kithttp.OrgIDFromContext(ctx) - if orgID != nil { - if _, _, err := authorizer.AuthorizeWrite(ctx, m.ResourceType, m.ResourceID, *orgID); err != nil { - return err - } - } else { - if _, _, err := authorizer.AuthorizeWriteResource(ctx, m.ResourceType, m.ResourceID); err != nil { - return err - } - } - - return s.s.CreateUserResourceMapping(ctx, m) -} - -func (s *AuthedURMService) DeleteUserResourceMapping(ctx context.Context, resourceID platform.ID, userID platform.ID) error { - if !resourceID.Valid() || !userID.Valid() { - return ErrInvalidURMID - } - - f := influxdb.UserResourceMappingFilter{ResourceID: resourceID, UserID: userID} - urms, _, err := s.s.FindUserResourceMappings(ctx, f) - if err != nil { - return err - } - - // There should only be one because resourceID and userID are used to create the primary key for urms - for _, urm := range urms { - orgID := kithttp.OrgIDFromContext(ctx) - if orgID != nil { - if _, _, err := authorizer.AuthorizeWrite(ctx, urm.ResourceType, urm.ResourceID, *orgID); err != nil { - return err - } - } else { - if _, _, err := authorizer.AuthorizeWriteResource(ctx, urm.ResourceType, urm.ResourceID); err != nil { - return err - } - } - - if err := s.s.DeleteUserResourceMapping(ctx, urm.ResourceID, urm.UserID); err != nil { - return err - } - } - return nil -} diff --git a/tenant/middleware_urm_auth_test.go b/tenant/middleware_urm_auth_test.go deleted file mode 100644 index 5e51e089d66..00000000000 --- a/tenant/middleware_urm_auth_test.go +++ /dev/null @@ -1,389 +0,0 @@ -package tenant - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - influxdbcontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/feature" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "github.com/influxdata/influxdb/v2/mock" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -var idOne platform.ID = 1 -var idTwo platform.ID = 2 -var idThree platform.ID = 3 - -func TestURMService_FindUserResourceMappings(t *testing.T) { - type fields struct { - UserResourceMappingService influxdb.UserResourceMappingService - OrgService influxdb.OrganizationService - } - type args struct { - permissions []influxdb.Permission - } - type wants struct { - err error - urms []*influxdb.UserResourceMapping - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all users", - fields: fields{ - UserResourceMappingService: &mock.UserResourceMappingService{ - FindMappingsFn: func(ctx context.Context, filter influxdb.UserResourceMappingFilter) ([]*influxdb.UserResourceMapping, int, error) { - return []*influxdb.UserResourceMapping{ - { - ResourceID: 1, - ResourceType: 
influxdb.BucketsResourceType, - }, - { - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - { - ResourceID: 3, - ResourceType: influxdb.BucketsResourceType, - }, - }, 3, nil - }, - }, - }, - args: args{ - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - // ID: &idOne, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - urms: []*influxdb.UserResourceMapping{ - { - ResourceID: 1, - ResourceType: influxdb.BucketsResourceType, - }, - { - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - { - ResourceID: 3, - ResourceType: influxdb.BucketsResourceType, - }, - }, - }, - }, - { - name: "authorized to see all users by org auth", - fields: fields{ - UserResourceMappingService: &mock.UserResourceMappingService{ - FindMappingsFn: func(ctx context.Context, filter influxdb.UserResourceMappingFilter) ([]*influxdb.UserResourceMapping, int, error) { - return []*influxdb.UserResourceMapping{ - { - ResourceID: 1, - ResourceType: influxdb.BucketsResourceType, - }, - { - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - { - ResourceID: 3, - ResourceType: influxdb.BucketsResourceType, - }, - }, 3, nil - }, - }, - }, - args: args{ - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - }, - wants: wants{ - err: ErrNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := NewAuthedURMService(tt.fields.OrgService, tt.fields.UserResourceMappingService) - orgID := influxdbtesting.IDPtr(10) - ctx := context.WithValue(context.Background(), kithttp.CtxOrgKey, *orgID) - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - ctx, _ = feature.Annotate(ctx, feature.DefaultFlagger(), feature.MakeBoolFlag("Org Only Member list", - "orgOnlyMemberList", - "Compute Team", - true, - feature.Temporary, - false, - )) - - urms, _, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(urms, tt.wants.urms); diff != "" { - t.Errorf("urms are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestURMService_FindUserResourceMappingsBucketAuth(t *testing.T) { - type fields struct { - UserResourceMappingService influxdb.UserResourceMappingService - OrgService influxdb.OrganizationService - } - type args struct { - permissions []influxdb.Permission - } - type wants struct { - err error - urms []*influxdb.UserResourceMapping - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all users by bucket auth", - fields: fields{ - UserResourceMappingService: &mock.UserResourceMappingService{ - FindMappingsFn: func(ctx context.Context, filter influxdb.UserResourceMappingFilter) ([]*influxdb.UserResourceMapping, int, error) { - return []*influxdb.UserResourceMapping{ - { - ResourceID: 1, - ResourceType: 
influxdb.BucketsResourceType, - }, - { - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - { - ResourceID: 3, - ResourceType: influxdb.BucketsResourceType, - }, - }, 3, nil - }, - }, - }, - args: args{ - permissions: []influxdb.Permission{ - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: &idOne, - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: &idTwo, - }, - }, - { - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: &idThree, - }, - }, - }, - }, - wants: wants{ - urms: []*influxdb.UserResourceMapping{ - { - ResourceID: 1, - ResourceType: influxdb.BucketsResourceType, - }, - { - ResourceID: 2, - ResourceType: influxdb.BucketsResourceType, - }, - { - ResourceID: 3, - ResourceType: influxdb.BucketsResourceType, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := NewAuthedURMService(tt.fields.OrgService, tt.fields.UserResourceMappingService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, tt.args.permissions)) - - urms, _, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(urms, tt.wants.urms); diff != "" { - t.Errorf("urms are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestURMService_WriteUserResourceMapping(t *testing.T) { - type fields struct { - UserResourceMappingService influxdb.UserResourceMappingService - OrgService influxdb.OrganizationService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to write urm", - fields: fields{ - UserResourceMappingService: &mock.UserResourceMappingService{ - CreateMappingFn: func(ctx context.Context, m *influxdb.UserResourceMapping) error { - return nil - }, - DeleteMappingFn: func(ctx context.Context, rid, uid platform.ID) error { - return nil - }, - FindMappingsFn: func(ctx context.Context, filter influxdb.UserResourceMappingFilter) ([]*influxdb.UserResourceMapping, int, error) { - return []*influxdb.UserResourceMapping{ - { - ResourceID: 1, - ResourceType: influxdb.BucketsResourceType, - UserID: 100, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - ID: &idOne, - OrgID: influxdbtesting.IDPtr(10), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to write urm", - fields: fields{ - UserResourceMappingService: &mock.UserResourceMappingService{ - CreateMappingFn: func(ctx context.Context, m *influxdb.UserResourceMapping) error { - return nil - }, - DeleteMappingFn: func(ctx context.Context, rid, uid platform.ID) error { - return nil - }, - FindMappingsFn: func(ctx context.Context, filter influxdb.UserResourceMappingFilter) ([]*influxdb.UserResourceMapping, int, error) { - return []*influxdb.UserResourceMapping{ - { - ResourceID: 1, - ResourceType: influxdb.BucketsResourceType, - UserID: 100, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: influxdbtesting.IDPtr(11), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: 
"write:buckets/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := NewAuthedURMService(tt.fields.OrgService, tt.fields.UserResourceMappingService) - - ctx := context.Background() - ctx = influxdbcontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - t.Run("create urm", func(t *testing.T) { - err := s.CreateUserResourceMapping(ctx, &influxdb.UserResourceMapping{ResourceType: influxdb.BucketsResourceType, ResourceID: 1}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - - t.Run("delete urm", func(t *testing.T) { - err := s.DeleteUserResourceMapping(ctx, 1, 100) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - - }) - } -} diff --git a/tenant/middleware_urm_logging.go b/tenant/middleware_urm_logging.go deleted file mode 100644 index 4a657f8cf0a..00000000000 --- a/tenant/middleware_urm_logging.go +++ /dev/null @@ -1,63 +0,0 @@ -package tenant - -import ( - "context" - "fmt" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "go.uber.org/zap" -) - -type URMLogger struct { - logger *zap.Logger - urmService influxdb.UserResourceMappingService -} - -// NewUrmLogger returns a logging service middleware for the User Resource Mapping Service. -func NewURMLogger(log *zap.Logger, s influxdb.UserResourceMappingService) *URMLogger { - return &URMLogger{ - logger: log, - urmService: s, - } -} - -var _ influxdb.UserResourceMappingService = (*URMLogger)(nil) - -func (l *URMLogger) CreateUserResourceMapping(ctx context.Context, u *influxdb.UserResourceMapping) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Error("failed to create urm", zap.Error(err), dur) - return - } - l.logger.Debug("urm create", dur) - }(time.Now()) - return l.urmService.CreateUserResourceMapping(ctx, u) -} - -func (l *URMLogger) FindUserResourceMappings(ctx context.Context, filter influxdb.UserResourceMappingFilter, opt ...influxdb.FindOptions) (urms []*influxdb.UserResourceMapping, n int, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Error("failed to find urms matching the given filter", zap.Error(err), dur) - return - } - l.logger.Debug("urm find", dur) - }(time.Now()) - return l.urmService.FindUserResourceMappings(ctx, filter, opt...) 
-} - -func (l *URMLogger) DeleteUserResourceMapping(ctx context.Context, resourceID, userID platform.ID) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to delete urm for resource %v and user %v", resourceID, userID) - l.logger.Error(msg, zap.Error(err), dur) - return - } - l.logger.Debug("urm delete", dur) - }(time.Now()) - return l.urmService.DeleteUserResourceMapping(ctx, resourceID, userID) -} diff --git a/tenant/middleware_urm_logging_test.go b/tenant/middleware_urm_logging_test.go deleted file mode 100644 index 9d9790067ec..00000000000 --- a/tenant/middleware_urm_logging_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package tenant_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestUserResourceMappingLoggingService(t *testing.T) { - influxdbtesting.UserResourceMappingService(initBoltUserResourceMappingLoggingService, t) -} - -func initBoltUserResourceMappingLoggingService(f influxdbtesting.UserResourceFields, t *testing.T) (influxdb.UserResourceMappingService, func()) { - svc, closer := initBoltUserResourceMappingService(f, t) - return tenant.NewURMLogger(zaptest.NewLogger(t), svc), closer -} diff --git a/tenant/middleware_urm_metrics.go b/tenant/middleware_urm_metrics.go deleted file mode 100644 index 8906fd66376..00000000000 --- a/tenant/middleware_urm_metrics.go +++ /dev/null @@ -1,46 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/metric" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/prometheus/client_golang/prometheus" -) - -type UrmMetrics struct { - // RED metrics - rec *metric.REDClient - - urmService influxdb.UserResourceMappingService -} - -var _ influxdb.UserResourceMappingService = (*UrmMetrics)(nil) - -// NewUrmMetrics returns a metrics service middleware for the User Resource Mapping Service. -func NewUrmMetrics(reg prometheus.Registerer, s influxdb.UserResourceMappingService, opts ...metric.ClientOptFn) *UrmMetrics { - o := metric.ApplyMetricOpts(opts...) - return &UrmMetrics{ - rec: metric.New(reg, o.ApplySuffix("urm")), - urmService: s, - } -} - -func (m *UrmMetrics) FindUserResourceMappings(ctx context.Context, filter influxdb.UserResourceMappingFilter, opt ...influxdb.FindOptions) ([]*influxdb.UserResourceMapping, int, error) { - rec := m.rec.Record("find_urms") - urms, n, err := m.urmService.FindUserResourceMappings(ctx, filter, opt...) 
- return urms, n, rec(err) -} - -func (m *UrmMetrics) CreateUserResourceMapping(ctx context.Context, urm *influxdb.UserResourceMapping) error { - rec := m.rec.Record("create_urm") - err := m.urmService.CreateUserResourceMapping(ctx, urm) - return rec(err) -} - -func (m *UrmMetrics) DeleteUserResourceMapping(ctx context.Context, resourceID, userID platform.ID) error { - rec := m.rec.Record("delete_urm") - err := m.urmService.DeleteUserResourceMapping(ctx, resourceID, userID) - return rec(err) -} diff --git a/tenant/middleware_user_auth.go b/tenant/middleware_user_auth.go deleted file mode 100644 index 47f6d3c1eef..00000000000 --- a/tenant/middleware_user_auth.go +++ /dev/null @@ -1,124 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ influxdb.UserService = (*AuthedUserService)(nil) - -// TODO (al): remove authorizer/user when the user service moves to tenant - -// AuthedUserService wraps a influxdb.UserService and authorizes actions -// against it appropriately. -type AuthedUserService struct { - s influxdb.UserService -} - -// NewAuthedUserService constructs an instance of an authorizing user service. -func NewAuthedUserService(s influxdb.UserService) *AuthedUserService { - return &AuthedUserService{ - s: s, - } -} - -// FindUserByID checks to see if the authorizer on context has read access to the id provided. -func (s *AuthedUserService) FindUserByID(ctx context.Context, id platform.ID) (*influxdb.User, error) { - if _, _, err := authorizer.AuthorizeReadResource(ctx, influxdb.UsersResourceType, id); err != nil { - return nil, err - } - return s.s.FindUserByID(ctx, id) -} - -// FindUser retrieves the user and checks to see if the authorizer on context has read access to the user. -func (s *AuthedUserService) FindUser(ctx context.Context, filter influxdb.UserFilter) (*influxdb.User, error) { - u, err := s.s.FindUser(ctx, filter) - if err != nil { - return nil, err - } - if _, _, err := authorizer.AuthorizeReadResource(ctx, influxdb.UsersResourceType, u.ID); err != nil { - return nil, err - } - return u, nil -} - -// FindUsers retrieves all users that match the provided filter and then filters the list down to only the resources that are authorized. -func (s *AuthedUserService) FindUsers(ctx context.Context, filter influxdb.UserFilter, opt ...influxdb.FindOptions) ([]*influxdb.User, int, error) { - // TODO (desa): we'll likely want to push this operation into the database eventually since fetching the whole list of data - // will likely be expensive. - us, _, err := s.s.FindUsers(ctx, filter, opt...) - if err != nil { - return nil, 0, err - } - return authorizer.AuthorizeFindUsers(ctx, us) -} - -// CreateUser checks to see if the authorizer on context has write access to the global users resource. -func (s *AuthedUserService) CreateUser(ctx context.Context, o *influxdb.User) error { - if _, _, err := authorizer.AuthorizeWriteGlobal(ctx, influxdb.UsersResourceType); err != nil { - return err - } - return s.s.CreateUser(ctx, o) -} - -// UpdateUser checks to see if the authorizer on context has write access to the user provided. 
-func (s *AuthedUserService) UpdateUser(ctx context.Context, id platform.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { - if _, _, err := authorizer.AuthorizeWriteResource(ctx, influxdb.UsersResourceType, id); err != nil { - return nil, err - } - return s.s.UpdateUser(ctx, id, upd) -} - -// DeleteUser checks to see if the authorizer on context has write access to the user provided. -func (s *AuthedUserService) DeleteUser(ctx context.Context, id platform.ID) error { - if _, _, err := authorizer.AuthorizeWriteResource(ctx, influxdb.UsersResourceType, id); err != nil { - return err - } - return s.s.DeleteUser(ctx, id) -} - -func (s *AuthedUserService) FindPermissionForUser(ctx context.Context, id platform.ID) (influxdb.PermissionSet, error) { - if _, _, err := authorizer.AuthorizeReadResource(ctx, influxdb.UsersResourceType, id); err != nil { - return nil, err - } - return s.s.FindPermissionForUser(ctx, id) -} - -// AuthedPasswordService is a new authorization middleware for a password service. -type AuthedPasswordService struct { - s influxdb.PasswordsService -} - -// NewAuthedPasswordService wraps an existing password service with auth middleware. -func NewAuthedPasswordService(svc influxdb.PasswordsService) *AuthedPasswordService { - return &AuthedPasswordService{s: svc} -} - -// SetPassword overrides the password of a known user. -func (s *AuthedPasswordService) SetPassword(ctx context.Context, userID platform.ID, password string) error { - if _, _, err := authorizer.AuthorizeWriteResource(ctx, influxdb.UsersResourceType, userID); err != nil { - return err - } - return s.s.SetPassword(ctx, userID, password) -} - -// ComparePassword checks if the password matches the password recorded. -// Passwords that do not match return errors. -func (s *AuthedPasswordService) ComparePassword(ctx context.Context, userID platform.ID, password string) error { - if _, _, err := authorizer.AuthorizeWriteResource(ctx, influxdb.UsersResourceType, userID); err != nil { - return err - } - return s.s.ComparePassword(ctx, userID, password) -} - -// CompareAndSetPassword checks the password and if they match -// updates to the new password. -func (s *AuthedPasswordService) CompareAndSetPassword(ctx context.Context, userID platform.ID, old string, new string) error { - if _, _, err := authorizer.AuthorizeWriteResource(ctx, influxdb.UsersResourceType, userID); err != nil { - return err - } - return s.s.CompareAndSetPassword(ctx, userID, old, new) -} diff --git a/tenant/middleware_user_auth_test.go b/tenant/middleware_user_auth_test.go deleted file mode 100644 index 8d71b8f3157..00000000000 --- a/tenant/middleware_user_auth_test.go +++ /dev/null @@ -1,650 +0,0 @@ -package tenant_test - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" -) - -var userCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.User) []*influxdb.User { - out := append([]*influxdb.User(nil), in...) 
// Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func TestUserService_FindUserByID(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - permission influxdb.Permission - id platform.ID - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access id", - fields: fields{ - UserService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ - ID: id, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - id: 1, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access id", - fields: fields{ - UserService: &mock.UserService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ - ID: id, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - id: 1, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:users/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedUserService(tt.fields.UserService) - - ctx := context.Background() - ctx = icontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindUserByID(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestUserService_FindUser(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to access user", - fields: fields{ - UserService: &mock.UserService{ - FindUserFn: func(ctx context.Context, filter influxdb.UserFilter) (*influxdb.User, error) { - return &influxdb.User{ - ID: 1, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to access user", - fields: fields{ - UserService: &mock.UserService{ - FindUserFn: func(ctx context.Context, filter influxdb.UserFilter) (*influxdb.User, error) { - return &influxdb.User{ - ID: 1, - }, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "read:users/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedUserService(tt.fields.UserService) - - ctx := context.Background() - ctx = icontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.FindUser(ctx, influxdb.UserFilter{}) - 
influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestUserService_FindUsers(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - users []*influxdb.User - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to see all users", - fields: fields{ - UserService: &mock.UserService{ - FindUsersFn: func(ctx context.Context, filter influxdb.UserFilter, opt ...influxdb.FindOptions) ([]*influxdb.User, int, error) { - return []*influxdb.User{ - { - ID: 1, - }, - { - ID: 2, - }, - { - ID: 3, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - }, - }, - }, - wants: wants{ - users: []*influxdb.User{ - { - ID: 1, - }, - { - ID: 2, - }, - { - ID: 3, - }, - }, - }, - }, - { - name: "authorized to access a single user", - fields: fields{ - UserService: &mock.UserService{ - FindUsersFn: func(ctx context.Context, filter influxdb.UserFilter, opt ...influxdb.FindOptions) ([]*influxdb.User, int, error) { - return []*influxdb.User{ - { - ID: 1, - }, - { - ID: 2, - }, - { - ID: 3, - }, - }, 3, nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(2), - }, - }, - }, - wants: wants{ - users: []*influxdb.User{ - { - ID: 2, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedUserService(tt.fields.UserService) - - ctx := context.Background() - ctx = icontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - users, _, err := s.FindUsers(ctx, influxdb.UserFilter{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - - if diff := cmp.Diff(users, tt.wants.users, userCmpOptions...); diff != "" { - t.Errorf("users are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func TestUserService_UpdateUser(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - id platform.ID - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to update user", - fields: fields{ - UserService: &mock.UserService{ - UpdateUserFn: func(ctx context.Context, id platform.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { - return &influxdb.User{ - ID: 1, - }, nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to update user", - fields: fields{ - UserService: &mock.UserService{ - UpdateUserFn: func(ctx context.Context, id platform.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { - return &influxdb.User{ - ID: 1, - }, nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:users/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - 
t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedUserService(tt.fields.UserService) - - ctx := context.Background() - ctx = icontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - _, err := s.UpdateUser(ctx, tt.args.id, influxdb.UserUpdate{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestUserService_DeleteUser(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - id platform.ID - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to delete user", - fields: fields{ - UserService: &mock.UserService{ - DeleteUserFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to delete user", - fields: fields{ - UserService: &mock.UserService{ - DeleteUserFn: func(ctx context.Context, id platform.ID) error { - return nil - }, - }, - }, - args: args{ - id: 1, - permission: influxdb.Permission{ - Action: "read", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:users/0000000000000001 is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedUserService(tt.fields.UserService) - - ctx := context.Background() - ctx = icontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.DeleteUser(ctx, tt.args.id) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func TestUserService_CreateUser(t *testing.T) { - type fields struct { - UserService influxdb.UserService - } - type args struct { - permission influxdb.Permission - } - type wants struct { - err error - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "authorized to create user", - fields: fields{ - UserService: &mock.UserService{ - CreateUserFn: func(ctx context.Context, o *influxdb.User) error { - return nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - }, - }, - }, - wants: wants{ - err: nil, - }, - }, - { - name: "unauthorized to create user", - fields: fields{ - UserService: &mock.UserService{ - CreateUserFn: func(ctx context.Context, o *influxdb.User) error { - return nil - }, - }, - }, - args: args{ - permission: influxdb.Permission{ - Action: "write", - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: influxdbtesting.IDPtr(1), - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Msg: "write:users is unauthorized", - Code: errors.EUnauthorized, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s := tenant.NewAuthedUserService(tt.fields.UserService) - - ctx := context.Background() - ctx = icontext.SetAuthorizer(ctx, mock.NewMockAuthorizer(false, []influxdb.Permission{tt.args.permission})) - - err := s.CreateUser(ctx, &influxdb.User{}) - influxdbtesting.ErrorsEqual(t, err, tt.wants.err) - }) - } -} - -func 
TestPasswordService(t *testing.T) { - t.Run("SetPassword", func(t *testing.T) { - t.Run("user with permissions should proceed", func(t *testing.T) { - userID := platform.ID(1) - - permission := influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: &userID, - }, - } - - fakeSVC := mock.NewPasswordsService() - fakeSVC.SetPasswordFn = func(_ context.Context, _ platform.ID, _ string) error { - return nil - } - s := tenant.NewAuthedPasswordService(fakeSVC) - - ctx := icontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{permission})) - - err := s.SetPassword(ctx, 1, "password") - require.NoError(t, err) - }) - - t.Run("user without permissions should proceed", func(t *testing.T) { - goodUserID := platform.ID(1) - badUserID := platform.ID(3) - - tests := []struct { - name string - badPermission influxdb.Permission - }{ - { - name: "has no access", - }, - { - name: "has read only access on correct resource", - badPermission: influxdb.Permission{ - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: &goodUserID, - }, - }, - }, - { - name: "has write access on incorrect resource", - badPermission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.OrgsResourceType, - ID: &goodUserID, - }, - }, - }, - { - name: "user accessing user that is not self", - badPermission: influxdb.Permission{ - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.UsersResourceType, - ID: &badUserID, - }, - }, - }, - } - - for _, tt := range tests { - fn := func(t *testing.T) { - fakeSVC := &mock.PasswordsService{ - SetPasswordFn: func(_ context.Context, _ platform.ID, _ string) error { - return nil - }, - } - s := authorizer.NewPasswordService(fakeSVC) - - ctx := icontext.SetAuthorizer(context.Background(), mock.NewMockAuthorizer(false, []influxdb.Permission{tt.badPermission})) - - err := s.SetPassword(ctx, goodUserID, "password") - require.Error(t, err) - } - - t.Run(tt.name, fn) - } - }) - }) -} diff --git a/tenant/middleware_user_logging.go b/tenant/middleware_user_logging.go deleted file mode 100644 index 4d973505d7c..00000000000 --- a/tenant/middleware_user_logging.go +++ /dev/null @@ -1,166 +0,0 @@ -package tenant - -import ( - "context" - "fmt" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "go.uber.org/zap" -) - -var _ influxdb.UserService = (*UserLogger)(nil) -var _ influxdb.PasswordsService = (*PasswordLogger)(nil) - -type UserLogger struct { - logger *zap.Logger - userService influxdb.UserService -} - -// NewUserLogger returns a logging service middleware for the User Service. 
-func NewUserLogger(log *zap.Logger, s influxdb.UserService) *UserLogger { - return &UserLogger{ - logger: log, - userService: s, - } -} - -func (l *UserLogger) CreateUser(ctx context.Context, u *influxdb.User) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to create user", zap.Error(err), dur) - return - } - l.logger.Debug("user create", dur) - }(time.Now()) - return l.userService.CreateUser(ctx, u) -} - -func (l *UserLogger) FindUserByID(ctx context.Context, id platform.ID) (u *influxdb.User, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to find user with ID %v", id) - l.logger.Debug(msg, zap.Error(err), dur) - return - } - l.logger.Debug("user find by ID", dur) - }(time.Now()) - return l.userService.FindUserByID(ctx, id) -} - -func (l *UserLogger) FindUser(ctx context.Context, filter influxdb.UserFilter) (u *influxdb.User, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find user matching the given filter", zap.Error(err), dur) - return - } - l.logger.Debug("user find", dur) - }(time.Now()) - return l.userService.FindUser(ctx, filter) -} - -func (l *UserLogger) FindUsers(ctx context.Context, filter influxdb.UserFilter, opt ...influxdb.FindOptions) (users []*influxdb.User, n int, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to find users matching the given filter", zap.Error(err), dur) - return - } - l.logger.Debug("users find", dur) - }(time.Now()) - return l.userService.FindUsers(ctx, filter, opt...) -} - -func (l *UserLogger) UpdateUser(ctx context.Context, id platform.ID, upd influxdb.UserUpdate) (u *influxdb.User, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - l.logger.Debug("failed to update user", zap.Error(err), dur) - return - } - l.logger.Debug("user update", dur) - }(time.Now()) - return l.userService.UpdateUser(ctx, id, upd) -} - -func (l *UserLogger) DeleteUser(ctx context.Context, id platform.ID) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to delete user with ID %v", id) - l.logger.Debug(msg, zap.Error(err), dur) - return - } - l.logger.Debug("user create", dur) - }(time.Now()) - return l.userService.DeleteUser(ctx, id) -} - -func (l *UserLogger) FindPermissionForUser(ctx context.Context, id platform.ID) (ps influxdb.PermissionSet, err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to delete user with ID %v", id) - l.logger.Debug(msg, zap.Error(err), dur) - return - } - l.logger.Debug("find permission for user", dur) - }(time.Now()) - return l.userService.FindPermissionForUser(ctx, id) -} - -type PasswordLogger struct { - logger *zap.Logger - pwdService influxdb.PasswordsService -} - -// NewPasswordLogger returns a logging service middleware for the Password Service. 
-func NewPasswordLogger(log *zap.Logger, s influxdb.PasswordsService) *PasswordLogger { - return &PasswordLogger{ - logger: log, - pwdService: s, - } -} - -func (l *PasswordLogger) SetPassword(ctx context.Context, userID platform.ID, password string) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to set password for user with ID %v", userID) - l.logger.Debug(msg, zap.Error(err), dur) - return - } - l.logger.Debug("set password", dur) - }(time.Now()) - return l.pwdService.SetPassword(ctx, userID, password) -} - -func (l *PasswordLogger) ComparePassword(ctx context.Context, userID platform.ID, password string) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to compare password for user with ID %v", userID) - l.logger.Debug(msg, zap.Error(err), dur) - return - } - l.logger.Debug("compare password", dur) - }(time.Now()) - return l.pwdService.ComparePassword(ctx, userID, password) -} - -func (l *PasswordLogger) CompareAndSetPassword(ctx context.Context, userID platform.ID, old, new string) (err error) { - defer func(start time.Time) { - dur := zap.Duration("took", time.Since(start)) - if err != nil { - msg := fmt.Sprintf("failed to compare and set password for user with ID %v", userID) - l.logger.Debug(msg, zap.Error(err), dur) - return - } - l.logger.Debug("compare and set password", dur) - }(time.Now()) - return l.pwdService.CompareAndSetPassword(ctx, userID, old, new) -} diff --git a/tenant/middleware_user_logging_test.go b/tenant/middleware_user_logging_test.go deleted file mode 100644 index 6ea0e24091b..00000000000 --- a/tenant/middleware_user_logging_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package tenant_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestUserLoggingService(t *testing.T) { - influxdbtesting.UserService(initBoltUserLoggingService, t) -} - -func initBoltUserLoggingService(f influxdbtesting.UserFields, t *testing.T) (influxdb.UserService, string, func()) { - svc, s, closer := initBoltUserService(f, t) - return tenant.NewUserLogger(zaptest.NewLogger(t), svc), s, closer -} diff --git a/tenant/middleware_user_metrics.go b/tenant/middleware_user_metrics.go deleted file mode 100644 index b03093becb3..00000000000 --- a/tenant/middleware_user_metrics.go +++ /dev/null @@ -1,105 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/metric" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/prometheus/client_golang/prometheus" -) - -var _ influxdb.UserService = (*UserMetrics)(nil) -var _ influxdb.PasswordsService = (*PasswordMetrics)(nil) - -type UserMetrics struct { - // RED metrics - rec *metric.REDClient - - userService influxdb.UserService -} - -// NewUserMetrics returns a metrics service middleware for the User Service. -func NewUserMetrics(reg prometheus.Registerer, s influxdb.UserService, opts ...metric.ClientOptFn) *UserMetrics { - o := metric.ApplyMetricOpts(opts...) 
- return &UserMetrics{ - rec: metric.New(reg, o.ApplySuffix("user")), - userService: s, - } -} - -func (m *UserMetrics) FindUserByID(ctx context.Context, id platform.ID) (*influxdb.User, error) { - rec := m.rec.Record("find_user_by_id") - user, err := m.userService.FindUserByID(ctx, id) - return user, rec(err) -} - -func (m *UserMetrics) FindUser(ctx context.Context, filter influxdb.UserFilter) (*influxdb.User, error) { - rec := m.rec.Record("find_user") - user, err := m.userService.FindUser(ctx, filter) - return user, rec(err) -} - -func (m *UserMetrics) FindUsers(ctx context.Context, filter influxdb.UserFilter, opt ...influxdb.FindOptions) ([]*influxdb.User, int, error) { - rec := m.rec.Record("find_users") - users, n, err := m.userService.FindUsers(ctx, filter, opt...) - return users, n, rec(err) -} - -func (m *UserMetrics) CreateUser(ctx context.Context, u *influxdb.User) error { - rec := m.rec.Record("create_user") - err := m.userService.CreateUser(ctx, u) - return rec(err) -} - -func (m *UserMetrics) UpdateUser(ctx context.Context, id platform.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { - rec := m.rec.Record("update_user") - updatedUser, err := m.userService.UpdateUser(ctx, id, upd) - return updatedUser, rec(err) -} - -func (m *UserMetrics) DeleteUser(ctx context.Context, id platform.ID) error { - rec := m.rec.Record("delete_user") - err := m.userService.DeleteUser(ctx, id) - return rec(err) -} - -func (m *UserMetrics) FindPermissionForUser(ctx context.Context, id platform.ID) (influxdb.PermissionSet, error) { - rec := m.rec.Record("find_permission_for_user") - ps, err := m.userService.FindPermissionForUser(ctx, id) - return ps, rec(err) -} - -type PasswordMetrics struct { - // RED metrics - rec *metric.REDClient - - pwdService influxdb.PasswordsService -} - -// NewPasswordMetrics returns a metrics service middleware for the Password Service. -func NewPasswordMetrics(reg prometheus.Registerer, s influxdb.PasswordsService, opts ...metric.ClientOptFn) *PasswordMetrics { - o := metric.ApplyMetricOpts(opts...) 
- return &PasswordMetrics{ - rec: metric.New(reg, o.ApplySuffix("password")), - pwdService: s, - } -} - -func (m *PasswordMetrics) SetPassword(ctx context.Context, userID platform.ID, password string) error { - rec := m.rec.Record("set_password") - err := m.pwdService.SetPassword(ctx, userID, password) - return rec(err) -} - -func (m *PasswordMetrics) ComparePassword(ctx context.Context, userID platform.ID, password string) error { - rec := m.rec.Record("compare_password") - err := m.pwdService.ComparePassword(ctx, userID, password) - return rec(err) -} - -func (m *PasswordMetrics) CompareAndSetPassword(ctx context.Context, userID platform.ID, old, new string) error { - rec := m.rec.Record("compare_and_set_password") - err := m.pwdService.CompareAndSetPassword(ctx, userID, old, new) - return rec(err) -} diff --git a/tenant/service.go b/tenant/service.go deleted file mode 100644 index 3bb0d2f99fb..00000000000 --- a/tenant/service.go +++ /dev/null @@ -1,89 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/metric" - "github.com/influxdata/influxdb/v2/label" - "github.com/influxdata/influxdb/v2/secret" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -type contextKey string - -const ( - ctxInternal contextKey = "influx/tenant/internal" -) - -func internalCtx(ctx context.Context) context.Context { - return context.WithValue(ctx, ctxInternal, true) -} - -func isInternal(ctx context.Context) bool { - _, ok := ctx.Value(ctxInternal).(bool) - return ok -} - -type Service struct { - store *Store - influxdb.UserService - influxdb.PasswordsService - influxdb.UserResourceMappingService - influxdb.OrganizationService - influxdb.BucketService -} - -func (s *Service) RLock() { - s.store.RLock() -} - -func (s *Service) RUnlock() { - s.store.RUnlock() -} - -// NewService creates a new base tenant service. -func NewService(st *Store) *Service { - svc := &Service{store: st} - userSvc := NewUserSvc(st, svc) - svc.UserService = userSvc - svc.PasswordsService = userSvc - svc.UserResourceMappingService = NewUserResourceMappingSvc(st, svc) - svc.OrganizationService = NewOrganizationSvc(st, svc) - svc.BucketService = NewBucketSvc(st, svc) - - return svc -} - -// creates a new Service with logging and metrics middleware wrappers. 
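As context for the constructor that follows: `NewSystem` below layers each embedded service as logger-around-metrics-around-store, so the zap debug logger is outermost, the RED metrics recorder sits beneath it, and the concrete store-backed service is innermost; every call is therefore counted and timed before its outcome bubbles up to the logging layer. A minimal, self-contained sketch of that layering, using hypothetical Greeter types purely for illustration (this is not part of the tenant package):

// Illustrative sketch only - hypothetical Greeter types, not tenant code.
package main

import (
	"fmt"
	"time"
)

type Greeter interface {
	Greet(name string) (string, error)
}

// baseGreeter stands in for the store-backed service at the bottom of the stack.
type baseGreeter struct{}

func (baseGreeter) Greet(name string) (string, error) { return "hello " + name, nil }

// metricsGreeter counts and times calls, in the spirit of the RED middleware.
type metricsGreeter struct {
	next  Greeter
	calls int
}

func (m *metricsGreeter) Greet(name string) (string, error) {
	m.calls++
	start := time.Now()
	out, err := m.next.Greet(name)
	_ = time.Since(start) // a real middleware would record this in a histogram
	return out, err
}

// loggingGreeter reports failures and durations, in the spirit of the *Logger middleware.
type loggingGreeter struct{ next Greeter }

func (l loggingGreeter) Greet(name string) (string, error) {
	start := time.Now()
	out, err := l.next.Greet(name)
	if err != nil {
		fmt.Printf("greet failed after %v: %v\n", time.Since(start), err)
	}
	return out, err
}

func main() {
	// Logger outermost, metrics next, concrete service innermost -
	// the same wrapping order NewSystem applies to each tenant service.
	var svc Greeter = loggingGreeter{next: &metricsGreeter{next: baseGreeter{}}}
	out, _ := svc.Greet("tenant")
	fmt.Println(out)
}

The actual constructor, which applies this ordering to the user, password, URM, org, and bucket services, follows.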
-func NewSystem(store *Store, log *zap.Logger, reg prometheus.Registerer, metricOpts ...metric.ClientOptFn) *Service { - ts := NewService(store) - ts.UserService = NewUserLogger(log, NewUserMetrics(reg, ts.UserService, metricOpts...)) - ts.PasswordsService = NewPasswordLogger(log, NewPasswordMetrics(reg, ts.PasswordsService, metricOpts...)) - ts.UserResourceMappingService = NewURMLogger(log, NewUrmMetrics(reg, ts.UserResourceMappingService, metricOpts...)) - ts.OrganizationService = NewOrgLogger(log, NewOrgMetrics(reg, ts.OrganizationService, metricOpts...)) - ts.BucketService = NewBucketLogger(log, NewBucketMetrics(reg, ts.BucketService, metricOpts...)) - - return ts -} - -func (ts *Service) NewOrgHTTPHandler(log *zap.Logger, secretSvc influxdb.SecretService) *OrgHandler { - secretHandler := secret.NewHandler(log, "id", secret.NewAuthedService(secretSvc)) - urmHandler := NewURMHandler(log.With(zap.String("handler", "urm")), influxdb.OrgsResourceType, "id", ts.UserService, NewAuthedURMService(ts.OrganizationService, ts.UserResourceMappingService)) - return NewHTTPOrgHandler(log.With(zap.String("handler", "org")), NewAuthedOrgService(ts.OrganizationService), urmHandler, secretHandler) -} - -func (ts *Service) NewBucketHTTPHandler(log *zap.Logger, labelSvc influxdb.LabelService) *BucketHandler { - urmHandler := NewURMHandler(log.With(zap.String("handler", "urm")), influxdb.BucketsResourceType, "id", ts.UserService, NewAuthedURMService(ts.OrganizationService, ts.UserResourceMappingService)) - labelHandler := label.NewHTTPEmbeddedHandler(log.With(zap.String("handler", "label")), influxdb.BucketsResourceType, labelSvc) - return NewHTTPBucketHandler(log.With(zap.String("handler", "bucket")), NewAuthedBucketService(ts.BucketService), labelSvc, urmHandler, labelHandler) -} - -func (ts *Service) NewUserHTTPHandler(log *zap.Logger) *UserHandler { - return NewHTTPUserHandler(log.With(zap.String("handler", "user")), NewAuthedUserService(ts.UserService), NewAuthedPasswordService(ts.PasswordsService)) -} - -func (ts *Service) NewMeHTTPHandler(log *zap.Logger) *MeHandler { - return NewHTTPMeHandler(log.With(zap.String("handler", "user")), NewAuthedUserService(ts.UserService), NewAuthedPasswordService(ts.PasswordsService)) -} diff --git a/tenant/service_bucket.go b/tenant/service_bucket.go deleted file mode 100644 index 461ffee7f33..00000000000 --- a/tenant/service_bucket.go +++ /dev/null @@ -1,236 +0,0 @@ -package tenant - -import ( - "context" - "fmt" - "strings" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" -) - -type BucketSvc struct { - store *Store - svc *Service -} - -func NewBucketSvc(st *Store, svc *Service) *BucketSvc { - return &BucketSvc{ - store: st, - svc: svc, - } -} - -// FindBucketByID returns a single bucket by ID. 
-func (s *BucketSvc) FindBucketByID(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - var bucket *influxdb.Bucket - err := s.store.View(ctx, func(tx kv.Tx) error { - b, err := s.store.GetBucket(ctx, tx, id) - if err != nil { - return err - } - bucket = b - return nil - }) - - if err != nil { - return nil, err - } - - return bucket, nil -} - -func (s *BucketSvc) FindBucketByName(ctx context.Context, orgID platform.ID, name string) (*influxdb.Bucket, error) { - var bucket *influxdb.Bucket - err := s.store.View(ctx, func(tx kv.Tx) error { - b, err := s.store.GetBucketByName(ctx, tx, orgID, name) - if err != nil { - return err - } - bucket = b - return nil - }) - - if err != nil { - return nil, err - } - - return bucket, nil - -} - -// FindBucket returns the first bucket that matches filter. -func (s *BucketSvc) FindBucket(ctx context.Context, filter influxdb.BucketFilter) (*influxdb.Bucket, error) { - if filter.ID != nil { - return s.FindBucketByID(ctx, *filter.ID) - } - - if filter.Name != nil && filter.OrganizationID != nil { - return s.FindBucketByName(ctx, *filter.OrganizationID, *filter.Name) - } - - buckets, _, err := s.FindBuckets(ctx, filter, influxdb.FindOptions{ - Limit: 1, - }) - - if err != nil { - return nil, err - } - - if len(buckets) < 1 { - return nil, ErrBucketNotFound - } - - return buckets[0], nil -} - -// FindBuckets returns a list of buckets that match filter and the total count of matching buckets. -// Additional options provide pagination & sorting. -func (s *BucketSvc) FindBuckets(ctx context.Context, filter influxdb.BucketFilter, opt ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - if filter.ID != nil { - b, err := s.FindBucketByID(ctx, *filter.ID) - if err != nil { - return nil, 0, err - } - return []*influxdb.Bucket{b}, 1, nil - } - if filter.OrganizationID == nil && filter.Org != nil { - org, err := s.svc.FindOrganization(ctx, influxdb.OrganizationFilter{Name: filter.Org}) - if err != nil { - return nil, 0, err - } - filter.OrganizationID = &org.ID - } - - var buckets []*influxdb.Bucket - err := s.store.View(ctx, func(tx kv.Tx) error { - if filter.Name != nil && filter.OrganizationID != nil { - b, err := s.store.GetBucketByName(ctx, tx, *filter.OrganizationID, *filter.Name) - if err != nil { - return err - } - buckets = []*influxdb.Bucket{b} - return nil - } - - bs, err := s.store.ListBuckets(ctx, tx, BucketFilter{ - Name: filter.Name, - OrganizationID: filter.OrganizationID, - }, opt...) - if err != nil { - return err - } - buckets = bs - return nil - }) - - if err != nil { - return nil, 0, err - } - - return buckets, len(buckets), nil -} - -// CreateBucket creates a new bucket and sets b.ID with the new identifier. -func (s *BucketSvc) CreateBucket(ctx context.Context, b *influxdb.Bucket) error { - if !b.OrgID.Valid() { - // we need a valid org id - return ErrOrgNotFound - } - - if err := validBucketName(b.Name, b.Type); err != nil { - return err - } - - // make sure the org exists - if _, err := s.svc.FindOrganizationByID(ctx, b.OrgID); err != nil { - return err - } - - return s.store.Update(ctx, func(tx kv.Tx) error { - return s.store.CreateBucket(ctx, tx, b) - }) -} - -// UpdateBucket updates a single bucket with changeset. -// Returns the new bucket state after update. 
-func (s *BucketSvc) UpdateBucket(ctx context.Context, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { - var bucket *influxdb.Bucket - err := s.store.Update(ctx, func(tx kv.Tx) error { - b, err := s.store.UpdateBucket(ctx, tx, id, upd) - if err != nil { - return err - } - bucket = b - return nil - }) - - if err != nil { - return nil, err - } - - return bucket, nil -} - -// DeleteBucket removes a bucket by ID. -func (s *BucketSvc) DeleteBucket(ctx context.Context, id platform.ID) error { - err := s.store.Update(ctx, func(tx kv.Tx) error { - bucket, err := s.store.GetBucket(ctx, tx, id) - if err != nil { - return err - } - if bucket.Type == influxdb.BucketTypeSystem && !isInternal(ctx) { - // TODO: I think we should allow bucket deletes but maybe im wrong. - return errDeleteSystemBucket - } - - if err := s.store.DeleteBucket(ctx, tx, id); err != nil { - return err - } - return nil - }) - if err != nil { - return err - } - return s.removeResourceRelations(ctx, id) -} - -// removeResourceRelations allows us to clean up any resource relationship that would have normally been left over after a delete action of a resource. -func (s *BucketSvc) removeResourceRelations(ctx context.Context, resourceID platform.ID) error { - urms, _, err := s.svc.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{ - ResourceID: resourceID, - }) - if err != nil { - return err - } - for _, urm := range urms { - err := s.svc.DeleteUserResourceMapping(ctx, urm.ResourceID, urm.UserID) - if err != nil && err != ErrURMNotFound { - return err - } - } - return nil -} - -// validBucketName reports any errors with bucket names -func validBucketName(name string, typ influxdb.BucketType) error { - // names starting with an underscore are reserved for system buckets - if strings.HasPrefix(name, "_") && typ != influxdb.BucketTypeSystem { - return &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("bucket name %s is invalid. Buckets may not start with underscore", name), - Op: influxdb.OpCreateBucket, - } - } - // quotation marks will cause queries to fail - if strings.Contains(name, "\"") { - return &errors.Error{ - Code: errors.EInvalid, - Msg: fmt.Sprintf("bucket name %s is invalid. 
Bucket names may not include quotation marks", name), - Op: influxdb.OpCreateBucket, - } - } - return nil -} diff --git a/tenant/service_bucket_test.go b/tenant/service_bucket_test.go deleted file mode 100644 index 252185cdde5..00000000000 --- a/tenant/service_bucket_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package tenant_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestInmemBucketService(t *testing.T) { - influxdbtesting.BucketService(initInmemBucketService, t) -} - -func initInmemBucketService(f influxdbtesting.BucketFields, t *testing.T) (influxdb.BucketService, string, func()) { - s := influxdbtesting.NewTestInmemStore(t) - return initBucketService(s, f, t) -} - -func initBucketService(s kv.SchemaStore, f influxdbtesting.BucketFields, t *testing.T) (influxdb.BucketService, string, func()) { - storage := tenant.NewStore(s) - if f.IDGenerator != nil { - storage.IDGen = f.IDGenerator - } - - if f.OrgIDs != nil { - storage.OrgIDGen = f.OrgIDs - } - - if f.BucketIDs != nil { - storage.BucketIDGen = f.BucketIDs - } - - // go direct to storage for test data - if err := s.Update(context.Background(), func(tx kv.Tx) error { - for _, o := range f.Organizations { - if err := storage.CreateOrg(tx.Context(), tx, o); err != nil { - return err - } - } - - for _, b := range f.Buckets { - if err := storage.CreateBucket(tx.Context(), tx, b); err != nil { - return err - } - } - - return nil - }); err != nil { - t.Fatalf("failed to populate organizations: %s", err) - } - - return tenant.NewService(storage), "tenant/", func() { - if err := s.Update(context.Background(), func(tx kv.Tx) error { - for _, b := range f.Buckets { - if err := storage.DeleteBucket(tx.Context(), tx, b.ID); err != nil { - return err - } - } - - for _, o := range f.Organizations { - if err := storage.DeleteOrg(tx.Context(), tx, o.ID); err != nil { - return err - } - } - - return nil - }); err != nil { - t.Logf("failed to cleanup organizations: %s", err) - } - } -} - -func TestBucketFind(t *testing.T) { - s := influxdbtesting.NewTestInmemStore(t) - - storage := tenant.NewStore(s) - svc := tenant.NewService(storage) - o := &influxdb.Organization{ - Name: "theorg", - } - - if err := svc.CreateOrganization(context.Background(), o); err != nil { - t.Fatal(err) - } - name := "thebucket" - _, _, err := svc.FindBuckets(context.Background(), influxdb.BucketFilter{ - Name: &name, - Org: &o.Name, - }) - if err.Error() != `bucket "thebucket" not found` { - t.Fatal(err) - } -} - -func TestSystemBucketsInNameFind(t *testing.T) { - s := influxdbtesting.NewTestInmemStore(t) - - storage := tenant.NewStore(s) - svc := tenant.NewService(storage) - o := &influxdb.Organization{ - Name: "theorg", - } - - if err := svc.CreateOrganization(context.Background(), o); err != nil { - t.Fatal(err) - } - b := &influxdb.Bucket{ - OrgID: o.ID, - Name: "thebucket", - } - if err := svc.CreateBucket(context.Background(), b); err != nil { - t.Fatal(err) - } - name := "thebucket" - buckets, _, _ := svc.FindBuckets(context.Background(), influxdb.BucketFilter{ - Name: &name, - Org: &o.Name, - }) - if len(buckets) != 1 { - t.Fatal("failed to return a single bucket when doing a bucket lookup by name") - } -} diff --git a/tenant/service_onboarding.go b/tenant/service_onboarding.go deleted file mode 100644 index 39d58f0133d..00000000000 --- 
a/tenant/service_onboarding.go +++ /dev/null @@ -1,192 +0,0 @@ -package tenant - -import ( - "context" - "fmt" - "strings" - - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - "go.uber.org/zap" -) - -type OnboardService struct { - service *Service - authSvc influxdb.AuthorizationService - alwaysAllow bool - log *zap.Logger -} - -type OnboardServiceOptionFn func(*OnboardService) - -// WithAlwaysAllowInitialUser configures the OnboardService to -// always return true for IsOnboarding to allow multiple -// initial onboard requests. -func WithAlwaysAllowInitialUser() OnboardServiceOptionFn { - return func(s *OnboardService) { - s.alwaysAllow = true - } -} - -func WithOnboardingLogger(logger *zap.Logger) OnboardServiceOptionFn { - return func(s *OnboardService) { - s.log = logger - } -} - -func NewOnboardService(svc *Service, as influxdb.AuthorizationService, opts ...OnboardServiceOptionFn) influxdb.OnboardingService { - s := &OnboardService{ - service: svc, - authSvc: as, - log: zap.NewNop(), - } - - for _, opt := range opts { - opt(s) - } - - return s -} - -// IsOnboarding determine if onboarding request is allowed. -func (s *OnboardService) IsOnboarding(ctx context.Context) (bool, error) { - if s.alwaysAllow { - return true, nil - } - - allowed := false - err := s.service.store.View(ctx, func(tx kv.Tx) error { - // we are allowed to onboard a user if we have no users or orgs - users, _ := s.service.store.ListUsers(ctx, tx, influxdb.FindOptions{Limit: 1}) - orgs, _ := s.service.store.ListOrgs(ctx, tx, influxdb.FindOptions{Limit: 1}) - if len(users) == 0 && len(orgs) == 0 { - allowed = true - } - return nil - }) - return allowed, err -} - -// OnboardInitialUser allows us to onboard a new user if is onboarding is allowed -func (s *OnboardService) OnboardInitialUser(ctx context.Context, req *influxdb.OnboardingRequest) (*influxdb.OnboardingResults, error) { - allowed, err := s.IsOnboarding(ctx) - if err != nil { - return nil, err - } - - if !allowed { - return nil, ErrOnboardingNotAllowed - } - - return s.onboardUser(ctx, req, func(platform.ID, platform.ID) []influxdb.Permission { return influxdb.OperPermissions() }) -} - -// onboardUser allows us to onboard new users. 
-func (s *OnboardService) onboardUser(ctx context.Context, req *influxdb.OnboardingRequest, permFn func(orgID, userID platform.ID) []influxdb.Permission) (*influxdb.OnboardingResults, error) { - if req == nil { - return nil, &errors.Error{ - Code: errors.EEmptyValue, - Msg: "onboarding failed: no request body provided", - } - } - - var missingFields []string - if req.User == "" { - missingFields = append(missingFields, "username") - } - if req.Org == "" { - missingFields = append(missingFields, "org") - } - if req.Bucket == "" { - missingFields = append(missingFields, "bucket") - } - if len(missingFields) > 0 { - return nil, &errors.Error{ - Code: errors.EUnprocessableEntity, - Msg: fmt.Sprintf("onboarding failed: missing required fields [%s]", strings.Join(missingFields, ",")), - } - } - - result := &influxdb.OnboardingResults{} - - // create a user - user := &influxdb.User{ - Name: req.User, - Status: influxdb.Active, - } - - if err := s.service.CreateUser(ctx, user); err != nil { - return nil, err - } - - // create users password - if req.Password != "" { - if err := s.service.SetPassword(ctx, user.ID, req.Password); err != nil { - // Try to clean up. - if cleanupErr := s.service.DeleteUser(ctx, user.ID); cleanupErr != nil { - s.log.Error( - "couldn't clean up user after failing to set password", - zap.String("user", user.Name), - zap.String("user_id", user.ID.String()), - zap.Error(cleanupErr), - ) - } - return nil, err - } - } - - // set the new user in the context - ctx = icontext.SetAuthorizer(ctx, &influxdb.Authorization{ - UserID: user.ID, - }) - - // create users org - org := &influxdb.Organization{ - Name: req.Org, - } - - if err := s.service.CreateOrganization(ctx, org); err != nil { - return nil, err - } - - if err := s.service.CreateUserResourceMapping(ctx, &influxdb.UserResourceMapping{ - UserID: user.ID, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.InstanceResourceType, - ResourceID: platform.ID(1), // The instance doesn't have a resourceid - }); err != nil { - return nil, err - } - - // create orgs buckets - ub := &influxdb.Bucket{ - OrgID: org.ID, - Name: req.Bucket, - Type: influxdb.BucketTypeUser, - RetentionPeriod: req.RetentionPeriod(), - } - - if err := s.service.CreateBucket(ctx, ub); err != nil { - return nil, err - } - - result.User = user - result.Org = org - result.Bucket = ub - - // bolt doesn't lock per collection or record so we have to close our transaction - // before we can reach out to the auth service. 
- result.Auth = &influxdb.Authorization{ - Description: fmt.Sprintf("%s's Token", req.User), - Permissions: permFn(result.Org.ID, result.User.ID), - Token: req.Token, - UserID: result.User.ID, - OrgID: result.Org.ID, - } - - return result, s.authSvc.CreateAuthorization(ctx, result.Auth) -} diff --git a/tenant/service_onboarding_test.go b/tenant/service_onboarding_test.go deleted file mode 100644 index e10a20288fe..00000000000 --- a/tenant/service_onboarding_test.go +++ /dev/null @@ -1,256 +0,0 @@ -package tenant_test - -import ( - "context" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/pkg/testing/assert" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" -) - -func TestBoltOnboardingService(t *testing.T) { - influxdbtesting.OnboardInitialUser(initBoltOnboardingService, t) -} - -func initBoltOnboardingService(f influxdbtesting.OnboardingFields, t *testing.T) (influxdb.OnboardingService, func()) { - s := influxdbtesting.NewTestInmemStore(t) - svc := initOnboardingService(s, f, t) - return svc, func() {} -} - -func initOnboardingService(s kv.Store, f influxdbtesting.OnboardingFields, t *testing.T) influxdb.OnboardingService { - storage := tenant.NewStore(s) - ten := tenant.NewService(storage) - - authStore, err := authorization.NewStore(s) - require.NoError(t, err) - authSvc := authorization.NewService(authStore, ten) - - // we will need an auth service as well - svc := tenant.NewOnboardService(ten, authSvc) - - ctx := context.Background() - - t.Logf("Onboarding: %v", f.IsOnboarding) - if !f.IsOnboarding { - // create a dummy so so we can no longer onboard - err := ten.CreateUser(ctx, &influxdb.User{Name: "dummy", Status: influxdb.Active}) - if err != nil { - t.Fatal(err) - } - } - - return svc -} - -func TestOnboardURM(t *testing.T) { - s := influxdbtesting.NewTestInmemStore(t) - storage := tenant.NewStore(s) - ten := tenant.NewService(storage) - - authStore, err := authorization.NewStore(s) - require.NoError(t, err) - authSvc := authorization.NewService(authStore, ten) - - svc := tenant.NewOnboardService(ten, authSvc) - - ctx := icontext.SetAuthorizer(context.Background(), &influxdb.Authorization{ - UserID: 123, - }) - - onboard, err := svc.OnboardInitialUser(ctx, &influxdb.OnboardingRequest{ - User: "name", - Org: "name", - Bucket: "name", - }) - - if err != nil { - t.Fatal(err) - } - - urms, _, err := ten.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{ResourceID: onboard.Org.ID}) - if err != nil { - t.Fatal(err) - } - - if len(urms) > 1 { - t.Fatal("additional URMs created") - } - if urms[0].UserID != onboard.User.ID { - t.Fatal("org assigned to the wrong user") - } -} - -func TestOnboardAuth(t *testing.T) { - s := influxdbtesting.NewTestInmemStore(t) - storage := tenant.NewStore(s) - ten := tenant.NewService(storage) - - authStore, err := authorization.NewStore(s) - require.NoError(t, err) - authSvc := authorization.NewService(authStore, ten) - - svc := tenant.NewOnboardService(ten, authSvc) - - ctx := icontext.SetAuthorizer(context.Background(), &influxdb.Authorization{ - UserID: 123, - }) - - onboard, err := svc.OnboardInitialUser(ctx, &influxdb.OnboardingRequest{ - User: 
"name", - Org: "name", - Bucket: "name", - }) - - if err != nil { - t.Fatal(err) - } - - auth := onboard.Auth - expectedPerm := []influxdb.Permission{ - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.AuthorizationsResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.AuthorizationsResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.BucketsResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.BucketsResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.DashboardsResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.DashboardsResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.OrgsResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.OrgsResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.SourcesResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.SourcesResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.TasksResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.TasksResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.TelegrafsResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.TelegrafsResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.UsersResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.UsersResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.VariablesResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.VariablesResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.ScraperResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.ScraperResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.SecretsResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.SecretsResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.LabelsResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.LabelsResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.ViewsResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.ViewsResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.DocumentsResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.DocumentsResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.NotificationRuleResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.NotificationRuleResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.NotificationEndpointResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.NotificationEndpointResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.ChecksResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.ChecksResourceType}}, 
- {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.DBRPResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.DBRPResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.NotebooksResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.NotebooksResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.AnnotationsResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.AnnotationsResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.RemotesResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.RemotesResourceType}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.ReplicationsResourceType}}, - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.ReplicationsResourceType}}, - } - if !cmp.Equal(auth.Permissions, expectedPerm) { - t.Fatalf("unequal permissions: \n %+v", cmp.Diff(auth.Permissions, expectedPerm)) - } - -} - -func TestOnboardService_RetentionPolicy(t *testing.T) { - s := influxdbtesting.NewTestInmemStore(t) - storage := tenant.NewStore(s) - ten := tenant.NewService(storage) - - authStore, err := authorization.NewStore(s) - require.NoError(t, err) - - authSvc := authorization.NewService(authStore, ten) - - // we will need an auth service as well - svc := tenant.NewOnboardService(ten, authSvc) - - ctx := icontext.SetAuthorizer(context.Background(), &influxdb.Authorization{ - UserID: 123, - }) - - var retention int64 = 72 * 3600 // 72h - onboard, err := svc.OnboardInitialUser(ctx, &influxdb.OnboardingRequest{ - User: "name", - Org: "name", - Bucket: "name", - RetentionPeriodSeconds: retention, - }) - - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, onboard.Bucket.RetentionPeriod, time.Duration(retention)*time.Second, "Retention policy should pass through") -} - -func TestOnboardService_RetentionPolicyDeprecated(t *testing.T) { - s := influxdbtesting.NewTestInmemStore(t) - storage := tenant.NewStore(s) - ten := tenant.NewService(storage) - - authStore, err := authorization.NewStore(s) - require.NoError(t, err) - - authSvc := authorization.NewService(authStore, ten) - - // we will need an auth service as well - svc := tenant.NewOnboardService(ten, authSvc) - - ctx := icontext.SetAuthorizer(context.Background(), &influxdb.Authorization{ - UserID: 123, - }) - - retention := 72 * time.Hour - onboard, err := svc.OnboardInitialUser(ctx, &influxdb.OnboardingRequest{ - User: "name", - Org: "name", - Bucket: "name", - RetentionPeriodDeprecated: retention, - }) - - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, onboard.Bucket.RetentionPeriod, retention, "Retention policy should pass through") -} - -func TestOnboardService_WeakPassword(t *testing.T) { - s := influxdbtesting.NewTestInmemStore(t) - storage := tenant.NewStore(s) - ten := tenant.NewService(storage) - - authStore, err := authorization.NewStore(s) - require.NoError(t, err) - - authSvc := authorization.NewService(authStore, ten) - svc := tenant.NewOnboardService(ten, authSvc) - - ctx := icontext.SetAuthorizer(context.Background(), &influxdb.Authorization{ - UserID: 123, - }) - - _, err = svc.OnboardInitialUser(ctx, &influxdb.OnboardingRequest{ - User: "name", - Password: "short", - Org: "name", - Bucket: "name", - }) - assert.Equal(t, err, tenant.EShortPassword) -} diff --git a/tenant/service_op_log.go 
b/tenant/service_op_log.go deleted file mode 100644 index 7452caf5753..00000000000 --- a/tenant/service_op_log.go +++ /dev/null @@ -1,159 +0,0 @@ -package tenant - -import ( - "context" - "encoding/json" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" -) - -const ( - orgOperationLogKeyPrefix = "org" - bucketOperationLogKeyPrefix = "bucket" - userOperationLogKeyPrefix = "user" -) - -// OpLogStore is a type which persists and reports operation log entries on a backing -// kv store transaction. -type OpLogStore interface { - AddLogEntryTx(ctx context.Context, tx kv.Tx, k, v []byte, t time.Time) error - ForEachLogEntryTx(ctx context.Context, tx kv.Tx, k []byte, opts influxdb.FindOptions, fn func([]byte, time.Time) error) error -} - -// OpLogService is a type which stores operation logs for buckets, users and orgs. -type OpLogService struct { - kv kv.Store - - opLogStore OpLogStore - - TimeGenerator influxdb.TimeGenerator -} - -// NewOpLogService constructs and configures a new op log service. -func NewOpLogService(store kv.Store, opLogStore OpLogStore) *OpLogService { - return &OpLogService{ - kv: store, - opLogStore: opLogStore, - TimeGenerator: influxdb.RealTimeGenerator{}, - } -} - -// GetOrganizationOperationLog retrieves a organization operation log. -func (s *OpLogService) GetOrganizationOperationLog(ctx context.Context, id platform.ID, opts influxdb.FindOptions) ([]*influxdb.OperationLogEntry, int, error) { - // TODO(desa): might be worthwhile to allocate a slice of size opts.Limit - log := []*influxdb.OperationLogEntry{} - - err := s.kv.View(ctx, func(tx kv.Tx) error { - key, err := encodeOrganizationOperationLogKey(id) - if err != nil { - return err - } - - return s.opLogStore.ForEachLogEntryTx(ctx, tx, key, opts, func(v []byte, t time.Time) error { - e := &influxdb.OperationLogEntry{} - if err := json.Unmarshal(v, e); err != nil { - return err - } - e.Time = t - - log = append(log, e) - - return nil - }) - }) - - if err != nil && err != kv.ErrKeyValueLogBoundsNotFound { - return nil, 0, err - } - - return log, len(log), nil -} - -// GetBucketOperationLog retrieves a buckets operation log. -func (s *OpLogService) GetBucketOperationLog(ctx context.Context, id platform.ID, opts influxdb.FindOptions) ([]*influxdb.OperationLogEntry, int, error) { - // TODO(desa): might be worthwhile to allocate a slice of size opts.Limit - log := []*influxdb.OperationLogEntry{} - - err := s.kv.View(ctx, func(tx kv.Tx) error { - key, err := encodeBucketOperationLogKey(id) - if err != nil { - return err - } - - return s.opLogStore.ForEachLogEntryTx(ctx, tx, key, opts, func(v []byte, t time.Time) error { - e := &influxdb.OperationLogEntry{} - if err := json.Unmarshal(v, e); err != nil { - return err - } - e.Time = t - - log = append(log, e) - - return nil - }) - }) - - if err != nil && err != kv.ErrKeyValueLogBoundsNotFound { - return nil, 0, err - } - - return log, len(log), nil -} - -// GetUserOperationLog retrieves a user operation log. 
-func (s *OpLogService) GetUserOperationLog(ctx context.Context, id platform.ID, opts influxdb.FindOptions) ([]*influxdb.OperationLogEntry, int, error) { - // TODO(desa): might be worthwhile to allocate a slice of size opts.Limit - log := []*influxdb.OperationLogEntry{} - - err := s.kv.View(ctx, func(tx kv.Tx) error { - key, err := encodeUserOperationLogKey(id) - if err != nil { - return err - } - - return s.opLogStore.ForEachLogEntryTx(ctx, tx, key, opts, func(v []byte, t time.Time) error { - e := &influxdb.OperationLogEntry{} - if err := json.Unmarshal(v, e); err != nil { - return err - } - e.Time = t - - log = append(log, e) - - return nil - }) - }) - - if err != nil && err != kv.ErrKeyValueLogBoundsNotFound { - return nil, 0, err - } - - return log, len(log), nil -} - -func encodeOrganizationOperationLogKey(id platform.ID) ([]byte, error) { - buf, err := id.Encode() - if err != nil { - return nil, err - } - return append([]byte(orgOperationLogKeyPrefix), buf...), nil -} - -func encodeBucketOperationLogKey(id platform.ID) ([]byte, error) { - buf, err := id.Encode() - if err != nil { - return nil, err - } - return append([]byte(bucketOperationLogKeyPrefix), buf...), nil -} - -func encodeUserOperationLogKey(id platform.ID) ([]byte, error) { - buf, err := id.Encode() - if err != nil { - return nil, err - } - return append([]byte(userOperationLogKeyPrefix), buf...), nil -} diff --git a/tenant/service_org.go b/tenant/service_org.go deleted file mode 100644 index 10b5558fe65..00000000000 --- a/tenant/service_org.go +++ /dev/null @@ -1,233 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" -) - -type OrgSvc struct { - store *Store - svc *Service -} - -func NewOrganizationSvc(st *Store, svc *Service) *OrgSvc { - return &OrgSvc{ - store: st, - svc: svc, - } -} - -// Returns a single organization by ID. -func (s *OrgSvc) FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - var org *influxdb.Organization - err := s.store.View(ctx, func(tx kv.Tx) error { - o, err := s.store.GetOrg(ctx, tx, id) - if err != nil { - return err - } - org = o - return nil - }) - - if err != nil { - return nil, err - } - - return org, nil -} - -// Returns the first organization that matches filter. -func (s *OrgSvc) FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - if filter.ID != nil { - return s.FindOrganizationByID(ctx, *filter.ID) - } - - if filter.Name == nil { - return nil, influxdb.ErrInvalidOrgFilter - } - - var org *influxdb.Organization - err := s.store.View(ctx, func(tx kv.Tx) error { - o, err := s.store.GetOrgByName(ctx, tx, *filter.Name) - if err != nil { - return err - } - org = o - return nil - }) - - if err != nil { - return nil, err - } - - return org, nil -} - -// Returns a list of organizations that match filter and the total count of matching organizations. -// Additional options provide pagination & sorting. 
-func (s *OrgSvc) FindOrganizations(ctx context.Context, filter influxdb.OrganizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Organization, int, error) { - // if im given a id or a name I know I can only return 1 - if filter.ID != nil || filter.Name != nil { - org, err := s.FindOrganization(ctx, filter) - if err != nil { - return nil, 0, err - } - return []*influxdb.Organization{org}, 1, nil - } - - var orgs []*influxdb.Organization - - if filter.UserID != nil { - // find urms for orgs with this user - urms, _, err := s.svc.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{ - UserID: *filter.UserID, - ResourceType: influxdb.OrgsResourceType, - }, opt...) - if err != nil { - return nil, 0, err - } - // find orgs by the urm's resource ids. - for _, urm := range urms { - o, err := s.FindOrganizationByID(ctx, urm.ResourceID) - if err == nil { - // if there is an error then this is a crufty urm and we should just move on - orgs = append(orgs, o) - } - } - - return orgs, len(orgs), nil - } - - err := s.store.View(ctx, func(tx kv.Tx) error { - os, err := s.store.ListOrgs(ctx, tx, opt...) - if err != nil { - return err - } - orgs = os - return nil - }) - - if err != nil { - return nil, 0, err - } - - return orgs, len(orgs), err -} - -// Creates a new organization and sets b.ID with the new identifier. -func (s *OrgSvc) CreateOrganization(ctx context.Context, o *influxdb.Organization) error { - err := s.store.Update(ctx, func(tx kv.Tx) error { - return s.store.CreateOrg(ctx, tx, o) - }) - if err != nil { - return err - } - - tb := &influxdb.Bucket{ - OrgID: o.ID, - Type: influxdb.BucketTypeSystem, - Name: influxdb.TasksSystemBucketName, - RetentionPeriod: influxdb.TasksSystemBucketRetention, - Description: "System bucket for task logs", - } - - if err := s.svc.CreateBucket(ctx, tb); err != nil { - return err - } - - mb := &influxdb.Bucket{ - OrgID: o.ID, - Type: influxdb.BucketTypeSystem, - Name: influxdb.MonitoringSystemBucketName, - RetentionPeriod: influxdb.MonitoringSystemBucketRetention, - Description: "System bucket for monitoring logs", - } - - if err := s.svc.CreateBucket(ctx, mb); err != nil { - return err - } - - // create associated URM - userID, err := icontext.GetUserID(ctx) - if err == nil && userID.Valid() { - // if I am given a userid i can associate the user as the org owner - err = s.svc.CreateUserResourceMapping(ctx, &influxdb.UserResourceMapping{ - UserID: userID, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: o.ID, - }) - if err != nil { - return err - } - } - return nil -} - -// Updates a single organization with changeset. -// Returns the new organization state after update. -func (s *OrgSvc) UpdateOrganization(ctx context.Context, id platform.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { - var org *influxdb.Organization - err := s.store.Update(ctx, func(tx kv.Tx) error { - o, err := s.store.UpdateOrg(ctx, tx, id, upd) - if err != nil { - return err - } - org = o - return nil - }) - if err != nil { - return nil, err - } - return org, nil -} - -// DeleteOrganization removes a organization by ID and its dependent resources. 
-func (s *OrgSvc) DeleteOrganization(ctx context.Context, id platform.ID) error { - // clean up the buckets for this organization - filter := influxdb.BucketFilter{ - OrganizationID: &id, - } - bs, _, err := s.svc.FindBuckets(ctx, filter) - if err != nil { - return err - } - for _, b := range bs { - if err := s.svc.DeleteBucket(internalCtx(ctx), b.ID); err != nil { - if err != ErrBucketNotFound { - return err - } - } - } - - err = s.store.Update(ctx, func(tx kv.Tx) error { - return s.store.DeleteOrg(ctx, tx, id) - }) - if err != nil { - return err - } - - return s.removeResourceRelations(ctx, id) -} - -// removeResourceRelations allows us to clean up any resource relationship that would have normally been left over after a delete action of a resource. -func (s *OrgSvc) removeResourceRelations(ctx context.Context, resourceID platform.ID) error { - urms, _, err := s.svc.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{ - ResourceID: resourceID, - }) - if err != nil { - return err - } - for _, urm := range urms { - err := s.svc.DeleteUserResourceMapping(ctx, urm.ResourceID, urm.UserID) - if err != nil && err != ErrURMNotFound { - return err - } - } - return nil -} diff --git a/tenant/service_org_test.go b/tenant/service_org_test.go deleted file mode 100644 index 5d90aaba124..00000000000 --- a/tenant/service_org_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package tenant_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestBoltOrganizationService(t *testing.T) { - influxdbtesting.OrganizationService(initBoltOrganizationService, t) -} - -func initBoltOrganizationService(f influxdbtesting.OrganizationFields, t *testing.T) (influxdb.OrganizationService, string, func()) { - s, closeBolt := influxdbtesting.NewTestBoltStore(t) - svc, op, closeSvc := initOrganizationService(s, f, t) - return svc, op, func() { - closeSvc() - closeBolt() - } -} - -func initOrganizationService(s kv.Store, f influxdbtesting.OrganizationFields, t *testing.T) (influxdb.OrganizationService, string, func()) { - storage := tenant.NewStore(s) - - if f.OrgBucketIDs != nil { - storage.OrgIDGen = f.OrgBucketIDs - storage.BucketIDGen = f.OrgBucketIDs - } - - // go direct to storage for test data - if err := s.Update(context.Background(), func(tx kv.Tx) error { - for _, o := range f.Organizations { - if err := storage.CreateOrg(tx.Context(), tx, o); err != nil { - return err - } - } - - return nil - }); err != nil { - t.Fatalf("failed to populate organizations: %s", err) - } - - return tenant.NewService(storage), "tenant/", func() { - // go direct to storage for test data - if err := s.Update(context.Background(), func(tx kv.Tx) error { - for _, o := range f.Organizations { - if err := storage.DeleteOrg(tx.Context(), tx, o.ID); err != nil { - return err - } - } - - return nil - }); err != nil { - t.Logf("failed to remove organizations: %v", err) - } - } -} diff --git a/tenant/service_test.go b/tenant/service_test.go deleted file mode 100644 index 192493fecc3..00000000000 --- a/tenant/service_test.go +++ /dev/null @@ -1,942 +0,0 @@ -package tenant_test - -import ( - "context" - "sort" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" 
- "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" -) - -type tenantFields struct { - OrgIDGenerator platform.IDGenerator - BucketIDGenerator platform.IDGenerator - Users []*influxdb.User - Passwords []string // passwords are indexed against the Users field - UserResourceMappings []*influxdb.UserResourceMapping - Organizations []*influxdb.Organization - Buckets []*influxdb.Bucket -} - -// TestBoltTenantService tests the tenant service functions. -// These tests stress the relation between the services embedded by the TenantService. -// The individual functionality of services is tested elsewhere. -func TestBoltTenantService(t *testing.T) { - tests := []struct { - name string - fn func(t *testing.T, init func(*testing.T, tenantFields) (*tenant.Service, func())) - }{ - { - name: "Create", - fn: Create, - }, - { - name: "Delete", - fn: Delete, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.fn(t, initBoltTenantService) - }) - } -} - -type bucketsByName []*influxdb.Bucket - -func (b bucketsByName) Len() int { - return len(b) -} - -func (b bucketsByName) Less(i, j int) bool { - return strings.Compare(b[i].Name, b[j].Name) < 0 -} - -func (b bucketsByName) Swap(i, j int) { - b[i], b[j] = b[j], b[i] -} - -type urmByResourceID []*influxdb.UserResourceMapping - -func (u urmByResourceID) Len() int { - return len(u) -} - -func (u urmByResourceID) Less(i, j int) bool { - return u[i].ResourceID < u[j].ResourceID -} - -func (u urmByResourceID) Swap(i, j int) { - u[i], u[j] = u[j], u[i] -} - -type urmByUserID []*influxdb.UserResourceMapping - -func (u urmByUserID) Len() int { - return len(u) -} - -func (u urmByUserID) Less(i, j int) bool { - return u[i].UserID < u[j].UserID -} - -func (u urmByUserID) Swap(i, j int) { - u[i], u[j] = u[j], u[i] -} - -// Create tests various cases of creation for the services in the TenantService. -// For example, when you create a user, do you create system buckets? How are URMs organized? -func Create(t *testing.T, init func(*testing.T, tenantFields) (*tenant.Service, func())) { - t.Helper() - - // Blank fields, we are testing creation. - fields := func() tenantFields { - return tenantFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - BucketIDGenerator: mock.NewIncrementingIDGenerator(1), - } - } - - // NOTE(affo)(*kv.Service): tests that contain s.CreateOrganization() generate error in logs: - // Failed to make user owner of organization: {"error": "could not find authorizer on context when adding user to resource type orgs"}. - // This happens because kv requires an authorization to be in context. - // This is a bad dependency pattern (store -> auth) and should not be there. - // Anyways this does not prevent the org to be created. If you add the urm manually you'll obtain the same result. - - // NOTE(affo)(*kv.Service): it also creates urms for the non existing user found in context. 
- t.Run("creating an org creates system buckets", func(t *testing.T) { - data := fields() - s, done := init(t, data) - defer done() - ctx := context.Background() - - o := &influxdb.Organization{ - // ID(1) - Name: "org1", - } - if err := s.CreateOrganization(ctx, o); err != nil { - t.Fatal(err) - } - - // Check existence - orgs, norgs, err := s.FindOrganizations(ctx, influxdb.OrganizationFilter{}) - if err != nil { - t.Fatal(err) - } - if norgs != 1 { - t.Errorf("expected 1 org, got: %v", orgs) - } - usrs, nusrs, err := s.FindUsers(ctx, influxdb.UserFilter{}) - if err != nil { - t.Fatal(err) - } - if nusrs > 0 { - t.Errorf("expected no user, got: %v", usrs) - } - urms, nurms, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{}) - if err != nil { - t.Fatal(err) - } - if nurms > 0 { - t.Errorf("expected no urm, got: %+v", urms) - } - bs, nbs, err := s.FindBuckets(ctx, influxdb.BucketFilter{}) - if err != nil { - t.Fatal(err) - } - if nbs != 2 { - t.Errorf("expected 2 buckets, got: %v", bs) - } - sort.Sort(bucketsByName(bs)) - if name := bs[0].Name; name != "_monitoring" { - t.Errorf("unexpected nam for bucket: %s", name) - } - if name := bs[1].Name; name != "_tasks" { - t.Errorf("unexpected nam for bucket: %s", name) - } - }) - - // NOTE(affo)(*kv.Service): nope, it does create system buckets with invalid OrgIDs. - t.Run("creating user creates only the user", func(t *testing.T) { - data := fields() - s, done := init(t, data) - defer done() - ctx := context.Background() - - // Number of buckets prior to user creation. - // This is because, for now, system buckets always get returned for compatibility with the old system. - _, nbs, err := s.FindBuckets(ctx, influxdb.BucketFilter{}) - if err != nil { - t.Fatal(err) - } - - u := &influxdb.User{ - ID: 1, - Name: "user1", - } - if err := s.CreateUser(ctx, u); err != nil { - t.Fatal(err) - } - - // Check existence - orgs, norgs, err := s.FindOrganizations(ctx, influxdb.OrganizationFilter{}) - if err != nil { - t.Fatal(err) - } - if norgs != 0 { - t.Errorf("expected no org, got: %v", orgs) - } - usrs, nusrs, err := s.FindUsers(ctx, influxdb.UserFilter{}) - if err != nil { - t.Fatal(err) - } - if nusrs != 1 { - t.Errorf("expected 1 user, got: %v", usrs) - } - urms, nurms, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{}) - if err != nil { - t.Fatal(err) - } - if nurms > 0 { - t.Errorf("expected no urm, got: %v", urms) - } - bs, nnbs, err := s.FindBuckets(ctx, influxdb.BucketFilter{}) - if err != nil { - t.Fatal(err) - } - // Compare new number of buckets with the one prior to user creation. - if nnbs != nbs { - t.Errorf("expected no bucket created, got: %+v", bs) - } - }) - - // NOTE(affo)(*kv.Service): nope, it does create a useless URM, no existence check. - // Apparently, system buckets are created too :thinking. - t.Run("creating urm pointing to non existing user fails", func(t *testing.T) { - data := fields() - s, done := init(t, data) - defer done() - ctx := context.Background() - - // First create an org and a user. 
- u := &influxdb.User{ - ID: 1, - Name: "user1", - } - if err := s.CreateUser(ctx, u); err != nil { - t.Fatal(err) - } - o := &influxdb.Organization{ - // ID(1) - Name: "org1", - } - if err := s.CreateOrganization(ctx, o); err != nil { - t.Fatal(err) - } - - checkInvariance := func(nurms int) { - orgs, norgs, err := s.FindOrganizations(ctx, influxdb.OrganizationFilter{}) - if err != nil { - t.Fatal(err) - } - if norgs != 1 { - t.Errorf("expected 1 org, got: %v", orgs) - } - usrs, nusrs, err := s.FindUsers(ctx, influxdb.UserFilter{}) - if err != nil { - t.Fatal(err) - } - if nusrs != 1 { - t.Errorf("expected 1 user, got: %v", usrs) - } - urms, nnurms, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{}) - if err != nil { - t.Fatal(err) - } - if nnurms != nurms { - t.Errorf("expected %d urms got %d: %+v", nurms, nnurms, urms) - } - bs, nbs, err := s.FindBuckets(ctx, influxdb.BucketFilter{}) - if err != nil { - t.Fatal(err) - } - if nbs != 2 { - t.Errorf("expected 2 buckets, got: %v", bs) - } - } - - checkInvariance(0) - - // Wrong userID. - urm := &influxdb.UserResourceMapping{ - UserID: 2, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: 1, - } - if err := s.CreateUserResourceMapping(ctx, urm); err == nil { - t.Errorf("expected error got none") - } - - checkInvariance(0) - - // Wrong orgID. The URM gets created successfully. - urm = &influxdb.UserResourceMapping{ - UserID: 1, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: 2, - } - if err := s.CreateUserResourceMapping(ctx, urm); err != nil { - t.Errorf("unexpected error: %v", err) - } - - checkInvariance(1) - }) - - // NOTE(affo)(*kv.Service): errors on bucket creation. - // But, apparently, system buckets are created too :thinking. - t.Run("should not be possible to create bucket without org", func(t *testing.T) { - data := fields() - s, done := init(t, data) - defer done() - ctx := context.Background() - - // Number of buckets prior to bucket creation. - // This is because, for now, system buckets always get returned for compatibility with the old system. - _, nbs, err := s.FindBuckets(ctx, influxdb.BucketFilter{}) - if err != nil { - t.Fatal(err) - } - - b := &influxdb.Bucket{ - // ID(1) - OrgID: 1, - Name: "bucket1", - } - if err := s.CreateBucket(ctx, b); err == nil { - t.Errorf("expected error got none") - } - - // Check existence - orgs, norgs, err := s.FindOrganizations(ctx, influxdb.OrganizationFilter{}) - if err != nil { - t.Fatal(err) - } - if norgs != 0 { - t.Errorf("expected no org, got: %v", orgs) - } - usrs, nusrs, err := s.FindUsers(ctx, influxdb.UserFilter{}) - if err != nil { - t.Fatal(err) - } - if nusrs != 0 { - t.Errorf("expected no user, got: %v", usrs) - } - urms, nurms, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{}) - if err != nil { - t.Fatal(err) - } - if nurms > 0 { - t.Errorf("expected no urm, got: %v", urms) - } - bs, nnbs, err := s.FindBuckets(ctx, influxdb.BucketFilter{}) - if err != nil { - t.Fatal(err) - } - // Compare new number of buckets with the one prior to bucket creation. 
- if nnbs != nbs { - t.Errorf("expected bucket created, got: %+v", bs) - } - }) - - t.Run("making user part of org creates mapping to org only", func(t *testing.T) { - for _, userType := range []influxdb.UserType{influxdb.Owner, influxdb.Member} { - t.Run(string(userType), func(t *testing.T) { - data := fields() - s, done := init(t, data) - defer done() - ctx := context.Background() - - u := &influxdb.User{ - ID: 1, - Name: "user1", - } - if err := s.CreateUser(ctx, u); err != nil { - t.Fatal(err) - } - o := &influxdb.Organization{ - // ID(1) - Name: "org1", - } - if err := s.CreateOrganization(ctx, o); err != nil { - t.Fatal(err) - } - urm := &influxdb.UserResourceMapping{ - UserID: u.ID, - UserType: userType, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: o.ID, - } - if err := s.CreateUserResourceMapping(ctx, urm); err != nil { - t.Fatal(err) - } - - // Check existence - orgs, norgs, err := s.FindOrganizations(ctx, influxdb.OrganizationFilter{}) - if err != nil { - t.Fatal(err) - } - if norgs != 1 { - t.Errorf("expected 1 org, got: %v", orgs) - } - usrs, nusrs, err := s.FindUsers(ctx, influxdb.UserFilter{}) - if err != nil { - t.Fatal(err) - } - if nusrs != 1 { - t.Errorf("expected 1 user, got: %v", usrs) - } - - bs, nbs, err := s.FindBuckets(ctx, influxdb.BucketFilter{}) - if err != nil { - t.Fatal(err) - } - if nbs != 2 { - t.Errorf("expected 2 buckets, got: %v", bs) - } - sort.Sort(bucketsByName(bs)) - if name := bs[0].Name; name != "_monitoring" { - t.Errorf("unexpected name for bucket: %s", name) - } - if name := bs[1].Name; name != "_tasks" { - t.Errorf("unexpected name for bucket: %v", name) - } - - urms, _, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{}) - if err != nil { - t.Fatal(err) - } - want := []*influxdb.UserResourceMapping{ - { - UserID: u.ID, - UserType: userType, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: o.ID, - }, - } - sort.Sort(urmByResourceID(want)) - sort.Sort(urmByResourceID(urms)) - if diff := cmp.Diff(want, urms); diff != "" { - t.Errorf("unexpected urms -want/+got:\n\t%s", diff) - } - - // Now add a new bucket and check the URMs. - b := &influxdb.Bucket{ - // ID(1) - OrgID: o.ID, - Name: "bucket1", - } - if err := s.CreateBucket(ctx, b); err != nil { - t.Fatal(err) - } - urms, _, err = s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{}) - if err != nil { - t.Fatal(err) - } - - sort.Sort(urmByResourceID(urms)) - if diff := cmp.Diff(want, urms); diff != "" { - t.Errorf("unexpected urms -want/+got:\n\t%s", diff) - } - }) - } - }) -} - -// Delete tests various cases of deletion for the services in the TenantService. -// An example: if you delete a bucket the corresponding user resource mapping is not present. -func Delete(t *testing.T, init func(*testing.T, tenantFields) (*tenant.Service, func())) { - t.Helper() - - fields := func() tenantFields { - return tenantFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - // URM are userID + resourceID (they do not include resource type) - // so same IDs across different resources leads to collisions - // therefore, we need to start bucket IDs at higher offset for - // test. 
- BucketIDGenerator: mock.NewIncrementingIDGenerator(10), - Users: []*influxdb.User{ - { - ID: 1, - Name: "user1", - }, - { - ID: 2, - Name: "user2", - }, - }, - Passwords: []string{"password1", "password2"}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "org1", - }, - { - // ID(2) - Name: "org2", - }, - }, - // 2 organizations create 2 system buckets each - // so start at 14 - Buckets: []*influxdb.Bucket{ - { - // ID(14) - OrgID: 1, - Name: "bucket1", - }, - { - // ID(15) - OrgID: 2, - Name: "bucket2", - }, - }, - UserResourceMappings: []*influxdb.UserResourceMapping{ - // NOTE(affo): bucket URMs should not be here, create them only for deletion purposes. - // user 1 owns org1 (and so bucket1) - { - UserID: 1, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: 1, - }, - { - UserID: 1, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.BucketsResourceType, - ResourceID: 14, - }, - // user 1 is member of org2 (and so bucket2) - { - UserID: 1, - UserType: influxdb.Member, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: 2, - }, - { - UserID: 1, - UserType: influxdb.Member, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.BucketsResourceType, - ResourceID: 15, - }, - // user 2 owns org2 (and so bucket2) - { - UserID: 2, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: 2, - }, - { - UserID: 2, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.BucketsResourceType, - ResourceID: 15, - }, - }, - } - } - - t.Run("deleting bucket deletes urm", func(t *testing.T) { - data := fields() - s, done := init(t, data) - defer done() - ctx := context.Background() - - f := influxdb.UserResourceMappingFilter{ - ResourceID: data.Buckets[0].ID, - ResourceType: influxdb.BucketsResourceType, - } - urms, n, err := s.FindUserResourceMappings(ctx, f) - if err != nil { - t.Fatal(err) - } - if n != 1 { - t.Fatalf("expected 1 urm, got: %v", urms) - } - if err := s.DeleteBucket(ctx, data.Buckets[0].ID); err != nil { - t.Fatal(err) - } - f = influxdb.UserResourceMappingFilter{ - ResourceID: data.Buckets[0].ID, - ResourceType: influxdb.BucketsResourceType, - } - urms, n, err = s.FindUserResourceMappings(ctx, f) - if err != nil { - t.Fatal(err) - } - if n > 0 { - t.Fatalf("expected no urm, got: %v", urms) - } - }) - - // NOTE(affo): those resources could not be dangling (URM could be inferred from an user being in the owner org). - // We do not want to automatically propagate this kind of delete because an resource will always have an owner org. - t.Run("deleting bucket urm does create dangling bucket", func(t *testing.T) { - data := fields() - s, done := init(t, data) - defer done() - ctx := context.Background() - - // Pre-check the current situation. - // bucket1 is owned by user1. - // Check it. 
- urms, _, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{ - ResourceType: influxdb.BucketsResourceType, - ResourceID: data.Buckets[0].ID, - }) - if err != nil { - t.Fatal(err) - } - want := []*influxdb.UserResourceMapping{ - { - UserID: data.Users[0].ID, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.BucketsResourceType, - ResourceID: data.Buckets[0].ID, - }, - } - sort.Sort(urmByUserID(want)) - sort.Sort(urmByUserID(urms)) - if diff := cmp.Diff(want, urms); diff != "" { - t.Fatalf("unexpected urms -want/+got:\n\t%s", diff) - } - // bucket2 is owned by user2. - // bucket2 is readable by user2. - // Check it. - urms, _, err = s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{ - ResourceType: influxdb.BucketsResourceType, - ResourceID: data.Buckets[1].ID, - }) - if err != nil { - t.Fatal(err) - } - want = []*influxdb.UserResourceMapping{ - { - UserID: data.Users[1].ID, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.BucketsResourceType, - ResourceID: data.Buckets[1].ID, - }, - { - UserID: data.Users[0].ID, - UserType: influxdb.Member, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.BucketsResourceType, - ResourceID: data.Buckets[1].ID, - }, - } - sort.Sort(urmByUserID(want)) - sort.Sort(urmByUserID(urms)) - if diff := cmp.Diff(want, urms); diff != "" { - t.Fatalf("unexpected urms -want/+got:\n\t%s", diff) - } - - // Now delete user2 -> bucket2. - // Still expect bucket2 to exist (user1 still points to it). - if err := s.DeleteUserResourceMapping(ctx, data.Buckets[1].ID, data.Users[1].ID); err != nil { - t.Fatal(err) - } - bs, nbs, err := s.FindBuckets(ctx, influxdb.BucketFilter{ID: &data.Buckets[1].ID}) - if err != nil { - t.Fatal(err) - } - if nbs != 1 { - t.Errorf("expected 1 buckets, got: %v", bs) - } - // Now delete user1 -> bucket2. - // Still expect bucket2 to exist (nobody points to it). - if err := s.DeleteUserResourceMapping(ctx, data.Buckets[1].ID, data.Users[0].ID); err != nil { - t.Fatal(err) - } - bs, nbs, err = s.FindBuckets(ctx, influxdb.BucketFilter{ID: &data.Buckets[1].ID}) - if err != nil { - t.Fatal(err) - } - if nbs != 1 { - t.Errorf("expected 1 buckets, got: %v", bs) - } - urms, nurms, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{ - ResourceType: influxdb.BucketsResourceType, - ResourceID: data.Buckets[1].ID, - }) - if err != nil { - t.Fatal(err) - } - if nurms != 0 { - t.Errorf("expected bucket2, to be dangling, got: %+v", urms) - } - // Now delete user1 -> bucket1. - // Still expect bucket1 to exist (nobody points to it). - if err := s.DeleteUserResourceMapping(ctx, data.Buckets[0].ID, data.Users[0].ID); err != nil { - t.Fatal(err) - } - bs, nbs, err = s.FindBuckets(ctx, influxdb.BucketFilter{ID: &data.Buckets[0].ID}) - if err != nil { - t.Fatal(err) - } - if nbs != 1 { - t.Errorf("expected 1 buckets, got: %v", bs) - } - urms, nurms, err = s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{ - ResourceType: influxdb.BucketsResourceType, - ResourceID: data.Buckets[0].ID, - }) - if err != nil { - t.Fatal(err) - } - if nurms != 0 { - t.Errorf("expected bucket1, to be dangling, got: %+v", urms) - } - }) - - t.Run("deleting a user deletes every related urm and nothing else", func(t *testing.T) { - data := fields() - s, done := init(t, data) - defer done() - ctx := context.Background() - - // bucket1 is owned by user1. - // Check it. 
- urms, _, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{ - ResourceType: influxdb.BucketsResourceType, - ResourceID: data.Buckets[0].ID, - }) - if err != nil { - t.Fatal(err) - } - want := []*influxdb.UserResourceMapping{ - { - UserID: data.Users[0].ID, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.BucketsResourceType, - ResourceID: data.Buckets[0].ID, - }, - } - sort.Sort(urmByUserID(want)) - sort.Sort(urmByUserID(urms)) - if diff := cmp.Diff(want, urms); diff != "" { - t.Fatalf("unexpected urms -want/+got:\n\t%s", diff) - } - - // Delete user1. - // We expect his urms deleted but not bucket1. - if err := s.DeleteUser(ctx, data.Users[0].ID); err != nil { - t.Fatal(err) - } - urms, nurms, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{ - UserID: data.Users[0].ID, - }) - if err != nil { - t.Fatal(err) - } - if nurms > 0 { - t.Errorf("expected that user deletion would remove dangling urms, got: %+v", urms) - } - bs, nbs, err := s.FindBuckets(ctx, influxdb.BucketFilter{ID: &data.Buckets[0].ID}) - if err != nil { - t.Fatal(err) - } - if nbs != 1 { - t.Errorf("expected 1 buckets, got: %v", bs) - } - }) - - t.Run("deleting a bucket deletes every related urm", func(t *testing.T) { - data := fields() - s, done := init(t, data) - defer done() - ctx := context.Background() - - // Delete bucket2. - // We expect its urms deleted. - if err := s.DeleteBucket(ctx, data.Buckets[1].ID); err != nil { - t.Fatal(err) - } - urms, nurms, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{ - ResourceType: influxdb.BucketsResourceType, - ResourceID: data.Buckets[1].ID, - }) - if err != nil { - t.Fatal(err) - } - if nurms > 0 { - t.Errorf("expected that bucket deletion would remove dangling urms, got: %+v", urms) - } - }) - - // NOTE(affo)(*kv.Service): buckets, users, and urms survive. - t.Run("deleting an organization should delete everything that depends on it", func(t *testing.T) { - data := fields() - s, done := init(t, data) - defer done() - ctx := context.Background() - - // Delete org1. - // We expect its buckets to be deleted. - // We expect urms to those buckets to be deleted too. - // No user should be deleted. - preDeletionBuckets, _, err := s.FindBuckets(ctx, influxdb.BucketFilter{OrganizationID: &data.Organizations[0].ID}) - if err != nil { - t.Fatal(err) - } - if err := s.DeleteOrganization(ctx, data.Organizations[0].ID); err != nil { - t.Fatal(err) - } - bs, nbs, err := s.FindBuckets(ctx, influxdb.BucketFilter{OrganizationID: &data.Organizations[0].ID}) - if err != nil { - t.Fatal(err) - } - if nbs != 0 { - t.Errorf("expected org buckets to be deleted, got: %+v", bs) - } - - urms, _, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{ - UserID: data.Users[0].ID, - ResourceType: influxdb.BucketsResourceType, - }) - if err != nil { - t.Fatal(err) - } - for _, urm := range urms { - for _, b := range preDeletionBuckets { - if urm.ResourceID == b.ID { - t.Errorf("expected this urm to be deleted, got %+v instead", urm) - } - } - } - if _, err := s.FindUser(ctx, influxdb.UserFilter{ID: &data.Users[0].ID}); err != nil { - t.Fatal(err) - } - - // Delete org2. - // Everything should disappear. 
- if err := s.DeleteOrganization(ctx, data.Organizations[1].ID); err != nil { - t.Fatal(err) - } - orgs, norgs, err := s.FindOrganizations(ctx, influxdb.OrganizationFilter{}) - if err != nil { - t.Fatal(err) - } - if norgs != 0 { - t.Errorf("expected no org, got: %v", orgs) - } - usrs, nusrs, err := s.FindUsers(ctx, influxdb.UserFilter{}) - if err != nil { - t.Fatal(err) - } - if nusrs != 2 { - t.Errorf("expected 2 users, got: %v", usrs) - } - urms, nurms, err := s.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{}) - if err != nil { - t.Fatal(err) - } - if nurms > 0 { - t.Errorf("expected no urm, got: %v", urms) - } - bs, nbs, err = s.FindBuckets(ctx, influxdb.BucketFilter{}) - if err != nil { - t.Fatal(err) - } - if nbs != 0 { - t.Errorf("expected buckets to be deleted, got: %+v", bs) - } - }) -} - -func initBoltTenantService(t *testing.T, f tenantFields) (*tenant.Service, func()) { - s, closeBolt := itesting.NewTestBoltStore(t) - store := tenant.NewStore(s) - - if f.OrgIDGenerator != nil { - store.OrgIDGen = f.OrgIDGenerator - } - - if f.BucketIDGenerator != nil { - store.BucketIDGen = f.BucketIDGenerator - } - - svc := tenant.NewService(store) - - for _, u := range f.Users { - if err := svc.CreateUser(context.Background(), u); err != nil { - t.Fatalf("error populating users: %v", err) - } - } - - for i := range f.Passwords { - if err := svc.SetPassword(context.Background(), f.Users[i].ID, f.Passwords[i]); err != nil { - t.Fatalf("error setting passsword user, %s %s: %v", f.Users[i].Name, f.Passwords[i], err) - } - } - - for _, o := range f.Organizations { - if err := svc.CreateOrganization(context.Background(), o); err != nil { - t.Fatalf("failed to populate organizations: %s", err) - } - } - - for _, b := range f.Buckets { - if err := svc.CreateBucket(context.Background(), b); err != nil { - t.Fatalf("failed to populate buckets: %s", err) - } - } - - for _, m := range f.UserResourceMappings { - if err := svc.CreateUserResourceMapping(context.Background(), m); err != nil { - t.Fatalf("failed to populate mappings: %v", err) - } - } - - return svc, func() { - for _, u := range f.Users { - if err := svc.DeleteUser(context.Background(), u.ID); err != nil { - t.Logf("error removing users: %v", err) - } - } - - for _, m := range f.UserResourceMappings { - if err := svc.DeleteUserResourceMapping(context.Background(), m.ResourceID, m.UserID); err != nil { - t.Logf("failed to remove user resource mapping: %v", err) - } - } - - for _, o := range f.Organizations { - if err := svc.DeleteOrganization(context.Background(), o.ID); err != nil { - t.Logf("failed to remove organization: %v", err) - } - } - - closeBolt() - } -} diff --git a/tenant/service_urm.go b/tenant/service_urm.go deleted file mode 100644 index 586f5d04633..00000000000 --- a/tenant/service_urm.go +++ /dev/null @@ -1,58 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" -) - -type URMSvc struct { - store *Store - svc *Service -} - -func NewUserResourceMappingSvc(st *Store, svc *Service) *URMSvc { - return &URMSvc{ - store: st, - svc: svc, - } -} - -// FindUserResourceMappings returns a list of UserResourceMappings that match filter and the total count of matching mappings. 
-func (s *URMSvc) FindUserResourceMappings(ctx context.Context, filter influxdb.UserResourceMappingFilter, opt ...influxdb.FindOptions) ([]*influxdb.UserResourceMapping, int, error) { - var urms []*influxdb.UserResourceMapping - err := s.store.View(ctx, func(tx kv.Tx) error { - u, err := s.store.ListURMs(ctx, tx, filter, opt...) - if err != nil { - return err - } - urms = u - return nil - }) - if err != nil { - return nil, 0, err - } - return urms, len(urms), nil -} - -// CreateUserResourceMapping creates a user resource mapping. -func (s *URMSvc) CreateUserResourceMapping(ctx context.Context, m *influxdb.UserResourceMapping) error { - err := s.store.Update(ctx, func(tx kv.Tx) error { - return s.store.CreateURM(ctx, tx, m) - }) - return err -} - -// DeleteUserResourceMapping deletes a user resource mapping. -func (s *URMSvc) DeleteUserResourceMapping(ctx context.Context, resourceID, userID platform.ID) error { - err := s.store.Update(ctx, func(tx kv.Tx) error { - _, err := s.store.GetURM(ctx, tx, resourceID, userID) - if err != nil { - return ErrURMNotFound - } - return s.store.DeleteURM(ctx, tx, resourceID, userID) - }) - return err -} diff --git a/tenant/service_urm_test.go b/tenant/service_urm_test.go deleted file mode 100644 index f5845960e98..00000000000 --- a/tenant/service_urm_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package tenant_test - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestBoltUserResourceMappingService(t *testing.T) { - influxdbtesting.UserResourceMappingService(initBoltUserResourceMappingService, t) -} - -func initBoltUserResourceMappingService(f influxdbtesting.UserResourceFields, t *testing.T) (influxdb.UserResourceMappingService, func()) { - s, closeBolt := influxdbtesting.NewTestBoltStore(t) - svc, closeSvc := initUserResourceMappingService(s, f, t) - return svc, func() { - closeSvc() - closeBolt() - } -} - -func initUserResourceMappingService(s kv.Store, f influxdbtesting.UserResourceFields, t *testing.T) (influxdb.UserResourceMappingService, func()) { - var ( - storage = tenant.NewStore(s) - svc = tenant.NewService(storage) - ) - - // Create resources before mappings. - - for _, u := range f.Users { - if err := svc.CreateUser(context.Background(), u); err != nil { - t.Fatalf("error populating users: %v", err) - } - } - - withID := func(gen *platform.IDGenerator, id platform.ID, fn func()) { - idGen := *gen - defer func() { *gen = idGen }() - - if id.Valid() { - *gen = mock.NewStaticIDGenerator(id) - } - - fn() - } - - for _, o := range f.Organizations { - withID(&storage.OrgIDGen, o.ID, func() { - if err := svc.CreateOrganization(context.Background(), o); err != nil { - t.Fatalf("failed to populate organizations: %s", err) - } - }) - } - - for _, b := range f.Buckets { - withID(&storage.BucketIDGen, b.ID, func() { - if err := svc.CreateBucket(context.Background(), b); err != nil { - t.Fatalf("failed to populate buckets: %s", err) - } - }) - } - - // Now create mappings. 
- - for _, m := range f.UserResourceMappings { - if err := svc.CreateUserResourceMapping(context.Background(), m); err != nil { - t.Fatalf("failed to populate mappings: %v", err) - } - } - - return svc, func() { - for _, u := range f.Users { - if err := svc.DeleteUser(context.Background(), u.ID); err != nil { - t.Logf("error removing users: %v", err) - } - } - - for _, m := range f.UserResourceMappings { - if err := svc.DeleteUserResourceMapping(context.Background(), m.ResourceID, m.UserID); err != nil { - t.Logf("failed to remove user resource mapping: %v", err) - } - } - - for _, o := range f.Organizations { - if err := svc.DeleteOrganization(context.Background(), o.ID); err != nil { - t.Logf("failed to remove organization: %v", err) - } - } - } -} diff --git a/tenant/service_user.go b/tenant/service_user.go deleted file mode 100644 index 3236d8ca6e7..00000000000 --- a/tenant/service_user.go +++ /dev/null @@ -1,264 +0,0 @@ -package tenant - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - "golang.org/x/crypto/bcrypt" -) - -type UserSvc struct { - store *Store - svc *Service -} - -func NewUserSvc(st *Store, svc *Service) *UserSvc { - return &UserSvc{ - store: st, - svc: svc, - } -} - -// Returns a single user by ID. -func (s *UserSvc) FindUserByID(ctx context.Context, id platform.ID) (*influxdb.User, error) { - var user *influxdb.User - err := s.store.View(ctx, func(tx kv.Tx) error { - u, err := s.store.GetUser(ctx, tx, id) - if err != nil { - return err - } - user = u - return nil - }) - - if err != nil { - return nil, err - } - - return user, nil -} - -// Returns the first user that matches filter. -func (s *UserSvc) FindUser(ctx context.Context, filter influxdb.UserFilter) (*influxdb.User, error) { - // if im given no filters its not a valid find user request. (leaving it unchecked seems dangerous) - if filter.ID == nil && filter.Name == nil { - return nil, ErrUserNotFound - } - - if filter.ID != nil { - return s.FindUserByID(ctx, *filter.ID) - } - - var user *influxdb.User - err := s.store.View(ctx, func(tx kv.Tx) error { - u, err := s.store.GetUserByName(ctx, tx, *filter.Name) - if err != nil { - return err - } - user = u - return nil - }) - if err != nil { - return nil, err - } - - return user, nil -} - -// Returns a list of users that match filter and the total count of matching users. -// Additional options provide pagination & sorting. { -func (s *UserSvc) FindUsers(ctx context.Context, filter influxdb.UserFilter, opt ...influxdb.FindOptions) ([]*influxdb.User, int, error) { - // if a id is provided we will reroute to findUserByID - if filter.ID != nil { - user, err := s.FindUserByID(ctx, *filter.ID) - if err != nil { - return nil, 0, err - } - return []*influxdb.User{user}, 1, nil - } - - // if a name is provided we will reroute to findUser with a name filter - if filter.Name != nil { - user, err := s.FindUser(ctx, filter) - if err != nil { - return nil, 0, err - } - return []*influxdb.User{user}, 1, nil - } - - var users []*influxdb.User - err := s.store.View(ctx, func(tx kv.Tx) error { - us, err := s.store.ListUsers(ctx, tx, opt...) - if err != nil { - return err - } - users = us - return nil - }) - if err != nil { - return nil, 0, err - } - - return users, len(users), nil -} - -// Creates a new user and sets u.ID with the new identifier. 
-func (s *UserSvc) CreateUser(ctx context.Context, u *influxdb.User) error { - err := s.store.Update(ctx, func(tx kv.Tx) error { - return s.store.CreateUser(ctx, tx, u) - }) - - return err -} - -// Updates a single user with changeset. -// Returns the new user state after update. { -func (s *UserSvc) UpdateUser(ctx context.Context, id platform.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { - var user *influxdb.User - err := s.store.Update(ctx, func(tx kv.Tx) error { - u, err := s.store.UpdateUser(ctx, tx, id, upd) - if err != nil { - return err - } - user = u - return nil - }) - if err != nil { - return nil, err - } - - return user, nil -} - -// Removes a user by ID. -func (s *UserSvc) DeleteUser(ctx context.Context, id platform.ID) error { - err := s.store.Update(ctx, func(tx kv.Tx) error { - err := s.store.DeletePassword(ctx, tx, id) - if err != nil { - return err - } - return s.store.DeleteUser(ctx, tx, id) - }) - return err -} - -// FindPermissionForUser gets the full set of permission for a specified user id -func (s *UserSvc) FindPermissionForUser(ctx context.Context, uid platform.ID) (influxdb.PermissionSet, error) { - mappings, _, err := s.svc.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{UserID: uid}, influxdb.FindOptions{Limit: 100}) - if err != nil { - return nil, err - } - - permissions, err := permissionFromMapping(mappings) - if err != nil { - return nil, err - } - - if len(mappings) >= 100 { - // if we got 100 mappings we probably need to pull more pages - // account for paginated results - for i := len(mappings); len(mappings) > 0; i += len(mappings) { - mappings, _, err = s.svc.FindUserResourceMappings(ctx, influxdb.UserResourceMappingFilter{UserID: uid}, influxdb.FindOptions{Offset: i, Limit: 100}) - if err != nil { - return nil, err - } - pms, err := permissionFromMapping(mappings) - if err != nil { - return nil, err - } - permissions = append(permissions, pms...) - } - } - - permissions = append(permissions, influxdb.MePermissions(uid)...) - return permissions, nil -} - -// SetPassword overrides the password of a known user. -func (s *UserSvc) SetPassword(ctx context.Context, userID platform.ID, password string) error { - if len(password) < MinPasswordLen { - return EShortPassword - } - passHash, err := encryptPassword(password) - if err != nil { - return err - } - // set password - return s.store.Update(ctx, func(tx kv.Tx) error { - _, err := s.store.GetUser(ctx, tx, userID) - if err != nil { - return EIncorrectUser - } - return s.store.SetPassword(ctx, tx, userID, passHash) - }) -} - -// ComparePassword checks if the password matches the password recorded. -// Passwords that do not match return errors. -func (s *UserSvc) ComparePassword(ctx context.Context, userID platform.ID, password string) error { - // get password - var hash []byte - err := s.store.View(ctx, func(tx kv.Tx) error { - - _, err := s.store.GetUser(ctx, tx, userID) - if err != nil { - return EIncorrectUser - } - h, err := s.store.GetPassword(ctx, tx, userID) - if err != nil { - if err == kv.ErrKeyNotFound { - return EIncorrectPassword - } - return err - } - hash = []byte(h) - return nil - }) - if err != nil { - return err - } - // compare password - if err := bcrypt.CompareHashAndPassword(hash, []byte(password)); err != nil { - return EIncorrectPassword - } - - return nil -} - -// CompareAndSetPassword checks the password and if they match -// updates to the new password. 
-func (s *UserSvc) CompareAndSetPassword(ctx context.Context, userID platform.ID, old, new string) error { - err := s.ComparePassword(ctx, userID, old) - if err != nil { - return err - } - - return s.SetPassword(ctx, userID, new) -} - -func encryptPassword(password string) (string, error) { - passHash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) - if err != nil { - return "", err - } - return string(passHash), nil -} - -func permissionFromMapping(mappings []*influxdb.UserResourceMapping) ([]influxdb.Permission, error) { - ps := make([]influxdb.Permission, 0, len(mappings)) - for _, m := range mappings { - p, err := m.ToPermissions() - if err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - ps = append(ps, p...) - } - - return ps, nil -} diff --git a/tenant/service_user_test.go b/tenant/service_user_test.go deleted file mode 100644 index a1106083ec9..00000000000 --- a/tenant/service_user_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package tenant_test - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/tenant" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestBoltUserService(t *testing.T) { - influxdbtesting.UserService(initBoltUserService, t) -} - -func initBoltUserService(f influxdbtesting.UserFields, t *testing.T) (influxdb.UserService, string, func()) { - s, closeBolt := influxdbtesting.NewTestBoltStore(t) - svc, op, closeSvc := initUserService(s, f, t) - return svc, op, func() { - closeSvc() - closeBolt() - } -} - -func initUserService(s kv.Store, f influxdbtesting.UserFields, t *testing.T) (influxdb.UserService, string, func()) { - storage := tenant.NewStore(s) - svc := tenant.NewService(storage) - - for _, u := range f.Users { - if err := svc.CreateUser(context.Background(), u); err != nil { - t.Fatalf("failed to populate users") - } - } - - return svc, "tenant", func() { - for _, u := range f.Users { - if err := svc.DeleteUser(context.Background(), u.ID); err != nil { - t.Logf("failed to remove users: %v", err) - } - } - } -} - -func TestBoltPasswordService(t *testing.T) { - influxdbtesting.PasswordsService(initBoltPasswordsService, t) -} - -func initBoltPasswordsService(f influxdbtesting.PasswordFields, t *testing.T) (influxdb.PasswordsService, func()) { - s, closeStore := influxdbtesting.NewTestBoltStore(t) - svc, closeSvc := initPasswordsService(s, f, t) - return svc, func() { - closeSvc() - closeStore() - } -} - -func initPasswordsService(s kv.Store, f influxdbtesting.PasswordFields, t *testing.T) (influxdb.PasswordsService, func()) { - storage := tenant.NewStore(s) - svc := tenant.NewService(storage) - - for _, u := range f.Users { - if err := svc.CreateUser(context.Background(), u); err != nil { - t.Fatalf("error populating users: %v", err) - } - } - - for i := range f.Passwords { - if err := svc.SetPassword(context.Background(), f.Users[i].ID, f.Passwords[i]); err != nil { - t.Fatalf("error setting passsword user, %s %s: %v", f.Users[i].Name, f.Passwords[i], err) - } - } - - return svc, func() { - for _, u := range f.Users { - if err := svc.DeleteUser(context.Background(), u.ID); err != nil { - t.Logf("error removing users: %v", err) - } - } - } -} - -func TestFindPermissionsFromUser(t *testing.T) { - s := influxdbtesting.NewTestInmemStore(t) - storage := tenant.NewStore(s) - svc := 
tenant.NewService(storage) - - // createUser - u := &influxdb.User{ - Name: "rockstar", - Status: influxdb.Active, - } - - if err := svc.CreateUser(context.Background(), u); err != nil { - t.Fatal(err) - } - - ctx := context.Background() - - // createSomeURMS - err := svc.CreateUserResourceMapping(ctx, &influxdb.UserResourceMapping{ - UserID: u.ID, - UserType: influxdb.Member, - ResourceType: influxdb.OrgsResourceType, - ResourceID: 1, - }) - if err != nil { - t.Fatal(err) - } - - err = svc.CreateUserResourceMapping(ctx, &influxdb.UserResourceMapping{ - UserID: u.ID, - UserType: influxdb.Owner, - ResourceType: influxdb.BucketsResourceType, - ResourceID: 2, - }) - if err != nil { - t.Fatal(err) - } - // pull the permissions for this user - perms, err := svc.FindPermissionForUser(ctx, u.ID) - if err != nil { - t.Fatal(err) - } - - orgID := platform.ID(1) - expected := influxdb.PermissionSet{ - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.AuthorizationsResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.BucketsResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.DashboardsResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{ID: &orgID, Type: influxdb.OrgsResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.SourcesResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.TasksResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.TelegrafsResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.UsersResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.VariablesResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.ScraperResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.SecretsResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.LabelsResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.ViewsResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.DocumentsResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.NotificationRuleResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.NotificationEndpointResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.ChecksResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.DBRPResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.NotebooksResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: 
influxdb.AnnotationsResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.RemotesResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{OrgID: &orgID, Type: influxdb.ReplicationsResourceType}}, - influxdb.Permission{Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.UsersResourceType, ID: &u.ID}}, - influxdb.Permission{Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.UsersResourceType, ID: &u.ID}}, - } - if !cmp.Equal(perms, expected) { - t.Fatalf("inequal response for find params %+v", cmp.Diff(perms, expected)) - } -} diff --git a/tenant/storage.go b/tenant/storage.go deleted file mode 100644 index 8f989826fcd..00000000000 --- a/tenant/storage.go +++ /dev/null @@ -1,113 +0,0 @@ -package tenant - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/rand" - "github.com/influxdata/influxdb/v2/snowflake" - "github.com/influxdata/influxdb/v2/tenant/index" -) - -const MaxIDGenerationN = 100 - -type Store struct { - kvStore kv.Store - IDGen platform.IDGenerator - OrgIDGen platform.IDGenerator - BucketIDGen platform.IDGenerator - - now func() time.Time - - urmByUserIndex *kv.Index -} - -type StoreOption func(*Store) - -func NewStore(kvStore kv.Store, opts ...StoreOption) *Store { - store := &Store{ - kvStore: kvStore, - IDGen: snowflake.NewDefaultIDGenerator(), - OrgIDGen: rand.NewOrgBucketID(time.Now().UnixNano()), - BucketIDGen: rand.NewOrgBucketID(time.Now().UnixNano()), - now: func() time.Time { - return time.Now().UTC() - }, - urmByUserIndex: kv.NewIndex(index.URMByUserIndexMapping, kv.WithIndexReadPathEnabled), - } - - for _, opt := range opts { - opt(store) - } - - return store -} - -func (s *Store) RLock() { - s.kvStore.RLock() -} - -func (s *Store) RUnlock() { - s.kvStore.RUnlock() -} - -// View opens up a transaction that will not write to any data. Implementing interfaces -// should take care to ensure that all view transactions do not mutate any data. -func (s *Store) View(ctx context.Context, fn func(kv.Tx) error) error { - return s.kvStore.View(ctx, fn) -} - -// Update opens up a transaction that will mutate data. -func (s *Store) Update(ctx context.Context, fn func(kv.Tx) error) error { - return s.kvStore.Update(ctx, fn) -} - -// generateSafeID attempts to create ids for buckets -// and orgs that are without backslash, commas, and spaces, BUT ALSO do not already exist. 
-func (s *Store) generateSafeID(ctx context.Context, tx kv.Tx, bucket []byte, gen platform.IDGenerator) (platform.ID, error) { - for i := 0; i < MaxIDGenerationN; i++ { - id := gen.ID() - - err := s.uniqueID(ctx, tx, bucket, id) - if err == nil { - return id, nil - } - - if err == ErrIDNotUnique { - continue - } - - return platform.InvalidID(), err - } - - return platform.InvalidID(), ErrFailureGeneratingID -} - -func (s *Store) uniqueID(ctx context.Context, tx kv.Tx, bucket []byte, id platform.ID) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - encodedID, err := id.Encode() - if err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - b, err := tx.Bucket(bucket) - if err != nil { - return err - } - - _, err = b.Get(encodedID) - if kv.IsNotFound(err) { - return nil - } - - return ErrIDNotUnique -} diff --git a/tenant/storage_bucket.go b/tenant/storage_bucket.go deleted file mode 100644 index 96c7c93c91b..00000000000 --- a/tenant/storage_bucket.go +++ /dev/null @@ -1,455 +0,0 @@ -package tenant - -import ( - "context" - "encoding/json" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" -) - -var ( - bucketBucket = []byte("bucketsv1") - bucketIndex = []byte("bucketindexv1") -) - -func bucketIndexKey(o platform.ID, name string) ([]byte, error) { - orgID, err := o.Encode() - - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - k := make([]byte, platform.IDLength+len(name)) - copy(k, orgID) - copy(k[platform.IDLength:], name) - return k, nil -} - -// uniqueBucketName ensures this bucket is unique for this organization -func (s *Store) uniqueBucketName(ctx context.Context, tx kv.Tx, oid platform.ID, uname string) error { - key, err := bucketIndexKey(oid, uname) - if err != nil { - return err - } - if len(key) == 0 { - return ErrNameisEmpty - } - - idx, err := tx.Bucket(bucketIndex) - if err != nil { - return err - } - - _, err = idx.Get(key) - // if not found then this is _unique_. 
- if kv.IsNotFound(err) { - return nil - } - - // no error means this is not unique - if err == nil { - return BucketAlreadyExistsError(uname) - } - - // any other error is some sort of internal server error - return ErrInternalServiceError(err) -} - -func unmarshalBucket(v []byte) (*influxdb.Bucket, error) { - u := &influxdb.Bucket{} - if err := json.Unmarshal(v, u); err != nil { - return nil, ErrCorruptBucket(err) - } - - return u, nil -} - -func marshalBucket(u *influxdb.Bucket) ([]byte, error) { - v, err := json.Marshal(u) - if err != nil { - return nil, ErrUnprocessableBucket(err) - } - - return v, nil -} - -func (s *Store) GetBucket(ctx context.Context, tx kv.Tx, id platform.ID) (*influxdb.Bucket, error) { - encodedID, err := id.Encode() - if err != nil { - return nil, InvalidOrgIDError(err) - } - - b, err := tx.Bucket(bucketBucket) - if err != nil { - return nil, err - } - - v, err := b.Get(encodedID) - if kv.IsNotFound(err) { - return nil, ErrBucketNotFound - } - - if err != nil { - return nil, ErrInternalServiceError(err) - } - - return unmarshalBucket(v) -} - -func (s *Store) GetBucketByName(ctx context.Context, tx kv.Tx, orgID platform.ID, n string) (*influxdb.Bucket, error) { - key, err := bucketIndexKey(orgID, n) - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - idx, err := tx.Bucket(bucketIndex) - if err != nil { - return nil, err - } - - buf, err := idx.Get(key) - - // allow for hard coded bucket names that dont exist in the system - if kv.IsNotFound(err) { - return nil, ErrBucketNotFoundByName(n) - } - - if err != nil { - return nil, err - } - - var id platform.ID - if err := id.Decode(buf); err != nil { - return nil, &errors.Error{ - Err: err, - } - } - return s.GetBucket(ctx, tx, id) -} - -type BucketFilter struct { - Name *string - OrganizationID *platform.ID -} - -func (s *Store) ListBuckets(ctx context.Context, tx kv.Tx, filter BucketFilter, opt ...influxdb.FindOptions) ([]*influxdb.Bucket, error) { - // this isn't a list action its a `GetBucketByName` - if (filter.OrganizationID != nil && filter.OrganizationID.Valid()) && filter.Name != nil { - return nil, invalidBucketListRequest - } - - if len(opt) == 0 { - opt = append(opt, influxdb.FindOptions{}) - } - o := opt[0] - - // if an organization is passed we need to use the index - if filter.OrganizationID != nil { - return s.listBucketsByOrg(ctx, tx, *filter.OrganizationID, o) - } - - b, err := tx.Bucket(bucketBucket) - if err != nil { - return nil, err - } - - var opts []kv.CursorOption - if o.Descending { - opts = append(opts, kv.WithCursorDirection(kv.CursorDescending)) - } - - var seek []byte - if o.After != nil { - after := (*o.After) + 1 - seek, err = after.Encode() - if err != nil { - return nil, err - } - } - - cursor, err := b.ForwardCursor(seek, opts...) 
- if err != nil { - return nil, err - } - defer cursor.Close() - - count := 0 - bs := []*influxdb.Bucket{} - for k, v := cursor.Next(); k != nil; k, v = cursor.Next() { - if o.Offset != 0 && count < o.Offset { - count++ - continue - } - b, err := unmarshalBucket(v) - if err != nil { - return nil, err - } - - // check to see if it matches the filter - if filter.Name == nil || (*filter.Name == b.Name) { - bs = append(bs, b) - } - - if o.Limit != 0 && len(bs) >= o.Limit { - break - } - } - - return bs, cursor.Err() -} - -func (s *Store) listBucketsByOrg(ctx context.Context, tx kv.Tx, orgID platform.ID, o influxdb.FindOptions) ([]*influxdb.Bucket, error) { - // get the prefix key (org id with an empty name) - key, err := bucketIndexKey(orgID, "") - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - idx, err := tx.Bucket(bucketIndex) - if err != nil { - return nil, err - } - - start := key - opts := []kv.CursorOption{kv.WithCursorPrefix(key)} - if o.Descending { - // To list in descending order, we have to find the last entry prefixed - // by the org ID. AFAICT the only way to do this given our current indexing - // scheme is to iterate through all entries in the org once, remembering the - // last-seen key. - start, err = func() ([]byte, error) { - cursor, err := idx.ForwardCursor(start, opts...) - if err != nil { - return nil, err - } - defer cursor.Close() - - lastKey := start - for k, _ := cursor.Next(); k != nil; k, _ = cursor.Next() { - lastKey = k - } - return lastKey, nil - }() - if err != nil { - return nil, err - } - // Once we've found the end, walk backwards from it on the next iteration. - opts = append(opts, kv.WithCursorDirection(kv.CursorDescending)) - } - cursor, err := idx.ForwardCursor(start, opts...) 
- if err != nil { - return nil, err - } - defer cursor.Close() - - count := 0 - bs := []*influxdb.Bucket{} - searchingForAfter := o.After != nil - for k, v := cursor.Next(); k != nil; k, v = cursor.Next() { - if o.Offset != 0 && count < o.Offset { - count++ - continue - } - - if err != nil { - return nil, err - } - - var id platform.ID - if err := id.Decode(v); err != nil { - return nil, &errors.Error{ - Err: err, - } - } - if searchingForAfter { - searchingForAfter = id != *o.After - continue - } - b, err := s.GetBucket(ctx, tx, id) - if err != nil { - return nil, err - } - - bs = append(bs, b) - - if o.Limit != 0 && len(bs) >= o.Limit { - break - } - } - - return bs, cursor.Err() -} - -func (s *Store) CreateBucket(ctx context.Context, tx kv.Tx, bucket *influxdb.Bucket) (err error) { - // generate new bucket ID - bucket.ID, err = s.generateSafeID(ctx, tx, bucketBucket, s.BucketIDGen) - if err != nil { - return err - } - - encodedID, err := bucket.ID.Encode() - if err != nil { - return InvalidOrgIDError(err) - } - - if err := s.uniqueBucketName(ctx, tx, bucket.OrgID, bucket.Name); err != nil { - return err - } - - bucket.SetCreatedAt(s.now()) - bucket.SetUpdatedAt(s.now()) - idx, err := tx.Bucket(bucketIndex) - if err != nil { - return err - } - - b, err := tx.Bucket(bucketBucket) - if err != nil { - return err - } - - v, err := marshalBucket(bucket) - if err != nil { - return err - } - - ikey, err := bucketIndexKey(bucket.OrgID, bucket.Name) - if err != nil { - return err - } - - if err := idx.Put(ikey, encodedID); err != nil { - return ErrInternalServiceError(err) - } - - if err := b.Put(encodedID, v); err != nil { - return ErrInternalServiceError(err) - } - - return nil -} - -func (s *Store) UpdateBucket(ctx context.Context, tx kv.Tx, id platform.ID, upd influxdb.BucketUpdate) (*influxdb.Bucket, error) { - encodedID, err := id.Encode() - if err != nil { - return nil, err - } - - bucket, err := s.GetBucket(ctx, tx, id) - if err != nil { - return nil, err - } - - bucket.SetUpdatedAt(s.now()) - if upd.Name != nil && bucket.Name != *upd.Name { - // validation - if bucket.Type == influxdb.BucketTypeSystem { - return nil, errRenameSystemBucket - } - - if err := validBucketName(*upd.Name, bucket.Type); err != nil { - return nil, err - } - - if err := s.uniqueBucketName(ctx, tx, bucket.OrgID, *upd.Name); err != nil { - return nil, ErrBucketNameNotUnique - } - - idx, err := tx.Bucket(bucketIndex) - if err != nil { - return nil, err - } - - oldIkey, err := bucketIndexKey(bucket.OrgID, bucket.Name) - if err != nil { - return nil, err - } - - if err := idx.Delete(oldIkey); err != nil { - return nil, ErrInternalServiceError(err) - } - - bucket.Name = *upd.Name - newIkey, err := bucketIndexKey(bucket.OrgID, bucket.Name) - if err != nil { - return nil, err - } - - if err := idx.Put(newIkey, encodedID); err != nil { - return nil, ErrInternalServiceError(err) - } - } - - if upd.Description != nil { - bucket.Description = *upd.Description - } - - if upd.RetentionPeriod != nil { - bucket.RetentionPeriod = *upd.RetentionPeriod - } - if upd.ShardGroupDuration != nil { - bucket.ShardGroupDuration = *upd.ShardGroupDuration - } - - v, err := marshalBucket(bucket) - if err != nil { - return nil, err - } - - b, err := tx.Bucket(bucketBucket) - if err != nil { - return nil, err - } - if err := b.Put(encodedID, v); err != nil { - return nil, ErrInternalServiceError(err) - } - - return bucket, nil -} - -func (s *Store) DeleteBucket(ctx context.Context, tx kv.Tx, id platform.ID) error { - bucket, err := 
s.GetBucket(ctx, tx, id) - if err != nil { - return err - } - - encodedID, err := id.Encode() - if err != nil { - return InvalidOrgIDError(err) - } - - idx, err := tx.Bucket(bucketIndex) - if err != nil { - return err - } - - ikey, err := bucketIndexKey(bucket.OrgID, bucket.Name) - if err != nil { - return err - } - if err := idx.Delete(ikey); err != nil { - return ErrInternalServiceError(err) - } - - b, err := tx.Bucket(bucketBucket) - if err != nil { - return err - } - - if err := b.Delete(encodedID); err != nil { - return ErrInternalServiceError(err) - } - - return nil -} diff --git a/tenant/storage_bucket_test.go b/tenant/storage_bucket_test.go deleted file mode 100644 index 1777d9d5852..00000000000 --- a/tenant/storage_bucket_test.go +++ /dev/null @@ -1,420 +0,0 @@ -package tenant_test - -import ( - "context" - "fmt" - "reflect" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// type Bucket struct { -// ID ID `json:"id,omitempty"` -// OrgID ID `json:"bucketID,omitempty"` -// Type BucketType `json:"type"` -// Name string `json:"name"` -// Description string `json:"description"` -// RetentionPolicyName string `json:"rp,omitempty"` // This to support v1 sources -// RetentionPeriod time.Duration `json:"retentionPeriod"` -// CRUDLog -// } - -const ( - firstBucketID platform.ID = (iota + 1) - secondBucketID - thirdBucketID - fourthBucketID - fifthBucketID -) - -var orgIDs = []platform.ID{firstOrgID, secondOrgID} - -func TestBucket(t *testing.T) { - var ( - aTime = time.Date(2020, 7, 23, 10, 0, 0, 0, time.UTC) - // generate 10 buckets to test with - // optionally provide a visit function to manipulate - // the generated slice (for convenience) - testBuckets = func(count int, visit ...func(*influxdb.Bucket)) (buckets []*influxdb.Bucket) { - buckets = make([]*influxdb.Bucket, count) - for i := range buckets { - id := firstBucketID + platform.ID(i) - // flip-flop between (reserved_id + reserved_id+1) - orgID := orgIDs[i%2] - buckets[i] = &influxdb.Bucket{ - ID: id, - OrgID: orgID, - Name: fmt.Sprintf("bucket%d", int(id)), - Description: "words", - RetentionPolicyName: "name", - RetentionPeriod: time.Second, - } - - for _, fn := range visit { - fn(buckets[i]) - } - } - return - } - withCrudLog = func(bkt *influxdb.Bucket) { - bkt.CRUDLog = influxdb.CRUDLog{ - CreatedAt: aTime, - UpdatedAt: aTime, - } - } - ) - - simpleSetup := func(t *testing.T, store *tenant.Store, tx kv.Tx) { - store.BucketIDGen = mock.NewIncrementingIDGenerator(1) - for _, bucket := range testBuckets(10) { - err := store.CreateBucket(context.Background(), tx, bucket) - if err != nil { - t.Fatal(err) - } - } - } - - over20Setup := func(t *testing.T, store *tenant.Store, tx kv.Tx) { - store.BucketIDGen = mock.NewIncrementingIDGenerator(1) - for _, bucket := range testBuckets(24) { - err := store.CreateBucket(context.Background(), tx, bucket) - if err != nil { - t.Fatal(err) - } - } - } - - st := []struct { - name string - setup func(*testing.T, *tenant.Store, kv.Tx) - update func(*testing.T, *tenant.Store, kv.Tx) - results func(*testing.T, *tenant.Store, kv.Tx) - }{ - { - name: "create", - setup: simpleSetup, - results: func(t *testing.T, 
store *tenant.Store, tx kv.Tx) { - buckets, err := store.ListBuckets(context.Background(), tx, tenant.BucketFilter{}) - if err != nil { - t.Fatal(err) - } - - if len(buckets) != 10 { - t.Fatalf("expected 10 buckets got: %d", len(buckets)) - } - - expected := testBuckets(10, withCrudLog) - assert.Equal(t, expected, buckets) - }, - }, - { - name: "get", - setup: simpleSetup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - bucket, err := store.GetBucket(context.Background(), tx, fifthBucketID) - assert.NoError(t, err) - - expected := &influxdb.Bucket{ - ID: fifthBucketID, - OrgID: firstOrgID, - Name: "bucket5", - Description: "words", - RetentionPolicyName: "name", - RetentionPeriod: time.Second, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: aTime, - UpdatedAt: aTime, - }, - } - - assert.Equal(t, expected, bucket) - - bucket, err = store.GetBucketByName(context.Background(), tx, firstOrgID, "bucket5") - require.NoError(t, err) - assert.Equal(t, expected, bucket) - - if _, err := store.GetBucket(context.Background(), tx, 11); err != tenant.ErrBucketNotFound { - t.Fatal("failed to get correct error when looking for non present bucket by id") - } - - if _, err := store.GetBucketByName(context.Background(), tx, 3, "notabucket"); err.Error() != tenant.ErrBucketNotFoundByName("notabucket").Error() { - t.Fatal("failed to get correct error when looking for invalid bucket by name") - } - }, - }, - { - name: "list", - setup: simpleSetup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - expected := testBuckets(10, withCrudLog) - orgID := firstOrgID - buckets, err := store.ListBuckets(context.Background(), tx, tenant.BucketFilter{OrganizationID: &orgID}) - require.NoError(t, err) - assert.Len(t, buckets, 5) - - orgExpected := []*influxdb.Bucket{ - expected[0], // id 10 => 000a which is alphabetically first - expected[2], - expected[4], - expected[6], - expected[8], - } - assert.Equal(t, orgExpected, buckets) - - buckets, err = store.ListBuckets(context.Background(), tx, tenant.BucketFilter{}, influxdb.FindOptions{Limit: 4}) - require.NoError(t, err) - - if len(buckets) != 4 { - t.Fatalf("expected 4 buckets got: %d", len(buckets)) - } - if !reflect.DeepEqual(buckets, expected[:4]) { - t.Fatalf("expected identical buckets with limit: \n%+v\n%+v", buckets, expected[:4]) - } - - buckets, err = store.ListBuckets(context.Background(), tx, tenant.BucketFilter{}, influxdb.FindOptions{Offset: 3}) - if err != nil { - t.Fatal(err) - } - - if len(buckets) != 7 { - t.Fatalf("expected 7 buckets got: %d", len(buckets)) - } - if !reflect.DeepEqual(buckets, expected[3:]) { - t.Fatalf("expected identical buckets with limit: \n%+v\n%+v", buckets, expected[3:]) - } - }, - }, - { - name: "listOver20", - setup: over20Setup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - buckets, err := store.ListBuckets(context.Background(), tx, tenant.BucketFilter{}) - require.NoError(t, err) - assert.Len(t, buckets, 24) - }, - }, - { - name: "list all with limit 3 using after to paginate", - setup: simpleSetup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - var ( - expected = testBuckets(10, withCrudLog) - found []*influxdb.Bucket - lastID *platform.ID - limit = 3 - listAfter = func(after *platform.ID) ([]*influxdb.Bucket, error) { - return store.ListBuckets(context.Background(), tx, tenant.BucketFilter{}, influxdb.FindOptions{ - After: after, - Limit: limit, - }) - } - ) - - var ( - b []*influxdb.Bucket - err error - ) - - for b, err = listAfter(lastID); err == nil; b, err = 
listAfter(lastID) { - lastID = &b[len(b)-1].ID - found = append(found, b...) - - // given we've seen the last page - if len(b) < limit { - break - } - } - - require.NoError(t, err) - - assert.Equal(t, expected, found) - }, - }, - { - name: "list in org with pagination", - setup: simpleSetup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - allBuckets := testBuckets(10, withCrudLog) - orgID := secondOrgID - allInOrg := []*influxdb.Bucket{ - allBuckets[9], // id 10 => 000a which is alphabetically first - allBuckets[1], - allBuckets[3], - allBuckets[5], - allBuckets[7], - } - - buckets, err := store.ListBuckets(context.Background(), tx, tenant.BucketFilter{OrganizationID: &orgID}) - require.NoError(t, err) - require.Equal(t, allInOrg, buckets) - - // Test pagination using `after` and `limit`. - afterBuckets, err := store.ListBuckets( - context.Background(), tx, - tenant.BucketFilter{OrganizationID: &orgID}, - influxdb.FindOptions{After: &allInOrg[1].ID, Limit: 2}, - ) - require.NoError(t, err) - assert.Equal(t, allInOrg[2:4], afterBuckets) - - // Test pagination using `offset` and `limit`. - offsetBuckets, err := store.ListBuckets( - context.Background(), tx, - tenant.BucketFilter{OrganizationID: &orgID}, - influxdb.FindOptions{Offset: 3, Limit: 1}, - ) - require.NoError(t, err) - assert.Equal(t, allInOrg[3:4], offsetBuckets) - }, - }, - { - name: "list descending in org with pagination", - setup: simpleSetup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - allBuckets := testBuckets(10, withCrudLog) - orgID := secondOrgID - allInOrg := []*influxdb.Bucket{ - allBuckets[7], - allBuckets[5], - allBuckets[3], - allBuckets[1], - allBuckets[9], // id 10 => 000a which is alphabetically first - } - - buckets, err := store.ListBuckets( - context.Background(), tx, - tenant.BucketFilter{OrganizationID: &orgID}, - influxdb.FindOptions{Descending: true}, - ) - require.NoError(t, err) - require.Equal(t, allInOrg, buckets) - - // Test pagination using `after` and `limit`. - afterBuckets, err := store.ListBuckets( - context.Background(), tx, - tenant.BucketFilter{OrganizationID: &orgID}, - influxdb.FindOptions{After: &allInOrg[1].ID, Limit: 2, Descending: true}, - ) - require.NoError(t, err) - assert.Equal(t, allInOrg[2:4], afterBuckets) - - // Test pagination using `offset` and `limit`. 
- offsetBuckets, err := store.ListBuckets( - context.Background(), tx, - tenant.BucketFilter{OrganizationID: &orgID}, - influxdb.FindOptions{Offset: 3, Limit: 1, Descending: true}, - ) - require.NoError(t, err) - assert.Equal(t, allInOrg[3:4], offsetBuckets) - }, - }, - { - name: "update", - setup: simpleSetup, - update: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - bucket5 := "bucket5" - _, err := store.UpdateBucket(context.Background(), tx, thirdBucketID, influxdb.BucketUpdate{Name: &bucket5}) - if err != tenant.ErrBucketNameNotUnique { - t.Fatal("failed to error on duplicate bucketname") - } - - bucket30 := "bucket30" - _, err = store.UpdateBucket(context.Background(), tx, thirdBucketID, influxdb.BucketUpdate{Name: &bucket30}) - require.NoError(t, err) - - description := "notWords" - _, err = store.UpdateBucket(context.Background(), tx, thirdBucketID, influxdb.BucketUpdate{Description: &description}) - require.NoError(t, err) - }, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - buckets, err := store.ListBuckets(context.Background(), tx, tenant.BucketFilter{}) - if err != nil { - t.Fatal(err) - } - - expected := testBuckets(10, withCrudLog) - expected[2].Name = "bucket30" - expected[2].Description = "notWords" - assert.Equal(t, expected, buckets) - }, - }, - { - name: "delete", - setup: simpleSetup, - update: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - err := store.DeleteBucket(context.Background(), tx, firstBucketID) - require.NoError(t, err) - - err = store.DeleteBucket(context.Background(), tx, firstBucketID) - if err != tenant.ErrBucketNotFound { - t.Fatal("invalid error when deleting bucket that has already been deleted", err) - } - - err = store.DeleteBucket(context.Background(), tx, secondBucketID) - require.NoError(t, err) - }, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - buckets, err := store.ListBuckets(context.Background(), tx, tenant.BucketFilter{}) - if err != nil { - t.Fatal(err) - } - - expected := testBuckets(10, withCrudLog)[2:] - assert.Equal(t, expected, buckets) - }, - }, - } - for _, testScenario := range st { - t.Run(testScenario.name, func(t *testing.T) { - s := itesting.NewTestInmemStore(t) - ts := tenant.NewStore(s, tenant.WithNow(func() time.Time { - return aTime - })) - - // setup - if testScenario.setup != nil { - err := ts.Update(context.Background(), func(tx kv.Tx) error { - testScenario.setup(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - - // update - if testScenario.update != nil { - err := ts.Update(context.Background(), func(tx kv.Tx) error { - testScenario.update(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - - // results - if testScenario.results != nil { - err := ts.View(context.Background(), func(tx kv.Tx) error { - testScenario.results(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - }) - } -} diff --git a/tenant/storage_org.go b/tenant/storage_org.go deleted file mode 100644 index e71db36700c..00000000000 --- a/tenant/storage_org.go +++ /dev/null @@ -1,280 +0,0 @@ -package tenant - -import ( - "context" - "encoding/json" - "strings" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" -) - -var ( - organizationBucket = []byte("organizationsv1") - organizationIndex = []byte("organizationindexv1") -) - -func (s *Store) uniqueOrgName(ctx context.Context, tx kv.Tx, uname string) error { - key := 
organizationIndexKey(uname) - if len(key) == 0 { - return influxdb.ErrOrgNameisEmpty - } - - idx, err := tx.Bucket(organizationIndex) - - if err != nil { - return err - } - - _, err = idx.Get(key) - // if not found then this is _unique_. - if kv.IsNotFound(err) { - return nil - } - - // no error means this is not unique - if err == nil { - return OrgAlreadyExistsError(uname) - } - - // any other error is some sort of internal server error - return ErrInternalServiceError(err) -} - -func organizationIndexKey(n string) []byte { - return []byte(strings.TrimSpace(n)) -} - -func unmarshalOrg(v []byte) (*influxdb.Organization, error) { - u := &influxdb.Organization{} - if err := json.Unmarshal(v, u); err != nil { - return nil, ErrCorruptOrg(err) - } - - return u, nil -} - -func marshalOrg(u *influxdb.Organization) ([]byte, error) { - v, err := json.Marshal(u) - if err != nil { - return nil, ErrUnprocessableOrg(err) - } - - return v, nil -} - -func (s *Store) GetOrg(ctx context.Context, tx kv.Tx, id platform.ID) (*influxdb.Organization, error) { - encodedID, err := id.Encode() - if err != nil { - return nil, InvalidOrgIDError(err) - } - - b, err := tx.Bucket(organizationBucket) - if err != nil { - return nil, err - } - - v, err := b.Get(encodedID) - if kv.IsNotFound(err) { - return nil, ErrOrgNotFound - } - - if err != nil { - return nil, ErrInternalServiceError(err) - } - - return unmarshalOrg(v) -} - -func (s *Store) GetOrgByName(ctx context.Context, tx kv.Tx, n string) (*influxdb.Organization, error) { - b, err := tx.Bucket(organizationIndex) - if err != nil { - return nil, err - } - - uid, err := b.Get(organizationIndexKey(n)) - if err == kv.ErrKeyNotFound { - return nil, OrgNotFoundByName(n) - } - - if err != nil { - return nil, ErrInternalServiceError(err) - } - - var id platform.ID - if err := id.Decode(uid); err != nil { - return nil, platform.ErrCorruptID(err) - } - return s.GetOrg(ctx, tx, id) -} - -func (s *Store) ListOrgs(ctx context.Context, tx kv.Tx, opt ...influxdb.FindOptions) ([]*influxdb.Organization, error) { - // if we dont have any options it would be irresponsible to just give back all orgs in the system - if len(opt) == 0 { - opt = append(opt, influxdb.FindOptions{}) - } - o := opt[0] - - b, err := tx.Bucket(organizationBucket) - if err != nil { - return nil, err - } - - cursor, err := b.ForwardCursor(nil) - if err != nil { - return nil, err - } - defer cursor.Close() - - count := 0 - us := []*influxdb.Organization{} - for k, v := cursor.Next(); k != nil; k, v = cursor.Next() { - if o.Offset != 0 && count < o.Offset { - count++ - continue - } - u, err := unmarshalOrg(v) - if err != nil { - continue - } - - us = append(us, u) - - if o.Limit != 0 && len(us) >= o.Limit { - break - } - } - - return us, cursor.Err() -} - -func (s *Store) CreateOrg(ctx context.Context, tx kv.Tx, o *influxdb.Organization) (err error) { - // if ID is provided then ensure it is unique - // generate new bucket ID - o.ID, err = s.generateSafeID(ctx, tx, organizationBucket, s.OrgIDGen) - if err != nil { - return err - } - - encodedID, err := o.ID.Encode() - if err != nil { - return InvalidOrgIDError(err) - } - - if err := s.uniqueOrgName(ctx, tx, o.Name); err != nil { - return err - } - - o.SetCreatedAt(s.now()) - o.SetUpdatedAt(s.now()) - idx, err := tx.Bucket(organizationIndex) - if err != nil { - return err - } - - b, err := tx.Bucket(organizationBucket) - if err != nil { - return err - } - - v, err := marshalOrg(o) - if err != nil { - return err - } - - if err := 
idx.Put(organizationIndexKey(o.Name), encodedID); err != nil { - return ErrInternalServiceError(err) - } - - if err := b.Put(encodedID, v); err != nil { - return ErrInternalServiceError(err) - } - - return nil -} - -func (s *Store) UpdateOrg(ctx context.Context, tx kv.Tx, id platform.ID, upd influxdb.OrganizationUpdate) (*influxdb.Organization, error) { - encodedID, err := id.Encode() - if err != nil { - return nil, err - } - - u, err := s.GetOrg(ctx, tx, id) - if err != nil { - return nil, err - } - - u.SetUpdatedAt(s.now()) - if upd.Name != nil && u.Name != *upd.Name { - if err := s.uniqueOrgName(ctx, tx, *upd.Name); err != nil { - return nil, err - } - - idx, err := tx.Bucket(organizationIndex) - if err != nil { - return nil, err - } - - if err := idx.Delete(organizationIndexKey(u.Name)); err != nil { - return nil, ErrInternalServiceError(err) - } - - u.Name = *upd.Name - - if err := idx.Put(organizationIndexKey(*upd.Name), encodedID); err != nil { - return nil, ErrInternalServiceError(err) - } - } - - if upd.Description != nil { - u.Description = *upd.Description - } - - v, err := marshalOrg(u) - if err != nil { - return nil, err - } - - b, err := tx.Bucket(organizationBucket) - if err != nil { - return nil, err - } - if err := b.Put(encodedID, v); err != nil { - return nil, ErrInternalServiceError(err) - } - - return u, nil -} - -func (s *Store) DeleteOrg(ctx context.Context, tx kv.Tx, id platform.ID) error { - u, err := s.GetOrg(ctx, tx, id) - if err != nil { - return err - } - - encodedID, err := id.Encode() - if err != nil { - return InvalidOrgIDError(err) - } - - idx, err := tx.Bucket(organizationIndex) - if err != nil { - return err - } - - if err := idx.Delete([]byte(u.Name)); err != nil { - return ErrInternalServiceError(err) - } - - b, err := tx.Bucket(organizationBucket) - if err != nil { - return err - } - - if err := b.Delete(encodedID); err != nil { - return ErrInternalServiceError(err) - } - - return nil -} diff --git a/tenant/storage_org_test.go b/tenant/storage_org_test.go deleted file mode 100644 index aab8533637c..00000000000 --- a/tenant/storage_org_test.go +++ /dev/null @@ -1,271 +0,0 @@ -package tenant_test - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// type Organization struct { -// ID ID `json:"id,omitempty"` -// Name string `json:"name"` -// Description string `json:"description"` -// CRUDLog -// } - -const ( - firstOrgID platform.ID = (iota + 1) - secondOrgID - thirdOrgID - fourthOrgID - fifthOrgID -) - -func TestOrg(t *testing.T) { - var ( - aTime = time.Date(2020, 7, 23, 10, 0, 0, 0, time.UTC) - testOrgs = func(count int, visit ...func(*influxdb.Organization)) (orgs []*influxdb.Organization) { - for i := 1; i <= count; i++ { - org := &influxdb.Organization{ - ID: platform.ID(i), - Name: fmt.Sprintf("org%d", i), - Description: "words", - } - - if len(visit) > 0 { - visit[0](org) - } - - orgs = append(orgs, org) - } - - return - } - - withCrudLog = func(o *influxdb.Organization) { - o.CRUDLog = influxdb.CRUDLog{ - CreatedAt: aTime, - UpdatedAt: aTime, - } - } - - simpleSetup = func(t *testing.T, store *tenant.Store, tx kv.Tx) { - 
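		// Use a deterministic, incrementing ID generator here: CreateOrg always
		// assigns IDs from OrgIDGen, so the ten orgs created below receive IDs
		// 1..10 and line up with the testOrgs fixtures defined above.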
store.OrgIDGen = mock.NewIncrementingIDGenerator(1) - for _, org := range testOrgs(10) { - require.NoError(t, store.CreateOrg(context.Background(), tx, org)) - } - } - - over20Setup = func(t *testing.T, store *tenant.Store, tx kv.Tx) { - store.OrgIDGen = mock.NewIncrementingIDGenerator(1) - for _, org := range testOrgs(25) { - require.NoError(t, store.CreateOrg(context.Background(), tx, org)) - } - } - ) - - st := []struct { - name string - setup func(*testing.T, *tenant.Store, kv.Tx) - update func(*testing.T, *tenant.Store, kv.Tx) - results func(*testing.T, *tenant.Store, kv.Tx) - }{ - { - name: "create", - setup: simpleSetup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - orgs, err := store.ListOrgs(context.Background(), tx) - if err != nil { - t.Fatal(err) - } - - assert.Len(t, orgs, 10) - - expected := testOrgs(10, withCrudLog) - assert.Equal(t, expected, orgs) - }, - }, - { - name: "get", - setup: simpleSetup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - org, err := store.GetOrg(context.Background(), tx, fifthOrgID) - if err != nil { - t.Fatal(err) - } - - expected := &influxdb.Organization{ - ID: fifthOrgID, - Name: "org5", - Description: "words", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: org.CreatedAt, - UpdatedAt: org.UpdatedAt, - }, - } - require.Equal(t, expected, org) - - org, err = store.GetOrgByName(context.Background(), tx, "org5") - if err != nil { - t.Fatal(err) - } - require.Equal(t, expected, org) - - if _, err := store.GetOrg(context.Background(), tx, 500); err != tenant.ErrOrgNotFound { - t.Fatal("failed to get correct error when looking for invalid org by id") - } - - if _, err := store.GetOrgByName(context.Background(), tx, "notaorg"); err.Error() != tenant.OrgNotFoundByName("notaorg").Error() { - t.Fatal("failed to get correct error when looking for invalid org by name") - } - - }, - }, - { - name: "list", - setup: simpleSetup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - orgs, err := store.ListOrgs(context.Background(), tx) - if err != nil { - t.Fatal(err) - } - - require.Len(t, orgs, 10) - - expected := testOrgs(10, withCrudLog) - require.Equal(t, expected, orgs) - orgs, err = store.ListOrgs(context.Background(), tx, influxdb.FindOptions{Limit: 4}) - require.NoError(t, err) - assert.Len(t, orgs, 4) - assert.Equal(t, expected[:4], orgs) - - orgs, err = store.ListOrgs(context.Background(), tx, influxdb.FindOptions{Offset: 3}) - require.NoError(t, err) - assert.Len(t, orgs, 7) - assert.Equal(t, expected[3:], orgs) - }, - }, - { - name: "listOver20", - setup: over20Setup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - orgs, err := store.ListOrgs(context.Background(), tx) - if err != nil { - t.Fatal(err) - } - - require.Len(t, orgs, 25) - }, - }, - { - name: "update", - setup: simpleSetup, - update: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - org5 := "org5" - _, err := store.UpdateOrg(context.Background(), tx, thirdOrgID, influxdb.OrganizationUpdate{Name: &org5}) - if err.Error() != tenant.OrgAlreadyExistsError(org5).Error() { - t.Fatal("failed to error on duplicate orgname") - } - - org30 := "org30" - _, err = store.UpdateOrg(context.Background(), tx, thirdOrgID, influxdb.OrganizationUpdate{Name: &org30}) - require.NoError(t, err) - - description := "notWords" - _, err = store.UpdateOrg(context.Background(), tx, thirdOrgID, influxdb.OrganizationUpdate{Description: &description}) - require.NoError(t, err) - }, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - orgs, err 
:= store.ListOrgs(context.Background(), tx) - require.NoError(t, err) - - assert.Len(t, orgs, 10) - - expected := testOrgs(10, withCrudLog) - expected[2].Name = "org30" - expected[2].Description = "notWords" - require.Equal(t, expected, orgs) - }, - }, - { - name: "delete", - setup: simpleSetup, - update: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - err := store.DeleteOrg(context.Background(), tx, firstOrgID) - require.NoError(t, err) - - err = store.DeleteOrg(context.Background(), tx, firstOrgID) - if err != tenant.ErrOrgNotFound { - t.Fatal("invalid error when deleting org that has already been deleted", err) - } - - err = store.DeleteOrg(context.Background(), tx, thirdOrgID) - require.NoError(t, err) - }, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - orgs, err := store.ListOrgs(context.Background(), tx) - require.NoError(t, err) - assert.Len(t, orgs, 8) - - all := testOrgs(10, withCrudLog) - // deleted first and third item - expected := append(all[1:2], all[3:]...) - require.Equal(t, expected, orgs) - }, - }, - } - for _, testScenario := range st { - t.Run(testScenario.name, func(t *testing.T) { - s := itesting.NewTestInmemStore(t) - ts := tenant.NewStore(s, tenant.WithNow(func() time.Time { - return aTime - })) - - // setup - if testScenario.setup != nil { - err := ts.Update(context.Background(), func(tx kv.Tx) error { - testScenario.setup(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - - // update - if testScenario.update != nil { - err := ts.Update(context.Background(), func(tx kv.Tx) error { - testScenario.update(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - - // results - if testScenario.results != nil { - err := ts.View(context.Background(), func(tx kv.Tx) error { - testScenario.results(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - }) - } -} diff --git a/tenant/storage_test.go b/tenant/storage_test.go deleted file mode 100644 index 9d3a426b261..00000000000 --- a/tenant/storage_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package tenant - -import "time" - -// WithNow is a test only option used to override the now time -// generating function -func WithNow(fn func() time.Time) StoreOption { - return func(s *Store) { - s.now = fn - } -} diff --git a/tenant/storage_urm.go b/tenant/storage_urm.go deleted file mode 100644 index 1d5195d7dc5..00000000000 --- a/tenant/storage_urm.go +++ /dev/null @@ -1,227 +0,0 @@ -package tenant - -import ( - "context" - "encoding/json" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" -) - -var urmBucket = []byte("userresourcemappingsv1") - -// NOTE(affo): On URM creation, we check that the user exists. -// We do not check that the resource it is pointing to exists. -// This decision takes into account that different resources could not be in the same store. -// To perform that kind of check, we must rely on the service layer. -// However, we do not want having the storage layer depend on the service layer above. 
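The note above deliberately leaves resource-existence validation to the service layer. As a rough illustration only, a wrapper along the following lines could perform that check before delegating to this store; the urmService type, its orgs/store fields, and the method name are assumptions for the sketch rather than the actual influxdb wiring (the lookup uses the standard influxdb.OrganizationService interface):

// urmService is an illustrative, hypothetical service-layer wrapper; it is not
// part of the original file.
type urmService struct {
	store *Store                       // the storage layer defined in this package
	orgs  influxdb.OrganizationService // used to verify that the mapped resource exists
}

func (svc *urmService) CreateUserResourceMapping(ctx context.Context, urm *influxdb.UserResourceMapping) error {
	// Resource validation happens here, where the appropriate service is available.
	if urm.ResourceType == influxdb.OrgsResourceType {
		if _, err := svc.orgs.FindOrganizationByID(ctx, urm.ResourceID); err != nil {
			return err
		}
	}
	// The storage layer below still checks that the user exists and that the
	// mapping key is unique.
	return svc.store.Update(ctx, func(tx kv.Tx) error {
		return svc.store.CreateURM(ctx, tx, urm)
	})
}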
-func (s *Store) CreateURM(ctx context.Context, tx kv.Tx, urm *influxdb.UserResourceMapping) error { - if _, err := s.GetUser(ctx, tx, urm.UserID); err != nil { - return err - } - if err := s.uniqueUserResourceMapping(ctx, tx, urm); err != nil { - return err - } - - v, err := json.Marshal(urm) - if err != nil { - return ErrUnprocessableMapping(err) - } - - key, err := userResourceKey(urm.ResourceID, urm.UserID) - if err != nil { - return err - } - - b, err := tx.Bucket(urmBucket) - if err != nil { - return UnavailableURMServiceError(err) - } - - if err := b.Put(key, v); err != nil { - return UnavailableURMServiceError(err) - } - - // insert urm into by user index - userID, err := urm.UserID.Encode() - if err != nil { - return err - } - if err := s.urmByUserIndex.Insert(tx, userID, key); err != nil { - return err - } - - return nil -} - -func (s *Store) ListURMs(ctx context.Context, tx kv.Tx, filter influxdb.UserResourceMappingFilter, opt ...influxdb.FindOptions) ([]*influxdb.UserResourceMapping, error) { - ms := []*influxdb.UserResourceMapping{} - - b, err := tx.Bucket(urmBucket) - if err != nil { - return nil, UnavailableURMServiceError(err) - } - - filterFn := func(m *influxdb.UserResourceMapping) bool { - return (!filter.UserID.Valid() || (filter.UserID == m.UserID)) && - (!filter.ResourceID.Valid() || (filter.ResourceID == m.ResourceID)) && - (filter.UserType == "" || (filter.UserType == m.UserType)) && - (filter.ResourceType == "" || (filter.ResourceType == m.ResourceType)) - } - - if filter.UserID.Valid() { - var ( - // urm by user index lookup - userID, _ = filter.UserID.Encode() - seen int - ) - - err := s.urmByUserIndex.Walk(ctx, tx, userID, func(k, v []byte) (bool, error) { - m := &influxdb.UserResourceMapping{} - if err := json.Unmarshal(v, m); err != nil { - return false, CorruptURMError(err) - } - - // respect offset parameter - reachedOffset := (len(opt) == 0 || seen >= opt[0].Offset) - if reachedOffset && filterFn(m) { - ms = append(ms, m) - } - - seen++ - - return (len(opt) == 0 || opt[0].Limit <= 0 || len(ms) < opt[0].Limit), nil - }) - - return ms, err - } - - // for now the best we can do is use the resourceID if we have that as a forward cursor option - var prefix []byte - var cursorOptions []kv.CursorOption - - if filter.ResourceID.Valid() { - p, err := userResourcePrefixKey(filter.ResourceID) - if err != nil { - return nil, err - } - prefix = p - cursorOptions = append(cursorOptions, kv.WithCursorPrefix(p)) - } - cur, err := b.ForwardCursor(prefix, cursorOptions...) 
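	// The resource ID works as a cursor prefix because userResourceKey (defined
	// below) stores each mapping under encodedResourceID followed by
	// encodedUserID, so all mappings for a given resource sort contiguously.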
- if err != nil { - return nil, err - } - defer cur.Close() - - for k, v := cur.Next(); k != nil; k, v = cur.Next() { - m := &influxdb.UserResourceMapping{} - if err := json.Unmarshal(v, m); err != nil { - return nil, CorruptURMError(err) - } - - // check to see if it matches the filter - if filterFn(m) { - ms = append(ms, m) - } - - if len(opt) > 0 && opt[0].Limit > 0 && len(ms) >= opt[0].Limit { - break - } - } - - return ms, cur.Err() -} - -func (s *Store) GetURM(ctx context.Context, tx kv.Tx, resourceID, userID platform.ID) (*influxdb.UserResourceMapping, error) { - key, err := userResourceKey(resourceID, userID) - if err != nil { - return nil, err - } - - b, err := tx.Bucket(urmBucket) - if err != nil { - return nil, UnavailableURMServiceError(err) - } - - val, err := b.Get(key) - if err != nil { - return nil, err - } - - m := &influxdb.UserResourceMapping{} - if err := json.Unmarshal(val, m); err != nil { - return nil, CorruptURMError(err) - } - return m, nil -} - -func (s *Store) DeleteURM(ctx context.Context, tx kv.Tx, resourceID, userID platform.ID) error { - key, err := userResourceKey(resourceID, userID) - if err != nil { - return err - } - - b, err := tx.Bucket(urmBucket) - if err != nil { - return err - } - - // remove user resource mapping from by user index - uid, err := userID.Encode() - if err != nil { - return err - } - - if err := s.urmByUserIndex.Delete(tx, uid, key); err != nil { - return err - } - - return b.Delete(key) -} - -func userResourcePrefixKey(resourceID platform.ID) ([]byte, error) { - encodedResourceID, err := resourceID.Encode() - if err != nil { - return nil, ErrInvalidURMID - } - return encodedResourceID, nil -} - -func userResourceKey(resourceID, userID platform.ID) ([]byte, error) { - encodedResourceID, err := resourceID.Encode() - if err != nil { - return nil, ErrInvalidURMID - } - - encodedUserID, err := userID.Encode() - if err != nil { - return nil, ErrInvalidURMID - } - - key := make([]byte, len(encodedResourceID)+len(encodedUserID)) - copy(key, encodedResourceID) - copy(key[len(encodedResourceID):], encodedUserID) - - return key, nil -} - -func (s *Store) uniqueUserResourceMapping(ctx context.Context, tx kv.Tx, m *influxdb.UserResourceMapping) error { - key, err := userResourceKey(m.ResourceID, m.UserID) - if err != nil { - return err - } - - b, err := tx.Bucket(urmBucket) - if err != nil { - return UnavailableURMServiceError(err) - } - - _, err = b.Get(key) - if !kv.IsNotFound(err) { - return NonUniqueMappingError(m.UserID) - } - - return nil -} diff --git a/tenant/storage_urm_test.go b/tenant/storage_urm_test.go deleted file mode 100644 index 360b82bf556..00000000000 --- a/tenant/storage_urm_test.go +++ /dev/null @@ -1,377 +0,0 @@ -package tenant_test - -import ( - "context" - "fmt" - "reflect" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestURM(t *testing.T) { - simpleSetup := func(t *testing.T, store *tenant.Store, tx kv.Tx) { - for i := 1; i <= 10; i++ { - // User must exist to create urm. 
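			// (CreateURM validates only the user; resource existence is deliberately
			// not checked at the storage layer, per the NOTE in storage_urm.go.)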
- uid := platform.ID(i + 1) - err := store.CreateUser(context.Background(), tx, &influxdb.User{ - ID: uid, - Name: fmt.Sprintf("user%d", i), - }) - if err != nil { - t.Fatal(err) - } - err = store.CreateURM(context.Background(), tx, &influxdb.UserResourceMapping{ - UserID: uid, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: platform.ID(i%2 + 1), - }) - if err != nil { - t.Fatal(err) - } - } - } - - st := []struct { - name string - setup func(*testing.T, *tenant.Store, kv.Tx) - update func(*testing.T, *tenant.Store, kv.Tx) - results func(*testing.T, *tenant.Store, kv.Tx) - }{ - { - name: "create", - setup: simpleSetup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - urms, err := store.ListURMs(context.Background(), tx, influxdb.UserResourceMappingFilter{}) - if err != nil { - t.Fatal(err) - } - - if len(urms) != 10 { - t.Fatalf("ten records are created and we received %d", len(urms)) - } - var expected []*influxdb.UserResourceMapping - for i := 1; i <= 10; i++ { - expected = append(expected, &influxdb.UserResourceMapping{ - UserID: platform.ID(i + 1), - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: platform.ID(i%2 + 1), - }) - } - sort.Slice(expected, func(i, j int) bool { - irid, _ := expected[i].ResourceID.Encode() - iuid, _ := expected[i].UserID.Encode() - jrid, _ := expected[j].ResourceID.Encode() - juid, _ := expected[j].UserID.Encode() - return string(irid)+string(iuid) < string(jrid)+string(juid) - }) - - if !reflect.DeepEqual(urms, expected) { - t.Fatalf("expected identical urms: \n%s", cmp.Diff(urms, expected)) - } - }, - }, - { - name: "create - user not found", - setup: simpleSetup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - users, err := store.ListUsers(context.Background(), tx) - if err != nil { - t.Fatal(err) - } - - maxID := platform.ID(0) - for _, u := range users { - if u.ID > maxID { - maxID = u.ID - } - } - - err = store.CreateURM(context.Background(), tx, &influxdb.UserResourceMapping{ - UserID: maxID + 1, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: platform.ID(1), - }) - if err == nil { - t.Fatal("expected error got none") - } else if errors.ErrorCode(err) != errors.ENotFound { - t.Fatalf("expected not found error got: %v", err) - } - }, - }, - { - name: "get", - setup: simpleSetup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - _, err := store.GetURM(context.Background(), tx, 1, 2) - if err != kv.ErrKeyNotFound { - t.Fatal("failed to not find urm") - } - - urm, err := store.GetURM(context.Background(), tx, 2, 2) - if err != nil { - t.Fatal(err) - } - expected := &influxdb.UserResourceMapping{ - UserID: 2, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: 2, - } - - if !reflect.DeepEqual(urm, expected) { - t.Fatalf("expected identical urm: \n%s", cmp.Diff(urm, expected)) - } - }, - }, - { - name: "list", - setup: simpleSetup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - urms, err := store.ListURMs(context.Background(), tx, influxdb.UserResourceMappingFilter{}) - if err != nil { - t.Fatal(err) - } - - if len(urms) != 10 { - t.Fatalf("ten records are created and we received %d", len(urms)) - } - var expected []*influxdb.UserResourceMapping - for i := 1; i <= 10; i++ { - expected = 
append(expected, &influxdb.UserResourceMapping{ - UserID: platform.ID(i + 1), - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: platform.ID(i%2 + 1), - }) - } - sort.Slice(expected, func(i, j int) bool { - irid, _ := expected[i].ResourceID.Encode() - iuid, _ := expected[i].UserID.Encode() - jrid, _ := expected[j].ResourceID.Encode() - juid, _ := expected[j].UserID.Encode() - return string(irid)+string(iuid) < string(jrid)+string(juid) - }) - - if !reflect.DeepEqual(urms, expected) { - t.Fatalf("expected identical urms: \n%s", cmp.Diff(urms, expected)) - } - - urms, err = store.ListURMs(context.Background(), tx, influxdb.UserResourceMappingFilter{ResourceID: platform.ID(1)}) - if err != nil { - t.Fatal(err) - } - - if len(urms) != 5 { - t.Fatalf("expected 5 urms got %d", len(urms)) - } - - if !reflect.DeepEqual(urms, expected[:5]) { - t.Fatalf("expected subset of urms urms: \n%s", cmp.Diff(urms, expected[:5])) - } - - }, - }, - { - name: "list by user with limit", - setup: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - uid := platform.ID(1) - err := store.CreateUser(context.Background(), tx, &influxdb.User{ - ID: uid, - Name: "user", - }) - if err != nil { - t.Fatal(err) - } - for i := 1; i <= 25; i++ { - // User must exist to create urm. - err = store.CreateURM(context.Background(), tx, &influxdb.UserResourceMapping{ - UserID: uid, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: platform.ID(i + 1), - }) - if err != nil { - t.Fatal(err) - } - } - }, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - urms, err := store.ListURMs(context.Background(), tx, influxdb.UserResourceMappingFilter{UserID: platform.ID(1)}, influxdb.FindOptions{Limit: 10}) - if err != nil { - t.Fatal(err) - } - - if len(urms) != 10 { - t.Fatalf("when setting the limit to 10 we got: %d", len(urms)) - } - var expected []*influxdb.UserResourceMapping - for i := 1; i <= 10; i++ { - expected = append(expected, &influxdb.UserResourceMapping{ - UserID: platform.ID(1), - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: platform.ID(i + 1), - }) - } - sort.Slice(expected, func(i, j int) bool { - irid, _ := expected[i].ResourceID.Encode() - iuid, _ := expected[i].UserID.Encode() - jrid, _ := expected[j].ResourceID.Encode() - juid, _ := expected[j].UserID.Encode() - return string(irid)+string(iuid) < string(jrid)+string(juid) - }) - - if !reflect.DeepEqual(urms, expected) { - t.Fatalf("expected identical urms: \n%s", cmp.Diff(urms, expected)) - } - }, - }, - { - name: "list by user with limit and offset", - setup: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - uid := platform.ID(1) - err := store.CreateUser(context.Background(), tx, &influxdb.User{ - ID: uid, - Name: "user", - }) - if err != nil { - t.Fatal(err) - } - for i := 1; i <= 25; i++ { - // User must exist to create urm. 
- err = store.CreateURM(context.Background(), tx, &influxdb.UserResourceMapping{ - UserID: uid, - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: platform.ID(i + 1), - }) - if err != nil { - t.Fatal(err) - } - } - }, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - urms, err := store.ListURMs( - context.Background(), - tx, - influxdb.UserResourceMappingFilter{ - UserID: platform.ID(1)}, - influxdb.FindOptions{ - Offset: 10, - Limit: 10, - }, - ) - if err != nil { - t.Fatal(err) - } - - if len(urms) != 10 { - t.Fatalf("when setting the limit to 10 we got: %d", len(urms)) - } - var expected []*influxdb.UserResourceMapping - for i := 11; i <= 20; i++ { - expected = append(expected, &influxdb.UserResourceMapping{ - UserID: platform.ID(1), - UserType: influxdb.Owner, - MappingType: influxdb.UserMappingType, - ResourceType: influxdb.OrgsResourceType, - ResourceID: platform.ID(i + 1), - }) - } - sort.Slice(expected, func(i, j int) bool { - irid, _ := expected[i].ResourceID.Encode() - iuid, _ := expected[i].UserID.Encode() - jrid, _ := expected[j].ResourceID.Encode() - juid, _ := expected[j].UserID.Encode() - return string(irid)+string(iuid) < string(jrid)+string(juid) - }) - - if !reflect.DeepEqual(urms, expected) { - t.Fatalf("expected identical urms: \n%s", cmp.Diff(urms, expected)) - } - }, - }, - { - name: "delete", - setup: simpleSetup, - update: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - err := store.DeleteURM(context.Background(), tx, 23, 21) - if err != nil { - t.Fatal(err) - } - - err = store.DeleteURM(context.Background(), tx, 2, 2) - if err != nil { - t.Fatal(err) - } - }, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - _, err := store.GetURM(context.Background(), tx, 2, 2) - if err != kv.ErrKeyNotFound { - t.Fatal("failed to erro when getting a deleted URM") - } - }, - }, - } - for _, testScenario := range st { - t.Run(testScenario.name, func(t *testing.T) { - s := itesting.NewTestInmemStore(t) - ts := tenant.NewStore(s) - - // setup - if testScenario.setup != nil { - err := ts.Update(context.Background(), func(tx kv.Tx) error { - testScenario.setup(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - - // update - if testScenario.update != nil { - err := ts.Update(context.Background(), func(tx kv.Tx) error { - testScenario.update(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - - // results - if testScenario.results != nil { - err := ts.View(context.Background(), func(tx kv.Tx) error { - testScenario.results(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - }) - } -} diff --git a/tenant/storage_user.go b/tenant/storage_user.go deleted file mode 100644 index c937bdcb549..00000000000 --- a/tenant/storage_user.go +++ /dev/null @@ -1,370 +0,0 @@ -package tenant - -import ( - "context" - "encoding/json" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" -) - -var ( - userBucket = []byte("usersv1") - userIndex = []byte("userindexv1") - - userpasswordBucket = []byte("userspasswordv1") -) - -func unmarshalUser(v []byte) (*influxdb.User, error) { - u := &influxdb.User{} - if err := json.Unmarshal(v, u); err != nil { - return nil, ErrCorruptUser(err) - } - - return u, nil -} - -func marshalUser(u *influxdb.User) ([]byte, error) { - v, err := json.Marshal(u) - if err != nil { - return nil, 
ErrUnprocessableUser(err) - } - - return v, nil -} - -func (s *Store) uniqueUserName(tx kv.Tx, uname string) error { - - idx, err := tx.Bucket(userIndex) - if err != nil { - return err - } - - _, err = idx.Get([]byte(uname)) - // if not found then this is _unique_. - if kv.IsNotFound(err) { - return nil - } - - // no error means this is not unique - if err == nil { - return UserAlreadyExistsError(uname) - } - - // any other error is some sort of internal server error - return ErrUnprocessableUser(err) -} - -func (s *Store) uniqueUserID(tx kv.Tx, id platform.ID) error { - encodedID, _ := id.Encode() - - b, err := tx.Bucket(userBucket) - if err != nil { - return err - } - - _, err = b.Get(encodedID) - if kv.IsNotFound(err) { - return nil - } - - if err == nil { - return UserIDAlreadyExistsError(id.String()) - } - - return ErrUnprocessableUser(err) -} - -func (s *Store) GetUser(ctx context.Context, tx kv.Tx, id platform.ID) (*influxdb.User, error) { - encodedID, err := id.Encode() - if err != nil { - return nil, InvalidUserIDError(err) - } - - b, err := tx.Bucket(userBucket) - if err != nil { - return nil, err - } - - v, err := b.Get(encodedID) - if kv.IsNotFound(err) { - return nil, ErrUserNotFound - } - - if err != nil { - return nil, ErrInternalServiceError(err) - } - - return unmarshalUser(v) -} - -func (s *Store) GetUserByName(ctx context.Context, tx kv.Tx, n string) (*influxdb.User, error) { - b, err := tx.Bucket(userIndex) - if err != nil { - return nil, err - } - - uid, err := b.Get([]byte(n)) - if err == kv.ErrKeyNotFound { - return nil, ErrUserNotFound - } - - if err != nil { - return nil, ErrInternalServiceError(err) - } - - var id platform.ID - if err := id.Decode(uid); err != nil { - return nil, platform.ErrCorruptID(err) - } - return s.GetUser(ctx, tx, id) -} - -func (s *Store) ListUsers(ctx context.Context, tx kv.Tx, opt ...influxdb.FindOptions) ([]*influxdb.User, error) { - if len(opt) == 0 { - opt = append(opt, influxdb.FindOptions{}) - } - o := opt[0] - - b, err := tx.Bucket(userBucket) - if err != nil { - return nil, err - } - - var opts []kv.CursorOption - if o.Descending { - opts = append(opts, kv.WithCursorDirection(kv.CursorDescending)) - } - - var seek []byte - if o.After != nil { - after := (*o.After) + 1 - seek, err = after.Encode() - if err != nil { - return nil, err - } - } - - cursor, err := b.ForwardCursor(seek, opts...) 
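	// Pagination: when After is set, the seek key starts the scan just past that
	// ID (After+1); Offset entries are then skipped and Limit is enforced while
	// iterating in the loop below.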
- if err != nil { - return nil, err - } - defer cursor.Close() - - count := 0 - us := []*influxdb.User{} - for k, v := cursor.Next(); k != nil; k, v = cursor.Next() { - if o.Offset != 0 && count < o.Offset { - count++ - continue - } - u, err := unmarshalUser(v) - if err != nil { - continue - } - - us = append(us, u) - - if o.Limit != 0 && len(us) >= o.Limit { - break - } - } - - return us, cursor.Err() -} - -func (s *Store) CreateUser(ctx context.Context, tx kv.Tx, u *influxdb.User) error { - if !u.ID.Valid() { - u.ID = s.IDGen.ID() - } - - encodedID, err := u.ID.Encode() - if err != nil { - return InvalidUserIDError(err) - } - - // Verify that both the provided username and user ID are not already in-use - if err := s.uniqueUserName(tx, u.Name); err != nil { - return err - } - if err := s.uniqueUserID(tx, u.ID); err != nil { - return err - } - - idx, err := tx.Bucket(userIndex) - if err != nil { - return err - } - - b, err := tx.Bucket(userBucket) - if err != nil { - return err - } - - v, err := marshalUser(u) - if err != nil { - return err - } - - if err := idx.Put([]byte(u.Name), encodedID); err != nil { - return ErrInternalServiceError(err) - } - - if err := b.Put(encodedID, v); err != nil { - return ErrInternalServiceError(err) - } - - return nil -} - -func (s *Store) UpdateUser(ctx context.Context, tx kv.Tx, id platform.ID, upd influxdb.UserUpdate) (*influxdb.User, error) { - encodedID, err := id.Encode() - if err != nil { - return nil, err - } - - u, err := s.GetUser(ctx, tx, id) - if err != nil { - return nil, err - } - - if upd.Name != nil && *upd.Name != u.Name { - if err := s.uniqueUserName(tx, *upd.Name); err != nil { - return nil, err - } - - idx, err := tx.Bucket(userIndex) - if err != nil { - return nil, err - } - - if err := idx.Delete([]byte(u.Name)); err != nil { - return nil, ErrInternalServiceError(err) - } - - u.Name = *upd.Name - - if err := idx.Put([]byte(u.Name), encodedID); err != nil { - return nil, ErrInternalServiceError(err) - } - } - - if upd.Status != nil { - u.Status = *upd.Status - } - - v, err := marshalUser(u) - if err != nil { - return nil, err - } - - b, err := tx.Bucket(userBucket) - if err != nil { - return nil, err - } - if err := b.Put(encodedID, v); err != nil { - return nil, ErrInternalServiceError(err) - } - - return u, nil -} - -func (s *Store) DeleteUser(ctx context.Context, tx kv.Tx, id platform.ID) error { - u, err := s.GetUser(ctx, tx, id) - if err != nil { - return err - } - - encodedID, err := id.Encode() - if err != nil { - return InvalidUserIDError(err) - } - - idx, err := tx.Bucket(userIndex) - if err != nil { - return err - } - - if err := idx.Delete([]byte(u.Name)); err != nil { - return ErrInternalServiceError(err) - } - - b, err := tx.Bucket(userBucket) - if err != nil { - return err - } - - if err := b.Delete(encodedID); err != nil { - return ErrInternalServiceError(err) - } - - // Clean up users password. - ub, err := tx.Bucket(userpasswordBucket) - if err != nil { - return UnavailablePasswordServiceError(err) - } - if err := ub.Delete(encodedID); err != nil { - return err - } - - // Clean up user URMs. - urms, err := s.ListURMs(ctx, tx, influxdb.UserResourceMappingFilter{UserID: id}) - if err != nil { - return err - } - // Do not fail fast on error. - // Try to avoid as much as possible the effects of partial deletion. 
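	// Failures from individual mapping deletions are collected and returned
	// together, so one bad mapping does not stop cleanup of the rest.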
- aggErr := NewAggregateError() - for _, urm := range urms { - if err := s.DeleteURM(ctx, tx, urm.ResourceID, urm.UserID); err != nil { - aggErr.Add(err) - } - } - return aggErr.Err() -} - -func (s *Store) GetPassword(ctx context.Context, tx kv.Tx, id platform.ID) (string, error) { - encodedID, err := id.Encode() - if err != nil { - return "", InvalidUserIDError(err) - } - - b, err := tx.Bucket(userpasswordBucket) - if err != nil { - return "", UnavailablePasswordServiceError(err) - } - - passwd, err := b.Get(encodedID) - - return string(passwd), err -} - -func (s *Store) SetPassword(ctx context.Context, tx kv.Tx, id platform.ID, password string) error { - encodedID, err := id.Encode() - if err != nil { - return InvalidUserIDError(err) - } - - b, err := tx.Bucket(userpasswordBucket) - if err != nil { - return UnavailablePasswordServiceError(err) - } - - return b.Put(encodedID, []byte(password)) -} - -func (s *Store) DeletePassword(ctx context.Context, tx kv.Tx, id platform.ID) error { - encodedID, err := id.Encode() - if err != nil { - return InvalidUserIDError(err) - } - - b, err := tx.Bucket(userpasswordBucket) - if err != nil { - return UnavailablePasswordServiceError(err) - } - - return b.Delete(encodedID) - -} diff --git a/tenant/storage_user_test.go b/tenant/storage_user_test.go deleted file mode 100644 index a7e35d6de55..00000000000 --- a/tenant/storage_user_test.go +++ /dev/null @@ -1,331 +0,0 @@ -package tenant_test - -import ( - "context" - "fmt" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/tenant" - itesting "github.com/influxdata/influxdb/v2/testing" -) - -func TestUser(t *testing.T) { - simpleSetup := func(t *testing.T, store *tenant.Store, tx kv.Tx) { - for i := 1; i <= 10; i++ { - err := store.CreateUser(context.Background(), tx, &influxdb.User{ - ID: platform.ID(i), - Name: fmt.Sprintf("user%d", i), - Status: "active", - }) - if err != nil { - t.Fatal(err) - } - } - } - - over20Setup := func(t *testing.T, store *tenant.Store, tx kv.Tx) { - for i := 1; i <= 22; i++ { - err := store.CreateUser(context.Background(), tx, &influxdb.User{ - ID: platform.ID(i), - Name: fmt.Sprintf("user%d", i), - Status: "active", - }) - if err != nil { - t.Fatal(err) - } - } - } - - st := []struct { - name string - setup func(*testing.T, *tenant.Store, kv.Tx) - update func(*testing.T, *tenant.Store, kv.Tx) - results func(*testing.T, *tenant.Store, kv.Tx) - }{ - { - name: "create", - setup: simpleSetup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - users, err := store.ListUsers(context.Background(), tx) - if err != nil { - t.Fatal(err) - } - - if len(users) != 10 { - t.Fatalf("expected 10 users got: %d", len(users)) - } - - expected := []*influxdb.User{} - for i := 1; i <= 10; i++ { - expected = append(expected, &influxdb.User{ - ID: platform.ID(i), - Name: fmt.Sprintf("user%d", i), - Status: "active", - }) - } - if !reflect.DeepEqual(users, expected) { - t.Fatalf("expected identical users: \n%+v\n%+v", users, expected) - } - - // Test that identical name causes an error - err = store.CreateUser(context.Background(), tx, &influxdb.User{ - ID: platform.ID(11), // Unique ID - Name: "user1", // Non-unique name - }) - if err == nil { - t.Fatal("expected error on creating user with identical username") - } - - // Test that identical ID causes an error - err = store.CreateUser(context.Background(), 
tx, &influxdb.User{ - ID: platform.ID(1), // Non-unique ID - Name: "user11", // Unique name - }) - if err == nil { - t.Fatal("expected error on creating user with identical ID") - } - }, - }, - { - name: "get", - setup: simpleSetup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - user, err := store.GetUser(context.Background(), tx, 5) - if err != nil { - t.Fatal(err) - } - - expected := &influxdb.User{ - ID: 5, - Name: "user5", - Status: "active", - } - - if !reflect.DeepEqual(user, expected) { - t.Fatalf("expected identical user: \n%+v\n%+v", user, expected) - } - - user, err = store.GetUserByName(context.Background(), tx, "user5") - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(user, expected) { - t.Fatalf("expected identical user: \n%+v\n%+v", user, expected) - } - - if _, err := store.GetUser(context.Background(), tx, 500); err != tenant.ErrUserNotFound { - t.Fatal("failed to get correct error when looking for invalid user by id") - } - - if _, err := store.GetUserByName(context.Background(), tx, "notauser"); err != tenant.ErrUserNotFound { - t.Fatal("failed to get correct error when looking for invalid user by name") - } - - }, - }, - { - name: "list", - setup: simpleSetup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - users, err := store.ListUsers(context.Background(), tx) - if err != nil { - t.Fatal(err) - } - - if len(users) != 10 { - t.Fatalf("expected 10 users got: %d", len(users)) - } - - expected := []*influxdb.User{} - for i := 1; i <= 10; i++ { - expected = append(expected, &influxdb.User{ - ID: platform.ID(i), - Name: fmt.Sprintf("user%d", i), - Status: "active", - }) - } - if !reflect.DeepEqual(users, expected) { - t.Fatalf("expected identical users: \n%+v\n%+v", users, expected) - } - - users, err = store.ListUsers(context.Background(), tx, influxdb.FindOptions{Limit: 4}) - if err != nil { - t.Fatal(err) - } - - if len(users) != 4 { - t.Fatalf("expected 4 users got: %d", len(users)) - } - if !reflect.DeepEqual(users, expected[:4]) { - t.Fatalf("expected identical users with limit: \n%+v\n%+v", users, expected[:4]) - } - - users, err = store.ListUsers(context.Background(), tx, influxdb.FindOptions{Offset: 3}) - if err != nil { - t.Fatal(err) - } - - if len(users) != 7 { - t.Fatalf("expected 7 users got: %d", len(users)) - } - if !reflect.DeepEqual(users, expected[3:]) { - t.Fatalf("expected identical users with limit: \n%+v\n%+v", users, expected[3:]) - } - }, - }, - { - name: "listOver20", - setup: over20Setup, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - users, err := store.ListUsers(context.Background(), tx) - if err != nil { - t.Fatal(err) - } - - if len(users) != 22 { - t.Fatalf("expected 10 users got: %d", len(users)) - } - }, - }, - { - name: "update", - setup: simpleSetup, - update: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - user5 := "user5" - _, err := store.UpdateUser(context.Background(), tx, platform.ID(3), influxdb.UserUpdate{Name: &user5}) - if err.Error() != tenant.UserAlreadyExistsError(user5).Error() { - t.Fatal("failed to error on duplicate username") - } - - user30 := "user30" - _, err = store.UpdateUser(context.Background(), tx, platform.ID(3), influxdb.UserUpdate{Name: &user30}) - if err != nil { - t.Fatal(err) - } - - inactive := influxdb.Status("inactive") - _, err = store.UpdateUser(context.Background(), tx, platform.ID(3), influxdb.UserUpdate{Status: &inactive}) - if err != nil { - t.Fatal(err) - } - }, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - 
users, err := store.ListUsers(context.Background(), tx) - if err != nil { - t.Fatal(err) - } - - if len(users) != 10 { - t.Fatalf("expected 10 users got: %d", len(users)) - } - - expected := []*influxdb.User{} - for i := 1; i <= 10; i++ { - expected = append(expected, &influxdb.User{ - ID: platform.ID(i), - Name: fmt.Sprintf("user%d", i), - Status: "active", - }) - } - expected[2].Name = "user30" - expected[2].Status = "inactive" - if !reflect.DeepEqual(users, expected) { - t.Fatalf("expected identical users: \n%+v\n%+v", users, expected) - } - }, - }, - { - name: "delete", - setup: simpleSetup, - update: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - err := store.DeleteUser(context.Background(), tx, 1) - if err != nil { - t.Fatal(err) - } - - err = store.DeleteUser(context.Background(), tx, 1) - if err != tenant.ErrUserNotFound { - t.Fatal("invalid error when deleting user that has already been deleted", err) - } - - err = store.DeleteUser(context.Background(), tx, 3) - if err != nil { - t.Fatal(err) - } - - }, - results: func(t *testing.T, store *tenant.Store, tx kv.Tx) { - users, err := store.ListUsers(context.Background(), tx) - if err != nil { - t.Fatal(err) - } - - if len(users) != 8 { - t.Fatalf("expected 10 users got: %d", len(users)) - } - - expected := []*influxdb.User{} - for i := 1; i <= 10; i++ { - if i != 1 && i != 3 { - expected = append(expected, &influxdb.User{ - ID: platform.ID(i), - Name: fmt.Sprintf("user%d", i), - Status: "active", - }) - } - } - - if !reflect.DeepEqual(users, expected) { - t.Fatalf("expected identical users: \n%+v\n%+v", users, expected) - } - }, - }, - } - for _, testScenario := range st { - t.Run(testScenario.name, func(t *testing.T) { - s := itesting.NewTestInmemStore(t) - ts := tenant.NewStore(s) - - // setup - if testScenario.setup != nil { - err := ts.Update(context.Background(), func(tx kv.Tx) error { - testScenario.setup(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - - // update - if testScenario.update != nil { - err := ts.Update(context.Background(), func(tx kv.Tx) error { - testScenario.update(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - - // results - if testScenario.results != nil { - err := ts.View(context.Background(), func(tx kv.Tx) error { - testScenario.results(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - }) - } -} diff --git a/test.flux b/test.flux deleted file mode 100644 index 731a7c2a340..00000000000 --- a/test.flux +++ /dev/null @@ -1,8 +0,0 @@ -import "array" -import "profiler" -import "internal/gen" -import "runtime" - -option profiler.enabledProfilers = ["operator"] - -array.from(rows: [{version: runtime.version()}]) diff --git a/testing/auth.go b/testing/auth.go deleted file mode 100644 index 35e1aec7fb8..00000000000 --- a/testing/auth.go +++ /dev/null @@ -1,1363 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "sort" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" -) - -const ( - authZeroID = "020f755c3c081000" - authOneID = "020f755c3c082000" - authTwoID = "020f755c3c082001" - authThreeID = "020f755c3c082002" -) - -var authorizationCmpOptions = cmp.Options{ - cmpopts.EquateEmpty(), - cmpopts.IgnoreFields(influxdb.Authorization{}, "ID", 
"Token", "CreatedAt", "UpdatedAt"), - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Authorization) []*influxdb.Authorization { - out := append([]*influxdb.Authorization(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -type AuthTestOpts struct { - WithoutFindByToken bool -} - -// WithoutFindByToken allows the Find By Token test case to be skipped when we are testing the http server, -// since finding by token is not supported by the HTTP API -func WithoutFindByToken() AuthTestOpts { - return AuthTestOpts{ - WithoutFindByToken: true, - } -} - -// AuthorizationFields will include the IDGenerator, and authorizations -type AuthorizationFields struct { - IDGenerator platform.IDGenerator - OrgIDGenerator platform.IDGenerator - TokenGenerator influxdb.TokenGenerator - TimeGenerator influxdb.TimeGenerator - Authorizations []*influxdb.Authorization - Users []*influxdb.User - Orgs []*influxdb.Organization -} - -// AuthorizationService tests all the service functions. -func AuthorizationService( - init func(AuthorizationFields, *testing.T) (influxdb.AuthorizationService, string, func()), - t *testing.T, - opts ...AuthTestOpts) { - tests := []struct { - name string - fn func(init func(AuthorizationFields, *testing.T) (influxdb.AuthorizationService, string, func()), - t *testing.T) - }{ - { - name: "CreateAuthorization", - fn: CreateAuthorization, - }, - { - name: "FindAuthorizationByID", - fn: FindAuthorizationByID, - }, - { - name: "FindAuthorizationByToken", - fn: FindAuthorizationByToken, - }, - { - name: "UpdateAuthorization", - fn: UpdateAuthorization, - }, - { - name: "FindAuthorizations", - fn: FindAuthorizations, - }, - { - name: "DeleteAuthorization", - fn: DeleteAuthorization, - }, - } - for _, tt := range tests { - if tt.name == "FindAuthorizationByToken" && len(opts) > 0 && opts[0].WithoutFindByToken { - continue - } - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -// CreateAuthorization testing -func CreateAuthorization( - init func(AuthorizationFields, *testing.T) (influxdb.AuthorizationService, string, func()), - t *testing.T, -) { - type args struct { - authorization *influxdb.Authorization - } - type wants struct { - err error - authorizations []*influxdb.Authorization - } - - tests := []struct { - name string - fields AuthorizationFields - args args - wants wants - }{ - { - name: "basic create authorization", - fields: AuthorizationFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - IDGenerator: mock.NewIDGenerator(authTwoID, t), - TimeGenerator: &mock.TimeGenerator{ - FakeValue: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - }, - TokenGenerator: &mock.TokenGenerator{ - TokenFn: func() (string, error) { - return "rand", nil - }, - }, - Users: []*influxdb.User{ - { - Name: "cooluser", - ID: MustIDBase16(userOneID), - }, - }, - Orgs: []*influxdb.Organization{ - { - Name: "o1", - }, - }, - Authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "supersecret", - Permissions: allUsersPermission(idOne), - Description: "already existing auth", - }, - }, - }, - args: args{ - authorization: &influxdb.Authorization{ - OrgID: idOne, - UserID: MustIDBase16(userOneID), - Permissions: createUsersPermission(idOne), - Description: "new auth", - }, - }, - wants: 
wants{ - authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Status: influxdb.Active, - Token: "supersecret", - Permissions: allUsersPermission(idOne), - Description: "already existing auth", - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "rand", - Status: influxdb.Active, - Permissions: createUsersPermission(idOne), - Description: "new auth", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - UpdatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - }, - }, - }, - }, - }, - { - name: "providing a non existing user is invalid", - fields: AuthorizationFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - IDGenerator: mock.NewIDGenerator(authTwoID, t), - TimeGenerator: &mock.TimeGenerator{ - FakeValue: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - }, - TokenGenerator: &mock.TokenGenerator{ - TokenFn: func() (string, error) { - return "rand", nil - }, - }, - Users: []*influxdb.User{ - { - Name: "cooluser", - ID: MustIDBase16(userOneID), - }, - }, - Orgs: []*influxdb.Organization{ - { - Name: "o1", - }, - }, - Authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "supersecret", - Permissions: allUsersPermission(idOne), - Description: "already existing auth", - }, - }, - }, - args: args{ - authorization: &influxdb.Authorization{ - OrgID: idOne, - UserID: MustIDBase16(userTwoID), - Permissions: createUsersPermission(idOne), - Description: "auth with non-existent user", - }, - }, - wants: wants{ - authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Status: influxdb.Active, - Token: "supersecret", - Permissions: allUsersPermission(idOne), - Description: "already existing auth", - }, - }, - err: influxdb.ErrUnableToCreateToken, - }, - }, - { - name: "providing a non existing org is invalid", - fields: AuthorizationFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - IDGenerator: mock.NewIDGenerator(authTwoID, t), - TimeGenerator: &mock.TimeGenerator{ - FakeValue: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - }, - TokenGenerator: &mock.TokenGenerator{ - TokenFn: func() (string, error) { - return "rand", nil - }, - }, - Users: []*influxdb.User{ - { - Name: "cooluser", - ID: MustIDBase16(userOneID), - }, - }, - Orgs: []*influxdb.Organization{ - { - Name: "o1", - }, - }, - Authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "supersecret", - Permissions: allUsersPermission(idOne), - Description: "already existing auth", - }, - }, - }, - args: args{ - authorization: &influxdb.Authorization{ - OrgID: idTwo, - UserID: MustIDBase16(userOneID), - Permissions: createUsersPermission(idTwo), - Description: "auth with non-existent org", - }, - }, - wants: wants{ - authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Status: influxdb.Active, - Token: "supersecret", - Permissions: allUsersPermission(idOne), - Description: "already existing auth", - }, - }, - err: influxdb.ErrUnableToCreateToken, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := 
context.Background() - err := s.CreateAuthorization(ctx, tt.args.authorization) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - defer s.DeleteAuthorization(ctx, tt.args.authorization.ID) - - authorizations, _, err := s.FindAuthorizations(ctx, influxdb.AuthorizationFilter{}) - if err != nil { - t.Fatalf("failed to retrieve authorizations: %v", err) - } - if diff := cmp.Diff(authorizations, tt.wants.authorizations, authorizationCmpOptions...); diff != "" { - t.Errorf("authorizations are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindAuthorizationByID testing -func FindAuthorizationByID( - init func(AuthorizationFields, *testing.T) (influxdb.AuthorizationService, string, func()), - t *testing.T, -) { - type wants struct { - err error - authorizations []*influxdb.Authorization - } - - tests := []struct { - name string - fields AuthorizationFields - wants wants - }{ - { - name: "basic find authorization by id", - fields: AuthorizationFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - Users: []*influxdb.User{ - { - Name: "cooluser", - ID: MustIDBase16(userOneID), - }, - { - Name: "regularuser", - ID: MustIDBase16(userTwoID), - }, - }, - Authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "rand1", - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Token: "rand2", - Permissions: createUsersPermission(idOne), - }, - }, - Orgs: []*influxdb.Organization{ - { - // ID(1) - Name: "o1", - }, - }, - }, - wants: wants{ - authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "rand1", - Status: "active", - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Token: "rand2", - Status: "active", - Permissions: createUsersPermission(idOne), - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - for i := range tt.fields.Authorizations { - authorization, err := s.FindAuthorizationByID(ctx, tt.fields.Authorizations[i].ID) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(authorization, tt.wants.authorizations[i], authorizationCmpOptions...); diff != "" { - t.Errorf("authorization is different -got/+want\ndiff %s", diff) - } - } - - }) - } -} - -func stringPtr(s string) *string { - return &s -} - -// UpdateAuthorization testing -func UpdateAuthorization( - init func(AuthorizationFields, *testing.T) (influxdb.AuthorizationService, string, func()), - t *testing.T, -) { - type args struct { - id platform.ID - upd *influxdb.AuthorizationUpdate - } - type wants struct { - err error - authorization *influxdb.Authorization - } - tests := []struct { - name string - fields AuthorizationFields - args args - wants wants - }{ - { - name: "regular update", - fields: AuthorizationFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - TimeGenerator: &mock.TimeGenerator{ - FakeValue: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - }, - Users: []*influxdb.User{ - { - Name: "cooluser", - ID: MustIDBase16(userOneID), - }, - { - Name: "regularuser", - ID: 
MustIDBase16(userTwoID), - }, - }, - Orgs: []*influxdb.Organization{ - { - Name: "o1", - }, - { - Name: "o2", - }, - }, - Authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - Token: "rand1", - Status: influxdb.Inactive, - OrgID: idTwo, - Permissions: allUsersPermission(idTwo), - }, - { - ID: MustIDBase16(authZeroID), - UserID: MustIDBase16(userOneID), - Token: "rand0", - OrgID: idOne, - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Token: "rand2", - Permissions: createUsersPermission(idOne), - }, - { - ID: MustIDBase16(authThreeID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "rand3", - Permissions: allUsersPermission(idOne), - }, - }, - }, - args: args{ - id: MustIDBase16(authTwoID), - upd: &influxdb.AuthorizationUpdate{ - Status: influxdb.Inactive.Ptr(), - Description: stringPtr("desc1"), - }, - }, - wants: wants{ - authorization: &influxdb.Authorization{ - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Token: "rand2", - Permissions: createUsersPermission(idOne), - Status: influxdb.Inactive, - Description: "desc1", - CRUDLog: influxdb.CRUDLog{ - UpdatedAt: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - }, - }, - }, - }, - { - name: "update with id not found", - fields: AuthorizationFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - Users: []*influxdb.User{ - { - Name: "cooluser", - ID: MustIDBase16(userOneID), - }, - { - Name: "regularuser", - ID: MustIDBase16(userTwoID), - }, - }, - Orgs: []*influxdb.Organization{ - { - Name: "o1", - }, - { - Name: "o2", - }, - }, - Authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - Token: "rand1", - Status: influxdb.Inactive, - OrgID: idTwo, - Permissions: allUsersPermission(idTwo), - }, - { - ID: MustIDBase16(authZeroID), - UserID: MustIDBase16(userOneID), - Token: "rand0", - OrgID: idOne, - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Token: "rand2", - Permissions: createUsersPermission(idOne), - }, - }, - }, - args: args{ - id: MustIDBase16(authThreeID), - upd: &influxdb.AuthorizationUpdate{ - Status: influxdb.Inactive.Ptr(), - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpUpdateAuthorization, - Msg: "authorization not found", - }, - }, - }, - { - name: "update with unknown status", - fields: AuthorizationFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - TimeGenerator: &mock.TimeGenerator{ - FakeValue: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - }, - Users: []*influxdb.User{ - { - Name: "cooluser", - ID: MustIDBase16(userOneID), - }, - { - Name: "regularuser", - ID: MustIDBase16(userTwoID), - }, - }, - Orgs: []*influxdb.Organization{ - { - Name: "o1", - }, - { - Name: "o2", - }, - }, - Authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - Token: "rand1", - Status: influxdb.Inactive, - OrgID: idTwo, - Permissions: allUsersPermission(idTwo), - }, - { - ID: MustIDBase16(authZeroID), - UserID: MustIDBase16(userOneID), - Token: "rand0", - OrgID: idOne, - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Token: "rand2", - Permissions: createUsersPermission(idOne), - }, 
- { - ID: MustIDBase16(authThreeID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "rand3", - Permissions: allUsersPermission(idOne), - }, - }, - }, - args: args{ - id: MustIDBase16(authTwoID), - upd: &influxdb.AuthorizationUpdate{ - Status: influxdb.Status("unknown").Ptr(), - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EInvalid, - Op: influxdb.OpUpdateAuthorization, - Msg: "unknown authorization status", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - updatedAuth, err := s.UpdateAuthorization(ctx, tt.args.id, tt.args.upd) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if tt.wants.err == nil { - authorization, err := s.FindAuthorizationByID(ctx, tt.args.id) - if err != nil { - t.Errorf("%s failed, got error %s", tt.name, err.Error()) - } - if diff := cmp.Diff(authorization, tt.wants.authorization, authorizationCmpOptions...); diff != "" { - t.Errorf("authorization is different -got/+want\ndiff %s", diff) - } - if diff := cmp.Diff(authorization, updatedAuth, authorizationCmpOptions...); diff != "" { - t.Errorf("authorization is different -got/+want\ndiff %s", diff) - } - } - }) - } -} - -// FindAuthorizationByToken testing -func FindAuthorizationByToken( - init func(AuthorizationFields, *testing.T) (influxdb.AuthorizationService, string, func()), - t *testing.T, -) { - type args struct { - token string - } - type wants struct { - err error - authorization *influxdb.Authorization - } - - tests := []struct { - name string - fields AuthorizationFields - args args - wants wants - }{ - { - name: "basic find authorization by token", - fields: AuthorizationFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - Users: []*influxdb.User{ - { - Name: "cooluser", - ID: MustIDBase16(userOneID), - }, - { - Name: "regularuser", - ID: MustIDBase16(userTwoID), - }, - }, - Orgs: []*influxdb.Organization{ - { - Name: "o1", - }, - { - Name: "o2", - }, - }, - Authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - Token: "rand1", - Status: influxdb.Inactive, - OrgID: idTwo, - Permissions: allUsersPermission(idTwo), - }, - { - ID: MustIDBase16(authZeroID), - UserID: MustIDBase16(userOneID), - Token: "rand0", - OrgID: idOne, - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Token: "rand2", - Permissions: createUsersPermission(idOne), - }, - { - ID: MustIDBase16(authThreeID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "rand3", - Permissions: allUsersPermission(idOne), - }, - }, - }, - args: args{ - token: "rand1", - }, - wants: wants{ - authorization: &influxdb.Authorization{ - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idTwo, - Status: influxdb.Inactive, - Token: "rand1", - Permissions: allUsersPermission(idTwo), - }, - }, - }, - { - name: "find authorization by token", - fields: AuthorizationFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - Users: []*influxdb.User{ - { - Name: "cooluser", - ID: MustIDBase16(userOneID), - }, - { - Name: "regularuser", - ID: MustIDBase16(userTwoID), - }, - }, - Orgs: []*influxdb.Organization{ - { - Name: "o1", - }, - }, - Authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authZeroID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "rand1", - Permissions: 
deleteUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Token: "rand2", - Permissions: createUsersPermission(idOne), - }, - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "rand3", - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authThreeID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "rand4", - Permissions: deleteUsersPermission(idOne), - }, - }, - }, - args: args{ - token: "rand2", - }, - wants: wants{ - authorization: &influxdb.Authorization{ - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Token: "rand2", - Status: influxdb.Active, - Permissions: createUsersPermission(idOne), - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - authorization, err := s.FindAuthorizationByToken(ctx, tt.args.token) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(authorization, tt.wants.authorization, authorizationCmpOptions...); diff != "" { - t.Errorf("authorization is different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindAuthorizations testing -func FindAuthorizations( - init func(AuthorizationFields, *testing.T) (influxdb.AuthorizationService, string, func()), - t *testing.T, -) { - type args struct { - ID platform.ID - UserID platform.ID - OrgID platform.ID - token string - } - - type wants struct { - authorizations []*influxdb.Authorization - err error - } - tests := []struct { - name string - fields AuthorizationFields - args args - wants wants - }{ - { - name: "find all authorizations", - fields: AuthorizationFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - Users: []*influxdb.User{ - { - Name: "cooluser", - ID: MustIDBase16(userOneID), - }, - { - Name: "regularuser", - ID: MustIDBase16(userTwoID), - }, - }, - Orgs: []*influxdb.Organization{ - { - Name: "o1", - }, - }, - Authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "rand1", - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Token: "rand2", - Permissions: createUsersPermission(idOne), - }, - }, - }, - args: args{}, - wants: wants{ - authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "rand1", - Status: influxdb.Active, - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Token: "rand2", - Status: influxdb.Active, - Permissions: createUsersPermission(idOne), - }, - }, - }, - }, - { - name: "find authorization by user id", - fields: AuthorizationFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - Users: []*influxdb.User{ - { - Name: "cooluser", - ID: MustIDBase16(userOneID), - }, - { - Name: "regularuser", - ID: MustIDBase16(userTwoID), - }, - }, - Orgs: []*influxdb.Organization{ - { - Name: "o1", - }, - }, - Authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "rand1", - Status: influxdb.Active, - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Token: "rand2", - 
Permissions: createUsersPermission(idOne), - }, - { - ID: MustIDBase16(authThreeID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "rand3", - Permissions: deleteUsersPermission(idOne), - }, - }, - }, - args: args{ - UserID: MustIDBase16(userOneID), - }, - wants: wants{ - authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Status: influxdb.Active, - Token: "rand1", - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authThreeID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Status: influxdb.Active, - Token: "rand3", - Permissions: deleteUsersPermission(idOne), - }, - }, - }, - }, - { - name: "find authorization by org id", - fields: AuthorizationFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - Users: []*influxdb.User{ - { - Name: "cooluser", - ID: MustIDBase16(userOneID), - }, - }, - Orgs: []*influxdb.Organization{ - { - Name: "o1", - }, - { - Name: "o2", - }, - }, - Authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Status: influxdb.Active, - Token: "rand1", - Permissions: createUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Status: influxdb.Active, - Token: "rand2", - Permissions: deleteUsersPermission(idOne), - }, - { - ID: MustIDBase16(authThreeID), - UserID: MustIDBase16(userOneID), - OrgID: idTwo, - Status: influxdb.Active, - Token: "rand3", - Permissions: allUsersPermission(idTwo), - }, - }, - }, - args: args{ - OrgID: idOne, - }, - wants: wants{ - authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Status: influxdb.Active, - Token: "rand1", - Permissions: createUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Status: influxdb.Active, - Token: "rand2", - Permissions: deleteUsersPermission(idOne), - }, - }, - }, - }, - { - name: "find authorization by org id and user id", - fields: AuthorizationFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - Users: []*influxdb.User{ - { - Name: "cooluser", - ID: MustIDBase16(userOneID), - }, - { - Name: "regularuser", - ID: MustIDBase16(userTwoID), - }, - }, - Orgs: []*influxdb.Organization{ - { - Name: "o1", - }, - { - Name: "o2", - ID: idTwo, - }, - }, - Authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Status: influxdb.Active, - Token: "rand1", - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userOneID), - OrgID: idTwo, - Status: influxdb.Active, - Token: "rand2", - Permissions: allUsersPermission(idTwo), - }, - { - ID: MustIDBase16(authThreeID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Status: influxdb.Active, - Token: "rand3", - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authThreeID), - UserID: MustIDBase16(userTwoID), - OrgID: idTwo, - Status: influxdb.Active, - Token: "rand4", - Permissions: allUsersPermission(idTwo), - }, - }, - }, - args: args{ - UserID: MustIDBase16(userOneID), - OrgID: idTwo, - }, - wants: wants{ - authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userOneID), - OrgID: idTwo, - Status: influxdb.Active, - Token: "rand2", - Permissions: allUsersPermission(idTwo), - }, 
- }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - filter := influxdb.AuthorizationFilter{} - if tt.args.ID.Valid() { - filter.ID = &tt.args.ID - } - if tt.args.UserID.Valid() { - filter.UserID = &tt.args.UserID - } - if tt.args.OrgID.Valid() { - filter.OrgID = &tt.args.OrgID - } - if tt.args.token != "" { - filter.Token = &tt.args.token - } - - authorizations, _, err := s.FindAuthorizations(ctx, filter) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - if diff := cmp.Diff(authorizations, tt.wants.authorizations, authorizationCmpOptions...); diff != "" { - t.Errorf("authorizations are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// DeleteAuthorization testing -func DeleteAuthorization( - init func(AuthorizationFields, *testing.T) (influxdb.AuthorizationService, string, func()), - t *testing.T, -) { - type args struct { - ID platform.ID - } - type wants struct { - err error - authorizations []*influxdb.Authorization - } - - tests := []struct { - name string - fields AuthorizationFields - args args - wants wants - }{ - { - name: "delete authorizations using exist id", - fields: AuthorizationFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - Users: []*influxdb.User{ - { - Name: "cooluser", - ID: MustIDBase16(userOneID), - }, - { - Name: "regularuser", - ID: MustIDBase16(userTwoID), - }, - }, - Orgs: []*influxdb.Organization{ - { - Name: "o1", - }, - }, - Authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "rand1", - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Token: "rand2", - Permissions: createUsersPermission(idOne), - }, - }, - }, - args: args{ - ID: MustIDBase16(authOneID), - }, - wants: wants{ - authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Status: influxdb.Active, - Token: "rand2", - Permissions: createUsersPermission(idOne), - }, - }, - }, - }, - { - name: "delete authorizations using id that does not exist", - fields: AuthorizationFields{ - OrgIDGenerator: mock.NewIncrementingIDGenerator(1), - Users: []*influxdb.User{ - { - Name: "cooluser", - ID: MustIDBase16(userOneID), - }, - { - Name: "regularuser", - ID: MustIDBase16(userTwoID), - }, - }, - Orgs: []*influxdb.Organization{ - { - Name: "o1", - }, - }, - Authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - OrgID: idOne, - Token: "rand1", - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - OrgID: idOne, - UserID: MustIDBase16(userTwoID), - Token: "rand2", - Permissions: createUsersPermission(idOne), - }, - }, - }, - args: args{ - ID: MustIDBase16(authThreeID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "authorization not found", - Op: influxdb.OpDeleteAuthorization, - }, - authorizations: []*influxdb.Authorization{ - { - ID: MustIDBase16(authOneID), - UserID: MustIDBase16(userOneID), - Token: "rand1", - Status: influxdb.Active, - OrgID: idOne, - Permissions: allUsersPermission(idOne), - }, - { - ID: MustIDBase16(authTwoID), - UserID: MustIDBase16(userTwoID), - OrgID: idOne, - Token: "rand2", - Status: influxdb.Active, - Permissions: createUsersPermission(idOne), - }, - }, - }, - }, - } - - 
for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.DeleteAuthorization(ctx, tt.args.ID) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - filter := influxdb.AuthorizationFilter{} - authorizations, _, err := s.FindAuthorizations(ctx, filter) - if err != nil { - t.Fatalf("failed to retrieve authorizations: %v", err) - } - if diff := cmp.Diff(authorizations, tt.wants.authorizations, authorizationCmpOptions...); diff != "" { - t.Errorf("authorizations are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func allUsersPermission(orgID platform.ID) []influxdb.Permission { - return []influxdb.Permission{ - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.UsersResourceType, OrgID: &orgID}}, - {Action: influxdb.ReadAction, Resource: influxdb.Resource{Type: influxdb.UsersResourceType, OrgID: &orgID}}, - } -} - -func createUsersPermission(orgID platform.ID) []influxdb.Permission { - return []influxdb.Permission{ - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.UsersResourceType, OrgID: &orgID}}, - } -} - -func deleteUsersPermission(orgID platform.ID) []influxdb.Permission { - return []influxdb.Permission{ - {Action: influxdb.WriteAction, Resource: influxdb.Resource{Type: influxdb.UsersResourceType, OrgID: &orgID}}, - } -} diff --git a/testing/bucket_service.go b/testing/bucket_service.go deleted file mode 100644 index cd81bb0cb78..00000000000 --- a/testing/bucket_service.go +++ /dev/null @@ -1,1748 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "sort" - "testing" - "time" - - "github.com/dustin/go-humanize" - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" -) - -const ( - idOne = platform.ID(iota + 1) - idTwo - idThree - idFour - idFive -) - -var bucketCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Comparer(func(x, y *influxdb.Bucket) bool { - if x == nil && y == nil { - return true - } - if x != nil && y == nil || y != nil && x == nil { - return false - } - - return x.OrgID == y.OrgID && - x.Type == y.Type && - x.Description == y.Description && - x.RetentionPolicyName == y.RetentionPolicyName && - x.RetentionPeriod == y.RetentionPeriod && - x.Name == y.Name - }), - cmp.Transformer("Sort", func(in []*influxdb.Bucket) []*influxdb.Bucket { - out := append([]*influxdb.Bucket(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].Name > out[j].Name - }) - return out - }), -} - -// BucketFields will include the IDGenerator, and buckets -type BucketFields struct { - IDGenerator platform.IDGenerator - OrgIDs platform.IDGenerator - BucketIDs platform.IDGenerator - TimeGenerator influxdb.TimeGenerator - Buckets []*influxdb.Bucket - Organizations []*influxdb.Organization -} - -type bucketServiceF func( - init func(BucketFields, *testing.T) (influxdb.BucketService, string, func()), - t *testing.T, -) - -// BucketService tests all the service functions. 
-func BucketService( - init func(BucketFields, *testing.T) (influxdb.BucketService, string, func()), - t *testing.T) { - tests := []struct { - name string - fn bucketServiceF - }{ - { - name: "CreateBucket", - fn: CreateBucket, - }, - { - name: "FindBucketByID", - fn: FindBucketByID, - }, - { - name: "FindBuckets", - fn: FindBuckets, - }, - { - name: "FindBucket", - fn: FindBucket, - }, - { - name: "UpdateBucket", - fn: UpdateBucket, - }, - { - name: "DeleteBucket", - fn: DeleteBucket, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - tt.fn(init, t) - }) - } -} - -// CreateBucket testing -func CreateBucket( - init func(BucketFields, *testing.T) (influxdb.BucketService, string, func()), - t *testing.T, -) { - type args struct { - bucket *influxdb.Bucket - } - type wants struct { - err error - buckets []*influxdb.Bucket - } - - tests := []struct { - name string - fields BucketFields - args args - wants wants - }{ - { - name: "create buckets with empty set", - fields: BucketFields{ - IDGenerator: mock.NewStaticIDGenerator(idOne), - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Buckets: []*influxdb.Bucket{}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - }, - args: args{ - bucket: &influxdb.Bucket{ - Name: "name1", - OrgID: idOne, - Description: "desc1", - }, - }, - wants: wants{ - buckets: []*influxdb.Bucket{ - { - Name: "name1", - ID: idOne, - OrgID: idOne, - Description: "desc1", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - }, - { - name: "basic create bucket", - fields: BucketFields{ - IDGenerator: mock.NewStaticIDGenerator(idTwo), - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - { - // ID(2) - Name: "otherorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - Name: "bucket1", - OrgID: idOne, - }, - }, - }, - args: args{ - bucket: &influxdb.Bucket{ - Name: "bucket2", - OrgID: idTwo, - RetentionPeriod: humanize.Week, - ShardGroupDuration: humanize.Day, - }, - }, - wants: wants{ - buckets: []*influxdb.Bucket{ - { - ID: idOne, - Name: "bucket1", - OrgID: idOne, - }, - { - ID: idTwo, - Name: "bucket2", - OrgID: idTwo, - RetentionPeriod: humanize.Week, - ShardGroupDuration: humanize.Day, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - }, - { - name: "names should be unique within an organization", - fields: BucketFields{ - IDGenerator: mock.NewStaticIDGenerator(idTwo), - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - { - // ID(2) - Name: "otherorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - Name: "bucket1", - OrgID: idOne, - }, - }, - }, - args: args{ - bucket: &influxdb.Bucket{ - Name: "bucket1", - OrgID: idOne, - }, - }, - wants: wants{ - 
buckets: []*influxdb.Bucket{ - { - ID: idOne, - Name: "bucket1", - OrgID: idOne, - }, - }, - err: &errors.Error{ - Code: errors.EConflict, - Op: influxdb.OpCreateBucket, - Msg: "bucket with name bucket1 already exists", - }, - }, - }, - { - name: "names should not be unique across organizations", - fields: BucketFields{ - IDGenerator: mock.NewStaticIDGenerator(idTwo), - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - { - // ID(2) - Name: "otherorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - Name: "bucket1", - OrgID: idOne, - }, - }, - }, - args: args{ - bucket: &influxdb.Bucket{ - Name: "bucket1", - OrgID: idTwo, - }, - }, - wants: wants{ - buckets: []*influxdb.Bucket{ - { - ID: idOne, - Name: "bucket1", - OrgID: idOne, - // CRUDLog is missing because seed data is created through - // storage layer and not service layer (where CRUDLog is populated) - }, - { - ID: idTwo, - Name: "bucket1", - OrgID: idTwo, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - }, - { - name: "create bucket with orgID not exist", - fields: BucketFields{ - IDGenerator: mock.NewStaticIDGenerator(idOne), - OrgIDs: mock.NewStaticIDGenerator(idOne), - BucketIDs: mock.NewStaticIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Buckets: []*influxdb.Bucket{}, - Organizations: []*influxdb.Organization{}, - }, - args: args{ - bucket: &influxdb.Bucket{ - Name: "name1", - OrgID: idOne, - }, - }, - wants: wants{ - buckets: []*influxdb.Bucket{}, - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "organization not found", - Op: influxdb.OpCreateBucket, - }, - }, - }, - { - name: "create bucket with illegal quotation mark", - fields: BucketFields{ - IDGenerator: mock.NewStaticIDGenerator(idOne), - BucketIDs: mock.NewStaticIDGenerator(idOne), - OrgIDs: mock.NewStaticIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Buckets: []*influxdb.Bucket{}, - Organizations: []*influxdb.Organization{ - { - Name: "org", - }, - }, - }, - args: args{ - bucket: &influxdb.Bucket{ - Name: "namewith\"quote", - OrgID: idOne, - }, - }, - wants: wants{ - buckets: []*influxdb.Bucket{}, - err: &errors.Error{ - Code: errors.EInvalid, - Op: influxdb.OpCreateBucket, - Msg: "bucket name namewith\"quote is invalid. 
Bucket names may not include quotation marks", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.CreateBucket(ctx, tt.args.bucket) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - // Delete only newly created buckets - ie., with a not nil ID - // if tt.args.bucket.ID.Valid() { - defer s.DeleteBucket(ctx, tt.args.bucket.ID) - // } - - buckets, _, err := s.FindBuckets(ctx, influxdb.BucketFilter{}) - if err != nil { - t.Fatalf("failed to retrieve buckets: %v", err) - } - - // remove system buckets - filteredBuckets := []*influxdb.Bucket{} - for _, b := range buckets { - if b.Type != influxdb.BucketTypeSystem { - filteredBuckets = append(filteredBuckets, b) - } - } - - if diff := cmp.Diff(filteredBuckets, tt.wants.buckets, bucketCmpOptions...); diff != "" { - t.Errorf("buckets are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindBucketByID testing -func FindBucketByID( - init func(BucketFields, *testing.T) (influxdb.BucketService, string, func()), - t *testing.T, -) { - type args struct { - id platform.ID - } - type wants struct { - err error - bucket *influxdb.Bucket - } - - tests := []struct { - name string - fields BucketFields - args args - wants wants - }{ - { - name: "basic find bucket by id", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - ID: idOne, - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "bucket1", - }, - { - // ID(2) - OrgID: idOne, - Name: "bucket2", - }, - }, - }, - args: args{ - id: idOne, - }, - wants: wants{ - bucket: &influxdb.Bucket{ - ID: idOne, - OrgID: idOne, - Name: "bucket1", - }, - }, - }, - { - name: "find bucket by id not exist", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "bucket1", - }, - { - // ID(2) - OrgID: idOne, - Name: "bucket2", - }, - }, - }, - args: args{ - id: idThree, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindBucketByID, - Msg: "bucket not found", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - bucket, err := s.FindBucketByID(ctx, tt.args.id) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(bucket, tt.wants.bucket, bucketCmpOptions...); diff != "" { - t.Errorf("bucket is different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindBuckets testing -func FindBuckets( - init func(BucketFields, *testing.T) (influxdb.BucketService, string, func()), - t *testing.T, -) { - type args struct { - ID platform.ID - name string - organization string - organizationID platform.ID - findOptions influxdb.FindOptions - } - - type wants struct { - buckets []*influxdb.Bucket - err error - } - tests := []struct { - name string - fields BucketFields - args args - wants wants - }{ - { - name: "find all buckets", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: 
[]*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - { - // ID(2) - Name: "otherorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - OrgID: idOne, - Name: "abc", - RetentionPeriod: humanize.Week, - }, - { - OrgID: idTwo, - Name: "xyz", - ShardGroupDuration: humanize.Day, - }, - }, - }, - args: args{}, - wants: wants{ - buckets: []*influxdb.Bucket{ - { - ID: idOne, - OrgID: idOne, - Name: "abc", - RetentionPeriod: humanize.Week, - }, - { - ID: idTwo, - OrgID: idTwo, - Name: "xyz", - ShardGroupDuration: humanize.Day, - }, - }, - }, - }, - { - name: "find all buckets by offset and limit", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "abc", - }, - { - // ID(2) - OrgID: idOne, - Name: "def", - }, - { - // ID(3) - OrgID: idOne, - Name: "xyz", - }, - }, - }, - args: args{ - findOptions: influxdb.FindOptions{ - Offset: 1, - Limit: 1, - }, - }, - wants: wants{ - buckets: []*influxdb.Bucket{ - { - ID: idTwo, - OrgID: idOne, - Name: "def", - }, - }, - }, - }, - { - name: "find all buckets by after and limit", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "abc", - }, - { - // ID(2) - OrgID: idOne, - Name: "def", - }, - { - // ID(3) - OrgID: idOne, - Name: "xyz", - }, - }, - }, - args: args{ - findOptions: influxdb.FindOptions{ - After: idPtr(idOne), - Limit: 2, - }, - }, - wants: wants{ - buckets: []*influxdb.Bucket{ - { - ID: idTwo, - OrgID: idOne, - Name: "def", - }, - { - ID: idThree, - OrgID: idOne, - Name: "xyz", - }, - }, - }, - }, - { - name: "find all buckets by descending", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "abc", - }, - { - // ID(2) - OrgID: idOne, - Name: "def", - }, - { - // ID(3) - OrgID: idOne, - Name: "xyz", - }, - }, - }, - args: args{ - findOptions: influxdb.FindOptions{ - Offset: 1, - Descending: true, - }, - }, - wants: wants{ - buckets: []*influxdb.Bucket{ - { - ID: idTwo, - OrgID: idOne, - Name: "def", - }, - { - ID: idOne, - OrgID: idOne, - Name: "abc", - }, - }, - }, - }, - { - name: "find buckets by organization name", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - { - // ID(2) - Name: "otherorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "abc", - }, - { - // ID(2) - OrgID: idTwo, - Name: "xyz", - }, - { - // ID(3) - OrgID: idOne, - Name: "123", - }, - }, - }, - args: args{ - organization: "theorg", - }, - wants: wants{ - buckets: []*influxdb.Bucket{ - { - ID: idOne, - OrgID: idOne, - Name: "abc", - }, - { - ID: idThree, - OrgID: idOne, - Name: "123", - }, - }, - }, - }, - { - name: "find buckets by organization id", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: 
[]*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - { - // ID(2) - Name: "otherorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "abc", - }, - { - // ID(2) - OrgID: idTwo, - Name: "xyz", - }, - { - // ID(3) - OrgID: idOne, - Name: "123", - }, - }, - }, - args: args{ - organizationID: idOne, - }, - wants: wants{ - buckets: []*influxdb.Bucket{ - { - ID: idOne, - OrgID: idOne, - Name: "abc", - }, - { - ID: idThree, - OrgID: idOne, - Name: "123", - }, - }, - }, - }, - { - name: "find bucket by name", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "abc", - }, - { - // ID(2) - OrgID: idOne, - Name: "xyz", - }, - }, - }, - args: args{ - name: "xyz", - }, - wants: wants{ - buckets: []*influxdb.Bucket{ - { - ID: idTwo, - OrgID: idOne, - Name: "xyz", - }, - }, - }, - }, - { - name: "missing bucket returns no buckets", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{}, - }, - args: args{ - name: "xyz", - }, - wants: wants{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - filter := influxdb.BucketFilter{} - if tt.args.ID.Valid() { - filter.ID = &tt.args.ID - } - if tt.args.organizationID.Valid() { - filter.OrganizationID = &tt.args.organizationID - } - if tt.args.organization != "" { - filter.Org = &tt.args.organization - } - if tt.args.name != "" { - filter.Name = &tt.args.name - } - - buckets, _, err := s.FindBuckets(ctx, filter, tt.args.findOptions) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - // remove system buckets - filteredBuckets := []*influxdb.Bucket{} - for _, b := range buckets { - if b.Type != influxdb.BucketTypeSystem { - filteredBuckets = append(filteredBuckets, b) - } - } - - if diff := cmp.Diff(filteredBuckets, tt.wants.buckets, bucketCmpOptions...); diff != "" { - t.Errorf("buckets are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// DeleteBucket testing -func DeleteBucket( - init func(BucketFields, *testing.T) (influxdb.BucketService, string, func()), - t *testing.T, -) { - type args struct { - ID platform.ID - } - type wants struct { - err error - buckets []*influxdb.Bucket - } - - tests := []struct { - name string - fields BucketFields - args args - wants wants - }{ - { - name: "delete buckets using exist id", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - Name: "A", - OrgID: idOne, - }, - { - // ID(2) - Name: "B", - OrgID: idOne, - }, - }, - }, - args: args{ - ID: idOne, - }, - wants: wants{ - buckets: []*influxdb.Bucket{ - { - Name: "B", - ID: idTwo, - OrgID: idOne, - }, - }, - }, - }, - { - name: "delete buckets using id that does not exist", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - 
}, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - Name: "A", - OrgID: idOne, - }, - { - // ID(2) - Name: "B", - OrgID: idOne, - }, - }, - }, - args: args{ - ID: MustIDBase16("1234567890654321"), - }, - wants: wants{ - err: &errors.Error{ - Op: influxdb.OpDeleteBucket, - Msg: "bucket not found", - Code: errors.ENotFound, - }, - buckets: []*influxdb.Bucket{ - { - Name: "A", - ID: idOne, - OrgID: idOne, - }, - { - Name: "B", - ID: idTwo, - OrgID: idOne, - }, - }, - }, - }, - { - name: "delete system buckets", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - Name: "A", - OrgID: idOne, - Type: influxdb.BucketTypeSystem, - }, - }, - }, - args: args{ - ID: idOne, - }, - wants: wants{ - err: &errors.Error{ - Op: influxdb.OpDeleteBucket, - Msg: "system buckets cannot be deleted", - Code: errors.EInvalid, - }, - buckets: []*influxdb.Bucket{ - { - Name: "A", - ID: idOne, - OrgID: idOne, - Type: influxdb.BucketTypeSystem, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.DeleteBucket(ctx, tt.args.ID) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - filter := influxdb.BucketFilter{} - buckets, _, err := s.FindBuckets(ctx, filter) - if err != nil { - t.Fatalf("failed to retrieve buckets: %v", err) - } - - // remove built in system buckets - filteredBuckets := []*influxdb.Bucket{} - for _, b := range buckets { - if b.Name != influxdb.TasksSystemBucketName && b.Name != influxdb.MonitoringSystemBucketName { - filteredBuckets = append(filteredBuckets, b) - } - } - - if diff := cmp.Diff(filteredBuckets, tt.wants.buckets, bucketCmpOptions...); diff != "" { - t.Errorf("buckets are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindBucket testing -func FindBucket( - init func(BucketFields, *testing.T) (influxdb.BucketService, string, func()), - t *testing.T, -) { - type args struct { - name string - organizationID platform.ID - id platform.ID - } - - type wants struct { - bucket *influxdb.Bucket - err error - } - - tests := []struct { - name string - fields BucketFields - args args - wants wants - }{ - { - name: "find bucket by name", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "abc", - RetentionPeriod: humanize.Week, - ShardGroupDuration: humanize.Day, - }, - { - // ID(2) - OrgID: idOne, - Name: "xyz", - }, - }, - }, - args: args{ - name: "abc", - organizationID: idOne, - }, - wants: wants{ - bucket: &influxdb.Bucket{ - ID: idOne, - OrgID: idOne, - Name: "abc", - RetentionPeriod: humanize.Week, - ShardGroupDuration: humanize.Day, - }, - }, - }, - { - name: "find bucket by id", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "abc", - }, - { - // ID(2) - OrgID: idOne, - Name: "xyz", - RetentionPeriod: humanize.Week, - ShardGroupDuration: humanize.Day, - }, - }, - }, 
- args: args{ - id: idTwo, - organizationID: idOne, - }, - wants: wants{ - bucket: &influxdb.Bucket{ - ID: idTwo, - OrgID: idOne, - Name: "xyz", - RetentionPeriod: humanize.Week, - ShardGroupDuration: humanize.Day, - }, - }, - }, - { - name: "missing bucket returns error", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{}, - }, - args: args{ - name: "xyz", - organizationID: idOne, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindBucket, - Msg: "bucket \"xyz\" not found", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - filter := influxdb.BucketFilter{} - if tt.args.name != "" { - filter.Name = &tt.args.name - } - if tt.args.id.Valid() { - filter.ID = &tt.args.id - } - if tt.args.organizationID.Valid() { - filter.OrganizationID = &tt.args.organizationID - } - - bucket, err := s.FindBucket(ctx, filter) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(bucket, tt.wants.bucket, bucketCmpOptions...); diff != "" { - t.Errorf("buckets are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// UpdateBucket testing -func UpdateBucket( - init func(BucketFields, *testing.T) (influxdb.BucketService, string, func()), - t *testing.T, -) { - type args struct { - name string - id platform.ID - retention int - shardDuration int - description *string - } - type wants struct { - err error - bucket *influxdb.Bucket - } - - tests := []struct { - name string - fields BucketFields - args args - wants wants - }{ - { - name: "update name", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "bucket1", - }, - { - // ID(2) - OrgID: idOne, - Name: "bucket2", - }, - }, - }, - args: args{ - id: idTwo, - name: "changed", - }, - wants: wants{ - bucket: &influxdb.Bucket{ - ID: idTwo, - OrgID: idOne, - Name: "changed", - CRUDLog: influxdb.CRUDLog{ - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - { - name: "update name unique", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "bucket1", - }, - { - // ID(2) - OrgID: idOne, - Name: "bucket2", - }, - }, - }, - args: args{ - id: idOne, - name: "bucket2", - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EConflict, - Msg: "bucket name is not unique", - }, - }, - }, - { - name: "update system bucket name", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, 
- Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Type: influxdb.BucketTypeSystem, - Name: "bucket1", - }, - }, - }, - args: args{ - id: idOne, - name: "bucket2", - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "system buckets cannot be renamed", - }, - }, - }, - { - name: "update retention", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "bucket1", - ShardGroupDuration: time.Hour, - }, - { - // ID(2) - OrgID: idOne, - Name: "bucket2", - }, - }, - }, - args: args{ - id: idOne, - retention: 100, - }, - wants: wants{ - bucket: &influxdb.Bucket{ - ID: idOne, - OrgID: idOne, - Name: "bucket1", - RetentionPeriod: 100 * time.Minute, - ShardGroupDuration: time.Hour, - CRUDLog: influxdb.CRUDLog{ - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - { - name: "update shard-group duration", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "bucket1", - RetentionPeriod: humanize.Day, - ShardGroupDuration: time.Hour, - }, - { - // ID(2) - OrgID: idOne, - Name: "bucket2", - }, - }, - }, - args: args{ - id: idOne, - shardDuration: 100, - }, - wants: wants{ - bucket: &influxdb.Bucket{ - ID: idOne, - OrgID: idOne, - Name: "bucket1", - RetentionPeriod: humanize.Day, - ShardGroupDuration: 100 * time.Minute, - CRUDLog: influxdb.CRUDLog{ - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - { - name: "update retention and shard-group duration", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "bucket1", - RetentionPeriod: humanize.Day, - ShardGroupDuration: time.Hour, - }, - { - // ID(2) - OrgID: idOne, - Name: "bucket2", - }, - }, - }, - args: args{ - id: idOne, - retention: 100, - shardDuration: 100, - }, - wants: wants{ - bucket: &influxdb.Bucket{ - ID: idOne, - OrgID: idOne, - Name: "bucket1", - RetentionPeriod: 100 * time.Minute, - ShardGroupDuration: 100 * time.Minute, - CRUDLog: influxdb.CRUDLog{ - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - { - name: "update description", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "bucket1", - }, - { - // ID(2) - OrgID: idOne, - Name: "bucket2", - }, - }, - }, - args: args{ - id: idOne, - description: stringPtr("desc1"), - }, - wants: wants{ - bucket: 
&influxdb.Bucket{ - ID: idOne, - OrgID: idOne, - Name: "bucket1", - Description: "desc1", - CRUDLog: influxdb.CRUDLog{ - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - { - name: "update retention and name", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "bucket1", - }, - { - // ID(2) - OrgID: idOne, - Name: "bucket2", - }, - }, - }, - args: args{ - id: idOne, - retention: 101, - name: "changed", - }, - wants: wants{ - bucket: &influxdb.Bucket{ - ID: idOne, - OrgID: idOne, - Name: "changed", - RetentionPeriod: 101 * time.Minute, - CRUDLog: influxdb.CRUDLog{ - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - { - name: "update retention and same name", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "bucket1", - }, - { - // ID(2) - OrgID: idOne, - Name: "bucket2", - }, - }, - }, - args: args{ - id: idTwo, - retention: 101, - name: "bucket2", - }, - wants: wants{ - bucket: &influxdb.Bucket{ - ID: idTwo, - OrgID: idOne, - Name: "bucket2", - RetentionPeriod: 101 * time.Minute, - CRUDLog: influxdb.CRUDLog{ - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - { - name: "update retention and same name", - fields: BucketFields{ - OrgIDs: mock.NewIncrementingIDGenerator(idOne), - BucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "theorg", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - OrgID: idOne, - Name: "bucket1", - }, - { - // ID(2) - OrgID: idOne, - Name: "bucket2", - }, - }, - }, - args: args{ - id: idTwo, - retention: 101, - name: "bucket2", - }, - wants: wants{ - bucket: &influxdb.Bucket{ - ID: idTwo, - OrgID: idOne, - Name: "bucket2", - RetentionPeriod: 101 * time.Minute, - CRUDLog: influxdb.CRUDLog{ - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - { - name: "update bucket with illegal quotation mark", - fields: BucketFields{ - OrgIDs: mock.NewStaticIDGenerator(idOne), - BucketIDs: mock.NewStaticIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "org", - }, - }, - Buckets: []*influxdb.Bucket{ - { - // ID(1) - Name: "valid name", - OrgID: idOne, - }, - }, - }, - args: args{ - id: idOne, - name: "namewith\"quote", - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EInvalid, - Op: influxdb.OpCreateBucket, - Msg: "bucket name namewith\"quote is invalid. 
Bucket names may not include quotation marks", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - upd := influxdb.BucketUpdate{} - if tt.args.name != "" { - upd.Name = &tt.args.name - } - if tt.args.retention != 0 { - d := time.Duration(tt.args.retention) * time.Minute - upd.RetentionPeriod = &d - } - if tt.args.shardDuration != 0 { - d := time.Duration(tt.args.shardDuration) * time.Minute - upd.ShardGroupDuration = &d - } - - upd.Description = tt.args.description - - bucket, err := s.UpdateBucket(ctx, tt.args.id, upd) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(bucket, tt.wants.bucket, bucketCmpOptions...); diff != "" { - t.Errorf("bucket is different -got/+want\ndiff %s", diff) - } - }) - } -} diff --git a/testing/dbrp_mapping.go b/testing/dbrp_mapping.go deleted file mode 100644 index 3a5d0ed8666..00000000000 --- a/testing/dbrp_mapping.go +++ /dev/null @@ -1,2100 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "fmt" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/dbrp" - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/pkg/errors" -) - -const ( - dbrpOrg1ID = "ba55ba55ba55ba55" - dbrpOrg2ID = "beadbeadbeadbead" - dbrpOrg3ID = "1005e1eaf1005e1e" - dbrpBucket1ID = "cab00d1ecab00d1e" - dbrpBucket2ID = "ca1fca1fca1fca1f" - dbrpBucketAID = "a55e55eda55e55ed" - dbrpBucketBID = "b1077edb1077eded" -) - -var DBRPMappingCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.DBRPMapping) []*influxdb.DBRPMapping { - out := make([]*influxdb.DBRPMapping, len(in)) - copy(out, in) // Copy input slice to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID < out[j].ID - }) - return out - }), -} - -type DBRPMappingFields struct { - BucketSvc influxdb.BucketService - DBRPMappingsV2 []*influxdb.DBRPMapping -} - -// Populate creates all entities in DBRPMappingFields. -func (f DBRPMappingFields) Populate(ctx context.Context, s influxdb.DBRPMappingService) error { - for _, m := range f.DBRPMappingsV2 { - if err := s.Create(ctx, m); err != nil { - return errors.Wrap(err, "failed to populate dbrp mappings") - } - } - return nil -} - -// DBRPMappingService tests all the service functions. -func DBRPMappingService( - init func(DBRPMappingFields, *testing.T) (influxdb.DBRPMappingService, func()), - t *testing.T, -) { - tests := []struct { - name string - fn func(init func(DBRPMappingFields, *testing.T) (influxdb.DBRPMappingService, func()), - t *testing.T) - }{ - { - name: "create", - fn: CreateDBRPMappingV2, - }, - { - name: "find by ID", - fn: FindDBRPMappingByIDV2, - }, - { - name: "find", - fn: FindManyDBRPMappingsV2, - }, - { - name: "update", - fn: UpdateDBRPMappingV2, - }, - { - name: "delete", - fn: DeleteDBRPMappingV2, - }, - { - name: "miscellaneous", - fn: MiscDBRPMappingV2, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -// CleanupDBRPMappingsV2 finds and removes all dbrp mappings. 
-func CleanupDBRPMappingsV2(ctx context.Context, s influxdb.DBRPMappingService) error { - mappings, _, err := s.FindMany(ctx, influxdb.DBRPMappingFilter{}) - if err != nil { - return errors.Wrap(err, "failed to retrieve all dbrp mappings") - } - - for _, m := range mappings { - if err := s.Delete(ctx, m.OrganizationID, m.ID); err != nil { - return errors.Wrapf(err, "failed to remove dbrp mapping %v", m.ID) - } - } - return nil -} - -func CreateDBRPMappingV2( - init func(DBRPMappingFields, *testing.T) (influxdb.DBRPMappingService, func()), - t *testing.T, -) { - type args struct { - dbrpMapping *influxdb.DBRPMapping - } - type wants struct { - err error - dbrpMappings []*influxdb.DBRPMapping - } - - tests := []struct { - name string - fields DBRPMappingFields - args args - wants wants - }{ - { - name: "basic create dbrp", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{}, - }, - args: args{ - dbrpMapping: &influxdb.DBRPMapping{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - // If there is only one mapping for a database, that is the default one. - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - }, - { - name: "create mapping for same db does not change default", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - }, - args: args{ - dbrpMapping: &influxdb.DBRPMapping{ - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - }, - }, - { - name: "create mapping for same db changes default", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - }, - args: args{ - dbrpMapping: &influxdb.DBRPMapping{ - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - }, - }, - { - name: "error on create 
existing dbrp with same ID", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{{ - OrganizationID: MustIDBase16(dbrpOrg1ID), - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - args: args{ - dbrpMapping: &influxdb.DBRPMapping{ - // NOTE(affo): in the "same ID" concept, orgID must match too! - OrganizationID: MustIDBase16(dbrpOrg1ID), - ID: 100, - Database: "database2", - RetentionPolicy: "retention_policy2", - Default: false, - BucketID: MustIDBase16(dbrpBucket2ID), - }, - }, - wants: wants{ - err: dbrp.ErrDBRPAlreadyExists("dbrp already exist for this particular ID. If you are trying an update use the right function .Update"), - dbrpMappings: []*influxdb.DBRPMapping{{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - }, - { - name: "error on create dbrp with same orgID, db and rp", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{{ - OrganizationID: MustIDBase16(dbrpOrg1ID), - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - args: args{ - dbrpMapping: &influxdb.DBRPMapping{ - OrganizationID: MustIDBase16(dbrpOrg1ID), - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - BucketID: MustIDBase16(dbrpBucket2ID), - }, - }, - wants: wants{ - err: dbrp.ErrDBRPAlreadyExists("another DBRP mapping with same orgID, db, and rp exists"), - dbrpMappings: []*influxdb.DBRPMapping{{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - }, - { - name: "error bucket does not exist", - fields: DBRPMappingFields{ - BucketSvc: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - if id == MustIDBase16(dbrpBucket2ID) { - return nil, &errors2.Error{ - Code: errors2.ENotFound, - Msg: "bucket not found", - } - } - return nil, nil - }, - FindBucketsFn: func(ctx context.Context, bf influxdb.BucketFilter, fo ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - return []*influxdb.Bucket{}, 0, nil - }}, - DBRPMappingsV2: []*influxdb.DBRPMapping{{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - args: args{ - dbrpMapping: &influxdb.DBRPMapping{ - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket2ID), - }, - }, - wants: wants{ - err: &errors2.Error{ - Code: errors2.ENotFound, - Msg: "bucket not found", - }, - dbrpMappings: []*influxdb.DBRPMapping{{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.Create(ctx, tt.args.dbrpMapping) - if (err != nil) != (tt.wants.err != nil) { - t.Errorf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err 
!= nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Errorf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - - dbrpMappings, n, err := s.FindMany(ctx, influxdb.DBRPMappingFilter{}) - if err != nil { - t.Fatalf("failed to retrieve dbrps: %v", err) - } - if n != len(tt.wants.dbrpMappings) { - t.Errorf("want dbrpMappings count of %d, got %d", len(tt.wants.dbrpMappings), n) - } - if diff := cmp.Diff(tt.wants.dbrpMappings, dbrpMappings, DBRPMappingCmpOptions...); diff != "" { - t.Errorf("dbrpMappings are different -want/+got\ndiff %s", diff) - } - }) - } -} - -func FindManyDBRPMappingsV2( - init func(DBRPMappingFields, *testing.T) (influxdb.DBRPMappingService, func()), - t *testing.T, -) { - type args struct { - filter influxdb.DBRPMappingFilter - } - - type wants struct { - dbrpMappings []*influxdb.DBRPMapping - err error - } - tests := []struct { - name string - fields DBRPMappingFields - args args - wants wants - }{ - { - name: "find all dbrps", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 200, - Database: "database2", - RetentionPolicy: "retention_policy2", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucket2ID), - }, - }, - }, - args: args{ - filter: influxdb.DBRPMappingFilter{}, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 200, - Database: "database2", - RetentionPolicy: "retention_policy2", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucket2ID), - }, - }, - }, - }, - { - name: "find by ID", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: MustIDBase16("1111111111111111"), - Database: "database1", - RetentionPolicy: "retention_policyA", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - { - ID: MustIDBase16("2222222222222222"), - Database: "database2", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - }, - }, - args: args{ - filter: influxdb.DBRPMappingFilter{ - ID: MustIDBase16Ptr("1111111111111111"), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: MustIDBase16("1111111111111111"), - Database: "database1", - RetentionPolicy: "retention_policyA", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - }, - }, - }, - { - name: "find by bucket ID", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policyA", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - { - ID: 200, - Database: "database2", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - }, - }, - args: args{ - filter: influxdb.DBRPMappingFilter{ - BucketID: MustIDBase16Ptr(dbrpBucketBID), - }, - }, - wants: wants{ - dbrpMappings: 
[]*influxdb.DBRPMapping{ - { - ID: 200, - Database: "database2", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - }, - }, - }, - { - name: "find by orgID", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database", - RetentionPolicy: "retention_policyA", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - { - ID: 200, - Database: "database", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - { - ID: 300, - Database: "database1", - RetentionPolicy: "retention_policyA", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - { - ID: 400, - Database: "database1", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - { - ID: 500, - Database: "database2", - RetentionPolicy: "retention_policyA", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - }, - }, - args: args{ - filter: influxdb.DBRPMappingFilter{ - OrgID: MustIDBase16Ptr(dbrpOrg3ID), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: 300, - Database: "database1", - RetentionPolicy: "retention_policyA", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - { - ID: 400, - Database: "database1", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - { - ID: 500, - Database: "database2", - RetentionPolicy: "retention_policyA", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - }, - }, - }, - { - name: "find virtual by orgID", - fields: DBRPMappingFields{ - BucketSvc: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - if id == MustIDBase16(dbrpBucket2ID) { - return nil, &errors2.Error{ - Code: errors2.ENotFound, - Msg: "bucket not found", - } - } - return nil, nil - }, - FindBucketsFn: func(ctx context.Context, bf influxdb.BucketFilter, fo ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - return []*influxdb.Bucket{ - // org 3 - {ID: 100, Name: "testdb", OrgID: MustIDBase16(dbrpOrg3ID)}, - {ID: 200, Name: "testdb2/testrp2", OrgID: MustIDBase16(dbrpOrg3ID)}, - // org 2 - {ID: 300, Name: "testdb3", OrgID: MustIDBase16(dbrpOrg2ID)}, - {ID: 400, Name: "testdb4/testrp4", OrgID: MustIDBase16(dbrpOrg2ID)}, - }, 0, nil - }}, - DBRPMappingsV2: []*influxdb.DBRPMapping{}, - }, - args: args{ - filter: influxdb.DBRPMappingFilter{ - OrgID: MustIDBase16Ptr(dbrpOrg3ID), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "testdb", - RetentionPolicy: "autogen", - Default: true, - Virtual: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: 100, - }, - { - ID: 200, - Database: "testdb2", - RetentionPolicy: "testrp2", - Default: false, - Virtual: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: 200, - }, - }, - }, - }, - { - name: "find virtual by database", - fields: DBRPMappingFields{ - BucketSvc: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) 
(*influxdb.Bucket, error) { - if id == MustIDBase16(dbrpBucket2ID) { - return nil, &errors2.Error{ - Code: errors2.ENotFound, - Msg: "bucket not found", - } - } - return nil, nil - }, - FindBucketsFn: func(ctx context.Context, bf influxdb.BucketFilter, fo ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - return []*influxdb.Bucket{ - // org 3 - {ID: 100, Name: "testdb", OrgID: MustIDBase16(dbrpOrg3ID)}, - {ID: 200, Name: "testdb2/testrp2", OrgID: MustIDBase16(dbrpOrg3ID)}, - // org 2 - {ID: 300, Name: "testdb3", OrgID: MustIDBase16(dbrpOrg2ID)}, - {ID: 400, Name: "testdb4/testrp4", OrgID: MustIDBase16(dbrpOrg2ID)}, - {ID: 500, Name: "testdb4", OrgID: MustIDBase16(dbrpOrg2ID)}, - }, 0, nil - }}, - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 500, - Database: "testdb4", - RetentionPolicy: "autogen", - Default: true, - Virtual: false, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: 500, - }, - }, - }, - args: args{ - filter: influxdb.DBRPMappingFilter{ - Database: strPtr("testdb4"), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: 500, - Database: "testdb4", - RetentionPolicy: "autogen", - Default: true, - Virtual: false, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: 500, - }, - { - ID: 400, - Database: "testdb4", - RetentionPolicy: "testrp4", - Default: false, - Virtual: true, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: 400, - }, - }, - }, - }, - { - name: "find virtual by database autogen", - fields: DBRPMappingFields{ - BucketSvc: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - if id == MustIDBase16(dbrpBucket2ID) { - return nil, &errors2.Error{ - Code: errors2.ENotFound, - Msg: "bucket not found", - } - } - return nil, nil - }, - FindBucketsFn: func(ctx context.Context, bf influxdb.BucketFilter, fo ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - return []*influxdb.Bucket{ - // org 3 - {ID: 100, Name: "testdb", OrgID: MustIDBase16(dbrpOrg3ID)}, - {ID: 200, Name: "testdb2/testrp2", OrgID: MustIDBase16(dbrpOrg3ID)}, - // org 2 - {ID: 300, Name: "testdb3", OrgID: MustIDBase16(dbrpOrg2ID)}, - {ID: 400, Name: "testdb4/testrp4", OrgID: MustIDBase16(dbrpOrg2ID)}, - }, 0, nil - }}, - DBRPMappingsV2: []*influxdb.DBRPMapping{}, - }, - args: args{ - filter: influxdb.DBRPMappingFilter{ - Database: strPtr("testdb"), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "testdb", - RetentionPolicy: "autogen", - Default: true, - Virtual: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: 100, - }, - }, - }, - }, - { - name: "find virtual by rp", - fields: DBRPMappingFields{ - BucketSvc: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - if id == MustIDBase16(dbrpBucket2ID) { - return nil, &errors2.Error{ - Code: errors2.ENotFound, - Msg: "bucket not found", - } - } - return nil, nil - }, - FindBucketsFn: func(ctx context.Context, bf influxdb.BucketFilter, fo ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - return []*influxdb.Bucket{ - // org 3 - {ID: 100, Name: "testdb", OrgID: MustIDBase16(dbrpOrg3ID)}, - {ID: 200, Name: "testdb2/testrp2", OrgID: MustIDBase16(dbrpOrg3ID)}, - // org 2 - {ID: 300, Name: "testdb3", OrgID: MustIDBase16(dbrpOrg2ID)}, - {ID: 400, Name: "testdb4/testrp4", OrgID: MustIDBase16(dbrpOrg2ID)}, - }, 0, nil - }}, - DBRPMappingsV2: []*influxdb.DBRPMapping{}, - }, - args: args{ - filter: 
influxdb.DBRPMappingFilter{ - RetentionPolicy: stringPtr("autogen"), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "testdb", - RetentionPolicy: "autogen", - Default: true, - Virtual: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: 100, - }, - { - ID: 300, - Database: "testdb3", - RetentionPolicy: "autogen", - Default: true, - Virtual: true, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: 300, - }, - }, - }, - }, - { - name: "find by db", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policyA", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - { - ID: 200, - Database: "database2", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - }, - }, - args: args{ - filter: influxdb.DBRPMappingFilter{ - Database: stringPtr("database1"), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policyA", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - }, - }, - }, - { - name: "find by rp", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database", - RetentionPolicy: "retention_policyA", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - { - ID: 200, - Database: "database", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - }, - }, - args: args{ - filter: influxdb.DBRPMappingFilter{ - RetentionPolicy: stringPtr("retention_policyB"), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: 200, - Database: "database", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - }, - }, - }, - { - name: "find by default", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database", - RetentionPolicy: "retention_policyA", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - { - ID: 200, - Database: "database", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - }, - }, - args: args{ - filter: influxdb.DBRPMappingFilter{ - Default: boolPtr(true), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: 200, - Database: "database", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - }, - }, - }, - { - name: "find default", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: MustIDBase16("0000000000000100"), - Database: "database", - RetentionPolicy: "retention_policyA", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - { - ID: MustIDBase16("0000000000000200"), - Database: "database", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - { - ID: 
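The "virtual" cases above encode a naming convention: a bucket named `db/rp` is surfaced as database `db` with retention policy `rp`, while a bucket whose name has no slash maps to retention policy `autogen` and is treated as the default mapping. A small hedged sketch of that split follows (not from the influxdb codebase; splitting on the first `/` is an assumption based only on the cases shown, and the helper name is hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// virtualDBRP illustrates the mapping the tests expect:
//   "testdb"          -> db "testdb",  rp "autogen",  default
//   "testdb2/testrp2" -> db "testdb2", rp "testrp2",  not default
// Splitting on the first '/' is an assumption drawn from these cases only.
func virtualDBRP(bucketName string) (db, rp string, isDefault bool) {
	parts := strings.SplitN(bucketName, "/", 2)
	if len(parts) == 1 {
		return parts[0], "autogen", true
	}
	return parts[0], parts[1], false
}

func main() {
	for _, name := range []string{"testdb", "testdb2/testrp2"} {
		db, rp, def := virtualDBRP(name)
		fmt.Printf("%-18s -> db=%s rp=%s default=%v\n", name, db, rp, def)
	}
}
```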
MustIDBase16("0000000000000300"), - Database: "database", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - }, - }, - args: args{ - filter: influxdb.DBRPMappingFilter{ - OrgID: MustIDBase16Ptr(dbrpOrg3ID), - Database: stringPtr("database"), - Default: boolPtr(true), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: MustIDBase16("0000000000000200"), - Database: "database", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - }, - }, - }, - { - name: "mixed", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policyA", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - { - ID: 200, - Database: "database2", - RetentionPolicy: "retention_policyA", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - { - ID: 300, - Database: "database1", - RetentionPolicy: "retention_policyA", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - // This one will substitute 200 as default for "database2". - { - ID: 400, - Database: "database2", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - }, - }, - args: args{ - filter: influxdb.DBRPMappingFilter{ - RetentionPolicy: stringPtr("retention_policyA"), - Default: boolPtr(true), - OrgID: MustIDBase16Ptr(dbrpOrg3ID), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policyA", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - }, - }, - }, - { - name: "not found", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - Database: "database1", - RetentionPolicy: "retention_policyA", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - { - Database: "database1", - RetentionPolicy: "retention_policyB", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - { - Database: "database1", - RetentionPolicy: "retention_policyA", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - { - Database: "database2", - RetentionPolicy: "retention_policyB", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketBID), - }, - }, - }, - args: args{ - filter: influxdb.DBRPMappingFilter{ - Database: stringPtr("database1"), - RetentionPolicy: stringPtr("retention_policyC"), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - dbrpMappings, _, err := s.FindMany(ctx, tt.args.filter) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected errors to be equal '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - } - - if diff := 
cmp.Diff(dbrpMappings, tt.wants.dbrpMappings, DBRPMappingCmpOptions...); diff != "" { - t.Errorf("dbrpMappings are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func FindDBRPMappingByIDV2( - init func(DBRPMappingFields, *testing.T) (influxdb.DBRPMappingService, func()), - t *testing.T, -) { - type args struct { - OrgID platform.ID - ID platform.ID - } - - type wants struct { - dbrpMapping *influxdb.DBRPMapping - err error - } - - tests := []struct { - name string - fields DBRPMappingFields - args args - wants wants - }{ - { - name: "find existing dbrp", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database", - RetentionPolicy: "retention_policyA", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - { - ID: 200, - Database: "database", - RetentionPolicy: "retention_policyB", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - { - ID: 300, - Database: "database", - RetentionPolicy: "retention_policyC", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - }, - }, - args: args{ - OrgID: MustIDBase16(dbrpOrg3ID), - ID: 200, - }, - wants: wants{ - dbrpMapping: &influxdb.DBRPMapping{ - ID: 200, - Database: "database", - RetentionPolicy: "retention_policyB", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - }, - }, - { - name: "find non existing dbrp", - fields: DBRPMappingFields{ - BucketSvc: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - if id == MustIDBase16(dbrpBucketAID) { - return &influxdb.Bucket{ID: id}, nil - } - return nil, &errors2.Error{ - Code: errors2.ENotFound, - Msg: "bucket not found", - } - }, - FindBucketsFn: func(ctx context.Context, bf influxdb.BucketFilter, fo ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - return []*influxdb.Bucket{}, 0, nil - }}, - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database", - RetentionPolicy: "retention_policyA", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - }, - }, - args: args{ - OrgID: MustIDBase16(dbrpOrg3ID), - ID: 200, - }, - wants: wants{ - err: dbrp.ErrDBRPNotFound, - }, - }, - { - name: "find virtual dbrp with slash", - fields: DBRPMappingFields{ - BucketSvc: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - if id == MustIDBase16(dbrpBucketAID) { - return &influxdb.Bucket{ID: id, Name: "testdb/testrp", OrgID: MustIDBase16(dbrpOrg3ID)}, nil - } - return nil, &errors2.Error{ - Code: errors2.ENotFound, - Msg: "bucket not found", - } - }, - FindBucketsFn: func(ctx context.Context, bf influxdb.BucketFilter, fo ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - return []*influxdb.Bucket{}, 0, nil - }}, - DBRPMappingsV2: []*influxdb.DBRPMapping{}, - }, - args: args{ - OrgID: MustIDBase16(dbrpOrg3ID), - ID: MustIDBase16(dbrpBucketAID), - }, - wants: wants{ - dbrpMapping: &influxdb.DBRPMapping{ - ID: MustIDBase16(dbrpBucketAID), - Database: "testdb", - RetentionPolicy: "testrp", - Default: false, - Virtual: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - }, - }, - { - name: "find virtual dbrp", - fields: DBRPMappingFields{ - BucketSvc: &mock.BucketService{ - 
FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - if id == MustIDBase16(dbrpBucketAID) { - return &influxdb.Bucket{ID: id, Name: "testdb", OrgID: MustIDBase16(dbrpOrg3ID)}, nil - } - return nil, &errors2.Error{ - Code: errors2.ENotFound, - Msg: "bucket not found", - } - }, - FindBucketsFn: func(ctx context.Context, bf influxdb.BucketFilter, fo ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - return []*influxdb.Bucket{}, 0, nil - }}, - DBRPMappingsV2: []*influxdb.DBRPMapping{}, - }, - args: args{ - OrgID: MustIDBase16(dbrpOrg3ID), - ID: MustIDBase16(dbrpBucketAID), - }, - wants: wants{ - dbrpMapping: &influxdb.DBRPMapping{ - ID: MustIDBase16(dbrpBucketAID), - Database: "testdb", - RetentionPolicy: "autogen", - Default: true, - Virtual: true, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - }, - }, - { - name: "find existing dbrp but wrong orgID", - fields: DBRPMappingFields{ - BucketSvc: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - if id == MustIDBase16(dbrpBucketAID) { - return &influxdb.Bucket{ID: id}, nil - } - return nil, &errors2.Error{ - Code: errors2.ENotFound, - Msg: "bucket not found", - } - }, - FindBucketsFn: func(ctx context.Context, bf influxdb.BucketFilter, fo ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - return []*influxdb.Bucket{}, 0, nil - }}, - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database", - RetentionPolicy: "retention_policyA", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg3ID), - BucketID: MustIDBase16(dbrpBucketAID), - }, - }, - }, - args: args{ - OrgID: MustIDBase16(dbrpOrg2ID), - ID: 100, - }, - wants: wants{ - err: dbrp.ErrDBRPNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - dbrpMapping, err := s.FindByID(ctx, tt.args.OrgID, tt.args.ID) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - - if diff := cmp.Diff(dbrpMapping, tt.wants.dbrpMapping, DBRPMappingCmpOptions...); diff != "" { - t.Errorf("dbrpMappings are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func UpdateDBRPMappingV2( - init func(DBRPMappingFields, *testing.T) (influxdb.DBRPMappingService, func()), - t *testing.T, -) { - type args struct { - dbrpMapping *influxdb.DBRPMapping - } - type wants struct { - err error - dbrpMappings []*influxdb.DBRPMapping - } - - tests := []struct { - name string - fields DBRPMappingFields - args args - wants wants - }{ - { - name: "basic update", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - }, - args: args{ - dbrpMapping: &influxdb.DBRPMapping{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy2", - 
Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - }, - { - name: "update invalid dbrp", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - args: args{ - dbrpMapping: &influxdb.DBRPMapping{ - ID: 100, - Database: "./", // invalid db name. - RetentionPolicy: "retention_policy2", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket2ID), - }, - }, - wants: wants{ - err: dbrp.ErrInvalidDBRP(fmt.Errorf("database must contain at least one character and only be letters, numbers, '_', '-', and '.'")), - dbrpMappings: []*influxdb.DBRPMapping{{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - }, - { - name: "error dbrp not found", - fields: DBRPMappingFields{ - BucketSvc: &mock.BucketService{ - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - if id == MustIDBase16(dbrpBucket1ID) { - return &influxdb.Bucket{ID: id}, nil - } - return nil, &errors2.Error{ - Code: errors2.ENotFound, - Msg: "bucket not found", - } - }, - FindBucketsFn: func(ctx context.Context, bf influxdb.BucketFilter, fo ...influxdb.FindOptions) ([]*influxdb.Bucket, int, error) { - return []*influxdb.Bucket{}, 0, nil - }}, - DBRPMappingsV2: []*influxdb.DBRPMapping{{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - args: args{ - dbrpMapping: &influxdb.DBRPMapping{ - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket2ID), - }, - }, - wants: wants{ - err: dbrp.ErrDBRPNotFound, - dbrpMappings: []*influxdb.DBRPMapping{{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - }, - { - name: "update unchangeable fields", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - args: args{ - dbrpMapping: &influxdb.DBRPMapping{ - ID: 100, - Database: "wont_change", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket2ID), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - }, - { - name: "update to same orgID, db, and rp", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: 
true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - }, - args: args{ - dbrpMapping: &influxdb.DBRPMapping{ - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - wants: wants{ - err: dbrp.ErrDBRPAlreadyExists("another DBRP mapping with same orgID, db, and rp exists"), - dbrpMappings: []*influxdb.DBRPMapping{{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - }, - }, - { - name: "update default when only one dbrp is present", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - args: args{ - dbrpMapping: &influxdb.DBRPMapping{ - ID: 100, - Database: "wont_change", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket2ID), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{{ - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }}, - }, - }, - { - name: "set default when more dbrps are present", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 300, - Database: "database1", - RetentionPolicy: "retention_policy3", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - }, - args: args{ - dbrpMapping: &influxdb.DBRPMapping{ - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 300, - Database: "database1", - RetentionPolicy: "retention_policy3", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - }, - }, - { - name: "unset default when more dbrps are present", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - 
BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 300, - Database: "database1", - RetentionPolicy: "retention_policy3", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - }, - args: args{ - dbrpMapping: &influxdb.DBRPMapping{ - ID: 300, - Database: "database1", - RetentionPolicy: "retention_policy3", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 300, - Database: "database1", - RetentionPolicy: "retention_policy3", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.Update(ctx, tt.args.dbrpMapping) - if (err != nil) != (tt.wants.err != nil) { - t.Errorf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Errorf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - - dbrpMappings, _, err := s.FindMany(ctx, influxdb.DBRPMappingFilter{}) - if err != nil { - t.Fatalf("failed to retrieve dbrps: %v", err) - } - if diff := cmp.Diff(dbrpMappings, tt.wants.dbrpMappings, DBRPMappingCmpOptions...); diff != "" { - t.Errorf("dbrpMappings are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func DeleteDBRPMappingV2( - init func(DBRPMappingFields, *testing.T) (influxdb.DBRPMappingService, func()), - t *testing.T, -) { - type args struct { - OrgID platform.ID - ID platform.ID - } - type wants struct { - err error - dbrpMappings []*influxdb.DBRPMapping - } - - tests := []struct { - name string - fields DBRPMappingFields - args args - wants wants - }{ - { - name: "delete existing dbrp", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 200, - Database: "database2", - RetentionPolicy: "retention_policy2", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucket2ID), - }, - }, - }, - args: args{ - OrgID: MustIDBase16(dbrpOrg1ID), - ID: 100, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{{ - ID: 200, - Database: "database2", - RetentionPolicy: "retention_policy2", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucket2ID), - }}, - }, - }, - { - name: "delete default dbrp", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - 
BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 200, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 300, - Database: "database1", - RetentionPolicy: "retention_policy3", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 400, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 500, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - }, - args: args{ - OrgID: MustIDBase16(dbrpOrg1ID), - ID: 200, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - // The first one becomes the default one. - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 300, - Database: "database1", - RetentionPolicy: "retention_policy3", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 400, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 500, - Database: "database1", - RetentionPolicy: "retention_policy2", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - }, - }, - }, - { - name: "delete non-existing dbrp", - fields: DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 200, - Database: "database2", - RetentionPolicy: "retention_policy2", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucket2ID), - }, - }, - }, - args: args{ - OrgID: MustIDBase16(dbrpOrg2ID), - ID: 150, - }, - wants: wants{ - dbrpMappings: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 200, - Database: "database2", - RetentionPolicy: "retention_policy2", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucket2ID), - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.Delete(ctx, tt.args.OrgID, tt.args.ID) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - - filter := influxdb.DBRPMappingFilter{} - dbrpMappings, _, err := s.FindMany(ctx, filter) - if err != nil { - t.Fatalf("failed to retrieve dbrps: %v", err) - } - if diff := cmp.Diff(dbrpMappings, tt.wants.dbrpMappings, DBRPMappingCmpOptions...); diff != "" { - t.Errorf("dbrpMappings are different -got/+want\ndiff 
%s", diff) - } - }) - } -} - -func MiscDBRPMappingV2( - init func(DBRPMappingFields, *testing.T) (influxdb.DBRPMappingService, func()), - t *testing.T, -) { - fields := DBRPMappingFields{ - DBRPMappingsV2: []*influxdb.DBRPMapping{ - { - ID: 100, - Database: "database1", - RetentionPolicy: "retention_policy1", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg1ID), - BucketID: MustIDBase16(dbrpBucket1ID), - }, - { - ID: 200, - Database: "database2", - RetentionPolicy: "retention_policy2", - Default: true, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucket2ID), - }, - }, - } - s, done := init(fields, t) - defer done() - ctx := context.Background() - - t.Run("defaults are ok", func(t *testing.T) { - if !fields.DBRPMappingsV2[0].Default || !fields.DBRPMappingsV2[1].Default { - t.Errorf("should be default") - } - }) - - t.Run("what is inited is present", func(t *testing.T) { - filter := influxdb.DBRPMappingFilter{} - dbrpMappings, _, err := s.FindMany(ctx, filter) - if err != nil { - t.Fatalf("failed to retrieve dbrps: %v", err) - } - if diff := cmp.Diff(dbrpMappings, fields.DBRPMappingsV2, DBRPMappingCmpOptions...); diff != "" { - t.Errorf("dbrpMappings are different -got/+want\ndiff %s", diff) - } - }) - - t.Run("delete works", func(t *testing.T) { - err := s.Delete(ctx, fields.DBRPMappingsV2[0].OrganizationID, fields.DBRPMappingsV2[0].ID) - if err != nil { - t.Fatalf("failed to delete: %v", err) - } - err = s.Delete(ctx, fields.DBRPMappingsV2[1].OrganizationID, fields.DBRPMappingsV2[1].ID) - if err != nil { - t.Fatalf("failed to delete: %v", err) - } - }) - - t.Run("nothing left", func(t *testing.T) { - filter := influxdb.DBRPMappingFilter{} - dbrpMappings, _, err := s.FindMany(ctx, filter) - if err != nil { - t.Fatalf("failed to retrieve dbrps: %v", err) - } - if diff := cmp.Diff(dbrpMappings, []*influxdb.DBRPMapping{}, DBRPMappingCmpOptions...); diff != "" { - t.Errorf("dbrpMappings are different -got/+want\ndiff %s", diff) - } - }) - - t.Run("new one is still ok", func(t *testing.T) { - m := &influxdb.DBRPMapping{ - ID: 300, - Database: "database2", - RetentionPolicy: "retention_policy2", - Default: false, - OrganizationID: MustIDBase16(dbrpOrg2ID), - BucketID: MustIDBase16(dbrpBucket2ID), - } - if err := s.Create(ctx, m); err != nil { - t.Fatalf("failed to create: %v", err) - } - got, err := s.FindByID(ctx, m.OrganizationID, m.ID) - if err != nil { - t.Fatalf("failed to retrieve dbrp: %v", err) - } - if diff := cmp.Diff(m, got, DBRPMappingCmpOptions...); diff != "" { - t.Errorf("dbrpMappings are different -got/+want\ndiff %s", diff) - } - if !m.Default { - t.Errorf("should be default") - } - }) -} diff --git a/testing/id.go b/testing/id.go deleted file mode 100644 index 6d0da503e86..00000000000 --- a/testing/id.go +++ /dev/null @@ -1,8 +0,0 @@ -package testing - -import ( - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// IDPtr returns a pointer to an influxdb.ID. 
-func IDPtr(id platform.ID) *platform.ID { return &id } diff --git a/testing/index.go b/testing/index.go deleted file mode 100644 index 69b8d348a72..00000000000 --- a/testing/index.go +++ /dev/null @@ -1,422 +0,0 @@ -package testing - -import ( - "context" - "encoding/json" - "fmt" - "reflect" - "sort" - "testing" - - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration" -) - -const ( - someResourceBucket = "aresource" -) - -var ( - mapping = kv.NewIndexMapping([]byte(someResourceBucket), []byte("aresourcebyowneridv1"), func(body []byte) ([]byte, error) { - var resource someResource - if err := json.Unmarshal(body, &resource); err != nil { - return nil, err - } - - return []byte(resource.OwnerID), nil - }) -) - -type someResource struct { - ID string - OwnerID string -} - -type someResourceStore struct { - store kv.Store - ownerIDIndex *kv.Index -} - -type tester interface { - Helper() - Fatal(...interface{}) -} - -func newSomeResourceStore(t tester, ctx context.Context, store kv.SchemaStore) *someResourceStore { - t.Helper() - - if err := migration.CreateBuckets("create the aresource bucket", []byte(someResourceBucket)).Up(ctx, store); err != nil { - t.Fatal(err) - } - - if err := kv.NewIndexMigration(mapping).Up(ctx, store); err != nil { - t.Fatal(err) - } - - return &someResourceStore{ - store: store, - ownerIDIndex: kv.NewIndex(mapping), - } -} - -func (s *someResourceStore) FindByOwner(ctx context.Context, ownerID string) (resources []someResource, err error) { - err = s.store.View(ctx, func(tx kv.Tx) error { - return s.ownerIDIndex.Walk(ctx, tx, []byte(ownerID), func(k, v []byte) (bool, error) { - var resource someResource - if err := json.Unmarshal(v, &resource); err != nil { - return false, err - } - - resources = append(resources, resource) - return true, nil - }) - }) - return -} - -func (s *someResourceStore) Create(ctx context.Context, resource someResource, index bool) error { - return s.store.Update(ctx, func(tx kv.Tx) error { - bkt, err := tx.Bucket(mapping.SourceBucket()) - if err != nil { - return err - } - - if index { - if err := s.ownerIDIndex.Insert(tx, []byte(resource.OwnerID), []byte(resource.ID)); err != nil { - return err - } - } - - data, err := json.Marshal(resource) - if err != nil { - return err - } - - return bkt.Put([]byte(resource.ID), data) - }) -} - -func newResource(id, owner string) someResource { - return someResource{ID: id, OwnerID: owner} -} - -func newNResources(n int) (resources []someResource) { - return newNResourcesWithUserCount(n, 5) -} - -func newNResourcesWithUserCount(n, userCount int) (resources []someResource) { - for i := 0; i < n; i++ { - var ( - id = fmt.Sprintf("resource %d", i) - owner = fmt.Sprintf("owner %d", i%userCount) - ) - resources = append(resources, newResource(id, owner)) - } - return -} - -func TestIndex(t *testing.T, store kv.SchemaStore) { - t.Run("Test_PopulateAndVerify", func(t *testing.T) { - testPopulateAndVerify(t, store) - }) - - t.Run("Test_Walk", func(t *testing.T) { - testWalk(t, store) - }) -} - -func testPopulateAndVerify(t *testing.T, store kv.SchemaStore) { - var ( - ctx = context.TODO() - resources = newNResources(20) - resourceStore = newSomeResourceStore(t, ctx, store) - ) - - // insert 20 resources, but only index the first half - for i, resource := range resources { - if err := resourceStore.Create(ctx, resource, i < len(resources)/2); err != nil { - t.Fatal(err) - } - } - - // check that the index is populated with only 10 items - var count int - 
store.View(ctx, func(tx kv.Tx) error { - kvs, err := allKVs(tx, mapping.IndexBucket()) - if err != nil { - return err - } - - count = len(kvs) - - return nil - }) - - if count > 10 { - t.Errorf("expected index to be empty, found %d items", count) - } - - // ensure verify identifies the 10 missing items from the index - diff, err := resourceStore.ownerIDIndex.Verify(ctx, store) - if err != nil { - t.Fatal(err) - } - - expected := kv.IndexDiff{ - PresentInIndex: map[string]map[string]struct{}{ - "owner 0": {"resource 0": {}, "resource 5": {}}, - "owner 1": {"resource 1": {}, "resource 6": {}}, - "owner 2": {"resource 2": {}, "resource 7": {}}, - "owner 3": {"resource 3": {}, "resource 8": {}}, - "owner 4": {"resource 4": {}, "resource 9": {}}, - }, - MissingFromIndex: map[string]map[string]struct{}{ - "owner 0": {"resource 10": {}, "resource 15": {}}, - "owner 1": {"resource 11": {}, "resource 16": {}}, - "owner 2": {"resource 12": {}, "resource 17": {}}, - "owner 3": {"resource 13": {}, "resource 18": {}}, - "owner 4": {"resource 14": {}, "resource 19": {}}, - }, - } - - if !reflect.DeepEqual(expected, diff) { - t.Errorf("expected %#v, found %#v", expected, diff) - } - - corrupt := diff.Corrupt() - sort.Strings(corrupt) - - if expected := []string{ - "owner 0", - "owner 1", - "owner 2", - "owner 3", - "owner 4", - }; !reflect.DeepEqual(expected, corrupt) { - t.Errorf("expected %#v, found %#v\n", expected, corrupt) - } - - // populate the missing indexes - - if err = kv.NewIndexMigration(mapping).Up(ctx, store); err != nil { - t.Errorf("unexpected err %v", err) - } - - // check the contents of the index - var allKvs [][2][]byte - store.View(ctx, func(tx kv.Tx) (err error) { - allKvs, err = allKVs(tx, mapping.IndexBucket()) - return - }) - - if expected := [][2][]byte{ - {[]byte("owner 0/resource 0"), []byte("resource 0")}, - {[]byte("owner 0/resource 10"), []byte("resource 10")}, - {[]byte("owner 0/resource 15"), []byte("resource 15")}, - {[]byte("owner 0/resource 5"), []byte("resource 5")}, - {[]byte("owner 1/resource 1"), []byte("resource 1")}, - {[]byte("owner 1/resource 11"), []byte("resource 11")}, - {[]byte("owner 1/resource 16"), []byte("resource 16")}, - {[]byte("owner 1/resource 6"), []byte("resource 6")}, - {[]byte("owner 2/resource 12"), []byte("resource 12")}, - {[]byte("owner 2/resource 17"), []byte("resource 17")}, - {[]byte("owner 2/resource 2"), []byte("resource 2")}, - {[]byte("owner 2/resource 7"), []byte("resource 7")}, - {[]byte("owner 3/resource 13"), []byte("resource 13")}, - {[]byte("owner 3/resource 18"), []byte("resource 18")}, - {[]byte("owner 3/resource 3"), []byte("resource 3")}, - {[]byte("owner 3/resource 8"), []byte("resource 8")}, - {[]byte("owner 4/resource 14"), []byte("resource 14")}, - {[]byte("owner 4/resource 19"), []byte("resource 19")}, - {[]byte("owner 4/resource 4"), []byte("resource 4")}, - {[]byte("owner 4/resource 9"), []byte("resource 9")}, - }; !reflect.DeepEqual(allKvs, expected) { - t.Errorf("expected %#v, found %#v", expected, allKvs) - } - - // remove the last 10 items from the source, but leave them in the index - store.Update(ctx, func(tx kv.Tx) error { - bkt, err := tx.Bucket(mapping.SourceBucket()) - if err != nil { - t.Fatal(err) - } - - for _, resource := range resources[10:] { - bkt.Delete([]byte(resource.ID)) - } - - return nil - }) - - // ensure verify identifies the last 10 items as missing from the source - diff, err = resourceStore.ownerIDIndex.Verify(ctx, store) - if err != nil { - t.Fatal(err) - } - - expected = kv.IndexDiff{ - 
PresentInIndex: map[string]map[string]struct{}{ - "owner 0": {"resource 0": {}, "resource 5": {}, "resource 10": {}, "resource 15": {}}, - "owner 1": {"resource 1": {}, "resource 6": {}, "resource 11": {}, "resource 16": {}}, - "owner 2": {"resource 2": {}, "resource 7": {}, "resource 12": {}, "resource 17": {}}, - "owner 3": {"resource 3": {}, "resource 8": {}, "resource 13": {}, "resource 18": {}}, - "owner 4": {"resource 4": {}, "resource 9": {}, "resource 14": {}, "resource 19": {}}, - }, - MissingFromSource: map[string]map[string]struct{}{ - "owner 0": {"resource 10": {}, "resource 15": {}}, - "owner 1": {"resource 11": {}, "resource 16": {}}, - "owner 2": {"resource 12": {}, "resource 17": {}}, - "owner 3": {"resource 13": {}, "resource 18": {}}, - "owner 4": {"resource 14": {}, "resource 19": {}}, - }, - } - if !reflect.DeepEqual(expected, diff) { - t.Errorf("expected %#v, found %#v", expected, diff) - } -} - -func testWalk(t *testing.T, store kv.SchemaStore) { - var ( - ctx = context.TODO() - resources = newNResources(20) - // configure resource store with read disabled - resourceStore = newSomeResourceStore(t, ctx, store) - - cases = []struct { - owner string - resources []someResource - }{ - { - owner: "owner 0", - resources: []someResource{ - newResource("resource 0", "owner 0"), - newResource("resource 10", "owner 0"), - newResource("resource 15", "owner 0"), - newResource("resource 5", "owner 0"), - }, - }, - { - owner: "owner 1", - resources: []someResource{ - newResource("resource 1", "owner 1"), - newResource("resource 11", "owner 1"), - newResource("resource 16", "owner 1"), - newResource("resource 6", "owner 1"), - }, - }, - { - owner: "owner 2", - resources: []someResource{ - newResource("resource 12", "owner 2"), - newResource("resource 17", "owner 2"), - newResource("resource 2", "owner 2"), - newResource("resource 7", "owner 2"), - }, - }, - { - owner: "owner 3", - resources: []someResource{ - newResource("resource 13", "owner 3"), - newResource("resource 18", "owner 3"), - newResource("resource 3", "owner 3"), - newResource("resource 8", "owner 3"), - }, - }, - { - owner: "owner 4", - resources: []someResource{ - newResource("resource 14", "owner 4"), - newResource("resource 19", "owner 4"), - newResource("resource 4", "owner 4"), - newResource("resource 9", "owner 4"), - }, - }, - } - ) - - // insert all 20 resources with indexing enabled - for _, resource := range resources { - if err := resourceStore.Create(ctx, resource, true); err != nil { - t.Fatal(err) - } - } - - for _, testCase := range cases { - found, err := resourceStore.FindByOwner(ctx, testCase.owner) - if err != nil { - t.Fatal(err) - } - - // expect resources to be empty while read path disabled disabled - if len(found) > 0 { - t.Fatalf("expected %#v to be empty", found) - } - } - - // configure index read path enabled - kv.WithIndexReadPathEnabled(resourceStore.ownerIDIndex) - - for _, testCase := range cases { - found, err := resourceStore.FindByOwner(ctx, testCase.owner) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(found, testCase.resources) { - t.Errorf("expected %#v, found %#v", testCase.resources, found) - } - } -} - -func allKVs(tx kv.Tx, bucket []byte) (kvs [][2][]byte, err error) { - idx, err := tx.Bucket(mapping.IndexBucket()) - if err != nil { - return - } - - cursor, err := idx.ForwardCursor(nil) - if err != nil { - return - } - - defer func() { - if cerr := cursor.Close(); cerr != nil && err == nil { - err = cerr - } - }() - - for k, v := cursor.Next(); k != nil; k, v = 
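The expected index contents above (`owner 0/resource 0` → `resource 0`) show the layout these index tests rely on: the index bucket stores `<foreignKey>/<primaryKey>` as the key and the primary key as the value, so finding every resource for one owner is a prefix scan. A self-contained sketch of that layout over a plain map (a stand-in, not the `kv` package API; names are hypothetical):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// indexKey composes the index-bucket key the tests expect: "<owner>/<resourceID>".
func indexKey(owner, resourceID string) string {
	return owner + "/" + resourceID
}

// walkOwner mimics the prefix scan an index walk performs: it visits every
// (indexKey, resourceID) pair whose key starts with "<owner>/", in key order.
func walkOwner(entries map[string]string, owner string, fn func(k, v string)) {
	keys := make([]string, 0, len(entries))
	for k := range entries {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	prefix := owner + "/"
	for _, k := range keys {
		if strings.HasPrefix(k, prefix) {
			fn(k, entries[k])
		}
	}
}

func main() {
	idx := map[string]string{}
	for _, r := range []struct{ id, owner string }{
		{"resource 0", "owner 0"}, {"resource 5", "owner 0"}, {"resource 1", "owner 1"},
	} {
		idx[indexKey(r.owner, r.id)] = r.id
	}
	walkOwner(idx, "owner 0", func(k, v string) { fmt.Println(k, "->", v) })
}
```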
cursor.Next() { - kvs = append(kvs, [2][]byte{k, v}) - } - - return kvs, cursor.Err() -} - -func BenchmarkIndexWalk(b *testing.B, store kv.SchemaStore, resourceCount, fetchCount int) { - var ( - ctx = context.TODO() - resourceStore = newSomeResourceStore(b, ctx, store) - userCount = resourceCount / fetchCount - resources = newNResourcesWithUserCount(resourceCount, userCount) - ) - - kv.WithIndexReadPathEnabled(resourceStore.ownerIDIndex) - - for _, resource := range resources { - resourceStore.Create(ctx, resource, true) - } - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - store.View(ctx, func(tx kv.Tx) error { - return resourceStore.ownerIDIndex.Walk(ctx, tx, []byte(fmt.Sprintf("owner %d", i%userCount)), func(k, v []byte) (bool, error) { - if k == nil || v == nil { - b.Fatal("entries must not be nil") - } - - return true, nil - }) - }) - } -} diff --git a/testing/keyvalue_log.go b/testing/keyvalue_log.go deleted file mode 100644 index 5266f8fcf3d..00000000000 --- a/testing/keyvalue_log.go +++ /dev/null @@ -1,837 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - platform "github.com/influxdata/influxdb/v2" -) - -var keyValueLogCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), -} - -// A log entry is a comparable data structure that is used for testing -type LogEntry struct { - Key []byte - Value []byte - Time time.Time -} - -// KeyValueLogFields will include the IDGenerator, and keyValueLogs -type KeyValueLogFields struct { - LogEntries []LogEntry -} - -// KeyValueLog tests all the service functions. -func KeyValueLog( - init func(KeyValueLogFields, *testing.T) (platform.KeyValueLog, func()), t *testing.T, -) { - tests := []struct { - name string - fn func(init func(KeyValueLogFields, *testing.T) (platform.KeyValueLog, func()), - t *testing.T) - }{ - { - name: "AddLogEntry", - fn: AddLogEntry, - }, - { - name: "ForEachLogEntry", - fn: ForEachLogEntry, - }, - { - name: "FirstLogEntry", - fn: FirstLogEntry, - }, - { - name: "LastLogEntry", - fn: LastLogEntry, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -// AddLogEntry tests the AddLogEntry for the KeyValueLog contract -func AddLogEntry( - init func(KeyValueLogFields, *testing.T) (platform.KeyValueLog, func()), - t *testing.T, -) { - type args struct { - key []byte - value []byte - time time.Time - } - type wants struct { - err error - logEntries []LogEntry - } - - tests := []struct { - name string - fields KeyValueLogFields - args args - wants wants - }{ - { - name: "Add entry to empty log", - fields: KeyValueLogFields{}, - args: args{ - key: []byte("abc"), - value: []byte("hello"), - time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - }, - wants: wants{ - logEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("hello"), - Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - }, - }, - }, - }, - { - name: "Add entry to non-empty log", - fields: KeyValueLogFields{ - LogEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("hat"), - Time: time.Date(2009, time.November, 10, 22, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - key: []byte("abc"), - value: []byte("hello"), - time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - }, - wants: wants{ - logEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("hat"), - Time: time.Date(2009, time.November, 10, 
22, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("hello"), - Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.AddLogEntry(ctx, tt.args.key, tt.args.value, tt.args.time) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - - logEntries := []LogEntry{} - opts := platform.FindOptions{} - err = s.ForEachLogEntry(ctx, tt.args.key, opts, func(v []byte, t time.Time) error { - logEntries = append(logEntries, LogEntry{ - Key: tt.args.key, - Value: v, - Time: t, - }) - return nil - }) - if err != nil { - t.Fatalf("failed to retrieve log entries: %v", err) - } - if diff := cmp.Diff(logEntries, tt.wants.logEntries, keyValueLogCmpOptions...); diff != "" { - t.Errorf("logEntries are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// ForEachLogEntry tests the AddLogEntry for the KeyValueLog contract -func ForEachLogEntry( - init func(KeyValueLogFields, *testing.T) (platform.KeyValueLog, func()), - t *testing.T, -) { - type args struct { - key []byte - opts platform.FindOptions - } - type wants struct { - err error - logEntries []LogEntry - } - - tests := []struct { - name string - fields KeyValueLogFields - args args - wants wants - }{ - { - name: "all log entries", - fields: KeyValueLogFields{ - LogEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("1"), - Time: time.Date(2009, time.November, 10, 1, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("2"), - Time: time.Date(2009, time.November, 10, 2, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("4"), - Time: time.Date(2009, time.November, 10, 4, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("5"), - Time: time.Date(2009, time.November, 10, 5, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - key: []byte("abc"), - opts: platform.FindOptions{}, - }, - wants: wants{ - logEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("1"), - Time: time.Date(2009, time.November, 10, 1, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("2"), - Time: time.Date(2009, time.November, 10, 2, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("4"), - Time: time.Date(2009, time.November, 10, 4, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("5"), - Time: time.Date(2009, time.November, 10, 5, 0, 0, 0, time.UTC), - }, - }, - }, - }, - { - name: "all log entries descending order", - fields: KeyValueLogFields{ - LogEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("1"), - Time: time.Date(2009, time.November, 10, 1, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("2"), - Time: time.Date(2009, time.November, 10, 2, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("4"), - Time: 
time.Date(2009, time.November, 10, 4, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("5"), - Time: time.Date(2009, time.November, 10, 5, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - key: []byte("abc"), - opts: platform.FindOptions{ - Descending: true, - }, - }, - wants: wants{ - logEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("5"), - Time: time.Date(2009, time.November, 10, 5, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("4"), - Time: time.Date(2009, time.November, 10, 4, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("2"), - Time: time.Date(2009, time.November, 10, 2, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("1"), - Time: time.Date(2009, time.November, 10, 1, 0, 0, 0, time.UTC), - }, - }, - }, - }, - { - name: "all log entries with offset", - fields: KeyValueLogFields{ - LogEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("1"), - Time: time.Date(2009, time.November, 10, 1, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("2"), - Time: time.Date(2009, time.November, 10, 2, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("4"), - Time: time.Date(2009, time.November, 10, 4, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("5"), - Time: time.Date(2009, time.November, 10, 5, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - key: []byte("abc"), - opts: platform.FindOptions{ - Offset: 2, - }, - }, - wants: wants{ - logEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("4"), - Time: time.Date(2009, time.November, 10, 4, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("5"), - Time: time.Date(2009, time.November, 10, 5, 0, 0, 0, time.UTC), - }, - }, - }, - }, - { - name: "for each log entry with limit", - fields: KeyValueLogFields{ - LogEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("1"), - Time: time.Date(2009, time.November, 10, 1, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("2"), - Time: time.Date(2009, time.November, 10, 2, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("4"), - Time: time.Date(2009, time.November, 10, 4, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("5"), - Time: time.Date(2009, time.November, 10, 5, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - key: []byte("abc"), - opts: platform.FindOptions{ - Limit: 3, - }, - }, - wants: wants{ - logEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("1"), - Time: time.Date(2009, time.November, 10, 1, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("2"), - Time: time.Date(2009, time.November, 10, 2, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - }, - }, - }, - { - name: "log entries with offset and limit", - fields: KeyValueLogFields{ - LogEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("1"), - Time: time.Date(2009, time.November, 10, 1, 0, 
0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("2"), - Time: time.Date(2009, time.November, 10, 2, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("4"), - Time: time.Date(2009, time.November, 10, 4, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("5"), - Time: time.Date(2009, time.November, 10, 5, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - key: []byte("abc"), - opts: platform.FindOptions{ - Offset: 2, - Limit: 2, - }, - }, - wants: wants{ - logEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("4"), - Time: time.Date(2009, time.November, 10, 4, 0, 0, 0, time.UTC), - }, - }, - }, - }, - { - name: "descending log entries with offset and limit", - fields: KeyValueLogFields{ - LogEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("1"), - Time: time.Date(2009, time.November, 10, 1, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("2"), - Time: time.Date(2009, time.November, 10, 2, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("4"), - Time: time.Date(2009, time.November, 10, 4, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("5"), - Time: time.Date(2009, time.November, 10, 5, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - key: []byte("abc"), - opts: platform.FindOptions{ - Offset: 2, - Limit: 2, - Descending: true, - }, - }, - wants: wants{ - logEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("2"), - Time: time.Date(2009, time.November, 10, 2, 0, 0, 0, time.UTC), - }, - }, - }, - }, - { - name: "offset exceeds log range", - fields: KeyValueLogFields{ - LogEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("1"), - Time: time.Date(2009, time.November, 10, 1, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("2"), - Time: time.Date(2009, time.November, 10, 2, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("4"), - Time: time.Date(2009, time.November, 10, 4, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("5"), - Time: time.Date(2009, time.November, 10, 5, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - key: []byte("abc"), - opts: platform.FindOptions{ - Offset: 5, - }, - }, - wants: wants{ - logEntries: []LogEntry{}, - }, - }, - { - name: "offset exceeds log range descending", - fields: KeyValueLogFields{ - LogEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("1"), - Time: time.Date(2009, time.November, 10, 1, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("2"), - Time: time.Date(2009, time.November, 10, 2, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("4"), - Time: time.Date(2009, time.November, 10, 4, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("5"), - Time: time.Date(2009, time.November, 
10, 5, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - key: []byte("abc"), - opts: platform.FindOptions{ - Offset: 5, - Descending: true, - }, - }, - wants: wants{ - logEntries: []LogEntry{}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - logEntries := []LogEntry{} - err := s.ForEachLogEntry(ctx, tt.args.key, tt.args.opts, func(v []byte, t time.Time) error { - logEntries = append(logEntries, LogEntry{ - Key: tt.args.key, - Value: v, - Time: t, - }) - return nil - }) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - - if diff := cmp.Diff(logEntries, tt.wants.logEntries, keyValueLogCmpOptions...); diff != "" { - t.Errorf("logEntries are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FirstLogEntry tests the FirstLogEntry method for the KeyValueLog contract. -func FirstLogEntry( - init func(KeyValueLogFields, *testing.T) (platform.KeyValueLog, func()), - t *testing.T, -) { - type args struct { - key []byte - } - type wants struct { - err error - logEntry LogEntry - } - - tests := []struct { - name string - fields KeyValueLogFields - args args - wants wants - }{ - { - name: "get first log entry", - fields: KeyValueLogFields{ - LogEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("1"), - Time: time.Date(2009, time.November, 10, 1, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("2"), - Time: time.Date(2009, time.November, 10, 2, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("4"), - Time: time.Date(2009, time.November, 10, 4, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("5"), - Time: time.Date(2009, time.November, 10, 5, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - key: []byte("abc"), - }, - wants: wants{ - logEntry: LogEntry{ - Key: []byte("abc"), - Value: []byte("1"), - Time: time.Date(2009, time.November, 10, 1, 0, 0, 0, time.UTC), - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - var err error - logEntry := LogEntry{Key: tt.args.key} - logEntry.Value, logEntry.Time, err = s.FirstLogEntry(ctx, tt.args.key) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - - if diff := cmp.Diff(logEntry, tt.wants.logEntry, keyValueLogCmpOptions...); diff != "" { - t.Errorf("logEntries are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// LastLogEntry tests the LastLogEntry method for the KeyValueLog contract. 
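// A minimal usage sketch, assuming a hypothetical initKeyValueLog function supplied
// by the implementation under test (it is not defined in this file). A concrete
// platform.KeyValueLog store runs this contract by handing its init function to the
// helpers defined here:
//
//	func TestKeyValueLogContract(t *testing.T) {
//		t.Run("FirstLogEntry", func(t *testing.T) { FirstLogEntry(initKeyValueLog, t) })
//		t.Run("LastLogEntry", func(t *testing.T) { LastLogEntry(initKeyValueLog, t) })
//	}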
-func LastLogEntry( - init func(KeyValueLogFields, *testing.T) (platform.KeyValueLog, func()), - t *testing.T, -) { - type args struct { - key []byte - } - type wants struct { - err error - logEntry LogEntry - } - - tests := []struct { - name string - fields KeyValueLogFields - args args - wants wants - }{ - { - name: "get last log entry", - fields: KeyValueLogFields{ - LogEntries: []LogEntry{ - { - Key: []byte("abc"), - Value: []byte("1"), - Time: time.Date(2009, time.November, 10, 1, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("2"), - Time: time.Date(2009, time.November, 10, 2, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("3"), - Time: time.Date(2009, time.November, 10, 3, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("4"), - Time: time.Date(2009, time.November, 10, 4, 0, 0, 0, time.UTC), - }, - { - Key: []byte("abc"), - Value: []byte("5"), - Time: time.Date(2009, time.November, 10, 5, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - key: []byte("abc"), - }, - wants: wants{ - logEntry: LogEntry{ - Key: []byte("abc"), - Value: []byte("5"), - Time: time.Date(2009, time.November, 10, 5, 0, 0, 0, time.UTC), - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - var err error - logEntry := LogEntry{Key: tt.args.key} - logEntry.Value, logEntry.Time, err = s.LastLogEntry(ctx, tt.args.key) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - - if diff := cmp.Diff(logEntry, tt.wants.logEntry, keyValueLogCmpOptions...); diff != "" { - t.Errorf("logEntries are different -got/+want\ndiff %s", diff) - } - }) - } -} diff --git a/testing/kv.go b/testing/kv.go deleted file mode 100644 index 00303e13cd3..00000000000 --- a/testing/kv.go +++ /dev/null @@ -1,1655 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "errors" - "fmt" - "reflect" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/kv" -) - -// KVStoreFields are background data that has to be set before -// the test runs. -type KVStoreFields struct { - Bucket []byte - Pairs []kv.Pair -} - -// KVStore tests the key value store contract -func KVStore( - init func(KVStoreFields, *testing.T) (kv.Store, func()), - t *testing.T, -) { - tests := []struct { - name string - fn func( - init func(KVStoreFields, *testing.T) (kv.Store, func()), - t *testing.T, - ) - }{ - { - name: "Get", - fn: KVGet, - }, - { - name: "GetBatch", - fn: KVGetBatch, - }, - { - name: "Put", - fn: KVPut, - }, - { - name: "Delete", - fn: KVDelete, - }, - { - name: "Cursor", - fn: KVCursor, - }, - { - name: "CursorWithHints", - fn: KVCursorWithHints, - }, - { - name: "ForwardCursor", - fn: KVForwardCursor, - }, - { - name: "View", - fn: KVView, - }, - { - name: "Update", - fn: KVUpdate, - }, - { - name: "ConcurrentUpdate", - fn: KVConcurrentUpdate, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -// KVGet tests the get method contract for the key value store. 
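// A sketch of how an implementation would plug into the KVStore contract dispatcher
// above, assuming a newTestStore constructor for the store under test (that name is
// illustrative, not an API of this package). The init function seeds the declared
// bucket and pairs, then returns the store together with a cleanup callback:
//
//	func initKVStore(f KVStoreFields, t *testing.T) (kv.Store, func()) {
//		s, closeFn := newTestStore(t) // assumed constructor
//		err := s.Update(context.Background(), func(tx kv.Tx) error {
//			b, err := tx.Bucket(f.Bucket)
//			if err != nil {
//				return err
//			}
//			for _, p := range f.Pairs {
//				if err := b.Put(p.Key, p.Value); err != nil {
//					return err
//				}
//			}
//			return nil
//		})
//		if err != nil {
//			t.Fatalf("failed to seed store: %v", err)
//		}
//		return s, closeFn
//	}
//
//	func TestKVStoreContract(t *testing.T) {
//		KVStore(initKVStore, t)
//	}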
-func KVGet( - init func(KVStoreFields, *testing.T) (kv.Store, func()), - t *testing.T, -) { - type args struct { - bucket []byte - key []byte - } - type wants struct { - err error - val []byte - } - - tests := []struct { - name string - fields KVStoreFields - args args - wants wants - }{ - { - name: "get key", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: []kv.Pair{ - { - Key: []byte("hello"), - Value: []byte("world"), - }, - }, - }, - args: args{ - bucket: []byte("bucket"), - key: []byte("hello"), - }, - wants: wants{ - val: []byte("world"), - }, - }, - { - name: "get missing key", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: []kv.Pair{}, - }, - args: args{ - bucket: []byte("bucket"), - key: []byte("hello"), - }, - wants: wants{ - err: kv.ErrKeyNotFound, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, close := init(tt.fields, t) - defer close() - - err := s.View(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket(tt.args.bucket) - if err != nil { - t.Errorf("unexpected error retrieving bucket: %v", err) - return err - } - - val, err := b.Get(tt.args.key) - if (err != nil) != (tt.wants.err != nil) { - t.Errorf("expected error '%v' got '%v'", tt.wants.err, err) - return err - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Errorf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - return err - } - } - - if want, got := tt.wants.val, val; !bytes.Equal(want, got) { - t.Errorf("exptected to get value %s got %s", string(want), string(got)) - return err - } - - return nil - }) - - if err != nil { - t.Fatalf("error during view transaction: %v", err) - } - }) - } -} - -// KVGetBatch tests the get batch method contract for the key value store. -func KVGetBatch( - init func(KVStoreFields, *testing.T) (kv.Store, func()), - t *testing.T, -) { - type args struct { - bucket []byte - keys [][]byte - } - type wants struct { - err error - vals [][]byte - } - - tests := []struct { - name string - fields KVStoreFields - args args - wants wants - }{ - { - name: "get keys", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: []kv.Pair{ - { - Key: []byte("hello"), - Value: []byte("world"), - }, - { - Key: []byte("color"), - Value: []byte("orange"), - }, - { - Key: []byte("organization"), - Value: []byte("influx"), - }, - }, - }, - args: args{ - bucket: []byte("bucket"), - keys: [][]byte{[]byte("hello"), []byte("organization")}, - }, - wants: wants{ - vals: [][]byte{[]byte("world"), []byte("influx")}, - }, - }, - { - name: "get keys with missing", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: []kv.Pair{ - { - Key: []byte("hello"), - Value: []byte("world"), - }, - { - Key: []byte("organization"), - Value: []byte("influx"), - }, - }, - }, - args: args{ - bucket: []byte("bucket"), - keys: [][]byte{[]byte("hello"), []byte("color")}, - }, - wants: wants{ - vals: [][]byte{[]byte("world"), nil}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, close := init(tt.fields, t) - defer close() - - err := s.View(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket(tt.args.bucket) - if err != nil { - t.Errorf("unexpected error retrieving bucket: %v", err) - return err - } - - vals, err := b.GetBatch(tt.args.keys...) 
- if (err != nil) != (tt.wants.err != nil) { - t.Errorf("expected error '%v' got '%v'", tt.wants.err, err) - return err - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Errorf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - return err - } - } - - if want, got := tt.wants.vals, vals; !reflect.DeepEqual(want, got) { - t.Errorf("exptected to get value %q got %q", want, got) - return err - } - - return nil - }) - - if err != nil { - t.Fatalf("error during view transaction: %v", err) - } - }) - } -} - -// KVPut tests the get method contract for the key value store. -func KVPut( - init func(KVStoreFields, *testing.T) (kv.Store, func()), - t *testing.T, -) { - type args struct { - bucket []byte - key []byte - val []byte - } - type wants struct { - err error - } - - tests := []struct { - name string - fields KVStoreFields - args args - wants wants - }{ - { - name: "put pair", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: []kv.Pair{}, - }, - args: args{ - bucket: []byte("bucket"), - key: []byte("hello"), - val: []byte("world"), - }, - wants: wants{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, close := init(tt.fields, t) - defer close() - - err := s.Update(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket(tt.args.bucket) - if err != nil { - t.Errorf("unexpected error retrieving bucket: %v", err) - return err - } - - { - err := b.Put(tt.args.key, tt.args.val) - if (err != nil) != (tt.wants.err != nil) { - t.Errorf("expected error '%v' got '%v'", tt.wants.err, err) - return err - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Errorf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - return err - } - } - - val, err := b.Get(tt.args.key) - if err != nil { - t.Errorf("unexpected error retrieving value: %v", err) - return err - } - - if want, got := tt.args.val, val; !bytes.Equal(want, got) { - t.Errorf("exptected to get value %s got %s", string(want), string(got)) - return err - } - } - - return nil - }) - - if err != nil { - t.Fatalf("error during view transaction: %v", err) - } - }) - } -} - -// KVDelete tests the delete method contract for the key value store. 
-func KVDelete( - init func(KVStoreFields, *testing.T) (kv.Store, func()), - t *testing.T, -) { - type args struct { - bucket []byte - key []byte - } - type wants struct { - err error - } - - tests := []struct { - name string - fields KVStoreFields - args args - wants wants - }{ - { - name: "delete key", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: []kv.Pair{ - { - Key: []byte("hello"), - Value: []byte("world"), - }, - }, - }, - args: args{ - bucket: []byte("bucket"), - key: []byte("hello"), - }, - wants: wants{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, close := init(tt.fields, t) - defer close() - - err := s.Update(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket(tt.args.bucket) - if err != nil { - t.Errorf("unexpected error retrieving bucket: %v", err) - return err - } - - { - err := b.Delete(tt.args.key) - if (err != nil) != (tt.wants.err != nil) { - t.Errorf("expected error '%v' got '%v'", tt.wants.err, err) - return err - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Errorf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - return err - } - } - - if _, err := b.Get(tt.args.key); err != kv.ErrKeyNotFound { - t.Errorf("expected key not found error got %v", err) - return err - } - } - - return nil - }) - - if err != nil { - t.Fatalf("error during view transaction: %v", err) - } - }) - } -} - -// KVCursor tests the cursor contract for the key value store. -func KVCursor( - init func(KVStoreFields, *testing.T) (kv.Store, func()), - t *testing.T, -) { - type args struct { - bucket []byte - seek []byte - } - type wants struct { - err error - first kv.Pair - last kv.Pair - seek kv.Pair - next kv.Pair - prev kv.Pair - } - - tests := []struct { - name string - fields KVStoreFields - args args - wants wants - }{ - { - name: "basic cursor", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: []kv.Pair{ - { - Key: []byte("a"), - Value: []byte("1"), - }, - { - Key: []byte("ab"), - Value: []byte("2"), - }, - { - Key: []byte("abc"), - Value: []byte("3"), - }, - { - Key: []byte("abcd"), - Value: []byte("4"), - }, - { - Key: []byte("abcde"), - Value: []byte("5"), - }, - { - Key: []byte("bcd"), - Value: []byte("6"), - }, - { - Key: []byte("cd"), - Value: []byte("7"), - }, - }, - }, - args: args{ - bucket: []byte("bucket"), - seek: []byte("abc"), - }, - wants: wants{ - first: kv.Pair{ - Key: []byte("a"), - Value: []byte("1"), - }, - last: kv.Pair{ - Key: []byte("cd"), - Value: []byte("7"), - }, - seek: kv.Pair{ - Key: []byte("abc"), - Value: []byte("3"), - }, - next: kv.Pair{ - Key: []byte("abcd"), - Value: []byte("4"), - }, - prev: kv.Pair{ - Key: []byte("abc"), - Value: []byte("3"), - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, close := init(tt.fields, t) - defer close() - - err := s.View(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket(tt.args.bucket) - if err != nil { - t.Errorf("unexpected error retrieving bucket: %v", err) - return err - } - - cur, err := b.Cursor() - if (err != nil) != (tt.wants.err != nil) { - t.Errorf("expected error '%v' got '%v'", tt.wants.err, err) - return err - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Errorf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - return err - } - } - - { - key, val := cur.First() - if want, got := tt.wants.first.Key, key; !bytes.Equal(want, got) 
{ - t.Errorf("exptected to get key %s got %s", string(want), string(got)) - return err - } - - if want, got := tt.wants.first.Value, val; !bytes.Equal(want, got) { - t.Errorf("exptected to get value %s got %s", string(want), string(got)) - return err - } - } - - { - key, val := cur.Last() - if want, got := tt.wants.last.Key, key; !bytes.Equal(want, got) { - t.Errorf("exptected to get key %s got %s", string(want), string(got)) - return err - } - - if want, got := tt.wants.last.Value, val; !bytes.Equal(want, got) { - t.Errorf("exptected to get value %s got %s", string(want), string(got)) - return err - } - } - - { - key, val := cur.Seek(tt.args.seek) - if want, got := tt.wants.seek.Key, key; !bytes.Equal(want, got) { - t.Errorf("exptected to get key %s got %s", string(want), string(got)) - return err - } - - if want, got := tt.wants.seek.Value, val; !bytes.Equal(want, got) { - t.Errorf("exptected to get value %s got %s", string(want), string(got)) - return err - } - } - - { - key, val := cur.Next() - if want, got := tt.wants.next.Key, key; !bytes.Equal(want, got) { - t.Errorf("exptected to get key %s got %s", string(want), string(got)) - return err - } - - if want, got := tt.wants.next.Value, val; !bytes.Equal(want, got) { - t.Errorf("exptected to get value %s got %s", string(want), string(got)) - return err - } - } - - { - key, val := cur.Prev() - if want, got := tt.wants.prev.Key, key; !bytes.Equal(want, got) { - t.Errorf("exptected to get key %s got %s", string(want), string(got)) - return err - } - - if want, got := tt.wants.prev.Value, val; !bytes.Equal(want, got) { - t.Errorf("exptected to get value %s got %s", string(want), string(got)) - return err - } - } - - return nil - }) - - if err != nil { - t.Fatalf("error during view transaction: %v", err) - } - }) - } -} - -// KVCursor tests the cursor contract for the key value store. 
-func KVCursorWithHints( - init func(KVStoreFields, *testing.T) (kv.Store, func()), - t *testing.T, -) { - type args struct { - seek string - until string - hints []kv.CursorHint - } - - pairs := func(keys ...string) []kv.Pair { - p := make([]kv.Pair, len(keys)) - for i, k := range keys { - p[i].Key = []byte(k) - p[i].Value = []byte("val:" + k) - } - return p - } - - tests := []struct { - name string - fields KVStoreFields - args args - exp []string - }{ - { - name: "no hints", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa", - until: "bbb/00", - }, - exp: []string{"aaa/00", "aaa/01", "aaa/02", "aaa/03", "bbb/00"}, - }, - { - name: "prefix hint", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa", - until: "aaa/03", - hints: []kv.CursorHint{kv.WithCursorHintPrefix("aaa/")}, - }, - exp: []string{"aaa/00", "aaa/01", "aaa/02", "aaa/03"}, - }, - { - name: "start hint", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa", - until: "bbb/00", - hints: []kv.CursorHint{kv.WithCursorHintKeyStart("aaa/")}, - }, - exp: []string{"aaa/00", "aaa/01", "aaa/02", "aaa/03", "bbb/00"}, - }, - { - name: "predicate for key", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa", - until: "aaa/03", - hints: []kv.CursorHint{ - kv.WithCursorHintPredicate(func(key, _ []byte) bool { - return len(key) < 3 || string(key[:3]) == "aaa" - })}, - }, - exp: []string{"aaa/00", "aaa/01", "aaa/02", "aaa/03"}, - }, - { - name: "predicate for value", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "", - until: "aa/01", - hints: []kv.CursorHint{ - kv.WithCursorHintPredicate(func(_, val []byte) bool { - return len(val) < 7 || string(val[:7]) == "val:aa/" - })}, - }, - exp: []string{"aa/00", "aa/01"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, fin := init(tt.fields, t) - defer fin() - - err := s.View(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket([]byte("bucket")) - if err != nil { - t.Errorf("unexpected error retrieving bucket: %v", err) - return err - } - - cur, err := b.Cursor(tt.args.hints...) - if err != nil { - t.Errorf("unexpected error: %v", err) - return err - } - - var got []string - k, _ := cur.Seek([]byte(tt.args.seek)) - for len(k) > 0 { - got = append(got, string(k)) - if string(k) == tt.args.until { - break - } - k, _ = cur.Next() - } - - if exp := tt.exp; !cmp.Equal(got, exp) { - t.Errorf("unexpected cursor values: -got/+exp\n%v", cmp.Diff(got, exp)) - } - - return nil - }) - - if err != nil { - t.Fatalf("error during view transaction: %v", err) - } - }) - } -} - -// KVForwardCursor tests the forward cursor contract for the key value store. 
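// A brief sketch of the cursor-hint options exercised in the cases above; the store
// variable is assumed to be a kv.Store seeded with prefixed keys such as "aaa/00".
// Hints give the store extra information it may use to narrow the keys it scans:
//
//	_ = store.View(context.Background(), func(tx kv.Tx) error {
//		b, err := tx.Bucket([]byte("bucket"))
//		if err != nil {
//			return err
//		}
//		cur, err := b.Cursor(kv.WithCursorHintPrefix("aaa/"))
//		if err != nil {
//			return err
//		}
//		for k, v := cur.Seek([]byte("aaa")); len(k) > 0; k, v = cur.Next() {
//			_ = v // each pair under the hinted prefix is visited in key order
//		}
//		return nil
//	})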
-func KVForwardCursor( - init func(KVStoreFields, *testing.T) (kv.Store, func()), - t *testing.T, -) { - type args struct { - seek string - until string - opts []kv.CursorOption - } - - pairs := func(keys ...string) []kv.Pair { - p := make([]kv.Pair, len(keys)) - for i, k := range keys { - p[i].Key = []byte(k) - p[i].Value = []byte("val:" + k) - } - return p - } - - tests := []struct { - name string - fields KVStoreFields - args args - exp []string - expErr error - }{ - { - name: "no hints", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa", - until: "bbb/00", - }, - exp: []string{"aaa/00", "aaa/01", "aaa/02", "aaa/03", "bbb/00"}, - }, - { - name: "limit", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa", - opts: []kv.CursorOption{ - kv.WithCursorLimit(4), - }, - }, - exp: []string{"aaa/00", "aaa/01", "aaa/02", "aaa/03"}, - }, - { - name: "prefix - no hints", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa/00", - until: "bbb/02", - opts: []kv.CursorOption{ - kv.WithCursorPrefix([]byte("aaa")), - }, - }, - exp: []string{"aaa/00", "aaa/01", "aaa/02", "aaa/03"}, - }, - - { - name: "prefix with limit", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa/00", - until: "bbb/02", - opts: []kv.CursorOption{ - kv.WithCursorPrefix([]byte("aaa")), - kv.WithCursorLimit(2), - }, - }, - exp: []string{"aaa/00", "aaa/01"}, - }, - { - name: "prefix - skip first", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa/00", - until: "bbb/02", - opts: []kv.CursorOption{ - kv.WithCursorPrefix([]byte("aaa")), - kv.WithCursorSkipFirstItem(), - }, - }, - exp: []string{"aaa/01", "aaa/02", "aaa/03"}, - }, - { - name: "prefix - skip first with limit", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa/00", - until: "bbb/02", - opts: []kv.CursorOption{ - kv.WithCursorPrefix([]byte("aaa")), - kv.WithCursorSkipFirstItem(), - kv.WithCursorLimit(2), - }, - }, - exp: []string{"aaa/01", "aaa/02"}, - }, - { - name: "prefix - skip first (one item)", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "bbb/02", - opts: []kv.CursorOption{ - kv.WithCursorSkipFirstItem(), - }, - }, - exp: nil, - }, - { - name: "prefix - does not prefix seek", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa/00", - until: "bbb/02", - opts: []kv.CursorOption{ - kv.WithCursorPrefix([]byte("aab")), - }, - }, - expErr: kv.ErrSeekMissingPrefix, - }, - { - name: "prefix hint", - fields: KVStoreFields{ - Bucket: 
[]byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa", - until: "aaa/03", - opts: []kv.CursorOption{ - kv.WithCursorHints(kv.WithCursorHintPrefix("aaa/")), - }, - }, - exp: []string{"aaa/00", "aaa/01", "aaa/02", "aaa/03"}, - }, - { - name: "start hint", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa", - until: "bbb/00", - opts: []kv.CursorOption{ - kv.WithCursorHints(kv.WithCursorHintKeyStart("aaa/")), - }, - }, - exp: []string{"aaa/00", "aaa/01", "aaa/02", "aaa/03", "bbb/00"}, - }, - { - name: "predicate for key", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa", - until: "aaa/03", - opts: []kv.CursorOption{ - kv.WithCursorHints(kv.WithCursorHintPredicate(func(key, _ []byte) bool { - return len(key) < 3 || string(key[:3]) == "aaa" - })), - }, - }, - exp: []string{"aaa/00", "aaa/01", "aaa/02", "aaa/03"}, - }, - { - name: "predicate for value", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "", - until: "aa/01", - opts: []kv.CursorOption{ - kv.WithCursorHints(kv.WithCursorHintPredicate(func(_, val []byte) bool { - return len(val) < 7 || string(val[:7]) == "val:aa/" - })), - }, - }, - exp: []string{"aa/00", "aa/01"}, - }, - { - name: "no hints - descending", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "bbb/00", - until: "aaa/00", - opts: []kv.CursorOption{kv.WithCursorDirection(kv.CursorDescending)}, - }, - exp: []string{"bbb/00", "aaa/03", "aaa/02", "aaa/01", "aaa/00"}, - }, - { - name: "no hints - descending - with limit", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "bbb/00", - until: "aaa/00", - opts: []kv.CursorOption{ - kv.WithCursorDirection(kv.CursorDescending), - kv.WithCursorLimit(3), - }, - }, - exp: []string{"bbb/00", "aaa/03", "aaa/02"}, - }, - { - name: "prefixed - no hints - descending", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa/02", - until: "aa/", - opts: []kv.CursorOption{ - kv.WithCursorPrefix([]byte("aaa/")), - kv.WithCursorDirection(kv.CursorDescending), - }, - }, - exp: []string{"aaa/02", "aaa/01", "aaa/00"}, - }, - { - name: "prefixed - no hints - descending - with limit", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa/02", - until: "aa/", - opts: []kv.CursorOption{ - kv.WithCursorPrefix([]byte("aaa/")), - kv.WithCursorDirection(kv.CursorDescending), - kv.WithCursorLimit(2), - }, - }, - exp: []string{"aaa/02", "aaa/01"}, - }, - { - name: "start hint - descending", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", 
"aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "bbb/00", - until: "aaa/00", - opts: []kv.CursorOption{ - kv.WithCursorDirection(kv.CursorDescending), - kv.WithCursorHints(kv.WithCursorHintKeyStart("aaa/")), - }, - }, - exp: []string{"bbb/00", "aaa/03", "aaa/02", "aaa/01", "aaa/00"}, - }, - { - name: "predicate for key - descending", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aaa/03", - until: "aaa/00", - opts: []kv.CursorOption{ - kv.WithCursorDirection(kv.CursorDescending), - kv.WithCursorHints(kv.WithCursorHintPredicate(func(key, _ []byte) bool { - return len(key) < 3 || string(key[:3]) == "aaa" - })), - }, - }, - exp: []string{"aaa/03", "aaa/02", "aaa/01", "aaa/00"}, - }, - { - name: "predicate for value - descending", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: pairs( - "aa/00", "aa/01", - "aaa/00", "aaa/01", "aaa/02", "aaa/03", - "bbb/00", "bbb/01", "bbb/02"), - }, - args: args{ - seek: "aa/01", - until: "aa/00", - opts: []kv.CursorOption{ - kv.WithCursorDirection(kv.CursorDescending), - kv.WithCursorHints(kv.WithCursorHintPredicate(func(_, val []byte) bool { - return len(val) >= 7 && string(val[:7]) == "val:aa/" - })), - }, - }, - exp: []string{"aa/01", "aa/00"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, fin := init(tt.fields, t) - defer fin() - - err := s.View(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket([]byte("bucket")) - if err != nil { - t.Errorf("unexpected error retrieving bucket: %v", err) - return err - } - - cur, err := b.ForwardCursor([]byte(tt.args.seek), tt.args.opts...) - if err != nil { - if tt.expErr != nil && errors.Is(err, tt.expErr) { - // successfully returned expected error - return nil - } - - t.Errorf("unexpected error: %v", err) - return err - } - - var got []string - - k, _ := cur.Next() - for len(k) > 0 { - got = append(got, string(k)) - if string(k) == tt.args.until { - break - } - - k, _ = cur.Next() - } - - if exp := tt.exp; !cmp.Equal(got, exp) { - t.Errorf("unexpected cursor values: -got/+exp\n%v", cmp.Diff(got, exp)) - } - - if err := cur.Err(); !cmp.Equal(err, tt.expErr) { - t.Errorf("expected error to be %v, got %v", tt.expErr, err) - } - - if err := cur.Close(); err != nil { - t.Errorf("expected cursor to close with nil error, found %v", err) - } - - return nil - }) - - if err != nil { - t.Fatalf("error during view transaction: %v", err) - } - }) - } -} - -// KVView tests the view method contract for the key value store. 
-func KVView( - init func(KVStoreFields, *testing.T) (kv.Store, func()), - t *testing.T, -) { - type args struct { - bucket []byte - key []byte - // If len(value) == 0 the test will not attempt a put - value []byte - // If true, the test will attempt to delete the provided key - delete bool - } - type wants struct { - value []byte - } - - tests := []struct { - name string - fields KVStoreFields - args args - wants wants - }{ - { - name: "basic view", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: []kv.Pair{ - { - Key: []byte("hello"), - Value: []byte("cruel world"), - }, - }, - }, - args: args{ - bucket: []byte("bucket"), - key: []byte("hello"), - }, - wants: wants{ - value: []byte("cruel world"), - }, - }, - { - name: "basic view with delete", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: []kv.Pair{ - { - Key: []byte("hello"), - Value: []byte("cruel world"), - }, - }, - }, - args: args{ - bucket: []byte("bucket"), - key: []byte("hello"), - delete: true, - }, - wants: wants{ - value: []byte("cruel world"), - }, - }, - { - name: "basic view with put", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: []kv.Pair{ - { - Key: []byte("hello"), - Value: []byte("cruel world"), - }, - }, - }, - args: args{ - bucket: []byte("bucket"), - key: []byte("hello"), - value: []byte("world"), - delete: true, - }, - wants: wants{ - value: []byte("cruel world"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, close := init(tt.fields, t) - defer close() - - err := s.View(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket(tt.args.bucket) - if err != nil { - t.Errorf("unexpected error retrieving bucket: %v", err) - return err - } - - if len(tt.args.value) != 0 { - err := b.Put(tt.args.key, tt.args.value) - if err == nil { - return fmt.Errorf("expected transaction to fail") - } - if err != kv.ErrTxNotWritable { - return err - } - return nil - } - - value, err := b.Get(tt.args.key) - if err != nil { - return err - } - - if want, got := tt.wants.value, value; !bytes.Equal(want, got) { - t.Errorf("exptected to get value %s got %s", string(want), string(got)) - return err - } - - if tt.args.delete { - err := b.Delete(tt.args.key) - if err == nil { - return fmt.Errorf("expected transaction to fail") - } - if err != kv.ErrTxNotWritable { - return err - } - return nil - } - - return nil - }) - - if err != nil { - t.Fatalf("error during view transaction: %v", err) - } - }) - } -} - -// KVUpdate tests the update method contract for the key value store. 
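// A short sketch of the read-only guarantee the KVView cases above assert, using an
// assumed store variable of type kv.Store: a write attempted inside View must fail
// with kv.ErrTxNotWritable instead of mutating the bucket.
//
//	err := store.View(context.Background(), func(tx kv.Tx) error {
//		b, err := tx.Bucket([]byte("bucket"))
//		if err != nil {
//			return err
//		}
//		if err := b.Put([]byte("hello"), []byte("world")); err != kv.ErrTxNotWritable {
//			return fmt.Errorf("expected kv.ErrTxNotWritable, got %v", err)
//		}
//		return nil
//	})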
-func KVUpdate( - init func(KVStoreFields, *testing.T) (kv.Store, func()), - t *testing.T, -) { - type args struct { - bucket []byte - key []byte - value []byte - delete bool - } - type wants struct { - value []byte - } - - tests := []struct { - name string - fields KVStoreFields - args args - wants wants - }{ - { - name: "basic update", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: []kv.Pair{ - { - Key: []byte("hello"), - Value: []byte("cruel world"), - }, - }, - }, - args: args{ - bucket: []byte("bucket"), - key: []byte("hello"), - value: []byte("world"), - }, - wants: wants{ - value: []byte("world"), - }, - }, - { - name: "basic update with delete", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: []kv.Pair{ - { - Key: []byte("hello"), - Value: []byte("cruel world"), - }, - }, - }, - args: args{ - bucket: []byte("bucket"), - key: []byte("hello"), - value: []byte("world"), - delete: true, - }, - wants: wants{}, - }, - // TODO: add case with failed update transaction that doesn't apply all of the changes. - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, close := init(tt.fields, t) - defer close() - - { - err := s.Update(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket(tt.args.bucket) - if err != nil { - t.Errorf("unexpected error retrieving bucket: %v", err) - return err - } - - if len(tt.args.value) != 0 { - err := b.Put(tt.args.key, tt.args.value) - if err != nil { - return err - } - } - - if tt.args.delete { - err := b.Delete(tt.args.key) - if err != nil { - return err - } - } - - value, err := b.Get(tt.args.key) - if tt.args.delete { - if err != kv.ErrKeyNotFound { - return fmt.Errorf("expected key not found") - } - return nil - } else if err != nil { - return err - } - - if want, got := tt.wants.value, value; !bytes.Equal(want, got) { - t.Errorf("exptected to get value %s got %s", string(want), string(got)) - return err - } - - return nil - }) - - if err != nil { - t.Fatalf("error during update transaction: %v", err) - } - } - - { - err := s.View(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket(tt.args.bucket) - if err != nil { - t.Errorf("unexpected error retrieving bucket: %v", err) - return err - } - - value, err := b.Get(tt.args.key) - if tt.args.delete { - if err != kv.ErrKeyNotFound { - return fmt.Errorf("expected key not found") - } - } else if err != nil { - return err - } - - if want, got := tt.wants.value, value; !bytes.Equal(want, got) { - t.Errorf("exptected to get value %s got %s", string(want), string(got)) - return err - } - - return nil - }) - - if err != nil { - t.Fatalf("error during view transaction: %v", err) - } - } - }) - } -} - -// KVConcurrentUpdate tests concurrent calls to update. 
-func KVConcurrentUpdate( - init func(KVStoreFields, *testing.T) (kv.Store, func()), - t *testing.T, -) { - type args struct { - bucket []byte - key []byte - valueA []byte - valueB []byte - } - type wants struct { - value []byte - } - - tests := []struct { - name string - fields KVStoreFields - args args - wants wants - }{ - { - name: "basic concurrent update", - fields: KVStoreFields{ - Bucket: []byte("bucket"), - Pairs: []kv.Pair{ - { - Key: []byte("hello"), - Value: []byte("cruel world"), - }, - }, - }, - args: args{ - bucket: []byte("bucket"), - key: []byte("hello"), - valueA: []byte("world"), - valueB: []byte("darkness my new friend"), - }, - wants: wants{ - value: []byte("darkness my new friend"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Skip("https://github.com/influxdata/platform/issues/2371") - s, closeFn := init(tt.fields, t) - defer closeFn() - - errCh := make(chan error) - var fn = func(v []byte) { - err := s.Update(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket(tt.args.bucket) - if err != nil { - return err - } - - if err := b.Put(tt.args.key, v); err != nil { - return err - } - - return nil - }) - - if err != nil { - errCh <- fmt.Errorf("error during update transaction: %v", err) - } else { - errCh <- nil - } - } - go fn(tt.args.valueA) - // To ensure that a is scheduled before b - time.Sleep(time.Millisecond) - go fn(tt.args.valueB) - - count := 0 - for err := range errCh { - count++ - if err != nil { - t.Fatal(err) - } - if count == 2 { - break - } - } - - close(errCh) - - { - err := s.View(context.Background(), func(tx kv.Tx) error { - b, err := tx.Bucket(tt.args.bucket) - if err != nil { - t.Errorf("unexpected error retrieving bucket: %v", err) - return err - } - - deadline := time.Now().Add(1 * time.Second) - var returnErr error - for { - if time.Now().After(deadline) { - break - } - - value, err := b.Get(tt.args.key) - if err != nil { - return err - } - - if want, got := tt.wants.value, value; !bytes.Equal(want, got) { - returnErr = fmt.Errorf("exptected to get value %s got %s", string(want), string(got)) - } else { - returnErr = nil - break - } - } - - if returnErr != nil { - return returnErr - } - - return nil - }) - - if err != nil { - t.Fatalf("error during view transaction: %v", err) - } - } - }) - } -} diff --git a/testing/label_service.go b/testing/label_service.go deleted file mode 100644 index cd09988ee1e..00000000000 --- a/testing/label_service.go +++ /dev/null @@ -1,1043 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" -) - -const ( - labelOneID = "41a9f7288d4e2d64" - labelTwoID = "b7c5355e1134b11c" - labelThreeID = "c8d6466f2245c22d" -) - -var labelCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Label) []*influxdb.Label { - out := append([]*influxdb.Label(nil), in...) 
// Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -// LabelFields include the IDGenerator, labels and their mappings -type LabelFields struct { - Labels []*influxdb.Label - Mappings []*influxdb.LabelMapping - IDGenerator platform.IDGenerator -} - -type labelServiceF func( - init func(LabelFields, *testing.T) (influxdb.LabelService, string, func()), - t *testing.T, -) - -// LabelService tests all the service functions. -func LabelService( - init func(LabelFields, *testing.T) (influxdb.LabelService, string, func()), - t *testing.T, -) { - tests := []struct { - name string - fn labelServiceF - }{ - { - name: "CreateLabel", - fn: CreateLabel, - }, - { - name: "CreateLabelMapping", - fn: CreateLabelMapping, - }, - { - name: "FindLabels", - fn: FindLabels, - }, - { - name: "FindLabelByID", - fn: FindLabelByID, - }, - { - name: "UpdateLabel", - fn: UpdateLabel, - }, - { - name: "DeleteLabel", - fn: DeleteLabel, - }, - { - name: "DeleteLabelMapping", - fn: DeleteLabelMapping, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -func CreateLabel( - init func(LabelFields, *testing.T) (influxdb.LabelService, string, func()), - t *testing.T, -) { - type args struct { - label *influxdb.Label - } - type wants struct { - err error - labels []*influxdb.Label - } - - tests := []struct { - name string - fields LabelFields - args args - wants wants - }{ - { - name: "names should be unique", - fields: LabelFields{ - IDGenerator: mock.NewMockIDGenerator(), - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "label_1", - OrgID: idOne, - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, - }, - args: args{ - label: &influxdb.Label{ - ID: MustIDBase16(labelTwoID), - Name: "label_1", - OrgID: idOne, - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "label_1", - OrgID: idOne, - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, - err: &errors.Error{ - Code: errors.EConflict, - Op: influxdb.OpCreateLabel, - Msg: "label with name label_1 already exists", - }, - }, - }, - { - name: "names should be trimmed of spacing", - fields: LabelFields{ - IDGenerator: mock.NewMockIDGenerator(), - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "tag_1", - OrgID: idOne, - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, - }, - args: args{ - label: &influxdb.Label{ - ID: MustIDBase16(labelOneID), - Name: " tag_1 ", - OrgID: idOne, - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "tag_1", - OrgID: idOne, - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, - err: &errors.Error{ - Code: errors.EConflict, - Op: influxdb.OpCreateLabel, - Msg: "label with name tag_1 already exists", - }, - }, - }, - { - name: "labels should be unique and case-agnostic", - fields: LabelFields{ - IDGenerator: mock.NewMockIDGenerator(), - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "tag_1", - OrgID: idOne, - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, - }, - args: args{ - label: &influxdb.Label{ - ID: MustIDBase16(labelOneID), - Name: "TAG_1", - OrgID: idOne, - Properties: map[string]string{ - "color": 
"fff000", - }, - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "tag_1", - OrgID: idOne, - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, - err: &errors.Error{ - Code: errors.EConflict, - Op: influxdb.OpCreateLabel, - Msg: "label with name TAG_1 already exists", - }, - }, - }, - { - name: "basic create label", - fields: LabelFields{ - IDGenerator: mock.NewIDGenerator(labelOneID, t), - Labels: []*influxdb.Label{}, - }, - args: args{ - label: &influxdb.Label{ - Name: "Tag2", - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "Tag2", - OrgID: idOne, - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.CreateLabel(ctx, tt.args.label) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - defer s.DeleteLabel(ctx, tt.args.label.ID) - - labels, err := s.FindLabels(ctx, influxdb.LabelFilter{}) - if err != nil { - t.Fatalf("failed to retrieve labels: %v", err) - } - if diff := cmp.Diff(labels, tt.wants.labels, labelCmpOptions...); diff != "" { - t.Errorf("labels are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func FindLabels( - init func(LabelFields, *testing.T) (influxdb.LabelService, string, func()), - t *testing.T, -) { - type args struct { - filter influxdb.LabelFilter - } - type wants struct { - err error - labels []*influxdb.Label - } - - tests := []struct { - name string - fields LabelFields - args args - wants wants - }{ - { - name: "basic find labels", - fields: LabelFields{ - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "Tag1", - OrgID: idOne, - }, - { - ID: MustIDBase16(labelTwoID), - Name: "Tag2", - OrgID: idOne, - }, - }, - }, - args: args{ - filter: influxdb.LabelFilter{}, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "Tag1", - OrgID: idOne, - }, - { - ID: MustIDBase16(labelTwoID), - Name: "Tag2", - OrgID: idOne, - }, - }, - }, - }, - { - name: "find labels filtering", - fields: LabelFields{ - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "Tag1", - OrgID: idOne, - }, - { - ID: MustIDBase16(labelTwoID), - Name: "Tag2", - OrgID: idOne, - }, - { - ID: MustIDBase16(labelThreeID), - Name: "Tag1", - OrgID: idTwo, - }, - }, - }, - args: args{ - filter: influxdb.LabelFilter{ - Name: "Tag1", - OrgID: idPtr(idOne), - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "Tag1", - OrgID: idOne, - }, - }, - }, - }, - { - name: "find a label by name is case-agnostic", - fields: LabelFields{ - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "tag1", - OrgID: idOne, - }, - }, - }, - args: args{ - filter: influxdb.LabelFilter{ - Name: "TAG1", - OrgID: idPtr(idOne), - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "tag1", - OrgID: idOne, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - labels, err := s.FindLabels(ctx, tt.args.filter) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := 
cmp.Diff(labels, tt.wants.labels, labelCmpOptions...); diff != "" { - t.Errorf("labels are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func FindLabelByID( - init func(LabelFields, *testing.T) (influxdb.LabelService, string, func()), - t *testing.T, -) { - type args struct { - id platform.ID - } - type wants struct { - err error - label *influxdb.Label - } - - tests := []struct { - name string - fields LabelFields - args args - wants wants - }{ - { - name: "find label by ID", - fields: LabelFields{ - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "Tag1", - OrgID: idOne, - }, - { - ID: MustIDBase16(labelTwoID), - Name: "Tag2", - OrgID: idOne, - }, - }, - }, - args: args{ - id: MustIDBase16(labelOneID), - }, - wants: wants{ - label: &influxdb.Label{ - ID: MustIDBase16(labelOneID), - Name: "Tag1", - OrgID: idOne, - }, - }, - }, - { - name: "label does not exist", - fields: LabelFields{ - Labels: []*influxdb.Label{}, - }, - args: args{ - id: MustIDBase16(labelOneID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindLabelByID, - Msg: influxdb.ErrLabelNotFound, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - label, err := s.FindLabelByID(ctx, tt.args.id) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(label, tt.wants.label, labelCmpOptions...); diff != "" { - t.Errorf("labels are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func UpdateLabel( - init func(LabelFields, *testing.T) (influxdb.LabelService, string, func()), - t *testing.T, -) { - type args struct { - labelID platform.ID - update influxdb.LabelUpdate - } - type wants struct { - err error - labels []*influxdb.Label - } - - tests := []struct { - name string - fields LabelFields - args args - wants wants - }{ - { - name: "update label name", - fields: LabelFields{ - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Name: "Tag1", - }, - }, - }, - args: args{ - labelID: MustIDBase16(labelOneID), - update: influxdb.LabelUpdate{ - Name: "NotTag1", - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Name: "NotTag1", - }, - }, - }, - }, - { - name: "cant update a label with a name that already exists", - fields: LabelFields{ - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Name: "tag_1", - }, - { - ID: MustIDBase16(labelTwoID), - OrgID: idOne, - Name: "tag_2", - }, - }, - }, - args: args{ - labelID: MustIDBase16(labelTwoID), - update: influxdb.LabelUpdate{ - Name: "tag_1", - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Name: "tag_1", - }, - { - ID: MustIDBase16(labelTwoID), - OrgID: idOne, - Name: "tag_2", - }, - }, - err: &errors.Error{ - Code: errors.EConflict, - Op: influxdb.OpCreateLabel, - Msg: "label with name tag_1 already exists", - }, - }, - }, - { - name: "should trim space but fails to update existing label", - fields: LabelFields{ - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Name: "tag_1", - }, - { - ID: MustIDBase16(labelTwoID), - OrgID: idOne, - Name: "tag_2", - }, - }, - }, - args: args{ - labelID: MustIDBase16(labelTwoID), - update: influxdb.LabelUpdate{ - Name: " tag_1 ", - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: 
MustIDBase16(labelOneID), - OrgID: idOne, - Name: "tag_1", - }, - { - ID: MustIDBase16(labelTwoID), - OrgID: idOne, - Name: "tag_2", - }, - }, - err: &errors.Error{ - Code: errors.EConflict, - Op: influxdb.OpCreateLabel, - Msg: "label with name tag_1 already exists", - }, - }, - }, - { - name: "update label properties", - fields: LabelFields{ - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Name: "Tag1", - }, - }, - }, - args: args{ - labelID: MustIDBase16(labelOneID), - update: influxdb.LabelUpdate{ - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Name: "Tag1", - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, - }, - }, - { - name: "replacing a label property", - fields: LabelFields{ - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Name: "Tag1", - Properties: map[string]string{ - "color": "fff000", - "description": "description", - }, - }, - }, - }, - args: args{ - labelID: MustIDBase16(labelOneID), - update: influxdb.LabelUpdate{ - Properties: map[string]string{ - "color": "abc123", - }, - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Name: "Tag1", - Properties: map[string]string{ - "color": "abc123", - "description": "description", - }, - }, - }, - }, - }, - { - name: "deleting a label property", - fields: LabelFields{ - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Name: "Tag1", - Properties: map[string]string{ - "color": "fff000", - "description": "description", - }, - }, - }, - }, - args: args{ - labelID: MustIDBase16(labelOneID), - update: influxdb.LabelUpdate{ - Properties: map[string]string{ - "description": "", - }, - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Name: "Tag1", - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, - }, - }, - { - name: "updating a non-existent label", - fields: LabelFields{ - Labels: []*influxdb.Label{}, - }, - args: args{ - labelID: MustIDBase16(labelOneID), - update: influxdb.LabelUpdate{ - Properties: map[string]string{ - "color": "fff000", - }, - }, - }, - wants: wants{ - labels: []*influxdb.Label{}, - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpUpdateLabel, - Msg: influxdb.ErrLabelNotFound, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - _, err := s.UpdateLabel(ctx, tt.args.labelID, tt.args.update) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - labels, err := s.FindLabels(ctx, influxdb.LabelFilter{}) - if err != nil { - t.Fatalf("failed to retrieve labels: %v", err) - } - if diff := cmp.Diff(labels, tt.wants.labels, labelCmpOptions...); diff != "" { - t.Errorf("labels are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func DeleteLabel( - init func(LabelFields, *testing.T) (influxdb.LabelService, string, func()), - t *testing.T, -) { - type args struct { - labelID platform.ID - } - type wants struct { - err error - labels []*influxdb.Label - } - - tests := []struct { - name string - fields LabelFields - args args - wants wants - }{ - { - name: "basic delete label", - fields: LabelFields{ - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Name: "Tag1", - }, - { - ID: 
MustIDBase16(labelTwoID), - OrgID: idOne, - Name: "Tag2", - }, - }, - }, - args: args{ - labelID: MustIDBase16(labelOneID), - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelTwoID), - OrgID: idOne, - Name: "Tag2", - }, - }, - }, - }, - { - name: "deleting a non-existent label", - fields: LabelFields{ - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Name: "Tag1", - }, - }, - }, - args: args{ - labelID: MustIDBase16(labelTwoID), - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Name: "Tag1", - }, - }, - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpDeleteLabel, - Msg: influxdb.ErrLabelNotFound, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.DeleteLabel(ctx, tt.args.labelID) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - labels, err := s.FindLabels(ctx, influxdb.LabelFilter{}) - if err != nil { - t.Fatalf("failed to retrieve labels: %v", err) - } - if diff := cmp.Diff(labels, tt.wants.labels, labelCmpOptions...); diff != "" { - t.Errorf("labels are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func CreateLabelMapping( - init func(LabelFields, *testing.T) (influxdb.LabelService, string, func()), - t *testing.T, -) { - type args struct { - mapping *influxdb.LabelMapping - filter *influxdb.LabelMappingFilter - } - type wants struct { - err error - labels []*influxdb.Label - } - - tests := []struct { - name string - fields LabelFields - args args - wants wants - }{ - { - name: "create label mapping", - fields: LabelFields{ - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - OrgID: idOne, - Name: "Tag1", - }, - }, - }, - args: args{ - mapping: &influxdb.LabelMapping{ - LabelID: MustIDBase16(labelOneID), - ResourceID: idOne, - }, - filter: &influxdb.LabelMappingFilter{ - ResourceID: idOne, - }, - }, - wants: wants{ - labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "Tag1", - OrgID: idOne, - }, - }, - }, - }, - { - name: "mapping to a nonexistent label", - fields: LabelFields{ - IDGenerator: mock.NewIDGenerator(labelOneID, t), - Labels: []*influxdb.Label{}, - }, - args: args{ - mapping: &influxdb.LabelMapping{ - LabelID: MustIDBase16(labelOneID), - ResourceID: idOne, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpDeleteLabel, - Msg: influxdb.ErrLabelNotFound, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.CreateLabelMapping(ctx, tt.args.mapping) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - defer s.DeleteLabelMapping(ctx, tt.args.mapping) - - if tt.args.filter == nil { - return - } - - labels, err := s.FindResourceLabels(ctx, *tt.args.filter) - if err != nil { - t.Fatalf("failed to retrieve labels: %v", err) - } - if diff := cmp.Diff(labels, tt.wants.labels, labelCmpOptions...); diff != "" { - t.Errorf("labels are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func DeleteLabelMapping( - init func(LabelFields, *testing.T) (influxdb.LabelService, string, func()), - t *testing.T, -) { - type args struct { - mapping *influxdb.LabelMapping - filter influxdb.LabelMappingFilter - } - type wants struct { - err error - labels []*influxdb.Label - } - - tests := 
[]struct { - name string - fields LabelFields - args args - wants wants - }{ - { - name: "delete label mapping", - fields: LabelFields{ - Labels: []*influxdb.Label{ - { - ID: MustIDBase16(labelOneID), - Name: "Tag1", - OrgID: idOne, - }, - }, - Mappings: []*influxdb.LabelMapping{ - { - LabelID: MustIDBase16(labelOneID), - ResourceID: idOne, - }, - }, - }, - args: args{ - mapping: &influxdb.LabelMapping{ - LabelID: MustIDBase16(labelOneID), - ResourceID: idOne, - }, - filter: influxdb.LabelMappingFilter{ - ResourceID: idOne, - }, - }, - wants: wants{ - labels: []*influxdb.Label{}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.DeleteLabelMapping(ctx, tt.args.mapping) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - labels, err := s.FindResourceLabels(ctx, tt.args.filter) - if err != nil { - t.Fatalf("failed to retrieve labels: %v", err) - } - if diff := cmp.Diff(labels, tt.wants.labels, labelCmpOptions...); diff != "" { - t.Errorf("labels are different -got/+want\ndiff %s", diff) - } - }) - } -} diff --git a/testing/migration.go b/testing/migration.go deleted file mode 100644 index 11c78033e90..00000000000 --- a/testing/migration.go +++ /dev/null @@ -1,402 +0,0 @@ -package testing - -import ( - "context" - "reflect" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -// NowFunc is a function which returns a time -type NowFunc func() time.Time - -// Migrator tests a migrator against a provided store. -// The migrator is constructed via a provided constructor function which takes -// a logger and a now function used to derive time. -func Migrator(t *testing.T, store kv.SchemaStore, newMigrator func(*testing.T, *zap.Logger, kv.SchemaStore, NowFunc) *migration.Migrator) { - var ( - ctx = context.TODO() - logger = zap.NewNop() - migrationOne = newMigration("migration one") - migrationTwo = newMigration("migration two") - migrationThree = newMigration("migration three") - migrationFour = newMigration("migration four") - - // mocking now time - timestamp = int64(0) - now = func() time.Time { - timestamp++ - return time.Unix(timestamp, 0).In(time.UTC) - } - - // ts returns a point to a time at N unix seconds. 
- ts = func(n int64) *time.Time { - t := time.Unix(n, 0).In(time.UTC) - return &t - } - - migrator = newMigrator(t, logger, store, now) - ) - - migrator.AddMigrations( - // all migrations excluding number four (for now) - migrationOne, - migrationTwo, - migrationThree, - ) - - t.Run("List() shows all migrations in down state", func(t *testing.T) { - migrations, err := migrator.List(ctx) - if err != nil { - t.Fatal(err) - } - - if expected := []migration.Migration{ - { - ID: platform.ID(1), - Name: "migration one", - State: migration.DownMigrationState, - }, - { - ID: platform.ID(2), - Name: "migration two", - State: migration.DownMigrationState, - }, - { - ID: platform.ID(3), - Name: "migration three", - State: migration.DownMigrationState, - }, - }; !reflect.DeepEqual(expected, migrations) { - t.Errorf("expected %#v, found %#v", expected, migrations) - } - }) - - t.Run("Up() runs each migration in turn", func(t *testing.T) { - // apply all migrations - if err := migrator.Up(ctx); err != nil { - t.Fatal(err) - } - - // list migration again - migrations, err := migrator.List(ctx) - if err != nil { - t.Fatal(err) - } - - if expected := []migration.Migration{ - { - ID: platform.ID(1), - Name: "migration one", - State: migration.UpMigrationState, - StartedAt: ts(1), - FinishedAt: ts(2), - }, - { - ID: platform.ID(2), - Name: "migration two", - State: migration.UpMigrationState, - StartedAt: ts(3), - FinishedAt: ts(4), - }, - { - ID: platform.ID(3), - Name: "migration three", - State: migration.UpMigrationState, - StartedAt: ts(5), - FinishedAt: ts(6), - }, - }; !reflect.DeepEqual(expected, migrations) { - t.Errorf("expected %#v, found %#v", expected, migrations) - } - - // assert each migration was called - migrationOne.assertUpCalled(t, 1) - migrationTwo.assertUpCalled(t, 1) - migrationThree.assertUpCalled(t, 1) - }) - - t.Run("List() after adding new migration it reports as expected", func(t *testing.T) { - migrator.AddMigrations(migrationFour) - - // list migration again - migrations, err := migrator.List(ctx) - if err != nil { - t.Fatal(err) - } - - if expected := []migration.Migration{ - { - ID: platform.ID(1), - Name: "migration one", - State: migration.UpMigrationState, - StartedAt: ts(1), - FinishedAt: ts(2), - }, - { - ID: platform.ID(2), - Name: "migration two", - State: migration.UpMigrationState, - StartedAt: ts(3), - FinishedAt: ts(4), - }, - { - ID: platform.ID(3), - Name: "migration three", - State: migration.UpMigrationState, - StartedAt: ts(5), - FinishedAt: ts(6), - }, - { - ID: platform.ID(4), - Name: "migration four", - State: migration.DownMigrationState, - }, - }; !reflect.DeepEqual(expected, migrations) { - t.Errorf("expected %#v, found %#v", expected, migrations) - } - }) - - t.Run("Up() only applies the single down migration", func(t *testing.T) { - // apply all migrations - if err := migrator.Up(ctx); err != nil { - t.Fatal(err) - } - - // list migration again - migrations, err := migrator.List(ctx) - if err != nil { - t.Fatal(err) - } - - if expected := []migration.Migration{ - { - ID: platform.ID(1), - Name: "migration one", - State: migration.UpMigrationState, - StartedAt: ts(1), - FinishedAt: ts(2), - }, - { - ID: platform.ID(2), - Name: "migration two", - State: migration.UpMigrationState, - StartedAt: ts(3), - FinishedAt: ts(4), - }, - { - ID: platform.ID(3), - Name: "migration three", - State: migration.UpMigrationState, - StartedAt: ts(5), - FinishedAt: ts(6), - }, - { - ID: platform.ID(4), - Name: "migration four", - State: migration.UpMigrationState, - 
StartedAt: ts(7), - FinishedAt: ts(8), - }, - }; !reflect.DeepEqual(expected, migrations) { - t.Errorf("expected %#v, found %#v", expected, migrations) - } - - // assert each migration was called only once - migrationOne.assertUpCalled(t, 1) - migrationTwo.assertUpCalled(t, 1) - migrationThree.assertUpCalled(t, 1) - migrationFour.assertUpCalled(t, 1) - }) - - t.Run("Down() calls down for each migration", func(t *testing.T) { - // apply all migrations - if err := migrator.Down(ctx, 0); err != nil { - t.Fatal(err) - } - - // list migration again - migrations, err := migrator.List(ctx) - if err != nil { - t.Fatal(err) - } - - if expected := []migration.Migration{ - { - ID: platform.ID(1), - Name: "migration one", - State: migration.DownMigrationState, - }, - { - ID: platform.ID(2), - Name: "migration two", - State: migration.DownMigrationState, - }, - { - ID: platform.ID(3), - Name: "migration three", - State: migration.DownMigrationState, - }, - { - ID: platform.ID(4), - Name: "migration four", - State: migration.DownMigrationState, - }, - }; !reflect.DeepEqual(expected, migrations) { - t.Errorf("expected %#v, found %#v", expected, migrations) - } - - // assert each migration was called only once - migrationOne.assertDownCalled(t, 1) - migrationTwo.assertDownCalled(t, 1) - migrationThree.assertDownCalled(t, 1) - migrationFour.assertDownCalled(t, 1) - }) - - t.Run("Up() re-applies all migrations", func(t *testing.T) { - // apply all migrations - if err := migrator.Up(ctx); err != nil { - t.Fatal(err) - } - - // list migration again - migrations, err := migrator.List(ctx) - if err != nil { - t.Fatal(err) - } - - if expected := []migration.Migration{ - { - ID: platform.ID(1), - Name: "migration one", - State: migration.UpMigrationState, - StartedAt: ts(9), - FinishedAt: ts(10), - }, - { - ID: platform.ID(2), - Name: "migration two", - State: migration.UpMigrationState, - StartedAt: ts(11), - FinishedAt: ts(12), - }, - { - ID: platform.ID(3), - Name: "migration three", - State: migration.UpMigrationState, - StartedAt: ts(13), - FinishedAt: ts(14), - }, - { - ID: platform.ID(4), - Name: "migration four", - State: migration.UpMigrationState, - StartedAt: ts(15), - FinishedAt: ts(16), - }, - }; !reflect.DeepEqual(expected, migrations) { - t.Errorf("expected %#v, found %#v", expected, migrations) - } - - // assert each migration up was called for a second time - migrationOne.assertUpCalled(t, 2) - migrationTwo.assertUpCalled(t, 2) - migrationThree.assertUpCalled(t, 2) - migrationFour.assertUpCalled(t, 2) - }) - - t.Run("Down() calls down on a subset of migrations", func(t *testing.T) { - if err := migrator.Down(ctx, 2); err != nil { - t.Fatal(err) - } - - // list migration again - migrations, err := migrator.List(ctx) - if err != nil { - t.Fatal(err) - } - - if expected := []migration.Migration{ - { - ID: platform.ID(1), - Name: "migration one", - State: migration.UpMigrationState, - StartedAt: ts(9), - FinishedAt: ts(10), - }, - { - ID: platform.ID(2), - Name: "migration two", - State: migration.UpMigrationState, - StartedAt: ts(11), - FinishedAt: ts(12), - }, - { - ID: platform.ID(3), - Name: "migration three", - State: migration.DownMigrationState, - }, - { - ID: platform.ID(4), - Name: "migration four", - State: migration.DownMigrationState, - }, - }; !reflect.DeepEqual(expected, migrations) { - t.Errorf("expected %#v, found %#v", expected, migrations) - } - }) - - t.Run("List() missing migration spec errors as expected", func(t *testing.T) { - // remove all but first specification from 
migration list - migrator.Specs = migrator.Specs[:1] - // list migration again - _, err := migrator.List(ctx) - require.Error(t, err) - require.Contains(t, err.Error(), "influxd downgrade", - "Error returned on unknown migration should recommend `influxd downgrade`") - }) -} - -func newMigration(name string) *spyMigrationSpec { - return &spyMigrationSpec{name: name} -} - -type spyMigrationSpec struct { - name string - upCalled int - downCalled int -} - -func (s *spyMigrationSpec) MigrationName() string { - return s.name -} - -func (s *spyMigrationSpec) assertUpCalled(t *testing.T, times int) { - t.Helper() - if s.upCalled != times { - t.Errorf("expected Up() to be called %d times, instead found %d times", times, s.upCalled) - } -} - -func (s *spyMigrationSpec) Up(ctx context.Context, _ migration.Store) error { - s.upCalled++ - return nil -} - -func (s *spyMigrationSpec) assertDownCalled(t *testing.T, times int) { - t.Helper() - if s.downCalled != times { - t.Errorf("expected Down() to be called %d times, instead found %d times", times, s.downCalled) - } -} - -func (s *spyMigrationSpec) Down(ctx context.Context, _ migration.Store) error { - s.downCalled++ - return nil -} diff --git a/testing/onboarding.go b/testing/onboarding.go deleted file mode 100644 index 03f473ed830..00000000000 --- a/testing/onboarding.go +++ /dev/null @@ -1,235 +0,0 @@ -package testing - -import ( - "context" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" -) - -var onboardCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y *platform.OnboardingResults) bool { - if x == nil && y == nil { - return true - } - if x != nil && y == nil || y != nil && x == nil { - return false - } - - return x.User.Name == y.User.Name && x.User.OAuthID == y.User.OAuthID && x.User.Status == y.User.Status && - x.Org.Name == y.Org.Name && x.Org.Description == y.Org.Description && - x.Bucket.Type == y.Bucket.Type && x.Bucket.Description == y.Bucket.Description && x.Bucket.RetentionPolicyName == y.Bucket.RetentionPolicyName && x.Bucket.RetentionPeriod == y.Bucket.RetentionPeriod && x.Bucket.Name == y.Bucket.Name && - (x.Auth != nil && y.Auth != nil && cmp.Equal(x.Auth.Permissions, y.Auth.Permissions)) // its possible auth wont exist on the basic service level - }), -} - -// OnboardingFields will include the IDGenerator, TokenGenerator -// and IsOnboarding -type OnboardingFields struct { - IDGenerator platform2.IDGenerator - TokenGenerator platform.TokenGenerator - TimeGenerator platform.TimeGenerator - IsOnboarding bool -} - -// OnboardInitialUser testing -func OnboardInitialUser( - init func(OnboardingFields, *testing.T) (platform.OnboardingService, func()), - t *testing.T, -) { - type args struct { - request *platform.OnboardingRequest - } - type wants struct { - errCode string - results *platform.OnboardingResults - } - tests := []struct { - name string - fields OnboardingFields - args args - wants wants - }{ - { - name: "denied", - fields: OnboardingFields{ - IDGenerator: &loopIDGenerator{ - s: []string{oneID, twoID, threeID, fourID}, - }, - TokenGenerator: mock.NewTokenGenerator(oneToken, nil), - IsOnboarding: false, - }, - wants: wants{ - errCode: errors.EConflict, - }, - }, - { - name: "missing username", - fields: OnboardingFields{ - IDGenerator: &loopIDGenerator{ - s: []string{oneID, 
twoID, threeID, fourID}, - }, - TokenGenerator: mock.NewTokenGenerator(oneToken, nil), - IsOnboarding: true, - }, - args: args{ - request: &platform.OnboardingRequest{ - Org: "org1", - Bucket: "bucket1", - }, - }, - wants: wants{ - errCode: errors.EUnprocessableEntity, - }, - }, - { - name: "missing org", - fields: OnboardingFields{ - IDGenerator: &loopIDGenerator{ - s: []string{oneID, twoID, threeID, fourID}, - }, - TokenGenerator: mock.NewTokenGenerator(oneToken, nil), - IsOnboarding: true, - }, - args: args{ - request: &platform.OnboardingRequest{ - User: "admin", - Bucket: "bucket1", - }, - }, - wants: wants{ - errCode: errors.EUnprocessableEntity, - }, - }, - { - name: "missing bucket", - fields: OnboardingFields{ - IDGenerator: &loopIDGenerator{ - s: []string{oneID, twoID, threeID, fourID}, - }, - TokenGenerator: mock.NewTokenGenerator(oneToken, nil), - IsOnboarding: true, - }, - args: args{ - request: &platform.OnboardingRequest{ - User: "admin", - Org: "org1", - }, - }, - wants: wants{ - errCode: errors.EUnprocessableEntity, - }, - }, - { - name: "valid onboarding json should create a user, org, bucket, and authorization", - fields: OnboardingFields{ - IDGenerator: &loopIDGenerator{ - s: []string{oneID, twoID, threeID, fourID}, - }, - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - TokenGenerator: mock.NewTokenGenerator(oneToken, nil), - IsOnboarding: true, - }, - args: args{ - request: &platform.OnboardingRequest{ - User: "admin", - Org: "org1", - Bucket: "bucket1", - Password: "password1", - RetentionPeriodSeconds: 3600 * 24 * 7, // 1 week - }, - }, - wants: wants{ - results: &platform.OnboardingResults{ - User: &platform.User{ - ID: MustIDBase16(oneID), - Name: "admin", - Status: platform.Active, - }, - Org: &platform.Organization{ - ID: MustIDBase16(twoID), - Name: "org1", - CRUDLog: platform.CRUDLog{ - CreatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - Bucket: &platform.Bucket{ - ID: MustIDBase16(threeID), - Name: "bucket1", - OrgID: MustIDBase16(twoID), - RetentionPeriod: time.Hour * 24 * 7, - CRUDLog: platform.CRUDLog{ - CreatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - Auth: &platform.Authorization{ - ID: MustIDBase16(fourID), - Token: oneToken, - Status: platform.Active, - UserID: MustIDBase16(oneID), - Description: "admin's Token", - OrgID: MustIDBase16(twoID), - Permissions: platform.OperPermissions(), - CRUDLog: platform.CRUDLog{ - CreatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - results, err := s.OnboardInitialUser(ctx, tt.args.request) - if (err != nil) != (tt.wants.errCode != "") { - t.Logf("Error: %v", err) - t.Fatalf("expected error code '%s' got '%v'", tt.wants.errCode, err) - } - if err != nil && tt.wants.errCode != "" { - if code := errors.ErrorCode(err); code != tt.wants.errCode { - t.Logf("Error: %v", err) - t.Fatalf("expected error code to match '%s' got '%v'", tt.wants.errCode, code) - } - } - if diff := cmp.Diff(results, tt.wants.results, onboardCmpOptions); diff != "" { - t.Errorf("onboarding results are different -got/+want\ndiff %s", diff) - } - }) - } - -} - -const ( - oneID = "020f755c3c082000" - twoID = "020f755c3c082001" - 
threeID = "020f755c3c082002" - fourID = "020f755c3c082003" - fiveID = "020f755c3c082004" - sixID = "020f755c3c082005" - oneToken = "020f755c3c082008" -) - -type loopIDGenerator struct { - s []string - p int -} - -func (g *loopIDGenerator) ID() platform2.ID { - if g.p == len(g.s) { - g.p = 0 - } - id := MustIDBase16(g.s[g.p]) - g.p++ - return id -} diff --git a/testing/organization_service.go b/testing/organization_service.go deleted file mode 100644 index e61f1400520..00000000000 --- a/testing/organization_service.go +++ /dev/null @@ -1,1104 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "sort" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" -) - -var orgBucketsIDGenerator = mock.NewMockIDGenerator() - -var organizationCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Comparer(func(x, y *influxdb.Organization) bool { - if x == nil && y == nil { - return true - } - if x != nil && y == nil || y != nil && x == nil { - return false - } - return x.Name == y.Name && x.Description == y.Description - }), - cmp.Transformer("Sort", func(in []*influxdb.Organization) []*influxdb.Organization { - out := append([]*influxdb.Organization(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].Name > out[j].Name - }) - return out - }), -} - -// OrganizationFields will include the IDGenerator, and organizations -type OrganizationFields struct { - IDGenerator platform.IDGenerator - OrgBucketIDs platform.IDGenerator - Organizations []*influxdb.Organization - TimeGenerator influxdb.TimeGenerator -} - -// OrganizationService tests all the service functions. 
-func OrganizationService( - init func(OrganizationFields, *testing.T) (influxdb.OrganizationService, string, func()), t *testing.T, -) { - tests := []struct { - name string - fn func(init func(OrganizationFields, *testing.T) (influxdb.OrganizationService, string, func()), - t *testing.T) - }{ - { - name: "CreateOrganization", - fn: CreateOrganization, - }, - { - name: "FindOrganizationByID", - fn: FindOrganizationByID, - }, - { - name: "FindOrganizations", - fn: FindOrganizations, - }, - { - name: "DeleteOrganization", - fn: DeleteOrganization, - }, - { - name: "FindOrganization", - fn: FindOrganization, - }, - { - name: "UpdateOrganization", - fn: UpdateOrganization, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -// CreateOrganization testing -func CreateOrganization( - init func(OrganizationFields, *testing.T) (influxdb.OrganizationService, string, func()), - t *testing.T, -) { - type args struct { - organization *influxdb.Organization - } - type wants struct { - err error - organizations []*influxdb.Organization - } - - tests := []struct { - name string - fields OrganizationFields - args args - wants wants - }{ - { - name: "create organizations with empty set", - fields: OrganizationFields{ - IDGenerator: mock.NewMockIDGenerator(), - OrgBucketIDs: mock.NewMockIDGenerator(), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{}, - }, - args: args{ - organization: &influxdb.Organization{ - Name: "name1", - ID: idOne, - Description: "desc1", - }, - }, - wants: wants{ - organizations: []*influxdb.Organization{ - { - Name: "name1", - ID: platform.ID(mock.FirstMockID), - Description: "desc1", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - }, - { - name: "basic create organization", - fields: OrganizationFields{ - IDGenerator: mock.NewMockIDGenerator(), - OrgBucketIDs: mock.NewMockIDGenerator(), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - ID: idOne, - Name: "organization1", - }, - }, - }, - args: args{ - organization: &influxdb.Organization{ - ID: idTwo, - Name: "organization2", - }, - }, - wants: wants{ - organizations: []*influxdb.Organization{ - { - ID: idOne, - Name: "organization1", - }, - { - ID: platform.ID(mock.FirstMockID), - Name: "organization2", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - }, - { - name: "empty name", - fields: OrganizationFields{ - IDGenerator: mock.NewMockIDGenerator(), - OrgBucketIDs: orgBucketsIDGenerator, - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - ID: idOne, - Name: "organization1", - }, - }, - }, - args: args{ - organization: &influxdb.Organization{ - ID: idTwo, - }, - }, - wants: wants{ - organizations: []*influxdb.Organization{ - { - ID: idOne, - Name: "organization1", - }, - }, - err: influxdb.ErrOrgNameisEmpty, - }, - }, - { - name: "name only have spaces", - fields: OrganizationFields{ - IDGenerator: mock.NewMockIDGenerator(), - OrgBucketIDs: orgBucketsIDGenerator, - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, 
- Organizations: []*influxdb.Organization{ - { - ID: idOne, - Name: "organization1", - }, - }, - }, - args: args{ - organization: &influxdb.Organization{ - ID: idTwo, - Name: " ", - }, - }, - wants: wants{ - organizations: []*influxdb.Organization{ - { - ID: idOne, - Name: "organization1", - }, - }, - err: influxdb.ErrOrgNameisEmpty, - }, - }, - { - name: "names should be unique", - fields: OrganizationFields{ - IDGenerator: mock.NewMockIDGenerator(), - OrgBucketIDs: orgBucketsIDGenerator, - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - ID: idOne, - Name: "organization1", - }, - }, - }, - args: args{ - organization: &influxdb.Organization{ - ID: idTwo, - Name: "organization1", - }, - }, - wants: wants{ - organizations: []*influxdb.Organization{ - { - ID: idOne, - Name: "organization1", - }, - }, - err: &errors.Error{ - Code: errors.EConflict, - Op: influxdb.OpCreateOrganization, - Msg: "organization with name organization1 already exists", - }, - }, - }, - { - name: "create organization with no id", - fields: OrganizationFields{ - IDGenerator: mock.NewMockIDGenerator(), - OrgBucketIDs: mock.NewMockIDGenerator(), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - ID: idOne, - Name: "organization1", - }, - }, - }, - args: args{ - organization: &influxdb.Organization{ - Name: "organization2", - }, - }, - wants: wants{ - organizations: []*influxdb.Organization{ - { - ID: idOne, - Name: "organization1", - }, - { - ID: platform.ID(mock.FirstMockID), - Name: "organization2", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.CreateOrganization(ctx, tt.args.organization) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - // Delete only newly created organizations - // if tt.args.organization.ID != nil { - defer s.DeleteOrganization(ctx, tt.args.organization.ID) - - organizations, _, err := s.FindOrganizations(ctx, influxdb.OrganizationFilter{}) - diffPlatformErrors(tt.name, err, nil, opPrefix, t) - if diff := cmp.Diff(organizations, tt.wants.organizations, organizationCmpOptions...); diff != "" { - t.Errorf("organizations are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindOrganizationByID testing -func FindOrganizationByID( - init func(OrganizationFields, *testing.T) (influxdb.OrganizationService, string, func()), - t *testing.T, -) { - type args struct { - id platform.ID - } - type wants struct { - err error - organization *influxdb.Organization - } - - tests := []struct { - name string - fields OrganizationFields - args args - wants wants - }{ - { - name: "basic find organization by id", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "organization1", - }, - { - // ID(2) - Name: "organization2", - }, - }, - }, - args: args{ - id: idTwo, - }, - wants: wants{ - organization: &influxdb.Organization{ - ID: idTwo, - Name: "organization2", - }, - }, - }, - { - name: "didn't find organization by id", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: 
[]*influxdb.Organization{ - { - // ID(1) - Name: "organization1", - }, - { - // ID(2) - Name: "organization2", - }, - }, - }, - args: args{ - id: idThree, - }, - wants: wants{ - organization: nil, - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindOrganizationByID, - Msg: "organization not found", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - organization, err := s.FindOrganizationByID(ctx, tt.args.id) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(organization, tt.wants.organization, organizationCmpOptions...); diff != "" { - t.Errorf("organization is different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindOrganizations testing -func FindOrganizations( - init func(OrganizationFields, *testing.T) (influxdb.OrganizationService, string, func()), - t *testing.T, -) { - type args struct { - ID platform.ID - name string - findOptions influxdb.FindOptions - } - - type wants struct { - organizations []*influxdb.Organization - err error - } - tests := []struct { - name string - fields OrganizationFields - args args - wants wants - }{ - { - name: "find all organizations", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "abc", - }, - { - // ID(2) - Name: "xyz", - Description: "desc xyz", - }, - }, - }, - args: args{}, - wants: wants{ - organizations: []*influxdb.Organization{ - { - ID: idOne, - Name: "abc", - }, - { - ID: idTwo, - Name: "xyz", - Description: "desc xyz", - }, - }, - }, - }, - { - name: "find all organizations by offset and limit", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "abc", - }, - { - // ID(2) - Name: "xyz", - Description: "desc xyz", - }, - { - // ID(3) - Name: "ijk", - }, - }, - }, - args: args{ - findOptions: influxdb.FindOptions{ - Offset: 1, - Limit: 1, - }, - }, - wants: wants{ - organizations: []*influxdb.Organization{ - { - ID: idTwo, - Name: "xyz", - Description: "desc xyz", - }, - }, - }, - }, - { - name: "find organization by id", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "abc", - }, - { - // ID(2) - Name: "xyz", - }, - }, - }, - args: args{ - ID: idTwo, - }, - wants: wants{ - organizations: []*influxdb.Organization{ - { - ID: idTwo, - Name: "xyz", - }, - }, - }, - }, - { - name: "find organization by name", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "abc", - }, - { - // ID(2) - Name: "xyz", - }, - }, - }, - args: args{ - name: "xyz", - }, - wants: wants{ - organizations: []*influxdb.Organization{ - { - ID: idTwo, - Name: "xyz", - }, - }, - }, - }, - { - name: "find organization by id not exists", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "abc", - }, - { - // ID(2) - Name: "xyz", - }, - }, - }, - args: args{ - ID: idThree, - }, - wants: wants{ - organizations: []*influxdb.Organization{}, - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindOrganizations, - Msg: "organization not found", - }, - }, - }, - { - name: "find 
organization by name not exists", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "abc", - }, - { - // ID(2) - Name: "xyz", - }, - }, - }, - args: args{ - name: "na", - }, - wants: wants{ - organizations: []*influxdb.Organization{}, - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindOrganizations, - Msg: "organization name \"na\" not found", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - filter := influxdb.OrganizationFilter{} - if tt.args.ID.Valid() { - filter.ID = &tt.args.ID - } - if tt.args.name != "" { - filter.Name = &tt.args.name - } - - organizations, _, err := s.FindOrganizations(ctx, filter, tt.args.findOptions) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(organizations, tt.wants.organizations, organizationCmpOptions...); diff != "" { - t.Errorf("organizations are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// DeleteOrganization testing -func DeleteOrganization( - init func(OrganizationFields, *testing.T) (influxdb.OrganizationService, string, func()), - t *testing.T, -) { - type args struct { - ID platform.ID - } - type wants struct { - err error - organizations []*influxdb.Organization - } - - tests := []struct { - name string - fields OrganizationFields - args args - wants wants - }{ - { - name: "delete organizations using exist id", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "orgA", - }, - { - // ID(2) - Name: "orgB", - }, - }, - }, - args: args{ - ID: idOne, - }, - wants: wants{ - organizations: []*influxdb.Organization{ - { - Name: "orgB", - ID: idTwo, - }, - }, - }, - }, - { - name: "delete organizations using id that does not exist", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "orgA", - }, - { - // ID(2) - Name: "orgB", - }, - }, - }, - args: args{ - ID: MustIDBase16("1234567890654321"), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpDeleteOrganization, - Msg: "organization not found", - }, - organizations: []*influxdb.Organization{ - { - Name: "orgA", - ID: idOne, - }, - { - Name: "orgB", - ID: idTwo, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.DeleteOrganization(ctx, tt.args.ID) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - filter := influxdb.OrganizationFilter{} - organizations, _, err := s.FindOrganizations(ctx, filter) - diffPlatformErrors(tt.name, err, nil, opPrefix, t) - - if diff := cmp.Diff(organizations, tt.wants.organizations, organizationCmpOptions...); diff != "" { - t.Errorf("organizations are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindOrganization testing -func FindOrganization( - init func(OrganizationFields, *testing.T) (influxdb.OrganizationService, string, func()), - t *testing.T, -) { - type args struct { - name string - id platform.ID - } - - type wants struct { - organization *influxdb.Organization - err error - } - - tests 
:= []struct { - name string - fields OrganizationFields - args args - wants wants - }{ - { - name: "find organization by name", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "abc", - }, - { - // ID(2) - Name: "xyz", - }, - }, - }, - args: args{ - name: "abc", - }, - wants: wants{ - organization: &influxdb.Organization{ - ID: idOne, - Name: "abc", - }, - }, - }, - { - name: "find organization in which no name filter matches should return no org", - args: args{ - name: "unknown", - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindOrganization, - Msg: "organization name \"unknown\" not found", - }, - }, - }, - { - name: "find organization in which no id filter matches should return no org", - args: args{ - id: platform.ID(3), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "organization not found", - }, - }, - }, - { - name: "find organization no filter is set returns an error about filters not provided", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "o1", - }, - }, - }, - wants: wants{ - err: influxdb.ErrInvalidOrgFilter, - }, - }, - { - name: "missing organization returns error", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - Organizations: []*influxdb.Organization{}, - }, - args: args{ - name: "abc", - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindOrganization, - Msg: "organization name \"abc\" not found", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - filter := influxdb.OrganizationFilter{} - if tt.args.name != "" { - filter.Name = &tt.args.name - } - if tt.args.id != platform.InvalidID() { - filter.ID = &tt.args.id - } - - organization, err := s.FindOrganization(ctx, filter) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(organization, tt.wants.organization, organizationCmpOptions...); diff != "" { - t.Errorf("organizations are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// UpdateOrganization testing -func UpdateOrganization( - init func(OrganizationFields, *testing.T) (influxdb.OrganizationService, string, func()), - t *testing.T, -) { - type args struct { - id platform.ID - name *string - description *string - } - type wants struct { - err error - organization *influxdb.Organization - } - - tests := []struct { - name string - fields OrganizationFields - args args - wants wants - }{ - { - name: "update id not exists", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "organization1", - }, - { - // ID(2) - Name: "organization2", - }, - }, - }, - args: args{ - id: MustIDBase16(threeID), - name: strPtr("changed"), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpUpdateOrganization, - Msg: "organization not found", - }, - }, - }, - { - name: "update name", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, 
time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "organization1", - }, - { - // ID(2) - Name: "organization2", - }, - }, - }, - args: args{ - id: idOne, - name: strPtr("changed"), - }, - wants: wants{ - organization: &influxdb.Organization{ - ID: idOne, - Name: "changed", - CRUDLog: influxdb.CRUDLog{ - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - { - name: "update name to same name", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "organization1", - }, - { - // ID(2) - Name: "organization2", - }, - }, - }, - args: args{ - id: idOne, - name: strPtr("organization1"), - }, - wants: wants{ - organization: &influxdb.Organization{ - ID: idOne, - Name: "organization1", - CRUDLog: influxdb.CRUDLog{ - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, { - name: "update name not unique", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "organization1", - }, - { - // ID(2) - Name: "organization2", - }, - }, - }, - args: args{ - id: idOne, - name: strPtr("organization2"), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EConflict, - Op: influxdb.OpUpdateOrganization, - Msg: "organization with name organization2 already exists", - }, - }, - }, - { - name: "update name is empty", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "organization1", - }, - { - // ID(2) - Name: "organization2", - }, - }, - }, - args: args{ - id: idOne, - name: strPtr(""), - }, - wants: wants{ - err: influxdb.ErrOrgNameisEmpty, - }, - }, - { - name: "update name only has space", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "organization1", - }, - { - // ID(2) - Name: "organization2", - }, - }, - }, - args: args{ - id: idOne, - name: strPtr(" "), - }, - wants: wants{ - err: influxdb.ErrOrgNameisEmpty, - }, - }, - { - name: "update description", - fields: OrganizationFields{ - OrgBucketIDs: mock.NewIncrementingIDGenerator(idOne), - TimeGenerator: mock.TimeGenerator{FakeValue: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC)}, - Organizations: []*influxdb.Organization{ - { - // ID(1) - Name: "organization1", - Description: "organization1 description", - }, - { - // ID(2) - Name: "organization2", - Description: "organization2 description", - }, - }, - }, - args: args{ - id: idOne, - description: strPtr("changed"), - }, - wants: wants{ - organization: &influxdb.Organization{ - ID: idOne, - Name: "organization1", - Description: "changed", - CRUDLog: influxdb.CRUDLog{ - UpdatedAt: time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC), - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - upd := influxdb.OrganizationUpdate{} - upd.Name = tt.args.name - 
upd.Description = tt.args.description - - organization, err := s.UpdateOrganization(ctx, tt.args.id, upd) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(organization, tt.wants.organization, organizationCmpOptions...); diff != "" { - t.Errorf("organization is different -got/+want\ndiff %s", diff) - } - }) - } -} diff --git a/testing/passwords.go b/testing/passwords.go deleted file mode 100644 index e40894182b1..00000000000 --- a/testing/passwords.go +++ /dev/null @@ -1,360 +0,0 @@ -package testing - -import ( - "context" - "fmt" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// PasswordFields will include the IDGenerator, and users and their passwords. -type PasswordFields struct { - IDGenerator platform.IDGenerator - Users []*influxdb.User - Passwords []string // passwords are indexed against the Users field -} - -// PasswordsService tests all the service functions. -func PasswordsService( - init func(PasswordFields, *testing.T) (influxdb.PasswordsService, func()), t *testing.T, -) { - tests := []struct { - name string - fn func(init func(PasswordFields, *testing.T) (influxdb.PasswordsService, func()), - t *testing.T) - }{ - { - name: "SetPassword", - fn: SetPassword, - }, - { - name: "ComparePassword", - fn: ComparePassword, - }, - { - name: "CompareAndSetPassword", - fn: CompareAndSetPassword, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -// SetPassword tests overriding the password of a known user -func SetPassword( - init func(PasswordFields, *testing.T) (influxdb.PasswordsService, func()), - t *testing.T) { - type args struct { - user platform.ID - password string - } - type wants struct { - err error - } - tests := []struct { - name string - fields PasswordFields - args args - wants wants - }{ - { - name: "setting password longer than 8 characters works", - fields: PasswordFields{ - Users: []*influxdb.User{ - { - Name: "user1", - ID: MustIDBase16(oneID), - }, - }, - }, - args: args{ - user: MustIDBase16(oneID), - password: "howdydoody", - }, - wants: wants{}, - }, - { - name: "passwords that are too short have errors", - fields: PasswordFields{ - Users: []*influxdb.User{ - { - Name: "user1", - ID: MustIDBase16(oneID), - }, - }, - }, - args: args{ - user: MustIDBase16(oneID), - password: "short", - }, - wants: wants{ - err: fmt.Errorf("passwords must be at least 8 characters long"), - }, - }, - { - name: "setting a password for a non-existent user is a generic-like error", - fields: PasswordFields{ - Users: []*influxdb.User{ - { - Name: "user1", - ID: MustIDBase16(oneID), - }, - }, - }, - args: args{ - user: 33, - password: "howdydoody", - }, - wants: wants{ - err: fmt.Errorf("your userID is incorrect"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - err := s.SetPassword(ctx, tt.args.user, tt.args.password) - - if (err != nil && tt.wants.err == nil) || (err == nil && tt.wants.err != nil) { - t.Fatalf("expected SetPassword error %v got %v", tt.wants.err, err) - return - } - - if err != nil { - if want, got := tt.wants.err.Error(), err.Error(); want != got { - t.Fatalf("expected SetPassword error %v got %v", want, got) - } - } - }) - } -} - -// ComparePassword tests setting and comparing passwords. 
-func ComparePassword( - init func(PasswordFields, *testing.T) (influxdb.PasswordsService, func()), - t *testing.T) { - type args struct { - user platform.ID - password string - } - type wants struct { - err error - } - tests := []struct { - name string - fields PasswordFields - args args - wants wants - }{ - { - name: "comparing same password is not an error", - fields: PasswordFields{ - Users: []*influxdb.User{ - { - Name: "user1", - ID: MustIDBase16(oneID), - }, - }, - Passwords: []string{"howdydoody"}, - }, - args: args{ - user: MustIDBase16(oneID), - password: "howdydoody", - }, - wants: wants{}, - }, - { - name: "comparing different password is an error", - fields: PasswordFields{ - Users: []*influxdb.User{ - { - Name: "user1", - ID: MustIDBase16(oneID), - }, - }, - Passwords: []string{"howdydoody"}, - }, - args: args{ - user: MustIDBase16(oneID), - password: "wrongpassword", - }, - wants: wants{ - err: fmt.Errorf("your username or password is incorrect"), - }, - }, - { - name: "comparing a password to a non-existent user is a generic-like error", - fields: PasswordFields{ - Users: []*influxdb.User{ - { - Name: "user1", - ID: MustIDBase16(oneID), - }, - }, - Passwords: []string{"howdydoody"}, - }, - args: args{ - user: 1, - password: "howdydoody", - }, - wants: wants{ - err: fmt.Errorf("your userID is incorrect"), - }, - }, - { - name: "user exists but no password has been set", - fields: PasswordFields{ - Users: []*influxdb.User{ - { - Name: "user1", - ID: MustIDBase16(oneID), - }, - }, - }, - args: args{ - user: MustIDBase16(oneID), - password: "howdydoody", - }, - wants: wants{ - err: fmt.Errorf("your username or password is incorrect"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - err := s.ComparePassword(ctx, tt.args.user, tt.args.password) - - if (err != nil && tt.wants.err == nil) || (err == nil && tt.wants.err != nil) { - t.Fatalf("expected ComparePassword error %v got %v", tt.wants.err, err) - return - } - - if err != nil { - if want, got := tt.wants.err.Error(), err.Error(); want != got { - t.Fatalf("expected ComparePassword error %v got %v", tt.wants.err, err) - } - return - } - - }) - } -} - -// CompareAndSetPassword tests implementations of PasswordsService. 
-func CompareAndSetPassword( - init func(PasswordFields, *testing.T) (influxdb.PasswordsService, func()), - t *testing.T) { - type args struct { - user platform.ID - old string - new string - } - type wants struct { - err error - } - tests := []struct { - name string - fields PasswordFields - args args - wants wants - }{ - { - name: "setting a password to the existing password is valid", - fields: PasswordFields{ - Users: []*influxdb.User{ - { - Name: "user1", - ID: MustIDBase16(oneID), - }, - }, - Passwords: []string{"howdydoody"}, - }, - args: args{ - user: MustIDBase16(oneID), - old: "howdydoody", - new: "howdydoody", - }, - wants: wants{}, - }, - { - name: "providing an incorrect old password is an error", - fields: PasswordFields{ - Users: []*influxdb.User{ - { - Name: "user1", - ID: MustIDBase16(oneID), - }, - }, - Passwords: []string{"howdydoody"}, - }, - args: args{ - user: MustIDBase16(oneID), - old: "invalid", - new: "not used", - }, - wants: wants{ - err: fmt.Errorf("your username or password is incorrect"), - }, - }, - { - name: " a new password that is less than 8 characters is an error", - fields: PasswordFields{ - Users: []*influxdb.User{ - { - Name: "user1", - ID: MustIDBase16(oneID), - }, - }, - Passwords: []string{"howdydoody"}, - }, - args: args{ - user: MustIDBase16(oneID), - old: "howdydoody", - new: "short", - }, - wants: wants{ - err: fmt.Errorf("passwords must be at least 8 characters long"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - err := s.CompareAndSetPassword(ctx, tt.args.user, tt.args.old, tt.args.new) - - if (err != nil && tt.wants.err == nil) || (err == nil && tt.wants.err != nil) { - t.Fatalf("expected CompareAndSetPassword error %v got %v", tt.wants.err, err) - return - } - - if err != nil { - if want, got := tt.wants.err.Error(), err.Error(); want != got { - t.Fatalf("expected CompareAndSetPassword error %v got %v", tt.wants.err, err) - } - return - } - - }) - } - -} diff --git a/testing/scraper_target.go b/testing/scraper_target.go deleted file mode 100644 index 20a4a837ff3..00000000000 --- a/testing/scraper_target.go +++ /dev/null @@ -1,846 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "fmt" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" -) - -const ( - targetOneID = "020f755c3c082000" - targetTwoID = "020f755c3c082001" - targetThreeID = "020f755c3c082002" -) - -var ( - target1 = influxdb.ScraperTarget{ - Name: "name1", - Type: influxdb.PrometheusScraperType, - OrgID: idOne, - BucketID: idOne, - URL: "url1", - ID: MustIDBase16(targetOneID), - } - target2 = influxdb.ScraperTarget{ - Name: "name2", - Type: influxdb.PrometheusScraperType, - OrgID: idTwo, - BucketID: idTwo, - URL: "url2", - ID: MustIDBase16(targetTwoID), - } - target3 = influxdb.ScraperTarget{ - Name: "name3", - Type: influxdb.PrometheusScraperType, - OrgID: idOne, - BucketID: idThree, - URL: "url3", - ID: MustIDBase16(targetThreeID), - } - newOrg = func(id platform.ID) *influxdb.Organization { - return &influxdb.Organization{ - ID: id, - Name: fmt.Sprintf("org%d", int(id)), - } - } -) - -// TargetFields will include the IDGenerator, and targets -type TargetFields struct { - IDGenerator platform.IDGenerator - Targets 
[]*influxdb.ScraperTarget - Organizations []*influxdb.Organization -} - -var targetCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []influxdb.ScraperTarget) []influxdb.ScraperTarget { - out := append([]influxdb.ScraperTarget(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -// ScraperService tests all the service functions. -func ScraperService( - init func(TargetFields, *testing.T) (influxdb.ScraperTargetStoreService, string, func()), t *testing.T, -) { - t.Helper() - tests := []struct { - name string - fn func(init func(TargetFields, *testing.T) (influxdb.ScraperTargetStoreService, string, func()), - t *testing.T) - }{ - { - name: "AddTarget", - fn: AddTarget, - }, - { - name: "ListTargets", - fn: ListTargets, - }, - { - name: "GetTargetByID", - fn: GetTargetByID, - }, - { - name: "RemoveTarget", - fn: RemoveTarget, - }, - { - name: "UpdateTarget", - fn: UpdateTarget, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -// AddTarget testing. -func AddTarget( - init func(TargetFields, *testing.T) (influxdb.ScraperTargetStoreService, string, func()), - t *testing.T, -) { - t.Helper() - type args struct { - userID platform.ID - target *influxdb.ScraperTarget - } - type wants struct { - err error - targets []influxdb.ScraperTarget - } - tests := []struct { - name string - fields TargetFields - args args - wants wants - }{ - { - name: "create targets with empty set", - fields: TargetFields{ - IDGenerator: mock.NewIDGenerator(targetOneID, t), - Targets: []*influxdb.ScraperTarget{}, - Organizations: []*influxdb.Organization{newOrg(platform.ID(1))}, - }, - args: args{ - userID: MustIDBase16(threeID), - target: &influxdb.ScraperTarget{ - Name: "name1", - Type: influxdb.PrometheusScraperType, - OrgID: idOne, - BucketID: idOne, - URL: "url1", - }, - }, - wants: wants{ - targets: []influxdb.ScraperTarget{ - { - Name: "name1", - Type: influxdb.PrometheusScraperType, - OrgID: idOne, - BucketID: idOne, - URL: "url1", - ID: MustIDBase16(targetOneID), - }, - }, - }, - }, - { - name: "create target with invalid org id", - fields: TargetFields{ - IDGenerator: mock.NewIDGenerator(targetTwoID, t), - Organizations: []*influxdb.Organization{newOrg(platform.ID(1))}, - Targets: []*influxdb.ScraperTarget{ - { - Name: "name1", - Type: influxdb.PrometheusScraperType, - OrgID: idOne, - BucketID: idOne, - URL: "url1", - ID: MustIDBase16(targetOneID), - }, - }, - }, - args: args{ - target: &influxdb.ScraperTarget{ - ID: MustIDBase16(targetTwoID), - Name: "name2", - Type: influxdb.PrometheusScraperType, - BucketID: idTwo, - URL: "url2", - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "provided organization ID has invalid format", - Op: influxdb.OpAddTarget, - }, - targets: []influxdb.ScraperTarget{ - { - Name: "name1", - Type: influxdb.PrometheusScraperType, - OrgID: idOne, - BucketID: idOne, - URL: "url1", - ID: MustIDBase16(targetOneID), - }, - }, - }, - }, - { - name: "create target with invalid bucket id", - fields: TargetFields{ - IDGenerator: mock.NewIDGenerator(targetTwoID, t), - Organizations: []*influxdb.Organization{newOrg(platform.ID(1))}, - Targets: []*influxdb.ScraperTarget{ - { - Name: "name1", - Type: influxdb.PrometheusScraperType, - OrgID: idOne, - BucketID: idOne, - URL: "url1", - ID: 
MustIDBase16(targetOneID), - }, - }, - }, - args: args{ - target: &influxdb.ScraperTarget{ - ID: MustIDBase16(targetTwoID), - Name: "name2", - Type: influxdb.PrometheusScraperType, - OrgID: idTwo, - URL: "url2", - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EInvalid, - Msg: "provided bucket ID has invalid format", - Op: influxdb.OpAddTarget, - }, - targets: []influxdb.ScraperTarget{ - { - Name: "name1", - Type: influxdb.PrometheusScraperType, - OrgID: idOne, - BucketID: idOne, - URL: "url1", - ID: MustIDBase16(targetOneID), - }, - }, - }, - }, - { - name: "basic create target", - fields: TargetFields{ - IDGenerator: mock.NewIDGenerator(targetTwoID, t), - Targets: []*influxdb.ScraperTarget{ - { - Name: "name1", - Type: influxdb.PrometheusScraperType, - OrgID: idOne, - BucketID: idOne, - URL: "url1", - ID: MustIDBase16(targetOneID), - }, - }, - Organizations: []*influxdb.Organization{newOrg(platform.ID(1)), newOrg(platform.ID(2))}, - }, - args: args{ - userID: MustIDBase16(threeID), - target: &influxdb.ScraperTarget{ - ID: MustIDBase16(targetTwoID), - Name: "name2", - Type: influxdb.PrometheusScraperType, - OrgID: idTwo, - BucketID: idTwo, - URL: "url2", - }, - }, - wants: wants{ - targets: []influxdb.ScraperTarget{ - { - Name: "name1", - Type: influxdb.PrometheusScraperType, - OrgID: idOne, - BucketID: idOne, - URL: "url1", - ID: MustIDBase16(targetOneID), - }, - { - Name: "name2", - Type: influxdb.PrometheusScraperType, - OrgID: idTwo, - BucketID: idTwo, - URL: "url2", - ID: MustIDBase16(targetTwoID), - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.AddTarget(ctx, tt.args.target, tt.args.userID) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - defer s.RemoveTarget(ctx, tt.args.target.ID) - - targets, err := s.ListTargets(ctx, influxdb.ScraperTargetFilter{}) - if err != nil { - t.Fatalf("failed to retrieve scraper targets: %v", err) - } - if diff := cmp.Diff(targets, tt.wants.targets, targetCmpOptions...); diff != "" { - t.Errorf("scraper targets are different -got/+want\ndiff %s", diff) - } - }) - - } -} - -// ListTargets testing -func ListTargets( - init func(TargetFields, *testing.T) (influxdb.ScraperTargetStoreService, string, func()), - t *testing.T, -) { - type args struct { - filter influxdb.ScraperTargetFilter - } - type wants struct { - targets []influxdb.ScraperTarget - err error - } - - tests := []struct { - name string - fields TargetFields - args args - wants wants - }{ - { - name: "get all targets", - fields: TargetFields{ - Organizations: []*influxdb.Organization{ - newOrg(platform.ID(1)), - newOrg(platform.ID(2)), - }, - Targets: []*influxdb.ScraperTarget{ - &target1, - &target2, - &target3, - }, - }, - args: args{ - filter: influxdb.ScraperTargetFilter{}, - }, - wants: wants{ - targets: []influxdb.ScraperTarget{ - target1, - target2, - target3, - }, - }, - }, - { - name: "filter by name", - fields: TargetFields{ - Organizations: []*influxdb.Organization{ - newOrg(platform.ID(1)), - newOrg(platform.ID(2)), - }, - Targets: []*influxdb.ScraperTarget{ - &target1, - &target2, - &target3, - }, - }, - args: args{ - filter: influxdb.ScraperTargetFilter{ - Name: strPtr(target2.Name), - }, - }, - wants: wants{ - targets: []influxdb.ScraperTarget{ - target2, - }, - }, - }, - { - name: "filter by id", - fields: TargetFields{ - Organizations: []*influxdb.Organization{ - newOrg(platform.ID(1)), - 
newOrg(platform.ID(2)), - }, - Targets: []*influxdb.ScraperTarget{ - &target1, - &target2, - &target3, - }, - }, - args: args{ - filter: influxdb.ScraperTargetFilter{ - IDs: map[platform.ID]bool{target2.ID: false}, - }, - }, - wants: wants{ - targets: []influxdb.ScraperTarget{ - target2, - }, - }, - }, - { - name: "filter targets by orgID", - fields: TargetFields{ - Organizations: []*influxdb.Organization{ - newOrg(platform.ID(1)), - newOrg(platform.ID(2)), - }, - Targets: []*influxdb.ScraperTarget{ - &target1, - &target2, - &target3, - }, - }, - args: args{ - filter: influxdb.ScraperTargetFilter{ - OrgID: idPtr(idOne), - }, - }, - wants: wants{ - targets: []influxdb.ScraperTarget{ - target1, - target3, - }, - }, - }, - { - name: "filter targets by org name", - fields: TargetFields{ - Organizations: []*influxdb.Organization{ - newOrg(platform.ID(1)), - newOrg(platform.ID(2)), - }, - Targets: []*influxdb.ScraperTarget{ - &target1, - &target2, - &target3, - }, - }, - args: args{ - filter: influxdb.ScraperTargetFilter{ - Org: strPtr("org1"), - }, - }, - wants: wants{ - targets: []influxdb.ScraperTarget{ - target1, - target3, - }, - }, - }, - { - name: "filter targets by org name not exist", - fields: TargetFields{ - Organizations: []*influxdb.Organization{ - newOrg(platform.ID(1)), - }, - Targets: []*influxdb.ScraperTarget{ - &target1, - &target2, - &target3, - }, - }, - args: args{ - filter: influxdb.ScraperTargetFilter{ - Org: strPtr("org2"), - }, - }, - wants: wants{ - targets: []influxdb.ScraperTarget{}, - err: &errors.Error{ - Code: errors.ENotFound, - Msg: `organization name "org2" not found`, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - targets, err := s.ListTargets(ctx, tt.args.filter) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(targets, tt.wants.targets, targetCmpOptions...); diff != "" { - t.Errorf("targets are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// GetTargetByID testing -func GetTargetByID( - init func(TargetFields, *testing.T) (influxdb.ScraperTargetStoreService, string, func()), - t *testing.T, -) { - t.Helper() - type args struct { - id platform.ID - } - type wants struct { - err error - target *influxdb.ScraperTarget - } - - tests := []struct { - name string - fields TargetFields - args args - wants wants - }{ - { - name: "basic find target by id", - fields: TargetFields{ - Organizations: []*influxdb.Organization{newOrg(platform.ID(1))}, - Targets: []*influxdb.ScraperTarget{ - { - ID: MustIDBase16(targetOneID), - Name: "target1", - OrgID: idOne, - BucketID: idOne, - }, - { - ID: MustIDBase16(targetTwoID), - Name: "target2", - OrgID: idOne, - BucketID: idOne, - }, - }, - }, - args: args{ - id: MustIDBase16(targetTwoID), - }, - wants: wants{ - target: &influxdb.ScraperTarget{ - ID: MustIDBase16(targetTwoID), - Name: "target2", - OrgID: idOne, - BucketID: idOne, - }, - }, - }, - { - name: "find target by id not find", - fields: TargetFields{ - Targets: []*influxdb.ScraperTarget{ - { - ID: MustIDBase16(targetOneID), - Name: "target1", - OrgID: idOne, - BucketID: idOne, - }, - { - ID: MustIDBase16(targetTwoID), - Name: "target2", - OrgID: idOne, - BucketID: idOne, - }, - }, - }, - args: args{ - id: MustIDBase16(threeID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpGetTargetByID, - Msg: "scraper target is not found", - }, - }, - }, - } - - for _, tt := 
range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - target, err := s.GetTargetByID(ctx, tt.args.id) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(target, tt.wants.target, targetCmpOptions...); diff != "" { - t.Errorf("target is different -got/+want\ndiff %s", diff) - } - }) - } -} - -// RemoveTarget testing -func RemoveTarget(init func(TargetFields, *testing.T) (influxdb.ScraperTargetStoreService, string, func()), - t *testing.T) { - type args struct { - ID platform.ID - userID platform.ID - } - type wants struct { - err error - targets []influxdb.ScraperTarget - } - tests := []struct { - name string - fields TargetFields - args args - wants wants - }{ - { - name: "delete targets using exist id", - fields: TargetFields{ - Organizations: []*influxdb.Organization{newOrg(platform.ID(1))}, - Targets: []*influxdb.ScraperTarget{ - { - ID: MustIDBase16(targetOneID), - OrgID: idOne, - BucketID: idOne, - }, - { - ID: MustIDBase16(targetTwoID), - OrgID: idOne, - BucketID: idOne, - }, - }, - }, - args: args{ - ID: MustIDBase16(targetOneID), - userID: MustIDBase16(threeID), - }, - wants: wants{ - targets: []influxdb.ScraperTarget{ - { - ID: MustIDBase16(targetTwoID), - OrgID: idOne, - BucketID: idOne, - }, - }, - }, - }, - { - name: "delete targets using id that does not exist", - fields: TargetFields{ - Organizations: []*influxdb.Organization{newOrg(platform.ID(1))}, - Targets: []*influxdb.ScraperTarget{ - { - ID: MustIDBase16(targetOneID), - OrgID: idOne, - BucketID: idOne, - }, - { - ID: MustIDBase16(targetTwoID), - OrgID: idOne, - BucketID: idOne, - }, - }, - }, - args: args{ - ID: MustIDBase16(targetThreeID), - userID: MustIDBase16(threeID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpRemoveTarget, - Msg: "scraper target is not found", - }, - targets: []influxdb.ScraperTarget{ - { - ID: MustIDBase16(targetOneID), - OrgID: idOne, - BucketID: idOne, - }, - { - ID: MustIDBase16(targetTwoID), - OrgID: idOne, - BucketID: idOne, - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.RemoveTarget(ctx, tt.args.ID) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - targets, err := s.ListTargets(ctx, influxdb.ScraperTargetFilter{}) - if err != nil { - t.Fatalf("failed to retrieve targets: %v", err) - } - if diff := cmp.Diff(targets, tt.wants.targets, targetCmpOptions...); diff != "" { - t.Errorf("targets are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// UpdateTarget testing -func UpdateTarget( - init func(TargetFields, *testing.T) (influxdb.ScraperTargetStoreService, string, func()), - t *testing.T, -) { - type args struct { - url string - userID platform.ID - id platform.ID - } - type wants struct { - err error - target *influxdb.ScraperTarget - } - - tests := []struct { - name string - fields TargetFields - args args - wants wants - }{ - { - name: "update url with blank id", - fields: TargetFields{ - Organizations: []*influxdb.Organization{newOrg(platform.ID(1))}, - Targets: []*influxdb.ScraperTarget{ - { - ID: MustIDBase16(targetOneID), - URL: "url1", - OrgID: idOne, - BucketID: idOne, - }, - { - ID: MustIDBase16(targetTwoID), - URL: "url2", - OrgID: idOne, - BucketID: idOne, - }, - }, - }, - args: args{ - url: "changed", - }, - wants: wants{ - err: &errors.Error{ - Code: 
errors.EInvalid, - Op: influxdb.OpUpdateTarget, - Msg: "provided scraper target ID has invalid format", - }, - }, - }, - { - name: "update url with non exist id", - fields: TargetFields{ - Organizations: []*influxdb.Organization{newOrg(platform.ID(1))}, - Targets: []*influxdb.ScraperTarget{ - { - ID: MustIDBase16(targetOneID), - URL: "url1", - OrgID: idOne, - BucketID: idOne, - }, - { - ID: MustIDBase16(targetTwoID), - URL: "url2", - OrgID: idOne, - BucketID: idOne, - }, - }, - }, - args: args{ - id: MustIDBase16(targetThreeID), - url: "changed", - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpUpdateTarget, - Msg: "scraper target is not found", - }, - }, - }, - { - name: "update url", - fields: TargetFields{ - Organizations: []*influxdb.Organization{newOrg(platform.ID(1))}, - Targets: []*influxdb.ScraperTarget{ - { - ID: MustIDBase16(targetOneID), - URL: "url1", - OrgID: idOne, - BucketID: idOne, - }, - { - ID: MustIDBase16(targetTwoID), - URL: "url2", - OrgID: idOne, - BucketID: idOne, - }, - }, - }, - args: args{ - id: MustIDBase16(targetOneID), - url: "changed", - }, - wants: wants{ - target: &influxdb.ScraperTarget{ - ID: MustIDBase16(targetOneID), - URL: "changed", - OrgID: idOne, - BucketID: idOne, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - upd := &influxdb.ScraperTarget{ - ID: tt.args.id, - URL: tt.args.url, - } - - target, err := s.UpdateTarget(ctx, upd, tt.args.userID) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(target, tt.wants.target, targetCmpOptions...); diff != "" { - t.Errorf("scraper target is different -got/+want\ndiff %s", diff) - } - }) - } -} diff --git a/testing/secret.go b/testing/secret.go deleted file mode 100644 index 88a3878a93f..00000000000 --- a/testing/secret.go +++ /dev/null @@ -1,527 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var secretCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []string) []string { - out := append([]string(nil), in...) // Copy input to avoid mutating it - sort.Strings(out) - return out - }), -} - -// A secret is a comparable data structure that is used for testing -type Secret struct { - OrganizationID platform2.ID - Env map[string]string -} - -// SecretServiceFields contain the -type SecretServiceFields struct { - Secrets []Secret -} - -// SecretService will test all methods for the secrets service. 
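Note: the exported suites in this deleted package (ScraperService above, SecretService just below, and the others that follow) are conformance tests driven by a concrete store through an init callback. A minimal sketch of how a consumer might wire the SecretService suite up, assuming a hypothetical in-memory constructor and import alias (none of these consumer-side names appear in the diff):

package inmem_test

import (
	"context"
	"testing"

	platform "github.com/influxdata/influxdb/v2"
	influxdbtesting "github.com/influxdata/influxdb/v2/testing"
)

// initSecretService seeds a hypothetical in-memory SecretService with the
// fixture data and hands it to the shared suite along with a cleanup func.
func initSecretService(f influxdbtesting.SecretServiceFields, t *testing.T) (platform.SecretService, func()) {
	svc := newInMemSecretService() // assumed constructor, not part of this diff
	ctx := context.Background()
	for _, sec := range f.Secrets {
		if err := svc.PutSecrets(ctx, sec.OrganizationID, sec.Env); err != nil {
			t.Fatalf("seeding secrets: %v", err)
		}
	}
	return svc, func() {}
}

func TestSecretService(t *testing.T) {
	influxdbtesting.SecretService(initSecretService, t)
}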
-func SecretService( - init func(SecretServiceFields, *testing.T) (platform.SecretService, func()), - t *testing.T, -) { - - tests := []struct { - name string - fn func( - init func(SecretServiceFields, *testing.T) (platform.SecretService, func()), - t *testing.T, - ) - }{ - { - name: "LoadSecret", - fn: LoadSecret, - }, - { - name: "PutSecret", - fn: PutSecret, - }, - { - name: "PutSecrets", - fn: PutSecrets, - }, - { - name: "PatchSecrets", - fn: PatchSecrets, - }, - { - name: "GetSecretKeys", - fn: GetSecretKeys, - }, - { - name: "DeleteSecrets", - fn: DeleteSecrets, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -// LoadSecret tests the LoadSecret method for the SecretService interface. -func LoadSecret( - init func(f SecretServiceFields, t *testing.T) (platform.SecretService, func()), - t *testing.T, -) { - type args struct { - orgID platform2.ID - key string - } - type wants struct { - value string - err error - } - - tests := []struct { - name string - fields SecretServiceFields - args args - wants wants - }{ - { - name: "load secret field", - fields: SecretServiceFields{ - Secrets: []Secret{ - { - OrganizationID: platform2.ID(1), - Env: map[string]string{ - "api_key": "abc123xyz", - }, - }, - }, - }, - args: args{ - orgID: platform2.ID(1), - key: "api_key", - }, - wants: wants{ - value: "abc123xyz", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - val, err := s.LoadSecret(ctx, tt.args.orgID, tt.args.key) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - - if want, got := tt.wants.value, val; want != got { - t.Errorf("expected value to be %s, got %s", want, got) - } - }) - } -} - -// PutSecret tests the PutSecret method for the SecretService interface. -func PutSecret( - init func(f SecretServiceFields, t *testing.T) (platform.SecretService, func()), - t *testing.T, -) { - type args struct { - orgID platform2.ID - key string - value string - } - type wants struct { - err error - } - - tests := []struct { - name string - fields SecretServiceFields - args args - wants wants - }{ - { - name: "put secret", - fields: SecretServiceFields{}, - args: args{ - orgID: platform2.ID(1), - key: "api_key", - value: "abc123xyz", - }, - wants: wants{}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - err := s.PutSecret(ctx, tt.args.orgID, tt.args.key, tt.args.value) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - - val, err := s.LoadSecret(ctx, tt.args.orgID, tt.args.key) - if err != nil { - t.Fatalf("unexpected error %v", err) - } - - if want, got := tt.args.value, val; want != got { - t.Errorf("expected value to be %s, got %s", want, got) - } - }) - } -} - -// PutSecrets tests the PutSecrets method for the SecretService interface. 
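The error assertions repeated throughout this file follow a two-step pattern: first check that got and want agree on whether an error occurred, then compare messages only when both are non-nil. Factored out, the pattern looks like the helper below (purely illustrative; it assumes the same package and "testing" import as the file above):

// assertErr fails when got/want disagree on error presence, or when both are
// non-nil but carry different messages.
func assertErr(t *testing.T, got, want error) {
	t.Helper()
	if (got != nil) != (want != nil) {
		t.Fatalf("expected error '%v' got '%v'", want, got)
	}
	if got != nil && want != nil && got.Error() != want.Error() {
		t.Fatalf("expected error messages to match '%v' got '%v'", want, got.Error())
	}
}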
-func PutSecrets( - init func(f SecretServiceFields, t *testing.T) (platform.SecretService, func()), - t *testing.T, -) { - type args struct { - orgID platform2.ID - secrets map[string]string - } - type wants struct { - err error - keys []string - } - - tests := []struct { - name string - fields SecretServiceFields - args args - wants wants - }{ - { - name: "put secrets", - fields: SecretServiceFields{ - Secrets: []Secret{ - { - OrganizationID: platform2.ID(1), - Env: map[string]string{ - "api_key": "abc123xyz", - }, - }, - }, - }, - args: args{ - orgID: platform2.ID(1), - secrets: map[string]string{ - "api_key2": "abc123xyz", - "batman": "potato", - }, - }, - wants: wants{ - keys: []string{"api_key2", "batman"}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - err := s.PutSecrets(ctx, tt.args.orgID, tt.args.secrets) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - - for k, v := range tt.args.secrets { - val, err := s.LoadSecret(ctx, tt.args.orgID, k) - if err != nil { - t.Fatalf("unexpected error %v", err) - } - - if want, got := v, val; want != got { - t.Errorf("expected value to be %s, got %s", want, got) - } - } - - keys, err := s.GetSecretKeys(ctx, tt.args.orgID) - if err != nil { - t.Fatalf("unexpected error %v", err) - } - - if diff := cmp.Diff(keys, tt.wants.keys, secretCmpOptions...); diff != "" { - t.Errorf("keys are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// PatchSecrets tests the PatchSecrets method for the SecretService interface. 
-func PatchSecrets( - init func(f SecretServiceFields, t *testing.T) (platform.SecretService, func()), - t *testing.T, -) { - type args struct { - orgID platform2.ID - secrets map[string]string - } - type wants struct { - err error - keys []string - } - - tests := []struct { - name string - fields SecretServiceFields - args args - wants wants - }{ - { - name: "patch secrets", - fields: SecretServiceFields{ - Secrets: []Secret{ - { - OrganizationID: platform2.ID(1), - Env: map[string]string{ - "api_key": "abc123xyz", - }, - }, - }, - }, - args: args{ - orgID: platform2.ID(1), - secrets: map[string]string{ - "api_key2": "abc123xyz", - "batman": "potato", - }, - }, - wants: wants{ - keys: []string{"api_key", "api_key2", "batman"}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - - ctx := context.Background() - - err := s.PatchSecrets(ctx, tt.args.orgID, tt.args.secrets) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - - for k, v := range tt.args.secrets { - val, err := s.LoadSecret(ctx, tt.args.orgID, k) - if err != nil { - if errors.ErrorCode(err) == errors.EMethodNotAllowed { - // skip value checking for http service testing - break - } - t.Fatalf("unexpected error %v", err) - } - - if want, got := v, val; want != got { - t.Errorf("expected value to be %s, got %s", want, got) - } - } - - keys, err := s.GetSecretKeys(ctx, tt.args.orgID) - if err != nil { - t.Fatalf("unexpected error %v", err) - } - - if diff := cmp.Diff(keys, tt.wants.keys, secretCmpOptions...); diff != "" { - t.Errorf("keys are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// GetSecretKeys tests the GetSecretKeys method for the SecretService interface. -func GetSecretKeys( - init func(f SecretServiceFields, t *testing.T) (platform.SecretService, func()), - t *testing.T, -) { - type args struct { - orgID platform2.ID - } - type wants struct { - keys []string - err error - } - - tests := []struct { - name string - fields SecretServiceFields - args args - wants wants - }{ - { - name: "get secret keys for one org", - fields: SecretServiceFields{ - Secrets: []Secret{ - { - OrganizationID: platform2.ID(1), - Env: map[string]string{ - "api_key": "abc123xyz", - }, - }, - { - OrganizationID: platform2.ID(2), - Env: map[string]string{ - "api_key": "zyx321cba", - }, - }, - }, - }, - args: args{ - orgID: platform2.ID(1), - }, - wants: wants{ - keys: []string{"api_key"}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - keys, err := s.GetSecretKeys(ctx, tt.args.orgID) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - - if diff := cmp.Diff(keys, tt.wants.keys, secretCmpOptions...); diff != "" { - t.Errorf("keys are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// DeleteSecrets tests the DeleteSecrets method for the SecretService interface. 
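Worth noting from the two fixtures above: the PutSecrets case expects only the newly written keys to remain for the organization (replacement semantics), while the PatchSecrets case expects the pre-existing api_key to survive alongside the new keys (merge semantics). A map-based sketch of the behavior being asserted, illustrative only and not the real store code:

package secrets // illustrative only

// putSecrets mirrors the PutSecrets expectation: the organization's secret
// set is replaced wholesale by the incoming map.
func putSecrets(current, next map[string]string) map[string]string {
	out := make(map[string]string, len(next))
	for k, v := range next {
		out[k] = v
	}
	return out
}

// patchSecrets mirrors the PatchSecrets expectation: incoming keys are
// upserted and existing keys survive.
func patchSecrets(current, next map[string]string) map[string]string {
	out := make(map[string]string, len(current)+len(next))
	for k, v := range current {
		out[k] = v
	}
	for k, v := range next {
		out[k] = v
	}
	return out
}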
-func DeleteSecrets( - init func(f SecretServiceFields, t *testing.T) (platform.SecretService, func()), - t *testing.T, -) { - type args struct { - orgID platform2.ID - keys []string - } - type wants struct { - keys []string - err error - } - - tests := []struct { - name string - fields SecretServiceFields - args args - wants wants - }{ - { - name: "delete secret keys", - fields: SecretServiceFields{ - Secrets: []Secret{ - { - OrganizationID: platform2.ID(1), - Env: map[string]string{ - "api_key": "abc123xyz", - "api_key2": "potato", - "batman": "foo", - }, - }, - }, - }, - args: args{ - orgID: platform2.ID(1), - keys: []string{"api_key2", "batman"}, - }, - wants: wants{ - keys: []string{"api_key"}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - err := s.DeleteSecret(ctx, tt.args.orgID, tt.args.keys...) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - - keys, err := s.GetSecretKeys(ctx, tt.args.orgID) - if err != nil { - t.Fatalf("unexpected error %v", err) - } - - if diff := cmp.Diff(keys, tt.wants.keys, secretCmpOptions...); diff != "" { - t.Errorf("keys are different -got/+want\ndiff %s", diff) - } - }) - } -} diff --git a/testing/session.go b/testing/session.go deleted file mode 100644 index 19631b6e249..00000000000 --- a/testing/session.go +++ /dev/null @@ -1,434 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "sort" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" -) - -const ( - sessionOneID = "020f755c3c082000" - sessionTwoID = "020f755c3c082001" -) - -var sessionCmpOptions = sessionCompareOptions("CreatedAt", "ExpiresAt", "Permissions") - -func sessionCompareOptions(ignore ...string) cmp.Options { - return cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Session) []*influxdb.Session { - out := append([]*influxdb.Session(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), - cmpopts.IgnoreFields(influxdb.Session{}, ignore...), - cmpopts.EquateEmpty(), - } -} - -// SessionFields will include the IDGenerator, TokenGenerator, Sessions, and Users -type SessionFields struct { - IDGenerator platform.IDGenerator - TokenGenerator influxdb.TokenGenerator - Sessions []*influxdb.Session - Users []*influxdb.User -} - -type sessionServiceFunc func( - init func(SessionFields, *testing.T) (influxdb.SessionService, string, func()), - t *testing.T, -) - -// SessionService tests all the service functions. 
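The sessionCompareOptions helper above layers three go-cmp options so that fixture ordering, store-assigned timestamps, and nil-versus-empty slices never produce false failures. A small standalone illustration of the same combination, using a simplified Session struct rather than the real one:

package main

import (
	"fmt"
	"sort"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type Session struct {
	ID        string
	Key       string
	CreatedAt time.Time
}

func main() {
	opts := cmp.Options{
		// Sort copies of both slices before diffing, so element order is irrelevant.
		cmp.Transformer("Sort", func(in []*Session) []*Session {
			out := append([]*Session(nil), in...)
			sort.Slice(out, func(i, j int) bool { return out[i].ID > out[j].ID })
			return out
		}),
		// Fields populated by the store under test are ignored outright.
		cmpopts.IgnoreFields(Session{}, "CreatedAt"),
		// A nil slice and an empty slice compare as equal.
		cmpopts.EquateEmpty(),
	}
	got := []*Session{{ID: "b"}, {ID: "a", CreatedAt: time.Now()}}
	want := []*Session{{ID: "a"}, {ID: "b"}}
	fmt.Println(cmp.Diff(got, want, opts...) == "") // true: equal under these options
}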
-func SessionService( - init func(SessionFields, *testing.T) (influxdb.SessionService, string, func()), t *testing.T, -) { - tests := []struct { - name string - fn sessionServiceFunc - }{ - { - name: "CreateSession", - fn: CreateSession, - }, - { - name: "FindSession", - fn: FindSession, - }, - { - name: "ExpireSession", - fn: ExpireSession, - }, - { - name: "RenewSession", - fn: RenewSession, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -// CreateSession testing -func CreateSession( - init func(SessionFields, *testing.T) (influxdb.SessionService, string, func()), - t *testing.T, -) { - type args struct { - user string - } - type wants struct { - err error - session *influxdb.Session - } - - tests := []struct { - name string - fields SessionFields - args args - wants wants - }{ - { - name: "create sessions with empty set", - fields: SessionFields{ - IDGenerator: mock.NewIDGenerator(sessionTwoID, t), - TokenGenerator: mock.NewTokenGenerator("abc123xyz", nil), - Users: []*influxdb.User{ - { - ID: MustIDBase16(sessionOneID), - Name: "user1", - }, - }, - }, - args: args{ - user: "user1", - }, - wants: wants{ - session: &influxdb.Session{ - ID: MustIDBase16(sessionTwoID), - UserID: MustIDBase16(sessionOneID), - Key: "abc123xyz", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - session, err := s.CreateSession(ctx, tt.args.user) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(session, tt.wants.session, sessionCmpOptions...); diff != "" { - t.Errorf("sessions are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindSession testing -func FindSession( - init func(SessionFields, *testing.T) (influxdb.SessionService, string, func()), - t *testing.T, -) { - type args struct { - key string - } - type wants struct { - err error - session *influxdb.Session - } - - tests := []struct { - name string - fields SessionFields - args args - wants wants - }{ - { - name: "basic find session", - fields: SessionFields{ - IDGenerator: mock.NewIDGenerator(sessionTwoID, t), - TokenGenerator: mock.NewTokenGenerator("abc123xyz", nil), - Sessions: []*influxdb.Session{ - { - ID: MustIDBase16(sessionOneID), - UserID: MustIDBase16(sessionTwoID), - Key: "abc123xyz", - ExpiresAt: time.Date(2030, 9, 26, 0, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - key: "abc123xyz", - }, - wants: wants{ - session: &influxdb.Session{ - ID: MustIDBase16(sessionOneID), - UserID: MustIDBase16(sessionTwoID), - Key: "abc123xyz", - ExpiresAt: time.Date(2030, 9, 26, 0, 0, 0, 0, time.UTC), - }, - }, - }, - { - name: "look for not existing session", - args: args{ - key: "abc123xyz", - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindSession, - Msg: influxdb.ErrSessionNotFound, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - session, err := s.FindSession(ctx, tt.args.key) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(session, tt.wants.session, sessionCmpOptions...); diff != "" { - t.Errorf("session is different -got/+want\ndiff %s", diff) - } - }) - } -} - -// ExpireSession testing -func ExpireSession( - init func(SessionFields, *testing.T) (influxdb.SessionService, string, 
func()), - t *testing.T, -) { - type args struct { - key string - } - type wants struct { - err error - session *influxdb.Session - } - - tests := []struct { - name string - fields SessionFields - args args - wants wants - }{ - { - name: "basic find session", - fields: SessionFields{ - IDGenerator: mock.NewIDGenerator(sessionTwoID, t), - TokenGenerator: mock.NewTokenGenerator("abc123xyz", nil), - Sessions: []*influxdb.Session{ - { - ID: MustIDBase16(sessionOneID), - UserID: MustIDBase16(sessionTwoID), - Key: "abc123xyz", - ExpiresAt: time.Date(2030, 9, 26, 0, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - key: "abc123xyz", - }, - wants: wants{ - session: &influxdb.Session{ - ID: MustIDBase16(sessionOneID), - UserID: MustIDBase16(sessionTwoID), - Key: "abc123xyz", - ExpiresAt: time.Date(2030, 9, 26, 0, 0, 0, 0, time.UTC), - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - err := s.ExpireSession(ctx, tt.args.key) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - session, err := s.FindSession(ctx, tt.args.key) - if err.Error() != influxdb.ErrSessionExpired && err.Error() != influxdb.ErrSessionNotFound { - t.Errorf("expected session to be expired got %v", err) - } - - if session != nil { - t.Errorf("expected a nil session but got: %v", session) - } - }) - } -} - -// RenewSession testing -func RenewSession( - init func(SessionFields, *testing.T) (influxdb.SessionService, string, func()), - t *testing.T, -) { - type args struct { - session *influxdb.Session - key string - expireAt time.Time - } - - type wants struct { - err error - session *influxdb.Session - } - - tests := []struct { - name string - fields SessionFields - args args - wants wants - }{ - { - name: "basic renew session", - fields: SessionFields{ - IDGenerator: mock.NewIDGenerator(sessionTwoID, t), - TokenGenerator: mock.NewTokenGenerator("abc123xyz", nil), - Sessions: []*influxdb.Session{ - { - ID: MustIDBase16(sessionOneID), - UserID: MustIDBase16(sessionTwoID), - Key: "abc123xyz", - ExpiresAt: time.Date(2030, 9, 26, 0, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - session: &influxdb.Session{ - ID: MustIDBase16(sessionOneID), - UserID: MustIDBase16(sessionTwoID), - Key: "abc123xyz", - ExpiresAt: time.Date(2030, 9, 26, 0, 0, 0, 0, time.UTC), - }, - key: "abc123xyz", - expireAt: time.Date(2031, 9, 26, 0, 0, 10, 0, time.UTC), - }, - wants: wants{ - session: &influxdb.Session{ - ID: MustIDBase16(sessionOneID), - UserID: MustIDBase16(sessionTwoID), - Key: "abc123xyz", - ExpiresAt: time.Date(2031, 9, 26, 0, 0, 10, 0, time.UTC), - }, - }, - }, - { - name: "renew session with an earlier time than existing expiration", - fields: SessionFields{ - IDGenerator: mock.NewIDGenerator(sessionTwoID, t), - TokenGenerator: mock.NewTokenGenerator("abc123xyz", nil), - Sessions: []*influxdb.Session{ - { - ID: MustIDBase16(sessionOneID), - UserID: MustIDBase16(sessionTwoID), - Key: "abc123xyz", - ExpiresAt: time.Date(2031, 9, 26, 0, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - session: &influxdb.Session{ - ID: MustIDBase16(sessionOneID), - UserID: MustIDBase16(sessionTwoID), - Key: "abc123xyz", - ExpiresAt: time.Date(2031, 9, 26, 0, 0, 0, 0, time.UTC), - }, - key: "abc123xyz", - expireAt: time.Date(2030, 9, 26, 0, 0, 0, 0, time.UTC), - }, - wants: wants{ - session: &influxdb.Session{ - ID: MustIDBase16(sessionOneID), - UserID: MustIDBase16(sessionTwoID), - Key: "abc123xyz", - ExpiresAt: 
time.Date(2031, 9, 26, 0, 0, 0, 0, time.UTC), - }, - }, - }, - { - name: "renew nil session", - fields: SessionFields{ - IDGenerator: mock.NewIDGenerator(sessionTwoID, t), - TokenGenerator: mock.NewTokenGenerator("abc123xyz", nil), - Sessions: []*influxdb.Session{ - { - ID: MustIDBase16(sessionOneID), - UserID: MustIDBase16(sessionTwoID), - Key: "abc123xyz", - ExpiresAt: time.Date(2030, 9, 26, 0, 0, 0, 0, time.UTC), - }, - }, - }, - args: args{ - key: "abc123xyz", - expireAt: time.Date(2031, 9, 26, 0, 0, 10, 0, time.UTC), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EInternal, - Msg: "session is nil", - Op: influxdb.OpRenewSession, - }, - session: &influxdb.Session{ - ID: MustIDBase16(sessionOneID), - UserID: MustIDBase16(sessionTwoID), - Key: "abc123xyz", - ExpiresAt: time.Date(2030, 9, 26, 0, 0, 0, 0, time.UTC), - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - err := s.RenewSession(ctx, tt.args.session, tt.args.expireAt) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - session, err := s.FindSession(ctx, tt.args.key) - if err != nil { - t.Errorf("err in find session %v", err) - } - - cmpOptions := sessionCompareOptions("CreatedAt", "Permissions") - if diff := cmp.Diff(session, tt.wants.session, cmpOptions...); diff != "" { - t.Errorf("session is different -got/+want\ndiff %s", diff) - } - }) - } -} diff --git a/testing/source.go b/testing/source.go deleted file mode 100644 index c81a3c13dc3..00000000000 --- a/testing/source.go +++ /dev/null @@ -1,357 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" -) - -const ( - defaultSourceID = "020f755c3c082000" - defaultSourceOrganizationID = "50616e67652c206c" - sourceOneID = "020f755c3c082001" - sourceTwoID = "020f755c3c082002" - sourceOrgOneID = "61726920617a696f" -) - -var sourceCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*platform.Source) []*platform.Source { - out := append([]*platform.Source(nil), in...) 
// Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -// SourceFields will include the IDGenerator, and sources -type SourceFields struct { - IDGenerator platform2.IDGenerator - Sources []*platform.Source -} - -// CreateSource testing -func CreateSource( - init func(SourceFields, *testing.T) (platform.SourceService, string, func()), - t *testing.T, -) { - type args struct { - source *platform.Source - } - type wants struct { - err error - sources []*platform.Source - } - - tests := []struct { - name string - fields SourceFields - args args - wants wants - }{ - { - name: "create sources with empty set", - fields: SourceFields{ - IDGenerator: mock.NewIDGenerator(sourceOneID, t), - Sources: []*platform.Source{}, - }, - args: args{ - source: &platform.Source{ - Name: "name1", - }, - }, - wants: wants{ - sources: []*platform.Source{ - { - Name: "autogen", - Type: "self", - ID: MustIDBase16(defaultSourceID), - OrganizationID: MustIDBase16(defaultSourceOrganizationID), - Default: true, - }, - { - Name: "name1", - ID: MustIDBase16(sourceOneID), - OrganizationID: MustIDBase16(sourceOneID), - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.CreateSource(ctx, tt.args.source) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - defer s.DeleteSource(ctx, tt.args.source.ID) - - sources, _, err := s.FindSources(ctx, platform.FindOptions{}) - if err != nil { - t.Fatalf("failed to retrieve sources: %v", err) - } - if diff := cmp.Diff(sources, tt.wants.sources, sourceCmpOptions...); diff != "" { - t.Errorf("sources are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindSourceByID testing -func FindSourceByID( - init func(SourceFields, *testing.T) (platform.SourceService, string, func()), - t *testing.T, -) { - type args struct { - id platform2.ID - } - type wants struct { - err error - source *platform.Source - } - - tests := []struct { - name string - fields SourceFields - args args - wants wants - }{ - { - name: "find default source by ID", - fields: SourceFields{ - IDGenerator: mock.NewIDGenerator(sourceOneID, t), - Sources: []*platform.Source{ - { - Name: "name1", - ID: MustIDBase16(sourceOneID), - OrganizationID: MustIDBase16(sourceOrgOneID), - }, - }, - }, - args: args{ - id: MustIDBase16(sourceOneID), - }, - wants: wants{ - source: &platform.Source{ - Name: "name1", - ID: MustIDBase16(sourceOneID), - OrganizationID: MustIDBase16(sourceOrgOneID), - }, - }, - }, - { - name: "find source by ID", - fields: SourceFields{ - IDGenerator: mock.NewIDGenerator(sourceOneID, t), - Sources: []*platform.Source{}, - }, - args: args{ - id: MustIDBase16(defaultSourceID), - }, - wants: wants{ - source: &platform.Source{ - Name: "autogen", - Type: "self", - ID: MustIDBase16(defaultSourceID), - OrganizationID: MustIDBase16(defaultSourceOrganizationID), - Default: true, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - source, err := s.FindSourceByID(ctx, tt.args.id) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(source, tt.wants.source, sourceCmpOptions...); diff != "" { - t.Errorf("sources are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindSources testing -func FindSources( - init 
func(SourceFields, *testing.T) (platform.SourceService, string, func()), - t *testing.T, -) { - type args struct { - opts platform.FindOptions - } - type wants struct { - err error - sources []*platform.Source - } - - tests := []struct { - name string - fields SourceFields - args args - wants wants - }{ - { - name: "find all sources", - fields: SourceFields{ - IDGenerator: mock.NewIDGenerator(sourceOneID, t), - Sources: []*platform.Source{ - { - Name: "name1", - ID: MustIDBase16(sourceOneID), - OrganizationID: MustIDBase16(sourceOrgOneID), - }, - { - Name: "name2", - ID: MustIDBase16(sourceTwoID), - OrganizationID: MustIDBase16(sourceOrgOneID), - }, - }, - }, - args: args{}, - wants: wants{ - sources: []*platform.Source{ - { - Name: "autogen", - Type: "self", - ID: MustIDBase16(defaultSourceID), - OrganizationID: MustIDBase16(defaultSourceOrganizationID), - Default: true, - }, - { - Name: "name1", - ID: MustIDBase16(sourceOneID), - OrganizationID: MustIDBase16(sourceOrgOneID), - }, - { - Name: "name2", - ID: MustIDBase16(sourceTwoID), - OrganizationID: MustIDBase16(sourceOrgOneID), - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - sources, _, err := s.FindSources(ctx, tt.args.opts) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(sources, tt.wants.sources, sourceCmpOptions...); diff != "" { - t.Errorf("sources are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// DeleteSource testing -func DeleteSource( - init func(SourceFields, *testing.T) (platform.SourceService, string, func()), - t *testing.T, -) { - type args struct { - id platform2.ID - } - type wants struct { - err error - sources []*platform.Source - } - - tests := []struct { - name string - fields SourceFields - args args - wants wants - }{ - { - name: "delete source by ID", - fields: SourceFields{ - IDGenerator: mock.NewIDGenerator(sourceOneID, t), - Sources: []*platform.Source{ - { - Name: "name1", - ID: MustIDBase16(sourceOneID), - OrganizationID: MustIDBase16(sourceOrgOneID), - }, - }, - }, - args: args{ - id: MustIDBase16(sourceOneID), - }, - wants: wants{ - sources: []*platform.Source{ - { - Name: "autogen", - Type: "self", - ID: MustIDBase16(defaultSourceID), - OrganizationID: MustIDBase16(defaultSourceOrganizationID), - Default: true, - }, - }, - }, - }, - { - name: "delete default source by ID", - fields: SourceFields{ - IDGenerator: mock.NewIDGenerator(sourceOneID, t), - Sources: []*platform.Source{}, - }, - args: args{ - id: MustIDBase16(defaultSourceID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EForbidden, - Op: platform.OpDeleteSource, - Msg: "cannot delete autogen source", - }, - sources: []*platform.Source{ - { - Name: "autogen", - Type: "self", - ID: MustIDBase16(defaultSourceID), - OrganizationID: MustIDBase16(defaultSourceOrganizationID), - Default: true, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.DeleteSource(ctx, tt.args.id) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - sources, _, err := s.FindSources(ctx, platform.FindOptions{}) - if err != nil { - t.Fatalf("failed to retrieve sources: %v", err) - } - if diff := cmp.Diff(sources, tt.wants.sources, sourceCmpOptions...); diff != "" { - t.Errorf("sources are different -got/+want\ndiff %s", diff) - } - }) 
- } -} diff --git a/testing/user_resource_mapping_service.go b/testing/user_resource_mapping_service.go deleted file mode 100644 index ced8256dea5..00000000000 --- a/testing/user_resource_mapping_service.go +++ /dev/null @@ -1,554 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "fmt" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -var mappingCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*platform.UserResourceMapping) []*platform.UserResourceMapping { - out := append([]*platform.UserResourceMapping(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ResourceID.String() > out[j].ResourceID.String() - }) - return out - }), -} - -// UserResourceFields includes prepopulated data for mapping tests -type UserResourceFields struct { - Organizations []*platform.Organization - Users []*platform.User - Buckets []*platform.Bucket - UserResourceMappings []*platform.UserResourceMapping -} - -type userResourceMappingServiceF func( - init func(UserResourceFields, *testing.T) (platform.UserResourceMappingService, func()), - t *testing.T, -) - -// UserResourceMappingService tests all the service functions. -func UserResourceMappingService( - init func(UserResourceFields, *testing.T) (platform.UserResourceMappingService, func()), - t *testing.T, -) { - tests := []struct { - name string - fn userResourceMappingServiceF - }{ - { - name: "CreateUserResourceMapping", - fn: CreateUserResourceMapping, - }, - { - name: "FindUserResourceMappings", - fn: FindUserResourceMappings, - }, - { - name: "DeleteUserResourceMapping", - fn: DeleteUserResourceMapping, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -// baseUserResourceFields creates base fields to create URMs. -// Users for URMs must exist in order not to fail on creation. 
-func baseUserResourceFields() UserResourceFields { - return UserResourceFields{ - Users: []*platform.User{ - { - Name: "user1", - ID: MustIDBase16(userOneID), - }, - { - Name: "user2", - ID: MustIDBase16(userTwoID), - }, - }, - } -} - -func CreateUserResourceMapping( - init func(UserResourceFields, *testing.T) (platform.UserResourceMappingService, func()), - t *testing.T, -) { - type args struct { - mapping *platform.UserResourceMapping - } - type wants struct { - err error - mappings []*platform.UserResourceMapping - } - - tests := []struct { - name string - fields UserResourceFields - args args - wants wants - }{ - { - name: "basic create user resource mapping", - fields: func() UserResourceFields { - f := baseUserResourceFields() - f.UserResourceMappings = []*platform.UserResourceMapping{ - { - ResourceID: idOne, - UserID: MustIDBase16(userOneID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - } - return f - }(), - args: args{ - mapping: &platform.UserResourceMapping{ - ResourceID: idTwo, - UserID: MustIDBase16(userTwoID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - }, - wants: wants{ - mappings: []*platform.UserResourceMapping{ - { - ResourceID: idOne, - UserID: MustIDBase16(userOneID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - { - ResourceID: idTwo, - UserID: MustIDBase16(userTwoID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - }, - }, - }, - { - name: "duplicate mappings are not allowed", - fields: func() UserResourceFields { - f := baseUserResourceFields() - f.UserResourceMappings = []*platform.UserResourceMapping{ - { - ResourceID: idOne, - UserID: MustIDBase16(userOneID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - } - return f - }(), - args: args{ - mapping: &platform.UserResourceMapping{ - ResourceID: idOne, - UserID: MustIDBase16(userOneID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - }, - wants: wants{ - mappings: []*platform.UserResourceMapping{ - { - ResourceID: idOne, - UserID: MustIDBase16(userOneID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - }, - //lint:ignore ST1005 Error is capitalized in the tested code. 
- err: fmt.Errorf("Unexpected error when assigning user to a resource: mapping for user %s already exists", userOneID), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.CreateUserResourceMapping(ctx, tt.args.mapping) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - defer s.DeleteUserResourceMapping(ctx, tt.args.mapping.ResourceID, tt.args.mapping.UserID) - - mappings, _, err := s.FindUserResourceMappings(ctx, platform.UserResourceMappingFilter{}) - if err != nil { - t.Fatalf("failed to retrieve mappings: %v", err) - } - if diff := cmp.Diff(mappings, tt.wants.mappings, mappingCmpOptions...); diff != "" { - t.Errorf("mappings are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func DeleteUserResourceMapping( - init func(UserResourceFields, *testing.T) (platform.UserResourceMappingService, func()), - t *testing.T, -) { - type args struct { - resourceID platform2.ID - userID platform2.ID - } - type wants struct { - err error - mappings []*platform.UserResourceMapping - } - - tests := []struct { - name string - fields UserResourceFields - args args - wants wants - }{ - { - name: "basic delete user resource mapping", - fields: func() UserResourceFields { - f := baseUserResourceFields() - f.UserResourceMappings = []*platform.UserResourceMapping{ - { - ResourceID: idOne, - UserID: MustIDBase16(userOneID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - } - return f - }(), - args: args{ - resourceID: idOne, - userID: MustIDBase16(userOneID), - }, - wants: wants{ - mappings: []*platform.UserResourceMapping{}, - }, - }, - { - name: "deleting a non-existent user", - fields: UserResourceFields{ - UserResourceMappings: []*platform.UserResourceMapping{}, - }, - args: args{ - resourceID: idOne, - userID: MustIDBase16(userOneID), - }, - wants: wants{ - mappings: []*platform.UserResourceMapping{}, - err: fmt.Errorf("user to resource mapping not found"), - }, - }, - { - name: "delete user resource mapping for org", - fields: UserResourceFields{ - Organizations: []*platform.Organization{ - { - ID: idOne, - Name: "organization1", - }, - }, - Users: []*platform.User{ - { - ID: MustIDBase16(userOneID), - Name: "user1", - }, - }, - Buckets: []*platform.Bucket{ - { - ID: idOne, - Name: "bucket1", - OrgID: idOne, - }, - }, - UserResourceMappings: []*platform.UserResourceMapping{ - { - ResourceID: idOne, - ResourceType: platform.OrgsResourceType, - MappingType: platform.UserMappingType, - UserID: MustIDBase16(userOneID), - UserType: platform.Member, - }, - }, - }, - args: args{ - resourceID: idOne, - userID: MustIDBase16(userOneID), - }, - wants: wants{ - mappings: []*platform.UserResourceMapping{}, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.DeleteUserResourceMapping(ctx, tt.args.resourceID, tt.args.userID) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) 
- } - } - - mappings, _, err := s.FindUserResourceMappings(ctx, platform.UserResourceMappingFilter{}) - if err != nil { - t.Fatalf("failed to retrieve mappings: %v", err) - } - if diff := cmp.Diff(mappings, tt.wants.mappings, mappingCmpOptions...); diff != "" { - t.Errorf("mappings are different -got/+want\ndiff %s", diff) - } - }) - } -} - -func FindUserResourceMappings( - init func(UserResourceFields, *testing.T) (platform.UserResourceMappingService, func()), - t *testing.T, -) { - type args struct { - filter platform.UserResourceMappingFilter - } - type wants struct { - err error - mappings []*platform.UserResourceMapping - } - - tests := []struct { - name string - fields UserResourceFields - args args - wants wants - }{ - { - name: "basic find mappings", - fields: func() UserResourceFields { - f := baseUserResourceFields() - f.UserResourceMappings = []*platform.UserResourceMapping{ - { - ResourceID: idOne, - UserID: MustIDBase16(userOneID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - { - ResourceID: idTwo, - UserID: MustIDBase16(userTwoID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - } - return f - }(), - args: args{ - filter: platform.UserResourceMappingFilter{}, - }, - wants: wants{ - mappings: []*platform.UserResourceMapping{ - { - ResourceID: idOne, - UserID: MustIDBase16(userOneID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - { - ResourceID: idTwo, - UserID: MustIDBase16(userTwoID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - }, - }, - }, - { - name: "find mappings filtered by user", - fields: func() UserResourceFields { - f := baseUserResourceFields() - f.UserResourceMappings = []*platform.UserResourceMapping{ - { - ResourceID: idOne, - UserID: MustIDBase16(userOneID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - { - ResourceID: idTwo, - UserID: MustIDBase16(userTwoID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - } - return f - }(), - args: args{ - filter: platform.UserResourceMappingFilter{ - UserID: MustIDBase16(userOneID), - }, - }, - wants: wants{ - mappings: []*platform.UserResourceMapping{ - { - ResourceID: idOne, - UserID: MustIDBase16(userOneID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - }, - }, - }, - { - name: "find mappings filtered by resource", - fields: func() UserResourceFields { - f := baseUserResourceFields() - f.UserResourceMappings = []*platform.UserResourceMapping{ - { - ResourceID: idOne, - UserID: MustIDBase16(userOneID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - { - ResourceID: idTwo, - UserID: MustIDBase16(userTwoID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - } - return f - }(), - args: args{ - filter: platform.UserResourceMappingFilter{ - ResourceID: idOne, - }, - }, - wants: wants{ - mappings: []*platform.UserResourceMapping{ - { - ResourceID: idOne, - UserID: MustIDBase16(userOneID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - }, - }, - }, - { - name: "find mappings filtered by user type", - fields: func() UserResourceFields { - f := baseUserResourceFields() - f.UserResourceMappings = []*platform.UserResourceMapping{ - { - ResourceID: idOne, - UserID: MustIDBase16(userOneID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - { - ResourceID: 
idTwo, - UserID: MustIDBase16(userTwoID), - UserType: platform.Owner, - ResourceType: platform.BucketsResourceType, - }, - } - return f - }(), - args: args{ - filter: platform.UserResourceMappingFilter{ - UserType: platform.Owner, - }, - }, - wants: wants{ - mappings: []*platform.UserResourceMapping{ - { - ResourceID: idTwo, - UserID: MustIDBase16(userTwoID), - UserType: platform.Owner, - ResourceType: platform.BucketsResourceType, - }, - }, - }, - }, - { - name: "find mappings filtered by resource type", - fields: func() UserResourceFields { - f := baseUserResourceFields() - f.UserResourceMappings = []*platform.UserResourceMapping{ - { - ResourceID: idTwo, - UserID: MustIDBase16(userTwoID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - } - return f - }(), - args: args{ - filter: platform.UserResourceMappingFilter{ - ResourceType: platform.BucketsResourceType, - }, - }, - wants: wants{ - mappings: []*platform.UserResourceMapping{ - { - ResourceID: idTwo, - UserID: MustIDBase16(userTwoID), - UserType: platform.Member, - ResourceType: platform.BucketsResourceType, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, done := init(tt.fields, t) - defer done() - ctx := context.Background() - mappings, _, err := s.FindUserResourceMappings(ctx, tt.args.filter) - if (err != nil) != (tt.wants.err != nil) { - t.Fatalf("expected error '%v' got '%v'", tt.wants.err, err) - } - - if err != nil && tt.wants.err != nil { - if err.Error() != tt.wants.err.Error() { - t.Fatalf("expected error messages to match '%v' got '%v'", tt.wants.err, err.Error()) - } - } - - if diff := cmp.Diff(mappings, tt.wants.mappings, mappingCmpOptions...); diff != "" { - t.Errorf("mappings are different -got/+want\ndiff %s", diff) - } - }) - } -} diff --git a/testing/user_service.go b/testing/user_service.go deleted file mode 100644 index e684fdb5411..00000000000 --- a/testing/user_service.go +++ /dev/null @@ -1,1158 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" -) - -const ( - userOneID = "020f755c3c082000" - userTwoID = "020f755c3c082001" - userThreeID = "020f755c3c082002" -) - -var userCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Comparer(func(x, y *influxdb.User) bool { - if x == nil && y == nil { - return true - } - if x != nil && y == nil || y != nil && x == nil { - return false - } - return x.Name == y.Name && x.OAuthID == y.OAuthID && x.Status == y.Status - }), - cmp.Transformer("Sort", func(in []*influxdb.User) []*influxdb.User { - out := append([]*influxdb.User(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -// UserFields will include the IDGenerator, and users -type UserFields struct { - IDGenerator platform.IDGenerator - Users []*influxdb.User -} - -// UserService tests all the service functions. 
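Like the other suites in this package, UserService below is driven through an init callback, but here the callback also returns a string, the implementation's operation prefix, which diffPlatformErrors takes into account when matching an expected error's Op. A rough consumer-side sketch, with a hypothetical constructor and prefix (only the signatures come from the diff):

package kv_test

import (
	"testing"

	"github.com/influxdata/influxdb/v2"
	influxdbtesting "github.com/influxdata/influxdb/v2/testing"
)

func initUserService(f influxdbtesting.UserFields, t *testing.T) (influxdb.UserService, string, func()) {
	// Assumed helper: builds a store that uses f.IDGenerator and is preloaded
	// with f.Users, returning the service and a teardown func. Not part of this diff.
	svc, closeFn := newTestUserStore(t, f)
	return svc, "kv/", closeFn
}

func TestUserService(t *testing.T) {
	influxdbtesting.UserService(initUserService, t)
}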
-func UserService( - init func(UserFields, *testing.T) (influxdb.UserService, string, func()), t *testing.T, -) { - tests := []struct { - name string - fn func(init func(UserFields, *testing.T) (influxdb.UserService, string, func()), - t *testing.T) - }{ - { - name: "CreateUser", - fn: CreateUser, - }, - { - name: "FindUserByID", - fn: FindUserByID, - }, - { - name: "FindUsers", - fn: FindUsers, - }, - { - name: "DeleteUser", - fn: DeleteUser, - }, - { - name: "FindUser", - fn: FindUser, - }, - { - name: "UpdateUser", - fn: UpdateUser, - }, - { - name: "UpdateUser_IndexHygiene", - fn: UpdateUser_IndexHygiene, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -// CreateUser testing -func CreateUser( - init func(UserFields, *testing.T) (influxdb.UserService, string, func()), - t *testing.T, -) { - type args struct { - user *influxdb.User - } - type wants struct { - err error - users []*influxdb.User - } - - tests := []struct { - name string - fields UserFields - args args - wants wants - }{ - { - name: "create users with empty set", - fields: UserFields{ - IDGenerator: mock.NewIDGenerator(userOneID, t), - Users: []*influxdb.User{}, - }, - args: args{ - user: &influxdb.User{ - Name: "name1", - Status: influxdb.Active, - }, - }, - wants: wants{ - users: []*influxdb.User{ - { - Name: "name1", - ID: MustIDBase16(userOneID), - Status: influxdb.Active, - }, - }, - }, - }, - { - name: "basic create user", - fields: UserFields{ - IDGenerator: mock.NewIDGenerator(userTwoID, t), - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "user1", - Status: influxdb.Active, - }, - }, - }, - args: args{ - user: &influxdb.User{ - Name: "user2", - Status: influxdb.Active, - }, - }, - wants: wants{ - users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "user1", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "user2", - Status: influxdb.Active, - }, - }, - }, - }, - { - name: "names should be unique", - fields: UserFields{ - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform.ID { - return MustIDBase16(userOneID) - }, - }, - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "user1", - Status: influxdb.Active, - }, - }, - }, - args: args{ - user: &influxdb.User{ - Name: "user1", - }, - }, - wants: wants{ - users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "user1", - Status: influxdb.Active, - }, - }, - err: &errors.Error{ - Code: errors.EConflict, - Op: influxdb.OpCreateUser, - Msg: "user with name user1 already exists", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.CreateUser(ctx, tt.args.user) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - // Delete only created users - ie., having a not nil ID - if tt.args.user.ID.Valid() { - defer s.DeleteUser(ctx, tt.args.user.ID) - } - - users, _, err := s.FindUsers(ctx, influxdb.UserFilter{}) - if err != nil { - t.Fatalf("failed to retrieve users: %v", err) - } - if diff := cmp.Diff(users, tt.wants.users, userCmpOptions...); diff != "" { - t.Errorf("users are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindUserByID testing -func FindUserByID( - init func(UserFields, *testing.T) (influxdb.UserService, string, func()), - t *testing.T, -) { - type args struct { - id platform.ID - } - type wants struct { - err error - user 
*influxdb.User - } - - tests := []struct { - name string - fields UserFields - args args - wants wants - }{ - { - name: "basic find user by id", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "user1", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "user2", - Status: influxdb.Active, - }, - }, - }, - args: args{ - id: MustIDBase16(userTwoID), - }, - wants: wants{ - user: &influxdb.User{ - ID: MustIDBase16(userTwoID), - Name: "user2", - Status: influxdb.Active, - }, - }, - }, - { - name: "find user by id not exists", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "user1", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "user2", - Status: influxdb.Active, - }, - }, - }, - args: args{ - id: MustIDBase16(threeID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindUserByID, - Msg: "user not found", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - user, err := s.FindUserByID(ctx, tt.args.id) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(user, tt.wants.user, userCmpOptions...); diff != "" { - t.Errorf("user is different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindUsers testing -func FindUsers( - init func(UserFields, *testing.T) (influxdb.UserService, string, func()), - t *testing.T, -) { - type args struct { - ID platform.ID - name string - findOptions influxdb.FindOptions - } - - type wants struct { - users []*influxdb.User - err error - } - tests := []struct { - name string - fields UserFields - args args - wants wants - }{ - { - name: "find all users", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "abc", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "xyz", - Status: influxdb.Active, - }, - }, - }, - args: args{}, - wants: wants{ - users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "abc", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "xyz", - Status: influxdb.Active, - }, - }, - }, - }, - { - name: "find user by id", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "abc", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "xyz", - Status: influxdb.Active, - }, - }, - }, - args: args{ - ID: MustIDBase16(userTwoID), - }, - wants: wants{ - users: []*influxdb.User{ - { - ID: MustIDBase16(userTwoID), - Name: "xyz", - Status: influxdb.Active, - }, - }, - }, - }, - { - name: "find user by name", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "abc", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "xyz", - Status: influxdb.Active, - }, - }, - }, - args: args{ - name: "xyz", - }, - wants: wants{ - users: []*influxdb.User{ - { - ID: MustIDBase16(userTwoID), - Name: "xyz", - Status: influxdb.Active, - }, - }, - }, - }, - { - name: "find user by id not exists", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "abc", - }, - { - ID: MustIDBase16(userTwoID), - Name: "xyz", - }, - }, - }, - args: args{ - ID: MustIDBase16(threeID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindUsers, - Msg: "user not found", - }, 
- }, - }, - { - name: "find user by name not exists", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "abc", - }, - { - ID: MustIDBase16(userTwoID), - Name: "xyz", - }, - }, - }, - args: args{ - name: "no_exist", - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindUsers, - Msg: "user not found", - }, - }, - }, - { - name: "find all users by offset and limit", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "abc", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "def", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userThreeID), - Name: "xyz", - Status: influxdb.Active, - }, - }, - }, - args: args{ - findOptions: influxdb.FindOptions{ - Offset: 1, - Limit: 1, - }, - }, - wants: wants{ - users: []*influxdb.User{ - { - ID: MustIDBase16(userTwoID), - Name: "def", - Status: influxdb.Active, - }, - }, - }, - }, - { - name: "find all users by after and limit", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "abc", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "def", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userThreeID), - Name: "xyz", - Status: influxdb.Active, - }, - }, - }, - args: args{ - findOptions: influxdb.FindOptions{ - After: MustIDBase16Ptr(userOneID), - Limit: 2, - }, - }, - wants: wants{ - users: []*influxdb.User{ - { - ID: MustIDBase16(userTwoID), - Name: "def", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userThreeID), - Name: "xyz", - Status: influxdb.Active, - }, - }, - }, - }, - { - name: "find all users by descending", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "abc", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "def", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userThreeID), - Name: "xyz", - Status: influxdb.Active, - }, - }, - }, - args: args{ - findOptions: influxdb.FindOptions{ - Offset: 1, - Descending: true, - }, - }, - wants: wants{ - users: []*influxdb.User{ - { - ID: MustIDBase16(userTwoID), - Name: "def", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userOneID), - Name: "abc", - Status: influxdb.Active, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - filter := influxdb.UserFilter{} - if tt.args.ID.Valid() { - filter.ID = &tt.args.ID - } - if tt.args.name != "" { - filter.Name = &tt.args.name - } - - users, _, err := s.FindUsers(ctx, filter, tt.args.findOptions) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(users, tt.wants.users, userCmpOptions...); diff != "" { - t.Errorf("users are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// DeleteUser testing -func DeleteUser( - init func(UserFields, *testing.T) (influxdb.UserService, string, func()), - t *testing.T, -) { - type args struct { - ID platform.ID - } - type wants struct { - err error - users []*influxdb.User - } - - tests := []struct { - name string - fields UserFields - args args - wants wants - }{ - { - name: "delete users using exist id", - fields: UserFields{ - Users: []*influxdb.User{ - { - Name: "orgA", - ID: MustIDBase16(userOneID), - Status: influxdb.Active, - }, - { - Name: "orgB", - ID: MustIDBase16(userTwoID), - Status: influxdb.Active, - }, - }, - }, - args: args{ - ID: 
MustIDBase16(userOneID), - }, - wants: wants{ - users: []*influxdb.User{ - { - Name: "orgB", - ID: MustIDBase16(userTwoID), - Status: influxdb.Active, - }, - }, - }, - }, - { - name: "delete users using id that does not exist", - fields: UserFields{ - Users: []*influxdb.User{ - { - Name: "orgA", - ID: MustIDBase16(userOneID), - Status: influxdb.Active, - }, - { - Name: "orgB", - ID: MustIDBase16(userTwoID), - Status: influxdb.Active, - }, - }, - }, - args: args{ - ID: MustIDBase16(userThreeID), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpDeleteUser, - Msg: "user not found", - }, - users: []*influxdb.User{ - { - Name: "orgA", - ID: MustIDBase16(userOneID), - Status: influxdb.Active, - }, - { - Name: "orgB", - ID: MustIDBase16(userTwoID), - Status: influxdb.Active, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - err := s.DeleteUser(ctx, tt.args.ID) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - filter := influxdb.UserFilter{} - users, _, err := s.FindUsers(ctx, filter) - if err != nil { - t.Fatalf("failed to retrieve users: %v", err) - } - if diff := cmp.Diff(users, tt.wants.users, userCmpOptions...); diff != "" { - t.Errorf("users are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindUser testing -func FindUser( - init func(UserFields, *testing.T) (influxdb.UserService, string, func()), - t *testing.T, -) { - type args struct { - filter influxdb.UserFilter - } - - type wants struct { - user *influxdb.User - err error - } - - tests := []struct { - name string - fields UserFields - args args - wants wants - }{ - { - name: "find user by name", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "abc", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "xyz", - Status: influxdb.Active, - }, - }, - }, - args: args{ - filter: influxdb.UserFilter{ - Name: func(s string) *string { return &s }("abc"), - }, - }, - wants: wants{ - user: &influxdb.User{ - ID: MustIDBase16(userOneID), - Name: "abc", - Status: influxdb.Active, - }, - }, - }, - { - name: "find existing user by its id", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "abc", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "xyz", - Status: influxdb.Active, - }, - }, - }, - args: args{ - filter: influxdb.UserFilter{ - ID: func(id platform.ID) *platform.ID { return &id }(MustIDBase16(userOneID)), - }, - }, - wants: wants{ - user: &influxdb.User{ - ID: MustIDBase16(userOneID), - Name: "abc", - Status: influxdb.Active, - }, - }, - }, - { - name: "user with name does not exist", - fields: UserFields{ - Users: []*influxdb.User{}, - }, - args: args{ - filter: influxdb.UserFilter{ - Name: func(s string) *string { return &s }("abc"), - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "user not found", - Op: influxdb.OpFindUser, - }, - }, - }, - { - name: "user with id does not exist", - fields: UserFields{ - Users: []*influxdb.User{}, - }, - args: args{ - filter: influxdb.UserFilter{ - ID: func(id platform.ID) *platform.ID { return &id }(MustIDBase16(userOneID)), - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "user not found", - Op: influxdb.OpFindUser, - }, - }, - }, - { - name: "filter with both name and ID prefers ID", - fields: UserFields{ - 
Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "abc", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "xyz", - Status: influxdb.Active, - }, - }, - }, - args: args{ - filter: influxdb.UserFilter{ - ID: func(id platform.ID) *platform.ID { return &id }(MustIDBase16(userOneID)), - Name: func(s string) *string { return &s }("xyz"), - }, - }, - wants: wants{ - user: &influxdb.User{ - ID: MustIDBase16(userOneID), - Name: "abc", - Status: influxdb.Active, - }, - }, - }, - { - name: "filter with no name nor id returns error", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userTwoID), - Name: "xyz", - }, - }, - }, - args: args{ - filter: influxdb.UserFilter{}, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "user not found", - Op: influxdb.OpFindUser, - }, - }, - }, - { - name: "filter both name and non-existent id returns no user", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userTwoID), - Name: "xyz", - }, - }, - }, - args: args{ - filter: influxdb.UserFilter{ - ID: func(id platform.ID) *platform.ID { return &id }(MustIDBase16(userOneID)), - Name: func(s string) *string { return &s }("xyz"), - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Msg: "user not found", - Op: influxdb.OpFindUser, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - user, err := s.FindUser(ctx, tt.args.filter) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(user, tt.wants.user, userCmpOptions...); diff != "" { - t.Errorf("users are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// UpdateUser testing -func UpdateUser( - init func(UserFields, *testing.T) (influxdb.UserService, string, func()), - t *testing.T, -) { - type args struct { - name string - id platform.ID - status string - } - type wants struct { - err error - user *influxdb.User - } - - tests := []struct { - name string - fields UserFields - args args - wants wants - }{ - { - name: "update name", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "user1", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "user2", - Status: influxdb.Active, - }, - }, - }, - args: args{ - id: MustIDBase16(userOneID), - name: "changed", - }, - wants: wants{ - user: &influxdb.User{ - ID: MustIDBase16(userOneID), - Name: "changed", - Status: influxdb.Active, - }, - }, - }, - { - name: "update name to same name", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "user1", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "user2", - Status: influxdb.Active, - }, - }, - }, - args: args{ - id: MustIDBase16(userOneID), - name: "user1", - }, - wants: wants{ - user: &influxdb.User{ - ID: MustIDBase16(userOneID), - Name: "user1", - Status: influxdb.Active, - }, - }, - }, - { - name: "update status", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "user1", - Status: influxdb.Active, - }, - { - ID: MustIDBase16(userTwoID), - Name: "user2", - Status: influxdb.Active, - }, - }, - }, - args: args{ - id: MustIDBase16(userOneID), - status: "inactive", - }, - wants: wants{ - user: &influxdb.User{ - ID: MustIDBase16(userOneID), - Name: "user1", - Status: "inactive", - }, - }, - }, - { - name: "update name 
with id not exists", - fields: UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: "user1", - }, - { - ID: MustIDBase16(userTwoID), - Name: "user2", - }, - }, - }, - args: args{ - id: MustIDBase16(threeID), - name: "changed", - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpUpdateUser, - Msg: "user not found", - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - upd := influxdb.UserUpdate{} - if tt.args.name != "" { - upd.Name = &tt.args.name - } - - switch tt.args.status { - case "inactive": - status := influxdb.Inactive - upd.Status = &status - case "active": - status := influxdb.Inactive - upd.Status = &status - } - - user, err := s.UpdateUser(ctx, tt.args.id, upd) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(user, tt.wants.user, userCmpOptions...); diff != "" { - t.Errorf("user is different -got/+want\ndiff %s", diff) - } - }) - } -} - -func UpdateUser_IndexHygiene( - init func(UserFields, *testing.T) (influxdb.UserService, string, func()), - t *testing.T, -) { - - oldUserName := "user1" - users := UserFields{ - Users: []*influxdb.User{ - { - ID: MustIDBase16(userOneID), - Name: oldUserName, - Status: "active", - }, - }, - } - s, _, done := init(users, t) - defer done() - - newUserName := "user1Updated" - upd := influxdb.UserUpdate{ - Name: &newUserName, - } - - ctx := context.Background() - _, err := s.UpdateUser(ctx, MustIDBase16(userOneID), upd) - if err != nil { - t.Error(err) - } - - // Ensure we can find the user with the new name. - _, nerr := s.FindUser(ctx, influxdb.UserFilter{ - Name: &newUserName, - }) - if nerr != nil { - t.Error("unexpected error when finding user by name", nerr) - } - - // Ensure we cannot find a user with the old name. The index used when - // searching by name should have been cleared out by the UpdateUser - // operation. 
- _, oerr := s.FindUser(ctx, influxdb.UserFilter{ - Name: &oldUserName, - }) - ErrorsEqual(t, oerr, &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindUser, - Msg: "user not found", - }) -} diff --git a/testing/util.go b/testing/util.go deleted file mode 100644 index b2ed351a9eb..00000000000 --- a/testing/util.go +++ /dev/null @@ -1,168 +0,0 @@ -package testing - -import ( - "context" - "os" - "strings" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/bolt" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration/all" - "github.com/influxdata/influxdb/v2/query/fluxlang" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func NewTestBoltStore(t *testing.T) (kv.SchemaStore, func()) { - f, err := os.CreateTemp("", "influxdata-bolt-") - require.NoError(t, err, "unable to create temporary boltdb file") - require.NoError(t, f.Close()) - - path := f.Name() - s := bolt.NewKVStore(zaptest.NewLogger(t), path, bolt.WithNoSync) - require.NoError(t, s.Open(context.Background())) - - // apply all kv migrations - require.NoError(t, all.Up(context.Background(), zaptest.NewLogger(t), s)) - - close := func() { - s.Close() - os.Remove(path) - } - - return s, close -} - -func NewTestInmemStore(t *testing.T) kv.SchemaStore { - s := inmem.NewKVStore() - // apply all kv migrations - require.NoError(t, all.Up(context.Background(), zaptest.NewLogger(t), s)) - return s -} - -// TODO(goller): remove opPrefix argument -func diffPlatformErrors(name string, actual, expected error, opPrefix string, t *testing.T) { - t.Helper() - ErrorsEqual(t, actual, expected) -} - -// ErrorsEqual checks to see if the provided errors are equivalent. -func ErrorsEqual(t *testing.T, actual, expected error) { - t.Helper() - if expected == nil && actual == nil { - return - } - - if expected == nil && actual != nil { - t.Errorf("unexpected error %s", actual.Error()) - } - - if expected != nil && actual == nil { - t.Errorf("expected error %s but received nil", expected.Error()) - } - - if errors.ErrorCode(expected) != errors.ErrorCode(actual) { - t.Logf("\nexpected: %v\nactual: %v\n\n", expected, actual) - t.Errorf("expected error code %q but received %q", errors.ErrorCode(expected), errors.ErrorCode(actual)) - } - - if errors.ErrorMessage(expected) != errors.ErrorMessage(actual) { - t.Logf("\nexpected: %v\nactual: %v\n\n", expected, actual) - t.Errorf("expected error message %q but received %q", errors.ErrorMessage(expected), errors.ErrorMessage(actual)) - } -} - -func idPtr(id platform.ID) *platform.ID { - return &id -} - -func strPtr(s string) *string { - return &s -} -func boolPtr(b bool) *bool { - return &b -} - -// MustIDBase16 is an helper to ensure a correct ID is built during testing. -func MustIDBase16(s string) platform.ID { - id, err := platform.IDFromString(s) - if err != nil { - panic(err) - } - return *id -} - -// MustIDBase16Ptr is an helper to ensure a correct ID ptr ref is built during testing. 
-func MustIDBase16Ptr(s string) *platform.ID { - id := MustIDBase16(s) - return &id -} - -func MustCreateOrgs(ctx context.Context, svc influxdb.OrganizationService, os ...*influxdb.Organization) { - for _, o := range os { - if err := svc.CreateOrganization(ctx, o); err != nil { - panic(err) - } - } -} - -func MustCreateUsers(ctx context.Context, svc influxdb.UserService, us ...*influxdb.User) { - for _, u := range us { - if err := svc.CreateUser(ctx, u); err != nil { - panic(err) - } - } -} - -func MustNewPermission(a influxdb.Action, rt influxdb.ResourceType, orgID platform.ID) *influxdb.Permission { - perm, err := influxdb.NewPermission(a, rt, orgID) - if err != nil { - panic(err) - } - return perm -} - -func MustNewPermissionAtID(id platform.ID, a influxdb.Action, rt influxdb.ResourceType, orgID platform.ID) *influxdb.Permission { - perm, err := influxdb.NewPermissionAtID(id, a, rt, orgID) - if err != nil { - panic(err) - } - return perm -} - -func influxErrsEqual(t *testing.T, expected *errors.Error, actual error) { - t.Helper() - - if expected != nil { - require.Error(t, actual) - } - - if actual == nil { - return - } - - if expected == nil { - require.NoError(t, actual) - return - } - iErr, ok := actual.(*errors.Error) - require.True(t, ok) - assert.Equal(t, expected.Code, iErr.Code) - assert.Truef(t, strings.HasPrefix(iErr.Error(), expected.Error()), "expected: %s got err: %s", expected.Error(), actual.Error()) -} - -func FormatFluxString(t *testing.T, script string) string { - svc := fluxlang.DefaultService - - astPkg, err := svc.Parse(script) - require.NoError(t, err) - formatted, err := svc.Format(astPkg.Files[0]) - require.NoError(t, err) - return formatted -} diff --git a/testing/variable.go b/testing/variable.go deleted file mode 100644 index 34325f2855f..00000000000 --- a/testing/variable.go +++ /dev/null @@ -1,1371 +0,0 @@ -package testing - -import ( - "bytes" - "context" - "sort" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/pkg/testing/assert" - "github.com/stretchr/testify/require" -) - -const ( - idA = "020f755c3c082000" - idB = "020f755c3c082001" - idC = "020f755c3c082002" - idD = "020f755c3c082003" -) - -var oldFakeDate = time.Date(2002, 8, 5, 2, 2, 3, 0, time.UTC) -var fakeDate = time.Date(2006, 5, 4, 1, 2, 3, 0, time.UTC) -var fakeGenerator = mock.TimeGenerator{FakeValue: fakeDate} - -var variableCmpOptions = cmp.Options{ - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Variable) []*influxdb.Variable { - out := append([]*influxdb.Variable(nil), in...) - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -// VariableFields defines fields for a variable test -type VariableFields struct { - Variables []*influxdb.Variable - IDGenerator platform.IDGenerator - TimeGenerator influxdb.TimeGenerator -} - -type VariableSvcOpts struct { - HTTPValidation bool -} - -func WithHTTPValidation() VariableSvcOpts { - return VariableSvcOpts{ - HTTPValidation: true, - } -} - -// VariableService tests all the service functions. 
-func VariableService( - init func(VariableFields, *testing.T) (influxdb.VariableService, string, func()), t *testing.T, opts ...VariableSvcOpts, -) { - tests := []struct { - name string - fn func(init func(VariableFields, *testing.T) (influxdb.VariableService, string, func()), - t *testing.T) - }{ - { - name: "CreateVariable", - fn: CreateVariable, - }, - { - name: "TrimWhitespace", - fn: TrimWhitespace, - }, - { - name: "FindVariableByID", - fn: FindVariableByID, - }, - { - name: "FindVariables", - fn: FindVariables, - }, - { - name: "UpdateVariable", - fn: UpdateVariable, - }, - { - name: "ReplaceVariable", - fn: ReplaceVariable, - }, - { - name: "DeleteVariable", - fn: DeleteVariable, - }, - } - for _, tt := range tests { - if tt.name == "trimwhitespace" && len(opts) > 0 && !opts[0].HTTPValidation || len(opts) == 0 { - continue - } - t.Run(tt.name, func(t *testing.T) { - tt := tt - t.Parallel() - tt.fn(init, t) - }) - } -} - -func TrimWhitespace(init func(VariableFields, *testing.T) (influxdb.VariableService, string, func()), t *testing.T) { - type args struct { - variable *influxdb.Variable - } - type wants struct { - err *errors.Error - variables []*influxdb.Variable - } - - tests := []struct { - name string - fields VariableFields - args args - wants wants - }{ - { - // trims white space, but fails when variable name already exists - name: "trimwhitespace", - fields: VariableFields{ - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform.ID { - return MustIDBase16(idA) - }, - }, - TimeGenerator: fakeGenerator, - Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(3), - Name: "existing-variable", - Selected: []string{"b"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"b"}, - }, - }, - }, - }, - args: args{ - variable: &influxdb.Variable{ - ID: MustIDBase16(idA), - OrganizationID: platform.ID(3), - Name: " existing-variable ", - Selected: []string{"a"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"a"}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EConflict, - Msg: "variable is not unique", - }, - variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(3), - Name: "existing-variable", - Selected: []string{"b"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"b"}, - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - err := s.CreateVariable(ctx, tt.args.variable) - influxErrsEqual(t, tt.wants.err, err) - - variables, err := s.FindVariables(ctx, influxdb.VariableFilter{}) - if err != nil { - t.Fatalf("failed to retrieve variables: %v", err) - } - if diff := cmp.Diff(variables, tt.wants.variables, variableCmpOptions...); diff != "" { - t.Fatalf("found unexpected variables -got/+want\ndiff %s", diff) - } - }) - } -} - -// CreateVariable tests influxdb.VariableService CreateVariable interface method -func CreateVariable(init func(VariableFields, *testing.T) (influxdb.VariableService, string, func()), t *testing.T) { - type args struct { - variable *influxdb.Variable - } - type wants struct { - err *errors.Error - variables []*influxdb.Variable - } - - tests := []struct { - name string - fields VariableFields - args 
args - wants wants - }{ - { - name: "basic create with missing id", - fields: VariableFields{ - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform.ID { - return MustIDBase16(idD) - }, - }, - TimeGenerator: fakeGenerator, - Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(1), - Name: "already there", - }, - }, - }, - args: args{ - variable: &influxdb.Variable{ - OrganizationID: platform.ID(3), - Name: "basic variable", - Selected: []string{"a"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"a"}, - }, - }, - }, - wants: wants{ - variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(1), - Name: "already there", - }, - { - ID: MustIDBase16(idD), - OrganizationID: platform.ID(3), - Name: "basic variable", - Selected: []string{"a"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"a"}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - }, - { - name: "creating a variable assigns the variable an id and adds it to the store", - fields: VariableFields{ - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform.ID { - return MustIDBase16(idA) - }, - }, - TimeGenerator: fakeGenerator, - Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(3), - Name: "existing-variable", - Selected: []string{"b"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"b"}, - }, - }, - }, - }, - args: args{ - variable: &influxdb.Variable{ - ID: MustIDBase16(idA), - OrganizationID: platform.ID(3), - Name: "MY-variable", - Selected: []string{"a"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"a"}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - wants: wants{ - err: nil, - variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(3), - Name: "existing-variable", - Selected: []string{"b"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"b"}, - }, - }, - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(3), - Name: "MY-variable", - Selected: []string{"a"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"a"}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - }, - { - name: "cant create a new variable with a name that exists", - fields: VariableFields{ - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform.ID { - return MustIDBase16(idA) - }, - }, - TimeGenerator: fakeGenerator, - Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idB), - OrganizationID: MustIDBase16(idD), - Name: "existing-variable", - Selected: []string{"b"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"b"}, - }, - }, - }, - }, - args: args{ - variable: &influxdb.Variable{ - ID: MustIDBase16(idA), - OrganizationID: MustIDBase16(idD), - Name: "existing-variable", - Selected: []string{"a"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"a"}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: 
errors.EConflict, - Msg: "variable is not unique", - }, - variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idB), - OrganizationID: MustIDBase16(idD), - Name: "existing-variable", - Selected: []string{"b"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"b"}, - }, - }, - }, - }, - }, - { - name: "variable names should be unique and case-insensitive", - fields: VariableFields{ - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform.ID { - return MustIDBase16(idA) - }, - }, - TimeGenerator: fakeGenerator, - Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(3), - Name: "existing-variable", - Selected: []string{"b"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"b"}, - }, - }, - }, - }, - args: args{ - variable: &influxdb.Variable{ - ID: MustIDBase16(idA), - OrganizationID: platform.ID(3), - Name: "EXISTING-variable", - Selected: []string{"a"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"a"}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EConflict, - Msg: "variable is not unique for key ", - }, - variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(3), - Name: "existing-variable", - Selected: []string{"b"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"b"}, - }, - }, - }, - }, - }, - { - name: "cant create a new variable when variable name exists with a different type", - fields: VariableFields{ - IDGenerator: &mock.IDGenerator{ - IDFn: func() platform.ID { - return MustIDBase16(idA) - }, - }, - TimeGenerator: fakeGenerator, - Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(3), - Name: "existing-variable", - Selected: []string{"b"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"b"}, - }, - }, - }, - }, - args: args{ - variable: &influxdb.Variable{ - ID: MustIDBase16(idA), - OrganizationID: platform.ID(3), - Name: "existing-variable", - Selected: []string{"a"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"a"}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EConflict, - Msg: "variable is not unique", - }, - variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(3), - Name: "existing-variable", - Selected: []string{"b"}, - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"b"}, - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - err := s.CreateVariable(ctx, tt.args.variable) - influxErrsEqual(t, tt.wants.err, err) - - variables, err := s.FindVariables(ctx, influxdb.VariableFilter{}) - if err != nil { - t.Fatalf("failed to retrieve variables: %v", err) - } - if diff := cmp.Diff(variables, tt.wants.variables, variableCmpOptions...); diff != "" { - t.Fatalf("found unexpected variables -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindVariableByID tests influxdb.VariableService FindVariableByID interface 
method -func FindVariableByID(init func(VariableFields, *testing.T) (influxdb.VariableService, string, func()), t *testing.T) { - type args struct { - id platform.ID - } - type wants struct { - err *errors.Error - variable *influxdb.Variable - } - - tests := []struct { - name string - fields VariableFields - args args - wants wants - }{ - { - name: "finding a variable that exists by id", - fields: VariableFields{ - Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(5), - Name: "existing-variable-a", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(5), - Name: "existing-variable-b", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - args: args{ - id: MustIDBase16(idB), - }, - wants: wants{ - err: nil, - variable: &influxdb.Variable{ - ID: MustIDBase16(idB), - OrganizationID: platform.ID(5), - Name: "existing-variable-b", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - { - name: "finding a non-existent variable", - fields: VariableFields{ - Variables: []*influxdb.Variable{}, - }, - args: args{ - id: MustIDBase16(idA), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpFindVariableByID, - Msg: influxdb.ErrVariableNotFound, - }, - variable: nil, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - variable, err := s.FindVariableByID(ctx, tt.args.id) - if err != nil { - if tt.wants.err == nil { - require.NoError(t, err) - } - iErr, ok := err.(*errors.Error) - require.True(t, ok) - assert.Equal(t, iErr.Code, tt.wants.err.Code) - assert.Equal(t, strings.HasPrefix(iErr.Error(), tt.wants.err.Error()), true) - return - } - - if diff := cmp.Diff(variable, tt.wants.variable, variableCmpOptions...); diff != "" { - t.Fatalf("found unexpected variable -got/+want\ndiff %s", diff) - } - }) - } -} - -// FindVariables tests influxdb.variableService FindVariables interface method -func FindVariables(init func(VariableFields, *testing.T) (influxdb.VariableService, string, func()), t *testing.T) { - // todo(leodido) - type args struct { - // todo(leodido) > use VariableFilter as arg - orgID *platform.ID - findOpts influxdb.FindOptions - } - type wants struct { - variables []*influxdb.Variable - err error - } - - tests := []struct { - name string - fields VariableFields - args args - wants wants - }{ - { - name: "find nothing (empty set)", - fields: VariableFields{ - Variables: []*influxdb.Variable{}, - }, - args: args{ - findOpts: influxdb.DefaultVariableFindOptions, - }, - wants: wants{ - variables: []*influxdb.Variable{}, - }, - }, - { - name: "find all variables", - fields: VariableFields{ - Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(22), - Name: "a", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(22), - Name: "b", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: 
fakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - args: args{ - findOpts: influxdb.DefaultVariableFindOptions, - }, - wants: wants{ - variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(22), - Name: "a", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(22), - Name: "b", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - }, - { - name: "find variables by wrong org id", - fields: VariableFields{ - Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(22), - Name: "a", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(22), - Name: "b", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - args: args{ - findOpts: influxdb.DefaultVariableFindOptions, - orgID: idPtr(platform.ID(1)), - }, - wants: wants{ - variables: []*influxdb.Variable{}, - }, - }, - { - name: "find all variables by org 22", - fields: VariableFields{ - Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(1), - Name: "a", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(22), - Name: "b", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - { - ID: MustIDBase16(idC), - OrganizationID: platform.ID(2), - Name: "c", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - { - ID: MustIDBase16(idD), - OrganizationID: platform.ID(22), - Name: "d", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - args: args{ - findOpts: influxdb.DefaultVariableFindOptions, - orgID: idPtr(platform.ID(22)), - }, - wants: wants{ - variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(22), - Name: "b", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - { - ID: MustIDBase16(idD), - OrganizationID: platform.ID(22), - Name: "d", - CRUDLog: influxdb.CRUDLog{ - CreatedAt: fakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, opPrefix, done := init(tt.fields, t) - defer done() - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - filter := influxdb.VariableFilter{} - if tt.args.orgID != nil { - filter.OrganizationID = tt.args.orgID - } - - variables, err := s.FindVariables(ctx, filter, tt.args.findOpts) - diffPlatformErrors(tt.name, err, tt.wants.err, opPrefix, t) - - if diff := cmp.Diff(variables, tt.wants.variables, variableCmpOptions...); diff != "" { - t.Errorf("variables are different -got/+want\ndiff %s", diff) - } - }) - } -} - -// UpdateVariable tests influxdb.VariableService UpdateVariable interface method -func UpdateVariable(init func(VariableFields, *testing.T) (influxdb.VariableService, string, func()), t *testing.T) { - type args struct { - id platform.ID - update *influxdb.VariableUpdate - } - type wants struct { - err *errors.Error - variables []*influxdb.Variable - } - - tests := []struct { - name string - fields VariableFields - args args - wants wants - }{ - { - name: "updating a variable's name", - fields: VariableFields{ - TimeGenerator: fakeGenerator, 
- Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(7), - Name: "existing-variable-a", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: fakeDate, - }, - }, - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(7), - Name: "existing-variable-b", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - args: args{ - id: MustIDBase16(idB), - update: &influxdb.VariableUpdate{ - Name: "new-variable-b-name", - }, - }, - wants: wants{ - err: nil, - variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(7), - Name: "existing-variable-a", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: fakeDate, - }, - }, - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(7), - Name: "new-variable-b-name", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - }, - { - name: "updating a non-existent variable fails", - fields: VariableFields{ - Variables: []*influxdb.Variable{}, - }, - args: args{ - id: MustIDBase16(idA), - update: &influxdb.VariableUpdate{ - Name: "howdy", - }, - }, - wants: wants{ - err: &errors.Error{ - Op: influxdb.OpUpdateVariable, - Msg: influxdb.ErrVariableNotFound, - Code: errors.ENotFound, - }, - variables: []*influxdb.Variable{}, - }, - }, - { - name: "updating fails when variable name already exists", - fields: VariableFields{ - TimeGenerator: fakeGenerator, - Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(7), - Name: "variable-a", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: fakeDate, - }, - }, - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(7), - Name: "variable-b", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - args: args{ - id: MustIDBase16(idB), - update: &influxdb.VariableUpdate{ - Name: "variable-a", - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EConflict, - Msg: "variable entity update conflicts with an existing entity", - }, - variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(7), - Name: "variable-a", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: fakeDate, - }, - }, - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(7), - Name: "variable-b", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - }, - { - name: "trims the variable name but updating fails when variable name already exists", - fields: VariableFields{ - 
TimeGenerator: fakeGenerator, - Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(7), - Name: "variable-a", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: fakeDate, - }, - }, - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(7), - Name: "variable-b", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - args: args{ - id: MustIDBase16(idB), - update: &influxdb.VariableUpdate{ - Name: " variable-a ", - }, - }, - wants: wants{ - err: &errors.Error{ - Code: errors.EConflict, - Msg: "variable entity update conflicts with an existing entity", - }, - variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(7), - Name: "variable-a", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: fakeDate, - }, - }, - { - ID: MustIDBase16(idB), - OrganizationID: platform.ID(7), - Name: "variable-b", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - variable, err := s.UpdateVariable(ctx, tt.args.id, tt.args.update) - influxErrsEqual(t, tt.wants.err, err) - if err != nil { - return - } - - if variable != nil { - if tt.args.update.Name != "" && variable.Name != tt.args.update.Name { - t.Fatalf("variable name not updated") - } - } - - variables, err := s.FindVariables(ctx, influxdb.VariableFilter{}) - if err != nil { - t.Fatalf("failed to retrieve variables: %v", err) - } - if diff := cmp.Diff(variables, tt.wants.variables, variableCmpOptions...); diff != "" { - t.Fatalf("found unexpected variables -got/+want\ndiff %s", diff) - } - }) - } -} - -// ReplaceVariable tests influxdb.VariableService ReplaceVariable interface method -func ReplaceVariable(init func(VariableFields, *testing.T) (influxdb.VariableService, string, func()), t *testing.T) { - type args struct { - id platform.ID - newVariable *influxdb.Variable - } - type wants struct { - err *errors.Error - variables []*influxdb.Variable - } - - tests := []struct { - name string - fields VariableFields - args args - wants wants - }{ - { - name: "updating a variable's name", - fields: VariableFields{ - TimeGenerator: fakeGenerator, - Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(7), - Name: "existing-variable", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - args: args{ - id: MustIDBase16(idB), - newVariable: &influxdb.Variable{ - ID: MustIDBase16(idA), - OrganizationID: platform.ID(7), - Name: "renamed-variable", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - wants: wants{ - err: nil, - 
variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(7), - Name: "renamed-variable", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: fakeDate, - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - err := s.ReplaceVariable(ctx, tt.args.newVariable) - influxErrsEqual(t, tt.wants.err, err) - if err != nil { - return - } - - variable, err := s.FindVariableByID(ctx, tt.args.id) - if err != nil { - return - } - - if variable != nil { - if tt.args.newVariable.Name != "" && variable.Name != tt.args.newVariable.Name { - t.Fatalf("variable name not updated") - } - } - - variables, err := s.FindVariables(ctx, influxdb.VariableFilter{}) - if err != nil { - t.Fatalf("failed to retrieve variables: %v", err) - } - if diff := cmp.Diff(variables, tt.wants.variables, variableCmpOptions...); diff != "" { - t.Fatalf("found unexpected variables -got/+want\ndiff %s", diff) - } - }) - } -} - -// DeleteVariable tests influxdb.VariableService DeleteVariable interface method -func DeleteVariable(init func(VariableFields, *testing.T) (influxdb.VariableService, string, func()), t *testing.T) { - type args struct { - id platform.ID - } - type wants struct { - err *errors.Error - variables []*influxdb.Variable - } - - tests := []struct { - name string - fields VariableFields - args args - wants wants - }{ - { - name: "deleting a variable", - fields: VariableFields{ - Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idA), - OrganizationID: platform.ID(9), - Name: "m", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: oldFakeDate, - }, - }, - }, - }, - args: args{ - id: MustIDBase16(idA), - }, - wants: wants{ - err: nil, - variables: []*influxdb.Variable{}, - }, - }, - { - name: "deleting a variable that doesn't exist", - fields: VariableFields{ - Variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idD), - OrganizationID: platform.ID(1), - Name: "existing-variable", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: oldFakeDate, - }, - }, - }, - }, - args: args{ - id: MustIDBase16(idB), - }, - wants: wants{ - err: &errors.Error{ - Code: errors.ENotFound, - Op: influxdb.OpDeleteVariable, - Msg: influxdb.ErrVariableNotFound, - }, - variables: []*influxdb.Variable{ - { - ID: MustIDBase16(idD), - OrganizationID: platform.ID(1), - Name: "existing-variable", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{}, - }, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: oldFakeDate, - UpdatedAt: oldFakeDate, - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - s, _, done := init(tt.fields, t) - defer done() - ctx := context.Background() - - defer s.ReplaceVariable(ctx, &influxdb.Variable{ - ID: tt.args.id, - OrganizationID: platform.ID(1), - }) - - err := s.DeleteVariable(ctx, tt.args.id) - if err != nil { - if tt.wants.err == nil { - require.NoError(t, err) - } - iErr, ok := err.(*errors.Error) - require.True(t, ok) - assert.Equal(t, iErr.Code, tt.wants.err.Code) - assert.Equal(t, 
strings.HasPrefix(iErr.Error(), tt.wants.err.Error()), true) - return - } - - variables, err := s.FindVariables(ctx, influxdb.VariableFilter{}) - if err != nil { - t.Fatalf("failed to retrieve variables: %v", err) - } - if diff := cmp.Diff(variables, tt.wants.variables, variableCmpOptions...); diff != "" { - t.Fatalf("found unexpected variables -got/+want\ndiff %s", diff) - } - }) - } -} diff --git a/tests/auth_helpers.go b/tests/auth_helpers.go deleted file mode 100644 index 515c093fb0d..00000000000 --- a/tests/auth_helpers.go +++ /dev/null @@ -1,39 +0,0 @@ -package tests - -import ( - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -func mergePerms(orgID platform.ID, in ...[]influxdb.Permission) []influxdb.Permission { - var perms []influxdb.Permission - for i := range in { - perms = append(perms, in[i]...) - } - for i := range perms { - perms[i].Resource.OrgID = &orgID - } - return perms -} - -func MakeBucketPerm(bucketID platform.ID, actions ...influxdb.Action) []influxdb.Permission { - var perms []influxdb.Permission - for i := range actions { - perms = append(perms, influxdb.Permission{Action: actions[i], Resource: influxdb.Resource{ID: &bucketID, Type: influxdb.BucketsResourceType}}) - } - return perms -} - -func MakeBucketRWPerm(bucketID platform.ID) []influxdb.Permission { - return MakeBucketPerm(bucketID, []influxdb.Action{influxdb.ReadAction, influxdb.WriteAction}...) -} - -func MakeAuthorization(org, userID platform.ID, perms ...[]influxdb.Permission) *influxdb.Authorization { - return &influxdb.Authorization{ - OrgID: org, - UserID: userID, - Permissions: mergePerms(org, perms...), - Description: "foo user auth", - Status: influxdb.Active, - } -} diff --git a/tests/client.go b/tests/client.go deleted file mode 100644 index 6fd43e03d0b..00000000000 --- a/tests/client.go +++ /dev/null @@ -1,704 +0,0 @@ -package tests - -import ( - "bytes" - "context" - "io" - "net/http" - "strings" - "testing" - "time" - - "github.com/influxdata/flux/csv" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorization" - influxhttp "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/pkg/httpc" - "github.com/influxdata/influxdb/v2/tenant" -) - -type ClientConfig struct { - UserID platform.ID - OrgID platform.ID - BucketID platform.ID - DocumentsNamespace string - - // If Session is provided, Token is ignored. - Token string - Session *influxdb.Session -} - -// Client provides an API for writing, querying, and interacting with -// resources like authorizations, buckets, and organizations. -type Client struct { - Client *httpc.Client - *influxhttp.Service - - *authorization.AuthorizationClientService - *tenant.BucketClientService - *tenant.OrgClientService - *tenant.UserClientService - - ClientConfig -} - -// NewClient initialises a new Client which is ready to write points to the HTTP write endpoint. -func NewClient(endpoint string, config ClientConfig) (*Client, error) { - opts := make([]httpc.ClientOptFn, 0) - if config.Session != nil { - config.Token = "" - opts = append(opts, httpc.WithSessionCookie(config.Session.Key)) - } - hc, err := influxhttp.NewHTTPClient(endpoint, config.Token, false, opts...) 
- if err != nil { - return nil, err - } - - svc, err := influxhttp.NewService(hc, endpoint, config.Token) - if err != nil { - return nil, err - } - return &Client{ - Client: hc, - Service: svc, - AuthorizationClientService: &authorization.AuthorizationClientService{Client: hc}, - BucketClientService: &tenant.BucketClientService{Client: hc}, - OrgClientService: &tenant.OrgClientService{Client: hc}, - UserClientService: &tenant.UserClientService{Client: hc}, - ClientConfig: config, - }, nil -} - -// Open opens the client -func (c *Client) Open() error { return nil } - -// Close closes the client -func (c *Client) Close() error { return nil } - -// MustWriteBatch calls WriteBatch, panicking if an error is encountered. -func (c *Client) MustWriteBatch(points string) { - if err := c.WriteBatch(points); err != nil { - panic(err) - } -} - -// WriteBatch writes the current batch of points to the HTTP endpoint. -func (c *Client) WriteBatch(points string) error { - return c.WriteService.WriteTo( - context.Background(), - influxdb.BucketFilter{ - ID: &c.BucketID, - OrganizationID: &c.OrgID, - }, - strings.NewReader(points), - ) -} - -// Query returns the CSV response from a flux query to the HTTP API. -// -// This also remove all the \r to make it easier to write tests. -func (c *Client) QueryFlux(org, query string) (string, error) { - var csv string - csvResp := func(resp *http.Response) error { - body, err := io.ReadAll(resp.Body) - if err != nil { - return err - } - // remove the \r to simplify testing against a body of CSV. - body = bytes.ReplaceAll(body, []byte("\r"), nil) - csv = string(body) - return nil - } - - qr := QueryRequestBody(query) - err := c.Client.PostJSON(qr, fluxPath). - QueryParams([2]string{"org", org}). - Accept("text/csv"). - RespFn(csvResp). - StatusFn(httpc.StatusIn(http.StatusOK)). - Do(context.Background()) - - return csv, err -} - -const ( - fluxPath = "/api/v2/query" - // This is the only namespace for documents present after init. - DefaultDocumentsNamespace = "templates" -) - -// QueryRequestBody creates a body for a flux query using common CSV output params. -// Headers are included, but, annotations are not. -func QueryRequestBody(flux string) *influxhttp.QueryRequest { - header := true - return &influxhttp.QueryRequest{ - Type: "flux", - Query: flux, - Dialect: influxhttp.QueryDialect{ - Header: &header, - Delimiter: ",", - CommentPrefix: "#", - DateTimeFormat: "RFC3339", - Annotations: csv.DefaultEncoderConfig().Annotations, - }, - } -} - -// MustCreateAuth creates an auth or is a fatal error. -// Used in tests where the content of the bucket does not matter. -// -// This authorization token is an operator token for the default -// organization for the default user. -func (c *Client) MustCreateAuth(t *testing.T) platform.ID { - t.Helper() - - perms := influxdb.OperPermissions() - auth := &influxdb.Authorization{ - OrgID: c.OrgID, - UserID: c.UserID, - Permissions: perms, - } - err := c.CreateAuthorization(context.Background(), auth) - if err != nil { - t.Fatalf("unable to create auth: %v", err) - } - return auth.ID -} - -// MustCreateBucket creates a bucket or is a fatal error. -// Used in tests where the content of the bucket does not matter. 
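(Editor's sketch, not part of the deleted file.) For orientation, a rough example of how this test client tends to be used: construct it with NewClient and a ClientConfig, write a batch of line protocol to the configured default bucket, then query it back over the Flux HTTP endpoint. The endpoint, org and bucket names, the line-protocol point, and the setup struct holding the IDs and token are all made-up values for illustration:

	c, err := NewClient("http://127.0.0.1:8086", ClientConfig{
		UserID:   setup.UserID,   // hypothetical values produced by the test's setup step
		OrgID:    setup.OrgID,
		BucketID: setup.BucketID,
		Token:    setup.AuthToken,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	// Writes to the bucket/org configured above via the HTTP write endpoint.
	c.MustWriteBatch(`m,host=a f=1i 1000000000`)

	// Returns the CSV body with carriage returns stripped, which keeps assertions simple.
	body, err := c.QueryFlux("my-org", `from(bucket:"my-bucket") |> range(start: 0)`)
	if err != nil {
		t.Fatal(err)
	}
	_ = body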
-func (c *Client) MustCreateBucket(t *testing.T) platform.ID { - t.Helper() - - bucket := &influxdb.Bucket{OrgID: c.OrgID, Name: "n1"} - err := c.CreateBucket(context.Background(), bucket) - if err != nil { - t.Fatalf("unable to create bucket: %v", err) - } - return bucket.ID -} - -// MustCreateOrg creates an org or is a fatal error. -// Used in tests where the content of the org does not matter. -func (c *Client) MustCreateOrg(t *testing.T) platform.ID { - t.Helper() - - org := &influxdb.Organization{Name: "n1"} - err := c.CreateOrganization(context.Background(), org) - if err != nil { - t.Fatalf("unable to create org: %v", err) - } - return org.ID -} - -// MustCreateLabel creates a label or is a fatal error. -// Used in tests where the content of the label does not matter. -func (c *Client) MustCreateLabel(t *testing.T) platform.ID { - t.Helper() - - l := &influxdb.Label{OrgID: c.OrgID, Name: "n1"} - err := c.CreateLabel(context.Background(), l) - if err != nil { - t.Fatalf("unable to create label: %v", err) - } - return l.ID -} - -// MustCreateCheck creates a check or is a fatal error. -// Used in tests where the content of the check does not matter. -func (c *Client) MustCreateCheck(t *testing.T) platform.ID { - t.Helper() - - chk, err := c.CreateCheck(context.Background(), MockCheck("c", c.OrgID, c.UserID)) - if err != nil { - t.Fatalf("unable to create check: %v", err) - } - return chk.ID -} - -// MustCreateTelegraf creates a telegraf config or is a fatal error. -// Used in tests where the content of the telegraf config does not matter. -func (c *Client) MustCreateTelegraf(t *testing.T) platform.ID { - t.Helper() - - tc := &influxdb.TelegrafConfig{ - OrgID: c.OrgID, - Name: "n1", - Description: "d1", - Config: "[[howdy]]", - } - unused := platform.ID(1) /* this id is not used in the API */ - err := c.CreateTelegrafConfig(context.Background(), tc, unused) - if err != nil { - t.Fatalf("unable to create telegraf config: %v", err) - } - return tc.ID -} - -// MustCreateUser creates a user or is a fatal error. -// Used in tests where the content of the user does not matter. -func (c *Client) MustCreateUser(t *testing.T) platform.ID { - t.Helper() - - u := &influxdb.User{Name: "n1"} - err := c.CreateUser(context.Background(), u) - if err != nil { - t.Fatalf("unable to create user: %v", err) - } - return u.ID -} - -// MustCreateVariable creates a variable or is a fatal error. -// Used in tests where the content of the variable does not matter. -func (c *Client) MustCreateVariable(t *testing.T) platform.ID { - t.Helper() - - v := &influxdb.Variable{ - OrganizationID: c.OrgID, - Name: "n1", - Arguments: &influxdb.VariableArguments{ - Type: "constant", - Values: influxdb.VariableConstantValues{"v1", "v2"}, - }, - } - err := c.CreateVariable(context.Background(), v) - if err != nil { - t.Fatalf("unable to create variable: %v", err) - } - return v.ID -} - -// MustCreateNotificationEndpoint creates a notification endpoint or is a fatal error. -// Used in tests where the content of the notification endpoint does not matter. 
-func (c *Client) MustCreateNotificationEndpoint(t *testing.T) platform.ID { - t.Helper() - - ne := ValidNotificationEndpoint(c.OrgID) - err := c.CreateNotificationEndpoint(context.Background(), ne, c.UserID) - if err != nil { - t.Fatalf("unable to create notification endpoint: %v", err) - } - return ne.GetID() -} - -// MustCreateNotificationRule creates a Notification Rule or is a fatal error -// Used in tests where the content of the notification rule does not matter -func (c *Client) MustCreateNotificationRule(t *testing.T) platform.ID { - t.Helper() - ctx := context.Background() - - ne := ValidCustomNotificationEndpoint(c.OrgID, time.Now().String()) - err := c.CreateNotificationEndpoint(ctx, ne, c.UserID) - if err != nil { - t.Fatalf("unable to create notification endpoint: %v", err) - } - endpointID := ne.GetID() - r := ValidNotificationRule(c.OrgID, endpointID) - rc := influxdb.NotificationRuleCreate{NotificationRule: r, Status: influxdb.Active} - - err = c.CreateNotificationRule(ctx, rc, c.UserID) - if err != nil { - t.Fatalf("unable to create notification rule: %v", err) - } - - // we don't need this endpoint, so delete it to be compatible with other tests - _, _, err = c.DeleteNotificationEndpoint(ctx, endpointID) - if err != nil { - t.Fatalf("unable to delete notification endpoint: %v", err) - } - - return r.GetID() -} - -// MustCreateDBRPMapping creates a DBRP Mapping or is a fatal error. -// Used in tests where the content of the mapping does not matter. -// The created mapping points to the user's default bucket. -func (c *Client) MustCreateDBRPMapping(t *testing.T) platform.ID { - t.Helper() - ctx := context.Background() - - m := &influxdb.DBRPMapping{ - Database: "db", - RetentionPolicy: "rp", - OrganizationID: c.OrgID, - BucketID: c.BucketID, - } - if err := c.DBRPMappingService.Create(ctx, m); err != nil { - t.Fatalf("unable to create DBRP mapping: %v", err) - } - return m.ID -} - -// MustCreateResource will create a generic resource via the API. -// Used in tests where the content of the resource does not matter. 
-// -// // Create one of each org resource -// for _, r := range influxdb.OrgResourceTypes { -// client.MustCreateResource(t, r) -// } -// -// -// // Create a variable: -// id := client.MustCreateResource(t, influxdb.VariablesResourceType) -// defer client.MustDeleteResource(t, influxdb.VariablesResourceType, id) -func (c *Client) MustCreateResource(t *testing.T, r influxdb.ResourceType) platform.ID { - t.Helper() - - switch r { - case influxdb.AuthorizationsResourceType: // 0 - return c.MustCreateAuth(t) - case influxdb.BucketsResourceType: // 1 - return c.MustCreateBucket(t) - case influxdb.OrgsResourceType: // 3 - return c.MustCreateOrg(t) - case influxdb.SourcesResourceType: // 4 - t.Skip("I think sources are going to be removed right?") - case influxdb.TasksResourceType: // 5 - t.Skip("Task go client is not yet created") - case influxdb.TelegrafsResourceType: // 6 - return c.MustCreateTelegraf(t) - case influxdb.UsersResourceType: // 7 - return c.MustCreateUser(t) - case influxdb.VariablesResourceType: // 8 - return c.MustCreateVariable(t) - case influxdb.ScraperResourceType: // 9 - t.Skip("Scraper go client is not yet created") - case influxdb.SecretsResourceType: // 10 - t.Skip("Secrets go client is not yet created") - case influxdb.LabelsResourceType: // 11 - return c.MustCreateLabel(t) - case influxdb.ViewsResourceType: // 12 - t.Skip("Are views still a thing?") - case influxdb.NotificationRuleResourceType: // 14 - return c.MustCreateNotificationRule(t) - case influxdb.NotificationEndpointResourceType: // 15 - return c.MustCreateNotificationEndpoint(t) - case influxdb.ChecksResourceType: // 16 - return c.MustCreateCheck(t) - case influxdb.DBRPResourceType: // 17 - return c.MustCreateDBRPMapping(t) - } - return 0 -} - -// DeleteResource will remove a resource using the API. -func (c *Client) DeleteResource(t *testing.T, r influxdb.ResourceType, id platform.ID) error { - t.Helper() - - ctx := context.Background() - switch r { - case influxdb.AuthorizationsResourceType: // 0 - return c.DeleteAuthorization(ctx, id) - case influxdb.BucketsResourceType: // 1 - return c.DeleteBucket(context.Background(), id) - case influxdb.OrgsResourceType: // 3 - return c.DeleteOrganization(ctx, id) - case influxdb.SourcesResourceType: // 4 - t.Skip("I think sources are going to be removed right?") - case influxdb.TasksResourceType: // 5 - t.Skip("Task go client is not yet created") - case influxdb.TelegrafsResourceType: // 6 - return c.DeleteTelegrafConfig(ctx, id) - case influxdb.UsersResourceType: // 7 - return c.DeleteUser(ctx, id) - case influxdb.VariablesResourceType: // 8 - return c.DeleteVariable(ctx, id) - case influxdb.ScraperResourceType: // 9 - t.Skip("Scraper go client is not yet created") - case influxdb.SecretsResourceType: // 10 - t.Skip("Secrets go client is not yet created") - case influxdb.LabelsResourceType: // 11 - return c.DeleteLabel(ctx, id) - case influxdb.ViewsResourceType: // 12 - t.Skip("Are views still a thing?") - case influxdb.NotificationRuleResourceType: // 14 - return c.DeleteNotificationRule(ctx, id) - case influxdb.NotificationEndpointResourceType: // 15 - // Ignore the other results as suggested by goDoc. - _, _, err := c.DeleteNotificationEndpoint(ctx, id) - return err - case influxdb.ChecksResourceType: // 16 - return c.DeleteCheck(ctx, id) - case influxdb.DBRPResourceType: // 17 - return c.DBRPMappingService.Delete(ctx, c.OrgID, id) - } - return nil -} - -// MustDeleteResource requires no error when deleting a resource. 
-func (c *Client) MustDeleteResource(t *testing.T, r influxdb.ResourceType, id platform.ID) { - t.Helper() - - if err := c.DeleteResource(t, r, id); err != nil { - t.Fatalf("unable to delete resource %v %v: %v", r, id, err) - } -} - -// FindAll returns all the IDs of a specific resource type. -func (c *Client) FindAll(t *testing.T, r influxdb.ResourceType) ([]platform.ID, error) { - t.Helper() - - var ids []platform.ID - ctx := context.Background() - switch r { - case influxdb.AuthorizationsResourceType: // 0 - rs, _, err := c.FindAuthorizations(ctx, influxdb.AuthorizationFilter{}) - if err != nil { - return nil, err - } - for _, r := range rs { - ids = append(ids, r.ID) - } - case influxdb.BucketsResourceType: // 1 - rs, _, err := c.FindBuckets(ctx, influxdb.BucketFilter{}) - if err != nil { - return nil, err - } - for _, r := range rs { - ids = append(ids, r.ID) - } - case influxdb.OrgsResourceType: // 3 - rs, _, err := c.FindOrganizations(ctx, influxdb.OrganizationFilter{}) - if err != nil { - return nil, err - } - for _, r := range rs { - ids = append(ids, r.ID) - } - case influxdb.SourcesResourceType: // 4 - t.Skip("I think sources are going to be removed right?") - case influxdb.TasksResourceType: // 5 - t.Skip("Task go client is not yet created") - case influxdb.TelegrafsResourceType: // 6 - rs, _, err := c.FindTelegrafConfigs(ctx, influxdb.TelegrafConfigFilter{}) - if err != nil { - return nil, err - } - for _, r := range rs { - ids = append(ids, r.ID) - } - case influxdb.UsersResourceType: // 7 - rs, _, err := c.FindUsers(ctx, influxdb.UserFilter{}) - if err != nil { - return nil, err - } - for _, r := range rs { - ids = append(ids, r.ID) - } - case influxdb.VariablesResourceType: // 8 - rs, err := c.FindVariables(ctx, influxdb.VariableFilter{}) - if err != nil { - return nil, err - } - for _, r := range rs { - ids = append(ids, r.ID) - } - case influxdb.ScraperResourceType: // 9 - t.Skip("Scraper go client is not yet created") - case influxdb.SecretsResourceType: // 10 - t.Skip("Secrets go client is not yet created") - case influxdb.LabelsResourceType: // 11 - rs, err := c.FindLabels(ctx, influxdb.LabelFilter{}) - if err != nil { - return nil, err - } - for _, r := range rs { - ids = append(ids, r.ID) - } - case influxdb.ViewsResourceType: // 12 - t.Skip("Are views still a thing?") - case influxdb.NotificationRuleResourceType: // 14 - rs, _, err := c.FindNotificationRules(ctx, influxdb.NotificationRuleFilter{OrgID: &c.OrgID}) - if err != nil { - return nil, err - } - for _, r := range rs { - ids = append(ids, r.GetID()) - } - return ids, nil - case influxdb.NotificationEndpointResourceType: // 15 - rs, _, err := c.FindNotificationEndpoints(ctx, influxdb.NotificationEndpointFilter{OrgID: &c.OrgID}) - if err != nil { - return nil, err - } - for _, r := range rs { - ids = append(ids, r.GetID()) - } - case influxdb.ChecksResourceType: // 16 - rs, _, err := c.FindChecks(ctx, influxdb.CheckFilter{}) - if err != nil { - return nil, err - } - for _, r := range rs { - ids = append(ids, r.ID) - } - case influxdb.DBRPResourceType: // 17 - rs, _, err := c.DBRPMappingService.FindMany(ctx, influxdb.DBRPMappingFilter{OrgID: &c.OrgID}) - if err != nil { - return nil, err - } - for _, r := range rs { - ids = append(ids, r.ID) - } - } - return ids, nil -} - -// MustFindAll returns all the IDs of a specific resource type; any error -// is fatal. 
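//
// Illustrative usage (not part of the original file), assuming a *tests.Client
// named client inside a test function:
//
//	bucketIDs := client.MustFindAll(t, influxdb.BucketsResourceType)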
-func (c *Client) MustFindAll(t *testing.T, r influxdb.ResourceType) []platform.ID { - t.Helper() - - ids, err := c.FindAll(t, r) - if err != nil { - t.Fatalf("unexpected error finding resources %v: %v", r, err) - } - return ids -} - -func (c *Client) AddURM(u platform.ID, typ influxdb.UserType, r influxdb.ResourceType, id platform.ID) error { - access := &influxdb.UserResourceMapping{ - UserID: u, - UserType: typ, - MappingType: influxdb.UserMappingType, - ResourceType: r, - ResourceID: id, - } - - return c.CreateUserResourceMapping( - context.Background(), - access, - ) -} - -// AddOwner associates the user as owner of the resource. -func (c *Client) AddOwner(user platform.ID, r influxdb.ResourceType, id platform.ID) error { - return c.AddURM(user, influxdb.Owner, r, id) -} - -// MustAddOwner requires that the user is associated with the resource -// or the test will be stopped fatally. -func (c *Client) MustAddOwner(t *testing.T, user platform.ID, r influxdb.ResourceType, id platform.ID) { - t.Helper() - - if err := c.AddOwner(user, r, id); err != nil { - t.Fatalf("unexpected error adding owner %v to %v: %v", user, id, err) - } -} - -// AddMember associates the user as member of the resource. -func (c *Client) AddMember(user platform.ID, r influxdb.ResourceType, id platform.ID) error { - return c.AddURM(user, influxdb.Member, r, id) -} - -// MustAddMember requires that the user is associated with the resource -// or the test will be stopped fatally. -func (c *Client) MustAddMember(t *testing.T, user platform.ID, r influxdb.ResourceType, id platform.ID) { - t.Helper() - - if err := c.AddMember(user, r, id); err != nil { - t.Fatalf("unexpected error adding member %v to %v", user, id) - } -} - -// RemoveURM removes association of the user to the resource. -// Interestingly the URM service does not make difference on the user type. -// I.e. removing an URM from a user to a resource, will delete every URM of every type -// from that user to that resource. -// Or, put in another way, there can only be one resource mapping from a user to a -// resource at a time: either you are a member, or an owner (in that case you are a member too). -func (c *Client) RemoveURM(user, id platform.ID) error { - return c.DeleteUserResourceMapping(context.Background(), id, user) -} - -// RemoveSpecificURM gets around a client issue where deletes doesn't have enough context to remove a urm from -// a specific resource type -func (c *Client) RemoveSpecificURM(rt influxdb.ResourceType, ut influxdb.UserType, user, id platform.ID) error { - return c.SpecificURMSvc(rt, ut).DeleteUserResourceMapping(context.Background(), id, user) -} - -// MustRemoveURM requires that the user is removed as owner/member from the resource. -func (c *Client) MustRemoveURM(t *testing.T, user, id platform.ID) { - t.Helper() - - if err := c.RemoveURM(user, id); err != nil { - t.Fatalf("unexpected error removing org/resource mapping: %v", err) - } -} - -// CreateLabelMapping creates a label mapping for label `l` to the resource with `id`. -func (c *Client) CreateLabelMapping(l platform.ID, r influxdb.ResourceType, id platform.ID) error { - mapping := &influxdb.LabelMapping{ - LabelID: l, - ResourceType: r, - ResourceID: id, - } - return c.LabelService.CreateLabelMapping( - context.Background(), - mapping, - ) -} - -// MustCreateLabelMapping requires that the label is associated with the resource -// or the test will be stopped fatally. 
-func (c *Client) MustCreateLabelMapping(t *testing.T, l platform.ID, r influxdb.ResourceType, id platform.ID) { - t.Helper() - - if err := c.CreateLabelMapping(l, r, id); err != nil { - t.Fatalf("unexpected error attaching label %v to %v: %v", l, id, err) - } -} - -// FindLabelMappings finds the labels for the specified resource. -func (c *Client) FindLabelMappings(r influxdb.ResourceType, id platform.ID) ([]platform.ID, error) { - filter := influxdb.LabelMappingFilter{ - ResourceType: r, - ResourceID: id, - } - ls, err := c.LabelService.FindResourceLabels( - context.Background(), - filter, - ) - if err != nil { - return nil, err - } - var ids []platform.ID - for _, r := range ls { - ids = append(ids, r.ID) - } - return ids, nil -} - -// MustFindLabelMappings makes the test fail if an error is found. -func (c *Client) MustFindLabelMappings(t *testing.T, r influxdb.ResourceType, id platform.ID) []platform.ID { - t.Helper() - - ls, err := c.FindLabelMappings(r, id) - if err != nil { - t.Fatalf("unexpected error finding label mappings: %v", err) - } - return ls -} - -// DeleteLabelMapping deletes the label for the specified resource. -func (c *Client) DeleteLabelMapping(l platform.ID, r influxdb.ResourceType, id platform.ID) error { - m := &influxdb.LabelMapping{ - ResourceType: r, - ResourceID: id, - LabelID: l, - } - return c.LabelService.DeleteLabelMapping( - context.Background(), - m, - ) -} - -// MustDeleteLabelMapping makes the test fail if an error is found. -func (c *Client) MustDeleteLabelMapping(t *testing.T, l platform.ID, r influxdb.ResourceType, id platform.ID) { - t.Helper() - - if err := c.DeleteLabelMapping(l, r, id); err != nil { - t.Fatalf("unexpected error deleting label %v from %v", l, id) - } -} diff --git a/tests/defaults.go b/tests/defaults.go deleted file mode 100644 index 3087b6d13eb..00000000000 --- a/tests/defaults.go +++ /dev/null @@ -1,17 +0,0 @@ -package tests - -// Default values created when calling NewPipeline. -const ( - DefaultOrgName = "myorg" - - DefaultBucketName = "db/rp" // Since we can only write data via 1.x path we need to have a 1.x bucket name - - DefaultUsername = "admin" - DefaultPassword = "password" - - // OperToken has permissions to do anything. - OperToken = "opertoken" -) - -// VeryVerbose when set to true, will enable very verbose logging of services. -var VeryVerbose bool diff --git a/tests/doc.go b/tests/doc.go deleted file mode 100644 index bf768ee71e8..00000000000 --- a/tests/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -/* -Package tests contains a set of integration tests, which run in-memory versions -of various 2.0 services. They're not intended to be full end-to-end tests, -but are a suitable place to write tests that need to flex the logic of -multiple 2.0 components. 
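As an illustrative sketch (not part of the original files), a typical integration
test composed these helpers roughly as follows; only functions defined in this
package and the launcher test harness are used, and the test name, sample point,
and Flux query are placeholders:

	func TestWriteAndQuery(t *testing.T) {
		p := tests.NewDefaultPipeline(t)
		defer p.MustClose()

		// Operator-token client bound to the default org and bucket.
		client := p.MustNewAdminClient()

		// Write a point via the HTTP write path, then read it back as CSV.
		client.MustWriteBatch(`cpu,host=server01 value=1 1434055562000000000`)
		csv, err := client.QueryFlux(tests.DefaultOrgName, `from(bucket: "db/rp") |> range(start: 0)`)
		if err != nil {
			t.Fatal(err)
		}
		t.Log(csv)
	}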
-*/ -package tests diff --git a/tests/mock.go b/tests/mock.go deleted file mode 100644 index c3b4b772528..00000000000 --- a/tests/mock.go +++ /dev/null @@ -1,117 +0,0 @@ -package tests - -import ( - "net/http" - "time" - - "github.com/influxdata/influxdb/v2" - influxhttp "github.com/influxdata/influxdb/v2/http" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/notification" - checks "github.com/influxdata/influxdb/v2/notification/check" - "github.com/influxdata/influxdb/v2/notification/endpoint" - "github.com/influxdata/influxdb/v2/notification/rule" -) - -// ValidCustomNotificationEndpoint creates a NotificationEndpoint with a custom name -func ValidCustomNotificationEndpoint(org platform.ID, name string) influxdb.NotificationEndpoint { - return &endpoint.HTTP{ - Base: endpoint.Base{ - Name: name, - OrgID: &org, - Status: influxdb.Active, - CRUDLog: influxdb.CRUDLog{}, - }, - URL: "https://howdy.com", - AuthMethod: "none", - Method: http.MethodGet, - } -} - -// ValidNotificationEndpoint returns a valid notification endpoint. -// This is the easiest way of "mocking" a influxdb.NotificationEndpoint. -func ValidNotificationEndpoint(org platform.ID) influxdb.NotificationEndpoint { - return ValidCustomNotificationEndpoint(org, "howdy") -} - -// ValidNotificationRule returns a valid Notification Rule of type HTTP for testing -func ValidNotificationRule(org, endpoint platform.ID) influxdb.NotificationRule { - d, _ := notification.FromTimeDuration(time.Second * 5) - return &rule.HTTP{ - Base: rule.Base{ - Name: "little rule", - EndpointID: endpoint, - OrgID: org, - Every: &d, - CRUDLog: influxdb.CRUDLog{}, - StatusRules: []notification.StatusRule{ - { - CurrentLevel: notification.Critical, - }, - }, - TagRules: []notification.TagRule{}, - }, - } -} - -// MockCheck returns a valid check to be used in tests. 
-func MockCheck(name string, orgID, userID platform.ID) *influxhttp.Check { - return &influxhttp.Check{ - ID: userID, - OwnerID: userID, - Type: "threshold", - Status: influxdb.Active, - Name: name, - Description: "pipeline test check", - OrgID: orgID, - Every: "1m", - Offset: "0m", - Level: "CRIT", - StatusMessageTemplate: "Check: ${ r._check_name } is: ${ r._level }", - Query: &influxhttp.CheckQuery{ - Name: name, - Text: `from(bucket: "db/rp") |> range(start: v.timeRangeStart, stop: v.timeRangeStop) |> filter(fn: (r) => r._measurement == "my_measurement") |> filter(fn: (r) => r._field == "my_field") |> count() |> yield(name: "count")`, - EditMode: "builder", - BuilderConfig: &influxhttp.CheckBuilderConfig{ - Buckets: []string{"db/rp"}, - Tags: []struct { - Key string `json:"key"` - Values []string `json:"values"` - AggregateFunctionType string `json:"aggregateFunctionType"` - }{ - { - Key: "_measurement", - Values: []string{"my_measurement"}, - AggregateFunctionType: "filter", - }, - { - Key: "_field", - Values: []string{"my_field"}, - AggregateFunctionType: "filter", - }, - }, - Functions: []struct { - Name string `json:"name"` - }{ - { - Name: "count", - }, - }, - AggregateWindow: struct { - Period string `json:"period"` - }{ - Period: "1m", - }, - }, - }, - Thresholds: []*influxhttp.CheckThreshold{ - { - Type: "greater", - Value: 9999, - ThresholdConfigBase: checks.ThresholdConfigBase{ - Level: notification.Critical, - }, - }, - }, - } -} diff --git a/tests/pipeline/fixture.go b/tests/pipeline/fixture.go deleted file mode 100644 index 1e07ccaa40f..00000000000 --- a/tests/pipeline/fixture.go +++ /dev/null @@ -1,76 +0,0 @@ -package pipeline - -import ( - "fmt" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/tests" -) - -type ClientTag string - -const ( - AdminTag ClientTag = "admin" - OwnerTag ClientTag = "owner" - MemberTag ClientTag = "member" - NoAccessTag ClientTag = "no_access" -) - -var AllClientTags = []ClientTag{AdminTag, OwnerTag, MemberTag, NoAccessTag} - -// BaseFixture is a Fixture with multiple users in the system. -type BaseFixture struct { - Admin *tests.Client - Owner *tests.Client - Member *tests.Client - NoAccess *tests.Client -} - -// NewBaseFixture creates a BaseFixture with and admin, an org owner, a member, and an outsider -// for the given orgID and bucketID. -func NewBaseFixture(t *testing.T, p *tests.Pipeline, orgID, bucketID platform.ID) BaseFixture { - fx := BaseFixture{} - admin := p.MustNewAdminClient() - fx.Admin = admin - cli, id, err := p.BrowserFor(orgID, bucketID, "owner") - if err != nil { - t.Fatalf("error while creating browser client: %v", err) - } - admin.MustAddOwner(t, id, influxdb.OrgsResourceType, orgID) - cli.UserID = id - fx.Owner = cli - - cli, id, err = p.BrowserFor(orgID, bucketID, "member") - if err != nil { - t.Fatalf("error while creating browser client: %v", err) - } - admin.MustAddMember(t, id, influxdb.OrgsResourceType, orgID) - cli.UserID = id - fx.Member = cli - - cli, id, err = p.BrowserFor(orgID, bucketID, "no_access") - if err != nil { - t.Fatalf("error while creating browser client: %v", err) - } - cli.UserID = id - fx.NoAccess = cli - return fx -} - -// GetClient returns the client associated with the given tag. 
-func (f BaseFixture) GetClient(tag ClientTag) *tests.Client { - switch tag { - case AdminTag: - return f.Admin - case OwnerTag: - return f.Owner - case MemberTag: - return f.Member - case NoAccessTag: - return f.NoAccess - default: - panic(fmt.Sprintf("unknown tag %s", tag)) - } -} diff --git a/tests/pipeline_helpers.go b/tests/pipeline_helpers.go deleted file mode 100644 index aa0440090d1..00000000000 --- a/tests/pipeline_helpers.go +++ /dev/null @@ -1,164 +0,0 @@ -package tests - -import ( - "context" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/cmd/influxd/launcher" - "github.com/influxdata/influxdb/v2/kit/platform" - "go.uber.org/zap" -) - -// A Pipeline is responsible for configuring launcher.TestLauncher -// with default values so it may be used for end-to-end integration -// tests. -type Pipeline struct { - Launcher *launcher.TestLauncher - - DefaultOrgID platform.ID - DefaultBucketID platform.ID - DefaultUserID platform.ID -} - -// NewDefaultPipeline creates a Pipeline with default -// values. -// -// It is retained for compatibility with cloud tests. -func NewDefaultPipeline(t *testing.T, opts ...launcher.OptSetter) *DefaultPipeline { - setDefaultLogLevel := func(o *launcher.InfluxdOpts) { - // This is left here mainly for retro compatibility - if VeryVerbose { - o.LogLevel = zap.DebugLevel - } else { - o.LogLevel = zap.InfoLevel - } - - } - // Set the default log level as the FIRST option here so users can override - // it with passed-in setters. - opts = append([]launcher.OptSetter{setDefaultLogLevel}, opts...) - return &DefaultPipeline{Pipeline: NewPipeline(t, opts...)} -} - -// NewPipeline returns a pipeline with the given options applied to the configuration as appropriate. -// -// A single user, org, bucket and token are created. -func NewPipeline(tb testing.TB, opts ...launcher.OptSetter) *Pipeline { - tb.Helper() - - tl := launcher.NewTestLauncher() - p := &Pipeline{ - Launcher: tl, - } - - tl.RunOrFail(tb, context.Background(), opts...) - - // setup default operator - res := p.Launcher.OnBoardOrFail(tb, &influxdb.OnboardingRequest{ - User: DefaultUsername, - Password: DefaultPassword, - Org: DefaultOrgName, - Bucket: DefaultBucketName, - RetentionPeriodSeconds: influxdb.InfiniteRetention, - Token: OperToken, - }) - - p.DefaultOrgID = res.Org.ID - p.DefaultUserID = res.User.ID - p.DefaultBucketID = res.Bucket.ID - - return p -} - -// Open opens all the components of the pipeline. -func (p *Pipeline) Open() error { - return nil -} - -// MustOpen opens the pipeline, panicking if any error is encountered. -func (p *Pipeline) MustOpen() { - if err := p.Open(); err != nil { - panic(err) - } -} - -// Close closes all the components of the pipeline. -func (p *Pipeline) Close() error { - return p.Launcher.Shutdown(context.Background()) -} - -// MustClose closes the pipeline, panicking if any error is encountered. -func (p *Pipeline) MustClose() { - if err := p.Close(); err != nil { - panic(err) - } -} - -// MustNewAdminClient returns a default client that will direct requests to Launcher. -// -// The operator token is authorized to do anything in the system. -func (p *Pipeline) MustNewAdminClient() *Client { - return p.MustNewClient(p.DefaultOrgID, p.DefaultBucketID, OperToken) -} - -// MustNewClient returns a client that will direct requests to Launcher. 
-func (p *Pipeline) MustNewClient(org, bucket platform.ID, token string) *Client { - config := ClientConfig{ - UserID: p.DefaultUserID, - OrgID: org, - BucketID: bucket, - DocumentsNamespace: DefaultDocumentsNamespace, - Token: token, - } - svc, err := NewClient(p.Launcher.URL().String(), config) - if err != nil { - panic(err) - } - return svc -} - -// NewBrowserClient returns a client with a cookie session that will direct requests to Launcher. -func (p *Pipeline) NewBrowserClient(org, bucket platform.ID, session *influxdb.Session) (*Client, error) { - config := ClientConfig{ - UserID: p.DefaultUserID, - OrgID: org, - BucketID: bucket, - DocumentsNamespace: DefaultDocumentsNamespace, - Session: session, - } - return NewClient(p.Launcher.URL().String(), config) -} - -// BrowserFor will create a user, session, and browser client. -// The generated browser points to the given org and bucket. -// -// The user and session are inserted directly into the backing store. -func (p *Pipeline) BrowserFor(org, bucket platform.ID, username string) (*Client, platform.ID, error) { - ctx := context.Background() - user := &influxdb.User{ - Name: username, - } - - err := p.Launcher.UserService().CreateUser(ctx, user) - if err != nil { - return nil, 0, err - } - - session, err := p.Launcher.SessionService().CreateSession(ctx, username) - if err != nil { - return nil, 0, err - } - client, err := p.NewBrowserClient(org, bucket, session) - return client, user.ID, err -} - -// Flush is a no-op and retained for compatibility with tests from cloud. -func (p *Pipeline) Flush() { -} - -// DefaultPipeline is a wrapper for Pipeline and is retained -// for compatibility with cloud tests. -type DefaultPipeline struct { - *Pipeline -} diff --git a/token.go b/token.go deleted file mode 100644 index be9bbf30ea4..00000000000 --- a/token.go +++ /dev/null @@ -1,7 +0,0 @@ -package influxdb - -// TokenGenerator represents a generator for API tokens. -type TokenGenerator interface { - // Token generates a new API token. - Token() (string, error) -} diff --git a/toml/toml.go b/toml/toml.go deleted file mode 100644 index 931aaf92ab6..00000000000 --- a/toml/toml.go +++ /dev/null @@ -1,274 +0,0 @@ -// Package toml adds support to marshal and unmarshal types not in the official TOML spec. -package toml - -import ( - "encoding" - "errors" - "fmt" - "os" - "os/user" - "reflect" - "strconv" - "strings" - "time" - - "github.com/dustin/go-humanize" - "github.com/spf13/pflag" -) - -// Duration is a TOML wrapper type for time.Duration. -type Duration time.Duration - -var _ pflag.Value = (*Duration)(nil) - -func (d *Duration) Set(s string) error { - return d.UnmarshalText([]byte(s)) -} - -func (d Duration) Type() string { - return "Duration" -} - -// String returns the string representation of the duration. -func (d Duration) String() string { - return time.Duration(d).String() -} - -// UnmarshalText parses a TOML value into a duration value. -func (d *Duration) UnmarshalText(text []byte) error { - // Ignore if there is no value set. - if len(text) == 0 { - return nil - } - - // Otherwise parse as a duration formatted string. - duration, err := time.ParseDuration(string(text)) - if err != nil { - return err - } - - // Set duration and return. - *d = Duration(duration) - return nil -} - -// MarshalText converts a duration to a string for decoding toml -func (d Duration) MarshalText() (text []byte, err error) { - return []byte(d.String()), nil -} - -// Size represents a TOML parsable file size. 
-// Users can specify size using "k" or "K" for kibibytes, "m" or "M" for mebibytes, -// and "g" or "G" for gibibytes. If a size suffix isn't specified then bytes are assumed. -type Size uint64 - -var _ pflag.Value = (*Size)(nil) - -func (s Size) String() string { - return humanize.IBytes(uint64(s)) -} - -func (s *Size) Set(d string) error { - return s.UnmarshalText([]byte(d)) -} - -func (s Size) Type() string { - return "Size" -} - -// UnmarshalText parses a byte size from text. -func (s *Size) UnmarshalText(text []byte) error { - if len(text) == 0 { - return fmt.Errorf("size was empty") - } - - v, err := humanize.ParseBytes(string(text)) - if err != nil { - return err - } - *s = Size(v) - - return nil -} - -type FileMode uint32 - -func (m *FileMode) UnmarshalText(text []byte) error { - // Ignore if there is no value set. - if len(text) == 0 { - return nil - } - - mode, err := strconv.ParseUint(string(text), 8, 32) - if err != nil { - return err - } else if mode == 0 { - return errors.New("file mode cannot be zero") - } - *m = FileMode(mode) - return nil -} - -func (m FileMode) MarshalText() (text []byte, err error) { - if m != 0 { - return []byte(fmt.Sprintf("%04o", m)), nil - } - return nil, nil -} - -type Group int - -func (g *Group) UnmarshalTOML(data interface{}) error { - if grpName, ok := data.(string); ok { - group, err := user.LookupGroup(grpName) - if err != nil { - return err - } - - gid, err := strconv.Atoi(group.Gid) - if err != nil { - return err - } - *g = Group(gid) - return nil - } else if gid, ok := data.(int64); ok { - *g = Group(gid) - return nil - } - return errors.New("group must be a name (string) or id (int)") -} - -func ApplyEnvOverrides(getenv func(string) string, prefix string, val interface{}) error { - if getenv == nil { - getenv = os.Getenv - } - return applyEnvOverrides(getenv, prefix, reflect.ValueOf(val), "") -} - -func applyEnvOverrides(getenv func(string) string, prefix string, spec reflect.Value, structKey string) error { - element := spec - // If spec is a named type and is addressable, - // check the address to see if it implements encoding.TextUnmarshaler. 
- if spec.Kind() != reflect.Pointer && spec.Type().Name() != "" && spec.CanAddr() { - v := spec.Addr() - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - value := getenv(prefix) - return u.UnmarshalText([]byte(value)) - } - } - // If we have a pointer, dereference it - if spec.Kind() == reflect.Pointer { - element = spec.Elem() - } - - value := getenv(prefix) - - switch element.Kind() { - case reflect.String: - if len(value) == 0 { - return nil - } - element.SetString(value) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - intValue, err := strconv.ParseInt(value, 0, element.Type().Bits()) - if err != nil { - return fmt.Errorf("failed to apply %v to %v using type %v and value '%v': %s", prefix, structKey, element.Type().String(), value, err) - } - element.SetInt(intValue) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - intValue, err := strconv.ParseUint(value, 0, element.Type().Bits()) - if err != nil { - return fmt.Errorf("failed to apply %v to %v using type %v and value '%v': %s", prefix, structKey, element.Type().String(), value, err) - } - element.SetUint(intValue) - case reflect.Bool: - boolValue, err := strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("failed to apply %v to %v using type %v and value '%v': %s", prefix, structKey, element.Type().String(), value, err) - } - element.SetBool(boolValue) - case reflect.Float32, reflect.Float64: - floatValue, err := strconv.ParseFloat(value, element.Type().Bits()) - if err != nil { - return fmt.Errorf("failed to apply %v to %v using type %v and value '%v': %s", prefix, structKey, element.Type().String(), value, err) - } - element.SetFloat(floatValue) - case reflect.Slice: - // If the type is s slice, apply to each using the index as a suffix, e.g. GRAPHITE_0, GRAPHITE_0_TEMPLATES_0 or GRAPHITE_0_TEMPLATES="item1,item2" - for j := 0; j < element.Len(); j++ { - f := element.Index(j) - if err := applyEnvOverrides(getenv, prefix, f, structKey); err != nil { - return err - } - - if err := applyEnvOverrides(getenv, fmt.Sprintf("%s_%d", prefix, j), f, structKey); err != nil { - return err - } - } - - // If the type is s slice but have value not parsed as slice e.g. GRAPHITE_0_TEMPLATES="item1,item2" - if element.Len() == 0 && len(value) > 0 { - rules := strings.Split(value, ",") - - for _, rule := range rules { - element.Set(reflect.Append(element, reflect.ValueOf(rule))) - } - } - case reflect.Struct: - typeOfSpec := element.Type() - for i := 0; i < element.NumField(); i++ { - field := element.Field(i) - - // Skip any fields that we cannot set - if !field.CanSet() && field.Kind() != reflect.Slice { - continue - } - - structField := typeOfSpec.Field(i) - fieldName := structField.Name - - configName := structField.Tag.Get("toml") - if configName == "-" { - // Skip fields with tag `toml:"-"`. - continue - } - - if configName == "" && structField.Anonymous { - // Embedded field without a toml tag. - // Don't modify prefix. 
- if err := applyEnvOverrides(getenv, prefix, field, fieldName); err != nil { - return err - } - continue - } - - // Replace hyphens with underscores to avoid issues with shells - configName = strings.Replace(configName, "-", "_", -1) - - envKey := strings.ToUpper(configName) - if prefix != "" { - envKey = strings.ToUpper(fmt.Sprintf("%s_%s", prefix, configName)) - } - - // If it's a sub-config, recursively apply - if field.Kind() == reflect.Struct || field.Kind() == reflect.Pointer || - field.Kind() == reflect.Slice || field.Kind() == reflect.Array { - if err := applyEnvOverrides(getenv, envKey, field, fieldName); err != nil { - return err - } - continue - } - - value := getenv(envKey) - // Skip any fields we don't have a value to set - if len(value) == 0 { - continue - } - - if err := applyEnvOverrides(getenv, envKey, field, fieldName); err != nil { - return err - } - } - } - return nil -} diff --git a/toml/toml_test.go b/toml/toml_test.go deleted file mode 100644 index 8eb6b028f37..00000000000 --- a/toml/toml_test.go +++ /dev/null @@ -1,244 +0,0 @@ -package toml_test - -import ( - "fmt" - "math" - "os/user" - "runtime" - "strconv" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - itoml "github.com/influxdata/influxdb/v2/toml" -) - -func TestSize_UnmarshalText(t *testing.T) { - var s itoml.Size - for _, test := range []struct { - str string - want uint64 - }{ - {"1", 1}, - {"10", 10}, - {"100", 100}, - {"1kib", 1 << 10}, - {"10kib", 10 << 10}, - {"100kib", 100 << 10}, - {"1Kib", 1 << 10}, - {"10Kib", 10 << 10}, - {"100Kib", 100 << 10}, - {"1mib", 1 << 20}, - {"10mib", 10 << 20}, - {"100mib", 100 << 20}, - {"1Mib", 1 << 20}, - {"10Mib", 10 << 20}, - {"100Mib", 100 << 20}, - {"1gib", 1 << 30}, - {"1Gib", 1 << 30}, - {"100Gib", 100 << 30}, - {"1tib", 1 << 40}, - } { - if err := s.UnmarshalText([]byte(test.str)); err != nil { - t.Errorf("unexpected error: %s", err) - } - if s != itoml.Size(test.want) { - t.Errorf("wanted: %d got: %d", test.want, s) - } - } - - for _, str := range []string{ - fmt.Sprintf("%dk", uint64(math.MaxUint64-1)), - "10000000000000000000g", - "abcdef", - "√m", - "a1", - "", - } { - if err := s.UnmarshalText([]byte(str)); err == nil { - t.Errorf("input should have failed: %s", str) - } - } -} - -func TestFileMode_MarshalText(t *testing.T) { - for _, test := range []struct { - mode int - want string - }{ - {mode: 0755, want: `0755`}, - {mode: 0777, want: `0777`}, - {mode: 01777, want: `1777`}, - } { - mode := itoml.FileMode(test.mode) - if got, err := mode.MarshalText(); err != nil { - t.Errorf("unexpected error: %s", err) - } else if test.want != string(got) { - t.Errorf("wanted: %v got: %v", test.want, string(got)) - } - } -} - -func TestFileMode_UnmarshalText(t *testing.T) { - for _, test := range []struct { - str string - want uint32 - }{ - {str: ``, want: 0}, - {str: `0777`, want: 0777}, - {str: `777`, want: 0777}, - {str: `1777`, want: 01777}, - {str: `0755`, want: 0755}, - } { - var mode itoml.FileMode - if err := mode.UnmarshalText([]byte(test.str)); err != nil { - t.Errorf("unexpected error: %s", err) - } else if mode != itoml.FileMode(test.want) { - t.Errorf("wanted: %04o got: %04o", test.want, mode) - } - } -} - -func TestGroup_UnmarshalTOML(t *testing.T) { - // Skip this test on windows since it does not support setting the group anyway. - if runtime.GOOS == "windows" { - t.Skip("unsupported on windows") - } - - // Find the current user ID so we can use that group name. 
- u, err := user.Current() - if err != nil { - t.Skipf("unable to find the current user: %s", err) - } - - // Lookup the group by the group id. - gr, err := user.LookupGroupId(u.Gid) - if err == nil { - var group itoml.Group - if err := group.UnmarshalTOML(gr.Name); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if got, want := u.Gid, strconv.Itoa(int(group)); got != want { - t.Fatalf("unexpected group id: %s != %s", got, want) - } - } - - // Attempt to convert the group to an integer so we can test reading an integer. - gid, err := strconv.Atoi(u.Gid) - if err != nil { - t.Fatalf("group id is not an integer: %s", err) - } - - var group itoml.Group - if err := group.UnmarshalTOML(int64(gid)); err != nil { - t.Fatalf("unexpected error: %s", err) - } else if int(group) != gid { - t.Fatalf("unexpected group id: %d != %d", gid, int(group)) - } -} - -func TestConfig_Encode(t *testing.T) { - t.Skip("TODO(jsternberg): rewrite this test to use something from platform") - //var c run.Config - //c.Coordinator.WriteTimeout = itoml.Duration(time.Minute) - //buf := new(bytes.Buffer) - //if err := toml.NewEncoder(buf).Encode(&c); err != nil { - // t.Fatal("Failed to encode: ", err) - //} - //got, search := buf.String(), `write-timeout = "1m0s"` - //if !strings.Contains(got, search) { - // t.Fatalf("Encoding config failed.\nfailed to find %s in:\n%s\n", search, got) - //} -} - -func TestEnvOverride_Builtins(t *testing.T) { - envMap := map[string]string{ - "X_STRING": "a string", - "X_DURATION": "1m1s", - "X_INT": "1", - "X_INT8": "2", - "X_INT16": "3", - "X_INT32": "4", - "X_INT64": "5", - "X_UINT": "6", - "X_UINT8": "7", - "X_UINT16": "8", - "X_UINT32": "9", - "X_UINT64": "10", - "X_BOOL": "true", - "X_FLOAT32": "11.5", - "X_FLOAT64": "12.5", - "X_NESTED_STRING": "a nested string", - "X_NESTED_INT": "13", - "X_ES": "an embedded string", - "X__": "-1", // This value should not be applied to the "ignored" field with toml tag -. 
- } - - env := func(s string) string { - return envMap[s] - } - - type nested struct { - Str string `toml:"string"` - Int int `toml:"int"` - } - type Embedded struct { - ES string `toml:"es"` - } - type all struct { - Str string `toml:"string"` - Dur itoml.Duration `toml:"duration"` - Int int `toml:"int"` - Int8 int8 `toml:"int8"` - Int16 int16 `toml:"int16"` - Int32 int32 `toml:"int32"` - Int64 int64 `toml:"int64"` - Uint uint `toml:"uint"` - Uint8 uint8 `toml:"uint8"` - Uint16 uint16 `toml:"uint16"` - Uint32 uint32 `toml:"uint32"` - Uint64 uint64 `toml:"uint64"` - Bool bool `toml:"bool"` - Float32 float32 `toml:"float32"` - Float64 float64 `toml:"float64"` - Nested nested `toml:"nested"` - - Embedded - - Ignored int `toml:"-"` - } - - var got all - if err := itoml.ApplyEnvOverrides(env, "X", &got); err != nil { - t.Fatal(err) - } - - exp := all{ - Str: "a string", - Dur: itoml.Duration(time.Minute + time.Second), - Int: 1, - Int8: 2, - Int16: 3, - Int32: 4, - Int64: 5, - Uint: 6, - Uint8: 7, - Uint16: 8, - Uint32: 9, - Uint64: 10, - Bool: true, - Float32: 11.5, - Float64: 12.5, - Nested: nested{ - Str: "a nested string", - Int: 13, - }, - Embedded: Embedded{ - ES: "an embedded string", - }, - Ignored: 0, - } - - if diff := cmp.Diff(got, exp); diff != "" { - t.Fatal(diff) - } -} diff --git a/tools.go b/tools.go deleted file mode 100644 index d9c33850498..00000000000 --- a/tools.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build tools - -package influxdb - -import ( - _ "github.com/benbjohnson/tmpl" - _ "github.com/editorconfig-checker/editorconfig-checker/cmd/editorconfig-checker" - _ "github.com/influxdata/pkg-config" - _ "github.com/kevinburke/go-bindata/go-bindata" - _ "github.com/mna/pigeon" - _ "golang.org/x/tools/cmd/goimports" - _ "golang.org/x/tools/cmd/stringer" - _ "google.golang.org/protobuf/cmd/protoc-gen-go" - _ "gopkg.in/yaml.v2" - _ "honnef.co/go/tools/cmd/staticcheck" -) - -// This package is a workaround for adding additional paths to the go.mod file -// and ensuring they stay there. The build tag ensures this file never gets -// compiled, but the go module tool will still look at the dependencies and -// add/keep them in go.mod so we can version these paths along with our other -// dependencies. When we run build on any of these paths, we get the version -// that has been specified in go.mod rather than the master copy. diff --git a/tools/tmpl/main.go b/tools/tmpl/main.go deleted file mode 100644 index f8662e8b6ec..00000000000 --- a/tools/tmpl/main.go +++ /dev/null @@ -1,250 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "go/format" - "os" - "os/exec" - "path/filepath" - "strings" - "text/template" -) - -const Ext = ".tmpl" - -type pathSpec struct { - in, out string -} - -func (p *pathSpec) String() string { return p.in + " → " + p.out } -func (p *pathSpec) IsGoFile() bool { return filepath.Ext(p.out) == ".go" } - -func parsePath(path string) (string, string) { - p := strings.IndexByte(path, '=') - if p == -1 { - if filepath.Ext(path) != Ext { - errExit("template file '%s' must have .tmpl extension", path) - } - return path, path[:len(path)-len(Ext)] - } - - return path[:p], path[p+1:] -} - -type data struct { - In interface{} - D listValue -} - -func errExit(format string, a ...interface{}) { - fmt.Fprintf(os.Stderr, format, a...) 
- fmt.Fprintln(os.Stderr) - os.Exit(1) -} - -type listValue map[string]string - -func (l listValue) String() string { - res := make([]string, 0, len(l)) - for k, v := range l { - res = append(res, fmt.Sprintf("%s=%s", k, v)) - } - return strings.Join(res, ", ") -} - -func (l listValue) Set(v string) error { - nv := strings.Split(v, "=") - if len(nv) != 2 { - return fmt.Errorf("expected NAME=VALUE, got %s", v) - } - l[nv[0]] = nv[1] - return nil -} - -func main() { - var ( - dataArg = flag.String("data", "", "input JSON data") - gi = flag.Bool("i", false, "run goimports") - in = &data{D: make(listValue)} - ) - - flag.Var(&in.D, "d", "-d NAME=VALUE") - - flag.Parse() - if *dataArg == "" { - errExit("data option is required") - } - - if *gi { - if _, err := exec.LookPath("goimports"); err != nil { - errExit("failed to find goimports: %s", err.Error()) - } - formatter = formatSource - } else { - formatter = format.Source - } - - paths := flag.Args() - if len(paths) == 0 { - errExit("no tmpl files specified") - } - - specs := make([]pathSpec, len(paths)) - for i, p := range paths { - in, out := parsePath(p) - specs[i] = pathSpec{in: in, out: out} - } - - in.In = readData(*dataArg) - process(in, specs) -} - -func mustReadAll(path string) []byte { - data, err := os.ReadFile(path) - if err != nil { - errExit(err.Error()) - } - - return data -} - -func readData(path string) interface{} { - data := mustReadAll(path) - var v interface{} - if err := json.Unmarshal(StripComments(data), &v); err != nil { - errExit("invalid JSON data: %s", err.Error()) - } - return v -} - -func fileMode(path string) os.FileMode { - stat, err := os.Stat(path) - if err != nil { - errExit(err.Error()) - } - return stat.Mode() -} - -var funcs = template.FuncMap{ - "lower": strings.ToLower, - "upper": strings.ToUpper, -} - -func process(data interface{}, specs []pathSpec) { - for _, spec := range specs { - var ( - t *template.Template - err error - ) - t, err = template.New("gen").Funcs(funcs).Parse(string(mustReadAll(spec.in))) - if err != nil { - errExit("error processing template '%s': %s", spec.in, err.Error()) - } - - var buf bytes.Buffer - if spec.IsGoFile() { - // preamble - fmt.Fprintf(&buf, "// Code generated by %s. 
DO NOT EDIT.\n", spec.in) - fmt.Fprintln(&buf) - } - err = t.Execute(&buf, data) - if err != nil { - errExit("error executing template '%s': %s", spec.in, err.Error()) - } - - generated := buf.Bytes() - if spec.IsGoFile() { - generated, err = formatter(generated) - if err != nil { - errExit("error formatting '%s': %s", spec.in, err.Error()) - } - } - - os.WriteFile(spec.out, generated, fileMode(spec.in)) - } -} - -var ( - formatter func([]byte) ([]byte, error) -) - -func formatSource(in []byte) ([]byte, error) { - r := bytes.NewReader(in) - cmd := exec.Command("goimports") - cmd.Stdin = r - out, err := cmd.Output() - if err != nil { - if ee, ok := err.(*exec.ExitError); ok { - return nil, fmt.Errorf("error running goimports: %s", string(ee.Stderr)) - } - return nil, fmt.Errorf("error running goimports: %s", string(out)) - } - - return out, nil -} - -func StripComments(raw []byte) []byte { - var ( - quoted, esc bool - comment bool - ) - - buf := bytes.Buffer{} - - for i := 0; i < len(raw); i++ { - b := raw[i] - - if comment { - switch b { - case '/': - comment = false - j := bytes.IndexByte(raw[i+1:], '\n') - if j == -1 { - i = len(raw) - } else { - i += j // keep new line - } - case '*': - j := bytes.Index(raw[i+1:], []byte("*/")) - if j == -1 { - i = len(raw) - } else { - i += j + 2 - comment = false - } - } - continue - } - - if esc { - esc = false - continue - } - - if b == '\\' && quoted { - esc = true - continue - } - - if b == '"' || b == '\'' { - quoted = !quoted - } - - if b == '/' && !quoted { - comment = true - continue - } - - buf.WriteByte(b) - } - - if quoted || esc || comment { - // unexpected state, so return raw bytes - return raw - } - - return buf.Bytes() -} diff --git a/tools/tmpl/main_test.go b/tools/tmpl/main_test.go deleted file mode 100644 index cbb1f07e8fb..00000000000 --- a/tools/tmpl/main_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package main - -import ( - "testing" -) - -func TestStripComments(t *testing.T) { - tests := []struct { - name string - in string - exp string - }{ - {name: "none", in: `[1,2,3]`, exp: `[1,2,3]`}, - {name: "single-line, line comment at end", in: `[1,2,3] // foo bar`, exp: `[1,2,3] `}, - {name: "single-line, block comment at end", in: `[1,2,3] /* foo bar */ `, exp: `[1,2,3] `}, - {name: "single-line, block comment at end", in: `[1,2,3] /* /* // */`, exp: `[1,2,3] `}, - {name: "single-line, block comment in middle", in: `[1,/* foo bar */2,3]`, exp: `[1,2,3]`}, - {name: "single-line, block comment in string", in: `[1,"/* foo bar */"]`, exp: `[1,"/* foo bar */"]`}, - {name: "single-line, malformed block comment", in: `[1,2,/*]`, exp: `[1,2,/*]`}, - {name: "single-line, malformed JSON", in: `[1,2,/]`, exp: `[1,2,/]`}, - - { - name: "multi-line", - in: `[ - 1, - 2, - 3 -]`, - exp: `[ - 1, - 2, - 3 -]`, - }, - { - name: "multi-line, multiple line comments", - in: `[ // foo - 1, // bar - 2, - 3 -] // fit`, - exp: `[ - 1, - 2, - 3 -] `, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - got := string(StripComments([]byte(test.in))) - if got != test.exp { - t.Errorf("got:\n%s\nexp:\n%s", got, test.exp) - } - }) - } -} diff --git a/tsdb/README.md b/tsdb/README.md deleted file mode 100644 index 0bb6697db58..00000000000 --- a/tsdb/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# Line Protocol - -The line protocol is a text based format for writing points to InfluxDB. Each line defines a single point. -Multiple lines must be separated by the newline character `\n`. 
The format of the line consists of three parts: - -``` -[key] [fields] [timestamp] -``` - -Each section is separated by spaces. The minimum required point consists of a measurement name and at least one field. Points without a specified timestamp will be written using the server's local timestamp. Timestamps are assumed to be in nanoseconds unless a `precision` value is passed in the query string. - -## Key - -The key is the measurement name and any optional tags separated by commas. Measurement names, tag keys, and tag values must escape any spaces or commas using a backslash (`\`). For example: `\ ` and `\,`. All tag values are stored as strings and should not be surrounded in quotes. - -Tags should be sorted by key before being sent for best performance. The sort should match that from the Go `bytes.Compare` function (http://golang.org/pkg/bytes/#Compare). - -### Examples - -``` -# measurement only -cpu - -# measurement and tags -cpu,host=serverA,region=us-west - -# measurement with commas -cpu\,01,host=serverA,region=us-west - -# tag value with spaces -cpu,host=server\ A,region=us\ west -``` - -## Fields - -Fields are key-value metrics associated with the measurement. Every line must have at least one field. Multiple fields must be separated with commas and not spaces. - -Field keys are always strings and follow the same syntactical rules as described above for tag keys and values. Field values can be one of four types. The first value written for a given field on a given measurement defines the type of that field for all series under that measurement. - -* _integer_ - Numeric values that do not include a decimal and are followed by a trailing i when inserted (e.g. 1i, 345i, 2015i, -10i). Note that all values must have a trailing i. If they do not, they will be written as floats. -* _float_ - Numeric values that are not followed by a trailing i. (e.g. 1, 1.0, -3.14, 6.0e+5, 10). -* _boolean_ - A value indicating true or false. Valid boolean strings are (t, T, true, TRUE, f, F, false, and FALSE). -* _string_ - A text value. All string values _must_ be surrounded in double-quotes `"`. If the string contains -a double-quote or backslashes, it must be escaped with a backslash, e.g. `\"`, `\\`. - - -``` -# integer value -cpu value=1i - -cpu value=1.1i # will result in a parse error - -# float value -cpu_load value=1 - -cpu_load value=1.0 - -cpu_load value=1.2 - -# boolean value -error fatal=true - -# string value -event msg="logged out" - -# multiple values -cpu load=10,alert=true,reason="value above maximum threshold" -``` - -## Timestamp - -The timestamp section is optional but should be specified if possible. The value is an integer representing nanoseconds since the epoch. If the timestamp is not provided the point will inherit the server's local timestamp. - -Some write APIs allow passing a lower precision. If the API supports a lower precision, the timestamp may also be -an integer epoch in microseconds, milliseconds, seconds, minutes or hours. - -## Full Example -A full example is shown below. -``` -cpu,host=server01,region=uswest value=1 1434055562000000000 -cpu,host=server02,region=uswest value=3 1434055562000010000 -``` -In this example the first line shows a `measurement` of "cpu", there are two tags "host" and "region", the `value` is 1.0, and the `timestamp` is 1434055562000000000. Following this is a second line, also a point in the `measurement` "cpu" but belonging to a different "host". 
-``` -cpu,host=server\ 01,region=uswest value=1,msg="all systems nominal" -cpu,host=server\ 01,region=us\,west value_int=1i -``` -In these examples, the "host" is set to `server 01`. The field value associated with field key `msg` is double-quoted, as it is a string. The second example shows a region of `us,west` with the comma properly escaped. In the first example `value` is written as a floating point number. In the second, `value_int` is an integer. - -# Distributed Queries - diff --git a/tsdb/config.go b/tsdb/config.go deleted file mode 100644 index 731d7a4940f..00000000000 --- a/tsdb/config.go +++ /dev/null @@ -1,222 +0,0 @@ -package tsdb - -import ( - "errors" - "fmt" - "time" - - "github.com/influxdata/influxdb/v2/toml" -) - -const ( - // DefaultEngine is the default engine for new shards - DefaultEngine = "tsm1" - - // DefaultIndex is the default index for new shards - DefaultIndex = TSI1IndexName - - // tsdb/engine/wal configuration options - - // Default settings for TSM - - // DefaultCacheMaxMemorySize is the maximum size a shard's cache can - // reach before it starts rejecting writes. - DefaultCacheMaxMemorySize = 1024 * 1024 * 1024 // 1GB - - // DefaultCacheSnapshotMemorySize is the size at which the engine will - // snapshot the cache and write it to a TSM file, freeing up memory - DefaultCacheSnapshotMemorySize = 25 * 1024 * 1024 // 25MB - - // DefaultCacheSnapshotWriteColdDuration is the length of time at which - // the engine will snapshot the cache and write it to a new TSM file if - // the shard hasn't received writes or deletes - DefaultCacheSnapshotWriteColdDuration = time.Duration(10 * time.Minute) - - // DefaultCompactFullWriteColdDuration is the duration at which the engine - // will compact all TSM files in a shard if it hasn't received a write or delete - DefaultCompactFullWriteColdDuration = time.Duration(4 * time.Hour) - - // DefaultCompactThroughput is the rate limit in bytes per second that we - // will allow TSM compactions to write to disk. Not that short bursts are allowed - // to happen at a possibly larger value, set by DefaultCompactThroughputBurst. - // A value of 0 here will disable compaction rate limiting - DefaultCompactThroughput = 48 * 1024 * 1024 - - // DefaultCompactThroughputBurst is the rate limit in bytes per second that we - // will allow TSM compactions to write to disk. If this is not set, the burst value - // will be set to equal the normal throughput - DefaultCompactThroughputBurst = 48 * 1024 * 1024 - - // DefaultMaxPointsPerBlock is the maximum number of points in an encoded - // block in a TSM file - DefaultMaxPointsPerBlock = 1000 - - // DefaultMaxValuesPerTag is the maximum number of values a tag can have within a measurement. - DefaultMaxValuesPerTag = 100000 - - // DefaultMaxConcurrentCompactions is the maximum number of concurrent full and level compactions - // that can run at one time. A value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime. - DefaultMaxConcurrentCompactions = 0 - - // DefaultMaxIndexLogFileSize is the default threshold, in bytes, when an index - // write-ahead log file will compact into an index file. - DefaultMaxIndexLogFileSize = 1 * 1024 * 1024 // 1MB - - // DefaultSeriesIDSetCacheSize is the default number of series ID sets to cache in the TSI index. - DefaultSeriesIDSetCacheSize = 100 - - // DefaultSeriesFileMaxConcurrentSnapshotCompactions is the maximum number of concurrent series - // partition snapshot compactions that can run at one time. 
- // A value of 0 results in runtime.GOMAXPROCS(0). - DefaultSeriesFileMaxConcurrentSnapshotCompactions = 0 -) - -// Config holds the configuration for the tsbd package. -type Config struct { - Dir string `toml:"dir"` - Engine string `toml:"-"` - Index string `toml:"index-version"` - - // General WAL configuration options - WALDir string `toml:"wal-dir"` - - // WALMaxConcurrentWrites sets the max number of WAL writes that can be attempted at one time. - // In reality only one write to disk can run at a time, but we allow the preceding encoding steps - // to run concurrently. This can cause allocations to increase quickly when writing to a slow disk. - // Set to 0 to use the default ( * 2). - WALMaxConcurrentWrites int `toml:"wal-max-concurrent-writes"` - - // WALMaxWriteDelay is the max amount of time the WAL will wait to begin a write when there are - // already WALMaxConcurrentWrites in progress. A value of 0 disables any timeout. - WALMaxWriteDelay time.Duration `toml:"wal-max-write-delay"` - - // WALFsyncDelay is the amount of time that a write will wait before fsyncing. A duration - // greater than 0 can be used to batch up multiple fsync calls. This is useful for slower - // disks or when WAL write contention is seen. A value of 0 fsyncs every write to the WAL. - WALFsyncDelay toml.Duration `toml:"wal-fsync-delay"` - - // Enables unicode validation on series keys on write. - ValidateKeys bool `toml:"validate-keys"` - - // When true, skips size validation on fields - SkipFieldSizeValidation bool `toml:"skip-field-size-validation"` - - // Query logging - QueryLogEnabled bool `toml:"query-log-enabled"` - - // Compaction options for tsm1 (descriptions above with defaults) - CacheMaxMemorySize toml.Size `toml:"cache-max-memory-size"` - CacheSnapshotMemorySize toml.Size `toml:"cache-snapshot-memory-size"` - CacheSnapshotWriteColdDuration toml.Duration `toml:"cache-snapshot-write-cold-duration"` - CompactFullWriteColdDuration toml.Duration `toml:"compact-full-write-cold-duration"` - CompactThroughput toml.Size `toml:"compact-throughput"` - CompactThroughputBurst toml.Size `toml:"compact-throughput-burst"` - - // Limits - - // MaxConcurrentCompactions is the maximum number of concurrent level and full compactions - // that can be running at one time across all shards. Compactions scheduled to run when the - // limit is reached are blocked until a running compaction completes. Snapshot compactions are - // not affected by this limit. A value of 0 limits compactions to runtime.GOMAXPROCS(0). - MaxConcurrentCompactions int `toml:"max-concurrent-compactions"` - - // MaxIndexLogFileSize is the threshold, in bytes, when an index write-ahead log file will - // compact into an index file. Lower sizes will cause log files to be compacted more quickly - // and result in lower heap usage at the expense of write throughput. Higher sizes will - // be compacted less frequently, store more series in-memory, and provide higher write throughput. - MaxIndexLogFileSize toml.Size `toml:"max-index-log-file-size"` - - // SeriesIDSetCacheSize is the number items that can be cached within the TSI index. TSI caching can help - // with query performance when the same tag key/value predicates are commonly used on queries. - // Setting series-id-set-cache-size to 0 disables the cache. 
- SeriesIDSetCacheSize int `toml:"series-id-set-cache-size"` - - // SeriesFileMaxConcurrentSnapshotCompactions is the maximum number of concurrent snapshot compactions - // that can be running at one time across all series partitions in a database. Snapshots scheduled - // to run when the limit is reached are blocked until a running snapshot completes. Only snapshot - // compactions are affected by this limit. A value of 0 limits snapshot compactions to the lesser of - // 8 (series file partition quantity) and runtime.GOMAXPROCS(0). - SeriesFileMaxConcurrentSnapshotCompactions int `toml:"series-file-max-concurrent-snapshot-compactions"` - - TraceLoggingEnabled bool `toml:"trace-logging-enabled"` - - // TSMWillNeed controls whether we hint to the kernel that we intend to - // page in mmap'd sections of TSM files. This setting defaults to off, as it has - // been found to be problematic in some cases. It may help users who have - // slow disks. - TSMWillNeed bool `toml:"tsm-use-madv-willneed"` -} - -// NewConfig returns the default configuration for tsdb. -func NewConfig() Config { - return Config{ - Engine: DefaultEngine, - Index: DefaultIndex, - - QueryLogEnabled: true, - - CacheMaxMemorySize: toml.Size(DefaultCacheMaxMemorySize), - CacheSnapshotMemorySize: toml.Size(DefaultCacheSnapshotMemorySize), - CacheSnapshotWriteColdDuration: toml.Duration(DefaultCacheSnapshotWriteColdDuration), - CompactFullWriteColdDuration: toml.Duration(DefaultCompactFullWriteColdDuration), - CompactThroughput: toml.Size(DefaultCompactThroughput), - CompactThroughputBurst: toml.Size(DefaultCompactThroughputBurst), - - MaxConcurrentCompactions: DefaultMaxConcurrentCompactions, - - WALMaxWriteDelay: 10 * time.Minute, - - MaxIndexLogFileSize: toml.Size(DefaultMaxIndexLogFileSize), - SeriesIDSetCacheSize: DefaultSeriesIDSetCacheSize, - - SeriesFileMaxConcurrentSnapshotCompactions: DefaultSeriesFileMaxConcurrentSnapshotCompactions, - - TraceLoggingEnabled: false, - TSMWillNeed: false, - } -} - -// Validate validates the configuration hold by c. -func (c *Config) Validate() error { - if c.Dir == "" { - return errors.New("Data.Dir must be specified") - } else if c.WALDir == "" { - return errors.New("Data.WALDir must be specified") - } - - if c.MaxConcurrentCompactions < 0 { - return errors.New("max-concurrent-compactions must be non-negative") - } - - if c.SeriesIDSetCacheSize < 0 { - return errors.New("series-id-set-cache-size must be non-negative") - } - - if c.SeriesFileMaxConcurrentSnapshotCompactions < 0 { - return errors.New("series-file-max-concurrent-compactions must be non-negative") - } - - valid := false - for _, e := range RegisteredEngines() { - if e == c.Engine { - valid = true - break - } - } - if !valid { - return fmt.Errorf("unrecognized engine %s", c.Engine) - } - - valid = false - for _, e := range RegisteredIndexes() { - if e == c.Index { - valid = true - break - } - } - if !valid { - return fmt.Errorf("unrecognized index %s", c.Index) - } - - return nil -} diff --git a/tsdb/config_test.go b/tsdb/config_test.go deleted file mode 100644 index 1dc1e2ab4de..00000000000 --- a/tsdb/config_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package tsdb_test - -import ( - "testing" - "time" - - "github.com/BurntSushi/toml" - "github.com/influxdata/influxdb/v2/tsdb" -) - -func TestConfig_Parse(t *testing.T) { - // Parse configuration. 
- c := tsdb.NewConfig() - if _, err := toml.Decode(` -dir = "/var/lib/influxdb/data" -wal-dir = "/var/lib/influxdb/wal" -wal-fsync-delay = "10s" -tsm-use-madv-willneed = true -`, &c); err != nil { - t.Fatal(err) - } - - if err := c.Validate(); err != nil { - t.Errorf("unexpected validate error: %s", err) - } - - if got, exp := c.Dir, "/var/lib/influxdb/data"; got != exp { - t.Errorf("unexpected dir:\n\nexp=%v\n\ngot=%v\n\n", exp, got) - } - if got, exp := c.WALDir, "/var/lib/influxdb/wal"; got != exp { - t.Errorf("unexpected wal-dir:\n\nexp=%v\n\ngot=%v\n\n", exp, got) - } - if got, exp := c.WALFsyncDelay, time.Duration(10*time.Second); time.Duration(got).Nanoseconds() != exp.Nanoseconds() { - t.Errorf("unexpected wal-fsync-delay:\n\nexp=%v\n\ngot=%v\n\n", exp, got) - } - if got, exp := c.TSMWillNeed, true; got != exp { - t.Errorf("unexpected tsm-madv-willneed:\n\nexp=%v\n\ngot=%v\n\n", exp, got) - } -} - -func TestConfig_Validate_Error(t *testing.T) { - c := tsdb.NewConfig() - if err := c.Validate(); err == nil || err.Error() != "Data.Dir must be specified" { - t.Errorf("unexpected error: %s", err) - } - - c.Dir = "/var/lib/influxdb/data" - if err := c.Validate(); err == nil || err.Error() != "Data.WALDir must be specified" { - t.Errorf("unexpected error: %s", err) - } - - c.WALDir = "/var/lib/influxdb/wal" - c.Engine = "fake1" - if err := c.Validate(); err == nil || err.Error() != "unrecognized engine fake1" { - t.Errorf("unexpected error: %s", err) - } - - c.Engine = "tsm1" - c.Index = "foo" - if err := c.Validate(); err == nil || err.Error() != "unrecognized index foo" { - t.Errorf("unexpected error: %s", err) - } - - c.Index = tsdb.TSI1IndexName - if err := c.Validate(); err != nil { - t.Error(err) - } - - c.SeriesIDSetCacheSize = -1 - if err := c.Validate(); err == nil || err.Error() != "series-id-set-cache-size must be non-negative" { - t.Errorf("unexpected error: %s", err) - } -} - -func TestConfig_ByteSizes(t *testing.T) { - // Parse configuration. - c := tsdb.NewConfig() - if _, err := toml.Decode(` -dir = "/var/lib/influxdb/data" -wal-dir = "/var/lib/influxdb/wal" -wal-fsync-delay = "10s" -cache-max-memory-size = 5368709120 -cache-snapshot-memory-size = 104857600 -`, &c); err != nil { - t.Fatal(err) - } - - if err := c.Validate(); err != nil { - t.Errorf("unexpected validate error: %s", err) - } - - if got, exp := c.Dir, "/var/lib/influxdb/data"; got != exp { - t.Errorf("unexpected dir:\n\nexp=%v\n\ngot=%v\n\n", exp, got) - } - if got, exp := c.WALDir, "/var/lib/influxdb/wal"; got != exp { - t.Errorf("unexpected wal-dir:\n\nexp=%v\n\ngot=%v\n\n", exp, got) - } - if got, exp := c.WALFsyncDelay, time.Duration(10*time.Second); time.Duration(got).Nanoseconds() != exp.Nanoseconds() { - t.Errorf("unexpected wal-fsync-delay:\n\nexp=%v\n\ngot=%v\n\n", exp, got) - } - if got, exp := c.CacheMaxMemorySize, uint64(5<<30); uint64(got) != exp { - t.Errorf("unexpected cache-max-memory-size:\n\nexp=%v\n\ngot=%v\n\n", exp, got) - } - if got, exp := c.CacheSnapshotMemorySize, uint64(100<<20); uint64(got) != exp { - t.Errorf("unexpected cache-snapshot-memory-size:\n\nexp=%v\n\ngot=%v\n\n", exp, got) - } -} - -func TestConfig_HumanReadableSizes(t *testing.T) { - // Parse configuration. 
- c := tsdb.NewConfig() - if _, err := toml.Decode(` -dir = "/var/lib/influxdb/data" -wal-dir = "/var/lib/influxdb/wal" -wal-fsync-delay = "10s" -cache-max-memory-size = "5gib" -cache-snapshot-memory-size = "100mib" -`, &c); err != nil { - t.Fatal(err) - } - - if err := c.Validate(); err != nil { - t.Errorf("unexpected validate error: %s", err) - } - - if got, exp := c.Dir, "/var/lib/influxdb/data"; got != exp { - t.Errorf("unexpected dir:\n\nexp=%v\n\ngot=%v\n\n", exp, got) - } - if got, exp := c.WALDir, "/var/lib/influxdb/wal"; got != exp { - t.Errorf("unexpected wal-dir:\n\nexp=%v\n\ngot=%v\n\n", exp, got) - } - if got, exp := c.WALFsyncDelay, time.Duration(10*time.Second); time.Duration(got).Nanoseconds() != exp.Nanoseconds() { - t.Errorf("unexpected wal-fsync-delay:\n\nexp=%v\n\ngot=%v\n\n", exp, got) - } - if got, exp := c.CacheMaxMemorySize, uint64(5<<30); uint64(got) != exp { - t.Errorf("unexpected cache-max-memory-size:\n\nexp=%v\n\ngot=%v\n\n", exp, got) - } - if got, exp := c.CacheSnapshotMemorySize, uint64(100<<20); uint64(got) != exp { - t.Errorf("unexpected cache-snapshot-memory-size:\n\nexp=%v\n\ngot=%v\n\n", exp, got) - } -} diff --git a/tsdb/cursor.go b/tsdb/cursor.go deleted file mode 100644 index 71e034629d0..00000000000 --- a/tsdb/cursor.go +++ /dev/null @@ -1,51 +0,0 @@ -package tsdb - -import ( - "context" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -type ( - IntegerArray = cursors.IntegerArray - FloatArray = cursors.FloatArray - UnsignedArray = cursors.UnsignedArray - StringArray = cursors.StringArray - BooleanArray = cursors.BooleanArray - - IntegerArrayCursor = cursors.IntegerArrayCursor - FloatArrayCursor = cursors.FloatArrayCursor - UnsignedArrayCursor = cursors.UnsignedArrayCursor - StringArrayCursor = cursors.StringArrayCursor - BooleanArrayCursor = cursors.BooleanArrayCursor - - Cursor = cursors.Cursor - CursorStats = cursors.CursorStats - CursorRequest = cursors.CursorRequest - CursorIterator = cursors.CursorIterator - CursorIterators = cursors.CursorIterators -) - -func NewIntegerArrayLen(sz int) *IntegerArray { return cursors.NewIntegerArrayLen(sz) } -func NewFloatArrayLen(sz int) *FloatArray { return cursors.NewFloatArrayLen(sz) } -func NewUnsignedArrayLen(sz int) *UnsignedArray { return cursors.NewUnsignedArrayLen(sz) } -func NewStringArrayLen(sz int) *StringArray { return cursors.NewStringArrayLen(sz) } -func NewBooleanArrayLen(sz int) *BooleanArray { return cursors.NewBooleanArrayLen(sz) } - -// EOF represents a "not found" key returned by a Cursor. -const EOF = query.ZeroTime - -func CreateCursorIterators(ctx context.Context, shards []*Shard) (CursorIterators, error) { - q := make(CursorIterators, 0, len(shards)) - for _, s := range shards { - // possible errors are ErrEngineClosed or ErrShardDisabled, so we can safely skip those shards - if cq, err := s.CreateCursorIterator(ctx); cq != nil && err == nil { - q = append(q, cq) - } - } - if len(q) == 0 { - return nil, nil - } - return q, nil -} diff --git a/tsdb/cursors/arrayvalues.gen.go b/tsdb/cursors/arrayvalues.gen.go deleted file mode 100644 index 9eaffa8e40d..00000000000 --- a/tsdb/cursors/arrayvalues.gen.go +++ /dev/null @@ -1,1107 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! 
-// Source: arrayvalues.gen.go.tmpl - -package cursors - -type FloatArray struct { - Timestamps []int64 - Values []float64 -} - -func NewFloatArrayLen(sz int) *FloatArray { - return &FloatArray{ - Timestamps: make([]int64, sz), - Values: make([]float64, sz), - } -} - -func (a *FloatArray) MinTime() int64 { - return a.Timestamps[0] -} - -func (a *FloatArray) MaxTime() int64 { - return a.Timestamps[len(a.Timestamps)-1] -} - -func (a *FloatArray) Len() int { - return len(a.Timestamps) -} - -// search performs a binary search for UnixNano() v in a -// and returns the position, i, where v would be inserted. -// An additional check of a.Timestamps[i] == v is necessary -// to determine if the value v exists. -func (a *FloatArray) search(v int64) int { - // Define: f(x) → a.Timestamps[x] < v - // Define: f(-1) == true, f(n) == false - // Invariant: f(lo-1) == true, f(hi) == false - lo := 0 - hi := a.Len() - for lo < hi { - mid := int(uint(lo+hi) >> 1) - if a.Timestamps[mid] < v { - lo = mid + 1 // preserves f(lo-1) == true - } else { - hi = mid // preserves f(hi) == false - } - } - - // lo == hi - return lo -} - -// FindRange returns the positions where min and max would be -// inserted into the array. If a[0].UnixNano() > max or -// a[len-1].UnixNano() < min then FindRange returns (-1, -1) -// indicating the array is outside the [min, max]. The values must -// be deduplicated and sorted before calling FindRange or the results -// are undefined. -func (a *FloatArray) FindRange(min, max int64) (int, int) { - if a.Len() == 0 || min > max { - return -1, -1 - } - - minVal := a.MinTime() - maxVal := a.MaxTime() - - if maxVal < min || minVal > max { - return -1, -1 - } - - return a.search(min), a.search(max) -} - -// Exclude removes the subset of values in [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a *FloatArray) Exclude(min, max int64) { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if rmax < a.Len() { - if a.Timestamps[rmax] == max { - rmax++ - } - rest := a.Len() - rmax - if rest > 0 { - ts := a.Timestamps[:rmin+rest] - copy(ts[rmin:], a.Timestamps[rmax:]) - a.Timestamps = ts - - vs := a.Values[:rmin+rest] - copy(vs[rmin:], a.Values[rmax:]) - a.Values = vs - return - } - } - - a.Timestamps = a.Timestamps[:rmin] - a.Values = a.Values[:rmin] -} - -// Include returns the subset values between min and max inclusive. The values must -// be deduplicated and sorted before calling Include or the results are undefined. -func (a *FloatArray) Include(min, max int64) { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - a.Timestamps = a.Timestamps[:0] - a.Values = a.Values[:0] - return - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if rmax < a.Len() && a.Timestamps[rmax] == max { - rmax++ - } - - if rmin > -1 { - ts := a.Timestamps[:rmax-rmin] - copy(ts, a.Timestamps[rmin:rmax]) - a.Timestamps = ts - vs := a.Values[:rmax-rmin] - copy(vs, a.Values[rmin:rmax]) - a.Values = vs - } else { - a.Timestamps = a.Timestamps[:rmax] - a.Values = a.Values[:rmax] - } -} - -// Merge overlays b to top of a. If two values conflict with -// the same timestamp, b is used. Both a and b must be sorted -// in ascending order. -func (a *FloatArray) Merge(b *FloatArray) { - if a.Len() == 0 { - *a = *b - return - } - - if b.Len() == 0 { - return - } - - // Normally, both a and b should not contain duplicates. 
Due to a bug in older versions, it's - // possible stored blocks might contain duplicate values. Remove them if they exists before - // merging. - // a = a.Deduplicate() - // b = b.Deduplicate() - - if a.MaxTime() < b.MinTime() { - a.Timestamps = append(a.Timestamps, b.Timestamps...) - a.Values = append(a.Values, b.Values...) - return - } - - if b.MaxTime() < a.MinTime() { - var tmp FloatArray - tmp.Timestamps = append(b.Timestamps, a.Timestamps...) - tmp.Values = append(b.Values, a.Values...) - *a = tmp - return - } - - out := NewFloatArrayLen(a.Len() + b.Len()) - i, j, k := 0, 0, 0 - for i < len(a.Timestamps) && j < len(b.Timestamps) { - if a.Timestamps[i] < b.Timestamps[j] { - out.Timestamps[k] = a.Timestamps[i] - out.Values[k] = a.Values[i] - i++ - } else if a.Timestamps[i] == b.Timestamps[j] { - out.Timestamps[k] = b.Timestamps[j] - out.Values[k] = b.Values[j] - i++ - j++ - } else { - out.Timestamps[k] = b.Timestamps[j] - out.Values[k] = b.Values[j] - j++ - } - k++ - } - - if i < len(a.Timestamps) { - n := copy(out.Timestamps[k:], a.Timestamps[i:]) - copy(out.Values[k:], a.Values[i:]) - k += n - } else if j < len(b.Timestamps) { - n := copy(out.Timestamps[k:], b.Timestamps[j:]) - copy(out.Values[k:], b.Values[j:]) - k += n - } - - a.Timestamps = out.Timestamps[:k] - a.Values = out.Values[:k] -} - -type IntegerArray struct { - Timestamps []int64 - Values []int64 -} - -func NewIntegerArrayLen(sz int) *IntegerArray { - return &IntegerArray{ - Timestamps: make([]int64, sz), - Values: make([]int64, sz), - } -} - -func (a *IntegerArray) MinTime() int64 { - return a.Timestamps[0] -} - -func (a *IntegerArray) MaxTime() int64 { - return a.Timestamps[len(a.Timestamps)-1] -} - -func (a *IntegerArray) Len() int { - return len(a.Timestamps) -} - -// search performs a binary search for UnixNano() v in a -// and returns the position, i, where v would be inserted. -// An additional check of a.Timestamps[i] == v is necessary -// to determine if the value v exists. -func (a *IntegerArray) search(v int64) int { - // Define: f(x) → a.Timestamps[x] < v - // Define: f(-1) == true, f(n) == false - // Invariant: f(lo-1) == true, f(hi) == false - lo := 0 - hi := a.Len() - for lo < hi { - mid := int(uint(lo+hi) >> 1) - if a.Timestamps[mid] < v { - lo = mid + 1 // preserves f(lo-1) == true - } else { - hi = mid // preserves f(hi) == false - } - } - - // lo == hi - return lo -} - -// FindRange returns the positions where min and max would be -// inserted into the array. If a[0].UnixNano() > max or -// a[len-1].UnixNano() < min then FindRange returns (-1, -1) -// indicating the array is outside the [min, max]. The values must -// be deduplicated and sorted before calling FindRange or the results -// are undefined. -func (a *IntegerArray) FindRange(min, max int64) (int, int) { - if a.Len() == 0 || min > max { - return -1, -1 - } - - minVal := a.MinTime() - maxVal := a.MaxTime() - - if maxVal < min || minVal > max { - return -1, -1 - } - - return a.search(min), a.search(max) -} - -// Exclude removes the subset of values in [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. 
-func (a *IntegerArray) Exclude(min, max int64) { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if rmax < a.Len() { - if a.Timestamps[rmax] == max { - rmax++ - } - rest := a.Len() - rmax - if rest > 0 { - ts := a.Timestamps[:rmin+rest] - copy(ts[rmin:], a.Timestamps[rmax:]) - a.Timestamps = ts - - vs := a.Values[:rmin+rest] - copy(vs[rmin:], a.Values[rmax:]) - a.Values = vs - return - } - } - - a.Timestamps = a.Timestamps[:rmin] - a.Values = a.Values[:rmin] -} - -// Include returns the subset values between min and max inclusive. The values must -// be deduplicated and sorted before calling Include or the results are undefined. -func (a *IntegerArray) Include(min, max int64) { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - a.Timestamps = a.Timestamps[:0] - a.Values = a.Values[:0] - return - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if rmax < a.Len() && a.Timestamps[rmax] == max { - rmax++ - } - - if rmin > -1 { - ts := a.Timestamps[:rmax-rmin] - copy(ts, a.Timestamps[rmin:rmax]) - a.Timestamps = ts - vs := a.Values[:rmax-rmin] - copy(vs, a.Values[rmin:rmax]) - a.Values = vs - } else { - a.Timestamps = a.Timestamps[:rmax] - a.Values = a.Values[:rmax] - } -} - -// Merge overlays b to top of a. If two values conflict with -// the same timestamp, b is used. Both a and b must be sorted -// in ascending order. -func (a *IntegerArray) Merge(b *IntegerArray) { - if a.Len() == 0 { - *a = *b - return - } - - if b.Len() == 0 { - return - } - - // Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's - // possible stored blocks might contain duplicate values. Remove them if they exists before - // merging. - // a = a.Deduplicate() - // b = b.Deduplicate() - - if a.MaxTime() < b.MinTime() { - a.Timestamps = append(a.Timestamps, b.Timestamps...) - a.Values = append(a.Values, b.Values...) - return - } - - if b.MaxTime() < a.MinTime() { - var tmp IntegerArray - tmp.Timestamps = append(b.Timestamps, a.Timestamps...) - tmp.Values = append(b.Values, a.Values...) 
- *a = tmp - return - } - - out := NewIntegerArrayLen(a.Len() + b.Len()) - i, j, k := 0, 0, 0 - for i < len(a.Timestamps) && j < len(b.Timestamps) { - if a.Timestamps[i] < b.Timestamps[j] { - out.Timestamps[k] = a.Timestamps[i] - out.Values[k] = a.Values[i] - i++ - } else if a.Timestamps[i] == b.Timestamps[j] { - out.Timestamps[k] = b.Timestamps[j] - out.Values[k] = b.Values[j] - i++ - j++ - } else { - out.Timestamps[k] = b.Timestamps[j] - out.Values[k] = b.Values[j] - j++ - } - k++ - } - - if i < len(a.Timestamps) { - n := copy(out.Timestamps[k:], a.Timestamps[i:]) - copy(out.Values[k:], a.Values[i:]) - k += n - } else if j < len(b.Timestamps) { - n := copy(out.Timestamps[k:], b.Timestamps[j:]) - copy(out.Values[k:], b.Values[j:]) - k += n - } - - a.Timestamps = out.Timestamps[:k] - a.Values = out.Values[:k] -} - -type UnsignedArray struct { - Timestamps []int64 - Values []uint64 -} - -func NewUnsignedArrayLen(sz int) *UnsignedArray { - return &UnsignedArray{ - Timestamps: make([]int64, sz), - Values: make([]uint64, sz), - } -} - -func (a *UnsignedArray) MinTime() int64 { - return a.Timestamps[0] -} - -func (a *UnsignedArray) MaxTime() int64 { - return a.Timestamps[len(a.Timestamps)-1] -} - -func (a *UnsignedArray) Len() int { - return len(a.Timestamps) -} - -// search performs a binary search for UnixNano() v in a -// and returns the position, i, where v would be inserted. -// An additional check of a.Timestamps[i] == v is necessary -// to determine if the value v exists. -func (a *UnsignedArray) search(v int64) int { - // Define: f(x) → a.Timestamps[x] < v - // Define: f(-1) == true, f(n) == false - // Invariant: f(lo-1) == true, f(hi) == false - lo := 0 - hi := a.Len() - for lo < hi { - mid := int(uint(lo+hi) >> 1) - if a.Timestamps[mid] < v { - lo = mid + 1 // preserves f(lo-1) == true - } else { - hi = mid // preserves f(hi) == false - } - } - - // lo == hi - return lo -} - -// FindRange returns the positions where min and max would be -// inserted into the array. If a[0].UnixNano() > max or -// a[len-1].UnixNano() < min then FindRange returns (-1, -1) -// indicating the array is outside the [min, max]. The values must -// be deduplicated and sorted before calling FindRange or the results -// are undefined. -func (a *UnsignedArray) FindRange(min, max int64) (int, int) { - if a.Len() == 0 || min > max { - return -1, -1 - } - - minVal := a.MinTime() - maxVal := a.MaxTime() - - if maxVal < min || minVal > max { - return -1, -1 - } - - return a.search(min), a.search(max) -} - -// Exclude removes the subset of values in [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a *UnsignedArray) Exclude(min, max int64) { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if rmax < a.Len() { - if a.Timestamps[rmax] == max { - rmax++ - } - rest := a.Len() - rmax - if rest > 0 { - ts := a.Timestamps[:rmin+rest] - copy(ts[rmin:], a.Timestamps[rmax:]) - a.Timestamps = ts - - vs := a.Values[:rmin+rest] - copy(vs[rmin:], a.Values[rmax:]) - a.Values = vs - return - } - } - - a.Timestamps = a.Timestamps[:rmin] - a.Values = a.Values[:rmin] -} - -// Include returns the subset values between min and max inclusive. The values must -// be deduplicated and sorted before calling Include or the results are undefined. 
-func (a *UnsignedArray) Include(min, max int64) { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - a.Timestamps = a.Timestamps[:0] - a.Values = a.Values[:0] - return - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if rmax < a.Len() && a.Timestamps[rmax] == max { - rmax++ - } - - if rmin > -1 { - ts := a.Timestamps[:rmax-rmin] - copy(ts, a.Timestamps[rmin:rmax]) - a.Timestamps = ts - vs := a.Values[:rmax-rmin] - copy(vs, a.Values[rmin:rmax]) - a.Values = vs - } else { - a.Timestamps = a.Timestamps[:rmax] - a.Values = a.Values[:rmax] - } -} - -// Merge overlays b to top of a. If two values conflict with -// the same timestamp, b is used. Both a and b must be sorted -// in ascending order. -func (a *UnsignedArray) Merge(b *UnsignedArray) { - if a.Len() == 0 { - *a = *b - return - } - - if b.Len() == 0 { - return - } - - // Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's - // possible stored blocks might contain duplicate values. Remove them if they exists before - // merging. - // a = a.Deduplicate() - // b = b.Deduplicate() - - if a.MaxTime() < b.MinTime() { - a.Timestamps = append(a.Timestamps, b.Timestamps...) - a.Values = append(a.Values, b.Values...) - return - } - - if b.MaxTime() < a.MinTime() { - var tmp UnsignedArray - tmp.Timestamps = append(b.Timestamps, a.Timestamps...) - tmp.Values = append(b.Values, a.Values...) - *a = tmp - return - } - - out := NewUnsignedArrayLen(a.Len() + b.Len()) - i, j, k := 0, 0, 0 - for i < len(a.Timestamps) && j < len(b.Timestamps) { - if a.Timestamps[i] < b.Timestamps[j] { - out.Timestamps[k] = a.Timestamps[i] - out.Values[k] = a.Values[i] - i++ - } else if a.Timestamps[i] == b.Timestamps[j] { - out.Timestamps[k] = b.Timestamps[j] - out.Values[k] = b.Values[j] - i++ - j++ - } else { - out.Timestamps[k] = b.Timestamps[j] - out.Values[k] = b.Values[j] - j++ - } - k++ - } - - if i < len(a.Timestamps) { - n := copy(out.Timestamps[k:], a.Timestamps[i:]) - copy(out.Values[k:], a.Values[i:]) - k += n - } else if j < len(b.Timestamps) { - n := copy(out.Timestamps[k:], b.Timestamps[j:]) - copy(out.Values[k:], b.Values[j:]) - k += n - } - - a.Timestamps = out.Timestamps[:k] - a.Values = out.Values[:k] -} - -type StringArray struct { - Timestamps []int64 - Values []string -} - -func NewStringArrayLen(sz int) *StringArray { - return &StringArray{ - Timestamps: make([]int64, sz), - Values: make([]string, sz), - } -} - -func (a *StringArray) MinTime() int64 { - return a.Timestamps[0] -} - -func (a *StringArray) MaxTime() int64 { - return a.Timestamps[len(a.Timestamps)-1] -} - -func (a *StringArray) Len() int { - return len(a.Timestamps) -} - -// search performs a binary search for UnixNano() v in a -// and returns the position, i, where v would be inserted. -// An additional check of a.Timestamps[i] == v is necessary -// to determine if the value v exists. -func (a *StringArray) search(v int64) int { - // Define: f(x) → a.Timestamps[x] < v - // Define: f(-1) == true, f(n) == false - // Invariant: f(lo-1) == true, f(hi) == false - lo := 0 - hi := a.Len() - for lo < hi { - mid := int(uint(lo+hi) >> 1) - if a.Timestamps[mid] < v { - lo = mid + 1 // preserves f(lo-1) == true - } else { - hi = mid // preserves f(hi) == false - } - } - - // lo == hi - return lo -} - -// FindRange returns the positions where min and max would be -// inserted into the array. 
If a[0].UnixNano() > max or -// a[len-1].UnixNano() < min then FindRange returns (-1, -1) -// indicating the array is outside the [min, max]. The values must -// be deduplicated and sorted before calling FindRange or the results -// are undefined. -func (a *StringArray) FindRange(min, max int64) (int, int) { - if a.Len() == 0 || min > max { - return -1, -1 - } - - minVal := a.MinTime() - maxVal := a.MaxTime() - - if maxVal < min || minVal > max { - return -1, -1 - } - - return a.search(min), a.search(max) -} - -// Exclude removes the subset of values in [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a *StringArray) Exclude(min, max int64) { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if rmax < a.Len() { - if a.Timestamps[rmax] == max { - rmax++ - } - rest := a.Len() - rmax - if rest > 0 { - ts := a.Timestamps[:rmin+rest] - copy(ts[rmin:], a.Timestamps[rmax:]) - a.Timestamps = ts - - vs := a.Values[:rmin+rest] - copy(vs[rmin:], a.Values[rmax:]) - a.Values = vs - return - } - } - - a.Timestamps = a.Timestamps[:rmin] - a.Values = a.Values[:rmin] -} - -// Include returns the subset values between min and max inclusive. The values must -// be deduplicated and sorted before calling Include or the results are undefined. -func (a *StringArray) Include(min, max int64) { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - a.Timestamps = a.Timestamps[:0] - a.Values = a.Values[:0] - return - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if rmax < a.Len() && a.Timestamps[rmax] == max { - rmax++ - } - - if rmin > -1 { - ts := a.Timestamps[:rmax-rmin] - copy(ts, a.Timestamps[rmin:rmax]) - a.Timestamps = ts - vs := a.Values[:rmax-rmin] - copy(vs, a.Values[rmin:rmax]) - a.Values = vs - } else { - a.Timestamps = a.Timestamps[:rmax] - a.Values = a.Values[:rmax] - } -} - -// Merge overlays b to top of a. If two values conflict with -// the same timestamp, b is used. Both a and b must be sorted -// in ascending order. -func (a *StringArray) Merge(b *StringArray) { - if a.Len() == 0 { - *a = *b - return - } - - if b.Len() == 0 { - return - } - - // Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's - // possible stored blocks might contain duplicate values. Remove them if they exists before - // merging. - // a = a.Deduplicate() - // b = b.Deduplicate() - - if a.MaxTime() < b.MinTime() { - a.Timestamps = append(a.Timestamps, b.Timestamps...) - a.Values = append(a.Values, b.Values...) - return - } - - if b.MaxTime() < a.MinTime() { - var tmp StringArray - tmp.Timestamps = append(b.Timestamps, a.Timestamps...) - tmp.Values = append(b.Values, a.Values...) 
- *a = tmp - return - } - - out := NewStringArrayLen(a.Len() + b.Len()) - i, j, k := 0, 0, 0 - for i < len(a.Timestamps) && j < len(b.Timestamps) { - if a.Timestamps[i] < b.Timestamps[j] { - out.Timestamps[k] = a.Timestamps[i] - out.Values[k] = a.Values[i] - i++ - } else if a.Timestamps[i] == b.Timestamps[j] { - out.Timestamps[k] = b.Timestamps[j] - out.Values[k] = b.Values[j] - i++ - j++ - } else { - out.Timestamps[k] = b.Timestamps[j] - out.Values[k] = b.Values[j] - j++ - } - k++ - } - - if i < len(a.Timestamps) { - n := copy(out.Timestamps[k:], a.Timestamps[i:]) - copy(out.Values[k:], a.Values[i:]) - k += n - } else if j < len(b.Timestamps) { - n := copy(out.Timestamps[k:], b.Timestamps[j:]) - copy(out.Values[k:], b.Values[j:]) - k += n - } - - a.Timestamps = out.Timestamps[:k] - a.Values = out.Values[:k] -} - -type BooleanArray struct { - Timestamps []int64 - Values []bool -} - -func NewBooleanArrayLen(sz int) *BooleanArray { - return &BooleanArray{ - Timestamps: make([]int64, sz), - Values: make([]bool, sz), - } -} - -func (a *BooleanArray) MinTime() int64 { - return a.Timestamps[0] -} - -func (a *BooleanArray) MaxTime() int64 { - return a.Timestamps[len(a.Timestamps)-1] -} - -func (a *BooleanArray) Len() int { - return len(a.Timestamps) -} - -// search performs a binary search for UnixNano() v in a -// and returns the position, i, where v would be inserted. -// An additional check of a.Timestamps[i] == v is necessary -// to determine if the value v exists. -func (a *BooleanArray) search(v int64) int { - // Define: f(x) → a.Timestamps[x] < v - // Define: f(-1) == true, f(n) == false - // Invariant: f(lo-1) == true, f(hi) == false - lo := 0 - hi := a.Len() - for lo < hi { - mid := int(uint(lo+hi) >> 1) - if a.Timestamps[mid] < v { - lo = mid + 1 // preserves f(lo-1) == true - } else { - hi = mid // preserves f(hi) == false - } - } - - // lo == hi - return lo -} - -// FindRange returns the positions where min and max would be -// inserted into the array. If a[0].UnixNano() > max or -// a[len-1].UnixNano() < min then FindRange returns (-1, -1) -// indicating the array is outside the [min, max]. The values must -// be deduplicated and sorted before calling FindRange or the results -// are undefined. -func (a *BooleanArray) FindRange(min, max int64) (int, int) { - if a.Len() == 0 || min > max { - return -1, -1 - } - - minVal := a.MinTime() - maxVal := a.MaxTime() - - if maxVal < min || minVal > max { - return -1, -1 - } - - return a.search(min), a.search(max) -} - -// Exclude removes the subset of values in [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a *BooleanArray) Exclude(min, max int64) { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if rmax < a.Len() { - if a.Timestamps[rmax] == max { - rmax++ - } - rest := a.Len() - rmax - if rest > 0 { - ts := a.Timestamps[:rmin+rest] - copy(ts[rmin:], a.Timestamps[rmax:]) - a.Timestamps = ts - - vs := a.Values[:rmin+rest] - copy(vs[rmin:], a.Values[rmax:]) - a.Values = vs - return - } - } - - a.Timestamps = a.Timestamps[:rmin] - a.Values = a.Values[:rmin] -} - -// Include returns the subset values between min and max inclusive. The values must -// be deduplicated and sorted before calling Include or the results are undefined. 
-func (a *BooleanArray) Include(min, max int64) { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - a.Timestamps = a.Timestamps[:0] - a.Values = a.Values[:0] - return - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if rmax < a.Len() && a.Timestamps[rmax] == max { - rmax++ - } - - if rmin > -1 { - ts := a.Timestamps[:rmax-rmin] - copy(ts, a.Timestamps[rmin:rmax]) - a.Timestamps = ts - vs := a.Values[:rmax-rmin] - copy(vs, a.Values[rmin:rmax]) - a.Values = vs - } else { - a.Timestamps = a.Timestamps[:rmax] - a.Values = a.Values[:rmax] - } -} - -// Merge overlays b to top of a. If two values conflict with -// the same timestamp, b is used. Both a and b must be sorted -// in ascending order. -func (a *BooleanArray) Merge(b *BooleanArray) { - if a.Len() == 0 { - *a = *b - return - } - - if b.Len() == 0 { - return - } - - // Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's - // possible stored blocks might contain duplicate values. Remove them if they exists before - // merging. - // a = a.Deduplicate() - // b = b.Deduplicate() - - if a.MaxTime() < b.MinTime() { - a.Timestamps = append(a.Timestamps, b.Timestamps...) - a.Values = append(a.Values, b.Values...) - return - } - - if b.MaxTime() < a.MinTime() { - var tmp BooleanArray - tmp.Timestamps = append(b.Timestamps, a.Timestamps...) - tmp.Values = append(b.Values, a.Values...) - *a = tmp - return - } - - out := NewBooleanArrayLen(a.Len() + b.Len()) - i, j, k := 0, 0, 0 - for i < len(a.Timestamps) && j < len(b.Timestamps) { - if a.Timestamps[i] < b.Timestamps[j] { - out.Timestamps[k] = a.Timestamps[i] - out.Values[k] = a.Values[i] - i++ - } else if a.Timestamps[i] == b.Timestamps[j] { - out.Timestamps[k] = b.Timestamps[j] - out.Values[k] = b.Values[j] - i++ - j++ - } else { - out.Timestamps[k] = b.Timestamps[j] - out.Values[k] = b.Values[j] - j++ - } - k++ - } - - if i < len(a.Timestamps) { - n := copy(out.Timestamps[k:], a.Timestamps[i:]) - copy(out.Values[k:], a.Values[i:]) - k += n - } else if j < len(b.Timestamps) { - n := copy(out.Timestamps[k:], b.Timestamps[j:]) - copy(out.Values[k:], b.Values[j:]) - k += n - } - - a.Timestamps = out.Timestamps[:k] - a.Values = out.Values[:k] -} - -type TimestampArray struct { - Timestamps []int64 -} - -func NewTimestampArrayLen(sz int) *TimestampArray { - return &TimestampArray{ - Timestamps: make([]int64, sz), - } -} - -func (a *TimestampArray) MinTime() int64 { - return a.Timestamps[0] -} - -func (a *TimestampArray) MaxTime() int64 { - return a.Timestamps[len(a.Timestamps)-1] -} - -func (a *TimestampArray) Len() int { - return len(a.Timestamps) -} - -// search performs a binary search for UnixNano() v in a -// and returns the position, i, where v would be inserted. -// An additional check of a.Timestamps[i] == v is necessary -// to determine if the value v exists. -func (a *TimestampArray) search(v int64) int { - // Define: f(x) → a.Timestamps[x] < v - // Define: f(-1) == true, f(n) == false - // Invariant: f(lo-1) == true, f(hi) == false - lo := 0 - hi := a.Len() - for lo < hi { - mid := int(uint(lo+hi) >> 1) - if a.Timestamps[mid] < v { - lo = mid + 1 // preserves f(lo-1) == true - } else { - hi = mid // preserves f(hi) == false - } - } - - // lo == hi - return lo -} - -// FindRange returns the positions where min and max would be -// inserted into the array. If a[0].UnixNano() > max or -// a[len-1].UnixNano() < min then FindRange returns (-1, -1) -// indicating the array is outside the [min, max]. 
The values must -// be deduplicated and sorted before calling FindRange or the results -// are undefined. -func (a *TimestampArray) FindRange(min, max int64) (int, int) { - if a.Len() == 0 || min > max { - return -1, -1 - } - - minVal := a.MinTime() - maxVal := a.MaxTime() - - if maxVal < min || minVal > max { - return -1, -1 - } - - return a.search(min), a.search(max) -} - -// Exclude removes the subset of timestamps in [min, max]. The timestamps must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a *TimestampArray) Exclude(min, max int64) { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if rmax < a.Len() { - if a.Timestamps[rmax] == max { - rmax++ - } - rest := a.Len() - rmax - if rest > 0 { - ts := a.Timestamps[:rmin+rest] - copy(ts[rmin:], a.Timestamps[rmax:]) - a.Timestamps = ts - return - } - } - - a.Timestamps = a.Timestamps[:rmin] -} - -// Contains returns true if values exist between min and max inclusive. The -// values must be sorted before calling Contains or the results are undefined. -func (a *TimestampArray) Contains(min, max int64) bool { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return false - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if a.Timestamps[rmin] == min { - return true - } - - if rmax < a.Len() && a.Timestamps[rmax] == max { - return true - } - - return rmax-rmin > 0 -} diff --git a/tsdb/cursors/arrayvalues.gen.go.tmpl b/tsdb/cursors/arrayvalues.gen.go.tmpl deleted file mode 100644 index 3e7632aeeb9..00000000000 --- a/tsdb/cursors/arrayvalues.gen.go.tmpl +++ /dev/null @@ -1,259 +0,0 @@ -package cursors - -{{range .}} -{{- $typename := print .Name "Array" }} -{{- $hasType := or (and .Type true) false }} - -type {{ $typename }} struct { - Timestamps []int64 -{{- if $hasType }} - Values []{{.Type}} -{{- end }} -} - -func New{{$typename}}Len(sz int) *{{$typename}} { - return &{{$typename}}{ - Timestamps: make([]int64, sz), -{{- if $hasType }} - Values: make([]{{.Type}}, sz), -{{- end }} - } -} - -func (a *{{ $typename }}) MinTime() int64 { - return a.Timestamps[0] -} - -func (a *{{ $typename }}) MaxTime() int64 { - return a.Timestamps[len(a.Timestamps)-1] -} - -func (a *{{ $typename}}) Len() int { - return len(a.Timestamps) -} - -// search performs a binary search for UnixNano() v in a -// and returns the position, i, where v would be inserted. -// An additional check of a.Timestamps[i] == v is necessary -// to determine if the value v exists. -func (a *{{ $typename }}) search(v int64) int { - // Define: f(x) → a.Timestamps[x] < v - // Define: f(-1) == true, f(n) == false - // Invariant: f(lo-1) == true, f(hi) == false - lo := 0 - hi := a.Len() - for lo < hi { - mid := int(uint(lo+hi) >> 1) - if a.Timestamps[mid] < v { - lo = mid + 1 // preserves f(lo-1) == true - } else { - hi = mid // preserves f(hi) == false - } - } - - // lo == hi - return lo -} - -// FindRange returns the positions where min and max would be -// inserted into the array. If a[0].UnixNano() > max or -// a[len-1].UnixNano() < min then FindRange returns (-1, -1) -// indicating the array is outside the [min, max]. The values must -// be deduplicated and sorted before calling FindRange or the results -// are undefined. 
-func (a *{{ $typename }}) FindRange(min, max int64) (int, int) { - if a.Len() == 0 || min > max { - return -1, -1 - } - - minVal := a.MinTime() - maxVal := a.MaxTime() - - if maxVal < min || minVal > max { - return -1, -1 - } - - return a.search(min), a.search(max) -} - -{{- if $hasType }} -// Exclude removes the subset of values in [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a *{{ $typename }}) Exclude(min, max int64) { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if rmax < a.Len() { - if a.Timestamps[rmax] == max { - rmax++ - } - rest := a.Len()-rmax - if rest > 0 { - ts := a.Timestamps[:rmin+rest] - copy(ts[rmin:], a.Timestamps[rmax:]) - a.Timestamps = ts - - vs := a.Values[:rmin+rest] - copy(vs[rmin:], a.Values[rmax:]) - a.Values = vs - return - } - } - - a.Timestamps = a.Timestamps[:rmin] - a.Values = a.Values[:rmin] -} - -// Include returns the subset values between min and max inclusive. The values must -// be deduplicated and sorted before calling Include or the results are undefined. -func (a *{{ $typename }}) Include(min, max int64) { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - a.Timestamps = a.Timestamps[:0] - a.Values = a.Values[:0] - return - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if rmax < a.Len() && a.Timestamps[rmax] == max { - rmax++ - } - - if rmin > -1 { - ts := a.Timestamps[:rmax-rmin] - copy(ts, a.Timestamps[rmin:rmax]) - a.Timestamps = ts - vs := a.Values[:rmax-rmin] - copy(vs, a.Values[rmin:rmax]) - a.Values = vs - } else { - a.Timestamps = a.Timestamps[:rmax] - a.Values = a.Values[:rmax] - } -} - -// Merge overlays b to top of a. If two values conflict with -// the same timestamp, b is used. Both a and b must be sorted -// in ascending order. -func (a *{{ $typename }}) Merge(b *{{ $typename }}) { - if a.Len() == 0 { - *a = *b - return - } - - if b.Len() == 0 { - return - } - - // Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's - // possible stored blocks might contain duplicate values. Remove them if they exists before - // merging. - // a = a.Deduplicate() - // b = b.Deduplicate() - - if a.MaxTime() < b.MinTime() { - a.Timestamps = append(a.Timestamps, b.Timestamps...) - a.Values = append(a.Values, b.Values...) - return - } - - if b.MaxTime() < a.MinTime() { - var tmp {{$typename}} - tmp.Timestamps = append(b.Timestamps, a.Timestamps...) - tmp.Values = append(b.Values, a.Values...) - *a = tmp - return - } - - out := New{{$typename}}Len(a.Len()+b.Len()) - i, j, k := 0, 0, 0 - for i < len(a.Timestamps) && j < len(b.Timestamps) { - if a.Timestamps[i] < b.Timestamps[j] { - out.Timestamps[k] = a.Timestamps[i] - out.Values[k] = a.Values[i] - i++ - } else if a.Timestamps[i] == b.Timestamps[j] { - out.Timestamps[k] = b.Timestamps[j] - out.Values[k] = b.Values[j] - i++ - j++ - } else { - out.Timestamps[k] = b.Timestamps[j] - out.Values[k] = b.Values[j] - j++ - } - k++ - } - - if i < len(a.Timestamps) { - n := copy(out.Timestamps[k:], a.Timestamps[i:]) - copy(out.Values[k:], a.Values[i:]) - k += n - } else if j < len(b.Timestamps) { - n := copy(out.Timestamps[k:], b.Timestamps[j:]) - copy(out.Values[k:], b.Values[j:]) - k += n - } - - a.Timestamps = out.Timestamps[:k] - a.Values = out.Values[:k] -} -{{ else }} -// Exclude removes the subset of timestamps in [min, max]. 
The timestamps must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a *{{ $typename }}) Exclude(min, max int64) { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if rmax < a.Len() { - if a.Timestamps[rmax] == max { - rmax++ - } - rest := a.Len()-rmax - if rest > 0 { - ts := a.Timestamps[:rmin+rest] - copy(ts[rmin:], a.Timestamps[rmax:]) - a.Timestamps = ts - return - } - } - - a.Timestamps = a.Timestamps[:rmin] -} - -// Contains returns true if values exist between min and max inclusive. The -// values must be sorted before calling Contains or the results are undefined. -func (a *{{ $typename }}) Contains(min, max int64) bool { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return false - } - - // a.Timestamps[rmin] ≥ min - // a.Timestamps[rmax] ≥ max - - if a.Timestamps[rmin] == min { - return true - } - - if rmax < a.Len() && a.Timestamps[rmax] == max { - return true - } - - return rmax-rmin > 0 -} -{{ end }} - -{{ end }} diff --git a/tsdb/cursors/arrayvalues.gen.go.tmpldata b/tsdb/cursors/arrayvalues.gen.go.tmpldata deleted file mode 100644 index 7ebe5b94c1b..00000000000 --- a/tsdb/cursors/arrayvalues.gen.go.tmpldata +++ /dev/null @@ -1,26 +0,0 @@ -[ - { - "Name":"Float", - "Type":"float64" - }, - { - "Name":"Integer", - "Type":"int64" - }, - { - "Name":"Unsigned", - "Type":"uint64" - }, - { - "Name":"String", - "Type":"string" - }, - { - "Name":"Boolean", - "Type":"bool" - }, - { - "Name":"Timestamp", - "Type": null - } -] diff --git a/tsdb/cursors/arrayvalues.gen_test.go b/tsdb/cursors/arrayvalues.gen_test.go deleted file mode 100644 index f9bf6483bee..00000000000 --- a/tsdb/cursors/arrayvalues.gen_test.go +++ /dev/null @@ -1,254 +0,0 @@ -package cursors - -import ( - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" -) - -func makeIntegerArray(count int, min, max int64) *IntegerArray { - vals := NewIntegerArrayLen(count) - - ts := min - inc := (max - min) / int64(count) - - for i := 0; i < count; i++ { - vals.Timestamps[i] = ts - ts += inc - } - - return vals -} - -func makeIntegerArrayFromSlice(t []int64) *IntegerArray { - iv := NewIntegerArrayLen(len(t)) - copy(iv.Timestamps, t) - return iv -} - -func TestIntegerArray_FindRangeNoValues(t *testing.T) { - var vals IntegerArray - l, r := vals.FindRange(0, 100) - if exp := -1; l != exp { - t.Errorf("invalid l; exp=%d, got=%d", exp, l) - } - if exp := -1; r != exp { - t.Errorf("invalid r; exp=%d, got=%d", exp, r) - } -} - -func TestIntegerArray_FindRange(t *testing.T) { - vals := makeIntegerArrayFromSlice([]int64{10, 11, 13, 15, 17, 20, 21}) - - cases := []struct { - min, max int64 - l, r int - }{ - {12, 20, 2, 5}, - {22, 40, -1, -1}, - {1, 9, -1, -1}, - {1, 10, 0, 0}, - {1, 11, 0, 1}, - {15, 15, 3, 3}, - } - - for _, tc := range cases { - t.Run(fmt.Sprintf("%d→%d", tc.min, tc.max), func(t *testing.T) { - l, r := vals.FindRange(tc.min, tc.max) - if l != tc.l { - t.Errorf("left: got %d, exp %d", l, tc.l) - } - if r != tc.r { - t.Errorf("right: got %d, exp %d", r, tc.r) - } - }) - } -} - -func TestIntegerArray_Exclude(t *testing.T) { - cases := []struct { - n string - min, max int64 - exp []int64 - }{ - {"excl bad range", 18, 11, []int64{10, 12, 14, 16, 18}}, - {"excl none-lo", 0, 9, []int64{10, 12, 14, 16, 18}}, - {"excl none-hi", 19, 30, []int64{10, 12, 14, 16, 18}}, - {"excl first", 0, 10, []int64{12, 14, 16, 18}}, - {"excl last", 18, 20, []int64{10, 12, 14, 
16}}, - {"excl all but first and last", 12, 16, []int64{10, 18}}, - {"excl none in middle", 13, 13, []int64{10, 12, 14, 16, 18}}, - {"excl middle", 14, 14, []int64{10, 12, 16, 18}}, - {"excl suffix", 14, 18, []int64{10, 12}}, - } - - for _, tc := range cases { - t.Run(fmt.Sprintf("%s[%d,%d]", tc.n, tc.min, tc.max), func(t *testing.T) { - vals := makeIntegerArray(5, 10, 20) - vals.Exclude(tc.min, tc.max) - got := vals.Timestamps - if !cmp.Equal(got, tc.exp) { - t.Errorf("unexpected values -got/+exp\n%s", cmp.Diff(got, tc.exp)) - } - }) - } -} - -func TestIntegerArray_Include(t *testing.T) { - cases := []struct { - n string - min, max int64 - exp []int64 - }{ - {"incl none-lo", 0, 9, []int64{}}, - {"incl none-hi", 19, 30, []int64{}}, - {"incl first", 0, 10, []int64{10}}, - {"incl last", 18, 20, []int64{18}}, - {"incl all but first and last", 12, 16, []int64{12, 14, 16}}, - {"incl none in middle", 13, 13, []int64{}}, - {"incl middle", 14, 14, []int64{14}}, - } - - for _, tc := range cases { - t.Run(fmt.Sprintf("%s[%d,%d]", tc.n, tc.min, tc.max), func(t *testing.T) { - vals := makeIntegerArray(5, 10, 20) - vals.Include(tc.min, tc.max) - got := vals.Timestamps - if !cmp.Equal(got, tc.exp) { - t.Errorf("unexpected values -got/+exp\n%s", cmp.Diff(got, tc.exp)) - } - }) - } -} - -func makeTimestampArray(count int, min, max int64) *TimestampArray { - vals := NewTimestampArrayLen(count) - - ts := min - inc := (max - min) / int64(count) - - for i := 0; i < count; i++ { - vals.Timestamps[i] = ts - ts += inc - } - - return vals -} - -func TestTimestampArray_Contains(t *testing.T) { - cases := []struct { - n string - min, max int64 - exp bool - }{ - {"no/lo", 0, 9, false}, - {"no/hi", 19, 30, false}, - {"no/middle", 13, 13, false}, - - {"yes/first", 0, 10, true}, - {"yes/first-eq", 10, 10, true}, - {"yes/last", 18, 20, true}, - {"yes/last-eq", 18, 18, true}, - {"yes/all but first and last", 12, 16, true}, - {"yes/middle-eq", 14, 14, true}, - {"yes/middle-overlap", 13, 15, true}, - {"yes/covers", 8, 22, true}, - } - - for _, tc := range cases { - t.Run(fmt.Sprintf("%s[%d,%d]", tc.n, tc.min, tc.max), func(t *testing.T) { - vals := makeTimestampArray(5, 10, 20) - if got := vals.Contains(tc.min, tc.max); got != tc.exp { - t.Errorf("Contains -got/+exp\n%s", cmp.Diff(got, tc.exp)) - } - }) - } -} - -func benchExclude(b *testing.B, vals *IntegerArray, min, max int64) { - b.ResetTimer() - - for i := 0; i < b.N; i++ { - vals.Exclude(min, max) - } -} - -func BenchmarkIntegerArray_ExcludeNone_1000(b *testing.B) { - benchExclude(b, makeIntegerArray(1000, 1000, 2000), 0, 500) -} - -func BenchmarkIntegerArray_ExcludeMiddleHalf_1000(b *testing.B) { - benchExclude(b, makeIntegerArray(1000, 1000, 2000), 1250, 1750) -} - -func BenchmarkIntegerArray_ExcludeFirst_1000(b *testing.B) { - benchExclude(b, makeIntegerArray(1000, 1000, 2000), 0, 1000) -} - -func BenchmarkIntegerArray_ExcludeLast_1000(b *testing.B) { - benchExclude(b, makeIntegerArray(1000, 1000, 2000), 1999, 2000) -} - -func BenchmarkIntegerArray_ExcludeNone_10000(b *testing.B) { - benchExclude(b, makeIntegerArray(10000, 10000, 20000), 00, 5000) -} - -func BenchmarkIntegerArray_ExcludeMiddleHalf_10000(b *testing.B) { - benchExclude(b, makeIntegerArray(10000, 10000, 20000), 12500, 17500) -} - -func BenchmarkIntegerArray_ExcludeFirst_10000(b *testing.B) { - benchExclude(b, makeIntegerArray(10000, 10000, 20000), 0, 10000) -} - -func BenchmarkIntegerArray_ExcludeLast_10000(b *testing.B) { - benchExclude(b, makeIntegerArray(10000, 10000, 20000), 19999, 20000) -} - 
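
// The following example is a hypothetical sketch (not part of the original
// generated test file) that ties together FindRange, Include, and Exclude on
// the same sorted, deduplicated timestamps, using the makeIntegerArrayFromSlice
// helper defined above. FindRange reports the insertion positions for min and
// max; Include keeps only the values in [min, max]; Exclude removes them.
func ExampleIntegerArray_ranges() {
	l, r := makeIntegerArrayFromSlice([]int64{10, 12, 14, 16, 18}).FindRange(12, 16)
	fmt.Println(l, r)

	in := makeIntegerArrayFromSlice([]int64{10, 12, 14, 16, 18})
	in.Include(12, 16)
	fmt.Println(in.Timestamps)

	out := makeIntegerArrayFromSlice([]int64{10, 12, 14, 16, 18})
	out.Exclude(12, 16)
	fmt.Println(out.Timestamps)

	// Output:
	// 1 3
	// [12 14 16]
	// [10 18]
}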
-func benchInclude(b *testing.B, vals *IntegerArray, min, max int64) { - src := *vals - tmp := NewIntegerArrayLen(vals.Len()) - copy(tmp.Timestamps, vals.Timestamps) - copy(tmp.Values, vals.Values) - b.ResetTimer() - - for i := 0; i < b.N; i++ { - vals.Include(min, max) - *vals = src - copy(vals.Timestamps, tmp.Timestamps) - copy(vals.Values, tmp.Values) - } -} - -func BenchmarkIntegerArray_IncludeNone_1000(b *testing.B) { - benchInclude(b, makeIntegerArray(1000, 1000, 2000), 0, 500) -} - -func BenchmarkIntegerArray_IncludeMiddleHalf_1000(b *testing.B) { - benchInclude(b, makeIntegerArray(1000, 1000, 2000), 1250, 1750) -} - -func BenchmarkIntegerArray_IncludeFirst_1000(b *testing.B) { - benchInclude(b, makeIntegerArray(1000, 1000, 2000), 0, 1000) -} - -func BenchmarkIntegerArray_IncludeLast_1000(b *testing.B) { - benchInclude(b, makeIntegerArray(1000, 1000, 2000), 1999, 2000) -} - -func BenchmarkIntegerArray_IncludeNone_10000(b *testing.B) { - benchInclude(b, makeIntegerArray(10000, 10000, 20000), 00, 5000) -} - -func BenchmarkIntegerArray_IncludeMiddleHalf_10000(b *testing.B) { - benchInclude(b, makeIntegerArray(10000, 10000, 20000), 12500, 17500) -} - -func BenchmarkIntegerArray_IncludeFirst_10000(b *testing.B) { - benchInclude(b, makeIntegerArray(10000, 10000, 20000), 0, 10000) -} - -func BenchmarkIntegerArray_IncludeLast_10000(b *testing.B) { - benchInclude(b, makeIntegerArray(10000, 10000, 20000), 19999, 20000) -} diff --git a/tsdb/cursors/arrayvalues.go b/tsdb/cursors/arrayvalues.go deleted file mode 100644 index cab1fc15a4d..00000000000 --- a/tsdb/cursors/arrayvalues.go +++ /dev/null @@ -1,29 +0,0 @@ -package cursors - -func (a *FloatArray) Size() int { - // size of timestamps + values - return len(a.Timestamps)*8 + len(a.Values)*8 -} - -func (a *IntegerArray) Size() int { - // size of timestamps + values - return len(a.Timestamps)*8 + len(a.Values)*8 -} - -func (a *UnsignedArray) Size() int { - // size of timestamps + values - return len(a.Timestamps)*8 + len(a.Values)*8 -} - -func (a *StringArray) Size() int { - sz := len(a.Timestamps) * 8 - for _, s := range a.Values { - sz += len(s) - } - return sz -} - -func (a *BooleanArray) Size() int { - // size of timestamps + values - return len(a.Timestamps)*8 + len(a.Values) -} diff --git a/tsdb/cursors/arrayvalues_test.go b/tsdb/cursors/arrayvalues_test.go deleted file mode 100644 index ac991a1ee8a..00000000000 --- a/tsdb/cursors/arrayvalues_test.go +++ /dev/null @@ -1,459 +0,0 @@ -package cursors_test - -import ( - "strconv" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -func makeBooleanArray(v ...interface{}) *cursors.BooleanArray { - if len(v)&1 == 1 { - panic("invalid array length") - } - a := cursors.NewBooleanArrayLen(len(v) / 2) - for i := 0; i < len(v); i += 2 { - a.Timestamps[i/2] = int64(v[i].(int)) - a.Values[i/2] = v[i+1].(bool) - } - return a -} - -func makeFloatArray(v ...interface{}) *cursors.FloatArray { - if len(v)&1 == 1 { - panic("invalid array length") - } - a := cursors.NewFloatArrayLen(len(v) / 2) - for i := 0; i < len(v); i += 2 { - a.Timestamps[i/2] = int64(v[i].(int)) - a.Values[i/2] = v[i+1].(float64) - } - return a -} - -func makeIntegerArray(v ...interface{}) *cursors.IntegerArray { - if len(v)&1 == 1 { - panic("invalid array length") - } - a := cursors.NewIntegerArrayLen(len(v) / 2) - for i := 0; i < len(v); i += 2 { - a.Timestamps[i/2] = int64(v[i].(int)) - a.Values[i/2] = int64(v[i+1].(int)) - } - return a -} - -func 
makeUnsignedArray(v ...interface{}) *cursors.UnsignedArray { - if len(v)&1 == 1 { - panic("invalid array length") - } - a := cursors.NewUnsignedArrayLen(len(v) / 2) - for i := 0; i < len(v); i += 2 { - a.Timestamps[i/2] = int64(v[i].(int)) - a.Values[i/2] = uint64(v[i+1].(int)) - } - return a -} - -func makeStringArray(v ...interface{}) *cursors.StringArray { - if len(v)&1 == 1 { - panic("invalid array length") - } - a := cursors.NewStringArrayLen(len(v) / 2) - for i := 0; i < len(v); i += 2 { - a.Timestamps[i/2] = int64(v[i].(int)) - a.Values[i/2] = strconv.Itoa(v[i+1].(int)) - } - return a -} - -func TestBooleanArray_Merge(t *testing.T) { - tests := []struct { - name string - a, b, exp *cursors.BooleanArray - }{ - { - name: "empty a", - - a: makeBooleanArray(), - b: makeBooleanArray(1, true, 2, true), - exp: makeBooleanArray(1, true, 2, true), - }, - { - name: "empty b", - - a: makeBooleanArray(1, true, 2, true), - b: makeBooleanArray(), - exp: makeBooleanArray(1, true, 2, true), - }, - { - name: "b replaces a", - - a: makeBooleanArray(1, true), - b: makeBooleanArray( - 0, false, - 1, false, // overwrites a - 2, false, - 3, false, - 4, false, - ), - exp: makeBooleanArray(0, false, 1, false, 2, false, 3, false, 4, false), - }, - { - name: "b replaces partial a", - - a: makeBooleanArray(1, true, 2, true, 3, true, 4, true), - b: makeBooleanArray( - 1, false, // overwrites a - 2, false, // overwrites a - ), - exp: makeBooleanArray( - 1, false, // overwrites a - 2, false, // overwrites a - 3, true, - 4, true, - ), - }, - { - name: "b replaces all a", - - a: makeBooleanArray(1, true, 2, true, 3, true, 4, true), - b: makeBooleanArray(1, false, 2, false, 3, false, 4, false), - exp: makeBooleanArray(1, false, 2, false, 3, false, 4, false), - }, - { - name: "b replaces a interleaved", - a: makeBooleanArray(0, true, 1, true, 2, true, 3, true, 4, true), - b: makeBooleanArray(0, false, 2, false, 4, false), - exp: makeBooleanArray(0, false, 1, true, 2, false, 3, true, 4, false), - }, - { - name: "b merges a interleaved", - a: makeBooleanArray(0, true, 2, true, 4, true), - b: makeBooleanArray(1, false, 3, false, 5, false), - exp: makeBooleanArray(0, true, 1, false, 2, true, 3, false, 4, true, 5, false), - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - test.a.Merge(test.b) - if !cmp.Equal(test.a, test.exp) { - t.Fatalf("unexpected values -got/+exp\n%s", cmp.Diff(test.a, test.exp)) - } - }) - } -} - -func TestFloatArray_Merge(t *testing.T) { - tests := []struct { - name string - a, b, exp *cursors.FloatArray - }{ - { - name: "empty a", - - a: makeFloatArray(), - b: makeFloatArray(1, 1.1, 2, 2.1), - exp: makeFloatArray(1, 1.1, 2, 2.1), - }, - { - name: "empty b", - - a: makeFloatArray(1, 1.0, 2, 2.0), - b: makeFloatArray(), - exp: makeFloatArray(1, 1.0, 2, 2.0), - }, - { - name: "b replaces a", - - a: makeFloatArray(1, 1.0), - b: makeFloatArray( - 0, 0.1, - 1, 1.1, // overwrites a - 2, 2.1, - 3, 3.1, - 4, 4.1, - ), - exp: makeFloatArray(0, 0.1, 1, 1.1, 2, 2.1, 3, 3.1, 4, 4.1), - }, - { - name: "b replaces partial a", - - a: makeFloatArray(1, 1.0, 2, 2.0, 3, 3.0, 4, 4.0), - b: makeFloatArray( - 1, 1.1, // overwrites a - 2, 2.1, // overwrites a - ), - exp: makeFloatArray( - 1, 1.1, // overwrites a - 2, 2.1, // overwrites a - 3, 3.0, - 4, 4.0, - ), - }, - { - name: "b replaces all a", - - a: makeFloatArray(1, 1.0, 2, 2.0, 3, 3.0, 4, 4.0), - b: makeFloatArray(1, 1.1, 2, 2.1, 3, 3.1, 4, 4.1), - exp: makeFloatArray(1, 1.1, 2, 2.1, 3, 3.1, 4, 4.1), - }, - { - name: "b replaces 
a interleaved", - a: makeFloatArray(0, 0.0, 1, 1.0, 2, 2.0, 3, 3.0, 4, 4.0), - b: makeFloatArray(0, 0.1, 2, 2.1, 4, 4.1), - exp: makeFloatArray(0, 0.1, 1, 1.0, 2, 2.1, 3, 3.0, 4, 4.1), - }, - { - name: "b merges a interleaved", - a: makeFloatArray(0, 0.0, 2, 2.0, 4, 4.0), - b: makeFloatArray(1, 1.1, 3, 3.1, 5, 5.1), - exp: makeFloatArray(0, 0.0, 1, 1.1, 2, 2.0, 3, 3.1, 4, 4.0, 5, 5.1), - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - test.a.Merge(test.b) - if !cmp.Equal(test.a, test.exp) { - t.Fatalf("unexpected values -got/+exp\n%s", cmp.Diff(test.a, test.exp)) - } - }) - } -} - -func TestIntegerArray_Merge(t *testing.T) { - tests := []struct { - name string - a, b, exp *cursors.IntegerArray - }{ - { - name: "empty a", - - a: makeIntegerArray(), - b: makeIntegerArray(1, 11, 2, 21), - exp: makeIntegerArray(1, 11, 2, 21), - }, - { - name: "empty b", - - a: makeIntegerArray(1, 10, 2, 20), - b: makeIntegerArray(), - exp: makeIntegerArray(1, 10, 2, 20), - }, - { - name: "b replaces a", - - a: makeIntegerArray(1, 10), - b: makeIntegerArray( - 0, 1, - 1, 11, // overwrites a - 2, 21, - 3, 31, - 4, 41, - ), - exp: makeIntegerArray(0, 1, 1, 11, 2, 21, 3, 31, 4, 41), - }, - { - name: "b replaces partial a", - - a: makeIntegerArray(1, 10, 2, 20, 3, 30, 4, 40), - b: makeIntegerArray( - 1, 11, // overwrites a - 2, 21, // overwrites a - ), - exp: makeIntegerArray( - 1, 11, // overwrites a - 2, 21, // overwrites a - 3, 30, - 4, 40, - ), - }, - { - name: "b replaces all a", - - a: makeIntegerArray(1, 10, 2, 20, 3, 30, 4, 40), - b: makeIntegerArray(1, 11, 2, 21, 3, 31, 4, 41), - exp: makeIntegerArray(1, 11, 2, 21, 3, 31, 4, 41), - }, - { - name: "b replaces a interleaved", - a: makeIntegerArray(0, 0, 1, 10, 2, 20, 3, 30, 4, 40), - b: makeIntegerArray(0, 1, 2, 21, 4, 41), - exp: makeIntegerArray(0, 1, 1, 10, 2, 21, 3, 30, 4, 41), - }, - { - name: "b merges a interleaved", - a: makeIntegerArray(0, 00, 2, 20, 4, 40), - b: makeIntegerArray(1, 11, 3, 31, 5, 51), - exp: makeIntegerArray(0, 00, 1, 11, 2, 20, 3, 31, 4, 40, 5, 51), - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - test.a.Merge(test.b) - if !cmp.Equal(test.a, test.exp) { - t.Fatalf("unexpected values -got/+exp\n%s", cmp.Diff(test.a, test.exp)) - } - }) - } -} - -func TestUnsignedArray_Merge(t *testing.T) { - tests := []struct { - name string - a, b, exp *cursors.UnsignedArray - }{ - { - name: "empty a", - - a: makeUnsignedArray(), - b: makeUnsignedArray(1, 11, 2, 21), - exp: makeUnsignedArray(1, 11, 2, 21), - }, - { - name: "empty b", - - a: makeUnsignedArray(1, 10, 2, 20), - b: makeUnsignedArray(), - exp: makeUnsignedArray(1, 10, 2, 20), - }, - { - name: "b replaces a", - - a: makeUnsignedArray(1, 10), - b: makeUnsignedArray( - 0, 1, - 1, 11, // overwrites a - 2, 21, - 3, 31, - 4, 41, - ), - exp: makeUnsignedArray(0, 1, 1, 11, 2, 21, 3, 31, 4, 41), - }, - { - name: "b replaces partial a", - - a: makeUnsignedArray(1, 10, 2, 20, 3, 30, 4, 40), - b: makeUnsignedArray( - 1, 11, // overwrites a - 2, 21, // overwrites a - ), - exp: makeUnsignedArray( - 1, 11, // overwrites a - 2, 21, // overwrites a - 3, 30, - 4, 40, - ), - }, - { - name: "b replaces all a", - - a: makeUnsignedArray(1, 10, 2, 20, 3, 30, 4, 40), - b: makeUnsignedArray(1, 11, 2, 21, 3, 31, 4, 41), - exp: makeUnsignedArray(1, 11, 2, 21, 3, 31, 4, 41), - }, - { - name: "b replaces a interleaved", - a: makeUnsignedArray(0, 0, 1, 10, 2, 20, 3, 30, 4, 40), - b: makeUnsignedArray(0, 1, 2, 21, 4, 41), - exp: makeUnsignedArray(0, 
1, 1, 10, 2, 21, 3, 30, 4, 41), - }, - { - name: "b merges a interleaved", - a: makeUnsignedArray(0, 00, 2, 20, 4, 40), - b: makeUnsignedArray(1, 11, 3, 31, 5, 51), - exp: makeUnsignedArray(0, 00, 1, 11, 2, 20, 3, 31, 4, 40, 5, 51), - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - test.a.Merge(test.b) - if !cmp.Equal(test.a, test.exp) { - t.Fatalf("unexpected values -got/+exp\n%s", cmp.Diff(test.a, test.exp)) - } - }) - } -} - -func TestStringArray_Merge(t *testing.T) { - tests := []struct { - name string - a, b, exp *cursors.StringArray - }{ - { - name: "empty a", - - a: makeStringArray(), - b: makeStringArray(1, 11, 2, 21), - exp: makeStringArray(1, 11, 2, 21), - }, - { - name: "empty b", - - a: makeStringArray(1, 10, 2, 20), - b: makeStringArray(), - exp: makeStringArray(1, 10, 2, 20), - }, - { - name: "b replaces a", - - a: makeStringArray(1, 10), - b: makeStringArray( - 0, 1, - 1, 11, // overwrites a - 2, 21, - 3, 31, - 4, 41, - ), - exp: makeStringArray(0, 1, 1, 11, 2, 21, 3, 31, 4, 41), - }, - { - name: "b replaces partial a", - - a: makeStringArray(1, 10, 2, 20, 3, 30, 4, 40), - b: makeStringArray( - 1, 11, // overwrites a - 2, 21, // overwrites a - ), - exp: makeStringArray( - 1, 11, // overwrites a - 2, 21, // overwrites a - 3, 30, - 4, 40, - ), - }, - { - name: "b replaces all a", - - a: makeStringArray(1, 10, 2, 20, 3, 30, 4, 40), - b: makeStringArray(1, 11, 2, 21, 3, 31, 4, 41), - exp: makeStringArray(1, 11, 2, 21, 3, 31, 4, 41), - }, - { - name: "b replaces a interleaved", - a: makeStringArray(0, 0, 1, 10, 2, 20, 3, 30, 4, 40), - b: makeStringArray(0, 1, 2, 21, 4, 41), - exp: makeStringArray(0, 1, 1, 10, 2, 21, 3, 30, 4, 41), - }, - { - name: "b merges a interleaved", - a: makeStringArray(0, 00, 2, 20, 4, 40), - b: makeStringArray(1, 11, 3, 31, 5, 51), - exp: makeStringArray(0, 00, 1, 11, 2, 20, 3, 31, 4, 40, 5, 51), - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - test.a.Merge(test.b) - if !cmp.Equal(test.a, test.exp) { - t.Fatalf("unexpected values -got/+exp\n%s", cmp.Diff(test.a, test.exp)) - } - }) - } -} diff --git a/tsdb/cursors/cursor.go b/tsdb/cursors/cursor.go deleted file mode 100644 index e593fd4a6f9..00000000000 --- a/tsdb/cursors/cursor.go +++ /dev/null @@ -1,96 +0,0 @@ -package cursors - -import ( - "context" - - "github.com/influxdata/influxdb/v2/models" -) - -const DefaultMaxPointsPerBlock = 1000 - -type Cursor interface { - Close() - Err() error - Stats() CursorStats -} - -type IntegerArrayCursor interface { - Cursor - Next() *IntegerArray -} - -type FloatArrayCursor interface { - Cursor - Next() *FloatArray -} - -type UnsignedArrayCursor interface { - Cursor - Next() *UnsignedArray -} - -type StringArrayCursor interface { - Cursor - Next() *StringArray -} - -type BooleanArrayCursor interface { - Cursor - Next() *BooleanArray -} - -// CursorRequest is a request to the storage engine for a cursor to be -// created with the given name, tags, and field for a given direction -// and time range. -type CursorRequest struct { - // Name is the measurement name a cursor is requested for. - Name []byte - - // Tags is the set of series tags a cursor is requested for. - Tags models.Tags - - // Field is the selected field for the cursor that is requested. - Field string - - // Ascending is whether the cursor should move in an ascending - // or descending time order. - Ascending bool - - // StartTime is the start time of the cursor. 
It is the lower - // absolute time regardless of the Ascending flag. This value - // is an inclusive bound. - StartTime int64 - - // EndTime is the end time of the cursor. It is the higher - // absolute time regardless of the Ascending flag. This value - // is an inclusive bound. - EndTime int64 -} - -type CursorIterator interface { - Next(ctx context.Context, r *CursorRequest) (Cursor, error) - Stats() CursorStats -} - -type CursorIterators []CursorIterator - -// Stats returns the aggregate stats of all cursor iterators. -func (a CursorIterators) Stats() CursorStats { - var stats CursorStats - for _, itr := range a { - stats.Add(itr.Stats()) - } - return stats -} - -// CursorStats represents stats collected by a cursor. -type CursorStats struct { - ScannedValues int // number of values scanned - ScannedBytes int // number of uncompressed bytes scanned -} - -// Add adds other to s and updates s. -func (s *CursorStats) Add(other CursorStats) { - s.ScannedValues += other.ScannedValues - s.ScannedBytes += other.ScannedBytes -} diff --git a/tsdb/cursors/fieldtype_string.go b/tsdb/cursors/fieldtype_string.go deleted file mode 100644 index 79414dcc22e..00000000000 --- a/tsdb/cursors/fieldtype_string.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by "stringer -type FieldType"; DO NOT EDIT. - -package cursors - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[Float-0] - _ = x[Integer-1] - _ = x[Unsigned-2] - _ = x[String-3] - _ = x[Boolean-4] - _ = x[Undefined-5] -} - -const _FieldType_name = "FloatIntegerUnsignedStringBooleanUndefined" - -var _FieldType_index = [...]uint8{0, 5, 12, 20, 26, 33, 42} - -func (i FieldType) String() string { - if i < 0 || i >= FieldType(len(_FieldType_index)-1) { - return "FieldType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _FieldType_name[_FieldType_index[i]:_FieldType_index[i+1]] -} diff --git a/tsdb/cursors/gen.go b/tsdb/cursors/gen.go deleted file mode 100644 index 40bcfb2a3c4..00000000000 --- a/tsdb/cursors/gen.go +++ /dev/null @@ -1,4 +0,0 @@ -package cursors - -//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@arrayvalues.gen.go.tmpldata arrayvalues.gen.go.tmpl -//go:generate stringer -type FieldType diff --git a/tsdb/cursors/int64.go b/tsdb/cursors/int64.go deleted file mode 100644 index df1d11637a3..00000000000 --- a/tsdb/cursors/int64.go +++ /dev/null @@ -1,81 +0,0 @@ -package cursors - -// Int64Iterator describes the behavior for enumerating a sequence of int64 -// values. -type Int64Iterator interface { - // Next advances the Int64Iterator to the next value. It returns false when - // there are no more values. - Next() bool - - // Value returns the current value. - Value() int64 - - Stats() CursorStats -} - -// EmptyInt64Iterator is an implementation of Int64Iterator that returns no -// values. 
-var EmptyInt64Iterator Int64Iterator = &int64Iterator{} - -type int64Iterator struct{} - -func (*int64Iterator) Next() bool { return false } -func (*int64Iterator) Value() int64 { return 0 } -func (*int64Iterator) Stats() CursorStats { return CursorStats{} } - -type Int64SliceIterator struct { - s []int64 - v int64 - i int - stats CursorStats -} - -func NewInt64SliceIterator(s []int64) *Int64SliceIterator { - return &Int64SliceIterator{s: s, i: 0} -} - -func NewInt64SliceIteratorWithStats(s []int64, stats CursorStats) *Int64SliceIterator { - return &Int64SliceIterator{s: s, i: 0, stats: stats} -} - -func (s *Int64SliceIterator) Next() bool { - if s.i < len(s.s) { - s.v = s.s[s.i] - s.i++ - return true - } - s.v = 0 - return false -} - -func (s *Int64SliceIterator) Value() int64 { - return s.v -} - -func (s *Int64SliceIterator) Stats() CursorStats { - return s.stats -} - -func (s *Int64SliceIterator) toSlice() []int64 { - if s.i < len(s.s) { - return s.s[s.i:] - } - return nil -} - -// Int64SliceIteratorToSlice reads the remainder of i into a slice and returns -// the result. -func Int64SliceIteratorToSlice(i Int64Iterator) []int64 { - if i == nil { - return nil - } - - if si, ok := i.(*Int64SliceIterator); ok { - return si.toSlice() - } - var a []int64 - for i.Next() { - a = append(a, i.Value()) - } - return a -} diff --git a/tsdb/cursors/mock/cursor_iterator.go b/tsdb/cursors/mock/cursor_iterator.go deleted file mode 100644 index 4084125f6ce..00000000000 --- a/tsdb/cursors/mock/cursor_iterator.go +++ /dev/null @@ -1,65 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/tsdb/cursors (interfaces: CursorIterator) - -// Package mock is a generated GoMock package. -package mock - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - cursors "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -// MockCursorIterator is a mock of CursorIterator interface -type MockCursorIterator struct { - ctrl *gomock.Controller - recorder *MockCursorIteratorMockRecorder -} - -// MockCursorIteratorMockRecorder is the mock recorder for MockCursorIterator -type MockCursorIteratorMockRecorder struct { - mock *MockCursorIterator -} - -// NewMockCursorIterator creates a new mock instance -func NewMockCursorIterator(ctrl *gomock.Controller) *MockCursorIterator { - mock := &MockCursorIterator{ctrl: ctrl} - mock.recorder = &MockCursorIteratorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockCursorIterator) EXPECT() *MockCursorIteratorMockRecorder { - return m.recorder -} - -// Next mocks base method -func (m *MockCursorIterator) Next(arg0 context.Context, arg1 *cursors.CursorRequest) (cursors.Cursor, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Next", arg0, arg1) - ret0, _ := ret[0].(cursors.Cursor) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Next indicates an expected call of Next -func (mr *MockCursorIteratorMockRecorder) Next(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockCursorIterator)(nil).Next), arg0, arg1) -} - -// Stats mocks base method -func (m *MockCursorIterator) Stats() cursors.CursorStats { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Stats") - ret0, _ := ret[0].(cursors.CursorStats) - return ret0 -} - -// Stats indicates an expected call of Stats -func (mr *MockCursorIteratorMockRecorder) 
Stats() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockCursorIterator)(nil).Stats)) -} diff --git a/tsdb/cursors/mock/integer_array_cursor.go b/tsdb/cursors/mock/integer_array_cursor.go deleted file mode 100644 index 5b0a00dcca7..00000000000 --- a/tsdb/cursors/mock/integer_array_cursor.go +++ /dev/null @@ -1,89 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/tsdb/cursors (interfaces: IntegerArrayCursor) - -// Package mock is a generated GoMock package. -package mock - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - cursors "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -// MockIntegerArrayCursor is a mock of IntegerArrayCursor interface -type MockIntegerArrayCursor struct { - ctrl *gomock.Controller - recorder *MockIntegerArrayCursorMockRecorder -} - -// MockIntegerArrayCursorMockRecorder is the mock recorder for MockIntegerArrayCursor -type MockIntegerArrayCursorMockRecorder struct { - mock *MockIntegerArrayCursor -} - -// NewMockIntegerArrayCursor creates a new mock instance -func NewMockIntegerArrayCursor(ctrl *gomock.Controller) *MockIntegerArrayCursor { - mock := &MockIntegerArrayCursor{ctrl: ctrl} - mock.recorder = &MockIntegerArrayCursorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockIntegerArrayCursor) EXPECT() *MockIntegerArrayCursorMockRecorder { - return m.recorder -} - -// Close mocks base method -func (m *MockIntegerArrayCursor) Close() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Close") -} - -// Close indicates an expected call of Close -func (mr *MockIntegerArrayCursorMockRecorder) Close() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockIntegerArrayCursor)(nil).Close)) -} - -// Err mocks base method -func (m *MockIntegerArrayCursor) Err() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Err") - ret0, _ := ret[0].(error) - return ret0 -} - -// Err indicates an expected call of Err -func (mr *MockIntegerArrayCursorMockRecorder) Err() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockIntegerArrayCursor)(nil).Err)) -} - -// Next mocks base method -func (m *MockIntegerArrayCursor) Next() *cursors.IntegerArray { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Next") - ret0, _ := ret[0].(*cursors.IntegerArray) - return ret0 -} - -// Next indicates an expected call of Next -func (mr *MockIntegerArrayCursorMockRecorder) Next() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockIntegerArrayCursor)(nil).Next)) -} - -// Stats mocks base method -func (m *MockIntegerArrayCursor) Stats() cursors.CursorStats { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Stats") - ret0, _ := ret[0].(cursors.CursorStats) - return ret0 -} - -// Stats indicates an expected call of Stats -func (mr *MockIntegerArrayCursorMockRecorder) Stats() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockIntegerArrayCursor)(nil).Stats)) -} diff --git a/tsdb/cursors/schema.go b/tsdb/cursors/schema.go deleted file mode 100644 index 33ab70a2231..00000000000 --- a/tsdb/cursors/schema.go +++ /dev/null @@ -1,201 +0,0 @@ -package cursors - -import ( - 
"github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxql" -) - -// FieldType represents the primitive field data types available in tsm. -type FieldType int - -const ( - Float FieldType = iota // means the data type is a float - Integer // means the data type is an integer - Unsigned // means the data type is an unsigned integer - String // means the data type is a string of text - Boolean // means the data type is a boolean - Undefined // means the data type in unknown or undefined -) - -var ( - fieldTypeToDataTypeMapping = [8]influxql.DataType{ - Float: influxql.Float, - Integer: influxql.Integer, - Unsigned: influxql.Unsigned, - String: influxql.String, - Boolean: influxql.Boolean, - Undefined: influxql.Unknown, - 6: influxql.Unknown, - 7: influxql.Unknown, - } -) - -// FieldTypeToDataType returns the equivalent influxql DataType for the field type ft. -// If ft is an invalid FieldType, the results are undefined. -func FieldTypeToDataType(ft FieldType) influxql.DataType { - return fieldTypeToDataTypeMapping[ft&7] -} - -// IsLower returns true if the other FieldType has greater precedence than the -// current value. Undefined has the lowest precedence. -func (ft FieldType) IsLower(other FieldType) bool { return other < ft } - -var ( - modelsFieldTypeToFieldTypeMapping = [8]FieldType{ - models.Integer: Integer, - models.Float: Float, - models.Boolean: Boolean, - models.String: String, - models.Empty: Undefined, - models.Unsigned: Unsigned, - 6: Undefined, - 7: Undefined, - } -) - -// ModelsFieldTypeToFieldType returns the equivalent FieldType for ft. -// If ft is an invalid FieldType, the results are undefined. -func ModelsFieldTypeToFieldType(ft models.FieldType) FieldType { - return modelsFieldTypeToFieldTypeMapping[ft&7] -} - -type MeasurementField struct { - Key string // Key is the name of the field - Type FieldType // Type is field type - Timestamp int64 // Timestamp refers to the maximum timestamp observed for the given field -} - -// MeasurementFieldSlice implements sort.Interface and sorts -// the slice from lowest to highest precedence. Use sort.Reverse -// to sort from highest to lowest. -type MeasurementFieldSlice []MeasurementField - -func (m MeasurementFieldSlice) Len() int { - return len(m) -} - -func (m MeasurementFieldSlice) Less(i, j int) bool { - ii, jj := &m[i], &m[j] - return ii.Key < jj.Key || - (ii.Key == jj.Key && - (ii.Timestamp < jj.Timestamp || - (ii.Timestamp == jj.Timestamp && ii.Type.IsLower(jj.Type)))) -} - -func (m MeasurementFieldSlice) Swap(i, j int) { - m[i], m[j] = m[j], m[i] -} - -// UniqueByKey performs an in-place update of m, removing duplicate elements -// by Key, keeping the first occurrence of each. If the slice is not sorted, -// the behavior of UniqueByKey is undefined. -func (m *MeasurementFieldSlice) UniqueByKey() { - mm := *m - if len(mm) < 2 { - return - } - - j := 0 - for i := 1; i < len(mm); i++ { - if mm[j].Key != mm[i].Key { - j++ - if j != i { - // optimization: skip copy if j == i - mm[j] = mm[i] - } - } - } - - *m = mm[:j+1] -} - -type MeasurementFields struct { - Fields []MeasurementField -} - -type MeasurementFieldsIterator interface { - // Next advances the iterator to the next value. It returns false - // when there are no more values. - Next() bool - - // Value returns the current value. - Value() MeasurementFields - - Stats() CursorStats -} - -// EmptyMeasurementFieldsIterator is an implementation of MeasurementFieldsIterator that returns -// no values. 
-var EmptyMeasurementFieldsIterator = &measurementFieldsIterator{} - -type measurementFieldsIterator struct{} - -func (m *measurementFieldsIterator) Next() bool { return false } -func (m *measurementFieldsIterator) Value() MeasurementFields { return MeasurementFields{} } -func (m *measurementFieldsIterator) Stats() CursorStats { return CursorStats{} } - -type MeasurementFieldsSliceIterator struct { - f []MeasurementFields - v MeasurementFields - i int - stats CursorStats -} - -func NewMeasurementFieldsSliceIterator(f []MeasurementFields) *MeasurementFieldsSliceIterator { - return &MeasurementFieldsSliceIterator{f: f} -} - -func NewMeasurementFieldsSliceIteratorWithStats(f []MeasurementFields, stats CursorStats) *MeasurementFieldsSliceIterator { - return &MeasurementFieldsSliceIterator{f: f, stats: stats} -} - -func (s *MeasurementFieldsSliceIterator) Next() bool { - if s.i < len(s.f) { - s.v = s.f[s.i] - s.i++ - return true - } - s.v = MeasurementFields{} - return false -} - -func (s *MeasurementFieldsSliceIterator) Value() MeasurementFields { - return s.v -} - -func (s *MeasurementFieldsSliceIterator) Stats() CursorStats { - return s.stats -} - -func (s *MeasurementFieldsSliceIterator) toSlice() []MeasurementFields { - if s.i < len(s.f) { - return s.f[s.i:] - } - return nil -} - -// MeasurementFieldsIteratorFlatMap reads the remainder of i, flattening the results -// to a single slice. -func MeasurementFieldsIteratorFlatMap(i MeasurementFieldsIterator) []MeasurementField { - if i == nil { - return nil - } - - var res []MeasurementField - if si, ok := i.(*MeasurementFieldsSliceIterator); ok { - s := si.toSlice() - sz := 0 - for i := range s { - sz += len(s[i].Fields) - } - res = make([]MeasurementField, 0, sz) - for i := range s { - res = append(res, s[i].Fields...) - } - } else { - for i.Next() { - res = append(res, i.Value().Fields...) 
- } - } - return res -} diff --git a/tsdb/cursors/schema_test.go b/tsdb/cursors/schema_test.go deleted file mode 100644 index 8b36df10b71..00000000000 --- a/tsdb/cursors/schema_test.go +++ /dev/null @@ -1,310 +0,0 @@ -package cursors_test - -import ( - "math/rand" - "sort" - "testing" - - "github.com/influxdata/influxdb/v2/pkg/testing/assert" - "github.com/influxdata/influxdb/v2/tsdb/cursors" -) - -// Verifies FieldType precedence behavior is equivalent to influxql.DataType#LessThan -func TestFieldTypeDataTypePrecedenceEquivalence(t *testing.T) { - var fieldTypes = []cursors.FieldType{ - cursors.Float, - cursors.Integer, - cursors.Unsigned, - cursors.Boolean, - cursors.String, - cursors.Undefined, - } - - for _, fta := range fieldTypes { - for _, ftb := range fieldTypes { - if fta == ftb { - continue - } - - got := fta.IsLower(ftb) - exp := cursors.FieldTypeToDataType(fta).LessThan(cursors.FieldTypeToDataType(ftb)) - assert.Equal(t, got, exp, "failed %s.LessThan(%s)", fta.String(), ftb.String()) - } - } -} - -// Verifies sorting behavior of MeasurementFieldSlice -func TestMeasurementFieldSliceSort(t *testing.T) { - mfs := func(d ...cursors.MeasurementField) cursors.MeasurementFieldSlice { - return d - } - - mf := func(key string, timestamp int64, ft cursors.FieldType) cursors.MeasurementField { - return cursors.MeasurementField{ - Key: key, - Type: ft, - Timestamp: timestamp, - } - } - - fltF := func(key string, ts int64) cursors.MeasurementField { - return mf(key, ts, cursors.Float) - } - intF := func(key string, ts int64) cursors.MeasurementField { - return mf(key, ts, cursors.Integer) - } - strF := func(key string, ts int64) cursors.MeasurementField { - return mf(key, ts, cursors.String) - } - blnF := func(key string, ts int64) cursors.MeasurementField { - return mf(key, ts, cursors.Boolean) - } - - cases := []struct { - name string - in cursors.MeasurementFieldSlice - exp cursors.MeasurementFieldSlice - }{ - { - name: "keys:diff types:same ts:same", - in: mfs( - fltF("bbb", 0), - fltF("aaa", 0), - fltF("ccc", 0), - ), - exp: mfs( - fltF("aaa", 0), - fltF("bbb", 0), - fltF("ccc", 0), - ), - }, - { - name: "keys:same types:same ts:diff", - in: mfs( - fltF("aaa", 10), - fltF("ccc", 20), - fltF("aaa", 0), - fltF("ccc", 0), - ), - exp: mfs( - fltF("aaa", 0), - fltF("aaa", 10), - fltF("ccc", 0), - fltF("ccc", 20), - ), - }, - { - name: "keys:same types:diff ts:same", - in: mfs( - strF("aaa", 0), - intF("aaa", 0), - fltF("aaa", 0), - blnF("aaa", 0), - ), - exp: mfs( - blnF("aaa", 0), - strF("aaa", 0), - intF("aaa", 0), - fltF("aaa", 0), - ), - }, - { - name: "keys:same types:diff ts:diff", - in: mfs( - strF("aaa", 20), - intF("aaa", 10), - fltF("aaa", 0), - blnF("aaa", 30), - ), - exp: mfs( - fltF("aaa", 0), - intF("aaa", 10), - strF("aaa", 20), - blnF("aaa", 30), - ), - }, - { - name: "keys:diff types:diff ts:diff", - in: mfs( - intF("ccc", 10), - blnF("fff", 30), - strF("aaa", 20), - fltF("ddd", 0), - ), - exp: mfs( - strF("aaa", 20), - intF("ccc", 10), - fltF("ddd", 0), - blnF("fff", 30), - ), - }, - { - name: "keys:many types:many ts:same", - in: mfs( - intF("ccc", 10), - blnF("fff", 30), - strF("aaa", 20), - fltF("ddd", 0), - fltF("ccc", 10), - strF("fff", 30), - intF("aaa", 20), - blnF("ddd", 0), - ), - exp: mfs( - strF("aaa", 20), - intF("aaa", 20), - intF("ccc", 10), - fltF("ccc", 10), - blnF("ddd", 0), - fltF("ddd", 0), - blnF("fff", 30), - strF("fff", 30), - ), - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - got := tc.in - - // 
randomize order using fixed seed to - // ensure tests are deterministic on a given platform - seededRand := rand.New(rand.NewSource(100)) - for i := 0; i < 5; i++ { - seededRand.Shuffle(len(got), func(i, j int) { - got[i], got[j] = got[j], got[i] - }) - - sort.Sort(got) - assert.Equal(t, got, tc.exp, "failed at index", i) - } - }) - } -} - -func TestMeasurementFieldSlice_UniqueByKey(t *testing.T) { - mfs := func(d ...cursors.MeasurementField) cursors.MeasurementFieldSlice { - return d - } - - mf := func(key string, timestamp int64, ft cursors.FieldType) cursors.MeasurementField { - return cursors.MeasurementField{ - Key: key, - Type: ft, - Timestamp: timestamp, - } - } - - fltF := func(key string, ts int64) cursors.MeasurementField { - return mf(key, ts, cursors.Float) - } - - t.Run("multiple start end", func(t *testing.T) { - got := mfs( - fltF("aaa", 0), - fltF("aaa", 10), - fltF("bbb", 10), - fltF("ccc", 10), - fltF("ccc", 20), - ) - - exp := mfs( - fltF("aaa", 0), - fltF("bbb", 10), - fltF("ccc", 10), - ) - - got.UniqueByKey() - assert.Equal(t, got, exp) - }) - - t.Run("multiple at end", func(t *testing.T) { - got := mfs( - fltF("aaa", 0), - fltF("bbb", 10), - fltF("ccc", 10), - fltF("ccc", 20), - fltF("ccc", 30), - ) - - exp := mfs( - fltF("aaa", 0), - fltF("bbb", 10), - fltF("ccc", 10), - ) - - got.UniqueByKey() - assert.Equal(t, got, exp) - }) - - t.Run("no duplicates many", func(t *testing.T) { - got := mfs( - fltF("aaa", 0), - fltF("bbb", 10), - fltF("ccc", 20), - ) - - exp := mfs( - fltF("aaa", 0), - fltF("bbb", 10), - fltF("ccc", 20), - ) - - got.UniqueByKey() - assert.Equal(t, got, exp) - }) - - t.Run("no duplicates two elements", func(t *testing.T) { - got := mfs( - fltF("aaa", 0), - fltF("bbb", 10), - ) - - exp := mfs( - fltF("aaa", 0), - fltF("bbb", 10), - ) - - got.UniqueByKey() - assert.Equal(t, got, exp) - }) - - t.Run("duplicates one key", func(t *testing.T) { - got := mfs( - fltF("aaa", 0), - fltF("aaa", 10), - fltF("aaa", 10), - fltF("aaa", 10), - fltF("aaa", 10), - fltF("aaa", 10), - ) - - exp := mfs( - fltF("aaa", 0), - ) - - got.UniqueByKey() - assert.Equal(t, got, exp) - }) - - t.Run("one element", func(t *testing.T) { - got := mfs( - fltF("aaa", 0), - ) - - exp := mfs( - fltF("aaa", 0), - ) - - got.UniqueByKey() - assert.Equal(t, got, exp) - }) - - t.Run("empty", func(t *testing.T) { - got := mfs() - exp := mfs() - - got.UniqueByKey() - assert.Equal(t, got, exp) - }) -} diff --git a/tsdb/cursors/string.go b/tsdb/cursors/string.go deleted file mode 100644 index 2c2b13a7af1..00000000000 --- a/tsdb/cursors/string.go +++ /dev/null @@ -1,81 +0,0 @@ -package cursors - -// StringIterator describes the behavior for enumerating a sequence of -// string values. -type StringIterator interface { - // Next advances the StringIterator to the next value. It returns false - // when there are no more values. - Next() bool - - // Value returns the current value. - Value() string - - Stats() CursorStats -} - -// EmptyStringIterator is an implementation of StringIterator that returns -// no values. 
-var EmptyStringIterator StringIterator = &stringIterator{} - -type stringIterator struct{} - -func (*stringIterator) Next() bool { return false } -func (*stringIterator) Value() string { return "" } -func (*stringIterator) Stats() CursorStats { return CursorStats{} } - -type StringSliceIterator struct { - s []string - v string - i int - stats CursorStats -} - -func NewStringSliceIterator(s []string) *StringSliceIterator { - return &StringSliceIterator{s: s, i: 0} -} - -func NewStringSliceIteratorWithStats(s []string, stats CursorStats) *StringSliceIterator { - return &StringSliceIterator{s: s, i: 0, stats: stats} -} - -func (s *StringSliceIterator) Next() bool { - if s.i < len(s.s) { - s.v = s.s[s.i] - s.i++ - return true - } - s.v = "" - return false -} - -func (s *StringSliceIterator) Value() string { - return s.v -} - -func (s *StringSliceIterator) Stats() CursorStats { - return s.stats -} - -func (s *StringSliceIterator) toSlice() []string { - if s.i < len(s.s) { - return s.s[s.i:] - } - return nil -} - -// StringIteratorToSlice reads the remainder of i into a slice and -// returns the result. -func StringIteratorToSlice(i StringIterator) []string { - if i == nil { - return nil - } - - if si, ok := i.(*StringSliceIterator); ok { - return si.toSlice() - } - var a []string - for i.Next() { - a = append(a, i.Value()) - } - return a -} diff --git a/tsdb/engine.go b/tsdb/engine.go deleted file mode 100644 index 8d87955840b..00000000000 --- a/tsdb/engine.go +++ /dev/null @@ -1,212 +0,0 @@ -package tsdb - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "regexp" - "runtime" - "sort" - "time" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/estimator" - "github.com/influxdata/influxdb/v2/pkg/limiter" - "github.com/influxdata/influxql" - "go.uber.org/zap" -) - -var ( - // ErrUnknownEngineFormat is returned when the engine format is - // unknown. ErrUnknownEngineFormat is currently returned if a format - // other than tsm1 is encountered. - ErrUnknownEngineFormat = errors.New("unknown engine format") -) - -// Engine represents a swappable storage engine for the shard. 
-type Engine interface { - Open(ctx context.Context) error - Close() error - SetEnabled(enabled bool) - SetCompactionsEnabled(enabled bool) - ScheduleFullCompaction() error - - WithLogger(*zap.Logger) - - LoadMetadataIndex(shardID uint64, index Index) error - - CreateSnapshot(skipCacheOk bool) (string, error) - Backup(w io.Writer, basePath string, since time.Time) error - Export(w io.Writer, basePath string, start time.Time, end time.Time) error - Restore(r io.Reader, basePath string) error - Import(r io.Reader, basePath string) error - Digest() (io.ReadCloser, int64, error) - - CreateIterator(ctx context.Context, measurement string, opt query.IteratorOptions) (query.Iterator, error) - CreateCursorIterator(ctx context.Context) (CursorIterator, error) - IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error) - WritePoints(ctx context.Context, points []models.Point) error - - CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error - CreateSeriesListIfNotExists(keys, names [][]byte, tags []models.Tags) error - DeleteSeriesRange(ctx context.Context, itr SeriesIterator, min, max int64) error - DeleteSeriesRangeWithPredicate(ctx context.Context, itr SeriesIterator, predicate func(name []byte, tags models.Tags) (int64, int64, bool)) error - - MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) - SeriesSketches() (estimator.Sketch, estimator.Sketch, error) - SeriesN() int64 - - MeasurementExists(name []byte) (bool, error) - - MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) - MeasurementFieldSet() *MeasurementFieldSet - MeasurementFields(measurement []byte) *MeasurementFields - ForEachMeasurementName(fn func(name []byte) error) error - DeleteMeasurement(ctx context.Context, name []byte) error - - HasTagKey(name, key []byte) (bool, error) - MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) - TagKeyCardinality(name, key []byte) int - - LastModified() time.Time - DiskSize() int64 - IsIdle() (bool, string) - Free() error - - Reindex() error - - io.WriterTo -} - -// SeriesIDSets provides access to the total set of series IDs -type SeriesIDSets interface { - ForEach(f func(ids *SeriesIDSet)) error -} - -// EngineFormat represents the format for an engine. -type EngineFormat int - -// NewEngineFunc creates a new engine. -type NewEngineFunc func(id uint64, i Index, path string, walPath string, sfile *SeriesFile, options EngineOptions) Engine - -// newEngineFuncs is a lookup of engine constructors by name. -var newEngineFuncs = make(map[string]NewEngineFunc) - -// RegisterEngine registers a storage engine initializer by name. -func RegisterEngine(name string, fn NewEngineFunc) { - if _, ok := newEngineFuncs[name]; ok { - panic("engine already registered: " + name) - } - newEngineFuncs[name] = fn -} - -// RegisteredEngines returns the slice of currently registered engines. -func RegisteredEngines() []string { - a := make([]string, 0, len(newEngineFuncs)) - for k := range newEngineFuncs { - a = append(a, k) - } - sort.Strings(a) - return a -} - -// NewEngine returns an instance of an engine based on its format. -// If the path does not exist then the DefaultFormat is used. 
-func NewEngine(id uint64, i Index, path string, walPath string, sfile *SeriesFile, options EngineOptions) (Engine, error) { - // Create a new engine - if _, err := os.Stat(path); os.IsNotExist(err) { - engine := newEngineFuncs[options.EngineVersion](id, i, path, walPath, sfile, options) - if options.OnNewEngine != nil { - options.OnNewEngine(engine) - } - return engine, nil - } - - // If it's a dir then it's a tsm1 engine - format := DefaultEngine - if fi, err := os.Stat(path); err != nil { - return nil, err - } else if !fi.Mode().IsDir() { - return nil, ErrUnknownEngineFormat - } else { - format = "tsm1" - } - - // Lookup engine by format. - fn := newEngineFuncs[format] - if fn == nil { - return nil, fmt.Errorf("invalid engine format: %q", format) - } - - engine := fn(id, i, path, walPath, sfile, options) - if options.OnNewEngine != nil { - options.OnNewEngine(engine) - } - return engine, nil -} - -// EngineOptions represents the options used to initialize the engine. -type EngineOptions struct { - EngineVersion string - IndexVersion string - ShardID uint64 - - // Limits the concurrent number of TSM files that can be loaded at once. - OpenLimiter limiter.Fixed - - // CompactionDisabled specifies shards should not schedule compactions. - // This option is intended for offline tooling. - CompactionDisabled bool - CompactionPlannerCreator CompactionPlannerCreator - CompactionLimiter limiter.Fixed - CompactionThroughputLimiter limiter.Rate - WALEnabled bool - MonitorDisabled bool - - // DatabaseFilter is a predicate controlling which databases may be opened. - // If no function is set, all databases will be opened. - DatabaseFilter func(database string) bool - - // RetentionPolicyFilter is a predicate controlling which combination of database and retention policy may be opened. - // nil will allow all combinations to pass. - RetentionPolicyFilter func(database, rp string) bool - - // ShardFilter is a predicate controlling which combination of database, retention policy and shard group may be opened. - // nil will allow all combinations to pass. - ShardFilter func(database, rp string, id uint64) bool - - Config Config - SeriesIDSets SeriesIDSets - - OnNewEngine func(Engine) - - FileStoreObserver FileStoreObserver - MetricsDisabled bool -} - -// NewEngineOptions constructs an EngineOptions object with safe default values. -// This should only be used in tests; production environments should read from a config file. -func NewEngineOptions() EngineOptions { - return EngineOptions{ - EngineVersion: DefaultEngine, - IndexVersion: DefaultIndex, - Config: NewConfig(), - WALEnabled: true, - OpenLimiter: limiter.NewFixed(runtime.GOMAXPROCS(0)), - } -} - -type CompactionPlannerCreator func(cfg Config) interface{} - -// FileStoreObserver is passed notifications before the file store adds or deletes files. In this way, it can -// be sure to observe every file that is added or removed even in the presence of process death. -type FileStoreObserver interface { - // FileFinishing is called before a file is renamed to it's final name. - FileFinishing(path string) error - - // FileUnlinking is called before a file is unlinked. - FileUnlinking(path string) error -} diff --git a/tsdb/engine/engine.go b/tsdb/engine/engine.go deleted file mode 100644 index 8b5870da803..00000000000 --- a/tsdb/engine/engine.go +++ /dev/null @@ -1,10 +0,0 @@ -// Package engine can be imported to initialize and register all available TSDB engines. -// -// Alternatively, you can import any individual subpackage underneath engine. 
-package engine // import "github.com/influxdata/influxdb/v2/tsdb/engine" - -import ( - - // Initialize and register tsm1 engine - _ "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) diff --git a/tsdb/engine/tsm1/DESIGN.md b/tsdb/engine/tsm1/DESIGN.md deleted file mode 100644 index 0b5935c9ede..00000000000 --- a/tsdb/engine/tsm1/DESIGN.md +++ /dev/null @@ -1,451 +0,0 @@ -# File Structure - -A TSM file is composed for four sections: header, blocks, index and the footer. - -``` -┌────────┬────────────────────────────────────┬─────────────┬──────────────┐ -│ Header │ Blocks │ Index │ Footer │ -│5 bytes │ N bytes │ N bytes │ 4 bytes │ -└────────┴────────────────────────────────────┴─────────────┴──────────────┘ -``` -Header is composed of a magic number to identify the file type and a version number. - -``` -┌───────────────────┐ -│ Header │ -├─────────┬─────────┤ -│ Magic │ Version │ -│ 4 bytes │ 1 byte │ -└─────────┴─────────┘ -``` - -Blocks are sequences of block CRC32 and data. The block data is opaque to the file. The CRC32 is used for recovery to ensure blocks have not been corrupted due to bugs outside of our control. The length of the blocks is stored in the index. - -``` -┌───────────────────────────────────────────────────────────┐ -│ Blocks │ -├───────────────────┬───────────────────┬───────────────────┤ -│ Block 1 │ Block 2 │ Block N │ -├─────────┬─────────┼─────────┬─────────┼─────────┬─────────┤ -│ CRC │ Data │ CRC │ Data │ CRC │ Data │ -│ 4 bytes │ N bytes │ 4 bytes │ N bytes │ 4 bytes │ N bytes │ -└─────────┴─────────┴─────────┴─────────┴─────────┴─────────┘ -``` - -Following the blocks is the index for the blocks in the file. The index is composed of a sequence of index entries ordered lexicographically by key and then by time. Each index entry starts with a key length and key followed by a count of the number of blocks in the file. Each block entry is composed of the min and max time for the block, the offset into the file where the block is located and the size of the block. - -The index structure can provide efficient access to all blocks as well as the ability to determine the cost associated with accessing a given key. Given a key and timestamp, we know exactly which file contains the block for that timestamp as well as where that block resides and how much data to read to retrieve the block. If we know we need to read all or multiple blocks in a file, we can use the size to determine how much to read in a given IO. - -_TBD: The block length stored in the block data could probably be dropped since we store it in the index._ - -``` -┌────────────────────────────────────────────────────────────────────────────┐ -│ Index │ -├─────────┬─────────┬──────┬───────┬─────────┬─────────┬────────┬────────┬───┤ -│ Key Len │ Key │ Type │ Count │Min Time │Max Time │ Offset │ Size │...│ -│ 2 bytes │ N bytes │1 byte│2 bytes│ 8 bytes │ 8 bytes │8 bytes │4 bytes │ │ -└─────────┴─────────┴──────┴───────┴─────────┴─────────┴────────┴────────┴───┘ -``` - -The last section is the footer that stores the offset of the start of the index. - -``` -┌─────────┐ -│ Footer │ -├─────────┤ -│Index Ofs│ -│ 8 bytes │ -└─────────┘ -``` - -# File System Layout - -The file system is organized a directory per shard where each shard is an integer number. Associated with each shard directory, there is a set of other directories and files: - -* a wal directory - contains a set numerically increasing files WAL segment files named #####.wal. 
The wal directory is separate from the directory containing the TSM files so that different types can be used if necessary. -* .tsm files - a set of numerically increasing TSM files containing compressed series data. -* .tombstone files - files named after the corresponding TSM file as #####.tombstone. These contain measurement and series keys that have been deleted. These files are removed during compactions. - -# Data Flow - -Writes are appended to the current WAL segment and are also added to the Cache. Each WAL segment is size bounded and rolls-over to a new file after it fills up. The cache is also size bounded; snapshots are taken and WAL compactions are initiated when the cache becomes too full. If the inbound write rate exceeds the WAL compaction rate for a sustained period, the cache may become too full in which case new writes will fail until the compaction process catches up. The WAL and Cache are separate entities and do not interact with each other. The Engine coordinates the writes to both. - -When WAL segments fill up and have been closed, the Compactor reads the WAL entries and combines them with one or more existing TSM files. This process runs continuously until all WAL files are compacted and there is a minimum number of TSM files. As each TSM file is completed, it is loaded and referenced by the FileStore. - -Queries are executed by constructing Cursors for keys. The Cursors iterate over slices of Values. When the current Values are exhausted, a Cursor requests the next set of Values from the Engine. The Engine returns a slice of Values by querying the FileStore and Cache. The Values in the Cache are overlaid on top of the values returned from the FileStore. The FileStore reads and decodes blocks of Values according to the index for the file. - -Updates (writing a newer value for a point that already exists) occur as normal writes. Since cached values overwrite existing values, newer writes take precedence. - -Deletes occur by writing a delete entry for the measurement or series to the WAL and then updating the Cache and FileStore. The Cache evicts all relevant entries. The FileStore writes a tombstone file for each TSM file that contains relevant data. These tombstone files are used at startup time to ignore blocks as well as during compactions to remove deleted entries. - -# Compactions - -Compactions are a serial and continuously running process that iteratively optimizes the storage for queries. Specifically, it does the following: - -* Converts closed WAL files into TSM files and removes the closed WAL files -* Combines smaller TSM files into larger ones to improve compression ratios -* Rewrites existing files that contain series data that has been deleted -* Rewrites existing files that contain writes with more recent data to ensure a point exists in only one TSM file. - -The compaction algorithm is continuously running and always selects files to compact based on a priority. - -1. If there are closed WAL files, the 5 oldest WAL segments are added to the set of compaction files. -2. If any TSM files contain points with older timestamps that also exist in the WAL files, those TSM files are added to the compaction set. -3. If any TSM files have a tombstone marker, those TSM files are added to the compaction set. - -The compaction algorithm generates a set of SeriesIterators that return a sequence of `key`, `Values` where each `key` returned is lexicographically greater than the previous one. 
The iterators are ordered such that WAL iterators will override any values returned by the TSM file iterators. WAL iterators read and cache the WAL segment so that deletes later in the log can be processed correctly. TSM file iterators use the tombstone files to ensure that deleted series are not returned during iteration. As each key is processed, the Values slice is grown, sorted, and then written to a new block in the new TSM file. The blocks can be split based on number of points or size of the block. If the total size of the current TSM file would exceed the maximum file size, a new file is created. - -Deletions can occur while a new file is being written. Since the new TSM file is not complete a tombstone would not be written for it. This could result in deleted values getting written into a new file. To prevent this, if a compaction is running and a delete occurs, the current compaction is aborted and new compaction is started. - -When all WAL files in the current compaction have been processed and the new TSM files have been successfully written, the new TSM files are renamed to their final names, the WAL segments are truncated and the associated snapshots are released from the cache. - -The compaction process then runs again until there are no more WAL files and the minimum number of TSM files exist that are also under the maximum file size. - -# WAL - -Currently, there is a WAL per shard. This means all the writes in a WAL segment are for the given shard. It also means that writes across a lot of shards append to many files which might result in more disk IO due to seeking to the end of multiple files. - -Two options are being considered: - -## WAL per Shard - -This is the current behavior of the WAL. This option is conceptually easier to reason about. For example, compactions that read in multiple WAL segments are assured that all the WAL entries pertain to the current shard. If it completes a compaction, it is safe to remove the WAL segment. It is also easier to deal with shard deletions as all the WAL segments can be dropped along with the other shard files. - -The drawback of this option is the potential for turning sequential write IO into random IO in the presence of multiple shards and writes to many different shards. - -## Single WAL - -Using a single WAL adds some complexity to compactions and deletions. Compactions will need to either sort all the WAL entries in a segment by shard first and then run compactions on each shard or the compactor needs to be able to compact multiple shards concurrently while ensuring points in existing TSM files in different shards remain separate. - -Deletions would not be able to reclaim WAL segments immediately as in the case where there is a WAL per shard. Similarly, a compaction of a WAL segment that contains writes for a deleted shard would need to be dropped. - -Currently, we are moving towards a Single WAL implementation. - -# Cache - -The purpose of the cache is so that data in the WAL is queryable. Every time a point is written to a WAL segment, it is also written to an in-memory cache. The cache is split into two parts: a "hot" part, representing the most recent writes and a "cold" part containing snapshots for which an active WAL compaction -process is underway. - -Queries are satisfied with values read from the cache and finalized TSM files. Points in the cache always take precedence over points in TSM files with the same timestamp. 
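
To make the hot/cold split described above concrete, here is a minimal, self-contained Go sketch of the idea: recent writes land in a "hot" map, a snapshot freezes that map while its WAL segments are compacted, and the snapshot is released once compaction finishes. The names (`sketchCache`, `Snapshot`, `ReleaseOldest`) and the key format are illustrative assumptions and do not mirror the engine's actual cache implementation.

```
package main

import "fmt"

// sketchCache is an illustrative two-part cache: a "hot" map of recent,
// uncompacted writes and a list of "cold" snapshots whose WAL segments
// are currently being compacted into TSM files.
type sketchCache struct {
	hot       map[string][]float64
	snapshots []map[string][]float64
}

// Write appends values for a series key to the hot part.
func (c *sketchCache) Write(key string, values ...float64) {
	if c.hot == nil {
		c.hot = map[string][]float64{}
	}
	c.hot[key] = append(c.hot[key], values...)
}

// Snapshot freezes the hot part so a WAL compaction can run against it,
// and starts a fresh hot part for new writes.
func (c *sketchCache) Snapshot() {
	c.snapshots = append(c.snapshots, c.hot)
	c.hot = map[string][]float64{}
}

// ReleaseOldest drops the oldest snapshot once its WAL segments have been
// compacted into TSM files.
func (c *sketchCache) ReleaseOldest() {
	if len(c.snapshots) > 0 {
		c.snapshots = c.snapshots[1:]
	}
}

func main() {
	var c sketchCache
	c.Write("cpu,host=server1", 1, 2) // illustrative series key
	c.Snapshot()                      // compaction of the related WAL segments begins
	c.Write("cpu,host=server1", 3)
	c.ReleaseOldest()                         // compaction finished; the snapshot leaves the cache
	fmt.Println(len(c.hot), len(c.snapshots)) // 1 0
}
```

Queries would read from both the hot part and any outstanding snapshots, which is why snapshots stay resident until their WAL segments have been fully converted to TSM files.
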
Queries are never read directly from WAL segment files which are designed to optimize write rather than read performance. - -The cache tracks its size on a "point-calculated" basis. "point-calculated" means that the RAM storage footprint for a point is the determined by calling its `Size()` method. While this does not correspond directly to the actual RAM footprint in the cache, the two values are sufficiently well correlated for the purpose of controlling RAM usage. - -If the cache becomes too full, or the cache has been idle for too long, a snapshot of the cache is taken and a compaction process is initiated for the related WAL segments. When the compaction of these segments is complete, the related snapshots are released from the cache. - -In cases where IO performance of the compaction process falls behind the incoming write rate, it is possible that writes might arrive at the cache while the cache is both too full and the compaction of the previous snapshot is still in progress. In this case, the cache will reject the write, causing the write to fail. -Well behaved clients should interpret write failures as back pressure and should either discard the write or back off and retry the write after a delay. - -# TSM File Index - -Each TSM file contains a full index of the blocks contained within the file. The existing index structure is designed to allow for a binary search across the index to find the starting block for a key. We would then seek to that start key and sequentially scan each block to find the location of a timestamp. - -Some issues with the existing structure is that seeking to a given timestamp for a key has a unknown cost. This can cause variability in read performance that would very difficult to fix. Another issue is that startup times for loading a TSM file would grow in proportion to number and size of TSM files on disk since we would need to scan the entire file to find all keys contained in the file. This could be addressed by using a separate index like file or changing the index structure. - -We've chosen to update the block index structure to ensure a TSM file is fully self-contained, supports consistent IO characteristics for sequential and random accesses as well as provides an efficient load time regardless of file size. The implications of these changes are that the index is slightly larger and we need to be able to search the index despite each entry being variably sized. - -The following are some alternative design options to handle the cases where the index is too large to fit in memory. We are currently planning to use an indirect MMAP indexing approach for loaded TSM files. - -### Indirect MMAP Indexing - -One option is to MMAP the index into memory and record the pointers to the start of each index entry in a slice. When searching for a given key, the pointers are used to perform a binary search on the underlying mmap data. When the matching key is found, the block entries can be loaded and search or a subsequent binary search on the blocks can be performed. - -A variation of this can also be done without MMAPs by seeking and reading in the file. The underlying file cache will still be utilized in this approach as well. - -As an example, if we have an index structure in memory such as: - - ``` -┌────────────────────────────────────────────────────────────────────┐ -│ Index │ -├─┬──────────────────────┬──┬───────────────────────┬───┬────────────┘ -│0│ │62│ │145│ -├─┴───────┬─────────┬────┼──┴──────┬─────────┬──────┼───┴─────┬──────┐ -│Key 1 Len│ Key │... 
│Key 2 Len│ Key 2 │ ... │ Key 3 │ ... │ -│ 2 bytes │ N bytes │ │ 2 bytes │ N bytes │ │ 2 bytes │ │ -└─────────┴─────────┴────┴─────────┴─────────┴──────┴─────────┴──────┘ -``` - -We would build an `offsets` slices where each element pointers to the byte location for the first key in then index slice. - -``` -┌────────────────────────────────────────────────────────────────────┐ -│ Offsets │ -├────┬────┬────┬─────────────────────────────────────────────────────┘ -│ 0 │ 62 │145 │ -└────┴────┴────┘ - ``` - - -Using this offset slice we can find `Key 2` by doing a binary search over the offsets slice. Instead of comparing the value in the offsets (e.g. `62`), we use that as an index into the underlying index to retrieve the key at position `62` and perform our comparisons with that. - -When we have identified the correct position in the index for a given key, we could perform another binary search or a linear scan. This should be fast as well since each index entry is 28 bytes and all contiguous in memory. - -The size of the offsets slice would be proportional to the number of unique series. If we we limit file sizes to 4GB, we would use 4 bytes for each pointer. - -### LRU/Lazy Load - -A second option could be to have the index work as a memory bounded, lazy-load style cache. When a cache miss occurs, the index structure is scanned to find the key and the entries are load and added to the cache which causes the least-recently used entries to be evicted. - -### Key Compression - -Another option is compress keys using a key specific dictionary encoding. For example, - -``` -cpu,host=server1 value=1 -cpu,host=server2 value=2 -memory,host=server1 value=3 -``` - -Could be compressed by expanding the key into its respective parts: measurement, tag keys, tag values and tag fields . For each part a unique number is assigned. e.g. - -Measurements -``` -cpu = 1 -memory = 2 -``` - -Tag Keys -``` -host = 1 -``` - -Tag Values -``` -server1 = 1 -server2 = 2 -``` - -Fields -``` -value = 1 -``` - -Using this encoding dictionary, the string keys could be converted to a sequence of integers: - -``` -cpu,host=server1 value=1 --> 1,1,1,1 -cpu,host=server2 value=2 --> 1,1,2,1 -memory,host=server1 value=3 --> 3,1,2,1 -``` - -These sequences of small integers list can then be compressed further using a bit packed format such as Simple9 or Simple8b. The resulting byte slices would be a multiple of 4 or 8 bytes (using Simple9/Simple8b respectively) which could used as the (string). - -### Separate Index - -Another option might be to have a separate index file (BoltDB) that serves as the storage for the `FileIndex` and is transient. This index would be recreated at startup and updated at compaction time. - -# Components - -These are some of the high-level components and their responsibilities. These are ideas preliminary. - -## WAL - -* Append-only log composed of fixed size segment files. -* Writes are appended to the current segment -* Roll-over to new segment after filling the current segment -* Closed segments are never modified and used for startup and recovery as well as compactions. -* There is a single WAL for the store as opposed to a WAL per shard. 
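
As a rough illustration of the append-only, size-bounded segment behaviour listed above, the following Go sketch appends entries to numerically increasing `#####.wal` files and rolls over to a new segment once the current one is full. The type name `walSketch`, the segment size bound, and the error handling are assumptions made for the example; this is not the engine's WAL implementation.

```
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// maxSegmentSize is an illustrative roll-over bound, not the real default.
const maxSegmentSize = 10 * 1024 * 1024

// walSketch appends entries to numerically increasing #####.wal segment
// files, rolling over to a new segment once the current one is full.
type walSketch struct {
	dir     string
	seq     int
	size    int64
	current *os.File
}

func (w *walSketch) Append(entry []byte) error {
	if w.current == nil || w.size+int64(len(entry)) > maxSegmentSize {
		if w.current != nil {
			_ = w.current.Sync()
			_ = w.current.Close() // closed segments are never modified again
		}
		w.seq++
		name := filepath.Join(w.dir, fmt.Sprintf("%05d.wal", w.seq))
		f, err := os.OpenFile(name, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o666)
		if err != nil {
			return err
		}
		w.current, w.size = f, 0
	}
	n, err := w.current.Write(entry)
	w.size += int64(n)
	return err
}

func main() {
	w := &walSketch{dir: os.TempDir()}
	if err := w.Append([]byte("cpu,host=server1 value=1 1\n")); err != nil {
		fmt.Println("append failed:", err)
	}
}
```
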
- -## Compactor - -* Continuously running, iterative file storage optimizer -* Takes closed WAL files, existing TSM files and combines into one or more new TSM files - -## Cache - -* Hold recently written series data -* Has max size and a flushing limit -* When the flushing limit is crossed, a snapshot is taken and a compaction process for the related WAL segments is commenced. -* If a write comes in, the cache is too full, and the previous snapshot is still being compacted, the write will fail. - -# Engine - -* Maintains references to Cache, FileStore, WAL, etc.. -* Creates a cursor -* Receives writes, coordinates queries -* Hides underlying files and types from clients - -## Cursor - -* Iterates forward or reverse for given key -* Requests values from Engine for key and timestamp -* Has no knowledge of TSM files or WAL - delegates to Engine to request next set of Values - -## FileStore - -* Manages TSM files -* Maintains the file indexes and references to active files -* A TSM file that is opened entails reading in and adding the index section to the `FileIndex`. The block data is then MMAPed up to the index offset to avoid having the index in memory twice. - -## FileIndex -* Provides location information to a file and block for a given key and timestamp. - -## Interfaces - -``` -SeriesIterator returns the key and []Value such that a key is only returned -once and subsequent calls to Next() do not return the same key twice. -type SeriesIterator interface { - func Next() (key, []Value, error) -} -``` - -## Types - -_NOTE: the actual func names are to illustrate the type of functionality the type is responsible._ - -``` -TSMWriter writes a sets of key and Values to a TSM file. -type TSMWriter struct {} -func (t *TSMWriter) Write(key string, values []Value) error {} -func (t *TSMWriter) Close() error -``` - - -``` -// WALIterator returns the key and []Values for a set of WAL segment files. -type WALIterator struct{ - Files *os.File -} -func (r *WALReader) Next() (key, []Value, error) -``` - - -``` -TSMIterator returns the key and values from a TSM file. -type TSMIterator struct {} -func (r *TSMIterator) Next() (key, []Value, error) -``` - -``` -type Compactor struct {} -func (c *Compactor) Compact(iters ...SeriesIterators) error -``` - -``` -type Engine struct { - wal *WAL - cache *Cache - fileStore *FileStore - compactor *Compactor -} - -func (e *Engine) ValuesBefore(key string, timestamp time.Time) ([]Value, error) -func (e *Engine) ValuesAfter(key string, timestamp time.Time) ([]Value, error) -``` - -``` -type Cursor struct{ - engine *Engine -} -... -``` - -``` -// FileStore maintains references -type FileStore struct {} -func (f *FileStore) ValuesBefore(key string, timestamp time.Time) ([]Value, error) -func (f *FileStore) ValuesAfter(key string, timestamp time.Time) ([]Value, error) - -``` - -``` -type FileIndex struct {} - -// Returns a file and offset for a block located in the return file that contains the requested key and timestamp. 
-
-```
-type Cache struct {}
-func (c *Cache) Write(key string, values []Value, checkpoint uint64) error
-func (c *Cache) SetCheckpoint(checkpoint uint64) error
-func (c *Cache) Cursor(key string) tsdb.Cursor
-```
-
-```
-type WAL struct {}
-func (w *WAL) Write(key string, values []Value)
-func (w *WAL) ClosedSegments() ([]*os.File, error)
-```
-
-# Concerns
-
-## Performance
-
-There are five categories of performance this design is concerned with:
-
-* Write Throughput/Latency
-* Query Throughput/Latency
-* Startup time
-* Compaction Throughput/Latency
-* Memory Usage
-
-### Writes
-
-Write throughput is bounded by the time to process the write on the CPU (parsing, sorting, etc.), adding to and evicting from the Cache, and appending the write to the WAL. The first two items are CPU bound and can be tuned and optimized if they become a bottleneck. The WAL write can be tuned such that in the worst case every write requires at least 2 IOPS (write + fsync), or batched so that multiple writes are queued and fsync'd in sizes matching one or more disk blocks. Performing more work with each IO will improve throughput.
-
-Write latency is minimal for the WAL write since there are no seeks. The latency is bounded by the time to complete the write and fsync calls.
-
-### Queries
-
-Query throughput is directly related to how many blocks can be read in a period of time. The index structure contains enough information to determine whether one or multiple blocks can be read in a single IO.
-
-Query latency is determined by how long it takes to find and read the relevant blocks. The in-memory index structure contains the offsets and sizes of all blocks for a key. This allows every block to be read in 2 IOPS (seek + read) regardless of the position, structure or size of the file.
-
-### Startup
-
-Startup time is proportional to the number of WAL files, TSM files and tombstone files. WAL files can be read and processed in large batches using the WALIterators. TSM files require reading the index block into memory (5 IOPS/file). Tombstone files are expected to be small and infrequent and would require approximately 2 IOPS/file.
-
-### Compactions
-
-Compactions are IO intensive in that they may need to read and rewrite multiple large TSM files. The throughput of compactions (MB/s), as well as the latency of each compaction, is important to keep consistent even as data sizes grow.
-
-To address these concerns, compactions prioritize old WAL files over optimizing storage/compression, to avoid data being hidden during overload situations. This also accounts for the fact that shards will eventually become cold for writes, so existing data will still be able to be optimized. To maintain consistent performance, the number of each type of file processed, as well as the size of each file processed, is bounded.
-
-### Memory Footprint
-
-The memory footprint should not grow unbounded due to additional files or due to series keys that are large or numerous. Some options for addressing this concern are covered in the [Design Options] section.
-
-## Concurrency
-
-The main concern with concurrency is that reads and writes should not block each other. Writes add entries to the Cache and append entries to the WAL. During queries, the contention points will be the Cache and existing TSM files. Since the Cache and TSM file data is only accessed through the engine by the cursors, several strategies can be used to improve concurrency.
-
-1. Cached series data is returned to cursors as a copy. Since cache snapshots are released following compaction, cursor iteration and writes to the same series could block each other. Iterating over copies of the values can relieve some of this contention (a minimal sketch of this copy-on-read approach follows this list).
-2. TSM data values returned by the engine are new references to Values rather than direct access to the actual TSM files. This means that the `Engine`, through the `FileStore`, can limit contention.
-3. Compactions are the only place where new TSM files are added and removed. Since this is a serial, continuously running process, file contention is minimized.
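
The following is a minimal, hypothetical sketch of strategy 1, copy-on-read from the cache. The `cache` type, its fields, and the `Value` struct are stand-ins for illustration and are not the engine's actual types.

```
package cachesketch

import "sync"

// Value stands in for the engine's timestamp/value pair.
type Value struct {
	UnixNano int64
	V        float64
}

// cache holds recently written series data guarded by a single lock.
type cache struct {
	mu     sync.RWMutex
	series map[string][]Value
}

func newCache() *cache {
	return &cache{series: make(map[string][]Value)}
}

// Values returns a copy of the cached values for key. The read lock is
// held only long enough to copy the slice, so a cursor iterating the copy
// never blocks later writes to the same series.
func (c *cache) Values(key string) []Value {
	c.mu.RLock()
	defer c.mu.RUnlock()
	src := c.series[key]
	out := make([]Value, len(src))
	copy(out, src)
	return out
}

// Write appends values for key under the write lock; readers holding
// copies from earlier Values calls are unaffected.
func (c *cache) Write(key string, values []Value) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.series[key] = append(c.series[key], values...)
}
```

The cost of the copy is bounded by the cache flushing limit, which keeps the per-series slices small.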
-
-## Robustness
-
-The two robustness concerns considered by this design are writes filling the cache, and crash recovery.
-
-### Cache Exhaustion
-
-The cache is used to hold the contents of uncompacted WAL segments in memory until the compaction process has had a chance to convert the write-optimised WAL segments into read-optimised TSM files.
-
-The question arises of what to do when the inbound write rate temporarily exceeds the compaction rate. There are four alternatives:
-
-* block the write until the compaction process catches up
-* cache the write and hope that the compaction process catches up before memory exhaustion occurs
-* evict older cache entries to make room for new writes
-* fail the write and propagate the error back to the database client as a form of back pressure
-
-The current design chooses the last option - failing the writes. While this option reduces the apparent robustness of the database API from the perspective of the clients, it does provide a means by which the database can communicate, via back pressure, the need for clients to temporarily back off. Well-behaved clients should respond to write errors either by discarding the write or by retrying it after a delay, in the hope that the compaction process will eventually catch up. The problem with the first two options is that they may exhaust server resources. The problem with the third option is that queries (which don't touch WAL segments) might silently return incomplete results during compaction periods; with the selected option, the possibility of incomplete queries is at least flagged by the presence of write errors during periods of degraded compaction performance.
-
-### Crash Recovery
-
-Crash recovery is facilitated by two properties: the append-only nature of WAL segments and the write-once nature of TSM files. If the server crashes, incomplete compactions are discarded and the cache is rebuilt from the discovered WAL segments. Compactions then resume in the normal way. Similarly, TSM files are immutable once they have been created and registered with the file store. A compaction may replace an existing TSM file, but the replaced file is not removed from the file system until the replacement file has been created and synced to disk.
-
-# Errata
-
-This section is reserved for errata. In cases where the document is incorrect or inconsistent, such errata will be noted here, with the contents of this section taking precedence over text elsewhere in the document in the case of discrepancies. Future full revisions of this document will fold the errata text back into the body of the document.
-
-# Revisions
-
-## 14 February, 2016
-
-* refined description of cache behaviour and robustness to reflect the current design based on snapshots. Most references to checkpoints and evictions have been removed.
See discussion here - https://goo.gl/L7AzVu - -##11 November, 2015 - -* initial design published \ No newline at end of file diff --git a/tsdb/engine/tsm1/array_cursor.gen.go b/tsdb/engine/tsm1/array_cursor.gen.go deleted file mode 100644 index 7b786d31626..00000000000 --- a/tsdb/engine/tsm1/array_cursor.gen.go +++ /dev/null @@ -1,1425 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! -// Source: array_cursor.gen.go.tmpl - -package tsm1 - -import ( - "sort" - - "github.com/influxdata/influxdb/v2/tsdb" -) - -// Array Cursors - -type floatArrayAscendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - buf *tsdb.FloatArray - values *tsdb.FloatArray - pos int - keyCursor *KeyCursor - } - - end int64 - res *tsdb.FloatArray -} - -func newFloatArrayAscendingCursor() *floatArrayAscendingCursor { - c := &floatArrayAscendingCursor{ - res: tsdb.NewFloatArrayLen(tsdb.DefaultMaxPointsPerBlock), - } - c.tsm.buf = tsdb.NewFloatArrayLen(tsdb.DefaultMaxPointsPerBlock) - return c -} - -func (c *floatArrayAscendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { - c.end = end - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadFloatArrayBlock(c.tsm.buf) - c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { - return c.tsm.values.Timestamps[i] >= seek - }) -} - -func (c *floatArrayAscendingCursor) Err() error { return nil } - -func (c *floatArrayAscendingCursor) Stats() tsdb.CursorStats { - return tsdb.CursorStats{} -} - -// close closes the cursor and any dependent cursors. -func (c *floatArrayAscendingCursor) Close() { - if c.tsm.keyCursor != nil { - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - } - c.cache.values = nil - c.tsm.values = nil -} - -// Next returns the next key/value for the cursor. -func (c *floatArrayAscendingCursor) Next() *tsdb.FloatArray { - pos := 0 - cvals := c.cache.values - tvals := c.tsm.values - - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - for pos < len(c.res.Timestamps) && c.tsm.pos < len(tvals.Timestamps) && c.cache.pos < len(cvals) { - ckey := cvals[c.cache.pos].UnixNano() - tkey := tvals.Timestamps[c.tsm.pos] - if ckey == tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value - c.cache.pos++ - c.tsm.pos++ - } else if ckey < tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value - c.cache.pos++ - } else { - c.res.Timestamps[pos] = tkey - c.res.Values[pos] = tvals.Values[c.tsm.pos] - c.tsm.pos++ - } - - pos++ - - if c.tsm.pos >= len(tvals.Timestamps) { - tvals = c.nextTSM() - } - } - - if pos < len(c.res.Timestamps) { - if c.tsm.pos < len(tvals.Timestamps) { - if pos == 0 && len(c.res.Timestamps) >= len(tvals.Timestamps) { - // optimization: all points can be served from TSM data because - // we need the entire block and the block completely fits within - // the buffer. 
- copy(c.res.Timestamps, tvals.Timestamps) - pos += copy(c.res.Values, tvals.Values) - c.nextTSM() - } else { - // copy as much as we can - n := copy(c.res.Timestamps[pos:], tvals.Timestamps[c.tsm.pos:]) - copy(c.res.Values[pos:], tvals.Values[c.tsm.pos:]) - pos += n - c.tsm.pos += n - if c.tsm.pos >= len(tvals.Timestamps) { - c.nextTSM() - } - } - } - - if c.cache.pos < len(cvals) { - // TSM was exhausted - for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { - c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value - pos++ - c.cache.pos++ - } - } - } - - // Strip timestamps from after the end time. - if pos > 0 && c.res.Timestamps[pos-1] > c.end { - pos -= 2 - for pos >= 0 && c.res.Timestamps[pos] > c.end { - pos-- - } - pos++ - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -func (c *floatArrayAscendingCursor) nextTSM() *tsdb.FloatArray { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadFloatArrayBlock(c.tsm.buf) - c.tsm.pos = 0 - return c.tsm.values -} - -type floatArrayDescendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - buf *tsdb.FloatArray - values *tsdb.FloatArray - pos int - keyCursor *KeyCursor - } - - end int64 - res *tsdb.FloatArray -} - -func newFloatArrayDescendingCursor() *floatArrayDescendingCursor { - c := &floatArrayDescendingCursor{ - res: tsdb.NewFloatArrayLen(tsdb.DefaultMaxPointsPerBlock), - } - c.tsm.buf = tsdb.NewFloatArrayLen(tsdb.DefaultMaxPointsPerBlock) - return c -} - -func (c *floatArrayDescendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { - // Search for the time value greater than the seek time (not included) - // and then move our position back one which will include the values in - // our time range. 
- c.end = end - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() > seek - }) - c.cache.pos-- - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadFloatArrayBlock(c.tsm.buf) - c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { - return c.tsm.values.Timestamps[i] > seek - }) - c.tsm.pos-- -} - -func (c *floatArrayDescendingCursor) Err() error { return nil } - -func (c *floatArrayDescendingCursor) Stats() tsdb.CursorStats { - return tsdb.CursorStats{} -} - -func (c *floatArrayDescendingCursor) Close() { - if c.tsm.keyCursor != nil { - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - } - c.cache.values = nil - c.tsm.values = nil -} - -func (c *floatArrayDescendingCursor) Next() *tsdb.FloatArray { - pos := 0 - cvals := c.cache.values - tvals := c.tsm.values - - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 && c.cache.pos >= 0 { - ckey := cvals[c.cache.pos].UnixNano() - tkey := tvals.Timestamps[c.tsm.pos] - if ckey == tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value - c.cache.pos-- - c.tsm.pos-- - } else if ckey > tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value - c.cache.pos-- - } else { - c.res.Timestamps[pos] = tkey - c.res.Values[pos] = tvals.Values[c.tsm.pos] - c.tsm.pos-- - } - - pos++ - - if c.tsm.pos < 0 { - tvals = c.nextTSM() - } - } - - if pos < len(c.res.Timestamps) { - // cache was exhausted - if c.tsm.pos >= 0 { - for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 { - c.res.Timestamps[pos] = tvals.Timestamps[c.tsm.pos] - c.res.Values[pos] = tvals.Values[c.tsm.pos] - pos++ - c.tsm.pos-- - if c.tsm.pos < 0 { - tvals = c.nextTSM() - } - } - } - - if c.cache.pos >= 0 { - // TSM was exhausted - for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { - c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(FloatValue).value - pos++ - c.cache.pos-- - } - } - } - - // Strip timestamps from before the end time. 
- if pos > 0 && c.res.Timestamps[pos-1] < c.end { - pos -= 2 - for pos >= 0 && c.res.Timestamps[pos] < c.end { - pos-- - } - pos++ - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -func (c *floatArrayDescendingCursor) nextTSM() *tsdb.FloatArray { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadFloatArrayBlock(c.tsm.buf) - c.tsm.pos = len(c.tsm.values.Timestamps) - 1 - return c.tsm.values -} - -type integerArrayAscendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - buf *tsdb.IntegerArray - values *tsdb.IntegerArray - pos int - keyCursor *KeyCursor - } - - end int64 - res *tsdb.IntegerArray -} - -func newIntegerArrayAscendingCursor() *integerArrayAscendingCursor { - c := &integerArrayAscendingCursor{ - res: tsdb.NewIntegerArrayLen(tsdb.DefaultMaxPointsPerBlock), - } - c.tsm.buf = tsdb.NewIntegerArrayLen(tsdb.DefaultMaxPointsPerBlock) - return c -} - -func (c *integerArrayAscendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { - c.end = end - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadIntegerArrayBlock(c.tsm.buf) - c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { - return c.tsm.values.Timestamps[i] >= seek - }) -} - -func (c *integerArrayAscendingCursor) Err() error { return nil } - -func (c *integerArrayAscendingCursor) Stats() tsdb.CursorStats { - return tsdb.CursorStats{} -} - -// close closes the cursor and any dependent cursors. -func (c *integerArrayAscendingCursor) Close() { - if c.tsm.keyCursor != nil { - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - } - c.cache.values = nil - c.tsm.values = nil -} - -// Next returns the next key/value for the cursor. -func (c *integerArrayAscendingCursor) Next() *tsdb.IntegerArray { - pos := 0 - cvals := c.cache.values - tvals := c.tsm.values - - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - for pos < len(c.res.Timestamps) && c.tsm.pos < len(tvals.Timestamps) && c.cache.pos < len(cvals) { - ckey := cvals[c.cache.pos].UnixNano() - tkey := tvals.Timestamps[c.tsm.pos] - if ckey == tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value - c.cache.pos++ - c.tsm.pos++ - } else if ckey < tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value - c.cache.pos++ - } else { - c.res.Timestamps[pos] = tkey - c.res.Values[pos] = tvals.Values[c.tsm.pos] - c.tsm.pos++ - } - - pos++ - - if c.tsm.pos >= len(tvals.Timestamps) { - tvals = c.nextTSM() - } - } - - if pos < len(c.res.Timestamps) { - if c.tsm.pos < len(tvals.Timestamps) { - if pos == 0 && len(c.res.Timestamps) >= len(tvals.Timestamps) { - // optimization: all points can be served from TSM data because - // we need the entire block and the block completely fits within - // the buffer. 
- copy(c.res.Timestamps, tvals.Timestamps) - pos += copy(c.res.Values, tvals.Values) - c.nextTSM() - } else { - // copy as much as we can - n := copy(c.res.Timestamps[pos:], tvals.Timestamps[c.tsm.pos:]) - copy(c.res.Values[pos:], tvals.Values[c.tsm.pos:]) - pos += n - c.tsm.pos += n - if c.tsm.pos >= len(tvals.Timestamps) { - c.nextTSM() - } - } - } - - if c.cache.pos < len(cvals) { - // TSM was exhausted - for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { - c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value - pos++ - c.cache.pos++ - } - } - } - - // Strip timestamps from after the end time. - if pos > 0 && c.res.Timestamps[pos-1] > c.end { - pos -= 2 - for pos >= 0 && c.res.Timestamps[pos] > c.end { - pos-- - } - pos++ - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -func (c *integerArrayAscendingCursor) nextTSM() *tsdb.IntegerArray { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadIntegerArrayBlock(c.tsm.buf) - c.tsm.pos = 0 - return c.tsm.values -} - -type integerArrayDescendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - buf *tsdb.IntegerArray - values *tsdb.IntegerArray - pos int - keyCursor *KeyCursor - } - - end int64 - res *tsdb.IntegerArray -} - -func newIntegerArrayDescendingCursor() *integerArrayDescendingCursor { - c := &integerArrayDescendingCursor{ - res: tsdb.NewIntegerArrayLen(tsdb.DefaultMaxPointsPerBlock), - } - c.tsm.buf = tsdb.NewIntegerArrayLen(tsdb.DefaultMaxPointsPerBlock) - return c -} - -func (c *integerArrayDescendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { - // Search for the time value greater than the seek time (not included) - // and then move our position back one which will include the values in - // our time range. 
- c.end = end - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() > seek - }) - c.cache.pos-- - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadIntegerArrayBlock(c.tsm.buf) - c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { - return c.tsm.values.Timestamps[i] > seek - }) - c.tsm.pos-- -} - -func (c *integerArrayDescendingCursor) Err() error { return nil } - -func (c *integerArrayDescendingCursor) Stats() tsdb.CursorStats { - return tsdb.CursorStats{} -} - -func (c *integerArrayDescendingCursor) Close() { - if c.tsm.keyCursor != nil { - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - } - c.cache.values = nil - c.tsm.values = nil -} - -func (c *integerArrayDescendingCursor) Next() *tsdb.IntegerArray { - pos := 0 - cvals := c.cache.values - tvals := c.tsm.values - - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 && c.cache.pos >= 0 { - ckey := cvals[c.cache.pos].UnixNano() - tkey := tvals.Timestamps[c.tsm.pos] - if ckey == tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value - c.cache.pos-- - c.tsm.pos-- - } else if ckey > tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value - c.cache.pos-- - } else { - c.res.Timestamps[pos] = tkey - c.res.Values[pos] = tvals.Values[c.tsm.pos] - c.tsm.pos-- - } - - pos++ - - if c.tsm.pos < 0 { - tvals = c.nextTSM() - } - } - - if pos < len(c.res.Timestamps) { - // cache was exhausted - if c.tsm.pos >= 0 { - for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 { - c.res.Timestamps[pos] = tvals.Timestamps[c.tsm.pos] - c.res.Values[pos] = tvals.Values[c.tsm.pos] - pos++ - c.tsm.pos-- - if c.tsm.pos < 0 { - tvals = c.nextTSM() - } - } - } - - if c.cache.pos >= 0 { - // TSM was exhausted - for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { - c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(IntegerValue).value - pos++ - c.cache.pos-- - } - } - } - - // Strip timestamps from before the end time. 
- if pos > 0 && c.res.Timestamps[pos-1] < c.end { - pos -= 2 - for pos >= 0 && c.res.Timestamps[pos] < c.end { - pos-- - } - pos++ - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -func (c *integerArrayDescendingCursor) nextTSM() *tsdb.IntegerArray { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadIntegerArrayBlock(c.tsm.buf) - c.tsm.pos = len(c.tsm.values.Timestamps) - 1 - return c.tsm.values -} - -type unsignedArrayAscendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - buf *tsdb.UnsignedArray - values *tsdb.UnsignedArray - pos int - keyCursor *KeyCursor - } - - end int64 - res *tsdb.UnsignedArray -} - -func newUnsignedArrayAscendingCursor() *unsignedArrayAscendingCursor { - c := &unsignedArrayAscendingCursor{ - res: tsdb.NewUnsignedArrayLen(tsdb.DefaultMaxPointsPerBlock), - } - c.tsm.buf = tsdb.NewUnsignedArrayLen(tsdb.DefaultMaxPointsPerBlock) - return c -} - -func (c *unsignedArrayAscendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { - c.end = end - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadUnsignedArrayBlock(c.tsm.buf) - c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { - return c.tsm.values.Timestamps[i] >= seek - }) -} - -func (c *unsignedArrayAscendingCursor) Err() error { return nil } - -func (c *unsignedArrayAscendingCursor) Stats() tsdb.CursorStats { - return tsdb.CursorStats{} -} - -// close closes the cursor and any dependent cursors. -func (c *unsignedArrayAscendingCursor) Close() { - if c.tsm.keyCursor != nil { - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - } - c.cache.values = nil - c.tsm.values = nil -} - -// Next returns the next key/value for the cursor. -func (c *unsignedArrayAscendingCursor) Next() *tsdb.UnsignedArray { - pos := 0 - cvals := c.cache.values - tvals := c.tsm.values - - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - for pos < len(c.res.Timestamps) && c.tsm.pos < len(tvals.Timestamps) && c.cache.pos < len(cvals) { - ckey := cvals[c.cache.pos].UnixNano() - tkey := tvals.Timestamps[c.tsm.pos] - if ckey == tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value - c.cache.pos++ - c.tsm.pos++ - } else if ckey < tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value - c.cache.pos++ - } else { - c.res.Timestamps[pos] = tkey - c.res.Values[pos] = tvals.Values[c.tsm.pos] - c.tsm.pos++ - } - - pos++ - - if c.tsm.pos >= len(tvals.Timestamps) { - tvals = c.nextTSM() - } - } - - if pos < len(c.res.Timestamps) { - if c.tsm.pos < len(tvals.Timestamps) { - if pos == 0 && len(c.res.Timestamps) >= len(tvals.Timestamps) { - // optimization: all points can be served from TSM data because - // we need the entire block and the block completely fits within - // the buffer. 
- copy(c.res.Timestamps, tvals.Timestamps) - pos += copy(c.res.Values, tvals.Values) - c.nextTSM() - } else { - // copy as much as we can - n := copy(c.res.Timestamps[pos:], tvals.Timestamps[c.tsm.pos:]) - copy(c.res.Values[pos:], tvals.Values[c.tsm.pos:]) - pos += n - c.tsm.pos += n - if c.tsm.pos >= len(tvals.Timestamps) { - c.nextTSM() - } - } - } - - if c.cache.pos < len(cvals) { - // TSM was exhausted - for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { - c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value - pos++ - c.cache.pos++ - } - } - } - - // Strip timestamps from after the end time. - if pos > 0 && c.res.Timestamps[pos-1] > c.end { - pos -= 2 - for pos >= 0 && c.res.Timestamps[pos] > c.end { - pos-- - } - pos++ - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -func (c *unsignedArrayAscendingCursor) nextTSM() *tsdb.UnsignedArray { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadUnsignedArrayBlock(c.tsm.buf) - c.tsm.pos = 0 - return c.tsm.values -} - -type unsignedArrayDescendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - buf *tsdb.UnsignedArray - values *tsdb.UnsignedArray - pos int - keyCursor *KeyCursor - } - - end int64 - res *tsdb.UnsignedArray -} - -func newUnsignedArrayDescendingCursor() *unsignedArrayDescendingCursor { - c := &unsignedArrayDescendingCursor{ - res: tsdb.NewUnsignedArrayLen(tsdb.DefaultMaxPointsPerBlock), - } - c.tsm.buf = tsdb.NewUnsignedArrayLen(tsdb.DefaultMaxPointsPerBlock) - return c -} - -func (c *unsignedArrayDescendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { - // Search for the time value greater than the seek time (not included) - // and then move our position back one which will include the values in - // our time range. 
- c.end = end - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() > seek - }) - c.cache.pos-- - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadUnsignedArrayBlock(c.tsm.buf) - c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { - return c.tsm.values.Timestamps[i] > seek - }) - c.tsm.pos-- -} - -func (c *unsignedArrayDescendingCursor) Err() error { return nil } - -func (c *unsignedArrayDescendingCursor) Stats() tsdb.CursorStats { - return tsdb.CursorStats{} -} - -func (c *unsignedArrayDescendingCursor) Close() { - if c.tsm.keyCursor != nil { - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - } - c.cache.values = nil - c.tsm.values = nil -} - -func (c *unsignedArrayDescendingCursor) Next() *tsdb.UnsignedArray { - pos := 0 - cvals := c.cache.values - tvals := c.tsm.values - - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 && c.cache.pos >= 0 { - ckey := cvals[c.cache.pos].UnixNano() - tkey := tvals.Timestamps[c.tsm.pos] - if ckey == tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value - c.cache.pos-- - c.tsm.pos-- - } else if ckey > tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value - c.cache.pos-- - } else { - c.res.Timestamps[pos] = tkey - c.res.Values[pos] = tvals.Values[c.tsm.pos] - c.tsm.pos-- - } - - pos++ - - if c.tsm.pos < 0 { - tvals = c.nextTSM() - } - } - - if pos < len(c.res.Timestamps) { - // cache was exhausted - if c.tsm.pos >= 0 { - for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 { - c.res.Timestamps[pos] = tvals.Timestamps[c.tsm.pos] - c.res.Values[pos] = tvals.Values[c.tsm.pos] - pos++ - c.tsm.pos-- - if c.tsm.pos < 0 { - tvals = c.nextTSM() - } - } - } - - if c.cache.pos >= 0 { - // TSM was exhausted - for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { - c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(UnsignedValue).value - pos++ - c.cache.pos-- - } - } - } - - // Strip timestamps from before the end time. 
- if pos > 0 && c.res.Timestamps[pos-1] < c.end { - pos -= 2 - for pos >= 0 && c.res.Timestamps[pos] < c.end { - pos-- - } - pos++ - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -func (c *unsignedArrayDescendingCursor) nextTSM() *tsdb.UnsignedArray { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadUnsignedArrayBlock(c.tsm.buf) - c.tsm.pos = len(c.tsm.values.Timestamps) - 1 - return c.tsm.values -} - -type stringArrayAscendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - buf *tsdb.StringArray - values *tsdb.StringArray - pos int - keyCursor *KeyCursor - } - - end int64 - res *tsdb.StringArray -} - -func newStringArrayAscendingCursor() *stringArrayAscendingCursor { - c := &stringArrayAscendingCursor{ - res: tsdb.NewStringArrayLen(tsdb.DefaultMaxPointsPerBlock), - } - c.tsm.buf = tsdb.NewStringArrayLen(tsdb.DefaultMaxPointsPerBlock) - return c -} - -func (c *stringArrayAscendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { - c.end = end - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadStringArrayBlock(c.tsm.buf) - c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { - return c.tsm.values.Timestamps[i] >= seek - }) -} - -func (c *stringArrayAscendingCursor) Err() error { return nil } - -func (c *stringArrayAscendingCursor) Stats() tsdb.CursorStats { - return tsdb.CursorStats{} -} - -// close closes the cursor and any dependent cursors. -func (c *stringArrayAscendingCursor) Close() { - if c.tsm.keyCursor != nil { - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - } - c.cache.values = nil - c.tsm.values = nil -} - -// Next returns the next key/value for the cursor. -func (c *stringArrayAscendingCursor) Next() *tsdb.StringArray { - pos := 0 - cvals := c.cache.values - tvals := c.tsm.values - - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - for pos < len(c.res.Timestamps) && c.tsm.pos < len(tvals.Timestamps) && c.cache.pos < len(cvals) { - ckey := cvals[c.cache.pos].UnixNano() - tkey := tvals.Timestamps[c.tsm.pos] - if ckey == tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value - c.cache.pos++ - c.tsm.pos++ - } else if ckey < tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value - c.cache.pos++ - } else { - c.res.Timestamps[pos] = tkey - c.res.Values[pos] = tvals.Values[c.tsm.pos] - c.tsm.pos++ - } - - pos++ - - if c.tsm.pos >= len(tvals.Timestamps) { - tvals = c.nextTSM() - } - } - - if pos < len(c.res.Timestamps) { - if c.tsm.pos < len(tvals.Timestamps) { - if pos == 0 && len(c.res.Timestamps) >= len(tvals.Timestamps) { - // optimization: all points can be served from TSM data because - // we need the entire block and the block completely fits within - // the buffer. 
- copy(c.res.Timestamps, tvals.Timestamps) - pos += copy(c.res.Values, tvals.Values) - c.nextTSM() - } else { - // copy as much as we can - n := copy(c.res.Timestamps[pos:], tvals.Timestamps[c.tsm.pos:]) - copy(c.res.Values[pos:], tvals.Values[c.tsm.pos:]) - pos += n - c.tsm.pos += n - if c.tsm.pos >= len(tvals.Timestamps) { - c.nextTSM() - } - } - } - - if c.cache.pos < len(cvals) { - // TSM was exhausted - for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { - c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value - pos++ - c.cache.pos++ - } - } - } - - // Strip timestamps from after the end time. - if pos > 0 && c.res.Timestamps[pos-1] > c.end { - pos -= 2 - for pos >= 0 && c.res.Timestamps[pos] > c.end { - pos-- - } - pos++ - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -func (c *stringArrayAscendingCursor) nextTSM() *tsdb.StringArray { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadStringArrayBlock(c.tsm.buf) - c.tsm.pos = 0 - return c.tsm.values -} - -type stringArrayDescendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - buf *tsdb.StringArray - values *tsdb.StringArray - pos int - keyCursor *KeyCursor - } - - end int64 - res *tsdb.StringArray -} - -func newStringArrayDescendingCursor() *stringArrayDescendingCursor { - c := &stringArrayDescendingCursor{ - res: tsdb.NewStringArrayLen(tsdb.DefaultMaxPointsPerBlock), - } - c.tsm.buf = tsdb.NewStringArrayLen(tsdb.DefaultMaxPointsPerBlock) - return c -} - -func (c *stringArrayDescendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { - // Search for the time value greater than the seek time (not included) - // and then move our position back one which will include the values in - // our time range. 
- c.end = end - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() > seek - }) - c.cache.pos-- - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadStringArrayBlock(c.tsm.buf) - c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { - return c.tsm.values.Timestamps[i] > seek - }) - c.tsm.pos-- -} - -func (c *stringArrayDescendingCursor) Err() error { return nil } - -func (c *stringArrayDescendingCursor) Stats() tsdb.CursorStats { - return tsdb.CursorStats{} -} - -func (c *stringArrayDescendingCursor) Close() { - if c.tsm.keyCursor != nil { - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - } - c.cache.values = nil - c.tsm.values = nil -} - -func (c *stringArrayDescendingCursor) Next() *tsdb.StringArray { - pos := 0 - cvals := c.cache.values - tvals := c.tsm.values - - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 && c.cache.pos >= 0 { - ckey := cvals[c.cache.pos].UnixNano() - tkey := tvals.Timestamps[c.tsm.pos] - if ckey == tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value - c.cache.pos-- - c.tsm.pos-- - } else if ckey > tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value - c.cache.pos-- - } else { - c.res.Timestamps[pos] = tkey - c.res.Values[pos] = tvals.Values[c.tsm.pos] - c.tsm.pos-- - } - - pos++ - - if c.tsm.pos < 0 { - tvals = c.nextTSM() - } - } - - if pos < len(c.res.Timestamps) { - // cache was exhausted - if c.tsm.pos >= 0 { - for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 { - c.res.Timestamps[pos] = tvals.Timestamps[c.tsm.pos] - c.res.Values[pos] = tvals.Values[c.tsm.pos] - pos++ - c.tsm.pos-- - if c.tsm.pos < 0 { - tvals = c.nextTSM() - } - } - } - - if c.cache.pos >= 0 { - // TSM was exhausted - for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { - c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(StringValue).value - pos++ - c.cache.pos-- - } - } - } - - // Strip timestamps from before the end time. 
- if pos > 0 && c.res.Timestamps[pos-1] < c.end { - pos -= 2 - for pos >= 0 && c.res.Timestamps[pos] < c.end { - pos-- - } - pos++ - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -func (c *stringArrayDescendingCursor) nextTSM() *tsdb.StringArray { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadStringArrayBlock(c.tsm.buf) - c.tsm.pos = len(c.tsm.values.Timestamps) - 1 - return c.tsm.values -} - -type booleanArrayAscendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - buf *tsdb.BooleanArray - values *tsdb.BooleanArray - pos int - keyCursor *KeyCursor - } - - end int64 - res *tsdb.BooleanArray -} - -func newBooleanArrayAscendingCursor() *booleanArrayAscendingCursor { - c := &booleanArrayAscendingCursor{ - res: tsdb.NewBooleanArrayLen(tsdb.DefaultMaxPointsPerBlock), - } - c.tsm.buf = tsdb.NewBooleanArrayLen(tsdb.DefaultMaxPointsPerBlock) - return c -} - -func (c *booleanArrayAscendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { - c.end = end - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadBooleanArrayBlock(c.tsm.buf) - c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { - return c.tsm.values.Timestamps[i] >= seek - }) -} - -func (c *booleanArrayAscendingCursor) Err() error { return nil } - -func (c *booleanArrayAscendingCursor) Stats() tsdb.CursorStats { - return tsdb.CursorStats{} -} - -// close closes the cursor and any dependent cursors. -func (c *booleanArrayAscendingCursor) Close() { - if c.tsm.keyCursor != nil { - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - } - c.cache.values = nil - c.tsm.values = nil -} - -// Next returns the next key/value for the cursor. -func (c *booleanArrayAscendingCursor) Next() *tsdb.BooleanArray { - pos := 0 - cvals := c.cache.values - tvals := c.tsm.values - - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - for pos < len(c.res.Timestamps) && c.tsm.pos < len(tvals.Timestamps) && c.cache.pos < len(cvals) { - ckey := cvals[c.cache.pos].UnixNano() - tkey := tvals.Timestamps[c.tsm.pos] - if ckey == tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value - c.cache.pos++ - c.tsm.pos++ - } else if ckey < tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value - c.cache.pos++ - } else { - c.res.Timestamps[pos] = tkey - c.res.Values[pos] = tvals.Values[c.tsm.pos] - c.tsm.pos++ - } - - pos++ - - if c.tsm.pos >= len(tvals.Timestamps) { - tvals = c.nextTSM() - } - } - - if pos < len(c.res.Timestamps) { - if c.tsm.pos < len(tvals.Timestamps) { - if pos == 0 && len(c.res.Timestamps) >= len(tvals.Timestamps) { - // optimization: all points can be served from TSM data because - // we need the entire block and the block completely fits within - // the buffer. 
- copy(c.res.Timestamps, tvals.Timestamps) - pos += copy(c.res.Values, tvals.Values) - c.nextTSM() - } else { - // copy as much as we can - n := copy(c.res.Timestamps[pos:], tvals.Timestamps[c.tsm.pos:]) - copy(c.res.Values[pos:], tvals.Values[c.tsm.pos:]) - pos += n - c.tsm.pos += n - if c.tsm.pos >= len(tvals.Timestamps) { - c.nextTSM() - } - } - } - - if c.cache.pos < len(cvals) { - // TSM was exhausted - for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { - c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value - pos++ - c.cache.pos++ - } - } - } - - // Strip timestamps from after the end time. - if pos > 0 && c.res.Timestamps[pos-1] > c.end { - pos -= 2 - for pos >= 0 && c.res.Timestamps[pos] > c.end { - pos-- - } - pos++ - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -func (c *booleanArrayAscendingCursor) nextTSM() *tsdb.BooleanArray { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadBooleanArrayBlock(c.tsm.buf) - c.tsm.pos = 0 - return c.tsm.values -} - -type booleanArrayDescendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - buf *tsdb.BooleanArray - values *tsdb.BooleanArray - pos int - keyCursor *KeyCursor - } - - end int64 - res *tsdb.BooleanArray -} - -func newBooleanArrayDescendingCursor() *booleanArrayDescendingCursor { - c := &booleanArrayDescendingCursor{ - res: tsdb.NewBooleanArrayLen(tsdb.DefaultMaxPointsPerBlock), - } - c.tsm.buf = tsdb.NewBooleanArrayLen(tsdb.DefaultMaxPointsPerBlock) - return c -} - -func (c *booleanArrayDescendingCursor) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { - // Search for the time value greater than the seek time (not included) - // and then move our position back one which will include the values in - // our time range. 
- c.end = end - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() > seek - }) - c.cache.pos-- - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadBooleanArrayBlock(c.tsm.buf) - c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { - return c.tsm.values.Timestamps[i] > seek - }) - c.tsm.pos-- -} - -func (c *booleanArrayDescendingCursor) Err() error { return nil } - -func (c *booleanArrayDescendingCursor) Stats() tsdb.CursorStats { - return tsdb.CursorStats{} -} - -func (c *booleanArrayDescendingCursor) Close() { - if c.tsm.keyCursor != nil { - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - } - c.cache.values = nil - c.tsm.values = nil -} - -func (c *booleanArrayDescendingCursor) Next() *tsdb.BooleanArray { - pos := 0 - cvals := c.cache.values - tvals := c.tsm.values - - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 && c.cache.pos >= 0 { - ckey := cvals[c.cache.pos].UnixNano() - tkey := tvals.Timestamps[c.tsm.pos] - if ckey == tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value - c.cache.pos-- - c.tsm.pos-- - } else if ckey > tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value - c.cache.pos-- - } else { - c.res.Timestamps[pos] = tkey - c.res.Values[pos] = tvals.Values[c.tsm.pos] - c.tsm.pos-- - } - - pos++ - - if c.tsm.pos < 0 { - tvals = c.nextTSM() - } - } - - if pos < len(c.res.Timestamps) { - // cache was exhausted - if c.tsm.pos >= 0 { - for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 { - c.res.Timestamps[pos] = tvals.Timestamps[c.tsm.pos] - c.res.Values[pos] = tvals.Values[c.tsm.pos] - pos++ - c.tsm.pos-- - if c.tsm.pos < 0 { - tvals = c.nextTSM() - } - } - } - - if c.cache.pos >= 0 { - // TSM was exhausted - for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { - c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].(BooleanValue).value - pos++ - c.cache.pos-- - } - } - } - - // Strip timestamps from before the end time. - if pos > 0 && c.res.Timestamps[pos-1] < c.end { - pos -= 2 - for pos >= 0 && c.res.Timestamps[pos] < c.end { - pos-- - } - pos++ - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -func (c *booleanArrayDescendingCursor) nextTSM() *tsdb.BooleanArray { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadBooleanArrayBlock(c.tsm.buf) - c.tsm.pos = len(c.tsm.values.Timestamps) - 1 - return c.tsm.values -} diff --git a/tsdb/engine/tsm1/array_cursor.gen.go.tmpl b/tsdb/engine/tsm1/array_cursor.gen.go.tmpl deleted file mode 100644 index 02a209eeea1..00000000000 --- a/tsdb/engine/tsm1/array_cursor.gen.go.tmpl +++ /dev/null @@ -1,301 +0,0 @@ -package tsm1 - -import ( - "sort" - - "github.com/influxdata/influxdb/v2/tsdb" -) - -// Array Cursors - -{{range .}} -{{$arrayType := print "*tsdb." 
.Name "Array"}} -{{$type := print .name "ArrayAscendingCursor"}} -{{$Type := print .Name "ArrayAscendingCursor"}} - -type {{$type}} struct { - cache struct { - values Values - pos int - } - - tsm struct { - buf {{$arrayType}} - values {{$arrayType}} - pos int - keyCursor *KeyCursor - } - - end int64 - res {{$arrayType}} -} - -func new{{$Type}}() *{{$type}} { - c := &{{$type}}{ - res: tsdb.New{{.Name}}ArrayLen(tsdb.DefaultMaxPointsPerBlock), - } - c.tsm.buf = tsdb.New{{.Name}}ArrayLen(tsdb.DefaultMaxPointsPerBlock) - return c -} - -func (c *{{$type}}) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { -c.end = end - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.Read{{.Name}}ArrayBlock(c.tsm.buf) - c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { - return c.tsm.values.Timestamps[i] >= seek - }) -} - -func (c *{{$type}}) Err() error { return nil } - -func (c *{{$type}}) Stats() tsdb.CursorStats { - return tsdb.CursorStats{} -} - -// close closes the cursor and any dependent cursors. -func (c *{{$type}}) Close() { - if c.tsm.keyCursor != nil { - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - } - c.cache.values = nil - c.tsm.values = nil -} - -// Next returns the next key/value for the cursor. -func (c *{{$type}}) Next() {{$arrayType}} { - pos := 0 - cvals := c.cache.values - tvals := c.tsm.values - - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - for pos < len(c.res.Timestamps) && c.tsm.pos < len(tvals.Timestamps) && c.cache.pos < len(cvals) { - ckey := cvals[c.cache.pos].UnixNano() - tkey := tvals.Timestamps[c.tsm.pos] - if ckey == tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value - c.cache.pos++ - c.tsm.pos++ - } else if ckey < tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value - c.cache.pos++ - } else { - c.res.Timestamps[pos] = tkey - c.res.Values[pos] = tvals.Values[c.tsm.pos] - c.tsm.pos++ - } - - pos++ - - if c.tsm.pos >= len(tvals.Timestamps) { - tvals = c.nextTSM() - } - } - - if pos < len(c.res.Timestamps) { - if c.tsm.pos < len(tvals.Timestamps) { - if pos == 0 && len(c.res.Timestamps) >= len(tvals.Timestamps){ - // optimization: all points can be served from TSM data because - // we need the entire block and the block completely fits within - // the buffer. - copy(c.res.Timestamps, tvals.Timestamps) - pos += copy(c.res.Values, tvals.Values) - c.nextTSM() - } else { - // copy as much as we can - n := copy(c.res.Timestamps[pos:], tvals.Timestamps[c.tsm.pos:]) - copy(c.res.Values[pos:], tvals.Values[c.tsm.pos:]) - pos += n - c.tsm.pos += n - if c.tsm.pos >= len(tvals.Timestamps) { - c.nextTSM() - } - } - } - - if c.cache.pos < len(cvals) { - // TSM was exhausted - for pos < len(c.res.Timestamps) && c.cache.pos < len(cvals) { - c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value - pos++ - c.cache.pos++ - } - } - } - - // Strip timestamps from after the end time. 
- if pos > 0 && c.res.Timestamps[pos-1] > c.end { - pos -= 2 - for pos >= 0 && c.res.Timestamps[pos] > c.end { - pos-- - } - pos++ - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -func (c *{{$type}}) nextTSM() {{$arrayType}} { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.Read{{.Name}}ArrayBlock(c.tsm.buf) - c.tsm.pos = 0 - return c.tsm.values -} - -{{$type := print .name "ArrayDescendingCursor"}} -{{$Type := print .Name "ArrayDescendingCursor"}} - -type {{$type}} struct { - cache struct { - values Values - pos int - } - - tsm struct { - buf {{$arrayType}} - values {{$arrayType}} - pos int - keyCursor *KeyCursor - } - - end int64 - res {{$arrayType}} -} - -func new{{$Type}}() *{{$type}} { - c := &{{$type}}{ - res: tsdb.New{{.Name}}ArrayLen(tsdb.DefaultMaxPointsPerBlock), - } - c.tsm.buf = tsdb.New{{.Name}}ArrayLen(tsdb.DefaultMaxPointsPerBlock) - return c -} - -func (c *{{$type}}) reset(seek, end int64, cacheValues Values, tsmKeyCursor *KeyCursor) { - // Search for the time value greater than the seek time (not included) - // and then move our position back one which will include the values in - // our time range. - c.end = end - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() > seek - }) - c.cache.pos-- - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.Read{{.Name}}ArrayBlock(c.tsm.buf) - c.tsm.pos = sort.Search(c.tsm.values.Len(), func(i int) bool { - return c.tsm.values.Timestamps[i] > seek - }) - c.tsm.pos-- -} - -func (c *{{$type}}) Err() error { return nil } - -func (c *{{$type}}) Stats() tsdb.CursorStats { - return tsdb.CursorStats{} -} - -func (c *{{$type}}) Close() { - if c.tsm.keyCursor != nil { - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - } - c.cache.values = nil - c.tsm.values = nil -} - -func (c *{{$type}}) Next() {{$arrayType}} { - pos := 0 - cvals := c.cache.values - tvals := c.tsm.values - - c.res.Timestamps = c.res.Timestamps[:cap(c.res.Timestamps)] - c.res.Values = c.res.Values[:cap(c.res.Values)] - - for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 && c.cache.pos >= 0 { - ckey := cvals[c.cache.pos].UnixNano() - tkey := tvals.Timestamps[c.tsm.pos] - if ckey == tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value - c.cache.pos-- - c.tsm.pos-- - } else if ckey > tkey { - c.res.Timestamps[pos] = ckey - c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value - c.cache.pos-- - } else { - c.res.Timestamps[pos] = tkey - c.res.Values[pos] = tvals.Values[c.tsm.pos] - c.tsm.pos-- - } - - pos++ - - if c.tsm.pos < 0 { - tvals = c.nextTSM() - } - } - - if pos < len(c.res.Timestamps) { - // cache was exhausted - if c.tsm.pos >= 0 { - for pos < len(c.res.Timestamps) && c.tsm.pos >= 0 { - c.res.Timestamps[pos] = tvals.Timestamps[c.tsm.pos] - c.res.Values[pos] = tvals.Values[c.tsm.pos] - pos++ - c.tsm.pos-- - if c.tsm.pos < 0 { - tvals = c.nextTSM() - } - } - } - - if c.cache.pos >= 0 { - // TSM was exhausted - for pos < len(c.res.Timestamps) && c.cache.pos >= 0 { - c.res.Timestamps[pos] = cvals[c.cache.pos].UnixNano() - c.res.Values[pos] = cvals[c.cache.pos].({{.Name}}Value).value - pos++ - c.cache.pos-- - } - } - } - - // Strip timestamps from before the end time. 
- if pos > 0 && c.res.Timestamps[pos-1] < c.end { - pos -= 2 - for pos >= 0 && c.res.Timestamps[pos] < c.end { - pos-- - } - pos++ - } - - c.res.Timestamps = c.res.Timestamps[:pos] - c.res.Values = c.res.Values[:pos] - - return c.res -} - -func (c *{{$type}}) nextTSM() {{$arrayType}} { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.Read{{.Name}}ArrayBlock(c.tsm.buf) - c.tsm.pos = len(c.tsm.values.Timestamps) - 1 - return c.tsm.values -} - -{{end}} diff --git a/tsdb/engine/tsm1/array_cursor_iterator.gen.go b/tsdb/engine/tsm1/array_cursor_iterator.gen.go deleted file mode 100644 index 7cddd84f864..00000000000 --- a/tsdb/engine/tsm1/array_cursor_iterator.gen.go +++ /dev/null @@ -1,115 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! -// Source: array_cursor_iterator.gen.go.tmpl - -package tsm1 - -import ( - "context" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" -) - -// buildFloatArrayCursor creates an array cursor for a float field. -func (q *arrayCursorIterator) buildFloatArrayCursor(ctx context.Context, name []byte, tags models.Tags, field string, opt query.IteratorOptions) tsdb.FloatArrayCursor { - key := q.seriesFieldKeyBytes(name, tags, field) - cacheValues := q.e.Cache.Values(key) - keyCursor := q.e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending) - if opt.Ascending { - if q.asc.Float == nil { - q.asc.Float = newFloatArrayAscendingCursor() - } - q.asc.Float.reset(opt.SeekTime(), opt.StopTime(), cacheValues, keyCursor) - return q.asc.Float - } else { - if q.desc.Float == nil { - q.desc.Float = newFloatArrayDescendingCursor() - } - q.desc.Float.reset(opt.SeekTime(), opt.StopTime(), cacheValues, keyCursor) - return q.desc.Float - } -} - -// buildIntegerArrayCursor creates an array cursor for a integer field. -func (q *arrayCursorIterator) buildIntegerArrayCursor(ctx context.Context, name []byte, tags models.Tags, field string, opt query.IteratorOptions) tsdb.IntegerArrayCursor { - key := q.seriesFieldKeyBytes(name, tags, field) - cacheValues := q.e.Cache.Values(key) - keyCursor := q.e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending) - if opt.Ascending { - if q.asc.Integer == nil { - q.asc.Integer = newIntegerArrayAscendingCursor() - } - q.asc.Integer.reset(opt.SeekTime(), opt.StopTime(), cacheValues, keyCursor) - return q.asc.Integer - } else { - if q.desc.Integer == nil { - q.desc.Integer = newIntegerArrayDescendingCursor() - } - q.desc.Integer.reset(opt.SeekTime(), opt.StopTime(), cacheValues, keyCursor) - return q.desc.Integer - } -} - -// buildUnsignedArrayCursor creates an array cursor for a unsigned field. 
-func (q *arrayCursorIterator) buildUnsignedArrayCursor(ctx context.Context, name []byte, tags models.Tags, field string, opt query.IteratorOptions) tsdb.UnsignedArrayCursor { - key := q.seriesFieldKeyBytes(name, tags, field) - cacheValues := q.e.Cache.Values(key) - keyCursor := q.e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending) - if opt.Ascending { - if q.asc.Unsigned == nil { - q.asc.Unsigned = newUnsignedArrayAscendingCursor() - } - q.asc.Unsigned.reset(opt.SeekTime(), opt.StopTime(), cacheValues, keyCursor) - return q.asc.Unsigned - } else { - if q.desc.Unsigned == nil { - q.desc.Unsigned = newUnsignedArrayDescendingCursor() - } - q.desc.Unsigned.reset(opt.SeekTime(), opt.StopTime(), cacheValues, keyCursor) - return q.desc.Unsigned - } -} - -// buildStringArrayCursor creates an array cursor for a string field. -func (q *arrayCursorIterator) buildStringArrayCursor(ctx context.Context, name []byte, tags models.Tags, field string, opt query.IteratorOptions) tsdb.StringArrayCursor { - key := q.seriesFieldKeyBytes(name, tags, field) - cacheValues := q.e.Cache.Values(key) - keyCursor := q.e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending) - if opt.Ascending { - if q.asc.String == nil { - q.asc.String = newStringArrayAscendingCursor() - } - q.asc.String.reset(opt.SeekTime(), opt.StopTime(), cacheValues, keyCursor) - return q.asc.String - } else { - if q.desc.String == nil { - q.desc.String = newStringArrayDescendingCursor() - } - q.desc.String.reset(opt.SeekTime(), opt.StopTime(), cacheValues, keyCursor) - return q.desc.String - } -} - -// buildBooleanArrayCursor creates an array cursor for a boolean field. -func (q *arrayCursorIterator) buildBooleanArrayCursor(ctx context.Context, name []byte, tags models.Tags, field string, opt query.IteratorOptions) tsdb.BooleanArrayCursor { - key := q.seriesFieldKeyBytes(name, tags, field) - cacheValues := q.e.Cache.Values(key) - keyCursor := q.e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending) - if opt.Ascending { - if q.asc.Boolean == nil { - q.asc.Boolean = newBooleanArrayAscendingCursor() - } - q.asc.Boolean.reset(opt.SeekTime(), opt.StopTime(), cacheValues, keyCursor) - return q.asc.Boolean - } else { - if q.desc.Boolean == nil { - q.desc.Boolean = newBooleanArrayDescendingCursor() - } - q.desc.Boolean.reset(opt.SeekTime(), opt.StopTime(), cacheValues, keyCursor) - return q.desc.Boolean - } -} diff --git a/tsdb/engine/tsm1/array_cursor_iterator.gen.go.tmpl b/tsdb/engine/tsm1/array_cursor_iterator.gen.go.tmpl deleted file mode 100644 index 84cf6a35d69..00000000000 --- a/tsdb/engine/tsm1/array_cursor_iterator.gen.go.tmpl +++ /dev/null @@ -1,33 +0,0 @@ -package tsm1 - -import ( - "context" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/tsdb" -) - -{{range .}} - -// build{{.Name}}ArrayCursor creates an array cursor for a {{.name}} field. 
-func (q *arrayCursorIterator) build{{.Name}}ArrayCursor(ctx context.Context, name []byte, tags models.Tags, field string, opt query.IteratorOptions) tsdb.{{.Name}}ArrayCursor { - key := q.seriesFieldKeyBytes(name, tags, field) - cacheValues := q.e.Cache.Values(key) - keyCursor := q.e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending) - if opt.Ascending { - if q.asc.{{.Name}} == nil { - q.asc.{{.Name}} = new{{.Name}}ArrayAscendingCursor() - } - q.asc.{{.Name}}.reset(opt.SeekTime(), opt.StopTime(), cacheValues, keyCursor) - return q.asc.{{.Name}} - } else { - if q.desc.{{.Name}} == nil { - q.desc.{{.Name}} = new{{.Name}}ArrayDescendingCursor() - } - q.desc.{{.Name}}.reset(opt.SeekTime(), opt.StopTime(), cacheValues, keyCursor) - return q.desc.{{.Name}} - } -} - -{{end}} diff --git a/tsdb/engine/tsm1/array_cursor_iterator.go b/tsdb/engine/tsm1/array_cursor_iterator.go deleted file mode 100644 index 3a4d130847e..00000000000 --- a/tsdb/engine/tsm1/array_cursor_iterator.go +++ /dev/null @@ -1,84 +0,0 @@ -package tsm1 - -import ( - "context" - "fmt" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/metrics" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxql" -) - -type arrayCursorIterator struct { - e *Engine - key []byte - - asc struct { - Float *floatArrayAscendingCursor - Integer *integerArrayAscendingCursor - Unsigned *unsignedArrayAscendingCursor - Boolean *booleanArrayAscendingCursor - String *stringArrayAscendingCursor - } - - desc struct { - Float *floatArrayDescendingCursor - Integer *integerArrayDescendingCursor - Unsigned *unsignedArrayDescendingCursor - Boolean *booleanArrayDescendingCursor - String *stringArrayDescendingCursor - } -} - -func (q *arrayCursorIterator) Stats() tsdb.CursorStats { - return tsdb.CursorStats{} -} - -func (q *arrayCursorIterator) Next(ctx context.Context, r *tsdb.CursorRequest) (tsdb.Cursor, error) { - // Look up fields for measurement. - mf := q.e.fieldset.Fields(r.Name) - if mf == nil { - return nil, nil - } - - // Find individual field. - f := mf.Field(r.Field) - if f == nil { - // field doesn't exist for this measurement - return nil, nil - } - - if grp := metrics.GroupFromContext(ctx); grp != nil { - grp.GetCounter(numberOfRefCursorsCounter).Add(1) - } - - var opt query.IteratorOptions - opt.Ascending = r.Ascending - opt.StartTime = r.StartTime - opt.EndTime = r.EndTime // inclusive - - // Return appropriate cursor based on type. - switch f.Type { - case influxql.Float: - return q.buildFloatArrayCursor(ctx, r.Name, r.Tags, r.Field, opt), nil - case influxql.Integer: - return q.buildIntegerArrayCursor(ctx, r.Name, r.Tags, r.Field, opt), nil - case influxql.Unsigned: - return q.buildUnsignedArrayCursor(ctx, r.Name, r.Tags, r.Field, opt), nil - case influxql.String: - return q.buildStringArrayCursor(ctx, r.Name, r.Tags, r.Field, opt), nil - case influxql.Boolean: - return q.buildBooleanArrayCursor(ctx, r.Name, r.Tags, r.Field, opt), nil - default: - panic(fmt.Sprintf("unreachable: %T", f.Type)) - } -} - -func (q *arrayCursorIterator) seriesFieldKeyBytes(name []byte, tags models.Tags, field string) []byte { - q.key = models.AppendMakeKey(q.key[:0], name, tags) - q.key = append(q.key, keyFieldSeparatorBytes...) - q.key = append(q.key, field...) 
- return q.key -} diff --git a/tsdb/engine/tsm1/array_cursor_test.go b/tsdb/engine/tsm1/array_cursor_test.go deleted file mode 100644 index abb27dad8a1..00000000000 --- a/tsdb/engine/tsm1/array_cursor_test.go +++ /dev/null @@ -1,562 +0,0 @@ -package tsm1 - -import ( - "context" - "fmt" - "os" - "path/filepath" - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/pkg/fs" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/cursors" - "github.com/stretchr/testify/assert" -) - -type keyValues struct { - key string - values []Value -} - -func MustTempFile(dir string) *os.File { - f, err := os.CreateTemp(dir, "tsm1test") - if err != nil { - panic(fmt.Sprintf("failed to create temp file: %v", err)) - } - return f -} - -func newFiles(dir string, values ...keyValues) ([]string, error) { - var files []string - - id := 1 - for _, v := range values { - f := MustTempFile(dir) - w, err := NewTSMWriter(f) - if err != nil { - return nil, err - } - - if err := w.Write([]byte(v.key), v.values); err != nil { - return nil, err - } - - if err := w.WriteIndex(); err != nil { - return nil, err - } - - if err := w.Close(); err != nil { - return nil, err - } - - newName := filepath.Join(filepath.Dir(f.Name()), DefaultFormatFileName(id, 1)+".tsm") - if err := fs.RenameFile(f.Name(), newName); err != nil { - return nil, err - } - id++ - - files = append(files, newName) - } - return files, nil -} - -func TestDescendingCursor_SinglePointStartTime(t *testing.T) { - t.Run("cache", func(t *testing.T) { - dir := t.TempDir() - fs := NewFileStore(dir, tsdb.EngineTags{}) - t.Cleanup(func() { fs.Close() }) - - const START, END = 10, 1 - kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, false) - t.Cleanup(kc.Close) - cur := newIntegerArrayDescendingCursor() - t.Cleanup(cur.Close) - // Include a cached value with timestamp equal to END - cur.reset(START, END, Values{NewIntegerValue(1, 1)}, kc) - - var got []int64 - ar := cur.Next() - for ar.Len() > 0 { - got = append(got, ar.Timestamps...) - ar = cur.Next() - } - - if exp := []int64{1}; !cmp.Equal(got, exp) { - t.Errorf("unexpected values; -got/+exp\n%s", cmp.Diff(got, exp)) - } - }) - t.Run("tsm", func(t *testing.T) { - dir := t.TempDir() - fs := NewFileStore(dir, tsdb.EngineTags{}) - t.Cleanup(func() { fs.Close() }) - - const START, END = 10, 1 - - data := []keyValues{ - // Write a single data point with timestamp equal to END - {"m,_field=v#!~#v", []Value{NewIntegerValue(1, 1)}}, - } - - files, err := newFiles(dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - _ = fs.Replace(nil, files) - - kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, false) - t.Cleanup(kc.Close) - cur := newIntegerArrayDescendingCursor() - t.Cleanup(cur.Close) - cur.reset(START, END, nil, kc) - - var got []int64 - ar := cur.Next() - for ar.Len() > 0 { - got = append(got, ar.Timestamps...) 
- ar = cur.Next() - } - - if exp := []int64{1}; !cmp.Equal(got, exp) { - t.Errorf("unexpected values; -got/+exp\n%s", cmp.Diff(got, exp)) - } - }) -} - -func TestFileStore_DuplicatePoints(t *testing.T) { - dir := t.TempDir() - fs := NewFileStore(dir, tsdb.EngineTags{}) - t.Cleanup(func() { fs.Close() }) - - makeVals := func(ts ...int64) []Value { - vals := make([]Value, len(ts)) - for i, t := range ts { - vals[i] = NewFloatValue(t, 1.01) - } - return vals - } - - // Setup 3 files - data := []keyValues{ - {"m,_field=v#!~#v", makeVals(21)}, - {"m,_field=v#!~#v", makeVals(44)}, - {"m,_field=v#!~#v", makeVals(40, 46)}, - {"m,_field=v#!~#v", makeVals(46, 51)}, - } - - files, err := newFiles(dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - _ = fs.Replace(nil, files) - - t.Run("ascending", func(t *testing.T) { - const START, END = 0, 100 - kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, true) - t.Cleanup(kc.Close) - cur := newFloatArrayAscendingCursor() - cur.reset(START, END, nil, kc) - t.Cleanup(cur.Close) - - var got []int64 - ar := cur.Next() - for ar.Len() > 0 { - got = append(got, ar.Timestamps...) - ar = cur.Next() - } - - if exp := []int64{21, 40, 44, 46, 51}; !cmp.Equal(got, exp) { - t.Errorf("unexpected values; -got/+exp\n%s", cmp.Diff(got, exp)) - } - }) - - t.Run("descending", func(t *testing.T) { - const START, END = 100, 0 - kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, false) - t.Cleanup(kc.Close) - cur := newFloatArrayDescendingCursor() - cur.reset(START, END, nil, kc) - t.Cleanup(cur.Close) - - var got []int64 - ar := cur.Next() - for ar.Len() > 0 { - got = append(got, ar.Timestamps...) - ar = cur.Next() - } - - if exp := []int64{51, 46, 44, 40, 21}; !cmp.Equal(got, exp) { - t.Errorf("unexpected values; -got/+exp\n%s", cmp.Diff(got, exp)) - } - }) -} - -// Int64Slice attaches the methods of Interface to []int64, sorting in increasing order. -type Int64Slice []int64 - -func (p Int64Slice) Len() int { return len(p) } -func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// Verifies the array cursors correctly handle merged blocks from KeyCursor which may exceed the -// array cursor's local values buffer, which is initialized to MaxPointsPerBlock elements (1000) -// -// This test creates two TSM files which have a single block each. The second file -// has interleaving timestamps with the first file. -// -// The first file has a block of 800 timestamps starting at 1000 an increasing by 10ns -// The second file has a block of 400 timestamps starting at 1005, also increasing by 10ns -// -// When calling `nextTSM`, a single block of 1200 timestamps will be returned and the -// array cursor must chuck the values in the Next call. 
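Before the test itself (next hunk), here is a small standalone illustration of the chunking the comment above describes: a merged block larger than the 1000-element local buffer is handed out across successive `Next` calls, so 1200 merged timestamps arrive as 1000 and then 200. The `chunk` helper and the `maxPointsPerBlock` constant below are illustrative only.

```go
package main

import "fmt"

// maxPointsPerBlock mirrors the 1000-element buffer size mentioned above.
const maxPointsPerBlock = 1000

// chunk splits ts into slices of at most maxPointsPerBlock elements,
// mimicking how successive Next() calls hand out one merged block.
func chunk(ts []int64) [][]int64 {
	var out [][]int64
	for len(ts) > 0 {
		n := len(ts)
		if n > maxPointsPerBlock {
			n = maxPointsPerBlock
		}
		out = append(out, ts[:n])
		ts = ts[n:]
	}
	return out
}

func main() {
	merged := make([]int64, 1200) // e.g. an 800-point and a 400-point block merged
	for i := range merged {
		merged[i] = int64(i)
	}
	for _, c := range chunk(merged) {
		fmt.Println(len(c)) // 1000, then 200
	}
}
```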
-func TestFileStore_MergeBlocksLargerThat1000_SecondEntirelyContained(t *testing.T) { - dir := t.TempDir() - fs := NewFileStore(dir, tsdb.EngineTags{}) - t.Cleanup(func() { fs.Close() }) - - // makeVals creates count points starting at ts and incrementing by step - makeVals := func(ts, count, step int64) []Value { - vals := make([]Value, count) - for i := range vals { - vals[i] = NewFloatValue(ts, 1.01) - ts += step - } - return vals - } - - makeTs := func(ts, count, step int64) []int64 { - vals := make([]int64, count) - for i := range vals { - vals[i] = ts - ts += step - } - return vals - } - - // Setup 2 files with the second containing a single block that is completely within the first - data := []keyValues{ - {"m,_field=v#!~#v", makeVals(1000, 800, 10)}, - {"m,_field=v#!~#v", makeVals(1005, 400, 10)}, - } - - files, err := newFiles(dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - _ = fs.Replace(nil, files) - - t.Run("ascending", func(t *testing.T) { - const START, END = 1000, 10000 - kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, true) - t.Cleanup(kc.Close) - cur := newFloatArrayAscendingCursor() - t.Cleanup(cur.Close) - cur.reset(START, END, nil, kc) - - exp := makeTs(1000, 800, 10) - exp = append(exp, makeTs(1005, 400, 10)...) - sort.Sort(Int64Slice(exp)) - - // check first block - ar := cur.Next() - assert.Len(t, ar.Timestamps, 1000) - assert.Equal(t, exp[:1000], ar.Timestamps) - - // check second block - exp = exp[1000:] - ar = cur.Next() - assert.Len(t, ar.Timestamps, 200) - assert.Equal(t, exp, ar.Timestamps) - }) - - t.Run("descending", func(t *testing.T) { - const START, END = 10000, 0 - kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, false) - t.Cleanup(kc.Close) - cur := newFloatArrayDescendingCursor() - t.Cleanup(cur.Close) - cur.reset(START, END, nil, kc) - - exp := makeTs(1000, 800, 10) - exp = append(exp, makeTs(1005, 400, 10)...) - sort.Sort(sort.Reverse(Int64Slice(exp))) - - // check first block - ar := cur.Next() - assert.Len(t, ar.Timestamps, 1000) - assert.Equal(t, exp[:1000], ar.Timestamps) - - // check second block - exp = exp[1000:] - ar = cur.Next() - assert.Len(t, ar.Timestamps, 200) - assert.Equal(t, exp, ar.Timestamps) - }) -} - -// FloatArray attaches the methods of sort.Interface to *tsdb.FloatArray, sorting in increasing order. -type FloatArray struct { - *cursors.FloatArray -} - -func (a *FloatArray) Less(i, j int) bool { return a.Timestamps[i] < a.Timestamps[j] } -func (a *FloatArray) Swap(i, j int) { - a.Timestamps[i], a.Timestamps[j] = a.Timestamps[j], a.Timestamps[i] - a.Values[i], a.Values[j] = a.Values[j], a.Values[i] -} - -// Verifies the array cursors correctly handle merged blocks from KeyCursor which may exceed the -// array cursor's local values buffer, which is initialized to MaxPointsPerBlock elements (1000) -// -// This test creates two TSM files with a significant number of interleaved points in addition -// to a significant number of points in the second file which replace values in the first. -// To verify intersecting data from the second file replaces the first, the values differ, -// so the enumerated results can be compared with the expected output. 
-func TestFileStore_MergeBlocksLargerThat1000_MultipleBlocksInEachFile(t *testing.T) { - dir := t.TempDir() - fs := NewFileStore(dir, tsdb.EngineTags{}) - t.Cleanup(func() { fs.Close() }) - - // makeVals creates count points starting at ts and incrementing by step - makeVals := func(ts, count, step int64, v float64) []Value { - vals := make([]Value, count) - for i := range vals { - vals[i] = NewFloatValue(ts, v) - ts += step - } - return vals - } - - makeArray := func(ts, count, step int64, v float64) *cursors.FloatArray { - ar := cursors.NewFloatArrayLen(int(count)) - for i := range ar.Timestamps { - ar.Timestamps[i] = ts - ar.Values[i] = v - ts += step - } - return ar - } - - // Setup 2 files with partially overlapping blocks and the second file replaces some elements of the first - data := []keyValues{ - {"m,_field=v#!~#v", makeVals(1000, 3500, 10, 1.01)}, - {"m,_field=v#!~#v", makeVals(4005, 3500, 5, 2.01)}, - } - - files, err := newFiles(dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - _ = fs.Replace(nil, files) - - t.Run("ascending", func(t *testing.T) { - const START, END = 1000, 1e9 - kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, true) - t.Cleanup(kc.Close) - cur := newFloatArrayAscendingCursor() - t.Cleanup(cur.Close) - cur.reset(START, END, nil, kc) - - exp := makeArray(1000, 3500, 10, 1.01) - a2 := makeArray(4005, 3500, 5, 2.01) - exp.Merge(a2) - - got := cursors.NewFloatArrayLen(exp.Len()) - got.Timestamps = got.Timestamps[:0] - got.Values = got.Values[:0] - - ar := cur.Next() - for ar.Len() > 0 { - got.Timestamps = append(got.Timestamps, ar.Timestamps...) - got.Values = append(got.Values, ar.Values...) - ar = cur.Next() - } - - assert.Len(t, got.Timestamps, exp.Len()) - assert.Equal(t, exp.Timestamps, got.Timestamps) - assert.Equal(t, exp.Values, got.Values) - }) - - t.Run("descending", func(t *testing.T) { - const START, END = 1e9, 0 - kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, false) - t.Cleanup(kc.Close) - cur := newFloatArrayDescendingCursor() - t.Cleanup(cur.Close) - cur.reset(START, END, nil, kc) - - exp := makeArray(1000, 3500, 10, 1.01) - a2 := makeArray(4005, 3500, 5, 2.01) - exp.Merge(a2) - sort.Sort(sort.Reverse(&FloatArray{exp})) - - got := cursors.NewFloatArrayLen(exp.Len()) - got.Timestamps = got.Timestamps[:0] - got.Values = got.Values[:0] - - ar := cur.Next() - for ar.Len() > 0 { - got.Timestamps = append(got.Timestamps, ar.Timestamps...) - got.Values = append(got.Values, ar.Values...) - ar = cur.Next() - } - - assert.Len(t, got.Timestamps, exp.Len()) - assert.Equal(t, exp.Timestamps, got.Timestamps) - assert.Equal(t, exp.Values, got.Values) - }) -} - -func TestFileStore_SeekBoundaries(t *testing.T) { - dir := t.TempDir() - fs := NewFileStore(dir, tsdb.EngineTags{}) - t.Cleanup(func() { fs.Close() }) - - // makeVals creates count points starting at ts and incrementing by step - makeVals := func(ts, count, step int64, v float64) []Value { - vals := make([]Value, count) - for i := range vals { - vals[i] = NewFloatValue(ts, v) - ts += step - } - return vals - } - - makeArray := func(ts, count, step int64, v float64) *cursors.FloatArray { - ar := cursors.NewFloatArrayLen(int(count)) - for i := range ar.Timestamps { - ar.Timestamps[i] = ts - ar.Values[i] = v - ts += step - } - return ar - } - - // Setup 2 files where the seek time matches the end time. 
- data := []keyValues{ - {"m,_field=v#!~#v", makeVals(1000, 100, 1, 1.01)}, - {"m,_field=v#!~#v", makeVals(1100, 100, 1, 2.01)}, - } - - files, err := newFiles(dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %s", err) - } - - _ = fs.Replace(nil, files) - - t.Run("ascending full", func(t *testing.T) { - const START, END = 1000, 1099 - kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, true) - t.Cleanup(kc.Close) - cur := newFloatArrayAscendingCursor() - t.Cleanup(cur.Close) - cur.reset(START, END, nil, kc) - - exp := makeArray(1000, 100, 1, 1.01) - - got := cursors.NewFloatArrayLen(exp.Len()) - got.Timestamps = got.Timestamps[:0] - got.Values = got.Values[:0] - - ar := cur.Next() - for ar.Len() > 0 { - got.Timestamps = append(got.Timestamps, ar.Timestamps...) - got.Values = append(got.Values, ar.Values...) - ar = cur.Next() - } - - assert.Len(t, got.Timestamps, exp.Len()) - assert.Equal(t, exp.Timestamps, got.Timestamps) - assert.Equal(t, exp.Values, got.Values) - }) - - t.Run("ascending split", func(t *testing.T) { - const START, END = 1050, 1149 - kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, true) - t.Cleanup(kc.Close) - cur := newFloatArrayAscendingCursor() - t.Cleanup(cur.Close) - cur.reset(START, END, nil, kc) - - exp := makeArray(1050, 50, 1, 1.01) - a2 := makeArray(1100, 50, 1, 2.01) - exp.Merge(a2) - - got := cursors.NewFloatArrayLen(exp.Len()) - got.Timestamps = got.Timestamps[:0] - got.Values = got.Values[:0] - - ar := cur.Next() - for ar.Len() > 0 { - got.Timestamps = append(got.Timestamps, ar.Timestamps...) - got.Values = append(got.Values, ar.Values...) - ar = cur.Next() - } - - assert.Len(t, got.Timestamps, exp.Len()) - assert.Equal(t, exp.Timestamps, got.Timestamps) - assert.Equal(t, exp.Values, got.Values) - }) - - t.Run("descending full", func(t *testing.T) { - const START, END = 1099, 1000 - kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, false) - t.Cleanup(kc.Close) - cur := newFloatArrayDescendingCursor() - t.Cleanup(cur.Close) - cur.reset(START, END, nil, kc) - - exp := makeArray(1000, 100, 1, 1.01) - sort.Sort(sort.Reverse(&FloatArray{exp})) - - got := cursors.NewFloatArrayLen(exp.Len()) - got.Timestamps = got.Timestamps[:0] - got.Values = got.Values[:0] - - ar := cur.Next() - for ar.Len() > 0 { - got.Timestamps = append(got.Timestamps, ar.Timestamps...) - got.Values = append(got.Values, ar.Values...) - ar = cur.Next() - } - - assert.Len(t, got.Timestamps, exp.Len()) - assert.Equal(t, exp.Timestamps, got.Timestamps) - assert.Equal(t, exp.Values, got.Values) - }) - - t.Run("descending split", func(t *testing.T) { - const START, END = 1149, 1050 - kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, false) - t.Cleanup(kc.Close) - cur := newFloatArrayDescendingCursor() - t.Cleanup(cur.Close) - cur.reset(START, END, nil, kc) - - exp := makeArray(1050, 50, 1, 1.01) - a2 := makeArray(1100, 50, 1, 2.01) - exp.Merge(a2) - sort.Sort(sort.Reverse(&FloatArray{exp})) - - got := cursors.NewFloatArrayLen(exp.Len()) - got.Timestamps = got.Timestamps[:0] - got.Values = got.Values[:0] - - ar := cur.Next() - for ar.Len() > 0 { - got.Timestamps = append(got.Timestamps, ar.Timestamps...) - got.Values = append(got.Values, ar.Values...) 
- ar = cur.Next() - } - - assert.Len(t, got.Timestamps, exp.Len()) - assert.Equal(t, exp.Timestamps, got.Timestamps) - assert.Equal(t, exp.Values, got.Values) - }) -} diff --git a/tsdb/engine/tsm1/array_encoding.go b/tsdb/engine/tsm1/array_encoding.go deleted file mode 100644 index eb16c39202d..00000000000 --- a/tsdb/engine/tsm1/array_encoding.go +++ /dev/null @@ -1,112 +0,0 @@ -package tsm1 - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/tsdb" -) - -// DecodeBooleanArrayBlock decodes the boolean block from the byte slice -// and writes the values to a. -func DecodeBooleanArrayBlock(block []byte, a *tsdb.BooleanArray) error { - blockType := block[0] - if blockType != BlockBoolean { - return fmt.Errorf("invalid block type: exp %d, got %d", BlockBoolean, blockType) - } - - tb, vb, err := unpackBlock(block[1:]) - if err != nil { - return err - } - - a.Timestamps, err = TimeArrayDecodeAll(tb, a.Timestamps) - if err != nil { - return err - } - a.Values, err = BooleanArrayDecodeAll(vb, a.Values) - return err -} - -// DecodeFloatArrayBlock decodes the float block from the byte slice -// and writes the values to a. -func DecodeFloatArrayBlock(block []byte, a *tsdb.FloatArray) error { - blockType := block[0] - if blockType != BlockFloat64 { - return fmt.Errorf("invalid block type: exp %d, got %d", BlockFloat64, blockType) - } - - tb, vb, err := unpackBlock(block[1:]) - if err != nil { - return err - } - - a.Timestamps, err = TimeArrayDecodeAll(tb, a.Timestamps) - if err != nil { - return err - } - a.Values, err = FloatArrayDecodeAll(vb, a.Values) - return err -} - -// DecodeIntegerArrayBlock decodes the integer block from the byte slice -// and writes the values to a. -func DecodeIntegerArrayBlock(block []byte, a *tsdb.IntegerArray) error { - blockType := block[0] - if blockType != BlockInteger { - return fmt.Errorf("invalid block type: exp %d, got %d", BlockInteger, blockType) - } - - tb, vb, err := unpackBlock(block[1:]) - if err != nil { - return err - } - - a.Timestamps, err = TimeArrayDecodeAll(tb, a.Timestamps) - if err != nil { - return err - } - a.Values, err = IntegerArrayDecodeAll(vb, a.Values) - return err -} - -// DecodeUnsignedArrayBlock decodes the unsigned integer block from the byte slice -// and writes the values to a. -func DecodeUnsignedArrayBlock(block []byte, a *tsdb.UnsignedArray) error { - blockType := block[0] - if blockType != BlockUnsigned { - return fmt.Errorf("invalid block type: exp %d, got %d", BlockUnsigned, blockType) - } - - tb, vb, err := unpackBlock(block[1:]) - if err != nil { - return err - } - - a.Timestamps, err = TimeArrayDecodeAll(tb, a.Timestamps) - if err != nil { - return err - } - a.Values, err = UnsignedArrayDecodeAll(vb, a.Values) - return err -} - -// DecodeStringArrayBlock decodes the string block from the byte slice -// and writes the values to a. 
-func DecodeStringArrayBlock(block []byte, a *tsdb.StringArray) error { - blockType := block[0] - if blockType != BlockString { - return fmt.Errorf("invalid block type: exp %d, got %d", BlockString, blockType) - } - - tb, vb, err := unpackBlock(block[1:]) - if err != nil { - return err - } - - a.Timestamps, err = TimeArrayDecodeAll(tb, a.Timestamps) - if err != nil { - return err - } - a.Values, err = StringArrayDecodeAll(vb, a.Values) - return err -} diff --git a/tsdb/engine/tsm1/array_encoding_test.go b/tsdb/engine/tsm1/array_encoding_test.go deleted file mode 100644 index 3cfe92a6c75..00000000000 --- a/tsdb/engine/tsm1/array_encoding_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package tsm1_test - -import ( - "fmt" - "math/rand" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -func TestDecodeFloatArrayBlock(t *testing.T) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make(tsm1.FloatValues, len(times)) - for i, t := range times { - values[i] = tsm1.NewFloatValue(t, float64(i)).(tsm1.FloatValue) - } - exp := tsm1.NewFloatArrayFromValues(values) - - b, err := values.Encode(nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - got := tsdb.NewFloatArrayLen(exp.Len()) - tsm1.DecodeFloatArrayBlock(b, got) - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func BenchmarkDecodeBooleanArrayBlock(b *testing.B) { - cases := []int{ - 5, - 55, - 555, - 1000, - } - for _, n := range cases { - b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { - valueCount := n - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, true) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.ResetTimer() - b.ReportAllocs() - b.SetBytes(int64(tsm1.Values(values).Size())) - - b.RunParallel(func(pb *testing.PB) { - decodedValues := tsdb.NewBooleanArrayLen(len(values)) - - for pb.Next() { - err = tsm1.DecodeBooleanArrayBlock(bytes, decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } - }) - }) - } -} - -func BenchmarkDecodeFloatArrayBlock(b *testing.B) { - cases := []int{ - 5, - 55, - 555, - 1000, - } - for _, n := range cases { - b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { - valueCount := n - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, float64(i)) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.ResetTimer() - b.ReportAllocs() - b.SetBytes(int64(tsm1.Values(values).Size())) - - b.RunParallel(func(pb *testing.PB) { - decodedValues := tsdb.NewFloatArrayLen(len(values)) - - for pb.Next() { - err = tsm1.DecodeFloatArrayBlock(bytes, decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } - }) - }) - } -} - -func BenchmarkDecodeIntegerArrayBlock(b *testing.B) { - rle := func(_ *rand.Rand, i int) int64 { return int64(i) } - s8b := func(r *rand.Rand, i int) int64 { return int64(i + int(r.Int31n(10))) } - - cases := []struct { - enc string - gen func(r *rand.Rand, i int) int64 - n int - }{ - {enc: "rle", gen: rle, n: 5}, - {enc: "rle", gen: rle, n: 55}, - {enc: "rle", gen: rle, 
n: 555}, - {enc: "rle", gen: rle, n: 1000}, - {enc: "s8b", gen: s8b, n: 5}, - {enc: "s8b", gen: s8b, n: 55}, - {enc: "s8b", gen: s8b, n: 555}, - {enc: "s8b", gen: s8b, n: 1000}, - } - for _, bm := range cases { - b.Run(fmt.Sprintf("%s_%d", bm.enc, bm.n), func(b *testing.B) { - seededRand := rand.New(rand.NewSource(int64(bm.n * 1e3))) - - valueCount := bm.n - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, bm.gen(seededRand, i)) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.ResetTimer() - b.ReportAllocs() - b.SetBytes(int64(tsm1.Values(values).Size())) - - b.RunParallel(func(pb *testing.PB) { - decodedValues := tsdb.NewIntegerArrayLen(len(values)) - - for pb.Next() { - err = tsm1.DecodeIntegerArrayBlock(bytes, decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } - }) - }) - } -} - -func BenchmarkDecodeStringArrayBlock(b *testing.B) { - cases := []int{ - 5, - 55, - 555, - 1000, - } - for _, n := range cases { - b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { - valueCount := n - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, fmt.Sprintf("value %d", i)) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.ResetTimer() - b.ReportAllocs() - b.SetBytes(int64(tsm1.Values(values).Size())) - - b.RunParallel(func(pb *testing.PB) { - decodedValues := tsdb.NewStringArrayLen(len(values)) - - for pb.Next() { - err = tsm1.DecodeStringArrayBlock(bytes, decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } - }) - }) - } -} diff --git a/tsdb/engine/tsm1/batch_boolean.go b/tsdb/engine/tsm1/batch_boolean.go deleted file mode 100644 index 98d4999b30c..00000000000 --- a/tsdb/engine/tsm1/batch_boolean.go +++ /dev/null @@ -1,77 +0,0 @@ -package tsm1 - -import ( - "encoding/binary" - "fmt" -) - -// BooleanArrayEncodeAll encodes src into b, returning b and any error encountered. -// The returned slice may be of a different length and capacity to b. -func BooleanArrayEncodeAll(src []bool, b []byte) ([]byte, error) { - sz := 1 + 8 + ((len(src) + 7) / 8) // Header + Num bools + bool data. - if len(b) < sz && cap(b) > sz { - b = b[:sz] - } else if len(b) < sz { - b = append(b, make([]byte, sz)...) - } - - // Store the encoding type in the 4 high bits of the first byte - b[0] = byte(booleanCompressedBitPacked) << 4 - n := uint64(8) // Current bit in current byte. - - // Encode the number of booleans written. - i := binary.PutUvarint(b[n>>3:], uint64(len(src))) - n += uint64(i * 8) - - for _, v := range src { - if v { - b[n>>3] |= 128 >> (n & 7) // Set current bit on current byte. - } else { - b[n>>3] &^= 128 >> (n & 7) // Clear current bit on current byte. - } - n++ - } - - length := n >> 3 - if n&7 > 0 { - length++ // Add an extra byte to capture overflowing bits. - } - return b[:length], nil -} - -func BooleanArrayDecodeAll(b []byte, dst []bool) ([]bool, error) { - if len(b) == 0 { - return nil, nil - } - - // First byte stores the encoding type, only have 1 bit-packet format - // currently ignore for now. 
- b = b[1:] - val, n := binary.Uvarint(b) - if n <= 0 { - return nil, fmt.Errorf("BooleanBatchDecoder: invalid count") - } - - count := int(val) - - b = b[n:] - if min := len(b) * 8; min < count { - // Shouldn't happen - TSM file was truncated/corrupted - count = min - } - - if cap(dst) < count { - dst = make([]bool, count) - } else { - dst = dst[:count] - } - - j := 0 - for _, v := range b { - for i := byte(128); i > 0 && j < len(dst); i >>= 1 { - dst[j] = v&i != 0 - j++ - } - } - return dst, nil -} diff --git a/tsdb/engine/tsm1/batch_boolean_test.go b/tsdb/engine/tsm1/batch_boolean_test.go deleted file mode 100644 index c2348b2197c..00000000000 --- a/tsdb/engine/tsm1/batch_boolean_test.go +++ /dev/null @@ -1,305 +0,0 @@ -package tsm1_test - -import ( - "bytes" - "fmt" - "math/rand" - "reflect" - "testing" - "testing/quick" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -func TestBooleanArrayEncodeAll_NoValues(t *testing.T) { - b, err := tsm1.BooleanArrayEncodeAll(nil, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var dec tsm1.BooleanDecoder - dec.SetBytes(b) - if dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } -} - -func TestBooleanArrayEncodeAll_Single(t *testing.T) { - src := []bool{true} - - b, err := tsm1.BooleanArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var dec tsm1.BooleanDecoder - dec.SetBytes(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got false, exp true") - } - - if src[0] != dec.Read() { - t.Fatalf("unexpected value: got %v, exp %v", dec.Read(), src[0]) - } -} - -func TestBooleanArrayEncodeAll_Compare(t *testing.T) { - // generate random values - input := make([]bool, 1000) - for i := 0; i < len(input); i++ { - input[i] = rand.Int63n(2) == 1 - } - - s := tsm1.NewBooleanEncoder(1000) - for _, v := range input { - s.Write(v) - } - s.Flush() - - buf1, err := s.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - buf2 := append([]byte("this is some jibberish"), make([]byte, 100, 200)...) - buf2, err = tsm1.BooleanArrayEncodeAll(input, buf2) - if err != nil { - t.Fatalf("unexpected error: %v\nbuf: %db %x", err, len(buf2), buf2) - } - - result, err := tsm1.BooleanArrayDecodeAll(buf2, nil) - if err != nil { - dumpBufs(buf1, buf2) - t.Fatalf("unexpected error: %v\nbuf: %db %x", err, len(buf2), buf2) - } - - if got, exp := result, input; !reflect.DeepEqual(got, exp) { - dumpBufs(buf1, buf2) - t.Fatalf("got result %v, expected %v", got, exp) - } - - // Check that the encoders are byte for byte the same... 
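As an aside before the byte-for-byte comparison that follows: the bit-packed boolean block is small enough to read by hand. Going by the encoder above, and assuming the `booleanCompressedBitPacked` constant is `1` (its value is not shown in this diff), encoding `[true, false, true]` yields three bytes: `0x10` (encoding type in the high nibble), `0x03` (uvarint value count) and `0xa0` (the bits `101` packed MSB-first). A self-contained sketch of that packing, independent of the tsm1 package:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// packBools is a stripped-down sketch of BooleanArrayEncodeAll's layout:
// one header byte (encoding type in the high nibble), a uvarint count,
// then one bit per value, packed most-significant-bit first.
func packBools(src []bool) []byte {
	buf := make([]byte, 1+binary.MaxVarintLen64+(len(src)+7)/8)
	buf[0] = 1 << 4 // assumed bit-packed encoding type
	n := uint64(8)  // bit position; the header byte is already written

	n += uint64(binary.PutUvarint(buf[1:], uint64(len(src)))) * 8

	for _, v := range src {
		if v {
			buf[n>>3] |= 128 >> (n & 7)
		}
		n++
	}

	length := n >> 3
	if n&7 > 0 {
		length++ // capture the overflowing bits
	}
	return buf[:length]
}

func main() {
	fmt.Printf("% x\n", packBools([]bool{true, false, true})) // 10 03 a0
}
```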
- if !bytes.Equal(buf1, buf2) { - dumpBufs(buf1, buf2) - t.Fatalf("Raw bytes differ for encoders") - } -} - -func TestBooleanArrayEncodeAll_Multi_Compressed(t *testing.T) { - src := make([]bool, 10) - for i := range src { - src[i] = i%2 == 0 - } - - b, err := tsm1.BooleanArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if exp := 4; len(b) != exp { - t.Fatalf("unexpected length: got %v, exp %v", len(b), exp) - } - - var dec tsm1.BooleanDecoder - dec.SetBytes(b) - - for i, v := range src { - if !dec.Next() { - t.Fatalf("unexpected next value: got false, exp true") - } - if v != dec.Read() { - t.Fatalf("unexpected value at pos %d: got %v, exp %v", i, dec.Read(), v) - } - } - - if dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } -} - -func TestBooleanArrayEncodeAll_Quick(t *testing.T) { - if err := quick.Check(func(values []bool) bool { - src := values - if values == nil { - src = []bool{} - } - - // Retrieve compressed bytes. - buf, err := tsm1.BooleanArrayEncodeAll(src, nil) - if err != nil { - t.Fatal(err) - } - - // Read values out of decoder. - got := make([]bool, 0, len(values)) - var dec tsm1.BooleanDecoder - dec.SetBytes(buf) - for dec.Next() { - got = append(got, dec.Read()) - } - - // Verify that input and output values match. - if !reflect.DeepEqual(src, got) { - t.Fatalf("mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", src, got) - } - - return true - }, nil); err != nil { - t.Fatal(err) - } -} - -func Test_BooleanArrayDecodeAll_Single(t *testing.T) { - enc := tsm1.NewBooleanEncoder(1) - exp := true - enc.Write(exp) - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - got, _ := tsm1.BooleanArrayDecodeAll(b, nil) - if len(got) != 1 { - t.Fatalf("expected 1 value") - } - if got := got[0]; got != exp { - t.Fatalf("unexpected value -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func Test_BooleanArrayDecodeAll_Multi_Compressed(t *testing.T) { - cases := []struct { - n int - p float64 // probability of a true value - }{ - {10, 0.33}, - {100, 0.55}, - {1000, 0.68}, - } - - for _, tc := range cases { - t.Run(fmt.Sprintf("%d_%0.2f", tc.n, tc.p), func(t *testing.T) { - seededRand := rand.New(rand.NewSource(int64(tc.n * tc.n))) - - enc := tsm1.NewBooleanEncoder(tc.n) - values := make([]bool, tc.n) - for i := range values { - values[i] = seededRand.Float64() < tc.p - enc.Write(values[i]) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - got, err := tsm1.BooleanArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected error %q", err.Error()) - } - - if !cmp.Equal(got, values) { - t.Fatalf("unexpected values, -got/+exp\n%s", cmp.Diff(got, values)) - } - }) - } -} - -func Test_BooleanBatchDecoder_Corrupt(t *testing.T) { - cases := []struct { - name string - d string - }{ - {"empty", ""}, - {"invalid count", "\x10\x90"}, - {"count greater than remaining bits, multiple bytes expected", "\x10\x7f"}, - {"count greater than remaining bits, one byte expected", "\x10\x01"}, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - dst, _ := tsm1.BooleanArrayDecodeAll([]byte(c.d), nil) - if len(dst) != 0 { - t.Fatalf("unexpected result -got/+want\n%s", cmp.Diff(dst, nil)) - } - }) - } -} - -func BenchmarkEncodeBooleans(b *testing.B) { - var err error - cases := []int{10, 100, 1000} - - for _, n := range cases { - enc := tsm1.NewBooleanEncoder(n) - b.Run(fmt.Sprintf("%d_ran", n), func(b *testing.B) { - input := make([]bool, n) - for i := 0; i < n; 
i++ { - input[i] = rand.Int63n(2) == 1 - } - - b.Run("itr", func(b *testing.B) { - b.ReportAllocs() - enc.Reset() - b.ResetTimer() - for n := 0; n < b.N; n++ { - enc.Reset() - for _, x := range input { - enc.Write(x) - } - enc.Flush() - if bufResult, err = enc.Bytes(); err != nil { - b.Fatal(err) - } - } - }) - - b.Run("batch", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - if bufResult, err = tsm1.BooleanArrayEncodeAll(input, bufResult); err != nil { - b.Fatal(err) - } - } - }) - - }) - } -} - -func BenchmarkBooleanArrayDecodeAll(b *testing.B) { - benchmarks := []struct { - n int - }{ - {1}, - {55}, - {555}, - {1000}, - } - for _, bm := range benchmarks { - b.Run(fmt.Sprintf("%d", bm.n), func(b *testing.B) { - size := bm.n - e := tsm1.NewBooleanEncoder(size) - for i := 0; i < size; i++ { - e.Write(i&1 == 1) - } - bytes, err := e.Bytes() - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.SetBytes(int64(len(bytes))) - b.ResetTimer() - - dst := make([]bool, size) - for i := 0; i < b.N; i++ { - res, _ := tsm1.BooleanArrayDecodeAll(bytes, dst) - if len(res) != size { - b.Fatalf("expected to read %d booleans, but read %d", size, len(res)) - } - } - }) - } -} diff --git a/tsdb/engine/tsm1/batch_float.go b/tsdb/engine/tsm1/batch_float.go deleted file mode 100644 index 9789c2a4e6a..00000000000 --- a/tsdb/engine/tsm1/batch_float.go +++ /dev/null @@ -1,514 +0,0 @@ -package tsm1 - -import ( - "encoding/binary" - "fmt" - "io" - "math" - "math/bits" - "unsafe" -) - -// FloatArrayEncodeAll encodes src into b, returning b and any error encountered. -// The returned slice may be of a different length and capacity to b. -// -// Currently only the float compression scheme used in Facebook's Gorilla is -// supported, so this method implements a batch oriented version of that. -func FloatArrayEncodeAll(src []float64, b []byte) ([]byte, error) { - if cap(b) < 9 { - b = make([]byte, 0, 9) // Enough room for the header and one value. - } - - b = b[:1] - b[0] = floatCompressedGorilla << 4 - - var first float64 - var finished bool - if len(src) > 0 && math.IsNaN(src[0]) { - return nil, fmt.Errorf("unsupported value: NaN") - } else if len(src) == 0 { - first = math.NaN() // Write sentinel value to terminate batch. - finished = true - } else { - first = src[0] - src = src[1:] - } - - b = b[:9] - n := uint64(8 + 64) // Number of bits written. - prev := math.Float64bits(first) - - // Write first value. - binary.BigEndian.PutUint64(b[1:], prev) - - prevLeading, prevTrailing := ^uint64(0), uint64(0) - var leading, trailing uint64 - var mask uint64 - var sum float64 - - // Encode remaining values. - for i := 0; !finished; i++ { - var x float64 - if i < len(src) { - x = src[i] - sum += x - } else { - // Encode sentinel value to terminate batch - x = math.NaN() - finished = true - } - - { - cur := math.Float64bits(x) - vDelta := cur ^ prev - if vDelta == 0 { - n++ // Write a zero bit. Nothing else to do. - prev = cur - continue - } - - // First the current bit of the current byte is set to indicate we're - // writing a delta value to the stream. - for n>>3 >= uint64(len(b)) { // Keep growing b until we can fit all bits in. - b = append(b, byte(0)) - } - - // n&7 - current bit in current byte. - // n>>3 - the current byte. - b[n>>3] |= 128 >> (n & 7) // Sets the current bit of the current byte. - n++ - - // Write the delta to b. - - // Determine the leading and trailing zeros. 
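To make the XOR step above concrete: each value is compared with its predecessor by XOR-ing the raw IEEE-754 bits, and only the "meaningful" bits between the leading and trailing zero runs of that XOR are written out. For the `12, 24` pair from the example data used in the tests below, the XOR has 11 leading and 52 trailing zeros, leaving a single meaningful bit to store. A standalone check using only the standard library:

```go
package main

import (
	"fmt"
	"math"
	"math/bits"
)

func main() {
	prev := math.Float64bits(12) // 0x4028000000000000
	cur := math.Float64bits(24)  // 0x4038000000000000

	vDelta := cur ^ prev // 0x0010000000000000
	leading := bits.LeadingZeros64(vDelta)
	trailing := bits.TrailingZeros64(vDelta)

	// 11 leading, 52 trailing: 64-11-52 = 1 meaningful bit to encode.
	fmt.Println(leading, trailing, 64-leading-trailing)
}
```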
- leading = uint64(bits.LeadingZeros64(vDelta)) - trailing = uint64(bits.TrailingZeros64(vDelta)) - - // Clamp number of leading zeros to avoid overflow when encoding - leading &= 0x1F - if leading >= 32 { - leading = 31 - } - - // At least 2 further bits will be required. - if (n+2)>>3 >= uint64(len(b)) { - b = append(b, byte(0)) - } - - if prevLeading != ^uint64(0) && leading >= prevLeading && trailing >= prevTrailing { - n++ // Write a zero bit. - - // Write the l least significant bits of vDelta to b, most significant - // bit first. - l := uint64(64 - prevLeading - prevTrailing) - for (n+l)>>3 >= uint64(len(b)) { // Keep growing b until we can fit all bits in. - b = append(b, byte(0)) - } - - // Full value to write. - v := (vDelta >> prevTrailing) << (64 - l) // l least significant bits of v. - - var m = n & 7 // Current bit in current byte. - var written uint64 - if m > 0 { // In this case the current byte is not full. - written = 8 - m - if l < written { - written = l - } - mask = v >> 56 // Move 8 MSB to 8 LSB - b[n>>3] |= byte(mask >> m) - n += written - - if l-written == 0 { - prev = cur - continue - } - } - - vv := v << written // Move written bits out of the way. - - // TODO(edd): Optimise this. It's unlikely we actually have 8 bytes to write. - if (n>>3)+8 >= uint64(len(b)) { - b = append(b, 0, 0, 0, 0, 0, 0, 0, 0) - } - binary.BigEndian.PutUint64(b[n>>3:], vv) - n += (l - written) - } else { - prevLeading, prevTrailing = leading, trailing - - // Set a single bit to indicate a value will follow. - b[n>>3] |= 128 >> (n & 7) // Set current bit on current byte - n++ - - // Write 5 bits of leading. - if (n+5)>>3 >= uint64(len(b)) { - b = append(b, byte(0)) - } - - // Enough room to write the 5 bits in the current byte? - var m = n & 7 - l := uint64(5) - v := leading << 59 // 5 LSB of leading. - mask = v >> 56 // Move 5 MSB to 8 LSB - - if m <= 3 { // 5 bits fit into current byte. - b[n>>3] |= byte(mask >> m) - n += l - } else { // In this case there are fewer than 5 bits available in current byte. - // First step is to fill current byte - written := 8 - m - b[n>>3] |= byte(mask >> m) // Some of mask will get lost. - n += written - - // Second step is to write the lost part of mask into the next byte. - mask = v << written // Move written bits in previous byte out of way. - mask >>= 56 - - m = n & 7 // Recompute current bit. - b[n>>3] |= byte(mask >> m) - n += (l - written) - } - - // Note that if leading == trailing == 0, then sigbits == 64. But that - // value doesn't actually fit into the 6 bits we have. - // Luckily, we never need to encode 0 significant bits, since that would - // put us in the other case (vdelta == 0). So instead we write out a 0 and - // adjust it back to 64 on unpacking. - sigbits := 64 - leading - trailing - - if (n+6)>>3 >= uint64(len(b)) { - b = append(b, byte(0)) - } - - m = n & 7 - l = uint64(6) - v = sigbits << 58 // Move 6 LSB of sigbits to MSB - mask = v >> 56 // Move 6 MSB to 8 LSB - if m <= 2 { - // The 6 bits fit into the current byte. - b[n>>3] |= byte(mask >> m) - n += l - } else { // In this case there are fewer than 6 bits available in current byte. - // First step is to fill the current byte. - written := 8 - m - b[n>>3] |= byte(mask >> m) // Write to the current bit. - n += written - - // Second step is to write the lost part of mask into the next byte. - // Write l remaining bits into current byte. - mask = v << written // Remove bits written in previous byte out of way. - mask >>= 56 - - m = n & 7 // Recompute current bit. 
- b[n>>3] |= byte(mask >> m) - n += l - written - } - - // Write final value. - m = n & 7 - l = sigbits - v = (vDelta >> trailing) << (64 - l) // Move l LSB into MSB - for (n+l)>>3 >= uint64(len(b)) { // Keep growing b until we can fit all bits in. - b = append(b, byte(0)) - } - - var written uint64 - if m > 0 { // In this case the current byte is not full. - written = 8 - m - if l < written { - written = l - } - mask = v >> 56 // Move 8 MSB to 8 LSB - b[n>>3] |= byte(mask >> m) - n += written - - if l-written == 0 { - prev = cur - continue - } - } - - // Shift remaining bits and write out in one go. - vv := v << written // Remove bits written in previous byte. - // TODO(edd): Optimise this. - if (n>>3)+8 >= uint64(len(b)) { - b = append(b, 0, 0, 0, 0, 0, 0, 0, 0) - } - - binary.BigEndian.PutUint64(b[n>>3:], vv) - n += (l - written) - } - prev = cur - } - } - - if math.IsNaN(sum) { - return nil, fmt.Errorf("unsupported value: NaN") - } - - length := n >> 3 - if n&7 > 0 { - length++ // Add an extra byte to capture overflowing bits. - } - return b[:length], nil -} - -// bitMask contains a lookup table where the index is the number of bits -// and the value is a mask. The table is always read by ANDing the index -// with 0x3f, such that if the index is 64, position 0 will be read, which -// is a 0xffffffffffffffff, thus returning all bits. -// -// 00 = 0xffffffffffffffff -// 01 = 0x0000000000000001 -// 02 = 0x0000000000000003 -// 03 = 0x0000000000000007 -// ... -// 62 = 0x3fffffffffffffff -// 63 = 0x7fffffffffffffff -var bitMask [64]uint64 - -func init() { - v := uint64(1) - for i := 1; i <= 64; i++ { - bitMask[i&0x3f] = v - v = v<<1 | 1 - } -} - -func FloatArrayDecodeAll(b []byte, buf []float64) ([]float64, error) { - if len(b) < 9 { - return []float64{}, nil - } - - var ( - val uint64 // current value - trailingN uint8 // trailing zero count - meaningfulN uint8 = 64 // meaningful bit count - ) - - // first byte is the compression type; always Gorilla - b = b[1:] - - val = binary.BigEndian.Uint64(b) - if val == uvnan { - if buf == nil { - var tmp [1]float64 - buf = tmp[:0] - } - // special case: there were no values to decode - return buf[:0], nil - } - - buf = buf[:0] - // convert the []float64 to []uint64 to avoid calling math.Float64Frombits, - // which results in unnecessary moves between Xn registers before moving - // the value into the float64 slice. This change increased performance from - // 320 MB/s to 340 MB/s on an Intel(R) Core(TM) i7-6920HQ CPU @ 2.90GHz - dst := *(*[]uint64)(unsafe.Pointer(&buf)) - dst = append(dst, val) - - b = b[8:] - - // The bit reader code uses brCachedVal to store up to the next 8 bytes - // of MSB data read from b. brValidBits stores the number of remaining unread - // bits starting from the MSB. Before N bits are read from brCachedVal, - // they are left-rotated N bits, such that they end up in the left-most position. - // Using bits.RotateLeft64 results in a single instruction on many CPU architectures. - // This approach permits simple tests, such as for the two control bits: - // - // brCachedVal&1 > 0 - // - // The alternative was to leave brCachedValue alone and perform shifts and - // masks to read specific bits. 
The original approach looked like the - // following: - // - // brCachedVal&(1<<(brValidBits&0x3f)) > 0 - // - var ( - brCachedVal = uint64(0) // a buffer of up to the next 8 bytes read from b in MSB order - brValidBits = uint8(0) // the number of unread bits remaining in brCachedVal - ) - - // Refill brCachedVal, reading up to 8 bytes from b - if len(b) >= 8 { - // fast path reads 8 bytes directly - brCachedVal = binary.BigEndian.Uint64(b) - brValidBits = 64 - b = b[8:] - } else if len(b) > 0 { - brCachedVal = 0 - brValidBits = uint8(len(b) * 8) - for i := range b { - brCachedVal = (brCachedVal << 8) | uint64(b[i]) - } - brCachedVal = bits.RotateLeft64(brCachedVal, -int(brValidBits)) - b = b[:0] - } else { - goto ERROR - } - - // The expected exit condition is for a uvnan to be decoded. - // Any other error (EOF) indicates a truncated stream. - for { - if brValidBits > 0 { - // brValidBits > 0 is impossible to predict, so we place the - // most likely case inside the if and immediately jump, keeping - // the instruction pipeline consistently full. - // This is a similar approach to using the GCC __builtin_expect - // intrinsic, which modifies the order of branches such that the - // likely case follows the conditional jump. - // - // Written as if brValidBits == 0 and placing the Refill brCachedVal - // code inside reduces benchmarks from 318 MB/s to 260 MB/s on an - // Intel(R) Core(TM) i7-6920HQ CPU @ 2.90GHz - goto READ0 - } - - // Refill brCachedVal, reading up to 8 bytes from b - if len(b) >= 8 { - brCachedVal = binary.BigEndian.Uint64(b) - brValidBits = 64 - b = b[8:] - } else if len(b) > 0 { - brCachedVal = 0 - brValidBits = uint8(len(b) * 8) - for i := range b { - brCachedVal = (brCachedVal << 8) | uint64(b[i]) - } - brCachedVal = bits.RotateLeft64(brCachedVal, -int(brValidBits)) - b = b[:0] - } else { - goto ERROR - } - - READ0: - // read control bit 0 - brValidBits -= 1 - brCachedVal = bits.RotateLeft64(brCachedVal, 1) - if brCachedVal&1 > 0 { - if brValidBits > 0 { - goto READ1 - } - - // Refill brCachedVal, reading up to 8 bytes from b - if len(b) >= 8 { - brCachedVal = binary.BigEndian.Uint64(b) - brValidBits = 64 - b = b[8:] - } else if len(b) > 0 { - brCachedVal = 0 - brValidBits = uint8(len(b) * 8) - for i := range b { - brCachedVal = (brCachedVal << 8) | uint64(b[i]) - } - brCachedVal = bits.RotateLeft64(brCachedVal, -int(brValidBits)) - b = b[:0] - } else { - goto ERROR - } - - READ1: - // read control bit 1 - brValidBits -= 1 - brCachedVal = bits.RotateLeft64(brCachedVal, 1) - if brCachedVal&1 > 0 { - // read 5 bits for leading zero count and 6 bits for the meaningful data count - const leadingTrailingBitCount = 11 - var lmBits uint64 // leading + meaningful data counts - if brValidBits >= leadingTrailingBitCount { - // decode 5 bits leading + 6 bits meaningful for a total of 11 bits - brValidBits -= leadingTrailingBitCount - brCachedVal = bits.RotateLeft64(brCachedVal, leadingTrailingBitCount) - lmBits = brCachedVal - } else { - bits01 := uint8(11) - if brValidBits > 0 { - bits01 -= brValidBits - lmBits = bits.RotateLeft64(brCachedVal, 11) - } - - // Refill brCachedVal, reading up to 8 bytes from b - if len(b) >= 8 { - brCachedVal = binary.BigEndian.Uint64(b) - brValidBits = 64 - b = b[8:] - } else if len(b) > 0 { - brCachedVal = 0 - brValidBits = uint8(len(b) * 8) - for i := range b { - brCachedVal = (brCachedVal << 8) | uint64(b[i]) - } - brCachedVal = bits.RotateLeft64(brCachedVal, -int(brValidBits)) - b = b[:0] - } else { - goto ERROR - } - brCachedVal = 
bits.RotateLeft64(brCachedVal, int(bits01)) - brValidBits -= bits01 - lmBits &^= bitMask[bits01&0x3f] - lmBits |= brCachedVal & bitMask[bits01&0x3f] - } - - lmBits &= 0x7ff - leadingN := uint8((lmBits >> 6) & 0x1f) // 5 bits leading - meaningfulN = uint8(lmBits & 0x3f) // 6 bits meaningful - if meaningfulN > 0 { - trailingN = 64 - leadingN - meaningfulN - } else { - // meaningfulN == 0 is a special case, such that all bits - // are meaningful - trailingN = 0 - meaningfulN = 64 - } - } - - var sBits uint64 // significant bits - if brValidBits >= meaningfulN { - brValidBits -= meaningfulN - brCachedVal = bits.RotateLeft64(brCachedVal, int(meaningfulN)) - sBits = brCachedVal - } else { - mBits := meaningfulN - if brValidBits > 0 { - mBits -= brValidBits - sBits = bits.RotateLeft64(brCachedVal, int(meaningfulN)) - } - - // Refill brCachedVal, reading up to 8 bytes from b - if len(b) >= 8 { - brCachedVal = binary.BigEndian.Uint64(b) - brValidBits = 64 - b = b[8:] - } else if len(b) > 0 { - brCachedVal = 0 - brValidBits = uint8(len(b) * 8) - for i := range b { - brCachedVal = (brCachedVal << 8) | uint64(b[i]) - } - brCachedVal = bits.RotateLeft64(brCachedVal, -int(brValidBits)) - b = b[:0] - } else { - goto ERROR - } - brCachedVal = bits.RotateLeft64(brCachedVal, int(mBits)) - brValidBits -= mBits - sBits &^= bitMask[mBits&0x3f] - sBits |= brCachedVal & bitMask[mBits&0x3f] - } - sBits &= bitMask[meaningfulN&0x3f] - - val ^= sBits << (trailingN & 0x3f) - if val == uvnan { - // IsNaN, eof - break - } - } - - dst = append(dst, val) - } - - return *(*[]float64)(unsafe.Pointer(&dst)), nil - -ERROR: - return (*(*[]float64)(unsafe.Pointer(&dst)))[:0], io.EOF -} diff --git a/tsdb/engine/tsm1/batch_float_test.go b/tsdb/engine/tsm1/batch_float_test.go deleted file mode 100644 index 9f614eb3799..00000000000 --- a/tsdb/engine/tsm1/batch_float_test.go +++ /dev/null @@ -1,438 +0,0 @@ -package tsm1_test - -import ( - "bytes" - "fmt" - "math" - "math/rand" - "reflect" - "testing" - "testing/quick" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -var fullBlockFloat64Ones []float64 - -func init() { - for i := 0; i < 1000; i++ { - fullBlockFloat64Ones = append(fullBlockFloat64Ones, 1.0) - } -} -func TestFloatArrayEncodeAll(t *testing.T) { - examples := [][]float64{ - {12, 12, 24, 13, 24, 24, 24, 24}, // From example paper. - {-3.8970913068231994e+307, -9.036931257783943e+307, 1.7173073833490201e+308, - -9.312369166661538e+307, -2.2435523083555231e+307, 1.4779121287289644e+307, - 1.771273431601434e+308, 8.140360378221364e+307, 4.783405048208089e+307, - -2.8044680049605344e+307, 4.412915337205696e+307, -1.2779380602005046e+308, - 1.6235802318921885e+308, -1.3402901846299688e+307, 1.6961015582104055e+308, - -1.067980796435633e+308, -3.02868987458268e+307, 1.7641793640790284e+308, - 1.6587191845856813e+307, -1.786073304985983e+308, 1.0694549382051123e+308, - 3.5635180996210295e+307}, // Failed during early development - {6.00065e+06, 6.000656e+06, 6.000657e+06, 6.000659e+06, 6.000661e+06}, // Similar values. 
- twoHoursData, - fullBlockFloat64Ones, - {}, - } - - for _, example := range examples { - src := example - var buf []byte - buf, err := tsm1.FloatArrayEncodeAll(src, buf) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - result, err := tsm1.FloatArrayDecodeAll(buf, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got, exp := result, src; !reflect.DeepEqual(got, exp) { - t.Fatalf("got result %v, expected %v", got, exp) - } - } -} - -func TestFloatArrayEncode_Compare(t *testing.T) { - // generate random values - input := make([]float64, 1000) - for i := 0; i < len(input); i++ { - input[i] = (rand.Float64() * math.MaxFloat64) - math.MaxFloat32 - } - - s := tsm1.NewFloatEncoder() - for _, v := range input { - s.Write(v) - } - s.Flush() - - buf1, err := s.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var buf2 []byte - buf2, err = tsm1.FloatArrayEncodeAll(input, buf2) - if err != nil { - t.Fatalf("unexpected error: %v\nbuf: %db %x", err, len(buf2), buf2) - } - - result, err := tsm1.FloatArrayDecodeAll(buf2, nil) - if err != nil { - dumpBufs(buf1, buf2) - t.Fatalf("unexpected error: %v\nbuf: %db %x", err, len(buf2), buf2) - } - - if got, exp := result, input; !reflect.DeepEqual(got, exp) { - t.Fatalf("got result %v, expected %v", got, exp) - } - - // Check that the encoders are byte for byte the same... - if !bytes.Equal(buf1, buf2) { - dumpBufs(buf1, buf2) - t.Fatalf("Raw bytes differ for encoders") - } -} - -func dumpBufs(a, b []byte) { - longest := len(a) - if len(b) > longest { - longest = len(b) - } - - for i := 0; i < longest; i++ { - var as, bs string - if i < len(a) { - as = fmt.Sprintf("%08b", a[i]) - } - if i < len(b) { - bs = fmt.Sprintf("%08b", b[i]) - } - - same := as == bs - fmt.Printf("%d (%d) %s - %s :: %v\n", i, i*8, as, bs, same) - } - fmt.Println() -} - -func TestFloatArrayEncodeAll_NaN(t *testing.T) { - examples := [][]float64{ - {1.0, math.NaN(), 2.0}, - {1.22, math.NaN()}, - {math.NaN(), math.NaN()}, - {math.NaN()}, - } - - for _, example := range examples { - var buf []byte - _, err := tsm1.FloatArrayEncodeAll(example, buf) - if err == nil { - t.Fatalf("expected error. 
got nil") - } - } -} - -func Test_FloatArrayEncodeAll_Quick(t *testing.T) { - quick.Check(func(values []float64) bool { - src := values - if src == nil { - src = []float64{} - } - - for i, v := range src { - if math.IsNaN(v) { - src[i] = 1.0 // Remove invalid values - } - } - - s := tsm1.NewFloatEncoder() - for _, p := range src { - s.Write(p) - } - s.Flush() - - buf1, err := s.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var buf2 []byte - buf2, err = tsm1.FloatArrayEncodeAll(src, buf2) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - result, err := tsm1.FloatArrayDecodeAll(buf2, nil) - if err != nil { - dumpBufs(buf1, buf2) - fmt.Println(src) - t.Fatalf("unexpected error: %v", err) - } - - if got, exp := result, src; !reflect.DeepEqual(got, exp) { - t.Fatalf("got result %v, expected %v", got, exp) - } - return true - }, nil) -} - -func TestDecodeFloatArrayAll_Empty(t *testing.T) { - s := tsm1.NewFloatEncoder() - s.Flush() - - b, err := s.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var got []float64 - if _, err := tsm1.FloatArrayDecodeAll(b, got); err != nil { - t.Fatal(err) - } - -} - -func TestFloatArrayDecodeAll_Simple(t *testing.T) { - // Example from the paper - s := tsm1.NewFloatEncoder() - - exp := []float64{ - 12, - 12, - 24, - - // extra tests - - // floating point masking/shifting bug - 13, - 24, - - // delta-of-delta sizes - 24, - 24, - 24, - } - - for _, f := range exp { - s.Write(f) - } - s.Flush() - - b, err := s.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - buf := make([]float64, 8) - got, err := tsm1.FloatArrayDecodeAll(b, buf) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestFloatArrayDecodeAll_Empty(t *testing.T) { - s := tsm1.NewFloatEncoder() - s.Flush() - - b, err := s.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - buf := make([]float64, 8) - got, err := tsm1.FloatArrayDecodeAll(b, buf) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if exp := []float64{}; !cmp.Equal(got, exp) { - t.Fatalf("unexpected values -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -var bufResult []byte - -func BenchmarkEncodeFloats(b *testing.B) { - var err error - cases := []int{10, 100, 1000} - enc := tsm1.NewFloatEncoder() - - for _, n := range cases { - b.Run(fmt.Sprintf("%d_seq", n), func(b *testing.B) { - input := make([]float64, n) - for i := 0; i < n; i++ { - input[i] = float64(i) - } - - b.Run("itr", func(b *testing.B) { - b.ReportAllocs() - enc.Reset() - b.ResetTimer() - for n := 0; n < b.N; n++ { - enc.Reset() - for _, x := range input { - enc.Write(x) - } - enc.Flush() - if bufResult, err = enc.Bytes(); err != nil { - b.Fatal(err) - } else { - b.SetBytes(int64(len(bufResult))) - } - } - }) - - b.Run("batch", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - if bufResult, err = tsm1.FloatArrayEncodeAll(input, bufResult); err != nil { - b.Fatal(err) - } else { - b.SetBytes(int64(len(bufResult))) - } - } - }) - - }) - - b.Run(fmt.Sprintf("%d_ran", n), func(b *testing.B) { - input := make([]float64, n) - for i := 0; i < n; i++ { - input[i] = rand.Float64() * 100.0 - } - - b.Run("itr", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - enc.Reset() - for _, x := range input { - enc.Write(x) - } - enc.Flush() - if bufResult, err = 
enc.Bytes(); err != nil { - b.Fatal(err) - } else { - b.SetBytes(int64(len(bufResult))) - } - } - }) - - b.Run("batch", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - if bufResult, err = tsm1.FloatArrayEncodeAll(input, bufResult); err != nil { - b.Fatal(err) - } else { - b.SetBytes(int64(len(bufResult))) - } - } - }) - }) - } -} - -func BenchmarkDecodeFloats(b *testing.B) { - cases := []int{1, 55, 550, 1000} - for _, n := range cases { - b.Run(fmt.Sprintf("%d_seq", n), func(b *testing.B) { - s := tsm1.NewFloatEncoder() - for i := 0; i < n; i++ { - s.Write(float64(i)) - } - s.Flush() - data, err := s.Bytes() - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.SetBytes(int64(len(data))) - b.ResetTimer() - - dst := make([]float64, n) - for i := 0; i < b.N; i++ { - - got, err := tsm1.FloatArrayDecodeAll(data, dst) - if err != nil { - b.Fatalf("unexpected error\n%s", err.Error()) - } - if len(got) != n { - b.Fatalf("unexpected length -got/+exp\n%s", cmp.Diff(len(got), n)) - } - } - }) - - b.Run(fmt.Sprintf("%d_ran", n), func(b *testing.B) { - s := tsm1.NewFloatEncoder() - for i := 0; i < n; i++ { - s.Write(rand.Float64() * 100.0) - } - s.Flush() - data, err := s.Bytes() - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.SetBytes(int64(len(data))) - b.ResetTimer() - - dst := make([]float64, n) - for i := 0; i < b.N; i++ { - - got, err := tsm1.FloatArrayDecodeAll(data, dst) - if err != nil { - b.Fatalf("unexpected error\n%s", err.Error()) - } - if len(got) != n { - b.Fatalf("unexpected length -got/+exp\n%s", cmp.Diff(len(got), n)) - } - } - }) - } -} - -func BenchmarkFloatArrayDecodeAll(b *testing.B) { - benchmarks := []int{ - 1, - 55, - 550, - 1000, - } - for _, size := range benchmarks { - s := tsm1.NewFloatEncoder() - for c := 0; c < size; c++ { - s.Write(twoHoursData[c%len(twoHoursData)]) - } - s.Flush() - bytes, err := s.Bytes() - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - b.ResetTimer() - - dst := make([]float64, size) - for i := 0; i < b.N; i++ { - - got, err := tsm1.FloatArrayDecodeAll(bytes, dst) - if err != nil { - b.Fatalf("unexpected error\n%s", err.Error()) - } - if len(got) != size { - b.Fatalf("unexpected length -got/+exp\n%s", cmp.Diff(len(got), size)) - } - } - }) - } -} diff --git a/tsdb/engine/tsm1/batch_integer.go b/tsdb/engine/tsm1/batch_integer.go deleted file mode 100644 index f33fcee0cfd..00000000000 --- a/tsdb/engine/tsm1/batch_integer.go +++ /dev/null @@ -1,290 +0,0 @@ -package tsm1 - -import ( - "encoding/binary" - "fmt" - "unsafe" - - "github.com/influxdata/influxdb/v2/pkg/encoding/simple8b" -) - -// IntegerArrayEncodeAll encodes src into b, returning b and any error encountered. -// The returned slice may be of a different length and capacity to b. -// -// IntegerArrayEncodeAll implements batch oriented versions of the three integer -// encoding types we support: uncompressed, simple8b and RLE. -// -// Important: IntegerArrayEncodeAll modifies the contents of src by using it as -// scratch space for delta encoded values. It is NOT SAFE to use src after -// passing it into IntegerArrayEncodeAll. -func IntegerArrayEncodeAll(src []int64, b []byte) ([]byte, error) { - if len(src) == 0 { - return nil, nil // Nothing to do - } - - var max = uint64(0) - - // To prevent an allocation of the entire block we're encoding reuse the - // src slice to store the encoded deltas. 
- deltas := reinterpretInt64ToUint64Slice(src) - for i := len(deltas) - 1; i > 0; i-- { - deltas[i] = deltas[i] - deltas[i-1] - deltas[i] = ZigZagEncode(int64(deltas[i])) - if deltas[i] > max { - max = deltas[i] - } - } - - deltas[0] = ZigZagEncode(int64(deltas[0])) - - if len(deltas) > 2 { - var rle = true - for i := 2; i < len(deltas); i++ { - if deltas[1] != deltas[i] { - rle = false - break - } - } - - if rle { - // Large varints can take up to 10 bytes. We're storing 3 + 1 - // type byte. - if len(b) < 31 && cap(b) >= 31 { - b = b[:31] - } else if len(b) < 31 { - b = append(b, make([]byte, 31-len(b))...) - } - - // 4 high bits used for the encoding type - b[0] = byte(intCompressedRLE) << 4 - - i := 1 - // The first value - binary.BigEndian.PutUint64(b[i:], deltas[0]) - i += 8 - // The first delta - i += binary.PutUvarint(b[i:], deltas[1]) - // The number of times the delta is repeated - i += binary.PutUvarint(b[i:], uint64(len(deltas)-1)) - - return b[:i], nil - } - } - - if max > simple8b.MaxValue { // There is an encoded value that's too big to simple8b encode. - // Encode uncompressed. - sz := 1 + len(deltas)*8 - if len(b) < sz && cap(b) >= sz { - b = b[:sz] - } else if len(b) < sz { - b = append(b, make([]byte, sz-len(b))...) - } - - // 4 high bits of first byte store the encoding type for the block - b[0] = byte(intUncompressed) << 4 - for i, v := range deltas { - binary.BigEndian.PutUint64(b[1+i*8:1+i*8+8], uint64(v)) - } - return b[:sz], nil - } - - // Encode with simple8b - fist value is written unencoded using 8 bytes. - encoded, err := simple8b.EncodeAll(deltas[1:]) - if err != nil { - return nil, err - } - - sz := 1 + (len(encoded)+1)*8 - if len(b) < sz && cap(b) >= sz { - b = b[:sz] - } else if len(b) < sz { - b = append(b, make([]byte, sz-len(b))...) - } - - // 4 high bits of first byte store the encoding type for the block - b[0] = byte(intCompressedSimple) << 4 - - // Write the first value since it's not part of the encoded values - binary.BigEndian.PutUint64(b[1:9], deltas[0]) - - // Write the encoded values - for i, v := range encoded { - binary.BigEndian.PutUint64(b[9+i*8:9+i*8+8], v) - } - return b, nil -} - -// UnsignedArrayEncodeAll encodes src into b, returning b and any error encountered. -// The returned slice may be of a different length and capacity to b. -// -// UnsignedArrayEncodeAll implements batch oriented versions of the three integer -// encoding types we support: uncompressed, simple8b and RLE. -// -// Important: IntegerArrayEncodeAll modifies the contents of src by using it as -// scratch space for delta encoded values. It is NOT SAFE to use src after -// passing it into IntegerArrayEncodeAll. 
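For reference, the RLE branch above lays a block out as: a type nibble in the high four bits of byte 0, the zig-zag encoded first value in the next 8 bytes, then two uvarints holding the zig-zag encoded delta and the repeat count. The following standalone reader walks that layout; the type-nibble value and helper names are illustrative only, not the package's decoder.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// decodeIntRLE is an illustrative reader for the RLE block layout sketched
// above: [type nibble in the high 4 bits of byte 0][8-byte big-endian
// zig-zag first value][uvarint zig-zag delta][uvarint repeat count].
func decodeIntRLE(b []byte) (first, delta int64, count uint64, err error) {
	if len(b) < 11 {
		return 0, 0, 0, errors.New("short RLE block")
	}
	unZigZag := func(v uint64) int64 { return int64((v >> 1) ^ -(v & 1)) }

	k := 1 // skip the header/type byte
	first = unZigZag(binary.BigEndian.Uint64(b[k : k+8]))
	k += 8

	d, n := binary.Uvarint(b[k:])
	if n <= 0 {
		return 0, 0, 0, errors.New("bad delta varint")
	}
	k += n
	delta = unZigZag(d)

	c, n := binary.Uvarint(b[k:])
	if n <= 0 {
		return 0, 0, 0, errors.New("bad count varint")
	}
	return first, delta, c + 1, nil // the stored count excludes the first value
}

func main() {
	zigZag := func(v int64) uint64 { return uint64((v << 1) ^ (v >> 63)) }

	// Hand-build a block meaning: first value 10, then delta 5 repeated 3 more times.
	b := []byte{2 << 4} // hypothetical RLE type nibble
	var first [8]byte
	binary.BigEndian.PutUint64(first[:], zigZag(10))
	b = append(b, first[:]...)
	b = binary.AppendUvarint(b, zigZag(5))
	b = binary.AppendUvarint(b, 3)

	fmt.Println(decodeIntRLE(b)) // 10 5 4 <nil>
}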
-func UnsignedArrayEncodeAll(src []uint64, b []byte) ([]byte, error) { - srcint := reinterpretUint64ToInt64Slice(src) - return IntegerArrayEncodeAll(srcint, b) -} - -var ( - integerBatchDecoderFunc = [...]func(b []byte, dst []int64) ([]int64, error){ - integerBatchDecodeAllUncompressed, - integerBatchDecodeAllSimple, - integerBatchDecodeAllRLE, - integerBatchDecodeAllInvalid, - } -) - -func IntegerArrayDecodeAll(b []byte, dst []int64) ([]int64, error) { - if len(b) == 0 { - return []int64{}, nil - } - - encoding := b[0] >> 4 - if encoding > intCompressedRLE { - encoding = 3 // integerBatchDecodeAllInvalid - } - - return integerBatchDecoderFunc[encoding&3](b, dst) -} - -func UnsignedArrayDecodeAll(b []byte, dst []uint64) ([]uint64, error) { - if len(b) == 0 { - return []uint64{}, nil - } - - encoding := b[0] >> 4 - if encoding > intCompressedRLE { - encoding = 3 // integerBatchDecodeAllInvalid - } - - res, err := integerBatchDecoderFunc[encoding&3](b, reinterpretUint64ToInt64Slice(dst)) - return reinterpretInt64ToUint64Slice(res), err -} - -func integerBatchDecodeAllUncompressed(b []byte, dst []int64) ([]int64, error) { - b = b[1:] - if len(b)&0x7 != 0 { - return []int64{}, fmt.Errorf("IntegerArrayDecodeAll: expected multiple of 8 bytes") - } - - count := len(b) / 8 - if cap(dst) < count { - dst = make([]int64, count) - } else { - dst = dst[:count] - } - - prev := int64(0) - for i := range dst { - prev += ZigZagDecode(binary.BigEndian.Uint64(b[i*8:])) - dst[i] = prev - } - - return dst, nil -} - -func integerBatchDecodeAllSimple(b []byte, dst []int64) ([]int64, error) { - b = b[1:] - if len(b) < 8 { - return []int64{}, fmt.Errorf("IntegerArrayDecodeAll: not enough data to decode packed value") - } - - count, err := simple8b.CountBytes(b[8:]) - if err != nil { - return []int64{}, err - } - - count += 1 - if cap(dst) < count { - dst = make([]int64, count) - } else { - dst = dst[:count] - } - - // first value - dst[0] = ZigZagDecode(binary.BigEndian.Uint64(b)) - - // decode compressed values - buf := reinterpretInt64ToUint64Slice(dst) - n, err := simple8b.DecodeBytesBigEndian(buf[1:], b[8:]) - if err != nil { - return []int64{}, err - } - if n != count-1 { - return []int64{}, fmt.Errorf("IntegerArrayDecodeAll: unexpected number of values decoded; got=%d, exp=%d", n, count-1) - } - - // calculate prefix sum - prev := dst[0] - for i := 1; i < len(dst); i++ { - prev += ZigZagDecode(uint64(dst[i])) - dst[i] = prev - } - - return dst, nil -} - -func integerBatchDecodeAllRLE(b []byte, dst []int64) ([]int64, error) { - b = b[1:] - if len(b) < 8 { - return []int64{}, fmt.Errorf("IntegerArrayDecodeAll: not enough data to decode RLE starting value") - } - - var k, n int - - // Next 8 bytes is the starting value - first := ZigZagDecode(binary.BigEndian.Uint64(b[k : k+8])) - k += 8 - - // Next 1-10 bytes is the delta value - value, n := binary.Uvarint(b[k:]) - if n <= 0 { - return []int64{}, fmt.Errorf("IntegerArrayDecodeAll: invalid RLE delta value") - } - k += n - - delta := ZigZagDecode(value) - - // Last 1-10 bytes is how many times the value repeats - count, n := binary.Uvarint(b[k:]) - if n <= 0 { - return []int64{}, fmt.Errorf("IntegerArrayDecodeAll: invalid RLE repeat value") - } - - count += 1 - - if cap(dst) < int(count) { - dst = make([]int64, count) - } else { - dst = dst[:count] - } - - if delta == 0 { - for i := range dst { - dst[i] = first - } - } else { - acc := first - for i := range dst { - dst[i] = acc - acc += delta - } - } - - return dst, nil -} - -func integerBatchDecodeAllInvalid(b 
[]byte, _ []int64) ([]int64, error) { - return []int64{}, fmt.Errorf("unknown encoding %v", b[0]>>4) -} - -func reinterpretInt64ToUint64Slice(src []int64) []uint64 { - return *(*[]uint64)(unsafe.Pointer(&src)) -} - -func reinterpretUint64ToInt64Slice(src []uint64) []int64 { - return *(*[]int64)(unsafe.Pointer(&src)) -} diff --git a/tsdb/engine/tsm1/batch_integer_test.go b/tsdb/engine/tsm1/batch_integer_test.go deleted file mode 100644 index 6034bf271d1..00000000000 --- a/tsdb/engine/tsm1/batch_integer_test.go +++ /dev/null @@ -1,1174 +0,0 @@ -package tsm1 - -import ( - "bytes" - "fmt" - "math" - "math/rand" - "reflect" - "sort" - "testing" - "testing/quick" - - "github.com/google/go-cmp/cmp" -) - -func dumpBufs(a, b []byte) { - longest := len(a) - if len(b) > longest { - longest = len(b) - } - - for i := 0; i < longest; i++ { - var as, bs string - if i < len(a) { - as = fmt.Sprintf("%08[1]b (%[1]d)", a[i]) - } - if i < len(b) { - bs = fmt.Sprintf("%08[1]b (%[1]d)", b[i]) - } - - same := as == bs - fmt.Printf("%d (%d) %s - %s :: %v\n", i, i*8, as, bs, same) - } - fmt.Println() -} - -func TestIntegerArrayEncodeAll_NoValues(t *testing.T) { - b, err := IntegerArrayEncodeAll(nil, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if len(b) > 0 { - t.Fatalf("unexpected length: exp 0, got %v", len(b)) - } - - var dec IntegerDecoder - dec.SetBytes(b) - if dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } -} - -func TestIntegerArrayEncodeAll_Compare(t *testing.T) { - // generate random values (should use simple8b) - input := make([]int64, 1000) - for i := 0; i < len(input); i++ { - input[i] = rand.Int63n(100000) - 50000 - } - sort.Slice(input, func(i int, j int) bool { return input[i] < input[j] }) - testIntegerArrayEncodeAll_Compare(t, input, intCompressedSimple) - - // Generate same values (should use RLE) - for i := 0; i < len(input); i++ { - input[i] = 1232342341234 - } - testIntegerArrayEncodeAll_Compare(t, input, intCompressedRLE) - - // Generate large random values that are not sorted. The deltas will be large - // and the values should be stored uncompressed. - for i := 0; i < len(input); i++ { - input[i] = int64(rand.Uint64()) - } - testIntegerArrayEncodeAll_Compare(t, input, intUncompressed) -} - -func testIntegerArrayEncodeAll_Compare(t *testing.T, input []int64, encoding byte) { - exp := make([]int64, len(input)) - copy(exp, input) - - s := NewIntegerEncoder(1000) - for _, v := range input { - s.Write(v) - } - - buf1, err := s.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got, exp := buf1[0]>>4, encoding; got != exp { - t.Fatalf("got encoding %v, expected %v", got, encoding) - } - - var buf2 []byte - buf2, err = IntegerArrayEncodeAll(input, buf2) - if err != nil { - t.Fatalf("unexpected error: %v\nbuf: %db %x", err, len(buf2), buf2) - } - - if got, exp := buf2[0]>>4, encoding; got != exp { - t.Fatalf("got encoding %v, expected %v", got, encoding) - } - - result, err := IntegerArrayDecodeAll(buf2, nil) - if err != nil { - dumpBufs(buf1, buf2) - t.Fatalf("unexpected error: %v\nbuf: %db %x", err, len(buf2), buf2) - } - - if got := result; !reflect.DeepEqual(got, exp) { - t.Fatalf("-got/+exp\n%s", cmp.Diff(got, exp)) - } - - // Check that the encoders are byte for byte the same... 
- if !bytes.Equal(buf1, buf2) { - dumpBufs(buf1, buf2) - t.Fatalf("Raw bytes differ for encoders") - } -} - -func TestUnsignedArrayEncodeAll_Compare(t *testing.T) { - // generate random values (should use simple8b) - input := make([]uint64, 1000) - for i := 0; i < len(input); i++ { - input[i] = uint64(rand.Int63n(100000)) - } - sort.Slice(input, func(i int, j int) bool { return input[i] < input[j] }) - testUnsignedArrayEncodeAll_Compare(t, input, intCompressedSimple) - - // Generate same values (should use RLE) - for i := 0; i < len(input); i++ { - input[i] = 1232342341234 - } - testUnsignedArrayEncodeAll_Compare(t, input, intCompressedRLE) - - // Generate large random values that are not sorted. The deltas will be large - // and the values should be stored uncompressed. - for i := 0; i < len(input); i++ { - input[i] = rand.Uint64() - } - testUnsignedArrayEncodeAll_Compare(t, input, intUncompressed) -} - -func testUnsignedArrayEncodeAll_Compare(t *testing.T, input []uint64, encoding byte) { - exp := make([]uint64, len(input)) - copy(exp, input) - - s := NewIntegerEncoder(1000) - for _, v := range input { - s.Write(int64(v)) - } - - buf1, err := s.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got, exp := buf1[0]>>4, encoding; got != exp { - t.Fatalf("got encoding %v, expected %v", got, encoding) - } - - var buf2 []byte - buf2, err = UnsignedArrayEncodeAll(input, buf2) - if err != nil { - t.Fatalf("unexpected error: %v\nbuf: %db %x", err, len(buf2), buf2) - } - - if got, exp := buf2[0]>>4, encoding; got != exp { - t.Fatalf("got encoding %v, expected %v", got, encoding) - } - - result, err := UnsignedArrayDecodeAll(buf2, nil) - if err != nil { - dumpBufs(buf1, buf2) - t.Fatalf("unexpected error: %v\nbuf: %db %x", err, len(buf2), buf2) - } - - if got := result; !reflect.DeepEqual(got, exp) { - t.Fatalf("got result %v, expected %v", got, exp) - } - - // Check that the encoders are byte for byte the same... 
- if !bytes.Equal(buf1, buf2) { - dumpBufs(buf1, buf2) - t.Fatalf("Raw bytes differ for encoders") - } -} - -func TestIntegerArrayEncodeAll_One(t *testing.T) { - v1 := int64(1) - - src := []int64{1} - b, err := IntegerArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; intCompressedSimple != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - var dec IntegerDecoder - dec.SetBytes(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v1 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v1) - } -} - -func TestIntegerArrayEncodeAll_Two(t *testing.T) { - var v1, v2 int64 = 1, 2 - - src := []int64{v1, v2} - b, err := IntegerArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; intCompressedSimple != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - var dec IntegerDecoder - dec.SetBytes(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v1 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v1) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v2 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v2) - } -} - -func TestIntegerArrayEncodeAll_Negative(t *testing.T) { - var v1, v2, v3 int64 = -2, 0, 1 - - src := []int64{v1, v2, v3} - b, err := IntegerArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; intCompressedSimple != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - var dec IntegerDecoder - dec.SetBytes(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v1 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v1) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v2 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v2) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v3 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v3) - } -} - -func TestIntegerArrayEncodeAll_Large_Range(t *testing.T) { - exp := []int64{math.MaxInt64, 0, math.MaxInt64} - - b, err := IntegerArrayEncodeAll(append([]int64{}, exp...), nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; intUncompressed != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - var dec IntegerDecoder - dec.SetBytes(b) - - var got []int64 - for dec.Next() { - got = append(got, dec.Read()) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unxpected result, -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestIntegerArrayEncodeAll_Uncompressed(t *testing.T) { - var v1, v2, v3 int64 = 0, 1, 1 << 60 - - src := []int64{v1, v2, v3} - b, err := IntegerArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("expected error: %v", err) - } - - // 1 byte header + 3 * 8 byte values - if exp := 25; len(b) != exp { - t.Fatalf("length mismatch: got %v, exp %v", len(b), exp) - } - - if got := b[0] >> 4; intUncompressed != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - var dec IntegerDecoder - dec.SetBytes(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v1 != 
dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v1) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v2 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v2) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v3 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v3) - } -} - -func TestIntegerArrayEncodeAll_NegativeUncompressed(t *testing.T) { - src := []int64{ - -2352281900722994752, 1438442655375607923, -4110452567888190110, - -1221292455668011702, -1941700286034261841, -2836753127140407751, - 1432686216250034552, 3663244026151507025, -3068113732684750258, - -1949953187327444488, 3713374280993588804, 3226153669854871355, - -2093273755080502606, 1006087192578600616, -2272122301622271655, - 2533238229511593671, -4450454445568858273, 2647789901083530435, - 2761419461769776844, -1324397441074946198, -680758138988210958, - 94468846694902125, -2394093124890745254, -2682139311758778198, - } - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := IntegerArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("expected error: %v", err) - } - - if got := b[0] >> 4; intUncompressed != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - var dec IntegerDecoder - dec.SetBytes(b) - - i := 0 - for dec.Next() { - if i > len(src) { - t.Fatalf("read too many values: got %v, exp %v", i, len(exp)) - } - - if exp[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), exp[i]) - } - i += 1 - } - - if i != len(exp) { - t.Fatalf("failed to read enough values: got %v, exp %v", i, len(exp)) - } -} - -func TestIntegerArrayEncodeAll_AllNegative(t *testing.T) { - src := []int64{ - -10, -5, -1, - } - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := IntegerArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; intCompressedSimple != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - var dec IntegerDecoder - dec.SetBytes(b) - i := 0 - for dec.Next() { - if i > len(exp) { - t.Fatalf("read too many values: got %v, exp %v", i, len(exp)) - } - - if exp[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), exp[i]) - } - i += 1 - } - - if i != len(exp) { - t.Fatalf("failed to read enough values: got %v, exp %v", i, len(exp)) - } -} - -func TestIntegerArrayEncodeAll_CounterPacked(t *testing.T) { - src := []int64{ - 1e15, 1e15 + 1, 1e15 + 2, 1e15 + 3, 1e15 + 4, 1e15 + 6, - } - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := IntegerArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != intCompressedSimple { - t.Fatalf("unexpected encoding format: expected simple, got %v", b[0]>>4) - } - - // Should use 1 header byte + 2, 8 byte words if delta-encoding is used based on - // values sizes. Without delta-encoding, we'd get 49 bytes. 
- if exp := 17; len(b) != exp { - t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp) - } - - var dec IntegerDecoder - dec.SetBytes(b) - i := 0 - for dec.Next() { - if i > len(exp) { - t.Fatalf("read too many values: got %v, exp %v", i, len(exp)) - } - - if exp[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), exp[i]) - } - i += 1 - } - - if i != len(exp) { - t.Fatalf("failed to read enough values: got %v, exp %v", i, len(exp)) - } -} - -func TestIntegerArrayEncodeAll_CounterRLE(t *testing.T) { - src := []int64{ - 1e15, 1e15 + 1, 1e15 + 2, 1e15 + 3, 1e15 + 4, 1e15 + 5, - } - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := IntegerArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != intCompressedRLE { - t.Fatalf("unexpected encoding format: expected RLE, got %v", b[0]>>4) - } - - // Should use 1 header byte, 8 byte first value, 1 var-byte for delta and 1 var-byte for - // count of deltas in this particular RLE. - if exp := 11; len(b) != exp { - t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp) - } - - var dec IntegerDecoder - dec.SetBytes(b) - i := 0 - for dec.Next() { - if i > len(exp) { - t.Fatalf("read too many values: got %v, exp %v", i, len(exp)) - } - - if exp[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), exp[i]) - } - i += 1 - } - - if i != len(exp) { - t.Fatalf("failed to read enough values: got %v, exp %v", i, len(exp)) - } -} - -func TestIntegerArrayEncodeAll_Descending(t *testing.T) { - src := []int64{ - 7094, 4472, 1850, - } - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := IntegerArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != intCompressedRLE { - t.Fatalf("unexpected encoding format: expected simple, got %v", b[0]>>4) - } - - // Should use 1 header byte, 8 byte first value, 1 var-byte for delta and 1 var-byte for - // count of deltas in this particular RLE. - if exp := 12; len(b) != exp { - t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp) - } - - var dec IntegerDecoder - dec.SetBytes(b) - i := 0 - for dec.Next() { - if i > len(exp) { - t.Fatalf("read too many values: got %v, exp %v", i, len(exp)) - } - - if exp[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), exp[i]) - } - i += 1 - } - - if i != len(exp) { - t.Fatalf("failed to read enough values: got %v, exp %v", i, len(exp)) - } -} - -func TestIntegerArrayEncodeAll_Flat(t *testing.T) { - src := []int64{ - 1, 1, 1, 1, - } - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := IntegerArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != intCompressedRLE { - t.Fatalf("unexpected encoding format: expected simple, got %v", b[0]>>4) - } - - // Should use 1 header byte, 8 byte first value, 1 var-byte for delta and 1 var-byte for - // count of deltas in this particular RLE. 
- if exp := 11; len(b) != exp { - t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp) - } - - var dec IntegerDecoder - dec.SetBytes(b) - i := 0 - for dec.Next() { - if i > len(exp) { - t.Fatalf("read too many values: got %v, exp %v", i, len(exp)) - } - - if exp[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), exp[i]) - } - i += 1 - } - - if i != len(exp) { - t.Fatalf("failed to read enough values: got %v, exp %v", i, len(exp)) - } -} - -func TestIntegerArrayEncodeAll_MinMax(t *testing.T) { - src := []int64{ - math.MinInt64, math.MaxInt64, - } - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := IntegerArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != intCompressedSimple { - t.Fatalf("unexpected encoding format: expected simple, got %v", b[0]>>4) - } - - if exp := 17; len(b) != exp { - t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp) - } - - var dec IntegerDecoder - dec.SetBytes(b) - i := 0 - for dec.Next() { - if i > len(exp) { - t.Fatalf("read too many values: got %v, exp %v", i, len(exp)) - } - - if exp[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), exp[i]) - } - i += 1 - } - - if i != len(exp) { - t.Fatalf("failed to read enough values: got %v, exp %v", i, len(exp)) - } -} - -func TestIntegerArrayEncodeAll_Quick(t *testing.T) { - quick.Check(func(values []int64) bool { - src := values - if values == nil { - src = []int64{} // is this really expected? - } - - // Copy over values to compare result—src is modified... - exp := make([]int64, 0, len(src)) - exp = append(exp, src...) - - // Retrieve encoded bytes from encoder. - b, err := IntegerArrayEncodeAll(src, nil) - if err != nil { - t.Fatal(err) - } - - // Read values out of decoder. - got := make([]int64, 0, len(src)) - var dec IntegerDecoder - dec.SetBytes(b) - for dec.Next() { - if err := dec.Error(); err != nil { - t.Fatal(err) - } - got = append(got, dec.Read()) - } - - // Verify that input and output values match. 
- if !reflect.DeepEqual(exp, got) { - t.Fatalf("mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", src, got) - } - - return true - }, nil) -} - -func TestIntegerArrayDecodeAll_NegativeUncompressed(t *testing.T) { - exp := []int64{ - -2352281900722994752, 1438442655375607923, -4110452567888190110, - -1221292455668011702, -1941700286034261841, -2836753127140407751, - 1432686216250034552, 3663244026151507025, -3068113732684750258, - -1949953187327444488, 3713374280993588804, 3226153669854871355, - -2093273755080502606, 1006087192578600616, -2272122301622271655, - 2533238229511593671, -4450454445568858273, 2647789901083530435, - 2761419461769776844, -1324397441074946198, -680758138988210958, - 94468846694902125, -2394093124890745254, -2682139311758778198, - } - enc := NewIntegerEncoder(256) - for _, v := range exp { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("expected error: %v", err) - } - - if got := b[0] >> 4; intUncompressed != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - got, err := IntegerArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestIntegerArrayDecodeAll_AllNegative(t *testing.T) { - enc := NewIntegerEncoder(3) - exp := []int64{ - -10, -5, -1, - } - - for _, v := range exp { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; intCompressedSimple != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - got, err := IntegerArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestIntegerArrayDecodeAll_CounterPacked(t *testing.T) { - enc := NewIntegerEncoder(16) - exp := []int64{ - 1e15, 1e15 + 1, 1e15 + 2, 1e15 + 3, 1e15 + 4, 1e15 + 6, - } - - for _, v := range exp { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != intCompressedSimple { - t.Fatalf("unexpected encoding format: expected simple, got %v", b[0]>>4) - } - - // Should use 1 header byte + 2, 8 byte words if delta-encoding is used based on - // values sizes. Without delta-encoding, we'd get 49 bytes. - if exp := 17; len(b) != exp { - t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp) - } - - got, err := IntegerArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestIntegerArrayDecodeAll_CounterRLE(t *testing.T) { - enc := NewIntegerEncoder(16) - exp := []int64{ - 1e15, 1e15 + 1, 1e15 + 2, 1e15 + 3, 1e15 + 4, 1e15 + 5, - } - - for _, v := range exp { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != intCompressedRLE { - t.Fatalf("unexpected encoding format: expected RLE, got %v", b[0]>>4) - } - - // Should use 1 header byte, 8 byte first value, 1 var-byte for delta and 1 var-byte for - // count of deltas in this particular RLE. 
- if exp := 11; len(b) != exp { - t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp) - } - - got, err := IntegerArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestIntegerArrayDecodeAll_Descending(t *testing.T) { - enc := NewIntegerEncoder(16) - exp := []int64{ - 7094, 4472, 1850, - } - - for _, v := range exp { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != intCompressedRLE { - t.Fatalf("unexpected encoding format: expected simple, got %v", b[0]>>4) - } - - // Should use 1 header byte, 8 byte first value, 1 var-byte for delta and 1 var-byte for - // count of deltas in this particular RLE. - if exp := 12; len(b) != exp { - t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp) - } - - got, err := IntegerArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestIntegerArrayDecodeAll_Flat(t *testing.T) { - enc := NewIntegerEncoder(16) - exp := []int64{ - 1, 1, 1, 1, - } - - for _, v := range exp { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != intCompressedRLE { - t.Fatalf("unexpected encoding format: expected simple, got %v", b[0]>>4) - } - - // Should use 1 header byte, 8 byte first value, 1 var-byte for delta and 1 var-byte for - // count of deltas in this particular RLE. - if exp := 11; len(b) != exp { - t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp) - } - - got, err := IntegerArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestIntegerArrayDecodeAll_MinMax(t *testing.T) { - enc := NewIntegerEncoder(2) - exp := []int64{ - math.MinInt64, math.MaxInt64, - } - - for _, v := range exp { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != intUncompressed { - t.Fatalf("unexpected encoding format: expected simple, got %v", b[0]>>4) - } - - if exp := 17; len(b) != exp { - t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp) - } - - got, err := IntegerArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestIntegerArrayDecodeAll_Quick(t *testing.T) { - quick.Check(func(values []int64) bool { - if values == nil { - values = []int64{} // is this really expected? - } - - // Write values to encoder. - enc := NewIntegerEncoder(1024) - for _, v := range values { - enc.Write(v) - } - - // Retrieve encoded bytes from encoder. - buf, err := enc.Bytes() - if err != nil { - t.Fatal(err) - } - - // Read values out of decoder. 
- got, err := IntegerArrayDecodeAll(buf, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, values) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, values)) - } - - return true - }, nil) -} - -var bufResult []byte - -func BenchmarkEncodeIntegers(b *testing.B) { - var err error - cases := []int{10, 100, 1000} - - for _, n := range cases { - enc := NewIntegerEncoder(n) - - b.Run(fmt.Sprintf("%d_seq", n), func(b *testing.B) { - src := make([]int64, n) - for i := 0; i < n; i++ { - src[i] = int64(i) - } - - input := make([]int64, len(src)) - copy(input, src) - - b.Run("itr", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - enc.Reset() - for _, x := range src { - enc.Write(x) - } - enc.Flush() - if bufResult, err = enc.Bytes(); err != nil { - b.Fatal(err) - } - - // Since the batch encoder needs to do a copy to reset the - // input, we will add a copy here too. - copy(input, src) - } - }) - - b.Run("batch", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - if bufResult, err = IntegerArrayEncodeAll(input, bufResult); err != nil { - b.Fatal(err) - } - copy(input, src) // Reset input that gets modified in IntegerArrayEncodeAll - } - }) - - }) - - b.Run(fmt.Sprintf("%d_ran", n), func(b *testing.B) { - src := make([]int64, n) - for i := 0; i < n; i++ { - src[i] = rand.Int63n(100) - } - - input := make([]int64, len(src)) - copy(input, src) - - b.Run("itr", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - enc.Reset() - for _, x := range src { - enc.Write(x) - } - enc.Flush() - if bufResult, err = enc.Bytes(); err != nil { - b.Fatal(err) - } - - // Since the batch encoder needs to do a copy to reset the - // input, we will add a copy here too. - copy(input, src) - } - }) - - b.Run("batch", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - if bufResult, err = IntegerArrayEncodeAll(input, bufResult); err != nil { - b.Fatal(err) - } - copy(input, src) // Reset input that gets modified in IntegerArrayEncodeAll - } - }) - }) - - b.Run(fmt.Sprintf("%d_dup", n), func(b *testing.B) { - src := make([]int64, n) - for i := 0; i < n; i++ { - src[i] = 1233242 - } - - input := make([]int64, len(src)) - copy(input, src) - - b.Run("itr", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - enc.Reset() - for _, x := range src { - enc.Write(x) - } - enc.Flush() - if bufResult, err = enc.Bytes(); err != nil { - b.Fatal(err) - } - - // Since the batch encoder needs to do a copy to reset the - // input, we will add a copy here too. 
- copy(input, src) - } - }) - - b.Run("batch", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - if bufResult, err = IntegerArrayEncodeAll(input, bufResult); err != nil { - b.Fatal(err) - } - copy(input, src) // Reset input that gets modified in IntegerArrayEncodeAll - } - }) - }) - } -} - -func BenchmarkIntegerArrayDecodeAllUncompressed(b *testing.B) { - benchmarks := []int{ - 5, - 55, - 555, - 1000, - } - - values := []int64{ - -2352281900722994752, 1438442655375607923, -4110452567888190110, - -1221292455668011702, -1941700286034261841, -2836753127140407751, - 1432686216250034552, 3663244026151507025, -3068113732684750258, - -1949953187327444488, 3713374280993588804, 3226153669854871355, - -2093273755080502606, 1006087192578600616, -2272122301622271655, - 2533238229511593671, -4450454445568858273, 2647789901083530435, - 2761419461769776844, -1324397441074946198, -680758138988210958, - 94468846694902125, -2394093124890745254, -2682139311758778198, - } - - for _, size := range benchmarks { - seededRand := rand.New(rand.NewSource(int64(size * 1e3))) - - enc := NewIntegerEncoder(size) - for i := 0; i < size; i++ { - enc.Write(values[seededRand.Int()%len(values)]) - } - bytes, _ := enc.Bytes() - - b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - b.ReportAllocs() - - dst := make([]int64, size) - for i := 0; i < b.N; i++ { - dst, _ = IntegerArrayDecodeAll(bytes, dst) - } - }) - } -} - -func BenchmarkIntegerArrayDecodeAllPackedSimple(b *testing.B) { - benchmarks := []int{ - 5, - 55, - 555, - 1000, - } - for _, size := range benchmarks { - seededRand := rand.New(rand.NewSource(int64(size * 1e3))) - - enc := NewIntegerEncoder(size) - for i := 0; i < size; i++ { - // Small amount of randomness prevents RLE from being used - enc.Write(int64(i) + int64(seededRand.Intn(10))) - } - bytes, _ := enc.Bytes() - - b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - b.ReportAllocs() - - dst := make([]int64, size) - for i := 0; i < b.N; i++ { - IntegerArrayDecodeAll(bytes, dst) - } - }) - } -} - -func BenchmarkIntegerArrayDecodeAllRLE(b *testing.B) { - benchmarks := []struct { - n int - delta int64 - }{ - {5, 1}, - {55, 1}, - {555, 1}, - {1000, 1}, - {1000, 0}, - } - for _, bm := range benchmarks { - enc := NewIntegerEncoder(bm.n) - acc := int64(0) - for i := 0; i < bm.n; i++ { - enc.Write(acc) - acc += bm.delta - } - bytes, _ := enc.Bytes() - - b.Run(fmt.Sprintf("%d_delta_%d", bm.n, bm.delta), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - b.ReportAllocs() - - dst := make([]int64, bm.n) - for i := 0; i < b.N; i++ { - IntegerArrayDecodeAll(bytes, dst) - } - }) - } -} diff --git a/tsdb/engine/tsm1/batch_string.go b/tsdb/engine/tsm1/batch_string.go deleted file mode 100644 index 739e986aa0d..00000000000 --- a/tsdb/engine/tsm1/batch_string.go +++ /dev/null @@ -1,144 +0,0 @@ -package tsm1 - -import ( - "encoding/binary" - "errors" - "fmt" - "math" - "unsafe" - - "github.com/golang/snappy" -) - -var ( - errStringBatchDecodeInvalidStringLength = fmt.Errorf("StringArrayDecodeAll: invalid encoded string length") - errStringBatchDecodeLengthOverflow = fmt.Errorf("StringArrayDecodeAll: length overflow") - errStringBatchDecodeShortBuffer = fmt.Errorf("StringArrayDecodeAll: short buffer") - - // ErrStringArrayEncodeTooLarge reports that the encoded length of a slice of strings is too large. 
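The string codec defined in this file (StringArrayEncodeAll and StringArrayDecodeAll below) frames each string as a uvarint length followed by its bytes, snappy-compresses the whole frame, and stores the encoding type in the high nibble of byte 0. A rough standalone sketch of that framing using the golang/snappy package; the type-nibble constant is illustrative, and the real code reuses a single buffer rather than allocating as done here.

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/golang/snappy"
)

// encodeStrings frames each string as [uvarint length][bytes] and snappy-
// compresses the frame; byte 0 carries the encoding type in its high nibble.
func encodeStrings(src []string) []byte {
	var raw []byte
	for _, s := range src {
		raw = binary.AppendUvarint(raw, uint64(len(s)))
		raw = append(raw, s...)
	}
	out := []byte{1 << 4} // hypothetical "snappy" type nibble
	return append(out, snappy.Encode(nil, raw)...)
}

func decodeStrings(b []byte) ([]string, error) {
	raw, err := snappy.Decode(nil, b[1:]) // skip the type byte
	if err != nil {
		return nil, err
	}
	var out []string
	for i := 0; i < len(raw); {
		l, n := binary.Uvarint(raw[i:])
		if n <= 0 || i+n+int(l) > len(raw) {
			return nil, fmt.Errorf("corrupt string block")
		}
		out = append(out, string(raw[i+n:i+n+int(l)]))
		i += n + int(l)
	}
	return out, nil
}

func main() {
	b := encodeStrings([]string{"v1", "value 2"})
	fmt.Println(decodeStrings(b)) // [v1 value 2] <nil>
}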
- ErrStringArrayEncodeTooLarge = errors.New("StringArrayEncodeAll: source length too large") -) - -// StringArrayEncodeAll encodes src into b, returning b and any error encountered. -// The returned slice may be of a different length and capacity to b. -// -// Currently only the string compression scheme used snappy. -func StringArrayEncodeAll(src []string, b []byte) ([]byte, error) { - srcSz64 := int64(2 + len(src)*binary.MaxVarintLen32) // strings shouldn't be longer than 64kb - for i := range src { - srcSz64 += int64(len(src[i])) - } - - // 32-bit systems - if srcSz64 > math.MaxUint32 { - return b[:0], ErrStringArrayEncodeTooLarge - } - - srcSz := int(srcSz64) - - // determine the maximum possible length needed for the buffer, which - // includes the compressed size - var compressedSz = 0 - if len(src) > 0 { - mle := snappy.MaxEncodedLen(srcSz) - if mle == -1 { - return b[:0], ErrStringArrayEncodeTooLarge - } - compressedSz = mle + 1 /* header */ - } - totSz := srcSz + compressedSz - - if cap(b) < totSz { - b = make([]byte, totSz) - } else { - b = b[:totSz] - } - - // Shortcut to snappy encoding nothing. - if len(src) == 0 { - b[0] = stringCompressedSnappy << 4 - return b[:2], nil - } - - // write the data to be compressed *after* the space needed for snappy - // compression. The compressed data is at the start of the allocated buffer, - // ensuring the entire capacity is returned and available for subsequent use. - dta := b[compressedSz:] - n := 0 - for i := range src { - n += binary.PutUvarint(dta[n:], uint64(len(src[i]))) - n += copy(dta[n:], src[i]) - } - dta = dta[:n] - - dst := b[:compressedSz] - dst[0] = stringCompressedSnappy << 4 - res := snappy.Encode(dst[1:], dta) - return dst[:len(res)+1], nil -} - -func StringArrayDecodeAll(b []byte, dst []string) ([]string, error) { - // First byte stores the encoding type, only have snappy format - // currently so ignore for now. - if len(b) > 0 { - var err error - // it is important that to note that `snappy.Decode` always returns - // a newly allocated slice as the final strings reference this slice - // directly. - b, err = snappy.Decode(nil, b[1:]) - if err != nil { - return []string{}, fmt.Errorf("failed to decode string block: %v", err.Error()) - } - } else { - return []string{}, nil - } - - var ( - i, l int - ) - - sz := cap(dst) - if sz == 0 { - sz = 64 - dst = make([]string, sz) - } else { - dst = dst[:sz] - } - - j := 0 - - for i < len(b) { - length, n := binary.Uvarint(b[i:]) - if n <= 0 { - return []string{}, errStringBatchDecodeInvalidStringLength - } - - // The length of this string plus the length of the variable byte encoded length - l = int(length) + n - - lower := i + n - upper := lower + int(length) - if upper < lower { - return []string{}, errStringBatchDecodeLengthOverflow - } - if upper > len(b) { - return []string{}, errStringBatchDecodeShortBuffer - } - - // NOTE: this optimization is critical for performance and to reduce - // allocations. 
This is just as "safe" as string.Builder, which - // returns a string mapped to the original byte slice - s := b[lower:upper] - val := *(*string)(unsafe.Pointer(&s)) - if j < len(dst) { - dst[j] = val - } else { - dst = append(dst, val) // force a resize - dst = dst[:cap(dst)] - } - i += l - j++ - } - - return dst[:j], nil -} diff --git a/tsdb/engine/tsm1/batch_string_test.go b/tsdb/engine/tsm1/batch_string_test.go deleted file mode 100644 index 12d151f529c..00000000000 --- a/tsdb/engine/tsm1/batch_string_test.go +++ /dev/null @@ -1,401 +0,0 @@ -package tsm1 - -import ( - "bytes" - "fmt" - "reflect" - "strings" - "testing" - "testing/quick" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/internal/testutil" - "github.com/influxdata/influxdb/v2/uuid" -) - -func equalError(a, b error) bool { - return a == nil && b == nil || a != nil && b != nil && a.Error() == b.Error() -} - -func TestStringArrayEncodeAll_NoValues(t *testing.T) { - b, err := StringArrayEncodeAll(nil, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var dec StringDecoder - if err := dec.SetBytes(b); err != nil { - t.Fatalf("unexpected error creating string decoder: %v", err) - } - if dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } -} - -func TestStringArrayEncodeAll_ExceedsMaxEncodedLen(t *testing.T) { - str := strings.Repeat(" ", 1<<23) // 8MB string - var s []string - for i := 0; i < 512; i++ { - s = append(s, str) - } - - _, got := StringArrayEncodeAll(s, nil) - if !cmp.Equal(got, ErrStringArrayEncodeTooLarge, cmp.Comparer(equalError)) { - t.Fatalf("expected error, got: %v", got) - } -} - -func TestStringArrayEncodeAll_Single(t *testing.T) { - src := []string{"v1"} - b, err := StringArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var dec StringDecoder - if dec.SetBytes(b); err != nil { - t.Fatalf("unexpected error creating string decoder: %v", err) - } - if !dec.Next() { - t.Fatalf("unexpected next value: got false, exp true") - } - - if src[0] != dec.Read() { - t.Fatalf("unexpected value: got %v, exp %v", dec.Read(), src[0]) - } -} - -func TestStringArrayEncode_Compare(t *testing.T) { - // generate random values - input := make([]string, 1000) - for i := 0; i < len(input); i++ { - input[i] = uuid.TimeUUID().String() - } - - // Example from the paper - s := NewStringEncoder(1000) - for _, v := range input { - s.Write(v) - } - s.Flush() - - buf1, err := s.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - buf2 := append([]byte("this is some jibberish"), make([]byte, 100, 200)...) - buf2, err = StringArrayEncodeAll(input, buf2) - if err != nil { - t.Fatalf("unexpected error: %v\nbuf: %db %x", err, len(buf2), buf2) - } - - result, err := StringArrayDecodeAll(buf2, nil) - if err != nil { - dumpBufs(buf1, buf2) - t.Fatalf("unexpected error: %v\nbuf: %db %x", err, len(buf2), buf2) - } - - if got, exp := result, input; !reflect.DeepEqual(got, exp) { - t.Fatalf("got result %v, expected %v", got, exp) - } - - // Check that the encoders are byte for byte the same... 
- if !bytes.Equal(buf1, buf2) { - dumpBufs(buf1, buf2) - t.Fatalf("Raw bytes differ for encoders") - } -} - -func TestStringArrayEncodeAll_Multi_Compressed(t *testing.T) { - src := make([]string, 10) - for i := range src { - src[i] = fmt.Sprintf("value %d", i) - } - - b, err := StringArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != stringCompressedSnappy { - t.Fatalf("unexpected encoding: got %v, exp %v", b[0], stringCompressedSnappy) - } - - if exp := 51; len(b) != exp { - t.Fatalf("unexpected length: got %v, exp %v", len(b), exp) - } - - var dec StringDecoder - if err := dec.SetBytes(b); err != nil { - t.Fatalf("unexpected erorr creating string decoder: %v", err) - } - - for i, v := range src { - if !dec.Next() { - t.Fatalf("unexpected next value: got false, exp true") - } - if v != dec.Read() { - t.Fatalf("unexpected value at pos %d: got %v, exp %v", i, dec.Read(), v) - } - } - - if dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } -} - -func TestStringArrayEncodeAll_Quick(t *testing.T) { - var base []byte - quick.Check(func(values []string) bool { - src := values - if values == nil { - src = []string{} - } - - // Retrieve encoded bytes from encoder. - buf, err := StringArrayEncodeAll(src, base) - if err != nil { - t.Fatal(err) - } - - // Read values out of decoder. - got := make([]string, 0, len(src)) - var dec StringDecoder - if err := dec.SetBytes(buf); err != nil { - t.Fatal(err) - } - for dec.Next() { - if err := dec.Error(); err != nil { - t.Fatal(err) - } - got = append(got, dec.Read()) - } - - // Verify that input and output values match. - if !reflect.DeepEqual(src, got) { - t.Fatalf("mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", src, got) - } - - return true - }, nil) -} - -func TestStringArrayDecodeAll_NoValues(t *testing.T) { - enc := NewStringEncoder(1024) - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - got, err := StringArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected error creating string decoder: %v", err) - } - - exp := []string{} - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected value: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestStringArrayDecodeAll_Single(t *testing.T) { - enc := NewStringEncoder(1024) - v1 := "v1" - enc.Write(v1) - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - got, err := StringArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected error creating string decoder: %v", err) - } - - exp := []string{"v1"} - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected value: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestStringArrayDecodeAll_Multi_Compressed(t *testing.T) { - enc := NewStringEncoder(1024) - - exp := make([]string, 10) - for i := range exp { - exp[i] = fmt.Sprintf("value %d", i) - enc.Write(exp[i]) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != stringCompressedSnappy { - t.Fatalf("unexpected encoding: got %v, exp %v", b[0], stringCompressedSnappy) - } - - if exp := 51; len(b) != exp { - t.Fatalf("unexpected length: got %v, exp %v", len(b), exp) - } - - got, err := StringArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected error creating string decoder: %v", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected value: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestStringArrayDecodeAll_Quick(t *testing.T) { - quick.Check(func(values []string) bool { - if values == nil 
{ - values = []string{} - } - // Write values to encoder. - enc := NewStringEncoder(1024) - for _, v := range values { - enc.Write(v) - } - - // Retrieve encoded bytes from encoder. - buf, err := enc.Bytes() - if err != nil { - t.Fatal(err) - } - - // Read values out of decoder. - got, err := StringArrayDecodeAll(buf, nil) - if err != nil { - t.Fatalf("unexpected error creating string decoder: %v", err) - } - - if !cmp.Equal(got, values) { - t.Fatalf("unexpected value: -got/+exp\n%s", cmp.Diff(got, values)) - } - - return true - }, nil) -} - -func TestStringArrayDecodeAll_Empty(t *testing.T) { - got, err := StringArrayDecodeAll([]byte{}, nil) - if err != nil { - t.Fatalf("unexpected error creating string decoder: %v", err) - } - - exp := []string{} - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected value: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestStringArrayDecodeAll_CorruptBytes(t *testing.T) { - cases := []string{ - "\x10\x03\b\x03Hi", // Higher length than actual data - "\x10\x1dp\x9c\x90\x90\x90\x90\x90\x90\x90\x90\x90length overflow----", - "0t\x00\x01\x000\x00\x01\x000\x00\x01\x000\x00\x01\x000\x00\x01" + - "\x000\x00\x01\x000\x00\x01\x000\x00\x00\x00\xff:\x01\x00\x01\x00\x01" + - "\x00\x01\x00\x01\x00\x01\x00\x010\x010\x000\x010\x010\x010\x01" + - "0\x010\x010\x010\x010\x010\x010\x010\x010\x010\x010", // Upper slice bounds overflows negative - } - - for _, c := range cases { - t.Run(fmt.Sprintf("%q", c), func(t *testing.T) { - got, err := StringArrayDecodeAll([]byte(c), nil) - if err == nil { - t.Fatal("exp an err, got nil") - } - - exp := []string{} - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected value: -got/+exp\n%s", cmp.Diff(got, exp)) - } - }) - } -} - -func BenchmarkEncodeStrings(b *testing.B) { - var err error - cases := []int{10, 100, 1000} - - for _, n := range cases { - enc := NewStringEncoder(n) - b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { - input := make([]string, n) - for i := 0; i < n; i++ { - input[i] = uuid.TimeUUID().String() - } - - b.Run("itr", func(b *testing.B) { - b.ReportAllocs() - enc.Reset() - b.ResetTimer() - for n := 0; n < b.N; n++ { - enc.Reset() - for _, x := range input { - enc.Write(x) - } - enc.Flush() - if bufResult, err = enc.Bytes(); err != nil { - b.Fatal(err) - } - } - }) - - b.Run("batch", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - if bufResult, err = StringArrayEncodeAll(input, bufResult); err != nil { - b.Fatal(err) - } - } - }) - - }) - } -} - -func BenchmarkStringArrayDecodeAll(b *testing.B) { - benchmarks := []struct { - n int - w int - }{ - {1, 10}, - {55, 10}, - {550, 10}, - {1000, 10}, - } - for _, bm := range benchmarks { - s := NewStringEncoder(bm.n) - for c := 0; c < bm.n; c++ { - s.Write(testutil.MakeSentence(bm.w)) - } - s.Flush() - bytes, err := s.Bytes() - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.Run(fmt.Sprintf("%d", bm.n), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - b.ReportAllocs() - - dst := make([]string, bm.n) - for i := 0; i < b.N; i++ { - got, err := StringArrayDecodeAll(bytes, dst) - if err != nil { - b.Fatalf("unexpected length -got/+exp\n%s", cmp.Diff(len(dst), bm.n)) - } - if len(got) != bm.n { - b.Fatalf("unexpected length -got/+exp\n%s", cmp.Diff(len(dst), bm.n)) - } - } - }) - } -} diff --git a/tsdb/engine/tsm1/batch_timestamp.go b/tsdb/engine/tsm1/batch_timestamp.go deleted file mode 100644 index d7cac55993b..00000000000 --- a/tsdb/engine/tsm1/batch_timestamp.go +++ /dev/null @@ -1,296 +0,0 @@ -package tsm1 - 
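The encoder in the file below delta-encodes sorted timestamps and, when every delta shares a power-of-ten divisor (common for second- or millisecond-precision data), scales the deltas down before packing and records log10 of the divisor in the low nibble of the header byte. A toy sketch of that divisor search, loosely mirroring the loop in TimeArrayEncodeAll; the function name is assumed for illustration.

package main

import "fmt"

// largestPow10Divisor finds the biggest power-of-ten divisor (capped at 1e12)
// shared by every delta; a divisor greater than 1 lets the deltas be stored
// scaled down and multiplied back up on decode.
func largestPow10Divisor(deltas []uint64) uint64 {
	div := uint64(1e12)
	for _, v := range deltas {
		for div > 1 && v%div != 0 {
			div /= 10
		}
	}
	return div
}

func main() {
	// One-second spacing expressed in nanoseconds.
	deltas := []uint64{1e9, 1e9, 1e9}
	div := largestPow10Divisor(deltas)
	fmt.Println(div) // 1000000000: store the deltas as 1, 1, 1 and scale back up on decode
}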
-import ( - "encoding/binary" - "fmt" - "math" - "unsafe" - - "github.com/influxdata/influxdb/v2/pkg/encoding/simple8b" -) - -// TimeArrayEncodeAll encodes src into b, returning b and any error encountered. -// The returned slice may be of a different length and capacity to b. -// -// TimeArrayEncodeAll implements batch oriented versions of the three integer -// encoding types we support: uncompressed, simple8b and RLE. -// -// Timestamp values to be encoded should be sorted before encoding. When encoded, -// the values are first delta-encoded. The first value is the starting timestamp, -// subsequent values are the difference from the prior value. -// -// Important: TimeArrayEncodeAll modifies the contents of src by using it as -// scratch space for delta encoded values. It is NOT SAFE to use src after -// passing it into TimeArrayEncodeAll. -func TimeArrayEncodeAll(src []int64, b []byte) ([]byte, error) { - if len(src) == 0 { - return nil, nil // Nothing to do - } - - var max, div = uint64(0), uint64(1e12) - - // To prevent an allocation of the entire block we're encoding reuse the - // src slice to store the encoded deltas. - deltas := reinterpretInt64ToUint64Slice(src) - - if len(deltas) > 1 { - for i := len(deltas) - 1; i > 0; i-- { - deltas[i] = deltas[i] - deltas[i-1] - if deltas[i] > max { - max = deltas[i] - } - } - - var rle = true - for i := 2; i < len(deltas); i++ { - if deltas[1] != deltas[i] { - rle = false - break - } - } - - // Deltas are the same - encode with RLE - if rle { - // Large varints can take up to 10 bytes. We're storing 3 + 1 - // type byte. - if len(b) < 31 && cap(b) >= 31 { - b = b[:31] - } else if len(b) < 31 { - b = append(b, make([]byte, 31-len(b))...) - } - - // 4 high bits used for the encoding type - b[0] = byte(timeCompressedRLE) << 4 - - i := 1 - // The first value - binary.BigEndian.PutUint64(b[i:], deltas[0]) - i += 8 - - // The first delta, checking the divisor - // given all deltas are the same, we can do a single check for the divisor - v := deltas[1] - for div > 1 && v%div != 0 { - div /= 10 - } - - if div > 1 { - // 4 low bits are the log10 divisor - b[0] |= byte(math.Log10(float64(div))) - i += binary.PutUvarint(b[i:], deltas[1]/div) - } else { - i += binary.PutUvarint(b[i:], deltas[1]) - } - - // The number of times the delta is repeated - i += binary.PutUvarint(b[i:], uint64(len(deltas))) - - return b[:i], nil - } - } - - // We can't compress this time-range, the deltas exceed 1 << 60 - if max > simple8b.MaxValue { - // Encode uncompressed. - sz := 1 + len(deltas)*8 - if len(b) < sz && cap(b) >= sz { - b = b[:sz] - } else if len(b) < sz { - b = append(b, make([]byte, sz-len(b))...) - } - - // 4 high bits of first byte store the encoding type for the block - b[0] = byte(timeUncompressed) << 4 - for i, v := range deltas { - binary.BigEndian.PutUint64(b[1+i*8:1+i*8+8], v) - } - return b[:sz], nil - } - - // find divisor only if we're compressing with simple8b - for i := 1; i < len(deltas) && div > 1; i++ { - // If our value is divisible by 10, break. Otherwise, try the next smallest divisor. - v := deltas[i] - for div > 1 && v%div != 0 { - div /= 10 - } - } - - // Only apply the divisor if it's greater than 1 since division is expensive. - if div > 1 { - for i := 1; i < len(deltas); i++ { - deltas[i] /= div - } - } - - // Encode with simple8b - fist value is written unencoded using 8 bytes. 
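For the packed-simple case the header byte therefore carries two fields: the encoding type in the high nibble and log10 of the divisor in the low nibble, followed by the unscaled first value and the simple8b words. A small assumed walk-through of reading that header back and rebuilding timestamps from scaled deltas (not the package's decoder):

package main

import (
	"fmt"
	"math"
)

// splitHeader shows how the packed-simple timestamp header byte is read back:
// high nibble = encoding type, low nibble = log10 of the divisor the deltas
// were divided by before packing.
func splitHeader(h byte) (encoding byte, divisor uint64) {
	return h >> 4, uint64(math.Pow10(int(h & 0xF)))
}

func main() {
	// Hypothetical header: type 1 (packed simple) with deltas scaled by 1e9.
	enc, div := splitHeader(1<<4 | 9)
	fmt.Println(enc, div) // 1 1000000000

	// On decode, each unpacked delta is multiplied by div and prefix-summed.
	deltas := []uint64{1, 1, 2}
	ts := make([]uint64, len(deltas)+1)
	ts[0] = 1_000_000_000_000 // the first timestamp is stored unscaled
	for i, d := range deltas {
		ts[i+1] = ts[i] + d*div
	}
	fmt.Println(ts) // [1000000000000 1001000000000 1002000000000 1004000000000]
}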
- encoded, err := simple8b.EncodeAll(deltas[1:]) - if err != nil { - return nil, err - } - - sz := 1 + (len(encoded)+1)*8 - if len(b) < sz && cap(b) >= sz { - b = b[:sz] - } else if len(b) < sz { - b = append(b, make([]byte, sz-len(b))...) - } - - // 4 high bits of first byte store the encoding type for the block - b[0] = byte(timeCompressedPackedSimple) << 4 - // 4 low bits are the log10 divisor - b[0] |= byte(math.Log10(float64(div))) - - // Write the first value since it's not part of the encoded values - binary.BigEndian.PutUint64(b[1:9], deltas[0]) - - // Write the encoded values - for i, v := range encoded { - binary.BigEndian.PutUint64(b[9+i*8:9+i*8+8], v) - } - return b[:sz], nil -} - -var ( - timeBatchDecoderFunc = [...]func(b []byte, dst []int64) ([]int64, error){ - timeBatchDecodeAllUncompressed, - timeBatchDecodeAllSimple, - timeBatchDecodeAllRLE, - timeBatchDecodeAllInvalid, - } -) - -func TimeArrayDecodeAll(b []byte, dst []int64) ([]int64, error) { - if len(b) == 0 { - return []int64{}, nil - } - - encoding := b[0] >> 4 - if encoding > timeCompressedRLE { - encoding = 3 // timeBatchDecodeAllInvalid - } - - return timeBatchDecoderFunc[encoding&3](b, dst) -} - -func timeBatchDecodeAllUncompressed(b []byte, dst []int64) ([]int64, error) { - b = b[1:] - if len(b)&0x7 != 0 { - return []int64{}, fmt.Errorf("TimeArrayDecodeAll: expected multiple of 8 bytes") - } - - count := len(b) / 8 - if cap(dst) < count { - dst = make([]int64, count) - } else { - dst = dst[:count] - } - - prev := uint64(0) - for i := range dst { - prev += binary.BigEndian.Uint64(b[i*8:]) - dst[i] = int64(prev) - } - - return dst, nil -} - -func timeBatchDecodeAllSimple(b []byte, dst []int64) ([]int64, error) { - if len(b) < 9 { - return []int64{}, fmt.Errorf("TimeArrayDecodeAll: not enough data to decode packed timestamps") - } - - div := uint64(math.Pow10(int(b[0] & 0xF))) // multiplier - - count, err := simple8b.CountBytes(b[9:]) - if err != nil { - return []int64{}, err - } - - count += 1 - - if cap(dst) < count { - dst = make([]int64, count) - } else { - dst = dst[:count] - } - - buf := *(*[]uint64)(unsafe.Pointer(&dst)) - - // first value - buf[0] = binary.BigEndian.Uint64(b[1:9]) - n, err := simple8b.DecodeBytesBigEndian(buf[1:], b[9:]) - if err != nil { - return []int64{}, err - } - if n != count-1 { - return []int64{}, fmt.Errorf("TimeArrayDecodeAll: unexpected number of values decoded; got=%d, exp=%d", n, count-1) - } - - // Compute the prefix sum and scale the deltas back up - last := buf[0] - if div > 1 { - for i := 1; i < len(buf); i++ { - dgap := buf[i] * div - buf[i] = last + dgap - last = buf[i] - } - } else { - for i := 1; i < len(buf); i++ { - buf[i] += last - last = buf[i] - } - } - - return dst, nil -} - -func timeBatchDecodeAllRLE(b []byte, dst []int64) ([]int64, error) { - if len(b) < 9 { - return []int64{}, fmt.Errorf("TimeArrayDecodeAll: not enough data to decode RLE starting value") - } - - var k, n int - - // Lower 4 bits hold the 10 based exponent so we can scale the values back up - mod := int64(math.Pow10(int(b[k] & 0xF))) - k++ - - // Next 8 bytes is the starting timestamp - first := binary.BigEndian.Uint64(b[k:]) - k += 8 - - // Next 1-10 bytes is our (scaled down by factor of 10) run length delta - delta, n := binary.Uvarint(b[k:]) - if n <= 0 { - return []int64{}, fmt.Errorf("TimeArrayDecodeAll: invalid run length in decodeRLE") - } - k += n - - // Scale the delta back up - delta *= uint64(mod) - - // Last 1-10 bytes is how many times the value repeats - count, n := 
binary.Uvarint(b[k:]) - if n <= 0 { - return []int64{}, fmt.Errorf("TimeDecoder: invalid repeat value in decodeRLE") - } - - if cap(dst) < int(count) { - dst = make([]int64, count) - } else { - dst = dst[:count] - } - - acc := first - for i := range dst { - dst[i] = int64(acc) - acc += delta - } - - return dst, nil -} - -func timeBatchDecodeAllInvalid(b []byte, _ []int64) ([]int64, error) { - return []int64{}, fmt.Errorf("unknown encoding %v", b[0]>>4) -} diff --git a/tsdb/engine/tsm1/batch_timestamp_test.go b/tsdb/engine/tsm1/batch_timestamp_test.go deleted file mode 100644 index 8d36389d1b5..00000000000 --- a/tsdb/engine/tsm1/batch_timestamp_test.go +++ /dev/null @@ -1,1171 +0,0 @@ -package tsm1 - -import ( - "bytes" - "fmt" - "math/rand" - "reflect" - "sort" - "testing" - "testing/quick" - "time" - - "github.com/google/go-cmp/cmp" -) - -func TestTimeArrayEncodeAll(t *testing.T) { - now := time.Unix(0, 0) - src := []int64{now.UnixNano()} - - for i := 1; i < 4; i++ { - src = append(src, now.Add(time.Duration(i)*time.Second).UnixNano()) - } - - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := TimeArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - for i, v := range exp { - if !dec.Next() { - t.Fatalf("Next == false, expected true") - } - - if v != dec.Read() { - t.Fatalf("Item %d mismatch, got %v, exp %v", i, dec.Read(), v) - } - } -} - -// This test compares the ArrayEncoder to the original iterator encoder, byte for -// byte. -func TestTimeArrayEncodeAll_Compare(t *testing.T) { - // generate random values (should use simple8b) - input := make([]int64, 1000) - for i := 0; i < len(input); i++ { - input[i] = rand.Int63n(100000) - 50000 - } - sort.Slice(input, func(i int, j int) bool { return input[i] < input[j] }) - testTimeArrayEncodeAll_Compare(t, input, timeCompressedPackedSimple) - - // Generate same values (should use RLE) - for i := 0; i < len(input); i++ { - input[i] = 1232342341234 - } - testTimeArrayEncodeAll_Compare(t, input, timeCompressedRLE) - - // Generate large random values that are not sorted. The deltas will be large - // and the values should be stored uncompressed. - for i := 0; i < len(input); i++ { - input[i] = int64(rand.Uint64()) - } - testTimeArrayEncodeAll_Compare(t, input, timeUncompressed) -} - -func testTimeArrayEncodeAll_Compare(t *testing.T, input []int64, encoding byte) { - exp := make([]int64, len(input)) - copy(exp, input) - - s := NewTimeEncoder(1000) - for _, v := range input { - s.Write(v) - } - - buf1, err := s.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got, exp := buf1[0]>>4, encoding; got != exp { - t.Fatalf("got encoding %v, expected %v", got, encoding) - } - - var buf2 []byte - buf2, err = TimeArrayEncodeAll(input, buf2) - if err != nil { - t.Fatalf("unexpected error: %v\nbuf: %db %x", err, len(buf2), buf2) - } - - if got, exp := buf2[0]>>4, encoding; got != exp { - t.Fatalf("got encoding %v, expected %v", got, encoding) - } - - result, err := TimeArrayDecodeAll(buf2, nil) - if err != nil { - dumpBufs(buf1, buf2) - t.Fatalf("unexpected error: %v\nbuf: %db %x", err, len(buf2), buf2) - } - - if got := result; !reflect.DeepEqual(got, exp) { - t.Fatalf("-got/+exp\n%s", cmp.Diff(got, exp)) - } - - // Check that the encoders are byte for byte the same... 
- if !bytes.Equal(buf1, buf2) { - dumpBufs(buf1, buf2) - t.Fatalf("Raw bytes differ for encoders") - } -} - -func TestTimeArrayEncodeAll_NoValues(t *testing.T) { - b, err := TimeArrayEncodeAll(nil, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var dec TimeDecoder - dec.Init(b) - if dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } -} - -func TestTimeArrayEncodeAll_One(t *testing.T) { - src := []int64{0} - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := TimeArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedPackedSimple { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if exp[0] != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), exp[0]) - } -} - -func TestTimeArrayEncodeAll_Two(t *testing.T) { - src := []int64{0, 1} - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := TimeArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if exp[0] != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), exp[0]) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if exp[1] != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), exp[1]) - } -} - -func TestTimeArrayEncodeAll_Three(t *testing.T) { - src := []int64{0, 1, 3} - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := TimeArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedPackedSimple { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if exp[0] != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), exp[0]) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if exp[1] != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), exp[1]) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if exp[2] != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), exp[2]) - } -} - -func TestTimeArrayEncodeAll_Large_Range(t *testing.T) { - src := []int64{1442369134000000000, 1442369135000000000} - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := TimeArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if exp[0] != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), exp[2]) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if exp[1] != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), exp[1]) - } -} - -func TestTimeArrayEncodeAll_Uncompressed(t *testing.T) { - src := 
[]int64{time.Unix(0, 0).UnixNano(), time.Unix(1, 0).UnixNano()} - - // about 36.5yrs in NS resolution is max range for compressed format - // This should cause the encoding to fallback to raw points - src = append(src, time.Unix(2, (2<<59)).UnixNano()) - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := TimeArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("expected error: %v", err) - } - - if exp := 25; len(b) != exp { - t.Fatalf("length mismatch: got %v, exp %v", len(b), exp) - } - - if got := b[0] >> 4; got != timeUncompressed { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if exp[0] != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), exp[0]) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if exp[1] != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), exp[1]) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if exp[2] != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), exp[2]) - } -} - -func TestTimeArrayEncodeAll_RLE(t *testing.T) { - var src []int64 - for i := 0; i < 500; i++ { - src = append(src, int64(i)) - } - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := TimeArrayEncodeAll(src, nil) - if exp := 12; len(b) != exp { - t.Fatalf("length mismatch: got %v, exp %v", len(b), exp) - } - - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var dec TimeDecoder - dec.Init(b) - for i, v := range exp { - if !dec.Next() { - t.Fatalf("Next == false, expected true") - } - - if v != dec.Read() { - t.Fatalf("Item %d mismatch, got %v, exp %v", i, dec.Read(), v) - } - } - - if dec.Next() { - t.Fatalf("unexpected extra values") - } -} - -func TestTimeArrayEncodeAll_Reverse(t *testing.T) { - src := []int64{3, 2, 0} - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := TimeArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeUncompressed { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - i := 0 - for dec.Next() { - if exp[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), exp[i]) - } - i++ - } -} - -func TestTimeArrayEncodeAll_220SecondDelta(t *testing.T) { - var src []int64 - now := time.Now() - - for i := 0; i < 220; i++ { - src = append(src, now.Add(time.Duration(i*60)*time.Second).UnixNano()) - } - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := TimeArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - // Using RLE, should get 12 bytes - if exp := 12; len(b) != exp { - t.Fatalf("unexpected length: got %v, exp %v", len(b), exp) - } - - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - i := 0 - for dec.Next() { - if exp[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), exp[i]) - } - i++ - } - - if i != len(exp) { - t.Fatalf("Read too few values: exp %d, got %d", len(exp), i) - } - - if dec.Next() { - t.Fatalf("expecte Next() = false, got true") - } -} - 
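// The 12-byte lengths asserted in TestTimeArrayEncodeAll_RLE and
// TestTimeArrayEncodeAll_220SecondDelta follow from the RLE block layout read
// back by timeBatchDecodeAllRLE: one header byte (encoding type in the high
// nibble, log10 of the delta divisor in the low nibble), the raw 8-byte first
// timestamp, a uvarint of the scaled-down delta, and a uvarint of the repeat
// count. A minimal sketch of that arithmetic, not part of the original file
// and assuming an extra "encoding/binary" import:

func Example_rleBlockSize() {
	var tmp [binary.MaxVarintLen64]byte

	// 220 timestamps spaced 60s apart: the delta is 6e10 ns, and with a
	// divisor of 1e10 the stored delta is 6 (1 byte) and the count 220 (2 bytes).
	fmt.Println(1 + 8 + binary.PutUvarint(tmp[:], 6) + binary.PutUvarint(tmp[:], 220))

	// 500 consecutive integers: delta = 1, divisor = 1, count = 500 (2 bytes).
	fmt.Println(1 + 8 + binary.PutUvarint(tmp[:], 1) + binary.PutUvarint(tmp[:], 500))

	// Output:
	// 12
	// 12
}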
-func TestTimeArrayEncodeAll_Quick(t *testing.T) { - quick.Check(func(values []int64) bool { - // Write values to encoder. - - exp := make([]int64, len(values)) - for i, v := range values { - exp[i] = int64(v) - } - - // Retrieve encoded bytes from encoder. - b, err := TimeArrayEncodeAll(values, nil) - if err != nil { - t.Fatal(err) - } - - // Read values out of decoder. - got := make([]int64, 0, len(values)) - var dec TimeDecoder - dec.Init(b) - for dec.Next() { - if err := dec.Error(); err != nil { - t.Fatal(err) - } - got = append(got, dec.Read()) - } - - // Verify that input and output values match. - if !reflect.DeepEqual(exp, got) { - t.Fatalf("mismatch:\n\nexp=%+v\n\ngot=%+v\n\n", exp, got) - } - - return true - }, nil) -} - -func TestTimeArrayEncodeAll_RLESeconds(t *testing.T) { - src := []int64{ - 1444448158000000000, - 1444448168000000000, - 1444448178000000000, - 1444448188000000000, - 1444448198000000000, - 1444448208000000000, - } - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := TimeArrayEncodeAll(src, nil) - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var dec TimeDecoder - dec.Init(b) - for i, v := range exp { - if !dec.Next() { - t.Fatalf("Next == false, expected true") - } - - if v != dec.Read() { - t.Fatalf("Item %d mismatch, got %v, exp %v", i, dec.Read(), v) - } - } - - if dec.Next() { - t.Fatalf("unexpected extra values") - } -} - -func TestTimeArrayEncodeAll_Count_Uncompressed(t *testing.T) { - src := []int64{time.Unix(0, 0).UnixNano(), - time.Unix(1, 0).UnixNano(), - } - - // about 36.5yrs in NS resolution is max range for compressed format - // This should cause the encoding to fallback to raw points - src = append(src, time.Unix(2, (2<<59)).UnixNano()) - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := TimeArrayEncodeAll(src, nil) - if got := b[0] >> 4; got != timeUncompressed { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got, exp := CountTimestamps(b), 3; got != exp { - t.Fatalf("count mismatch: got %v, exp %v", got, exp) - } -} - -func TestTimeArrayEncodeAll_Count_RLE(t *testing.T) { - src := []int64{ - 1444448158000000000, - 1444448168000000000, - 1444448178000000000, - 1444448188000000000, - 1444448198000000000, - 1444448208000000000, - } - exp := make([]int64, len(src)) - copy(exp, src) - - b, err := TimeArrayEncodeAll(src, nil) - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got, exp := CountTimestamps(b), len(exp); got != exp { - t.Fatalf("count mismatch: got %v, exp %v", got, exp) - } -} - -func TestTimeArrayEncodeAll_Count_Simple8(t *testing.T) { - src := []int64{0, 1, 3} - - b, err := TimeArrayEncodeAll(src, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedPackedSimple { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got, exp := CountTimestamps(b), 3; got != exp { - t.Fatalf("count mismatch: got %v, exp %v", got, exp) - } -} - -func TestTimeArrayDecodeAll_NoValues(t *testing.T) { - enc := NewTimeEncoder(0) - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - got, err := 
TimeArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - exp := []int64{} - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestTimeArrayDecodeAll_One(t *testing.T) { - enc := NewTimeEncoder(1) - exp := []int64{0} - for _, v := range exp { - enc.Write(v) - } - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedPackedSimple { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - got, err := TimeArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestTimeArrayDecodeAll_Two(t *testing.T) { - enc := NewTimeEncoder(2) - exp := []int64{0, 1} - for _, v := range exp { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - got, err := TimeArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestTimeArrayDecodeAll_Three(t *testing.T) { - enc := NewTimeEncoder(3) - exp := []int64{0, 1, 3} - for _, v := range exp { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedPackedSimple { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - got, err := TimeArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestTimeArrayDecodeAll_Large_Range(t *testing.T) { - enc := NewTimeEncoder(2) - exp := []int64{1442369134000000000, 1442369135000000000} - for _, v := range exp { - enc.Write(v) - } - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - got, err := TimeArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestTimeArrayDecodeAll_Uncompressed(t *testing.T) { - enc := NewTimeEncoder(3) - exp := []int64{ - time.Unix(0, 0).UnixNano(), - time.Unix(1, 0).UnixNano(), - // about 36.5yrs in NS resolution is max range for compressed format - // This should cause the encoding to fallback to raw points - time.Unix(2, 2<<59).UnixNano(), - } - for _, v := range exp { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("expected error: %v", err) - } - - if exp := 25; len(b) != exp { - t.Fatalf("length mismatch: got %v, exp %v", len(b), exp) - } - - if got := b[0] >> 4; got != timeUncompressed { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - got, err := TimeArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestTimeArrayDecodeAll_RLE(t *testing.T) { - enc := NewTimeEncoder(512) - var exp []int64 - 
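	// 500 consecutive integers give a constant delta of 1, so the encoder is
	// expected to produce a single 12-byte RLE block (header + 8-byte first
	// value + 1-byte delta + 2-byte count), which the length check below asserts.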
for i := 0; i < 500; i++ { - exp = append(exp, int64(i)) - } - - for _, v := range exp { - enc.Write(v) - } - - b, err := enc.Bytes() - if exp := 12; len(b) != exp { - t.Fatalf("length mismatch: got %v, exp %v", len(b), exp) - } - - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - got, err := TimeArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestTimeArrayDecodeAll_Reverse(t *testing.T) { - enc := NewTimeEncoder(3) - exp := []int64{ - int64(3), - int64(2), - int64(0), - } - - for _, v := range exp { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeUncompressed { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - got, err := TimeArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestTimeArrayDecodeAll_Negative(t *testing.T) { - enc := NewTimeEncoder(3) - exp := []int64{ - -2352281900722994752, 1438442655375607923, -4110452567888190110, - -1221292455668011702, -1941700286034261841, -2836753127140407751, - 1432686216250034552, 3663244026151507025, -3068113732684750258, - -1949953187327444488, 3713374280993588804, 3226153669854871355, - -2093273755080502606, 1006087192578600616, -2272122301622271655, - 2533238229511593671, -4450454445568858273, 2647789901083530435, - 2761419461769776844, -1324397441074946198, -680758138988210958, - 94468846694902125, -2394093124890745254, -2682139311758778198, - } - - for _, v := range exp { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeUncompressed { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - got, err := TimeArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestTimeArrayDecodeAll_220SecondDelta(t *testing.T) { - enc := NewTimeEncoder(256) - var exp []int64 - now := time.Now() - for i := 0; i < 220; i++ { - exp = append(exp, now.Add(time.Duration(i*60)*time.Second).UnixNano()) - } - - for _, v := range exp { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - // Using RLE, should get 12 bytes - if exp := 12; len(b) != exp { - t.Fatalf("unexpected length: got %v, exp %v", len(b), exp) - } - - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - got, err := TimeArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestTimeArrayDecodeAll_Quick(t *testing.T) { - quick.Check(func(values []int64) bool { - // Write values to encoder. - enc := NewTimeEncoder(1024) - exp := make([]int64, len(values)) - for i, v := range values { - exp[i] = int64(v) - enc.Write(exp[i]) - } - - // Retrieve encoded bytes from encoder. 
- buf, err := enc.Bytes() - if err != nil { - t.Fatal(err) - } - - got, err := TimeArrayDecodeAll(buf, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } - - return true - }, nil) -} - -func TestTimeArrayDecodeAll_RLESeconds(t *testing.T) { - enc := NewTimeEncoder(6) - exp := make([]int64, 6) - - exp[0] = int64(1444448158000000000) - exp[1] = int64(1444448168000000000) - exp[2] = int64(1444448178000000000) - exp[3] = int64(1444448188000000000) - exp[4] = int64(1444448198000000000) - exp[5] = int64(1444448208000000000) - - for _, v := range exp { - enc.Write(v) - } - - b, err := enc.Bytes() - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - got, err := TimeArrayDecodeAll(b, nil) - if err != nil { - t.Fatalf("unexpected decode error %q", err) - } - - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected values: -got/+exp\n%s", cmp.Diff(got, exp)) - } -} - -func TestTimeArrayDecodeAll_Corrupt(t *testing.T) { - cases := []string{ - "\x10\x14", // Packed: not enough data - "\x20\x00", // RLE: not enough data for starting timestamp - "\x2012345678\x90", // RLE: initial timestamp but invalid uvarint encoding - "\x2012345678\x7f", // RLE: timestamp, RLE but invalid repeat - "\x00123", // Raw: data length not multiple of 8 - } - - for _, c := range cases { - t.Run(fmt.Sprintf("%q", c), func(t *testing.T) { - got, err := TimeArrayDecodeAll([]byte(c), nil) - if err == nil { - t.Fatal("exp an err, got nil") - } - - exp := []int64{} - if !cmp.Equal(got, exp) { - t.Fatalf("unexpected value: -got/+exp\n%s", cmp.Diff(got, exp)) - } - }) - } -} - -func BenchmarkEncodeTimestamps(b *testing.B) { - var err error - cases := []int{10, 100, 1000} - - for _, n := range cases { - enc := NewTimeEncoder(n) - - b.Run(fmt.Sprintf("%d_seq", n), func(b *testing.B) { - src := make([]int64, n) - for i := 0; i < n; i++ { - src[i] = int64(i) - } - sort.Slice(src, func(i int, j int) bool { return src[i] < src[j] }) - - input := make([]int64, len(src)) - copy(input, src) - - b.Run("itr", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - enc.Reset() - for _, x := range src { - enc.Write(x) - } - if bufResult, err = enc.Bytes(); err != nil { - b.Fatal(err) - } - - // Since the batch encoder needs to do a copy to reset the - // input, we will add a copy here too. - copy(input, src) - } - }) - - b.Run("batch", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - if bufResult, err = TimeArrayEncodeAll(input, bufResult); err != nil { - b.Fatal(err) - } - copy(input, src) // Reset input that gets modified in IntegerArrayEncodeAll - } - }) - - }) - - b.Run(fmt.Sprintf("%d_ran", n), func(b *testing.B) { - src := make([]int64, n) - for i := 0; i < n; i++ { - src[i] = int64(rand.Uint64()) - } - sort.Slice(src, func(i int, j int) bool { return src[i] < src[j] }) - - input := make([]int64, len(src)) - copy(input, src) - - b.Run("itr", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - enc.Reset() - for _, x := range src { - enc.Write(x) - } - if bufResult, err = enc.Bytes(); err != nil { - b.Fatal(err) - } - - // Since the batch encoder needs to do a copy to reset the - // input, we will add a copy here too. 
- copy(input, src) - } - }) - - b.Run("batch", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - if bufResult, err = TimeArrayEncodeAll(input, bufResult); err != nil { - b.Fatal(err) - } - copy(input, src) // Reset input that gets modified in IntegerArrayEncodeAll - } - }) - }) - - b.Run(fmt.Sprintf("%d_dup", n), func(b *testing.B) { - src := make([]int64, n) - for i := 0; i < n; i++ { - src[i] = 1233242 - } - - input := make([]int64, len(src)) - copy(input, src) - - b.Run("itr", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - enc.Reset() - for _, x := range src { - enc.Write(x) - } - if bufResult, err = enc.Bytes(); err != nil { - b.Fatal(err) - } - - // Since the batch encoder needs to do a copy to reset the - // input, we will add a copy here too. - copy(input, src) - } - }) - - b.Run("batch", func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - if bufResult, err = TimeArrayEncodeAll(input, bufResult); err != nil { - b.Fatal(err) - } - copy(input, src) // Reset input that gets modified in IntegerArrayEncodeAll - } - }) - }) - } -} - -func BenchmarkTimeArrayDecodeAllUncompressed(b *testing.B) { - benchmarks := []int{ - 5, - 55, - 555, - 1000, - } - - values := []int64{ - -2352281900722994752, 1438442655375607923, -4110452567888190110, - -1221292455668011702, -1941700286034261841, -2836753127140407751, - 1432686216250034552, 3663244026151507025, -3068113732684750258, - -1949953187327444488, 3713374280993588804, 3226153669854871355, - -2093273755080502606, 1006087192578600616, -2272122301622271655, - 2533238229511593671, -4450454445568858273, 2647789901083530435, - 2761419461769776844, -1324397441074946198, -680758138988210958, - 94468846694902125, -2394093124890745254, -2682139311758778198, - } - - for _, size := range benchmarks { - seededRand := rand.New(rand.NewSource(int64(size * 1e3))) - - enc := NewTimeEncoder(size) - for i := 0; i < size; i++ { - enc.Write(values[seededRand.Int()%len(values)]) - } - bytes, _ := enc.Bytes() - - b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - b.ReportAllocs() - - dst := make([]int64, size) - for i := 0; i < b.N; i++ { - dst, _ = TimeArrayDecodeAll(bytes, dst) - } - }) - } -} - -func BenchmarkTimeArrayDecodeAllPackedSimple(b *testing.B) { - benchmarks := []int{ - 5, - 55, - 555, - 1000, - } - for _, size := range benchmarks { - seededRand := rand.New(rand.NewSource(int64(size * 1e3))) - - enc := NewTimeEncoder(size) - for i := 0; i < size; i++ { - // Small amount of randomness prevents RLE from being used - enc.Write(int64(i*1000) + int64(seededRand.Intn(10))) - } - bytes, _ := enc.Bytes() - - b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - b.ReportAllocs() - - dst := make([]int64, size) - for i := 0; i < b.N; i++ { - dst, _ = TimeArrayDecodeAll(bytes, dst) - } - }) - } -} - -func BenchmarkTimeArrayDecodeAllRLE(b *testing.B) { - benchmarks := []struct { - n int - delta int64 - }{ - {5, 10}, - {55, 10}, - {555, 10}, - {1000, 10}, - } - for _, bm := range benchmarks { - enc := NewTimeEncoder(bm.n) - acc := int64(0) - for i := 0; i < bm.n; i++ { - enc.Write(acc) - acc += bm.delta - } - bytes, _ := enc.Bytes() - - b.Run(fmt.Sprintf("%d_delta_%d", bm.n, bm.delta), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - b.ReportAllocs() - - dst := make([]int64, bm.n) - for i := 0; i < b.N; i++ { - dst, _ = TimeArrayDecodeAll(bytes, dst) - } - }) - } -} diff --git 
a/tsdb/engine/tsm1/bit_reader.go b/tsdb/engine/tsm1/bit_reader.go deleted file mode 100644 index d91c5b8ca95..00000000000 --- a/tsdb/engine/tsm1/bit_reader.go +++ /dev/null @@ -1,133 +0,0 @@ -package tsm1 - -import "io" - -// BitReader reads bits from an io.Reader. -type BitReader struct { - data []byte - - buf struct { - v uint64 // bit buffer - n uint // available bits - } -} - -// NewBitReader returns a new instance of BitReader that reads from data. -func NewBitReader(data []byte) *BitReader { - b := new(BitReader) - b.Reset(data) - return b -} - -// Reset sets the underlying reader on b and reinitializes. -func (r *BitReader) Reset(data []byte) { - r.data = data - r.buf.v, r.buf.n = 0, 0 - r.readBuf() -} - -// CanReadBitFast returns true if calling ReadBitFast() is allowed. -// Fast bit reads are allowed when at least 2 values are in the buffer. -// This is because it is not required to refilled the buffer and the caller -// can inline the calls. -func (r *BitReader) CanReadBitFast() bool { return r.buf.n > 1 } - -// ReadBitFast is an optimized bit read. -// IMPORTANT: Only allowed if CanReadFastBit() is true! -func (r *BitReader) ReadBitFast() bool { - v := (r.buf.v&(1<<63) != 0) - r.buf.v <<= 1 - r.buf.n -= 1 - return v -} - -// ReadBit returns the next bit from the underlying data. -func (r *BitReader) ReadBit() (bool, error) { - v, err := r.ReadBits(1) - return v != 0, err -} - -// ReadBits reads nbits from the underlying data into a uint64. -// nbits must be from 1 to 64, inclusive. -func (r *BitReader) ReadBits(nbits uint) (uint64, error) { - // Return EOF if there is no more data. - if r.buf.n == 0 { - return 0, io.EOF - } - - // Return bits from buffer if less than available bits. - if nbits <= r.buf.n { - // Return all bits, if requested. - if nbits == 64 { - v := r.buf.v - r.buf.v, r.buf.n = 0, 0 - r.readBuf() - return v, nil - } - - // Otherwise mask returned bits. - v := (r.buf.v >> (64 - nbits)) - r.buf.v <<= nbits - r.buf.n -= nbits - - if r.buf.n == 0 { - r.readBuf() - } - return v, nil - } - - // Otherwise read all available bits in current buffer. - v, n := r.buf.v, r.buf.n - - // Read new buffer. - r.buf.v, r.buf.n = 0, 0 - r.readBuf() - - // Append new buffer to previous buffer and shift to remove unnecessary bits. - v |= (r.buf.v >> n) - v >>= 64 - nbits - - // Remove used bits from new buffer. - bufN := nbits - n - if bufN > r.buf.n { - bufN = r.buf.n - } - r.buf.v <<= bufN - r.buf.n -= bufN - - if r.buf.n == 0 { - r.readBuf() - } - - return v, nil -} - -func (r *BitReader) readBuf() { - // Determine number of bytes to read to fill buffer. - byteN := 8 - (r.buf.n / 8) - - // Limit to the length of our data. - if n := uint(len(r.data)); byteN > n { - byteN = n - } - - // Optimized 8-byte read. - if byteN == 8 { - r.buf.v = uint64(r.data[7]) | uint64(r.data[6])<<8 | - uint64(r.data[5])<<16 | uint64(r.data[4])<<24 | - uint64(r.data[3])<<32 | uint64(r.data[2])<<40 | - uint64(r.data[1])<<48 | uint64(r.data[0])<<56 - r.buf.n = 64 - r.data = r.data[8:] - return - } - - // Otherwise append bytes to buffer. - for i := uint(0); i < byteN; i++ { - r.buf.n += 8 - r.buf.v |= uint64(r.data[i]) << (64 - r.buf.n) - } - - // Move data forward. 
- r.data = r.data[byteN:] -} diff --git a/tsdb/engine/tsm1/bit_reader_test.go b/tsdb/engine/tsm1/bit_reader_test.go deleted file mode 100644 index 27c3b1418f4..00000000000 --- a/tsdb/engine/tsm1/bit_reader_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package tsm1_test - -import ( - "bytes" - "io" - "math" - "math/rand" - "reflect" - "testing" - "testing/quick" - - "github.com/dgryski/go-bitstream" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -func TestBitStreamEOF(t *testing.T) { - br := tsm1.NewBitReader([]byte("0")) - - b, err := br.ReadBits(8) - if err != nil { - t.Fatal(err) - } - if b != '0' { - t.Error("ReadBits(8) didn't return first byte") - } - - if _, err := br.ReadBits(8); err != io.EOF { - t.Error("ReadBits(8) on empty string didn't return EOF") - } - - // 0 = 0b00110000 - br = tsm1.NewBitReader([]byte("0")) - - buf := bytes.NewBuffer(nil) - bw := bitstream.NewWriter(buf) - - for i := 0; i < 4; i++ { - bit, err := br.ReadBit() - if err == io.EOF { - break - } - if err != nil { - t.Error("GetBit returned error err=", err.Error()) - return - } - bw.WriteBit(bitstream.Bit(bit)) - } - - bw.Flush(bitstream.One) - - err = bw.WriteByte(0xAA) - if err != nil { - t.Error("unable to WriteByte") - } - - c := buf.Bytes() - - if len(c) != 2 || c[1] != 0xAA || c[0] != 0x3f { - t.Error("bad return from 4 read bytes") - } - - _, err = tsm1.NewBitReader([]byte("")).ReadBit() - if err != io.EOF { - t.Error("ReadBit on empty string didn't return EOF") - } -} - -func TestBitStream(t *testing.T) { - buf := bytes.NewBuffer(nil) - br := tsm1.NewBitReader([]byte("hello")) - bw := bitstream.NewWriter(buf) - - for { - bit, err := br.ReadBit() - if err == io.EOF { - break - } - if err != nil { - t.Error("GetBit returned error err=", err.Error()) - return - } - bw.WriteBit(bitstream.Bit(bit)) - } - - s := buf.String() - - if s != "hello" { - t.Error("expected 'hello', got=", []byte(s)) - } -} - -func TestByteStream(t *testing.T) { - buf := bytes.NewBuffer(nil) - br := tsm1.NewBitReader([]byte("hello")) - bw := bitstream.NewWriter(buf) - - for i := 0; i < 3; i++ { - bit, err := br.ReadBit() - if err == io.EOF { - break - } - if err != nil { - t.Error("GetBit returned error err=", err.Error()) - return - } - bw.WriteBit(bitstream.Bit(bit)) - } - - for i := 0; i < 3; i++ { - byt, err := br.ReadBits(8) - if err == io.EOF { - break - } - if err != nil { - t.Error("ReadBits(8) returned error err=", err.Error()) - return - } - bw.WriteByte(byte(byt)) - } - - u, err := br.ReadBits(13) - - if err != nil { - t.Error("ReadBits returned error err=", err.Error()) - return - } - - bw.WriteBits(u, 13) - - bw.WriteBits(('!'<<12)|('.'<<4)|0x02, 20) - // 0x2f == '/' - bw.Flush(bitstream.One) - - s := buf.String() - - if s != "hello!./" { - t.Errorf("expected 'hello!./', got=%x", []byte(s)) - } -} - -// Ensure bit reader can read random bits written to a stream. -func TestBitReader_Quick(t *testing.T) { - if err := quick.Check(func(values []uint64, nbits []uint) bool { - // Limit nbits to 64. - for i := 0; i < len(values) && i < len(nbits); i++ { - nbits[i] = (nbits[i] % 64) + 1 - values[i] = values[i] & (math.MaxUint64 >> (64 - nbits[i])) - } - - // Write bits to a buffer. - var buf bytes.Buffer - w := bitstream.NewWriter(&buf) - for i := 0; i < len(values) && i < len(nbits); i++ { - w.WriteBits(values[i], int(nbits[i])) - } - w.Flush(bitstream.Zero) - - // Read bits from the buffer. 
- r := tsm1.NewBitReader(buf.Bytes()) - for i := 0; i < len(values) && i < len(nbits); i++ { - v, err := r.ReadBits(nbits[i]) - if err != nil { - t.Errorf("unexpected error(%d): %s", i, err) - return false - } else if v != values[i] { - t.Errorf("value mismatch(%d): got=%d, exp=%d (nbits=%d)", i, v, values[i], nbits[i]) - return false - } - } - - return true - }, &quick.Config{ - Values: func(a []reflect.Value, rand *rand.Rand) { - a[0], _ = quick.Value(reflect.TypeOf([]uint64{}), rand) - a[1], _ = quick.Value(reflect.TypeOf([]uint{}), rand) - }, - }); err != nil { - t.Fatal(err) - } -} diff --git a/tsdb/engine/tsm1/bool.go b/tsdb/engine/tsm1/bool.go deleted file mode 100644 index da49c2c8281..00000000000 --- a/tsdb/engine/tsm1/bool.go +++ /dev/null @@ -1,169 +0,0 @@ -package tsm1 - -// boolean encoding uses 1 bit per value. Each compressed byte slice contains a 1 byte header -// indicating the compression type, followed by a variable byte encoded length indicating -// how many booleans are packed in the slice. The remaining bytes contains 1 byte for every -// 8 boolean values encoded. - -import ( - "encoding/binary" - "fmt" -) - -// Note: an uncompressed boolean format is not yet implemented. -// booleanCompressedBitPacked is a bit packed format using 1 bit per boolean -const booleanCompressedBitPacked = 1 - -// BooleanEncoder encodes a series of booleans to an in-memory buffer. -type BooleanEncoder struct { - // The encoded bytes - bytes []byte - - // The current byte being encoded - b byte - - // The number of bools packed into b - i int - - // The total number of bools written - n int -} - -// NewBooleanEncoder returns a new instance of BooleanEncoder. -func NewBooleanEncoder(sz int) BooleanEncoder { - return BooleanEncoder{ - bytes: make([]byte, 0, (sz+7)/8), - } -} - -// Reset sets the encoder to its initial state. -func (e *BooleanEncoder) Reset() { - e.bytes = e.bytes[:0] - e.b = 0 - e.i = 0 - e.n = 0 -} - -// Write encodes b to the underlying buffer. -func (e *BooleanEncoder) Write(b bool) { - // If we have filled the current byte, flush it - if e.i >= 8 { - e.flush() - } - - // Use 1 bit for each boolean value, shift the current byte - // by 1 and set the least significant bit accordingly - e.b = e.b << 1 - if b { - e.b |= 1 - } - - // Increment the current boolean count - e.i++ - // Increment the total boolean count - e.n++ -} - -func (e *BooleanEncoder) flush() { - // Pad remaining byte w/ 0s - for e.i < 8 { - e.b = e.b << 1 - e.i++ - } - - // If we have bits set, append them to the byte slice - if e.i > 0 { - e.bytes = append(e.bytes, e.b) - e.b = 0 - e.i = 0 - } -} - -// Flush is no-op -func (e *BooleanEncoder) Flush() {} - -// Bytes returns a new byte slice containing the encoded booleans from previous calls to Write. -func (e *BooleanEncoder) Bytes() ([]byte, error) { - // Ensure the current byte is flushed - e.flush() - b := make([]byte, 10+1) - - // Store the encoding type in the 4 high bits of the first byte - b[0] = byte(booleanCompressedBitPacked) << 4 - - i := 1 - // Encode the number of booleans written - i += binary.PutUvarint(b[i:], uint64(e.n)) - - // Append the packed booleans - return append(b[:i], e.bytes...), nil -} - -// BooleanDecoder decodes a series of booleans from an in-memory buffer. -type BooleanDecoder struct { - b []byte - i int - n int - err error -} - -// SetBytes initializes the decoder with a new set of bytes to read from. -// This must be called before calling any other methods. 
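//
// For illustration (a sketch, not from the original source), the ten
// alternating booleans written by Test_BooleanEncoder_Multi_Compressed arrive
// here as the 4-byte block
//
//	0x10  header: booleanCompressedBitPacked (1) in the high 4 bits
//	0x0a  uvarint count = 10
//	0xaa  bits 1..8: true, false, true, false, ... packed MSB-first
//	0x80  bits 9..10, zero-padded to the byte boundary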
-func (e *BooleanDecoder) SetBytes(b []byte) { - if len(b) == 0 { - return - } - - // First byte stores the encoding type, only have 1 bit-packet format - // currently ignore for now. - b = b[1:] - count, n := binary.Uvarint(b) - if n <= 0 { - e.err = fmt.Errorf("BooleanDecoder: invalid count") - return - } - - e.b = b[n:] - e.i = -1 - e.n = int(count) - - if min := len(e.b) * 8; min < e.n { - // Shouldn't happen - TSM file was truncated/corrupted - e.n = min - } -} - -// Next returns whether there are any bits remaining in the decoder. -// It returns false if there was an error decoding. -// The error is available on the Error method. -func (e *BooleanDecoder) Next() bool { - if e.err != nil { - return false - } - - e.i++ - return e.i < e.n -} - -// Read returns the next bit from the decoder. -func (e *BooleanDecoder) Read() bool { - // Index into the byte slice - idx := e.i >> 3 // integer division by 8 - - // Bit position - pos := 7 - (e.i & 0x7) - - // The mask to select the bit - mask := byte(1 << uint(pos)) - - // The packed byte - v := e.b[idx] - - // Returns true if the bit is set - return v&mask == mask -} - -// Error returns the error encountered during decoding, if one occurred. -func (e *BooleanDecoder) Error() error { - return e.err -} diff --git a/tsdb/engine/tsm1/bool_test.go b/tsdb/engine/tsm1/bool_test.go deleted file mode 100644 index 27db0130a16..00000000000 --- a/tsdb/engine/tsm1/bool_test.go +++ /dev/null @@ -1,170 +0,0 @@ -package tsm1_test - -import ( - "fmt" - "reflect" - "testing" - "testing/quick" - - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -func Test_BooleanEncoder_NoValues(t *testing.T) { - enc := tsm1.NewBooleanEncoder(0) - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var dec tsm1.BooleanDecoder - dec.SetBytes(b) - if dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } -} - -func Test_BooleanEncoder_Single(t *testing.T) { - enc := tsm1.NewBooleanEncoder(1) - v1 := true - enc.Write(v1) - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var dec tsm1.BooleanDecoder - dec.SetBytes(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got false, exp true") - } - - if v1 != dec.Read() { - t.Fatalf("unexpected value: got %v, exp %v", dec.Read(), v1) - } -} - -func Test_BooleanEncoder_Multi_Compressed(t *testing.T) { - enc := tsm1.NewBooleanEncoder(10) - - values := make([]bool, 10) - for i := range values { - values[i] = i%2 == 0 - enc.Write(values[i]) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if exp := 4; len(b) != exp { - t.Fatalf("unexpected length: got %v, exp %v", len(b), exp) - } - - var dec tsm1.BooleanDecoder - dec.SetBytes(b) - - for i, v := range values { - if !dec.Next() { - t.Fatalf("unexpected next value: got false, exp true") - } - if v != dec.Read() { - t.Fatalf("unexpected value at pos %d: got %v, exp %v", i, dec.Read(), v) - } - } - - if dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } -} - -func Test_BooleanEncoder_Quick(t *testing.T) { - if err := quick.Check(func(values []bool) bool { - if values == nil { - values = []bool{} - } - // Write values to encoder. - enc := tsm1.NewBooleanEncoder(1024) - for _, v := range values { - enc.Write(v) - } - - // Retrieve compressed bytes. - buf, err := enc.Bytes() - if err != nil { - t.Fatal(err) - } - - // Read values out of decoder. 
- got := make([]bool, 0, len(values)) - var dec tsm1.BooleanDecoder - dec.SetBytes(buf) - for dec.Next() { - got = append(got, dec.Read()) - } - - // Verify that input and output values match. - if !reflect.DeepEqual(values, got) { - t.Fatalf("mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", values, got) - } - - return true - }, nil); err != nil { - t.Fatal(err) - } -} - -func Test_BooleanDecoder_Corrupt(t *testing.T) { - cases := []string{ - "", // Empty - "\x10\x90", // Packed: invalid count - "\x10\x7f", // Packed: count greater than remaining bits, multiple bytes expected - "\x10\x01", // Packed: count greater than remaining bits, one byte expected - } - - for _, c := range cases { - var dec tsm1.BooleanDecoder - dec.SetBytes([]byte(c)) - if dec.Next() { - t.Fatalf("exp next == false, got true for case %q", c) - } - } -} - -func BenchmarkBooleanDecoder_DecodeAll(b *testing.B) { - benchmarks := []int{ - 1, - 55, - 555, - 1000, - } - for _, size := range benchmarks { - e := tsm1.NewBooleanEncoder(size) - for i := 0; i < size; i++ { - e.Write(i&1 == 1) - } - bytes, err := e.Bytes() - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - - dst := make([]bool, size) - for i := 0; i < b.N; i++ { - var d tsm1.BooleanDecoder - d.SetBytes(bytes) - - var n int - for d.Next() { - dst[n] = d.Read() - n++ - } - if n != size { - b.Fatalf("expected to read %d booleans, but read %d", size, n) - } - } - }) - } -} diff --git a/tsdb/engine/tsm1/cache.go b/tsdb/engine/tsm1/cache.go deleted file mode 100644 index d6bb2b9d567..00000000000 --- a/tsdb/engine/tsm1/cache.go +++ /dev/null @@ -1,807 +0,0 @@ -package tsm1 - -import ( - "fmt" - "math" - "os" - "sync" - "sync/atomic" - "time" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxql" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -// ringShards specifies the number of partitions that the hash ring used to -// store the entry mappings contains. It must be a power of 2. From empirical -// testing, a value above the number of cores on the machine does not provide -// any additional benefit. For now we'll set it to the number of cores on the -// largest box we could imagine running influx. -const ringShards = 16 - -var ( - // ErrSnapshotInProgress is returned if a snapshot is attempted while one is already running. - ErrSnapshotInProgress = fmt.Errorf("snapshot in progress") -) - -// ErrCacheMemorySizeLimitExceeded returns an error indicating an operation -// could not be completed due to exceeding the cache-max-memory-size setting. -func ErrCacheMemorySizeLimitExceeded(n, limit uint64) error { - return fmt.Errorf("cache-max-memory-size exceeded: (%d/%d)", n, limit) -} - -// entry is a set of values and some metadata. -type entry struct { - mu sync.RWMutex - values Values // All stored values. - - // The type of values stored. Read only so doesn't need to be protected by - // mu. - vtype byte -} - -// newEntryValues returns a new instance of entry with the given values. If the -// values are not valid, an error is returned. -func newEntryValues(values []Value) (*entry, error) { - e := &entry{} - e.values = make(Values, 0, len(values)) - e.values = append(e.values, values...) 
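	// (Appending into a freshly allocated slice copies the caller's values,
	// so the entry owns its own backing array from the start.)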
- - // No values, don't check types and ordering - if len(values) == 0 { - return e, nil - } - - et := valueType(values[0]) - for _, v := range values { - // Make sure all the values are the same type - if et != valueType(v) { - return nil, tsdb.ErrFieldTypeConflict - } - } - - // Set the type of values stored. - e.vtype = et - - return e, nil -} - -// add adds the given values to the entry. -func (e *entry) add(values []Value) error { - if len(values) == 0 { - return nil // Nothing to do. - } - - // Are any of the new values the wrong type? - if e.vtype != 0 { - for _, v := range values { - if e.vtype != valueType(v) { - return tsdb.ErrFieldTypeConflict - } - } - } - - // entry currently has no values, so add the new ones and we're done. - e.mu.Lock() - if len(e.values) == 0 { - e.values = values - e.vtype = valueType(values[0]) - e.mu.Unlock() - return nil - } - - // Append the new values to the existing ones... - e.values = append(e.values, values...) - e.mu.Unlock() - return nil -} - -// deduplicate sorts and orders the entry's values. If values are already deduped and sorted, -// the function does no work and simply returns. -func (e *entry) deduplicate() { - e.mu.Lock() - defer e.mu.Unlock() - - if len(e.values) <= 1 { - return - } - e.values = e.values.Deduplicate() -} - -// count returns the number of values in this entry. -func (e *entry) count() int { - e.mu.RLock() - n := len(e.values) - e.mu.RUnlock() - return n -} - -// filter removes all values with timestamps between min and max inclusive. -func (e *entry) filter(min, max int64) { - e.mu.Lock() - if len(e.values) > 1 { - e.values = e.values.Deduplicate() - } - e.values = e.values.Exclude(min, max) - e.mu.Unlock() -} - -// size returns the size of this entry in bytes. -func (e *entry) size() int { - e.mu.RLock() - sz := e.values.Size() - e.mu.RUnlock() - return sz -} - -// InfluxQLType returns for the entry the data type of its values. -func (e *entry) InfluxQLType() (influxql.DataType, error) { - e.mu.RLock() - defer e.mu.RUnlock() - return e.values.InfluxQLType() -} - -// storer is the interface that descibes a cache's store. -type storer interface { - entry(key []byte) *entry // Get an entry by its key. - write(key []byte, values Values) (bool, error) // Write an entry to the store. - remove(key []byte) // Remove an entry from the store. - keys(sorted bool) [][]byte // Return an optionally sorted slice of entry keys. - apply(f func([]byte, *entry) error) error // Apply f to all entries in the store in parallel. - applySerial(f func([]byte, *entry) error) error // Apply f to all entries in serial. - reset() // Reset the store to an initial unused state. - split(n int) []storer // Split splits the store into n stores - count() int // Count returns the number of keys in the store -} - -// Cache maintains an in-memory store of Values for a set of keys. -type Cache struct { - // Due to a bug in atomic size needs to be the first word in the struct, as - // that's the only place where you're guaranteed to be 64-bit aligned on a - // 32 bit system. See: https://golang.org/pkg/sync/atomic/#pkg-note-BUG - size uint64 - snapshotSize uint64 - - mu sync.RWMutex - store storer - maxSize uint64 - - // snapshots are the cache objects that are currently being written to tsm files - // they're kept in memory while flushing so they can be queried along with the cache. 
- // they are read only and should never be modified - snapshot *Cache - snapshotting bool - - // This number is the number of pending or failed WriteSnaphot attempts since the last successful one. - snapshotAttempts int - - stats *cacheMetrics - lastWriteTime time.Time - - // A one time synchronization used to initial the cache with a store. Since the store can allocate a - // large amount memory across shards, we lazily create it. - initialize atomic.Value - initializedCount uint32 -} - -// NewCache returns an instance of a cache which will use a maximum of maxSize bytes of memory. -// Only used for engine caches, never for snapshots. -// Note tags are for metrics only, so if metrics are not desired tags do not have to be set. -func NewCache(maxSize uint64, tags tsdb.EngineTags) *Cache { - c := &Cache{ - maxSize: maxSize, - store: emptyStore{}, - stats: newCacheMetrics(tags), - } - c.stats.LastSnapshot.SetToCurrentTime() - c.initialize.Store(&sync.Once{}) - return c -} - -var globalCacheMetrics = newAllCacheMetrics() - -const cacheSubsystem = "cache" - -type allCacheMetrics struct { - MemBytes *prometheus.GaugeVec - DiskBytes *prometheus.GaugeVec - LastSnapshot *prometheus.GaugeVec - Writes *prometheus.CounterVec - WriteErr *prometheus.CounterVec - WriteDropped *prometheus.CounterVec -} - -type cacheMetrics struct { - MemBytes prometheus.Gauge - DiskBytes prometheus.Gauge - LastSnapshot prometheus.Gauge - Writes prometheus.Counter - WriteErr prometheus.Counter - WriteDropped prometheus.Counter -} - -func newAllCacheMetrics() *allCacheMetrics { - labels := tsdb.EngineLabelNames() - return &allCacheMetrics{ - MemBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: storageNamespace, - Subsystem: cacheSubsystem, - Name: "inuse_bytes", - Help: "Gauge of current memory consumption of cache", - }, labels), - DiskBytes: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: storageNamespace, - Subsystem: cacheSubsystem, - Name: "disk_bytes", - Help: "Gauge of size of most recent snapshot", - }, labels), - LastSnapshot: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: storageNamespace, - Subsystem: cacheSubsystem, - Name: "latest_snapshot", - Help: "Unix time of most recent snapshot", - }, labels), - Writes: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: storageNamespace, - Subsystem: cacheSubsystem, - Name: "writes_total", - Help: "Counter of all writes to cache", - }, labels), - WriteErr: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: storageNamespace, - Subsystem: cacheSubsystem, - Name: "writes_err", - Help: "Counter of failed writes to cache", - }, labels), - WriteDropped: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: storageNamespace, - Subsystem: cacheSubsystem, - Name: "writes_dropped", - Help: "Counter of writes to cache with some dropped points", - }, labels), - } -} - -func CacheCollectors() []prometheus.Collector { - return []prometheus.Collector{ - globalCacheMetrics.MemBytes, - globalCacheMetrics.DiskBytes, - globalCacheMetrics.LastSnapshot, - globalCacheMetrics.Writes, - globalCacheMetrics.WriteErr, - globalCacheMetrics.WriteDropped, - } -} - -func newCacheMetrics(tags tsdb.EngineTags) *cacheMetrics { - labels := tags.GetLabels() - return &cacheMetrics{ - MemBytes: globalCacheMetrics.MemBytes.With(labels), - DiskBytes: globalCacheMetrics.DiskBytes.With(labels), - LastSnapshot: globalCacheMetrics.LastSnapshot.With(labels), - Writes: globalCacheMetrics.Writes.With(labels), - WriteErr: 
globalCacheMetrics.WriteErr.With(labels), - WriteDropped: globalCacheMetrics.WriteDropped.With(labels), - } -} - -// init initializes the cache and allocates the underlying store. Once initialized, -// the store re-used until Freed. -func (c *Cache) init() { - if !atomic.CompareAndSwapUint32(&c.initializedCount, 0, 1) { - return - } - - c.mu.Lock() - c.store, _ = newring(ringShards) - c.mu.Unlock() -} - -// Free releases the underlying store and memory held by the Cache. -func (c *Cache) Free() { - if !atomic.CompareAndSwapUint32(&c.initializedCount, 1, 0) { - return - } - - c.mu.Lock() - c.store = emptyStore{} - c.mu.Unlock() -} - -// WriteMulti writes the map of keys and associated values to the cache. This -// function is goroutine-safe. It returns an error if the cache will exceeded -// its max size by adding the new values. The write attempts to write as many -// values as possible. If one key fails, the others can still succeed and an -// error will be returned. -func (c *Cache) WriteMulti(values map[string][]Value) error { - c.init() - c.stats.Writes.Inc() - var addedSize uint64 - for _, v := range values { - addedSize += uint64(Values(v).Size()) - } - - // Enough room in the cache? - limit := c.maxSize // maxSize is safe for reading without a lock. - n := c.Size() + addedSize - if limit > 0 && n > limit { - c.stats.WriteErr.Inc() - return ErrCacheMemorySizeLimitExceeded(n, limit) - } - - var werr error - c.mu.RLock() - store := c.store - c.mu.RUnlock() - - // We'll optimistically set size here, and then decrement it for write errors. - c.increaseSize(addedSize) - for k, v := range values { - newKey, err := store.write([]byte(k), v) - if err != nil { - // The write failed, hold onto the error and adjust the size delta. - werr = err - addedSize -= uint64(Values(v).Size()) - c.decreaseSize(uint64(Values(v).Size())) - } - if newKey { - addedSize += uint64(len(k)) - c.increaseSize(uint64(len(k))) - } - } - - // Some points in the batch were dropped. An error is returned so - // error stat is incremented as well. - if werr != nil { - c.stats.WriteDropped.Inc() - c.stats.WriteErr.Inc() - } - - // Update the memory size stat - c.stats.MemBytes.Set(float64(c.Size())) - - c.mu.Lock() - c.lastWriteTime = time.Now() - c.mu.Unlock() - - return werr -} - -// Snapshot takes a snapshot of the current cache, adds it to the slice of caches that -// are being flushed, and resets the current cache with new values. -func (c *Cache) Snapshot() (*Cache, error) { - c.init() - - c.mu.Lock() - defer c.mu.Unlock() - - if c.snapshotting { - return nil, ErrSnapshotInProgress - } - - c.snapshotting = true - c.snapshotAttempts++ // increment the number of times we tried to do this - - // If no snapshot exists, create a new one, otherwise update the existing snapshot - if c.snapshot == nil { - store, err := newring(ringShards) - if err != nil { - return nil, err - } - - c.snapshot = &Cache{ - store: store, - } - } - - // Did a prior snapshot exist that failed? If so, return the existing - // snapshot to retry. - if c.snapshot.Size() > 0 { - return c.snapshot, nil - } - - c.snapshot.store, c.store = c.store, c.snapshot.store - snapshotSize := c.Size() - - // Save the size of the snapshot on the snapshot cache - atomic.StoreUint64(&c.snapshot.size, snapshotSize) - // Save the size of the snapshot on the live cache - atomic.StoreUint64(&c.snapshotSize, snapshotSize) - - // Reset the cache's store. 
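	// After the swap above, c.store points at the ring that previously backed
	// the snapshot, so resetting it hands the live cache a clean store while
	// c.snapshot keeps the data that was just captured.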
- c.store.reset() - atomic.StoreUint64(&c.size, 0) - c.stats.LastSnapshot.SetToCurrentTime() - - return c.snapshot, nil -} - -// Deduplicate sorts the snapshot before returning it. The compactor and any queries -// coming in while it writes will need the values sorted. -func (c *Cache) Deduplicate() { - c.mu.RLock() - store := c.store - c.mu.RUnlock() - - // Apply a function that simply calls deduplicate on each entry in the ring. - // apply cannot return an error in this invocation. - _ = store.apply(func(_ []byte, e *entry) error { e.deduplicate(); return nil }) -} - -// ClearSnapshot removes the snapshot cache from the list of flushing caches and -// adjusts the size. -func (c *Cache) ClearSnapshot(success bool) { - c.init() - - c.mu.RLock() - snapStore := c.snapshot.store - c.mu.RUnlock() - - // reset the snapshot store outside of the write lock - if success { - snapStore.reset() - } - - c.mu.Lock() - defer c.mu.Unlock() - - c.snapshotting = false - - if success { - c.snapshotAttempts = 0 - - // Reset the snapshot to a fresh Cache. - c.snapshot = &Cache{ - store: c.snapshot.store, - } - c.stats.DiskBytes.Set(float64(atomic.LoadUint64(&c.snapshotSize))) - atomic.StoreUint64(&c.snapshotSize, 0) - } - c.stats.MemBytes.Set(float64(c.Size())) -} - -// Size returns the number of point-calcuated bytes the cache currently uses. -func (c *Cache) Size() uint64 { - return atomic.LoadUint64(&c.size) + atomic.LoadUint64(&c.snapshotSize) -} - -// increaseSize increases size by delta. -func (c *Cache) increaseSize(delta uint64) { - atomic.AddUint64(&c.size, delta) -} - -// decreaseSize decreases size by delta. -func (c *Cache) decreaseSize(delta uint64) { - // Per sync/atomic docs, bit-flip delta minus one to perform subtraction within AddUint64. - atomic.AddUint64(&c.size, ^(delta - 1)) -} - -// MaxSize returns the maximum number of bytes the cache may consume. -func (c *Cache) MaxSize() uint64 { - return c.maxSize -} - -func (c *Cache) Count() int { - c.mu.RLock() - n := c.store.count() - c.mu.RUnlock() - return n -} - -// Keys returns a sorted slice of all keys under management by the cache. -func (c *Cache) Keys() [][]byte { - c.mu.RLock() - store := c.store - c.mu.RUnlock() - return store.keys(true) -} - -func (c *Cache) Split(n int) []*Cache { - if n == 1 { - return []*Cache{c} - } - - caches := make([]*Cache, n) - storers := c.store.split(n) - for i := 0; i < n; i++ { - caches[i] = &Cache{ - store: storers[i], - } - } - return caches -} - -// Type returns the series type for a key. -func (c *Cache) Type(key []byte) (models.FieldType, error) { - c.mu.RLock() - e := c.store.entry(key) - if e == nil && c.snapshot != nil { - e = c.snapshot.store.entry(key) - } - c.mu.RUnlock() - - if e != nil { - typ, err := e.InfluxQLType() - if err != nil { - return models.Empty, tsdb.ErrUnknownFieldType - } - - switch typ { - case influxql.Float: - return models.Float, nil - case influxql.Integer: - return models.Integer, nil - case influxql.Unsigned: - return models.Unsigned, nil - case influxql.Boolean: - return models.Boolean, nil - case influxql.String: - return models.String, nil - } - } - - return models.Empty, tsdb.ErrUnknownFieldType -} - -// Values returns a copy of all values, deduped and sorted, for the given key. 
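// Snapshot values, if any, are copied into the result ahead of hot-cache
// values, and the combined buffer is deduplicated once at the end.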
-func (c *Cache) Values(key []byte) Values { - var snapshotEntries *entry - - c.mu.RLock() - e := c.store.entry(key) - if c.snapshot != nil { - snapshotEntries = c.snapshot.store.entry(key) - } - c.mu.RUnlock() - - if e == nil { - if snapshotEntries == nil { - // No values in hot cache or snapshots. - return nil - } - } else { - e.deduplicate() - } - - // Build the sequence of entries that will be returned, in the correct order. - // Calculate the required size of the destination buffer. - var entries []*entry - sz := 0 - - if snapshotEntries != nil { - snapshotEntries.deduplicate() // guarantee we are deduplicated - entries = append(entries, snapshotEntries) - sz += snapshotEntries.count() - } - - if e != nil { - entries = append(entries, e) - sz += e.count() - } - - // Any entries? If not, return. - if sz == 0 { - return nil - } - - // Create the buffer, and copy all hot values and snapshots. Individual - // entries are sorted at this point, so now the code has to check if the - // resultant buffer will be sorted from start to finish. - values := make(Values, sz) - n := 0 - for _, e := range entries { - e.mu.RLock() - n += copy(values[n:], e.values) - e.mu.RUnlock() - } - values = values[:n] - values = values.Deduplicate() - - return values -} - -// Delete removes all values for the given keys from the cache. -func (c *Cache) Delete(keys [][]byte) { - c.DeleteRange(keys, math.MinInt64, math.MaxInt64) -} - -// DeleteRange removes the values for all keys containing points -// with timestamps between between min and max from the cache. -// -// TODO(edd): Lock usage could possibly be optimised if necessary. -func (c *Cache) DeleteRange(keys [][]byte, min, max int64) { - c.init() - - c.mu.Lock() - defer c.mu.Unlock() - - for _, k := range keys { - // Make sure key exist in the cache, skip if it does not - e := c.store.entry(k) - if e == nil { - continue - } - - origSize := uint64(e.size()) - if min == math.MinInt64 && max == math.MaxInt64 { - c.decreaseSize(origSize + uint64(len(k))) - c.store.remove(k) - continue - } - - e.filter(min, max) - if e.count() == 0 { - c.store.remove(k) - c.decreaseSize(origSize + uint64(len(k))) - continue - } - - c.decreaseSize(origSize - uint64(e.size())) - } - c.stats.MemBytes.Set(float64(c.Size())) -} - -// SetMaxSize updates the memory limit of the cache. -func (c *Cache) SetMaxSize(size uint64) { - c.mu.Lock() - c.maxSize = size - c.mu.Unlock() -} - -// values returns the values for the key. It assumes the data is already sorted. -// It doesn't lock the cache but it does read-lock the entry if there is one for the key. -// values should only be used in compact.go in the CacheKeyIterator. -func (c *Cache) values(key []byte) Values { - e := c.store.entry(key) - if e == nil { - return nil - } - e.mu.RLock() - v := e.values - e.mu.RUnlock() - return v -} - -// ApplyEntryFn applies the function f to each entry in the Cache. -// ApplyEntryFn calls f on each entry in turn, within the same goroutine. -// It is safe for use by multiple goroutines. -func (c *Cache) ApplyEntryFn(f func(key []byte, entry *entry) error) error { - c.mu.RLock() - store := c.store - c.mu.RUnlock() - return store.applySerial(f) -} - -// CacheLoader processes a set of WAL segment files, and loads a cache with the data -// contained within those files. Processing of the supplied files take place in the -// order they exist in the files slice. -type CacheLoader struct { - files []string - - Logger *zap.Logger -} - -// NewCacheLoader returns a new instance of a CacheLoader. 
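//
// A minimal usage sketch (assumed, not from the original file; the segment
// paths and logger below are placeholders):
//
//	cache := NewCache(maxSize, tsdb.EngineTags{})
//	loader := NewCacheLoader(segmentFiles) // WAL segment paths, in replay order
//	loader.WithLogger(logger)
//	if err := loader.Load(cache); err != nil {
//		// handle the error
//	}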
-func NewCacheLoader(files []string) *CacheLoader { - return &CacheLoader{ - files: files, - Logger: zap.NewNop(), - } -} - -// Load returns a cache loaded with the data contained within the segment files. -// If, during reading of a segment file, corruption is encountered, that segment -// file is truncated up to and including the last valid byte, and processing -// continues with the next segment file. -func (cl *CacheLoader) Load(cache *Cache) error { - - var r *WALSegmentReader - for _, fn := range cl.files { - if err := func() error { - f, err := os.OpenFile(fn, os.O_CREATE|os.O_RDWR, 0666) - if err != nil { - return err - } - defer f.Close() - - // Log some information about the segments. - stat, err := os.Stat(f.Name()) - if err != nil { - return err - } - cl.Logger.Info("Reading file", zap.String("path", f.Name()), zap.Int64("size", stat.Size())) - - // Nothing to read, skip it - if stat.Size() == 0 { - return nil - } - - if r == nil { - r = NewWALSegmentReader(f) - defer r.Close() - } else { - r.Reset(f) - } - - for r.Next() { - entry, err := r.Read() - if err != nil { - n := r.Count() - cl.Logger.Info("File corrupt", zap.Error(err), zap.String("path", f.Name()), zap.Int64("pos", n)) - if err := f.Truncate(n); err != nil { - return err - } - break - } - - switch t := entry.(type) { - case *WriteWALEntry: - if err := cache.WriteMulti(t.Values); err != nil { - return err - } - case *DeleteRangeWALEntry: - cache.DeleteRange(t.Keys, t.Min, t.Max) - case *DeleteWALEntry: - cache.Delete(t.Keys) - } - } - - return r.Close() - }(); err != nil { - return err - } - } - return nil -} - -// WithLogger sets the logger on the CacheLoader. -func (cl *CacheLoader) WithLogger(log *zap.Logger) { - cl.Logger = log.With(zap.String("service", "cacheloader")) -} - -func (c *Cache) LastWriteTime() time.Time { - c.mu.RLock() - defer c.mu.RUnlock() - return c.lastWriteTime -} - -const ( - valueTypeUndefined = 0 - valueTypeFloat64 = 1 - valueTypeInteger = 2 - valueTypeString = 3 - valueTypeBoolean = 4 - valueTypeUnsigned = 5 -) - -func valueType(v Value) byte { - switch v.(type) { - case FloatValue: - return valueTypeFloat64 - case IntegerValue: - return valueTypeInteger - case StringValue: - return valueTypeString - case BooleanValue: - return valueTypeBoolean - case UnsignedValue: - return valueTypeUnsigned - default: - return valueTypeUndefined - } -} - -type emptyStore struct{} - -func (e emptyStore) entry(key []byte) *entry { return nil } -func (e emptyStore) write(key []byte, values Values) (bool, error) { return false, nil } -func (e emptyStore) remove(key []byte) {} -func (e emptyStore) keys(sorted bool) [][]byte { return nil } -func (e emptyStore) apply(f func([]byte, *entry) error) error { return nil } -func (e emptyStore) applySerial(f func([]byte, *entry) error) error { return nil } -func (e emptyStore) reset() {} -func (e emptyStore) split(n int) []storer { return nil } -func (e emptyStore) count() int { return 0 } diff --git a/tsdb/engine/tsm1/cache_race_test.go b/tsdb/engine/tsm1/cache_race_test.go deleted file mode 100644 index 651d8381ffd..00000000000 --- a/tsdb/engine/tsm1/cache_race_test.go +++ /dev/null @@ -1,207 +0,0 @@ -package tsm1_test - -import ( - "fmt" - "math/rand" - "sync" - "testing" - - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -func TestCacheCheckConcurrentReadsAreSafe(t *testing.T) { - values := make(tsm1.Values, 1000) - timestamps := make([]int64, len(values)) - series := make([][]byte, 100) - for i 
:= range timestamps { - timestamps[i] = int64(rand.Int63n(int64(len(values)))) - } - - for i := range values { - values[i] = tsm1.NewValue(timestamps[i*len(timestamps)/len(values)], float64(i)) - } - - for i := range series { - series[i] = []byte(fmt.Sprintf("series%d", i)) - } - - wg := sync.WaitGroup{} - c := tsm1.NewCache(1000000, tsdb.EngineTags{}) - - ch := make(chan struct{}) - for _, s := range series { - for _, v := range values { - c.Write(s, tsm1.Values{v}) - } - wg.Add(3) - go func(s []byte) { - defer wg.Done() - <-ch - c.Values(s) - }(s) - go func(s []byte) { - defer wg.Done() - <-ch - c.Values(s) - }(s) - go func(s []byte) { - defer wg.Done() - <-ch - c.Values(s) - }(s) - } - close(ch) - wg.Wait() -} - -func TestCacheRace(t *testing.T) { - values := make(tsm1.Values, 1000) - timestamps := make([]int64, len(values)) - series := make([][]byte, 100) - for i := range timestamps { - timestamps[i] = int64(rand.Int63n(int64(len(values)))) - } - - for i := range values { - values[i] = tsm1.NewValue(timestamps[i*len(timestamps)/len(values)], float64(i)) - } - - for i := range series { - series[i] = []byte(fmt.Sprintf("series%d", i)) - } - - wg := sync.WaitGroup{} - c := tsm1.NewCache(1000000, tsdb.EngineTags{}) - - ch := make(chan struct{}) - for _, s := range series { - for _, v := range values { - c.Write(s, tsm1.Values{v}) - } - wg.Add(1) - go func(s []byte) { - defer wg.Done() - <-ch - c.Values(s) - }(s) - } - - errC := make(chan error) - wg.Add(1) - go func() { - defer wg.Done() - <-ch - s, err := c.Snapshot() - if err == tsm1.ErrSnapshotInProgress { - return - } - - if err != nil { - errC <- fmt.Errorf("failed to snapshot cache: %v", err) - return - } - - s.Deduplicate() - c.ClearSnapshot(true) - }() - - close(ch) - - go func() { - wg.Wait() - close(errC) - }() - - for err := range errC { - if err != nil { - t.Error(err) - } - } -} - -func TestCacheRace2Compacters(t *testing.T) { - values := make(tsm1.Values, 1000) - timestamps := make([]int64, len(values)) - series := make([][]byte, 100) - for i := range timestamps { - timestamps[i] = int64(rand.Int63n(int64(len(values)))) - } - - for i := range values { - values[i] = tsm1.NewValue(timestamps[i*len(timestamps)/len(values)], float64(i)) - } - - for i := range series { - series[i] = []byte(fmt.Sprintf("series%d", i)) - } - - wg := sync.WaitGroup{} - c := tsm1.NewCache(1000000, tsdb.EngineTags{}) - - ch := make(chan struct{}) - for _, s := range series { - for _, v := range values { - c.Write(s, tsm1.Values{v}) - } - wg.Add(1) - go func(s []byte) { - defer wg.Done() - <-ch - c.Values(s) - }(s) - } - fileCounter := 0 - mapFiles := map[int]bool{} - mu := sync.Mutex{} - errC := make(chan error) - for i := 0; i < 2; i++ { - wg.Add(1) - go func() { - defer wg.Done() - <-ch - s, err := c.Snapshot() - if err == tsm1.ErrSnapshotInProgress { - return - } - - if err != nil { - errC <- fmt.Errorf("failed to snapshot cache: %v", err) - return - } - - mu.Lock() - mapFiles[fileCounter] = true - fileCounter++ - myFiles := map[int]bool{} - for k, e := range mapFiles { - myFiles[k] = e - } - mu.Unlock() - s.Deduplicate() - c.ClearSnapshot(true) - mu.Lock() - defer mu.Unlock() - for k := range myFiles { - if _, ok := mapFiles[k]; !ok { - errC <- fmt.Errorf("something else deleted one of my files") - return - } else { - delete(mapFiles, k) - } - } - }() - } - close(ch) - - go func() { - wg.Wait() - close(errC) - }() - - for err := range errC { - if err != nil { - t.Error(err) - } - } -} diff --git a/tsdb/engine/tsm1/cache_test.go 
b/tsdb/engine/tsm1/cache_test.go deleted file mode 100644 index f5f0c6305c6..00000000000 --- a/tsdb/engine/tsm1/cache_test.go +++ /dev/null @@ -1,907 +0,0 @@ -package tsm1 - -import ( - "bytes" - "errors" - "fmt" - "math" - "math/rand" - "os" - "reflect" - "runtime" - "strings" - "sync" - "sync/atomic" - "testing" - - "github.com/golang/snappy" - "github.com/influxdata/influxdb/v2/tsdb" -) - -// Convenience method for testing. -func (c *Cache) Write(key []byte, values []Value) error { - return c.WriteMulti(map[string][]Value{string(key): values}) -} - -func TestCache_NewCache(t *testing.T) { - c := NewCache(100, tsdb.EngineTags{}) - if c == nil { - t.Fatalf("failed to create new cache") - } - - if c.MaxSize() != 100 { - t.Fatalf("new cache max size not correct") - } - if c.Size() != 0 { - t.Fatalf("new cache size not correct") - } - if len(c.Keys()) != 0 { - t.Fatalf("new cache keys not correct: %v", c.Keys()) - } -} - -func TestCache_CacheWriteMulti(t *testing.T) { - v0 := NewValue(1, 1.0) - v1 := NewValue(2, 2.0) - v2 := NewValue(3, 3.0) - values := Values{v0, v1, v2} - valuesSize := uint64(v0.Size() + v1.Size() + v2.Size()) - - c := NewCache(30*valuesSize, tsdb.EngineTags{}) - - if err := c.WriteMulti(map[string][]Value{"foo": values, "bar": values}); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - if n := c.Size(); n != 2*valuesSize+6 { - t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n) - } - - if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) { - t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) - } -} - -// Tests that the cache stats and size are correctly maintained during writes. -func TestCache_WriteMulti_Stats(t *testing.T) { - limit := uint64(1) - c := NewCache(limit, tsdb.EngineTags{}) - ms := NewTestStore() - c.store = ms - - // Not enough room in the cache. - v := NewValue(1, 1.0) - values := map[string][]Value{"foo": {v, v}} - if got, exp := c.WriteMulti(values), ErrCacheMemorySizeLimitExceeded(uint64(v.Size()*2), limit); !reflect.DeepEqual(got, exp) { - t.Fatalf("got %q, expected %q", got, exp) - } - - // Fail one of the values in the write. - c = NewCache(50, tsdb.EngineTags{}) - c.init() - c.store = ms - - ms.writef = func(key []byte, v Values) (bool, error) { - if bytes.Equal(key, []byte("foo")) { - return false, errors.New("write failed") - } - return true, nil - } - - values = map[string][]Value{"foo": {v, v}, "bar": {v}} - if got, exp := c.WriteMulti(values), errors.New("write failed"); !reflect.DeepEqual(got, exp) { - t.Fatalf("got %v, expected %v", got, exp) - } - - // Cache size decreased correctly. 
- if got, exp := c.Size(), uint64(16)+3; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } -} - -func TestCache_CacheWriteMulti_TypeConflict(t *testing.T) { - v0 := NewValue(1, 1.0) - v1 := NewValue(2, 2.0) - v2 := NewValue(3, int64(3)) - values := Values{v0, v1, v2} - valuesSize := uint64(v0.Size() + v1.Size() + v2.Size()) - - c := NewCache(3*valuesSize, tsdb.EngineTags{}) - - if err := c.WriteMulti(map[string][]Value{"foo": values[:1], "bar": values[1:]}); err == nil { - t.Fatalf(" expected field type conflict") - } - - if exp, got := uint64(v0.Size())+3, c.Size(); exp != got { - t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got) - } - - if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) { - t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) - } -} - -func TestCache_Cache_DeleteRange(t *testing.T) { - v0 := NewValue(1, 1.0) - v1 := NewValue(2, 2.0) - v2 := NewValue(3, 3.0) - values := Values{v0, v1, v2} - valuesSize := uint64(v0.Size() + v1.Size() + v2.Size()) - - c := NewCache(30*valuesSize, tsdb.EngineTags{}) - - if err := c.WriteMulti(map[string][]Value{"foo": values, "bar": values}); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - if n := c.Size(); n != 2*valuesSize+6 { - t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n) - } - - if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) { - t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) - } - - c.DeleteRange([][]byte{[]byte("bar")}, 2, math.MaxInt64) - - if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) { - t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) - } - - if got, exp := c.Size(), valuesSize+uint64(v0.Size())+6; exp != got { - t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got) - } - - if got, exp := len(c.Values([]byte("bar"))), 1; got != exp { - t.Fatalf("cache values mismatch: got %v, exp %v", got, exp) - } - - if got, exp := len(c.Values([]byte("foo"))), 3; got != exp { - t.Fatalf("cache values mismatch: got %v, exp %v", got, exp) - } -} - -func TestCache_DeleteRange_NoValues(t *testing.T) { - v0 := NewValue(1, 1.0) - v1 := NewValue(2, 2.0) - v2 := NewValue(3, 3.0) - values := Values{v0, v1, v2} - valuesSize := uint64(v0.Size() + v1.Size() + v2.Size()) - - c := NewCache(3*valuesSize, tsdb.EngineTags{}) - - if err := c.WriteMulti(map[string][]Value{"foo": values}); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - if n := c.Size(); n != valuesSize+3 { - t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n) - } - - if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) { - t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) - } - - c.DeleteRange([][]byte{[]byte("foo")}, math.MinInt64, math.MaxInt64) - - if exp, keys := 0, len(c.Keys()); !reflect.DeepEqual(keys, exp) { - t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) - } - - if got, exp := c.Size(), uint64(0); exp != got { - t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got) - } - - if got, exp := len(c.Values([]byte("foo"))), 0; got != exp { - t.Fatalf("cache values mismatch: got %v, exp %v", got, exp) - } -} - -func TestCache_DeleteRange_NotSorted(t *testing.T) { - v0 := NewValue(1, 1.0) - v1 := NewValue(3, 
3.0) - v2 := NewValue(2, 2.0) - values := Values{v0, v1, v2} - valuesSize := uint64(v0.Size() + v1.Size() + v2.Size()) - - c := NewCache(3*valuesSize, tsdb.EngineTags{}) - - if err := c.WriteMulti(map[string][]Value{"foo": values}); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - if n := c.Size(); n != valuesSize+3 { - t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n) - } - - if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) { - t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) - } - - c.DeleteRange([][]byte{[]byte("foo")}, 1, 3) - - if exp, keys := 0, len(c.Keys()); !reflect.DeepEqual(keys, exp) { - t.Fatalf("cache keys incorrect after delete, exp %v, got %v", exp, keys) - } - - if got, exp := c.Size(), uint64(0); exp != got { - t.Fatalf("cache size incorrect after delete, exp %d, got %d", exp, got) - } - - if got, exp := len(c.Values([]byte("foo"))), 0; got != exp { - t.Fatalf("cache values mismatch: got %v, exp %v", got, exp) - } -} - -func TestCache_Cache_Delete(t *testing.T) { - v0 := NewValue(1, 1.0) - v1 := NewValue(2, 2.0) - v2 := NewValue(3, 3.0) - values := Values{v0, v1, v2} - valuesSize := uint64(v0.Size() + v1.Size() + v2.Size()) - - c := NewCache(30*valuesSize, tsdb.EngineTags{}) - - if err := c.WriteMulti(map[string][]Value{"foo": values, "bar": values}); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - if n := c.Size(); n != 2*valuesSize+6 { - t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", 2*valuesSize, n) - } - - if exp, keys := [][]byte{[]byte("bar"), []byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) { - t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) - } - - c.Delete([][]byte{[]byte("bar")}) - - if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) { - t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) - } - - if got, exp := c.Size(), valuesSize+3; exp != got { - t.Fatalf("cache size incorrect after 2 writes, exp %d, got %d", exp, got) - } - - if got, exp := len(c.Values([]byte("bar"))), 0; got != exp { - t.Fatalf("cache values mismatch: got %v, exp %v", got, exp) - } - - if got, exp := len(c.Values([]byte("foo"))), 3; got != exp { - t.Fatalf("cache values mismatch: got %v, exp %v", got, exp) - } -} - -func TestCache_Cache_Delete_NonExistent(t *testing.T) { - c := NewCache(1024, tsdb.EngineTags{}) - - c.Delete([][]byte{[]byte("bar")}) - - if got, exp := c.Size(), uint64(0); exp != got { - t.Fatalf("cache size incorrect exp %d, got %d", exp, got) - } -} - -// This tests writing two batches to the same series. The first batch -// is sorted. The second batch is also sorted but contains duplicates. 
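The test below also depends on the cache's rule for duplicate timestamps: when two values share a timestamp, the one written later wins (compare TestCache_CacheValues further down, where NewValue(1, 1.0) replaces NewValue(1, 0.0)). A standalone sketch of that rule with assumed types, not the tsm1 Values implementation:

```go
package main

import (
	"fmt"
	"sort"
)

type point struct {
	ts  int64
	val float64
}

// dedup sorts points by timestamp and, for duplicate timestamps, keeps the
// value that appears later in the input.
func dedup(pts []point) []point {
	sort.SliceStable(pts, func(i, j int) bool { return pts[i].ts < pts[j].ts })
	out := pts[:0]
	for _, p := range pts {
		if n := len(out); n > 0 && out[n-1].ts == p.ts {
			out[n-1] = p // later write for the same timestamp wins
			continue
		}
		out = append(out, p)
	}
	return out
}

func main() {
	fmt.Println(dedup([]point{{2, 1.0}, {3, 1.0}, {4, 2.0}, {5, 3.0}, {5, 3.5}}))
	// Output: [{2 1} {3 1} {4 2} {5 3.5}]
}
```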
-func TestCache_CacheWriteMulti_Duplicates(t *testing.T) { - v0 := NewValue(2, 1.0) - v1 := NewValue(3, 1.0) - values0 := Values{v0, v1} - - v3 := NewValue(4, 2.0) - v4 := NewValue(5, 3.0) - v5 := NewValue(5, 3.0) - values1 := Values{v3, v4, v5} - - c := NewCache(0, tsdb.EngineTags{}) - - if err := c.WriteMulti(map[string][]Value{"foo": values0}); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - - if err := c.WriteMulti(map[string][]Value{"foo": values1}); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - - if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) { - t.Fatalf("cache keys incorrect after 2 writes, exp %v, got %v", exp, keys) - } - - expAscValues := Values{v0, v1, v3, v5} - if exp, got := len(expAscValues), len(c.Values([]byte("foo"))); exp != got { - t.Fatalf("value count mismatch: exp: %v, got %v", exp, got) - } - if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expAscValues, deduped) { - t.Fatalf("deduped ascending values for foo incorrect, exp: %v, got %v", expAscValues, deduped) - } -} - -func TestCache_CacheValues(t *testing.T) { - v0 := NewValue(1, 0.0) - v1 := NewValue(2, 2.0) - v2 := NewValue(3, 3.0) - v3 := NewValue(1, 1.0) - v4 := NewValue(4, 4.0) - - c := NewCache(512, tsdb.EngineTags{}) - if deduped := c.Values([]byte("no such key")); deduped != nil { - t.Fatalf("Values returned for no such key") - } - - if err := c.Write([]byte("foo"), Values{v0, v1, v2, v3}); err != nil { - t.Fatalf("failed to write 3 values, key foo to cache: %s", err.Error()) - } - if err := c.Write([]byte("foo"), Values{v4}); err != nil { - t.Fatalf("failed to write 1 value, key foo to cache: %s", err.Error()) - } - - expAscValues := Values{v3, v1, v2, v4} - if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expAscValues, deduped) { - t.Fatalf("deduped ascending values for foo incorrect, exp: %v, got %v", expAscValues, deduped) - } -} - -func TestCache_CacheSnapshot(t *testing.T) { - v0 := NewValue(2, 0.0) - v1 := NewValue(3, 2.0) - v2 := NewValue(4, 3.0) - v3 := NewValue(5, 4.0) - v4 := NewValue(6, 5.0) - v5 := NewValue(1, 5.0) - v6 := NewValue(7, 5.0) - v7 := NewValue(2, 5.0) - - c := NewCache(512, tsdb.EngineTags{}) - if err := c.Write([]byte("foo"), Values{v0, v1, v2, v3}); err != nil { - t.Fatalf("failed to write 3 values, key foo to cache: %s", err.Error()) - } - - // Grab snapshot, and ensure it's as expected. - snapshot, err := c.Snapshot() - if err != nil { - t.Fatalf("failed to snapshot cache: %v", err) - } - - expValues := Values{v0, v1, v2, v3} - if deduped := snapshot.values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) { - t.Fatalf("snapshotted values for foo incorrect, exp: %v, got %v", expValues, deduped) - } - - // Ensure cache is still as expected. - if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) { - t.Fatalf("post-snapshot values for foo incorrect, exp: %v, got %v", expValues, deduped) - } - - // Write a new value to the cache. - if err := c.Write([]byte("foo"), Values{v4}); err != nil { - t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error()) - } - expValues = Values{v0, v1, v2, v3, v4} - if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) { - t.Fatalf("post-snapshot write values for foo incorrect, exp: %v, got %v", expValues, deduped) - } - - // Write a new, out-of-order, value to the cache. 
- if err := c.Write([]byte("foo"), Values{v5}); err != nil { - t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error()) - } - expValues = Values{v5, v0, v1, v2, v3, v4} - if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) { - t.Fatalf("post-snapshot out-of-order write values for foo incorrect, exp: %v, got %v", expValues, deduped) - } - - // Clear snapshot, ensuring non-snapshot data untouched. - c.ClearSnapshot(true) - - expValues = Values{v5, v4} - if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) { - t.Fatalf("post-clear values for foo incorrect, exp: %v, got %v", expValues, deduped) - } - - // Create another snapshot - _, err = c.Snapshot() - if err != nil { - t.Fatalf("failed to snapshot cache: %v", err) - } - - if err := c.Write([]byte("foo"), Values{v4, v5}); err != nil { - t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error()) - } - - c.ClearSnapshot(true) - - _, err = c.Snapshot() - if err != nil { - t.Fatalf("failed to snapshot cache: %v", err) - } - - if err := c.Write([]byte("foo"), Values{v6, v7}); err != nil { - t.Fatalf("failed to write post-snap value, key foo to cache: %s", err.Error()) - } - - expValues = Values{v5, v7, v4, v6} - if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(expValues, deduped) { - t.Fatalf("post-snapshot out-of-order write values for foo incorrect, exp: %v, got %v", expValues, deduped) - } -} - -// Tests that Snapshot updates statistics correctly. -func TestCache_Snapshot_Stats(t *testing.T) { - limit := uint64(16) - c := NewCache(limit, tsdb.EngineTags{}) - - values := map[string][]Value{"foo": {NewValue(1, 1.0)}} - if err := c.WriteMulti(values); err != nil { - t.Fatal(err) - } - - _, err := c.Snapshot() - if err != nil { - t.Fatal(err) - } - - // Store size should have been reset. - if got, exp := c.Size(), uint64(16)+3; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } -} - -func TestCache_CacheEmptySnapshot(t *testing.T) { - c := NewCache(512, tsdb.EngineTags{}) - - // Grab snapshot, and ensure it's as expected. - snapshot, err := c.Snapshot() - if err != nil { - t.Fatalf("failed to snapshot cache: %v", err) - } - if deduped := snapshot.values([]byte("foo")); !reflect.DeepEqual(Values(nil), deduped) { - t.Fatalf("snapshotted values for foo incorrect, exp: %v, got %v", nil, deduped) - } - - // Ensure cache is still as expected. - if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(Values(nil), deduped) { - t.Fatalf("post-snapshotted values for foo incorrect, exp: %v, got %v", Values(nil), deduped) - } - - // Clear snapshot. 
- c.ClearSnapshot(true) - if deduped := c.Values([]byte("foo")); !reflect.DeepEqual(Values(nil), deduped) { - t.Fatalf("post-snapshot-clear values for foo incorrect, exp: %v, got %v", Values(nil), deduped) - } -} - -func TestCache_CacheWriteMemoryExceeded(t *testing.T) { - v0 := NewValue(1, 1.0) - v1 := NewValue(2, 2.0) - - c := NewCache(uint64(v1.Size()), tsdb.EngineTags{}) - - if err := c.Write([]byte("foo"), Values{v0}); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - if exp, keys := [][]byte{[]byte("foo")}, c.Keys(); !reflect.DeepEqual(keys, exp) { - t.Fatalf("cache keys incorrect after writes, exp %v, got %v", exp, keys) - } - if err := c.Write([]byte("bar"), Values{v1}); err == nil || !strings.Contains(err.Error(), "cache-max-memory-size") { - t.Fatalf("wrong error writing key bar to cache: %v", err) - } - - // Grab snapshot, write should still fail since we're still using the memory. - _, err := c.Snapshot() - if err != nil { - t.Fatalf("failed to snapshot cache: %v", err) - } - if err := c.Write([]byte("bar"), Values{v1}); err == nil || !strings.Contains(err.Error(), "cache-max-memory-size") { - t.Fatalf("wrong error writing key bar to cache: %v", err) - } - - // Clear the snapshot and the write should now succeed. - c.ClearSnapshot(true) - if err := c.Write([]byte("bar"), Values{v1}); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - expAscValues := Values{v1} - if deduped := c.Values([]byte("bar")); !reflect.DeepEqual(expAscValues, deduped) { - t.Fatalf("deduped ascending values for bar incorrect, exp: %v, got %v", expAscValues, deduped) - } -} - -func TestCache_Deduplicate_Concurrent(t *testing.T) { - if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" || os.Getenv("CIRCLECI") != "" { - t.Skip("Skipping test in short, race, circleci and appveyor mode.") - } - - values := make(map[string][]Value) - - for i := 0; i < 1000; i++ { - for j := 0; j < 100; j++ { - values[fmt.Sprintf("cpu%d", i)] = []Value{NewValue(int64(i+j)+int64(rand.Intn(10)), float64(i))} - } - } - - wg := sync.WaitGroup{} - c := NewCache(1000000, tsdb.EngineTags{}) - - wg.Add(1) - go func() { - defer wg.Done() - for i := 0; i < 1000; i++ { - c.WriteMulti(values) - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - for i := 0; i < 1000; i++ { - c.Deduplicate() - } - }() - - wg.Wait() -} - -// Ensure the CacheLoader can correctly load from a single segment, even if it's corrupted. -func TestCacheLoader_LoadSingle(t *testing.T) { - // Create a WAL segment. - dir := t.TempDir() - f := mustTempFile(dir) - w := NewWALSegmentWriter(f) - - p1 := NewValue(1, 1.1) - p2 := NewValue(1, int64(1)) - p3 := NewValue(1, true) - - values := map[string][]Value{ - "foo": {p1}, - "bar": {p2}, - "baz": {p3}, - } - - entry := &WriteWALEntry{ - Values: values, - } - - if err := w.Write(mustMarshalEntry(entry)); err != nil { - t.Fatal("write points", err) - } - - if err := w.Flush(); err != nil { - t.Fatalf("flush error: %v", err) - } - - // Load the cache using the segment. - cache := NewCache(1024, tsdb.EngineTags{}) - loader := NewCacheLoader([]string{f.Name()}) - if err := loader.Load(cache); err != nil { - t.Fatalf("failed to load cache: %s", err.Error()) - } - - // Check the cache. 
- if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) { - t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1}) - } - if values := cache.Values([]byte("bar")); !reflect.DeepEqual(values, Values{p2}) { - t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p2}) - } - if values := cache.Values([]byte("baz")); !reflect.DeepEqual(values, Values{p3}) { - t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p3}) - } - - // Corrupt the WAL segment. - if _, err := f.Write([]byte{1, 4, 0, 0, 0}); err != nil { - t.Fatalf("corrupt WAL segment: %s", err.Error()) - } - - // Reload the cache using the segment. - cache = NewCache(1024, tsdb.EngineTags{}) - loader = NewCacheLoader([]string{f.Name()}) - if err := loader.Load(cache); err != nil { - t.Fatalf("failed to load cache: %s", err.Error()) - } - - // Check the cache. - if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) { - t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1}) - } - if values := cache.Values([]byte("bar")); !reflect.DeepEqual(values, Values{p2}) { - t.Fatalf("cache key bar not as expected, got %v, exp %v", values, Values{p2}) - } - if values := cache.Values([]byte("baz")); !reflect.DeepEqual(values, Values{p3}) { - t.Fatalf("cache key baz not as expected, got %v, exp %v", values, Values{p3}) - } -} - -// Ensure the CacheLoader can correctly load from two segments, even if one is corrupted. -func TestCacheLoader_LoadDouble(t *testing.T) { - // Create a WAL segment. - dir := t.TempDir() - f1, f2 := mustTempFile(dir), mustTempFile(dir) - w1, w2 := NewWALSegmentWriter(f1), NewWALSegmentWriter(f2) - t.Cleanup(func() { - f1.Close() - f2.Close() - w1.close() - w2.close() - }) - - p1 := NewValue(1, 1.1) - p2 := NewValue(1, int64(1)) - p3 := NewValue(1, true) - p4 := NewValue(1, "string") - - // Write first and second segment. - - segmentWrite := func(w *WALSegmentWriter, values map[string][]Value) { - entry := &WriteWALEntry{ - Values: values, - } - if err := w1.Write(mustMarshalEntry(entry)); err != nil { - t.Fatal("write points", err) - } - if err := w1.Flush(); err != nil { - t.Fatalf("flush error: %v", err) - } - } - - values := map[string][]Value{ - "foo": {p1}, - "bar": {p2}, - } - segmentWrite(w1, values) - values = map[string][]Value{ - "baz": {p3}, - "qux": {p4}, - } - segmentWrite(w2, values) - - // Corrupt the first WAL segment. - if _, err := f1.Write([]byte{1, 4, 0, 0, 0}); err != nil { - t.Fatalf("corrupt WAL segment: %s", err.Error()) - } - - // Load the cache using the segments. - cache := NewCache(1024, tsdb.EngineTags{}) - loader := NewCacheLoader([]string{f1.Name(), f2.Name()}) - if err := loader.Load(cache); err != nil { - t.Fatalf("failed to load cache: %s", err.Error()) - } - - // Check the cache. 
- if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) { - t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1}) - } - if values := cache.Values([]byte("bar")); !reflect.DeepEqual(values, Values{p2}) { - t.Fatalf("cache key bar not as expected, got %v, exp %v", values, Values{p2}) - } - if values := cache.Values([]byte("baz")); !reflect.DeepEqual(values, Values{p3}) { - t.Fatalf("cache key baz not as expected, got %v, exp %v", values, Values{p3}) - } - if values := cache.Values([]byte("qux")); !reflect.DeepEqual(values, Values{p4}) { - t.Fatalf("cache key qux not as expected, got %v, exp %v", values, Values{p4}) - } -} - -// Ensure the CacheLoader can load deleted series -func TestCacheLoader_LoadDeleted(t *testing.T) { - // Create a WAL segment. - dir := t.TempDir() - f := mustTempFile(dir) - w := NewWALSegmentWriter(f) - t.Cleanup(func() { - f.Close() - w.close() - }) - - p1 := NewValue(1, 1.0) - p2 := NewValue(2, 2.0) - p3 := NewValue(3, 3.0) - - values := map[string][]Value{ - "foo": {p1, p2, p3}, - } - - entry := &WriteWALEntry{ - Values: values, - } - - if err := w.Write(mustMarshalEntry(entry)); err != nil { - t.Fatal("write points", err) - } - - if err := w.Flush(); err != nil { - t.Fatalf("flush error: %v", err) - } - - dentry := &DeleteRangeWALEntry{ - Keys: [][]byte{[]byte("foo")}, - Min: 2, - Max: 3, - } - - if err := w.Write(mustMarshalEntry(dentry)); err != nil { - t.Fatal("write points", err) - } - - if err := w.Flush(); err != nil { - t.Fatalf("flush error: %v", err) - } - - // Load the cache using the segment. - cache := NewCache(1024, tsdb.EngineTags{}) - loader := NewCacheLoader([]string{f.Name()}) - if err := loader.Load(cache); err != nil { - t.Fatalf("failed to load cache: %s", err.Error()) - } - - // Check the cache. - if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) { - t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1}) - } - - // Reload the cache using the segment. - cache = NewCache(1024, tsdb.EngineTags{}) - loader = NewCacheLoader([]string{f.Name()}) - if err := loader.Load(cache); err != nil { - t.Fatalf("failed to load cache: %s", err.Error()) - } - - // Check the cache. 
- if values := cache.Values([]byte("foo")); !reflect.DeepEqual(values, Values{p1}) { - t.Fatalf("cache key foo not as expected, got %v, exp %v", values, Values{p1}) - } -} - -func TestCache_Split(t *testing.T) { - v0 := NewValue(1, 1.0) - v1 := NewValue(2, 2.0) - v2 := NewValue(3, 3.0) - values := Values{v0, v1, v2} - valuesSize := uint64(v0.Size() + v1.Size() + v2.Size()) - - c := NewCache(0, tsdb.EngineTags{}) - - if err := c.Write([]byte("foo"), values); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - if err := c.Write([]byte("bar"), values); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - - if err := c.Write([]byte("baz"), values); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - - if n := c.Size(); n != 3*valuesSize+9 { - t.Fatalf("cache size incorrect after 3 writes, exp %d, got %d", 3*valuesSize*9, n) - } - - splits := c.Split(3) - keys := make(map[string]int) - for _, s := range splits { - for _, k := range s.Keys() { - keys[string(k)] = s.Values(k).Size() - } - } - - for _, key := range []string{"foo", "bar", "baz"} { - if _, ok := keys[key]; !ok { - t.Fatalf("missing key, exp %s, got %v", key, nil) - } - } -} - -func mustTempFile(dir string) *os.File { - f, err := os.CreateTemp(dir, "tsm1test") - if err != nil { - panic(fmt.Sprintf("failed to create temp file: %v", err)) - } - return f -} - -func mustMarshalEntry(entry WALEntry) (WalEntryType, []byte) { - bytes := make([]byte, 1024<<2) - - b, err := entry.Encode(bytes) - if err != nil { - panic(fmt.Sprintf("error encoding: %v", err)) - } - - return entry.Type(), snappy.Encode(b, b) -} - -// TestStore implements the storer interface and can be used to mock out a -// Cache's storer implementation. 
-type TestStore struct { - entryf func(key []byte) *entry - writef func(key []byte, values Values) (bool, error) - removef func(key []byte) - keysf func(sorted bool) [][]byte - applyf func(f func([]byte, *entry) error) error - applySerialf func(f func([]byte, *entry) error) error - resetf func() - splitf func(n int) []storer - countf func() int -} - -func NewTestStore() *TestStore { return &TestStore{} } -func (s *TestStore) entry(key []byte) *entry { return s.entryf(key) } -func (s *TestStore) write(key []byte, values Values) (bool, error) { return s.writef(key, values) } -func (s *TestStore) remove(key []byte) { s.removef(key) } -func (s *TestStore) keys(sorted bool) [][]byte { return s.keysf(sorted) } -func (s *TestStore) apply(f func([]byte, *entry) error) error { return s.applyf(f) } -func (s *TestStore) applySerial(f func([]byte, *entry) error) error { return s.applySerialf(f) } -func (s *TestStore) reset() { s.resetf() } -func (s *TestStore) split(n int) []storer { return s.splitf(n) } -func (s *TestStore) count() int { return s.countf() } - -var fvSize = uint64(NewValue(1, float64(1)).Size()) - -func BenchmarkCacheFloatEntries(b *testing.B) { - cache := NewCache(uint64(b.N)*fvSize, tsdb.EngineTags{}) - vals := make([][]Value, b.N) - for i := 0; i < b.N; i++ { - vals[i] = []Value{NewValue(1, float64(i))} - } - b.ResetTimer() - - for i := 0; i < b.N; i++ { - if err := cache.Write([]byte("test"), vals[i]); err != nil { - b.Fatal("err:", err, "i:", i, "N:", b.N) - } - } -} - -type points struct { - key []byte - vals []Value -} - -func BenchmarkCacheParallelFloatEntries(b *testing.B) { - c := b.N * runtime.GOMAXPROCS(0) - cache := NewCache(uint64(c)*fvSize*10, tsdb.EngineTags{}) - vals := make([]points, c) - for i := 0; i < c; i++ { - v := make([]Value, 10) - for j := 0; j < 10; j++ { - v[j] = NewValue(1, float64(i+j)) - } - vals[i] = points{key: []byte(fmt.Sprintf("cpu%v", rand.Intn(20))), vals: v} - } - i := int32(-1) - b.ResetTimer() - - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - j := atomic.AddInt32(&i, 1) - v := vals[j] - if err := cache.Write(v.key, v.vals); err != nil { - b.Fatal("err:", err, "j:", j, "N:", b.N) - } - } - }) -} - -func BenchmarkEntry_add(b *testing.B) { - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - b.StopTimer() - values := make([]Value, 10) - for i := 0; i < 10; i++ { - values[i] = NewValue(int64(i+1), float64(i)) - } - - otherValues := make([]Value, 10) - for i := 0; i < 10; i++ { - otherValues[i] = NewValue(1, float64(i)) - } - - entry, err := newEntryValues(values) - if err != nil { - b.Fatal(err) - } - - b.StartTimer() - if err := entry.add(otherValues); err != nil { - b.Fatal(err) - } - } - }) -} diff --git a/tsdb/engine/tsm1/compact.gen.go b/tsdb/engine/tsm1/compact.gen.go deleted file mode 100644 index c7890e7fcd3..00000000000 --- a/tsdb/engine/tsm1/compact.gen.go +++ /dev/null @@ -1,1133 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! -// Source: compact.gen.go.tmpl - -package tsm1 - -import ( - "sort" - - "github.com/influxdata/influxdb/v2/tsdb" -) - -// merge combines the next set of blocks into merged blocks. 
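A simplified sketch of the decision the generated mergeFloat below (and its sibling merge* methods) makes before combining blocks: once the blocks are ordered, a full decode-and-dedup pass is only required when a block overlaps its predecessor's time range, carries tombstones, or has already been partially read. The blockMeta type here is an assumed stand-in, not the package's block type.

```go
package main

import "fmt"

type blockMeta struct {
	minTime, maxTime int64
	tombstones       int
	partiallyRead    bool
}

// needsDedup reports whether sorted blocks must be decoded and deduplicated,
// mirroring the checks at the top of the merge* methods.
func needsDedup(blocks []blockMeta) bool {
	if len(blocks) == 0 {
		return false
	}
	if blocks[0].tombstones > 0 || blocks[0].partiallyRead {
		return true
	}
	for i := 1; i < len(blocks); i++ {
		prev, cur := blocks[i-1], blocks[i]
		overlaps := cur.minTime <= prev.maxTime && cur.maxTime >= prev.minTime
		if overlaps || cur.tombstones > 0 || cur.partiallyRead {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(needsDedup([]blockMeta{{minTime: 0, maxTime: 10}, {minTime: 5, maxTime: 20}}))  // true: overlap
	fmt.Println(needsDedup([]blockMeta{{minTime: 0, maxTime: 10}, {minTime: 11, maxTime: 20}})) // false: disjoint
}
```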
-func (k *tsmBatchKeyIterator) mergeFloat() { - // No blocks left, or pending merged values, we're done - if len(k.blocks) == 0 && len(k.merged) == 0 && k.mergedFloatValues.Len() == 0 { - return - } - - sort.Stable(k.blocks) - - dedup := k.mergedFloatValues.Len() != 0 - if len(k.blocks) > 0 && !dedup { - // If we have more than one block or any partially tombstoned blocks, we many need to dedup - dedup = len(k.blocks[0].tombstones) > 0 || k.blocks[0].partiallyRead() - - // Quickly scan each block to see if any overlap with the prior block, if they overlap then - // we need to dedup as there may be duplicate points now - for i := 1; !dedup && i < len(k.blocks); i++ { - dedup = k.blocks[i].partiallyRead() || - k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) || - len(k.blocks[i].tombstones) > 0 - } - - } - - k.merged = k.combineFloat(dedup) -} - -// combine returns a new set of blocks using the current blocks in the buffers. If dedup -// is true, all the blocks will be decoded, dedup and sorted in in order. If dedup is false, -// only blocks that are smaller than the chunk size will be decoded and combined. -func (k *tsmBatchKeyIterator) combineFloat(dedup bool) blocks { - if dedup { - for k.mergedFloatValues.Len() < k.size && len(k.blocks) > 0 { - for len(k.blocks) > 0 && k.blocks[0].read() { - k.blocks = k.blocks[1:] - } - - if len(k.blocks) == 0 { - break - } - first := k.blocks[0] - minTime := first.minTime - maxTime := first.maxTime - - // Adjust the min time to the start of any overlapping blocks. - for i := 0; i < len(k.blocks); i++ { - if k.blocks[i].overlapsTimeRange(minTime, maxTime) && !k.blocks[i].read() { - if k.blocks[i].minTime < minTime { - minTime = k.blocks[i].minTime - } - if k.blocks[i].maxTime > minTime && k.blocks[i].maxTime < maxTime { - maxTime = k.blocks[i].maxTime - } - } - } - - // We have some overlapping blocks so decode all, append in order and then dedup - for i := 0; i < len(k.blocks); i++ { - if !k.blocks[i].overlapsTimeRange(minTime, maxTime) || k.blocks[i].read() { - continue - } - - var v tsdb.FloatArray - var err error - if err = DecodeFloatArrayBlock(k.blocks[i].b, &v); err != nil { - k.handleDecodeError(err, "float") - return nil - } - - // Invariant: v.MaxTime() == k.blocks[i].maxTime - if k.blocks[i].maxTime != v.MaxTime() { - if maxTime == k.blocks[i].maxTime { - maxTime = v.MaxTime() - } - k.blocks[i].maxTime = v.MaxTime() - } - - // Remove values we already read - v.Exclude(k.blocks[i].readMin, k.blocks[i].readMax) - - // Filter out only the values for overlapping block - v.Include(minTime, maxTime) - if v.Len() > 0 { - // Record that we read a subset of the block - k.blocks[i].markRead(v.MinTime(), v.MaxTime()) - } - - // Apply each tombstone to the block - for _, ts := range k.blocks[i].tombstones { - v.Exclude(ts.Min, ts.Max) - } - - k.mergedFloatValues.Merge(&v) - } - } - - // Since we combined multiple blocks, we could have more values than we should put into - // a single block. We need to chunk them up into groups and re-encode them. 
- return k.chunkFloat(nil) - } - var i int - - for ; i < len(k.blocks); i++ { - - // skip this block if it's values were already read - if k.blocks[i].read() { - continue - } - - // if this block is already full, just add it as is - count, err := BlockCount(k.blocks[i].b) - if err != nil { - k.AppendError(err) - continue - } - - if count < k.size { - break - } - - k.merged = append(k.merged, k.blocks[i]) - } - - if k.fast { - for i < len(k.blocks) { - // skip this block if it's values were already read - if k.blocks[i].read() { - i++ - continue - } - - k.merged = append(k.merged, k.blocks[i]) - i++ - } - } - - // if we only have 1 blocks left, just append it as is and avoid decoding/recoding - if i == len(k.blocks)-1 { - if !k.blocks[i].read() { - k.merged = append(k.merged, k.blocks[i]) - } - i++ - } - - // The remaining blocks can be combined and we know that they do not overlap and - // so we can just append each, sort and re-encode. - for i < len(k.blocks) && k.mergedFloatValues.Len() < k.size { - if k.blocks[i].read() { - i++ - continue - } - - var v tsdb.FloatArray - if err := DecodeFloatArrayBlock(k.blocks[i].b, &v); err != nil { - k.handleDecodeError(err, "float") - return nil - } - - // Invariant: v.MaxTime() == k.blocks[i].maxTime - if k.blocks[i].maxTime != v.MaxTime() { - k.blocks[i].maxTime = v.MaxTime() - } - - // Apply each tombstone to the block - for _, ts := range k.blocks[i].tombstones { - v.Exclude(ts.Min, ts.Max) - } - - k.blocks[i].markRead(k.blocks[i].minTime, k.blocks[i].maxTime) - - k.mergedFloatValues.Merge(&v) - i++ - } - - k.blocks = k.blocks[i:] - - return k.chunkFloat(k.merged) -} - -func (k *tsmBatchKeyIterator) chunkFloat(dst blocks) blocks { - if k.mergedFloatValues.Len() > k.size { - var values tsdb.FloatArray - values.Timestamps = k.mergedFloatValues.Timestamps[:k.size] - minTime, maxTime := values.Timestamps[0], values.Timestamps[len(values.Timestamps)-1] - values.Values = k.mergedFloatValues.Values[:k.size] - - cb, err := EncodeFloatArrayBlock(&values, nil) // TODO(edd): pool this buffer - if err != nil { - k.handleEncodeError(err, "float") - return nil - } - - dst = append(dst, &block{ - minTime: minTime, - maxTime: maxTime, - key: k.key, - b: cb, - }) - k.mergedFloatValues.Timestamps = k.mergedFloatValues.Timestamps[k.size:] - k.mergedFloatValues.Values = k.mergedFloatValues.Values[k.size:] - return dst - } - - // Re-encode the remaining values into the last block - if k.mergedFloatValues.Len() > 0 { - minTime, maxTime := k.mergedFloatValues.Timestamps[0], k.mergedFloatValues.Timestamps[len(k.mergedFloatValues.Timestamps)-1] - cb, err := EncodeFloatArrayBlock(k.mergedFloatValues, nil) // TODO(edd): pool this buffer - if err != nil { - k.handleEncodeError(err, "float") - return nil - } - - dst = append(dst, &block{ - minTime: minTime, - maxTime: maxTime, - key: k.key, - b: cb, - }) - k.mergedFloatValues.Timestamps = k.mergedFloatValues.Timestamps[:0] - k.mergedFloatValues.Values = k.mergedFloatValues.Values[:0] - } - return dst -} - -// merge combines the next set of blocks into merged blocks. 
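chunkFloat above cuts at most one full block per call: when the merged buffer holds more than k.size points, the first k.size are encoded into a new block and the remainder stays buffered for the next pass; otherwise whatever is left becomes a final, possibly smaller block. A minimal sketch of that slicing, with plain int64 timestamps standing in for the tsdb array types:

```go
package main

import "fmt"

// chunkOnce mimics the slicing step: cut one full block of `size` points when
// possible, otherwise flush whatever remains.
func chunkOnce(timestamps []int64, size int) (block, rest []int64) {
	if len(timestamps) > size {
		return timestamps[:size], timestamps[size:]
	}
	return timestamps, nil
}

func main() {
	pending := []int64{1, 2, 3, 4, 5, 6, 7}
	for len(pending) > 0 {
		var block []int64
		block, pending = chunkOnce(pending, 3)
		fmt.Println(block) // [1 2 3], then [4 5 6], then [7]
	}
}
```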
-func (k *tsmBatchKeyIterator) mergeInteger() { - // No blocks left, or pending merged values, we're done - if len(k.blocks) == 0 && len(k.merged) == 0 && k.mergedIntegerValues.Len() == 0 { - return - } - - sort.Stable(k.blocks) - - dedup := k.mergedIntegerValues.Len() != 0 - if len(k.blocks) > 0 && !dedup { - // If we have more than one block or any partially tombstoned blocks, we many need to dedup - dedup = len(k.blocks[0].tombstones) > 0 || k.blocks[0].partiallyRead() - - // Quickly scan each block to see if any overlap with the prior block, if they overlap then - // we need to dedup as there may be duplicate points now - for i := 1; !dedup && i < len(k.blocks); i++ { - dedup = k.blocks[i].partiallyRead() || - k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) || - len(k.blocks[i].tombstones) > 0 - } - - } - - k.merged = k.combineInteger(dedup) -} - -// combine returns a new set of blocks using the current blocks in the buffers. If dedup -// is true, all the blocks will be decoded, dedup and sorted in in order. If dedup is false, -// only blocks that are smaller than the chunk size will be decoded and combined. -func (k *tsmBatchKeyIterator) combineInteger(dedup bool) blocks { - if dedup { - for k.mergedIntegerValues.Len() < k.size && len(k.blocks) > 0 { - for len(k.blocks) > 0 && k.blocks[0].read() { - k.blocks = k.blocks[1:] - } - - if len(k.blocks) == 0 { - break - } - first := k.blocks[0] - minTime := first.minTime - maxTime := first.maxTime - - // Adjust the min time to the start of any overlapping blocks. - for i := 0; i < len(k.blocks); i++ { - if k.blocks[i].overlapsTimeRange(minTime, maxTime) && !k.blocks[i].read() { - if k.blocks[i].minTime < minTime { - minTime = k.blocks[i].minTime - } - if k.blocks[i].maxTime > minTime && k.blocks[i].maxTime < maxTime { - maxTime = k.blocks[i].maxTime - } - } - } - - // We have some overlapping blocks so decode all, append in order and then dedup - for i := 0; i < len(k.blocks); i++ { - if !k.blocks[i].overlapsTimeRange(minTime, maxTime) || k.blocks[i].read() { - continue - } - - var v tsdb.IntegerArray - var err error - if err = DecodeIntegerArrayBlock(k.blocks[i].b, &v); err != nil { - k.handleDecodeError(err, "integer") - return nil - } - - // Invariant: v.MaxTime() == k.blocks[i].maxTime - if k.blocks[i].maxTime != v.MaxTime() { - if maxTime == k.blocks[i].maxTime { - maxTime = v.MaxTime() - } - k.blocks[i].maxTime = v.MaxTime() - } - - // Remove values we already read - v.Exclude(k.blocks[i].readMin, k.blocks[i].readMax) - - // Filter out only the values for overlapping block - v.Include(minTime, maxTime) - if v.Len() > 0 { - // Record that we read a subset of the block - k.blocks[i].markRead(v.MinTime(), v.MaxTime()) - } - - // Apply each tombstone to the block - for _, ts := range k.blocks[i].tombstones { - v.Exclude(ts.Min, ts.Max) - } - - k.mergedIntegerValues.Merge(&v) - } - } - - // Since we combined multiple blocks, we could have more values than we should put into - // a single block. We need to chunk them up into groups and re-encode them. 
- return k.chunkInteger(nil) - } - var i int - - for ; i < len(k.blocks); i++ { - - // skip this block if it's values were already read - if k.blocks[i].read() { - continue - } - - // if this block is already full, just add it as is - count, err := BlockCount(k.blocks[i].b) - if err != nil { - k.AppendError(err) - continue - } - - if count < k.size { - break - } - - k.merged = append(k.merged, k.blocks[i]) - } - - if k.fast { - for i < len(k.blocks) { - // skip this block if it's values were already read - if k.blocks[i].read() { - i++ - continue - } - - k.merged = append(k.merged, k.blocks[i]) - i++ - } - } - - // if we only have 1 blocks left, just append it as is and avoid decoding/recoding - if i == len(k.blocks)-1 { - if !k.blocks[i].read() { - k.merged = append(k.merged, k.blocks[i]) - } - i++ - } - - // The remaining blocks can be combined and we know that they do not overlap and - // so we can just append each, sort and re-encode. - for i < len(k.blocks) && k.mergedIntegerValues.Len() < k.size { - if k.blocks[i].read() { - i++ - continue - } - - var v tsdb.IntegerArray - if err := DecodeIntegerArrayBlock(k.blocks[i].b, &v); err != nil { - k.handleDecodeError(err, "integer") - return nil - } - - // Invariant: v.MaxTime() == k.blocks[i].maxTime - if k.blocks[i].maxTime != v.MaxTime() { - k.blocks[i].maxTime = v.MaxTime() - } - - // Apply each tombstone to the block - for _, ts := range k.blocks[i].tombstones { - v.Exclude(ts.Min, ts.Max) - } - - k.blocks[i].markRead(k.blocks[i].minTime, k.blocks[i].maxTime) - - k.mergedIntegerValues.Merge(&v) - i++ - } - - k.blocks = k.blocks[i:] - - return k.chunkInteger(k.merged) -} - -func (k *tsmBatchKeyIterator) chunkInteger(dst blocks) blocks { - if k.mergedIntegerValues.Len() > k.size { - var values tsdb.IntegerArray - values.Timestamps = k.mergedIntegerValues.Timestamps[:k.size] - minTime, maxTime := values.Timestamps[0], values.Timestamps[len(values.Timestamps)-1] - values.Values = k.mergedIntegerValues.Values[:k.size] - - cb, err := EncodeIntegerArrayBlock(&values, nil) // TODO(edd): pool this buffer - if err != nil { - k.handleEncodeError(err, "integer") - return nil - } - - dst = append(dst, &block{ - minTime: minTime, - maxTime: maxTime, - key: k.key, - b: cb, - }) - k.mergedIntegerValues.Timestamps = k.mergedIntegerValues.Timestamps[k.size:] - k.mergedIntegerValues.Values = k.mergedIntegerValues.Values[k.size:] - return dst - } - - // Re-encode the remaining values into the last block - if k.mergedIntegerValues.Len() > 0 { - minTime, maxTime := k.mergedIntegerValues.Timestamps[0], k.mergedIntegerValues.Timestamps[len(k.mergedIntegerValues.Timestamps)-1] - cb, err := EncodeIntegerArrayBlock(k.mergedIntegerValues, nil) // TODO(edd): pool this buffer - if err != nil { - k.handleEncodeError(err, "integer") - return nil - } - - dst = append(dst, &block{ - minTime: minTime, - maxTime: maxTime, - key: k.key, - b: cb, - }) - k.mergedIntegerValues.Timestamps = k.mergedIntegerValues.Timestamps[:0] - k.mergedIntegerValues.Values = k.mergedIntegerValues.Values[:0] - } - return dst -} - -// merge combines the next set of blocks into merged blocks. 
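Each combine* variant also drops tombstoned points before merging a decoded block (the v.Exclude(ts.Min, ts.Max) calls above). A simplified sketch of that filtering over plain timestamps, assuming an inclusive [Min, Max] tombstone range:

```go
package main

import "fmt"

type tombstone struct{ Min, Max int64 }

// exclude removes every timestamp that falls inside the tombstoned range.
func exclude(timestamps []int64, ts tombstone) []int64 {
	out := timestamps[:0]
	for _, t := range timestamps {
		if t < ts.Min || t > ts.Max {
			out = append(out, t)
		}
	}
	return out
}

func main() {
	fmt.Println(exclude([]int64{1, 2, 3, 4, 5}, tombstone{Min: 2, Max: 4})) // [1 5]
}
```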
-func (k *tsmBatchKeyIterator) mergeUnsigned() { - // No blocks left, or pending merged values, we're done - if len(k.blocks) == 0 && len(k.merged) == 0 && k.mergedUnsignedValues.Len() == 0 { - return - } - - sort.Stable(k.blocks) - - dedup := k.mergedUnsignedValues.Len() != 0 - if len(k.blocks) > 0 && !dedup { - // If we have more than one block or any partially tombstoned blocks, we many need to dedup - dedup = len(k.blocks[0].tombstones) > 0 || k.blocks[0].partiallyRead() - - // Quickly scan each block to see if any overlap with the prior block, if they overlap then - // we need to dedup as there may be duplicate points now - for i := 1; !dedup && i < len(k.blocks); i++ { - dedup = k.blocks[i].partiallyRead() || - k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) || - len(k.blocks[i].tombstones) > 0 - } - - } - - k.merged = k.combineUnsigned(dedup) -} - -// combine returns a new set of blocks using the current blocks in the buffers. If dedup -// is true, all the blocks will be decoded, dedup and sorted in in order. If dedup is false, -// only blocks that are smaller than the chunk size will be decoded and combined. -func (k *tsmBatchKeyIterator) combineUnsigned(dedup bool) blocks { - if dedup { - for k.mergedUnsignedValues.Len() < k.size && len(k.blocks) > 0 { - for len(k.blocks) > 0 && k.blocks[0].read() { - k.blocks = k.blocks[1:] - } - - if len(k.blocks) == 0 { - break - } - first := k.blocks[0] - minTime := first.minTime - maxTime := first.maxTime - - // Adjust the min time to the start of any overlapping blocks. - for i := 0; i < len(k.blocks); i++ { - if k.blocks[i].overlapsTimeRange(minTime, maxTime) && !k.blocks[i].read() { - if k.blocks[i].minTime < minTime { - minTime = k.blocks[i].minTime - } - if k.blocks[i].maxTime > minTime && k.blocks[i].maxTime < maxTime { - maxTime = k.blocks[i].maxTime - } - } - } - - // We have some overlapping blocks so decode all, append in order and then dedup - for i := 0; i < len(k.blocks); i++ { - if !k.blocks[i].overlapsTimeRange(minTime, maxTime) || k.blocks[i].read() { - continue - } - - var v tsdb.UnsignedArray - var err error - if err = DecodeUnsignedArrayBlock(k.blocks[i].b, &v); err != nil { - k.handleDecodeError(err, "unsigned") - return nil - } - - // Invariant: v.MaxTime() == k.blocks[i].maxTime - if k.blocks[i].maxTime != v.MaxTime() { - if maxTime == k.blocks[i].maxTime { - maxTime = v.MaxTime() - } - k.blocks[i].maxTime = v.MaxTime() - } - - // Remove values we already read - v.Exclude(k.blocks[i].readMin, k.blocks[i].readMax) - - // Filter out only the values for overlapping block - v.Include(minTime, maxTime) - if v.Len() > 0 { - // Record that we read a subset of the block - k.blocks[i].markRead(v.MinTime(), v.MaxTime()) - } - - // Apply each tombstone to the block - for _, ts := range k.blocks[i].tombstones { - v.Exclude(ts.Min, ts.Max) - } - - k.mergedUnsignedValues.Merge(&v) - } - } - - // Since we combined multiple blocks, we could have more values than we should put into - // a single block. We need to chunk them up into groups and re-encode them. 
- return k.chunkUnsigned(nil) - } - var i int - - for ; i < len(k.blocks); i++ { - - // skip this block if it's values were already read - if k.blocks[i].read() { - continue - } - - // if this block is already full, just add it as is - count, err := BlockCount(k.blocks[i].b) - if err != nil { - k.AppendError(err) - continue - } - - if count < k.size { - break - } - - k.merged = append(k.merged, k.blocks[i]) - } - - if k.fast { - for i < len(k.blocks) { - // skip this block if it's values were already read - if k.blocks[i].read() { - i++ - continue - } - - k.merged = append(k.merged, k.blocks[i]) - i++ - } - } - - // if we only have 1 blocks left, just append it as is and avoid decoding/recoding - if i == len(k.blocks)-1 { - if !k.blocks[i].read() { - k.merged = append(k.merged, k.blocks[i]) - } - i++ - } - - // The remaining blocks can be combined and we know that they do not overlap and - // so we can just append each, sort and re-encode. - for i < len(k.blocks) && k.mergedUnsignedValues.Len() < k.size { - if k.blocks[i].read() { - i++ - continue - } - - var v tsdb.UnsignedArray - if err := DecodeUnsignedArrayBlock(k.blocks[i].b, &v); err != nil { - k.handleDecodeError(err, "unsigned") - return nil - } - - // Invariant: v.MaxTime() == k.blocks[i].maxTime - if k.blocks[i].maxTime != v.MaxTime() { - k.blocks[i].maxTime = v.MaxTime() - } - - // Apply each tombstone to the block - for _, ts := range k.blocks[i].tombstones { - v.Exclude(ts.Min, ts.Max) - } - - k.blocks[i].markRead(k.blocks[i].minTime, k.blocks[i].maxTime) - - k.mergedUnsignedValues.Merge(&v) - i++ - } - - k.blocks = k.blocks[i:] - - return k.chunkUnsigned(k.merged) -} - -func (k *tsmBatchKeyIterator) chunkUnsigned(dst blocks) blocks { - if k.mergedUnsignedValues.Len() > k.size { - var values tsdb.UnsignedArray - values.Timestamps = k.mergedUnsignedValues.Timestamps[:k.size] - minTime, maxTime := values.Timestamps[0], values.Timestamps[len(values.Timestamps)-1] - values.Values = k.mergedUnsignedValues.Values[:k.size] - - cb, err := EncodeUnsignedArrayBlock(&values, nil) // TODO(edd): pool this buffer - if err != nil { - k.handleEncodeError(err, "unsigned") - return nil - } - - dst = append(dst, &block{ - minTime: minTime, - maxTime: maxTime, - key: k.key, - b: cb, - }) - k.mergedUnsignedValues.Timestamps = k.mergedUnsignedValues.Timestamps[k.size:] - k.mergedUnsignedValues.Values = k.mergedUnsignedValues.Values[k.size:] - return dst - } - - // Re-encode the remaining values into the last block - if k.mergedUnsignedValues.Len() > 0 { - minTime, maxTime := k.mergedUnsignedValues.Timestamps[0], k.mergedUnsignedValues.Timestamps[len(k.mergedUnsignedValues.Timestamps)-1] - cb, err := EncodeUnsignedArrayBlock(k.mergedUnsignedValues, nil) // TODO(edd): pool this buffer - if err != nil { - k.handleEncodeError(err, "unsigned") - return nil - } - - dst = append(dst, &block{ - minTime: minTime, - maxTime: maxTime, - key: k.key, - b: cb, - }) - k.mergedUnsignedValues.Timestamps = k.mergedUnsignedValues.Timestamps[:0] - k.mergedUnsignedValues.Values = k.mergedUnsignedValues.Values[:0] - } - return dst -} - -// merge combines the next set of blocks into merged blocks. 
-func (k *tsmBatchKeyIterator) mergeString() { - // No blocks left, or pending merged values, we're done - if len(k.blocks) == 0 && len(k.merged) == 0 && k.mergedStringValues.Len() == 0 { - return - } - - sort.Stable(k.blocks) - - dedup := k.mergedStringValues.Len() != 0 - if len(k.blocks) > 0 && !dedup { - // If we have more than one block or any partially tombstoned blocks, we many need to dedup - dedup = len(k.blocks[0].tombstones) > 0 || k.blocks[0].partiallyRead() - - // Quickly scan each block to see if any overlap with the prior block, if they overlap then - // we need to dedup as there may be duplicate points now - for i := 1; !dedup && i < len(k.blocks); i++ { - dedup = k.blocks[i].partiallyRead() || - k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) || - len(k.blocks[i].tombstones) > 0 - } - - } - - k.merged = k.combineString(dedup) -} - -// combine returns a new set of blocks using the current blocks in the buffers. If dedup -// is true, all the blocks will be decoded, dedup and sorted in in order. If dedup is false, -// only blocks that are smaller than the chunk size will be decoded and combined. -func (k *tsmBatchKeyIterator) combineString(dedup bool) blocks { - if dedup { - for k.mergedStringValues.Len() < k.size && len(k.blocks) > 0 { - for len(k.blocks) > 0 && k.blocks[0].read() { - k.blocks = k.blocks[1:] - } - - if len(k.blocks) == 0 { - break - } - first := k.blocks[0] - minTime := first.minTime - maxTime := first.maxTime - - // Adjust the min time to the start of any overlapping blocks. - for i := 0; i < len(k.blocks); i++ { - if k.blocks[i].overlapsTimeRange(minTime, maxTime) && !k.blocks[i].read() { - if k.blocks[i].minTime < minTime { - minTime = k.blocks[i].minTime - } - if k.blocks[i].maxTime > minTime && k.blocks[i].maxTime < maxTime { - maxTime = k.blocks[i].maxTime - } - } - } - - // We have some overlapping blocks so decode all, append in order and then dedup - for i := 0; i < len(k.blocks); i++ { - if !k.blocks[i].overlapsTimeRange(minTime, maxTime) || k.blocks[i].read() { - continue - } - - var v tsdb.StringArray - var err error - if err = DecodeStringArrayBlock(k.blocks[i].b, &v); err != nil { - k.handleDecodeError(err, "string") - return nil - } - - // Invariant: v.MaxTime() == k.blocks[i].maxTime - if k.blocks[i].maxTime != v.MaxTime() { - if maxTime == k.blocks[i].maxTime { - maxTime = v.MaxTime() - } - k.blocks[i].maxTime = v.MaxTime() - } - - // Remove values we already read - v.Exclude(k.blocks[i].readMin, k.blocks[i].readMax) - - // Filter out only the values for overlapping block - v.Include(minTime, maxTime) - if v.Len() > 0 { - // Record that we read a subset of the block - k.blocks[i].markRead(v.MinTime(), v.MaxTime()) - } - - // Apply each tombstone to the block - for _, ts := range k.blocks[i].tombstones { - v.Exclude(ts.Min, ts.Max) - } - - k.mergedStringValues.Merge(&v) - } - } - - // Since we combined multiple blocks, we could have more values than we should put into - // a single block. We need to chunk them up into groups and re-encode them. 
- return k.chunkString(nil) - } - var i int - - for ; i < len(k.blocks); i++ { - - // skip this block if it's values were already read - if k.blocks[i].read() { - continue - } - - // if this block is already full, just add it as is - count, err := BlockCount(k.blocks[i].b) - if err != nil { - k.AppendError(err) - continue - } - - if count < k.size { - break - } - - k.merged = append(k.merged, k.blocks[i]) - } - - if k.fast { - for i < len(k.blocks) { - // skip this block if it's values were already read - if k.blocks[i].read() { - i++ - continue - } - - k.merged = append(k.merged, k.blocks[i]) - i++ - } - } - - // if we only have 1 blocks left, just append it as is and avoid decoding/recoding - if i == len(k.blocks)-1 { - if !k.blocks[i].read() { - k.merged = append(k.merged, k.blocks[i]) - } - i++ - } - - // The remaining blocks can be combined and we know that they do not overlap and - // so we can just append each, sort and re-encode. - for i < len(k.blocks) && k.mergedStringValues.Len() < k.size { - if k.blocks[i].read() { - i++ - continue - } - - var v tsdb.StringArray - if err := DecodeStringArrayBlock(k.blocks[i].b, &v); err != nil { - k.handleDecodeError(err, "string") - return nil - } - - // Invariant: v.MaxTime() == k.blocks[i].maxTime - if k.blocks[i].maxTime != v.MaxTime() { - k.blocks[i].maxTime = v.MaxTime() - } - - // Apply each tombstone to the block - for _, ts := range k.blocks[i].tombstones { - v.Exclude(ts.Min, ts.Max) - } - - k.blocks[i].markRead(k.blocks[i].minTime, k.blocks[i].maxTime) - - k.mergedStringValues.Merge(&v) - i++ - } - - k.blocks = k.blocks[i:] - - return k.chunkString(k.merged) -} - -func (k *tsmBatchKeyIterator) chunkString(dst blocks) blocks { - if k.mergedStringValues.Len() > k.size { - var values tsdb.StringArray - values.Timestamps = k.mergedStringValues.Timestamps[:k.size] - minTime, maxTime := values.Timestamps[0], values.Timestamps[len(values.Timestamps)-1] - values.Values = k.mergedStringValues.Values[:k.size] - - cb, err := EncodeStringArrayBlock(&values, nil) // TODO(edd): pool this buffer - if err != nil { - k.handleEncodeError(err, "string") - return nil - } - - dst = append(dst, &block{ - minTime: minTime, - maxTime: maxTime, - key: k.key, - b: cb, - }) - k.mergedStringValues.Timestamps = k.mergedStringValues.Timestamps[k.size:] - k.mergedStringValues.Values = k.mergedStringValues.Values[k.size:] - return dst - } - - // Re-encode the remaining values into the last block - if k.mergedStringValues.Len() > 0 { - minTime, maxTime := k.mergedStringValues.Timestamps[0], k.mergedStringValues.Timestamps[len(k.mergedStringValues.Timestamps)-1] - cb, err := EncodeStringArrayBlock(k.mergedStringValues, nil) // TODO(edd): pool this buffer - if err != nil { - k.handleEncodeError(err, "string") - return nil - } - - dst = append(dst, &block{ - minTime: minTime, - maxTime: maxTime, - key: k.key, - b: cb, - }) - k.mergedStringValues.Timestamps = k.mergedStringValues.Timestamps[:0] - k.mergedStringValues.Values = k.mergedStringValues.Values[:0] - } - return dst -} - -// merge combines the next set of blocks into merged blocks. 
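When no dedup is needed, the `combine*` functions keep any block that is already full (at least `k.size` points, as reported by `BlockCount`) as raw encoded bytes and only decode the undersized remainder for re-packing; with `k.fast` set, even the undersized blocks are passed through untouched. A hedged sketch of that triage, where `rawBlock.count` stands in for what `BlockCount` reads from the encoded block header, and where the real loop is order-preserving and stops at the first undersized block rather than partitioning arbitrarily:

```go
package main

import "fmt"

// rawBlock is a stand-in for an encoded block; count plays the role of the
// point count BlockCount reads from the encoded block header.
type rawBlock struct {
	count int
}

// triage splits blocks into ones that can be reused without decoding
// (already full) and ones that should be decoded and recombined
// (undersized). With fast=true everything is passed through as-is.
func triage(blocks []rawBlock, size int, fast bool) (asIs, recombine []rawBlock) {
	for _, b := range blocks {
		if fast || b.count >= size {
			asIs = append(asIs, b)
			continue
		}
		recombine = append(recombine, b)
	}
	return asIs, recombine
}

func main() {
	blocks := []rawBlock{{1000}, {1000}, {250}, {40}}
	full, small := triage(blocks, 1000, false)
	fmt.Println(len(full), len(small)) // 2 2
}
```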
-func (k *tsmBatchKeyIterator) mergeBoolean() { - // No blocks left, or pending merged values, we're done - if len(k.blocks) == 0 && len(k.merged) == 0 && k.mergedBooleanValues.Len() == 0 { - return - } - - sort.Stable(k.blocks) - - dedup := k.mergedBooleanValues.Len() != 0 - if len(k.blocks) > 0 && !dedup { - // If we have more than one block or any partially tombstoned blocks, we many need to dedup - dedup = len(k.blocks[0].tombstones) > 0 || k.blocks[0].partiallyRead() - - // Quickly scan each block to see if any overlap with the prior block, if they overlap then - // we need to dedup as there may be duplicate points now - for i := 1; !dedup && i < len(k.blocks); i++ { - dedup = k.blocks[i].partiallyRead() || - k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) || - len(k.blocks[i].tombstones) > 0 - } - - } - - k.merged = k.combineBoolean(dedup) -} - -// combine returns a new set of blocks using the current blocks in the buffers. If dedup -// is true, all the blocks will be decoded, dedup and sorted in in order. If dedup is false, -// only blocks that are smaller than the chunk size will be decoded and combined. -func (k *tsmBatchKeyIterator) combineBoolean(dedup bool) blocks { - if dedup { - for k.mergedBooleanValues.Len() < k.size && len(k.blocks) > 0 { - for len(k.blocks) > 0 && k.blocks[0].read() { - k.blocks = k.blocks[1:] - } - - if len(k.blocks) == 0 { - break - } - first := k.blocks[0] - minTime := first.minTime - maxTime := first.maxTime - - // Adjust the min time to the start of any overlapping blocks. - for i := 0; i < len(k.blocks); i++ { - if k.blocks[i].overlapsTimeRange(minTime, maxTime) && !k.blocks[i].read() { - if k.blocks[i].minTime < minTime { - minTime = k.blocks[i].minTime - } - if k.blocks[i].maxTime > minTime && k.blocks[i].maxTime < maxTime { - maxTime = k.blocks[i].maxTime - } - } - } - - // We have some overlapping blocks so decode all, append in order and then dedup - for i := 0; i < len(k.blocks); i++ { - if !k.blocks[i].overlapsTimeRange(minTime, maxTime) || k.blocks[i].read() { - continue - } - - var v tsdb.BooleanArray - var err error - if err = DecodeBooleanArrayBlock(k.blocks[i].b, &v); err != nil { - k.handleDecodeError(err, "boolean") - return nil - } - - // Invariant: v.MaxTime() == k.blocks[i].maxTime - if k.blocks[i].maxTime != v.MaxTime() { - if maxTime == k.blocks[i].maxTime { - maxTime = v.MaxTime() - } - k.blocks[i].maxTime = v.MaxTime() - } - - // Remove values we already read - v.Exclude(k.blocks[i].readMin, k.blocks[i].readMax) - - // Filter out only the values for overlapping block - v.Include(minTime, maxTime) - if v.Len() > 0 { - // Record that we read a subset of the block - k.blocks[i].markRead(v.MinTime(), v.MaxTime()) - } - - // Apply each tombstone to the block - for _, ts := range k.blocks[i].tombstones { - v.Exclude(ts.Min, ts.Max) - } - - k.mergedBooleanValues.Merge(&v) - } - } - - // Since we combined multiple blocks, we could have more values than we should put into - // a single block. We need to chunk them up into groups and re-encode them. 
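Every decode path applies tombstones by calling `Exclude(min, max)` on the decoded array so deleted ranges never reach the output file. A minimal sketch of that operation over parallel, ascending time-sorted slices; the real `tsdb` array types achieve the same effect with binary searches over `Timestamps`, and `exclude` here is an invented stand-in:

```go
package main

import (
	"fmt"
	"sort"
)

// exclude drops points whose timestamps fall in [min, max] from parallel,
// ascending time-sorted slices; the effect a tombstone has on a decoded
// block before it is merged and re-encoded.
func exclude(ts []int64, vs []float64, min, max int64) ([]int64, []float64) {
	lo := sort.Search(len(ts), func(i int) bool { return ts[i] >= min })
	hi := sort.Search(len(ts), func(i int) bool { return ts[i] > max })
	return append(ts[:lo], ts[hi:]...), append(vs[:lo], vs[hi:]...)
}

func main() {
	ts := []int64{1, 2, 3, 4, 5}
	vs := []float64{1, 2, 3, 4, 5}
	ts, vs = exclude(ts, vs, 2, 4) // tombstone covering [2, 4]
	fmt.Println(ts, vs)            // [1 5] [1 5]
}
```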
- return k.chunkBoolean(nil) - } - var i int - - for ; i < len(k.blocks); i++ { - - // skip this block if it's values were already read - if k.blocks[i].read() { - continue - } - - // if this block is already full, just add it as is - count, err := BlockCount(k.blocks[i].b) - if err != nil { - k.AppendError(err) - continue - } - - if count < k.size { - break - } - - k.merged = append(k.merged, k.blocks[i]) - } - - if k.fast { - for i < len(k.blocks) { - // skip this block if it's values were already read - if k.blocks[i].read() { - i++ - continue - } - - k.merged = append(k.merged, k.blocks[i]) - i++ - } - } - - // if we only have 1 blocks left, just append it as is and avoid decoding/recoding - if i == len(k.blocks)-1 { - if !k.blocks[i].read() { - k.merged = append(k.merged, k.blocks[i]) - } - i++ - } - - // The remaining blocks can be combined and we know that they do not overlap and - // so we can just append each, sort and re-encode. - for i < len(k.blocks) && k.mergedBooleanValues.Len() < k.size { - if k.blocks[i].read() { - i++ - continue - } - - var v tsdb.BooleanArray - if err := DecodeBooleanArrayBlock(k.blocks[i].b, &v); err != nil { - k.handleDecodeError(err, "boolean") - return nil - } - - // Invariant: v.MaxTime() == k.blocks[i].maxTime - if k.blocks[i].maxTime != v.MaxTime() { - k.blocks[i].maxTime = v.MaxTime() - } - - // Apply each tombstone to the block - for _, ts := range k.blocks[i].tombstones { - v.Exclude(ts.Min, ts.Max) - } - - k.blocks[i].markRead(k.blocks[i].minTime, k.blocks[i].maxTime) - - k.mergedBooleanValues.Merge(&v) - i++ - } - - k.blocks = k.blocks[i:] - - return k.chunkBoolean(k.merged) -} - -func (k *tsmBatchKeyIterator) chunkBoolean(dst blocks) blocks { - if k.mergedBooleanValues.Len() > k.size { - var values tsdb.BooleanArray - values.Timestamps = k.mergedBooleanValues.Timestamps[:k.size] - minTime, maxTime := values.Timestamps[0], values.Timestamps[len(values.Timestamps)-1] - values.Values = k.mergedBooleanValues.Values[:k.size] - - cb, err := EncodeBooleanArrayBlock(&values, nil) // TODO(edd): pool this buffer - if err != nil { - k.handleEncodeError(err, "boolean") - return nil - } - - dst = append(dst, &block{ - minTime: minTime, - maxTime: maxTime, - key: k.key, - b: cb, - }) - k.mergedBooleanValues.Timestamps = k.mergedBooleanValues.Timestamps[k.size:] - k.mergedBooleanValues.Values = k.mergedBooleanValues.Values[k.size:] - return dst - } - - // Re-encode the remaining values into the last block - if k.mergedBooleanValues.Len() > 0 { - minTime, maxTime := k.mergedBooleanValues.Timestamps[0], k.mergedBooleanValues.Timestamps[len(k.mergedBooleanValues.Timestamps)-1] - cb, err := EncodeBooleanArrayBlock(k.mergedBooleanValues, nil) // TODO(edd): pool this buffer - if err != nil { - k.handleEncodeError(err, "boolean") - return nil - } - - dst = append(dst, &block{ - minTime: minTime, - maxTime: maxTime, - key: k.key, - b: cb, - }) - k.mergedBooleanValues.Timestamps = k.mergedBooleanValues.Timestamps[:0] - k.mergedBooleanValues.Values = k.mergedBooleanValues.Values[:0] - } - return dst -} diff --git a/tsdb/engine/tsm1/compact.gen.go.tmpl b/tsdb/engine/tsm1/compact.gen.go.tmpl deleted file mode 100644 index cde652c1390..00000000000 --- a/tsdb/engine/tsm1/compact.gen.go.tmpl +++ /dev/null @@ -1,235 +0,0 @@ -package tsm1 - -import ( - "sort" - - "github.com/influxdata/influxdb/v2/tsdb" -) - -{{range .}} -// merge combines the next set of blocks into merged blocks. 
-func (k *tsmBatchKeyIterator) merge{{.Name}}() { - // No blocks left, or pending merged values, we're done - if len(k.blocks) == 0 && len(k.merged) == 0 && k.merged{{.Name}}Values.Len() == 0 { - return - } - - sort.Stable(k.blocks) - - dedup := k.merged{{.Name}}Values.Len() != 0 - if len(k.blocks) > 0 && !dedup { - // If we have more than one block or any partially tombstoned blocks, we many need to dedup - dedup = len(k.blocks[0].tombstones) > 0 || k.blocks[0].partiallyRead() - - // Quickly scan each block to see if any overlap with the prior block, if they overlap then - // we need to dedup as there may be duplicate points now - for i := 1; !dedup && i < len(k.blocks); i++ { - dedup = k.blocks[i].partiallyRead() || - k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) || - len(k.blocks[i].tombstones) > 0 - } - - } - - k.merged = k.combine{{.Name}}(dedup) -} - -// combine returns a new set of blocks using the current blocks in the buffers. If dedup -// is true, all the blocks will be decoded, dedup and sorted in in order. If dedup is false, -// only blocks that are smaller than the chunk size will be decoded and combined. -func (k *tsmBatchKeyIterator) combine{{.Name}}(dedup bool) blocks { - if dedup { - for k.merged{{.Name}}Values.Len() < k.size && len(k.blocks) > 0 { - for len(k.blocks) > 0 && k.blocks[0].read() { - k.blocks = k.blocks[1:] - } - - if len(k.blocks) == 0 { - break - } - first := k.blocks[0] - minTime := first.minTime - maxTime := first.maxTime - - // Adjust the min time to the start of any overlapping blocks. - for i := 0; i < len(k.blocks); i++ { - if k.blocks[i].overlapsTimeRange(minTime, maxTime) && !k.blocks[i].read() { - if k.blocks[i].minTime < minTime { - minTime = k.blocks[i].minTime - } - if k.blocks[i].maxTime > minTime && k.blocks[i].maxTime < maxTime { - maxTime = k.blocks[i].maxTime - } - } - } - - // We have some overlapping blocks so decode all, append in order and then dedup - for i := 0; i < len(k.blocks); i++ { - if !k.blocks[i].overlapsTimeRange(minTime, maxTime) || k.blocks[i].read() { - continue - } - - var v tsdb.{{.Name}}Array - var err error - if err = Decode{{.Name}}ArrayBlock(k.blocks[i].b, &v); err != nil { - k.handleDecodeError(err, "{{.name}}") - return nil - } - - // Invariant: v.MaxTime() == k.blocks[i].maxTime - if k.blocks[i].maxTime != v.MaxTime() { - if maxTime == k.blocks[i].maxTime { - maxTime = v.MaxTime() - } - k.blocks[i].maxTime = v.MaxTime() - } - - // Remove values we already read - v.Exclude(k.blocks[i].readMin, k.blocks[i].readMax) - - // Filter out only the values for overlapping block - v.Include(minTime, maxTime) - if v.Len() > 0 { - // Record that we read a subset of the block - k.blocks[i].markRead(v.MinTime(), v.MaxTime()) - } - - // Apply each tombstone to the block - for _, ts := range k.blocks[i].tombstones { - v.Exclude(ts.Min, ts.Max) - } - - k.merged{{.Name}}Values.Merge(&v) - } - } - - // Since we combined multiple blocks, we could have more values than we should put into - // a single block. We need to chunk them up into groups and re-encode them. 
- return k.chunk{{.Name}}(nil) - } - var i int - - for ; i < len(k.blocks); i++ { - - // skip this block if it's values were already read - if k.blocks[i].read() { - continue - } - - // if this block is already full, just add it as is - count, err := BlockCount(k.blocks[i].b) - if err != nil { - k.AppendError(err) - continue - } - - if count < k.size { - break - } - - k.merged = append(k.merged, k.blocks[i]) - } - - if k.fast { - for i < len(k.blocks) { - // skip this block if it's values were already read - if k.blocks[i].read() { - i++ - continue - } - - k.merged = append(k.merged, k.blocks[i]) - i++ - } - } - - // if we only have 1 blocks left, just append it as is and avoid decoding/recoding - if i == len(k.blocks)-1 { - if !k.blocks[i].read() { - k.merged = append(k.merged, k.blocks[i]) - } - i++ - } - - // The remaining blocks can be combined and we know that they do not overlap and - // so we can just append each, sort and re-encode. - for i < len(k.blocks) && k.merged{{.Name}}Values.Len() < k.size { - if k.blocks[i].read() { - i++ - continue - } - - var v tsdb.{{.Name}}Array - if err := Decode{{.Name}}ArrayBlock(k.blocks[i].b, &v); err != nil { - k.handleDecodeError(err, "{{.name}}") - return nil - } - - // Invariant: v.MaxTime() == k.blocks[i].maxTime - if k.blocks[i].maxTime != v.MaxTime() { - k.blocks[i].maxTime = v.MaxTime() - } - - // Apply each tombstone to the block - for _, ts := range k.blocks[i].tombstones { - v.Exclude(ts.Min, ts.Max) - } - - k.blocks[i].markRead(k.blocks[i].minTime, k.blocks[i].maxTime) - - k.merged{{.Name}}Values.Merge(&v) - i++ - } - - k.blocks = k.blocks[i:] - - return k.chunk{{.Name}}(k.merged) -} - -func (k *tsmBatchKeyIterator) chunk{{.Name}}(dst blocks) blocks { - if k.merged{{.Name}}Values.Len() > k.size { - var values tsdb.{{.Name}}Array - values.Timestamps = k.merged{{.Name}}Values.Timestamps[:k.size] - minTime, maxTime := values.Timestamps[0], values.Timestamps[len(values.Timestamps)-1] - values.Values = k.merged{{.Name}}Values.Values[:k.size] - - cb, err := Encode{{.Name}}ArrayBlock(&values, nil) // TODO(edd): pool this buffer - if err != nil { - k.handleEncodeError(err, "{{.name}}") - return nil - } - - dst = append(dst, &block{ - minTime: minTime, - maxTime: maxTime, - key: k.key, - b: cb, - }) - k.merged{{.Name}}Values.Timestamps = k.merged{{.Name}}Values.Timestamps[k.size:] - k.merged{{.Name}}Values.Values = k.merged{{.Name}}Values.Values[k.size:] - return dst - } - - // Re-encode the remaining values into the last block - if k.merged{{.Name}}Values.Len() > 0 { - minTime, maxTime := k.merged{{.Name}}Values.Timestamps[0], k.merged{{.Name}}Values.Timestamps[len(k.merged{{.Name}}Values.Timestamps)-1] - cb, err := Encode{{.Name}}ArrayBlock(k.merged{{.Name}}Values, nil) // TODO(edd): pool this buffer - if err != nil { - k.handleEncodeError(err, "{{.name}}") - return nil - } - - dst = append(dst, &block{ - minTime: minTime, - maxTime: maxTime, - key: k.key, - b: cb, - }) - k.merged{{.Name}}Values.Timestamps = k.merged{{.Name}}Values.Timestamps[:0] - k.merged{{.Name}}Values.Values = k.merged{{.Name}}Values.Values[:0] - } - return dst -} - - -{{ end }} diff --git a/tsdb/engine/tsm1/compact.gen.go.tmpldata b/tsdb/engine/tsm1/compact.gen.go.tmpldata deleted file mode 100644 index 236ba310ba4..00000000000 --- a/tsdb/engine/tsm1/compact.gen.go.tmpldata +++ /dev/null @@ -1,22 +0,0 @@ -[ - { - "Name":"Float", - "name":"float" - }, - { - "Name":"Integer", - "name":"integer" - }, - { - "Name":"Unsigned", - "name":"unsigned" - }, - { - "Name":"String", - 
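The per-type `merge*`/`combine*`/`chunk*` functions in `compact.gen.go` are stamped out from this template and the JSON type list that follows it. The exact `go:generate` tooling is not shown in this diff, so as an assumption-laden illustration only, a plain standard-library expansion over the same two files could look like:

```go
package main

import (
	"encoding/json"
	"os"
	"text/template"
)

func main() {
	// The type list: entries like {"Name":"Float","name":"float"}.
	raw, err := os.ReadFile("compact.gen.go.tmpldata")
	if err != nil {
		panic(err)
	}
	var types []map[string]string
	if err := json.Unmarshal(raw, &types); err != nil {
		panic(err)
	}

	tmpl, err := template.ParseFiles("compact.gen.go.tmpl")
	if err != nil {
		panic(err)
	}

	out, err := os.Create("compact.gen.go")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// The template ranges over the slice ({{range .}} ... {{end}}) and
	// emits one merge/combine/chunk set per entry.
	if err := tmpl.Execute(out, types); err != nil {
		panic(err)
	}
}
```

Each entry supplies `Name` for identifiers (`mergeFloat`, `tsdb.FloatArray`) and `name` for error messages, which is why the generated file repeats the same logic once per block type.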
"name":"string" - }, - { - "Name":"Boolean", - "name":"boolean" - } -] diff --git a/tsdb/engine/tsm1/compact.go b/tsdb/engine/tsm1/compact.go deleted file mode 100644 index 19520f9fb17..00000000000 --- a/tsdb/engine/tsm1/compact.go +++ /dev/null @@ -1,1900 +0,0 @@ -package tsm1 - -// Compactions are the process of creating read-optimized TSM files. -// The files are created by converting write-optimized WAL entries -// to read-optimized TSM format. They can also be created from existing -// TSM files when there are tombstone records that need to be removed, points -// that were overwritten by later writes and need to updated, or multiple -// smaller TSM files need to be merged to reduce file counts and improve -// compression ratios. -// -// The compaction process is stream-oriented using multiple readers and -// iterators. The resulting stream is written sorted and chunked to allow for -// one-pass writing of a new TSM file. - -import ( - "bytes" - "fmt" - "io" - "math" - "os" - "path/filepath" - "runtime" - "sort" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/influxdata/influxdb/v2/pkg/limiter" - "github.com/influxdata/influxdb/v2/tsdb" - "go.uber.org/zap" -) - -const maxTSMFileSize = uint32(2048 * 1024 * 1024) // 2GB -const logEvery = 2 * DefaultSegmentSize - -const ( - // CompactionTempExtension is the extension used for temporary files created during compaction. - CompactionTempExtension = "tmp" - - // TSMFileExtension is the extension used for TSM files. - TSMFileExtension = "tsm" - - // DefaultMaxSavedErrors is the number of errors that are stored by a TSMBatchKeyReader before - // subsequent errors are discarded - DefaultMaxSavedErrors = 100 -) - -var ( - errMaxFileExceeded = fmt.Errorf("max file exceeded") - errSnapshotsDisabled = fmt.Errorf("snapshots disabled") - errCompactionsDisabled = fmt.Errorf("compactions disabled") -) - -type errCompactionInProgress struct { - err error -} - -// Error returns the string representation of the error, to satisfy the error interface. -func (e errCompactionInProgress) Error() string { - if e.err != nil { - return fmt.Sprintf("compaction in progress: %s", e.err) - } - return "compaction in progress" -} - -type errCompactionAborted struct { - err error -} - -func (e errCompactionAborted) Error() string { - if e.err != nil { - return fmt.Sprintf("compaction aborted: %s", e.err) - } - return "compaction aborted" -} - -type errBlockRead struct { - file string - err error -} - -func (e errBlockRead) Error() string { - if e.err != nil { - return fmt.Sprintf("block read error on %s: %s", e.file, e.err) - } - return fmt.Sprintf("block read error on %s", e.file) -} - -// CompactionGroup represents a list of files eligible to be compacted together. -type CompactionGroup []string - -// CompactionPlanner determines what TSM files and WAL segments to include in a -// given compaction run. -type CompactionPlanner interface { - Plan(lastWrite time.Time) ([]CompactionGroup, int64) - PlanLevel(level int) ([]CompactionGroup, int64) - PlanOptimize() ([]CompactionGroup, int64) - Release(group []CompactionGroup) - FullyCompacted() (bool, string) - - // ForceFull causes the planner to return a full compaction plan the next - // time Plan() is called if there are files that could be compacted. - ForceFull() - - SetFileStore(fs *FileStore) -} - -// DefaultPlanner implements CompactionPlanner using a strategy to roll up -// multiple generations of TSM files into larger files in stages. 
It attempts -// to minimize the number of TSM files on disk while rolling up a bounder number -// of files. -type DefaultPlanner struct { - FileStore fileStore - - // compactFullWriteColdDuration specifies the length of time after - // which if no writes have been committed to the WAL, the engine will - // do a full compaction of the TSM files in this shard. This duration - // should always be greater than the CacheFlushWriteColdDuration - compactFullWriteColdDuration time.Duration - - // lastPlanCheck is the last time Plan was called - lastPlanCheck time.Time - - mu sync.RWMutex - // lastFindGenerations is the last time findGenerations was run - lastFindGenerations time.Time - - // lastGenerations is the last set of generations found by findGenerations - lastGenerations tsmGenerations - - // forceFull causes the next full plan requests to plan any files - // that may need to be compacted. Normally, these files are skipped and scheduled - // infrequently as the plans are more expensive to run. - forceFull bool - - // filesInUse is the set of files that have been returned as part of a plan and might - // be being compacted. Two plans should not return the same file at any given time. - filesInUse map[string]struct{} -} - -type fileStore interface { - Stats() []FileStat - LastModified() time.Time - BlockCount(path string, idx int) int - ParseFileName(path string) (int, int, error) -} - -func NewDefaultPlanner(fs fileStore, writeColdDuration time.Duration) *DefaultPlanner { - return &DefaultPlanner{ - FileStore: fs, - compactFullWriteColdDuration: writeColdDuration, - filesInUse: make(map[string]struct{}), - } -} - -// tsmGeneration represents the TSM files within a generation. -// 000001-01.tsm, 000001-02.tsm would be in the same generation -// 000001 each with different sequence numbers. -type tsmGeneration struct { - id int - files []FileStat - parseFileName ParseFileNameFunc -} - -func newTsmGeneration(id int, parseFileNameFunc ParseFileNameFunc) *tsmGeneration { - return &tsmGeneration{ - id: id, - parseFileName: parseFileNameFunc, - } -} - -// size returns the total size of the files in the generation. -func (t *tsmGeneration) size() uint64 { - var n uint64 - for _, f := range t.files { - n += uint64(f.Size) - } - return n -} - -// compactionLevel returns the level of the files in this generation. -func (t *tsmGeneration) level() int { - // Level 0 is always created from the result of a cache compaction. It generates - // 1 file with a sequence num of 1. Level 2 is generated by compacting multiple - // level 1 files. Level 3 is generate by compacting multiple level 2 files. Level - // 4 is for anything else. - _, seq, _ := t.parseFileName(t.files[0].Path) - if seq < 4 { - return seq - } - - return 4 -} - -// count returns the number of files in the generation. -func (t *tsmGeneration) count() int { - return len(t.files) -} - -// hasTombstones returns true if there are keys removed for any of the files. -func (t *tsmGeneration) hasTombstones() bool { - for _, f := range t.files { - if f.HasTombstone { - return true - } - } - return false -} - -func (c *DefaultPlanner) SetFileStore(fs *FileStore) { - c.FileStore = fs -} - -func (c *DefaultPlanner) ParseFileName(path string) (int, int, error) { - return c.FileStore.ParseFileName(path) -} - -// FullyCompacted returns true if the shard is fully compacted. 
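Generation grouping and `tsmGeneration.level()` both hinge on the file-name convention described above, where `000001-02.tsm` means generation 1, sequence 2. A simplified stand-in for the package's `ParseFileNameFunc` plus the level clamp; the real `DefaultParseFileName` lives elsewhere in the package, and this version only assumes the documented naming:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strconv"
	"strings"
)

// parseTSMFileName splits names like "000001-02.tsm" into generation 1,
// sequence 2; a stand-in for the package's ParseFileNameFunc.
func parseTSMFileName(path string) (generation, sequence int, err error) {
	base := filepath.Base(path)
	base = strings.TrimSuffix(base, filepath.Ext(base)) // drop ".tsm"
	parts := strings.SplitN(base, "-", 2)
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("file %s is not a TSM file", path)
	}
	if generation, err = strconv.Atoi(parts[0]); err != nil {
		return 0, 0, err
	}
	if sequence, err = strconv.Atoi(parts[1]); err != nil {
		return 0, 0, err
	}
	return generation, sequence, nil
}

// level mirrors tsmGeneration.level(): the sequence number of the first
// file in the generation, clamped to 4.
func level(sequence int) int {
	if sequence < 4 {
		return sequence
	}
	return 4
}

func main() {
	gen, seq, err := parseTSMFileName("/data/000001-02.tsm")
	if err != nil {
		panic(err)
	}
	fmt.Println(gen, seq, level(seq)) // 1 2 2
}
```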
-func (c *DefaultPlanner) FullyCompacted() (bool, string) { - gens := c.findGenerations(false) - if len(gens) > 1 { - return false, "not fully compacted and not idle because of more than one generation" - } else if gens.hasTombstones() { - return false, "not fully compacted and not idle because of tombstones" - } else { - return true, "" - } -} - -// ForceFull causes the planner to return a full compaction plan the next time -// a plan is requested. When ForceFull is called, level and optimize plans will -// not return plans until a full plan is requested and released. -func (c *DefaultPlanner) ForceFull() { - c.mu.Lock() - defer c.mu.Unlock() - c.forceFull = true -} - -// PlanLevel returns a set of TSM files to rewrite for a specific level. -func (c *DefaultPlanner) PlanLevel(level int) ([]CompactionGroup, int64) { - // If a full plan has been requested, don't plan any levels which will prevent - // the full plan from acquiring them. - c.mu.RLock() - if c.forceFull { - c.mu.RUnlock() - return nil, 0 - } - c.mu.RUnlock() - - // Determine the generations from all files on disk. We need to treat - // a generation conceptually as a single file even though it may be - // split across several files in sequence. - generations := c.findGenerations(true) - - // If there is only one generation and no tombstones, then there's nothing to - // do. - if len(generations) <= 1 && !generations.hasTombstones() { - return nil, 0 - } - - // Group each generation by level such that two adjacent generations in the same - // level become part of the same group. - var currentGen tsmGenerations - var groups []tsmGenerations - for i := 0; i < len(generations); i++ { - cur := generations[i] - - // See if this generation is orphaned which would prevent it from being further - // compacted until a final full compaction runs. - if i < len(generations)-1 { - if cur.level() < generations[i+1].level() { - currentGen = append(currentGen, cur) - continue - } - } - - if len(currentGen) == 0 || currentGen.level() == cur.level() { - currentGen = append(currentGen, cur) - continue - } - groups = append(groups, currentGen) - - currentGen = tsmGenerations{} - currentGen = append(currentGen, cur) - } - - if len(currentGen) > 0 { - groups = append(groups, currentGen) - } - - // Remove any groups in the wrong level - var levelGroups []tsmGenerations - for _, cur := range groups { - if cur.level() == level { - levelGroups = append(levelGroups, cur) - } - } - - minGenerations := 4 - if level == 1 { - minGenerations = 8 - } - - var cGroups []CompactionGroup - for _, group := range levelGroups { - for _, chunk := range group.chunk(minGenerations) { - var cGroup CompactionGroup - var hasTombstones bool - for _, gen := range chunk { - if gen.hasTombstones() { - hasTombstones = true - } - for _, file := range gen.files { - cGroup = append(cGroup, file.Path) - } - } - - if len(chunk) < minGenerations && !hasTombstones { - continue - } - - cGroups = append(cGroups, cGroup) - } - } - - if !c.acquire(cGroups) { - return nil, int64(len(cGroups)) - } - - return cGroups, int64(len(cGroups)) -} - -// PlanOptimize returns all TSM files if they are in different generations in order -// to optimize the index across TSM files. Each returned compaction group can be -// compacted concurrently. -func (c *DefaultPlanner) PlanOptimize() ([]CompactionGroup, int64) { - // If a full plan has been requested, don't plan any levels which will prevent - // the full plan from acquiring them. 
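The core of `PlanLevel` is grouping adjacent generations that sit at the same level and then cutting each group into compaction-sized chunks (`minGenerations`, 4 by default and 8 for level 1). A simplified sketch of those two steps, ignoring the orphaned-generation and tombstone handling; the `generation` type and helper names are invented for illustration:

```go
package main

import "fmt"

type generation struct {
	id    int
	level int
}

// groupByLevel walks generations in on-disk order and groups adjacent
// entries that share a level, the first phase of PlanLevel.
func groupByLevel(gens []generation) [][]generation {
	var groups [][]generation
	var cur []generation
	for _, g := range gens {
		if len(cur) == 0 || cur[0].level == g.level {
			cur = append(cur, g)
			continue
		}
		groups = append(groups, cur)
		cur = []generation{g}
	}
	if len(cur) > 0 {
		groups = append(groups, cur)
	}
	return groups
}

// chunk cuts a group into pieces of at most n generations, the role
// tsmGenerations.chunk(minGenerations) plays in PlanLevel.
func chunk(group []generation, n int) [][]generation {
	var out [][]generation
	for len(group) > 0 {
		m := n
		if m > len(group) {
			m = len(group)
		}
		out = append(out, group[:m])
		group = group[m:]
	}
	return out
}

func main() {
	gens := []generation{{1, 1}, {2, 1}, {3, 2}, {4, 2}, {5, 2}, {6, 1}}
	for _, grp := range groupByLevel(gens) {
		fmt.Println(grp, "->", chunk(grp, 2))
	}
}
```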
- c.mu.RLock() - if c.forceFull { - c.mu.RUnlock() - return nil, 0 - } - c.mu.RUnlock() - - // Determine the generations from all files on disk. We need to treat - // a generation conceptually as a single file even though it may be - // split across several files in sequence. - generations := c.findGenerations(true) - - // If there is only one generation and no tombstones, then there's nothing to - // do. - if len(generations) <= 1 && !generations.hasTombstones() { - return nil, 0 - } - - // Group each generation by level such that two adjacent generations in the same - // level become part of the same group. - var currentGen tsmGenerations - var groups []tsmGenerations - for i := 0; i < len(generations); i++ { - cur := generations[i] - - // Skip the file if it's over the max size and contains a full block and it does not have any tombstones - if cur.count() > 2 && cur.size() > uint64(maxTSMFileSize) && c.FileStore.BlockCount(cur.files[0].Path, 1) == tsdb.DefaultMaxPointsPerBlock && !cur.hasTombstones() { - continue - } - - // See if this generation is orphan'd which would prevent it from being further - // compacted until a final full compaction runs. - if i < len(generations)-1 { - if cur.level() < generations[i+1].level() { - currentGen = append(currentGen, cur) - continue - } - } - - if len(currentGen) == 0 || currentGen.level() == cur.level() { - currentGen = append(currentGen, cur) - continue - } - groups = append(groups, currentGen) - - currentGen = tsmGenerations{} - currentGen = append(currentGen, cur) - } - - if len(currentGen) > 0 { - groups = append(groups, currentGen) - } - - // Only optimize level 4 files since using lower-levels will collide - // with the level planners - var levelGroups []tsmGenerations - for _, cur := range groups { - if cur.level() == 4 { - levelGroups = append(levelGroups, cur) - } - } - - var cGroups []CompactionGroup - for _, group := range levelGroups { - // Skip the group if it's not worthwhile to optimize it - if len(group) < 4 && !group.hasTombstones() { - continue - } - - var cGroup CompactionGroup - for _, gen := range group { - for _, file := range gen.files { - cGroup = append(cGroup, file.Path) - } - } - - cGroups = append(cGroups, cGroup) - } - - if !c.acquire(cGroups) { - return nil, int64(len(cGroups)) - } - - return cGroups, int64(len(cGroups)) -} - -// Plan returns a set of TSM files to rewrite for level 4 or higher. The planning returns -// multiple groups if possible to allow compactions to run concurrently. -func (c *DefaultPlanner) Plan(lastWrite time.Time) ([]CompactionGroup, int64) { - generations := c.findGenerations(true) - - c.mu.RLock() - forceFull := c.forceFull - c.mu.RUnlock() - - // first check if we should be doing a full compaction because nothing has been written in a long time - if forceFull || c.compactFullWriteColdDuration > 0 && time.Since(lastWrite) > c.compactFullWriteColdDuration && len(generations) > 1 { - - // Reset the full schedule if we planned because of it. 
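`Plan` first decides whether a full compaction should run at all: either one was explicitly forced, or the shard has gone cold (no writes for longer than `compactFullWriteColdDuration`) and there is more than one generation left to merge. That gate reduces to a small predicate, sketched here:

```go
package main

import (
	"fmt"
	"time"
)

// shouldFullCompact mirrors Plan's first gate: run a full compaction when
// one was explicitly forced, or when the shard has gone cold (no writes
// for longer than coldDuration) and there is more than one generation to
// merge.
func shouldFullCompact(forceFull bool, lastWrite time.Time, coldDuration time.Duration, generations int) bool {
	if forceFull {
		return true
	}
	return coldDuration > 0 &&
		time.Since(lastWrite) > coldDuration &&
		generations > 1
}

func main() {
	sixHoursAgo := time.Now().Add(-6 * time.Hour)
	fmt.Println(shouldFullCompact(false, sixHoursAgo, 4*time.Hour, 3)) // true
	fmt.Println(shouldFullCompact(false, time.Now(), 4*time.Hour, 3)) // false
}
```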
- if forceFull { - c.mu.Lock() - c.forceFull = false - c.mu.Unlock() - } - - var tsmFiles []string - var genCount int - for i, group := range generations { - var skip bool - - // Skip the file if it's over the max size and contains a full block and it does not have any tombstones - if len(generations) > 2 && group.size() > uint64(maxTSMFileSize) && c.FileStore.BlockCount(group.files[0].Path, 1) == tsdb.DefaultMaxPointsPerBlock && !group.hasTombstones() { - skip = true - } - - // We need to look at the level of the next file because it may need to be combined with this generation - // but won't get picked up on it's own if this generation is skipped. This allows the most recently - // created files to get picked up by the full compaction planner and avoids having a few less optimally - // compressed files. - if i < len(generations)-1 { - if generations[i+1].level() <= 3 { - skip = false - } - } - - if skip { - continue - } - - for _, f := range group.files { - tsmFiles = append(tsmFiles, f.Path) - } - genCount += 1 - } - sort.Strings(tsmFiles) - - // Make sure we have more than 1 file and more than 1 generation - if len(tsmFiles) <= 1 || genCount <= 1 { - return nil, 0 - } - - group := []CompactionGroup{tsmFiles} - if !c.acquire(group) { - return nil, int64(len(group)) - } - return group, int64(len(group)) - } - - // don't plan if nothing has changed in the filestore - if c.lastPlanCheck.After(c.FileStore.LastModified()) && !generations.hasTombstones() { - return nil, 0 - } - - c.lastPlanCheck = time.Now() - - // If there is only one generation, return early to avoid re-compacting the same file - // over and over again. - if len(generations) <= 1 && !generations.hasTombstones() { - return nil, 0 - } - - // Need to find the ending point for level 4 files. They will be the oldest files. We scan - // each generation in descending break once we see a file less than 4. - end := 0 - start := 0 - for i, g := range generations { - if g.level() <= 3 { - break - } - end = i + 1 - } - - // As compactions run, the oldest files get bigger. We don't want to re-compact them during - // this planning if they are maxed out so skip over any we see. - var hasTombstones bool - for i, g := range generations[:end] { - if g.hasTombstones() { - hasTombstones = true - } - - if hasTombstones { - continue - } - - // Skip the file if it's over the max size and contains a full block or the generation is split - // over multiple files. In the latter case, that would mean the data in the file spilled over - // the 2GB limit. - if g.size() > uint64(maxTSMFileSize) && c.FileStore.BlockCount(g.files[0].Path, 1) == tsdb.DefaultMaxPointsPerBlock { - start = i + 1 - } - - // This is an edge case that can happen after multiple compactions run. The files at the beginning - // can become larger faster than ones after them. We want to skip those really big ones and just - // compact the smaller ones until they are closer in size. - if i > 0 { - if g.size()*2 < generations[i-1].size() { - start = i - break - } - } - } - - // step is how may files to compact in a group. We want to clamp it at 4 but also stil - // return groups smaller than 4. 
- step := 4 - if step > end { - step = end - } - - // slice off the generations that we'll examine - generations = generations[start:end] - - // Loop through the generations in groups of size step and see if we can compact all (or - // some of them as group) - groups := []tsmGenerations{} - for i := 0; i < len(generations); i += step { - var skipGroup bool - startIndex := i - - for j := i; j < i+step && j < len(generations); j++ { - gen := generations[j] - lvl := gen.level() - - // Skip compacting this group if there happens to be any lower level files in the - // middle. These will get picked up by the level compactors. - if lvl <= 3 { - skipGroup = true - break - } - - // Skip the file if it's over the max size and it contains a full block - if gen.size() >= uint64(maxTSMFileSize) && c.FileStore.BlockCount(gen.files[0].Path, 1) == tsdb.DefaultMaxPointsPerBlock && !gen.hasTombstones() { - startIndex++ - continue - } - } - - if skipGroup { - continue - } - - endIndex := i + step - if endIndex > len(generations) { - endIndex = len(generations) - } - if endIndex-startIndex > 0 { - groups = append(groups, generations[startIndex:endIndex]) - } - } - - if len(groups) == 0 { - return nil, 0 - } - - // With the groups, we need to evaluate whether the group as a whole can be compacted - compactable := []tsmGenerations{} - for _, group := range groups { - // if we don't have enough generations to compact, skip it - if len(group) < 4 && !group.hasTombstones() { - continue - } - compactable = append(compactable, group) - } - - // All the files to be compacted must be compacted in order. We need to convert each - // group to the actual set of files in that group to be compacted. - var tsmFiles []CompactionGroup - for _, c := range compactable { - var cGroup CompactionGroup - for _, group := range c { - for _, f := range group.files { - cGroup = append(cGroup, f.Path) - } - } - sort.Strings(cGroup) - tsmFiles = append(tsmFiles, cGroup) - } - - if !c.acquire(tsmFiles) { - return nil, int64(len(tsmFiles)) - } - return tsmFiles, int64(len(tsmFiles)) -} - -// findGenerations groups all the TSM files by generation based -// on their filename, then returns the generations in descending order (newest first). -// If skipInUse is true, tsm files that are part of an existing compaction plan -// are not returned. 
-func (c *DefaultPlanner) findGenerations(skipInUse bool) tsmGenerations { - c.mu.Lock() - defer c.mu.Unlock() - - last := c.lastFindGenerations - lastGen := c.lastGenerations - - if !last.IsZero() && c.FileStore.LastModified().Equal(last) { - return lastGen - } - - genTime := c.FileStore.LastModified() - tsmStats := c.FileStore.Stats() - generations := make(map[int]*tsmGeneration, len(tsmStats)) - for _, f := range tsmStats { - gen, _, _ := c.ParseFileName(f.Path) - - // Skip any files that are assigned to a current compaction plan - if _, ok := c.filesInUse[f.Path]; skipInUse && ok { - continue - } - - group := generations[gen] - if group == nil { - group = newTsmGeneration(gen, c.ParseFileName) - generations[gen] = group - } - group.files = append(group.files, f) - } - - orderedGenerations := make(tsmGenerations, 0, len(generations)) - for _, g := range generations { - orderedGenerations = append(orderedGenerations, g) - } - if !orderedGenerations.IsSorted() { - sort.Sort(orderedGenerations) - } - - c.lastFindGenerations = genTime - c.lastGenerations = orderedGenerations - - return orderedGenerations -} - -func (c *DefaultPlanner) acquire(groups []CompactionGroup) bool { - c.mu.Lock() - defer c.mu.Unlock() - - // See if the new files are already in use - for _, g := range groups { - for _, f := range g { - if _, ok := c.filesInUse[f]; ok { - return false - } - } - } - - // Mark all the new files in use - for _, g := range groups { - for _, f := range g { - c.filesInUse[f] = struct{}{} - } - } - return true -} - -// Release removes the files reference in each compaction group allowing new plans -// to be able to use them. -func (c *DefaultPlanner) Release(groups []CompactionGroup) { - c.mu.Lock() - defer c.mu.Unlock() - for _, g := range groups { - for _, f := range g { - delete(c.filesInUse, f) - } - } -} - -// Compactor merges multiple TSM files into new files or -// writes a Cache into 1 or more TSM files. -type Compactor struct { - Dir string - Size int - - FileStore interface { - NextGeneration() int - TSMReader(path string) *TSMReader - } - - // RateLimit is the limit for disk writes for all concurrent compactions. - RateLimit limiter.Rate - - formatFileName FormatFileNameFunc - parseFileName ParseFileNameFunc - - mu sync.RWMutex - snapshotsEnabled bool - compactionsEnabled bool - - // lastSnapshotDuration is the amount of time the last snapshot took to complete. - lastSnapshotDuration time.Duration - - snapshotLatencies *latencies - - // The channel to signal that any in progress snapshots should be aborted. - snapshotsInterrupt chan struct{} - // The channel to signal that any in progress level compactions should be aborted. - compactionsInterrupt chan struct{} - - files map[string]struct{} -} - -// NewCompactor returns a new instance of Compactor. -func NewCompactor() *Compactor { - return &Compactor{ - formatFileName: DefaultFormatFileName, - parseFileName: DefaultParseFileName, - } -} - -func (c *Compactor) WithFormatFileNameFunc(formatFileNameFunc FormatFileNameFunc) { - c.formatFileName = formatFileNameFunc -} - -func (c *Compactor) WithParseFileNameFunc(parseFileNameFunc ParseFileNameFunc) { - c.parseFileName = parseFileNameFunc -} - -// Open initializes the Compactor. 
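`acquire` and `Release` above amount to a mutex-guarded set of file paths that keeps two concurrent plans from claiming the same TSM file. A compact standalone version with a quick demonstration of the all-or-nothing claim; the type and method names here are invented:

```go
package main

import (
	"fmt"
	"sync"
)

// inUseSet is a mutex-guarded set of file paths, the same idea as
// DefaultPlanner.filesInUse: a plan only succeeds if none of its files are
// already claimed by another plan.
type inUseSet struct {
	mu    sync.Mutex
	files map[string]struct{}
}

func newInUseSet() *inUseSet {
	return &inUseSet{files: make(map[string]struct{})}
}

// acquire claims every file in groups, or none of them.
func (s *inUseSet) acquire(groups [][]string) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, g := range groups {
		for _, f := range g {
			if _, ok := s.files[f]; ok {
				return false
			}
		}
	}
	for _, g := range groups {
		for _, f := range g {
			s.files[f] = struct{}{}
		}
	}
	return true
}

// release gives the files back so later plans can use them.
func (s *inUseSet) release(groups [][]string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, g := range groups {
		for _, f := range g {
			delete(s.files, f)
		}
	}
}

func main() {
	s := newInUseSet()
	planA := [][]string{{"000001-04.tsm", "000002-04.tsm"}}
	planB := [][]string{{"000002-04.tsm"}} // overlaps with plan A
	fmt.Println(s.acquire(planA)) // true
	fmt.Println(s.acquire(planB)) // false: file already claimed
	s.release(planA)
	fmt.Println(s.acquire(planB)) // true
}
```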
-func (c *Compactor) Open() { - c.mu.Lock() - defer c.mu.Unlock() - if c.snapshotsEnabled || c.compactionsEnabled { - return - } - - c.snapshotsEnabled = true - c.compactionsEnabled = true - c.snapshotsInterrupt = make(chan struct{}) - c.compactionsInterrupt = make(chan struct{}) - c.snapshotLatencies = &latencies{values: make([]time.Duration, 4)} - - c.files = make(map[string]struct{}) -} - -// Close disables the Compactor. -func (c *Compactor) Close() { - c.mu.Lock() - defer c.mu.Unlock() - if !(c.snapshotsEnabled || c.compactionsEnabled) { - return - } - c.snapshotsEnabled = false - c.compactionsEnabled = false - if c.compactionsInterrupt != nil { - close(c.compactionsInterrupt) - } - if c.snapshotsInterrupt != nil { - close(c.snapshotsInterrupt) - } -} - -// DisableSnapshots disables the compactor from performing snapshots. -func (c *Compactor) DisableSnapshots() { - c.mu.Lock() - c.snapshotsEnabled = false - if c.snapshotsInterrupt != nil { - close(c.snapshotsInterrupt) - c.snapshotsInterrupt = nil - } - c.mu.Unlock() -} - -// EnableSnapshots allows the compactor to perform snapshots. -func (c *Compactor) EnableSnapshots() { - c.mu.Lock() - c.snapshotsEnabled = true - if c.snapshotsInterrupt == nil { - c.snapshotsInterrupt = make(chan struct{}) - } - c.mu.Unlock() -} - -// DisableSnapshots disables the compactor from performing compactions. -func (c *Compactor) DisableCompactions() { - c.mu.Lock() - c.compactionsEnabled = false - if c.compactionsInterrupt != nil { - close(c.compactionsInterrupt) - c.compactionsInterrupt = nil - } - c.mu.Unlock() -} - -// EnableCompactions allows the compactor to perform compactions. -func (c *Compactor) EnableCompactions() { - c.mu.Lock() - c.compactionsEnabled = true - if c.compactionsInterrupt == nil { - c.compactionsInterrupt = make(chan struct{}) - } - c.mu.Unlock() -} - -// WriteSnapshot writes a Cache snapshot to one or more new TSM files. -func (c *Compactor) WriteSnapshot(cache *Cache, logger *zap.Logger) ([]string, error) { - c.mu.RLock() - enabled := c.snapshotsEnabled - intC := c.snapshotsInterrupt - c.mu.RUnlock() - - if !enabled { - return nil, errSnapshotsDisabled - } - - start := time.Now() - card := cache.Count() - - // Enable throttling if we have lower cardinality or snapshots are going fast. - throttle := card < 3e6 && c.snapshotLatencies.avg() < 15*time.Second - - // Write snapshot concurrently if cardinality is relatively high. - concurrency := card / 2e6 - if concurrency < 1 { - concurrency = 1 - } - - // Special case very high cardinality, use max concurrency and don't throttle writes. - if card >= 3e6 { - concurrency = 4 - throttle = false - } - - splits := cache.Split(concurrency) - - type res struct { - files []string - err error - } - - resC := make(chan res, concurrency) - for i := 0; i < concurrency; i++ { - go func(sp *Cache) { - iter := NewCacheKeyIterator(sp, tsdb.DefaultMaxPointsPerBlock, intC) - files, err := c.writeNewFiles(c.FileStore.NextGeneration(), 0, nil, iter, throttle, logger) - resC <- res{files: files, err: err} - - }(splits[i]) - } - - var err error - files := make([]string, 0, concurrency) - for i := 0; i < concurrency; i++ { - result := <-resC - if result.err != nil { - err = result.err - } - files = append(files, result.files...) 
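`WriteSnapshot` sizes its write concurrency from the cache cardinality, throttles when the cache is small, and fans the cache splits out to goroutines that report back over a channel. A simplified sketch of the sizing rule and the fan-out/collect shape; the latency-based throttling input and the real `Cache.Split` are omitted, and `snapshotConcurrency` plus the file names are invented:

```go
package main

import "fmt"

// snapshotConcurrency mirrors the sizing in WriteSnapshot: roughly one
// writer goroutine per two million cached values, clamped to [1, 4], with
// write throttling turned off once cardinality is very high. (The real
// code also looks at recent snapshot latencies; that part is omitted.)
func snapshotConcurrency(cardinality int) (concurrency int, throttle bool) {
	throttle = cardinality < 3_000_000
	concurrency = cardinality / 2_000_000
	if concurrency < 1 {
		concurrency = 1
	}
	if cardinality >= 3_000_000 {
		concurrency = 4
		throttle = false
	}
	return concurrency, throttle
}

func main() {
	concurrency, throttle := snapshotConcurrency(5_000_000)
	fmt.Println(concurrency, throttle) // 4 false

	// Fan out one writer per cache split and gather results over a buffered
	// channel, the same shape as the WriteSnapshot loop.
	type res struct {
		files []string
		err   error
	}
	resC := make(chan res, concurrency)
	for i := 0; i < concurrency; i++ {
		go func(part int) {
			// Stand-in for writing one cache split out to TSM files.
			resC <- res{files: []string{fmt.Sprintf("split-%d.tsm.tmp", part)}}
		}(i)
	}

	var files []string
	var err error
	for i := 0; i < concurrency; i++ {
		r := <-resC
		if r.err != nil {
			err = r.err
		}
		files = append(files, r.files...)
	}
	fmt.Println(len(files), err) // 4 <nil>
}
```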
- } - - dur := time.Since(start).Truncate(time.Second) - - c.mu.Lock() - - // See if we were disabled while writing a snapshot - enabled = c.snapshotsEnabled - c.lastSnapshotDuration = dur - c.snapshotLatencies.add(time.Since(start)) - c.mu.Unlock() - - if !enabled { - return nil, errSnapshotsDisabled - } - - return files, err -} - -// compact writes multiple smaller TSM files into 1 or more larger files. -func (c *Compactor) compact(fast bool, tsmFiles []string, logger *zap.Logger) ([]string, error) { - size := c.Size - if size <= 0 { - size = tsdb.DefaultMaxPointsPerBlock - } - - c.mu.RLock() - intC := c.compactionsInterrupt - c.mu.RUnlock() - - // The new compacted files need to added to the max generation in the - // set. We need to find that max generation as well as the max sequence - // number to ensure we write to the next unique location. - var maxGeneration, maxSequence int - for _, f := range tsmFiles { - gen, seq, err := c.parseFileName(f) - if err != nil { - return nil, err - } - - if gen > maxGeneration { - maxGeneration = gen - maxSequence = seq - } - - if gen == maxGeneration && seq > maxSequence { - maxSequence = seq - } - } - - // For each TSM file, create a TSM reader - var trs []*TSMReader - for _, file := range tsmFiles { - select { - case <-intC: - return nil, errCompactionAborted{} - default: - } - - tr := c.FileStore.TSMReader(file) - if tr == nil { - // This would be a bug if this occurred as tsmFiles passed in should only be - // assigned to one compaction at any one time. A nil tr would mean the file - // doesn't exist. - return nil, errCompactionAborted{fmt.Errorf("bad plan: %s", file)} - } - defer tr.Unref() // inform that we're done with this reader when this method returns. - trs = append(trs, tr) - } - - if len(trs) == 0 { - logger.Debug("No input files") - return nil, nil - } - - tsm, err := NewTSMBatchKeyIterator(size, fast, DefaultMaxSavedErrors, intC, tsmFiles, trs...) - if err != nil { - return nil, err - } - - return c.writeNewFiles(maxGeneration, maxSequence, tsmFiles, tsm, true, logger) -} - -// CompactFull writes multiple smaller TSM files into 1 or more larger files. -func (c *Compactor) CompactFull(tsmFiles []string, logger *zap.Logger) ([]string, error) { - c.mu.RLock() - enabled := c.compactionsEnabled - c.mu.RUnlock() - - if !enabled { - return nil, errCompactionsDisabled - } - - if !c.add(tsmFiles) { - return nil, errCompactionInProgress{} - } - defer c.remove(tsmFiles) - - files, err := c.compact(false, tsmFiles, logger) - - // See if we were disabled while writing a snapshot - c.mu.RLock() - enabled = c.compactionsEnabled - c.mu.RUnlock() - - if !enabled { - if err := c.removeTmpFiles(files); err != nil { - return nil, err - } - return nil, errCompactionsDisabled - } - - return files, err -} - -// CompactFast writes multiple smaller TSM files into 1 or more larger files. 
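Before writing anything, `compact` scans the input file names to find the highest generation and, within it, the highest sequence, so the new output files land at the next unused position. The same bookkeeping over pre-parsed `(generation, sequence)` pairs, as a small sketch:

```go
package main

import "fmt"

// nextOutputPosition scans the input (generation, sequence) pairs and
// returns the highest generation and, within it, the highest sequence;
// the bookkeeping compact() performs before calling writeNewFiles.
func nextOutputPosition(files [][2]int) (maxGen, maxSeq int) {
	for _, f := range files {
		gen, seq := f[0], f[1]
		if gen > maxGen {
			maxGen, maxSeq = gen, seq
		}
		if gen == maxGen && seq > maxSeq {
			maxSeq = seq
		}
	}
	return maxGen, maxSeq
}

func main() {
	// (generation, sequence) pairs as parsed from the input file names.
	files := [][2]int{{1, 4}, {2, 1}, {2, 3}, {1, 9}}
	gen, seq := nextOutputPosition(files)
	fmt.Println(gen, seq) // 2 3: new files start at sequence 4 of generation 2
}
```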
-func (c *Compactor) CompactFast(tsmFiles []string, logger *zap.Logger) ([]string, error) { - c.mu.RLock() - enabled := c.compactionsEnabled - c.mu.RUnlock() - - if !enabled { - return nil, errCompactionsDisabled - } - - if !c.add(tsmFiles) { - return nil, errCompactionInProgress{} - } - defer c.remove(tsmFiles) - - files, err := c.compact(true, tsmFiles, logger) - - // See if we were disabled while writing a snapshot - c.mu.RLock() - enabled = c.compactionsEnabled - c.mu.RUnlock() - - if !enabled { - if err := c.removeTmpFiles(files); err != nil { - return nil, err - } - return nil, errCompactionsDisabled - } - - return files, err - -} - -// removeTmpFiles is responsible for cleaning up a compaction that -// was started, but then abandoned before the temporary files were dealt with. -func (c *Compactor) removeTmpFiles(files []string) error { - for _, f := range files { - if err := os.Remove(f); err != nil { - return fmt.Errorf("error removing temp compaction file: %v", err) - } - } - return nil -} - -// writeNewFiles writes from the iterator into new TSM files, rotating -// to a new file once it has reached the max TSM file size. -func (c *Compactor) writeNewFiles(generation, sequence int, src []string, iter KeyIterator, throttle bool, logger *zap.Logger) ([]string, error) { - // These are the new TSM files written - var files []string - - for { - sequence++ - - // New TSM files are written to a temp file and renamed when fully completed. - fileName := filepath.Join(c.Dir, c.formatFileName(generation, sequence)+"."+TSMFileExtension+"."+TmpTSMFileExtension) - logger.Debug("Compacting files", zap.Int("file_count", len(src)), zap.String("output_file", fileName)) - - // Write as much as possible to this file - err := c.write(fileName, iter, throttle, logger) - - // We've hit the max file limit and there is more to write. Create a new file - // and continue. - if err == errMaxFileExceeded || err == ErrMaxBlocksExceeded { - files = append(files, fileName) - logger.Debug("file size or block count exceeded, opening another output file", zap.String("output_file", fileName)) - continue - } else if err == ErrNoValues { - logger.Debug("Dropping empty file", zap.String("output_file", fileName)) - // If the file only contained tombstoned entries, then it would be a 0 length - // file that we can drop. - if err := os.RemoveAll(fileName); err != nil { - return nil, err - } - break - } else if _, ok := err.(errCompactionInProgress); ok { - // Don't clean up the file as another compaction is using it. This should not happen as the - // planner keeps track of which files are assigned to compaction plans now. - return nil, err - } else if err != nil { - // We hit an error and didn't finish the compaction. Abort. 
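`writeNewFiles` is a rotation loop: bump the sequence, write into a temp file, and start another file whenever the writer signals that the current one filled up. A reduced sketch of that control flow with stand-in sentinel errors; `errNoValues` plays the role of `ErrNoValues`, the `ErrMaxBlocksExceeded` and in-progress cases are folded away, and the file naming only follows the `000001-01` pattern described earlier:

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errMaxFileExceeded = errors.New("max file exceeded")
	errNoValues        = errors.New("no values written")
)

// writeRotating keeps writing output files, bumping the sequence number
// and starting a new file whenever the writer reports the current one
// filled up; a reduced mirror of the writeNewFiles loop. writeOne is a
// stand-in for the per-file write step.
func writeRotating(generation, sequence int, writeOne func(name string) error) ([]string, error) {
	var files []string
	for {
		sequence++
		name := fmt.Sprintf("%09d-%09d.tsm.tmp", generation, sequence)

		err := writeOne(name)
		switch {
		case errors.Is(err, errMaxFileExceeded):
			// File filled up but the iterator has more data: keep it and rotate.
			files = append(files, name)
			continue
		case errors.Is(err, errNoValues):
			// Nothing but tombstoned data: drop the empty file and stop.
			return files, nil
		case err != nil:
			// Real failure: the caller is expected to clean up temp files.
			return nil, err
		}
		files = append(files, name)
		return files, nil
	}
}

func main() {
	remaining := 3 // pretend the iterator fills three files before finishing
	files, err := writeRotating(2, 3, func(name string) error {
		remaining--
		if remaining > 0 {
			return errMaxFileExceeded
		}
		return nil
	})
	fmt.Println(files, err)
}
```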
- // Remove any tmp files we already completed - // discard later errors to return the first one from the write() call - for _, f := range files { - _ = os.RemoveAll(f) - } - // Remove the temp file - // discard later errors to return the first one from the write() call - _ = os.RemoveAll(fileName) - return nil, err - } - - files = append(files, fileName) - break - } - - return files, nil -} - -func (c *Compactor) write(path string, iter KeyIterator, throttle bool, logger *zap.Logger) (err error) { - fd, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_EXCL, 0666) - if err != nil { - return errCompactionInProgress{err: err} - } - - // syncingWriter ensures that whatever we wrap the above file descriptor in - // it will always be able to be synced by the tsm writer, since it does - // type assertions to attempt to sync. - type syncingWriter interface { - io.Writer - Sync() error - } - - // Create the write for the new TSM file. - var ( - w TSMWriter - limitWriter syncingWriter = fd - ) - - if c.RateLimit != nil && throttle { - limitWriter = limiter.NewWriterWithRate(fd, c.RateLimit) - } - - // Use a disk based TSM buffer if it looks like we might create a big index - // in memory. - if iter.EstimatedIndexSize() > 64*1024*1024 { - w, err = NewTSMWriterWithDiskBuffer(limitWriter) - if err != nil { - return err - } - } else { - w, err = NewTSMWriter(limitWriter) - if err != nil { - return err - } - } - - defer func() { - closeErr := w.Close() - if err == nil { - err = closeErr - } - - // Check for errors where we should not remove the file - _, inProgress := err.(errCompactionInProgress) - maxBlocks := err == ErrMaxBlocksExceeded - maxFileSize := err == errMaxFileExceeded - if inProgress || maxBlocks || maxFileSize { - return - } - - if err != nil { - _ = w.Remove() - } - }() - - lastLogSize := w.Size() - for iter.Next() { - c.mu.RLock() - enabled := c.snapshotsEnabled || c.compactionsEnabled - c.mu.RUnlock() - - if !enabled { - return errCompactionAborted{} - } - // Each call to read returns the next sorted key (or the prior one if there are - // more values to write). The size of values will be less than or equal to our - // chunk size (1000) - key, minTime, maxTime, block, err := iter.Read() - if err != nil { - return err - } - - if minTime > maxTime { - return fmt.Errorf("invalid index entry for block. min=%d, max=%d", minTime, maxTime) - } - - // Write the key and value - if err := w.WriteBlock(key, minTime, maxTime, block); err == ErrMaxBlocksExceeded { - if err := w.WriteIndex(); err != nil { - return err - } - return err - } else if err != nil { - return err - } - - // If we have a max file size configured and we're over it, close out the file - // and return the error. - if w.Size() > maxTSMFileSize { - if err := w.WriteIndex(); err != nil { - return err - } - - return errMaxFileExceeded - } else if (w.Size() - lastLogSize) > logEvery { - logger.Debug("Compaction progress", zap.String("output_file", path), zap.Uint32("size", w.Size())) - lastLogSize = w.Size() - } - } - - // Were there any errors encountered during iteration? - if err := iter.Err(); err != nil { - return err - } - - // We're all done. Close out the file. 
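`write` wraps the output file in a rate-limited writer (`limiter.NewWriterWithRate`) when throttling is requested, so concurrent compactions do not saturate the disk. The sketch below is only a stand-in for that idea, a trivially throttled `io.Writer` built from the standard library rather than the real `pkg/limiter` type:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
	"time"
)

// throttledWriter caps throughput by sleeping after each chunk it writes.
// It is only meant to show the shape of the idea; the real compactor
// wraps the output file with the pkg/limiter writer instead.
type throttledWriter struct {
	w           io.Writer
	bytesPerSec int
}

func (t *throttledWriter) Write(p []byte) (int, error) {
	n, err := t.w.Write(p)
	if n > 0 && t.bytesPerSec > 0 {
		// Sleep long enough that, on average, we stay under the limit.
		time.Sleep(time.Duration(n) * time.Second / time.Duration(t.bytesPerSec))
	}
	return n, err
}

func main() {
	tw := &throttledWriter{w: os.Stdout, bytesPerSec: 256}
	start := time.Now()
	io.Copy(tw, strings.NewReader(strings.Repeat("x", 128)))
	fmt.Printf("\nwrote 128 bytes in %v\n", time.Since(start).Round(time.Millisecond))
}
```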
- if err := w.WriteIndex(); err != nil { - return err - } - logger.Debug("Compaction finished", zap.String("output_file", path), zap.Uint32("size", w.Size())) - return nil -} - -func (c *Compactor) add(files []string) bool { - c.mu.Lock() - defer c.mu.Unlock() - - // See if the new files are already in use - for _, f := range files { - if _, ok := c.files[f]; ok { - return false - } - } - - // Mark all the new files in use - for _, f := range files { - c.files[f] = struct{}{} - } - return true -} - -func (c *Compactor) remove(files []string) { - c.mu.Lock() - defer c.mu.Unlock() - for _, f := range files { - delete(c.files, f) - } -} - -// KeyIterator allows iteration over set of keys and values in sorted order. -type KeyIterator interface { - // Next returns true if there are any values remaining in the iterator. - Next() bool - - // Read returns the key, time range, and raw data for the next block, - // or any error that occurred. - Read() (key []byte, minTime int64, maxTime int64, data []byte, err error) - - // Close closes the iterator. - Close() error - - // Err returns any errors encountered during iteration. - Err() error - - // EstimatedIndexSize returns the estimated size of the index that would - // be required to store all the series and entries in the KeyIterator. - EstimatedIndexSize() int -} -type TSMErrors []error - -func (t TSMErrors) Error() string { - e := []string{} - for _, v := range t { - e = append(e, v.Error()) - } - return strings.Join(e, ", ") -} - -type block struct { - key []byte - minTime, maxTime int64 - typ byte - b []byte - tombstones []TimeRange - - // readMin, readMax are the timestamps range of values have been - // read and encoded from this block. - readMin, readMax int64 -} - -func (b *block) overlapsTimeRange(min, max int64) bool { - return b.minTime <= max && b.maxTime >= min -} - -func (b *block) read() bool { - return b.readMin <= b.minTime && b.readMax >= b.maxTime -} - -func (b *block) markRead(min, max int64) { - if min < b.readMin { - b.readMin = min - } - - if max > b.readMax { - b.readMax = max - } -} - -func (b *block) partiallyRead() bool { - // If readMin and readMax are still the initial values, nothing has been read. - if b.readMin == int64(math.MaxInt64) && b.readMax == int64(math.MinInt64) { - return false - } - return b.readMin != b.minTime || b.readMax != b.maxTime -} - -type blocks []*block - -func (a blocks) Len() int { return len(a) } - -func (a blocks) Less(i, j int) bool { - cmp := bytes.Compare(a[i].key, a[j].key) - if cmp == 0 { - return a[i].minTime < a[j].minTime && a[i].maxTime < a[j].minTime - } - return cmp < 0 -} - -func (a blocks) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// tsmBatchKeyIterator implements the KeyIterator for set of TSMReaders. Iteration produces -// keys in sorted order and the values between the keys sorted and deduped. If any of -// the readers have associated tombstone entries, they are returned as part of iteration. -type tsmBatchKeyIterator struct { - // readers is the set of readers it produce a sorted key run with - readers []*TSMReader - - // values is the temporary buffers for each key that is returned by a reader - values map[string][]Value - - // pos is the current key position within the corresponding readers slice. A value of - // pos[0] = 1, means the reader[0] is currently at key 1 in its ordered index. - pos []int - - // errs is any error we received while iterating values. 
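The `KeyIterator` interface above is consumed by `Compactor.write` in a simple loop: advance with `Next`, pull the current merged block with `Read`, validate its time range, and check `Err` once iteration stops. A self-contained sketch of that contract and consumption pattern, with a toy in-memory iterator standing in for the TSM-backed one; `EstimatedIndexSize` is dropped here for brevity:

```go
package main

import "fmt"

// keyIterator captures the contract of the KeyIterator interface in spirit:
// Next advances, Read returns the current merged block, Err reports any
// problems accumulated along the way.
type keyIterator interface {
	Next() bool
	Read() (key []byte, minTime, maxTime int64, block []byte, err error)
	Err() error
	Close() error
}

// drain shows the consumption pattern the compactor's write step relies on:
// loop on Next, Read each block, validate its time range, and check Err
// once iteration stops.
func drain(iter keyIterator, emit func(key []byte, min, max int64, block []byte) error) error {
	defer iter.Close()
	for iter.Next() {
		key, min, max, block, err := iter.Read()
		if err != nil {
			return err
		}
		if min > max {
			return fmt.Errorf("invalid index entry for block. min=%d, max=%d", min, max)
		}
		if err := emit(key, min, max, block); err != nil {
			return err
		}
	}
	return iter.Err()
}

// memBlock and sliceIterator form a toy in-memory iterator used only to
// exercise drain.
type memBlock struct {
	key      []byte
	min, max int64
	b        []byte
}

type sliceIterator struct {
	blocks []memBlock
	i      int
}

func (s *sliceIterator) Next() bool { s.i++; return s.i <= len(s.blocks) }
func (s *sliceIterator) Read() ([]byte, int64, int64, []byte, error) {
	blk := s.blocks[s.i-1]
	return blk.key, blk.min, blk.max, blk.b, nil
}
func (s *sliceIterator) Err() error   { return nil }
func (s *sliceIterator) Close() error { return nil }

func main() {
	it := &sliceIterator{blocks: []memBlock{
		{key: []byte("cpu"), min: 1, max: 10, b: []byte{0x1}},
		{key: []byte("mem"), min: 1, max: 10, b: []byte{0x2}},
	}}
	err := drain(it, func(key []byte, min, max int64, block []byte) error {
		fmt.Printf("write %s [%d,%d] (%d bytes)\n", key, min, max, len(block))
		return nil
	})
	fmt.Println("err:", err)
}
```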
- errs TSMErrors - - // indicates whether the iterator should choose a faster merging strategy over a more - // optimally compressed one. If fast is true, multiple blocks will just be added as is - // and not combined. In some cases, a slower path will need to be utilized even when - // fast is true to prevent overlapping blocks of time for the same key. - // If false, the blocks will be decoded and duplicated (if needed) and - // then chunked into the maximally sized blocks. - fast bool - - // size is the maximum number of values to encode in a single block - size int - - // key is the current key lowest key across all readers that has not be fully exhausted - // of values. - key []byte - typ byte - - // tsmFiles are the string names of the files for use in tracking errors, ordered the same - // as iterators and buf - tsmFiles []string - // currentTsm is the current TSM file being iterated over - currentTsm string - - iterators []*BlockIterator - blocks blocks - - buf []blocks - - // mergeValues are decoded blocks that have been combined - mergedFloatValues *tsdb.FloatArray - mergedIntegerValues *tsdb.IntegerArray - mergedUnsignedValues *tsdb.UnsignedArray - mergedBooleanValues *tsdb.BooleanArray - mergedStringValues *tsdb.StringArray - - // merged are encoded blocks that have been combined or used as is - // without decode - merged blocks - interrupt chan struct{} - - // maxErrors is the maximum number of errors to store before discarding. - maxErrors int - // overflowErrors is the number of errors we have ignored. - overflowErrors int -} - -func (t *tsmBatchKeyIterator) AppendError(err error) bool { - if t.maxErrors > len(t.errs) { - t.errs = append(t.errs, err) - // Was the error stored? - return true - } else { - // Was the error dropped - t.overflowErrors++ - return false - } -} - -// NewTSMBatchKeyIterator returns a new TSM key iterator from readers. -// size indicates the maximum number of values to encode in a single block. -func NewTSMBatchKeyIterator(size int, fast bool, maxErrors int, interrupt chan struct{}, tsmFiles []string, readers ...*TSMReader) (KeyIterator, error) { - var iter []*BlockIterator - for _, r := range readers { - iter = append(iter, r.BlockIterator()) - } - - return &tsmBatchKeyIterator{ - readers: readers, - values: map[string][]Value{}, - pos: make([]int, len(readers)), - size: size, - iterators: iter, - fast: fast, - tsmFiles: tsmFiles, - buf: make([]blocks, len(iter)), - mergedFloatValues: &tsdb.FloatArray{}, - mergedIntegerValues: &tsdb.IntegerArray{}, - mergedUnsignedValues: &tsdb.UnsignedArray{}, - mergedBooleanValues: &tsdb.BooleanArray{}, - mergedStringValues: &tsdb.StringArray{}, - interrupt: interrupt, - maxErrors: maxErrors, - }, nil -} - -func (k *tsmBatchKeyIterator) hasMergedValues() bool { - return k.mergedFloatValues.Len() > 0 || - k.mergedIntegerValues.Len() > 0 || - k.mergedUnsignedValues.Len() > 0 || - k.mergedStringValues.Len() > 0 || - k.mergedBooleanValues.Len() > 0 -} - -func (k *tsmBatchKeyIterator) EstimatedIndexSize() int { - var size uint32 - for _, r := range k.readers { - size += r.IndexSize() - } - return int(size) / len(k.readers) -} - -// Next returns true if there are any values remaining in the iterator. -func (k *tsmBatchKeyIterator) Next() bool { -RETRY: - // Any merged blocks pending? - if len(k.merged) > 0 { - k.merged = k.merged[1:] - if len(k.merged) > 0 { - return true - } - } - - // Any merged values pending? 
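`AppendError` caps the number of stored errors at `maxErrors` (`DefaultMaxSavedErrors`, 100) and counts everything past that; `Err` later reports the stored errors plus how many were dropped. The same policy as a small standalone type, joining messages the way `TSMErrors` does:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// boundedErrors stores at most max errors and counts the rest; the same
// policy as the iterator's AppendError and Err pair.
type boundedErrors struct {
	max      int
	errs     []error
	overflow int
}

// append records err and reports whether it was actually kept.
func (b *boundedErrors) append(err error) bool {
	if len(b.errs) < b.max {
		b.errs = append(b.errs, err)
		return true
	}
	b.overflow++
	return false
}

// err returns the stored errors plus a note about how many were dropped,
// joined into one message.
func (b *boundedErrors) err() error {
	if len(b.errs) == 0 {
		return nil
	}
	parts := make([]string, 0, len(b.errs)+1)
	for _, e := range b.errs {
		parts = append(parts, e.Error())
	}
	parts = append(parts, fmt.Sprintf("additional errors dropped: %d", b.overflow))
	return errors.New(strings.Join(parts, ", "))
}

func main() {
	b := &boundedErrors{max: 2}
	for i := 0; i < 5; i++ {
		b.append(fmt.Errorf("block read error %d", i))
	}
	fmt.Println(b.err())
}
```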
- if k.hasMergedValues() { - k.merge() - if len(k.merged) > 0 || k.hasMergedValues() { - return true - } - } - - // If we still have blocks from the last read, merge them - if len(k.blocks) > 0 { - k.merge() - if len(k.merged) > 0 || k.hasMergedValues() { - return true - } - } - - // Read the next block from each TSM iterator - for i, v := range k.buf { - if len(v) != 0 { - continue - } - - iter := k.iterators[i] - k.currentTsm = k.tsmFiles[i] - if iter.Next() { - key, minTime, maxTime, typ, _, b, err := iter.Read() - if err != nil { - k.AppendError(errBlockRead{k.currentTsm, err}) - } - - // This block may have ranges of time removed from it that would - // reduce the block min and max time. - tombstones := iter.r.TombstoneRange(key) - - var blk *block - if cap(k.buf[i]) > len(k.buf[i]) { - k.buf[i] = k.buf[i][:len(k.buf[i])+1] - blk = k.buf[i][len(k.buf[i])-1] - if blk == nil { - blk = &block{} - k.buf[i][len(k.buf[i])-1] = blk - } - } else { - blk = &block{} - k.buf[i] = append(k.buf[i], blk) - } - blk.minTime = minTime - blk.maxTime = maxTime - blk.key = key - blk.typ = typ - blk.b = b - blk.tombstones = tombstones - blk.readMin = math.MaxInt64 - blk.readMax = math.MinInt64 - - blockKey := key - for bytes.Equal(iter.PeekNext(), blockKey) { - iter.Next() - key, minTime, maxTime, typ, _, b, err := iter.Read() - if err != nil { - k.AppendError(errBlockRead{k.currentTsm, err}) - } - - tombstones := iter.r.TombstoneRange(key) - - var blk *block - if cap(k.buf[i]) > len(k.buf[i]) { - k.buf[i] = k.buf[i][:len(k.buf[i])+1] - blk = k.buf[i][len(k.buf[i])-1] - if blk == nil { - blk = &block{} - k.buf[i][len(k.buf[i])-1] = blk - } - } else { - blk = &block{} - k.buf[i] = append(k.buf[i], blk) - } - - blk.minTime = minTime - blk.maxTime = maxTime - blk.key = key - blk.typ = typ - blk.b = b - blk.tombstones = tombstones - blk.readMin = math.MaxInt64 - blk.readMax = math.MinInt64 - } - } - - if iter.Err() != nil { - k.AppendError(errBlockRead{k.currentTsm, iter.Err()}) - } - } - - // Each reader could have a different key that it's currently at, need to find - // the next smallest one to keep the sort ordering. - var minKey []byte - var minType byte - for _, b := range k.buf { - // block could be nil if the iterator has been exhausted for that file - if len(b) == 0 { - continue - } - if len(minKey) == 0 || bytes.Compare(b[0].key, minKey) < 0 { - minKey = b[0].key - minType = b[0].typ - } - } - k.key = minKey - k.typ = minType - - // Now we need to find all blocks that match the min key so we can combine and dedupe - // the blocks if necessary - for i, b := range k.buf { - if len(b) == 0 { - continue - } - if bytes.Equal(b[0].key, k.key) { - k.blocks = append(k.blocks, b...) - k.buf[i] = k.buf[i][:0] - } - } - - if len(k.blocks) == 0 { - return false - } - - k.merge() - - // After merging all the values for this key, we might not have any. (e.g. they were all deleted - // through many tombstones). In this case, move on to the next key instead of ending iteration. - if len(k.merged) == 0 { - goto RETRY - } - - return len(k.merged) > 0 -} - -// merge combines the next set of blocks into merged blocks. 
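// A minimal sketch of the "smallest key wins" selection that Next performs
// above across the per-reader buffers: the lexicographically lowest non-empty
// head key becomes the iterator's current key, and only buffers whose head
// carries that same key contribute their blocks to the merge. smallestKey is
// a hypothetical helper for illustration, written against plain [][]byte heads.
func smallestKey(heads [][]byte) []byte {
	var min []byte
	for _, k := range heads {
		if len(k) == 0 {
			// An empty head means that reader is exhausted.
			continue
		}
		if min == nil || bytes.Compare(k, min) < 0 {
			min = k
		}
	}
	return min
}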
-func (k *tsmBatchKeyIterator) merge() { - switch k.typ { - case BlockFloat64: - k.mergeFloat() - case BlockInteger: - k.mergeInteger() - case BlockUnsigned: - k.mergeUnsigned() - case BlockBoolean: - k.mergeBoolean() - case BlockString: - k.mergeString() - default: - k.AppendError(errBlockRead{k.currentTsm, fmt.Errorf("unknown block type: %v", k.typ)}) - } -} - -func (k *tsmBatchKeyIterator) handleEncodeError(err error, typ string) { - k.AppendError(errBlockRead{k.currentTsm, fmt.Errorf("encode error: unable to compress block type %s for key '%s': %v", typ, k.key, err)}) -} - -func (k *tsmBatchKeyIterator) handleDecodeError(err error, typ string) { - k.AppendError(errBlockRead{k.currentTsm, fmt.Errorf("decode error: unable to decompress block type %s for key '%s': %v", typ, k.key, err)}) -} - -func (k *tsmBatchKeyIterator) Read() ([]byte, int64, int64, []byte, error) { - // See if compactions were disabled while we were running. - select { - case <-k.interrupt: - return nil, 0, 0, nil, errCompactionAborted{} - default: - } - - if len(k.merged) == 0 { - return nil, 0, 0, nil, k.Err() - } - - block := k.merged[0] - return block.key, block.minTime, block.maxTime, block.b, k.Err() -} - -func (k *tsmBatchKeyIterator) Close() error { - k.values = nil - k.pos = nil - k.iterators = nil - for _, r := range k.readers { - if err := r.Close(); err != nil { - return err - } - } - return nil -} - -// Error returns any errors encountered during iteration. -func (k *tsmBatchKeyIterator) Err() error { - if len(k.errs) == 0 { - return nil - } - // Copy the errors before appending the dropped error count - var errs TSMErrors - errs = make([]error, 0, len(k.errs)+1) - errs = append(errs, k.errs...) - errs = append(errs, fmt.Errorf("additional errors dropped: %d", k.overflowErrors)) - return errs -} - -type cacheKeyIterator struct { - cache *Cache - size int - order [][]byte - - i int - blocks [][]cacheBlock - ready []chan struct{} - interrupt chan struct{} - err error -} - -type cacheBlock struct { - k []byte - minTime, maxTime int64 - b []byte - err error -} - -// NewCacheKeyIterator returns a new KeyIterator from a Cache. 
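// A minimal sketch of how a KeyIterator from this file is meant to be driven,
// whichever implementation backs it: Next advances to the next encoded block,
// Read hands back the key, time range, and raw block bytes, and Err is checked
// once iteration stops. drain is a hypothetical helper name used only for
// illustration.
func drain(it KeyIterator) error {
	defer it.Close()
	for it.Next() {
		key, minTime, maxTime, block, err := it.Read()
		if err != nil {
			return err
		}
		// A real caller would hand these to a TSM writer at this point.
		_, _, _, _ = key, minTime, maxTime, block
	}
	return it.Err()
}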
-func NewCacheKeyIterator(cache *Cache, size int, interrupt chan struct{}) KeyIterator { - keys := cache.Keys() - - chans := make([]chan struct{}, len(keys)) - for i := 0; i < len(keys); i++ { - chans[i] = make(chan struct{}, 1) - } - - cki := &cacheKeyIterator{ - i: -1, - size: size, - cache: cache, - order: keys, - ready: chans, - blocks: make([][]cacheBlock, len(keys)), - interrupt: interrupt, - } - go cki.encode() - return cki -} - -func (c *cacheKeyIterator) EstimatedIndexSize() int { - var n int - for _, v := range c.order { - n += len(v) - } - return n -} - -func (c *cacheKeyIterator) encode() { - concurrency := runtime.GOMAXPROCS(0) - n := len(c.ready) - - // Divide the keyset across each CPU - chunkSize := 1 - idx := uint64(0) - - for i := 0; i < concurrency; i++ { - // Run one goroutine per CPU and encode a section of the key space concurrently - go func() { - tenc := getTimeEncoder(tsdb.DefaultMaxPointsPerBlock) - fenc := getFloatEncoder(tsdb.DefaultMaxPointsPerBlock) - benc := getBooleanEncoder(tsdb.DefaultMaxPointsPerBlock) - uenc := getUnsignedEncoder(tsdb.DefaultMaxPointsPerBlock) - senc := getStringEncoder(tsdb.DefaultMaxPointsPerBlock) - ienc := getIntegerEncoder(tsdb.DefaultMaxPointsPerBlock) - - defer putTimeEncoder(tenc) - defer putFloatEncoder(fenc) - defer putBooleanEncoder(benc) - defer putUnsignedEncoder(uenc) - defer putStringEncoder(senc) - defer putIntegerEncoder(ienc) - - for { - i := int(atomic.AddUint64(&idx, uint64(chunkSize))) - chunkSize - - if i >= n { - break - } - - key := c.order[i] - values := c.cache.values(key) - - for len(values) > 0 { - - end := len(values) - if end > c.size { - end = c.size - } - - minTime, maxTime := values[0].UnixNano(), values[end-1].UnixNano() - var b []byte - var err error - - switch values[0].(type) { - case FloatValue: - b, err = encodeFloatBlockUsing(nil, values[:end], tenc, fenc) - case IntegerValue: - b, err = encodeIntegerBlockUsing(nil, values[:end], tenc, ienc) - case UnsignedValue: - b, err = encodeUnsignedBlockUsing(nil, values[:end], tenc, uenc) - case BooleanValue: - b, err = encodeBooleanBlockUsing(nil, values[:end], tenc, benc) - case StringValue: - b, err = encodeStringBlockUsing(nil, values[:end], tenc, senc) - default: - b, err = Values(values[:end]).Encode(nil) - } - - values = values[end:] - - c.blocks[i] = append(c.blocks[i], cacheBlock{ - k: key, - minTime: minTime, - maxTime: maxTime, - b: b, - err: err, - }) - - if err != nil { - c.err = err - } - } - // Notify this key is fully encoded - c.ready[i] <- struct{}{} - } - }() - } -} - -func (c *cacheKeyIterator) Next() bool { - if c.i >= 0 && c.i < len(c.ready) && len(c.blocks[c.i]) > 0 { - c.blocks[c.i] = c.blocks[c.i][1:] - if len(c.blocks[c.i]) > 0 { - return true - } - } - c.i++ - - if c.i >= len(c.ready) { - return false - } - - <-c.ready[c.i] - return true -} - -func (c *cacheKeyIterator) Read() ([]byte, int64, int64, []byte, error) { - // See if snapshot compactions were disabled while we were running. 
- select { - case <-c.interrupt: - c.err = errCompactionAborted{} - return nil, 0, 0, nil, c.err - default: - } - - blk := c.blocks[c.i][0] - return blk.k, blk.minTime, blk.maxTime, blk.b, blk.err -} - -func (c *cacheKeyIterator) Close() error { - return nil -} - -func (c *cacheKeyIterator) Err() error { - return c.err -} - -type tsmGenerations []*tsmGeneration - -func (a tsmGenerations) Len() int { return len(a) } -func (a tsmGenerations) Less(i, j int) bool { return a[i].id < a[j].id } -func (a tsmGenerations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a tsmGenerations) hasTombstones() bool { - for _, g := range a { - if g.hasTombstones() { - return true - } - } - return false -} - -func (a tsmGenerations) level() int { - var level int - for _, g := range a { - lev := g.level() - if lev > level { - level = lev - } - } - return level -} - -func (a tsmGenerations) chunk(size int) []tsmGenerations { - var chunks []tsmGenerations - for len(a) > 0 { - if len(a) >= size { - chunks = append(chunks, a[:size]) - a = a[size:] - } else { - chunks = append(chunks, a) - a = a[len(a):] - } - } - return chunks -} - -func (a tsmGenerations) IsSorted() bool { - if len(a) == 1 { - return true - } - - for i := 1; i < len(a); i++ { - if a.Less(i, i-1) { - return false - } - } - return true -} - -type latencies struct { - i int - values []time.Duration -} - -func (l *latencies) add(t time.Duration) { - l.values[l.i%len(l.values)] = t - l.i++ -} - -func (l *latencies) avg() time.Duration { - var n int64 - var sum time.Duration - for _, v := range l.values { - if v == 0 { - continue - } - sum += v - n++ - } - - if n > 0 { - return time.Duration(int64(sum) / n) - } - return time.Duration(0) -} diff --git a/tsdb/engine/tsm1/compact_test.go b/tsdb/engine/tsm1/compact_test.go deleted file mode 100644 index 4c711fa1f72..00000000000 --- a/tsdb/engine/tsm1/compact_test.go +++ /dev/null @@ -1,3093 +0,0 @@ -package tsm1_test - -import ( - "fmt" - "math" - "os" - "path/filepath" - "sort" - "strings" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "go.uber.org/zap" -) - -// Tests compacting a Cache snapshot into a single TSM file -func TestCompactor_Snapshot(t *testing.T) { - dir := t.TempDir() - - v1 := tsm1.NewValue(1, float64(1)) - v2 := tsm1.NewValue(1, float64(1)) - v3 := tsm1.NewValue(2, float64(2)) - - points1 := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {v1}, - "cpu,host=B#!~#value": {v2, v3}, - } - - c := tsm1.NewCache(0, tsdb.EngineTags{}) - for k, v := range points1 { - if err := c.Write([]byte(k), v); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - } - - fs := &fakeFileStore{} - t.Cleanup(func() { fs.Close() }) - - compactor := tsm1.NewCompactor() - compactor.Dir = dir - compactor.FileStore = fs - - files, err := compactor.WriteSnapshot(c, zap.NewNop()) - if err == nil { - t.Fatalf("expected error writing snapshot: %v", err) - } - if len(files) > 0 { - t.Fatalf("no files should be compacted: got %v", len(files)) - - } - - compactor.Open() - - files, err = compactor.WriteSnapshot(c, zap.NewNop()) - if err != nil { - t.Fatalf("unexpected error writing snapshot: %v", err) - } - - if got, exp := len(files), 1; got != exp { - t.Fatalf("files length mismatch: got %v, exp %v", got, exp) - } - - r := MustOpenTSMReader(files[0]) - t.Cleanup(func() { r.Close() }) - - if got, exp := r.KeyCount(), 2; got != exp { - t.Fatalf("keys length mismatch: got %v, exp %v", got, exp) - } - - 
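// A minimal sketch of the work distribution used by cacheKeyIterator.encode
// above, assuming n items and one worker goroutine per CPU: each worker
// atomically claims the next unclaimed index until the range is exhausted, so
// the key space is spread across CPUs without a channel or mutex handing out
// work. eachIndexConcurrently and do are hypothetical names; unlike encode,
// this sketch waits for its workers rather than signalling per-key readiness.
func eachIndexConcurrently(n int, do func(i int)) {
	var idx uint64
	var wg sync.WaitGroup
	for w := 0; w < runtime.GOMAXPROCS(0); w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				// AddUint64 returns the incremented value, so subtract one
				// to recover the index this worker just claimed.
				i := int(atomic.AddUint64(&idx, 1)) - 1
				if i >= n {
					return
				}
				do(i)
			}
		}()
	}
	wg.Wait()
}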
var data = []struct { - key string - points []tsm1.Value - }{ - {"cpu,host=A#!~#value", []tsm1.Value{v1}}, - {"cpu,host=B#!~#value", []tsm1.Value{v2, v3}}, - } - - for _, p := range data { - values, err := r.ReadAll([]byte(p.key)) - if err != nil { - t.Fatalf("unexpected error reading: %v", err) - } - - if got, exp := len(values), len(p.points); got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } - - for i, point := range p.points { - assertValueEqual(t, values[i], point) - } - } -} - -func TestCompactor_CompactFullLastTimestamp(t *testing.T) { - dir := t.TempDir() - - var vals tsm1.Values - ts := int64(1e9) - for i := 0; i < 120; i++ { - vals = append(vals, tsm1.NewIntegerValue(ts, 1)) - ts += 1e9 - } - // 121st timestamp skips a second - ts += 1e9 - vals = append(vals, tsm1.NewIntegerValue(ts, 1)) - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": vals[:100], - } - f1 := MustWriteTSM(t, dir, 1, writes) - - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": vals[100:], - } - f2 := MustWriteTSM(t, dir, 2, writes) - - fs := &fakeFileStore{} - t.Cleanup(func() { fs.Close() }) - - compactor := tsm1.NewCompactor() - compactor.Dir = dir - compactor.FileStore = fs - compactor.Open() - - files, err := compactor.CompactFull([]string{f1, f2}, zap.NewNop()) - if err != nil { - t.Fatalf("unexpected error writing snapshot: %#v", err) - } - - r := MustOpenTSMReader(files[0]) - t.Cleanup(func() { r.Close() }) - - entries := r.Entries([]byte("cpu,host=A#!~#value")) - _, b, err := r.ReadBytes(&entries[0], nil) - if err != nil { - t.Fatalf("ReadBytes: unexpected error %v", err) - } - var a tsdb.IntegerArray - err = tsm1.DecodeIntegerArrayBlock(b, &a) - if err != nil { - t.Fatalf("DecodeIntegerArrayBlock: unexpected error %v", err) - } - - if a.MaxTime() != entries[0].MaxTime { - t.Fatalf("expected MaxTime == a.MaxTime()") - } -} - -// Ensures that a compaction will properly merge multiple TSM files -func TestCompactor_CompactFull(t *testing.T) { - dir := t.TempDir() - - // write 3 TSM files with different data and one new point - a1 := tsm1.NewValue(1, 1.1) - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a1}, - } - f1 := MustWriteTSM(t, dir, 1, writes) - - a2 := tsm1.NewValue(2, 1.2) - b1 := tsm1.NewValue(1, 2.1) - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a2}, - "cpu,host=B#!~#value": {b1}, - } - f2 := MustWriteTSM(t, dir, 2, writes) - - a3 := tsm1.NewValue(1, 1.3) - c1 := tsm1.NewValue(1, 3.1) - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a3}, - "cpu,host=C#!~#value": {c1}, - } - f3 := MustWriteTSM(t, dir, 3, writes) - - fs := &fakeFileStore{} - t.Cleanup(func() { fs.Close() }) - compactor := tsm1.NewCompactor() - compactor.Dir = dir - compactor.FileStore = fs - - files, err := compactor.CompactFull([]string{f1, f2, f3}, zap.NewNop()) - if err == nil { - t.Fatalf("expected error writing snapshot: %v", err) - } - if len(files) > 0 { - t.Fatalf("no files should be compacted: got %v", len(files)) - - } - - compactor.Open() - - files, err = compactor.CompactFull([]string{f1, f2, f3}, zap.NewNop()) - if err != nil { - t.Fatalf("unexpected error writing snapshot: %v", err) - } - - if got, exp := len(files), 1; got != exp { - t.Fatalf("files length mismatch: got %v, exp %v", got, exp) - } - - expGen, expSeq, err := tsm1.DefaultParseFileName(f3) - if err != nil { - t.Fatalf("unexpected error parsing file name: %v", err) - } - expSeq = expSeq + 1 - - gotGen, gotSeq, err := tsm1.DefaultParseFileName(files[0]) - if err 
!= nil { - t.Fatalf("unexpected error parsing file name: %v", err) - } - - if gotGen != expGen { - t.Fatalf("wrong generation for new file: got %v, exp %v", gotGen, expGen) - } - - if gotSeq != expSeq { - t.Fatalf("wrong sequence for new file: got %v, exp %v", gotSeq, expSeq) - } - - r := MustOpenTSMReader(files[0]) - t.Cleanup(func() { r.Close() }) - - if got, exp := r.KeyCount(), 3; got != exp { - t.Fatalf("keys length mismatch: got %v, exp %v", got, exp) - } - - var data = []struct { - key string - points []tsm1.Value - }{ - {"cpu,host=A#!~#value", []tsm1.Value{a3, a2}}, - {"cpu,host=B#!~#value", []tsm1.Value{b1}}, - {"cpu,host=C#!~#value", []tsm1.Value{c1}}, - } - - for _, p := range data { - values, err := r.ReadAll([]byte(p.key)) - if err != nil { - t.Fatalf("unexpected error reading: %v", err) - } - - if got, exp := len(values), len(p.points); got != exp { - t.Fatalf("values length mismatch %s: got %v, exp %v", p.key, got, exp) - } - - for i, point := range p.points { - assertValueEqual(t, values[i], point) - } - } -} - -// Ensures that a compaction will properly merge multiple TSM files -func TestCompactor_DecodeError(t *testing.T) { - dir := t.TempDir() - - // write 3 TSM files with different data and one new point - a1 := tsm1.NewValue(1, 1.1) - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a1}, - } - f1 := MustWriteTSM(t, dir, 1, writes) - - a2 := tsm1.NewValue(2, 1.2) - b1 := tsm1.NewValue(1, 2.1) - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a2}, - "cpu,host=B#!~#value": {b1}, - } - f2 := MustWriteTSM(t, dir, 2, writes) - - a3 := tsm1.NewValue(1, 1.3) - c1 := tsm1.NewValue(1, 3.1) - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a3}, - "cpu,host=C#!~#value": {c1}, - } - f3 := MustWriteTSM(t, dir, 3, writes) - f, err := os.OpenFile(f3, os.O_RDWR, os.ModePerm) - if err != nil { - panic(err) - } - f.WriteAt([]byte("ffff"), 10) // skip over header - f.Close() - - fs := &fakeFileStore{} - defer fs.Close() - compactor := tsm1.NewCompactor() - compactor.Dir = dir - compactor.FileStore = fs - - files, err := compactor.CompactFull([]string{f1, f2, f3}, zap.NewNop()) - if err == nil { - t.Fatalf("expected error writing snapshot: %v", err) - } - if len(files) > 0 { - t.Fatalf("no files should be compacted: got %v", len(files)) - - } - - compactor.Open() - - if _, err = compactor.CompactFull([]string{f1, f2, f3}, zap.NewNop()); err == nil || !strings.Contains(err.Error(), "decode error: unable to decompress block type float for key 'cpu,host=A#!~#value': unpackBlock: not enough data for timestamp") { - t.Fatalf("expected error writing snapshot: %v", err) - } -} - -// Ensures that a compaction will properly merge multiple TSM files -func TestCompactor_Compact_OverlappingBlocks(t *testing.T) { - dir := t.TempDir() - - // write 3 TSM files with different data and one new point - a1 := tsm1.NewValue(4, 1.1) - a2 := tsm1.NewValue(5, 1.1) - a3 := tsm1.NewValue(7, 1.1) - - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a1, a2, a3}, - } - f1 := MustWriteTSM(t, dir, 1, writes) - - c1 := tsm1.NewValue(3, 1.2) - c2 := tsm1.NewValue(8, 1.2) - c3 := tsm1.NewValue(9, 1.2) - - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {c1, c2, c3}, - } - f3 := MustWriteTSM(t, dir, 3, writes) - - fs := &fakeFileStore{} - t.Cleanup(func() { fs.Close() }) - compactor := tsm1.NewCompactor() - compactor.Dir = dir - compactor.FileStore = fs - compactor.Size = 2 - - compactor.Open() - - files, err := compactor.CompactFast([]string{f1, f3}, zap.NewNop()) 
- if err != nil { - t.Fatalf("unexpected error writing snapshot: %v", err) - } - - if got, exp := len(files), 1; got != exp { - t.Fatalf("files length mismatch: got %v, exp %v", got, exp) - } - - r := MustOpenTSMReader(files[0]) - t.Cleanup(func() { r.Close() }) - - if got, exp := r.KeyCount(), 1; got != exp { - t.Fatalf("keys length mismatch: got %v, exp %v", got, exp) - } - - var data = []struct { - key string - points []tsm1.Value - }{ - {"cpu,host=A#!~#value", []tsm1.Value{c1, a1, a2, a3, c2, c3}}, - } - - for _, p := range data { - values, err := r.ReadAll([]byte(p.key)) - if err != nil { - t.Fatalf("unexpected error reading: %v", err) - } - - if got, exp := len(values), len(p.points); got != exp { - t.Fatalf("values length mismatch %s: got %v, exp %v", p.key, got, exp) - } - - for i, point := range p.points { - assertValueEqual(t, values[i], point) - } - } -} - -// Ensures that a compaction will properly merge multiple TSM files -func TestCompactor_Compact_OverlappingBlocksMultiple(t *testing.T) { - dir := t.TempDir() - - // write 3 TSM files with different data and one new point - a1 := tsm1.NewValue(4, 1.1) - a2 := tsm1.NewValue(5, 1.1) - a3 := tsm1.NewValue(7, 1.1) - - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a1, a2, a3}, - } - f1 := MustWriteTSM(t, dir, 1, writes) - - b1 := tsm1.NewValue(1, 1.2) - b2 := tsm1.NewValue(2, 1.2) - b3 := tsm1.NewValue(6, 1.2) - - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {b1, b2, b3}, - } - f2 := MustWriteTSM(t, dir, 2, writes) - - c1 := tsm1.NewValue(3, 1.2) - c2 := tsm1.NewValue(8, 1.2) - c3 := tsm1.NewValue(9, 1.2) - - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {c1, c2, c3}, - } - f3 := MustWriteTSM(t, dir, 3, writes) - - fs := &fakeFileStore{} - t.Cleanup(func() { fs.Close() }) - compactor := tsm1.NewCompactor() - compactor.Dir = dir - compactor.FileStore = fs - compactor.Size = 2 - - compactor.Open() - - files, err := compactor.CompactFast([]string{f1, f2, f3}, zap.NewNop()) - if err != nil { - t.Fatalf("unexpected error writing snapshot: %v", err) - } - - if got, exp := len(files), 1; got != exp { - t.Fatalf("files length mismatch: got %v, exp %v", got, exp) - } - - r := MustOpenTSMReader(files[0]) - t.Cleanup(func() { r.Close() }) - - if got, exp := r.KeyCount(), 1; got != exp { - t.Fatalf("keys length mismatch: got %v, exp %v", got, exp) - } - - var data = []struct { - key string - points []tsm1.Value - }{ - {"cpu,host=A#!~#value", []tsm1.Value{b1, b2, c1, a1, a2, b3, a3, c2, c3}}, - } - - for _, p := range data { - values, err := r.ReadAll([]byte(p.key)) - if err != nil { - t.Fatalf("unexpected error reading: %v", err) - } - - if got, exp := len(values), len(p.points); got != exp { - t.Fatalf("values length mismatch %s: got %v, exp %v", p.key, got, exp) - } - - for i, point := range p.points { - assertValueEqual(t, values[i], point) - } - } -} - -func TestCompactor_Compact_UnsortedBlocks(t *testing.T) { - dir := t.TempDir() - - // write 2 TSM files with different data and one new point - a1 := tsm1.NewValue(4, 1.1) - a2 := tsm1.NewValue(5, 1.1) - a3 := tsm1.NewValue(6, 1.1) - - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a1, a2, a3}, - } - f1 := MustWriteTSM(t, dir, 1, writes) - - b1 := tsm1.NewValue(1, 1.2) - b2 := tsm1.NewValue(2, 1.2) - b3 := tsm1.NewValue(3, 1.2) - - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {b1, b2, b3}, - } - f2 := MustWriteTSM(t, dir, 2, writes) - - fs := &fakeFileStore{} - t.Cleanup(func() { fs.Close() }) - compactor := 
tsm1.NewCompactor() - compactor.Dir = dir - compactor.FileStore = fs - compactor.Size = 2 - - compactor.Open() - - files, err := compactor.CompactFast([]string{f1, f2}, zap.NewNop()) - if err != nil { - t.Fatalf("unexpected error writing snapshot: %v", err) - } - - if got, exp := len(files), 1; got != exp { - t.Fatalf("files length mismatch: got %v, exp %v", got, exp) - } - - r := MustOpenTSMReader(files[0]) - t.Cleanup(func() { r.Close() }) - - if got, exp := r.KeyCount(), 1; got != exp { - t.Fatalf("keys length mismatch: got %v, exp %v", got, exp) - } - - var data = []struct { - key string - points []tsm1.Value - }{ - {"cpu,host=A#!~#value", []tsm1.Value{b1, b2, b3, a1, a2, a3}}, - } - - for _, p := range data { - values, err := r.ReadAll([]byte(p.key)) - if err != nil { - t.Fatalf("unexpected error reading: %v", err) - } - - if got, exp := len(values), len(p.points); got != exp { - t.Fatalf("values length mismatch %s: got %v, exp %v", p.key, got, exp) - } - - for i, point := range p.points { - assertValueEqual(t, values[i], point) - } - } -} - -func TestCompactor_Compact_UnsortedBlocksOverlapping(t *testing.T) { - dir := t.TempDir() - - // write 3 TSM files where two blocks are overlapping and with unsorted order - a1 := tsm1.NewValue(1, 1.1) - a2 := tsm1.NewValue(2, 1.1) - - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a1, a2}, - } - f1 := MustWriteTSM(t, dir, 1, writes) - - b1 := tsm1.NewValue(3, 1.2) - b2 := tsm1.NewValue(4, 1.2) - - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {b1, b2}, - } - f2 := MustWriteTSM(t, dir, 2, writes) - - c1 := tsm1.NewValue(1, 1.1) - c2 := tsm1.NewValue(2, 1.1) - - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {c1, c2}, - } - f3 := MustWriteTSM(t, dir, 3, writes) - - fs := &fakeFileStore{} - t.Cleanup(func() { fs.Close() }) - compactor := tsm1.NewCompactor() - compactor.Dir = dir - compactor.FileStore = fs - compactor.Size = 2 - - compactor.Open() - - files, err := compactor.CompactFast([]string{f1, f2, f3}, zap.NewNop()) - if err != nil { - t.Fatalf("unexpected error writing snapshot: %v", err) - } - - if got, exp := len(files), 1; got != exp { - t.Fatalf("files length mismatch: got %v, exp %v", got, exp) - } - - r := MustOpenTSMReader(files[0]) - t.Cleanup(func() { r.Close() }) - - if got, exp := r.KeyCount(), 1; got != exp { - t.Fatalf("keys length mismatch: got %v, exp %v", got, exp) - } - - var data = []struct { - key string - points []tsm1.Value - }{ - {"cpu,host=A#!~#value", []tsm1.Value{a1, a2, b1, b2}}, - } - - for _, p := range data { - values, err := r.ReadAll([]byte(p.key)) - if err != nil { - t.Fatalf("unexpected error reading: %v", err) - } - - if got, exp := len(values), len(p.points); got != exp { - t.Fatalf("values length mismatch %s: got %v, exp %v", p.key, got, exp) - } - - for i, point := range p.points { - assertValueEqual(t, values[i], point) - } - } -} - -// Ensures that a compaction will properly merge multiple TSM files -func TestCompactor_CompactFull_SkipFullBlocks(t *testing.T) { - dir := t.TempDir() - - // write 3 TSM files with different data and one new point - a1 := tsm1.NewValue(1, 1.1) - a2 := tsm1.NewValue(2, 1.2) - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a1, a2}, - } - f1 := MustWriteTSM(t, dir, 1, writes) - - a3 := tsm1.NewValue(3, 1.3) - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a3}, - } - f2 := MustWriteTSM(t, dir, 2, writes) - - a4 := tsm1.NewValue(4, 1.4) - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a4}, - } - f3 := 
MustWriteTSM(t, dir, 3, writes) - - fs := &fakeFileStore{} - t.Cleanup(func() { fs.Close() }) - compactor := tsm1.NewCompactor() - compactor.Dir = dir - compactor.FileStore = fs - compactor.Size = 2 - compactor.Open() - - files, err := compactor.CompactFull([]string{f1, f2, f3}, zap.NewNop()) - if err != nil { - t.Fatalf("unexpected error writing snapshot: %v", err) - } - - if got, exp := len(files), 1; got != exp { - t.Fatalf("files length mismatch: got %v, exp %v", got, exp) - } - - expGen, expSeq, err := tsm1.DefaultParseFileName(f3) - if err != nil { - t.Fatalf("unexpected error parsing file name: %v", err) - } - expSeq = expSeq + 1 - - gotGen, gotSeq, err := tsm1.DefaultParseFileName(files[0]) - if err != nil { - t.Fatalf("unexpected error parsing file name: %v", err) - } - - if gotGen != expGen { - t.Fatalf("wrong generation for new file: got %v, exp %v", gotGen, expGen) - } - - if gotSeq != expSeq { - t.Fatalf("wrong sequence for new file: got %v, exp %v", gotSeq, expSeq) - } - - r := MustOpenTSMReader(files[0]) - t.Cleanup(func() { r.Close() }) - - if got, exp := r.KeyCount(), 1; got != exp { - t.Fatalf("keys length mismatch: got %v, exp %v", got, exp) - } - - var data = []struct { - key string - points []tsm1.Value - }{ - {"cpu,host=A#!~#value", []tsm1.Value{a1, a2, a3, a4}}, - } - - for _, p := range data { - values, err := r.ReadAll([]byte(p.key)) - if err != nil { - t.Fatalf("unexpected error reading: %v", err) - } - - if got, exp := len(values), len(p.points); got != exp { - t.Fatalf("values length mismatch %s: got %v, exp %v", p.key, got, exp) - } - - for i, point := range p.points { - assertValueEqual(t, values[i], point) - } - } - - if got, exp := len(r.Entries([]byte("cpu,host=A#!~#value"))), 2; got != exp { - t.Fatalf("block count mismatch: got %v, exp %v", got, exp) - } -} - -// Ensures that a full compaction will skip over blocks that have the full -// range of time contained in the block tombstoned -func TestCompactor_CompactFull_TombstonedSkipBlock(t *testing.T) { - dir := t.TempDir() - - // write 3 TSM files with different data and one new point - a1 := tsm1.NewValue(1, 1.1) - a2 := tsm1.NewValue(2, 1.2) - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a1, a2}, - } - f1 := MustWriteTSM(t, dir, 1, writes) - - ts := tsm1.NewTombstoner(f1, nil) - ts.AddRange([][]byte{[]byte("cpu,host=A#!~#value")}, math.MinInt64, math.MaxInt64) - - if err := ts.Flush(); err != nil { - t.Fatalf("unexpected error flushing tombstone: %v", err) - } - - a3 := tsm1.NewValue(3, 1.3) - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a3}, - } - f2 := MustWriteTSM(t, dir, 2, writes) - - a4 := tsm1.NewValue(4, 1.4) - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a4}, - } - f3 := MustWriteTSM(t, dir, 3, writes) - - fs := &fakeFileStore{} - t.Cleanup(func() { fs.Close() }) - compactor := tsm1.NewCompactor() - compactor.Dir = dir - compactor.FileStore = fs - compactor.Size = 2 - compactor.Open() - - files, err := compactor.CompactFull([]string{f1, f2, f3}, zap.NewNop()) - if err != nil { - t.Fatalf("unexpected error writing snapshot: %v", err) - } - - if got, exp := len(files), 1; got != exp { - t.Fatalf("files length mismatch: got %v, exp %v", got, exp) - } - - expGen, expSeq, err := tsm1.DefaultParseFileName(f3) - if err != nil { - t.Fatalf("unexpected error parsing file name: %v", err) - } - expSeq = expSeq + 1 - - gotGen, gotSeq, err := tsm1.DefaultParseFileName(files[0]) - if err != nil { - t.Fatalf("unexpected error parsing file name: %v", err) - } - - if 
gotGen != expGen { - t.Fatalf("wrong generation for new file: got %v, exp %v", gotGen, expGen) - } - - if gotSeq != expSeq { - t.Fatalf("wrong sequence for new file: got %v, exp %v", gotSeq, expSeq) - } - - r := MustOpenTSMReader(files[0]) - t.Cleanup(func() { r.Close() }) - - if got, exp := r.KeyCount(), 1; got != exp { - t.Fatalf("keys length mismatch: got %v, exp %v", got, exp) - } - - var data = []struct { - key string - points []tsm1.Value - }{ - {"cpu,host=A#!~#value", []tsm1.Value{a3, a4}}, - } - - for _, p := range data { - values, err := r.ReadAll([]byte(p.key)) - if err != nil { - t.Fatalf("unexpected error reading: %v", err) - } - - if got, exp := len(values), len(p.points); got != exp { - t.Fatalf("values length mismatch %s: got %v, exp %v", p.key, got, exp) - } - - for i, point := range p.points { - assertValueEqual(t, values[i], point) - } - } - - if got, exp := len(r.Entries([]byte("cpu,host=A#!~#value"))), 1; got != exp { - t.Fatalf("block count mismatch: got %v, exp %v", got, exp) - } -} - -// Ensures that a full compaction will decode and combine blocks with -// partial tombstoned values -func TestCompactor_CompactFull_TombstonedPartialBlock(t *testing.T) { - dir := t.TempDir() - - // write 3 TSM files with different data and one new point - a1 := tsm1.NewValue(1, 1.1) - a2 := tsm1.NewValue(2, 1.2) - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a1, a2}, - } - f1 := MustWriteTSM(t, dir, 1, writes) - - ts := tsm1.NewTombstoner(f1, nil) - // a1 should remain after compaction - ts.AddRange([][]byte{[]byte("cpu,host=A#!~#value")}, 2, math.MaxInt64) - - if err := ts.Flush(); err != nil { - t.Fatalf("unexpected error flushing tombstone: %v", err) - } - - a3 := tsm1.NewValue(3, 1.3) - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a3}, - } - f2 := MustWriteTSM(t, dir, 2, writes) - - a4 := tsm1.NewValue(4, 1.4) - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a4}, - } - f3 := MustWriteTSM(t, dir, 3, writes) - - fs := &fakeFileStore{} - t.Cleanup(func() { fs.Close() }) - compactor := tsm1.NewCompactor() - compactor.Dir = dir - compactor.FileStore = fs - compactor.Size = 2 - compactor.Open() - - files, err := compactor.CompactFull([]string{f1, f2, f3}, zap.NewNop()) - if err != nil { - t.Fatalf("unexpected error writing snapshot: %v", err) - } - - if got, exp := len(files), 1; got != exp { - t.Fatalf("files length mismatch: got %v, exp %v", got, exp) - } - - expGen, expSeq, err := tsm1.DefaultParseFileName(f3) - if err != nil { - t.Fatalf("unexpected error parsing file name: %v", err) - } - expSeq = expSeq + 1 - - gotGen, gotSeq, err := tsm1.DefaultParseFileName(files[0]) - if err != nil { - t.Fatalf("unexpected error parsing file name: %v", err) - } - - if gotGen != expGen { - t.Fatalf("wrong generation for new file: got %v, exp %v", gotGen, expGen) - } - - if gotSeq != expSeq { - t.Fatalf("wrong sequence for new file: got %v, exp %v", gotSeq, expSeq) - } - - r := MustOpenTSMReader(files[0]) - t.Cleanup(func() { r.Close() }) - - if got, exp := r.KeyCount(), 1; got != exp { - t.Fatalf("keys length mismatch: got %v, exp %v", got, exp) - } - - var data = []struct { - key string - points []tsm1.Value - }{ - {"cpu,host=A#!~#value", []tsm1.Value{a1, a3, a4}}, - } - - for _, p := range data { - values, err := r.ReadAll([]byte(p.key)) - if err != nil { - t.Fatalf("unexpected error reading: %v", err) - } - - if got, exp := len(values), len(p.points); got != exp { - t.Fatalf("values length mismatch %s: got %v, exp %v", p.key, got, exp) - } - - for i, 
point := range p.points { - assertValueEqual(t, values[i], point) - } - } - - if got, exp := len(r.Entries([]byte("cpu,host=A#!~#value"))), 2; got != exp { - t.Fatalf("block count mismatch: got %v, exp %v", got, exp) - } -} - -// Ensures that a full compaction will decode and combine blocks with -// multiple tombstoned ranges within the block e.g. (t1, t2, t3, t4) -// having t2 and t3 removed -func TestCompactor_CompactFull_TombstonedMultipleRanges(t *testing.T) { - dir := t.TempDir() - - // write 3 TSM files with different data and one new point - a1 := tsm1.NewValue(1, 1.1) - a2 := tsm1.NewValue(2, 1.2) - a3 := tsm1.NewValue(3, 1.3) - a4 := tsm1.NewValue(4, 1.4) - - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a1, a2, a3, a4}, - } - f1 := MustWriteTSM(t, dir, 1, writes) - - ts := tsm1.NewTombstoner(f1, nil) - // a1, a3 should remain after compaction - ts.AddRange([][]byte{[]byte("cpu,host=A#!~#value")}, 2, 2) - ts.AddRange([][]byte{[]byte("cpu,host=A#!~#value")}, 4, 4) - - if err := ts.Flush(); err != nil { - t.Fatalf("unexpected error flushing tombstone: %v", err) - } - - a5 := tsm1.NewValue(5, 1.5) - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a5}, - } - f2 := MustWriteTSM(t, dir, 2, writes) - - a6 := tsm1.NewValue(6, 1.6) - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {a6}, - } - f3 := MustWriteTSM(t, dir, 3, writes) - - fs := &fakeFileStore{} - t.Cleanup(func() { fs.Close() }) - compactor := tsm1.NewCompactor() - compactor.Dir = dir - compactor.FileStore = fs - compactor.Size = 2 - compactor.Open() - - files, err := compactor.CompactFull([]string{f1, f2, f3}, zap.NewNop()) - if err != nil { - t.Fatalf("unexpected error writing snapshot: %v", err) - } - - if got, exp := len(files), 1; got != exp { - t.Fatalf("files length mismatch: got %v, exp %v", got, exp) - } - - expGen, expSeq, err := tsm1.DefaultParseFileName(f3) - if err != nil { - t.Fatalf("unexpected error parsing file name: %v", err) - } - expSeq = expSeq + 1 - - gotGen, gotSeq, err := tsm1.DefaultParseFileName(files[0]) - if err != nil { - t.Fatalf("unexpected error parsing file name: %v", err) - } - - if gotGen != expGen { - t.Fatalf("wrong generation for new file: got %v, exp %v", gotGen, expGen) - } - - if gotSeq != expSeq { - t.Fatalf("wrong sequence for new file: got %v, exp %v", gotSeq, expSeq) - } - - r := MustOpenTSMReader(files[0]) - t.Cleanup(func() { r.Close() }) - - if got, exp := r.KeyCount(), 1; got != exp { - t.Fatalf("keys length mismatch: got %v, exp %v", got, exp) - } - - var data = []struct { - key string - points []tsm1.Value - }{ - {"cpu,host=A#!~#value", []tsm1.Value{a1, a3, a5, a6}}, - } - - for _, p := range data { - values, err := r.ReadAll([]byte(p.key)) - if err != nil { - t.Fatalf("unexpected error reading: %v", err) - } - - if got, exp := len(values), len(p.points); got != exp { - t.Fatalf("values length mismatch %s: got %v, exp %v", p.key, got, exp) - } - - for i, point := range p.points { - assertValueEqual(t, values[i], point) - } - } - - if got, exp := len(r.Entries([]byte("cpu,host=A#!~#value"))), 2; got != exp { - t.Fatalf("block count mismatch: got %v, exp %v", got, exp) - } -} - -// Ensures that a compaction will properly rollover to a new file when the -// max keys per blocks is exceeded -func TestCompactor_CompactFull_MaxKeys(t *testing.T) { - // This test creates a lot of data and causes timeout failures for these envs - if testing.Short() || os.Getenv("CI") != "" || os.Getenv("GORACE") != "" { - t.Skip("Skipping max keys compaction test") 
- } - dir := t.TempDir() - - // write two files where the first contains a single key with the maximum - // number of full blocks that can fit in a TSM file - f1, f1Name := MustTSMWriter(t, dir, 1) - values := make([]tsm1.Value, 1000) - for i := 0; i < 65534; i++ { - values = values[:0] - for j := 0; j < 1000; j++ { - values = append(values, tsm1.NewValue(int64(i*1000+j), int64(1))) - } - if err := f1.Write([]byte("cpu,host=A#!~#value"), values); err != nil { - t.Fatalf("write tsm f1: %v", err) - } - } - if err := f1.WriteIndex(); err != nil { - t.Fatalf("write index f1: %v", err) - } - f1.Close() - - // Write a new file with 2 blocks that when compacted would exceed the max - // blocks - f2, f2Name := MustTSMWriter(t, dir, 2) - for i := 0; i < 2; i++ { - lastTimeStamp := values[len(values)-1].UnixNano() + 1 - values = values[:0] - for j := lastTimeStamp; j < lastTimeStamp+1000; j++ { - values = append(values, tsm1.NewValue(int64(j), int64(1))) - } - if err := f2.Write([]byte("cpu,host=A#!~#value"), values); err != nil { - t.Fatalf("write tsm f1: %v", err) - } - } - - if err := f2.WriteIndex(); err != nil { - t.Fatalf("write index f2: %v", err) - } - f2.Close() - - fs := &fakeFileStore{} - t.Cleanup(func() { fs.Close() }) - compactor := tsm1.NewCompactor() - compactor.Dir = dir - compactor.FileStore = fs - compactor.Open() - - // Compact both files, should get 2 files back - files, err := compactor.CompactFull([]string{f1Name, f2Name}, zap.NewNop()) - if err != nil { - t.Fatalf("unexpected error writing snapshot: %v", err) - } - - if got, exp := len(files), 2; got != exp { - t.Fatalf("files length mismatch: got %v, exp %v", got, exp) - } - - expGen, expSeq, err := tsm1.DefaultParseFileName(f2Name) - if err != nil { - t.Fatalf("unexpected error parsing file name: %v", err) - } - expSeq = expSeq + 1 - - gotGen, gotSeq, err := tsm1.DefaultParseFileName(files[0]) - if err != nil { - t.Fatalf("unexpected error parsing file name: %v", err) - } - - if gotGen != expGen { - t.Fatalf("wrong generation for new file: got %v, exp %v", gotGen, expGen) - } - - if gotSeq != expSeq { - t.Fatalf("wrong sequence for new file: got %v, exp %v", gotSeq, expSeq) - } -} - -// Tests that a single TSM file can be read and iterated over -func TestTSMKeyIterator_Single(t *testing.T) { - dir := t.TempDir() - - v1 := tsm1.NewValue(1, 1.1) - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {v1}, - } - - r := MustTSMReader(t, dir, 1, writes) - t.Cleanup(func() { r.Close() }) - - iter, err := newTSMKeyIterator(1, false, nil, r) - if err != nil { - t.Fatalf("unexpected error creating WALKeyIterator: %v", err) - } - t.Cleanup(func() { iter.Close() }) - - var readValues bool - for iter.Next() { - key, _, _, block, err := iter.Read() - if err != nil { - t.Fatalf("unexpected error read: %v", err) - } - - values, err := tsm1.DecodeBlock(block, nil) - if err != nil { - t.Fatalf("unexpected error decode: %v", err) - } - - if got, exp := string(key), "cpu,host=A#!~#value"; got != exp { - t.Fatalf("key mismatch: got %v, exp %v", got, exp) - } - - if got, exp := len(values), len(writes); got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } - - for _, v := range values { - readValues = true - assertValueEqual(t, v, v1) - } - } - - if !readValues { - t.Fatalf("failed to read any values") - } -} - -func newTSMKeyIterator(size int, fast bool, interrupt chan struct{}, readers ...*tsm1.TSMReader) (tsm1.KeyIterator, error) { - files := []string{} - for _, r := range readers { - files = append(files, 
r.Path()) - } - return tsm1.NewTSMBatchKeyIterator(size, fast, 0, interrupt, files, readers...) -} - -// Tests that duplicate point values are merged. There is only one case -// where this could happen and that is when a compaction completed and we replace -// the old TSM file with a new one and we crash just before deleting the old file. -// No data is lost but the same point time/value would exist in two files until -// compaction corrects it. -func TestTSMKeyIterator_Duplicate(t *testing.T) { - dir := t.TempDir() - - v1 := tsm1.NewValue(1, int64(1)) - v2 := tsm1.NewValue(1, int64(2)) - - writes1 := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {v1}, - } - - r1 := MustTSMReader(t, dir, 1, writes1) - t.Cleanup(func() { r1.Close() }) - - writes2 := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {v2}, - } - - r2 := MustTSMReader(t, dir, 2, writes2) - t.Cleanup(func() { r2.Close() }) - - iter, err := newTSMKeyIterator(1, false, nil, r1, r2) - if err != nil { - t.Fatalf("unexpected error creating WALKeyIterator: %v", err) - } - t.Cleanup(func() { iter.Close() }) - - var readValues bool - for iter.Next() { - key, _, _, block, err := iter.Read() - if err != nil { - t.Fatalf("unexpected error read: %v", err) - } - - values, err := tsm1.DecodeBlock(block, nil) - if err != nil { - t.Fatalf("unexpected error decode: %v", err) - } - - if got, exp := string(key), "cpu,host=A#!~#value"; got != exp { - t.Fatalf("key mismatch: got %v, exp %v", got, exp) - } - - if got, exp := len(values), 1; got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } - - readValues = true - assertValueEqual(t, values[0], v2) - } - - if !readValues { - t.Fatalf("failed to read any values") - } -} - -// Tests that deleted keys are not seen during iteration with -// TSM files. -func TestTSMKeyIterator_MultipleKeysDeleted(t *testing.T) { - dir := t.TempDir() - - v1 := tsm1.NewValue(2, int64(1)) - points1 := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {v1}, - } - - r1 := MustTSMReader(t, dir, 1, points1) - if e := r1.Delete([][]byte{[]byte("cpu,host=A#!~#value")}); nil != e { - t.Fatal(e) - } - t.Cleanup(func() { r1.Close() }) - - v2 := tsm1.NewValue(1, float64(1)) - v3 := tsm1.NewValue(1, float64(1)) - - points2 := map[string][]tsm1.Value{ - "cpu,host=A#!~#count": {v2}, - "cpu,host=B#!~#value": {v3}, - } - - r2 := MustTSMReader(t, dir, 2, points2) - t.Cleanup(func() { r2.Close() }) - r2.Delete([][]byte{[]byte("cpu,host=A#!~#count")}) - - iter, err := newTSMKeyIterator(1, false, nil, r1, r2) - if err != nil { - t.Fatalf("unexpected error creating WALKeyIterator: %v", err) - } - - var readValues bool - var data = []struct { - key string - value tsm1.Value - }{ - {"cpu,host=B#!~#value", v3}, - } - - for iter.Next() { - key, _, _, block, err := iter.Read() - if err != nil { - t.Fatalf("unexpected error read: %v", err) - } - - values, err := tsm1.DecodeBlock(block, nil) - if err != nil { - t.Fatalf("unexpected error decode: %v", err) - } - - if got, exp := string(key), data[0].key; got != exp { - t.Fatalf("key mismatch: got %v, exp %v", got, exp) - } - - if got, exp := len(values), 1; got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } - readValues = true - - assertValueEqual(t, values[0], data[0].value) - data = data[1:] - } - - if !readValues { - t.Fatalf("failed to read any values") - } -} - -// Tests that deleted keys are not seen during iteration with -// TSM files. 
-func TestTSMKeyIterator_SingleDeletes(t *testing.T) { - dir := t.TempDir() - - v1 := tsm1.NewValue(10, int64(1)) - v2 := tsm1.NewValue(20, int64(1)) - v3 := tsm1.NewValue(30, int64(1)) - v4 := tsm1.NewValue(40, int64(1)) - v5 := tsm1.NewValue(50, int64(1)) - v6 := tsm1.NewValue(60, int64(1)) - - points1 := map[string][]tsm1.Value{ - "cpu,host=0#!~#value": {v1, v2}, - "cpu,host=A#!~#value": {v5, v6}, - "cpu,host=B#!~#value": {v3, v4}, - "cpu,host=C#!~#value": {v1, v2}, - "cpu,host=D#!~#value": {v1, v2}, - } - - r1 := MustTSMReader(t, dir, 1, points1) - t.Cleanup(func() { r1.Close() }) - - if e := r1.DeleteRange([][]byte{[]byte("cpu,host=A#!~#value")}, 50, 50); nil != e { - t.Fatal(e) - } - if e := r1.DeleteRange([][]byte{[]byte("cpu,host=A#!~#value")}, 60, 60); nil != e { - t.Fatal(e) - } - if e := r1.DeleteRange([][]byte{[]byte("cpu,host=C#!~#value")}, 10, 10); nil != e { - t.Fatal(e) - } - if e := r1.DeleteRange([][]byte{[]byte("cpu,host=C#!~#value")}, 60, 60); nil != e { - t.Fatal(e) - } - if e := r1.DeleteRange([][]byte{[]byte("cpu,host=C#!~#value")}, 20, 20); nil != e { - t.Fatal(e) - } - - iter, err := newTSMKeyIterator(1, false, nil, r1) - if err != nil { - t.Fatalf("unexpected error creating WALKeyIterator: %v", err) - } - t.Cleanup(func() { iter.Close() }) - - var readValues int - var data = []struct { - key string - value tsm1.Value - }{ - {"cpu,host=0#!~#value", v1}, - {"cpu,host=B#!~#value", v3}, - {"cpu,host=D#!~#value", v1}, - } - - for iter.Next() { - key, _, _, block, err := iter.Read() - if err != nil { - t.Fatalf("unexpected error read: %v", err) - } - - values, err := tsm1.DecodeBlock(block, nil) - if err != nil { - t.Fatalf("unexpected error decode: %v", err) - } - - if exp, got := string(key), data[0].key; exp != got { - t.Fatalf("key mismatch: got %v, exp %v", exp, got) - } - - if exp, got := len(values), 2; exp != got { - t.Fatalf("values length mismatch: exp %v, got %v", exp, got) - } - readValues++ - - assertValueEqual(t, values[0], data[0].value) - data = data[1:] - } - - if exp, got := 3, readValues; exp != got { - t.Fatalf("failed to read expected values: exp %v, got %v", exp, got) - } -} - -// Tests that the TSMKeyIterator will abort if the interrupt channel is closed -func TestTSMKeyIterator_Abort(t *testing.T) { - dir := t.TempDir() - - v1 := tsm1.NewValue(1, 1.1) - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {v1}, - } - - r := MustTSMReader(t, dir, 1, writes) - t.Cleanup(func() { r.Close() }) - - intC := make(chan struct{}) - iter, err := newTSMKeyIterator(1, false, intC, r) - if err != nil { - t.Fatalf("unexpected error creating WALKeyIterator: %v", err) - } - t.Cleanup(func() { iter.Close() }) - - var aborted bool - for iter.Next() { - // Abort - close(intC) - - _, _, _, _, err := iter.Read() - if err == nil { - t.Fatalf("unexpected error read: %v", err) - } - aborted = err != nil - } - - if !aborted { - t.Fatalf("iteration not aborted") - } -} - -func TestCacheKeyIterator_Single(t *testing.T) { - v0 := tsm1.NewValue(1, 1.0) - - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {v0}, - } - - c := tsm1.NewCache(0, tsdb.EngineTags{}) - - for k, v := range writes { - if err := c.Write([]byte(k), v); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - } - - iter := tsm1.NewCacheKeyIterator(c, 1, nil) - var readValues bool - for iter.Next() { - key, _, _, block, err := iter.Read() - if err != nil { - t.Fatalf("unexpected error read: %v", err) - } - - values, err := tsm1.DecodeBlock(block, nil) - if err != 
nil { - t.Fatalf("unexpected error decode: %v", err) - } - - if got, exp := string(key), "cpu,host=A#!~#value"; got != exp { - t.Fatalf("key mismatch: got %v, exp %v", got, exp) - } - - if got, exp := len(values), len(writes); got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } - - for _, v := range values { - readValues = true - assertValueEqual(t, v, v0) - } - } - - if !readValues { - t.Fatalf("failed to read any values") - } -} - -func TestCacheKeyIterator_Chunked(t *testing.T) { - v0 := tsm1.NewValue(1, 1.0) - v1 := tsm1.NewValue(2, 2.0) - - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {v0, v1}, - } - - c := tsm1.NewCache(0, tsdb.EngineTags{}) - - for k, v := range writes { - if err := c.Write([]byte(k), v); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - } - - iter := tsm1.NewCacheKeyIterator(c, 1, nil) - var readValues bool - var chunk int - for iter.Next() { - key, _, _, block, err := iter.Read() - if err != nil { - t.Fatalf("unexpected error read: %v", err) - } - - values, err := tsm1.DecodeBlock(block, nil) - if err != nil { - t.Fatalf("unexpected error decode: %v", err) - } - - if got, exp := string(key), "cpu,host=A#!~#value"; got != exp { - t.Fatalf("key mismatch: got %v, exp %v", got, exp) - } - - if got, exp := len(values), 1; got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } - - for _, v := range values { - readValues = true - assertValueEqual(t, v, writes["cpu,host=A#!~#value"][chunk]) - } - chunk++ - } - - if !readValues { - t.Fatalf("failed to read any values") - } -} - -// Tests that the CacheKeyIterator will abort if the interrupt channel is closed -func TestCacheKeyIterator_Abort(t *testing.T) { - v0 := tsm1.NewValue(1, 1.0) - - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {v0}, - } - - c := tsm1.NewCache(0, tsdb.EngineTags{}) - - for k, v := range writes { - if err := c.Write([]byte(k), v); err != nil { - t.Fatalf("failed to write key foo to cache: %s", err.Error()) - } - } - - intC := make(chan struct{}) - - iter := tsm1.NewCacheKeyIterator(c, 1, intC) - - var aborted bool - for iter.Next() { - //Abort - close(intC) - - _, _, _, _, err := iter.Read() - if err == nil { - t.Fatalf("unexpected error read: %v", err) - } - aborted = err != nil - } - - if !aborted { - t.Fatalf("iteration not aborted") - } -} - -func TestDefaultPlanner_Plan_Min(t *testing.T) { - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return []tsm1.FileStat{ - { - Path: "01-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "02-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "03-1.tsm1", - Size: 251 * 1024 * 1024, - }, - } - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - tsm, pLen := cp.Plan(time.Now()) - if exp, got := 0, len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } -} - -// Ensure that if there are older files that can be compacted together but a newer -// file that is in a larger step, the older ones will get compacted. 
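// A minimal sketch of the generation-sequence naming scheme the planner tests
// below rely on: a name such as "01-04.tsm1" reads as generation 1, sequence 4.
// The CompactFull tests above expect a full compaction to keep the highest
// input generation and bump its sequence by one, and the planner groups files
// that share a generation number. parseGenSeq is a hypothetical helper for
// illustration; the real code goes through DefaultParseFileName.
func parseGenSeq(name string) (int, int, error) {
	var gen, seq int
	if _, err := fmt.Sscanf(filepath.Base(name), "%d-%d", &gen, &seq); err != nil {
		return 0, 0, fmt.Errorf("unexpected tsm file name %q: %v", name, err)
	}
	return gen, seq, nil
}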
-func TestDefaultPlanner_Plan_CombineSequence(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-04.tsm1", - Size: 128 * 1024 * 1024, - }, - { - Path: "02-04.tsm1", - Size: 128 * 1024 * 1024, - }, - { - Path: "03-04.tsm1", - Size: 128 * 1024 * 1024, - }, - { - Path: "04-04.tsm1", - Size: 128 * 1024 * 1024, - }, - { - Path: "06-02.tsm1", - Size: 67 * 1024 * 1024, - }, - { - Path: "07-02.tsm1", - Size: 128 * 1024 * 1024, - }, - { - Path: "08-01.tsm1", - Size: 251 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3]} - tsm, pLen := cp.Plan(time.Now()) - if exp, got := len(expFiles), len(tsm[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - - for i, p := range expFiles { - if got, exp := tsm[0][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } -} - -// Ensure that the planner grabs the smallest compaction step -func TestDefaultPlanner_Plan_MultipleGroups(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-04.tsm1", - Size: 64 * 1024 * 1024, - }, - { - Path: "02-04.tsm1", - Size: 64 * 1024 * 1024, - }, - { - Path: "03-04.tsm1", - Size: 64 * 1024 * 1024, - }, - { - Path: "04-04.tsm1", - Size: 129 * 1024 * 1024, - }, - { - Path: "05-04.tsm1", - Size: 129 * 1024 * 1024, - }, - { - Path: "06-04.tsm1", - Size: 129 * 1024 * 1024, - }, - { - Path: "07-04.tsm1", - Size: 129 * 1024 * 1024, - }, - { - Path: "08-04.tsm1", - Size: 129 * 1024 * 1024, - }, - { - Path: "09-04.tsm1", // should be skipped - Size: 129 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner(&fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration) - - expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3], - data[4], data[5], data[6], data[7]} - tsm, pLen := cp.Plan(time.Now()) - - if got, exp := len(tsm), 2; got != exp { - t.Fatalf("compaction group length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - - if exp, got := len(expFiles[:4]), len(tsm[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } - - if exp, got := len(expFiles[4:]), len(tsm[1]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } - - for i, p := range expFiles[:4] { - if got, exp := tsm[0][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } - - for i, p := range expFiles[4:] { - if got, exp := tsm[1][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } -} - -// Ensure that the planner grabs the smallest compaction step -func TestDefaultPlanner_PlanLevel_SmallestCompactionStep(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-03.tsm1", - Size: 251 * 1024 * 1024, - }, - { - Path: "02-03.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "03-03.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "04-03.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "05-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "06-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "07-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "08-01.tsm1", - Size: 1 * 1024 * 1024, - }, 
- { - Path: "09-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "10-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "11-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "12-01.tsm1", - Size: 1 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - expFiles := []tsm1.FileStat{data[4], data[5], data[6], data[7], data[8], data[9], data[10], data[11]} - tsm, pLen := cp.PlanLevel(1) - if exp, got := len(expFiles), len(tsm[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - - for i, p := range expFiles { - if got, exp := tsm[0][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } -} - -func TestDefaultPlanner_PlanLevel_SplitFile(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-03.tsm1", - Size: 251 * 1024 * 1024, - }, - { - Path: "02-03.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "03-03.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - { - Path: "03-04.tsm1", - Size: 10 * 1024 * 1024, - }, - { - Path: "04-03.tsm1", - Size: 10 * 1024 * 1024, - }, - { - Path: "05-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "06-01.tsm1", - Size: 1 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3], data[4]} - tsm, pLen := cp.PlanLevel(3) - if exp, got := len(expFiles), len(tsm[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - - for i, p := range expFiles { - if got, exp := tsm[0][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } -} - -func TestDefaultPlanner_PlanLevel_IsolatedHighLevel(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-02.tsm1", - Size: 251 * 1024 * 1024, - }, - { - Path: "02-02.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "03-03.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - { - Path: "03-04.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - { - Path: "04-02.tsm1", - Size: 10 * 1024 * 1024, - }, - { - Path: "05-02.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "06-02.tsm1", - Size: 1 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - expFiles := []tsm1.FileStat{} - tsm, pLen := cp.PlanLevel(3) - if exp, got := len(expFiles), len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } -} - -func TestDefaultPlanner_PlanLevel3_MinFiles(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-03.tsm1", - Size: 251 * 1024 * 1024, - }, - { - Path: "02-03.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "03-01.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - { - Path: "04-01.tsm1", - Size: 10 * 1024 * 1024, - }, - { - Path: "05-02.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "06-01.tsm1", - Size: 1 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() 
[]tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - expFiles := []tsm1.FileStat{} - tsm, pLen := cp.PlanLevel(3) - if exp, got := len(expFiles), len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } -} - -func TestDefaultPlanner_PlanLevel2_MinFiles(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "02-04.tsm1", - Size: 251 * 1024 * 1024, - }, - - { - Path: "03-02.tsm1", - Size: 251 * 1024 * 1024, - }, - { - Path: "03-03.tsm1", - Size: 1 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - expFiles := []tsm1.FileStat{} - tsm, pLen := cp.PlanLevel(2) - if exp, got := len(expFiles), len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } -} - -func TestDefaultPlanner_PlanLevel_Tombstone(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-03.tsm1", - Size: 251 * 1024 * 1024, - HasTombstone: true, - }, - { - Path: "02-03.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "03-01.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - { - Path: "04-01.tsm1", - Size: 10 * 1024 * 1024, - }, - { - Path: "05-02.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "06-01.tsm1", - Size: 1 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - expFiles := []tsm1.FileStat{data[0], data[1]} - tsm, pLen := cp.PlanLevel(3) - if exp, got := len(expFiles), len(tsm[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - - for i, p := range expFiles { - if got, exp := tsm[0][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } -} - -func TestDefaultPlanner_PlanLevel_Multiple(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-01.tsm1", - Size: 251 * 1024 * 1024, - }, - { - Path: "02-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "03-01.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - { - Path: "04-01.tsm1", - Size: 10 * 1024 * 1024, - }, - { - Path: "05-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "06-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "07-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "08-01.tsm1", - Size: 1 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - expFiles1 := []tsm1.FileStat{data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]} - - tsm, pLen := cp.PlanLevel(1) - if exp, got := len(expFiles1), len(tsm[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - - for i, p := range expFiles1 { - if got, exp := tsm[0][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } -} - -func TestDefaultPlanner_PlanLevel_InUse(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-01.tsm1", - 
Size: 251 * 1024 * 1024, - }, - { - Path: "02-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "03-01.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - { - Path: "04-01.tsm1", - Size: 10 * 1024 * 1024, - }, - { - Path: "05-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "06-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "07-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "08-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "09-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "10-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "11-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "12-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "13-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "14-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "15-01.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "16-01.tsm1", - Size: 1 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - expFiles1 := data[0:8] - expFiles2 := data[8:16] - - tsm, pLen := cp.PlanLevel(1) - if exp, got := len(expFiles1), len(tsm[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - - for i, p := range expFiles1 { - if got, exp := tsm[0][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } - - if exp, got := len(expFiles2), len(tsm[1]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } - - for i, p := range expFiles2 { - if got, exp := tsm[1][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } - - cp.Release(tsm[1:]) - - tsm, pLen = cp.PlanLevel(1) - if exp, got := len(expFiles2), len(tsm[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - - for i, p := range expFiles2 { - if got, exp := tsm[0][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } -} - -func TestDefaultPlanner_PlanOptimize_NoLevel4(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-03.tsm1", - Size: 251 * 1024 * 1024, - }, - { - Path: "02-03.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "03-03.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - expFiles := []tsm1.FileStat{} - tsm, pLen := cp.PlanOptimize() - if exp, got := len(expFiles), len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } -} - -func TestDefaultPlanner_PlanOptimize_Level4(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-04.tsm1", - Size: 251 * 1024 * 1024, - }, - { - Path: "02-04.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "03-04.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "04-04.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "05-03.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - { - Path: "06-04.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - { - Path: "07-03.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() 
[]tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - expFiles1 := []tsm1.FileStat{data[0], data[1], data[2], data[3], data[4], data[5]} - tsm, pLen := cp.PlanOptimize() - if exp, got := 1, len(tsm); exp != got { - t.Fatalf("group length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - - if exp, got := len(expFiles1), len(tsm[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } - - for i, p := range expFiles1 { - if got, exp := tsm[0][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } -} - -func TestDefaultPlanner_PlanOptimize_Multiple(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-04.tsm1", - Size: 251 * 1024 * 1024, - }, - { - Path: "02-04.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "03-04.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "04-04.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "05-03.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - { - Path: "06-03.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - { - Path: "07-04.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - { - Path: "08-04.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - { - Path: "09-04.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - { - Path: "10-04.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - expFiles1 := []tsm1.FileStat{data[0], data[1], data[2], data[3]} - expFiles2 := []tsm1.FileStat{data[6], data[7], data[8], data[9]} - - tsm, pLen := cp.PlanOptimize() - if exp, got := 2, len(tsm); exp != got { - t.Fatalf("group length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - - if exp, got := len(expFiles1), len(tsm[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } - - for i, p := range expFiles1 { - if got, exp := tsm[0][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } - - if exp, got := len(expFiles2), len(tsm[1]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } - - for i, p := range expFiles2 { - if got, exp := tsm[1][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } -} - -func TestDefaultPlanner_PlanOptimize_Optimized(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-03.tsm1", - Size: 251 * 1024 * 1024, - }, - { - Path: "01-04.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "01-05.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - expFiles := []tsm1.FileStat{} - tsm, pLen := cp.PlanOptimize() - if exp, got := len(expFiles), len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } -} - -func TestDefaultPlanner_PlanOptimize_Tombstones(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-04.tsm1", - Size: 251 * 1024 * 1024, - }, - { - Path: "01-05.tsm1", - Size: 1 * 1024 * 1024, - HasTombstone: true, - }, - { - Path: "02-06.tsm1", - Size: 2 * 1024 * 1024 * 1024, - }, - } - - cp 
:= tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - expFiles := []tsm1.FileStat{data[0], data[1], data[2]} - tsm, pLen := cp.PlanOptimize() - if exp, got := len(expFiles), len(tsm[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - - for i, p := range expFiles { - if got, exp := tsm[0][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } - -} - -// Ensure that the planner will compact all files if no writes -// have happened in some interval -func TestDefaultPlanner_Plan_FullOnCold(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-01.tsm1", - Size: 513 * 1024 * 1024, - }, - { - Path: "02-02.tsm1", - Size: 129 * 1024 * 1024, - }, - { - Path: "03-02.tsm1", - Size: 33 * 1024 * 1024, - }, - { - Path: "04-02.tsm1", - Size: 1 * 1024 * 1024, - }, - { - Path: "05-02.tsm1", - Size: 10 * 1024 * 1024, - }, - { - Path: "06-01.tsm1", - Size: 2 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, - time.Nanosecond, - ) - - tsm, pLen := cp.Plan(time.Now().Add(-time.Second)) - if exp, got := len(data), len(tsm[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - - for i, p := range data { - if got, exp := tsm[0][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } -} - -// Ensure that the planner will not return files that are over the max -// allowable size -func TestDefaultPlanner_Plan_SkipMaxSizeFiles(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-01.tsm1", - Size: 2049 * 1024 * 1024, - }, - { - Path: "02-02.tsm1", - Size: 2049 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - tsm, pLen := cp.Plan(time.Now()) - if exp, got := 0, len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } -} - -// Ensure that the planner will not return files that are over the max -// allowable size -func TestDefaultPlanner_Plan_SkipPlanningAfterFull(t *testing.T) { - testSet := []tsm1.FileStat{ - { - Path: "01-05.tsm1", - Size: 256 * 1024 * 1024, - }, - { - Path: "02-05.tsm1", - Size: 256 * 1024 * 1024, - }, - { - Path: "03-05.tsm1", - Size: 256 * 1024 * 1024, - }, - { - Path: "04-04.tsm1", - Size: 256 * 1024 * 1024, - }, - } - - fs := &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return testSet - }, - blockCount: 1000, - } - - cp := tsm1.NewDefaultPlanner(fs, time.Nanosecond) - plan, pLen := cp.Plan(time.Now().Add(-time.Second)) - // first verify that our test set would return files - if exp, got := 4, len(plan[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(plan)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - cp.Release(plan) - - // skip planning if all files are over the limit - over := []tsm1.FileStat{ - { - Path: "01-05.tsm1", - Size: 2049 * 1024 * 1024, - }, - { - Path: 
"02-05.tsm1", - Size: 2049 * 1024 * 1024, - }, - { - Path: "03-05.tsm1", - Size: 2049 * 1024 * 1024, - }, - { - Path: "04-05.tsm1", - Size: 2049 * 1024 * 1024, - }, - { - Path: "05-05.tsm1", - Size: 2049 * 1024 * 1024, - }, - } - - overFs := &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return over - }, - blockCount: 1000, - } - - cp.FileStore = overFs - plan, pLen = cp.Plan(time.Now().Add(-time.Second)) - if exp, got := 0, len(plan); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(plan)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - cp.Release(plan) - - plan, pLen = cp.PlanOptimize() - // ensure the optimize planner would pick this up - if exp, got := 1, len(plan); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(plan)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - cp.Release(plan) - - cp.FileStore = fs - // ensure that it will plan if last modified has changed - fs.lastModified = time.Now() - - cGroups, pLen := cp.Plan(time.Now()) - if exp, got := 4, len(cGroups[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(cGroups)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } -} - -// Tests that 2 generations, each over 2 GB and the second in level 2 does -// not return just the first generation. This was a case where full planning -// would get repeatedly plan the same files and never stop. -func TestDefaultPlanner_Plan_TwoGenLevel3(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "000002245-000001666.tsm", - Size: 2049 * 1024 * 1024, - }, - { - Path: "000002245-000001667.tsm", - Size: 2049 * 1024 * 1024, - }, - { - Path: "000002245-000001668.tsm", - Size: 2049 * 1024 * 1024, - }, - { - Path: "000002245-000001669.tsm", - Size: 2049 * 1024 * 1024, - }, - { - Path: "000002245-000001670.tsm", - Size: 2049 * 1024 * 1024, - }, - { - Path: "000002245-000001671.tsm", - Size: 2049 * 1024 * 1024, - }, - { - Path: "000002245-000001672.tsm", - Size: 2049 * 1024 * 1024, - }, - { - Path: "000002245-000001673.tsm", - Size: 192631258, - }, - { - Path: "000002246-000000002.tsm", - Size: 2049 * 1024 * 1024, - }, - { - Path: "000002246-000000003.tsm", - Size: 192631258, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - blockCount: 1000, - PathsFn: func() []tsm1.FileStat { - return data - }, - }, - time.Hour) - - tsm, pLen := cp.Plan(time.Now().Add(-24 * time.Hour)) - if exp, got := 1, len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } -} - -// Ensure that the planner will return files over the max file -// size, but do not contain full blocks -func TestDefaultPlanner_Plan_NotFullOverMaxsize(t *testing.T) { - testSet := []tsm1.FileStat{ - { - Path: "01-05.tsm1", - Size: 256 * 1024 * 1024, - }, - { - Path: "02-05.tsm1", - Size: 256 * 1024 * 1024, - }, - { - Path: "03-05.tsm1", - Size: 256 * 1024 * 1024, - }, - { - Path: "04-04.tsm1", - Size: 256 * 1024 * 1024, - }, - } - - fs := &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return testSet - }, - blockCount: 100, - } - - cp := tsm1.NewDefaultPlanner( - fs, - time.Nanosecond, - ) - - plan, pLen := cp.Plan(time.Now().Add(-time.Second)) - // first verify that our test set would return files - if exp, got := 4, 
len(plan[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(plan)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - cp.Release(plan) - - // skip planning if all files are over the limit - over := []tsm1.FileStat{ - { - Path: "01-05.tsm1", - Size: 2049 * 1024 * 1024, - }, - { - Path: "02-05.tsm1", - Size: 2049 * 1024 * 1024, - }, - } - - overFs := &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return over - }, - blockCount: 100, - } - - cp.FileStore = overFs - cGroups, pLen := cp.Plan(time.Now().Add(-time.Second)) - if exp, got := 1, len(cGroups); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(cGroups)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } -} - -// Ensure that the planner will compact files that are past the smallest step -// size even if there is a single file in the smaller step size -func TestDefaultPlanner_Plan_CompactsMiddleSteps(t *testing.T) { - data := []tsm1.FileStat{ - { - Path: "01-04.tsm1", - Size: 64 * 1024 * 1024, - }, - { - Path: "02-04.tsm1", - Size: 64 * 1024 * 1024, - }, - { - Path: "03-04.tsm1", - Size: 64 * 1024 * 1024, - }, - { - Path: "04-04.tsm1", - Size: 64 * 1024 * 1024, - }, - { - Path: "05-02.tsm1", - Size: 2 * 1024 * 1024, - }, - } - - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return data - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - expFiles := []tsm1.FileStat{data[0], data[1], data[2], data[3]} - tsm, pLen := cp.Plan(time.Now()) - if exp, got := len(expFiles), len(tsm[0]); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - - for i, p := range expFiles { - if got, exp := tsm[0][i], p.Path; got != exp { - t.Fatalf("tsm file mismatch: got %v, exp %v", got, exp) - } - } -} - -func TestDefaultPlanner_Plan_LargeGeneration(t *testing.T) { - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return []tsm1.FileStat{ - { - Path: "000000278-000000006.tsm", - Size: 2148340232, - }, - { - Path: "000000278-000000007.tsm", - Size: 2148356556, - }, - { - Path: "000000278-000000008.tsm", - Size: 167780181, - }, - { - Path: "000000278-000047040.tsm", - Size: 2148728539, - }, - { - Path: "000000278-000047041.tsm", - Size: 701863692, - }, - } - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - tsm, pLen := cp.Plan(time.Now()) - if exp, got := 0, len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } -} - -func TestDefaultPlanner_Plan_ForceFull(t *testing.T) { - cp := tsm1.NewDefaultPlanner( - &fakeFileStore{ - PathsFn: func() []tsm1.FileStat { - return []tsm1.FileStat{ - { - Path: "000000001-000000001.tsm", - Size: 2148340232, - }, - { - Path: "000000002-000000001.tsm", - Size: 2148356556, - }, - { - Path: "000000003-000000001.tsm", - Size: 167780181, - }, - { - Path: "000000004-000000001.tsm", - Size: 2148728539, - }, - { - Path: "000000005-000000001.tsm", - Size: 2148340232, - }, - { - Path: "000000006-000000001.tsm", - Size: 2148356556, - }, - { - Path: "000000007-000000001.tsm", - Size: 167780181, - }, - { - Path: "000000008-000000001.tsm", - Size: 2148728539, - }, - { - Path: 
"000000009-000000002.tsm", - Size: 701863692, - }, - { - Path: "000000010-000000002.tsm", - Size: 701863692, - }, - { - Path: "000000011-000000002.tsm", - Size: 701863692, - }, - { - Path: "000000012-000000002.tsm", - Size: 701863692, - }, - { - Path: "000000013-000000002.tsm", - Size: 701863692, - }, - } - }, - }, tsdb.DefaultCompactFullWriteColdDuration, - ) - - tsm, pLen := cp.PlanLevel(1) - if exp, got := 1, len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - cp.Release(tsm) - - tsm, pLen = cp.PlanLevel(2) - if exp, got := 1, len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - cp.Release(tsm) - - cp.ForceFull() - - // Level plans should not return any plans - tsm, pLen = cp.PlanLevel(1) - if exp, got := 0, len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - cp.Release(tsm) - - tsm, pLen = cp.PlanLevel(2) - if exp, got := 0, len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - cp.Release(tsm) - - tsm, pLen = cp.Plan(time.Now()) - if exp, got := 1, len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - - if got, exp := len(tsm[0]), 13; got != exp { - t.Fatalf("plan length mismatch: got %v, exp %v", got, exp) - } - cp.Release(tsm) - - // Level plans should return plans now that Plan has been called - tsm, pLen = cp.PlanLevel(1) - if exp, got := 1, len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - cp.Release(tsm) - - tsm, pLen = cp.PlanLevel(2) - if exp, got := 1, len(tsm); got != exp { - t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) - } else if pLen != int64(len(tsm)) { - t.Fatalf("tsm file plan length mismatch: got %v, exp %v", pLen, exp) - } - cp.Release(tsm) - -} - -func assertValueEqual(t *testing.T, a, b tsm1.Value) { - if got, exp := a.UnixNano(), b.UnixNano(); got != exp { - t.Fatalf("time mismatch: got %v, exp %v", got, exp) - } - if got, exp := a.Value(), b.Value(); got != exp { - t.Fatalf("value mismatch: got %v, exp %v", got, exp) - } -} - -func MustTSMWriter(tb testing.TB, dir string, gen int) (tsm1.TSMWriter, string) { - f := MustTempFile(tb, dir) - oldName := f.Name() - - // Windows can't rename a file while it's open. 
Close first, rename and - // then re-open - if err := f.Close(); err != nil { - panic(fmt.Sprintf("close temp file: %v", err)) - } - - newName := filepath.Join(filepath.Dir(oldName), tsm1.DefaultFormatFileName(gen, 1)+".tsm") - if err := os.Rename(oldName, newName); err != nil { - panic(fmt.Sprintf("create tsm file: %v", err)) - } - - var err error - f, err = os.OpenFile(newName, os.O_RDWR, 0666) - if err != nil { - panic(fmt.Sprintf("open tsm files: %v", err)) - } - - w, err := tsm1.NewTSMWriter(f) - if err != nil { - panic(fmt.Sprintf("create TSM writer: %v", err)) - } - - return w, newName -} - -func MustWriteTSM(tb testing.TB, dir string, gen int, values map[string][]tsm1.Value) string { - w, name := MustTSMWriter(tb, dir, gen) - - keys := make([]string, 0, len(values)) - for k := range values { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - if err := w.Write([]byte(k), values[k]); err != nil { - panic(fmt.Sprintf("write TSM value: %v", err)) - } - } - - if err := w.WriteIndex(); err != nil { - panic(fmt.Sprintf("write TSM index: %v", err)) - } - - if err := w.Close(); err != nil { - panic(fmt.Sprintf("write TSM close: %v", err)) - } - - return name -} - -func MustTSMReader(tb testing.TB, dir string, gen int, values map[string][]tsm1.Value) *tsm1.TSMReader { - return MustOpenTSMReader(MustWriteTSM(tb, dir, gen, values)) -} - -func MustOpenTSMReader(name string) *tsm1.TSMReader { - f, err := os.Open(name) - if err != nil { - panic(fmt.Sprintf("open file: %v", err)) - } - - r, err := tsm1.NewTSMReader(f) - if err != nil { - panic(fmt.Sprintf("new reader: %v", err)) - } - return r -} - -type fakeFileStore struct { - PathsFn func() []tsm1.FileStat - lastModified time.Time - blockCount int - readers []*tsm1.TSMReader -} - -func (w *fakeFileStore) Stats() []tsm1.FileStat { - return w.PathsFn() -} - -func (w *fakeFileStore) NextGeneration() int { - return 1 -} - -func (w *fakeFileStore) LastModified() time.Time { - return w.lastModified -} - -func (w *fakeFileStore) BlockCount(path string, idx int) int { - return w.blockCount -} - -func (w *fakeFileStore) TSMReader(path string) *tsm1.TSMReader { - r := MustOpenTSMReader(path) - w.readers = append(w.readers, r) - r.Ref() - return r -} - -func (w *fakeFileStore) Close() { - for _, r := range w.readers { - r.Close() - } - w.readers = nil -} - -func (w *fakeFileStore) ParseFileName(path string) (int, int, error) { - return tsm1.DefaultParseFileName(path) -} diff --git a/tsdb/engine/tsm1/digest.go b/tsdb/engine/tsm1/digest.go deleted file mode 100644 index d66dc9fb343..00000000000 --- a/tsdb/engine/tsm1/digest.go +++ /dev/null @@ -1,258 +0,0 @@ -package tsm1 - -import ( - "bytes" - "fmt" - "io" - "math" - "os" - "path/filepath" - "sort" - "time" -) - -const ( - DigestFilename = "digest.tsd" -) - -type DigestOptions struct { - MinTime, MaxTime int64 - MinKey, MaxKey []byte -} - -// DigestWithOptions writes a digest of dir to w using options to filter by -// time and key range. 
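A rough usage sketch may help here; the helper name and shard-directory argument below are hypothetical, and error handling is reduced to plain returns (os, path/filepath and math are already imported by this file):

func writeShardDigest(shardDir string, files []string) error {
	out, err := os.Create(filepath.Join(shardDir, DigestFilename))
	if err != nil {
		return err
	}
	// An unbounded time range digests every block; narrow MinTime/MaxTime or
	// MinKey/MaxKey to scope the digest. DigestWithOptions closes out when it finishes.
	opts := DigestOptions{MinTime: math.MinInt64, MaxTime: math.MaxInt64}
	return DigestWithOptions(shardDir, files, opts, out)
}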
-func DigestWithOptions(dir string, files []string, opts DigestOptions, w io.WriteCloser) (err error) { - manifest, err := NewDigestManifest(dir, files) - if err != nil { - return err - } - - tsmFiles := make([]TSMFile, 0, len(files)) - defer func() { - for _, r := range tsmFiles { - if e := r.Close(); e != nil && err == nil { - err = e - } - } - }() - - readers := make([]*TSMReader, 0, len(files)) - for _, fi := range files { - f, err := os.Open(fi) - if err != nil { - return err - } - - r, err := NewTSMReader(f) - if err != nil { - return err - } - readers = append(readers, r) - tsmFiles = append(tsmFiles, r) - } - - dw, err := NewDigestWriter(w) - if err != nil { - return err - } - defer func() { - if e := dw.Close(); e != nil && err == nil { - err = e - } - }() - - // Write the manifest. - if err := dw.WriteManifest(manifest); err != nil { - return err - } - - // Write the digest data. - var n int - ki := newMergeKeyIterator(tsmFiles, nil) - for ki.Next() { - key, _ := ki.Read() - if len(opts.MinKey) > 0 && bytes.Compare(key, opts.MinKey) < 0 { - continue - } - - if len(opts.MaxKey) > 0 && bytes.Compare(key, opts.MaxKey) > 0 { - continue - } - - ts := &DigestTimeSpan{} - n++ - kstr := string(key) - - for _, r := range readers { - entries := r.Entries(key) - for _, entry := range entries { - crc, b, err := r.ReadBytes(&entry, nil) - if err != nil { - return err - } - - // Filter blocks that are outside the time filter. If they overlap, we - // still include them. - if entry.MaxTime < opts.MinTime || entry.MinTime > opts.MaxTime { - continue - } - - cnt, err := BlockCount(b) - if err != nil { - return err - } - - ts.Add(entry.MinTime, entry.MaxTime, cnt, crc) - } - } - - sort.Sort(ts) - if err := dw.WriteTimeSpan(kstr, ts); err != nil { - return err - } - } - return nil -} - -// Digest writes a digest of dir to w of a full shard dir. -func Digest(dir string, files []string, w io.WriteCloser) error { - return DigestWithOptions(dir, files, DigestOptions{ - MinTime: math.MinInt64, - MaxTime: math.MaxInt64, - }, w) -} - -// DigestFresh returns true if digest cached in dir is still fresh and returns -// false if it is stale. If the digest is stale, a string description of the -// reason is also returned. files is a list of filenames the caller expects the -// digest to contain, usually from the engine's FileStore. -func DigestFresh(dir string, files []string, shardLastMod time.Time) (bool, string) { - // Open the digest file. - digestPath := filepath.Join(dir, DigestFilename) - f, err := os.Open(digestPath) - if err != nil { - return false, fmt.Sprintf("Can't open digest file: %s", err) - } - defer f.Close() - - // Get digest file info. - digest, err := f.Stat() - if err != nil { - return false, fmt.Sprintf("Can't stat digest file: %s", err) - } - - // See if shard was modified after digest was generated. - if shardLastMod.After(digest.ModTime()) { - return false, fmt.Sprintf("Shard modified: shard_time=%v, digest_time=%v", shardLastMod, digest.ModTime()) - } - - // Read the manifest from the digest file. - dr, err := NewDigestReader(f) - if err != nil { - return false, fmt.Sprintf("Can't read digest: err=%s", err) - } - defer dr.Close() - - mfest, err := dr.ReadManifest() - if err != nil { - return false, fmt.Sprintf("Can't read manifest: err=%s", err) - } - - // Make sure the digest file belongs to this shard. - if mfest.Dir != dir { - return false, fmt.Sprintf("Digest belongs to another shard. 
Manually copied?: manifest_dir=%s, shard_dir=%s", mfest.Dir, dir) - } - - // See if the number of tsm files matches what's listed in the manifest. - if len(files) != len(mfest.Entries) { - return false, fmt.Sprintf("Number of tsm files differ: engine=%d, manifest=%d", len(files), len(mfest.Entries)) - } - - // See if all the tsm files match the manifest. - sort.Strings(files) - for i, tsmname := range files { - entry := mfest.Entries[i] - - // Check filename. - if tsmname != entry.Filename { - return false, fmt.Sprintf("Names don't match: manifest_entry=%d, engine_name=%s, manifest_name=%s", i, tsmname, entry.Filename) - } - - // Get tsm file info. - tsm, err := os.Stat(tsmname) - if err != nil { - return false, fmt.Sprintf("Can't stat tsm file: manifest_entry=%d, path=%s", i, tsmname) - } - - // See if tsm file size has changed. - if tsm.Size() != entry.Size { - return false, fmt.Sprintf("TSM file size changed: manifest_entry=%d, path=%s, tsm=%d, manifest=%d", i, tsmname, tsm.Size(), entry.Size) - } - - // See if tsm file was modified after the digest was created. This should be - // covered by the engine mod time check above but we'll check each file to - // be sure. It's better to regenerate the digest than use a stale one. - if tsm.ModTime().After(digest.ModTime()) { - return false, fmt.Sprintf("TSM file modified: manifest_entry=%d, path=%s, tsm_time=%v, digest_time=%v", i, tsmname, tsm.ModTime(), digest.ModTime()) - } - } - - // Digest is fresh. - return true, "" -} - -// DigestManifest contains a list of tsm files used to generate a digest -// and information about those files which can be used to verify the -// associated digest file is still valid. -type DigestManifest struct { - // Dir is the directory path this manifest describes. - Dir string `json:"dir"` - // Entries is a list of files used to generate a digest. - Entries DigestManifestEntries `json:"entries"` -} - -// NewDigestManifest creates a digest manifest for a shard directory and list -// of tsm files from that directory. -func NewDigestManifest(dir string, files []string) (*DigestManifest, error) { - mfest := &DigestManifest{ - Dir: dir, - Entries: make([]*DigestManifestEntry, len(files)), - } - - for i, name := range files { - fi, err := os.Stat(name) - if err != nil { - return nil, err - } - mfest.Entries[i] = NewDigestManifestEntry(name, fi.Size()) - } - - sort.Sort(mfest.Entries) - - return mfest, nil -} - -type DigestManifestEntry struct { - // Filename is the name of one .tsm file used in digest generation. - Filename string `json:"filename"` - // Size is the size, in bytes, of the .tsm file. - Size int64 `json:"size"` -} - -// NewDigestManifestEntry creates a digest manifest entry initialized with a -// tsm filename and its size. -func NewDigestManifestEntry(filename string, size int64) *DigestManifestEntry { - return &DigestManifestEntry{ - Filename: filename, - Size: size, - } -} - -// DigestManifestEntries is a list of entries in a manifest file, ordered by -// tsm filename. 
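A hedged sketch of how DigestFresh and Digest might be combined by a caller; the helper name is invented for illustration, and the real call sites live in the engine code:

func ensureDigest(shardDir string, files []string, shardLastMod time.Time) error {
	if fresh, _ := DigestFresh(shardDir, files, shardLastMod); fresh {
		return nil // the cached digest.tsd is still valid
	}
	// Stale or missing digest: regenerate it over the full time range.
	out, err := os.Create(filepath.Join(shardDir, DigestFilename))
	if err != nil {
		return err
	}
	return Digest(shardDir, files, out)
}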
-type DigestManifestEntries []*DigestManifestEntry - -func (a DigestManifestEntries) Len() int { return len(a) } -func (a DigestManifestEntries) Less(i, j int) bool { return a[i].Filename < a[j].Filename } -func (a DigestManifestEntries) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/tsdb/engine/tsm1/digest_reader.go b/tsdb/engine/tsm1/digest_reader.go deleted file mode 100644 index b4e2b68ac12..00000000000 --- a/tsdb/engine/tsm1/digest_reader.go +++ /dev/null @@ -1,97 +0,0 @@ -package tsm1 - -import ( - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "io" - - "github.com/golang/snappy" -) - -var ( - // ErrDigestManifestAlreadyRead is returned if the client attempts to read - // a manifest from a digest more than once on the same reader. - ErrDigestManifestAlreadyRead = errors.New("digest manifest already read") -) - -type DigestReader struct { - r io.ReadCloser - sr *snappy.Reader - manifestRead bool -} - -func NewDigestReader(r io.ReadCloser) (*DigestReader, error) { - return &DigestReader{r: r, sr: snappy.NewReader(r)}, nil -} - -func (r *DigestReader) ReadManifest() (*DigestManifest, error) { - if r.manifestRead { - return nil, ErrDigestManifestAlreadyRead - } - - var n uint32 - // Read manifest length. - if err := binary.Read(r.sr, binary.BigEndian, &n); err != nil { - return nil, err - } - - lr := io.LimitReader(r.sr, int64(n)) - - m := &DigestManifest{} - if err := json.NewDecoder(lr).Decode(m); err != nil { - return nil, err - } - - r.manifestRead = true - - return m, nil -} - -func (r *DigestReader) ReadTimeSpan() (string, *DigestTimeSpan, error) { - if !r.manifestRead { - if _, err := r.ReadManifest(); err != nil { - return "", nil, err - } - } - - var n uint16 - if err := binary.Read(r.sr, binary.BigEndian, &n); err != nil { - return "", nil, err - } - - b := make([]byte, n) - if _, err := io.ReadFull(r.sr, b); err != nil { - return "", nil, err - } - - var cnt uint32 - if err := binary.Read(r.sr, binary.BigEndian, &cnt); err != nil { - return "", nil, err - } - - ts := &DigestTimeSpan{} - ts.Ranges = make([]DigestTimeRange, cnt) - for i := 0; i < int(cnt); i++ { - var buf [22]byte - - n, err := io.ReadFull(r.sr, buf[:]) - if err != nil { - return "", nil, err - } else if n != len(buf) { - return "", nil, fmt.Errorf("read %d bytes, expected %d, data %v", n, len(buf), buf[:n]) - } - - ts.Ranges[i].Min = int64(binary.BigEndian.Uint64(buf[0:])) - ts.Ranges[i].Max = int64(binary.BigEndian.Uint64(buf[8:])) - ts.Ranges[i].CRC = binary.BigEndian.Uint32(buf[16:]) - ts.Ranges[i].N = int(binary.BigEndian.Uint16(buf[20:])) - } - - return string(b), ts, nil -} - -func (r *DigestReader) Close() error { - return r.r.Close() -} diff --git a/tsdb/engine/tsm1/digest_test.go b/tsdb/engine/tsm1/digest_test.go deleted file mode 100644 index 0e3d1b45d87..00000000000 --- a/tsdb/engine/tsm1/digest_test.go +++ /dev/null @@ -1,475 +0,0 @@ -package tsm1_test - -import ( - "fmt" - "io" - "os" - "path/filepath" - "reflect" - "sort" - "testing" - - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -func TestDigest_None(t *testing.T) { - dir := t.TempDir() - dataDir := filepath.Join(dir, "data") - if err := os.Mkdir(dataDir, 0755); err != nil { - t.Fatalf("create data dir: %v", err) - } - - df := MustTempFile(t, dir) - - files := []string{} - if err := tsm1.Digest(dir, files, df); err != nil { - t.Fatalf("digest error: %v", err) - } - - df, err := os.Open(df.Name()) - if err != nil { - t.Fatalf("open error: %v", err) - } - - r, err := tsm1.NewDigestReader(df) - if 
err != nil { - t.Fatalf("NewDigestReader error: %v", err) - } - defer r.Close() - - mfest, err := r.ReadManifest() - if err != nil { - t.Fatal(err) - } - - if len(mfest.Entries) != 0 { - t.Fatalf("exp: 0, got: %d", len(mfest.Entries)) - } - - var count int - for { - _, _, err := r.ReadTimeSpan() - if err == io.EOF { - break - } - - count++ - } - - if got, exp := count, 0; got != exp { - t.Fatalf("count mismatch: got %v, exp %v", got, exp) - } -} - -func TestDigest_One(t *testing.T) { - dir := t.TempDir() - dataDir := filepath.Join(dir, "data") - if err := os.Mkdir(dataDir, 0755); err != nil { - t.Fatalf("create data dir: %v", err) - } - - a1 := tsm1.NewValue(1, 1.1) - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": []tsm1.Value{a1}, - } - MustWriteTSM(t, dir, 1, writes) - - files, err := filepath.Glob(filepath.Join(dir, fmt.Sprintf("*.%s", tsm1.TSMFileExtension))) - if err != nil { - t.Fatal(err) - } - - df := MustTempFile(t, dir) - - if err := tsm1.Digest(dir, files, df); err != nil { - t.Fatalf("digest error: %v", err) - } - - df, err = os.Open(df.Name()) - if err != nil { - t.Fatalf("open error: %v", err) - } - - r, err := tsm1.NewDigestReader(df) - if err != nil { - t.Fatalf("NewDigestReader error: %v", err) - } - defer r.Close() - - mfest, err := r.ReadManifest() - if err != nil { - t.Fatal(err) - } - - if len(mfest.Entries) != 1 { - t.Fatalf("exp: 1, got: %d", len(mfest.Entries)) - } - - var count int - for { - key, _, err := r.ReadTimeSpan() - if err == io.EOF { - break - } - - if got, exp := key, "cpu,host=A#!~#value"; got != exp { - t.Fatalf("key mismatch: got %v, exp %v", got, exp) - } - - count++ - } - - if got, exp := count, 1; got != exp { - t.Fatalf("count mismatch: got %v, exp %v", got, exp) - } -} - -func TestDigest_TimeFilter(t *testing.T) { - dir := t.TempDir() - dataDir := filepath.Join(dir, "data") - if err := os.Mkdir(dataDir, 0755); err != nil { - t.Fatalf("create data dir: %v", err) - } - - a1 := tsm1.NewValue(1, 1.1) - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": []tsm1.Value{a1}, - } - MustWriteTSM(t, dir, 1, writes) - - a2 := tsm1.NewValue(2, 2.1) - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": []tsm1.Value{a2}, - } - MustWriteTSM(t, dir, 2, writes) - - a3 := tsm1.NewValue(3, 3.1) - writes = map[string][]tsm1.Value{ - "cpu,host=A#!~#value": []tsm1.Value{a3}, - } - MustWriteTSM(t, dir, 3, writes) - - files, err := filepath.Glob(filepath.Join(dir, fmt.Sprintf("*.%s", tsm1.TSMFileExtension))) - if err != nil { - t.Fatal(err) - } - - df := MustTempFile(t, dir) - - if err := tsm1.DigestWithOptions(dir, files, tsm1.DigestOptions{MinTime: 2, MaxTime: 2}, df); err != nil { - t.Fatalf("digest error: %v", err) - } - - df, err = os.Open(df.Name()) - if err != nil { - t.Fatalf("open error: %v", err) - } - - r, err := tsm1.NewDigestReader(df) - if err != nil { - t.Fatalf("NewDigestReader error: %v", err) - } - defer r.Close() - - mfest, err := r.ReadManifest() - if err != nil { - t.Fatal(err) - } - - if len(mfest.Entries) != 3 { - t.Fatalf("exp: 3, got: %d", len(mfest.Entries)) - } - - var count int - for { - key, ts, err := r.ReadTimeSpan() - if err == io.EOF { - break - } - - if got, exp := key, "cpu,host=A#!~#value"; got != exp { - t.Fatalf("key mismatch: got %v, exp %v", got, exp) - } - - for _, tr := range ts.Ranges { - if got, exp := tr.Max, int64(2); got != exp { - t.Fatalf("min time not filtered: got %v, exp %v", got, exp) - } - } - - count++ - } - - if got, exp := count, 1; got != exp { - t.Fatalf("count mismatch: got %v, exp 
%v", got, exp) - } -} - -func TestDigest_KeyFilter(t *testing.T) { - dir := t.TempDir() - dataDir := filepath.Join(dir, "data") - if err := os.Mkdir(dataDir, 0755); err != nil { - t.Fatalf("create data dir: %v", err) - } - - a1 := tsm1.NewValue(1, 1.1) - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": []tsm1.Value{a1}, - } - MustWriteTSM(t, dir, 1, writes) - - a2 := tsm1.NewValue(2, 2.1) - writes = map[string][]tsm1.Value{ - "cpu,host=B#!~#value": []tsm1.Value{a2}, - } - MustWriteTSM(t, dir, 2, writes) - - a3 := tsm1.NewValue(3, 3.1) - writes = map[string][]tsm1.Value{ - "cpu,host=C#!~#value": []tsm1.Value{a3}, - } - MustWriteTSM(t, dir, 3, writes) - - files, err := filepath.Glob(filepath.Join(dir, fmt.Sprintf("*.%s", tsm1.TSMFileExtension))) - if err != nil { - t.Fatal(err) - } - - df := MustTempFile(t, dir) - - if err := tsm1.DigestWithOptions(dir, files, tsm1.DigestOptions{ - MinKey: []byte("cpu,host=B#!~#value"), - MaxKey: []byte("cpu,host=B#!~#value")}, df); err != nil { - t.Fatalf("digest error: %v", err) - } - - df, err = os.Open(df.Name()) - if err != nil { - t.Fatalf("open error: %v", err) - } - - r, err := tsm1.NewDigestReader(df) - if err != nil { - t.Fatalf("NewDigestReader error: %v", err) - } - defer r.Close() - - mfest, err := r.ReadManifest() - if err != nil { - t.Fatal(err) - } - - if len(mfest.Entries) != 3 { - t.Fatalf("exp: 3, got: %d", len(mfest.Entries)) - } - - var count int - for { - key, _, err := r.ReadTimeSpan() - if err == io.EOF { - break - } - - if got, exp := key, "cpu,host=B#!~#value"; got != exp { - t.Fatalf("key mismatch: got %v, exp %v", got, exp) - } - - count++ - } - - if got, exp := count, 1; got != exp { - t.Fatalf("count mismatch: got %v, exp %v", got, exp) - } -} - -func TestDigest_Manifest(t *testing.T) { - // Create temp directory to hold test files. - dir := t.TempDir() - - digestFile := filepath.Join(dir, tsm1.DigestFilename) - - // Create a point to write to the tsm files. - a1 := tsm1.NewValue(1, 1.1) - writes := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": []tsm1.Value{a1}, - } - - // Write a few tsm files. - var files []string - gen := 1 - for ; gen < 4; gen++ { - name := MustWriteTSM(t, dir, gen, writes) - files = append(files, name) - } - - // Generate a manifest. - mfest, err := tsm1.NewDigestManifest(dir, files) - if err != nil { - t.Fatal(err) - } - - // Make sure manifest contains only the expected files. - var got []string - for _, e := range mfest.Entries { - got = append(got, e.Filename) - } - - sort.StringSlice(files).Sort() - sort.StringSlice(got).Sort() - - if !reflect.DeepEqual(files, got) { - t.Fatalf("exp: %v, got: %v", files, got) - } - - // Write a digest of the files. - df := MustCreate(digestFile) - if err := tsm1.Digest(dir, files, df); err != nil { - t.Fatalf("digest error: %v", err) - } - - // Helper func to read manifest from a digest. - readManifest := func(name string) *tsm1.DigestManifest { - t.Helper() - - df, err = os.Open(df.Name()) - if err != nil { - t.Fatal(err) - } - - r, err := tsm1.NewDigestReader(df) - if err != nil { - t.Fatal(err) - } - - mfest, err := r.ReadManifest() - if err != nil { - t.Fatal(err) - } - - if err := r.Close(); err != nil { - t.Fatal(err) - } - - return mfest - } - - // Read the manifest from the digest. - mfest2 := readManifest(df.Name()) - - // Make sure the manifest read from the digest on disk is correct. 
- if !reflect.DeepEqual(mfest, mfest2) { - t.Fatalf("invalid manifest:\nexp: %v\ngot: %v", mfest, mfest2) - } - - // Write an extra tsm file that shouldn't be included in the manifest. - extra := MustWriteTSM(t, dir, gen, writes) - - // Re-generate manifest. - mfest, err = tsm1.NewDigestManifest(dir, files) - if err != nil { - t.Fatal(err) - } - - // Make sure manifest contains only the expected files. - got = got[:0] - for _, e := range mfest.Entries { - if e.Filename == extra { - t.Fatal("extra file in shard directory should not be in digest manifest") - } - got = append(got, e.Filename) - } - - sort.StringSlice(got).Sort() - - if !reflect.DeepEqual(files, got) { - t.Fatalf("exp: %v, got: %v", files, got) - } - - // Re-generate digest and make sure it does not include the extra tsm file. - df = MustCreate(digestFile) - if err := tsm1.Digest(dir, files, df); err != nil { - t.Fatalf("digest error: %v", err) - } - - // Read the manifest from the new digest. - mfest2 = readManifest(df.Name()) - - // Make sure the manifest read from the digest on disk is correct. - if !reflect.DeepEqual(mfest, mfest2) { - t.Fatalf("invalid manifest:\nexp: %v\ngot: %v", mfest, mfest2) - } - - // Make sure the digest is fresh. - digest, err := os.Stat(df.Name()) - if err != nil { - t.Fatal(err) - } - - fresh, reason := tsm1.DigestFresh(dir, files, digest.ModTime()) - if !fresh { - t.Fatalf("digest is stale: reason=%s", reason) - } - - // Test that digest is stale if shard time is newer than digest time. - fresh, _ = tsm1.DigestFresh(dir, files, digest.ModTime().Add(1)) - if fresh { - t.Fatalf("digest is fresh") - } - - // Test that digest is stale if a new tsm file has been written by the engine. - allfiles := append(files, extra) - fresh, _ = tsm1.DigestFresh(dir, allfiles, digest.ModTime()) - if fresh { - t.Fatalf("digest is fresh") - } - - // Open one of the tsm files and write data to it. - f, err := os.OpenFile(files[0], os.O_WRONLY|os.O_APPEND, 0666) - if err != nil { - t.Fatal(err) - } - - if _, err := f.WriteString("some data"); err != nil { - t.Fatal(err) - } - - if err := f.Close(); err != nil { - t.Fatal(err) - } - - // Test that digest is stale if a tsm file is changed. - fresh, _ = tsm1.DigestFresh(dir, files, digest.ModTime()) - if fresh { - t.Fatalf("digest is fresh") - } - - // Delete a tsm file. - if err := os.Remove(files[0]); err != nil { - t.Fatal(err) - } - - // Test that digest is stale if a tsm file is missing on disk. - fresh, _ = tsm1.DigestFresh(dir, files, digest.ModTime()) - if fresh { - t.Fatalf("digest is fresh") - } - - // Delete the entire shard directory - if err := os.RemoveAll(dir); err != nil { - t.Fatal(err) - } - - // Test that digest is stale if the entire shard directory is missing. - fresh, _ = tsm1.DigestFresh(dir, files, digest.ModTime()) - if fresh { - t.Fatalf("digest is fresh") - } -} - -func MustCreate(path string) *os.File { - f, err := os.Create(path) - if err != nil { - panic(err) - } - return f -} diff --git a/tsdb/engine/tsm1/digest_writer.go b/tsdb/engine/tsm1/digest_writer.go deleted file mode 100644 index 4009d6e2e94..00000000000 --- a/tsdb/engine/tsm1/digest_writer.go +++ /dev/null @@ -1,137 +0,0 @@ -package tsm1 - -import ( - "encoding/binary" - "encoding/json" - "errors" - "io" - - "github.com/golang/snappy" -) - -var ( - // ErrNoDigestManifest is returned if an attempt is made to write other parts of a - // digest before writing the manifest. 
- ErrNoDigestManifest = errors.New("no digest manifest") - - // ErrDigestAlreadyWritten is returned if the client attempts to write more than - // one manifest. - ErrDigestAlreadyWritten = errors.New("digest manifest already written") -) - -// DigestWriter allows for writing a digest of a shard. A digest is a condensed -// representation of the contents of a shard. It can be scoped to one or more series -// keys, ranges of times or sets of files. -type DigestWriter struct { - w io.WriteCloser - sw *snappy.Writer - manifestWritten bool -} - -func NewDigestWriter(w io.WriteCloser) (*DigestWriter, error) { - return &DigestWriter{w: w, sw: snappy.NewBufferedWriter(w)}, nil -} - -func (w *DigestWriter) WriteManifest(m *DigestManifest) error { - if w.manifestWritten { - return ErrDigestAlreadyWritten - } - - b, err := json.Marshal(m) - if err != nil { - return err - } - - // Write length of manifest. - if err := binary.Write(w.sw, binary.BigEndian, uint32(len(b))); err != nil { - return err - } - - // Write manifest. - if _, err = w.sw.Write(b); err != nil { - return err - } - - w.manifestWritten = true - - return err -} - -func (w *DigestWriter) WriteTimeSpan(key string, t *DigestTimeSpan) error { - if !w.manifestWritten { - return ErrNoDigestManifest - } - - if err := binary.Write(w.sw, binary.BigEndian, uint16(len(key))); err != nil { - return err - } - - if _, err := w.sw.Write([]byte(key)); err != nil { - return err - } - - if err := binary.Write(w.sw, binary.BigEndian, uint32(t.Len())); err != nil { - return err - } - - for _, tr := range t.Ranges { - if err := binary.Write(w.sw, binary.BigEndian, tr.Min); err != nil { - return err - } - - if err := binary.Write(w.sw, binary.BigEndian, tr.Max); err != nil { - return err - } - - if err := binary.Write(w.sw, binary.BigEndian, tr.CRC); err != nil { - return err - } - - if err := binary.Write(w.sw, binary.BigEndian, uint16(tr.N)); err != nil { - return err - } - } - - return nil -} - -func (w *DigestWriter) Flush() error { - return w.sw.Flush() -} - -func (w *DigestWriter) Close() error { - if err := w.Flush(); err != nil { - return err - } - - if err := w.sw.Close(); err != nil { - return err - } - - return w.w.Close() -} - -type DigestTimeSpan struct { - Ranges []DigestTimeRange -} - -func (a DigestTimeSpan) Len() int { return len(a.Ranges) } -func (a DigestTimeSpan) Swap(i, j int) { a.Ranges[i], a.Ranges[j] = a.Ranges[j], a.Ranges[i] } -func (a DigestTimeSpan) Less(i, j int) bool { - return a.Ranges[i].Min < a.Ranges[j].Min -} - -func (t *DigestTimeSpan) Add(min, max int64, n int, crc uint32) { - for _, v := range t.Ranges { - if v.Min == min && v.Max == max && v.N == n && v.CRC == crc { - return - } - } - t.Ranges = append(t.Ranges, DigestTimeRange{Min: min, Max: max, N: n, CRC: crc}) -} - -type DigestTimeRange struct { - Min, Max int64 - N int - CRC uint32 -} diff --git a/tsdb/engine/tsm1/digest_writer_test.go b/tsdb/engine/tsm1/digest_writer_test.go deleted file mode 100644 index e106c6e6346..00000000000 --- a/tsdb/engine/tsm1/digest_writer_test.go +++ /dev/null @@ -1,198 +0,0 @@ -package tsm1_test - -import ( - "io" - "os" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -// Test that an error is returned if a manifest isn't the first thing written -// to a digest. 
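The tests that follow exercise these ordering rules; as a compact, hypothetical round-trip sketch (invented helper name and path, single-error returns for brevity), the manifest is written first and the reader pulls it in implicitly when a time span is read directly:

func digestRoundTrip(path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	w, err := tsm1.NewDigestWriter(f)
	if err != nil {
		return err
	}
	// Manifest first; calling WriteTimeSpan before this returns ErrNoDigestManifest.
	if err := w.WriteManifest(&tsm1.DigestManifest{Dir: "shard-dir"}); err != nil {
		return err
	}
	ts := &tsm1.DigestTimeSpan{}
	ts.Add(1, 2, 3, 4) // min time, max time, block count, CRC
	if err := w.WriteTimeSpan("cpu,host=A#!~#value", ts); err != nil {
		return err
	}
	if err := w.Close(); err != nil {
		return err
	}

	rf, err := os.Open(path)
	if err != nil {
		return err
	}
	r, err := tsm1.NewDigestReader(rf)
	if err != nil {
		return err
	}
	defer r.Close()
	_, _, err = r.ReadTimeSpan() // reads (and skips past) the manifest automatically
	return err
}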
-func TestEngine_DigestManifestNotWritten(t *testing.T) { - f := MustTempFile(t, "") - w, err := tsm1.NewDigestWriter(f) - if err != nil { - t.Fatalf("NewDigestWriter: %v", err) - } - defer w.Close() - - ts := &tsm1.DigestTimeSpan{} - ts.Add(1, 2, 3, 4) - - if err := w.WriteTimeSpan("cpu", ts); err != tsm1.ErrNoDigestManifest { - t.Fatalf("exp: tsm1.ErrNoDigestManifest, got: %v", err) - } -} - -// Test that a digest reader will skip over the manifest without error -// if needed. -func TestEngine_DigestReadSkipsManifest(t *testing.T) { - f := MustTempFile(t, "") - w, err := tsm1.NewDigestWriter(f) - if err != nil { - t.Fatalf("NewDigestWriter: %v", err) - } - - // Write an empty manifest. - if err := w.WriteManifest(&tsm1.DigestManifest{}); err != nil { - t.Fatal(err) - } - - // Write a time span. - ts := &tsm1.DigestTimeSpan{} - ts.Add(1, 2, 3, 4) - - if err := w.WriteTimeSpan("cpu", ts); err != nil { - t.Fatal(err) - } - - if err := w.Close(); err != nil { - t.Fatal(err) - } - - // Open the digest and create a reader. - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("Open: %v", err) - } - - r, err := tsm1.NewDigestReader(f) - if err != nil { - t.Fatalf("NewDigestReader: %v", err) - } - - // Test that we can read the timespan without first reading the manifest. - key, ts, err := r.ReadTimeSpan() - if err != nil { - t.Fatal(err) - } else if key != "cpu" { - t.Fatalf("exp: cpu, got: %s", key) - } else if len(ts.Ranges) != 1 { - t.Fatalf("exp: 1, got: %d", len(ts.Ranges)) - } else if ts.Ranges[0].Min != 1 { - t.Fatalf("exp: 1, got: %d", ts.Ranges[0].Min) - } else if ts.Ranges[0].Max != 2 { - t.Fatalf("exp: 1, got: %d", ts.Ranges[0].Min) - } else if ts.Ranges[0].N != 3 { - t.Fatalf("exp: 1, got: %d", ts.Ranges[0].N) - } else if ts.Ranges[0].CRC != 4 { - t.Fatalf("exp: 1, got: %d", ts.Ranges[0].CRC) - } -} - -// Test that we get an error if a digest manifest is written twice. -func TestEngine_DigestManifestDoubleWrite(t *testing.T) { - f := MustTempFile(t, "") - w, err := tsm1.NewDigestWriter(f) - if err != nil { - t.Fatalf("NewDigestWriter: %v", err) - } - defer w.Close() - - if err := w.WriteManifest(&tsm1.DigestManifest{}); err != nil { - t.Fatal(err) - } - - if err := w.WriteManifest(&tsm1.DigestManifest{}); err != tsm1.ErrDigestAlreadyWritten { - t.Fatalf("exp: %s, got: %s", tsm1.ErrDigestAlreadyWritten, err) - } -} - -// Test that we get an error if the manifest is read twice. -func TestEngine_DigestManifestDoubleRead(t *testing.T) { - f := MustTempFile(t, "") - w, err := tsm1.NewDigestWriter(f) - if err != nil { - t.Fatalf("NewDigestWriter: %v", err) - } - - // Write the manifest. - if err := w.WriteManifest(&tsm1.DigestManifest{Dir: "test"}); err != nil { - t.Fatal(err) - } - if err := w.Close(); err != nil { - t.Fatal(err) - } - - // Open the digest and create a reader. - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("Open: %v", err) - } - - r, err := tsm1.NewDigestReader(f) - if err != nil { - t.Fatalf("NewDigestReader: %v", err) - } - - // Read the manifest. - if m, err := r.ReadManifest(); err != nil { - t.Fatal(err) - } else if m.Dir != "test" { - t.Fatalf("exp: test, got: %s", m.Dir) - } - - // Attempt to read the manifest a second time (should fail). - if _, err := r.ReadManifest(); err != tsm1.ErrDigestManifestAlreadyRead { - t.Fatalf("exp: digest manifest already read, got: %v", err) - } -} - -// Test writing and reading a digest. 
-func TestEngine_DigestWriterReader(t *testing.T) { - f := MustTempFile(t, "") - w, err := tsm1.NewDigestWriter(f) - if err != nil { - t.Fatalf("NewDigestWriter: %v", err) - } - - if err := w.WriteManifest(&tsm1.DigestManifest{}); err != nil { - t.Fatal(err) - } - - ts := &tsm1.DigestTimeSpan{} - ts.Add(1, 2, 3, 4) - - if err := w.WriteTimeSpan("cpu", ts); err != nil { - t.Fatalf("WriteTimeSpan: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("Close: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("Open: %v", err) - } - - r, err := tsm1.NewDigestReader(f) - if err != nil { - t.Fatalf("NewDigestReader: %v", err) - } - for { - - key, ts, err := r.ReadTimeSpan() - if err == io.EOF { - break - } else if err != nil { - t.Fatalf("ReadTimeSpan: %v", err) - } - - if exp, got := "cpu", key; exp != got { - t.Fatalf("key mismatch: exp %v, got %v", exp, got) - } - - if exp, got := 1, len(ts.Ranges); exp != got { - t.Fatalf("range len mismatch: exp %v, got %v", exp, got) - } - - exp := tsm1.DigestTimeRange{Min: 1, Max: 2, N: 3, CRC: 4} - if got := ts.Ranges[0]; !reflect.DeepEqual(exp, got) { - t.Fatalf("time range mismatch: exp %v, got %v", exp, got) - } - } -} diff --git a/tsdb/engine/tsm1/encoding.gen.go b/tsdb/engine/tsm1/encoding.gen.go deleted file mode 100644 index cf9001b2903..00000000000 --- a/tsdb/engine/tsm1/encoding.gen.go +++ /dev/null @@ -1,1697 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! -// Source: encoding.gen.go.tmpl - -//lint:file-ignore U1000 generated code -package tsm1 - -import ( - "fmt" - "sort" - - "github.com/influxdata/influxdb/v2/tsdb" -) - -// Values represents a slice of values. -type Values []Value - -func (a Values) MinTime() int64 { - return a[0].UnixNano() -} - -func (a Values) MaxTime() int64 { - return a[len(a)-1].UnixNano() -} - -func (a Values) Size() int { - sz := 0 - for _, v := range a { - sz += v.Size() - } - return sz -} - -func (a Values) ordered() bool { - if len(a) <= 1 { - return true - } - for i := 1; i < len(a); i++ { - if av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab { - return false - } - } - return true -} - -func (a Values) assertOrdered() { - if len(a) <= 1 { - return - } - for i := 1; i < len(a); i++ { - if av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab { - panic(fmt.Sprintf("not ordered: %d %d >= %d", i, av, ab)) - } - } -} - -// Deduplicate returns a new slice with any values that have the same timestamp removed. -// The Value that appears last in the slice is the one that is kept. The returned -// Values are sorted if necessary. -func (a Values) Deduplicate() Values { - if len(a) <= 1 { - return a - } - - // See if we're already sorted and deduped - var needSort bool - for i := 1; i < len(a); i++ { - if a[i-1].UnixNano() >= a[i].UnixNano() { - needSort = true - break - } - } - - if !needSort { - return a - } - - sort.Stable(a) - var i int - for j := 1; j < len(a); j++ { - v := a[j] - if v.UnixNano() != a[i].UnixNano() { - i++ - } - a[i] = v - - } - return a[:i+1] -} - -// Exclude returns the subset of values not in [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. 
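Because the sorted/deduplicated precondition matters, a tiny worked example of Include and Exclude may help (hypothetical helper; separate slices are used because both methods rearrange the backing array):

func exampleIncludeExclude() (Values, Values) {
	// Two identical, sorted, deduplicated slices with timestamps 1 through 4.
	a := Values{NewValue(1, 1.0), NewValue(2, 2.0), NewValue(3, 3.0), NewValue(4, 4.0)}
	b := Values{NewValue(1, 1.0), NewValue(2, 2.0), NewValue(3, 3.0), NewValue(4, 4.0)}
	in := a.Include(2, 3)  // -> timestamps 2, 3 (bounds are inclusive)
	out := b.Exclude(2, 3) // -> timestamps 1, 4 (the [2, 3] range is removed)
	return in, out
}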
-func (a Values) Exclude(min, max int64) Values { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return a - } - - // a[rmin].UnixNano() ≥ min - // a[rmax].UnixNano() ≥ max - - if rmax < len(a) { - if a[rmax].UnixNano() == max { - rmax++ - } - rest := len(a) - rmax - if rest > 0 { - b := a[:rmin+rest] - copy(b[rmin:], a[rmax:]) - return b - } - } - - return a[:rmin] -} - -// Include returns the subset values between min and max inclusive. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a Values) Include(min, max int64) Values { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return nil - } - - // a[rmin].UnixNano() ≥ min - // a[rmax].UnixNano() ≥ max - - if rmax < len(a) && a[rmax].UnixNano() == max { - rmax++ - } - - if rmin > -1 { - b := a[:rmax-rmin] - copy(b, a[rmin:rmax]) - return b - } - - return a[:rmax] -} - -// search performs a binary search for UnixNano() v in a -// and returns the position, i, where v would be inserted. -// An additional check of a[i].UnixNano() == v is necessary -// to determine if the value v exists. -func (a Values) search(v int64) int { - // Define: f(x) → a[x].UnixNano() < v - // Define: f(-1) == true, f(n) == false - // Invariant: f(lo-1) == true, f(hi) == false - lo := 0 - hi := len(a) - for lo < hi { - mid := int(uint(lo+hi) >> 1) - if a[mid].UnixNano() < v { - lo = mid + 1 // preserves f(lo-1) == true - } else { - hi = mid // preserves f(hi) == false - } - } - - // lo == hi - return lo -} - -// FindRange returns the positions where min and max would be -// inserted into the array. If a[0].UnixNano() > max or -// a[len-1].UnixNano() < min then FindRange returns (-1, -1) -// indicating the array is outside the [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results -// are undefined. -func (a Values) FindRange(min, max int64) (int, int) { - if len(a) == 0 || min > max { - return -1, -1 - } - - minVal := a[0].UnixNano() - maxVal := a[len(a)-1].UnixNano() - - if maxVal < min || minVal > max { - return -1, -1 - } - - return a.search(min), a.search(max) -} - -// Merge overlays b to top of a. If two values conflict with -// the same timestamp, b is used. Both a and b must be sorted -// in ascending order. -func (a Values) Merge(b Values) Values { - if len(a) == 0 { - return b - } - - if len(b) == 0 { - return a - } - - // Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's - // possible stored blocks might contain duplicate values. Remove them if they exists before - // merging. - a = a.Deduplicate() - b = b.Deduplicate() - - if a[len(a)-1].UnixNano() < b[0].UnixNano() { - return append(a, b...) - } - - if b[len(b)-1].UnixNano() < a[0].UnixNano() { - return append(b, a...) - } - - out := make(Values, 0, len(a)+len(b)) - for len(a) > 0 && len(b) > 0 { - if a[0].UnixNano() < b[0].UnixNano() { - out, a = append(out, a[0]), a[1:] - } else if len(b) > 0 && a[0].UnixNano() == b[0].UnixNano() { - a = a[1:] - } else { - out, b = append(out, b[0]), b[1:] - } - } - if len(a) > 0 { - return append(out, a...) - } - return append(out, b...) -} - -// Sort methods -func (a Values) Len() int { return len(a) } -func (a Values) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a Values) Less(i, j int) bool { return a[i].UnixNano() < a[j].UnixNano() } - -// FloatValues represents a slice of Float values. 
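Before the generated per-type copies that follow, a sketch of the Merge semantics defined above (same assumed import path): b is overlaid on a, so where timestamps collide the value from b wins.

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" // assumed import path
)

func main() {
	a := tsm1.Values{tsm1.NewValue(1, 1.0), tsm1.NewValue(2, 2.0)}
	b := tsm1.Values{tsm1.NewValue(2, 20.0), tsm1.NewValue(3, 3.0)}

	for _, v := range a.Merge(b) {
		fmt.Println(v.UnixNano(), v.Value()) // 1 1, then 2 20 (b wins the collision), then 3 3
	}
}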
-type FloatValues []FloatValue - -func NewFloatArrayFromValues(v FloatValues) *tsdb.FloatArray { - a := tsdb.NewFloatArrayLen(len(v)) - for i, val := range v { - a.Timestamps[i] = val.unixnano - a.Values[i] = val.value - } - return a -} - -func (a FloatValues) MinTime() int64 { - return a[0].UnixNano() -} - -func (a FloatValues) MaxTime() int64 { - return a[len(a)-1].UnixNano() -} - -func (a FloatValues) Size() int { - sz := 0 - for _, v := range a { - sz += v.Size() - } - return sz -} - -func (a FloatValues) ordered() bool { - if len(a) <= 1 { - return true - } - for i := 1; i < len(a); i++ { - if av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab { - return false - } - } - return true -} - -func (a FloatValues) assertOrdered() { - if len(a) <= 1 { - return - } - for i := 1; i < len(a); i++ { - if av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab { - panic(fmt.Sprintf("not ordered: %d %d >= %d", i, av, ab)) - } - } -} - -// Deduplicate returns a new slice with any values that have the same timestamp removed. -// The Value that appears last in the slice is the one that is kept. The returned -// Values are sorted if necessary. -func (a FloatValues) Deduplicate() FloatValues { - if len(a) <= 1 { - return a - } - - // See if we're already sorted and deduped - var needSort bool - for i := 1; i < len(a); i++ { - if a[i-1].UnixNano() >= a[i].UnixNano() { - needSort = true - break - } - } - - if !needSort { - return a - } - - sort.Stable(a) - var i int - for j := 1; j < len(a); j++ { - v := a[j] - if v.UnixNano() != a[i].UnixNano() { - i++ - } - a[i] = v - - } - return a[:i+1] -} - -// Exclude returns the subset of values not in [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a FloatValues) Exclude(min, max int64) FloatValues { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return a - } - - // a[rmin].UnixNano() ≥ min - // a[rmax].UnixNano() ≥ max - - if rmax < len(a) { - if a[rmax].UnixNano() == max { - rmax++ - } - rest := len(a) - rmax - if rest > 0 { - b := a[:rmin+rest] - copy(b[rmin:], a[rmax:]) - return b - } - } - - return a[:rmin] -} - -// Include returns the subset values between min and max inclusive. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a FloatValues) Include(min, max int64) FloatValues { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return nil - } - - // a[rmin].UnixNano() ≥ min - // a[rmax].UnixNano() ≥ max - - if rmax < len(a) && a[rmax].UnixNano() == max { - rmax++ - } - - if rmin > -1 { - b := a[:rmax-rmin] - copy(b, a[rmin:rmax]) - return b - } - - return a[:rmax] -} - -// search performs a binary search for UnixNano() v in a -// and returns the position, i, where v would be inserted. -// An additional check of a[i].UnixNano() == v is necessary -// to determine if the value v exists. -func (a FloatValues) search(v int64) int { - // Define: f(x) → a[x].UnixNano() < v - // Define: f(-1) == true, f(n) == false - // Invariant: f(lo-1) == true, f(hi) == false - lo := 0 - hi := len(a) - for lo < hi { - mid := int(uint(lo+hi) >> 1) - if a[mid].UnixNano() < v { - lo = mid + 1 // preserves f(lo-1) == true - } else { - hi = mid // preserves f(hi) == false - } - } - - // lo == hi - return lo -} - -// FindRange returns the positions where min and max would be -// inserted into the array. 
If a[0].UnixNano() > max or -// a[len-1].UnixNano() < min then FindRange returns (-1, -1) -// indicating the array is outside the [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results -// are undefined. -func (a FloatValues) FindRange(min, max int64) (int, int) { - if len(a) == 0 || min > max { - return -1, -1 - } - - minVal := a[0].UnixNano() - maxVal := a[len(a)-1].UnixNano() - - if maxVal < min || minVal > max { - return -1, -1 - } - - return a.search(min), a.search(max) -} - -// Merge overlays b to top of a. If two values conflict with -// the same timestamp, b is used. Both a and b must be sorted -// in ascending order. -func (a FloatValues) Merge(b FloatValues) FloatValues { - if len(a) == 0 { - return b - } - - if len(b) == 0 { - return a - } - - // Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's - // possible stored blocks might contain duplicate values. Remove them if they exists before - // merging. - a = a.Deduplicate() - b = b.Deduplicate() - - if a[len(a)-1].UnixNano() < b[0].UnixNano() { - return append(a, b...) - } - - if b[len(b)-1].UnixNano() < a[0].UnixNano() { - return append(b, a...) - } - - out := make(FloatValues, 0, len(a)+len(b)) - for len(a) > 0 && len(b) > 0 { - if a[0].UnixNano() < b[0].UnixNano() { - out, a = append(out, a[0]), a[1:] - } else if len(b) > 0 && a[0].UnixNano() == b[0].UnixNano() { - a = a[1:] - } else { - out, b = append(out, b[0]), b[1:] - } - } - if len(a) > 0 { - return append(out, a...) - } - return append(out, b...) -} - -func (a FloatValues) Encode(buf []byte) ([]byte, error) { - return encodeFloatValuesBlock(buf, a) -} - -func EncodeFloatArrayBlock(a *tsdb.FloatArray, b []byte) ([]byte, error) { - if a.Len() == 0 { - return nil, nil - } - - // TODO(edd): These need to be pooled. - var vb []byte - var tb []byte - var err error - - if vb, err = FloatArrayEncodeAll(a.Values, vb); err != nil { - return nil, err - } - - if tb, err = TimeArrayEncodeAll(a.Timestamps, tb); err != nil { - return nil, err - } - - // Prepend the first timestamp of the block in the first 8 bytes and the block - // in the next byte, followed by the block - return packBlock(b, BlockFloat64, tb, vb), nil -} - -func encodeFloatValuesBlock(buf []byte, values []FloatValue) ([]byte, error) { - if len(values) == 0 { - return nil, nil - } - - venc := getFloatEncoder(len(values)) - tsenc := getTimeEncoder(len(values)) - - var b []byte - err := func() error { - for _, v := range values { - tsenc.Write(v.unixnano) - venc.Write(v.value) - } - venc.Flush() - - // Encoded timestamp values - tb, err := tsenc.Bytes() - if err != nil { - return err - } - // Encoded values - vb, err := venc.Bytes() - if err != nil { - return err - } - - // Prepend the first timestamp of the block in the first 8 bytes and the block - // in the next byte, followed by the block - b = packBlock(buf, BlockFloat64, tb, vb) - - return nil - }() - - putTimeEncoder(tsenc) - putFloatEncoder(venc) - - return b, err -} - -// Sort methods -func (a FloatValues) Len() int { return len(a) } -func (a FloatValues) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a FloatValues) Less(i, j int) bool { return a[i].UnixNano() < a[j].UnixNano() } - -// IntegerValues represents a slice of Integer values. 
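The float block encoder above can be exercised end to end with the BlockCount and DecodeFloatBlock helpers from encoding.go further down; a sketch, assuming the import paths github.com/influxdata/influxdb/v2/tsdb and github.com/influxdata/influxdb/v2/tsdb/engine/tsm1:

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/v2/tsdb"             // assumed import path
	"github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" // assumed import path
)

func main() {
	// Timestamps must be sorted in ascending order before encoding.
	arr := tsdb.NewFloatArrayLen(3)
	copy(arr.Timestamps, []int64{1, 2, 3})
	copy(arr.Values, []float64{1.5, 2.5, 3.5})

	block, err := tsm1.EncodeFloatArrayBlock(arr, nil)
	if err != nil {
		log.Fatal(err)
	}

	n, err := tsm1.BlockCount(block)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("points:", n) // 3

	var buf []tsm1.FloatValue
	decoded, err := tsm1.DecodeFloatBlock(block, &buf)
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range decoded {
		fmt.Println(v.UnixNano(), v.Value())
	}
}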
-type IntegerValues []IntegerValue - -func NewIntegerArrayFromValues(v IntegerValues) *tsdb.IntegerArray { - a := tsdb.NewIntegerArrayLen(len(v)) - for i, val := range v { - a.Timestamps[i] = val.unixnano - a.Values[i] = val.value - } - return a -} - -func (a IntegerValues) MinTime() int64 { - return a[0].UnixNano() -} - -func (a IntegerValues) MaxTime() int64 { - return a[len(a)-1].UnixNano() -} - -func (a IntegerValues) Size() int { - sz := 0 - for _, v := range a { - sz += v.Size() - } - return sz -} - -func (a IntegerValues) ordered() bool { - if len(a) <= 1 { - return true - } - for i := 1; i < len(a); i++ { - if av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab { - return false - } - } - return true -} - -func (a IntegerValues) assertOrdered() { - if len(a) <= 1 { - return - } - for i := 1; i < len(a); i++ { - if av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab { - panic(fmt.Sprintf("not ordered: %d %d >= %d", i, av, ab)) - } - } -} - -// Deduplicate returns a new slice with any values that have the same timestamp removed. -// The Value that appears last in the slice is the one that is kept. The returned -// Values are sorted if necessary. -func (a IntegerValues) Deduplicate() IntegerValues { - if len(a) <= 1 { - return a - } - - // See if we're already sorted and deduped - var needSort bool - for i := 1; i < len(a); i++ { - if a[i-1].UnixNano() >= a[i].UnixNano() { - needSort = true - break - } - } - - if !needSort { - return a - } - - sort.Stable(a) - var i int - for j := 1; j < len(a); j++ { - v := a[j] - if v.UnixNano() != a[i].UnixNano() { - i++ - } - a[i] = v - - } - return a[:i+1] -} - -// Exclude returns the subset of values not in [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a IntegerValues) Exclude(min, max int64) IntegerValues { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return a - } - - // a[rmin].UnixNano() ≥ min - // a[rmax].UnixNano() ≥ max - - if rmax < len(a) { - if a[rmax].UnixNano() == max { - rmax++ - } - rest := len(a) - rmax - if rest > 0 { - b := a[:rmin+rest] - copy(b[rmin:], a[rmax:]) - return b - } - } - - return a[:rmin] -} - -// Include returns the subset values between min and max inclusive. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a IntegerValues) Include(min, max int64) IntegerValues { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return nil - } - - // a[rmin].UnixNano() ≥ min - // a[rmax].UnixNano() ≥ max - - if rmax < len(a) && a[rmax].UnixNano() == max { - rmax++ - } - - if rmin > -1 { - b := a[:rmax-rmin] - copy(b, a[rmin:rmax]) - return b - } - - return a[:rmax] -} - -// search performs a binary search for UnixNano() v in a -// and returns the position, i, where v would be inserted. -// An additional check of a[i].UnixNano() == v is necessary -// to determine if the value v exists. -func (a IntegerValues) search(v int64) int { - // Define: f(x) → a[x].UnixNano() < v - // Define: f(-1) == true, f(n) == false - // Invariant: f(lo-1) == true, f(hi) == false - lo := 0 - hi := len(a) - for lo < hi { - mid := int(uint(lo+hi) >> 1) - if a[mid].UnixNano() < v { - lo = mid + 1 // preserves f(lo-1) == true - } else { - hi = mid // preserves f(hi) == false - } - } - - // lo == hi - return lo -} - -// FindRange returns the positions where min and max would be -// inserted into the array. 
If a[0].UnixNano() > max or -// a[len-1].UnixNano() < min then FindRange returns (-1, -1) -// indicating the array is outside the [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results -// are undefined. -func (a IntegerValues) FindRange(min, max int64) (int, int) { - if len(a) == 0 || min > max { - return -1, -1 - } - - minVal := a[0].UnixNano() - maxVal := a[len(a)-1].UnixNano() - - if maxVal < min || minVal > max { - return -1, -1 - } - - return a.search(min), a.search(max) -} - -// Merge overlays b to top of a. If two values conflict with -// the same timestamp, b is used. Both a and b must be sorted -// in ascending order. -func (a IntegerValues) Merge(b IntegerValues) IntegerValues { - if len(a) == 0 { - return b - } - - if len(b) == 0 { - return a - } - - // Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's - // possible stored blocks might contain duplicate values. Remove them if they exists before - // merging. - a = a.Deduplicate() - b = b.Deduplicate() - - if a[len(a)-1].UnixNano() < b[0].UnixNano() { - return append(a, b...) - } - - if b[len(b)-1].UnixNano() < a[0].UnixNano() { - return append(b, a...) - } - - out := make(IntegerValues, 0, len(a)+len(b)) - for len(a) > 0 && len(b) > 0 { - if a[0].UnixNano() < b[0].UnixNano() { - out, a = append(out, a[0]), a[1:] - } else if len(b) > 0 && a[0].UnixNano() == b[0].UnixNano() { - a = a[1:] - } else { - out, b = append(out, b[0]), b[1:] - } - } - if len(a) > 0 { - return append(out, a...) - } - return append(out, b...) -} - -func (a IntegerValues) Encode(buf []byte) ([]byte, error) { - return encodeIntegerValuesBlock(buf, a) -} - -func EncodeIntegerArrayBlock(a *tsdb.IntegerArray, b []byte) ([]byte, error) { - if a.Len() == 0 { - return nil, nil - } - - // TODO(edd): These need to be pooled. - var vb []byte - var tb []byte - var err error - - if vb, err = IntegerArrayEncodeAll(a.Values, vb); err != nil { - return nil, err - } - - if tb, err = TimeArrayEncodeAll(a.Timestamps, tb); err != nil { - return nil, err - } - - // Prepend the first timestamp of the block in the first 8 bytes and the block - // in the next byte, followed by the block - return packBlock(b, BlockInteger, tb, vb), nil -} - -func encodeIntegerValuesBlock(buf []byte, values []IntegerValue) ([]byte, error) { - if len(values) == 0 { - return nil, nil - } - - venc := getIntegerEncoder(len(values)) - tsenc := getTimeEncoder(len(values)) - - var b []byte - err := func() error { - for _, v := range values { - tsenc.Write(v.unixnano) - venc.Write(v.value) - } - venc.Flush() - - // Encoded timestamp values - tb, err := tsenc.Bytes() - if err != nil { - return err - } - // Encoded values - vb, err := venc.Bytes() - if err != nil { - return err - } - - // Prepend the first timestamp of the block in the first 8 bytes and the block - // in the next byte, followed by the block - b = packBlock(buf, BlockInteger, tb, vb) - - return nil - }() - - putTimeEncoder(tsenc) - putIntegerEncoder(venc) - - return b, err -} - -// Sort methods -func (a IntegerValues) Len() int { return len(a) } -func (a IntegerValues) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a IntegerValues) Less(i, j int) bool { return a[i].UnixNano() < a[j].UnixNano() } - -// UnsignedValues represents a slice of Unsigned values. 
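The search/FindRange pair, repeated for each generated type, returns insertion positions rather than exact matches; a small sketch on the generic Values type (same assumed import path):

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" // assumed import path
)

func main() {
	vals := tsm1.Values{
		tsm1.NewValue(10, 1.0),
		tsm1.NewValue(11, 1.0),
		tsm1.NewValue(13, 1.0),
		tsm1.NewValue(15, 1.0),
	}

	// Insertion positions for min and max; callers still compare
	// a[i].UnixNano() when they need an exact match.
	lo, hi := vals.FindRange(12, 14)
	fmt.Println(lo, hi) // 2 3

	// The whole slice lies outside [20, 30], so both positions are -1.
	fmt.Println(vals.FindRange(20, 30)) // -1 -1
}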
-type UnsignedValues []UnsignedValue - -func NewUnsignedArrayFromValues(v UnsignedValues) *tsdb.UnsignedArray { - a := tsdb.NewUnsignedArrayLen(len(v)) - for i, val := range v { - a.Timestamps[i] = val.unixnano - a.Values[i] = val.value - } - return a -} - -func (a UnsignedValues) MinTime() int64 { - return a[0].UnixNano() -} - -func (a UnsignedValues) MaxTime() int64 { - return a[len(a)-1].UnixNano() -} - -func (a UnsignedValues) Size() int { - sz := 0 - for _, v := range a { - sz += v.Size() - } - return sz -} - -func (a UnsignedValues) ordered() bool { - if len(a) <= 1 { - return true - } - for i := 1; i < len(a); i++ { - if av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab { - return false - } - } - return true -} - -func (a UnsignedValues) assertOrdered() { - if len(a) <= 1 { - return - } - for i := 1; i < len(a); i++ { - if av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab { - panic(fmt.Sprintf("not ordered: %d %d >= %d", i, av, ab)) - } - } -} - -// Deduplicate returns a new slice with any values that have the same timestamp removed. -// The Value that appears last in the slice is the one that is kept. The returned -// Values are sorted if necessary. -func (a UnsignedValues) Deduplicate() UnsignedValues { - if len(a) <= 1 { - return a - } - - // See if we're already sorted and deduped - var needSort bool - for i := 1; i < len(a); i++ { - if a[i-1].UnixNano() >= a[i].UnixNano() { - needSort = true - break - } - } - - if !needSort { - return a - } - - sort.Stable(a) - var i int - for j := 1; j < len(a); j++ { - v := a[j] - if v.UnixNano() != a[i].UnixNano() { - i++ - } - a[i] = v - - } - return a[:i+1] -} - -// Exclude returns the subset of values not in [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a UnsignedValues) Exclude(min, max int64) UnsignedValues { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return a - } - - // a[rmin].UnixNano() ≥ min - // a[rmax].UnixNano() ≥ max - - if rmax < len(a) { - if a[rmax].UnixNano() == max { - rmax++ - } - rest := len(a) - rmax - if rest > 0 { - b := a[:rmin+rest] - copy(b[rmin:], a[rmax:]) - return b - } - } - - return a[:rmin] -} - -// Include returns the subset values between min and max inclusive. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a UnsignedValues) Include(min, max int64) UnsignedValues { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return nil - } - - // a[rmin].UnixNano() ≥ min - // a[rmax].UnixNano() ≥ max - - if rmax < len(a) && a[rmax].UnixNano() == max { - rmax++ - } - - if rmin > -1 { - b := a[:rmax-rmin] - copy(b, a[rmin:rmax]) - return b - } - - return a[:rmax] -} - -// search performs a binary search for UnixNano() v in a -// and returns the position, i, where v would be inserted. -// An additional check of a[i].UnixNano() == v is necessary -// to determine if the value v exists. -func (a UnsignedValues) search(v int64) int { - // Define: f(x) → a[x].UnixNano() < v - // Define: f(-1) == true, f(n) == false - // Invariant: f(lo-1) == true, f(hi) == false - lo := 0 - hi := len(a) - for lo < hi { - mid := int(uint(lo+hi) >> 1) - if a[mid].UnixNano() < v { - lo = mid + 1 // preserves f(lo-1) == true - } else { - hi = mid // preserves f(hi) == false - } - } - - // lo == hi - return lo -} - -// FindRange returns the positions where min and max would be -// inserted into the array. 
If a[0].UnixNano() > max or -// a[len-1].UnixNano() < min then FindRange returns (-1, -1) -// indicating the array is outside the [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results -// are undefined. -func (a UnsignedValues) FindRange(min, max int64) (int, int) { - if len(a) == 0 || min > max { - return -1, -1 - } - - minVal := a[0].UnixNano() - maxVal := a[len(a)-1].UnixNano() - - if maxVal < min || minVal > max { - return -1, -1 - } - - return a.search(min), a.search(max) -} - -// Merge overlays b to top of a. If two values conflict with -// the same timestamp, b is used. Both a and b must be sorted -// in ascending order. -func (a UnsignedValues) Merge(b UnsignedValues) UnsignedValues { - if len(a) == 0 { - return b - } - - if len(b) == 0 { - return a - } - - // Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's - // possible stored blocks might contain duplicate values. Remove them if they exists before - // merging. - a = a.Deduplicate() - b = b.Deduplicate() - - if a[len(a)-1].UnixNano() < b[0].UnixNano() { - return append(a, b...) - } - - if b[len(b)-1].UnixNano() < a[0].UnixNano() { - return append(b, a...) - } - - out := make(UnsignedValues, 0, len(a)+len(b)) - for len(a) > 0 && len(b) > 0 { - if a[0].UnixNano() < b[0].UnixNano() { - out, a = append(out, a[0]), a[1:] - } else if len(b) > 0 && a[0].UnixNano() == b[0].UnixNano() { - a = a[1:] - } else { - out, b = append(out, b[0]), b[1:] - } - } - if len(a) > 0 { - return append(out, a...) - } - return append(out, b...) -} - -func (a UnsignedValues) Encode(buf []byte) ([]byte, error) { - return encodeUnsignedValuesBlock(buf, a) -} - -func EncodeUnsignedArrayBlock(a *tsdb.UnsignedArray, b []byte) ([]byte, error) { - if a.Len() == 0 { - return nil, nil - } - - // TODO(edd): These need to be pooled. - var vb []byte - var tb []byte - var err error - - if vb, err = UnsignedArrayEncodeAll(a.Values, vb); err != nil { - return nil, err - } - - if tb, err = TimeArrayEncodeAll(a.Timestamps, tb); err != nil { - return nil, err - } - - // Prepend the first timestamp of the block in the first 8 bytes and the block - // in the next byte, followed by the block - return packBlock(b, BlockUnsigned, tb, vb), nil -} - -func encodeUnsignedValuesBlock(buf []byte, values []UnsignedValue) ([]byte, error) { - if len(values) == 0 { - return nil, nil - } - - venc := getUnsignedEncoder(len(values)) - tsenc := getTimeEncoder(len(values)) - - var b []byte - err := func() error { - for _, v := range values { - tsenc.Write(v.unixnano) - venc.Write(int64(v.value)) - } - venc.Flush() - - // Encoded timestamp values - tb, err := tsenc.Bytes() - if err != nil { - return err - } - // Encoded values - vb, err := venc.Bytes() - if err != nil { - return err - } - - // Prepend the first timestamp of the block in the first 8 bytes and the block - // in the next byte, followed by the block - b = packBlock(buf, BlockUnsigned, tb, vb) - - return nil - }() - - putTimeEncoder(tsenc) - putUnsignedEncoder(venc) - - return b, err -} - -// Sort methods -func (a UnsignedValues) Len() int { return len(a) } -func (a UnsignedValues) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a UnsignedValues) Less(i, j int) bool { return a[i].UnixNano() < a[j].UnixNano() } - -// StringValues represents a slice of String values. 
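Worth noting from encodeUnsignedValuesBlock above: uint64 values go through the integer encoder via a plain int64(v.value) conversion, which in Go only reinterprets the bits, so the round trip is lossless (the unsigned decoder presumably converts back the same way). A standalone illustration:

package main

import (
	"fmt"
	"math"
)

func main() {
	u := uint64(math.MaxUint64)
	i := int64(u) // same 64 bits, reinterpreted as two's complement

	fmt.Println(i)              // -1
	fmt.Println(uint64(i) == u) // true: no information is lost
}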
-type StringValues []StringValue - -func NewStringArrayFromValues(v StringValues) *tsdb.StringArray { - a := tsdb.NewStringArrayLen(len(v)) - for i, val := range v { - a.Timestamps[i] = val.unixnano - a.Values[i] = val.value - } - return a -} - -func (a StringValues) MinTime() int64 { - return a[0].UnixNano() -} - -func (a StringValues) MaxTime() int64 { - return a[len(a)-1].UnixNano() -} - -func (a StringValues) Size() int { - sz := 0 - for _, v := range a { - sz += v.Size() - } - return sz -} - -func (a StringValues) ordered() bool { - if len(a) <= 1 { - return true - } - for i := 1; i < len(a); i++ { - if av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab { - return false - } - } - return true -} - -func (a StringValues) assertOrdered() { - if len(a) <= 1 { - return - } - for i := 1; i < len(a); i++ { - if av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab { - panic(fmt.Sprintf("not ordered: %d %d >= %d", i, av, ab)) - } - } -} - -// Deduplicate returns a new slice with any values that have the same timestamp removed. -// The Value that appears last in the slice is the one that is kept. The returned -// Values are sorted if necessary. -func (a StringValues) Deduplicate() StringValues { - if len(a) <= 1 { - return a - } - - // See if we're already sorted and deduped - var needSort bool - for i := 1; i < len(a); i++ { - if a[i-1].UnixNano() >= a[i].UnixNano() { - needSort = true - break - } - } - - if !needSort { - return a - } - - sort.Stable(a) - var i int - for j := 1; j < len(a); j++ { - v := a[j] - if v.UnixNano() != a[i].UnixNano() { - i++ - } - a[i] = v - - } - return a[:i+1] -} - -// Exclude returns the subset of values not in [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a StringValues) Exclude(min, max int64) StringValues { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return a - } - - // a[rmin].UnixNano() ≥ min - // a[rmax].UnixNano() ≥ max - - if rmax < len(a) { - if a[rmax].UnixNano() == max { - rmax++ - } - rest := len(a) - rmax - if rest > 0 { - b := a[:rmin+rest] - copy(b[rmin:], a[rmax:]) - return b - } - } - - return a[:rmin] -} - -// Include returns the subset values between min and max inclusive. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a StringValues) Include(min, max int64) StringValues { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return nil - } - - // a[rmin].UnixNano() ≥ min - // a[rmax].UnixNano() ≥ max - - if rmax < len(a) && a[rmax].UnixNano() == max { - rmax++ - } - - if rmin > -1 { - b := a[:rmax-rmin] - copy(b, a[rmin:rmax]) - return b - } - - return a[:rmax] -} - -// search performs a binary search for UnixNano() v in a -// and returns the position, i, where v would be inserted. -// An additional check of a[i].UnixNano() == v is necessary -// to determine if the value v exists. -func (a StringValues) search(v int64) int { - // Define: f(x) → a[x].UnixNano() < v - // Define: f(-1) == true, f(n) == false - // Invariant: f(lo-1) == true, f(hi) == false - lo := 0 - hi := len(a) - for lo < hi { - mid := int(uint(lo+hi) >> 1) - if a[mid].UnixNano() < v { - lo = mid + 1 // preserves f(lo-1) == true - } else { - hi = mid // preserves f(hi) == false - } - } - - // lo == hi - return lo -} - -// FindRange returns the positions where min and max would be -// inserted into the array. 
If a[0].UnixNano() > max or -// a[len-1].UnixNano() < min then FindRange returns (-1, -1) -// indicating the array is outside the [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results -// are undefined. -func (a StringValues) FindRange(min, max int64) (int, int) { - if len(a) == 0 || min > max { - return -1, -1 - } - - minVal := a[0].UnixNano() - maxVal := a[len(a)-1].UnixNano() - - if maxVal < min || minVal > max { - return -1, -1 - } - - return a.search(min), a.search(max) -} - -// Merge overlays b to top of a. If two values conflict with -// the same timestamp, b is used. Both a and b must be sorted -// in ascending order. -func (a StringValues) Merge(b StringValues) StringValues { - if len(a) == 0 { - return b - } - - if len(b) == 0 { - return a - } - - // Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's - // possible stored blocks might contain duplicate values. Remove them if they exists before - // merging. - a = a.Deduplicate() - b = b.Deduplicate() - - if a[len(a)-1].UnixNano() < b[0].UnixNano() { - return append(a, b...) - } - - if b[len(b)-1].UnixNano() < a[0].UnixNano() { - return append(b, a...) - } - - out := make(StringValues, 0, len(a)+len(b)) - for len(a) > 0 && len(b) > 0 { - if a[0].UnixNano() < b[0].UnixNano() { - out, a = append(out, a[0]), a[1:] - } else if len(b) > 0 && a[0].UnixNano() == b[0].UnixNano() { - a = a[1:] - } else { - out, b = append(out, b[0]), b[1:] - } - } - if len(a) > 0 { - return append(out, a...) - } - return append(out, b...) -} - -func (a StringValues) Encode(buf []byte) ([]byte, error) { - return encodeStringValuesBlock(buf, a) -} - -func EncodeStringArrayBlock(a *tsdb.StringArray, b []byte) ([]byte, error) { - if a.Len() == 0 { - return nil, nil - } - - // TODO(edd): These need to be pooled. - var vb []byte - var tb []byte - var err error - - if vb, err = StringArrayEncodeAll(a.Values, vb); err != nil { - return nil, err - } - - if tb, err = TimeArrayEncodeAll(a.Timestamps, tb); err != nil { - return nil, err - } - - // Prepend the first timestamp of the block in the first 8 bytes and the block - // in the next byte, followed by the block - return packBlock(b, BlockString, tb, vb), nil -} - -func encodeStringValuesBlock(buf []byte, values []StringValue) ([]byte, error) { - if len(values) == 0 { - return nil, nil - } - - venc := getStringEncoder(len(values)) - tsenc := getTimeEncoder(len(values)) - - var b []byte - err := func() error { - for _, v := range values { - tsenc.Write(v.unixnano) - venc.Write(v.value) - } - venc.Flush() - - // Encoded timestamp values - tb, err := tsenc.Bytes() - if err != nil { - return err - } - // Encoded values - vb, err := venc.Bytes() - if err != nil { - return err - } - - // Prepend the first timestamp of the block in the first 8 bytes and the block - // in the next byte, followed by the block - b = packBlock(buf, BlockString, tb, vb) - - return nil - }() - - putTimeEncoder(tsenc) - putStringEncoder(venc) - - return b, err -} - -// Sort methods -func (a StringValues) Len() int { return len(a) } -func (a StringValues) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a StringValues) Less(i, j int) bool { return a[i].UnixNano() < a[j].UnixNano() } - -// BooleanValues represents a slice of Boolean values. 
-type BooleanValues []BooleanValue - -func NewBooleanArrayFromValues(v BooleanValues) *tsdb.BooleanArray { - a := tsdb.NewBooleanArrayLen(len(v)) - for i, val := range v { - a.Timestamps[i] = val.unixnano - a.Values[i] = val.value - } - return a -} - -func (a BooleanValues) MinTime() int64 { - return a[0].UnixNano() -} - -func (a BooleanValues) MaxTime() int64 { - return a[len(a)-1].UnixNano() -} - -func (a BooleanValues) Size() int { - sz := 0 - for _, v := range a { - sz += v.Size() - } - return sz -} - -func (a BooleanValues) ordered() bool { - if len(a) <= 1 { - return true - } - for i := 1; i < len(a); i++ { - if av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab { - return false - } - } - return true -} - -func (a BooleanValues) assertOrdered() { - if len(a) <= 1 { - return - } - for i := 1; i < len(a); i++ { - if av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab { - panic(fmt.Sprintf("not ordered: %d %d >= %d", i, av, ab)) - } - } -} - -// Deduplicate returns a new slice with any values that have the same timestamp removed. -// The Value that appears last in the slice is the one that is kept. The returned -// Values are sorted if necessary. -func (a BooleanValues) Deduplicate() BooleanValues { - if len(a) <= 1 { - return a - } - - // See if we're already sorted and deduped - var needSort bool - for i := 1; i < len(a); i++ { - if a[i-1].UnixNano() >= a[i].UnixNano() { - needSort = true - break - } - } - - if !needSort { - return a - } - - sort.Stable(a) - var i int - for j := 1; j < len(a); j++ { - v := a[j] - if v.UnixNano() != a[i].UnixNano() { - i++ - } - a[i] = v - - } - return a[:i+1] -} - -// Exclude returns the subset of values not in [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a BooleanValues) Exclude(min, max int64) BooleanValues { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return a - } - - // a[rmin].UnixNano() ≥ min - // a[rmax].UnixNano() ≥ max - - if rmax < len(a) { - if a[rmax].UnixNano() == max { - rmax++ - } - rest := len(a) - rmax - if rest > 0 { - b := a[:rmin+rest] - copy(b[rmin:], a[rmax:]) - return b - } - } - - return a[:rmin] -} - -// Include returns the subset values between min and max inclusive. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a BooleanValues) Include(min, max int64) BooleanValues { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return nil - } - - // a[rmin].UnixNano() ≥ min - // a[rmax].UnixNano() ≥ max - - if rmax < len(a) && a[rmax].UnixNano() == max { - rmax++ - } - - if rmin > -1 { - b := a[:rmax-rmin] - copy(b, a[rmin:rmax]) - return b - } - - return a[:rmax] -} - -// search performs a binary search for UnixNano() v in a -// and returns the position, i, where v would be inserted. -// An additional check of a[i].UnixNano() == v is necessary -// to determine if the value v exists. -func (a BooleanValues) search(v int64) int { - // Define: f(x) → a[x].UnixNano() < v - // Define: f(-1) == true, f(n) == false - // Invariant: f(lo-1) == true, f(hi) == false - lo := 0 - hi := len(a) - for lo < hi { - mid := int(uint(lo+hi) >> 1) - if a[mid].UnixNano() < v { - lo = mid + 1 // preserves f(lo-1) == true - } else { - hi = mid // preserves f(hi) == false - } - } - - // lo == hi - return lo -} - -// FindRange returns the positions where min and max would be -// inserted into the array. 
If a[0].UnixNano() > max or -// a[len-1].UnixNano() < min then FindRange returns (-1, -1) -// indicating the array is outside the [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results -// are undefined. -func (a BooleanValues) FindRange(min, max int64) (int, int) { - if len(a) == 0 || min > max { - return -1, -1 - } - - minVal := a[0].UnixNano() - maxVal := a[len(a)-1].UnixNano() - - if maxVal < min || minVal > max { - return -1, -1 - } - - return a.search(min), a.search(max) -} - -// Merge overlays b to top of a. If two values conflict with -// the same timestamp, b is used. Both a and b must be sorted -// in ascending order. -func (a BooleanValues) Merge(b BooleanValues) BooleanValues { - if len(a) == 0 { - return b - } - - if len(b) == 0 { - return a - } - - // Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's - // possible stored blocks might contain duplicate values. Remove them if they exists before - // merging. - a = a.Deduplicate() - b = b.Deduplicate() - - if a[len(a)-1].UnixNano() < b[0].UnixNano() { - return append(a, b...) - } - - if b[len(b)-1].UnixNano() < a[0].UnixNano() { - return append(b, a...) - } - - out := make(BooleanValues, 0, len(a)+len(b)) - for len(a) > 0 && len(b) > 0 { - if a[0].UnixNano() < b[0].UnixNano() { - out, a = append(out, a[0]), a[1:] - } else if len(b) > 0 && a[0].UnixNano() == b[0].UnixNano() { - a = a[1:] - } else { - out, b = append(out, b[0]), b[1:] - } - } - if len(a) > 0 { - return append(out, a...) - } - return append(out, b...) -} - -func (a BooleanValues) Encode(buf []byte) ([]byte, error) { - return encodeBooleanValuesBlock(buf, a) -} - -func EncodeBooleanArrayBlock(a *tsdb.BooleanArray, b []byte) ([]byte, error) { - if a.Len() == 0 { - return nil, nil - } - - // TODO(edd): These need to be pooled. 
- var vb []byte - var tb []byte - var err error - - if vb, err = BooleanArrayEncodeAll(a.Values, vb); err != nil { - return nil, err - } - - if tb, err = TimeArrayEncodeAll(a.Timestamps, tb); err != nil { - return nil, err - } - - // Prepend the first timestamp of the block in the first 8 bytes and the block - // in the next byte, followed by the block - return packBlock(b, BlockBoolean, tb, vb), nil -} - -func encodeBooleanValuesBlock(buf []byte, values []BooleanValue) ([]byte, error) { - if len(values) == 0 { - return nil, nil - } - - venc := getBooleanEncoder(len(values)) - tsenc := getTimeEncoder(len(values)) - - var b []byte - err := func() error { - for _, v := range values { - tsenc.Write(v.unixnano) - venc.Write(v.value) - } - venc.Flush() - - // Encoded timestamp values - tb, err := tsenc.Bytes() - if err != nil { - return err - } - // Encoded values - vb, err := venc.Bytes() - if err != nil { - return err - } - - // Prepend the first timestamp of the block in the first 8 bytes and the block - // in the next byte, followed by the block - b = packBlock(buf, BlockBoolean, tb, vb) - - return nil - }() - - putTimeEncoder(tsenc) - putBooleanEncoder(venc) - - return b, err -} - -// Sort methods -func (a BooleanValues) Len() int { return len(a) } -func (a BooleanValues) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a BooleanValues) Less(i, j int) bool { return a[i].UnixNano() < a[j].UnixNano() } diff --git a/tsdb/engine/tsm1/encoding.gen.go.tmpl b/tsdb/engine/tsm1/encoding.gen.go.tmpl deleted file mode 100644 index 5749b0893fe..00000000000 --- a/tsdb/engine/tsm1/encoding.gen.go.tmpl +++ /dev/null @@ -1,313 +0,0 @@ -//lint:file-ignore U1000 generated code -package tsm1 - -import ( - "fmt" - "sort" - - "github.com/influxdata/influxdb/v2/tsdb" -) - -{{range .}} - -// {{.Name}}Values represents a slice of {{.Name}} values. -type {{.Name}}Values []{{.Name}}Value - -{{if ne .Name ""}} -func New{{.Name}}ArrayFromValues(v {{.Name}}Values) *tsdb.{{.Name}}Array { - a := tsdb.New{{.Name}}ArrayLen(len(v)) - for i, val := range v { - a.Timestamps[i] = val.unixnano - a.Values[i] = val.value - } - return a -} -{{end}} - -func (a {{.Name}}Values) MinTime() int64 { - return a[0].UnixNano() -} - -func (a {{.Name}}Values) MaxTime() int64 { - return a[len(a)-1].UnixNano() -} - -func (a {{.Name}}Values) Size() int { - sz := 0 - for _, v := range a { - sz += v.Size() - } - return sz -} - -func (a {{.Name}}Values) ordered() bool { - if len(a) <= 1 { - return true - } - for i := 1; i < len(a); i++ { - if av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab { - return false - } - } - return true -} - -func (a {{.Name}}Values) assertOrdered() { - if len(a) <= 1 { - return - } - for i := 1; i < len(a); i++ { - if av, ab := a[i-1].UnixNano(), a[i].UnixNano(); av >= ab { - panic(fmt.Sprintf("not ordered: %d %d >= %d", i, av, ab)) - } - } -} - - -// Deduplicate returns a new slice with any values that have the same timestamp removed. -// The Value that appears last in the slice is the one that is kept. The returned -// Values are sorted if necessary. 
-func (a {{.Name}}Values) Deduplicate() {{.Name}}Values { - if len(a) <= 1 { - return a - } - - // See if we're already sorted and deduped - var needSort bool - for i := 1; i < len(a); i++ { - if a[i-1].UnixNano() >= a[i].UnixNano() { - needSort = true - break - } - } - - if !needSort { - return a - } - - sort.Stable(a) - var i int - for j := 1; j < len(a); j++ { - v := a[j] - if v.UnixNano() != a[i].UnixNano() { - i++ - } - a[i] = v - - } - return a[:i+1] -} - -// Exclude returns the subset of values not in [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a {{.Name}}Values) Exclude(min, max int64) {{.Name}}Values { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return a - } - - // a[rmin].UnixNano() ≥ min - // a[rmax].UnixNano() ≥ max - - if rmax < len(a) { - if a[rmax].UnixNano() == max { - rmax++ - } - rest := len(a)-rmax - if rest > 0 { - b := a[:rmin+rest] - copy(b[rmin:], a[rmax:]) - return b - } - } - - return a[:rmin] -} - -// Include returns the subset values between min and max inclusive. The values must -// be deduplicated and sorted before calling Exclude or the results are undefined. -func (a {{.Name}}Values) Include(min, max int64) {{.Name}}Values { - rmin, rmax := a.FindRange(min, max) - if rmin == -1 && rmax == -1 { - return nil - } - - // a[rmin].UnixNano() ≥ min - // a[rmax].UnixNano() ≥ max - - if rmax < len(a) && a[rmax].UnixNano() == max { - rmax++ - } - - if rmin > -1 { - b := a[:rmax-rmin] - copy(b, a[rmin:rmax]) - return b - } - - return a[:rmax] -} - -// search performs a binary search for UnixNano() v in a -// and returns the position, i, where v would be inserted. -// An additional check of a[i].UnixNano() == v is necessary -// to determine if the value v exists. -func (a {{.Name}}Values) search(v int64) int { - // Define: f(x) → a[x].UnixNano() < v - // Define: f(-1) == true, f(n) == false - // Invariant: f(lo-1) == true, f(hi) == false - lo := 0 - hi := len(a) - for lo < hi { - mid := int(uint(lo+hi) >> 1) - if a[mid].UnixNano() < v { - lo = mid + 1 // preserves f(lo-1) == true - } else { - hi = mid // preserves f(hi) == false - } - } - - // lo == hi - return lo -} - -// FindRange returns the positions where min and max would be -// inserted into the array. If a[0].UnixNano() > max or -// a[len-1].UnixNano() < min then FindRange returns (-1, -1) -// indicating the array is outside the [min, max]. The values must -// be deduplicated and sorted before calling Exclude or the results -// are undefined. -func (a {{.Name}}Values) FindRange(min, max int64) (int, int) { - if len(a) == 0 || min > max { - return -1, -1 - } - - minVal := a[0].UnixNano() - maxVal := a[len(a)-1].UnixNano() - - if maxVal < min || minVal > max { - return -1, -1 - } - - return a.search(min), a.search(max) -} - -// Merge overlays b to top of a. If two values conflict with -// the same timestamp, b is used. Both a and b must be sorted -// in ascending order. -func (a {{.Name}}Values) Merge(b {{.Name}}Values) {{.Name}}Values { - if len(a) == 0 { - return b - } - - if len(b) == 0 { - return a - } - - // Normally, both a and b should not contain duplicates. Due to a bug in older versions, it's - // possible stored blocks might contain duplicate values. Remove them if they exists before - // merging. - a = a.Deduplicate() - b = b.Deduplicate() - - if a[len(a)-1].UnixNano() < b[0].UnixNano() { - return append(a, b...) - } - - if b[len(b)-1].UnixNano() < a[0].UnixNano() { - return append(b, a...) 
- } - - out := make({{.Name}}Values, 0, len(a)+len(b)) - for len(a) > 0 && len(b) > 0 { - if a[0].UnixNano() < b[0].UnixNano() { - out, a = append(out, a[0]), a[1:] - } else if len(b) > 0 && a[0].UnixNano() == b[0].UnixNano() { - a = a[1:] - } else { - out, b = append(out, b[0]), b[1:] - } - } - if len(a) > 0 { - return append(out, a...) - } - return append(out, b...) -} - -{{ if ne .Name "" }} -func (a {{.Name}}Values) Encode(buf []byte) ([]byte, error) { - return encode{{.Name}}ValuesBlock(buf, a) -} - -func Encode{{ .Name }}ArrayBlock(a *tsdb.{{ .Name }}Array, b []byte) ([]byte, error) { - if a.Len() == 0 { - return nil, nil - } - - // TODO(edd): These need to be pooled. - var vb []byte - var tb []byte - var err error - - if vb, err = {{ .Name }}ArrayEncodeAll(a.Values, vb); err != nil { - return nil, err - } - - if tb, err = TimeArrayEncodeAll(a.Timestamps, tb); err != nil { - return nil, err - } - - // Prepend the first timestamp of the block in the first 8 bytes and the block - // in the next byte, followed by the block - return packBlock(b, {{ .Type }}, tb, vb), nil -} - -func encode{{ .Name }}ValuesBlock(buf []byte, values []{{.Name}}Value) ([]byte, error) { - if len(values) == 0 { - return nil, nil - } - - venc := get{{ .Name }}Encoder(len(values)) - tsenc := getTimeEncoder(len(values)) - - var b []byte - err := func() error { - for _, v := range values { - tsenc.Write(v.unixnano) - venc.Write({{if .CastType}}{{.CastType}}(v.value){{else}}v.value{{end}}) - } - venc.Flush() - - // Encoded timestamp values - tb, err := tsenc.Bytes() - if err != nil { - return err - } - // Encoded values - vb, err := venc.Bytes() - if err != nil { - return err - } - - // Prepend the first timestamp of the block in the first 8 bytes and the block - // in the next byte, followed by the block - b = packBlock(buf, {{ .Type }}, tb, vb) - - return nil - }() - - putTimeEncoder(tsenc) - put{{.Name}}Encoder(venc) - - return b, err -} - -{{ end }} - -// Sort methods -func (a {{.Name}}Values) Len() int { return len(a) } -func (a {{.Name}}Values) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a {{.Name}}Values) Less(i, j int) bool { return a[i].UnixNano() < a[j].UnixNano() } - - -{{ end }} diff --git a/tsdb/engine/tsm1/encoding.gen.go.tmpldata b/tsdb/engine/tsm1/encoding.gen.go.tmpldata deleted file mode 100644 index 8da02869516..00000000000 --- a/tsdb/engine/tsm1/encoding.gen.go.tmpldata +++ /dev/null @@ -1,38 +0,0 @@ -[ - { - "Name":"", - "name":"", - "Type":"", - "CastType":"" - }, - { - "Name":"Float", - "name":"float", - "Type":"BlockFloat64", - "CastType":"" - }, - { - "Name":"Integer", - "name":"integer", - "Type":"BlockInteger", - "CastType":"" - }, - { - "Name":"Unsigned", - "name":"unsigned", - "Type":"BlockUnsigned", - "CastType":"int64" - }, - { - "Name":"String", - "name":"string", - "Type":"BlockString", - "CastType":"" - }, - { - "Name":"Boolean", - "name":"boolean", - "Type":"BlockBoolean", - "CastType":"" - } -] diff --git a/tsdb/engine/tsm1/encoding.gen_test.go b/tsdb/engine/tsm1/encoding.gen_test.go deleted file mode 100644 index 9fb01893f00..00000000000 --- a/tsdb/engine/tsm1/encoding.gen_test.go +++ /dev/null @@ -1,217 +0,0 @@ -package tsm1 - -import ( - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" -) - -func makeIntegerValues(count int, min, max int64) IntegerValues { - vals := make(IntegerValues, count) - - ts := min - inc := (max - min) / int64(count) - - for i := 0; i < count; i++ { - vals[i].unixnano = ts - ts += inc - } - - return vals -} - -func 
makeIntegerValuesFromSlice(t []int64) IntegerValues { - iv := make(IntegerValues, len(t)) - for i, v := range t { - iv[i].unixnano = v - } - return iv -} - -func TestIntegerValues_FindRangeNoValues(t *testing.T) { - var vals IntegerValues - l, r := vals.FindRange(0, 100) - if exp := -1; l != exp { - t.Errorf("invalid l; exp=%d, got=%d", exp, l) - } - if exp := -1; r != exp { - t.Errorf("invalid r; exp=%d, got=%d", exp, r) - } -} - -func TestIntegerValues_FindRange(t *testing.T) { - vals := makeIntegerValuesFromSlice([]int64{10, 11, 13, 15, 17, 20, 21}) - - cases := []struct { - min, max int64 - l, r int - }{ - {12, 20, 2, 5}, - {22, 40, -1, -1}, - {1, 9, -1, -1}, - {1, 10, 0, 0}, - {1, 11, 0, 1}, - {15, 15, 3, 3}, - } - - for _, tc := range cases { - t.Run(fmt.Sprintf("%d→%d", tc.min, tc.max), func(t *testing.T) { - l, r := vals.FindRange(tc.min, tc.max) - if l != tc.l { - t.Errorf("left: got %d, exp %d", l, tc.l) - } - if r != tc.r { - t.Errorf("right: got %d, exp %d", r, tc.r) - } - }) - } -} - -func TestIntegerValues_Exclude(t *testing.T) { - cases := []struct { - n string - min, max int64 - exp []int64 - }{ - {"excl bad range", 18, 11, []int64{10, 12, 14, 16, 18}}, - {"excl none-lo", 0, 9, []int64{10, 12, 14, 16, 18}}, - {"excl none-hi", 19, 30, []int64{10, 12, 14, 16, 18}}, - {"excl first", 0, 10, []int64{12, 14, 16, 18}}, - {"excl last", 18, 20, []int64{10, 12, 14, 16}}, - {"excl all but first and last", 12, 16, []int64{10, 18}}, - {"excl none in middle", 13, 13, []int64{10, 12, 14, 16, 18}}, - {"excl middle", 14, 14, []int64{10, 12, 16, 18}}, - {"excl suffix", 16, 18, []int64{10, 12, 14}}, - } - - for _, tc := range cases { - t.Run(fmt.Sprintf("%s[%d,%d]", tc.n, tc.min, tc.max), func(t *testing.T) { - vals := makeIntegerValues(5, 10, 20) - vals = vals.Exclude(tc.min, tc.max) - var got []int64 - for _, v := range vals { - got = append(got, v.unixnano) - } - opt := cmp.AllowUnexported(IntegerValue{}) - if !cmp.Equal(tc.exp, got, opt) { - t.Error(cmp.Diff(tc.exp, got, opt)) - } - }) - } -} - -func TestIntegerValues_Include(t *testing.T) { - cases := []struct { - n string - min, max int64 - exp []int64 - }{ - {"incl none-lo", 0, 9, nil}, - {"incl none-hi", 19, 30, nil}, - {"incl first", 0, 10, []int64{10}}, - {"incl last", 18, 20, []int64{18}}, - {"incl all but first and last", 12, 16, []int64{12, 14, 16}}, - {"incl none in middle", 13, 13, nil}, - {"incl middle", 14, 14, []int64{14}}, - } - - for _, tc := range cases { - t.Run(fmt.Sprintf("%s[%d,%d]", tc.n, tc.min, tc.max), func(t *testing.T) { - vals := makeIntegerValues(5, 10, 20) - vals = vals.Include(tc.min, tc.max) - var got []int64 - for _, v := range vals { - got = append(got, v.unixnano) - } - opt := cmp.AllowUnexported(IntegerValue{}) - if !cmp.Equal(tc.exp, got, opt) { - t.Error(cmp.Diff(tc.exp, got, opt)) - } - }) - } -} - -func benchExclude(b *testing.B, vals IntegerValues, min, max int64) { - b.ResetTimer() - - for i := 0; i < b.N; i++ { - vals.Exclude(min, max) - } -} - -func BenchmarkIntegerValues_ExcludeNone_1000(b *testing.B) { - benchExclude(b, makeIntegerValues(1000, 1000, 2000), 0, 500) -} - -func BenchmarkIntegerValues_ExcludeMiddleHalf_1000(b *testing.B) { - benchExclude(b, makeIntegerValues(1000, 1000, 2000), 1250, 1750) -} - -func BenchmarkIntegerValues_ExcludeFirst_1000(b *testing.B) { - benchExclude(b, makeIntegerValues(1000, 1000, 2000), 0, 1000) -} - -func BenchmarkIntegerValues_ExcludeLast_1000(b *testing.B) { - benchExclude(b, makeIntegerValues(1000, 1000, 2000), 1999, 2000) -} - -func 
BenchmarkIntegerValues_ExcludeNone_10000(b *testing.B) { - benchExclude(b, makeIntegerValues(10000, 10000, 20000), 00, 5000) -} - -func BenchmarkIntegerValues_ExcludeMiddleHalf_10000(b *testing.B) { - benchExclude(b, makeIntegerValues(10000, 10000, 20000), 12500, 17500) -} - -func BenchmarkIntegerValues_ExcludeFirst_10000(b *testing.B) { - benchExclude(b, makeIntegerValues(10000, 10000, 20000), 0, 10000) -} - -func BenchmarkIntegerValues_ExcludeLast_10000(b *testing.B) { - benchExclude(b, makeIntegerValues(10000, 10000, 20000), 19999, 20000) -} - -func benchInclude(b *testing.B, vals IntegerValues, min, max int64) { - tmp := append(IntegerValues{}, vals...) - n := len(vals) - b.ResetTimer() - - for i := 0; i < b.N; i++ { - vals.Include(min, max) - vals = vals[:n] - copy(vals, tmp) - } -} - -func BenchmarkIntegerValues_IncludeNone_1000(b *testing.B) { - benchInclude(b, makeIntegerValues(1000, 1000, 2000), 0, 500) -} - -func BenchmarkIntegerValues_IncludeMiddleHalf_1000(b *testing.B) { - benchInclude(b, makeIntegerValues(1000, 1000, 2000), 1250, 1750) -} - -func BenchmarkIntegerValues_IncludeFirst_1000(b *testing.B) { - benchInclude(b, makeIntegerValues(1000, 1000, 2000), 0, 1000) -} - -func BenchmarkIntegerValues_IncludeLast_1000(b *testing.B) { - benchInclude(b, makeIntegerValues(1000, 1000, 2000), 1999, 2000) -} - -func BenchmarkIntegerValues_IncludeNone_10000(b *testing.B) { - benchInclude(b, makeIntegerValues(10000, 10000, 20000), 00, 5000) -} - -func BenchmarkIntegerValues_IncludeMiddleHalf_10000(b *testing.B) { - benchInclude(b, makeIntegerValues(10000, 10000, 20000), 12500, 17500) -} - -func BenchmarkIntegerValues_IncludeFirst_10000(b *testing.B) { - benchInclude(b, makeIntegerValues(10000, 10000, 20000), 0, 10000) -} - -func BenchmarkIntegerValues_IncludeLast_10000(b *testing.B) { - benchInclude(b, makeIntegerValues(10000, 10000, 20000), 19999, 20000) -} diff --git a/tsdb/engine/tsm1/encoding.go b/tsdb/engine/tsm1/encoding.go deleted file mode 100644 index ed5d9cd61ee..00000000000 --- a/tsdb/engine/tsm1/encoding.go +++ /dev/null @@ -1,1044 +0,0 @@ -package tsm1 - -import ( - "encoding/binary" - "fmt" - "runtime" - "time" - - "github.com/influxdata/influxdb/v2/pkg/pool" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxql" -) - -const ( - // BlockFloat64 designates a block encodes float64 values. - BlockFloat64 = byte(0) - - // BlockInteger designates a block encodes int64 values. - BlockInteger = byte(1) - - // BlockBoolean designates a block encodes boolean values. - BlockBoolean = byte(2) - - // BlockString designates a block encodes string values. - BlockString = byte(3) - - // BlockUnsigned designates a block encodes uint64 values. - BlockUnsigned = byte(4) - - // encodedBlockHeaderSize is the size of the header for an encoded block. There is one - // byte encoding the type of the block. - encodedBlockHeaderSize = 1 -) - -func init() { - // Prime the pools with one encoder/decoder for each available CPU. 
- vals := make([]interface{}, 0, runtime.NumCPU()) - for _, p := range []*pool.Generic{ - timeEncoderPool, timeDecoderPool, - integerEncoderPool, integerDecoderPool, - floatDecoderPool, floatDecoderPool, - stringEncoderPool, stringEncoderPool, - booleanEncoderPool, booleanDecoderPool, - } { - vals = vals[:0] - // Check one out to force the allocation now and hold onto it - for i := 0; i < runtime.NumCPU(); i++ { - v := p.Get(tsdb.DefaultMaxPointsPerBlock) - vals = append(vals, v) - } - // Add them all back - for _, v := range vals { - p.Put(v) - } - } -} - -var ( - // encoder pools - - timeEncoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} { - return NewTimeEncoder(sz) - }) - integerEncoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} { - return NewIntegerEncoder(sz) - }) - floatEncoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} { - return NewFloatEncoder() - }) - stringEncoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} { - return NewStringEncoder(sz) - }) - booleanEncoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} { - return NewBooleanEncoder(sz) - }) - - // decoder pools - - timeDecoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} { - return &TimeDecoder{} - }) - integerDecoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} { - return &IntegerDecoder{} - }) - floatDecoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} { - return &FloatDecoder{} - }) - stringDecoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} { - return &StringDecoder{} - }) - booleanDecoderPool = pool.NewGeneric(runtime.NumCPU(), func(sz int) interface{} { - return &BooleanDecoder{} - }) -) - -// Value represents a TSM-encoded value. -type Value interface { - // UnixNano returns the timestamp of the value in nanoseconds since unix epoch. - UnixNano() int64 - - // Value returns the underlying value. - Value() interface{} - - // Size returns the number of bytes necessary to represent the value and its timestamp. - Size() int - - // String returns the string representation of the value and its timestamp. - String() string - - // internalOnly is unexported to ensure implementations of Value - // can only originate in this package. - internalOnly() -} - -// NewValue returns a new Value with the underlying type dependent on value. -func NewValue(t int64, value interface{}) Value { - switch v := value.(type) { - case int64: - return IntegerValue{unixnano: t, value: v} - case uint64: - return UnsignedValue{unixnano: t, value: v} - case float64: - return FloatValue{unixnano: t, value: v} - case bool: - return BooleanValue{unixnano: t, value: v} - case string: - return StringValue{unixnano: t, value: v} - } - return EmptyValue{} -} - -// NewIntegerValue returns a new integer value. -func NewIntegerValue(t int64, v int64) Value { - return IntegerValue{unixnano: t, value: v} -} - -// NewUnsignedValue returns a new unsigned integer value. -func NewUnsignedValue(t int64, v uint64) Value { - return UnsignedValue{unixnano: t, value: v} -} - -// NewFloatValue returns a new float value. -func NewFloatValue(t int64, v float64) Value { - return FloatValue{unixnano: t, value: v} -} - -// NewBooleanValue returns a new boolean value. -func NewBooleanValue(t int64, v bool) Value { - return BooleanValue{unixnano: t, value: v} -} - -// NewStringValue returns a new string value. 
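NewValue above dispatches on the dynamic Go type of its second argument, and anything outside the five supported types falls back to EmptyValue; a sketch (same assumed import path):

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" // assumed import path
)

func main() {
	for _, v := range []tsm1.Value{
		tsm1.NewValue(1, 4.2),       // FloatValue
		tsm1.NewValue(2, int64(7)),  // IntegerValue
		tsm1.NewValue(3, uint64(7)), // UnsignedValue
		tsm1.NewValue(4, true),      // BooleanValue
		tsm1.NewValue(5, "hello"),   // StringValue
		tsm1.NewValue(6, int32(7)),  // unsupported type: EmptyValue (UnixNano() reports tsdb.EOF)
	} {
		fmt.Printf("%T %v\n", v, v.Value())
	}
}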
-func NewStringValue(t int64, v string) Value { - return StringValue{unixnano: t, value: v} -} - -// EmptyValue is used when there is no appropriate other value. -type EmptyValue struct{} - -// UnixNano returns tsdb.EOF. -func (e EmptyValue) UnixNano() int64 { return tsdb.EOF } - -// Value returns nil. -func (e EmptyValue) Value() interface{} { return nil } - -// Size returns 0. -func (e EmptyValue) Size() int { return 0 } - -// String returns the empty string. -func (e EmptyValue) String() string { return "" } - -func (EmptyValue) internalOnly() {} -func (StringValue) internalOnly() {} -func (IntegerValue) internalOnly() {} -func (UnsignedValue) internalOnly() {} -func (BooleanValue) internalOnly() {} -func (FloatValue) internalOnly() {} - -// Encode converts the values to a byte slice. If there are no values, -// this function panics. -func (a Values) Encode(buf []byte) ([]byte, error) { - if len(a) == 0 { - panic("unable to encode block type") - } - - switch a[0].(type) { - case FloatValue: - return encodeFloatBlock(buf, a) - case IntegerValue: - return encodeIntegerBlock(buf, a) - case UnsignedValue: - return encodeUnsignedBlock(buf, a) - case BooleanValue: - return encodeBooleanBlock(buf, a) - case StringValue: - return encodeStringBlock(buf, a) - } - - return nil, fmt.Errorf("unsupported value type %T", a[0]) -} - -// InfluxQLType returns the influxql.DataType the values map to. -func (a Values) InfluxQLType() (influxql.DataType, error) { - if len(a) == 0 { - return influxql.Unknown, fmt.Errorf("no values to infer type") - } - - switch a[0].(type) { - case FloatValue: - return influxql.Float, nil - case IntegerValue: - return influxql.Integer, nil - case UnsignedValue: - return influxql.Unsigned, nil - case BooleanValue: - return influxql.Boolean, nil - case StringValue: - return influxql.String, nil - } - - return influxql.Unknown, fmt.Errorf("unsupported value type %T", a[0]) -} - -// BlockType returns the type of value encoded in a block or an error -// if the block type is unknown. -func BlockType(block []byte) (byte, error) { - blockType := block[0] - switch blockType { - case BlockFloat64, BlockInteger, BlockUnsigned, BlockBoolean, BlockString: - return blockType, nil - default: - return 0, fmt.Errorf("unknown block type: %d", blockType) - } -} - -// BlockCount returns the number of timestamps encoded in block. -func BlockCount(block []byte) (int, error) { - if len(block) <= encodedBlockHeaderSize { - return 0, fmt.Errorf("count of short block: got %v, exp %v", len(block), encodedBlockHeaderSize) - } - // first byte is the block type - tb, _, err := unpackBlock(block[1:]) - if err != nil { - return 0, fmt.Errorf("BlockCount: error unpacking block: %v", err) - } - return CountTimestamps(tb), nil -} - -// DecodeBlock takes a byte slice and decodes it into values of the appropriate type -// based on the block. 
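Values.Encode above and DecodeBlock below form the usual round trip: the block type is inferred from the first value when encoding and read back from the leading type byte when decoding. A sketch, assuming a nil destination buffer is acceptable to both (import path assumed as before):

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" // assumed import path
)

func main() {
	vals := tsm1.Values{
		tsm1.NewValue(1, int64(10)),
		tsm1.NewValue(2, int64(20)),
		tsm1.NewValue(3, int64(30)),
	}

	block, err := vals.Encode(nil)
	if err != nil {
		log.Fatal(err)
	}

	if typ, err := tsm1.BlockType(block); err == nil {
		fmt.Println(typ == tsm1.BlockInteger) // true
	}

	decoded, err := tsm1.DecodeBlock(block, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range decoded {
		fmt.Println(v.UnixNano(), v.Value())
	}
}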
-func DecodeBlock(block []byte, vals []Value) ([]Value, error) { - if len(block) <= encodedBlockHeaderSize { - return nil, fmt.Errorf("decode of short block: got %v, exp %v", len(block), encodedBlockHeaderSize) - } - - blockType, err := BlockType(block) - if err != nil { - return nil, fmt.Errorf("error decoding block type: %v", err) - } - - switch blockType { - case BlockFloat64: - var buf []FloatValue - decoded, err := DecodeFloatBlock(block, &buf) - if len(vals) < len(decoded) { - vals = make([]Value, len(decoded)) - } - for i := range decoded { - vals[i] = decoded[i] - } - return vals[:len(decoded)], err - case BlockInteger: - var buf []IntegerValue - decoded, err := DecodeIntegerBlock(block, &buf) - if len(vals) < len(decoded) { - vals = make([]Value, len(decoded)) - } - for i := range decoded { - vals[i] = decoded[i] - } - return vals[:len(decoded)], err - - case BlockUnsigned: - var buf []UnsignedValue - decoded, err := DecodeUnsignedBlock(block, &buf) - if len(vals) < len(decoded) { - vals = make([]Value, len(decoded)) - } - for i := range decoded { - vals[i] = decoded[i] - } - return vals[:len(decoded)], err - - case BlockBoolean: - var buf []BooleanValue - decoded, err := DecodeBooleanBlock(block, &buf) - if len(vals) < len(decoded) { - vals = make([]Value, len(decoded)) - } - for i := range decoded { - vals[i] = decoded[i] - } - return vals[:len(decoded)], err - - case BlockString: - var buf []StringValue - decoded, err := DecodeStringBlock(block, &buf) - if len(vals) < len(decoded) { - vals = make([]Value, len(decoded)) - } - for i := range decoded { - vals[i] = decoded[i] - } - return vals[:len(decoded)], err - - default: - return nil, fmt.Errorf("unknown block type: %d", blockType) - } -} - -// FloatValue represents a float64 value. -type FloatValue struct { - unixnano int64 - value float64 -} - -// UnixNano returns the timestamp of the value. -func (v FloatValue) UnixNano() int64 { - return v.unixnano -} - -// Value returns the underlying float64 value. -func (v FloatValue) Value() interface{} { - return v.value -} - -// Size returns the number of bytes necessary to represent the value and its timestamp. -func (v FloatValue) Size() int { - return 16 -} - -// String returns the string representation of the value and its timestamp. -func (v FloatValue) String() string { - return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.value) -} - -func (v FloatValue) RawValue() float64 { return v.value } - -func encodeFloatBlock(buf []byte, values []Value) ([]byte, error) { - if len(values) == 0 { - return nil, nil - } - - // A float block is encoded using different compression strategies - // for timestamps and values. - - // Encode values using Gorilla float compression - venc := getFloatEncoder(len(values)) - - // Encode timestamps using an adaptive encoder that uses delta-encoding, - // frame-or-reference and run length encoding. 
- tsenc := getTimeEncoder(len(values)) - - b, err := encodeFloatBlockUsing(buf, values, tsenc, venc) - - putTimeEncoder(tsenc) - putFloatEncoder(venc) - - return b, err -} - -func encodeFloatBlockUsing(buf []byte, values []Value, tsenc TimeEncoder, venc *FloatEncoder) ([]byte, error) { - tsenc.Reset() - venc.Reset() - - for _, v := range values { - vv := v.(FloatValue) - tsenc.Write(vv.unixnano) - venc.Write(vv.value) - } - venc.Flush() - - // Encoded timestamp values - tb, err := tsenc.Bytes() - if err != nil { - return nil, err - } - // Encoded float values - vb, err := venc.Bytes() - if err != nil { - return nil, err - } - - // Prepend the first timestamp of the block in the first 8 bytes and the block - // in the next byte, followed by the block - return packBlock(buf, BlockFloat64, tb, vb), nil -} - -// DecodeFloatBlock decodes the float block from the byte slice -// and appends the float values to a. -func DecodeFloatBlock(block []byte, a *[]FloatValue) ([]FloatValue, error) { - // Block type is the next block, make sure we actually have a float block - blockType := block[0] - if blockType != BlockFloat64 { - return nil, fmt.Errorf("invalid block type: exp %d, got %d", BlockFloat64, blockType) - } - block = block[1:] - - tb, vb, err := unpackBlock(block) - if err != nil { - return nil, err - } - - sz := CountTimestamps(tb) - - if cap(*a) < sz { - *a = make([]FloatValue, sz) - } else { - *a = (*a)[:sz] - } - - tdec := timeDecoderPool.Get(0).(*TimeDecoder) - vdec := floatDecoderPool.Get(0).(*FloatDecoder) - - var i int - err = func(a []FloatValue) error { - // Setup our timestamp and value decoders - tdec.Init(tb) - err = vdec.SetBytes(vb) - if err != nil { - return err - } - - // Decode both a timestamp and value - j := 0 - for j < len(a) && tdec.Next() && vdec.Next() { - a[j] = FloatValue{unixnano: tdec.Read(), value: vdec.Values()} - j++ - } - i = j - - // Did timestamp decoding have an error? - err = tdec.Error() - if err != nil { - return err - } - - // Did float decoding have an error? - return vdec.Error() - }(*a) - - timeDecoderPool.Put(tdec) - floatDecoderPool.Put(vdec) - - return (*a)[:i], err -} - -// BooleanValue represents a boolean value. -type BooleanValue struct { - unixnano int64 - value bool -} - -// Size returns the number of bytes necessary to represent the value and its timestamp. -func (v BooleanValue) Size() int { - return 9 -} - -// UnixNano returns the timestamp of the value in nanoseconds since unix epoch. -func (v BooleanValue) UnixNano() int64 { - return v.unixnano -} - -// Value returns the underlying boolean value. -func (v BooleanValue) Value() interface{} { - return v.value -} - -// String returns the string representation of the value and its timestamp. -func (v BooleanValue) String() string { - return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value()) -} - -func (v BooleanValue) RawValue() bool { return v.value } - -func encodeBooleanBlock(buf []byte, values []Value) ([]byte, error) { - if len(values) == 0 { - return nil, nil - } - - // A boolean block is encoded using different compression strategies - // for timestamps and values. 
- venc := getBooleanEncoder(len(values)) - - // Encode timestamps using an adaptive encoder - tsenc := getTimeEncoder(len(values)) - - b, err := encodeBooleanBlockUsing(buf, values, tsenc, venc) - - putTimeEncoder(tsenc) - putBooleanEncoder(venc) - - return b, err -} - -func encodeBooleanBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc BooleanEncoder) ([]byte, error) { - tenc.Reset() - venc.Reset() - - for _, v := range values { - vv := v.(BooleanValue) - tenc.Write(vv.unixnano) - venc.Write(vv.value) - } - - // Encoded timestamp values - tb, err := tenc.Bytes() - if err != nil { - return nil, err - } - // Encoded float values - vb, err := venc.Bytes() - if err != nil { - return nil, err - } - - // Prepend the first timestamp of the block in the first 8 bytes and the block - // in the next byte, followed by the block - return packBlock(buf, BlockBoolean, tb, vb), nil -} - -// DecodeBooleanBlock decodes the boolean block from the byte slice -// and appends the boolean values to a. -func DecodeBooleanBlock(block []byte, a *[]BooleanValue) ([]BooleanValue, error) { - // Block type is the next block, make sure we actually have a float block - blockType := block[0] - if blockType != BlockBoolean { - return nil, fmt.Errorf("invalid block type: exp %d, got %d", BlockBoolean, blockType) - } - block = block[1:] - - tb, vb, err := unpackBlock(block) - if err != nil { - return nil, err - } - - sz := CountTimestamps(tb) - - if cap(*a) < sz { - *a = make([]BooleanValue, sz) - } else { - *a = (*a)[:sz] - } - - tdec := timeDecoderPool.Get(0).(*TimeDecoder) - vdec := booleanDecoderPool.Get(0).(*BooleanDecoder) - - var i int - err = func(a []BooleanValue) error { - // Setup our timestamp and value decoders - tdec.Init(tb) - vdec.SetBytes(vb) - - // Decode both a timestamp and value - j := 0 - for j < len(a) && tdec.Next() && vdec.Next() { - a[j] = BooleanValue{unixnano: tdec.Read(), value: vdec.Read()} - j++ - } - i = j - - // Did timestamp decoding have an error? - err = tdec.Error() - if err != nil { - return err - } - // Did boolean decoding have an error? - return vdec.Error() - }(*a) - - timeDecoderPool.Put(tdec) - booleanDecoderPool.Put(vdec) - - return (*a)[:i], err -} - -// IntegerValue represents an int64 value. -type IntegerValue struct { - unixnano int64 - value int64 -} - -// Value returns the underlying int64 value. -func (v IntegerValue) Value() interface{} { - return v.value -} - -// UnixNano returns the timestamp of the value. -func (v IntegerValue) UnixNano() int64 { - return v.unixnano -} - -// Size returns the number of bytes necessary to represent the value and its timestamp. -func (v IntegerValue) Size() int { - return 16 -} - -// String returns the string representation of the value and its timestamp. 
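// The type-specific decoders above and below take a pointer to a destination
// slice so a caller can reuse one allocation across many blocks, as the decode
// benchmarks in encoding_test.go do. A small in-package sketch (illustrative
// name; each element of blocks is assumed to hold one encoded boolean block):
func decodeBooleanReuseSketch(blocks [][]byte) error {
	buf := make([]BooleanValue, 0, 1000) // reused across iterations
	for _, block := range blocks {
		vals, err := DecodeBooleanBlock(block, &buf)
		if err != nil {
			return err
		}
		_ = vals // vals aliases buf, resliced to the decoded count
	}
	return nil
}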
-func (v IntegerValue) String() string { - return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value()) -} - -func (v IntegerValue) RawValue() int64 { return v.value } - -func encodeIntegerBlock(buf []byte, values []Value) ([]byte, error) { - tenc := getTimeEncoder(len(values)) - venc := getIntegerEncoder(len(values)) - - b, err := encodeIntegerBlockUsing(buf, values, tenc, venc) - - putTimeEncoder(tenc) - putIntegerEncoder(venc) - - return b, err -} - -func encodeIntegerBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc IntegerEncoder) ([]byte, error) { - tenc.Reset() - venc.Reset() - - for _, v := range values { - vv := v.(IntegerValue) - tenc.Write(vv.unixnano) - venc.Write(vv.value) - } - - // Encoded timestamp values - tb, err := tenc.Bytes() - if err != nil { - return nil, err - } - // Encoded int64 values - vb, err := venc.Bytes() - if err != nil { - return nil, err - } - - // Prepend the first timestamp of the block in the first 8 bytes - return packBlock(buf, BlockInteger, tb, vb), nil -} - -// DecodeIntegerBlock decodes the integer block from the byte slice -// and appends the integer values to a. -func DecodeIntegerBlock(block []byte, a *[]IntegerValue) ([]IntegerValue, error) { - blockType := block[0] - if blockType != BlockInteger { - return nil, fmt.Errorf("invalid block type: exp %d, got %d", BlockInteger, blockType) - } - - block = block[1:] - - // The first 8 bytes is the minimum timestamp of the block - tb, vb, err := unpackBlock(block) - if err != nil { - return nil, err - } - - sz := CountTimestamps(tb) - - if cap(*a) < sz { - *a = make([]IntegerValue, sz) - } else { - *a = (*a)[:sz] - } - - tdec := timeDecoderPool.Get(0).(*TimeDecoder) - vdec := integerDecoderPool.Get(0).(*IntegerDecoder) - - var i int - err = func(a []IntegerValue) error { - // Setup our timestamp and value decoders - tdec.Init(tb) - vdec.SetBytes(vb) - - // Decode both a timestamp and value - j := 0 - for j < len(a) && tdec.Next() && vdec.Next() { - a[j] = IntegerValue{unixnano: tdec.Read(), value: vdec.Read()} - j++ - } - i = j - - // Did timestamp decoding have an error? - err = tdec.Error() - if err != nil { - return err - } - // Did int64 decoding have an error? - return vdec.Error() - }(*a) - - timeDecoderPool.Put(tdec) - integerDecoderPool.Put(vdec) - - return (*a)[:i], err -} - -// UnsignedValue represents an int64 value. -type UnsignedValue struct { - unixnano int64 - value uint64 -} - -// Value returns the underlying int64 value. -func (v UnsignedValue) Value() interface{} { - return v.value -} - -// UnixNano returns the timestamp of the value. -func (v UnsignedValue) UnixNano() int64 { - return v.unixnano -} - -// Size returns the number of bytes necessary to represent the value and its timestamp. -func (v UnsignedValue) Size() int { - return 16 -} - -// String returns the string representation of the value and its timestamp. 
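// Unsigned values are stored through the integer encoder (see
// encodeUnsignedBlock further down): the uint64 payload is reinterpreted as an
// int64 on encode and cast back on decode, which round-trips every 64-bit
// pattern, including values above the int64 range. A small in-package sketch
// (illustrative name):
func unsignedRoundTripSketch() error {
	values := Values{
		NewValue(1000, uint64(1)),
		NewValue(2000, ^uint64(0)), // max uint64
	}
	block, err := values.Encode(nil)
	if err != nil {
		return err
	}
	decoded, err := DecodeBlock(block, nil)
	if err != nil {
		return err
	}
	_ = decoded[1].Value().(uint64) // ^uint64(0) comes back unchanged
	return nil
}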
-func (v UnsignedValue) String() string { - return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value()) -} - -func (v UnsignedValue) RawValue() uint64 { return v.value } - -func encodeUnsignedBlock(buf []byte, values []Value) ([]byte, error) { - tenc := getTimeEncoder(len(values)) - venc := getUnsignedEncoder(len(values)) - - b, err := encodeUnsignedBlockUsing(buf, values, tenc, venc) - - putTimeEncoder(tenc) - putUnsignedEncoder(venc) - - return b, err -} - -func encodeUnsignedBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc IntegerEncoder) ([]byte, error) { - tenc.Reset() - venc.Reset() - - for _, v := range values { - vv := v.(UnsignedValue) - tenc.Write(vv.unixnano) - venc.Write(int64(vv.value)) - } - - // Encoded timestamp values - tb, err := tenc.Bytes() - if err != nil { - return nil, err - } - // Encoded int64 values - vb, err := venc.Bytes() - if err != nil { - return nil, err - } - - // Prepend the first timestamp of the block in the first 8 bytes - return packBlock(buf, BlockUnsigned, tb, vb), nil -} - -// DecodeUnsignedBlock decodes the unsigned integer block from the byte slice -// and appends the unsigned integer values to a. -func DecodeUnsignedBlock(block []byte, a *[]UnsignedValue) ([]UnsignedValue, error) { - blockType := block[0] - if blockType != BlockUnsigned { - return nil, fmt.Errorf("invalid block type: exp %d, got %d", BlockUnsigned, blockType) - } - - block = block[1:] - - // The first 8 bytes is the minimum timestamp of the block - tb, vb, err := unpackBlock(block) - if err != nil { - return nil, err - } - - sz := CountTimestamps(tb) - - if cap(*a) < sz { - *a = make([]UnsignedValue, sz) - } else { - *a = (*a)[:sz] - } - - tdec := timeDecoderPool.Get(0).(*TimeDecoder) - vdec := integerDecoderPool.Get(0).(*IntegerDecoder) - - var i int - err = func(a []UnsignedValue) error { - // Setup our timestamp and value decoders - tdec.Init(tb) - vdec.SetBytes(vb) - - // Decode both a timestamp and value - j := 0 - for j < len(a) && tdec.Next() && vdec.Next() { - a[j] = UnsignedValue{unixnano: tdec.Read(), value: uint64(vdec.Read())} - j++ - } - i = j - - // Did timestamp decoding have an error? - err = tdec.Error() - if err != nil { - return err - } - // Did int64 decoding have an error? - return vdec.Error() - }(*a) - - timeDecoderPool.Put(tdec) - integerDecoderPool.Put(vdec) - - return (*a)[:i], err -} - -// StringValue represents a string value. -type StringValue struct { - unixnano int64 - value string -} - -// Value returns the underlying string value. -func (v StringValue) Value() interface{} { - return v.value -} - -// UnixNano returns the timestamp of the value. -func (v StringValue) UnixNano() int64 { - return v.unixnano -} - -// Size returns the number of bytes necessary to represent the value and its timestamp. -func (v StringValue) Size() int { - return 8 + len(v.value) -} - -// String returns the string representation of the value and its timestamp. 
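// Size on each concrete type above reports the bytes needed to represent one
// point: 8 bytes of timestamp plus the payload, i.e. 16 for
// float64/int64/uint64 values, 9 for booleans, and 8+len(s) for strings. This
// is the size of the raw values rather than of the encoded block; the decode
// benchmarks in encoding_test.go sum it via Values.Size to feed b.SetBytes.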
-func (v StringValue) String() string { - return fmt.Sprintf("%v %v", time.Unix(0, v.unixnano), v.Value()) -} - -func (v StringValue) RawValue() string { return v.value } - -func encodeStringBlock(buf []byte, values []Value) ([]byte, error) { - tenc := getTimeEncoder(len(values)) - venc := getStringEncoder(len(values) * len(values[0].(StringValue).value)) - - b, err := encodeStringBlockUsing(buf, values, tenc, venc) - - putTimeEncoder(tenc) - putStringEncoder(venc) - - return b, err -} - -func encodeStringBlockUsing(buf []byte, values []Value, tenc TimeEncoder, venc StringEncoder) ([]byte, error) { - tenc.Reset() - venc.Reset() - - for _, v := range values { - vv := v.(StringValue) - tenc.Write(vv.unixnano) - venc.Write(vv.value) - } - - // Encoded timestamp values - tb, err := tenc.Bytes() - if err != nil { - return nil, err - } - // Encoded string values - vb, err := venc.Bytes() - if err != nil { - return nil, err - } - - // Prepend the first timestamp of the block in the first 8 bytes - return packBlock(buf, BlockString, tb, vb), nil -} - -// DecodeStringBlock decodes the string block from the byte slice -// and appends the string values to a. -func DecodeStringBlock(block []byte, a *[]StringValue) ([]StringValue, error) { - blockType := block[0] - if blockType != BlockString { - return nil, fmt.Errorf("invalid block type: exp %d, got %d", BlockString, blockType) - } - - block = block[1:] - - // The first 8 bytes is the minimum timestamp of the block - tb, vb, err := unpackBlock(block) - if err != nil { - return nil, err - } - - sz := CountTimestamps(tb) - - if cap(*a) < sz { - *a = make([]StringValue, sz) - } else { - *a = (*a)[:sz] - } - - tdec := timeDecoderPool.Get(0).(*TimeDecoder) - vdec := stringDecoderPool.Get(0).(*StringDecoder) - - var i int - err = func(a []StringValue) error { - // Setup our timestamp and value decoders - tdec.Init(tb) - err = vdec.SetBytes(vb) - if err != nil { - return err - } - - // Decode both a timestamp and value - j := 0 - for j < len(a) && tdec.Next() && vdec.Next() { - a[j] = StringValue{unixnano: tdec.Read(), value: vdec.Read()} - j++ - } - i = j - - // Did timestamp decoding have an error? - err = tdec.Error() - if err != nil { - return err - } - // Did string decoding have an error? - return vdec.Error() - }(*a) - - timeDecoderPool.Put(tdec) - stringDecoderPool.Put(vdec) - - return (*a)[:i], err -} - -func packBlock(buf []byte, typ byte, ts []byte, values []byte) []byte { - // We encode the length of the timestamp block using a variable byte encoding. - // This allows small byte slices to take up 1 byte while larger ones use 2 or more. - sz := 1 + binary.MaxVarintLen64 + len(ts) + len(values) - if cap(buf) < sz { - buf = make([]byte, sz) - } - b := buf[:sz] - b[0] = typ - i := binary.PutUvarint(b[1:1+binary.MaxVarintLen64], uint64(len(ts))) - i += 1 - - // block is , , - copy(b[i:], ts) - // We don't encode the value length because we know it's the rest of the block after - // the timestamp block. 
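	// The finished block is therefore laid out as:
	//   [0]            block type byte
	//   [1:i]          uvarint-encoded length of the timestamp block
	//   [i:i+len(ts)]  encoded timestamps
	//   [i+len(ts):]   encoded values (length implied by the total block length)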
- copy(b[i+len(ts):], values) - return b[:i+len(ts)+len(values)] -} - -func unpackBlock(buf []byte) (ts, values []byte, err error) { - // Unpack the timestamp block length - tsLen, i := binary.Uvarint(buf) - if i <= 0 { - err = fmt.Errorf("unpackBlock: unable to read timestamp block length") - return - } - - // Unpack the timestamp bytes - tsIdx := int(i) + int(tsLen) - if tsIdx > len(buf) { - err = fmt.Errorf("unpackBlock: not enough data for timestamp") - return - } - ts = buf[int(i):tsIdx] - - // Unpack the value bytes - values = buf[tsIdx:] - return -} - -// ZigZagEncode converts a int64 to a uint64 by zig zagging negative and positive values -// across even and odd numbers. Eg. [0,-1,1,-2] becomes [0, 1, 2, 3]. -func ZigZagEncode(x int64) uint64 { - return uint64(uint64(x<<1) ^ uint64((int64(x) >> 63))) -} - -// ZigZagDecode converts a previously zigzag encoded uint64 back to a int64. -func ZigZagDecode(v uint64) int64 { - return int64((v >> 1) ^ uint64((int64(v&1)<<63)>>63)) -} -func getTimeEncoder(sz int) TimeEncoder { - x := timeEncoderPool.Get(sz).(TimeEncoder) - x.Reset() - return x -} -func putTimeEncoder(enc TimeEncoder) { timeEncoderPool.Put(enc) } - -func getIntegerEncoder(sz int) IntegerEncoder { - x := integerEncoderPool.Get(sz).(IntegerEncoder) - x.Reset() - return x -} -func putIntegerEncoder(enc IntegerEncoder) { integerEncoderPool.Put(enc) } - -func getUnsignedEncoder(sz int) IntegerEncoder { - x := integerEncoderPool.Get(sz).(IntegerEncoder) - x.Reset() - return x -} -func putUnsignedEncoder(enc IntegerEncoder) { integerEncoderPool.Put(enc) } - -func getFloatEncoder(sz int) *FloatEncoder { - x := floatEncoderPool.Get(sz).(*FloatEncoder) - x.Reset() - return x -} -func putFloatEncoder(enc *FloatEncoder) { floatEncoderPool.Put(enc) } - -func getStringEncoder(sz int) StringEncoder { - x := stringEncoderPool.Get(sz).(StringEncoder) - x.Reset() - return x -} -func putStringEncoder(enc StringEncoder) { stringEncoderPool.Put(enc) } - -func getBooleanEncoder(sz int) BooleanEncoder { - x := booleanEncoderPool.Get(sz).(BooleanEncoder) - x.Reset() - return x -} -func putBooleanEncoder(enc BooleanEncoder) { booleanEncoderPool.Put(enc) } diff --git a/tsdb/engine/tsm1/encoding_test.go b/tsdb/engine/tsm1/encoding_test.go deleted file mode 100644 index 82d455fc881..00000000000 --- a/tsdb/engine/tsm1/encoding_test.go +++ /dev/null @@ -1,1900 +0,0 @@ -package tsm1_test - -import ( - "fmt" - "math/rand" - "reflect" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -func TestEncoding_FloatBlock(t *testing.T) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, float64(i)) - } - - b, err := tsm1.Values(values).Encode(nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var decodedValues []tsm1.Value - decodedValues, err = tsm1.DecodeBlock(b, decodedValues) - if err != nil { - t.Fatalf("unexpected error decoding block: %v", err) - } - - if !reflect.DeepEqual(decodedValues, values) { - t.Fatalf("unexpected results:\n\tgot: %s\n\texp: %s\n", spew.Sdump(decodedValues), spew.Sdump(values)) - } -} - -func TestEncoding_FloatBlock_ZeroTime(t *testing.T) { - values := make([]tsm1.Value, 3) - for i := 0; i < 3; i++ { - values[i] = tsm1.NewValue(0, float64(i)) - } - - b, err := tsm1.Values(values).Encode(nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var 
decodedValues []tsm1.Value - decodedValues, err = tsm1.DecodeBlock(b, decodedValues) - if err != nil { - t.Fatalf("unexpected error decoding block: %v", err) - } - - if !reflect.DeepEqual(decodedValues, values) { - t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values) - } -} - -func TestEncoding_FloatBlock_SimilarFloats(t *testing.T) { - values := make([]tsm1.Value, 5) - values[0] = tsm1.NewValue(1444238178437870000, 6.00065e+06) - values[1] = tsm1.NewValue(1444238185286830000, 6.000656e+06) - values[2] = tsm1.NewValue(1444238188441501000, 6.000657e+06) - values[3] = tsm1.NewValue(1444238195286811000, 6.000659e+06) - values[4] = tsm1.NewValue(1444238198439917000, 6.000661e+06) - - b, err := tsm1.Values(values).Encode(nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var decodedValues []tsm1.Value - decodedValues, err = tsm1.DecodeBlock(b, decodedValues) - if err != nil { - t.Fatalf("unexpected error decoding block: %v", err) - } - - if !reflect.DeepEqual(decodedValues, values) { - t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values) - } -} - -func TestEncoding_IntBlock_Basic(t *testing.T) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, int64(i)) - } - - b, err := tsm1.Values(values).Encode(nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var decodedValues []tsm1.Value - decodedValues, err = tsm1.DecodeBlock(b, decodedValues) - if err != nil { - t.Fatalf("unexpected error decoding block: %v", err) - } - - if len(decodedValues) != len(values) { - t.Fatalf("unexpected results length:\n\tgot: %v\n\texp: %v\n", len(decodedValues), len(values)) - } - - for i := 0; i < len(decodedValues); i++ { - if decodedValues[i].UnixNano() != values[i].UnixNano() { - t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues[i].UnixNano(), values[i].UnixNano()) - } - - if decodedValues[i].Value() != values[i].Value() { - t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues[i].Value(), values[i].Value()) - } - } -} - -func TestEncoding_IntBlock_Negatives(t *testing.T) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - v := int64(i) - if i%2 == 0 { - v = -v - } - values[i] = tsm1.NewValue(t, int64(v)) - } - - b, err := tsm1.Values(values).Encode(nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var decodedValues []tsm1.Value - decodedValues, err = tsm1.DecodeBlock(b, decodedValues) - if err != nil { - t.Fatalf("unexpected error decoding block: %v", err) - } - - if !reflect.DeepEqual(decodedValues, values) { - t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values) - } -} - -func TestEncoding_UIntBlock_Basic(t *testing.T) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, uint64(i)) - } - - b, err := tsm1.Values(values).Encode(nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var decodedValues []tsm1.Value - decodedValues, err = tsm1.DecodeBlock(b, decodedValues) - if err != nil { - t.Fatalf("unexpected error decoding block: %v", err) - } - - if len(decodedValues) != len(values) { - t.Fatalf("unexpected results length:\n\tgot: %v\n\texp: %v\n", len(decodedValues), len(values)) - } - - for i 
:= 0; i < len(decodedValues); i++ { - if decodedValues[i].UnixNano() != values[i].UnixNano() { - t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues[i].UnixNano(), values[i].UnixNano()) - } - - if decodedValues[i].Value() != values[i].Value() { - t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues[i].Value(), values[i].Value()) - } - } -} - -// TestEncoding_UIntBlock_MaxValues encodes uint64 numbers starting at max (18446744073709551615) -// down to 18446744073709550616 -func TestEncoding_UIntBlock_MaxValues(t *testing.T) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, ^uint64(i)) - } - - b, err := tsm1.Values(values).Encode(nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var decodedValues []tsm1.Value - decodedValues, err = tsm1.DecodeBlock(b, decodedValues) - if err != nil { - t.Fatalf("unexpected error decoding block: %v", err) - } - - if !reflect.DeepEqual(decodedValues, values) { - t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values) - } -} - -func TestEncoding_BooleanBlock_Basic(t *testing.T) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - v := true - if i%2 == 0 { - v = false - } - values[i] = tsm1.NewValue(t, v) - } - - b, err := tsm1.Values(values).Encode(nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var decodedValues []tsm1.Value - decodedValues, err = tsm1.DecodeBlock(b, decodedValues) - if err != nil { - t.Fatalf("unexpected error decoding block: %v", err) - } - - if !reflect.DeepEqual(decodedValues, values) { - t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values) - } -} - -func TestEncoding_StringBlock_Basic(t *testing.T) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, fmt.Sprintf("value %d", i)) - } - - b, err := tsm1.Values(values).Encode(nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var decodedValues []tsm1.Value - decodedValues, err = tsm1.DecodeBlock(b, decodedValues) - if err != nil { - t.Fatalf("unexpected error decoding block: %v", err) - } - - if !reflect.DeepEqual(decodedValues, values) { - t.Fatalf("unexpected results:\n\tgot: %v\n\texp: %v\n", decodedValues, values) - } -} - -func TestEncoding_BlockType(t *testing.T) { - tests := []struct { - value interface{} - blockType byte - }{ - {value: float64(1.0), blockType: tsm1.BlockFloat64}, - {value: int64(1), blockType: tsm1.BlockInteger}, - {value: uint64(1), blockType: tsm1.BlockUnsigned}, - {value: true, blockType: tsm1.BlockBoolean}, - {value: "string", blockType: tsm1.BlockString}, - } - - for _, test := range tests { - var values []tsm1.Value - values = append(values, tsm1.NewValue(0, test.value)) - - b, err := tsm1.Values(values).Encode(nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - bt, err := tsm1.BlockType(b) - if err != nil { - t.Fatalf("unexpected error decoding block type: %v", err) - } - - if got, exp := bt, test.blockType; got != exp { - t.Fatalf("block type mismatch: got %v, exp %v", got, exp) - } - } - - _, err := tsm1.BlockType([]byte{10}) - if err == nil { - t.Fatalf("expected error decoding block type, got nil") - } -} - -func TestEncoding_Count(t *testing.T) { - tests := 
[]struct { - value interface{} - blockType byte - }{ - {value: float64(1.0), blockType: tsm1.BlockFloat64}, - {value: int64(1), blockType: tsm1.BlockInteger}, - {value: uint64(1), blockType: tsm1.BlockUnsigned}, - {value: true, blockType: tsm1.BlockBoolean}, - {value: "string", blockType: tsm1.BlockString}, - } - - for _, test := range tests { - var values []tsm1.Value - values = append(values, tsm1.NewValue(0, test.value)) - - b, err := tsm1.Values(values).Encode(nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - cnt, err := tsm1.BlockCount(b) - if err != nil { - t.Fatalf("Block is corrupted: %v", err) - } - if got, exp := cnt, 1; got != exp { - t.Fatalf("block count mismatch: got %v, exp %v", got, exp) - } - } -} - -func TestValues_MergeFloat(t *testing.T) { - tests := []struct { - a, b, exp []tsm1.Value - }{ - - { // empty a - a: []tsm1.Value{}, - - b: []tsm1.Value{ - tsm1.NewValue(1, 1.2), - tsm1.NewValue(2, 2.2), - }, - exp: []tsm1.Value{ - tsm1.NewValue(1, 1.2), - tsm1.NewValue(2, 2.2), - }, - }, - { // empty b - a: []tsm1.Value{ - tsm1.NewValue(1, 1.1), - tsm1.NewValue(2, 2.1), - }, - - b: []tsm1.Value{}, - exp: []tsm1.Value{ - tsm1.NewValue(1, 1.1), - tsm1.NewValue(2, 2.1), - }, - }, - { - a: []tsm1.Value{ - tsm1.NewValue(0, 0.0), - tsm1.NewValue(1, 1.1), - tsm1.NewValue(2, 2.1), - }, - b: []tsm1.Value{ - tsm1.NewValue(2, 2.2), - tsm1.NewValue(2, 2.2), // duplicate data - }, - exp: []tsm1.Value{ - tsm1.NewValue(0, 0.0), - tsm1.NewValue(1, 1.1), - tsm1.NewValue(2, 2.2), - }, - }, - { - a: []tsm1.Value{ - tsm1.NewValue(0, 0.0), - tsm1.NewValue(1, 1.1), - tsm1.NewValue(1, 1.1), // duplicate data - tsm1.NewValue(2, 2.1), - }, - b: []tsm1.Value{ - tsm1.NewValue(2, 2.2), - tsm1.NewValue(2, 2.2), // duplicate data - }, - exp: []tsm1.Value{ - tsm1.NewValue(0, 0.0), - tsm1.NewValue(1, 1.1), - tsm1.NewValue(2, 2.2), - }, - }, - - { - a: []tsm1.Value{ - tsm1.NewValue(1, 1.1), - }, - b: []tsm1.Value{ - tsm1.NewValue(0, 0.0), - tsm1.NewValue(1, 1.2), // overwrites a - tsm1.NewValue(2, 2.2), - tsm1.NewValue(3, 3.2), - tsm1.NewValue(4, 4.2), - }, - exp: []tsm1.Value{ - tsm1.NewValue(0, 0.0), - tsm1.NewValue(1, 1.2), - tsm1.NewValue(2, 2.2), - tsm1.NewValue(3, 3.2), - tsm1.NewValue(4, 4.2), - }, - }, - { - a: []tsm1.Value{ - tsm1.NewValue(1, 1.1), - tsm1.NewValue(2, 2.1), - tsm1.NewValue(3, 3.1), - tsm1.NewValue(4, 4.1), - }, - - b: []tsm1.Value{ - tsm1.NewValue(1, 1.2), // overwrites a - tsm1.NewValue(2, 2.2), // overwrites a - }, - exp: []tsm1.Value{ - tsm1.NewValue(1, 1.2), - tsm1.NewValue(2, 2.2), - tsm1.NewValue(3, 3.1), - tsm1.NewValue(4, 4.1), - }, - }, - { - a: []tsm1.Value{ - tsm1.NewValue(1, 1.1), - tsm1.NewValue(2, 2.1), - tsm1.NewValue(3, 3.1), - tsm1.NewValue(4, 4.1), - }, - - b: []tsm1.Value{ - tsm1.NewValue(1, 1.2), // overwrites a - tsm1.NewValue(2, 2.2), // overwrites a - tsm1.NewValue(3, 3.2), - tsm1.NewValue(4, 4.2), - }, - exp: []tsm1.Value{ - tsm1.NewValue(1, 1.2), - tsm1.NewValue(2, 2.2), - tsm1.NewValue(3, 3.2), - tsm1.NewValue(4, 4.2), - }, - }, - { - a: []tsm1.Value{ - tsm1.NewValue(0, 0.0), - tsm1.NewValue(1, 1.1), - tsm1.NewValue(2, 2.1), - tsm1.NewValue(3, 3.1), - tsm1.NewValue(4, 4.1), - }, - b: []tsm1.Value{ - tsm1.NewValue(0, 0.0), - tsm1.NewValue(2, 2.2), - tsm1.NewValue(4, 4.2), - }, - exp: []tsm1.Value{ - tsm1.NewValue(0, 0.0), - tsm1.NewValue(1, 1.1), - tsm1.NewValue(2, 2.2), - tsm1.NewValue(3, 3.1), - tsm1.NewValue(4, 4.2), - }, - }, - - { - a: []tsm1.Value{ - tsm1.NewValue(1462498658242869207, 0.0), - tsm1.NewValue(1462498658288956853, 1.1), 
- }, - b: []tsm1.Value{ - tsm1.NewValue(1462498658242870810, 0.0), - tsm1.NewValue(1462498658262911238, 2.2), - tsm1.NewValue(1462498658282415038, 4.2), - tsm1.NewValue(1462498658282417760, 4.2), - }, - exp: []tsm1.Value{ - tsm1.NewValue(1462498658242869207, 0.0), - tsm1.NewValue(1462498658242870810, 0.0), - tsm1.NewValue(1462498658262911238, 2.2), - tsm1.NewValue(1462498658282415038, 4.2), - tsm1.NewValue(1462498658282417760, 4.2), - tsm1.NewValue(1462498658288956853, 1.1), - }, - }, - { - a: []tsm1.Value{ - tsm1.NewValue(4, 4.0), - tsm1.NewValue(5, 5.0), - tsm1.NewValue(6, 6.0), - }, - b: []tsm1.Value{ - tsm1.NewValue(1, 1.0), - tsm1.NewValue(2, 2.0), - tsm1.NewValue(3, 3.0), - }, - exp: []tsm1.Value{ - tsm1.NewValue(1, 1.0), - tsm1.NewValue(2, 2.0), - tsm1.NewValue(3, 3.0), - tsm1.NewValue(4, 4.0), - tsm1.NewValue(5, 5.0), - tsm1.NewValue(6, 6.0), - }, - }, - { - a: []tsm1.Value{ - tsm1.NewValue(5, 5.0), - tsm1.NewValue(6, 6.0), - }, - b: []tsm1.Value{ - tsm1.NewValue(1, 1.0), - tsm1.NewValue(2, 2.0), - tsm1.NewValue(3, 3.0), - tsm1.NewValue(4, 4.0), - tsm1.NewValue(7, 7.0), - tsm1.NewValue(8, 8.0), - }, - exp: []tsm1.Value{ - tsm1.NewValue(1, 1.0), - tsm1.NewValue(2, 2.0), - tsm1.NewValue(3, 3.0), - tsm1.NewValue(4, 4.0), - tsm1.NewValue(5, 5.0), - tsm1.NewValue(6, 6.0), - tsm1.NewValue(7, 7.0), - tsm1.NewValue(8, 8.0), - }, - }, - { - a: []tsm1.Value{ - tsm1.NewValue(1, 1.0), - tsm1.NewValue(2, 2.0), - tsm1.NewValue(3, 3.0), - }, - b: []tsm1.Value{ - tsm1.NewValue(4, 4.0), - tsm1.NewValue(5, 5.0), - tsm1.NewValue(6, 6.0), - }, - exp: []tsm1.Value{ - tsm1.NewValue(1, 1.0), - tsm1.NewValue(2, 2.0), - tsm1.NewValue(3, 3.0), - tsm1.NewValue(4, 4.0), - tsm1.NewValue(5, 5.0), - tsm1.NewValue(6, 6.0), - }, - }, - } - - for i, test := range tests { - got := tsm1.Values(test.a).Merge(test.b) - if exp, got := len(test.exp), len(got); exp != got { - t.Fatalf("test(%d): value length mismatch: exp %v, got %v", i, exp, got) - } - - dedup := tsm1.Values(append(test.a, test.b...)).Deduplicate() - - for i := range test.exp { - if exp, got := test.exp[i].String(), got[i].String(); exp != got { - t.Fatalf("value mismatch:\n exp %v\n got %v", exp, got) - } - - if exp, got := test.exp[i].String(), dedup[i].String(); exp != got { - t.Fatalf("value mismatch:\n exp %v\n got %v", exp, got) - } - } - } -} - -func TestIntegerValues_Merge(t *testing.T) { - integerValue := func(t int64, f int64) tsm1.IntegerValue { - return tsm1.NewValue(t, f).(tsm1.IntegerValue) - } - - tests := []struct { - a, b, exp []tsm1.IntegerValue - }{ - - { // empty a - a: []tsm1.IntegerValue{}, - - b: []tsm1.IntegerValue{ - integerValue(1, 10), - integerValue(2, 20), - }, - exp: []tsm1.IntegerValue{ - integerValue(1, 10), - integerValue(2, 20), - }, - }, - { // empty b - a: []tsm1.IntegerValue{ - integerValue(1, 1), - integerValue(2, 2), - }, - - b: []tsm1.IntegerValue{}, - exp: []tsm1.IntegerValue{ - integerValue(1, 1), - integerValue(2, 2), - }, - }, - { - a: []tsm1.IntegerValue{ - integerValue(1, 1), - }, - b: []tsm1.IntegerValue{ - integerValue(0, 0), - integerValue(1, 10), // overwrites a - integerValue(2, 20), - integerValue(3, 30), - integerValue(4, 40), - }, - exp: []tsm1.IntegerValue{ - integerValue(0, 0), - integerValue(1, 10), - integerValue(2, 20), - integerValue(3, 30), - integerValue(4, 40), - }, - }, - { - a: []tsm1.IntegerValue{ - integerValue(1, 1), - integerValue(2, 2), - integerValue(3, 3), - integerValue(4, 4), - }, - - b: []tsm1.IntegerValue{ - integerValue(1, 10), // overwrites a - integerValue(2, 20), // 
overwrites a - }, - exp: []tsm1.IntegerValue{ - integerValue(1, 10), - integerValue(2, 20), - integerValue(3, 3), - integerValue(4, 4), - }, - }, - { - a: []tsm1.IntegerValue{ - integerValue(1, 1), - integerValue(2, 2), - integerValue(3, 3), - integerValue(4, 4), - }, - - b: []tsm1.IntegerValue{ - integerValue(1, 10), // overwrites a - integerValue(2, 20), // overwrites a - integerValue(3, 30), - integerValue(4, 40), - }, - exp: []tsm1.IntegerValue{ - integerValue(1, 10), - integerValue(2, 20), - integerValue(3, 30), - integerValue(4, 40), - }, - }, - { - a: []tsm1.IntegerValue{ - integerValue(0, 0), - integerValue(1, 1), - integerValue(2, 2), - integerValue(3, 3), - integerValue(4, 4), - }, - b: []tsm1.IntegerValue{ - integerValue(0, 0), - integerValue(2, 20), - integerValue(4, 40), - }, - exp: []tsm1.IntegerValue{ - integerValue(0, 0.0), - integerValue(1, 1), - integerValue(2, 20), - integerValue(3, 3), - integerValue(4, 40), - }, - }, - } - - for i, test := range tests { - if i != 2 { - continue - } - - got := tsm1.IntegerValues(test.a).Merge(test.b) - if exp, got := len(test.exp), len(got); exp != got { - t.Fatalf("test(%d): value length mismatch: exp %v, got %v", i, exp, got) - } - - dedup := tsm1.IntegerValues(append(test.a, test.b...)).Deduplicate() - - for i := range test.exp { - if exp, got := test.exp[i].String(), got[i].String(); exp != got { - t.Fatalf("value mismatch:\n exp %v\n got %v", exp, got) - } - - if exp, got := test.exp[i].String(), dedup[i].String(); exp != got { - t.Fatalf("value mismatch:\n exp %v\n got %v", exp, got) - } - } - } -} - -func TestUnsignedValues_Merge(t *testing.T) { - uintValue := func(t int64, f uint64) tsm1.UnsignedValue { - return tsm1.NewValue(t, f).(tsm1.UnsignedValue) - } - - tests := []struct { - a, b, exp []tsm1.UnsignedValue - }{ - - { // empty a - a: []tsm1.UnsignedValue{}, - - b: []tsm1.UnsignedValue{ - uintValue(1, 10), - uintValue(2, 20), - }, - exp: []tsm1.UnsignedValue{ - uintValue(1, 10), - uintValue(2, 20), - }, - }, - { // empty b - a: []tsm1.UnsignedValue{ - uintValue(1, 1), - uintValue(2, 2), - }, - - b: []tsm1.UnsignedValue{}, - exp: []tsm1.UnsignedValue{ - uintValue(1, 1), - uintValue(2, 2), - }, - }, - { - a: []tsm1.UnsignedValue{ - uintValue(1, 1), - }, - b: []tsm1.UnsignedValue{ - uintValue(0, 0), - uintValue(1, 10), // overwrites a - uintValue(2, 20), - uintValue(3, 30), - uintValue(4, 40), - }, - exp: []tsm1.UnsignedValue{ - uintValue(0, 0), - uintValue(1, 10), - uintValue(2, 20), - uintValue(3, 30), - uintValue(4, 40), - }, - }, - { - a: []tsm1.UnsignedValue{ - uintValue(1, 1), - uintValue(2, 2), - uintValue(3, 3), - uintValue(4, 4), - }, - - b: []tsm1.UnsignedValue{ - uintValue(1, ^uint64(0)), // overwrites a - uintValue(2, 20), // overwrites a - }, - exp: []tsm1.UnsignedValue{ - uintValue(1, ^uint64(0)), - uintValue(2, 20), - uintValue(3, 3), - uintValue(4, 4), - }, - }, - { - a: []tsm1.UnsignedValue{ - uintValue(1, 1), - uintValue(2, 2), - uintValue(3, 3), - uintValue(4, 4), - }, - - b: []tsm1.UnsignedValue{ - uintValue(1, 10), // overwrites a - uintValue(2, 20), // overwrites a - uintValue(3, 30), - uintValue(4, 40), - }, - exp: []tsm1.UnsignedValue{ - uintValue(1, 10), - uintValue(2, 20), - uintValue(3, 30), - uintValue(4, 40), - }, - }, - { - a: []tsm1.UnsignedValue{ - uintValue(0, 0), - uintValue(1, 1), - uintValue(2, 2), - uintValue(3, 3), - uintValue(4, 4), - }, - b: []tsm1.UnsignedValue{ - uintValue(0, 0), - uintValue(2, 20), - uintValue(4, 40), - }, - exp: []tsm1.UnsignedValue{ - uintValue(0, 0.0), - 
uintValue(1, 1), - uintValue(2, 20), - uintValue(3, 3), - uintValue(4, 40), - }, - }, - } - - for i, test := range tests { - if i != 2 { - continue - } - - got := tsm1.UnsignedValues(test.a).Merge(test.b) - if exp, got := len(test.exp), len(got); exp != got { - t.Fatalf("test(%d): value length mismatch: exp %v, got %v", i, exp, got) - } - - dedup := tsm1.UnsignedValues(append(test.a, test.b...)).Deduplicate() - - for i := range test.exp { - if exp, got := test.exp[i].String(), got[i].String(); exp != got { - t.Fatalf("value mismatch:\n exp %v\n got %v", exp, got) - } - - if exp, got := test.exp[i].String(), dedup[i].String(); exp != got { - t.Fatalf("value mismatch:\n exp %v\n got %v", exp, got) - } - } - } -} - -func TestFloatValues_Merge(t *testing.T) { - floatValue := func(t int64, f float64) tsm1.FloatValue { - return tsm1.NewValue(t, f).(tsm1.FloatValue) - } - - tests := []struct { - a, b, exp []tsm1.FloatValue - }{ - - { // empty a - a: []tsm1.FloatValue{}, - - b: []tsm1.FloatValue{ - floatValue(1, 1.2), - floatValue(2, 2.2), - }, - exp: []tsm1.FloatValue{ - floatValue(1, 1.2), - floatValue(2, 2.2), - }, - }, - { // empty b - a: []tsm1.FloatValue{ - floatValue(1, 1.1), - floatValue(2, 2.1), - }, - - b: []tsm1.FloatValue{}, - exp: []tsm1.FloatValue{ - floatValue(1, 1.1), - floatValue(2, 2.1), - }, - }, - { - a: []tsm1.FloatValue{ - floatValue(1, 1.1), - }, - b: []tsm1.FloatValue{ - floatValue(0, 0.0), - floatValue(1, 1.2), // overwrites a - floatValue(2, 2.2), - floatValue(3, 3.2), - floatValue(4, 4.2), - }, - exp: []tsm1.FloatValue{ - floatValue(0, 0.0), - floatValue(1, 1.2), - floatValue(2, 2.2), - floatValue(3, 3.2), - floatValue(4, 4.2), - }, - }, - { - a: []tsm1.FloatValue{ - floatValue(1, 1.1), - floatValue(2, 2.1), - floatValue(3, 3.1), - floatValue(4, 4.1), - }, - - b: []tsm1.FloatValue{ - floatValue(1, 1.2), // overwrites a - floatValue(2, 2.2), // overwrites a - }, - exp: []tsm1.FloatValue{ - floatValue(1, 1.2), - floatValue(2, 2.2), - floatValue(3, 3.1), - floatValue(4, 4.1), - }, - }, - { - a: []tsm1.FloatValue{ - floatValue(1, 1.1), - floatValue(2, 2.1), - floatValue(3, 3.1), - floatValue(4, 4.1), - }, - - b: []tsm1.FloatValue{ - floatValue(1, 1.2), // overwrites a - floatValue(2, 2.2), // overwrites a - floatValue(3, 3.2), - floatValue(4, 4.2), - }, - exp: []tsm1.FloatValue{ - floatValue(1, 1.2), - floatValue(2, 2.2), - floatValue(3, 3.2), - floatValue(4, 4.2), - }, - }, - { - a: []tsm1.FloatValue{ - floatValue(0, 0.0), - floatValue(1, 1.1), - floatValue(2, 2.1), - floatValue(3, 3.1), - floatValue(4, 4.1), - }, - b: []tsm1.FloatValue{ - floatValue(0, 0.0), - floatValue(2, 2.2), - floatValue(4, 4.2), - }, - exp: []tsm1.FloatValue{ - floatValue(0, 0.0), - floatValue(1, 1.1), - floatValue(2, 2.2), - floatValue(3, 3.1), - floatValue(4, 4.2), - }, - }, - } - - for i, test := range tests { - got := tsm1.FloatValues(test.a).Merge(test.b) - if exp, got := len(test.exp), len(got); exp != got { - t.Fatalf("test(%d): value length mismatch: exp %v, got %v", i, exp, got) - } - - dedup := tsm1.FloatValues(append(test.a, test.b...)).Deduplicate() - - for i := range test.exp { - if exp, got := test.exp[i].String(), got[i].String(); exp != got { - t.Fatalf("value mismatch:\n exp %v\n got %v", exp, got) - } - - if exp, got := test.exp[i].String(), dedup[i].String(); exp != got { - t.Fatalf("value mismatch:\n exp %v\n got %v", exp, got) - } - } - } -} - -func TestBooleanValues_Merge(t *testing.T) { - booleanValue := func(t int64, f bool) tsm1.BooleanValue { - return tsm1.NewValue(t, 
f).(tsm1.BooleanValue) - } - - tests := []struct { - a, b, exp []tsm1.BooleanValue - }{ - - { // empty a - a: []tsm1.BooleanValue{}, - - b: []tsm1.BooleanValue{ - booleanValue(1, true), - booleanValue(2, true), - }, - exp: []tsm1.BooleanValue{ - booleanValue(1, true), - booleanValue(2, true), - }, - }, - { // empty b - a: []tsm1.BooleanValue{ - booleanValue(1, true), - booleanValue(2, true), - }, - - b: []tsm1.BooleanValue{}, - exp: []tsm1.BooleanValue{ - booleanValue(1, true), - booleanValue(2, true), - }, - }, - { - a: []tsm1.BooleanValue{ - booleanValue(1, true), - }, - b: []tsm1.BooleanValue{ - booleanValue(0, false), - booleanValue(1, false), // overwrites a - booleanValue(2, false), - booleanValue(3, false), - booleanValue(4, false), - }, - exp: []tsm1.BooleanValue{ - booleanValue(0, false), - booleanValue(1, false), - booleanValue(2, false), - booleanValue(3, false), - booleanValue(4, false), - }, - }, - { - a: []tsm1.BooleanValue{ - booleanValue(1, true), - booleanValue(2, true), - booleanValue(3, true), - booleanValue(4, true), - }, - - b: []tsm1.BooleanValue{ - booleanValue(1, false), // overwrites a - booleanValue(2, false), // overwrites a - }, - exp: []tsm1.BooleanValue{ - booleanValue(1, false), // overwrites a - booleanValue(2, false), // overwrites a - booleanValue(3, true), - booleanValue(4, true), - }, - }, - { - a: []tsm1.BooleanValue{ - booleanValue(1, true), - booleanValue(2, true), - booleanValue(3, true), - booleanValue(4, true), - }, - - b: []tsm1.BooleanValue{ - booleanValue(1, false), // overwrites a - booleanValue(2, false), // overwrites a - booleanValue(3, false), - booleanValue(4, false), - }, - exp: []tsm1.BooleanValue{ - booleanValue(1, false), - booleanValue(2, false), - booleanValue(3, false), - booleanValue(4, false), - }, - }, - { - a: []tsm1.BooleanValue{ - booleanValue(0, true), - booleanValue(1, true), - booleanValue(2, true), - booleanValue(3, true), - booleanValue(4, true), - }, - b: []tsm1.BooleanValue{ - booleanValue(0, false), - booleanValue(2, false), - booleanValue(4, false), - }, - exp: []tsm1.BooleanValue{ - booleanValue(0, false), - booleanValue(1, true), - booleanValue(2, false), - booleanValue(3, true), - booleanValue(4, false), - }, - }, - } - - for i, test := range tests { - got := tsm1.BooleanValues(test.a).Merge(test.b) - if exp, got := len(test.exp), len(got); exp != got { - t.Fatalf("test(%d): value length mismatch: exp %v, got %v", i, exp, got) - } - - dedup := tsm1.BooleanValues(append(test.a, test.b...)).Deduplicate() - - for i := range test.exp { - if exp, got := test.exp[i].String(), got[i].String(); exp != got { - t.Fatalf("value mismatch:\n exp %v\n got %v", exp, got) - } - - if exp, got := test.exp[i].String(), dedup[i].String(); exp != got { - t.Fatalf("value mismatch:\n exp %v\n got %v", exp, got) - } - } - } -} - -func TestStringValues_Merge(t *testing.T) { - stringValue := func(t int64, f string) tsm1.StringValue { - return tsm1.NewValue(t, f).(tsm1.StringValue) - } - - tests := []struct { - a, b, exp []tsm1.StringValue - }{ - - { // empty a - a: []tsm1.StringValue{}, - - b: []tsm1.StringValue{ - stringValue(1, "10"), - stringValue(2, "20"), - }, - exp: []tsm1.StringValue{ - stringValue(1, "10"), - stringValue(2, "20"), - }, - }, - { // empty b - a: []tsm1.StringValue{ - stringValue(1, "1"), - stringValue(2, "2"), - }, - - b: []tsm1.StringValue{}, - exp: []tsm1.StringValue{ - stringValue(1, "1"), - stringValue(2, "2"), - }, - }, - { - a: []tsm1.StringValue{ - stringValue(1, "1"), - }, - b: []tsm1.StringValue{ - 
stringValue(0, "0"), - stringValue(1, "10"), // overwrites a - stringValue(2, "20"), - stringValue(3, "30"), - stringValue(4, "40"), - }, - exp: []tsm1.StringValue{ - stringValue(0, "0"), - stringValue(1, "10"), - stringValue(2, "20"), - stringValue(3, "30"), - stringValue(4, "40"), - }, - }, - { - a: []tsm1.StringValue{ - stringValue(1, "1"), - stringValue(2, "2"), - stringValue(3, "3"), - stringValue(4, "4"), - }, - - b: []tsm1.StringValue{ - stringValue(1, "10"), // overwrites a - stringValue(2, "20"), // overwrites a - }, - exp: []tsm1.StringValue{ - stringValue(1, "10"), - stringValue(2, "20"), - stringValue(3, "3"), - stringValue(4, "4"), - }, - }, - { - a: []tsm1.StringValue{ - stringValue(1, "1"), - stringValue(2, "2"), - stringValue(3, "3"), - stringValue(4, "4"), - }, - - b: []tsm1.StringValue{ - stringValue(1, "10"), // overwrites a - stringValue(2, "20"), // overwrites a - stringValue(3, "30"), - stringValue(4, "40"), - }, - exp: []tsm1.StringValue{ - stringValue(1, "10"), - stringValue(2, "20"), - stringValue(3, "30"), - stringValue(4, "40"), - }, - }, - { - a: []tsm1.StringValue{ - stringValue(0, "0"), - stringValue(1, "1"), - stringValue(2, "2"), - stringValue(3, "3"), - stringValue(4, "4"), - }, - b: []tsm1.StringValue{ - stringValue(0, "0"), - stringValue(2, "20"), - stringValue(4, "40"), - }, - exp: []tsm1.StringValue{ - stringValue(0, "0.0"), - stringValue(1, "1"), - stringValue(2, "20"), - stringValue(3, "3"), - stringValue(4, "40"), - }, - }, - } - - for i, test := range tests { - if i != 2 { - continue - } - - got := tsm1.StringValues(test.a).Merge(test.b) - if exp, got := len(test.exp), len(got); exp != got { - t.Fatalf("test(%d): value length mismatch: exp %v, got %v", i, exp, got) - } - - dedup := tsm1.StringValues(append(test.a, test.b...)).Deduplicate() - - for i := range test.exp { - if exp, got := test.exp[i].String(), got[i].String(); exp != got { - t.Fatalf("value mismatch:\n exp %v\n got %v", exp, got) - } - - if exp, got := test.exp[i].String(), dedup[i].String(); exp != got { - t.Fatalf("value mismatch:\n exp %v\n got %v", exp, got) - } - } - } -} -func getTimes(n, step int, precision time.Duration) []int64 { - t := time.Now().Round(precision).UnixNano() - a := make([]int64, n) - for i := 0; i < n; i++ { - a[i] = t + (time.Duration(i*60) * precision).Nanoseconds() - } - return a -} - -func BenchmarkDecodeBlock_Float_Empty(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, float64(i)) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - var decodedValues []tsm1.Value - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err = tsm1.DecodeBlock(bytes, decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } -} - -func BenchmarkDecodeBlock_Float_EqualSize(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, float64(i)) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - decodedValues := make([]tsm1.Value, len(values)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err = tsm1.DecodeBlock(bytes, decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } -} - -func 
BenchmarkDecodeBlock_Float_TypeSpecific(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, float64(i)) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - decodedValues := make([]tsm1.FloatValue, len(values)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err = tsm1.DecodeFloatBlock(bytes, &decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } -} - -func BenchmarkDecodeBlock_Integer_Empty(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, int64(i)) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - var decodedValues []tsm1.Value - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err = tsm1.DecodeBlock(bytes, decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } -} - -func BenchmarkDecodeBlock_Integer_EqualSize(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, int64(i)) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - decodedValues := make([]tsm1.Value, len(values)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err = tsm1.DecodeBlock(bytes, decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } -} - -func BenchmarkDecodeBlock_Integer_TypeSpecific(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, int64(i)) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - decodedValues := make([]tsm1.IntegerValue, len(values)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err = tsm1.DecodeIntegerBlock(bytes, &decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } -} - -func BenchmarkDecodeBlock_Boolean_Empty(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, true) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - var decodedValues []tsm1.Value - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err = tsm1.DecodeBlock(bytes, decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } -} - -func BenchmarkDecodeBlock_Boolean_EqualSize(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, true) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - decodedValues := make([]tsm1.Value, len(values)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err = tsm1.DecodeBlock(bytes, decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } -} - -func 
BenchmarkDecodeBlock_Boolean_TypeSpecific(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, true) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - decodedValues := make([]tsm1.BooleanValue, len(values)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err = tsm1.DecodeBooleanBlock(bytes, &decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } -} - -func BenchmarkDecodeBooleanBlock(b *testing.B) { - cases := []int{ - 5, - 55, - 555, - 1000, - } - for _, n := range cases { - b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { - valueCount := n - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, true) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.ResetTimer() - b.ReportAllocs() - b.SetBytes(int64(tsm1.Values(values).Size())) - - b.RunParallel(func(pb *testing.PB) { - decodedValues := make([]tsm1.BooleanValue, len(values)) - - for pb.Next() { - _, err = tsm1.DecodeBooleanBlock(bytes, &decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } - }) - }) - } -} - -func BenchmarkDecodeFloatBlock(b *testing.B) { - cases := []int{ - 5, - 55, - 555, - 1000, - } - for _, n := range cases { - b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { - valueCount := n - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, float64(i)) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.ResetTimer() - b.ReportAllocs() - b.SetBytes(int64(tsm1.Values(values).Size())) - - b.RunParallel(func(pb *testing.PB) { - decodedValues := make([]tsm1.FloatValue, len(values)) - - for pb.Next() { - _, err = tsm1.DecodeFloatBlock(bytes, &decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } - }) - }) - } -} - -func BenchmarkDecodeIntegerBlock(b *testing.B) { - rle := func(_ *rand.Rand, i int) int64 { return int64(i) } - s8b := func(r *rand.Rand, i int) int64 { return int64(i + int(r.Int31n(10))) } - - cases := []struct { - enc string - gen func(r *rand.Rand, i int) int64 - n int - }{ - {enc: "rle", gen: rle, n: 5}, - {enc: "rle", gen: rle, n: 55}, - {enc: "rle", gen: rle, n: 555}, - {enc: "rle", gen: rle, n: 1000}, - {enc: "s8b", gen: s8b, n: 5}, - {enc: "s8b", gen: s8b, n: 55}, - {enc: "s8b", gen: s8b, n: 555}, - {enc: "s8b", gen: s8b, n: 1000}, - } - for _, bm := range cases { - b.Run(fmt.Sprintf("%s_%d", bm.enc, bm.n), func(b *testing.B) { - seededRand := rand.New(rand.NewSource(int64(bm.n * 1e3))) - - valueCount := bm.n - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, bm.gen(seededRand, i)) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.ResetTimer() - b.ReportAllocs() - b.SetBytes(int64(tsm1.Values(values).Size())) - - b.RunParallel(func(pb *testing.PB) { - decodedValues := make([]tsm1.IntegerValue, len(values)) - - for pb.Next() { - _, err = tsm1.DecodeIntegerBlock(bytes, &decodedValues) - if err 
!= nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } - }) - }) - } -} - -func BenchmarkDecodeStringBlock(b *testing.B) { - cases := []int{ - 5, - 55, - 555, - 1000, - } - for _, n := range cases { - b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { - valueCount := n - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, fmt.Sprintf("value %d", i)) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.ResetTimer() - b.ReportAllocs() - b.SetBytes(int64(tsm1.Values(values).Size())) - - b.RunParallel(func(pb *testing.PB) { - decodedValues := make([]tsm1.StringValue, len(values)) - - for pb.Next() { - _, err = tsm1.DecodeStringBlock(bytes, &decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } - }) - }) - } -} - -func BenchmarkDecodeBlock_String_Empty(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, fmt.Sprintf("value %d", i)) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - var decodedValues []tsm1.Value - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err = tsm1.DecodeBlock(bytes, decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } -} - -func BenchmarkDecodeBlock_String_EqualSize(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, fmt.Sprintf("value %d", i)) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - decodedValues := make([]tsm1.Value, len(values)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err = tsm1.DecodeBlock(bytes, decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } -} - -func BenchmarkDecodeBlock_String_TypeSpecific(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, fmt.Sprintf("value %d", i)) - } - - bytes, err := tsm1.Values(values).Encode(nil) - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - decodedValues := make([]tsm1.StringValue, len(values)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err = tsm1.DecodeStringBlock(bytes, &decodedValues) - if err != nil { - b.Fatalf("unexpected error decoding block: %v", err) - } - } -} - -func BenchmarkValues_Deduplicate(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - values := make([]tsm1.Value, len(times)) - for i, t := range times { - values[i] = tsm1.NewValue(t, fmt.Sprintf("value %d", i)) - } - values = append(values, values...) 
- - b.ResetTimer() - for i := 0; i < b.N; i++ { - tsm1.Values(values).Deduplicate() - } -} - -func BenchmarkValues_Merge(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - a := make([]tsm1.Value, len(times)) - c := make([]tsm1.Value, len(times)) - - for i, t := range times { - a[i] = tsm1.NewValue(t, float64(i)) - c[i] = tsm1.NewValue(t+1, float64(i)) - } - - b.ResetTimer() - benchmarkMerge(a, c, b) -} - -func BenchmarkValues_MergeDisjoint(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - a := make([]tsm1.Value, len(times)) - c := make([]tsm1.Value, len(times)) - - for i, t := range times { - a[i] = tsm1.NewValue(t, float64(i)) - c[i] = tsm1.NewValue(times[len(times)-1]+int64((i+1)*1e9), float64(i)) - } - - b.ResetTimer() - benchmarkMerge(a, c, b) -} - -func BenchmarkValues_MergeSame(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - a := make([]tsm1.Value, len(times)) - c := make([]tsm1.Value, len(times)) - - for i, t := range times { - a[i] = tsm1.NewValue(t, float64(i)) - c[i] = tsm1.NewValue(t, float64(i)) - } - - b.ResetTimer() - benchmarkMerge(a, c, b) -} - -func BenchmarkValues_MergeSimilar(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - a := make([]tsm1.Value, len(times)) - c := make([]tsm1.Value, len(times)) - - for i, t := range times { - a[i] = tsm1.NewValue(t, float64(i)) - if i == 0 { - t++ - } - c[i] = tsm1.NewValue(t, float64(i)) - } - - b.ResetTimer() - benchmarkMerge(a, c, b) -} - -func BenchmarkValues_MergeUnevenA(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - a := make([]tsm1.Value, len(times)) - c := make([]tsm1.Value, len(times)) - - for i, t := range times { - a[i] = tsm1.NewValue(t, float64(i)) - c[i] = tsm1.NewValue(t, float64(i)) - } - - b.ResetTimer() - benchmarkMerge(a[:700], c[:10], b) -} - -func BenchmarkValues_MergeUnevenB(b *testing.B) { - valueCount := 1000 - times := getTimes(valueCount, 60, time.Second) - a := make([]tsm1.Value, len(times)) - c := make([]tsm1.Value, len(times)) - - for i, t := range times { - a[i] = tsm1.NewValue(t, float64(i)) - c[i] = tsm1.NewValue(t, float64(i)) - } - - b.ResetTimer() - benchmarkMerge(a[:10], c[:700], b) -} - -func benchmarkMerge(a, c tsm1.Values, b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - aa := make(tsm1.Values, len(a)) - copy(aa, a) - cc := make(tsm1.Values, len(c)) - copy(cc, c) - b.StartTimer() - tsm1.Values(aa).Merge(tsm1.Values(cc)) - } -} - -func BenchmarkValues_EncodeInteger(b *testing.B) { - valueCount := 1024 - times := getTimes(valueCount, 60, time.Second) - a := make([]tsm1.Value, len(times)) - - for i, t := range times { - a[i] = tsm1.NewValue(t, int64(i)) - } - - buf := make([]byte, 1024*8) - b.ResetTimer() - for i := 0; i < b.N; i++ { - tsm1.Values(a).Encode(buf) - } -} - -func BenchmarkValues_EncodeFloat(b *testing.B) { - valueCount := 1024 - times := getTimes(valueCount, 60, time.Second) - a := make([]tsm1.Value, len(times)) - - for i, t := range times { - a[i] = tsm1.NewValue(t, float64(i)) - } - - buf := make([]byte, 1024*8) - b.ResetTimer() - for i := 0; i < b.N; i++ { - tsm1.Values(a).Encode(buf) - } -} -func BenchmarkValues_EncodeString(b *testing.B) { - valueCount := 1024 - times := getTimes(valueCount, 60, time.Second) - a := make([]tsm1.Value, len(times)) - - for i, t := range times { - a[i] = tsm1.NewValue(t, fmt.Sprintf("%d", i)) - } - - buf := make([]byte, 1024*8) - b.ResetTimer() - 
for i := 0; i < b.N; i++ { - tsm1.Values(a).Encode(buf) - } -} -func BenchmarkValues_EncodeBool(b *testing.B) { - valueCount := 1024 - times := getTimes(valueCount, 60, time.Second) - a := make([]tsm1.Value, len(times)) - - for i, t := range times { - if i%2 == 0 { - a[i] = tsm1.NewValue(t, true) - } else { - a[i] = tsm1.NewValue(t, false) - } - } - - buf := make([]byte, 1024*8) - b.ResetTimer() - for i := 0; i < b.N; i++ { - tsm1.Values(a).Encode(buf) - } -} diff --git a/tsdb/engine/tsm1/engine.gen.go b/tsdb/engine/tsm1/engine.gen.go deleted file mode 100644 index 19351634208..00000000000 --- a/tsdb/engine/tsm1/engine.gen.go +++ /dev/null @@ -1,53 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! -// Source: engine.gen.go.tmpl - -package tsm1 - -import ( - "context" - - "github.com/influxdata/influxdb/v2/influxql/query" -) - -// buildFloatCursor creates a cursor for a float field. -func (e *Engine) buildFloatCursor(ctx context.Context, measurement, seriesKey, field string, opt query.IteratorOptions) floatCursor { - key := SeriesFieldKeyBytes(seriesKey, field) - cacheValues := e.Cache.Values(key) - keyCursor := e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending) - return newFloatCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor) -} - -// buildIntegerCursor creates a cursor for a integer field. -func (e *Engine) buildIntegerCursor(ctx context.Context, measurement, seriesKey, field string, opt query.IteratorOptions) integerCursor { - key := SeriesFieldKeyBytes(seriesKey, field) - cacheValues := e.Cache.Values(key) - keyCursor := e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending) - return newIntegerCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor) -} - -// buildUnsignedCursor creates a cursor for a unsigned field. -func (e *Engine) buildUnsignedCursor(ctx context.Context, measurement, seriesKey, field string, opt query.IteratorOptions) unsignedCursor { - key := SeriesFieldKeyBytes(seriesKey, field) - cacheValues := e.Cache.Values(key) - keyCursor := e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending) - return newUnsignedCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor) -} - -// buildStringCursor creates a cursor for a string field. -func (e *Engine) buildStringCursor(ctx context.Context, measurement, seriesKey, field string, opt query.IteratorOptions) stringCursor { - key := SeriesFieldKeyBytes(seriesKey, field) - cacheValues := e.Cache.Values(key) - keyCursor := e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending) - return newStringCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor) -} - -// buildBooleanCursor creates a cursor for a boolean field. -func (e *Engine) buildBooleanCursor(ctx context.Context, measurement, seriesKey, field string, opt query.IteratorOptions) booleanCursor { - key := SeriesFieldKeyBytes(seriesKey, field) - cacheValues := e.Cache.Values(key) - keyCursor := e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending) - return newBooleanCursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor) -} diff --git a/tsdb/engine/tsm1/engine.gen.go.tmpl b/tsdb/engine/tsm1/engine.gen.go.tmpl deleted file mode 100644 index c72f0f56c3c..00000000000 --- a/tsdb/engine/tsm1/engine.gen.go.tmpl +++ /dev/null @@ -1,19 +0,0 @@ -package tsm1 - -import ( - "context" - - "github.com/influxdata/influxdb/v2/influxql/query" -) - -{{range .}} - -// build{{.Name}}Cursor creates a cursor for a {{.name}} field. 
-func (e *Engine) build{{.Name}}Cursor(ctx context.Context, measurement, seriesKey, field string, opt query.IteratorOptions) {{.name}}Cursor { - key := SeriesFieldKeyBytes(seriesKey, field) - cacheValues := e.Cache.Values(key) - keyCursor := e.KeyCursor(ctx, key, opt.SeekTime(), opt.Ascending) - return new{{.Name}}Cursor(opt.SeekTime(), opt.Ascending, cacheValues, keyCursor) -} - -{{end}} diff --git a/tsdb/engine/tsm1/engine.go b/tsdb/engine/tsm1/engine.go deleted file mode 100644 index 0191a3b9f7b..00000000000 --- a/tsdb/engine/tsm1/engine.go +++ /dev/null @@ -1,3216 +0,0 @@ -// Package tsm1 provides a TSDB in the Time Structured Merge tree format. -package tsm1 // import "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - -import ( - "archive/tar" - "bytes" - "context" - "errors" - "fmt" - "io" - "math" - "os" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/bytesutil" - "github.com/influxdata/influxdb/v2/pkg/estimator" - "github.com/influxdata/influxdb/v2/pkg/file" - "github.com/influxdata/influxdb/v2/pkg/limiter" - "github.com/influxdata/influxdb/v2/pkg/metrics" - "github.com/influxdata/influxdb/v2/pkg/radix" - intar "github.com/influxdata/influxdb/v2/pkg/tar" - "github.com/influxdata/influxdb/v2/pkg/tracing" - "github.com/influxdata/influxdb/v2/tsdb" - _ "github.com/influxdata/influxdb/v2/tsdb/index" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" - "github.com/influxdata/influxql" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -//go:generate -command tmpl go run github.com/benbjohnson/tmpl -//go:generate tmpl -data=@iterator.gen.go.tmpldata iterator.gen.go.tmpl engine.gen.go.tmpl array_cursor.gen.go.tmpl array_cursor_iterator.gen.go.tmpl -// The file store generate uses a custom modified tmpl -// to support adding templated data from the command line. -// This can probably be worked into the upstream tmpl -// but isn't at the moment. -//go:generate go run ../../../tools/tmpl -data=file_store.gen.go.tmpldata file_store.gen.go.tmpl=file_store.gen.go -//go:generate go run ../../../tools/tmpl -d isArray=y -data=file_store.gen.go.tmpldata file_store.gen.go.tmpl=file_store_array.gen.go -//go:generate tmpl -data=@encoding.gen.go.tmpldata encoding.gen.go.tmpl -//go:generate tmpl -data=@compact.gen.go.tmpldata compact.gen.go.tmpl -//go:generate tmpl -data=@reader.gen.go.tmpldata reader.gen.go.tmpl - -func init() { - tsdb.RegisterEngine("tsm1", NewEngine) -} - -var ( - // Ensure Engine implements the interface. - _ tsdb.Engine = &Engine{} - // Static objects to prevent small allocs. 
- timeBytes = []byte("time") - keyFieldSeparatorBytes = []byte(keyFieldSeparator) - emptyBytes = []byte{} -) - -var ( - tsmGroup = metrics.MustRegisterGroup("tsm1") - numberOfRefCursorsCounter = metrics.MustRegisterCounter("cursors_ref", metrics.WithGroup(tsmGroup)) - numberOfAuxCursorsCounter = metrics.MustRegisterCounter("cursors_aux", metrics.WithGroup(tsmGroup)) - numberOfCondCursorsCounter = metrics.MustRegisterCounter("cursors_cond", metrics.WithGroup(tsmGroup)) - planningTimer = metrics.MustRegisterTimer("planning_time", metrics.WithGroup(tsmGroup)) -) - -const ( - // keyFieldSeparator separates the series key from the field name in the composite key - // that identifies a specific field in series - keyFieldSeparator = "#!~#" - - // deleteFlushThreshold is the size in bytes of a batch of series keys to delete. - deleteFlushThreshold = 50 * 1024 * 1024 -) - -// Engine represents a storage engine with compressed blocks. -type Engine struct { - mu sync.RWMutex - - index tsdb.Index - - // The following group of fields is used to track the state of level compactions within the - // Engine. The WaitGroup is used to monitor the compaction goroutines, the 'done' channel is - // used to signal those goroutines to shutdown. Every request to disable level compactions will - // call 'Wait' on 'wg', with the first goroutine to arrive (levelWorkers == 0 while holding the - // lock) will close the done channel and re-assign 'nil' to the variable. Re-enabling will - // decrease 'levelWorkers', and when it decreases to zero, level compactions will be started - // back up again. - - wg *sync.WaitGroup // waitgroup for active level compaction goroutines - done chan struct{} // channel to signal level compactions to stop - levelWorkers int // Number of "workers" that expect compactions to be in a disabled state - - snapDone chan struct{} // channel to signal snapshot compactions to stop - snapWG *sync.WaitGroup // waitgroup for running snapshot compactions - - id uint64 - path string - sfile *tsdb.SeriesFile - logger *zap.Logger // Logger to be used for important messages - traceLogger *zap.Logger // Logger to be used when trace-logging is on. - traceLogging bool - - fieldset *tsdb.MeasurementFieldSet - - WAL *WAL - Cache *Cache - Compactor *Compactor - CompactionPlan CompactionPlanner - FileStore *FileStore - - MaxPointsPerBlock int - - // CacheFlushMemorySizeThreshold specifies the minimum size threshold for - // the cache when the engine should write a snapshot to a TSM file - CacheFlushMemorySizeThreshold uint64 - - // CacheFlushWriteColdDuration specifies the length of time after which if - // no writes have been committed to the WAL, the engine will write - // a snapshot of the cache to a TSM file - CacheFlushWriteColdDuration time.Duration - - // WALEnabled determines whether writes to the WAL are enabled. If this is false, - // writes will only exist in the cache and can be lost if a snapshot has not occurred. - WALEnabled bool - - // Invoked when creating a backup file "as new". - formatFileName FormatFileNameFunc - - // Controls whether to enabled compactions when the engine is open - enableCompactionsOnOpen bool - - stats *compactionMetrics - - activeCompactions *compactionCounter - - // Limiter for concurrent compactions. 
- compactionLimiter limiter.Fixed - - scheduler *scheduler - - // provides access to the total set of series IDs - seriesIDSets tsdb.SeriesIDSets - - // seriesTypeMap maps a series key to field type - seriesTypeMap *radix.Tree - - // muDigest ensures only one goroutine can generate a digest at a time. - muDigest sync.RWMutex -} - -// NewEngine returns a new instance of Engine. -func NewEngine(id uint64, idx tsdb.Index, path string, walPath string, sfile *tsdb.SeriesFile, opt tsdb.EngineOptions) tsdb.Engine { - etags := tsdb.EngineTags{ - Path: path, - WalPath: walPath, - Id: fmt.Sprintf("%d", id), - Bucket: filepath.Base(filepath.Dir(filepath.Dir(path))), // discard shard & rp, take db - EngineVersion: opt.EngineVersion, - } - - var wal *WAL - if opt.WALEnabled { - wal = NewWAL(walPath, opt.Config.WALMaxConcurrentWrites, opt.Config.WALMaxWriteDelay, etags) - wal.syncDelay = time.Duration(opt.Config.WALFsyncDelay) - } - - fs := NewFileStore(path, etags) - fs.openLimiter = opt.OpenLimiter - if opt.FileStoreObserver != nil { - fs.WithObserver(opt.FileStoreObserver) - } - fs.tsmMMAPWillNeed = opt.Config.TSMWillNeed - - cache := NewCache(uint64(opt.Config.CacheMaxMemorySize), etags) - - c := NewCompactor() - c.Dir = path - c.FileStore = fs - c.RateLimit = opt.CompactionThroughputLimiter - - var planner CompactionPlanner = NewDefaultPlanner(fs, time.Duration(opt.Config.CompactFullWriteColdDuration)) - if opt.CompactionPlannerCreator != nil { - planner = opt.CompactionPlannerCreator(opt.Config).(CompactionPlanner) - planner.SetFileStore(fs) - } - - stats := newEngineMetrics(etags) - activeCompactions := &compactionCounter{} - e := &Engine{ - id: id, - path: path, - index: idx, - sfile: sfile, - logger: zap.NewNop(), - traceLogger: zap.NewNop(), - traceLogging: opt.Config.TraceLoggingEnabled, - - WAL: wal, - Cache: cache, - - FileStore: fs, - Compactor: c, - CompactionPlan: planner, - - activeCompactions: activeCompactions, - scheduler: newScheduler(activeCompactions, opt.CompactionLimiter.Capacity()), - - CacheFlushMemorySizeThreshold: uint64(opt.Config.CacheSnapshotMemorySize), - CacheFlushWriteColdDuration: time.Duration(opt.Config.CacheSnapshotWriteColdDuration), - enableCompactionsOnOpen: true, - WALEnabled: opt.WALEnabled, - formatFileName: DefaultFormatFileName, - stats: stats, - compactionLimiter: opt.CompactionLimiter, - seriesIDSets: opt.SeriesIDSets, - } - - // Feature flag to enable per-series type checking, by default this is off and - // e.seriesTypeMap will be nil. - if os.Getenv("INFLUXDB_SERIES_TYPE_CHECK_ENABLED") != "" { - e.seriesTypeMap = radix.New() - } - - if e.traceLogging { - fs.enableTraceLogging(true) - if e.WALEnabled { - e.WAL.enableTraceLogging(true) - } - } - - return e -} - -func (e *Engine) WithFormatFileNameFunc(formatFileNameFunc FormatFileNameFunc) { - e.Compactor.WithFormatFileNameFunc(formatFileNameFunc) - e.formatFileName = formatFileNameFunc -} - -func (e *Engine) WithParseFileNameFunc(parseFileNameFunc ParseFileNameFunc) { - e.FileStore.WithParseFileNameFunc(parseFileNameFunc) - e.Compactor.WithParseFileNameFunc(parseFileNameFunc) -} - -// Digest returns a reader for the shard's digest. 
-func (e *Engine) Digest() (io.ReadCloser, int64, error) { - e.muDigest.Lock() - defer e.muDigest.Unlock() - - log, logEnd := logger.NewOperation(context.TODO(), e.logger, "Engine digest", "tsm1_digest") - defer logEnd() - - log.Info("Starting digest", zap.String("tsm1_path", e.path)) - - digestPath := filepath.Join(e.path, DigestFilename) - - // Get a list of tsm file paths from the FileStore. - files := e.FileStore.Files() - tsmfiles := make([]string, 0, len(files)) - for _, f := range files { - tsmfiles = append(tsmfiles, f.Path()) - } - - // See if there's a fresh digest cached on disk. - fresh, reason := DigestFresh(e.path, tsmfiles, e.LastModified()) - if fresh { - f, err := os.Open(digestPath) - if err == nil { - fi, err := f.Stat() - if err != nil { - log.Info("Digest aborted, couldn't stat digest file", logger.Shard(e.id), zap.Error(err)) - return nil, 0, err - } - - log.Info("Digest is fresh", logger.Shard(e.id), zap.String("path", digestPath)) - - // Return the cached digest. - return f, fi.Size(), nil - } - } - - log.Info("Digest stale", logger.Shard(e.id), zap.String("reason", reason)) - - // Either no digest existed or the existing one was stale - // so generate a new digest. - - // Make sure the directory exists, in case it was deleted for some reason. - if err := os.MkdirAll(e.path, 0777); err != nil { - log.Info("Digest aborted, problem creating shard directory path", zap.Error(err)) - return nil, 0, err - } - - // Create a tmp file to write the digest to. - tf, err := os.Create(digestPath + ".tmp") - if err != nil { - log.Info("Digest aborted, problem creating tmp digest", zap.Error(err)) - return nil, 0, err - } - - // Write the new digest to the tmp file. - if err := Digest(e.path, tsmfiles, tf); err != nil { - log.Info("Digest aborted, problem writing tmp digest", zap.Error(err)) - tf.Close() - os.Remove(tf.Name()) - return nil, 0, err - } - - // Rename the temporary digest file to the actual digest file. - if err := file.RenameFile(tf.Name(), digestPath); err != nil { - log.Info("Digest aborted, problem renaming tmp digest", zap.Error(err)) - return nil, 0, err - } - - // Create and return a reader for the new digest file. - f, err := os.Open(digestPath) - if err != nil { - log.Info("Digest aborted, opening new digest", zap.Error(err)) - return nil, 0, err - } - - fi, err := f.Stat() - if err != nil { - log.Info("Digest aborted, can't stat new digest", zap.Error(err)) - f.Close() - return nil, 0, err - } - - log.Info("Digest written", zap.String("tsm1_digest_path", digestPath), zap.Int64("size", fi.Size())) - - return f, fi.Size(), nil -} - -// SetEnabled sets whether the engine is enabled. -func (e *Engine) SetEnabled(enabled bool) { - e.enableCompactionsOnOpen = enabled - e.SetCompactionsEnabled(enabled) -} - -// SetCompactionsEnabled enables compactions on the engine. When disabled -// all running compactions are aborted and new compactions stop running. 
-func (e *Engine) SetCompactionsEnabled(enabled bool) { - if enabled { - e.enableSnapshotCompactions() - e.enableLevelCompactions(false) - } else { - e.disableSnapshotCompactions() - e.disableLevelCompactions(false) - } -} - -// enableLevelCompactions will request that level compactions start back up again -// -// 'wait' signifies that a corresponding call to disableLevelCompactions(true) was made at some -// point, and the associated task that required disabled compactions is now complete -func (e *Engine) enableLevelCompactions(wait bool) { - // If we don't need to wait, see if we're already enabled - if !wait { - e.mu.RLock() - if e.done != nil { - e.mu.RUnlock() - return - } - e.mu.RUnlock() - } - - e.mu.Lock() - if wait { - e.levelWorkers -= 1 - } - if e.levelWorkers != 0 || e.done != nil { - // still waiting on more workers or already enabled - e.mu.Unlock() - return - } - - // last one to enable, start things back up - e.Compactor.EnableCompactions() - e.done = make(chan struct{}) - wg := new(sync.WaitGroup) - wg.Add(1) - e.wg = wg - e.mu.Unlock() - - go func() { defer wg.Done(); e.compact(wg) }() -} - -// disableLevelCompactions will stop level compactions before returning. -// -// If 'wait' is set to true, then a corresponding call to enableLevelCompactions(true) will be -// required before level compactions will start back up again. -func (e *Engine) disableLevelCompactions(wait bool) { - e.mu.Lock() - old := e.levelWorkers - if wait { - e.levelWorkers += 1 - } - - // Hold onto the current done channel so we can wait on it if necessary - waitCh := e.done - wg := e.wg - - if old == 0 && e.done != nil { - // It's possible we have closed the done channel and released the lock and another - // goroutine has attempted to disable compactions. We're current in the process of - // disabling them so check for this and wait until the original completes. - select { - case <-e.done: - e.mu.Unlock() - return - default: - } - - // Prevent new compactions from starting - e.Compactor.DisableCompactions() - - // Stop all background compaction goroutines - close(e.done) - e.mu.Unlock() - wg.Wait() - - // Signal that all goroutines have exited. - e.mu.Lock() - e.done = nil - e.mu.Unlock() - return - } - e.mu.Unlock() - - // Compaction were already disabled. - if waitCh == nil { - return - } - - // We were not the first caller to disable compactions and they were in the process - // of being disabled. Wait for them to complete before returning. - <-waitCh - wg.Wait() -} - -func (e *Engine) enableSnapshotCompactions() { - // Check if already enabled under read lock - e.mu.RLock() - if e.snapDone != nil { - e.mu.RUnlock() - return - } - e.mu.RUnlock() - - // Check again under write lock - e.mu.Lock() - if e.snapDone != nil { - e.mu.Unlock() - return - } - - e.Compactor.EnableSnapshots() - e.snapDone = make(chan struct{}) - wg := new(sync.WaitGroup) - wg.Add(1) - e.snapWG = wg - e.mu.Unlock() - - go func() { defer wg.Done(); e.compactCache() }() -} - -func (e *Engine) disableSnapshotCompactions() { - e.mu.Lock() - if e.snapDone == nil { - e.mu.Unlock() - return - } - - // We may be in the process of stopping snapshots. See if the channel - // was closed. - select { - case <-e.snapDone: - e.mu.Unlock() - return - default: - } - - // first one here, disable and wait for completion - close(e.snapDone) - e.Compactor.DisableSnapshots() - wg := e.snapWG - e.mu.Unlock() - - // Wait for the snapshot goroutine to exit. 
- wg.Wait() - - // Signal that the goroutines are exit and everything is stopped by setting - // snapDone to nil. - e.mu.Lock() - e.snapDone = nil - e.mu.Unlock() - - // If the cache is empty, free up its resources as well. - if e.Cache.Size() == 0 { - e.Cache.Free() - } -} - -// ScheduleFullCompaction will force the engine to fully compact all data stored. -// This will cancel and running compactions and snapshot any data in the cache to -// TSM files. This is an expensive operation. -func (e *Engine) ScheduleFullCompaction() error { - // Snapshot any data in the cache - if err := e.WriteSnapshot(); err != nil { - return err - } - - // Cancel running compactions - e.SetCompactionsEnabled(false) - - // Ensure compactions are restarted - defer e.SetCompactionsEnabled(true) - - // Force the planner to only create a full plan. - e.CompactionPlan.ForceFull() - return nil -} - -// Path returns the path the engine was opened with. -func (e *Engine) Path() string { return e.path } - -func (e *Engine) MeasurementExists(name []byte) (bool, error) { - return e.index.MeasurementExists(name) -} - -func (e *Engine) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) { - return e.index.MeasurementNamesByRegex(re) -} - -// MeasurementFieldSet returns the measurement field set. -func (e *Engine) MeasurementFieldSet() *tsdb.MeasurementFieldSet { - return e.fieldset -} - -// MeasurementFields returns the measurement fields for a measurement. -func (e *Engine) MeasurementFields(measurement []byte) *tsdb.MeasurementFields { - return e.fieldset.CreateFieldsIfNotExists(measurement) -} - -func (e *Engine) HasTagKey(name, key []byte) (bool, error) { - return e.index.HasTagKey(name, key) -} - -func (e *Engine) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) { - return e.index.MeasurementTagKeysByExpr(name, expr) -} - -func (e *Engine) TagKeyCardinality(name, key []byte) int { - return e.index.TagKeyCardinality(name, key) -} - -// SeriesN returns the unique number of series in the index. -func (e *Engine) SeriesN() int64 { - return e.index.SeriesN() -} - -// MeasurementsSketches returns sketches that describe the cardinality of the -// measurements in this shard and measurements that were in this shard, but have -// been tombstoned. -func (e *Engine) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) { - return e.index.MeasurementsSketches() -} - -// SeriesSketches returns sketches that describe the cardinality of the -// series in this shard and series that were in this shard, but have -// been tombstoned. -func (e *Engine) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) { - return e.index.SeriesSketches() -} - -// LastModified returns the time when this shard was last modified. -func (e *Engine) LastModified() time.Time { - fsTime := e.FileStore.LastModified() - - if e.WALEnabled && e.WAL.LastWriteTime().After(fsTime) { - return e.WAL.LastWriteTime() - } - - return fsTime -} - -var globalCompactionMetrics *compactionMetrics = newAllCompactionMetrics(tsdb.EngineLabelNames()) - -// PrometheusCollectors returns all prometheus metrics for the tsm1 package. -func PrometheusCollectors() []prometheus.Collector { - collectors := []prometheus.Collector{ - globalCompactionMetrics.Duration, - globalCompactionMetrics.Active, - globalCompactionMetrics.Failed, - globalCompactionMetrics.Queued, - } - collectors = append(collectors, FileStoreCollectors()...) - collectors = append(collectors, CacheCollectors()...) 
- collectors = append(collectors, WALCollectors()...) - return collectors -} - -const ( - storageNamespace = "storage" - engineSubsystem = "compactions" - level1 = "1" - level2 = "2" - level3 = "3" - levelOpt = "opt" - levelFull = "full" - levelKey = "level" - levelCache = "cache" -) - -func labelForLevel(l int) prometheus.Labels { - switch l { - case 1: - return prometheus.Labels{levelKey: level1} - case 2: - return prometheus.Labels{levelKey: level2} - case 3: - return prometheus.Labels{levelKey: level3} - } - panic(fmt.Sprintf("labelForLevel: level out of range %d", l)) -} - -func newAllCompactionMetrics(labelNames []string) *compactionMetrics { - labelNamesWithLevel := append(labelNames, levelKey) - return &compactionMetrics{ - Duration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: storageNamespace, - Subsystem: engineSubsystem, - Name: "duration_seconds", - Help: "Histogram of compactions by level since startup", - // 10 minute compactions seem normal, 1h40min is high - Buckets: []float64{60, 600, 6000}, - }, labelNamesWithLevel), - Active: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: storageNamespace, - Subsystem: engineSubsystem, - Name: "active", - Help: "Gauge of compactions (by level) currently running", - }, labelNamesWithLevel), - Failed: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: storageNamespace, - Subsystem: engineSubsystem, - Name: "failed", - Help: "Counter of TSM compactions (by level) that have failed due to error", - }, labelNamesWithLevel), - Queued: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: storageNamespace, - Subsystem: engineSubsystem, - Name: "queued", - Help: "Counter of TSM compactions (by level) that are currently queued", - }, labelNamesWithLevel), - } -} - -type compactionCounter struct { - l1 int64 - l2 int64 - l3 int64 - full int64 - optimize int64 -} - -func (c *compactionCounter) countForLevel(l int) *int64 { - switch l { - case 1: - return &c.l1 - case 2: - return &c.l2 - case 3: - return &c.l3 - } - return nil -} - -// engineMetrics holds statistics across all instantiated engines -type compactionMetrics struct { - Duration prometheus.ObserverVec - Active *prometheus.GaugeVec - Queued *prometheus.GaugeVec - Failed *prometheus.CounterVec -} - -func newEngineMetrics(tags tsdb.EngineTags) *compactionMetrics { - engineLabels := tags.GetLabels() - return &compactionMetrics{ - Duration: globalCompactionMetrics.Duration.MustCurryWith(engineLabels), - Active: globalCompactionMetrics.Active.MustCurryWith(engineLabels), - Failed: globalCompactionMetrics.Failed.MustCurryWith(engineLabels), - Queued: globalCompactionMetrics.Queued.MustCurryWith(engineLabels), - } -} - -// DiskSize returns the total size in bytes of all TSM and WAL segments on disk. -func (e *Engine) DiskSize() int64 { - var walDiskSizeBytes int64 - if e.WALEnabled { - walDiskSizeBytes = e.WAL.DiskSizeBytes() - } - return e.FileStore.DiskSizeBytes() + walDiskSizeBytes -} - -// Open opens and initializes the engine. -func (e *Engine) Open(ctx context.Context) error { - if err := os.MkdirAll(e.path, 0777); err != nil { - return err - } - - if err := e.cleanup(); err != nil { - return err - } - - fields, err := tsdb.NewMeasurementFieldSet(filepath.Join(e.path, "fields.idx"), e.logger) - if err != nil { - e.logger.Warn(fmt.Sprintf("error opening fields.idx: %v. 
Rebuilding.", err)) - } - - e.mu.Lock() - e.fieldset = fields - e.mu.Unlock() - - e.index.SetFieldSet(fields) - - if e.WALEnabled { - if err := e.WAL.Open(); err != nil { - return err - } - } - - if err := e.FileStore.Open(ctx); err != nil { - return err - } - - if e.WALEnabled { - if err := e.reloadCache(); err != nil { - return err - } - } - - e.Compactor.Open() - - if e.enableCompactionsOnOpen { - e.SetCompactionsEnabled(true) - } - - return nil -} - -// Close closes the engine. Subsequent calls to Close are a nop. -func (e *Engine) Close() error { - e.SetCompactionsEnabled(false) - - // Lock now and close everything else down. - e.mu.Lock() - defer e.mu.Unlock() - e.done = nil // Ensures that the channel will not be closed again. - - var err error = nil - err = e.fieldset.Close() - if err2 := e.FileStore.Close(); err2 != nil && err == nil { - err = err2 - } - if e.WALEnabled { - if err2 := e.WAL.Close(); err2 != nil && err == nil { - err = err2 - } - } - return err -} - -// WithLogger sets the logger for the engine. -func (e *Engine) WithLogger(log *zap.Logger) { - e.logger = log.With(zap.String("engine", "tsm1")) - - if e.traceLogging { - e.traceLogger = e.logger - } - - if e.WALEnabled { - e.WAL.WithLogger(e.logger) - } - e.FileStore.WithLogger(e.logger) -} - -// LoadMetadataIndex loads the shard metadata into memory. -// -// Note, it not safe to call LoadMetadataIndex concurrently. LoadMetadataIndex -// should only be called when initialising a new Engine. -func (e *Engine) LoadMetadataIndex(shardID uint64, index tsdb.Index) error { - now := time.Now() - - // Save reference to index for iterator creation. - e.index = index - - // If we have the cached fields index on disk, we can skip scanning all the TSM files. - if !e.fieldset.IsEmpty() { - return nil - } - - keys := make([][]byte, 0, 10000) - fieldTypes := make([]influxql.DataType, 0, 10000) - - if err := e.FileStore.WalkKeys(nil, func(key []byte, typ byte) error { - fieldType := BlockTypeToInfluxQLDataType(typ) - if fieldType == influxql.Unknown { - return fmt.Errorf("unknown block type: %v", typ) - } - - keys = append(keys, key) - fieldTypes = append(fieldTypes, fieldType) - if len(keys) == cap(keys) { - // Send batch of keys to the index. - if err := e.addToIndexFromKey(keys, fieldTypes); err != nil { - return err - } - - // Reset buffers. - keys, fieldTypes = keys[:0], fieldTypes[:0] - } - - return nil - }); err != nil { - return err - } - - if len(keys) > 0 { - // Add remaining partial batch from FileStore. - if err := e.addToIndexFromKey(keys, fieldTypes); err != nil { - return err - } - keys, fieldTypes = keys[:0], fieldTypes[:0] - } - - // load metadata from the Cache - if err := e.Cache.ApplyEntryFn(func(key []byte, entry *entry) error { - fieldType, err := entry.values.InfluxQLType() - if err != nil { - e.logger.Info("Error getting the data type of values for key", zap.ByteString("key", key), zap.Error(err)) - } - - keys = append(keys, key) - fieldTypes = append(fieldTypes, fieldType) - if len(keys) == cap(keys) { - // Send batch of keys to the index. - if err := e.addToIndexFromKey(keys, fieldTypes); err != nil { - return err - } - - // Reset buffers. - keys, fieldTypes = keys[:0], fieldTypes[:0] - } - return nil - }); err != nil { - return err - } - - if len(keys) > 0 { - // Add remaining partial batch from FileStore. 
- if err := e.addToIndexFromKey(keys, fieldTypes); err != nil { - return err - } - } - - // Save the field set index so we don't have to rebuild it next time - if err := e.fieldset.WriteToFile(); err != nil { - return err - } - - e.traceLogger.Info("Meta data index for shard loaded", zap.Uint64("id", shardID), zap.Duration("duration", time.Since(now))) - return nil -} - -// IsIdle returns true if the cache is empty, there are no running compactions and the -// shard is fully compacted. -func (e *Engine) IsIdle() (state bool, reason string) { - c := []struct { - ActiveCompactions *int64 - LogMessage string - }{ - // We don't actually track cache compactions: {&e.status.CacheCompactionsActive, "not idle because of active Cache compactions"}, - {&e.activeCompactions.l1, "not idle because of active Level1 compactions"}, - {&e.activeCompactions.l2, "not idle because of active Level2 compactions"}, - {&e.activeCompactions.l3, "not idle because of active Level3 compactions"}, - {&e.activeCompactions.full, "not idle because of active Full compactions"}, - {&e.activeCompactions.optimize, "not idle because of active TSM Optimization compactions"}, - } - - for _, compactionState := range c { - count := atomic.LoadInt64(compactionState.ActiveCompactions) - if count > 0 { - return false, compactionState.LogMessage - } - } - - if cacheSize := e.Cache.Size(); cacheSize > 0 { - return false, "not idle because cache size is nonzero" - } else if c, r := e.CompactionPlan.FullyCompacted(); !c { - return false, r - } else { - return true, "" - } -} - -// Free releases any resources held by the engine to free up memory or CPU. -func (e *Engine) Free() error { - e.Cache.Free() - return e.FileStore.Free() -} - -// Backup writes a tar archive of any TSM files modified since the passed -// in time to the passed in writer. The basePath will be prepended to the names -// of the files in the archive. It will force a snapshot of the WAL first -// then perform the backup with a read lock against the file store. This means -// that new TSM files will not be able to be created in this shard while the -// backup is running. For shards that are still actively getting writes, this -// could cause the WAL to backup, increasing memory usage and eventually rejecting writes. -func (e *Engine) Backup(w io.Writer, basePath string, since time.Time) error { - var err error - var path string - path, err = e.CreateSnapshot(true) - if err != nil { - return err - } - // Remove the temporary snapshot dir - defer os.RemoveAll(path) - - return intar.Stream(w, path, basePath, intar.SinceFilterTarFile(since)) -} - -func (e *Engine) timeStampFilterTarFile(start, end time.Time) func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error { - return func(fi os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error { - if !strings.HasSuffix(fi.Name(), ".tsm") { - return intar.StreamFile(fi, shardRelativePath, fullPath, tw) - } - - f, err := os.Open(fullPath) - if err != nil { - return err - } - r, err := NewTSMReader(f) - if err != nil { - return err - } - - // Grab the tombstone file if one exists. 
- if ts := r.TombstoneStats(); ts.TombstoneExists { - return intar.StreamFile(fi, shardRelativePath, filepath.Base(ts.Path), tw) - } - - min, max := r.TimeRange() - stun := start.UnixNano() - eun := end.UnixNano() - - // We overlap time ranges, we need to filter the file - if min >= stun && min <= eun && max > eun || // overlap to the right - max >= stun && max <= eun && min < stun || // overlap to the left - min <= stun && max >= eun { // TSM file has a range LARGER than the boundary - err := e.filterFileToBackup(r, fi, shardRelativePath, fullPath, start.UnixNano(), end.UnixNano(), tw) - if err != nil { - if err := r.Close(); err != nil { - return err - } - return err - } - - } - - // above is the only case where we need to keep the reader open. - if err := r.Close(); err != nil { - return err - } - - // the TSM file is 100% inside the range, so we can just write it without scanning each block - if min >= start.UnixNano() && max <= end.UnixNano() { - if err := intar.StreamFile(fi, shardRelativePath, fullPath, tw); err != nil { - return err - } - } - return nil - } -} - -func (e *Engine) Export(w io.Writer, basePath string, start time.Time, end time.Time) error { - path, err := e.CreateSnapshot(false) - if err != nil { - return err - } - // Remove the temporary snapshot dir - defer os.RemoveAll(path) - - return intar.Stream(w, path, basePath, e.timeStampFilterTarFile(start, end)) -} - -func (e *Engine) filterFileToBackup(r *TSMReader, fi os.FileInfo, shardRelativePath, fullPath string, start, end int64, tw *tar.Writer) error { - path := fullPath + ".tmp" - out, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0666) - if err != nil { - return err - } - defer os.Remove(path) - - w, err := NewTSMWriter(out) - if err != nil { - return err - } - defer w.Close() - - // implicit else: here we iterate over the blocks and only keep the ones we really want. - bi := r.BlockIterator() - - for bi.Next() { - // not concerned with typ or checksum since we are just blindly writing back, with no decoding - key, minTime, maxTime, _, _, buf, err := bi.Read() - if err != nil { - return err - } - if minTime >= start && minTime <= end || - maxTime >= start && maxTime <= end || - minTime <= start && maxTime >= end { - err := w.WriteBlock(key, minTime, maxTime, buf) - if err != nil { - return err - } - } - } - - if err := bi.Err(); err != nil { - return err - } - - err = w.WriteIndex() - if err != nil { - return err - } - - // make sure the whole file is out to disk - if err := w.Flush(); err != nil { - return err - } - - tmpFi, err := os.Stat(path) - if err != nil { - return err - } - - return intar.StreamRenameFile(tmpFi, fi.Name(), shardRelativePath, path, tw) -} - -// Restore reads a tar archive generated by Backup(). -// Only files that match basePath will be copied into the directory. This obtains -// a write lock so no operations can be performed while restoring. -func (e *Engine) Restore(r io.Reader, basePath string) error { - return e.overlay(r, basePath, false) -} - -// Import reads a tar archive generated by Backup() and adds each -// file matching basePath as a new TSM file. This obtains -// a write lock so no operations can be performed while Importing. -// If the import is successful, a full compaction is scheduled. 
-func (e *Engine) Import(r io.Reader, basePath string) error { - if err := e.overlay(r, basePath, true); err != nil { - return err - } - return e.ScheduleFullCompaction() -} - -// overlay reads a tar archive generated by Backup() and adds each file -// from the archive matching basePath to the shard. -// If asNew is true, each file will be installed as a new TSM file even if an -// existing file with the same name in the backup exists. -func (e *Engine) overlay(r io.Reader, basePath string, asNew bool) error { - // Copy files from archive while under lock to prevent reopening. - newFiles, err := func() ([]string, error) { - e.mu.Lock() - defer e.mu.Unlock() - - var newFiles []string - tr := tar.NewReader(r) - for { - if fileName, err := e.readFileFromBackup(tr, basePath, asNew); err == io.EOF { - break - } else if err != nil { - return nil, err - } else if fileName != "" { - newFiles = append(newFiles, fileName) - } - } - - if err := file.SyncDir(e.path); err != nil { - return nil, err - } - - // The filestore will only handle tsm files. Other file types will be ignored. - if err := e.FileStore.Replace(nil, newFiles); err != nil { - return nil, err - } - return newFiles, nil - }() - - if err != nil { - return err - } - - // Load any new series keys to the index - tsmFiles := make([]TSMFile, 0, len(newFiles)) - defer func() { - for _, r := range tsmFiles { - r.Close() - } - }() - - ext := fmt.Sprintf(".%s", TmpTSMFileExtension) - for _, f := range newFiles { - // If asNew is true, the files created from readFileFromBackup will be new ones - // having a temp extension. - f = strings.TrimSuffix(f, ext) - if !strings.HasSuffix(f, TSMFileExtension) { - // This isn't a .tsm file. - continue - } - - fd, err := os.Open(f) - if err != nil { - return err - } - - r, err := NewTSMReader(fd) - if err != nil { - return err - } - tsmFiles = append(tsmFiles, r) - } - - // Merge and dedup all the series keys across each reader to reduce - // lock contention on the index. - keys := make([][]byte, 0, 10000) - fieldTypes := make([]influxql.DataType, 0, 10000) - - ki := newMergeKeyIterator(tsmFiles, nil) - for ki.Next() { - key, typ := ki.Read() - fieldType := BlockTypeToInfluxQLDataType(typ) - if fieldType == influxql.Unknown { - return fmt.Errorf("unknown block type: %v", typ) - } - - keys = append(keys, key) - fieldTypes = append(fieldTypes, fieldType) - - if len(keys) == cap(keys) { - // Send batch of keys to the index. - if err := e.addToIndexFromKey(keys, fieldTypes); err != nil { - return err - } - - // Reset buffers. - keys, fieldTypes = keys[:0], fieldTypes[:0] - } - } - - if len(keys) > 0 { - // Add remaining partial batch. - if err := e.addToIndexFromKey(keys, fieldTypes); err != nil { - return err - } - } - return e.MeasurementFieldSet().WriteToFile() -} - -// readFileFromBackup copies the next file from the archive into the shard. -// The file is skipped if it does not have a matching shardRelativePath prefix. -// If asNew is true, each file will be installed as a new TSM file even if an -// existing file with the same name in the backup exists. -func (e *Engine) readFileFromBackup(tr *tar.Reader, shardRelativePath string, asNew bool) (string, error) { - // Read next archive file. - hdr, err := tr.Next() - if err != nil { - return "", err - } - - if !strings.HasSuffix(hdr.Name, TSMFileExtension) { - // This isn't a .tsm file. - return "", nil - } - - filename := filepath.Base(filepath.FromSlash(hdr.Name)) - - // If this is a directory entry (usually just `index` for tsi), create it an move on. 
- if hdr.Typeflag == tar.TypeDir { - if err := os.MkdirAll(filepath.Join(e.path, filename), os.FileMode(hdr.Mode).Perm()); err != nil { - return "", err - } - return "", nil - } - - if asNew { - filename = e.formatFileName(e.FileStore.NextGeneration(), 1) + "." + TSMFileExtension - } - - tmp := fmt.Sprintf("%s.%s", filepath.Join(e.path, filename), TmpTSMFileExtension) - // Create new file on disk. - f, err := os.OpenFile(tmp, os.O_CREATE|os.O_RDWR, 0666) - if err != nil { - return "", err - } - defer f.Close() - - // Copy from archive to the file. - if _, err := io.CopyN(f, tr, hdr.Size); err != nil { - return "", err - } - - // Sync to disk & close. - if err := f.Sync(); err != nil { - return "", err - } - - return tmp, nil -} - -// addToIndexFromKey will pull the measurement names, series keys, and field -// names from composite keys, and add them to the database index and measurement -// fields. -func (e *Engine) addToIndexFromKey(keys [][]byte, fieldTypes []influxql.DataType) error { - var field []byte - names := make([][]byte, 0, len(keys)) - tags := make([]models.Tags, 0, len(keys)) - - for i := 0; i < len(keys); i++ { - // Replace tsm key format with index key format. - keys[i], field = SeriesAndFieldFromCompositeKey(keys[i]) - name := models.ParseName(keys[i]) - mf := e.fieldset.CreateFieldsIfNotExists(name) - if err := mf.CreateFieldIfNotExists(field, fieldTypes[i]); err != nil { - return err - } - - names = append(names, name) - tags = append(tags, models.ParseTags(keys[i])) - } - - return e.index.CreateSeriesListIfNotExists(keys, names, tags) -} - -// WritePoints writes metadata and point data into the engine. -// It returns an error if new points are added to an existing key. -func (e *Engine) WritePoints(ctx context.Context, points []models.Point) error { - values := make(map[string][]Value, len(points)) - var ( - keyBuf []byte - baseLen int - seriesErr error - ) - - for _, p := range points { - keyBuf = append(keyBuf[:0], p.Key()...) - keyBuf = append(keyBuf, keyFieldSeparator...) - baseLen = len(keyBuf) - iter := p.FieldIterator() - t := p.Time().UnixNano() - for iter.Next() { - // Skip fields name "time", they are illegal - if bytes.Equal(iter.FieldKey(), timeBytes) { - continue - } - - keyBuf = append(keyBuf[:baseLen], iter.FieldKey()...) - - if e.seriesTypeMap != nil { - // Fast-path check to see if the field for the series already exists. - if v, ok := e.seriesTypeMap.Get(keyBuf); !ok { - if typ, err := e.Type(keyBuf); err != nil { - // Field type is unknown, we can try to add it. - } else if typ != iter.Type() { - // Existing type is different from what was passed in, we need to drop - // this write and refresh the series type map. - seriesErr = tsdb.ErrFieldTypeConflict - e.seriesTypeMap.Insert(keyBuf, int(typ)) - continue - } - - // Doesn't exist, so try to insert - vv, ok := e.seriesTypeMap.Insert(keyBuf, int(iter.Type())) - - // We didn't insert and the type that exists isn't what we tried to insert, so - // we have a conflict and must drop this field/series. - if !ok || vv != int(iter.Type()) { - seriesErr = tsdb.ErrFieldTypeConflict - continue - } - } else if v != int(iter.Type()) { - // The series already exists, but with a different type. This is also a type conflict - // and we need to drop this field/series. 
- seriesErr = tsdb.ErrFieldTypeConflict - continue - } - } - - var v Value - switch iter.Type() { - case models.Float: - fv, err := iter.FloatValue() - if err != nil { - return err - } - v = NewFloatValue(t, fv) - case models.Integer: - iv, err := iter.IntegerValue() - if err != nil { - return err - } - v = NewIntegerValue(t, iv) - case models.Unsigned: - iv, err := iter.UnsignedValue() - if err != nil { - return err - } - v = NewUnsignedValue(t, iv) - case models.String: - v = NewStringValue(t, iter.StringValue()) - case models.Boolean: - bv, err := iter.BooleanValue() - if err != nil { - return err - } - v = NewBooleanValue(t, bv) - default: - return fmt.Errorf("unknown field type for %s: %s", string(iter.FieldKey()), p.String()) - } - values[string(keyBuf)] = append(values[string(keyBuf)], v) - } - } - - e.mu.RLock() - defer e.mu.RUnlock() - - // first try to write to the cache - if err := e.Cache.WriteMulti(values); err != nil { - return err - } - - if e.WALEnabled { - if _, err := e.WAL.WriteMulti(ctx, values); err != nil { - return err - } - } - return seriesErr -} - -// DeleteSeriesRange removes the values between min and max (inclusive) from all series -func (e *Engine) DeleteSeriesRange(ctx context.Context, itr tsdb.SeriesIterator, min, max int64) error { - return e.DeleteSeriesRangeWithPredicate(ctx, itr, func(name []byte, tags models.Tags) (int64, int64, bool) { - return min, max, true - }) -} - -// DeleteSeriesRangeWithPredicate removes the values between min and max (inclusive) from all series -// for which predicate() returns true. If predicate() is nil, then all values in range are removed. -func (e *Engine) DeleteSeriesRangeWithPredicate( - ctx context.Context, - itr tsdb.SeriesIterator, - predicate func(name []byte, tags models.Tags) (int64, int64, bool), -) error { - var disableOnce bool - - // Ensure that the index does not compact away the measurement or series we're - // going to delete before we're done with them. - if tsiIndex, ok := e.index.(*tsi1.Index); ok { - tsiIndex.DisableCompactions() - defer tsiIndex.EnableCompactions() - tsiIndex.Wait() - - fs, err := tsiIndex.RetainFileSet() - if err != nil { - return err - } - defer fs.Release() - } - - var ( - sz int - min, max int64 = math.MinInt64, math.MaxInt64 - - // Indicator that the min/max time for the current batch has changed and - // we need to flush the current batch before appending to it. - flushBatch bool - ) - - // These are reversed from min/max to ensure they are different the first time through. - newMin, newMax := int64(math.MaxInt64), int64(math.MinInt64) - - // There is no predicate, so setup newMin/newMax to delete the full time range. - if predicate == nil { - newMin = min - newMax = max - } - - batch := make([][]byte, 0, 10000) - for { - elem, err := itr.Next() - if err != nil { - return err - } else if elem == nil { - break - } - - // See if the series should be deleted and if so, what range of time. - if predicate != nil { - var shouldDelete bool - newMin, newMax, shouldDelete = predicate(elem.Name(), elem.Tags()) - if !shouldDelete { - continue - } - - // If the min/max happens to change for the batch, we need to flush - // the current batch and start a new one. 
- flushBatch = (min != newMin || max != newMax) && len(batch) > 0 - } - - if elem.Expr() != nil { - if v, ok := elem.Expr().(*influxql.BooleanLiteral); !ok || !v.Val { - return errors.New("fields not supported in WHERE clause during deletion") - } - } - - if !disableOnce { - // Disable and abort running compactions so that tombstones added existing tsm - // files don't get removed. This would cause deleted measurements/series to - // re-appear once the compaction completed. We only disable the level compactions - // so that snapshotting does not stop while writing out tombstones. If it is stopped, - // and writing tombstones takes a long time, writes can get rejected due to the cache - // filling up. - e.disableLevelCompactions(true) - defer e.enableLevelCompactions(true) - - e.sfile.DisableCompactions() - defer e.sfile.EnableCompactions() - e.sfile.Wait() - - disableOnce = true - } - - if sz >= deleteFlushThreshold || flushBatch { - // Delete all matching batch. - if err := e.deleteSeriesRange(ctx, batch, min, max); err != nil { - return err - } - batch = batch[:0] - sz = 0 - flushBatch = false - } - - // Use the new min/max time for the next iteration - min = newMin - max = newMax - - key := models.MakeKey(elem.Name(), elem.Tags()) - sz += len(key) - batch = append(batch, key) - } - - if len(batch) > 0 { - // Delete all matching batch. - if err := e.deleteSeriesRange(ctx, batch, min, max); err != nil { - return err - } - } - - return nil -} - -// deleteSeriesRange removes the values between min and max (inclusive) from all series. This -// does not update the index or disable compactions. This should mainly be called by DeleteSeriesRange -// and not directly. -func (e *Engine) deleteSeriesRange(ctx context.Context, seriesKeys [][]byte, min, max int64) error { - if len(seriesKeys) == 0 { - return nil - } - - // Min and max time in the engine are slightly different from the query language values. - if min == influxql.MinTime { - min = math.MinInt64 - } - if max == influxql.MaxTime { - max = math.MaxInt64 - } - - var overlapsTimeRangeMinMax bool - var overlapsTimeRangeMinMaxLock sync.Mutex - e.FileStore.Apply(ctx, func(r TSMFile) error { - if r.OverlapsTimeRange(min, max) { - overlapsTimeRangeMinMaxLock.Lock() - overlapsTimeRangeMinMax = true - overlapsTimeRangeMinMaxLock.Unlock() - } - return nil - }) - - if !overlapsTimeRangeMinMax && e.Cache.store.count() > 0 { - overlapsTimeRangeMinMax = true - } - - if !overlapsTimeRangeMinMax { - return nil - } - - // Ensure keys are sorted since lower layers require them to be. - if !bytesutil.IsSorted(seriesKeys) { - bytesutil.Sort(seriesKeys) - } - - // Run the delete on each TSM file in parallel - if err := e.FileStore.Apply(ctx, func(r TSMFile) error { - // See if this TSM file contains the keys and time range - minKey, maxKey := seriesKeys[0], seriesKeys[len(seriesKeys)-1] - tsmMin, tsmMax := r.KeyRange() - - tsmMin, _ = SeriesAndFieldFromCompositeKey(tsmMin) - tsmMax, _ = SeriesAndFieldFromCompositeKey(tsmMax) - - overlaps := bytes.Compare(tsmMin, maxKey) <= 0 && bytes.Compare(tsmMax, minKey) >= 0 - if !overlaps || !r.OverlapsTimeRange(min, max) { - return nil - } - - // Delete each key we find in the file. We seek to the min key and walk from there. 
- batch := r.BatchDelete() - n := r.KeyCount() - var j int - for i := r.Seek(minKey); i < n; i++ { - indexKey, _ := r.KeyAt(i) - seriesKey, _ := SeriesAndFieldFromCompositeKey(indexKey) - - for j < len(seriesKeys) && bytes.Compare(seriesKeys[j], seriesKey) < 0 { - j++ - } - - if j >= len(seriesKeys) { - break - } - if bytes.Equal(seriesKeys[j], seriesKey) { - if err := batch.DeleteRange([][]byte{indexKey}, min, max); err != nil { - batch.Rollback() - return err - } - } - } - - return batch.Commit() - }); err != nil { - return err - } - - // find the keys in the cache and remove them - deleteKeys := make([][]byte, 0, len(seriesKeys)) - - // ApplySerialEntryFn cannot return an error in this invocation. - _ = e.Cache.ApplyEntryFn(func(k []byte, _ *entry) error { - seriesKey, _ := SeriesAndFieldFromCompositeKey([]byte(k)) - - // Cache does not walk keys in sorted order, so search the sorted - // series we need to delete to see if any of the cache keys match. - i := bytesutil.SearchBytes(seriesKeys, seriesKey) - if i < len(seriesKeys) && bytes.Equal(seriesKey, seriesKeys[i]) { - // k is the measurement + tags + sep + field - deleteKeys = append(deleteKeys, k) - } - return nil - }) - - // Sort the series keys because ApplyEntryFn iterates over the keys randomly. - bytesutil.Sort(deleteKeys) - - e.Cache.DeleteRange(deleteKeys, min, max) - - // delete from the WAL - if e.WALEnabled { - if _, err := e.WAL.DeleteRange(ctx, deleteKeys, min, max); err != nil { - return err - } - } - - // The series are deleted on disk, but the index may still say they exist. - // Depending on the min,max time passed in, the series may or not actually - // exists now. To reconcile the index, we walk the series keys that still exists - // on disk and cross out any keys that match the passed in series. Any series - // left in the slice at the end do not exist and can be deleted from the index. - // Note: this is inherently racy if writes are occurring to the same measurement/series are - // being removed. A write could occur and exist in the cache at this point, but we - // would delete it from the index. - minKey := seriesKeys[0] - - // Apply runs this func concurrently. The seriesKeys slice is mutated concurrently - // by different goroutines setting positions to nil. - if err := e.FileStore.Apply(ctx, func(r TSMFile) error { - n := r.KeyCount() - var j int - - // Start from the min deleted key that exists in this file. - for i := r.Seek(minKey); i < n; i++ { - if j >= len(seriesKeys) { - return nil - } - - indexKey, _ := r.KeyAt(i) - seriesKey, _ := SeriesAndFieldFromCompositeKey(indexKey) - - // Skip over any deleted keys that are less than our tsm key - cmp := bytes.Compare(seriesKeys[j], seriesKey) - for j < len(seriesKeys) && cmp < 0 { - j++ - if j >= len(seriesKeys) { - return nil - } - cmp = bytes.Compare(seriesKeys[j], seriesKey) - } - - // We've found a matching key, cross it out so we do not remove it from the index. - if j < len(seriesKeys) && cmp == 0 { - seriesKeys[j] = emptyBytes - j++ - } - } - return nil - }); err != nil { - return err - } - - // The seriesKeys slice is mutated if they are still found in the cache. 
- cacheKeys := e.Cache.Keys() - for i := 0; i < len(seriesKeys); i++ { - seriesKey := seriesKeys[i] - // Already crossed out - if len(seriesKey) == 0 { - continue - } - - j := bytesutil.SearchBytes(cacheKeys, seriesKey) - if j < len(cacheKeys) { - cacheSeriesKey, _ := SeriesAndFieldFromCompositeKey(cacheKeys[j]) - if bytes.Equal(seriesKey, cacheSeriesKey) { - seriesKeys[i] = emptyBytes - } - } - } - - // Have we deleted all values for the series? If so, we need to remove - // the series from the index. - hasDeleted := false - for _, k := range seriesKeys { - if len(k) > 0 { - hasDeleted = true - break - } - } - if hasDeleted { - buf := make([]byte, 1024) // For use when accessing series file. - ids := tsdb.NewSeriesIDSet() - measurements := make(map[string]struct{}, 1) - - for _, k := range seriesKeys { - if len(k) == 0 { - continue // This key was wiped because it shouldn't be removed from index. - } - - name, tags := models.ParseKeyBytes(k) - sid := e.sfile.SeriesID(name, tags, buf) - if sid == 0 { - continue - } - - // See if this series was found in the cache earlier - i := bytesutil.SearchBytes(deleteKeys, k) - - var hasCacheValues bool - // If there are multiple fields, they will have the same prefix. If any field - // has values, then we can't delete it from the index. - for i < len(deleteKeys) && bytes.HasPrefix(deleteKeys[i], k) { - if e.Cache.Values(deleteKeys[i]).Len() > 0 { - hasCacheValues = true - break - } - i++ - } - - if hasCacheValues { - continue - } - - measurements[string(name)] = struct{}{} - // Remove the series from the local index. - if err := e.index.DropSeries(sid, k, false); err != nil { - return err - } - - // Add the id to the set of delete ids. - ids.Add(sid) - } - - actuallyDeleted := make([]string, 0, len(measurements)) - for k := range measurements { - if dropped, err := e.index.DropMeasurementIfSeriesNotExist([]byte(k)); err != nil { - return err - } else if dropped { - if deleted, err := e.cleanupMeasurement([]byte(k)); err != nil { - return err - } else if deleted { - actuallyDeleted = append(actuallyDeleted, k) - } - } - } - if len(actuallyDeleted) > 0 { - if err := e.fieldset.Save(tsdb.MeasurementsToFieldChangeDeletions(actuallyDeleted)); err != nil { - return err - } - } - - // Remove any series IDs for our set that still exist in other shards. - // We cannot remove these from the series file yet. - if err := e.seriesIDSets.ForEach(func(s *tsdb.SeriesIDSet) { - ids = ids.AndNot(s) - }); err != nil { - return err - } - - // Remove the remaining ids from the series file as they no longer exist - // in any shard. - var err error - ids.ForEach(func(id uint64) { - if err1 := e.sfile.DeleteSeriesID(id); err1 != nil { - err = err1 - return - } - }) - if err != nil { - return err - } - } - - return nil -} - -func (e *Engine) cleanupMeasurement(name []byte) (deleted bool, err error) { - // A sentinel error message to cause DeleteWithLock to not delete the measurement - abortErr := fmt.Errorf("measurements still exist") - - // Under write lock, delete the measurement if we no longer have any data stored for - // the measurement. If data exists, we can't delete the field set yet as there - // were writes to the measurement while we are deleting it. - if err := e.fieldset.DeleteWithLock(string(name), func() error { - encodedName := models.EscapeMeasurement(name) - sep := len(encodedName) - - // First scan the cache to see if any series exists for this measurement. 
- if err := e.Cache.ApplyEntryFn(func(k []byte, _ *entry) error { - if bytes.HasPrefix(k, encodedName) && (k[sep] == ',' || k[sep] == keyFieldSeparator[0]) { - return abortErr - } - return nil - }); err != nil { - return err - } - - // Check the filestore. - return e.FileStore.WalkKeys(name, func(k []byte, _ byte) error { - if bytes.HasPrefix(k, encodedName) && (k[sep] == ',' || k[sep] == keyFieldSeparator[0]) { - return abortErr - } - return nil - }) - - }); err != nil && err != abortErr { - // Something else failed, return it - return false, err - } - - return err != abortErr, nil -} - -// DeleteMeasurement deletes a measurement and all related series. -func (e *Engine) DeleteMeasurement(ctx context.Context, name []byte) error { - // Attempt to find the series keys. - indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} - itr, err := indexSet.MeasurementSeriesByExprIterator(name, nil) - if err != nil { - return err - } else if itr == nil { - return nil - } - defer itr.Close() - return e.DeleteSeriesRange(ctx, tsdb.NewSeriesIteratorAdapter(e.sfile, itr), math.MinInt64, math.MaxInt64) -} - -// ForEachMeasurementName iterates over each measurement name in the engine. -func (e *Engine) ForEachMeasurementName(fn func(name []byte) error) error { - return e.index.ForEachMeasurementName(fn) -} - -func (e *Engine) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSlice []models.Tags) error { - return e.index.CreateSeriesListIfNotExists(keys, names, tagsSlice) -} - -func (e *Engine) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error { - return e.index.CreateSeriesIfNotExists(key, name, tags) -} - -// WriteTo is not implemented. -func (e *Engine) WriteTo(w io.Writer) (n int64, err error) { panic("not implemented") } - -// WriteSnapshot will snapshot the cache and write a new TSM file with its contents, releasing the snapshot when done. -func (e *Engine) WriteSnapshot() (err error) { - // Lock and grab the cache snapshot along with all the closed WAL - // filenames associated with the snapshot - - started := time.Now() - log, logEnd := logger.NewOperation(context.TODO(), e.logger, "Cache snapshot", "tsm1_cache_snapshot") - defer func() { - elapsed := time.Since(started) - if err != nil && err != errCompactionsDisabled { - e.stats.Failed.With(prometheus.Labels{levelKey: levelCache}).Inc() - } - e.stats.Duration.With(prometheus.Labels{levelKey: levelCache}).Observe(elapsed.Seconds()) - if err == nil { - log.Info("Snapshot for path written", zap.String("path", e.path), zap.Duration("duration", elapsed)) - } - logEnd() - }() - - closedFiles, snapshot, err := func() (segments []string, snapshot *Cache, err error) { - e.mu.Lock() - defer e.mu.Unlock() - - if e.WALEnabled { - if err = e.WAL.CloseSegment(); err != nil { - return - } - - segments, err = e.WAL.ClosedSegments() - if err != nil { - return - } - } - - snapshot, err = e.Cache.Snapshot() - if err != nil { - return - } - - return - }() - - if err != nil { - return err - } - - if snapshot.Size() == 0 { - e.Cache.ClearSnapshot(true) - return nil - } - - // The snapshotted cache may have duplicate points and unsorted data. We need to deduplicate - // it before writing the snapshot. This can be very expensive so it's done while we are not - // holding the engine write lock. 
- dedup := time.Now() - snapshot.Deduplicate() - e.traceLogger.Info("Snapshot for path deduplicated", - zap.String("path", e.path), - zap.Duration("duration", time.Since(dedup))) - - return e.writeSnapshotAndCommit(log, closedFiles, snapshot) -} - -// CreateSnapshot will create a temp directory that holds -// temporary hardlinks to the underlying shard files. -// skipCacheOk controls whether it is permissible to fail writing out -// in-memory cache data when a previous snapshot is in progress. -func (e *Engine) CreateSnapshot(skipCacheOk bool) (string, error) { - err := e.WriteSnapshot() - for i := 0; i < 3 && err == ErrSnapshotInProgress; i += 1 { - backoff := time.Duration(math.Pow(32, float64(i))) * time.Millisecond - time.Sleep(backoff) - err = e.WriteSnapshot() - } - if err == ErrSnapshotInProgress && skipCacheOk { - e.logger.Warn("Snapshotter busy: proceeding without cache contents") - } else if err != nil { - return "", err - } - - e.mu.RLock() - defer e.mu.RUnlock() - path, err := e.FileStore.CreateSnapshot() - if err != nil { - return "", err - } - - // Generate a snapshot of the index. - return path, nil -} - -// writeSnapshotAndCommit will write the passed cache to a new TSM file and remove the closed WAL segments. -func (e *Engine) writeSnapshotAndCommit(log *zap.Logger, closedFiles []string, snapshot *Cache) (err error) { - defer func() { - if err != nil { - e.Cache.ClearSnapshot(false) - } - }() - - // write the new snapshot files - newFiles, err := e.Compactor.WriteSnapshot(snapshot, e.logger) - if err != nil { - log.Info("Error writing snapshot from compactor", zap.Error(err)) - return err - } - - e.mu.RLock() - defer e.mu.RUnlock() - - // update the file store with these new files - if err := e.FileStore.Replace(nil, newFiles); err != nil { - log.Info("Error adding new TSM files from snapshot. Removing temp files.", zap.Error(err)) - - // Remove the new snapshot files. We will try again. - for _, file := range newFiles { - if err := os.Remove(file); err != nil { - log.Info("Unable to remove file", zap.String("path", file), zap.Error(err)) - } - } - return err - } - - // clear the snapshot from the in-memory cache, then the old WAL files - e.Cache.ClearSnapshot(true) - - if e.WALEnabled { - if err := e.WAL.Remove(closedFiles); err != nil { - log.Info("Error removing closed WAL segments", zap.Error(err)) - } - } - - return nil -} - -// compactCache continually checks if the WAL cache should be written to disk. -func (e *Engine) compactCache() { - t := time.NewTicker(time.Second) - defer t.Stop() - for { - e.mu.RLock() - quit := e.snapDone - e.mu.RUnlock() - - select { - case <-quit: - return - - case <-t.C: - if e.ShouldCompactCache(time.Now()) { - e.traceLogger.Info("Compacting cache", zap.String("path", e.path)) - err := e.WriteSnapshot() - if err != nil && err != errCompactionsDisabled { - e.logger.Info("Error writing snapshot", zap.Error(err)) - } - } - } - } -} - -// ShouldCompactCache returns true if the Cache is over its flush threshold -// or if the passed in lastWriteTime is older than the write cold threshold. 
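-// For example, with a 25 MB flush threshold and a 10m write-cold duration (typical
-// defaults; the actual values come from the engine configuration), a non-empty cache
-// is snapshotted once it grows past 25 MB or once it has gone 10 minutes without a write.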
-func (e *Engine) ShouldCompactCache(t time.Time) bool {
-	sz := e.Cache.Size()
-
-	if sz == 0 {
-	return false
-	}
-
-	if sz > e.CacheFlushMemorySizeThreshold {
-	return true
-	}
-
-	return t.Sub(e.Cache.LastWriteTime()) > e.CacheFlushWriteColdDuration
-}
-
-func (e *Engine) compact(wg *sync.WaitGroup) {
-	t := time.NewTicker(time.Second)
-	defer t.Stop()
-
-	for {
-	e.mu.RLock()
-	quit := e.done
-	e.mu.RUnlock()
-
-	select {
-	case <-quit:
-	return
-
-	case <-t.C:
-
-	// Find our compaction plans
-	level1Groups, len1 := e.CompactionPlan.PlanLevel(1)
-	level2Groups, len2 := e.CompactionPlan.PlanLevel(2)
-	level3Groups, len3 := e.CompactionPlan.PlanLevel(3)
-	level4Groups, len4 := e.CompactionPlan.Plan(e.LastModified())
-
-	e.stats.Queued.With(prometheus.Labels{levelKey: levelFull}).Set(float64(len4))
-
-	// If no full compactions are needed, see if an optimize is needed
-	if len(level4Groups) == 0 {
-	level4Groups, len4 = e.CompactionPlan.PlanOptimize()
-	e.stats.Queued.With(prometheus.Labels{levelKey: levelOpt}).Set(float64(len4))
-	}
-
-	// Update the level plan queue stats
-	// For stats, use the length needed, even if the lock was
-	// not acquired
-	e.stats.Queued.With(prometheus.Labels{levelKey: level1}).Set(float64(len1))
-	e.stats.Queued.With(prometheus.Labels{levelKey: level2}).Set(float64(len2))
-	e.stats.Queued.With(prometheus.Labels{levelKey: level3}).Set(float64(len3))
-
-	// Set the queue depths on the scheduler
-	// Use the real queue depth, dependent on acquiring
-	// the file locks.
-	e.scheduler.setDepth(1, len(level1Groups))
-	e.scheduler.setDepth(2, len(level2Groups))
-	e.scheduler.setDepth(3, len(level3Groups))
-	e.scheduler.setDepth(4, len(level4Groups))
-
-	// Find the next compaction that can run and try to kick it off
-	if level, runnable := e.scheduler.next(); runnable {
-	switch level {
-	case 1:
-	if e.compactLevel(level1Groups[0], 1, false, wg) {
-	level1Groups = level1Groups[1:]
-	}
-	case 2:
-	if e.compactLevel(level2Groups[0], 2, false, wg) {
-	level2Groups = level2Groups[1:]
-	}
-	case 3:
-	if e.compactLevel(level3Groups[0], 3, true, wg) {
-	level3Groups = level3Groups[1:]
-	}
-	case 4:
-	if e.compactFull(level4Groups[0], wg) {
-	level4Groups = level4Groups[1:]
-	}
-	}
-	}
-
-	// Release all the plans we didn't start.
-	e.CompactionPlan.Release(level1Groups)
-	e.CompactionPlan.Release(level2Groups)
-	e.CompactionPlan.Release(level3Groups)
-	e.CompactionPlan.Release(level4Groups)
-	}
-	}
-}
-
-// compactLevel kicks off compactions using the level strategy. It returns
-// true if the compaction was started.
-func (e *Engine) compactLevel(grp CompactionGroup, level int, fast bool, wg *sync.WaitGroup) bool {
-	s := e.levelCompactionStrategy(grp, fast, level)
-	if s == nil {
-	return false
-	}
-
-	if e.compactionLimiter.TryTake() {
-	{
-	val := atomic.AddInt64(e.activeCompactions.countForLevel(level), 1)
-	e.stats.Active.With(labelForLevel(level)).Set(float64(val))
-	}
-
-	wg.Add(1)
-	go func() {
-	defer wg.Done()
-	defer func() {
-	val := atomic.AddInt64(e.activeCompactions.countForLevel(level), -1)
-	e.stats.Active.With(labelForLevel(level)).Set(float64(val))
-	}()
-
-	defer e.compactionLimiter.Release()
-	s.Apply()
-	// Release the files in the compaction plan
-	e.CompactionPlan.Release([]CompactionGroup{s.group})
-	}()
-	return true
-	}
-
-	// No compaction slot available; the plan was not started.
-	return false
-}
-
-// compactFull kicks off full and optimize compactions using the low priority policy. It returns
-// true if the compaction was started.
-func (e *Engine) compactFull(grp CompactionGroup, wg *sync.WaitGroup) bool {
-	s := e.fullCompactionStrategy(grp, false)
-	if s == nil {
-	return false
-	}
-
-	// Try to take a slot from the compaction limiter.
-	if e.compactionLimiter.TryTake() {
-	{
-	val := atomic.AddInt64(&e.activeCompactions.full, 1)
-	e.stats.Active.With(prometheus.Labels{levelKey: levelFull}).Set(float64(val))
-	}
-	wg.Add(1)
-	go func() {
-	defer wg.Done()
-	defer func() {
-	val := atomic.AddInt64(&e.activeCompactions.full, -1)
-	e.stats.Active.With(prometheus.Labels{levelKey: levelFull}).Set(float64(val))
-	}()
-	defer e.compactionLimiter.Release()
-	s.Apply()
-	// Release the files in the compaction plan
-	e.CompactionPlan.Release([]CompactionGroup{s.group})
-	}()
-	return true
-	}
-	return false
-}
-
-// compactionStrategy holds the details of what to do in a compaction.
-type compactionStrategy struct {
-	group CompactionGroup
-
-	fast bool
-	level int
-
-	durationSecondsStat prometheus.Observer
-	errorStat prometheus.Counter
-
-	logger *zap.Logger
-	compactor *Compactor
-	fileStore *FileStore
-
-	engine *Engine
-}
-
-// Apply runs the compaction strategy against its group and records the duration.
-func (s *compactionStrategy) Apply() {
-	start := time.Now()
-	s.compactGroup()
-	s.durationSecondsStat.Observe(time.Since(start).Seconds())
-}
-
-// compactGroup executes the compaction strategy against a single CompactionGroup.
-func (s *compactionStrategy) compactGroup() {
-	group := s.group
-	log, logEnd := logger.NewOperation(context.TODO(), s.logger, "TSM compaction", "tsm1_compact_group")
-	defer logEnd()
-
-	log.Info("Beginning compaction", zap.Int("tsm1_files_n", len(group)))
-	for i, f := range group {
-	log.Info("Compacting file", zap.Int("tsm1_index", i), zap.String("tsm1_file", f))
-	}
-
-	var (
-	err error
-	files []string
-	)
-	if s.fast {
-	files, err = s.compactor.CompactFast(group, log)
-	} else {
-	files, err = s.compactor.CompactFull(group, log)
-	}
-
-	if err != nil {
-	_, inProgress := err.(errCompactionInProgress)
-	if err == errCompactionsDisabled || inProgress {
-	log.Info("Aborted compaction", zap.Error(err))
-
-	if _, ok := err.(errCompactionInProgress); ok {
-	time.Sleep(time.Second)
-	}
-	return
-	}
-
-	log.Warn("Error compacting TSM files", zap.Error(err))
-
-	// We hit a bad TSM file - rename so the next compaction can proceed.
-	if _, ok := err.(errBlockRead); ok {
-	path := err.(errBlockRead).file
-	log.Info("Renaming a corrupt TSM file due to compaction error", zap.Error(err))
-	if err := s.fileStore.ReplaceWithCallback([]string{path}, nil, nil); err != nil {
-	log.Info("Error removing bad TSM file", zap.Error(err))
-	} else if e := os.Rename(path, path+"."+BadTSMFileExtension); e != nil {
-	log.Info("Error renaming corrupt TSM file", zap.Error(e))
-	}
-	}
-
-	s.errorStat.Inc()
-	time.Sleep(time.Second)
-	return
-	}
-
-	if err := s.fileStore.ReplaceWithCallback(group, files, nil); err != nil {
-	log.Info("Error replacing new TSM files", zap.Error(err))
-	s.errorStat.Inc()
-	time.Sleep(time.Second)
-
-	// Remove the new snapshot files. We will try again.
-	for _, file := range files {
-	if err := os.Remove(file); err != nil {
-	log.Error("Unable to remove file", zap.String("path", file), zap.Error(err))
-	}
-	}
-	return
-	}
-
-	for i, f := range files {
-	log.Info("Compacted file", zap.Int("tsm1_index", i), zap.String("tsm1_file", f))
-	}
-	log.Info("Finished compacting files",
-	zap.Int("tsm1_files_n", len(files)))
-}
-
-// levelCompactionStrategy returns a compactionStrategy for the given level.
-// It returns nil if there are no TSM files to compact.
-func (e *Engine) levelCompactionStrategy(group CompactionGroup, fast bool, level int) *compactionStrategy {
-	label := labelForLevel(level)
-	return &compactionStrategy{
-	group: group,
-	logger: e.logger.With(zap.Int("tsm1_level", level), zap.String("tsm1_strategy", "level")),
-	fileStore: e.FileStore,
-	compactor: e.Compactor,
-	fast: fast,
-	engine: e,
-	level: level,
-
-	errorStat: e.stats.Failed.With(label),
-	durationSecondsStat: e.stats.Duration.With(label),
-	}
-}
-
-// fullCompactionStrategy returns a compactionStrategy for higher level generations of TSM files.
-// It returns nil if there are no TSM files to compact.
-func (e *Engine) fullCompactionStrategy(group CompactionGroup, optimize bool) *compactionStrategy {
-	s := &compactionStrategy{
-	group: group,
-	logger: e.logger.With(zap.String("tsm1_strategy", "full"), zap.Bool("tsm1_optimize", optimize)),
-	fileStore: e.FileStore,
-	compactor: e.Compactor,
-	fast: optimize,
-	engine: e,
-	level: 4,
-	}
-
-	plabel := prometheus.Labels{levelKey: levelFull}
-	if optimize {
-	plabel = prometheus.Labels{levelKey: levelOpt}
-	}
-	s.errorStat = e.stats.Failed.With(plabel)
-	s.durationSecondsStat = e.stats.Duration.With(plabel)
-	return s
-}
-
-// reloadCache reads the WAL segment files and loads them into the cache.
-func (e *Engine) reloadCache() error {
-	now := time.Now()
-	files, err := segmentFileNames(e.WAL.Path())
-	if err != nil {
-	return err
-	}
-
-	limit := e.Cache.MaxSize()
-	defer func() {
-	e.Cache.SetMaxSize(limit)
-	}()
-
-	// Disable the max size during loading
-	e.Cache.SetMaxSize(0)
-
-	loader := NewCacheLoader(files)
-	loader.WithLogger(e.logger)
-	if err := loader.Load(e.Cache); err != nil {
-	return err
-	}
-
-	e.traceLogger.Info("Reloaded WAL cache",
-	zap.String("path", e.WAL.Path()), zap.Duration("duration", time.Since(now)))
-	return nil
-}
-
-// cleanup removes all temp files and dirs that exist on disk. This should only be run at startup to avoid
-// removing tmp files that are still in use.
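-// Two kinds of leftovers are removed: directories ending in the tmp TSM extension
-// (left behind by failed shard snapshots) and temp compaction files matching the
-// compaction temp extension.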
-func (e *Engine) cleanup() error { - allfiles, err := os.ReadDir(e.path) - if os.IsNotExist(err) { - return nil - } else if err != nil { - return err - } - - ext := fmt.Sprintf(".%s", TmpTSMFileExtension) - for _, f := range allfiles { - // Check to see if there are any `.tmp` directories that were left over from failed shard snapshots - if f.IsDir() && strings.HasSuffix(f.Name(), ext) { - if err := os.RemoveAll(filepath.Join(e.path, f.Name())); err != nil { - return fmt.Errorf("error removing tmp snapshot directory %q: %s", f.Name(), err) - } - } - } - - return e.cleanupTempTSMFiles() -} - -func (e *Engine) cleanupTempTSMFiles() error { - files, err := filepath.Glob(filepath.Join(e.path, fmt.Sprintf("*.%s", CompactionTempExtension))) - if err != nil { - return fmt.Errorf("error getting compaction temp files: %s", err.Error()) - } - - for _, f := range files { - if err := os.Remove(f); err != nil { - return fmt.Errorf("error removing temp compaction files: %v", err) - } - } - return nil -} - -// KeyCursor returns a KeyCursor for the given key starting at time t. -func (e *Engine) KeyCursor(ctx context.Context, key []byte, t int64, ascending bool) *KeyCursor { - return e.FileStore.KeyCursor(ctx, key, t, ascending) -} - -// CreateIterator returns an iterator for the measurement based on opt. -func (e *Engine) CreateIterator(ctx context.Context, measurement string, opt query.IteratorOptions) (query.Iterator, error) { - if span := tracing.SpanFromContext(ctx); span != nil { - labels := []string{"shard_id", strconv.Itoa(int(e.id)), "measurement", measurement} - if opt.Condition != nil { - labels = append(labels, "cond", opt.Condition.String()) - } - - span = span.StartSpan("create_iterator") - span.SetLabels(labels...) - ctx = tracing.NewContextWithSpan(ctx, span) - - group := metrics.NewGroup(tsmGroup) - ctx = metrics.NewContextWithGroup(ctx, group) - start := time.Now() - - defer group.GetTimer(planningTimer).UpdateSince(start) - } - - if call, ok := opt.Expr.(*influxql.Call); ok { - if opt.Interval.IsZero() { - if call.Name == "first" || call.Name == "last" { - refOpt := opt - refOpt.Limit = 1 - refOpt.Ascending = call.Name == "first" - refOpt.Ordered = true - refOpt.Expr = call.Args[0] - - itrs, err := e.createVarRefIterator(ctx, measurement, refOpt) - if err != nil { - return nil, err - } - return newMergeFinalizerIterator(ctx, itrs, opt, e.logger) - } - } - - inputs, err := e.createCallIterator(ctx, measurement, call, opt) - if err != nil { - return nil, err - } else if len(inputs) == 0 { - return nil, nil - } - return newMergeFinalizerIterator(ctx, inputs, opt, e.logger) - } - - itrs, err := e.createVarRefIterator(ctx, measurement, opt) - if err != nil { - return nil, err - } - return newMergeFinalizerIterator(ctx, itrs, opt, e.logger) -} - -// createSeriesIterator creates an optimized series iterator if possible. -// We exclude less-common cases for now as not worth implementing. 
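-// The fast path only applies to a bare "_seriesKey" reference with no aux fields,
-// no dimensions, no SLIMIT/SOFFSET, no StripName and no ordering requirement, so such
-// queries (e.g. counting series) can be answered from the index alone without building
-// per-field cursors.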
-func (e *Engine) createSeriesIterator(measurement string, ref *influxql.VarRef, is tsdb.IndexSet, opt query.IteratorOptions) (query.Iterator, error) { - // Main check to see if we are trying to create a seriesKey iterator - if ref == nil || ref.Val != "_seriesKey" || len(opt.Aux) != 0 { - return nil, nil - } - // Check some other cases that we could maybe handle, but don't - if len(opt.Dimensions) > 0 { - return nil, nil - } - if opt.SLimit != 0 || opt.SOffset != 0 { - return nil, nil - } - if opt.StripName { - return nil, nil - } - if opt.Ordered { - return nil, nil - } - // Actual creation of the iterator - seriesCursor, err := is.MeasurementSeriesKeyByExprIterator([]byte(measurement), opt.Condition, opt.Authorizer) - if err != nil { - seriesCursor.Close() - return nil, err - } - var seriesIterator query.Iterator - seriesIterator = newSeriesIterator(measurement, seriesCursor) - if opt.InterruptCh != nil { - seriesIterator = query.NewInterruptIterator(seriesIterator, opt.InterruptCh) - } - return seriesIterator, nil -} - -func (e *Engine) createCallIterator(ctx context.Context, measurement string, call *influxql.Call, opt query.IteratorOptions) ([]query.Iterator, error) { - ref, _ := call.Args[0].(*influxql.VarRef) - - if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil { - return nil, err - } else if !exists { - return nil, nil - } - - // check for optimized series iteration for tsi index - if e.index.Type() == tsdb.TSI1IndexName { - indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} - seriesOpt := opt - if len(opt.Dimensions) == 0 && (call.Name == "count" || call.Name == "sum_hll") { - // no point ordering the series if we are just counting all of them - seriesOpt.Ordered = false - } - seriesIterator, err := e.createSeriesIterator(measurement, ref, indexSet, seriesOpt) - if err != nil { - return nil, err - } - if seriesIterator != nil { - callIterator, err := query.NewCallIterator(seriesIterator, opt) - if err != nil { - seriesIterator.Close() - return nil, err - } - return []query.Iterator{callIterator}, nil - } - } - - // Determine tagsets for this measurement based on dimensions and filters. - var ( - tagSets []*query.TagSet - err error - ) - indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} - tagSets, err = indexSet.TagSets(e.sfile, []byte(measurement), opt) - - if err != nil { - return nil, err - } - - // Reverse the tag sets if we are ordering by descending. - if !opt.Ascending { - for _, t := range tagSets { - t.Reverse() - } - } - - // Calculate tag sets and apply SLIMIT/SOFFSET. - tagSets = query.LimitTagSets(tagSets, opt.SLimit, opt.SOffset) - - itrs := make([]query.Iterator, 0, len(tagSets)) - if err := func() error { - for _, t := range tagSets { - // Abort if the query was killed - select { - case <-opt.InterruptCh: - query.Iterators(itrs).Close() - return query.ErrQueryInterrupted - default: - } - - inputs, err := e.createTagSetIterators(ctx, ref, measurement, t, opt) - if err != nil { - return err - } else if len(inputs) == 0 { - continue - } - - // Wrap each series in a call iterator. 
- for i, input := range inputs { - if opt.InterruptCh != nil { - input = query.NewInterruptIterator(input, opt.InterruptCh) - } - - itr, err := query.NewCallIterator(input, opt) - if err != nil { - query.Iterators(inputs).Close() - return err - } - inputs[i] = itr - } - - itr := query.NewParallelMergeIterator(inputs, opt, runtime.GOMAXPROCS(0)) - itrs = append(itrs, itr) - } - return nil - }(); err != nil { - query.Iterators(itrs).Close() - return nil, err - } - - return itrs, nil -} - -// createVarRefIterator creates an iterator for a variable reference. -func (e *Engine) createVarRefIterator(ctx context.Context, measurement string, opt query.IteratorOptions) ([]query.Iterator, error) { - ref, _ := opt.Expr.(*influxql.VarRef) - - if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil { - return nil, err - } else if !exists { - return nil, nil - } - - var ( - tagSets []*query.TagSet - err error - ) - indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} - tagSets, err = indexSet.TagSets(e.sfile, []byte(measurement), opt) - - if err != nil { - return nil, err - } - - // Reverse the tag sets if we are ordering by descending. - if !opt.Ascending { - for _, t := range tagSets { - t.Reverse() - } - } - - // Calculate tag sets and apply SLIMIT/SOFFSET. - tagSets = query.LimitTagSets(tagSets, opt.SLimit, opt.SOffset) - itrs := make([]query.Iterator, 0, len(tagSets)) - if err := func() error { - for _, t := range tagSets { - inputs, err := e.createTagSetIterators(ctx, ref, measurement, t, opt) - if err != nil { - return err - } else if len(inputs) == 0 { - continue - } - - // If we have a LIMIT or OFFSET and the grouping of the outer query - // is different than the current grouping, we need to perform the - // limit on each of the individual series keys instead to improve - // performance. - if (opt.Limit > 0 || opt.Offset > 0) && len(opt.Dimensions) != len(opt.GroupBy) { - for i, input := range inputs { - inputs[i] = newLimitIterator(input, opt) - } - } - - itr, err := query.Iterators(inputs).Merge(opt) - if err != nil { - query.Iterators(inputs).Close() - return err - } - - // Apply a limit on the merged iterator. - if opt.Limit > 0 || opt.Offset > 0 { - if len(opt.Dimensions) == len(opt.GroupBy) { - // When the final dimensions and the current grouping are - // the same, we will only produce one series so we can use - // the faster limit iterator. - itr = newLimitIterator(itr, opt) - } else { - // When the dimensions are different than the current - // grouping, we need to account for the possibility there - // will be multiple series. The limit iterator in the - // influxql package handles that scenario. - itr = query.NewLimitIterator(itr, opt) - } - } - itrs = append(itrs, itr) - } - return nil - }(); err != nil { - query.Iterators(itrs).Close() - return nil, err - } - - return itrs, nil -} - -// createTagSetIterators creates a set of iterators for a tagset. -func (e *Engine) createTagSetIterators(ctx context.Context, ref *influxql.VarRef, name string, t *query.TagSet, opt query.IteratorOptions) ([]query.Iterator, error) { - // Set parallelism by number of logical cpus. - parallelism := runtime.GOMAXPROCS(0) - if parallelism > len(t.SeriesKeys) { - parallelism = len(t.SeriesKeys) - } - - // Create series key groupings w/ return error. - groups := make([]struct { - keys []string - filters []influxql.Expr - itrs []query.Iterator - err error - }, parallelism) - - // Group series keys. 
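-	// Keys are split into `parallelism` contiguous chunks of n = len/parallelism keys,
-	// with the final group absorbing the remainder: e.g. 8 series keys across 3 groups
-	// yields groups of 2, 2 and 4 keys.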
- n := len(t.SeriesKeys) / parallelism - for i := 0; i < parallelism; i++ { - group := &groups[i] - - if i < parallelism-1 { - group.keys = t.SeriesKeys[i*n : (i+1)*n] - group.filters = t.Filters[i*n : (i+1)*n] - } else { - group.keys = t.SeriesKeys[i*n:] - group.filters = t.Filters[i*n:] - } - } - - // Read series groups in parallel. - var wg sync.WaitGroup - for i := range groups { - wg.Add(1) - go func(i int) { - defer wg.Done() - groups[i].itrs, groups[i].err = e.createTagSetGroupIterators(ctx, ref, name, groups[i].keys, t, groups[i].filters, opt) - }(i) - } - wg.Wait() - - // Determine total number of iterators so we can allocate only once. - var itrN int - for _, group := range groups { - itrN += len(group.itrs) - } - - // Combine all iterators together and check for errors. - var err error - itrs := make([]query.Iterator, 0, itrN) - for _, group := range groups { - if group.err != nil { - err = group.err - } - itrs = append(itrs, group.itrs...) - } - - // If an error occurred, make sure we close all created iterators. - if err != nil { - query.Iterators(itrs).Close() - return nil, err - } - - return itrs, nil -} - -// createTagSetGroupIterators creates a set of iterators for a subset of a tagset's series. -func (e *Engine) createTagSetGroupIterators(ctx context.Context, ref *influxql.VarRef, name string, seriesKeys []string, t *query.TagSet, filters []influxql.Expr, opt query.IteratorOptions) ([]query.Iterator, error) { - itrs := make([]query.Iterator, 0, len(seriesKeys)) - for i, seriesKey := range seriesKeys { - var conditionFields []influxql.VarRef - if filters[i] != nil { - // Retrieve non-time fields from this series filter and filter out tags. - conditionFields = influxql.ExprNames(filters[i]) - } - - itr, err := e.createVarRefSeriesIterator(ctx, ref, name, seriesKey, t, filters[i], conditionFields, opt) - if err != nil { - return itrs, err - } else if itr == nil { - continue - } - itrs = append(itrs, itr) - - // Abort if the query was killed - select { - case <-opt.InterruptCh: - query.Iterators(itrs).Close() - return nil, query.ErrQueryInterrupted - default: - } - - // Enforce series limit at creation time. - if opt.MaxSeriesN > 0 && len(itrs) > opt.MaxSeriesN { - query.Iterators(itrs).Close() - return nil, fmt.Errorf("max-select-series limit exceeded: (%d/%d)", len(itrs), opt.MaxSeriesN) - } - - } - return itrs, nil -} - -// createVarRefSeriesIterator creates an iterator for a variable reference for a series. -func (e *Engine) createVarRefSeriesIterator(ctx context.Context, ref *influxql.VarRef, name string, seriesKey string, t *query.TagSet, filter influxql.Expr, conditionFields []influxql.VarRef, opt query.IteratorOptions) (query.Iterator, error) { - _, tfs := models.ParseKey([]byte(seriesKey)) - tags := query.NewTags(tfs.Map()) - - // Create options specific for this series. - itrOpt := opt - itrOpt.Condition = filter - - var curCounter, auxCounter, condCounter *metrics.Counter - if col := metrics.GroupFromContext(ctx); col != nil { - curCounter = col.GetCounter(numberOfRefCursorsCounter) - auxCounter = col.GetCounter(numberOfAuxCursorsCounter) - condCounter = col.GetCounter(numberOfCondCursorsCounter) - } - - // Build main cursor. - var cur cursor - if ref != nil { - cur = e.buildCursor(ctx, name, seriesKey, tfs, ref, opt) - // If the field doesn't exist then don't build an iterator. - if cur == nil { - return nil, nil - } - if curCounter != nil { - curCounter.Add(1) - } - } - - // Build auxiliary cursors. - // Tag values should be returned if the field doesn't exist. 
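-	// For example (hypothetical query) SELECT value, host FROM cpu: "value" is a field and
-	// gets a buffered field cursor, "host" is a tag and is answered from the parsed series
-	// key, and a field that exists nowhere falls back to a typed nil cursor.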
- var aux []cursorAt - if len(opt.Aux) > 0 { - aux = make([]cursorAt, len(opt.Aux)) - for i, ref := range opt.Aux { - // Create cursor from field if a tag wasn't requested. - if ref.Type != influxql.Tag { - cur := e.buildCursor(ctx, name, seriesKey, tfs, &ref, opt) - if cur != nil { - if auxCounter != nil { - auxCounter.Add(1) - } - aux[i] = newBufCursor(cur, opt.Ascending) - continue - } - - // If a field was requested, use a nil cursor of the requested type. - switch ref.Type { - case influxql.Float, influxql.AnyField: - aux[i] = nilFloatLiteralValueCursor - continue - case influxql.Integer: - aux[i] = nilIntegerLiteralValueCursor - continue - case influxql.Unsigned: - aux[i] = nilUnsignedLiteralValueCursor - continue - case influxql.String: - aux[i] = nilStringLiteralValueCursor - continue - case influxql.Boolean: - aux[i] = nilBooleanLiteralValueCursor - continue - } - } - - // If field doesn't exist, use the tag value. - if v := tags.Value(ref.Val); v == "" { - // However, if the tag value is blank then return a null. - aux[i] = nilStringLiteralValueCursor - } else { - aux[i] = &literalValueCursor{value: v} - } - } - } - - // Remove _tagKey condition field. - // We can't seach on it because we can't join it to _tagValue based on time. - if varRefSliceContains(conditionFields, "_tagKey") { - conditionFields = varRefSliceRemove(conditionFields, "_tagKey") - - // Remove _tagKey conditional references from iterator. - itrOpt.Condition = influxql.RewriteExpr(influxql.CloneExpr(itrOpt.Condition), func(expr influxql.Expr) influxql.Expr { - switch expr := expr.(type) { - case *influxql.BinaryExpr: - if ref, ok := expr.LHS.(*influxql.VarRef); ok && ref.Val == "_tagKey" { - return &influxql.BooleanLiteral{Val: true} - } - if ref, ok := expr.RHS.(*influxql.VarRef); ok && ref.Val == "_tagKey" { - return &influxql.BooleanLiteral{Val: true} - } - } - return expr - }) - } - - // Build conditional field cursors. - // If a conditional field doesn't exist then ignore the series. - var conds []cursorAt - if len(conditionFields) > 0 { - conds = make([]cursorAt, len(conditionFields)) - for i, ref := range conditionFields { - // Create cursor from field if a tag wasn't requested. - if ref.Type != influxql.Tag { - cur := e.buildCursor(ctx, name, seriesKey, tfs, &ref, opt) - if cur != nil { - if condCounter != nil { - condCounter.Add(1) - } - conds[i] = newBufCursor(cur, opt.Ascending) - continue - } - - // If a field was requested, use a nil cursor of the requested type. - switch ref.Type { - case influxql.Float, influxql.AnyField: - conds[i] = nilFloatLiteralValueCursor - continue - case influxql.Integer: - conds[i] = nilIntegerLiteralValueCursor - continue - case influxql.Unsigned: - conds[i] = nilUnsignedLiteralValueCursor - continue - case influxql.String: - conds[i] = nilStringLiteralValueCursor - continue - case influxql.Boolean: - conds[i] = nilBooleanLiteralValueCursor - continue - } - } - - // If field doesn't exist, use the tag value. - if v := tags.Value(ref.Val); v == "" { - // However, if the tag value is blank then return a null. - conds[i] = nilStringLiteralValueCursor - } else { - conds[i] = &literalValueCursor{value: v} - } - } - } - condNames := influxql.VarRefs(conditionFields).Strings() - - // Limit tags to only the dimensions selected. - dimensions := opt.GetDimensions() - tags = tags.Subset(dimensions) - - // If it's only auxiliary fields then it doesn't matter what type of iterator we use. 
- if ref == nil { - if opt.StripName { - name = "" - } - return newFloatIterator(name, tags, itrOpt, nil, aux, conds, condNames), nil - } - - // Remove name if requested. - if opt.StripName { - name = "" - } - - switch cur := cur.(type) { - case floatCursor: - return newFloatIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil - case integerCursor: - return newIntegerIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil - case unsignedCursor: - return newUnsignedIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil - case stringCursor: - return newStringIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil - case booleanCursor: - return newBooleanIterator(name, tags, itrOpt, cur, aux, conds, condNames), nil - default: - panic("unreachable") - } -} - -// buildCursor creates an untyped cursor for a field. -func (e *Engine) buildCursor(ctx context.Context, measurement, seriesKey string, tags models.Tags, ref *influxql.VarRef, opt query.IteratorOptions) cursor { - // Check if this is a system field cursor. - switch ref.Val { - case "_name": - return &stringSliceCursor{values: []string{measurement}} - case "_tagKey": - return &stringSliceCursor{values: tags.Keys()} - case "_tagValue": - return &stringSliceCursor{values: matchTagValues(tags, opt.Condition)} - case "_seriesKey": - return &stringSliceCursor{values: []string{seriesKey}} - } - - // Look up fields for measurement. - mf := e.fieldset.FieldsByString(measurement) - if mf == nil { - return nil - } - - // Check for system field for field keys. - if ref.Val == "_fieldKey" { - return &stringSliceCursor{values: mf.FieldKeys()} - } - - // Find individual field. - f := mf.Field(ref.Val) - if f == nil { - return nil - } - - // Check if we need to perform a cast. Performing a cast in the - // engine (if it is possible) is much more efficient than an automatic cast. - if ref.Type != influxql.Unknown && ref.Type != influxql.AnyField && ref.Type != f.Type { - switch ref.Type { - case influxql.Float: - switch f.Type { - case influxql.Integer: - cur := e.buildIntegerCursor(ctx, measurement, seriesKey, ref.Val, opt) - return &floatCastIntegerCursor{cursor: cur} - case influxql.Unsigned: - cur := e.buildUnsignedCursor(ctx, measurement, seriesKey, ref.Val, opt) - return &floatCastUnsignedCursor{cursor: cur} - } - case influxql.Integer: - switch f.Type { - case influxql.Float: - cur := e.buildFloatCursor(ctx, measurement, seriesKey, ref.Val, opt) - return &integerCastFloatCursor{cursor: cur} - case influxql.Unsigned: - cur := e.buildUnsignedCursor(ctx, measurement, seriesKey, ref.Val, opt) - return &integerCastUnsignedCursor{cursor: cur} - } - case influxql.Unsigned: - switch f.Type { - case influxql.Float: - cur := e.buildFloatCursor(ctx, measurement, seriesKey, ref.Val, opt) - return &unsignedCastFloatCursor{cursor: cur} - case influxql.Integer: - cur := e.buildIntegerCursor(ctx, measurement, seriesKey, ref.Val, opt) - return &unsignedCastIntegerCursor{cursor: cur} - } - } - return nil - } - - // Return appropriate cursor based on type. 
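-	// For example, selecting an integer field as value::float is handled by the cast switch
-	// above (the integer cursor is wrapped in floatCastIntegerCursor); requests for the
-	// field's native type fall through to the switch below.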
- switch f.Type { - case influxql.Float: - return e.buildFloatCursor(ctx, measurement, seriesKey, ref.Val, opt) - case influxql.Integer: - return e.buildIntegerCursor(ctx, measurement, seriesKey, ref.Val, opt) - case influxql.Unsigned: - return e.buildUnsignedCursor(ctx, measurement, seriesKey, ref.Val, opt) - case influxql.String: - return e.buildStringCursor(ctx, measurement, seriesKey, ref.Val, opt) - case influxql.Boolean: - return e.buildBooleanCursor(ctx, measurement, seriesKey, ref.Val, opt) - default: - panic("unreachable") - } -} - -func matchTagValues(tags models.Tags, condition influxql.Expr) []string { - if condition == nil { - return tags.Values() - } - - // Populate map with tag values. - data := map[string]interface{}{} - for _, tag := range tags { - data[string(tag.Key)] = string(tag.Value) - } - - // Match against each specific tag. - var values []string - for _, tag := range tags { - data["_tagKey"] = string(tag.Key) - if influxql.EvalBool(condition, data) { - values = append(values, string(tag.Value)) - } - } - return values -} - -// IteratorCost produces the cost of an iterator. -func (e *Engine) IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error) { - // Determine if this measurement exists. If it does not, then no shards are - // accessed to begin with. - if exists, err := e.index.MeasurementExists([]byte(measurement)); err != nil { - return query.IteratorCost{}, err - } else if !exists { - return query.IteratorCost{}, nil - } - - // Determine all of the tag sets for this query. - indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} - tagSets, err := indexSet.TagSets(e.sfile, []byte(measurement), opt) - if err != nil { - return query.IteratorCost{}, err - } - - // Attempt to retrieve the ref from the main expression (if it exists). - var ref *influxql.VarRef - if opt.Expr != nil { - if v, ok := opt.Expr.(*influxql.VarRef); ok { - ref = v - } else if call, ok := opt.Expr.(*influxql.Call); ok { - if len(call.Args) > 0 { - ref, _ = call.Args[0].(*influxql.VarRef) - } - } - } - - // Count the number of series concatenated from the tag set. - cost := query.IteratorCost{NumShards: 1} - for _, t := range tagSets { - cost.NumSeries += int64(len(t.SeriesKeys)) - for i, key := range t.SeriesKeys { - // Retrieve the cost for the main expression (if it exists). - if ref != nil { - c := e.seriesCost(key, ref.Val, opt.StartTime, opt.EndTime) - cost = cost.Combine(c) - } - - // Retrieve the cost for every auxiliary field since these are also - // iterators that we may have to look through. - // We may want to separate these though as we are unlikely to incur - // anywhere close to the full costs of the auxiliary iterators because - // many of the selected values are usually skipped. - for _, ref := range opt.Aux { - c := e.seriesCost(key, ref.Val, opt.StartTime, opt.EndTime) - cost = cost.Combine(c) - } - - // Retrieve the expression names in the condition (if there is a condition). - // We will also create cursors for these too. - if t.Filters[i] != nil { - refs := influxql.ExprNames(t.Filters[i]) - for _, ref := range refs { - c := e.seriesCost(key, ref.Val, opt.StartTime, opt.EndTime) - cost = cost.Combine(c) - } - } - } - } - return cost, nil -} - -// Type returns FieldType for a series. If the series does not -// exist, ErrUnknownFieldType is returned. 
-func (e *Engine) Type(series []byte) (models.FieldType, error) { - if typ, err := e.Cache.Type(series); err == nil { - return typ, nil - } - - typ, err := e.FileStore.Type(series) - if err != nil { - return 0, err - } - switch typ { - case BlockFloat64: - return models.Float, nil - case BlockInteger: - return models.Integer, nil - case BlockUnsigned: - return models.Unsigned, nil - case BlockString: - return models.String, nil - case BlockBoolean: - return models.Boolean, nil - } - return 0, tsdb.ErrUnknownFieldType -} - -func (e *Engine) seriesCost(seriesKey, field string, tmin, tmax int64) query.IteratorCost { - key := SeriesFieldKeyBytes(seriesKey, field) - c := e.FileStore.Cost(key, tmin, tmax) - - // Retrieve the range of values within the cache. - cacheValues := e.Cache.Values(key) - c.CachedValues = int64(len(cacheValues.Include(tmin, tmax))) - return c -} - -// SeriesFieldKey combine a series key and field name for a unique string to be hashed to a numeric ID. -func SeriesFieldKey(seriesKey, field string) string { - return seriesKey + keyFieldSeparator + field -} - -func SeriesFieldKeyBytes(seriesKey, field string) []byte { - b := make([]byte, len(seriesKey)+len(keyFieldSeparator)+len(field)) - i := copy(b, seriesKey) - i += copy(b[i:], keyFieldSeparatorBytes) - copy(b[i:], field) - return b -} - -var ( - blockToFieldType = [8]influxql.DataType{ - BlockFloat64: influxql.Float, - BlockInteger: influxql.Integer, - BlockBoolean: influxql.Boolean, - BlockString: influxql.String, - BlockUnsigned: influxql.Unsigned, - 5: influxql.Unknown, - 6: influxql.Unknown, - 7: influxql.Unknown, - } -) - -func BlockTypeToInfluxQLDataType(typ byte) influxql.DataType { return blockToFieldType[typ&7] } - -// SeriesAndFieldFromCompositeKey returns the series key and the field key extracted from the composite key. -func SeriesAndFieldFromCompositeKey(key []byte) ([]byte, []byte) { - series, field, _ := bytes.Cut(key, keyFieldSeparatorBytes) - return series, field -} - -func varRefSliceContains(a []influxql.VarRef, v string) bool { - for _, ref := range a { - if ref.Val == v { - return true - } - } - return false -} - -func varRefSliceRemove(a []influxql.VarRef, v string) []influxql.VarRef { - if !varRefSliceContains(a, v) { - return a - } - - other := make([]influxql.VarRef, 0, len(a)) - for _, ref := range a { - if ref.Val != v { - other = append(other, ref) - } - } - return other -} - -const reindexBatchSize = 10000 - -func (e *Engine) Reindex() error { - keys := make([][]byte, reindexBatchSize) - seriesKeys := make([][]byte, reindexBatchSize) - names := make([][]byte, reindexBatchSize) - tags := make([]models.Tags, reindexBatchSize) - - n := 0 - - reindexBatch := func() error { - if n == 0 { - return nil - } - - for i, key := range keys[:n] { - seriesKeys[i], _ = SeriesAndFieldFromCompositeKey(key) - names[i], tags[i] = models.ParseKeyBytes(seriesKeys[i]) - e.traceLogger.Debug( - "Read series during reindexing", - logger.Shard(e.id), - zap.String("name", string(names[i])), - zap.String("tags", tags[i].String()), - ) - } - - e.logger.Debug("Reindexing data batch", logger.Shard(e.id), zap.Int("batch_size", n)) - if err := e.index.CreateSeriesListIfNotExists(seriesKeys[:n], names[:n], tags[:n]); err != nil { - return err - } - - n = 0 - return nil - } - reindexKey := func(key []byte) error { - keys[n] = key - n++ - - if n < reindexBatchSize { - return nil - } - return reindexBatch() - } - - // Index data stored in TSM files. 
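-	// Each TSM key is a composite of series key and field, e.g. "cpu,host=A#!~#value";
-	// reindexKey batches keys up and reindexBatch splits off the series key and re-creates
-	// it in the index, reindexBatchSize (10000) keys at a time.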
- e.logger.Info("Reindexing TSM data", logger.Shard(e.id)) - if err := e.FileStore.WalkKeys(nil, func(key []byte, _ byte) error { - return reindexKey(key) - }); err != nil { - return err - } - - // Make sure all TSM data is indexed. - if err := reindexBatch(); err != nil { - return err - } - - if !e.WALEnabled { - // All done. - return nil - } - - // Reindex data stored in the WAL cache. - e.logger.Info("Reindexing WAL data", logger.Shard(e.id)) - for _, key := range e.Cache.Keys() { - if err := reindexKey(key); err != nil { - return err - } - } - - // Make sure all WAL data is indexed. - return reindexBatch() -} diff --git a/tsdb/engine/tsm1/engine_cursor.go b/tsdb/engine/tsm1/engine_cursor.go deleted file mode 100644 index 171ad5bfbc1..00000000000 --- a/tsdb/engine/tsm1/engine_cursor.go +++ /dev/null @@ -1,11 +0,0 @@ -package tsm1 - -import ( - "context" - - "github.com/influxdata/influxdb/v2/tsdb" -) - -func (e *Engine) CreateCursorIterator(ctx context.Context) (tsdb.CursorIterator, error) { - return &arrayCursorIterator{e: e}, nil -} diff --git a/tsdb/engine/tsm1/engine_internal_test.go b/tsdb/engine/tsm1/engine_internal_test.go deleted file mode 100644 index 0205fea5ec4..00000000000 --- a/tsdb/engine/tsm1/engine_internal_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package tsm1 - -import ( - "context" - "os" - "path/filepath" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestEngine_ConcurrentShardSnapshots(t *testing.T) { - tmpDir := t.TempDir() - - tmpShard := filepath.Join(tmpDir, "shard") - tmpWal := filepath.Join(tmpDir, "wal") - - sfile := NewSeriesFile(t, tmpDir) - defer sfile.Close() - - opts := tsdb.NewEngineOptions() - opts.Config.WALDir = filepath.Join(tmpDir, "wal") - opts.SeriesIDSets = seriesIDSets([]*tsdb.SeriesIDSet{}) - - sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile, opts) - require.NoError(t, sh.Open(context.Background()), "error opening shard") - defer sh.Close() - - points := make([]models.Point, 0, 10000) - for i := 0; i < cap(points); i++ { - points = append(points, models.MustNewPoint( - "cpu", - models.NewTags(map[string]string{"host": "server"}), - map[string]interface{}{"value": 1.0}, - time.Unix(int64(i), 0), - )) - } - err := sh.WritePoints(context.Background(), points) - require.NoError(t, err) - - engineInterface, err := sh.Engine() - require.NoError(t, err, "error retrieving shard engine") - - // Get the struct underlying the interface. Not a recommended practice. - realEngineStruct, ok := (engineInterface).(*Engine) - if !ok { - t.Log("Engine type does not permit simulating Cache race conditions") - return - } - // fake a race condition in snapshotting the cache. 
- realEngineStruct.Cache.snapshotting = true - defer func() { - realEngineStruct.Cache.snapshotting = false - }() - - snapshotFunc := func(skipCacheOk bool) { - if f, err := sh.CreateSnapshot(skipCacheOk); err == nil { - require.NoError(t, os.RemoveAll(f), "error cleaning up TestEngine_ConcurrentShardSnapshots") - } else if err == ErrSnapshotInProgress { - if skipCacheOk { - t.Fatalf("failing to ignore this error,: %s", err.Error()) - } - } else { - t.Fatalf("error creating shard snapshot: %s", err.Error()) - } - } - - // Permit skipping cache in the snapshot - snapshotFunc(true) - // do not permit skipping the cache in the snapshot - snapshotFunc(false) - realEngineStruct.Cache.snapshotting = false -} - -// NewSeriesFile returns a new instance of SeriesFile with a temporary file path. -func NewSeriesFile(tb testing.TB, tmpDir string) *tsdb.SeriesFile { - tb.Helper() - - dir := tb.TempDir() - f := tsdb.NewSeriesFile(dir) - f.Logger = zaptest.NewLogger(tb) - if err := f.Open(); err != nil { - panic(err) - } - return f -} - -type seriesIDSets []*tsdb.SeriesIDSet - -func (a seriesIDSets) ForEach(f func(ids *tsdb.SeriesIDSet)) error { - for _, v := range a { - f(v) - } - return nil -} diff --git a/tsdb/engine/tsm1/engine_test.go b/tsdb/engine/tsm1/engine_test.go deleted file mode 100644 index e8fd76c5165..00000000000 --- a/tsdb/engine/tsm1/engine_test.go +++ /dev/null @@ -1,2832 +0,0 @@ -package tsm1_test - -import ( - "archive/tar" - "bytes" - "context" - "fmt" - "io" - "math" - "math/rand" - "os" - "path" - "path/filepath" - "reflect" - "runtime" - "strings" - "sync" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/deep" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" - "github.com/influxdata/influxql" - tassert "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -// Ensure that deletes only sent to the WAL will clear out the data from the cache on restart -func TestEngine_DeleteWALLoadMetadata(t *testing.T) { - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - e := MustOpenEngine(t, index) - - if err := e.WritePointsString( - `cpu,host=A value=1.1 1000000000`, - `cpu,host=B value=1.2 2000000000`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - // Remove series. - itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}} - if err := e.DeleteSeriesRange(context.Background(), itr, math.MinInt64, math.MaxInt64); err != nil { - t.Fatalf("failed to delete series: %s", err.Error()) - } - - // Ensure we can close and load index from the WAL - if err := e.Reopen(); err != nil { - t.Fatal(err) - } - - if exp, got := 0, len(e.Cache.Values(tsm1.SeriesFieldKeyBytes("cpu,host=A", "value"))); exp != got { - t.Fatalf("unexpected number of values: got: %d. exp: %d", got, exp) - } - - if exp, got := 1, len(e.Cache.Values(tsm1.SeriesFieldKeyBytes("cpu,host=B", "value"))); exp != got { - t.Fatalf("unexpected number of values: got: %d. 
exp: %d", got, exp) - } - }) - } -} - -// See https://github.com/influxdata/influxdb/v2/issues/14229 -func TestEngine_DeleteSeriesAfterCacheSnapshot(t *testing.T) { - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - e := MustOpenEngine(t, index) - - if err := e.WritePointsString( - `cpu,host=A value=1.1 1000000000`, - `cpu,host=B value=1.2 2000000000`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) - e.CreateSeriesIfNotExists([]byte("cpu,host=B"), []byte("cpu"), models.NewTags(map[string]string{"host": "B"})) - - // Verify series exist. - n, err := seriesExist(e, "cpu", []string{"host"}) - if err != nil { - t.Fatal(err) - } else if got, exp := n, 2; got != exp { - t.Fatalf("got %d points, expected %d", got, exp) - } - - // Simulate restart of server - if err := e.Reopen(); err != nil { - t.Fatal(err) - } - - // Snapshot the cache - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - // Verify series exist. - n, err = seriesExist(e, "cpu", []string{"host"}) - if err != nil { - t.Fatal(err) - } else if got, exp := n, 2; got != exp { - t.Fatalf("got %d points, expected %d", got, exp) - } - - // Delete the series - itr := &seriesIterator{keys: [][]byte{ - []byte("cpu,host=A"), - []byte("cpu,host=B"), - }, - } - if err := e.DeleteSeriesRange(context.Background(), itr, math.MinInt64, math.MaxInt64); err != nil { - t.Fatalf("failed to delete series: %s", err.Error()) - } - - // Verify the series are no longer present. - n, err = seriesExist(e, "cpu", []string{"host"}) - if err != nil { - t.Fatal(err) - } else if got, exp := n, 0; got != exp { - t.Fatalf("got %d points, expected %d", got, exp) - } - - // Simulate restart of server - if err := e.Reopen(); err != nil { - t.Fatal(err) - } - - // Verify the series are no longer present. - n, err = seriesExist(e, "cpu", []string{"host"}) - if err != nil { - t.Fatal(err) - } else if got, exp := n, 0; got != exp { - t.Fatalf("got %d points, expected %d", got, exp) - } - }) - } -} - -func seriesExist(e *Engine, m string, dims []string) (int, error) { - itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Dimensions: []string{"host"}, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - Ascending: false, - }) - if err != nil { - return 0, err - } else if itr == nil { - return 0, nil - } - defer itr.Close() - fitr := itr.(query.FloatIterator) - - var n int - for { - p, err := fitr.Next() - if err != nil { - return 0, err - } else if p == nil { - return n, nil - } - n++ - } -} - -// Ensure that the engine can write & read shard digest files. -func TestEngine_Digest(t *testing.T) { - e := MustOpenEngine(t, tsi1.IndexName) - - if err := e.Open(context.Background()); err != nil { - t.Fatalf("failed to open tsm1 engine: %s", err.Error()) - } - - // Create a few points. - points := []models.Point{ - MustParsePointString("cpu,host=A value=1.1 1000000000"), - MustParsePointString("cpu,host=B value=1.2 2000000000"), - } - - if err := e.WritePoints(context.Background(), points); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - // Force a compaction. 
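-	// The digest read back below maps each composite series/field key to the time ranges it
-	// covers, e.g. "cpu,host=A#!~#value" covering a single point at timestamp 1000000000.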
- e.ScheduleFullCompaction() - - digest := func() ([]span, error) { - // Get a reader for the shard's digest. - r, sz, err := e.Digest() - if err != nil { - return nil, err - } - - if sz <= 0 { - t.Fatalf("expected digest size > 0") - } - - // Make sure the digest can be read. - dr, err := tsm1.NewDigestReader(r) - if err != nil { - r.Close() - return nil, err - } - defer dr.Close() - - _, err = dr.ReadManifest() - if err != nil { - t.Fatal(err) - } - - got := []span{} - - for { - k, s, err := dr.ReadTimeSpan() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - - got = append(got, span{ - key: k, - tspan: s, - }) - } - - return got, nil - } - - exp := []span{ - span{ - key: "cpu,host=A#!~#value", - tspan: &tsm1.DigestTimeSpan{ - Ranges: []tsm1.DigestTimeRange{ - tsm1.DigestTimeRange{ - Min: 1000000000, - Max: 1000000000, - N: 1, - CRC: 1048747083, - }, - }, - }, - }, - span{ - key: "cpu,host=B#!~#value", - tspan: &tsm1.DigestTimeSpan{ - Ranges: []tsm1.DigestTimeRange{ - tsm1.DigestTimeRange{ - Min: 2000000000, - Max: 2000000000, - N: 1, - CRC: 734984746, - }, - }, - }, - }, - } - - for n := 0; n < 2; n++ { - got, err := digest() - if err != nil { - t.Fatalf("n = %d: %s", n, err) - } - - // Make sure the data in the digest was valid. - if !reflect.DeepEqual(exp, got) { - t.Fatalf("n = %d\nexp = %v\ngot = %v\n", n, exp, got) - } - } - - // Test that writing more points causes the digest to be updated. - points = []models.Point{ - MustParsePointString("cpu,host=C value=1.1 3000000000"), - } - - if err := e.WritePoints(context.Background(), points); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - // Force a compaction. - e.ScheduleFullCompaction() - - // Get new digest. - got, err := digest() - if err != nil { - t.Fatal(err) - } - - exp = append(exp, span{ - key: "cpu,host=C#!~#value", - tspan: &tsm1.DigestTimeSpan{ - Ranges: []tsm1.DigestTimeRange{ - tsm1.DigestTimeRange{ - Min: 3000000000, - Max: 3000000000, - N: 1, - CRC: 2553233514, - }, - }, - }, - }) - - if !reflect.DeepEqual(exp, got) { - t.Fatalf("\nexp = %v\ngot = %v\n", exp, got) - } -} - -type span struct { - key string - tspan *tsm1.DigestTimeSpan -} - -// Ensure engine handles concurrent calls to Digest(). -func TestEngine_Digest_Concurrent(t *testing.T) { - e := MustOpenEngine(t, tsi1.IndexName) - - if err := e.Open(context.Background()); err != nil { - t.Fatalf("failed to open tsm1 engine: %s", err.Error()) - } - - // Create a few points. - points := []models.Point{ - MustParsePointString("cpu,host=A value=1.1 1000000000"), - MustParsePointString("cpu,host=B value=1.2 2000000000"), - } - - if err := e.WritePoints(context.Background(), points); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - // Force a compaction. - e.ScheduleFullCompaction() - - // Start multiple waiting goroutines, ready to call Digest(). - start := make(chan struct{}) - errs := make(chan error) - wg := &sync.WaitGroup{} - for n := 0; n < 100; n++ { - wg.Add(1) - go func() { - defer wg.Done() - <-start - r, _, err := e.Digest() - if err != nil { - errs <- err - } - r.Close() - }() - } - - // Goroutine to close errs channel after all routines have finished. - go func() { wg.Wait(); close(errs) }() - - // Signal all goroutines to call Digest(). - close(start) - - // Check for digest errors. 
- for err := range errs { - if err != nil { - t.Fatal(err) - } - } -} - -// Ensure that the engine will backup any TSM files created since the passed in time -func TestEngine_Backup(t *testing.T) { - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - // Generate temporary file. - f, _ := os.CreateTemp("", "tsm") - f.Close() - os.Remove(f.Name()) - walPath := filepath.Join(f.Name(), "wal") - os.MkdirAll(walPath, 0777) - defer os.RemoveAll(f.Name()) - - // Create a few points. - p1 := MustParsePointString("cpu,host=A value=1.1 1000000000") - p2 := MustParsePointString("cpu,host=B value=1.2 2000000000") - p3 := MustParsePointString("cpu,host=C value=1.3 3000000000") - - // Write those points to the engine. - db := path.Base(f.Name()) - opt := tsdb.NewEngineOptions() - idx := tsdb.MustOpenIndex(1, db, filepath.Join(f.Name(), "index"), tsdb.NewSeriesIDSet(), sfile.SeriesFile, opt) - defer idx.Close() - - e := tsm1.NewEngine(1, idx, f.Name(), walPath, sfile.SeriesFile, opt).(*tsm1.Engine) - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - - if err := e.Open(context.Background()); err != nil { - t.Fatalf("failed to open tsm1 engine: %s", err.Error()) - } - - if err := e.WritePoints(context.Background(), []models.Point{p1}); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - if err := e.WritePoints(context.Background(), []models.Point{p2}); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - b := bytes.NewBuffer(nil) - if err := e.Backup(b, "", time.Unix(0, 0)); err != nil { - t.Fatalf("failed to backup: %s", err.Error()) - } - - tr := tar.NewReader(b) - if len(e.FileStore.Files()) != 2 { - t.Fatalf("file count wrong: exp: %d, got: %d", 2, len(e.FileStore.Files())) - } - - fileNames := map[string]bool{} - for _, f := range e.FileStore.Files() { - fileNames[filepath.Base(f.Path())] = true - } - - th, err := tr.Next() - for err == nil { - if !fileNames[th.Name] { - t.Errorf("Extra file in backup: %q", th.Name) - } - delete(fileNames, th.Name) - th, err = tr.Next() - } - - if err != nil && err != io.EOF { - t.Fatalf("Problem reading tar header: %s", err) - } - - for f := range fileNames { - t.Errorf("File missing from backup: %s", f) - } - - if t.Failed() { - t.FailNow() - } - - lastBackup := time.Now() - - // we have to sleep for a second because last modified times only have second level precision. 
- // so this test won't work properly unless the file is at least a second past the last one - time.Sleep(time.Second) - - if err := e.WritePoints(context.Background(), []models.Point{p3}); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - b = bytes.NewBuffer(nil) - if err := e.Backup(b, "", lastBackup); err != nil { - t.Fatalf("failed to backup: %s", err.Error()) - } - - tr = tar.NewReader(b) - th, err = tr.Next() - if err != nil { - t.Fatalf("error getting next tar header: %s", err.Error()) - } - - mostRecentFile := e.FileStore.Files()[e.FileStore.Count()-1].Path() - if !strings.Contains(mostRecentFile, th.Name) || th.Name == "" { - t.Fatalf("file name doesn't match:\n\tgot: %s\n\texp: %s", th.Name, mostRecentFile) - } - storeDir := filepath.Dir(e.FileStore.Files()[0].Path()) - dfd, err := os.Open(storeDir) - if err != nil { - t.Fatalf("cannot open filestore directory %s: %q", storeDir, err) - } else { - defer dfd.Close() - } - files, err := dfd.Readdirnames(0) - if err != nil { - t.Fatalf("cannot read directory %s: %q", storeDir, err) - } - for _, f := range files { - if strings.HasSuffix(f, tsm1.TmpTSMFileExtension) { - t.Fatalf("temporary directory for backup not cleaned up: %s", f) - } - } -} - -func TestEngine_Export(t *testing.T) { - // Generate temporary file. - f, _ := os.CreateTemp("", "tsm") - f.Close() - os.Remove(f.Name()) - walPath := filepath.Join(f.Name(), "wal") - os.MkdirAll(walPath, 0777) - defer os.RemoveAll(f.Name()) - - // Create a few points. - p1 := MustParsePointString("cpu,host=A value=1.1 1000000000") - p2 := MustParsePointString("cpu,host=B value=1.2 2000000000") - p3 := MustParsePointString("cpu,host=C value=1.3 3000000000") - - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - // Write those points to the engine. 
- db := path.Base(f.Name()) - opt := tsdb.NewEngineOptions() - idx := tsdb.MustOpenIndex(1, db, filepath.Join(f.Name(), "index"), tsdb.NewSeriesIDSet(), sfile.SeriesFile, opt) - defer idx.Close() - - e := tsm1.NewEngine(1, idx, f.Name(), walPath, sfile.SeriesFile, opt).(*tsm1.Engine) - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - - if err := e.Open(context.Background()); err != nil { - t.Fatalf("failed to open tsm1 engine: %s", err.Error()) - } - - if err := e.WritePoints(context.Background(), []models.Point{p1}); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - if err := e.WritePoints(context.Background(), []models.Point{p2}); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - if err := e.WritePoints(context.Background(), []models.Point{p3}); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - // export the whole DB - var exBuf bytes.Buffer - if err := e.Export(&exBuf, "", time.Unix(0, 0), time.Unix(0, 4000000000)); err != nil { - t.Fatalf("failed to export: %s", err.Error()) - } - - var bkBuf bytes.Buffer - if err := e.Backup(&bkBuf, "", time.Unix(0, 0)); err != nil { - t.Fatalf("failed to backup: %s", err.Error()) - } - - if len(e.FileStore.Files()) != 3 { - t.Fatalf("file count wrong: exp: %d, got: %d", 3, len(e.FileStore.Files())) - } - - fileNames := map[string]bool{} - for _, f := range e.FileStore.Files() { - fileNames[filepath.Base(f.Path())] = true - } - - fileData, err := getExportData(&exBuf) - if err != nil { - t.Errorf("Error extracting data from export: %s", err.Error()) - } - - // TEST 1: did we get any extra files not found in the store? - for k := range fileData { - if _, ok := fileNames[k]; !ok { - t.Errorf("exported a file not in the store: %s", k) - } - } - - // TEST 2: did we miss any files that the store had? - for k := range fileNames { - if _, ok := fileData[k]; !ok { - t.Errorf("failed to export a file from the store: %s", k) - } - } - - // TEST 3: Does 'backup' get the same files + bits? - tr := tar.NewReader(&bkBuf) - - th, err := tr.Next() - for err == nil { - expData, ok := fileData[th.Name] - if !ok { - t.Errorf("Extra file in backup: %q", th.Name) - continue - } - - buf := new(bytes.Buffer) - if _, err := io.Copy(buf, tr); err != nil { - t.Fatal(err) - } - - if !equalBuffers(expData, buf) { - t.Errorf("2Difference in data between backup and Export for file %s", th.Name) - } - - th, err = tr.Next() - } - - if t.Failed() { - t.FailNow() - } - - // TEST 4: Are subsets (1), (2), (3), (1,2), (2,3) accurately found in the larger export? 
-	// Export each subset of the data and verify its contents against the full export.
-	subsets := []struct {
-		name     string
-		min, max int64
-	}{
-		{"1", 0, 1000000000},
-		{"2", 1000000001, 2000000000},
-		{"3", 2000000001, 3000000000},
-		{"1,2", 0, 2000000000},
-		{"2,3", 1000000001, 3000000000},
-	}
-
-	for _, s := range subsets {
-		var ex bytes.Buffer
-		if err := e.Export(&ex, "", time.Unix(0, s.min), time.Unix(0, s.max)); err != nil {
-			t.Fatalf("failed to export subset (%s): %s", s.name, err.Error())
-		}
-
-		exData, err := getExportData(&ex)
-		if err != nil {
-			t.Errorf("Error extracting data from export (%s): %s", s.name, err.Error())
-		}
-
-		for k, v := range exData {
-			fullExp, ok := fileData[k]
-			if !ok {
-				t.Errorf("subset export (%s) contains a file not found in the full export: %s", s.name, k)
-				continue
-			}
-			if !equalBuffers(fullExp, v) {
-				t.Errorf("difference in data between full export and subset export (%s) for file %s", s.name, k)
-			}
-		}
-	}
-}
-
-func equalBuffers(bufA, bufB *bytes.Buffer) bool {
-	// Buffers of different lengths can never be equal; checking up front also
-	// avoids indexing past the end of the shorter buffer.
-	if bufA.Len() != bufB.Len() {
-		return false
-	}
-	return bytes.Equal(bufA.Bytes(), bufB.Bytes())
-}
-
-func getExportData(exBuf *bytes.Buffer) (map[string]*bytes.Buffer, error) {
-	tr := tar.NewReader(exBuf)
-
-	fileData := make(map[string]*bytes.Buffer)
-
-	// TEST 1: Get the bits for each file.
If we got a file the store doesn't know about, report error - for { - th, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - return nil, err - } - - buf := new(bytes.Buffer) - if _, err := io.Copy(buf, tr); err != nil { - return nil, err - } - fileData[th.Name] = buf - - } - - return fileData, nil -} - -// Ensure engine can create an ascending iterator for cached values. -func TestEngine_CreateIterator_Cache_Ascending(t *testing.T) { - t.Parallel() - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - e := MustOpenEngine(t, index) - - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) - - if err := e.WritePointsString( - `cpu,host=A value=1.1 1000000000`, - `cpu,host=A value=1.2 2000000000`, - `cpu,host=A value=1.3 3000000000`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Dimensions: []string{"host"}, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - Ascending: true, - }) - if err != nil { - t.Fatal(err) - } - fitr := itr.(query.FloatIterator) - - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(0): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) { - t.Fatalf("unexpected point(0): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(1): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) { - t.Fatalf("unexpected point(1): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(2): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) { - t.Fatalf("unexpected point(2): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("expected eof, got error: %v", err) - } else if p != nil { - t.Fatalf("expected eof: %v", p) - } - }) - } -} - -// Ensure engine can create an descending iterator for cached values. 
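The iterator tests above and below all consume a query.FloatIterator by calling Next() until it returns a nil point with a nil error. A minimal standalone sketch of that pattern, written as a hypothetical helper that is not part of this file, could look like:

func drainFloatIterator(itr query.FloatIterator) ([]query.FloatPoint, error) {
	defer itr.Close()

	var pts []query.FloatPoint
	for {
		p, err := itr.Next()
		if err != nil {
			return nil, err
		}
		if p == nil {
			// A nil point with a nil error marks the end of the iterator.
			return pts, nil
		}
		pts = append(pts, *p)
	}
}

Collecting the points first and comparing the whole slice is an alternative to the repeated per-point checks used in these tests.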
-func TestEngine_CreateIterator_Cache_Descending(t *testing.T) { - t.Parallel() - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - - e := MustOpenEngine(t, index) - - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) - - if err := e.WritePointsString( - `cpu,host=A value=1.1 1000000000`, - `cpu,host=A value=1.2 2000000000`, - `cpu,host=A value=1.3 3000000000`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Dimensions: []string{"host"}, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - Ascending: false, - }) - if err != nil { - t.Fatal(err) - } - fitr := itr.(query.FloatIterator) - - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(0): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) { - t.Fatalf("unexpected point(0): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unepxected error(1): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) { - t.Fatalf("unexpected point(1): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(2): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) { - t.Fatalf("unexpected point(2): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("expected eof, got error: %v", err) - } else if p != nil { - t.Fatalf("expected eof: %v", p) - } - }) - } -} - -// Ensure engine can create an ascending iterator for tsm values. 
-func TestEngine_CreateIterator_TSM_Ascending(t *testing.T) { - t.Parallel() - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - e := MustOpenEngine(t, index) - - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) - - if err := e.WritePointsString( - `cpu,host=A value=1.1 1000000000`, - `cpu,host=A value=1.2 2000000000`, - `cpu,host=A value=1.3 3000000000`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - e.MustWriteSnapshot() - - itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Dimensions: []string{"host"}, - StartTime: 1000000000, - EndTime: 3000000000, - Ascending: true, - }) - if err != nil { - t.Fatal(err) - } - defer itr.Close() - fitr := itr.(query.FloatIterator) - - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(0): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) { - t.Fatalf("unexpected point(0): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(1): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) { - t.Fatalf("unexpected point(1): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(2): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) { - t.Fatalf("unexpected point(2): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("expected eof, got error: %v", err) - } else if p != nil { - t.Fatalf("expected eof: %v", p) - } - }) - } -} - -// Ensure engine can create an descending iterator for cached values. 
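The cache and TSM iterator tests each repeat the same expect-one-point-then-compare block. A hypothetical helper (an illustrative sketch only, not something the engine or these tests define) could assert an expected sequence in one call:

func expectFloatPoints(t *testing.T, itr query.FloatIterator, exp []query.FloatPoint) {
	t.Helper()
	for i := range exp {
		p, err := itr.Next()
		if err != nil {
			t.Fatalf("unexpected error(%d): %v", i, err)
		}
		if p == nil || !reflect.DeepEqual(*p, exp[i]) {
			t.Fatalf("unexpected point(%d): %v", i, p)
		}
	}
	// After the expected points, the iterator should be exhausted.
	if p, err := itr.Next(); err != nil {
		t.Fatalf("expected eof, got error: %v", err)
	} else if p != nil {
		t.Fatalf("expected eof: %v", p)
	}
}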
-func TestEngine_CreateIterator_TSM_Descending(t *testing.T) { - t.Parallel() - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - e := MustOpenEngine(t, index) - - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) - - if err := e.WritePointsString( - `cpu,host=A value=1.1 1000000000`, - `cpu,host=A value=1.2 2000000000`, - `cpu,host=A value=1.3 3000000000`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - e.MustWriteSnapshot() - - itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Dimensions: []string{"host"}, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - Ascending: false, - }) - if err != nil { - t.Fatal(err) - } - defer itr.Close() - fitr := itr.(query.FloatIterator) - - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(0): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) { - t.Fatalf("unexpected point(0): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(1): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) { - t.Fatalf("unexpected point(1): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(2): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) { - t.Fatalf("unexpected point(2): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("expected eof, got error: %v", err) - } else if p != nil { - t.Fatalf("expected eof: %v", p) - } - }) - } -} - -// Ensure engine can create an iterator with auxiliary fields. 
-func TestEngine_CreateIterator_Aux(t *testing.T) { - t.Parallel() - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - e := MustOpenEngine(t, index) - - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("F"), influxql.Float) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) - - if err := e.WritePointsString( - `cpu,host=A value=1.1 1000000000`, - `cpu,host=A F=100 1000000000`, - `cpu,host=A value=1.2 2000000000`, - `cpu,host=A value=1.3 3000000000`, - `cpu,host=A F=200 3000000000`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Aux: []influxql.VarRef{{Val: "F"}}, - Dimensions: []string{"host"}, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - Ascending: true, - }) - if err != nil { - t.Fatal(err) - } - fitr := itr.(query.FloatIterator) - - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(0): %v", err) - } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1, Aux: []interface{}{float64(100)}}) { - t.Fatalf("unexpected point(0): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(1): %v", err) - } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2, Aux: []interface{}{(*float64)(nil)}}) { - t.Fatalf("unexpected point(1): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(2): %v", err) - } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3, Aux: []interface{}{float64(200)}}) { - t.Fatalf("unexpected point(2): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("expected eof, got error: %v", err) - } else if p != nil { - t.Fatalf("expected eof: %v", p) - } - }) - } -} - -// Ensure engine can create an iterator with a condition. 
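Note how the aux-field test above encodes a missing value: the point at t=2000000000 carries (*float64)(nil) in its Aux slice, a typed nil pointer rather than an untyped nil interface. A small hypothetical helper (for illustration only) shows how such a slot could be unpacked:

func auxFloat(v interface{}) (float64, bool) {
	switch x := v.(type) {
	case float64:
		return x, true // value present at this timestamp
	case *float64:
		if x != nil {
			return *x, true
		}
	}
	return 0, false // aux field absent at this timestamp
}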
-func TestEngine_CreateIterator_Condition(t *testing.T) { - t.Parallel() - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - e := MustOpenEngine(t, index) - - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("X"), influxql.Float) - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("Y"), influxql.Float) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) - - if err := e.WritePointsString( - `cpu,host=A value=1.1 1000000000`, - `cpu,host=A X=10 1000000000`, - `cpu,host=A Y=100 1000000000`, - - `cpu,host=A value=1.2 2000000000`, - - `cpu,host=A value=1.3 3000000000`, - `cpu,host=A X=20 3000000000`, - `cpu,host=A Y=200 3000000000`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Dimensions: []string{"host"}, - Condition: influxql.MustParseExpr(`X = 10 OR Y > 150`), - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - Ascending: true, - }) - if err != nil { - t.Fatal(err) - } - fitr := itr.(query.FloatIterator) - - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(0): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) { - t.Fatalf("unexpected point(0): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected point(1): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) { - t.Fatalf("unexpected point(1): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("expected eof, got error: %v", err) - } else if p != nil { - t.Fatalf("expected eof: %v", p) - } - }) - } -} - -// Test that series id set gets updated and returned appropriately. -func TestIndex_SeriesIDSet(t *testing.T) { - test := func(t *testing.T, index string) error { - engine := MustOpenEngine(t, index) - - // Add some series. - engine.MustAddSeries("cpu", map[string]string{"host": "a", "region": "west"}) - engine.MustAddSeries("cpu", map[string]string{"host": "b", "region": "west"}) - engine.MustAddSeries("cpu", map[string]string{"host": "b"}) - engine.MustAddSeries("gpu", nil) - engine.MustAddSeries("gpu", map[string]string{"host": "b"}) - engine.MustAddSeries("mem", map[string]string{"host": "z"}) - - // Collect series IDs. - seriesIDMap := map[string]uint64{} - var e tsdb.SeriesIDElem - var err error - - itr := engine.sfile.SeriesIDIterator() - for e, err = itr.Next(); ; e, err = itr.Next() { - if err != nil { - return err - } else if e.SeriesID == 0 { - break - } - - name, tags := tsdb.ParseSeriesKey(engine.sfile.SeriesKey(e.SeriesID)) - key := fmt.Sprintf("%s%s", name, tags.HashKey()) - seriesIDMap[key] = e.SeriesID - } - - for _, id := range seriesIDMap { - if !engine.SeriesIDSet().Contains(id) { - return fmt.Errorf("bitmap does not contain ID: %d", id) - } - } - - // Drop all the series for the gpu measurement and they should no longer - // be in the series ID set. 
- if err := engine.DeleteMeasurement(context.Background(), []byte("gpu")); err != nil { - return err - } - - if engine.SeriesIDSet().Contains(seriesIDMap["gpu"]) { - return fmt.Errorf("bitmap does not contain ID: %d for key %s, but should", seriesIDMap["gpu"], "gpu") - } else if engine.SeriesIDSet().Contains(seriesIDMap["gpu,host=b"]) { - return fmt.Errorf("bitmap does not contain ID: %d for key %s, but should", seriesIDMap["gpu,host=b"], "gpu,host=b") - } - delete(seriesIDMap, "gpu") - delete(seriesIDMap, "gpu,host=b") - - // Drop the specific mem series - ditr := &seriesIterator{keys: [][]byte{[]byte("mem,host=z")}} - if err := engine.DeleteSeriesRange(context.Background(), ditr, math.MinInt64, math.MaxInt64); err != nil { - return err - } - - if engine.SeriesIDSet().Contains(seriesIDMap["mem,host=z"]) { - return fmt.Errorf("bitmap does not contain ID: %d for key %s, but should", seriesIDMap["mem,host=z"], "mem,host=z") - } - delete(seriesIDMap, "mem,host=z") - - // The rest of the keys should still be in the set. - for key, id := range seriesIDMap { - if !engine.SeriesIDSet().Contains(id) { - return fmt.Errorf("bitmap does not contain ID: %d for key %s, but should", id, key) - } - } - - // Reopen the engine, and the series should be re-added to the bitmap. - if err := engine.Reopen(); err != nil { - panic(err) - } - - // Check bitset is expected. - expected := tsdb.NewSeriesIDSet() - for _, id := range seriesIDMap { - expected.Add(id) - } - - if !engine.SeriesIDSet().Equals(expected) { - return fmt.Errorf("got bitset %s, expected %s", engine.SeriesIDSet().String(), expected.String()) - } - return nil - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - if err := test(t, index); err != nil { - t.Error(err) - } - }) - } -} - -// Ensures that deleting series from TSM files with multiple fields removes all the -/// series -func TestEngine_DeleteSeries(t *testing.T) { - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - // Create a few points. - p1 := MustParsePointString("cpu,host=A value=1.1 1000000000") - p2 := MustParsePointString("cpu,host=B value=1.2 2000000000") - p3 := MustParsePointString("cpu,host=A sum=1.3 3000000000") - - e, err := NewEngine(t, index) - if err != nil { - t.Fatal(err) - } - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - if err := e.Open(context.Background()); err != nil { - t.Fatal(err) - } - - if err := e.writePoints(p1, p2, p3); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - keys := e.FileStore.Keys() - if exp, got := 3, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}} - if err := e.DeleteSeriesRange(context.Background(), itr, math.MinInt64, math.MaxInt64); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - keys = e.FileStore.Keys() - if exp, got := 1, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - exp := "cpu,host=B#!~#value" - if _, ok := keys[exp]; !ok { - t.Fatalf("wrong series deleted: exp %v, got %v", exp, keys) - } - }) - } -} - -func TestEngine_DeleteSeriesRange(t *testing.T) { - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - // Create a few points. 
- p1 := MustParsePointString("cpu,host=0 value=1.1 6000000000") // Should not be deleted - p2 := MustParsePointString("cpu,host=A value=1.2 2000000000") - p3 := MustParsePointString("cpu,host=A value=1.3 3000000000") - p4 := MustParsePointString("cpu,host=B value=1.3 4000000000") // Should not be deleted - p5 := MustParsePointString("cpu,host=B value=1.3 5000000000") // Should not be deleted - p6 := MustParsePointString("cpu,host=C value=1.3 1000000000") - p7 := MustParsePointString("mem,host=C value=1.3 1000000000") // Should not be deleted - p8 := MustParsePointString("disk,host=C value=1.3 1000000000") // Should not be deleted - - e, err := NewEngine(t, index) - if err != nil { - t.Fatal(err) - } - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - if err := e.Open(context.Background()); err != nil { - t.Fatal(err) - } - - for _, p := range []models.Point{p1, p2, p3, p4, p5, p6, p7, p8} { - if err := e.CreateSeriesIfNotExists(p.Key(), p.Name(), p.Tags()); err != nil { - t.Fatalf("create series index error: %v", err) - } - } - - if err := e.WritePoints(context.Background(), []models.Point{p1, p2, p3, p4, p5, p6, p7, p8}); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - keys := e.FileStore.Keys() - if exp, got := 6, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=0"), []byte("cpu,host=A"), []byte("cpu,host=B"), []byte("cpu,host=C")}} - if err := e.DeleteSeriesRange(context.Background(), itr, 0, 3000000000); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - keys = e.FileStore.Keys() - if exp, got := 4, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - exp := "cpu,host=B#!~#value" - if _, ok := keys[exp]; !ok { - t.Fatalf("wrong series deleted: exp %v, got %v", exp, keys) - } - - // Check that the series still exists in the index - indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} - iter, err := indexSet.MeasurementSeriesIDIterator([]byte("cpu")) - if err != nil { - t.Fatalf("iterator error: %v", err) - } - defer iter.Close() - - elem, err := iter.Next() - if err != nil { - t.Fatal(err) - } - if elem.SeriesID == 0 { - t.Fatalf("series index mismatch: EOF, exp 2 series") - } - - // Lookup series. - name, tags := e.sfile.Series(elem.SeriesID) - if got, exp := name, []byte("cpu"); !bytes.Equal(got, exp) { - t.Fatalf("series mismatch: got %s, exp %s", got, exp) - } - - if !tags.Equal(models.NewTags(map[string]string{"host": "0"})) && !tags.Equal(models.NewTags(map[string]string{"host": "B"})) { - t.Fatalf(`series mismatch: got %s, exp either "host=0" or "host=B"`, tags) - } - iter.Close() - - // Deleting remaining series should remove them from the series. 
- itr = &seriesIterator{keys: [][]byte{[]byte("cpu,host=0"), []byte("cpu,host=B")}} - if err := e.DeleteSeriesRange(context.Background(), itr, 0, 9000000000); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - indexSet = tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} - if iter, err = indexSet.MeasurementSeriesIDIterator([]byte("cpu")); err != nil { - t.Fatalf("iterator error: %v", err) - } - if iter == nil { - return - } - - defer iter.Close() - if elem, err = iter.Next(); err != nil { - t.Fatal(err) - } - if elem.SeriesID != 0 { - t.Fatalf("got an undeleted series id, but series should be dropped from index") - } - }) - } -} - -func TestEngine_DeleteSeriesRangeWithPredicate(t *testing.T) { - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - // Create a few points. - p1 := MustParsePointString("cpu,host=A value=1.1 6000000000") // Should not be deleted - p2 := MustParsePointString("cpu,host=A value=1.2 2000000000") // Should not be deleted - p3 := MustParsePointString("cpu,host=B value=1.3 3000000000") - p4 := MustParsePointString("cpu,host=B value=1.3 4000000000") - p5 := MustParsePointString("cpu,host=C value=1.3 5000000000") // Should not be deleted - p6 := MustParsePointString("mem,host=B value=1.3 1000000000") - p7 := MustParsePointString("mem,host=C value=1.3 1000000000") - p8 := MustParsePointString("disk,host=C value=1.3 1000000000") // Should not be deleted - - e, err := NewEngine(t, index) - if err != nil { - t.Fatal(err) - } - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - if err := e.Open(context.Background()); err != nil { - t.Fatal(err) - } - - for _, p := range []models.Point{p1, p2, p3, p4, p5, p6, p7, p8} { - if err := e.CreateSeriesIfNotExists(p.Key(), p.Name(), p.Tags()); err != nil { - t.Fatalf("create series index error: %v", err) - } - } - - if err := e.WritePoints(context.Background(), []models.Point{p1, p2, p3, p4, p5, p6, p7, p8}); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - keys := e.FileStore.Keys() - if exp, got := 6, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A"), []byte("cpu,host=B"), []byte("cpu,host=C"), []byte("mem,host=B"), []byte("mem,host=C")}} - predicate := func(name []byte, tags models.Tags) (int64, int64, bool) { - if bytes.Equal(name, []byte("mem")) { - return math.MinInt64, math.MaxInt64, true - } - if bytes.Equal(name, []byte("cpu")) { - for _, tag := range tags { - if bytes.Equal(tag.Key, []byte("host")) && bytes.Equal(tag.Value, []byte("B")) { - return math.MinInt64, math.MaxInt64, true - } - } - } - return math.MinInt64, math.MaxInt64, false - } - if err := e.DeleteSeriesRangeWithPredicate(context.Background(), itr, predicate); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - keys = e.FileStore.Keys() - if exp, got := 3, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - exps := []string{"cpu,host=A#!~#value", "cpu,host=C#!~#value", "disk,host=C#!~#value"} - for _, exp := range exps { - if _, ok := keys[exp]; !ok { - t.Fatalf("wrong series deleted: exp %v, got %v", exps, keys) - } - } - - // Check that the series still exists in the index - indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} - iter, 
err := indexSet.MeasurementSeriesIDIterator([]byte("cpu")) - if err != nil { - t.Fatalf("iterator error: %v", err) - } - defer iter.Close() - - elem, err := iter.Next() - if err != nil { - t.Fatal(err) - } - if elem.SeriesID == 0 { - t.Fatalf("series index mismatch: EOF, exp 2 series") - } - - // Lookup series. - name, tags := e.sfile.Series(elem.SeriesID) - if got, exp := name, []byte("cpu"); !bytes.Equal(got, exp) { - t.Fatalf("series mismatch: got %s, exp %s", got, exp) - } - - if !tags.Equal(models.NewTags(map[string]string{"host": "A"})) && !tags.Equal(models.NewTags(map[string]string{"host": "C"})) { - t.Fatalf(`series mismatch: got %s, exp either "host=A" or "host=C"`, tags) - } - iter.Close() - - // Deleting remaining series should remove them from the series. - itr = &seriesIterator{keys: [][]byte{[]byte("cpu,host=A"), []byte("cpu,host=C")}} - if err := e.DeleteSeriesRange(context.Background(), itr, 0, 9000000000); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - indexSet = tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} - if iter, err = indexSet.MeasurementSeriesIDIterator([]byte("cpu")); err != nil { - t.Fatalf("iterator error: %v", err) - } - if iter == nil { - return - } - - defer iter.Close() - if elem, err = iter.Next(); err != nil { - t.Fatal(err) - } - if elem.SeriesID != 0 { - t.Fatalf("got an undeleted series id, but series should be dropped from index") - } - }) - } -} - -// Tests that a nil predicate deletes all values returned from the series iterator. -func TestEngine_DeleteSeriesRangeWithPredicate_Nil(t *testing.T) { - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - // Create a few points. - p1 := MustParsePointString("cpu,host=A value=1.1 6000000000") // Should not be deleted - p2 := MustParsePointString("cpu,host=A value=1.2 2000000000") // Should not be deleted - p3 := MustParsePointString("cpu,host=B value=1.3 3000000000") - p4 := MustParsePointString("cpu,host=B value=1.3 4000000000") - p5 := MustParsePointString("cpu,host=C value=1.3 5000000000") // Should not be deleted - p6 := MustParsePointString("mem,host=B value=1.3 1000000000") - p7 := MustParsePointString("mem,host=C value=1.3 1000000000") - p8 := MustParsePointString("disk,host=C value=1.3 1000000000") // Should not be deleted - - e, err := NewEngine(t, index) - if err != nil { - t.Fatal(err) - } - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - if err := e.Open(context.Background()); err != nil { - t.Fatal(err) - } - - for _, p := range []models.Point{p1, p2, p3, p4, p5, p6, p7, p8} { - if err := e.CreateSeriesIfNotExists(p.Key(), p.Name(), p.Tags()); err != nil { - t.Fatalf("create series index error: %v", err) - } - } - - if err := e.WritePoints(context.Background(), []models.Point{p1, p2, p3, p4, p5, p6, p7, p8}); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - keys := e.FileStore.Keys() - if exp, got := 6, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A"), []byte("cpu,host=B"), []byte("cpu,host=C"), []byte("mem,host=B"), []byte("mem,host=C")}} - if err := e.DeleteSeriesRangeWithPredicate(context.Background(), itr, nil); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - keys = e.FileStore.Keys() - if exp, got := 1, len(keys); exp 
!= got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - // Check that the series still exists in the index - indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} - iter, err := indexSet.MeasurementSeriesIDIterator([]byte("cpu")) - if err != nil { - t.Fatalf("iterator error: %v", err) - } else if iter == nil { - return - } - defer iter.Close() - - if elem, err := iter.Next(); err != nil { - t.Fatal(err) - } else if elem.SeriesID != 0 { - t.Fatalf("got an undeleted series id, but series should be dropped from index") - } - - // Check that disk series still exists - iter, err = indexSet.MeasurementSeriesIDIterator([]byte("disk")) - if err != nil { - t.Fatalf("iterator error: %v", err) - } else if iter == nil { - return - } - defer iter.Close() - - if elem, err := iter.Next(); err != nil { - t.Fatal(err) - } else if elem.SeriesID == 0 { - t.Fatalf("got an undeleted series id, but series should be dropped from index") - } - }) - } -} -func TestEngine_DeleteSeriesRangeWithPredicate_FlushBatch(t *testing.T) { - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - // Create a few points. - p1 := MustParsePointString("cpu,host=A value=1.1 6000000000") // Should not be deleted - p2 := MustParsePointString("cpu,host=A value=1.2 2000000000") // Should not be deleted - p3 := MustParsePointString("cpu,host=B value=1.3 3000000000") - p4 := MustParsePointString("cpu,host=B value=1.3 4000000000") - p5 := MustParsePointString("cpu,host=C value=1.3 5000000000") // Should not be deleted - p6 := MustParsePointString("mem,host=B value=1.3 1000000000") - p7 := MustParsePointString("mem,host=C value=1.3 1000000000") - p8 := MustParsePointString("disk,host=C value=1.3 1000000000") // Should not be deleted - - e, err := NewEngine(t, index) - if err != nil { - t.Fatal(err) - } - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - if err := e.Open(context.Background()); err != nil { - t.Fatal(err) - } - - for _, p := range []models.Point{p1, p2, p3, p4, p5, p6, p7, p8} { - if err := e.CreateSeriesIfNotExists(p.Key(), p.Name(), p.Tags()); err != nil { - t.Fatalf("create series index error: %v", err) - } - } - - if err := e.WritePoints(context.Background(), []models.Point{p1, p2, p3, p4, p5, p6, p7, p8}); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - keys := e.FileStore.Keys() - if exp, got := 6, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A"), []byte("cpu,host=B"), []byte("cpu,host=C"), []byte("mem,host=B"), []byte("mem,host=C")}} - predicate := func(name []byte, tags models.Tags) (int64, int64, bool) { - if bytes.Equal(name, []byte("mem")) { - return 1000000000, 1000000000, true - } - - if bytes.Equal(name, []byte("cpu")) { - for _, tag := range tags { - if bytes.Equal(tag.Key, []byte("host")) && bytes.Equal(tag.Value, []byte("B")) { - return 3000000000, 4000000000, true - } - } - } - return math.MinInt64, math.MaxInt64, false - } - if err := e.DeleteSeriesRangeWithPredicate(context.Background(), itr, predicate); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - keys = e.FileStore.Keys() - if exp, got := 3, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - exps := []string{"cpu,host=A#!~#value", 
"cpu,host=C#!~#value", "disk,host=C#!~#value"} - for _, exp := range exps { - if _, ok := keys[exp]; !ok { - t.Fatalf("wrong series deleted: exp %v, got %v", exps, keys) - } - } - - // Check that the series still exists in the index - indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} - iter, err := indexSet.MeasurementSeriesIDIterator([]byte("cpu")) - if err != nil { - t.Fatalf("iterator error: %v", err) - } - defer iter.Close() - - elem, err := iter.Next() - if err != nil { - t.Fatal(err) - } - if elem.SeriesID == 0 { - t.Fatalf("series index mismatch: EOF, exp 2 series") - } - - // Lookup series. - name, tags := e.sfile.Series(elem.SeriesID) - if got, exp := name, []byte("cpu"); !bytes.Equal(got, exp) { - t.Fatalf("series mismatch: got %s, exp %s", got, exp) - } - - if !tags.Equal(models.NewTags(map[string]string{"host": "A"})) && !tags.Equal(models.NewTags(map[string]string{"host": "C"})) { - t.Fatalf(`series mismatch: got %s, exp either "host=A" or "host=C"`, tags) - } - iter.Close() - - // Deleting remaining series should remove them from the series. - itr = &seriesIterator{keys: [][]byte{[]byte("cpu,host=A"), []byte("cpu,host=C")}} - if err := e.DeleteSeriesRange(context.Background(), itr, 0, 9000000000); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - indexSet = tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} - if iter, err = indexSet.MeasurementSeriesIDIterator([]byte("cpu")); err != nil { - t.Fatalf("iterator error: %v", err) - } - if iter == nil { - return - } - - defer iter.Close() - if elem, err = iter.Next(); err != nil { - t.Fatal(err) - } - if elem.SeriesID != 0 { - t.Fatalf("got an undeleted series id, but series should be dropped from index") - } - }) - } -} - -func TestEngine_DeleteSeriesRange_OutsideTime(t *testing.T) { - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - // Create a few points. 
- p1 := MustParsePointString("cpu,host=A value=1.1 1000000000") // Should not be deleted - - e, err := NewEngine(t, index) - if err != nil { - t.Fatal(err) - } - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - if err := e.Open(context.Background()); err != nil { - t.Fatal(err) - } - - for _, p := range []models.Point{p1} { - if err := e.CreateSeriesIfNotExists(p.Key(), p.Name(), p.Tags()); err != nil { - t.Fatalf("create series index error: %v", err) - } - } - - if err := e.WritePoints(context.Background(), []models.Point{p1}); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } - - keys := e.FileStore.Keys() - if exp, got := 1, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}} - if err := e.DeleteSeriesRange(context.Background(), itr, 0, 0); err != nil { - t.Fatalf("failed to delete series: %v", err) - } - - keys = e.FileStore.Keys() - if exp, got := 1, len(keys); exp != got { - t.Fatalf("series count mismatch: exp %v, got %v", exp, got) - } - - exp := "cpu,host=A#!~#value" - if _, ok := keys[exp]; !ok { - t.Fatalf("wrong series deleted: exp %v, got %v", exp, keys) - } - - // Check that the series still exists in the index - iter, err := e.index.MeasurementSeriesIDIterator([]byte("cpu")) - if err != nil { - t.Fatalf("iterator error: %v", err) - } - defer iter.Close() - - elem, err := iter.Next() - if err != nil { - t.Fatal(err) - } - if elem.SeriesID == 0 { - t.Fatalf("series index mismatch: EOF, exp 1 series") - } - - // Lookup series. - name, tags := e.sfile.Series(elem.SeriesID) - if got, exp := name, []byte("cpu"); !bytes.Equal(got, exp) { - t.Fatalf("series mismatch: got %s, exp %s", got, exp) - } - - if got, exp := tags, models.NewTags(map[string]string{"host": "A"}); !got.Equal(exp) { - t.Fatalf("series mismatch: got %s, exp %s", got, exp) - } - }) - } -} - -func TestEngine_LastModified(t *testing.T) { - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - // Create a few points. - p1 := MustParsePointString("cpu,host=A value=1.1 1000000000") - p2 := MustParsePointString("cpu,host=B value=1.2 2000000000") - p3 := MustParsePointString("cpu,host=A sum=1.3 3000000000") - - e, err := NewEngine(t, index) - require.NoError(t, err) - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - e.SetEnabled(false) - require.NoError(t, e.Open(context.Background())) - - require.NoError(t, e.writePoints(p1, p2, p3)) - lm := e.LastModified() - require.Falsef(t, lm.IsZero(), "expected non-zero time, got %v", lm.UTC()) - e.SetEnabled(true) - - // Artificial sleep added due to filesystems caching the mod time - // of files. This prevents the WAL last modified time from being - // returned and newer than the filestore's mod time. - time.Sleep(time.Second) // Covers most filesystems. - - require.NoError(t, e.WriteSnapshot()) - lm2 := e.LastModified() - require.NotEqual(t, lm, lm2) - - // Another arbitrary sleep. 
- time.Sleep(time.Second) - - itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}} - require.NoError(t, e.DeleteSeriesRange(context.Background(), itr, math.MinInt64, math.MaxInt64)) - lm3 := e.LastModified() - require.NotEqual(t, lm2, lm3) - }) - } -} - -func TestEngine_SnapshotsDisabled(t *testing.T) { - sfile := MustOpenSeriesFile(t) - t.Cleanup(func() { sfile.Close() }) - - // Generate temporary file. - dir := t.TempDir() - walPath := filepath.Join(dir, "wal") - os.MkdirAll(walPath, 0777) - - // Create a tsm1 engine. - db := path.Base(dir) - opt := tsdb.NewEngineOptions() - idx := tsdb.MustOpenIndex(1, db, filepath.Join(dir, "index"), tsdb.NewSeriesIDSet(), sfile.SeriesFile, opt) - t.Cleanup(func() { idx.Close() }) - - e := tsm1.NewEngine(1, idx, dir, walPath, sfile.SeriesFile, opt).(*tsm1.Engine) - t.Cleanup(func() { e.Close() }) - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - - e.SetEnabled(false) - if err := e.Open(context.Background()); err != nil { - t.Fatalf("failed to open tsm1 engine: %s", err.Error()) - } - - // Make sure Snapshots are disabled. - e.SetCompactionsEnabled(false) - e.Compactor.DisableSnapshots() - - // Writing a snapshot should not fail when the snapshot is empty - // even if snapshots are disabled. - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("failed to snapshot: %s", err.Error()) - } -} - -func TestEngine_ShouldCompactCache(t *testing.T) { - nowTime := time.Now() - - e, err := NewEngine(t, tsi1.IndexName) - if err != nil { - t.Fatal(err) - } - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - e.SetEnabled(false) - if err := e.Open(context.Background()); err != nil { - t.Fatalf("failed to open tsm1 engine: %s", err.Error()) - } - - e.CacheFlushMemorySizeThreshold = 1024 - e.CacheFlushWriteColdDuration = time.Minute - - if e.ShouldCompactCache(nowTime) { - t.Fatal("nothing written to cache, so should not compact") - } - - if err := e.WritePointsString("m,k=v f=3i"); err != nil { - t.Fatal(err) - } - - if e.ShouldCompactCache(nowTime) { - t.Fatal("cache size < flush threshold and nothing written to FileStore, so should not compact") - } - - if !e.ShouldCompactCache(nowTime.Add(time.Hour)) { - t.Fatal("last compaction was longer than flush write cold threshold, so should compact") - } - - e.CacheFlushMemorySizeThreshold = 1 - if !e.ShouldCompactCache(nowTime) { - t.Fatal("cache size > flush threshold, so should compact") - } -} - -// Ensure engine can create an ascending cursor for cache and tsm values. 
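The assertions in TestEngine_ShouldCompactCache above imply the snapshot policy: a non-empty cache is snapshotted once it grows past the flush-size threshold or has been write-cold for longer than the configured duration. As a hypothetical restatement of that policy (illustrative names and parameters, not the engine's actual implementation):

func shouldCompactCache(now, lastWrite time.Time, cacheSize, sizeThreshold uint64, writeCold time.Duration) bool {
	if cacheSize == 0 {
		return false // nothing buffered, nothing to snapshot
	}
	if cacheSize > sizeThreshold {
		return true // cache has outgrown the flush threshold
	}
	return now.Sub(lastWrite) > writeCold // cache has gone write-cold
}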
-func TestEngine_CreateCursor_Ascending(t *testing.T) { - t.Parallel() - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - - e := MustOpenEngine(t, index) - - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) - - if err := e.WritePointsString( - `cpu,host=A value=1.1 1`, - `cpu,host=A value=1.2 2`, - `cpu,host=A value=1.3 3`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - e.MustWriteSnapshot() - - if err := e.WritePointsString( - `cpu,host=A value=10.1 10`, - `cpu,host=A value=11.2 11`, - `cpu,host=A value=12.3 12`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - q, err := e.CreateCursorIterator(context.Background()) - if err != nil { - t.Fatal(err) - } - - cur, err := q.Next(context.Background(), &tsdb.CursorRequest{ - Name: []byte("cpu"), - Tags: models.ParseTags([]byte("cpu,host=A")), - Field: "value", - Ascending: true, - StartTime: 2, - EndTime: 11, - }) - if err != nil { - t.Fatal(err) - } - defer cur.Close() - - fcur := cur.(tsdb.FloatArrayCursor) - a := fcur.Next() - if !cmp.Equal([]int64{2, 3, 10, 11}, a.Timestamps) { - t.Fatal("unexpect timestamps") - } - if !cmp.Equal([]float64{1.2, 1.3, 10.1, 11.2}, a.Values) { - t.Fatal("unexpect timestamps") - } - }) - } -} - -// Ensure engine can create an ascending cursor for tsm values. -func TestEngine_CreateCursor_Descending(t *testing.T) { - t.Parallel() - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - - e := MustOpenEngine(t, index) - - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) - - if err := e.WritePointsString( - `cpu,host=A value=1.1 1`, - `cpu,host=A value=1.2 2`, - `cpu,host=A value=1.3 3`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - e.MustWriteSnapshot() - - if err := e.WritePointsString( - `cpu,host=A value=10.1 10`, - `cpu,host=A value=11.2 11`, - `cpu,host=A value=12.3 12`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - q, err := e.CreateCursorIterator(context.Background()) - if err != nil { - t.Fatal(err) - } - - cur, err := q.Next(context.Background(), &tsdb.CursorRequest{ - Name: []byte("cpu"), - Tags: models.ParseTags([]byte("cpu,host=A")), - Field: "value", - Ascending: false, - StartTime: 1, - EndTime: 10, - }) - if err != nil { - t.Fatal(err) - } - defer cur.Close() - - fcur := cur.(tsdb.FloatArrayCursor) - a := fcur.Next() - if !cmp.Equal([]int64{10, 3, 2, 1}, a.Timestamps) { - t.Fatalf("unexpect timestamps %v", a.Timestamps) - } - if !cmp.Equal([]float64{10.1, 1.3, 1.2, 1.1}, a.Values) { - t.Fatal("unexpect values") - } - }) - } -} - -// Ensure engine can create an descending iterator for cached values. 
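The two cursor tests above read a single batch from the tsdb.FloatArrayCursor. A fuller consumer would typically keep calling Next() and merge batches; the sketch below assumes the common array-cursor convention that an empty batch marks exhaustion, which these tests do not themselves rely on:

func drainFloatCursor(cur tsdb.FloatArrayCursor) (ts []int64, vs []float64) {
	defer cur.Close()
	for {
		a := cur.Next()
		if len(a.Timestamps) == 0 {
			// Empty batch: assumed end-of-data signal.
			return ts, vs
		}
		ts = append(ts, a.Timestamps...)
		vs = append(vs, a.Values...)
	}
}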
-func TestEngine_CreateIterator_SeriesKey(t *testing.T) { - t.Parallel() - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - assert := tassert.New(t) - e := MustOpenEngine(t, index) - - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) - e.CreateSeriesIfNotExists([]byte("cpu,host=A,region=east"), []byte("cpu"), models.NewTags(map[string]string{"host": "A", "region": "east"})) - e.CreateSeriesIfNotExists([]byte("cpu,host=B,region=east"), []byte("cpu"), models.NewTags(map[string]string{"host": "B", "region": "east"})) - e.CreateSeriesIfNotExists([]byte("cpu,host=C,region=east"), []byte("cpu"), models.NewTags(map[string]string{"host": "C", "region": "east"})) - e.CreateSeriesIfNotExists([]byte("cpu,host=A,region=west"), []byte("cpu"), models.NewTags(map[string]string{"host": "A", "region": "west"})) - - if err := e.WritePointsString( - `cpu,host=A,region=east value=1.1 1000000001`, - `cpu,host=B,region=east value=1.2 1000000002`, - `cpu,host=A,region=east value=1.3 1000000003`, - `cpu,host=C,region=east value=1.4 1000000004`, - `cpu,host=A,region=west value=1.5 1000000005`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - opts := query.IteratorOptions{ - Expr: influxql.MustParseExpr(`_seriesKey`), - Dimensions: []string{}, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - Condition: influxql.MustParseExpr(`host = 'A'`), - } - - itr, err := e.CreateIterator(context.Background(), "cpu", opts) - if err != nil { - t.Fatal(err) - } - - stringItr, ok := itr.(query.StringIterator) - assert.True(ok, "series iterator must be of type string") - expectedSeries := map[string]struct{}{ - "cpu,host=A,region=west": struct{}{}, - "cpu,host=A,region=east": struct{}{}, - } - var str *query.StringPoint - for str, err = stringItr.Next(); err == nil && str != (*query.StringPoint)(nil); str, err = stringItr.Next() { - _, ok := expectedSeries[str.Value] - assert.True(ok, "Saw bad key "+str.Value) - delete(expectedSeries, str.Value) - } - assert.NoError(err) - assert.NoError(itr.Close()) - - countOpts := opts - countOpts.Expr = influxql.MustParseExpr(`count(_seriesKey)`) - itr, err = e.CreateIterator(context.Background(), "cpu", countOpts) - if err != nil { - t.Fatal(err) - } - - integerIter, ok := itr.(query.IntegerIterator) - assert.True(ok, "series count iterator must be of type integer") - i, err := integerIter.Next() - assert.NoError(err) - assert.Equal(int64(2), i.Value, "must count 2 series with host=A") - i, err = integerIter.Next() - assert.NoError(err) - assert.Equal((*query.IntegerPoint)(nil), i, "count iterator has only one output") - assert.NoError(itr.Close()) - }) - } -} - -func makeBlockTypeSlice(n int) []byte { - r := make([]byte, n) - b := tsm1.BlockFloat64 - m := tsm1.BlockUnsigned + 1 - for i := 0; i < len(r); i++ { - r[i] = b % m - } - return r -} - -var blockType = influxql.Unknown - -func BenchmarkBlockTypeToInfluxQLDataType(b *testing.B) { - t := makeBlockTypeSlice(1000) - for i := 0; i < b.N; i++ { - for j := 0; j < len(t); j++ { - blockType = tsm1.BlockTypeToInfluxQLDataType(t[j]) - } - } -} - -// This test ensures that "sync: WaitGroup is reused before previous Wait has returned" is -// is not raised. 
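The concurrency test below guards wg.Wait() with a timeout so a deadlock fails the test instead of hanging it. A minimal, self-contained sketch of that pattern (a hypothetical helper, not part of this file):

func waitTimeout(wg *sync.WaitGroup, d time.Duration) bool {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
		return true // all goroutines finished in time
	case <-time.After(d):
		return false // timed out; the WaitGroup never drained
	}
}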
-func TestEngine_DisableEnableCompactions_Concurrent(t *testing.T) { - t.Parallel() - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - - e := MustOpenEngine(t, index) - - var wg sync.WaitGroup - wg.Add(2) - - go func() { - defer wg.Done() - for i := 0; i < 1000; i++ { - e.SetCompactionsEnabled(true) - e.SetCompactionsEnabled(false) - } - }() - - go func() { - defer wg.Done() - for i := 0; i < 1000; i++ { - e.SetCompactionsEnabled(false) - e.SetCompactionsEnabled(true) - } - }() - - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - - // Wait for waitgroup or fail if it takes too long. - select { - case <-time.NewTimer(30 * time.Second).C: - t.Fatalf("timed out after 30 seconds waiting for waitgroup") - case <-done: - } - }) - } -} - -func TestEngine_WritePoints_TypeConflict(t *testing.T) { - os.Setenv("INFLUXDB_SERIES_TYPE_CHECK_ENABLED", "1") - defer os.Unsetenv("INFLUXDB_SERIES_TYPE_CHECK_ENABLED") - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - - e := MustOpenEngine(t, index) - - if err := e.WritePointsString( - `cpu,host=A value=1.1 1`, - `cpu,host=A value=1i 2`, - ); err == nil { - t.Fatalf("expected field type conflict") - } else if err != tsdb.ErrFieldTypeConflict { - t.Fatalf("error mismatch: got %v, exp %v", err, tsdb.ErrFieldTypeConflict) - } - - // Series type should be a float - got, err := e.Type([]byte(tsm1.SeriesFieldKey("cpu,host=A", "value"))) - if err != nil { - t.Fatalf("unexpected error getting field type: %v", err) - } - - if exp := models.Float; got != exp { - t.Fatalf("field type mismatch: got %v, exp %v", got, exp) - } - - values := e.Cache.Values([]byte(tsm1.SeriesFieldKey("cpu,host=A", "value"))) - if got, exp := len(values), 1; got != exp { - t.Fatalf("values len mismatch: got %v, exp %v", got, exp) - } - }) - } -} - -func TestEngine_WritePoints_Reload(t *testing.T) { - t.Skip("Disabled until INFLUXDB_SERIES_TYPE_CHECK_ENABLED is enabled by default") - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - - e := MustOpenEngine(t, index) - - if err := e.WritePointsString( - `cpu,host=A value=1.1 1`, - ); err != nil { - t.Fatalf("expected field type conflict") - } - - // Series type should be a float - got, err := e.Type([]byte(tsm1.SeriesFieldKey("cpu,host=A", "value"))) - if err != nil { - t.Fatalf("unexpected error getting field type: %v", err) - } - - if exp := models.Float; got != exp { - t.Fatalf("field type mismatch: got %v, exp %v", got, exp) - } - - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("unexpected error writing snapshot: %v", err) - } - - if err := e.Reopen(); err != nil { - t.Fatalf("unexpected error reopning engine: %v", err) - } - - if err := e.WritePointsString( - `cpu,host=A value=1i 1`, - ); err != tsdb.ErrFieldTypeConflict { - t.Fatalf("expected field type conflict: got %v", err) - } - }) - } -} - -func TestEngine_Invalid_UTF8(t *testing.T) { - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - name := []byte{255, 112, 114, 111, 99} // A known invalid UTF-8 string - field := []byte{255, 110, 101, 116} // A known invalid UTF-8 string - p := MustParsePointString(fmt.Sprintf("%s,host=A %s=1.1 6000000000", name, field)) - - e, err := NewEngine(t, index) - if err != nil { - t.Fatal(err) - } - - // mock the planner so compactions don't run during the test - e.CompactionPlan = &mockPlanner{} - if err := e.Open(context.Background()); err != nil { - t.Fatal(err) - } 
- - if err := e.CreateSeriesIfNotExists(p.Key(), p.Name(), p.Tags()); err != nil { - t.Fatalf("create series index error: %v", err) - } - - if err := e.WritePoints(context.Background(), []models.Point{p}); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - - // Re-open the engine - if err := e.Reopen(); err != nil { - t.Fatal(err) - } - }) - } -} -func BenchmarkEngine_WritePoints(b *testing.B) { - batchSizes := []int{10, 100, 1000, 5000, 10000} - for _, sz := range batchSizes { - for _, index := range tsdb.RegisteredIndexes() { - e := MustOpenEngine(b, index) - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) - pp := make([]models.Point, 0, sz) - for i := 0; i < sz; i++ { - p := MustParsePointString(fmt.Sprintf("cpu,host=%d value=1.2", i)) - pp = append(pp, p) - } - - b.Run(fmt.Sprintf("%s_%d", index, sz), func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - err := e.WritePoints(context.Background(), pp) - if err != nil { - b.Fatal(err) - } - } - }) - } - } -} - -func BenchmarkEngine_WritePoints_Parallel(b *testing.B) { - batchSizes := []int{1000, 5000, 10000, 25000, 50000, 75000, 100000, 200000} - for _, sz := range batchSizes { - for _, index := range tsdb.RegisteredIndexes() { - e := MustOpenEngine(b, index) - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) - - cpus := runtime.GOMAXPROCS(0) - pp := make([]models.Point, 0, sz*cpus) - for i := 0; i < sz*cpus; i++ { - p := MustParsePointString(fmt.Sprintf("cpu,host=%d value=1.2,other=%di", i, i)) - pp = append(pp, p) - } - - b.Run(fmt.Sprintf("%s_%d", index, sz), func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - var wg sync.WaitGroup - errC := make(chan error) - for i := 0; i < cpus; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - from, to := i*sz, (i+1)*sz - err := e.WritePoints(context.Background(), pp[from:to]) - if err != nil { - errC <- err - return - } - }(i) - } - - go func() { - wg.Wait() - close(errC) - }() - - for err := range errC { - if err != nil { - b.Error(err) - } - } - } - }) - } - } -} - -var benchmarks = []struct { - name string - opt query.IteratorOptions -}{ - { - name: "Count", - opt: query.IteratorOptions{ - Expr: influxql.MustParseExpr("count(value)"), - Ascending: true, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - }, - }, - { - name: "First", - opt: query.IteratorOptions{ - Expr: influxql.MustParseExpr("first(value)"), - Ascending: true, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - }, - }, - { - name: "Last", - opt: query.IteratorOptions{ - Expr: influxql.MustParseExpr("last(value)"), - Ascending: true, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - }, - }, - { - name: "Limit", - opt: query.IteratorOptions{ - Expr: influxql.MustParseExpr("value"), - Ascending: true, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - Limit: 10, - }, - }, -} - -var benchmarkVariants = []struct { - name string - modify func(opt query.IteratorOptions) query.IteratorOptions -}{ - { - name: "All", - modify: func(opt query.IteratorOptions) query.IteratorOptions { - return opt - }, - }, - { - name: "GroupByTime_1m-1h", - modify: func(opt query.IteratorOptions) query.IteratorOptions { - opt.StartTime = 0 - opt.EndTime = int64(time.Hour) - 1 - opt.Interval = query.Interval{ - Duration: time.Minute, - } - return opt - }, - }, - { - name: "GroupByTime_1h-1d", - modify: func(opt query.IteratorOptions) query.IteratorOptions { - 
opt.StartTime = 0 - opt.EndTime = int64(24*time.Hour) - 1 - opt.Interval = query.Interval{ - Duration: time.Hour, - } - return opt - }, - }, - { - name: "GroupByTime_1m-1d", - modify: func(opt query.IteratorOptions) query.IteratorOptions { - opt.StartTime = 0 - opt.EndTime = int64(24*time.Hour) - 1 - opt.Interval = query.Interval{ - Duration: time.Minute, - } - return opt - }, - }, - { - name: "GroupByHost", - modify: func(opt query.IteratorOptions) query.IteratorOptions { - opt.Dimensions = []string{"host"} - return opt - }, - }, - { - name: "GroupByHostAndTime_1m-1h", - modify: func(opt query.IteratorOptions) query.IteratorOptions { - opt.Dimensions = []string{"host"} - opt.StartTime = 0 - opt.EndTime = int64(time.Hour) - 1 - opt.Interval = query.Interval{ - Duration: time.Minute, - } - return opt - }, - }, - { - name: "GroupByHostAndTime_1h-1d", - modify: func(opt query.IteratorOptions) query.IteratorOptions { - opt.Dimensions = []string{"host"} - opt.StartTime = 0 - opt.EndTime = int64(24*time.Hour) - 1 - opt.Interval = query.Interval{ - Duration: time.Hour, - } - return opt - }, - }, - { - name: "GroupByHostAndTime_1m-1d", - modify: func(opt query.IteratorOptions) query.IteratorOptions { - opt.Dimensions = []string{"host"} - opt.StartTime = 0 - opt.EndTime = int64(24*time.Hour) - 1 - opt.Interval = query.Interval{ - Duration: time.Hour, - } - return opt - }, - }, -} - -func BenchmarkEngine_CreateIterator(b *testing.B) { - engines := make([]*benchmarkEngine, len(sizes)) - for i, size := range sizes { - engines[i] = MustInitDefaultBenchmarkEngine(b, size.name, size.sz) - } - - for _, tt := range benchmarks { - for _, variant := range benchmarkVariants { - name := tt.name + "_" + variant.name - opt := variant.modify(tt.opt) - b.Run(name, func(b *testing.B) { - for _, e := range engines { - b.Run(e.Name, func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - itr, err := e.CreateIterator(context.Background(), "cpu", opt) - if err != nil { - b.Fatal(err) - } - query.DrainIterator(itr) - } - }) - } - }) - } - } -} - -type benchmarkEngine struct { - *Engine - Name string - PointN int -} - -var ( - hostNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J"} - sizes = []struct { - name string - sz int - }{ - {name: "1K", sz: 1000}, - {name: "100K", sz: 100000}, - {name: "1M", sz: 1000000}, - } -) - -// MustInitDefaultBenchmarkEngine creates a new engine using the default index -// and fills it with points. Reuses previous engine if the same parameters -// were used. -func MustInitDefaultBenchmarkEngine(tb testing.TB, name string, pointN int) *benchmarkEngine { - const batchSize = 1000 - if pointN%batchSize != 0 { - panic(fmt.Sprintf("point count (%d) must be a multiple of batch size (%d)", pointN, batchSize)) - } - - e := MustOpenEngine(tb, tsdb.DefaultIndex) - - // Initialize metadata. - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) - - // Generate time ascending points with jitterred time & value. 
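	// A fixed seed (rand.NewSource(0)) keeps the generated dataset identical
	// across benchmark runs; points are written in batches of batchSize, and a
	// snapshot is forced at the end so the data is flushed from the cache into
	// TSM files before the benchmarks read it.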
- rand := rand.New(rand.NewSource(0)) - for i := 0; i < pointN; i += batchSize { - var buf bytes.Buffer - for j := 0; j < batchSize; j++ { - fmt.Fprintf(&buf, "cpu,host=%s value=%d %d", - hostNames[j%len(hostNames)], - 100+rand.Intn(50)-25, - (time.Duration(i+j)*time.Second)+(time.Duration(rand.Intn(500)-250)*time.Millisecond), - ) - if j != pointN-1 { - fmt.Fprint(&buf, "\n") - } - } - - if err := e.WritePointsString(buf.String()); err != nil { - panic(err) - } - } - - if err := e.WriteSnapshot(); err != nil { - panic(err) - } - - // Force garbage collection. - runtime.GC() - - // Save engine reference for reuse. - return &benchmarkEngine{ - Engine: e, - Name: name, - PointN: pointN, - } -} - -// Engine is a test wrapper for tsm1.Engine. -type Engine struct { - *tsm1.Engine - root string - indexPath string - indexType string - index tsdb.Index - sfile *tsdb.SeriesFile -} - -// NewEngine returns a new instance of Engine at a temporary location. The -// Engine is automatically closed by tb.Cleanup when the test and all its -// subtests complete. -func NewEngine(tb testing.TB, index string) (*Engine, error) { - tb.Helper() - - root := tb.TempDir() - - db := "db0" - dbPath := filepath.Join(root, "data", db) - - if err := os.MkdirAll(dbPath, os.ModePerm); err != nil { - return nil, err - } - - // Setup series file. - sfile := tsdb.NewSeriesFile(filepath.Join(dbPath, tsdb.SeriesFileDirectory)) - sfile.Logger = zaptest.NewLogger(tb) - if err := sfile.Open(); err != nil { - return nil, err - } - - opt := tsdb.NewEngineOptions() - opt.IndexVersion = index - // Initialise series id sets. Need to do this as it's normally done at the - // store level. - seriesIDs := tsdb.NewSeriesIDSet() - opt.SeriesIDSets = seriesIDSets([]*tsdb.SeriesIDSet{seriesIDs}) - - idxPath := filepath.Join(dbPath, "index") - idx := tsdb.MustOpenIndex(1, db, idxPath, seriesIDs, sfile, opt) - - tsm1Engine := tsm1.NewEngine(1, idx, filepath.Join(root, "data"), filepath.Join(root, "wal"), sfile, opt).(*tsm1.Engine) - - e := &Engine{ - Engine: tsm1Engine, - root: root, - indexPath: idxPath, - indexType: index, - index: idx, - sfile: sfile, - } - tb.Cleanup(func() { e.Close() }) - - return e, nil -} - -// MustOpenEngine returns a new, open instance of Engine. -func MustOpenEngine(tb testing.TB, index string) *Engine { - tb.Helper() - - e, err := NewEngine(tb, index) - if err != nil { - panic(err) - } - - if err := e.Open(context.Background()); err != nil { - panic(err) - } - return e -} - -// Close closes the engine and removes all underlying data. -func (e *Engine) Close() error { - return e.close(true) -} - -func (e *Engine) close(cleanup bool) error { - if e.index != nil { - e.index.Close() - } - - if e.sfile != nil { - e.sfile.Close() - } - - defer func() { - if cleanup { - os.RemoveAll(e.root) - } - }() - return e.Engine.Close() -} - -// Reopen closes and reopens the engine. -func (e *Engine) Reopen() error { - // Close engine without removing underlying engine data. - if err := e.close(false); err != nil { - return err - } - - // Re-open series file. Must create a new series file using the same data. - e.sfile = tsdb.NewSeriesFile(e.sfile.Path()) - if err := e.sfile.Open(); err != nil { - return err - } - - db := path.Base(e.root) - opt := tsdb.NewEngineOptions() - - // Re-initialise the series id set - seriesIDSet := tsdb.NewSeriesIDSet() - opt.SeriesIDSets = seriesIDSets([]*tsdb.SeriesIDSet{seriesIDSet}) - - // Re-open index. 
- e.index = tsdb.MustOpenIndex(1, db, e.indexPath, seriesIDSet, e.sfile, opt) - - // Re-initialize engine. - e.Engine = tsm1.NewEngine(1, e.index, filepath.Join(e.root, "data"), filepath.Join(e.root, "wal"), e.sfile, opt).(*tsm1.Engine) - - // Reopen engine - if err := e.Engine.Open(context.Background()); err != nil { - return err - } - - // Reload series data into index (no-op on TSI). - return e.LoadMetadataIndex(1, e.index) -} - -// SeriesIDSet provides access to the underlying series id bitset in the engine's -// index. It will panic if the underlying index does not have a SeriesIDSet -// method. -func (e *Engine) SeriesIDSet() *tsdb.SeriesIDSet { - return e.index.SeriesIDSet() -} - -// AddSeries adds the provided series data to the index and writes a point to -// the engine with default values for a field and a time of now. -func (e *Engine) AddSeries(name string, tags map[string]string) error { - point, err := models.NewPoint(name, models.NewTags(tags), models.Fields{"v": 1.0}, time.Now()) - if err != nil { - return err - } - return e.writePoints(point) -} - -// WritePointsString calls WritePointsString on the underlying engine, but also -// adds the associated series to the index. -func (e *Engine) WritePointsString(ptstr ...string) error { - points, err := models.ParsePointsString(strings.Join(ptstr, "\n")) - if err != nil { - return err - } - return e.writePoints(points...) -} - -// writePoints adds the series for the provided points to the index, and writes -// the point data to the engine. -func (e *Engine) writePoints(points ...models.Point) error { - for _, point := range points { - // Write into the index. - if err := e.Engine.CreateSeriesIfNotExists(point.Key(), point.Name(), point.Tags()); err != nil { - return err - } - } - // Write the points into the cache/wal. - return e.WritePoints(context.Background(), points) -} - -// MustAddSeries calls AddSeries, panicking if there is an error. -func (e *Engine) MustAddSeries(name string, tags map[string]string) { - if err := e.AddSeries(name, tags); err != nil { - panic(err) - } -} - -// MustWriteSnapshot forces a snapshot of the engine. Panic on error. -func (e *Engine) MustWriteSnapshot() { - if err := e.WriteSnapshot(); err != nil { - panic(err) - } -} - -// SeriesFile is a test wrapper for tsdb.SeriesFile. -type SeriesFile struct { - *tsdb.SeriesFile -} - -// NewSeriesFile returns a new instance of SeriesFile with a temporary file path. -func NewSeriesFile(tb testing.TB) *SeriesFile { - return &SeriesFile{SeriesFile: tsdb.NewSeriesFile(tb.TempDir())} -} - -// MustOpenSeriesFile returns a new, open instance of SeriesFile. Panic on error. -func MustOpenSeriesFile(tb testing.TB) *SeriesFile { - f := NewSeriesFile(tb) - if err := f.Open(); err != nil { - panic(err) - } - return f -} - -// Close closes the log file and removes it from disk. -func (f *SeriesFile) Close() { - defer os.RemoveAll(f.Path()) - if err := f.SeriesFile.Close(); err != nil { - panic(err) - } -} - -// MustParsePointsString parses points from a string. Panic on error. -func MustParsePointsString(buf string) []models.Point { - a, err := models.ParsePointsString(buf) - if err != nil { - panic(err) - } - return a -} - -// MustParsePointString parses the first point from a string. Panic on error. 
-func MustParsePointString(buf string) models.Point { return MustParsePointsString(buf)[0] } - -type mockPlanner struct{} - -func (m *mockPlanner) Plan(lastWrite time.Time) ([]tsm1.CompactionGroup, int64) { return nil, 0 } -func (m *mockPlanner) PlanLevel(level int) ([]tsm1.CompactionGroup, int64) { return nil, 0 } -func (m *mockPlanner) PlanOptimize() ([]tsm1.CompactionGroup, int64) { return nil, 0 } -func (m *mockPlanner) Release(groups []tsm1.CompactionGroup) {} -func (m *mockPlanner) FullyCompacted() (bool, string) { return false, "not compacted" } -func (m *mockPlanner) ForceFull() {} -func (m *mockPlanner) SetFileStore(fs *tsm1.FileStore) {} - -// ParseTags returns an instance of Tags for a comma-delimited list of key/values. -func ParseTags(s string) query.Tags { - m := make(map[string]string) - for _, kv := range strings.Split(s, ",") { - a := strings.Split(kv, "=") - m[a[0]] = a[1] - } - return query.NewTags(m) -} - -type seriesIterator struct { - keys [][]byte -} - -type series struct { - name []byte - tags models.Tags - deleted bool -} - -func (s series) Name() []byte { return s.name } -func (s series) Tags() models.Tags { return s.tags } -func (s series) Deleted() bool { return s.deleted } -func (s series) Expr() influxql.Expr { return nil } - -func (itr *seriesIterator) Close() error { return nil } - -func (itr *seriesIterator) Next() (tsdb.SeriesElem, error) { - if len(itr.keys) == 0 { - return nil, nil - } - name, tags := models.ParseKeyBytes(itr.keys[0]) - s := series{name: name, tags: tags} - itr.keys = itr.keys[1:] - return s, nil -} - -type seriesIDSets []*tsdb.SeriesIDSet - -func (a seriesIDSets) ForEach(f func(ids *tsdb.SeriesIDSet)) error { - for _, v := range a { - f(v) - } - return nil -} diff --git a/tsdb/engine/tsm1/file_store.gen.go b/tsdb/engine/tsm1/file_store.gen.go deleted file mode 100644 index d809ca2cdad..00000000000 --- a/tsdb/engine/tsm1/file_store.gen.go +++ /dev/null @@ -1,933 +0,0 @@ -// Code generated by file_store.gen.go.tmpl. DO NOT EDIT. - -package tsm1 - -// ReadFloatBlock reads the next block as a set of float values. -func (c *KeyCursor) ReadFloatBlock(buf *[]FloatValue) ([]FloatValue, error) { -LOOP: - // No matching blocks to decode - if len(c.current) == 0 { - return nil, nil - } - - // First block is the oldest block containing the points we're searching for. - first := c.current[0] - *buf = (*buf)[:0] - var values FloatValues - values, err := first.r.ReadFloatBlockAt(&first.entry, buf) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(floatBlocksDecodedCounter).Add(1) - c.col.GetCounter(floatBlocksSizeCounter).Add(int64(first.entry.Size)) - } - - // Remove values we already read - values = values.Exclude(first.readMin, first.readMax) - - // Remove any tombstones - tombstones := first.r.TombstoneRange(c.key) - values = excludeTombstonesFloatValues(tombstones, values) - // If there are no values in this first block (all tombstoned or previously read) and - // we have more potential blocks too search. Try again. 
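	// In other words: drop the exhausted block from c.current and jump back to
	// LOOP to decode the next-oldest block, rather than returning an empty
	// result to the caller.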
- if values.Len() == 0 && len(c.current) > 0 { - c.current = c.current[1:] - goto LOOP - } - - // Only one block with this key and time range so return it - if len(c.current) == 1 { - if values.Len() > 0 { - first.markRead(values.MinTime(), values.MaxTime()) - } - return values, nil - } - - // Use the current block time range as our overlapping window - minT, maxT := first.readMin, first.readMax - if values.Len() > 0 { - minT, maxT = values.MinTime(), values.MaxTime() - } - if c.ascending { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the min time range to ensure values are returned in ascending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MinTime < minT && !cur.read() { - minT = cur.entry.MinTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MaxTime > maxT { - maxT = cur.entry.MaxTime - } - values = values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - var a []FloatValue - var v FloatValues - v, err := cur.r.ReadFloatBlockAt(&cur.entry, &a) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(floatBlocksDecodedCounter).Add(1) - c.col.GetCounter(floatBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - v = excludeTombstonesFloatValues(tombstones, v) - - // Remove values we already read - v = v.Exclude(cur.readMin, cur.readMax) - - if v.Len() > 0 { - // Only use values in the overlapping window - v = v.Include(minT, maxT) - // Merge the remaining values with the existing - values = values.Merge(v) - } - cur.markRead(minT, maxT) - } - - } else { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the max time range to ensure values are returned in descending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MaxTime > maxT && !cur.read() { - maxT = cur.entry.MaxTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MinTime < minT { - minT = cur.entry.MinTime - } - values = values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. 
- for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - var a []FloatValue - var v FloatValues - v, err := cur.r.ReadFloatBlockAt(&cur.entry, &a) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(floatBlocksDecodedCounter).Add(1) - c.col.GetCounter(floatBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - v = excludeTombstonesFloatValues(tombstones, v) - - // Remove values we already read - v = v.Exclude(cur.readMin, cur.readMax) - - // If the block we decoded should have all of it's values included, mark it as read so we - // don't use it again. - if v.Len() > 0 { - v = v.Include(minT, maxT) - // Merge the remaining values with the existing - values = v.Merge(values) - } - cur.markRead(minT, maxT) - } - } - - first.markRead(minT, maxT) - - return values, err -} - -func excludeTombstonesFloatValues(t []TimeRange, values FloatValues) FloatValues { - for i := range t { - values = values.Exclude(t[i].Min, t[i].Max) - } - return values -} - -// ReadIntegerBlock reads the next block as a set of integer values. -func (c *KeyCursor) ReadIntegerBlock(buf *[]IntegerValue) ([]IntegerValue, error) { -LOOP: - // No matching blocks to decode - if len(c.current) == 0 { - return nil, nil - } - - // First block is the oldest block containing the points we're searching for. - first := c.current[0] - *buf = (*buf)[:0] - var values IntegerValues - values, err := first.r.ReadIntegerBlockAt(&first.entry, buf) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(integerBlocksDecodedCounter).Add(1) - c.col.GetCounter(integerBlocksSizeCounter).Add(int64(first.entry.Size)) - } - - // Remove values we already read - values = values.Exclude(first.readMin, first.readMax) - - // Remove any tombstones - tombstones := first.r.TombstoneRange(c.key) - values = excludeTombstonesIntegerValues(tombstones, values) - // If there are no values in this first block (all tombstoned or previously read) and - // we have more potential blocks too search. Try again. - if values.Len() == 0 && len(c.current) > 0 { - c.current = c.current[1:] - goto LOOP - } - - // Only one block with this key and time range so return it - if len(c.current) == 1 { - if values.Len() > 0 { - first.markRead(values.MinTime(), values.MaxTime()) - } - return values, nil - } - - // Use the current block time range as our overlapping window - minT, maxT := first.readMin, first.readMax - if values.Len() > 0 { - minT, maxT = values.MinTime(), values.MaxTime() - } - if c.ascending { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the min time range to ensure values are returned in ascending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MinTime < minT && !cur.read() { - minT = cur.entry.MinTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. 
- if cur.entry.MaxTime > maxT { - maxT = cur.entry.MaxTime - } - values = values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - var a []IntegerValue - var v IntegerValues - v, err := cur.r.ReadIntegerBlockAt(&cur.entry, &a) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(integerBlocksDecodedCounter).Add(1) - c.col.GetCounter(integerBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - v = excludeTombstonesIntegerValues(tombstones, v) - - // Remove values we already read - v = v.Exclude(cur.readMin, cur.readMax) - - if v.Len() > 0 { - // Only use values in the overlapping window - v = v.Include(minT, maxT) - // Merge the remaining values with the existing - values = values.Merge(v) - } - cur.markRead(minT, maxT) - } - - } else { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the max time range to ensure values are returned in descending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MaxTime > maxT && !cur.read() { - maxT = cur.entry.MaxTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MinTime < minT { - minT = cur.entry.MinTime - } - values = values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - var a []IntegerValue - var v IntegerValues - v, err := cur.r.ReadIntegerBlockAt(&cur.entry, &a) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(integerBlocksDecodedCounter).Add(1) - c.col.GetCounter(integerBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - v = excludeTombstonesIntegerValues(tombstones, v) - - // Remove values we already read - v = v.Exclude(cur.readMin, cur.readMax) - - // If the block we decoded should have all of it's values included, mark it as read so we - // don't use it again. - if v.Len() > 0 { - v = v.Include(minT, maxT) - // Merge the remaining values with the existing - values = v.Merge(values) - } - cur.markRead(minT, maxT) - } - } - - first.markRead(minT, maxT) - - return values, err -} - -func excludeTombstonesIntegerValues(t []TimeRange, values IntegerValues) IntegerValues { - for i := range t { - values = values.Exclude(t[i].Min, t[i].Max) - } - return values -} - -// ReadUnsignedBlock reads the next block as a set of unsigned values. 
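// It follows the same decode-and-merge logic as ReadFloatBlock above; the five
// per-type Read*Block variants in this file are generated from
// file_store.gen.go.tmpl.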
-func (c *KeyCursor) ReadUnsignedBlock(buf *[]UnsignedValue) ([]UnsignedValue, error) { -LOOP: - // No matching blocks to decode - if len(c.current) == 0 { - return nil, nil - } - - // First block is the oldest block containing the points we're searching for. - first := c.current[0] - *buf = (*buf)[:0] - var values UnsignedValues - values, err := first.r.ReadUnsignedBlockAt(&first.entry, buf) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(unsignedBlocksDecodedCounter).Add(1) - c.col.GetCounter(unsignedBlocksSizeCounter).Add(int64(first.entry.Size)) - } - - // Remove values we already read - values = values.Exclude(first.readMin, first.readMax) - - // Remove any tombstones - tombstones := first.r.TombstoneRange(c.key) - values = excludeTombstonesUnsignedValues(tombstones, values) - // If there are no values in this first block (all tombstoned or previously read) and - // we have more potential blocks too search. Try again. - if values.Len() == 0 && len(c.current) > 0 { - c.current = c.current[1:] - goto LOOP - } - - // Only one block with this key and time range so return it - if len(c.current) == 1 { - if values.Len() > 0 { - first.markRead(values.MinTime(), values.MaxTime()) - } - return values, nil - } - - // Use the current block time range as our overlapping window - minT, maxT := first.readMin, first.readMax - if values.Len() > 0 { - minT, maxT = values.MinTime(), values.MaxTime() - } - if c.ascending { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the min time range to ensure values are returned in ascending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MinTime < minT && !cur.read() { - minT = cur.entry.MinTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MaxTime > maxT { - maxT = cur.entry.MaxTime - } - values = values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. 
- for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - var a []UnsignedValue - var v UnsignedValues - v, err := cur.r.ReadUnsignedBlockAt(&cur.entry, &a) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(unsignedBlocksDecodedCounter).Add(1) - c.col.GetCounter(unsignedBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - v = excludeTombstonesUnsignedValues(tombstones, v) - - // Remove values we already read - v = v.Exclude(cur.readMin, cur.readMax) - - if v.Len() > 0 { - // Only use values in the overlapping window - v = v.Include(minT, maxT) - // Merge the remaining values with the existing - values = values.Merge(v) - } - cur.markRead(minT, maxT) - } - - } else { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the max time range to ensure values are returned in descending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MaxTime > maxT && !cur.read() { - maxT = cur.entry.MaxTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MinTime < minT { - minT = cur.entry.MinTime - } - values = values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - var a []UnsignedValue - var v UnsignedValues - v, err := cur.r.ReadUnsignedBlockAt(&cur.entry, &a) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(unsignedBlocksDecodedCounter).Add(1) - c.col.GetCounter(unsignedBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - v = excludeTombstonesUnsignedValues(tombstones, v) - - // Remove values we already read - v = v.Exclude(cur.readMin, cur.readMax) - - // If the block we decoded should have all of it's values included, mark it as read so we - // don't use it again. - if v.Len() > 0 { - v = v.Include(minT, maxT) - // Merge the remaining values with the existing - values = v.Merge(values) - } - cur.markRead(minT, maxT) - } - } - - first.markRead(minT, maxT) - - return values, err -} - -func excludeTombstonesUnsignedValues(t []TimeRange, values UnsignedValues) UnsignedValues { - for i := range t { - values = values.Exclude(t[i].Min, t[i].Max) - } - return values -} - -// ReadStringBlock reads the next block as a set of string values. -func (c *KeyCursor) ReadStringBlock(buf *[]StringValue) ([]StringValue, error) { -LOOP: - // No matching blocks to decode - if len(c.current) == 0 { - return nil, nil - } - - // First block is the oldest block containing the points we're searching for. 
- first := c.current[0] - *buf = (*buf)[:0] - var values StringValues - values, err := first.r.ReadStringBlockAt(&first.entry, buf) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(stringBlocksDecodedCounter).Add(1) - c.col.GetCounter(stringBlocksSizeCounter).Add(int64(first.entry.Size)) - } - - // Remove values we already read - values = values.Exclude(first.readMin, first.readMax) - - // Remove any tombstones - tombstones := first.r.TombstoneRange(c.key) - values = excludeTombstonesStringValues(tombstones, values) - // If there are no values in this first block (all tombstoned or previously read) and - // we have more potential blocks too search. Try again. - if values.Len() == 0 && len(c.current) > 0 { - c.current = c.current[1:] - goto LOOP - } - - // Only one block with this key and time range so return it - if len(c.current) == 1 { - if values.Len() > 0 { - first.markRead(values.MinTime(), values.MaxTime()) - } - return values, nil - } - - // Use the current block time range as our overlapping window - minT, maxT := first.readMin, first.readMax - if values.Len() > 0 { - minT, maxT = values.MinTime(), values.MaxTime() - } - if c.ascending { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the min time range to ensure values are returned in ascending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MinTime < minT && !cur.read() { - minT = cur.entry.MinTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MaxTime > maxT { - maxT = cur.entry.MaxTime - } - values = values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. 
- for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - var a []StringValue - var v StringValues - v, err := cur.r.ReadStringBlockAt(&cur.entry, &a) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(stringBlocksDecodedCounter).Add(1) - c.col.GetCounter(stringBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - v = excludeTombstonesStringValues(tombstones, v) - - // Remove values we already read - v = v.Exclude(cur.readMin, cur.readMax) - - if v.Len() > 0 { - // Only use values in the overlapping window - v = v.Include(minT, maxT) - // Merge the remaining values with the existing - values = values.Merge(v) - } - cur.markRead(minT, maxT) - } - - } else { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the max time range to ensure values are returned in descending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MaxTime > maxT && !cur.read() { - maxT = cur.entry.MaxTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MinTime < minT { - minT = cur.entry.MinTime - } - values = values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - var a []StringValue - var v StringValues - v, err := cur.r.ReadStringBlockAt(&cur.entry, &a) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(stringBlocksDecodedCounter).Add(1) - c.col.GetCounter(stringBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - v = excludeTombstonesStringValues(tombstones, v) - - // Remove values we already read - v = v.Exclude(cur.readMin, cur.readMax) - - // If the block we decoded should have all of it's values included, mark it as read so we - // don't use it again. - if v.Len() > 0 { - v = v.Include(minT, maxT) - // Merge the remaining values with the existing - values = v.Merge(values) - } - cur.markRead(minT, maxT) - } - } - - first.markRead(minT, maxT) - - return values, err -} - -func excludeTombstonesStringValues(t []TimeRange, values StringValues) StringValues { - for i := range t { - values = values.Exclude(t[i].Min, t[i].Max) - } - return values -} - -// ReadBooleanBlock reads the next block as a set of boolean values. -func (c *KeyCursor) ReadBooleanBlock(buf *[]BooleanValue) ([]BooleanValue, error) { -LOOP: - // No matching blocks to decode - if len(c.current) == 0 { - return nil, nil - } - - // First block is the oldest block containing the points we're searching for. 
- first := c.current[0] - *buf = (*buf)[:0] - var values BooleanValues - values, err := first.r.ReadBooleanBlockAt(&first.entry, buf) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(booleanBlocksDecodedCounter).Add(1) - c.col.GetCounter(booleanBlocksSizeCounter).Add(int64(first.entry.Size)) - } - - // Remove values we already read - values = values.Exclude(first.readMin, first.readMax) - - // Remove any tombstones - tombstones := first.r.TombstoneRange(c.key) - values = excludeTombstonesBooleanValues(tombstones, values) - // If there are no values in this first block (all tombstoned or previously read) and - // we have more potential blocks too search. Try again. - if values.Len() == 0 && len(c.current) > 0 { - c.current = c.current[1:] - goto LOOP - } - - // Only one block with this key and time range so return it - if len(c.current) == 1 { - if values.Len() > 0 { - first.markRead(values.MinTime(), values.MaxTime()) - } - return values, nil - } - - // Use the current block time range as our overlapping window - minT, maxT := first.readMin, first.readMax - if values.Len() > 0 { - minT, maxT = values.MinTime(), values.MaxTime() - } - if c.ascending { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the min time range to ensure values are returned in ascending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MinTime < minT && !cur.read() { - minT = cur.entry.MinTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MaxTime > maxT { - maxT = cur.entry.MaxTime - } - values = values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. 
- for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - var a []BooleanValue - var v BooleanValues - v, err := cur.r.ReadBooleanBlockAt(&cur.entry, &a) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(booleanBlocksDecodedCounter).Add(1) - c.col.GetCounter(booleanBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - v = excludeTombstonesBooleanValues(tombstones, v) - - // Remove values we already read - v = v.Exclude(cur.readMin, cur.readMax) - - if v.Len() > 0 { - // Only use values in the overlapping window - v = v.Include(minT, maxT) - // Merge the remaining values with the existing - values = values.Merge(v) - } - cur.markRead(minT, maxT) - } - - } else { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the max time range to ensure values are returned in descending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MaxTime > maxT && !cur.read() { - maxT = cur.entry.MaxTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MinTime < minT { - minT = cur.entry.MinTime - } - values = values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - var a []BooleanValue - var v BooleanValues - v, err := cur.r.ReadBooleanBlockAt(&cur.entry, &a) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(booleanBlocksDecodedCounter).Add(1) - c.col.GetCounter(booleanBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - v = excludeTombstonesBooleanValues(tombstones, v) - - // Remove values we already read - v = v.Exclude(cur.readMin, cur.readMax) - - // If the block we decoded should have all of it's values included, mark it as read so we - // don't use it again. 
- if v.Len() > 0 { - v = v.Include(minT, maxT) - // Merge the remaining values with the existing - values = v.Merge(values) - } - cur.markRead(minT, maxT) - } - } - - first.markRead(minT, maxT) - - return values, err -} - -func excludeTombstonesBooleanValues(t []TimeRange, values BooleanValues) BooleanValues { - for i := range t { - values = values.Exclude(t[i].Min, t[i].Max) - } - return values -} diff --git a/tsdb/engine/tsm1/file_store.gen.go.tmpl b/tsdb/engine/tsm1/file_store.gen.go.tmpl deleted file mode 100644 index 809937bdfbb..00000000000 --- a/tsdb/engine/tsm1/file_store.gen.go.tmpl +++ /dev/null @@ -1,281 +0,0 @@ -package tsm1 - -{{$isArray := .D.isArray}} -{{$isNotArray := not $isArray}} - -{{if $isArray -}} -import ( - "github.com/influxdata/influxdb/v2/tsdb" -) -{{end}} - -{{range .In}} -{{if $isArray -}} -// Read{{.Name}}ArrayBlock reads the next block as a set of {{.name}} values. -func (c *KeyCursor) Read{{.Name}}ArrayBlock(values *tsdb.{{.Name}}Array) (*tsdb.{{.Name}}Array, error) { -LOOP: - // No matching blocks to decode - if len(c.current) == 0 { - values.Timestamps = values.Timestamps[:0] - values.Values = values.Values[:0] - return values, nil - } -{{else}} -// Read{{.Name}}Block reads the next block as a set of {{.name}} values. -func (c *KeyCursor) Read{{.Name}}Block(buf *[]{{.Name}}Value) ([]{{.Name}}Value, error) { -LOOP: - // No matching blocks to decode - if len(c.current) == 0 { - return nil, nil - } -{{end}} - - // First block is the oldest block containing the points we're searching for. - first := c.current[0] -{{if $isArray -}} - err := first.r.Read{{.Name}}ArrayBlockAt(&first.entry, values) -{{else -}} - *buf = (*buf)[:0] - var values {{.Name}}Values - values, err := first.r.Read{{.Name}}BlockAt(&first.entry, buf) -{{end -}} - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter({{.name}}BlocksDecodedCounter).Add(1) - c.col.GetCounter({{.name}}BlocksSizeCounter).Add(int64(first.entry.Size)) - } - - // Remove values we already read -{{if $isArray -}} - values.Exclude(first.readMin, first.readMax) -{{else -}} - values = values.Exclude(first.readMin, first.readMax) -{{end}} - - // Remove any tombstones - tombstones := first.r.TombstoneRange(c.key) -{{if $isArray -}} - excludeTombstones{{.Name}}Array(tombstones, values) -{{else -}} - values = excludeTombstones{{.Name}}Values(tombstones, values) -{{end -}} - - // If there are no values in this first block (all tombstoned or previously read) and - // we have more potential blocks too search. Try again. 
- if values.Len() == 0 && len(c.current) > 0 { - c.current = c.current[1:] - goto LOOP - } - - // Only one block with this key and time range so return it - if len(c.current) == 1 { - if values.Len() > 0 { - first.markRead(values.MinTime(), values.MaxTime()) - } - return values, nil - } - - // Use the current block time range as our overlapping window - minT, maxT := first.readMin, first.readMax - if values.Len() > 0 { - minT, maxT = values.MinTime(), values.MaxTime() - } - if c.ascending { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the min time range to ensure values are returned in ascending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MinTime < minT && !cur.read() { - minT = cur.entry.MinTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MaxTime > maxT { - maxT = cur.entry.MaxTime - } -{{if $isArray -}} - values.Include(minT, maxT) -{{else -}} - values = values.Include(minT, maxT) -{{end -}} - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - -{{if $isArray -}} - v := &tsdb.{{.Name}}Array{} - err := cur.r.Read{{.Name}}ArrayBlockAt(&cur.entry, v) -{{else -}} - var a []{{.Name}}Value - var v {{.Name}}Values - v, err := cur.r.Read{{.Name}}BlockAt(&cur.entry, &a) -{{end -}} - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter({{.name}}BlocksDecodedCounter).Add(1) - c.col.GetCounter({{.name}}BlocksSizeCounter).Add(int64(cur.entry.Size)) - } - - tombstones := cur.r.TombstoneRange(c.key) -{{if $isArray -}} - // Remove any tombstoned values - excludeTombstones{{.Name}}Array(tombstones, v) - - // Remove values we already read - v.Exclude(cur.readMin, cur.readMax) - - if v.Len() > 0 { - // Only use values in the overlapping window - v.Include(minT, maxT) - // Merge the remaining values with the existing - values.Merge(v) - } -{{else -}} - // Remove any tombstoned values - v = excludeTombstones{{.Name}}Values(tombstones, v) - - // Remove values we already read - v = v.Exclude(cur.readMin, cur.readMax) - - if v.Len() > 0 { - // Only use values in the overlapping window - v = v.Include(minT, maxT) - // Merge the remaining values with the existing - values = values.Merge(v) - } -{{end -}} - cur.markRead(minT, maxT) - } - - } else { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the max time range to ensure values are returned in descending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MaxTime > maxT && !cur.read() { - maxT = cur.entry.MaxTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of 
the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MinTime < minT { - minT = cur.entry.MinTime - } -{{if $isArray -}} - values.Include(minT, maxT) -{{else -}} - values = values.Include(minT, maxT) -{{end -}} - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - -{{if $isArray -}} - v := &tsdb.{{.Name}}Array{} - err := cur.r.Read{{.Name}}ArrayBlockAt(&cur.entry, v) -{{else -}} - var a []{{.Name}}Value - var v {{.Name}}Values - v, err := cur.r.Read{{.Name}}BlockAt(&cur.entry, &a) -{{end -}} - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter({{.name}}BlocksDecodedCounter).Add(1) - c.col.GetCounter({{.name}}BlocksSizeCounter).Add(int64(cur.entry.Size)) - } - tombstones := cur.r.TombstoneRange(c.key) -{{if $isArray -}} - // Remove any tombstoned values - excludeTombstones{{.Name}}Array(tombstones, v) - - // Remove values we already read - v.Exclude(cur.readMin, cur.readMax) - - // If the block we decoded should have all of it's values included, mark it as read so we - // don't use it again. - if v.Len() > 0 { - v.Include(minT, maxT) - // Merge the remaining values with the existing - v.Merge(values) - *values = *v - } -{{else -}} - // Remove any tombstoned values - v = excludeTombstones{{.Name}}Values(tombstones, v) - - // Remove values we already read - v = v.Exclude(cur.readMin, cur.readMax) - - // If the block we decoded should have all of it's values included, mark it as read so we - // don't use it again. 
- if v.Len() > 0 { - v = v.Include(minT, maxT) - // Merge the remaining values with the existing - values = v.Merge(values) - } -{{end -}} - cur.markRead(minT, maxT) - } - } - - first.markRead(minT, maxT) - - return values, err -} - -{{if $isArray -}} -func excludeTombstones{{.Name}}Array(t []TimeRange, values *tsdb.{{.Name}}Array) { - for i := range t { - values.Exclude(t[i].Min, t[i].Max) - } -} -{{else -}} -func excludeTombstones{{.Name}}Values(t []TimeRange, values {{.Name}}Values) {{.Name}}Values { - for i := range t { - values = values.Exclude(t[i].Min, t[i].Max) - } - return values -} -{{end -}} -{{ end }} diff --git a/tsdb/engine/tsm1/file_store.gen.go.tmpldata b/tsdb/engine/tsm1/file_store.gen.go.tmpldata deleted file mode 100644 index 236ba310ba4..00000000000 --- a/tsdb/engine/tsm1/file_store.gen.go.tmpldata +++ /dev/null @@ -1,22 +0,0 @@ -[ - { - "Name":"Float", - "name":"float" - }, - { - "Name":"Integer", - "name":"integer" - }, - { - "Name":"Unsigned", - "name":"unsigned" - }, - { - "Name":"String", - "name":"string" - }, - { - "Name":"Boolean", - "name":"boolean" - } -] diff --git a/tsdb/engine/tsm1/file_store.go b/tsdb/engine/tsm1/file_store.go deleted file mode 100644 index 765002422d6..00000000000 --- a/tsdb/engine/tsm1/file_store.go +++ /dev/null @@ -1,1577 +0,0 @@ -package tsm1 - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "math" - "os" - "path/filepath" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "syscall" - "time" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/pkg/file" - "github.com/influxdata/influxdb/v2/pkg/limiter" - "github.com/influxdata/influxdb/v2/pkg/metrics" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -const ( - // The extension used to describe temporary snapshot files. - TmpTSMFileExtension = "tmp" - - // The extension used to describe corrupt snapshot files. - BadTSMFileExtension = "bad" -) - -// TSMFile represents an on-disk TSM file. -type TSMFile interface { - // Path returns the underlying file path for the TSMFile. If the file - // has not be written or loaded from disk, the zero value is returned. - Path() string - - // Read returns all the values in the block where time t resides. - Read(key []byte, t int64) ([]Value, error) - - // ReadAt returns all the values in the block identified by entry. - ReadAt(entry *IndexEntry, values []Value) ([]Value, error) - ReadFloatBlockAt(entry *IndexEntry, values *[]FloatValue) ([]FloatValue, error) - ReadFloatArrayBlockAt(entry *IndexEntry, values *tsdb.FloatArray) error - ReadIntegerBlockAt(entry *IndexEntry, values *[]IntegerValue) ([]IntegerValue, error) - ReadIntegerArrayBlockAt(entry *IndexEntry, values *tsdb.IntegerArray) error - ReadUnsignedBlockAt(entry *IndexEntry, values *[]UnsignedValue) ([]UnsignedValue, error) - ReadUnsignedArrayBlockAt(entry *IndexEntry, values *tsdb.UnsignedArray) error - ReadStringBlockAt(entry *IndexEntry, values *[]StringValue) ([]StringValue, error) - ReadStringArrayBlockAt(entry *IndexEntry, values *tsdb.StringArray) error - ReadBooleanBlockAt(entry *IndexEntry, values *[]BooleanValue) ([]BooleanValue, error) - ReadBooleanArrayBlockAt(entry *IndexEntry, values *tsdb.BooleanArray) error - - // Entries returns the index entries for all blocks for the given key. 
- Entries(key []byte) []IndexEntry - ReadEntries(key []byte, entries *[]IndexEntry) []IndexEntry - - // Returns true if the TSMFile may contain a value with the specified - // key and time. - ContainsValue(key []byte, t int64) bool - - // Contains returns true if the file contains any values for the given - // key. - Contains(key []byte) bool - - // OverlapsTimeRange returns true if the time range of the file intersect min and max. - OverlapsTimeRange(min, max int64) bool - - // OverlapsKeyRange returns true if the key range of the file intersects min and max. - OverlapsKeyRange(min, max []byte) bool - - // TimeRange returns the min and max time across all keys in the file. - TimeRange() (int64, int64) - - // TombstoneRange returns ranges of time that are deleted for the given key. - TombstoneRange(key []byte) []TimeRange - - // KeyRange returns the min and max keys in the file. - KeyRange() ([]byte, []byte) - - // KeyCount returns the number of distinct keys in the file. - KeyCount() int - - // Seek returns the position in the index with the key <= key. - Seek(key []byte) int - - // KeyAt returns the key located at index position idx. - KeyAt(idx int) ([]byte, byte) - - // Type returns the block type of the values stored for the key. Returns one of - // BlockFloat64, BlockInt64, BlockBoolean, BlockString. If key does not exist, - // an error is returned. - Type(key []byte) (byte, error) - - // BatchDelete return a BatchDeleter that allows for multiple deletes in batches - // and group commit or rollback. - BatchDelete() BatchDeleter - - // Delete removes the keys from the set of keys available in this file. - Delete(keys [][]byte) error - - // DeleteRange removes the values for keys between timestamps min and max. - DeleteRange(keys [][]byte, min, max int64) error - - // HasTombstones returns true if file contains values that have been deleted. - HasTombstones() bool - - // TombstoneStats returns the tombstone filestats if there are any tombstones - // written for this file. - TombstoneStats() TombstoneStat - - // Close closes the underlying file resources. - Close() error - - // Size returns the size of the file on disk in bytes. - Size() uint32 - - // Rename renames the existing TSM file to a new name and replaces the mmap backing slice using the new - // file name. Index and Reader state are not re-initialized. - Rename(path string) error - - // Remove deletes the file from the filesystem. - Remove() error - - // InUse returns true if the file is currently in use by queries. - InUse() bool - - // Ref records that this file is actively in use. - Ref() - - // Unref records that this file is no longer in use. - Unref() - - // Stats returns summary information about the TSM file. - Stats() FileStat - - // BlockIterator returns an iterator pointing to the first block in the file and - // allows sequential iteration to each and every block. - BlockIterator() *BlockIterator - - // Free releases any resources held by the FileStore to free up system resources. 
- Free() error -} - -var ( - floatBlocksDecodedCounter = metrics.MustRegisterCounter("float_blocks_decoded", metrics.WithGroup(tsmGroup)) - floatBlocksSizeCounter = metrics.MustRegisterCounter("float_blocks_size_bytes", metrics.WithGroup(tsmGroup)) - integerBlocksDecodedCounter = metrics.MustRegisterCounter("integer_blocks_decoded", metrics.WithGroup(tsmGroup)) - integerBlocksSizeCounter = metrics.MustRegisterCounter("integer_blocks_size_bytes", metrics.WithGroup(tsmGroup)) - unsignedBlocksDecodedCounter = metrics.MustRegisterCounter("unsigned_blocks_decoded", metrics.WithGroup(tsmGroup)) - unsignedBlocksSizeCounter = metrics.MustRegisterCounter("unsigned_blocks_size_bytes", metrics.WithGroup(tsmGroup)) - stringBlocksDecodedCounter = metrics.MustRegisterCounter("string_blocks_decoded", metrics.WithGroup(tsmGroup)) - stringBlocksSizeCounter = metrics.MustRegisterCounter("string_blocks_size_bytes", metrics.WithGroup(tsmGroup)) - booleanBlocksDecodedCounter = metrics.MustRegisterCounter("boolean_blocks_decoded", metrics.WithGroup(tsmGroup)) - booleanBlocksSizeCounter = metrics.MustRegisterCounter("boolean_blocks_size_bytes", metrics.WithGroup(tsmGroup)) -) - -// FileStore is an abstraction around multiple TSM files. -type FileStore struct { - mu sync.RWMutex - lastModified time.Time - // Most recently known file stats. If nil then stats will need to be - // recalculated - lastFileStats []FileStat - - currentGeneration int - dir string - - files []TSMFile - tsmMMAPWillNeed bool // If true then the kernel will be advised MMAP_WILLNEED for TSM files. - openLimiter limiter.Fixed // limit the number of concurrent opening TSM files. - - logger *zap.Logger // Logger to be used for important messages - traceLogger *zap.Logger // Logger to be used when trace-logging is on. - traceLogging bool - - stats *fileStoreMetrics - purger *purger - - currentTempDirID int - - parseFileName ParseFileNameFunc - - obs tsdb.FileStoreObserver - - copyFiles bool -} - -// FileStat holds information about a TSM file on disk. -type FileStat struct { - Path string - HasTombstone bool - Size uint32 - LastModified int64 - MinTime, MaxTime int64 - MinKey, MaxKey []byte -} - -// TombstoneStat holds information about a possible tombstone file on disk. -type TombstoneStat struct { - TombstoneExists bool - Path string - LastModified int64 - Size uint32 -} - -// OverlapsTimeRange returns true if the time range of the file intersect min and max. -func (f FileStat) OverlapsTimeRange(min, max int64) bool { - return f.MinTime <= max && f.MaxTime >= min -} - -// OverlapsKeyRange returns true if the min and max keys of the file overlap the arguments min and max. -func (f FileStat) OverlapsKeyRange(min, max []byte) bool { - return len(min) != 0 && len(max) != 0 && bytes.Compare(f.MinKey, max) <= 0 && bytes.Compare(f.MaxKey, min) >= 0 -} - -// ContainsKey returns true if the min and max keys of the file overlap the arguments min and max. -func (f FileStat) ContainsKey(key []byte) bool { - return bytes.Compare(f.MinKey, key) >= 0 || bytes.Compare(key, f.MaxKey) <= 0 -} - -// NewFileStore returns a new instance of FileStore based on the given directory. 
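// The store starts with a no-op logger, an open-file limiter sized to
// GOMAXPROCS, and file copying (rather than renaming) enabled only on Windows;
// WithLogger, WithObserver, and WithParseFileNameFunc can be used to adjust it
// after construction.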
-func NewFileStore(dir string, tags tsdb.EngineTags) *FileStore { - logger := zap.NewNop() - fs := &FileStore{ - dir: dir, - lastModified: time.Time{}, - logger: logger, - traceLogger: logger, - openLimiter: limiter.NewFixed(runtime.GOMAXPROCS(0)), - stats: newFileStoreMetrics(tags), - purger: &purger{ - files: map[string]TSMFile{}, - logger: logger, - }, - obs: noFileStoreObserver{}, - parseFileName: DefaultParseFileName, - copyFiles: runtime.GOOS == "windows", - } - fs.purger.fileStore = fs - return fs -} - -// WithObserver sets the observer for the file store. -func (f *FileStore) WithObserver(obs tsdb.FileStoreObserver) { - f.obs = obs -} - -func (f *FileStore) WithParseFileNameFunc(parseFileNameFunc ParseFileNameFunc) { - f.parseFileName = parseFileNameFunc -} - -func (f *FileStore) ParseFileName(path string) (int, int, error) { - return f.parseFileName(path) -} - -// enableTraceLogging must be called before the FileStore is opened. -func (f *FileStore) enableTraceLogging(enabled bool) { - f.traceLogging = enabled - if enabled { - f.traceLogger = f.logger - } -} - -// WithLogger sets the logger on the file store. -func (f *FileStore) WithLogger(log *zap.Logger) { - f.logger = log.With(zap.String("service", "filestore")) - f.purger.logger = f.logger - - if f.traceLogging { - f.traceLogger = f.logger - } -} - -var globalFileStoreMetrics = newAllFileStoreMetrics() - -const filesSubsystem = "tsm_files" - -type allFileStoreMetrics struct { - files *prometheus.GaugeVec - size *prometheus.GaugeVec -} - -type fileStoreMetrics struct { - files prometheus.Gauge - size prometheus.Gauge - sizeAtomic int64 -} - -func (f *fileStoreMetrics) AddSize(n int64) { - val := atomic.AddInt64(&f.sizeAtomic, n) - f.size.Set(float64(val)) -} - -func (f *fileStoreMetrics) SetSize(n int64) { - atomic.StoreInt64(&f.sizeAtomic, n) - f.size.Set(float64(n)) -} - -func (f *fileStoreMetrics) SetFiles(n int64) { - f.files.Set(float64(n)) -} - -func newAllFileStoreMetrics() *allFileStoreMetrics { - labels := tsdb.EngineLabelNames() - return &allFileStoreMetrics{ - files: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: storageNamespace, - Subsystem: filesSubsystem, - Name: "total", - Help: "Gauge of number of files per shard", - }, labels), - size: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: storageNamespace, - Subsystem: filesSubsystem, - Name: "disk_bytes", - Help: "Gauge of data size in bytes for each shard", - }, labels), - } -} - -func FileStoreCollectors() []prometheus.Collector { - return []prometheus.Collector{ - globalFileStoreMetrics.files, - globalFileStoreMetrics.size, - } -} - -func newFileStoreMetrics(tags tsdb.EngineTags) *fileStoreMetrics { - labels := tags.GetLabels() - return &fileStoreMetrics{ - files: globalFileStoreMetrics.files.With(labels), - size: globalFileStoreMetrics.size.With(labels), - } -} - -// Count returns the number of TSM files currently loaded. -func (f *FileStore) Count() int { - f.mu.RLock() - defer f.mu.RUnlock() - return len(f.files) -} - -// Files returns the slice of TSM files currently loaded. This is only used for -// tests, and the files aren't guaranteed to stay valid in the presence of compactions. -func (f *FileStore) Files() []TSMFile { - f.mu.RLock() - defer f.mu.RUnlock() - return f.files -} - -// Free releases any resources held by the FileStore. The resources will be re-acquired -// if necessary if they are needed after freeing them. 
-func (f *FileStore) Free() error { - f.mu.RLock() - defer f.mu.RUnlock() - for _, f := range f.files { - if err := f.Free(); err != nil { - return err - } - } - return nil -} - -// CurrentGeneration returns the current generation of the TSM files. -func (f *FileStore) CurrentGeneration() int { - f.mu.RLock() - defer f.mu.RUnlock() - return f.currentGeneration -} - -// NextGeneration increments the max file ID and returns the new value. -func (f *FileStore) NextGeneration() int { - f.mu.Lock() - defer f.mu.Unlock() - f.currentGeneration++ - return f.currentGeneration -} - -// WalkKeys calls fn for every key in every TSM file known to the FileStore. If the key -// exists in multiple files, it will be invoked for each file. -func (f *FileStore) WalkKeys(seek []byte, fn func(key []byte, typ byte) error) error { - f.mu.RLock() - if len(f.files) == 0 { - f.mu.RUnlock() - return nil - } - - // Ensure files are not unmapped while we're iterating over them. - for _, r := range f.files { - r.Ref() - defer r.Unref() - } - - ki := newMergeKeyIterator(f.files, seek) - f.mu.RUnlock() - for ki.Next() { - key, typ := ki.Read() - if err := fn(key, typ); err != nil { - return err - } - } - - return nil -} - -// Keys returns all keys and types for all files in the file store. -func (f *FileStore) Keys() map[string]byte { - f.mu.RLock() - defer f.mu.RUnlock() - - uniqueKeys := map[string]byte{} - if err := f.WalkKeys(nil, func(key []byte, typ byte) error { - uniqueKeys[string(key)] = typ - return nil - }); err != nil { - return nil - } - - return uniqueKeys -} - -// Type returns the type of values store at the block for key. -func (f *FileStore) Type(key []byte) (byte, error) { - f.mu.RLock() - defer f.mu.RUnlock() - - for _, f := range f.files { - if f.Contains(key) { - return f.Type(key) - } - } - return 0, fmt.Errorf("unknown type for %v", key) -} - -// Delete removes the keys from the set of keys available in this file. -func (f *FileStore) Delete(keys [][]byte) error { - return f.DeleteRange(keys, math.MinInt64, math.MaxInt64) -} - -func (f *FileStore) Apply(ctx context.Context, fn func(r TSMFile) error) error { - // Limit apply fn to number of cores - limiter := limiter.NewFixed(runtime.GOMAXPROCS(0)) - - f.mu.RLock() - errC := make(chan error, len(f.files)) - - for _, f := range f.files { - go func(r TSMFile) { - if err := limiter.Take(ctx); err != nil { - errC <- err - return - } - defer limiter.Release() - - r.Ref() - defer r.Unref() - errC <- fn(r) - }(f) - } - - var applyErr error - for i := 0; i < cap(errC); i++ { - if err := <-errC; err != nil { - applyErr = err - } - } - f.mu.RUnlock() - - f.mu.Lock() - f.lastModified = time.Now().UTC() - f.lastFileStats = nil - f.mu.Unlock() - - return applyErr -} - -// DeleteRange removes the values for keys between timestamps min and max. This should only -// be used with smaller batches of series keys. 
-func (f *FileStore) DeleteRange(keys [][]byte, min, max int64) error { - var batches BatchDeleters - f.mu.RLock() - for _, f := range f.files { - if f.OverlapsTimeRange(min, max) { - batches = append(batches, f.BatchDelete()) - } - } - f.mu.RUnlock() - - if len(batches) == 0 { - return nil - } - - if err := func() error { - if err := batches.DeleteRange(keys, min, max); err != nil { - return err - } - - return batches.Commit() - }(); err != nil { - // Rollback the deletes - _ = batches.Rollback() - return err - } - - f.mu.Lock() - f.lastModified = time.Now().UTC() - f.lastFileStats = nil - f.mu.Unlock() - return nil -} - -// Open loads all the TSM files in the configured directory. -func (f *FileStore) Open(ctx context.Context) error { - f.mu.Lock() - defer f.mu.Unlock() - - // Not loading files from disk so nothing to do - if f.dir == "" { - return nil - } - - if f.openLimiter == nil { - return errors.New("cannot open FileStore without an OpenLimiter (is EngineOptions.OpenLimiter set?)") - } - - // find the current max ID for temp directories - tmpfiles, err := os.ReadDir(f.dir) - if err != nil { - return err - } - - // ascertain the current temp directory number by examining the existing - // directories and choosing the one with the higest basename when converted - // to an integer. - for _, fi := range tmpfiles { - if !fi.IsDir() || !strings.HasSuffix(fi.Name(), "."+TmpTSMFileExtension) { - continue - } - - ss := strings.Split(filepath.Base(fi.Name()), ".") - if len(ss) != 2 { - continue - } - - i, err := strconv.Atoi(ss[0]) - if err != nil || i <= f.currentTempDirID { - continue - } - - // i must be a valid integer and greater than f.currentTempDirID at this - // point - f.currentTempDirID = i - } - - files, err := filepath.Glob(filepath.Join(f.dir, "*."+TSMFileExtension)) - if err != nil { - return err - } - - // struct to hold the result of opening each reader in a goroutine - type res struct { - r *TSMReader - err error - } - - readerC := make(chan *res) - for i, fn := range files { - // Keep track of the latest ID - generation, _, err := f.parseFileName(fn) - if err != nil { - return err - } - - if generation >= f.currentGeneration { - f.currentGeneration = generation + 1 - } - - file, err := os.OpenFile(fn, os.O_RDONLY, 0666) - if err != nil { - return fmt.Errorf("error opening file %s: %v", fn, err) - } - - go func(idx int, file *os.File) { - // Ensure a limited number of TSM files are loaded at once. - // Systems which have very large datasets (1TB+) can have thousands - // of TSM files which can cause extremely long load times. - if err := f.openLimiter.Take(ctx); err != nil { - f.logger.Error("Failed to open tsm file", zap.String("path", file.Name()), zap.Error(err)) - readerC <- &res{err: fmt.Errorf("failed to open tsm file %q: %w", file.Name(), err)} - return - } - defer f.openLimiter.Release() - - start := time.Now() - df, err := NewTSMReader(file, WithMadviseWillNeed(f.tsmMMAPWillNeed)) - f.logger.Info("Opened file", - zap.String("path", file.Name()), - zap.Int("id", idx), - zap.Duration("duration", time.Since(start))) - - // If we are unable to read a TSM file then log the error, rename - // the file, and continue loading the shard without it. 
- if err != nil { - f.logger.Error("Cannot read corrupt tsm file, renaming", zap.String("path", file.Name()), zap.Int("id", idx), zap.Error(err)) - file.Close() - if e := os.Rename(file.Name(), file.Name()+"."+BadTSMFileExtension); e != nil { - f.logger.Error("Cannot rename corrupt tsm file", zap.String("path", file.Name()), zap.Int("id", idx), zap.Error(e)) - readerC <- &res{r: df, err: fmt.Errorf("cannot rename corrupt file %s: %v", file.Name(), e)} - return - } - readerC <- &res{r: df, err: fmt.Errorf("cannot read corrupt file %s: %v", file.Name(), err)} - return - } - - df.WithObserver(f.obs) - readerC <- &res{r: df} - }(i, file) - } - - var lm int64 - isEmpty := true - for range files { - res := <-readerC - if res.err != nil { - return res.err - } else if res.r == nil { - continue - } - f.files = append(f.files, res.r) - - // Accumulate file store size stats - f.stats.AddSize(int64(res.r.Size())) - if ts := res.r.TombstoneStats(); ts.TombstoneExists { - f.stats.AddSize(int64(ts.Size)) - } - - // Re-initialize the lastModified time for the file store - if res.r.LastModified() > lm { - lm = res.r.LastModified() - } - isEmpty = false - } - if isEmpty { - if fi, err := os.Stat(f.dir); err == nil { - f.lastModified = fi.ModTime().UTC() - } else { - close(readerC) - return err - } - } else { - f.lastModified = time.Unix(0, lm).UTC() - } - close(readerC) - - sort.Sort(tsmReaders(f.files)) - f.stats.SetFiles(int64(len(f.files))) - return nil -} - -// Close closes the file store. -func (f *FileStore) Close() error { - // Make the object appear closed to other method calls. - f.mu.Lock() - - files := f.files - - f.lastFileStats = nil - f.files = nil - - f.stats.SetFiles(0) - - // Let other methods access this closed object while we do the actual closing. - f.mu.Unlock() - - for _, file := range files { - err := file.Close() - if err != nil { - return err - } - } - - return nil -} - -func (f *FileStore) DiskSizeBytes() int64 { - return atomic.LoadInt64(&f.stats.sizeAtomic) -} - -// Read returns the slice of values for the given key and the given timestamp, -// if any file matches those constraints. -func (f *FileStore) Read(key []byte, t int64) ([]Value, error) { - f.mu.RLock() - defer f.mu.RUnlock() - - for _, f := range f.files { - // Can this file possibly contain this key and timestamp? - if !f.Contains(key) { - continue - } - - // May have the key and time we are looking for so try to find - v, err := f.Read(key, t) - if err != nil { - return nil, err - } - - if len(v) > 0 { - return v, nil - } - } - return nil, nil -} - -func (f *FileStore) Cost(key []byte, min, max int64) query.IteratorCost { - f.mu.RLock() - defer f.mu.RUnlock() - return f.cost(key, min, max) -} - -// Reader returns a TSMReader for path if one is currently managed by the FileStore. -// Otherwise it returns nil. If it returns a file, you must call Unref on it when -// you are done, and never use it after that. -func (f *FileStore) TSMReader(path string) *TSMReader { - f.mu.RLock() - defer f.mu.RUnlock() - for _, r := range f.files { - if r.Path() == path { - r.Ref() - return r.(*TSMReader) - } - } - return nil -} - -// KeyCursor returns a KeyCursor for key and t across the files in the FileStore. -func (f *FileStore) KeyCursor(ctx context.Context, key []byte, t int64, ascending bool) *KeyCursor { - f.mu.RLock() - defer f.mu.RUnlock() - return newKeyCursor(ctx, f, key, t, ascending) -} - -// Stats returns the stats of the underlying files, preferring the cached version if it is still valid. 
-func (f *FileStore) Stats() []FileStat {
- f.mu.RLock()
- if len(f.lastFileStats) > 0 {
- defer f.mu.RUnlock()
- return f.lastFileStats
- }
- f.mu.RUnlock()
-
- // The file stats cache is invalid due to changes to files. Need to
- // recalculate.
- f.mu.Lock()
- defer f.mu.Unlock()
-
- if len(f.lastFileStats) > 0 {
- return f.lastFileStats
- }
-
- // If lastFileStats's capacity is far away from the number of entries
- // we need to add, then we'll reallocate.
- if cap(f.lastFileStats) < len(f.files)/2 {
- f.lastFileStats = make([]FileStat, 0, len(f.files))
- }
-
- for _, fd := range f.files {
- f.lastFileStats = append(f.lastFileStats, fd.Stats())
- }
- return f.lastFileStats
-}
-
-// ReplaceWithCallback replaces oldFiles with newFiles and calls updatedFn with the files to be added to the FileStore.
-func (f *FileStore) ReplaceWithCallback(oldFiles, newFiles []string, updatedFn func(r []TSMFile)) error {
- return f.replace(oldFiles, newFiles, updatedFn)
-}
-
-// Replace replaces oldFiles with newFiles.
-func (f *FileStore) Replace(oldFiles, newFiles []string) error {
- return f.replace(oldFiles, newFiles, nil)
-}
-
-func (f *FileStore) replace(oldFiles, newFiles []string, updatedFn func(r []TSMFile)) error {
- if len(oldFiles) == 0 && len(newFiles) == 0 {
- return nil
- }
-
- f.mu.RLock()
- maxTime := f.lastModified
- f.mu.RUnlock()
-
- updated := make([]TSMFile, 0, len(newFiles))
- tsmTmpExt := fmt.Sprintf("%s.%s", TSMFileExtension, TmpTSMFileExtension)
-
- // Rename all the new files to make them live on restart
- for _, file := range newFiles {
- if !strings.HasSuffix(file, tsmTmpExt) && !strings.HasSuffix(file, TSMFileExtension) {
- // This isn't a .tsm or .tsm.tmp file.
- continue
- }
-
- // give the observer a chance to process the file first.
- if err := f.obs.FileFinishing(file); err != nil {
- return err
- }
-
- var oldName, newName = file, file
- if strings.HasSuffix(oldName, tsmTmpExt) {
- // The new TSM files have a tmp extension. First rename them.
- newName = file[:len(file)-4]
- if err := os.Rename(oldName, newName); err != nil {
- return err
- }
- }
-
- // Any error after this point should result in the file being renamed
- // back to the original name. The caller then has the opportunity to
- // remove it.
- fd, err := os.Open(newName)
- if err != nil {
- if newName != oldName {
- if err1 := os.Rename(newName, oldName); err1 != nil {
- return err1
- }
- }
- return err
- }
-
- // Keep track of the new mod time
- if stat, err := fd.Stat(); err == nil {
- if maxTime.IsZero() || stat.ModTime().UTC().After(maxTime) {
- maxTime = stat.ModTime().UTC()
- }
- }
-
- tsm, err := NewTSMReader(fd, WithMadviseWillNeed(f.tsmMMAPWillNeed))
- if err != nil {
- if newName != oldName {
- if err1 := os.Rename(newName, oldName); err1 != nil {
- return err1
- }
- }
- return err
- }
- tsm.WithObserver(f.obs)
-
- updated = append(updated, tsm)
- }
-
- if updatedFn != nil {
- updatedFn(updated)
- }
-
- f.mu.Lock()
- defer f.mu.Unlock()
-
- // Copy the current set of active files while we rename
- // and load the new files. We copy the pointers here to minimize
- // the time that locks are held as well as to ensure that the replacement
- // is atomic.
-
- updated = append(updated, f.files...)
-
- // We need to prune our set of active files now
- var active, inuse []TSMFile
- for _, file := range updated {
- keep := true
- for _, remove := range oldFiles {
- if remove == file.Path() {
- keep = false
-
- // give the observer a chance to process the file first.
- if err := f.obs.FileUnlinking(file.Path()); err != nil { - return err - } - - if ts := file.TombstoneStats(); ts.TombstoneExists { - if err := f.obs.FileUnlinking(ts.Path); err != nil { - return err - } - } - - // If queries are running against this file, then we need to move it out of the - // way and let them complete. We'll then delete the original file to avoid - // blocking callers upstream. If the process crashes, the temp file is - // cleaned up at startup automatically. - // - // In order to ensure that there are no races with this (file held externally calls Ref - // after we check InUse), we need to maintain the invariant that every handle to a file - // is handed out in use (Ref'd), and handlers only ever relinquish the file once (call Unref - // exactly once, and never use it again). InUse is only valid during a write lock, since - // we allow calls to Ref and Unref under the read lock and no lock at all respectively. - if file.InUse() { - // Copy all the tombstones related to this TSM file - var deletes []string - if ts := file.TombstoneStats(); ts.TombstoneExists { - deletes = append(deletes, ts.Path) - } - - // Rename the TSM file used by this reader - tempPath := fmt.Sprintf("%s.%s", file.Path(), TmpTSMFileExtension) - if err := file.Rename(tempPath); err != nil { - return err - } - - // Remove the old file and tombstones. We can't use the normal TSMReader.Remove() - // because it now refers to our temp file which we can't remove. - for _, f := range deletes { - if err := os.Remove(f); err != nil { - return err - } - } - - inuse = append(inuse, file) - continue - } - - if err := file.Close(); err != nil { - return err - } - - if err := file.Remove(); err != nil { - return err - } - break - } - } - - if keep { - active = append(active, file) - } - } - - if err := file.SyncDir(f.dir); err != nil { - return err - } - - // Tell the purger about our in-use files we need to remove - f.purger.add(inuse) - - // If times didn't change (which can happen since file mod times are second level), - // then add a ns to the time to ensure that lastModified changes since files on disk - // actually did change - if maxTime.Equal(f.lastModified) || maxTime.Before(f.lastModified) { - maxTime = f.lastModified.UTC().Add(1) - } - - f.lastModified = maxTime.UTC() - - f.lastFileStats = nil - f.files = active - sort.Sort(tsmReaders(f.files)) - f.stats.SetFiles(int64(len(f.files))) - - // Recalculate the disk size stat - var totalSize int64 - for _, file := range f.files { - totalSize += int64(file.Size()) - if ts := file.TombstoneStats(); ts.TombstoneExists { - totalSize += int64(ts.Size) - } - } - f.stats.SetSize(totalSize) - - return nil -} - -// LastModified returns the last time the file store was updated with new -// TSM files or a delete. -func (f *FileStore) LastModified() time.Time { - f.mu.RLock() - defer f.mu.RUnlock() - - return f.lastModified -} - -// BlockCount returns number of values stored in the block at location idx -// in the file at path. If path does not match any file in the store, 0 is -// returned. If idx is out of range for the number of blocks in the file, -// 0 is returned. 
-func (f *FileStore) BlockCount(path string, idx int) int {
- f.mu.RLock()
- defer f.mu.RUnlock()
-
- if idx < 0 {
- return 0
- }
-
- for _, fd := range f.files {
- if fd.Path() == path {
- iter := fd.BlockIterator()
- for i := 0; i < idx; i++ {
- if !iter.Next() {
- return 0
- }
- }
- _, _, _, _, _, block, _ := iter.Read()
- // on Error, BlockCount(block) returns 0 for cnt
- cnt, _ := BlockCount(block)
- return cnt
- }
- }
- return 0
-}
-
-// We need to determine the possible files that may be accessed by this query given
-// the time range.
-func (f *FileStore) cost(key []byte, min, max int64) query.IteratorCost {
- var cache []IndexEntry
- cost := query.IteratorCost{}
- for _, fd := range f.files {
- minTime, maxTime := fd.TimeRange()
- if !(maxTime > min && minTime < max) {
- continue
- }
- skipped := true
- tombstones := fd.TombstoneRange(key)
-
- entries := fd.ReadEntries(key, &cache)
- ENTRIES:
- for i := 0; i < len(entries); i++ {
- ie := entries[i]
-
- if !(ie.MaxTime > min && ie.MinTime < max) {
- continue
- }
-
- // Skip any blocks that only contain values that are tombstoned.
- for _, t := range tombstones {
- if t.Min <= ie.MinTime && t.Max >= ie.MaxTime {
- continue ENTRIES
- }
- }
-
- cost.BlocksRead++
- cost.BlockSize += int64(ie.Size)
- skipped = false
- }
-
- if !skipped {
- cost.NumFiles++
- }
- }
- return cost
-}
-
-// locations returns the files and index blocks for a key and time. ascending indicates
-// whether the key will be scanned in ascending or descending time order.
-// This function assumes the read-lock has been taken.
-func (f *FileStore) locations(key []byte, t int64, ascending bool) []*location {
- var cache []IndexEntry
- locations := make([]*location, 0, len(f.files))
- for _, fd := range f.files {
- minTime, maxTime := fd.TimeRange()
-
- // If we are ascending and the max time of the file is before where we want to start,
- // skip it.
- if ascending && maxTime < t {
- continue
- // If we are descending and the min time of the file is after where we want to start,
- // then skip it.
- } else if !ascending && minTime > t {
- continue
- }
- tombstones := fd.TombstoneRange(key)
-
- // This file could potentially contain points we are looking for so find the blocks for
- // the given key.
- entries := fd.ReadEntries(key, &cache)
- LOOP:
- for i := 0; i < len(entries); i++ {
- ie := entries[i]
-
- // Skip any blocks that only contain values that are tombstoned.
- for _, t := range tombstones {
- if t.Min <= ie.MinTime && t.Max >= ie.MaxTime {
- continue LOOP
- }
- }
-
- // If we are ascending and the max time of a block is before where we are looking, skip
- // it since the data is out of our range
- if ascending && ie.MaxTime < t {
- continue
- // If we are descending and the min time of a block is after where we are looking, skip
- // it since the data is out of our range
- } else if !ascending && ie.MinTime > t {
- continue
- }
-
- location := &location{
- r: fd,
- entry: ie,
- }
-
- if ascending {
- // For an ascending cursor, mark everything before the seek time as read
- // so we can filter it out at query time
- location.readMin = math.MinInt64
- location.readMax = t - 1
- } else {
- // For a descending cursor, mark everything after the seek time as read
- // so we can filter it out at query time
- location.readMin = t + 1
- location.readMax = math.MaxInt64
- }
- // Otherwise, add this file and block location
- locations = append(locations, location)
- }
- }
- return locations
-}
-
-// MakeSnapshotLinks creates hardlinks from the supplied TSMFiles to
-// corresponding files under a supplied directory.
-func (f *FileStore) MakeSnapshotLinks(destPath string, files []TSMFile) (returnErr error) {
- for _, tsmf := range files {
- newpath := filepath.Join(destPath, filepath.Base(tsmf.Path()))
- err := f.copyOrLink(tsmf.Path(), newpath)
- if err != nil {
- return err
- }
- if tf := tsmf.TombstoneStats(); tf.TombstoneExists {
- newpath := filepath.Join(destPath, filepath.Base(tf.Path))
- err := f.copyOrLink(tf.Path, newpath)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (f *FileStore) copyOrLink(oldpath string, newpath string) error {
- if f.copyFiles {
- f.logger.Info("copying backup snapshots", zap.String("OldPath", oldpath), zap.String("NewPath", newpath))
- if err := f.copyNotLink(oldpath, newpath); err != nil {
- return err
- }
- } else {
- f.logger.Info("linking backup snapshots", zap.String("OldPath", oldpath), zap.String("NewPath", newpath))
- if err := f.linkNotCopy(oldpath, newpath); err != nil {
- return err
- }
- }
- return nil
-}
-
-// copyNotLink - use file copies instead of hard links for 2 scenarios:
-// Windows does not permit deleting a file with open file handles
-// Azure does not support hard links in its default file system
-func (f *FileStore) copyNotLink(oldPath, newPath string) (returnErr error) {
- rfd, err := os.Open(oldPath)
- if err != nil {
- return fmt.Errorf("error opening file for backup %s: %q", oldPath, err)
- } else {
- defer func() {
- if e := rfd.Close(); returnErr == nil && e != nil {
- returnErr = fmt.Errorf("error closing source file for backup %s: %w", oldPath, e)
- }
- }()
- }
- fi, err := rfd.Stat()
- if err != nil {
- return fmt.Errorf("error collecting statistics from file for backup %s: %w", oldPath, err)
- }
- wfd, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE, fi.Mode())
- if err != nil {
- return fmt.Errorf("error creating temporary file for backup %s: %w", newPath, err)
- } else {
- defer func() {
- if e := wfd.Close(); returnErr == nil && e != nil {
- returnErr = fmt.Errorf("error closing temporary file for backup %s: %w", newPath, e)
- }
- }()
- }
- if _, err := io.Copy(wfd, rfd); err != nil {
- return fmt.Errorf("unable to copy file for backup from %s to %s: %w", oldPath, newPath, err)
- }
- if err := os.Chtimes(newPath, fi.ModTime(), fi.ModTime()); err != nil {
- return fmt.Errorf("unable to set modification time on temporary backup file %s: %w", newPath, err)
- }
- return
nil -} - -// linkNotCopy - use hard links for backup snapshots -func (f *FileStore) linkNotCopy(oldPath, newPath string) error { - if err := os.Link(oldPath, newPath); err != nil { - if errors.Is(err, syscall.ENOTSUP) { - if fi, e := os.Stat(oldPath); e == nil && !fi.IsDir() { - f.logger.Info("file system does not support hard links, switching to copies for backup", zap.String("OldPath", oldPath), zap.String("NewPath", newPath)) - // Force future snapshots to copy - f.copyFiles = true - return f.copyNotLink(oldPath, newPath) - } else if e != nil { - // Stat failed - return fmt.Errorf("error creating hard link for backup, cannot determine if %s is a file or directory: %w", oldPath, e) - } else { - return fmt.Errorf("error creating hard link for backup - %s is a directory, not a file: %q", oldPath, err) - } - } else { - return fmt.Errorf("error creating hard link for backup from %s to %s: %w", oldPath, newPath, err) - } - } else { - return nil - } -} - -// CreateSnapshot creates hardlinks for all tsm and tombstone files -// in the path provided. -func (f *FileStore) CreateSnapshot() (string, error) { - f.traceLogger.Info("Creating snapshot", zap.String("dir", f.dir)) - - f.mu.Lock() - // create a copy of the files slice and ensure they aren't closed out from - // under us, nor the slice mutated. - files := make([]TSMFile, len(f.files)) - copy(files, f.files) - - for _, tsmf := range files { - tsmf.Ref() - defer tsmf.Unref() - } - - // increment and keep track of the current temp dir for when we drop the lock. - // this ensures we are the only writer to the directory. - f.currentTempDirID += 1 - tmpPath := fmt.Sprintf("%d.%s", f.currentTempDirID, TmpTSMFileExtension) - tmpPath = filepath.Join(f.dir, tmpPath) - f.mu.Unlock() - - // create the tmp directory and add the hard links. there is no longer any shared - // mutable state. - err := os.Mkdir(tmpPath, 0777) - if err != nil { - return "", err - } - if err := f.MakeSnapshotLinks(tmpPath, files); err != nil { - // remove temporary directory since we couldn't create our hard links. - _ = os.RemoveAll(tmpPath) - return "", fmt.Errorf("CreateSnapshot() failed to create links %v: %w", tmpPath, err) - } - - return tmpPath, nil -} - -// FormatFileNameFunc is executed when generating a new TSM filename. -// Source filenames are provided via src. -type FormatFileNameFunc func(generation, sequence int) string - -// DefaultFormatFileName is the default implementation to format TSM filenames. -func DefaultFormatFileName(generation, sequence int) string { - return fmt.Sprintf("%09d-%09d", generation, sequence) -} - -// ParseFileNameFunc is executed when parsing a TSM filename into generation & sequence. -type ParseFileNameFunc func(name string) (generation, sequence int, err error) - -// DefaultParseFileName is used to parse the filenames of TSM files. 
-func DefaultParseFileName(name string) (int, int, error) { - base := filepath.Base(name) - idx := strings.Index(base, ".") - if idx == -1 { - return 0, 0, fmt.Errorf("file %s is named incorrectly", name) - } - - id := base[:idx] - - idx = strings.Index(id, "-") - if idx == -1 { - return 0, 0, fmt.Errorf("file %s is named incorrectly", name) - } - - generation, err := strconv.ParseUint(id[:idx], 10, 32) - if err != nil { - return 0, 0, fmt.Errorf("file %s is named incorrectly", name) - } - - sequence, err := strconv.ParseUint(id[idx+1:], 10, 32) - if err != nil { - return 0, 0, fmt.Errorf("file %s is named incorrectly", name) - } - - return int(generation), int(sequence), nil -} - -// KeyCursor allows iteration through keys in a set of files within a FileStore. -type KeyCursor struct { - key []byte - - // seeks is all the file locations that we need to return during iteration. - seeks []*location - - // current is the set of blocks possibly containing the next set of points. - // Normally this is just one entry, but there may be multiple if points have - // been overwritten. - current []*location - buf []Value - - ctx context.Context - col *metrics.Group - - // pos is the index within seeks. Based on ascending, it will increment or - // decrement through the size of seeks slice. - pos int - ascending bool -} - -type location struct { - r TSMFile - entry IndexEntry - - readMin, readMax int64 -} - -func (l *location) read() bool { - return l.readMin <= l.entry.MinTime && l.readMax >= l.entry.MaxTime -} - -func (l *location) markRead(min, max int64) { - if min < l.readMin { - l.readMin = min - } - - if max > l.readMax { - l.readMax = max - } -} - -type descLocations []*location - -// Sort methods -func (a descLocations) Len() int { return len(a) } -func (a descLocations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a descLocations) Less(i, j int) bool { - if a[i].entry.OverlapsTimeRange(a[j].entry.MinTime, a[j].entry.MaxTime) { - return a[i].r.Path() < a[j].r.Path() - } - return a[i].entry.MaxTime < a[j].entry.MaxTime -} - -type ascLocations []*location - -// Sort methods -func (a ascLocations) Len() int { return len(a) } -func (a ascLocations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a ascLocations) Less(i, j int) bool { - if a[i].entry.OverlapsTimeRange(a[j].entry.MinTime, a[j].entry.MaxTime) { - return a[i].r.Path() < a[j].r.Path() - } - return a[i].entry.MinTime < a[j].entry.MinTime -} - -// newKeyCursor returns a new instance of KeyCursor. -// This function assumes the read-lock has been taken. -func newKeyCursor(ctx context.Context, fs *FileStore, key []byte, t int64, ascending bool) *KeyCursor { - c := &KeyCursor{ - key: key, - seeks: fs.locations(key, t, ascending), - ctx: ctx, - col: metrics.GroupFromContext(ctx), - ascending: ascending, - } - - if ascending { - sort.Sort(ascLocations(c.seeks)) - } else { - sort.Sort(descLocations(c.seeks)) - } - - // Determine the distinct set of TSM files in use and mark then as in-use - for _, f := range c.seeks { - f.r.Ref() - } - - c.seek(t) - return c -} - -// Close removes all references on the cursor. -func (c *KeyCursor) Close() { - // Remove all of our in-use references since we're done - for _, f := range c.seeks { - f.r.Unref() - } - - c.buf = nil - c.seeks = nil - c.current = nil -} - -// seek positions the cursor at the given time. 
-func (c *KeyCursor) seek(t int64) { - if len(c.seeks) == 0 { - return - } - c.current = nil - - if c.ascending { - c.seekAscending(t) - } else { - c.seekDescending(t) - } -} - -func (c *KeyCursor) seekAscending(t int64) { - for i, e := range c.seeks { - if t < e.entry.MinTime || e.entry.Contains(t) { - // Record the position of the first block matching our seek time - if len(c.current) == 0 { - c.pos = i - } - - c.current = append(c.current, e) - } - } -} - -func (c *KeyCursor) seekDescending(t int64) { - for i := len(c.seeks) - 1; i >= 0; i-- { - e := c.seeks[i] - if t > e.entry.MaxTime || e.entry.Contains(t) { - // Record the position of the first block matching our seek time - if len(c.current) == 0 { - c.pos = i - } - c.current = append(c.current, e) - } - } -} - -// Next moves the cursor to the next position. -// Data should be read by the ReadBlock functions. -func (c *KeyCursor) Next() { - if len(c.current) == 0 { - return - } - // Do we still have unread values in the current block - if !c.current[0].read() { - return - } - c.current = c.current[:0] - if c.ascending { - c.nextAscending() - } else { - c.nextDescending() - } -} - -func (c *KeyCursor) nextAscending() { - for { - c.pos++ - if c.pos >= len(c.seeks) { - return - } else if !c.seeks[c.pos].read() { - break - } - } - - // Append the first matching block - if len(c.current) == 0 { - c.current = append(c.current, nil) - } else { - c.current = c.current[:1] - } - c.current[0] = c.seeks[c.pos] - - // If we have ovelapping blocks, append all their values so we can dedup - for i := c.pos + 1; i < len(c.seeks); i++ { - if c.seeks[i].read() { - continue - } - - c.current = append(c.current, c.seeks[i]) - } -} - -func (c *KeyCursor) nextDescending() { - for { - c.pos-- - if c.pos < 0 { - return - } else if !c.seeks[c.pos].read() { - break - } - } - - // Append the first matching block - if len(c.current) == 0 { - c.current = make([]*location, 1) - } else { - c.current = c.current[:1] - } - c.current[0] = c.seeks[c.pos] - - // If we have ovelapping blocks, append all their values so we can dedup - for i := c.pos; i >= 0; i-- { - if c.seeks[i].read() { - continue - } - c.current = append(c.current, c.seeks[i]) - } -} - -type purger struct { - mu sync.RWMutex - fileStore *FileStore - files map[string]TSMFile - running bool - - logger *zap.Logger -} - -func (p *purger) add(files []TSMFile) { - p.mu.Lock() - for _, f := range files { - p.files[f.Path()] = f - } - p.mu.Unlock() - p.purge() -} - -func (p *purger) purge() { - p.mu.Lock() - if p.running { - p.mu.Unlock() - return - } - p.running = true - p.mu.Unlock() - - go func() { - for { - p.mu.Lock() - for k, v := range p.files { - // In order to ensure that there are no races with this (file held externally calls Ref - // after we check InUse), we need to maintain the invariant that every handle to a file - // is handed out in use (Ref'd), and handlers only ever relinquish the file once (call Unref - // exactly once, and never use it again). InUse is only valid during a write lock, since - // we allow calls to Ref and Unref under the read lock and no lock at all respectively. 
- if !v.InUse() { - if err := v.Close(); err != nil { - p.logger.Info("Purge: close file", zap.Error(err)) - continue - } - - if err := v.Remove(); err != nil { - p.logger.Info("Purge: remove file", zap.Error(err)) - continue - } - delete(p.files, k) - } - } - - if len(p.files) == 0 { - p.running = false - p.mu.Unlock() - return - } - - p.mu.Unlock() - time.Sleep(time.Second) - } - }() -} - -type tsmReaders []TSMFile - -func (a tsmReaders) Len() int { return len(a) } -func (a tsmReaders) Less(i, j int) bool { return a[i].Path() < a[j].Path() } -func (a tsmReaders) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/tsdb/engine/tsm1/file_store_array.gen.go b/tsdb/engine/tsm1/file_store_array.gen.go deleted file mode 100644 index c6c81afecc1..00000000000 --- a/tsdb/engine/tsm1/file_store_array.gen.go +++ /dev/null @@ -1,927 +0,0 @@ -// Code generated by file_store.gen.go.tmpl. DO NOT EDIT. - -package tsm1 - -import ( - "github.com/influxdata/influxdb/v2/tsdb" -) - -// ReadFloatArrayBlock reads the next block as a set of float values. -func (c *KeyCursor) ReadFloatArrayBlock(values *tsdb.FloatArray) (*tsdb.FloatArray, error) { -LOOP: - // No matching blocks to decode - if len(c.current) == 0 { - values.Timestamps = values.Timestamps[:0] - values.Values = values.Values[:0] - return values, nil - } - - // First block is the oldest block containing the points we're searching for. - first := c.current[0] - err := first.r.ReadFloatArrayBlockAt(&first.entry, values) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(floatBlocksDecodedCounter).Add(1) - c.col.GetCounter(floatBlocksSizeCounter).Add(int64(first.entry.Size)) - } - - // Remove values we already read - values.Exclude(first.readMin, first.readMax) - - // Remove any tombstones - tombstones := first.r.TombstoneRange(c.key) - excludeTombstonesFloatArray(tombstones, values) - // If there are no values in this first block (all tombstoned or previously read) and - // we have more potential blocks too search. Try again. - if values.Len() == 0 && len(c.current) > 0 { - c.current = c.current[1:] - goto LOOP - } - - // Only one block with this key and time range so return it - if len(c.current) == 1 { - if values.Len() > 0 { - first.markRead(values.MinTime(), values.MaxTime()) - } - return values, nil - } - - // Use the current block time range as our overlapping window - minT, maxT := first.readMin, first.readMax - if values.Len() > 0 { - minT, maxT = values.MinTime(), values.MaxTime() - } - if c.ascending { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the min time range to ensure values are returned in ascending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MinTime < minT && !cur.read() { - minT = cur.entry.MinTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MaxTime > maxT { - maxT = cur.entry.MaxTime - } - values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. 
- for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - v := &tsdb.FloatArray{} - err := cur.r.ReadFloatArrayBlockAt(&cur.entry, v) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(floatBlocksDecodedCounter).Add(1) - c.col.GetCounter(floatBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - excludeTombstonesFloatArray(tombstones, v) - - // Remove values we already read - v.Exclude(cur.readMin, cur.readMax) - - if v.Len() > 0 { - // Only use values in the overlapping window - v.Include(minT, maxT) - // Merge the remaining values with the existing - values.Merge(v) - } - cur.markRead(minT, maxT) - } - - } else { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the max time range to ensure values are returned in descending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MaxTime > maxT && !cur.read() { - maxT = cur.entry.MaxTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MinTime < minT { - minT = cur.entry.MinTime - } - values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - v := &tsdb.FloatArray{} - err := cur.r.ReadFloatArrayBlockAt(&cur.entry, v) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(floatBlocksDecodedCounter).Add(1) - c.col.GetCounter(floatBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - excludeTombstonesFloatArray(tombstones, v) - - // Remove values we already read - v.Exclude(cur.readMin, cur.readMax) - - // If the block we decoded should have all of it's values included, mark it as read so we - // don't use it again. - if v.Len() > 0 { - v.Include(minT, maxT) - // Merge the remaining values with the existing - v.Merge(values) - *values = *v - } - cur.markRead(minT, maxT) - } - } - - first.markRead(minT, maxT) - - return values, err -} - -func excludeTombstonesFloatArray(t []TimeRange, values *tsdb.FloatArray) { - for i := range t { - values.Exclude(t[i].Min, t[i].Max) - } -} - -// ReadIntegerArrayBlock reads the next block as a set of integer values. -func (c *KeyCursor) ReadIntegerArrayBlock(values *tsdb.IntegerArray) (*tsdb.IntegerArray, error) { -LOOP: - // No matching blocks to decode - if len(c.current) == 0 { - values.Timestamps = values.Timestamps[:0] - values.Values = values.Values[:0] - return values, nil - } - - // First block is the oldest block containing the points we're searching for. 
- first := c.current[0] - err := first.r.ReadIntegerArrayBlockAt(&first.entry, values) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(integerBlocksDecodedCounter).Add(1) - c.col.GetCounter(integerBlocksSizeCounter).Add(int64(first.entry.Size)) - } - - // Remove values we already read - values.Exclude(first.readMin, first.readMax) - - // Remove any tombstones - tombstones := first.r.TombstoneRange(c.key) - excludeTombstonesIntegerArray(tombstones, values) - // If there are no values in this first block (all tombstoned or previously read) and - // we have more potential blocks too search. Try again. - if values.Len() == 0 && len(c.current) > 0 { - c.current = c.current[1:] - goto LOOP - } - - // Only one block with this key and time range so return it - if len(c.current) == 1 { - if values.Len() > 0 { - first.markRead(values.MinTime(), values.MaxTime()) - } - return values, nil - } - - // Use the current block time range as our overlapping window - minT, maxT := first.readMin, first.readMax - if values.Len() > 0 { - minT, maxT = values.MinTime(), values.MaxTime() - } - if c.ascending { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the min time range to ensure values are returned in ascending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MinTime < minT && !cur.read() { - minT = cur.entry.MinTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MaxTime > maxT { - maxT = cur.entry.MaxTime - } - values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. 
- for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - v := &tsdb.IntegerArray{} - err := cur.r.ReadIntegerArrayBlockAt(&cur.entry, v) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(integerBlocksDecodedCounter).Add(1) - c.col.GetCounter(integerBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - excludeTombstonesIntegerArray(tombstones, v) - - // Remove values we already read - v.Exclude(cur.readMin, cur.readMax) - - if v.Len() > 0 { - // Only use values in the overlapping window - v.Include(minT, maxT) - // Merge the remaining values with the existing - values.Merge(v) - } - cur.markRead(minT, maxT) - } - - } else { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the max time range to ensure values are returned in descending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MaxTime > maxT && !cur.read() { - maxT = cur.entry.MaxTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MinTime < minT { - minT = cur.entry.MinTime - } - values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - v := &tsdb.IntegerArray{} - err := cur.r.ReadIntegerArrayBlockAt(&cur.entry, v) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(integerBlocksDecodedCounter).Add(1) - c.col.GetCounter(integerBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - excludeTombstonesIntegerArray(tombstones, v) - - // Remove values we already read - v.Exclude(cur.readMin, cur.readMax) - - // If the block we decoded should have all of it's values included, mark it as read so we - // don't use it again. - if v.Len() > 0 { - v.Include(minT, maxT) - // Merge the remaining values with the existing - v.Merge(values) - *values = *v - } - cur.markRead(minT, maxT) - } - } - - first.markRead(minT, maxT) - - return values, err -} - -func excludeTombstonesIntegerArray(t []TimeRange, values *tsdb.IntegerArray) { - for i := range t { - values.Exclude(t[i].Min, t[i].Max) - } -} - -// ReadUnsignedArrayBlock reads the next block as a set of unsigned values. -func (c *KeyCursor) ReadUnsignedArrayBlock(values *tsdb.UnsignedArray) (*tsdb.UnsignedArray, error) { -LOOP: - // No matching blocks to decode - if len(c.current) == 0 { - values.Timestamps = values.Timestamps[:0] - values.Values = values.Values[:0] - return values, nil - } - - // First block is the oldest block containing the points we're searching for. 
- first := c.current[0] - err := first.r.ReadUnsignedArrayBlockAt(&first.entry, values) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(unsignedBlocksDecodedCounter).Add(1) - c.col.GetCounter(unsignedBlocksSizeCounter).Add(int64(first.entry.Size)) - } - - // Remove values we already read - values.Exclude(first.readMin, first.readMax) - - // Remove any tombstones - tombstones := first.r.TombstoneRange(c.key) - excludeTombstonesUnsignedArray(tombstones, values) - // If there are no values in this first block (all tombstoned or previously read) and - // we have more potential blocks too search. Try again. - if values.Len() == 0 && len(c.current) > 0 { - c.current = c.current[1:] - goto LOOP - } - - // Only one block with this key and time range so return it - if len(c.current) == 1 { - if values.Len() > 0 { - first.markRead(values.MinTime(), values.MaxTime()) - } - return values, nil - } - - // Use the current block time range as our overlapping window - minT, maxT := first.readMin, first.readMax - if values.Len() > 0 { - minT, maxT = values.MinTime(), values.MaxTime() - } - if c.ascending { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the min time range to ensure values are returned in ascending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MinTime < minT && !cur.read() { - minT = cur.entry.MinTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MaxTime > maxT { - maxT = cur.entry.MaxTime - } - values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. 
- for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - v := &tsdb.UnsignedArray{} - err := cur.r.ReadUnsignedArrayBlockAt(&cur.entry, v) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(unsignedBlocksDecodedCounter).Add(1) - c.col.GetCounter(unsignedBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - excludeTombstonesUnsignedArray(tombstones, v) - - // Remove values we already read - v.Exclude(cur.readMin, cur.readMax) - - if v.Len() > 0 { - // Only use values in the overlapping window - v.Include(minT, maxT) - // Merge the remaining values with the existing - values.Merge(v) - } - cur.markRead(minT, maxT) - } - - } else { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the max time range to ensure values are returned in descending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MaxTime > maxT && !cur.read() { - maxT = cur.entry.MaxTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MinTime < minT { - minT = cur.entry.MinTime - } - values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - v := &tsdb.UnsignedArray{} - err := cur.r.ReadUnsignedArrayBlockAt(&cur.entry, v) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(unsignedBlocksDecodedCounter).Add(1) - c.col.GetCounter(unsignedBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - excludeTombstonesUnsignedArray(tombstones, v) - - // Remove values we already read - v.Exclude(cur.readMin, cur.readMax) - - // If the block we decoded should have all of it's values included, mark it as read so we - // don't use it again. - if v.Len() > 0 { - v.Include(minT, maxT) - // Merge the remaining values with the existing - v.Merge(values) - *values = *v - } - cur.markRead(minT, maxT) - } - } - - first.markRead(minT, maxT) - - return values, err -} - -func excludeTombstonesUnsignedArray(t []TimeRange, values *tsdb.UnsignedArray) { - for i := range t { - values.Exclude(t[i].Min, t[i].Max) - } -} - -// ReadStringArrayBlock reads the next block as a set of string values. -func (c *KeyCursor) ReadStringArrayBlock(values *tsdb.StringArray) (*tsdb.StringArray, error) { -LOOP: - // No matching blocks to decode - if len(c.current) == 0 { - values.Timestamps = values.Timestamps[:0] - values.Values = values.Values[:0] - return values, nil - } - - // First block is the oldest block containing the points we're searching for. 
- first := c.current[0] - err := first.r.ReadStringArrayBlockAt(&first.entry, values) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(stringBlocksDecodedCounter).Add(1) - c.col.GetCounter(stringBlocksSizeCounter).Add(int64(first.entry.Size)) - } - - // Remove values we already read - values.Exclude(first.readMin, first.readMax) - - // Remove any tombstones - tombstones := first.r.TombstoneRange(c.key) - excludeTombstonesStringArray(tombstones, values) - // If there are no values in this first block (all tombstoned or previously read) and - // we have more potential blocks too search. Try again. - if values.Len() == 0 && len(c.current) > 0 { - c.current = c.current[1:] - goto LOOP - } - - // Only one block with this key and time range so return it - if len(c.current) == 1 { - if values.Len() > 0 { - first.markRead(values.MinTime(), values.MaxTime()) - } - return values, nil - } - - // Use the current block time range as our overlapping window - minT, maxT := first.readMin, first.readMax - if values.Len() > 0 { - minT, maxT = values.MinTime(), values.MaxTime() - } - if c.ascending { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the min time range to ensure values are returned in ascending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MinTime < minT && !cur.read() { - minT = cur.entry.MinTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MaxTime > maxT { - maxT = cur.entry.MaxTime - } - values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. 
- for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - v := &tsdb.StringArray{} - err := cur.r.ReadStringArrayBlockAt(&cur.entry, v) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(stringBlocksDecodedCounter).Add(1) - c.col.GetCounter(stringBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - excludeTombstonesStringArray(tombstones, v) - - // Remove values we already read - v.Exclude(cur.readMin, cur.readMax) - - if v.Len() > 0 { - // Only use values in the overlapping window - v.Include(minT, maxT) - // Merge the remaining values with the existing - values.Merge(v) - } - cur.markRead(minT, maxT) - } - - } else { - // Blocks are ordered by generation, we may have values in the past in later blocks, if so, - // expand the window to include the max time range to ensure values are returned in descending - // order - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.MaxTime > maxT && !cur.read() { - maxT = cur.entry.MaxTime - } - } - - // Find first block that overlaps our window - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() { - // Shrink our window so it's the intersection of the first overlapping block and the - // first block. We do this to minimize the region that overlaps and needs to - // be merged. - if cur.entry.MinTime < minT { - minT = cur.entry.MinTime - } - values.Include(minT, maxT) - break - } - } - - // Search the remaining blocks that overlap our window and append their values so we can - // merge them. - for i := 1; i < len(c.current); i++ { - cur := c.current[i] - // Skip this block if it doesn't contain points we looking for or they have already been read - if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() { - cur.markRead(minT, maxT) - continue - } - - v := &tsdb.StringArray{} - err := cur.r.ReadStringArrayBlockAt(&cur.entry, v) - if err != nil { - return nil, err - } - if c.col != nil { - c.col.GetCounter(stringBlocksDecodedCounter).Add(1) - c.col.GetCounter(stringBlocksSizeCounter).Add(int64(cur.entry.Size)) - } - tombstones := cur.r.TombstoneRange(c.key) - // Remove any tombstoned values - excludeTombstonesStringArray(tombstones, v) - - // Remove values we already read - v.Exclude(cur.readMin, cur.readMax) - - // If the block we decoded should have all of it's values included, mark it as read so we - // don't use it again. - if v.Len() > 0 { - v.Include(minT, maxT) - // Merge the remaining values with the existing - v.Merge(values) - *values = *v - } - cur.markRead(minT, maxT) - } - } - - first.markRead(minT, maxT) - - return values, err -} - -func excludeTombstonesStringArray(t []TimeRange, values *tsdb.StringArray) { - for i := range t { - values.Exclude(t[i].Min, t[i].Max) - } -} - -// ReadBooleanArrayBlock reads the next block as a set of boolean values. -func (c *KeyCursor) ReadBooleanArrayBlock(values *tsdb.BooleanArray) (*tsdb.BooleanArray, error) { -LOOP: - // No matching blocks to decode - if len(c.current) == 0 { - values.Timestamps = values.Timestamps[:0] - values.Values = values.Values[:0] - return values, nil - } - - // First block is the oldest block containing the points we're searching for. 
-	first := c.current[0]
-	err := first.r.ReadBooleanArrayBlockAt(&first.entry, values)
-	if err != nil {
-		return nil, err
-	}
-	if c.col != nil {
-		c.col.GetCounter(booleanBlocksDecodedCounter).Add(1)
-		c.col.GetCounter(booleanBlocksSizeCounter).Add(int64(first.entry.Size))
-	}
-
-	// Remove values we already read
-	values.Exclude(first.readMin, first.readMax)
-
-	// Remove any tombstones
-	tombstones := first.r.TombstoneRange(c.key)
-	excludeTombstonesBooleanArray(tombstones, values)
-	// If there are no values in this first block (all tombstoned or previously read) and
-	// we have more potential blocks to search, try again.
-	if values.Len() == 0 && len(c.current) > 0 {
-		c.current = c.current[1:]
-		goto LOOP
-	}
-
-	// Only one block with this key and time range so return it
-	if len(c.current) == 1 {
-		if values.Len() > 0 {
-			first.markRead(values.MinTime(), values.MaxTime())
-		}
-		return values, nil
-	}
-
-	// Use the current block time range as our overlapping window
-	minT, maxT := first.readMin, first.readMax
-	if values.Len() > 0 {
-		minT, maxT = values.MinTime(), values.MaxTime()
-	}
-	if c.ascending {
-		// Blocks are ordered by generation; we may have values in the past in later blocks. If so,
-		// expand the window to include the min time range to ensure values are returned in ascending
-		// order.
-		for i := 1; i < len(c.current); i++ {
-			cur := c.current[i]
-			if cur.entry.MinTime < minT && !cur.read() {
-				minT = cur.entry.MinTime
-			}
-		}
-
-		// Find first block that overlaps our window
-		for i := 1; i < len(c.current); i++ {
-			cur := c.current[i]
-			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
-				// Shrink our window so it's the intersection of the first overlapping block and the
-				// first block. We do this to minimize the region that overlaps and needs to
-				// be merged.
-				if cur.entry.MaxTime > maxT {
-					maxT = cur.entry.MaxTime
-				}
-				values.Include(minT, maxT)
-				break
-			}
-		}
-
-		// Search the remaining blocks that overlap our window and append their values so we can
-		// merge them.
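-		// On duplicate timestamps the merged-in value from the newer block replaces the
-		// existing one, which is how points rewritten in later TSM files are deduplicated.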
-		for i := 1; i < len(c.current); i++ {
-			cur := c.current[i]
-			// Skip this block if it doesn't contain points we're looking for or they have already been read
-			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
-				cur.markRead(minT, maxT)
-				continue
-			}
-
-			v := &tsdb.BooleanArray{}
-			err := cur.r.ReadBooleanArrayBlockAt(&cur.entry, v)
-			if err != nil {
-				return nil, err
-			}
-			if c.col != nil {
-				c.col.GetCounter(booleanBlocksDecodedCounter).Add(1)
-				c.col.GetCounter(booleanBlocksSizeCounter).Add(int64(cur.entry.Size))
-			}
-
-			tombstones := cur.r.TombstoneRange(c.key)
-			// Remove any tombstoned values
-			excludeTombstonesBooleanArray(tombstones, v)
-
-			// Remove values we already read
-			v.Exclude(cur.readMin, cur.readMax)
-
-			if v.Len() > 0 {
-				// Only use values in the overlapping window
-				v.Include(minT, maxT)
-				// Merge the remaining values with the existing
-				values.Merge(v)
-			}
-			cur.markRead(minT, maxT)
-		}
-
-	} else {
-		// Blocks are ordered by generation; we may have values in the past in later blocks. If so,
-		// expand the window to include the max time range to ensure values are returned in descending
-		// order.
-		for i := 1; i < len(c.current); i++ {
-			cur := c.current[i]
-			if cur.entry.MaxTime > maxT && !cur.read() {
-				maxT = cur.entry.MaxTime
-			}
-		}
-
-		// Find first block that overlaps our window
-		for i := 1; i < len(c.current); i++ {
-			cur := c.current[i]
-			if cur.entry.OverlapsTimeRange(minT, maxT) && !cur.read() {
-				// Shrink our window so it's the intersection of the first overlapping block and the
-				// first block. We do this to minimize the region that overlaps and needs to
-				// be merged.
-				if cur.entry.MinTime < minT {
-					minT = cur.entry.MinTime
-				}
-				values.Include(minT, maxT)
-				break
-			}
-		}
-
-		// Search the remaining blocks that overlap our window and append their values so we can
-		// merge them.
-		for i := 1; i < len(c.current); i++ {
-			cur := c.current[i]
-			// Skip this block if it doesn't contain points we're looking for or they have already been read
-			if !cur.entry.OverlapsTimeRange(minT, maxT) || cur.read() {
-				cur.markRead(minT, maxT)
-				continue
-			}
-
-			v := &tsdb.BooleanArray{}
-			err := cur.r.ReadBooleanArrayBlockAt(&cur.entry, v)
-			if err != nil {
-				return nil, err
-			}
-			if c.col != nil {
-				c.col.GetCounter(booleanBlocksDecodedCounter).Add(1)
-				c.col.GetCounter(booleanBlocksSizeCounter).Add(int64(cur.entry.Size))
-			}
-			tombstones := cur.r.TombstoneRange(c.key)
-			// Remove any tombstoned values
-			excludeTombstonesBooleanArray(tombstones, v)
-
-			// Remove values we already read
-			v.Exclude(cur.readMin, cur.readMax)
-
-			// If the block we decoded should have all of its values included, mark it as read so we
-			// don't use it again.
- if v.Len() > 0 { - v.Include(minT, maxT) - // Merge the remaining values with the existing - v.Merge(values) - *values = *v - } - cur.markRead(minT, maxT) - } - } - - first.markRead(minT, maxT) - - return values, err -} - -func excludeTombstonesBooleanArray(t []TimeRange, values *tsdb.BooleanArray) { - for i := range t { - values.Exclude(t[i].Min, t[i].Max) - } -} diff --git a/tsdb/engine/tsm1/file_store_array_test.go b/tsdb/engine/tsm1/file_store_array_test.go deleted file mode 100644 index 30c2b7fd932..00000000000 --- a/tsdb/engine/tsm1/file_store_array_test.go +++ /dev/null @@ -1,369 +0,0 @@ -package tsm1_test - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -func TestFileStore_Array(t *testing.T) { - makeFile := func(d ...interface{}) keyValues { - t.Helper() - if len(d)&1 == 1 { - panic("input should be even") - } - - vals := make([]tsm1.Value, len(d)/2) - for i := 0; i < len(d); i += 2 { - vals[i/2] = tsm1.NewFloatValue(int64(d[i].(int)), d[i+1].(float64)) - } - return keyValues{key: "cpu", values: vals} - } - - // sel selects files and values from the keyValues slice - // and used to build the expected output. - type sel struct { - // f is the index of the tsm file - f int - // i is the index of the value to select from the file - i int - } - - // del represents a file delete in order to generate tombstones - type del struct { - // f is the index of the tsm file to perform a delete. - // Specifying -1 will perform a delete over the entire FileStore. - f int - min, max int64 - } - - type read []sel - - cases := []struct { - name string - data []keyValues - time int64 - asc bool - deletes []del - reads []read - }{ - { - name: "SeekToAsc_FromStart", - - data: []keyValues{ - makeFile(0, 1.0), - makeFile(1, 2.0), - makeFile(2, 3.0), - }, - time: 0, - asc: true, - reads: []read{ - []sel{{0, 0}}, - }, - }, - { - name: "SeekToAsc_BeforeStart", - - data: []keyValues{ - makeFile(1, 1.0), - makeFile(2, 2.0), - makeFile(3, 3.0), - }, - time: 0, - asc: true, - reads: []read{ - []sel{{0, 0}}, - }, - }, - { - // Tests that seeking and reading all blocks that contain overlapping points does - // not skip any blocks. - name: "SeekToAsc_BeforeStart_OverlapFloat", - - data: []keyValues{ - makeFile(0, 0.0, 1, 1.0), - makeFile(2, 2.0), - makeFile(3, 3.0), - makeFile(0, 4.0, 2, 7.0), - }, - time: 0, - asc: true, - reads: []read{ - []sel{{3, 0}, {0, 1}, {3, 1}}, - []sel{{2, 0}}, - }, - }, - { - // Tests that blocks with a lower min time in later files are not returned - // more than once causing unsorted results. 
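-			// In this case the first read is {2,0},{2,1},{3,0},{0,1}: timestamps 0-3, each
-			// taken from the newest file that wrote it (1.1 from file 2, 2.2 from file 3);
-			// the remaining point (4, 4.0) from file 1 arrives in the second read.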
- name: "SeekToAsc_OverlapMinFloat", - - data: []keyValues{ - makeFile(1, 1.0, 3, 3.0), - makeFile(2, 2.0, 4, 4.0), - makeFile(0, 0.0, 1, 1.1), - makeFile(2, 2.2), - }, - time: 0, - asc: true, - reads: []read{ - []sel{{2, 0}, {2, 1}, {3, 0}, {0, 1}}, - []sel{{1, 1}}, - []sel{}, - }, - }, - { - name: "SeekToAsc_Middle", - - data: []keyValues{ - makeFile(1, 1.0, 2, 2.0, 3, 3.0), - makeFile(4, 4.0), - }, - time: 3, - asc: true, - reads: []read{ - []sel{{0, 2}}, - []sel{{1, 0}}, - }, - }, - { - name: "SeekToAsc_End", - - data: []keyValues{ - makeFile(0, 1.0), - makeFile(1, 2.0), - makeFile(2, 3.0), - }, - time: 2, - asc: true, - reads: []read{ - []sel{{2, 0}}, - }, - }, - - // descending cursor tests - { - name: "SeekToDesc_FromStart", - - data: []keyValues{ - makeFile(0, 1.0), - makeFile(1, 2.0), - makeFile(2, 3.0), - }, - time: 0, - asc: false, - reads: []read{ - []sel{{0, 0}}, - }, - }, - { - name: "SeekToDesc_Duplicate", - - data: []keyValues{ - makeFile(0, 4.0), - makeFile(0, 1.0), - makeFile(2, 2.0), - makeFile(2, 3.0), - }, - time: 2, - asc: false, - reads: []read{ - []sel{{3, 0}}, - []sel{{1, 0}}, - }, - }, - { - name: "SeekToDesc_OverlapMaxFloat", - - data: []keyValues{ - makeFile(1, 1.0, 3, 3.0), - makeFile(2, 2.0, 4, 4.0), - makeFile(0, 0.0, 1, 1.1), - makeFile(2, 2.2), - }, - time: 5, - asc: false, - reads: []read{ - []sel{{3, 0}, {0, 1}, {1, 1}}, - []sel{{2, 0}, {2, 1}}, - }, - }, - { - name: "SeekToDesc_AfterEnd", - - data: []keyValues{ - makeFile(1, 1.0), - makeFile(2, 2.0), - makeFile(3, 3.0), - }, - time: 4, - asc: false, - reads: []read{ - []sel{{2, 0}}, - }, - }, - { - name: "SeekToDesc_AfterEnd_OverlapFloat", - - data: []keyValues{ - makeFile(8, 0.0, 9, 1.0), - makeFile(2, 2.0), - makeFile(3, 3.0), - makeFile(3, 4.0, 7, 7.0), - }, - time: 10, - asc: false, - reads: []read{ - []sel{{0, 0}, {0, 1}}, - []sel{{3, 0}, {3, 1}}, - []sel{{1, 0}}, - []sel{}, - }, - }, - { - name: "SeekToDesc_Middle", - - data: []keyValues{ - makeFile(1, 1.0), - makeFile(2, 2.0, 3, 3.0, 4, 4.0), - }, - time: 3, - asc: false, - reads: []read{ - []sel{{1, 0}, {1, 1}}, - }, - }, - { - name: "SeekToDesc_End", - - data: []keyValues{ - makeFile(0, 1.0), - makeFile(1, 2.0), - makeFile(2, 3.0), - }, - time: 2, - asc: false, - reads: []read{ - []sel{{2, 0}}, - }, - }, - - // tombstone tests - { - name: "TombstoneRange", - - data: []keyValues{ - makeFile(0, 1.0), - makeFile(1, 2.0), - makeFile(2, 3.0), - }, - time: 0, - asc: true, - deletes: []del{ - {-1, 1, 1}, - }, - reads: []read{ - []sel{{0, 0}}, - []sel{{2, 0}}, - []sel{}, - }, - }, - { - name: "TombstoneRange_PartialFirst", - - data: []keyValues{ - makeFile(0, 0.0, 1, 1.0), - makeFile(2, 2.0), - }, - time: 0, - asc: true, - deletes: []del{ - {0, 1, 3}, - }, - reads: []read{ - []sel{{0, 0}}, - []sel{{1, 0}}, - []sel{}, - }, - }, - { - name: "TombstoneRange_PartialFloat", - - data: []keyValues{ - makeFile(0, 0.0, 1, 1.0, 2, 2.0), - }, - time: 0, - asc: true, - deletes: []del{ - {-1, 1, 1}, - }, - reads: []read{ - []sel{{0, 0}, {0, 2}}, - []sel{}, - }, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - files, err := newFiles(t, dir, tc.data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - for _, del := range tc.deletes { - if del.f > -1 { - // Delete part of the block in the first file. 
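-					// A non-negative index tombstones the range in just that one TSM file; the
-					// -1 case handled below applies DeleteRange across the entire FileStore.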
- r := MustOpenTSMReader(files[del.f]) - r.DeleteRange([][]byte{[]byte("cpu")}, del.min, del.max) - r.Close() - } - } - - fs.Replace(nil, files) - - for _, del := range tc.deletes { - if del.f == -1 { - if err := fs.DeleteRange([][]byte{[]byte("cpu")}, del.min, del.max); err != nil { - t.Fatalf("unexpected error delete range: %v", err) - } - } - } - - buf := tsdb.NewFloatArrayLen(1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), tc.time, tc.asc) - t.Cleanup(c.Close) - - for i, read := range tc.reads { - // Search for an entry that exists in the second file - values, err := c.ReadFloatArrayBlock(buf) - if err != nil { - t.Fatalf("read %d failed: unexpected error reading values: %v", i, err) - } - - exp := &tsdb.FloatArray{} - for _, s := range read { - vals := tc.data[s.f].values - exp.Timestamps = append(exp.Timestamps, vals[s.i].UnixNano()) - exp.Values = append(exp.Values, vals[s.i].Value().(float64)) - } - - if len(read) == 0 { - exp = tsdb.NewFloatArrayLen(0) - } - - if !cmp.Equal(values, exp) { - t.Fatalf("read %d failed: unexpected values -got/+exp\n%s", i, cmp.Diff(values, exp)) - } - - c.Next() - } - }) - } -} diff --git a/tsdb/engine/tsm1/file_store_key_iterator.go b/tsdb/engine/tsm1/file_store_key_iterator.go deleted file mode 100644 index c3613b654c8..00000000000 --- a/tsdb/engine/tsm1/file_store_key_iterator.go +++ /dev/null @@ -1,112 +0,0 @@ -package tsm1 - -import ( - "bytes" - "container/heap" -) - -type keyIterator struct { - f TSMFile - c int // current key index - n int // key count - key []byte - typ byte -} - -func newKeyIterator(f TSMFile, seek []byte) *keyIterator { - c, n := 0, f.KeyCount() - if len(seek) > 0 { - c = f.Seek(seek) - } - - if c >= n { - return nil - } - - k := &keyIterator{f: f, c: c, n: n} - k.next() - - return k -} - -func (k *keyIterator) next() bool { - if k.c < k.n { - k.key, k.typ = k.f.KeyAt(k.c) - k.c++ - return true - } - return false -} - -type mergeKeyIterator struct { - itrs keyIterators - key []byte - typ byte -} - -func newMergeKeyIterator(files []TSMFile, seek []byte) *mergeKeyIterator { - m := &mergeKeyIterator{} - itrs := make(keyIterators, 0, len(files)) - for _, f := range files { - if ki := newKeyIterator(f, seek); ki != nil { - itrs = append(itrs, ki) - } - } - m.itrs = itrs - heap.Init(&m.itrs) - - return m -} - -func (m *mergeKeyIterator) Next() bool { - merging := len(m.itrs) > 1 - -RETRY: - if len(m.itrs) == 0 { - return false - } - - key, typ := m.itrs[0].key, m.itrs[0].typ - more := m.itrs[0].next() - - switch { - case len(m.itrs) > 1: - if !more { - // remove iterator from heap - heap.Pop(&m.itrs) - } else { - heap.Fix(&m.itrs, 0) - } - - case len(m.itrs) == 1: - if !more { - m.itrs = nil - } - } - - if merging && bytes.Equal(m.key, key) { - // same as previous key, keep iterating - goto RETRY - } - - m.key, m.typ = key, typ - - return true -} - -func (m *mergeKeyIterator) Read() ([]byte, byte) { return m.key, m.typ } - -type keyIterators []*keyIterator - -func (k keyIterators) Len() int { return len(k) } -func (k keyIterators) Less(i, j int) bool { return bytes.Compare(k[i].key, k[j].key) == -1 } -func (k keyIterators) Swap(i, j int) { k[i], k[j] = k[j], k[i] } -func (k *keyIterators) Push(x interface{}) { *k = append(*k, x.(*keyIterator)) } - -func (k *keyIterators) Pop() interface{} { - old := *k - n := len(old) - x := old[n-1] - *k = old[:n-1] - return x -} diff --git a/tsdb/engine/tsm1/file_store_key_iterator_test.go b/tsdb/engine/tsm1/file_store_key_iterator_test.go deleted file mode 100644 index 
cbb3b33ab34..00000000000 --- a/tsdb/engine/tsm1/file_store_key_iterator_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package tsm1 - -import ( - "sort" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/tsdb" -) - -func TestNewMergeKeyIterator(t *testing.T) { - cases := []struct { - name string - seek string - files []TSMFile - - exp []string - }{ - { - name: "mixed", - files: newTSMFiles( - []string{"aaaa", "bbbb", "cccc", "dddd"}, - []string{"aaaa", "cccc", "dddd"}, - []string{"eeee", "ffff", "gggg"}, - []string{"aaaa"}, - []string{"dddd"}, - ), - exp: []string{"aaaa", "bbbb", "cccc", "dddd", "eeee", "ffff", "gggg"}, - }, - - { - name: "similar keys", - files: newTSMFiles( - []string{"a", "aaa"}, - []string{"aa", "aaaa"}, - ), - exp: []string{"a", "aa", "aaa", "aaaa"}, - }, - - { - name: "seek skips some files", - seek: "eeee", - files: newTSMFiles( - []string{"aaaa", "bbbb", "cccc", "dddd"}, - []string{"aaaa", "cccc", "dddd"}, - []string{"eeee", "ffff", "gggg"}, - []string{"aaaa"}, - []string{"dddd"}, - ), - exp: []string{"eeee", "ffff", "gggg"}, - }, - - { - name: "keys same across all files", - files: newTSMFiles( - []string{"aaaa", "bbbb", "cccc", "dddd"}, - []string{"aaaa", "bbbb", "cccc", "dddd"}, - []string{"aaaa", "bbbb", "cccc", "dddd"}, - ), - exp: []string{"aaaa", "bbbb", "cccc", "dddd"}, - }, - - { - name: "keys same across all files with extra", - files: newTSMFiles( - []string{"aaaa", "bbbb", "cccc", "dddd"}, - []string{"aaaa", "bbbb", "cccc", "dddd"}, - []string{"aaaa", "bbbb", "cccc", "dddd", "eeee"}, - ), - exp: []string{"aaaa", "bbbb", "cccc", "dddd", "eeee"}, - }, - - { - name: "seek skips all files", - seek: "eeee", - files: newTSMFiles( - []string{"aaaa", "bbbb", "cccc", "dddd"}, - []string{"aaaa", "bbbb", "cccc", "dddd"}, - []string{"aaaa", "bbbb", "cccc", "dddd"}, - ), - exp: nil, - }, - - { - name: "keys sequential across all files", - files: newTSMFiles( - []string{"a", "b", "c", "d"}, - []string{"e", "f", "g", "h"}, - []string{"i", "j", "k", "l"}, - ), - exp: []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"}, - }, - - { - name: "seek past one file", - seek: "e", - files: newTSMFiles( - []string{"a", "b", "c", "d"}, - []string{"e", "f", "g", "h"}, - []string{"i", "j", "k", "l"}, - ), - exp: []string{"e", "f", "g", "h", "i", "j", "k", "l"}, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - ki := newMergeKeyIterator(tc.files, []byte(tc.seek)) - var act []string - for ki.Next() { - key, _ := ki.Read() - act = append(act, string(key)) - } - if !cmp.Equal(tc.exp, act) { - t.Error(cmp.Diff(tc.exp, act)) - } - }) - } - -} - -func newTSMFiles(keys ...[]string) []TSMFile { - var files []TSMFile - for _, k := range keys { - files = append(files, newMockTSMFile(k...)) - } - return files -} - -type mockTSMFile struct { - keys []string -} - -func newMockTSMFile(keys ...string) *mockTSMFile { - sort.Strings(keys) - return &mockTSMFile{keys: keys} -} - -func (t *mockTSMFile) KeyCount() int { return len(t.keys) } - -func (t *mockTSMFile) Seek(key []byte) int { - k := string(key) - return sort.Search(len(t.keys), func(i int) bool { - return t.keys[i] >= k - }) -} - -func (t *mockTSMFile) KeyAt(idx int) ([]byte, byte) { - return []byte(t.keys[idx]), BlockFloat64 -} - -func (*mockTSMFile) Path() string { panic("implement me") } -func (*mockTSMFile) Read(key []byte, t int64) ([]Value, error) { panic("implement me") } -func (*mockTSMFile) ReadAt(entry *IndexEntry, values []Value) ([]Value, 
error) { panic("implement me") } -func (*mockTSMFile) Entries(key []byte) []IndexEntry { panic("implement me") } -func (*mockTSMFile) ReadEntries(key []byte, entries *[]IndexEntry) []IndexEntry { - panic("implement me") -} -func (*mockTSMFile) ContainsValue(key []byte, t int64) bool { panic("implement me") } -func (*mockTSMFile) Contains(key []byte) bool { panic("implement me") } -func (*mockTSMFile) OverlapsTimeRange(min, max int64) bool { panic("implement me") } -func (*mockTSMFile) OverlapsKeyRange(min, max []byte) bool { panic("implement me") } -func (*mockTSMFile) TimeRange() (int64, int64) { panic("implement me") } -func (*mockTSMFile) TombstoneRange(key []byte) []TimeRange { panic("implement me") } -func (*mockTSMFile) KeyRange() ([]byte, []byte) { panic("implement me") } -func (*mockTSMFile) Type(key []byte) (byte, error) { panic("implement me") } -func (*mockTSMFile) BatchDelete() BatchDeleter { panic("implement me") } -func (*mockTSMFile) Delete(keys [][]byte) error { panic("implement me") } -func (*mockTSMFile) DeleteRange(keys [][]byte, min, max int64) error { panic("implement me") } -func (*mockTSMFile) HasTombstones() bool { panic("implement me") } -func (*mockTSMFile) TombstoneStats() TombstoneStat { panic("implement me") } -func (*mockTSMFile) Close() error { panic("implement me") } -func (*mockTSMFile) Size() uint32 { panic("implement me") } -func (*mockTSMFile) Rename(path string) error { panic("implement me") } -func (*mockTSMFile) Remove() error { panic("implement me") } -func (*mockTSMFile) InUse() bool { panic("implement me") } -func (*mockTSMFile) Ref() { panic("implement me") } -func (*mockTSMFile) Unref() { panic("implement me") } -func (*mockTSMFile) Stats() FileStat { panic("implement me") } -func (*mockTSMFile) BlockIterator() *BlockIterator { panic("implement me") } -func (*mockTSMFile) Free() error { panic("implement me") } - -func (*mockTSMFile) ReadFloatBlockAt(*IndexEntry, *[]FloatValue) ([]FloatValue, error) { - panic("implement me") -} - -func (*mockTSMFile) ReadIntegerBlockAt(*IndexEntry, *[]IntegerValue) ([]IntegerValue, error) { - panic("implement me") -} - -func (*mockTSMFile) ReadUnsignedBlockAt(*IndexEntry, *[]UnsignedValue) ([]UnsignedValue, error) { - panic("implement me") -} - -func (*mockTSMFile) ReadStringBlockAt(*IndexEntry, *[]StringValue) ([]StringValue, error) { - panic("implement me") -} - -func (*mockTSMFile) ReadBooleanBlockAt(*IndexEntry, *[]BooleanValue) ([]BooleanValue, error) { - panic("implement me") -} - -func (*mockTSMFile) ReadFloatArrayBlockAt(*IndexEntry, *tsdb.FloatArray) error { - panic("implement me") -} - -func (*mockTSMFile) ReadIntegerArrayBlockAt(*IndexEntry, *tsdb.IntegerArray) error { - panic("implement me") -} - -func (*mockTSMFile) ReadUnsignedArrayBlockAt(*IndexEntry, *tsdb.UnsignedArray) error { - panic("implement me") -} - -func (*mockTSMFile) ReadStringArrayBlockAt(*IndexEntry, *tsdb.StringArray) error { - panic("implement me") -} - -func (*mockTSMFile) ReadBooleanArrayBlockAt(*IndexEntry, *tsdb.BooleanArray) error { - panic("implement me") -} diff --git a/tsdb/engine/tsm1/file_store_observer.go b/tsdb/engine/tsm1/file_store_observer.go deleted file mode 100644 index a39ee114a45..00000000000 --- a/tsdb/engine/tsm1/file_store_observer.go +++ /dev/null @@ -1,6 +0,0 @@ -package tsm1 - -type noFileStoreObserver struct{} - -func (noFileStoreObserver) FileFinishing(path string) error { return nil } -func (noFileStoreObserver) FileUnlinking(path string) error { return nil } diff --git 
a/tsdb/engine/tsm1/file_store_test.go b/tsdb/engine/tsm1/file_store_test.go deleted file mode 100644 index b09bf0b2788..00000000000 --- a/tsdb/engine/tsm1/file_store_test.go +++ /dev/null @@ -1,3007 +0,0 @@ -package tsm1_test - -import ( - "context" - "fmt" - "os" - "path/filepath" - "reflect" - "strings" - "sync/atomic" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "go.uber.org/zap/zaptest" -) - -func TestFileStore_Read(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - keyValues{"mem", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - values, err := fs.Read([]byte("cpu"), 1) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := data[1] - if got, exp := len(values), len(exp.values); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp.values { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -func TestFileStore_SeekToAsc_FromStart(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 3.0)}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - // Search for an entry that exists in the second file - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := data[0] - if got, exp := len(values), len(exp.values); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp.values { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -func TestFileStore_SeekToAsc_Duplicate(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 2.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 3.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 4.0)}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - // Search for an entry that exists in the second file - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[1].values[0], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - // Check that calling Next will dedupe points - c.Next() - values, err = c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[3].values[0], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadFloatBlock(&buf) - if err != nil { - t.Fatal(err) - } - - exp = nil - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } -} - -func TestFileStore_SeekToAsc_BeforeStart(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 2.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, 3.0)}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := data[0] - if got, exp := len(values), len(exp.values); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp.values { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -// Tests that seeking and reading all blocks that contain overlapping points does -// not skip any blocks. -func TestFileStore_SeekToAsc_BeforeStart_OverlapFloat(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 0.0), tsm1.NewValue(1, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 2.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, 3.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 4.0), tsm1.NewValue(2, 7.0)}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[3].values[0], - data[0].values[1], - data[3].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[2].values[0], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -// Tests that seeking and reading all blocks that contain overlapping points does -// not skip any blocks. -func TestFileStore_SeekToAsc_BeforeStart_OverlapInteger(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, int64(0)), tsm1.NewValue(1, int64(1))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, int64(2))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, int64(3))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, int64(4)), tsm1.NewValue(2, int64(7))}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - buf := make([]tsm1.IntegerValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - values, err := c.ReadIntegerBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[3].values[0], - data[0].values[1], - data[3].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadIntegerBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[2].values[0], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -// Tests that seeking and reading all blocks that contain overlapping points does -// not skip any blocks. 
-func TestFileStore_SeekToAsc_BeforeStart_OverlapUnsigned(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, uint64(0)), tsm1.NewValue(1, uint64(1))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, uint64(2))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, uint64(3))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, uint64(4)), tsm1.NewValue(2, uint64(7))}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - buf := make([]tsm1.UnsignedValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - values, err := c.ReadUnsignedBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[3].values[0], - data[0].values[1], - data[3].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadUnsignedBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[2].values[0], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -// Tests that seeking and reading all blocks that contain overlapping points does -// not skip any blocks. -func TestFileStore_SeekToAsc_BeforeStart_OverlapBoolean(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, true), tsm1.NewValue(1, false)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, true)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, true)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, false), tsm1.NewValue(2, true)}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - buf := make([]tsm1.BooleanValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - values, err := c.ReadBooleanBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[3].values[0], - data[0].values[1], - data[3].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadBooleanBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[2].values[0], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -// Tests that seeking and reading all blocks that contain overlapping points does -// not skip any blocks. -func TestFileStore_SeekToAsc_BeforeStart_OverlapString(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, "zero"), tsm1.NewValue(1, "one")}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, "two")}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, "three")}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, "four"), tsm1.NewValue(2, "seven")}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - buf := make([]tsm1.StringValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - values, err := c.ReadStringBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[3].values[0], - data[0].values[1], - data[3].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadStringBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[2].values[0], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -// Tests that blocks with a lower min time in later files are not returned -// more than once causing unsorted results -func TestFileStore_SeekToAsc_OverlapMinFloat(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 1.0), tsm1.NewValue(3, 3.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 2.0), tsm1.NewValue(4, 4.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 0.0), tsm1.NewValue(1, 1.1)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 2.2)}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - // Search for an entry that exists in the second file - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[2].values[0], - data[2].values[1], - data[3].values[0], - data[0].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - // Check that calling Next will dedupe points - c.Next() - values, err = c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[1].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadFloatBlock(&buf) - if err != nil { - t.Fatal(err) - } - - exp = nil - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } -} - -// Tests that blocks with a lower min time in later files are not returned -// more than once causing unsorted results -func TestFileStore_SeekToAsc_OverlapMinInteger(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, int64(1)), tsm1.NewValue(3, int64(3))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, int64(2)), tsm1.NewValue(4, int64(4))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, int64(0)), tsm1.NewValue(1, int64(10))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, int64(5))}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - buf := make([]tsm1.IntegerValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - // Search for an entry that exists in the second file - values, err := c.ReadIntegerBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[2].values[0], - data[2].values[1], - data[3].values[0], - data[0].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - // Check that calling Next will dedupe points - c.Next() - values, err = c.ReadIntegerBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[1].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadIntegerBlock(&buf) - if err != nil { - t.Fatal(err) - } - - exp = nil - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } -} - -// Tests that blocks with a lower min time in later files are not returned -// more than once causing unsorted results -func TestFileStore_SeekToAsc_OverlapMinUnsigned(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, uint64(1)), tsm1.NewValue(3, uint64(3))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, uint64(2)), tsm1.NewValue(4, uint64(4))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, uint64(0)), tsm1.NewValue(1, uint64(10))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, uint64(5))}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - buf := make([]tsm1.UnsignedValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - // Search for an entry that exists in the second file - values, err := c.ReadUnsignedBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[2].values[0], - data[2].values[1], - data[3].values[0], - data[0].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - // Check that calling Next will dedupe points - c.Next() - values, err = c.ReadUnsignedBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[1].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadUnsignedBlock(&buf) - if err != nil { - t.Fatal(err) - } - - exp = nil - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } -} - -// Tests that blocks with a lower min time in later files are not returned -// more than once causing unsorted results -func TestFileStore_SeekToAsc_OverlapMinBoolean(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, true), tsm1.NewValue(3, true)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, true), tsm1.NewValue(4, true)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, true), tsm1.NewValue(1, false)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, false)}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - buf := make([]tsm1.BooleanValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - // Search for an entry that exists in the second file - values, err := c.ReadBooleanBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[2].values[0], - data[2].values[1], - data[3].values[0], - data[0].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - // Check that calling Next will dedupe points - c.Next() - values, err = c.ReadBooleanBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[1].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadBooleanBlock(&buf) - if err != nil { - t.Fatal(err) - } - - exp = nil - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } -} - -// Tests that blocks with a lower min time in later files are not returned -// more than once causing unsorted results -func TestFileStore_SeekToAsc_OverlapMinString(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, "1.0"), tsm1.NewValue(3, "3.0")}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, "2.0"), tsm1.NewValue(4, "4.0")}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, "0.0"), tsm1.NewValue(1, "1.1")}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, "2.2")}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - buf := make([]tsm1.StringValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - // Search for an entry that exists in the second file - values, err := c.ReadStringBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[2].values[0], - data[2].values[1], - data[3].values[0], - data[0].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - // Check that calling Next will dedupe points - c.Next() - values, err = c.ReadStringBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[1].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadStringBlock(&buf) - if err != nil { - t.Fatal(err) - } - - exp = nil - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } -} - -func TestFileStore_SeekToAsc_Middle(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 1.0), - tsm1.NewValue(2, 2.0), - tsm1.NewValue(3, 3.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(4, 4.0)}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 3, true) - t.Cleanup(c.Close) - - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{data[0].values[2]} - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{data[1].values[0]} - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - -} - -func TestFileStore_SeekToAsc_End(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 3.0)}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 2, true) - t.Cleanup(c.Close) - - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := data[2] - if got, exp := len(values), len(exp.values); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp.values { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -func TestFileStore_SeekToDesc_FromStart(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 3.0)}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, false) - t.Cleanup(c.Close) - - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - exp := data[0] - if got, exp := len(values), len(exp.values); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp.values { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -func TestFileStore_SeekToDesc_Duplicate(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 4.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 2.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 3.0)}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 2, false) - t.Cleanup(c.Close) - - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - exp := []tsm1.Value{ - data[3].values[0], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - exp = []tsm1.Value{ - data[1].values[0], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -func TestFileStore_SeekToDesc_OverlapMaxFloat(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 1.0), tsm1.NewValue(3, 3.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 2.0), tsm1.NewValue(4, 4.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 0.0), tsm1.NewValue(1, 1.1)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 2.2)}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 5, false) - t.Cleanup(c.Close) - - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[3].values[0], - data[0].values[1], - data[1].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - exp = []tsm1.Value{ - - data[2].values[0], - data[2].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -func TestFileStore_SeekToDesc_OverlapMaxInteger(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, int64(1)), tsm1.NewValue(3, int64(3))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, int64(2)), tsm1.NewValue(4, int64(4))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, int64(0)), tsm1.NewValue(1, int64(10))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, int64(5))}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - buf := make([]tsm1.IntegerValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 5, false) - t.Cleanup(c.Close) - - values, err := c.ReadIntegerBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[3].values[0], - data[0].values[1], - data[1].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadIntegerBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - exp = []tsm1.Value{ - data[2].values[0], - data[2].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} -func TestFileStore_SeekToDesc_OverlapMaxUnsigned(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, uint64(1)), tsm1.NewValue(3, uint64(3))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, uint64(2)), tsm1.NewValue(4, uint64(4))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, uint64(0)), tsm1.NewValue(1, uint64(10))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, uint64(5))}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - buf := make([]tsm1.UnsignedValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 5, false) - t.Cleanup(c.Close) - - values, err := c.ReadUnsignedBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[3].values[0], - data[0].values[1], - data[1].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadUnsignedBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - exp = []tsm1.Value{ - data[2].values[0], - data[2].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -func TestFileStore_SeekToDesc_OverlapMaxBoolean(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, true), tsm1.NewValue(3, true)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, true), tsm1.NewValue(4, true)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, true), tsm1.NewValue(1, false)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, false)}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - buf := make([]tsm1.BooleanValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 5, false) - t.Cleanup(c.Close) - - values, err := c.ReadBooleanBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[3].values[0], - data[0].values[1], - data[1].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadBooleanBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - exp = []tsm1.Value{ - data[2].values[0], - data[2].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -func TestFileStore_SeekToDesc_OverlapMaxString(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, "1.0"), tsm1.NewValue(3, "3.0")}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, "2.0"), tsm1.NewValue(4, "4.0")}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, "0.0"), tsm1.NewValue(1, "1.1")}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, "2.2")}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - buf := make([]tsm1.StringValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 5, false) - t.Cleanup(c.Close) - - values, err := c.ReadStringBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[3].values[0], - data[0].values[1], - data[1].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadStringBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - exp = []tsm1.Value{ - data[2].values[0], - data[2].values[1], - } - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -func TestFileStore_SeekToDesc_AfterEnd(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 2.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, 3.0)}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 4, false) - t.Cleanup(c.Close) - - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := data[2] - if got, exp := len(values), len(exp.values); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp.values { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -func TestFileStore_SeekToDesc_AfterEnd_OverlapFloat(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 4 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(8, 0.0), tsm1.NewValue(9, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 2.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, 3.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, 4.0), tsm1.NewValue(7, 7.0)}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 10, false) - t.Cleanup(c.Close) - - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[0].values[0], - data[0].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadFloatBlock(&buf) - - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[3].values[0], - data[3].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadFloatBlock(&buf) - - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[1].values[0], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadFloatBlock(&buf) - - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - if got, exp := len(values), 0; got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } -} - -func TestFileStore_SeekToDesc_AfterEnd_OverlapInteger(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(8, int64(0)), tsm1.NewValue(9, int64(1))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, int64(2))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, int64(3))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, int64(4)), tsm1.NewValue(10, int64(7))}}, - } - - 
files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - buf := make([]tsm1.IntegerValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 11, false) - t.Cleanup(c.Close) - - values, err := c.ReadIntegerBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[3].values[0], - data[0].values[0], - data[0].values[1], - data[3].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadIntegerBlock(&buf) - - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[1].values[0], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadIntegerBlock(&buf) - - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - if got, exp := len(values), 0; got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } -} - -func TestFileStore_SeekToDesc_AfterEnd_OverlapUnsigned(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(8, uint64(0)), tsm1.NewValue(9, uint64(1))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, uint64(2))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, uint64(3))}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, uint64(4)), tsm1.NewValue(10, uint64(7))}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - buf := make([]tsm1.UnsignedValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 11, false) - t.Cleanup(c.Close) - - values, err := c.ReadUnsignedBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[3].values[0], - data[0].values[0], - data[0].values[1], - data[3].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadUnsignedBlock(&buf) - - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[1].values[0], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadUnsignedBlock(&buf) - - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - if got, exp := len(values), 0; got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } -} - -func TestFileStore_SeekToDesc_AfterEnd_OverlapBoolean(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(8, true), tsm1.NewValue(9, true)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, true)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, false)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, true), tsm1.NewValue(7, false)}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - buf := make([]tsm1.BooleanValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 11, false) - t.Cleanup(c.Close) - - values, err := c.ReadBooleanBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[0].values[0], - data[0].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadBooleanBlock(&buf) - - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[3].values[0], - data[3].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadBooleanBlock(&buf) - - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[1].values[0], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadBooleanBlock(&buf) - - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - if got, exp := len(values), 0; got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } -} - -func TestFileStore_SeekToDesc_AfterEnd_OverlapString(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(8, "eight"), tsm1.NewValue(9, "nine")}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, "two")}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, "three")}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(3, "four"), tsm1.NewValue(7, "seven")}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - buf := make([]tsm1.StringValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 11, false) - t.Cleanup(c.Close) - - values, err := c.ReadStringBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[0].values[0], - data[0].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadStringBlock(&buf) - - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[3].values[0], - data[3].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadStringBlock(&buf) - - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[1].values[0], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadStringBlock(&buf) - - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - if got, exp := len(values), 0; got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } -} - -func TestFileStore_SeekToDesc_Middle(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 1.0)}}, - keyValues{"cpu", []tsm1.Value{ - tsm1.NewValue(2, 2.0), - tsm1.NewValue(3, 3.0), - tsm1.NewValue(4, 4.0)}, - }, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Search for an entry that exists in the second file - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 3, false) - t.Cleanup(c.Close) - - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := []tsm1.Value{ - data[1].values[0], - data[1].values[1], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - c.Next() - values, err = c.ReadFloatBlock(&buf) - - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp = []tsm1.Value{ - data[0].values[0], - } - - if got, exp := len(values), len(exp); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } - - c.Next() - values, err = c.ReadFloatBlock(&buf) - - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - if got, exp := len(values), 0; got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } -} - -func TestFileStore_SeekToDesc_End(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 3.0)}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 2, false) - t.Cleanup(c.Close) - - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := data[2] - if got, exp := len(values), len(exp.values); got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - for i, v := range exp.values { - if got, exp := values[i].Value(), v.Value(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", i, got, exp) - } - } -} - -func TestKeyCursor_TombstoneRange(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 3.0)}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil { - t.Fatalf("unexpected error delete range: %v", err) - } - - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - expValues := []int{0, 2} - for _, v := range expValues { - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - exp := data[v] - if got, exp := len(values), 1; got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := values[0].String(), exp.values[0].String(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", 0, got, exp) - } - c.Next() - } -} - -func TestKeyCursor_TombstoneRange_PartialFirst(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 0.0), tsm1.NewValue(1, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 2.0)}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - // Delete part of the block in the first file. - r := MustOpenTSMReader(files[0]) - r.DeleteRange([][]byte{[]byte("cpu")}, 1, 3) - t.Cleanup(func() { r.Close() }) - - fs.Replace(nil, files) - - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - expValues := []tsm1.Value{tsm1.NewValue(0, 0.0), tsm1.NewValue(2, 2.0)} - - for _, exp := range expValues { - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - if got, exp := len(values), 1; got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := values[0].String(), exp.String(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", 0, got, exp) - } - c.Next() - } -} - -func TestKeyCursor_TombstoneRange_PartialFloat(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{ - tsm1.NewValue(0, 1.0), - tsm1.NewValue(1, 2.0), - tsm1.NewValue(2, 3.0)}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil { - t.Fatalf("unexpected error delete range: %v", err) - } - - buf := make([]tsm1.FloatValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - values, err := c.ReadFloatBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - expValues := []tsm1.Value{data[0].values[0], data[0].values[2]} - for i, v := range expValues { - exp := v - if got, exp := len(values), 2; got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := values[i].String(), exp.String(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", 0, got, exp) - } - } -} - -func TestKeyCursor_TombstoneRange_PartialInteger(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{ - tsm1.NewValue(0, int64(1)), - tsm1.NewValue(1, int64(2)), - tsm1.NewValue(2, int64(3))}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil { - t.Fatalf("unexpected error delete range: %v", err) - } - - buf := make([]tsm1.IntegerValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - values, err := c.ReadIntegerBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - expValues := []tsm1.Value{data[0].values[0], data[0].values[2]} - for i, v := range expValues { - exp := v - if got, exp := len(values), 2; got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := values[i].String(), exp.String(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", 0, got, exp) - } - } -} - -func TestKeyCursor_TombstoneRange_PartialUnsigned(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{ - tsm1.NewValue(0, uint64(1)), - tsm1.NewValue(1, uint64(2)), - tsm1.NewValue(2, uint64(3))}}, - } - - files, err := newFiles(t, dir, data...) 
- if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil { - t.Fatalf("unexpected error delete range: %v", err) - } - - buf := make([]tsm1.UnsignedValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - values, err := c.ReadUnsignedBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - expValues := []tsm1.Value{data[0].values[0], data[0].values[2]} - for i, v := range expValues { - exp := v - if got, exp := len(values), 2; got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := values[i].String(), exp.String(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", 0, got, exp) - } - } -} - -func TestKeyCursor_TombstoneRange_PartialString(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{ - tsm1.NewValue(0, "1"), - tsm1.NewValue(1, "2"), - tsm1.NewValue(2, "3")}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil { - t.Fatalf("unexpected error delete range: %v", err) - } - - buf := make([]tsm1.StringValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - values, err := c.ReadStringBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - expValues := []tsm1.Value{data[0].values[0], data[0].values[2]} - for i, v := range expValues { - exp := v - if got, exp := len(values), 2; got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := values[i].String(), exp.String(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", 0, got, exp) - } - } -} - -func TestKeyCursor_TombstoneRange_PartialBoolean(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{ - tsm1.NewValue(0, true), - tsm1.NewValue(1, false), - tsm1.NewValue(2, true)}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil { - t.Fatalf("unexpected error delete range: %v", err) - } - - buf := make([]tsm1.BooleanValue, 1000) - c := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - t.Cleanup(c.Close) - - values, err := c.ReadBooleanBlock(&buf) - if err != nil { - t.Fatalf("unexpected error reading values: %v", err) - } - - expValues := []tsm1.Value{data[0].values[0], data[0].values[2]} - for i, v := range expValues { - exp := v - if got, exp := len(values), 2; got != exp { - t.Fatalf("value length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := values[i].String(), exp.String(); got != exp { - t.Fatalf("read value mismatch(%d): got %v, exp %v", 0, got, exp) - } - } -} - -func TestFileStore_Open(t *testing.T) { - dir := t.TempDir() - - // Create 3 TSM files... - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - keyValues{"mem", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - } - - _, err := newFileDir(t, dir, data...) 
- if err != nil { - fatal(t, "creating test files", err) - } - - fs := newTestFileStore(t, dir) - if err := fs.Open(context.Background()); err != nil { - fatal(t, "opening file store", err) - } - - if got, exp := fs.Count(), 3; got != exp { - t.Fatalf("file count mismatch: got %v, exp %v", got, exp) - } - - if got, exp := fs.CurrentGeneration(), 4; got != exp { - t.Fatalf("current ID mismatch: got %v, exp %v", got, exp) - } -} - -func TestFileStore_Remove(t *testing.T) { - dir := t.TempDir() - - // Create 3 TSM files... - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - keyValues{"mem", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - } - - files, err := newFileDir(t, dir, data...) - if err != nil { - fatal(t, "creating test files", err) - } - - fs := newTestFileStore(t, dir) - if err := fs.Open(context.Background()); err != nil { - fatal(t, "opening file store", err) - } - - if got, exp := fs.Count(), 3; got != exp { - t.Fatalf("file count mismatch: got %v, exp %v", got, exp) - } - - if got, exp := fs.CurrentGeneration(), 4; got != exp { - t.Fatalf("current ID mismatch: got %v, exp %v", got, exp) - } - - fs.Replace(files[2:3], nil) - - if got, exp := fs.Count(), 2; got != exp { - t.Fatalf("file count mismatch: got %v, exp %v", got, exp) - } - - if got, exp := fs.CurrentGeneration(), 4; got != exp { - t.Fatalf("current ID mismatch: got %v, exp %v", got, exp) - } -} - -func TestFileStore_Replace(t *testing.T) { - dir := t.TempDir() - - // Create 3 TSM files... - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 3.0)}}, - } - - files, err := newFileDir(t, dir, data...) 
- if err != nil { - fatal(t, "creating test files", err) - } - - // Replace requires assumes new files have a .tmp extension - replacement := fmt.Sprintf("%s.%s", files[2], tsm1.TmpTSMFileExtension) - os.Rename(files[2], replacement) - - fs := newTestFileStore(t, dir) - if err := fs.Open(context.Background()); err != nil { - fatal(t, "opening file store", err) - } - - if got, exp := fs.Count(), 2; got != exp { - t.Fatalf("file count mismatch: got %v, exp %v", got, exp) - } - - // Should record references to the two existing TSM files - cur := fs.KeyCursor(context.Background(), []byte("cpu"), 0, true) - - // Should move the existing files out of the way, but allow query to complete - if err := fs.Replace(files[:2], []string{replacement}); err != nil { - t.Fatalf("replace: %v", err) - } - - if got, exp := fs.Count(), 1; got != exp { - t.Fatalf("file count mismatch: got %v, exp %v", got, exp) - } - - // There should be two blocks (1 in each file) - cur.Next() - buf := make([]tsm1.FloatValue, 10) - values, err := cur.ReadFloatBlock(&buf) - if err != nil { - t.Fatal(err) - } - if got, exp := len(values), 1; got != exp { - t.Fatalf("value len mismatch: got %v, exp %v", got, exp) - } - - cur.Next() - values, err = cur.ReadFloatBlock(&buf) - if err != nil { - t.Fatal(err) - } - if got, exp := len(values), 1; got != exp { - t.Fatalf("value len mismatch: got %v, exp %v", got, exp) - } - - // No more blocks for this cursor - cur.Next() - values, err = cur.ReadFloatBlock(&buf) - if err != nil { - t.Fatal(err) - } - if got, exp := len(values), 0; got != exp { - t.Fatalf("value len mismatch: got %v, exp %v", got, exp) - } - - // Release the references (files should get evicted by purger shortly) - cur.Close() - - time.Sleep(time.Second) - // Make sure the two TSM files used by the cursor are gone - if _, err := os.Stat(files[0]); !os.IsNotExist(err) { - t.Fatalf("stat file: %v", err) - } - if _, err := os.Stat(files[1]); !os.IsNotExist(err) { - t.Fatalf("stat file: %v", err) - } - - // Make sure the new file exists - if _, err := os.Stat(files[2]); err != nil { - t.Fatalf("stat file: %v", err) - } - -} - -func TestFileStore_Open_Deleted(t *testing.T) { - dir := t.TempDir() - - // Create 3 TSM files... - data := []keyValues{ - keyValues{"cpu,host=server2!~#!value", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu,host=server1!~#!value", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - keyValues{"mem,host=server1!~#!value", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - } - - _, err := newFileDir(t, dir, data...) 
- if err != nil { - fatal(t, "creating test files", err) - } - - fs := newTestFileStore(t, dir) - if err := fs.Open(context.Background()); err != nil { - fatal(t, "opening file store", err) - } - - if got, exp := len(fs.Keys()), 3; got != exp { - t.Fatalf("file count mismatch: got %v, exp %v", got, exp) - } - - if err := fs.Delete([][]byte{[]byte("cpu,host=server2!~#!value")}); err != nil { - fatal(t, "deleting", err) - } - - fs2 := newTestFileStore(t, dir) - if err := fs2.Open(context.Background()); err != nil { - fatal(t, "opening file store", err) - } - - if got, exp := len(fs2.Keys()), 2; got != exp { - t.Fatalf("file count mismatch: got %v, exp %v", got, exp) - } -} - -func TestFileStore_Delete(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu,host=server2!~#!value", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu,host=server1!~#!value", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - keyValues{"mem,host=server1!~#!value", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - keys := fs.Keys() - if got, exp := len(keys), 3; got != exp { - t.Fatalf("key length mismatch: got %v, exp %v", got, exp) - } - - if err := fs.Delete([][]byte{[]byte("cpu,host=server2!~#!value")}); err != nil { - fatal(t, "deleting", err) - } - - keys = fs.Keys() - if got, exp := len(keys), 2; got != exp { - t.Fatalf("key length mismatch: got %v, exp %v", got, exp) - } -} - -func TestFileStore_Apply(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu,host=server2#!~#value", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu,host=server1#!~#value", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - keyValues{"mem,host=server1#!~#value", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - keys := fs.Keys() - if got, exp := len(keys), 3; got != exp { - t.Fatalf("key length mismatch: got %v, exp %v", got, exp) - } - - var n int64 - if err := fs.Apply(context.Background(), func(r tsm1.TSMFile) error { - atomic.AddInt64(&n, 1) - return nil - }); err != nil { - t.Fatalf("unexpected error deleting: %v", err) - } - - if got, exp := n, int64(3); got != exp { - t.Fatalf("apply mismatch: got %v, exp %v", got, exp) - } -} - -func TestFileStore_Stats(t *testing.T) { - dir := t.TempDir() - - // Create 3 TSM files... - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - keyValues{"mem", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - } - - files, err := newFileDir(t, dir, data...) - if err != nil { - fatal(t, "creating test files", err) - } - - fs := newTestFileStore(t, dir) - if err := fs.Open(context.Background()); err != nil { - fatal(t, "opening file store", err) - } - - stats := fs.Stats() - if got, exp := len(stats), 3; got != exp { - t.Fatalf("file count mismatch: got %v, exp %v", got, exp) - } - - // Another call should result in the same stats being returned. - if got, exp := fs.Stats(), stats; !reflect.DeepEqual(got, exp) { - t.Fatalf("got %v, exp %v", got, exp) - } - - // Removing one of the files should invalidate the cache. 
- fs.Replace(files[0:1], nil) - if got, exp := len(fs.Stats()), 2; got != exp { - t.Fatalf("file count mismatch: got %v, exp %v", got, exp) - } - - // Write a new TSM file that that is not open - newFile := MustWriteTSM(t, dir, 4, map[string][]tsm1.Value{ - "mem": []tsm1.Value{tsm1.NewValue(0, 1.0)}, - }) - - replacement := fmt.Sprintf("%s.%s.%s", files[2], tsm1.TmpTSMFileExtension, tsm1.TSMFileExtension) // Assumes new files have a .tmp extension - if err := os.Rename(newFile, replacement); err != nil { - t.Fatalf("rename: %v", err) - } - // Replace 3 w/ 1 - if err := fs.Replace(files, []string{replacement}); err != nil { - t.Fatalf("replace: %v", err) - } - - var found bool - stats = fs.Stats() - for _, stat := range stats { - if strings.HasSuffix(stat.Path, fmt.Sprintf("%s.%s.%s", tsm1.TSMFileExtension, tsm1.TmpTSMFileExtension, tsm1.TSMFileExtension)) { - found = true - } - } - - if !found { - t.Fatalf("Didn't find %s in stats: %v", "foo", stats) - } - - newFile = MustWriteTSM(t, dir, 5, map[string][]tsm1.Value{ - "mem": []tsm1.Value{tsm1.NewValue(0, 1.0)}, - }) - - // Adding some files should invalidate the cache. - fs.Replace(nil, []string{newFile}) - if got, exp := len(fs.Stats()), 2; got != exp { - t.Fatalf("file count mismatch: got %v, exp %v", got, exp) - } -} - -func TestFileStore_CreateSnapshot(t *testing.T) { - dir := t.TempDir() - fs := newTestFileStore(t, dir) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(2, 3.0)}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - fs.Replace(nil, files) - - // Create a tombstone - if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil { - t.Fatalf("unexpected error delete range: %v", err) - } - - s, e := fs.CreateSnapshot() - if e != nil { - t.Fatal(e) - } - t.Logf("temp file for hard links: %q", s) - - tfs, e := os.ReadDir(s) - if e != nil { - t.Fatal(e) - } - if len(tfs) == 0 { - t.Fatal("no files found") - } - - for _, f := range fs.Files() { - p := filepath.Join(s, filepath.Base(f.Path())) - t.Logf("checking for existence of hard link %q", p) - if _, err := os.Stat(p); os.IsNotExist(err) { - t.Fatalf("unable to find file %q", p) - } - if ts := f.TombstoneStats(); ts.TombstoneExists { - p := filepath.Join(s, filepath.Base(ts.Path)) - t.Logf("checking for existence of hard link %q", p) - if _, err := os.Stat(p); os.IsNotExist(err) { - t.Fatalf("unable to find file %q", p) - } - } - } -} - -// newTestFileStore returns a FileStore for testing. The FileStore is closed by -// tb.Cleanup when the test and all its subtests complete. 
-func newTestFileStore(tb testing.TB, dir string) *tsm1.FileStore { - fs := tsm1.NewFileStore(dir, tsdb.EngineTags{}) - - tb.Cleanup(func() { - fs.Close() - }) - - return fs -} - -type mockObserver struct { - fileFinishing func(path string) error - fileUnlinking func(path string) error -} - -func (m mockObserver) FileFinishing(path string) error { - return m.fileFinishing(path) -} - -func (m mockObserver) FileUnlinking(path string) error { - return m.fileUnlinking(path) -} - -func TestFileStore_Observer(t *testing.T) { - var finishes, unlinks []string - m := mockObserver{ - fileFinishing: func(path string) error { - finishes = append(finishes, path) - return nil - }, - fileUnlinking: func(path string) error { - unlinks = append(unlinks, path) - return nil - }, - } - - check := func(results []string, expect ...string) { - t.Helper() - if len(results) != len(expect) { - t.Fatalf("wrong number of results: %d results != %d expected", len(results), len(expect)) - } - for i, ex := range expect { - if got := filepath.Base(results[i]); got != ex { - t.Fatalf("unexpected result: got %q != expected %q", got, ex) - } - } - } - - dir := t.TempDir() - fs := newTestFileStore(t, dir) - fs.WithObserver(m) - - // Setup 3 files - data := []keyValues{ - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0), tsm1.NewValue(1, 2.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(10, 2.0)}}, - keyValues{"cpu", []tsm1.Value{tsm1.NewValue(20, 3.0)}}, - } - - files, err := newFiles(t, dir, data...) - if err != nil { - t.Fatalf("unexpected error creating files: %v", err) - } - - if err := fs.Replace(nil, files); err != nil { - t.Fatalf("error replacing: %v", err) - } - - // Create a tombstone - if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 10, 10); err != nil { - t.Fatalf("unexpected error delete range: %v", err) - } - - // Check that we observed finishes correctly - check(finishes, - "000000001-000000001.tsm", - "000000002-000000001.tsm", - "000000003-000000001.tsm", - "000000002-000000001.tombstone.tmp", - ) - check(unlinks) - unlinks, finishes = nil, nil - - // remove files including a tombstone - if err := fs.Replace(files[1:3], nil); err != nil { - t.Fatal("error replacing") - } - - // Check that we observed unlinks correctly - check(finishes) - check(unlinks, - "000000002-000000001.tsm", - "000000002-000000001.tombstone", - "000000003-000000001.tsm", - ) - unlinks, finishes = nil, nil - - // add a tombstone for the first file multiple times. 
- if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 0, 0); err != nil { - t.Fatalf("unexpected error delete range: %v", err) - } - if err := fs.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil { - t.Fatalf("unexpected error delete range: %v", err) - } - - check(finishes, - "000000001-000000001.tombstone.tmp", - "000000001-000000001.tombstone.tmp", - ) - check(unlinks) - unlinks, finishes = nil, nil -} - -func newFileDir(tb testing.TB, dir string, values ...keyValues) ([]string, error) { - var files []string - - id := 1 - for _, v := range values { - f := MustTempFile(tb, dir) - w, err := tsm1.NewTSMWriter(f) - if err != nil { - return nil, err - } - - if err := w.Write([]byte(v.key), v.values); err != nil { - return nil, err - } - - if err := w.WriteIndex(); err != nil { - return nil, err - } - - if err := w.Close(); err != nil { - return nil, err - } - newName := filepath.Join(filepath.Dir(f.Name()), tsm1.DefaultFormatFileName(id, 1)+".tsm") - if err := os.Rename(f.Name(), newName); err != nil { - return nil, err - } - id++ - - files = append(files, newName) - } - return files, nil - -} - -func newFiles(tb testing.TB, dir string, values ...keyValues) ([]string, error) { - var files []string - - id := 1 - for _, v := range values { - f := MustTempFile(tb, dir) - w, err := tsm1.NewTSMWriter(f) - if err != nil { - return nil, err - } - - if err := w.Write([]byte(v.key), v.values); err != nil { - return nil, err - } - - if err := w.WriteIndex(); err != nil { - return nil, err - } - - if err := w.Close(); err != nil { - return nil, err - } - - newName := filepath.Join(filepath.Dir(f.Name()), tsm1.DefaultFormatFileName(id, 1)+".tsm") - if err := os.Rename(f.Name(), newName); err != nil { - return nil, err - } - id++ - - files = append(files, newName) - } - return files, nil -} - -type keyValues struct { - key string - values []tsm1.Value -} - -func MustTempFile(tb testing.TB, dir string) *os.File { - f, err := os.CreateTemp(dir, "tsm1test") - if err != nil { - panic(fmt.Sprintf("failed to create temp file: %v", err)) - } - tb.Cleanup(func() { - f.Close() - }) - - return f -} - -func fatal(t *testing.T, msg string, err error) { - t.Fatalf("unexpected error %v: %v", msg, err) -} - -var fsResult []tsm1.FileStat - -func BenchmarkFileStore_Stats(b *testing.B) { - dir := b.TempDir() - - // Create some TSM files... - data := make([]keyValues, 0, 1000) - for i := 0; i < 1000; i++ { - data = append(data, keyValues{"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}) - } - - _, err := newFileDir(b, dir, data...) - if err != nil { - b.Fatalf("creating benchmark files %v", err) - } - - fs := newTestFileStore(b, dir) - fs.WithLogger(zaptest.NewLogger(b)) - - if err := fs.Open(context.Background()); err != nil { - b.Fatalf("opening file store %v", err) - } - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - fsResult = fs.Stats() - } -} diff --git a/tsdb/engine/tsm1/float.go b/tsdb/engine/tsm1/float.go deleted file mode 100644 index 17c3f79c135..00000000000 --- a/tsdb/engine/tsm1/float.go +++ /dev/null @@ -1,280 +0,0 @@ -package tsm1 - -/* -This code is originally from: https://github.com/dgryski/go-tsz and has been modified to remove -the timestamp compression functionality. - -It implements the float compression as presented in: http://www.vldb.org/pvldb/vol8/p1816-teller.pdf. -This implementation uses a sentinel value of NaN which means that float64 NaN cannot be stored using -this version. 
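
A minimal, illustrative round trip through the encoder and decoder defined below
(values chosen purely for exposition; float_test.go holds the real tests):

	enc := NewFloatEncoder()
	enc.Write(12)
	enc.Write(24)
	enc.Flush() // appends the NaN end-of-stream marker
	b, err := enc.Bytes()
	if err != nil {
		// only set if a NaN was written as a data value
	}

	var dec FloatDecoder
	if err := dec.SetBytes(b); err != nil {
		// handle error
	}
	for dec.Next() {
		_ = dec.Values() // yields 12, then 24
	}
	if err := dec.Error(); err != nil {
		// handle error
	}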
-*/ - -import ( - "bytes" - "fmt" - "math" - "math/bits" - - "github.com/dgryski/go-bitstream" -) - -// Note: an uncompressed format is not yet implemented. -// floatCompressedGorilla is a compressed format using the gorilla paper encoding -const floatCompressedGorilla = 1 - -// uvnan is the constant returned from math.NaN(). -const uvnan = 0x7FF8000000000001 - -// FloatEncoder encodes multiple float64s into a byte slice. -type FloatEncoder struct { - val float64 - err error - - leading uint64 - trailing uint64 - - buf bytes.Buffer - bw *bitstream.BitWriter - - first bool - finished bool -} - -// NewFloatEncoder returns a new FloatEncoder. -func NewFloatEncoder() *FloatEncoder { - s := FloatEncoder{ - first: true, - leading: ^uint64(0), - } - - s.bw = bitstream.NewWriter(&s.buf) - s.buf.WriteByte(floatCompressedGorilla << 4) - - return &s -} - -// Reset sets the encoder back to its initial state. -func (s *FloatEncoder) Reset() { - s.val = 0 - s.err = nil - s.leading = ^uint64(0) - s.trailing = 0 - s.buf.Reset() - s.buf.WriteByte(floatCompressedGorilla << 4) - - s.bw.Resume(0x0, 8) - - s.finished = false - s.first = true -} - -// Bytes returns a copy of the underlying byte buffer used in the encoder. -func (s *FloatEncoder) Bytes() ([]byte, error) { - return s.buf.Bytes(), s.err -} - -// Flush indicates there are no more values to encode. -func (s *FloatEncoder) Flush() { - if !s.finished { - // write an end-of-stream record - s.finished = true - s.Write(math.NaN()) - s.bw.Flush(bitstream.Zero) - } -} - -// Write encodes v to the underlying buffer. -func (s *FloatEncoder) Write(v float64) { - // Only allow NaN as a sentinel value - if math.IsNaN(v) && !s.finished { - s.err = fmt.Errorf("unsupported value: NaN") - return - } - if s.first { - // first point - s.val = v - s.first = false - s.bw.WriteBits(math.Float64bits(v), 64) - return - } - - vDelta := math.Float64bits(v) ^ math.Float64bits(s.val) - - if vDelta == 0 { - s.bw.WriteBit(bitstream.Zero) - } else { - s.bw.WriteBit(bitstream.One) - - leading := uint64(bits.LeadingZeros64(vDelta)) - trailing := uint64(bits.TrailingZeros64(vDelta)) - - // Clamp number of leading zeros to avoid overflow when encoding - leading &= 0x1F - if leading >= 32 { - leading = 31 - } - - // TODO(dgryski): check if it's 'cheaper' to reset the leading/trailing bits instead - if s.leading != ^uint64(0) && leading >= s.leading && trailing >= s.trailing { - s.bw.WriteBit(bitstream.Zero) - s.bw.WriteBits(vDelta>>s.trailing, 64-int(s.leading)-int(s.trailing)) - } else { - s.leading, s.trailing = leading, trailing - - s.bw.WriteBit(bitstream.One) - s.bw.WriteBits(leading, 5) - - // Note that if leading == trailing == 0, then sigbits == 64. But that - // value doesn't actually fit into the 6 bits we have. - // Luckily, we never need to encode 0 significant bits, since that would - // put us in the other case (vdelta == 0). So instead we write out a 0 and - // adjust it back to 64 on unpacking. - sigbits := 64 - leading - trailing - s.bw.WriteBits(sigbits, 6) - s.bw.WriteBits(vDelta>>trailing, int(sigbits)) - } - } - - s.val = v -} - -// FloatDecoder decodes a byte slice into multiple float64 values. -type FloatDecoder struct { - val uint64 - - leading uint64 - trailing uint64 - - br BitReader - b []byte - - first bool - finished bool - - err error -} - -// SetBytes initializes the decoder with b. Must call before calling Next(). 
-func (it *FloatDecoder) SetBytes(b []byte) error { - var v uint64 - if len(b) == 0 { - v = uvnan - } else { - // first byte is the compression type. - // we currently just have gorilla compression. - it.br.Reset(b[1:]) - - var err error - v, err = it.br.ReadBits(64) - if err != nil { - return err - } - } - - // Reset all fields. - it.val = v - it.leading = 0 - it.trailing = 0 - it.b = b - it.first = true - it.finished = false - it.err = nil - - return nil -} - -// Next returns true if there are remaining values to read. -func (it *FloatDecoder) Next() bool { - if it.err != nil || it.finished { - return false - } - - if it.first { - it.first = false - - // mark as finished if there were no values. - if it.val == uvnan { // IsNaN - it.finished = true - return false - } - - return true - } - - // read compressed value - var bit bool - if it.br.CanReadBitFast() { - bit = it.br.ReadBitFast() - } else if v, err := it.br.ReadBit(); err != nil { - it.err = err - return false - } else { - bit = v - } - - if !bit { - // it.val = it.val - } else { - var bit bool - if it.br.CanReadBitFast() { - bit = it.br.ReadBitFast() - } else if v, err := it.br.ReadBit(); err != nil { - it.err = err - return false - } else { - bit = v - } - - if !bit { - // reuse leading/trailing zero bits - // it.leading, it.trailing = it.leading, it.trailing - } else { - bits, err := it.br.ReadBits(5) - if err != nil { - it.err = err - return false - } - it.leading = bits - - bits, err = it.br.ReadBits(6) - if err != nil { - it.err = err - return false - } - mbits := bits - // 0 significant bits here means we overflowed and we actually need 64; see comment in encoder - if mbits == 0 { - mbits = 64 - } - it.trailing = 64 - it.leading - mbits - } - - mbits := uint(64 - it.leading - it.trailing) - bits, err := it.br.ReadBits(mbits) - if err != nil { - it.err = err - return false - } - - vbits := it.val - vbits ^= (bits << it.trailing) - - if vbits == uvnan { // IsNaN - it.finished = true - return false - } - it.val = vbits - } - - return true -} - -// Values returns the current float64 value. -func (it *FloatDecoder) Values() float64 { - return math.Float64frombits(it.val) -} - -// Error returns the current decoding error. 
-func (it *FloatDecoder) Error() error { - return it.err -} diff --git a/tsdb/engine/tsm1/float_test.go b/tsdb/engine/tsm1/float_test.go deleted file mode 100644 index cdbb5d2233b..00000000000 --- a/tsdb/engine/tsm1/float_test.go +++ /dev/null @@ -1,336 +0,0 @@ -package tsm1_test - -import ( - "fmt" - "math" - "reflect" - "testing" - "testing/quick" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -func TestFloatEncoder_Simple(t *testing.T) { - // Example from the paper - s := tsm1.NewFloatEncoder() - - s.Write(12) - s.Write(12) - s.Write(24) - - // extra tests - - // floating point masking/shifting bug - s.Write(13) - s.Write(24) - - // delta-of-delta sizes - s.Write(24) - s.Write(24) - s.Write(24) - - s.Flush() - - b, err := s.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var it tsm1.FloatDecoder - if err := it.SetBytes(b); err != nil { - t.Fatalf("unexpected error creating float decoder: %v", err) - } - - want := []float64{ - 12, - 12, - 24, - - 13, - 24, - - 24, - 24, - 24, - } - - for _, w := range want { - if !it.Next() { - t.Fatalf("Next()=false, want true") - } - vv := it.Values() - if w != vv { - t.Errorf("Values()=(%v), want (%v)\n", vv, w) - } - } - - if it.Next() { - t.Fatalf("Next()=true, want false") - } - - if err := it.Error(); err != nil { - t.Errorf("it.Error()=%v, want nil", err) - } -} - -func TestFloatEncoder_SimilarFloats(t *testing.T) { - s := tsm1.NewFloatEncoder() - want := []float64{ - 6.00065e+06, - 6.000656e+06, - 6.000657e+06, - - 6.000659e+06, - 6.000661e+06, - } - - for _, v := range want { - s.Write(v) - } - - s.Flush() - - b, err := s.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var it tsm1.FloatDecoder - if err := it.SetBytes(b); err != nil { - t.Fatalf("unexpected error creating float decoder: %v", err) - } - - for _, w := range want { - if !it.Next() { - t.Fatalf("Next()=false, want true") - } - vv := it.Values() - if w != vv { - t.Errorf("Values()=(%v), want (%v)\n", vv, w) - } - } - - if it.Next() { - t.Fatalf("Next()=true, want false") - } - - if err := it.Error(); err != nil { - t.Errorf("it.Error()=%v, want nil", err) - } -} - -var twoHoursData = []float64{ - // 2h of data, rows of 10 values - 761, 727, 763, 706, 700, 679, 757, 708, 739, 707, - 699, 740, 729, 766, 730, 715, 705, 693, 765, 724, - 799, 761, 737, 766, 756, 719, 722, 801, 747, 731, - 742, 744, 791, 750, 759, 809, 751, 705, 770, 792, - 727, 762, 772, 721, 748, 753, 744, 716, 776, 659, - 789, 766, 758, 690, 795, 770, 758, 723, 767, 765, - 693, 706, 681, 727, 724, 780, 678, 696, 758, 740, - 735, 700, 742, 747, 752, 734, 743, 732, 746, 770, - 780, 710, 731, 712, 712, 741, 770, 770, 754, 718, - 670, 775, 749, 795, 756, 741, 787, 721, 745, 782, - 765, 780, 811, 790, 836, 743, 858, 739, 762, 770, - 752, 763, 795, 792, 746, 786, 785, 774, 786, 718, -} - -func TestFloatEncoder_Roundtrip(t *testing.T) { - s := tsm1.NewFloatEncoder() - for _, p := range twoHoursData { - s.Write(p) - } - s.Flush() - - b, err := s.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var it tsm1.FloatDecoder - if err := it.SetBytes(b); err != nil { - t.Fatalf("unexpected error creating float decoder: %v", err) - } - - for _, w := range twoHoursData { - if !it.Next() { - t.Fatalf("Next()=false, want true") - } - vv := it.Values() - // t.Logf("it.Values()=(%+v, %+v)\n", time.Unix(int64(tt), 0), vv) - if w != vv { - t.Errorf("Values()=(%v), want (%v)\n", vv, w) - } - } - - if 
it.Next() { - t.Fatalf("Next()=true, want false") - } - - if err := it.Error(); err != nil { - t.Errorf("it.Error()=%v, want nil", err) - } -} - -func TestFloatEncoder_Roundtrip_NaN(t *testing.T) { - s := tsm1.NewFloatEncoder() - s.Write(1.0) - s.Write(math.NaN()) - s.Write(2.0) - s.Flush() - - _, err := s.Bytes() - if err == nil { - t.Fatalf("expected error. got nil") - } -} - -func TestFloatEncoder_Empty(t *testing.T) { - s := tsm1.NewFloatEncoder() - s.Flush() - - b, err := s.Bytes() - if err != nil { - t.Fatal(err) - } - - var dec tsm1.FloatDecoder - if err := dec.SetBytes(b); err != nil { - t.Fatal(err) - } - - var got []float64 - for dec.Next() { - got = append(got, dec.Values()) - } - - if len(got) != 0 { - t.Fatalf("got len %d, expected 0", len(got)) - } -} - -func Test_FloatEncoder_Quick(t *testing.T) { - quick.Check(func(values []float64) bool { - if values == nil { - values = []float64{} - } - - // Write values to encoder. - enc := tsm1.NewFloatEncoder() - for _, v := range values { - enc.Write(v) - } - enc.Flush() - - // Read values out of decoder. - got := make([]float64, 0, len(values)) - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var dec tsm1.FloatDecoder - if err := dec.SetBytes(b); err != nil { - t.Fatal(err) - } - for dec.Next() { - got = append(got, dec.Values()) - } - - // Verify that input and output values match. - if !reflect.DeepEqual(values, got) { - t.Fatalf("mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", values, got) - } - - return true - }, nil) -} - -func TestFloatDecoder_Empty(t *testing.T) { - var dec tsm1.FloatDecoder - if err := dec.SetBytes([]byte{}); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if dec.Next() { - t.Fatalf("exp next == false, got true") - } -} - -func BenchmarkFloatEncoder(b *testing.B) { - for i := 0; i < b.N; i++ { - s := tsm1.NewFloatEncoder() - for _, tt := range twoHoursData { - s.Write(tt) - } - s.Flush() - } -} - -func BenchmarkFloatDecoder(b *testing.B) { - s := tsm1.NewFloatEncoder() - for _, tt := range twoHoursData { - s.Write(tt) - } - s.Flush() - bytes, err := s.Bytes() - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - var it tsm1.FloatDecoder - if err := it.SetBytes(bytes); err != nil { - b.Fatalf("unexpected error creating float decoder: %v", err) - } - - for j := 0; j < len(twoHoursData); it.Next() { - j++ - } - } -} - -func BenchmarkFloatDecoder_DecodeAll(b *testing.B) { - benchmarks := []int{ - 1, - 55, - 550, - 1000, - } - for _, size := range benchmarks { - s := tsm1.NewFloatEncoder() - for c := 0; c < size; c++ { - s.Write(twoHoursData[c%len(twoHoursData)]) - } - s.Flush() - bytes, err := s.Bytes() - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - - dst := make([]float64, size) - for i := 0; i < b.N; i++ { - var it tsm1.FloatDecoder - if err := it.SetBytes(bytes); err != nil { - b.Fatalf("unexpected error creating float decoder: %v", err) - } - - i := 0 - for it.Next() { - dst[i] = it.Values() - i++ - } - - if len(dst) != size { - b.Fatalf("unexpected length -got/+exp\n%s", cmp.Diff(len(dst), size)) - } - } - }) - } -} diff --git a/tsdb/engine/tsm1/int.go b/tsdb/engine/tsm1/int.go deleted file mode 100644 index d4f66a1046f..00000000000 --- a/tsdb/engine/tsm1/int.go +++ /dev/null @@ -1,324 +0,0 @@ -package tsm1 - -// Integer encoding uses two different strategies depending on the range of values in -// the 
uncompressed data. Encoded values are first encoding used zig zag encoding. -// This interleaves positive and negative integers across a range of positive integers. -// -// For example, [-2,-1,0,1] becomes [3,1,0,2]. See -// https://developers.google.com/protocol-buffers/docs/encoding?hl=en#signed-integers -// for more information. -// -// If all the zig zag encoded values are less than 1 << 60 - 1, they are compressed using -// simple8b encoding. If any value is larger than 1 << 60 - 1, the values are stored uncompressed. -// -// Each encoded byte slice contains a 1 byte header followed by multiple 8 byte packed integers -// or 8 byte uncompressed integers. The 4 high bits of the first byte indicate the encoding type -// for the remaining bytes. -// -// There are currently two encoding types that can be used with room for 16 total. These additional -// encoding slots are reserved for future use. One improvement to be made is to use a patched -// encoding such as PFOR if only a small number of values exceed the max compressed value range. This -// should improve compression ratios with very large integers near the ends of the int64 range. - -import ( - "encoding/binary" - "fmt" - - "github.com/jwilder/encoding/simple8b" -) - -const ( - // intUncompressed is an uncompressed format using 8 bytes per point - intUncompressed = 0 - // intCompressedSimple is a bit-packed format using simple8b encoding - intCompressedSimple = 1 - // intCompressedRLE is a run-length encoding format - intCompressedRLE = 2 -) - -// IntegerEncoder encodes int64s into byte slices. -type IntegerEncoder struct { - prev int64 - rle bool - values []uint64 -} - -// NewIntegerEncoder returns a new integer encoder with an initial buffer of values sized at sz. -func NewIntegerEncoder(sz int) IntegerEncoder { - return IntegerEncoder{ - rle: true, - values: make([]uint64, 0, sz), - } -} - -// Flush is no-op -func (e *IntegerEncoder) Flush() {} - -// Reset sets the encoder back to its initial state. -func (e *IntegerEncoder) Reset() { - e.prev = 0 - e.rle = true - e.values = e.values[:0] -} - -// Write encodes v to the underlying buffers. -func (e *IntegerEncoder) Write(v int64) { - // Delta-encode each value as it's written. This happens before - // ZigZagEncoding because the deltas could be negative. - delta := v - e.prev - e.prev = v - enc := ZigZagEncode(delta) - if len(e.values) > 1 { - e.rle = e.rle && e.values[len(e.values)-1] == enc - } - - e.values = append(e.values, enc) -} - -// Bytes returns a copy of the underlying buffer. -func (e *IntegerEncoder) Bytes() ([]byte, error) { - // Only run-length encode if it could reduce storage size. - if e.rle && len(e.values) > 2 { - return e.encodeRLE() - } - - for _, v := range e.values { - // Value is too large to encode using packed format - if v > simple8b.MaxValue { - return e.encodeUncompressed() - } - } - - return e.encodePacked() -} - -func (e *IntegerEncoder) encodeRLE() ([]byte, error) { - // Large varints can take up to 10 bytes. We're storing 3 + 1 - // type byte. 
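	// For reference, the byte layout produced by the writes below (and read back
	// by decodeRLE) is:
	//
	//	byte 0        encoding type in the 4 high bits (intCompressedRLE)
	//	bytes 1-8     the first value, zig-zag encoded, big endian
	//	next varint   the delta between values, zig-zag encoded
	//	next varint   the number of times the delta is repeated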
- var b [31]byte - - // 4 high bits used for the encoding type - b[0] = byte(intCompressedRLE) << 4 - - i := 1 - // The first value - binary.BigEndian.PutUint64(b[i:], e.values[0]) - i += 8 - // The first delta - i += binary.PutUvarint(b[i:], e.values[1]) - // The number of times the delta is repeated - i += binary.PutUvarint(b[i:], uint64(len(e.values)-1)) - - return b[:i], nil -} - -func (e *IntegerEncoder) encodePacked() ([]byte, error) { - if len(e.values) == 0 { - return nil, nil - } - - // Encode all but the first value. Fist value is written unencoded - // using 8 bytes. - encoded, err := simple8b.EncodeAll(e.values[1:]) - if err != nil { - return nil, err - } - - b := make([]byte, 1+(len(encoded)+1)*8) - // 4 high bits of first byte store the encoding type for the block - b[0] = byte(intCompressedSimple) << 4 - - // Write the first value since it's not part of the encoded values - binary.BigEndian.PutUint64(b[1:9], e.values[0]) - - // Write the encoded values - for i, v := range encoded { - binary.BigEndian.PutUint64(b[9+i*8:9+i*8+8], v) - } - return b, nil -} - -func (e *IntegerEncoder) encodeUncompressed() ([]byte, error) { - if len(e.values) == 0 { - return nil, nil - } - - b := make([]byte, 1+len(e.values)*8) - // 4 high bits of first byte store the encoding type for the block - b[0] = byte(intUncompressed) << 4 - - for i, v := range e.values { - binary.BigEndian.PutUint64(b[1+i*8:1+i*8+8], v) - } - return b, nil -} - -// IntegerDecoder decodes a byte slice into int64s. -type IntegerDecoder struct { - // 240 is the maximum number of values that can be encoded into a single uint64 using simple8b - values [240]uint64 - bytes []byte - i int - n int - prev int64 - first bool - - // The first value for a run-length encoded byte slice - rleFirst uint64 - - // The delta value for a run-length encoded byte slice - rleDelta uint64 - encoding byte - err error -} - -// SetBytes sets the underlying byte slice of the decoder. -func (d *IntegerDecoder) SetBytes(b []byte) { - if len(b) > 0 { - d.encoding = b[0] >> 4 - d.bytes = b[1:] - } else { - d.encoding = 0 - d.bytes = nil - } - - d.i = 0 - d.n = 0 - d.prev = 0 - d.first = true - - d.rleFirst = 0 - d.rleDelta = 0 - d.err = nil -} - -// Next returns true if there are any values remaining to be decoded. -func (d *IntegerDecoder) Next() bool { - if d.i >= d.n && len(d.bytes) == 0 { - return false - } - - d.i++ - - if d.i >= d.n { - switch d.encoding { - case intUncompressed: - d.decodeUncompressed() - case intCompressedSimple: - d.decodePacked() - case intCompressedRLE: - d.decodeRLE() - default: - d.err = fmt.Errorf("unknown encoding %v", d.encoding) - } - } - return d.err == nil && d.i < d.n -} - -// Error returns the last error encountered by the decoder. -func (d *IntegerDecoder) Error() error { - return d.err -} - -// Read returns the next value from the decoder. 
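// For RLE blocks the value at position i is computed on demand as
// ZigZagDecode(rleFirst) + i*ZigZagDecode(rleDelta); for packed and
// uncompressed blocks the zig-zag decoded delta is added to the previous
// value. A minimal round-trip sketch (values here are illustrative):
//
//	enc := NewIntegerEncoder(3)
//	for _, v := range []int64{10, 15, 20} {
//		enc.Write(v)
//	}
//	buf, _ := enc.Bytes()
//
//	var dec IntegerDecoder
//	dec.SetBytes(buf)
//	for dec.Next() {
//		fmt.Println(dec.Read()) // prints 10, 15, 20
//	}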
-func (d *IntegerDecoder) Read() int64 { - switch d.encoding { - case intCompressedRLE: - return ZigZagDecode(d.rleFirst) + int64(d.i)*ZigZagDecode(d.rleDelta) - default: - v := ZigZagDecode(d.values[d.i]) - // v is the delta encoded value, we need to add the prior value to get the original - v = v + d.prev - d.prev = v - return v - } -} - -func (d *IntegerDecoder) decodeRLE() { - if len(d.bytes) == 0 { - return - } - - if len(d.bytes) < 8 { - d.err = fmt.Errorf("IntegerDecoder: not enough data to decode RLE starting value") - return - } - - var i, n int - - // Next 8 bytes is the starting value - first := binary.BigEndian.Uint64(d.bytes[i : i+8]) - i += 8 - - // Next 1-10 bytes is the delta value - value, n := binary.Uvarint(d.bytes[i:]) - if n <= 0 { - d.err = fmt.Errorf("IntegerDecoder: invalid RLE delta value") - return - } - i += n - - // Last 1-10 bytes is how many times the value repeats - count, n := binary.Uvarint(d.bytes[i:]) - if n <= 0 { - d.err = fmt.Errorf("IntegerDecoder: invalid RLE repeat value") - return - } - - // Store the first value and delta value so we do not need to allocate - // a large values slice. We can compute the value at position d.i on - // demand. - d.rleFirst = first - d.rleDelta = value - d.n = int(count) + 1 - d.i = 0 - - // We've process all the bytes - d.bytes = nil -} - -func (d *IntegerDecoder) decodePacked() { - if len(d.bytes) == 0 { - return - } - - if len(d.bytes) < 8 { - d.err = fmt.Errorf("IntegerDecoder: not enough data to decode packed value") - return - } - - v := binary.BigEndian.Uint64(d.bytes[0:8]) - // The first value is always unencoded - if d.first { - d.first = false - d.n = 1 - d.values[0] = v - } else { - n, err := simple8b.Decode(&d.values, v) - if err != nil { - // Should never happen, only error that could be returned is if the value to be decoded was not - // actually encoded by simple8b encoder. 
- d.err = fmt.Errorf("failed to decode value %v: %v", v, err) - } - - d.n = n - } - d.i = 0 - d.bytes = d.bytes[8:] -} - -func (d *IntegerDecoder) decodeUncompressed() { - if len(d.bytes) == 0 { - return - } - - if len(d.bytes) < 8 { - d.err = fmt.Errorf("IntegerDecoder: not enough data to decode uncompressed value") - return - } - - d.values[0] = binary.BigEndian.Uint64(d.bytes[0:8]) - d.i = 0 - d.n = 1 - d.bytes = d.bytes[8:] -} diff --git a/tsdb/engine/tsm1/int_test.go b/tsdb/engine/tsm1/int_test.go deleted file mode 100644 index 0768f270a0a..00000000000 --- a/tsdb/engine/tsm1/int_test.go +++ /dev/null @@ -1,728 +0,0 @@ -package tsm1 - -import ( - "fmt" - "math" - "math/rand" - "reflect" - "testing" - "testing/quick" -) - -func Test_IntegerEncoder_NoValues(t *testing.T) { - enc := NewIntegerEncoder(0) - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if len(b) > 0 { - t.Fatalf("unexpected length: exp 0, got %v", len(b)) - } - - var dec IntegerDecoder - dec.SetBytes(b) - if dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } -} - -func Test_IntegerEncoder_One(t *testing.T) { - enc := NewIntegerEncoder(1) - v1 := int64(1) - - enc.Write(1) - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; intCompressedSimple != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - var dec IntegerDecoder - dec.SetBytes(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v1 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v1) - } -} - -func Test_IntegerEncoder_Two(t *testing.T) { - enc := NewIntegerEncoder(2) - var v1, v2 int64 = 1, 2 - - enc.Write(v1) - enc.Write(v2) - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; intCompressedSimple != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - var dec IntegerDecoder - dec.SetBytes(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v1 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v1) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v2 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v2) - } -} - -func Test_IntegerEncoder_Negative(t *testing.T) { - enc := NewIntegerEncoder(3) - var v1, v2, v3 int64 = -2, 0, 1 - - enc.Write(v1) - enc.Write(v2) - enc.Write(v3) - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; intCompressedSimple != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - var dec IntegerDecoder - dec.SetBytes(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v1 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v1) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v2 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v2) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v3 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v3) - } -} - -func Test_IntegerEncoder_Large_Range(t *testing.T) { - enc := NewIntegerEncoder(2) - var v1, v2 int64 = math.MinInt64, math.MaxInt64 - enc.Write(v1) - enc.Write(v2) - b, err := 
enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; intUncompressed != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - var dec IntegerDecoder - dec.SetBytes(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v1 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v1) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v2 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v2) - } -} - -func Test_IntegerEncoder_Uncompressed(t *testing.T) { - enc := NewIntegerEncoder(3) - var v1, v2, v3 int64 = 0, 1, 1 << 60 - - enc.Write(v1) - enc.Write(v2) - enc.Write(v3) - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("expected error: %v", err) - } - - // 1 byte header + 3 * 8 byte values - if exp := 25; len(b) != exp { - t.Fatalf("length mismatch: got %v, exp %v", len(b), exp) - } - - if got := b[0] >> 4; intUncompressed != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - var dec IntegerDecoder - dec.SetBytes(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v1 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v1) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v2 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v2) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if v3 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), v3) - } -} - -func Test_IntegerEncoder_NegativeUncompressed(t *testing.T) { - values := []int64{ - -2352281900722994752, 1438442655375607923, -4110452567888190110, - -1221292455668011702, -1941700286034261841, -2836753127140407751, - 1432686216250034552, 3663244026151507025, -3068113732684750258, - -1949953187327444488, 3713374280993588804, 3226153669854871355, - -2093273755080502606, 1006087192578600616, -2272122301622271655, - 2533238229511593671, -4450454445568858273, 2647789901083530435, - 2761419461769776844, -1324397441074946198, -680758138988210958, - 94468846694902125, -2394093124890745254, -2682139311758778198, - } - enc := NewIntegerEncoder(256) - for _, v := range values { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("expected error: %v", err) - } - - if got := b[0] >> 4; intUncompressed != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - var dec IntegerDecoder - dec.SetBytes(b) - - i := 0 - for dec.Next() { - if i > len(values) { - t.Fatalf("read too many values: got %v, exp %v", i, len(values)) - } - - if values[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), values[i]) - } - i += 1 - } - - if i != len(values) { - t.Fatalf("failed to read enough values: got %v, exp %v", i, len(values)) - } -} - -func Test_IntegerEncoder_AllNegative(t *testing.T) { - enc := NewIntegerEncoder(3) - values := []int64{ - -10, -5, -1, - } - - for _, v := range values { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; intCompressedSimple != got { - t.Fatalf("encoding type mismatch: exp uncompressed, got %v", got) - } - - var dec IntegerDecoder - dec.SetBytes(b) - i := 0 - for dec.Next() { - if i > len(values) { - t.Fatalf("read too many values: got %v, exp %v", i, 
len(values)) - } - - if values[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), values[i]) - } - i += 1 - } - - if i != len(values) { - t.Fatalf("failed to read enough values: got %v, exp %v", i, len(values)) - } -} - -func Test_IntegerEncoder_CounterPacked(t *testing.T) { - enc := NewIntegerEncoder(16) - values := []int64{ - 1e15, 1e15 + 1, 1e15 + 2, 1e15 + 3, 1e15 + 4, 1e15 + 6, - } - - for _, v := range values { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != intCompressedSimple { - t.Fatalf("unexpected encoding format: expected simple, got %v", b[0]>>4) - } - - // Should use 1 header byte + 2, 8 byte words if delta-encoding is used based on - // values sizes. Without delta-encoding, we'd get 49 bytes. - if exp := 17; len(b) != exp { - t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp) - } - - var dec IntegerDecoder - dec.SetBytes(b) - i := 0 - for dec.Next() { - if i > len(values) { - t.Fatalf("read too many values: got %v, exp %v", i, len(values)) - } - - if values[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), values[i]) - } - i += 1 - } - - if i != len(values) { - t.Fatalf("failed to read enough values: got %v, exp %v", i, len(values)) - } -} - -func Test_IntegerEncoder_CounterRLE(t *testing.T) { - enc := NewIntegerEncoder(16) - values := []int64{ - 1e15, 1e15 + 1, 1e15 + 2, 1e15 + 3, 1e15 + 4, 1e15 + 5, - } - - for _, v := range values { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != intCompressedRLE { - t.Fatalf("unexpected encoding format: expected RLE, got %v", b[0]>>4) - } - - // Should use 1 header byte, 8 byte first value, 1 var-byte for delta and 1 var-byte for - // count of deltas in this particular RLE. - if exp := 11; len(b) != exp { - t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp) - } - - var dec IntegerDecoder - dec.SetBytes(b) - i := 0 - for dec.Next() { - if i > len(values) { - t.Fatalf("read too many values: got %v, exp %v", i, len(values)) - } - - if values[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), values[i]) - } - i += 1 - } - - if i != len(values) { - t.Fatalf("failed to read enough values: got %v, exp %v", i, len(values)) - } -} - -func Test_IntegerEncoder_Descending(t *testing.T) { - enc := NewIntegerEncoder(16) - values := []int64{ - 7094, 4472, 1850, - } - - for _, v := range values { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != intCompressedRLE { - t.Fatalf("unexpected encoding format: expected simple, got %v", b[0]>>4) - } - - // Should use 1 header byte, 8 byte first value, 1 var-byte for delta and 1 var-byte for - // count of deltas in this particular RLE. 
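	// Here the repeated delta is 4472-7094 = -2622, which zig-zag encodes
	// to 5243 and therefore needs two uvarint bytes, so the expected total
	// is 1 + 8 + 2 + 1 = 12 bytes.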
- if exp := 12; len(b) != exp { - t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp) - } - - var dec IntegerDecoder - dec.SetBytes(b) - i := 0 - for dec.Next() { - if i > len(values) { - t.Fatalf("read too many values: got %v, exp %v", i, len(values)) - } - - if values[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), values[i]) - } - i += 1 - } - - if i != len(values) { - t.Fatalf("failed to read enough values: got %v, exp %v", i, len(values)) - } -} - -func Test_IntegerEncoder_Flat(t *testing.T) { - enc := NewIntegerEncoder(16) - values := []int64{ - 1, 1, 1, 1, - } - - for _, v := range values { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != intCompressedRLE { - t.Fatalf("unexpected encoding format: expected simple, got %v", b[0]>>4) - } - - // Should use 1 header byte, 8 byte first value, 1 var-byte for delta and 1 var-byte for - // count of deltas in this particular RLE. - if exp := 11; len(b) != exp { - t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp) - } - - var dec IntegerDecoder - dec.SetBytes(b) - i := 0 - for dec.Next() { - if i > len(values) { - t.Fatalf("read too many values: got %v, exp %v", i, len(values)) - } - - if values[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), values[i]) - } - i += 1 - } - - if i != len(values) { - t.Fatalf("failed to read enough values: got %v, exp %v", i, len(values)) - } -} - -func Test_IntegerEncoder_MinMax(t *testing.T) { - enc := NewIntegerEncoder(2) - values := []int64{ - math.MinInt64, math.MaxInt64, - } - - for _, v := range values { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != intUncompressed { - t.Fatalf("unexpected encoding format: expected simple, got %v", b[0]>>4) - } - - if exp := 17; len(b) != exp { - t.Fatalf("encoded length mismatch: got %v, exp %v", len(b), exp) - } - - var dec IntegerDecoder - dec.SetBytes(b) - i := 0 - for dec.Next() { - if i > len(values) { - t.Fatalf("read too many values: got %v, exp %v", i, len(values)) - } - - if values[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), values[i]) - } - i += 1 - } - - if i != len(values) { - t.Fatalf("failed to read enough values: got %v, exp %v", i, len(values)) - } -} - -func Test_IntegerEncoder_Quick(t *testing.T) { - quick.Check(func(values []int64) bool { - if values == nil { - values = []int64{} // is this really expected? - } - - // Write values to encoder. - enc := NewIntegerEncoder(1024) - for _, v := range values { - enc.Write(v) - } - - // Retrieve encoded bytes from encoder. - buf, err := enc.Bytes() - if err != nil { - t.Fatal(err) - } - - // Read values out of decoder. - got := make([]int64, 0, len(values)) - var dec IntegerDecoder - dec.SetBytes(buf) - for dec.Next() { - if err := dec.Error(); err != nil { - t.Fatal(err) - } - got = append(got, dec.Read()) - } - - // Verify that input and output values match. 
- if !reflect.DeepEqual(values, got) { - t.Fatalf("mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", values, got) - } - - return true - }, nil) -} - -func Test_IntegerDecoder_Corrupt(t *testing.T) { - cases := []string{ - "", // Empty - "\x00abc", // Uncompressed: less than 8 bytes - "\x10abc", // Packed: less than 8 bytes - "\x20abc", // RLE: less than 8 bytes - "\x2012345678\x90", // RLE: valid starting value but invalid delta value - "\x2012345678\x01\x90", // RLE: valid starting, valid delta value, invalid repeat value - } - - for _, c := range cases { - var dec IntegerDecoder - dec.SetBytes([]byte(c)) - if dec.Next() { - t.Fatalf("exp next == false, got true") - } - } -} - -func BenchmarkIntegerEncoderRLE(b *testing.B) { - enc := NewIntegerEncoder(1024) - x := make([]int64, 1024) - for i := 0; i < len(x); i++ { - x[i] = int64(i) - enc.Write(x[i]) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - enc.Bytes() - } -} - -func BenchmarkIntegerEncoderPackedSimple(b *testing.B) { - enc := NewIntegerEncoder(1024) - x := make([]int64, 1024) - for i := 0; i < len(x); i++ { - // Small amount of randomness prevents RLE from being used - x[i] = int64(i) + int64(rand.Intn(10)) - enc.Write(x[i]) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - enc.Bytes() - enc.Reset() - for i := 0; i < len(x); i++ { - enc.Write(x[i]) - } - } -} - -func BenchmarkIntegerBatch_DecodeAllUncompressed(b *testing.B) { - benchmarks := []struct { - n int - }{ - {5}, - {55}, - {555}, - {1000}, - } - - values := []int64{ - -2352281900722994752, 1438442655375607923, -4110452567888190110, - -1221292455668011702, -1941700286034261841, -2836753127140407751, - 1432686216250034552, 3663244026151507025, -3068113732684750258, - -1949953187327444488, 3713374280993588804, 3226153669854871355, - -2093273755080502606, 1006087192578600616, -2272122301622271655, - 2533238229511593671, -4450454445568858273, 2647789901083530435, - 2761419461769776844, -1324397441074946198, -680758138988210958, - 94468846694902125, -2394093124890745254, -2682139311758778198, - } - - for _, bm := range benchmarks { - seededRand := rand.New(rand.NewSource(int64(bm.n * 1e3))) - - enc := NewIntegerEncoder(bm.n) - for i := 0; i < bm.n; i++ { - enc.Write(values[seededRand.Int()%len(values)]) - } - bytes, _ := enc.Bytes() - - b.Run(fmt.Sprintf("%d", bm.n), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - b.ReportAllocs() - - dst := make([]int64, bm.n) - for i := 0; i < b.N; i++ { - var dec IntegerDecoder - dec.SetBytes(bytes) - var n int - for dec.Next() { - dst[n] = dec.Read() - n++ - } - } - }) - } -} - -func BenchmarkIntegerBatch_DecodeAllPackedSimple(b *testing.B) { - benchmarks := []struct { - n int - }{ - {5}, - {55}, - {555}, - {1000}, - } - for _, bm := range benchmarks { - seededRand := rand.New(rand.NewSource(int64(bm.n * 1e3))) - - enc := NewIntegerEncoder(bm.n) - for i := 0; i < bm.n; i++ { - // Small amount of randomness prevents RLE from being used - enc.Write(int64(i) + int64(seededRand.Intn(10))) - } - bytes, _ := enc.Bytes() - - b.Run(fmt.Sprintf("%d", bm.n), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - b.ReportAllocs() - - dst := make([]int64, bm.n) - for i := 0; i < b.N; i++ { - var dec IntegerDecoder - dec.SetBytes(bytes) - var n int - for dec.Next() { - dst[n] = dec.Read() - n++ - } - } - }) - } -} - -func BenchmarkIntegerBatch_DecodeAllRLE(b *testing.B) { - benchmarks := []struct { - n int - delta int64 - }{ - {5, 1}, - {55, 1}, - {555, 1}, - {1000, 1}, - {1000, 0}, - } - for _, bm := range benchmarks { - enc := 
NewIntegerEncoder(bm.n) - acc := int64(0) - for i := 0; i < bm.n; i++ { - enc.Write(acc) - acc += bm.delta - } - bytes, _ := enc.Bytes() - - b.Run(fmt.Sprintf("%d_delta_%d", bm.n, bm.delta), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - b.ReportAllocs() - - dst := make([]int64, bm.n) - for i := 0; i < b.N; i++ { - var dec IntegerDecoder - dec.SetBytes(bytes) - var n int - for dec.Next() { - dst[n] = dec.Read() - n++ - } - } - }) - } -} diff --git a/tsdb/engine/tsm1/iterator.gen.go b/tsdb/engine/tsm1/iterator.gen.go deleted file mode 100644 index ac04fb3d5c2..00000000000 --- a/tsdb/engine/tsm1/iterator.gen.go +++ /dev/null @@ -1,2531 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! -// Source: iterator.gen.go.tmpl - -package tsm1 - -import ( - "fmt" - "runtime" - "sort" - "sync" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/pkg/metrics" - "github.com/influxdata/influxdb/v2/pkg/tracing" - "github.com/influxdata/influxdb/v2/pkg/tracing/fields" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxql" - "go.uber.org/zap" -) - -type cursor interface { - close() error - next() (t int64, v interface{}) -} - -// cursorAt provides a bufferred cursor interface. -// This required for literal value cursors which don't have a time value. -type cursorAt interface { - close() error - peek() (k int64, v interface{}) - nextAt(seek int64) interface{} -} - -type nilCursor struct{} - -func (nilCursor) next() (int64, interface{}) { return tsdb.EOF, nil } -func (nilCursor) close() error { return nil } - -// bufCursor implements a bufferred cursor. -type bufCursor struct { - cur cursor - buf struct { - key int64 - value interface{} - filled bool - } - ascending bool -} - -// newBufCursor returns a bufferred wrapper for cur. -func newBufCursor(cur cursor, ascending bool) *bufCursor { - return &bufCursor{cur: cur, ascending: ascending} -} - -func (c *bufCursor) close() error { - if c.cur == nil { - return nil - } - - err := c.cur.close() - c.cur = nil - return err -} - -// next returns the buffer, if filled. Otherwise returns the next key/value from the cursor. -func (c *bufCursor) next() (int64, interface{}) { - if c.buf.filled { - k, v := c.buf.key, c.buf.value - c.buf.filled = false - return k, v - } - return c.cur.next() -} - -// unread pushes k and v onto the buffer. -func (c *bufCursor) unread(k int64, v interface{}) { - c.buf.key, c.buf.value = k, v - c.buf.filled = true -} - -// peek reads next next key/value without removing them from the cursor. -func (c *bufCursor) peek() (k int64, v interface{}) { - k, v = c.next() - c.unread(k, v) - return -} - -// nextAt returns the next value where key is equal to seek. -// Skips over any keys that are less than seek. -// If the key doesn't exist then a nil value is returned instead. -func (c *bufCursor) nextAt(seek int64) interface{} { - for { - k, v := c.next() - if k != tsdb.EOF { - if k == seek { - return v - } else if c.ascending && k < seek { - continue - } else if !c.ascending && k > seek { - continue - } - c.unread(k, v) - } - - // Return "nil" value for type. 
- switch c.cur.(type) { - case floatCursor: - return (*float64)(nil) - case integerCursor: - return (*int64)(nil) - case unsignedCursor: - return (*uint64)(nil) - case stringCursor: - return (*string)(nil) - case booleanCursor: - return (*bool)(nil) - default: - panic("unreachable") - } - } -} - -// statsBufferCopyIntervalN is the number of points that are read before -// copying the stats buffer to the iterator's stats field. This is used to -// amortize the cost of using a mutex when updating stats. -const statsBufferCopyIntervalN = 100 - -type floatFinalizerIterator struct { - query.FloatIterator - logger *zap.Logger -} - -func newFloatFinalizerIterator(inner query.FloatIterator, logger *zap.Logger) *floatFinalizerIterator { - itr := &floatFinalizerIterator{FloatIterator: inner, logger: logger} - runtime.SetFinalizer(itr, (*floatFinalizerIterator).closeGC) - return itr -} - -func (itr *floatFinalizerIterator) closeGC() { - go func() { - itr.logger.Error("FloatIterator finalized by GC") - itr.Close() - }() -} - -func (itr *floatFinalizerIterator) Close() error { - runtime.SetFinalizer(itr, nil) - return itr.FloatIterator.Close() -} - -type floatInstrumentedIterator struct { - query.FloatIterator - span *tracing.Span - group *metrics.Group -} - -func newFloatInstrumentedIterator(inner query.FloatIterator, span *tracing.Span, group *metrics.Group) *floatInstrumentedIterator { - return &floatInstrumentedIterator{FloatIterator: inner, span: span, group: group} -} - -func (itr *floatInstrumentedIterator) Close() error { - var f fields.Fields - itr.group.ForEach(func(v metrics.Metric) { - switch m := v.(type) { - case *metrics.Counter: - f = append(f, fields.Int64(m.Name(), m.Value())) - - case *metrics.Timer: - f = append(f, fields.Duration(m.Name(), m.Value())) - - default: - panic("unexpected metrics") - } - }) - itr.span.SetFields(f) - itr.span.Finish() - - return itr.FloatIterator.Close() -} - -type floatIterator struct { - cur floatCursor - aux []cursorAt - conds struct { - names []string - curs []cursorAt - } - opt query.IteratorOptions - - m map[string]interface{} // map used for condition evaluation - point query.FloatPoint // reusable buffer - - statsLock sync.Mutex - stats query.IteratorStats - statsBuf query.IteratorStats - valuer influxql.ValuerEval -} - -func newFloatIterator(name string, tags query.Tags, opt query.IteratorOptions, cur floatCursor, aux []cursorAt, conds []cursorAt, condNames []string) *floatIterator { - itr := &floatIterator{ - cur: cur, - aux: aux, - opt: opt, - point: query.FloatPoint{ - Name: name, - Tags: tags, - }, - statsBuf: query.IteratorStats{ - SeriesN: 1, - }, - } - itr.stats = itr.statsBuf - - if len(aux) > 0 { - itr.point.Aux = make([]interface{}, len(aux)) - } - - if opt.Condition != nil { - itr.m = make(map[string]interface{}, len(aux)+len(conds)) - } - itr.conds.names = condNames - itr.conds.curs = conds - - itr.valuer = influxql.ValuerEval{ - Valuer: influxql.MultiValuer( - query.MathValuer{}, - influxql.MapValuer(itr.m), - ), - } - - return itr -} - -// Next returns the next point from the iterator. -func (itr *floatIterator) Next() (*query.FloatPoint, error) { - for { - seek := tsdb.EOF - - if itr.cur != nil { - // Read from the main cursor if we have one. - itr.point.Time, itr.point.Value = itr.cur.nextFloat() - seek = itr.point.Time - } else { - // Otherwise find lowest aux timestamp. 
- for i := range itr.aux { - if k, _ := itr.aux[i].peek(); k != tsdb.EOF { - if seek == tsdb.EOF || (itr.opt.Ascending && k < seek) || (!itr.opt.Ascending && k > seek) { - seek = k - } - } - } - itr.point.Time = seek - } - - // Exit if we have no more points or we are outside our time range. - if itr.point.Time == tsdb.EOF { - itr.copyStats() - return nil, nil - } else if itr.opt.Ascending && itr.point.Time > itr.opt.EndTime { - itr.copyStats() - return nil, nil - } else if !itr.opt.Ascending && itr.point.Time < itr.opt.StartTime { - itr.copyStats() - return nil, nil - } - - // Read from each auxiliary cursor. - for i := range itr.opt.Aux { - itr.point.Aux[i] = itr.aux[i].nextAt(seek) - } - - // Read from condition field cursors. - for i := range itr.conds.curs { - itr.m[itr.conds.names[i]] = itr.conds.curs[i].nextAt(seek) - } - - // Evaluate condition, if one exists. Retry if it fails. - if itr.opt.Condition != nil && !itr.valuer.EvalBool(itr.opt.Condition) { - continue - } - - // Track points returned. - itr.statsBuf.PointN++ - - // Copy buffer to stats periodically. - if itr.statsBuf.PointN%statsBufferCopyIntervalN == 0 { - itr.copyStats() - } - - return &itr.point, nil - } -} - -// copyStats copies from the itr stats buffer to the stats under lock. -func (itr *floatIterator) copyStats() { - itr.statsLock.Lock() - itr.stats = itr.statsBuf - itr.statsLock.Unlock() -} - -// Stats returns stats on the points processed. -func (itr *floatIterator) Stats() query.IteratorStats { - itr.statsLock.Lock() - stats := itr.stats - itr.statsLock.Unlock() - return stats -} - -// Close closes the iterator. -func (itr *floatIterator) Close() error { - cursorsAt(itr.aux).close() - itr.aux = nil - cursorsAt(itr.conds.curs).close() - itr.conds.curs = nil - if itr.cur != nil { - err := itr.cur.close() - itr.cur = nil - return err - } - return nil -} - -// floatLimitIterator -type floatLimitIterator struct { - input query.FloatIterator - opt query.IteratorOptions - n int -} - -func newFloatLimitIterator(input query.FloatIterator, opt query.IteratorOptions) *floatLimitIterator { - return &floatLimitIterator{ - input: input, - opt: opt, - } -} - -func (itr *floatLimitIterator) Stats() query.IteratorStats { return itr.input.Stats() } -func (itr *floatLimitIterator) Close() error { return itr.input.Close() } - -func (itr *floatLimitIterator) Next() (*query.FloatPoint, error) { - // Check if we are beyond the limit. - if (itr.n - itr.opt.Offset) > itr.opt.Limit { - return nil, nil - } - - // Read the next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Increment counter. - itr.n++ - - // Offsets are handled by a higher level iterator so return all points. - return p, nil -} - -// floatCursor represents an object for iterating over a single float field. 
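// The concrete cursors below merge the in-memory cache with TSM blocks in
// timestamp order, and the cache value wins when both sides hold the same
// timestamp. For example (illustrative data, ascending order), with
// cache = {t=10: 1.5, t=30: 2.5} and TSM = {t=10: 1.0, t=20: 2.0},
// nextFloat yields (10, 1.5), (20, 2.0), (30, 2.5).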
-type floatCursor interface { - cursor - nextFloat() (t int64, v float64) -} - -func newFloatCursor(seek int64, ascending bool, cacheValues Values, tsmKeyCursor *KeyCursor) floatCursor { - if ascending { - return newFloatAscendingCursor(seek, cacheValues, tsmKeyCursor) - } - return newFloatDescendingCursor(seek, cacheValues, tsmKeyCursor) -} - -type floatAscendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - values []FloatValue - pos int - keyCursor *KeyCursor - } -} - -func newFloatAscendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *floatAscendingCursor { - c := &floatAscendingCursor{} - - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadFloatBlock(&c.tsm.values) - c.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool { - return c.tsm.values[i].UnixNano() >= seek - }) - - return c -} - -// peekCache returns the current time/value from the cache. -func (c *floatAscendingCursor) peekCache() (t int64, v float64) { - if c.cache.pos >= len(c.cache.values) { - return tsdb.EOF, 0 - } - - item := c.cache.values[c.cache.pos] - return item.UnixNano(), item.(FloatValue).value -} - -// peekTSM returns the current time/value from tsm. -func (c *floatAscendingCursor) peekTSM() (t int64, v float64) { - if c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) { - return tsdb.EOF, 0 - } - - item := c.tsm.values[c.tsm.pos] - return item.UnixNano(), item.value -} - -// close closes the cursor and any dependent cursors. -func (c *floatAscendingCursor) close() error { - if c.tsm.keyCursor == nil { - return nil - } - - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - c.cache.values = nil - c.tsm.values = nil - return nil -} - -// next returns the next key/value for the cursor. -func (c *floatAscendingCursor) next() (int64, interface{}) { return c.nextFloat() } - -// nextFloat returns the next key/value for the cursor. -func (c *floatAscendingCursor) nextFloat() (int64, float64) { - ckey, cvalue := c.peekCache() - tkey, tvalue := c.peekTSM() - - // No more data in cache or in TSM files. - if ckey == tsdb.EOF && tkey == tsdb.EOF { - return tsdb.EOF, 0 - } - - // Both cache and tsm files have the same key, cache takes precedence. - if ckey == tkey { - c.nextCache() - c.nextTSM() - return ckey, cvalue - } - - // Buffered cache key precedes that in TSM file. - if ckey != tsdb.EOF && (ckey < tkey || tkey == tsdb.EOF) { - c.nextCache() - return ckey, cvalue - } - - // Buffered TSM key precedes that in cache. - c.nextTSM() - return tkey, tvalue -} - -// nextCache returns the next value from the cache. -func (c *floatAscendingCursor) nextCache() { - if c.cache.pos >= len(c.cache.values) { - return - } - c.cache.pos++ -} - -// nextTSM returns the next value from the TSM files. 
-func (c *floatAscendingCursor) nextTSM() { - c.tsm.pos++ - if c.tsm.pos >= len(c.tsm.values) { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadFloatBlock(&c.tsm.values) - if len(c.tsm.values) == 0 { - return - } - c.tsm.pos = 0 - } -} - -type floatDescendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - values []FloatValue - pos int - keyCursor *KeyCursor - } -} - -func newFloatDescendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *floatDescendingCursor { - c := &floatDescendingCursor{} - - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - if t, _ := c.peekCache(); t != seek { - c.cache.pos-- - } - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadFloatBlock(&c.tsm.values) - c.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool { - return c.tsm.values[i].UnixNano() >= seek - }) - if t, _ := c.peekTSM(); t != seek { - c.tsm.pos-- - } - - return c -} - -// peekCache returns the current time/value from the cache. -func (c *floatDescendingCursor) peekCache() (t int64, v float64) { - if c.cache.pos < 0 || c.cache.pos >= len(c.cache.values) { - return tsdb.EOF, 0 - } - - item := c.cache.values[c.cache.pos] - return item.UnixNano(), item.(FloatValue).value -} - -// peekTSM returns the current time/value from tsm. -func (c *floatDescendingCursor) peekTSM() (t int64, v float64) { - if c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) { - return tsdb.EOF, 0 - } - - item := c.tsm.values[c.tsm.pos] - return item.UnixNano(), item.value -} - -// close closes the cursor and any dependent cursors. -func (c *floatDescendingCursor) close() error { - if c.tsm.keyCursor == nil { - return nil - } - - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - c.cache.values = nil - c.tsm.values = nil - return nil -} - -// next returns the next key/value for the cursor. -func (c *floatDescendingCursor) next() (int64, interface{}) { return c.nextFloat() } - -// nextFloat returns the next key/value for the cursor. -func (c *floatDescendingCursor) nextFloat() (int64, float64) { - ckey, cvalue := c.peekCache() - tkey, tvalue := c.peekTSM() - - // No more data in cache or in TSM files. - if ckey == tsdb.EOF && tkey == tsdb.EOF { - return tsdb.EOF, 0 - } - - // Both cache and tsm files have the same key, cache takes precedence. - if ckey == tkey { - c.nextCache() - c.nextTSM() - return ckey, cvalue - } - - // Buffered cache key precedes that in TSM file. - if ckey != tsdb.EOF && (ckey > tkey || tkey == tsdb.EOF) { - c.nextCache() - return ckey, cvalue - } - - // Buffered TSM key precedes that in cache. - c.nextTSM() - return tkey, tvalue -} - -// nextCache returns the next value from the cache. -func (c *floatDescendingCursor) nextCache() { - if c.cache.pos < 0 { - return - } - c.cache.pos-- -} - -// nextTSM returns the next value from the TSM files. 
-func (c *floatDescendingCursor) nextTSM() { - c.tsm.pos-- - if c.tsm.pos < 0 { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadFloatBlock(&c.tsm.values) - if len(c.tsm.values) == 0 { - return - } - c.tsm.pos = len(c.tsm.values) - 1 - } -} - -type integerFinalizerIterator struct { - query.IntegerIterator - logger *zap.Logger -} - -func newIntegerFinalizerIterator(inner query.IntegerIterator, logger *zap.Logger) *integerFinalizerIterator { - itr := &integerFinalizerIterator{IntegerIterator: inner, logger: logger} - runtime.SetFinalizer(itr, (*integerFinalizerIterator).closeGC) - return itr -} - -func (itr *integerFinalizerIterator) closeGC() { - go func() { - itr.logger.Error("IntegerIterator finalized by GC") - itr.Close() - }() -} - -func (itr *integerFinalizerIterator) Close() error { - runtime.SetFinalizer(itr, nil) - return itr.IntegerIterator.Close() -} - -type integerInstrumentedIterator struct { - query.IntegerIterator - span *tracing.Span - group *metrics.Group -} - -func newIntegerInstrumentedIterator(inner query.IntegerIterator, span *tracing.Span, group *metrics.Group) *integerInstrumentedIterator { - return &integerInstrumentedIterator{IntegerIterator: inner, span: span, group: group} -} - -func (itr *integerInstrumentedIterator) Close() error { - var f fields.Fields - itr.group.ForEach(func(v metrics.Metric) { - switch m := v.(type) { - case *metrics.Counter: - f = append(f, fields.Int64(m.Name(), m.Value())) - - case *metrics.Timer: - f = append(f, fields.Duration(m.Name(), m.Value())) - - default: - panic("unexpected metrics") - } - }) - itr.span.SetFields(f) - itr.span.Finish() - - return itr.IntegerIterator.Close() -} - -type integerIterator struct { - cur integerCursor - aux []cursorAt - conds struct { - names []string - curs []cursorAt - } - opt query.IteratorOptions - - m map[string]interface{} // map used for condition evaluation - point query.IntegerPoint // reusable buffer - - statsLock sync.Mutex - stats query.IteratorStats - statsBuf query.IteratorStats - valuer influxql.ValuerEval -} - -func newIntegerIterator(name string, tags query.Tags, opt query.IteratorOptions, cur integerCursor, aux []cursorAt, conds []cursorAt, condNames []string) *integerIterator { - itr := &integerIterator{ - cur: cur, - aux: aux, - opt: opt, - point: query.IntegerPoint{ - Name: name, - Tags: tags, - }, - statsBuf: query.IteratorStats{ - SeriesN: 1, - }, - } - itr.stats = itr.statsBuf - - if len(aux) > 0 { - itr.point.Aux = make([]interface{}, len(aux)) - } - - if opt.Condition != nil { - itr.m = make(map[string]interface{}, len(aux)+len(conds)) - } - itr.conds.names = condNames - itr.conds.curs = conds - - itr.valuer = influxql.ValuerEval{ - Valuer: influxql.MultiValuer( - query.MathValuer{}, - influxql.MapValuer(itr.m), - ), - } - - return itr -} - -// Next returns the next point from the iterator. -func (itr *integerIterator) Next() (*query.IntegerPoint, error) { - for { - seek := tsdb.EOF - - if itr.cur != nil { - // Read from the main cursor if we have one. - itr.point.Time, itr.point.Value = itr.cur.nextInteger() - seek = itr.point.Time - } else { - // Otherwise find lowest aux timestamp. - for i := range itr.aux { - if k, _ := itr.aux[i].peek(); k != tsdb.EOF { - if seek == tsdb.EOF || (itr.opt.Ascending && k < seek) || (!itr.opt.Ascending && k > seek) { - seek = k - } - } - } - itr.point.Time = seek - } - - // Exit if we have no more points or we are outside our time range. 
- if itr.point.Time == tsdb.EOF { - itr.copyStats() - return nil, nil - } else if itr.opt.Ascending && itr.point.Time > itr.opt.EndTime { - itr.copyStats() - return nil, nil - } else if !itr.opt.Ascending && itr.point.Time < itr.opt.StartTime { - itr.copyStats() - return nil, nil - } - - // Read from each auxiliary cursor. - for i := range itr.opt.Aux { - itr.point.Aux[i] = itr.aux[i].nextAt(seek) - } - - // Read from condition field cursors. - for i := range itr.conds.curs { - itr.m[itr.conds.names[i]] = itr.conds.curs[i].nextAt(seek) - } - - // Evaluate condition, if one exists. Retry if it fails. - if itr.opt.Condition != nil && !itr.valuer.EvalBool(itr.opt.Condition) { - continue - } - - // Track points returned. - itr.statsBuf.PointN++ - - // Copy buffer to stats periodically. - if itr.statsBuf.PointN%statsBufferCopyIntervalN == 0 { - itr.copyStats() - } - - return &itr.point, nil - } -} - -// copyStats copies from the itr stats buffer to the stats under lock. -func (itr *integerIterator) copyStats() { - itr.statsLock.Lock() - itr.stats = itr.statsBuf - itr.statsLock.Unlock() -} - -// Stats returns stats on the points processed. -func (itr *integerIterator) Stats() query.IteratorStats { - itr.statsLock.Lock() - stats := itr.stats - itr.statsLock.Unlock() - return stats -} - -// Close closes the iterator. -func (itr *integerIterator) Close() error { - cursorsAt(itr.aux).close() - itr.aux = nil - cursorsAt(itr.conds.curs).close() - itr.conds.curs = nil - if itr.cur != nil { - err := itr.cur.close() - itr.cur = nil - return err - } - return nil -} - -// integerLimitIterator -type integerLimitIterator struct { - input query.IntegerIterator - opt query.IteratorOptions - n int -} - -func newIntegerLimitIterator(input query.IntegerIterator, opt query.IteratorOptions) *integerLimitIterator { - return &integerLimitIterator{ - input: input, - opt: opt, - } -} - -func (itr *integerLimitIterator) Stats() query.IteratorStats { return itr.input.Stats() } -func (itr *integerLimitIterator) Close() error { return itr.input.Close() } - -func (itr *integerLimitIterator) Next() (*query.IntegerPoint, error) { - // Check if we are beyond the limit. - if (itr.n - itr.opt.Offset) > itr.opt.Limit { - return nil, nil - } - - // Read the next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Increment counter. - itr.n++ - - // Offsets are handled by a higher level iterator so return all points. - return p, nil -} - -// integerCursor represents an object for iterating over a single integer field. 
-type integerCursor interface { - cursor - nextInteger() (t int64, v int64) -} - -func newIntegerCursor(seek int64, ascending bool, cacheValues Values, tsmKeyCursor *KeyCursor) integerCursor { - if ascending { - return newIntegerAscendingCursor(seek, cacheValues, tsmKeyCursor) - } - return newIntegerDescendingCursor(seek, cacheValues, tsmKeyCursor) -} - -type integerAscendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - values []IntegerValue - pos int - keyCursor *KeyCursor - } -} - -func newIntegerAscendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *integerAscendingCursor { - c := &integerAscendingCursor{} - - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadIntegerBlock(&c.tsm.values) - c.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool { - return c.tsm.values[i].UnixNano() >= seek - }) - - return c -} - -// peekCache returns the current time/value from the cache. -func (c *integerAscendingCursor) peekCache() (t int64, v int64) { - if c.cache.pos >= len(c.cache.values) { - return tsdb.EOF, 0 - } - - item := c.cache.values[c.cache.pos] - return item.UnixNano(), item.(IntegerValue).value -} - -// peekTSM returns the current time/value from tsm. -func (c *integerAscendingCursor) peekTSM() (t int64, v int64) { - if c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) { - return tsdb.EOF, 0 - } - - item := c.tsm.values[c.tsm.pos] - return item.UnixNano(), item.value -} - -// close closes the cursor and any dependent cursors. -func (c *integerAscendingCursor) close() error { - if c.tsm.keyCursor == nil { - return nil - } - - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - c.cache.values = nil - c.tsm.values = nil - return nil -} - -// next returns the next key/value for the cursor. -func (c *integerAscendingCursor) next() (int64, interface{}) { return c.nextInteger() } - -// nextInteger returns the next key/value for the cursor. -func (c *integerAscendingCursor) nextInteger() (int64, int64) { - ckey, cvalue := c.peekCache() - tkey, tvalue := c.peekTSM() - - // No more data in cache or in TSM files. - if ckey == tsdb.EOF && tkey == tsdb.EOF { - return tsdb.EOF, 0 - } - - // Both cache and tsm files have the same key, cache takes precedence. - if ckey == tkey { - c.nextCache() - c.nextTSM() - return ckey, cvalue - } - - // Buffered cache key precedes that in TSM file. - if ckey != tsdb.EOF && (ckey < tkey || tkey == tsdb.EOF) { - c.nextCache() - return ckey, cvalue - } - - // Buffered TSM key precedes that in cache. - c.nextTSM() - return tkey, tvalue -} - -// nextCache returns the next value from the cache. -func (c *integerAscendingCursor) nextCache() { - if c.cache.pos >= len(c.cache.values) { - return - } - c.cache.pos++ -} - -// nextTSM returns the next value from the TSM files. 
-func (c *integerAscendingCursor) nextTSM() { - c.tsm.pos++ - if c.tsm.pos >= len(c.tsm.values) { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadIntegerBlock(&c.tsm.values) - if len(c.tsm.values) == 0 { - return - } - c.tsm.pos = 0 - } -} - -type integerDescendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - values []IntegerValue - pos int - keyCursor *KeyCursor - } -} - -func newIntegerDescendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *integerDescendingCursor { - c := &integerDescendingCursor{} - - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - if t, _ := c.peekCache(); t != seek { - c.cache.pos-- - } - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadIntegerBlock(&c.tsm.values) - c.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool { - return c.tsm.values[i].UnixNano() >= seek - }) - if t, _ := c.peekTSM(); t != seek { - c.tsm.pos-- - } - - return c -} - -// peekCache returns the current time/value from the cache. -func (c *integerDescendingCursor) peekCache() (t int64, v int64) { - if c.cache.pos < 0 || c.cache.pos >= len(c.cache.values) { - return tsdb.EOF, 0 - } - - item := c.cache.values[c.cache.pos] - return item.UnixNano(), item.(IntegerValue).value -} - -// peekTSM returns the current time/value from tsm. -func (c *integerDescendingCursor) peekTSM() (t int64, v int64) { - if c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) { - return tsdb.EOF, 0 - } - - item := c.tsm.values[c.tsm.pos] - return item.UnixNano(), item.value -} - -// close closes the cursor and any dependent cursors. -func (c *integerDescendingCursor) close() error { - if c.tsm.keyCursor == nil { - return nil - } - - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - c.cache.values = nil - c.tsm.values = nil - return nil -} - -// next returns the next key/value for the cursor. -func (c *integerDescendingCursor) next() (int64, interface{}) { return c.nextInteger() } - -// nextInteger returns the next key/value for the cursor. -func (c *integerDescendingCursor) nextInteger() (int64, int64) { - ckey, cvalue := c.peekCache() - tkey, tvalue := c.peekTSM() - - // No more data in cache or in TSM files. - if ckey == tsdb.EOF && tkey == tsdb.EOF { - return tsdb.EOF, 0 - } - - // Both cache and tsm files have the same key, cache takes precedence. - if ckey == tkey { - c.nextCache() - c.nextTSM() - return ckey, cvalue - } - - // Buffered cache key precedes that in TSM file. - if ckey != tsdb.EOF && (ckey > tkey || tkey == tsdb.EOF) { - c.nextCache() - return ckey, cvalue - } - - // Buffered TSM key precedes that in cache. - c.nextTSM() - return tkey, tvalue -} - -// nextCache returns the next value from the cache. -func (c *integerDescendingCursor) nextCache() { - if c.cache.pos < 0 { - return - } - c.cache.pos-- -} - -// nextTSM returns the next value from the TSM files. 
-func (c *integerDescendingCursor) nextTSM() { - c.tsm.pos-- - if c.tsm.pos < 0 { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadIntegerBlock(&c.tsm.values) - if len(c.tsm.values) == 0 { - return - } - c.tsm.pos = len(c.tsm.values) - 1 - } -} - -type unsignedFinalizerIterator struct { - query.UnsignedIterator - logger *zap.Logger -} - -func newUnsignedFinalizerIterator(inner query.UnsignedIterator, logger *zap.Logger) *unsignedFinalizerIterator { - itr := &unsignedFinalizerIterator{UnsignedIterator: inner, logger: logger} - runtime.SetFinalizer(itr, (*unsignedFinalizerIterator).closeGC) - return itr -} - -func (itr *unsignedFinalizerIterator) closeGC() { - go func() { - itr.logger.Error("UnsignedIterator finalized by GC") - itr.Close() - }() -} - -func (itr *unsignedFinalizerIterator) Close() error { - runtime.SetFinalizer(itr, nil) - return itr.UnsignedIterator.Close() -} - -type unsignedInstrumentedIterator struct { - query.UnsignedIterator - span *tracing.Span - group *metrics.Group -} - -func newUnsignedInstrumentedIterator(inner query.UnsignedIterator, span *tracing.Span, group *metrics.Group) *unsignedInstrumentedIterator { - return &unsignedInstrumentedIterator{UnsignedIterator: inner, span: span, group: group} -} - -func (itr *unsignedInstrumentedIterator) Close() error { - var f fields.Fields - itr.group.ForEach(func(v metrics.Metric) { - switch m := v.(type) { - case *metrics.Counter: - f = append(f, fields.Int64(m.Name(), m.Value())) - - case *metrics.Timer: - f = append(f, fields.Duration(m.Name(), m.Value())) - - default: - panic("unexpected metrics") - } - }) - itr.span.SetFields(f) - itr.span.Finish() - - return itr.UnsignedIterator.Close() -} - -type unsignedIterator struct { - cur unsignedCursor - aux []cursorAt - conds struct { - names []string - curs []cursorAt - } - opt query.IteratorOptions - - m map[string]interface{} // map used for condition evaluation - point query.UnsignedPoint // reusable buffer - - statsLock sync.Mutex - stats query.IteratorStats - statsBuf query.IteratorStats - valuer influxql.ValuerEval -} - -func newUnsignedIterator(name string, tags query.Tags, opt query.IteratorOptions, cur unsignedCursor, aux []cursorAt, conds []cursorAt, condNames []string) *unsignedIterator { - itr := &unsignedIterator{ - cur: cur, - aux: aux, - opt: opt, - point: query.UnsignedPoint{ - Name: name, - Tags: tags, - }, - statsBuf: query.IteratorStats{ - SeriesN: 1, - }, - } - itr.stats = itr.statsBuf - - if len(aux) > 0 { - itr.point.Aux = make([]interface{}, len(aux)) - } - - if opt.Condition != nil { - itr.m = make(map[string]interface{}, len(aux)+len(conds)) - } - itr.conds.names = condNames - itr.conds.curs = conds - - itr.valuer = influxql.ValuerEval{ - Valuer: influxql.MultiValuer( - query.MathValuer{}, - influxql.MapValuer(itr.m), - ), - } - - return itr -} - -// Next returns the next point from the iterator. -func (itr *unsignedIterator) Next() (*query.UnsignedPoint, error) { - for { - seek := tsdb.EOF - - if itr.cur != nil { - // Read from the main cursor if we have one. - itr.point.Time, itr.point.Value = itr.cur.nextUnsigned() - seek = itr.point.Time - } else { - // Otherwise find lowest aux timestamp. - for i := range itr.aux { - if k, _ := itr.aux[i].peek(); k != tsdb.EOF { - if seek == tsdb.EOF || (itr.opt.Ascending && k < seek) || (!itr.opt.Ascending && k > seek) { - seek = k - } - } - } - itr.point.Time = seek - } - - // Exit if we have no more points or we are outside our time range. 
- if itr.point.Time == tsdb.EOF { - itr.copyStats() - return nil, nil - } else if itr.opt.Ascending && itr.point.Time > itr.opt.EndTime { - itr.copyStats() - return nil, nil - } else if !itr.opt.Ascending && itr.point.Time < itr.opt.StartTime { - itr.copyStats() - return nil, nil - } - - // Read from each auxiliary cursor. - for i := range itr.opt.Aux { - itr.point.Aux[i] = itr.aux[i].nextAt(seek) - } - - // Read from condition field cursors. - for i := range itr.conds.curs { - itr.m[itr.conds.names[i]] = itr.conds.curs[i].nextAt(seek) - } - - // Evaluate condition, if one exists. Retry if it fails. - if itr.opt.Condition != nil && !itr.valuer.EvalBool(itr.opt.Condition) { - continue - } - - // Track points returned. - itr.statsBuf.PointN++ - - // Copy buffer to stats periodically. - if itr.statsBuf.PointN%statsBufferCopyIntervalN == 0 { - itr.copyStats() - } - - return &itr.point, nil - } -} - -// copyStats copies from the itr stats buffer to the stats under lock. -func (itr *unsignedIterator) copyStats() { - itr.statsLock.Lock() - itr.stats = itr.statsBuf - itr.statsLock.Unlock() -} - -// Stats returns stats on the points processed. -func (itr *unsignedIterator) Stats() query.IteratorStats { - itr.statsLock.Lock() - stats := itr.stats - itr.statsLock.Unlock() - return stats -} - -// Close closes the iterator. -func (itr *unsignedIterator) Close() error { - cursorsAt(itr.aux).close() - itr.aux = nil - cursorsAt(itr.conds.curs).close() - itr.conds.curs = nil - if itr.cur != nil { - err := itr.cur.close() - itr.cur = nil - return err - } - return nil -} - -// unsignedLimitIterator -type unsignedLimitIterator struct { - input query.UnsignedIterator - opt query.IteratorOptions - n int -} - -func newUnsignedLimitIterator(input query.UnsignedIterator, opt query.IteratorOptions) *unsignedLimitIterator { - return &unsignedLimitIterator{ - input: input, - opt: opt, - } -} - -func (itr *unsignedLimitIterator) Stats() query.IteratorStats { return itr.input.Stats() } -func (itr *unsignedLimitIterator) Close() error { return itr.input.Close() } - -func (itr *unsignedLimitIterator) Next() (*query.UnsignedPoint, error) { - // Check if we are beyond the limit. - if (itr.n - itr.opt.Offset) > itr.opt.Limit { - return nil, nil - } - - // Read the next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Increment counter. - itr.n++ - - // Offsets are handled by a higher level iterator so return all points. - return p, nil -} - -// unsignedCursor represents an object for iterating over a single unsigned field. 
-type unsignedCursor interface { - cursor - nextUnsigned() (t int64, v uint64) -} - -func newUnsignedCursor(seek int64, ascending bool, cacheValues Values, tsmKeyCursor *KeyCursor) unsignedCursor { - if ascending { - return newUnsignedAscendingCursor(seek, cacheValues, tsmKeyCursor) - } - return newUnsignedDescendingCursor(seek, cacheValues, tsmKeyCursor) -} - -type unsignedAscendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - values []UnsignedValue - pos int - keyCursor *KeyCursor - } -} - -func newUnsignedAscendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *unsignedAscendingCursor { - c := &unsignedAscendingCursor{} - - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadUnsignedBlock(&c.tsm.values) - c.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool { - return c.tsm.values[i].UnixNano() >= seek - }) - - return c -} - -// peekCache returns the current time/value from the cache. -func (c *unsignedAscendingCursor) peekCache() (t int64, v uint64) { - if c.cache.pos >= len(c.cache.values) { - return tsdb.EOF, 0 - } - - item := c.cache.values[c.cache.pos] - return item.UnixNano(), item.(UnsignedValue).value -} - -// peekTSM returns the current time/value from tsm. -func (c *unsignedAscendingCursor) peekTSM() (t int64, v uint64) { - if c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) { - return tsdb.EOF, 0 - } - - item := c.tsm.values[c.tsm.pos] - return item.UnixNano(), item.value -} - -// close closes the cursor and any dependent cursors. -func (c *unsignedAscendingCursor) close() error { - if c.tsm.keyCursor == nil { - return nil - } - - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - c.cache.values = nil - c.tsm.values = nil - return nil -} - -// next returns the next key/value for the cursor. -func (c *unsignedAscendingCursor) next() (int64, interface{}) { return c.nextUnsigned() } - -// nextUnsigned returns the next key/value for the cursor. -func (c *unsignedAscendingCursor) nextUnsigned() (int64, uint64) { - ckey, cvalue := c.peekCache() - tkey, tvalue := c.peekTSM() - - // No more data in cache or in TSM files. - if ckey == tsdb.EOF && tkey == tsdb.EOF { - return tsdb.EOF, 0 - } - - // Both cache and tsm files have the same key, cache takes precedence. - if ckey == tkey { - c.nextCache() - c.nextTSM() - return ckey, cvalue - } - - // Buffered cache key precedes that in TSM file. - if ckey != tsdb.EOF && (ckey < tkey || tkey == tsdb.EOF) { - c.nextCache() - return ckey, cvalue - } - - // Buffered TSM key precedes that in cache. - c.nextTSM() - return tkey, tvalue -} - -// nextCache returns the next value from the cache. -func (c *unsignedAscendingCursor) nextCache() { - if c.cache.pos >= len(c.cache.values) { - return - } - c.cache.pos++ -} - -// nextTSM returns the next value from the TSM files. 
-func (c *unsignedAscendingCursor) nextTSM() { - c.tsm.pos++ - if c.tsm.pos >= len(c.tsm.values) { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadUnsignedBlock(&c.tsm.values) - if len(c.tsm.values) == 0 { - return - } - c.tsm.pos = 0 - } -} - -type unsignedDescendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - values []UnsignedValue - pos int - keyCursor *KeyCursor - } -} - -func newUnsignedDescendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *unsignedDescendingCursor { - c := &unsignedDescendingCursor{} - - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - if t, _ := c.peekCache(); t != seek { - c.cache.pos-- - } - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadUnsignedBlock(&c.tsm.values) - c.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool { - return c.tsm.values[i].UnixNano() >= seek - }) - if t, _ := c.peekTSM(); t != seek { - c.tsm.pos-- - } - - return c -} - -// peekCache returns the current time/value from the cache. -func (c *unsignedDescendingCursor) peekCache() (t int64, v uint64) { - if c.cache.pos < 0 || c.cache.pos >= len(c.cache.values) { - return tsdb.EOF, 0 - } - - item := c.cache.values[c.cache.pos] - return item.UnixNano(), item.(UnsignedValue).value -} - -// peekTSM returns the current time/value from tsm. -func (c *unsignedDescendingCursor) peekTSM() (t int64, v uint64) { - if c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) { - return tsdb.EOF, 0 - } - - item := c.tsm.values[c.tsm.pos] - return item.UnixNano(), item.value -} - -// close closes the cursor and any dependent cursors. -func (c *unsignedDescendingCursor) close() error { - if c.tsm.keyCursor == nil { - return nil - } - - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - c.cache.values = nil - c.tsm.values = nil - return nil -} - -// next returns the next key/value for the cursor. -func (c *unsignedDescendingCursor) next() (int64, interface{}) { return c.nextUnsigned() } - -// nextUnsigned returns the next key/value for the cursor. -func (c *unsignedDescendingCursor) nextUnsigned() (int64, uint64) { - ckey, cvalue := c.peekCache() - tkey, tvalue := c.peekTSM() - - // No more data in cache or in TSM files. - if ckey == tsdb.EOF && tkey == tsdb.EOF { - return tsdb.EOF, 0 - } - - // Both cache and tsm files have the same key, cache takes precedence. - if ckey == tkey { - c.nextCache() - c.nextTSM() - return ckey, cvalue - } - - // Buffered cache key precedes that in TSM file. - if ckey != tsdb.EOF && (ckey > tkey || tkey == tsdb.EOF) { - c.nextCache() - return ckey, cvalue - } - - // Buffered TSM key precedes that in cache. - c.nextTSM() - return tkey, tvalue -} - -// nextCache returns the next value from the cache. -func (c *unsignedDescendingCursor) nextCache() { - if c.cache.pos < 0 { - return - } - c.cache.pos-- -} - -// nextTSM returns the next value from the TSM files. 
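Editorial aside on the descending constructor above: sort.Search locates the first value with a timestamp at or after seek, which is the right starting point when ascending but one slot too far when descending. The follow-up peek-and-compare against seek steps the position back so iteration starts at the newest value at or before the seek time. A small illustration with made-up timestamps:

package main

import (
	"fmt"
	"sort"
)

func main() {
	times := []int64{5, 10, 15} // values are stored in ascending time order
	seek := int64(12)

	// First index whose timestamp is >= seek (here: index 2, time 15).
	pos := sort.Search(len(times), func(i int) bool { return times[i] >= seek })

	// For a descending cursor, back up unless we landed exactly on seek,
	// mirroring the peek-and-decrement in the constructor above.
	if pos >= len(times) || times[pos] != seek {
		pos--
	}
	fmt.Println(pos, times[pos]) // 1 10: the newest value at or before seek
}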
-func (c *unsignedDescendingCursor) nextTSM() { - c.tsm.pos-- - if c.tsm.pos < 0 { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadUnsignedBlock(&c.tsm.values) - if len(c.tsm.values) == 0 { - return - } - c.tsm.pos = len(c.tsm.values) - 1 - } -} - -type stringFinalizerIterator struct { - query.StringIterator - logger *zap.Logger -} - -func newStringFinalizerIterator(inner query.StringIterator, logger *zap.Logger) *stringFinalizerIterator { - itr := &stringFinalizerIterator{StringIterator: inner, logger: logger} - runtime.SetFinalizer(itr, (*stringFinalizerIterator).closeGC) - return itr -} - -func (itr *stringFinalizerIterator) closeGC() { - go func() { - itr.logger.Error("StringIterator finalized by GC") - itr.Close() - }() -} - -func (itr *stringFinalizerIterator) Close() error { - runtime.SetFinalizer(itr, nil) - return itr.StringIterator.Close() -} - -type stringInstrumentedIterator struct { - query.StringIterator - span *tracing.Span - group *metrics.Group -} - -func newStringInstrumentedIterator(inner query.StringIterator, span *tracing.Span, group *metrics.Group) *stringInstrumentedIterator { - return &stringInstrumentedIterator{StringIterator: inner, span: span, group: group} -} - -func (itr *stringInstrumentedIterator) Close() error { - var f fields.Fields - itr.group.ForEach(func(v metrics.Metric) { - switch m := v.(type) { - case *metrics.Counter: - f = append(f, fields.Int64(m.Name(), m.Value())) - - case *metrics.Timer: - f = append(f, fields.Duration(m.Name(), m.Value())) - - default: - panic("unexpected metrics") - } - }) - itr.span.SetFields(f) - itr.span.Finish() - - return itr.StringIterator.Close() -} - -type stringIterator struct { - cur stringCursor - aux []cursorAt - conds struct { - names []string - curs []cursorAt - } - opt query.IteratorOptions - - m map[string]interface{} // map used for condition evaluation - point query.StringPoint // reusable buffer - - statsLock sync.Mutex - stats query.IteratorStats - statsBuf query.IteratorStats - valuer influxql.ValuerEval -} - -func newStringIterator(name string, tags query.Tags, opt query.IteratorOptions, cur stringCursor, aux []cursorAt, conds []cursorAt, condNames []string) *stringIterator { - itr := &stringIterator{ - cur: cur, - aux: aux, - opt: opt, - point: query.StringPoint{ - Name: name, - Tags: tags, - }, - statsBuf: query.IteratorStats{ - SeriesN: 1, - }, - } - itr.stats = itr.statsBuf - - if len(aux) > 0 { - itr.point.Aux = make([]interface{}, len(aux)) - } - - if opt.Condition != nil { - itr.m = make(map[string]interface{}, len(aux)+len(conds)) - } - itr.conds.names = condNames - itr.conds.curs = conds - - itr.valuer = influxql.ValuerEval{ - Valuer: influxql.MultiValuer( - query.MathValuer{}, - influxql.MapValuer(itr.m), - ), - } - - return itr -} - -// Next returns the next point from the iterator. -func (itr *stringIterator) Next() (*query.StringPoint, error) { - for { - seek := tsdb.EOF - - if itr.cur != nil { - // Read from the main cursor if we have one. - itr.point.Time, itr.point.Value = itr.cur.nextString() - seek = itr.point.Time - } else { - // Otherwise find lowest aux timestamp. - for i := range itr.aux { - if k, _ := itr.aux[i].peek(); k != tsdb.EOF { - if seek == tsdb.EOF || (itr.opt.Ascending && k < seek) || (!itr.opt.Ascending && k > seek) { - seek = k - } - } - } - itr.point.Time = seek - } - - // Exit if we have no more points or we are outside our time range. 
- if itr.point.Time == tsdb.EOF { - itr.copyStats() - return nil, nil - } else if itr.opt.Ascending && itr.point.Time > itr.opt.EndTime { - itr.copyStats() - return nil, nil - } else if !itr.opt.Ascending && itr.point.Time < itr.opt.StartTime { - itr.copyStats() - return nil, nil - } - - // Read from each auxiliary cursor. - for i := range itr.opt.Aux { - itr.point.Aux[i] = itr.aux[i].nextAt(seek) - } - - // Read from condition field cursors. - for i := range itr.conds.curs { - itr.m[itr.conds.names[i]] = itr.conds.curs[i].nextAt(seek) - } - - // Evaluate condition, if one exists. Retry if it fails. - if itr.opt.Condition != nil && !itr.valuer.EvalBool(itr.opt.Condition) { - continue - } - - // Track points returned. - itr.statsBuf.PointN++ - - // Copy buffer to stats periodically. - if itr.statsBuf.PointN%statsBufferCopyIntervalN == 0 { - itr.copyStats() - } - - return &itr.point, nil - } -} - -// copyStats copies from the itr stats buffer to the stats under lock. -func (itr *stringIterator) copyStats() { - itr.statsLock.Lock() - itr.stats = itr.statsBuf - itr.statsLock.Unlock() -} - -// Stats returns stats on the points processed. -func (itr *stringIterator) Stats() query.IteratorStats { - itr.statsLock.Lock() - stats := itr.stats - itr.statsLock.Unlock() - return stats -} - -// Close closes the iterator. -func (itr *stringIterator) Close() error { - cursorsAt(itr.aux).close() - itr.aux = nil - cursorsAt(itr.conds.curs).close() - itr.conds.curs = nil - if itr.cur != nil { - err := itr.cur.close() - itr.cur = nil - return err - } - return nil -} - -// stringLimitIterator -type stringLimitIterator struct { - input query.StringIterator - opt query.IteratorOptions - n int -} - -func newStringLimitIterator(input query.StringIterator, opt query.IteratorOptions) *stringLimitIterator { - return &stringLimitIterator{ - input: input, - opt: opt, - } -} - -func (itr *stringLimitIterator) Stats() query.IteratorStats { return itr.input.Stats() } -func (itr *stringLimitIterator) Close() error { return itr.input.Close() } - -func (itr *stringLimitIterator) Next() (*query.StringPoint, error) { - // Check if we are beyond the limit. - if (itr.n - itr.opt.Offset) > itr.opt.Limit { - return nil, nil - } - - // Read the next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Increment counter. - itr.n++ - - // Offsets are handled by a higher level iterator so return all points. - return p, nil -} - -// stringCursor represents an object for iterating over a single string field. 
-type stringCursor interface { - cursor - nextString() (t int64, v string) -} - -func newStringCursor(seek int64, ascending bool, cacheValues Values, tsmKeyCursor *KeyCursor) stringCursor { - if ascending { - return newStringAscendingCursor(seek, cacheValues, tsmKeyCursor) - } - return newStringDescendingCursor(seek, cacheValues, tsmKeyCursor) -} - -type stringAscendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - values []StringValue - pos int - keyCursor *KeyCursor - } -} - -func newStringAscendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *stringAscendingCursor { - c := &stringAscendingCursor{} - - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadStringBlock(&c.tsm.values) - c.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool { - return c.tsm.values[i].UnixNano() >= seek - }) - - return c -} - -// peekCache returns the current time/value from the cache. -func (c *stringAscendingCursor) peekCache() (t int64, v string) { - if c.cache.pos >= len(c.cache.values) { - return tsdb.EOF, "" - } - - item := c.cache.values[c.cache.pos] - return item.UnixNano(), item.(StringValue).value -} - -// peekTSM returns the current time/value from tsm. -func (c *stringAscendingCursor) peekTSM() (t int64, v string) { - if c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) { - return tsdb.EOF, "" - } - - item := c.tsm.values[c.tsm.pos] - return item.UnixNano(), item.value -} - -// close closes the cursor and any dependent cursors. -func (c *stringAscendingCursor) close() error { - if c.tsm.keyCursor == nil { - return nil - } - - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - c.cache.values = nil - c.tsm.values = nil - return nil -} - -// next returns the next key/value for the cursor. -func (c *stringAscendingCursor) next() (int64, interface{}) { return c.nextString() } - -// nextString returns the next key/value for the cursor. -func (c *stringAscendingCursor) nextString() (int64, string) { - ckey, cvalue := c.peekCache() - tkey, tvalue := c.peekTSM() - - // No more data in cache or in TSM files. - if ckey == tsdb.EOF && tkey == tsdb.EOF { - return tsdb.EOF, "" - } - - // Both cache and tsm files have the same key, cache takes precedence. - if ckey == tkey { - c.nextCache() - c.nextTSM() - return ckey, cvalue - } - - // Buffered cache key precedes that in TSM file. - if ckey != tsdb.EOF && (ckey < tkey || tkey == tsdb.EOF) { - c.nextCache() - return ckey, cvalue - } - - // Buffered TSM key precedes that in cache. - c.nextTSM() - return tkey, tvalue -} - -// nextCache returns the next value from the cache. -func (c *stringAscendingCursor) nextCache() { - if c.cache.pos >= len(c.cache.values) { - return - } - c.cache.pos++ -} - -// nextTSM returns the next value from the TSM files. 
-func (c *stringAscendingCursor) nextTSM() { - c.tsm.pos++ - if c.tsm.pos >= len(c.tsm.values) { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadStringBlock(&c.tsm.values) - if len(c.tsm.values) == 0 { - return - } - c.tsm.pos = 0 - } -} - -type stringDescendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - values []StringValue - pos int - keyCursor *KeyCursor - } -} - -func newStringDescendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *stringDescendingCursor { - c := &stringDescendingCursor{} - - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - if t, _ := c.peekCache(); t != seek { - c.cache.pos-- - } - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadStringBlock(&c.tsm.values) - c.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool { - return c.tsm.values[i].UnixNano() >= seek - }) - if t, _ := c.peekTSM(); t != seek { - c.tsm.pos-- - } - - return c -} - -// peekCache returns the current time/value from the cache. -func (c *stringDescendingCursor) peekCache() (t int64, v string) { - if c.cache.pos < 0 || c.cache.pos >= len(c.cache.values) { - return tsdb.EOF, "" - } - - item := c.cache.values[c.cache.pos] - return item.UnixNano(), item.(StringValue).value -} - -// peekTSM returns the current time/value from tsm. -func (c *stringDescendingCursor) peekTSM() (t int64, v string) { - if c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) { - return tsdb.EOF, "" - } - - item := c.tsm.values[c.tsm.pos] - return item.UnixNano(), item.value -} - -// close closes the cursor and any dependent cursors. -func (c *stringDescendingCursor) close() error { - if c.tsm.keyCursor == nil { - return nil - } - - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - c.cache.values = nil - c.tsm.values = nil - return nil -} - -// next returns the next key/value for the cursor. -func (c *stringDescendingCursor) next() (int64, interface{}) { return c.nextString() } - -// nextString returns the next key/value for the cursor. -func (c *stringDescendingCursor) nextString() (int64, string) { - ckey, cvalue := c.peekCache() - tkey, tvalue := c.peekTSM() - - // No more data in cache or in TSM files. - if ckey == tsdb.EOF && tkey == tsdb.EOF { - return tsdb.EOF, "" - } - - // Both cache and tsm files have the same key, cache takes precedence. - if ckey == tkey { - c.nextCache() - c.nextTSM() - return ckey, cvalue - } - - // Buffered cache key precedes that in TSM file. - if ckey != tsdb.EOF && (ckey > tkey || tkey == tsdb.EOF) { - c.nextCache() - return ckey, cvalue - } - - // Buffered TSM key precedes that in cache. - c.nextTSM() - return tkey, tvalue -} - -// nextCache returns the next value from the cache. -func (c *stringDescendingCursor) nextCache() { - if c.cache.pos < 0 { - return - } - c.cache.pos-- -} - -// nextTSM returns the next value from the TSM files. 
-func (c *stringDescendingCursor) nextTSM() { - c.tsm.pos-- - if c.tsm.pos < 0 { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadStringBlock(&c.tsm.values) - if len(c.tsm.values) == 0 { - return - } - c.tsm.pos = len(c.tsm.values) - 1 - } -} - -type booleanFinalizerIterator struct { - query.BooleanIterator - logger *zap.Logger -} - -func newBooleanFinalizerIterator(inner query.BooleanIterator, logger *zap.Logger) *booleanFinalizerIterator { - itr := &booleanFinalizerIterator{BooleanIterator: inner, logger: logger} - runtime.SetFinalizer(itr, (*booleanFinalizerIterator).closeGC) - return itr -} - -func (itr *booleanFinalizerIterator) closeGC() { - go func() { - itr.logger.Error("BooleanIterator finalized by GC") - itr.Close() - }() -} - -func (itr *booleanFinalizerIterator) Close() error { - runtime.SetFinalizer(itr, nil) - return itr.BooleanIterator.Close() -} - -type booleanInstrumentedIterator struct { - query.BooleanIterator - span *tracing.Span - group *metrics.Group -} - -func newBooleanInstrumentedIterator(inner query.BooleanIterator, span *tracing.Span, group *metrics.Group) *booleanInstrumentedIterator { - return &booleanInstrumentedIterator{BooleanIterator: inner, span: span, group: group} -} - -func (itr *booleanInstrumentedIterator) Close() error { - var f fields.Fields - itr.group.ForEach(func(v metrics.Metric) { - switch m := v.(type) { - case *metrics.Counter: - f = append(f, fields.Int64(m.Name(), m.Value())) - - case *metrics.Timer: - f = append(f, fields.Duration(m.Name(), m.Value())) - - default: - panic("unexpected metrics") - } - }) - itr.span.SetFields(f) - itr.span.Finish() - - return itr.BooleanIterator.Close() -} - -type booleanIterator struct { - cur booleanCursor - aux []cursorAt - conds struct { - names []string - curs []cursorAt - } - opt query.IteratorOptions - - m map[string]interface{} // map used for condition evaluation - point query.BooleanPoint // reusable buffer - - statsLock sync.Mutex - stats query.IteratorStats - statsBuf query.IteratorStats - valuer influxql.ValuerEval -} - -func newBooleanIterator(name string, tags query.Tags, opt query.IteratorOptions, cur booleanCursor, aux []cursorAt, conds []cursorAt, condNames []string) *booleanIterator { - itr := &booleanIterator{ - cur: cur, - aux: aux, - opt: opt, - point: query.BooleanPoint{ - Name: name, - Tags: tags, - }, - statsBuf: query.IteratorStats{ - SeriesN: 1, - }, - } - itr.stats = itr.statsBuf - - if len(aux) > 0 { - itr.point.Aux = make([]interface{}, len(aux)) - } - - if opt.Condition != nil { - itr.m = make(map[string]interface{}, len(aux)+len(conds)) - } - itr.conds.names = condNames - itr.conds.curs = conds - - itr.valuer = influxql.ValuerEval{ - Valuer: influxql.MultiValuer( - query.MathValuer{}, - influxql.MapValuer(itr.m), - ), - } - - return itr -} - -// Next returns the next point from the iterator. -func (itr *booleanIterator) Next() (*query.BooleanPoint, error) { - for { - seek := tsdb.EOF - - if itr.cur != nil { - // Read from the main cursor if we have one. - itr.point.Time, itr.point.Value = itr.cur.nextBoolean() - seek = itr.point.Time - } else { - // Otherwise find lowest aux timestamp. - for i := range itr.aux { - if k, _ := itr.aux[i].peek(); k != tsdb.EOF { - if seek == tsdb.EOF || (itr.opt.Ascending && k < seek) || (!itr.opt.Ascending && k > seek) { - seek = k - } - } - } - itr.point.Time = seek - } - - // Exit if we have no more points or we are outside our time range. 
- if itr.point.Time == tsdb.EOF { - itr.copyStats() - return nil, nil - } else if itr.opt.Ascending && itr.point.Time > itr.opt.EndTime { - itr.copyStats() - return nil, nil - } else if !itr.opt.Ascending && itr.point.Time < itr.opt.StartTime { - itr.copyStats() - return nil, nil - } - - // Read from each auxiliary cursor. - for i := range itr.opt.Aux { - itr.point.Aux[i] = itr.aux[i].nextAt(seek) - } - - // Read from condition field cursors. - for i := range itr.conds.curs { - itr.m[itr.conds.names[i]] = itr.conds.curs[i].nextAt(seek) - } - - // Evaluate condition, if one exists. Retry if it fails. - if itr.opt.Condition != nil && !itr.valuer.EvalBool(itr.opt.Condition) { - continue - } - - // Track points returned. - itr.statsBuf.PointN++ - - // Copy buffer to stats periodically. - if itr.statsBuf.PointN%statsBufferCopyIntervalN == 0 { - itr.copyStats() - } - - return &itr.point, nil - } -} - -// copyStats copies from the itr stats buffer to the stats under lock. -func (itr *booleanIterator) copyStats() { - itr.statsLock.Lock() - itr.stats = itr.statsBuf - itr.statsLock.Unlock() -} - -// Stats returns stats on the points processed. -func (itr *booleanIterator) Stats() query.IteratorStats { - itr.statsLock.Lock() - stats := itr.stats - itr.statsLock.Unlock() - return stats -} - -// Close closes the iterator. -func (itr *booleanIterator) Close() error { - cursorsAt(itr.aux).close() - itr.aux = nil - cursorsAt(itr.conds.curs).close() - itr.conds.curs = nil - if itr.cur != nil { - err := itr.cur.close() - itr.cur = nil - return err - } - return nil -} - -// booleanLimitIterator -type booleanLimitIterator struct { - input query.BooleanIterator - opt query.IteratorOptions - n int -} - -func newBooleanLimitIterator(input query.BooleanIterator, opt query.IteratorOptions) *booleanLimitIterator { - return &booleanLimitIterator{ - input: input, - opt: opt, - } -} - -func (itr *booleanLimitIterator) Stats() query.IteratorStats { return itr.input.Stats() } -func (itr *booleanLimitIterator) Close() error { return itr.input.Close() } - -func (itr *booleanLimitIterator) Next() (*query.BooleanPoint, error) { - // Check if we are beyond the limit. - if (itr.n - itr.opt.Offset) > itr.opt.Limit { - return nil, nil - } - - // Read the next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Increment counter. - itr.n++ - - // Offsets are handled by a higher level iterator so return all points. - return p, nil -} - -// booleanCursor represents an object for iterating over a single boolean field. 
-type booleanCursor interface { - cursor - nextBoolean() (t int64, v bool) -} - -func newBooleanCursor(seek int64, ascending bool, cacheValues Values, tsmKeyCursor *KeyCursor) booleanCursor { - if ascending { - return newBooleanAscendingCursor(seek, cacheValues, tsmKeyCursor) - } - return newBooleanDescendingCursor(seek, cacheValues, tsmKeyCursor) -} - -type booleanAscendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - values []BooleanValue - pos int - keyCursor *KeyCursor - } -} - -func newBooleanAscendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *booleanAscendingCursor { - c := &booleanAscendingCursor{} - - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadBooleanBlock(&c.tsm.values) - c.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool { - return c.tsm.values[i].UnixNano() >= seek - }) - - return c -} - -// peekCache returns the current time/value from the cache. -func (c *booleanAscendingCursor) peekCache() (t int64, v bool) { - if c.cache.pos >= len(c.cache.values) { - return tsdb.EOF, false - } - - item := c.cache.values[c.cache.pos] - return item.UnixNano(), item.(BooleanValue).value -} - -// peekTSM returns the current time/value from tsm. -func (c *booleanAscendingCursor) peekTSM() (t int64, v bool) { - if c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) { - return tsdb.EOF, false - } - - item := c.tsm.values[c.tsm.pos] - return item.UnixNano(), item.value -} - -// close closes the cursor and any dependent cursors. -func (c *booleanAscendingCursor) close() error { - if c.tsm.keyCursor == nil { - return nil - } - - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - c.cache.values = nil - c.tsm.values = nil - return nil -} - -// next returns the next key/value for the cursor. -func (c *booleanAscendingCursor) next() (int64, interface{}) { return c.nextBoolean() } - -// nextBoolean returns the next key/value for the cursor. -func (c *booleanAscendingCursor) nextBoolean() (int64, bool) { - ckey, cvalue := c.peekCache() - tkey, tvalue := c.peekTSM() - - // No more data in cache or in TSM files. - if ckey == tsdb.EOF && tkey == tsdb.EOF { - return tsdb.EOF, false - } - - // Both cache and tsm files have the same key, cache takes precedence. - if ckey == tkey { - c.nextCache() - c.nextTSM() - return ckey, cvalue - } - - // Buffered cache key precedes that in TSM file. - if ckey != tsdb.EOF && (ckey < tkey || tkey == tsdb.EOF) { - c.nextCache() - return ckey, cvalue - } - - // Buffered TSM key precedes that in cache. - c.nextTSM() - return tkey, tvalue -} - -// nextCache returns the next value from the cache. -func (c *booleanAscendingCursor) nextCache() { - if c.cache.pos >= len(c.cache.values) { - return - } - c.cache.pos++ -} - -// nextTSM returns the next value from the TSM files. 
-func (c *booleanAscendingCursor) nextTSM() { - c.tsm.pos++ - if c.tsm.pos >= len(c.tsm.values) { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadBooleanBlock(&c.tsm.values) - if len(c.tsm.values) == 0 { - return - } - c.tsm.pos = 0 - } -} - -type booleanDescendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - values []BooleanValue - pos int - keyCursor *KeyCursor - } -} - -func newBooleanDescendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *booleanDescendingCursor { - c := &booleanDescendingCursor{} - - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - if t, _ := c.peekCache(); t != seek { - c.cache.pos-- - } - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.ReadBooleanBlock(&c.tsm.values) - c.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool { - return c.tsm.values[i].UnixNano() >= seek - }) - if t, _ := c.peekTSM(); t != seek { - c.tsm.pos-- - } - - return c -} - -// peekCache returns the current time/value from the cache. -func (c *booleanDescendingCursor) peekCache() (t int64, v bool) { - if c.cache.pos < 0 || c.cache.pos >= len(c.cache.values) { - return tsdb.EOF, false - } - - item := c.cache.values[c.cache.pos] - return item.UnixNano(), item.(BooleanValue).value -} - -// peekTSM returns the current time/value from tsm. -func (c *booleanDescendingCursor) peekTSM() (t int64, v bool) { - if c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) { - return tsdb.EOF, false - } - - item := c.tsm.values[c.tsm.pos] - return item.UnixNano(), item.value -} - -// close closes the cursor and any dependent cursors. -func (c *booleanDescendingCursor) close() error { - if c.tsm.keyCursor == nil { - return nil - } - - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - c.cache.values = nil - c.tsm.values = nil - return nil -} - -// next returns the next key/value for the cursor. -func (c *booleanDescendingCursor) next() (int64, interface{}) { return c.nextBoolean() } - -// nextBoolean returns the next key/value for the cursor. -func (c *booleanDescendingCursor) nextBoolean() (int64, bool) { - ckey, cvalue := c.peekCache() - tkey, tvalue := c.peekTSM() - - // No more data in cache or in TSM files. - if ckey == tsdb.EOF && tkey == tsdb.EOF { - return tsdb.EOF, false - } - - // Both cache and tsm files have the same key, cache takes precedence. - if ckey == tkey { - c.nextCache() - c.nextTSM() - return ckey, cvalue - } - - // Buffered cache key precedes that in TSM file. - if ckey != tsdb.EOF && (ckey > tkey || tkey == tsdb.EOF) { - c.nextCache() - return ckey, cvalue - } - - // Buffered TSM key precedes that in cache. - c.nextTSM() - return tkey, tvalue -} - -// nextCache returns the next value from the cache. -func (c *booleanDescendingCursor) nextCache() { - if c.cache.pos < 0 { - return - } - c.cache.pos-- -} - -// nextTSM returns the next value from the TSM files. 
-func (c *booleanDescendingCursor) nextTSM() { - c.tsm.pos-- - if c.tsm.pos < 0 { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.ReadBooleanBlock(&c.tsm.values) - if len(c.tsm.values) == 0 { - return - } - c.tsm.pos = len(c.tsm.values) - 1 - } -} - -var _ = fmt.Print diff --git a/tsdb/engine/tsm1/iterator.gen.go.tmpl b/tsdb/engine/tsm1/iterator.gen.go.tmpl deleted file mode 100644 index 3c2d28f44b2..00000000000 --- a/tsdb/engine/tsm1/iterator.gen.go.tmpl +++ /dev/null @@ -1,611 +0,0 @@ -package tsm1 - -import ( - "sort" - "fmt" - "runtime" - "sync" - - "github.com/influxdata/influxdb/v2/pkg/metrics" - "github.com/influxdata/influxdb/v2/pkg/tracing" - "github.com/influxdata/influxdb/v2/pkg/tracing/fields" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxql" - "go.uber.org/zap" -) - -type cursor interface { - close() error - next() (t int64, v interface{}) -} - -// cursorAt provides a bufferred cursor interface. -// This required for literal value cursors which don't have a time value. -type cursorAt interface { - close() error - peek() (k int64, v interface{}) - nextAt(seek int64) interface{} -} - -type nilCursor struct {} -func (nilCursor) next() (int64, interface{}) { return tsdb.EOF, nil } -func (nilCursor) close() error { return nil } - -// bufCursor implements a bufferred cursor. -type bufCursor struct { - cur cursor - buf struct { - key int64 - value interface{} - filled bool - } - ascending bool -} - -// newBufCursor returns a bufferred wrapper for cur. -func newBufCursor(cur cursor, ascending bool) *bufCursor { - return &bufCursor{cur: cur, ascending: ascending} -} - -func (c *bufCursor) close() error { - if c.cur == nil { - return nil - } - - err := c.cur.close() - c.cur = nil - return err -} - -// next returns the buffer, if filled. Otherwise returns the next key/value from the cursor. -func (c *bufCursor) next() (int64, interface{}) { - if c.buf.filled { - k, v := c.buf.key, c.buf.value - c.buf.filled = false - return k, v - } - return c.cur.next() -} - -// unread pushes k and v onto the buffer. -func (c *bufCursor) unread(k int64, v interface{}) { - c.buf.key, c.buf.value = k, v - c.buf.filled = true -} - -// peek reads next next key/value without removing them from the cursor. -func (c *bufCursor) peek() (k int64, v interface{}) { - k, v = c.next() - c.unread(k, v) - return -} - -// nextAt returns the next value where key is equal to seek. -// Skips over any keys that are less than seek. -// If the key doesn't exist then a nil value is returned instead. -func (c *bufCursor) nextAt(seek int64) interface{} { - for { - k, v := c.next() - if k != tsdb.EOF { - if k == seek { - return v - } else if c.ascending && k < seek { - continue - } else if !c.ascending && k > seek { - continue - } - c.unread(k, v) - } - - // Return "nil" value for type. - switch c.cur.(type) { - case floatCursor: - return (*float64)(nil) - case integerCursor: - return (*int64)(nil) - case unsignedCursor: - return (*uint64)(nil) - case stringCursor: - return (*string)(nil) - case booleanCursor: - return (*bool)(nil) - default: - panic("unreachable") - } - } -} - - -// statsBufferCopyIntervalN is the number of points that are read before -// copying the stats buffer to the iterator's stats field. This is used to -// amortize the cost of using a mutex when updating stats. 
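Editorial aside on the stats buffering described just above: because Next runs in a tight loop, the iterators avoid taking statsLock on every point. Next mutates statsBuf without any locking and only publishes it under the mutex every statsBufferCopyIntervalN points (plus a final copy at end of stream), while Stats always reads under the lock. A stripped-down sketch of that amortization pattern, with a plain counter standing in for query.IteratorStats:

package main

import (
	"fmt"
	"sync"
)

const copyInterval = 100 // plays the role of statsBufferCopyIntervalN

type iter struct {
	mu       sync.Mutex
	stats    int // published value, read by Stats under mu
	statsBuf int // hot-path counter, written by next without locking
}

func (it *iter) next() {
	it.statsBuf++
	if it.statsBuf%copyInterval == 0 {
		it.copyStats()
	}
}

func (it *iter) copyStats() {
	it.mu.Lock()
	it.stats = it.statsBuf
	it.mu.Unlock()
}

func (it *iter) Stats() int {
	it.mu.Lock()
	defer it.mu.Unlock()
	return it.stats
}

func main() {
	it := &iter{}
	for i := 0; i < 250; i++ {
		it.next()
	}
	// Readers may lag by up to copyInterval-1 points until the next publish
	// (or the end-of-stream copy the real iterators perform).
	fmt.Println(it.Stats()) // 200
}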
-const statsBufferCopyIntervalN = 100 - -{{range .}} - -type {{.name}}FinalizerIterator struct { - query.{{.Name}}Iterator - logger *zap.Logger -} - -func new{{.Name}}FinalizerIterator(inner query.{{.Name}}Iterator, logger *zap.Logger) *{{.name}}FinalizerIterator { - itr := &{{.name}}FinalizerIterator{ {{.Name}}Iterator: inner, logger: logger} - runtime.SetFinalizer(itr, (*{{.name}}FinalizerIterator).closeGC) - return itr -} - -func (itr *{{.name}}FinalizerIterator) closeGC() { - go func() { - itr.logger.Error("{{.Name}}Iterator finalized by GC") - itr.Close() - }() -} - -func (itr *{{.name}}FinalizerIterator) Close() error { - runtime.SetFinalizer(itr, nil) - return itr.{{.Name}}Iterator.Close() -} - - -type {{.name}}InstrumentedIterator struct { - query.{{.Name}}Iterator - span *tracing.Span - group *metrics.Group -} - -func new{{.Name}}InstrumentedIterator(inner query.{{.Name}}Iterator, span *tracing.Span, group *metrics.Group) *{{.name}}InstrumentedIterator { - return &{{.name}}InstrumentedIterator{ {{.Name}}Iterator: inner, span: span, group: group} -} - -func (itr *{{.name}}InstrumentedIterator) Close() error { - var f fields.Fields - itr.group.ForEach(func(v metrics.Metric) { - switch m := v.(type) { - case *metrics.Counter: - f = append(f, fields.Int64(m.Name(), m.Value())) - - case *metrics.Timer: - f = append(f, fields.Duration(m.Name(), m.Value())) - - default: - panic("unexpected metrics") - } - }) - itr.span.SetFields(f) - itr.span.Finish() - - return itr.{{.Name}}Iterator.Close() -} - - -type {{.name}}Iterator struct { - cur {{.name}}Cursor - aux []cursorAt - conds struct { - names []string - curs []cursorAt - } - opt query.IteratorOptions - - m map[string]interface{} // map used for condition evaluation - point query.{{.Name}}Point // reusable buffer - - statsLock sync.Mutex - stats query.IteratorStats - statsBuf query.IteratorStats - valuer influxql.ValuerEval -} - -func new{{.Name}}Iterator(name string, tags query.Tags, opt query.IteratorOptions, cur {{.name}}Cursor, aux []cursorAt, conds []cursorAt, condNames []string) *{{.name}}Iterator { - itr := &{{.name}}Iterator{ - cur: cur, - aux: aux, - opt: opt, - point: query.{{.Name}}Point{ - Name: name, - Tags: tags, - }, - statsBuf: query.IteratorStats{ - SeriesN: 1, - }, - } - itr.stats = itr.statsBuf - - if len(aux) > 0 { - itr.point.Aux = make([]interface{}, len(aux)) - } - - if opt.Condition != nil { - itr.m = make(map[string]interface{}, len(aux)+len(conds)) - } - itr.conds.names = condNames - itr.conds.curs = conds - - itr.valuer = influxql.ValuerEval{ - Valuer: influxql.MultiValuer( - query.MathValuer{}, - influxql.MapValuer(itr.m), - ), - } - - return itr -} - -// Next returns the next point from the iterator. -func (itr *{{.name}}Iterator) Next() (*query.{{.Name}}Point, error) { - for { - seek := tsdb.EOF - - if itr.cur != nil { - // Read from the main cursor if we have one. - itr.point.Time, itr.point.Value = itr.cur.next{{.Name}}() - seek = itr.point.Time - } else { - // Otherwise find lowest aux timestamp. - for i := range itr.aux { - if k, _ := itr.aux[i].peek(); k != tsdb.EOF { - if seek == tsdb.EOF || (itr.opt.Ascending && k < seek) || (!itr.opt.Ascending && k > seek) { - seek = k - } - } - } - itr.point.Time = seek - } - - // Exit if we have no more points or we are outside our time range. 
- if itr.point.Time == tsdb.EOF { - itr.copyStats() - return nil, nil - } else if itr.opt.Ascending && itr.point.Time > itr.opt.EndTime { - itr.copyStats() - return nil, nil - } else if !itr.opt.Ascending && itr.point.Time < itr.opt.StartTime { - itr.copyStats() - return nil, nil - } - - // Read from each auxiliary cursor. - for i := range itr.opt.Aux { - itr.point.Aux[i] = itr.aux[i].nextAt(seek) - } - - // Read from condition field cursors. - for i := range itr.conds.curs { - itr.m[itr.conds.names[i]] = itr.conds.curs[i].nextAt(seek) - } - - // Evaluate condition, if one exists. Retry if it fails. - if itr.opt.Condition != nil && !itr.valuer.EvalBool(itr.opt.Condition) { - continue - } - - // Track points returned. - itr.statsBuf.PointN++ - - // Copy buffer to stats periodically. - if itr.statsBuf.PointN % statsBufferCopyIntervalN == 0 { - itr.copyStats() - } - - return &itr.point, nil - } -} - -// copyStats copies from the itr stats buffer to the stats under lock. -func (itr *{{.name}}Iterator) copyStats() { - itr.statsLock.Lock() - itr.stats = itr.statsBuf - itr.statsLock.Unlock() -} - -// Stats returns stats on the points processed. -func (itr *{{.name}}Iterator) Stats() query.IteratorStats { - itr.statsLock.Lock() - stats := itr.stats - itr.statsLock.Unlock() - return stats -} - -// Close closes the iterator. -func (itr *{{.name}}Iterator) Close() error { - cursorsAt(itr.aux).close() - itr.aux = nil - cursorsAt(itr.conds.curs).close() - itr.conds.curs = nil - if itr.cur != nil { - err := itr.cur.close() - itr.cur = nil - return err - } - return nil -} - -// {{.name}}LimitIterator -type {{.name}}LimitIterator struct { - input query.{{.Name}}Iterator - opt query.IteratorOptions - n int -} - -func new{{.Name}}LimitIterator(input query.{{.Name}}Iterator, opt query.IteratorOptions) *{{.name}}LimitIterator { - return &{{.name}}LimitIterator{ - input: input, - opt: opt, - } -} - -func (itr *{{.name}}LimitIterator) Stats() query.IteratorStats { return itr.input.Stats() } -func (itr *{{.name}}LimitIterator) Close() error { return itr.input.Close() } - -func (itr *{{.name}}LimitIterator) Next() (*query.{{.Name}}Point, error) { - // Check if we are beyond the limit. - if (itr.n-itr.opt.Offset) > itr.opt.Limit { - return nil, nil - } - - // Read the next point. - p, err := itr.input.Next() - if p == nil || err != nil { - return nil, err - } - - // Increment counter. - itr.n++ - - // Offsets are handled by a higher level iterator so return all points. - return p, nil -} - -// {{.name}}Cursor represents an object for iterating over a single {{.name}} field. 
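Editorial aside on the {{.name}}/{{.Name}}/{{.Type}} placeholders used throughout this template: they are filled from the JSON records in iterator.gen.go.tmpldata further down in this diff, producing one concrete copy of every iterator and cursor per value type, which is exactly the float/integer/unsigned/string/boolean code deleted above. The generator tooling itself is not part of this diff, but the expansion can be sketched with nothing more than text/template and one record shaped like a tmpldata entry:

package main

import (
	"os"
	"text/template"
)

func main() {
	// One record shaped like an iterator.gen.go.tmpldata entry. Maps are used
	// here because the template addresses both "Name" and lowercase "name".
	data := []map[string]string{{
		"Name": "Float", "name": "float",
		"Type": "float64", "ValueType": "FloatValue", "Nil": "0",
	}}

	// A tiny fragment in the same style as the template above.
	const src = `{{range .}}
type {{.name}}Cursor interface {
	cursor
	next{{.Name}}() (t int64, v {{.Type}})
}
{{end}}`

	tmpl := template.Must(template.New("iterators").Parse(src))
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}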
-type {{.name}}Cursor interface { - cursor - next{{.Name}}() (t int64, v {{.Type}}) -} - -func new{{.Name}}Cursor(seek int64, ascending bool, cacheValues Values, tsmKeyCursor *KeyCursor) {{.name}}Cursor { - if ascending { - return new{{.Name}}AscendingCursor(seek, cacheValues, tsmKeyCursor) - } - return new{{.Name}}DescendingCursor(seek, cacheValues, tsmKeyCursor) -} - -type {{.name}}AscendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - values []{{.Name}}Value - pos int - keyCursor *KeyCursor - } -} - -func new{{.Name}}AscendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *{{.name}}AscendingCursor { - c := &{{.name}}AscendingCursor{} - - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.Read{{.Name}}Block(&c.tsm.values) - c.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool { - return c.tsm.values[i].UnixNano() >= seek - }) - - return c -} - -// peekCache returns the current time/value from the cache. -func (c *{{.name}}AscendingCursor) peekCache() (t int64, v {{.Type}}) { - if c.cache.pos >= len(c.cache.values) { - return tsdb.EOF, {{.Nil}} - } - - item := c.cache.values[c.cache.pos] - return item.UnixNano(), item.({{.ValueType}}).value -} - -// peekTSM returns the current time/value from tsm. -func (c *{{.name}}AscendingCursor) peekTSM() (t int64, v {{.Type}}) { - if c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) { - return tsdb.EOF, {{.Nil}} - } - - item := c.tsm.values[c.tsm.pos] - return item.UnixNano(), item.value -} - -// close closes the cursor and any dependent cursors. -func (c *{{.name}}AscendingCursor) close() (error) { - if c.tsm.keyCursor == nil { - return nil - } - - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - c.cache.values = nil - c.tsm.values = nil - return nil -} - -// next returns the next key/value for the cursor. -func (c *{{.name}}AscendingCursor) next() (int64, interface{}) { return c.next{{.Name}}() } - -// next{{.Name}} returns the next key/value for the cursor. -func (c *{{.name}}AscendingCursor) next{{.Name}}() (int64, {{.Type}}) { - ckey, cvalue := c.peekCache() - tkey, tvalue := c.peekTSM() - - // No more data in cache or in TSM files. - if ckey == tsdb.EOF && tkey == tsdb.EOF { - return tsdb.EOF, {{.Nil}} - } - - // Both cache and tsm files have the same key, cache takes precedence. - if ckey == tkey { - c.nextCache() - c.nextTSM() - return ckey, cvalue - } - - // Buffered cache key precedes that in TSM file. - if ckey != tsdb.EOF && (ckey < tkey || tkey == tsdb.EOF) { - c.nextCache() - return ckey, cvalue - } - - // Buffered TSM key precedes that in cache. - c.nextTSM() - return tkey, tvalue -} - -// nextCache returns the next value from the cache. -func (c *{{.name}}AscendingCursor) nextCache() { - if c.cache.pos >= len(c.cache.values) { - return - } - c.cache.pos++ -} - -// nextTSM returns the next value from the TSM files. 
-func (c *{{.name}}AscendingCursor) nextTSM() { - c.tsm.pos++ - if c.tsm.pos >= len(c.tsm.values) { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.Read{{.Name}}Block(&c.tsm.values) - if len(c.tsm.values) == 0 { - return - } - c.tsm.pos = 0 - } -} - -type {{.name}}DescendingCursor struct { - cache struct { - values Values - pos int - } - - tsm struct { - values []{{.Name}}Value - pos int - keyCursor *KeyCursor - } -} - -func new{{.Name}}DescendingCursor(seek int64, cacheValues Values, tsmKeyCursor *KeyCursor) *{{.name}}DescendingCursor { - c := &{{.name}}DescendingCursor{} - - c.cache.values = cacheValues - c.cache.pos = sort.Search(len(c.cache.values), func(i int) bool { - return c.cache.values[i].UnixNano() >= seek - }) - if t, _ := c.peekCache(); t != seek { - c.cache.pos-- - } - - c.tsm.keyCursor = tsmKeyCursor - c.tsm.values, _ = c.tsm.keyCursor.Read{{.Name}}Block(&c.tsm.values) - c.tsm.pos = sort.Search(len(c.tsm.values), func(i int) bool { - return c.tsm.values[i].UnixNano() >= seek - }) - if t, _ := c.peekTSM(); t != seek { - c.tsm.pos-- - } - - return c -} - -// peekCache returns the current time/value from the cache. -func (c *{{.name}}DescendingCursor) peekCache() (t int64, v {{.Type}}) { - if c.cache.pos < 0 || c.cache.pos >= len(c.cache.values) { - return tsdb.EOF, {{.Nil}} - } - - item := c.cache.values[c.cache.pos] - return item.UnixNano(), item.({{.ValueType}}).value -} - -// peekTSM returns the current time/value from tsm. -func (c *{{.name}}DescendingCursor) peekTSM() (t int64, v {{.Type}}) { - if c.tsm.pos < 0 || c.tsm.pos >= len(c.tsm.values) { - return tsdb.EOF, {{.Nil}} - } - - item := c.tsm.values[c.tsm.pos] - return item.UnixNano(), item.value -} - -// close closes the cursor and any dependent cursors. -func (c *{{.name}}DescendingCursor) close() (error) { - if c.tsm.keyCursor == nil { - return nil - } - - c.tsm.keyCursor.Close() - c.tsm.keyCursor = nil - c.cache.values = nil - c.tsm.values = nil - return nil -} - -// next returns the next key/value for the cursor. -func (c *{{.name}}DescendingCursor) next() (int64, interface{}) { return c.next{{.Name}}() } - -// next{{.Name}} returns the next key/value for the cursor. -func (c *{{.name}}DescendingCursor) next{{.Name}}() (int64, {{.Type}}) { - ckey, cvalue := c.peekCache() - tkey, tvalue := c.peekTSM() - - // No more data in cache or in TSM files. - if ckey == tsdb.EOF && tkey == tsdb.EOF { - return tsdb.EOF, {{.Nil}} - } - - // Both cache and tsm files have the same key, cache takes precedence. - if ckey == tkey { - c.nextCache() - c.nextTSM() - return ckey, cvalue - } - - // Buffered cache key precedes that in TSM file. - if ckey != tsdb.EOF && (ckey > tkey || tkey == tsdb.EOF) { - c.nextCache() - return ckey, cvalue - } - - // Buffered TSM key precedes that in cache. - c.nextTSM() - return tkey, tvalue -} - -// nextCache returns the next value from the cache. -func (c *{{.name}}DescendingCursor) nextCache() { - if c.cache.pos < 0 { - return - } - c.cache.pos-- -} - -// nextTSM returns the next value from the TSM files. 
-func (c *{{.name}}DescendingCursor) nextTSM() { - c.tsm.pos-- - if c.tsm.pos < 0 { - c.tsm.keyCursor.Next() - c.tsm.values, _ = c.tsm.keyCursor.Read{{.Name}}Block(&c.tsm.values) - if len(c.tsm.values) == 0 { - return - } - c.tsm.pos = len(c.tsm.values) - 1 - } -} - -{{end}} - -var _ = fmt.Print diff --git a/tsdb/engine/tsm1/iterator.gen.go.tmpldata b/tsdb/engine/tsm1/iterator.gen.go.tmpldata deleted file mode 100644 index 3e230721cb5..00000000000 --- a/tsdb/engine/tsm1/iterator.gen.go.tmpldata +++ /dev/null @@ -1,37 +0,0 @@ -[ - { - "Name":"Float", - "name":"float", - "Type":"float64", - "ValueType":"FloatValue", - "Nil":"0" - }, - { - "Name":"Integer", - "name":"integer", - "Type":"int64", - "ValueType":"IntegerValue", - "Nil":"0" - }, - { - "Name":"Unsigned", - "name":"unsigned", - "Type":"uint64", - "ValueType":"UnsignedValue", - "Nil":"0" - }, - { - "Name":"String", - "name":"string", - "Type":"string", - "ValueType":"StringValue", - "Nil":"\"\"" - }, - { - "Name":"Boolean", - "name":"boolean", - "Type":"bool", - "ValueType":"BooleanValue", - "Nil":"false" - } -] diff --git a/tsdb/engine/tsm1/iterator.go b/tsdb/engine/tsm1/iterator.go deleted file mode 100644 index fab159fb52a..00000000000 --- a/tsdb/engine/tsm1/iterator.go +++ /dev/null @@ -1,287 +0,0 @@ -package tsm1 - -import ( - "context" - "fmt" - "sync" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/pkg/metrics" - "github.com/influxdata/influxdb/v2/pkg/tracing" - "github.com/influxdata/influxdb/v2/tsdb" - "go.uber.org/zap" -) - -func newLimitIterator(input query.Iterator, opt query.IteratorOptions) query.Iterator { - switch input := input.(type) { - case query.FloatIterator: - return newFloatLimitIterator(input, opt) - case query.IntegerIterator: - return newIntegerLimitIterator(input, opt) - case query.UnsignedIterator: - return newUnsignedLimitIterator(input, opt) - case query.StringIterator: - return newStringLimitIterator(input, opt) - case query.BooleanIterator: - return newBooleanLimitIterator(input, opt) - default: - panic(fmt.Sprintf("unsupported limit iterator type: %T", input)) - } -} - -type floatCastIntegerCursor struct { - cursor integerCursor -} - -func (c *floatCastIntegerCursor) close() error { return c.cursor.close() } - -func (c *floatCastIntegerCursor) next() (t int64, v interface{}) { return c.nextFloat() } - -func (c *floatCastIntegerCursor) nextFloat() (int64, float64) { - t, v := c.cursor.nextInteger() - return t, float64(v) -} - -type floatCastUnsignedCursor struct { - cursor unsignedCursor -} - -func (c *floatCastUnsignedCursor) close() error { return c.cursor.close() } - -func (c *floatCastUnsignedCursor) next() (t int64, v interface{}) { return c.nextFloat() } - -func (c *floatCastUnsignedCursor) nextFloat() (int64, float64) { - t, v := c.cursor.nextUnsigned() - return t, float64(v) -} - -type integerCastFloatCursor struct { - cursor floatCursor -} - -func (c *integerCastFloatCursor) close() error { return c.cursor.close() } - -func (c *integerCastFloatCursor) next() (t int64, v interface{}) { return c.nextInteger() } - -func (c *integerCastFloatCursor) nextInteger() (int64, int64) { - t, v := c.cursor.nextFloat() - return t, int64(v) -} - -type integerCastUnsignedCursor struct { - cursor unsignedCursor -} - -func (c *integerCastUnsignedCursor) close() error { return c.cursor.close() } - -func (c *integerCastUnsignedCursor) next() (t int64, v interface{}) { return c.nextInteger() } - -func (c *integerCastUnsignedCursor) 
nextInteger() (int64, int64) { - t, v := c.cursor.nextUnsigned() - return t, int64(v) -} - -type unsignedCastFloatCursor struct { - cursor floatCursor -} - -func (c *unsignedCastFloatCursor) close() error { return c.cursor.close() } - -func (c *unsignedCastFloatCursor) next() (t int64, v interface{}) { return c.nextUnsigned() } - -func (c *unsignedCastFloatCursor) nextUnsigned() (int64, uint64) { - t, v := c.cursor.nextFloat() - return t, uint64(v) -} - -type unsignedCastIntegerCursor struct { - cursor integerCursor -} - -func (c *unsignedCastIntegerCursor) close() error { return c.cursor.close() } - -func (c *unsignedCastIntegerCursor) next() (t int64, v interface{}) { return c.nextUnsigned() } - -func (c *unsignedCastIntegerCursor) nextUnsigned() (int64, uint64) { - t, v := c.cursor.nextInteger() - return t, uint64(v) -} - -// literalValueCursor represents a cursor that always returns a single value. -// It doesn't not have a time value so it can only be used with nextAt(). -type literalValueCursor struct { - value interface{} -} - -func (c *literalValueCursor) close() error { return nil } -func (c *literalValueCursor) peek() (t int64, v interface{}) { return tsdb.EOF, c.value } -func (c *literalValueCursor) next() (t int64, v interface{}) { return tsdb.EOF, c.value } -func (c *literalValueCursor) nextAt(seek int64) interface{} { return c.value } - -// preallocate and cast to cursorAt to avoid allocations -var ( - nilFloatLiteralValueCursor cursorAt = &literalValueCursor{value: (*float64)(nil)} - nilIntegerLiteralValueCursor cursorAt = &literalValueCursor{value: (*int64)(nil)} - nilUnsignedLiteralValueCursor cursorAt = &literalValueCursor{value: (*uint64)(nil)} - nilStringLiteralValueCursor cursorAt = &literalValueCursor{value: (*string)(nil)} - nilBooleanLiteralValueCursor cursorAt = &literalValueCursor{value: (*bool)(nil)} -) - -// stringSliceCursor is a cursor that outputs a slice of string values. -type stringSliceCursor struct { - values []string -} - -func (c *stringSliceCursor) close() error { return nil } - -func (c *stringSliceCursor) next() (int64, interface{}) { return c.nextString() } - -func (c *stringSliceCursor) nextString() (int64, string) { - if len(c.values) == 0 { - return tsdb.EOF, "" - } - - value := c.values[0] - c.values = c.values[1:] - return 0, value -} - -type cursorsAt []cursorAt - -func (c cursorsAt) close() { - for _, cur := range c { - cur.close() - } -} - -// newMergeFinalizerIterator creates a new Merge iterator from the inputs. If the call to Merge succeeds, -// the resulting Iterator will be wrapped in a finalizer iterator. -// If Merge returns an error, the inputs will be closed. -func newMergeFinalizerIterator(ctx context.Context, inputs []query.Iterator, opt query.IteratorOptions, log *zap.Logger) (query.Iterator, error) { - itr, err := query.Iterators(inputs).Merge(opt) - if err != nil { - query.Iterators(inputs).Close() - return nil, err - } - return newInstrumentedIterator(ctx, newFinalizerIterator(itr, log)), nil -} - -// newFinalizerIterator creates a new iterator that installs a runtime finalizer -// to ensure close is eventually called if the iterator is garbage collected. -// This additional guard attempts to protect against clients of CreateIterator not -// correctly closing them and leaking cursors. 
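Editorial aside on the finalizer wrapper dispatched below: it exists purely as a safety net for callers that forget to Close an iterator. If the wrapper is garbage collected while still open, the finalizer logs the leak and closes the underlying cursors; an explicit Close clears the finalizer so well-behaved callers pay nothing extra. A generic, standard-library-only sketch of the same guard (the type names here are invented):

package main

import (
	"io"
	"log"
	"runtime"
)

type guardedCloser struct {
	inner io.Closer
}

func newGuardedCloser(inner io.Closer) *guardedCloser {
	g := &guardedCloser{inner: inner}
	runtime.SetFinalizer(g, (*guardedCloser).closeGC)
	return g
}

func (g *guardedCloser) closeGC() {
	// Close on a separate goroutine so a slow Close cannot stall the
	// runtime's finalizer goroutine, as the iterator closeGC methods above do.
	go func() {
		log.Println("resource finalized by GC; closing leaked handle")
		g.inner.Close()
	}()
}

// Close clears the finalizer and closes the wrapped resource directly.
func (g *guardedCloser) Close() error {
	runtime.SetFinalizer(g, nil)
	return g.inner.Close()
}

type nopCloser struct{}

func (nopCloser) Close() error { return nil }

func main() {
	g := newGuardedCloser(nopCloser{})
	defer g.Close() // the normal path; the finalizer only matters if this is forgotten
}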
-func newFinalizerIterator(itr query.Iterator, log *zap.Logger) query.Iterator { - if itr == nil { - return nil - } - - switch inner := itr.(type) { - case query.FloatIterator: - return newFloatFinalizerIterator(inner, log) - case query.IntegerIterator: - return newIntegerFinalizerIterator(inner, log) - case query.UnsignedIterator: - return newUnsignedFinalizerIterator(inner, log) - case query.StringIterator: - return newStringFinalizerIterator(inner, log) - case query.BooleanIterator: - return newBooleanFinalizerIterator(inner, log) - default: - panic(fmt.Sprintf("unsupported finalizer iterator type: %T", itr)) - } -} - -func newInstrumentedIterator(ctx context.Context, itr query.Iterator) query.Iterator { - if itr == nil { - return nil - } - - span := tracing.SpanFromContext(ctx) - grp := metrics.GroupFromContext(ctx) - if span == nil || grp == nil { - return itr - } - - switch inner := itr.(type) { - case query.FloatIterator: - return newFloatInstrumentedIterator(inner, span, grp) - case query.IntegerIterator: - return newIntegerInstrumentedIterator(inner, span, grp) - case query.UnsignedIterator: - return newUnsignedInstrumentedIterator(inner, span, grp) - case query.StringIterator: - return newStringInstrumentedIterator(inner, span, grp) - case query.BooleanIterator: - return newBooleanInstrumentedIterator(inner, span, grp) - default: - panic(fmt.Sprintf("unsupported instrumented iterator type: %T", itr)) - } -} - -type seriesIterator struct { - cur tsdb.SeriesKeyIterator - point query.StringPoint // reusable buffer - - statsLock sync.Mutex - stats query.IteratorStats - statsBuf query.IteratorStats -} - -func newSeriesIterator(name string, cur tsdb.SeriesKeyIterator) *seriesIterator { - itr := &seriesIterator{ - cur: cur, - point: query.StringPoint{ - Name: name, - Tags: query.NewTags(nil), - }, - } - itr.stats = itr.statsBuf - return itr -} - -// Next returns the next point from the iterator. -func (itr *seriesIterator) Next() (*query.StringPoint, error) { - // Read from the main cursor - b, err := itr.cur.Next() - if err != nil { - itr.copyStats() - return nil, err - } - itr.point.Value = string(b) - - // Exit if we have no more points or we are outside our time range. - if b == nil { - itr.copyStats() - return nil, nil - } - // Track points returned. - itr.statsBuf.PointN++ - itr.statsBuf.SeriesN++ - - // Copy buffer to stats periodically. - if itr.statsBuf.PointN%statsBufferCopyIntervalN == 0 { - itr.copyStats() - } - - return &itr.point, nil -} - -// copyStats copies from the itr stats buffer to the stats under lock. -func (itr *seriesIterator) copyStats() { - itr.statsLock.Lock() - itr.stats = itr.statsBuf - itr.statsLock.Unlock() -} - -// Stats returns stats on the points processed. -func (itr *seriesIterator) Stats() query.IteratorStats { - itr.statsLock.Lock() - stats := itr.stats - itr.statsLock.Unlock() - return stats -} - -// Close closes the iterator. 
-func (itr *seriesIterator) Close() error { - return itr.cur.Close() -} diff --git a/tsdb/engine/tsm1/iterator_test.go b/tsdb/engine/tsm1/iterator_test.go deleted file mode 100644 index 342e41a420b..00000000000 --- a/tsdb/engine/tsm1/iterator_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package tsm1 - -import ( - "runtime" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxql" - "go.uber.org/zap/zaptest" -) - -func BenchmarkIntegerIterator_Next(b *testing.B) { - opt := query.IteratorOptions{ - Aux: []influxql.VarRef{{Val: "f1"}, {Val: "f1"}, {Val: "f1"}, {Val: "f1"}}, - } - aux := []cursorAt{ - &literalValueCursor{value: "foo bar"}, - &literalValueCursor{value: int64(1e3)}, - &literalValueCursor{value: float64(1e3)}, - &literalValueCursor{value: true}, - } - - cur := newIntegerIterator("m0", query.Tags{}, opt, &infiniteIntegerCursor{}, aux, nil, nil) - - b.ResetTimer() - b.ReportAllocs() - for i := 0; i < b.N; i++ { - cur.Next() - } -} - -type infiniteIntegerCursor struct{} - -func (*infiniteIntegerCursor) close() error { - return nil -} - -func (*infiniteIntegerCursor) next() (t int64, v interface{}) { - return 0, 0 -} - -func (*infiniteIntegerCursor) nextInteger() (t int64, v int64) { - return 0, 0 -} - -type testFinalizerIterator struct { - OnClose func() -} - -func (itr *testFinalizerIterator) Next() (*query.FloatPoint, error) { - return nil, nil -} - -func (itr *testFinalizerIterator) Close() error { - // Act as if this is a slow finalizer and ensure that it doesn't block - // the finalizer background thread. - itr.OnClose() - return nil -} - -func (itr *testFinalizerIterator) Stats() query.IteratorStats { - return query.IteratorStats{} -} - -func TestFinalizerIterator(t *testing.T) { - var ( - step1 = make(chan struct{}) - step2 = make(chan struct{}) - step3 = make(chan struct{}) - ) - - l := zaptest.NewLogger(t) - done := make(chan struct{}) - func() { - itr := &testFinalizerIterator{ - OnClose: func() { - // Simulate a slow closing iterator by waiting for the done channel - // to be closed. The done channel is closed by a later finalizer. - close(step1) - <-done - close(step3) - }, - } - newFinalizerIterator(itr, l) - }() - - // Thrash the GC here and hope it eventually notices that itr (above) should be - // cleaned up, triggering the finalizer. - for i := 0; i < 500; i++ { - runtime.GC() - } - - // Wait for the finalizer to close the 1st channel. - timer := time.NewTimer(1000 * time.Millisecond) - select { - case <-timer.C: - defer close(done) - t.Fatal("The finalizer for the iterator did not run") - case <-step1: - // The finalizer has successfully started running, but it won't complete - // until we close the done channel. - timer.Stop() - } - - select { - case <-step3: - t.Fatal("The finalizer should not have finished yet") - default: - } - - // Use a fake value that will be collected by the garbage collector and have - // the finalizer close the channel. This finalizer should run after the iterator's - // finalizer. 
- value := func() int { - foo := &struct { - value int - }{value: 1} - runtime.SetFinalizer(foo, func(value interface{}) { - close(done) - close(step2) - }) - return foo.value + 2 - }() - if value < 2 { - t.Log("This should never be output") - } - - for i := 0; i < 500; i++ { - runtime.GC() - } - - timer.Reset(1000 * time.Millisecond) - select { - case <-timer.C: - t.Fatal("The second finalizer did not run") - case <-step2: - // The finalizer has successfully run and should have - // closed the done channel. - timer.Stop() - } - - // Wait for step3 to finish where the closed value should be set. - timer.Reset(1000 * time.Millisecond) - select { - case <-timer.C: - t.Fatal("The iterator was not finalized") - case <-step3: - timer.Stop() - } -} - -func TestBufCursor_DoubleClose(t *testing.T) { - c := newBufCursor(nilCursor{}, true) - if err := c.close(); err != nil { - t.Fatalf("error closing: %v", err) - } - - // This shouldn't panic - if err := c.close(); err != nil { - t.Fatalf("error closing: %v", err) - } - -} diff --git a/tsdb/engine/tsm1/mmap_unix.go b/tsdb/engine/tsm1/mmap_unix.go deleted file mode 100644 index 0fa303cb423..00000000000 --- a/tsdb/engine/tsm1/mmap_unix.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build !windows && !plan9 - -package tsm1 - -import ( - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -func mmap(f *os.File, offset int64, length int) ([]byte, error) { - // anonymous mapping - if f == nil { - return unix.Mmap(-1, 0, length, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE) - } - - mmap, err := unix.Mmap(int(f.Fd()), 0, length, syscall.PROT_READ, syscall.MAP_SHARED) - if err != nil { - return nil, err - } - - return mmap, nil -} - -func munmap(b []byte) (err error) { - return unix.Munmap(b) -} - -// madviseWillNeed gives the kernel the mmap madvise value MADV_WILLNEED, hinting -// that we plan on using the provided buffer in the near future. -func madviseWillNeed(b []byte) error { - return madvise(b, syscall.MADV_WILLNEED) -} - -func madviseDontNeed(b []byte) error { - return madvise(b, syscall.MADV_DONTNEED) -} - -// From: github.com/boltdb/bolt/bolt_unix.go -func madvise(b []byte, advice int) (err error) { - return unix.Madvise(b, advice) -} diff --git a/tsdb/engine/tsm1/mmap_windows.go b/tsdb/engine/tsm1/mmap_windows.go deleted file mode 100644 index 8a437e23f5d..00000000000 --- a/tsdb/engine/tsm1/mmap_windows.go +++ /dev/null @@ -1,133 +0,0 @@ -package tsm1 - -import ( - "errors" - "os" - "reflect" - "sync" - "syscall" - "unsafe" -) - -// mmap implementation for Windows -// Based on: https://github.com/edsrzf/mmap-go -// Based on: https://github.com/boltdb/bolt/bolt_windows.go -// Ref: https://groups.google.com/forum/#!topic/golang-nuts/g0nLwQI9www - -// We keep this map so that we can get back the original handle from the memory address. 
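Editorial aside for context on how the Unix mmap helpers above are used: TSM readers map a file read-only and then treat the returned byte slice like an ordinary in-memory buffer, unmapping it once the file is no longer referenced. A pared-down, Unix-only sketch using golang.org/x/sys/unix directly (the file name is made up and error handling is minimal):

//go:build !windows && !plan9

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("example.tsm") // hypothetical file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}

	// Map the whole file read-only; the kernel pages data in on demand.
	b, err := unix.Mmap(int(f.Fd()), 0, int(fi.Size()), unix.PROT_READ, unix.MAP_SHARED)
	if err != nil {
		panic(err)
	}
	defer unix.Munmap(b)

	if len(b) > 0 {
		fmt.Printf("mapped %d bytes; first byte: %#x\n", len(b), b[0])
	}
}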
-var handleLock sync.Mutex -var handleMap = map[uintptr]syscall.Handle{} -var fileMap = map[uintptr]*os.File{} - -func openSharedFile(f *os.File) (file *os.File, err error) { - - var access, createmode, sharemode uint32 - var sa *syscall.SecurityAttributes - - access = syscall.GENERIC_READ - sharemode = uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE) - createmode = syscall.OPEN_EXISTING - fileName := f.Name() - - pathp, err := syscall.UTF16PtrFromString(fileName) - if err != nil { - return nil, err - } - - h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0) - - if e != nil { - return nil, e - } - //NewFile does not add finalizer, need to close this manually - return os.NewFile(uintptr(h), fileName), nil -} - -func mmap(f *os.File, offset int64, length int) (out []byte, err error) { - // TODO: Add support for anonymous mapping on windows - if f == nil { - return make([]byte, length), nil - } - - // Open a file mapping handle. - sizelo := uint32(length >> 32) - sizehi := uint32(length) & 0xffffffff - - sharedHandle, errno := openSharedFile(f) - if errno != nil { - return nil, os.NewSyscallError("CreateFile", errno) - } - - h, errno := syscall.CreateFileMapping(syscall.Handle(sharedHandle.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) - if h == 0 { - return nil, os.NewSyscallError("CreateFileMapping", errno) - } - - // Create the memory map. - addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(length)) - if addr == 0 { - return nil, os.NewSyscallError("MapViewOfFile", errno) - } - - handleLock.Lock() - handleMap[addr] = h - fileMap[addr] = sharedHandle - handleLock.Unlock() - - // Convert to a byte array. - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&out)) - hdr.Data = uintptr(unsafe.Pointer(addr)) - hdr.Len = length - hdr.Cap = length - - return -} - -// munmap Windows implementation -// Based on: https://github.com/edsrzf/mmap-go -// Based on: https://github.com/boltdb/bolt/bolt_windows.go -func munmap(b []byte) (err error) { - handleLock.Lock() - defer handleLock.Unlock() - - addr := (uintptr)(unsafe.Pointer(&b[0])) - if err := syscall.UnmapViewOfFile(addr); err != nil { - return os.NewSyscallError("UnmapViewOfFile", err) - } - - handle, ok := handleMap[addr] - if !ok { - // should be impossible; we would've seen the error above - return errors.New("unknown base address") - } - delete(handleMap, addr) - - e := syscall.CloseHandle(syscall.Handle(handle)) - if e != nil { - return os.NewSyscallError("CloseHandle", e) - } - - file, ok := fileMap[addr] - if !ok { - // should be impossible; we would've seen the error above - return errors.New("unknown base address") - } - delete(fileMap, addr) - - e = file.Close() - if e != nil { - return errors.New("close file" + e.Error()) - } - return nil -} - -// madviseWillNeed is unsupported on Windows. -func madviseWillNeed(b []byte) error { return nil } - -// madviseDontNeed is unsupported on Windows. -func madviseDontNeed(b []byte) error { return nil } - -func madvise(b []byte, advice int) error { - // Not implemented - return nil -} diff --git a/tsdb/engine/tsm1/pools.go b/tsdb/engine/tsm1/pools.go deleted file mode 100644 index 02d4d6231ee..00000000000 --- a/tsdb/engine/tsm1/pools.go +++ /dev/null @@ -1,27 +0,0 @@ -package tsm1 - -import "sync" - -var bufPool sync.Pool - -// getBuf returns a buffer with length size from the buffer pool. 
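getBuf and putBuf below are a plain sync.Pool of *[]byte. A hypothetical caller inside this package would borrow, use, and return a buffer like this (readInto and fill are illustrative names, not part of the original file):

func readInto(n int, fill func([]byte)) {
	buf := getBuf(n)  // borrowed from bufPool, sliced to length n
	defer putBuf(buf) // hand the allocation back for reuse
	fill(*buf)
}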
-func getBuf(size int) *[]byte { - x := bufPool.Get() - if x == nil { - b := make([]byte, size) - return &b - } - buf := x.(*[]byte) - if cap(*buf) < size { - bufPool.Put(x) - b := make([]byte, size) - return &b - } - *buf = (*buf)[:size] - return buf -} - -// putBuf returns a buffer to the pool. -func putBuf(buf *[]byte) { - bufPool.Put(buf) -} diff --git a/tsdb/engine/tsm1/predicate.go b/tsdb/engine/tsm1/predicate.go deleted file mode 100644 index 17574efab09..00000000000 --- a/tsdb/engine/tsm1/predicate.go +++ /dev/null @@ -1,684 +0,0 @@ -package tsm1 - -import ( - "bytes" - "fmt" - "regexp" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "google.golang.org/protobuf/proto" -) - -// Predicate is something that can match on a series key. -type Predicate interface { - Clone() influxdb.Predicate - Matches(key []byte) bool - Marshal() ([]byte, error) -} - -const ( // Enumeration of all predicate versions we support unmarshalling. - predicateVersionZero = '\x00' -) - -// UnmarshalPredicate takes stored predicate bytes from a Marshal call and returns a Predicate. -func UnmarshalPredicate(data []byte) (Predicate, error) { - if len(data) == 0 { - return nil, nil - } else if data[0] != predicateVersionZero { - return nil, fmt.Errorf("unknown tag byte: %x", data[0]) - } - - pred := new(datatypes.Predicate) - if err := proto.Unmarshal(data[1:], pred); err != nil { - return nil, err - } - return NewProtobufPredicate(pred) -} - -// -// Design -// - -// Predicates lazily evaluate with memoization so that we can walk a series key -// by the tags without parsing them into a structure and allocating. Each node -// in a predicate tree keeps a cache if it has enough information to have a -// definite value. The predicate state keeps track of all of the tag key/value -// pairs passed to it, and has a reset function to start over for a new series key. -// -// For example, imagine a query like -// -// ("tag1" == "val1" AND "tag2" == "val2") OR "tag3" == "val3" -// -// The state would have tag values set on it like -// -// state.Set("tag1", "val1") => NeedMore -// state.Set("tag2", "not-val2") => NeedMore -// state.Set("tag3", "val3") => True -// -// where after the first Set, the AND and OR clauses are both NeedMore, after -// the second Set, the AND clause is False and the OR clause is NeedMore, and -// after the last Set, the AND clause is still False, and the OR clause is True. -// -// Fast resetting is achieved by having each cache maintain a pointer to the state -// and both having a generation number. When the state resets, it bumps the generation -// number, and when the value is set in the cache, it is set with the current generation -// of the state. When querying the cache, it checks if the generation still matches. - -// -// Protobuf Implementation -// - -// NewProtobufPredicate returns a Predicate that matches based on the comparison structure -// described by the incoming protobuf. -func NewProtobufPredicate(pred *datatypes.Predicate) (Predicate, error) { - // Walk the predicate to collect the tag refs - locs := make(map[string]int) - walkPredicateNodes(pred.Root, func(node *datatypes.Node) { - if node.GetNodeType() == datatypes.Node_TypeTagRef { - switch value := node.GetValue().(type) { - case *datatypes.Node_TagRefValue: - // Only add to the matcher locations the first time we encounter - // the tag key reference. 
This prevents problems with redundant - // predicates like: - // - // foo = a AND foo = b - // foo = c AND foo = d - if _, ok := locs[value.TagRefValue]; !ok { - locs[value.TagRefValue] = len(locs) - } - } - } - }) - - // Construct the shared state and root predicate node. - state := newPredicateState(locs) - root, err := buildPredicateNode(state, pred.Root) - if err != nil { - return nil, err - } - - return &predicateMatcher{ - pred: pred, - state: state, - root: root, - }, nil -} - -// predicateMatcher implements Predicate for a protobuf. -type predicateMatcher struct { - pred *datatypes.Predicate - state *predicateState - root predicateNode -} - -// Clone returns a deep copy of p's state and root node. -// -// It is not safe to modify p.pred on the returned clone. -func (p *predicateMatcher) Clone() influxdb.Predicate { - state := p.state.Clone() - return &predicateMatcher{ - pred: p.pred, - state: state, - root: p.root.Clone(state), - } -} - -// Matches checks if the key matches the predicate by feeding individual tags into the -// state and returning as soon as the root node has a definite answer. -func (p *predicateMatcher) Matches(key []byte) bool { - p.state.Reset() - - // Extract the series from the composite key - key, _ = SeriesAndFieldFromCompositeKey(key) - - // Determine which popping algorithm to use. If there are no escape characters - // we can use the quicker method that only works in that case. - popTag := predicatePopTag - if bytes.IndexByte(key, '\\') != -1 { - popTag = predicatePopTagEscape - } - - // Feed tag pairs into the state and update until we have a definite response. - var tag, value []byte - for len(key) > 0 { - tag, value, key = popTag(key) - if tag == nil || !p.state.Set(tag, value) { - continue - } - resp := p.root.Update() - if resp == predicateResponse_true { - return true - } else if resp == predicateResponse_false { - return false - } - } - - // If it always needed more then it didn't match. For example, consider if - // the predicate matches `tag1=val1` but tag1 is not present in the key. - return false -} - -// Marshal returns a buffer representing the protobuf predicate. -func (p *predicateMatcher) Marshal() ([]byte, error) { - // Prefix it with the version byte so that we can change in the future if necessary - buf, err := proto.Marshal(p.pred) - return append([]byte{predicateVersionZero}, buf...), err -} - -// walkPredicateNodes recursively calls the function for each node. -func walkPredicateNodes(node *datatypes.Node, fn func(node *datatypes.Node)) { - fn(node) - for _, ch := range node.Children { - walkPredicateNodes(ch, fn) - } -} - -// buildPredicateNode takes a protobuf node and converts it into a predicateNode. It is strict -// in what it accepts. 
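Putting the pieces together, a hypothetical caller inside this package would assemble the protobuf for `tag3 = "val3"` (the same node shapes the test helpers later in this diff produce), compile it with NewProtobufPredicate, and match series keys:

func exampleMatch() (bool, error) {
	pred, err := NewProtobufPredicate(&datatypes.Predicate{
		Root: &datatypes.Node{
			NodeType: datatypes.Node_TypeComparisonExpression,
			Value:    &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonEqual},
			Children: []*datatypes.Node{
				{
					NodeType: datatypes.Node_TypeTagRef,
					Value:    &datatypes.Node_TagRefValue{TagRefValue: "tag3"},
				},
				{
					NodeType: datatypes.Node_TypeLiteral,
					Value:    &datatypes.Node_StringValue{StringValue: "val3"},
				},
			},
		},
	})
	if err != nil {
		return false, err
	}
	// Matches feeds each tag of the series key into the shared state.
	return pred.Matches([]byte("bucketorg,tag3=val3")), nil // true
}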
-func buildPredicateNode(state *predicateState, node *datatypes.Node) (predicateNode, error) { - switch node.GetNodeType() { - case datatypes.Node_TypeComparisonExpression: - children := node.GetChildren() - if len(children) != 2 { - return nil, fmt.Errorf("invalid number of children for logical expression: %v", len(children)) - } - left, right := children[0], children[1] - - comp := &predicateNodeComparison{ - predicateCache: newPredicateCache(state), - comp: node.GetComparison(), - } - - // Fill in the left side of the comparison - switch left.GetNodeType() { - // Tag refs look up the location of the tag in the state - case datatypes.Node_TypeTagRef: - idx, ok := state.locs[left.GetTagRefValue()] - if !ok { - return nil, fmt.Errorf("invalid tag ref in comparison: %v", left.GetTagRefValue()) - } - comp.leftIndex = idx - - // Left literals are only allowed to be strings - case datatypes.Node_TypeLiteral: - lit, ok := left.GetValue().(*datatypes.Node_StringValue) - if !ok { - return nil, fmt.Errorf("invalid left literal in comparison: %v", left.GetValue()) - } - comp.leftLiteral = []byte(lit.StringValue) - - default: - return nil, fmt.Errorf("invalid left node in comparison: %v", left.GetNodeType()) - } - - // Fill in the right side of the comparison - switch right.GetNodeType() { - // Tag refs look up the location of the tag in the state - case datatypes.Node_TypeTagRef: - idx, ok := state.locs[right.GetTagRefValue()] - if !ok { - return nil, fmt.Errorf("invalid tag ref in comparison: %v", right.GetTagRefValue()) - } - comp.rightIndex = idx - - // Right literals are allowed to be regexes as well as strings - case datatypes.Node_TypeLiteral: - switch lit := right.GetValue().(type) { - case *datatypes.Node_StringValue: - comp.rightLiteral = []byte(lit.StringValue) - - case *datatypes.Node_RegexValue: - reg, err := regexp.Compile(lit.RegexValue) - if err != nil { - return nil, err - } - comp.rightReg = reg - - default: - return nil, fmt.Errorf("invalid right literal in comparison: %v", right.GetValue()) - } - - default: - return nil, fmt.Errorf("invalid right node in comparison: %v", right.GetNodeType()) - } - - // Ensure that a regex is set on the right if and only if the comparison is a regex - if comp.rightReg == nil { - if comp.comp == datatypes.Node_ComparisonRegex || comp.comp == datatypes.Node_ComparisonNotRegex { - return nil, fmt.Errorf("invalid comparison involving regex: %v", node) - } - } else { - if comp.comp != datatypes.Node_ComparisonRegex && comp.comp != datatypes.Node_ComparisonNotRegex { - return nil, fmt.Errorf("invalid comparison not against regex: %v", node) - } - } - - return comp, nil - - case datatypes.Node_TypeLogicalExpression: - children := node.GetChildren() - if len(children) != 2 { - return nil, fmt.Errorf("invalid number of children for logical expression: %v", len(children)) - } - - left, err := buildPredicateNode(state, children[0]) - if err != nil { - return nil, err - } - right, err := buildPredicateNode(state, children[1]) - if err != nil { - return nil, err - } - - switch node.GetLogical() { - case datatypes.Node_LogicalAnd: - return &predicateNodeAnd{ - predicateCache: newPredicateCache(state), - left: left, - right: right, - }, nil - - case datatypes.Node_LogicalOr: - return &predicateNodeOr{ - predicateCache: newPredicateCache(state), - left: left, - right: right, - }, nil - - default: - return nil, fmt.Errorf("unknown logical type: %v", node.GetLogical()) - } - - default: - return nil, fmt.Errorf("unsupported predicate type: %v", node.GetNodeType()) - 
} -} - -// -// Predicate Responses -// - -type predicateResponse uint8 - -const ( - predicateResponse_needMore predicateResponse = iota - predicateResponse_true - predicateResponse_false -) - -// -// Predicate State -// - -// predicateState keeps track of tag key=>value mappings with cheap methods -// to reset to a blank state. -type predicateState struct { - gen uint64 - locs map[string]int - values [][]byte -} - -// newPredicateState creates a predicateState given a map of keys to indexes into an -// an array. -func newPredicateState(locs map[string]int) *predicateState { - return &predicateState{ - gen: 1, // so that caches start out unfilled since they start at 0 - locs: locs, - values: make([][]byte, len(locs)), - } -} - -// Clone returns a deep copy of p. -func (p *predicateState) Clone() *predicateState { - q := &predicateState{ - gen: p.gen, - locs: make(map[string]int, len(p.locs)), - values: make([][]byte, len(p.values)), - } - - for k, v := range p.locs { - q.locs[k] = v - } - copy(q.values, p.values) - - return q -} - -// Reset clears any set values for the state. -func (p *predicateState) Reset() { - p.gen++ - - for i := range p.values { - p.values[i] = nil - } -} - -// Set sets the key to be the value and returns true if the key is part of the considered -// set of keys. -func (p *predicateState) Set(key, value []byte) bool { - i, ok := p.locs[string(key)] - if ok { - p.values[i] = value - } - return ok -} - -// -// Predicate Cache -// - -// predicateCache interacts with the predicateState to keep determined responses -// memoized until the state has been Reset to avoid recomputing nodes. -type predicateCache struct { - state *predicateState - gen uint64 - resp predicateResponse -} - -// newPredicateCache constructs a predicateCache for the provided state. -func newPredicateCache(state *predicateState) predicateCache { - return predicateCache{ - state: state, - gen: 0, - resp: predicateResponse_needMore, - } -} - -// Clone returns a deep copy of p. -func (p *predicateCache) Clone(state *predicateState) *predicateCache { - if state == nil { - state = p.state.Clone() - } - return &predicateCache{ - state: state, - gen: p.gen, - resp: p.resp, - } -} - -// Cached returns the cached response and a boolean indicating if it is valid. -func (p *predicateCache) Cached() (predicateResponse, bool) { - return p.resp, p.gen == p.state.gen -} - -// Store sets the cache to the provided response until the state is Reset. -func (p *predicateCache) Store(resp predicateResponse) { - p.gen = p.state.gen - p.resp = resp -} - -// -// Predicate Nodes -// - -// predicateNode is the interface that any parts of a predicate tree implement. -type predicateNode interface { - // Update informs the node that the state has been updated and asks it to return - // a response. - Update() predicateResponse - - // Clone returns a deep copy of the node. - Clone(state *predicateState) predicateNode -} - -// predicateNodeAnd combines two predicate nodes with an And. -type predicateNodeAnd struct { - predicateCache - left, right predicateNode -} - -// Clone returns a deep copy of p. -func (p *predicateNodeAnd) Clone(state *predicateState) predicateNode { - return &predicateNodeAnd{ - predicateCache: *p.predicateCache.Clone(state), - left: p.left.Clone(state), - right: p.right.Clone(state), - } -} - -// Update checks if both of the left and right nodes are true. If either is false -// then the node is definitely false. Otherwise, it needs more information. 
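Restated compactly, AND over two partial answers follows this three-valued rule (and3 is a hypothetical helper; the real Update below additionally short-circuits and returns needMore as soon as its left child is undetermined, without consulting the right one):

func and3(left, right predicateResponse) predicateResponse {
	switch {
	case left == predicateResponse_false || right == predicateResponse_false:
		return predicateResponse_false // false dominates
	case left == predicateResponse_true && right == predicateResponse_true:
		return predicateResponse_true // both sides must be true
	default:
		return predicateResponse_needMore // still waiting on more tags
	}
}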
-func (p *predicateNodeAnd) Update() predicateResponse { - if resp, ok := p.Cached(); ok { - return resp - } - - left := p.left.Update() - if left == predicateResponse_false { - p.Store(predicateResponse_false) - return predicateResponse_false - } else if left == predicateResponse_needMore { - return predicateResponse_needMore - } - - right := p.right.Update() - if right == predicateResponse_false { - p.Store(predicateResponse_false) - return predicateResponse_false - } else if right == predicateResponse_needMore { - return predicateResponse_needMore - } - - return predicateResponse_true -} - -// predicateNodeOr combines two predicate nodes with an Or. -type predicateNodeOr struct { - predicateCache - left, right predicateNode -} - -// Clone returns a deep copy of p. -func (p *predicateNodeOr) Clone(state *predicateState) predicateNode { - return &predicateNodeOr{ - predicateCache: *p.predicateCache.Clone(state), - left: p.left.Clone(state), - right: p.right.Clone(state), - } -} - -// Update checks if either the left and right nodes are true. If both nodes -// are false, then the node is definitely false. Otherwise, it needs more information. -func (p *predicateNodeOr) Update() predicateResponse { - if resp, ok := p.Cached(); ok { - return resp - } - - left := p.left.Update() - if left == predicateResponse_true { - p.Store(predicateResponse_true) - return predicateResponse_true - } - - right := p.right.Update() - if right == predicateResponse_true { - p.Store(predicateResponse_true) - return predicateResponse_true - } - - if left == predicateResponse_false && right == predicateResponse_false { - p.Store(predicateResponse_false) - return predicateResponse_false - } - - return predicateResponse_needMore -} - -// predicateNodeComparison compares values of tags. -type predicateNodeComparison struct { - predicateCache - comp datatypes.Node_Comparison - rightReg *regexp.Regexp - leftLiteral []byte - rightLiteral []byte - leftIndex int - rightIndex int -} - -// Clone returns a deep copy of p. -func (p *predicateNodeComparison) Clone(state *predicateState) predicateNode { - q := &predicateNodeComparison{ - predicateCache: *p.predicateCache.Clone(state), - comp: p.comp, - rightReg: p.rightReg, - leftIndex: p.leftIndex, - rightIndex: p.rightIndex, - } - - if p.leftLiteral != nil { - q.leftLiteral = make([]byte, len(p.leftLiteral)) - copy(q.leftLiteral, p.leftLiteral) - } - if p.rightLiteral != nil { - q.rightLiteral = make([]byte, len(p.rightLiteral)) - copy(q.rightLiteral, p.rightLiteral) - } - return q -} - -// Update checks if both sides of the comparison are determined, and if so, evaluates -// the comparison to a determined truth value. -func (p *predicateNodeComparison) Update() predicateResponse { - if resp, ok := p.Cached(); ok { - return resp - } - - left := p.leftLiteral - if left == nil { - left = p.state.values[p.leftIndex] - if left == nil { - return predicateResponse_needMore - } - } - - right := p.rightLiteral - if right == nil && p.rightReg == nil { - right = p.state.values[p.rightIndex] - if right == nil { - return predicateResponse_needMore - } - } - - if predicateEval(p.comp, left, right, p.rightReg) { - p.Store(predicateResponse_true) - return predicateResponse_true - } else { - p.Store(predicateResponse_false) - return predicateResponse_false - } -} - -// predicateEval is a helper to do the appropriate comparison depending on which comparison -// enumeration value was passed. 
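A few hypothetical spot checks for predicateEval, defined just below: the string comparisons are plain byte-wise comparisons, and the regex variants use the pre-compiled right-hand expression:

func predicateEvalExamples() {
	_ = predicateEval(datatypes.Node_ComparisonEqual, []byte("val3"), []byte("val3"), nil)             // true
	_ = predicateEval(datatypes.Node_ComparisonStartsWith, []byte("val3"), []byte("va"), nil)          // true
	_ = predicateEval(datatypes.Node_ComparisonLess, []byte("val3"), []byte("val4"), nil)              // true
	_ = predicateEval(datatypes.Node_ComparisonRegex, []byte("val3"), nil, regexp.MustCompile("...3")) // true
}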
-func predicateEval(comp datatypes.Node_Comparison, left, right []byte, rightReg *regexp.Regexp) bool { - switch comp { - case datatypes.Node_ComparisonEqual: - return string(left) == string(right) - case datatypes.Node_ComparisonNotEqual: - return string(left) != string(right) - case datatypes.Node_ComparisonStartsWith: - return bytes.HasPrefix(left, right) - case datatypes.Node_ComparisonLess: - return string(left) < string(right) - case datatypes.Node_ComparisonLessEqual: - return string(left) <= string(right) - case datatypes.Node_ComparisonGreater: - return string(left) > string(right) - case datatypes.Node_ComparisonGreaterEqual: - return string(left) >= string(right) - case datatypes.Node_ComparisonRegex: - return rightReg.Match(left) - case datatypes.Node_ComparisonNotRegex: - return !rightReg.Match(left) - } - return false -} - -// -// Popping Tags -// - -// The models package has some of this logic as well, but doesn't export ways to get -// at individual tags one at a time. In the common, no escape characters case, popping -// the first tag off of a series key takes around ~10ns. - -// predicatePopTag pops a tag=value pair from the front of series, returning the -// remainder in rest. it assumes there are no escaped characters in the series. -func predicatePopTag(series []byte) (tag, value []byte, rest []byte) { - // find the first ',' - series, rest, _ = bytes.Cut(series, []byte(",")) - - // find the first '=' - tag, value, _ = bytes.Cut(series, []byte("=")) - - return tag, value, rest -} - -// predicatePopTagEscape pops a tag=value pair from the front of series, returning the -// remainder in rest. it assumes there are possibly/likely escaped characters in the series. -func predicatePopTagEscape(series []byte) (tag, value []byte, rest []byte) { - // find the first unescaped ',' - for j := uint(0); j < uint(len(series)); { - i := bytes.IndexByte(series[j:], ',') - if i < 0 { - break // this is the last tag pair - } - - ui := uint(i) + j // make index relative to full series slice - if ui > 0 && series[ui-1] == '\\' { // the comma is escaped - j = ui + 1 - continue - } - - series, rest = series[:ui], series[ui+1:] - break - } - - // find the first unescaped '=' - for j := uint(0); j < uint(len(series)); { - i := bytes.IndexByte(series[j:], '=') - if i < 0 { - break // there is no tag value - } - ui := uint(i) + j // make index relative to full series slice - if ui > 0 && series[ui-1] == '\\' { // the equals is escaped - j = ui + 1 - continue - } - - tag, value = series[:ui], series[ui+1:] - break - } - - // sad time: it's possible this tag/value has escaped characters, so we have to - // find an unescape them. since the byte slice may refer to read-only memory, we - // can't do this in place, so we make copies. 
- if bytes.IndexByte(tag, '\\') != -1 { - unescapedTag := make([]byte, 0, len(tag)) - for i, c := range tag { - if c == '\\' && i+1 < len(tag) { - if c := tag[i+1]; c == ',' || c == ' ' || c == '=' { - continue - } - } - unescapedTag = append(unescapedTag, c) - } - tag = unescapedTag - } - - if bytes.IndexByte(value, '\\') != -1 { - unescapedValue := make([]byte, 0, len(value)) - for i, c := range value { - if c == '\\' && i+1 < len(value) { - if c := value[i+1]; c == ',' || c == ' ' || c == '=' { - continue - } - } - unescapedValue = append(unescapedValue, c) - } - value = unescapedValue - } - - return tag, value, rest -} diff --git a/tsdb/engine/tsm1/predicate_test.go b/tsdb/engine/tsm1/predicate_test.go deleted file mode 100644 index dd9bb820e5b..00000000000 --- a/tsdb/engine/tsm1/predicate_test.go +++ /dev/null @@ -1,545 +0,0 @@ -package tsm1 - -import ( - "fmt" - "testing" - - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" -) - -func TestPredicatePopTagEscape(t *testing.T) { - cases := []struct { - Key string - Tag string - Value string - Rest string - }{ - {Key: "", Tag: "", Value: "", Rest: ""}, - {Key: "invalid", Tag: "", Value: "", Rest: ""}, - {Key: "region=west,server=b,foo=bar", Tag: "region", Value: "west", Rest: "server=b,foo=bar"}, - {Key: "region=west", Tag: "region", Value: "west", Rest: ""}, - {Key: `re\=gion=west,server=a`, Tag: `re=gion`, Value: "west", Rest: "server=a"}, - {Key: `region=w\,est,server=a`, Tag: `region`, Value: "w,est", Rest: "server=a"}, - {Key: `hi\ yo\ =w\,est,server=a`, Tag: `hi yo `, Value: "w,est", Rest: "server=a"}, - {Key: `\ e\ \=o=world,server=a`, Tag: ` e =o`, Value: "world", Rest: "server=a"}, - } - - for _, c := range cases { - tag, value, rest := predicatePopTagEscape([]byte(c.Key)) - if string(tag) != c.Tag { - t.Fatalf("got returned tag %q expected %q", tag, c.Tag) - } else if string(value) != c.Value { - t.Fatalf("got returned value %q expected %q", value, c.Value) - } else if string(rest) != c.Rest { - t.Fatalf("got returned remainder %q expected %q", rest, c.Rest) - } - } -} - -func TestPredicate_Matches(t *testing.T) { - cases := []struct { - Name string - Predicate *datatypes.Predicate - Key string - Matches bool - }{ - { - Name: "Basic Matching", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag3"), stringNode("val3"))), - Key: "bucketorg,tag3=val3", - Matches: true, - }, - - { - Name: "Basic Unmatching", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag3"), stringNode("val3"))), - Key: "bucketorg,tag3=val2", - Matches: false, - }, - - { - Name: "Compound Logical Matching", - Predicate: predicate( - orNode( - andNode( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("foo"), stringNode("bar")), - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("baz"), stringNode("no"))), - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag3"), stringNode("val3")))), - Key: "bucketorg,foo=bar,baz=bif,tag3=val3", - Matches: true, - }, - - { - Name: "Compound Logical Unmatching", - Predicate: predicate( - orNode( - andNode( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("foo"), stringNode("bar")), - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("baz"), stringNode("no"))), - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag3"), stringNode("val3")))), - Key: "bucketorg,foo=bar,baz=bif,tag3=val2", - Matches: false, - }, - - { - Name: "Logical Or Short Circuit", - Predicate: predicate( - orNode( - 
comparisonNode(datatypes.Node_ComparisonEqual, tagNode("foo"), stringNode("bar")), - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("baz"), stringNode("no")))), - Key: "bucketorg,baz=bif,foo=bar,tag3=val3", - Matches: true, - }, - - { - Name: "Logical And Short Circuit", - Predicate: predicate( - andNode( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("foo"), stringNode("no")), - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("baz"), stringNode("bif")))), - Key: "bucketorg,baz=bif,foo=bar,tag3=val3", - Matches: false, - }, - - { - Name: "Logical And Matching", - Predicate: predicate( - andNode( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("foo"), stringNode("bar")), - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("baz"), stringNode("bif")))), - Key: "bucketorg,baz=bif,foo=bar,tag3=val3", - Matches: true, - }, - - { - Name: "Logical And Matching Reduce (Simplify)", - Predicate: predicate( - andNode( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("foo"), stringNode("bar")), - comparisonNode(datatypes.Node_ComparisonNotEqual, tagNode("foo"), stringNode("bif")))), - Key: "bucketorg,baz=bif,foo=bar,tag3=val3", - Matches: true, - }, - - { - Name: "Regex Matching", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonRegex, tagNode("tag3"), regexNode("...3"))), - Key: "bucketorg,tag3=val3", - Matches: true, - }, - - { - Name: "NotRegex Matching", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonNotRegex, tagNode("tag3"), regexNode("...4"))), - Key: "bucketorg,tag3=val3", - Matches: true, - }, - - { - Name: "Regex Unmatching", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonRegex, tagNode("tag3"), regexNode("...4"))), - Key: "bucketorg,tag3=val3", - Matches: false, - }, - - { - Name: "NotRegex Unmatching", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonNotRegex, tagNode("tag3"), regexNode("...3"))), - Key: "bucketorg,tag3=val3", - Matches: false, - }, - - { - Name: "Basic Matching Reversed", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonEqual, stringNode("val3"), tagNode("tag3"))), - Key: "bucketorg,tag2=val2,tag3=val3", - Matches: true, - }, - - { - Name: "Tag Matching Tag", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag4"), tagNode("tag3"))), - Key: "bucketorg,tag3=val3,tag4=val3", - Matches: true, - }, - - { - Name: "No Tag", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag4"), stringNode("val4"))), - Key: "bucketorg,tag3=val3", - Matches: false, - }, - - { - Name: "Not Equal", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonNotEqual, tagNode("tag3"), stringNode("val4"))), - Key: "bucketorg,tag3=val3", - Matches: true, - }, - - { - Name: "Starts With", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonStartsWith, tagNode("tag3"), stringNode("va"))), - Key: "bucketorg,tag3=val3", - Matches: true, - }, - - { - Name: "Less", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonLess, tagNode("tag3"), stringNode("val4"))), - Key: "bucketorg,tag3=val3", - Matches: true, - }, - - { - Name: "Less Equal", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonLessEqual, tagNode("tag3"), stringNode("val4"))), - Key: "bucketorg,tag3=val3", - Matches: true, - }, - - { - Name: "Greater", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonGreater, tagNode("tag3"), stringNode("u"))), - Key: "bucketorg,tag3=val3", - 
Matches: true, - }, - - { - Name: "Greater Equal;", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonGreaterEqual, tagNode("tag3"), stringNode("u"))), - Key: "bucketorg,tag3=val3", - Matches: true, - }, - - { - Name: "Escaping Matching", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag3"), stringNode("val3"))), - Key: `bucketorg,tag1=\,foo,tag2=\ bar,tag2\=more=val2\,\ \=hello,tag3=val3`, - Matches: true, - }, - } - - for _, test := range cases { - t.Run(test.Name, func(t *testing.T) { - pred, err := NewProtobufPredicate(test.Predicate) - if err != nil { - t.Fatal("compile failure:", err) - } - - if got, exp := pred.Matches([]byte(test.Key)), test.Matches; got != exp { - t.Fatal("match failure:", "got", got, "!=", "exp", exp) - } - - // Clone and try again. - pred = pred.Clone() - if got, exp := pred.Matches([]byte(test.Key)), test.Matches; got != exp { - t.Fatal("cloned match failure:", "got", got, "!=", "exp", exp) - } - }) - } -} - -func TestPredicate_Unmarshal(t *testing.T) { - protoPred := predicate( - orNode( - andNode( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("foo"), stringNode("bar")), - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("baz"), stringNode("no"))), - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag3"), stringNode("val3")))) - - pred1, err := NewProtobufPredicate(protoPred) - if err != nil { - t.Fatal(err) - } - - predData, err := pred1.Marshal() - if err != nil { - t.Fatal(err) - } - - _, err = UnmarshalPredicate(predData) - if err != nil { - t.Fatal(err) - } - - t.Skip("TODO(dstrand1): Fix cmp for predicateMatcher. See in IDPE: https://github.com/influxdata/idpe/blob/7c52ef7c9bc387905f2864c8730c7366f07f8a1e/storage/tsdb/tsm1/predicate_test.go#L285") - - //if !cmp.Equal(pred1, pred2, cmputil.IgnoreProtobufUnexported()) { - // t.Fatal("mismatch on unmarshal") - //} -} - -func TestPredicate_Unmarshal_InvalidTag(t *testing.T) { - _, err := UnmarshalPredicate([]byte("\xff")) - if err == nil { - t.Fatal("expected error") - } -} - -func TestPredicate_Unmarshal_InvalidProtobuf(t *testing.T) { - _, err := UnmarshalPredicate([]byte("\x00\xff")) - if err == nil { - t.Fatal("expected error") - } -} - -func TestPredicate_Unmarshal_Empty(t *testing.T) { - pred, err := UnmarshalPredicate(nil) - if err != nil { - t.Fatal(err) - } else if pred != nil { - t.Fatal("expected no predicate") - } -} - -func TestPredicate_Invalid_Protobuf(t *testing.T) { - cases := []struct { - Name string - Predicate *datatypes.Predicate - }{ - { - Name: "Invalid Comparison Num Children", - Predicate: predicate(&datatypes.Node{ - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonEqual}, - Children: []*datatypes.Node{{}, {}, {}}, - }), - }, - - { - Name: "Mismatching Left Tag Type", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonEqual, &datatypes.Node{ - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_IntegerValue{IntegerValue: 2}, - }, tagNode("tag"))), - }, - - { - Name: "Mismatching Left Literal Type", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonEqual, &datatypes.Node{ - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_IntegerValue{IntegerValue: 2}, - }, tagNode("tag"))), - }, - - { - Name: "Invalid Left Node Type", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonEqual, &datatypes.Node{ - NodeType: datatypes.Node_TypeComparisonExpression, - Value: 
&datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonEqual}, - }, tagNode("tag"))), - }, - - { - Name: "Mismatching Right Tag Type", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag"), &datatypes.Node{ - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_IntegerValue{IntegerValue: 2}, - })), - }, - - { - Name: "Invalid Regex", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonRegex, tagNode("tag3"), regexNode("("))), - }, - - { - Name: "Mismatching Right Literal Type", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag"), &datatypes.Node{ - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_IntegerValue{IntegerValue: 2}, - })), - }, - - { - Name: "Invalid Right Node Type", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag"), &datatypes.Node{ - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonEqual}, - })), - }, - - { - Name: "Invalid Comparison Without Regex", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonRegex, tagNode("tag3"), stringNode("val3"))), - }, - - { - Name: "Invalid Comparison With Regex", - Predicate: predicate( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag3"), regexNode("."))), - }, - - { - Name: "Invalid Logical Operation Children", - Predicate: predicate(&datatypes.Node{ - NodeType: datatypes.Node_TypeLogicalExpression, - Value: &datatypes.Node_Logical_{Logical: datatypes.Node_LogicalAnd}, - Children: []*datatypes.Node{{}, {}, {}}, - }), - }, - - { - Name: "Invalid Left Logical Expression", - Predicate: predicate( - andNode( - tagNode("tag"), - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag3"), stringNode("val3")), - )), - }, - - { - Name: "Invalid Right Logical Expression", - Predicate: predicate( - andNode( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag3"), stringNode("val3")), - tagNode("tag"), - )), - }, - - { - Name: "Invalid Logical Value", - Predicate: predicate(&datatypes.Node{ - NodeType: datatypes.Node_TypeLogicalExpression, - Value: &datatypes.Node_Logical_{Logical: 9999}, - Children: []*datatypes.Node{ - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag3"), stringNode("val3")), - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag3"), stringNode("val3")), - }, - }), - }, - - { - Name: "Invalid Root Node", - Predicate: predicate(tagNode("tag3")), - }, - } - - for _, test := range cases { - t.Run(test.Name, func(t *testing.T) { - _, err := NewProtobufPredicate(test.Predicate) - if err == nil { - t.Fatal("expected compile failure") - } - }) - } -} - -func BenchmarkPredicate(b *testing.B) { - run := func(b *testing.B, predicate *datatypes.Predicate) { - pred, err := NewProtobufPredicate(predicate) - if err != nil { - b.Fatal(err) - } - - series := []byte("bucketorg,") - for i := 0; i < 10; i++ { - series = append(series, fmt.Sprintf("tag%d=val%d,", i, i)...) 
- } - series = series[:len(series)-1] - - b.SetBytes(int64(len(series))) - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - pred.Matches(series) - } - } - - b.Run("Basic", func(b *testing.B) { - run(b, predicate( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag5"), stringNode("val5")), - )) - }) - - b.Run("Compound", func(b *testing.B) { - run(b, predicate( - orNode( - andNode( - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag0"), stringNode("val0")), - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag6"), stringNode("val5")), - ), - comparisonNode(datatypes.Node_ComparisonEqual, tagNode("tag5"), stringNode("val5")), - ), - )) - }) -} - -// -// Helpers to create predicate protobufs -// - -func tagNode(s string) *datatypes.Node { - return &datatypes.Node{ - NodeType: datatypes.Node_TypeTagRef, - Value: &datatypes.Node_TagRefValue{TagRefValue: s}, - } -} - -func stringNode(s string) *datatypes.Node { - return &datatypes.Node{ - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_StringValue{StringValue: s}, - } -} - -func regexNode(s string) *datatypes.Node { - return &datatypes.Node{ - NodeType: datatypes.Node_TypeLiteral, - Value: &datatypes.Node_RegexValue{RegexValue: s}, - } -} - -func comparisonNode(comp datatypes.Node_Comparison, left, right *datatypes.Node) *datatypes.Node { - return &datatypes.Node{ - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: comp}, - Children: []*datatypes.Node{left, right}, - } -} - -func andNode(left, right *datatypes.Node) *datatypes.Node { - return &datatypes.Node{ - NodeType: datatypes.Node_TypeLogicalExpression, - Value: &datatypes.Node_Logical_{Logical: datatypes.Node_LogicalAnd}, - Children: []*datatypes.Node{left, right}, - } -} - -func orNode(left, right *datatypes.Node) *datatypes.Node { - return &datatypes.Node{ - NodeType: datatypes.Node_TypeLogicalExpression, - Value: &datatypes.Node_Logical_{Logical: datatypes.Node_LogicalOr}, - Children: []*datatypes.Node{left, right}, - } -} - -func predicate(root *datatypes.Node) *datatypes.Predicate { - return &datatypes.Predicate{Root: root} -} diff --git a/tsdb/engine/tsm1/reader.gen.go b/tsdb/engine/tsm1/reader.gen.go deleted file mode 100644 index 227d9233683..00000000000 --- a/tsdb/engine/tsm1/reader.gen.go +++ /dev/null @@ -1,285 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! -// Source: reader.gen.go.tmpl - -package tsm1 - -import ( - "github.com/influxdata/influxdb/v2/tsdb" -) - -// ReadFloatBlockAt returns the float values corresponding to the given index entry. -func (t *TSMReader) ReadFloatBlockAt(entry *IndexEntry, vals *[]FloatValue) ([]FloatValue, error) { - t.mu.RLock() - v, err := t.accessor.readFloatBlock(entry, vals) - t.mu.RUnlock() - return v, err -} - -// ReadFloatArrayBlockAt fills vals with the float values corresponding to the given index entry. -func (t *TSMReader) ReadFloatArrayBlockAt(entry *IndexEntry, vals *tsdb.FloatArray) error { - t.mu.RLock() - err := t.accessor.readFloatArrayBlock(entry, vals) - t.mu.RUnlock() - return err -} - -// ReadIntegerBlockAt returns the integer values corresponding to the given index entry. 
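A hypothetical caller inside this package, combining the index with the generated accessors above: look up every index entry for a series key and decode each float block (readAllFloats is an illustrative name):

func readAllFloats(r *TSMReader, key []byte) ([]FloatValue, error) {
	var out, scratch []FloatValue
	for _, e := range r.Entries(key) {
		e := e // ReadFloatBlockAt takes a pointer to the entry
		vals, err := r.ReadFloatBlockAt(&e, &scratch)
		if err != nil {
			return nil, err
		}
		out = append(out, vals...)
	}
	return out, nil
}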
-func (t *TSMReader) ReadIntegerBlockAt(entry *IndexEntry, vals *[]IntegerValue) ([]IntegerValue, error) { - t.mu.RLock() - v, err := t.accessor.readIntegerBlock(entry, vals) - t.mu.RUnlock() - return v, err -} - -// ReadIntegerArrayBlockAt fills vals with the integer values corresponding to the given index entry. -func (t *TSMReader) ReadIntegerArrayBlockAt(entry *IndexEntry, vals *tsdb.IntegerArray) error { - t.mu.RLock() - err := t.accessor.readIntegerArrayBlock(entry, vals) - t.mu.RUnlock() - return err -} - -// ReadUnsignedBlockAt returns the unsigned values corresponding to the given index entry. -func (t *TSMReader) ReadUnsignedBlockAt(entry *IndexEntry, vals *[]UnsignedValue) ([]UnsignedValue, error) { - t.mu.RLock() - v, err := t.accessor.readUnsignedBlock(entry, vals) - t.mu.RUnlock() - return v, err -} - -// ReadUnsignedArrayBlockAt fills vals with the unsigned values corresponding to the given index entry. -func (t *TSMReader) ReadUnsignedArrayBlockAt(entry *IndexEntry, vals *tsdb.UnsignedArray) error { - t.mu.RLock() - err := t.accessor.readUnsignedArrayBlock(entry, vals) - t.mu.RUnlock() - return err -} - -// ReadStringBlockAt returns the string values corresponding to the given index entry. -func (t *TSMReader) ReadStringBlockAt(entry *IndexEntry, vals *[]StringValue) ([]StringValue, error) { - t.mu.RLock() - v, err := t.accessor.readStringBlock(entry, vals) - t.mu.RUnlock() - return v, err -} - -// ReadStringArrayBlockAt fills vals with the string values corresponding to the given index entry. -func (t *TSMReader) ReadStringArrayBlockAt(entry *IndexEntry, vals *tsdb.StringArray) error { - t.mu.RLock() - err := t.accessor.readStringArrayBlock(entry, vals) - t.mu.RUnlock() - return err -} - -// ReadBooleanBlockAt returns the boolean values corresponding to the given index entry. -func (t *TSMReader) ReadBooleanBlockAt(entry *IndexEntry, vals *[]BooleanValue) ([]BooleanValue, error) { - t.mu.RLock() - v, err := t.accessor.readBooleanBlock(entry, vals) - t.mu.RUnlock() - return v, err -} - -// ReadBooleanArrayBlockAt fills vals with the boolean values corresponding to the given index entry. -func (t *TSMReader) ReadBooleanArrayBlockAt(entry *IndexEntry, vals *tsdb.BooleanArray) error { - t.mu.RLock() - err := t.accessor.readBooleanArrayBlock(entry, vals) - t.mu.RUnlock() - return err -} - -// blockAccessor abstracts a method of accessing blocks from a -// TSM file. 
-type blockAccessor interface { - init() (*indirectIndex, error) - read(key []byte, timestamp int64) ([]Value, error) - readAll(key []byte) ([]Value, error) - readBlock(entry *IndexEntry, values []Value) ([]Value, error) - readFloatBlock(entry *IndexEntry, values *[]FloatValue) ([]FloatValue, error) - readFloatArrayBlock(entry *IndexEntry, values *tsdb.FloatArray) error - readIntegerBlock(entry *IndexEntry, values *[]IntegerValue) ([]IntegerValue, error) - readIntegerArrayBlock(entry *IndexEntry, values *tsdb.IntegerArray) error - readUnsignedBlock(entry *IndexEntry, values *[]UnsignedValue) ([]UnsignedValue, error) - readUnsignedArrayBlock(entry *IndexEntry, values *tsdb.UnsignedArray) error - readStringBlock(entry *IndexEntry, values *[]StringValue) ([]StringValue, error) - readStringArrayBlock(entry *IndexEntry, values *tsdb.StringArray) error - readBooleanBlock(entry *IndexEntry, values *[]BooleanValue) ([]BooleanValue, error) - readBooleanArrayBlock(entry *IndexEntry, values *tsdb.BooleanArray) error - readBytes(entry *IndexEntry, buf []byte) (uint32, []byte, error) - rename(path string) error - path() string - close() error - free() error -} - -func (m *mmapAccessor) readFloatBlock(entry *IndexEntry, values *[]FloatValue) ([]FloatValue, error) { - m.incAccess() - - m.mu.RLock() - if int64(len(m.b)) < entry.Offset+int64(entry.Size) { - m.mu.RUnlock() - return nil, ErrTSMClosed - } - - a, err := DecodeFloatBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values) - m.mu.RUnlock() - - if err != nil { - return nil, err - } - - return a, nil -} - -func (m *mmapAccessor) readFloatArrayBlock(entry *IndexEntry, values *tsdb.FloatArray) error { - m.incAccess() - - m.mu.RLock() - if int64(len(m.b)) < entry.Offset+int64(entry.Size) { - m.mu.RUnlock() - return ErrTSMClosed - } - - err := DecodeFloatArrayBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values) - m.mu.RUnlock() - - return err -} - -func (m *mmapAccessor) readIntegerBlock(entry *IndexEntry, values *[]IntegerValue) ([]IntegerValue, error) { - m.incAccess() - - m.mu.RLock() - if int64(len(m.b)) < entry.Offset+int64(entry.Size) { - m.mu.RUnlock() - return nil, ErrTSMClosed - } - - a, err := DecodeIntegerBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values) - m.mu.RUnlock() - - if err != nil { - return nil, err - } - - return a, nil -} - -func (m *mmapAccessor) readIntegerArrayBlock(entry *IndexEntry, values *tsdb.IntegerArray) error { - m.incAccess() - - m.mu.RLock() - if int64(len(m.b)) < entry.Offset+int64(entry.Size) { - m.mu.RUnlock() - return ErrTSMClosed - } - - err := DecodeIntegerArrayBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values) - m.mu.RUnlock() - - return err -} - -func (m *mmapAccessor) readUnsignedBlock(entry *IndexEntry, values *[]UnsignedValue) ([]UnsignedValue, error) { - m.incAccess() - - m.mu.RLock() - if int64(len(m.b)) < entry.Offset+int64(entry.Size) { - m.mu.RUnlock() - return nil, ErrTSMClosed - } - - a, err := DecodeUnsignedBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values) - m.mu.RUnlock() - - if err != nil { - return nil, err - } - - return a, nil -} - -func (m *mmapAccessor) readUnsignedArrayBlock(entry *IndexEntry, values *tsdb.UnsignedArray) error { - m.incAccess() - - m.mu.RLock() - if int64(len(m.b)) < entry.Offset+int64(entry.Size) { - m.mu.RUnlock() - return ErrTSMClosed - } - - err := DecodeUnsignedArrayBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values) - m.mu.RUnlock() - - return err -} - -func (m *mmapAccessor) 
readStringBlock(entry *IndexEntry, values *[]StringValue) ([]StringValue, error) { - m.incAccess() - - m.mu.RLock() - if int64(len(m.b)) < entry.Offset+int64(entry.Size) { - m.mu.RUnlock() - return nil, ErrTSMClosed - } - - a, err := DecodeStringBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values) - m.mu.RUnlock() - - if err != nil { - return nil, err - } - - return a, nil -} - -func (m *mmapAccessor) readStringArrayBlock(entry *IndexEntry, values *tsdb.StringArray) error { - m.incAccess() - - m.mu.RLock() - if int64(len(m.b)) < entry.Offset+int64(entry.Size) { - m.mu.RUnlock() - return ErrTSMClosed - } - - err := DecodeStringArrayBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values) - m.mu.RUnlock() - - return err -} - -func (m *mmapAccessor) readBooleanBlock(entry *IndexEntry, values *[]BooleanValue) ([]BooleanValue, error) { - m.incAccess() - - m.mu.RLock() - if int64(len(m.b)) < entry.Offset+int64(entry.Size) { - m.mu.RUnlock() - return nil, ErrTSMClosed - } - - a, err := DecodeBooleanBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values) - m.mu.RUnlock() - - if err != nil { - return nil, err - } - - return a, nil -} - -func (m *mmapAccessor) readBooleanArrayBlock(entry *IndexEntry, values *tsdb.BooleanArray) error { - m.incAccess() - - m.mu.RLock() - if int64(len(m.b)) < entry.Offset+int64(entry.Size) { - m.mu.RUnlock() - return ErrTSMClosed - } - - err := DecodeBooleanArrayBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values) - m.mu.RUnlock() - - return err -} diff --git a/tsdb/engine/tsm1/reader.gen.go.tmpl b/tsdb/engine/tsm1/reader.gen.go.tmpl deleted file mode 100644 index 78536d8d9ee..00000000000 --- a/tsdb/engine/tsm1/reader.gen.go.tmpl +++ /dev/null @@ -1,77 +0,0 @@ -package tsm1 - -import ( - "github.com/influxdata/influxdb/v2/tsdb" -) - -{{range .}} -// Read{{.Name}}BlockAt returns the {{.name}} values corresponding to the given index entry. -func (t *TSMReader) Read{{.Name}}BlockAt(entry *IndexEntry, vals *[]{{.Name}}Value) ([]{{.Name}}Value, error) { - t.mu.RLock() - v, err := t.accessor.read{{.Name}}Block(entry, vals) - t.mu.RUnlock() - return v, err -} - -// Read{{.Name}}ArrayBlockAt fills vals with the {{.name}} values corresponding to the given index entry. -func (t *TSMReader) Read{{.Name}}ArrayBlockAt(entry *IndexEntry, vals *tsdb.{{.Name}}Array) error { - t.mu.RLock() - err := t.accessor.read{{.Name}}ArrayBlock(entry, vals) - t.mu.RUnlock() - return err -} -{{end}} - -// blockAccessor abstracts a method of accessing blocks from a -// TSM file. 
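reader.gen.go above is produced by expanding this template once per entry in reader.gen.go.tmpldata. Conceptually the expansion is ordinary Go text templating; a standalone sketch of that idea (the repository uses the external tmpl tool, not this code):

package main

import (
	"os"
	"text/template"
)

func main() {
	tmpl := template.Must(template.New("reader").Parse(
		"// Read{{.Name}}BlockAt returns the {{.name}} values corresponding to the given index entry.\n"))

	// Two of the entries from reader.gen.go.tmpldata.
	for _, e := range []map[string]string{
		{"Name": "Float", "name": "float"},
		{"Name": "Integer", "name": "integer"},
	} {
		if err := tmpl.Execute(os.Stdout, e); err != nil {
			panic(err)
		}
	}
}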
-type blockAccessor interface { - init() (*indirectIndex, error) - read(key []byte, timestamp int64) ([]Value, error) - readAll(key []byte) ([]Value, error) - readBlock(entry *IndexEntry, values []Value) ([]Value, error) -{{- range .}} - read{{.Name}}Block(entry *IndexEntry, values *[]{{.Name}}Value) ([]{{.Name}}Value, error) - read{{.Name}}ArrayBlock(entry *IndexEntry, values *tsdb.{{.Name}}Array) error -{{- end}} - readBytes(entry *IndexEntry, buf []byte) (uint32, []byte, error) - rename(path string) error - path() string - close() error - free() error -} - -{{range .}} -func (m *mmapAccessor) read{{.Name}}Block(entry *IndexEntry, values *[]{{.Name}}Value) ([]{{.Name}}Value, error) { - m.incAccess() - - m.mu.RLock() - if int64(len(m.b)) < entry.Offset+int64(entry.Size) { - m.mu.RUnlock() - return nil, ErrTSMClosed - } - - a, err := Decode{{.Name}}Block(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values) - m.mu.RUnlock() - - if err != nil { - return nil, err - } - - return a, nil -} - -func (m *mmapAccessor) read{{.Name}}ArrayBlock(entry *IndexEntry, values *tsdb.{{.Name}}Array) error { - m.incAccess() - - m.mu.RLock() - if int64(len(m.b)) < entry.Offset+int64(entry.Size) { - m.mu.RUnlock() - return ErrTSMClosed - } - - err := Decode{{.Name}}ArrayBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values) - m.mu.RUnlock() - - return err -} -{{end}} \ No newline at end of file diff --git a/tsdb/engine/tsm1/reader.gen.go.tmpldata b/tsdb/engine/tsm1/reader.gen.go.tmpldata deleted file mode 100644 index 236ba310ba4..00000000000 --- a/tsdb/engine/tsm1/reader.gen.go.tmpldata +++ /dev/null @@ -1,22 +0,0 @@ -[ - { - "Name":"Float", - "name":"float" - }, - { - "Name":"Integer", - "name":"integer" - }, - { - "Name":"Unsigned", - "name":"unsigned" - }, - { - "Name":"String", - "name":"string" - }, - { - "Name":"Boolean", - "name":"boolean" - } -] diff --git a/tsdb/engine/tsm1/reader.go b/tsdb/engine/tsm1/reader.go deleted file mode 100644 index 174c69071c2..00000000000 --- a/tsdb/engine/tsm1/reader.go +++ /dev/null @@ -1,1641 +0,0 @@ -package tsm1 - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "math" - "os" - "runtime" - "sort" - "sync" - "sync/atomic" - - "github.com/influxdata/influxdb/v2/pkg/bytesutil" - "github.com/influxdata/influxdb/v2/pkg/file" - "github.com/influxdata/influxdb/v2/tsdb" -) - -// ErrFileInUse is returned when attempting to remove or close a TSM file that is still being used. -var ErrFileInUse = fmt.Errorf("file still in use") - -// nilOffset is the value written to the offsets to indicate that position is deleted. The value is the max -// uint32 which is an invalid position. We don't use 0 as 0 is actually a valid position. -var nilOffset = []byte{255, 255, 255, 255} - -// TSMReader is a reader for a TSM file. -type TSMReader struct { - // refs is the count of active references to this reader. - refs int64 - refsWG sync.WaitGroup - - madviseWillNeed bool // Hint to the kernel with MADV_WILLNEED. - mu sync.RWMutex - - // accessor provides access and decoding of blocks for the reader. - accessor blockAccessor - - // index is the index of all blocks. - index TSMIndex - - // tombstoner ensures tombstoned keys are not available by the index. - tombstoner *Tombstoner - - // size is the size of the file on disk. - size int64 - - // lastModified is the last time this file was modified on disk - lastModified int64 - - // deleteMu limits concurrent deletes - deleteMu sync.Mutex -} - -// TSMIndex represent the index section of a TSM file. 
The index records all -// blocks, their locations, sizes, min and max times. -type TSMIndex interface { - // Delete removes the given keys from the index. - Delete(keys [][]byte) - - // DeleteRange removes the given keys with data between minTime and maxTime from the index. - DeleteRange(keys [][]byte, minTime, maxTime int64) - - // ContainsKey returns true if the given key may exist in the index. This func is faster than - // Contains but, may return false positives. - ContainsKey(key []byte) bool - - // Contains return true if the given key exists in the index. - Contains(key []byte) bool - - // ContainsValue returns true if key and time might exist in this file. This function could - // return true even though the actual point does not exists. For example, the key may - // exist in this file, but not have a point exactly at time t. - ContainsValue(key []byte, timestamp int64) bool - - // Entries returns all index entries for a key. - Entries(key []byte) []IndexEntry - - // ReadEntries reads the index entries for key into entries. - ReadEntries(key []byte, entries *[]IndexEntry) []IndexEntry - - // Entry returns the index entry for the specified key and timestamp. If no entry - // matches the key and timestamp, nil is returned. - Entry(key []byte, timestamp int64) *IndexEntry - - // Key returns the key in the index at the given position, using entries to avoid allocations. - Key(index int, entries *[]IndexEntry) ([]byte, byte, []IndexEntry) - - // KeyAt returns the key in the index at the given position. - KeyAt(index int) ([]byte, byte) - - // KeyCount returns the count of unique keys in the index. - KeyCount() int - - // Seek returns the position in the index where key <= value in the index. - Seek(key []byte) int - - // OverlapsTimeRange returns true if the time range of the file intersect min and max. - OverlapsTimeRange(min, max int64) bool - - // OverlapsKeyRange returns true if the min and max keys of the file overlap the arguments min and max. - OverlapsKeyRange(min, max []byte) bool - - // Size returns the size of the current index in bytes. - Size() uint32 - - // TimeRange returns the min and max time across all keys in the file. - TimeRange() (int64, int64) - - // TombstoneRange returns ranges of time that are deleted for the given key. - TombstoneRange(key []byte) []TimeRange - - // KeyRange returns the min and max keys in the file. - KeyRange() ([]byte, []byte) - - // Type returns the block type of the values stored for the key. Returns one of - // BlockFloat64, BlockInt64, BlockBool, BlockString. If key does not exist, - // an error is returned. - Type(key []byte) (byte, error) - - // UnmarshalBinary populates an index from an encoded byte slice - // representation of an index. - UnmarshalBinary(b []byte) error - - // Close closes the index and releases any resources. - Close() error -} - -// BlockIterator allows iterating over each block in a TSM file in order. It provides -// raw access to the block bytes without decoding them. -type BlockIterator struct { - r *TSMReader - - // i is the current key index - i int - - // n is the total number of keys - n int - - key []byte - cache []IndexEntry - entries []IndexEntry - err error - typ byte -} - -// PeekNext returns the next key to be iterated or an empty string. -func (b *BlockIterator) PeekNext() []byte { - if len(b.entries) > 1 { - return b.key - } else if b.n-b.i > 1 { - key, _ := b.r.KeyAt(b.i + 1) - return key - } - return nil -} - -// Next returns true if there are more blocks to iterate through. 
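A hypothetical caller inside this package: drain a BlockIterator with the Next/Read/Err protocol described above, here just totaling the size of the still-encoded blocks:

func sumBlockSizes(itr *BlockIterator) (int, error) {
	total := 0
	for itr.Next() {
		_, _, _, _, _, buf, err := itr.Read()
		if err != nil {
			return total, err
		}
		total += len(buf) // buf holds the block bytes without decoding them
	}
	return total, itr.Err()
}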
-func (b *BlockIterator) Next() bool { - if b.err != nil { - return false - } - - if b.n-b.i == 0 && len(b.entries) == 0 { - return false - } - - if len(b.entries) > 0 { - b.entries = b.entries[1:] - if len(b.entries) > 0 { - return true - } - } - - if b.n-b.i > 0 { - b.key, b.typ, b.entries = b.r.Key(b.i, &b.cache) - b.i++ - - // If there were deletes on the TSMReader, then our index is now off and we - // can't proceed. What we just read may not actually the next block. - if b.n != b.r.KeyCount() { - b.err = fmt.Errorf("delete during iteration") - return false - } - - if len(b.entries) > 0 { - return true - } - } - - return false -} - -// Read reads information about the next block to be iterated. -func (b *BlockIterator) Read() (key []byte, minTime int64, maxTime int64, typ byte, checksum uint32, buf []byte, err error) { - if b.err != nil { - return nil, 0, 0, 0, 0, nil, b.err - } - checksum, buf, err = b.r.ReadBytes(&b.entries[0], nil) - if err != nil { - b.err = err - return nil, 0, 0, 0, 0, nil, err - } - return b.key, b.entries[0].MinTime, b.entries[0].MaxTime, b.typ, checksum, buf, err -} - -// Err returns any errors encounter during iteration. -func (b *BlockIterator) Err() error { - return b.err -} - -type tsmReaderOption func(*TSMReader) - -// WithMadviseWillNeed is an option for specifying whether to provide a MADV_WILL need hint to the kernel. -var WithMadviseWillNeed = func(willNeed bool) tsmReaderOption { - return func(r *TSMReader) { - r.madviseWillNeed = willNeed - } -} - -// NewTSMReader returns a new TSMReader from the given file. -func NewTSMReader(f *os.File, options ...tsmReaderOption) (*TSMReader, error) { - t := &TSMReader{} - for _, option := range options { - option(t) - } - - stat, err := f.Stat() - if err != nil { - return nil, err - } - t.size = stat.Size() - t.lastModified = stat.ModTime().UnixNano() - t.accessor = &mmapAccessor{ - f: f, - mmapWillNeed: t.madviseWillNeed, - } - - index, err := t.accessor.init() - if err != nil { - _ = t.accessor.close() - return nil, err - } - - t.index = index - t.tombstoner = NewTombstoner(t.Path(), index.ContainsKey) - - if err := t.applyTombstones(); err != nil { - return nil, err - } - - return t, nil -} - -// WithObserver sets the observer for the TSM reader. -func (t *TSMReader) WithObserver(obs tsdb.FileStoreObserver) { - t.tombstoner.WithObserver(obs) -} - -func (t *TSMReader) applyTombstones() error { - var cur, prev Tombstone - batch := make([][]byte, 0, 4096) - - if err := t.tombstoner.Walk(func(ts Tombstone) error { - cur = ts - if len(batch) > 0 { - if prev.Min != cur.Min || prev.Max != cur.Max { - t.index.DeleteRange(batch, prev.Min, prev.Max) - batch = batch[:0] - } - } - - // Copy the tombstone key and re-use the buffers to avoid allocations - n := len(batch) - batch = batch[:n+1] - if cap(batch[n]) < len(ts.Key) { - batch[n] = make([]byte, len(ts.Key)) - } else { - batch[n] = batch[n][:len(ts.Key)] - } - copy(batch[n], ts.Key) - - if len(batch) >= 4096 { - t.index.DeleteRange(batch, prev.Min, prev.Max) - batch = batch[:0] - } - - prev = ts - return nil - }); err != nil { - return fmt.Errorf("init: read tombstones: %v", err) - } - - if len(batch) > 0 { - t.index.DeleteRange(batch, cur.Min, cur.Max) - } - return nil -} - -func (t *TSMReader) Free() error { - t.mu.RLock() - defer t.mu.RUnlock() - return t.accessor.free() -} - -// Path returns the path of the file the TSMReader was initialized with. 
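A hypothetical caller inside this package, using NewTSMReader and the WithMadviseWillNeed option defined above (openReader is an illustrative name):

func openReader(path string) (*TSMReader, error) {
	f, err := os.Open(path) // e.g. a .tsm file in a shard directory
	if err != nil {
		return nil, err
	}
	r, err := NewTSMReader(f, WithMadviseWillNeed(true))
	if err != nil {
		f.Close()
		return nil, err
	}
	return r, nil
}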
-func (t *TSMReader) Path() string { - t.mu.RLock() - p := t.accessor.path() - t.mu.RUnlock() - return p -} - -// Key returns the key and the underlying entry at the numeric index. -func (t *TSMReader) Key(index int, entries *[]IndexEntry) ([]byte, byte, []IndexEntry) { - return t.index.Key(index, entries) -} - -// KeyAt returns the key and key type at position idx in the index. -func (t *TSMReader) KeyAt(idx int) ([]byte, byte) { - return t.index.KeyAt(idx) -} - -func (t *TSMReader) Seek(key []byte) int { - return t.index.Seek(key) -} - -// ReadAt returns the values corresponding to the given index entry. -func (t *TSMReader) ReadAt(entry *IndexEntry, vals []Value) ([]Value, error) { - t.mu.RLock() - v, err := t.accessor.readBlock(entry, vals) - t.mu.RUnlock() - return v, err -} - -// Read returns the values corresponding to the block at the given key and timestamp. -func (t *TSMReader) Read(key []byte, timestamp int64) ([]Value, error) { - t.mu.RLock() - v, err := t.accessor.read(key, timestamp) - t.mu.RUnlock() - return v, err -} - -// ReadAll returns all values for a key in all blocks. -func (t *TSMReader) ReadAll(key []byte) ([]Value, error) { - t.mu.RLock() - v, err := t.accessor.readAll(key) - t.mu.RUnlock() - return v, err -} - -func (t *TSMReader) ReadBytes(e *IndexEntry, b []byte) (uint32, []byte, error) { - t.mu.RLock() - n, v, err := t.accessor.readBytes(e, b) - t.mu.RUnlock() - return n, v, err -} - -// Type returns the type of values stored at the given key. -func (t *TSMReader) Type(key []byte) (byte, error) { - return t.index.Type(key) -} - -// Close closes the TSMReader. -func (t *TSMReader) Close() error { - t.refsWG.Wait() - - t.mu.Lock() - defer t.mu.Unlock() - - if err := t.accessor.close(); err != nil { - return err - } - - return t.index.Close() -} - -// Ref records a usage of this TSMReader. If there are active references -// when the reader is closed or removed, the reader will remain open until -// there are no more references. -func (t *TSMReader) Ref() { - atomic.AddInt64(&t.refs, 1) - t.refsWG.Add(1) -} - -// Unref removes a usage record of this TSMReader. If the Reader was closed -// by another goroutine while there were active references, the file will -// be closed and remove -func (t *TSMReader) Unref() { - atomic.AddInt64(&t.refs, -1) - t.refsWG.Done() -} - -// InUse returns whether the TSMReader currently has any active references. -func (t *TSMReader) InUse() bool { - refs := atomic.LoadInt64(&t.refs) - return refs > 0 -} - -// Remove removes any underlying files stored on disk for this reader. -func (t *TSMReader) Remove() error { - t.mu.Lock() - defer t.mu.Unlock() - return t.remove() -} - -// Rename renames the underlying file to the new path. -func (t *TSMReader) Rename(path string) error { - t.mu.Lock() - defer t.mu.Unlock() - return t.accessor.rename(path) -} - -// Remove removes any underlying files stored on disk for this reader. -func (t *TSMReader) remove() error { - path := t.accessor.path() - - if t.InUse() { - return ErrFileInUse - } - - if path != "" { - err := os.RemoveAll(path) - if err != nil { - return err - } - } - - if err := t.tombstoner.Delete(); err != nil { - return err - } - return nil -} - -// Contains returns whether the given key is present in the index. -func (t *TSMReader) Contains(key []byte) bool { - return t.index.Contains(key) -} - -// ContainsValue returns true if key and time might exists in this file. This function could -// return true even though the actual point does not exist. 
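// Illustration only (not from the original source): the Ref/Unref pair above is
// meant to bracket reads so that a concurrent Close or Remove cannot unmap the
// file mid-read; Close waits on refsWG until all references are released.
func exampleGuardedReadAll(r *TSMReader, key []byte) ([]Value, error) {
	r.Ref()
	defer r.Unref()
	return r.ReadAll(key)
}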
For example, the key may -// exist in this file, but not have a point exactly at time t. -func (t *TSMReader) ContainsValue(key []byte, ts int64) bool { - return t.index.ContainsValue(key, ts) -} - -// DeleteRange removes the given points for keys between minTime and maxTime. The series -// keys passed in must be sorted. -func (t *TSMReader) DeleteRange(keys [][]byte, minTime, maxTime int64) error { - if len(keys) == 0 { - return nil - } - - batch := t.BatchDelete() - if err := batch.DeleteRange(keys, minTime, maxTime); err != nil { - batch.Rollback() - return err - } - return batch.Commit() -} - -// Delete deletes blocks indicated by keys. -func (t *TSMReader) Delete(keys [][]byte) error { - if err := t.tombstoner.Add(keys); err != nil { - return err - } - - if err := t.tombstoner.Flush(); err != nil { - return err - } - - t.index.Delete(keys) - return nil -} - -// OverlapsTimeRange returns true if the time range of the file intersect min and max. -func (t *TSMReader) OverlapsTimeRange(min, max int64) bool { - return t.index.OverlapsTimeRange(min, max) -} - -// OverlapsKeyRange returns true if the key range of the file intersect min and max. -func (t *TSMReader) OverlapsKeyRange(min, max []byte) bool { - return t.index.OverlapsKeyRange(min, max) -} - -// TimeRange returns the min and max time across all keys in the file. -func (t *TSMReader) TimeRange() (int64, int64) { - return t.index.TimeRange() -} - -// KeyRange returns the min and max key across all keys in the file. -func (t *TSMReader) KeyRange() ([]byte, []byte) { - return t.index.KeyRange() -} - -// KeyCount returns the count of unique keys in the TSMReader. -func (t *TSMReader) KeyCount() int { - return t.index.KeyCount() -} - -// Entries returns all index entries for key. -func (t *TSMReader) Entries(key []byte) []IndexEntry { - return t.index.Entries(key) -} - -// ReadEntries reads the index entries for key into entries. -func (t *TSMReader) ReadEntries(key []byte, entries *[]IndexEntry) []IndexEntry { - return t.index.ReadEntries(key, entries) -} - -// IndexSize returns the size of the index in bytes. -func (t *TSMReader) IndexSize() uint32 { - return t.index.Size() -} - -// Size returns the size of the underlying file in bytes. -func (t *TSMReader) Size() uint32 { - t.mu.RLock() - size := t.size - t.mu.RUnlock() - return uint32(size) -} - -// LastModified returns the last time the underlying file was modified. -func (t *TSMReader) LastModified() int64 { - t.mu.RLock() - lm := t.lastModified - if ts := t.tombstoner.TombstoneStats(); ts.TombstoneExists { - if ts.LastModified > lm { - lm = ts.LastModified - } - } - t.mu.RUnlock() - return lm -} - -// HasTombstones return true if there are any tombstone entries recorded. -func (t *TSMReader) HasTombstones() bool { - t.mu.RLock() - b := t.tombstoner.HasTombstones() - t.mu.RUnlock() - return b -} - -// TombstoneFiles returns any tombstone files associated with this TSM file. -func (t *TSMReader) TombstoneStats() TombstoneStat { - t.mu.RLock() - fs := t.tombstoner.TombstoneStats() - t.mu.RUnlock() - return fs -} - -// TombstoneRange returns ranges of time that are deleted for the given key. -func (t *TSMReader) TombstoneRange(key []byte) []TimeRange { - t.mu.RLock() - tr := t.index.TombstoneRange(key) - t.mu.RUnlock() - return tr -} - -// Stats returns the FileStat for the TSMReader's underlying file. 
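// DeleteRange above is a thin wrapper over the batch-delete machinery defined
// just below. A hedged sketch of driving it directly (editor's illustration;
// keys are assumed to be sorted, as the wrapper requires):
func exampleBatchDelete(r *TSMReader, keys [][]byte, minTime, maxTime int64) error {
	// Only one goroutine may hold a BatchDelete at a time; always Commit or Rollback.
	batch := r.BatchDelete()
	if err := batch.DeleteRange(keys, minTime, maxTime); err != nil {
		batch.Rollback()
		return err
	}
	return batch.Commit()
}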
-func (t *TSMReader) Stats() FileStat { - minTime, maxTime := t.index.TimeRange() - minKey, maxKey := t.index.KeyRange() - return FileStat{ - Path: t.Path(), - Size: t.Size(), - LastModified: t.LastModified(), - MinTime: minTime, - MaxTime: maxTime, - MinKey: minKey, - MaxKey: maxKey, - HasTombstone: t.tombstoner.HasTombstones(), - } -} - -// BlockIterator returns a BlockIterator for the underlying TSM file. -func (t *TSMReader) BlockIterator() *BlockIterator { - return &BlockIterator{ - r: t, - n: t.index.KeyCount(), - } -} - -type BatchDeleter interface { - DeleteRange(keys [][]byte, min, max int64) error - Commit() error - Rollback() error -} - -type batchDelete struct { - r *TSMReader -} - -func (b *batchDelete) DeleteRange(keys [][]byte, minTime, maxTime int64) error { - if len(keys) == 0 { - return nil - } - - // If the keys can't exist in this TSM file, skip it. - minKey, maxKey := keys[0], keys[len(keys)-1] - if !b.r.index.OverlapsKeyRange(minKey, maxKey) { - return nil - } - - // If the timerange can't exist in this TSM file, skip it. - if !b.r.index.OverlapsTimeRange(minTime, maxTime) { - return nil - } - - if err := b.r.tombstoner.AddRange(keys, minTime, maxTime); err != nil { - return err - } - - return nil -} - -func (b *batchDelete) Commit() error { - defer b.r.deleteMu.Unlock() - if err := b.r.tombstoner.Flush(); err != nil { - return err - } - - return b.r.applyTombstones() -} - -func (b *batchDelete) Rollback() error { - defer b.r.deleteMu.Unlock() - return b.r.tombstoner.Rollback() -} - -// BatchDelete returns a BatchDeleter. Only a single goroutine may run a BatchDelete at a time. -// Callers must either Commit or Rollback the operation. -func (r *TSMReader) BatchDelete() BatchDeleter { - r.deleteMu.Lock() - return &batchDelete{r: r} -} - -type BatchDeleters []BatchDeleter - -func (a BatchDeleters) DeleteRange(keys [][]byte, min, max int64) error { - errC := make(chan error, len(a)) - for _, b := range a { - go func(b BatchDeleter) { errC <- b.DeleteRange(keys, min, max) }(b) - } - - var err error - for i := 0; i < len(a); i++ { - dErr := <-errC - if dErr != nil { - err = dErr - } - } - return err -} - -func (a BatchDeleters) Commit() error { - errC := make(chan error, len(a)) - for _, b := range a { - go func(b BatchDeleter) { errC <- b.Commit() }(b) - } - - var err error - for i := 0; i < len(a); i++ { - dErr := <-errC - if dErr != nil { - err = dErr - } - } - return err -} - -func (a BatchDeleters) Rollback() error { - errC := make(chan error, len(a)) - for _, b := range a { - go func(b BatchDeleter) { errC <- b.Rollback() }(b) - } - - var err error - for i := 0; i < len(a); i++ { - dErr := <-errC - if dErr != nil { - err = dErr - } - } - return err -} - -// indirectIndex is a TSMIndex that uses a raw byte slice representation of an index. This -// implementation can be used for indexes that may be MMAPed into memory. -type indirectIndex struct { - mu sync.RWMutex - - // indirectIndex works a follows. Assuming we have an index structure in memory as - // the diagram below: - // - // ┌────────────────────────────────────────────────────────────────────┐ - // │ Index │ - // ├─┬──────────────────────┬──┬───────────────────────┬───┬────────────┘ - // │0│ │62│ │145│ - // ├─┴───────┬─────────┬────┼──┴──────┬─────────┬──────┼───┴─────┬──────┐ - // │Key 1 Len│ Key │... │Key 2 Len│ Key 2 │ ... │ Key 3 │ ... 
│ - // │ 2 bytes │ N bytes │ │ 2 bytes │ N bytes │ │ 2 bytes │ │ - // └─────────┴─────────┴────┴─────────┴─────────┴──────┴─────────┴──────┘ - - // We would build an `offsets` slices where each element pointers to the byte location - // for the first key in the index slice. - - // ┌────────────────────────────────────────────────────────────────────┐ - // │ Offsets │ - // ├────┬────┬────┬─────────────────────────────────────────────────────┘ - // │ 0 │ 62 │145 │ - // └────┴────┴────┘ - - // Using this offset slice we can find `Key 2` by doing a binary search - // over the offsets slice. Instead of comparing the value in the offsets - // (e.g. `62`), we use that as an index into the underlying index to - // retrieve the key at position `62` and perform our comparisons with that. - - // When we have identified the correct position in the index for a given - // key, we could perform another binary search or a linear scan. This - // should be fast as well since each index entry is 28 bytes and all - // contiguous in memory. The current implementation uses a linear scan since the - // number of block entries is expected to be < 100 per key. - - // b is the underlying index byte slice. This could be a copy on the heap or an MMAP - // slice reference - b []byte - - // offsets contains the positions in b for each key. It points to the 2 byte length of - // key. - offsets []byte - - // minKey, maxKey are the minimum and maximum (lexicographically sorted) contained in the - // file - minKey, maxKey []byte - - // minTime, maxTime are the minimum and maximum times contained in the file across all - // series. - minTime, maxTime int64 - - // tombstones contains only the tombstoned keys with subset of time values deleted. An - // entry would exist here if a subset of the points for a key were deleted and the file - // had not be re-compacted to remove the points on disk. - tombstones map[string][]TimeRange -} - -// TimeRange holds a min and max timestamp. -type TimeRange struct { - Min, Max int64 -} - -func (t TimeRange) Overlaps(min, max int64) bool { - return t.Min <= max && t.Max >= min -} - -// NewIndirectIndex returns a new indirect index. -func NewIndirectIndex() *indirectIndex { - return &indirectIndex{ - tombstones: make(map[string][]TimeRange), - } -} - -func (d *indirectIndex) offset(i int) int { - if i < 0 || i+4 > len(d.offsets) { - return -1 - } - return int(binary.BigEndian.Uint32(d.offsets[i*4 : i*4+4])) -} - -func (d *indirectIndex) Seek(key []byte) int { - d.mu.RLock() - defer d.mu.RUnlock() - return d.searchOffset(key) -} - -// searchOffset searches the offsets slice for key and returns the position in -// offsets where key would exist. -func (d *indirectIndex) searchOffset(key []byte) int { - // We use a binary search across our indirect offsets (pointers to all the keys - // in the index slice). - i := bytesutil.SearchBytesFixed(d.offsets, 4, func(x []byte) bool { - // i is the position in offsets we are at so get offset it points to - offset := int32(binary.BigEndian.Uint32(x)) - - // It's pointing to the start of the key which is a 2 byte length - keyLen := int32(binary.BigEndian.Uint16(d.b[offset : offset+2])) - - // See if it matches - return bytes.Compare(d.b[offset+2:offset+2+keyLen], key) >= 0 - }) - - // See if we might have found the right index - if i < len(d.offsets) { - return int(i / 4) - } - - // The key is not in the index. i is the index where it would be inserted so return - // a value outside our offset range. 
- return int(len(d.offsets)) / 4 -} - -// search returns the byte position of key in the index. If key is not -// in the index, len(index) is returned. -func (d *indirectIndex) search(key []byte) int { - if !d.ContainsKey(key) { - return len(d.b) - } - - // We use a binary search across our indirect offsets (pointers to all the keys - // in the index slice). - // TODO(sgc): this should be inlined to `indirectIndex` as it is only used here - i := bytesutil.SearchBytesFixed(d.offsets, 4, func(x []byte) bool { - // i is the position in offsets we are at so get offset it points to - offset := int32(binary.BigEndian.Uint32(x)) - - // It's pointing to the start of the key which is a 2 byte length - keyLen := int32(binary.BigEndian.Uint16(d.b[offset : offset+2])) - - // See if it matches - return bytes.Compare(d.b[offset+2:offset+2+keyLen], key) >= 0 - }) - - // See if we might have found the right index - if i < len(d.offsets) { - ofs := binary.BigEndian.Uint32(d.offsets[i : i+4]) - _, k := readKey(d.b[ofs:]) - - // The search may have returned an i == 0 which could indicated that the value - // searched should be inserted at position 0. Make sure the key in the index - // matches the search value. - if !bytes.Equal(key, k) { - return len(d.b) - } - - return int(ofs) - } - - // The key is not in the index. i is the index where it would be inserted so return - // a value outside our offset range. - return len(d.b) -} - -// ContainsKey returns true of key may exist in this index. -func (d *indirectIndex) ContainsKey(key []byte) bool { - return bytes.Compare(key, d.minKey) >= 0 && bytes.Compare(key, d.maxKey) <= 0 -} - -// Entries returns all index entries for a key. -func (d *indirectIndex) Entries(key []byte) []IndexEntry { - return d.ReadEntries(key, nil) -} - -func (d *indirectIndex) readEntriesAt(ofs int, entries *[]IndexEntry) ([]byte, []IndexEntry) { - n, k := readKey(d.b[ofs:]) - - // Read and return all the entries - ofs += n - var ie indexEntries - if entries != nil { - ie.entries = *entries - } - if _, err := readEntries(d.b[ofs:], &ie); err != nil { - panic(fmt.Sprintf("error reading entries: %v", err)) - } - if entries != nil { - *entries = ie.entries - } - return k, ie.entries -} - -// ReadEntries returns all index entries for a key. -func (d *indirectIndex) ReadEntries(key []byte, entries *[]IndexEntry) []IndexEntry { - d.mu.RLock() - defer d.mu.RUnlock() - - ofs := d.search(key) - if ofs < len(d.b) { - k, entries := d.readEntriesAt(ofs, entries) - // The search may have returned an i == 0 which could indicated that the value - // searched should be inserted at position 0. Make sure the key in the index - // matches the search value. - if !bytes.Equal(key, k) { - return nil - } - - return entries - } - - // The key is not in the index. i is the index where it would be inserted. - return nil -} - -// Entry returns the index entry for the specified key and timestamp. If no entry -// matches the key an timestamp, nil is returned. -func (d *indirectIndex) Entry(key []byte, timestamp int64) *IndexEntry { - entries := d.Entries(key) - for _, entry := range entries { - if entry.Contains(timestamp) { - return &entry - } - } - return nil -} - -// Key returns the key in the index at the given position. 
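// To make the indirection described above concrete, an editor's sketch (not in
// the original file) of how a 4-byte offsets entry resolves to a key in the raw
// index bytes. b and offsets stand in for indirectIndex.b and indirectIndex.offsets.
func exampleKeyAtPosition(b, offsets []byte, i int) []byte {
	// Each offsets entry is a big-endian uint32 pointing at the 2-byte key length.
	ofs := binary.BigEndian.Uint32(offsets[i*4 : i*4+4])
	keyLen := uint32(binary.BigEndian.Uint16(b[ofs : ofs+2]))
	return b[ofs+2 : ofs+2+keyLen]
}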
-func (d *indirectIndex) Key(idx int, entries *[]IndexEntry) ([]byte, byte, []IndexEntry) { - d.mu.RLock() - defer d.mu.RUnlock() - - if idx < 0 || idx*4+4 > len(d.offsets) { - return nil, 0, nil - } - ofs := binary.BigEndian.Uint32(d.offsets[idx*4 : idx*4+4]) - n, key := readKey(d.b[ofs:]) - - typ := d.b[int(ofs)+n] - - var ie indexEntries - if entries != nil { - ie.entries = *entries - } - if _, err := readEntries(d.b[int(ofs)+n:], &ie); err != nil { - return nil, 0, nil - } - if entries != nil { - *entries = ie.entries - } - - return key, typ, ie.entries -} - -// KeyAt returns the key in the index at the given position. -func (d *indirectIndex) KeyAt(idx int) ([]byte, byte) { - d.mu.RLock() - - if idx < 0 || idx*4+4 > len(d.offsets) { - d.mu.RUnlock() - return nil, 0 - } - ofs := int32(binary.BigEndian.Uint32(d.offsets[idx*4 : idx*4+4])) - - n, key := readKey(d.b[ofs:]) - ofs = ofs + int32(n) - typ := d.b[ofs] - d.mu.RUnlock() - return key, typ -} - -// KeyCount returns the count of unique keys in the index. -func (d *indirectIndex) KeyCount() int { - d.mu.RLock() - n := len(d.offsets) / 4 - d.mu.RUnlock() - return n -} - -// Delete removes the given keys from the index. -func (d *indirectIndex) Delete(keys [][]byte) { - if len(keys) == 0 { - return - } - - if !bytesutil.IsSorted(keys) { - bytesutil.Sort(keys) - } - - // Both keys and offsets are sorted. Walk both in order and skip - // any keys that exist in both. - d.mu.Lock() - start := d.searchOffset(keys[0]) - for i := start * 4; i+4 <= len(d.offsets) && len(keys) > 0; i += 4 { - offset := binary.BigEndian.Uint32(d.offsets[i : i+4]) - _, indexKey := readKey(d.b[offset:]) - - for len(keys) > 0 && bytes.Compare(keys[0], indexKey) < 0 { - keys = keys[1:] - } - - if len(keys) > 0 && bytes.Equal(keys[0], indexKey) { - keys = keys[1:] - copy(d.offsets[i:i+4], nilOffset) - } - } - d.offsets = bytesutil.Pack(d.offsets, 4, 255) - d.mu.Unlock() -} - -// DeleteRange removes the given keys with data between minTime and maxTime from the index. -func (d *indirectIndex) DeleteRange(keys [][]byte, minTime, maxTime int64) { - // No keys, nothing to do - if len(keys) == 0 { - return - } - - if !bytesutil.IsSorted(keys) { - bytesutil.Sort(keys) - } - - // If we're deleting the max time range, just use tombstoning to remove the - // key from the offsets slice - if minTime == math.MinInt64 && maxTime == math.MaxInt64 { - d.Delete(keys) - return - } - - // Is the range passed in outside of the time range for the file? - min, max := d.TimeRange() - if minTime > max || maxTime < min { - return - } - - fullKeys := make([][]byte, 0, len(keys)) - tombstones := map[string][]TimeRange{} - var ie []IndexEntry - - for i := 0; len(keys) > 0 && i < d.KeyCount(); i++ { - k, entries := d.readEntriesAt(d.offset(i), &ie) - - // Skip any keys that don't exist. These are less than the current key. - for len(keys) > 0 && bytes.Compare(keys[0], k) < 0 { - keys = keys[1:] - } - - // No more keys to delete, we're done. - if len(keys) == 0 { - break - } - - // If the current key is greater than the index one, continue to the next - // index key. - if len(keys) > 0 && bytes.Compare(keys[0], k) > 0 { - continue - } - - // If multiple tombstones are saved for the same key - if len(entries) == 0 { - continue - } - - // Is the time range passed outside of the time range we've have stored for this key? - min, max := entries[0].MinTime, entries[len(entries)-1].MaxTime - if minTime > max || maxTime < min { - continue - } - - // Does the range passed in cover every value for the key? 
- if minTime <= min && maxTime >= max { - fullKeys = append(fullKeys, keys[0]) - keys = keys[1:] - continue - } - - d.mu.RLock() - existing := d.tombstones[string(k)] - d.mu.RUnlock() - - // Append the new tombonstes to the existing ones - newTs := append(existing, append(tombstones[string(k)], TimeRange{minTime, maxTime})...) - fn := func(i, j int) bool { - a, b := newTs[i], newTs[j] - if a.Min == b.Min { - return a.Max <= b.Max - } - return a.Min < b.Min - } - - // Sort the updated tombstones if necessary - if len(newTs) > 1 && !sort.SliceIsSorted(newTs, fn) { - sort.Slice(newTs, fn) - } - - tombstones[string(k)] = newTs - - // We need to see if all the tombstones end up deleting the entire series. This - // could happen if their is one tombstore with min,max time spanning all the block - // time ranges or from multiple smaller tombstones the delete segments. To detect - // this cases, we use a window starting at the first tombstone and grow it be each - // tombstone that is immediately adjacent to the current window or if it overlaps. - // If there are any gaps, we abort. - minTs, maxTs := newTs[0].Min, newTs[0].Max - for j := 1; j < len(newTs); j++ { - prevTs := newTs[j-1] - ts := newTs[j] - - // Make sure all the tombstone line up for a continuous range. We don't - // want to have two small deletes on each edges end up causing us to - // remove the full key. - if prevTs.Max != ts.Min-1 && !prevTs.Overlaps(ts.Min, ts.Max) { - minTs, maxTs = int64(math.MaxInt64), int64(math.MinInt64) - break - } - - if ts.Min < minTs { - minTs = ts.Min - } - if ts.Max > maxTs { - maxTs = ts.Max - } - } - - // If we have a fully deleted series, delete it all of it. - if minTs <= min && maxTs >= max { - fullKeys = append(fullKeys, keys[0]) - keys = keys[1:] - continue - } - } - - // Delete all the keys that fully deleted in bulk - if len(fullKeys) > 0 { - d.Delete(fullKeys) - } - - if len(tombstones) == 0 { - return - } - - d.mu.Lock() - for k, v := range tombstones { - d.tombstones[k] = v - } - d.mu.Unlock() -} - -// TombstoneRange returns ranges of time that are deleted for the given key. -func (d *indirectIndex) TombstoneRange(key []byte) []TimeRange { - d.mu.RLock() - r := d.tombstones[string(key)] - d.mu.RUnlock() - return r -} - -// Contains return true if the given key exists in the index. -func (d *indirectIndex) Contains(key []byte) bool { - return len(d.Entries(key)) > 0 -} - -// ContainsValue returns true if key and time might exist in this file. -func (d *indirectIndex) ContainsValue(key []byte, timestamp int64) bool { - entry := d.Entry(key, timestamp) - if entry == nil { - return false - } - - d.mu.RLock() - tombstones := d.tombstones[string(key)] - d.mu.RUnlock() - - for _, t := range tombstones { - if t.Min <= timestamp && t.Max >= timestamp { - return false - } - } - return true -} - -// Type returns the block type of the values stored for the key. -func (d *indirectIndex) Type(key []byte) (byte, error) { - d.mu.RLock() - defer d.mu.RUnlock() - - ofs := d.search(key) - if ofs < len(d.b) { - n, _ := readKey(d.b[ofs:]) - ofs += n - return d.b[ofs], nil - } - return 0, fmt.Errorf("key does not exist: %s", key) -} - -// OverlapsTimeRange returns true if the time range of the file intersect min and max. -func (d *indirectIndex) OverlapsTimeRange(min, max int64) bool { - return d.minTime <= max && d.maxTime >= min -} - -// OverlapsKeyRange returns true if the min and max keys of the file overlap the arguments min and max. 
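// The DeleteRange logic above grows a window over the merged, sorted tombstones
// to decide whether a key is fully deleted. The check is roughly equivalent to
// this standalone sketch (editor's illustration only, not original code):
func exampleTombstonesCoverRange(sorted []TimeRange, min, max int64) bool {
	if len(sorted) == 0 {
		return false
	}
	minTs, maxTs := sorted[0].Min, sorted[0].Max
	for j := 1; j < len(sorted); j++ {
		prev, cur := sorted[j-1], sorted[j]
		// A gap between adjacent, non-overlapping ranges means the key is not fully deleted.
		if prev.Max != cur.Min-1 && !prev.Overlaps(cur.Min, cur.Max) {
			return false
		}
		if cur.Min < minTs {
			minTs = cur.Min
		}
		if cur.Max > maxTs {
			maxTs = cur.Max
		}
	}
	return minTs <= min && maxTs >= max
}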
-func (d *indirectIndex) OverlapsKeyRange(min, max []byte) bool { - return bytes.Compare(d.minKey, max) <= 0 && bytes.Compare(d.maxKey, min) >= 0 -} - -// KeyRange returns the min and max keys in the index. -func (d *indirectIndex) KeyRange() ([]byte, []byte) { - return d.minKey, d.maxKey -} - -// TimeRange returns the min and max time across all keys in the index. -func (d *indirectIndex) TimeRange() (int64, int64) { - return d.minTime, d.maxTime -} - -// MarshalBinary returns a byte slice encoded version of the index. -func (d *indirectIndex) MarshalBinary() ([]byte, error) { - d.mu.RLock() - defer d.mu.RUnlock() - - return d.b, nil -} - -// UnmarshalBinary populates an index from an encoded byte slice -// representation of an index. -func (d *indirectIndex) UnmarshalBinary(b []byte) error { - d.mu.Lock() - defer d.mu.Unlock() - - // Keep a reference to the actual index bytes - d.b = b - if len(b) == 0 { - return nil - } - - //var minKey, maxKey []byte - var minTime, maxTime int64 = math.MaxInt64, 0 - - // To create our "indirect" index, we need to find the location of all the keys in - // the raw byte slice. The keys are listed once each (in sorted order). Following - // each key is a time ordered list of index entry blocks for that key. The loop below - // basically skips across the slice keeping track of the counter when we are at a key - // field. - var i int32 - var offsets []int32 - iMax := int32(len(b)) - for i < iMax { - offsets = append(offsets, i) - - // Skip to the start of the values - // key length value (2) + type (1) + length of key - if i+2 >= iMax { - return fmt.Errorf("indirectIndex: not enough data for key length value") - } - i += 3 + int32(binary.BigEndian.Uint16(b[i:i+2])) - - // count of index entries - if i+indexCountSize >= iMax { - return fmt.Errorf("indirectIndex: not enough data for index entries count") - } - count := int32(binary.BigEndian.Uint16(b[i : i+indexCountSize])) - i += indexCountSize - - // Find the min time for the block - if i+8 >= iMax { - return fmt.Errorf("indirectIndex: not enough data for min time") - } - minT := int64(binary.BigEndian.Uint64(b[i : i+8])) - if minT < minTime { - minTime = minT - } - - i += (count - 1) * indexEntrySize - - // Find the max time for the block - if i+16 >= iMax { - return fmt.Errorf("indirectIndex: not enough data for max time") - } - maxT := int64(binary.BigEndian.Uint64(b[i+8 : i+16])) - if maxT > maxTime { - maxTime = maxT - } - - i += indexEntrySize - } - - firstOfs := offsets[0] - _, key := readKey(b[firstOfs:]) - d.minKey = key - - lastOfs := offsets[len(offsets)-1] - _, key = readKey(b[lastOfs:]) - d.maxKey = key - - d.minTime = minTime - d.maxTime = maxTime - - var err error - d.offsets, err = mmap(nil, 0, len(offsets)*4) - if err != nil { - return err - } - for i, v := range offsets { - binary.BigEndian.PutUint32(d.offsets[i*4:i*4+4], uint32(v)) - } - - return nil -} - -// Size returns the size of the current index in bytes. -func (d *indirectIndex) Size() uint32 { - d.mu.RLock() - defer d.mu.RUnlock() - - return uint32(len(d.b)) -} - -func (d *indirectIndex) Close() error { - // Windows doesn't use the anonymous map for the offsets index - if runtime.GOOS == "windows" { - return nil - } - return munmap(d.offsets[:cap(d.offsets)]) -} - -// mmapAccess is mmap based block accessor. It access blocks through an -// MMAP file interface. 
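// UnmarshalBinary above walks the raw index by skipping over each key section,
// whose layout is: 2-byte key length, key bytes, 1-byte block type, 2-byte entry
// count, then count*indexEntrySize bytes of entries. As a hedged sketch (editor's
// addition, not original code), stepping from one key section to the next is:
func exampleNextKeySection(b []byte, i int32) int32 {
	keyLen := int32(binary.BigEndian.Uint16(b[i : i+2]))
	i += 2 + keyLen + 1 // key length field, key bytes, block type byte
	count := int32(binary.BigEndian.Uint16(b[i : i+indexCountSize]))
	return i + indexCountSize + count*indexEntrySize
}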
-type mmapAccessor struct { - accessCount uint64 // Counter incremented everytime the mmapAccessor is accessed - freeCount uint64 // Counter to determine whether the accessor can free its resources - - mmapWillNeed bool // If true then mmap advise value MADV_WILLNEED will be provided the kernel for b. - - mu sync.RWMutex - b []byte - f *os.File - - index *indirectIndex -} - -func (m *mmapAccessor) init() (*indirectIndex, error) { - m.mu.Lock() - defer m.mu.Unlock() - - if err := verifyVersion(m.f); err != nil { - return nil, err - } - - var err error - - if _, err := m.f.Seek(0, 0); err != nil { - return nil, err - } - - stat, err := m.f.Stat() - if err != nil { - return nil, err - } - - m.b, err = mmap(m.f, 0, int(stat.Size())) - if err != nil { - return nil, err - } - if len(m.b) < 8 { - return nil, fmt.Errorf("mmapAccessor: byte slice too small for indirectIndex") - } - - // Hint to the kernel that we will be reading the file. It would be better to hint - // that we will be reading the index section, but that's not been - // implemented as yet. - if m.mmapWillNeed { - if err := madviseWillNeed(m.b); err != nil { - return nil, err - } - } - - indexOfsPos := len(m.b) - 8 - indexStart := binary.BigEndian.Uint64(m.b[indexOfsPos : indexOfsPos+8]) - if indexStart >= uint64(indexOfsPos) { - return nil, fmt.Errorf("mmapAccessor: invalid indexStart") - } - - m.index = NewIndirectIndex() - if err := m.index.UnmarshalBinary(m.b[indexStart:indexOfsPos]); err != nil { - return nil, err - } - - // Allow resources to be freed immediately if requested - m.incAccess() - atomic.StoreUint64(&m.freeCount, 1) - - return m.index, nil -} - -func (m *mmapAccessor) free() error { - accessCount := atomic.LoadUint64(&m.accessCount) - freeCount := atomic.LoadUint64(&m.freeCount) - - // Already freed everything. - if freeCount == 0 && accessCount == 0 { - return nil - } - - // Were there accesses after the last time we tried to free? - // If so, don't free anything and record the access count that we - // see now for the next check. - if accessCount != freeCount { - atomic.StoreUint64(&m.freeCount, accessCount) - return nil - } - - // Reset both counters to zero to indicate that we have freed everything. 
- atomic.StoreUint64(&m.accessCount, 0) - atomic.StoreUint64(&m.freeCount, 0) - - m.mu.RLock() - defer m.mu.RUnlock() - - return madviseDontNeed(m.b) -} - -func (m *mmapAccessor) incAccess() { - atomic.AddUint64(&m.accessCount, 1) -} - -func (m *mmapAccessor) rename(path string) error { - m.incAccess() - - m.mu.Lock() - defer m.mu.Unlock() - - err := munmap(m.b) - if err != nil { - return err - } - - if err := m.f.Close(); err != nil { - return err - } - - if err := file.RenameFile(m.f.Name(), path); err != nil { - return err - } - - m.f, err = os.Open(path) - if err != nil { - return err - } - - if _, err := m.f.Seek(0, 0); err != nil { - return err - } - - stat, err := m.f.Stat() - if err != nil { - return err - } - - m.b, err = mmap(m.f, 0, int(stat.Size())) - if err != nil { - return err - } - - if m.mmapWillNeed { - return madviseWillNeed(m.b) - } - return nil -} - -func (m *mmapAccessor) read(key []byte, timestamp int64) ([]Value, error) { - entry := m.index.Entry(key, timestamp) - if entry == nil { - return nil, nil - } - - return m.readBlock(entry, nil) -} - -func (m *mmapAccessor) readBlock(entry *IndexEntry, values []Value) ([]Value, error) { - m.incAccess() - - m.mu.RLock() - defer m.mu.RUnlock() - - if int64(len(m.b)) < entry.Offset+int64(entry.Size) { - return nil, ErrTSMClosed - } - //TODO: Validate checksum - var err error - values, err = DecodeBlock(m.b[entry.Offset+4:entry.Offset+int64(entry.Size)], values) - if err != nil { - return nil, err - } - - return values, nil -} - -func (m *mmapAccessor) readBytes(entry *IndexEntry, b []byte) (uint32, []byte, error) { - m.incAccess() - - m.mu.RLock() - if int64(len(m.b)) < entry.Offset+int64(entry.Size) { - m.mu.RUnlock() - return 0, nil, ErrTSMClosed - } - - // return the bytes after the 4 byte checksum - crc, block := binary.BigEndian.Uint32(m.b[entry.Offset:entry.Offset+4]), m.b[entry.Offset+4:entry.Offset+int64(entry.Size)] - m.mu.RUnlock() - - return crc, block, nil -} - -// readAll returns all values for a key in all blocks. -func (m *mmapAccessor) readAll(key []byte) ([]Value, error) { - m.incAccess() - - blocks := m.index.Entries(key) - if len(blocks) == 0 { - return nil, nil - } - - tombstones := m.index.TombstoneRange(key) - - m.mu.RLock() - defer m.mu.RUnlock() - - var temp []Value - var err error - var values []Value - for _, block := range blocks { - var skip bool - for _, t := range tombstones { - // Should we skip this block because it contains points that have been deleted - if t.Min <= block.MinTime && t.Max >= block.MaxTime { - skip = true - break - } - } - - if skip { - continue - } - //TODO: Validate checksum - temp = temp[:0] - // The +4 is the 4 byte checksum length - temp, err = DecodeBlock(m.b[block.Offset+4:block.Offset+int64(block.Size)], temp) - if err != nil { - return nil, err - } - - // Filter out any values that were deleted - for _, t := range tombstones { - temp = Values(temp).Exclude(t.Min, t.Max) - } - - values = append(values, temp...) 
- } - - return values, nil -} - -func (m *mmapAccessor) path() string { - m.mu.RLock() - path := m.f.Name() - m.mu.RUnlock() - return path -} - -func (m *mmapAccessor) close() error { - m.mu.Lock() - defer m.mu.Unlock() - - if m.b == nil { - return nil - } - - err := munmap(m.b) - if err != nil { - return err - } - - m.b = nil - return m.f.Close() -} - -type indexEntries struct { - Type byte - entries []IndexEntry -} - -func (a *indexEntries) Len() int { return len(a.entries) } -func (a *indexEntries) Swap(i, j int) { a.entries[i], a.entries[j] = a.entries[j], a.entries[i] } -func (a *indexEntries) Less(i, j int) bool { - return a.entries[i].MinTime < a.entries[j].MinTime -} - -func (a *indexEntries) MarshalBinary() ([]byte, error) { - buf := make([]byte, len(a.entries)*indexEntrySize) - - for i, entry := range a.entries { - entry.AppendTo(buf[indexEntrySize*i:]) - } - - return buf, nil -} - -func (a *indexEntries) WriteTo(w io.Writer) (total int64, err error) { - var buf [indexEntrySize]byte - var n int - - for _, entry := range a.entries { - entry.AppendTo(buf[:]) - n, err = w.Write(buf[:]) - total += int64(n) - if err != nil { - return total, err - } - } - - return total, nil -} - -func readKey(b []byte) (n int, key []byte) { - // 2 byte size of key - n, size := 2, int(binary.BigEndian.Uint16(b[:2])) - - // N byte key - key = b[n : n+size] - - n += len(key) - return -} - -func readEntries(b []byte, entries *indexEntries) (n int, err error) { - if len(b) < 1+indexCountSize { - return 0, fmt.Errorf("readEntries: data too short for headers") - } - - // 1 byte block type - entries.Type = b[n] - n++ - - // 2 byte count of index entries - count := int(binary.BigEndian.Uint16(b[n : n+indexCountSize])) - n += indexCountSize - - if cap(entries.entries) < count { - entries.entries = make([]IndexEntry, count) - } else { - entries.entries = entries.entries[:count] - } - - b = b[indexCountSize+indexTypeSize:] - for i := 0; i < len(entries.entries); i++ { - if err = entries.entries[i].UnmarshalBinary(b); err != nil { - return 0, fmt.Errorf("readEntries: unmarshal error: %v", err) - } - b = b[indexEntrySize:] - } - - n += count * indexEntrySize - - return -} diff --git a/tsdb/engine/tsm1/reader_test.go b/tsdb/engine/tsm1/reader_test.go deleted file mode 100644 index 2c61e116f5d..00000000000 --- a/tsdb/engine/tsm1/reader_test.go +++ /dev/null @@ -1,2005 +0,0 @@ -package tsm1 - -import ( - "fmt" - "math" - "os" - "path/filepath" - "sort" - "strings" - "testing" - - "github.com/stretchr/testify/require" -) - -func fatal(t *testing.T, msg string, err error) { - t.Fatalf("unexpected error %v: %v", msg, err) -} - -func TestTSMReader_Type(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - values := []Value{NewValue(0, int64(1))} - if err := w.Write([]byte("cpu"), values); err != nil { - t.Fatalf("unexpected error writing: %v", err) - - } - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error opening: %v", err) - } - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - t.Cleanup(func() { r.Close() }) - - typ, err := r.Type([]byte("cpu")) - if err != nil { - fatal(t, "reading type", err) - } - - if got, exp := typ, BlockInteger; 
got != exp { - t.Fatalf("type mismatch: got %v, exp %v", got, exp) - } -} - -func TestTSMReader_MMAP_ReadAll(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - var data = map[string][]Value{ - "float": {NewValue(1, 1.0)}, - "int": {NewValue(1, int64(1))}, - "uint": {NewValue(1, ^uint64(0))}, - "bool": {NewValue(1, true)}, - "string": {NewValue(1, "foo")}, - } - - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - if err := w.Write([]byte(k), data[k]); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - var count int - for k, vals := range data { - readValues, err := r.ReadAll([]byte(k)) - if err != nil { - t.Fatalf("unexpected error readin: %v", err) - } - - if exp := len(vals); exp != len(readValues) { - t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), exp) - } - - for i, v := range vals { - if v.Value() != readValues[i].Value() { - t.Fatalf("read value mismatch(%d): got %v, exp %d", i, readValues[i].Value(), v.Value()) - } - } - count++ - } - - if got, exp := count, len(data); got != exp { - t.Fatalf("read values count mismatch: got %v, exp %v", got, exp) - } -} - -func TestTSMReader_MMAP_Read(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - var data = map[string][]Value{ - "float": { - NewValue(1, 1.0)}, - "int": { - NewValue(1, int64(1))}, - "uint": { - NewValue(1, ^uint64(0))}, - "bool": { - NewValue(1, true)}, - "string": { - NewValue(1, "foo")}, - } - - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - if err := w.Write([]byte(k), data[k]); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - var count int - for k, vals := range data { - readValues, err := r.Read([]byte(k), vals[0].UnixNano()) - if err != nil { - t.Fatalf("unexpected error readin: %v", err) - } - - if exp := len(vals); exp != len(readValues) { - t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), exp) - } - - for i, v := range vals { - if v.Value() != readValues[i].Value() { - t.Fatalf("read value mismatch(%d): got %v, exp %d", i, readValues[i].Value(), v.Value()) - } - } - count++ - } - - if got, exp := count, len(data); got != exp { - t.Fatalf("read values count mismatch: got %v, exp %v", got, exp) - } -} - -func TestTSMReader_MMAP_Keys(t *testing.T) 
{ - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - var data = map[string][]Value{ - "float": { - NewValue(1, 1.0)}, - "int": { - NewValue(1, int64(1))}, - "uint": { - NewValue(1, ^uint64(0))}, - "bool": { - NewValue(1, true)}, - "string": { - NewValue(1, "foo")}, - } - - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - if err := w.Write([]byte(k), data[k]); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - var count int - for k, vals := range data { - readValues, err := r.Read([]byte(k), vals[0].UnixNano()) - if err != nil { - t.Fatalf("unexpected error readin: %v", err) - } - - if exp := len(vals); exp != len(readValues) { - t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), exp) - } - - for i, v := range vals { - if v.Value() != readValues[i].Value() { - t.Fatalf("read value mismatch(%d): got %v, exp %d", i, readValues[i].Value(), v.Value()) - } - } - count++ - } - - if got, exp := count, len(data); got != exp { - t.Fatalf("read values count mismatch: got %v, exp %v", got, exp) - } -} - -func TestTSMReader_MMAP_Tombstone(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - t.Cleanup(func() { f.Close() }) - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - values := []Value{NewValue(0, 1.0)} - if err := w.Write([]byte("cpu"), values); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.Write([]byte("mem"), values); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r1, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - t.Cleanup(func() { r1.Close() }) - - if err := r1.Delete([][]byte{[]byte("mem")}); err != nil { - t.Fatalf("unexpected error deleting: %v", err) - } - - r2, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - t.Cleanup(func() { r2.Close() }) - - if got, exp := r2.KeyCount(), 1; got != exp { - t.Fatalf("key length mismatch: got %v, exp %v", got, exp) - } -} - -func TestTSMReader_MMAP_TombstoneRange(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - expValues := []Value{ - NewValue(1, 1.0), - NewValue(2, 2.0), - NewValue(3, 3.0), - } - if err := w.Write([]byte("cpu"), expValues); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := 
w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - - if err := r.DeleteRange([][]byte{[]byte("cpu")}, 2, math.MaxInt64); err != nil { - t.Fatalf("unexpected error deleting: %v", err) - } - defer r.Close() - - if got, exp := r.ContainsValue([]byte("cpu"), 1), true; got != exp { - t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp) - } - - if got, exp := r.ContainsValue([]byte("cpu"), 3), false; got != exp { - t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp) - } - - values, err := r.ReadAll([]byte("cpu")) - if err != nil { - t.Fatalf("unexpected error reading all: %v", err) - } - - if got, exp := len(values), 1; got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := values[0].String(), expValues[0].String(); got != exp { - t.Fatalf("value mismatch: got %v, exp %v", got, exp) - } -} - -func TestTSMReader_MMAP_TombstoneOutsideTimeRange(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - expValues := []Value{ - NewValue(1, 1.0), - NewValue(2, 2.0), - NewValue(3, 3.0), - } - if err := w.Write([]byte("cpu"), expValues); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - - if err := r.DeleteRange([][]byte{[]byte("cpu")}, 0, 0); err != nil { - t.Fatalf("unexpected error deleting: %v", err) - } - defer r.Close() - - if got, exp := r.ContainsValue([]byte("cpu"), 1), true; got != exp { - t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp) - } - - if got, exp := r.ContainsValue([]byte("cpu"), 2), true; got != exp { - t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp) - } - - if got, exp := r.ContainsValue([]byte("cpu"), 3), true; got != exp { - t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp) - } - - if got, exp := r.HasTombstones(), false; got != exp { - t.Fatalf("HasTombstones mismatch: got %v, exp %v", got, exp) - } - - require.False(t, r.TombstoneStats().TombstoneExists) -} - -func TestTSMReader_MMAP_TombstoneOutsideKeyRange(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - expValues := []Value{ - NewValue(1, 1.0), - NewValue(2, 2.0), - NewValue(3, 3.0), - } - if err := w.Write([]byte("cpu"), expValues); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - - if err := 
r.DeleteRange([][]byte{[]byte("mem")}, 0, 3); err != nil { - t.Fatalf("unexpected error deleting: %v", err) - } - defer r.Close() - - if got, exp := r.ContainsValue([]byte("cpu"), 1), true; got != exp { - t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp) - } - - if got, exp := r.ContainsValue([]byte("cpu"), 2), true; got != exp { - t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp) - } - - if got, exp := r.ContainsValue([]byte("cpu"), 3), true; got != exp { - t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp) - } - - if got, exp := r.HasTombstones(), false; got != exp { - t.Fatalf("HasTombstones mismatch: got %v, exp %v", got, exp) - } - - require.False(t, r.TombstoneStats().TombstoneExists) -} - -func TestTSMReader_MMAP_TombstoneOverlapKeyRange(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - expValues := []Value{ - NewValue(1, 1.0), - NewValue(2, 2.0), - NewValue(3, 3.0), - } - if err := w.Write([]byte("cpu,app=foo,host=server-0#!~#value"), expValues); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.Write([]byte("cpu,app=foo,host=server-73379#!~#value"), expValues); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - - if err := r.DeleteRange([][]byte{ - []byte("cpu,app=foo,host=server-0#!~#value"), - []byte("cpu,app=foo,host=server-73379#!~#value"), - []byte("cpu,app=foo,host=server-99999#!~#value")}, - math.MinInt64, math.MaxInt64); err != nil { - t.Fatalf("unexpected error deleting: %v", err) - } - defer r.Close() - - if got, exp := r.Contains([]byte("cpu,app=foo,host=server-0#!~#value")), false; got != exp { - t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp) - } - - if got, exp := r.Contains([]byte("cpu,app=foo,host=server-73379#!~#value")), false; got != exp { - t.Fatalf("ContainsValue mismatch: got %v, exp %v", got, exp) - } - - if got, exp := r.HasTombstones(), true; got != exp { - t.Fatalf("HasTombstones mismatch: got %v, exp %v", got, exp) - } - - require.True(t, r.TombstoneStats().TombstoneExists) -} - -func TestTSMReader_MMAP_TombstoneFullRange(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - expValues := []Value{ - NewValue(1, 1.0), - NewValue(2, 2.0), - NewValue(3, 3.0), - } - if err := w.Write([]byte("cpu"), expValues); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - - if err := r.DeleteRange([][]byte{[]byte("cpu")}, math.MinInt64, math.MaxInt64); err != nil { - t.Fatalf("unexpected error deleting: 
%v", err) - } - defer r.Close() - - values, err := r.ReadAll([]byte("cpu")) - if err != nil { - t.Fatalf("unexpected error reading all: %v", err) - } - - if got, exp := len(values), 0; got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } -} - -func TestTSMReader_MMAP_TombstoneFullRangeMultiple(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - expValues := []Value{ - NewValue(1, 1.0), - NewValue(2, 2.0), - NewValue(3, 3.0), - } - if err := w.Write([]byte("cpu"), expValues); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - expValues1 := []Value{ - NewValue(3, 1.0), - NewValue(4, 2.0), - NewValue(5, 3.0), - } - - if err := w.Write([]byte("mem"), expValues1); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - if err := r.DeleteRange([][]byte{[]byte("mem"), []byte("cpu")}, 0, 3); err != nil { - t.Fatalf("unexpected error deleting: %v", err) - } - - // Make sure everything is deleted - values, err := r.ReadAll([]byte("cpu")) - if err != nil { - t.Fatalf("unexpected error reading all: %v", err) - } - - if got, exp := len(values), 0; got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } - - values, err = r.ReadAll([]byte("mem")) - if err != nil { - t.Fatalf("unexpected error reading all: %v", err) - } - - if got, exp := len(values), 2; got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } -} - -func TestTSMReader_MMAP_TombstoneMultipleRanges(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - expValues := []Value{ - NewValue(1, 1.0), - NewValue(2, 2.0), - NewValue(3, 3.0), - NewValue(4, 4.0), - NewValue(5, 5.0), - } - if err := w.Write([]byte("cpu"), expValues); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - if err := r.DeleteRange([][]byte{[]byte("cpu")}, 2, 2); err != nil { - t.Fatalf("unexpected error deleting: %v", err) - } - - if err := r.DeleteRange([][]byte{[]byte("cpu")}, 4, 4); err != nil { - t.Fatalf("unexpected error deleting: %v", err) - } - - values, err := r.ReadAll([]byte("cpu")) - if err != nil { - t.Fatalf("unexpected error reading all: %v", err) - } - - if got, exp := len(values), 3; got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } -} - -func TestTSMReader_MMAP_TombstoneMultipleRangesFull(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { 
- t.Fatalf("unexpected error creating writer: %v", err) - } - - expValues := []Value{ - NewValue(1, 1.0), - NewValue(2, 2.0), - } - if err := w.Write([]byte("cpu"), expValues); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - if err := r.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil { - t.Fatalf("unexpected error deleting: %v", err) - } - - if err := r.DeleteRange([][]byte{[]byte("cpu")}, 2, 2); err != nil { - t.Fatalf("unexpected error deleting: %v", err) - } - - if got, exp := r.KeyCount(), 0; got != exp { - t.Fatalf("key count mismatch: got %v, exp %v", got, exp) - } - - values, err := r.ReadAll([]byte("cpu")) - if err != nil { - t.Fatalf("unexpected error reading all: %v", err) - } - - if got, exp := len(values), 0; got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } -} - -func TestTSMReader_MMAP_TombstoneMultipleRangesNoOverlap(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - expValues := []Value{ - NewValue(1, 1.0), - NewValue(2, 2.0), - NewValue(3, 2.0), - } - if err := w.Write([]byte("cpu"), expValues); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - if err := r.DeleteRange([][]byte{[]byte("cpu")}, 1, 1); err != nil { - t.Fatalf("unexpected error deleting: %v", err) - } - - if err := r.DeleteRange([][]byte{[]byte("cpu")}, 3, 3); err != nil { - t.Fatalf("unexpected error deleting: %v", err) - } - - if got, exp := r.KeyCount(), 1; got != exp { - t.Fatalf("key count mismatch: got %v, exp %v", got, exp) - } - - values, err := r.ReadAll([]byte("cpu")) - if err != nil { - t.Fatalf("unexpected error reading all: %v", err) - } - - if got, exp := len(values), 1; got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } -} - -func TestTSMReader_MMAP_TombstoneOutsideRange(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - cpuValues := []Value{ - NewValue(1, 1.0), - NewValue(2, 2.0), - NewValue(3, 3.0), - } - if err := w.Write([]byte("cpu"), cpuValues); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - memValues := []Value{ - NewValue(1, 1.0), - NewValue(2, 2.0), - NewValue(30, 3.0), - } - if err := w.Write([]byte("mem"), memValues); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", 
err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - - if err := r.DeleteRange([][]byte{[]byte("cpu"), []byte("mem")}, 5, math.MaxInt64); err != nil { - t.Fatalf("unexpected error deleting: %v", err) - } - defer r.Close() - - if got, exp := r.KeyCount(), 2; got != exp { - t.Fatalf("key count mismatch: got %v, exp %v", got, exp) - } - - if got, exp := len(r.TombstoneRange([]byte("cpu"))), 0; got != exp { - t.Fatalf("tombstone range mismatch: got %v, exp %v", got, exp) - } - - values, err := r.ReadAll([]byte("cpu")) - if err != nil { - t.Fatalf("unexpected error reading all: %v", err) - } - - if got, exp := len(values), len(cpuValues); got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := len(r.TombstoneRange([]byte("mem"))), 1; got != exp { - t.Fatalf("tombstone range mismatch: got %v, exp %v", got, exp) - } - - values, err = r.ReadAll([]byte("mem")) - if err != nil { - t.Fatalf("unexpected error reading all: %v", err) - } - - if got, exp := len(values), len(memValues[:2]); got != exp { - t.Fatalf("values length mismatch: got %v, exp %v", got, exp) - } - -} - -func TestTSMReader_MMAP_Stats(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - values1 := []Value{NewValue(0, 1.0)} - if err := w.Write([]byte("cpu"), values1); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - values2 := []Value{NewValue(1, 1.0)} - if err := w.Write([]byte("mem"), values2); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - stats := r.Stats() - if got, exp := string(stats.MinKey), "cpu"; got != exp { - t.Fatalf("min key mismatch: got %v, exp %v", got, exp) - } - - if got, exp := string(stats.MaxKey), "mem"; got != exp { - t.Fatalf("max key mismatch: got %v, exp %v", got, exp) - } - - if got, exp := stats.MinTime, values1[0].UnixNano(); got != exp { - t.Fatalf("min time mismatch: got %v, exp %v", got, exp) - } - - if got, exp := stats.MaxTime, values2[0].UnixNano(); got != exp { - t.Fatalf("max time mismatch: got %v, exp %v", got, exp) - } - - if got, exp := r.KeyCount(), 2; got != exp { - t.Fatalf("key length mismatch: got %v, exp %v", got, exp) - } -} - -// Ensure that we return an error if we try to open a non-tsm file -func TestTSMReader_VerifiesFileType(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - // write some garbage - f.Write([]byte{0x23, 0xac, 0x99, 0x22, 0x77, 0x23, 0xac, 0x99, 0x22, 0x77, 0x23, 0xac, 0x99, 0x22, 0x77, 0x23, 0xac, 0x99, 0x22, 0x77}) - - _, err := NewTSMReader(f) - if err == nil { - t.Fatal("expected error trying to open non-tsm file") - } -} - -func TestIndirectIndex_Entries(t *testing.T) { - index := NewIndexWriter() - index.Add([]byte("cpu"), BlockFloat64, 0, 1, 10, 100) - index.Add([]byte("cpu"), BlockFloat64, 2, 3, 20, 200) - exp := 
index.Entries([]byte("cpu")) - - index.Add([]byte("mem"), BlockFloat64, 0, 1, 10, 100) - - b, err := index.MarshalBinary() - if err != nil { - t.Fatalf("unexpected error marshaling index: %v", err) - } - - indirect := NewIndirectIndex() - if err := indirect.UnmarshalBinary(b); err != nil { - t.Fatalf("unexpected error unmarshalling index: %v", err) - } - - entries := indirect.Entries([]byte("cpu")) - - if got, exp := len(entries), len(exp); got != exp { - t.Fatalf("entries length mismatch: got %v, exp %v", got, exp) - } - - for i, exp := range exp { - got := entries[i] - if exp.MinTime != got.MinTime { - t.Fatalf("minTime mismatch: got %v, exp %v", got.MinTime, exp.MinTime) - } - - if exp.MaxTime != got.MaxTime { - t.Fatalf("minTime mismatch: got %v, exp %v", got.MaxTime, exp.MaxTime) - } - - if exp.Size != got.Size { - t.Fatalf("size mismatch: got %v, exp %v", got.Size, exp.Size) - } - if exp.Offset != got.Offset { - t.Fatalf("size mismatch: got %v, exp %v", got.Offset, exp.Offset) - } - } -} - -func TestIndirectIndex_Entries_NonExistent(t *testing.T) { - index := NewIndexWriter() - index.Add([]byte("cpu"), BlockFloat64, 0, 1, 10, 100) - index.Add([]byte("cpu"), BlockFloat64, 2, 3, 20, 200) - - b, err := index.MarshalBinary() - if err != nil { - t.Fatalf("unexpected error marshaling index: %v", err) - } - - indirect := NewIndirectIndex() - if err := indirect.UnmarshalBinary(b); err != nil { - t.Fatalf("unexpected error unmarshalling index: %v", err) - } - - // mem has not been added to the index so we should get no entries back - // for both - exp := index.Entries([]byte("mem")) - entries := indirect.Entries([]byte("mem")) - - if got, exp := len(entries), len(exp); got != exp && exp != 0 { - t.Fatalf("entries length mismatch: got %v, exp %v", got, exp) - } -} - -func TestIndirectIndex_MaxBlocks(t *testing.T) { - index := NewIndexWriter() - for i := 0; i < 1<<16; i++ { - index.Add([]byte("cpu"), BlockFloat64, 0, 1, 10, 20) - } - - if _, err := index.MarshalBinary(); err == nil { - t.Fatalf("expected max block count error. 
got nil") - } else { - println(err.Error()) - } -} - -func TestIndirectIndex_Type(t *testing.T) { - index := NewIndexWriter() - index.Add([]byte("cpu"), BlockInteger, 0, 1, 10, 20) - - b, err := index.MarshalBinary() - if err != nil { - t.Fatal(err) - } - - ind := NewIndirectIndex() - if err := ind.UnmarshalBinary(b); err != nil { - fatal(t, "unmarshal binary", err) - } - - typ, err := ind.Type([]byte("cpu")) - if err != nil { - fatal(t, "reading type", err) - } - - if got, exp := typ, BlockInteger; got != exp { - t.Fatalf("type mismatch: got %v, exp %v", got, exp) - } -} - -func TestDirectIndex_KeyCount(t *testing.T) { - index := NewIndexWriter() - index.Add([]byte("cpu"), BlockFloat64, 0, 1, 10, 20) - index.Add([]byte("cpu"), BlockFloat64, 1, 2, 20, 30) - index.Add([]byte("mem"), BlockFloat64, 0, 1, 10, 20) - - // 2 distinct keys - if got, exp := index.KeyCount(), 2; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } -} - -func TestBlockIterator_Single(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - values := []Value{NewValue(0, int64(1))} - if err := w.Write([]byte("cpu"), values); err != nil { - t.Fatalf("unexpected error writing: %v", err) - - } - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - fd, err := os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error opening: %v", err) - } - - r, err := NewTSMReader(fd) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - t.Cleanup(func() { r.Close() }) - - var count int - iter := r.BlockIterator() - for iter.Next() { - key, minTime, maxTime, typ, _, buf, err := iter.Read() - if err != nil { - t.Fatalf("unexpected error creating iterator: %v", err) - } - - if got, exp := string(key), "cpu"; got != exp { - t.Fatalf("key mismatch: got %v, exp %v", got, exp) - } - - if got, exp := minTime, int64(0); got != exp { - t.Fatalf("min time mismatch: got %v, exp %v", got, exp) - } - - if got, exp := maxTime, int64(0); got != exp { - t.Fatalf("max time mismatch: got %v, exp %v", got, exp) - } - - if got, exp := typ, BlockInteger; got != exp { - t.Fatalf("block type mismatch: got %v, exp %v", got, exp) - } - - if len(buf) == 0 { - t.Fatalf("buf length = 0") - } - - count++ - } - - if got, exp := count, len(values); got != exp { - t.Fatalf("value count mismatch: got %v, exp %v", got, exp) - } -} - -func TestBlockIterator_Tombstone(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - values := []Value{NewValue(0, int64(1))} - if err := w.Write([]byte("cpu"), values); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.Write([]byte("mem"), values); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - fd, err := os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error opening: %v", err) - } - - r, err := NewTSMReader(fd) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - t.Cleanup(func() { r.Close() }) - - iter := r.BlockIterator() - for iter.Next() { - // Trigger 
a delete during iteration. This should cause an error condition for - // the BlockIterator - r.Delete([][]byte{[]byte("cpu")}) - } - - if iter.Err() == nil { - t.Fatalf("expected error: got nil") - } -} - -func TestBlockIterator_MultipleBlocks(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - values1 := []Value{NewValue(0, int64(1))} - if err := w.Write([]byte("cpu"), values1); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - values2 := []Value{NewValue(1, int64(2))} - if err := w.Write([]byte("cpu"), values2); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - fd, err := os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error opening: %v", err) - } - - r, err := NewTSMReader(fd) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - t.Cleanup(func() { r.Close() }) - - var count int - expData := []Values{values1, values2} - iter := r.BlockIterator() - var i int - for iter.Next() { - key, minTime, maxTime, typ, _, buf, err := iter.Read() - - if err != nil { - t.Fatalf("unexpected error creating iterator: %v", err) - } - - if got, exp := string(key), "cpu"; got != exp { - t.Fatalf("key mismatch: got %v, exp %v", got, exp) - } - - if got, exp := minTime, expData[i][0].UnixNano(); got != exp { - t.Fatalf("min time mismatch: got %v, exp %v", got, exp) - } - - if got, exp := maxTime, expData[i][0].UnixNano(); got != exp { - t.Fatalf("max time mismatch: got %v, exp %v", got, exp) - } - - if got, exp := typ, BlockInteger; got != exp { - t.Fatalf("block type mismatch: got %v, exp %v", got, exp) - } - - if len(buf) == 0 { - t.Fatalf("buf length = 0") - } - - count++ - i++ - } - - if got, exp := count, 2; got != exp { - t.Fatalf("value count mismatch: got %v, exp %v", got, exp) - } -} - -func TestBlockIterator_Sorted(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - values := map[string][]Value{ - "mem": {NewValue(0, int64(1))}, - "cycles": {NewValue(0, ^uint64(0))}, - "cpu": {NewValue(1, float64(2))}, - "disk": {NewValue(1, true)}, - "load": {NewValue(1, "string")}, - } - - keys := make([]string, 0, len(values)) - for k := range values { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - if err := w.Write([]byte(k), values[k]); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - fd, err := os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error opening: %v", err) - } - - r, err := NewTSMReader(fd) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - t.Cleanup(func() { r.Close() }) - - var count int - iter := r.BlockIterator() - var lastKey string - for iter.Next() { - key, _, _, _, _, buf, err := iter.Read() - - if string(key) < lastKey { - t.Fatalf("keys not sorted: got %v, last %v", key, lastKey) - } - - lastKey = string(key) - - if err != nil { - t.Fatalf("unexpected error creating iterator: %v", err) - } - - if len(buf) == 0 { - 
t.Fatalf("buf length = 0") - } - - count++ - } - - if got, exp := count, len(values); got != exp { - t.Fatalf("value count mismatch: got %v, exp %v", got, exp) - } -} - -func TestIndirectIndex_UnmarshalBinary_BlockCountOverflow(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - for i := 0; i < 3280; i++ { - w.Write([]byte("cpu"), []Value{NewValue(int64(i), float64(i))}) - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() -} - -func TestCompacted_NotFull(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - values := []Value{NewValue(0, 1.0)} - if err := w.Write([]byte("cpu"), values); err != nil { - t.Fatalf("unexpected error writing: %v", err) - - } - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - fd, err := os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(fd) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - t.Cleanup(func() { r.Close() }) - - iter := r.BlockIterator() - if !iter.Next() { - t.Fatalf("expected next, got false") - } - - _, _, _, _, _, block, err := iter.Read() - if err != nil { - t.Fatalf("unexpected error reading block: %v", err) - } - - cnt, err := BlockCount(block) - if err != nil { - t.Fatalf("Block is corrupted: %v", err) - } - if got, exp := cnt, 1; got != exp { - t.Fatalf("block count mismatch: got %v, exp %v", got, exp) - } -} - -func TestTSMReader_File_ReadAll(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - var data = map[string][]Value{ - "float": { - NewValue(1, 1.0)}, - "int": { - NewValue(1, int64(1))}, - "uint": { - NewValue(1, ^uint64(0))}, - "bool": { - NewValue(1, true)}, - "string": { - NewValue(1, "foo")}, - } - - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - if err := w.Write([]byte(k), data[k]); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - var count int - for k, vals := range data { - readValues, err := r.ReadAll([]byte(k)) - if err != nil { - t.Fatalf("unexpected error reading: %v", err) - } - - if exp := len(vals); exp != len(readValues) { - t.Fatalf("read values length mismatch: exp %v, got %v", exp, len(readValues)) - } - - 
for i, v := range vals { - if exp, got := v.Value(), readValues[i].Value(); exp != got { - t.Fatalf("read value mismatch(%d): exp %v, got %d", i, v.Value(), readValues[i].Value()) - } - } - count++ - } - - if exp, got := len(data), count; exp != got { - t.Fatalf("read values count mismatch: exp %v, got %v", exp, got) - } -} - -func TestTSMReader_FuzzCrashes(t *testing.T) { - cases := []string{ - "", - "\x16\xd1\x16\xd1\x01\x10\x14X\xfb\x03\xac~\x80\xf0\x00\x00\x00I^K" + - "_\xf0\x00\x00\x00D424259389w\xf0\x00\x00\x00" + - "o\x93\bO\x10?\xf0\x00\x00\x00\x00\b\x00\xc2_\xff\xd8\x0fX^" + - "/\xbf\xe8\x00\x00\x00\x00\x00\x01\x00\bctr#!~#n\x00" + - "\x00\x01\x14X\xfb\xb0\x03\xac~\x80\x14X\xfb\xb1\x00\xd4ܥ\x00\x00" + - "\x00\x00\x00\x00\x00\x05\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00E", - "\x16\xd1\x16\xd1\x01\x80'Z\\\x00\v)\x00\x00\x00\x00;\x9a\xca\x00" + - "\x01\x05\x10?\xf0\x00\x00\x00\x00\x00\x00\xc2_\xff\xd6\x1d\xd4&\xed\v" + - "\xc5\xf7\xfb\xc0\x00\x00\x00\x00\x00 \x00\x06a#!~#v\x00\x00" + - "\x01\x00\x00\x00\x00;\x9a\xca\x00\x00\x00\x00\x01*\x05\xf2\x00\x00\x00\x00" + - "\x00\x00\x00\x00\x00\x00\x00\x00\x002", - "\x16\xd1\x16\xd1\x01\x80\xf0\x00\x00\x00I^K_\xf0\x00\x00\x00D7" + - "\nw\xf0\x00\x00\x00o\x93\bO\x10?\xf0\x00\x00\x00\x00\x00\x00\xc2" + - "_\xff\x14X\xfb\xb0\x03\xac~\x80\x14X\xfb\xb1\x00\xd4ܥ\x00\x00" + - "\x00\x00\x00\x00\x00\x05\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00E", - "\x16\xd1\x16\xd1\x01000000000000000" + - "00000000000000000000" + - "0000000000\x00\x000\x00\x0100000" + - "000\x00\x00\x00\x00\x00\x00\x002", - "\x16\xd1\x16\xd1\x01", - "\x16\xd1\x16\xd1\x01\x00\x00o\x93\bO\x10?\xf0\x00\x00\x00\x00X^" + - "/\xbf\xe8\x00\x00\x00\x00\x00\x01\x00\bctr#!~#n\x00" + - "\x00\x01\x14X\xfb\xb0\x03\xac~\x80\x14X\xfb\xb1\x00\xd4ܥ\x00\x00" + - "\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00E", - } - - for _, c := range cases { - dir := t.TempDir() - - filename := filepath.Join(dir, "x.tsm") - if err := os.WriteFile(filename, []byte(c), 0600); err != nil { - t.Fatalf("exp no error, got %s", err) - } - - f, err := os.Open(filename) - if err != nil { - t.Fatalf("exp no error, got %s", err) - } - t.Cleanup(func() { f.Close() }) - - r, err := NewTSMReader(f) - if err != nil { - return - } - t.Cleanup(func() { r.Close() }) - - iter := r.BlockIterator() - for iter.Next() { - key, _, _, _, _, _, err := iter.Read() - if err != nil { - return - } - - _, _ = r.Type(key) - - if _, err = r.ReadAll(key); err != nil { - return - } - } - } -} - -func TestTSMReader_File_Read(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - var data = map[string][]Value{ - "float": { - NewValue(1, 1.0)}, - "int": { - NewValue(1, int64(1))}, - "uint": { - NewValue(1, ^uint64(0))}, - "bool": { - NewValue(1, true)}, - "string": { - NewValue(1, "foo")}, - } - - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - if err := w.Write([]byte(k), data[k]); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - 
t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - var count int - for k, vals := range data { - readValues, err := r.Read([]byte(k), vals[0].UnixNano()) - if err != nil { - t.Fatalf("unexpected error readin: %v", err) - } - - if exp, got := len(vals), len(readValues); exp != got { - t.Fatalf("read values length mismatch: exp %v, got %v", exp, len(readValues)) - } - - for i, v := range vals { - if v.Value() != readValues[i].Value() { - t.Fatalf("read value mismatch(%d): exp %v, got %d", i, v.Value(), readValues[i].Value()) - } - } - count++ - } - - if exp, got := count, len(data); exp != got { - t.Fatalf("read values count mismatch: exp %v, got %v", exp, got) - } -} - -func TestTSMReader_References(t *testing.T) { - dir := t.TempDir() - f := mustTempFile(dir) - defer f.Close() - - w, err := NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - var data = map[string][]Value{ - "float": { - NewValue(1, 1.0)}, - "int": { - NewValue(1, int64(1))}, - "uint": { - NewValue(1, ^uint64(0))}, - "bool": { - NewValue(1, true)}, - "string": { - NewValue(1, "foo")}, - } - - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - if err := w.Write([]byte(k), data[k]); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - f, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := NewTSMReader(f) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - r.Ref() - - if err := r.Remove(); err != ErrFileInUse { - t.Fatalf("expected error removing reader: %v", err) - } - - var count int - for k, vals := range data { - readValues, err := r.Read([]byte(k), vals[0].UnixNano()) - if err != nil { - t.Fatalf("unexpected error readin: %v", err) - } - - if exp, got := len(vals), len(readValues); exp != got { - t.Fatalf("read values length mismatch: exp %v, got %v", exp, len(readValues)) - } - - for i, v := range vals { - if v.Value() != readValues[i].Value() { - t.Fatalf("read value mismatch(%d): exp %v, got %d", i, v.Value(), readValues[i].Value()) - } - } - count++ - } - - if exp, got := count, len(data); exp != got { - t.Fatalf("read values count mismatch: exp %v, got %v", exp, got) - } - r.Unref() - - if err := r.Close(); err != nil { - t.Fatalf("unexpected error closing reader: %v", err) - } - - if err := r.Remove(); err != nil { - t.Fatalf("unexpected error removing reader: %v", err) - } -} - -func TestBatchKeyIterator_Errors(t *testing.T) { - const MaxErrors = 10 - - dir, name := createTestTSM(t) - defer os.RemoveAll(dir) - fr, err := os.Open(name) - if err != nil { - t.Fatalf("unexpected error opening file %s: %v", name, err) - } - r, err := NewTSMReader(fr) - if err != nil { - // Only have a deferred close if we could not create the TSMReader - defer func() { - if e := fr.Close(); e != nil { - t.Fatalf("unexpected error closing %s: %v", name, e) - } - }() - - t.Fatalf("unexpected error creating TSMReader for %s: %v", name, err) - } - defer func() { - if e := r.Close(); e != nil { - t.Fatalf("error closing TSMReader for %s: %v", name, e) - } - }() - interrupts := make(chan struct{}) - var iter KeyIterator - if iter, err = NewTSMBatchKeyIterator(3, false, 
MaxErrors, interrupts, []string{name}, r); err != nil { - t.Fatalf("unexpected error creating tsmBatchKeyIterator: %v", err) - } - var i int - for i = 0; i < MaxErrors*2; i++ { - saved := iter.(*tsmBatchKeyIterator).AppendError(fmt.Errorf("fake error: %d", i)) - if i < MaxErrors && !saved { - t.Fatalf("error unexpectedly not saved: %d", i) - } - if i >= MaxErrors && saved { - t.Fatalf("error unexpectedly saved: %d", i) - } - } - errs := iter.Err() - if errCnt := len(errs.(TSMErrors)); errCnt != (MaxErrors + 1) { - t.Fatalf("saved wrong number of errors: expected %d, got %d", MaxErrors, errCnt) - } - expected := fmt.Sprintf("additional errors dropped: %d", i-MaxErrors) - if strings.Compare(errs.(TSMErrors)[MaxErrors].Error(), expected) != 0 { - t.Fatalf("expected: '%s', got: '%s", expected, errs.(TSMErrors)[MaxErrors].Error()) - } -} - -func createTestTSM(t *testing.T) (dir string, name string) { - dir = t.TempDir() - f := mustTempFile(dir) - name = f.Name() - w, err := NewTSMWriter(f) - if err != nil { - f.Close() - t.Fatalf("unexpected error creating writer for %s: %v", name, err) - } - defer func() { - if e := w.Close(); e != nil { - t.Fatalf("write TSM close of %s: %v", name, err) - } - }() - - var data = map[string][]Value{ - "float": {NewValue(1, 1.0)}, - "int": {NewValue(1, int64(1))}, - "uint": {NewValue(1, ^uint64(0))}, - "bool": {NewValue(1, true)}, - "string": {NewValue(1, "foo")}, - } - - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - if err := w.Write([]byte(k), data[k]); err != nil { - t.Fatalf("write TSM value: %v", err) - } - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("write TSM index: %v", err) - } - - return dir, name -} - -func BenchmarkIndirectIndex_UnmarshalBinary(b *testing.B) { - index := NewIndexWriter() - for i := 0; i < 100000; i++ { - index.Add([]byte(fmt.Sprintf("cpu-%d", i)), BlockFloat64, int64(i*2), int64(i*2+1), 10, 100) - } - - bytes, err := index.MarshalBinary() - if err != nil { - b.Fatalf("unexpected error marshaling index: %v", err) - } - - indirect := NewIndirectIndex() - b.ResetTimer() - for i := 0; i < b.N; i++ { - if err := indirect.UnmarshalBinary(bytes); err != nil { - b.Fatalf("unexpected error unmarshalling index: %v", err) - } - } -} - -func mustMakeIndex(tb testing.TB, keys, blocks int) *indirectIndex { - index := NewIndexWriter() - // add 1000 keys and 1000 blocks per key - for i := 0; i < keys; i++ { - for j := 0; j < blocks; j++ { - index.Add([]byte(fmt.Sprintf("cpu-%03d", i)), BlockFloat64, int64(i*j*2), int64(i*j*2+1), 10, 100) - } - } - - bytes, err := index.MarshalBinary() - if err != nil { - tb.Fatalf("unexpected error marshaling index: %v", err) - } - - indirect := NewIndirectIndex() - if err = indirect.UnmarshalBinary(bytes); err != nil { - tb.Fatalf("unexpected error unmarshalling index: %v", err) - } - - return indirect -} - -func BenchmarkIndirectIndex_Entries(b *testing.B) { - indirect := mustMakeIndex(b, 1000, 1000) - b.ResetTimer() - - for i := 0; i < b.N; i++ { - indirect.Entries([]byte("cpu-001")) - } -} - -func BenchmarkIndirectIndex_ReadEntries(b *testing.B) { - var cache []IndexEntry - indirect := mustMakeIndex(b, 1000, 1000) - b.ResetTimer() - - for i := 0; i < b.N; i++ { - indirect.ReadEntries([]byte("cpu-001"), &cache) - } -} - -func BenchmarkBlockIterator_Next(b *testing.B) { - r := TSMReader{index: mustMakeIndex(b, 1000, 1000)} - b.ResetTimer() - - for i := 0; i < b.N; i++ { - bi := r.BlockIterator() - for 
bi.Next() { - } - } -} diff --git a/tsdb/engine/tsm1/ring.go b/tsdb/engine/tsm1/ring.go deleted file mode 100644 index 615eb48f11e..00000000000 --- a/tsdb/engine/tsm1/ring.go +++ /dev/null @@ -1,294 +0,0 @@ -package tsm1 - -import ( - "fmt" - "sync" - - "github.com/cespare/xxhash" - "github.com/influxdata/influxdb/v2/pkg/bytesutil" -) - -// partitions is the number of partitions we used in the ring's continuum. It -// basically defines the maximum number of partitions you can have in the ring. -// If a smaller number of partitions are chosen when creating a ring, then -// they're evenly spread across this many partitions in the ring. -const partitions = 16 - -// ring is a structure that maps series keys to entries. -// -// ring is implemented as a crude hash ring, in so much that you can have -// variable numbers of members in the ring, and the appropriate member for a -// given series key can always consistently be found. Unlike a true hash ring -// though, this ring is not resizeable—there must be at most 16 members in the -// ring, and the number of members must always be a power of 2. -// -// ring works as follows: Each member of the ring contains a single store, which -// contains a map of series keys to entries. A ring always has 16 partitions, -// and a member takes up one or more of these partitions (depending on how many -// members are specified to be in the ring) -// -// To determine the partition that a series key should be added to, the series -// key is hashed and the first 8 bits are used as an index to the ring. -type ring struct { - // The unique set of partitions in the ring. - // len(partitions) <= len(continuum) - partitions []*partition -} - -// newring returns a new ring initialised with n partitions. n must always be a -// power of 2, and for performance reasons should be larger than the number of -// cores on the host. The supported set of values for n is: -// -// {1, 2, 4, 8, 16}. -func newring(n int) (*ring, error) { - if n <= 0 || n > partitions { - return nil, fmt.Errorf("invalid number of partitions: %d", n) - } - if n&(n-1) != 0 { - return nil, fmt.Errorf("partitions %d is not a power of two", n) - } - - r := ring{ - partitions: make([]*partition, n), // maximum number of partitions. - } - - // The trick here is to map N partitions to all points on the continuum, - // such that the first eight bits of a given hash will map directly to one - // of the N partitions. - for i := 0; i < len(r.partitions); i++ { - r.partitions[i] = &partition{ - store: make(map[string]*entry), - } - } - return &r, nil -} - -// reset resets the ring so it can be reused. Before removing references to entries -// within each partition it gathers sizing information to provide hints when -// reallocating entries in partition maps. -// -// reset is not safe for use by multiple goroutines. -func (r *ring) reset() { - for _, partition := range r.partitions { - partition.reset() - } -} - -// getPartition retrieves the hash ring partition associated with the provided -// key. -func (r *ring) getPartition(key []byte) *partition { - return r.partitions[int(xxhash.Sum64(key)%uint64(len(r.partitions)))] -} - -// entry returns the entry for the given key. -// entry is safe for use by multiple goroutines. -func (r *ring) entry(key []byte) *entry { - return r.getPartition(key).entry(key) -} - -// write writes values to the entry in the ring's partition associated with key. -// If no entry exists for the key then one will be created. -// write is safe for use by multiple goroutines. 
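For orientation, here is a minimal, hedged sketch of the key-to-partition mapping the ring comments above describe: the series key is hashed and reduced onto a fixed, power-of-two partition set, so the same key always resolves to the same partition. The import path, constant, and helper name below are illustrative assumptions, not the deleted package's API.

```go
// Illustrative sketch only: consistent partition lookup for a series key,
// mirroring the getPartition logic in the deleted ring.go above.
package main

import (
	"fmt"

	"github.com/cespare/xxhash" // assumed canonical path; the diff vendors it as github.com/cespare/xxhash
)

const numPartitions = 16 // the ring above is capped at 16 partitions, always a power of two

// partitionFor hashes the series key and reduces it onto the partition set.
// Because the hash is stable, a given key always lands in the same partition.
func partitionFor(key []byte) int {
	return int(xxhash.Sum64(key) % numPartitions)
}

func main() {
	for _, k := range []string{"cpu,host=a", "cpu,host=b", "mem,host=a"} {
		fmt.Printf("%-12s -> partition %d\n", k, partitionFor([]byte(k)))
	}
}
```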
-func (r *ring) write(key []byte, values Values) (bool, error) { - return r.getPartition(key).write(key, values) -} - -// remove deletes the entry for the given key. -// remove is safe for use by multiple goroutines. -func (r *ring) remove(key []byte) { - r.getPartition(key).remove(key) -} - -// keys returns all the keys from all partitions in the hash ring. The returned -// keys will be in order if sorted is true. -func (r *ring) keys(sorted bool) [][]byte { - keys := make([][]byte, 0) - for _, p := range r.partitions { - keys = append(keys, p.keys()...) - } - - if sorted { - bytesutil.Sort(keys) - } - return keys -} - -// count returns the number of values in the ring -// count is not accurate since it doesn't use read lock when iterating over partitions -func (r *ring) count() int { - var n int - for _, p := range r.partitions { - n += p.count() - } - return n -} - -// apply applies the provided function to every entry in the ring under a read -// lock using a separate goroutine for each partition. The provided function -// will be called with each key and the corresponding entry. The first error -// encountered will be returned, if any. apply is safe for use by multiple -// goroutines. -func (r *ring) apply(f func([]byte, *entry) error) error { - - var ( - wg sync.WaitGroup - res = make(chan error, len(r.partitions)) - ) - - for _, p := range r.partitions { - wg.Add(1) - - go func(p *partition) { - defer wg.Done() - - p.mu.RLock() - for k, e := range p.store { - if err := f([]byte(k), e); err != nil { - res <- err - p.mu.RUnlock() - return - } - } - p.mu.RUnlock() - }(p) - } - - go func() { - wg.Wait() - close(res) - }() - - // Collect results. - for err := range res { - if err != nil { - return err - } - } - return nil -} - -// applySerial is similar to apply, but invokes f on each partition in the same -// goroutine. -// apply is safe for use by multiple goroutines. -func (r *ring) applySerial(f func([]byte, *entry) error) error { - for _, p := range r.partitions { - p.mu.RLock() - for k, e := range p.store { - if e.count() == 0 { - continue - } - if err := f([]byte(k), e); err != nil { - p.mu.RUnlock() - return err - } - } - p.mu.RUnlock() - } - return nil -} - -func (r *ring) split(n int) []storer { - storers := make([]storer, n) - for i := 0; i < n; i++ { - storers[i], _ = newring(len(r.partitions)) - } - - for i, p := range r.partitions { - r := storers[i%n].(*ring) - r.partitions[i] = p - } - return storers -} - -// partition provides safe access to a map of series keys to entries. -type partition struct { - mu sync.RWMutex - store map[string]*entry -} - -// entry returns the partition's entry for the provided key. -// It's safe for use by multiple goroutines. -func (p *partition) entry(key []byte) *entry { - p.mu.RLock() - e := p.store[string(key)] - p.mu.RUnlock() - return e -} - -// write writes the values to the entry in the partition, creating the entry -// if it does not exist. -// write is safe for use by multiple goroutines. -func (p *partition) write(key []byte, values Values) (bool, error) { - p.mu.RLock() - e := p.store[string(key)] - p.mu.RUnlock() - if e != nil { - // Hot path. - return false, e.add(values) - } - - p.mu.Lock() - defer p.mu.Unlock() - - // Check again. - if e = p.store[string(key)]; e != nil { - return false, e.add(values) - } - - // Create a new entry using a preallocated size if we have a hint available. 
- e, err := newEntryValues(values) - if err != nil { - return false, err - } - - p.store[string(key)] = e - return true, nil -} - -// remove deletes the entry associated with the provided key. -// remove is safe for use by multiple goroutines. -func (p *partition) remove(key []byte) { - p.mu.Lock() - delete(p.store, string(key)) - p.mu.Unlock() -} - -// keys returns an unsorted slice of the keys in the partition. -func (p *partition) keys() [][]byte { - p.mu.RLock() - keys := make([][]byte, 0, len(p.store)) - for k, v := range p.store { - if v.count() == 0 { - continue - } - keys = append(keys, []byte(k)) - } - p.mu.RUnlock() - return keys -} - -// reset resets the partition by reinitialising the store. reset returns hints -// about sizes that the entries within the store could be reallocated with. -func (p *partition) reset() { - p.mu.RLock() - sz := len(p.store) - p.mu.RUnlock() - - newStore := make(map[string]*entry, sz) - p.mu.Lock() - p.store = newStore - p.mu.Unlock() -} - -func (p *partition) count() int { - var n int - p.mu.RLock() - for _, v := range p.store { - if v.count() > 0 { - n++ - } - } - p.mu.RUnlock() - return n - -} diff --git a/tsdb/engine/tsm1/ring_test.go b/tsdb/engine/tsm1/ring_test.go deleted file mode 100644 index 172dccb5db8..00000000000 --- a/tsdb/engine/tsm1/ring_test.go +++ /dev/null @@ -1,151 +0,0 @@ -package tsm1 - -import ( - "fmt" - "runtime" - "sync" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestRing_newRing(t *testing.T) { - examples := []struct { - n int - returnErr bool - }{ - {n: 1}, - {n: 2}, - {n: 4}, - {n: 8}, - {n: 16}, - {n: 32, returnErr: true}, - {n: 0, returnErr: true}, - {n: 3, returnErr: true}, - } - - for _, example := range examples { - t.Run(fmt.Sprintf("ring n %d", example.n), func(t *testing.T) { - r, err := newring(example.n) - if example.returnErr { - require.Error(t, err) - return - } - require.NoError(t, err) - require.Equal(t, example.n, len(r.partitions)) - - // Check partitions distributed correctly - partitions := make([]*partition, 0) - for i, partition := range r.partitions { - if i == 0 || partition != partitions[len(partitions)-1] { - partitions = append(partitions, partition) - } - } - require.Equal(t, example.n, len(partitions)) - }) - } -} - -var strSliceRes [][]byte - -func benchmarkRingkeys(b *testing.B, r *ring, keys int) { - // Add some keys - for i := 0; i < keys; i++ { - r.write([]byte(fmt.Sprintf("cpu,host=server-%d value=1", i)), Values([]Value{ - IntegerValue{ - unixnano: 1, - value: int64(i), - }, - })) - } - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - strSliceRes = r.keys(false) - } -} - -func BenchmarkRing_keys_100(b *testing.B) { benchmarkRingkeys(b, MustNewRing(16), 100) } -func BenchmarkRing_keys_1000(b *testing.B) { benchmarkRingkeys(b, MustNewRing(16), 1000) } -func BenchmarkRing_keys_10000(b *testing.B) { benchmarkRingkeys(b, MustNewRing(16), 10000) } -func BenchmarkRing_keys_100000(b *testing.B) { benchmarkRingkeys(b, MustNewRing(16), 100000) } - -func benchmarkRingGetPartition(b *testing.B, r *ring, keys int) { - vals := make([][]byte, keys) - - // Add some keys - for i := 0; i < keys; i++ { - vals[i] = []byte(fmt.Sprintf("cpu,host=server-%d field1=value1,field2=value2,field4=value4,field5=value5,field6=value6,field7=value7,field8=value1,field9=value2,field10=value4,field11=value5,field12=value6,field13=value7", i)) - r.write([]byte(fmt.Sprintf("cpu,host=server-%d value=1", i)), Values([]Value{ - IntegerValue{ - unixnano: 1, - value: int64(i), - }, 
- })) - } - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - r.getPartition(vals[i%keys]) - } -} - -func BenchmarkRing_getPartition_100(b *testing.B) { - benchmarkRingGetPartition(b, MustNewRing(16), 100) -} -func BenchmarkRing_getPartition_1000(b *testing.B) { - benchmarkRingGetPartition(b, MustNewRing(16), 1000) -} - -func benchmarkRingWrite(b *testing.B, r *ring, n int) { - b.ReportAllocs() - for i := 0; i < b.N; i++ { - var wg sync.WaitGroup - for i := 0; i < runtime.GOMAXPROCS(0); i++ { - errC := make(chan error) - wg.Add(1) - go func() { - defer wg.Done() - for j := 0; j < n; j++ { - if _, err := r.write([]byte(fmt.Sprintf("cpu,host=server-%d value=1", j)), Values{}); err != nil { - errC <- err - } - } - }() - - go func() { - wg.Wait() - close(errC) - }() - - for err := range errC { - if err != nil { - b.Error(err) - } - } - } - } -} - -func BenchmarkRing_write_1_100(b *testing.B) { benchmarkRingWrite(b, MustNewRing(1), 100) } -func BenchmarkRing_write_1_1000(b *testing.B) { benchmarkRingWrite(b, MustNewRing(1), 1000) } -func BenchmarkRing_write_1_10000(b *testing.B) { benchmarkRingWrite(b, MustNewRing(1), 10000) } -func BenchmarkRing_write_1_100000(b *testing.B) { benchmarkRingWrite(b, MustNewRing(1), 100000) } -func BenchmarkRing_write_4_100(b *testing.B) { benchmarkRingWrite(b, MustNewRing(4), 100) } -func BenchmarkRing_write_4_1000(b *testing.B) { benchmarkRingWrite(b, MustNewRing(4), 1000) } -func BenchmarkRing_write_4_10000(b *testing.B) { benchmarkRingWrite(b, MustNewRing(4), 10000) } -func BenchmarkRing_write_4_100000(b *testing.B) { benchmarkRingWrite(b, MustNewRing(4), 100000) } -func BenchmarkRing_write_16_100(b *testing.B) { benchmarkRingWrite(b, MustNewRing(16), 100) } -func BenchmarkRing_write_16_1000(b *testing.B) { benchmarkRingWrite(b, MustNewRing(16), 1000) } -func BenchmarkRing_write_16_10000(b *testing.B) { benchmarkRingWrite(b, MustNewRing(16), 10000) } -func BenchmarkRing_write_16_100000(b *testing.B) { benchmarkRingWrite(b, MustNewRing(16), 100000) } - -func MustNewRing(n int) *ring { - r, err := newring(n) - if err != nil { - panic(err) - } - return r -} diff --git a/tsdb/engine/tsm1/scheduler.go b/tsdb/engine/tsm1/scheduler.go deleted file mode 100644 index 774a8998061..00000000000 --- a/tsdb/engine/tsm1/scheduler.go +++ /dev/null @@ -1,79 +0,0 @@ -package tsm1 - -import ( - "sync/atomic" -) - -var defaultWeights = [4]float64{0.4, 0.3, 0.2, 0.1} - -type scheduler struct { - maxConcurrency int - activeCompactions *compactionCounter - - // queues is the depth of work pending for each compaction level - queues [4]int - weights [4]float64 -} - -func newScheduler(activeCompactions *compactionCounter, maxConcurrency int) *scheduler { - return &scheduler{ - activeCompactions: activeCompactions, - maxConcurrency: maxConcurrency, - weights: defaultWeights, - } -} - -func (s *scheduler) setDepth(level, depth int) { - level = level - 1 - if level < 0 || level > len(s.queues) { - return - } - - s.queues[level] = depth -} - -func (s *scheduler) next() (int, bool) { - level1Running := int(atomic.LoadInt64(&s.activeCompactions.l1)) - level2Running := int(atomic.LoadInt64(&s.activeCompactions.l2)) - level3Running := int(atomic.LoadInt64(&s.activeCompactions.l3)) - level4Running := int(atomic.LoadInt64(&s.activeCompactions.full) + atomic.LoadInt64(&s.activeCompactions.optimize)) - - if level1Running+level2Running+level3Running+level4Running >= s.maxConcurrency { - return 0, false - } - - var ( - level int - runnable bool - ) - - loLimit, _ := s.limits() - - 
end := len(s.queues) - if level3Running+level4Running >= loLimit && s.maxConcurrency-(level1Running+level2Running) == 0 { - end = 2 - } - - var weight float64 - for i := 0; i < end; i++ { - if float64(s.queues[i])*s.weights[i] > weight { - level, runnable = i+1, true - weight = float64(s.queues[i]) * s.weights[i] - } - } - return level, runnable -} - -func (s *scheduler) limits() (int, int) { - hiLimit := s.maxConcurrency * 4 / 5 - loLimit := (s.maxConcurrency / 5) + 1 - if hiLimit == 0 { - hiLimit = 1 - } - - if loLimit == 0 { - loLimit = 1 - } - - return loLimit, hiLimit -} diff --git a/tsdb/engine/tsm1/scheduler_test.go b/tsdb/engine/tsm1/scheduler_test.go deleted file mode 100644 index 8d14dd31232..00000000000 --- a/tsdb/engine/tsm1/scheduler_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package tsm1 - -import "testing" - -func TestScheduler_Runnable_Empty(t *testing.T) { - s := newScheduler(&compactionCounter{}, 1) - - for i := 1; i < 5; i++ { - s.setDepth(i, 1) - level, runnable := s.next() - if exp, got := true, runnable; exp != got { - t.Fatalf("runnable(%d) mismatch: exp %v, got %v ", i, exp, got) - } - - if exp, got := i, level; exp != got { - t.Fatalf("runnable(%d) mismatch: exp %v, got %v ", i, exp, got) - } - s.setDepth(i, 0) - } -} - -func TestScheduler_Runnable_MaxConcurrency(t *testing.T) { - s := newScheduler(&compactionCounter{}, 1) - - // level 1 - s.activeCompactions = &compactionCounter{} - s.activeCompactions.l1 = 1 - for i := 0; i <= 4; i++ { - _, runnable := s.next() - if exp, got := false, runnable; exp != got { - t.Fatalf("runnable mismatch: exp %v, got %v ", exp, got) - } - } - - // level 2 - s.activeCompactions = &compactionCounter{} - s.activeCompactions.l2 = 1 - for i := 0; i <= 4; i++ { - _, runnable := s.next() - if exp, got := false, runnable; exp != got { - t.Fatalf("runnable mismatch: exp %v, got %v ", exp, got) - } - } - - // level 3 - s.activeCompactions = &compactionCounter{} - s.activeCompactions.l3 = 1 - for i := 0; i <= 4; i++ { - _, runnable := s.next() - if exp, got := false, runnable; exp != got { - t.Fatalf("runnable mismatch: exp %v, got %v ", exp, got) - } - } - - // optimize - s.activeCompactions = &compactionCounter{} - s.activeCompactions.optimize++ - for i := 0; i <= 4; i++ { - _, runnable := s.next() - if exp, got := false, runnable; exp != got { - t.Fatalf("runnable mismatch: exp %v, got %v ", exp, got) - } - } - - // full - s.activeCompactions = &compactionCounter{} - s.activeCompactions.full++ - for i := 0; i <= 4; i++ { - _, runnable := s.next() - if exp, got := false, runnable; exp != got { - t.Fatalf("runnable mismatch: exp %v, got %v ", exp, got) - } - } -} diff --git a/tsdb/engine/tsm1/string.go b/tsdb/engine/tsm1/string.go deleted file mode 100644 index fe6b5e9b20c..00000000000 --- a/tsdb/engine/tsm1/string.go +++ /dev/null @@ -1,129 +0,0 @@ -package tsm1 - -// String encoding uses snappy compression to compress each string. Each string is -// appended to byte slice prefixed with a variable byte length followed by the string -// bytes. The bytes are compressed using snappy compressor and a 1 byte header is used -// to indicate the type of encoding. - -import ( - "encoding/binary" - "fmt" - - "github.com/golang/snappy" -) - -// Note: an uncompressed format is not yet implemented. - -// stringCompressedSnappy is a compressed encoding using Snappy compression -const stringCompressedSnappy = 1 - -// StringEncoder encodes multiple strings into a byte slice. 
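The block layout described in the string.go header comment above is easiest to see as a round trip: each string is prefixed with a uvarint length, the concatenation is snappy-compressed, and a one-byte header carries the encoding type in its high four bits. The following is a self-contained, hedged sketch of that layout; the function names are illustrative and are not the tsm1 package API.

```go
// Hedged sketch of the string block layout: uvarint length prefix per string,
// snappy compression over the whole buffer, one-byte type header in front.
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/golang/snappy"
)

const snappyEncoding = 1 // corresponds to stringCompressedSnappy above

func encodeStrings(vals []string) []byte {
	var raw []byte
	var lenBuf [binary.MaxVarintLen64]byte
	for _, s := range vals {
		// Variable-byte length prefix, then the raw string bytes.
		n := binary.PutUvarint(lenBuf[:], uint64(len(s)))
		raw = append(raw, lenBuf[:n]...)
		raw = append(raw, s...)
	}
	// Compress the whole buffer and prepend the 1-byte encoding header.
	compressed := snappy.Encode(nil, raw)
	return append([]byte{snappyEncoding << 4}, compressed...)
}

func decodeStrings(b []byte) ([]string, error) {
	if len(b) == 0 {
		return nil, nil
	}
	raw, err := snappy.Decode(nil, b[1:]) // skip the 1-byte header
	if err != nil {
		return nil, err
	}
	var out []string
	for i := 0; i < len(raw); {
		length, n := binary.Uvarint(raw[i:])
		if n <= 0 || i+n+int(length) > len(raw) {
			return nil, fmt.Errorf("corrupt string block")
		}
		out = append(out, string(raw[i+n:i+n+int(length)]))
		i += n + int(length)
	}
	return out, nil
}

func main() {
	block := encodeStrings([]string{"v1", "value 2", "value 3"})
	vals, err := decodeStrings(block)
	fmt.Println(vals, err)
}
```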
-type StringEncoder struct { - // The encoded bytes - bytes []byte -} - -// NewStringEncoder returns a new StringEncoder with an initial buffer ready to hold sz bytes. -func NewStringEncoder(sz int) StringEncoder { - return StringEncoder{ - bytes: make([]byte, 0, sz), - } -} - -// Flush is no-op -func (e *StringEncoder) Flush() {} - -// Reset sets the encoder back to its initial state. -func (e *StringEncoder) Reset() { - e.bytes = e.bytes[:0] -} - -// Write encodes s to the underlying buffer. -func (e *StringEncoder) Write(s string) { - b := make([]byte, 10) - // Append the length of the string using variable byte encoding - i := binary.PutUvarint(b, uint64(len(s))) - e.bytes = append(e.bytes, b[:i]...) - - // Append the string bytes - e.bytes = append(e.bytes, s...) -} - -// Bytes returns a copy of the underlying buffer. -func (e *StringEncoder) Bytes() ([]byte, error) { - // Compress the currently appended bytes using snappy and prefix with - // a 1 byte header for future extension - data := snappy.Encode(nil, e.bytes) - return append([]byte{stringCompressedSnappy << 4}, data...), nil -} - -// StringDecoder decodes a byte slice into strings. -type StringDecoder struct { - b []byte - l int - i int - err error -} - -// SetBytes initializes the decoder with bytes to read from. -// This must be called before calling any other method. -func (e *StringDecoder) SetBytes(b []byte) error { - // First byte stores the encoding type, only have snappy format - // currently so ignore for now. - var data []byte - if len(b) > 0 { - var err error - data, err = snappy.Decode(nil, b[1:]) - if err != nil { - return fmt.Errorf("failed to decode string block: %v", err.Error()) - } - } - - e.b = data - e.l = 0 - e.i = 0 - e.err = nil - - return nil -} - -// Next returns true if there are any values remaining to be decoded. -func (e *StringDecoder) Next() bool { - if e.err != nil { - return false - } - - e.i += e.l - return e.i < len(e.b) -} - -// Read returns the next value from the decoder. -func (e *StringDecoder) Read() string { - // Read the length of the string - length, n := binary.Uvarint(e.b[e.i:]) - if n <= 0 { - e.err = fmt.Errorf("StringDecoder: invalid encoded string length") - return "" - } - - // The length of this string plus the length of the variable byte encoded length - e.l = int(length) + n - - lower := e.i + n - upper := lower + int(length) - if upper < lower { - e.err = fmt.Errorf("StringDecoder: length overflow") - return "" - } - if upper > len(e.b) { - e.err = fmt.Errorf("StringDecoder: not enough data to represent encoded string") - return "" - } - - return string(e.b[lower:upper]) -} - -// Error returns the last error encountered by the decoder. 
-func (e *StringDecoder) Error() error { - return e.err -} diff --git a/tsdb/engine/tsm1/string_test.go b/tsdb/engine/tsm1/string_test.go deleted file mode 100644 index 2a50aaa0f27..00000000000 --- a/tsdb/engine/tsm1/string_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package tsm1 - -import ( - "fmt" - "reflect" - "testing" - "testing/quick" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/internal/testutil" -) - -func Test_StringEncoder_NoValues(t *testing.T) { - enc := NewStringEncoder(1024) - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var dec StringDecoder - if err := dec.SetBytes(b); err != nil { - t.Fatalf("unexpected error creating string decoder: %v", err) - } - if dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } -} - -func Test_StringEncoder_Single(t *testing.T) { - enc := NewStringEncoder(1024) - v1 := "v1" - enc.Write(v1) - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var dec StringDecoder - if dec.SetBytes(b); err != nil { - t.Fatalf("unexpected error creating string decoder: %v", err) - } - if !dec.Next() { - t.Fatalf("unexpected next value: got false, exp true") - } - - if v1 != dec.Read() { - t.Fatalf("unexpected value: got %v, exp %v", dec.Read(), v1) - } -} - -func Test_StringEncoder_Multi_Compressed(t *testing.T) { - enc := NewStringEncoder(1024) - - values := make([]string, 10) - for i := range values { - values[i] = fmt.Sprintf("value %d", i) - enc.Write(values[i]) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if b[0]>>4 != stringCompressedSnappy { - t.Fatalf("unexpected encoding: got %v, exp %v", b[0], stringCompressedSnappy) - } - - if exp := 51; len(b) != exp { - t.Fatalf("unexpected length: got %v, exp %v", len(b), exp) - } - - var dec StringDecoder - if err := dec.SetBytes(b); err != nil { - t.Fatalf("unexpected erorr creating string decoder: %v", err) - } - - for i, v := range values { - if !dec.Next() { - t.Fatalf("unexpected next value: got false, exp true") - } - if v != dec.Read() { - t.Fatalf("unexpected value at pos %d: got %v, exp %v", i, dec.Read(), v) - } - } - - if dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } -} - -func Test_StringEncoder_Quick(t *testing.T) { - quick.Check(func(values []string) bool { - if values == nil { - values = []string{} - } - // Write values to encoder. - enc := NewStringEncoder(1024) - for _, v := range values { - enc.Write(v) - } - - // Retrieve encoded bytes from encoder. - buf, err := enc.Bytes() - if err != nil { - t.Fatal(err) - } - - // Read values out of decoder. - got := make([]string, 0, len(values)) - var dec StringDecoder - if err := dec.SetBytes(buf); err != nil { - t.Fatal(err) - } - for dec.Next() { - if err := dec.Error(); err != nil { - t.Fatal(err) - } - got = append(got, dec.Read()) - } - - // Verify that input and output values match. 
- if !reflect.DeepEqual(values, got) { - t.Fatalf("mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", values, got) - } - - return true - }, nil) -} - -func Test_StringDecoder_Empty(t *testing.T) { - var dec StringDecoder - if err := dec.SetBytes([]byte{}); err != nil { - t.Fatal(err) - } - - if dec.Next() { - t.Fatalf("exp Next() == false, got true") - } -} - -func Test_StringDecoder_CorruptRead(t *testing.T) { - cases := []string{ - "\x10\x03\b\x03Hi", // Higher length than actual data - "\x10\x1dp\x9c\x90\x90\x90\x90\x90\x90\x90\x90\x90length overflow----", - } - - for _, c := range cases { - var dec StringDecoder - if err := dec.SetBytes([]byte(c)); err != nil { - t.Fatal(err) - } - - if !dec.Next() { - t.Fatalf("exp Next() to return true, got false") - } - - _ = dec.Read() - if dec.Error() == nil { - t.Fatalf("exp an err, got nil: %q", c) - } - } -} - -func Test_StringDecoder_CorruptSetBytes(t *testing.T) { - cases := []string{ - "0t\x00\x01\x000\x00\x01\x000\x00\x01\x000\x00\x01\x000\x00\x01" + - "\x000\x00\x01\x000\x00\x01\x000\x00\x00\x00\xff:\x01\x00\x01\x00\x01" + - "\x00\x01\x00\x01\x00\x01\x00\x010\x010\x000\x010\x010\x010\x01" + - "0\x010\x010\x010\x010\x010\x010\x010\x010\x010\x010", // Upper slice bounds overflows negative - } - - for _, c := range cases { - var dec StringDecoder - if err := dec.SetBytes([]byte(c)); err == nil { - t.Fatalf("exp an err, got nil: %q", c) - } - } -} - -func BenchmarkStringDecoder_DecodeAll(b *testing.B) { - benchmarks := []struct { - n int - w int - }{ - {1, 10}, - {55, 10}, - {550, 10}, - {1000, 10}, - } - for _, bm := range benchmarks { - s := NewStringEncoder(bm.n) - for c := 0; c < bm.n; c++ { - s.Write(testutil.MakeSentence(bm.w)) - } - s.Flush() - bytes, err := s.Bytes() - if err != nil { - b.Fatalf("unexpected error: %v", err) - } - - b.Run(fmt.Sprintf("%d", bm.n), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - b.ReportAllocs() - - dst := make([]string, bm.n) - for i := 0; i < b.N; i++ { - var it StringDecoder - if err := it.SetBytes(bytes); err != nil { - b.Fatalf("unexpected error creating float decoder: %v", err) - } - - i := 0 - for it.Next() { - dst[i] = it.Read() - i++ - } - - if len(dst) != bm.n { - b.Fatalf("unexpected length -got/+exp\n%s", cmp.Diff(len(dst), bm.n)) - } - } - }) - } -} diff --git a/tsdb/engine/tsm1/timestamp.go b/tsdb/engine/tsm1/timestamp.go deleted file mode 100644 index 8856d931e17..00000000000 --- a/tsdb/engine/tsm1/timestamp.go +++ /dev/null @@ -1,414 +0,0 @@ -package tsm1 - -// Timestamp encoding is adaptive and based on structure of the timestamps that are encoded. It -// uses a combination of delta encoding, scaling and compression using simple8b, run length encoding -// as well as falling back to no compression if needed. -// -// Timestamp values to be encoded should be sorted before encoding. When encoded, the values are -// first delta-encoded. The first value is the starting timestamp, subsequent values are the difference -// from the prior value. -// -// Timestamp resolution can also be in the nanosecond. Many timestamps are monotonically increasing -// and fall on even boundaries of time such as every 10s. When the timestamps have this structure, -// they are scaled by the largest common divisor that is also a factor of 10. This has the effect -// of converting very large integer deltas into very small one that can be reversed by multiplying them -// by the scaling factor. -// -// Using these adjusted values, if all the deltas are the same, the time range is stored using run -// length encoding. 
If run length encoding is not possible and all values are less than 1 << 60 - 1 -// (~36.5 yrs in nanosecond resolution), then the timestamps are encoded using simple8b encoding. If -// any value exceeds the maximum values, the deltas are stored uncompressed using 8b each. -// -// Each compressed byte slice has a 1 byte header indicating the compression type. The 4 high bits -// indicate the encoding type. The 4 low bits are used by the encoding type. -// -// For run-length encoding, the 4 low bits store the log10 of the scaling factor. The next 8 bytes are -// the starting timestamp, next 1-10 bytes is the delta value using variable-length encoding, finally the -// next 1-10 bytes is the count of values. -// -// For simple8b encoding, the 4 low bits store the log10 of the scaling factor. The next 8 bytes is the -// first delta value stored uncompressed, the remaining bytes are 64bit words containing compressed delta -// values. -// -// For uncompressed encoding, the delta values are stored using 8 bytes each. - -import ( - "encoding/binary" - "fmt" - "math" - - "github.com/jwilder/encoding/simple8b" -) - -const ( - // timeUncompressed is a an uncompressed format using 8 bytes per timestamp - timeUncompressed = 0 - // timeCompressedPackedSimple is a bit-packed format using simple8b encoding - timeCompressedPackedSimple = 1 - // timeCompressedRLE is a run-length encoding format - timeCompressedRLE = 2 -) - -// TimeEncoder encodes time.Time to byte slices. -type TimeEncoder interface { - Write(t int64) - Bytes() ([]byte, error) - Reset() -} - -type encoder struct { - ts []uint64 - bytes []byte - enc *simple8b.Encoder -} - -// NewTimeEncoder returns a TimeEncoder with an initial buffer ready to hold sz bytes. -func NewTimeEncoder(sz int) TimeEncoder { - return &encoder{ - ts: make([]uint64, 0, sz), - enc: simple8b.NewEncoder(), - } -} - -// Reset sets the encoder back to its initial state. -func (e *encoder) Reset() { - e.ts = e.ts[:0] - e.bytes = e.bytes[:0] - e.enc.Reset() -} - -// Write adds a timestamp to the compressed stream. -func (e *encoder) Write(t int64) { - e.ts = append(e.ts, uint64(t)) -} - -func (e *encoder) reduce() (max, divisor uint64, rle bool, deltas []uint64) { - // Compute the deltas in place to avoid allocating another slice - deltas = e.ts - // Starting values for a max and divisor - max, divisor = 0, 1e12 - - // Indicates whether the deltas can be run-length encoded - rle = true - - // Iterate in reverse so we can apply deltas in place - for i := len(deltas) - 1; i > 0; i-- { - - // First differential encode the values - deltas[i] = deltas[i] - deltas[i-1] - - // We also need to keep track of the max value and largest common divisor - v := deltas[i] - - if v > max { - max = v - } - - // If our value is divisible by 10, break. Otherwise, try the next smallest divisor. - for divisor > 1 && v%divisor != 0 { - divisor /= 10 - } - - // Skip the first value || see if prev = curr. The deltas can be RLE if the are all equal. - rle = i == len(deltas)-1 || rle && (deltas[i+1] == deltas[i]) - } - return -} - -// Bytes returns the encoded bytes of all written times. -func (e *encoder) Bytes() ([]byte, error) { - if len(e.ts) == 0 { - return e.bytes[:0], nil - } - - // Maximum and largest common divisor. rle is true if dts (the delta timestamps), - // are all the same. 
- max, div, rle, dts := e.reduce() - - // The deltas are all the same, so we can run-length encode them - if rle && len(e.ts) > 1 { - return e.encodeRLE(e.ts[0], e.ts[1], div, len(e.ts)) - } - - // We can't compress this time-range, the deltas exceed 1 << 60 - if max > simple8b.MaxValue { - return e.encodeRaw() - } - - return e.encodePacked(div, dts) -} - -func (e *encoder) encodePacked(div uint64, dts []uint64) ([]byte, error) { - // Only apply the divisor if it's greater than 1 since division is expensive. - if div > 1 { - for _, v := range dts[1:] { - if err := e.enc.Write(v / div); err != nil { - return nil, err - } - } - } else { - for _, v := range dts[1:] { - if err := e.enc.Write(v); err != nil { - return nil, err - } - } - } - - // The compressed deltas - deltas, err := e.enc.Bytes() - if err != nil { - return nil, err - } - - sz := 8 + 1 + len(deltas) - if cap(e.bytes) < sz { - e.bytes = make([]byte, sz) - } - b := e.bytes[:sz] - - // 4 high bits used for the encoding type - b[0] = byte(timeCompressedPackedSimple) << 4 - // 4 low bits are the log10 divisor - b[0] |= byte(math.Log10(float64(div))) - - // The first delta value - binary.BigEndian.PutUint64(b[1:9], uint64(dts[0])) - - copy(b[9:], deltas) - return b[:9+len(deltas)], nil -} - -func (e *encoder) encodeRaw() ([]byte, error) { - sz := 1 + len(e.ts)*8 - if cap(e.bytes) < sz { - e.bytes = make([]byte, sz) - } - b := e.bytes[:sz] - b[0] = byte(timeUncompressed) << 4 - for i, v := range e.ts { - binary.BigEndian.PutUint64(b[1+i*8:1+i*8+8], uint64(v)) - } - return b, nil -} - -func (e *encoder) encodeRLE(first, delta, div uint64, n int) ([]byte, error) { - // Large varints can take up to 10 bytes, we're encoding 3 + 1 byte type - sz := 31 - if cap(e.bytes) < sz { - e.bytes = make([]byte, sz) - } - b := e.bytes[:sz] - // 4 high bits used for the encoding type - b[0] = byte(timeCompressedRLE) << 4 - // 4 low bits are the log10 divisor - b[0] |= byte(math.Log10(float64(div))) - - i := 1 - // The first timestamp - binary.BigEndian.PutUint64(b[i:], uint64(first)) - i += 8 - // The first delta - i += binary.PutUvarint(b[i:], uint64(delta/div)) - // The number of times the delta is repeated - i += binary.PutUvarint(b[i:], uint64(n)) - - return b[:i], nil -} - -// TimeDecoder decodes a byte slice into timestamps. -type TimeDecoder struct { - v int64 - i, n int - ts []uint64 - dec simple8b.Decoder - err error - - // The delta value for a run-length encoded byte slice - rleDelta int64 - - encoding byte -} - -// Init initializes the decoder with bytes to read from. -func (d *TimeDecoder) Init(b []byte) { - d.v = 0 - d.i = 0 - d.ts = d.ts[:0] - d.err = nil - if len(b) > 0 { - // Encoding type is stored in the 4 high bits of the first byte - d.encoding = b[0] >> 4 - } - d.decode(b) -} - -// Next returns true if there are any timestamps remaining to be decoded. -func (d *TimeDecoder) Next() bool { - if d.err != nil { - return false - } - - if d.encoding == timeCompressedRLE { - if d.i >= d.n { - return false - } - d.i++ - d.v += d.rleDelta - return d.i < d.n - } - - if d.i >= len(d.ts) { - return false - } - d.v = int64(d.ts[d.i]) - d.i++ - return true -} - -// Read returns the next timestamp from the decoder. -func (d *TimeDecoder) Read() int64 { - return d.v -} - -// Error returns the last error encountered by the decoder. 
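As a usage sketch, a minimal round-trip of the exported encoder and decoder in this file follows the intended consumption pattern: Init, then Next/Read in a loop, then a final Error check (the Error accessor itself is declared immediately below). The import path is the one used elsewhere in this repository.

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/tsdb/engine/tsm1"
)

func main() {
	enc := tsm1.NewTimeEncoder(3)
	for _, t := range []int64{0, 1_000_000_000, 2_000_000_000} {
		enc.Write(t)
	}
	b, err := enc.Bytes()
	if err != nil {
		panic(err)
	}

	var dec tsm1.TimeDecoder
	dec.Init(b)
	for dec.Next() {
		fmt.Println(dec.Read())
	}
	// Check for corruption encountered while iterating.
	if err := dec.Error(); err != nil {
		panic(err)
	}
}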
-func (d *TimeDecoder) Error() error { - return d.err -} - -func (d *TimeDecoder) decode(b []byte) { - if len(b) == 0 { - return - } - - switch d.encoding { - case timeUncompressed: - d.decodeRaw(b[1:]) - case timeCompressedRLE: - d.decodeRLE(b) - case timeCompressedPackedSimple: - d.decodePacked(b) - default: - d.err = fmt.Errorf("unknown encoding: %v", d.encoding) - } -} - -func (d *TimeDecoder) decodePacked(b []byte) { - if len(b) < 9 { - d.err = fmt.Errorf("TimeDecoder: not enough data to decode packed timestamps") - return - } - div := uint64(math.Pow10(int(b[0] & 0xF))) - first := uint64(binary.BigEndian.Uint64(b[1:9])) - - d.dec.SetBytes(b[9:]) - - d.i = 0 - deltas := d.ts[:0] - deltas = append(deltas, first) - - for d.dec.Next() { - deltas = append(deltas, d.dec.Read()) - } - - // Compute the prefix sum and scale the deltas back up - last := deltas[0] - if div > 1 { - for i := 1; i < len(deltas); i++ { - dgap := deltas[i] * div - deltas[i] = last + dgap - last = deltas[i] - } - } else { - for i := 1; i < len(deltas); i++ { - deltas[i] += last - last = deltas[i] - } - } - - d.i = 0 - d.ts = deltas -} - -func (d *TimeDecoder) decodeRLE(b []byte) { - if len(b) < 9 { - d.err = fmt.Errorf("TimeDecoder: not enough data for initial RLE timestamp") - return - } - - var i, n int - - // Lower 4 bits hold the 10 based exponent so we can scale the values back up - mod := int64(math.Pow10(int(b[i] & 0xF))) - i++ - - // Next 8 bytes is the starting timestamp - first := binary.BigEndian.Uint64(b[i : i+8]) - i += 8 - - // Next 1-10 bytes is our (scaled down by factor of 10) run length values - value, n := binary.Uvarint(b[i:]) - if n <= 0 { - d.err = fmt.Errorf("TimeDecoder: invalid run length in decodeRLE") - return - } - - // Scale the value back up - value *= uint64(mod) - i += n - - // Last 1-10 bytes is how many times the value repeats - count, n := binary.Uvarint(b[i:]) - if n <= 0 { - d.err = fmt.Errorf("TimeDecoder: invalid repeat value in decodeRLE") - return - } - - d.v = int64(first - value) - d.rleDelta = int64(value) - - d.i = -1 - d.n = int(count) -} - -func (d *TimeDecoder) decodeRaw(b []byte) { - d.i = 0 - d.ts = make([]uint64, len(b)/8) - for i := range d.ts { - d.ts[i] = binary.BigEndian.Uint64(b[i*8 : i*8+8]) - - delta := d.ts[i] - // Compute the prefix sum and scale the deltas back up - if i > 0 { - d.ts[i] = d.ts[i-1] + delta - } - } -} - -func CountTimestamps(b []byte) int { - if len(b) == 0 { - return 0 - } - - // Encoding type is stored in the 4 high bits of the first byte - encoding := b[0] >> 4 - switch encoding { - case timeUncompressed: - // Uncompressed timestamps are just 8 bytes each - return len(b[1:]) / 8 - case timeCompressedRLE: - // First 9 bytes are the starting timestamp and scaling factor, skip over them - i := 9 - // Next 1-10 bytes is our (scaled down by factor of 10) run length values - _, n := binary.Uvarint(b[9:]) - i += n - // Last 1-10 bytes is how many times the value repeats - count, _ := binary.Uvarint(b[i:]) - return int(count) - case timeCompressedPackedSimple: - // First 9 bytes are the starting timestamp and scaling factor, skip over them - count, _ := simple8b.CountBytes(b[9:]) - return count + 1 // +1 is for the first uncompressed timestamp, starting timestamp in b[1:9] - default: - return 0 - } -} diff --git a/tsdb/engine/tsm1/timestamp_test.go b/tsdb/engine/tsm1/timestamp_test.go deleted file mode 100644 index 987a742572b..00000000000 --- a/tsdb/engine/tsm1/timestamp_test.go +++ /dev/null @@ -1,726 +0,0 @@ -package tsm1 - -import ( - "fmt" - 
"math/rand" - "reflect" - "testing" - "testing/quick" - "time" -) - -func Test_TimeEncoder(t *testing.T) { - enc := NewTimeEncoder(1) - - x := []int64{} - now := time.Unix(0, 0) - x = append(x, now.UnixNano()) - enc.Write(now.UnixNano()) - for i := 1; i < 4; i++ { - x = append(x, now.Add(time.Duration(i)*time.Second).UnixNano()) - enc.Write(x[i]) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - for i, v := range x { - if !dec.Next() { - t.Fatalf("Next == false, expected true") - } - - if v != dec.Read() { - t.Fatalf("Item %d mismatch, got %v, exp %v", i, dec.Read(), v) - } - } -} - -func Test_TimeEncoder_NoValues(t *testing.T) { - enc := NewTimeEncoder(0) - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var dec TimeDecoder - dec.Init(b) - if dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } -} - -func Test_TimeEncoder_One(t *testing.T) { - enc := NewTimeEncoder(1) - var tm int64 - - enc.Write(tm) - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedPackedSimple { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if tm != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), tm) - } -} - -func Test_TimeEncoder_Two(t *testing.T) { - enc := NewTimeEncoder(2) - t1 := int64(0) - t2 := int64(1) - enc.Write(t1) - enc.Write(t2) - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if t1 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), t1) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if t2 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), t2) - } -} - -func Test_TimeEncoder_Three(t *testing.T) { - enc := NewTimeEncoder(3) - t1 := int64(0) - t2 := int64(1) - t3 := int64(3) - - enc.Write(t1) - enc.Write(t2) - enc.Write(t3) - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedPackedSimple { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if t1 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), t1) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if t2 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), t2) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if t3 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), t3) - } -} - -func Test_TimeEncoder_Large_Range(t *testing.T) { - enc := NewTimeEncoder(2) - t1 := int64(1442369134000000000) - t2 := int64(1442369135000000000) - enc.Write(t1) - enc.Write(t2) - b, err := enc.Bytes() - if err != nil { - 
t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if t1 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), t1) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if t2 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), t2) - } -} - -func Test_TimeEncoder_Uncompressed(t *testing.T) { - enc := NewTimeEncoder(3) - t1 := time.Unix(0, 0).UnixNano() - t2 := time.Unix(1, 0).UnixNano() - - // about 36.5yrs in NS resolution is max range for compressed format - // This should cause the encoding to fallback to raw points - t3 := time.Unix(2, (2 << 59)).UnixNano() - enc.Write(t1) - enc.Write(t2) - enc.Write(t3) - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("expected error: %v", err) - } - - if exp := 25; len(b) != exp { - t.Fatalf("length mismatch: got %v, exp %v", len(b), exp) - } - - if got := b[0] >> 4; got != timeUncompressed { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if t1 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), t1) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if t2 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), t2) - } - - if !dec.Next() { - t.Fatalf("unexpected next value: got true, exp false") - } - - if t3 != dec.Read() { - t.Fatalf("read value mismatch: got %v, exp %v", dec.Read(), t3) - } -} - -func Test_TimeEncoder_RLE(t *testing.T) { - enc := NewTimeEncoder(512) - var ts []int64 - for i := 0; i < 500; i++ { - ts = append(ts, int64(i)) - } - - for _, v := range ts { - enc.Write(v) - } - - b, err := enc.Bytes() - if exp := 12; len(b) != exp { - t.Fatalf("length mismatch: got %v, exp %v", len(b), exp) - } - - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var dec TimeDecoder - dec.Init(b) - for i, v := range ts { - if !dec.Next() { - t.Fatalf("Next == false, expected true") - } - - if v != dec.Read() { - t.Fatalf("Item %d mismatch, got %v, exp %v", i, dec.Read(), v) - } - } - - if dec.Next() { - t.Fatalf("unexpected extra values") - } -} - -func Test_TimeEncoder_Reverse(t *testing.T) { - enc := NewTimeEncoder(3) - ts := []int64{ - int64(3), - int64(2), - int64(0), - } - - for _, v := range ts { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeUncompressed { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - i := 0 - for dec.Next() { - if ts[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), ts[i]) - } - i++ - } -} - -func Test_TimeEncoder_220SecondDelta(t *testing.T) { - enc := NewTimeEncoder(256) - var ts []int64 - now := time.Now() - for i := 0; i < 220; i++ { - ts = append(ts, now.Add(time.Duration(i*60)*time.Second).UnixNano()) - } - - for _, v := range ts { - enc.Write(v) - } - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - 
// Using RLE, should get 12 bytes - if exp := 12; len(b) != exp { - t.Fatalf("unexpected length: got %v, exp %v", len(b), exp) - } - - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected uncompressed, got %v", got) - } - - var dec TimeDecoder - dec.Init(b) - i := 0 - for dec.Next() { - if ts[i] != dec.Read() { - t.Fatalf("read value %d mismatch: got %v, exp %v", i, dec.Read(), ts[i]) - } - i++ - } - - if i != len(ts) { - t.Fatalf("Read too few values: exp %d, got %d", len(ts), i) - } - - if dec.Next() { - t.Fatalf("expecte Next() = false, got true") - } -} - -func Test_TimeEncoder_Quick(t *testing.T) { - quick.Check(func(values []int64) bool { - // Write values to encoder. - enc := NewTimeEncoder(1024) - exp := make([]int64, len(values)) - for i, v := range values { - exp[i] = int64(v) - enc.Write(exp[i]) - } - - // Retrieve encoded bytes from encoder. - buf, err := enc.Bytes() - if err != nil { - t.Fatal(err) - } - - // Read values out of decoder. - got := make([]int64, 0, len(values)) - var dec TimeDecoder - dec.Init(buf) - for dec.Next() { - if err := dec.Error(); err != nil { - t.Fatal(err) - } - got = append(got, dec.Read()) - } - - // Verify that input and output values match. - if !reflect.DeepEqual(exp, got) { - t.Fatalf("mismatch:\n\nexp=%+v\n\ngot=%+v\n\n", exp, got) - } - - return true - }, nil) -} - -func Test_TimeEncoder_RLESeconds(t *testing.T) { - enc := NewTimeEncoder(6) - ts := make([]int64, 6) - - ts[0] = int64(1444448158000000000) - ts[1] = int64(1444448168000000000) - ts[2] = int64(1444448178000000000) - ts[3] = int64(1444448188000000000) - ts[4] = int64(1444448198000000000) - ts[5] = int64(1444448208000000000) - - for _, v := range ts { - enc.Write(v) - } - - b, err := enc.Bytes() - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - var dec TimeDecoder - dec.Init(b) - for i, v := range ts { - if !dec.Next() { - t.Fatalf("Next == false, expected true") - } - - if v != dec.Read() { - t.Fatalf("Item %d mismatch, got %v, exp %v", i, dec.Read(), v) - } - } - - if dec.Next() { - t.Fatalf("unexpected extra values") - } -} - -func TestTimeEncoder_Count_Uncompressed(t *testing.T) { - enc := NewTimeEncoder(2) - t1 := time.Unix(0, 0).UnixNano() - t2 := time.Unix(1, 0).UnixNano() - - // about 36.5yrs in NS resolution is max range for compressed format - // This should cause the encoding to fallback to raw points - t3 := time.Unix(2, (2 << 59)).UnixNano() - enc.Write(t1) - enc.Write(t2) - enc.Write(t3) - - b, err := enc.Bytes() - if got := b[0] >> 4; got != timeUncompressed { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got, exp := CountTimestamps(b), 3; got != exp { - t.Fatalf("count mismatch: got %v, exp %v", got, exp) - } -} - -func TestTimeEncoder_Count_RLE(t *testing.T) { - enc := NewTimeEncoder(5) - ts := make([]int64, 6) - - ts[0] = int64(1444448158000000000) - ts[1] = int64(1444448168000000000) - ts[2] = int64(1444448178000000000) - ts[3] = int64(1444448188000000000) - ts[4] = int64(1444448198000000000) - ts[5] = int64(1444448208000000000) - - for _, v := range ts { - enc.Write(v) - } - - b, err := enc.Bytes() - if got := b[0] >> 4; got != timeCompressedRLE { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got, exp := 
CountTimestamps(b), len(ts); got != exp { - t.Fatalf("count mismatch: got %v, exp %v", got, exp) - } -} - -func TestTimeEncoder_Count_Simple8(t *testing.T) { - enc := NewTimeEncoder(3) - t1 := int64(0) - t2 := int64(1) - t3 := int64(3) - - enc.Write(t1) - enc.Write(t2) - enc.Write(t3) - - b, err := enc.Bytes() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got := b[0] >> 4; got != timeCompressedPackedSimple { - t.Fatalf("Wrong encoding used: expected rle, got %v", got) - } - - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if got, exp := CountTimestamps(b), 3; got != exp { - t.Fatalf("count mismatch: got %v, exp %v", got, exp) - } -} - -func TestTimeDecoder_Corrupt(t *testing.T) { - cases := []string{ - "", // Empty - "\x10\x14", // Packed: not enough data - "\x20\x00", // RLE: not enough data for starting timestamp - "\x2012345678\x90", // RLE: initial timestamp but invalid uvarint encoding - "\x2012345678\x7f", // RLE: timestamp, RLE but invalid repeat - "\x00123", // Raw: data length not multiple of 8 - } - - for _, c := range cases { - var dec TimeDecoder - dec.Init([]byte(c)) - if dec.Next() { - t.Fatalf("exp next == false, got true") - } - } -} - -func BenchmarkTimeEncoder(b *testing.B) { - enc := NewTimeEncoder(1024) - x := make([]int64, 1024) - for i := 0; i < len(x); i++ { - x[i] = time.Now().UnixNano() - enc.Write(x[i]) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - enc.Bytes() - enc.Reset() - for i := 0; i < len(x); i++ { - enc.Write(x[i]) - } - } -} - -func BenchmarkTimeDecoder_Packed(b *testing.B) { - x := make([]int64, 1024) - enc := NewTimeEncoder(1024) - for i := 0; i < len(x); i++ { - x[i] = time.Now().UnixNano() - enc.Write(x[i]) - } - bytes, _ := enc.Bytes() - - b.ResetTimer() - - var dec TimeDecoder - for i := 0; i < b.N; i++ { - dec.Init(bytes) - for dec.Next() { - } - } -} - -func BenchmarkTimeDecoder_RLE(b *testing.B) { - x := make([]int64, 1024) - enc := NewTimeEncoder(1024) - for i := 0; i < len(x); i++ { - x[i] = int64(i * 10) - enc.Write(x[i]) - } - bytes, _ := enc.Bytes() - - b.ResetTimer() - - b.StopTimer() - var dec TimeDecoder - b.StartTimer() - - for i := 0; i < b.N; i++ { - dec.Init(bytes) - for dec.Next() { - } - } -} - -func BenchmarkTimeBatch_DecodeAllUncompressed(b *testing.B) { - benchmarks := []int{ - 5, - 55, - 555, - 1000, - } - - values := []int64{ - -2352281900722994752, 1438442655375607923, -4110452567888190110, - -1221292455668011702, -1941700286034261841, -2836753127140407751, - 1432686216250034552, 3663244026151507025, -3068113732684750258, - -1949953187327444488, 3713374280993588804, 3226153669854871355, - -2093273755080502606, 1006087192578600616, -2272122301622271655, - 2533238229511593671, -4450454445568858273, 2647789901083530435, - 2761419461769776844, -1324397441074946198, -680758138988210958, - 94468846694902125, -2394093124890745254, -2682139311758778198, - } - - for _, size := range benchmarks { - seededRand := rand.New(rand.NewSource(int64(size * 1e3))) - - enc := NewTimeEncoder(size) - for i := 0; i < size; i++ { - enc.Write(values[seededRand.Int()%len(values)]) - } - bytes, _ := enc.Bytes() - - b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - b.ReportAllocs() - - dst := make([]int64, size) - for i := 0; i < b.N; i++ { - var dec TimeDecoder - dec.Init(bytes) - var n int - for dec.Next() { - dst[n] = dec.Read() - n++ - } - } - }) - } -} - -func BenchmarkTimeBatch_DecodeAllPackedSimple(b *testing.B) { - benchmarks := []struct { - n int - }{ - {5}, - 
{55}, - {555}, - {1000}, - } - for _, bm := range benchmarks { - seededRand := rand.New(rand.NewSource(int64(bm.n * 1e3))) - - enc := NewTimeEncoder(bm.n) - for i := 0; i < bm.n; i++ { - // Small amount of randomness prevents RLE from being used - enc.Write(int64(i*1000) + int64(seededRand.Intn(10))) - } - bytes, _ := enc.Bytes() - - b.Run(fmt.Sprintf("%d", bm.n), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - b.ReportAllocs() - - dst := make([]int64, bm.n) - for i := 0; i < b.N; i++ { - var dec TimeDecoder - dec.Init(bytes) - var n int - for dec.Next() { - dst[n] = dec.Read() - n++ - } - } - }) - } -} - -func BenchmarkTimeBatch_DecodeAllRLE(b *testing.B) { - benchmarks := []struct { - n int - delta int64 - }{ - {5, 10}, - {55, 10}, - {555, 10}, - {1000, 10}, - } - for _, bm := range benchmarks { - enc := NewTimeEncoder(bm.n) - acc := int64(0) - for i := 0; i < bm.n; i++ { - enc.Write(acc) - acc += bm.delta - } - bytes, _ := enc.Bytes() - - b.Run(fmt.Sprintf("%d_delta_%d", bm.n, bm.delta), func(b *testing.B) { - b.SetBytes(int64(len(bytes))) - b.ReportAllocs() - - dst := make([]int64, bm.n) - for i := 0; i < b.N; i++ { - var dec TimeDecoder - dec.Init(bytes) - var n int - for dec.Next() { - dst[n] = dec.Read() - n++ - } - } - }) - } -} diff --git a/tsdb/engine/tsm1/tombstone.go b/tsdb/engine/tsm1/tombstone.go deleted file mode 100644 index 8d22755c04a..00000000000 --- a/tsdb/engine/tsm1/tombstone.go +++ /dev/null @@ -1,730 +0,0 @@ -package tsm1 - -import ( - "bufio" - "compress/gzip" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/influxdata/influxdb/v2/pkg/file" - "github.com/influxdata/influxdb/v2/tsdb" -) - -const TombstoneFileExtension = "tombstone" -const ( - headerSize = 4 - v2header = 0x1502 - v3header = 0x1503 - v4header = 0x1504 -) - -var errIncompatibleVersion = errors.New("incompatible v4 version") - -// Tombstoner records tombstones when entries are deleted. -type Tombstoner struct { - mu sync.RWMutex - - // Path is the location of the file to record tombstone. This should be the - // full path to a TSM file. - Path string - - FilterFn func(k []byte) bool - - // Tombstones that have been written but not flushed to disk yet. - tombstones []Tombstone - // cache of the stats for this tombstone - tombstoneStats TombstoneStat - // indicates that the stats may be out of sync with what is on disk and they - // should be refreshed. - statsLoaded bool - - // These are references used for pending writes that have not been committed. If - // these are nil, then no pending writes are in progress. - gz *gzip.Writer - bw *bufio.Writer - pendingFile *os.File - tmp [8]byte - lastAppliedOffset int64 - - // Optional observer for when tombstone files are written. - obs tsdb.FileStoreObserver -} - -// NewTombstoner constructs a Tombstoner for the given path. FilterFn can be nil. -func NewTombstoner(path string, filterFn func(k []byte) bool) *Tombstoner { - return &Tombstoner{ - Path: path, - FilterFn: filterFn, - obs: noFileStoreObserver{}, - } -} - -// Tombstone represents an individual deletion. -type Tombstone struct { - // Key is the tombstoned series key. - Key []byte - - // Min and Max are the min and max unix nanosecond time ranges of Key that are deleted. If - // the full range is deleted, both values are -1. - Min, Max int64 -} - -// WithObserver sets a FileStoreObserver for when the tombstone file is written. 
-func (t *Tombstoner) WithObserver(obs tsdb.FileStoreObserver) { - t.obs = obs -} - -// Add adds the all keys, across all timestamps, to the tombstone. -func (t *Tombstoner) Add(keys [][]byte) error { - return t.AddRange(keys, math.MinInt64, math.MaxInt64) -} - -// AddRange adds all keys to the tombstone specifying only the data between min and max to be removed. -func (t *Tombstoner) AddRange(keys [][]byte, min, max int64) error { - for t.FilterFn != nil && len(keys) > 0 && !t.FilterFn(keys[0]) { - keys = keys[1:] - } - - if len(keys) == 0 { - return nil - } - - t.mu.Lock() - defer t.mu.Unlock() - - // If this TSMFile has not been written (mainly in tests), don't write a - // tombstone because the keys will not be written when it's actually saved. - if t.Path == "" { - return nil - } - - t.statsLoaded = false - - if err := t.prepareV4(); err == errIncompatibleVersion { - if cap(t.tombstones) < len(t.tombstones)+len(keys) { - ts := make([]Tombstone, len(t.tombstones), len(t.tombstones)+len(keys)) - copy(ts, t.tombstones) - t.tombstones = ts - } - - for _, k := range keys { - if t.FilterFn != nil && !t.FilterFn(k) { - continue - } - - t.tombstones = append(t.tombstones, Tombstone{ - Key: k, - Min: min, - Max: max, - }) - } - return t.writeTombstoneV3(t.tombstones) - - } else if err != nil { - return err - } - - for _, k := range keys { - if t.FilterFn != nil && !t.FilterFn(k) { - continue - } - - if err := t.writeTombstone(t.gz, Tombstone{ - Key: k, - Min: min, - Max: max, - }); err != nil { - return err - } - } - - return nil -} - -func (t *Tombstoner) Flush() error { - t.mu.Lock() - defer t.mu.Unlock() - - if err := t.commit(); err != nil { - // Reset our temp references and clean up. - _ = t.rollback() - return err - } - return nil -} - -func (t *Tombstoner) Rollback() error { - t.mu.Lock() - defer t.mu.Unlock() - return t.rollback() -} - -// Delete removes all the tombstone files from disk. -func (t *Tombstoner) Delete() error { - t.mu.Lock() - defer t.mu.Unlock() - if err := os.RemoveAll(t.tombstonePath()); err != nil { - return err - } - t.statsLoaded = false - t.lastAppliedOffset = 0 - - return nil -} - -// HasTombstones return true if there are any tombstone entries recorded. -func (t *Tombstoner) HasTombstones() bool { - stats := t.TombstoneStats() - if !stats.TombstoneExists { - return false - } - if stats.Size > 0 { - return true - } - - t.mu.RLock() - n := len(t.tombstones) - t.mu.RUnlock() - - return n > 0 -} - -// TombstoneFiles returns any tombstone files associated with Tombstoner's TSM file. -func (t *Tombstoner) TombstoneStats() TombstoneStat { - t.mu.RLock() - if t.statsLoaded { - stats := t.tombstoneStats - t.mu.RUnlock() - return stats - } - t.mu.RUnlock() - - stat, err := os.Stat(t.tombstonePath()) - if err != nil { - t.mu.Lock() - // The file doesn't exist so record that we tried to load it so - // we don't continue to keep trying. This is the common case. - t.statsLoaded = os.IsNotExist(err) - t.tombstoneStats.TombstoneExists = false - stats := t.tombstoneStats - t.mu.Unlock() - return stats - } - - t.mu.Lock() - t.tombstoneStats = TombstoneStat{ - TombstoneExists: true, - Path: t.tombstonePath(), - LastModified: stat.ModTime().UnixNano(), - Size: uint32(stat.Size()), - } - t.statsLoaded = true - stats := t.tombstoneStats - t.mu.Unlock() - - return stats -} - -// Walk calls fn for every Tombstone under the Tombstoner. 
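A minimal usage sketch of the lifecycle exercised by the tests further below: record deletes, flush them to the tombstone file derived from a TSM path, then replay them with Walk (declared immediately below). The "000001.tsm" path and the "foo" key are placeholders.

package main

import (
	"fmt"
	"math"

	"github.com/influxdata/influxdb/v2/tsdb/engine/tsm1"
)

func main() {
	// Placeholder TSM path; the tombstone file becomes "000001.tombstone".
	ts := tsm1.NewTombstoner("000001.tsm", nil)

	// Record a full-range delete for one key, then persist it.
	if err := ts.AddRange([][]byte{[]byte("foo")}, math.MinInt64, math.MaxInt64); err != nil {
		panic(err)
	}
	if err := ts.Flush(); err != nil {
		panic(err)
	}

	// Replay every recorded tombstone.
	if err := ts.Walk(func(t tsm1.Tombstone) error {
		fmt.Printf("%s [%d, %d]\n", t.Key, t.Min, t.Max)
		return nil
	}); err != nil {
		panic(err)
	}
}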
-func (t *Tombstoner) Walk(fn func(t Tombstone) error) error { - t.mu.Lock() - defer t.mu.Unlock() - - f, err := os.Open(t.tombstonePath()) - if os.IsNotExist(err) { - return nil - } else if err != nil { - return err - } - defer f.Close() - - var b [4]byte - if _, err := f.Read(b[:]); err != nil { - // Might be a zero length file which should not exist, but - // an old bug allowed them to occur. Treat it as an empty - // v1 tombstone file so we don't abort loading the TSM file. - return t.readTombstoneV1(f, fn) - } - - if _, err := f.Seek(0, io.SeekStart); err != nil { - return err - } - - header := binary.BigEndian.Uint32(b[:]) - if header == v4header { - return t.readTombstoneV4(f, fn) - } else if header == v3header { - return t.readTombstoneV3(f, fn) - } else if header == v2header { - return t.readTombstoneV2(f, fn) - } - return t.readTombstoneV1(f, fn) -} - -func (t *Tombstoner) writeTombstoneV3(tombstones []Tombstone) error { - tmp, err := os.CreateTemp(filepath.Dir(t.Path), TombstoneFileExtension) - if err != nil { - return err - } - defer tmp.Close() - - var b [8]byte - - bw := bufio.NewWriterSize(tmp, 1024*1024) - - binary.BigEndian.PutUint32(b[:4], v3header) - if _, err := bw.Write(b[:4]); err != nil { - return err - } - - gz := gzip.NewWriter(bw) - for _, ts := range tombstones { - if err := t.writeTombstone(gz, ts); err != nil { - return err - } - } - - t.gz = gz - t.bw = bw - t.pendingFile = tmp - t.tombstones = t.tombstones[:0] - - return t.commit() -} - -func (t *Tombstoner) prepareV4() error { - if t.pendingFile != nil { - return nil - } - - tmpPath := fmt.Sprintf("%s.%s", t.tombstonePath(), CompactionTempExtension) - tmp, err := os.OpenFile(tmpPath, os.O_CREATE|os.O_RDWR|os.O_EXCL, 0666) - if err != nil { - return err - } - - removeTmp := func() { - tmp.Close() - os.Remove(tmp.Name()) - } - - // Copy the existing v4 file if it exists - f, err := os.Open(t.tombstonePath()) - if !os.IsNotExist(err) { - defer f.Close() - var b [4]byte - if n, err := f.Read(b[:]); n == 4 && err == nil { - header := binary.BigEndian.Uint32(b[:]) - // There is an existing tombstone on disk and it's not a v3. Just rewrite it as a v3 - // version again. 
- if header != v4header { - removeTmp() - return errIncompatibleVersion - } - - // Seek back to the beginning we copy the header - if _, err := f.Seek(0, io.SeekStart); err != nil { - removeTmp() - return err - } - - // Copy the while file - if _, err := io.Copy(tmp, f); err != nil { - f.Close() - removeTmp() - return err - } - } - } else if err != nil && !os.IsNotExist(err) { - removeTmp() - return err - } - - var b [8]byte - bw := bufio.NewWriterSize(tmp, 64*1024) - - // Write the header only if the file is new - if os.IsNotExist(err) { - binary.BigEndian.PutUint32(b[:4], v4header) - if _, err := bw.Write(b[:4]); err != nil { - removeTmp() - return err - } - } - - // Write the tombstones - gz := gzip.NewWriter(bw) - - t.pendingFile = tmp - t.gz = gz - t.bw = bw - - return nil -} - -func (t *Tombstoner) commit() error { - // No pending writes - if t.pendingFile == nil { - return nil - } - - if err := t.gz.Close(); err != nil { - return err - } - - if err := t.bw.Flush(); err != nil { - return err - } - - // fsync the file to flush the write - if err := t.pendingFile.Sync(); err != nil { - return err - } - - tmpFilename := t.pendingFile.Name() - t.pendingFile.Close() - - if err := t.obs.FileFinishing(tmpFilename); err != nil { - return err - } - - if err := file.RenameFile(tmpFilename, t.tombstonePath()); err != nil { - return err - } - - if err := file.SyncDir(filepath.Dir(t.tombstonePath())); err != nil { - return err - } - - t.pendingFile = nil - t.bw = nil - t.gz = nil - - return nil -} - -func (t *Tombstoner) rollback() error { - if t.pendingFile == nil { - return nil - } - - tmpFilename := t.pendingFile.Name() - t.pendingFile.Close() - t.gz = nil - t.bw = nil - t.pendingFile = nil - return os.Remove(tmpFilename) -} - -// readTombstoneV1 reads the first version of tombstone files that were not -// capable of storing a min and max time for a key. This is used for backwards -// compatibility with versions prior to 0.13. This format is a simple newline -// separated text file. -func (t *Tombstoner) readTombstoneV1(f *os.File, fn func(t Tombstone) error) error { - r := bufio.NewScanner(f) - for r.Scan() { - line := r.Text() - if line == "" { - continue - } - if err := fn(Tombstone{ - Key: []byte(line), - Min: math.MinInt64, - Max: math.MaxInt64, - }); err != nil { - return err - } - } - - if err := r.Err(); err != nil { - return err - } - - for _, t := range t.tombstones { - if err := fn(t); err != nil { - return err - } - } - return nil -} - -// readTombstoneV2 reads the second version of tombstone files that are capable -// of storing keys and the range of time for the key that points were deleted. This -// format is binary. 
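Each entry in the v2 file, and inside the gzip stream of the v3 and v4 files, uses the same fixed layout that writeTombstone further below produces and readTombstoneV2 (immediately below) parses: a 4-byte big-endian key length, the key bytes, then 8-byte min and max times. A standalone sketch of building one such entry:

package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	key := []byte("foo")
	min, max := int64(math.MinInt64), int64(math.MaxInt64)

	var b [8]byte
	var entry []byte

	binary.BigEndian.PutUint32(b[:4], uint32(len(key)))
	entry = append(entry, b[:4]...) // 4-byte key length
	entry = append(entry, key...)   // key bytes
	binary.BigEndian.PutUint64(b[:], uint64(min))
	entry = append(entry, b[:]...) // 8-byte min time
	binary.BigEndian.PutUint64(b[:], uint64(max))
	entry = append(entry, b[:]...) // 8-byte max time

	fmt.Println(len(entry)) // 4 + 3 + 8 + 8 = 23
}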
-func (t *Tombstoner) readTombstoneV2(f *os.File, fn func(t Tombstone) error) error { - // Skip header, already checked earlier - if _, err := f.Seek(headerSize, io.SeekStart); err != nil { - return err - } - n := int64(4) - - fi, err := f.Stat() - if err != nil { - return err - } - size := fi.Size() - - var ( - min, max int64 - key []byte - ) - b := make([]byte, 4096) - for { - if n >= size { - break - } - - if _, err = f.Read(b[:4]); err != nil { - return err - } - n += 4 - - keyLen := int(binary.BigEndian.Uint32(b[:4])) - if keyLen > len(b) { - b = make([]byte, keyLen) - } - - if _, err := f.Read(b[:keyLen]); err != nil { - return err - } - key = b[:keyLen] - n += int64(keyLen) - - if _, err := f.Read(b[:8]); err != nil { - return err - } - n += 8 - - min = int64(binary.BigEndian.Uint64(b[:8])) - - if _, err := f.Read(b[:8]); err != nil { - return err - } - n += 8 - max = int64(binary.BigEndian.Uint64(b[:8])) - - if err := fn(Tombstone{ - Key: key, - Min: min, - Max: max, - }); err != nil { - return err - } - } - - for _, t := range t.tombstones { - if err := fn(t); err != nil { - return err - } - } - return nil -} - -// readTombstoneV3 reads the third version of tombstone files that are capable -// of storing keys and the range of time for the key that points were deleted. This -// format is a binary and compressed with gzip. -func (t *Tombstoner) readTombstoneV3(f *os.File, fn func(t Tombstone) error) error { - // Skip header, already checked earlier - if _, err := f.Seek(headerSize, io.SeekStart); err != nil { - return err - } - - var ( - min, max int64 - key []byte - ) - - gr, err := gzip.NewReader(bufio.NewReader(f)) - if err != nil { - return err - } - defer gr.Close() - - b := make([]byte, 4096) - for { - if _, err = io.ReadFull(gr, b[:4]); err == io.EOF || err == io.ErrUnexpectedEOF { - break - } else if err != nil { - return err - } - - keyLen := int(binary.BigEndian.Uint32(b[:4])) - if keyLen > len(b) { - b = make([]byte, keyLen) - } - - if _, err := io.ReadFull(gr, b[:keyLen]); err != nil { - return err - } - - // Copy the key since b is re-used - key = make([]byte, keyLen) - copy(key, b[:keyLen]) - - if _, err := io.ReadFull(gr, b[:8]); err != nil { - return err - } - - min = int64(binary.BigEndian.Uint64(b[:8])) - - if _, err := io.ReadFull(gr, b[:8]); err != nil { - return err - } - - max = int64(binary.BigEndian.Uint64(b[:8])) - - if err := fn(Tombstone{ - Key: key, - Min: min, - Max: max, - }); err != nil { - return err - } - } - - for _, t := range t.tombstones { - if err := fn(t); err != nil { - return err - } - } - return nil -} - -// readTombstoneV4 reads the fourth version of tombstone files that are capable -// of storing multiple v3 files appended together. 
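On disk, a v4 file is the 4-byte header followed by any number of independently gzipped v3-style blocks, one appended per flush. readTombstoneV4 (immediately below) walks those members with gzip's Multistream(false)/Reset pattern; the standalone sketch below shows just that mechanism with placeholder payloads.

package main

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

func main() {
	var buf bytes.Buffer

	// Two independent gzip members, as two separate tombstone flushes would append.
	for _, payload := range []string{"first flush", "second flush"} {
		gw := gzip.NewWriter(&buf)
		gw.Write([]byte(payload))
		gw.Close()
	}

	br := bufio.NewReader(&buf)
	gr, err := gzip.NewReader(br)
	if err != nil {
		panic(err)
	}
	defer gr.Close()

	for {
		gr.Multistream(false) // stop at the end of the current member
		b, err := io.ReadAll(gr)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s\n", b)

		if err := gr.Reset(br); err == io.EOF {
			break // no further members
		} else if err != nil {
			panic(err)
		}
	}
}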
-func (t *Tombstoner) readTombstoneV4(f *os.File, fn func(t Tombstone) error) error { - // Skip header, already checked earlier - if t.lastAppliedOffset != 0 { - if _, err := f.Seek(t.lastAppliedOffset, io.SeekStart); err != nil { - return err - } - } else { - if _, err := f.Seek(headerSize, io.SeekStart); err != nil { - return err - } - } - var ( - min, max int64 - key []byte - ) - - br := bufio.NewReaderSize(f, 64*1024) - gr, err := gzip.NewReader(br) - if err == io.EOF { - return nil - } else if err != nil { - return err - } - defer gr.Close() - - b := make([]byte, 4096) - for { - gr.Multistream(false) - if err := func() error { - for { - if _, err = io.ReadFull(gr, b[:4]); err == io.EOF || err == io.ErrUnexpectedEOF { - return nil - } else if err != nil { - return err - } - - keyLen := int(binary.BigEndian.Uint32(b[:4])) - if keyLen+16 > len(b) { - b = make([]byte, keyLen+16) - } - - if _, err := io.ReadFull(gr, b[:keyLen]); err != nil { - return err - } - - // Copy the key since b is re-used - key = b[:keyLen] - - minBuf := b[keyLen : keyLen+8] - maxBuf := b[keyLen+8 : keyLen+16] - if _, err := io.ReadFull(gr, minBuf); err != nil { - return err - } - - min = int64(binary.BigEndian.Uint64(minBuf)) - if _, err := io.ReadFull(gr, maxBuf); err != nil { - return err - } - - max = int64(binary.BigEndian.Uint64(maxBuf)) - if err := fn(Tombstone{ - Key: key, - Min: min, - Max: max, - }); err != nil { - return err - } - } - }(); err != nil { - return err - } - - for _, t := range t.tombstones { - if err := fn(t); err != nil { - return err - } - } - - err = gr.Reset(br) - if err == io.EOF { - break - } - } - - // Save the position of tombstone file so we don't re-apply the same set again if there are - // more deletes. - pos, err := f.Seek(0, io.SeekCurrent) - if err != nil { - return err - } - t.lastAppliedOffset = pos - return nil -} - -func (t *Tombstoner) tombstonePath() string { - if strings.HasSuffix(t.Path, TombstoneFileExtension) { - return t.Path - } - - // Filename is 0000001.tsm1 - filename := filepath.Base(t.Path) - - // Strip off the tsm1 - ext := filepath.Ext(filename) - if ext != "" { - filename = strings.TrimSuffix(filename, ext) - } - - // Append the "tombstone" suffix to create a 0000001.tombstone file - return filepath.Join(filepath.Dir(t.Path), filename+"."+TombstoneFileExtension) -} - -func (t *Tombstoner) writeTombstone(dst io.Writer, ts Tombstone) error { - binary.BigEndian.PutUint32(t.tmp[:4], uint32(len(ts.Key))) - if _, err := dst.Write(t.tmp[:4]); err != nil { - return err - } - if _, err := dst.Write([]byte(ts.Key)); err != nil { - return err - } - binary.BigEndian.PutUint64(t.tmp[:], uint64(ts.Min)) - if _, err := dst.Write(t.tmp[:]); err != nil { - return err - } - - binary.BigEndian.PutUint64(t.tmp[:], uint64(ts.Max)) - _, err := dst.Write(t.tmp[:]) - return err -} diff --git a/tsdb/engine/tsm1/tombstone_test.go b/tsdb/engine/tsm1/tombstone_test.go deleted file mode 100644 index 262aed4d140..00000000000 --- a/tsdb/engine/tsm1/tombstone_test.go +++ /dev/null @@ -1,309 +0,0 @@ -package tsm1_test - -import ( - "bytes" - "os" - "testing" - - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/stretchr/testify/require" -) - -func TestTombstoner_Add(t *testing.T) { - dir := t.TempDir() - - f := MustTempFile(t, dir) - ts := tsm1.NewTombstoner(f.Name(), nil) - - entries := mustReadAll(ts) - if got, exp := len(entries), 0; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } - - stats := ts.TombstoneStats() - 
require.False(t, stats.TombstoneExists) - - ts.Add([][]byte{[]byte("foo")}) - - if err := ts.Flush(); err != nil { - t.Fatalf("unexpected error flushing tombstone: %v", err) - } - - entries = mustReadAll(ts) - stats = ts.TombstoneStats() - require.True(t, stats.TombstoneExists) - require.NotZero(t, stats.Size) - require.NotZero(t, stats.LastModified) - require.NotEmpty(t, stats.Path) - - if got, exp := len(entries), 1; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := string(entries[0].Key), "foo"; got != exp { - t.Fatalf("value mismatch: got %v, exp %v", got, exp) - } - - // Use a new Tombstoner to verify values are persisted - ts = tsm1.NewTombstoner(f.Name(), nil) - entries = mustReadAll(ts) - if got, exp := len(entries), 1; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := string(entries[0].Key), "foo"; got != exp { - t.Fatalf("value mismatch: got %v, exp %v", got, exp) - } -} - -func TestTombstoner_Add_LargeKey(t *testing.T) { - dir := t.TempDir() - - f := MustTempFile(t, dir) - ts := tsm1.NewTombstoner(f.Name(), nil) - - entries := mustReadAll(ts) - if got, exp := len(entries), 0; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } - - stats := ts.TombstoneStats() - require.False(t, stats.TombstoneExists) - - key := bytes.Repeat([]byte{'a'}, 4096) - ts.Add([][]byte{key}) - - if err := ts.Flush(); err != nil { - t.Fatalf("unexpected error flushing tombstone: %v", err) - } - - entries = mustReadAll(ts) - stats = ts.TombstoneStats() - require.True(t, stats.TombstoneExists) - require.NotZero(t, stats.Size) - require.NotZero(t, stats.LastModified) - require.NotEmpty(t, stats.Path) - - if got, exp := len(entries), 1; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := string(entries[0].Key), string(key); got != exp { - t.Fatalf("value mismatch: got %v, exp %v", got, exp) - } - - // Use a new Tombstoner to verify values are persisted - ts = tsm1.NewTombstoner(f.Name(), nil) - entries = mustReadAll(ts) - if got, exp := len(entries), 1; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := string(entries[0].Key), string(key); got != exp { - t.Fatalf("value mismatch: got %v, exp %v", got, exp) - } -} - -func TestTombstoner_Add_Multiple(t *testing.T) { - dir := t.TempDir() - - f := MustTempFile(t, dir) - ts := tsm1.NewTombstoner(f.Name(), nil) - - entries := mustReadAll(ts) - if got, exp := len(entries), 0; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } - - stats := ts.TombstoneStats() - require.False(t, stats.TombstoneExists) - - ts.Add([][]byte{[]byte("foo")}) - - if err := ts.Flush(); err != nil { - t.Fatalf("unexpected error flushing tombstone: %v", err) - } - - ts.Add([][]byte{[]byte("bar")}) - - if err := ts.Flush(); err != nil { - t.Fatalf("unexpected error flushing tombstone: %v", err) - } - - entries = mustReadAll(ts) - stats = ts.TombstoneStats() - require.True(t, stats.TombstoneExists) - require.NotZero(t, stats.Size) - require.NotZero(t, stats.LastModified) - require.NotEmpty(t, stats.Path) - - if got, exp := len(entries), 2; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := string(entries[0].Key), "foo"; got != exp { - t.Fatalf("value mismatch: got %v, exp %v", got, exp) - } - - if got, exp := string(entries[1].Key), "bar"; got != exp { - t.Fatalf("value mismatch: got %v, exp %v", got, exp) - } - - // Use a new Tombstoner to verify 
values are persisted - ts = tsm1.NewTombstoner(f.Name(), nil) - entries = mustReadAll(ts) - if got, exp := len(entries), 2; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := string(entries[0].Key), "foo"; got != exp { - t.Fatalf("value mismatch: got %v, exp %v", got, exp) - } - - if got, exp := string(entries[1].Key), "bar"; got != exp { - t.Fatalf("value mismatch: got %v, exp %v", got, exp) - } - -} - -func TestTombstoner_Add_Empty(t *testing.T) { - dir := t.TempDir() - - f := MustTempFile(t, dir) - ts := tsm1.NewTombstoner(f.Name(), nil) - - entries := mustReadAll(ts) - if got, exp := len(entries), 0; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } - - ts.Add([][]byte{}) - - if err := ts.Flush(); err != nil { - t.Fatalf("unexpected error flushing tombstone: %v", err) - } - - // Use a new Tombstoner to verify values are persisted - ts = tsm1.NewTombstoner(f.Name(), nil) - entries = mustReadAll(ts) - if got, exp := len(entries), 0; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } - - stats := ts.TombstoneStats() - require.False(t, stats.TombstoneExists) -} - -func TestTombstoner_Delete(t *testing.T) { - dir := t.TempDir() - - f := MustTempFile(t, dir) - ts := tsm1.NewTombstoner(f.Name(), nil) - - ts.Add([][]byte{[]byte("foo")}) - - if err := ts.Flush(); err != nil { - t.Fatalf("unexpected error flushing: %v", err) - } - - // Use a new Tombstoner to verify values are persisted - ts = tsm1.NewTombstoner(f.Name(), nil) - entries := mustReadAll(ts) - if got, exp := len(entries), 1; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := string(entries[0].Key), "foo"; got != exp { - t.Fatalf("value mismatch: got %s, exp %s", got, exp) - } - - if err := ts.Delete(); err != nil { - fatal(t, "delete tombstone", err) - } - - stats := ts.TombstoneStats() - require.False(t, stats.TombstoneExists) - - ts = tsm1.NewTombstoner(f.Name(), nil) - entries = mustReadAll(ts) - if got, exp := len(entries), 0; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } -} - -func TestTombstoner_ReadV1(t *testing.T) { - dir := t.TempDir() - - f := MustTempFile(t, dir) - if err := os.WriteFile(f.Name(), []byte("foo\n"), 0x0600); err != nil { - t.Fatalf("write v1 file: %v", err) - } - f.Close() - - if err := os.Rename(f.Name(), f.Name()+"."+tsm1.TombstoneFileExtension); err != nil { - t.Fatalf("rename tombstone failed: %v", err) - } - - ts := tsm1.NewTombstoner(f.Name(), nil) - - // Read once - _ = mustReadAll(ts) - - // Read again - entries := mustReadAll(ts) - - if got, exp := len(entries), 1; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := string(entries[0].Key), "foo"; got != exp { - t.Fatalf("value mismatch: got %v, exp %v", got, exp) - } - - // Use a new Tombstoner to verify values are persisted - ts = tsm1.NewTombstoner(f.Name(), nil) - entries = mustReadAll(ts) - if got, exp := len(entries), 1; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } - - if got, exp := string(entries[0].Key), "foo"; got != exp { - t.Fatalf("value mismatch: got %v, exp %v", got, exp) - } -} - -func TestTombstoner_ReadEmptyV1(t *testing.T) { - dir := t.TempDir() - - f := MustTempFile(t, dir) - f.Close() - if err := os.Rename(f.Name(), f.Name()+"."+tsm1.TombstoneFileExtension); err != nil { - t.Fatalf("rename tombstone failed: %v", err) - } - - ts := tsm1.NewTombstoner(f.Name(), nil) - - _ = mustReadAll(ts) - - entries := 
mustReadAll(ts) - if got, exp := len(entries), 0; got != exp { - t.Fatalf("length mismatch: got %v, exp %v", got, exp) - } -} - -func mustReadAll(t *tsm1.Tombstoner) []tsm1.Tombstone { - var tombstones []tsm1.Tombstone - if err := t.Walk(func(t tsm1.Tombstone) error { - b := make([]byte, len(t.Key)) - copy(b, t.Key) - tombstones = append(tombstones, tsm1.Tombstone{ - Min: t.Min, - Max: t.Max, - Key: b, - }) - return nil - }); err != nil { - panic(err) - } - return tombstones -} diff --git a/tsdb/engine/tsm1/wal.go b/tsdb/engine/tsm1/wal.go deleted file mode 100644 index 9eaf4fd38f0..00000000000 --- a/tsdb/engine/tsm1/wal.go +++ /dev/null @@ -1,1312 +0,0 @@ -package tsm1 - -import ( - "bufio" - "bytes" - "context" - "encoding/binary" - "fmt" - "io" - "math" - "os" - "path/filepath" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/golang/snappy" - "github.com/influxdata/influxdb/v2/pkg/limiter" - "github.com/influxdata/influxdb/v2/pkg/pool" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -const ( - // DefaultSegmentSize of 10MB is the size at which segment files will be rolled over. - DefaultSegmentSize = 10 * 1024 * 1024 - - // WALFileExtension is the file extension we expect for wal segments. - WALFileExtension = "wal" - - // WALFilePrefix is the prefix on all wal segment files. - WALFilePrefix = "_" - - // walEncodeBufSize is the size of the wal entry encoding buffer - walEncodeBufSize = 4 * 1024 * 1024 - - float64EntryType = 1 - integerEntryType = 2 - booleanEntryType = 3 - stringEntryType = 4 - unsignedEntryType = 5 -) - -// WalEntryType is a byte written to a wal segment file that indicates what the following compressed block contains. -type WalEntryType byte - -const ( - // WriteWALEntryType indicates a write entry. - WriteWALEntryType WalEntryType = 0x01 - - // DeleteWALEntryType indicates a delete entry. - DeleteWALEntryType WalEntryType = 0x02 - - // DeleteRangeWALEntryType indicates a delete range entry. - DeleteRangeWALEntryType WalEntryType = 0x03 -) - -var ( - // ErrWALClosed is returned when attempting to write to a closed WAL file. - ErrWALClosed = fmt.Errorf("WAL closed") - - // ErrWALCorrupt is returned when reading a corrupt WAL entry. - ErrWALCorrupt = fmt.Errorf("corrupted WAL entry") - - defaultWaitingWALWrites = runtime.GOMAXPROCS(0) * 2 - - // bytePool is a shared bytes pool buffer re-cycle []byte slices to reduce allocations. - bytesPool = pool.NewLimitedBytes(256, walEncodeBufSize*2) -) - -// WAL represents the write-ahead log used for writing TSM files. -type WAL struct { - // goroutines waiting for the next fsync - syncCount uint64 - syncWaiters chan chan error - - mu sync.RWMutex - lastWriteTime time.Time - - path string - - // write variables - currentSegmentID int - currentSegmentWriter *WALSegmentWriter - - // cache and flush variables - once sync.Once - closing chan struct{} - - // syncDelay sets the duration to wait before fsyncing writes. A value of 0 (default) - // will cause every write to be fsync'd. This must be set before the WAL - // is opened if a non-default value is required. - syncDelay time.Duration - - // WALOutput is the writer used by the logger. - logger *zap.Logger // Logger to be used for important messages - traceLogger *zap.Logger // Logger to be used when trace-logging is on. 
- traceLogging bool - - // SegmentSize is the file size at which a segment file will be rotated - SegmentSize int - - // statistics for the WAL - stats *walMetrics - - // limiter limits the max concurrency of waiting WAL writes. - limiter limiter.Fixed - - // maxWriteWait sets the max duration the WAL will wait when limiter has no available - // values to take. - maxWriteWait time.Duration -} - -// NewWAL initializes a new WAL at the given directory. -func NewWAL(path string, maxConcurrentWrites int, maxWriteDelay time.Duration, tags tsdb.EngineTags) *WAL { - logger := zap.NewNop() - if maxConcurrentWrites == 0 { - maxConcurrentWrites = defaultWaitingWALWrites - } - - return &WAL{ - path: path, - - // these options should be overridden by any options in the config - SegmentSize: DefaultSegmentSize, - closing: make(chan struct{}), - syncWaiters: make(chan chan error, 1024), - stats: newWALMetrics(tags), - limiter: limiter.NewFixed(maxConcurrentWrites), - maxWriteWait: maxWriteDelay, - logger: logger, - traceLogger: logger, - } -} - -// enableTraceLogging must be called before the WAL is opened. -func (l *WAL) enableTraceLogging(enabled bool) { - l.traceLogging = enabled - if enabled { - l.traceLogger = l.logger - } -} - -// WithLogger sets the WAL's logger. -func (l *WAL) WithLogger(log *zap.Logger) { - l.logger = log.With(zap.String("service", "wal")) - - if l.traceLogging { - l.traceLogger = l.logger - } -} - -var globalWALMetrics = newAllWALMetrics() - -const walSubsystem = "wal" - -type allWALMetrics struct { - size *prometheus.GaugeVec - writes *prometheus.CounterVec - writesErr *prometheus.CounterVec -} - -type walMetrics struct { - // size should never be updated directly, only through SetSize/AddSize - size prometheus.Gauge - // sizeAtomic should never be updated directly, only through SetSize/AddSize - sizeAtomic int64 - writes prometheus.Counter - writesErr prometheus.Counter -} - -func (f *walMetrics) AddSize(n int64) { - val := atomic.AddInt64(&f.sizeAtomic, n) - f.size.Set(float64(val)) -} - -func (f *walMetrics) SetSize(n int64) { - atomic.StoreInt64(&f.sizeAtomic, n) - f.size.Set(float64(n)) -} - -func newAllWALMetrics() *allWALMetrics { - labels := tsdb.EngineLabelNames() - return &allWALMetrics{ - size: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: storageNamespace, - Subsystem: walSubsystem, - Name: "size", - Help: "Gauge of size of WAL in bytes", - }, labels), - writes: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: storageNamespace, - Subsystem: walSubsystem, - Name: "writes", - Help: "Number of write attempts to the WAL", - }, labels), - writesErr: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: storageNamespace, - Subsystem: walSubsystem, - Name: "writes_err", - Help: "Number of failed write attempts to the WAL", - }, labels), - } -} - -func WALCollectors() []prometheus.Collector { - return []prometheus.Collector{ - globalWALMetrics.size, - globalWALMetrics.writes, - globalWALMetrics.writesErr, - } -} - -func newWALMetrics(tags tsdb.EngineTags) *walMetrics { - labels := tags.GetLabels() - return &walMetrics{ - size: globalWALMetrics.size.With(labels), - writes: globalWALMetrics.writes.With(labels), - writesErr: globalWALMetrics.writesErr.With(labels), - } -} - -// Path returns the directory the log was initialized with. -func (l *WAL) Path() string { - l.mu.RLock() - defer l.mu.RUnlock() - return l.path -} - -// Open opens and initializes the Log. Open can recover from previous unclosed shutdowns. 
-func (l *WAL) Open() error { - l.mu.Lock() - defer l.mu.Unlock() - - l.traceLogger.Info("tsm1 WAL starting", zap.Int("segment_size", l.SegmentSize)) - l.traceLogger.Info("tsm1 WAL writing", zap.String("path", l.path)) - - if err := os.MkdirAll(l.path, 0777); err != nil { - return err - } - - segments, err := segmentFileNames(l.path) - if err != nil { - return err - } - - if len(segments) > 0 { - lastSegment := segments[len(segments)-1] - id, err := idFromFileName(lastSegment) - if err != nil { - return err - } - - l.currentSegmentID = id - stat, err := os.Stat(lastSegment) - if err != nil { - return err - } - - if stat.Size() == 0 { - os.Remove(lastSegment) - segments = segments[:len(segments)-1] - } else { - fd, err := os.OpenFile(lastSegment, os.O_RDWR, 0666) - if err != nil { - return err - } - if _, err := fd.Seek(0, io.SeekEnd); err != nil { - _ = fd.Close() - return err - } - l.currentSegmentWriter = NewWALSegmentWriter(fd) - - // Set the correct size on the segment writer - l.currentSegmentWriter.size = int(stat.Size()) - } - } - - var totalSize int64 - for _, seg := range segments { - stat, err := os.Stat(seg) - if err != nil { - return err - } - - if stat.Size() > 0 { - totalSize += stat.Size() - if stat.ModTime().After(l.lastWriteTime) { - l.lastWriteTime = stat.ModTime().UTC() - } - } - } - - l.stats.SetSize(totalSize) - - l.closing = make(chan struct{}) - - return nil -} - -// scheduleSync will schedule an fsync to the current wal segment and notify any -// waiting gorutines. If an fsync is already scheduled, subsequent calls will -// not schedule a new fsync and will be handle by the existing scheduled fsync. -func (l *WAL) scheduleSync() { - // If we're not the first to sync, then another goroutine is fsyncing the wal for us. - if !atomic.CompareAndSwapUint64(&l.syncCount, 0, 1) { - return - } - - // Fsync the wal and notify all pending waiters - go func() { - var timerCh <-chan time.Time - - // time.NewTicker requires a > 0 delay, since 0 indicates no delay, use a closed - // channel which will always be ready to read from. - if l.syncDelay == 0 { - // Create a RW chan and close it - timerChrw := make(chan time.Time) - close(timerChrw) - // Convert it to a read-only - timerCh = timerChrw - } else { - t := time.NewTicker(l.syncDelay) - defer t.Stop() - timerCh = t.C - } - for { - select { - case <-timerCh: - l.mu.Lock() - if len(l.syncWaiters) == 0 { - atomic.StoreUint64(&l.syncCount, 0) - l.mu.Unlock() - return - } - - l.sync() - l.mu.Unlock() - case <-l.closing: - atomic.StoreUint64(&l.syncCount, 0) - return - } - } - }() -} - -// sync fsyncs the current wal segments and notifies any waiters. Callers must ensure -// a write lock on the WAL is obtained before calling sync. -func (l *WAL) sync() { - err := l.currentSegmentWriter.sync() - for len(l.syncWaiters) > 0 { - errC := <-l.syncWaiters - errC <- err - } -} - -// WriteMulti writes the given values to the WAL. It returns the WAL segment ID to -// which the points were written. If an error is returned the segment ID should -// be ignored. -func (l *WAL) WriteMulti(ctx context.Context, values map[string][]Value) (int, error) { - entry := &WriteWALEntry{ - Values: values, - } - - id, err := l.writeToLog(ctx, entry) - l.stats.writes.Inc() - if err != nil { - l.stats.writesErr.Inc() - return -1, err - } - - return id, nil -} - -// ClosedSegments returns a slice of the names of the closed segment files. 
-func (l *WAL) ClosedSegments() ([]string, error) { - l.mu.RLock() - defer l.mu.RUnlock() - // Not loading files from disk so nothing to do - if l.path == "" { - return nil, nil - } - - var currentFile string - if l.currentSegmentWriter != nil { - currentFile = l.currentSegmentWriter.path() - } - - files, err := segmentFileNames(l.path) - if err != nil { - return nil, err - } - - var closedFiles []string - for _, fn := range files { - // Skip the current path - if fn == currentFile { - continue - } - - closedFiles = append(closedFiles, fn) - } - - return closedFiles, nil -} - -// Remove deletes the given segment file paths from disk and cleans up any associated objects. -func (l *WAL) Remove(files []string) error { - l.mu.Lock() - defer l.mu.Unlock() - for _, fn := range files { - l.traceLogger.Info("Removing WAL file", zap.String("path", fn)) - os.RemoveAll(fn) - } - - // Refresh the on-disk size stats - segments, err := segmentFileNames(l.path) - if err != nil { - return err - } - - var totalSize int64 - for _, seg := range segments { - stat, err := os.Stat(seg) - if err != nil { - return err - } - - totalSize += stat.Size() - } - - l.stats.SetSize(totalSize) - - return nil -} - -// LastWriteTime is the last time anything was written to the WAL. -func (l *WAL) LastWriteTime() time.Time { - l.mu.RLock() - defer l.mu.RUnlock() - return l.lastWriteTime -} - -func (l *WAL) DiskSizeBytes() int64 { - return atomic.LoadInt64(&l.stats.sizeAtomic) -} - -func (l *WAL) writeToLog(ctx context.Context, entry WALEntry) (int, error) { - // limit how many concurrent encodings can be in flight. Since we can only - // write one at a time to disk, a slow disk can cause the allocations below - // to increase quickly. If we're backed up, wait until others have completed. - cancel := func() {} - if l.maxWriteWait > 0 { - ctx, cancel = context.WithTimeout(ctx, l.maxWriteWait) - } - if err := l.limiter.Take(ctx); err != nil { - cancel() - return 0, err - } - defer l.limiter.Release() - cancel() - - bytes := bytesPool.Get(entry.MarshalSize()) - - b, err := entry.Encode(bytes) - if err != nil { - bytesPool.Put(bytes) - return -1, err - } - - encBuf := bytesPool.Get(snappy.MaxEncodedLen(len(b))) - - compressed := snappy.Encode(encBuf, b) - bytesPool.Put(bytes) - - syncErr := make(chan error) - - segID, err := func() (int, error) { - l.mu.Lock() - defer l.mu.Unlock() - - // Make sure the log has not been closed - select { - case <-l.closing: - return -1, ErrWALClosed - default: - } - - // roll the segment file if needed - if err := l.rollSegment(); err != nil { - return -1, fmt.Errorf("error rolling WAL segment: %v", err) - } - - // write and sync - oldSize := l.currentSegmentWriter.size - if err := l.currentSegmentWriter.Write(entry.Type(), compressed); err != nil { - return -1, fmt.Errorf("error writing WAL entry: %v", err) - } - sizeDelta := l.currentSegmentWriter.size - oldSize - - select { - case l.syncWaiters <- syncErr: - default: - return -1, fmt.Errorf("error syncing wal") - } - l.scheduleSync() - - // Update stats for current segment size - l.stats.AddSize(int64(sizeDelta)) - - l.lastWriteTime = time.Now().UTC() - - return l.currentSegmentID, nil - - }() - - bytesPool.Put(encBuf) - - if err != nil { - return segID, err - } - - // schedule an fsync and wait for it to complete - return segID, <-syncErr -} - -// rollSegment checks if the current segment is due to roll over to a new segment; -// and if so, opens a new segment file for future writes. 
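When rollSegment (immediately below) rotates, newSegmentFile builds the next file name from WALFilePrefix, a zero-padded segment ID, and WALFileExtension. The padding is what lets segmentFileNames recover ID order with a plain string sort, as this small sketch illustrates.

package main

import (
	"fmt"
	"sort"
)

func main() {
	const prefix, ext = "_", "wal" // WALFilePrefix / WALFileExtension

	var names []string
	for _, id := range []int{10, 2, 1} {
		names = append(names, fmt.Sprintf("%s%05d.%s", prefix, id, ext))
	}

	// Zero-padded IDs make lexical order equal numeric order.
	sort.Strings(names)
	fmt.Println(names) // [_00001.wal _00002.wal _00010.wal]
}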
-func (l *WAL) rollSegment() error { - if l.currentSegmentWriter == nil || l.currentSegmentWriter.size > l.SegmentSize { - if err := l.newSegmentFile(); err != nil { - // A drop database or RP call could trigger this error if writes were in-flight - // when the drop statement executes. - return fmt.Errorf("error opening new segment file for wal (2): %v", err) - } - return nil - } - - return nil -} - -// CloseSegment closes the current segment if it is non-empty and opens a new one. -func (l *WAL) CloseSegment() error { - l.mu.Lock() - defer l.mu.Unlock() - if l.currentSegmentWriter == nil || l.currentSegmentWriter.size > 0 { - if err := l.newSegmentFile(); err != nil { - // A drop database or RP call could trigger this error if writes were in-flight - // when the drop statement executes. - return fmt.Errorf("error opening new segment file for wal (1): %v", err) - } - return nil - } - return nil -} - -// Delete deletes the given keys, returning the segment ID for the operation. -func (l *WAL) Delete(ctx context.Context, keys [][]byte) (int, error) { - if len(keys) == 0 { - return 0, nil - } - entry := &DeleteWALEntry{ - Keys: keys, - } - - id, err := l.writeToLog(ctx, entry) - if err != nil { - return -1, err - } - return id, nil -} - -// DeleteRange deletes the given keys within the given time range, -// returning the segment ID for the operation. -func (l *WAL) DeleteRange(ctx context.Context, keys [][]byte, min, max int64) (int, error) { - if len(keys) == 0 { - return 0, nil - } - entry := &DeleteRangeWALEntry{ - Keys: keys, - Min: min, - Max: max, - } - - id, err := l.writeToLog(ctx, entry) - if err != nil { - return -1, err - } - return id, nil -} - -// Close will finish any flush that is currently in progress and close file handles. -func (l *WAL) Close() error { - l.mu.Lock() - defer l.mu.Unlock() - - // Always attempt to close the segment writer. We cannot do this in once.Do - // because if we have already closed the WAL before and reopened it again, - // the next Close() call will not close the new segment writer. For example: - // func main() { - // w.Close() -- (1) - // w.Open() - // w.Close() -- (2) - // } - // (2) needs to close the reopened `currentSegmentWriter` again. - l.traceLogger.Info("Closing WAL file", zap.String("path", l.path)) - if l.currentSegmentWriter != nil { - l.sync() - _ = l.currentSegmentWriter.close() - l.currentSegmentWriter = nil - } - - l.once.Do(func() { - // Close, but don't set to nil so future goroutines can still be signaled - close(l.closing) - }) - - return nil -} - -// segmentFileNames will return all files that are WAL segment files in sorted order by ascending ID. -func segmentFileNames(dir string) ([]string, error) { - names, err := filepath.Glob(filepath.Join(dir, fmt.Sprintf("%s*.%s", WALFilePrefix, WALFileExtension))) - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - -// newSegmentFile will close the current segment file and open a new one, updating bookkeeping info on the log. -func (l *WAL) newSegmentFile() error { - l.currentSegmentID++ - if l.currentSegmentWriter != nil { - l.sync() - - if err := l.currentSegmentWriter.close(); err != nil { - return err - } - } - - fileName := filepath.Join(l.path, fmt.Sprintf("%s%05d.%s", WALFilePrefix, l.currentSegmentID, WALFileExtension)) - fd, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0666) - if err != nil { - return err - } - l.currentSegmentWriter = NewWALSegmentWriter(fd) - - return nil -} - -// WALEntry is record stored in each WAL segment. 
Each entry has a type -// and an opaque, type dependent byte slice data attribute. -type WALEntry interface { - Type() WalEntryType - Encode(dst []byte) ([]byte, error) - MarshalBinary() ([]byte, error) - UnmarshalBinary(b []byte) error - MarshalSize() int -} - -// WriteWALEntry represents a write of points. -type WriteWALEntry struct { - Values map[string][]Value - sz int -} - -func (w *WriteWALEntry) MarshalSize() int { - if w.sz > 0 || len(w.Values) == 0 { - return w.sz - } - - encLen := 7 * len(w.Values) // Type (1), Key Length (2), and Count (4) for each key - - // determine required length - for k, v := range w.Values { - encLen += len(k) - if len(v) == 0 { - return 0 - } - - encLen += 8 * len(v) // timestamps (8) - - switch v[0].(type) { - case FloatValue, IntegerValue, UnsignedValue: - encLen += 8 * len(v) - case BooleanValue: - encLen += 1 * len(v) - case StringValue: - for _, vv := range v { - str, ok := vv.(StringValue) - if !ok { - return 0 - } - encLen += 4 + len(str.value) - } - default: - return 0 - } - } - - w.sz = encLen - - return w.sz -} - -// Encode converts the WriteWALEntry into a byte stream using dst if it -// is large enough. If dst is too small, the slice will be grown to fit the -// encoded entry. -func (w *WriteWALEntry) Encode(dst []byte) ([]byte, error) { - // The entries values are encode as follows: - // - // For each key and slice of values, first a 1 byte type for the []Values - // slice is written. Following the type, the length and key bytes are written. - // Following the key, a 4 byte count followed by each value as a 8 byte time - // and N byte value. The value is dependent on the type being encoded. float64, - // int64, use 8 bytes, boolean uses 1 byte, and string is similar to the key encoding, - // except that string values have a 4-byte length, and keys only use 2 bytes. - // - // This structure is then repeated for each key an value slices. 
- // - // ┌────────────────────────────────────────────────────────────────────┐ - // │ WriteWALEntry │ - // ├──────┬─────────┬────────┬───────┬─────────┬─────────┬───┬──────┬───┤ - // │ Type │ Key Len │ Key │ Count │ Time │ Value │...│ Type │...│ - // │1 byte│ 2 bytes │ N bytes│4 bytes│ 8 bytes │ N bytes │ │1 byte│ │ - // └──────┴─────────┴────────┴───────┴─────────┴─────────┴───┴──────┴───┘ - - encLen := w.MarshalSize() // Type (1), Key Length (2), and Count (4) for each key - - // allocate or re-slice to correct size - if len(dst) < encLen { - dst = make([]byte, encLen) - } else { - dst = dst[:encLen] - } - - // Finally, encode the entry - var n int - var curType byte - - for k, v := range w.Values { - switch v[0].(type) { - case FloatValue: - curType = float64EntryType - case IntegerValue: - curType = integerEntryType - case UnsignedValue: - curType = unsignedEntryType - case BooleanValue: - curType = booleanEntryType - case StringValue: - curType = stringEntryType - default: - return nil, fmt.Errorf("unsupported value type: %T", v[0]) - } - dst[n] = curType - n++ - - binary.BigEndian.PutUint16(dst[n:n+2], uint16(len(k))) - n += 2 - n += copy(dst[n:], k) - - binary.BigEndian.PutUint32(dst[n:n+4], uint32(len(v))) - n += 4 - - for _, vv := range v { - binary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.UnixNano())) - n += 8 - - switch vv := vv.(type) { - case FloatValue: - if curType != float64EntryType { - return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv) - } - binary.BigEndian.PutUint64(dst[n:n+8], math.Float64bits(vv.value)) - n += 8 - case IntegerValue: - if curType != integerEntryType { - return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv) - } - binary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.value)) - n += 8 - case UnsignedValue: - if curType != unsignedEntryType { - return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv) - } - binary.BigEndian.PutUint64(dst[n:n+8], uint64(vv.value)) - n += 8 - case BooleanValue: - if curType != booleanEntryType { - return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv) - } - if vv.value { - dst[n] = 1 - } else { - dst[n] = 0 - } - n++ - case StringValue: - if curType != stringEntryType { - return nil, fmt.Errorf("incorrect value found in %T slice: %T", v[0].Value(), vv) - } - binary.BigEndian.PutUint32(dst[n:n+4], uint32(len(vv.value))) - n += 4 - n += copy(dst[n:], vv.value) - default: - return nil, fmt.Errorf("unsupported value found in %T slice: %T", v[0].Value(), vv) - } - } - } - - return dst[:n], nil -} - -// MarshalBinary returns a binary representation of the entry in a new byte slice. -func (w *WriteWALEntry) MarshalBinary() ([]byte, error) { - // Temp buffer to write marshaled points into - b := make([]byte, w.MarshalSize()) - return w.Encode(b) -} - -// UnmarshalBinary deserializes the byte slice into w. 
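To make the sizes in the layout above concrete: one float64 point under a 19-byte key encodes to 42 bytes (1 type + 2 key length + 19 key + 4 count + 8 timestamp + 8 value), which is exactly what `MarshalSize` reports. A small sketch using the exported `WriteWALEntry` API shown in this diff; the `main` wrapper is illustrative.

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/tsdb/engine/tsm1"
)

func main() {
	key := "cpu,host=A#!~#float" // 19 bytes
	entry := &tsm1.WriteWALEntry{
		Values: map[string][]tsm1.Value{key: {tsm1.NewValue(1, 1.1)}},
	}

	b, err := entry.MarshalBinary()
	if err != nil {
		panic(err)
	}
	// 1 (type) + 2 (key len) + 19 (key) + 4 (count) + 8 (time) + 8 (float) = 42 bytes.
	fmt.Printf("encoded %d bytes for %q\n", len(b), key)
}
```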
-func (w *WriteWALEntry) UnmarshalBinary(b []byte) error { - var i int - for i < len(b) { - typ := b[i] - i++ - - if i+2 > len(b) { - return ErrWALCorrupt - } - - length := int(binary.BigEndian.Uint16(b[i : i+2])) - i += 2 - - if i+length > len(b) { - return ErrWALCorrupt - } - - k := string(b[i : i+length]) - i += length - - if i+4 > len(b) { - return ErrWALCorrupt - } - - nvals := int(binary.BigEndian.Uint32(b[i : i+4])) - i += 4 - - if nvals <= 0 || nvals > len(b) { - return ErrWALCorrupt - } - - switch typ { - case float64EntryType: - if i+16*nvals > len(b) { - return ErrWALCorrupt - } - - values := make([]Value, 0, nvals) - for j := 0; j < nvals; j++ { - un := int64(binary.BigEndian.Uint64(b[i : i+8])) - i += 8 - v := math.Float64frombits((binary.BigEndian.Uint64(b[i : i+8]))) - i += 8 - values = append(values, NewFloatValue(un, v)) - } - w.Values[k] = values - case integerEntryType: - if i+16*nvals > len(b) { - return ErrWALCorrupt - } - - values := make([]Value, 0, nvals) - for j := 0; j < nvals; j++ { - un := int64(binary.BigEndian.Uint64(b[i : i+8])) - i += 8 - v := int64(binary.BigEndian.Uint64(b[i : i+8])) - i += 8 - values = append(values, NewIntegerValue(un, v)) - } - w.Values[k] = values - - case unsignedEntryType: - if i+16*nvals > len(b) { - return ErrWALCorrupt - } - - values := make([]Value, 0, nvals) - for j := 0; j < nvals; j++ { - un := int64(binary.BigEndian.Uint64(b[i : i+8])) - i += 8 - v := binary.BigEndian.Uint64(b[i : i+8]) - i += 8 - values = append(values, NewUnsignedValue(un, v)) - } - w.Values[k] = values - - case booleanEntryType: - if i+9*nvals > len(b) { - return ErrWALCorrupt - } - - values := make([]Value, 0, nvals) - for j := 0; j < nvals; j++ { - un := int64(binary.BigEndian.Uint64(b[i : i+8])) - i += 8 - - v := b[i] - i += 1 - if v == 1 { - values = append(values, NewBooleanValue(un, true)) - } else { - values = append(values, NewBooleanValue(un, false)) - } - } - w.Values[k] = values - - case stringEntryType: - values := make([]Value, 0, nvals) - for j := 0; j < nvals; j++ { - if i+12 > len(b) { - return ErrWALCorrupt - } - - un := int64(binary.BigEndian.Uint64(b[i : i+8])) - i += 8 - - length := int(binary.BigEndian.Uint32(b[i : i+4])) - if i+length > len(b) { - return ErrWALCorrupt - } - - i += 4 - - if i+length > len(b) { - return ErrWALCorrupt - } - - v := string(b[i : i+length]) - i += length - values = append(values, NewStringValue(un, v)) - } - w.Values[k] = values - - default: - return fmt.Errorf("unsupported value type: %#v", typ) - } - } - return nil -} - -// Type returns WriteWALEntryType. -func (w *WriteWALEntry) Type() WalEntryType { - return WriteWALEntryType -} - -// DeleteWALEntry represents the deletion of multiple series. -type DeleteWALEntry struct { - Keys [][]byte - sz int -} - -// MarshalBinary returns a binary representation of the entry in a new byte slice. -func (w *DeleteWALEntry) MarshalBinary() ([]byte, error) { - b := make([]byte, w.MarshalSize()) - return w.Encode(b) -} - -// UnmarshalBinary deserializes the byte slice into w. -func (w *DeleteWALEntry) UnmarshalBinary(b []byte) error { - if len(b) == 0 { - return nil - } - - // b originates from a pool. Copy what needs to be retained. 
- buf := make([]byte, len(b)) - copy(buf, b) - w.Keys = bytes.Split(buf, []byte("\n")) - return nil -} - -func (w *DeleteWALEntry) MarshalSize() int { - if w.sz > 0 || len(w.Keys) == 0 { - return w.sz - } - - encLen := len(w.Keys) // newlines - for _, k := range w.Keys { - encLen += len(k) - } - - w.sz = encLen - - return encLen -} - -// Encode converts the DeleteWALEntry into a byte slice, appending to dst. -func (w *DeleteWALEntry) Encode(dst []byte) ([]byte, error) { - sz := w.MarshalSize() - - if len(dst) < sz { - dst = make([]byte, sz) - } - - var n int - for _, k := range w.Keys { - n += copy(dst[n:], k) - n += copy(dst[n:], "\n") - } - - // We return n-1 to strip off the last newline so that unmarshalling the value - // does not produce an empty string - return []byte(dst[:n-1]), nil -} - -// Type returns DeleteWALEntryType. -func (w *DeleteWALEntry) Type() WalEntryType { - return DeleteWALEntryType -} - -// DeleteRangeWALEntry represents the deletion of multiple series. -type DeleteRangeWALEntry struct { - Keys [][]byte - Min, Max int64 - sz int -} - -// MarshalBinary returns a binary representation of the entry in a new byte slice. -func (w *DeleteRangeWALEntry) MarshalBinary() ([]byte, error) { - b := make([]byte, w.MarshalSize()) - return w.Encode(b) -} - -// UnmarshalBinary deserializes the byte slice into w. -func (w *DeleteRangeWALEntry) UnmarshalBinary(b []byte) error { - if len(b) < 16 { - return ErrWALCorrupt - } - - w.Min = int64(binary.BigEndian.Uint64(b[:8])) - w.Max = int64(binary.BigEndian.Uint64(b[8:16])) - - i := 16 - for i < len(b) { - if i+4 > len(b) { - return ErrWALCorrupt - } - sz := int(binary.BigEndian.Uint32(b[i : i+4])) - i += 4 - - if i+sz > len(b) { - return ErrWALCorrupt - } - - // b originates from a pool. Copy what needs to be retained. - buf := make([]byte, sz) - copy(buf, b[i:i+sz]) - w.Keys = append(w.Keys, buf) - i += sz - } - return nil -} - -func (w *DeleteRangeWALEntry) MarshalSize() int { - if w.sz > 0 { - return w.sz - } - - sz := 16 + len(w.Keys)*4 - for _, k := range w.Keys { - sz += len(k) - } - - w.sz = sz - - return sz -} - -// Encode converts the DeleteRangeWALEntry into a byte slice, appending to b. -func (w *DeleteRangeWALEntry) Encode(b []byte) ([]byte, error) { - sz := w.MarshalSize() - - if len(b) < sz { - b = make([]byte, sz) - } - - binary.BigEndian.PutUint64(b[:8], uint64(w.Min)) - binary.BigEndian.PutUint64(b[8:16], uint64(w.Max)) - - i := 16 - for _, k := range w.Keys { - binary.BigEndian.PutUint32(b[i:i+4], uint32(len(k))) - i += 4 - i += copy(b[i:], k) - } - - return b[:i], nil -} - -// Type returns DeleteRangeWALEntryType. -func (w *DeleteRangeWALEntry) Type() WalEntryType { - return DeleteRangeWALEntryType -} - -// WALSegmentWriter writes WAL segments. -type WALSegmentWriter struct { - bw *bufio.Writer - w io.WriteCloser - size int -} - -// NewWALSegmentWriter returns a new WALSegmentWriter writing to w. -func NewWALSegmentWriter(w io.WriteCloser) *WALSegmentWriter { - return &WALSegmentWriter{ - bw: bufio.NewWriterSize(w, 16*1024), - w: w, - } -} - -func (w *WALSegmentWriter) path() string { - if f, ok := w.w.(*os.File); ok { - return f.Name() - } - return "" -} - -// Write writes entryType and the buffer containing compressed entry data. 
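The delete-range layout defined above (8-byte min, 8-byte max, then a 4-byte length prefix before each key) can be sanity-checked by round-tripping an entry through `MarshalBinary` and `UnmarshalBinary`, the same pair the truncation tests below exercise. The byte arithmetic in the comments follows `MarshalSize`; the `main` wrapper is illustrative.

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/tsdb/engine/tsm1"
)

func main() {
	entry := &tsm1.DeleteRangeWALEntry{
		Keys: [][]byte{[]byte("cpu,host=A#!~#value")}, // 19-byte key
		Min:  2,
		Max:  3,
	}

	b, err := entry.MarshalBinary()
	if err != nil {
		panic(err)
	}
	// 16 (min+max) + 4 (key length) + 19 (key) = 39 bytes for a single key.
	fmt.Println(len(b))

	var out tsm1.DeleteRangeWALEntry
	if err := out.UnmarshalBinary(b); err != nil {
		panic(err)
	}
	fmt.Printf("min=%d max=%d keys=%q\n", out.Min, out.Max, out.Keys)
}
```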
-func (w *WALSegmentWriter) Write(entryType WalEntryType, compressed []byte) error { - var buf [5]byte - buf[0] = byte(entryType) - binary.BigEndian.PutUint32(buf[1:5], uint32(len(compressed))) - - if _, err := w.bw.Write(buf[:]); err != nil { - return err - } - - if _, err := w.bw.Write(compressed); err != nil { - return err - } - - w.size += len(buf) + len(compressed) - - return nil -} - -// Sync flushes the file systems in-memory copy of recently written data to disk, -// if w is writing to an os.File. -func (w *WALSegmentWriter) sync() error { - if err := w.bw.Flush(); err != nil { - return err - } - - if f, ok := w.w.(*os.File); ok { - return f.Sync() - } - return nil -} - -func (w *WALSegmentWriter) Flush() error { - return w.bw.Flush() -} - -func (w *WALSegmentWriter) close() error { - if err := w.Flush(); err != nil { - return err - } - return w.w.Close() -} - -// WALSegmentReader reads WAL segments. -type WALSegmentReader struct { - rc io.ReadCloser - r *bufio.Reader - entry WALEntry - n int64 - err error -} - -// NewWALSegmentReader returns a new WALSegmentReader reading from r. -func NewWALSegmentReader(r io.ReadCloser) *WALSegmentReader { - return &WALSegmentReader{ - rc: r, - r: bufio.NewReader(r), - } -} - -func (r *WALSegmentReader) Reset(rc io.ReadCloser) { - r.rc = rc - r.r.Reset(rc) - r.entry = nil - r.n = 0 - r.err = nil -} - -// Next indicates if there is a value to read. -func (r *WALSegmentReader) Next() bool { - var nReadOK int - - // read the type and the length of the entry - var lv [5]byte - n, err := io.ReadFull(r.r, lv[:]) - if err == io.EOF { - return false - } - - if err != nil { - r.err = err - // We return true here because we want the client code to call read which - // will return the this error to be handled. - return true - } - nReadOK += n - - entryType := lv[0] - length := binary.BigEndian.Uint32(lv[1:5]) - - b := *(getBuf(int(length))) - defer putBuf(&b) - - // read the compressed block and decompress it - n, err = io.ReadFull(r.r, b[:length]) - if err != nil { - r.err = err - return true - } - nReadOK += n - - decLen, err := snappy.DecodedLen(b[:length]) - if err != nil { - r.err = err - return true - } - decBuf := *(getBuf(decLen)) - defer putBuf(&decBuf) - - data, err := snappy.Decode(decBuf, b[:length]) - if err != nil { - r.err = err - return true - } - - // and marshal it and send it to the cache - switch WalEntryType(entryType) { - case WriteWALEntryType: - r.entry = &WriteWALEntry{ - Values: make(map[string][]Value), - } - case DeleteWALEntryType: - r.entry = &DeleteWALEntry{} - case DeleteRangeWALEntryType: - r.entry = &DeleteRangeWALEntry{} - default: - r.err = fmt.Errorf("unknown wal entry type: %v", entryType) - return true - } - r.err = r.entry.UnmarshalBinary(data) - if r.err == nil { - // Read and decode of this entry was successful. - r.n += int64(nReadOK) - } - - return true -} - -// Read returns the next entry in the reader. -func (r *WALSegmentReader) Read() (WALEntry, error) { - if r.err != nil { - return nil, r.err - } - return r.entry, nil -} - -// Count returns the total number of bytes read successfully from the segment, as -// of the last call to Read(). The segment is guaranteed to be valid up to and -// including this number of bytes. -func (r *WALSegmentReader) Count() int64 { - return r.n -} - -// Error returns the last error encountered by the reader. -func (r *WALSegmentReader) Error() error { - return r.err -} - -// Close closes the underlying io.Reader. 
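Putting the segment writer and reader above together: each entry is framed as a 1-byte type, a 4-byte big-endian length, and a snappy-compressed payload, which is what `writeToLog` hands to `Write` and what `Next`/`Read` undo. A minimal round-trip sketch; the temp-file handling and `main` wrapper are illustrative.

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/golang/snappy"
	"github.com/influxdata/influxdb/v2/tsdb/engine/tsm1"
)

func main() {
	f, err := os.CreateTemp("", "wal-segment")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	// Encode one write entry and snappy-compress it, as writeToLog does,
	// then append it to the segment through the writer.
	entry := &tsm1.WriteWALEntry{
		Values: map[string][]tsm1.Value{
			"cpu,host=A#!~#float": {tsm1.NewValue(1, 1.1)},
		},
	}
	b, err := entry.MarshalBinary()
	if err != nil {
		panic(err)
	}
	w := tsm1.NewWALSegmentWriter(f)
	if err := w.Write(entry.Type(), snappy.Encode(nil, b)); err != nil {
		panic(err)
	}
	if err := w.Flush(); err != nil {
		panic(err)
	}

	// Read it back: Next decodes the next entry, Read returns it (or the
	// decode error), and Count reports how many bytes were read successfully.
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}
	r := tsm1.NewWALSegmentReader(f)
	defer r.Close()
	for r.Next() {
		e, err := r.Read()
		if err != nil {
			panic(err)
		}
		fmt.Printf("read %T, %d valid bytes\n", e, r.Count())
	}
}
```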
-func (r *WALSegmentReader) Close() error { - if r.rc == nil { - return nil - } - err := r.rc.Close() - r.rc = nil - return err -} - -// idFromFileName parses the segment file ID from its name. -func idFromFileName(name string) (int, error) { - parts := strings.Split(filepath.Base(name), ".") - if len(parts) != 2 { - return 0, fmt.Errorf("file %s has wrong name format to have an id", name) - } - - id, err := strconv.ParseUint(parts[0][1:], 10, 32) - - return int(id), err -} diff --git a/tsdb/engine/tsm1/wal_test.go b/tsdb/engine/tsm1/wal_test.go deleted file mode 100644 index 321de492537..00000000000 --- a/tsdb/engine/tsm1/wal_test.go +++ /dev/null @@ -1,821 +0,0 @@ -package tsm1_test - -import ( - "context" - "fmt" - "io" - "os" - "path/filepath" - "reflect" - "sort" - "sync" - "testing" - "time" - - "github.com/golang/snappy" - "github.com/influxdata/influxdb/v2/pkg/slices" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" - "github.com/stretchr/testify/require" -) - -func NewWAL(path string, maxConcurrentWrites int, maxWriteDelay time.Duration) *tsm1.WAL { - // EngineTags is only for metrics, not needed for tests - return tsm1.NewWAL(path, maxConcurrentWrites, maxWriteDelay, tsdb.EngineTags{}) -} - -func TestWALWriter_WriteMulti_Single(t *testing.T) { - dir := t.TempDir() - w := NewWAL(dir, 0, 0) - defer w.Close() - require.NoError(t, w.Open()) - - p1 := tsm1.NewValue(1, 1.1) - p2 := tsm1.NewValue(1, int64(1)) - p3 := tsm1.NewValue(1, true) - p4 := tsm1.NewValue(1, "string") - p5 := tsm1.NewValue(1, ^uint64(0)) - - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#float": {p1}, - "cpu,host=A#!~#int": {p2}, - "cpu,host=A#!~#bool": {p3}, - "cpu,host=A#!~#string": {p4}, - "cpu,host=A#!~#unsigned": {p5}, - } - - _, err := w.WriteMulti(context.Background(), values) - require.NoError(t, err) - - f, r := mustSegmentReader(t, w) - defer r.Close() - - require.True(t, r.Next()) - - we, err := r.Read() - require.NoError(t, err) - - e, ok := we.(*tsm1.WriteWALEntry) - require.True(t, ok) - - for k, v := range e.Values { - for i, vv := range v { - require.Equal(t, values[k][i].String(), vv.String()) - } - } - - require.Equal(t, r.Count(), mustReadFileSize(f)) -} - -func TestWALWriter_WriteMulti_LargeBatch(t *testing.T) { - dir := t.TempDir() - w := NewWAL(dir, 0, 0) - defer w.Close() - require.NoError(t, w.Open()) - - var points []tsm1.Value - for i := 0; i < 100000; i++ { - points = append(points, tsm1.NewValue(int64(i), int64(1))) - } - - values := map[string][]tsm1.Value{ - "cpu,host=A,server=01,foo=bar,tag=really-long#!~#float": points, - "mem,host=A,server=01,foo=bar,tag=really-long#!~#float": points, - } - - _, err := w.WriteMulti(context.Background(), values) - require.NoError(t, err) - - f, r := mustSegmentReader(t, w) - defer r.Close() - - require.True(t, r.Next()) - - we, err := r.Read() - require.NoError(t, err) - - e, ok := we.(*tsm1.WriteWALEntry) - require.True(t, ok) - - for k, v := range e.Values { - for i, vv := range v { - require.Equal(t, values[k][i].String(), vv.String()) - } - } - - require.Equal(t, r.Count(), mustReadFileSize(f)) -} - -func TestWALWriter_WriteMulti_Multiple(t *testing.T) { - dir := t.TempDir() - w := NewWAL(dir, 0, 0) - defer w.Close() - require.NoError(t, w.Open()) - - p1 := tsm1.NewValue(1, int64(1)) - p2 := tsm1.NewValue(1, int64(2)) - - exp := []struct { - key string - values []tsm1.Value - }{ - {"cpu,host=A#!~#value", []tsm1.Value{p1}}, - {"cpu,host=B#!~#value", 
[]tsm1.Value{p2}}, - } - - for _, v := range exp { - _, err := w.WriteMulti(context.Background(), map[string][]tsm1.Value{v.key: v.values}) - require.NoError(t, err) - } - - f, r := mustSegmentReader(t, w) - defer r.Close() - - for _, ep := range exp { - require.True(t, r.Next()) - - we, err := r.Read() - require.NoError(t, err) - - e, ok := we.(*tsm1.WriteWALEntry) - require.True(t, ok) - - for k, v := range e.Values { - require.Equal(t, k, ep.key) - require.Equal(t, len(v), len(ep.values)) - - for i, vv := range v { - require.Equal(t, vv.String(), ep.values[i].String()) - } - } - } - - require.Equal(t, r.Count(), mustReadFileSize(f)) -} - -func TestWALWriter_WriteDelete_Single(t *testing.T) { - dir := t.TempDir() - w := NewWAL(dir, 0, 0) - defer w.Close() - require.NoError(t, w.Open()) - - keys := [][]byte{[]byte("cpu")} - - _, err := w.Delete(context.Background(), keys) - require.NoError(t, err) - - _, r := mustSegmentReader(t, w) - defer r.Close() - - require.True(t, r.Next()) - - we, err := r.Read() - require.NoError(t, err) - - e, ok := we.(*tsm1.DeleteWALEntry) - require.True(t, ok) - - require.Equal(t, len(e.Keys), len(keys)) - require.Equal(t, string(e.Keys[0]), string(keys[0])) -} - -func TestWALWriter_WriteMultiDelete_Multiple(t *testing.T) { - dir := t.TempDir() - w := NewWAL(dir, 0, 0) - defer w.Close() - require.NoError(t, w.Open()) - - p1 := tsm1.NewValue(1, true) - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {p1}, - } - - _, err := w.WriteMulti(context.Background(), values) - require.NoError(t, err) - - deleteKeys := [][]byte{[]byte("cpu,host=A#!~value")} - - _, err = w.Delete(context.Background(), deleteKeys) - require.NoError(t, err) - - _, r := mustSegmentReader(t, w) - defer r.Close() - - require.True(t, r.Next()) - - we, err := r.Read() - require.NoError(t, err) - - e, ok := we.(*tsm1.WriteWALEntry) - require.True(t, ok) - - for k, v := range e.Values { - require.Equal(t, len(v), len(values[k])) - - for i, vv := range v { - require.Equal(t, vv.String(), values[k][i].String()) - } - } - - // Read the delete second - require.True(t, r.Next()) - - we, err = r.Read() - require.NoError(t, err) - - de, ok := we.(*tsm1.DeleteWALEntry) - require.True(t, ok) - - require.Equal(t, len(de.Keys), len(deleteKeys)) - require.Equal(t, string(de.Keys[0]), string(deleteKeys[0])) -} - -func TestWALWriter_WriteMultiDeleteRange_Multiple(t *testing.T) { - dir := t.TempDir() - w := NewWAL(dir, 0, 0) - defer w.Close() - require.NoError(t, w.Open()) - - p1 := tsm1.NewValue(1, 1.0) - p2 := tsm1.NewValue(2, 2.0) - p3 := tsm1.NewValue(3, 3.0) - - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {p1, p2, p3}, - } - - _, err := w.WriteMulti(context.Background(), values) - require.NoError(t, err) - - // Write the delete entry - deleteKeys := [][]byte{[]byte("cpu,host=A#!~value")} - deleteMin, deleteMax := int64(2), int64(3) - - _, err = w.DeleteRange(context.Background(), deleteKeys, deleteMin, deleteMax) - require.NoError(t, err) - - _, r := mustSegmentReader(t, w) - defer r.Close() - - require.True(t, r.Next()) - - we, err := r.Read() - require.NoError(t, err) - - e, ok := we.(*tsm1.WriteWALEntry) - require.True(t, ok) - - for k, v := range e.Values { - require.Equal(t, len(v), len(values[k])) - - for i, vv := range v { - require.Equal(t, vv.String(), values[k][i].String()) - } - } - - // Read the delete second - require.True(t, r.Next()) - - we, err = r.Read() - require.NoError(t, err) - - de, ok := we.(*tsm1.DeleteRangeWALEntry) - require.True(t, ok) - - 
require.Equal(t, len(de.Keys), len(deleteKeys)) - require.Equal(t, string(de.Keys[0]), string(deleteKeys[0])) - require.Equal(t, de.Min, deleteMin) - require.Equal(t, de.Max, deleteMax) -} - -func TestWAL_ClosedSegments(t *testing.T) { - dir := t.TempDir() - - w := NewWAL(dir, 0, 0) - require.NoError(t, w.Open()) - - files, err := w.ClosedSegments() - require.NoError(t, err) - - require.Equal(t, len(files), 0) - - _, err = w.WriteMulti(context.Background(), map[string][]tsm1.Value{ - "cpu,host=A#!~#value": { - tsm1.NewValue(1, 1.1), - }, - }) - require.NoError(t, err) - - require.NoError(t, w.Close()) - - // Re-open the WAL - w = NewWAL(dir, 0, 0) - defer w.Close() - require.NoError(t, w.Open()) - - files, err = w.ClosedSegments() - require.NoError(t, err) - require.Equal(t, len(files), 0) -} - -func TestWAL_Delete(t *testing.T) { - dir := t.TempDir() - - w := NewWAL(dir, 0, 0) - require.NoError(t, w.Open()) - - files, err := w.ClosedSegments() - - require.NoError(t, err) - - require.Equal(t, len(files), 0) - - _, err = w.Delete(context.Background(), [][]byte{[]byte("cpu")}) - require.NoError(t, err) - - require.NoError(t, w.Close()) - - // Re-open the WAL - w = NewWAL(dir, 0, 0) - defer w.Close() - require.NoError(t, w.Open()) - - files, err = w.ClosedSegments() - require.NoError(t, err) - require.Equal(t, len(files), 0) -} - -func TestWALWriter_Corrupt(t *testing.T) { - dir := t.TempDir() - f := MustTempFile(t, dir) - w := tsm1.NewWALSegmentWriter(f) - corruption := []byte{1, 4, 0, 0, 0} - - p1 := tsm1.NewValue(1, 1.1) - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#float": {p1}, - } - - entry := &tsm1.WriteWALEntry{ - Values: values, - } - - require.NoError(t, w.Write(mustMarshalEntry(entry))) - require.NoError(t, w.Flush()) - - // Write some random bytes to the file to simulate corruption. - _, err := f.Write(corruption) - require.NoError(t, err) - - // Create the WAL segment reader. - _, err = f.Seek(0, io.SeekStart) - require.NoError(t, err) - - r := tsm1.NewWALSegmentReader(f) - - // Try to decode two entries. - require.True(t, r.Next()) - - _, err = r.Read() - require.NoError(t, err) - - require.True(t, r.Next()) - - _, err = r.Read() - require.Error(t, err) - - // Count should only return size of valid data. - expCount := mustReadFileSize(f) - int64(len(corruption)) - require.Equal(t, expCount, r.Count()) -} - -// Reproduces a `panic: runtime error: makeslice: cap out of range` when run with -// GOARCH=386 go test -run TestWALSegmentReader_Corrupt -v ./tsdb/engine/tsm1/ -func TestWALSegmentReader_Corrupt(t *testing.T) { - dir := t.TempDir() - f := MustTempFile(t, dir) - w := tsm1.NewWALSegmentWriter(f) - - p4 := tsm1.NewValue(1, "string") - - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#string": {p4, p4}, - } - - entry := &tsm1.WriteWALEntry{ - Values: values, - } - - typ, b := mustMarshalEntry(entry) - - // This causes the nvals field to overflow on 32 bit systems which produces a - // negative count and a panic when reading the segment. - b[25] = 255 - - require.NoError(t, w.Write(typ, b)) - require.NoError(t, w.Flush()) - - // Create the WAL segment reader. - _, err := f.Seek(0, io.SeekStart) - require.NoError(t, err) - - r := tsm1.NewWALSegmentReader(f) - defer r.Close() - - // Try to decode two entries. 
- for r.Next() { - r.Read() - } -} - -func TestWALRollSegment(t *testing.T) { - dir := t.TempDir() - - w := NewWAL(dir, 0, 0) - require.NoError(t, w.Open()) - const segSize = 1024 - w.SegmentSize = segSize - - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {tsm1.NewValue(1, 1.0)}, - "cpu,host=B#!~#value": {tsm1.NewValue(1, 1.0)}, - "cpu,host=C#!~#value": {tsm1.NewValue(1, 1.0)}, - } - _, err := w.WriteMulti(context.Background(), values) - require.NoError(t, err) - - files, err := os.ReadDir(w.Path()) - require.NoError(t, err) - require.Equal(t, 1, len(files)) - - file, err := files[0].Info() - require.NoError(t, err) - encodeSize := file.Size() - - for i := 0; i < 100; i++ { - _, err := w.WriteMulti(context.Background(), values) - require.NoError(t, err) - } - files, err = os.ReadDir(w.Path()) - require.NoError(t, err) - for _, f := range files { - file, err := f.Info() - require.NoError(t, err) - require.True(t, file.Size() <= int64(segSize)+encodeSize) - } - require.NoError(t, w.Close()) -} - -func TestWAL_DiskSize(t *testing.T) { - test := func(w *tsm1.WAL, oldZero, curZero bool) { - // get disk size by reading file - files, err := os.ReadDir(w.Path()) - require.NoError(t, err) - - sort.Slice(files, func(i, j int) bool { - return files[i].Name() < files[j].Name() - }) - - var old, cur int64 - if len(files) > 0 { - file, err := files[len(files)-1].Info() - require.NoError(t, err) - cur = file.Size() - for i := 0; i < len(files)-1; i++ { - file, err := files[i].Info() - require.NoError(t, err) - old += file.Size() - } - } - - // test zero size condition - require.False(t, oldZero && old > 0) - require.False(t, !oldZero && old == 0) - require.False(t, curZero && cur > 0) - require.False(t, !curZero && cur == 0) - - // test method DiskSizeBytes - require.Equal(t, old+cur, w.DiskSizeBytes(), "total disk size") - } - - dir := t.TempDir() - - w := NewWAL(dir, 0, 0) - - const segSize = 1024 - w.SegmentSize = segSize - - // open - require.NoError(t, w.Open()) - - test(w, true, true) - - // write some values, the total size of these values does not exceed segSize(1024), - // so rollSegment will not be triggered - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#value": {tsm1.NewValue(1, 1.0)}, - "cpu,host=B#!~#value": {tsm1.NewValue(1, 1.0)}, - "cpu,host=C#!~#value": {tsm1.NewValue(1, 1.0)}, - } - - _, err := w.WriteMulti(context.Background(), values) - require.NoError(t, err) - - test(w, true, false) - - // write some values, the total size of these values exceeds segSize(1024), - // so rollSegment will be triggered - for i := 0; i < 100; i++ { - _, err := w.WriteMulti(context.Background(), values) - require.NoError(t, err) - } - - test(w, false, false) - - // reopen - require.NoError(t, w.Close()) - require.NoError(t, w.Open()) - - test(w, false, false) - - // remove - closedSegments, err := w.ClosedSegments() - require.NoError(t, err) - require.NoError(t, w.Remove(closedSegments)) - - test(w, true, false) - - require.NoError(t, w.Close()) -} - -func TestWriteWALSegment_UnmarshalBinary_WriteWALCorrupt(t *testing.T) { - p1 := tsm1.NewValue(1, 1.1) - p2 := tsm1.NewValue(1, int64(1)) - p3 := tsm1.NewValue(1, true) - p4 := tsm1.NewValue(1, "string") - p5 := tsm1.NewValue(1, uint64(1)) - - values := map[string][]tsm1.Value{ - "cpu,host=A#!~#float": {p1, p1}, - "cpu,host=A#!~#int": {p2, p2}, - "cpu,host=A#!~#bool": {p3, p3}, - "cpu,host=A#!~#string": {p4, p4}, - "cpu,host=A#!~#unsigned": {p5, p5}, - } - - w := &tsm1.WriteWALEntry{ - Values: values, - } - - b, err := w.MarshalBinary() 
- require.NoError(t, err) - - // Test every possible truncation of a write WAL entry - for i := 0; i < len(b); i++ { - // re-allocated to ensure capacity would be exceed if slicing - truncated := make([]byte, i) - copy(truncated, b[:i]) - err := w.UnmarshalBinary(truncated) - require.True(t, err == nil || err == tsm1.ErrWALCorrupt) - } -} - -func TestDeleteWALEntry_UnmarshalBinary(t *testing.T) { - examples := []struct { - In []string - Out [][]byte - }{ - { - In: []string{""}, - Out: nil, - }, - { - In: []string{"foo"}, - Out: [][]byte{[]byte("foo")}, - }, - { - In: []string{"foo", "bar"}, - Out: [][]byte{[]byte("foo"), []byte("bar")}, - }, - { - In: []string{"foo", "bar", "z", "abc"}, - Out: [][]byte{[]byte("foo"), []byte("bar"), []byte("z"), []byte("abc")}, - }, - { - In: []string{"foo", "bar", "z", "a"}, - Out: [][]byte{[]byte("foo"), []byte("bar"), []byte("z"), []byte("a")}, - }, - } - - for _, example := range examples { - w := &tsm1.DeleteWALEntry{Keys: slices.StringsToBytes(example.In...)} - b, err := w.MarshalBinary() - require.NoError(t, err) - - out := &tsm1.DeleteWALEntry{} - require.NoError(t, out.UnmarshalBinary(b)) - - require.True(t, reflect.DeepEqual(example.Out, out.Keys)) - } -} - -func TestWriteWALSegment_UnmarshalBinary_DeleteWALCorrupt(t *testing.T) { - w := &tsm1.DeleteWALEntry{ - Keys: [][]byte{[]byte("foo"), []byte("bar")}, - } - - b, err := w.MarshalBinary() - require.NoError(t, err) - - // Test every possible truncation of a write WAL entry - for i := 0; i < len(b); i++ { - // re-allocated to ensure capacity would be exceed if slicing - truncated := make([]byte, i) - copy(truncated, b[:i]) - err := w.UnmarshalBinary(truncated) - require.True(t, err == nil || err == tsm1.ErrWALCorrupt) - } -} - -func TestWriteWALSegment_UnmarshalBinary_DeleteRangeWALCorrupt(t *testing.T) { - w := &tsm1.DeleteRangeWALEntry{ - Keys: [][]byte{[]byte("foo"), []byte("bar")}, - Min: 1, - Max: 2, - } - - b, err := w.MarshalBinary() - require.NoError(t, err) - - // Test every possible truncation of a write WAL entry - for i := 0; i < len(b); i++ { - // re-allocated to ensure capacity would be exceed if slicing - truncated := make([]byte, i) - copy(truncated, b[:i]) - err := w.UnmarshalBinary(truncated) - require.True(t, err == nil || err == tsm1.ErrWALCorrupt) - } -} - -func BenchmarkWAL_WriteMulti_Concurrency(b *testing.B) { - benchmarks := []struct { - concurrency int - }{ - {1}, - {12}, - {24}, - {50}, - {100}, - {200}, - {300}, - {400}, - {500}, - } - - for _, bm := range benchmarks { - b.Run(fmt.Sprintf("concurrency-%d", bm.concurrency), func(b *testing.B) { - points := map[string][]tsm1.Value{} - for i := 0; i < 5000; i++ { - k := "cpu,host=A#!~#value" - points[k] = append(points[k], tsm1.NewValue(int64(i), 1.1)) - } - - dir := b.TempDir() - - w := NewWAL(dir, 0, 0) - defer w.Close() - require.NoError(b, w.Open()) - - start := make(chan struct{}) - stop := make(chan struct{}) - - succeed := make(chan struct{}, 1000) - defer close(succeed) - - wg := &sync.WaitGroup{} - for i := 0; i < bm.concurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - <-start - - for { - select { - case <-stop: - return - default: - _, err := w.WriteMulti(context.Background(), points) - require.NoError(b, err) - - succeed <- struct{}{} - } - } - }() - } - - b.ResetTimer() - - close(start) - - for i := 0; i < b.N; i++ { - <-succeed - } - - b.StopTimer() - - close(stop) - wg.Wait() - }) - } -} - -func BenchmarkWALSegmentWriter(b *testing.B) { - points := map[string][]tsm1.Value{} - for i := 0; i < 
5000; i++ { - k := "cpu,host=A#!~#value" - points[k] = append(points[k], tsm1.NewValue(int64(i), 1.1)) - } - - dir := b.TempDir() - - f := MustTempFile(b, dir) - w := tsm1.NewWALSegmentWriter(f) - - write := &tsm1.WriteWALEntry{ - Values: points, - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - require.NoError(b, w.Write(mustMarshalEntry(write))) - } -} - -func BenchmarkWALSegmentReader(b *testing.B) { - points := map[string][]tsm1.Value{} - for i := 0; i < 5000; i++ { - k := "cpu,host=A#!~#value" - points[k] = append(points[k], tsm1.NewValue(int64(i), 1.1)) - } - - dir := b.TempDir() - - f := MustTempFile(b, dir) - w := tsm1.NewWALSegmentWriter(f) - - write := &tsm1.WriteWALEntry{ - Values: points, - } - - for i := 0; i < 100; i++ { - require.NoError(b, w.Write(mustMarshalEntry(write))) - } - - r := tsm1.NewWALSegmentReader(f) - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.StopTimer() - f.Seek(0, io.SeekStart) - b.StartTimer() - - for r.Next() { - _, err := r.Read() - require.NoError(b, err) - } - } -} - -func mustSegmentReader(t *testing.T, w *tsm1.WAL) (*os.File, *tsm1.WALSegmentReader) { - files, err := filepath.Glob(filepath.Join(w.Path(), - fmt.Sprintf("%s*.%s", tsm1.WALFilePrefix, tsm1.WALFileExtension))) - require.NoError(t, err) - require.Equal(t, 1, len(files)) - - sort.Strings(files) - - f, err := os.OpenFile(files[0], os.O_CREATE|os.O_RDWR, 0666) - require.NoError(t, err) - return f, tsm1.NewWALSegmentReader(f) -} - -// mustReadFileSize returns the size of the file, or panics. -func mustReadFileSize(f *os.File) int64 { - stat, err := os.Stat(f.Name()) - if err != nil { - panic(fmt.Sprintf("failed to get size of file at %s: %s", f.Name(), err.Error())) - } - return stat.Size() -} - -func mustMarshalEntry(entry tsm1.WALEntry) (tsm1.WalEntryType, []byte) { - bytes := make([]byte, 1024<<2) - - b, err := entry.Encode(bytes) - if err != nil { - panic(fmt.Sprintf("error encoding: %v", err)) - } - - return entry.Type(), snappy.Encode(b, b) -} diff --git a/tsdb/engine/tsm1/writer.go b/tsdb/engine/tsm1/writer.go deleted file mode 100644 index 1611e444a19..00000000000 --- a/tsdb/engine/tsm1/writer.go +++ /dev/null @@ -1,824 +0,0 @@ -package tsm1 - -/* -A TSM file is composed for four sections: header, blocks, index and the footer. - -┌────────┬────────────────────────────────────┬─────────────┬──────────────┐ -│ Header │ Blocks │ Index │ Footer │ -│5 bytes │ N bytes │ N bytes │ 4 bytes │ -└────────┴────────────────────────────────────┴─────────────┴──────────────┘ - -Header is composed of a magic number to identify the file type and a version -number. - -┌───────────────────┐ -│ Header │ -├─────────┬─────────┤ -│ Magic │ Version │ -│ 4 bytes │ 1 byte │ -└─────────┴─────────┘ - -Blocks are sequences of pairs of CRC32 and data. The block data is opaque to the -file. The CRC32 is used for block level error detection. The length of the blocks -is stored in the index. - -┌───────────────────────────────────────────────────────────┐ -│ Blocks │ -├───────────────────┬───────────────────┬───────────────────┤ -│ Block 1 │ Block 2 │ Block N │ -├─────────┬─────────┼─────────┬─────────┼─────────┬─────────┤ -│ CRC │ Data │ CRC │ Data │ CRC │ Data │ -│ 4 bytes │ N bytes │ 4 bytes │ N bytes │ 4 bytes │ N bytes │ -└─────────┴─────────┴─────────┴─────────┴─────────┴─────────┘ - -Following the blocks is the index for the blocks in the file. The index is -composed of a sequence of index entries ordered lexicographically by key and -then by time. 
Each index entry starts with a key length and key followed by a -count of the number of blocks in the file. Each block entry is composed of -the min and max time for the block, the offset into the file where the block -is located and the size of the block. - -The index structure can provide efficient access to all blocks as well as the -ability to determine the cost associated with accessing a given key. Given a key -and timestamp, we can determine whether a file contains the block for that -timestamp as well as where that block resides and how much data to read to -retrieve the block. If we know we need to read all or multiple blocks in a -file, we can use the size to determine how much to read in a given IO. - -┌────────────────────────────────────────────────────────────────────────────┐ -│ Index │ -├─────────┬─────────┬──────┬───────┬─────────┬─────────┬────────┬────────┬───┤ -│ Key Len │ Key │ Type │ Count │Min Time │Max Time │ Offset │ Size │...│ -│ 2 bytes │ N bytes │1 byte│2 bytes│ 8 bytes │ 8 bytes │8 bytes │4 bytes │ │ -└─────────┴─────────┴──────┴───────┴─────────┴─────────┴────────┴────────┴───┘ - -The last section is the footer that stores the offset of the start of the index. - -┌─────────┐ -│ Footer │ -├─────────┤ -│Index Ofs│ -│ 8 bytes │ -└─────────┘ -*/ - -import ( - "bufio" - "bytes" - "encoding/binary" - "fmt" - "hash/crc32" - "io" - "os" - "sort" - "strings" - "time" -) - -const ( - // MagicNumber is written as the first 4 bytes of a data file to - // identify the file as a tsm1 formatted file - MagicNumber uint32 = 0x16D116D1 - - // Version indicates the version of the TSM file format. - Version byte = 1 - - // Size in bytes of an index entry - indexEntrySize = 28 - - // Size in bytes used to store the count of index entries for a key - indexCountSize = 2 - - // Size in bytes used to store the type of block encoded - indexTypeSize = 1 - - // Max number of blocks for a given key that can exist in a single file - maxIndexEntries = (1 << (indexCountSize * 8)) - 1 - - // max length of a key in an index entry (measurement + tags) - maxKeyLength = (1 << (2 * 8)) - 1 - - // The threshold amount data written before we periodically fsync a TSM file. This helps avoid - // long pauses due to very large fsyncs at the end of writing a TSM file. - fsyncEvery = 25 * 1024 * 1024 -) - -var ( - //ErrNoValues is returned when TSMWriter.WriteIndex is called and there are no values to write. - ErrNoValues = fmt.Errorf("no values written") - - // ErrTSMClosed is returned when performing an operation against a closed TSM file. - ErrTSMClosed = fmt.Errorf("tsm file closed") - - // ErrMaxKeyLengthExceeded is returned when attempting to write a key that is too long. - ErrMaxKeyLengthExceeded = fmt.Errorf("max key length exceeded") - - // ErrMaxBlocksExceeded is returned when attempting to write a block past the allowed number. - ErrMaxBlocksExceeded = fmt.Errorf("max blocks exceeded") -) - -// TSMWriter writes TSM formatted key and values. -type TSMWriter interface { - // Write writes a new block for key containing and values. Writes append - // blocks in the order that the Write function is called. The caller is - // responsible for ensuring keys and blocks are sorted appropriately. - // Values are encoded as a full block. The caller is responsible for - // ensuring a fixed number of values are encoded in each block as well as - // ensuring the Values are sorted. The first and last timestamp values are - // used as the minimum and maximum values for the index entry. 
- Write(key []byte, values Values) error - - // WriteBlock writes a new block for key containing the bytes in block. WriteBlock appends - // blocks in the order that the WriteBlock function is called. The caller is - // responsible for ensuring keys and blocks are sorted appropriately, and that the - // block and index information is correct for the block. The minTime and maxTime - // timestamp values are used as the minimum and maximum values for the index entry. - WriteBlock(key []byte, minTime, maxTime int64, block []byte) error - - // WriteIndex finishes the TSM write streams and writes the index. - WriteIndex() error - - // Flushes flushes all pending changes to the underlying file resources. - Flush() error - - // Close closes any underlying file resources. - Close() error - - // Size returns the current size in bytes of the file. - Size() uint32 - - Remove() error -} - -// IndexWriter writes a TSMIndex. -type IndexWriter interface { - // Add records a new block entry for a key in the index. - Add(key []byte, blockType byte, minTime, maxTime int64, offset int64, size uint32) - - // Entries returns all index entries for a key. - Entries(key []byte) []IndexEntry - - // KeyCount returns the count of unique keys in the index. - KeyCount() int - - // Size returns the size of a the current index in bytes. - Size() uint32 - - // MarshalBinary returns a byte slice encoded version of the index. - MarshalBinary() ([]byte, error) - - // WriteTo writes the index contents to a writer. - WriteTo(w io.Writer) (int64, error) - - Close() error - - Remove() error -} - -// IndexEntry is the index information for a given block in a TSM file. -type IndexEntry struct { - // The min and max time of all points stored in the block. - MinTime, MaxTime int64 - - // The absolute position in the file where this block is located. - Offset int64 - - // The size in bytes of the block in the file. - Size uint32 -} - -// UnmarshalBinary decodes an IndexEntry from a byte slice. -func (e *IndexEntry) UnmarshalBinary(b []byte) error { - if len(b) < indexEntrySize { - return fmt.Errorf("unmarshalBinary: short buf: %v < %v", len(b), indexEntrySize) - } - e.MinTime = int64(binary.BigEndian.Uint64(b[:8])) - e.MaxTime = int64(binary.BigEndian.Uint64(b[8:16])) - e.Offset = int64(binary.BigEndian.Uint64(b[16:24])) - e.Size = binary.BigEndian.Uint32(b[24:28]) - return nil -} - -// AppendTo writes a binary-encoded version of IndexEntry to b, allocating -// and returning a new slice, if necessary. -func (e *IndexEntry) AppendTo(b []byte) []byte { - if len(b) < indexEntrySize { - if cap(b) < indexEntrySize { - b = make([]byte, indexEntrySize) - } else { - b = b[:indexEntrySize] - } - } - - binary.BigEndian.PutUint64(b[:8], uint64(e.MinTime)) - binary.BigEndian.PutUint64(b[8:16], uint64(e.MaxTime)) - binary.BigEndian.PutUint64(b[16:24], uint64(e.Offset)) - binary.BigEndian.PutUint32(b[24:28], uint32(e.Size)) - - return b -} - -// Contains returns true if this IndexEntry may contain values for the given time. -// The min and max times are inclusive. -func (e *IndexEntry) Contains(t int64) bool { - return e.MinTime <= t && e.MaxTime >= t -} - -// OverlapsTimeRange returns true if the given time ranges are completely within the entry's time bounds. -func (e *IndexEntry) OverlapsTimeRange(min, max int64) bool { - return e.MinTime <= max && e.MaxTime >= min -} - -// String returns a string representation of the entry. 
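A quick round trip over the fixed 28-byte `IndexEntry` encoding above (min time, max time and offset as 8 bytes each, then a 4-byte size, all big-endian), plus the inclusive time-range checks. The `main` wrapper is illustrative; the API is exactly the exported methods shown in this diff.

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/tsdb/engine/tsm1"
)

func main() {
	// An index entry is a fixed 28 bytes: min time (8), max time (8),
	// offset (8) and block size (4), all big-endian.
	e := tsm1.IndexEntry{MinTime: 10, MaxTime: 20, Offset: 5, Size: 100}

	buf := e.AppendTo(nil) // allocates the 28-byte encoding
	fmt.Println(len(buf))  // 28

	var decoded tsm1.IndexEntry
	if err := decoded.UnmarshalBinary(buf); err != nil {
		panic(err)
	}

	// Contains and OverlapsTimeRange are inclusive on both ends.
	fmt.Println(decoded.Contains(10), decoded.Contains(21)) // true false
	fmt.Println(decoded.OverlapsTimeRange(20, 30))          // true
}
```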
-func (e *IndexEntry) String() string { - return fmt.Sprintf("min=%s max=%s ofs=%d siz=%d", - time.Unix(0, e.MinTime).UTC(), time.Unix(0, e.MaxTime).UTC(), e.Offset, e.Size) -} - -// NewIndexWriter returns a new IndexWriter. -func NewIndexWriter() IndexWriter { - buf := bytes.NewBuffer(make([]byte, 0, 1024*1024)) - return &directIndex{buf: buf, w: bufio.NewWriter(buf)} -} - -// NewIndexWriter returns a new IndexWriter. -func NewDiskIndexWriter(f *os.File) IndexWriter { - return &directIndex{fd: f, w: bufio.NewWriterSize(f, 1024*1024)} -} - -type syncer interface { - Name() string - Sync() error -} - -// directIndex is a simple in-memory index implementation for a TSM file. The full index -// must fit in memory. -type directIndex struct { - keyCount int - size uint32 - - // The bytes written count of when we last fsync'd - lastSync uint32 - fd *os.File - buf *bytes.Buffer - - f syncer - - w *bufio.Writer - - key []byte - indexEntries *indexEntries -} - -func (d *directIndex) Add(key []byte, blockType byte, minTime, maxTime int64, offset int64, size uint32) { - // Is this the first block being added? - if len(d.key) == 0 { - // size of the key stored in the index - d.size += uint32(2 + len(key)) - // size of the count of entries stored in the index - d.size += indexCountSize - - d.key = key - if d.indexEntries == nil { - d.indexEntries = &indexEntries{} - } - d.indexEntries.Type = blockType - d.indexEntries.entries = append(d.indexEntries.entries, IndexEntry{ - MinTime: minTime, - MaxTime: maxTime, - Offset: offset, - Size: size, - }) - - // size of the encoded index entry - d.size += indexEntrySize - d.keyCount++ - return - } - - // See if were still adding to the same series key. - cmp := bytes.Compare(d.key, key) - if cmp == 0 { - // The last block is still this key - d.indexEntries.entries = append(d.indexEntries.entries, IndexEntry{ - MinTime: minTime, - MaxTime: maxTime, - Offset: offset, - Size: size, - }) - - // size of the encoded index entry - d.size += indexEntrySize - - } else if cmp < 0 { - d.flush(d.w) - // We have a new key that is greater than the last one so we need to add - // a new index block section. - - // size of the key stored in the index - d.size += uint32(2 + len(key)) - // size of the count of entries stored in the index - d.size += indexCountSize - - d.key = key - d.indexEntries.Type = blockType - d.indexEntries.entries = append(d.indexEntries.entries, IndexEntry{ - MinTime: minTime, - MaxTime: maxTime, - Offset: offset, - Size: size, - }) - - // size of the encoded index entry - d.size += indexEntrySize - d.keyCount++ - } else { - // Keys can't be added out of order. - panic(fmt.Sprintf("keys must be added in sorted order: %s < %s", string(key), string(d.key))) - } -} - -func (d *directIndex) entries(key []byte) []IndexEntry { - if len(d.key) == 0 { - return nil - } - - if bytes.Equal(d.key, key) { - return d.indexEntries.entries - } - - return nil -} - -func (d *directIndex) Entries(key []byte) []IndexEntry { - return d.entries(key) -} - -func (d *directIndex) Entry(key []byte, t int64) *IndexEntry { - entries := d.entries(key) - for _, entry := range entries { - if entry.Contains(t) { - return &entry - } - } - return nil -} - -func (d *directIndex) KeyCount() int { - return d.keyCount -} - -// copyBuffer is the actual implementation of Copy and CopyBuffer. -// if buf is nil, one is allocated. 
This is copied from the Go stdlib -// in order to remove the fast path WriteTo calls which circumvent any -// IO throttling as well as to add periodic fsyncs to avoid long stalls. -func copyBuffer(f syncer, dst io.Writer, src io.Reader, buf []byte) (written int64, err error) { - if buf == nil { - buf = make([]byte, 32*1024) - } - var lastSync int64 - for { - nr, er := src.Read(buf) - if nr > 0 { - nw, ew := dst.Write(buf[0:nr]) - if nw > 0 { - written += int64(nw) - } - - if written-lastSync > fsyncEvery { - if err := f.Sync(); err != nil { - return 0, err - } - lastSync = written - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er != nil { - if er != io.EOF { - err = er - } - break - } - } - return written, err -} - -func (d *directIndex) WriteTo(w io.Writer) (int64, error) { - if _, err := d.flush(d.w); err != nil { - return 0, err - } - - if err := d.w.Flush(); err != nil { - return 0, err - } - - if d.fd == nil { - return copyBuffer(d.f, w, d.buf, nil) - } - - if _, err := d.fd.Seek(0, io.SeekStart); err != nil { - return 0, err - } - - return io.Copy(w, bufio.NewReaderSize(d.fd, 1024*1024)) -} - -func (d *directIndex) flush(w io.Writer) (int64, error) { - var ( - n int - err error - buf [5]byte - N int64 - ) - - if len(d.key) == 0 { - return 0, nil - } - // For each key, individual entries are sorted by time - key := d.key - entries := d.indexEntries - - if entries.Len() > maxIndexEntries { - return N, fmt.Errorf("key '%s' exceeds max index entries: %d > %d", key, entries.Len(), maxIndexEntries) - } - - if !sort.IsSorted(entries) { - sort.Sort(entries) - } - - binary.BigEndian.PutUint16(buf[0:2], uint16(len(key))) - buf[2] = entries.Type - binary.BigEndian.PutUint16(buf[3:5], uint16(entries.Len())) - - // Append the key length and key - if n, err = w.Write(buf[0:2]); err != nil { - return int64(n) + N, fmt.Errorf("write: writer key length error: %v", err) - } - N += int64(n) - - if n, err = w.Write(key); err != nil { - return int64(n) + N, fmt.Errorf("write: writer key error: %v", err) - } - N += int64(n) - - // Append the block type and count - if n, err = w.Write(buf[2:5]); err != nil { - return int64(n) + N, fmt.Errorf("write: writer block type and count error: %v", err) - } - N += int64(n) - - // Append each index entry for all blocks for this key - var n64 int64 - if n64, err = entries.WriteTo(w); err != nil { - return n64 + N, fmt.Errorf("write: writer entries error: %v", err) - } - N += n64 - - d.key = nil - d.indexEntries.Type = 0 - d.indexEntries.entries = d.indexEntries.entries[:0] - - // If this is a disk based index and we've written more than the fsync threshold, - // fsync the data to avoid long pauses later on. 
- if d.fd != nil && d.size-d.lastSync > fsyncEvery { - if err := d.fd.Sync(); err != nil { - return N, err - } - d.lastSync = d.size - } - - return N, nil - -} - -func (d *directIndex) MarshalBinary() ([]byte, error) { - var b bytes.Buffer - if _, err := d.WriteTo(&b); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -func (d *directIndex) Size() uint32 { - return d.size -} - -func (d *directIndex) Close() error { - // Flush anything remaining in the index - if err := d.w.Flush(); err != nil { - return err - } - - if d.fd == nil { - return nil - } - - if err := d.fd.Close(); err != nil { - return err - } - return os.Remove(d.fd.Name()) -} - -// Remove removes the index from any temporary storage -func (d *directIndex) Remove() error { - if d.fd == nil { - return nil - } - - // Close the file handle to prevent leaking. We ignore the error because - // we just want to cleanup and remove the file. - _ = d.fd.Close() - - return os.Remove(d.fd.Name()) -} - -// tsmWriter writes keys and values in the TSM format -type tsmWriter struct { - wrapped io.Writer - w *bufio.Writer - index IndexWriter - n int64 - - // The bytes written count of when we last fsync'd - lastSync int64 -} - -// NewTSMWriter returns a new TSMWriter writing to w. -func NewTSMWriter(w io.Writer) (TSMWriter, error) { - index := NewIndexWriter() - return &tsmWriter{wrapped: w, w: bufio.NewWriterSize(w, 1024*1024), index: index}, nil -} - -// NewTSMWriterWithDiskBuffer returns a new TSMWriter writing to w and will use a disk -// based buffer for the TSM index if possible. -func NewTSMWriterWithDiskBuffer(w io.Writer) (TSMWriter, error) { - var index IndexWriter - // Make sure is a File so we can write the temp index alongside it. - if fw, ok := w.(syncer); ok { - f, err := os.OpenFile(strings.TrimSuffix(fw.Name(), ".tsm.tmp")+".idx.tmp", os.O_CREATE|os.O_RDWR|os.O_EXCL, 0666) - if err != nil { - return nil, err - } - index = NewDiskIndexWriter(f) - } else { - // w is not a file, just use an inmem index - index = NewIndexWriter() - } - - return &tsmWriter{wrapped: w, w: bufio.NewWriterSize(w, 1024*1024), index: index}, nil -} - -func (t *tsmWriter) writeHeader() error { - var buf [5]byte - binary.BigEndian.PutUint32(buf[0:4], MagicNumber) - buf[4] = Version - - n, err := t.w.Write(buf[:]) - if err != nil { - return err - } - t.n = int64(n) - return nil -} - -// Write writes a new block containing key and values. -func (t *tsmWriter) Write(key []byte, values Values) error { - if len(key) > maxKeyLength { - return ErrMaxKeyLengthExceeded - } - - // Nothing to write - if len(values) == 0 { - return nil - } - - // Write header only after we have some data to write. - if t.n == 0 { - if err := t.writeHeader(); err != nil { - return err - } - } - - block, err := values.Encode(nil) - if err != nil { - return err - } - - blockType, err := BlockType(block) - if err != nil { - return err - } - - var checksum [crc32.Size]byte - binary.BigEndian.PutUint32(checksum[:], crc32.ChecksumIEEE(block)) - - _, err = t.w.Write(checksum[:]) - if err != nil { - return err - } - - n, err := t.w.Write(block) - if err != nil { - return err - } - n += len(checksum) - - // Record this block in index - t.index.Add(key, blockType, values[0].UnixNano(), values[len(values)-1].UnixNano(), t.n, uint32(n)) - - // Increment file position pointer - t.n += int64(n) - - if len(t.index.Entries(key)) >= maxIndexEntries { - return ErrMaxBlocksExceeded - } - - return nil -} - -// WriteBlock writes block for the given key and time range to the TSM file. 
If the write -// exceeds max entries for a given key, ErrMaxBlocksExceeded is returned. This indicates -// that the index is now full for this key and no future writes to this key will succeed. -func (t *tsmWriter) WriteBlock(key []byte, minTime, maxTime int64, block []byte) error { - if len(key) > maxKeyLength { - return ErrMaxKeyLengthExceeded - } - - // Nothing to write - if len(block) == 0 { - return nil - } - - blockType, err := BlockType(block) - if err != nil { - return err - } - - // Write header only after we have some data to write. - if t.n == 0 { - if err := t.writeHeader(); err != nil { - return err - } - } - - var checksum [crc32.Size]byte - binary.BigEndian.PutUint32(checksum[:], crc32.ChecksumIEEE(block)) - - _, err = t.w.Write(checksum[:]) - if err != nil { - return err - } - - n, err := t.w.Write(block) - if err != nil { - return err - } - n += len(checksum) - - // Record this block in index - t.index.Add(key, blockType, minTime, maxTime, t.n, uint32(n)) - - // Increment file position pointer (checksum + block len) - t.n += int64(n) - - // fsync the file periodically to avoid long pauses with very big files. - if t.n-t.lastSync > fsyncEvery { - if err := t.sync(); err != nil { - return err - } - t.lastSync = t.n - } - - if len(t.index.Entries(key)) >= maxIndexEntries { - return ErrMaxBlocksExceeded - } - - return nil -} - -// WriteIndex writes the index section of the file. If there are no index entries to write, -// this returns ErrNoValues. -func (t *tsmWriter) WriteIndex() error { - indexPos := t.n - - if t.index.KeyCount() == 0 { - return ErrNoValues - } - - // Set the destination file on the index so we can periodically - // fsync while writing the index. - if f, ok := t.wrapped.(syncer); ok { - t.index.(*directIndex).f = f - } - - // Write the index - if _, err := t.index.WriteTo(t.w); err != nil { - return err - } - - var buf [8]byte - binary.BigEndian.PutUint64(buf[:], uint64(indexPos)) - - // Write the index index position - _, err := t.w.Write(buf[:]) - return err -} - -func (t *tsmWriter) Flush() error { - if err := t.w.Flush(); err != nil { - return err - } - - return t.sync() -} - -func (t *tsmWriter) sync() error { - // sync is a minimal interface to make sure we can sync the wrapped - // value. we use a minimal interface to be as robust as possible for - // syncing these files. - type sync interface { - Sync() error - } - - if f, ok := t.wrapped.(sync); ok { - if err := f.Sync(); err != nil { - return err - } - } - return nil -} - -func (t *tsmWriter) Close() error { - if err := t.Flush(); err != nil { - return err - } - - if err := t.index.Close(); err != nil { - return err - } - - if c, ok := t.wrapped.(io.Closer); ok { - return c.Close() - } - return nil -} - -// Remove removes any temporary storage used by the writer. -func (t *tsmWriter) Remove() error { - if err := t.index.Remove(); err != nil { - return err - } - - // nameCloser is the most permissive interface we can close the wrapped - // value with. - type nameCloser interface { - io.Closer - Name() string - } - - if f, ok := t.wrapped.(nameCloser); ok { - // Close the file handle to prevent leaking. We ignore the error because - // we just want to cleanup and remove the file. 
- _ = f.Close() - - return os.Remove(f.Name()) - } - return nil -} - -func (t *tsmWriter) Size() uint32 { - return uint32(t.n) + t.index.Size() -} - -// verifyVersion verifies that the reader's bytes are a TSM byte -// stream of the correct version (1) -func verifyVersion(r io.ReadSeeker) error { - _, err := r.Seek(0, 0) - if err != nil { - return fmt.Errorf("init: failed to seek: %v", err) - } - var b [4]byte - _, err = io.ReadFull(r, b[:]) - if err != nil { - return fmt.Errorf("init: error reading magic number of file: %v", err) - } - if binary.BigEndian.Uint32(b[:]) != MagicNumber { - return fmt.Errorf("can only read from tsm file") - } - _, err = io.ReadFull(r, b[:1]) - if err != nil { - return fmt.Errorf("init: error reading version: %v", err) - } - if b[0] != Version { - return fmt.Errorf("init: file is version %b. expected %b", b[0], Version) - } - - return nil -} diff --git a/tsdb/engine/tsm1/writer_test.go b/tsdb/engine/tsm1/writer_test.go deleted file mode 100644 index ee816df94ab..00000000000 --- a/tsdb/engine/tsm1/writer_test.go +++ /dev/null @@ -1,614 +0,0 @@ -package tsm1_test - -import ( - "bytes" - "encoding/binary" - "io" - "os" - "testing" - - "github.com/influxdata/influxdb/v2/tsdb/engine/tsm1" -) - -func TestTSMWriter_Write_Empty(t *testing.T) { - var b bytes.Buffer - w, err := tsm1.NewTSMWriter(&b) - if err != nil { - t.Fatalf("unexpected error created writer: %v", err) - } - - if err := w.WriteIndex(); err != tsm1.ErrNoValues { - t.Fatalf("unexpected error closing: %v", err) - } - - if got, exp := len(b.Bytes()), 0; got < exp { - t.Fatalf("file size mismatch: got %v, exp %v", got, exp) - } -} - -func TestTSMWriter_Write_NoValues(t *testing.T) { - var b bytes.Buffer - w, err := tsm1.NewTSMWriter(&b) - if err != nil { - t.Fatalf("unexpected error created writer: %v", err) - } - - if err := w.Write([]byte("foo"), []tsm1.Value{}); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - - if err := w.WriteIndex(); err != tsm1.ErrNoValues { - t.Fatalf("unexpected error closing: %v", err) - } - - if got, exp := len(b.Bytes()), 0; got < exp { - t.Fatalf("file size mismatch: got %v, exp %v", got, exp) - } -} - -func TestTSMWriter_Write_Single(t *testing.T) { - dir := t.TempDir() - f := MustTempFile(t, dir) - - w, err := tsm1.NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - values := []tsm1.Value{tsm1.NewValue(0, 1.0)} - if err := w.Write([]byte("cpu"), values); err != nil { - t.Fatalf("unexpected error writing: %v", err) - - } - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - fd, err := os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - b, err := io.ReadAll(fd) - if err != nil { - t.Fatalf("unexpected error reading: %v", err) - } - - if got, exp := len(b), 5; got < exp { - t.Fatalf("file size mismatch: got %v, exp %v", got, exp) - } - if got := binary.BigEndian.Uint32(b[0:4]); got != tsm1.MagicNumber { - t.Fatalf("magic number mismatch: got %v, exp %v", got, tsm1.MagicNumber) - } - - if _, err := fd.Seek(0, io.SeekStart); err != nil { - t.Fatalf("unexpected error seeking: %v", err) - } - - r, err := tsm1.NewTSMReader(fd) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - readValues, err := r.ReadAll([]byte("cpu")) - if err != nil { - t.Fatalf("unexpected error readin: %v", err) - 
} - - if len(readValues) != len(values) { - t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), len(values)) - } - - for i, v := range values { - if v.Value() != readValues[i].Value() { - t.Fatalf("read value mismatch(%d): got %v, exp %d", i, readValues[i].Value(), v.Value()) - } - } -} - -func TestTSMWriter_Write_Multiple(t *testing.T) { - dir := t.TempDir() - f := MustTempFile(t, dir) - - w, err := tsm1.NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - var data = []struct { - key string - values []tsm1.Value - }{ - {"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - {"mem", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - } - - for _, d := range data { - if err := w.Write([]byte(d.key), d.values); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - fd, err := os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := tsm1.NewTSMReader(fd) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - for _, d := range data { - readValues, err := r.ReadAll([]byte(d.key)) - if err != nil { - t.Fatalf("unexpected error readin: %v", err) - } - - if exp := len(d.values); exp != len(readValues) { - t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), exp) - } - - for i, v := range d.values { - if v.Value() != readValues[i].Value() { - t.Fatalf("read value mismatch(%d): got %v, exp %d", i, readValues[i].Value(), v.Value()) - } - } - } -} - -func TestTSMWriter_Write_MultipleKeyValues(t *testing.T) { - dir := t.TempDir() - f := MustTempFile(t, dir) - - w, err := tsm1.NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - var data = []struct { - key string - values []tsm1.Value - }{ - {"cpu", []tsm1.Value{ - tsm1.NewValue(0, 1.0), - tsm1.NewValue(1, 2.0)}, - }, - {"mem", []tsm1.Value{ - tsm1.NewValue(0, 1.5), - tsm1.NewValue(1, 2.5)}, - }, - } - - for _, d := range data { - if err := w.Write([]byte(d.key), d.values); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - fd, err := os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := tsm1.NewTSMReader(fd) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - for _, d := range data { - readValues, err := r.ReadAll([]byte(d.key)) - if err != nil { - t.Fatalf("unexpected error readin: %v", err) - } - - if exp := len(d.values); exp != len(readValues) { - t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), exp) - } - - for i, v := range d.values { - if v.Value() != readValues[i].Value() { - t.Fatalf("read value mismatch(%d): got %v, exp %d", i, readValues[i].Value(), v.Value()) - } - } - } -} - -// Tests that writing keys in reverse is able to read them back. 
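
// A minimal sketch of how a caller might drive the writer API exercised by
// the tests above and react to ErrMaxBlocksExceeded: per the Write/WriteBlock
// contracts, the block that triggered the error has already been written and
// indexed, but the index is now full for that key, so the caller stops writing
// that key before finalizing the file. The helper name writeKeyBatches is
// illustrative only and not part of the tsm1 package.
func writeKeyBatches(w tsm1.TSMWriter, key []byte, batches [][]tsm1.Value) error {
	for _, batch := range batches {
		err := w.Write(key, batch)
		if err == tsm1.ErrMaxBlocksExceeded {
			// The index can hold no more entries for this key; further writes
			// to it would fail, so stop here and finalize the file.
			break
		}
		if err != nil {
			return err
		}
	}
	if err := w.WriteIndex(); err != nil {
		return err
	}
	return w.Close()
}
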
-func TestTSMWriter_Write_SameKey(t *testing.T) { - dir := t.TempDir() - f := MustTempFile(t, dir) - - w, err := tsm1.NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - var data = []struct { - key string - values []tsm1.Value - }{ - {"cpu", []tsm1.Value{ - tsm1.NewValue(0, 1.0), - tsm1.NewValue(1, 2.0)}, - }, - {"cpu", []tsm1.Value{ - tsm1.NewValue(2, 3.0), - tsm1.NewValue(3, 4.0)}, - }, - } - - for _, d := range data { - if err := w.Write([]byte(d.key), d.values); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - fd, err := os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := tsm1.NewTSMReader(fd) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - values := append(data[0].values, data[1].values...) - - readValues, err := r.ReadAll([]byte("cpu")) - if err != nil { - t.Fatalf("unexpected error readin: %v", err) - } - - if exp := len(values); exp != len(readValues) { - t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), exp) - } - - for i, v := range values { - if v.Value() != readValues[i].Value() { - t.Fatalf("read value mismatch(%d): got %v, exp %d", i, readValues[i].Value(), v.Value()) - } - } -} - -// Tests that calling Read returns all the values for block matching the key -// and timestamp -func TestTSMWriter_Read_Multiple(t *testing.T) { - dir := t.TempDir() - f := MustTempFile(t, dir) - - w, err := tsm1.NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - var data = []struct { - key string - values []tsm1.Value - }{ - {"cpu", []tsm1.Value{ - tsm1.NewValue(0, 1.0), - tsm1.NewValue(1, 2.0)}, - }, - {"cpu", []tsm1.Value{ - tsm1.NewValue(2, 3.0), - tsm1.NewValue(3, 4.0)}, - }, - } - - for _, d := range data { - if err := w.Write([]byte(d.key), d.values); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - fd, err := os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - - r, err := tsm1.NewTSMReader(fd) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - for _, values := range data { - // Try the first timestamp - readValues, err := r.Read([]byte("cpu"), values.values[0].UnixNano()) - if err != nil { - t.Fatalf("unexpected error readin: %v", err) - } - - if exp := len(values.values); exp != len(readValues) { - t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), exp) - } - - for i, v := range values.values { - if v.Value() != readValues[i].Value() { - t.Fatalf("read value mismatch(%d): got %v, exp %d", i, readValues[i].Value(), v.Value()) - } - } - - // Try the last timestamp too - readValues, err = r.Read([]byte("cpu"), values.values[1].UnixNano()) - if err != nil { - t.Fatalf("unexpected error readin: %v", err) - } - - if exp := len(values.values); exp != len(readValues) { - t.Fatalf("read values length mismatch: got %v, exp %v", len(readValues), exp) - } - - for i, v := range values.values { - if v.Value() != readValues[i].Value() { - t.Fatalf("read value 
mismatch(%d): got %v, exp %d", i, readValues[i].Value(), v.Value()) - } - } - } -} - -func TestTSMWriter_WriteBlock_Empty(t *testing.T) { - dir := t.TempDir() - f := MustTempFile(t, dir) - - w, err := tsm1.NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - if err := w.WriteBlock([]byte("cpu"), 0, 0, nil); err != nil { - t.Fatalf("unexpected error writing block: %v", err) - } - - if err := w.WriteIndex(); err != tsm1.ErrNoValues { - t.Fatalf("unexpected error closing: %v", err) - } - - fd, err := os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - defer fd.Close() - - b, err := io.ReadAll(fd) - if err != nil { - t.Fatalf("unexpected error read all: %v", err) - } - - if got, exp := len(b), 0; got < exp { - t.Fatalf("file size mismatch: got %v, exp %v", got, exp) - } -} - -func TestTSMWriter_WriteBlock_Multiple(t *testing.T) { - dir := t.TempDir() - f := MustTempFile(t, dir) - - w, err := tsm1.NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - var data = []struct { - key string - values []tsm1.Value - }{ - {"cpu", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, - {"mem", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, - } - - for _, d := range data { - if err := w.Write([]byte(d.key), d.values); err != nil { - t.Fatalf("unexpected error writing: %v", err) - } - } - - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - fd, err := os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - defer fd.Close() - - b, err := io.ReadAll(fd) - if err != nil { - t.Fatalf("unexpected error read all: %v", err) - } - - if got, exp := len(b), 5; got < exp { - t.Fatalf("file size mismatch: got %v, exp %v", got, exp) - } - if got := binary.BigEndian.Uint32(b[0:4]); got != tsm1.MagicNumber { - t.Fatalf("magic number mismatch: got %v, exp %v", got, tsm1.MagicNumber) - } - - if _, err := fd.Seek(0, io.SeekStart); err != nil { - t.Fatalf("error seeking: %v", err) - } - - // Create reader for that file - r, err := tsm1.NewTSMReader(fd) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - f = MustTempFile(t, dir) - w, err = tsm1.NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - iter := r.BlockIterator() - for iter.Next() { - key, minTime, maxTime, _, _, b, err := iter.Read() - if err != nil { - t.Fatalf("unexpected error reading block: %v", err) - } - if err := w.WriteBlock([]byte(key), minTime, maxTime, b); err != nil { - t.Fatalf("unexpected error writing block: %v", err) - } - } - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - fd, err = os.Open(f.Name()) - if err != nil { - t.Fatalf("unexpected error open file: %v", err) - } - defer fd.Close() - - // Now create a reader to verify the written blocks matches the originally - // written file using Write - r, err = tsm1.NewTSMReader(fd) - if err != nil { - t.Fatalf("unexpected error created reader: %v", err) - } - defer r.Close() - - for _, d := range data { - readValues, err := r.ReadAll([]byte(d.key)) - if err != nil { - t.Fatalf("unexpected error readin: %v", err) - } - - if exp := len(d.values); exp != len(readValues) { - t.Fatalf("read values length mismatch: 
got %v, exp %v", len(readValues), exp) - } - - for i, v := range d.values { - if v.Value() != readValues[i].Value() { - t.Fatalf("read value mismatch(%d): got %v, exp %d", i, readValues[i].Value(), v.Value()) - } - } - } -} - -func TestTSMWriter_WriteBlock_MaxKey(t *testing.T) { - dir := t.TempDir() - f := MustTempFile(t, dir) - - w, err := tsm1.NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - var key string - for i := 0; i < 100000; i++ { - key += "a" - } - - if err := w.WriteBlock([]byte(key), 0, 0, nil); err != tsm1.ErrMaxKeyLengthExceeded { - t.Fatalf("expected max key length error writing key: %v", err) - } -} - -func TestTSMWriter_Write_MaxKey(t *testing.T) { - dir := t.TempDir() - f := MustTempFile(t, dir) - - w, err := tsm1.NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error created writer: %v", err) - } - - var key string - for i := 0; i < 100000; i++ { - key += "a" - } - if err := w.Write([]byte(key), []tsm1.Value{tsm1.NewValue(0, 1.0)}); err != tsm1.ErrMaxKeyLengthExceeded { - t.Fatalf("expected max key length error writing key: %v", err) - } -} - -type fakeSyncer bool - -func (f *fakeSyncer) Sync() error { - *f = true - return nil -} - -func TestTSMWriter_Sync(t *testing.T) { - f := &struct { - io.Writer - fakeSyncer - }{ - Writer: io.Discard, - } - - w, err := tsm1.NewTSMWriter(f) - if err != nil { - t.Fatalf("unexpected error creating writer: %v", err) - } - - values := []tsm1.Value{tsm1.NewValue(0, 1.0)} - if err := w.Write([]byte("cpu"), values); err != nil { - t.Fatalf("unexpected error writing: %v", err) - - } - if err := w.WriteIndex(); err != nil { - t.Fatalf("unexpected error writing index: %v", err) - } - - if err := w.Close(); err != nil { - t.Fatalf("unexpected error closing: %v", err) - } - - if !f.fakeSyncer { - t.Fatal("failed to sync") - } -} diff --git a/tsdb/enginetags.go b/tsdb/enginetags.go deleted file mode 100644 index d141ee815ba..00000000000 --- a/tsdb/enginetags.go +++ /dev/null @@ -1,29 +0,0 @@ -package tsdb - -import "github.com/prometheus/client_golang/prometheus" - -// EngineTags holds tags for prometheus -// -// It should not be used for behaviour other than attaching tags to prometheus metrics -type EngineTags struct { - Path, WalPath, Id, Bucket, EngineVersion string -} - -func (et *EngineTags) GetLabels() prometheus.Labels { - return prometheus.Labels{ - "path": et.Path, - "walPath": et.WalPath, - "id": et.Id, - "bucket": et.Bucket, - "engine": et.EngineVersion, - } -} - -func EngineLabelNames() []string { - emptyLabels := (&EngineTags{}).GetLabels() - val := make([]string, 0, len(emptyLabels)) - for k := range emptyLabels { - val = append(val, k) - } - return val -} diff --git a/tsdb/epoch_tracker.go b/tsdb/epoch_tracker.go deleted file mode 100644 index 997cf56a524..00000000000 --- a/tsdb/epoch_tracker.go +++ /dev/null @@ -1,147 +0,0 @@ -package tsdb - -import ( - "sync" -) - -// TODO(jeff): using a mutex is easiest, but there may be a way to do -// this with atomics only, and in a way such that writes are minimally -// blocked. - -// epochTracker keeps track of epochs for write and delete operations -// allowing a delete to block until all previous writes have completed. -type epochTracker struct { - mu sync.Mutex - epoch uint64 // current epoch - largest uint64 // largest delete possible - writes int64 // pending writes - // pending deletes waiting on writes - deletes map[uint64]*epochDeleteState -} - -// newEpochTracker constructs an epochTracker. 
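
// A minimal sketch of the coordination this tracker is meant to provide, using
// the methods defined below (StartWrite, EndWrite, WaitDelete) together with
// the guard type and models.Point from elsewhere in this package. The helper
// names, and the choice to wait on a matching guard rather than reject the
// write, are illustrative assumptions.
func sketchEpochWrite(tr *epochTracker, points []models.Point, write func()) {
	guards, gen := tr.StartWrite()
	defer tr.EndWrite(gen)
	for _, g := range guards {
		if g.Matches(points) {
			// A pending delete overlaps these points; wait for it to finish.
			g.Wait()
		}
	}
	write()
}

func sketchEpochDelete(tr *epochTracker, g *guard, del func()) {
	waiter := tr.WaitDelete(g)
	waiter.Wait() // block until writes started before this delete have ended
	del()
	waiter.Done() // drop the guard so blocked or future writes can proceed
}
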
-func newEpochTracker() *epochTracker { - return &epochTracker{ - deletes: make(map[uint64]*epochDeleteState), - } -} - -// epochDeleteState keeps track of the state for a pending delete. -type epochDeleteState struct { - cond *sync.Cond - guard *guard - pending int64 -} - -// done signals that an earlier write has finished. -func (e *epochDeleteState) done() { - e.cond.L.Lock() - e.pending-- - if e.pending == 0 { - e.cond.Broadcast() - } - e.cond.L.Unlock() -} - -// Wait blocks until all earlier writes have finished. -func (e *epochDeleteState) Wait() { - e.cond.L.Lock() - for e.pending > 0 { - e.cond.Wait() - } - e.cond.L.Unlock() -} - -// next bumps the epoch and returns it. -func (e *epochTracker) next() uint64 { - e.epoch++ - return e.epoch -} - -// StartWrite should be called before a write is going to start, and after -// it has checked for guards. -func (e *epochTracker) StartWrite() ([]*guard, uint64) { - e.mu.Lock() - gen := e.next() - e.writes++ - - if len(e.deletes) == 0 { - e.mu.Unlock() - return nil, gen - } - - guards := make([]*guard, 0, len(e.deletes)) - for _, state := range e.deletes { - guards = append(guards, state.guard) - } - - e.mu.Unlock() - return guards, gen -} - -// EndWrite should be called when the write ends for any reason. -func (e *epochTracker) EndWrite(gen uint64) { - e.mu.Lock() - if gen <= e.largest { - // TODO(jeff): at the cost of making waitDelete more - // complicated, we can keep a sorted slice which would - // allow this to exit early rather than go over the - // whole map. - for dgen, state := range e.deletes { - if gen > dgen { - continue - } - state.done() - } - } - e.writes-- - e.mu.Unlock() -} - -// epochWaiter is a type that can be waited on for prior writes to finish. -type epochWaiter struct { - gen uint64 - guard *guard - state *epochDeleteState - tracker *epochTracker -} - -// Wait blocks until all writes prior to the creation of the waiter finish. -func (e epochWaiter) Wait() { - if e.state == nil || e.tracker == nil { - return - } - e.state.Wait() -} - -// Done marks the delete as completed, removing its guard. -func (e epochWaiter) Done() { - e.tracker.mu.Lock() - delete(e.tracker.deletes, e.gen) - e.tracker.mu.Unlock() - e.guard.Done() -} - -// WaitDelete should be called after any delete guards have been installed. -// The returned epochWaiter will not be affected by any future writes. -func (e *epochTracker) WaitDelete(guard *guard) epochWaiter { - e.mu.Lock() - state := &epochDeleteState{ - pending: e.writes, - cond: sync.NewCond(new(sync.Mutex)), - guard: guard, - } - - // record our pending delete - gen := e.next() - e.largest = gen - e.deletes[gen] = state - e.mu.Unlock() - - return epochWaiter{ - gen: gen, - guard: guard, - state: state, - tracker: e, - } -} diff --git a/tsdb/epoch_tracker_test.go b/tsdb/epoch_tracker_test.go deleted file mode 100644 index ddfe9a0b2b7..00000000000 --- a/tsdb/epoch_tracker_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package tsdb - -import ( - "testing" - "time" -) - -func TestEpochTracker(t *testing.T) { - t.Run("Delete waits", func(t *testing.T) { - tr := newEpochTracker() - - // delete should proceed with no pending writes - waiter := tr.WaitDelete(newGuard(0, 0, nil, nil)) - waiter.Wait() - waiter.Done() - - for i := 0; i < 1000; i++ { - // start up some writes - _, w1 := tr.StartWrite() - _, w2 := tr.StartWrite() - _, w3 := tr.StartWrite() - - // wait for a delete. this time based stuff isn't sufficient - // to check every problem, but it can catch some. 
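			// Concretely: Wait must not return before EndWrite has been called
			// for each of w1, w2 and w3, and the timestamp comparison below
			// catches a waiter that is released too early.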
- waiter := tr.WaitDelete(nil) - done := make(chan time.Time, 1) - go func() { waiter.Wait(); done <- time.Now() }() - - // future writes should not block the waiter - _, w4 := tr.StartWrite() - - // ending the writes allows the waiter to proceed - tr.EndWrite(w1) - tr.EndWrite(w2) - now := time.Now() - tr.EndWrite(w3) - if (<-done).Before(now) { - t.Fatal("Wait ended too soon") - } - tr.EndWrite(w4) - } - }) - - t.Run("Guards tracked", func(t *testing.T) { - checkGuards := func(got []*guard, exp ...*guard) { - t.Helper() - if len(exp) != len(got) { - t.Fatalf("invalid: %p != %p", exp, got) - } - next: - for _, g1 := range got { - for _, g2 := range exp { - if g1 == g2 { - continue next - } - } - t.Fatalf("invalid: %p != %p", exp, got) - } - } - - tr := newEpochTracker() - g1, g2, g3 := newGuard(0, 0, nil, nil), newGuard(0, 0, nil, nil), newGuard(0, 0, nil, nil) - - guards, _ := tr.StartWrite() - checkGuards(guards) - - d1 := tr.WaitDelete(g1) - guards, _ = tr.StartWrite() - checkGuards(guards, g1) - - d2 := tr.WaitDelete(g2) - guards, _ = tr.StartWrite() - checkGuards(guards, g1, g2) - - d3 := tr.WaitDelete(g3) - guards, _ = tr.StartWrite() - checkGuards(guards, g1, g2, g3) - - d2.Done() - guards, _ = tr.StartWrite() - checkGuards(guards, g1, g3) - - d1.Done() - guards, _ = tr.StartWrite() - checkGuards(guards, g3) - - d3.Done() - guards, _ = tr.StartWrite() - checkGuards(guards) - }) -} - -func BenchmarkEpochTracker(b *testing.B) { - b.Run("Writes with deletes", func(b *testing.B) { - b.Run("Serial", func(b *testing.B) { - run := func(b *testing.B, deletes int) { - tr := newEpochTracker() - tr.StartWrite() - for i := 0; i < deletes; i++ { - tr.WaitDelete(nil) - } - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - _, gen := tr.StartWrite() - tr.EndWrite(gen) - } - } - - b.Run("0", func(b *testing.B) { run(b, 0) }) - b.Run("1", func(b *testing.B) { run(b, 1) }) - b.Run("10", func(b *testing.B) { run(b, 10) }) - b.Run("100", func(b *testing.B) { run(b, 100) }) - }) - - b.Run("Parallel", func(b *testing.B) { - run := func(b *testing.B, deletes int) { - tr := newEpochTracker() - tr.StartWrite() - for i := 0; i < deletes; i++ { - tr.WaitDelete(nil) - } - b.ReportAllocs() - b.ResetTimer() - - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - _, gen := tr.StartWrite() - tr.EndWrite(gen) - } - }) - } - - b.Run("0", func(b *testing.B) { run(b, 0) }) - b.Run("1", func(b *testing.B) { run(b, 1) }) - b.Run("10", func(b *testing.B) { run(b, 10) }) - b.Run("100", func(b *testing.B) { run(b, 100) }) - }) - }) -} diff --git a/tsdb/field_validator.go b/tsdb/field_validator.go deleted file mode 100644 index 5a978dd8fd6..00000000000 --- a/tsdb/field_validator.go +++ /dev/null @@ -1,83 +0,0 @@ -package tsdb - -import ( - "bytes" - "fmt" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxql" -) - -const MaxFieldValueLength = 1048576 - -// ValidateFields will return a PartialWriteError if: -// - the point has inconsistent fields, or -// - the point has fields that are too long -func ValidateFields(mf *MeasurementFields, point models.Point, skipSizeValidation bool) error { - pointSize := point.StringSize() - iter := point.FieldIterator() - for iter.Next() { - if !skipSizeValidation { - // Check for size of field too large. 
Note it is much cheaper to check the whole point size - // than checking the StringValue size (StringValue potentially takes an allocation if it must - // unescape the string, and must at least parse the string) - if pointSize > MaxFieldValueLength && iter.Type() == models.String { - if sz := len(iter.StringValue()); sz > MaxFieldValueLength { - return PartialWriteError{ - Reason: fmt.Sprintf( - "input field \"%s\" on measurement \"%s\" is too long, %d > %d", - iter.FieldKey(), point.Name(), sz, MaxFieldValueLength), - Dropped: 1, - } - } - } - } - - // Skip fields name "time", they are illegal. - if bytes.Equal(iter.FieldKey(), timeBytes) { - continue - } - - // If the fields is not present, there cannot be a conflict. - f := mf.FieldBytes(iter.FieldKey()) - if f == nil { - continue - } - - dataType := dataTypeFromModelsFieldType(iter.Type()) - if dataType == influxql.Unknown { - continue - } - - // If the types are not the same, there is a conflict. - if f.Type != dataType { - return PartialWriteError{ - Reason: fmt.Sprintf( - "%s: input field \"%s\" on measurement \"%s\" is type %s, already exists as type %s", - ErrFieldTypeConflict, iter.FieldKey(), point.Name(), dataType, f.Type), - Dropped: 1, - } - } - } - - return nil -} - -// dataTypeFromModelsFieldType returns the influxql.DataType that corresponds to the -// passed in field type. If there is no good match, it returns Unknown. -func dataTypeFromModelsFieldType(fieldType models.FieldType) influxql.DataType { - switch fieldType { - case models.Float: - return influxql.Float - case models.Integer: - return influxql.Integer - case models.Unsigned: - return influxql.Unsigned - case models.Boolean: - return influxql.Boolean - case models.String: - return influxql.String - default: - return influxql.Unknown - } -} diff --git a/tsdb/guard.go b/tsdb/guard.go deleted file mode 100644 index 60603652891..00000000000 --- a/tsdb/guard.go +++ /dev/null @@ -1,253 +0,0 @@ -package tsdb - -import ( - "bytes" - "sync" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxql" -) - -// guard lets one match a set of points and block until they are done. -type guard struct { - cond *sync.Cond - done bool - min int64 - max int64 - names map[string]struct{} - expr *exprGuard -} - -// newGuard constructs a guard that will match any points in the given min and max -// time range, with the given set of measurement names, or the given expression. -// The expression is optional. -func newGuard(min, max int64, names []string, expr influxql.Expr) *guard { - set := make(map[string]struct{}, len(names)) - for _, name := range names { - set[name] = struct{}{} - } - return &guard{ - cond: sync.NewCond(new(sync.Mutex)), - min: min, - max: max, - names: set, - expr: newExprGuard(expr), - } -} - -// Matches returns true if any of the points match the guard. -func (g *guard) Matches(points []models.Point) bool { - if g == nil { - return true - } - - for _, pt := range points { - if t := pt.Time().UnixNano(); t < g.min || t > g.max { - continue - } - if len(g.names) == 0 && g.expr.matches(pt) { - return true - } else if _, ok := g.names[string(pt.Name())]; ok && g.expr.matches(pt) { - return true - } - } - return false -} - -// Wait blocks until the guard has been marked Done. -func (g *guard) Wait() { - g.cond.L.Lock() - for !g.done { - g.cond.Wait() - } - g.cond.L.Unlock() -} - -// Done signals to anyone waiting on the guard that they can proceed. 
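
// A minimal sketch of building a guard for a delete and screening an incoming
// batch against it, mirroring the cases exercised in guard_test.go further
// down. The helper name sketchGuardMatch is illustrative only.
func sketchGuardMatch() (bool, error) {
	expr, err := influxql.ParseExpr("host = 'server1'")
	if err != nil {
		return false, err
	}
	// Guard deletes of measurement "cpu" over the time range [0, 1000],
	// restricted to host=server1.
	g := newGuard(0, 1000, []string{"cpu"}, expr)

	points, err := models.ParsePointsString("cpu,host=server1 value=1 100")
	if err != nil {
		return false, err
	}
	// True here: the point is inside the guarded time range, names the guarded
	// measurement, and satisfies the expression.
	return g.Matches(points), nil
}
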
-func (g *guard) Done() { - g.cond.L.Lock() - g.done = true - g.cond.Broadcast() - g.cond.L.Unlock() -} - -// exprGuard is a union of influxql.Expr based guards. a nil exprGuard matches -// everything, while the zero value matches nothing. -type exprGuard struct { - and *[2]*exprGuard - or *[2]*exprGuard - tagMatches *tagGuard - tagExists map[string]struct{} -} - -type tagGuard struct { - meas bool - key []byte - op func([]byte) bool -} - -// empty returns true if the exprGuard is empty, meaning that it matches no points. -func (e *exprGuard) empty() bool { - return e != nil && e.and == nil && e.or == nil && e.tagMatches == nil && e.tagExists == nil -} - -// newExprGuard scrutinizes the expression and returns an efficient guard. -func newExprGuard(expr influxql.Expr) *exprGuard { - if expr == nil { - return nil - } - - switch expr := expr.(type) { - case *influxql.ParenExpr: - return newExprGuard(expr.Expr) - - case *influxql.BooleanLiteral: - if expr.Val { - return nil // matches everything - } - return new(exprGuard) // matches nothing - - case *influxql.BinaryExpr: - switch expr.Op { - case influxql.AND: - lhs, rhs := newExprGuard(expr.LHS), newExprGuard(expr.RHS) - if lhs == nil { // reduce - return rhs - } else if rhs == nil { // reduce - return lhs - } else if lhs.empty() || rhs.empty() { // short circuit - return new(exprGuard) - } else { - return &exprGuard{and: &[2]*exprGuard{lhs, rhs}} - } - - case influxql.OR: - lhs, rhs := newExprGuard(expr.LHS), newExprGuard(expr.RHS) - if lhs.empty() { // reduce - return rhs - } else if rhs.empty() { // reduce - return lhs - } else if lhs == nil || rhs == nil { // short circuit - return nil - } else { - return &exprGuard{or: &[2]*exprGuard{lhs, rhs}} - } - - default: - return newBinaryExprGuard(expr) - } - default: - // if we couldn't analyze, match everything - return nil - } -} - -// newBinaryExprGuard scrutinizes the binary expression and returns an efficient guard. -func newBinaryExprGuard(expr *influxql.BinaryExpr) *exprGuard { - // if it's a nested binary expression, always match. - if _, ok := expr.LHS.(*influxql.BinaryExpr); ok { - return nil - } else if _, ok := expr.RHS.(*influxql.BinaryExpr); ok { - return nil - } - - // ensure one of the expressions is a VarRef, and make that the key. - key, ok := expr.LHS.(*influxql.VarRef) - value := expr.RHS - if !ok { - key, ok = expr.RHS.(*influxql.VarRef) - if !ok { - return nil - } - value = expr.LHS - } - - // check the key for situations we know we can't filter. - if key.Val != "_name" && key.Type != influxql.Unknown && key.Type != influxql.Tag { - return nil - } - - // scrutinize the value to return an efficient guard. - switch value := value.(type) { - case *influxql.StringLiteral: - val := []byte(value.Val) - g := &exprGuard{tagMatches: &tagGuard{ - meas: key.Val == "_name", - key: []byte(key.Val), - }} - - switch expr.Op { - case influxql.EQ: - g.tagMatches.op = func(x []byte) bool { return bytes.Equal(val, x) } - - case influxql.NEQ: - g.tagMatches.op = func(x []byte) bool { return !bytes.Equal(val, x) } - - default: // any other operator isn't valid. conservatively match everything. - return nil - } - - return g - - case *influxql.RegexLiteral: - // There's a tradeoff between being precise and being fast. For example, if the - // delete includes a very expensive regex, we don't want to run that against every - // incoming point. The decision here is to match any point that has a possibly - // expensive match if there is any overlap on the tags. 
In other words, expensive - // matches get transformed into trivially matching everything. - return &exprGuard{tagExists: map[string]struct{}{key.Val: {}}} - - case *influxql.VarRef: - // We could do a better job here by encoding the two names and checking the points - // against them, but I'm not quite sure how to do that. Be conservative and match - // any points that contain either the key or value. - - // since every point has a measurement, always match if either are on the measurement. - if key.Val == "_name" || value.Val == "_name" { - return nil - } - return &exprGuard{tagExists: map[string]struct{}{ - key.Val: {}, - value.Val: {}, - }} - - default: // any other value type matches everything - return nil - } -} - -// matches checks if the exprGuard matches the point. -func (g *exprGuard) matches(pt models.Point) bool { - switch { - case g == nil: - return true - - case g.and != nil: - return g.and[0].matches(pt) && g.and[1].matches(pt) - - case g.or != nil: - return g.or[0].matches(pt) || g.or[1].matches(pt) - - case g.tagMatches != nil: - if g.tagMatches.meas { - return g.tagMatches.op(pt.Name()) - } - for _, tag := range pt.Tags() { - if bytes.Equal(tag.Key, g.tagMatches.key) && g.tagMatches.op(tag.Value) { - return true - } - } - return false - - case g.tagExists != nil: - for _, tag := range pt.Tags() { - if _, ok := g.tagExists[string(tag.Key)]; ok { - return true - } - } - return false - - default: - return false - } -} diff --git a/tsdb/guard_test.go b/tsdb/guard_test.go deleted file mode 100644 index c13cdb141a5..00000000000 --- a/tsdb/guard_test.go +++ /dev/null @@ -1,314 +0,0 @@ -package tsdb - -import ( - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxql" -) - -func TestGuard(t *testing.T) { - tests := []struct { - min, max int64 - names []string - expr string - point string - matches bool - }{ - { // in time matching - min: 0, max: 1000, - point: "cpu value=1 100", - matches: true, - }, - { // out of time range doesn't match - min: 0, max: 10, - names: []string{"cpu"}, - point: "cpu value=1 100", - matches: false, - }, - { // measurement name matches - min: 0, max: 1000, - names: []string{"cpu"}, - point: "cpu value=1 100", - matches: true, - }, - { // measurement doesn't match - min: 0, max: 1000, - names: []string{"mem"}, - point: "cpu value=1 100", - matches: false, - }, - { // basic expression matching - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host = 'server1'", - matches: true, - }, - { // basic expression matching - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host != 'server2'", - matches: true, - }, - { // basic expression mismatch - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host = 'server2'", - matches: false, - }, - { // basic expression mismatch - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host != 'server1'", - matches: false, - }, - { // parenthesis unwrap - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "(host = 'server1')", - matches: true, - }, - { // compound expression matching - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host = 'server2' or host = 'server1'", - matches: true, - }, - { // compound expression mismatch - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host = 'server1' and host = 'server2'", - matches: false, - }, - { // regex expression matching - min: 0, max: 
1000, - point: "cpu,host=server1 value=1 100", - expr: "host =~ /server1/", - matches: true, - }, - { // regex expression mismatch - min: 0, max: 1000, - point: "cpu,foo=server1 value=1 100", - expr: "host =~ /server1/", - matches: false, - }, - { // regex over-approximation - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host =~ /server2/", - matches: true, - }, - { // regex over-approximation - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host !~ /server1/", - matches: true, - }, - { // key doesn't have to come first - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "'server1' = host", - matches: true, - }, - { // key doesn't have to come first - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "'server2' = host", - matches: false, - }, - { // conservative on no var refs - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "1 = 2", - matches: true, - }, - { // expr matches measurement - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "_name = 'cpu'", - matches: true, - }, - { // expr mismatches measurement - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "_name = 'mem'", - matches: false, - }, - { // expr conservative on dual var ref - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host = test", - matches: true, - }, - { // expr conservative on dual var ref mismatches - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "foo = bar", - matches: false, - }, - { // expr conservative on dual var ref involving measurement - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "_name = host", - matches: true, - }, - { // expr conservative on dual var ref involving measurement - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host = _name", - matches: true, - }, - { // boolean literal matches - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "true", - matches: true, - }, - { // boolean literal mismatches - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "false", - matches: false, - }, - { // reduce and - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "true and host = 'server1'", - matches: true, - }, - { // reduce and - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host = 'server1' and true", - matches: true, - }, - { // reduce or - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "false or host = 'server1'", - matches: true, - }, - { // reduce or - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host = 'server1' or false", - matches: true, - }, - { // short circuit and - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "false and host = 'server1'", - matches: false, - }, - { // short circuit and - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host = 'server1' and false", - matches: false, - }, - { // short circuit or - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "true or host = 'server2'", - matches: true, - }, - { // short circuit or - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host = 'server2' or true", - matches: true, - }, - { // conservative match weird exprs - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "'wierd'", - matches: true, - }, - { // conservative match weird exprs - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - 
expr: "value::field = '1'", - matches: true, - }, - { // conservative match weird exprs - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host <= 'aaa'", - matches: true, - }, - { // conservative match weird exprs - min: 0, max: 1000, - point: "cpu,host=server1 value=1 100", - expr: "host = ('server2')", - matches: true, - }, - } - - for i, test := range tests { - var expr influxql.Expr - if test.expr != "" { - var err error - expr, err = influxql.ParseExpr(test.expr) - if err != nil { - t.Fatal(err) - } - } - points, err := models.ParsePointsString(test.point) - if err != nil { - t.Fatal(err) - } - guard := newGuard(test.min, test.max, test.names, expr) - - if guard.Matches(points) != test.matches { - t.Errorf("%d: expected matching %q with time:[%d, %d] measurements:%v expr:%q to be %t", - i, test.point, test.min, test.max, test.names, test.expr, test.matches) - cs := &spew.ConfigState{DisableMethods: true, SpewKeys: true, Indent: " "} - t.Errorf("%d: expr: %s", i, cs.Sdump(expr)) - t.Errorf("%d: guard: %s", i, cs.Sdump(guard.expr)) - } - } -} - -func BenchmarkGuard(b *testing.B) { - tag := func(key, value string) models.Tag { - return models.Tag{Key: []byte(key), Value: []byte(value)} - } - - run := func(b *testing.B, g *guard) { - run := func(b *testing.B, batch int) { - points := make([]models.Point, batch) - for i := range points { - points[i] = models.MustNewPoint("cpu", models.Tags{ - tag("t0", "v0"), tag("t1", "v1"), tag("t2", "v2"), - tag("t3", "v3"), tag("t4", "v4"), tag("t5", "v5"), - tag("t6", "v6"), tag("t7", "v7"), tag("t8", "v8"), - }, models.Fields{"value": 100}, time.Unix(0, 50)) - } - - for i := 0; i < b.N; i++ { - if g.Matches(points) { - b.Fatal("matched") - } - } - } - - b.Run("1", func(b *testing.B) { run(b, 1) }) - b.Run("100", func(b *testing.B) { run(b, 100) }) - b.Run("10000", func(b *testing.B) { run(b, 10000) }) - } - - b.Run("Time Filtered", func(b *testing.B) { - run(b, newGuard(0, 10, nil, nil)) - }) - - b.Run("Measurement Filtered", func(b *testing.B) { - run(b, newGuard(0, 100, []string{"mem"}, nil)) - }) - - b.Run("Tag Filtered", func(b *testing.B) { - expr, _ := influxql.ParseExpr("t4 = 'v5'") - run(b, newGuard(0, 100, []string{"cpu"}, expr)) - }) -} diff --git a/tsdb/index.go b/tsdb/index.go deleted file mode 100644 index 5bca86ff352..00000000000 --- a/tsdb/index.go +++ /dev/null @@ -1,3223 +0,0 @@ -package tsdb - -import ( - "bytes" - "errors" - "fmt" - "os" - "regexp" - "sort" - "sync" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/bytesutil" - "github.com/influxdata/influxdb/v2/pkg/estimator" - "github.com/influxdata/influxdb/v2/pkg/slices" - "github.com/influxdata/influxql" - "go.uber.org/zap" -) - -// Available index types. -const ( - TSI1IndexName = "tsi1" -) - -// ErrIndexClosing can be returned to from an Index method if the index is currently closing. 
-var ErrIndexClosing = errors.New("index is closing") - -type Index interface { - Open() error - Close() error - WithLogger(*zap.Logger) - - Database() string - MeasurementExists(name []byte) (bool, error) - MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) - DropMeasurement(name []byte) error - ForEachMeasurementName(fn func(name []byte) error) error - - CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error - CreateSeriesListIfNotExists(keys, names [][]byte, tags []models.Tags) error - DropSeries(seriesID uint64, key []byte, cascade bool) error - DropMeasurementIfSeriesNotExist(name []byte) (bool, error) - - MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) - SeriesN() int64 - SeriesSketches() (estimator.Sketch, estimator.Sketch, error) - SeriesIDSet() *SeriesIDSet - - HasTagKey(name, key []byte) (bool, error) - HasTagValue(name, key, value []byte) (bool, error) - - MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) - - TagKeyCardinality(name, key []byte) int - - // InfluxQL system iterators - MeasurementIterator() (MeasurementIterator, error) - TagKeyIterator(name []byte) (TagKeyIterator, error) - TagValueIterator(name, key []byte) (TagValueIterator, error) - MeasurementSeriesIDIterator(name []byte) (SeriesIDIterator, error) - TagKeySeriesIDIterator(name, key []byte) (SeriesIDIterator, error) - TagValueSeriesIDIterator(name, key, value []byte) (SeriesIDIterator, error) - - // Sets a shared fieldset from the engine. - FieldSet() *MeasurementFieldSet - SetFieldSet(fs *MeasurementFieldSet) - - // Size of the index on disk, if applicable. - DiskSizeBytes() int64 - - // Bytes estimates the memory footprint of this Index, in bytes. - Bytes() int - - Type() string - - // Returns a unique reference ID to the index instance. - UniqueReferenceID() uintptr -} - -// SeriesElem represents a generic series element. -type SeriesElem interface { - Name() []byte - Tags() models.Tags - Deleted() bool - - // InfluxQL expression associated with series during filtering. - Expr() influxql.Expr -} - -// SeriesIterator represents a iterator over a list of series. -type SeriesIterator interface { - Close() error - Next() (SeriesElem, error) -} - -// NewSeriesIteratorAdapter returns an adapter for converting series ids to series. -func NewSeriesIteratorAdapter(sfile *SeriesFile, itr SeriesIDIterator) SeriesIterator { - return &seriesIteratorAdapter{ - sfile: sfile, - itr: itr, - } -} - -type seriesIteratorAdapter struct { - sfile *SeriesFile - itr SeriesIDIterator -} - -func (itr *seriesIteratorAdapter) Close() error { return itr.itr.Close() } - -func (itr *seriesIteratorAdapter) Next() (SeriesElem, error) { - for { - elem, err := itr.itr.Next() - if err != nil { - return nil, err - } else if elem.SeriesID == 0 { - return nil, nil - } - - // Skip if this key has been tombstoned. 
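		// (SeriesKey returns an empty key for a deleted id, so such elements
		// are dropped here rather than surfaced to the caller.)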
- key := itr.sfile.SeriesKey(elem.SeriesID) - if len(key) == 0 { - continue - } - - name, tags := ParseSeriesKey(key) - deleted := itr.sfile.IsDeleted(elem.SeriesID) - return &seriesElemAdapter{ - name: name, - tags: tags, - deleted: deleted, - expr: elem.Expr, - }, nil - } -} - -type seriesElemAdapter struct { - name []byte - tags models.Tags - deleted bool - expr influxql.Expr -} - -func (e *seriesElemAdapter) Name() []byte { return e.name } -func (e *seriesElemAdapter) Tags() models.Tags { return e.tags } -func (e *seriesElemAdapter) Deleted() bool { return e.deleted } -func (e *seriesElemAdapter) Expr() influxql.Expr { return e.expr } - -var _ SeriesIDIterator = (*PredicateSeriesIDIterator)(nil) - -type PredicateSeriesIDIterator struct { - itr SeriesIDIterator - sfile *SeriesFile - pred influxdb.Predicate -} - -func NewPredicateSeriesIDIterator(itr SeriesIDIterator, sfile *SeriesFile, pred influxdb.Predicate) SeriesIDIterator { - if pred == nil { - return itr - } - return &PredicateSeriesIDIterator{ - itr: itr, - sfile: sfile, - pred: pred, - } -} - -func (itr *PredicateSeriesIDIterator) Close() error { return itr.itr.Close() } - -func (itr *PredicateSeriesIDIterator) Next() (SeriesIDElem, error) { - for { - elem, err := itr.itr.Next() - if elem.SeriesID == 0 || err != nil { - return elem, err - } - - // Skip if this key has been tombstoned. - seriesKey := itr.sfile.SeriesKey(elem.SeriesID) - if len(seriesKey) == 0 { - continue - } - - name, tags := ParseSeriesKey(seriesKey) - tags = append(models.Tags{{Key: models.MeasurementTagKeyBytes, Value: name}}, tags...) - key := models.MakeKey(name, tags) - if !itr.pred.Matches(key) { - continue - } - return elem, nil - } -} - -// SeriesIDElem represents a single series and optional expression. -type SeriesIDElem struct { - SeriesID uint64 - Expr influxql.Expr -} - -// SeriesIDElems represents a list of series id elements. -type SeriesIDElems []SeriesIDElem - -func (a SeriesIDElems) Len() int { return len(a) } -func (a SeriesIDElems) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a SeriesIDElems) Less(i, j int) bool { return a[i].SeriesID < a[j].SeriesID } - -// SeriesIDIterator represents a iterator over a list of series ids. -type SeriesIDIterator interface { - Next() (SeriesIDElem, error) - Close() error -} - -// SeriesKeyIterator represents an iterator over a list of SeriesKeys -type SeriesKeyIterator interface { - Next() ([]byte, error) - Close() error -} - -// SeriesIDSetIterator represents an iterator that can produce a SeriesIDSet. -type SeriesIDSetIterator interface { - SeriesIDIterator - SeriesIDSet() *SeriesIDSet -} - -type seriesIDSetIterator struct { - ss *SeriesIDSet - itr SeriesIDSetIterable -} - -func NewSeriesIDSetIterator(ss *SeriesIDSet) SeriesIDSetIterator { - if ss == nil || ss.bitmap == nil { - return nil - } - return &seriesIDSetIterator{ss: ss, itr: ss.Iterator()} -} - -func (itr *seriesIDSetIterator) Next() (SeriesIDElem, error) { - if !itr.itr.HasNext() { - return SeriesIDElem{}, nil - } - return SeriesIDElem{SeriesID: uint64(itr.itr.Next())}, nil -} - -func (itr *seriesIDSetIterator) Close() error { return nil } - -func (itr *seriesIDSetIterator) SeriesIDSet() *SeriesIDSet { return itr.ss } - -// NewSeriesIDSetIterators returns a slice of SeriesIDSetIterator if all itrs -// can be type casted. Otherwise returns nil. 
-func NewSeriesIDSetIterators(itrs []SeriesIDIterator) []SeriesIDSetIterator { - if len(itrs) == 0 { - return nil - } - - a := make([]SeriesIDSetIterator, len(itrs)) - for i := range itrs { - if itr, ok := itrs[i].(SeriesIDSetIterator); ok { - a[i] = itr - } else { - return nil - } - } - return a -} - -// ReadAllSeriesIDIterator returns all ids from the iterator. -func ReadAllSeriesIDIterator(itr SeriesIDIterator) ([]uint64, error) { - if itr == nil { - return nil, nil - } - - var a []uint64 - for { - e, err := itr.Next() - if err != nil { - return nil, err - } else if e.SeriesID == 0 { - break - } - a = append(a, e.SeriesID) - } - return a, nil -} - -// NewSeriesIDSliceIterator returns a SeriesIDIterator that iterates over a slice. -func NewSeriesIDSliceIterator(ids []uint64) *SeriesIDSliceIterator { - return &SeriesIDSliceIterator{ids: ids} -} - -// SeriesIDSliceIterator iterates over a slice of series ids. -type SeriesIDSliceIterator struct { - ids []uint64 -} - -// Next returns the next series id in the slice. -func (itr *SeriesIDSliceIterator) Next() (SeriesIDElem, error) { - if len(itr.ids) == 0 { - return SeriesIDElem{}, nil - } - id := itr.ids[0] - itr.ids = itr.ids[1:] - return SeriesIDElem{SeriesID: id}, nil -} - -func (itr *SeriesIDSliceIterator) Close() error { return nil } - -// SeriesIDSet returns a set of all remaining ids. -func (itr *SeriesIDSliceIterator) SeriesIDSet() *SeriesIDSet { - s := NewSeriesIDSet() - for _, id := range itr.ids { - s.AddNoLock(id) - } - return s -} - -type SeriesIDIterators []SeriesIDIterator - -func (a SeriesIDIterators) Close() (err error) { - for i := range a { - if e := a[i].Close(); e != nil && err == nil { - err = e - } - } - return err -} - -func (a SeriesIDIterators) filterNonNil() []SeriesIDIterator { - other := make([]SeriesIDIterator, 0, len(a)) - for _, itr := range a { - if itr == nil { - continue - } - other = append(other, itr) - } - return other -} - -// seriesQueryAdapterIterator adapts SeriesIDIterator to an influxql.Iterator. -type seriesQueryAdapterIterator struct { - once sync.Once - sfile *SeriesFile - itr SeriesIDIterator - fieldset *MeasurementFieldSet - opt query.IteratorOptions - - point query.FloatPoint // reusable point -} - -// NewSeriesQueryAdapterIterator returns a new instance of SeriesQueryAdapterIterator. -func NewSeriesQueryAdapterIterator(sfile *SeriesFile, itr SeriesIDIterator, fieldset *MeasurementFieldSet, opt query.IteratorOptions) query.Iterator { - return &seriesQueryAdapterIterator{ - sfile: sfile, - itr: itr, - fieldset: fieldset, - point: query.FloatPoint{ - Aux: make([]interface{}, len(opt.Aux)), - }, - opt: opt, - } -} - -// Stats returns stats about the points processed. -func (itr *seriesQueryAdapterIterator) Stats() query.IteratorStats { return query.IteratorStats{} } - -// Close closes the iterator. -func (itr *seriesQueryAdapterIterator) Close() error { - itr.once.Do(func() { - itr.itr.Close() - }) - return nil -} - -// Next emits the next point in the iterator. -func (itr *seriesQueryAdapterIterator) Next() (*query.FloatPoint, error) { - for { - // Read next series element. - e, err := itr.itr.Next() - if err != nil { - return nil, err - } else if e.SeriesID == 0 { - return nil, nil - } - - // Skip if key has been tombstoned. - seriesKey := itr.sfile.SeriesKey(e.SeriesID) - if len(seriesKey) == 0 { - continue - } - - // Convert to a key. - name, tags := ParseSeriesKey(seriesKey) - key := string(models.MakeKey(name, tags)) - - // Write auxiliary fields. 
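		// Only the "key" auxiliary field is filled in below; any other
		// requested aux column is left at its zero value in the reusable point.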
- for i, f := range itr.opt.Aux { - switch f.Val { - case "key": - itr.point.Aux[i] = key - } - } - return &itr.point, nil - } -} - -// filterUndeletedSeriesIDIterator returns all series which are not deleted. -type filterUndeletedSeriesIDIterator struct { - sfile *SeriesFile - itr SeriesIDIterator -} - -// FilterUndeletedSeriesIDIterator returns an iterator which filters all deleted series. -func FilterUndeletedSeriesIDIterator(sfile *SeriesFile, itr SeriesIDIterator) SeriesIDIterator { - if itr == nil { - return nil - } - return &filterUndeletedSeriesIDIterator{sfile: sfile, itr: itr} -} - -func (itr *filterUndeletedSeriesIDIterator) Close() error { - return itr.itr.Close() -} - -func (itr *filterUndeletedSeriesIDIterator) Next() (SeriesIDElem, error) { - for { - e, err := itr.itr.Next() - if err != nil { - return SeriesIDElem{}, err - } else if e.SeriesID == 0 { - return SeriesIDElem{}, nil - } else if itr.sfile.IsDeleted(e.SeriesID) { - continue - } - return e, nil - } -} - -// seriesIDExprIterator is an iterator that attaches an associated expression. -type seriesIDExprIterator struct { - itr SeriesIDIterator - expr influxql.Expr -} - -// newSeriesIDExprIterator returns a new instance of seriesIDExprIterator. -func newSeriesIDExprIterator(itr SeriesIDIterator, expr influxql.Expr) SeriesIDIterator { - if itr == nil { - return nil - } - - return &seriesIDExprIterator{ - itr: itr, - expr: expr, - } -} - -func (itr *seriesIDExprIterator) Close() error { - return itr.itr.Close() -} - -// Next returns the next element in the iterator. -func (itr *seriesIDExprIterator) Next() (SeriesIDElem, error) { - elem, err := itr.itr.Next() - if err != nil { - return SeriesIDElem{}, err - } else if elem.SeriesID == 0 { - return SeriesIDElem{}, nil - } - elem.Expr = itr.expr - return elem, nil -} - -// MergeSeriesIDIterators returns an iterator that merges a set of iterators. -// Iterators that are first in the list take precedence and a deletion by those -// early iterators will invalidate elements by later iterators. -func MergeSeriesIDIterators(itrs ...SeriesIDIterator) SeriesIDIterator { - if n := len(itrs); n == 0 { - return nil - } else if n == 1 { - return itrs[0] - } - itrs = SeriesIDIterators(itrs).filterNonNil() - - // Merge as series id sets, if available. - if a := NewSeriesIDSetIterators(itrs); a != nil { - sets := make([]*SeriesIDSet, len(a)) - for i := range a { - sets[i] = a[i].SeriesIDSet() - } - - ss := NewSeriesIDSet() - ss.Merge(sets...) - SeriesIDIterators(itrs).Close() - return NewSeriesIDSetIterator(ss) - } - - return &seriesIDMergeIterator{ - buf: make([]SeriesIDElem, len(itrs)), - itrs: itrs, - } -} - -// seriesIDMergeIterator is an iterator that merges multiple iterators together. -type seriesIDMergeIterator struct { - buf []SeriesIDElem - itrs []SeriesIDIterator -} - -func (itr *seriesIDMergeIterator) Close() error { - SeriesIDIterators(itr.itrs).Close() - return nil -} - -// Next returns the element with the next lowest name/tags across the iterators. -func (itr *seriesIDMergeIterator) Next() (SeriesIDElem, error) { - // Find next lowest id amongst the buffers. - var elem SeriesIDElem - for i := range itr.buf { - buf := &itr.buf[i] - - // Fill buffer. - if buf.SeriesID == 0 { - elem, err := itr.itrs[i].Next() - if err != nil { - return SeriesIDElem{}, nil - } else if elem.SeriesID == 0 { - continue - } - itr.buf[i] = elem - } - - if elem.SeriesID == 0 || buf.SeriesID < elem.SeriesID { - elem = *buf - } - } - - // Return EOF if no elements remaining. 
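	// (A zero SeriesID is the EOF sentinel used throughout these iterators, so
	// an empty SeriesIDElem with a nil error means the merge is exhausted.)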
- if elem.SeriesID == 0 { - return SeriesIDElem{}, nil - } - - // Clear matching buffers. - for i := range itr.buf { - if itr.buf[i].SeriesID == elem.SeriesID { - itr.buf[i].SeriesID = 0 - } - } - return elem, nil -} - -// IntersectSeriesIDIterators returns an iterator that only returns series which -// occur in both iterators. If both series have associated expressions then -// they are combined together. -func IntersectSeriesIDIterators(itr0, itr1 SeriesIDIterator) SeriesIDIterator { - if itr0 == nil || itr1 == nil { - if itr0 != nil { - itr0.Close() - } - if itr1 != nil { - itr1.Close() - } - return nil - } - - // Create series id set, if available. - if a := NewSeriesIDSetIterators([]SeriesIDIterator{itr0, itr1}); a != nil { - ss := a[0].SeriesIDSet().And(a[1].SeriesIDSet()) - // `a` holds references to itr0/itr1 so itr0/itr1 should not be closed when `a` is still in use - itr0.Close() - itr1.Close() - return NewSeriesIDSetIterator(ss) - } - - return &seriesIDIntersectIterator{itrs: [2]SeriesIDIterator{itr0, itr1}} -} - -// seriesIDIntersectIterator is an iterator that merges two iterators together. -type seriesIDIntersectIterator struct { - buf [2]SeriesIDElem - itrs [2]SeriesIDIterator -} - -func (itr *seriesIDIntersectIterator) Close() (err error) { - if e := itr.itrs[0].Close(); e != nil && err == nil { - err = e - } - if e := itr.itrs[1].Close(); e != nil && err == nil { - err = e - } - return err -} - -// Next returns the next element which occurs in both iterators. -func (itr *seriesIDIntersectIterator) Next() (_ SeriesIDElem, err error) { - for { - // Fill buffers. - if itr.buf[0].SeriesID == 0 { - if itr.buf[0], err = itr.itrs[0].Next(); err != nil { - return SeriesIDElem{}, err - } - } - if itr.buf[1].SeriesID == 0 { - if itr.buf[1], err = itr.itrs[1].Next(); err != nil { - return SeriesIDElem{}, err - } - } - - // Exit if either buffer is still empty. - if itr.buf[0].SeriesID == 0 || itr.buf[1].SeriesID == 0 { - return SeriesIDElem{}, nil - } - - // Skip if both series are not equal. - if a, b := itr.buf[0].SeriesID, itr.buf[1].SeriesID; a < b { - itr.buf[0].SeriesID = 0 - continue - } else if a > b { - itr.buf[1].SeriesID = 0 - continue - } - - // Merge series together if equal. - elem := itr.buf[0] - - // Attach expression. - expr0 := itr.buf[0].Expr - expr1 := itr.buf[1].Expr - if expr0 == nil { - elem.Expr = expr1 - } else if expr1 == nil { - elem.Expr = expr0 - } else { - elem.Expr = influxql.Reduce(&influxql.BinaryExpr{ - Op: influxql.AND, - LHS: expr0, - RHS: expr1, - }, nil) - } - - itr.buf[0].SeriesID, itr.buf[1].SeriesID = 0, 0 - return elem, nil - } -} - -// UnionSeriesIDIterators returns an iterator that returns series from both -// both iterators. If both series have associated expressions then they are -// combined together. -func UnionSeriesIDIterators(itr0, itr1 SeriesIDIterator) SeriesIDIterator { - // Return other iterator if either one is nil. - if itr0 == nil { - return itr1 - } else if itr1 == nil { - return itr0 - } - - // Create series id set, if available. - if a := NewSeriesIDSetIterators([]SeriesIDIterator{itr0, itr1}); a != nil { - ss := NewSeriesIDSet() - ss.Merge(a[0].SeriesIDSet(), a[1].SeriesIDSet()) - // `a` holds references to itr0/itr1 so itr0/itr1 should not be closed when `a` is still in use - itr0.Close() - itr1.Close() - return NewSeriesIDSetIterator(ss) - } - - return &seriesIDUnionIterator{itrs: [2]SeriesIDIterator{itr0, itr1}} -} - -// seriesIDUnionIterator is an iterator that unions two iterators together. 
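
// A minimal sketch of composing the set-style iterators in this file: wrap
// plain id slices, combine them, and drain the results. With these inputs the
// union yields 1, 2, 3 and the intersection yields 2. The helper name
// sketchSeriesIDSetOps is illustrative only.
func sketchSeriesIDSetOps() (union, intersection []uint64, err error) {
	a := NewSeriesIDSliceIterator([]uint64{1, 2})
	b := NewSeriesIDSliceIterator([]uint64{2, 3})
	if union, err = ReadAllSeriesIDIterator(UnionSeriesIDIterators(a, b)); err != nil {
		return nil, nil, err
	}

	// Build fresh inputs; the union above consumed (and closed) the originals.
	a = NewSeriesIDSliceIterator([]uint64{1, 2})
	b = NewSeriesIDSliceIterator([]uint64{2, 3})
	if intersection, err = ReadAllSeriesIDIterator(IntersectSeriesIDIterators(a, b)); err != nil {
		return nil, nil, err
	}
	return union, intersection, nil
}
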
-type seriesIDUnionIterator struct { - buf [2]SeriesIDElem - itrs [2]SeriesIDIterator -} - -func (itr *seriesIDUnionIterator) Close() (err error) { - if e := itr.itrs[0].Close(); e != nil && err == nil { - err = e - } - if e := itr.itrs[1].Close(); e != nil && err == nil { - err = e - } - return err -} - -// Next returns the next element which occurs in both iterators. -func (itr *seriesIDUnionIterator) Next() (_ SeriesIDElem, err error) { - // Fill buffers. - if itr.buf[0].SeriesID == 0 { - if itr.buf[0], err = itr.itrs[0].Next(); err != nil { - return SeriesIDElem{}, err - } - } - if itr.buf[1].SeriesID == 0 { - if itr.buf[1], err = itr.itrs[1].Next(); err != nil { - return SeriesIDElem{}, err - } - } - - // Return non-zero or lesser series. - if a, b := itr.buf[0].SeriesID, itr.buf[1].SeriesID; a == 0 && b == 0 { - return SeriesIDElem{}, nil - } else if b == 0 || (a != 0 && a < b) { - elem := itr.buf[0] - itr.buf[0].SeriesID = 0 - return elem, nil - } else if a == 0 || (b != 0 && a > b) { - elem := itr.buf[1] - itr.buf[1].SeriesID = 0 - return elem, nil - } - - // Attach element. - elem := itr.buf[0] - - // Attach expression. - expr0 := itr.buf[0].Expr - expr1 := itr.buf[1].Expr - if expr0 != nil && expr1 != nil { - elem.Expr = influxql.Reduce(&influxql.BinaryExpr{ - Op: influxql.OR, - LHS: expr0, - RHS: expr1, - }, nil) - } else { - elem.Expr = nil - } - - itr.buf[0].SeriesID, itr.buf[1].SeriesID = 0, 0 - return elem, nil -} - -// DifferenceSeriesIDIterators returns an iterator that only returns series which -// occur the first iterator but not the second iterator. -func DifferenceSeriesIDIterators(itr0, itr1 SeriesIDIterator) SeriesIDIterator { - if itr0 == nil && itr1 == nil { - return nil - } else if itr1 == nil { - return itr0 - } else if itr0 == nil { - itr1.Close() - return nil - } - - // Create series id set, if available. - if a := NewSeriesIDSetIterators([]SeriesIDIterator{itr0, itr1}); a != nil { - ss := a[0].SeriesIDSet().AndNot(a[1].SeriesIDSet()) - // `a` holds references to itr0/itr1 so itr0/itr1 should not be closed when `a` is still in use - itr0.Close() - itr1.Close() - return NewSeriesIDSetIterator(ss) - } - - return &seriesIDDifferenceIterator{itrs: [2]SeriesIDIterator{itr0, itr1}} -} - -// seriesIDDifferenceIterator is an iterator that merges two iterators together. -type seriesIDDifferenceIterator struct { - buf [2]SeriesIDElem - itrs [2]SeriesIDIterator -} - -func (itr *seriesIDDifferenceIterator) Close() (err error) { - if e := itr.itrs[0].Close(); e != nil && err == nil { - err = e - } - if e := itr.itrs[1].Close(); e != nil && err == nil { - err = e - } - return err -} - -// Next returns the next element which occurs only in the first iterator. -func (itr *seriesIDDifferenceIterator) Next() (_ SeriesIDElem, err error) { - for { - // Fill buffers. - if itr.buf[0].SeriesID == 0 { - if itr.buf[0], err = itr.itrs[0].Next(); err != nil { - return SeriesIDElem{}, err - } - } - if itr.buf[1].SeriesID == 0 { - if itr.buf[1], err = itr.itrs[1].Next(); err != nil { - return SeriesIDElem{}, err - } - } - - // Exit if first buffer is still empty. - if itr.buf[0].SeriesID == 0 { - return SeriesIDElem{}, nil - } else if itr.buf[1].SeriesID == 0 { - elem := itr.buf[0] - itr.buf[0].SeriesID = 0 - return elem, nil - } - - // Return first series if it's less. - // If second series is less then skip it. - // If both series are equal then skip both. 
- if a, b := itr.buf[0].SeriesID, itr.buf[1].SeriesID; a < b { - elem := itr.buf[0] - itr.buf[0].SeriesID = 0 - return elem, nil - } else if a > b { - itr.buf[1].SeriesID = 0 - continue - } else { - itr.buf[0].SeriesID, itr.buf[1].SeriesID = 0, 0 - continue - } - } -} - -// seriesPointIterator adapts SeriesIterator to an influxql.Iterator. -type seriesPointIterator struct { - once sync.Once - indexSet IndexSet - mitr MeasurementIterator - keys [][]byte - opt query.IteratorOptions - - point query.FloatPoint // reusable point -} - -// newSeriesPointIterator returns a new instance of seriesPointIterator. -func NewSeriesPointIterator(indexSet IndexSet, opt query.IteratorOptions) (_ query.Iterator, err error) { - // Only equality operators are allowed. - influxql.WalkFunc(opt.Condition, func(n influxql.Node) { - switch n := n.(type) { - case *influxql.BinaryExpr: - switch n.Op { - case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX, - influxql.OR, influxql.AND: - default: - err = errors.New("invalid tag comparison operator") - } - } - }) - if err != nil { - return nil, err - } - - mitr, err := indexSet.MeasurementIterator() - if err != nil { - return nil, err - } - - return &seriesPointIterator{ - indexSet: indexSet, - mitr: mitr, - point: query.FloatPoint{ - Aux: make([]interface{}, len(opt.Aux)), - }, - opt: opt, - }, nil -} - -// Stats returns stats about the points processed. -func (itr *seriesPointIterator) Stats() query.IteratorStats { return query.IteratorStats{} } - -// Close closes the iterator. -func (itr *seriesPointIterator) Close() (err error) { - itr.once.Do(func() { - if itr.mitr != nil { - err = itr.mitr.Close() - } - }) - return err -} - -// Next emits the next point in the iterator. -func (itr *seriesPointIterator) Next() (*query.FloatPoint, error) { - for { - // Read series keys for next measurement if no more keys remaining. - // Exit if there are no measurements remaining. - if len(itr.keys) == 0 { - m, err := itr.mitr.Next() - if err != nil { - return nil, err - } else if m == nil { - return nil, nil - } - - if err := itr.readSeriesKeys(m); err != nil { - return nil, err - } - continue - } - - name, tags := ParseSeriesKey(itr.keys[0]) - itr.keys = itr.keys[1:] - - // TODO(edd): It seems to me like this authorisation check should be - // further down in the index. At this point we're going to be filtering - // series that have already been materialised in the LogFiles and - // IndexFiles. - if itr.opt.Authorizer != nil && !itr.opt.Authorizer.AuthorizeSeriesRead(itr.indexSet.Database(), name, tags) { - continue - } - - // Convert to a key. - key := string(models.MakeKey(name, tags)) - - // Write auxiliary fields. - for i, f := range itr.opt.Aux { - switch f.Val { - case "key": - itr.point.Aux[i] = key - } - } - - return &itr.point, nil - } -} - -func (itr *seriesPointIterator) readSeriesKeys(name []byte) error { - sitr, err := itr.indexSet.MeasurementSeriesByExprIterator(name, itr.opt.Condition) - if err != nil { - return err - } else if sitr == nil { - return nil - } - defer sitr.Close() - - // Slurp all series keys. - itr.keys = itr.keys[:0] - for i := 0; ; i++ { - elem, err := sitr.Next() - if err != nil { - return err - } else if elem.SeriesID == 0 { - break - } - - // Periodically check for interrupt. - if i&0xFF == 0xFF { - select { - case <-itr.opt.InterruptCh: - return itr.Close() - default: - } - } - - key := itr.indexSet.SeriesFile.SeriesKey(elem.SeriesID) - if len(key) == 0 { - continue - } - itr.keys = append(itr.keys, key) - } - - // Sort keys. 
- sort.Sort(seriesKeys(itr.keys)) - return nil -} - -// MeasurementIterator represents a iterator over a list of measurements. -type MeasurementIterator interface { - Close() error - Next() ([]byte, error) -} - -type MeasurementIterators []MeasurementIterator - -func (a MeasurementIterators) Close() (err error) { - for i := range a { - if e := a[i].Close(); e != nil && err == nil { - err = e - } - } - return err -} - -type MeasurementSliceIterator interface { - MeasurementIterator - UnderlyingSlice() [][]byte -} - -type measurementSliceIterator struct { - names [][]byte -} - -// NewMeasurementSliceIterator returns an iterator over a slice of in-memory measurement names. -func NewMeasurementSliceIterator(names [][]byte) *measurementSliceIterator { - return &measurementSliceIterator{names: names} -} - -func (itr *measurementSliceIterator) Close() (err error) { return nil } - -func (itr *measurementSliceIterator) Next() (name []byte, err error) { - if len(itr.names) == 0 { - return nil, nil - } - name, itr.names = itr.names[0], itr.names[1:] - return name, nil -} - -func (itr *measurementSliceIterator) UnderlyingSlice() [][]byte { - return itr.names -} - -// fileMeasurementSliceIterator is designed to allow a tag value slice -// iterator to use memory from a memory-mapped file, pinning it -// with the underlying file iterators -type fileMeasurementSliceIterator struct { - measurementSliceIterator - fileIterators MeasurementIterators -} - -func (itr *fileMeasurementSliceIterator) Close() error { - e1 := itr.fileIterators.Close() - e2 := itr.measurementSliceIterator.Close() - if e1 != nil { - return e1 - } else { - return e2 - } -} - -func newFileMeasurementSliceIterator(names [][]byte, itrs MeasurementIterators) *fileMeasurementSliceIterator { - return &fileMeasurementSliceIterator{ - measurementSliceIterator: measurementSliceIterator{ - names: names, - }, - fileIterators: itrs, - } -} - -// MergeMeasurementIterators returns an iterator that merges a set of iterators. -// Iterators that are first in the list take precedence and a deletion by those -// early iterators will invalidate elements by later iterators. -func MergeMeasurementIterators(itrs ...MeasurementIterator) MeasurementIterator { - if len(itrs) == 0 { - return nil - } else if len(itrs) == 1 { - return itrs[0] - } - - return &measurementMergeIterator{ - buf: make([][]byte, len(itrs)), - itrs: itrs, - } -} - -type measurementMergeIterator struct { - buf [][]byte - itrs []MeasurementIterator -} - -func (itr *measurementMergeIterator) Close() (err error) { - for i := range itr.itrs { - if e := itr.itrs[i].Close(); e != nil && err == nil { - err = e - } - } - return err -} - -// Next returns the element with the next lowest name across the iterators. -// -// If multiple iterators contain the same name then the first is returned -// and the remaining ones are skipped. -func (itr *measurementMergeIterator) Next() (_ []byte, err error) { - // Find next lowest name amongst the buffers. - var name []byte - for i, buf := range itr.buf { - // Fill buffer if empty. - if buf == nil { - if buf, err = itr.itrs[i].Next(); err != nil { - return nil, err - } else if buf != nil { - itr.buf[i] = buf - } else { - continue - } - } - - // Find next lowest name. - if name == nil || bytes.Compare(itr.buf[i], name) == -1 { - name = itr.buf[i] - } - } - - // Return nil if no elements remaining. - if name == nil { - return nil, nil - } - - // Merge all elements together and clear buffers. 
- for i, buf := range itr.buf { - if buf == nil || !bytes.Equal(buf, name) { - continue - } - itr.buf[i] = nil - } - return name, nil -} - -// TagKeyIterator represents a iterator over a list of tag keys. -type TagKeyIterator interface { - Close() error - Next() ([]byte, error) -} - -type TagKeyIterators []TagKeyIterator - -func (a TagKeyIterators) Close() (err error) { - for i := range a { - if e := a[i].Close(); e != nil && err == nil { - err = e - } - } - return err -} - -// NewTagKeySliceIterator returns a TagKeyIterator that iterates over a slice. -func NewTagKeySliceIterator(keys [][]byte) *tagKeySliceIterator { - return &tagKeySliceIterator{keys: keys} -} - -// tagKeySliceIterator iterates over a slice of tag keys. -type tagKeySliceIterator struct { - keys [][]byte -} - -// Next returns the next tag key in the slice. -func (itr *tagKeySliceIterator) Next() ([]byte, error) { - if len(itr.keys) == 0 { - return nil, nil - } - key := itr.keys[0] - itr.keys = itr.keys[1:] - return key, nil -} - -func (itr *tagKeySliceIterator) Close() error { return nil } - -// MergeTagKeyIterators returns an iterator that merges a set of iterators. -func MergeTagKeyIterators(itrs ...TagKeyIterator) TagKeyIterator { - if len(itrs) == 0 { - return nil - } else if len(itrs) == 1 { - return itrs[0] - } - - return &tagKeyMergeIterator{ - buf: make([][]byte, len(itrs)), - itrs: itrs, - } -} - -type tagKeyMergeIterator struct { - buf [][]byte - itrs []TagKeyIterator -} - -func (itr *tagKeyMergeIterator) Close() error { - for i := range itr.itrs { - itr.itrs[i].Close() - } - return nil -} - -// Next returns the element with the next lowest key across the iterators. -// -// If multiple iterators contain the same key then the first is returned -// and the remaining ones are skipped. -func (itr *tagKeyMergeIterator) Next() (_ []byte, err error) { - // Find next lowest key amongst the buffers. - var key []byte - for i, buf := range itr.buf { - // Fill buffer. - if buf == nil { - if buf, err = itr.itrs[i].Next(); err != nil { - return nil, err - } else if buf != nil { - itr.buf[i] = buf - } else { - continue - } - } - - // Find next lowest key. - if key == nil || bytes.Compare(buf, key) == -1 { - key = buf - } - } - - // Return nil if no elements remaining. - if key == nil { - return nil, nil - } - - // Merge elements and clear buffers. - for i, buf := range itr.buf { - if buf == nil || !bytes.Equal(buf, key) { - continue - } - itr.buf[i] = nil - } - return key, nil -} - -// TagValueIterator represents a iterator over a list of tag values. -type TagValueIterator interface { - Close() error - Next() ([]byte, error) -} - -type TagValueIterators []TagValueIterator - -func (a TagValueIterators) Close() (err error) { - for i := range a { - if e := a[i].Close(); e != nil && err == nil { - err = e - } - } - return err -} - -// NewTagValueSliceIterator returns a TagValueIterator that iterates over a slice. -func NewTagValueSliceIterator(values [][]byte) *tagValueSliceIterator { - return &tagValueSliceIterator{values: values} -} - -// tagValueSliceIterator iterates over a slice of tag values. -type tagValueSliceIterator struct { - values [][]byte -} - -// Next returns the next tag value in the slice. 
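The measurement and tag-key merge iterators above all use the same k-way merge: one lookahead buffer per input, emit the lowest buffered name, then clear every buffer holding that name so a value present in several indexes is returned exactly once. A minimal stand-alone version of that loop, operating on plain sorted slices rather than the iterator interfaces:

```go
package main

import (
	"bytes"
	"fmt"
)

// mergeSorted merges already-sorted streams of names the way
// measurementMergeIterator does: refill one lookahead slot per input, emit the
// lowest buffered name, then clear every slot holding that name so duplicates
// collapse to a single result.
func mergeSorted(inputs ...[][]byte) [][]byte {
	buf := make([][]byte, len(inputs))
	var out [][]byte
	for {
		var lowest []byte
		for i := range inputs {
			// Refill an empty slot from its input stream.
			if buf[i] == nil && len(inputs[i]) > 0 {
				buf[i], inputs[i] = inputs[i][0], inputs[i][1:]
			}
			if buf[i] != nil && (lowest == nil || bytes.Compare(buf[i], lowest) < 0) {
				lowest = buf[i]
			}
		}
		if lowest == nil {
			return out // every input exhausted
		}
		out = append(out, lowest)
		for i := range buf {
			if bytes.Equal(buf[i], lowest) {
				buf[i] = nil
			}
		}
	}
}

func main() {
	a := [][]byte{[]byte("cpu"), []byte("disk"), []byte("mem")}
	b := [][]byte{[]byte("cpu"), []byte("net")}
	for _, name := range mergeSorted(a, b) {
		fmt.Print(string(name), " ") // cpu disk mem net
	}
	fmt.Println()
}
```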
-func (itr *tagValueSliceIterator) Next() ([]byte, error) { - if len(itr.values) == 0 { - return nil, nil - } - value := itr.values[0] - itr.values = itr.values[1:] - return value, nil -} - -func (itr *tagValueSliceIterator) Close() error { return nil } - -// MergeTagValueIterators returns an iterator that merges a set of iterators. -func MergeTagValueIterators(itrs ...TagValueIterator) TagValueIterator { - if len(itrs) == 0 { - return nil - } else if len(itrs) == 1 { - return itrs[0] - } - - return &tagValueMergeIterator{ - buf: make([][]byte, len(itrs)), - itrs: itrs, - } -} - -type tagValueMergeIterator struct { - buf [][]byte - itrs []TagValueIterator -} - -func (itr *tagValueMergeIterator) Close() error { - for i := range itr.itrs { - itr.itrs[i].Close() - } - return nil -} - -// Next returns the element with the next lowest value across the iterators. -// -// If multiple iterators contain the same value then the first is returned -// and the remaining ones are skipped. -func (itr *tagValueMergeIterator) Next() (_ []byte, err error) { - // Find next lowest value amongst the buffers. - var value []byte - for i, buf := range itr.buf { - // Fill buffer. - if buf == nil { - if buf, err = itr.itrs[i].Next(); err != nil { - return nil, err - } else if buf != nil { - itr.buf[i] = buf - } else { - continue - } - } - - // Find next lowest value. - if value == nil || bytes.Compare(buf, value) == -1 { - value = buf - } - } - - // Return nil if no elements remaining. - if value == nil { - return nil, nil - } - - // Merge elements and clear buffers. - for i, buf := range itr.buf { - if buf == nil || !bytes.Equal(buf, value) { - continue - } - itr.buf[i] = nil - } - return value, nil -} - -// IndexSet represents a list of indexes, all belonging to one database. -type IndexSet struct { - Indexes []Index // The set of indexes comprising this IndexSet. - SeriesFile *SeriesFile // The Series File associated with the db for this set. - fieldSets []*MeasurementFieldSet // field sets for _all_ indexes in this set's DB. -} - -// Database returns the database name of the first index. -func (is IndexSet) Database() string { - if len(is.Indexes) == 0 { - return "" - } - return is.Indexes[0].Database() -} - -// HasField determines if any of the field sets on the set of indexes in the -// IndexSet have the provided field for the provided measurement. -func (is IndexSet) HasField(measurement []byte, field string) bool { - if len(is.Indexes) == 0 { - return false - } - - if len(is.fieldSets) == 0 { - // field sets may not have been initialised yet. - is.fieldSets = make([]*MeasurementFieldSet, 0, len(is.Indexes)) - for _, idx := range is.Indexes { - is.fieldSets = append(is.fieldSets, idx.FieldSet()) - } - } - - for _, fs := range is.fieldSets { - if fs.Fields(measurement).HasField(field) { - return true - } - } - return false -} - -// MeasurementNamesByExpr returns a slice of measurement names matching the -// provided condition. If no condition is provided then all names are returned. -func (is IndexSet) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) (_ [][]byte, err error) { - release := is.SeriesFile.Retain() - defer release() - - // Return filtered list if expression exists. 
- if expr != nil { - itr, returnErr := is.measurementNamesByExpr(auth, expr) - if returnErr != nil { - return nil, returnErr - } else if itr == nil { - return nil, nil - } - defer func() { - if e := itr.Close(); err == nil { - err = e - } - }() - return slices.CopyChunkedByteSlices(itr.UnderlyingSlice(), 1000), nil - } - - itr, err := is.measurementIterator() - if err != nil { - return nil, err - } else if itr == nil { - return nil, nil - } - defer func() { - if e := itr.Close(); err == nil { - err = e - } - }() - - var names [][]byte - // Iterate over all measurements if no condition exists. - for { - e, err := itr.Next() - if err != nil { - return nil, err - } else if e == nil { - break - } - - // Determine if there exists at least one authorised series for the - // measurement name. - if is.measurementAuthorizedSeries(auth, e, nil) { - names = append(names, e) - } - } - return slices.CopyChunkedByteSlices(names, 1000), nil -} - -func (is IndexSet) measurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) (MeasurementSliceIterator, error) { - if expr == nil { - return nil, nil - } - - switch e := expr.(type) { - case *influxql.BinaryExpr: - switch e.Op { - case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX: - tag, ok := e.LHS.(*influxql.VarRef) - if !ok { - return nil, fmt.Errorf("left side of '%s' must be a tag key", e.Op.String()) - } - - // Retrieve value or regex expression from RHS. - var value string - var regex *regexp.Regexp - if influxql.IsRegexOp(e.Op) { - re, ok := e.RHS.(*influxql.RegexLiteral) - if !ok { - return nil, fmt.Errorf("right side of '%s' must be a regular expression", e.Op.String()) - } - regex = re.Val - } else { - s, ok := e.RHS.(*influxql.StringLiteral) - if !ok { - return nil, fmt.Errorf("right side of '%s' must be a tag value string", e.Op.String()) - } - value = s.Val - } - - // Match on name, if specified. - if tag.Val == "_name" { - return is.measurementNamesByNameFilter(auth, e.Op, value, regex) - } else if influxql.IsSystemName(tag.Val) { - return nil, nil - } - return is.measurementNamesByTagFilter(auth, e.Op, tag.Val, value, regex) - - case influxql.OR, influxql.AND: - - lhs, err := is.measurementNamesByExpr(auth, e.LHS) - if err != nil { - return nil, err - } - rhs, err := is.measurementNamesByExpr(auth, e.RHS) - if err != nil { - lhs.Close() - return nil, err - } - - mis := MeasurementIterators{lhs, rhs} - if e.Op == influxql.OR { - return newFileMeasurementSliceIterator(bytesutil.Union(lhs.UnderlyingSlice(), rhs.UnderlyingSlice()), mis), nil - } - return newFileMeasurementSliceIterator(bytesutil.Intersect(lhs.UnderlyingSlice(), rhs.UnderlyingSlice()), mis), nil - - default: - return nil, fmt.Errorf("invalid tag comparison operator") - } - - case *influxql.ParenExpr: - return is.measurementNamesByExpr(auth, e.Expr) - default: - return nil, fmt.Errorf("invalid measurement expression %#v", expr) - } -} - -// measurementNamesByNameFilter returns matching measurement names in sorted order. 
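`measurementNamesByExpr` is a small recursive descent over the influxql condition: comparison leaves resolve to a set of measurement names, AND intersects the child sets, OR unions them, and `_name`/system tag keys are special-cased. The toy evaluator below mirrors only the leaf/AND/OR recursion against an in-memory index; the types are local stand-ins (not the influxql AST), and authorization, regex operators and the `_name` path are deliberately left out.

```go
package main

import (
	"fmt"
	"sort"
)

// Local stand-ins for the influxql AST: a tag-equality leaf and an AND/OR node.
type expr interface{}

type tagEq struct{ key, value string }

type binary struct {
	op       string // "AND" or "OR"
	lhs, rhs expr
}

// index maps measurement name -> tag key -> set of tag values.
type index map[string]map[string]map[string]bool

// namesByExpr mirrors the recursion above: leaves resolve to the measurements
// containing the tag pair, AND intersects child results, OR unions them.
func (ix index) namesByExpr(e expr) []string {
	switch e := e.(type) {
	case tagEq:
		var names []string
		for m, tags := range ix {
			if tags[e.key][e.value] {
				names = append(names, m)
			}
		}
		sort.Strings(names)
		return names
	case binary:
		l, r := ix.namesByExpr(e.lhs), ix.namesByExpr(e.rhs)
		if e.op == "OR" {
			return union(l, r)
		}
		return intersect(l, r)
	}
	return nil
}

func union(a, b []string) []string {
	seen := map[string]bool{}
	for _, s := range append(a, b...) {
		seen[s] = true
	}
	out := make([]string, 0, len(seen))
	for s := range seen {
		out = append(out, s)
	}
	sort.Strings(out)
	return out
}

func intersect(a, b []string) []string {
	inA := map[string]bool{}
	for _, s := range a {
		inA[s] = true
	}
	var out []string
	for _, s := range b {
		if inA[s] {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	ix := index{
		"cpu":  {"host": {"a": true}, "region": {"us": true}},
		"mem":  {"host": {"a": true}},
		"disk": {"region": {"us": true}},
	}
	fmt.Println(ix.namesByExpr(binary{op: "AND", lhs: tagEq{"host", "a"}, rhs: tagEq{"region", "us"}})) // [cpu]
	fmt.Println(ix.namesByExpr(binary{op: "OR", lhs: tagEq{"host", "a"}, rhs: tagEq{"region", "us"}}))  // [cpu disk mem]
}
```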
-func (is IndexSet) measurementNamesByNameFilter(auth query.Authorizer, op influxql.Token, val string, regex *regexp.Regexp) (MeasurementSliceIterator, error) { - itr, err := is.measurementIterator() - if err != nil { - return nil, err - } else if itr == nil { - return nil, nil - } - - var names [][]byte - for { - e, err := itr.Next() - if err != nil { - itr.Close() - return nil, err - } else if e == nil { - break - } - - var matched bool - switch op { - case influxql.EQ: - matched = string(e) == val - case influxql.NEQ: - matched = string(e) != val - case influxql.EQREGEX: - matched = regex.Match(e) - case influxql.NEQREGEX: - matched = !regex.Match(e) - } - - if matched && is.measurementAuthorizedSeries(auth, e, nil) { - names = append(names, e) - } - } - bytesutil.Sort(names) - return newFileMeasurementSliceIterator(names, MeasurementIterators{itr}), nil -} - -// MeasurementNamesByPredicate returns a slice of measurement names matching the -// provided condition. If no condition is provided then all names are returned. -// This behaves differently from MeasurementNamesByExpr because it will -// return measurements using flux predicates. -func (is IndexSet) MeasurementNamesByPredicate(auth query.Authorizer, expr influxql.Expr) (_ [][]byte, err error) { - release := is.SeriesFile.Retain() - defer release() - - // Return filtered list if expression exists. - if expr != nil { - itr, returnErr := is.measurementNamesByPredicate(auth, expr) - if returnErr != nil { - return nil, returnErr - } - if itr != nil { - defer func() { - if e := itr.Close(); err == nil { - err = e - } - }() - } - return slices.CopyChunkedByteSlices(itr.UnderlyingSlice(), 1000), nil - } - - itr, err := is.measurementIterator() - if err != nil { - return nil, err - } else if itr == nil { - return nil, nil - } - defer func() { - if e := itr.Close(); err == nil { - err = e - } - }() - - var names [][]byte - // Iterate over all measurements if no condition exists. - for { - e, err := itr.Next() - if err != nil { - return nil, err - } else if e == nil { - break - } - - // Determine if there exists at least one authorised series for the - // measurement name. - if is.measurementAuthorizedSeries(auth, e, nil) { - names = append(names, e) - } - } - return slices.CopyChunkedByteSlices(names, 1000), nil -} - -func (is IndexSet) measurementNamesByPredicate(auth query.Authorizer, expr influxql.Expr) (MeasurementSliceIterator, error) { - if expr == nil { - return nil, nil - } - - switch e := expr.(type) { - case *influxql.BinaryExpr: - switch e.Op { - case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX: - tag, ok := e.LHS.(*influxql.VarRef) - if !ok { - return nil, fmt.Errorf("left side of '%s' must be a tag key", e.Op.String()) - } - - // Retrieve value or regex expression from RHS. - var value string - var regex *regexp.Regexp - if influxql.IsRegexOp(e.Op) { - re, ok := e.RHS.(*influxql.RegexLiteral) - if !ok { - return nil, fmt.Errorf("right side of '%s' must be a regular expression", e.Op.String()) - } - regex = re.Val - } else { - s, ok := e.RHS.(*influxql.StringLiteral) - if !ok { - return nil, fmt.Errorf("right side of '%s' must be a tag value string", e.Op.String()) - } - value = s.Val - } - - // Match on name, if specified. 
- if tag.Val == "_name" { - return is.measurementNamesByNameFilter(auth, e.Op, value, regex) - } else if influxql.IsSystemName(tag.Val) { - return nil, nil - } - return is.measurementNamesByTagPredicate(auth, e.Op, tag.Val, value, regex) - - case influxql.OR, influxql.AND: - lhs, err := is.measurementNamesByPredicate(auth, e.LHS) - if err != nil { - return nil, err - } - rhs, err := is.measurementNamesByPredicate(auth, e.RHS) - if err != nil { - lhs.Close() - return nil, err - } - mis := MeasurementIterators{lhs, rhs} - - if e.Op == influxql.OR { - return newFileMeasurementSliceIterator(bytesutil.Union(lhs.UnderlyingSlice(), rhs.UnderlyingSlice()), mis), nil - } - return newFileMeasurementSliceIterator(bytesutil.Intersect(lhs.UnderlyingSlice(), rhs.UnderlyingSlice()), mis), nil - - default: - return nil, fmt.Errorf("invalid tag comparison operator") - } - - case *influxql.ParenExpr: - return is.measurementNamesByPredicate(auth, e.Expr) - default: - return nil, fmt.Errorf("%#v", expr) - } -} - -func (is IndexSet) measurementNamesByTagFilter(auth query.Authorizer, op influxql.Token, key, val string, regex *regexp.Regexp) (MeasurementSliceIterator, error) { - var names [][]byte - failed := true - - mitr, err := is.measurementIterator() - if err != nil { - return nil, err - } else if mitr == nil { - return nil, nil - } - defer func() { - if failed { - mitr.Close() - } - }() - - // valEqual determines if the provided []byte is equal to the tag value - // to be filtered on. - valEqual := regex.Match - if op == influxql.EQ || op == influxql.NEQ { - vb := []byte(val) - valEqual = func(b []byte) bool { return bytes.Equal(vb, b) } - } - - var tagMatch bool - var authorized bool - for { - me, err := mitr.Next() - if err != nil { - return nil, err - } else if me == nil { - break - } - // If the measurement doesn't have the tag key, then it won't be considered. - if ok, err := is.hasTagKey(me, []byte(key)); err != nil { - return nil, err - } else if !ok { - continue - } - tagMatch = false - // Authorization must be explicitly granted when an authorizer is present. - authorized = query.AuthorizerIsOpen(auth) - - vitr, err := is.tagValueIterator(me, []byte(key)) - if err != nil { - return nil, err - } - - if vitr != nil { - defer vitr.Close() - for { - ve, err := vitr.Next() - if err != nil { - return nil, err - } else if ve == nil { - break - } - if !valEqual(ve) { - continue - } - - tagMatch = true - if query.AuthorizerIsOpen(auth) { - break - } - - // When an authorizer is present, the measurement should be - // included only if one of it's series is authorized. - sitr, err := is.tagValueSeriesIDIterator(me, []byte(key), ve) - if err != nil { - return nil, err - } else if sitr == nil { - continue - } - defer sitr.Close() - sitr = FilterUndeletedSeriesIDIterator(is.SeriesFile, sitr) - - // Locate a series with this matching tag value that's authorized. - for { - se, err := sitr.Next() - if err != nil { - return nil, err - } - - if se.SeriesID == 0 { - break - } - - name, tags := is.SeriesFile.Series(se.SeriesID) - if auth.AuthorizeSeriesRead(is.Database(), name, tags) { - authorized = true - break - } - } - - if err := sitr.Close(); err != nil { - return nil, err - } - - if tagMatch && authorized { - // The measurement can definitely be included or rejected. - break - } - } - if err := vitr.Close(); err != nil { - return nil, err - } - } - - // For negation operators, to determine if the measurement is authorized, - // an authorized series belonging to the measurement must be located. 
- // Then, the measurement can be added iff !tagMatch && authorized. - if (op == influxql.NEQ || op == influxql.NEQREGEX) && !tagMatch { - authorized = is.measurementAuthorizedSeries(auth, me, nil) - } - - // tags match | operation is EQ | measurement matches - // -------------------------------------------------- - // True | True | True - // True | False | False - // False | True | False - // False | False | True - if tagMatch == (op == influxql.EQ || op == influxql.EQREGEX) && authorized { - names = append(names, me) - continue - } - } - - bytesutil.Sort(names) - failed = false - return newFileMeasurementSliceIterator(names, MeasurementIterators{mitr}), nil -} - -func (is IndexSet) measurementNamesByTagPredicate(auth query.Authorizer, op influxql.Token, key, val string, regex *regexp.Regexp) (MeasurementSliceIterator, error) { - var names [][]byte - failed := true - - mitr, err := is.measurementIterator() - if err != nil { - return nil, err - } else if mitr == nil { - return nil, nil - } - defer func() { - if failed { - mitr.Close() - } - }() - - var checkMeasurement func(auth query.Authorizer, me []byte) (bool, error) - switch op { - case influxql.EQ: - checkMeasurement = func(auth query.Authorizer, me []byte) (bool, error) { - return is.measurementHasTagValue(auth, me, []byte(key), []byte(val)) - } - case influxql.NEQ: - checkMeasurement = func(auth query.Authorizer, me []byte) (bool, error) { - // If there is an authorized series in this measurement and that series - // does not contain the tag key/value. - ok := is.measurementAuthorizedSeries(auth, me, func(tags models.Tags) bool { - return tags.GetString(key) == val - }) - return ok, nil - } - case influxql.EQREGEX: - checkMeasurement = func(auth query.Authorizer, me []byte) (bool, error) { - return is.measurementHasTagValueRegex(auth, me, []byte(key), regex) - } - case influxql.NEQREGEX: - checkMeasurement = func(auth query.Authorizer, me []byte) (bool, error) { - // If there is an authorized series in this measurement and that series - // does not contain the tag key/value. - ok := is.measurementAuthorizedSeries(auth, me, func(tags models.Tags) bool { - return regex.MatchString(tags.GetString(key)) - }) - return ok, nil - } - default: - return nil, fmt.Errorf("unsupported operand: %s", op) - } - - for { - me, err := mitr.Next() - if err != nil { - return nil, err - } else if me == nil { - break - } - - ok, err := checkMeasurement(auth, me) - if err != nil { - return nil, err - } else if ok { - names = append(names, me) - } - } - - bytesutil.Sort(names) - failed = false - return newFileMeasurementSliceIterator(names, MeasurementIterators{mitr}), nil -} - -// measurementAuthorizedSeries determines if the measurement contains a series -// that is authorized to be read. 
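The include/exclude rule encoded by the truth table above can be read as a single predicate. The helper below is not part of the package, just the same rule pulled out for readability (`opIsEqual` means the operator was EQ or EQREGEX):

```go
package main

import "fmt"

// measurementMatches restates the inclusion rule from the table above: keep the
// measurement when the tag-match result agrees with the operator's polarity
// (opIsEqual is true for EQ/EQREGEX, false for NEQ/NEQREGEX) and at least one
// of the measurement's series is readable by the caller.
func measurementMatches(tagMatch, opIsEqual, authorized bool) bool {
	return tagMatch == opIsEqual && authorized
}

func main() {
	fmt.Println(measurementMatches(true, true, true))    // tag matched, op EQ  -> true
	fmt.Println(measurementMatches(true, false, true))   // tag matched, op NEQ -> false
	fmt.Println(measurementMatches(false, false, true))  // no match, op NEQ    -> true
	fmt.Println(measurementMatches(false, false, false)) // unauthorized        -> false
}
```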
-func (is IndexSet) measurementAuthorizedSeries(auth query.Authorizer, name []byte, exclude func(tags models.Tags) bool) bool { - if query.AuthorizerIsOpen(auth) && exclude == nil { - return true - } - - if auth == nil { - auth = query.OpenAuthorizer - } - - sitr, err := is.measurementSeriesIDIterator(name) - if err != nil || sitr == nil { - return false - } - defer sitr.Close() - sitr = FilterUndeletedSeriesIDIterator(is.SeriesFile, sitr) - - for { - series, err := sitr.Next() - if err != nil { - return false - } - - if series.SeriesID == 0 { - return false // End of iterator - } - - name, tags := is.SeriesFile.Series(series.SeriesID) - if auth.AuthorizeSeriesRead(is.Database(), name, tags) { - if exclude != nil && exclude(tags) { - continue - } - return true - } - } -} - -func (is IndexSet) measurementHasTagValue(auth query.Authorizer, me, key, value []byte) (bool, error) { - if len(value) == 0 { - return is.measurementHasEmptyTagValue(auth, me, key) - } - - hasTagValue, err := is.HasTagValue(me, key, value) - if err != nil || !hasTagValue { - return false, err - } - - // If the authorizer is open, return true. - if query.AuthorizerIsOpen(auth) { - return true, nil - } - - // When an authorizer is present, the measurement should be - // included only if one of it's series is authorized. - sitr, err := is.tagValueSeriesIDIterator(me, key, value) - if err != nil || sitr == nil { - return false, err - } - defer sitr.Close() - sitr = FilterUndeletedSeriesIDIterator(is.SeriesFile, sitr) - - // Locate a series with this matching tag value that's authorized. - for { - se, err := sitr.Next() - if err != nil || se.SeriesID == 0 { - return false, err - } - - name, tags := is.SeriesFile.Series(se.SeriesID) - if auth.AuthorizeSeriesRead(is.Database(), name, tags) { - return true, nil - } - } -} - -func (is IndexSet) measurementHasEmptyTagValue(auth query.Authorizer, me, key []byte) (bool, error) { - // Any series that does not have a tag key - // has an empty tag value for that key. - // Iterate through all of the series to find one - // series that does not have the tag key. - sitr, err := is.measurementSeriesIDIterator(me) - if err != nil || sitr == nil { - return false, err - } - defer sitr.Close() - sitr = FilterUndeletedSeriesIDIterator(is.SeriesFile, sitr) - - for { - series, err := sitr.Next() - if err != nil || series.SeriesID == 0 { - return false, err - } - - name, tags := is.SeriesFile.Series(series.SeriesID) - if len(tags.Get(key)) > 0 { - // The tag key exists in this series. We need - // at least one series that does not have the tag - // keys. - continue - } - - // Verify that we can see this series. - if query.AuthorizerIsOpen(auth) { - return true, nil - } else if auth.AuthorizeSeriesRead(is.Database(), name, tags) { - return true, nil - } - } -} - -func (is IndexSet) measurementHasTagValueRegex(auth query.Authorizer, me, key []byte, value *regexp.Regexp) (bool, error) { - // If the regex matches the empty string, do a special check to see - // if we have an empty tag value. - if matchEmpty := value.MatchString(""); matchEmpty { - if ok, err := is.measurementHasEmptyTagValue(auth, me, key); err != nil { - return false, err - } else if ok { - return true, nil - } - } - - // Iterate over the tag values and find one that matches the value. 
- vitr, err := is.tagValueIterator(me, key) - if err != nil || vitr == nil { - return false, err - } - defer vitr.Close() - - for { - ve, err := vitr.Next() - if err != nil || ve == nil { - return false, err - } - - if !value.Match(ve) { - // The regex does not match this tag value. - continue - } - - // If the authorizer is open, then we have found a suitable tag value. - if query.AuthorizerIsOpen(auth) { - return true, nil - } - - // When an authorizer is present, the measurement should only be included - // if one of the series is authorized. - if authorized, err := func() (bool, error) { - sitr, err := is.tagValueSeriesIDIterator(me, key, ve) - if err != nil || sitr == nil { - return false, err - } - defer sitr.Close() - sitr = FilterUndeletedSeriesIDIterator(is.SeriesFile, sitr) - - // Locate an authorized series. - for { - se, err := sitr.Next() - if err != nil || se.SeriesID == 0 { - return false, err - } - - name, tags := is.SeriesFile.Series(se.SeriesID) - if auth.AuthorizeSeriesRead(is.Database(), name, tags) { - return true, nil - } - } - }(); err != nil { - return false, err - } else if authorized { - return true, nil - } - } -} - -// HasTagKey returns true if the tag key exists in any index for the provided -// measurement. -func (is IndexSet) HasTagKey(name, key []byte) (bool, error) { - return is.hasTagKey(name, key) -} - -// hasTagKey returns true if the tag key exists in any index for the provided -// measurement, and guarantees to never take a lock on the series file. -func (is IndexSet) hasTagKey(name, key []byte) (bool, error) { - for _, idx := range is.Indexes { - if ok, err := idx.HasTagKey(name, key); err != nil { - return false, err - } else if ok { - return true, nil - } - } - return false, nil -} - -// HasTagValue returns true if the tag value exists in any index for the provided -// measurement and tag key. -func (is IndexSet) HasTagValue(name, key, value []byte) (bool, error) { - for _, idx := range is.Indexes { - if ok, err := idx.HasTagValue(name, key, value); err != nil { - return false, err - } else if ok { - return true, nil - } - } - return false, nil -} - -// MeasurementIterator returns an iterator over all measurements in the index. -func (is IndexSet) MeasurementIterator() (MeasurementIterator, error) { - return is.measurementIterator() -} - -// measurementIterator returns an iterator over all measurements in the index. -// It guarantees to never take any locks on the underlying series file. -func (is IndexSet) measurementIterator() (MeasurementIterator, error) { - a := make([]MeasurementIterator, 0, len(is.Indexes)) - for _, idx := range is.Indexes { - itr, err := idx.MeasurementIterator() - if err != nil { - MeasurementIterators(a).Close() - return nil, err - } else if itr != nil { - a = append(a, itr) - } - } - return MergeMeasurementIterators(a...), nil -} - -// TagKeyIterator returns a key iterator for a measurement. -func (is IndexSet) TagKeyIterator(name []byte) (TagKeyIterator, error) { - return is.tagKeyIterator(name) -} - -// tagKeyIterator returns a key iterator for a measurement. It guarantees to never -// take any locks on the underlying series file. 
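One subtlety worth restating from `measurementHasTagValueRegex` and `measurementHasEmptyTagValue`: a series that lacks the tag key behaves as if the key's value were the empty string, so any regex that matches `""` can match series with no such tag at all. A tiny standalone demonstration (plain Go maps standing in for series tags):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A pattern that matches the empty string also matches series that simply
	// lack the tag key, because a missing key reads as "".
	re := regexp.MustCompile(`^(us-.*)?$`)

	seriesTags := []map[string]string{
		{"host": "a", "region": "us-west"}, // tagged series
		{"host": "b"},                      // no region tag at all
	}
	for _, tags := range seriesTags {
		fmt.Println(tags["host"], re.MatchString(tags["region"]))
	}
	// Output:
	// a true
	// b true
}
```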
-func (is IndexSet) tagKeyIterator(name []byte) (TagKeyIterator, error) { - a := make([]TagKeyIterator, 0, len(is.Indexes)) - for _, idx := range is.Indexes { - itr, err := idx.TagKeyIterator(name) - if err != nil { - TagKeyIterators(a).Close() - return nil, err - } else if itr != nil { - a = append(a, itr) - } - } - return MergeTagKeyIterators(a...), nil -} - -// TagValueIterator returns a value iterator for a tag key. -func (is IndexSet) TagValueIterator(name, key []byte) (TagValueIterator, error) { - return is.tagValueIterator(name, key) -} - -// tagValueIterator returns a value iterator for a tag key. It guarantees to never -// take any locks on the underlying series file. -func (is IndexSet) tagValueIterator(name, key []byte) (TagValueIterator, error) { - a := make([]TagValueIterator, 0, len(is.Indexes)) - for _, idx := range is.Indexes { - itr, err := idx.TagValueIterator(name, key) - if err != nil { - TagValueIterators(a).Close() - return nil, err - } else if itr != nil { - a = append(a, itr) - } - } - return MergeTagValueIterators(a...), nil -} - -// TagKeyHasAuthorizedSeries determines if there exists an authorized series for -// the provided measurement name and tag key. -func (is IndexSet) TagKeyHasAuthorizedSeries(auth query.Authorizer, name, tagKey []byte) (bool, error) { - if query.AuthorizerIsOpen(auth) { - return true, nil - } - - release := is.SeriesFile.Retain() - defer release() - - itr, err := is.tagKeySeriesIDIterator(name, tagKey) - if err != nil { - return false, err - } else if itr == nil { - return false, nil - } - defer itr.Close() - itr = FilterUndeletedSeriesIDIterator(is.SeriesFile, itr) - - for { - e, err := itr.Next() - if err != nil { - return false, err - } - - if e.SeriesID == 0 { - return false, nil - } - - if query.AuthorizerIsOpen(auth) { - return true, nil - } - - name, tags := is.SeriesFile.Series(e.SeriesID) - if auth.AuthorizeSeriesRead(is.Database(), name, tags) { - return true, nil - } - } -} - -// MeasurementSeriesIDIterator returns an iterator over all non-tombstoned series -// for the provided measurement. -func (is IndexSet) MeasurementSeriesIDIterator(name []byte) (SeriesIDIterator, error) { - release := is.SeriesFile.Retain() - defer release() - - itr, err := is.measurementSeriesIDIterator(name) - if err != nil { - return nil, err - } - return FilterUndeletedSeriesIDIterator(is.SeriesFile, itr), nil -} - -// measurementSeriesIDIterator does not provide any locking on the Series file. -// -// See MeasurementSeriesIDIterator for more details. -func (is IndexSet) measurementSeriesIDIterator(name []byte) (SeriesIDIterator, error) { - a := make([]SeriesIDIterator, 0, len(is.Indexes)) - for _, idx := range is.Indexes { - itr, err := idx.MeasurementSeriesIDIterator(name) - if err != nil { - SeriesIDIterators(a).Close() - return nil, err - } else if itr != nil { - a = append(a, itr) - } - } - return MergeSeriesIDIterators(a...), nil -} - -// ForEachMeasurementTagKey iterates over all tag keys in a measurement and applies -// the provided function. 
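`measurementIterator`, `tagKeyIterator`, `tagValueIterator` and `measurementSeriesIDIterator` all follow the same fan-out: open one iterator per index, close whatever was already opened if a later open fails, then hand the survivors to the corresponding `Merge*` constructor. A generic sketch of that open-all-or-clean-up pattern, with placeholder types (`stubIterator` and `openAll` are illustrative, not package API):

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// stubIterator stands in for a per-index iterator; only Close matters here.
type stubIterator struct{}

func (stubIterator) Close() error { return nil }

// openAll opens one iterator per index. If any open fails, everything already
// opened is closed before the error is returned, so nothing leaks.
func openAll(n int, open func(i int) (io.Closer, error)) ([]io.Closer, error) {
	opened := make([]io.Closer, 0, n)
	for i := 0; i < n; i++ {
		itr, err := open(i)
		if err != nil {
			for _, o := range opened {
				o.Close()
			}
			return nil, err
		}
		if itr != nil {
			opened = append(opened, itr)
		}
	}
	return opened, nil
}

func main() {
	itrs, err := openAll(3, func(i int) (io.Closer, error) {
		if i == 2 {
			return nil, errors.New("index 2 failed to open")
		}
		return stubIterator{}, nil
	})
	fmt.Println(len(itrs), err) // 0 index 2 failed to open
}
```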
-func (is IndexSet) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error { - release := is.SeriesFile.Retain() - defer release() - - itr, err := is.tagKeyIterator(name) - if err != nil { - return err - } else if itr == nil { - return nil - } - defer itr.Close() - - for { - key, err := itr.Next() - if err != nil { - return err - } else if key == nil { - return nil - } - - if err := fn(key); err != nil { - return err - } - } -} - -// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression. -func (is IndexSet) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) { - release := is.SeriesFile.Retain() - defer release() - - keys := make(map[string]struct{}) - for _, idx := range is.Indexes { - m, err := idx.MeasurementTagKeysByExpr(name, expr) - if err != nil { - return nil, err - } - for k := range m { - keys[k] = struct{}{} - } - } - return keys, nil -} - -// TagKeySeriesIDIterator returns a series iterator for all values across a single key. -func (is IndexSet) TagKeySeriesIDIterator(name, key []byte) (SeriesIDIterator, error) { - release := is.SeriesFile.Retain() - defer release() - - itr, err := is.tagKeySeriesIDIterator(name, key) - if err != nil { - return nil, err - } - return FilterUndeletedSeriesIDIterator(is.SeriesFile, itr), nil -} - -// tagKeySeriesIDIterator returns a series iterator for all values across a -// single key. -// -// It guarantees to never take any locks on the series file. -func (is IndexSet) tagKeySeriesIDIterator(name, key []byte) (SeriesIDIterator, error) { - a := make([]SeriesIDIterator, 0, len(is.Indexes)) - for _, idx := range is.Indexes { - itr, err := idx.TagKeySeriesIDIterator(name, key) - if err != nil { - SeriesIDIterators(a).Close() - return nil, err - } else if itr != nil { - a = append(a, itr) - } - } - return MergeSeriesIDIterators(a...), nil -} - -// TagValueSeriesIDIterator returns a series iterator for a single tag value. -func (is IndexSet) TagValueSeriesIDIterator(name, key, value []byte) (SeriesIDIterator, error) { - release := is.SeriesFile.Retain() - defer release() - - itr, err := is.tagValueSeriesIDIterator(name, key, value) - if err != nil { - return nil, err - } - return FilterUndeletedSeriesIDIterator(is.SeriesFile, itr), nil -} - -// tagValueSeriesIDIterator does not provide any locking on the Series File. -// -// See TagValueSeriesIDIterator for more details. -func (is IndexSet) tagValueSeriesIDIterator(name, key, value []byte) (SeriesIDIterator, error) { - a := make([]SeriesIDIterator, 0, len(is.Indexes)) - for _, idx := range is.Indexes { - itr, err := idx.TagValueSeriesIDIterator(name, key, value) - if err != nil { - SeriesIDIterators(a).Close() - return nil, err - } else if itr != nil { - a = append(a, itr) - } - } - return MergeSeriesIDIterators(a...), nil -} - -// MeasurementSeriesByExprIterator returns a series iterator for a measurement -// that is filtered by expr. If expr only contains time expressions then this -// call is equivalent to MeasurementSeriesIDIterator(). -func (is IndexSet) MeasurementSeriesByExprIterator(name []byte, expr influxql.Expr) (SeriesIDIterator, error) { - release := is.SeriesFile.Retain() - defer release() - return is.measurementSeriesByExprIterator(name, expr) -} - -// measurementSeriesByExprIterator returns a series iterator for a measurement -// that is filtered by expr. See MeasurementSeriesByExprIterator for more details. -// -// measurementSeriesByExprIterator guarantees to never take any locks on the -// series file. 
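Several of the exported IndexSet methods above capture the iterator's Close error through a named return, so a failure during cleanup is reported without clobbering an error already produced by the body. The idiom in isolation, with a throwaway closer type:

```go
package main

import (
	"errors"
	"fmt"
)

type closer struct{ closeErr error }

func (c closer) Close() error { return c.closeErr }

// drain shows the named-return pattern: the deferred Close can surface its own
// error, but never overwrites an error already returned by the body.
func drain(c closer) (err error) {
	defer func() {
		if e := c.Close(); err == nil {
			err = e
		}
	}()
	// ... iterate the underlying resource here; body errors take precedence ...
	return nil
}

func main() {
	fmt.Println(drain(closer{}))                              // <nil>
	fmt.Println(drain(closer{closeErr: errors.New("close")})) // close
}
```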
-func (is IndexSet) measurementSeriesByExprIterator(name []byte, expr influxql.Expr) (SeriesIDIterator, error) { - // Return all series for the measurement if there are no tag expressions. - if expr == nil { - itr, err := is.measurementSeriesIDIterator(name) - if err != nil { - return nil, err - } - return FilterUndeletedSeriesIDIterator(is.SeriesFile, itr), nil - } - - itr, err := is.seriesByExprIterator(name, expr) - if err != nil { - return nil, err - } - return FilterUndeletedSeriesIDIterator(is.SeriesFile, itr), nil -} - -type measurementSeriesKeyByExprIterator struct { - ids SeriesIDIterator - is IndexSet - auth query.Authorizer - once sync.Once - releaser func() -} - -func (itr *measurementSeriesKeyByExprIterator) Next() ([]byte, error) { - if itr == nil { - return nil, nil - } - for { - e, err := itr.ids.Next() - if err != nil { - return nil, err - } else if e.SeriesID == 0 { - return nil, nil - } - - seriesKey := itr.is.SeriesFile.SeriesKey(e.SeriesID) - if len(seriesKey) == 0 { - continue - } - - name, tags := ParseSeriesKey(seriesKey) - - // Check leftover filters. All fields that might be filtered default to zero values - if e.Expr != nil { - if v, ok := e.Expr.(*influxql.BooleanLiteral); ok { - if !v.Val { - continue - } - } else { - values := make(map[string]interface{}, len(tags)) - for _, t := range tags { - values[string(t.Key)] = string(t.Value) - } - if !influxql.EvalBool(e.Expr, values) { - continue - } - } - } - - if itr.auth != nil && !itr.auth.AuthorizeSeriesRead(itr.is.Database(), name, tags) { - continue - } - - out := models.MakeKey(name, tags) - // ensure nil is only returned when we are done (or for errors) - if out == nil { - out = []byte{} - } - return out, nil - } -} - -func (itr *measurementSeriesKeyByExprIterator) Close() error { - if itr == nil { - return nil - } - itr.once.Do(itr.releaser) - return itr.ids.Close() -} - -// MeasurementSeriesKeyByExprIterator iterates through series, filtered by an expression on the tags. -// Any non-tag expressions will be filtered as if the field had the zero value. -func (is IndexSet) MeasurementSeriesKeyByExprIterator(name []byte, expr influxql.Expr, auth query.Authorizer) (SeriesKeyIterator, error) { - release := is.SeriesFile.Retain() - // Create iterator for all matching series. - ids, err := is.measurementSeriesByExprIterator(name, expr) - if err != nil { - release() - return nil, err - } - if ids == nil { - release() - return nil, nil - } - return &measurementSeriesKeyByExprIterator{ - ids: ids, - releaser: release, - auth: auth, - is: is, - }, nil -} - -// MeasurementSeriesKeysByExpr returns a list of series keys matching expr. -func (is IndexSet) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr) ([][]byte, error) { - release := is.SeriesFile.Retain() - defer release() - - // Create iterator for all matching series. - itr, err := is.measurementSeriesByExprIterator(name, expr) - if err != nil { - return nil, err - } else if itr == nil { - return nil, nil - } - defer itr.Close() - - // measurementSeriesByExprIterator filters deleted series; no need to do so here. - - // Iterate over all series and generate keys. - var keys [][]byte - for { - e, err := itr.Next() - if err != nil { - return nil, err - } else if e.SeriesID == 0 { - break - } - - // Check for unsupported field filters. - // Any remaining filters means there were fields (e.g., `WHERE value = 1.2`). 
- if e.Expr != nil { - if v, ok := e.Expr.(*influxql.BooleanLiteral); !ok || !v.Val { - return nil, errors.New("fields not supported in WHERE clause during deletion") - } - } - - seriesKey := is.SeriesFile.SeriesKey(e.SeriesID) - if len(seriesKey) == 0 { - continue - } - - name, tags := ParseSeriesKey(seriesKey) - keys = append(keys, models.MakeKey(name, tags)) - } - - bytesutil.Sort(keys) - - return keys, nil -} - -func (is IndexSet) seriesByExprIterator(name []byte, expr influxql.Expr) (SeriesIDIterator, error) { - switch expr := expr.(type) { - case *influxql.BinaryExpr: - switch expr.Op { - case influxql.AND, influxql.OR: - // Get the series IDs and filter expressions for the LHS. - litr, err := is.seriesByExprIterator(name, expr.LHS) - if err != nil { - return nil, err - } - - // Get the series IDs and filter expressions for the RHS. - ritr, err := is.seriesByExprIterator(name, expr.RHS) - if err != nil { - if litr != nil { - litr.Close() - } - return nil, err - } - - // Intersect iterators if expression is "AND". - if expr.Op == influxql.AND { - return IntersectSeriesIDIterators(litr, ritr), nil - } - - // Union iterators if expression is "OR". - return UnionSeriesIDIterators(litr, ritr), nil - - default: - return is.seriesByBinaryExprIterator(name, expr) - } - - case *influxql.ParenExpr: - return is.seriesByExprIterator(name, expr.Expr) - - case *influxql.BooleanLiteral: - if expr.Val { - return is.measurementSeriesIDIterator(name) - } - return nil, nil - - default: - return nil, nil - } -} - -// seriesByBinaryExprIterator returns a series iterator and a filtering expression. -func (is IndexSet) seriesByBinaryExprIterator(name []byte, n *influxql.BinaryExpr) (SeriesIDIterator, error) { - // If this binary expression has another binary expression, then this - // is some expression math and we should just pass it to the underlying query. - if _, ok := n.LHS.(*influxql.BinaryExpr); ok { - itr, err := is.measurementSeriesIDIterator(name) - if err != nil { - return nil, err - } - return newSeriesIDExprIterator(itr, n), nil - } else if _, ok := n.RHS.(*influxql.BinaryExpr); ok { - itr, err := is.measurementSeriesIDIterator(name) - if err != nil { - return nil, err - } - return newSeriesIDExprIterator(itr, n), nil - } - - // Retrieve the variable reference from the correct side of the expression. - key, ok := n.LHS.(*influxql.VarRef) - value := n.RHS - if !ok { - key, ok = n.RHS.(*influxql.VarRef) - if !ok { - // This is an expression we do not know how to evaluate. Let the - // query engine take care of this. - itr, err := is.measurementSeriesIDIterator(name) - if err != nil { - return nil, err - } - return newSeriesIDExprIterator(itr, n), nil - } - value = n.LHS - } - - // For fields, return all series from this measurement. - if key.Val != "_name" && ((key.Type == influxql.Unknown && is.HasField(name, key.Val)) || key.Type == influxql.AnyField || (key.Type != influxql.Tag && key.Type != influxql.Unknown)) { - itr, err := is.measurementSeriesIDIterator(name) - if err != nil { - return nil, err - } - return newSeriesIDExprIterator(itr, n), nil - } else if value, ok := value.(*influxql.VarRef); ok { - // Check if the RHS is a variable and if it is a field. 
- if value.Val != "_name" && ((value.Type == influxql.Unknown && is.HasField(name, value.Val)) || key.Type == influxql.AnyField || (value.Type != influxql.Tag && value.Type != influxql.Unknown)) { - itr, err := is.measurementSeriesIDIterator(name) - if err != nil { - return nil, err - } - return newSeriesIDExprIterator(itr, n), nil - } - } - - // Create iterator based on value type. - switch value := value.(type) { - case *influxql.StringLiteral: - return is.seriesByBinaryExprStringIterator(name, []byte(key.Val), []byte(value.Val), n.Op) - case *influxql.RegexLiteral: - return is.seriesByBinaryExprRegexIterator(name, []byte(key.Val), value.Val, n.Op) - case *influxql.VarRef: - return is.seriesByBinaryExprVarRefIterator(name, []byte(key.Val), value, n.Op) - default: - // We do not know how to evaluate this expression so pass it - // on to the query engine. - itr, err := is.measurementSeriesIDIterator(name) - if err != nil { - return nil, err - } - return newSeriesIDExprIterator(itr, n), nil - } -} - -func (is IndexSet) seriesByBinaryExprStringIterator(name, key, value []byte, op influxql.Token) (SeriesIDIterator, error) { - // Special handling for "_name" to match measurement name. - if bytes.Equal(key, []byte("_name")) { - if (op == influxql.EQ && bytes.Equal(value, name)) || (op == influxql.NEQ && !bytes.Equal(value, name)) { - return is.measurementSeriesIDIterator(name) - } - return nil, nil - } - - if op == influxql.EQ { - // Match a specific value. - if len(value) != 0 { - return is.tagValueSeriesIDIterator(name, key, value) - } - - mitr, err := is.measurementSeriesIDIterator(name) - if err != nil { - return nil, err - } - - kitr, err := is.tagKeySeriesIDIterator(name, key) - if err != nil { - if mitr != nil { - mitr.Close() - } - return nil, err - } - - // Return all measurement series that have no values from this tag key. - return DifferenceSeriesIDIterators(mitr, kitr), nil - } - - // Return all measurement series without this tag value. - if len(value) != 0 { - mitr, err := is.measurementSeriesIDIterator(name) - if err != nil { - return nil, err - } - - vitr, err := is.tagValueSeriesIDIterator(name, key, value) - if err != nil { - if mitr != nil { - mitr.Close() - } - return nil, err - } - - return DifferenceSeriesIDIterators(mitr, vitr), nil - } - - // Return all series across all values of this tag key. - return is.tagKeySeriesIDIterator(name, key) -} - -func (is IndexSet) seriesByBinaryExprRegexIterator(name, key []byte, value *regexp.Regexp, op influxql.Token) (SeriesIDIterator, error) { - // Special handling for "_name" to match measurement name. 
- if bytes.Equal(key, []byte("_name")) { - match := value.Match(name) - if (op == influxql.EQREGEX && match) || (op == influxql.NEQREGEX && !match) { - mitr, err := is.measurementSeriesIDIterator(name) - if err != nil { - return nil, err - } - return newSeriesIDExprIterator(mitr, &influxql.BooleanLiteral{Val: true}), nil - } - return nil, nil - } - return is.matchTagValueSeriesIDIterator(name, key, value, op == influxql.EQREGEX) -} - -func (is IndexSet) seriesByBinaryExprVarRefIterator(name, key []byte, value *influxql.VarRef, op influxql.Token) (SeriesIDIterator, error) { - itr0, err := is.tagKeySeriesIDIterator(name, key) - if err != nil { - return nil, err - } - - itr1, err := is.tagKeySeriesIDIterator(name, []byte(value.Val)) - if err != nil { - if itr0 != nil { - itr0.Close() - } - return nil, err - } - - if op == influxql.EQ { - return IntersectSeriesIDIterators(itr0, itr1), nil - } - return DifferenceSeriesIDIterators(itr0, itr1), nil -} - -// MatchTagValueSeriesIDIterator returns a series iterator for tags which match value. -// If matches is false, returns iterators which do not match value. -func (is IndexSet) MatchTagValueSeriesIDIterator(name, key []byte, value *regexp.Regexp, matches bool) (SeriesIDIterator, error) { - release := is.SeriesFile.Retain() - defer release() - itr, err := is.matchTagValueSeriesIDIterator(name, key, value, matches) - if err != nil { - return nil, err - } - return FilterUndeletedSeriesIDIterator(is.SeriesFile, itr), nil -} - -// matchTagValueSeriesIDIterator returns a series iterator for tags which match -// value. See MatchTagValueSeriesIDIterator for more details. -// -// It guarantees to never take any locks on the underlying series file. -func (is IndexSet) matchTagValueSeriesIDIterator(name, key []byte, value *regexp.Regexp, matches bool) (SeriesIDIterator, error) { - matchEmpty := value.MatchString("") - if matches { - if matchEmpty { - return is.matchTagValueEqualEmptySeriesIDIterator(name, key, value) - } - return is.matchTagValueEqualNotEmptySeriesIDIterator(name, key, value) - } - - if matchEmpty { - return is.matchTagValueNotEqualEmptySeriesIDIterator(name, key, value) - } - return is.matchTagValueNotEqualNotEmptySeriesIDIterator(name, key, value) -} - -func (is IndexSet) matchTagValueEqualEmptySeriesIDIterator(name, key []byte, value *regexp.Regexp) (SeriesIDIterator, error) { - vitr, err := is.tagValueIterator(name, key) - if err != nil { - return nil, err - } else if vitr == nil { - return is.measurementSeriesIDIterator(name) - } - defer vitr.Close() - - var itrs []SeriesIDIterator - if err := func() error { - for { - e, err := vitr.Next() - if err != nil { - return err - } else if e == nil { - break - } - - if !value.Match(e) { - itr, err := is.tagValueSeriesIDIterator(name, key, e) - if err != nil { - return err - } else if itr != nil { - itrs = append(itrs, itr) - } - } - } - return nil - }(); err != nil { - SeriesIDIterators(itrs).Close() - return nil, err - } - - mitr, err := is.measurementSeriesIDIterator(name) - if err != nil { - SeriesIDIterators(itrs).Close() - return nil, err - } - - return DifferenceSeriesIDIterators(mitr, MergeSeriesIDIterators(itrs...)), nil -} - -func (is IndexSet) matchTagValueEqualNotEmptySeriesIDIterator(name, key []byte, value *regexp.Regexp) (SeriesIDIterator, error) { - vitr, err := is.tagValueIterator(name, key) - if err != nil { - return nil, err - } else if vitr == nil { - return nil, nil - } - defer vitr.Close() - - var itrs []SeriesIDIterator - for { - e, err := vitr.Next() - if err != nil { - 
SeriesIDIterators(itrs).Close() - return nil, err - } else if e == nil { - break - } - - if value.Match(e) { - itr, err := is.tagValueSeriesIDIterator(name, key, e) - if err != nil { - SeriesIDIterators(itrs).Close() - return nil, err - } else if itr != nil { - itrs = append(itrs, itr) - } - } - } - return MergeSeriesIDIterators(itrs...), nil -} - -func (is IndexSet) matchTagValueNotEqualEmptySeriesIDIterator(name, key []byte, value *regexp.Regexp) (SeriesIDIterator, error) { - vitr, err := is.tagValueIterator(name, key) - if err != nil { - return nil, err - } else if vitr == nil { - return nil, nil - } - defer vitr.Close() - - var itrs []SeriesIDIterator - for { - e, err := vitr.Next() - if err != nil { - SeriesIDIterators(itrs).Close() - return nil, err - } else if e == nil { - break - } - - if !value.Match(e) { - itr, err := is.tagValueSeriesIDIterator(name, key, e) - if err != nil { - SeriesIDIterators(itrs).Close() - return nil, err - } else if itr != nil { - itrs = append(itrs, itr) - } - } - } - return MergeSeriesIDIterators(itrs...), nil -} - -func (is IndexSet) matchTagValueNotEqualNotEmptySeriesIDIterator(name, key []byte, value *regexp.Regexp) (SeriesIDIterator, error) { - vitr, err := is.tagValueIterator(name, key) - if err != nil { - return nil, err - } else if vitr == nil { - return is.measurementSeriesIDIterator(name) - } - defer vitr.Close() - - var itrs []SeriesIDIterator - for { - e, err := vitr.Next() - if err != nil { - SeriesIDIterators(itrs).Close() - return nil, err - } else if e == nil { - break - } - if value.Match(e) { - itr, err := is.tagValueSeriesIDIterator(name, key, e) - if err != nil { - SeriesIDIterators(itrs).Close() - return nil, err - } else if itr != nil { - itrs = append(itrs, itr) - } - } - } - - mitr, err := is.measurementSeriesIDIterator(name) - if err != nil { - SeriesIDIterators(itrs).Close() - return nil, err - } - return DifferenceSeriesIDIterators(mitr, MergeSeriesIDIterators(itrs...)), nil -} - -// tagValuesByKeyAndExpr retrieves tag values for the provided tag keys. -// -// tagValuesByKeyAndExpr returns sets of values for each key, indexable by the -// position of the tag key in the keys argument. -// -// N.B tagValuesByKeyAndExpr relies on keys being sorted in ascending -// lexicographic order. -// -// tagValuesByKeyAndExpr guarantees to never take any locks on the underlying -// series file. -func (is IndexSet) tagValuesByKeyAndExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr) ([]map[string]struct{}, error) { - database := is.Database() - - valueExpr, remainingExpr, err := influxql.PartitionExpr(influxql.CloneExpr(expr), func(e influxql.Expr) (bool, error) { - switch e := e.(type) { - case *influxql.BinaryExpr: - switch e.Op { - case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX: - tag, ok := e.LHS.(*influxql.VarRef) - if ok && tag.Val == "value" { - return true, nil - } - } - } - return false, nil - }) - if err != nil { - return nil, err - } - if remainingExpr == nil { - remainingExpr = &influxql.BooleanLiteral{Val: true} - } - - itr, err := is.seriesByExprIterator(name, remainingExpr) - if err != nil { - return nil, err - } else if itr == nil { - return nil, nil - } - itr = FilterUndeletedSeriesIDIterator(is.SeriesFile, itr) - defer itr.Close() - - keyIdxs := make(map[string]int, len(keys)) - for ki, key := range keys { - keyIdxs[key] = ki - - // Check that keys are in order. 
- if ki > 0 && key < keys[ki-1] { - return nil, fmt.Errorf("keys %v are not in ascending order", keys) - } - } - - resultSet := make([]map[string]struct{}, len(keys)) - for i := 0; i < len(resultSet); i++ { - resultSet[i] = make(map[string]struct{}) - } - - // Iterate all series to collect tag values. - for { - e, err := itr.Next() - if err != nil { - return nil, err - } else if e.SeriesID == 0 { - break - } - - if e.Expr != nil { - // We don't yet have code that correctly processes expressions that - // seriesByExprIterator doesn't handle - lit, ok := e.Expr.(*influxql.BooleanLiteral) - if !ok { - return nil, fmt.Errorf("expression too complex for metaquery: %v", e.Expr) - } - if !lit.Val { - continue - } - } - - buf := is.SeriesFile.SeriesKey(e.SeriesID) - if len(buf) == 0 { - continue - } - - if auth != nil { - name, tags := ParseSeriesKey(buf) - if !auth.AuthorizeSeriesRead(database, name, tags) { - continue - } - } - - _, buf = ReadSeriesKeyLen(buf) - _, buf = ReadSeriesKeyMeasurement(buf) - tagN, buf := ReadSeriesKeyTagN(buf) - for i := 0; i < tagN; i++ { - var key, value []byte - key, value, buf = ReadSeriesKeyTag(buf) - if valueExpr != nil { - if !influxql.EvalBool(valueExpr, map[string]interface{}{"value": string(value)}) { - continue - } - } - - if idx, ok := keyIdxs[string(key)]; ok { - resultSet[idx][string(value)] = struct{}{} - } else if string(key) > keys[len(keys)-1] { - // The tag key is > the largest key we're interested in. - break - } - } - } - return resultSet, nil -} - -// MeasurementTagKeyValuesByExpr returns a set of tag values filtered by an expression. -func (is IndexSet) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) { - if len(keys) == 0 { - return nil, nil - } - - results := make([][]string, len(keys)) - // If the keys are not sorted, then sort them. - if !keysSorted { - sort.Strings(keys) - } - - release := is.SeriesFile.Retain() - defer release() - - // No expression means that the values shouldn't be filtered; so fetch them - // all. - if expr == nil { - for ki, key := range keys { - vitr, err := is.tagValueIterator(name, []byte(key)) - if err != nil { - return nil, err - } else if vitr == nil { - break - } - defer vitr.Close() - - // If no authorizer present then return all values. - if query.AuthorizerIsOpen(auth) { - for { - val, err := vitr.Next() - if err != nil { - return nil, err - } else if val == nil { - break - } - results[ki] = append(results[ki], string(val)) - } - continue - } - - // Authorization is present — check all series with matching tag values - // and measurements for the presence of an authorized series. - for { - val, err := vitr.Next() - if err != nil { - return nil, err - } else if val == nil { - break - } - - sitr, err := is.tagValueSeriesIDIterator(name, []byte(key), val) - if err != nil { - return nil, err - } else if sitr == nil { - continue - } - defer sitr.Close() - sitr = FilterUndeletedSeriesIDIterator(is.SeriesFile, sitr) - - for { - se, err := sitr.Next() - if err != nil { - return nil, err - } - - if se.SeriesID == 0 { - break - } - - name, tags := is.SeriesFile.Series(se.SeriesID) - if auth.AuthorizeSeriesRead(is.Database(), name, tags) { - results[ki] = append(results[ki], string(val)) - break - } - } - if err := sitr.Close(); err != nil { - return nil, err - } - } - } - return results, nil - } - - // This is the case where we have filtered series by some WHERE condition. 
- // We only care about the tag values for the keys given the - // filtered set of series ids. - resultSet, err := is.tagValuesByKeyAndExpr(auth, name, keys, expr) - if err != nil { - return nil, err - } - - // Convert result sets into []string - for i, s := range resultSet { - values := make([]string, 0, len(s)) - for v := range s { - values = append(values, v) - } - sort.Strings(values) - results[i] = values - } - return results, nil -} - -// TagSets returns an ordered list of tag sets for a measurement by dimension -// and filtered by an optional conditional expression. -func (is IndexSet) TagSets(sfile *SeriesFile, name []byte, opt query.IteratorOptions) ([]*query.TagSet, error) { - release := is.SeriesFile.Retain() - defer release() - - itr, err := is.measurementSeriesByExprIterator(name, opt.Condition) - if err != nil { - return nil, err - } else if itr == nil { - return nil, nil - } - defer itr.Close() - // measurementSeriesByExprIterator filters deleted series IDs; no need to - // do so here. - - var dims []string - if len(opt.Dimensions) > 0 { - dims = make([]string, len(opt.Dimensions)) - copy(dims, opt.Dimensions) - sort.Strings(dims) - } - - // For every series, get the tag values for the requested tag keys i.e. - // dimensions. This is the TagSet for that series. Series with the same - // TagSet are then grouped together, because for the purpose of GROUP BY - // they are part of the same composite series. - tagSets := make(map[string]*query.TagSet, 64) - var ( - seriesN, maxSeriesN int - db = is.Database() - ) - - if opt.MaxSeriesN > 0 { - maxSeriesN = opt.MaxSeriesN - } else { - maxSeriesN = int(^uint(0) >> 1) - } - - // The tag sets require a string for each series key in the set, The series - // file formatted keys need to be parsed into models format. Since they will - // end up as strings we can re-use an intermediate buffer for this process. - var keyBuf []byte - var tagsBuf models.Tags // Buffer for tags. Tags are not needed outside of each loop iteration. - for { - se, err := itr.Next() - if err != nil { - return nil, err - } else if se.SeriesID == 0 { - break - } - - // Skip if the series has been tombstoned. - key := sfile.SeriesKey(se.SeriesID) - if len(key) == 0 { - continue - } - - if seriesN&0x3fff == 0x3fff { - // check every 16384 series if the query has been canceled - select { - case <-opt.InterruptCh: - return nil, query.ErrQueryInterrupted - default: - } - } - - if seriesN > maxSeriesN { - return nil, fmt.Errorf("max-select-series limit exceeded: (%d/%d)", seriesN, opt.MaxSeriesN) - } - - // NOTE - must not escape this loop iteration. - _, tagsBuf = ParseSeriesKeyInto(key, tagsBuf) - if opt.Authorizer != nil && !opt.Authorizer.AuthorizeSeriesRead(db, name, tagsBuf) { - continue - } - - var tagsAsKey []byte - if len(dims) > 0 { - tagsAsKey = MakeTagsKey(dims, tagsBuf) - } - - tagSet, ok := tagSets[string(tagsAsKey)] - if !ok { - // This TagSet is new, create a new entry for it. - tagSet = &query.TagSet{ - Key: tagsAsKey, - } - } - - // Associate the series and filter with the Tagset. - keyBuf = models.AppendMakeKey(keyBuf, name, tagsBuf) - tagSet.AddFilter(string(keyBuf), se.Expr) - keyBuf = keyBuf[:0] - - // Ensure it's back in the map. - tagSets[string(tagsAsKey)] = tagSet - seriesN++ - } - - // Sort the series in each tag set. - for _, t := range tagSets { - sort.Sort(t) - } - - // The TagSets have been created, as a map of TagSets. Just send - // the values back as a slice, sorting for consistency. 
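	// For intuition, with GROUP BY region the loop above collapses series into
	// tag sets roughly like this (the measurement and tag names are hypothetical):
	//
	//	cpu,host=a,region=east  -> TagSet "region=east"
	//	cpu,host=b,region=east  -> TagSet "region=east"   (same set, second filter)
	//	cpu,host=a,region=west  -> TagSet "region=west"
	//
	// Each TagSet keeps the full series keys as filters; sorting by the encoded
	// dimension key below gives deterministic output ordering.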
- sortedTagsSets := make([]*query.TagSet, 0, len(tagSets)) - for _, v := range tagSets { - sortedTagsSets = append(sortedTagsSets, v) - } - sort.Sort(byTagKey(sortedTagsSets)) - - return sortedTagsSets, nil -} - -// NewIndexFunc creates a new index. -type NewIndexFunc func(id uint64, database, path string, seriesIDSet *SeriesIDSet, sfile *SeriesFile, options EngineOptions) Index - -// newIndexFuncs is a lookup of index constructors by name. -var newIndexFuncs = make(map[string]NewIndexFunc) - -// RegisterIndex registers a storage index initializer by name. -func RegisterIndex(name string, fn NewIndexFunc) { - if _, ok := newIndexFuncs[name]; ok { - panic("index already registered: " + name) - } - newIndexFuncs[name] = fn -} - -// RegisteredIndexes returns the slice of currently registered indexes. -func RegisteredIndexes() []string { - a := make([]string, 0, len(newIndexFuncs)) - for k := range newIndexFuncs { - a = append(a, k) - } - sort.Strings(a) - return a -} - -// NewIndex returns an instance of an index based on its format. -// If the path does not exist then the DefaultFormat is used. -func NewIndex(id uint64, database, path string, seriesIDSet *SeriesIDSet, sfile *SeriesFile, options EngineOptions) (Index, error) { - format := options.IndexVersion - - // Use default format unless existing directory exists. - _, err := os.Stat(path) - if os.IsNotExist(err) { - // nop, use default - } else if err != nil { - return nil, err - } else if err == nil { - format = TSI1IndexName - } - - // Lookup index by format. - fn := newIndexFuncs[format] - if fn == nil { - return nil, fmt.Errorf("invalid index format: %q", format) - } - return fn(id, database, path, seriesIDSet, sfile, options), nil -} - -func MustOpenIndex(id uint64, database, path string, seriesIDSet *SeriesIDSet, sfile *SeriesFile, options EngineOptions) Index { - idx, err := NewIndex(id, database, path, seriesIDSet, sfile, options) - if err != nil { - panic(err) - } else if err := idx.Open(); err != nil { - panic(err) - } - return idx -} - -// assert will panic with a given formatted message if the given condition is false. -func assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assert failed: "+msg, v...)) - } -} - -type byTagKey []*query.TagSet - -func (t byTagKey) Len() int { return len(t) } -func (t byTagKey) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) < 0 } -func (t byTagKey) Swap(i, j int) { t[i], t[j] = t[j], t[i] } diff --git a/tsdb/index/index.go b/tsdb/index/index.go deleted file mode 100644 index b040461958f..00000000000 --- a/tsdb/index/index.go +++ /dev/null @@ -1,5 +0,0 @@ -package index // import "github.com/influxdata/influxdb/v2/tsdb/index" - -import ( - _ "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" -) diff --git a/tsdb/index/tsi1/cache.go b/tsdb/index/tsi1/cache.go deleted file mode 100644 index 21e4a277760..00000000000 --- a/tsdb/index/tsi1/cache.go +++ /dev/null @@ -1,200 +0,0 @@ -package tsi1 - -import ( - "container/list" - "sync" - - "github.com/influxdata/influxdb/v2/tsdb" -) - -// TagValueSeriesIDCache is an LRU cache for series id sets associated with -// name -> key -> value mappings. The purpose of the cache is to provide -// efficient means to get sets of series ids that would otherwise involve merging -// many individual bitmaps at query time. -// -// When initialising a TagValueSeriesIDCache a capacity must be provided. 
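// A minimal single-level sketch of the same linked-list-plus-map pattern,
// using a plain string key instead of the cache's {name, key, value} triple
// (the names and helper types here are illustrative only):
//
//	type entry struct {
//		k string
//		v *tsdb.SeriesIDSet
//	}
//
//	type lru struct {
//		cap   int
//		order *list.List               // front = most recently used
//		items map[string]*list.Element // O(1) lookup into order
//	}
//
//	func (c *lru) get(k string) (*tsdb.SeriesIDSet, bool) {
//		if e, ok := c.items[k]; ok {
//			c.order.MoveToFront(e) // promote on access
//			return e.Value.(entry).v, true
//		}
//		return nil, false
//	}
//
//	// put assumes k is not already cached (the real Put checks exists() first).
//	func (c *lru) put(k string, v *tsdb.SeriesIDSet) {
//		c.items[k] = c.order.PushFront(entry{k, v})
//		if c.order.Len() > c.cap {
//			victim := c.order.Back() // least recently used
//			c.order.Remove(victim)
//			delete(c.items, victim.Value.(entry).k)
//		}
//	}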
When -// more than c items are added to the cache, the least recently used item is -// evicted from the cache. -// -// A TagValueSeriesIDCache comprises a linked list implementation to track the -// order by which items should be evicted from the cache, and a hashmap implementation -// to provide constant time retrievals of items from the cache. -type TagValueSeriesIDCache struct { - sync.RWMutex - cache map[string]map[string]map[string]*list.Element - evictor *list.List - - capacity int -} - -// NewTagValueSeriesIDCache returns a TagValueSeriesIDCache with capacity c. -func NewTagValueSeriesIDCache(c int) *TagValueSeriesIDCache { - return &TagValueSeriesIDCache{ - cache: map[string]map[string]map[string]*list.Element{}, - evictor: list.New(), - capacity: c, - } -} - -// Get returns the SeriesIDSet associated with the {name, key, value} tuple if it -// exists. -func (c *TagValueSeriesIDCache) Get(name, key, value []byte) *tsdb.SeriesIDSet { - c.Lock() - defer c.Unlock() - return c.get(name, key, value) -} - -func (c *TagValueSeriesIDCache) get(name, key, value []byte) *tsdb.SeriesIDSet { - if mmap, ok := c.cache[string(name)]; ok { - if tkmap, ok := mmap[string(key)]; ok { - if ele, ok := tkmap[string(value)]; ok { - c.evictor.MoveToFront(ele) // This now becomes most recently used. - return ele.Value.(*seriesIDCacheElement).SeriesIDSet - } - } - } - return nil -} - -// exists returns true if the an item exists for the tuple {name, key, value}. -func (c *TagValueSeriesIDCache) exists(name, key, value []byte) bool { - if mmap, ok := c.cache[string(name)]; ok { - if tkmap, ok := mmap[string(key)]; ok { - _, ok := tkmap[string(value)] - return ok - } - } - return false -} - -// addToSet adds x to the SeriesIDSet associated with the tuple {name, key, value} -// if it exists. This method takes a lock on the underlying SeriesIDSet. -// -// NB this does not count as an access on the set—therefore the set is not promoted -// within the LRU cache. -func (c *TagValueSeriesIDCache) addToSet(name, key, value []byte, x uint64) { - if mmap, ok := c.cache[string(name)]; ok { - if tkmap, ok := mmap[string(key)]; ok { - if ele, ok := tkmap[string(value)]; ok { - ss := ele.Value.(*seriesIDCacheElement).SeriesIDSet - if ss == nil { - ele.Value.(*seriesIDCacheElement).SeriesIDSet = tsdb.NewSeriesIDSet(x) - return - } - ele.Value.(*seriesIDCacheElement).SeriesIDSet.Add(x) - } - } - } -} - -// measurementContainsSets returns true if there are sets cached for the provided measurement. -func (c *TagValueSeriesIDCache) measurementContainsSets(name []byte) bool { - _, ok := c.cache[string(name)] - return ok -} - -// Put adds the SeriesIDSet to the cache under the tuple {name, key, value}. If -// the cache is at its limit, then the least recently used item is evicted. -func (c *TagValueSeriesIDCache) Put(name, key, value []byte, ss *tsdb.SeriesIDSet) { - c.Lock() - // Check under the write lock if the relevant item is now in the cache. - if c.exists(name, key, value) { - c.Unlock() - return - } - defer c.Unlock() - - // Ensure our SeriesIDSet is go heap backed. - if ss != nil { - ss = ss.Clone() - } - - // Create list item, and add to the front of the eviction list. - listElement := c.evictor.PushFront(&seriesIDCacheElement{ - name: string(name), - key: string(key), - value: string(value), - SeriesIDSet: ss, - }) - - // Add the listElement to the set of items. 
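	// The cache map is three levels deep (measurement name -> tag key -> tag
	// value -> *list.Element) and is populated lazily below: only the innermost
	// missing level is created, every branch jumps to EVICT, and so the capacity
	// check in checkEviction runs exactly once per Put.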
- if mmap, ok := c.cache[string(name)]; ok { - if tkmap, ok := mmap[string(key)]; ok { - if _, ok := tkmap[string(value)]; ok { - goto EVICT - } - - // Add the set to the map - tkmap[string(value)] = listElement - goto EVICT - } - - // No series set map for the tag key - first tag value for the tag key. - mmap[string(key)] = map[string]*list.Element{string(value): listElement} - goto EVICT - } - - // No map for the measurement - first tag key for the measurement. - c.cache[string(name)] = map[string]map[string]*list.Element{ - string(key): {string(value): listElement}, - } - -EVICT: - c.checkEviction() -} - -// Delete removes x from the tuple {name, key, value} if it exists. -// This method takes a lock on the underlying SeriesIDSet. -func (c *TagValueSeriesIDCache) Delete(name, key, value []byte, x uint64) { - c.Lock() - c.delete(name, key, value, x) - c.Unlock() -} - -// delete removes x from the tuple {name, key, value} if it exists. -func (c *TagValueSeriesIDCache) delete(name, key, value []byte, x uint64) { - if mmap, ok := c.cache[string(name)]; ok { - if tkmap, ok := mmap[string(key)]; ok { - if ele, ok := tkmap[string(value)]; ok { - if ss := ele.Value.(*seriesIDCacheElement).SeriesIDSet; ss != nil { - ele.Value.(*seriesIDCacheElement).SeriesIDSet.Remove(x) - } - } - } - } -} - -// checkEviction checks if the cache is too big, and evicts the least recently used -// item if it is. -func (c *TagValueSeriesIDCache) checkEviction() { - if c.evictor.Len() <= c.capacity { - return - } - - e := c.evictor.Back() // Least recently used item. - listElement := e.Value.(*seriesIDCacheElement) - name := listElement.name - key := listElement.key - value := listElement.value - - c.evictor.Remove(e) // Remove from evictor - delete(c.cache[string(name)][string(key)], string(value)) // Remove from hashmap of items. - - // Check if there are no more tag values for the tag key. - if len(c.cache[string(name)][string(key)]) == 0 { - delete(c.cache[string(name)], string(key)) - } - - // Check there are no more tag keys for the measurement. - if len(c.cache[string(name)]) == 0 { - delete(c.cache, string(name)) - } -} - -// seriesIDCacheElement is an item stored within a cache. -type seriesIDCacheElement struct { - name string - key string - value string - SeriesIDSet *tsdb.SeriesIDSet -} diff --git a/tsdb/index/tsi1/cache_test.go b/tsdb/index/tsi1/cache_test.go deleted file mode 100644 index 2bfa83de8de..00000000000 --- a/tsdb/index/tsi1/cache_test.go +++ /dev/null @@ -1,245 +0,0 @@ -package tsi1 - -import ( - "math/rand" - "sync" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/tsdb" -) - -// This function is used to log the components of disk size when DiskSizeBytes fails -func (i *Index) LogDiskSize(t *testing.T) { - fs, err := i.RetainFileSet() - if err != nil { - t.Log("could not retain fileset") - } - defer fs.Release() - var size int64 - // Get MANIFEST sizes from each partition. 
- for count, p := range i.partitions { - sz := p.manifestSize - t.Logf("Partition %d has size %d", count, sz) - size += sz - } - for _, f := range fs.files { - sz := f.Size() - t.Logf("Size of file %s is %d", f.Path(), sz) - size += sz - } - t.Logf("Total size is %d", size) -} - -func TestTagValueSeriesIDCache(t *testing.T) { - m0k0v0 := tsdb.NewSeriesIDSet(1, 2, 3, 4, 5) - m0k0v1 := tsdb.NewSeriesIDSet(10, 20, 30, 40, 50) - m0k1v2 := tsdb.NewSeriesIDSet() - m1k3v0 := tsdb.NewSeriesIDSet(900, 0, 929) - - cache := TestCache{NewTagValueSeriesIDCache(10)} - cache.Has(t, "m0", "k0", "v0", nil) - - // Putting something in the cache makes it retrievable. - cache.PutByString("m0", "k0", "v0", m0k0v0) - cache.Has(t, "m0", "k0", "v0", m0k0v0) - - // Putting something else under the same key will not replace the original item. - cache.PutByString("m0", "k0", "v0", tsdb.NewSeriesIDSet(100, 200)) - cache.Has(t, "m0", "k0", "v0", m0k0v0) - - // Add another item to the cache. - cache.PutByString("m0", "k0", "v1", m0k0v1) - cache.Has(t, "m0", "k0", "v0", m0k0v0) - cache.Has(t, "m0", "k0", "v1", m0k0v1) - - // Add some more items - cache.PutByString("m0", "k1", "v2", m0k1v2) - cache.PutByString("m1", "k3", "v0", m1k3v0) - cache.Has(t, "m0", "k0", "v0", m0k0v0) - cache.Has(t, "m0", "k0", "v1", m0k0v1) - cache.Has(t, "m0", "k1", "v2", m0k1v2) - cache.Has(t, "m1", "k3", "v0", m1k3v0) -} - -func TestTagValueSeriesIDCache_eviction(t *testing.T) { - m0k0v0 := tsdb.NewSeriesIDSet(1, 2, 3, 4, 5) - m0k0v1 := tsdb.NewSeriesIDSet(10, 20, 30, 40, 50) - m0k1v2 := tsdb.NewSeriesIDSet() - m1k3v0 := tsdb.NewSeriesIDSet(900, 0, 929) - - cache := TestCache{NewTagValueSeriesIDCache(4)} - cache.PutByString("m0", "k0", "v0", m0k0v0) - cache.PutByString("m0", "k0", "v1", m0k0v1) - cache.PutByString("m0", "k1", "v2", m0k1v2) - cache.PutByString("m1", "k3", "v0", m1k3v0) - cache.Has(t, "m0", "k0", "v0", m0k0v0) - cache.Has(t, "m0", "k0", "v1", m0k0v1) - cache.Has(t, "m0", "k1", "v2", m0k1v2) - cache.Has(t, "m1", "k3", "v0", m1k3v0) - - // Putting another item in the cache will evict m0k0v0 - m2k0v0 := tsdb.NewSeriesIDSet(8, 8, 8) - cache.PutByString("m2", "k0", "v0", m2k0v0) - if got, exp := cache.evictor.Len(), 4; got != exp { - t.Fatalf("cache size was %d, expected %d", got, exp) - } - cache.HasNot(t, "m0", "k0", "v0") - cache.Has(t, "m0", "k0", "v1", m0k0v1) - cache.Has(t, "m0", "k1", "v2", m0k1v2) - cache.Has(t, "m1", "k3", "v0", m1k3v0) - cache.Has(t, "m2", "k0", "v0", m2k0v0) - - // Putting another item in the cache will evict m0k0v1. That will mean - // there will be no values left under the tuple {m0, k0} - if _, ok := cache.cache[string("m0")][string("k0")]; !ok { - t.Fatalf("Map missing for key %q", "k0") - } - - m2k0v1 := tsdb.NewSeriesIDSet(8, 8, 8) - cache.PutByString("m2", "k0", "v1", m2k0v1) - if got, exp := cache.evictor.Len(), 4; got != exp { - t.Fatalf("cache size was %d, expected %d", got, exp) - } - cache.HasNot(t, "m0", "k0", "v0") - cache.HasNot(t, "m0", "k0", "v1") - cache.Has(t, "m0", "k1", "v2", m0k1v2) - cache.Has(t, "m1", "k3", "v0", m1k3v0) - cache.Has(t, "m2", "k0", "v0", m2k0v0) - cache.Has(t, "m2", "k0", "v1", m2k0v1) - - // Further, the map for all tag values for the tuple {m0, k0} should be removed. - if _, ok := cache.cache[string("m0")][string("k0")]; ok { - t.Fatalf("Map present for key %q, should be removed", "k0") - } - - // Putting another item in the cache will evict m0k1v2. 
That will mean - // there will be no values left under the tuple {m0} - if _, ok := cache.cache[string("m0")]; !ok { - t.Fatalf("Map missing for key %q", "k0") - } - m2k0v2 := tsdb.NewSeriesIDSet(8, 9, 9) - cache.PutByString("m2", "k0", "v2", m2k0v2) - cache.HasNot(t, "m0", "k0", "v0") - cache.HasNot(t, "m0", "k0", "v1") - cache.HasNot(t, "m0", "k1", "v2") - cache.Has(t, "m1", "k3", "v0", m1k3v0) - cache.Has(t, "m2", "k0", "v0", m2k0v0) - cache.Has(t, "m2", "k0", "v1", m2k0v1) - cache.Has(t, "m2", "k0", "v2", m2k0v2) - - // The map for all tag values for the tuple {m0} should be removed. - if _, ok := cache.cache[string("m0")]; ok { - t.Fatalf("Map present for key %q, should be removed", "k0") - } - - // Putting another item in the cache will evict m2k0v0 if we first get m1k3v0 - // because m2k0v0 will have been used less recently... - m3k0v0 := tsdb.NewSeriesIDSet(1000) - cache.Has(t, "m1", "k3", "v0", m1k3v0) // This makes it the most recently used rather than the least. - cache.PutByString("m3", "k0", "v0", m3k0v0) - - cache.HasNot(t, "m0", "k0", "v0") - cache.HasNot(t, "m0", "k0", "v1") - cache.HasNot(t, "m0", "k1", "v2") - cache.HasNot(t, "m2", "k0", "v0") // This got pushed to the back. - - cache.Has(t, "m1", "k3", "v0", m1k3v0) // This got saved because we looked at it before we added to the cache - cache.Has(t, "m2", "k0", "v1", m2k0v1) - cache.Has(t, "m2", "k0", "v2", m2k0v2) - cache.Has(t, "m3", "k0", "v0", m3k0v0) -} - -func TestTagValueSeriesIDCache_addToSet(t *testing.T) { - cache := TestCache{NewTagValueSeriesIDCache(4)} - cache.PutByString("m0", "k0", "v0", nil) // Puts a nil set in the cache. - s2 := tsdb.NewSeriesIDSet(100) - cache.PutByString("m0", "k0", "v1", s2) - cache.Has(t, "m0", "k0", "v0", nil) - cache.Has(t, "m0", "k0", "v1", s2) - - cache.addToSet([]byte("m0"), []byte("k0"), []byte("v0"), 20) // No non-nil set exists so one will be created - cache.addToSet([]byte("m0"), []byte("k0"), []byte("v1"), 101) // No non-nil set exists so one will be created - cache.Has(t, "m0", "k0", "v1", tsdb.NewSeriesIDSet(100, 101)) - - ss := cache.GetByString("m0", "k0", "v0") - if !tsdb.NewSeriesIDSet(20).Equals(ss) { - t.Fatalf("series id set was %v", ss) - } - -} - -func TestTagValueSeriesIDCache_ConcurrentGetPut(t *testing.T) { - if testing.Short() { - t.Skip("Skipping long test") - } - - a := []string{"a", "b", "c", "d", "e"} - rnd := func() []byte { - return []byte(a[rand.Intn(len(a)-1)]) - } - - cache := TestCache{NewTagValueSeriesIDCache(100)} - done := make(chan struct{}) - var wg sync.WaitGroup - - for i := 0; i < 5; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case <-done: - return - default: - } - cache.Put(rnd(), rnd(), rnd(), tsdb.NewSeriesIDSet()) - } - }() - } - - for i := 0; i < 5; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case <-done: - return - default: - } - _ = cache.Get(rnd(), rnd(), rnd()) - } - }() - } - - time.Sleep(10 * time.Second) - close(done) - wg.Wait() -} - -type TestCache struct { - *TagValueSeriesIDCache -} - -func (c TestCache) Has(t *testing.T, name, key, value string, ss *tsdb.SeriesIDSet) { - if got, exp := c.Get([]byte(name), []byte(key), []byte(value)), ss; !got.Equals(exp) { - t.Helper() - t.Fatalf("got set %v, expected %v", got, exp) - } -} - -func (c TestCache) HasNot(t *testing.T, name, key, value string) { - if got := c.Get([]byte(name), []byte(key), []byte(value)); got != nil { - t.Helper() - t.Fatalf("got non-nil set %v for {%q, %q, %q}", got, name, key, value) - } -} - -func (c 
TestCache) GetByString(name, key, value string) *tsdb.SeriesIDSet { - return c.Get([]byte(name), []byte(key), []byte(value)) -} - -func (c TestCache) PutByString(name, key, value string, ss *tsdb.SeriesIDSet) { - c.Put([]byte(name), []byte(key), []byte(value), ss) -} diff --git a/tsdb/index/tsi1/doc.go b/tsdb/index/tsi1/doc.go deleted file mode 100644 index 39bc6076bd5..00000000000 --- a/tsdb/index/tsi1/doc.go +++ /dev/null @@ -1,226 +0,0 @@ -/* -Package tsi1 provides a memory-mapped index implementation that supports -high cardinality series. - -# Overview - -The top-level object in tsi1 is the Index. It is the primary access point from -the rest of the system. The Index is composed of LogFile and IndexFile objects. - -Log files are small write-ahead log files that record new series immediately -in the order that they are received. The data within the file is indexed -in-memory so it can be quickly accessed. When the system is restarted, this log -file is replayed and the in-memory representation is rebuilt. - -Index files also contain series information, however, they are highly indexed -so that reads can be performed quickly. Index files are built through a process -called compaction where a log file or multiple index files are merged together. - -# Operations - -The index can perform many tasks related to series, measurement, & tag data. -All data is inserted by adding a series to the index. When adding a series, -the measurement, tag keys, and tag values are all extracted and indexed -separately. - -Once a series has been added, it can be removed in several ways. First, the -individual series can be removed. Second, it can be removed as part of a bulk -operation by deleting the entire measurement. - -The query engine needs to be able to look up series in a variety of ways such -as by measurement name, by tag value, or by using regular expressions. The -index provides an API to iterate over subsets of series and perform set -operations such as unions and intersections. - -# Log File Layout - -The write-ahead file that series initially are inserted into simply appends -all new operations sequentially. It is simply composed of a series of log -entries. An entry contains a flag to specify the operation type, the measurement -name, the tag set, and a checksum. - - ┏━━━━━━━━━LogEntry━━━━━━━━━┓ - ┃ ┌──────────────────────┐ ┃ - ┃ │ Flag │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Measurement │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Key/Value │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Key/Value │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Key/Value │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Checksum │ ┃ - ┃ └──────────────────────┘ ┃ - ┗━━━━━━━━━━━━━━━━━━━━━━━━━━┛ - -When the log file is replayed, if the checksum is incorrect or the entry is -incomplete (because of a partially failed write) then the log is truncated. - -# Index File Layout - -The index file is composed of 3 main block types: one series block, one or more -tag blocks, and one measurement block. At the end of the index file is a -trailer that records metadata such as the offsets to these blocks. - -# Series Block Layout - -The series block stores raw series keys in sorted order. It also provides hash -indexes so that series can be looked up quickly. Hash indexes are inserted -periodically so that memory size is limited at write time. Once all the series -and hash indexes have been written then a list of index entries are written -so that hash indexes can be looked up via binary search. 
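Because both the series keys and the trailing index entries are written in
sorted order, the lookup step reduces to an ordinary sorted-slice search. A
minimal sketch (the keys shown are hypothetical and the on-disk entry format is
not reproduced here):

	entries := []string{"cpu,host=a", "cpu,host=b", "mem,host=a"} // sorted
	i := sort.SearchStrings(entries, "cpu,host=b")
	found := i < len(entries) && entries[i] == "cpu,host=b"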
- -The end of the block contains two HyperLogLog++ sketches which track the -estimated number of created series and deleted series. After the sketches is -a trailer which contains metadata about the block. - - ┏━━━━━━━SeriesBlock━━━━━━━━┓ - ┃ ┌──────────────────────┐ ┃ - ┃ │ Series Key │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Series Key │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Series Key │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ │ ┃ - ┃ │ Hash Index │ ┃ - ┃ │ │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Series Key │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Series Key │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Series Key │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ │ ┃ - ┃ │ Hash Index │ ┃ - ┃ │ │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Index Entries │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ HLL Sketches │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Trailer │ ┃ - ┃ └──────────────────────┘ ┃ - ┗━━━━━━━━━━━━━━━━━━━━━━━━━━┛ - -# Tag Block Layout - -After the series block is one or more tag blocks. One of these blocks exists -for every measurement in the index file. The block is structured as a sorted -list of values for each key and then a sorted list of keys. Each of these lists -has their own hash index for fast direct lookups. - - ┏━━━━━━━━Tag Block━━━━━━━━━┓ - ┃ ┌──────────────────────┐ ┃ - ┃ │ Value │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Value │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Value │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ │ ┃ - ┃ │ Hash Index │ ┃ - ┃ │ │ ┃ - ┃ └──────────────────────┘ ┃ - ┃ ┌──────────────────────┐ ┃ - ┃ │ Value │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Value │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ │ ┃ - ┃ │ Hash Index │ ┃ - ┃ │ │ ┃ - ┃ └──────────────────────┘ ┃ - ┃ ┌──────────────────────┐ ┃ - ┃ │ Key │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Key │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ │ ┃ - ┃ │ Hash Index │ ┃ - ┃ │ │ ┃ - ┃ └──────────────────────┘ ┃ - ┃ ┌──────────────────────┐ ┃ - ┃ │ Trailer │ ┃ - ┃ └──────────────────────┘ ┃ - ┗━━━━━━━━━━━━━━━━━━━━━━━━━━┛ - -Each entry for values contains a sorted list of offsets for series keys that use -that value. Series iterators can be built around a single tag key value or -multiple iterators can be merged with set operators such as union or -intersection. - -# Measurement block - -The measurement block stores a sorted list of measurements, their associated -series offsets, and the offset to their tag block. This allows all series for -a measurement to be traversed quickly and it allows fast direct lookups of -measurements and their tags. - -This block also contains HyperLogLog++ sketches for new and deleted -measurements. - - ┏━━━━Measurement Block━━━━━┓ - ┃ ┌──────────────────────┐ ┃ - ┃ │ Measurement │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Measurement │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Measurement │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ │ ┃ - ┃ │ Hash Index │ ┃ - ┃ │ │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ HLL Sketches │ ┃ - ┃ ├──────────────────────┤ ┃ - ┃ │ Trailer │ ┃ - ┃ └──────────────────────┘ ┃ - ┗━━━━━━━━━━━━━━━━━━━━━━━━━━┛ - -# Manifest file - -The index is simply an ordered set of log and index files. These files can be -merged together or rewritten but their order must always be the same. This is -because series, measurements, & tags can be marked as deleted (aka tombstoned) -and this action needs to be tracked in time order. - -Whenever the set of active files is changed, a manifest file is written to -track the set. 
The manifest specifies the ordering of files and, on startup, -all files not in the manifest are removed from the index directory. - -# Compacting index files - -Compaction is the process of taking files and merging them together into a -single file. There are two stages of compaction within TSI. - -First, once log files exceed a size threshold then they are compacted into an -index file. This threshold is relatively small because log files must maintain -their index in the heap which TSI tries to avoid. Small log files are also very -quick to convert into an index file so this is done aggressively. - -Second, once a contiguous set of index files exceed a factor (e.g. 10x) then -they are all merged together into a single index file and the old files are -discarded. Because all blocks are written in sorted order, the new index file -can be streamed and minimize memory use. - -# Concurrency - -Index files are immutable so they do not require fine grained locks, however, -compactions require that we track which files are in use so they are not -discarded too soon. This is done by using reference counting with file sets. - -A file set is simply an ordered list of index files. When the current file set -is obtained from the index, a counter is incremented to track its usage. Once -the user is done with the file set, it is released and the counter is -decremented. A file cannot be removed from the file system until this counter -returns to zero. - -Besides the reference counting, there are no other locking mechanisms when -reading or writing index files. Log files, however, do require a lock whenever -they are accessed. This is another reason to minimize log file size. -*/ -package tsi1 diff --git a/tsdb/index/tsi1/file_set.go b/tsdb/index/tsi1/file_set.go deleted file mode 100644 index b42e482ce61..00000000000 --- a/tsdb/index/tsi1/file_set.go +++ /dev/null @@ -1,568 +0,0 @@ -package tsi1 - -import ( - "bytes" - "fmt" - "regexp" - "sync" - "unsafe" - - "github.com/influxdata/influxdb/v2/pkg/estimator" - "github.com/influxdata/influxdb/v2/pkg/estimator/hll" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxql" -) - -// FileSet represents a collection of files. -type FileSet struct { - files []File -} - -// NewFileSet returns a new instance of FileSet. -func NewFileSet(files []File) *FileSet { - return &FileSet{ - files: files, - } -} - -// bytes estimates the memory footprint of this FileSet, in bytes. -func (fs *FileSet) bytes() int { - var b int - // Do not count SeriesFile because it belongs to the code that constructed this FileSet. - for _, file := range fs.files { - b += file.bytes() - } - b += int(unsafe.Sizeof(*fs)) - return b -} - -// Close closes all the files in the file set. -func (fs FileSet) Close() error { - return Files(fs.files).Close() -} - -// Retain adds a reference count to all files. -func (fs *FileSet) Retain() { - for _, f := range fs.files { - f.Retain() - } -} - -// Release removes a reference count from all files. -func (fs *FileSet) Release() { - for _, f := range fs.files { - f.Release() - } -} - -// PrependLogFile returns a new file set with f added at the beginning. -// Filters do not need to be rebuilt because log files have no bloom filter. -func (fs *FileSet) PrependLogFile(f *LogFile) *FileSet { - return &FileSet{ - files: append([]File{f}, fs.files...), - } -} - -// Size returns the on-disk size of the FileSet. 
-func (fs *FileSet) Size() int64 { - var total int64 - for _, f := range fs.files { - total += f.Size() - } - return total -} - -// MustReplace swaps a list of files for a single file and returns a new file set. -// The caller should always guarantee that the files exist and are contiguous. -func (fs *FileSet) MustReplace(oldFiles []File, newFile File) *FileSet { - assert(len(oldFiles) > 0, "cannot replace empty files") - - // Find index of first old file. - var i int - for ; i < len(fs.files); i++ { - if fs.files[i] == oldFiles[0] { - break - } else if i == len(fs.files)-1 { - panic("first replacement file not found") - } - } - - // Ensure all old files are contiguous. - for j := range oldFiles { - if fs.files[i+j] != oldFiles[j] { - panic(fmt.Sprintf("cannot replace non-contiguous files: subset=%+v, fileset=%+v", Files(oldFiles).IDs(), Files(fs.files).IDs())) - } - } - - // Copy to new fileset. - other := make([]File, len(fs.files)-len(oldFiles)+1) - copy(other[:i], fs.files[:i]) - other[i] = newFile - copy(other[i+1:], fs.files[i+len(oldFiles):]) - - // Build new fileset and rebuild changed filters. - return &FileSet{ - files: other, - } -} - -// MaxID returns the highest file identifier. -func (fs *FileSet) MaxID() int { - var max int - for _, f := range fs.files { - if i := f.ID(); i > max { - max = i - } - } - return max -} - -// Files returns all files in the set. -func (fs *FileSet) Files() []File { - return fs.files -} - -// LastContiguousIndexFilesByLevel returns the last contiguous files by level. -// These can be used by the compaction scheduler. -func (fs *FileSet) LastContiguousIndexFilesByLevel(level int) []*IndexFile { - if level == 0 { - return nil - } - - var a []*IndexFile - for i := len(fs.files) - 1; i >= 0; i-- { - f := fs.files[i] - - // Ignore files above level, stop on files below level. - if level < f.Level() { - continue - } else if level > f.Level() { - break - } - - a = append([]*IndexFile{f.(*IndexFile)}, a...) - } - return a -} - -// Measurement returns a measurement by name. -func (fs *FileSet) Measurement(name []byte) MeasurementElem { - for _, f := range fs.files { - if e := f.Measurement(name); e == nil { - continue - } else if e.Deleted() { - return nil - } else { - return e - } - } - return nil -} - -// MeasurementIterator returns an iterator over all measurements in the index. -func (fs *FileSet) MeasurementIterator() MeasurementIterator { - a := make([]MeasurementIterator, 0, len(fs.files)) - for _, f := range fs.files { - itr := f.MeasurementIterator() - if itr != nil { - a = append(a, itr) - } - } - return MergeMeasurementIterators(a...) -} - -// TagKeyIterator returns an iterator over all tag keys for a measurement. -func (fs *FileSet) TagKeyIterator(name []byte) TagKeyIterator { - a := make([]TagKeyIterator, 0, len(fs.files)) - for _, f := range fs.files { - itr := f.TagKeyIterator(name) - if itr != nil { - a = append(a, itr) - } - } - return MergeTagKeyIterators(a...) -} - -// MeasurementSeriesIDIterator returns a series iterator for a measurement. -func (fs *FileSet) MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator { - a := make([]tsdb.SeriesIDIterator, 0, len(fs.files)) - for _, f := range fs.files { - itr := f.MeasurementSeriesIDIterator(name) - if itr != nil { - a = append(a, itr) - } - } - return tsdb.MergeSeriesIDIterators(a...) -} - -// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression. 
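// The expression operates on the pseudo tag key "_tagKey": a condition such as
// _tagKey = 'region' keeps only that key, the regex operators (=~ / !~) filter
// keys by pattern, and AND/OR combine sub-results by set intersection and
// union respectively, as the cases below show.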
-func (fs *FileSet) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) { - // Return all keys if no condition was passed in. - if expr == nil { - m := make(map[string]struct{}) - if itr := fs.TagKeyIterator(name); itr != nil { - for e := itr.Next(); e != nil; e = itr.Next() { - m[string(e.Key())] = struct{}{} - } - } - return m, nil - } - - switch e := expr.(type) { - case *influxql.BinaryExpr: - switch e.Op { - case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX: - tag, ok := e.LHS.(*influxql.VarRef) - if !ok { - return nil, fmt.Errorf("left side of '%s' must be a tag key", e.Op.String()) - } else if tag.Val != "_tagKey" { - return nil, nil - } - - if influxql.IsRegexOp(e.Op) { - re, ok := e.RHS.(*influxql.RegexLiteral) - if !ok { - return nil, fmt.Errorf("right side of '%s' must be a regular expression", e.Op.String()) - } - return fs.tagKeysByFilter(name, e.Op, nil, re.Val), nil - } - - s, ok := e.RHS.(*influxql.StringLiteral) - if !ok { - return nil, fmt.Errorf("right side of '%s' must be a tag value string", e.Op.String()) - } - return fs.tagKeysByFilter(name, e.Op, []byte(s.Val), nil), nil - - case influxql.AND, influxql.OR: - lhs, err := fs.MeasurementTagKeysByExpr(name, e.LHS) - if err != nil { - return nil, err - } - - rhs, err := fs.MeasurementTagKeysByExpr(name, e.RHS) - if err != nil { - return nil, err - } - - if lhs != nil && rhs != nil { - if e.Op == influxql.OR { - return unionStringSets(lhs, rhs), nil - } - return intersectStringSets(lhs, rhs), nil - } else if lhs != nil { - return lhs, nil - } else if rhs != nil { - return rhs, nil - } - return nil, nil - default: - return nil, fmt.Errorf("invalid operator for tag keys by expression") - } - - case *influxql.ParenExpr: - return fs.MeasurementTagKeysByExpr(name, e.Expr) - } - - return nil, fmt.Errorf("invalid measurement tag keys expression: %#v", expr) -} - -// tagKeysByFilter will filter the tag keys for the measurement. -func (fs *FileSet) tagKeysByFilter(name []byte, op influxql.Token, val []byte, regex *regexp.Regexp) map[string]struct{} { - ss := make(map[string]struct{}) - itr := fs.TagKeyIterator(name) - if itr != nil { - for e := itr.Next(); e != nil; e = itr.Next() { - var matched bool - switch op { - case influxql.EQ: - matched = bytes.Equal(e.Key(), val) - case influxql.NEQ: - matched = !bytes.Equal(e.Key(), val) - case influxql.EQREGEX: - matched = regex.Match(e.Key()) - case influxql.NEQREGEX: - matched = !regex.Match(e.Key()) - } - - if !matched { - continue - } - ss[string(e.Key())] = struct{}{} - } - } - return ss -} - -// TagKeySeriesIDIterator returns a series iterator for all values across a single key. -func (fs *FileSet) TagKeySeriesIDIterator(name, key []byte) (tsdb.SeriesIDIterator, error) { - a := make([]tsdb.SeriesIDIterator, 0, len(fs.files)) - for _, f := range fs.files { - itr, err := f.TagKeySeriesIDIterator(name, key) - if err != nil { - return nil, err - } else if itr != nil { - a = append(a, itr) - } - } - return tsdb.MergeSeriesIDIterators(a...), nil -} - -// HasTagKey returns true if the tag key exists. -func (fs *FileSet) HasTagKey(name, key []byte) bool { - for _, f := range fs.files { - if e := f.TagKey(name, key); e != nil { - return !e.Deleted() - } - } - return false -} - -// HasTagValue returns true if the tag value exists. 
-func (fs *FileSet) HasTagValue(name, key, value []byte) bool { - for _, f := range fs.files { - if e := f.TagValue(name, key, value); e != nil { - return !e.Deleted() - } - } - return false -} - -// TagValueIterator returns a value iterator for a tag key. -func (fs *FileSet) TagValueIterator(name, key []byte) TagValueIterator { - a := make([]TagValueIterator, 0, len(fs.files)) - for _, f := range fs.files { - itr := f.TagValueIterator(name, key) - if itr != nil { - a = append(a, itr) - } - } - return MergeTagValueIterators(a...) -} - -// TagValueSeriesIDIterator returns a series iterator for a single tag value. -func (fs *FileSet) TagValueSeriesIDIterator(name, key, value []byte) (tsdb.SeriesIDIterator, error) { - ss := tsdb.NewSeriesIDSet() - - var ftss *tsdb.SeriesIDSet - for i := len(fs.files) - 1; i >= 0; i-- { - f := fs.files[i] - - // Remove tombstones set in previous file. - if ftss != nil && ftss.Cardinality() > 0 { - ss = ss.AndNot(ftss) - } - - // Fetch tag value series set for this file and merge into overall set. - fss, err := f.TagValueSeriesIDSet(name, key, value) - if err != nil { - return nil, err - } else if fss != nil { - ss.Merge(fss) - } - - // Fetch tombstone set to be processed on next file. - if ftss, err = f.TombstoneSeriesIDSet(); err != nil { - return nil, err - } - } - return tsdb.NewSeriesIDSetIterator(ss), nil -} - -// MeasurementsSketches returns the merged measurement sketches for the FileSet. -func (fs *FileSet) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) { - sketch, tSketch := hll.NewDefaultPlus(), hll.NewDefaultPlus() - for _, f := range fs.files { - if s, t, err := f.MeasurementsSketches(); err != nil { - return nil, nil, err - } else if err := sketch.Merge(s); err != nil { - return nil, nil, err - } else if err := tSketch.Merge(t); err != nil { - return nil, nil, err - } - } - return sketch, tSketch, nil -} - -// SeriesSketches returns the merged measurement sketches for the FileSet. -func (fs *FileSet) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) { - sketch, tSketch := hll.NewDefaultPlus(), hll.NewDefaultPlus() - for _, f := range fs.files { - if s, t, err := f.SeriesSketches(); err != nil { - return nil, nil, err - } else if err := sketch.Merge(s); err != nil { - return nil, nil, err - } else if err := tSketch.Merge(t); err != nil { - return nil, nil, err - } - } - return sketch, tSketch, nil -} - -// File represents a log or index file. -type File interface { - Close() error - Path() string - - ID() int - Level() int - - Measurement(name []byte) MeasurementElem - MeasurementIterator() MeasurementIterator - MeasurementHasSeries(ss *tsdb.SeriesIDSet, name []byte) bool - - TagKey(name, key []byte) TagKeyElem - TagKeyIterator(name []byte) TagKeyIterator - - TagValue(name, key, value []byte) TagValueElem - TagValueIterator(name, key []byte) TagValueIterator - - // Series iteration. - MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator - TagKeySeriesIDIterator(name, key []byte) (tsdb.SeriesIDIterator, error) - TagValueSeriesIDSet(name, key, value []byte) (*tsdb.SeriesIDSet, error) - - // Sketches for cardinality estimation - MeasurementsSketches() (s, t estimator.Sketch, err error) - SeriesSketches() (s, t estimator.Sketch, err error) - - // Bitmap series existence. - SeriesIDSet() (*tsdb.SeriesIDSet, error) - TombstoneSeriesIDSet() (*tsdb.SeriesIDSet, error) - - // Reference counting. 
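	// Retain increments the file's reference count and Release decrements it; a
	// file whose count has not dropped back to zero must not be deleted from
	// disk, which is how compactions avoid removing index files that an
	// in-flight query is still reading (see the Concurrency section of doc.go).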
- Retain() - Release() - - // Size of file on disk - Size() int64 - - // Estimated memory footprint - bytes() int -} - -type Files []File - -func (a Files) IDs() []int { - ids := make([]int, len(a)) - for i := range a { - ids[i] = a[i].ID() - } - return ids -} - -func (a Files) Close() error { - var err error - for _, f := range a { - if e := f.Close(); e != nil && err == nil { - err = e - } - } - return err -} - -// fileSetSeriesIDIterator attaches a fileset to an iterator that is released on close. -type fileSetSeriesIDIterator struct { - once sync.Once - fs *FileSet - itr tsdb.SeriesIDIterator -} - -func newFileSetSeriesIDIterator(fs *FileSet, itr tsdb.SeriesIDIterator) tsdb.SeriesIDIterator { - if itr == nil { - fs.Release() - return nil - } - if itr, ok := itr.(tsdb.SeriesIDSetIterator); ok { - return &fileSetSeriesIDSetIterator{fs: fs, itr: itr} - } - return &fileSetSeriesIDIterator{fs: fs, itr: itr} -} - -func (itr *fileSetSeriesIDIterator) Next() (tsdb.SeriesIDElem, error) { - return itr.itr.Next() -} - -func (itr *fileSetSeriesIDIterator) Close() error { - itr.once.Do(func() { itr.fs.Release() }) - return itr.itr.Close() -} - -// fileSetSeriesIDSetIterator attaches a fileset to an iterator that is released on close. -type fileSetSeriesIDSetIterator struct { - once sync.Once - fs *FileSet - itr tsdb.SeriesIDSetIterator -} - -func (itr *fileSetSeriesIDSetIterator) Next() (tsdb.SeriesIDElem, error) { - return itr.itr.Next() -} - -func (itr *fileSetSeriesIDSetIterator) Close() error { - itr.once.Do(func() { itr.fs.Release() }) - return itr.itr.Close() -} - -func (itr *fileSetSeriesIDSetIterator) SeriesIDSet() *tsdb.SeriesIDSet { - return itr.itr.SeriesIDSet() -} - -// fileSetMeasurementIterator attaches a fileset to an iterator that is released on close. -type fileSetMeasurementIterator struct { - once sync.Once - fs *FileSet - itr tsdb.MeasurementIterator -} - -func newFileSetMeasurementIterator(fs *FileSet, itr tsdb.MeasurementIterator) *fileSetMeasurementIterator { - return &fileSetMeasurementIterator{fs: fs, itr: itr} -} - -func (itr *fileSetMeasurementIterator) Next() ([]byte, error) { - return itr.itr.Next() -} - -func (itr *fileSetMeasurementIterator) Close() error { - itr.once.Do(func() { itr.fs.Release() }) - return itr.itr.Close() -} - -// fileSetTagKeyIterator attaches a fileset to an iterator that is released on close. -type fileSetTagKeyIterator struct { - once sync.Once - fs *FileSet - itr tsdb.TagKeyIterator -} - -func newFileSetTagKeyIterator(fs *FileSet, itr tsdb.TagKeyIterator) *fileSetTagKeyIterator { - return &fileSetTagKeyIterator{fs: fs, itr: itr} -} - -func (itr *fileSetTagKeyIterator) Next() ([]byte, error) { - return itr.itr.Next() -} - -func (itr *fileSetTagKeyIterator) Close() error { - itr.once.Do(func() { itr.fs.Release() }) - return itr.itr.Close() -} - -// fileSetTagValueIterator attaches a fileset to an iterator that is released on close. 
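// Like the other fileSet*Iterator wrappers above, the type below exists so that
// the FileSet's reference count is held for exactly as long as the iterator is
// open; Close releases the fileset, and the sync.Once guard makes a repeated
// Close harmless.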
-type fileSetTagValueIterator struct { - once sync.Once - fs *FileSet - itr tsdb.TagValueIterator -} - -func newFileSetTagValueIterator(fs *FileSet, itr tsdb.TagValueIterator) *fileSetTagValueIterator { - return &fileSetTagValueIterator{fs: fs, itr: itr} -} - -func (itr *fileSetTagValueIterator) Next() ([]byte, error) { - return itr.itr.Next() -} - -func (itr *fileSetTagValueIterator) Close() error { - itr.once.Do(func() { itr.fs.Release() }) - return itr.itr.Close() -} diff --git a/tsdb/index/tsi1/file_set_test.go b/tsdb/index/tsi1/file_set_test.go deleted file mode 100644 index 73bf5e5c2e6..00000000000 --- a/tsdb/index/tsi1/file_set_test.go +++ /dev/null @@ -1,309 +0,0 @@ -package tsi1_test - -import ( - "fmt" - "reflect" - "sort" - "testing" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" -) - -// Ensure fileset can return an iterator over all series in the index. -func TestFileSet_SeriesIDIterator(t *testing.T) { - idx := MustOpenIndex(t, 1) - defer idx.Close() - - // Create initial set of series. - if err := idx.CreateSeriesSliceIfNotExists([]Series{ - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})}, - {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})}, - }); err != nil { - t.Fatal(err) - } - - // Verify initial set of series. - idx.Run(t, func(t *testing.T) { - fs, err := idx.PartitionAt(0).RetainFileSet() - if err != nil { - t.Fatal(err) - } - defer fs.Release() - - itr := idx.SeriesFile.SeriesIDIterator() - if itr == nil { - t.Fatal("expected iterator") - } - if result := MustReadAllSeriesIDIteratorString(idx.SeriesFile.SeriesFile, itr); !reflect.DeepEqual(result, []string{ - "cpu,[{region east}]", - "cpu,[{region west}]", - "mem,[{region east}]", - }) { - t.Fatalf("unexpected keys: %s", result) - } - }) - - // Add more series. - if err := idx.CreateSeriesSliceIfNotExists([]Series{ - {Name: []byte("disk")}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "north"})}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, - }); err != nil { - t.Fatal(err) - } - - // Verify additional series. - idx.Run(t, func(t *testing.T) { - fs, err := idx.PartitionAt(0).RetainFileSet() - if err != nil { - t.Fatal(err) - } - defer fs.Release() - - itr := idx.SeriesFile.SeriesIDIterator() - if itr == nil { - t.Fatal("expected iterator") - } - - if result := MustReadAllSeriesIDIteratorString(idx.SeriesFile.SeriesFile, itr); !reflect.DeepEqual(result, []string{ - "cpu,[{region east}]", - "cpu,[{region north}]", - "cpu,[{region west}]", - "disk,[]", - "mem,[{region east}]", - }) { - t.Fatalf("unexpected keys: %s", result) - } - }) -} - -// Ensure fileset can return an iterator over all series for one measurement. -func TestFileSet_MeasurementSeriesIDIterator(t *testing.T) { - idx := MustOpenIndex(t, 1) - defer idx.Close() - - // Create initial set of series. - if err := idx.CreateSeriesSliceIfNotExists([]Series{ - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})}, - {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})}, - }); err != nil { - t.Fatal(err) - } - - // Verify initial set of series. 
- idx.Run(t, func(t *testing.T) { - fs, err := idx.PartitionAt(0).RetainFileSet() - if err != nil { - t.Fatal(err) - } - defer fs.Release() - - itr := fs.MeasurementSeriesIDIterator([]byte("cpu")) - if itr == nil { - t.Fatal("expected iterator") - } - - if result := MustReadAllSeriesIDIteratorString(idx.SeriesFile.SeriesFile, itr); !reflect.DeepEqual(result, []string{ - "cpu,[{region east}]", - "cpu,[{region west}]", - }) { - t.Fatalf("unexpected keys: %s", result) - } - }) - - // Add more series. - if err := idx.CreateSeriesSliceIfNotExists([]Series{ - {Name: []byte("disk")}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "north"})}, - }); err != nil { - t.Fatal(err) - } - - // Verify additional series. - idx.Run(t, func(t *testing.T) { - fs, err := idx.PartitionAt(0).RetainFileSet() - if err != nil { - t.Fatal(err) - } - defer fs.Release() - - itr := fs.MeasurementSeriesIDIterator([]byte("cpu")) - if itr == nil { - t.Fatalf("expected iterator") - } - - if result := MustReadAllSeriesIDIteratorString(idx.SeriesFile.SeriesFile, itr); !reflect.DeepEqual(result, []string{ - "cpu,[{region east}]", - "cpu,[{region north}]", - "cpu,[{region west}]", - }) { - t.Fatalf("unexpected keys: %s", result) - } - }) -} - -// Ensure fileset can return an iterator over all measurements for the index. -func TestFileSet_MeasurementIterator(t *testing.T) { - idx := MustOpenIndex(t, 1) - defer idx.Close() - - // Create initial set of series. - if err := idx.CreateSeriesSliceIfNotExists([]Series{ - {Name: []byte("cpu")}, - {Name: []byte("mem")}, - }); err != nil { - t.Fatal(err) - } - - // Verify initial set of series. - idx.Run(t, func(t *testing.T) { - fs, err := idx.PartitionAt(0).RetainFileSet() - if err != nil { - t.Fatal(err) - } - defer fs.Release() - - itr := fs.MeasurementIterator() - if itr == nil { - t.Fatal("expected iterator") - } - - expectedNames := []string{"cpu", "mem", ""} // Empty string implies end - for _, name := range expectedNames { - e := itr.Next() - if name == "" && e != nil { - t.Errorf("got measurement %s, expected nil measurement", e.Name()) - } else if e == nil && name != "" { - t.Errorf("got nil measurement, expected %s", name) - } else if e != nil && string(e.Name()) != name { - t.Errorf("got measurement %s, expected %s", e.Name(), name) - } - } - }) - - // Add more series. - if err := idx.CreateSeriesSliceIfNotExists([]Series{ - {Name: []byte("disk"), Tags: models.NewTags(map[string]string{"foo": "bar"})}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "north", "x": "y"})}, - }); err != nil { - t.Fatal(err) - } - - // Verify additional series. - idx.Run(t, func(t *testing.T) { - fs, err := idx.PartitionAt(0).RetainFileSet() - if err != nil { - t.Fatal(err) - } - defer fs.Release() - - itr := fs.MeasurementIterator() - if itr == nil { - t.Fatal("expected iterator") - } - - expectedNames := []string{"cpu", "disk", "mem", ""} // Empty string implies end - for _, name := range expectedNames { - e := itr.Next() - if name == "" && e != nil { - t.Errorf("got measurement %s, expected nil measurement", e.Name()) - } else if e == nil && name != "" { - t.Errorf("got nil measurement, expected %s", name) - } else if e != nil && string(e.Name()) != name { - t.Errorf("got measurement %s, expected %s", e.Name(), name) - } - } - }) -} - -// Ensure fileset can return an iterator over all keys for one measurement. -func TestFileSet_TagKeyIterator(t *testing.T) { - idx := MustOpenIndex(t, 1) - defer idx.Close() - - // Create initial set of series. 
- if err := idx.CreateSeriesSliceIfNotExists([]Series{ - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west", "type": "gpu"})}, - {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east", "misc": "other"})}, - }); err != nil { - t.Fatal(err) - } - - // Verify initial set of series. - idx.Run(t, func(t *testing.T) { - fs, err := idx.PartitionAt(0).RetainFileSet() - if err != nil { - t.Fatal(err) - } - defer fs.Release() - - itr := fs.TagKeyIterator([]byte("cpu")) - if itr == nil { - t.Fatalf("expected iterator") - } - - if e := itr.Next(); string(e.Key()) != `region` { - t.Fatalf("unexpected key: %s", e.Key()) - } else if e := itr.Next(); string(e.Key()) != `type` { - t.Fatalf("unexpected key: %s", e.Key()) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected nil key: %s", e.Key()) - } - }) - - // Add more series. - if err := idx.CreateSeriesSliceIfNotExists([]Series{ - {Name: []byte("disk"), Tags: models.NewTags(map[string]string{"foo": "bar"})}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "north", "x": "y"})}, - }); err != nil { - t.Fatal(err) - } - - // Verify additional series. - idx.Run(t, func(t *testing.T) { - fs, err := idx.PartitionAt(0).RetainFileSet() - if err != nil { - t.Fatal(err) - } - defer fs.Release() - - itr := fs.TagKeyIterator([]byte("cpu")) - if itr == nil { - t.Fatal("expected iterator") - } - - if e := itr.Next(); string(e.Key()) != `region` { - t.Fatalf("unexpected key: %s", e.Key()) - } else if e := itr.Next(); string(e.Key()) != `type` { - t.Fatalf("unexpected key: %s", e.Key()) - } else if e := itr.Next(); string(e.Key()) != `x` { - t.Fatalf("unexpected key: %s", e.Key()) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected nil key: %s", e.Key()) - } - }) -} - -func MustReadAllSeriesIDIteratorString(sfile *tsdb.SeriesFile, itr tsdb.SeriesIDIterator) []string { - // Read all ids. - ids, err := tsdb.ReadAllSeriesIDIterator(itr) - if err != nil { - panic(err) - } - - // Convert to keys and sort. - keys := sfile.SeriesKeys(ids) - sort.Slice(keys, func(i, j int) bool { return tsdb.CompareSeriesKeys(keys[i], keys[j]) == -1 }) - - // Convert to strings. - a := make([]string, len(keys)) - for i := range a { - name, tags := tsdb.ParseSeriesKey(keys[i]) - a[i] = fmt.Sprintf("%s,%s", name, tags.String()) - } - return a -} diff --git a/tsdb/index/tsi1/index.go b/tsdb/index/tsi1/index.go deleted file mode 100644 index bc8629e9b62..00000000000 --- a/tsdb/index/tsi1/index.go +++ /dev/null @@ -1,1134 +0,0 @@ -package tsi1 - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "regexp" - "runtime" - "strconv" - "sync" - "sync/atomic" - "time" - "unsafe" - - "github.com/cespare/xxhash" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/estimator" - "github.com/influxdata/influxdb/v2/pkg/estimator/hll" - "github.com/influxdata/influxdb/v2/pkg/slices" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxql" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -// IndexName is the name of the index. -const IndexName = tsdb.TSI1IndexName - -// ErrCompactionInterrupted is returned if compactions are disabled or -// an index is closed while a compaction is occurring. 
-var ErrCompactionInterrupted = errors.New("tsi1: compaction interrupted")
-
-func init() {
-	if os.Getenv("INFLUXDB_EXP_TSI_PARTITIONS") != "" {
-		i, err := strconv.Atoi(os.Getenv("INFLUXDB_EXP_TSI_PARTITIONS"))
-		if err != nil {
-			panic(err)
-		}
-		DefaultPartitionN = uint64(i)
-	}
-
-	tsdb.RegisterIndex(IndexName, func(_ uint64, db, path string, _ *tsdb.SeriesIDSet, sfile *tsdb.SeriesFile, opt tsdb.EngineOptions) tsdb.Index {
-		idx := NewIndex(sfile, db,
-			WithPath(path),
-			WithMaximumLogFileSize(int64(opt.Config.MaxIndexLogFileSize)),
-			WithMaximumLogFileAge(time.Duration(opt.Config.CompactFullWriteColdDuration)),
-			WithSeriesIDCacheSize(opt.Config.SeriesIDSetCacheSize),
-		)
-		return idx
-	})
-}
-
-// DefaultPartitionN determines how many shards the index will be partitioned into.
-//
-// NOTE: Currently, this must not be changed once a database is created. Further,
-// it must also be a power of 2.
-var DefaultPartitionN uint64 = 8
-
-// An IndexOption is a functional option for changing the configuration of
-// an Index.
-type IndexOption func(i *Index)
-
-// WithPath sets the root path of the Index.
-var WithPath = func(path string) IndexOption {
-	return func(i *Index) {
-		i.path = path
-	}
-}
-
-// DisableCompactions disables compactions on the Index.
-var DisableCompactions = func() IndexOption {
-	return func(i *Index) {
-		i.disableCompactions = true
-	}
-}
-
-// WithLogger sets the logger for the Index.
-var WithLogger = func(l zap.Logger) IndexOption {
-	return func(i *Index) {
-		i.logger = l.With(zap.String("index", "tsi"))
-	}
-}
-
-// WithMaximumLogFileSize sets the maximum size of LogFiles before they're
-// compacted into IndexFiles.
-var WithMaximumLogFileSize = func(size int64) IndexOption {
-	return func(i *Index) {
-		i.maxLogFileSize = size
-	}
-}
-
-var WithMaximumLogFileAge = func(dur time.Duration) IndexOption {
-	return func(i *Index) {
-		i.maxLogFileAge = dur
-	}
-}
-
-// DisableFsync disables flushing and syncing of underlying files. Primarily this
-// impacts the LogFiles. This option can be set when working with the index in
-// an offline manner, for cases where a hard failure can be overcome by re-running the tooling.
-var DisableFsync = func() IndexOption {
-	return func(i *Index) {
-		i.disableFsync = true
-	}
-}
-
-// WithLogFileBufferSize sets the size of the buffer used within LogFiles.
-// Typically appending an entry to a LogFile involves writing 11 or 12 bytes, so
-// depending on how many new series are being created within a batch, it may
-// be appropriate to set this.
-var WithLogFileBufferSize = func(sz int) IndexOption {
-	return func(i *Index) {
-		if sz > 1<<17 { // 128K
-			sz = 1 << 17
-		} else if sz < 1<<12 {
-			sz = 1 << 12 // 4K (runtime default)
-		}
-		i.logfileBufferSize = sz
-	}
-}
-
-// WithSeriesIDCacheSize sets the size of the series id set cache.
-// If set to 0, then the cache is disabled.
-var WithSeriesIDCacheSize = func(sz int) IndexOption {
-	return func(i *Index) {
-		i.tagValueCacheSize = sz
-	}
-}
-
-// Index represents a collection of layered index files and WAL.
-type Index struct {
-	mu         sync.RWMutex
-	partitions []*Partition
-	opened     bool
-
-	tagValueCache     *TagValueSeriesIDCache
-	tagValueCacheSize int
-
-	// The following may be set when initializing an Index.
-	path               string        // Root directory of the index partitions.
-	disableCompactions bool          // Initially disables compactions on the index.
-	maxLogFileSize     int64         // Maximum size of a LogFile before it's compacted.
- maxLogFileAge time.Duration // Maximum age of a LogFile before it's compacted. - logfileBufferSize int // The size of the buffer used by the LogFile. - disableFsync bool // Disables flushing buffers and fsyning files. Used when working with indexes offline. - logger *zap.Logger // Index's logger. - - // The following must be set when initializing an Index. - sfile *tsdb.SeriesFile // series lookup file - database string // Name of database. - - // Cached sketches. - mSketch, mTSketch estimator.Sketch // Measurement sketches - sSketch, sTSketch estimator.Sketch // Series sketches - - // Index's version. - version int - - // Number of partitions used by the index. - PartitionN uint64 -} - -func (i *Index) UniqueReferenceID() uintptr { - return uintptr(unsafe.Pointer(i)) -} - -// NewIndex returns a new instance of Index. -func NewIndex(sfile *tsdb.SeriesFile, database string, options ...IndexOption) *Index { - idx := &Index{ - tagValueCacheSize: tsdb.DefaultSeriesIDSetCacheSize, - maxLogFileSize: tsdb.DefaultMaxIndexLogFileSize, - maxLogFileAge: tsdb.DefaultCompactFullWriteColdDuration, - logger: zap.NewNop(), - version: Version, - sfile: sfile, - database: database, - mSketch: hll.NewDefaultPlus(), - mTSketch: hll.NewDefaultPlus(), - sSketch: hll.NewDefaultPlus(), - sTSketch: hll.NewDefaultPlus(), - PartitionN: DefaultPartitionN, - } - - for _, option := range options { - option(idx) - } - - idx.tagValueCache = NewTagValueSeriesIDCache(idx.tagValueCacheSize) - return idx -} - -// Bytes estimates the memory footprint of this Index, in bytes. -func (i *Index) Bytes() int { - var b int - i.mu.RLock() - b += 24 // mu RWMutex is 24 bytes - b += int(unsafe.Sizeof(i.partitions)) - for _, p := range i.partitions { - b += int(unsafe.Sizeof(p)) + p.bytes() - } - b += int(unsafe.Sizeof(i.opened)) - b += int(unsafe.Sizeof(i.path)) + len(i.path) - b += int(unsafe.Sizeof(i.disableCompactions)) - b += int(unsafe.Sizeof(i.maxLogFileSize)) - b += int(unsafe.Sizeof(i.maxLogFileAge)) - b += int(unsafe.Sizeof(i.logger)) - b += int(unsafe.Sizeof(i.sfile)) - // Do not count SeriesFile because it belongs to the code that constructed this Index. - b += int(unsafe.Sizeof(i.mSketch)) + i.mSketch.Bytes() - b += int(unsafe.Sizeof(i.mTSketch)) + i.mTSketch.Bytes() - b += int(unsafe.Sizeof(i.sSketch)) + i.sSketch.Bytes() - b += int(unsafe.Sizeof(i.sTSketch)) + i.sTSketch.Bytes() - b += int(unsafe.Sizeof(i.database)) + len(i.database) - b += int(unsafe.Sizeof(i.version)) - b += int(unsafe.Sizeof(i.PartitionN)) - i.mu.RUnlock() - return b -} - -// Database returns the name of the database the index was initialized with. -func (i *Index) Database() string { - return i.database -} - -// WithLogger sets the logger on the index after it's been created. -// -// It's not safe to call WithLogger after the index has been opened, or before -// it has been closed. -func (i *Index) WithLogger(l *zap.Logger) { - i.mu.Lock() - defer i.mu.Unlock() - i.logger = l.With(zap.String("index", "tsi")) -} - -// Type returns the type of Index this is. -func (i *Index) Type() string { return IndexName } - -// SeriesFile returns the series file attached to the index. -func (i *Index) SeriesFile() *tsdb.SeriesFile { return i.sfile } - -// SeriesIDSet returns the set of series ids associated with series in this -// index. Any series IDs for series no longer present in the index are filtered out. 
-func (i *Index) SeriesIDSet() *tsdb.SeriesIDSet { - seriesIDSet := tsdb.NewSeriesIDSet() - others := make([]*tsdb.SeriesIDSet, 0, i.PartitionN) - for _, p := range i.partitions { - others = append(others, p.seriesIDSet) - } - seriesIDSet.Merge(others...) - return seriesIDSet -} - -// Open opens the index. -func (i *Index) Open() (rErr error) { - i.mu.Lock() - defer i.mu.Unlock() - - if i.opened { - return errors.New("index already open") - } - - // Ensure root exists. - if err := os.MkdirAll(i.path, 0777); err != nil { - return err - } - - // Initialize index partitions. - i.partitions = make([]*Partition, i.PartitionN) - for j := 0; j < len(i.partitions); j++ { - p := NewPartition(i.sfile, filepath.Join(i.path, fmt.Sprint(j))) - p.MaxLogFileSize = i.maxLogFileSize - p.MaxLogFileAge = i.maxLogFileAge - p.nosync = i.disableFsync - p.logbufferSize = i.logfileBufferSize - p.logger = i.logger.With(zap.String("tsi1_partition", fmt.Sprint(j+1))) - i.partitions[j] = p - } - - // Open all the Partitions in parallel. - partitionN := len(i.partitions) - n := i.availableThreads() - - // Run fn on each partition using a fixed number of goroutines. - g := new(errgroup.Group) - g.SetLimit(n) - for idx := 0; idx < partitionN; idx++ { - g.Go(i.partitions[idx].Open) - } - err := g.Wait() - defer i.cleanUpFail(&rErr) - if err != nil { - return err - } - - // Refresh cached sketches. - if err := i.updateSeriesSketches(); err != nil { - return err - } else if err := i.updateMeasurementSketches(); err != nil { - return err - } - - // Mark opened. - i.opened = true - i.logger.Info(fmt.Sprintf("index opened with %d partitions", partitionN)) - return nil -} - -func (i *Index) cleanUpFail(err *error) { - if nil != *err { - i.close() - } -} - -// Compact requests a compaction of partitions. -func (i *Index) Compact() { - i.mu.Lock() - defer i.mu.Unlock() - for _, p := range i.partitions { - p.Compact() - } -} - -func (i *Index) EnableCompactions() { - for _, p := range i.partitions { - p.EnableCompactions() - } -} - -func (i *Index) DisableCompactions() { - for _, p := range i.partitions { - p.DisableCompactions() - } -} - -// Wait blocks until all outstanding compactions have completed. -func (i *Index) Wait() { - for _, p := range i.partitions { - p.Wait() - } -} - -// Close closes the index. -func (i *Index) Close() error { - // Lock index and close partitions. - i.mu.Lock() - defer i.mu.Unlock() - return i.close() -} - -// close closes the index without locking -func (i *Index) close() (rErr error) { - for _, p := range i.partitions { - if (p != nil) && p.IsOpen() { - if pErr := p.Close(); pErr != nil { - i.logger.Warn("Failed to clean up partition", zap.String("path", p.Path())) - if rErr == nil { - rErr = pErr - } - } - } - } - // Mark index as closed. - i.opened = false - return rErr -} - -// Path returns the path the index was opened with. -func (i *Index) Path() string { return i.path } - -// PartitionAt returns the partition by index. -func (i *Index) PartitionAt(index int) *Partition { - return i.partitions[index] -} - -// partition returns the appropriate Partition for a provided series key. -func (i *Index) partition(key []byte) *Partition { - return i.partitions[int(xxhash.Sum64(key)&(i.PartitionN-1))] -} - -// partitionIdx returns the index of the partition that key belongs in. -func (i *Index) partitionIdx(key []byte) int { - return int(xxhash.Sum64(key) & (i.PartitionN - 1)) -} - -// availableThreads returns the minimum of GOMAXPROCS and the number of -// partitions in the Index. 
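For reference, a minimal sketch of the routing performed by partition and partitionIdx above, assuming the same xxhash package this file already imports; masking with PartitionN-1 only yields a valid index because DefaultPartitionN is a power of two:

	package main

	import (
		"fmt"

		"github.com/cespare/xxhash"
	)

	func main() {
		const partitionN uint64 = 8 // must be a power of 2
		key := []byte("cpu,region=east")
		// Equivalent to Index.partitionIdx: hash the series key and keep the low bits.
		idx := int(xxhash.Sum64(key) & (partitionN - 1))
		fmt.Println(idx) // always in [0, partitionN)
	}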
-func (i *Index) availableThreads() int { - n := runtime.GOMAXPROCS(0) - if len(i.partitions) < n { - return len(i.partitions) - } - return n -} - -// updateMeasurementSketches rebuilds the cached measurement sketches. -func (i *Index) updateMeasurementSketches() error { - for j := 0; j < int(i.PartitionN); j++ { - if s, t, err := i.partitions[j].MeasurementsSketches(); err != nil { - return err - } else if err := i.mSketch.Merge(s); err != nil { - return err - } else if err := i.mTSketch.Merge(t); err != nil { - return err - } - } - return nil -} - -// updateSeriesSketches rebuilds the cached series sketches. -func (i *Index) updateSeriesSketches() error { - for j := 0; j < int(i.PartitionN); j++ { - if s, t, err := i.partitions[j].SeriesSketches(); err != nil { - return err - } else if err := i.sSketch.Merge(s); err != nil { - return err - } else if err := i.sTSketch.Merge(t); err != nil { - return err - } - } - return nil -} - -// SetFieldSet sets a shared field set from the engine. -func (i *Index) SetFieldSet(fs *tsdb.MeasurementFieldSet) { - for _, p := range i.partitions { - p.SetFieldSet(fs) - } -} - -// FieldSet returns the assigned fieldset. -func (i *Index) FieldSet() *tsdb.MeasurementFieldSet { - if len(i.partitions) == 0 { - return nil - } - return i.partitions[0].FieldSet() -} - -// ForEachMeasurementName iterates over all measurement names in the index, -// applying fn. It returns the first error encountered, if any. -// -// ForEachMeasurementName does not call fn on each partition concurrently so the -// call may provide a non-goroutine safe fn. -func (i *Index) ForEachMeasurementName(fn func(name []byte) error) error { - itr, err := i.MeasurementIterator() - if err != nil { - return err - } else if itr == nil { - return nil - } - defer itr.Close() - - // Iterate over all measurements. - for { - e, err := itr.Next() - if err != nil { - return err - } else if e == nil { - break - } - - if err := fn(e); err != nil { - return err - } - } - return nil -} - -// MeasurementExists returns true if a measurement exists. -func (i *Index) MeasurementExists(name []byte) (bool, error) { - n := i.availableThreads() - - // Store errors - var found uint32 // Use this to signal we found the measurement. - errC := make(chan error, i.PartitionN) - - // Check each partition for the measurement concurrently. - var pidx uint32 // Index of maximum Partition being worked on. - for k := 0; k < n; k++ { - go func() { - for { - idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to check - if idx >= len(i.partitions) { - return // No more work. - } - - // Check if the measurement has been found. If it has don't - // need to check this partition and can just move on. - if atomic.LoadUint32(&found) == 1 { - errC <- nil - continue - } - - b, err := i.partitions[idx].MeasurementExists(name) - if b { - atomic.StoreUint32(&found, 1) - } - errC <- err - } - }() - } - - // Check for error - for i := 0; i < cap(errC); i++ { - if err := <-errC; err != nil { - return false, err - } - } - - // Check if we found the measurement. - return atomic.LoadUint32(&found) == 1, nil -} - -// MeasurementHasSeries returns true if a measurement has non-tombstoned series. 
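MeasurementExists above, and several methods that follow (fetchByteValues, DropMeasurement, HasTagKey, HasTagValue), reuse the same fan-out idiom: a fixed pool of goroutines claims partition indexes with an atomic counter and reports exactly one result per partition on a buffered error channel. A condensed, hypothetical sketch of just that idiom, with the per-partition work stubbed out as a callback:

	// forEachPartitionConcurrently is not part of tsi1; it only illustrates the
	// fan-out pattern used above. Assumes "sync/atomic" is imported.
	func forEachPartitionConcurrently(partitionN, workers int, work func(idx int) error) error {
		errC := make(chan error, partitionN) // one slot per partition, so senders never block
		var pidx uint32                      // next partition index to claim

		for k := 0; k < workers; k++ {
			go func() {
				for {
					idx := int(atomic.AddUint32(&pidx, 1) - 1)
					if idx >= partitionN {
						return // no more work
					}
					errC <- work(idx)
				}
			}()
		}

		// Drain one result per partition, returning the first error seen.
		for i := 0; i < cap(errC); i++ {
			if err := <-errC; err != nil {
				return err
			}
		}
		return nil
	}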
-func (i *Index) MeasurementHasSeries(name []byte) (bool, error) { - for _, p := range i.partitions { - if v, err := p.MeasurementHasSeries(name); err != nil { - return false, err - } else if v { - return true, nil - } - } - return false, nil -} - -// fetchByteValues is a helper for gathering values from each partition in the index, -// based on some criteria. -// -// fn is a function that works on partition idx and calls into some method on -// the partition that returns some ordered values. -func (i *Index) fetchByteValues(fn func(idx int) ([][]byte, error)) ([][]byte, error) { - n := i.availableThreads() - - // Store results. - names := make([][][]byte, i.PartitionN) - errC := make(chan error, i.PartitionN) - - var pidx uint32 // Index of maximum Partition being worked on. - for k := 0; k < n; k++ { - go func() { - for { - idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on. - if idx >= len(i.partitions) { - return // No more work. - } - - pnames, err := fn(idx) - - // This is safe since there are no readers on names until all - // the writers are done. - names[idx] = pnames - errC <- err - } - }() - } - - // Check for error - for i := 0; i < cap(errC); i++ { - if err := <-errC; err != nil { - return nil, err - } - } - - // It's now safe to read from names. - return slices.MergeSortedBytes(names...), nil -} - -// MeasurementIterator returns an iterator over all measurements. -func (i *Index) MeasurementIterator() (tsdb.MeasurementIterator, error) { - itrs := make([]tsdb.MeasurementIterator, 0, len(i.partitions)) - for _, p := range i.partitions { - itr, err := p.MeasurementIterator() - if err != nil { - tsdb.MeasurementIterators(itrs).Close() - return nil, err - } else if itr != nil { - itrs = append(itrs, itr) - } - } - return tsdb.MergeMeasurementIterators(itrs...), nil -} - -// MeasurementSeriesIDIterator returns an iterator over all series in a measurement. -func (i *Index) MeasurementSeriesIDIterator(name []byte) (tsdb.SeriesIDIterator, error) { - itrs := make([]tsdb.SeriesIDIterator, 0, len(i.partitions)) - for _, p := range i.partitions { - itr, err := p.MeasurementSeriesIDIterator(name) - if err != nil { - tsdb.SeriesIDIterators(itrs).Close() - return nil, err - } else if itr != nil { - itrs = append(itrs, itr) - } - } - return tsdb.MergeSeriesIDIterators(itrs...), nil -} - -// MeasurementNamesByRegex returns measurement names for the provided regex. -func (i *Index) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) { - return i.fetchByteValues(func(idx int) ([][]byte, error) { - return i.partitions[idx].MeasurementNamesByRegex(re) - }) -} - -// DropMeasurement deletes a measurement from the index. It returns the first -// error encountered, if any. -func (i *Index) DropMeasurement(name []byte) error { - n := i.availableThreads() - - // Store results. - errC := make(chan error, i.PartitionN) - - var pidx uint32 // Index of maximum Partition being worked on. - for k := 0; k < n; k++ { - go func() { - for { - idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on. - if idx >= len(i.partitions) { - return // No more work. - } - errC <- i.partitions[idx].DropMeasurement(name) - } - }() - } - - // Check for error - for i := 0; i < cap(errC); i++ { - if err := <-errC; err != nil { - return err - } - } - - // Update sketches under lock. 
- i.mu.Lock() - defer i.mu.Unlock() - - i.mTSketch.Add(name) - if err := i.updateSeriesSketches(); err != nil { - return err - } - - return nil -} - -// CreateSeriesListIfNotExists creates a list of series if they doesn't exist in bulk. -func (i *Index) CreateSeriesListIfNotExists(keys [][]byte, names [][]byte, tagsSlice []models.Tags) error { - // All slices must be of equal length. - if len(names) != len(tagsSlice) { - return errors.New("names/tags length mismatch in index") - } - - // We need to move different series into collections for each partition - // to process. - pNames := make([][][]byte, i.PartitionN) - pTags := make([][]models.Tags, i.PartitionN) - - // Determine partition for series using each series key. - for ki, key := range keys { - pidx := i.partitionIdx(key) - pNames[pidx] = append(pNames[pidx], names[ki]) - pTags[pidx] = append(pTags[pidx], tagsSlice[ki]) - } - - // Process each subset of series on each partition. - n := i.availableThreads() - - // Store errors. - errC := make(chan error, i.PartitionN) - - var pidx uint32 // Index of maximum Partition being worked on. - for k := 0; k < n; k++ { - go func() { - for { - idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on. - if idx >= len(i.partitions) { - return // No more work. - } - - ids, err := i.partitions[idx].createSeriesListIfNotExists(pNames[idx], pTags[idx]) - - var updateCache bool - for _, id := range ids { - if id != 0 { - updateCache = true - break - } - } - - if !updateCache { - errC <- err - continue - } - - // Some cached bitset results may need to be updated. - i.tagValueCache.RLock() - for j, id := range ids { - if id == 0 { - continue - } - - name := pNames[idx][j] - tags := pTags[idx][j] - if i.tagValueCache.measurementContainsSets(name) { - for _, pair := range tags { - // TODO(edd): It's not clear to me yet whether it will be better to take a lock - // on every series id set, or whether to gather them all up under the cache rlock - // and then take the cache lock and update them all at once (without invoking a lock - // on each series id set). - // - // Taking the cache lock will block all queries, but is one lock. Taking each series set - // lock might be many lock/unlocks but will only block a query that needs that particular set. - // - // Need to think on it, but I think taking a lock on each series id set is the way to go. - // - // One other option here is to take a lock on the series id set when we first encounter it - // and then keep it locked until we're done with all the ids. - // - // Note: this will only add `id` to the set if it exists. - i.tagValueCache.addToSet(name, pair.Key, pair.Value, id) // Takes a lock on the series id set - } - } - } - i.tagValueCache.RUnlock() - - errC <- err - } - }() - } - - // Check for error - for i := 0; i < cap(errC); i++ { - if err := <-errC; err != nil { - return err - } - } - - // Update sketches under lock. - i.mu.Lock() - defer i.mu.Unlock() - - for _, key := range keys { - i.sSketch.Add(key) - } - for _, name := range names { - i.mSketch.Add(name) - } - - return nil -} - -// CreateSeriesIfNotExists creates a series if it doesn't exist or is deleted. -func (i *Index) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error { - ids, err := i.partition(key).createSeriesListIfNotExists([][]byte{name}, []models.Tags{tags}) - if err != nil { - return err - } - - i.mu.Lock() - i.sSketch.Add(key) - i.mSketch.Add(name) - i.mu.Unlock() - - if ids[0] == 0 { - return nil // No new series, nothing further to update. 
- } - - // If there are cached sets for any of the tag pairs, they will need to be - // updated with the series id. - i.tagValueCache.RLock() - if i.tagValueCache.measurementContainsSets(name) { - for _, pair := range tags { - // TODO(edd): It's not clear to me yet whether it will be better to take a lock - // on every series id set, or whether to gather them all up under the cache rlock - // and then take the cache lock and update them all at once (without invoking a lock - // on each series id set). - // - // Taking the cache lock will block all queries, but is one lock. Taking each series set - // lock might be many lock/unlocks but will only block a query that needs that particular set. - // - // Need to think on it, but I think taking a lock on each series id set is the way to go. - // - // Note this will only add `id` to the set if it exists. - i.tagValueCache.addToSet(name, pair.Key, pair.Value, ids[0]) // Takes a lock on the series id set - } - } - i.tagValueCache.RUnlock() - return nil -} - -// DropSeries drops the provided series from the index. If cascade is true -// and this is the last series to the measurement, the measurement will also be dropped. -func (i *Index) DropSeries(seriesID uint64, key []byte, cascade bool) error { - // Remove from partition. - if err := i.partition(key).DropSeries(seriesID); err != nil { - return err - } - - // Add sketch tombstone. - i.mu.Lock() - i.sTSketch.Add(key) - i.mu.Unlock() - - if !cascade { - return nil - } - - // Extract measurement name & tags. - name, tags := models.ParseKeyBytes(key) - - // If there are cached sets for any of the tag pairs, they will need to be - // updated with the series id. - i.tagValueCache.RLock() - if i.tagValueCache.measurementContainsSets(name) { - for _, pair := range tags { - i.tagValueCache.delete(name, pair.Key, pair.Value, seriesID) // Takes a lock on the series id set - } - } - i.tagValueCache.RUnlock() - - // Check if that was the last series for the measurement in the entire index. - if ok, err := i.MeasurementHasSeries(name); err != nil { - return err - } else if ok { - return nil - } - - // If no more series exist in the measurement then delete the measurement. - if err := i.DropMeasurement(name); err != nil { - return err - } - return nil -} - -// DropMeasurementIfSeriesNotExist drops a measurement only if there are no more -// series for the measurement. -func (i *Index) DropMeasurementIfSeriesNotExist(name []byte) (bool, error) { - // Check if that was the last series for the measurement in the entire index. - if ok, err := i.MeasurementHasSeries(name); err != nil { - return false, err - } else if ok { - return false, nil - } - - // If no more series exist in the measurement then delete the measurement. - return true, i.DropMeasurement(name) -} - -// MeasurementsSketches returns the two measurement sketches for the index. -func (i *Index) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) { - i.mu.RLock() - defer i.mu.RUnlock() - return i.mSketch.Clone(), i.mTSketch.Clone(), nil -} - -// SeriesSketches returns the two series sketches for the index. -func (i *Index) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) { - i.mu.RLock() - defer i.mu.RUnlock() - return i.sSketch.Clone(), i.sTSketch.Clone(), nil -} - -// Since indexes are not shared across shards, the count returned by SeriesN -// cannot be combined with other shard's results. 
If you need to count series -// across indexes then use either the database-wide series file, or merge the -// index-level bitsets or sketches. -func (i *Index) SeriesN() int64 { - return int64(i.SeriesIDSet().Cardinality()) -} - -// HasTagKey returns true if tag key exists. It returns the first error -// encountered if any. -func (i *Index) HasTagKey(name, key []byte) (bool, error) { - n := i.availableThreads() - - // Store errors - var found uint32 // Use this to signal we found the tag key. - errC := make(chan error, i.PartitionN) - - // Check each partition for the tag key concurrently. - var pidx uint32 // Index of maximum Partition being worked on. - for k := 0; k < n; k++ { - go func() { - for { - idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to check - if idx >= len(i.partitions) { - return // No more work. - } - - // Check if the tag key has already been found. If it has, we - // don't need to check this partition and can just move on. - if atomic.LoadUint32(&found) == 1 { - errC <- nil - continue - } - - b, err := i.partitions[idx].HasTagKey(name, key) - if b { - atomic.StoreUint32(&found, 1) - } - errC <- err - } - }() - } - - // Check for error - for i := 0; i < cap(errC); i++ { - if err := <-errC; err != nil { - return false, err - } - } - - // Check if we found the tag key. - return atomic.LoadUint32(&found) == 1, nil -} - -// HasTagValue returns true if tag value exists. -func (i *Index) HasTagValue(name, key, value []byte) (bool, error) { - n := i.availableThreads() - - // Store errors - var found uint32 // Use this to signal we found the tag key. - errC := make(chan error, i.PartitionN) - - // Check each partition for the tag key concurrently. - var pidx uint32 // Index of maximum Partition being worked on. - for k := 0; k < n; k++ { - go func() { - for { - idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to check - if idx >= len(i.partitions) { - return // No more work. - } - - // Check if the tag key has already been found. If it has, we - // don't need to check this partition and can just move on. - if atomic.LoadUint32(&found) == 1 { - errC <- nil - continue - } - - b, err := i.partitions[idx].HasTagValue(name, key, value) - if b { - atomic.StoreUint32(&found, 1) - } - errC <- err - } - }() - } - - // Check for error - for i := 0; i < cap(errC); i++ { - if err := <-errC; err != nil { - return false, err - } - } - - // Check if we found the tag key. - return atomic.LoadUint32(&found) == 1, nil -} - -// TagKeyIterator returns an iterator for all keys across a single measurement. -func (i *Index) TagKeyIterator(name []byte) (tsdb.TagKeyIterator, error) { - a := make([]tsdb.TagKeyIterator, 0, len(i.partitions)) - for _, p := range i.partitions { - itr := p.TagKeyIterator(name) - if itr != nil { - a = append(a, itr) - } - } - return tsdb.MergeTagKeyIterators(a...), nil -} - -// TagValueIterator returns an iterator for all values across a single key. -func (i *Index) TagValueIterator(name, key []byte) (tsdb.TagValueIterator, error) { - a := make([]tsdb.TagValueIterator, 0, len(i.partitions)) - for _, p := range i.partitions { - itr := p.TagValueIterator(name, key) - if itr != nil { - a = append(a, itr) - } - } - return tsdb.MergeTagValueIterators(a...), nil -} - -// TagKeySeriesIDIterator returns a series iterator for all values across a single key. 
-func (i *Index) TagKeySeriesIDIterator(name, key []byte) (tsdb.SeriesIDIterator, error) { - a := make([]tsdb.SeriesIDIterator, 0, len(i.partitions)) - for _, p := range i.partitions { - itr, err := p.TagKeySeriesIDIterator(name, key) - if err != nil { - return nil, err - } else if itr != nil { - a = append(a, itr) - } - } - return tsdb.MergeSeriesIDIterators(a...), nil -} - -// TagValueSeriesIDIterator returns a series iterator for a single tag value. -func (i *Index) TagValueSeriesIDIterator(name, key, value []byte) (tsdb.SeriesIDIterator, error) { - // Check series ID set cache... - if i.tagValueCacheSize > 0 { - if ss := i.tagValueCache.Get(name, key, value); ss != nil { - // Return a clone because the set is mutable. - return tsdb.NewSeriesIDSetIterator(ss.Clone()), nil - } - } - - a := make([]tsdb.SeriesIDIterator, 0, len(i.partitions)) - for _, p := range i.partitions { - itr, err := p.TagValueSeriesIDIterator(name, key, value) - if err != nil { - tsdb.SeriesIDIterators(a).Close() - return nil, err - } else if itr != nil { - a = append(a, itr) - } - } - - itr := tsdb.MergeSeriesIDIterators(a...) - if i.tagValueCacheSize == 0 { - return itr, nil - } - - // Check if the iterator contains only series id sets. Cache them... - if ssitr, ok := itr.(tsdb.SeriesIDSetIterator); ok { - ss := ssitr.SeriesIDSet() - i.tagValueCache.Put(name, key, value, ss) - } - return itr, nil -} - -// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression. -func (i *Index) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) { - n := i.availableThreads() - - // Store results. - keys := make([]map[string]struct{}, i.PartitionN) - errC := make(chan error, i.PartitionN) - - var pidx uint32 // Index of maximum Partition being worked on. - for k := 0; k < n; k++ { - go func() { - for { - idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on. - if idx >= len(i.partitions) { - return // No more work. - } - - // This is safe since there are no readers on keys until all - // the writers are done. - tagKeys, err := i.partitions[idx].MeasurementTagKeysByExpr(name, expr) - keys[idx] = tagKeys - errC <- err - } - }() - } - - // Check for error - for i := 0; i < cap(errC); i++ { - if err := <-errC; err != nil { - return nil, err - } - } - - // Merge into single map. - result := keys[0] - for k := 1; k < len(i.partitions); k++ { - for k := range keys[k] { - result[k] = struct{}{} - } - } - return result, nil -} - -// DiskSizeBytes returns the size of the index on disk. -func (i *Index) DiskSizeBytes() int64 { - fs, err := i.RetainFileSet() - if err != nil { - i.logger.Warn("Index is closing down") - return 0 - } - defer fs.Release() - - var manifestSize int64 - // Get MANIFEST sizes from each partition. - for _, p := range i.partitions { - manifestSize += p.manifestSize - } - return fs.Size() + manifestSize -} - -// TagKeyCardinality always returns zero. -// It is not possible to determine cardinality of tags across index files, and -// thus it cannot be done across partitions. -func (i *Index) TagKeyCardinality(name, key []byte) int { - return 0 -} - -// RetainFileSet returns the set of all files across all partitions. -// This is only needed when all files need to be retained for an operation. 
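The cache handling in TagValueSeriesIDIterator above is a cache-aside lookup: a hit returns a clone of the cached bitset (the cached set is mutable), a miss merges the per-partition iterators and, when the merged iterator exposes a plain series id set, stores that set for the next lookup. The same control flow reduced to a hypothetical helper; the plain map stands in for the real TagValueSeriesIDCache, which keys on (measurement, tag key, tag value):

	// lookupSeriesIDs is illustrative only and assumes the tsdb package import above.
	func lookupSeriesIDs(cache map[string]*tsdb.SeriesIDSet, key string, merge func() *tsdb.SeriesIDSet) *tsdb.SeriesIDSet {
		if ss, ok := cache[key]; ok {
			return ss.Clone() // return a clone because the cached set is mutable
		}
		ss := merge()   // fall back to merging the per-partition results
		cache[key] = ss // populate the cache for the next lookup
		return ss
	}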
-func (i *Index) RetainFileSet() (*FileSet, error) { - i.mu.RLock() - defer i.mu.RUnlock() - - fs := NewFileSet(nil) - for _, p := range i.partitions { - pfs, err := p.RetainFileSet() - if err != nil { - fs.Close() - return nil, err - } - fs.files = append(fs.files, pfs.files...) - } - return fs, nil -} - -// IsIndexDir returns true if directory contains at least one partition directory. -func IsIndexDir(path string) (bool, error) { - fis, err := os.ReadDir(path) - if err != nil { - return false, err - } - for _, fi := range fis { - if !fi.IsDir() { - continue - } else if ok, err := IsPartitionDir(filepath.Join(path, fi.Name())); err != nil { - return false, err - } else if ok { - return true, nil - } - } - return false, nil -} diff --git a/tsdb/index/tsi1/index_file.go b/tsdb/index/tsi1/index_file.go deleted file mode 100644 index 8cfdfc88810..00000000000 --- a/tsdb/index/tsi1/index_file.go +++ /dev/null @@ -1,530 +0,0 @@ -package tsi1 - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "sync" - "unsafe" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/estimator" - "github.com/influxdata/influxdb/v2/pkg/estimator/hll" - "github.com/influxdata/influxdb/v2/pkg/mmap" - "github.com/influxdata/influxdb/v2/tsdb" -) - -// IndexFileVersion is the current TSI1 index file version. -const IndexFileVersion = 1 - -// FileSignature represents a magic number at the header of the index file. -const FileSignature = "TSI1" - -// IndexFile field size constants. -const ( - // IndexFile trailer fields - IndexFileVersionSize = 2 - - // IndexFileTrailerSize is the size of the trailer. Currently 82 bytes. - IndexFileTrailerSize = IndexFileVersionSize + - 8 + 8 + // measurement block offset + size - 8 + 8 + // series id set offset + size - 8 + 8 + // tombstone series id set offset + size - 8 + 8 + // series sketch offset + size - 8 + 8 + // tombstone series sketch offset + size - 0 -) - -// IndexFile errors. -var ( - ErrInvalidIndexFile = errors.New("invalid index file") - ErrUnsupportedIndexFileVersion = errors.New("unsupported index file version") -) - -// IndexFile represents a collection of measurement, tag, and series data. -type IndexFile struct { - wg sync.WaitGroup // ref count - data []byte - - // Components - sfile *tsdb.SeriesFile - tblks map[string]*TagBlock // tag blocks by measurement name - mblk MeasurementBlock - - // Raw series set data. - seriesIDSetData []byte - tombstoneSeriesIDSetData []byte - - // Series sketch data. - sketchData, tSketchData []byte - - // Sortable identifier & filepath to the log file. - level int - id int - - mu sync.RWMutex - // Compaction tracking. - compacting bool - - // Path to data file. - path string -} - -// NewIndexFile returns a new instance of IndexFile. -func NewIndexFile(sfile *tsdb.SeriesFile) *IndexFile { - return &IndexFile{ - sfile: sfile, - } -} - -// bytes estimates the memory footprint of this IndexFile, in bytes. -func (f *IndexFile) bytes() int { - var b int - f.wg.Add(1) - b += 16 // wg WaitGroup is 16 bytes - b += int(unsafe.Sizeof(f.data)) - // Do not count f.data contents because it is mmap'd - b += int(unsafe.Sizeof(f.sfile)) - // Do not count SeriesFile because it belongs to the code that constructed this IndexFile. 
- b += int(unsafe.Sizeof(f.tblks)) - for k, v := range f.tblks { - // Do not count TagBlock contents, they all reference f.data - b += int(unsafe.Sizeof(k)) + len(k) - b += int(unsafe.Sizeof(*v)) - } - b += int(unsafe.Sizeof(f.mblk)) + f.mblk.bytes() - b += int(unsafe.Sizeof(f.seriesIDSetData) + unsafe.Sizeof(f.tombstoneSeriesIDSetData)) - // Do not count contents of seriesIDSetData or tombstoneSeriesIDSetData: references f.data - b += int(unsafe.Sizeof(f.level) + unsafe.Sizeof(f.id)) - b += 24 // mu RWMutex is 24 bytes - b += int(unsafe.Sizeof(f.compacting)) - b += int(unsafe.Sizeof(f.path)) + len(f.path) - f.wg.Done() - return b -} - -// Open memory maps the data file at the file's path. -func (f *IndexFile) Open() error { - defer func() { - if err := recover(); err != nil { - err = fmt.Errorf("[Index file: %s] %v", f.path, err) - panic(err) - } - }() - - // Extract identifier from path name. - f.id, f.level = ParseFilename(f.Path()) - - data, err := mmap.Map(f.Path(), 0) - if err != nil { - return err - } - - return f.UnmarshalBinary(data) -} - -// Close unmaps the data file. -func (f *IndexFile) Close() error { - // Wait until all references are released. - f.wg.Wait() - - f.sfile = nil - f.tblks = nil - f.mblk = MeasurementBlock{} - return mmap.Unmap(f.data) -} - -// ID returns the file sequence identifier. -func (f *IndexFile) ID() int { return f.id } - -// Path returns the file path. -func (f *IndexFile) Path() string { return f.path } - -// SetPath sets the file's path. -func (f *IndexFile) SetPath(path string) { f.path = path } - -// Level returns the compaction level for the file. -func (f *IndexFile) Level() int { return f.level } - -// Retain adds a reference count to the file. -func (f *IndexFile) Retain() { f.wg.Add(1) } - -// Release removes a reference count from the file. -func (f *IndexFile) Release() { f.wg.Done() } - -// Size returns the size of the index file, in bytes. -func (f *IndexFile) Size() int64 { return int64(len(f.data)) } - -// Compacting returns true if the file is being compacted. -func (f *IndexFile) Compacting() bool { - f.mu.RLock() - v := f.compacting - f.mu.RUnlock() - return v -} - -// UnmarshalBinary opens an index from data. -// The byte slice is retained so it must be kept open. -func (f *IndexFile) UnmarshalBinary(data []byte) error { - // Ensure magic number exists at the beginning. - if len(data) < len(FileSignature) { - return fmt.Errorf("%q: %w", f.path, io.ErrShortBuffer) - } else if !bytes.Equal(data[:len(FileSignature)], []byte(FileSignature)) { - return fmt.Errorf("%q: %w", f.path, ErrInvalidIndexFile) - } - - // Read index file trailer. - t, err := ReadIndexFileTrailer(data) - if err != nil { - return fmt.Errorf("%q: %w", f.path, err) - } - - // Slice series sketch data. - f.sketchData = data[t.SeriesSketch.Offset : t.SeriesSketch.Offset+t.SeriesSketch.Size] - f.tSketchData = data[t.TombstoneSeriesSketch.Offset : t.TombstoneSeriesSketch.Offset+t.TombstoneSeriesSketch.Size] - - // Slice series set data. - f.seriesIDSetData = data[t.SeriesIDSet.Offset : t.SeriesIDSet.Offset+t.SeriesIDSet.Size] - f.tombstoneSeriesIDSetData = data[t.TombstoneSeriesIDSet.Offset : t.TombstoneSeriesIDSet.Offset+t.TombstoneSeriesIDSet.Size] - - // Unmarshal measurement block. - if err := f.mblk.UnmarshalBinary(data[t.MeasurementBlock.Offset:][:t.MeasurementBlock.Size]); err != nil { - return fmt.Errorf("%q: %w", f.path, err) - } - - // Unmarshal each tag block. 
- f.tblks = make(map[string]*TagBlock) - itr := f.mblk.Iterator() - - for m := itr.Next(); m != nil; m = itr.Next() { - e := m.(*MeasurementBlockElem) - - // Slice measurement block data. - buf := data[e.tagBlock.offset:] - buf = buf[:e.tagBlock.size] - - // Unmarshal measurement block. - var tblk TagBlock - if err := tblk.UnmarshalBinary(buf); err != nil { - return fmt.Errorf("%q: %w", f.path, err) - } - f.tblks[string(e.name)] = &tblk - } - - // Save reference to entire data block. - f.data = data - - return nil -} - -func (f *IndexFile) SeriesIDSet() (*tsdb.SeriesIDSet, error) { - ss := tsdb.NewSeriesIDSet() - if err := ss.UnmarshalBinary(f.seriesIDSetData); err != nil { - return nil, err - } - return ss, nil -} - -func (f *IndexFile) TombstoneSeriesIDSet() (*tsdb.SeriesIDSet, error) { - ss := tsdb.NewSeriesIDSet() - if err := ss.UnmarshalBinaryUnsafe(f.tombstoneSeriesIDSetData); err != nil { - return nil, err - } - return ss, nil -} - -// Measurement returns a measurement element. -func (f *IndexFile) Measurement(name []byte) MeasurementElem { - e, ok := f.mblk.Elem(name) - if !ok { - return nil - } - return &e -} - -// MeasurementN returns the number of measurements in the file. -func (f *IndexFile) MeasurementN() (n uint64) { - mitr := f.mblk.Iterator() - for me := mitr.Next(); me != nil; me = mitr.Next() { - n++ - } - return n -} - -// MeasurementHasSeries returns true if a measurement has any non-tombstoned series. -func (f *IndexFile) MeasurementHasSeries(ss *tsdb.SeriesIDSet, name []byte) (ok bool) { - e, ok := f.mblk.Elem(name) - if !ok { - return false - } - - var exists bool - e.ForEachSeriesID(func(id uint64) error { - if ss.Contains(id) { - exists = true - return errors.New("done") - } - return nil - }) - return exists -} - -// TagValueIterator returns a value iterator for a tag key and a flag -// indicating if a tombstone exists on the measurement or key. -func (f *IndexFile) TagValueIterator(name, key []byte) TagValueIterator { - tblk := f.tblks[string(name)] - if tblk == nil { - return nil - } - - // Find key element. - ke := tblk.TagKeyElem(key) - if ke == nil { - return nil - } - - // Merge all value series iterators together. - return ke.TagValueIterator() -} - -// TagKeySeriesIDIterator returns a series iterator for a tag key and a flag -// indicating if a tombstone exists on the measurement or key. -func (f *IndexFile) TagKeySeriesIDIterator(name, key []byte) (tsdb.SeriesIDIterator, error) { - tblk := f.tblks[string(name)] - if tblk == nil { - return nil, nil - } - - // Find key element. - ke := tblk.TagKeyElem(key) - if ke == nil { - return nil, nil - } - - // Merge all value series iterators together. - vitr := ke.TagValueIterator() - - var itrs []tsdb.SeriesIDIterator - for ve := vitr.Next(); ve != nil; ve = vitr.Next() { - tblk, ok := ve.(*TagBlockValueElem) - if !ok { - return nil, fmt.Errorf("got type %T for iterator, expected %T", ve, TagBlockValueElem{}) - } - - ss, err := tblk.SeriesIDSet() - if err != nil { - return nil, err - } - itrs = append(itrs, tsdb.NewSeriesIDSetIterator(ss)) - } - - return tsdb.MergeSeriesIDIterators(itrs...), nil -} - -// TagValueSeriesIDSet returns a series id set for a tag value. -func (f *IndexFile) TagValueSeriesIDSet(name, key, value []byte) (*tsdb.SeriesIDSet, error) { - tblk := f.tblks[string(name)] - if tblk == nil { - return nil, nil - } - - // Find value element. 
- var valueElem TagBlockValueElem - if !tblk.DecodeTagValueElem(key, value, &valueElem) { - return nil, nil - } else if valueElem.SeriesN() == 0 { - return nil, nil - } - return valueElem.SeriesIDSet() -} - -// TagKey returns a tag key. -func (f *IndexFile) TagKey(name, key []byte) TagKeyElem { - tblk := f.tblks[string(name)] - if tblk == nil { - return nil - } - return tblk.TagKeyElem(key) -} - -// TagValue returns a tag value. -func (f *IndexFile) TagValue(name, key, value []byte) TagValueElem { - tblk := f.tblks[string(name)] - if tblk == nil { - return nil - } - return tblk.TagValueElem(key, value) -} - -// HasSeries returns flags indicating if the series exists and if it is tombstoned. -func (f *IndexFile) HasSeries(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool) { - return f.sfile.HasSeries(name, tags, buf), false // TODO(benbjohnson): series tombstone -} - -// TagValueElem returns an element for a measurement/tag/value. -func (f *IndexFile) TagValueElem(name, key, value []byte) TagValueElem { - tblk, ok := f.tblks[string(name)] - if !ok { - return nil - } - return tblk.TagValueElem(key, value) -} - -// MeasurementIterator returns an iterator over all measurements. -func (f *IndexFile) MeasurementIterator() MeasurementIterator { - return f.mblk.Iterator() -} - -// TagKeyIterator returns an iterator over all tag keys for a measurement. -func (f *IndexFile) TagKeyIterator(name []byte) TagKeyIterator { - blk := f.tblks[string(name)] - if blk == nil { - return nil - } - return blk.TagKeyIterator() -} - -// MeasurementSeriesIDIterator returns an iterator over a measurement's series. -func (f *IndexFile) MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator { - return f.mblk.SeriesIDIterator(name) -} - -// MeasurementsSketches returns existence and tombstone sketches for measurements. -func (f *IndexFile) MeasurementsSketches() (sketch, tSketch estimator.Sketch, err error) { - return f.mblk.Sketches() -} - -// SeriesSketches returns existence and tombstone sketches for series. -func (f *IndexFile) SeriesSketches() (sketch, tSketch estimator.Sketch, err error) { - sketch = hll.NewDefaultPlus() - if err := sketch.UnmarshalBinary(f.sketchData); err != nil { - return nil, nil, err - } - - tSketch = hll.NewDefaultPlus() - if err := tSketch.UnmarshalBinary(f.tSketchData); err != nil { - return nil, nil, err - } - return sketch, tSketch, nil -} - -// ReadIndexFileTrailer returns the index file trailer from data. -func ReadIndexFileTrailer(data []byte) (IndexFileTrailer, error) { - var t IndexFileTrailer - - // Read version. - t.Version = int(binary.BigEndian.Uint16(data[len(data)-IndexFileVersionSize:])) - if t.Version != IndexFileVersion { - return t, ErrUnsupportedIndexFileVersion - } - - // Slice trailer data. - buf := data[len(data)-IndexFileTrailerSize:] - - // Read measurement block info. - t.MeasurementBlock.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - t.MeasurementBlock.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - - // Read series id set info. - t.SeriesIDSet.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - t.SeriesIDSet.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - - // Read series tombstone id set info. - t.TombstoneSeriesIDSet.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - t.TombstoneSeriesIDSet.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - - // Read series sketch set info. 
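	// (Each block reference in the trailer is a big-endian uint64 offset/size
	// pair, 16 bytes; five such pairs plus the 2-byte version field is how the
	// 82-byte IndexFileTrailerSize above is arrived at: 5*16 + 2 = 82.)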
- t.SeriesSketch.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - t.SeriesSketch.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - - // Read series tombstone sketch info. - t.TombstoneSeriesSketch.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - t.TombstoneSeriesSketch.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - - if len(buf) != 2 { // Version field still in buffer. - return t, fmt.Errorf("unread %d bytes left unread in trailer", len(buf)-2) - } - return t, nil -} - -// IndexFileTrailer represents meta data written to the end of the index file. -type IndexFileTrailer struct { - Version int - - MeasurementBlock struct { - Offset int64 - Size int64 - } - - SeriesIDSet struct { - Offset int64 - Size int64 - } - - TombstoneSeriesIDSet struct { - Offset int64 - Size int64 - } - - SeriesSketch struct { - Offset int64 - Size int64 - } - - TombstoneSeriesSketch struct { - Offset int64 - Size int64 - } -} - -// WriteTo writes the trailer to w. -func (t *IndexFileTrailer) WriteTo(w io.Writer) (n int64, err error) { - // Write measurement block info. - if err := writeUint64To(w, uint64(t.MeasurementBlock.Offset), &n); err != nil { - return n, err - } else if err := writeUint64To(w, uint64(t.MeasurementBlock.Size), &n); err != nil { - return n, err - } - - // Write series id set info. - if err := writeUint64To(w, uint64(t.SeriesIDSet.Offset), &n); err != nil { - return n, err - } else if err := writeUint64To(w, uint64(t.SeriesIDSet.Size), &n); err != nil { - return n, err - } - - // Write tombstone series id set info. - if err := writeUint64To(w, uint64(t.TombstoneSeriesIDSet.Offset), &n); err != nil { - return n, err - } else if err := writeUint64To(w, uint64(t.TombstoneSeriesIDSet.Size), &n); err != nil { - return n, err - } - - // Write series sketch info. - if err := writeUint64To(w, uint64(t.SeriesSketch.Offset), &n); err != nil { - return n, err - } else if err := writeUint64To(w, uint64(t.SeriesSketch.Size), &n); err != nil { - return n, err - } - - // Write series tombstone sketch info. - if err := writeUint64To(w, uint64(t.TombstoneSeriesSketch.Offset), &n); err != nil { - return n, err - } else if err := writeUint64To(w, uint64(t.TombstoneSeriesSketch.Size), &n); err != nil { - return n, err - } - - // Write index file encoding version. - if err := writeUint16To(w, IndexFileVersion, &n); err != nil { - return n, err - } - - return n, nil -} - -// FormatIndexFileName generates an index filename for the given index. -func FormatIndexFileName(id, level int) string { - return fmt.Sprintf("L%d-%08d%s", level, id, IndexFileExt) -} diff --git a/tsdb/index/tsi1/index_file_test.go b/tsdb/index/tsi1/index_file_test.go deleted file mode 100644 index 563fb9a4055..00000000000 --- a/tsdb/index/tsi1/index_file_test.go +++ /dev/null @@ -1,262 +0,0 @@ -package tsi1_test - -import ( - "bytes" - "reflect" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" -) - -// Ensure a simple index file can be built and opened. 
-func TestCreateIndexFile(t *testing.T) {
-	sfile := MustOpenSeriesFile(t)
-	defer sfile.Close()
-
-	f, err := CreateIndexFile(sfile.SeriesFile, []Series{
-		{Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})},
-		{Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})},
-		{Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})},
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if e := f.TagValueElem([]byte("cpu"), []byte("region"), []byte("west")); e == nil {
-		t.Fatal("expected element")
-	} else if n := e.(*tsi1.TagBlockValueElem).SeriesN(); n != 1 {
-		t.Fatalf("unexpected series count: %d", n)
-	}
-}
-
-func TestIndexFile_TagKeySeriesIDIterator(t *testing.T) {
-	sfile := MustOpenSeriesFile(t)
-	defer sfile.Close()
-
-	f, err := CreateIndexFile(sfile.SeriesFile, []Series{
-		{Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})},
-		{Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})},
-		{Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})},
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer f.Close()
-
-	itr, err := f.TagKeySeriesIDIterator([]byte("cpu"), []byte("region"))
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer itr.Close()
-
-	// NOTE(edd): the series keys end up being emitted in this order because the
-	// series were written to different partitions in the _series file_. As such,
-	// the key with region=west ends up with a lower series ID than the region=east
-	// series, even though it was written later. When the series id sets for each
-	// tag block in the index file are merged together and iterated, the roaring
-	// bitmap library sorts the series ids, resulting in the series keys being
-	// emitted in a different order from the one in which they were written.
-	exp := []string{"cpu,region=west", "cpu,region=east"}
-	var got []string
-	for {
-		e, err := itr.Next()
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if e.SeriesID == 0 {
-			break
-		}
-
-		name, tags := tsdb.ParseSeriesKey(sfile.SeriesKey(e.SeriesID))
-		got = append(got, string(models.MustNewPoint(string(name), tags, models.Fields{"a": "a"}, time.Time{}).Key()))
-	}
-
-	if !reflect.DeepEqual(got, exp) {
-		t.Fatalf("got keys %v, expected %v", got, exp)
-	}
-}
-
-// Ensure a generated index file can be successfully built.
-func TestGenerateIndexFile(t *testing.T) {
-	sfile := MustOpenSeriesFile(t)
-	defer sfile.Close()
-
-	// Build generated index file.
-	f, err := GenerateIndexFile(sfile.SeriesFile, 10, 3, 4)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Verify that tag/value series can be fetched.
-	if e := f.TagValueElem([]byte("measurement0"), []byte("key0"), []byte("value0")); e == nil {
-		t.Fatal("expected element")
-	} else if n := e.(*tsi1.TagBlockValueElem).SeriesN(); n == 0 {
-		t.Fatal("expected series")
-	}
-}
-
-// Ensure index file generated with uvarint encoding can be loaded.
-func TestGenerateIndexFile_Uvarint(t *testing.T) {
-	// Load previously generated series file.
-	sfile := tsdb.NewSeriesFile("testdata/uvarint/_series")
-	if err := sfile.Open(); err != nil {
-		t.Fatal(err)
-	}
-	defer sfile.Close()
-
-	// Load legacy index file from disk.
-	f := tsi1.NewIndexFile(sfile)
-	f.SetPath("testdata/uvarint/index")
-	if err := f.Open(); err != nil {
-		t.Fatal(err)
-	}
-	defer f.Close()
-
-	// Verify that tag/value series can be fetched.
- if e := f.TagValueElem([]byte("measurement0"), []byte("key0"), []byte("value0")); e == nil { - t.Fatal("expected element") - } else if n := e.(*tsi1.TagBlockValueElem).SeriesN(); n == 0 { - t.Fatal("expected series") - } -} - -// Ensure a MeasurementHashSeries returns false when all series are tombstoned. -func TestIndexFile_MeasurementHasSeries_Tombstoned(t *testing.T) { - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - f, err := CreateIndexFile(sfile.SeriesFile, []Series{ - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, - }) - if err != nil { - t.Fatal(err) - } - - // Simulate all series are tombstoned - ss := tsdb.NewSeriesIDSet() - - if f.MeasurementHasSeries(ss, []byte("cpu")) { - t.Fatalf("MeasurementHasSeries got true, exp false") - } -} - -func BenchmarkIndexFile_TagValueSeries(b *testing.B) { - b.Run("M=1,K=2,V=3", func(b *testing.B) { - sfile := MustOpenSeriesFile(b) - defer sfile.Close() - benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(sfile.SeriesFile, 1, 2, 3)) - }) - b.Run("M=10,K=5,V=5", func(b *testing.B) { - sfile := MustOpenSeriesFile(b) - defer sfile.Close() - benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(sfile.SeriesFile, 10, 5, 5)) - }) - b.Run("M=10,K=7,V=5", func(b *testing.B) { - sfile := MustOpenSeriesFile(b) - defer sfile.Close() - benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(sfile.SeriesFile, 10, 7, 7)) - }) -} - -func benchmarkIndexFile_TagValueSeries(b *testing.B, idx *tsi1.IndexFile) { - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - if e := idx.TagValueElem([]byte("measurement0"), []byte("key0"), []byte("value0")); e == nil { - b.Fatal("expected element") - } else if e.(*tsi1.TagBlockValueElem).SeriesN() == 0 { - b.Fatal("expected series") - } - } -} - -// CreateIndexFile creates an index file with a given set of series. -func CreateIndexFile(sfile *tsdb.SeriesFile, series []Series) (*tsi1.IndexFile, error) { - lf, err := CreateLogFile(sfile, series) - if err != nil { - return nil, err - } - - // Write index file to buffer. - var buf bytes.Buffer - if _, err := lf.CompactTo(&buf, M, K, nil); err != nil { - return nil, err - } - - // Load index file from buffer. - f := tsi1.NewIndexFile(sfile) - if err := f.UnmarshalBinary(buf.Bytes()); err != nil { - return nil, err - } - return f, nil -} - -// GenerateIndexFile generates an index file from a set of series based on the count arguments. -// Total series returned will equal measurementN * tagN * valueN. -func GenerateIndexFile(sfile *tsdb.SeriesFile, measurementN, tagN, valueN int) (*tsi1.IndexFile, error) { - // Generate a new log file first. - lf, err := GenerateLogFile(sfile, measurementN, tagN, valueN) - if err != nil { - return nil, err - } - - // Compact log file to buffer. - var buf bytes.Buffer - if _, err := lf.CompactTo(&buf, M, K, nil); err != nil { - return nil, err - } - - // Load index file from buffer. - f := tsi1.NewIndexFile(sfile) - if err := f.UnmarshalBinary(buf.Bytes()); err != nil { - return nil, err - } - return f, nil -} - -func MustGenerateIndexFile(sfile *tsdb.SeriesFile, measurementN, tagN, valueN int) *tsi1.IndexFile { - f, err := GenerateIndexFile(sfile, measurementN, tagN, valueN) - if err != nil { - panic(err) - } - return f -} - -var indexFileCache struct { - MeasurementN int - TagN int - ValueN int - - IndexFile *tsi1.IndexFile -} - -// MustFindOrGenerateIndexFile returns a cached index file or generates one if it doesn't exist. 
-func MustFindOrGenerateIndexFile(sfile *tsdb.SeriesFile, measurementN, tagN, valueN int) *tsi1.IndexFile { - // Use cache if fields match and the index file has been generated. - if indexFileCache.MeasurementN == measurementN && - indexFileCache.TagN == tagN && - indexFileCache.ValueN == valueN && - indexFileCache.IndexFile != nil { - return indexFileCache.IndexFile - } - - // Generate and cache. - indexFileCache.MeasurementN = measurementN - indexFileCache.TagN = tagN - indexFileCache.ValueN = valueN - indexFileCache.IndexFile = MustGenerateIndexFile(sfile, measurementN, tagN, valueN) - return indexFileCache.IndexFile -} - -func pow(x, y int) int { - r := 1 - for i := 0; i < y; i++ { - r *= x - } - return r -} diff --git a/tsdb/index/tsi1/index_files.go b/tsdb/index/tsi1/index_files.go deleted file mode 100644 index ceedbf401a5..00000000000 --- a/tsdb/index/tsi1/index_files.go +++ /dev/null @@ -1,457 +0,0 @@ -package tsi1 - -import ( - "bufio" - "io" - "os" - "sort" - "time" - - "github.com/influxdata/influxdb/v2/pkg/bytesutil" - "github.com/influxdata/influxdb/v2/pkg/estimator/hll" - "github.com/influxdata/influxdb/v2/tsdb" -) - -// IndexFiles represents a layered set of index files. -type IndexFiles []*IndexFile - -// IDs returns the ids for all index files. -func (p IndexFiles) IDs() []int { - a := make([]int, len(p)) - for i, f := range p { - a[i] = f.ID() - } - return a -} - -// Retain adds a reference count to all files. -func (p IndexFiles) Retain() { - for _, f := range p { - f.Retain() - } -} - -// Release removes a reference count from all files. -func (p IndexFiles) Release() { - for _, f := range p { - f.Release() - } -} - -// Files returns p as a list of File objects. -func (p IndexFiles) Files() []File { - other := make([]File, len(p)) - for i, f := range p { - other[i] = f - } - return other -} - -func (p IndexFiles) buildSeriesIDSets() (seriesIDSet, tombstoneSeriesIDSet *tsdb.SeriesIDSet, err error) { - if len(p) == 0 { - return tsdb.NewSeriesIDSet(), tsdb.NewSeriesIDSet(), nil - } - - // Start with sets from last file. - if seriesIDSet, err = p[len(p)-1].SeriesIDSet(); err != nil { - return nil, nil, err - } else if tombstoneSeriesIDSet, err = p[len(p)-1].TombstoneSeriesIDSet(); err != nil { - return nil, nil, err - } - - // Build sets in reverse order. - // This assumes that bits in both sets are mutually exclusive. - for i := len(p) - 2; i >= 0; i-- { - ss, err := p[i].SeriesIDSet() - if err != nil { - return nil, nil, err - } - - ts, err := p[i].TombstoneSeriesIDSet() - if err != nil { - return nil, nil, err - } - - // Add tombstones and remove from old series existence set. - seriesIDSet.Diff(ts) - tombstoneSeriesIDSet.Merge(ts) - - // Add new series and remove from old series tombstone set. - tombstoneSeriesIDSet.Diff(ss) - seriesIDSet.Merge(ss) - } - - return seriesIDSet, tombstoneSeriesIDSet, nil -} - -// MeasurementNames returns a sorted list of all measurement names for all files. -func (p *IndexFiles) MeasurementNames() [][]byte { - itr := p.MeasurementIterator() - if itr == nil { - return nil - } - - var names [][]byte - for e := itr.Next(); e != nil; e = itr.Next() { - names = append(names, bytesutil.Clone(e.Name())) - } - sort.Sort(byteSlices(names)) - return names -} - -// MeasurementIterator returns an iterator that merges measurements across all files. 
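buildSeriesIDSets above reconciles the per-file existence and tombstone bitmaps so that newer files win: it seeds both sets from the last file in the slice and then, walking toward the front, lets each file's tombstones hide previously accumulated series and its series revive previously accumulated tombstones. The same reconciliation step on plain sets, as a hypothetical helper (assuming, as the loop's precedence implies, that lower-indexed files are newer):

	// applyNewer folds one newer file's series and tombstone sets into the
	// accumulated state; it mirrors the Diff/Merge calls in buildSeriesIDSets
	// but uses plain maps purely for illustration.
	func applyNewer(exists, tombstones, fileSeries, fileTombstones map[uint64]struct{}) {
		for id := range fileTombstones {
			delete(exists, id) // a newer tombstone hides an older series
			tombstones[id] = struct{}{}
		}
		for id := range fileSeries {
			delete(tombstones, id) // a newer series revives an older tombstone
			exists[id] = struct{}{}
		}
	}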
-func (p IndexFiles) MeasurementIterator() MeasurementIterator { - a := make([]MeasurementIterator, 0, len(p)) - for i := range p { - itr := p[i].MeasurementIterator() - if itr == nil { - continue - } - a = append(a, itr) - } - return MergeMeasurementIterators(a...) -} - -// TagKeyIterator returns an iterator that merges tag keys across all files. -func (p *IndexFiles) TagKeyIterator(name []byte) (TagKeyIterator, error) { - a := make([]TagKeyIterator, 0, len(*p)) - for _, f := range *p { - itr := f.TagKeyIterator(name) - if itr == nil { - continue - } - a = append(a, itr) - } - return MergeTagKeyIterators(a...), nil -} - -// MeasurementSeriesIDIterator returns an iterator that merges series across all files. -func (p IndexFiles) MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator { - a := make([]tsdb.SeriesIDIterator, 0, len(p)) - for _, f := range p { - itr := f.MeasurementSeriesIDIterator(name) - if itr == nil { - continue - } - a = append(a, itr) - } - return tsdb.MergeSeriesIDIterators(a...) -} - -// TagValueSeriesIDSet returns an iterator that merges series across all files. -func (p IndexFiles) TagValueSeriesIDSet(name, key, value []byte) (*tsdb.SeriesIDSet, error) { - ss := tsdb.NewSeriesIDSet() - for i := range p { - if fss, err := p[i].TagValueSeriesIDSet(name, key, value); err != nil { - return nil, err - } else if fss != nil { - ss.Merge(fss) - } - } - return ss, nil -} - -// CompactTo merges all index files and writes them to w. -func (p IndexFiles) CompactTo(w io.Writer, sfile *tsdb.SeriesFile, m, k uint64, cancel <-chan struct{}) (n int64, err error) { - var t IndexFileTrailer - - // Check for cancellation. - select { - case <-cancel: - return n, ErrCompactionInterrupted - default: - } - - // Wrap writer in buffered I/O. - bw := bufio.NewWriter(w) - - // Setup context object to track shared data for this compaction. - var info indexCompactInfo - info.cancel = cancel - info.tagSets = make(map[string]indexTagSetPos) - - // Write magic number. - if err := writeTo(bw, []byte(FileSignature), &n); err != nil { - return n, err - } - - // Flush buffer before re-mapping. - if err := bw.Flush(); err != nil { - return n, err - } - - // Write tagset blocks in measurement order. - if err := p.writeTagsetsTo(bw, &info, &n); err != nil { - return n, err - } - - // Ensure block is word aligned. - // if offset := n % 8; offset != 0 { - // if err := writeTo(bw, make([]byte, 8-offset), &n); err != nil { - // return n, err - // } - // } - - // Write measurement block. - t.MeasurementBlock.Offset = n - if err := p.writeMeasurementBlockTo(bw, &info, &n); err != nil { - return n, err - } - t.MeasurementBlock.Size = n - t.MeasurementBlock.Offset - - // Build series sets. - seriesIDSet, tombstoneSeriesIDSet, err := p.buildSeriesIDSets() - if err != nil { - return n, err - } - - // Generate sketches from series sets. - sketch := hll.NewDefaultPlus() - seriesIDSet.ForEach(func(id uint64) { - if key := sfile.SeriesKey(id); key != nil { - sketch.Add(key) - } - }) - - tSketch := hll.NewDefaultPlus() - tombstoneSeriesIDSet.ForEach(func(id uint64) { - if key := sfile.SeriesKey(id); key != nil { - tSketch.Add(key) - } - }) - - // Write series set. - t.SeriesIDSet.Offset = n - nn, err := seriesIDSet.WriteTo(bw) - if n += nn; err != nil { - return n, err - } - t.SeriesIDSet.Size = n - t.SeriesIDSet.Offset - - // Write tombstone series set. 
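CompactTo lays the output file down as consecutive sections and records each one in the trailer by capturing the running byte count immediately before and after the section is written. A minimal sketch of that offset/size bookkeeping against a bytes.Buffer follows; the section names, the trailer-like struct, and the "MAGIC" signature bytes are placeholders for illustration, not the real IndexFileTrailer layout.

```go
package main

import (
	"bytes"
	"fmt"
)

// section records where one block of the output file lives.
type section struct{ Offset, Size int64 }

func main() {
	var buf bytes.Buffer
	var n int64 // running count of bytes written so far
	var measurements, seriesSet section

	write := func(p []byte) {
		m, _ := buf.Write(p) // bytes.Buffer writes never fail
		n += int64(m)
	}

	write([]byte("MAGIC")) // stand-in for the FileSignature written first

	// Record the offset before the block, compute the size after it.
	measurements.Offset = n
	write([]byte("...measurement block..."))
	measurements.Size = n - measurements.Offset

	seriesSet.Offset = n
	write([]byte("...series id set..."))
	seriesSet.Size = n - seriesSet.Offset

	fmt.Printf("measurements at %d (%d bytes), series set at %d (%d bytes), total %d\n",
		measurements.Offset, measurements.Size, seriesSet.Offset, seriesSet.Size, n)
}
```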
- t.TombstoneSeriesIDSet.Offset = n - nn, err = tombstoneSeriesIDSet.WriteTo(bw) - if n += nn; err != nil { - return n, err - } - t.TombstoneSeriesIDSet.Size = n - t.TombstoneSeriesIDSet.Offset - - // Write series sketches. TODO(edd): Implement WriterTo on HLL++. - t.SeriesSketch.Offset = n - data, err := sketch.MarshalBinary() - if err != nil { - return n, err - } else if _, err := bw.Write(data); err != nil { - return n, err - } - t.SeriesSketch.Size = int64(len(data)) - n += t.SeriesSketch.Size - - t.TombstoneSeriesSketch.Offset = n - if data, err = tSketch.MarshalBinary(); err != nil { - return n, err - } else if _, err := bw.Write(data); err != nil { - return n, err - } - t.TombstoneSeriesSketch.Size = int64(len(data)) - n += t.TombstoneSeriesSketch.Size - - // Write trailer. - nn, err = t.WriteTo(bw) - n += nn - if err != nil { - return n, err - } - - // Flush file. - if err := bw.Flush(); err != nil { - return n, err - } - - return n, nil -} - -func (p IndexFiles) writeTagsetsTo(w io.Writer, info *indexCompactInfo, n *int64) error { - mitr := p.MeasurementIterator() - if mitr == nil { - return nil - } - - for m := mitr.Next(); m != nil; m = mitr.Next() { - if err := p.writeTagsetTo(w, m.Name(), info, n); err != nil { - return err - } - } - return nil -} - -// writeTagsetTo writes a single tagset to w and saves the tagset offset. -func (p IndexFiles) writeTagsetTo(w io.Writer, name []byte, info *indexCompactInfo, n *int64) error { - var seriesIDs []uint64 - - // Check for cancellation. - select { - case <-info.cancel: - return ErrCompactionInterrupted - default: - } - - // Ensure block is word aligned. - // if offset := (*n) % 8; offset != 0 { - // if err := writeTo(w, make([]byte, 8-offset), n); err != nil { - // return err - // } - // } - - kitr, err := p.TagKeyIterator(name) - if err != nil { - return err - } - - enc := NewTagBlockEncoder(w) - for ke := kitr.Next(); ke != nil; ke = kitr.Next() { - // Encode key. - if err := enc.EncodeKey(ke.Key(), ke.Deleted()); err != nil { - return err - } - - // Iterate over tag values. - vitr := ke.TagValueIterator() - for ve := vitr.Next(); ve != nil; ve = vitr.Next() { - seriesIDs = seriesIDs[:0] - - // Merge all series together. - if err := func() error { - ss, err := p.TagValueSeriesIDSet(name, ke.Key(), ve.Value()) - if err != nil { - return err - } - return enc.EncodeValue(ve.Value(), ve.Deleted(), ss) - }(); err != nil { - return nil - } - } - } - - // Save tagset offset to measurement. - pos := info.tagSets[string(name)] - pos.offset = *n - - // Flush data to writer. - err = enc.Close() - *n += enc.N() - if err != nil { - return err - } - - // Save tagset size to measurement. - pos.size = *n - pos.offset - - info.tagSets[string(name)] = pos - - return nil -} - -func (p IndexFiles) writeMeasurementBlockTo(w io.Writer, info *indexCompactInfo, n *int64) error { - mw := NewMeasurementBlockWriter() - - // Check for cancellation. - select { - case <-info.cancel: - return ErrCompactionInterrupted - default: - } - - // Add measurement data & compute sketches. - mitr := p.MeasurementIterator() - if mitr != nil { - var seriesN int - for m := mitr.Next(); m != nil; m = mitr.Next() { - name := m.Name() - - // Look-up series ids. - if err := func() error { - itr := p.MeasurementSeriesIDIterator(name) - defer itr.Close() - - var seriesIDs []uint64 - for { - e, err := itr.Next() - if err != nil { - return err - } else if e.SeriesID == 0 { - break - } - seriesIDs = append(seriesIDs, e.SeriesID) - - // Check for cancellation periodically. 
- if seriesN++; seriesN%1000 == 0 { - select { - case <-info.cancel: - return ErrCompactionInterrupted - default: - } - } - } - sort.Sort(uint64Slice(seriesIDs)) - - // Add measurement to writer. - pos := info.tagSets[string(name)] - mw.Add(name, m.Deleted(), pos.offset, pos.size, seriesIDs) - - return nil - }(); err != nil { - return err - } - } - } - - // Flush data to writer. - nn, err := mw.WriteTo(w) - *n += nn - return err -} - -// Stat returns the max index file size and the total file size for all index files. -func (p IndexFiles) Stat() (*IndexFilesInfo, error) { - var info IndexFilesInfo - for _, f := range p { - fi, err := os.Stat(f.Path()) - if os.IsNotExist(err) { - continue - } else if err != nil { - return nil, err - } - - if fi.Size() > info.MaxSize { - info.MaxSize = fi.Size() - } - if fi.ModTime().After(info.ModTime) { - info.ModTime = fi.ModTime() - } - - info.Size += fi.Size() - } - return &info, nil -} - -type IndexFilesInfo struct { - MaxSize int64 // largest file size - Size int64 // total file size - ModTime time.Time // last modified -} - -// indexCompactInfo is a context object used for tracking position information -// during the compaction of index files. -type indexCompactInfo struct { - cancel <-chan struct{} - - // Tracks offset/size for each measurement's tagset. - tagSets map[string]indexTagSetPos -} - -// indexTagSetPos stores the offset/size of tagsets. -type indexTagSetPos struct { - offset int64 - size int64 -} diff --git a/tsdb/index/tsi1/index_files_test.go b/tsdb/index/tsi1/index_files_test.go deleted file mode 100644 index 33b9c147913..00000000000 --- a/tsdb/index/tsi1/index_files_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package tsi1_test - -import ( - "bytes" - "testing" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" -) - -// Ensure multiple index files can be compacted together. -func TestIndexFiles_WriteTo(t *testing.T) { - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - // Write first file. - f0, err := CreateIndexFile(sfile.SeriesFile, []Series{ - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})}, - {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})}, - }) - if err != nil { - t.Fatal(err) - } - - // Write second file. - f1, err := CreateIndexFile(sfile.SeriesFile, []Series{ - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})}, - {Name: []byte("disk"), Tags: models.NewTags(map[string]string{"region": "east"})}, - }) - if err != nil { - t.Fatal(err) - } - - // Compact the two together and write out to a buffer. - var buf bytes.Buffer - a := tsi1.IndexFiles{f0, f1} - if n, err := a.CompactTo(&buf, sfile.SeriesFile, M, K, nil); err != nil { - t.Fatal(err) - } else if n == 0 { - t.Fatal("expected data written") - } - - // Unmarshal buffer into a new index file. - f := tsi1.NewIndexFile(sfile.SeriesFile) - if err := f.UnmarshalBinary(buf.Bytes()); err != nil { - t.Fatal(err) - } - - // Verify data in compacted file. 
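The compaction loops above check their cancel channel with a non-blocking select, and inside the tight per-series loop they only do so every 1,000 iterations to keep the check cheap. Here is a small, self-contained sketch of that idiom; the work and error names are illustrative.

```go
package main

import (
	"errors"
	"fmt"
)

var errInterrupted = errors.New("compaction interrupted")

// processAll walks items and polls for cancellation every 1000 iterations,
// mirroring the seriesN%1000 checks in the tsi1 compaction loops.
func processAll(items []int, cancel <-chan struct{}) error {
	for i := range items {
		_ = items[i] // stand-in for real per-item work

		if (i+1)%1000 == 0 {
			select {
			case <-cancel:
				return errInterrupted
			default: // not cancelled; keep going without blocking
			}
		}
	}
	return nil
}

func main() {
	cancel := make(chan struct{})
	close(cancel) // pretend the caller already asked us to stop
	fmt.Println(processAll(make([]int, 5000), cancel)) // compaction interrupted
}
```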
- if e := f.TagValueElem([]byte("cpu"), []byte("region"), []byte("west")); e == nil { - t.Fatal("expected element") - } else if n := e.(*tsi1.TagBlockValueElem).SeriesN(); n != 1 { - t.Fatalf("unexpected series count: %d", n) - } -} diff --git a/tsdb/index/tsi1/index_test.go b/tsdb/index/tsi1/index_test.go deleted file mode 100644 index 7422ff9e25f..00000000000 --- a/tsdb/index/tsi1/index_test.go +++ /dev/null @@ -1,950 +0,0 @@ -package tsi1_test - -import ( - "compress/gzip" - "errors" - "fmt" - "io" - "os" - "path" - "path/filepath" - "reflect" - "regexp" - "sort" - "sync" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// Bloom filter settings used in tests. -const M, K = 4096, 6 - -// Ensure index can iterate over all measurement names. -func TestIndex_ForEachMeasurementName(t *testing.T) { - idx := MustOpenDefaultIndex(t) - defer idx.Close() - - // Add series to index. - if err := idx.CreateSeriesSliceIfNotExists([]Series{ - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})}, - {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})}, - }); err != nil { - t.Fatal(err) - } - - // Verify measurements are returned. - idx.Run(t, func(t *testing.T) { - var names []string - if err := idx.ForEachMeasurementName(func(name []byte) error { - names = append(names, string(name)) - return nil - }); err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(names, []string{"cpu", "mem"}) { - t.Fatalf("unexpected names: %#v", names) - } - }) - - // Add more series. - if err := idx.CreateSeriesSliceIfNotExists([]Series{ - {Name: []byte("disk")}, - {Name: []byte("mem")}, - }); err != nil { - t.Fatal(err) - } - - // Verify new measurements. - idx.Run(t, func(t *testing.T) { - var names []string - if err := idx.ForEachMeasurementName(func(name []byte) error { - names = append(names, string(name)) - return nil - }); err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(names, []string{"cpu", "disk", "mem"}) { - t.Fatalf("unexpected names: %#v", names) - } - }) -} - -// Ensure index can return whether a measurement exists. -func TestIndex_MeasurementExists(t *testing.T) { - idx := MustOpenDefaultIndex(t) - defer idx.Close() - - // Add series to index. - if err := idx.CreateSeriesSliceIfNotExists([]Series{ - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})}, - }); err != nil { - t.Fatal(err) - } - - // Verify measurement exists. - idx.Run(t, func(t *testing.T) { - if v, err := idx.MeasurementExists([]byte("cpu")); err != nil { - t.Fatal(err) - } else if !v { - t.Fatal("expected measurement to exist") - } - }) - - name, tags := []byte("cpu"), models.NewTags(map[string]string{"region": "east"}) - sid := idx.Index.SeriesFile().SeriesID(name, tags, nil) - if sid == 0 { - t.Fatalf("got 0 series id for %s/%v", name, tags) - } - - // Delete one series. - if err := idx.DropSeries(sid, models.MakeKey(name, tags), true); err != nil { - t.Fatal(err) - } - - // Verify measurement still exists. 
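DropSeries in the test above takes both the numeric series ID and the textual series key, and the key is derived from the measurement name and tags with models.MakeKey, which renders them in line-protocol order (name, then tag pairs sorted by key). A small sketch, importing the models package under the same path the surrounding code uses; the printed value is the expected line-protocol form of the key.

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/models"
)

func main() {
	name := []byte("cpu")
	tags := models.NewTags(map[string]string{"region": "east", "host": "a"})

	// MakeKey renders the measurement name followed by the tag pairs in
	// sorted-key order, the same form line protocol uses for series keys.
	key := models.MakeKey(name, tags)
	fmt.Println(string(key)) // cpu,host=a,region=east
}
```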
- idx.Run(t, func(t *testing.T) { - if v, err := idx.MeasurementExists([]byte("cpu")); err != nil { - t.Fatal(err) - } else if !v { - t.Fatal("expected measurement to still exist") - } - }) - - // Delete second series. - tags.Set([]byte("region"), []byte("west")) - sid = idx.Index.SeriesFile().SeriesID(name, tags, nil) - if sid == 0 { - t.Fatalf("got 0 series id for %s/%v", name, tags) - } - if err := idx.DropSeries(sid, models.MakeKey(name, tags), true); err != nil { - t.Fatal(err) - } - - // Verify measurement is now deleted. - idx.Run(t, func(t *testing.T) { - if v, err := idx.MeasurementExists([]byte("cpu")); err != nil { - t.Fatal(err) - } else if v { - t.Fatal("expected measurement to be deleted") - } - }) -} - -// Ensure index can return a list of matching measurements. -func TestIndex_MeasurementNamesByRegex(t *testing.T) { - idx := MustOpenDefaultIndex(t) - defer idx.Close() - - // Add series to index. - if err := idx.CreateSeriesSliceIfNotExists([]Series{ - {Name: []byte("cpu")}, - {Name: []byte("disk")}, - {Name: []byte("mem")}, - }); err != nil { - t.Fatal(err) - } - - // Retrieve measurements by regex. - idx.Run(t, func(t *testing.T) { - names, err := idx.MeasurementNamesByRegex(regexp.MustCompile(`cpu|mem`)) - if err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(names, [][]byte{[]byte("cpu"), []byte("mem")}) { - t.Fatalf("unexpected names: %v", names) - } - }) -} - -// Ensure index can delete a measurement and all related keys, values, & series. -func TestIndex_DropMeasurement(t *testing.T) { - idx := MustOpenDefaultIndex(t) - defer idx.Close() - - // Add series to index. - if err := idx.CreateSeriesSliceIfNotExists([]Series{ - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})}, - {Name: []byte("disk"), Tags: models.NewTags(map[string]string{"region": "north"})}, - {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "west", "country": "us"})}, - }); err != nil { - t.Fatal(err) - } - - // Drop measurement. - if err := idx.DropMeasurement([]byte("cpu")); err != nil { - t.Fatal(err) - } - - // Verify data is gone in each stage. - idx.Run(t, func(t *testing.T) { - // Verify measurement is gone. - if v, err := idx.MeasurementExists([]byte("cpu")); err != nil { - t.Fatal(err) - } else if v { - t.Fatal("expected no measurement") - } - - // Obtain file set to perform lower level checks. - fs, err := idx.PartitionAt(0).RetainFileSet() - if err != nil { - t.Fatal(err) - } - defer fs.Release() - - // Verify tags & values are gone. 
- if e := fs.TagKeyIterator([]byte("cpu")).Next(); e != nil && !e.Deleted() { - t.Fatal("expected deleted tag key") - } - if itr := fs.TagValueIterator([]byte("cpu"), []byte("region")); itr != nil { - t.Fatal("expected nil tag value iterator") - } - - }) -} - -func TestIndex_OpenFail(t *testing.T) { - idx := NewDefaultIndex(t) - require.NoError(t, idx.Open()) - idx.Index.Close() - // mess up the index: - tslPath := path.Join(idx.Index.Path(), "3", "L0-00000001.tsl") - tslFile, err := os.OpenFile(tslPath, os.O_RDWR, 0666) - require.NoError(t, err) - require.NoError(t, tslFile.Truncate(0)) - // write poisonous TSL file - first byte doesn't matter, remaining bytes are an invalid uvarint - _, err = tslFile.Write([]byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}) - require.NoError(t, err) - require.NoError(t, tslFile.Close()) - idx.Index = tsi1.NewIndex(idx.SeriesFile.SeriesFile, "db0", tsi1.WithPath(idx.Index.Path())) - err = idx.Index.Open() - require.Error(t, err, "expected an error on opening the index") - require.Contains(t, err.Error(), ".tsl\": parsing binary-encoded uint64 value failed; binary.Uvarint() returned -11") - // ensure each partition is closed: - for i := 0; i < int(idx.Index.PartitionN); i++ { - assert.Equal(t, idx.Index.PartitionAt(i).FileN(), 0) - } - require.NoError(t, idx.Close()) -} - -func TestIndex_Open(t *testing.T) { - t.Run("open new index", func(t *testing.T) { - // Opening a fresh index should set the MANIFEST version to current version. - idx := MustOpenDefaultIndex(t) - t.Cleanup(func() { assert.NoError(t, idx.Close()) }) - - // Check version set appropriately. - for i := 0; uint64(i) < tsi1.DefaultPartitionN; i++ { - partition := idx.PartitionAt(i) - - if got, exp := partition.Manifest().Version, 1; got != exp { - t.Fatalf("got index version %d, expected %d", got, exp) - } - } - - for i := 0; i < int(idx.PartitionN); i++ { - p := idx.PartitionAt(i) - - if got, exp := p.NeedsCompaction(false), false; got != exp { - t.Fatalf("got needs compaction %v, expected %v", got, exp) - } - } - }) - - // Reopening an open index should return an error. - t.Run("reopen open index", func(t *testing.T) { - idx := MustOpenDefaultIndex(t) - t.Cleanup(func() { assert.NoError(t, idx.Close()) }) - - // Manually closing the existing SeriesFile so that it won't be left - // opened after idx.Open(), which calls another idx.SeriesFile.Open(). - // - // This is required for t.TempDir() to be cleaned-up successfully on - // Windows. - assert.NoError(t, idx.SeriesFile.Close()) - - err := idx.Open() - if err == nil { - t.Fatal("didn't get an error on reopen, but expected one") - } - }) - - // Opening an incompatible index should return an error. - incompatibleVersions := []int{-1, 0, 2} - for _, v := range incompatibleVersions { - t.Run(fmt.Sprintf("incompatible index version: %d", v), func(t *testing.T) { - idx := NewDefaultIndex(t) - - // Manually create a MANIFEST file for an incompatible index version. - // under one of the partitions. - partitionPath := filepath.Join(idx.Path(), "2") - os.MkdirAll(partitionPath, 0777) - - mpath := filepath.Join(partitionPath, tsi1.ManifestFileName) - m := tsi1.NewManifest(mpath) - m.Levels = nil - m.Version = v // Set example MANIFEST version. - if _, err := m.Write(); err != nil { - t.Fatal(err) - } - - // Log the MANIFEST file. 
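TestIndex_OpenFail corrupts a TSL file with a run of 0xff bytes, which can never terminate as a uvarint: every byte has its continuation bit set, so encoding/binary gives up after reading ten bytes and reports the overflow as a negative byte count. That is where the `-11` in the expected error message comes from, as the small demonstration below shows.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Every 0xff byte has its continuation bit set, so the varint never ends.
	buf := bytes.Repeat([]byte{0xff}, 14)

	v, n := binary.Uvarint(buf)
	// n > 0  => value decoded from n bytes
	// n == 0 => buffer too small
	// n < 0  => overflow; -n is the number of bytes read before giving up
	fmt.Println(v, n) // 0 -11
}
```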
- data, err := os.ReadFile(mpath) - if err != nil { - panic(err) - } - t.Logf("Incompatible MANIFEST: %s", data) - - // Opening this index should return an error because the MANIFEST has an - // incompatible version. - err = idx.Open() - t.Cleanup(func() { assert.NoError(t, idx.Close()) }) - if !errors.Is(err, tsi1.ErrIncompatibleVersion) { - t.Fatalf("got error %v, expected %v", err, tsi1.ErrIncompatibleVersion) - } - }) - } -} - -func TestIndex_Manifest(t *testing.T) { - t.Run("current MANIFEST", func(t *testing.T) { - idx := MustOpenIndex(t, tsi1.DefaultPartitionN) - - // Check version set appropriately. - for i := 0; uint64(i) < tsi1.DefaultPartitionN; i++ { - partition := idx.PartitionAt(i) - if got, exp := partition.Manifest().Version, tsi1.Version; got != exp { - t.Fatalf("got MANIFEST version %d, expected %d", got, exp) - } - } - - require.NoError(t, idx.Close()) - }) -} - -func TestIndex_DiskSizeBytes(t *testing.T) { - idx := MustOpenIndex(t, tsi1.DefaultPartitionN) - defer idx.Close() - - // Add series to index. - if err := idx.CreateSeriesSliceIfNotExists([]Series{ - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})}, - {Name: []byte("disk"), Tags: models.NewTags(map[string]string{"region": "north"})}, - {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "west", "country": "us"})}, - }); err != nil { - t.Fatal(err) - } - - idx.RunStateAware(t, func(t *testing.T, state int) { - // Each MANIFEST file is 419 bytes and there are tsi1.DefaultPartitionN of them - expSize := int64(tsi1.DefaultPartitionN * 419) - switch state { - case Initial: - fallthrough - case Reopen: - // In the log file, each series stores flag(1) + series(uvarint(2)) + len(name)(1) + len(key)(1) + len(value)(1) + checksum(4). - expSize += 4 * 9 - case PostCompaction: - fallthrough - case PostCompactionReopen: - // For TSI files after a compaction, instead of 4*9, we have encoded measurement names, tag names, etc which is larger - expSize += 2202 - } - - if got, exp := idx.DiskSizeBytes(), expSize; got != exp { - // We had some odd errors - if the size is unexpected, log it - idx.Index.LogDiskSize(t) - t.Fatalf("got %d bytes, expected %d", got, exp) - } - }) -} - -func TestIndex_TagValueSeriesIDIterator(t *testing.T) { - idx1 := MustOpenDefaultIndex(t) // Uses the single series creation method CreateSeriesIfNotExists - defer idx1.Close() - idx2 := MustOpenDefaultIndex(t) // Uses the batch series creation method CreateSeriesListIfNotExists - defer idx2.Close() - - // Add some series. 
- data := []struct { - Key string - Name string - Tags map[string]string - }{ - {"cpu,region=west,server=a", "cpu", map[string]string{"region": "west", "server": "a"}}, - {"cpu,region=west,server=b", "cpu", map[string]string{"region": "west", "server": "b"}}, - {"cpu,region=east,server=a", "cpu", map[string]string{"region": "east", "server": "a"}}, - {"cpu,region=north,server=c", "cpu", map[string]string{"region": "north", "server": "c"}}, - {"cpu,region=south,server=s", "cpu", map[string]string{"region": "south", "server": "s"}}, - {"mem,region=west,server=a", "mem", map[string]string{"region": "west", "server": "a"}}, - {"mem,region=west,server=b", "mem", map[string]string{"region": "west", "server": "b"}}, - {"mem,region=west,server=c", "mem", map[string]string{"region": "west", "server": "c"}}, - {"disk,region=east,server=a", "disk", map[string]string{"region": "east", "server": "a"}}, - {"disk,region=east,server=a", "disk", map[string]string{"region": "east", "server": "a"}}, - {"disk,region=north,server=c", "disk", map[string]string{"region": "north", "server": "c"}}, - } - - var batchKeys [][]byte - var batchNames [][]byte - var batchTags []models.Tags - for _, pt := range data { - if err := idx1.CreateSeriesIfNotExists([]byte(pt.Key), []byte(pt.Name), models.NewTags(pt.Tags)); err != nil { - t.Fatal(err) - } - - batchKeys = append(batchKeys, []byte(pt.Key)) - batchNames = append(batchNames, []byte(pt.Name)) - batchTags = append(batchTags, models.NewTags(pt.Tags)) - } - - if err := idx2.CreateSeriesListIfNotExists(batchKeys, batchNames, batchTags); err != nil { - t.Fatal(err) - } - - testTagValueSeriesIDIterator := func(t *testing.T, name, key, value string, expKeys []string) { - for i, idx := range []*Index{idx1, idx2} { - sitr, err := idx.TagValueSeriesIDIterator([]byte(name), []byte(key), []byte(value)) - if err != nil { - t.Fatalf("[index %d] %v", i, err) - } else if sitr == nil { - t.Fatalf("[index %d] series id iterater nil", i) - } - - // Convert series ids to series keys. - itr := tsdb.NewSeriesIteratorAdapter(idx.SeriesFile.SeriesFile, sitr) - if itr == nil { - t.Fatalf("[index %d] got nil iterator", i) - } - defer itr.Close() - - var keys []string - for e, err := itr.Next(); err == nil; e, err = itr.Next() { - if e == nil { - break - } - keys = append(keys, string(models.MakeKey(e.Name(), e.Tags()))) - } - - if err != nil { - t.Fatal(err) - } - - // Iterator was in series id order, which may not be series key order. - sort.Strings(keys) - if got, exp := keys, expKeys; !reflect.DeepEqual(got, exp) { - t.Fatalf("[index %d] got %v, expected %v", i, got, exp) - } - } - } - - // Test that correct series are initially returned - t.Run("initial", func(t *testing.T) { - testTagValueSeriesIDIterator(t, "mem", "region", "west", []string{ - "mem,region=west,server=a", - "mem,region=west,server=b", - "mem,region=west,server=c", - }) - }) - - // The result should now be cached, and the same result should be returned. - t.Run("cached", func(t *testing.T) { - testTagValueSeriesIDIterator(t, "mem", "region", "west", []string{ - "mem,region=west,server=a", - "mem,region=west,server=b", - "mem,region=west,server=c", - }) - }) - - // Adding a new series that would be referenced by some cached bitsets (in this case - // the bitsets for mem->region->west and mem->server->c) should cause the cached - // bitsets to be updated. 
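The comment above describes the series-ID-set cache: once a (measurement, tag key, tag value) lookup has been answered, the resulting bitset is kept, and a later series insert must add the new ID to every cached set the series belongs to rather than throwing the cache away. A rough, self-contained sketch of that bookkeeping with a plain map-based cache follows; the types and method names here are illustrative, not the tsi1 cache API.

```go
package main

import "fmt"

type tagTriple struct{ measurement, key, value string }

// cache maps a tag triple to the set of series IDs that match it.
type cache map[tagTriple]map[uint64]struct{}

// put stores a freshly computed result for a triple.
func (c cache) put(t tagTriple, ids []uint64) {
	set := make(map[uint64]struct{}, len(ids))
	for _, id := range ids {
		set[id] = struct{}{}
	}
	c[t] = set
}

// addSeries is called when a new series is created: every cached set whose
// triple appears in the new series' tags gains the new ID, so cached results
// stay consistent without being invalidated.
func (c cache) addSeries(measurement string, tags map[string]string, id uint64) {
	for k, v := range tags {
		if set, ok := c[tagTriple{measurement, k, v}]; ok {
			set[id] = struct{}{}
		}
	}
}

func main() {
	c := cache{}
	c.put(tagTriple{"mem", "region", "west"}, []uint64{1, 2, 3})

	// A new series mem,region=west,server=c arrives with ID 9.
	c.addSeries("mem", map[string]string{"region": "west", "server": "c"}, 9)

	fmt.Println(len(c[tagTriple{"mem", "region", "west"}])) // 4
}
```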
- if err := idx1.CreateSeriesIfNotExists( - []byte("mem,region=west,root=x,server=c"), - []byte("mem"), - models.NewTags(map[string]string{"region": "west", "root": "x", "server": "c"}), - ); err != nil { - t.Fatal(err) - } - - if err := idx2.CreateSeriesListIfNotExists( - [][]byte{[]byte("mem,region=west,root=x,server=c")}, - [][]byte{[]byte("mem")}, - []models.Tags{models.NewTags(map[string]string{"region": "west", "root": "x", "server": "c"})}, - ); err != nil { - t.Fatal(err) - } - - t.Run("insert series", func(t *testing.T) { - testTagValueSeriesIDIterator(t, "mem", "region", "west", []string{ - "mem,region=west,root=x,server=c", - "mem,region=west,server=a", - "mem,region=west,server=b", - "mem,region=west,server=c", - }) - }) - - if err := idx1.CreateSeriesIfNotExists( - []byte("mem,region=west,root=x,server=c"), - []byte("mem"), - models.NewTags(map[string]string{"region": "west", "root": "x", "server": "c"}), - ); err != nil { - t.Fatal(err) - } - - if err := idx2.CreateSeriesListIfNotExists( - [][]byte{[]byte("mem,region=west,root=x,server=c")}, - [][]byte{[]byte("mem")}, - []models.Tags{models.NewTags(map[string]string{"region": "west", "root": "x", "server": "c"})}, - ); err != nil { - t.Fatal(err) - } - - t.Run("insert same series", func(t *testing.T) { - testTagValueSeriesIDIterator(t, "mem", "region", "west", []string{ - "mem,region=west,root=x,server=c", - "mem,region=west,server=a", - "mem,region=west,server=b", - "mem,region=west,server=c", - }) - }) - - t.Run("no matching series", func(t *testing.T) { - testTagValueSeriesIDIterator(t, "foo", "bar", "zoo", nil) - }) -} - -// Index is a test wrapper for tsi1.Index. -type Index struct { - *tsi1.Index - SeriesFile *SeriesFile -} - -// NewIndex returns a new instance of Index at a temporary path. -func NewIndex(tb testing.TB, partitionN uint64) *Index { - idx := &Index{SeriesFile: NewSeriesFile(tb)} - idx.Index = tsi1.NewIndex(idx.SeriesFile.SeriesFile, "db0", tsi1.WithPath(tb.TempDir())) - idx.Index.PartitionN = partitionN - return idx -} - -// NewIndex returns a new instance of Index with default number of partitions at a temporary path. -func NewDefaultIndex(tb testing.TB) *Index { - return NewIndex(tb, tsi1.DefaultPartitionN) -} - -// MustOpenIndex returns a new, open index. Panic on error. -func MustOpenIndex(tb testing.TB, partitionN uint64) *Index { - idx := NewIndex(tb, partitionN) - if err := idx.Open(); err != nil { - panic(err) - } - return idx -} - -// MustOpenIndex returns a new, open index with the default number of partitions. -func MustOpenDefaultIndex(tb testing.TB) *Index { - return MustOpenIndex(tb, tsi1.DefaultPartitionN) -} - -// Open opens the underlying tsi1.Index and tsdb.SeriesFile -func (idx Index) Open() error { - if err := idx.SeriesFile.Open(); err != nil { - return err - } - return idx.Index.Open() -} - -// Close closes and removes the index directory. -func (idx *Index) Close() error { - // Series file is opened first and must be closed last - if err := idx.Index.Close(); err != nil { - return err - } - if err := idx.SeriesFile.Close(); err != nil { - return err - } - return nil -} - -// Reopen closes and opens the index. -func (idx *Index) Reopen(maxLogSize int64) error { - if err := idx.Index.Close(); err != nil { - return err - } - - // Reopen the series file correctly, by initialising a new underlying series - // file using the same disk data. - if err := idx.SeriesFile.Reopen(); err != nil { - return err - } - - partitionN := idx.Index.PartitionN // Remember how many partitions to use. 
- idx.Index = tsi1.NewIndex(idx.SeriesFile.SeriesFile, "db0", tsi1.WithPath(idx.Index.Path()), tsi1.WithMaximumLogFileSize(maxLogSize)) - idx.Index.PartitionN = partitionN - return idx.Open() -} - -const ( - Initial = iota - Reopen - PostCompaction - PostCompactionReopen -) - -func curryState(state int, f func(t *testing.T, state int)) func(t *testing.T) { - return func(t *testing.T) { - f(t, state) - } -} - -// Run executes a subtest for each of several different states: -// -// - Immediately -// - After reopen -// - After compaction -// - After reopen again -// -// The index should always respond in the same fashion regardless of -// how data is stored. This helper allows the index to be easily tested -// in all major states. -func (idx *Index) RunStateAware(t *testing.T, fn func(t *testing.T, state int)) { - // Invoke immediately. - t.Run("state=initial", curryState(Initial, fn)) - - // Reopen and invoke again. - if err := idx.Reopen(tsdb.DefaultMaxIndexLogFileSize); err != nil { - t.Fatalf("reopen error: %s", err) - } - t.Run("state=reopen", curryState(Reopen, fn)) - - // Reopen requiring a full compaction of the TSL files and invoke again. - idx.Reopen(1) - for { - needsCompaction := false - for i := 0; i < int(idx.PartitionN); i++ { - needsCompaction = needsCompaction || idx.PartitionAt(i).NeedsCompaction(false) - } - if !needsCompaction { - break - } - time.Sleep(10 * time.Millisecond) - } - t.Run("state=post-compaction", curryState(PostCompaction, fn)) - - // Reopen and invoke again. - if err := idx.Reopen(tsdb.DefaultMaxIndexLogFileSize); err != nil { - t.Fatalf("post-compaction reopen error: %s", err) - } - t.Run("state=post-compaction-reopen", curryState(PostCompactionReopen, fn)) -} - -// Run is the same is RunStateAware but for tests that do not depend on compaction state -func (idx *Index) Run(t *testing.T, fn func(t *testing.T)) { - idx.RunStateAware(t, func(t *testing.T, _ int) { - fn(t) - }) -} - -// CreateSeriesSliceIfNotExists creates multiple series at a time. -func (idx *Index) CreateSeriesSliceIfNotExists(a []Series) error { - keys := make([][]byte, 0, len(a)) - names := make([][]byte, 0, len(a)) - tags := make([]models.Tags, 0, len(a)) - for _, s := range a { - keys = append(keys, models.MakeKey(s.Name, s.Tags)) - names = append(names, s.Name) - tags = append(tags, s.Tags) - } - return idx.CreateSeriesListIfNotExists(keys, names, tags) -} - -var tsiditr tsdb.SeriesIDIterator - -// Calling TagValueSeriesIDIterator on the index involves merging several -// SeriesIDSets together.BenchmarkIndex_TagValueSeriesIDIterator, which can have -// a non trivial cost. In the case of `tsi` files, the mmapd sets are merged -// together. In the case of tsl files the sets need to are cloned and then merged. 
-// -// Typical results on an i7 laptop -// BenchmarkIndex_IndexFile_TagValueSeriesIDIterator/78888_series_TagValueSeriesIDIterator/cache-8 2000000 643 ns/op 744 B/op 13 allocs/op -// BenchmarkIndex_IndexFile_TagValueSeriesIDIterator/78888_series_TagValueSeriesIDIterator/no_cache-8 10000 130749 ns/op 124952 B/op 350 allocs/op -func BenchmarkIndex_IndexFile_TagValueSeriesIDIterator(b *testing.B) { - runBenchMark := func(b *testing.B, cacheSize int) { - var err error - sfile := NewSeriesFile(b) - // Load index - idx := tsi1.NewIndex(sfile.SeriesFile, "foo", - tsi1.WithPath("testdata/index-file-index"), - tsi1.DisableCompactions(), - tsi1.WithSeriesIDCacheSize(cacheSize), - ) - defer sfile.Close() - - if err = idx.Open(); err != nil { - b.Fatal(err) - } - defer idx.Close() - - for i := 0; i < b.N; i++ { - tsiditr, err = idx.TagValueSeriesIDIterator([]byte("m4"), []byte("tag0"), []byte("value4")) - if err != nil { - b.Fatal(err) - } else if tsiditr == nil { - b.Fatal("got nil iterator") - } - } - } - - // This benchmark will merge eight bitsets each containing ~10,000 series IDs. - b.Run("78888 series TagValueSeriesIDIterator", func(b *testing.B) { - b.ReportAllocs() - b.Run("cache", func(b *testing.B) { - runBenchMark(b, tsdb.DefaultSeriesIDSetCacheSize) - }) - - b.Run("no cache", func(b *testing.B) { - runBenchMark(b, 0) - }) - }) -} - -var errResult error - -// Typical results on an i7 laptop -// BenchmarkIndex_CreateSeriesListIfNotExists/batch_size_1000/partition_1-8 1 4004452124 ns/op 2381998144 B/op 21686990 allocs/op -// BenchmarkIndex_CreateSeriesListIfNotExists/batch_size_1000/partition_2-8 1 2625853773 ns/op 2368913968 B/op 21765385 allocs/op -// BenchmarkIndex_CreateSeriesListIfNotExists/batch_size_1000/partition_4-8 1 2127205189 ns/op 2338013584 B/op 21908381 allocs/op -// BenchmarkIndex_CreateSeriesListIfNotExists/batch_size_1000/partition_8-8 1 2331960889 ns/op 2332643248 B/op 22191763 allocs/op -// BenchmarkIndex_CreateSeriesListIfNotExists/batch_size_1000/partition_16-8 1 2398489751 ns/op 2299551824 B/op 22670465 allocs/op -// BenchmarkIndex_CreateSeriesListIfNotExists/batch_size_10000/partition_1-8 1 3404683972 ns/op 2387236504 B/op 21600671 allocs/op -// BenchmarkIndex_CreateSeriesListIfNotExists/batch_size_10000/partition_2-8 1 2173772186 ns/op 2329237224 B/op 21631104 allocs/op -// BenchmarkIndex_CreateSeriesListIfNotExists/batch_size_10000/partition_4-8 1 1729089575 ns/op 2299161840 B/op 21699878 allocs/op -// BenchmarkIndex_CreateSeriesListIfNotExists/batch_size_10000/partition_8-8 1 1644295339 ns/op 2161473200 B/op 21796469 allocs/op -// BenchmarkIndex_CreateSeriesListIfNotExists/batch_size_10000/partition_16-8 1 1683275418 ns/op 2171872432 B/op 21925974 allocs/op -// BenchmarkIndex_CreateSeriesListIfNotExists/batch_size_100000/partition_1-8 1 3330508160 ns/op 2333250904 B/op 21574887 allocs/op -// BenchmarkIndex_CreateSeriesListIfNotExists/batch_size_100000/partition_2-8 1 2278604285 ns/op 2292600808 B/op 21628966 allocs/op -// BenchmarkIndex_CreateSeriesListIfNotExists/batch_size_100000/partition_4-8 1 1760098762 ns/op 2243730672 B/op 21684608 allocs/op -// BenchmarkIndex_CreateSeriesListIfNotExists/batch_size_100000/partition_8-8 1 1693312924 ns/op 2166924112 B/op 21753079 allocs/op -// BenchmarkIndex_CreateSeriesListIfNotExists/batch_size_100000/partition_16-8 1 1663610452 ns/op 2131177160 B/op 21806209 allocs/op -func BenchmarkIndex_CreateSeriesListIfNotExists(b *testing.B) { - // Read line-protocol and coerce into tsdb format. 
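The package-level tsiditr and errResult variables above are benchmark sinks: assigning each iteration's result to a variable the compiler cannot prove is unused keeps the measured work from being optimized away as dead code. A minimal sketch of the idiom, with an illustrative summation benchmark:

```go
package example_test

import "testing"

// sink is package-level so the compiler must assume it is observed and
// cannot eliminate the loop that produces it.
var sink int

func BenchmarkSum(b *testing.B) {
	data := make([]int, 1024)
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		total := 0
		for _, v := range data {
			total += v
		}
		sink = total // publish the result so the work is not dead code
	}
}
```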
- keys := make([][]byte, 0, 1e6) - names := make([][]byte, 0, 1e6) - tags := make([]models.Tags, 0, 1e6) - - // 1M series generated with: - // $inch -b 10000 -c 1 -t 10,10,10,10,10,10 -f 1 -m 5 -p 1 - fd, err := os.Open("../../testdata/line-protocol-1M.txt.gz") - if err != nil { - b.Fatal(err) - } - - gzr, err := gzip.NewReader(fd) - if err != nil { - fd.Close() - b.Fatal(err) - } - - data, err := io.ReadAll(gzr) - if err != nil { - b.Fatal(err) - } - - if err := fd.Close(); err != nil { - b.Fatal(err) - } - - points, err := models.ParsePoints(data) - if err != nil { - b.Fatal(err) - } - - for _, pt := range points { - keys = append(keys, pt.Key()) - names = append(names, pt.Name()) - tags = append(tags, pt.Tags()) - } - - batchSizes := []int{1000, 10000, 100000} - partitions := []uint64{1, 2, 4, 8, 16} - for _, sz := range batchSizes { - b.Run(fmt.Sprintf("batch size %d", sz), func(b *testing.B) { - for _, partition := range partitions { - b.Run(fmt.Sprintf("partition %d", partition), func(b *testing.B) { - idx := MustOpenIndex(b, partition) - for j := 0; j < b.N; j++ { - for i := 0; i < len(keys); i += sz { - k := keys[i : i+sz] - n := names[i : i+sz] - t := tags[i : i+sz] - if errResult = idx.CreateSeriesListIfNotExists(k, n, t); errResult != nil { - b.Fatal(err) - } - } - // Reset the index... - b.StopTimer() - if err := idx.Close(); err != nil { - b.Fatal(err) - } - idx = MustOpenIndex(b, partition) - b.StartTimer() - } - }) - } - }) - } -} - -// This benchmark concurrently writes series to the index and fetches cached bitsets. -// The idea is to emphasize the performance difference when bitset caching is on and off. -// -// Typical results for an i7 laptop -// BenchmarkIndex_ConcurrentWriteQuery/partition_1/queries_100000/cache-8 1 3836451407 ns/op 2453296232 B/op 22648482 allocs/op -// BenchmarkIndex_ConcurrentWriteQuery/partition_4/queries_100000/cache-8 1 1836598730 ns/op 2435668224 B/op 22908705 allocs/op -// BenchmarkIndex_ConcurrentWriteQuery/partition_8/queries_100000/cache-8 1 1714771527 ns/op 2341518456 B/op 23450621 allocs/op -// BenchmarkIndex_ConcurrentWriteQuery/partition_16/queries_100000/cache-8 1 1810658403 ns/op 2401239408 B/op 23868079 allocs/op -// BenchmarkIndex_ConcurrentWriteQuery/partition_1/queries_100000/no_cache-8 1 4044478305 ns/op 4414915048 B/op 27292357 allocs/op -// BenchmarkIndex_ConcurrentWriteQuery/partition_4/queries_100000/no_cache-8 1 18663345153 ns/op 23035974472 B/op 54015704 allocs/op -// BenchmarkIndex_ConcurrentWriteQuery/partition_8/queries_100000/no_cache-8 1 22242979152 ns/op 28178915600 B/op 80156305 allocs/op -// BenchmarkIndex_ConcurrentWriteQuery/partition_16/queries_100000/no_cache-8 1 24817283922 ns/op 34613960984 B/op 150356327 allocs/op -func BenchmarkIndex_ConcurrentWriteQuery(b *testing.B) { - // Read line-protocol and coerce into tsdb format. 
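These benchmarks feed series to the index in fixed-size batches with `keys[i : i+sz]`, which only works when the series count divides evenly by every batch size (as it presumably does for the 1M-series fixture). Below is a hedged sketch of the same batching loop written to tolerate a ragged final batch; the counts are illustrative.

```go
package main

import "fmt"

func main() {
	keys := make([]string, 1_000_003) // deliberately not a multiple of the batch size
	const batchSize = 10000

	batches := 0
	for i := 0; i < len(keys); i += batchSize {
		end := i + batchSize
		if end > len(keys) {
			end = len(keys) // clamp the last batch instead of slicing past the end
		}
		batch := keys[i:end]
		_ = batch // stand-in for CreateSeriesListIfNotExists(batch, ...)
		batches++
	}
	fmt.Println(batches) // 101: one hundred full batches plus a short final one
}
```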
- keys := make([][]byte, 0, 1e6) - names := make([][]byte, 0, 1e6) - tags := make([]models.Tags, 0, 1e6) - - // 1M series generated with: - // $inch -b 10000 -c 1 -t 10,10,10,10,10,10 -f 1 -m 5 -p 1 - fd, err := os.Open("testdata/line-protocol-1M.txt.gz") - if err != nil { - b.Fatal(err) - } - - gzr, err := gzip.NewReader(fd) - if err != nil { - fd.Close() - b.Fatal(err) - } - - data, err := io.ReadAll(gzr) - if err != nil { - b.Fatal(err) - } - - if err := fd.Close(); err != nil { - b.Fatal(err) - } - - points, err := models.ParsePoints(data) - if err != nil { - b.Fatal(err) - } - - for _, pt := range points { - keys = append(keys, pt.Key()) - names = append(names, pt.Name()) - tags = append(tags, pt.Tags()) - } - - runBenchmark := func(b *testing.B, queryN int, partitions uint64, cacheSize int) { - idx := &Index{SeriesFile: NewSeriesFile(b)} - idx.Index = tsi1.NewIndex(idx.SeriesFile.SeriesFile, "db0", tsi1.WithPath(b.TempDir()), tsi1.WithSeriesIDCacheSize(cacheSize)) - idx.Index.PartitionN = partitions - - if err := idx.Open(); err != nil { - panic(err) - } - - var wg sync.WaitGroup - - // Run concurrent iterator... - runIter := func(b *testing.B) { - keys := [][]string{ - {"m0", "tag2", "value4"}, - {"m1", "tag3", "value5"}, - {"m2", "tag4", "value6"}, - {"m3", "tag0", "value8"}, - {"m4", "tag5", "value0"}, - } - - for i := 0; i < queryN/5; i++ { - for _, key := range keys { - itr, err := idx.TagValueSeriesIDIterator([]byte(key[0]), []byte(key[1]), []byte(key[2])) - if err != nil { - b.Fatal(err) - } else if itr == nil { - b.Fatal("got nil iterator") - } - if err := itr.Close(); err != nil { - b.Fatal(err) - } - } - } - } - - wg.Add(1) - go func() { defer wg.Done(); runIter(b) }() - batchSize := 10000 - - for j := 0; j < 1; j++ { - for i := 0; i < len(keys); i += batchSize { - k := keys[i : i+batchSize] - n := names[i : i+batchSize] - t := tags[i : i+batchSize] - if errResult = idx.CreateSeriesListIfNotExists(k, n, t); errResult != nil { - b.Fatal(err) - } - } - - // Wait for queries to finish - wg.Wait() - - // Reset the index... 
- b.StopTimer() - if err := idx.Close(); err != nil { - b.Fatal(err) - } - - // Re-open everything - idx := &Index{SeriesFile: NewSeriesFile(b)} - idx.Index = tsi1.NewIndex(idx.SeriesFile.SeriesFile, "db0", tsi1.WithPath(b.TempDir()), tsi1.WithSeriesIDCacheSize(cacheSize)) - idx.Index.PartitionN = partitions - - if err := idx.Open(); err != nil { - b.Fatal(err) - } - - wg.Add(1) - go func() { defer wg.Done(); runIter(b) }() - b.StartTimer() - } - } - - partitions := []uint64{1, 4, 8, 16} - queries := []int{1e5} - for _, partition := range partitions { - b.Run(fmt.Sprintf("partition %d", partition), func(b *testing.B) { - for _, queryN := range queries { - b.Run(fmt.Sprintf("queries %d", queryN), func(b *testing.B) { - b.Run("cache", func(b *testing.B) { - runBenchmark(b, queryN, partition, tsdb.DefaultSeriesIDSetCacheSize) - }) - - b.Run("no cache", func(b *testing.B) { - runBenchmark(b, queryN, partition, 0) - }) - }) - } - }) - } -} diff --git a/tsdb/index/tsi1/log_file.go b/tsdb/index/tsi1/log_file.go deleted file mode 100644 index a50b7c0778e..00000000000 --- a/tsdb/index/tsi1/log_file.go +++ /dev/null @@ -1,1527 +0,0 @@ -//lint:file-ignore SA5011 we use assertions, which don't guard -package tsi1 - -import ( - "bufio" - "bytes" - "encoding/binary" - "errors" - "fmt" - "hash/crc32" - "io" - "os" - "sort" - "sync" - "time" - "unsafe" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/bloom" - "github.com/influxdata/influxdb/v2/pkg/estimator" - "github.com/influxdata/influxdb/v2/pkg/estimator/hll" - "github.com/influxdata/influxdb/v2/pkg/mmap" - "github.com/influxdata/influxdb/v2/tsdb" -) - -// Log errors. -var ( - ErrLogEntryChecksumMismatch = errors.New("log entry checksum mismatch") -) - -// Log entry flag constants. -const ( - LogEntrySeriesTombstoneFlag = 0x01 - LogEntryMeasurementTombstoneFlag = 0x02 - LogEntryTagKeyTombstoneFlag = 0x04 - LogEntryTagValueTombstoneFlag = 0x08 -) - -// defaultLogFileBufferSize describes the size of the buffer that the LogFile's buffered -// writer uses. If the LogFile does not have an explicit buffer size set then -// this is the size of the buffer; it is equal to the default buffer size used -// by a bufio.Writer. -const defaultLogFileBufferSize = 4096 - -// indexFileBufferSize is the buffer size used when compacting the LogFile down -// into a .tsi file. -const indexFileBufferSize = 1 << 17 // 128K - -// LogFile represents an on-disk write-ahead log file. -type LogFile struct { - mu sync.RWMutex - wg sync.WaitGroup // ref count - id int // file sequence identifier - data []byte // mmap - file *os.File // writer - w *bufio.Writer // buffered writer - bufferSize int // The size of the buffer used by the buffered writer - nosync bool // Disables buffer flushing and file syncing. Useful for offline tooling. - buf []byte // marshaling buffer - keyBuf []byte - - sfile *tsdb.SeriesFile // series lookup - size int64 // tracks current file size - modTime time.Time // tracks last time write occurred - - // In-memory series existence/tombstone sets. - seriesIDSet, tombstoneSeriesIDSet *tsdb.SeriesIDSet - - // In-memory index. - mms logMeasurements - - // Filepath to the log file. - path string -} - -// NewLogFile returns a new instance of LogFile. 
-func NewLogFile(sfile *tsdb.SeriesFile, path string) *LogFile { - return &LogFile{ - sfile: sfile, - path: path, - mms: make(logMeasurements), - - seriesIDSet: tsdb.NewSeriesIDSet(), - tombstoneSeriesIDSet: tsdb.NewSeriesIDSet(), - } -} - -// bytes estimates the memory footprint of this LogFile, in bytes. -func (f *LogFile) bytes() int { - f.mu.RLock() - defer f.mu.RUnlock() - var b int - b += 24 // mu RWMutex is 24 bytes - b += 16 // wg WaitGroup is 16 bytes - b += int(unsafe.Sizeof(f.id)) - // Do not include f.data because it is mmap'd - // TODO(jacobmarble): Uncomment when we are using go >= 1.10.0 - //b += int(unsafe.Sizeof(f.w)) + f.w.Size() - b += int(unsafe.Sizeof(f.buf)) + len(f.buf) - b += int(unsafe.Sizeof(f.keyBuf)) + len(f.keyBuf) - // Do not count SeriesFile because it belongs to the code that constructed this Index. - b += int(unsafe.Sizeof(f.size)) - b += int(unsafe.Sizeof(f.modTime)) - b += int(unsafe.Sizeof(f.seriesIDSet)) + f.seriesIDSet.Bytes() - b += int(unsafe.Sizeof(f.tombstoneSeriesIDSet)) + f.tombstoneSeriesIDSet.Bytes() - b += int(unsafe.Sizeof(f.mms)) + f.mms.bytes() - b += int(unsafe.Sizeof(f.path)) + len(f.path) - return b -} - -// Open reads the log from a file and validates all the checksums. -func (f *LogFile) Open() error { - f.mu.Lock() - defer f.mu.Unlock() - if err := f.open(); err != nil { - f.Close() - return err - } - return nil -} - -func (f *LogFile) open() error { - f.id, _ = ParseFilename(f.path) - - // Open file for appending. - file, err := os.OpenFile(f.Path(), os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - return err - } - f.file = file - - if f.bufferSize == 0 { - f.bufferSize = defaultLogFileBufferSize - } - f.w = bufio.NewWriterSize(f.file, f.bufferSize) - - // Finish opening if file is empty. - fi, err := file.Stat() - if err != nil { - return err - } else if fi.Size() == 0 { - return nil - } - f.size = fi.Size() - f.modTime = fi.ModTime() - - // Open a read-only memory map of the existing data. - data, err := mmap.Map(f.Path(), 0) - if err != nil { - return err - } - f.data = data - - // Read log entries from mmap. - var n int64 - for buf := f.data; len(buf) > 0; { - // Read next entry. Truncate partial writes. - var e LogEntry - if err := e.UnmarshalBinary(buf); errors.Is(err, io.ErrShortBuffer) || errors.Is(err, ErrLogEntryChecksumMismatch) { - break - } else if err != nil { - return fmt.Errorf("%q: %w", f.path, err) - } - - // Execute entry against in-memory index. - f.execEntry(&e) - - // Move buffer forward. - n += int64(e.Size) - buf = buf[e.Size:] - } - - // Move to the end of the file. - f.size = n - _, err = file.Seek(n, io.SeekStart) - return err -} - -// Close shuts down the file handle and mmap. -func (f *LogFile) Close() error { - // Wait until the file has no more references. - f.wg.Wait() - - if f.w != nil { - f.w.Flush() - f.w = nil - } - - if f.file != nil { - f.file.Close() - f.file = nil - } - - if f.data != nil { - mmap.Unmap(f.data) - } - - f.mms = make(logMeasurements) - return nil -} - -// FlushAndSync flushes buffered data to disk and then fsyncs the underlying file. -// If the LogFile has disabled flushing and syncing then FlushAndSync is a no-op. -func (f *LogFile) FlushAndSync() error { - if f.nosync { - return nil - } - - if f.w != nil { - if err := f.w.Flush(); err != nil { - return err - } - } - - if f.file == nil { - return nil - } - return f.file.Sync() -} - -// ID returns the file sequence identifier. -func (f *LogFile) ID() int { return f.id } - -// Path returns the file path. 
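LogFile.open above replays the write-ahead log by decoding one entry at a time from the mapped bytes and stopping cleanly at a short or checksum-mismatched tail, so a partially written final entry is simply truncated rather than treated as corruption of the whole file. Here is a simplified, self-contained sketch of that replay shape; it uses an invented length-prefixed record format rather than the real LogEntry encoding.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// replay walks length-prefixed records, returning the payloads it could fully
// decode plus the number of valid bytes; a truncated tail is ignored, mirroring
// how LogFile.open stops at io.ErrShortBuffer and seeks to the last good offset.
func replay(buf []byte) (records [][]byte, n int64) {
	for len(buf) >= 4 {
		size := binary.BigEndian.Uint32(buf[:4])
		if int(size)+4 > len(buf) {
			break // partial write at the tail; truncate here
		}
		records = append(records, buf[4:4+size])
		buf = buf[4+size:]
		n += int64(4 + size)
	}
	return records, n
}

func main() {
	var log []byte
	for _, payload := range []string{"entry-1", "entry-2"} {
		var hdr [4]byte
		binary.BigEndian.PutUint32(hdr[:], uint32(len(payload)))
		log = append(log, hdr[:]...)
		log = append(log, payload...)
	}
	log = append(log, 0x00, 0x00) // a torn final record missing its payload

	records, valid := replay(log)
	fmt.Println(len(records), valid) // 2 22
}
```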
-func (f *LogFile) Path() string { return f.path } - -// SetPath sets the log file's path. -func (f *LogFile) SetPath(path string) { f.path = path } - -// Level returns the log level of the file. -func (f *LogFile) Level() int { return 0 } - -// Filter returns the bloom filter for the file. -func (f *LogFile) Filter() *bloom.Filter { return nil } - -// Retain adds a reference count to the file. -func (f *LogFile) Retain() { f.wg.Add(1) } - -// Release removes a reference count from the file. -func (f *LogFile) Release() { f.wg.Done() } - -// Stat returns size and last modification time of the file. -func (f *LogFile) Stat() (int64, time.Time) { - f.mu.RLock() - size, modTime := f.size, f.modTime - f.mu.RUnlock() - return size, modTime -} - -// SeriesIDSet returns the series existence set. -func (f *LogFile) SeriesIDSet() (*tsdb.SeriesIDSet, error) { - return f.seriesIDSet, nil -} - -// TombstoneSeriesIDSet returns the series tombstone set. -func (f *LogFile) TombstoneSeriesIDSet() (*tsdb.SeriesIDSet, error) { - return f.tombstoneSeriesIDSet, nil -} - -// Size returns the size of the file, in bytes. -func (f *LogFile) Size() int64 { - f.mu.RLock() - v := f.size - f.mu.RUnlock() - return v -} - -// ModTime returns the last modified time of the file -func (f *LogFile) ModTime() time.Time { - f.mu.RLock() - defer f.mu.RUnlock() - return f.modTime -} - -// Measurement returns a measurement element. -func (f *LogFile) Measurement(name []byte) MeasurementElem { - f.mu.RLock() - defer f.mu.RUnlock() - - mm, ok := f.mms[string(name)] - if !ok { - return nil - } - - return mm -} - -func (f *LogFile) MeasurementHasSeries(ss *tsdb.SeriesIDSet, name []byte) bool { - f.mu.RLock() - defer f.mu.RUnlock() - - mm, ok := f.mms[string(name)] - if !ok { - return false - } - - return mm.hasSeries(ss) -} - -// MeasurementNames returns an ordered list of measurement names. -func (f *LogFile) MeasurementNames() []string { - f.mu.RLock() - defer f.mu.RUnlock() - return f.measurementNames() -} - -func (f *LogFile) measurementNames() []string { - a := make([]string, 0, len(f.mms)) - for name := range f.mms { - a = append(a, name) - } - sort.Strings(a) - return a -} - -// DeleteMeasurement adds a tombstone for a measurement to the log file. -func (f *LogFile) DeleteMeasurement(name []byte) error { - f.mu.Lock() - defer f.mu.Unlock() - - e := LogEntry{Flag: LogEntryMeasurementTombstoneFlag, Name: name} - if err := f.appendEntry(&e); err != nil { - return err - } - f.execEntry(&e) - - // Flush buffer and sync to disk. - return f.FlushAndSync() -} - -// TagKeySeriesIDIterator returns a series iterator for a tag key. -func (f *LogFile) TagKeySeriesIDIterator(name, key []byte) (tsdb.SeriesIDIterator, error) { - f.mu.RLock() - defer f.mu.RUnlock() - - mm, ok := f.mms[string(name)] - if !ok { - return nil, nil - } - - tk, ok := mm.tagSet[string(key)] - if !ok { - return nil, nil - } - - // Combine iterators across all tag keys. - itrs := make([]tsdb.SeriesIDIterator, 0, len(tk.tagValues)) - for _, tv := range tk.tagValues { - if tv.cardinality() == 0 { - continue - } - if itr := tsdb.NewSeriesIDSetIterator(tv.seriesIDSet()); itr != nil { - itrs = append(itrs, itr) - } - } - - return tsdb.MergeSeriesIDIterators(itrs...), nil -} - -// TagKeyIterator returns a value iterator for a measurement. 
-func (f *LogFile) TagKeyIterator(name []byte) TagKeyIterator { - f.mu.RLock() - defer f.mu.RUnlock() - - mm, ok := f.mms[string(name)] - if !ok { - return nil - } - - a := make([]logTagKey, 0, len(mm.tagSet)) - for _, k := range mm.tagSet { - a = append(a, k) - } - return newLogTagKeyIterator(f, a) -} - -// TagKey returns a tag key element. -func (f *LogFile) TagKey(name, key []byte) TagKeyElem { - f.mu.RLock() - defer f.mu.RUnlock() - - mm, ok := f.mms[string(name)] - if !ok { - return nil - } - - tk, ok := mm.tagSet[string(key)] - if !ok { - return nil - } - - return &tk -} - -// TagValue returns a tag value element. -func (f *LogFile) TagValue(name, key, value []byte) TagValueElem { - f.mu.RLock() - defer f.mu.RUnlock() - - mm, ok := f.mms[string(name)] - if !ok { - return nil - } - - tk, ok := mm.tagSet[string(key)] - if !ok { - return nil - } - - tv, ok := tk.tagValues[string(value)] - if !ok { - return nil - } - - return &tv -} - -// TagValueIterator returns a value iterator for a tag key. -func (f *LogFile) TagValueIterator(name, key []byte) TagValueIterator { - f.mu.RLock() - defer f.mu.RUnlock() - - mm, ok := f.mms[string(name)] - if !ok { - return nil - } - - tk, ok := mm.tagSet[string(key)] - if !ok { - return nil - } - return tk.TagValueIterator() -} - -// DeleteTagKey adds a tombstone for a tag key to the log file. -func (f *LogFile) DeleteTagKey(name, key []byte) error { - f.mu.Lock() - defer f.mu.Unlock() - - e := LogEntry{Flag: LogEntryTagKeyTombstoneFlag, Name: name, Key: key} - if err := f.appendEntry(&e); err != nil { - return err - } - f.execEntry(&e) - - // Flush buffer and sync to disk. - return f.FlushAndSync() -} - -// TagValueSeriesIDSet returns a series iterator for a tag value. -func (f *LogFile) TagValueSeriesIDSet(name, key, value []byte) (*tsdb.SeriesIDSet, error) { - f.mu.RLock() - defer f.mu.RUnlock() - - mm, ok := f.mms[string(name)] - if !ok { - return nil, nil - } - - tk, ok := mm.tagSet[string(key)] - if !ok { - return nil, nil - } - - tv, ok := tk.tagValues[string(value)] - if !ok { - return nil, nil - } else if tv.cardinality() == 0 { - return nil, nil - } - - return tv.seriesIDSet(), nil -} - -// MeasurementN returns the total number of measurements. -func (f *LogFile) MeasurementN() (n uint64) { - f.mu.RLock() - defer f.mu.RUnlock() - return uint64(len(f.mms)) -} - -// TagKeyN returns the total number of keys. -func (f *LogFile) TagKeyN() (n uint64) { - f.mu.RLock() - defer f.mu.RUnlock() - for _, mm := range f.mms { - n += uint64(len(mm.tagSet)) - } - return n -} - -// TagValueN returns the total number of values. -func (f *LogFile) TagValueN() (n uint64) { - f.mu.RLock() - defer f.mu.RUnlock() - for _, mm := range f.mms { - for _, k := range mm.tagSet { - n += uint64(len(k.tagValues)) - } - } - return n -} - -// DeleteTagValue adds a tombstone for a tag value to the log file. -func (f *LogFile) DeleteTagValue(name, key, value []byte) error { - f.mu.Lock() - defer f.mu.Unlock() - - e := LogEntry{Flag: LogEntryTagValueTombstoneFlag, Name: name, Key: key, Value: value} - if err := f.appendEntry(&e); err != nil { - return err - } - f.execEntry(&e) - - // Flush buffer and sync to disk. - return f.FlushAndSync() -} - -// AddSeriesList adds a list of series to the log file in bulk. 
-func (f *LogFile) AddSeriesList(seriesSet *tsdb.SeriesIDSet, names [][]byte, tagsSlice []models.Tags) ([]uint64, error) { - seriesIDs, err := f.sfile.CreateSeriesListIfNotExists(names, tagsSlice) - if err != nil { - return nil, err - } - - var writeRequired bool - entries := make([]LogEntry, 0, len(names)) - seriesSet.RLock() - for i := range names { - if seriesSet.ContainsNoLock(seriesIDs[i]) { - // We don't need to allocate anything for this series. - seriesIDs[i] = 0 - continue - } - writeRequired = true - entries = append(entries, LogEntry{SeriesID: seriesIDs[i], name: names[i], tags: tagsSlice[i], cached: true, batchidx: i}) - } - seriesSet.RUnlock() - - // Exit if all series already exist. - if !writeRequired { - return seriesIDs, nil - } - - f.mu.Lock() - defer f.mu.Unlock() - - seriesSet.Lock() - defer seriesSet.Unlock() - - for i := range entries { // NB - this doesn't evaluate all series ids returned from series file. - entry := &entries[i] - if seriesSet.ContainsNoLock(entry.SeriesID) { - // We don't need to allocate anything for this series. - seriesIDs[entry.batchidx] = 0 - continue - } - if err := f.appendEntry(entry); err != nil { - return nil, err - } - f.execEntry(entry) - seriesSet.AddNoLock(entry.SeriesID) - } - - // Flush buffer and sync to disk. - if err := f.FlushAndSync(); err != nil { - return nil, err - } - return seriesIDs, nil -} - -// DeleteSeriesID adds a tombstone for a series id. -func (f *LogFile) DeleteSeriesID(id uint64) error { - f.mu.Lock() - defer f.mu.Unlock() - - e := LogEntry{Flag: LogEntrySeriesTombstoneFlag, SeriesID: id} - if err := f.appendEntry(&e); err != nil { - return err - } - f.execEntry(&e) - - // Flush buffer and sync to disk. - return f.FlushAndSync() -} - -// SeriesN returns the total number of series in the file. -func (f *LogFile) SeriesN() (n uint64) { - f.mu.RLock() - defer f.mu.RUnlock() - - for _, mm := range f.mms { - n += uint64(mm.cardinality()) - } - return n -} - -// appendEntry adds a log entry to the end of the file. -func (f *LogFile) appendEntry(e *LogEntry) error { - // Marshal entry to the local buffer. - f.buf = appendLogEntry(f.buf[:0], e) - - // Save the size of the record. - e.Size = len(f.buf) - - // Write record to file. - n, err := f.w.Write(f.buf) - if err != nil { - // Move position backwards over partial entry. - // Log should be reopened if seeking cannot be completed. - if n > 0 { - f.w.Reset(f.file) - if _, err := f.file.Seek(int64(-n), io.SeekCurrent); err != nil { - f.Close() - } - } - return err - } - - // Update in-memory file size & modification time. - f.size += int64(n) - f.modTime = time.Now() - - return nil -} - -// execEntry executes a log entry against the in-memory index. -// This is done after appending and on replay of the log. 
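appendEntry marshals each record into a reusable buffer before writing it out, and the on-disk entry carries a checksum so a corrupted record can be rejected on replay (hence the hash/crc32 import and ErrLogEntryChecksumMismatch). The sketch below shows a checksummed encode/verify round trip in the same spirit; it is not the actual LogEntry layout, just an illustration of the pattern.

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"hash/crc32"
)

var errChecksumMismatch = errors.New("log entry checksum mismatch")

// encode appends the payload followed by its CRC32 to dst, reusing dst's
// capacity the way appendEntry reuses f.buf between calls.
func encode(dst, payload []byte) []byte {
	dst = append(dst[:0], payload...)
	var sum [4]byte
	binary.BigEndian.PutUint32(sum[:], crc32.ChecksumIEEE(payload))
	return append(dst, sum[:]...)
}

// verify recomputes the checksum over the payload and compares it with the
// stored value, returning the payload only when they match.
func verify(rec []byte) ([]byte, error) {
	if len(rec) < 4 {
		return nil, errChecksumMismatch
	}
	payload, stored := rec[:len(rec)-4], binary.BigEndian.Uint32(rec[len(rec)-4:])
	if crc32.ChecksumIEEE(payload) != stored {
		return nil, errChecksumMismatch
	}
	return payload, nil
}

func main() {
	var buf []byte
	rec := encode(buf, []byte("cpu,region=east"))

	if payload, err := verify(rec); err == nil {
		fmt.Println(string(payload)) // cpu,region=east
	}

	rec[0] ^= 0xff // flip a bit to simulate corruption
	_, err := verify(rec)
	fmt.Println(err) // log entry checksum mismatch
}
```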
-func (f *LogFile) execEntry(e *LogEntry) { - switch e.Flag { - case LogEntryMeasurementTombstoneFlag: - f.execDeleteMeasurementEntry(e) - case LogEntryTagKeyTombstoneFlag: - f.execDeleteTagKeyEntry(e) - case LogEntryTagValueTombstoneFlag: - f.execDeleteTagValueEntry(e) - default: - f.execSeriesEntry(e) - } -} - -func (f *LogFile) execDeleteMeasurementEntry(e *LogEntry) { - mm := f.createMeasurementIfNotExists(e.Name) - mm.deleted = true - mm.tagSet = make(map[string]logTagKey) - mm.series = make(map[uint64]struct{}) - mm.seriesSet = nil -} - -func (f *LogFile) execDeleteTagKeyEntry(e *LogEntry) { - mm := f.createMeasurementIfNotExists(e.Name) - ts := mm.createTagSetIfNotExists(e.Key) - - ts.deleted = true - - mm.tagSet[string(e.Key)] = ts -} - -func (f *LogFile) execDeleteTagValueEntry(e *LogEntry) { - mm := f.createMeasurementIfNotExists(e.Name) - ts := mm.createTagSetIfNotExists(e.Key) - tv := ts.createTagValueIfNotExists(e.Value) - - tv.deleted = true - - ts.tagValues[string(e.Value)] = tv - mm.tagSet[string(e.Key)] = ts -} - -func (f *LogFile) execSeriesEntry(e *LogEntry) { - var seriesKey []byte - if e.cached { - sz := tsdb.SeriesKeySize(e.name, e.tags) - if len(f.keyBuf) < sz { - f.keyBuf = make([]byte, 0, sz) - } - seriesKey = tsdb.AppendSeriesKey(f.keyBuf[:0], e.name, e.tags) - } else { - seriesKey = f.sfile.SeriesKey(e.SeriesID) - } - - // Series keys can be removed if the series has been deleted from - // the entire database and the server is restarted. This would cause - // the log to replay its insert but the key cannot be found. - // - // https://github.com/influxdata/influxdb/issues/9444 - if seriesKey == nil { - return - } - - // Check if deleted. - deleted := e.Flag == LogEntrySeriesTombstoneFlag - - // Read key size. - _, remainder := tsdb.ReadSeriesKeyLen(seriesKey) - - // Read measurement name. - name, remainder := tsdb.ReadSeriesKeyMeasurement(remainder) - mm := f.createMeasurementIfNotExists(name) - mm.deleted = false - if !deleted { - mm.addSeriesID(e.SeriesID) - } else { - mm.removeSeriesID(e.SeriesID) - } - - // Read tag count. - tagN, remainder := tsdb.ReadSeriesKeyTagN(remainder) - - // Save tags. - var k, v []byte - for i := 0; i < tagN; i++ { - k, v, remainder = tsdb.ReadSeriesKeyTag(remainder) - ts := mm.createTagSetIfNotExists(k) - tv := ts.createTagValueIfNotExists(v) - - // Add/remove a reference to the series on the tag value. - if !deleted { - tv.addSeriesID(e.SeriesID) - } else { - tv.removeSeriesID(e.SeriesID) - } - - ts.tagValues[string(v)] = tv - - mm.tagSet[string(k)] = ts - } - - // Add/remove from appropriate series id sets. - if !deleted { - f.seriesIDSet.Add(e.SeriesID) - f.tombstoneSeriesIDSet.Remove(e.SeriesID) - } else { - f.seriesIDSet.Remove(e.SeriesID) - f.tombstoneSeriesIDSet.Add(e.SeriesID) - } -} - -// SeriesIDIterator returns an iterator over all series in the log file. -func (f *LogFile) SeriesIDIterator() tsdb.SeriesIDIterator { - f.mu.RLock() - defer f.mu.RUnlock() - - ss := tsdb.NewSeriesIDSet() - allSeriesSets := make([]*tsdb.SeriesIDSet, 0, len(f.mms)) - - for _, mm := range f.mms { - if mm.seriesSet != nil { - allSeriesSets = append(allSeriesSets, mm.seriesSet) - continue - } - - // measurement is not using seriesSet to store series IDs. - mm.forEach(func(seriesID uint64) { - ss.AddNoLock(seriesID) - }) - } - - // Fast merge all seriesSets. - if len(allSeriesSets) > 0 { - ss.Merge(allSeriesSets...) - } - - return tsdb.NewSeriesIDSetIterator(ss) -} - -// createMeasurementIfNotExists returns a measurement by name. 
-func (f *LogFile) createMeasurementIfNotExists(name []byte) *logMeasurement {
-	mm := f.mms[string(name)]
-	if mm == nil {
-		mm = &logMeasurement{
-			f:      f,
-			name:   name,
-			tagSet: make(map[string]logTagKey),
-			series: make(map[uint64]struct{}),
-		}
-		f.mms[string(name)] = mm
-	}
-	return mm
-}
-
-// MeasurementIterator returns an iterator over all the measurements in the file.
-func (f *LogFile) MeasurementIterator() MeasurementIterator {
-	f.mu.RLock()
-	defer f.mu.RUnlock()
-
-	var itr logMeasurementIterator
-	for _, mm := range f.mms {
-		itr.mms = append(itr.mms, *mm)
-	}
-	sort.Sort(logMeasurementSlice(itr.mms))
-	return &itr
-}
-
-// MeasurementSeriesIDIterator returns an iterator over all series for a measurement.
-func (f *LogFile) MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator {
-	f.mu.RLock()
-	defer f.mu.RUnlock()
-
-	mm := f.mms[string(name)]
-	if mm == nil || mm.cardinality() == 0 {
-		return nil
-	}
-	return tsdb.NewSeriesIDSetIterator(mm.seriesIDSet())
-}
-
-// CompactTo compacts the log file and writes it to w.
-func (f *LogFile) CompactTo(w io.Writer, m, k uint64, cancel <-chan struct{}) (n int64, err error) {
-	f.mu.RLock()
-	defer f.mu.RUnlock()
-
-	// Check for cancellation.
-	select {
-	case <-cancel:
-		return n, ErrCompactionInterrupted
-	default:
-	}
-
-	// Wrap in buffered writer with a buffer equivalent to the LogFile size.
-	bw := bufio.NewWriterSize(w, indexFileBufferSize) // 128K
-
-	// Setup compaction offset tracking data.
-	var t IndexFileTrailer
-	info := newLogFileCompactInfo()
-	info.cancel = cancel
-
-	// Write magic number.
-	if err := writeTo(bw, []byte(FileSignature), &n); err != nil {
-		return n, err
-	}
-
-	// Retrieve measurement names in order.
-	names := f.measurementNames()
-
-	// Flush buffer & mmap series block.
-	if err := bw.Flush(); err != nil {
-		return n, err
-	}
-
-	// Write tagset blocks in measurement order.
-	if err := f.writeTagsetsTo(bw, names, info, &n); err != nil {
-		return n, err
-	}
-
-	// Write measurement block.
-	t.MeasurementBlock.Offset = n
-	if err := f.writeMeasurementBlockTo(bw, names, info, &n); err != nil {
-		return n, err
-	}
-	t.MeasurementBlock.Size = n - t.MeasurementBlock.Offset
-
-	// Write series set.
-	t.SeriesIDSet.Offset = n
-	nn, err := f.seriesIDSet.WriteTo(bw)
-	if n += nn; err != nil {
-		return n, err
-	}
-	t.SeriesIDSet.Size = n - t.SeriesIDSet.Offset
-
-	// Write tombstone series set.
-	t.TombstoneSeriesIDSet.Offset = n
-	nn, err = f.tombstoneSeriesIDSet.WriteTo(bw)
-	if n += nn; err != nil {
-		return n, err
-	}
-	t.TombstoneSeriesIDSet.Size = n - t.TombstoneSeriesIDSet.Offset
-
-	// Build series sketches.
-	sSketch, sTSketch, err := f.seriesSketches()
-	if err != nil {
-		return n, err
-	}
-
-	// Write series sketches.
-	t.SeriesSketch.Offset = n
-	data, err := sSketch.MarshalBinary()
-	if err != nil {
-		return n, err
-	} else if _, err := bw.Write(data); err != nil {
-		return n, err
-	}
-	t.SeriesSketch.Size = int64(len(data))
-	n += t.SeriesSketch.Size
-
-	t.TombstoneSeriesSketch.Offset = n
-	if data, err = sTSketch.MarshalBinary(); err != nil {
-		return n, err
-	} else if _, err := bw.Write(data); err != nil {
-		return n, err
-	}
-	t.TombstoneSeriesSketch.Size = int64(len(data))
-	n += t.TombstoneSeriesSketch.Size
-
-	// Write trailer.
-	nn, err = t.WriteTo(bw)
-	n += nn
-	if err != nil {
-		return n, err
-	}
-
-	// Flush buffer.
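-	// Any bytes still held by the bufio writer must reach w before returning,
-	// otherwise the trailer written above would be lost.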
- if err := bw.Flush(); err != nil { - return n, err - } - - return n, nil -} - -func (f *LogFile) writeTagsetsTo(w io.Writer, names []string, info *logFileCompactInfo, n *int64) error { - for _, name := range names { - if err := f.writeTagsetTo(w, name, info, n); err != nil { - return err - } - } - return nil -} - -// writeTagsetTo writes a single tagset to w and saves the tagset offset. -func (f *LogFile) writeTagsetTo(w io.Writer, name string, info *logFileCompactInfo, n *int64) error { - mm := f.mms[name] - - // Check for cancellation. - select { - case <-info.cancel: - return ErrCompactionInterrupted - default: - } - - enc := NewTagBlockEncoder(w) - var valueN int - for _, k := range mm.keys() { - tag := mm.tagSet[k] - - // Encode tag. Skip values if tag is deleted. - if err := enc.EncodeKey(tag.name, tag.deleted); err != nil { - return err - } else if tag.deleted { - continue - } - - // Sort tag values. - values := make([]string, 0, len(tag.tagValues)) - for v := range tag.tagValues { - values = append(values, v) - } - sort.Strings(values) - - // Add each value. - for _, v := range values { - value := tag.tagValues[v] - if err := enc.EncodeValue(value.name, value.deleted, value.seriesIDSet()); err != nil { - return err - } - - // Check for cancellation periodically. - if valueN++; valueN%1000 == 0 { - select { - case <-info.cancel: - return ErrCompactionInterrupted - default: - } - } - } - } - - // Save tagset offset to measurement. - offset := *n - - // Flush tag block. - err := enc.Close() - *n += enc.N() - if err != nil { - return err - } - - // Save tagset offset to measurement. - size := *n - offset - - info.mms[name] = &logFileMeasurementCompactInfo{offset: offset, size: size} - - return nil -} - -func (f *LogFile) writeMeasurementBlockTo(w io.Writer, names []string, info *logFileCompactInfo, n *int64) error { - mw := NewMeasurementBlockWriter() - - // Check for cancellation. - select { - case <-info.cancel: - return ErrCompactionInterrupted - default: - } - - // Add measurement data. - for _, name := range names { - mm := f.mms[name] - mmInfo := info.mms[name] - assert(mmInfo != nil, "measurement info not found") - mw.Add(mm.name, mm.deleted, mmInfo.offset, mmInfo.size, mm.seriesIDs()) - } - - // Flush data to writer. - nn, err := mw.WriteTo(w) - *n += nn - return err -} - -// logFileCompactInfo is a context object to track compaction position info. -type logFileCompactInfo struct { - cancel <-chan struct{} - mms map[string]*logFileMeasurementCompactInfo -} - -// newLogFileCompactInfo returns a new instance of logFileCompactInfo. -func newLogFileCompactInfo() *logFileCompactInfo { - return &logFileCompactInfo{ - mms: make(map[string]*logFileMeasurementCompactInfo), - } -} - -type logFileMeasurementCompactInfo struct { - offset int64 - size int64 -} - -// MeasurementsSketches returns sketches for existing and tombstoned measurement names. -func (f *LogFile) MeasurementsSketches() (sketch, tSketch estimator.Sketch, err error) { - f.mu.RLock() - defer f.mu.RUnlock() - return f.measurementsSketches() -} - -func (f *LogFile) measurementsSketches() (sketch, tSketch estimator.Sketch, err error) { - sketch, tSketch = hll.NewDefaultPlus(), hll.NewDefaultPlus() - for _, mm := range f.mms { - if mm.deleted { - tSketch.Add(mm.name) - } else { - sketch.Add(mm.name) - } - } - return sketch, tSketch, nil -} - -// SeriesSketches returns sketches for existing and tombstoned series. 
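-// Both sketches are rebuilt from the series keys referenced by the in-memory
-// series ID set and tombstone set, so they reflect the current log contents.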
-func (f *LogFile) SeriesSketches() (sketch, tSketch estimator.Sketch, err error) {
-	f.mu.RLock()
-	defer f.mu.RUnlock()
-	return f.seriesSketches()
-}
-
-func (f *LogFile) seriesSketches() (sketch, tSketch estimator.Sketch, err error) {
-	sketch = hll.NewDefaultPlus()
-	f.seriesIDSet.ForEach(func(id uint64) {
-		name, keys := f.sfile.Series(id)
-		sketch.Add(models.MakeKey(name, keys))
-	})
-
-	tSketch = hll.NewDefaultPlus()
-	f.tombstoneSeriesIDSet.ForEach(func(id uint64) {
-		name, keys := f.sfile.Series(id)
-		tSketch.Add(models.MakeKey(name, keys))
-	})
-	return sketch, tSketch, nil
-}
-
-// LogEntry represents a single log entry in the write-ahead log.
-type LogEntry struct {
-	Flag     byte   // flag
-	SeriesID uint64 // series id
-	Name     []byte // measurement name
-	Key      []byte // tag key
-	Value    []byte // tag value
-	Checksum uint32 // checksum of flag/name/tags.
-	Size     int    // total size of record, in bytes.
-
-	cached   bool        // Hint to LogFile that series data is already parsed
-	name     []byte      // series name, this is a cached copy of the parsed measurement name
-	tags     models.Tags // series tags, this is a cached copy of the parsed tags
-	batchidx int         // position of entry in batch.
-}
-
-// UnmarshalBinary unmarshals data into e.
-func (e *LogEntry) UnmarshalBinary(data []byte) error {
-	var sz uint64
-	var n int
-	var seriesID uint64
-	var err error
-
-	orig := data
-	start := len(data)
-
-	// Parse flag data.
-	if len(data) < 1 {
-		return io.ErrShortBuffer
-	}
-	e.Flag, data = data[0], data[1:]
-
-	// Parse series id.
-	if seriesID, n, err = uvarint(data); err != nil {
-		return err
-	}
-	e.SeriesID, data = seriesID, data[n:]
-
-	// Parse name length.
-	if sz, n, err = uvarint(data); err != nil {
-		return err
-	}
-
-	// Read name data.
-	if len(data) < n+int(sz) {
-		return io.ErrShortBuffer
-	}
-	e.Name, data = data[n:n+int(sz)], data[n+int(sz):]
-
-	// Parse key length.
-	if sz, n, err = uvarint(data); err != nil {
-		return err
-	}
-
-	// Read key data.
-	if len(data) < n+int(sz) {
-		return io.ErrShortBuffer
-	}
-	e.Key, data = data[n:n+int(sz)], data[n+int(sz):]
-
-	// Parse value length.
-	if sz, n, err = uvarint(data); err != nil {
-		return err
-	}
-
-	// Read value data.
-	if len(data) < n+int(sz) {
-		return io.ErrShortBuffer
-	}
-	e.Value, data = data[n:n+int(sz)], data[n+int(sz):]
-
-	// Compute checksum.
-	chk := crc32.ChecksumIEEE(orig[:start-len(data)])
-
-	// Parse checksum.
-	if len(data) < 4 {
-		return io.ErrShortBuffer
-	}
-	e.Checksum, data = binary.BigEndian.Uint32(data[:4]), data[4:]
-
-	// Verify checksum.
-	if chk != e.Checksum {
-		return ErrLogEntryChecksumMismatch
-	}
-
-	// Save length of elem.
-	e.Size = start - len(data)
-
-	return nil
-}
-
-// appendLogEntry appends to dst and returns the new buffer.
-// This updates the checksum on the entry.
-func appendLogEntry(dst []byte, e *LogEntry) []byte {
-	var buf [binary.MaxVarintLen64]byte
-	start := len(dst)
-
-	// Append flag.
-	dst = append(dst, e.Flag)
-
-	// Append series id.
-	n := binary.PutUvarint(buf[:], uint64(e.SeriesID))
-	dst = append(dst, buf[:n]...)
-
-	// Append name.
-	n = binary.PutUvarint(buf[:], uint64(len(e.Name)))
-	dst = append(dst, buf[:n]...)
-	dst = append(dst, e.Name...)
-
-	// Append key.
-	n = binary.PutUvarint(buf[:], uint64(len(e.Key)))
-	dst = append(dst, buf[:n]...)
-	dst = append(dst, e.Key...)
-
-	// Append value.
-	n = binary.PutUvarint(buf[:], uint64(len(e.Value)))
-	dst = append(dst, buf[:n]...)
-	dst = append(dst, e.Value...)
-
-	// Calculate checksum.
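-	// The CRC-32 covers everything appended since start: the flag, the series id,
-	// and the length-prefixed name, key and value.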
- e.Checksum = crc32.ChecksumIEEE(dst[start:]) - - // Append checksum. - binary.BigEndian.PutUint32(buf[:4], e.Checksum) - dst = append(dst, buf[:4]...) - - return dst -} - -// logMeasurements represents a map of measurement names to measurements. -type logMeasurements map[string]*logMeasurement - -// bytes estimates the memory footprint of this logMeasurements, in bytes. -func (mms *logMeasurements) bytes() int { - var b int - for k, v := range *mms { - b += len(k) - b += v.bytes() - } - b += int(unsafe.Sizeof(*mms)) - return b -} - -type logMeasurement struct { - f *LogFile - name []byte - tagSet map[string]logTagKey - deleted bool - series map[uint64]struct{} - seriesSet *tsdb.SeriesIDSet -} - -// bytes estimates the memory footprint of this logMeasurement, in bytes. -func (m *logMeasurement) bytes() int { - var b int - b += len(m.name) - for k, v := range m.tagSet { - b += len(k) - b += v.bytes() - } - b += (int(m.cardinality()) * 8) - b += int(unsafe.Sizeof(*m)) - return b -} - -func (m *logMeasurement) addSeriesID(x uint64) { - if m.seriesSet != nil { - m.seriesSet.AddNoLock(x) - return - } - - m.series[x] = struct{}{} - - // If the map is getting too big it can be converted into a roaring seriesSet. - if len(m.series) > 25 { - m.seriesSet = tsdb.NewSeriesIDSet() - for id := range m.series { - m.seriesSet.AddNoLock(id) - } - m.series = nil - } -} - -func (m *logMeasurement) removeSeriesID(x uint64) { - if m.seriesSet != nil { - m.seriesSet.RemoveNoLock(x) - return - } - delete(m.series, x) -} - -func (m *logMeasurement) cardinality() int64 { - if m.seriesSet != nil { - return int64(m.seriesSet.Cardinality()) - } - return int64(len(m.series)) -} - -// forEach applies fn to every series ID in the logMeasurement. -func (m *logMeasurement) forEach(fn func(uint64)) { - if m.seriesSet != nil { - m.seriesSet.ForEachNoLock(fn) - return - } - - for seriesID := range m.series { - fn(seriesID) - } -} - -// seriesIDs returns a sorted set of seriesIDs. -func (m *logMeasurement) seriesIDs() []uint64 { - a := make([]uint64, 0, m.cardinality()) - if m.seriesSet != nil { - m.seriesSet.ForEachNoLock(func(id uint64) { a = append(a, id) }) - return a // IDs are already sorted. - } - - for seriesID := range m.series { - a = append(a, seriesID) - } - sort.Sort(uint64Slice(a)) - return a -} - -// seriesIDSet returns a copy of the logMeasurement's seriesSet, or creates a new -// one -func (m *logMeasurement) seriesIDSet() *tsdb.SeriesIDSet { - if m.seriesSet != nil { - return m.seriesSet.CloneNoLock() - } - - ss := tsdb.NewSeriesIDSet() - for seriesID := range m.series { - ss.AddNoLock(seriesID) - } - return ss -} - -func (m *logMeasurement) hasSeries(ss *tsdb.SeriesIDSet) bool { - if m.seriesSet != nil { - return m.seriesSet.Intersects(ss) - } - - for seriesID := range m.series { - if ss.Contains(seriesID) { - return true - } - } - - return false -} - -func (m *logMeasurement) Name() []byte { return m.name } -func (m *logMeasurement) Deleted() bool { return m.deleted } - -func (m *logMeasurement) createTagSetIfNotExists(key []byte) logTagKey { - ts, ok := m.tagSet[string(key)] - if !ok { - ts = logTagKey{f: m.f, name: key, tagValues: make(map[string]logTagValue)} - } - return ts -} - -// keys returns a sorted list of tag keys. -func (m *logMeasurement) keys() []string { - a := make([]string, 0, len(m.tagSet)) - for k := range m.tagSet { - a = append(a, k) - } - sort.Strings(a) - return a -} - -// logMeasurementSlice is a sortable list of log measurements. 
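-// It implements sort.Interface, ordering measurements by name.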
-type logMeasurementSlice []logMeasurement
-
-func (a logMeasurementSlice) Len() int           { return len(a) }
-func (a logMeasurementSlice) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-func (a logMeasurementSlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[j].name) == -1 }
-
-// logMeasurementIterator represents an iterator over a slice of measurements.
-type logMeasurementIterator struct {
-	mms []logMeasurement
-}
-
-// Next returns the next element in the iterator.
-func (itr *logMeasurementIterator) Next() (e MeasurementElem) {
-	if len(itr.mms) == 0 {
-		return nil
-	}
-	e, itr.mms = &itr.mms[0], itr.mms[1:]
-	return e
-}
-
-type logTagKey struct {
-	f         *LogFile
-	name      []byte
-	deleted   bool
-	tagValues map[string]logTagValue
-}
-
-// bytes estimates the memory footprint of this logTagKey, in bytes.
-func (tk *logTagKey) bytes() int {
-	var b int
-	b += len(tk.name)
-	for k, v := range tk.tagValues {
-		b += len(k)
-		b += v.bytes()
-	}
-	b += int(unsafe.Sizeof(*tk))
-	return b
-}
-
-func (tk *logTagKey) Key() []byte   { return tk.name }
-func (tk *logTagKey) Deleted() bool { return tk.deleted }
-
-func (tk *logTagKey) TagValueIterator() TagValueIterator {
-	tk.f.mu.RLock()
-	a := make([]logTagValue, 0, len(tk.tagValues))
-	for _, v := range tk.tagValues {
-		a = append(a, v)
-	}
-	tk.f.mu.RUnlock()
-
-	return newLogTagValueIterator(a)
-}
-
-func (tk *logTagKey) createTagValueIfNotExists(value []byte) logTagValue {
-	tv, ok := tk.tagValues[string(value)]
-	if !ok {
-		tv = logTagValue{name: value, series: make(map[uint64]struct{})}
-	}
-	return tv
-}
-
-// logTagKeySlice is a sortable list of log tag keys.
-type logTagKeySlice []logTagKey
-
-func (a logTagKeySlice) Len() int           { return len(a) }
-func (a logTagKeySlice) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-func (a logTagKeySlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[j].name) == -1 }
-
-type logTagValue struct {
-	name      []byte
-	deleted   bool
-	series    map[uint64]struct{}
-	seriesSet *tsdb.SeriesIDSet
-}
-
-// bytes estimates the memory footprint of this logTagValue, in bytes.
-func (tv *logTagValue) bytes() int {
-	var b int
-	b += len(tv.name)
-	b += int(unsafe.Sizeof(*tv))
-	b += (int(tv.cardinality()) * 8)
-	return b
-}
-
-func (tv *logTagValue) addSeriesID(x uint64) {
-	if tv.seriesSet != nil {
-		tv.seriesSet.AddNoLock(x)
-		return
-	}
-
-	tv.series[x] = struct{}{}
-
-	// If the map is getting too big it can be converted into a roaring seriesSet.
-	if len(tv.series) > 25 {
-		tv.seriesSet = tsdb.NewSeriesIDSet()
-		for id := range tv.series {
-			tv.seriesSet.AddNoLock(id)
-		}
-		tv.series = nil
-	}
-}
-
-func (tv *logTagValue) removeSeriesID(x uint64) {
-	if tv.seriesSet != nil {
-		tv.seriesSet.RemoveNoLock(x)
-		return
-	}
-	delete(tv.series, x)
-}
-
-func (tv *logTagValue) cardinality() int64 {
-	if tv.seriesSet != nil {
-		return int64(tv.seriesSet.Cardinality())
-	}
-	return int64(len(tv.series))
-}
-
-// seriesIDSet returns a copy of the logTagValue's seriesSet, or creates a new
-// one.
-func (tv *logTagValue) seriesIDSet() *tsdb.SeriesIDSet {
-	if tv.seriesSet != nil {
-		return tv.seriesSet.CloneNoLock()
-	}
-
-	ss := tsdb.NewSeriesIDSet()
-	for seriesID := range tv.series {
-		ss.AddNoLock(seriesID)
-	}
-	return ss
-}
-
-func (tv *logTagValue) Value() []byte  { return tv.name }
-func (tv *logTagValue) Deleted() bool { return tv.deleted }
-
-// logTagValueSlice is a sortable list of log tag values.
-type logTagValueSlice []logTagValue - -func (a logTagValueSlice) Len() int { return len(a) } -func (a logTagValueSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a logTagValueSlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[j].name) == -1 } - -// logTagKeyIterator represents an iterator over a slice of tag keys. -type logTagKeyIterator struct { - f *LogFile - a []logTagKey -} - -// newLogTagKeyIterator returns a new instance of logTagKeyIterator. -func newLogTagKeyIterator(f *LogFile, a []logTagKey) *logTagKeyIterator { - sort.Sort(logTagKeySlice(a)) - return &logTagKeyIterator{f: f, a: a} -} - -// Next returns the next element in the iterator. -func (itr *logTagKeyIterator) Next() (e TagKeyElem) { - if len(itr.a) == 0 { - return nil - } - e, itr.a = &itr.a[0], itr.a[1:] - return e -} - -// logTagValueIterator represents an iterator over a slice of tag values. -type logTagValueIterator struct { - a []logTagValue -} - -// newLogTagValueIterator returns a new instance of logTagValueIterator. -func newLogTagValueIterator(a []logTagValue) *logTagValueIterator { - sort.Sort(logTagValueSlice(a)) - return &logTagValueIterator{a: a} -} - -// Next returns the next element in the iterator. -func (itr *logTagValueIterator) Next() (e TagValueElem) { - if len(itr.a) == 0 { - return nil - } - e, itr.a = &itr.a[0], itr.a[1:] - return e -} - -// FormatLogFileName generates a log filename for the given index. -func FormatLogFileName(id int) string { - return fmt.Sprintf("L0-%08d%s", id, LogFileExt) -} diff --git a/tsdb/index/tsi1/log_file_test.go b/tsdb/index/tsi1/log_file_test.go deleted file mode 100644 index 42a4cb974a0..00000000000 --- a/tsdb/index/tsi1/log_file_test.go +++ /dev/null @@ -1,633 +0,0 @@ -package tsi1_test - -import ( - "bytes" - "fmt" - "math/rand" - "os" - "path/filepath" - "reflect" - "regexp" - "runtime/pprof" - "sort" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/bloom" - "github.com/influxdata/influxdb/v2/pkg/slices" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" - "github.com/stretchr/testify/require" -) - -// Ensure log file can append series. -func TestLogFile_AddSeriesList(t *testing.T) { - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - f := MustOpenLogFile(sfile.SeriesFile) - defer f.Close() - seriesSet := tsdb.NewSeriesIDSet() - - // Add test data. - ids, err := f.AddSeriesList(seriesSet, - slices.StringsToBytes("cpu", "mem"), - []models.Tags{ - models.NewTags(map[string]string{"region": "us-east"}), - models.NewTags(map[string]string{"host": "serverA"}), - }, - ) - - if err != nil { - t.Fatal(err) - } - - // Returned series ids should match those in the seriesSet. - other := tsdb.NewSeriesIDSet(ids...) - if !other.Equals(seriesSet) { - t.Fatalf("got series ids %s, expected %s", other, seriesSet) - } - - // Add the same series again with a new one. 
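-	// Only the new series should receive a non-zero id; the series that already
-	// exists in the set is reported as 0.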
- ids, err = f.AddSeriesList(seriesSet, - slices.StringsToBytes("cpu", "mem"), - []models.Tags{ - models.NewTags(map[string]string{"region": "us-west"}), - models.NewTags(map[string]string{"host": "serverA"}), - }, - ) - - if err != nil { - t.Fatal(err) - } - - if got, exp := len(ids), 2; got != exp { - t.Fatalf("got %d series ids, expected %d", got, exp) - } else if got := ids[0]; got == 0 { - t.Error("series id was 0, expected it not to be") - } else if got := ids[1]; got != 0 { - t.Errorf("got series id %d, expected 0", got) - } - - // Add only the same series IDs. - ids, err = f.AddSeriesList(seriesSet, - slices.StringsToBytes("cpu", "mem"), - []models.Tags{ - models.NewTags(map[string]string{"region": "us-west"}), - models.NewTags(map[string]string{"host": "serverA"}), - }, - ) - - if err != nil { - t.Fatal(err) - } - - if got, exp := ids, make([]uint64, 2); !reflect.DeepEqual(got, exp) { - t.Fatalf("got ids %v, expected %v", got, exp) - } - - // Verify data. - itr := f.MeasurementIterator() - if e := itr.Next(); e == nil || string(e.Name()) != "cpu" { - t.Fatalf("unexpected measurement: %#v", e) - } else if e := itr.Next(); e == nil || string(e.Name()) != "mem" { - t.Fatalf("unexpected measurement: %#v", e) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected eof, got: %#v", e) - } - - // Reopen file and re-verify. - if err := f.Reopen(); err != nil { - t.Fatal(err) - } - - // Verify data. - itr = f.MeasurementIterator() - if e := itr.Next(); e == nil || string(e.Name()) != "cpu" { - t.Fatalf("unexpected measurement: %#v", e) - } else if e := itr.Next(); e == nil || string(e.Name()) != "mem" { - t.Fatalf("unexpected measurement: %#v", e) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected eof, got: %#v", e) - } -} - -func TestLogFile_SeriesStoredInOrder(t *testing.T) { - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - f := MustOpenLogFile(sfile.SeriesFile) - defer f.Close() - seriesSet := tsdb.NewSeriesIDSet() - - // Generate and add test data - tvm := make(map[string]struct{}) - seededRand := rand.New(rand.NewSource(time.Now().Unix())) - for i := 0; i < 100; i++ { - tv := fmt.Sprintf("server-%d", seededRand.Intn(50)) // Encourage adding duplicate series. - tvm[tv] = struct{}{} - - if _, err := f.AddSeriesList(seriesSet, [][]byte{ - []byte("mem"), - []byte("cpu"), - }, []models.Tags{ - {models.NewTag([]byte("host"), []byte(tv))}, - {models.NewTag([]byte("host"), []byte(tv))}, - }); err != nil { - t.Fatal(err) - } - } - - // Sort the tag values so we know what order to expect. - tvs := make([]string, 0, len(tvm)) - for tv := range tvm { - tvs = append(tvs, tv) - } - sort.Strings(tvs) - - // Double the series values since we're adding them twice (two measurements) - tvs = append(tvs, tvs...) - - // When we pull the series out via an iterator they should be in order. - itr := f.SeriesIDIterator() - if itr == nil { - t.Fatal("nil iterator") - } - - var prevSeriesID uint64 - for i := 0; i < len(tvs); i++ { - elem, err := itr.Next() - if err != nil { - t.Fatal(err) - } else if elem.SeriesID == 0 { - t.Fatal("got nil series") - } else if elem.SeriesID < prevSeriesID { - t.Fatalf("series out of order: %d !< %d ", elem.SeriesID, prevSeriesID) - } - prevSeriesID = elem.SeriesID - } -} - -// Ensure log file can delete an existing measurement. -func TestLogFile_DeleteMeasurement(t *testing.T) { - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - f := MustOpenLogFile(sfile.SeriesFile) - defer f.Close() - seriesSet := tsdb.NewSeriesIDSet() - - // Add test data. 
- if _, err := f.AddSeriesList(seriesSet, [][]byte{ - []byte("mem"), - []byte("cpu"), - []byte("cpu"), - }, []models.Tags{ - {{Key: []byte("host"), Value: []byte("serverA")}}, - {{Key: []byte("region"), Value: []byte("us-east")}}, - {{Key: []byte("region"), Value: []byte("us-west")}}, - }); err != nil { - t.Fatal(err) - } - - // Remove measurement. - if err := f.DeleteMeasurement([]byte("cpu")); err != nil { - t.Fatal(err) - } - - // Verify data. - itr := f.MeasurementIterator() - if e := itr.Next(); string(e.Name()) != "cpu" || !e.Deleted() { - t.Fatalf("unexpected measurement: %s/%v", e.Name(), e.Deleted()) - } else if e := itr.Next(); string(e.Name()) != "mem" || e.Deleted() { - t.Fatalf("unexpected measurement: %s/%v", e.Name(), e.Deleted()) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected eof, got: %#v", e) - } -} - -// Ensure log file can recover correctly. -func TestLogFile_Open(t *testing.T) { - t.Run("Truncate", func(t *testing.T) { - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - seriesSet := tsdb.NewSeriesIDSet() - - f := MustOpenLogFile(sfile.SeriesFile) - defer f.Close() - - // Add test data & close. - if _, err := f.AddSeriesList(seriesSet, [][]byte{[]byte("cpu"), []byte("mem")}, []models.Tags{{{}}, {{}}}); err != nil { - t.Fatal(err) - } else if err := f.LogFile.Close(); err != nil { - t.Fatal(err) - } - - // Truncate data & reopen. - if fi, err := os.Stat(f.LogFile.Path()); err != nil { - t.Fatal(err) - } else if err := os.Truncate(f.LogFile.Path(), fi.Size()-1); err != nil { - t.Fatal(err) - } else if err := f.LogFile.Open(); err != nil { - t.Fatal(err) - } - - // Verify data. - itr := f.SeriesIDIterator() - if elem, err := itr.Next(); err != nil { - t.Fatal(err) - } else if name, tags := sfile.Series(elem.SeriesID); string(name) != `cpu` { - t.Fatalf("unexpected series: %s,%s", name, tags.String()) - } else if elem, err := itr.Next(); err != nil { - t.Fatal(err) - } else if elem.SeriesID != 0 { - t.Fatalf("expected eof, got: %#v", elem) - } - - // Add more data & reopen. - if _, err := f.AddSeriesList(seriesSet, [][]byte{[]byte("disk")}, []models.Tags{{{}}}); err != nil { - t.Fatal(err) - } else if err := f.Reopen(); err != nil { - t.Fatal(err) - } - - // Verify new data. - itr = f.SeriesIDIterator() - if elem, err := itr.Next(); err != nil { - t.Fatal(err) - } else if name, tags := sfile.Series(elem.SeriesID); string(name) != `cpu` { - t.Fatalf("unexpected series: %s,%s", name, tags.String()) - } else if elem, err := itr.Next(); err != nil { - t.Fatal(err) - } else if name, tags := sfile.Series(elem.SeriesID); string(name) != `disk` { - t.Fatalf("unexpected series: %s,%s", name, tags.String()) - } else if elem, err := itr.Next(); err != nil { - t.Fatal(err) - } else if elem.SeriesID != 0 { - t.Fatalf("expected eof, got: %#v", elem) - } - }) - - t.Run("ChecksumMismatch", func(t *testing.T) { - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - seriesSet := tsdb.NewSeriesIDSet() - - f := MustOpenLogFile(sfile.SeriesFile) - defer f.Close() - - // Add test data & close. - if _, err := f.AddSeriesList(seriesSet, [][]byte{[]byte("cpu"), []byte("mem")}, []models.Tags{{{}}, {{}}}); err != nil { - t.Fatal(err) - } else if err := f.LogFile.Close(); err != nil { - t.Fatal(err) - } - - // Corrupt last entry. - buf, err := os.ReadFile(f.LogFile.Path()) - if err != nil { - t.Fatal(err) - } - buf[len(buf)-1] = 0 - - // Overwrite file with corrupt entry and reopen. 
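-	// Opening should discard the trailing entry whose checksum no longer matches,
-	// leaving only the first series visible below.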
- if err := os.WriteFile(f.LogFile.Path(), buf, 0666); err != nil { - t.Fatal(err) - } else if err := f.LogFile.Open(); err != nil { - t.Fatal(err) - } - - // Verify data. - itr := f.SeriesIDIterator() - if elem, err := itr.Next(); err != nil { - t.Fatal(err) - } else if name, tags := sfile.Series(elem.SeriesID); string(name) != `cpu` { - t.Fatalf("unexpected series: %s,%s", name, tags.String()) - } else if elem, err := itr.Next(); err != nil { - t.Fatal(err) - } else if elem.SeriesID != 0 { - t.Fatalf("expected eof, got: %#v", elem) - } - }) -} - -func TestLogFile_MeasurementHasSeries(t *testing.T) { - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - f := MustOpenLogFile(sfile.SeriesFile) - defer f.Close() - - measurementN, seriesValueN, seriesKeyN := 3, 2, 5 - tagValueN := pow(seriesValueN, seriesKeyN) - - seriesSet := tsdb.NewSeriesIDSet() // all series in all measurements - seriesIDs := make([]uint64, 0, tagValueN) // all series ids in measurement0 - - // add series to all measurements - for i := 0; i < measurementN; i++ { - name := []byte(fmt.Sprintf("measurement%d", i)) - - names := make([][]byte, tagValueN) - tags := make([]models.Tags, tagValueN) - - for j := 0; j < tagValueN; j++ { - var tag models.Tags - for k := 0; k < seriesKeyN; k++ { - key := []byte(fmt.Sprintf("key%d", k)) - value := []byte(fmt.Sprintf("value%d", j/pow(seriesValueN, k)%seriesValueN)) - tag = append(tag, models.NewTag(key, value)) - } - - names[j] = name - tags[j] = tag - } - - ids, err := f.AddSeriesList(seriesSet, names, tags) - require.NoError(t, err) - - if i == 0 { - seriesIDs = append(seriesIDs, ids...) - } - } - - // remove series from measurement 0 - name := []byte("measurement0") - for i := 0; i < tagValueN; i++ { - // measurement0 has series before last one removed - require.True(t, f.MeasurementHasSeries(seriesSet, name)) - - require.NoError(t, f.DeleteSeriesID(seriesIDs[i])) - seriesSet.Remove(seriesIDs[i]) - } - - // measurement0 has none series when last one removed - require.False(t, f.MeasurementHasSeries(seriesSet, name)) -} - -// LogFile is a test wrapper for tsi1.LogFile. -type LogFile struct { - *tsi1.LogFile -} - -// NewLogFile returns a new instance of LogFile with a temporary file path. -func NewLogFile(sfile *tsdb.SeriesFile) *LogFile { - file, err := os.CreateTemp("", "tsi1-log-file-") - if err != nil { - panic(err) - } - file.Close() - - return &LogFile{LogFile: tsi1.NewLogFile(sfile, file.Name())} -} - -// MustOpenLogFile returns a new, open instance of LogFile. Panic on error. -func MustOpenLogFile(sfile *tsdb.SeriesFile) *LogFile { - f := NewLogFile(sfile) - if err := f.Open(); err != nil { - panic(err) - } - return f -} - -// Close closes the log file and removes it from disk. -func (f *LogFile) Close() error { - defer os.Remove(f.Path()) - return f.LogFile.Close() -} - -// Reopen closes and reopens the file. -func (f *LogFile) Reopen() error { - if err := f.LogFile.Close(); err != nil { - return err - } - if err := f.LogFile.Open(); err != nil { - return err - } - return nil -} - -// CreateLogFile creates a new temporary log file and adds a list of series. 
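-// Each series is added with its own AddSeriesList call, so the log contains one
-// entry per series.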
-func CreateLogFile(sfile *tsdb.SeriesFile, series []Series) (*LogFile, error) { - f := MustOpenLogFile(sfile) - seriesSet := tsdb.NewSeriesIDSet() - for _, serie := range series { - if _, err := f.AddSeriesList(seriesSet, [][]byte{serie.Name}, []models.Tags{serie.Tags}); err != nil { - return nil, err - } - } - return f, nil -} - -// GenerateLogFile generates a log file from a set of series based on the count arguments. -// Total series returned will equal measurementN * tagN * valueN. -func GenerateLogFile(sfile *tsdb.SeriesFile, measurementN, tagN, valueN int) (*LogFile, error) { - tagValueN := pow(valueN, tagN) - - f := MustOpenLogFile(sfile) - seriesSet := tsdb.NewSeriesIDSet() - for i := 0; i < measurementN; i++ { - name := []byte(fmt.Sprintf("measurement%d", i)) - - // Generate tag sets. - for j := 0; j < tagValueN; j++ { - var tags models.Tags - for k := 0; k < tagN; k++ { - key := []byte(fmt.Sprintf("key%d", k)) - value := []byte(fmt.Sprintf("value%d", (j / pow(valueN, k) % valueN))) - tags = append(tags, models.NewTag(key, value)) - } - if _, err := f.AddSeriesList(seriesSet, [][]byte{name}, []models.Tags{tags}); err != nil { - return nil, err - } - } - } - return f, nil -} - -func benchmarkLogFile_AddSeries(b *testing.B, measurementN, seriesKeyN, seriesValueN int) { - sfile := MustOpenSeriesFile(b) - defer sfile.Close() - - b.StopTimer() - f := MustOpenLogFile(sfile.SeriesFile) - seriesSet := tsdb.NewSeriesIDSet() - - type Datum struct { - Name []byte - Tags models.Tags - } - - // Pre-generate everything. - var ( - data []Datum - series int - ) - - tagValueN := pow(seriesValueN, seriesKeyN) - - for i := 0; i < measurementN; i++ { - name := []byte(fmt.Sprintf("measurement%d", i)) - for j := 0; j < tagValueN; j++ { - var tags models.Tags - for k := 0; k < seriesKeyN; k++ { - key := []byte(fmt.Sprintf("key%d", k)) - value := []byte(fmt.Sprintf("value%d", (j / pow(seriesValueN, k) % seriesValueN))) - tags = append(tags, models.NewTag(key, value)) - } - data = append(data, Datum{Name: name, Tags: tags}) - series += len(tags) - } - } - b.StartTimer() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - for _, d := range data { - if _, err := f.AddSeriesList(seriesSet, [][]byte{d.Name}, []models.Tags{d.Tags}); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkLogFile_AddSeries_100_1_1(b *testing.B) { benchmarkLogFile_AddSeries(b, 100, 1, 1) } // 100 series -func BenchmarkLogFile_AddSeries_1000_1_1(b *testing.B) { benchmarkLogFile_AddSeries(b, 1000, 1, 1) } // 1000 series -func BenchmarkLogFile_AddSeries_10000_1_1(b *testing.B) { benchmarkLogFile_AddSeries(b, 10000, 1, 1) } // 10000 series -func BenchmarkLogFile_AddSeries_100_2_10(b *testing.B) { benchmarkLogFile_AddSeries(b, 100, 2, 10) } // ~20K series -func BenchmarkLogFile_AddSeries_100000_1_1(b *testing.B) { benchmarkLogFile_AddSeries(b, 100000, 1, 1) } // ~100K series -func BenchmarkLogFile_AddSeries_100_3_7(b *testing.B) { benchmarkLogFile_AddSeries(b, 100, 3, 7) } // ~100K series -func BenchmarkLogFile_AddSeries_200_3_7(b *testing.B) { benchmarkLogFile_AddSeries(b, 200, 3, 7) } // ~200K series -func BenchmarkLogFile_AddSeries_200_4_7(b *testing.B) { benchmarkLogFile_AddSeries(b, 200, 4, 7) } // ~1.9M series - -func BenchmarkLogFile_WriteTo(b *testing.B) { - for _, seriesN := range []int{1000, 10000, 100000, 1000000} { - name := fmt.Sprintf("series=%d", seriesN) - b.Run(name, func(b *testing.B) { - sfile := MustOpenSeriesFile(b) - defer sfile.Close() - - f := MustOpenLogFile(sfile.SeriesFile) - defer f.Close() - seriesSet 
:= tsdb.NewSeriesIDSet() - - // Estimate bloom filter size. - m, k := bloom.Estimate(uint64(seriesN), 0.02) - - // Initialize log file with series data. - for i := 0; i < seriesN; i++ { - if _, err := f.AddSeriesList( - seriesSet, - [][]byte{[]byte("cpu")}, - []models.Tags{{ - {Key: []byte("host"), Value: []byte(fmt.Sprintf("server-%d", i))}, - {Key: []byte("location"), Value: []byte("us-west")}, - }}, - ); err != nil { - b.Fatal(err) - } - } - b.ResetTimer() - - // Create cpu profile for each subtest. - MustStartCPUProfile(name) - defer pprof.StopCPUProfile() - - // Compact log file. - for i := 0; i < b.N; i++ { - buf := bytes.NewBuffer(make([]byte, 0, 150*seriesN)) - if _, err := f.CompactTo(buf, m, k, nil); err != nil { - b.Fatal(err) - } - b.Logf("sz=%db", buf.Len()) - } - }) - } -} - -func benchmarkLogFile_MeasurementHasSeries(b *testing.B, seriesKeyN, seriesValueN int) { - b.StopTimer() - - sfile := MustOpenSeriesFile(b) - defer sfile.Close() - - f := MustOpenLogFile(sfile.SeriesFile) - defer f.Close() - - measurementN := 2 - tagValueN := pow(seriesValueN, seriesKeyN) - - seriesSet := tsdb.NewSeriesIDSet() // all series in all measurements - seriesIDs := make([]uint64, 0, tagValueN) // all series ids in measurement0 - - // add series to all measurements - for i := 0; i < measurementN; i++ { - name := []byte(fmt.Sprintf("measurement%d", i)) - - names := make([][]byte, tagValueN) - tags := make([]models.Tags, tagValueN) - - for j := 0; j < tagValueN; j++ { - var tag models.Tags - for k := 0; k < seriesKeyN; k++ { - key := []byte(fmt.Sprintf("key%d", k)) - value := []byte(fmt.Sprintf("value%d", j/pow(seriesValueN, k)%seriesValueN)) - tag = append(tag, models.NewTag(key, value)) - } - - names[j] = name - tags[j] = tag - } - - ids, err := f.AddSeriesList(seriesSet, names, tags) - require.NoError(b, err) - - if i == 0 { - seriesIDs = append(seriesIDs, ids...) - } - } - - // remove some series in measurement0 - name := []byte("measurement0") - for i := 0; i < 50; i++ { - require.NoError(b, f.DeleteSeriesID(seriesIDs[i])) - seriesSet.Remove(seriesIDs[i]) - } - - b.StartTimer() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - if !f.MeasurementHasSeries(seriesSet, name) { - b.Fatal("expect true, got false") - } - } -} - -func BenchmarkLogFile_MeasurementHasSeries_2_10(b *testing.B) { - benchmarkLogFile_MeasurementHasSeries(b, 2, 10) -} // 100 series -func BenchmarkLogFile_MeasurementHasSeries_3_10(b *testing.B) { - benchmarkLogFile_MeasurementHasSeries(b, 3, 10) -} // 1k series -func BenchmarkLogFile_MeasurementHasSeries_4_10(b *testing.B) { - benchmarkLogFile_MeasurementHasSeries(b, 4, 10) -} // 10k series -func BenchmarkLogFile_MeasurementHasSeries_5_10(b *testing.B) { - benchmarkLogFile_MeasurementHasSeries(b, 5, 10) -} // 100k series - -// MustStartCPUProfile starts a cpu profile in a temporary path based on name. -func MustStartCPUProfile(name string) { - name = regexp.MustCompile(`\W+`).ReplaceAllString(name, "-") - - // Open file and start pprof. 
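-	// Profiles are written to /tmp as cpu-<name>.pprof using the sanitized name.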
- f, err := os.Create(filepath.Join("/tmp", fmt.Sprintf("cpu-%s.pprof", name))) - if err != nil { - panic(err) - } - if err := pprof.StartCPUProfile(f); err != nil { - panic(err) - } -} diff --git a/tsdb/index/tsi1/measurement_block.go b/tsdb/index/tsi1/measurement_block.go deleted file mode 100644 index 18ba7fc090f..00000000000 --- a/tsdb/index/tsi1/measurement_block.go +++ /dev/null @@ -1,688 +0,0 @@ -package tsi1 - -import ( - "bytes" - "encoding/binary" - "errors" - "io" - "sort" - "unsafe" - - "github.com/influxdata/influxdb/v2/pkg/estimator" - "github.com/influxdata/influxdb/v2/pkg/estimator/hll" - "github.com/influxdata/influxdb/v2/pkg/rhh" - "github.com/influxdata/influxdb/v2/tsdb" -) - -// MeasurementBlockVersion is the version of the measurement block. -const MeasurementBlockVersion = 1 - -// Measurement flag constants. -const ( - MeasurementTombstoneFlag = 0x01 - MeasurementSeriesIDSetFlag = 0x02 -) - -// Measurement field size constants. -const ( - // 1 byte offset for the block to ensure non-zero offsets. - MeasurementFillSize = 1 - - // Measurement trailer fields - MeasurementTrailerSize = 0 + - 2 + // version - 8 + 8 + // data offset/size - 8 + 8 + // hash index offset/size - 8 + 8 + // measurement sketch offset/size - 8 + 8 // tombstone measurement sketch offset/size - - // Measurement key block fields. - MeasurementNSize = 8 - MeasurementOffsetSize = 8 - - SeriesIDSize = 8 -) - -// Measurement errors. -var ( - ErrUnsupportedMeasurementBlockVersion = errors.New("unsupported measurement block version") - ErrMeasurementBlockSizeMismatch = errors.New("measurement block size mismatch") -) - -// MeasurementBlock represents a collection of all measurements in an index. -type MeasurementBlock struct { - data []byte - hashData []byte - - // Measurement sketch and tombstone sketch for cardinality estimation. - sketchData, tSketchData []byte - - version int // block version -} - -// bytes estimates the memory footprint of this MeasurementBlock, in bytes. -func (blk *MeasurementBlock) bytes() int { - var b int - // Do not count contents of blk.data or blk.hashData because they reference into an external []byte - b += int(unsafe.Sizeof(*blk)) - return b -} - -// Version returns the encoding version parsed from the data. -// Only valid after UnmarshalBinary() has been successfully invoked. -func (blk *MeasurementBlock) Version() int { return blk.version } - -// Elem returns an element for a measurement. -func (blk *MeasurementBlock) Elem(name []byte) (e MeasurementBlockElem, ok bool) { - n := int64(binary.BigEndian.Uint64(blk.hashData[:MeasurementNSize])) - hash := rhh.HashKey(name) - pos := hash % n - - // Track current distance - var d int64 - for { - // Find offset of measurement. - offset := binary.BigEndian.Uint64(blk.hashData[MeasurementNSize+(pos*MeasurementOffsetSize):]) - if offset == 0 { - return MeasurementBlockElem{}, false - } - - // Evaluate name if offset is not empty. - if offset > 0 { - // Parse into element. - var e MeasurementBlockElem - e.UnmarshalBinary(blk.data[offset:]) - - // Return if name match. - if bytes.Equal(e.name, name) { - return e, true - } - - // Check if we've exceeded the probe distance. - if d > rhh.Dist(rhh.HashKey(e.name), pos, n) { - return MeasurementBlockElem{}, false - } - } - - // Move position forward. - pos = (pos + 1) % n - d++ - - if d > n { - return MeasurementBlockElem{}, false - } - } -} - -// UnmarshalBinary unpacks data into the block. 
Block is not copied so data -// should be retained and unchanged after being passed into this function. -func (blk *MeasurementBlock) UnmarshalBinary(data []byte) error { - // Read trailer. - t, err := ReadMeasurementBlockTrailer(data) - if err != nil { - return err - } - - // Save data section. - blk.data = data[t.Data.Offset:] - blk.data = blk.data[:t.Data.Size] - - // Save hash index block. - blk.hashData = data[t.HashIndex.Offset:] - blk.hashData = blk.hashData[:t.HashIndex.Size] - - // Initialise sketch data. - blk.sketchData = data[t.Sketch.Offset:][:t.Sketch.Size] - blk.tSketchData = data[t.TSketch.Offset:][:t.TSketch.Size] - - return nil -} - -// Iterator returns an iterator over all measurements. -func (blk *MeasurementBlock) Iterator() MeasurementIterator { - return &blockMeasurementIterator{data: blk.data[MeasurementFillSize:]} -} - -// SeriesIDIterator returns an iterator for all series ids in a measurement. -func (blk *MeasurementBlock) SeriesIDIterator(name []byte) tsdb.SeriesIDIterator { - // Find measurement element. - e, ok := blk.Elem(name) - if !ok { - return &rawSeriesIDIterator{} - } - if e.seriesIDSet != nil { - return tsdb.NewSeriesIDSetIterator(e.seriesIDSet) - } - return &rawSeriesIDIterator{n: e.series.n, data: e.series.data} -} - -// Sketches returns existence and tombstone measurement sketches. -func (blk *MeasurementBlock) Sketches() (sketch, tSketch estimator.Sketch, err error) { - sketch = hll.NewDefaultPlus() - if err := sketch.UnmarshalBinary(blk.sketchData); err != nil { - return nil, nil, err - } - - tSketch = hll.NewDefaultPlus() - if err := tSketch.UnmarshalBinary(blk.tSketchData); err != nil { - return nil, nil, err - } - return sketch, tSketch, nil -} - -// blockMeasurementIterator iterates over a list measurements in a block. -type blockMeasurementIterator struct { - elem MeasurementBlockElem - data []byte -} - -// Next returns the next measurement. Returns nil when iterator is complete. -func (itr *blockMeasurementIterator) Next() MeasurementElem { - // Return nil when we run out of data. - if len(itr.data) == 0 { - return nil - } - - // Unmarshal the element at the current position. - itr.elem.UnmarshalBinary(itr.data) - - // Move the data forward past the record. - itr.data = itr.data[itr.elem.size:] - - return &itr.elem -} - -// rawSeriesIterator iterates over a list of raw series data. -type rawSeriesIDIterator struct { - prev uint64 - n uint64 - data []byte -} - -func (itr *rawSeriesIDIterator) Close() error { return nil } - -// Next returns the next decoded series. -func (itr *rawSeriesIDIterator) Next() (tsdb.SeriesIDElem, error) { - if len(itr.data) == 0 { - return tsdb.SeriesIDElem{}, nil - } - - delta, n, err := uvarint(itr.data) - if err != nil { - return tsdb.SeriesIDElem{}, err - } - itr.data = itr.data[n:] - - seriesID := itr.prev + uint64(delta) - itr.prev = seriesID - return tsdb.SeriesIDElem{SeriesID: seriesID}, nil -} - -func (itr *rawSeriesIDIterator) SeriesIDSet() *tsdb.SeriesIDSet { - ss := tsdb.NewSeriesIDSet() - for data, prev := itr.data, uint64(0); len(data) > 0; { - delta, n, err := uvarint(data) - if err != nil { - break - } - data = data[n:] - - seriesID := prev + uint64(delta) - prev = seriesID - ss.AddNoLock(seriesID) - } - return ss -} - -// MeasurementBlockTrailer represents meta data at the end of a MeasurementBlock. -type MeasurementBlockTrailer struct { - Version int // Encoding version - - // Offset & size of data section. - Data struct { - Offset int64 - Size int64 - } - - // Offset & size of hash map section. 
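-	// The hash index is a robin-hood hash map used to look up measurements by name.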
- HashIndex struct { - Offset int64 - Size int64 - } - - // Offset and size of cardinality sketch for measurements. - Sketch struct { - Offset int64 - Size int64 - } - - // Offset and size of cardinality sketch for tombstoned measurements. - TSketch struct { - Offset int64 - Size int64 - } -} - -// ReadMeasurementBlockTrailer returns the block trailer from data. -func ReadMeasurementBlockTrailer(data []byte) (MeasurementBlockTrailer, error) { - var t MeasurementBlockTrailer - - // Read version (which is located in the last two bytes of the trailer). - t.Version = int(binary.BigEndian.Uint16(data[len(data)-2:])) - if t.Version != MeasurementBlockVersion { - return t, ErrUnsupportedIndexFileVersion - } - - // Slice trailer data. - buf := data[len(data)-MeasurementTrailerSize:] - - // Read data section info. - t.Data.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - t.Data.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - - // Read measurement block info. - t.HashIndex.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - t.HashIndex.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - - // Read measurement sketch info. - t.Sketch.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - t.Sketch.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - - // Read tombstone measurement sketch info. - t.TSketch.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - t.TSketch.Size = int64(binary.BigEndian.Uint64(buf[0:8])) - - return t, nil -} - -// WriteTo writes the trailer to w. -func (t *MeasurementBlockTrailer) WriteTo(w io.Writer) (n int64, err error) { - // Write data section info. - if err := writeUint64To(w, uint64(t.Data.Offset), &n); err != nil { - return n, err - } else if err := writeUint64To(w, uint64(t.Data.Size), &n); err != nil { - return n, err - } - - // Write hash index section info. - if err := writeUint64To(w, uint64(t.HashIndex.Offset), &n); err != nil { - return n, err - } else if err := writeUint64To(w, uint64(t.HashIndex.Size), &n); err != nil { - return n, err - } - - // Write measurement sketch info. - if err := writeUint64To(w, uint64(t.Sketch.Offset), &n); err != nil { - return n, err - } else if err := writeUint64To(w, uint64(t.Sketch.Size), &n); err != nil { - return n, err - } - - // Write tombstone measurement sketch info. - if err := writeUint64To(w, uint64(t.TSketch.Offset), &n); err != nil { - return n, err - } else if err := writeUint64To(w, uint64(t.TSketch.Size), &n); err != nil { - return n, err - } - - // Write measurement block version. - if err := writeUint16To(w, MeasurementBlockVersion, &n); err != nil { - return n, err - } - - return n, nil -} - -// MeasurementBlockElem represents an internal measurement element. -type MeasurementBlockElem struct { - flag byte // flag - name []byte // measurement name - - tagBlock struct { - offset int64 - size int64 - } - - series struct { - n uint64 // series count - data []byte // serialized series data - } - - seriesIDSet *tsdb.SeriesIDSet - - // size in bytes, set after unmarshalling. - size int -} - -// Name returns the measurement name. -func (e *MeasurementBlockElem) Name() []byte { return e.name } - -// Deleted returns true if the tombstone flag is set. -func (e *MeasurementBlockElem) Deleted() bool { - return (e.flag & MeasurementTombstoneFlag) != 0 -} - -// TagBlockOffset returns the offset of the measurement's tag block. 
-func (e *MeasurementBlockElem) TagBlockOffset() int64 { return e.tagBlock.offset } - -// TagBlockSize returns the size of the measurement's tag block. -func (e *MeasurementBlockElem) TagBlockSize() int64 { return e.tagBlock.size } - -// SeriesData returns the raw series data. -func (e *MeasurementBlockElem) SeriesData() []byte { return e.series.data } - -// SeriesN returns the number of series associated with the measurement. -func (e *MeasurementBlockElem) SeriesN() uint64 { return e.series.n } - -// SeriesID returns series ID at an index. -func (e *MeasurementBlockElem) SeriesID(i int) uint64 { - return binary.BigEndian.Uint64(e.series.data[i*SeriesIDSize:]) -} - -func (e *MeasurementBlockElem) HasSeries() bool { return e.series.n > 0 } - -// SeriesIDs returns a list of decoded series ids. -// -// NOTE: This should be used for testing and diagnostics purposes only. -// It requires loading the entire list of series in-memory. -func (e *MeasurementBlockElem) SeriesIDs() []uint64 { - a := make([]uint64, 0, e.series.n) - e.ForEachSeriesID(func(id uint64) error { - a = append(a, id) - return nil - }) - return a -} - -func (e *MeasurementBlockElem) ForEachSeriesID(fn func(uint64) error) error { - // Read from roaring, if available. - if e.seriesIDSet != nil { - itr := e.seriesIDSet.Iterator() - for itr.HasNext() { - if err := fn(uint64(itr.Next())); err != nil { - return err - } - } - } - - // Read from uvarint encoded data, if available. - var prev uint64 - for data := e.series.data; len(data) > 0; { - delta, n, err := uvarint(data) - if err != nil { - return err - } - data = data[n:] - - seriesID := prev + uint64(delta) - if err = fn(seriesID); err != nil { - return err - } - prev = seriesID - } - return nil -} - -// Size returns the size of the element. -func (e *MeasurementBlockElem) Size() int { return e.size } - -// UnmarshalBinary unmarshals data into e. -func (e *MeasurementBlockElem) UnmarshalBinary(data []byte) error { - start := len(data) - - // Parse flag data. - e.flag, data = data[0], data[1:] - - // Parse tag block offset. - e.tagBlock.offset, data = int64(binary.BigEndian.Uint64(data)), data[8:] - e.tagBlock.size, data = int64(binary.BigEndian.Uint64(data)), data[8:] - - // Parse name. - sz, n, err := uvarint(data) - if err != nil { - return err - } - e.name, data = data[n:n+int(sz)], data[n+int(sz):] - - // Parse series count. - v, n, err := uvarint(data) - if err != nil { - return err - } - e.series.n, data = uint64(v), data[n:] - - // Parse series data size. - sz, n, err = uvarint(data) - if err != nil { - return err - } - data = data[n:] - - // Parse series data (original uvarint encoded or roaring bitmap). - if e.flag&MeasurementSeriesIDSetFlag == 0 { - e.series.data, data = data[:sz], data[sz:] - } else { - // data = memalign(data) - e.seriesIDSet = tsdb.NewSeriesIDSet() - if err = e.seriesIDSet.UnmarshalBinaryUnsafe(data[:sz]); err != nil { - return err - } - data = data[sz:] - } - - // Save length of elem. - e.size = start - len(data) - - return nil -} - -// MeasurementBlockWriter writes a measurement block. -type MeasurementBlockWriter struct { - buf bytes.Buffer - mms map[string]measurement - - // Measurement sketch and tombstoned measurement sketch. - sketch, tSketch estimator.Sketch -} - -// NewMeasurementBlockWriter returns a new MeasurementBlockWriter. 
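-// The writer starts with empty measurement and tombstone sketches; measurements
-// are accumulated via Add and serialized by WriteTo.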
-func NewMeasurementBlockWriter() *MeasurementBlockWriter { - return &MeasurementBlockWriter{ - mms: make(map[string]measurement), - sketch: hll.NewDefaultPlus(), - tSketch: hll.NewDefaultPlus(), - } -} - -// Add adds a measurement with series and tag set offset/size. -func (mw *MeasurementBlockWriter) Add(name []byte, deleted bool, offset, size int64, seriesIDs []uint64) { - mm := mw.mms[string(name)] - mm.deleted = deleted - mm.tagBlock.offset = offset - mm.tagBlock.size = size - - if mm.seriesIDSet == nil { - mm.seriesIDSet = tsdb.NewSeriesIDSet() - } - for _, seriesID := range seriesIDs { - mm.seriesIDSet.AddNoLock(seriesID) - } - - mw.mms[string(name)] = mm - - if deleted { - mw.tSketch.Add(name) - } else { - mw.sketch.Add(name) - } -} - -// WriteTo encodes the measurements to w. -func (mw *MeasurementBlockWriter) WriteTo(w io.Writer) (n int64, err error) { - var t MeasurementBlockTrailer - - // The sketches must be set before calling WriteTo. - if mw.sketch == nil { - return 0, errors.New("measurement sketch not set") - } else if mw.tSketch == nil { - return 0, errors.New("measurement tombstone sketch not set") - } - - // Sort names. - names := make([]string, 0, len(mw.mms)) - for name := range mw.mms { - names = append(names, name) - } - sort.Strings(names) - - // Begin data section. - t.Data.Offset = n - - // Write padding byte so no offsets are zero. - if err := writeUint8To(w, 0, &n); err != nil { - return n, err - } - - // Encode key list. - for _, name := range names { - // Retrieve measurement and save offset. - mm := mw.mms[name] - mm.offset = n - mw.mms[name] = mm - - // Write measurement - if err := mw.writeMeasurementTo(w, []byte(name), &mm, &n); err != nil { - return n, err - } - } - t.Data.Size = n - t.Data.Offset - - // Build key hash map - m := rhh.NewHashMap(rhh.Options{ - Capacity: int64(len(names)), - LoadFactor: LoadFactor, - }) - for name := range mw.mms { - mm := mw.mms[name] - m.Put([]byte(name), &mm) - } - - t.HashIndex.Offset = n - - // Encode hash map length. - if err := writeUint64To(w, uint64(m.Cap()), &n); err != nil { - return n, err - } - - // Encode hash map offset entries. - for i := int64(0); i < m.Cap(); i++ { - _, v := m.Elem(i) - - var offset int64 - if mm, ok := v.(*measurement); ok { - offset = mm.offset - } - - if err := writeUint64To(w, uint64(offset), &n); err != nil { - return n, err - } - } - t.HashIndex.Size = n - t.HashIndex.Offset - - // Write the sketches out. - t.Sketch.Offset = n - if err := writeSketchTo(w, mw.sketch, &n); err != nil { - return n, err - } - t.Sketch.Size = n - t.Sketch.Offset - - t.TSketch.Offset = n - if err := writeSketchTo(w, mw.tSketch, &n); err != nil { - return n, err - } - t.TSketch.Size = n - t.TSketch.Offset - - // Write trailer. - nn, err := t.WriteTo(w) - n += nn - return n, err -} - -// writeMeasurementTo encodes a single measurement entry into w. -func (mw *MeasurementBlockWriter) writeMeasurementTo(w io.Writer, name []byte, mm *measurement, n *int64) error { - // Write flag & tag block offset. - if err := writeUint8To(w, mm.flag(), n); err != nil { - return err - } - if err := writeUint64To(w, uint64(mm.tagBlock.offset), n); err != nil { - return err - } else if err := writeUint64To(w, uint64(mm.tagBlock.size), n); err != nil { - return err - } - - // Write measurement name. - if err := writeUvarintTo(w, uint64(len(name)), n); err != nil { - return err - } - if err := writeTo(w, name, n); err != nil { - return err - } - - // Write series data to buffer. 
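-	// The roaring series ID set is serialized into mw.buf first so that its size
-	// can be written ahead of the data itself.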
- mw.buf.Reset() - if _, err := mm.seriesIDSet.WriteTo(&mw.buf); err != nil { - return err - } - - // Write series count. - if err := writeUvarintTo(w, mm.seriesIDSet.Cardinality(), n); err != nil { - return err - } - - // Write data size & buffer. - if err := writeUvarintTo(w, uint64(mw.buf.Len()), n); err != nil { - return err - } - - // Word align bitmap data. - // if offset := (*n) % 8; offset != 0 { - // if err := writeTo(w, make([]byte, 8-offset), n); err != nil { - // return err - // } - // } - - nn, err := mw.buf.WriteTo(w) - *n += nn - return err -} - -// writeSketchTo writes an estimator.Sketch into w, updating the number of bytes -// written via n. -func writeSketchTo(w io.Writer, s estimator.Sketch, n *int64) error { - data, err := s.MarshalBinary() - if err != nil { - return err - } - - nn, err := w.Write(data) - *n += int64(nn) - return err -} - -type measurement struct { - deleted bool - tagBlock struct { - offset int64 - size int64 - } - seriesIDSet *tsdb.SeriesIDSet - offset int64 -} - -func (mm measurement) flag() byte { - flag := byte(MeasurementSeriesIDSetFlag) - if mm.deleted { - flag |= MeasurementTombstoneFlag - } - return flag -} diff --git a/tsdb/index/tsi1/measurement_block_test.go b/tsdb/index/tsi1/measurement_block_test.go deleted file mode 100644 index 9d860e147ee..00000000000 --- a/tsdb/index/tsi1/measurement_block_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package tsi1_test - -import ( - "bytes" - "encoding/binary" - "fmt" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" -) - -func TestReadMeasurementBlockTrailer(t *testing.T) { - // Build a trailer - var ( - data = make([]byte, tsi1.MeasurementTrailerSize) - blockversion = uint16(1) - blockOffset, blockSize = uint64(1), uint64(2500) - hashIdxOffset, hashIdxSize = uint64(2501), uint64(1000) - sketchOffset, sketchSize = uint64(3501), uint64(250) - tsketchOffset, tsketchSize = uint64(3751), uint64(250) - ) - - binary.BigEndian.PutUint64(data[0:], blockOffset) - binary.BigEndian.PutUint64(data[8:], blockSize) - binary.BigEndian.PutUint64(data[16:], hashIdxOffset) - binary.BigEndian.PutUint64(data[24:], hashIdxSize) - binary.BigEndian.PutUint64(data[32:], sketchOffset) - binary.BigEndian.PutUint64(data[40:], sketchSize) - binary.BigEndian.PutUint64(data[48:], tsketchOffset) - binary.BigEndian.PutUint64(data[56:], tsketchSize) - binary.BigEndian.PutUint16(data[64:], blockversion) - - trailer, err := tsi1.ReadMeasurementBlockTrailer(data) - if err != nil { - t.Logf("trailer is: %#v\n", trailer) - t.Fatal(err) - } - - ok := true && - trailer.Version == int(blockversion) && - trailer.Data.Offset == int64(blockOffset) && - trailer.Data.Size == int64(blockSize) && - trailer.HashIndex.Offset == int64(hashIdxOffset) && - trailer.HashIndex.Size == int64(hashIdxSize) && - trailer.Sketch.Offset == int64(sketchOffset) && - trailer.Sketch.Size == int64(sketchSize) && - trailer.TSketch.Offset == int64(tsketchOffset) && - trailer.TSketch.Size == int64(tsketchSize) - - if !ok { - t.Fatalf("got %v\nwhich does not match expected", trailer) - } -} - -func TestMeasurementBlockTrailer_WriteTo(t *testing.T) { - var trailer = tsi1.MeasurementBlockTrailer{ - Version: 1, - Data: struct { - Offset int64 - Size int64 - }{Offset: 1, Size: 2}, - HashIndex: struct { - Offset int64 - Size int64 - }{Offset: 3, Size: 4}, - Sketch: struct { - Offset int64 - Size int64 - }{Offset: 5, Size: 6}, - TSketch: struct { - Offset int64 - Size int64 - }{Offset: 7, Size: 8}, - } - - var buf bytes.Buffer - n, err := 
trailer.WriteTo(&buf) - if got, exp := n, int64(tsi1.MeasurementTrailerSize); got != exp { - t.Fatalf("got %v, exp %v", got, exp) - } - - if got := err; got != nil { - t.Fatalf("got %v, exp %v", got, nil) - } - - // Verify trailer written correctly. - exp := "" + - "0000000000000001" + // data offset - "0000000000000002" + // data size - "0000000000000003" + // hash index offset - "0000000000000004" + // hash index size - "0000000000000005" + // sketch offset - "0000000000000006" + // sketch size - "0000000000000007" + // tsketch offset - "0000000000000008" + // tsketch size - "0001" // version - - if got, exp := fmt.Sprintf("%x", buf.String()), exp; got != exp { - t.Fatalf("got %v, exp %v", got, exp) - } -} - -// Ensure measurement blocks can be written and opened. -func TestMeasurementBlockWriter(t *testing.T) { - ms := Measurements{ - NewMeasurement([]byte("foo"), false, 100, 10, []uint64{1, 3, 4}), - NewMeasurement([]byte("bar"), false, 200, 20, []uint64{2}), - NewMeasurement([]byte("baz"), false, 300, 30, []uint64{5, 6}), - } - - // Write the measurements to writer. - mw := tsi1.NewMeasurementBlockWriter() - for _, m := range ms { - mw.Add(m.Name, m.Deleted, m.Offset, m.Size, m.ids) - } - - // Encode into buffer. - var buf bytes.Buffer - if n, err := mw.WriteTo(&buf); err != nil { - t.Fatal(err) - } else if n == 0 { - t.Fatal("expected bytes written") - } - - // Unmarshal into a block. - var blk tsi1.MeasurementBlock - if err := blk.UnmarshalBinary(buf.Bytes()); err != nil { - t.Fatal(err) - } - - // Verify data in block. - if e, ok := blk.Elem([]byte("foo")); !ok { - t.Fatal("expected element") - } else if e.TagBlockOffset() != 100 || e.TagBlockSize() != 10 { - t.Fatalf("unexpected offset/size: %v/%v", e.TagBlockOffset(), e.TagBlockSize()) - } else if !reflect.DeepEqual(e.SeriesIDs(), []uint64{1, 3, 4}) { - t.Fatalf("unexpected series data: %#v", e.SeriesIDs()) - } - - if e, ok := blk.Elem([]byte("bar")); !ok { - t.Fatal("expected element") - } else if e.TagBlockOffset() != 200 || e.TagBlockSize() != 20 { - t.Fatalf("unexpected offset/size: %v/%v", e.TagBlockOffset(), e.TagBlockSize()) - } else if !reflect.DeepEqual(e.SeriesIDs(), []uint64{2}) { - t.Fatalf("unexpected series data: %#v", e.SeriesIDs()) - } - - if e, ok := blk.Elem([]byte("baz")); !ok { - t.Fatal("expected element") - } else if e.TagBlockOffset() != 300 || e.TagBlockSize() != 30 { - t.Fatalf("unexpected offset/size: %v/%v", e.TagBlockOffset(), e.TagBlockSize()) - } else if !reflect.DeepEqual(e.SeriesIDs(), []uint64{5, 6}) { - t.Fatalf("unexpected series data: %#v", e.SeriesIDs()) - } - - // Verify non-existent measurement doesn't exist. 
- if _, ok := blk.Elem([]byte("BAD_MEASUREMENT")); ok { - t.Fatal("expected no element") - } -} - -type Measurements []Measurement - -type Measurement struct { - Name []byte - Deleted bool - Offset int64 - Size int64 - ids []uint64 -} - -func NewMeasurement(name []byte, deleted bool, offset, size int64, ids []uint64) Measurement { - return Measurement{ - Name: name, - Deleted: deleted, - Offset: offset, - Size: size, - ids: ids, - } -} diff --git a/tsdb/index/tsi1/partition.go b/tsdb/index/tsi1/partition.go deleted file mode 100644 index bdd268eb01e..00000000000 --- a/tsdb/index/tsi1/partition.go +++ /dev/null @@ -1,1550 +0,0 @@ -package tsi1 - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - "sync" - "time" - "unsafe" - - "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/bytesutil" - errors2 "github.com/influxdata/influxdb/v2/pkg/errors" - "github.com/influxdata/influxdb/v2/pkg/estimator" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxql" - "go.uber.org/zap" -) - -// Version is the current version of the TSI index. -const Version = 1 - -// File extensions. -const ( - LogFileExt = ".tsl" - IndexFileExt = ".tsi" - - CompactingExt = ".compacting" -) - -// ManifestFileName is the name of the index manifest file. -const ManifestFileName = "MANIFEST" - -// Partition represents a collection of layered index files and WAL. -type Partition struct { - mu sync.RWMutex - opened bool - - sfile *tsdb.SeriesFile // series lookup file - activeLogFile *LogFile // current log file - fileSet *FileSet // current file set - seq int // file id sequence - - // Fast series lookup of series IDs in the series file that have been present - // in this partition. This set tracks both insertions and deletions of a series. - seriesIDSet *tsdb.SeriesIDSet - - // Compaction management - levels []CompactionLevel // compaction levels - levelCompacting []bool // level compaction status - - // Close management. - once sync.Once - closing chan struct{} // closing is used to inform iterators the partition is closing. - - // Fieldset shared with engine. - fieldset *tsdb.MeasurementFieldSet - - currentCompactionN int // counter of in-progress compactions - - // Directory of the Partition's index files. - path string - id string // id portion of path. - - // Log file compaction thresholds. - MaxLogFileSize int64 - MaxLogFileAge time.Duration - nosync bool // when true, flushing and syncing of LogFile will be disabled. - logbufferSize int // the LogFile's buffer is set to this value. - - // Frequency of compaction checks. - compactionInterrupt chan struct{} - compactionsDisabled int - - logger *zap.Logger - - // Current size of MANIFEST. Used to determine partition size. - manifestSize int64 - - // Index's version. - version int - - manifestPathFn func() string -} - -// NewPartition returns a new instance of Partition. 
-func NewPartition(sfile *tsdb.SeriesFile, path string) *Partition { - p := &Partition{ - closing: make(chan struct{}), - path: path, - sfile: sfile, - seriesIDSet: tsdb.NewSeriesIDSet(), - fileSet: &FileSet{}, - MaxLogFileSize: tsdb.DefaultMaxIndexLogFileSize, - - // compactionEnabled: true, - compactionInterrupt: make(chan struct{}), - - logger: zap.NewNop(), - version: Version, - } - p.manifestPathFn = p.manifestPath - return p -} - -// bytes estimates the memory footprint of this Partition, in bytes. -func (p *Partition) bytes() int { - var b int - b += 24 // mu RWMutex is 24 bytes - b += int(unsafe.Sizeof(p.opened)) - // Do not count SeriesFile because it belongs to the code that constructed this Partition. - b += int(unsafe.Sizeof(p.activeLogFile)) + p.activeLogFile.bytes() - b += int(unsafe.Sizeof(p.fileSet)) + p.fileSet.bytes() - b += int(unsafe.Sizeof(p.seq)) - b += int(unsafe.Sizeof(p.seriesIDSet)) + p.seriesIDSet.Bytes() - b += int(unsafe.Sizeof(p.levels)) - for _, level := range p.levels { - b += int(unsafe.Sizeof(level)) - } - b += int(unsafe.Sizeof(p.levelCompacting)) - for _, levelCompacting := range p.levelCompacting { - b += int(unsafe.Sizeof(levelCompacting)) - } - b += 12 // once sync.Once is 12 bytes - b += int(unsafe.Sizeof(p.closing)) - b += int(unsafe.Sizeof(p.currentCompactionN)) - b += int(unsafe.Sizeof(p.fieldset)) + p.fieldset.Bytes() - b += int(unsafe.Sizeof(p.path)) + len(p.path) - b += int(unsafe.Sizeof(p.id)) + len(p.id) - b += int(unsafe.Sizeof(p.MaxLogFileSize)) - b += int(unsafe.Sizeof(p.MaxLogFileAge)) - b += int(unsafe.Sizeof(p.compactionInterrupt)) - b += int(unsafe.Sizeof(p.compactionsDisabled)) - b += int(unsafe.Sizeof(p.logger)) - b += int(unsafe.Sizeof(p.manifestSize)) - b += int(unsafe.Sizeof(p.version)) - return b -} - -// ErrIncompatibleVersion is returned when attempting to read from an -// incompatible tsi1 manifest file. -var ErrIncompatibleVersion = errors.New("incompatible tsi1 index MANIFEST") - -// Open opens the partition. -func (p *Partition) Open() (rErr error) { - p.mu.Lock() - defer p.mu.Unlock() - - p.closing = make(chan struct{}) - - if p.opened { - return fmt.Errorf("index partition already open: %q", p.path) - } - - // Validate path is correct. - p.id = filepath.Base(p.path) - _, err := strconv.Atoi(p.id) - if err != nil { - return fmt.Errorf("poorly formed manifest file path, %q: %w", p.path, err) - } - - // Create directory if it doesn't exist. - if err := os.MkdirAll(p.path, 0777); err != nil { - return err - } - - filename := filepath.Join(p.path, ManifestFileName) - // Read manifest file. - m, manifestSize, err := ReadManifestFile(filename) - if os.IsNotExist(err) { - m = NewManifest(p.ManifestPath()) - } else if err != nil { - return err - } - // Set manifest size on the partition - p.manifestSize = manifestSize - - // Check to see if the MANIFEST file is compatible with the current Index. - if err := m.Validate(); err != nil { - return err - } - - // Copy compaction levels to the index. - p.levels = make([]CompactionLevel, len(m.Levels)) - copy(p.levels, m.Levels) - - // Set up flags to track whether a level is compacting. - p.levelCompacting = make([]bool, len(p.levels)) - - // Open each file in the manifest. 
- var files []File - defer func() { - if rErr != nil { - Files(files).Close() - } - }() - - for _, filename := range m.Files { - switch filepath.Ext(filename) { - case LogFileExt: - f, err := p.openLogFile(filepath.Join(p.path, filename)) - if err != nil { - return err - } - files = append(files, f) - - // Make first log file active, if within threshold. - sz, _ := f.Stat() - if p.activeLogFile == nil && sz < p.MaxLogFileSize { - p.activeLogFile = f - } - - case IndexFileExt: - f, err := p.openIndexFile(filepath.Join(p.path, filename)) - if err != nil { - return err - } - files = append(files, f) - } - } - p.fileSet = NewFileSet(files) - - // Set initial sequence number. - p.seq = p.fileSet.MaxID() - - // Delete any files not in the manifest. - if err := p.deleteNonManifestFiles(m); err != nil { - return err - } - - // Ensure a log file exists. - if p.activeLogFile == nil { - if err := p.prependActiveLogFile(); err != nil { - return err - } - } - - // Build series existence set. - if err := p.buildSeriesSet(); err != nil { - return err - } - - // Mark opened. - p.opened = true - - // Send a compaction request on start up. - go p.runPeriodicCompaction() - - return nil -} - -func (p *Partition) IsOpen() bool { - return p.opened -} - -// openLogFile opens a log file and appends it to the index. -func (p *Partition) openLogFile(path string) (*LogFile, error) { - f := NewLogFile(p.sfile, path) - f.nosync = p.nosync - f.bufferSize = p.logbufferSize - - if err := f.Open(); err != nil { - return nil, err - } - return f, nil -} - -// openIndexFile opens a log file and appends it to the index. -func (p *Partition) openIndexFile(path string) (*IndexFile, error) { - f := NewIndexFile(p.sfile) - f.SetPath(path) - if err := f.Open(); err != nil { - return nil, err - } - return f, nil -} - -// deleteNonManifestFiles removes all files not in the manifest. -func (p *Partition) deleteNonManifestFiles(m *Manifest) (rErr error) { - dir, err := os.Open(p.path) - if err != nil { - return err - } - defer errors2.Capture(&rErr, dir.Close)() - - fis, err := dir.Readdir(-1) - if err != nil { - return err - } - - // Loop over all files and remove any not in the manifest. - for _, fi := range fis { - filename := filepath.Base(fi.Name()) - if filename == ManifestFileName || m.HasFile(filename) { - continue - } - - if err := os.RemoveAll(filename); err != nil { - return err - } - } - - return nil -} - -func (p *Partition) buildSeriesSet() error { - fs := p.retainFileSet() - defer fs.Release() - - p.seriesIDSet = tsdb.NewSeriesIDSet() - - // Read series sets from files in reverse. - for i := len(fs.files) - 1; i >= 0; i-- { - f := fs.files[i] - - // Delete anything that's been tombstoned. - ts, err := f.TombstoneSeriesIDSet() - if err != nil { - return err - } - p.seriesIDSet.Diff(ts) - - // Add series created within the file. - ss, err := f.SeriesIDSet() - if err != nil { - return err - } - p.seriesIDSet.Merge(ss) - } - return nil -} - -// CurrentCompactionN returns the number of compactions currently running. -func (p *Partition) CurrentCompactionN() int { - p.mu.RLock() - defer p.mu.RUnlock() - return p.currentCompactionN -} - -// Wait will block until all compactions are finished. -// Must only be called while they are disabled. -func (p *Partition) Wait() { - ticker := time.NewTicker(10 * time.Millisecond) - defer ticker.Stop() - for { - if p.CurrentCompactionN() == 0 { - return - } - <-ticker.C - } -} - -// Close closes the index. 
-func (p *Partition) Close() error { - // Wait for goroutines to finish outstanding compactions. - p.once.Do(func() { - close(p.closing) - close(p.compactionInterrupt) - }) - p.Wait() - - // Lock index and close remaining - p.mu.Lock() - defer p.mu.Unlock() - - if p.fileSet == nil { - return nil - } - - // Close log files. - var err error - for _, f := range p.fileSet.files { - if localErr := f.Close(); localErr != nil { - err = localErr - } - } - p.fileSet.files = nil - - return err -} - -// closing returns true if the partition is currently closing. It does not require -// a lock so will always return to callers. -func (p *Partition) isClosing() bool { - select { - case <-p.closing: - return true - default: - return false - } -} - -// Path returns the path to the partition. -func (p *Partition) Path() string { return p.path } - -// SeriesFile returns the attached series file. -func (p *Partition) SeriesFile() *tsdb.SeriesFile { return p.sfile } - -// NextSequence returns the next file identifier. -func (p *Partition) NextSequence() int { - p.mu.Lock() - defer p.mu.Unlock() - return p.nextSequence() -} - -func (p *Partition) nextSequence() int { - p.seq++ - return p.seq -} - -func (p *Partition) ManifestPath() string { - return p.manifestPathFn() -} - -// ManifestPath returns the path to the index's manifest file. -func (p *Partition) manifestPath() string { - return filepath.Join(p.path, ManifestFileName) -} - -// Manifest returns a manifest for the index. -func (p *Partition) Manifest() *Manifest { - return p.manifest(p.fileSet) -} - -// manifest returns a manifest for the index, possibly using a -// new FileSet to account for compaction or log prepending -func (p *Partition) manifest(newFileSet *FileSet) *Manifest { - m := &Manifest{ - Levels: p.levels, - Files: make([]string, len(newFileSet.files)), - Version: p.version, - path: p.ManifestPath(), - } - - for j, f := range newFileSet.files { - m.Files[j] = filepath.Base(f.Path()) - } - - return m -} - -// SetManifestPathForTest is only to force a bad path in testing -func (p *Partition) SetManifestPathForTest(path string) { - p.mu.Lock() - defer p.mu.Unlock() - p.manifestPathFn = func() string { return path } -} - -// WithLogger sets the logger for the index. -func (p *Partition) WithLogger(logger *zap.Logger) { - p.logger = logger.With(zap.String("index", "tsi")) -} - -// SetFieldSet sets a shared field set from the engine. -func (p *Partition) SetFieldSet(fs *tsdb.MeasurementFieldSet) { - p.mu.Lock() - p.fieldset = fs - p.mu.Unlock() -} - -// FieldSet returns the fieldset. -func (p *Partition) FieldSet() *tsdb.MeasurementFieldSet { - p.mu.Lock() - fs := p.fieldset - p.mu.Unlock() - return fs -} - -// RetainFileSet returns the current fileset and adds a reference count. -func (p *Partition) RetainFileSet() (*FileSet, error) { - select { - case <-p.closing: - return nil, tsdb.ErrIndexClosing - default: - p.mu.RLock() - defer p.mu.RUnlock() - return p.retainFileSet(), nil - } -} - -func (p *Partition) retainFileSet() *FileSet { - fs := p.fileSet - fs.Retain() - return fs -} - -// FileN returns the active files in the file set. -func (p *Partition) FileN() int { - p.mu.RLock() - defer p.mu.RUnlock() - return len(p.fileSet.files) -} - -// prependActiveLogFile adds a new log file so that the current log file can be compacted. -func (p *Partition) prependActiveLogFile() (rErr error) { - // Open file and insert it into the first position. 
- f, err := p.openLogFile(filepath.Join(p.path, FormatLogFileName(p.nextSequence()))) - if err != nil { - return err - } - var oldActiveFile *LogFile - p.activeLogFile, oldActiveFile = f, p.activeLogFile - - // Prepend and generate new fileset but do not yet update the partition - newFileSet := p.fileSet.PrependLogFile(f) - - defer errors2.Capture(&rErr, func() error { - if rErr != nil { - // close the new file. - f.Close() - p.activeLogFile = oldActiveFile - } - return rErr - })() - - // Write new manifest. - manifestSize, err := p.manifest(newFileSet).Write() - if err != nil { - return fmt.Errorf("manifest write failed for %q: %w", p.ManifestPath(), err) - } - p.manifestSize = manifestSize - // Store the new FileSet in the partition now that the manifest has been written - p.fileSet = newFileSet - return nil -} - -// ForEachMeasurementName iterates over all measurement names in the index. -func (p *Partition) ForEachMeasurementName(fn func(name []byte) error) error { - fs, err := p.RetainFileSet() - if err != nil { - return err - } - defer fs.Release() - - itr := fs.MeasurementIterator() - if itr == nil { - return nil - } - - for e := itr.Next(); e != nil; e = itr.Next() { - if err := fn(e.Name()); err != nil { - return err - } - } - - return nil -} - -// MeasurementHasSeries returns true if a measurement has at least one non-tombstoned series. -func (p *Partition) MeasurementHasSeries(name []byte) (bool, error) { - fs, err := p.RetainFileSet() - if err != nil { - return false, err - } - defer fs.Release() - - for _, f := range fs.files { - if f.MeasurementHasSeries(p.seriesIDSet, name) { - return true, nil - } - } - - return false, nil -} - -// MeasurementIterator returns an iterator over all measurement names. -func (p *Partition) MeasurementIterator() (tsdb.MeasurementIterator, error) { - fs, err := p.RetainFileSet() - if err != nil { - return nil, err - } - itr := fs.MeasurementIterator() - if itr == nil { - fs.Release() - return nil, nil - } - return newFileSetMeasurementIterator(fs, NewTSDBMeasurementIteratorAdapter(itr)), nil -} - -// MeasurementExists returns true if a measurement exists. -func (p *Partition) MeasurementExists(name []byte) (bool, error) { - fs, err := p.RetainFileSet() - if err != nil { - return false, err - } - defer fs.Release() - m := fs.Measurement(name) - return m != nil && !m.Deleted(), nil -} - -func (p *Partition) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) { - fs, err := p.RetainFileSet() - if err != nil { - return nil, err - } - defer fs.Release() - - itr := fs.MeasurementIterator() - if itr == nil { - return nil, nil - } - - var a [][]byte - for e := itr.Next(); e != nil; e = itr.Next() { - if re.Match(e.Name()) { - // Clone bytes since they will be used after the fileset is released. - a = append(a, bytesutil.Clone(e.Name())) - } - } - return a, nil -} - -func (p *Partition) MeasurementSeriesIDIterator(name []byte) (tsdb.SeriesIDIterator, error) { - fs, err := p.RetainFileSet() - if err != nil { - return nil, err - } - return newFileSetSeriesIDIterator(fs, fs.MeasurementSeriesIDIterator(name)), nil -} - -// DropMeasurement deletes a measurement from the index. DropMeasurement does -// not remove any series from the index directly. -func (p *Partition) DropMeasurement(name []byte) error { - fs, err := p.RetainFileSet() - if err != nil { - return err - } - defer fs.Release() - - // Delete all keys and values. 
- if kitr := fs.TagKeyIterator(name); kitr != nil { - for k := kitr.Next(); k != nil; k = kitr.Next() { - // Delete key if not already deleted. - if !k.Deleted() { - if err := func() error { - p.mu.RLock() - defer p.mu.RUnlock() - return p.activeLogFile.DeleteTagKey(name, k.Key()) - }(); err != nil { - return err - } - } - - // Delete each value in key. - if vitr := k.TagValueIterator(); vitr != nil { - for v := vitr.Next(); v != nil; v = vitr.Next() { - if !v.Deleted() { - if err := func() error { - p.mu.RLock() - defer p.mu.RUnlock() - return p.activeLogFile.DeleteTagValue(name, k.Key(), v.Value()) - }(); err != nil { - return err - } - } - } - } - } - } - - // Delete all series. - if itr := fs.MeasurementSeriesIDIterator(name); itr != nil { - defer itr.Close() - for { - elem, err := itr.Next() - if err != nil { - return err - } else if elem.SeriesID == 0 { - break - } - if err := func() error { - p.mu.RLock() - defer p.mu.RUnlock() - return p.activeLogFile.DeleteSeriesID(elem.SeriesID) - }(); err != nil { - return err - } - } - if err = itr.Close(); err != nil { - return err - } - } - - // Mark measurement as deleted. - if err := func() error { - p.mu.RLock() - defer p.mu.RUnlock() - return p.activeLogFile.DeleteMeasurement(name) - }(); err != nil { - return err - } - - // Check if the log file needs to be swapped. - if err := p.CheckLogFile(); err != nil { - return err - } - - return nil -} - -// createSeriesListIfNotExists creates a list of series if they doesn't exist in -// bulk. -func (p *Partition) createSeriesListIfNotExists(names [][]byte, tagsSlice []models.Tags) ([]uint64, error) { - // Is there anything to do? The partition may have been sent an empty batch. - if len(names) == 0 { - return nil, nil - } else if len(names) != len(tagsSlice) { - return nil, fmt.Errorf("uneven batch, partition %s sent %d names and %d tags", p.id, len(names), len(tagsSlice)) - } - - // Maintain reference count on files in file set. - fs, err := p.RetainFileSet() - if err != nil { - return nil, err - } - defer fs.Release() - - // Ensure fileset cannot change during insert. - p.mu.RLock() - // Insert series into log file. - ids, err := p.activeLogFile.AddSeriesList(p.seriesIDSet, names, tagsSlice) - if err != nil { - p.mu.RUnlock() - return nil, err - } - p.mu.RUnlock() - - if err := p.CheckLogFile(); err != nil { - return nil, err - } - return ids, nil -} - -func (p *Partition) DropSeries(seriesID uint64) error { - // Delete series from index. - if err := func() error { - p.mu.RLock() - defer p.mu.RUnlock() - return p.activeLogFile.DeleteSeriesID(seriesID) - }(); err != nil { - return err - } - - p.seriesIDSet.Remove(seriesID) - - // Swap log file, if necessary. - return p.CheckLogFile() -} - -// MeasurementsSketches returns the two sketches for the partition by merging all -// instances of the type sketch types in all the index files. -func (p *Partition) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) { - fs, err := p.RetainFileSet() - if err != nil { - return nil, nil, err - } - defer fs.Release() - return fs.MeasurementsSketches() -} - -// SeriesSketches returns the two sketches for the partition by merging all -// instances of the type sketch types in all the index files. -func (p *Partition) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) { - fs, err := p.RetainFileSet() - if err != nil { - return nil, nil, err - } - defer fs.Release() - return fs.SeriesSketches() -} - -// HasTagKey returns true if tag key exists. 
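// The sketch pairs returned by MeasurementsSketches and SeriesSketches above
// (and populated on the write path by MeasurementBlockWriter's sketch/tSketch
// fields) are HyperLogLog++ estimators: one sketch records everything ever
// inserted, the tombstone sketch records everything deleted. Live cardinality
// is therefore only an estimate, obtained by merging the sketches from every
// index file and subtracting the tombstone estimate from the insert estimate.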
-func (p *Partition) HasTagKey(name, key []byte) (bool, error) { - fs, err := p.RetainFileSet() - if err != nil { - return false, err - } - defer fs.Release() - return fs.HasTagKey(name, key), nil -} - -// HasTagValue returns true if tag value exists. -func (p *Partition) HasTagValue(name, key, value []byte) (bool, error) { - fs, err := p.RetainFileSet() - if err != nil { - return false, err - } - defer fs.Release() - return fs.HasTagValue(name, key, value), nil -} - -// TagKeyIterator returns an iterator for all keys across a single measurement. -func (p *Partition) TagKeyIterator(name []byte) tsdb.TagKeyIterator { - fs, err := p.RetainFileSet() - if err != nil { - return nil // TODO(edd): this should probably return an error. - } - - itr := fs.TagKeyIterator(name) - if itr == nil { - fs.Release() - return nil - } - return newFileSetTagKeyIterator(fs, NewTSDBTagKeyIteratorAdapter(itr)) -} - -// TagValueIterator returns an iterator for all values across a single key. -func (p *Partition) TagValueIterator(name, key []byte) tsdb.TagValueIterator { - fs, err := p.RetainFileSet() - if err != nil { - return nil // TODO(edd): this should probably return an error. - } - - itr := fs.TagValueIterator(name, key) - if itr == nil { - fs.Release() - return nil - } - return newFileSetTagValueIterator(fs, NewTSDBTagValueIteratorAdapter(itr)) -} - -// TagKeySeriesIDIterator returns a series iterator for all values across a single key. -func (p *Partition) TagKeySeriesIDIterator(name, key []byte) (tsdb.SeriesIDIterator, error) { - fs, err := p.RetainFileSet() - if err != nil { - return nil, err - } - - itr, err := fs.TagKeySeriesIDIterator(name, key) - if err != nil { - fs.Release() - return nil, err - } else if itr == nil { - fs.Release() - return nil, nil - } - return newFileSetSeriesIDIterator(fs, itr), nil -} - -// TagValueSeriesIDIterator returns a series iterator for a single key value. -func (p *Partition) TagValueSeriesIDIterator(name, key, value []byte) (tsdb.SeriesIDIterator, error) { - fs, err := p.RetainFileSet() - if err != nil { - return nil, err - } - - itr, err := fs.TagValueSeriesIDIterator(name, key, value) - if err != nil { - fs.Release() - return nil, err - } else if itr == nil { - fs.Release() - return nil, nil - } - return newFileSetSeriesIDIterator(fs, itr), nil -} - -// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression. -func (p *Partition) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) { - fs, err := p.RetainFileSet() - if err != nil { - return nil, err - } - defer fs.Release() - - return fs.MeasurementTagKeysByExpr(name, expr) -} - -// ForEachMeasurementTagKey iterates over all tag keys in a measurement. -func (p *Partition) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error { - fs, err := p.RetainFileSet() - if err != nil { - return err - } - defer fs.Release() - - itr := fs.TagKeyIterator(name) - if itr == nil { - return nil - } - - for e := itr.Next(); e != nil; e = itr.Next() { - if err := fn(e.Key()); err != nil { - return err - } - } - - return nil -} - -// TagKeyCardinality always returns zero. -// It is not possible to determine cardinality of tags across index files. -func (p *Partition) TagKeyCardinality(name, key []byte) int { - return 0 -} - -func (p *Partition) SetFieldName(measurement []byte, name string) {} -func (p *Partition) RemoveShard(shardID uint64) {} -func (p *Partition) AssignShard(k string, shardID uint64) {} - -// Compact requests a compaction of log files. 
-func (p *Partition) Compact() { - p.mu.Lock() - defer p.mu.Unlock() - p.compact() -} - -func (p *Partition) DisableCompactions() { - p.mu.Lock() - defer p.mu.Unlock() - p.compactionsDisabled++ - - select { - case <-p.closing: - return - default: - } - - if p.compactionsDisabled == 0 { - close(p.compactionInterrupt) - p.compactionInterrupt = make(chan struct{}) - } -} - -func (p *Partition) EnableCompactions() { - p.mu.Lock() - defer p.mu.Unlock() - - // Already enabled? - if p.compactionsEnabled() { - return - } - p.compactionsDisabled-- -} - -func (p *Partition) compactionsEnabled() bool { - return p.compactionsDisabled == 0 -} - -func (p *Partition) runPeriodicCompaction() { - // kick off an initial compaction at startup without the optimization check - p.Compact() - - // Avoid a race when using Reopen in tests - p.mu.RLock() - closing := p.closing - p.mu.RUnlock() - - // check for compactions once an hour (usually not necessary but a nice safety check) - t := time.NewTicker(1 * time.Hour) - defer t.Stop() - for { - select { - case <-closing: - return - case <-t.C: - if p.NeedsCompaction(true) { - p.Compact() - } - } - } -} - -// NeedsCompaction only requires a read lock and checks if there are files that could be compacted. -// -// If compact() is updated we should also update needsCompaction -// If checkRunning = true, only count as needing a compaction if there is not a compaction already -// in progress for the level that would be compacted -func (p *Partition) NeedsCompaction(checkRunning bool) bool { - p.mu.RLock() - defer p.mu.RUnlock() - if p.needsLogCompaction() { - return true - } - levelCount := make(map[int]int) - maxLevel := len(p.levels) - 2 - // If we have 2 log files (level 0), or if we have 2 files at the same level, we should do a compaction. - for _, f := range p.fileSet.files { - level := f.Level() - levelCount[level]++ - if level <= maxLevel && levelCount[level] > 1 && !(checkRunning && p.levelCompacting[level]) { - return true - } - } - return false -} - -// compact compacts continguous groups of files that are not currently compacting. -// -// compact requires that mu is write-locked. -func (p *Partition) compact() { - if p.isClosing() { - return - } else if !p.compactionsEnabled() { - return - } - interrupt := p.compactionInterrupt - - fs := p.retainFileSet() - defer fs.Release() - - // check if the current active log file should be rolled - if p.needsLogCompaction() { - if err := p.prependActiveLogFile(); err != nil { - p.logger.Error("failed to retire active log file", zap.Error(err)) - } - } - - // compact any non-active log files first - for _, f := range p.fileSet.files { - if f.Level() == 0 { - logFile := f.(*LogFile) // It is an invariant that a file is level 0 iff it is a log file - if logFile == p.activeLogFile { - continue - } - if p.levelCompacting[0] { - break - } - // Mark the level as compacting. - p.levelCompacting[0] = true - p.currentCompactionN++ - go func() { - p.compactLogFile(logFile) - p.mu.Lock() - p.currentCompactionN-- - p.levelCompacting[0] = false - p.mu.Unlock() - p.Compact() - }() - } - } - - // Iterate over each level we are going to compact. - // We skip the first level (0) because it is log files and they are compacted separately. - // We skip the last level because the files have no higher level to compact into. - minLevel, maxLevel := 1, len(p.levels)-2 - for level := minLevel; level <= maxLevel; level++ { - // Skip level if it is currently compacting. 
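// Note on the level scheme used by this loop: level 0 is always the
// write-ahead log (.tsl) files, while levels 1 and above are immutable .tsi
// index files. Each pass takes the last contiguous run of files within a
// level, truncates it to at most MaxIndexMergeCount files (2, defined near
// the end of this file), and merges them into a single file at level+1, so
// repeated compaction passes cascade data toward the higher levels with the
// larger bloom filters.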
- if p.levelCompacting[level] { - continue - } - - // Collect contiguous files from the end of the level. - files := fs.LastContiguousIndexFilesByLevel(level) - if len(files) < 2 { - continue - } else if len(files) > MaxIndexMergeCount { - files = files[len(files)-MaxIndexMergeCount:] - } - - // Retain files during compaction. - IndexFiles(files).Retain() - - // Mark the level as compacting. - p.levelCompacting[level] = true - - // Execute in closure to save reference to the group within the loop. - func(files []*IndexFile, level int) { - // Start compacting in a separate goroutine. - p.currentCompactionN++ - go func() { - - // Compact to a new level. - p.compactToLevel(files, level+1, interrupt) - - // Ensure compaction lock for the level is released. - p.mu.Lock() - p.levelCompacting[level] = false - p.currentCompactionN-- - p.mu.Unlock() - - // Check for new compactions - p.Compact() - }() - }(files, level) - } -} - -// compactToLevel compacts a set of files into a new file. Replaces old files with -// compacted file on successful completion. This runs in a separate goroutine. -func (p *Partition) compactToLevel(files []*IndexFile, level int, interrupt <-chan struct{}) { - assert(len(files) >= 2, "at least two index files are required for compaction") - assert(level > 0, "cannot compact level zero") - - // Files have already been retained by caller. - // Ensure files are released only once. - var once sync.Once - defer once.Do(func() { IndexFiles(files).Release() }) - - // Build a logger for this compaction. - log, logEnd := logger.NewOperation(context.TODO(), p.logger, "TSI level compaction", "tsi1_compact_to_level", zap.Int("tsi1_level", level)) - defer logEnd() - - // Check for cancellation. - select { - case <-interrupt: - log.Error("Cannot begin compaction", zap.Error(ErrCompactionInterrupted)) - return - default: - } - - // Track time to compact. - start := time.Now() - - // Create new index file. - path := filepath.Join(p.path, FormatIndexFileName(p.NextSequence(), level)) - f, err := os.Create(path) - if err != nil { - log.Error("Cannot create compaction files", zap.Error(err)) - return - } - defer f.Close() - - log.Info("Performing full compaction", - zap.String("src", joinIntSlice(IndexFiles(files).IDs(), ",")), - zap.String("dst", path), - ) - - // Compact all index files to new index file. - lvl := p.levels[level] - n, err := IndexFiles(files).CompactTo(f, p.sfile, lvl.M, lvl.K, interrupt) - if err != nil { - log.Error("Cannot compact index files", zap.Error(err)) - return - } - - if err = f.Sync(); err != nil { - log.Error("Error sync index file", zap.Error(err)) - return - } - - // Close file. - if err := f.Close(); err != nil { - log.Error("Error closing index file", zap.Error(err)) - return - } - - // Reopen as an index file. - file := NewIndexFile(p.sfile) - file.SetPath(path) - if err := file.Open(); err != nil { - log.Error("Cannot open new index file", zap.Error(err)) - return - } - - // Obtain lock to swap in index file and write manifest. - if err := func() (rErr error) { - p.mu.Lock() - defer p.mu.Unlock() - - // Replace previous files with new index file. - newFileSet := p.fileSet.MustReplace(IndexFiles(files).Files(), file) - - // Write new manifest. - manifestSize, err := p.manifest(newFileSet).Write() - defer errors2.Capture(&rErr, func() error { - if rErr != nil { - // Close the new file to avoid leaks. 
- file.Close() - } - return rErr - })() - if err != nil { - return fmt.Errorf("manifest file write failed compacting index %q: %w", p.ManifestPath(), err) - } - p.manifestSize = manifestSize - // Store the new FileSet in the partition now that the manifest has been written - p.fileSet = newFileSet - return nil - }(); err != nil { - log.Error("Cannot write manifest", zap.Error(err)) - return - } - - elapsed := time.Since(start) - log.Info("Full compaction complete", - zap.String("path", path), - logger.DurationLiteral("elapsed", elapsed), - zap.Int64("bytes", n), - zap.Int("kb_per_sec", int(float64(n)/elapsed.Seconds())/1024), - ) - - // Release old files. - once.Do(func() { IndexFiles(files).Release() }) - - // Close and delete all old index files. - for _, f := range files { - log.Info("Removing index file", zap.String("path", f.Path())) - - if err := f.Close(); err != nil { - log.Error("Cannot close index file", zap.Error(err)) - return - } else if err := os.Remove(f.Path()); err != nil { - log.Error("Cannot remove index file", zap.Error(err)) - return - } - } -} - -func (p *Partition) Rebuild() {} - -// needsLogCompaction returns true if the log file is too big or too old -// The caller must have at least a read lock on the partition -func (p *Partition) needsLogCompaction() bool { - size := p.activeLogFile.Size() - modTime := p.activeLogFile.ModTime() - return size >= p.MaxLogFileSize || (size > 0 && modTime.Before(time.Now().Add(-p.MaxLogFileAge))) -} - -func (p *Partition) CheckLogFile() error { - // Check log file under read lock. - needsCompaction := func() bool { - p.mu.RLock() - defer p.mu.RUnlock() - return p.needsLogCompaction() - }() - if !needsCompaction { - return nil - } - - // If file size exceeded then recheck under write lock and swap files. - p.mu.Lock() - defer p.mu.Unlock() - return p.checkLogFile() -} - -func (p *Partition) checkLogFile() error { - if !p.needsLogCompaction() { - return nil - } - - // Open new log file and insert it into the first position. - if err := p.prependActiveLogFile(); err != nil { - return err - } - - // Begin compacting in a background goroutine. - go func() { - p.Compact() // check for new compactions - }() - - return nil -} - -// compactLogFile compacts f into a tsi file. The new file will share the -// same identifier but will have a ".tsi" extension. Once the log file is -// compacted then the manifest is updated and the log file is discarded. -func (p *Partition) compactLogFile(logFile *LogFile) { - if p.isClosing() { - return - } - - p.mu.Lock() - interrupt := p.compactionInterrupt - p.mu.Unlock() - - start := time.Now() - - // Retrieve identifier from current path. - id := logFile.ID() - assert(id != 0, "cannot parse log file id: %s", logFile.Path()) - - // Build a logger for this compaction. - log, logEnd := logger.NewOperation(context.TODO(), p.logger, "TSI log compaction", "tsi1_compact_log_file", zap.Int("tsi1_log_file_id", id)) - defer logEnd() - - // Create new index file. - path := filepath.Join(p.path, FormatIndexFileName(id, 1)) - f, err := os.Create(path) - if err != nil { - log.Error("Cannot create index file", zap.Error(err)) - return - } - defer f.Close() - - // Compact log file to new index file. - lvl := p.levels[1] - n, err := logFile.CompactTo(f, lvl.M, lvl.K, interrupt) - if err != nil { - log.Error("Cannot compact log file", zap.Error(err), zap.String("path", logFile.Path())) - return - } - - if err = f.Sync(); err != nil { - log.Error("Cannot sync index file", zap.Error(err)) - return - } - - // Close file. 
- if err := f.Close(); err != nil { - log.Error("Cannot close index file", zap.Error(err)) - return - } - - // Reopen as an index file. - file := NewIndexFile(p.sfile) - file.SetPath(path) - if err := file.Open(); err != nil { - log.Error("Cannot open compacted index file", zap.Error(err), zap.String("path", file.Path())) - return - } - - // Obtain lock to swap in index file and write manifest. - if err := func() (rErr error) { - p.mu.Lock() - defer p.mu.Unlock() - - // Replace previous log file with index file. - newFileSet := p.fileSet.MustReplace([]File{logFile}, file) - - defer errors2.Capture(&rErr, func() error { - if rErr != nil { - // close new file - file.Close() - } - return rErr - })() - - // Write new manifest. - manifestSize, err := p.manifest(newFileSet).Write() - - if err != nil { - return fmt.Errorf("manifest file write failed compacting log file %q: %w", p.ManifestPath(), err) - } - // Store the new FileSet in the partition now that the manifest has been written - p.fileSet = newFileSet - p.manifestSize = manifestSize - return nil - }(); err != nil { - log.Error("Cannot update manifest", zap.Error(err)) - return - } - - elapsed := time.Since(start) - log.Info("Log file compacted", - logger.DurationLiteral("elapsed", elapsed), - zap.Int64("bytes", n), - zap.Int("kb_per_sec", int(float64(n)/elapsed.Seconds())/1024), - ) - - // Closing the log file will automatically wait until the ref count is zero. - if err := logFile.Close(); err != nil { - log.Error("Cannot close log file", zap.Error(err)) - return - } else if err := os.Remove(logFile.Path()); err != nil { - log.Error("Cannot remove log file", zap.Error(err)) - return - } -} - -// unionStringSets returns the union of two sets -func unionStringSets(a, b map[string]struct{}) map[string]struct{} { - other := make(map[string]struct{}) - for k := range a { - other[k] = struct{}{} - } - for k := range b { - other[k] = struct{}{} - } - return other -} - -// intersectStringSets returns the intersection of two sets. -func intersectStringSets(a, b map[string]struct{}) map[string]struct{} { - if len(a) < len(b) { - a, b = b, a - } - - other := make(map[string]struct{}) - for k := range a { - if _, ok := b[k]; ok { - other[k] = struct{}{} - } - } - return other -} - -var fileIDRegex = regexp.MustCompile(`^L(\d+)-(\d+)\..+$`) - -// ParseFilename extracts the numeric id from a log or index file path. -// Returns 0 if it cannot be parsed. -func ParseFilename(name string) (level, id int) { - a := fileIDRegex.FindStringSubmatch(filepath.Base(name)) - if a == nil { - return 0, 0 - } - - level, _ = strconv.Atoi(a[1]) - id, _ = strconv.Atoi(a[2]) - return id, level -} - -// Manifest represents the list of log & index files that make up the index. -// The files are listed in time order, not necessarily ID order. -type Manifest struct { - Levels []CompactionLevel `json:"levels,omitempty"` - Files []string `json:"files,omitempty"` - - // Version should be updated whenever the TSI format has changed. - Version int `json:"version,omitempty"` - - path string // location on disk of the manifest. -} - -// NewManifest returns a new instance of Manifest with default compaction levels. -func NewManifest(path string) *Manifest { - m := &Manifest{ - Levels: make([]CompactionLevel, len(DefaultCompactionLevels)), - Version: Version, - path: path, - } - copy(m.Levels, DefaultCompactionLevels) - return m -} - -// HasFile returns true if name is listed in the log files or index files. 
-func (m *Manifest) HasFile(name string) bool { - for _, filename := range m.Files { - if filename == name { - return true - } - } - return false -} - -// Validate checks if the Manifest's version is compatible with this version -// of the tsi1 index. -func (m *Manifest) Validate() error { - // If we don't have an explicit version in the manifest file then we know - // it's not compatible with the latest tsi1 Index. - if m.Version != Version { - return fmt.Errorf("%q: %w", m.path, ErrIncompatibleVersion) - } - return nil -} - -// Write writes the manifest file to the provided path, returning the number of -// bytes written and an error, if any. -func (m *Manifest) Write() (int64, error) { - var tmp string - buf, err := json.MarshalIndent(m, "", " ") - if err != nil { - return 0, fmt.Errorf("failed marshaling %q: %w", m.path, err) - } - buf = append(buf, '\n') - - f, err := os.CreateTemp(filepath.Dir(m.path), ManifestFileName) - - if err != nil { - return 0, err - } - - // In correct operation, Remove() should fail because the file was renamed - defer os.Remove(tmp) - - err = func() (rErr error) { - // Close() before rename for Windows - defer errors2.Capture(&rErr, f.Close)() - - tmp = f.Name() - - if _, err = f.Write(buf); err != nil { - return fmt.Errorf("failed writing temporary manifest file %q: %w", tmp, err) - } - if err = f.Sync(); err != nil { - return fmt.Errorf("failed syncing temporary manifest file to disk %q: %w", tmp, err) - } - return nil - }() - if err != nil { - return 0, err - } - - if err = os.Rename(tmp, m.path); err != nil { - return 0, err - } - - return int64(len(buf)), nil -} - -// ReadManifestFile reads a manifest from a file path and returns the Manifest, -// the size of the manifest on disk, and any error if appropriate. -func ReadManifestFile(path string) (*Manifest, int64, error) { - buf, err := os.ReadFile(path) - if err != nil { - return nil, 0, err - } - - // Decode manifest. - var m Manifest - if err := json.Unmarshal(buf, &m); err != nil { - return nil, 0, fmt.Errorf("failed unmarshaling %q: %w", path, err) - } - - // Set the path of the manifest. - m.path = path - return &m, int64(len(buf)), nil -} - -func joinIntSlice(a []int, sep string) string { - other := make([]string, len(a)) - for i := range a { - other[i] = strconv.Itoa(a[i]) - } - return strings.Join(other, sep) -} - -// CompactionLevel represents a grouping of index files based on bloom filter -// settings. By having the same bloom filter settings, the filters -// can be merged and evaluated at a higher level. -type CompactionLevel struct { - // Bloom filter bit size & hash count - M uint64 `json:"m,omitempty"` - K uint64 `json:"k,omitempty"` -} - -// DefaultCompactionLevels is the default settings used by the index. -var DefaultCompactionLevels = []CompactionLevel{ - {M: 0, K: 0}, // L0: Log files, no filter. - {M: 1 << 25, K: 6}, // L1: Initial compaction - {M: 1 << 25, K: 6}, // L2 - {M: 1 << 26, K: 6}, // L3 - {M: 1 << 27, K: 6}, // L4 - {M: 1 << 28, K: 6}, // L5 - {M: 1 << 29, K: 6}, // L6 - {M: 1 << 30, K: 6}, // L7 -} - -// MaxIndexMergeCount is the maximum number of files that can be merged together at once. -const MaxIndexMergeCount = 2 - -// MaxIndexFileSize is the maximum expected size of an index file. -const MaxIndexFileSize = 4 * (1 << 30) - -// IsPartitionDir returns true if directory contains a MANIFEST file. 
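// Manifest.Write above persists the manifest atomically: marshal to JSON,
// write to a temporary file in the same directory, sync, close, and only then
// rename over the real path, so a crash can never leave a partially written
// MANIFEST in place. A minimal standalone sketch of the same pattern follows;
// the helper name is hypothetical, the sketch assumes only the standard
// library, and it is not part of this package.

package main

import (
	"os"
	"path/filepath"
)

// writeFileAtomic writes data to path via a temp file and an atomic rename.
func writeFileAtomic(path string, data []byte) error {
	f, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path)+".tmp")
	if err != nil {
		return err
	}
	tmp := f.Name()
	defer os.Remove(tmp) // fails harmlessly once the rename has succeeded

	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	if err := f.Sync(); err != nil { // flush contents to disk before the rename
		f.Close()
		return err
	}
	if err := f.Close(); err != nil { // close before rename, required on Windows
		return err
	}
	return os.Rename(tmp, path)
}

func main() {
	// Usage example: atomically (re)write a small JSON file.
	_ = writeFileAtomic(filepath.Join(os.TempDir(), "MANIFEST.example"), []byte("{}\n"))
}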
-func IsPartitionDir(path string) (bool, error) { - if _, err := os.Stat(filepath.Join(path, ManifestFileName)); os.IsNotExist(err) { - return false, nil - } else if err != nil { - return false, err - } - return true, nil -} diff --git a/tsdb/index/tsi1/partition_test.go b/tsdb/index/tsi1/partition_test.go deleted file mode 100644 index 094746664c1..00000000000 --- a/tsdb/index/tsi1/partition_test.go +++ /dev/null @@ -1,185 +0,0 @@ -package tsi1_test - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "syscall" - "testing" - - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" -) - -func TestPartition_Open(t *testing.T) { - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - // Opening a fresh index should set the MANIFEST version to current version. - p := NewPartition(t, sfile.SeriesFile) - t.Run("open new index", func(t *testing.T) { - if err := p.Open(); err != nil { - t.Fatal(err) - } - - // Check version set appropriately. - if got, exp := p.Manifest().Version, 1; got != exp { - t.Fatalf("got index version %d, expected %d", got, exp) - } - }) - - // Reopening an open index should return an error. - t.Run("reopen open index", func(t *testing.T) { - err := p.Open() - if err == nil { - p.Close() - t.Fatal("didn't get an error on reopen, but expected one") - } - p.Close() - }) - - // Opening an incompatible index should return an error. - incompatibleVersions := []int{-1, 0, 2} - for _, v := range incompatibleVersions { - t.Run(fmt.Sprintf("incompatible index version: %d", v), func(t *testing.T) { - p = NewPartition(t, sfile.SeriesFile) - // Manually create a MANIFEST file for an incompatible index version. - mpath := filepath.Join(p.Path(), tsi1.ManifestFileName) - m := tsi1.NewManifest(mpath) - m.Levels = nil - m.Version = v // Set example MANIFEST version. - if _, err := m.Write(); err != nil { - t.Fatal(err) - } - - // Log the MANIFEST file. - data, err := os.ReadFile(mpath) - if err != nil { - panic(err) - } - t.Logf("Incompatible MANIFEST: %s", data) - - // Opening this index should return an error because the MANIFEST has an - // incompatible version. 
- err = p.Open() - if !errors.Is(err, tsi1.ErrIncompatibleVersion) { - p.Close() - t.Fatalf("got error %v, expected %v", err, tsi1.ErrIncompatibleVersion) - } - }) - } -} - -func TestPartition_Manifest(t *testing.T) { - t.Run("current MANIFEST", func(t *testing.T) { - sfile := MustOpenSeriesFile(t) - t.Cleanup(func() { sfile.Close() }) - - p := MustOpenPartition(t, sfile.SeriesFile) - t.Cleanup(func() { p.Close() }) - - if got, exp := p.Manifest().Version, tsi1.Version; got != exp { - t.Fatalf("got MANIFEST version %d, expected %d", got, exp) - } - }) -} - -var badManifestPath string = filepath.Join(os.DevNull, tsi1.ManifestFileName) - -func TestPartition_Manifest_Write_Fail(t *testing.T) { - t.Run("write MANIFEST", func(t *testing.T) { - m := tsi1.NewManifest(badManifestPath) - _, err := m.Write() - if !errors.Is(err, syscall.ENOTDIR) { - t.Fatalf("expected: syscall.ENOTDIR, got %T: %v", err, err) - } - }) -} - -func TestPartition_PrependLogFile_Write_Fail(t *testing.T) { - t.Run("write MANIFEST", func(t *testing.T) { - sfile := MustOpenSeriesFile(t) - t.Cleanup(func() { sfile.Close() }) - - p := MustOpenPartition(t, sfile.SeriesFile) - t.Cleanup(func() { - if err := p.Close(); err != nil { - t.Fatalf("error closing partition: %v", err) - } - }) - p.Partition.MaxLogFileSize = -1 - fileN := p.FileN() - p.CheckLogFile() - if fileN >= p.FileN() { - t.Fatalf("manifest write prepending log file should have succeeded but number of files did not change correctly: expected more than %d files, got %d files", fileN, p.FileN()) - } - p.SetManifestPathForTest(badManifestPath) - fileN = p.FileN() - p.CheckLogFile() - if fileN != p.FileN() { - t.Fatalf("manifest write prepending log file should have failed, but number of files changed: expected %d files, got %d files", fileN, p.FileN()) - } - }) -} - -func TestPartition_Compact_Write_Fail(t *testing.T) { - t.Run("write MANIFEST", func(t *testing.T) { - sfile := MustOpenSeriesFile(t) - t.Cleanup(func() { sfile.Close() }) - - p := MustOpenPartition(t, sfile.SeriesFile) - t.Cleanup(func() { - if err := p.Close(); err != nil { - t.Fatalf("error closing partition: %v", err) - } - }) - p.Partition.MaxLogFileSize = -1 - fileN := p.FileN() - p.Compact() - if (1 + fileN) != p.FileN() { - t.Fatalf("manifest write in compaction should have succeeded, but number of files did not change correctly: expected %d files, got %d files", fileN+1, p.FileN()) - } - p.SetManifestPathForTest(badManifestPath) - fileN = p.FileN() - p.Compact() - if fileN != p.FileN() { - t.Fatalf("manifest write should have failed the compaction, but number of files changed: expected %d files, got %d files", fileN, p.FileN()) - } - }) -} - -// Partition is a test wrapper for tsi1.Partition. -type Partition struct { - *tsi1.Partition -} - -// NewPartition returns a new instance of Partition at a temporary path. -func NewPartition(tb testing.TB, sfile *tsdb.SeriesFile) *Partition { - return &Partition{Partition: tsi1.NewPartition(sfile, MustTempPartitionDir(tb))} -} - -// MustOpenPartition returns a new, open index. Panic on error. -func MustOpenPartition(tb testing.TB, sfile *tsdb.SeriesFile) *Partition { - p := NewPartition(tb, sfile) - if err := p.Open(); err != nil { - panic(err) - } - return p -} - -// Close closes and removes the index directory. -func (p *Partition) Close() error { - return p.Partition.Close() -} - -// Reopen closes and opens the index. 
-func (p *Partition) Reopen() error { - if err := p.Partition.Close(); err != nil { - return err - } - - sfile, path := p.SeriesFile(), p.Path() - p.Partition = tsi1.NewPartition(sfile, path) - return p.Open() -} diff --git a/tsdb/index/tsi1/sql_index_exporter.go b/tsdb/index/tsi1/sql_index_exporter.go deleted file mode 100644 index c95f1fc906e..00000000000 --- a/tsdb/index/tsi1/sql_index_exporter.go +++ /dev/null @@ -1,229 +0,0 @@ -package tsi1 - -import ( - "bytes" - "fmt" - "io" - "strings" - "unicode/utf8" - - "go.uber.org/zap" -) - -// SQLIndexExporter writes out all TSI data for an index to a SQL export. -type SQLIndexExporter struct { - w io.Writer - - initialized bool - - // Logs non-fatal warnings. - Logger *zap.Logger - - // Write schema, if true. - ShowSchema bool -} - -// NewSQLIndexExporter returns a new instance of SQLIndexExporter. -func NewSQLIndexExporter(w io.Writer) *SQLIndexExporter { - return &SQLIndexExporter{ - w: w, - - Logger: zap.NewNop(), - ShowSchema: true, - } -} - -// Close ends the export and writes final output. -func (e *SQLIndexExporter) Close() error { - return nil -} - -// ExportIndex writes all blocks of the TSM file. -func (e *SQLIndexExporter) ExportIndex(idx *Index) error { - if err := e.initialize(); err != nil { - return err - } - - fmt.Fprintln(e.w, `BEGIN TRANSACTION;`) - - // Iterate over each measurement across all partitions. - itr, err := idx.MeasurementIterator() - if err != nil { - return err - } else if itr == nil { - return nil - } - defer itr.Close() - - for { - name, err := itr.Next() - if err != nil { - return err - } else if name == nil { - break - } - - if err := e.exportMeasurement(idx, name); err != nil { - return err - } - } - - fmt.Fprintln(e.w, "COMMIT;") - return nil -} - -func (e *SQLIndexExporter) exportMeasurement(idx *Index, name []byte) error { - if err := e.exportMeasurementSeries(idx, name); err != nil { - return err - } - - itr, err := idx.TagKeyIterator(name) - if err != nil { - return err - } else if itr == nil { - return nil - } - defer itr.Close() - - for { - key, err := itr.Next() - if err != nil { - return err - } else if key == nil { - break - } - - if err := e.exportTagKey(idx, name, key); err != nil { - return err - } - } - return nil -} - -func (e *SQLIndexExporter) exportMeasurementSeries(idx *Index, name []byte) error { - itr, err := idx.MeasurementSeriesIDIterator(name) - if err != nil { - return err - } else if itr == nil { - return nil - } - defer itr.Close() - - for { - elem, err := itr.Next() - if err != nil { - return err - } else if elem.SeriesID == 0 { - break - } - - if _, err := fmt.Fprintf(e.w, "INSERT INTO measurement_series (name, series_id) VALUES (%s, %d);\n", - quoteSQL(string(name)), - elem.SeriesID); err != nil { - return err - } - } - return nil -} - -func (e *SQLIndexExporter) exportTagKey(idx *Index, name, key []byte) error { - itr, err := idx.TagValueIterator(name, key) - if err != nil { - return err - } else if itr == nil { - return nil - } - defer itr.Close() - - for { - value, err := itr.Next() - if err != nil { - return err - } else if value == nil { - break - } - - if err := e.exportTagValue(idx, name, key, value); err != nil { - return err - } - } - return nil -} - -func (e *SQLIndexExporter) exportTagValue(idx *Index, name, key, value []byte) error { - itr, err := idx.TagValueSeriesIDIterator(name, key, value) - if err != nil { - return err - } else if itr == nil { - return nil - } - defer itr.Close() - - for { - elem, err := itr.Next() - if err != nil { - return err - } else if 
elem.SeriesID == 0 { - break - } - - // Replace special case keys for measurement & field. - if bytes.Equal(key, []byte{0}) { - key = []byte("_measurement") - } else if bytes.Equal(key, []byte{0xff}) { - key = []byte("_field") - } - - if _, err := fmt.Fprintf(e.w, - "INSERT INTO tag_value_series (name, key, value, series_id) VALUES (%s, %s, %s, %d);\n", - quoteSQL(string(name)), - quoteSQL(string(key)), - quoteSQL(string(value)), - elem.SeriesID, - ); err != nil { - return err - } - } - return nil -} - -func (e *SQLIndexExporter) initialize() error { - if e.initialized { - return nil - } - e.initialized = true - - if !e.ShowSchema { - return nil - } - fmt.Fprintln(e.w, ` -CREATE TABLE IF NOT EXISTS measurement_series ( - name TEXT NOT NULL, - series_id INTEGER NOT NULL -); - -CREATE TABLE IF NOT EXISTS tag_value_series ( - name TEXT NOT NULL, - key TEXT NOT NULL, - value TEXT NOT NULL, - series_id INTEGER NOT NULL -); -`[1:]) - - return nil -} - -func quoteSQL(s string) string { - return `'` + sqlReplacer.Replace(toValidUTF8(s)) + `'` -} - -var sqlReplacer = strings.NewReplacer(`'`, `''`, "\x00", "") - -func toValidUTF8(s string) string { - return strings.Map(func(r rune) rune { - if r == utf8.RuneError { - return -1 - } - return r - }, s) -} diff --git a/tsdb/index/tsi1/sql_index_exporter_test.go b/tsdb/index/tsi1/sql_index_exporter_test.go deleted file mode 100644 index bff5c5231ee..00000000000 --- a/tsdb/index/tsi1/sql_index_exporter_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package tsi1_test - -import ( - "bytes" - "testing" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" - "go.uber.org/zap/zaptest" -) - -func TestSQLIndexExporter_ExportIndex(t *testing.T) { - idx := MustOpenIndex(t, 1) - defer idx.Close() - - // Add series to index. - if err := idx.CreateSeriesSliceIfNotExists([]Series{ - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east", "status": "ok"})}, - {Name: []byte("disk"), Tags: models.NewTags(map[string]string{"region": "west"})}, - {Name: []byte("memory"), Tags: models.NewTags(map[string]string{"region": "east"})}, - }); err != nil { - t.Fatal(err) - } - - // Expected output. - want := ` -BEGIN TRANSACTION; -INSERT INTO measurement_series (name, series_id) VALUES ('cpu', 3); -INSERT INTO tag_value_series (name, key, value, series_id) VALUES ('cpu', 'region', 'east', 3); -INSERT INTO tag_value_series (name, key, value, series_id) VALUES ('cpu', 'status', 'ok', 3); -INSERT INTO measurement_series (name, series_id) VALUES ('disk', 7); -INSERT INTO tag_value_series (name, key, value, series_id) VALUES ('disk', 'region', 'west', 7); -INSERT INTO measurement_series (name, series_id) VALUES ('memory', 8); -INSERT INTO tag_value_series (name, key, value, series_id) VALUES ('memory', 'region', 'east', 8); -COMMIT; -`[1:] - - // Export file to SQL. 
- var buf bytes.Buffer - e := tsi1.NewSQLIndexExporter(&buf) - e.ShowSchema = false - e.Logger = zaptest.NewLogger(t) - if err := e.ExportIndex(idx.Index); err != nil { - t.Fatal(err) - } else if err := e.Close(); err != nil { - t.Fatal(err) - } else if got := buf.String(); got != want { - t.Fatalf("unexpected output:\ngot=%s\n--\nwant=%s", got, want) - } -} diff --git a/tsdb/index/tsi1/tag_block.go b/tsdb/index/tsi1/tag_block.go deleted file mode 100644 index ae7739a7cf1..00000000000 --- a/tsdb/index/tsi1/tag_block.go +++ /dev/null @@ -1,825 +0,0 @@ -package tsi1 - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - - "github.com/influxdata/influxdb/v2/pkg/rhh" - "github.com/influxdata/influxdb/v2/tsdb" -) - -// TagBlockVersion is the version of the tag block. -const TagBlockVersion = 1 - -// Tag key flag constants. -const ( - TagKeyTombstoneFlag = 0x01 -) - -// Tag value flag constants. -const ( - TagValueTombstoneFlag = 0x01 - TagValueSeriesIDSetFlag = 0x02 -) - -// TagBlock variable size constants. -const ( - // TagBlock key block fields. - TagKeyNSize = 8 - TagKeyOffsetSize = 8 - - // TagBlock value block fields. - TagValueNSize = 8 - TagValueOffsetSize = 8 -) - -// TagBlock errors. -var ( - ErrUnsupportedTagBlockVersion = errors.New("unsupported tag block version") - ErrTagBlockSizeMismatch = errors.New("tag block size mismatch") -) - -// TagBlock represents tag key/value block for a single measurement. -type TagBlock struct { - data []byte - - valueData []byte - keyData []byte - hashData []byte - - version int // tag block version -} - -// Version returns the encoding version parsed from the data. -// Only valid after UnmarshalBinary() has been successfully invoked. -func (blk *TagBlock) Version() int { return blk.version } - -// UnmarshalBinary unpacks data into the tag block. Tag block is not copied so data -// should be retained and unchanged after being passed into this function. -func (blk *TagBlock) UnmarshalBinary(data []byte) error { - // Read trailer. - t, err := ReadTagBlockTrailer(data) - if err != nil { - return err - } - - // Verify data size is correct. - if int64(len(data)) != t.Size { - return ErrTagBlockSizeMismatch - } - - // Save data section. - blk.valueData = data[t.ValueData.Offset:] - blk.valueData = blk.valueData[:t.ValueData.Size] - - // Save key data section. - blk.keyData = data[t.KeyData.Offset:] - blk.keyData = blk.keyData[:t.KeyData.Size] - - // Save hash index block. - blk.hashData = data[t.HashIndex.Offset:] - blk.hashData = blk.hashData[:t.HashIndex.Size] - - // Save entire block. - blk.data = data - - return nil -} - -// TagKeyElem returns an element for a tag key. -// Returns an element with a nil key if not found. -func (blk *TagBlock) TagKeyElem(key []byte) TagKeyElem { - var elem TagBlockKeyElem - if !blk.DecodeTagKeyElem(key, &elem) { - return nil - } - return &elem -} - -func (blk *TagBlock) DecodeTagKeyElem(key []byte, elem *TagBlockKeyElem) bool { - keyN := int64(binary.BigEndian.Uint64(blk.hashData[:TagKeyNSize])) - hash := rhh.HashKey(key) - pos := hash % keyN - - // Track current distance - var d int64 - for { - // Find offset of tag key. - offset := binary.BigEndian.Uint64(blk.hashData[TagKeyNSize+(pos*TagKeyOffsetSize):]) - if offset == 0 { - return false - } - - // Parse into element. - elem.unmarshal(blk.data[offset:], blk.data) - - // Return if keys match. - if bytes.Equal(elem.key, key) { - return true - } - - // Check if we've exceeded the probe distance. 
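// Robin Hood hashing note for the early-exit check below: on insertion the
// writer displaces any resident key whose distance from its home slot is
// smaller than the probing key's current distance, so if a key were present
// it would have been placed no later than the first slot whose resident sits
// closer to its own home than the key's current probe distance. During
// lookup, rhh.Dist gives the displacement of the resident key found at pos;
// once our probe distance d exceeds it, the key being searched for cannot
// appear later in the chain and the lookup can stop instead of scanning the
// rest of the table.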
- if d > rhh.Dist(rhh.HashKey(elem.key), pos, keyN) { - return false - } - - // Move position forward. - pos = (pos + 1) % keyN - d++ - - if d > keyN { - return false - } - } -} - -// TagValueElem returns an element for a tag value. -func (blk *TagBlock) TagValueElem(key, value []byte) TagValueElem { - var valueElem TagBlockValueElem - if !blk.DecodeTagValueElem(key, value, &valueElem) { - return nil - } - return &valueElem -} - -// DecodeTagValueElem returns an element for a tag value. -func (blk *TagBlock) DecodeTagValueElem(key, value []byte, valueElem *TagBlockValueElem) bool { - // Find key element, exit if not found. - var keyElem TagBlockKeyElem - if !blk.DecodeTagKeyElem(key, &keyElem) { - return false - } - - // Slice hash index data. - hashData := keyElem.hashIndex.buf - - valueN := int64(binary.BigEndian.Uint64(hashData[:TagValueNSize])) - hash := rhh.HashKey(value) - pos := hash % valueN - - // Track current distance - var d int64 - for { - // Find offset of tag value. - offset := binary.BigEndian.Uint64(hashData[TagValueNSize+(pos*TagValueOffsetSize):]) - if offset == 0 { - return false - } - - // Parse into element. - valueElem.unmarshal(blk.data[offset:]) - - // Return if values match. - if bytes.Equal(valueElem.value, value) { - return true - } - - // Check if we've exceeded the probe distance. - max := rhh.Dist(rhh.HashKey(valueElem.value), pos, valueN) - if d > max { - return false - } - - // Move position forward. - pos = (pos + 1) % valueN - d++ - - if d > valueN { - return false - } - } -} - -// TagKeyIterator returns an iterator over all the keys in the block. -func (blk *TagBlock) TagKeyIterator() TagKeyIterator { - return &tagBlockKeyIterator{ - blk: blk, - keyData: blk.keyData, - } -} - -// tagBlockKeyIterator represents an iterator over all keys in a TagBlock. -type tagBlockKeyIterator struct { - blk *TagBlock - keyData []byte - e TagBlockKeyElem -} - -// Next returns the next element in the iterator. -func (itr *tagBlockKeyIterator) Next() TagKeyElem { - // Exit when there is no data left. - if len(itr.keyData) == 0 { - return nil - } - - // Unmarshal next element & move data forward. - itr.e.unmarshal(itr.keyData, itr.blk.data) - itr.keyData = itr.keyData[itr.e.size:] - - assert(len(itr.e.Key()) > 0, "invalid zero-length tag key") - return &itr.e -} - -// tagBlockValueIterator represents an iterator over all values for a tag key. -type tagBlockValueIterator struct { - data []byte - e TagBlockValueElem -} - -// Next returns the next element in the iterator. -func (itr *tagBlockValueIterator) Next() TagValueElem { - // Exit when there is no data left. - if len(itr.data) == 0 { - return nil - } - - // Unmarshal next element & move data forward. - itr.e.unmarshal(itr.data) - itr.data = itr.data[itr.e.size:] - - assert(len(itr.e.Value()) > 0, "invalid zero-length tag value") - return &itr.e -} - -// TagBlockKeyElem represents a tag key element in a TagBlock. -type TagBlockKeyElem struct { - flag byte - key []byte - - // Value data - data struct { - offset uint64 - size uint64 - buf []byte - } - - // Value hash index data - hashIndex struct { - offset uint64 - size uint64 - buf []byte - } - - size int -} - -// Deleted returns true if the key has been tombstoned. -func (e *TagBlockKeyElem) Deleted() bool { return (e.flag & TagKeyTombstoneFlag) != 0 } - -// Key returns the key name of the element. -func (e *TagBlockKeyElem) Key() []byte { return e.key } - -// TagValueIterator returns an iterator over the key's values. 
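The flag bytes used throughout this block format pack independent bits: bit 0 is the tombstone marker for both keys and values, and for values bit 1 selects the roaring-bitmap ("series ID set") encoding over the legacy uvarint encoding. A tiny sketch of reading those bits; the constants mirror the ones defined at the top of this file, and describe is just an illustrative helper:

package main

import "fmt"

// Mirrors the flag constants above: bit 0 marks a tombstone, and for values
// bit 1 selects the roaring "series ID set" encoding over uvarint deltas.
const (
	TagValueTombstoneFlag   = 0x01
	TagValueSeriesIDSetFlag = 0x02
)

func describe(flag byte) string {
	enc := "uvarint series data"
	if flag&TagValueSeriesIDSetFlag != 0 {
		enc = "series ID set data"
	}
	if flag&TagValueTombstoneFlag != 0 {
		enc += ", tombstoned"
	}
	return enc
}

func main() {
	fmt.Println(describe(0x00)) // uvarint series data
	fmt.Println(describe(0x03)) // series ID set data, tombstoned
}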
-func (e *TagBlockKeyElem) TagValueIterator() TagValueIterator { - return &tagBlockValueIterator{data: e.data.buf} -} - -// unmarshal unmarshals buf into e. -// The data argument represents the entire block data. -func (e *TagBlockKeyElem) unmarshal(buf, data []byte) { - start := len(buf) - - // Parse flag data. - e.flag, buf = buf[0], buf[1:] - - // Parse data offset/size. - e.data.offset, buf = binary.BigEndian.Uint64(buf), buf[8:] - e.data.size, buf = binary.BigEndian.Uint64(buf), buf[8:] - - // Slice data. - e.data.buf = data[e.data.offset:] - e.data.buf = e.data.buf[:e.data.size] - - // Parse hash index offset/size. - e.hashIndex.offset, buf = binary.BigEndian.Uint64(buf), buf[8:] - e.hashIndex.size, buf = binary.BigEndian.Uint64(buf), buf[8:] - - // Slice hash index data. - e.hashIndex.buf = data[e.hashIndex.offset:] - e.hashIndex.buf = e.hashIndex.buf[:e.hashIndex.size] - - // Parse key. - n, sz := binary.Uvarint(buf) - e.key, buf = buf[sz:sz+int(n)], buf[int(n)+sz:] - - // Save length of elem. - e.size = start - len(buf) -} - -// TagBlockValueElem represents a tag value element. -type TagBlockValueElem struct { - flag byte - value []byte - - // Legacy uvarint-encoded series data. - // Mutually exclusive with seriesIDSetData field. - series struct { - n uint64 // Series count - data []byte // Raw series data - } - - // Roaring bitmap encoded series data. - // Mutually exclusive with series.data field. - seriesIDSetData []byte - - size int -} - -// Deleted returns true if the element has been tombstoned. -func (e *TagBlockValueElem) Deleted() bool { return (e.flag & TagValueTombstoneFlag) != 0 } - -// Value returns the value for the element. -func (e *TagBlockValueElem) Value() []byte { return e.value } - -// SeriesN returns the series count. -func (e *TagBlockValueElem) SeriesN() uint64 { return e.series.n } - -// SeriesData returns the raw series data. -func (e *TagBlockValueElem) SeriesData() []byte { return e.series.data } - -// SeriesID returns series ID at an index. -func (e *TagBlockValueElem) SeriesID(i int) uint64 { - return binary.BigEndian.Uint64(e.series.data[i*SeriesIDSize:]) -} - -// SeriesIDs returns a list decoded series ids. -func (e *TagBlockValueElem) SeriesIDs() ([]uint64, error) { - if e.seriesIDSetData != nil { - ss, err := e.SeriesIDSet() - if err != nil { - return nil, err - } - return ss.Slice(), nil - } - - a := make([]uint64, 0, e.series.n) - var prev uint64 - for data := e.series.data; len(data) > 0; { - delta, n, err := uvarint(data) - if err != nil { - return nil, err - } - data = data[n:] - - seriesID := prev + uint64(delta) - a = append(a, seriesID) - prev = seriesID - } - return a, nil -} - -// SeriesIDSet returns a set of series ids. -func (e *TagBlockValueElem) SeriesIDSet() (*tsdb.SeriesIDSet, error) { - ss := tsdb.NewSeriesIDSet() - - // Read bitmap data directly from mmap, if available. - if e.seriesIDSetData != nil { - if err := ss.UnmarshalBinaryUnsafe(e.seriesIDSetData); err != nil { - return nil, err - } - return ss, nil - } - - // Otherwise decode series ids from uvarint encoding. - var prev uint64 - for data := e.series.data; len(data) > 0; { - delta, n, err := uvarint(data) - if err != nil { - return nil, err - } - data = data[n:] - - seriesID := prev + uint64(delta) - ss.AddNoLock(seriesID) - prev = seriesID - } - return ss, nil -} - -// Size returns the size of the element. -func (e *TagBlockValueElem) Size() int { return e.size } - -// unmarshal unmarshals buf into e. 
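SeriesIDs above decodes the legacy encoding by accumulating uvarint deltas onto the previous ID. A round-trip sketch of that scheme; the function names are illustrative and error handling is omitted for brevity:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeSeriesIDs stores sorted IDs as uvarint deltas from the previous ID,
// the same layout SeriesIDs above walks when no series ID set is present.
func encodeSeriesIDs(ids []uint64) []byte {
	var buf []byte
	var prev uint64
	tmp := make([]byte, binary.MaxVarintLen64)
	for _, id := range ids {
		n := binary.PutUvarint(tmp, id-prev)
		buf = append(buf, tmp[:n]...)
		prev = id
	}
	return buf
}

func decodeSeriesIDs(data []byte) []uint64 {
	var ids []uint64
	var prev uint64
	for len(data) > 0 {
		delta, n := binary.Uvarint(data)
		data = data[n:]
		prev += delta
		ids = append(ids, prev)
	}
	return ids
}

func main() {
	enc := encodeSeriesIDs([]uint64{3, 7, 8, 1000})
	fmt.Println(len(enc), decodeSeriesIDs(enc)) // 5 [3 7 8 1000]
}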
-func (e *TagBlockValueElem) unmarshal(buf []byte) { - start := len(buf) - - // Parse flag data. - e.flag, buf = buf[0], buf[1:] - - // Parse value. - sz, n := binary.Uvarint(buf) - e.value, buf = buf[n:n+int(sz)], buf[n+int(sz):] - - // Parse series count. - v, n := binary.Uvarint(buf) - e.series.n = uint64(v) - buf = buf[n:] - - // Parse data block size. - sz, n = binary.Uvarint(buf) - buf = buf[n:] - - // Parse series data (original uvarint encoded or roaring bitmap). - if e.flag&TagValueSeriesIDSetFlag == 0 { - e.series.data, buf = buf[:sz], buf[sz:] - } else { - // buf = memalign(buf) - e.seriesIDSetData, buf = buf, buf[sz:] - } - - // Save length of elem. - e.size = start - len(buf) -} - -// TagBlockTrailerSize is the total size of the on-disk trailer. -const TagBlockTrailerSize = 0 + - 8 + 8 + // value data offset/size - 8 + 8 + // key data offset/size - 8 + 8 + // hash index offset/size - 8 + // size - 2 // version - -// TagBlockTrailer represents meta data at the end of a TagBlock. -type TagBlockTrailer struct { - Version int // Encoding version - Size int64 // Total size w/ trailer - - // Offset & size of value data section. - ValueData struct { - Offset int64 - Size int64 - } - - // Offset & size of key data section. - KeyData struct { - Offset int64 - Size int64 - } - - // Offset & size of hash map section. - HashIndex struct { - Offset int64 - Size int64 - } -} - -// WriteTo writes the trailer to w. -func (t *TagBlockTrailer) WriteTo(w io.Writer) (n int64, err error) { - // Write data info. - if err := writeUint64To(w, uint64(t.ValueData.Offset), &n); err != nil { - return n, err - } else if err := writeUint64To(w, uint64(t.ValueData.Size), &n); err != nil { - return n, err - } - - // Write key data info. - if err := writeUint64To(w, uint64(t.KeyData.Offset), &n); err != nil { - return n, err - } else if err := writeUint64To(w, uint64(t.KeyData.Size), &n); err != nil { - return n, err - } - - // Write hash index info. - if err := writeUint64To(w, uint64(t.HashIndex.Offset), &n); err != nil { - return n, err - } else if err := writeUint64To(w, uint64(t.HashIndex.Size), &n); err != nil { - return n, err - } - - // Write total size & encoding version. - if err := writeUint64To(w, uint64(t.Size), &n); err != nil { - return n, err - } else if err := writeUint16To(w, IndexFileVersion, &n); err != nil { - return n, err - } - - return n, nil -} - -// ReadTagBlockTrailer returns the tag block trailer from data. -func ReadTagBlockTrailer(data []byte) (TagBlockTrailer, error) { - var t TagBlockTrailer - - // Read version. - t.Version = int(binary.BigEndian.Uint16(data[len(data)-2:])) - if t.Version != TagBlockVersion { - return t, ErrUnsupportedTagBlockVersion - } - - // Slice trailer data. - buf := data[len(data)-TagBlockTrailerSize:] - - // Read data section info. - t.ValueData.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - t.ValueData.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - - // Read key section info. - t.KeyData.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - t.KeyData.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - - // Read hash section info. - t.HashIndex.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - t.HashIndex.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] - - // Read total size. - t.Size = int64(binary.BigEndian.Uint64(buf[0:8])) - - return t, nil -} - -// TagBlockEncoder encodes a tags to a TagBlock section. 
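The trailer parsed above is fixed-width and always sits at the very end of the block: three big-endian offset/size pairs, the total block size, then a uint16 version, 58 bytes in all. A small sketch that writes a trailer-shaped byte slice with made-up offsets and reads the version and total size back from the tail:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// The offsets and sizes below are invented numbers, not real block
	// contents; only the layout matches the trailer format above.
	var buf bytes.Buffer
	for _, v := range []uint64{
		1, 100, // value data offset/size
		101, 50, // key data offset/size
		151, 20, // hash index offset/size
		171 + 58, // total size, including this 58-byte trailer
	} {
		binary.Write(&buf, binary.BigEndian, v)
	}
	binary.Write(&buf, binary.BigEndian, uint16(1)) // TagBlockVersion

	trailer := buf.Bytes()
	version := binary.BigEndian.Uint16(trailer[len(trailer)-2:])
	total := binary.BigEndian.Uint64(trailer[len(trailer)-10 : len(trailer)-2])
	fmt.Println(len(trailer), version, total) // 58 1 229
}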
-type TagBlockEncoder struct { - w io.Writer - buf bytes.Buffer - - // Track value offsets. - offsets *rhh.HashMap - - // Track bytes written, sections. - n int64 - trailer TagBlockTrailer - - // Track tag keys. - keys []tagKeyEncodeEntry - prevValue []byte -} - -// NewTagBlockEncoder returns a new TagBlockEncoder. -func NewTagBlockEncoder(w io.Writer) *TagBlockEncoder { - return &TagBlockEncoder{ - w: w, - offsets: rhh.NewHashMap(rhh.Options{LoadFactor: LoadFactor}), - trailer: TagBlockTrailer{ - Version: TagBlockVersion, - }, - } -} - -// N returns the number of bytes written. -func (enc *TagBlockEncoder) N() int64 { return enc.n } - -// EncodeKey writes a tag key to the underlying writer. -func (enc *TagBlockEncoder) EncodeKey(key []byte, deleted bool) error { - // An initial empty byte must be written. - if err := enc.ensureHeaderWritten(); err != nil { - return err - } - - // Verify key is lexicographically after previous key. - if len(enc.keys) > 0 { - prev := enc.keys[len(enc.keys)-1].key - if cmp := bytes.Compare(prev, key); cmp == 1 { - return fmt.Errorf("tag key out of order: prev=%s, new=%s", prev, key) - } else if cmp == 0 { - return fmt.Errorf("tag key already encoded: %s", key) - } - } - - // Flush values section for key. - if err := enc.flushValueHashIndex(); err != nil { - return err - } - - // Append key on to the end of the key list. - entry := tagKeyEncodeEntry{ - key: key, - deleted: deleted, - } - entry.data.offset = enc.n - - enc.keys = append(enc.keys, entry) - - // Clear previous value. - enc.prevValue = nil - - return nil -} - -// EncodeValue writes a tag value to the underlying writer. -// The tag key must be lexicographical sorted after the previous encoded tag key. -func (enc *TagBlockEncoder) EncodeValue(value []byte, deleted bool, ss *tsdb.SeriesIDSet) error { - if len(enc.keys) == 0 { - return fmt.Errorf("tag key must be encoded before encoding values") - } else if len(value) == 0 { - return fmt.Errorf("zero length tag value not allowed") - } - - // Validate that keys are in-order. - if cmp := bytes.Compare(enc.prevValue, value); cmp == 1 { - return fmt.Errorf("tag value out of order: prev=%s, new=%s", enc.prevValue, value) - } else if cmp == 0 { - return fmt.Errorf("tag value already encoded: %s", value) - } - - // Save offset to hash map. - enc.offsets.Put(value, enc.n) - - // Write flag. - if err := writeUint8To(enc.w, encodeTagValueFlag(deleted), &enc.n); err != nil { - return err - } - - // Write value. - if err := writeUvarintTo(enc.w, uint64(len(value)), &enc.n); err != nil { - return err - } else if err := writeTo(enc.w, value, &enc.n); err != nil { - return err - } - - // Build series data in buffer. - enc.buf.Reset() - if _, err := ss.WriteTo(&enc.buf); err != nil { - return err - } - - // Write series count. - if err := writeUvarintTo(enc.w, uint64(ss.Cardinality()), &enc.n); err != nil { - return err - } - - // Write data size & buffer. - if err := writeUvarintTo(enc.w, uint64(enc.buf.Len()), &enc.n); err != nil { - return err - } - - // Word align bitmap data. - // if offset := (enc.n) % 8; offset != 0 { - // if err := writeTo(enc.w, make([]byte, 8-offset), &enc.n); err != nil { - // return err - // } - // } - - nn, err := enc.buf.WriteTo(enc.w) - if enc.n += nn; err != nil { - return err - } - - // Save previous value. - enc.prevValue = value - - return nil -} - -// Close flushes the trailer of the encoder to the writer. -func (enc *TagBlockEncoder) Close() error { - // Flush last value set. 
- if err := enc.ensureHeaderWritten(); err != nil { - return err - } else if err := enc.flushValueHashIndex(); err != nil { - return err - } - - // Save ending position of entire data block. - enc.trailer.ValueData.Size = enc.n - enc.trailer.ValueData.Offset - - // Write key block to point to value blocks. - if err := enc.encodeTagKeyBlock(); err != nil { - return err - } - - // Compute total size w/ trailer. - enc.trailer.Size = enc.n + TagBlockTrailerSize - - // Write trailer. - nn, err := enc.trailer.WriteTo(enc.w) - enc.n += nn - return err -} - -// ensureHeaderWritten writes a single byte to offset the rest of the block. -func (enc *TagBlockEncoder) ensureHeaderWritten() error { - if enc.n > 0 { - return nil - } else if _, err := enc.w.Write([]byte{0}); err != nil { - return err - } - - enc.n++ - enc.trailer.ValueData.Offset = enc.n - - return nil -} - -// flushValueHashIndex builds writes the hash map at the end of a value set. -func (enc *TagBlockEncoder) flushValueHashIndex() error { - // Ignore if no keys have been written. - if len(enc.keys) == 0 { - return nil - } - key := &enc.keys[len(enc.keys)-1] - - // Save size of data section. - key.data.size = enc.n - key.data.offset - - // Encode hash map length. - key.hashIndex.offset = enc.n - if err := writeUint64To(enc.w, uint64(enc.offsets.Cap()), &enc.n); err != nil { - return err - } - - // Encode hash map offset entries. - for i := int64(0); i < enc.offsets.Cap(); i++ { - _, v := enc.offsets.Elem(i) - offset, _ := v.(int64) - if err := writeUint64To(enc.w, uint64(offset), &enc.n); err != nil { - return err - } - } - key.hashIndex.size = enc.n - key.hashIndex.offset - - // Clear offsets. - enc.offsets = rhh.NewHashMap(rhh.Options{LoadFactor: LoadFactor}) - - return nil -} - -// encodeTagKeyBlock encodes the keys section to the writer. -func (enc *TagBlockEncoder) encodeTagKeyBlock() error { - offsets := rhh.NewHashMap(rhh.Options{Capacity: int64(len(enc.keys)), LoadFactor: LoadFactor}) - - // Encode key list in sorted order. - enc.trailer.KeyData.Offset = enc.n - for i := range enc.keys { - entry := &enc.keys[i] - - // Save current offset so we can use it in the hash index. - offsets.Put(entry.key, enc.n) - - if err := writeUint8To(enc.w, encodeTagKeyFlag(entry.deleted), &enc.n); err != nil { - return err - } - - // Write value data offset & size. - if err := writeUint64To(enc.w, uint64(entry.data.offset), &enc.n); err != nil { - return err - } else if err := writeUint64To(enc.w, uint64(entry.data.size), &enc.n); err != nil { - return err - } - - // Write value hash index offset & size. - if err := writeUint64To(enc.w, uint64(entry.hashIndex.offset), &enc.n); err != nil { - return err - } else if err := writeUint64To(enc.w, uint64(entry.hashIndex.size), &enc.n); err != nil { - return err - } - - // Write key length and data. - if err := writeUvarintTo(enc.w, uint64(len(entry.key)), &enc.n); err != nil { - return err - } else if err := writeTo(enc.w, entry.key, &enc.n); err != nil { - return err - } - } - enc.trailer.KeyData.Size = enc.n - enc.trailer.KeyData.Offset - - // Encode hash map length. - enc.trailer.HashIndex.Offset = enc.n - if err := writeUint64To(enc.w, uint64(offsets.Cap()), &enc.n); err != nil { - return err - } - - // Encode hash map offset entries. 
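flushValueHashIndex and encodeTagKeyBlock both emit the same on-disk shape: a slot count followed by one big-endian uint64 offset per slot, with 0 marking an empty slot. That sentinel is also why ensureHeaderWritten pads the block with a single leading byte, so no real element can ever sit at offset 0. A rough sketch of the layout; plain linear probing and hash/fnv stand in for the rhh package, and the names and numbers are illustrative:

package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
)

// buildIndex writes a slot count followed by one uint64 file offset per slot,
// with 0 meaning "empty", the same shape the encoder's hash indexes take.
func buildIndex(offsets map[string]uint64, slots int) []byte {
	table := make([]uint64, slots)
	for key, off := range offsets {
		pos := int(fnv1a(key) % uint64(slots))
		for table[pos] != 0 {
			pos = (pos + 1) % slots
		}
		table[pos] = off
	}
	buf := make([]byte, 8*(slots+1))
	binary.BigEndian.PutUint64(buf[0:8], uint64(slots))
	for i, off := range table {
		binary.BigEndian.PutUint64(buf[8+8*i:], off)
	}
	return buf
}

func fnv1a(s string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(s))
	return h.Sum64()
}

func main() {
	// Offsets are positions within the tag block; they are never 0 because
	// the encoder writes one padding byte before any element.
	idx := buildIndex(map[string]uint64{"east": 17, "west": 42}, 4)
	fmt.Println("slots:", binary.BigEndian.Uint64(idx[:8]), "bytes:", len(idx))
}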
- for i := int64(0); i < offsets.Cap(); i++ { - _, v := offsets.Elem(i) - offset, _ := v.(int64) - if err := writeUint64To(enc.w, uint64(offset), &enc.n); err != nil { - return err - } - } - enc.trailer.HashIndex.Size = enc.n - enc.trailer.HashIndex.Offset - - return nil -} - -type tagKeyEncodeEntry struct { - key []byte - deleted bool - - data struct { - offset int64 - size int64 - } - hashIndex struct { - offset int64 - size int64 - } -} - -func encodeTagKeyFlag(deleted bool) byte { - var flag byte - if deleted { - flag |= TagKeyTombstoneFlag - } - return flag -} - -func encodeTagValueFlag(deleted bool) byte { - flag := byte(TagValueSeriesIDSetFlag) - if deleted { - flag |= TagValueTombstoneFlag - } - return flag -} diff --git a/tsdb/index/tsi1/tag_block_test.go b/tsdb/index/tsi1/tag_block_test.go deleted file mode 100644 index 2cdca4b3b3f..00000000000 --- a/tsdb/index/tsi1/tag_block_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package tsi1_test - -import ( - "bytes" - "fmt" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" -) - -// Ensure tag blocks can be written and opened. -func TestTagBlockWriter(t *testing.T) { - // Write 3 series to writer. - var buf bytes.Buffer - enc := tsi1.NewTagBlockEncoder(&buf) - - if err := enc.EncodeKey([]byte("host"), false); err != nil { - t.Fatal(err) - } else if err := enc.EncodeValue([]byte("server0"), false, tsdb.NewSeriesIDSet(1)); err != nil { - t.Fatal(err) - } else if err := enc.EncodeValue([]byte("server1"), false, tsdb.NewSeriesIDSet(2)); err != nil { - t.Fatal(err) - } else if err := enc.EncodeValue([]byte("server2"), false, tsdb.NewSeriesIDSet(3)); err != nil { - t.Fatal(err) - } - - if err := enc.EncodeKey([]byte("region"), false); err != nil { - t.Fatal(err) - } else if err := enc.EncodeValue([]byte("us-east"), false, tsdb.NewSeriesIDSet(1, 2)); err != nil { - t.Fatal(err) - } else if err := enc.EncodeValue([]byte("us-west"), false, tsdb.NewSeriesIDSet(3)); err != nil { - t.Fatal(err) - } - - // Flush encoder. - if err := enc.Close(); err != nil { - t.Fatal(err) - } else if int(enc.N()) != buf.Len() { - t.Fatalf("bytes written mismatch: %d, expected %d", enc.N(), buf.Len()) - } - - // Unmarshal into a block. - var blk tsi1.TagBlock - if err := blk.UnmarshalBinary(buf.Bytes()); err != nil { - t.Fatal(err) - } - - // Verify data. 
- if e := blk.TagValueElem([]byte("region"), []byte("us-east")); e == nil { - t.Fatal("expected element") - } else if a, err := e.(*tsi1.TagBlockValueElem).SeriesIDs(); err != nil { - t.Fatalf("unexpected error: %v", err) - } else if !reflect.DeepEqual(a, []uint64{1, 2}) { - t.Fatalf("unexpected series ids: %#v", a) - } - - if e := blk.TagValueElem([]byte("region"), []byte("us-west")); e == nil { - t.Fatal("expected element") - } else if a, err := e.(*tsi1.TagBlockValueElem).SeriesIDs(); err != nil { - t.Fatalf("unexpected error: %v", err) - } else if !reflect.DeepEqual(a, []uint64{3}) { - t.Fatalf("unexpected series ids: %#v", a) - } - if e := blk.TagValueElem([]byte("host"), []byte("server0")); e == nil { - t.Fatal("expected element") - } else if a, err := e.(*tsi1.TagBlockValueElem).SeriesIDs(); err != nil { - t.Fatalf("unexpected error: %v", err) - } else if !reflect.DeepEqual(a, []uint64{1}) { - t.Fatalf("unexpected series ids: %#v", a) - } - if e := blk.TagValueElem([]byte("host"), []byte("server1")); e == nil { - t.Fatal("expected element") - } else if a, err := e.(*tsi1.TagBlockValueElem).SeriesIDs(); err != nil { - t.Fatalf("unexpected error: %v", err) - } else if !reflect.DeepEqual(a, []uint64{2}) { - t.Fatalf("unexpected series ids: %#v", a) - } - if e := blk.TagValueElem([]byte("host"), []byte("server2")); e == nil { - t.Fatal("expected element") - } else if a, err := e.(*tsi1.TagBlockValueElem).SeriesIDs(); err != nil { - t.Fatalf("unexpected error: %v", err) - } else if !reflect.DeepEqual(a, []uint64{3}) { - t.Fatalf("unexpected series ids: %#v", a) - } -} - -var benchmarkTagBlock10x1000 *tsi1.TagBlock -var benchmarkTagBlock100x1000 *tsi1.TagBlock -var benchmarkTagBlock1000x1000 *tsi1.TagBlock -var benchmarkTagBlock1x1000000 *tsi1.TagBlock - -func BenchmarkTagBlock_SeriesN_10_1000(b *testing.B) { - benchmarkTagBlock_SeriesN(b, 10, 1000, &benchmarkTagBlock10x1000) -} -func BenchmarkTagBlock_SeriesN_100_1000(b *testing.B) { - benchmarkTagBlock_SeriesN(b, 100, 1000, &benchmarkTagBlock100x1000) -} -func BenchmarkTagBlock_SeriesN_1000_1000(b *testing.B) { - benchmarkTagBlock_SeriesN(b, 1000, 1000, &benchmarkTagBlock1000x1000) -} -func BenchmarkTagBlock_SeriesN_1_1000000(b *testing.B) { - benchmarkTagBlock_SeriesN(b, 1, 1000000, &benchmarkTagBlock1x1000000) -} - -func benchmarkTagBlock_SeriesN(b *testing.B, tagN, valueN int, blk **tsi1.TagBlock) { - if (*blk) == nil { - var buf bytes.Buffer - enc := tsi1.NewTagBlockEncoder(&buf) - - // Write block. - for i := 0; i < tagN; i++ { - if err := enc.EncodeKey([]byte(fmt.Sprintf("%08d", i)), false); err != nil { - b.Fatal(err) - } - - for j := 0; j < valueN; j++ { - if err := enc.EncodeValue([]byte(fmt.Sprintf("%08d", j)), false, tsdb.NewSeriesIDSet(1)); err != nil { - b.Fatal(err) - } - } - } - - // Flush encoder. - if err := enc.Close(); err != nil { - b.Fatal(err) - } - b.Log("size", buf.Len()) - - // Unmarshal into a block. - *blk = &tsi1.TagBlock{} - if err := (*blk).UnmarshalBinary(buf.Bytes()); err != nil { - b.Fatal(err) - } - } - - // Benchmark lookups. 
- b.ReportAllocs() - b.ResetTimer() - - key, value := []byte("0"), []byte("0") - for i := 0; i < b.N; i++ { - if e := (*blk).TagValueElem(key, value); e == nil { - b.Fatal("expected element") - } else if n := e.(*tsi1.TagBlockValueElem).SeriesN(); n != 1 { - b.Fatalf("unexpected series count: %d", n) - } - } -} diff --git a/tsdb/index/tsi1/testdata/index-file-index/0/L0-00000002.tsl b/tsdb/index/tsi1/testdata/index-file-index/0/L0-00000002.tsl deleted file mode 100644 index 4b82160a0aa..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/0/L0-00000002.tsl and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/0/L1-00000001.tsi b/tsdb/index/tsi1/testdata/index-file-index/0/L1-00000001.tsi deleted file mode 100644 index 1a01587d03b..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/0/L1-00000001.tsi and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/0/MANIFEST b/tsdb/index/tsi1/testdata/index-file-index/0/MANIFEST deleted file mode 100644 index a259b469af6..00000000000 --- a/tsdb/index/tsi1/testdata/index-file-index/0/MANIFEST +++ /dev/null @@ -1,38 +0,0 @@ -{ - "levels": [ - {}, - { - "m": 33554432, - "k": 6 - }, - { - "m": 33554432, - "k": 6 - }, - { - "m": 67108864, - "k": 6 - }, - { - "m": 134217728, - "k": 6 - }, - { - "m": 268435456, - "k": 6 - }, - { - "m": 536870912, - "k": 6 - }, - { - "m": 1073741824, - "k": 6 - } - ], - "files": [ - "L0-00000002.tsl", - "L1-00000001.tsi" - ], - "version": 1 -} diff --git a/tsdb/index/tsi1/testdata/index-file-index/1/L0-00000002.tsl b/tsdb/index/tsi1/testdata/index-file-index/1/L0-00000002.tsl deleted file mode 100644 index 5e268f7508b..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/1/L0-00000002.tsl and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/1/L1-00000001.tsi b/tsdb/index/tsi1/testdata/index-file-index/1/L1-00000001.tsi deleted file mode 100644 index f0bc9c13473..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/1/L1-00000001.tsi and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/1/MANIFEST b/tsdb/index/tsi1/testdata/index-file-index/1/MANIFEST deleted file mode 100644 index a259b469af6..00000000000 --- a/tsdb/index/tsi1/testdata/index-file-index/1/MANIFEST +++ /dev/null @@ -1,38 +0,0 @@ -{ - "levels": [ - {}, - { - "m": 33554432, - "k": 6 - }, - { - "m": 33554432, - "k": 6 - }, - { - "m": 67108864, - "k": 6 - }, - { - "m": 134217728, - "k": 6 - }, - { - "m": 268435456, - "k": 6 - }, - { - "m": 536870912, - "k": 6 - }, - { - "m": 1073741824, - "k": 6 - } - ], - "files": [ - "L0-00000002.tsl", - "L1-00000001.tsi" - ], - "version": 1 -} diff --git a/tsdb/index/tsi1/testdata/index-file-index/2/L0-00000002.tsl b/tsdb/index/tsi1/testdata/index-file-index/2/L0-00000002.tsl deleted file mode 100644 index 180ec01ade1..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/2/L0-00000002.tsl and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/2/L1-00000001.tsi b/tsdb/index/tsi1/testdata/index-file-index/2/L1-00000001.tsi deleted file mode 100644 index 27f40bdd8b5..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/2/L1-00000001.tsi and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/2/MANIFEST b/tsdb/index/tsi1/testdata/index-file-index/2/MANIFEST deleted file mode 100644 index a259b469af6..00000000000 --- a/tsdb/index/tsi1/testdata/index-file-index/2/MANIFEST +++ /dev/null @@ -1,38 +0,0 @@ -{ - 
"levels": [ - {}, - { - "m": 33554432, - "k": 6 - }, - { - "m": 33554432, - "k": 6 - }, - { - "m": 67108864, - "k": 6 - }, - { - "m": 134217728, - "k": 6 - }, - { - "m": 268435456, - "k": 6 - }, - { - "m": 536870912, - "k": 6 - }, - { - "m": 1073741824, - "k": 6 - } - ], - "files": [ - "L0-00000002.tsl", - "L1-00000001.tsi" - ], - "version": 1 -} diff --git a/tsdb/index/tsi1/testdata/index-file-index/3/L0-00000002.tsl b/tsdb/index/tsi1/testdata/index-file-index/3/L0-00000002.tsl deleted file mode 100644 index a8aa9b37a0d..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/3/L0-00000002.tsl and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/3/L1-00000001.tsi b/tsdb/index/tsi1/testdata/index-file-index/3/L1-00000001.tsi deleted file mode 100644 index 6f065bd548a..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/3/L1-00000001.tsi and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/3/MANIFEST b/tsdb/index/tsi1/testdata/index-file-index/3/MANIFEST deleted file mode 100644 index a259b469af6..00000000000 --- a/tsdb/index/tsi1/testdata/index-file-index/3/MANIFEST +++ /dev/null @@ -1,38 +0,0 @@ -{ - "levels": [ - {}, - { - "m": 33554432, - "k": 6 - }, - { - "m": 33554432, - "k": 6 - }, - { - "m": 67108864, - "k": 6 - }, - { - "m": 134217728, - "k": 6 - }, - { - "m": 268435456, - "k": 6 - }, - { - "m": 536870912, - "k": 6 - }, - { - "m": 1073741824, - "k": 6 - } - ], - "files": [ - "L0-00000002.tsl", - "L1-00000001.tsi" - ], - "version": 1 -} diff --git a/tsdb/index/tsi1/testdata/index-file-index/4/L0-00000002.tsl b/tsdb/index/tsi1/testdata/index-file-index/4/L0-00000002.tsl deleted file mode 100644 index 30785280942..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/4/L0-00000002.tsl and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/4/L1-00000001.tsi b/tsdb/index/tsi1/testdata/index-file-index/4/L1-00000001.tsi deleted file mode 100644 index 44b4042dbd5..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/4/L1-00000001.tsi and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/4/MANIFEST b/tsdb/index/tsi1/testdata/index-file-index/4/MANIFEST deleted file mode 100644 index a259b469af6..00000000000 --- a/tsdb/index/tsi1/testdata/index-file-index/4/MANIFEST +++ /dev/null @@ -1,38 +0,0 @@ -{ - "levels": [ - {}, - { - "m": 33554432, - "k": 6 - }, - { - "m": 33554432, - "k": 6 - }, - { - "m": 67108864, - "k": 6 - }, - { - "m": 134217728, - "k": 6 - }, - { - "m": 268435456, - "k": 6 - }, - { - "m": 536870912, - "k": 6 - }, - { - "m": 1073741824, - "k": 6 - } - ], - "files": [ - "L0-00000002.tsl", - "L1-00000001.tsi" - ], - "version": 1 -} diff --git a/tsdb/index/tsi1/testdata/index-file-index/5/L0-00000002.tsl b/tsdb/index/tsi1/testdata/index-file-index/5/L0-00000002.tsl deleted file mode 100644 index b6e817a4b87..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/5/L0-00000002.tsl and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/5/L1-00000001.tsi b/tsdb/index/tsi1/testdata/index-file-index/5/L1-00000001.tsi deleted file mode 100644 index c585527273a..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/5/L1-00000001.tsi and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/5/MANIFEST b/tsdb/index/tsi1/testdata/index-file-index/5/MANIFEST deleted file mode 100644 index a259b469af6..00000000000 --- a/tsdb/index/tsi1/testdata/index-file-index/5/MANIFEST +++ 
/dev/null @@ -1,38 +0,0 @@ -{ - "levels": [ - {}, - { - "m": 33554432, - "k": 6 - }, - { - "m": 33554432, - "k": 6 - }, - { - "m": 67108864, - "k": 6 - }, - { - "m": 134217728, - "k": 6 - }, - { - "m": 268435456, - "k": 6 - }, - { - "m": 536870912, - "k": 6 - }, - { - "m": 1073741824, - "k": 6 - } - ], - "files": [ - "L0-00000002.tsl", - "L1-00000001.tsi" - ], - "version": 1 -} diff --git a/tsdb/index/tsi1/testdata/index-file-index/6/L0-00000002.tsl b/tsdb/index/tsi1/testdata/index-file-index/6/L0-00000002.tsl deleted file mode 100644 index 4564b90b211..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/6/L0-00000002.tsl and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/6/L1-00000001.tsi b/tsdb/index/tsi1/testdata/index-file-index/6/L1-00000001.tsi deleted file mode 100644 index 6f44f0849b7..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/6/L1-00000001.tsi and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/6/MANIFEST b/tsdb/index/tsi1/testdata/index-file-index/6/MANIFEST deleted file mode 100644 index a259b469af6..00000000000 --- a/tsdb/index/tsi1/testdata/index-file-index/6/MANIFEST +++ /dev/null @@ -1,38 +0,0 @@ -{ - "levels": [ - {}, - { - "m": 33554432, - "k": 6 - }, - { - "m": 33554432, - "k": 6 - }, - { - "m": 67108864, - "k": 6 - }, - { - "m": 134217728, - "k": 6 - }, - { - "m": 268435456, - "k": 6 - }, - { - "m": 536870912, - "k": 6 - }, - { - "m": 1073741824, - "k": 6 - } - ], - "files": [ - "L0-00000002.tsl", - "L1-00000001.tsi" - ], - "version": 1 -} diff --git a/tsdb/index/tsi1/testdata/index-file-index/7/L0-00000002.tsl b/tsdb/index/tsi1/testdata/index-file-index/7/L0-00000002.tsl deleted file mode 100644 index 10d7be91c22..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/7/L0-00000002.tsl and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/7/L1-00000001.tsi b/tsdb/index/tsi1/testdata/index-file-index/7/L1-00000001.tsi deleted file mode 100644 index 36dd3da47aa..00000000000 Binary files a/tsdb/index/tsi1/testdata/index-file-index/7/L1-00000001.tsi and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/index-file-index/7/MANIFEST b/tsdb/index/tsi1/testdata/index-file-index/7/MANIFEST deleted file mode 100644 index a259b469af6..00000000000 --- a/tsdb/index/tsi1/testdata/index-file-index/7/MANIFEST +++ /dev/null @@ -1,38 +0,0 @@ -{ - "levels": [ - {}, - { - "m": 33554432, - "k": 6 - }, - { - "m": 33554432, - "k": 6 - }, - { - "m": 67108864, - "k": 6 - }, - { - "m": 134217728, - "k": 6 - }, - { - "m": 268435456, - "k": 6 - }, - { - "m": 536870912, - "k": 6 - }, - { - "m": 1073741824, - "k": 6 - } - ], - "files": [ - "L0-00000002.tsl", - "L1-00000001.tsi" - ], - "version": 1 -} diff --git a/tsdb/index/tsi1/testdata/line-protocol-1M.txt.gz b/tsdb/index/tsi1/testdata/line-protocol-1M.txt.gz deleted file mode 100644 index e862b24e193..00000000000 Binary files a/tsdb/index/tsi1/testdata/line-protocol-1M.txt.gz and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/uvarint/_series/00/0000 b/tsdb/index/tsi1/testdata/uvarint/_series/00/0000 deleted file mode 100644 index 4c8b99784a9..00000000000 Binary files a/tsdb/index/tsi1/testdata/uvarint/_series/00/0000 and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/uvarint/_series/01/0000 b/tsdb/index/tsi1/testdata/uvarint/_series/01/0000 deleted file mode 100644 index 6f8e3d77d2d..00000000000 Binary files a/tsdb/index/tsi1/testdata/uvarint/_series/01/0000 and /dev/null differ diff --git 
a/tsdb/index/tsi1/testdata/uvarint/_series/02/0000 b/tsdb/index/tsi1/testdata/uvarint/_series/02/0000 deleted file mode 100644 index 4ca6881e190..00000000000 Binary files a/tsdb/index/tsi1/testdata/uvarint/_series/02/0000 and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/uvarint/_series/03/0000 b/tsdb/index/tsi1/testdata/uvarint/_series/03/0000 deleted file mode 100644 index 8292b86985d..00000000000 Binary files a/tsdb/index/tsi1/testdata/uvarint/_series/03/0000 and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/uvarint/_series/04/0000 b/tsdb/index/tsi1/testdata/uvarint/_series/04/0000 deleted file mode 100644 index 9887d95d1dd..00000000000 Binary files a/tsdb/index/tsi1/testdata/uvarint/_series/04/0000 and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/uvarint/_series/05/0000 b/tsdb/index/tsi1/testdata/uvarint/_series/05/0000 deleted file mode 100644 index 6a8bb911aea..00000000000 Binary files a/tsdb/index/tsi1/testdata/uvarint/_series/05/0000 and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/uvarint/_series/06/0000 b/tsdb/index/tsi1/testdata/uvarint/_series/06/0000 deleted file mode 100644 index 83d795c4d2f..00000000000 Binary files a/tsdb/index/tsi1/testdata/uvarint/_series/06/0000 and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/uvarint/_series/07/0000 b/tsdb/index/tsi1/testdata/uvarint/_series/07/0000 deleted file mode 100644 index 74d2a3a1d10..00000000000 Binary files a/tsdb/index/tsi1/testdata/uvarint/_series/07/0000 and /dev/null differ diff --git a/tsdb/index/tsi1/testdata/uvarint/index b/tsdb/index/tsi1/testdata/uvarint/index deleted file mode 100644 index 24f75f011d2..00000000000 Binary files a/tsdb/index/tsi1/testdata/uvarint/index and /dev/null differ diff --git a/tsdb/index/tsi1/tsi1.go b/tsdb/index/tsi1/tsi1.go deleted file mode 100644 index ba1a2fea8c3..00000000000 --- a/tsdb/index/tsi1/tsi1.go +++ /dev/null @@ -1,546 +0,0 @@ -package tsi1 - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - - "github.com/influxdata/influxdb/v2/tsdb" -) - -// LoadFactor is the fill percent for RHH indexes. -const LoadFactor = 80 - -// MeasurementElem represents a generic measurement element. -type MeasurementElem interface { - Name() []byte - Deleted() bool - // HasSeries() bool -} - -// MeasurementElems represents a list of MeasurementElem. -type MeasurementElems []MeasurementElem - -func (a MeasurementElems) Len() int { return len(a) } -func (a MeasurementElems) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a MeasurementElems) Less(i, j int) bool { return bytes.Compare(a[i].Name(), a[j].Name()) == -1 } - -// MeasurementIterator represents a iterator over a list of measurements. -type MeasurementIterator interface { - Next() MeasurementElem -} - -// MergeMeasurementIterators returns an iterator that merges a set of iterators. -// Iterators that are first in the list take precedence and a deletion by those -// early iterators will invalidate elements by later iterators. -func MergeMeasurementIterators(itrs ...MeasurementIterator) MeasurementIterator { - if len(itrs) == 0 { - return nil - } - - return &measurementMergeIterator{ - e: make(measurementMergeElem, 0, len(itrs)), - buf: make([]MeasurementElem, len(itrs)), - itrs: itrs, - } -} - -type measurementMergeIterator struct { - e measurementMergeElem - buf []MeasurementElem - itrs []MeasurementIterator -} - -// Next returns the element with the next lowest name across the iterators. 
-// -// If multiple iterators contain the same name then the first is returned -// and the remaining ones are skipped. -func (itr *measurementMergeIterator) Next() MeasurementElem { - // Find next lowest name amongst the buffers. - var name []byte - for i, buf := range itr.buf { - // Fill buffer if empty. - if buf == nil { - if buf = itr.itrs[i].Next(); buf != nil { - itr.buf[i] = buf - } else { - continue - } - } - - // Find next lowest name. - if name == nil || bytes.Compare(itr.buf[i].Name(), name) == -1 { - name = itr.buf[i].Name() - } - } - - // Return nil if no elements remaining. - if name == nil { - return nil - } - - // Merge all elements together and clear buffers. - itr.e = itr.e[:0] - for i, buf := range itr.buf { - if buf == nil || !bytes.Equal(buf.Name(), name) { - continue - } - itr.e = append(itr.e, buf) - itr.buf[i] = nil - } - return itr.e -} - -// measurementMergeElem represents a merged measurement element. -type measurementMergeElem []MeasurementElem - -// Name returns the name of the first element. -func (p measurementMergeElem) Name() []byte { - if len(p) == 0 { - return nil - } - return p[0].Name() -} - -// Deleted returns the deleted flag of the first element. -func (p measurementMergeElem) Deleted() bool { - if len(p) == 0 { - return false - } - return p[0].Deleted() -} - -// tsdbMeasurementIteratorAdapter wraps MeasurementIterator to match the TSDB interface. -// This is needed because TSDB doesn't have a concept of "deleted" measurements. -type tsdbMeasurementIteratorAdapter struct { - itr MeasurementIterator -} - -// NewTSDBMeasurementIteratorAdapter return an iterator which implements tsdb.MeasurementIterator. -func NewTSDBMeasurementIteratorAdapter(itr MeasurementIterator) tsdb.MeasurementIterator { - if itr == nil { - return nil - } - return &tsdbMeasurementIteratorAdapter{itr: itr} -} - -func (itr *tsdbMeasurementIteratorAdapter) Close() error { return nil } - -func (itr *tsdbMeasurementIteratorAdapter) Next() ([]byte, error) { - for { - e := itr.itr.Next() - if e == nil { - return nil, nil - } else if e.Deleted() { - continue - } - return e.Name(), nil - } -} - -// TagKeyElem represents a generic tag key element. -type TagKeyElem interface { - Key() []byte - Deleted() bool - TagValueIterator() TagValueIterator -} - -// TagKeyIterator represents a iterator over a list of tag keys. -type TagKeyIterator interface { - Next() TagKeyElem -} - -// tsdbTagKeyIteratorAdapter wraps TagKeyIterator to match the TSDB interface. -// This is needed because TSDB doesn't have a concept of "deleted" tag keys. -type tsdbTagKeyIteratorAdapter struct { - itr TagKeyIterator -} - -// NewTSDBTagKeyIteratorAdapter return an iterator which implements tsdb.TagKeyIterator. -func NewTSDBTagKeyIteratorAdapter(itr TagKeyIterator) tsdb.TagKeyIterator { - if itr == nil { - return nil - } - return &tsdbTagKeyIteratorAdapter{itr: itr} -} - -func (itr *tsdbTagKeyIteratorAdapter) Close() error { return nil } - -func (itr *tsdbTagKeyIteratorAdapter) Next() ([]byte, error) { - for { - e := itr.itr.Next() - if e == nil { - return nil, nil - } else if e.Deleted() { - continue - } - return e.Key(), nil - } -} - -// MergeTagKeyIterators returns an iterator that merges a set of iterators. -// Iterators that are first in the list take precedence and a deletion by those -// early iterators will invalidate elements by later iterators. 
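MergeMeasurementIterators above, and the tag key and tag value variants that follow, all share one pattern: keep a one-element buffer per source, emit the lowest buffered item, and consume it from every source that holds it so the earliest iterator takes precedence. A stripped-down sketch of that shape over plain string slices; sliceIter and merge are illustrative stand-ins, not the real iterator types:

package main

import "fmt"

// sliceIter is a trivially sorted source; the real iterators walk log files
// and index files instead.
type sliceIter struct{ names []string }

func (it *sliceIter) next() (string, bool) {
	if len(it.names) == 0 {
		return "", false
	}
	n := it.names[0]
	it.names = it.names[1:]
	return n, true
}

// merge emits the lowest buffered name and clears every buffer holding that
// name, so duplicates collapse to the element from the earliest source.
func merge(iters ...*sliceIter) []string {
	buf := make([]*string, len(iters))
	var out []string
	for {
		var lowest *string
		for i := range iters {
			// Fill empty buffers, then track the lowest buffered name.
			if buf[i] == nil {
				if n, ok := iters[i].next(); ok {
					buf[i] = &n
				}
			}
			if buf[i] != nil && (lowest == nil || *buf[i] < *lowest) {
				lowest = buf[i]
			}
		}
		if lowest == nil {
			return out // every source is exhausted
		}
		name := *lowest
		out = append(out, name)
		for i := range buf {
			if buf[i] != nil && *buf[i] == name {
				buf[i] = nil
			}
		}
	}
}

func main() {
	a := &sliceIter{names: []string{"aaa", "bbb", "ccc"}}
	b := &sliceIter{names: []string{"bbb", "ddd"}}
	fmt.Println(merge(a, b)) // [aaa bbb ccc ddd]
}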
-func MergeTagKeyIterators(itrs ...TagKeyIterator) TagKeyIterator { - if len(itrs) == 0 { - return nil - } - - return &tagKeyMergeIterator{ - e: make(tagKeyMergeElem, 0, len(itrs)), - buf: make([]TagKeyElem, len(itrs)), - itrs: itrs, - } -} - -type tagKeyMergeIterator struct { - e tagKeyMergeElem - buf []TagKeyElem - itrs []TagKeyIterator -} - -// Next returns the element with the next lowest key across the iterators. -// -// If multiple iterators contain the same key then the first is returned -// and the remaining ones are skipped. -func (itr *tagKeyMergeIterator) Next() TagKeyElem { - // Find next lowest key amongst the buffers. - var key []byte - for i, buf := range itr.buf { - // Fill buffer. - if buf == nil { - if buf = itr.itrs[i].Next(); buf != nil { - itr.buf[i] = buf - } else { - continue - } - } - - // Find next lowest key. - if key == nil || bytes.Compare(buf.Key(), key) == -1 { - key = buf.Key() - } - } - - // Return nil if no elements remaining. - if key == nil { - return nil - } - - // Merge elements together & clear buffer. - itr.e = itr.e[:0] - for i, buf := range itr.buf { - if buf == nil || !bytes.Equal(buf.Key(), key) { - continue - } - itr.e = append(itr.e, buf) - itr.buf[i] = nil - } - - return itr.e -} - -// tagKeyMergeElem represents a merged tag key element. -type tagKeyMergeElem []TagKeyElem - -// Key returns the key of the first element. -func (p tagKeyMergeElem) Key() []byte { - if len(p) == 0 { - return nil - } - return p[0].Key() -} - -// Deleted returns the deleted flag of the first element. -func (p tagKeyMergeElem) Deleted() bool { - if len(p) == 0 { - return false - } - return p[0].Deleted() -} - -// TagValueIterator returns a merge iterator for all elements until a tombstone occurs. -func (p tagKeyMergeElem) TagValueIterator() TagValueIterator { - if len(p) == 0 { - return nil - } - - a := make([]TagValueIterator, 0, len(p)) - for _, e := range p { - itr := e.TagValueIterator() - - a = append(a, itr) - if e.Deleted() { - break - } - } - return MergeTagValueIterators(a...) -} - -// TagValueElem represents a generic tag value element. -type TagValueElem interface { - Value() []byte - Deleted() bool -} - -// TagValueIterator represents a iterator over a list of tag values. -type TagValueIterator interface { - Next() TagValueElem -} - -// tsdbTagValueIteratorAdapter wraps TagValueIterator to match the TSDB interface. -// This is needed because TSDB doesn't have a concept of "deleted" tag values. -type tsdbTagValueIteratorAdapter struct { - itr TagValueIterator -} - -// NewTSDBTagValueIteratorAdapter return an iterator which implements tsdb.TagValueIterator. -func NewTSDBTagValueIteratorAdapter(itr TagValueIterator) tsdb.TagValueIterator { - if itr == nil { - return nil - } - return &tsdbTagValueIteratorAdapter{itr: itr} -} - -func (itr *tsdbTagValueIteratorAdapter) Close() error { return nil } - -func (itr *tsdbTagValueIteratorAdapter) Next() ([]byte, error) { - for { - e := itr.itr.Next() - if e == nil { - return nil, nil - } else if e.Deleted() { - continue - } - return e.Value(), nil - } -} - -// MergeTagValueIterators returns an iterator that merges a set of iterators. -// Iterators that are first in the list take precedence and a deletion by those -// early iterators will invalidate elements by later iterators. 
-func MergeTagValueIterators(itrs ...TagValueIterator) TagValueIterator { - if len(itrs) == 0 { - return nil - } - - return &tagValueMergeIterator{ - e: make(tagValueMergeElem, 0, len(itrs)), - buf: make([]TagValueElem, len(itrs)), - itrs: itrs, - } -} - -type tagValueMergeIterator struct { - e tagValueMergeElem - buf []TagValueElem - itrs []TagValueIterator -} - -// Next returns the element with the next lowest value across the iterators. -// -// If multiple iterators contain the same value then the first is returned -// and the remaining ones are skipped. -func (itr *tagValueMergeIterator) Next() TagValueElem { - // Find next lowest value amongst the buffers. - var value []byte - for i, buf := range itr.buf { - // Fill buffer. - if buf == nil { - if buf = itr.itrs[i].Next(); buf != nil { - itr.buf[i] = buf - } else { - continue - } - } - - // Find next lowest value. - if value == nil || bytes.Compare(buf.Value(), value) == -1 { - value = buf.Value() - } - } - - // Return nil if no elements remaining. - if value == nil { - return nil - } - - // Merge elements and clear buffers. - itr.e = itr.e[:0] - for i, buf := range itr.buf { - if buf == nil || !bytes.Equal(buf.Value(), value) { - continue - } - itr.e = append(itr.e, buf) - itr.buf[i] = nil - } - return itr.e -} - -// tagValueMergeElem represents a merged tag value element. -type tagValueMergeElem []TagValueElem - -// Name returns the value of the first element. -func (p tagValueMergeElem) Value() []byte { - if len(p) == 0 { - return nil - } - return p[0].Value() -} - -// Deleted returns the deleted flag of the first element. -func (p tagValueMergeElem) Deleted() bool { - if len(p) == 0 { - return false - } - return p[0].Deleted() -} - -/* -type SeriesPointMergeIterator interface { - Next() (*query.FloatPoint, error) - Close() error - Stats() query.IteratorStats -} - -func MergeSeriesPointIterators(itrs ...*seriesPointIterator) SeriesPointMergeIterator { - if n := len(itrs); n == 0 { - return nil - } else if n == 1 { - return itrs[0] - } - - return &seriesPointMergeIterator{ - buf: make([]*query.FloatPoint, len(itrs)), - itrs: itrs, - } -} - -type seriesPointMergeIterator struct { - buf []*query.FloatPoint - itrs []*seriesPointIterator -} - -func (itr *seriesPointMergeIterator) Close() error { - for i := range itr.itrs { - itr.itrs[i].Close() - } - return nil -} -func (itr *seriesPointMergeIterator) Stats() query.IteratorStats { - return query.IteratorStats{} -} - -func (itr *seriesPointMergeIterator) Next() (_ *query.FloatPoint, err error) { - // Find next lowest point amongst the buffers. - var key []byte - for i, buf := range itr.buf { - // Fill buffer. - if buf == nil { - if buf, err = itr.itrs[i].Next(); err != nil { - return nil, err - } else if buf != nil { - itr.buf[i] = buf - } else { - continue - } - } - - // Find next lowest key. - if key == nil || bytes.Compare(buf.Key(), key) == -1 { - key = buf.Key() - } - } - - // Return nil if no elements remaining. - if key == nil { - return nil, nil - } - - // Merge elements together & clear buffer. - itr.e = itr.e[:0] - for i, buf := range itr.buf { - if buf == nil || !bytes.Equal(buf.Key(), key) { - continue - } - itr.e = append(itr.e, buf) - itr.buf[i] = nil - } - - return itr.e, nil -} -*/ - -// writeTo writes write v into w. Updates n. -func writeTo(w io.Writer, v []byte, n *int64) error { - nn, err := w.Write(v) - *n += int64(nn) - return err -} - -// writeUint8To writes write v into w. Updates n. 
-func writeUint8To(w io.Writer, v uint8, n *int64) error { - nn, err := w.Write([]byte{v}) - *n += int64(nn) - return err -} - -// writeUint16To writes write v into w using big endian encoding. Updates n. -func writeUint16To(w io.Writer, v uint16, n *int64) error { - var buf [2]byte - binary.BigEndian.PutUint16(buf[:], v) - nn, err := w.Write(buf[:]) - *n += int64(nn) - return err -} - -// writeUint64To writes write v into w using big endian encoding. Updates n. -func writeUint64To(w io.Writer, v uint64, n *int64) error { - var buf [8]byte - binary.BigEndian.PutUint64(buf[:], v) - nn, err := w.Write(buf[:]) - *n += int64(nn) - return err -} - -// writeUvarintTo writes write v into w using variable length encoding. Updates n. -func writeUvarintTo(w io.Writer, v uint64, n *int64) error { - var buf [binary.MaxVarintLen64]byte - i := binary.PutUvarint(buf[:], v) - nn, err := w.Write(buf[:i]) - *n += int64(nn) - return err -} - -type uint64Slice []uint64 - -func (a uint64Slice) Len() int { return len(a) } -func (a uint64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a uint64Slice) Less(i, j int) bool { return a[i] < a[j] } - -type byteSlices [][]byte - -func (a byteSlices) Len() int { return len(a) } -func (a byteSlices) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 } - -// assert will panic with a given formatted message if the given condition is false. -func assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assert failed: "+msg, v...)) - } -} - -// uvarint is a wrapper around binary.Uvarint. -// Returns a non-nil error when binary.Uvarint returns n <= 0 or n > len(data). -func uvarint(data []byte) (value uint64, n int, err error) { - if len(data) < 1 { - err = io.ErrShortBuffer - } else if value, n = binary.Uvarint(data); n == 0 || n > len(data) { - err = io.ErrShortBuffer - } else if n < 0 { - err = fmt.Errorf("parsing binary-encoded uint64 value failed; binary.Uvarint() returned %d", n) - } - return -} diff --git a/tsdb/index/tsi1/tsi1_test.go b/tsdb/index/tsi1/tsi1_test.go deleted file mode 100644 index d13eb8015f4..00000000000 --- a/tsdb/index/tsi1/tsi1_test.go +++ /dev/null @@ -1,306 +0,0 @@ -package tsi1_test - -import ( - "bytes" - "os" - "path/filepath" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" -) - -// Ensure iterator can operate over an in-memory list of elements. -func TestMeasurementIterator(t *testing.T) { - elems := []MeasurementElem{ - MeasurementElem{name: []byte("cpu"), deleted: true}, - MeasurementElem{name: []byte("mem")}, - } - - itr := MeasurementIterator{Elems: elems} - if e := itr.Next(); !reflect.DeepEqual(&elems[0], e) { - t.Fatalf("unexpected elem(0): %#v", e) - } else if e := itr.Next(); !reflect.DeepEqual(&elems[1], e) { - t.Fatalf("unexpected elem(1): %#v", e) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected nil elem: %#v", e) - } -} - -// Ensure iterator can merge multiple iterators together. 
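All of the write helpers above follow one convention: write to w, add the byte count to the caller's running total, and let the encoders snapshot that total before and after each section to record offsets and sizes. A tiny sketch of the pattern; the "header" and "data" section names are made up, and writeUvarintTo is copied from the helper above:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeUvarintTo writes v with variable-length encoding and advances the
// running byte counter, exactly as in the helper above.
func writeUvarintTo(w io.Writer, v uint64, n *int64) error {
	var buf [binary.MaxVarintLen64]byte
	i := binary.PutUvarint(buf[:], v)
	nn, err := w.Write(buf[:i])
	*n += int64(nn)
	return err
}

func main() {
	var buf bytes.Buffer
	var n int64

	headerOffset := n // hypothetical "header" section starts here
	writeUvarintTo(&buf, 300, &n)

	dataOffset := n // the next section starts wherever the counter now points
	writeUvarintTo(&buf, 7, &n)

	fmt.Println(headerOffset, dataOffset, n) // 0 2 3
}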
-func TestMergeMeasurementIterators(t *testing.T) { - itr := tsi1.MergeMeasurementIterators( - &MeasurementIterator{Elems: []MeasurementElem{ - {name: []byte("aaa")}, - {name: []byte("bbb"), deleted: true}, - {name: []byte("ccc")}, - }}, - &MeasurementIterator{}, - &MeasurementIterator{Elems: []MeasurementElem{ - {name: []byte("bbb")}, - {name: []byte("ccc"), deleted: true}, - {name: []byte("ddd")}, - }}, - ) - - if e := itr.Next(); !bytes.Equal(e.Name(), []byte("aaa")) || e.Deleted() { - t.Fatalf("unexpected elem(0): %s/%v", e.Name(), e.Deleted()) - } else if e := itr.Next(); !bytes.Equal(e.Name(), []byte("bbb")) || !e.Deleted() { - t.Fatalf("unexpected elem(1): %s/%v", e.Name(), e.Deleted()) - } else if e := itr.Next(); !bytes.Equal(e.Name(), []byte("ccc")) || e.Deleted() { - t.Fatalf("unexpected elem(2): %s/%v", e.Name(), e.Deleted()) - } else if e := itr.Next(); !bytes.Equal(e.Name(), []byte("ddd")) || e.Deleted() { - t.Fatalf("unexpected elem(3): %s/%v", e.Name(), e.Deleted()) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected nil elem: %#v", e) - } -} - -// Ensure iterator can operate over an in-memory list of tag key elements. -func TestTagKeyIterator(t *testing.T) { - elems := []TagKeyElem{ - {key: []byte("aaa"), deleted: true}, - {key: []byte("bbb")}, - } - - itr := TagKeyIterator{Elems: elems} - if e := itr.Next(); !reflect.DeepEqual(&elems[0], e) { - t.Fatalf("unexpected elem(0): %#v", e) - } else if e := itr.Next(); !reflect.DeepEqual(&elems[1], e) { - t.Fatalf("unexpected elem(1): %#v", e) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected nil elem: %#v", e) - } -} - -// Ensure iterator can merge multiple iterators together. -func TestMergeTagKeyIterators(t *testing.T) { - itr := tsi1.MergeTagKeyIterators( - &TagKeyIterator{Elems: []TagKeyElem{ - {key: []byte("aaa")}, - {key: []byte("bbb"), deleted: true}, - {key: []byte("ccc")}, - }}, - &TagKeyIterator{}, - &TagKeyIterator{Elems: []TagKeyElem{ - {key: []byte("bbb")}, - {key: []byte("ccc"), deleted: true}, - {key: []byte("ddd")}, - }}, - ) - - if e := itr.Next(); !bytes.Equal(e.Key(), []byte("aaa")) || e.Deleted() { - t.Fatalf("unexpected elem(0): %s/%v", e.Key(), e.Deleted()) - } else if e := itr.Next(); !bytes.Equal(e.Key(), []byte("bbb")) || !e.Deleted() { - t.Fatalf("unexpected elem(1): %s/%v", e.Key(), e.Deleted()) - } else if e := itr.Next(); !bytes.Equal(e.Key(), []byte("ccc")) || e.Deleted() { - t.Fatalf("unexpected elem(2): %s/%v", e.Key(), e.Deleted()) - } else if e := itr.Next(); !bytes.Equal(e.Key(), []byte("ddd")) || e.Deleted() { - t.Fatalf("unexpected elem(3): %s/%v", e.Key(), e.Deleted()) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected nil elem: %#v", e) - } -} - -// Ensure iterator can operate over an in-memory list of tag value elements. -func TestTagValueIterator(t *testing.T) { - elems := []TagValueElem{ - {value: []byte("aaa"), deleted: true}, - {value: []byte("bbb")}, - } - - itr := &TagValueIterator{Elems: elems} - if e := itr.Next(); !reflect.DeepEqual(&elems[0], e) { - t.Fatalf("unexpected elem(0): %#v", e) - } else if e := itr.Next(); !reflect.DeepEqual(&elems[1], e) { - t.Fatalf("unexpected elem(1): %#v", e) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected nil elem: %#v", e) - } -} - -// Ensure iterator can merge multiple iterators together. 
-func TestMergeTagValueIterators(t *testing.T) { - itr := tsi1.MergeTagValueIterators( - &TagValueIterator{Elems: []TagValueElem{ - {value: []byte("aaa")}, - {value: []byte("bbb"), deleted: true}, - {value: []byte("ccc")}, - }}, - &TagValueIterator{}, - &TagValueIterator{Elems: []TagValueElem{ - {value: []byte("bbb")}, - {value: []byte("ccc"), deleted: true}, - {value: []byte("ddd")}, - }}, - ) - - if e := itr.Next(); !bytes.Equal(e.Value(), []byte("aaa")) || e.Deleted() { - t.Fatalf("unexpected elem(0): %s/%v", e.Value(), e.Deleted()) - } else if e := itr.Next(); !bytes.Equal(e.Value(), []byte("bbb")) || !e.Deleted() { - t.Fatalf("unexpected elem(1): %s/%v", e.Value(), e.Deleted()) - } else if e := itr.Next(); !bytes.Equal(e.Value(), []byte("ccc")) || e.Deleted() { - t.Fatalf("unexpected elem(2): %s/%v", e.Value(), e.Deleted()) - } else if e := itr.Next(); !bytes.Equal(e.Value(), []byte("ddd")) || e.Deleted() { - t.Fatalf("unexpected elem(3): %s/%v", e.Value(), e.Deleted()) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected nil elem: %#v", e) - } -} - -// Ensure iterator can operate over an in-memory list of series. -func TestSeriesIDIterator(t *testing.T) { - elems := []tsdb.SeriesIDElem{ - {SeriesID: 1}, - {SeriesID: 2}, - } - - itr := SeriesIDIterator{Elems: elems} - if e := itr.Next(); !reflect.DeepEqual(elems[0], e) { - t.Fatalf("unexpected elem(0): %#v", e) - } else if e := itr.Next(); !reflect.DeepEqual(elems[1], e) { - t.Fatalf("unexpected elem(1): %#v", e) - } else if e := itr.Next(); e.SeriesID != 0 { - t.Fatalf("expected nil elem: %#v", e) - } -} - -// MeasurementElem represents a test implementation of tsi1.MeasurementElem. -type MeasurementElem struct { - name []byte - deleted bool - hasSeries bool -} - -func (e *MeasurementElem) Name() []byte { return e.name } -func (e *MeasurementElem) Deleted() bool { return e.deleted } -func (e *MeasurementElem) HasSeries() bool { return e.hasSeries } - -func (e *MeasurementElem) TagKeyIterator() tsi1.TagKeyIterator { return nil } - -// MeasurementIterator represents an iterator over a slice of measurements. -type MeasurementIterator struct { - Elems []MeasurementElem -} - -// Next returns the next element in the iterator. -func (itr *MeasurementIterator) Next() (e tsi1.MeasurementElem) { - if len(itr.Elems) == 0 { - return nil - } - e, itr.Elems = &itr.Elems[0], itr.Elems[1:] - return e -} - -// TagKeyElem represents a test implementation of tsi1.TagKeyElem. -type TagKeyElem struct { - key []byte - deleted bool -} - -func (e *TagKeyElem) Key() []byte { return e.key } -func (e *TagKeyElem) Deleted() bool { return e.deleted } -func (e *TagKeyElem) TagValueIterator() tsi1.TagValueIterator { return nil } - -// TagKeyIterator represents an iterator over a slice of tag keys. -type TagKeyIterator struct { - Elems []TagKeyElem -} - -// Next returns the next element in the iterator. -func (itr *TagKeyIterator) Next() (e tsi1.TagKeyElem) { - if len(itr.Elems) == 0 { - return nil - } - e, itr.Elems = &itr.Elems[0], itr.Elems[1:] - return e -} - -// TagValueElem represents a test implementation of tsi1.TagValueElem. -type TagValueElem struct { - value []byte - deleted bool -} - -func (e *TagValueElem) Value() []byte { return e.value } -func (e *TagValueElem) Deleted() bool { return e.deleted } - -// TagValueIterator represents an iterator over a slice of tag values. -type TagValueIterator struct { - Elems []TagValueElem -} - -// Next returns the next element in the iterator. 
-func (itr *TagValueIterator) Next() (e tsi1.TagValueElem) { - if len(itr.Elems) == 0 { - return nil - } - e, itr.Elems = &itr.Elems[0], itr.Elems[1:] - return e -} - -// SeriesIDIterator represents an iterator over a slice of series id elems. -type SeriesIDIterator struct { - Elems []tsdb.SeriesIDElem -} - -// Next returns the next element in the iterator. -func (itr *SeriesIDIterator) Next() (elem tsdb.SeriesIDElem) { - if len(itr.Elems) == 0 { - return tsdb.SeriesIDElem{} - } - elem, itr.Elems = itr.Elems[0], itr.Elems[1:] - return elem -} - -// MustTempPartitionDir returns a temporary directory for a partition. Panic on -// error. -func MustTempPartitionDir(tb testing.TB) string { - path := filepath.Join(tb.TempDir(), "0") - if err := os.Mkdir(path, 0777); err != nil { - panic(err) - } - return path -} - -// Series represents name/tagset pairs that are used in testing. -type Series struct { - Name []byte - Tags models.Tags - Deleted bool -} - -// SeriesFile is a test wrapper for tsdb.SeriesFile. -type SeriesFile struct { - *tsdb.SeriesFile -} - -// NewSeriesFile returns a new instance of SeriesFile with a temporary file path. -func NewSeriesFile(tb testing.TB) *SeriesFile { - return &SeriesFile{SeriesFile: tsdb.NewSeriesFile(tb.TempDir())} -} - -// MustOpenSeriesFile returns a new, open instance of SeriesFile. Panic on error. -func MustOpenSeriesFile(tb testing.TB) *SeriesFile { - f := NewSeriesFile(tb) - if err := f.Open(); err != nil { - panic(err) - } - return f -} - -// Close closes the log file and removes it from disk. -func (f *SeriesFile) Close() error { - return f.SeriesFile.Close() -} - -// Reopen initialises a new series file using the existing one. -func (f *SeriesFile) Reopen() error { - if err := f.SeriesFile.Close(); err != nil { - return err - } - f.SeriesFile = tsdb.NewSeriesFile(f.SeriesFile.Path()) - return nil -} diff --git a/tsdb/index_test.go b/tsdb/index_test.go deleted file mode 100644 index 2cd2fd26d3a..00000000000 --- a/tsdb/index_test.go +++ /dev/null @@ -1,700 +0,0 @@ -package tsdb_test - -import ( - "compress/gzip" - "fmt" - "io" - "os" - "path/filepath" - "reflect" - "sync" - "testing" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/internal" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/slices" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/index/tsi1" - "github.com/influxdata/influxql" - "go.uber.org/zap/zaptest" -) - -// Ensure iterator can merge multiple iterators together. 
-func TestMergeSeriesIDIterators(t *testing.T) { - itr := tsdb.MergeSeriesIDIterators( - tsdb.NewSeriesIDSliceIterator([]uint64{1, 2, 3}), - tsdb.NewSeriesIDSliceIterator(nil), - nil, - tsdb.NewSeriesIDSliceIterator([]uint64{1, 2, 3, 4}), - ) - - if e, err := itr.Next(); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 1}) { - t.Fatalf("unexpected elem(0): %#v", e) - } - if e, err := itr.Next(); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 2}) { - t.Fatalf("unexpected elem(1): %#v", e) - } - if e, err := itr.Next(); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 3}) { - t.Fatalf("unexpected elem(2): %#v", e) - } - if e, err := itr.Next(); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 4}) { - t.Fatalf("unexpected elem(3): %#v", e) - } - if e, err := itr.Next(); err != nil { - t.Fatal(err) - } else if e.SeriesID != 0 { - t.Fatalf("expected nil elem: %#v", e) - } -} - -func TestIndexSet_MeasurementNamesByExpr(t *testing.T) { - // Setup indexes - indexes := map[string]*Index{} - for _, name := range tsdb.RegisteredIndexes() { - idx := MustOpenNewIndex(t, name) - idx.AddSeries("cpu", map[string]string{"region": "east"}) - idx.AddSeries("cpu", map[string]string{"region": "west", "secret": "foo"}) - idx.AddSeries("disk", map[string]string{"secret": "foo"}) - idx.AddSeries("mem", map[string]string{"region": "west"}) - idx.AddSeries("gpu", map[string]string{"region": "east"}) - idx.AddSeries("pci", map[string]string{"region": "east", "secret": "foo"}) - indexes[name] = idx - defer idx.Close() - } - - authorizer := &internal.AuthorizerMock{ - AuthorizeSeriesReadFn: func(database string, measurement []byte, tags models.Tags) bool { - if tags.GetString("secret") != "" { - t.Logf("Rejecting series db=%s, m=%s, tags=%v", database, measurement, tags) - return false - } - return true - }, - } - - type example struct { - name string - expr influxql.Expr - expected [][]byte - } - - // These examples should be run without any auth. - examples := []example{ - {name: "all", expected: slices.StringsToBytes("cpu", "disk", "gpu", "mem", "pci")}, - {name: "EQ", expr: influxql.MustParseExpr(`region = 'west'`), expected: slices.StringsToBytes("cpu", "mem")}, - {name: "NEQ", expr: influxql.MustParseExpr(`region != 'west'`), expected: slices.StringsToBytes("gpu", "pci")}, - {name: "EQREGEX", expr: influxql.MustParseExpr(`region =~ /.*st/`), expected: slices.StringsToBytes("cpu", "gpu", "mem", "pci")}, - {name: "NEQREGEX", expr: influxql.MustParseExpr(`region !~ /.*est/`), expected: slices.StringsToBytes("gpu", "pci")}, - } - - // These examples should be run with the authorizer. 
- authExamples := []example{ - {name: "all", expected: slices.StringsToBytes("cpu", "gpu", "mem")}, - {name: "EQ", expr: influxql.MustParseExpr(`region = 'west'`), expected: slices.StringsToBytes("mem")}, - {name: "NEQ", expr: influxql.MustParseExpr(`region != 'west'`), expected: slices.StringsToBytes("gpu")}, - {name: "EQREGEX", expr: influxql.MustParseExpr(`region =~ /.*st/`), expected: slices.StringsToBytes("cpu", "gpu", "mem")}, - {name: "NEQREGEX", expr: influxql.MustParseExpr(`region !~ /.*est/`), expected: slices.StringsToBytes("gpu")}, - } - - for _, idx := range tsdb.RegisteredIndexes() { - t.Run(idx, func(t *testing.T) { - t.Run("no authorization", func(t *testing.T) { - for _, example := range examples { - t.Run(example.name, func(t *testing.T) { - names, err := indexes[idx].IndexSet().MeasurementNamesByExpr(nil, example.expr) - if err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(names, example.expected) { - t.Fatalf("got names: %v, expected %v", slices.BytesToStrings(names), slices.BytesToStrings(example.expected)) - } - }) - } - }) - - t.Run("with authorization", func(t *testing.T) { - for _, example := range authExamples { - t.Run(example.name, func(t *testing.T) { - names, err := indexes[idx].IndexSet().MeasurementNamesByExpr(authorizer, example.expr) - if err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(names, example.expected) { - t.Fatalf("got names: %v, expected %v", slices.BytesToStrings(names), slices.BytesToStrings(example.expected)) - } - }) - } - }) - }) - } -} - -func TestIndexSet_MeasurementNamesByPredicate(t *testing.T) { - // Setup indexes - indexes := map[string]*Index{} - for _, name := range tsdb.RegisteredIndexes() { - idx := MustOpenNewIndex(t, name) - idx.AddSeries("cpu", map[string]string{"region": "east"}) - idx.AddSeries("cpu", map[string]string{"region": "west", "secret": "foo"}) - idx.AddSeries("disk", map[string]string{"secret": "foo"}) - idx.AddSeries("mem", map[string]string{"region": "west"}) - idx.AddSeries("gpu", map[string]string{"region": "east"}) - idx.AddSeries("pci", map[string]string{"region": "east", "secret": "foo"}) - indexes[name] = idx - defer idx.Close() - } - - authorizer := &internal.AuthorizerMock{ - AuthorizeSeriesReadFn: func(database string, measurement []byte, tags models.Tags) bool { - if tags.GetString("secret") != "" { - t.Logf("Rejecting series db=%s, m=%s, tags=%v", database, measurement, tags) - return false - } - return true - }, - } - - type example struct { - name string - expr influxql.Expr - expected [][]byte - } - - // These examples should be run without any auth. - examples := []example{ - {name: "all", expected: slices.StringsToBytes("cpu", "disk", "gpu", "mem", "pci")}, - {name: "EQ", expr: influxql.MustParseExpr(`region = 'west'`), expected: slices.StringsToBytes("cpu", "mem")}, - {name: "NEQ", expr: influxql.MustParseExpr(`region != 'west'`), expected: slices.StringsToBytes("cpu", "disk", "gpu", "pci")}, - {name: "EQREGEX", expr: influxql.MustParseExpr(`region =~ /.*st/`), expected: slices.StringsToBytes("cpu", "gpu", "mem", "pci")}, - {name: "NEQREGEX", expr: influxql.MustParseExpr(`region !~ /.*est/`), expected: slices.StringsToBytes("cpu", "disk", "gpu", "pci")}, - // None of the series have this tag so all should be selected. - {name: "EQ empty", expr: influxql.MustParseExpr(`host = ''`), expected: slices.StringsToBytes("cpu", "disk", "gpu", "mem", "pci")}, - // Measurements that have this tag at all should be returned. 
- {name: "NEQ empty", expr: influxql.MustParseExpr(`region != ''`), expected: slices.StringsToBytes("cpu", "gpu", "mem", "pci")}, - {name: "EQREGEX empty", expr: influxql.MustParseExpr(`host =~ /.*/`), expected: slices.StringsToBytes("cpu", "disk", "gpu", "mem", "pci")}, - {name: "NEQ empty", expr: influxql.MustParseExpr(`region !~ /.*/`), expected: slices.StringsToBytes()}, - } - - // These examples should be run with the authorizer. - authExamples := []example{ - {name: "all", expected: slices.StringsToBytes("cpu", "gpu", "mem")}, - {name: "EQ", expr: influxql.MustParseExpr(`region = 'west'`), expected: slices.StringsToBytes("mem")}, - {name: "NEQ", expr: influxql.MustParseExpr(`region != 'west'`), expected: slices.StringsToBytes("cpu", "gpu")}, - {name: "EQREGEX", expr: influxql.MustParseExpr(`region =~ /.*st/`), expected: slices.StringsToBytes("cpu", "gpu", "mem")}, - {name: "NEQREGEX", expr: influxql.MustParseExpr(`region !~ /.*est/`), expected: slices.StringsToBytes("cpu", "gpu")}, - {name: "EQ empty", expr: influxql.MustParseExpr(`host = ''`), expected: slices.StringsToBytes("cpu", "gpu", "mem")}, - {name: "NEQ empty", expr: influxql.MustParseExpr(`region != ''`), expected: slices.StringsToBytes("cpu", "gpu", "mem")}, - {name: "EQREGEX empty", expr: influxql.MustParseExpr(`host =~ /.*/`), expected: slices.StringsToBytes("cpu", "gpu", "mem")}, - {name: "NEQ empty", expr: influxql.MustParseExpr(`region !~ /.*/`), expected: slices.StringsToBytes()}, - } - - for _, idx := range tsdb.RegisteredIndexes() { - t.Run(idx, func(t *testing.T) { - t.Run("no authorization", func(t *testing.T) { - for _, example := range examples { - t.Run(example.name, func(t *testing.T) { - names, err := indexes[idx].IndexSet().MeasurementNamesByPredicate(nil, example.expr) - if err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(names, example.expected) { - t.Fatalf("got names: %v, expected %v", slices.BytesToStrings(names), slices.BytesToStrings(example.expected)) - } - }) - } - }) - - t.Run("with authorization", func(t *testing.T) { - for _, example := range authExamples { - t.Run(example.name, func(t *testing.T) { - names, err := indexes[idx].IndexSet().MeasurementNamesByPredicate(authorizer, example.expr) - if err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(names, example.expected) { - t.Fatalf("got names: %v, expected %v", slices.BytesToStrings(names), slices.BytesToStrings(example.expected)) - } - }) - } - }) - }) - } -} - -func TestIndex_Sketches(t *testing.T) { - checkCardinalities := func(t *testing.T, index *Index, state string, series, tseries, measurements, tmeasurements int) { - t.Helper() - - // Get sketches and check cardinality... - sketch, tsketch, err := index.SeriesSketches() - if err != nil { - t.Fatal(err) - } - - // delta calculates a rough 10% delta. If i is small then a minimum value - // of 2 is used. - delta := func(i int) int { - v := i / 10 - if v == 0 { - v = 2 - } - return v - } - - // series cardinality should be well within 10%. - if got, exp := int(sketch.Count()), series; got-exp < -delta(series) || got-exp > delta(series) { - t.Errorf("[%s] got series cardinality %d, expected ~%d", state, got, exp) - } - - // check series tombstones - if got, exp := int(tsketch.Count()), tseries; got-exp < -delta(tseries) || got-exp > delta(tseries) { - t.Errorf("[%s] got series tombstone cardinality %d, expected ~%d", state, got, exp) - } - - // Check measurement cardinality. 
- if sketch, tsketch, err = index.MeasurementsSketches(); err != nil { - t.Fatal(err) - } - - if got, exp := int(sketch.Count()), measurements; got != exp { //got-exp < -delta(measurements) || got-exp > delta(measurements) { - t.Errorf("[%s] got measurement cardinality %d, expected ~%d", state, got, exp) - } - - if got, exp := int(tsketch.Count()), tmeasurements; got != exp { //got-exp < -delta(tmeasurements) || got-exp > delta(tmeasurements) { - t.Errorf("[%s] got measurement tombstone cardinality %d, expected ~%d", state, got, exp) - } - } - - test := func(t *testing.T, index string) error { - idx := MustNewIndex(t, index) - if index, ok := idx.Index.(*tsi1.Index); ok { - // Override the log file max size to force a log file compaction sooner. - // This way, we will test the sketches are correct when they have been - // compacted into IndexFiles, and also when they're loaded from - // IndexFiles after a re-open. - tsi1.WithMaximumLogFileSize(1 << 10)(index) - } - - // Open the index - idx.MustOpen() - defer idx.Close() - - series := genTestSeries(10, 5, 3) - // Add series to index. - for _, serie := range series { - if err := idx.AddSeries(serie.Measurement, serie.Tags.Map()); err != nil { - t.Fatal(err) - } - } - - // Check cardinalities after adding series. - checkCardinalities(t, idx, "initial", 2430, 0, 10, 0) - - // Re-open step only applies to the TSI index. - if _, ok := idx.Index.(*tsi1.Index); ok { - // Re-open the index. - if err := idx.Reopen(); err != nil { - panic(err) - } - - // Check cardinalities after the reopen - checkCardinalities(t, idx, "initial|reopen", 2430, 0, 10, 0) - } - - // Drop some series - if err := idx.DropMeasurement([]byte("measurement2")); err != nil { - return err - } else if err := idx.DropMeasurement([]byte("measurement5")); err != nil { - return err - } - - // Check cardinalities after the delete - checkCardinalities(t, idx, "initial|reopen|delete", 2430, 486, 10, 2) - - // Re-open step only applies to the TSI index. - if _, ok := idx.Index.(*tsi1.Index); ok { - // Re-open the index. - if err := idx.Reopen(); err != nil { - panic(err) - } - - // Check cardinalities after the reopen - checkCardinalities(t, idx, "initial|reopen|delete|reopen", 2430, 486, 10, 2) - } - return nil - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - if err := test(t, index); err != nil { - t.Fatal(err) - } - }) - } -} - -// Index wraps a series file and index. -type Index struct { - tsdb.Index - rootPath string - indexType string - sfile *tsdb.SeriesFile -} - -type EngineOption func(opts *tsdb.EngineOptions) - -// DisableTSICache allows the caller to disable the TSI bitset cache during a test. -var DisableTSICache = func() EngineOption { - return func(opts *tsdb.EngineOptions) { - opts.Config.SeriesIDSetCacheSize = 0 - } -} - -// MustNewIndex will initialize a new index using the provide type. It creates -// everything under the same root directory so it can be cleanly removed on Close. -// -// The index will not be opened. 
-func MustNewIndex(tb testing.TB, index string, eopts ...EngineOption) *Index { - tb.Helper() - - opts := tsdb.NewEngineOptions() - opts.IndexVersion = index - - for _, opt := range eopts { - opt(&opts) - } - - rootPath := tb.TempDir() - - seriesPath, err := os.MkdirTemp(rootPath, tsdb.SeriesFileDirectory) - if err != nil { - panic(err) - } - - sfile := tsdb.NewSeriesFile(seriesPath) - if err := sfile.Open(); err != nil { - panic(err) - } - - i, err := tsdb.NewIndex(0, "db0", filepath.Join(rootPath, "index"), tsdb.NewSeriesIDSet(), sfile, opts) - if err != nil { - panic(err) - } - i.WithLogger(zaptest.NewLogger(tb)) - - idx := &Index{ - Index: i, - indexType: index, - rootPath: rootPath, - sfile: sfile, - } - return idx -} - -// MustOpenNewIndex will initialize a new index using the provide type and opens -// it. -func MustOpenNewIndex(tb testing.TB, index string, opts ...EngineOption) *Index { - tb.Helper() - - idx := MustNewIndex(tb, index, opts...) - idx.MustOpen() - return idx -} - -// MustOpen opens the underlying index or panics. -func (i *Index) MustOpen() { - if err := i.Index.Open(); err != nil { - panic(err) - } -} - -func (idx *Index) IndexSet() *tsdb.IndexSet { - return &tsdb.IndexSet{Indexes: []tsdb.Index{idx.Index}, SeriesFile: idx.sfile} -} - -func (idx *Index) AddSeries(name string, tags map[string]string) error { - t := models.NewTags(tags) - key := fmt.Sprintf("%s,%s", name, t.HashKey()) - return idx.CreateSeriesIfNotExists([]byte(key), []byte(name), t) -} - -// Reopen closes and re-opens the underlying index, without removing any data. -func (i *Index) Reopen() error { - if err := i.Index.Close(); err != nil { - return err - } - - if err := i.sfile.Close(); err != nil { - return err - } - - i.sfile = tsdb.NewSeriesFile(i.sfile.Path()) - if err := i.sfile.Open(); err != nil { - return err - } - - opts := tsdb.NewEngineOptions() - opts.IndexVersion = i.indexType - - idx, err := tsdb.NewIndex(0, "db0", filepath.Join(i.rootPath, "index"), tsdb.NewSeriesIDSet(), i.sfile, opts) - if err != nil { - return err - } - i.Index = idx - return i.Index.Open() -} - -// Close closes the index cleanly and removes all on-disk data. -func (i *Index) Close() error { - if err := i.Index.Close(); err != nil { - return err - } - - if err := i.sfile.Close(); err != nil { - return err - } - //return os.RemoveAll(i.rootPath) - return nil -} - -// This benchmark compares the TagSets implementation across index types. -// -// In the case of the TSI index, TagSets has to merge results across all several -// index partitions. -// -// Typical results on an i7 laptop. -// -// BenchmarkIndexSet_TagSets/1M_series/tsi1-8 100 18995530 ns/op 5221180 B/op 20379 allocs/op -func BenchmarkIndexSet_TagSets(b *testing.B) { - // Read line-protocol and coerce into tsdb format. 
-	keys := make([][]byte, 0, 1e6)
-	names := make([][]byte, 0, 1e6)
-	tags := make([]models.Tags, 0, 1e6)
-
-	// 1M series generated with:
-	// $inch -b 10000 -c 1 -t 10,10,10,10,10,10 -f 1 -m 5 -p 1
-	fd, err := os.Open("testdata/line-protocol-1M.txt.gz")
-	if err != nil {
-		b.Fatal(err)
-	}
-
-	gzr, err := gzip.NewReader(fd)
-	if err != nil {
-		fd.Close()
-		b.Fatal(err)
-	}
-
-	data, err := io.ReadAll(gzr)
-	if err != nil {
-		b.Fatal(err)
-	}
-
-	if err := fd.Close(); err != nil {
-		b.Fatal(err)
-	}
-
-	points, err := models.ParsePoints(data)
-	if err != nil {
-		b.Fatal(err)
-	}
-
-	for _, pt := range points {
-		keys = append(keys, pt.Key())
-		names = append(names, pt.Name())
-		tags = append(tags, pt.Tags())
-	}
-
-	// setup writes all of the above points to the index.
-	setup := func(idx *Index) {
-		batchSize := 10000
-		for j := 0; j < 1; j++ {
-			for i := 0; i < len(keys); i += batchSize {
-				k := keys[i : i+batchSize]
-				n := names[i : i+batchSize]
-				t := tags[i : i+batchSize]
-				if err := idx.CreateSeriesListIfNotExists(k, n, t); err != nil {
-					b.Fatal(err)
-				}
-			}
-		}
-	}
-
-	var errResult error
-
-	// This benchmark will merge eight bitsets each containing ~10,000 series IDs.
-	b.Run("1M series", func(b *testing.B) {
-		b.ReportAllocs()
-		for _, indexType := range tsdb.RegisteredIndexes() {
-			idx := MustOpenNewIndex(b, indexType)
-			setup(idx)
-
-			name := []byte("m4")
-			opt := query.IteratorOptions{Condition: influxql.MustParseExpr(`"tag5"::tag = 'value0'`)}
-			indexSet := tsdb.IndexSet{
-				SeriesFile: idx.sfile,
-				Indexes: []tsdb.Index{idx.Index},
-			} // For TSI implementation
-
-			ts := func() ([]*query.TagSet, error) {
-				return indexSet.TagSets(idx.sfile, name, opt)
-			}
-
-			b.Run(indexType, func(b *testing.B) {
-				for i := 0; i < b.N; i++ {
-					// Will call TagSets on the appropriate implementation.
-					_, errResult = ts()
-					if errResult != nil {
-						b.Fatal(errResult)
-					}
-				}
-			})
-
-			if err := idx.Close(); err != nil {
-				b.Fatal(err)
-			}
-		}
-	})
-}
-
-// This benchmark concurrently writes series to the index and fetches cached bitsets.
-// The idea is to emphasize the performance difference when bitset caching is on and off.
-//
-// Typical results for an i7 laptop
-//
-// BenchmarkIndex_ConcurrentWriteQuery/tsi1/queries_100000/cache-8 1 1645048376 ns/op 2215402840 B/op 23048978 allocs/op
-// BenchmarkIndex_ConcurrentWriteQuery/tsi1/queries_100000/no_cache-8 1 22242155616 ns/op 28277544136 B/op 79620463 allocs/op
-func BenchmarkIndex_ConcurrentWriteQuery(b *testing.B) {
-	// Read line-protocol and coerce into tsdb format.
- keys := make([][]byte, 0, 1e6) - names := make([][]byte, 0, 1e6) - tags := make([]models.Tags, 0, 1e6) - - // 1M series generated with: - // $inch -b 10000 -c 1 -t 10,10,10,10,10,10 -f 1 -m 5 -p 1 - fd, err := os.Open("testdata/line-protocol-1M.txt.gz") - if err != nil { - b.Fatal(err) - } - - gzr, err := gzip.NewReader(fd) - if err != nil { - fd.Close() - b.Fatal(err) - } - - data, err := io.ReadAll(gzr) - if err != nil { - b.Fatal(err) - } - - if err := fd.Close(); err != nil { - b.Fatal(err) - } - - points, err := models.ParsePoints(data) - if err != nil { - b.Fatal(err) - } - - for _, pt := range points { - keys = append(keys, pt.Key()) - names = append(names, pt.Name()) - tags = append(tags, pt.Tags()) - } - - runBenchmark := func(b *testing.B, index string, queryN int, useTSICache bool) { - var idx *Index - if !useTSICache { - idx = MustOpenNewIndex(b, index, DisableTSICache()) - } else { - idx = MustOpenNewIndex(b, index) - } - - var wg sync.WaitGroup - begin := make(chan struct{}) - - // Run concurrent iterator... - runIter := func() { - keys := [][]string{ - {"m0", "tag2", "value4"}, - {"m1", "tag3", "value5"}, - {"m2", "tag4", "value6"}, - {"m3", "tag0", "value8"}, - {"m4", "tag5", "value0"}, - } - - <-begin // Wait for writes to land - for i := 0; i < queryN/5; i++ { - for _, key := range keys { - itr, err := idx.TagValueSeriesIDIterator([]byte(key[0]), []byte(key[1]), []byte(key[2])) - if err != nil { - b.Fatal(err) - } - - if itr == nil { - panic("should not happen") - } - - if err := itr.Close(); err != nil { - b.Fatal(err) - } - } - } - } - - batchSize := 10000 - wg.Add(1) - go func() { defer wg.Done(); runIter() }() - var once sync.Once - for j := 0; j < b.N; j++ { - for i := 0; i < len(keys); i += batchSize { - k := keys[i : i+batchSize] - n := names[i : i+batchSize] - t := tags[i : i+batchSize] - if err := idx.CreateSeriesListIfNotExists(k, n, t); err != nil { - b.Fatal(err) - } - once.Do(func() { close(begin) }) - } - - // Wait for queries to finish - wg.Wait() - - // Reset the index... - b.StopTimer() - if err := idx.Close(); err != nil { - b.Fatal(err) - } - - // Re-open everything - idx = MustOpenNewIndex(b, index) - wg.Add(1) - begin = make(chan struct{}) - once = sync.Once{} - go func() { defer wg.Done(); runIter() }() - b.StartTimer() - } - } - - queries := []int{1e5} - for _, indexType := range tsdb.RegisteredIndexes() { - b.Run(indexType, func(b *testing.B) { - for _, queryN := range queries { - b.Run(fmt.Sprintf("queries %d", queryN), func(b *testing.B) { - b.Run("cache", func(b *testing.B) { - runBenchmark(b, indexType, queryN, true) - }) - - b.Run("no cache", func(b *testing.B) { - runBenchmark(b, indexType, queryN, false) - }) - }) - } - }) - } -} diff --git a/tsdb/internal/fieldsindex.pb.go b/tsdb/internal/fieldsindex.pb.go deleted file mode 100644 index 66e5295a7d0..00000000000 --- a/tsdb/internal/fieldsindex.pb.go +++ /dev/null @@ -1,640 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.27.1 -// protoc v3.17.3 -// source: internal/fieldsindex.proto - -package tsdb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type ChangeType int32 - -const ( - ChangeType_AddMeasurementField ChangeType = 0 - ChangeType_DeleteMeasurement ChangeType = 1 -) - -// Enum value maps for ChangeType. -var ( - ChangeType_name = map[int32]string{ - 0: "AddMeasurementField", - 1: "DeleteMeasurement", - } - ChangeType_value = map[string]int32{ - "AddMeasurementField": 0, - "DeleteMeasurement": 1, - } -) - -func (x ChangeType) Enum() *ChangeType { - p := new(ChangeType) - *p = x - return p -} - -func (x ChangeType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ChangeType) Descriptor() protoreflect.EnumDescriptor { - return file_internal_fieldsindex_proto_enumTypes[0].Descriptor() -} - -func (ChangeType) Type() protoreflect.EnumType { - return &file_internal_fieldsindex_proto_enumTypes[0] -} - -func (x ChangeType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ChangeType.Descriptor instead. -func (ChangeType) EnumDescriptor() ([]byte, []int) { - return file_internal_fieldsindex_proto_rawDescGZIP(), []int{0} -} - -type Series struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Key string `protobuf:"bytes,1,opt,name=Key,proto3" json:"Key,omitempty"` - Tags []*Tag `protobuf:"bytes,2,rep,name=Tags,proto3" json:"Tags,omitempty"` -} - -func (x *Series) Reset() { - *x = Series{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_fieldsindex_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Series) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Series) ProtoMessage() {} - -func (x *Series) ProtoReflect() protoreflect.Message { - mi := &file_internal_fieldsindex_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Series.ProtoReflect.Descriptor instead. -func (*Series) Descriptor() ([]byte, []int) { - return file_internal_fieldsindex_proto_rawDescGZIP(), []int{0} -} - -func (x *Series) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *Series) GetTags() []*Tag { - if x != nil { - return x.Tags - } - return nil -} - -type Tag struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Key string `protobuf:"bytes,1,opt,name=Key,proto3" json:"Key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=Value,proto3" json:"Value,omitempty"` -} - -func (x *Tag) Reset() { - *x = Tag{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_fieldsindex_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Tag) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Tag) ProtoMessage() {} - -func (x *Tag) ProtoReflect() protoreflect.Message { - mi := &file_internal_fieldsindex_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Tag.ProtoReflect.Descriptor instead. 
-func (*Tag) Descriptor() ([]byte, []int) { - return file_internal_fieldsindex_proto_rawDescGZIP(), []int{1} -} - -func (x *Tag) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *Tag) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -type MeasurementFields struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name []byte `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - Fields []*Field `protobuf:"bytes,2,rep,name=Fields,proto3" json:"Fields,omitempty"` -} - -func (x *MeasurementFields) Reset() { - *x = MeasurementFields{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_fieldsindex_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MeasurementFields) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MeasurementFields) ProtoMessage() {} - -func (x *MeasurementFields) ProtoReflect() protoreflect.Message { - mi := &file_internal_fieldsindex_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MeasurementFields.ProtoReflect.Descriptor instead. -func (*MeasurementFields) Descriptor() ([]byte, []int) { - return file_internal_fieldsindex_proto_rawDescGZIP(), []int{2} -} - -func (x *MeasurementFields) GetName() []byte { - if x != nil { - return x.Name - } - return nil -} - -func (x *MeasurementFields) GetFields() []*Field { - if x != nil { - return x.Fields - } - return nil -} - -type Field struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name []byte `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - Type int32 `protobuf:"varint,2,opt,name=Type,proto3" json:"Type,omitempty"` -} - -func (x *Field) Reset() { - *x = Field{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_fieldsindex_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Field) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Field) ProtoMessage() {} - -func (x *Field) ProtoReflect() protoreflect.Message { - mi := &file_internal_fieldsindex_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Field.ProtoReflect.Descriptor instead. 
-func (*Field) Descriptor() ([]byte, []int) { - return file_internal_fieldsindex_proto_rawDescGZIP(), []int{3} -} - -func (x *Field) GetName() []byte { - if x != nil { - return x.Name - } - return nil -} - -func (x *Field) GetType() int32 { - if x != nil { - return x.Type - } - return 0 -} - -type MeasurementFieldSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Measurements []*MeasurementFields `protobuf:"bytes,1,rep,name=Measurements,proto3" json:"Measurements,omitempty"` -} - -func (x *MeasurementFieldSet) Reset() { - *x = MeasurementFieldSet{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_fieldsindex_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MeasurementFieldSet) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MeasurementFieldSet) ProtoMessage() {} - -func (x *MeasurementFieldSet) ProtoReflect() protoreflect.Message { - mi := &file_internal_fieldsindex_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MeasurementFieldSet.ProtoReflect.Descriptor instead. -func (*MeasurementFieldSet) Descriptor() ([]byte, []int) { - return file_internal_fieldsindex_proto_rawDescGZIP(), []int{4} -} - -func (x *MeasurementFieldSet) GetMeasurements() []*MeasurementFields { - if x != nil { - return x.Measurements - } - return nil -} - -type MeasurementFieldChange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Measurement []byte `protobuf:"bytes,1,opt,name=Measurement,proto3" json:"Measurement,omitempty"` - Field *Field `protobuf:"bytes,2,opt,name=Field,proto3" json:"Field,omitempty"` - Change ChangeType `protobuf:"varint,3,opt,name=Change,proto3,enum=tsdb.ChangeType" json:"Change,omitempty"` -} - -func (x *MeasurementFieldChange) Reset() { - *x = MeasurementFieldChange{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_fieldsindex_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MeasurementFieldChange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MeasurementFieldChange) ProtoMessage() {} - -func (x *MeasurementFieldChange) ProtoReflect() protoreflect.Message { - mi := &file_internal_fieldsindex_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MeasurementFieldChange.ProtoReflect.Descriptor instead. 
-func (*MeasurementFieldChange) Descriptor() ([]byte, []int) { - return file_internal_fieldsindex_proto_rawDescGZIP(), []int{5} -} - -func (x *MeasurementFieldChange) GetMeasurement() []byte { - if x != nil { - return x.Measurement - } - return nil -} - -func (x *MeasurementFieldChange) GetField() *Field { - if x != nil { - return x.Field - } - return nil -} - -func (x *MeasurementFieldChange) GetChange() ChangeType { - if x != nil { - return x.Change - } - return ChangeType_AddMeasurementField -} - -type FieldChangeSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Changes []*MeasurementFieldChange `protobuf:"bytes,1,rep,name=Changes,proto3" json:"Changes,omitempty"` -} - -func (x *FieldChangeSet) Reset() { - *x = FieldChangeSet{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_fieldsindex_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FieldChangeSet) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FieldChangeSet) ProtoMessage() {} - -func (x *FieldChangeSet) ProtoReflect() protoreflect.Message { - mi := &file_internal_fieldsindex_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FieldChangeSet.ProtoReflect.Descriptor instead. -func (*FieldChangeSet) Descriptor() ([]byte, []int) { - return file_internal_fieldsindex_proto_rawDescGZIP(), []int{6} -} - -func (x *FieldChangeSet) GetChanges() []*MeasurementFieldChange { - if x != nil { - return x.Changes - } - return nil -} - -var File_internal_fieldsindex_proto protoreflect.FileDescriptor - -var file_internal_fieldsindex_proto_rawDesc = []byte{ - 0x0a, 0x1a, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x74, 0x73, - 0x64, 0x62, 0x22, 0x39, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, - 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x1d, - 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x74, - 0x73, 0x64, 0x62, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x22, 0x2d, 0x0a, - 0x03, 0x54, 0x61, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x4b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4c, 0x0a, 0x11, - 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x06, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x73, 0x64, 0x62, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x52, 0x06, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x2f, 0x0a, 0x05, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x22, 0x52, 0x0a, 0x13, 0x4d, - 
0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, - 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x0c, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x73, 0x64, 0x62, 0x2e, - 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x52, 0x0c, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, - 0x87, 0x01, 0x0a, 0x16, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x4d, 0x65, - 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0b, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x05, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x74, 0x73, - 0x64, 0x62, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, - 0x28, 0x0a, 0x06, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x10, 0x2e, 0x74, 0x73, 0x64, 0x62, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x06, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x48, 0x0a, 0x0e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x65, 0x74, 0x12, 0x36, 0x0a, 0x07, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, - 0x73, 0x64, 0x62, 0x2e, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x07, 0x43, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x73, 0x2a, 0x3c, 0x0a, 0x0a, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x64, 0x64, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x10, - 0x01, 0x42, 0x08, 0x5a, 0x06, 0x2e, 0x3b, 0x74, 0x73, 0x64, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_internal_fieldsindex_proto_rawDescOnce sync.Once - file_internal_fieldsindex_proto_rawDescData = file_internal_fieldsindex_proto_rawDesc -) - -func file_internal_fieldsindex_proto_rawDescGZIP() []byte { - file_internal_fieldsindex_proto_rawDescOnce.Do(func() { - file_internal_fieldsindex_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_fieldsindex_proto_rawDescData) - }) - return file_internal_fieldsindex_proto_rawDescData -} - -var file_internal_fieldsindex_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_internal_fieldsindex_proto_msgTypes = make([]protoimpl.MessageInfo, 7) -var file_internal_fieldsindex_proto_goTypes = []interface{}{ - (ChangeType)(0), // 0: tsdb.ChangeType - (*Series)(nil), // 1: tsdb.Series - (*Tag)(nil), // 2: tsdb.Tag - (*MeasurementFields)(nil), // 3: tsdb.MeasurementFields - (*Field)(nil), // 4: tsdb.Field - (*MeasurementFieldSet)(nil), // 5: tsdb.MeasurementFieldSet - (*MeasurementFieldChange)(nil), // 6: tsdb.MeasurementFieldChange - (*FieldChangeSet)(nil), // 7: tsdb.FieldChangeSet -} -var file_internal_fieldsindex_proto_depIdxs = []int32{ - 2, // 0: tsdb.Series.Tags:type_name -> tsdb.Tag - 4, // 1: tsdb.MeasurementFields.Fields:type_name -> tsdb.Field - 3, // 2: 
tsdb.MeasurementFieldSet.Measurements:type_name -> tsdb.MeasurementFields - 4, // 3: tsdb.MeasurementFieldChange.Field:type_name -> tsdb.Field - 0, // 4: tsdb.MeasurementFieldChange.Change:type_name -> tsdb.ChangeType - 6, // 5: tsdb.FieldChangeSet.Changes:type_name -> tsdb.MeasurementFieldChange - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name -} - -func init() { file_internal_fieldsindex_proto_init() } -func file_internal_fieldsindex_proto_init() { - if File_internal_fieldsindex_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_fieldsindex_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Series); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_fieldsindex_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Tag); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_fieldsindex_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MeasurementFields); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_fieldsindex_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Field); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_fieldsindex_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MeasurementFieldSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_fieldsindex_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MeasurementFieldChange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_fieldsindex_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FieldChangeSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_fieldsindex_proto_rawDesc, - NumEnums: 1, - NumMessages: 7, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_internal_fieldsindex_proto_goTypes, - DependencyIndexes: file_internal_fieldsindex_proto_depIdxs, - EnumInfos: file_internal_fieldsindex_proto_enumTypes, - MessageInfos: file_internal_fieldsindex_proto_msgTypes, - }.Build() - File_internal_fieldsindex_proto = out.File - file_internal_fieldsindex_proto_rawDesc = nil - file_internal_fieldsindex_proto_goTypes = nil - file_internal_fieldsindex_proto_depIdxs = nil -} diff --git a/tsdb/internal/fieldsindex.proto b/tsdb/internal/fieldsindex.proto deleted file mode 100644 index 2e4c80cf0b6..00000000000 --- a/tsdb/internal/fieldsindex.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; - -package tsdb; -option 
go_package = ".;tsdb"; - -//======================================================================== -// -// Metadata -// -//======================================================================== - -message Series { - string Key = 1; - repeated Tag Tags = 2; -} - -message Tag { - string Key = 1; - string Value = 2; -} - -message MeasurementFields { - bytes Name = 1; - repeated Field Fields = 2; -} - -message Field { - bytes Name = 1; - int32 Type = 2; -} - -message MeasurementFieldSet { - repeated MeasurementFields Measurements = 1; -} - -enum ChangeType { - AddMeasurementField = 0; - DeleteMeasurement = 1; -} - -message MeasurementFieldChange { - bytes Measurement = 1; - Field Field = 2; - ChangeType Change = 3; -} - -message FieldChangeSet { - repeated MeasurementFieldChange Changes = 1; -} \ No newline at end of file diff --git a/tsdb/meta.go b/tsdb/meta.go deleted file mode 100644 index 8c81e016d8d..00000000000 --- a/tsdb/meta.go +++ /dev/null @@ -1,98 +0,0 @@ -package tsdb - -//go:generate protoc --go_out=internal/ internal/fieldsindex.proto - -import ( - "sort" - - "github.com/influxdata/influxdb/v2/models" -) - -// MarshalTags converts a tag set to bytes for use as a lookup key. -func MarshalTags(tags map[string]string) []byte { - // Empty maps marshal to empty bytes. - if len(tags) == 0 { - return nil - } - - // Extract keys and determine final size. - sz := (len(tags) * 2) - 1 // separators - keys := make([]string, 0, len(tags)) - for k, v := range tags { - keys = append(keys, k) - sz += len(k) + len(v) - } - sort.Strings(keys) - - // Generate marshaled bytes. - b := make([]byte, sz) - buf := b - for _, k := range keys { - copy(buf, k) - buf[len(k)] = '|' - buf = buf[len(k)+1:] - } - for i, k := range keys { - v := tags[k] - copy(buf, v) - if i < len(keys)-1 { - buf[len(v)] = '|' - buf = buf[len(v)+1:] - } - } - return b -} - -// MakeTagsKey converts a tag set to bytes for use as a lookup key. -func MakeTagsKey(keys []string, tags models.Tags) []byte { - // precondition: keys is sorted - // precondition: models.Tags is sorted - - // Empty maps marshal to empty bytes. - if len(keys) == 0 || len(tags) == 0 { - return nil - } - - sel := make([]int, 0, len(keys)) - - sz := 0 - i, j := 0, 0 - for i < len(keys) && j < len(tags) { - if keys[i] < string(tags[j].Key) { - i++ - } else if keys[i] > string(tags[j].Key) { - j++ - } else { - sel = append(sel, j) - sz += len(keys[i]) + len(tags[j].Value) - i++ - j++ - } - } - - if len(sel) == 0 { - // no tags matched the requested keys - return nil - } - - sz += (len(sel) * 2) - 1 // selected tags, add separators - - // Generate marshaled bytes. - b := make([]byte, sz) - buf := b - for _, k := range sel { - copy(buf, tags[k].Key) - buf[len(tags[k].Key)] = '|' - buf = buf[len(tags[k].Key)+1:] - } - - for i, k := range sel { - copy(buf, tags[k].Value) - if i < len(sel)-1 { - buf[len(tags[k].Value)] = '|' - buf = buf[len(tags[k].Value)+1:] - } - } - - return b -} diff --git a/tsdb/meta_test.go b/tsdb/meta_test.go deleted file mode 100644 index 499b52c4801..00000000000 --- a/tsdb/meta_test.go +++ /dev/null @@ -1,261 +0,0 @@ -package tsdb_test - -import ( - "bytes" - "fmt" - "testing" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" -) - -// Ensure tags can be marshaled into a byte slice. 
-func TestMarshalTags(t *testing.T) { - for i, tt := range []struct { - tags map[string]string - result []byte - }{ - { - tags: nil, - result: nil, - }, - { - tags: map[string]string{"foo": "bar"}, - result: []byte(`foo|bar`), - }, - { - tags: map[string]string{"foo": "bar", "baz": "battttt"}, - result: []byte(`baz|foo|battttt|bar`), - }, - { - tags: map[string]string{"baz": "battttt", "foo": "bar"}, - result: []byte(`baz|foo|battttt|bar`), - }, - } { - result := tsdb.MarshalTags(tt.tags) - if !bytes.Equal(result, tt.result) { - t.Fatalf("%d. unexpected result: exp=%s, got=%s", i, tt.result, result) - } - } -} - -func BenchmarkMarshalTags_KeyN1(b *testing.B) { benchmarkMarshalTags(b, 1) } -func BenchmarkMarshalTags_KeyN3(b *testing.B) { benchmarkMarshalTags(b, 3) } -func BenchmarkMarshalTags_KeyN5(b *testing.B) { benchmarkMarshalTags(b, 5) } -func BenchmarkMarshalTags_KeyN10(b *testing.B) { benchmarkMarshalTags(b, 10) } - -func benchmarkMarshalTags(b *testing.B, keyN int) { - const keySize, valueSize = 8, 15 - - // Generate tag map. - tags := make(map[string]string) - for i := 0; i < keyN; i++ { - tags[fmt.Sprintf("%0*d", keySize, i)] = fmt.Sprintf("%0*d", valueSize, i) - } - - // Unmarshal map into byte slice. - b.ReportAllocs() - for i := 0; i < b.N; i++ { - tsdb.MarshalTags(tags) - } -} - -// Ensure tags can be marshaled into a byte slice. -func TestMakeTagsKey(t *testing.T) { - for i, tt := range []struct { - keys []string - tags models.Tags - result []byte - }{ - { - keys: nil, - tags: nil, - result: nil, - }, - { - keys: []string{"foo"}, - tags: models.NewTags(map[string]string{"foo": "bar"}), - result: []byte(`foo|bar`), - }, - { - keys: []string{"foo"}, - tags: models.NewTags(map[string]string{"baz": "battttt"}), - result: []byte(``), - }, - { - keys: []string{"baz", "foo"}, - tags: models.NewTags(map[string]string{"baz": "battttt"}), - result: []byte(`baz|battttt`), - }, - { - keys: []string{"baz", "foo", "zzz"}, - tags: models.NewTags(map[string]string{"foo": "bar"}), - result: []byte(`foo|bar`), - }, - { - keys: []string{"baz", "foo"}, - tags: models.NewTags(map[string]string{"foo": "bar", "baz": "battttt"}), - result: []byte(`baz|foo|battttt|bar`), - }, - { - keys: []string{"baz"}, - tags: models.NewTags(map[string]string{"baz": "battttt", "foo": "bar"}), - result: []byte(`baz|battttt`), - }, - } { - result := tsdb.MakeTagsKey(tt.keys, tt.tags) - if !bytes.Equal(result, tt.result) { - t.Fatalf("%d. unexpected result: exp=%s, got=%s", i, tt.result, result) - } - } -} - -func BenchmarkMakeTagsKey_KeyN1(b *testing.B) { benchmarkMakeTagsKey(b, 1) } -func BenchmarkMakeTagsKey_KeyN3(b *testing.B) { benchmarkMakeTagsKey(b, 3) } -func BenchmarkMakeTagsKey_KeyN5(b *testing.B) { benchmarkMakeTagsKey(b, 5) } -func BenchmarkMakeTagsKey_KeyN10(b *testing.B) { benchmarkMakeTagsKey(b, 10) } - -func makeTagsAndKeys(keyN int) ([]string, models.Tags) { - const keySize, valueSize = 8, 15 - - // Generate tag map. - keys := make([]string, keyN) - tags := make(map[string]string) - for i := 0; i < keyN; i++ { - keys[i] = fmt.Sprintf("%0*d", keySize, i) - tags[keys[i]] = fmt.Sprintf("%0*d", valueSize, i) - } - - return keys, models.NewTags(tags) -} - -func benchmarkMakeTagsKey(b *testing.B, keyN int) { - keys, tags := makeTagsAndKeys(keyN) - - // Unmarshal map into byte slice. 
- b.ReportAllocs() - for i := 0; i < b.N; i++ { - tsdb.MakeTagsKey(keys, tags) - } -} - -type TestSeries struct { - Measurement string - Key string - Tags models.Tags -} - -func genTestSeries(mCnt, tCnt, vCnt int) []*TestSeries { - measurements := genStrList("measurement", mCnt) - tagSets := NewTagSetGenerator(tCnt, vCnt).AllSets() - series := make([]*TestSeries, 0, mCnt*len(tagSets)) - for _, m := range measurements { - for _, ts := range tagSets { - series = append(series, &TestSeries{ - Measurement: m, - Key: fmt.Sprintf("%s:%s", m, string(tsdb.MarshalTags(ts))), - Tags: models.NewTags(ts), - }) - } - } - return series -} - -type TagValGenerator struct { - Key string - Vals []string - idx int -} - -func NewTagValGenerator(tagKey string, nVals int) *TagValGenerator { - tvg := &TagValGenerator{Key: tagKey, Vals: make([]string, 0, nVals)} - for i := 0; i < nVals; i++ { - tvg.Vals = append(tvg.Vals, fmt.Sprintf("tagValue%d", i)) - } - return tvg -} - -func (tvg *TagValGenerator) First() string { - tvg.idx = 0 - return tvg.Curr() -} - -func (tvg *TagValGenerator) Curr() string { - return tvg.Vals[tvg.idx] -} - -func (tvg *TagValGenerator) Next() string { - tvg.idx++ - if tvg.idx >= len(tvg.Vals) { - tvg.idx-- - return "" - } - return tvg.Curr() -} - -type TagSet map[string]string - -type TagSetGenerator struct { - TagVals []*TagValGenerator -} - -func NewTagSetGenerator(nSets int, nTagVals ...int) *TagSetGenerator { - tsg := &TagSetGenerator{TagVals: make([]*TagValGenerator, 0, nSets)} - for i := 0; i < nSets; i++ { - nVals := nTagVals[0] - if i < len(nTagVals) { - nVals = nTagVals[i] - } - tagKey := fmt.Sprintf("tagKey%d", i) - tsg.TagVals = append(tsg.TagVals, NewTagValGenerator(tagKey, nVals)) - } - return tsg -} - -func (tsg *TagSetGenerator) First() TagSet { - for _, tsv := range tsg.TagVals { - tsv.First() - } - return tsg.Curr() -} - -func (tsg *TagSetGenerator) Curr() TagSet { - ts := TagSet{} - for _, tvg := range tsg.TagVals { - ts[tvg.Key] = tvg.Curr() - } - return ts -} - -func (tsg *TagSetGenerator) Next() TagSet { - val := "" - for _, tsv := range tsg.TagVals { - if val = tsv.Next(); val != "" { - break - } else { - tsv.First() - } - } - - if val == "" { - return nil - } - - return tsg.Curr() -} - -func (tsg *TagSetGenerator) AllSets() []TagSet { - allSets := []TagSet{} - for ts := tsg.First(); ts != nil; ts = tsg.Next() { - allSets = append(allSets, ts) - } - return allSets -} - -func genStrList(prefix string, n int) []string { - lst := make([]string, 0, n) - for i := 0; i < n; i++ { - lst = append(lst, fmt.Sprintf("%s%d", prefix, i)) - } - return lst -} diff --git a/tsdb/series_cursor.go b/tsdb/series_cursor.go deleted file mode 100644 index 127fe6b541d..00000000000 --- a/tsdb/series_cursor.go +++ /dev/null @@ -1,155 +0,0 @@ -package tsdb - -import ( - "bytes" - "errors" - "sort" - "sync" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxql" -) - -type SeriesCursor interface { - Close() error - Next() (*SeriesCursorRow, error) -} - -type SeriesCursorRequest struct { - Measurements MeasurementIterator -} - -// seriesCursor is an implementation of SeriesCursor over an IndexSet. 
-type seriesCursor struct { - once sync.Once - indexSet IndexSet - mitr MeasurementIterator - keys [][]byte - ofs int - row SeriesCursorRow - cond influxql.Expr -} - -type SeriesCursorRow struct { - Name []byte - Tags models.Tags -} - -func (r *SeriesCursorRow) Compare(other *SeriesCursorRow) int { - if r == other { - return 0 - } else if r == nil { - return -1 - } else if other == nil { - return 1 - } - cmp := bytes.Compare(r.Name, other.Name) - if cmp != 0 { - return cmp - } - return models.CompareTags(r.Tags, other.Tags) -} - -// newSeriesCursor returns a new instance of SeriesCursor. -func newSeriesCursor(req SeriesCursorRequest, indexSet IndexSet, cond influxql.Expr) (_ SeriesCursor, err error) { - // Only equality operators are allowed. - influxql.WalkFunc(cond, func(node influxql.Node) { - switch n := node.(type) { - case *influxql.BinaryExpr: - switch n.Op { - case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX, influxql.OR, influxql.AND: - default: - err = errors.New("invalid tag comparison operator") - } - } - }) - if err != nil { - return nil, err - } - - mitr := req.Measurements - if mitr == nil { - mitr, err = indexSet.MeasurementIterator() - if err != nil { - return nil, err - } - } - - return &seriesCursor{ - indexSet: indexSet, - mitr: mitr, - cond: cond, - }, nil -} - -// Close closes the iterator. -func (cur *seriesCursor) Close() (err error) { - cur.once.Do(func() { - if cur.mitr != nil { - err = cur.mitr.Close() - } - }) - return err -} - -// Next emits the next point in the iterator. -func (cur *seriesCursor) Next() (*SeriesCursorRow, error) { - for { - // Read series keys for next measurement if no more keys remaining. - // Exit if there are no measurements remaining. - if cur.ofs == len(cur.keys) { - m, err := cur.mitr.Next() - if err != nil { - return nil, err - } else if m == nil { - return nil, nil - } - - if err := cur.readSeriesKeys(m); err != nil { - return nil, err - } - continue - } - - cur.row.Name, cur.row.Tags = ParseSeriesKey(cur.keys[cur.ofs]) - cur.ofs++ - - //if itr.opt.Authorizer != nil && !itr.opt.Authorizer.AuthorizeSeriesRead(itr.indexSet.Database(), name, tags) { - // continue - //} - - return &cur.row, nil - } -} - -func (cur *seriesCursor) readSeriesKeys(name []byte) error { - sitr, err := cur.indexSet.MeasurementSeriesByExprIterator(name, cur.cond) - if err != nil { - return err - } else if sitr == nil { - return nil - } - defer sitr.Close() - - // Slurp all series keys. - cur.ofs = 0 - cur.keys = cur.keys[:0] - for { - elem, err := sitr.Next() - if err != nil { - return err - } else if elem.SeriesID == 0 { - break - } - - key := cur.indexSet.SeriesFile.SeriesKey(elem.SeriesID) - if len(key) == 0 { - continue - } - cur.keys = append(cur.keys, key) - } - - // Sort keys. 
- sort.Sort(seriesKeys(cur.keys)) - return nil -} diff --git a/tsdb/series_file.go b/tsdb/series_file.go deleted file mode 100644 index 4100dcfe4ab..00000000000 --- a/tsdb/series_file.go +++ /dev/null @@ -1,538 +0,0 @@ -package tsdb - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sort" - "sync" - - "github.com/cespare/xxhash" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/binaryutil" - "github.com/influxdata/influxdb/v2/pkg/limiter" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -var ( - ErrSeriesFileClosed = errors.New("tsdb: series file closed") - ErrInvalidSeriesPartitionID = errors.New("tsdb: invalid series partition id") -) - -// SeriesIDSize is the size in bytes of a series key ID. -const SeriesIDSize = 8 - -const ( - // SeriesFilePartitionN is the number of partitions a series file is split into. - SeriesFilePartitionN = 8 -) - -// SeriesFile represents the section of the index that holds series data. -type SeriesFile struct { - path string - partitions []*SeriesPartition - - maxSnapshotConcurrency int - - refs sync.RWMutex // RWMutex to track references to the SeriesFile that are in use. - - Logger *zap.Logger -} - -// NewSeriesFile returns a new instance of SeriesFile. -func NewSeriesFile(path string) *SeriesFile { - maxSnapshotConcurrency := runtime.GOMAXPROCS(0) - if maxSnapshotConcurrency < 1 { - maxSnapshotConcurrency = 1 - } - - return &SeriesFile{ - path: path, - maxSnapshotConcurrency: maxSnapshotConcurrency, - Logger: zap.NewNop(), - } -} - -func (f *SeriesFile) WithMaxCompactionConcurrency(maxCompactionConcurrency int) { - if maxCompactionConcurrency < 1 { - maxCompactionConcurrency = runtime.GOMAXPROCS(0) - if maxCompactionConcurrency < 1 { - maxCompactionConcurrency = 1 - } - } - - f.maxSnapshotConcurrency = maxCompactionConcurrency -} - -// Open memory maps the data file at the file's path. -func (f *SeriesFile) Open() error { - // Wait for all references to be released and prevent new ones from being acquired. - f.refs.Lock() - defer f.refs.Unlock() - - // Create path if it doesn't exist. - if err := os.MkdirAll(filepath.Join(f.path), 0777); err != nil { - return err - } - - // Limit concurrent series file compactions - compactionLimiter := limiter.NewFixed(f.maxSnapshotConcurrency) - - // Open partitions. - f.partitions = make([]*SeriesPartition, 0, SeriesFilePartitionN) - for i := 0; i < SeriesFilePartitionN; i++ { - p := NewSeriesPartition(i, f.SeriesPartitionPath(i), compactionLimiter) - p.Logger = f.Logger.With(zap.Int("partition", p.ID())) - if err := p.Open(); err != nil { - f.Logger.Error("Unable to open series file", - zap.String("path", f.path), - zap.Int("partition", p.ID()), - zap.Error(err)) - f.close() - return err - } - f.partitions = append(f.partitions, p) - } - - return nil -} - -func (f *SeriesFile) close() (err error) { - for _, p := range f.partitions { - if e := p.Close(); e != nil && err == nil { - err = e - } - } - - return err -} - -// Close unmaps the data file. -func (f *SeriesFile) Close() (err error) { - f.refs.Lock() - defer f.refs.Unlock() - return f.close() -} - -// Path returns the path to the file. -func (f *SeriesFile) Path() string { return f.path } - -// SeriesPartitionPath returns the path to a given partition. -func (f *SeriesFile) SeriesPartitionPath(i int) string { - return filepath.Join(f.path, fmt.Sprintf("%02x", i)) -} - -// Partitions returns all partitions. 
-func (f *SeriesFile) Partitions() []*SeriesPartition { return f.partitions } - -// Retain adds a reference count to the file. It returns a release func. -func (f *SeriesFile) Retain() func() { - if f != nil { - f.refs.RLock() - - // Return the RUnlock func as the release func to be called when done. - return f.refs.RUnlock - } - return nop -} - -// EnableCompactions allows compactions to run. -func (f *SeriesFile) EnableCompactions() { - for _, p := range f.partitions { - p.EnableCompactions() - } -} - -// DisableCompactions prevents new compactions from running. -func (f *SeriesFile) DisableCompactions() { - for _, p := range f.partitions { - p.DisableCompactions() - } -} - -// Wait waits for all Retains to be released. -func (f *SeriesFile) Wait() { - f.refs.Lock() - defer f.refs.Unlock() -} - -// FileSize returns the size of all partitions, in bytes. -func (f *SeriesFile) FileSize() (n int64, err error) { - for _, p := range f.partitions { - v, err := p.FileSize() - n += v - if err != nil { - return n, err - } - } - return n, err -} - -// CreateSeriesListIfNotExists creates a list of series in bulk if they don't exist. -// The returned ids slice returns IDs for every name+tags, creating new series IDs as needed. -func (f *SeriesFile) CreateSeriesListIfNotExists(names [][]byte, tagsSlice []models.Tags) ([]uint64, error) { - keys := GenerateSeriesKeys(names, tagsSlice) - keyPartitionIDs := f.SeriesKeysPartitionIDs(keys) - ids := make([]uint64, len(keys)) - - var g errgroup.Group - for i := range f.partitions { - p := f.partitions[i] - g.Go(func() error { - return p.CreateSeriesListIfNotExists(keys, keyPartitionIDs, ids) - }) - } - if err := g.Wait(); err != nil { - return nil, err - } - return ids, nil -} - -// DeleteSeriesID flags a series as permanently deleted. -// If the series is reintroduced later then it must create a new id. -func (f *SeriesFile) DeleteSeriesID(id uint64) error { - p := f.SeriesIDPartition(id) - if p == nil { - return ErrInvalidSeriesPartitionID - } - return p.DeleteSeriesID(id) -} - -// IsDeleted returns true if the ID has been deleted before. -func (f *SeriesFile) IsDeleted(id uint64) bool { - p := f.SeriesIDPartition(id) - if p == nil { - return false - } - return p.IsDeleted(id) -} - -// SeriesKey returns the series key for a given id. -func (f *SeriesFile) SeriesKey(id uint64) []byte { - if id == 0 { - return nil - } - p := f.SeriesIDPartition(id) - if p == nil { - return nil - } - return p.SeriesKey(id) -} - -// SeriesKeys returns a list of series keys from a list of ids. -func (f *SeriesFile) SeriesKeys(ids []uint64) [][]byte { - keys := make([][]byte, len(ids)) - for i := range ids { - keys[i] = f.SeriesKey(ids[i]) - } - return keys -} - -// Series returns the parsed series name and tags for an offset. -func (f *SeriesFile) Series(id uint64) ([]byte, models.Tags) { - key := f.SeriesKey(id) - if key == nil { - return nil, nil - } - return ParseSeriesKey(key) -} - -// SeriesID return the series id for the series. -func (f *SeriesFile) SeriesID(name []byte, tags models.Tags, buf []byte) uint64 { - key := AppendSeriesKey(buf[:0], name, tags) - keyPartition := f.SeriesKeyPartition(key) - if keyPartition == nil { - return 0 - } - return keyPartition.FindIDBySeriesKey(key) -} - -// HasSeries return true if the series exists. -func (f *SeriesFile) HasSeries(name []byte, tags models.Tags, buf []byte) bool { - return f.SeriesID(name, tags, buf) > 0 -} - -// SeriesCount returns the number of series. 
-func (f *SeriesFile) SeriesCount() uint64 { - var n uint64 - for _, p := range f.partitions { - n += p.SeriesCount() - } - return n -} - -// SeriesIDIterator returns an iterator over all the series. -func (f *SeriesFile) SeriesIDIterator() SeriesIDIterator { - var ids []uint64 - for _, p := range f.partitions { - ids = p.AppendSeriesIDs(ids) - } - sort.Sort(uint64Slice(ids)) - return NewSeriesIDSliceIterator(ids) -} - -func (f *SeriesFile) SeriesIDPartitionID(id uint64) int { - return int((id - 1) % SeriesFilePartitionN) -} - -func (f *SeriesFile) SeriesIDPartition(id uint64) *SeriesPartition { - partitionID := f.SeriesIDPartitionID(id) - if partitionID >= len(f.partitions) { - return nil - } - return f.partitions[partitionID] -} - -func (f *SeriesFile) SeriesKeysPartitionIDs(keys [][]byte) []int { - partitionIDs := make([]int, len(keys)) - for i := range keys { - partitionIDs[i] = f.SeriesKeyPartitionID(keys[i]) - } - return partitionIDs -} - -func (f *SeriesFile) SeriesKeyPartitionID(key []byte) int { - return int(xxhash.Sum64(key) % SeriesFilePartitionN) -} - -func (f *SeriesFile) SeriesKeyPartition(key []byte) *SeriesPartition { - partitionID := f.SeriesKeyPartitionID(key) - if partitionID >= len(f.partitions) { - return nil - } - return f.partitions[partitionID] -} - -// AppendSeriesKey serializes name and tags to a byte slice. -// The total length is prepended as a uvarint. -func AppendSeriesKey(dst []byte, name []byte, tags models.Tags) []byte { - buf := make([]byte, binary.MaxVarintLen64) - origLen := len(dst) - - // The tag count is variable encoded, so we need to know ahead of time what - // the size of the tag count value will be. - tcBuf := make([]byte, binary.MaxVarintLen64) - tcSz := binary.PutUvarint(tcBuf, uint64(len(tags))) - - // Size of name/tags. Does not include total length. - size := 0 + // - 2 + // size of measurement - len(name) + // measurement - tcSz + // size of number of tags - (4 * len(tags)) + // length of each tag key and value - tags.Size() // size of tag keys/values - - // Variable encode length. - totalSz := binary.PutUvarint(buf, uint64(size)) - - // If caller doesn't provide a buffer then pre-allocate an exact one. - if dst == nil { - dst = make([]byte, 0, size+totalSz) - } - - // Append total length. - dst = append(dst, buf[:totalSz]...) - - // Append name. - binary.BigEndian.PutUint16(buf, uint16(len(name))) - dst = append(dst, buf[:2]...) - dst = append(dst, name...) - - // Append tag count. - dst = append(dst, tcBuf[:tcSz]...) - - // Append tags. - for _, tag := range tags { - binary.BigEndian.PutUint16(buf, uint16(len(tag.Key))) - dst = append(dst, buf[:2]...) - dst = append(dst, tag.Key...) - - binary.BigEndian.PutUint16(buf, uint16(len(tag.Value))) - dst = append(dst, buf[:2]...) - dst = append(dst, tag.Value...) - } - - // Verify that the total length equals the encoded byte count. - if got, exp := len(dst)-origLen, size+totalSz; got != exp { - panic(fmt.Sprintf("series key encoding does not match calculated total length: actual=%d, exp=%d, key=%x", got, exp, dst)) - } - - return dst -} - -// ReadSeriesKey returns the series key from the beginning of the buffer. 
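// Illustrative sketch, not part of the original tsdb/series_file.go: it shows how
// AppendSeriesKey and the xxhash-based partitioning above are combined. The
// length-prefixed encoded key is what gets hashed to select one of the
// SeriesFilePartitionN partitions, mirroring SeriesKeyPartitionID. The helper
// name below is hypothetical; all called functions appear in this file.
func exampleAppendAndPartitionKey() {
	name := []byte("cpu")
	tags := models.NewTags(map[string]string{"host": "a", "region": "east"})

	// Passing nil lets AppendSeriesKey pre-allocate an exactly sized buffer.
	key := AppendSeriesKey(nil, name, tags)

	// The owning partition is derived from a hash of the whole encoded key.
	partitionID := int(xxhash.Sum64(key) % SeriesFilePartitionN)
	fmt.Printf("key=%x partition=%d\n", key, partitionID)
}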
-func ReadSeriesKey(data []byte) (key, remainder []byte) { - sz, n := binary.Uvarint(data) - return data[:int(sz)+n], data[int(sz)+n:] -} - -func ReadSeriesKeyLen(data []byte) (sz int, remainder []byte) { - sz64, i := binary.Uvarint(data) - return int(sz64), data[i:] -} - -func ReadSeriesKeyMeasurement(data []byte) (name, remainder []byte) { - n, data := binary.BigEndian.Uint16(data), data[2:] - return data[:n], data[n:] -} - -func ReadSeriesKeyTagN(data []byte) (n int, remainder []byte) { - n64, i := binary.Uvarint(data) - return int(n64), data[i:] -} - -func ReadSeriesKeyTag(data []byte) (key, value, remainder []byte) { - n, data := binary.BigEndian.Uint16(data), data[2:] - key, data = data[:n], data[n:] - - n, data = binary.BigEndian.Uint16(data), data[2:] - value, data = data[:n], data[n:] - return key, value, data -} - -// ParseSeriesKey extracts the name & tags from a series key. -func ParseSeriesKey(data []byte) (name []byte, tags models.Tags) { - return parseSeriesKey(data, nil) -} - -// ParseSeriesKeyInto extracts the name and tags for data, parsing the tags into -// dstTags, which is then returned. -// -// The returned dstTags may have a different length and capacity. -func ParseSeriesKeyInto(data []byte, dstTags models.Tags) ([]byte, models.Tags) { - return parseSeriesKey(data, dstTags) -} - -// parseSeriesKey extracts the name and tags from data, attempting to re-use the -// provided tags value rather than allocating. The returned tags may have a -// different length and capacity to those provided. -func parseSeriesKey(data []byte, dst models.Tags) ([]byte, models.Tags) { - var name []byte - _, data = ReadSeriesKeyLen(data) - name, data = ReadSeriesKeyMeasurement(data) - tagN, data := ReadSeriesKeyTagN(data) - - dst = dst[:cap(dst)] // Grow dst to use full capacity - if got, want := len(dst), tagN; got < want { - dst = append(dst, make(models.Tags, want-got)...) - } else if got > want { - dst = dst[:want] - } - dst = dst[:tagN] - - for i := 0; i < tagN; i++ { - var key, value []byte - key, value, data = ReadSeriesKeyTag(data) - dst[i].Key, dst[i].Value = key, value - } - - return name, dst -} - -func CompareSeriesKeys(a, b []byte) int { - // Handle 'nil' keys. - if len(a) == 0 && len(b) == 0 { - return 0 - } else if len(a) == 0 { - return -1 - } else if len(b) == 0 { - return 1 - } - - // Read total size. - _, a = ReadSeriesKeyLen(a) - _, b = ReadSeriesKeyLen(b) - - // Read names. - name0, a := ReadSeriesKeyMeasurement(a) - name1, b := ReadSeriesKeyMeasurement(b) - - // Compare names, return if not equal. - if cmp := bytes.Compare(name0, name1); cmp != 0 { - return cmp - } - - // Read tag counts. - tagN0, a := ReadSeriesKeyTagN(a) - tagN1, b := ReadSeriesKeyTagN(b) - - // Compare each tag in order. - for i := 0; ; i++ { - // Check for EOF. - if i == tagN0 && i == tagN1 { - return 0 - } else if i == tagN0 { - return -1 - } else if i == tagN1 { - return 1 - } - - // Read keys. - var key0, key1, value0, value1 []byte - key0, value0, a = ReadSeriesKeyTag(a) - key1, value1, b = ReadSeriesKeyTag(b) - - // Compare keys & values. - if cmp := bytes.Compare(key0, key1); cmp != 0 { - return cmp - } else if cmp := bytes.Compare(value0, value1); cmp != 0 { - return cmp - } - } -} - -// GenerateSeriesKeys generates series keys for a list of names & tags using -// a single large memory block. 
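// Illustrative sketch, not part of the original tsdb/series_file.go: it
// demonstrates the parsing and ordering helpers above. ParseSeriesKeyInto reuses
// a caller-owned models.Tags buffer to avoid per-key allocations, and
// CompareSeriesKeys orders encoded keys by measurement name and then by tag
// key/value pairs. The helper name below is hypothetical.
func exampleParseAndCompareKeys() {
	a := AppendSeriesKey(nil, []byte("cpu"), models.NewTags(map[string]string{"region": "east"}))
	b := AppendSeriesKey(nil, []byte("mem"), models.NewTags(map[string]string{"region": "east"}))

	// Reuse one tags buffer while parsing many keys; the returned name and tag
	// slices point into the encoded key, so nothing is copied.
	var tagsBuf models.Tags
	for _, key := range [][]byte{a, b} {
		name, tags := ParseSeriesKeyInto(key, tagsBuf[:0])
		fmt.Printf("%s %v\n", name, tags)
		tagsBuf = tags // keep the (possibly grown) buffer for the next key
	}

	// "cpu" sorts before "mem", so the comparison is negative.
	fmt.Println(CompareSeriesKeys(a, b) < 0) // true
}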
-func GenerateSeriesKeys(names [][]byte, tagsSlice []models.Tags) [][]byte { - buf := make([]byte, 0, SeriesKeysSize(names, tagsSlice)) - keys := make([][]byte, len(names)) - for i := range names { - offset := len(buf) - buf = AppendSeriesKey(buf, names[i], tagsSlice[i]) - keys[i] = buf[offset:] - } - return keys -} - -// SeriesKeysSize returns the number of bytes required to encode a list of name/tags. -func SeriesKeysSize(names [][]byte, tagsSlice []models.Tags) int { - var n int - for i := range names { - n += SeriesKeySize(names[i], tagsSlice[i]) - } - return n -} - -// SeriesKeySize returns the number of bytes required to encode a series key. -func SeriesKeySize(name []byte, tags models.Tags) int { - var n int - n += 2 + len(name) - n += binaryutil.UvarintSize(uint64(len(tags))) - for _, tag := range tags { - n += 2 + len(tag.Key) - n += 2 + len(tag.Value) - } - n += binaryutil.UvarintSize(uint64(n)) - return n -} - -type seriesKeys [][]byte - -func (a seriesKeys) Len() int { return len(a) } -func (a seriesKeys) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a seriesKeys) Less(i, j int) bool { - return CompareSeriesKeys(a[i], a[j]) == -1 -} - -type uint64Slice []uint64 - -func (a uint64Slice) Len() int { return len(a) } -func (a uint64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a uint64Slice) Less(i, j int) bool { return a[i] < a[j] } - -func nop() {} diff --git a/tsdb/series_file_test.go b/tsdb/series_file_test.go deleted file mode 100644 index f9d8494994d..00000000000 --- a/tsdb/series_file_test.go +++ /dev/null @@ -1,380 +0,0 @@ -package tsdb_test - -import ( - "bytes" - "fmt" - "os" - "path" - "testing" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" - "golang.org/x/sync/errgroup" -) - -func TestParseSeriesKeyInto(t *testing.T) { - name := []byte("cpu") - tags := models.NewTags(map[string]string{"region": "east", "server": "a"}) - key := tsdb.AppendSeriesKey(nil, name, tags) - - dst := make(models.Tags, 0) - gotName, gotTags := tsdb.ParseSeriesKeyInto(key, dst) - - if !bytes.Equal(gotName, name) { - t.Fatalf("got %q, expected %q", gotName, name) - } - - if got, exp := len(gotTags), 2; got != exp { - t.Fatalf("got tags length %d, expected %d", got, exp) - } else if got, exp := gotTags, tags; !got.Equal(exp) { - t.Fatalf("got tags %v, expected %v", got, exp) - } - - dst = make(models.Tags, 0, 5) - _, gotTags = tsdb.ParseSeriesKeyInto(key, dst) - if got, exp := len(gotTags), 2; got != exp { - t.Fatalf("got tags length %d, expected %d", got, exp) - } else if got, exp := cap(gotTags), 5; got != exp { - t.Fatalf("got tags capacity %d, expected %d", got, exp) - } else if got, exp := gotTags, tags; !got.Equal(exp) { - t.Fatalf("got tags %v, expected %v", got, exp) - } - - dst = make(models.Tags, 1) - _, gotTags = tsdb.ParseSeriesKeyInto(key, dst) - if got, exp := len(gotTags), 2; got != exp { - t.Fatalf("got tags length %d, expected %d", got, exp) - } else if got, exp := gotTags, tags; !got.Equal(exp) { - t.Fatalf("got tags %v, expected %v", got, exp) - } -} - -// Ensure that broken series files are closed -func TestSeriesFile_Open_WhenFileCorrupt_ShouldReturnErr(t *testing.T) { - f := NewBrokenSeriesFile(t, []byte{0, 0, 0, 0, 0}) - defer f.Close() - f.Logger = zaptest.NewLogger(t) - - err := f.Open() - - if err == nil { - t.Fatalf("should report error") - } -} - -// Ensure series file contains the correct set of series. 
-func TestSeriesFile_Series(t *testing.T) { - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - series := []Series{ - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})}, - {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})}, - } - for _, s := range series { - if _, err := sfile.CreateSeriesListIfNotExists([][]byte{[]byte(s.Name)}, []models.Tags{s.Tags}); err != nil { - t.Fatal(err) - } - } - - // Verify total number of series is correct. - if n := sfile.SeriesCount(); n != 3 { - t.Fatalf("unexpected series count: %d", n) - } - - // Verify all series exist. - for i, s := range series { - if seriesID := sfile.SeriesID(s.Name, s.Tags, nil); seriesID == 0 { - t.Fatalf("series does not exist: i=%d", i) - } - } - - // Verify non-existent series doesn't exist. - if sfile.HasSeries([]byte("foo"), models.NewTags(map[string]string{"region": "north"}), nil) { - t.Fatal("series should not exist") - } -} - -// Ensure series file can be compacted. -func TestSeriesFileCompactor(t *testing.T) { - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - // Disable automatic compactions. - for _, p := range sfile.Partitions() { - p.CompactThreshold = 0 - } - - var names [][]byte - var tagsSlice []models.Tags - for i := 0; i < 10000; i++ { - names = append(names, []byte(fmt.Sprintf("m%d", i))) - tagsSlice = append(tagsSlice, models.NewTags(map[string]string{"foo": "bar"})) - } - if _, err := sfile.CreateSeriesListIfNotExists(names, tagsSlice); err != nil { - t.Fatal(err) - } - - // Verify total number of series is correct. - if n := sfile.SeriesCount(); n != uint64(len(names)) { - t.Fatalf("unexpected series count: %d", n) - } - - // Compact in-place for each partition. - for _, p := range sfile.Partitions() { - compactor := tsdb.NewSeriesPartitionCompactor() - if err := compactor.Compact(p); err != nil { - t.Fatal(err) - } - } - - // Verify all series exist. - for i := range names { - if seriesID := sfile.SeriesID(names[i], tagsSlice[i], nil); seriesID == 0 { - t.Fatalf("series does not exist: %s,%s", names[i], tagsSlice[i].String()) - } - } -} - -// Ensure series file deletions persist across compactions. -func TestSeriesFile_DeleteSeriesID(t *testing.T) { - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - ids0, err := sfile.CreateSeriesListIfNotExists([][]byte{[]byte("m1")}, []models.Tags{nil}) - if err != nil { - t.Fatal(err) - } else if _, err := sfile.CreateSeriesListIfNotExists([][]byte{[]byte("m2")}, []models.Tags{nil}); err != nil { - t.Fatal(err) - } else if err := sfile.ForceCompact(); err != nil { - t.Fatal(err) - } - - // Delete and ensure deletion. 
- if err := sfile.DeleteSeriesID(ids0[0]); err != nil { - t.Fatal(err) - } else if _, err := sfile.CreateSeriesListIfNotExists([][]byte{[]byte("m1")}, []models.Tags{nil}); err != nil { - t.Fatal(err) - } else if !sfile.IsDeleted(ids0[0]) { - t.Fatal("expected deletion before compaction") - } - - if err := sfile.ForceCompact(); err != nil { - t.Fatal(err) - } else if !sfile.IsDeleted(ids0[0]) { - t.Fatal("expected deletion after compaction") - } - - if err := sfile.Reopen(); err != nil { - t.Fatal(err) - } else if !sfile.IsDeleted(ids0[0]) { - t.Fatal("expected deletion after reopen") - } -} - -func TestSeriesFile_Compaction(t *testing.T) { - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - var segmentPaths []string - for _, p := range sfile.Partitions() { - for _, ss := range p.Segments() { - segmentPaths = append(segmentPaths, ss.Path()) - } - } - - sfileSize := func() (res int64) { - for _, p := range segmentPaths { - fi, err := os.Stat(p) - require.NoError(t, err) - res += fi.Size() - } - return - } - - // Generate a bunch of keys. - var mms [][]byte - var tagSets []models.Tags - for i := 0; i < 1000; i++ { - mms = append(mms, []byte("cpu")) - tagSets = append(tagSets, models.NewTags(map[string]string{"region": fmt.Sprintf("r%d", i)})) - } - - // Add all to the series file. - ids, err := sfile.CreateSeriesListIfNotExists(mms, tagSets) - require.NoError(t, err) - - // Delete a subset of keys. - for i, id := range ids { - if i%10 == 0 { - require.NoError(t, sfile.DeleteSeriesID(id)) - } - } - - // Check total series count. - require.Equal(t, 1000, int(sfile.SeriesCount())) - - // Compact all segments. - var paths []string - for _, p := range sfile.Partitions() { - for _, ss := range p.Segments() { - require.NoError(t, ss.CompactToPath(ss.Path()+".tmp", p.Index())) - paths = append(paths, ss.Path()) - } - } - - // Close index. - require.NoError(t, sfile.SeriesFile.Close()) - - // Compute total size of all series data. - origSize := sfileSize() - - // Overwrite files. - for _, path := range paths { - require.NoError(t, os.Rename(path+".tmp", path)) - } - - // Check size of compacted series data. - // We do this before reopening the index because on Windows, opening+mmap'ing the series - // file will cause the file to grow back to its original size. - newSize := sfileSize() - - // Verify new size is smaller. - require.Greater(t, origSize, newSize) - - // Reopen index. - sfile.SeriesFile = tsdb.NewSeriesFile(sfile.SeriesFile.Path()) - require.NoError(t, sfile.SeriesFile.Open()) - - // Ensure series status is correct. - for i, id := range ids { - require.Equal(t, (i%10) == 0, sfile.IsDeleted(id)) - } - - // Check total series count. - require.Equal(t, 900, int(sfile.SeriesCount())) -} - -var cachedCompactionSeriesFile *SeriesFile - -func BenchmarkSeriesFile_Compaction(b *testing.B) { - const n = 1000000 - - if cachedCompactionSeriesFile == nil { - sfile := MustOpenSeriesFile(b) - - // Generate a bunch of keys. - var ids []uint64 - for i := 0; i < n; i++ { - tmp, err := sfile.CreateSeriesListIfNotExists([][]byte{[]byte("cpu")}, []models.Tags{models.NewTags(map[string]string{"region": fmt.Sprintf("r%d", i)})}) - if err != nil { - b.Fatal(err) - } - ids = append(ids, tmp...) - } - - // Delete a subset of keys. - for i := 0; i < len(ids); i += 10 { - if err := sfile.DeleteSeriesID(ids[i]); err != nil { - b.Fatal(err) - } - } - - cachedCompactionSeriesFile = sfile - } - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - // Compact all segments in parallel. 
- var g errgroup.Group - for _, p := range cachedCompactionSeriesFile.Partitions() { - for _, segment := range p.Segments() { - p, segment := p, segment - g.Go(func() error { - return segment.CompactToPath(segment.Path()+".tmp", p.Index()) - }) - } - } - - if err := g.Wait(); err != nil { - b.Fatal(err) - } - } -} - -// Series represents name/tagset pairs that are used in testing. -type Series struct { - Name []byte - Tags models.Tags - Deleted bool -} - -// SeriesFile is a test wrapper for tsdb.SeriesFile. -type SeriesFile struct { - *tsdb.SeriesFile -} - -// NewSeriesFile returns a new instance of SeriesFile with a temporary file path. -func NewSeriesFile(tb testing.TB) *SeriesFile { - dir := tb.TempDir() - - f := &SeriesFile{SeriesFile: tsdb.NewSeriesFile(dir)} - - tb.Cleanup(func() { - f.Close() - }) - - return f -} - -func NewBrokenSeriesFile(tb testing.TB, content []byte) *SeriesFile { - sFile := NewSeriesFile(tb) - fPath := sFile.Path() - sFile.Open() - sFile.SeriesFile.Close() - - segPath := path.Join(fPath, "00", "0000") - if _, err := os.Stat(segPath); os.IsNotExist(err) { - panic(err) - } - err := os.WriteFile(segPath, content, 0777) - if err != nil { - panic(err) - } - return sFile -} - -// MustOpenSeriesFile returns a new, open instance of SeriesFile. Panic on error. -func MustOpenSeriesFile(tb testing.TB) *SeriesFile { - tb.Helper() - - f := NewSeriesFile(tb) - f.Logger = zaptest.NewLogger(tb) - if err := f.Open(); err != nil { - panic(err) - } - return f -} - -// Reopen close & reopens the series file. -func (f *SeriesFile) Reopen() error { - if err := f.SeriesFile.Close(); err != nil { - return err - } - f.SeriesFile = tsdb.NewSeriesFile(f.SeriesFile.Path()) - return f.SeriesFile.Open() -} - -// ForceCompact executes an immediate compaction across all partitions. -func (f *SeriesFile) ForceCompact() error { - for _, p := range f.Partitions() { - if err := tsdb.NewSeriesPartitionCompactor().Compact(p); err != nil { - return err - } - } - return nil -} diff --git a/tsdb/series_index.go b/tsdb/series_index.go deleted file mode 100644 index 5914234ef1d..00000000000 --- a/tsdb/series_index.go +++ /dev/null @@ -1,373 +0,0 @@ -package tsdb - -import ( - "bytes" - "encoding/binary" - "errors" - "io" - "os" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/mmap" - "github.com/influxdata/influxdb/v2/pkg/rhh" -) - -const ( - SeriesIndexVersion = 1 - SeriesIndexMagic = "SIDX" -) - -const ( - SeriesIndexElemSize = 16 // offset + id - SeriesIndexLoadFactor = 90 // rhh load factor - - SeriesIndexHeaderSize = 0 + - 4 + 1 + // magic + version - 8 + 8 + // max series + max offset - 8 + 8 + // count + capacity - 8 + 8 + // key/id map offset & size - 8 + 8 + // id/offset map offset & size - 0 -) - -var ErrInvalidSeriesIndex = errors.New("invalid series index") - -// SeriesIndex represents an index of key-to-id & id-to-offset mappings. -type SeriesIndex struct { - path string - - count uint64 - capacity int64 - mask int64 - - maxSeriesID uint64 - maxOffset int64 - - data []byte // mmap data - keyIDData []byte // key/id mmap data - idOffsetData []byte // id/offset mmap data - - // In-memory data since rebuild. - keyIDMap *rhh.HashMap - idOffsetMap map[uint64]int64 - tombstones map[uint64]struct{} -} - -func NewSeriesIndex(path string) *SeriesIndex { - return &SeriesIndex{ - path: path, - } -} - -// Open memory-maps the index file. -func (idx *SeriesIndex) Open() (err error) { - // Map data file, if it exists. 
- if err := func() error { - if _, err := os.Stat(idx.path); err != nil && !os.IsNotExist(err) { - return err - } else if err == nil { - if idx.data, err = mmap.Map(idx.path, 0); err != nil { - return err - } - - hdr, err := ReadSeriesIndexHeader(idx.data) - if err != nil { - return err - } - idx.count, idx.capacity, idx.mask = hdr.Count, hdr.Capacity, hdr.Capacity-1 - idx.maxSeriesID, idx.maxOffset = hdr.MaxSeriesID, hdr.MaxOffset - - idx.keyIDData = idx.data[hdr.KeyIDMap.Offset : hdr.KeyIDMap.Offset+hdr.KeyIDMap.Size] - idx.idOffsetData = idx.data[hdr.IDOffsetMap.Offset : hdr.IDOffsetMap.Offset+hdr.IDOffsetMap.Size] - } - return nil - }(); err != nil { - idx.Close() - return err - } - - idx.keyIDMap = rhh.NewHashMap(rhh.DefaultOptions) - idx.idOffsetMap = make(map[uint64]int64) - idx.tombstones = make(map[uint64]struct{}) - return nil -} - -// Close unmaps the index file. -func (idx *SeriesIndex) Close() (err error) { - if idx.data != nil { - err = mmap.Unmap(idx.data) - } - idx.keyIDData = nil - idx.idOffsetData = nil - - idx.keyIDMap = nil - idx.idOffsetMap = nil - idx.tombstones = nil - return err -} - -// Recover rebuilds the in-memory index for all new entries. -func (idx *SeriesIndex) Recover(segments []*SeriesSegment) error { - // Allocate new in-memory maps. - idx.keyIDMap = rhh.NewHashMap(rhh.DefaultOptions) - idx.idOffsetMap = make(map[uint64]int64) - idx.tombstones = make(map[uint64]struct{}) - - // Process all entries since the maximum offset in the on-disk index. - minSegmentID, _ := SplitSeriesOffset(idx.maxOffset) - for _, segment := range segments { - if segment.ID() < minSegmentID { - continue - } - - if err := segment.ForEachEntry(func(flag uint8, id uint64, offset int64, key []byte) error { - if offset <= idx.maxOffset { - return nil - } - idx.execEntry(flag, id, offset, key) - return nil - }); err != nil { - return err - } - } - return nil -} - -// Count returns the number of series in the index. -func (idx *SeriesIndex) Count() uint64 { - return idx.OnDiskCount() + idx.InMemCount() -} - -// OnDiskCount returns the number of series in the on-disk index. -func (idx *SeriesIndex) OnDiskCount() uint64 { return idx.count } - -// InMemCount returns the number of series in the in-memory index. -func (idx *SeriesIndex) InMemCount() uint64 { return uint64(len(idx.idOffsetMap)) } - -func (idx *SeriesIndex) Insert(key []byte, id uint64, offset int64) { - idx.execEntry(SeriesEntryInsertFlag, id, offset, key) -} - -// Delete marks the series id as deleted. -func (idx *SeriesIndex) Delete(id uint64) { - idx.execEntry(SeriesEntryTombstoneFlag, id, 0, nil) -} - -// IsDeleted returns true if series id has been deleted. 
-func (idx *SeriesIndex) IsDeleted(id uint64) bool { - if _, ok := idx.tombstones[id]; ok { - return true - } - return idx.FindOffsetByID(id) == 0 -} - -func (idx *SeriesIndex) execEntry(flag uint8, id uint64, offset int64, key []byte) { - switch flag { - case SeriesEntryInsertFlag: - idx.keyIDMap.Put(key, id) - idx.idOffsetMap[id] = offset - - if id > idx.maxSeriesID { - idx.maxSeriesID = id - } - if offset > idx.maxOffset { - idx.maxOffset = offset - } - - case SeriesEntryTombstoneFlag: - idx.tombstones[id] = struct{}{} - - default: - panic("unreachable") - } -} - -func (idx *SeriesIndex) FindIDBySeriesKey(segments []*SeriesSegment, key []byte) uint64 { - if v := idx.keyIDMap.Get(key); v != nil { - if id, _ := v.(uint64); id != 0 && !idx.IsDeleted(id) { - return id - } - } - if len(idx.data) == 0 { - return 0 - } - - hash := rhh.HashKey(key) - for d, pos := int64(0), hash&idx.mask; ; d, pos = d+1, (pos+1)&idx.mask { - elem := idx.keyIDData[(pos * SeriesIndexElemSize):] - elemOffset := int64(binary.BigEndian.Uint64(elem[:8])) - - if elemOffset == 0 { - return 0 - } - - elemKey := ReadSeriesKeyFromSegments(segments, elemOffset+SeriesEntryHeaderSize) - elemHash := rhh.HashKey(elemKey) - if d > rhh.Dist(elemHash, pos, idx.capacity) { - return 0 - } else if elemHash == hash && bytes.Equal(elemKey, key) { - id := binary.BigEndian.Uint64(elem[8:]) - if idx.IsDeleted(id) { - return 0 - } - return id - } - } -} - -func (idx *SeriesIndex) FindIDByNameTags(segments []*SeriesSegment, name []byte, tags models.Tags, buf []byte) uint64 { - id := idx.FindIDBySeriesKey(segments, AppendSeriesKey(buf[:0], name, tags)) - if _, ok := idx.tombstones[id]; ok { - return 0 - } - return id -} - -func (idx *SeriesIndex) FindIDListByNameTags(segments []*SeriesSegment, names [][]byte, tagsSlice []models.Tags, buf []byte) (ids []uint64, ok bool) { - ids, ok = make([]uint64, len(names)), true - for i := range names { - id := idx.FindIDByNameTags(segments, names[i], tagsSlice[i], buf) - if id == 0 { - ok = false - continue - } - ids[i] = id - } - return ids, ok -} - -func (idx *SeriesIndex) FindOffsetByID(id uint64) int64 { - if offset := idx.idOffsetMap[id]; offset != 0 { - return offset - } else if len(idx.data) == 0 { - return 0 - } - - hash := rhh.HashUint64(id) - for d, pos := int64(0), hash&idx.mask; ; d, pos = d+1, (pos+1)&idx.mask { - elem := idx.idOffsetData[(pos * SeriesIndexElemSize):] - elemID := binary.BigEndian.Uint64(elem[:8]) - - if elemID == id { - return int64(binary.BigEndian.Uint64(elem[8:])) - } else if elemID == 0 || d > rhh.Dist(rhh.HashUint64(elemID), pos, idx.capacity) { - return 0 - } - } -} - -// Clone returns a copy of idx for use during compaction. In-memory maps are not cloned. -func (idx *SeriesIndex) Clone() *SeriesIndex { - tombstones := make(map[uint64]struct{}, len(idx.tombstones)) - for id := range idx.tombstones { - tombstones[id] = struct{}{} - } - - idOffsetMap := make(map[uint64]int64) - for k, v := range idx.idOffsetMap { - idOffsetMap[k] = v - } - - return &SeriesIndex{ - path: idx.path, - count: idx.count, - capacity: idx.capacity, - mask: idx.mask, - maxSeriesID: idx.maxSeriesID, - maxOffset: idx.maxOffset, - data: idx.data, - keyIDData: idx.keyIDData, - idOffsetData: idx.idOffsetData, - tombstones: tombstones, - idOffsetMap: idOffsetMap, - } -} - -// SeriesIndexHeader represents the header of a series index. 
-type SeriesIndexHeader struct { - Version uint8 - - MaxSeriesID uint64 - MaxOffset int64 - - Count uint64 - Capacity int64 - - KeyIDMap struct { - Offset int64 - Size int64 - } - - IDOffsetMap struct { - Offset int64 - Size int64 - } -} - -// NewSeriesIndexHeader returns a new instance of SeriesIndexHeader. -func NewSeriesIndexHeader() SeriesIndexHeader { - return SeriesIndexHeader{Version: SeriesIndexVersion} -} - -// ReadSeriesIndexHeader returns the header from data. -func ReadSeriesIndexHeader(data []byte) (hdr SeriesIndexHeader, err error) { - r := bytes.NewReader(data) - - // Read magic number. - magic := make([]byte, len(SeriesIndexMagic)) - if _, err := io.ReadFull(r, magic); err != nil { - return hdr, err - } else if !bytes.Equal([]byte(SeriesIndexMagic), magic) { - return hdr, ErrInvalidSeriesIndex - } - - // Read version. - if err := binary.Read(r, binary.BigEndian, &hdr.Version); err != nil { - return hdr, err - } - - // Read max offset. - if err := binary.Read(r, binary.BigEndian, &hdr.MaxSeriesID); err != nil { - return hdr, err - } else if err := binary.Read(r, binary.BigEndian, &hdr.MaxOffset); err != nil { - return hdr, err - } - - // Read count & capacity. - if err := binary.Read(r, binary.BigEndian, &hdr.Count); err != nil { - return hdr, err - } else if err := binary.Read(r, binary.BigEndian, &hdr.Capacity); err != nil { - return hdr, err - } - - // Read key/id map position. - if err := binary.Read(r, binary.BigEndian, &hdr.KeyIDMap.Offset); err != nil { - return hdr, err - } else if err := binary.Read(r, binary.BigEndian, &hdr.KeyIDMap.Size); err != nil { - return hdr, err - } - - // Read offset/id map position. - if err := binary.Read(r, binary.BigEndian, &hdr.IDOffsetMap.Offset); err != nil { - return hdr, err - } else if err := binary.Read(r, binary.BigEndian, &hdr.IDOffsetMap.Size); err != nil { - return hdr, err - } - return hdr, nil -} - -// WriteTo writes the header to w. 
-func (hdr *SeriesIndexHeader) WriteTo(w io.Writer) (n int64, err error) { - var buf bytes.Buffer - buf.WriteString(SeriesIndexMagic) - binary.Write(&buf, binary.BigEndian, hdr.Version) - binary.Write(&buf, binary.BigEndian, hdr.MaxSeriesID) - binary.Write(&buf, binary.BigEndian, hdr.MaxOffset) - binary.Write(&buf, binary.BigEndian, hdr.Count) - binary.Write(&buf, binary.BigEndian, hdr.Capacity) - binary.Write(&buf, binary.BigEndian, hdr.KeyIDMap.Offset) - binary.Write(&buf, binary.BigEndian, hdr.KeyIDMap.Size) - binary.Write(&buf, binary.BigEndian, hdr.IDOffsetMap.Offset) - binary.Write(&buf, binary.BigEndian, hdr.IDOffsetMap.Size) - return buf.WriteTo(w) -} diff --git a/tsdb/series_index_test.go b/tsdb/series_index_test.go deleted file mode 100644 index 8f94d6e1733..00000000000 --- a/tsdb/series_index_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package tsdb_test - -import ( - "bytes" - "path/filepath" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/tsdb" -) - -func TestSeriesIndex_Count(t *testing.T) { - dir := t.TempDir() - - idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index")) - if err := idx.Open(); err != nil { - t.Fatal(err) - } - defer idx.Close() - - key0 := tsdb.AppendSeriesKey(nil, []byte("m0"), nil) - idx.Insert(key0, 1, 10) - key1 := tsdb.AppendSeriesKey(nil, []byte("m1"), nil) - idx.Insert(key1, 2, 20) - - if n := idx.Count(); n != 2 { - t.Fatalf("unexpected count: %d", n) - } -} - -func TestSeriesIndex_Delete(t *testing.T) { - dir := t.TempDir() - - idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index")) - if err := idx.Open(); err != nil { - t.Fatal(err) - } - defer idx.Close() - - key0 := tsdb.AppendSeriesKey(nil, []byte("m0"), nil) - idx.Insert(key0, 1, 10) - key1 := tsdb.AppendSeriesKey(nil, []byte("m1"), nil) - idx.Insert(key1, 2, 20) - idx.Delete(1) - - if !idx.IsDeleted(1) { - t.Fatal("expected deletion") - } else if idx.IsDeleted(2) { - t.Fatal("expected series to exist") - } -} - -func TestSeriesIndex_FindIDBySeriesKey(t *testing.T) { - dir := t.TempDir() - - idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index")) - if err := idx.Open(); err != nil { - t.Fatal(err) - } - defer idx.Close() - - key0 := tsdb.AppendSeriesKey(nil, []byte("m0"), nil) - idx.Insert(key0, 1, 10) - key1 := tsdb.AppendSeriesKey(nil, []byte("m1"), nil) - idx.Insert(key1, 2, 20) - badKey := tsdb.AppendSeriesKey(nil, []byte("not_found"), nil) - - if id := idx.FindIDBySeriesKey(nil, key0); id != 1 { - t.Fatalf("unexpected id(0): %d", id) - } else if id := idx.FindIDBySeriesKey(nil, key1); id != 2 { - t.Fatalf("unexpected id(1): %d", id) - } else if id := idx.FindIDBySeriesKey(nil, badKey); id != 0 { - t.Fatalf("unexpected id(2): %d", id) - } - - if id := idx.FindIDByNameTags(nil, []byte("m0"), nil, nil); id != 1 { - t.Fatalf("unexpected id(0): %d", id) - } else if id := idx.FindIDByNameTags(nil, []byte("m1"), nil, nil); id != 2 { - t.Fatalf("unexpected id(1): %d", id) - } else if id := idx.FindIDByNameTags(nil, []byte("not_found"), nil, nil); id != 0 { - t.Fatalf("unexpected id(2): %d", id) - } -} - -func TestSeriesIndex_FindOffsetByID(t *testing.T) { - dir := t.TempDir() - - idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index")) - if err := idx.Open(); err != nil { - t.Fatal(err) - } - defer idx.Close() - - idx.Insert(tsdb.AppendSeriesKey(nil, []byte("m0"), nil), 1, 10) - idx.Insert(tsdb.AppendSeriesKey(nil, []byte("m1"), nil), 2, 20) - - if offset := idx.FindOffsetByID(1); offset != 10 { - t.Fatalf("unexpected offset(0): %d", offset) - } else 
if offset := idx.FindOffsetByID(2); offset != 20 { - t.Fatalf("unexpected offset(1): %d", offset) - } else if offset := idx.FindOffsetByID(3); offset != 0 { - t.Fatalf("unexpected offset(2): %d", offset) - } -} - -func TestSeriesIndexHeader(t *testing.T) { - // Verify header initializes correctly. - hdr := tsdb.NewSeriesIndexHeader() - if hdr.Version != tsdb.SeriesIndexVersion { - t.Fatalf("unexpected version: %d", hdr.Version) - } - hdr.MaxSeriesID = 10 - hdr.MaxOffset = 20 - hdr.Count = 30 - hdr.Capacity = 40 - hdr.KeyIDMap.Offset, hdr.KeyIDMap.Size = 50, 60 - hdr.IDOffsetMap.Offset, hdr.IDOffsetMap.Size = 70, 80 - - // Marshal/unmarshal. - var buf bytes.Buffer - if _, err := hdr.WriteTo(&buf); err != nil { - t.Fatal(err) - } else if other, err := tsdb.ReadSeriesIndexHeader(buf.Bytes()); err != nil { - t.Fatal(err) - } else if diff := cmp.Diff(hdr, other); diff != "" { - t.Fatal(diff) - } -} diff --git a/tsdb/series_partition.go b/tsdb/series_partition.go deleted file mode 100644 index 7dc433ffc5d..00000000000 --- a/tsdb/series_partition.go +++ /dev/null @@ -1,742 +0,0 @@ -package tsdb - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - "os" - "path/filepath" - "sync" - - "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/limiter" - "github.com/influxdata/influxdb/v2/pkg/rhh" - "go.uber.org/zap" -) - -var ( - ErrSeriesPartitionClosed = errors.New("tsdb: series partition closed") - ErrSeriesPartitionCompactionCancelled = errors.New("tsdb: series partition compaction cancelled") -) - -// DefaultSeriesPartitionCompactThreshold is the number of series IDs to hold in the in-memory -// series map before compacting and rebuilding the on-disk representation. -const DefaultSeriesPartitionCompactThreshold = 1 << 17 // 128K - -// SeriesPartition represents a subset of series file data. -type SeriesPartition struct { - mu sync.RWMutex - wg sync.WaitGroup - id int - path string - - closed bool - closing chan struct{} - once sync.Once - - segments []*SeriesSegment - index *SeriesIndex - seq uint64 // series id sequence - - compacting bool - compactionLimiter limiter.Fixed - compactionsDisabled int - - CompactThreshold int - - Logger *zap.Logger -} - -// NewSeriesPartition returns a new instance of SeriesPartition. -func NewSeriesPartition(id int, path string, compactionLimiter limiter.Fixed) *SeriesPartition { - return &SeriesPartition{ - id: id, - path: path, - closing: make(chan struct{}), - compactionLimiter: compactionLimiter, - CompactThreshold: DefaultSeriesPartitionCompactThreshold, - Logger: zap.NewNop(), - seq: uint64(id) + 1, - } -} - -// Open memory maps the data file at the partition's path. -func (p *SeriesPartition) Open() error { - if p.closed { - return errors.New("tsdb: cannot reopen series partition") - } - - // Create path if it doesn't exist. - if err := os.MkdirAll(filepath.Join(p.path), 0777); err != nil { - return err - } - - // Open components. - if err := func() (err error) { - if err := p.openSegments(); err != nil { - return err - } - - // Init last segment for writes. 
- if err := p.activeSegment().InitForWrite(); err != nil { - return err - } - - p.index = NewSeriesIndex(p.IndexPath()) - if err := p.index.Open(); err != nil { - return err - } else if err := p.index.Recover(p.segments); err != nil { - return err - } - - return nil - }(); err != nil { - p.Close() - return err - } - - return nil -} - -func (p *SeriesPartition) openSegments() error { - des, err := os.ReadDir(p.path) - if err != nil { - return err - } - - for _, de := range des { - segmentID, err := ParseSeriesSegmentFilename(de.Name()) - if err != nil { - continue - } - - segment := NewSeriesSegment(segmentID, filepath.Join(p.path, de.Name())) - if err := segment.Open(); err != nil { - return err - } - p.segments = append(p.segments, segment) - } - - // Find max series id by searching segments in reverse order. - for i := len(p.segments) - 1; i >= 0; i-- { - if seq := p.segments[i].MaxSeriesID(); seq >= p.seq { - // Reset our sequence num to the next one to assign - p.seq = seq + SeriesFilePartitionN - break - } - } - - // Create initial segment if none exist. - if len(p.segments) == 0 { - segment, err := CreateSeriesSegment(0, filepath.Join(p.path, "0000")) - if err != nil { - return err - } - p.segments = append(p.segments, segment) - } - - return nil -} - -// Close unmaps the data files. -func (p *SeriesPartition) Close() (err error) { - p.once.Do(func() { close(p.closing) }) - p.wg.Wait() - - p.mu.Lock() - defer p.mu.Unlock() - - p.closed = true - - for _, s := range p.segments { - if e := s.Close(); e != nil && err == nil { - err = e - } - } - p.segments = nil - - if p.index != nil { - if e := p.index.Close(); e != nil && err == nil { - err = e - } - } - p.index = nil - - return err -} - -// ID returns the partition id. -func (p *SeriesPartition) ID() int { return p.id } - -// Path returns the path to the partition. -func (p *SeriesPartition) Path() string { return p.path } - -// IndexPath returns the path to the series index. -func (p *SeriesPartition) IndexPath() string { return filepath.Join(p.path, "index") } - -// Index returns the partition's index. -func (p *SeriesPartition) Index() *SeriesIndex { return p.index } - -// Segments returns a list of partition segments. Used for testing. -func (p *SeriesPartition) Segments() []*SeriesSegment { return p.segments } - -// FileSize returns the size of all partitions, in bytes. -func (p *SeriesPartition) FileSize() (n int64, err error) { - for _, ss := range p.segments { - fi, err := os.Stat(ss.Path()) - if err != nil { - return 0, err - } - n += fi.Size() - } - return n, err -} - -// CreateSeriesListIfNotExists creates a list of series in bulk if they don't exist. -// The ids parameter is modified to contain series IDs for all keys belonging to this partition. -func (p *SeriesPartition) CreateSeriesListIfNotExists(keys [][]byte, keyPartitionIDs []int, ids []uint64) error { - var writeRequired bool - p.mu.RLock() - if p.closed { - p.mu.RUnlock() - return ErrSeriesPartitionClosed - } - for i := range keys { - if keyPartitionIDs[i] != p.id { - continue - } - id := p.index.FindIDBySeriesKey(p.segments, keys[i]) - if id == 0 { - writeRequired = true - continue - } - ids[i] = id - } - p.mu.RUnlock() - - // Exit if all series for this partition already exist. - if !writeRequired { - return nil - } - - type keyRange struct { - id uint64 - offset int64 - } - newKeyRanges := make([]keyRange, 0, len(keys)) - - // Obtain write lock to create new series. 
- p.mu.Lock() - defer p.mu.Unlock() - - if p.closed { - return ErrSeriesPartitionClosed - } - - // Track offsets of duplicate series. - newIDs := make(map[string]uint64, len(ids)) - - for i := range keys { - // Skip series that don't belong to the partition or have already been created. - if keyPartitionIDs[i] != p.id || ids[i] != 0 { - continue - } - - // Re-attempt lookup under write lock. - key := keys[i] - if ids[i] = newIDs[string(key)]; ids[i] != 0 { - continue - } else if ids[i] = p.index.FindIDBySeriesKey(p.segments, key); ids[i] != 0 { - continue - } - - // Write to series log and save offset. - id, offset, err := p.insert(key) - if err != nil { - return err - } - // Append new key to be added to hash map after flush. - ids[i] = id - newIDs[string(key)] = id - newKeyRanges = append(newKeyRanges, keyRange{id, offset}) - } - - // Flush active segment writes so we can access data in mmap. - if segment := p.activeSegment(); segment != nil { - if err := segment.Flush(); err != nil { - return err - } - } - - // Add keys to hash map(s). - for _, keyRange := range newKeyRanges { - p.index.Insert(p.seriesKeyByOffset(keyRange.offset), keyRange.id, keyRange.offset) - } - - // Check if we've crossed the compaction threshold. - if p.compactionsEnabled() && !p.compacting && - p.CompactThreshold != 0 && p.index.InMemCount() >= uint64(p.CompactThreshold) && - p.compactionLimiter.TryTake() { - p.compacting = true - log, logEnd := logger.NewOperation(context.TODO(), p.Logger, "Series partition compaction", "series_partition_compaction", zap.String("path", p.path)) - - p.wg.Add(1) - go func() { - defer p.wg.Done() - defer p.compactionLimiter.Release() - - compactor := NewSeriesPartitionCompactor() - compactor.cancel = p.closing - if err := compactor.Compact(p); err != nil { - log.Error("series partition compaction failed", zap.Error(err)) - } - - logEnd() - - // Clear compaction flag. - p.mu.Lock() - p.compacting = false - p.mu.Unlock() - }() - } - - return nil -} - -// Compacting returns if the SeriesPartition is currently compacting. -func (p *SeriesPartition) Compacting() bool { - p.mu.RLock() - defer p.mu.RUnlock() - return p.compacting -} - -// DeleteSeriesID flags a series as permanently deleted. -// If the series is reintroduced later then it must create a new id. -func (p *SeriesPartition) DeleteSeriesID(id uint64) error { - p.mu.Lock() - defer p.mu.Unlock() - - if p.closed { - return ErrSeriesPartitionClosed - } - - // Already tombstoned, ignore. - if p.index.IsDeleted(id) { - return nil - } - - // Write tombstone entry. - _, err := p.writeLogEntry(AppendSeriesEntry(nil, SeriesEntryTombstoneFlag, id, nil)) - if err != nil { - return err - } - - // Flush active segment write. - if segment := p.activeSegment(); segment != nil { - if err := segment.Flush(); err != nil { - return err - } - } - - // Mark tombstone in memory. - p.index.Delete(id) - - return nil -} - -// IsDeleted returns true if the ID has been deleted before. -func (p *SeriesPartition) IsDeleted(id uint64) bool { - p.mu.RLock() - if p.closed { - p.mu.RUnlock() - return false - } - v := p.index.IsDeleted(id) - p.mu.RUnlock() - return v -} - -// SeriesKey returns the series key for a given id. -func (p *SeriesPartition) SeriesKey(id uint64) []byte { - if id == 0 { - return nil - } - p.mu.RLock() - if p.closed { - p.mu.RUnlock() - return nil - } - key := p.seriesKeyByOffset(p.index.FindOffsetByID(id)) - p.mu.RUnlock() - return key -} - -// Series returns the parsed series name and tags for an offset. 
-func (p *SeriesPartition) Series(id uint64) ([]byte, models.Tags) { - key := p.SeriesKey(id) - if key == nil { - return nil, nil - } - return ParseSeriesKey(key) -} - -// FindIDBySeriesKey return the series id for the series key. -func (p *SeriesPartition) FindIDBySeriesKey(key []byte) uint64 { - p.mu.RLock() - if p.closed { - p.mu.RUnlock() - return 0 - } - id := p.index.FindIDBySeriesKey(p.segments, key) - p.mu.RUnlock() - return id -} - -// SeriesCount returns the number of series. -func (p *SeriesPartition) SeriesCount() uint64 { - p.mu.RLock() - if p.closed { - p.mu.RUnlock() - return 0 - } - n := p.index.Count() - p.mu.RUnlock() - return n -} - -func (p *SeriesPartition) DisableCompactions() { - p.mu.Lock() - defer p.mu.Unlock() - p.compactionsDisabled++ -} - -func (p *SeriesPartition) EnableCompactions() { - p.mu.Lock() - defer p.mu.Unlock() - - if p.compactionsEnabled() { - return - } - p.compactionsDisabled-- -} - -func (p *SeriesPartition) compactionsEnabled() bool { - return p.compactionLimiter != nil && p.compactionsDisabled == 0 -} - -// AppendSeriesIDs returns a list of all series ids. -func (p *SeriesPartition) AppendSeriesIDs(a []uint64) []uint64 { - for _, segment := range p.segments { - a = segment.AppendSeriesIDs(a) - } - return a -} - -// activeSegment returns the last segment. -func (p *SeriesPartition) activeSegment() *SeriesSegment { - if len(p.segments) == 0 { - return nil - } - return p.segments[len(p.segments)-1] -} - -func (p *SeriesPartition) insert(key []byte) (id uint64, offset int64, err error) { - id = p.seq - offset, err = p.writeLogEntry(AppendSeriesEntry(nil, SeriesEntryInsertFlag, id, key)) - if err != nil { - return 0, 0, err - } - - p.seq += SeriesFilePartitionN - return id, offset, nil -} - -// writeLogEntry appends an entry to the end of the active segment. -// If there is no more room in the segment then a new segment is added. -func (p *SeriesPartition) writeLogEntry(data []byte) (offset int64, err error) { - segment := p.activeSegment() - if segment == nil || !segment.CanWrite(data) { - if segment, err = p.createSegment(); err != nil { - return 0, err - } - } - return segment.WriteLogEntry(data) -} - -// createSegment appends a new segment -func (p *SeriesPartition) createSegment() (*SeriesSegment, error) { - // Close writer for active segment, if one exists. - if segment := p.activeSegment(); segment != nil { - if err := segment.CloseForWrite(); err != nil { - return nil, err - } - } - - // Generate a new sequential segment identifier. - var id uint16 - if len(p.segments) > 0 { - id = p.segments[len(p.segments)-1].ID() + 1 - } - filename := fmt.Sprintf("%04x", id) - - // Generate new empty segment. - segment, err := CreateSeriesSegment(id, filepath.Join(p.path, filename)) - if err != nil { - return nil, err - } - p.segments = append(p.segments, segment) - - // Allow segment to write. - if err := segment.InitForWrite(); err != nil { - return nil, err - } - - return segment, nil -} - -func (p *SeriesPartition) seriesKeyByOffset(offset int64) []byte { - if offset == 0 { - return nil - } - - segmentID, pos := SplitSeriesOffset(offset) - for _, segment := range p.segments { - if segment.ID() != segmentID { - continue - } - - key, _ := ReadSeriesKey(segment.Slice(pos + SeriesEntryHeaderSize)) - return key - } - - return nil -} - -// SeriesPartitionCompactor represents an object reindexes a series partition and optionally compacts segments. 
-type SeriesPartitionCompactor struct { - cancel <-chan struct{} -} - -// NewSeriesPartitionCompactor returns a new instance of SeriesPartitionCompactor. -func NewSeriesPartitionCompactor() *SeriesPartitionCompactor { - return &SeriesPartitionCompactor{} -} - -// Compact rebuilds the series partition index. -func (c *SeriesPartitionCompactor) Compact(p *SeriesPartition) error { - // Snapshot the partitions and index so we can check tombstones and replay at the end under lock. - p.mu.RLock() - segments := CloneSeriesSegments(p.segments) - index := p.index.Clone() - seriesN := p.index.Count() - p.mu.RUnlock() - - // Compact index to a temporary location. - indexPath := index.path + ".compacting" - if err := c.compactIndexTo(index, seriesN, segments, indexPath); err != nil { - return err - } - - // Swap compacted index under lock & replay since compaction. - if err := func() error { - p.mu.Lock() - defer p.mu.Unlock() - - // Reopen index with new file. - if err := p.index.Close(); err != nil { - return err - } else if err := os.Rename(indexPath, index.path); err != nil { - return err - } else if err := p.index.Open(); err != nil { - return err - } - - // Replay new entries. - if err := p.index.Recover(p.segments); err != nil { - return err - } - return nil - }(); err != nil { - return err - } - - return nil -} - -func (c *SeriesPartitionCompactor) compactIndexTo(index *SeriesIndex, seriesN uint64, segments []*SeriesSegment, path string) error { - hdr := NewSeriesIndexHeader() - hdr.Count = seriesN - hdr.Capacity = pow2((int64(hdr.Count) * 100) / SeriesIndexLoadFactor) - - // Allocate space for maps. - keyIDMap := make([]byte, (hdr.Capacity * SeriesIndexElemSize)) - idOffsetMap := make([]byte, (hdr.Capacity * SeriesIndexElemSize)) - - // Reindex all partitions. - var entryN int - for _, segment := range segments { - errDone := errors.New("done") - - if err := segment.ForEachEntry(func(flag uint8, id uint64, offset int64, key []byte) error { - - // Make sure we don't go past the offset where the compaction began. - if offset > index.maxOffset { - return errDone - } - - // Check for cancellation periodically. - if entryN++; entryN%1000 == 0 { - select { - case <-c.cancel: - return ErrSeriesPartitionCompactionCancelled - default: - } - } - - // Only process insert entries. - switch flag { - case SeriesEntryInsertFlag: // fallthrough - case SeriesEntryTombstoneFlag: - return nil - default: - return fmt.Errorf("unexpected series partition log entry flag: %d", flag) - } - - // Save max series identifier processed. - hdr.MaxSeriesID, hdr.MaxOffset = id, offset - - // Ignore entry if tombstoned. - if index.IsDeleted(id) { - return nil - } - - // Insert into maps. - c.insertIDOffsetMap(idOffsetMap, hdr.Capacity, id, offset) - return c.insertKeyIDMap(keyIDMap, hdr.Capacity, segments, key, offset, id) - }); err == errDone { - break - } else if err != nil { - return err - } - } - - // Open file handler. - f, err := os.Create(path) - if err != nil { - return err - } - defer f.Close() - - // Calculate map positions. - hdr.KeyIDMap.Offset, hdr.KeyIDMap.Size = SeriesIndexHeaderSize, int64(len(keyIDMap)) - hdr.IDOffsetMap.Offset, hdr.IDOffsetMap.Size = hdr.KeyIDMap.Offset+hdr.KeyIDMap.Size, int64(len(idOffsetMap)) - - // Write header. - if _, err := hdr.WriteTo(f); err != nil { - return err - } - - // Write maps. - if _, err := f.Write(keyIDMap); err != nil { - return err - } else if _, err := f.Write(idOffsetMap); err != nil { - return err - } - - // Sync & close. 
- if err := f.Sync(); err != nil { - return err - } else if err := f.Close(); err != nil { - return err - } - - return nil -} - -func (c *SeriesPartitionCompactor) insertKeyIDMap(dst []byte, capacity int64, segments []*SeriesSegment, key []byte, offset int64, id uint64) error { - mask := capacity - 1 - hash := rhh.HashKey(key) - - // Continue searching until we find an empty slot or lower probe distance. - for i, dist, pos := int64(0), int64(0), hash&mask; ; i, dist, pos = i+1, dist+1, (pos+1)&mask { - assert(i <= capacity, "key/id map full") - elem := dst[(pos * SeriesIndexElemSize):] - - // If empty slot found or matching offset, insert and exit. - elemOffset := int64(binary.BigEndian.Uint64(elem[:8])) - elemID := binary.BigEndian.Uint64(elem[8:]) - if elemOffset == 0 || elemOffset == offset { - binary.BigEndian.PutUint64(elem[:8], uint64(offset)) - binary.BigEndian.PutUint64(elem[8:], id) - return nil - } - - // Read key at position & hash. - elemKey := ReadSeriesKeyFromSegments(segments, elemOffset+SeriesEntryHeaderSize) - elemHash := rhh.HashKey(elemKey) - - // If the existing elem has probed less than us, then swap places with - // existing elem, and keep going to find another slot for that elem. - if d := rhh.Dist(elemHash, pos, capacity); d < dist { - // Insert current values. - binary.BigEndian.PutUint64(elem[:8], uint64(offset)) - binary.BigEndian.PutUint64(elem[8:], id) - - // Swap with values in that position. - _, _, offset, id = elemHash, elemKey, elemOffset, elemID - - // Update current distance. - dist = d - } - } -} - -func (c *SeriesPartitionCompactor) insertIDOffsetMap(dst []byte, capacity int64, id uint64, offset int64) { - mask := capacity - 1 - hash := rhh.HashUint64(id) - - // Continue searching until we find an empty slot or lower probe distance. - for i, dist, pos := int64(0), int64(0), hash&mask; ; i, dist, pos = i+1, dist+1, (pos+1)&mask { - assert(i <= capacity, "id/offset map full") - elem := dst[(pos * SeriesIndexElemSize):] - - // If empty slot found or matching id, insert and exit. - elemID := binary.BigEndian.Uint64(elem[:8]) - elemOffset := int64(binary.BigEndian.Uint64(elem[8:])) - if elemOffset == 0 || elemOffset == offset { - binary.BigEndian.PutUint64(elem[:8], id) - binary.BigEndian.PutUint64(elem[8:], uint64(offset)) - return - } - - // Hash key. - elemHash := rhh.HashUint64(elemID) - - // If the existing elem has probed less than us, then swap places with - // existing elem, and keep going to find another slot for that elem. - if d := rhh.Dist(elemHash, pos, capacity); d < dist { - // Insert current values. - binary.BigEndian.PutUint64(elem[:8], id) - binary.BigEndian.PutUint64(elem[8:], uint64(offset)) - - // Swap with values in that position. - _, id, offset = elemHash, elemID, elemOffset - - // Update current distance. - dist = d - } - } -} - -// pow2 returns the number that is the next highest power of 2. -// Returns v if it is a power of 2. 
-func pow2(v int64) int64 { - for i := int64(2); i < 1<<62; i *= 2 { - if i >= v { - return i - } - } - panic("unreachable") -} diff --git a/tsdb/series_segment.go b/tsdb/series_segment.go deleted file mode 100644 index b1820e5228e..00000000000 --- a/tsdb/series_segment.go +++ /dev/null @@ -1,458 +0,0 @@ -package tsdb - -import ( - "bufio" - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "os" - "regexp" - "strconv" - - "github.com/influxdata/influxdb/v2/pkg/mmap" -) - -const ( - SeriesSegmentVersion = 1 - SeriesSegmentMagic = "SSEG" - - SeriesSegmentHeaderSize = 4 + 1 // magic + version -) - -// Series entry constants. -const ( - SeriesEntryFlagSize = 1 - SeriesEntryHeaderSize = 1 + 8 // flag + id - - SeriesEntryInsertFlag = 0x01 - SeriesEntryTombstoneFlag = 0x02 -) - -var ( - ErrInvalidSeriesSegment = errors.New("invalid series segment") - ErrInvalidSeriesSegmentVersion = errors.New("invalid series segment version") - ErrSeriesSegmentNotWritable = errors.New("series segment not writable") -) - -// SeriesSegment represents a log of series entries. -type SeriesSegment struct { - id uint16 - path string - - data []byte // mmap file - file *os.File // write file handle - w *bufio.Writer // bufferred file handle - size uint32 // current file size -} - -// NewSeriesSegment returns a new instance of SeriesSegment. -func NewSeriesSegment(id uint16, path string) *SeriesSegment { - return &SeriesSegment{ - id: id, - path: path, - } -} - -// CreateSeriesSegment generates an empty segment at path. -func CreateSeriesSegment(id uint16, path string) (*SeriesSegment, error) { - // Generate segment in temp location. - f, err := os.Create(path + ".initializing") - if err != nil { - return nil, err - } - defer f.Close() - - // Write header to file and close. - hdr := NewSeriesSegmentHeader() - if _, err := hdr.WriteTo(f); err != nil { - return nil, err - } else if err := f.Truncate(int64(SeriesSegmentSize(id))); err != nil { - return nil, err - } else if err := f.Sync(); err != nil { - return nil, err - } else if err := f.Close(); err != nil { - return nil, err - } - - // Swap with target path. - if err := os.Rename(f.Name(), path); err != nil { - return nil, err - } - - // Open segment at new location. - segment := NewSeriesSegment(id, path) - if err := segment.Open(); err != nil { - return nil, err - } - return segment, nil -} - -// Open memory maps the data file at the file's path. -func (s *SeriesSegment) Open() error { - if err := func() (err error) { - // Memory map file data. - if s.data, err = mmap.Map(s.path, int64(SeriesSegmentSize(s.id))); err != nil { - return err - } - - // Read header. - hdr, err := ReadSeriesSegmentHeader(s.data) - if err != nil { - return err - } else if hdr.Version != SeriesSegmentVersion { - return ErrInvalidSeriesSegmentVersion - } - - return nil - }(); err != nil { - s.Close() - return err - } - - return nil -} - -// Path returns the file path to the segment. -func (s *SeriesSegment) Path() string { return s.path } - -// InitForWrite initializes a write handle for the segment. -// This is only used for the last segment in the series file. -func (s *SeriesSegment) InitForWrite() (err error) { - // Only calculate segment data size if writing. - for s.size = uint32(SeriesSegmentHeaderSize); s.size < uint32(len(s.data)); { - flag, _, _, sz := ReadSeriesEntry(s.data[s.size:]) - if !IsValidSeriesEntryFlag(flag) { - break - } - s.size += uint32(sz) - } - - // Open file handler for writing & seek to end of data. 
- if s.file, err = os.OpenFile(s.path, os.O_WRONLY|os.O_CREATE, 0666); err != nil { - return err - } else if _, err := s.file.Seek(int64(s.size), io.SeekStart); err != nil { - return err - } - s.w = bufio.NewWriterSize(s.file, 32*1024) - - return nil -} - -// Close unmaps the segment. -func (s *SeriesSegment) Close() (err error) { - if e := s.CloseForWrite(); e != nil && err == nil { - err = e - } - - if s.data != nil { - if e := mmap.Unmap(s.data); e != nil && err == nil { - err = e - } - s.data = nil - } - - return err -} - -func (s *SeriesSegment) CloseForWrite() (err error) { - if s.w != nil { - if e := s.w.Flush(); e != nil && err == nil { - err = e - } - s.w = nil - } - - if s.file != nil { - if e := s.file.Close(); e != nil && err == nil { - err = e - } - s.file = nil - } - return err -} - -// Data returns the raw data. -func (s *SeriesSegment) Data() []byte { return s.data } - -// ID returns the id the segment was initialized with. -func (s *SeriesSegment) ID() uint16 { return s.id } - -// Size returns the size of the data in the segment. -// This is only populated once InitForWrite() is called. -func (s *SeriesSegment) Size() int64 { return int64(s.size) } - -// Slice returns a byte slice starting at pos. -func (s *SeriesSegment) Slice(pos uint32) []byte { return s.data[pos:] } - -// WriteLogEntry writes entry data into the segment. -// Returns the offset of the beginning of the entry. -func (s *SeriesSegment) WriteLogEntry(data []byte) (offset int64, err error) { - if !s.CanWrite(data) { - return 0, ErrSeriesSegmentNotWritable - } - - offset = JoinSeriesOffset(s.id, s.size) - if _, err := s.w.Write(data); err != nil { - return 0, err - } - s.size += uint32(len(data)) - - return offset, nil -} - -// CanWrite returns true if segment has space to write entry data. -func (s *SeriesSegment) CanWrite(data []byte) bool { - return s.w != nil && s.size+uint32(len(data)) <= SeriesSegmentSize(s.id) -} - -// Flush flushes the buffer to disk. -func (s *SeriesSegment) Flush() error { - if s.w == nil { - return nil - } - if err := s.w.Flush(); err != nil { - return err - } - return s.file.Sync() -} - -// AppendSeriesIDs appends all the segments ids to a slice. Returns the new slice. -func (s *SeriesSegment) AppendSeriesIDs(a []uint64) []uint64 { - s.ForEachEntry(func(flag uint8, id uint64, _ int64, _ []byte) error { - if flag == SeriesEntryInsertFlag { - a = append(a, id) - } - return nil - }) - return a -} - -// MaxSeriesID returns the highest series id in the segment. -func (s *SeriesSegment) MaxSeriesID() uint64 { - var max uint64 - s.ForEachEntry(func(flag uint8, id uint64, _ int64, _ []byte) error { - if flag == SeriesEntryInsertFlag && id > max { - max = id - } - return nil - }) - return max -} - -// ForEachEntry executes fn for every entry in the segment. -func (s *SeriesSegment) ForEachEntry(fn func(flag uint8, id uint64, offset int64, key []byte) error) error { - for pos := uint32(SeriesSegmentHeaderSize); pos < uint32(len(s.data)); { - flag, id, key, sz := ReadSeriesEntry(s.data[pos:]) - if !IsValidSeriesEntryFlag(flag) { - break - } - - offset := JoinSeriesOffset(s.id, pos) - if err := fn(flag, id, offset, key); err != nil { - return err - } - pos += uint32(sz) - } - return nil -} - -// Clone returns a copy of the segment. Excludes the write handler, if set. -func (s *SeriesSegment) Clone() *SeriesSegment { - return &SeriesSegment{ - id: s.id, - path: s.path, - data: s.data, - size: s.size, - } -} - -// CompactToPath rewrites the segment to a new file and removes tombstoned entries. 
-func (s *SeriesSegment) CompactToPath(path string, index *SeriesIndex) error { - dst, err := CreateSeriesSegment(s.id, path) - if err != nil { - return err - } - defer dst.Close() - - if err = dst.InitForWrite(); err != nil { - return err - } - - // Iterate through the segment and write any entries to a new segment - // that exist in the index. - var buf []byte - if err = s.ForEachEntry(func(flag uint8, id uint64, _ int64, key []byte) error { - if index.IsDeleted(id) { - return nil // series id has been deleted from index - } else if flag == SeriesEntryTombstoneFlag { - return fmt.Errorf("[series id %d]: tombstone entry but exists in index", id) - } - - // copy entry over to new segment - buf = AppendSeriesEntry(buf[:0], flag, id, key) - if _, err := dst.WriteLogEntry(buf); err != nil { - return err - } - return err - }); err != nil { - return err - } - - // Close the segment and truncate it to its maximum size. - size := dst.size - if err := dst.Close(); err != nil { - return err - } else if err := os.Truncate(dst.path, int64(size)); err != nil { - return err - } - return nil -} - -// CloneSeriesSegments returns a copy of a slice of segments. -func CloneSeriesSegments(a []*SeriesSegment) []*SeriesSegment { - other := make([]*SeriesSegment, len(a)) - for i := range a { - other[i] = a[i].Clone() - } - return other -} - -// FindSegment returns a segment by id. -func FindSegment(a []*SeriesSegment, id uint16) *SeriesSegment { - for _, segment := range a { - if segment.id == id { - return segment - } - } - return nil -} - -// ReadSeriesKeyFromSegments returns a series key from an offset within a set of segments. -func ReadSeriesKeyFromSegments(a []*SeriesSegment, offset int64) []byte { - segmentID, pos := SplitSeriesOffset(offset) - segment := FindSegment(a, segmentID) - if segment == nil { - return nil - } - buf := segment.Slice(pos) - key, _ := ReadSeriesKey(buf) - return key -} - -// JoinSeriesOffset returns an offset that combines the 2-byte segmentID and 4-byte pos. -func JoinSeriesOffset(segmentID uint16, pos uint32) int64 { - return (int64(segmentID) << 32) | int64(pos) -} - -// SplitSeriesOffset splits a offset into its 2-byte segmentID and 4-byte pos parts. -func SplitSeriesOffset(offset int64) (segmentID uint16, pos uint32) { - return uint16((offset >> 32) & 0xFFFF), uint32(offset & 0xFFFFFFFF) -} - -// IsValidSeriesSegmentFilename returns true if filename is a 4-character lowercase hexadecimal number. -func IsValidSeriesSegmentFilename(filename string) bool { - return seriesSegmentFilenameRegex.MatchString(filename) -} - -// ParseSeriesSegmentFilename returns the id represented by the hexadecimal filename. -func ParseSeriesSegmentFilename(filename string) (uint16, error) { - i, err := strconv.ParseUint(filename, 16, 32) - return uint16(i), err -} - -var seriesSegmentFilenameRegex = regexp.MustCompile(`^[0-9a-f]{4}$`) - -// SeriesSegmentSize returns the maximum size of the segment. -// The size goes up by powers of 2 starting from 4MB and reaching 256MB. -func SeriesSegmentSize(id uint16) uint32 { - const min = 22 // 4MB - const max = 28 // 256MB - - shift := id + min - if shift >= max { - shift = max - } - return 1 << shift -} - -// SeriesSegmentHeader represents the header of a series segment. -type SeriesSegmentHeader struct { - Version uint8 -} - -// NewSeriesSegmentHeader returns a new instance of SeriesSegmentHeader. 
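Editor's note: the offset helpers and `SeriesSegmentSize` above are easy to sanity-check by hand. This standalone sketch copies their arithmetic from the definitions above and prints a worked example: a series offset packs the 16-bit segment id into bits 32..47 and the 32-bit in-segment position into the low 32 bits, and segment sizes double from 4MB up to a 256MB cap.

```go
package main

import "fmt"

// joinSeriesOffset packs the segment id into the high bits and the position
// into the low 32 bits, as in JoinSeriesOffset above.
func joinSeriesOffset(segmentID uint16, pos uint32) int64 {
	return (int64(segmentID) << 32) | int64(pos)
}

// splitSeriesOffset reverses joinSeriesOffset.
func splitSeriesOffset(offset int64) (uint16, uint32) {
	return uint16((offset >> 32) & 0xFFFF), uint32(offset & 0xFFFFFFFF)
}

// seriesSegmentSize doubles from 4MB (segment 0) up to a 256MB cap.
func seriesSegmentSize(id uint16) uint32 {
	const min, max = 22, 28
	shift := id + min
	if shift >= max {
		shift = max
	}
	return 1 << shift
}

func main() {
	off := joinSeriesOffset(3, 0x1000)
	seg, pos := splitSeriesOffset(off)
	fmt.Printf("offset=%#x -> segment=%d pos=%#x\n", off, seg, pos)

	for id := uint16(0); id <= 7; id++ {
		fmt.Printf("segment %d: %d MB\n", id, seriesSegmentSize(id)>>20)
	}
}
```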
-func NewSeriesSegmentHeader() SeriesSegmentHeader { - return SeriesSegmentHeader{Version: SeriesSegmentVersion} -} - -// ReadSeriesSegmentHeader returns the header from data. -func ReadSeriesSegmentHeader(data []byte) (hdr SeriesSegmentHeader, err error) { - r := bytes.NewReader(data) - - // Read magic number. - magic := make([]byte, len(SeriesSegmentMagic)) - if _, err := io.ReadFull(r, magic); err != nil { - return hdr, err - } else if !bytes.Equal([]byte(SeriesSegmentMagic), magic) { - return hdr, ErrInvalidSeriesSegment - } - - // Read version. - if err := binary.Read(r, binary.BigEndian, &hdr.Version); err != nil { - return hdr, err - } - - return hdr, nil -} - -// WriteTo writes the header to w. -func (hdr *SeriesSegmentHeader) WriteTo(w io.Writer) (n int64, err error) { - var buf bytes.Buffer - buf.WriteString(SeriesSegmentMagic) - binary.Write(&buf, binary.BigEndian, hdr.Version) - return buf.WriteTo(w) -} - -func ReadSeriesEntry(data []byte) (flag uint8, id uint64, key []byte, sz int64) { - // If flag byte is zero then no more entries exist. - flag, data = uint8(data[0]), data[1:] - if !IsValidSeriesEntryFlag(flag) { - return 0, 0, nil, 1 - } - - id, data = binary.BigEndian.Uint64(data), data[8:] - switch flag { - case SeriesEntryInsertFlag: - key, _ = ReadSeriesKey(data) - } - return flag, id, key, int64(SeriesEntryHeaderSize + len(key)) -} - -func AppendSeriesEntry(dst []byte, flag uint8, id uint64, key []byte) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, id) - - dst = append(dst, flag) - dst = append(dst, buf...) - - switch flag { - case SeriesEntryInsertFlag: - dst = append(dst, key...) - case SeriesEntryTombstoneFlag: - default: - panic(fmt.Sprintf("unreachable: invalid flag: %d", flag)) - } - return dst -} - -// IsValidSeriesEntryFlag returns true if flag is valid. -func IsValidSeriesEntryFlag(flag byte) bool { - switch flag { - case SeriesEntryInsertFlag, SeriesEntryTombstoneFlag: - return true - default: - return false - } -} diff --git a/tsdb/series_segment_test.go b/tsdb/series_segment_test.go deleted file mode 100644 index 451091fe074..00000000000 --- a/tsdb/series_segment_test.go +++ /dev/null @@ -1,254 +0,0 @@ -package tsdb_test - -import ( - "bytes" - "os" - "path/filepath" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/tsdb" -) - -func TestSeriesSegment(t *testing.T) { - dir := t.TempDir() - - // Create a new initial segment (4mb) and initialize for writing. - segment, err := tsdb.CreateSeriesSegment(0, filepath.Join(dir, "0000")) - if err != nil { - t.Fatal(err) - } else if err := segment.InitForWrite(); err != nil { - t.Fatal(err) - } - defer segment.Close() - - // Write initial entry. - key1 := tsdb.AppendSeriesKey(nil, []byte("m0"), nil) - offset, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 1, key1)) - if err != nil { - t.Fatal(err) - } else if offset != tsdb.SeriesSegmentHeaderSize { - t.Fatalf("unexpected offset: %d", offset) - } - - // Write a large entry (3mb). - key2 := tsdb.AppendSeriesKey(nil, bytes.Repeat([]byte("m"), 3*(1<<20)), nil) - if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 2, key2)); err != nil { - t.Fatal(err) - } else if offset != tsdb.SeriesSegmentHeaderSize { - t.Fatalf("unexpected offset: %d", offset) - } - - // Write another entry that is too large for the remaining segment space. 
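Editor's note: `AppendSeriesEntry` and `ReadSeriesEntry` above define the log entry layout: a 1-byte flag, an 8-byte big-endian series id, then (for inserts only) the series key bytes. The sketch below shows that layout with plain `encoding/binary` calls; it deliberately glosses over the fact that real series keys carry their own length prefix (`AppendSeriesKey`/`ReadSeriesKey` elsewhere in this package), so the key here is treated as an opaque byte string.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	insertFlag = 0x01 // SeriesEntryInsertFlag
	headerSize = 1 + 8
)

// appendEntry mirrors AppendSeriesEntry for the insert case.
func appendEntry(dst []byte, flag uint8, id uint64, key []byte) []byte {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], id)
	dst = append(dst, flag)
	dst = append(dst, buf[:]...)
	dst = append(dst, key...)
	return dst
}

func main() {
	key := []byte("cpu,host=a")
	entry := appendEntry(nil, insertFlag, 42, key)

	fmt.Printf("flag=%#x\n", entry[0])
	fmt.Printf("id=%d\n", binary.BigEndian.Uint64(entry[1:9]))
	fmt.Printf("key=%q\n", entry[headerSize:])
	fmt.Printf("entry size=%d (header %d + key %d)\n", len(entry), headerSize, len(key))
}
```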
- if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 3, tsdb.AppendSeriesKey(nil, bytes.Repeat([]byte("n"), 3*(1<<20)), nil))); err != tsdb.ErrSeriesSegmentNotWritable { - t.Fatalf("unexpected error: %s", err) - } - - // Verify two entries exist. - var n int - segment.ForEachEntry(func(flag uint8, id uint64, offset int64, key []byte) error { - switch n { - case 0: - if flag != tsdb.SeriesEntryInsertFlag || id != 1 || !bytes.Equal(key1, key) { - t.Fatalf("unexpected entry(0): %d, %d, %q", flag, id, key) - } - case 1: - if flag != tsdb.SeriesEntryInsertFlag || id != 2 || !bytes.Equal(key2, key) { - t.Fatalf("unexpected entry(1): %d, %d, %q", flag, id, key) - } - default: - t.Fatalf("too many entries") - } - n++ - return nil - }) - if n != 2 { - t.Fatalf("unexpected entry count: %d", n) - } -} - -func TestSeriesSegment_AppendSeriesIDs(t *testing.T) { - dir := t.TempDir() - - segment, err := tsdb.CreateSeriesSegment(0, filepath.Join(dir, "0000")) - if err != nil { - t.Fatal(err) - } else if err := segment.InitForWrite(); err != nil { - t.Fatal(err) - } - defer segment.Close() - - // Write entries. - if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 10, tsdb.AppendSeriesKey(nil, []byte("m0"), nil))); err != nil { - t.Fatal(err) - } else if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 11, tsdb.AppendSeriesKey(nil, []byte("m1"), nil))); err != nil { - t.Fatal(err) - } else if err := segment.Flush(); err != nil { - t.Fatal(err) - } - - // Collect series ids with existing set. - a := segment.AppendSeriesIDs([]uint64{1, 2}) - if diff := cmp.Diff(a, []uint64{1, 2, 10, 11}); diff != "" { - t.Fatal(diff) - } -} - -func TestSeriesSegment_MaxSeriesID(t *testing.T) { - dir := t.TempDir() - - segment, err := tsdb.CreateSeriesSegment(0, filepath.Join(dir, "0000")) - if err != nil { - t.Fatal(err) - } else if err := segment.InitForWrite(); err != nil { - t.Fatal(err) - } - defer segment.Close() - - // Write entries. - if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 10, tsdb.AppendSeriesKey(nil, []byte("m0"), nil))); err != nil { - t.Fatal(err) - } else if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 11, tsdb.AppendSeriesKey(nil, []byte("m1"), nil))); err != nil { - t.Fatal(err) - } else if err := segment.Flush(); err != nil { - t.Fatal(err) - } - - // Verify maximum. - if max := segment.MaxSeriesID(); max != 11 { - t.Fatalf("unexpected max: %d", max) - } -} - -func TestSeriesSegmentHeader(t *testing.T) { - // Verify header initializes correctly. - hdr := tsdb.NewSeriesSegmentHeader() - if hdr.Version != tsdb.SeriesSegmentVersion { - t.Fatalf("unexpected version: %d", hdr.Version) - } - - // Marshal/unmarshal. - var buf bytes.Buffer - if _, err := hdr.WriteTo(&buf); err != nil { - t.Fatal(err) - } else if other, err := tsdb.ReadSeriesSegmentHeader(buf.Bytes()); err != nil { - t.Fatal(err) - } else if diff := cmp.Diff(hdr, other); diff != "" { - t.Fatal(diff) - } -} - -func TestSeriesSegment_PartialWrite(t *testing.T) { - dir := t.TempDir() - - // Create a new initial segment (4mb) and initialize for writing. - segment, err := tsdb.CreateSeriesSegment(0, filepath.Join(dir, "0000")) - if err != nil { - t.Fatal(err) - } else if err := segment.InitForWrite(); err != nil { - t.Fatal(err) - } - defer segment.Close() - - // Write two entries. 
- if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 1, tsdb.AppendSeriesKey(nil, []byte("A"), nil))); err != nil { - t.Fatal(err) - } else if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 2, tsdb.AppendSeriesKey(nil, []byte("B"), nil))); err != nil { - t.Fatal(err) - } - sz := segment.Size() - entrySize := len(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 2, tsdb.AppendSeriesKey(nil, []byte("B"), nil))) - - // Close segment. - if err := segment.Close(); err != nil { - t.Fatal(err) - } - - // Truncate at each point and reopen. - for i := entrySize; i > 0; i-- { - if err := os.Truncate(filepath.Join(dir, "0000"), sz-int64(entrySize-i)); err != nil { - t.Fatal(err) - } - segment := tsdb.NewSeriesSegment(0, filepath.Join(dir, "0000")) - if err := segment.Open(); err != nil { - t.Fatal(err) - } else if err := segment.InitForWrite(); err != nil { - t.Fatal(err) - } else if err := segment.Close(); err != nil { - t.Fatal(err) - } - } -} - -func TestJoinSeriesOffset(t *testing.T) { - if offset := tsdb.JoinSeriesOffset(0x1234, 0x56789ABC); offset != 0x123456789ABC { - t.Fatalf("unexpected offset: %x", offset) - } -} - -func TestSplitSeriesOffset(t *testing.T) { - if segmentID, pos := tsdb.SplitSeriesOffset(0x123456789ABC); segmentID != 0x1234 || pos != 0x56789ABC { - t.Fatalf("unexpected segmentID/pos: %x/%x", segmentID, pos) - } -} - -func TestIsValidSeriesSegmentFilename(t *testing.T) { - if tsdb.IsValidSeriesSegmentFilename("") { - t.Fatal("expected invalid") - } else if tsdb.IsValidSeriesSegmentFilename("0ab") { - t.Fatal("expected invalid") - } else if !tsdb.IsValidSeriesSegmentFilename("192a") { - t.Fatal("expected valid") - } -} - -func TestParseSeriesSegmentFilename(t *testing.T) { - if v, err := tsdb.ParseSeriesSegmentFilename("a90b"); err != nil { - t.Fatal(err) - } else if v != 0xA90B { - t.Fatalf("unexpected value: %x", v) - } - if v, err := tsdb.ParseSeriesSegmentFilename("0001"); err != nil { - t.Fatal(err) - } else if v != 1 { - t.Fatalf("unexpected value: %x", v) - } - if _, err := tsdb.ParseSeriesSegmentFilename("invalid"); err == nil { - t.Fatal("expected error") - } -} - -func TestSeriesSegmentSize(t *testing.T) { - const mb = (1 << 20) - if sz := tsdb.SeriesSegmentSize(0); sz != 4*mb { - t.Fatalf("unexpected size: %d", sz) - } else if sz := tsdb.SeriesSegmentSize(1); sz != 8*mb { - t.Fatalf("unexpected size: %d", sz) - } else if sz := tsdb.SeriesSegmentSize(2); sz != 16*mb { - t.Fatalf("unexpected size: %d", sz) - } else if sz := tsdb.SeriesSegmentSize(3); sz != 32*mb { - t.Fatalf("unexpected size: %d", sz) - } else if sz := tsdb.SeriesSegmentSize(4); sz != 64*mb { - t.Fatalf("unexpected size: %d", sz) - } else if sz := tsdb.SeriesSegmentSize(5); sz != 128*mb { - t.Fatalf("unexpected size: %d", sz) - } else if sz := tsdb.SeriesSegmentSize(6); sz != 256*mb { - t.Fatalf("unexpected size: %d", sz) - } else if sz := tsdb.SeriesSegmentSize(7); sz != 256*mb { - t.Fatalf("unexpected size: %d", sz) - } -} - -func TestSeriesEntry(t *testing.T) { - seriesKey := tsdb.AppendSeriesKey(nil, []byte("m0"), nil) - buf := tsdb.AppendSeriesEntry(nil, 1, 2, seriesKey) - if flag, id, key, sz := tsdb.ReadSeriesEntry(buf); flag != 1 { - t.Fatalf("unexpected flag: %d", flag) - } else if id != 2 { - t.Fatalf("unexpected id: %d", id) - } else if !bytes.Equal(seriesKey, key) { - t.Fatalf("unexpected key: %q", key) - } else if sz != int64(tsdb.SeriesEntryHeaderSize+len(key)) { - t.Fatalf("unexpected size: %d", sz) - 
} -} diff --git a/tsdb/series_set.go b/tsdb/series_set.go deleted file mode 100644 index b13d4c5cbb1..00000000000 --- a/tsdb/series_set.go +++ /dev/null @@ -1,290 +0,0 @@ -package tsdb - -import ( - "io" - "sync" - "unsafe" - - "github.com/RoaringBitmap/roaring" -) - -// SeriesIDSet represents a lockable bitmap of series ids. -type SeriesIDSet struct { - sync.RWMutex - bitmap *roaring.Bitmap -} - -// NewSeriesIDSet returns a new instance of SeriesIDSet. -func NewSeriesIDSet(a ...uint64) *SeriesIDSet { - ss := &SeriesIDSet{bitmap: roaring.NewBitmap()} - if len(a) > 0 { - a32 := make([]uint32, len(a)) - for i := range a { - a32[i] = uint32(a[i]) - } - ss.bitmap.AddMany(a32) - } - return ss -} - -// Bytes estimates the memory footprint of this SeriesIDSet, in bytes. -func (s *SeriesIDSet) Bytes() int { - var b int - s.RLock() - b += 24 // mu RWMutex is 24 bytes - b += int(unsafe.Sizeof(s.bitmap)) + int(s.bitmap.GetSizeInBytes()) - s.RUnlock() - return b -} - -// Add adds the series id to the set. -func (s *SeriesIDSet) Add(id uint64) { - s.Lock() - defer s.Unlock() - s.AddNoLock(id) -} - -// AddNoLock adds the series id to the set. Add is not safe for use from multiple -// goroutines. Callers must manage synchronization. -func (s *SeriesIDSet) AddNoLock(id uint64) { - s.bitmap.Add(uint32(id)) -} - -// AddMany adds multiple ids to the SeriesIDSet. AddMany takes a lock, so may not be -// optimal to call many times with few ids. -func (s *SeriesIDSet) AddMany(ids ...uint64) { - if len(ids) == 0 { - return - } - - a32 := make([]uint32, len(ids)) - for i := range ids { - a32[i] = uint32(ids[i]) - } - - s.Lock() - defer s.Unlock() - s.bitmap.AddMany(a32) -} - -// Contains returns true if the id exists in the set. -func (s *SeriesIDSet) Contains(id uint64) bool { - s.RLock() - x := s.ContainsNoLock(id) - s.RUnlock() - return x -} - -// ContainsNoLock returns true if the id exists in the set. ContainsNoLock is -// not safe for use from multiple goroutines. The caller must manage synchronization. -func (s *SeriesIDSet) ContainsNoLock(id uint64) bool { - return s.bitmap.Contains(uint32(id)) -} - -// Remove removes the id from the set. -func (s *SeriesIDSet) Remove(id uint64) { - s.Lock() - defer s.Unlock() - s.RemoveNoLock(id) -} - -// RemoveNoLock removes the id from the set. RemoveNoLock is not safe for use -// from multiple goroutines. The caller must manage synchronization. -func (s *SeriesIDSet) RemoveNoLock(id uint64) { - s.bitmap.Remove(uint32(id)) -} - -// Cardinality returns the cardinality of the SeriesIDSet. -func (s *SeriesIDSet) Cardinality() uint64 { - s.RLock() - defer s.RUnlock() - return s.bitmap.GetCardinality() -} - -// Merge merged the contents of others into s. The caller does not need to -// provide s as an argument, and the contents of s will always be present in s -// after Merge returns. -func (s *SeriesIDSet) Merge(others ...*SeriesIDSet) { - bms := make([]*roaring.Bitmap, 0, len(others)+1) - - s.RLock() - bms = append(bms, s.bitmap) // Add ourself. - - // Add other bitsets. - for _, other := range others { - other.RLock() - defer other.RUnlock() // Hold until we have merged all the bitmaps - bms = append(bms, other.bitmap) - } - - result := roaring.FastOr(bms...) - s.RUnlock() - - s.Lock() - s.bitmap = result - s.Unlock() -} - -// MergeInPlace merges other into s, modifying s in the process. 
-func (s *SeriesIDSet) MergeInPlace(other *SeriesIDSet) { - if s == other { - return - } - - other.RLock() - s.Lock() - s.bitmap.Or(other.bitmap) - s.Unlock() - other.RUnlock() -} - -// Equals returns true if other and s are the same set of ids. -func (s *SeriesIDSet) Equals(other *SeriesIDSet) bool { - if s == other { - return true - } - - s.RLock() - defer s.RUnlock() - other.RLock() - defer other.RUnlock() - return s.bitmap.Equals(other.bitmap) -} - -// And returns a new SeriesIDSet containing elements that were present in s and other. -func (s *SeriesIDSet) And(other *SeriesIDSet) *SeriesIDSet { - s.RLock() - defer s.RUnlock() - other.RLock() - defer other.RUnlock() - return &SeriesIDSet{bitmap: roaring.And(s.bitmap, other.bitmap)} -} - -// AndNot returns a new SeriesIDSet containing elements that were present in s, -// but not present in other. -func (s *SeriesIDSet) AndNot(other *SeriesIDSet) *SeriesIDSet { - s.RLock() - defer s.RUnlock() - other.RLock() - defer other.RUnlock() - - return &SeriesIDSet{bitmap: roaring.AndNot(s.bitmap, other.bitmap)} -} - -// ForEach calls f for each id in the set. The function is applied to the IDs -// in ascending order. -func (s *SeriesIDSet) ForEach(f func(id uint64)) { - s.RLock() - defer s.RUnlock() - itr := s.bitmap.Iterator() - for itr.HasNext() { - f(uint64(itr.Next())) - } -} - -// ForEachNoLock calls f for each id in the set without taking a lock. -func (s *SeriesIDSet) ForEachNoLock(f func(id uint64)) { - itr := s.bitmap.Iterator() - for itr.HasNext() { - f(uint64(itr.Next())) - } -} - -func (s *SeriesIDSet) String() string { - s.RLock() - defer s.RUnlock() - return s.bitmap.String() -} - -// Diff removes from s any elements also present in other. -func (s *SeriesIDSet) Diff(other *SeriesIDSet) { - other.RLock() - defer other.RUnlock() - - s.Lock() - defer s.Unlock() - s.bitmap = roaring.AndNot(s.bitmap, other.bitmap) -} - -// Intersects checks whether two SeriesIDSet intersects, SeriesIDSet are not modified -func (s *SeriesIDSet) Intersects(other *SeriesIDSet) bool { - other.RLock() - defer other.RUnlock() - - s.RLock() - defer s.RUnlock() - - return s.bitmap.Intersects(other.bitmap) -} - -// Clone returns a new SeriesIDSet with a deep copy of the underlying bitmap. -func (s *SeriesIDSet) Clone() *SeriesIDSet { - s.RLock() - defer s.RUnlock() - return s.CloneNoLock() -} - -// CloneNoLock calls Clone without taking a lock. -func (s *SeriesIDSet) CloneNoLock() *SeriesIDSet { - new := NewSeriesIDSet() - new.bitmap = s.bitmap.Clone() - return new -} - -// Iterator returns an iterator to the underlying bitmap. -// This iterator is not protected by a lock. -func (s *SeriesIDSet) Iterator() SeriesIDSetIterable { - return s.bitmap.Iterator() -} - -// UnmarshalBinary unmarshals data into the set. -func (s *SeriesIDSet) UnmarshalBinary(data []byte) error { - s.Lock() - defer s.Unlock() - return s.bitmap.UnmarshalBinary(data) -} - -// UnmarshalBinaryUnsafe unmarshals data into the set. -// References to the underlying data are used so data should not be reused by caller. -func (s *SeriesIDSet) UnmarshalBinaryUnsafe(data []byte) error { - s.Lock() - defer s.Unlock() - _, err := s.bitmap.FromBuffer(data) - return err -} - -// WriteTo writes the set to w. -func (s *SeriesIDSet) WriteTo(w io.Writer) (int64, error) { - s.RLock() - defer s.RUnlock() - return s.bitmap.WriteTo(w) -} - -// Clear clears the underlying bitmap for re-use. Clear is safe for use by multiple goroutines. 
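Editor's note: a short usage sketch of the `SeriesIDSet` API defined above (the import path matches the tests elsewhere in this diff). Note that ids are stored in a 32-bit roaring bitmap, so only the low 32 bits of each uint64 id are kept.

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/tsdb"
)

func main() {
	a := tsdb.NewSeriesIDSet(1, 10, 20, 30)
	b := tsdb.NewSeriesIDSet(10, 20)

	fmt.Println(a.Contains(10))            // true
	fmt.Println(a.AndNot(b).Cardinality()) // 2 (ids 1 and 30)

	// Merge is variadic and replaces a's bitmap with the union of all sets.
	a.Merge(b, tsdb.NewSeriesIDSet(40))
	fmt.Println(a.Cardinality()) // 5

	// Ids are truncated to uint32 internally, so values differing only above
	// bit 31 collide with their low-32-bit counterparts.
	a.Add((1 << 32) + 10)
	fmt.Println(a.Cardinality()) // still 5
}
```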
-func (s *SeriesIDSet) Clear() { - s.Lock() - defer s.Unlock() - s.ClearNoLock() -} - -// ClearNoLock clears the underlying bitmap for re-use without taking a lock. -func (s *SeriesIDSet) ClearNoLock() { - s.bitmap.Clear() -} - -// Slice returns a slice of series ids. -func (s *SeriesIDSet) Slice() []uint64 { - s.RLock() - defer s.RUnlock() - - a := make([]uint64, 0, s.bitmap.GetCardinality()) - for _, seriesID := range s.bitmap.ToArray() { - a = append(a, uint64(seriesID)) - } - return a -} - -type SeriesIDSetIterable interface { - HasNext() bool - Next() uint32 -} diff --git a/tsdb/series_set_test.go b/tsdb/series_set_test.go deleted file mode 100644 index 6f99c0385cd..00000000000 --- a/tsdb/series_set_test.go +++ /dev/null @@ -1,649 +0,0 @@ -package tsdb - -import ( - "bytes" - "fmt" - "math" - "math/rand" - "runtime" - "sync" - "testing" -) - -func TestSeriesIDSet_AndNot(t *testing.T) { - examples := [][3][]uint64{ - { - {1, 10, 20, 30}, - {10, 12, 13, 14, 20}, - {1, 30}, - }, - { - {}, - {10}, - {}, - }, - { - {1, 10, 20, 30}, - {1, 10, 20, 30}, - {}, - }, - { - {1, 10}, - {1, 10, 100}, - {}, - }, - { - {1, 10}, - {}, - {1, 10}, - }, - } - - for i, example := range examples { - t.Run(fmt.Sprint(i), func(t *testing.T) { - // Build sets. - a, b := NewSeriesIDSet(), NewSeriesIDSet() - for _, v := range example[0] { - a.Add(v) - } - for _, v := range example[1] { - b.Add(v) - } - - expected := NewSeriesIDSet() - for _, v := range example[2] { - expected.Add(v) - } - - got := a.AndNot(b) - if got.String() != expected.String() { - t.Fatalf("got %s, expected %s", got.String(), expected.String()) - } - }) - } -} - -// Ensure that cloning is race-free. -func TestSeriesIDSet_Clone_Race(t *testing.T) { - main := NewSeriesIDSet() - total := NewSeriesIDSet() - for i := uint64(0); i < 1024; i++ { - main.AddNoLock(i) - total.AddNoLock(i) - } - - // One test with a closure around the main SeriesIDSet, - // so that we can run a subtest with and without COW. - test := func(t *testing.T) { - n := 10 * (runtime.NumCPU() + 1) - clones := make([]*SeriesIDSet, n) - var wg sync.WaitGroup - wg.Add(n) - for i := 1; i <= n; i++ { - go func(i int) { - defer wg.Done() - clones[i-1] = main.Clone() - - for j := 0; j < 1000; j++ { - id := uint64(j + (100000 * i)) - total.Add(id) - clones[i-1].AddNoLock(id) - } - }(i) - } - - wg.Wait() - for _, o := range clones { - if got, exp := o.Cardinality(), uint64(2024); got != exp { - t.Errorf("got cardinality %d, expected %d", got, exp) - } - } - - // The original set should be unaffected - if got, exp := main.Cardinality(), uint64(1024); got != exp { - t.Errorf("got cardinality %d, expected %d", got, exp) - } - - // Merging the clones should result in only 1024 shared values. - union := NewSeriesIDSet() - for _, o := range clones { - o.ForEachNoLock(func(id uint64) { - union.AddNoLock(id) - }) - } - - if !union.Equals(total) { - t.Fatal("union not equal to total") - } - } - t.Run("clone", test) -} - -var resultBool bool - -// Contains should be typically a constant time lookup. 
Example results on a laptop: -// -// BenchmarkSeriesIDSet_Contains/1-4 20000000 68.5 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Contains/2-4 20000000 70.8 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Contains/10-4 20000000 70.3 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Contains/100-4 20000000 71.3 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Contains/1000-4 20000000 80.5 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Contains/10000-4 20000000 67.3 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Contains/100000-4 20000000 73.1 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Contains/1000000-4 20000000 77.3 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Contains/10000000-4 20000000 75.3 ns/op 0 B/op 0 allocs/op -func BenchmarkSeriesIDSet_Contains(b *testing.B) { - cardinalities := []uint64{1, 2, 10, 100, 1000, 10000, 100000, 1000000, 10000000} - - for _, cardinality := range cardinalities { - // Setup... - set := NewSeriesIDSet() - for i := uint64(0); i < cardinality; i++ { - set.Add(i) - } - - lookup := cardinality / 2 - b.Run(fmt.Sprint(cardinality), func(b *testing.B) { - for i := 0; i < b.N; i++ { - resultBool = set.Contains(lookup) - } - }) - } -} - -var set *SeriesIDSet - -// Adding to a larger bitset shouldn't be significantly more expensive than adding -// to a smaller one. This benchmark adds a value to different cardinality sets. -// -// Example results from a laptop: -// BenchmarkSeriesIDSet_Add/1-4 1000000 1053 ns/op 48 B/op 2 allocs/op -// BenchmarkSeriesIDSet_Add/2-4 5000000 303 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Add/10-4 5000000 348 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Add/100-4 5000000 373 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Add/1000-4 5000000 342 ns/op 0 B/op 0 allocs/op -func BenchmarkSeriesIDSet_AddMore(b *testing.B) { - cardinalities := []uint64{1, 2, 10, 100, 1000, 10000, 100000, 1000000, 10000000} - - for _, cardinality := range cardinalities { - // Setup... - set = NewSeriesIDSet() - for i := uint64(0); i < cardinality-1; i++ { - set.Add(i) - } - - b.Run(fmt.Sprint(cardinality), func(b *testing.B) { - for i := 0; i < b.N; i++ { - // Add next value - set.Add(cardinality) - - b.StopTimer() - set.Remove(cardinality) - b.StartTimer() - } - }) - } -} - -// Add benchmarks the cost of adding the same element to a set versus the -// cost of checking if it exists before adding it. 
-// -// Typical benchmarks from a laptop: -// -// BenchmarkSeriesIDSet_Add/cardinality_1000000_add/same-8 20000000 64.8 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Add/cardinality_1000000_add/random-8 2000000 704 ns/op 5 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Add/cardinality_1000000_add/same_no_lock-8 50000000 40.3 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Add/cardinality_1000000_add/random_no_lock-8 2000000 644 ns/op 5 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/same_no_lock-8 50000000 34.0 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/random_no_lock-8 2000000 860 ns/op 14 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/same_global_lock-8 30000000 49.8 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/random_global_lock-8 2000000 914 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/same_multi_lock-8 30000000 39.7 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add/random_multi_lock-8 1000000 1002 ns/op 0 B/op 0 allocs/op -func BenchmarkSeriesIDSet_Add(b *testing.B) { - // Setup... - set = NewSeriesIDSet() - for i := uint64(0); i < 1000000; i++ { - set.Add(i) - } - lookup := uint64(300032) - - // Add the same value over and over. - b.Run("cardinality_1000000_add", func(b *testing.B) { - b.Run("same", func(b *testing.B) { - for i := 0; i < b.N; i++ { - set.Add(lookup) - } - }) - - b.Run("random", func(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - x := rand.Intn(math.MaxInt32) - b.StartTimer() - set.Add(uint64(x)) - } - }) - - b.Run("same no lock", func(b *testing.B) { - for i := 0; i < b.N; i++ { - set.AddNoLock(lookup) - } - }) - - b.Run("random no lock", func(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - x := rand.Intn(math.MaxInt32) - b.StartTimer() - set.AddNoLock(uint64(x)) - } - }) - }) - - // Add the same value over and over with no lock - b.Run("cardinality_1000000_check_add", func(b *testing.B) { - b.Run("same no lock", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if !set.ContainsNoLock(lookup) { - set.AddNoLock(lookup) - } - } - }) - - b.Run("random no lock", func(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - x := rand.Intn(math.MaxInt32) - b.StartTimer() - if !set.ContainsNoLock(uint64(x)) { - set.AddNoLock(uint64(x)) - } - } - }) - - b.Run("same global lock", func(b *testing.B) { - for i := 0; i < b.N; i++ { - set.Lock() - if !set.ContainsNoLock(lookup) { - set.AddNoLock(lookup) - } - set.Unlock() - } - }) - - b.Run("random global lock", func(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - x := rand.Intn(math.MaxInt32) - b.StartTimer() - set.Lock() - if !set.ContainsNoLock(uint64(x)) { - set.AddNoLock(uint64(x)) - } - set.Unlock() - } - }) - - b.Run("same multi lock", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if !set.Contains(lookup) { - set.Add(lookup) - } - } - }) - - b.Run("random multi lock", func(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - x := rand.Intn(math.MaxInt32) - b.StartTimer() - if !set.Contains(uint64(x)) { - set.Add(uint64(x)) - } - } - }) - }) -} - -var ssResult *SeriesIDSet - -// Benchmark various ways of creating a copy of a bitmap. Note, Clone_COW will result -// in a bitmap where future modifications will involve copies. -// -// Typical results from an i7 laptop. 
-// BenchmarkSeriesIDSet_Clone/cardinality_1000/re-use/Clone-8 30000 44171 ns/op 47200 B/op 1737 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000/re-use/Merge-8 100000 17877 ns/op 39008 B/op 30 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000/re-use/MergeInPlace-8 200000 7367 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000/re-use/Add-8 10000 137460 ns/op 62336 B/op 2596 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000/re-use/WriteTo-8 30000 52896 ns/op 35872 B/op 866 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000/don't_re-use/Clone-8 30000 41940 ns/op 47200 B/op 1737 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000/don't_re-use/Merge-8 100000 17624 ns/op 39008 B/op 30 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000/don't_re-use/MergeInPlace-8 100000 17320 ns/op 38880 B/op 28 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000/don't_re-use/Add-8 10000 167544 ns/op 101216 B/op 2624 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000/don't_re-use/WriteTo-8 20000 66976 ns/op 52897 B/op 869 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_10000/re-use/Clone-8 10000 179933 ns/op 177072 B/op 5895 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_10000/re-use/Merge-8 20000 77574 ns/op 210656 B/op 42 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_10000/re-use/MergeInPlace-8 100000 23645 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_10000/re-use/Add-8 2000 689254 ns/op 224161 B/op 9572 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_10000/re-use/WriteTo-8 10000 199052 ns/op 118791 B/op 2945 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_10000/don't_re-use/Clone-8 10000 183137 ns/op 177073 B/op 5895 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_10000/don't_re-use/Merge-8 20000 77502 ns/op 210656 B/op 42 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_10000/don't_re-use/MergeInPlace-8 20000 72610 ns/op 210528 B/op 40 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_10000/don't_re-use/Add-8 2000 724789 ns/op 434691 B/op 9612 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_10000/don't_re-use/WriteTo-8 10000 215734 ns/op 177159 B/op 2948 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_100000/re-use/Clone-8 5000 244971 ns/op 377648 B/op 6111 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_100000/re-use/Merge-8 20000 90580 ns/op 210656 B/op 42 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_100000/re-use/MergeInPlace-8 50000 24697 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_100000/re-use/Add-8 500 3274456 ns/op 758996 B/op 19853 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_100000/re-use/WriteTo-8 5000 248791 ns/op 122392 B/op 3053 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_100000/don't_re-use/Clone-8 5000 269152 ns/op 377648 B/op 6111 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_100000/don't_re-use/Merge-8 20000 85948 ns/op 210657 B/op 42 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_100000/don't_re-use/MergeInPlace-8 20000 78142 ns/op 210528 B/op 40 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_100000/don't_re-use/Add-8 500 3123753 ns/op 969529 B/op 19893 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_100000/don't_re-use/WriteTo-8 10000 230657 ns/op 180684 B/op 3056 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000000/re-use/Clone-8 3000 551781 ns/op 2245424 B/op 6111 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000000/re-use/Merge-8 20000 92104 
ns/op 210656 B/op 42 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000000/re-use/MergeInPlace-8 50000 27408 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000000/re-use/Add-8 100 22573498 ns/op 6420446 B/op 30520 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000000/re-use/WriteTo-8 5000 284901 ns/op 123522 B/op 3053 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000000/don't_re-use/Clone-8 3000 679284 ns/op 2245424 B/op 6111 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000000/don't_re-use/Merge-8 20000 68965 ns/op 210656 B/op 42 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000000/don't_re-use/MergeInPlace-8 20000 64236 ns/op 210528 B/op 40 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000000/don't_re-use/Add-8 100 21960668 ns/op 6630979 B/op 30560 allocs/op -// BenchmarkSeriesIDSet_Clone/cardinality_1000000/don't_re-use/WriteTo-8 5000 298276 ns/op 181890 B/op 3056 allocs/op - -func BenchmarkSeriesIDSet_Clone(b *testing.B) { - toAddCardinalities := []int{1e3, 1e4, 1e5, 1e6} - - runBenchmarks := func(b *testing.B, other *SeriesIDSet, init func() *SeriesIDSet) { - b.Run("Clone", func(b *testing.B) { - for i := 0; i < b.N; i++ { - ssResult = other.Clone() - } - }) - - b.Run("Merge", func(b *testing.B) { - ssResult = init() - for i := 0; i < b.N; i++ { - ssResult.Merge(other) - b.StopTimer() - ssResult = init() - b.StartTimer() - } - }) - - b.Run("MergeInPlace", func(b *testing.B) { - ssResult = init() - for i := 0; i < b.N; i++ { - ssResult.MergeInPlace(other) - b.StopTimer() - ssResult = init() - b.StartTimer() - } - }) - - b.Run("Add", func(b *testing.B) { - ssResult = init() - for i := 0; i < b.N; i++ { - itr := other.Iterator() - ssResult.Lock() - for itr.HasNext() { - ssResult.AddNoLock(uint64(itr.Next())) - } - ssResult.Unlock() - b.StopTimer() - ssResult = init() - b.StartTimer() - } - }) - - b.Run("WriteTo", func(b *testing.B) { - var buf bytes.Buffer - ssResult = init() - for i := 0; i < b.N; i++ { - other.WriteTo(&buf) - ssResult.UnmarshalBinaryUnsafe(buf.Bytes()) - b.StopTimer() - ssResult = init() - buf.Reset() - b.StartTimer() - } - }) - } - - for _, toAddCardinality := range toAddCardinalities { - b.Run(fmt.Sprintf("cardinality %d", toAddCardinality), func(b *testing.B) { - ids := make([]uint64, 0, toAddCardinality) - for i := 0; i < toAddCardinality; i++ { - ids = append(ids, uint64(rand.Intn(200000000))) - } - other := NewSeriesIDSet(ids...) - - b.Run("re-use", func(b *testing.B) { - base := NewSeriesIDSet() - runBenchmarks(b, other, func() *SeriesIDSet { - base.Clear() - return base - }) - }) - - b.Run("don't re-use", func(b *testing.B) { - runBenchmarks(b, other, func() *SeriesIDSet { - return NewSeriesIDSet() - }) - }) - }) - } -} -func BenchmarkSeriesIDSet_AddMany(b *testing.B) { - cardinalities := []int{1, 1e3, 1e4, 1e5, 1e6} - toAddCardinalities := []int{1e3, 1e4, 1e5} - - for _, cardinality := range cardinalities { - ids := make([]uint64, 0, cardinality) - for i := 0; i < cardinality; i++ { - ids = append(ids, uint64(rand.Intn(200000000))) - } - - // Setup... - set = NewSeriesIDSet(ids...) - - // Check if the value exists before adding it under two locks. 
- b.Run(fmt.Sprintf("cardinality %d", cardinality), func(b *testing.B) { - for _, toAddCardinality := range toAddCardinalities { - ids := make([]uint64, 0, toAddCardinality) - for i := 0; i < toAddCardinality; i++ { - ids = append(ids, uint64(rand.Intn(200000000))) - } - - b.Run(fmt.Sprintf("adding %d", toAddCardinality), func(b *testing.B) { - b.Run("AddNoLock", func(b *testing.B) { - clone := set.Clone() - for i := 0; i < b.N; i++ { - for _, id := range ids { - clone.AddNoLock(id) - } - - b.StopTimer() - clone = set.Clone() - b.StartTimer() - } - }) - - b.Run("AddMany", func(b *testing.B) { - clone := set.Clone() - for i := 0; i < b.N; i++ { - clone.AddMany(ids...) - b.StopTimer() - clone = set.Clone() - b.StartTimer() - } - }) - - // Merge will involve a new bitmap being allocated. - b.Run("Merge", func(b *testing.B) { - clone := set.Clone() - for i := 0; i < b.N; i++ { - other := NewSeriesIDSet(ids...) - clone.Merge(other) - - b.StopTimer() - clone = set.Clone() - b.StartTimer() - } - }) - - b.Run("MergeInPlace", func(b *testing.B) { - clone := set.Clone() - for i := 0; i < b.N; i++ { - other := NewSeriesIDSet(ids...) - clone.MergeInPlace(other) - - b.StopTimer() - clone = set.Clone() - b.StartTimer() - } - }) - }) - - } - }) - } -} - -// Remove benchmarks the cost of removing the same element in a set versus the -// cost of checking if it exists before removing it. -// -// Typical benchmarks from a laptop: -// -// BenchmarkSeriesIDSet_Remove/cardinality_1000000_remove_same-4 20000000 99.1 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Remove/cardinality_1000000_check_remove_global_lock-4 20000000 57.7 ns/op 0 B/op 0 allocs/op -// BenchmarkSeriesIDSet_Remove/cardinality_1000000_check_remove_multi_lock-4 20000000 80.1 ns/op 0 B/op 0 allocs/op -func BenchmarkSeriesIDSet_Remove(b *testing.B) { - // Setup... - set = NewSeriesIDSet() - for i := uint64(0); i < 1000000; i++ { - set.Add(i) - } - lookup := uint64(300032) - - // Remove the same value over and over. - b.Run("cardinality_1000000_remove_same", func(b *testing.B) { - for i := 0; i < b.N; i++ { - set.Remove(lookup) - } - }) - - // Check if the value exists before adding it. Subsequent repeats of the code - // will result in contains checks. - b.Run("cardinality_1000000_check_remove_global_lock", func(b *testing.B) { - for i := 0; i < b.N; i++ { - set.Lock() - if set.ContainsNoLock(lookup) { - set.RemoveNoLock(lookup) - } - set.Unlock() - } - }) - - // Check if the value exists before adding it under two locks. 
- b.Run("cardinality_1000000_check_remove_multi_lock", func(b *testing.B) { - for i := 0; i < b.N; i++ { - if set.Contains(lookup) { - set.Remove(lookup) - } - } - }) -} - -// Typical benchmarks for a laptop: -// -// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_1/shards_1-4 200000 8095 ns/op 16656 B/op 11 allocs/op -// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_1/shards_10-4 200000 11755 ns/op 18032 B/op 47 allocs/op -// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_1/shards_100-4 50000 41632 ns/op 31794 B/op 407 allocs/op -// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_10000/shards_1-4 200000 6022 ns/op 8384 B/op 7 allocs/op -// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_10000/shards_10-4 100000 19674 ns/op 9760 B/op 43 allocs/op -// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_10000/shards_100-4 10000 152865 ns/op 23522 B/op 403 allocs/op -// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_1000000/shards_1-4 200000 8252 ns/op 9712 B/op 44 allocs/op -// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_1000000/shards_10-4 50000 29566 ns/op 15984 B/op 143 allocs/op -// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_1000000/shards_100-4 10000 237672 ns/op 78710 B/op 1133 allocs/op -// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_10000000/shards_1-4 100000 21559 ns/op 25968 B/op 330 allocs/op -// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_10000000/shards_10-4 20000 102326 ns/op 114325 B/op 537 allocs/op -// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_10000000/shards_100-4 2000 1042697 ns/op 997909 B/op 2608 allocs/op -func BenchmarkSeriesIDSet_Merge_Duplicates(b *testing.B) { - cardinalities := []int{1, 10000, 1000000, 10000000} - shards := []int{1, 10, 100} - - for _, cardinality := range cardinalities { - set = NewSeriesIDSet() - for i := 0; i < cardinality; i++ { - set.Add(uint64(i)) - } - - for _, shard := range shards { - others := make([]*SeriesIDSet, 0, shard) - for s := 0; s < shard; s++ { - others = append(others, &SeriesIDSet{bitmap: set.bitmap.Clone()}) - } - - b.Run(fmt.Sprintf("cardinality_%d/shards_%d", cardinality, shard), func(b *testing.B) { - base := &SeriesIDSet{bitmap: set.bitmap.Clone()} - for i := 0; i < b.N; i++ { - base.Merge(others...) 
- b.StopTimer() - base.bitmap = set.bitmap.Clone() - b.StartTimer() - } - }) - - } - } -} - -// Typical benchmarks for a laptop: -// -// BenchmarkSeriesIDSet_Merge_Unique/cardinality_1/shards_1-4 200000 7841 ns/op 16656 B/op 11 allocs/op -// BenchmarkSeriesIDSet_Merge_Unique/cardinality_1/shards_10-4 200000 13093 ns/op 18048 B/op 47 allocs/op -// BenchmarkSeriesIDSet_Merge_Unique/cardinality_1/shards_100-4 30000 57399 ns/op 31985 B/op 407 allocs/op -// BenchmarkSeriesIDSet_Merge_Unique/cardinality_10000/shards_1-4 200000 7740 ns/op 8384 B/op 7 allocs/op -// BenchmarkSeriesIDSet_Merge_Unique/cardinality_10000/shards_10-4 50000 37116 ns/op 18208 B/op 52 allocs/op -// BenchmarkSeriesIDSet_Merge_Unique/cardinality_10000/shards_100-4 5000 409487 ns/op 210563 B/op 955 allocs/op -// BenchmarkSeriesIDSet_Merge_Unique/cardinality_1000000/shards_1-4 100000 19289 ns/op 19328 B/op 79 allocs/op -// BenchmarkSeriesIDSet_Merge_Unique/cardinality_1000000/shards_10-4 10000 129048 ns/op 159716 B/op 556 allocs/op -// BenchmarkSeriesIDSet_Merge_Unique/cardinality_1000000/shards_100-4 500 3482907 ns/op 5428116 B/op 6174 allocs/op -// BenchmarkSeriesIDSet_Merge_Unique/cardinality_10000000/shards_1-4 30000 43734 ns/op 51872 B/op 641 allocs/op -// BenchmarkSeriesIDSet_Merge_Unique/cardinality_10000000/shards_10-4 3000 514412 ns/op 748678 B/op 3687 allocs/op -// BenchmarkSeriesIDSet_Merge_Unique/cardinality_10000000/shards_100-4 30 61891687 ns/op 69626539 B/op 36038 allocs/op -func BenchmarkSeriesIDSet_Merge_Unique(b *testing.B) { - cardinalities := []int{1, 10000, 1000000, 10000000} - shards := []int{1, 10, 100} - - for _, cardinality := range cardinalities { - set = NewSeriesIDSet() - for i := 0; i < cardinality; i++ { - set.Add(uint64(i)) - } - - for _, shard := range shards { - others := make([]*SeriesIDSet, 0, shard) - for s := 1; s <= shard; s++ { - other := NewSeriesIDSet() - for i := 0; i < cardinality; i++ { - other.Add(uint64(i + (s * cardinality))) - } - others = append(others, other) - } - - b.Run(fmt.Sprintf("cardinality_%d/shards_%d", cardinality, shard), func(b *testing.B) { - base := &SeriesIDSet{bitmap: set.bitmap.Clone()} - for i := 0; i < b.N; i++ { - base.Merge(others...) 
- b.StopTimer() - base.bitmap = set.bitmap.Clone() - b.StartTimer() - } - }) - } - } -} diff --git a/tsdb/shard.go b/tsdb/shard.go deleted file mode 100644 index 363dcc9869a..00000000000 --- a/tsdb/shard.go +++ /dev/null @@ -1,2748 +0,0 @@ -package tsdb - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "runtime" - "sort" - "strings" - "sync" - "sync/atomic" - "time" - "unicode" - "unsafe" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/bytesutil" - errors2 "github.com/influxdata/influxdb/v2/pkg/errors" - "github.com/influxdata/influxdb/v2/pkg/estimator" - "github.com/influxdata/influxdb/v2/pkg/file" - "github.com/influxdata/influxdb/v2/pkg/limiter" - "github.com/influxdata/influxdb/v2/pkg/slices" - internal "github.com/influxdata/influxdb/v2/tsdb/internal" - "github.com/influxdata/influxql" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" - "google.golang.org/protobuf/proto" -) - -const ( - measurementKey = "_name" - DefaultMetricInterval = 10 * time.Second - FieldsChangeFile = "fields.idxl" - bytesInInt64 = 8 -) - -var ( - // ErrFieldTypeConflict is returned when a new field already exists with a different type. - ErrFieldTypeConflict = errors.New("field type conflict") - - // ErrEngineClosed is returned when a caller attempts indirectly to - // access the shard's underlying engine. - ErrEngineClosed = errors.New("engine is closed") - - // ErrShardDisabled is returned when a the shard is not available for - // queries or writes. - ErrShardDisabled = errors.New("shard is disabled") - - // ErrUnknownFieldsFormat is returned when the fields index file is not identifiable by - // the file's magic number. - ErrUnknownFieldsFormat = errors.New("unknown field index format") - - // ErrUnknownFieldType is returned when the type of a field cannot be determined. - ErrUnknownFieldType = errors.New("unknown field type") - - // ErrShardNotIdle is returned when an operation requiring the shard to be idle/cold is - // attempted on a hot shard. - ErrShardNotIdle = errors.New("shard not idle") - - // fieldsIndexMagicNumber is the file magic number for the fields index file. - fieldsIndexMagicNumber = []byte{0, 6, 1, 3} -) - -var ( - // Static objects to prevent small allocs. - timeBytes = []byte("time") -) - -// A ShardError implements the error interface, and contains extra -// context about the shard that generated the error. -type ShardError struct { - id uint64 - Err error -} - -// NewShardError returns a new ShardError. -func NewShardError(id uint64, err error) error { - if err == nil { - return nil - } - return ShardError{id: id, Err: err} -} - -// Error returns the string representation of the error, to satisfy the error interface. -func (e ShardError) Error() string { - return fmt.Sprintf("[shard %d] %s", e.id, e.Err) -} - -// PartialWriteError indicates a write request could only write a portion of the -// requested values. -type PartialWriteError struct { - Reason string - Dropped int - - // A sorted slice of series keys that were dropped. - DroppedKeys [][]byte -} - -func (e PartialWriteError) Error() string { - return fmt.Sprintf("partial write: %s dropped=%d", e.Reason, e.Dropped) -} - -// Shard represents a self-contained time series database. 
An inverted index of -// the measurement and tag data is kept along with the raw time series data. -// Data can be split across many shards. The query engine in TSDB is responsible -// for combining the output of many shards into a single query result. -type Shard struct { - path string - walPath string - id uint64 - - database string - retentionPolicy string - - sfile *SeriesFile - options EngineOptions - - mu sync.RWMutex - _engine Engine - index Index - enabled bool - - stats *ShardMetrics - - baseLogger *zap.Logger - logger *zap.Logger - - metricUpdater *ticker - - EnableOnOpen bool - - // CompactionDisabled specifies the shard should not schedule compactions. - // This option is intended for offline tooling. - CompactionDisabled bool -} - -// NewShard returns a new initialized Shard. walPath doesn't apply to the b1 type index -func NewShard(id uint64, path string, walPath string, sfile *SeriesFile, opt EngineOptions) *Shard { - db, rp := decodeStorePath(path) - logger := zap.NewNop() - - engineTags := EngineTags{ - Path: path, - WalPath: walPath, - Id: fmt.Sprintf("%d", id), - Bucket: db, - EngineVersion: opt.EngineVersion, - } - - s := &Shard{ - id: id, - path: path, - walPath: walPath, - sfile: sfile, - options: opt, - stats: newShardMetrics(engineTags), - database: db, - retentionPolicy: rp, - logger: logger, - baseLogger: logger, - EnableOnOpen: true, - } - return s -} - -// WithLogger sets the logger on the shard. It must be called before Open. -func (s *Shard) WithLogger(log *zap.Logger) { - s.baseLogger = log - engine, err := s.Engine() - if err == nil { - engine.WithLogger(s.baseLogger) - s.index.WithLogger(s.baseLogger) - } - s.logger = s.baseLogger.With(zap.String("service", "shard")) -} - -// SetEnabled enables the shard for queries and write. When disabled, all -// writes and queries return an error and compactions are stopped for the shard. -func (s *Shard) SetEnabled(enabled bool) { - s.mu.Lock() - s.setEnabledNoLock(enabled) - s.mu.Unlock() -} - -// ! setEnabledNoLock performs actual work of SetEnabled. Must hold s.mu before calling. -func (s *Shard) setEnabledNoLock(enabled bool) { - // Prevent writes and queries - s.enabled = enabled - if s._engine != nil && !s.CompactionDisabled { - // Disable background compactions and snapshotting - s._engine.SetEnabled(enabled) - } -} - -// ScheduleFullCompaction forces a full compaction to be schedule on the shard. -func (s *Shard) ScheduleFullCompaction() error { - engine, err := s.Engine() - if err != nil { - return err - } - return engine.ScheduleFullCompaction() -} - -// ID returns the shards ID. -func (s *Shard) ID() uint64 { - return s.id -} - -// Database returns the database of the shard. -func (s *Shard) Database() string { - return s.database -} - -// RetentionPolicy returns the retention policy of the shard. 
-func (s *Shard) RetentionPolicy() string { - return s.retentionPolicy -} - -var globalShardMetrics = newAllShardMetrics() - -type twoCounterObserver struct { - count prometheus.Counter - sum prometheus.Counter -} - -func (t twoCounterObserver) Observe(f float64) { - t.sum.Inc() - t.count.Add(f) -} - -var _ prometheus.Observer = twoCounterObserver{} - -type allShardMetrics struct { - writes *prometheus.CounterVec - writesSum *prometheus.CounterVec - writesErr *prometheus.CounterVec - writesErrSum *prometheus.CounterVec - writesDropped *prometheus.CounterVec - fieldsCreated *prometheus.CounterVec - diskSize *prometheus.GaugeVec - series *prometheus.GaugeVec -} - -type ShardMetrics struct { - writes prometheus.Observer - writesErr prometheus.Observer - writesDropped prometheus.Counter - fieldsCreated prometheus.Counter - diskSize prometheus.Gauge - series prometheus.Gauge -} - -const storageNamespace = "storage" -const shardSubsystem = "shard" - -func newAllShardMetrics() *allShardMetrics { - labels := EngineLabelNames() - return &allShardMetrics{ - writes: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: storageNamespace, - Subsystem: shardSubsystem, - Name: "write_count", - Help: "Count of the number of write requests", - }, labels), - writesSum: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: storageNamespace, - Subsystem: shardSubsystem, - Name: "write_sum", - Help: "Counter of the number of points for write requests", - }, labels), - writesErr: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: storageNamespace, - Subsystem: shardSubsystem, - Name: "write_err_count", - Help: "Count of the number of write requests with errors", - }, labels), - writesErrSum: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: storageNamespace, - Subsystem: shardSubsystem, - Name: "write_err_sum", - Help: "Counter of the number of points for write requests with errors", - }, labels), - writesDropped: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: storageNamespace, - Subsystem: shardSubsystem, - Name: "write_dropped_sum", - Help: "Counter of the number of points droppped", - }, labels), - fieldsCreated: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: storageNamespace, - Subsystem: shardSubsystem, - Name: "fields_created", - Help: "Counter of the number of fields created", - }, labels), - diskSize: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: storageNamespace, - Subsystem: shardSubsystem, - Name: "disk_size", - Help: "Gauge of the disk size for the shard", - }, labels), - series: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: storageNamespace, - Subsystem: shardSubsystem, - Name: "series", - Help: "Gauge of the number of series in the shard index", - }, labels), - } -} - -func ShardCollectors() []prometheus.Collector { - return []prometheus.Collector{ - globalShardMetrics.writes, - globalShardMetrics.writesSum, - globalShardMetrics.writesErr, - globalShardMetrics.writesErrSum, - globalShardMetrics.writesDropped, - globalShardMetrics.fieldsCreated, - globalShardMetrics.diskSize, - globalShardMetrics.series, - } -} - -func newShardMetrics(tags EngineTags) *ShardMetrics { - labels := tags.GetLabels() - return &ShardMetrics{ - writes: twoCounterObserver{ - count: globalShardMetrics.writes.With(labels), - sum: globalShardMetrics.writesSum.With(labels), - }, - writesErr: twoCounterObserver{ - count: globalShardMetrics.writesErr.With(labels), - sum: globalShardMetrics.writesErrSum.With(labels), - }, - writesDropped: 
globalShardMetrics.writesDropped.With(labels), - fieldsCreated: globalShardMetrics.fieldsCreated.With(labels), - diskSize: globalShardMetrics.diskSize.With(labels), - series: globalShardMetrics.series.With(labels), - } -} - -// ticker runs fn periodically, and stops when Stop() is called -// -// Stop waits for the last function run to finish if already running -type ticker struct { - wg sync.WaitGroup - closing chan struct{} -} - -// Stops the ticker and waits for the function to complete -func (t *ticker) Stop() { - close(t.closing) - t.wg.Wait() -} - -// Path returns the path set on the shard when it was created. -func (s *Shard) Path() string { return s.path } - -// Open initializes and opens the shard's store. -func (s *Shard) Open(ctx context.Context) error { - s.mu.Lock() - closeWaitNeeded, err := s.openNoLock(ctx) - s.mu.Unlock() - if closeWaitNeeded { - werr := s.closeWait() - // We want the first error we get returned to the caller - if err == nil { - err = werr - } - } - return err -} - -// openNoLock performs work of Open. Must hold s.mu before calling. The first return -// value is true if the caller should call closeWait after unlocking s.mu in order -// to clean up a failed open operation. -func (s *Shard) openNoLock(ctx context.Context) (bool, error) { - if err := func() error { - // Return if the shard is already open - if s._engine != nil { - return nil - } - - seriesIDSet := NewSeriesIDSet() - - // Initialize underlying index. - ipath := filepath.Join(s.path, "index") - idx, err := NewIndex(s.id, s.database, ipath, seriesIDSet, s.sfile, s.options) - if err != nil { - return err - } - idx.WithLogger(s.baseLogger) - - // Check if the index needs to be rebuilt before Open() initializes - // its file system layout. - var shouldReindex bool - if _, err := os.Stat(ipath); os.IsNotExist(err) { - shouldReindex = true - } - - // Open index. - if err := idx.Open(); err != nil { - return err - } - s.index = idx - - // Initialize underlying engine. - e, err := NewEngine(s.id, idx, s.path, s.walPath, s.sfile, s.options) - if err != nil { - return err - } - - // Set log output on the engine. - e.WithLogger(s.baseLogger) - - // Disable compactions while loading the index - e.SetEnabled(false) - - // Open engine. 
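Editor's note: the shard metrics above follow a common Prometheus pattern: package-level `*Vec` collectors are created once (`globalShardMetrics`) and each shard binds its own children by label values with `With(labels)` in `newShardMetrics`. Below is a minimal sketch of that wiring; the label names are made up for illustration and are not the repository's `EngineLabelNames`.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

// A single process-wide vector, analogous to globalShardMetrics.writes.
var writes = prometheus.NewCounterVec(prometheus.CounterOpts{
	Namespace: "storage",
	Subsystem: "shard",
	Name:      "write_count",
	Help:      "Count of the number of write requests",
}, []string{"id", "bucket"}) // label names are illustrative only

func main() {
	prometheus.MustRegister(writes)

	// Each shard keeps its own label-bound child, as newShardMetrics does.
	shard1 := writes.With(prometheus.Labels{"id": "1", "bucket": "db0"})
	shard2 := writes.With(prometheus.Labels{"id": "2", "bucket": "db0"})

	shard1.Inc()
	shard1.Inc()
	shard2.Inc()

	fmt.Println(testutil.ToFloat64(shard1)) // 2
	fmt.Println(testutil.ToFloat64(shard2)) // 1
}
```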
- if err := e.Open(ctx); err != nil { - return err - } - if shouldReindex { - if err := e.Reindex(); err != nil { - return err - } - } - - if err := e.LoadMetadataIndex(s.id, s.index); err != nil { - return err - } - s._engine = e - - // Set up metric collection - metricUpdater := &ticker{ - closing: make(chan struct{}), - } - - // We want a way to turn off the series and disk size metrics if they are suspected to cause issues - // This corresponds to the top-level MetricsDisabled argument - if !s.options.MetricsDisabled { - metricUpdater.wg.Add(1) - go func() { - tick := time.NewTicker(DefaultMetricInterval) - defer metricUpdater.wg.Done() - defer tick.Stop() - for { - select { - case <-tick.C: - // Note this takes the engine lock, so we have to be careful not - // to close metricUpdater.closing while holding the engine lock - e, err := s.Engine() - if err != nil { - continue - } - s.stats.series.Set(float64(e.SeriesN())) - s.stats.diskSize.Set(float64(e.DiskSize())) - case <-metricUpdater.closing: - return - } - } - }() - } - - s.metricUpdater = metricUpdater - - return nil - }(); err != nil { - s.closeNoLock() - return true, NewShardError(s.id, err) - } - - if s.EnableOnOpen { - // enable writes, queries and compactions - s.setEnabledNoLock(true) - } - - return false, nil -} - -// Close shuts down the shard's store. -func (s *Shard) Close() error { - err := func() error { - s.mu.Lock() - defer s.mu.Unlock() - return s.closeNoLock() - }() - // make sure not to hold a lock while waiting for close to finish - werr := s.closeWait() - - if err != nil { - return err - } - return werr -} - -// closeNoLock closes the shard an removes reference to the shard from associated -// indexes. The s.mu mutex must be held before calling closeNoLock. closeWait should always -// be called after calling closeNoLock. -func (s *Shard) closeNoLock() error { - if s._engine == nil { - return nil - } - - if s.metricUpdater != nil { - close(s.metricUpdater.closing) - } - - err := s._engine.Close() - if err == nil { - s._engine = nil - } - - if e := s.index.Close(); e == nil { - s.index = nil - } - return err -} - -// closeWait waits for goroutines and other background operations associated with this -// shard to complete after closeNoLock is called. Must only be called after calling -// closeNoLock. closeWait should always be called after calling closeNoLock. -// Public methods which close the shard should call closeWait after closeNoLock before -// returning. Must be called without holding shard locks to avoid deadlocking. -func (s *Shard) closeWait() error { - if s.metricUpdater != nil { - s.metricUpdater.wg.Wait() - } - return nil -} - -// IndexType returns the index version being used for this shard. -// -// IndexType returns the empty string if it is called before the shard is opened, -// since it is only that point that the underlying index type is known. -func (s *Shard) IndexType() string { - s.mu.RLock() - defer s.mu.RUnlock() - if s._engine == nil || s.index == nil { // Shard not open yet. - return "" - } - return s.index.Type() -} - -// ready determines if the Shard is ready for queries or writes. -// It returns nil if ready, otherwise ErrShardClosed or ErrShardDisabled -func (s *Shard) ready() error { - var err error - if s._engine == nil { - err = ErrEngineClosed - } else if !s.enabled { - err = ErrShardDisabled - } - return err -} - -// LastModified returns the time when this shard was last modified. 
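
The metric-updater goroutine created in `openNoLock` and torn down across `closeNoLock`/`closeWait` follows a stop-channel plus `sync.WaitGroup` pattern: the channel is closed while the shard lock is held, but the wait happens outside any lock so the goroutine can finish its last iteration without deadlocking. A standalone sketch of that lifecycle, with illustrative names rather than the shard's actual fields:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// ticker runs fn periodically until Stop is called. Stop waits for the
// in-flight invocation of fn to finish before returning.
type ticker struct {
	wg      sync.WaitGroup
	closing chan struct{}
}

func startTicker(interval time.Duration, fn func()) *ticker {
	t := &ticker{closing: make(chan struct{})}
	t.wg.Add(1)
	go func() {
		defer t.wg.Done()
		tick := time.NewTicker(interval)
		defer tick.Stop()
		for {
			select {
			case <-tick.C:
				fn()
			case <-t.closing:
				return
			}
		}
	}()
	return t
}

// Stop signals the goroutine to exit and waits for it. Callers should not be
// holding locks that fn also needs while waiting, mirroring closeWait above.
func (t *ticker) Stop() {
	close(t.closing)
	t.wg.Wait()
}

func main() {
	var n int
	t := startTicker(10*time.Millisecond, func() { n++ })
	time.Sleep(50 * time.Millisecond)
	t.Stop()
	fmt.Println("ticks observed:", n)
}
```
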
-func (s *Shard) LastModified() time.Time { - engine, err := s.Engine() - if err != nil { - return time.Time{} - } - return engine.LastModified() -} - -// Index returns a reference to the underlying index. It returns an error if -// the index is nil. -func (s *Shard) Index() (Index, error) { - s.mu.RLock() - defer s.mu.RUnlock() - if err := s.ready(); err != nil { - return nil, err - } - return s.index, nil -} - -// SeriesFile returns a reference the underlying series file. If return an error -// if the series file is nil. -func (s *Shard) SeriesFile() (*SeriesFile, error) { - s.mu.RLock() - defer s.mu.RUnlock() - if err := s.ready(); err != nil { - return nil, err - } - return s.sfile, nil -} - -// IsIdle return true if the shard is not receiving writes and is fully compacted. -func (s *Shard) IsIdle() (state bool, reason string) { - engine, err := s.Engine() - if err != nil { - return true, "" - } - return engine.IsIdle() -} - -func (s *Shard) Free() error { - engine, err := s.Engine() - if err != nil { - return err - } - - // Disable compactions to stop background goroutines - s.SetCompactionsEnabled(false) - - return engine.Free() -} - -// SetCompactionsEnabled enables or disable shard background compactions. -func (s *Shard) SetCompactionsEnabled(enabled bool) { - engine, err := s.Engine() - if err != nil { - return - } - engine.SetCompactionsEnabled(enabled) -} - -// DiskSize returns the size on disk of this shard. -func (s *Shard) DiskSize() (int64, error) { - s.mu.RLock() - defer s.mu.RUnlock() - // We don't use engine() because we still want to report the shard's disk - // size even if the shard has been disabled. - if s._engine == nil { - return 0, ErrEngineClosed - } - size := s._engine.DiskSize() - return size, nil -} - -// FieldCreate holds information for a field to create on a measurement. -type FieldCreate struct { - Measurement []byte - Field *Field -} - -// WritePoints will write the raw data points and any new metadata to the index in the shard. -func (s *Shard) WritePoints(ctx context.Context, points []models.Point) (rErr error) { - s.mu.RLock() - defer s.mu.RUnlock() - - engine, err := s.engineNoLock() - if err != nil { - return err - } - - var writeError error - s.stats.writes.Observe(float64(len(points))) - defer func() { - if rErr != nil { - s.stats.writesErr.Observe(float64(len(points))) - } - }() - - points, fieldsToCreate, err := s.validateSeriesAndFields(points) - if err != nil { - if _, ok := err.(PartialWriteError); !ok { - return err - } - // There was a partial write (points dropped), hold onto the error to return - // to the caller, but continue on writing the remaining points. - writeError = err - } - s.stats.fieldsCreated.Add(float64(len(fieldsToCreate))) - - // add any new fields and keep track of what needs to be saved - if err := s.createFieldsAndMeasurements(fieldsToCreate); err != nil { - return err - } - - // Write to the engine. - if err := engine.WritePoints(ctx, points); err != nil { - return fmt.Errorf("engine: %s", err) - } - - return writeError -} - -// validateSeriesAndFields checks which series and fields are new and whose metadata should be saved and indexed. -func (s *Shard) validateSeriesAndFields(points []models.Point) ([]models.Point, []*FieldCreate, error) { - var ( - fieldsToCreate []*FieldCreate - err error - dropped int - reason string // only first error reason is set unless returned from CreateSeriesListIfNotExists - ) - - // Create all series against the index in bulk. 
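
`validateSeriesAndFields` below filters the incoming points in place: surviving entries are copied forward with a second index `j` and the slices are re-sliced to the surviving length, so no extra allocation is needed and the drop count is tracked as a side effect. A small sketch of that filtering shape on plain strings (the predicate is illustrative only):

```go
package main

import (
	"fmt"
	"strings"
)

// compactInPlace keeps only the elements that pass keep, reusing the backing
// array the same way validateSeriesAndFields compacts points with index j.
func compactInPlace(items []string, keep func(string) bool) (kept []string, dropped int) {
	j := 0
	for i := range items {
		if !keep(items[i]) {
			dropped++
			continue
		}
		items[j] = items[i]
		j++
	}
	return items[:j], dropped
}

func main() {
	series := []string{"cpu,host=a", "mem,time=1", "disk,host=b"}
	kept, dropped := compactInPlace(series, func(s string) bool {
		// Illustrative rule only: reject series that carry a "time" tag.
		return !strings.Contains(s, ",time=")
	})
	fmt.Println(kept, "dropped:", dropped)
}
```
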
- keys := make([][]byte, len(points)) - names := make([][]byte, len(points)) - tagsSlice := make([]models.Tags, len(points)) - - // Check if keys should be unicode validated. - validateKeys := s.options.Config.ValidateKeys - - var j int - for i, p := range points { - tags := p.Tags() - - // Drop any series w/ a "time" tag, these are illegal - if v := tags.Get(timeBytes); v != nil { - dropped++ - if reason == "" { - reason = fmt.Sprintf( - "invalid tag key: input tag \"%s\" on measurement \"%s\" is invalid", - "time", string(p.Name())) - } - continue - } - - // Drop any series with invalid unicode characters in the key. - if validateKeys && !models.ValidKeyTokens(string(p.Name()), tags) { - dropped++ - if reason == "" { - reason = fmt.Sprintf("key contains invalid unicode: %q", makePrintable(string(p.Key()))) - } - continue - } - - keys[j] = p.Key() - names[j] = p.Name() - tagsSlice[j] = tags - points[j] = points[i] - j++ - } - points, keys, names, tagsSlice = points[:j], keys[:j], names[:j], tagsSlice[:j] - - engine, err := s.engineNoLock() - if err != nil { - return nil, nil, err - } - - // Add new series. Check for partial writes. - var droppedKeys [][]byte - if err := engine.CreateSeriesListIfNotExists(keys, names, tagsSlice); err != nil { - switch err := err.(type) { - // (DSB) This was previously *PartialWriteError. Now catch pointer and value types. - case *PartialWriteError: - reason = err.Reason - dropped += err.Dropped - droppedKeys = err.DroppedKeys - s.stats.writesDropped.Add(float64(err.Dropped)) - case PartialWriteError: - reason = err.Reason - dropped += err.Dropped - droppedKeys = err.DroppedKeys - s.stats.writesDropped.Add(float64(err.Dropped)) - default: - return nil, nil, err - } - } - - j = 0 - for i, p := range points { - // Skip any points with only invalid fields. - iter := p.FieldIterator() - validField := false - for iter.Next() { - if bytes.Equal(iter.FieldKey(), timeBytes) { - continue - } - validField = true - break - } - if !validField { - if reason == "" { - reason = fmt.Sprintf( - "invalid field name: input field \"%s\" on measurement \"%s\" is invalid", - "time", string(p.Name())) - } - dropped++ - continue - } - - // Skip any points whos keys have been dropped. Dropped has already been incremented for them. - if len(droppedKeys) > 0 && bytesutil.Contains(droppedKeys, keys[i]) { - continue - } - - name := p.Name() - mf := engine.MeasurementFields(name) - - // Check with the field validator. - if err := ValidateFields(mf, p, s.options.Config.SkipFieldSizeValidation); err != nil { - switch err := err.(type) { - case PartialWriteError: - if reason == "" { - reason = err.Reason - } - dropped += err.Dropped - s.stats.writesDropped.Add(float64(err.Dropped)) - default: - return nil, nil, err - } - continue - } - - points[j] = points[i] - j++ - - // Create any fields that are missing. - iter.Reset() - for iter.Next() { - fieldKey := iter.FieldKey() - - // Skip fields named "time". They are illegal. - if bytes.Equal(fieldKey, timeBytes) { - continue - } - - if mf.FieldBytes(fieldKey) != nil { - continue - } - - dataType := dataTypeFromModelsFieldType(iter.Type()) - if dataType == influxql.Unknown { - continue - } - - fieldsToCreate = append(fieldsToCreate, &FieldCreate{ - Measurement: name, - Field: &Field{ - Name: string(fieldKey), - Type: dataType, - }, - }) - } - } - - if dropped > 0 { - err = PartialWriteError{Reason: reason, Dropped: dropped} - } - - return points[:j], fieldsToCreate, err -} - -const unPrintReplRune = '?' 
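
The type switch above deliberately matches both `PartialWriteError` and `*PartialWriteError` so that a partial write (some points dropped, the rest written) is never mistaken for a fatal error. The sketch below shows the same caller-side distinction using a local stand-in type, since the real `PartialWriteError` is defined elsewhere in this package:

```go
package main

import (
	"errors"
	"fmt"
)

// partialWriteError stands in for the package's PartialWriteError: some
// points were dropped but the remainder of the write went through.
type partialWriteError struct {
	Reason  string
	Dropped int
}

func (e partialWriteError) Error() string {
	return fmt.Sprintf("partial write: %s (dropped=%d)", e.Reason, e.Dropped)
}

// classify mirrors the type switch above, accepting both the value and the
// pointer form so neither spelling is silently treated as fatal.
func classify(err error) (dropped int, fatal error) {
	switch e := err.(type) {
	case nil:
		return 0, nil
	case partialWriteError:
		return e.Dropped, nil
	case *partialWriteError:
		return e.Dropped, nil
	default:
		return 0, err
	}
}

func main() {
	d, fatal := classify(partialWriteError{Reason: "invalid tag key", Dropped: 3})
	fmt.Println("dropped:", d, "fatal:", fatal)

	d, fatal = classify(&partialWriteError{Reason: "bad unicode", Dropped: 1})
	fmt.Println("dropped:", d, "fatal:", fatal)

	_, fatal = classify(errors.New("engine closed"))
	fmt.Println("fatal:", fatal)
}
```
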
-const unPrintMaxReplRune = 3 - -// makePrintable - replace invalid and non-printable unicode characters with a few '?' runes -func makePrintable(s string) string { - b := strings.Builder{} - b.Grow(len(s)) - c := 0 - for _, r := range strings.ToValidUTF8(s, string(unicode.ReplacementChar)) { - if !unicode.IsPrint(r) || r == unicode.ReplacementChar { - if c < unPrintMaxReplRune { - b.WriteRune(unPrintReplRune) - } - c++ - } else { - b.WriteRune(r) - c = 0 - } - } - return b.String() -} - -func (s *Shard) createFieldsAndMeasurements(fieldsToCreate []*FieldCreate) error { - if len(fieldsToCreate) == 0 { - return nil - } - - engine, err := s.engineNoLock() - if err != nil { - return err - } - - // add fields - changes := make([]*FieldChange, 0, len(fieldsToCreate)) - for _, f := range fieldsToCreate { - mf := engine.MeasurementFields(f.Measurement) - if err := mf.CreateFieldIfNotExists([]byte(f.Field.Name), f.Field.Type); err != nil { - return err - } - changes = append(changes, &FieldChange{ - FieldCreate: *f, - ChangeType: AddMeasurementField, - }) - } - - return engine.MeasurementFieldSet().Save(changes) -} - -// DeleteSeriesRange deletes all values from for seriesKeys between min and max (inclusive) -func (s *Shard) DeleteSeriesRange(ctx context.Context, itr SeriesIterator, min, max int64) error { - engine, err := s.Engine() - if err != nil { - return err - } - return engine.DeleteSeriesRange(ctx, itr, min, max) -} - -// DeleteSeriesRangeWithPredicate deletes all values from for seriesKeys between min and max (inclusive) -// for which predicate() returns true. If predicate() is nil, then all values in range are deleted. -func (s *Shard) DeleteSeriesRangeWithPredicate( - ctx context.Context, - itr SeriesIterator, - predicate func(name []byte, tags models.Tags) (int64, int64, bool), -) error { - engine, err := s.Engine() - if err != nil { - return err - } - return engine.DeleteSeriesRangeWithPredicate(ctx, itr, predicate) -} - -// DeleteMeasurement deletes a measurement and all underlying series. -func (s *Shard) DeleteMeasurement(ctx context.Context, name []byte) error { - engine, err := s.Engine() - if err != nil { - return err - } - return engine.DeleteMeasurement(ctx, name) -} - -// SeriesN returns the unique number of series in the shard. -func (s *Shard) SeriesN() int64 { - engine, err := s.Engine() - if err != nil { - return 0 - } - return engine.SeriesN() -} - -// SeriesSketches returns the measurement sketches for the shard. -func (s *Shard) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) { - engine, err := s.Engine() - if err != nil { - return nil, nil, err - } - return engine.SeriesSketches() -} - -// MeasurementsSketches returns the measurement sketches for the shard. -func (s *Shard) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) { - engine, err := s.Engine() - if err != nil { - return nil, nil, err - } - return engine.MeasurementsSketches() -} - -// MeasurementNamesByRegex returns names of measurements matching the regular expression. -func (s *Shard) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) { - engine, err := s.Engine() - if err != nil { - return nil, err - } - return engine.MeasurementNamesByRegex(re) -} - -// MeasurementNamesByPredicate returns fields for a measurement filtered by an expression. 
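
`makePrintable` above caps how many replacement runes a run of garbage can produce, so an error message cannot balloon when a series key is full of control bytes or invalid UTF-8. A self-contained sketch of the same idea, with a sample input and the output it yields:

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// sanitize mirrors makePrintable above: invalid UTF-8 and non-printable runes
// become '?', with at most three '?' emitted per consecutive run.
func sanitize(s string) string {
	const maxRepl = 3
	var b strings.Builder
	b.Grow(len(s))
	run := 0
	for _, r := range strings.ToValidUTF8(s, string(unicode.ReplacementChar)) {
		if !unicode.IsPrint(r) || r == unicode.ReplacementChar {
			if run < maxRepl {
				b.WriteRune('?')
			}
			run++
			continue
		}
		b.WriteRune(r)
		run = 0
	}
	return b.String()
}

func main() {
	in := "cpu\x00\x01\x02\x03,host=a\xff"
	fmt.Printf("%q -> %q\n", in, sanitize(in))
	// The four control bytes collapse to "???" and the trailing invalid
	// byte becomes a single "?".
}
```
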
-func (s *Shard) MeasurementNamesByPredicate(expr influxql.Expr) ([][]byte, error) { - index, err := s.Index() - if err != nil { - return nil, err - } - indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile} - return indexSet.MeasurementNamesByPredicate(query.OpenAuthorizer, expr) -} - -// MeasurementFields returns fields for a measurement. -func (s *Shard) MeasurementFields(name []byte) *MeasurementFields { - engine, err := s.Engine() - if err != nil { - return nil - } - return engine.MeasurementFields(name) -} - -// MeasurementExists returns true if the shard contains name. -// TODO(edd): This method is currently only being called from tests; do we -// really need it? -func (s *Shard) MeasurementExists(name []byte) (bool, error) { - engine, err := s.Engine() - if err != nil { - return false, err - } - return engine.MeasurementExists(name) -} - -// CreateIterator returns an iterator for the data in the shard. -func (s *Shard) CreateIterator(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - engine, err := s.Engine() - if err != nil { - return nil, err - } - switch m.SystemIterator { - case "_fieldKeys": - return NewFieldKeysIterator(s, opt) - case "_series": - // TODO(benbjohnson): Move up to the Shards.CreateIterator(). - index, err := s.Index() - if err != nil { - return nil, err - } - indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile} - - itr, err := NewSeriesPointIterator(indexSet, opt) - if err != nil { - return nil, err - } - - return query.NewInterruptIterator(itr, opt.InterruptCh), nil - case "_tagKeys": - return NewTagKeysIterator(s, opt) - } - return engine.CreateIterator(ctx, m.Name, opt) -} - -func (s *Shard) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest, cond influxql.Expr) (SeriesCursor, error) { - index, err := s.Index() - if err != nil { - return nil, err - } - return newSeriesCursor(req, IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile}, cond) -} - -func (s *Shard) CreateCursorIterator(ctx context.Context) (CursorIterator, error) { - engine, err := s.Engine() - if err != nil { - return nil, err - } - return engine.CreateCursorIterator(ctx) -} - -// FieldDimensions returns unique sets of fields and dimensions across a list of sources. -func (s *Shard) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { - engine, err := s.Engine() - if err != nil { - return nil, nil, err - } - - fields = make(map[string]influxql.DataType) - dimensions = make(map[string]struct{}) - - index, err := s.Index() - if err != nil { - return nil, nil, err - } - for _, name := range measurements { - // Handle system sources. - if strings.HasPrefix(name, "_") { - var keys []string - switch name { - case "_fieldKeys": - keys = []string{"fieldKey", "fieldType"} - case "_series": - keys = []string{"key"} - case "_tagKeys": - keys = []string{"tagKey"} - } - - if len(keys) > 0 { - for _, k := range keys { - if fields[k].LessThan(influxql.String) { - fields[k] = influxql.String - } - } - continue - } - // Unknown system source so default to looking for a measurement. - } - - // Retrieve measurement. - if exists, err := engine.MeasurementExists([]byte(name)); err != nil { - return nil, nil, err - } else if !exists { - continue - } - - // Append fields and dimensions. 
- mf := engine.MeasurementFields([]byte(name)) - if mf != nil { - for k, typ := range mf.FieldSet() { - if fields[k].LessThan(typ) { - fields[k] = typ - } - } - } - - indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile} - if err := indexSet.ForEachMeasurementTagKey([]byte(name), func(key []byte) error { - dimensions[string(key)] = struct{}{} - return nil - }); err != nil { - return nil, nil, err - } - } - - return fields, dimensions, nil -} - -// mapType returns the data type for the field within the measurement. -func (s *Shard) mapType(measurement, field string) (influxql.DataType, error) { - engine, err := s.engineNoLock() - if err != nil { - return 0, err - } - - switch field { - case "_name", "_tagKey", "_tagValue", "_seriesKey": - return influxql.String, nil - } - - // Process system measurements. - switch measurement { - case "_fieldKeys": - if field == "fieldKey" || field == "fieldType" { - return influxql.String, nil - } - return influxql.Unknown, nil - case "_series": - if field == "key" { - return influxql.String, nil - } - return influxql.Unknown, nil - case "_tagKeys": - if field == "tagKey" { - return influxql.String, nil - } - return influxql.Unknown, nil - } - // Unknown system source so default to looking for a measurement. - - if exists, _ := engine.MeasurementExists([]byte(measurement)); !exists { - return influxql.Unknown, nil - } - - mf := engine.MeasurementFields([]byte(measurement)) - if mf != nil { - f := mf.Field(field) - if f != nil { - return f.Type, nil - } - } - - if exists, _ := engine.HasTagKey([]byte(measurement), []byte(field)); exists { - return influxql.Tag, nil - } - - return influxql.Unknown, nil -} - -// expandSources expands regex sources and removes duplicates. -// NOTE: sources must be normalized (db and rp set) before calling this function. -func (s *Shard) expandSources(sources influxql.Sources) (influxql.Sources, error) { - engine, err := s.engineNoLock() - if err != nil { - return nil, err - } - - // Use a map as a set to prevent duplicates. - set := map[string]influxql.Source{} - - // Iterate all sources, expanding regexes when they're found. - for _, source := range sources { - switch src := source.(type) { - case *influxql.Measurement: - // Add non-regex measurements directly to the set. - if src.Regex == nil { - set[src.String()] = src - continue - } - - // Loop over matching measurements. - names, err := engine.MeasurementNamesByRegex(src.Regex.Val) - if err != nil { - return nil, err - } - - for _, name := range names { - other := &influxql.Measurement{ - Database: src.Database, - RetentionPolicy: src.RetentionPolicy, - Name: string(name), - } - set[other.String()] = other - } - - default: - return nil, fmt.Errorf("expandSources: unsupported source type: %T", source) - } - } - - // Convert set to sorted slice. - names := make([]string, 0, len(set)) - for name := range set { - names = append(names, name) - } - sort.Strings(names) - - // Convert set to a list of Sources. - expanded := make(influxql.Sources, 0, len(set)) - for _, name := range names { - expanded = append(expanded, set[name]) - } - - return expanded, nil -} - -// Backup backs up the shard by creating a tar archive of all TSM files that -// have been modified since the provided time. See Engine.Backup for more details. 
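
`expandSources` above collects matches into a `map[string]influxql.Source` used as a set and then emits them in sorted order, so expanding a regex source is both de-duplicated and deterministic. A stripped-down sketch of that set-then-sort shape on plain strings:

```go
package main

import (
	"fmt"
	"sort"
)

// dedupeSorted collects names into a map used as a set, then returns them in
// a stable, sorted order, the same shape expandSources uses for measurements.
func dedupeSorted(names ...string) []string {
	set := make(map[string]struct{}, len(names))
	for _, n := range names {
		set[n] = struct{}{}
	}
	out := make([]string, 0, len(set))
	for n := range set {
		out = append(out, n)
	}
	sort.Strings(out)
	return out
}

func main() {
	// A regex source such as /cpu.*/ can match the same measurement more
	// than once; duplicates collapse and the result order is stable.
	fmt.Println(dedupeSorted("cpu", "cpu0", "cpu", "mem", "cpu0"))
}
```
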
-func (s *Shard) Backup(w io.Writer, basePath string, since time.Time) error { - engine, err := s.Engine() - if err != nil { - return err - } - return engine.Backup(w, basePath, since) -} - -func (s *Shard) Export(w io.Writer, basePath string, start time.Time, end time.Time) error { - engine, err := s.Engine() - if err != nil { - return err - } - return engine.Export(w, basePath, start, end) -} - -// Restore restores data to the underlying engine for the shard. -// The shard is reopened after restore. -func (s *Shard) Restore(ctx context.Context, r io.Reader, basePath string) error { - closeWaitNeeded, err := func() (bool, error) { - s.mu.Lock() - defer s.mu.Unlock() - - closeWaitNeeded := false - - // Special case - we can still restore to a disabled shard, so we should - // only check if the engine is closed and not care if the shard is - // disabled. - if s._engine == nil { - return closeWaitNeeded, ErrEngineClosed - } - - // Restore to engine. - if err := s._engine.Restore(r, basePath); err != nil { - return closeWaitNeeded, nil - } - - // Close shard. - closeWaitNeeded = true // about to call closeNoLock, closeWait will be needed - if err := s.closeNoLock(); err != nil { - return closeWaitNeeded, err - } - return closeWaitNeeded, nil - }() - - // Now that we've unlocked, we can call closeWait if needed - if closeWaitNeeded { - werr := s.closeWait() - // Return the first error encountered to the caller - if err == nil { - err = werr - } - } - if err != nil { - return err - } - - // Reopen engine. Need locked method since we had to unlock for closeWait. - return s.Open(ctx) -} - -// Import imports data to the underlying engine for the shard. r should -// be a reader from a backup created by Backup. -func (s *Shard) Import(r io.Reader, basePath string) error { - // Special case - we can still import to a disabled shard, so we should - // only check if the engine is closed and not care if the shard is - // disabled. - s.mu.Lock() - defer s.mu.Unlock() - if s._engine == nil { - return ErrEngineClosed - } - - // Import to engine. - return s._engine.Import(r, basePath) -} - -// CreateSnapshot will return a path to a temp directory -// containing hard links to the underlying shard files. -func (s *Shard) CreateSnapshot(skipCacheOk bool) (string, error) { - engine, err := s.Engine() - if err != nil { - return "", err - } - return engine.CreateSnapshot(skipCacheOk) -} - -// ForEachMeasurementName iterates over each measurement in the shard. -func (s *Shard) ForEachMeasurementName(fn func(name []byte) error) error { - engine, err := s.Engine() - if err != nil { - return err - } - return engine.ForEachMeasurementName(fn) -} - -func (s *Shard) TagKeyCardinality(name, key []byte) int { - engine, err := s.Engine() - if err != nil { - return 0 - } - return engine.TagKeyCardinality(name, key) -} - -// Digest returns a digest of the shard. -func (s *Shard) Digest() (io.ReadCloser, int64, string, error) { - engine, err := s.Engine() - if err != nil { - return nil, 0, "", err - } - - // Make sure the shard is idle/cold. (No use creating a digest of a - // hot shard that is rapidly changing.) - if isIdle, reason := engine.IsIdle(); !isIdle { - return nil, 0, reason, ErrShardNotIdle - } - - readCloser, size, err := engine.Digest() - return readCloser, size, "", err -} - -// engine safely (under an RLock) returns a reference to the shard's Engine, or -// an error if the Engine is closed, or the shard is currently disabled. 
-// -// The shard's Engine should always be accessed via a call to engine(), rather -// than directly referencing Shard.engine. -// -// If a caller needs an Engine reference but is already under a lock, then they -// should use engineNoLock(). -func (s *Shard) Engine() (Engine, error) { - s.mu.RLock() - defer s.mu.RUnlock() - return s.engineNoLock() -} - -// engineNoLock is similar to calling engine(), but the caller must guarantee -// that they already hold an appropriate lock. -func (s *Shard) engineNoLock() (Engine, error) { - if err := s.ready(); err != nil { - return nil, err - } - return s._engine, nil -} - -type ShardGroup interface { - MeasurementsByRegex(re *regexp.Regexp) []string - FieldKeysByMeasurement(name []byte) []string - FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) - MapType(measurement, field string) influxql.DataType - CreateIterator(ctx context.Context, measurement *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) - IteratorCost(ctx context.Context, measurement string, opt query.IteratorOptions) (query.IteratorCost, error) - ExpandSources(sources influxql.Sources) (influxql.Sources, error) -} - -// Shards represents a sortable list of shards. -type Shards []*Shard - -// Len implements sort.Interface. -func (a Shards) Len() int { return len(a) } - -// Less implements sort.Interface. -func (a Shards) Less(i, j int) bool { return a[i].id < a[j].id } - -// Swap implements sort.Interface. -func (a Shards) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// MeasurementsByRegex returns the unique set of measurements matching the -// provided regex, for all the shards. -func (a Shards) MeasurementsByRegex(re *regexp.Regexp) []string { - var m map[string]struct{} - for _, sh := range a { - names, err := sh.MeasurementNamesByRegex(re) - if err != nil { - continue // Skip this shard's results—previous behaviour. - } - - if m == nil { - m = make(map[string]struct{}, len(names)) - } - - for _, name := range names { - m[string(name)] = struct{}{} - } - } - - if len(m) == 0 { - return nil - } - - names := make([]string, 0, len(m)) - for key := range m { - names = append(names, key) - } - sort.Strings(names) - return names -} - -// FieldKeysByMeasurement returns a de-duplicated, sorted, set of field keys for -// the provided measurement name. -func (a Shards) FieldKeysByMeasurement(name []byte) []string { - if len(a) == 1 { - mf := a[0].MeasurementFields(name) - if mf == nil { - return nil - } - return mf.FieldKeys() - } - - all := make([][]string, 0, len(a)) - for _, shard := range a { - mf := shard.MeasurementFields(name) - if mf == nil { - continue - } - all = append(all, mf.FieldKeys()) - } - return slices.MergeSortedStrings(all...) -} - -// MeasurementNamesByPredicate returns the measurements that match the given predicate. -func (a Shards) MeasurementNamesByPredicate(expr influxql.Expr) ([][]byte, error) { - if len(a) == 1 { - return a[0].MeasurementNamesByPredicate(expr) - } - - all := make([][][]byte, len(a)) - for i, shard := range a { - names, err := shard.MeasurementNamesByPredicate(expr) - if err != nil { - return nil, err - } - all[i] = names - } - return slices.MergeSortedBytes(all...), nil -} - -// FieldKeysByPredicate returns the field keys for series that match -// the given predicate. 
-func (a Shards) FieldKeysByPredicate(expr influxql.Expr) (map[string][]string, error) { - names, ok := measurementOptimization(expr, measurementKey) - if !ok { - var err error - if names, err = a.MeasurementNamesByPredicate(expr); err != nil { - return nil, err - } - } - - all := make(map[string][]string, len(names)) - for _, name := range names { - all[string(name)] = a.FieldKeysByMeasurement(name) - } - return all, nil -} - -// consecutiveAndChildren finds all child nodes of consecutive -// influxql.BinaryExpr with AND operator nodes ("AND nodes") which are not -// themselves AND nodes. This may be the root of the tree if the root of the -// tree is not an AND node. -type consecutiveAndChildren struct { - children []influxql.Node -} - -func (v *consecutiveAndChildren) Visit(node influxql.Node) influxql.Visitor { - switch n := node.(type) { - case *influxql.BinaryExpr: - if n.Op == influxql.AND { - return v - } - case *influxql.ParenExpr: - // Parens are essentially a no-op and can be traversed through. - return v - } - - // If this wasn't a BinaryExpr with an AND operator or a Paren, record this - // child node and stop the search for this branch. - v.children = append(v.children, node) - return nil -} - -// orMeasurementTree determines if a tree (or subtree) represents a grouping of -// exclusively measurement names OR'd together with EQ operators for the -// measurements themselves. It collects the list of measurement names -// encountered and records the validity of the tree. -type orMeasurementTree struct { - measurementKey string - measurementNames []string - valid bool -} - -func (v *orMeasurementTree) Visit(node influxql.Node) influxql.Visitor { - // Return early if this tree has already been invalidated - no reason to - // continue evaluating at that point. - if !v.valid { - return nil - } - - switch n := node.(type) { - case *influxql.BinaryExpr: - // A BinaryExpr must have an operation of OR or EQ in a valid tree - if n.Op == influxql.OR { - return v - } else if n.Op == influxql.EQ { - // An EQ must be in the form of "v.measurementKey == measurementName" in a - // valid tree - if name, ok := measurementNameFromEqBinary(n, v.measurementKey); ok { - v.measurementNames = append(v.measurementNames, name) - // If a valid measurement key/value was found, there is no need to - // continue evaluating the VarRef/StringLiteral child nodes of this - // node. - return nil - } - } - case *influxql.ParenExpr: - // Parens are essentially a no-op and can be traversed through. - return v - } - - // The the type switch didn't already return, this tree is invalid. - v.valid = false - return nil -} - -func measurementOptimization(expr influxql.Expr, key string) ([][]byte, bool) { - // A measurement optimization is possible if the query contains a single group - // of one or more measurements (in the form of _measurement = measName, - // equality operator only) grouped together by OR operators, with the subtree - // containing the OR'd measurements accessible from root of the tree either - // directly (tree contains nothing but OR'd measurements) or by traversing AND - // binary expression nodes. - - // Get a list of "candidate" measurement subtrees. - v := consecutiveAndChildren{} - influxql.Walk(&v, expr) - possibleSubtrees := v.children - - // Evaluate the candidate subtrees to determine which measurement names they - // contain, and to see if they are valid for the optimization. 
- validSubtrees := []orMeasurementTree{} - for _, h := range possibleSubtrees { - t := orMeasurementTree{ - measurementKey: key, - valid: true, - } - influxql.Walk(&t, h) - if t.valid { - validSubtrees = append(validSubtrees, t) - } - } - - // There must be exactly one valid measurement subtree for this optimization - // to be applied. Note: It may also be possible to have measurements in - // multiple subtrees, as long as there are no measurements in invalid - // subtrees, by determining an intersection of the measurement names across - // all valid subtrees - this is not currently implemented. - if len(validSubtrees) != 1 { - return nil, false - } - - return slices.StringsToBytes(validSubtrees[0].measurementNames...), true -} - -// measurementNameFromEqBinary returns the name of a measurement from a binary -// expression if possible, and a boolean status indicating if the binary -// expression contained a measurement name. A meausurement name will only be -// returned if the operator for the binary is EQ, and the measurement key is on -// the LHS with the measurement name on the RHS. -func measurementNameFromEqBinary(be *influxql.BinaryExpr, key string) (string, bool) { - lhs, ok := be.LHS.(*influxql.VarRef) - if !ok { - return "", false - } else if lhs.Val != key { - return "", false - } - - rhs, ok := be.RHS.(*influxql.StringLiteral) - if !ok { - return "", false - } - - return rhs.Val, true -} - -func (a Shards) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { - fields = make(map[string]influxql.DataType) - dimensions = make(map[string]struct{}) - - for _, sh := range a { - f, d, err := sh.FieldDimensions(measurements) - if err != nil { - return nil, nil, err - } - for k, typ := range f { - if fields[k].LessThan(typ) { - fields[k] = typ - } - } - for k := range d { - dimensions[k] = struct{}{} - } - } - return -} - -func (a Shards) MapType(measurement, field string) influxql.DataType { - var typ influxql.DataType - for _, sh := range a { - sh.mu.RLock() - if t, err := sh.mapType(measurement, field); err == nil && typ.LessThan(t) { - typ = t - } - sh.mu.RUnlock() - } - return typ -} - -func (a Shards) CallType(name string, args []influxql.DataType) (influxql.DataType, error) { - typmap := query.CallTypeMapper{} - return typmap.CallType(name, args) -} - -func (a Shards) CreateIterator(ctx context.Context, measurement *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - switch measurement.SystemIterator { - case "_series": - return a.createSeriesIterator(ctx, opt) - } - - itrs := make([]query.Iterator, 0, len(a)) - for _, sh := range a { - itr, err := sh.CreateIterator(ctx, measurement, opt) - if err != nil { - query.Iterators(itrs).Close() - return nil, err - } else if itr == nil { - continue - } - itrs = append(itrs, itr) - - select { - case <-opt.InterruptCh: - query.Iterators(itrs).Close() - return nil, query.ErrQueryInterrupted - default: - } - - // Enforce series limit at creation time. 
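
The `consecutiveAndChildren` and `orMeasurementTree` visitors rely on `influxql.Walk`, which hands every node to `Visit` and keeps descending only while a non-nil visitor is returned. The sketch below, assuming the public `github.com/influxdata/influxql` parser API (`ParseExpr`, `Walk`, `BinaryExpr`, `VarRef`, `StringLiteral`), collects measurement names from a predicate of OR'd equality checks in the same spirit; it is a simplified illustration, not the optimization implemented above:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxql"
)

// measurementCollector gathers names from `_measurement = '<name>'`
// comparisons joined by OR, and flags anything else as invalid.
type measurementCollector struct {
	key   string
	names []string
	valid bool
}

func (v *measurementCollector) Visit(node influxql.Node) influxql.Visitor {
	if !v.valid {
		return nil // already invalid, stop descending
	}
	switch n := node.(type) {
	case *influxql.BinaryExpr:
		if n.Op == influxql.OR {
			return v // keep walking both sides
		}
		if n.Op == influxql.EQ {
			if ref, ok := n.LHS.(*influxql.VarRef); ok && ref.Val == v.key {
				if lit, ok := n.RHS.(*influxql.StringLiteral); ok {
					v.names = append(v.names, lit.Val)
					return nil // children already consumed
				}
			}
		}
	case *influxql.ParenExpr:
		return v // parens are transparent
	}
	v.valid = false
	return nil
}

func main() {
	expr, err := influxql.ParseExpr(`_measurement = 'cpu' OR _measurement = 'mem'`)
	if err != nil {
		panic(err)
	}
	v := &measurementCollector{key: "_measurement", valid: true}
	influxql.Walk(v, expr)
	fmt.Println("valid:", v.valid, "measurements:", v.names)
}
```
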
- if opt.MaxSeriesN > 0 { - stats := itr.Stats() - if stats.SeriesN > opt.MaxSeriesN { - query.Iterators(itrs).Close() - return nil, fmt.Errorf("max-select-series limit exceeded: (%d/%d)", stats.SeriesN, opt.MaxSeriesN) - } - } - } - return query.Iterators(itrs).Merge(opt) -} - -func (a Shards) createSeriesIterator(ctx context.Context, opt query.IteratorOptions) (_ query.Iterator, err error) { - var ( - idxs = make([]Index, 0, len(a)) - sfile *SeriesFile - ) - for _, sh := range a { - var idx Index - if idx, err = sh.Index(); err == nil { - idxs = append(idxs, idx) - } - if sfile == nil { - sfile, _ = sh.SeriesFile() - } - } - - if sfile == nil { - return nil, nil - } - - return NewSeriesPointIterator(IndexSet{Indexes: idxs, SeriesFile: sfile}, opt) -} - -func (a Shards) IteratorCost(ctx context.Context, measurement string, opt query.IteratorOptions) (query.IteratorCost, error) { - var costs query.IteratorCost - var costerr error - var mu sync.RWMutex - - setErr := func(err error) { - mu.Lock() - defer mu.Unlock() - if costerr == nil { - costerr = err - } - } - - limit := limiter.NewFixed(runtime.GOMAXPROCS(0)) - var wg sync.WaitGroup - for _, sh := range a { - costerr = limit.Take(ctx) - wg.Add(1) - - mu.RLock() - if costerr != nil { - limit.Release() - mu.RUnlock() - break - } - mu.RUnlock() - - go func(sh *Shard) { - defer limit.Release() - defer wg.Done() - - engine, err := sh.Engine() - if err != nil { - setErr(err) - return - } - - cost, err := engine.IteratorCost(measurement, opt) - if err != nil { - setErr(err) - return - } - - mu.Lock() - costs = costs.Combine(cost) - mu.Unlock() - }(sh) - } - wg.Wait() - return costs, costerr -} - -func (a Shards) CreateSeriesCursor(ctx context.Context, req SeriesCursorRequest, cond influxql.Expr) (_ SeriesCursor, err error) { - var ( - idxs []Index - sfile *SeriesFile - ) - for _, sh := range a { - var idx Index - if idx, err = sh.Index(); err == nil { - idxs = append(idxs, idx) - } - if sfile == nil { - sfile, _ = sh.SeriesFile() - } - } - - if sfile == nil { - return nil, errors.New("CreateSeriesCursor: no series file") - } - - return newSeriesCursor(req, IndexSet{Indexes: idxs, SeriesFile: sfile}, cond) -} - -func (a Shards) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { - // Use a map as a set to prevent duplicates. - set := map[string]influxql.Source{} - - // Iterate through every shard and expand the sources. - for _, sh := range a { - sh.mu.RLock() - expanded, err := sh.expandSources(sources) - sh.mu.RUnlock() - if err != nil { - return nil, err - } - - for _, src := range expanded { - switch src := src.(type) { - case *influxql.Measurement: - set[src.String()] = src - default: - return nil, fmt.Errorf("Store.ExpandSources: unsupported source type: %T", src) - } - } - } - - // Convert set to sorted slice. - names := make([]string, 0, len(set)) - for name := range set { - names = append(names, name) - } - sort.Strings(names) - - // Convert set to a list of Sources. - sorted := make([]influxql.Source, 0, len(set)) - for _, name := range names { - sorted = append(sorted, set[name]) - } - return sorted, nil -} - -// MeasurementFields holds the fields of a measurement and their codec. -type MeasurementFields struct { - mu sync.Mutex - - fields atomic.Value // map[string]*Field -} - -// NewMeasurementFields returns an initialised *MeasurementFields value. 
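
`Shards.IteratorCost` above fans out one goroutine per shard but caps concurrency with a fixed limiter sized to `runtime.GOMAXPROCS(0)`, merging costs under a mutex and keeping only the first error. The real code uses an internal `limiter` package; the sketch below substitutes a buffered channel as the semaphore:

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

// combineCosts runs cost over inputs with at most GOMAXPROCS workers in
// flight, accumulating results under a mutex and retaining the first error,
// roughly the shape of Shards.IteratorCost.
func combineCosts(inputs []int, cost func(int) (int, error)) (int, error) {
	var (
		mu    sync.Mutex
		total int
		first error
		wg    sync.WaitGroup
		sem   = make(chan struct{}, runtime.GOMAXPROCS(0))
	)
	for _, in := range inputs {
		sem <- struct{}{} // acquire a slot
		wg.Add(1)
		go func(in int) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot
			c, err := cost(in)
			mu.Lock()
			defer mu.Unlock()
			if err != nil {
				if first == nil {
					first = err
				}
				return
			}
			total += c
		}(in)
	}
	wg.Wait()
	return total, first
}

func main() {
	total, err := combineCosts([]int{1, 2, 3, 4}, func(n int) (int, error) { return n * 10, nil })
	fmt.Println(total, err)
}
```
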
-func NewMeasurementFields() *MeasurementFields { - fields := make(map[string]*Field) - mf := &MeasurementFields{} - mf.fields.Store(fields) - return mf -} - -func (m *MeasurementFields) FieldKeys() []string { - fields := m.fields.Load().(map[string]*Field) - a := make([]string, 0, len(fields)) - for key := range fields { - a = append(a, key) - } - sort.Strings(a) - return a -} - -// bytes estimates the memory footprint of this MeasurementFields, in bytes. -func (m *MeasurementFields) bytes() int { - var b int - b += 24 // mu RWMutex is 24 bytes - fields := m.fields.Load().(map[string]*Field) - b += int(unsafe.Sizeof(fields)) - for k, v := range fields { - b += int(unsafe.Sizeof(k)) + len(k) - b += int(unsafe.Sizeof(v)+unsafe.Sizeof(*v)) + len(v.Name) - } - return b -} - -// CreateFieldIfNotExists creates a new field with an autoincrementing ID. -// Returns an error if 255 fields have already been created on the measurement or -// the fields already exists with a different type. -func (m *MeasurementFields) CreateFieldIfNotExists(name []byte, typ influxql.DataType) error { - fields := m.fields.Load().(map[string]*Field) - - // Ignore if the field already exists. - if f := fields[string(name)]; f != nil { - if f.Type != typ { - return ErrFieldTypeConflict - } - return nil - } - - m.mu.Lock() - defer m.mu.Unlock() - - fields = m.fields.Load().(map[string]*Field) - // Re-check field and type under write lock. - if f := fields[string(name)]; f != nil { - if f.Type != typ { - return ErrFieldTypeConflict - } - return nil - } - - fieldsUpdate := make(map[string]*Field, len(fields)+1) - for k, v := range fields { - fieldsUpdate[k] = v - } - // Create and append a new field. - f := &Field{ - ID: uint8(len(fields) + 1), - Name: string(name), - Type: typ, - } - fieldsUpdate[string(name)] = f - m.fields.Store(fieldsUpdate) - - return nil -} - -func (m *MeasurementFields) FieldN() int { - n := len(m.fields.Load().(map[string]*Field)) - return n -} - -// Field returns the field for name, or nil if there is no field for name. -func (m *MeasurementFields) Field(name string) *Field { - f := m.fields.Load().(map[string]*Field)[name] - return f -} - -func (m *MeasurementFields) HasField(name string) bool { - if m == nil { - return false - } - f := m.fields.Load().(map[string]*Field)[name] - return f != nil -} - -// FieldBytes returns the field for name, or nil if there is no field for name. -// FieldBytes should be preferred to Field when the caller has a []byte, because -// it avoids a string allocation, which can't be avoided if the caller converts -// the []byte to a string and calls Field. -func (m *MeasurementFields) FieldBytes(name []byte) *Field { - f := m.fields.Load().(map[string]*Field)[string(name)] - return f -} - -// FieldSet returns the set of fields and their types for the measurement. 
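
`MeasurementFields.CreateFieldIfNotExists` above is a copy-on-write map: readers load the current map from an `atomic.Value` without locking, while writers take the mutex, re-check under the lock, copy the whole map, and store the new version. A minimal sketch of that pattern with an `int`-valued map:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// cowMap is a tiny copy-on-write map in the style of MeasurementFields:
// lock-free reads through atomic.Value, writers copy-then-swap under a mutex.
type cowMap struct {
	mu sync.Mutex
	m  atomic.Value // holds a map[string]int
}

func newCowMap() *cowMap {
	c := &cowMap{}
	c.m.Store(map[string]int{})
	return c
}

func (c *cowMap) Get(k string) (int, bool) {
	v, ok := c.m.Load().(map[string]int)[k]
	return v, ok
}

func (c *cowMap) SetIfAbsent(k string, v int) {
	if _, ok := c.Get(k); ok {
		return // fast path, no lock taken
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	cur := c.m.Load().(map[string]int)
	if _, ok := cur[k]; ok { // re-check under the write lock
		return
	}
	next := make(map[string]int, len(cur)+1)
	for key, val := range cur {
		next[key] = val
	}
	next[k] = v
	c.m.Store(next)
}

func main() {
	c := newCowMap()
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) { defer wg.Done(); c.SetIfAbsent("usage_user", i) }(i)
	}
	wg.Wait()
	v, _ := c.Get("usage_user")
	fmt.Println("stored once, value from the winning writer:", v)
}
```
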
-func (m *MeasurementFields) FieldSet() map[string]influxql.DataType { - fields := m.fields.Load().(map[string]*Field) - fieldTypes := make(map[string]influxql.DataType) - for name, f := range fields { - fieldTypes[name] = f.Type - } - return fieldTypes -} - -func (m *MeasurementFields) ForEachField(fn func(name string, typ influxql.DataType) bool) { - fields := m.fields.Load().(map[string]*Field) - for name, f := range fields { - if !fn(name, f.Type) { - return - } - } -} - -type FieldChanges []*FieldChange - -func MeasurementsToFieldChangeDeletions(measurements []string) FieldChanges { - fcs := make([]*FieldChange, 0, len(measurements)) - for _, m := range measurements { - fcs = append(fcs, &FieldChange{ - FieldCreate: FieldCreate{ - Measurement: []byte(m), - Field: nil, - }, - ChangeType: DeleteMeasurement, - }) - } - return fcs -} - -// MeasurementFieldSet represents a collection of fields by measurement. -// This safe for concurrent use. -type MeasurementFieldSet struct { - mu sync.RWMutex - fields map[string]*MeasurementFields - // path is the location to persist field sets - path string - changeMgr *measurementFieldSetChangeMgr -} - -// NewMeasurementFieldSet returns a new instance of MeasurementFieldSet. -func NewMeasurementFieldSet(path string, logger *zap.Logger) (*MeasurementFieldSet, error) { - const MaxCombinedWrites = 100 - fs := &MeasurementFieldSet{ - fields: make(map[string]*MeasurementFields), - path: path, - } - if nil == logger { - logger = zap.NewNop() - } - fs.SetMeasurementFieldSetWriter(MaxCombinedWrites, logger) - // If there is a load error, return the error and an empty set so - // it can be rebuild manually. - return fs, fs.load() -} - -func (fs *MeasurementFieldSet) Close() error { - if fs != nil && fs.changeMgr != nil { - fs.changeMgr.Close() - // If there is a change log file, save the in-memory version - if _, err := os.Stat(fs.changeMgr.changeFilePath); err == nil { - return fs.WriteToFile() - } else if os.IsNotExist(err) { - return nil - } else { - return fmt.Errorf("cannot get file information for %s: %w", fs.changeMgr.changeFilePath, err) - } - } - return nil -} - -func (fs *MeasurementFieldSet) ChangesPath() string { - return fs.changeMgr.changeFilePath -} - -// Bytes estimates the memory footprint of this MeasurementFieldSet, in bytes. -func (fs *MeasurementFieldSet) Bytes() int { - var b int - fs.mu.RLock() - b += 24 // mu RWMutex is 24 bytes - for k, v := range fs.fields { - b += int(unsafe.Sizeof(k)) + len(k) - b += int(unsafe.Sizeof(v)) + v.bytes() - } - b += int(unsafe.Sizeof(fs.fields)) - b += int(unsafe.Sizeof(fs.path)) + len(fs.path) - fs.mu.RUnlock() - return b -} - -// MeasurementNames returns the names of all of the measurements in the field set in -// lexographical order. -func (fs *MeasurementFieldSet) MeasurementNames() []string { - fs.mu.RLock() - defer fs.mu.RUnlock() - - names := make([]string, 0, len(fs.fields)) - for name := range fs.fields { - names = append(names, name) - } - sort.Strings(names) - return names -} - -// Fields returns fields for a measurement by name. -func (fs *MeasurementFieldSet) Fields(name []byte) *MeasurementFields { - fs.mu.RLock() - mf := fs.fields[string(name)] - fs.mu.RUnlock() - return mf -} - -// FieldsByString returns fields for a measurement by name. -func (fs *MeasurementFieldSet) FieldsByString(name string) *MeasurementFields { - fs.mu.RLock() - mf := fs.fields[name] - fs.mu.RUnlock() - return mf -} - -// CreateFieldsIfNotExists returns fields for a measurement by name. 
-func (fs *MeasurementFieldSet) CreateFieldsIfNotExists(name []byte) *MeasurementFields { - fs.mu.RLock() - mf := fs.fields[string(name)] - fs.mu.RUnlock() - - if mf != nil { - return mf - } - - fs.mu.Lock() - mf = fs.fields[string(name)] - if mf == nil { - mf = NewMeasurementFields() - fs.fields[string(name)] = mf - } - fs.mu.Unlock() - return mf -} - -// Delete removes a field set for a measurement. -func (fs *MeasurementFieldSet) Delete(name string) { - fs.mu.Lock() - fs.deleteNoLock(name) - fs.mu.Unlock() -} - -// DeleteWithLock executes fn and removes a field set from a measurement under lock. -func (fs *MeasurementFieldSet) DeleteWithLock(name string, fn func() error) error { - fs.mu.Lock() - defer fs.mu.Unlock() - - if err := fn(); err != nil { - return err - } - - fs.deleteNoLock(name) - return nil -} - -// deleteNoLock removes a field set for a measurement -func (fs *MeasurementFieldSet) deleteNoLock(name string) { - delete(fs.fields, name) -} - -func (fs *MeasurementFieldSet) IsEmpty() bool { - fs.mu.RLock() - defer fs.mu.RUnlock() - return len(fs.fields) == 0 -} - -type errorChannel chan<- error - -type writeRequest struct { - errorReturn chan<- error - changes FieldChanges -} - -type measurementFieldSetChangeMgr struct { - mu sync.Mutex - wg sync.WaitGroup - writeRequests chan writeRequest - changeFilePath string - logger *zap.Logger - changeFileSize int64 -} - -// SetMeasurementFieldSetWriter - initialize the queue for write requests -// and start the background write process -func (fs *MeasurementFieldSet) SetMeasurementFieldSetWriter(queueLength int, logger *zap.Logger) { - fs.mu.Lock() - defer fs.mu.Unlock() - fs.changeMgr = &measurementFieldSetChangeMgr{ - writeRequests: make(chan writeRequest, queueLength), - changeFilePath: filepath.Join(filepath.Dir(fs.path), FieldsChangeFile), - logger: logger, - changeFileSize: int64(0), - } - fs.changeMgr.wg.Add(1) - go fs.changeMgr.SaveWriter() -} - -func (fscm *measurementFieldSetChangeMgr) Close() { - if fscm != nil { - close(fscm.writeRequests) - fscm.wg.Wait() - } -} - -func (fs *MeasurementFieldSet) Save(changes FieldChanges) error { - return fs.changeMgr.RequestSave(changes) -} - -func (fscm *measurementFieldSetChangeMgr) RequestSave(changes FieldChanges) error { - done := make(chan error) - fscm.writeRequests <- writeRequest{errorReturn: done, changes: changes} - return <-done -} - -func (fscm *measurementFieldSetChangeMgr) SaveWriter() { - defer fscm.wg.Done() - // Block until someone modifies the MeasurementFieldSet, and - // it needs to be written to disk. Exit when the channel is closed - for wr, ok := <-fscm.writeRequests; ok; wr, ok = <-fscm.writeRequests { - fscm.appendToChangesFile(wr) - } -} - -// WriteToFile: Write the new index to a temp file and rename when it's sync'd -// This locks the MeasurementFieldSet during the marshaling, the write, and the rename. 
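
`Save`/`RequestSave`/`SaveWriter` above funnel all field-index changes through a single writer goroutine: each caller blocks on its own error channel, and the writer drains whatever else is already queued so one disk write can cover many callers. A self-contained sketch of that request/response batching (the payloads and the `write` function are placeholders):

```go
package main

import (
	"fmt"
	"sync"
)

// request carries a payload and the channel on which the single writer
// reports the outcome, mirroring writeRequest/RequestSave above.
type request struct {
	payload string
	done    chan error
}

type batchWriter struct {
	reqs chan request
	wg   sync.WaitGroup
}

func newBatchWriter(queue int) *batchWriter {
	w := &batchWriter{reqs: make(chan request, queue)}
	w.wg.Add(1)
	go w.loop()
	return w
}

// Save blocks until the writer goroutine has persisted (or failed to persist)
// a batch containing this payload.
func (w *batchWriter) Save(p string) error {
	done := make(chan error)
	w.reqs <- request{payload: p, done: done}
	return <-done
}

func (w *batchWriter) Close() { close(w.reqs); w.wg.Wait() }

func (w *batchWriter) loop() {
	defer w.wg.Done()
	for first := range w.reqs {
		batch := []request{first}
		// Drain anything already queued so one write covers many callers,
		// the same coalescing appendToChangesFile performs.
	drain:
		for {
			select {
			case r, ok := <-w.reqs:
				if !ok {
					break drain
				}
				batch = append(batch, r)
			default:
				break drain
			}
		}
		err := write(batch) // stand-in for the real disk write
		for _, r := range batch {
			r.done <- err
		}
	}
}

func write(batch []request) error {
	fmt.Println("writing batch of", len(batch))
	return nil
}

func main() {
	w := newBatchWriter(100)
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) { defer wg.Done(); _ = w.Save(fmt.Sprintf("change-%d", i)) }(i)
	}
	wg.Wait()
	w.Close()
}
```
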
-func (fs *MeasurementFieldSet) WriteToFile() error { - path := fs.path + ".tmp" - - // Open the temp file - fd, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_EXCL|os.O_SYNC, 0666) - if err != nil { - return fmt.Errorf("failed opening %s: %w", fs.path, err) - } - // Ensure temp file is cleaned up - defer func() { - if e := os.RemoveAll(path); err == nil && e != nil { - err = fmt.Errorf("failed removing temporary file %s: %w", path, e) - } - if e := os.RemoveAll(fs.changeMgr.changeFilePath); err == nil && e != nil { - err = fmt.Errorf("failed removing saved field changes - %s: %w", fs.changeMgr.changeFilePath, e) - } - }() - fs.mu.RLock() - defer fs.mu.RUnlock() - isEmpty, err := func() (isEmpty bool, err error) { - // ensure temp file closed before rename (for Windows) - defer func() { - if e := fd.Close(); err == nil && e != nil { - err = fmt.Errorf("closing %s: %w", path, e) - } - }() - if _, err = fd.Write(fieldsIndexMagicNumber); err != nil { - return true, fmt.Errorf("failed writing magic number for %s: %w", path, err) - } - - // Lock, copy, and marshal the in-memory index - b, err := fs.marshalMeasurementFieldSetNoLock() - if err != nil { - return true, fmt.Errorf("failed marshaling fields for %s: %w", fs.path, err) - } - if b == nil { - // No fields, file removed, all done - return true, nil - } - if _, err := fd.Write(b); err != nil { - return true, fmt.Errorf("failed saving fields to %s: %w", path, err) - } - return false, nil - }() - if err != nil { - return err - } else if isEmpty { - // remove empty file - if err = os.RemoveAll(fs.path); err != nil { - return fmt.Errorf("cannot remove %s: %w", fs.path, err) - } else { - return nil - } - } - - return fs.renameFileNoLock(path) -} - -// appendToChangesFile: Write a change file for fields.idx -// Only called in one Go proc, so does not need locking. -func (fscm *measurementFieldSetChangeMgr) appendToChangesFile(first writeRequest) { - var err error = nil - // Put the errorChannel on which we blocked into a slice to allow more invocations - // to share the return code from the file write - errorChannels := []errorChannel{first.errorReturn} - changes := []FieldChanges{first.changes} - // On return, send the error to every go proc that send changes - defer func() { - for _, c := range errorChannels { - c <- err - close(c) - } - }() - log, end := logger.NewOperation(context.TODO(), fscm.logger, "saving field index changes", "MeasurementFieldSet") - defer end() - // Do some blocking IO operations before marshalling the changes, - // to allow other changes to be queued up and be captured in one - // write operation, in case we are under heavy field creation load - fscm.mu.Lock() - defer fscm.mu.Unlock() - fd, err := os.OpenFile(fscm.changeFilePath, os.O_CREATE|os.O_APPEND|os.O_SYNC|os.O_WRONLY, 0666) - if err != nil { - err = fmt.Errorf("opening %s: %w", fscm.changeFilePath, err) - log.Error("failed", zap.Error(err)) - return - } - - // ensure file closed - defer errors2.Capture(&err, func() error { - if e := fd.Close(); e != nil { - e = fmt.Errorf("closing %s: %w", fd.Name(), e) - log.Error("failed", zap.Error(e)) - return e - } else { - return nil - } - })() - - var fi os.FileInfo - if fi, err = fd.Stat(); err != nil { - err = fmt.Errorf("unable to get size of %s: %w", fd.Name(), err) - log.Error("failed", zap.Error(err)) - return - } else if fi.Size() > fscm.changeFileSize { - // If we had a partial write last time, truncate the file to remove it. 
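
`WriteToFile` above replaces the fields index atomically: the new content goes to a `.tmp` file opened with `O_EXCL|O_SYNC`, the file is closed before the rename (required on Windows), and only then is it renamed over the old path, with the parent directory synced by an internal helper. A standard-library-only sketch of that temp-file-then-rename pattern:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// writeFileAtomic writes data to a temporary file in the same directory and
// renames it into place, so readers see either the old or the new content,
// never a partial write. (The real code also syncs the parent directory.)
func writeFileAtomic(path string, data []byte) (err error) {
	tmp := path + ".tmp"
	fd, err := os.OpenFile(tmp, os.O_CREATE|os.O_RDWR|os.O_EXCL|os.O_SYNC, 0o666)
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmp) // no-op once the rename has succeeded
	if _, err = fd.Write(data); err != nil {
		fd.Close()
		return err
	}
	if err = fd.Close(); err != nil { // close before rename, for Windows
		return err
	}
	return os.Rename(tmp, path)
}

func main() {
	dir, _ := os.MkdirTemp("", "fields")
	defer os.RemoveAll(dir)
	path := filepath.Join(dir, "fields.idx")
	if err := writeFileAtomic(path, []byte("demo")); err != nil {
		panic(err)
	}
	b, _ := os.ReadFile(path)
	fmt.Printf("%s\n", b)
}
```
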
- if err = fd.Truncate(fscm.changeFileSize); err != nil { - err = fmt.Errorf("cannot truncate %s to last known good size of %d after incomplete write: %w", fd.Name(), fscm.changeFileSize, err) - log.Error("failed", zap.Error(err)) - return - } - } - - // Read all the pending field and measurement write or delete - // requests - for { - select { - case wr := <-fscm.writeRequests: - changes = append(changes, wr.changes) - errorChannels = append(errorChannels, wr.errorReturn) - continue - default: - } - break - } - // marshal the slice of slices of field changes in size-prefixed protobuf - var b []byte - b, err = marshalFieldChanges(changes...) - if err != nil { - err = fmt.Errorf("error marshaling changes for %s: %w", fd.Name(), err) - log.Error("failed", zap.Error(err)) - return - } - - if _, err = fd.Write(b); err != nil { - err = fmt.Errorf("failed writing to %s: %w", fd.Name(), err) - log.Error("failed", zap.Error(err)) - return - } else if fi, err = fd.Stat(); err != nil { - err = fmt.Errorf("unable to get final size of %s after appendation: %w", fd.Name(), err) - log.Error("failed", zap.Error(err)) - return - } else { - fscm.changeFileSize = fi.Size() - } -} - -func readSizePlusBuffer(r io.Reader, b []byte) ([]byte, error) { - var numBuf [bytesInInt64]byte - - if _, err := r.Read(numBuf[:]); err != nil { - return nil, err - } - size := int(binary.LittleEndian.Uint64(numBuf[:])) - if cap(b) < size { - b = make([]byte, size) - } - _, err := io.ReadAtLeast(r, b, size) - if err != nil { - return nil, err - } - return b, nil -} - -func (fs *MeasurementFieldSet) renameFileNoLock(path string) error { - if err := file.RenameFile(path, fs.path); err != nil { - return fmt.Errorf("cannot rename %s to %s: %w", path, fs.path, err) - } - - dir := filepath.Dir(fs.path) - if err := file.SyncDir(dir); err != nil { - return fmt.Errorf("cannot sync directory %s: %w", dir, err) - } - - return nil -} - -// marshalMeasurementFieldSetNoLock: remove the fields.idx file if no fields -// otherwise, copy the in-memory version into a protobuf to write to -// disk -func (fs *MeasurementFieldSet) marshalMeasurementFieldSetNoLock() (marshalled []byte, err error) { - if len(fs.fields) == 0 { - // If no fields left, remove the fields index file - return nil, nil - } - - pb := internal.MeasurementFieldSet{ - Measurements: make([]*internal.MeasurementFields, 0, len(fs.fields)), - } - - for name, mf := range fs.fields { - imf := &internal.MeasurementFields{ - Name: []byte(name), - Fields: make([]*internal.Field, 0, mf.FieldN()), - } - - mf.ForEachField(func(field string, typ influxql.DataType) bool { - imf.Fields = append(imf.Fields, &internal.Field{Name: []byte(field), Type: int32(typ)}) - return true - }) - - pb.Measurements = append(pb.Measurements, imf) - } - b, err := proto.Marshal(&pb) - if err != nil { - return nil, err - } else { - return b, nil - } -} - -func marshalFieldChanges(changeSet ...FieldChanges) ([]byte, error) { - fcs := internal.FieldChangeSet{ - Changes: nil, - } - for _, fc := range changeSet { - for _, f := range fc { - mfc := &internal.MeasurementFieldChange{ - Measurement: f.Measurement, - Change: internal.ChangeType(f.ChangeType), - } - if f.Field != nil { - mfc.Field = &internal.Field{ - Name: []byte(f.Field.Name), - Type: int32(f.Field.Type), - } - fcs.Changes = append(fcs.Changes, mfc) - } - } - } - mo := proto.MarshalOptions{} - var numBuf [bytesInInt64]byte - - b, err := mo.MarshalAppend(numBuf[:], &fcs) - binary.LittleEndian.PutUint64(b[0:bytesInInt64], uint64(len(b)-bytesInInt64)) - - if 
err != nil { - fields := make([]string, 0, len(fcs.Changes)) - for _, fc := range changeSet { - for _, f := range fc { - fields = append(fields, fmt.Sprintf("%q.%q", f.Measurement, f.Field.Name)) - } - } - return nil, fmt.Errorf("failed marshaling new fields - %s: %w", strings.Join(fields, ", "), err) - } - return b, nil -} - -func (fs *MeasurementFieldSet) load() (rErr error) { - err := func() error { - fs.mu.Lock() - defer fs.mu.Unlock() - - pb, err := fs.loadParseFieldIndexPB() - if err != nil { - return err - } - fs.fields = make(map[string]*MeasurementFields, len(pb.GetMeasurements())) - for _, measurement := range pb.GetMeasurements() { - fields := make(map[string]*Field, len(measurement.GetFields())) - for _, field := range measurement.GetFields() { - fields[string(field.GetName())] = &Field{Name: string(field.GetName()), Type: influxql.DataType(field.GetType())} - } - set := &MeasurementFields{} - set.fields.Store(fields) - fs.fields[string(measurement.GetName())] = set - } - return nil - }() - - if err != nil { - return fmt.Errorf("failed loading field indices: %w", err) - } - return fs.ApplyChanges() -} - -func (fs *MeasurementFieldSet) loadParseFieldIndexPB() (pb *internal.MeasurementFieldSet, rErr error) { - pb = &internal.MeasurementFieldSet{} - - fd, err := os.Open(fs.path) - if os.IsNotExist(err) { - return pb, nil - } else if err != nil { - err = fmt.Errorf("failed opening %s: %w", fs.path, err) - return nil, err - } - - defer errors2.Capture(&rErr, func() error { - if e := fd.Close(); e != nil { - return fmt.Errorf("failed closing %s: %w", fd.Name(), e) - } else { - return nil - } - })() - - var magic [4]byte - if _, err := fd.Read(magic[:]); err != nil { - err = fmt.Errorf("failed reading %s: %w", fs.path, err) - return nil, err - } - - if !bytes.Equal(magic[:], fieldsIndexMagicNumber) { - return nil, fmt.Errorf("%q: %w", fs.path, ErrUnknownFieldsFormat) - } - - b, err := io.ReadAll(fd) - if err != nil { - err = fmt.Errorf("failed reading %s: %w", fs.path, err) - return nil, err - } - if err = proto.Unmarshal(b, pb); err != nil { - err = fmt.Errorf("failed unmarshaling %s: %w", fs.path, err) - return nil, err - } - return pb, err -} - -func (fscm *measurementFieldSetChangeMgr) loadAllFieldChanges(log *zap.Logger) (changes []FieldChanges, rErr error) { - var fcs FieldChanges - - fscm.mu.Lock() - defer fscm.mu.Unlock() - fd, err := os.Open(fscm.changeFilePath) - if os.IsNotExist(err) { - return nil, nil - } - if err != nil { - err = fmt.Errorf("failed opening %s: %w", fscm.changeFilePath, err) - log.Error("field index file of changes", zap.Error(err)) - return nil, err - } - defer errors2.Capture(&rErr, func() error { - if e := fd.Close(); e != nil { - return fmt.Errorf("failed closing %s: %w", fd.Name(), e) - } else { - return nil - } - })() - for fcs, err = fscm.loadFieldChangeSet(fd); err == nil; fcs, err = fscm.loadFieldChangeSet(fd) { - changes = append(changes, fcs) - } - if errors.Is(err, io.EOF) { - return changes, nil - } else if errors.Is(err, io.ErrUnexpectedEOF) { - log.Warn("last entry was an incomplete write", zap.Error(err)) - return changes, nil - } else { - log.Error("field index file of changes", zap.Error(err)) - return nil, err - } -} - -func (fscm *measurementFieldSetChangeMgr) loadFieldChangeSet(r io.Reader) (FieldChanges, error) { - var pb internal.FieldChangeSet - - b, err := readSizePlusBuffer(r, nil) - if err != nil { - return nil, fmt.Errorf("failed reading %s: %w", fscm.changeFilePath, err) - } - if err := proto.Unmarshal(b, &pb); err != nil { - 
return nil, fmt.Errorf("failed unmarshaling %s: %w", fscm.changeFilePath, err)
-    }
-
-    fcs := make([]*FieldChange, 0, len(pb.Changes))
-
-    for _, fc := range pb.Changes {
-        fcs = append(fcs, &FieldChange{
-            FieldCreate: FieldCreate{
-                Measurement: fc.Measurement,
-                Field: &Field{
-                    ID:   0,
-                    Name: string(fc.Field.Name),
-                    Type: influxql.DataType(fc.Field.Type),
-                },
-            },
-            ChangeType: ChangeType(fc.Change),
-        })
-    }
-    return fcs, nil
-}
-
-func (fs *MeasurementFieldSet) ApplyChanges() error {
-    log, end := logger.NewOperation(context.TODO(), fs.changeMgr.logger, "loading changes", "field indices")
-    defer end()
-    changes, err := fs.changeMgr.loadAllFieldChanges(log)
-    if err != nil {
-        return err
-    }
-    if len(changes) <= 0 {
-        return os.RemoveAll(fs.changeMgr.changeFilePath)
-    }
-
-    for _, fcs := range changes {
-        for _, fc := range fcs {
-            if fc.ChangeType == DeleteMeasurement {
-                fs.Delete(string(fc.Measurement))
-            } else {
-                mf := fs.CreateFieldsIfNotExists(fc.Measurement)
-                if err := mf.CreateFieldIfNotExists([]byte(fc.Field.Name), fc.Field.Type); err != nil {
-                    err = fmt.Errorf("failed creating %q.%q: %w", fc.Measurement, fc.Field.Name, err)
-                    log.Error("field creation", zap.Error(err))
-                    return err
-                }
-            }
-        }
-    }
-    return fs.WriteToFile()
-}
-
-// Field represents a series field. All of the fields must be hashable.
-type Field struct {
-    ID   uint8             `json:"id,omitempty"`
-    Name string            `json:"name,omitempty"`
-    Type influxql.DataType `json:"type,omitempty"`
-}
-
-type FieldChange struct {
-    FieldCreate
-    ChangeType ChangeType
-}
-
-type ChangeType int
-
-const (
-    AddMeasurementField = ChangeType(internal.ChangeType_AddMeasurementField)
-    DeleteMeasurement   = ChangeType(internal.ChangeType_DeleteMeasurement)
-)
-
-// NewFieldKeysIterator returns an iterator that can be iterated over to
-// retrieve field keys.
-func NewFieldKeysIterator(sh *Shard, opt query.IteratorOptions) (query.Iterator, error) {
-    itr := &fieldKeysIterator{shard: sh}
-
-    index, err := sh.Index()
-    if err != nil {
-        return nil, err
-    }
-
-    // Retrieve measurements from shard. Filter if condition specified.
-    //
-    // FGA is currently not supported when retrieving field keys.
-    indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sh.sfile}
-    names, err := indexSet.MeasurementNamesByExpr(query.OpenAuthorizer, opt.Condition)
-    if err != nil {
-        return nil, err
-    }
-    itr.names = names
-
-    return itr, nil
-}
-
-// fieldKeysIterator iterates over measurements and gets field keys from each measurement.
-type fieldKeysIterator struct {
-    shard *Shard
-    names [][]byte // remaining measurement names
-    buf   struct {
-        name   []byte  // current measurement name
-        fields []Field // current measurement's fields
-    }
-}
-
-// Stats returns stats about the points processed.
-func (itr *fieldKeysIterator) Stats() query.IteratorStats { return query.IteratorStats{} }
-
-// Close closes the iterator.
-func (itr *fieldKeysIterator) Close() error { return nil }
-
-// Next emits the next field key name and its data type.
-func (itr *fieldKeysIterator) Next() (*query.FloatPoint, error) {
-    for {
-        // If there are no more keys then move to the next measurement.
- if len(itr.buf.fields) == 0 { - if len(itr.names) == 0 { - return nil, nil - } - - itr.buf.name = itr.names[0] - mf := itr.shard.MeasurementFields(itr.buf.name) - if mf != nil { - fset := mf.FieldSet() - if len(fset) == 0 { - itr.names = itr.names[1:] - continue - } - - keys := make([]string, 0, len(fset)) - for k := range fset { - keys = append(keys, k) - } - sort.Strings(keys) - - itr.buf.fields = make([]Field, len(keys)) - for i, name := range keys { - itr.buf.fields[i] = Field{Name: name, Type: fset[name]} - } - } - itr.names = itr.names[1:] - continue - } - - // Return next key. - field := itr.buf.fields[0] - p := &query.FloatPoint{ - Name: string(itr.buf.name), - Aux: []interface{}{field.Name, field.Type.String()}, - } - itr.buf.fields = itr.buf.fields[1:] - - return p, nil - } -} - -// NewTagKeysIterator returns a new instance of TagKeysIterator. -func NewTagKeysIterator(sh *Shard, opt query.IteratorOptions) (query.Iterator, error) { - fn := func(name []byte) ([][]byte, error) { - index, err := sh.Index() - if err != nil { - return nil, err - } - - indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sh.sfile} - var keys [][]byte - if err := indexSet.ForEachMeasurementTagKey(name, func(key []byte) error { - keys = append(keys, key) - return nil - }); err != nil { - return nil, err - } - return keys, nil - } - return newMeasurementKeysIterator(sh, fn, opt) -} - -// measurementKeyFunc is the function called by measurementKeysIterator. -type measurementKeyFunc func(name []byte) ([][]byte, error) - -func newMeasurementKeysIterator(sh *Shard, fn measurementKeyFunc, opt query.IteratorOptions) (*measurementKeysIterator, error) { - index, err := sh.Index() - if err != nil { - return nil, err - } - - indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sh.sfile} - itr := &measurementKeysIterator{fn: fn} - names, err := indexSet.MeasurementNamesByExpr(opt.Authorizer, opt.Condition) - if err != nil { - return nil, err - } - itr.names = names - - return itr, nil -} - -// measurementKeysIterator iterates over measurements and gets keys from each measurement. -type measurementKeysIterator struct { - names [][]byte // remaining measurement names - buf struct { - name []byte // current measurement name - keys [][]byte // current measurement's keys - } - fn measurementKeyFunc -} - -// Stats returns stats about the points processed. -func (itr *measurementKeysIterator) Stats() query.IteratorStats { return query.IteratorStats{} } - -// Close closes the iterator. -func (itr *measurementKeysIterator) Close() error { return nil } - -// Next emits the next tag key name. -func (itr *measurementKeysIterator) Next() (*query.FloatPoint, error) { - for { - // If there are no more keys then move to the next measurements. - if len(itr.buf.keys) == 0 { - if len(itr.names) == 0 { - return nil, nil - } - - itr.buf.name, itr.names = itr.names[0], itr.names[1:] - - keys, err := itr.fn(itr.buf.name) - if err != nil { - return nil, err - } - itr.buf.keys = keys - continue - } - - // Return next key. - p := &query.FloatPoint{ - Name: string(itr.buf.name), - Aux: []interface{}{string(itr.buf.keys[0])}, - } - itr.buf.keys = itr.buf.keys[1:] - - return p, nil - } -} - -// LimitError represents an error caused by a configurable limit. 
-type LimitError struct { - Reason string -} - -func (e *LimitError) Error() string { return e.Reason } diff --git a/tsdb/shard_internal_test.go b/tsdb/shard_internal_test.go deleted file mode 100644 index d0240267246..00000000000 --- a/tsdb/shard_internal_test.go +++ /dev/null @@ -1,412 +0,0 @@ -package tsdb - -import ( - "context" - "fmt" - "path/filepath" - "regexp" - "sort" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxql" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestShard_ErrorPrinting(t *testing.T) { - - tests := []struct { - nSeq int - raw string - }{ - {1, string([]byte{'b', 'e', 'n', 't', 'e', 's', 't', '\t', '\n'})}, - {1, string([]byte{'b', 'e', 'n', 't', 'e', 's', 0, 0, 0xFE, 0, 0xFE, 't'})}, - {2, string([]byte{0, 0, 0, 0, 0xFE, '\t', '\n', '\t', 'b', 'e', 'n', 't', 'e', 's', 't', 0, 0, 0, 0, 0xFE, '\t', '\n', '\t', '\t', '\t'})}, - } - - for i := range tests { - f := makePrintable(tests[i].raw) - require.True(t, models.ValidToken([]byte(f))) - c := 0 - nSeq := 0 - for _, r := range f { - if r == unPrintReplRune { - c++ - if c == 1 { - nSeq++ - } - require.LessOrEqual(t, c, unPrintMaxReplRune, "too many repeated %c", unPrintReplRune) - } else { - c = 0 - } - } - require.Equalf(t, tests[i].nSeq, nSeq, "wrong number of elided sequences of replacement characters") - } -} - -func TestShard_MapType(t *testing.T) { - var sh *TempShard - - setup := func(index string) { - sh = NewTempShard(t, index) - - if err := sh.Open(context.Background()); err != nil { - t.Fatal(err) - } - - sh.MustWritePointsString(` -cpu,host=serverA,region=uswest value=100 0 -cpu,host=serverA,region=uswest value=50,val2=5 10 -cpu,host=serverB,region=uswest value=25 0 -mem,host=serverA value=25i 0 -mem,host=serverB value=50i,val3=t 10 -_reserved,region=uswest value="foo" 0 -`) - } - - for _, index := range RegisteredIndexes() { - setup(index) - for _, tt := range []struct { - measurement string - field string - typ influxql.DataType - }{ - { - measurement: "cpu", - field: "value", - typ: influxql.Float, - }, - { - measurement: "cpu", - field: "host", - typ: influxql.Tag, - }, - { - measurement: "cpu", - field: "region", - typ: influxql.Tag, - }, - { - measurement: "cpu", - field: "val2", - typ: influxql.Float, - }, - { - measurement: "cpu", - field: "unknown", - typ: influxql.Unknown, - }, - { - measurement: "mem", - field: "value", - typ: influxql.Integer, - }, - { - measurement: "mem", - field: "val3", - typ: influxql.Boolean, - }, - { - measurement: "mem", - field: "host", - typ: influxql.Tag, - }, - { - measurement: "unknown", - field: "unknown", - typ: influxql.Unknown, - }, - { - measurement: "_fieldKeys", - field: "fieldKey", - typ: influxql.String, - }, - { - measurement: "_fieldKeys", - field: "fieldType", - typ: influxql.String, - }, - { - measurement: "_fieldKeys", - field: "unknown", - typ: influxql.Unknown, - }, - { - measurement: "_series", - field: "key", - typ: influxql.String, - }, - { - measurement: "_series", - field: "unknown", - typ: influxql.Unknown, - }, - { - measurement: "_tagKeys", - field: "tagKey", - typ: influxql.String, - }, - { - measurement: "_tagKeys", - field: "unknown", - typ: influxql.Unknown, - }, - { - measurement: "_reserved", - field: "value", - typ: influxql.String, - }, - { - measurement: "_reserved", - field: "region", - typ: influxql.Tag, - }, - } { - name 
:= fmt.Sprintf("%s_%s_%s", index, tt.measurement, tt.field) - t.Run(name, func(t *testing.T) { - typ, err := sh.mapType(tt.measurement, tt.field) - if err != nil { - t.Fatal(err) - } - - if have, want := typ, tt.typ; have != want { - t.Errorf("unexpected data type: have=%#v want=%#v", have, want) - } - }) - } - sh.Close() - } -} - -func TestShard_MeasurementsByRegex(t *testing.T) { - var sh *TempShard - setup := func(index string) { - sh = NewTempShard(t, index) - if err := sh.Open(context.Background()); err != nil { - t.Fatal(err) - } - - sh.MustWritePointsString(` -cpu,host=serverA,region=uswest value=100 0 -cpu,host=serverA,region=uswest value=50,val2=5 10 -cpu,host=serverB,region=uswest value=25 0 -mem,host=serverA value=25i 0 -mem,host=serverB value=50i,val3=t 10 -`) - } - - for _, index := range RegisteredIndexes() { - setup(index) - for _, tt := range []struct { - regex string - measurements []string - }{ - {regex: `cpu`, measurements: []string{"cpu"}}, - {regex: `mem`, measurements: []string{"mem"}}, - {regex: `cpu|mem`, measurements: []string{"cpu", "mem"}}, - {regex: `gpu`, measurements: []string{}}, - {regex: `pu`, measurements: []string{"cpu"}}, - {regex: `p|m`, measurements: []string{"cpu", "mem"}}, - } { - t.Run(index+"_"+tt.regex, func(t *testing.T) { - re := regexp.MustCompile(tt.regex) - measurements, err := sh.MeasurementNamesByRegex(re) - if err != nil { - t.Fatal(err) - } - - mstrings := make([]string, 0, len(measurements)) - for _, name := range measurements { - mstrings = append(mstrings, string(name)) - } - sort.Strings(mstrings) - if diff := cmp.Diff(tt.measurements, mstrings, cmpopts.EquateEmpty()); diff != "" { - t.Errorf("unexpected measurements:\n%s", diff) - } - }) - } - sh.Close() - } -} - -func TestShard_MeasurementOptimization(t *testing.T) { - t.Parallel() - - cases := []struct { - expr influxql.Expr - name string - ok bool - names [][]byte - }{ - { - expr: influxql.MustParseExpr(`_name = 'm0'`), - name: "single measurement", - ok: true, - names: [][]byte{[]byte("m0")}, - }, - { - expr: influxql.MustParseExpr(`_something = 'f' AND _name = 'm0'`), - name: "single measurement with AND", - ok: true, - names: [][]byte{[]byte("m0")}, - }, - { - expr: influxql.MustParseExpr(`_something = 'f' AND (a =~ /x0/ AND _name = 'm0')`), - name: "single measurement with multiple AND", - ok: true, - names: [][]byte{[]byte("m0")}, - }, - { - expr: influxql.MustParseExpr(`_name = 'm0' OR _name = 'm1' OR _name = 'm2'`), - name: "multiple measurements alone", - ok: true, - names: [][]byte{[]byte("m0"), []byte("m1"), []byte("m2")}, - }, - { - expr: influxql.MustParseExpr(`(_name = 'm0' OR _name = 'm1' OR _name = 'm2') AND (_field = 'foo' OR _field = 'bar' OR _field = 'qux')`), - name: "multiple measurements combined", - ok: true, - names: [][]byte{[]byte("m0"), []byte("m1"), []byte("m2")}, - }, - { - expr: influxql.MustParseExpr(`(_name = 'm0' OR (_name = 'm1' OR _name = 'm2')) AND tag1 != 'foo'`), - name: "parens in expression", - ok: true, - names: [][]byte{[]byte("m0"), []byte("m1"), []byte("m2")}, - }, - { - expr: influxql.MustParseExpr(`(tag1 != 'foo' OR tag2 = 'bar') AND (_name = 'm0' OR _name = 'm1' OR _name = 'm2') AND (_field = 'val1' OR _field = 'val2')`), - name: "multiple AND", - ok: true, - names: [][]byte{[]byte("m0"), []byte("m1"), []byte("m2")}, - }, - { - expr: influxql.MustParseExpr(`(_name = 'm0' OR _name = 'm1' OR _name = 'm2') AND (tag1 != 'foo' OR _name = 'm1')`), - name: "measurements on in multiple groups, only one valid group", - ok: true, - names: 
[][]byte{[]byte("m0"), []byte("m1"), []byte("m2")}, - }, - { - expr: influxql.MustParseExpr(`_name = 'm0' OR tag1 != 'foo'`), - name: "single measurement with OR", - ok: false, - names: nil, - }, - { - expr: influxql.MustParseExpr(`_name = 'm0' OR true`), - name: "measurement with OR boolean literal", - ok: false, - names: nil, - }, - { - expr: influxql.MustParseExpr(`_name != 'm0' AND tag1 != 'foo'`), - name: "single measurement with non-equal", - ok: false, - names: nil, - }, - { - expr: influxql.MustParseExpr(`(_name = 'm0' OR _name != 'm1' OR _name = 'm2') AND (_field = 'foo' OR _field = 'bar' OR _field = 'qux')`), - name: "multiple measurements with non-equal", - ok: false, - names: nil, - }, - { - expr: influxql.MustParseExpr(`tag1 = 'foo' AND tag2 = 'bar'`), - name: "no measurements - multiple tags", - ok: false, - names: nil, - }, - { - expr: influxql.MustParseExpr(`_field = 'foo'`), - name: "no measurements - single field", - ok: false, - names: nil, - }, - { - expr: influxql.MustParseExpr(`(_name = 'm0' OR _name = 'm1' AND _name = 'm2') AND tag1 != 'foo'`), - name: "measurements with AND", - ok: false, - names: nil, - }, - { - expr: influxql.MustParseExpr(`(_name = 'm0' OR _name = 'm1' OR _name = 'm2') OR (tag1 != 'foo' OR _name = 'm1')`), - name: "top level is not AND", - ok: false, - names: nil, - }, - } - - for _, tc := range cases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - names, ok := measurementOptimization(tc.expr, measurementKey) - require.Equal(t, tc.names, names) - require.Equal(t, tc.ok, ok) - }) - } -} - -// TempShard represents a test wrapper for Shard that uses temporary -// filesystem paths. -type TempShard struct { - *Shard - path string - sfile *SeriesFile -} - -// NewTempShard returns a new instance of TempShard with temp paths. -func NewTempShard(tb testing.TB, index string) *TempShard { - tb.Helper() - - // Create temporary path for data and WAL. - dir := tb.TempDir() - - // Create series file. - sfile := NewSeriesFile(filepath.Join(dir, "db0", SeriesFileDirectory)) - sfile.Logger = zaptest.NewLogger(tb) - if err := sfile.Open(); err != nil { - panic(err) - } - - // Build engine options. - opt := NewEngineOptions() - opt.IndexVersion = index - opt.Config.WALDir = filepath.Join(dir, "wal") - - return &TempShard{ - Shard: NewShard(0, - filepath.Join(dir, "data", "db0", "rp0", "1"), - filepath.Join(dir, "wal", "db0", "rp0", "1"), - sfile, - opt, - ), - sfile: sfile, - path: dir, - } -} - -// Close closes the shard and removes all underlying data. -func (sh *TempShard) Close() error { - sh.sfile.Close() - return sh.Shard.Close() -} - -// MustWritePointsString parses the line protocol (with second precision) and -// inserts the resulting points into the shard. Panic on error. 
-func (sh *TempShard) MustWritePointsString(s string) { - a, err := models.ParsePointsWithPrecision([]byte(strings.TrimSpace(s)), time.Time{}, "s") - if err != nil { - panic(err) - } - - if err := sh.WritePoints(context.Background(), a); err != nil { - panic(err) - } -} diff --git a/tsdb/shard_test.go b/tsdb/shard_test.go deleted file mode 100644 index f799cafee5f..00000000000 --- a/tsdb/shard_test.go +++ /dev/null @@ -1,2493 +0,0 @@ -package tsdb_test - -import ( - "bytes" - "context" - "errors" - "fmt" - "math" - "os" - "path/filepath" - "reflect" - "regexp" - "runtime" - "sort" - "strings" - "sync" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/internal" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/deep" - "github.com/influxdata/influxdb/v2/pkg/testing/assert" - "github.com/influxdata/influxdb/v2/tsdb" - _ "github.com/influxdata/influxdb/v2/tsdb/engine" - _ "github.com/influxdata/influxdb/v2/tsdb/index" - "github.com/influxdata/influxql" - assert2 "github.com/stretchr/testify/assert" -) - -func TestShardWriteAndIndex(t *testing.T) { - tmpDir := t.TempDir() - tmpShard := filepath.Join(tmpDir, "shard") - tmpWal := filepath.Join(tmpDir, "wal") - - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - opts := tsdb.NewEngineOptions() - opts.Config.WALDir = filepath.Join(tmpDir, "wal") - - sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) - - // Calling WritePoints when the engine is not open will return - // ErrEngineClosed. 
- if got, exp := sh.WritePoints(context.Background(), nil), tsdb.ErrEngineClosed; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } - - if err := sh.Open(context.Background()); err != nil { - t.Fatalf("error opening shard: %s", err.Error()) - } - - pt := models.MustNewPoint( - "cpu", - models.Tags{{Key: []byte("host"), Value: []byte("server")}}, - map[string]interface{}{"value": 1.0}, - time.Unix(1, 2), - ) - - err := sh.WritePoints(context.Background(), []models.Point{pt}) - if err != nil { - t.Fatalf(err.Error()) - } - - pt.SetTime(time.Unix(2, 3)) - err = sh.WritePoints(context.Background(), []models.Point{pt}) - if err != nil { - t.Fatalf(err.Error()) - } - - validateIndex := func() { - cnt := sh.SeriesN() - if got, exp := cnt, int64(1); got != exp { - t.Fatalf("got %v series, exp %v series in index", got, exp) - } - } - - validateIndex() - - // ensure the index gets loaded after closing and opening the shard - sh.Close() - - sh = tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) - if err := sh.Open(context.Background()); err != nil { - t.Fatalf("error opening shard: %s", err.Error()) - } - - validateIndex() - - // and ensure that we can still write data - pt.SetTime(time.Unix(2, 6)) - err = sh.WritePoints(context.Background(), []models.Point{pt}) - if err != nil { - t.Fatalf(err.Error()) - } - - sh.Close() -} - -func TestShardRebuildIndex(t *testing.T) { - tmpDir := t.TempDir() - tmpShard := filepath.Join(tmpDir, "shard") - tmpWal := filepath.Join(tmpDir, "wal") - - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - opts := tsdb.NewEngineOptions() - opts.Config.WALDir = filepath.Join(tmpDir, "wal") - - sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) - if err := sh.Open(context.Background()); err != nil { - t.Fatalf("error opening shard: %s", err.Error()) - } - - pt := models.MustNewPoint( - "cpu", - models.Tags{{Key: []byte("host"), Value: []byte("server")}}, - map[string]interface{}{"value": 1.0}, - time.Unix(1, 2), - ) - - err := sh.WritePoints(context.Background(), []models.Point{pt}) - if err != nil { - t.Fatalf(err.Error()) - } - - pt.SetTime(time.Unix(2, 3)) - err = sh.WritePoints(context.Background(), []models.Point{pt}) - if err != nil { - t.Fatalf(err.Error()) - } - - indexPath := filepath.Join(tmpShard, "index") - validateIndex := func() { - cnt := sh.SeriesN() - if got, exp := cnt, int64(1); got != exp { - t.Fatalf("got %v series, exp %v series in index", got, exp) - } - fi, err := os.Stat(indexPath) - - // Make sure index data is being persisted to disk. - if os.IsNotExist(err) { - t.Fatalf("index path %q does not exist", indexPath) - } - if !fi.IsDir() { - t.Fatalf("index path %q is not a directory", indexPath) - } - } - - validateIndex() - - // ensure the index gets rebuilt after its directory is deleted and - // the shard is reopened. 
- if err := sh.Close(); err != nil { - t.Fatalf(err.Error()) - } - if err := os.RemoveAll(indexPath); err != nil { - t.Fatalf(err.Error()) - } - - sh = tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) - if err := sh.Open(context.Background()); err != nil { - t.Fatalf("error opening shard: %s", err.Error()) - } - - validateIndex() - - // and ensure that we can still write data - pt.SetTime(time.Unix(2, 6)) - err = sh.WritePoints(context.Background(), []models.Point{pt}) - if err != nil { - t.Fatalf(err.Error()) - } - - sh.Close() -} - -func TestShard_Open_CorruptFieldsIndex(t *testing.T) { - tmpDir := t.TempDir() - tmpShard := filepath.Join(tmpDir, "shard") - tmpWal := filepath.Join(tmpDir, "wal") - - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - opts := tsdb.NewEngineOptions() - opts.Config.WALDir = filepath.Join(tmpDir, "wal") - - sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) - t.Cleanup(func() { sh.Close() }) - - // Calling WritePoints when the engine is not open will return - // ErrEngineClosed. - if got, exp := sh.WritePoints(context.Background(), nil), tsdb.ErrEngineClosed; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } - - if err := sh.Open(context.Background()); err != nil { - t.Fatalf("error opening shard: %s", err.Error()) - } - - pt := models.MustNewPoint( - "cpu", - models.Tags{{Key: []byte("host"), Value: []byte("server")}}, - map[string]interface{}{"value": 1.0}, - time.Unix(1, 2), - ) - - err := sh.WritePoints(context.Background(), []models.Point{pt}) - if err != nil { - t.Fatalf(err.Error()) - } - - if err := sh.Close(); err != nil { - t.Fatalf("close shard error: %v", err) - } - - path := filepath.Join(tmpShard, "fields.idx") - if err := os.Truncate(path, 6); err != nil { - t.Fatalf("truncate shard error: %v", err) - } - - if err := sh.Open(context.Background()); err != nil { - t.Fatalf("error opening shard: %s", err.Error()) - } -} - -func TestWriteTimeTag(t *testing.T) { - tmpDir := t.TempDir() - tmpShard := filepath.Join(tmpDir, "shard") - tmpWal := filepath.Join(tmpDir, "wal") - - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - opts := tsdb.NewEngineOptions() - opts.Config.WALDir = filepath.Join(tmpDir, "wal") - - sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) - if err := sh.Open(context.Background()); err != nil { - t.Fatalf("error opening shard: %s", err.Error()) - } - defer sh.Close() - - pt := models.MustNewPoint( - "cpu", - models.NewTags(map[string]string{}), - map[string]interface{}{"time": 1.0}, - time.Unix(1, 2), - ) - - if err := sh.WritePoints(context.Background(), []models.Point{pt}); err == nil { - t.Fatal("expected error: got nil") - } - - pt = models.MustNewPoint( - "cpu", - models.NewTags(map[string]string{}), - map[string]interface{}{"value": 1.0, "time": 1.0}, - time.Unix(1, 2), - ) - - if err := sh.WritePoints(context.Background(), []models.Point{pt}); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - mf := sh.MeasurementFields([]byte("cpu")) - if mf == nil { - t.Fatal("expected cpu measurement fields") - } - - if got, exp := mf.FieldN(), 1; got != exp { - t.Fatalf("invalid number of field names: got=%v exp=%v", got, exp) - } -} - -func TestWriteTimeField(t *testing.T) { - tmpDir := t.TempDir() - tmpShard := filepath.Join(tmpDir, "shard") - tmpWal := filepath.Join(tmpDir, "wal") - - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - opts := tsdb.NewEngineOptions() - opts.Config.WALDir = filepath.Join(tmpDir, "wal") - - sh := tsdb.NewShard(1, tmpShard, 
tmpWal, sfile.SeriesFile, opts) - if err := sh.Open(context.Background()); err != nil { - t.Fatalf("error opening shard: %s", err.Error()) - } - defer sh.Close() - - pt := models.MustNewPoint( - "cpu", - models.NewTags(map[string]string{"time": "now"}), - map[string]interface{}{"value": 1.0}, - time.Unix(1, 2), - ) - - if err := sh.WritePoints(context.Background(), []models.Point{pt}); err == nil { - t.Fatal("expected error: got nil") - } - - key := models.MakeKey([]byte("cpu"), nil) - if ok, err := sh.MeasurementExists(key); ok && err == nil { - t.Fatal("unexpected series") - } -} - -func TestShardWriteAddNewField(t *testing.T) { - tmpDir := t.TempDir() - tmpShard := filepath.Join(tmpDir, "shard") - tmpWal := filepath.Join(tmpDir, "wal") - - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - opts := tsdb.NewEngineOptions() - opts.Config.WALDir = filepath.Join(tmpDir, "wal") - - sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) - if err := sh.Open(context.Background()); err != nil { - t.Fatalf("error opening shard: %s", err.Error()) - } - defer sh.Close() - - pt := models.MustNewPoint( - "cpu", - models.NewTags(map[string]string{"host": "server"}), - map[string]interface{}{"value": 1.0}, - time.Unix(1, 2), - ) - - err := sh.WritePoints(context.Background(), []models.Point{pt}) - if err != nil { - t.Fatalf(err.Error()) - } - - pt = models.MustNewPoint( - "cpu", - models.NewTags(map[string]string{"host": "server"}), - map[string]interface{}{"value": 1.0, "value2": 2.0}, - time.Unix(1, 2), - ) - - err = sh.WritePoints(context.Background(), []models.Point{pt}) - if err != nil { - t.Fatalf(err.Error()) - } - - if got, exp := sh.SeriesN(), int64(1); got != exp { - t.Fatalf("got %d series, exp %d series in index", got, exp) - } -} - -// Tests concurrently writing to the same shard with different field types which -// can trigger a panic when the shard is snapshotted to TSM files. 
-func TestShard_WritePoints_FieldConflictConcurrent(t *testing.T) { - if testing.Short() || runtime.GOOS == "windows" { - t.Skip("Skipping on short and windows") - } - tmpDir := t.TempDir() - tmpShard := filepath.Join(tmpDir, "shard") - tmpWal := filepath.Join(tmpDir, "wal") - - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - opts := tsdb.NewEngineOptions() - opts.Config.WALDir = filepath.Join(tmpDir, "wal") - opts.SeriesIDSets = seriesIDSets([]*tsdb.SeriesIDSet{}) - - sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) - if err := sh.Open(context.Background()); err != nil { - t.Fatalf("error opening shard: %s", err.Error()) - } - defer sh.Close() - - points := make([]models.Point, 0, 1000) - for i := 0; i < cap(points); i++ { - if i < 500 { - points = append(points, models.MustNewPoint( - "cpu", - models.NewTags(map[string]string{"host": "server"}), - map[string]interface{}{"value": 1.0}, - time.Unix(int64(i), 0), - )) - } else { - points = append(points, models.MustNewPoint( - "cpu", - models.NewTags(map[string]string{"host": "server"}), - map[string]interface{}{"value": int64(1)}, - time.Unix(int64(i), 0), - )) - } - } - - var wg sync.WaitGroup - wg.Add(2) - errC := make(chan error) - go func() { - defer wg.Done() - for i := 0; i < 50; i++ { - if err := sh.DeleteMeasurement(context.Background(), []byte("cpu")); err != nil { - errC <- err - return - } - - _ = sh.WritePoints(context.Background(), points[:500]) - if f, err := sh.CreateSnapshot(false); err == nil { - os.RemoveAll(f) - } - - } - }() - - go func() { - defer wg.Done() - for i := 0; i < 50; i++ { - if err := sh.DeleteMeasurement(context.Background(), []byte("cpu")); err != nil { - errC <- err - return - } - - _ = sh.WritePoints(context.Background(), points[500:]) - if f, err := sh.CreateSnapshot(false); err == nil { - os.RemoveAll(f) - } - } - }() - - go func() { - wg.Wait() - close(errC) - }() - - for err := range errC { - if err != nil { - t.Error(err) - } - } -} - -func TestShard_WritePoints_FieldConflictConcurrentQuery(t *testing.T) { - t.Skip("https://github.com/influxdata/influxdb/v2/issues/14267") - if testing.Short() { - t.Skip() - } - tmpDir := t.TempDir() - tmpShard := filepath.Join(tmpDir, "shard") - tmpWal := filepath.Join(tmpDir, "wal") - - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - opts := tsdb.NewEngineOptions() - opts.Config.WALDir = filepath.Join(tmpDir, "wal") - opts.SeriesIDSets = seriesIDSets([]*tsdb.SeriesIDSet{}) - - sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) - if err := sh.Open(context.Background()); err != nil { - t.Fatalf("error opening shard: %s", err.Error()) - } - defer sh.Close() - - // Spin up two goroutines that write points with different field types in reverse - // order concurrently. After writing them, query them back. 
- errC := make(chan error, 2) - go func() { - // Write 250 floats and then ints to the same field - points := make([]models.Point, 0, 500) - for i := 0; i < cap(points); i++ { - if i < 250 { - points = append(points, models.MustNewPoint( - "cpu", - models.NewTags(map[string]string{"host": "server"}), - map[string]interface{}{"value": 1.0}, - time.Unix(int64(i), 0), - )) - } else { - points = append(points, models.MustNewPoint( - "cpu", - models.NewTags(map[string]string{"host": "server"}), - map[string]interface{}{"value": int64(1)}, - time.Unix(int64(i), 0), - )) - } - } - - for i := 0; i < 500; i++ { - if err := sh.DeleteMeasurement(context.Background(), []byte("cpu")); err != nil { - errC <- err - } - - sh.WritePoints(context.Background(), points) - m := &influxql.Measurement{Name: "cpu"} - iter, err := sh.CreateIterator(context.Background(), m, query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Aux: []influxql.VarRef{{Val: "value"}}, - Dimensions: []string{}, - Ascending: true, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - }) - if err != nil { - errC <- err - } - - switch itr := iter.(type) { - case query.IntegerIterator: - p, err := itr.Next() - for p != nil && err == nil { - p, err = itr.Next() - } - iter.Close() - - case query.FloatIterator: - p, err := itr.Next() - for p != nil && err == nil { - p, err = itr.Next() - } - iter.Close() - - } - - } - errC <- nil - }() - - go func() { - // Write 250 ints and then floats to the same field - points := make([]models.Point, 0, 500) - for i := 0; i < cap(points); i++ { - if i < 250 { - points = append(points, models.MustNewPoint( - "cpu", - models.NewTags(map[string]string{"host": "server"}), - map[string]interface{}{"value": int64(1)}, - time.Unix(int64(i), 0), - )) - } else { - points = append(points, models.MustNewPoint( - "cpu", - models.NewTags(map[string]string{"host": "server"}), - map[string]interface{}{"value": 1.0}, - time.Unix(int64(i), 0), - )) - } - } - for i := 0; i < 500; i++ { - if err := sh.DeleteMeasurement(context.Background(), []byte("cpu")); err != nil { - errC <- err - } - - sh.WritePoints(context.Background(), points) - m := &influxql.Measurement{Name: "cpu"} - iter, err := sh.CreateIterator(context.Background(), m, query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Aux: []influxql.VarRef{{Val: "value"}}, - Dimensions: []string{}, - Ascending: true, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - }) - if err != nil { - errC <- err - } - - switch itr := iter.(type) { - case query.IntegerIterator: - p, err := itr.Next() - for p != nil && err == nil { - p, err = itr.Next() - } - iter.Close() - case query.FloatIterator: - p, err := itr.Next() - for p != nil && err == nil { - p, err = itr.Next() - } - iter.Close() - } - } - errC <- nil - }() - - // Check results - for i := 0; i < cap(errC); i++ { - if err := <-errC; err != nil { - t.Fatal(err) - } - } -} - -// Ensures that when a shard is closed, it removes any series meta-data -// from the index. 
-func TestShard_Close_RemoveIndex(t *testing.T) { - tmpDir := t.TempDir() - tmpShard := filepath.Join(tmpDir, "shard") - tmpWal := filepath.Join(tmpDir, "wal") - - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - opts := tsdb.NewEngineOptions() - opts.Config.WALDir = filepath.Join(tmpDir, "wal") - - sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) - if err := sh.Open(context.Background()); err != nil { - t.Fatalf("error opening shard: %s", err.Error()) - } - t.Cleanup(func() { sh.Close() }) - - pt := models.MustNewPoint( - "cpu", - models.NewTags(map[string]string{"host": "server"}), - map[string]interface{}{"value": 1.0}, - time.Unix(1, 2), - ) - - err := sh.WritePoints(context.Background(), []models.Point{pt}) - if err != nil { - t.Fatalf(err.Error()) - } - - if got, exp := sh.SeriesN(), int64(1); got != exp { - t.Fatalf("got %d series, exp %d series in index", got, exp) - } - - // ensure the index gets loaded after closing and opening the shard - sh.Close() - sh.Open(context.Background()) - - if got, exp := sh.SeriesN(), int64(1); got != exp { - t.Fatalf("got %d series, exp %d series in index", got, exp) - } -} - -// Ensure a shard can create iterators for its underlying data. -func TestShard_CreateIterator_Ascending(t *testing.T) { - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - sh := NewShard(t, index) - defer sh.Close() - - // Calling CreateIterator when the engine is not open will return - // ErrEngineClosed. - m := &influxql.Measurement{Name: "cpu"} - _, got := sh.CreateIterator(context.Background(), m, query.IteratorOptions{}) - if exp := tsdb.ErrEngineClosed; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } - - if err := sh.Open(context.Background()); err != nil { - t.Fatal(err) - } - - sh.MustWritePointsString(` -cpu,host=serverA,region=uswest value=100 0 -cpu,host=serverA,region=uswest value=50,val2=5 10 -cpu,host=serverB,region=uswest value=25 0 -`) - - // Create iterator. - var err error - m = &influxql.Measurement{Name: "cpu"} - itr, err := sh.CreateIterator(context.Background(), m, query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Aux: []influxql.VarRef{{Val: "val2"}}, - Dimensions: []string{"host"}, - Ascending: true, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - }) - if err != nil { - t.Fatal(err) - } - defer itr.Close() - fitr := itr.(query.FloatIterator) - - // Read values from iterator. 
- if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(0): %s", err) - } else if !deep.Equal(p, &query.FloatPoint{ - Name: "cpu", - Tags: query.NewTags(map[string]string{"host": "serverA"}), - Time: time.Unix(0, 0).UnixNano(), - Value: 100, - Aux: []interface{}{(*float64)(nil)}, - }) { - t.Fatalf("unexpected point(0): %s", spew.Sdump(p)) - } - - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(1): %s", err) - } else if !deep.Equal(p, &query.FloatPoint{ - Name: "cpu", - Tags: query.NewTags(map[string]string{"host": "serverA"}), - Time: time.Unix(10, 0).UnixNano(), - Value: 50, - Aux: []interface{}{float64(5)}, - }) { - t.Fatalf("unexpected point(1): %s", spew.Sdump(p)) - } - - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(2): %s", err) - } else if !deep.Equal(p, &query.FloatPoint{ - Name: "cpu", - Tags: query.NewTags(map[string]string{"host": "serverB"}), - Time: time.Unix(0, 0).UnixNano(), - Value: 25, - Aux: []interface{}{(*float64)(nil)}, - }) { - t.Fatalf("unexpected point(2): %s", spew.Sdump(p)) - } - }) - } -} - -// Ensure a shard can create iterators for its underlying data. -func TestShard_CreateIterator_Descending(t *testing.T) { - test := func(t *testing.T, index string) { - sh := NewShard(t, index) - defer sh.Close() - - // Calling CreateIterator when the engine is not open will return - // ErrEngineClosed. - m := &influxql.Measurement{Name: "cpu"} - _, got := sh.CreateIterator(context.Background(), m, query.IteratorOptions{}) - if exp := tsdb.ErrEngineClosed; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } - - if err := sh.Open(context.Background()); err != nil { - t.Fatal(err) - } - - sh.MustWritePointsString(` -cpu,host=serverA,region=uswest value=100 0 -cpu,host=serverA,region=uswest value=50,val2=5 10 -cpu,host=serverB,region=uswest value=25 0 -`) - - // Create iterator. - var err error - m = &influxql.Measurement{Name: "cpu"} - itr, err := sh.CreateIterator(context.Background(), m, query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Aux: []influxql.VarRef{{Val: "val2"}}, - Dimensions: []string{"host"}, - Ascending: false, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - }) - if err != nil { - t.Fatal(err) - } - defer itr.Close() - fitr := itr.(query.FloatIterator) - - // Read values from iterator. 
- if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(0): %s", err) - } else if !deep.Equal(p, &query.FloatPoint{ - Name: "cpu", - Tags: query.NewTags(map[string]string{"host": "serverB"}), - Time: time.Unix(0, 0).UnixNano(), - Value: 25, - Aux: []interface{}{(*float64)(nil)}, - }) { - t.Fatalf("unexpected point(0): %s", spew.Sdump(p)) - } - - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(1): %s", err) - } else if !deep.Equal(p, &query.FloatPoint{ - Name: "cpu", - Tags: query.NewTags(map[string]string{"host": "serverA"}), - Time: time.Unix(10, 0).UnixNano(), - Value: 50, - Aux: []interface{}{float64(5)}, - }) { - t.Fatalf("unexpected point(1): %s", spew.Sdump(p)) - } - - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(2): %s", err) - } else if !deep.Equal(p, &query.FloatPoint{ - Name: "cpu", - Tags: query.NewTags(map[string]string{"host": "serverA"}), - Time: time.Unix(0, 0).UnixNano(), - Value: 100, - Aux: []interface{}{(*float64)(nil)}, - }) { - t.Fatalf("unexpected point(2): %s", spew.Sdump(p)) - } - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -func TestShard_CreateIterator_Series_Auth(t *testing.T) { - type variant struct { - name string - m *influxql.Measurement - aux []influxql.VarRef - } - - examples := []variant{ - { - name: "use_index", - m: &influxql.Measurement{Name: "cpu"}, - aux: []influxql.VarRef{{Val: "_seriesKey", Type: influxql.String}}, - }, - { - name: "use_cursors", - m: &influxql.Measurement{Name: "cpu", SystemIterator: "_series"}, - aux: []influxql.VarRef{{Val: "key", Type: influxql.String}}, - }, - } - - test := func(t *testing.T, index string, v variant) error { - sh := MustNewOpenShard(t, index) - defer sh.Close() - sh.MustWritePointsString(` -cpu,host=serverA,region=uswest value=100 0 -cpu,host=serverA,region=uswest value=50,val2=5 10 -cpu,host=serverB,region=uswest value=25 0 -cpu,secret=foo value=100 0 -`) - - seriesAuthorizer := &internal.AuthorizerMock{ - AuthorizeSeriesReadFn: func(database string, measurement []byte, tags models.Tags) bool { - if database == "" || !bytes.Equal(measurement, []byte("cpu")) || tags.GetString("secret") != "" { - t.Logf("Rejecting series db=%s, m=%s, tags=%v", database, measurement, tags) - return false - } - return true - }, - } - - // Create iterator for case where we use cursors (e.g., where time - // included in a SHOW SERIES query). - itr, err := sh.CreateIterator(context.Background(), v.m, query.IteratorOptions{ - Aux: v.aux, - Ascending: true, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - Authorizer: seriesAuthorizer, - }) - if err != nil { - return err - } - - if itr == nil { - return fmt.Errorf("iterator is nil") - } - defer itr.Close() - - fitr := itr.(query.FloatIterator) - defer fitr.Close() - var expCount = 2 - var gotCount int - for { - f, err := fitr.Next() - if err != nil { - return err - } - - if f == nil { - break - } - - if got := f.Aux[0].(string); strings.Contains(got, "secret") { - return fmt.Errorf("got a series %q that should be filtered", got) - } - gotCount++ - } - - if gotCount != expCount { - return fmt.Errorf("got %d series, expected %d", gotCount, expCount) - } - - // Delete series cpu,host=serverA,region=uswest - // - // We can't call directly on the index as we need to ensure the series - // file is updated appropriately. 
- sitr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=serverA,region=uswest")}} - if err := sh.DeleteSeriesRange(context.Background(), sitr, math.MinInt64, math.MaxInt64); err != nil { - t.Fatalf("failed to drop series: %s", err.Error()) - } - - if itr, err = sh.CreateIterator(context.Background(), v.m, query.IteratorOptions{ - Aux: v.aux, - Ascending: true, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - Authorizer: seriesAuthorizer, - }); err != nil { - return err - } - - if itr == nil { - return fmt.Errorf("iterator is nil") - } - defer itr.Close() - - fitr = itr.(query.FloatIterator) - defer fitr.Close() - expCount = 1 - gotCount = 0 - for { - f, err := fitr.Next() - if err != nil { - return err - } - - if f == nil { - break - } - - if got := f.Aux[0].(string); strings.Contains(got, "secret") { - return fmt.Errorf("got a series %q that should be filtered", got) - } else if got := f.Aux[0].(string); strings.Contains(got, "serverA") { - return fmt.Errorf("got a series %q that should be filtered", got) - } - gotCount++ - } - - if gotCount != expCount { - return fmt.Errorf("got %d series, expected %d", gotCount, expCount) - } - - return nil - } - - for _, index := range tsdb.RegisteredIndexes() { - for _, example := range examples { - t.Run(index+"_"+example.name, func(t *testing.T) { - if err := test(t, index, example); err != nil { - t.Fatal(err) - } - }) - } - } -} - -func TestShard_Disabled_WriteQuery(t *testing.T) { - test := func(t *testing.T, index string) { - sh := NewShard(t, index) - if err := sh.Open(context.Background()); err != nil { - t.Fatal(err) - } - defer sh.Close() - - sh.SetEnabled(false) - - pt := models.MustNewPoint( - "cpu", - models.NewTags(map[string]string{"host": "server"}), - map[string]interface{}{"value": 1.0}, - time.Unix(1, 2), - ) - - err := sh.WritePoints(context.Background(), []models.Point{pt}) - if !errors.Is(err, tsdb.ErrShardDisabled) { - t.Fatalf("expected shard disabled error: %v", err.Error()) - } - m := &influxql.Measurement{Name: "cpu"} - _, err = sh.CreateIterator(context.Background(), m, query.IteratorOptions{}) - if exp := tsdb.ErrShardDisabled; !errors.Is(err, exp) { - t.Fatalf("got %v, expected %v", err, exp) - } - - sh.SetEnabled(true) - - err = sh.WritePoints(context.Background(), []models.Point{pt}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - m = &influxql.Measurement{Name: "cpu"} - itr, err := sh.CreateIterator(context.Background(), m, query.IteratorOptions{}) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - assert2.NoError(t, itr.Close()) - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -func TestShard_Closed_Functions(t *testing.T) { - test := func(t *testing.T, index string) { - sh := NewShard(t, index) - if err := sh.Open(context.Background()); err != nil { - t.Fatal(err) - } - - pt := models.MustNewPoint( - "cpu", - models.NewTags(map[string]string{"host": "server"}), - map[string]interface{}{"value": 1.0}, - time.Unix(1, 2), - ) - - if err := sh.WritePoints(context.Background(), []models.Point{pt}); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - sh.Close() - - // Should not panic. 
- if exp, got := 0, sh.TagKeyCardinality([]byte("cpu"), []byte("host")); exp != got { - t.Fatalf("got %d, expected %d", got, exp) - } - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -func TestShard_FieldDimensions(t *testing.T) { - var sh *Shard - - sfile := MustOpenSeriesFile(t) - defer sfile.Close() - - setup := func(index string) { - sh = NewShard(t, index) - - if err := sh.Open(context.Background()); err != nil { - t.Fatal(err) - } - - sh.MustWritePointsString(` -cpu,host=serverA,region=uswest value=100 0 -cpu,host=serverA,region=uswest value=50,val2=5 10 -cpu,host=serverB,region=uswest value=25 0 -mem,host=serverA value=25i 0 -mem,host=serverB value=50i,val3=t 10 -_reserved,region=uswest value="foo" 0 -`) - } - - for _, index := range tsdb.RegisteredIndexes() { - setup(index) - for _, tt := range []struct { - sources []string - f map[string]influxql.DataType - d map[string]struct{} - }{ - { - sources: []string{"cpu"}, - f: map[string]influxql.DataType{ - "value": influxql.Float, - "val2": influxql.Float, - }, - d: map[string]struct{}{ - "host": {}, - "region": {}, - }, - }, - { - sources: []string{"mem"}, - f: map[string]influxql.DataType{ - "value": influxql.Integer, - "val3": influxql.Boolean, - }, - d: map[string]struct{}{ - "host": {}, - }, - }, - { - sources: []string{"cpu", "mem"}, - f: map[string]influxql.DataType{ - "value": influxql.Float, - "val2": influxql.Float, - "val3": influxql.Boolean, - }, - d: map[string]struct{}{ - "host": {}, - "region": {}, - }, - }, - { - sources: []string{"_fieldKeys"}, - f: map[string]influxql.DataType{ - "fieldKey": influxql.String, - "fieldType": influxql.String, - }, - d: map[string]struct{}{}, - }, - { - sources: []string{"_series"}, - f: map[string]influxql.DataType{ - "key": influxql.String, - }, - d: map[string]struct{}{}, - }, - { - sources: []string{"_tagKeys"}, - f: map[string]influxql.DataType{ - "tagKey": influxql.String, - }, - d: map[string]struct{}{}, - }, - { - sources: []string{"_reserved"}, - f: map[string]influxql.DataType{ - "value": influxql.String, - }, - d: map[string]struct{}{ - "region": {}, - }, - }, - { - sources: []string{"unknown"}, - f: map[string]influxql.DataType{}, - d: map[string]struct{}{}, - }, - } { - name := fmt.Sprintf("%s_%s", strings.Join(tt.sources, ","), index) - t.Run(name, func(t *testing.T) { - f, d, err := sh.FieldDimensions(tt.sources) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if diff := cmp.Diff(tt.f, f, cmpopts.EquateEmpty()); diff != "" { - t.Errorf("unexpected fields:\n%s", diff) - } - if diff := cmp.Diff(tt.d, d, cmpopts.EquateEmpty()); diff != "" { - t.Errorf("unexpected dimensions:\n%s", diff) - } - }) - } - sh.Close() - } -} - -func TestShards_FieldKeysByMeasurement(t *testing.T) { - var shards Shards - - setup := func(index string) { - shards = NewShards(t, index, 2) - shards.MustOpen() - - shards[0].MustWritePointsString(`cpu,host=serverA,region=uswest a=2.2,b=33.3,value=100 0`) - - shards[1].MustWritePointsString(` - cpu,host=serverA,region=uswest a=2.2,c=12.3,value=100,z="hello" 0 - disk q=100 0 - `) - } - - for _, index := range tsdb.RegisteredIndexes() { - setup(index) - t.Run(fmt.Sprintf("%s_single_shard", index), func(t *testing.T) { - exp := []string{"a", "b", "value"} - if got := (tsdb.Shards{shards[0].Shard}).FieldKeysByMeasurement([]byte("cpu")); !reflect.DeepEqual(got, exp) { - shards.Close() - t.Fatalf("got keys %v, expected %v", got, exp) - } - }) - - 
t.Run(fmt.Sprintf("%s_multiple_shards", index), func(t *testing.T) { - exp := []string{"a", "b", "c", "value", "z"} - if got := shards.Shards().FieldKeysByMeasurement([]byte("cpu")); !reflect.DeepEqual(got, exp) { - shards.Close() - t.Fatalf("got keys %v, expected %v", got, exp) - } - }) - shards.Close() - } -} - -func TestShards_FieldDimensions(t *testing.T) { - var shard1, shard2 *Shard - - setup := func(index string) { - shard1 = NewShard(t, index) - if err := shard1.Open(context.Background()); err != nil { - t.Fatal(err) - } - - shard1.MustWritePointsString(` -cpu,host=serverA,region=uswest value=100 0 -cpu,host=serverA,region=uswest value=50,val2=5 10 -cpu,host=serverB,region=uswest value=25 0 -`) - - shard2 = NewShard(t, index) - if err := shard2.Open(context.Background()); err != nil { - t.Fatal(err) - } - - shard2.MustWritePointsString(` -mem,host=serverA value=25i 0 -mem,host=serverB value=50i,val3=t 10 -_reserved,region=uswest value="foo" 0 -`) - } - - for _, index := range tsdb.RegisteredIndexes() { - setup(index) - sh := tsdb.Shards([]*tsdb.Shard{shard1.Shard, shard2.Shard}) - for _, tt := range []struct { - sources []string - f map[string]influxql.DataType - d map[string]struct{} - }{ - { - sources: []string{"cpu"}, - f: map[string]influxql.DataType{ - "value": influxql.Float, - "val2": influxql.Float, - }, - d: map[string]struct{}{ - "host": {}, - "region": {}, - }, - }, - { - sources: []string{"mem"}, - f: map[string]influxql.DataType{ - "value": influxql.Integer, - "val3": influxql.Boolean, - }, - d: map[string]struct{}{ - "host": {}, - }, - }, - { - sources: []string{"cpu", "mem"}, - f: map[string]influxql.DataType{ - "value": influxql.Float, - "val2": influxql.Float, - "val3": influxql.Boolean, - }, - d: map[string]struct{}{ - "host": {}, - "region": {}, - }, - }, - { - sources: []string{"_fieldKeys"}, - f: map[string]influxql.DataType{ - "fieldKey": influxql.String, - "fieldType": influxql.String, - }, - d: map[string]struct{}{}, - }, - { - sources: []string{"_series"}, - f: map[string]influxql.DataType{ - "key": influxql.String, - }, - d: map[string]struct{}{}, - }, - { - sources: []string{"_tagKeys"}, - f: map[string]influxql.DataType{ - "tagKey": influxql.String, - }, - d: map[string]struct{}{}, - }, - { - sources: []string{"_reserved"}, - f: map[string]influxql.DataType{ - "value": influxql.String, - }, - d: map[string]struct{}{ - "region": {}, - }, - }, - { - sources: []string{"unknown"}, - f: map[string]influxql.DataType{}, - d: map[string]struct{}{}, - }, - } { - name := fmt.Sprintf("%s_%s", index, strings.Join(tt.sources, ",")) - t.Run(name, func(t *testing.T) { - f, d, err := sh.FieldDimensions(tt.sources) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if diff := cmp.Diff(tt.f, f, cmpopts.EquateEmpty()); diff != "" { - t.Errorf("unexpected fields:\n%s", diff) - } - if diff := cmp.Diff(tt.d, d, cmpopts.EquateEmpty()); diff != "" { - t.Errorf("unexpected dimensions:\n%s", diff) - } - }) - } - shard1.Close() - shard2.Close() - } -} - -func TestShards_MapType(t *testing.T) { - var shard1, shard2 *Shard - - setup := func(index string) { - shard1 = NewShard(t, index) - if err := shard1.Open(context.Background()); err != nil { - t.Fatal(err) - } - - shard1.MustWritePointsString(` -cpu,host=serverA,region=uswest value=100 0 -cpu,host=serverA,region=uswest value=50,val2=5 10 -cpu,host=serverB,region=uswest value=25 0 -`) - - shard2 = NewShard(t, index) - if err := shard2.Open(context.Background()); err != nil { - t.Fatal(err) - } - - 
shard2.MustWritePointsString(` -mem,host=serverA value=25i 0 -mem,host=serverB value=50i,val3=t 10 -_reserved,region=uswest value="foo" 0 -`) - } - - for _, index := range tsdb.RegisteredIndexes() { - setup(index) - sh := tsdb.Shards([]*tsdb.Shard{shard1.Shard, shard2.Shard}) - for _, tt := range []struct { - measurement string - field string - typ influxql.DataType - }{ - { - measurement: "cpu", - field: "value", - typ: influxql.Float, - }, - { - measurement: "cpu", - field: "host", - typ: influxql.Tag, - }, - { - measurement: "cpu", - field: "region", - typ: influxql.Tag, - }, - { - measurement: "cpu", - field: "val2", - typ: influxql.Float, - }, - { - measurement: "cpu", - field: "unknown", - typ: influxql.Unknown, - }, - { - measurement: "mem", - field: "value", - typ: influxql.Integer, - }, - { - measurement: "mem", - field: "val3", - typ: influxql.Boolean, - }, - { - measurement: "mem", - field: "host", - typ: influxql.Tag, - }, - { - measurement: "unknown", - field: "unknown", - typ: influxql.Unknown, - }, - { - measurement: "_fieldKeys", - field: "fieldKey", - typ: influxql.String, - }, - { - measurement: "_fieldKeys", - field: "fieldType", - typ: influxql.String, - }, - { - measurement: "_fieldKeys", - field: "unknown", - typ: influxql.Unknown, - }, - { - measurement: "_series", - field: "key", - typ: influxql.String, - }, - { - measurement: "_series", - field: "unknown", - typ: influxql.Unknown, - }, - { - measurement: "_tagKeys", - field: "tagKey", - typ: influxql.String, - }, - { - measurement: "_tagKeys", - field: "unknown", - typ: influxql.Unknown, - }, - { - measurement: "_reserved", - field: "value", - typ: influxql.String, - }, - { - measurement: "_reserved", - field: "region", - typ: influxql.Tag, - }, - } { - name := fmt.Sprintf("%s_%s_%s", index, tt.measurement, tt.field) - t.Run(name, func(t *testing.T) { - typ := sh.MapType(tt.measurement, tt.field) - if have, want := typ, tt.typ; have != want { - t.Errorf("unexpected data type: have=%#v want=%#v", have, want) - } - }) - } - shard1.Close() - shard2.Close() - } -} - -func TestShards_MeasurementsByRegex(t *testing.T) { - var shard1, shard2 *Shard - - setup := func(index string) { - shard1 = NewShard(t, index) - if err := shard1.Open(context.Background()); err != nil { - t.Fatal(err) - } - - shard1.MustWritePointsString(` -cpu,host=serverA,region=uswest value=100 0 -cpu,host=serverA,region=uswest value=50,val2=5 10 -cpu,host=serverB,region=uswest value=25 0 -`) - - shard2 = NewShard(t, index) - if err := shard2.Open(context.Background()); err != nil { - t.Fatal(err) - } - - shard2.MustWritePointsString(` -mem,host=serverA value=25i 0 -mem,host=serverB value=50i,val3=t 10 -_reserved,region=uswest value="foo" 0 -`) - } - - for _, index := range tsdb.RegisteredIndexes() { - setup(index) - sh := tsdb.Shards([]*tsdb.Shard{shard1.Shard, shard2.Shard}) - for _, tt := range []struct { - regex string - measurements []string - }{ - {regex: `cpu`, measurements: []string{"cpu"}}, - {regex: `mem`, measurements: []string{"mem"}}, - {regex: `cpu|mem`, measurements: []string{"cpu", "mem"}}, - {regex: `gpu`, measurements: []string{}}, - {regex: `pu`, measurements: []string{"cpu"}}, - {regex: `p|m`, measurements: []string{"cpu", "mem"}}, - } { - t.Run(tt.regex, func(t *testing.T) { - re := regexp.MustCompile(tt.regex) - measurements := sh.MeasurementsByRegex(re) - sort.Strings(measurements) - if diff := cmp.Diff(tt.measurements, measurements, cmpopts.EquateEmpty()); diff != "" { - t.Errorf("unexpected measurements:\n%s", diff) - } - }) - 
} - shard1.Close() - shard2.Close() - } -} - -func TestMeasurementFieldSet_SaveLoad(t *testing.T) { - const measurement = "cpu" - const fieldName = "value" - - dir := t.TempDir() - - path := filepath.Join(dir, "fields.idx") - mf, err := tsdb.NewMeasurementFieldSet(path, nil) - if err != nil { - t.Fatalf("NewMeasurementFieldSet error: %v", err) - } - defer checkMeasurementFieldSetClose(t, mf) - fields := mf.CreateFieldsIfNotExists([]byte(measurement)) - if err := fields.CreateFieldIfNotExists([]byte(fieldName), influxql.Float); err != nil { - t.Fatalf("create field error: %v", err) - } - change := tsdb.FieldChange{ - FieldCreate: tsdb.FieldCreate{ - Measurement: []byte(measurement), - Field: &tsdb.Field{ID: 0, Name: fieldName, Type: influxql.Float}, - }, - ChangeType: tsdb.AddMeasurementField, - } - - if err := mf.Save(tsdb.FieldChanges{&change}); err != nil { - t.Fatalf("save error: %v", err) - } - _, err = os.Stat(mf.ChangesPath()) - assert.NoError(t, err, "no field.idx change file") - - mf2, err := tsdb.NewMeasurementFieldSet(path, nil) - if err != nil { - t.Fatalf("NewMeasurementFieldSet error: %v", err) - } - _, err = os.Stat(mf.ChangesPath()) - assert2.Error(t, err, "file %s should have had this error: %s", mf.ChangesPath(), os.ErrNotExist) - if !os.IsNotExist(err) { - t.Fatalf("unexpected error for %s: got %s, expected %s", mf.ChangesPath(), err, os.ErrNotExist) - } - defer checkMeasurementFieldSetClose(t, mf2) - fields = mf2.FieldsByString(measurement) - field := fields.Field(fieldName) - if field == nil { - t.Fatalf("field is null") - } - - if got, exp := field.Type, influxql.Float; got != exp { - t.Fatalf("field type mismatch: got %v, exp %v", got, exp) - } -} - -func TestMeasurementFieldSet_Corrupt(t *testing.T) { - dir := t.TempDir() - - path := filepath.Join(dir, "fields.idx") - func() { - mf, err := tsdb.NewMeasurementFieldSet(path, nil) - if err != nil { - t.Fatalf("NewMeasurementFieldSet error: %v", err) - } - defer checkMeasurementFieldSetClose(t, mf) - measurement := []byte("cpu") - fields := mf.CreateFieldsIfNotExists(measurement) - fieldName := "value" - if err := fields.CreateFieldIfNotExists([]byte(fieldName), influxql.Float); err != nil { - t.Fatalf("create field error: %v", err) - } - change := tsdb.FieldChange{ - FieldCreate: tsdb.FieldCreate{ - Measurement: []byte(measurement), - Field: &tsdb.Field{ID: 0, Name: fieldName, Type: influxql.Float}, - }, - ChangeType: tsdb.AddMeasurementField, - } - - if err := mf.Save(tsdb.FieldChanges{&change}); err != nil { - t.Fatalf("save error: %v", err) - } - }() - stat, err := os.Stat(path) - if err != nil { - t.Fatalf("stat error: %v", err) - } - // Truncate the file to simulate a corrupted file - if err := os.Truncate(path, stat.Size()-3); err != nil { - t.Fatalf("truncate error: %v", err) - } - mf, err := tsdb.NewMeasurementFieldSet(path, nil) - if err == nil { - t.Fatal("NewMeasurementFieldSet expected error") - } - defer checkMeasurementFieldSetClose(t, mf) - - fields := mf.FieldsByString("cpu") - if fields != nil { - t.Fatal("expecte fields to be nil") - } -} - -func TestMeasurementFieldSet_CorruptChangeFile(t *testing.T) { - dir := t.TempDir() - - testFields := []struct { - Measurement string - Field string - FieldType influxql.DataType - }{ - { - Measurement: "cpu", - Field: "value_1", - FieldType: influxql.Float, - }, - { - Measurement: "cpu", - Field: "value_2", - FieldType: influxql.String, - }, - { - Measurement: "cpu", - Field: "value_3", - FieldType: influxql.Integer, - }, - } - - path := filepath.Join(dir, 
"fields.idx") - var mf *tsdb.MeasurementFieldSet - var err error - mf, err = tsdb.NewMeasurementFieldSet(path, nil) - if err != nil { - t.Fatalf("NewMeasurementFieldSet error: %v", err) - } - defer checkMeasurementFieldSetClose(t, mf) - for _, f := range testFields { - fields := mf.CreateFieldsIfNotExists([]byte(f.Measurement)) - if err := fields.CreateFieldIfNotExists([]byte(f.Field), f.FieldType); err != nil { - t.Fatalf("create field error: %v", err) - } - change := tsdb.FieldChange{ - FieldCreate: tsdb.FieldCreate{ - Measurement: []byte(f.Measurement), - Field: &tsdb.Field{ID: 0, Name: f.Field, Type: f.FieldType}, - }, - ChangeType: tsdb.AddMeasurementField, - } - - if err := mf.Save(tsdb.FieldChanges{&change}); err != nil { - t.Fatalf("save error: %v", err) - } - } - changeFile := filepath.Join(dir, tsdb.FieldsChangeFile) - stat, err := os.Stat(changeFile) - if err != nil { - t.Fatalf("stat error: %v", err) - } - // Truncate the file to simulate a corrupted file - if err := os.Truncate(changeFile, stat.Size()-3); err != nil { - t.Fatalf("truncate error: %v", err) - } - mf2, err := tsdb.NewMeasurementFieldSet(path, nil) - assert.NoError(t, err, "failed creating second MeasurementFieldSet") - defer checkMeasurementFieldSetClose(t, mf2) - - for i := 0; i < len(testFields)-1; i++ { - fields := mf2.FieldsByString(testFields[i].Measurement) - if fields == nil { - t.Fatalf("nil fields map for %s", testFields[i].Measurement) - } else if f := fields.Field(testFields[i].Field); f == nil { - t.Fatalf("%s not found in %s fields", testFields[i].Field, testFields[i].Measurement) - } else if f.Type != testFields[i].FieldType { - t.Fatalf("%s.%s wrong type: expected %v, got %v", testFields[i].Measurement, testFields[i].Field, testFields[i].FieldType, f.Type) - } - } - i := len(testFields) - 1 - fields := mf2.FieldsByString(testFields[i].Measurement) - if fields == nil { - t.Fatalf("nil fields map for %s", testFields[i].Measurement) - } else if f := fields.Field(testFields[i].Field); f != nil { - t.Fatalf("%s found in %s fields, should have not been present", testFields[i].Field, testFields[i].Measurement) - } -} - -func TestMeasurementFieldSet_DeleteEmpty(t *testing.T) { - const measurement = "cpu" - const fieldName = "value" - - dir := t.TempDir() - - path := filepath.Join(dir, "fields.idx") - mf, err := tsdb.NewMeasurementFieldSet(path, nil) - if err != nil { - t.Fatalf("NewMeasurementFieldSet error: %v", err) - } - defer checkMeasurementFieldSetClose(t, mf) - - fields := mf.CreateFieldsIfNotExists([]byte(measurement)) - if err := fields.CreateFieldIfNotExists([]byte(fieldName), influxql.Float); err != nil { - t.Fatalf("create field error: %v", err) - } - - change := tsdb.FieldChange{ - FieldCreate: tsdb.FieldCreate{ - Measurement: []byte(measurement), - Field: &tsdb.Field{ID: 0, Name: fieldName, Type: influxql.Float}, - }, - ChangeType: tsdb.AddMeasurementField, - } - - if err := mf.Save(tsdb.FieldChanges{&change}); err != nil { - t.Fatalf("save error: %v", err) - } - mf2, err := tsdb.NewMeasurementFieldSet(path, nil) - if err != nil { - t.Fatalf("NewMeasurementFieldSet error: %v", err) - } - fields = mf2.FieldsByString(measurement) - field := fields.Field(fieldName) - if field == nil { - t.Fatalf("field is null") - } - - if got, exp := field.Type, influxql.Float; got != exp { - t.Fatalf("field type mismatch: got %v, exp %v", got, exp) - } - - mf2.Delete(measurement) - - if err := mf2.Save(tsdb.MeasurementsToFieldChangeDeletions([]string{measurement})); err != nil { - t.Fatalf("save after delete 
error: %v", err) - } - _, err = os.Stat(mf.ChangesPath()) - assert.NoError(t, err, "no field.idx change file") - assert.NoError(t, mf2.Close(), "failed closing MeasurementFieldSet") - - _, err = os.Stat(mf.ChangesPath()) - assert2.Error(t, err, "file %s should have had this error: %s", mf.ChangesPath(), os.ErrNotExist) - if !os.IsNotExist(err) { - t.Fatalf("unexpected error for %s: got %s, expected %s", mf.ChangesPath(), err, os.ErrNotExist) - } - - if _, err = os.Stat(path); !os.IsNotExist(err) { - t.Fatalf("got %v, not exist err", err) - } -} - -func checkMeasurementFieldSetClose(t *testing.T, fs *tsdb.MeasurementFieldSet) { - assert.NoError(t, fs.Close(), "failed closing tsdb.MeasurementFieldSet") - _, err := os.Stat(fs.ChangesPath()) - assert2.Error(t, err, "file %s should have had this error: %s", fs.ChangesPath(), os.ErrNotExist) - if !os.IsNotExist(err) { - t.Fatalf("unexpected error for %s: got %s, expected %s", fs.ChangesPath(), err, os.ErrNotExist) - } -} - -func TestMeasurementFieldSet_InvalidFormat(t *testing.T) { - dir := t.TempDir() - - path := filepath.Join(dir, "fields.idx") - - if err := os.WriteFile(path, []byte{0, 0}, 0666); err != nil { - t.Fatalf("error writing fields.index: %v", err) - } - - mf, err := tsdb.NewMeasurementFieldSet(path, nil) - if !errors.Is(err, tsdb.ErrUnknownFieldsFormat) { - t.Fatalf("unexpected error: got %v, exp %v", err, tsdb.ErrUnknownFieldsFormat) - } - defer checkMeasurementFieldSetClose(t, mf) -} - -func TestMeasurementFieldSet_ConcurrentSave(t *testing.T) { - var iterations int - dir := t.TempDir() - - if testing.Short() { - iterations = 50 - } else { - iterations = 200 - } - - mt := []string{"cpu", "dpu", "epu", "fpu"} - ft := make([][]string, len(mt)) - for mi, m := range mt { - ft[mi] = make([]string, iterations) - for i := 0; i < iterations; i += 1 { - ft[mi][i] = fmt.Sprintf("%s_%s_%d", m, "value", i) - } - } - - path := filepath.Join(dir, "fields.idx") - mfs, err := tsdb.NewMeasurementFieldSet(path, nil) - if err != nil { - t.Fatalf("NewMeasurementFieldSet error: %v", err) - } - defer checkMeasurementFieldSetClose(t, mfs) - var wg sync.WaitGroup - - wg.Add(len(ft)) - for i, fs := range ft { - go testFieldMaker(t, &wg, mfs, mt[i], fs) - } - wg.Wait() - - mfs2, err := tsdb.NewMeasurementFieldSet(path, nil) - if err != nil { - t.Fatalf("NewMeasurementFieldSet error: %v", err) - } - defer checkMeasurementFieldSetClose(t, mfs2) - for i, fs := range ft { - mf := mfs.Fields([]byte(mt[i])) - mf2 := mfs2.Fields([]byte(mt[i])) - for _, f := range fs { - if mf2.Field(f) == nil { - t.Fatalf("Created field not found on reloaded MeasurementFieldSet %s", f) - } - if mf.Field(f) == nil { - t.Fatalf("Created field not found in original MeasureMentFieldSet: %s", f) - } - } - } -} - -func TestMeasurementFieldSet_MeasurementNames(t *testing.T) { - dir := t.TempDir() - path := filepath.Join(dir, "fields.idx") - mf, err := tsdb.NewMeasurementFieldSet(path, nil) - if err != nil { - t.Fatalf("NewMeasurementFieldSet error: %v", err) - } - defer mf.Close() - - mf.CreateFieldsIfNotExists([]byte("cpu")) - mf.CreateFieldsIfNotExists([]byte("memory")) - mf.CreateFieldsIfNotExists([]byte("disk_usage")) - - exp := []string{"cpu", "disk_usage", "memory"} - got := mf.MeasurementNames() - assert.Equal(t, exp, got) -} - -func testFieldMaker(t *testing.T, wg *sync.WaitGroup, mf *tsdb.MeasurementFieldSet, measurement string, fieldNames []string) { - defer wg.Done() - fields := mf.CreateFieldsIfNotExists([]byte(measurement)) - - for _, fieldName := range fieldNames { - if 
err := fields.CreateFieldIfNotExists([]byte(fieldName), influxql.Float); err != nil { - t.Errorf("create field error: %v", err) - return - } - change := tsdb.FieldChange{ - FieldCreate: tsdb.FieldCreate{ - Measurement: []byte(measurement), - Field: &tsdb.Field{ID: 0, Name: fieldName, Type: influxql.Float}, - }, - ChangeType: tsdb.AddMeasurementField, - } - - err := mf.Save(tsdb.FieldChanges{&change}) - if err != nil { - t.Logf("save error: %v", err) - t.Fail() - return - } - _, err = os.Stat(mf.ChangesPath()) - if err != nil { - t.Logf("unexpected error for field.idxl change file %s: %s", mf.ChangesPath(), err) - t.Fail() - } - } -} - -func BenchmarkWritePoints_NewSeries_1K(b *testing.B) { benchmarkWritePoints(b, 38, 3, 3, 1) } -func BenchmarkWritePoints_NewSeries_100K(b *testing.B) { benchmarkWritePoints(b, 32, 5, 5, 1) } -func BenchmarkWritePoints_NewSeries_250K(b *testing.B) { benchmarkWritePoints(b, 80, 5, 5, 1) } -func BenchmarkWritePoints_NewSeries_500K(b *testing.B) { benchmarkWritePoints(b, 160, 5, 5, 1) } -func BenchmarkWritePoints_NewSeries_1M(b *testing.B) { benchmarkWritePoints(b, 320, 5, 5, 1) } - -// Fix measurement and tag key cardinalities and vary tag value cardinality -func BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_100_TagValues(b *testing.B) { - benchmarkWritePoints(b, 1, 1, 100, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_500_TagValues(b *testing.B) { - benchmarkWritePoints(b, 1, 1, 500, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_1000_TagValues(b *testing.B) { - benchmarkWritePoints(b, 1, 1, 1000, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_5000_TagValues(b *testing.B) { - benchmarkWritePoints(b, 1, 1, 5000, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_10000_TagValues(b *testing.B) { - benchmarkWritePoints(b, 1, 1, 10000, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_50000_TagValues(b *testing.B) { - benchmarkWritePoints(b, 1, 1, 50000, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_100000_TagValues(b *testing.B) { - benchmarkWritePoints(b, 1, 1, 100000, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_500000_TagValues(b *testing.B) { - benchmarkWritePoints(b, 1, 1, 500000, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_1000000_TagValues(b *testing.B) { - benchmarkWritePoints(b, 1, 1, 1000000, 1) -} - -// Fix tag key and tag values cardinalities and vary measurement cardinality -func BenchmarkWritePoints_NewSeries_100_Measurements_1_TagKey_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 100, 1, 1, 1) -} -func BenchmarkWritePoints_NewSeries_500_Measurements_1_TagKey_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 500, 1, 1, 1) -} -func BenchmarkWritePoints_NewSeries_1000_Measurement_1_TagKey_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1000, 1, 1, 1) -} - -func BenchmarkWritePoints_NewSeries_5000_Measurement_1_TagKey_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 5000, 1, 1, 1) -} -func BenchmarkWritePoints_NewSeries_10000_Measurement_1_TagKey_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 10000, 1, 1, 1) -} - -func BenchmarkWritePoints_NewSeries_1000_Measurement_10_TagKey_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1000, 10, 1, 1) -} - -func BenchmarkWritePoints_NewSeries_50000_Measurement_1_TagKey_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 50000, 1, 1, 1) -} -func BenchmarkWritePoints_NewSeries_100000_Measurement_1_TagKey_1_TagValue(b *testing.B) { 
- benchmarkWritePoints(b, 100000, 1, 1, 1) -} - -func BenchmarkWritePoints_NewSeries_500000_Measurement_1_TagKey_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 500000, 1, 1, 1) -} -func BenchmarkWritePoints_NewSeries_1000000_Measurement_1_TagKey_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1000000, 1, 1, 1) -} - -// Fix measurement and tag values cardinalities and vary tag key cardinality -func BenchmarkWritePoints_NewSeries_1_Measurement_2_TagKeys_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1, 1<<1, 1, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurements_4_TagKeys_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1, 1<<2, 1, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurements_8_TagKeys_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1, 1<<3, 1, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_16_TagKeys_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1, 1<<4, 1, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_32_TagKeys_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1, 1<<5, 1, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_64_TagKeys_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1, 1<<6, 1, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_128_TagKeys_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1, 1<<7, 1, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_256_TagKeys_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1, 1<<8, 1, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_512_TagKeys_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1, 1<<9, 1, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_1024_TagKeys_1_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1, 1<<10, 1, 1) -} - -// Fix series cardinality and vary tag keys and value cardinalities -func BenchmarkWritePoints_NewSeries_1_Measurement_1_TagKey_65536_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1, 1, 1<<16, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_2_TagKeys_256_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1, 2, 1<<8, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_4_TagKeys_16_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1, 4, 1<<4, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_8_TagKeys_4_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1, 8, 1<<2, 1) -} -func BenchmarkWritePoints_NewSeries_1_Measurement_16_TagKeys_2_TagValue(b *testing.B) { - benchmarkWritePoints(b, 1, 16, 1<<1, 1) -} - -func BenchmarkWritePoints_ExistingSeries_1K(b *testing.B) { - benchmarkWritePointsExistingSeries(b, 38, 3, 3, 1) -} -func BenchmarkWritePoints_ExistingSeries_100K(b *testing.B) { - benchmarkWritePointsExistingSeries(b, 32, 5, 5, 1) -} - -func BenchmarkWritePoints_ExistingSeries_250K(b *testing.B) { - benchmarkWritePointsExistingSeries(b, 80, 5, 5, 1) -} -func BenchmarkWritePoints_ExistingSeries_500K(b *testing.B) { - benchmarkWritePointsExistingSeries(b, 160, 5, 5, 1) -} -func BenchmarkWritePoints_ExistingSeries_1M(b *testing.B) { - benchmarkWritePointsExistingSeries(b, 320, 5, 5, 1) -} - -// The following two benchmarks measure time to write 10k points at a time for comparing performance with different measurement cardinalities. 
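-// BenchmarkWritePoints_ExistingSeries_100K_1_1 spreads each batch across 100,000
-// measurements with a single tag key and tag value, while
-// BenchmarkWritePoints_ExistingSeries_10K_10_1 uses 10,000 measurements with ten
-// tag keys, so the pair contrasts write cost at different measurement cardinalities.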
-func BenchmarkWritePoints_ExistingSeries_100K_1_1(b *testing.B) { - benchmarkWritePointsExistingSeriesEqualBatches(b, 100000, 1, 1, 1) -} - -func BenchmarkWritePoints_ExistingSeries_10K_10_1(b *testing.B) { - benchmarkWritePointsExistingSeriesEqualBatches(b, 10000, 10, 1, 1) -} - -func BenchmarkWritePoints_ExistingSeries_100K_1_1_Fields(b *testing.B) { - benchmarkWritePointsExistingSeriesFields(b, 100000, 1, 1, 1) -} - -func BenchmarkWritePoints_ExistingSeries_10K_10_1_Fields(b *testing.B) { - benchmarkWritePointsExistingSeriesFields(b, 10000, 10, 1, 1) -} - -// benchmarkWritePoints benchmarks writing new series to a shard. -// mCnt - measurement count -// tkCnt - tag key count -// tvCnt - tag value count (values per tag) -// pntCnt - points per series. # of series = mCnt * (tvCnt ^ tkCnt) -func benchmarkWritePoints(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) { - // Generate test series (measurements + unique tag sets). - series := genTestSeries(mCnt, tkCnt, tvCnt) - // Generate point data to write to the shard. - points := []models.Point{} - for _, s := range series { - for val := 0.0; val < float64(pntCnt); val++ { - p := models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": val}, time.Now()) - points = append(points, p) - } - } - - // Stop & reset timers and mem-stats before the main benchmark loop. - b.StopTimer() - b.ResetTimer() - - sfile := MustOpenSeriesFile(b) - defer sfile.Close() - - // Run the benchmark loop. - for n := 0; n < b.N; n++ { - shard, err := openShard(b, sfile) - if err != nil { - shard.Close() - b.Fatal(err) - } - - b.StartTimer() - // Call the function being benchmarked. - chunkedWrite(shard, points) - - b.StopTimer() - shard.Close() - } -} - -// benchmarkWritePointsExistingSeries benchmarks writing to existing series in a shard. -// mCnt - measurement count -// tkCnt - tag key count -// tvCnt - tag value count (values per tag) -// pntCnt - points per series. # of series = mCnt * (tvCnt ^ tkCnt) -func benchmarkWritePointsExistingSeries(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) { - // Generate test series (measurements + unique tag sets). - series := genTestSeries(mCnt, tkCnt, tvCnt) - // Generate point data to write to the shard. - points := []models.Point{} - for _, s := range series { - for val := 0.0; val < float64(pntCnt); val++ { - p := models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": val}, time.Now()) - points = append(points, p) - } - } - - sfile := MustOpenSeriesFile(b) - defer sfile.Close() - - shard, err := openShard(b, sfile) - defer func() { - _ = shard.Close() - }() - if err != nil { - b.Fatal(err) - } - - chunkedWrite(shard, points) - - // Reset timers and mem-stats before the main benchmark loop. - b.ResetTimer() - - // Run the benchmark loop. - for n := 0; n < b.N; n++ { - b.StopTimer() - - for _, p := range points { - p.SetTime(p.Time().Add(time.Second)) - } - - b.StartTimer() - // Call the function being benchmarked. - chunkedWrite(shard, points) - } -} - -func benchmarkWritePointsExistingSeriesFields(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) { - // Generate test series (measurements + unique tag sets). - series := genTestSeries(mCnt, tkCnt, tvCnt) - // Generate point data to write to the shard. 
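- // Unlike the other existing-series benchmarks, each point below rotates through
- // field names v0..v255, so every batch also exercises per-measurement field
- // creation, not just point writes.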
- points := []models.Point{} - for _, s := range series { - i := 0 - for val := 0.0; val < float64(pntCnt); val++ { - field := fmt.Sprintf("v%d", i%256) - p := models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{field: val}, time.Now()) - points = append(points, p) - i++ - } - } - - sfile := MustOpenSeriesFile(b) - defer func() { - _ = sfile.Close() - }() - - shard, err := openShard(b, sfile) - defer func() { - _ = shard.Close() - }() - if err != nil { - b.Fatal(err) - } - - chunkedWrite(shard, points) - - // Reset timers and mem-stats before the main benchmark loop. - b.ResetTimer() - - // Run the benchmark loop. - for n := 0; n < b.N; n++ { - b.StopTimer() - - for _, p := range points { - p.SetTime(p.Time().Add(time.Second)) - } - - b.StartTimer() - // Call the function being benchmarked. - chunkedWrite(shard, points) - } -} - -func benchmarkWritePointsExistingSeriesEqualBatches(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) { - // Generate test series (measurements + unique tag sets). - series := genTestSeries(mCnt, tkCnt, tvCnt) - // Generate point data to write to the shard. - points := []models.Point{} - for _, s := range series { - for val := 0.0; val < float64(pntCnt); val++ { - p := models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": val}, time.Now()) - points = append(points, p) - } - } - - sfile := MustOpenSeriesFile(b) - defer sfile.Close() - - shard, err := openShard(b, sfile) - defer func() { - _ = shard.Close() - }() - if err != nil { - b.Fatal(err) - } - - chunkedWrite(shard, points) - - // Reset timers and mem-stats before the main benchmark loop. - b.ResetTimer() - - // Run the benchmark loop. - nPts := len(points) - chunkSz := 10000 - start := 0 - end := chunkSz - for n := 0; n < b.N; n++ { - b.StopTimer() - - if end > nPts { - end = nPts - } - if end-start == 0 { - start = 0 - end = chunkSz - } - - for _, p := range points[start:end] { - p.SetTime(p.Time().Add(time.Second)) - } - - b.StartTimer() - shard.WritePoints(context.Background(), points[start:end]) - b.StopTimer() - - start = end - end += chunkSz - } -} - -func openShard(tb testing.TB, sfile *SeriesFile) (*tsdb.Shard, error) { - tmpDir := tb.TempDir() - tmpShard := filepath.Join(tmpDir, "shard") - tmpWal := filepath.Join(tmpDir, "wal") - opts := tsdb.NewEngineOptions() - opts.Config.WALDir = tmpWal - shard := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) - err := shard.Open(context.Background()) - return shard, err -} - -func BenchmarkCreateIterator(b *testing.B) { - // Generate test series (measurements + unique tag sets). - series := genTestSeries(1, 6, 4) - // Generate point data to write to the shard. - points := make([]models.Point, 0, len(series)) - for _, s := range series { - p := models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"v0": 1.0, "v1": 1.0}, time.Now()) - points = append(points, p) - } - - setup := func(index string, shards Shards) { - // Write all the points to all the shards. 
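- // Every shard receives an identical copy of the data, so the CreateIterator
- // calls benchmarked below must merge the same series from however many shards
- // are in the set.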
- for _, sh := range shards { - if err := sh.WritePoints(context.Background(), points); err != nil { - b.Fatal(err) - } - } - } - - for _, index := range tsdb.RegisteredIndexes() { - var shards Shards - for i := 1; i <= 5; i++ { - name := fmt.Sprintf("%s_shards_%d", index, i) - shards = NewShards(b, index, i) - shards.MustOpen() - - setup(index, shards) - b.Run(name, func(b *testing.B) { - defer shards.Close() - - m := &influxql.Measurement{ - Database: "db0", - RetentionPolicy: "rp0", - Name: "measurement0", - } - - opts := query.IteratorOptions{ - Aux: []influxql.VarRef{{Val: "v0", Type: 1}, {Val: "v1", Type: 1}}, - StartTime: models.MinNanoTime, - EndTime: models.MaxNanoTime, - Ascending: false, - Limit: 5, - Ordered: true, - Authorizer: query.OpenAuthorizer, - } - - opts.Condition = &influxql.BinaryExpr{ - Op: 27, - LHS: &influxql.BinaryExpr{ - Op: 29, - LHS: &influxql.VarRef{Val: "tagKey1", Type: 7}, - RHS: &influxql.StringLiteral{Val: "tagValue1"}, - }, - RHS: &influxql.BinaryExpr{ - Op: 29, - LHS: &influxql.VarRef{Val: "tagKey2", Type: 7}, - RHS: &influxql.StringLiteral{Val: "tagValue1"}, - }, - } - for i := 0; i < b.N; i++ { - shards.Shards().CreateIterator(context.Background(), m, opts) - } - }) - } - } -} - -func chunkedWrite(shard *tsdb.Shard, points []models.Point) { - nPts := len(points) - chunkSz := 10000 - start := 0 - end := chunkSz - - for { - if end > nPts { - end = nPts - } - if end-start == 0 { - break - } - - shard.WritePoints(context.Background(), points[start:end]) - start = end - end += chunkSz - } -} - -// Shard represents a test wrapper for tsdb.Shard. -type Shard struct { - *tsdb.Shard - sfile *SeriesFile - path string -} - -type Shards []*Shard - -// NewShard returns a new instance of Shard with temp paths. -func NewShard(tb testing.TB, index string) *Shard { - tb.Helper() - return NewShards(tb, index, 1)[0] -} - -// MustNewOpenShard creates and opens a shard with the provided index. -func MustNewOpenShard(tb testing.TB, index string) *Shard { - tb.Helper() - sh := NewShard(tb, index) - if err := sh.Open(context.Background()); err != nil { - panic(err) - } - return sh -} - -// Close closes the shard and removes all underlying data. -func (sh *Shard) Close() error { - // Will remove temp series file data. - if err := sh.sfile.Close(); err != nil { - return err - } - - defer os.RemoveAll(sh.path) - return sh.Shard.Close() -} - -// NewShards create several shards all sharing the same -func NewShards(tb testing.TB, index string, n int) Shards { - tb.Helper() - - // Create temporary path for data and WAL. - dir := tb.TempDir() - - sfile := MustOpenSeriesFile(tb) - - var shards []*Shard - var idSets []*tsdb.SeriesIDSet - for i := 0; i < n; i++ { - idSets = append(idSets, tsdb.NewSeriesIDSet()) - } - - for i := 0; i < n; i++ { - // Build engine options. - opt := tsdb.NewEngineOptions() - opt.IndexVersion = index - opt.Config.WALDir = filepath.Join(dir, "wal") - - // Initialise series id sets. Need to do this as it's normally done at the - // store level. - opt.SeriesIDSets = seriesIDSets(idSets) - - sh := &Shard{ - Shard: tsdb.NewShard(uint64(i), - filepath.Join(dir, "data", "db0", "rp0", fmt.Sprint(i)), - filepath.Join(dir, "wal", "db0", "rp0", fmt.Sprint(i)), - sfile.SeriesFile, - opt, - ), - sfile: sfile, - path: dir, - } - - shards = append(shards, sh) - } - return Shards(shards) -} - -// Open opens all the underlying shards. 
-func (a Shards) Open() error { - for _, sh := range a { - if err := sh.Open(context.Background()); err != nil { - return err - } - } - return nil -} - -// MustOpen opens all the shards, panicking if an error is encountered. -func (a Shards) MustOpen() { - if err := a.Open(); err != nil { - panic(err) - } -} - -// Shards returns the set of shards as a tsdb.Shards type. -func (a Shards) Shards() tsdb.Shards { - var all tsdb.Shards - for _, sh := range a { - all = append(all, sh.Shard) - } - return all -} - -// Close closes all shards and removes all underlying data. -func (a Shards) Close() error { - if len(a) == 1 { - return a[0].Close() - } - - // Will remove temp series file data. - if err := a[0].sfile.Close(); err != nil { - return err - } - - defer os.RemoveAll(a[0].path) - for _, sh := range a { - if err := sh.Shard.Close(); err != nil { - return err - } - } - return nil -} - -// MustWritePointsString parses the line protocol (with second precision) and -// inserts the resulting points into the shard. Panic on error. -func (sh *Shard) MustWritePointsString(s string) { - a, err := models.ParsePointsWithPrecision([]byte(strings.TrimSpace(s)), time.Time{}, "s") - if err != nil { - panic(err) - } - - if err := sh.WritePoints(context.Background(), a); err != nil { - panic(err) - } -} - -type seriesIterator struct { - keys [][]byte -} - -type series struct { - name []byte - tags models.Tags - deleted bool -} - -func (s series) Name() []byte { return s.name } -func (s series) Tags() models.Tags { return s.tags } -func (s series) Deleted() bool { return s.deleted } -func (s series) Expr() influxql.Expr { return nil } - -func (itr *seriesIterator) Close() error { return nil } - -func (itr *seriesIterator) Next() (tsdb.SeriesElem, error) { - if len(itr.keys) == 0 { - return nil, nil - } - name, tags := models.ParseKeyBytes(itr.keys[0]) - s := series{name: name, tags: tags} - itr.keys = itr.keys[1:] - return s, nil -} - -type seriesIDSets []*tsdb.SeriesIDSet - -func (a seriesIDSets) ForEach(f func(ids *tsdb.SeriesIDSet)) error { - for _, v := range a { - f(v) - } - return nil -} diff --git a/tsdb/store.go b/tsdb/store.go deleted file mode 100644 index 1b8ad7f2308..00000000000 --- a/tsdb/store.go +++ /dev/null @@ -1,2157 +0,0 @@ -//lint:file-ignore ST1005 this is old code. we're not going to conform error messages -package tsdb // import "github.com/influxdata/influxdb/v2/tsdb" - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "sort" - "strconv" - "sync" - "time" - - errors3 "github.com/influxdata/influxdb/v2/pkg/errors" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/influxql/query" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/estimator" - "github.com/influxdata/influxdb/v2/pkg/estimator/hll" - "github.com/influxdata/influxdb/v2/pkg/limiter" - "github.com/influxdata/influxql" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -var ( - // ErrShardNotFound is returned when trying to get a non existing shard. - ErrShardNotFound = fmt.Errorf("shard not found") - // ErrStoreClosed is returned when trying to use a closed Store. 
- ErrStoreClosed = fmt.Errorf("store is closed") - // ErrShardDeletion is returned when trying to create a shard that is being deleted - ErrShardDeletion = errors.New("shard is being deleted") - // ErrMultipleIndexTypes is returned when trying to do deletes on a database with - // multiple index types. - ErrMultipleIndexTypes = errors.New("cannot delete data. DB contains shards using multiple indexes. Please convert all shards to use the same index type to delete data") -) - -// SeriesFileDirectory is the name of the directory containing series files for -// a database. -const SeriesFileDirectory = "_series" - -// databaseState keeps track of the state of a database. -type databaseState struct{ indexTypes map[string]int } - -// addIndexType records that the database has a shard with the given index type. -func (d *databaseState) addIndexType(indexType string) { - if d.indexTypes == nil { - d.indexTypes = make(map[string]int) - } - d.indexTypes[indexType]++ -} - -// addIndexType records that the database no longer has a shard with the given index type. -func (d *databaseState) removeIndexType(indexType string) { - if d.indexTypes != nil { - d.indexTypes[indexType]-- - if d.indexTypes[indexType] <= 0 { - delete(d.indexTypes, indexType) - } - } -} - -// hasMultipleIndexTypes returns true if the database has multiple index types. -func (d *databaseState) hasMultipleIndexTypes() bool { return d != nil && len(d.indexTypes) > 1 } - -type shardErrorMap struct { - mu sync.Mutex - shardErrors map[uint64]error -} - -func (se *shardErrorMap) setShardOpenError(shardID uint64, err error) { - se.mu.Lock() - defer se.mu.Unlock() - if err == nil { - delete(se.shardErrors, shardID) - } else { - se.shardErrors[shardID] = &ErrPreviousShardFail{error: fmt.Errorf("opening shard previously failed with: %w", err)} - } -} - -func (se *shardErrorMap) shardError(shardID uint64) (error, bool) { - se.mu.Lock() - defer se.mu.Unlock() - oldErr, hasErr := se.shardErrors[shardID] - return oldErr, hasErr -} - -// Store manages shards and indexes for databases. -type Store struct { - mu sync.RWMutex - shards map[uint64]*Shard - databases map[string]*databaseState - sfiles map[string]*SeriesFile - SeriesFileMaxSize int64 // Determines size of series file mmap. Can be altered in tests. - path string - - // Maintains a set of shards that are in the process of deletion. - // This prevents new shards from being created while old ones are being deleted. - pendingShardDeletes map[uint64]struct{} - - // Maintains a set of shards that failed to open - badShards shardErrorMap - - // Epoch tracker helps serialize writes and deletes that may conflict. It - // is stored by shard. - epochs map[uint64]*epochTracker - - EngineOptions EngineOptions - - baseLogger *zap.Logger - Logger *zap.Logger - - closing chan struct{} - wg sync.WaitGroup - opened bool -} - -// NewStore returns a new store with the given path and a default configuration. -// The returned store must be initialized by calling Open before using it. -func NewStore(path string) *Store { - return &Store{ - databases: make(map[string]*databaseState), - path: path, - sfiles: make(map[string]*SeriesFile), - pendingShardDeletes: make(map[uint64]struct{}), - badShards: shardErrorMap{shardErrors: make(map[uint64]error)}, - epochs: make(map[uint64]*epochTracker), - EngineOptions: NewEngineOptions(), - Logger: zap.NewNop(), - baseLogger: zap.NewNop(), - } -} - -// WithLogger sets the logger for the store. 
-func (s *Store) WithLogger(log *zap.Logger) { - s.baseLogger = log - s.Logger = log.With(zap.String("service", "store")) - for _, sh := range s.shards { - sh.WithLogger(s.baseLogger) - } -} - -// CollectBucketMetrics sets prometheus metrics for each bucket -func (s *Store) CollectBucketMetrics() { - // Collect all the bucket cardinality estimations - databases := s.Databases() - for _, database := range databases { - - log := s.Logger.With(logger.Database(database)) - sc, err := s.SeriesCardinality(context.Background(), database) - if err != nil { - log.Info("Cannot retrieve series cardinality", zap.Error(err)) - continue - } - - mc, err := s.MeasurementsCardinality(context.Background(), database) - if err != nil { - log.Info("Cannot retrieve measurement cardinality", zap.Error(err)) - continue - } - - labels := prometheus.Labels{bucketLabel: database} - seriesCardinality := globalBucketMetrics.seriesCardinality.With(labels) - measureCardinality := globalBucketMetrics.measureCardinality.With(labels) - - seriesCardinality.Set(float64(sc)) - measureCardinality.Set(float64(mc)) - } -} - -var globalBucketMetrics = newAllBucketMetrics() - -const bucketSubsystem = "bucket" -const bucketLabel = "bucket" - -type allBucketMetrics struct { - seriesCardinality *prometheus.GaugeVec - measureCardinality *prometheus.GaugeVec -} - -func newAllBucketMetrics() *allBucketMetrics { - labels := []string{bucketLabel} - return &allBucketMetrics{ - seriesCardinality: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: storageNamespace, - Subsystem: bucketSubsystem, - Name: "series_num", - Help: "Gauge of series cardinality per bucket", - }, labels), - measureCardinality: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: storageNamespace, - Subsystem: bucketSubsystem, - Name: "measurement_num", - Help: "Gauge of measurement cardinality per bucket", - }, labels), - } -} - -func BucketCollectors() []prometheus.Collector { - return []prometheus.Collector{ - globalBucketMetrics.seriesCardinality, - globalBucketMetrics.measureCardinality, - } -} - -func (s *Store) IndexBytes() int { - // Build index set to work on. - is := IndexSet{Indexes: make([]Index, 0, len(s.shardIDs()))} - s.mu.RLock() - for _, sid := range s.shardIDs() { - shard, ok := s.shards[sid] - if !ok { - continue - } - - if is.SeriesFile == nil { - is.SeriesFile = shard.sfile - } - is.Indexes = append(is.Indexes, shard.index) - } - s.mu.RUnlock() - - var b int - for _, idx := range is.Indexes { - b += idx.Bytes() - } - - return b -} - -// Path returns the store's root path. -func (s *Store) Path() string { return s.path } - -// Open initializes the store, creating all necessary directories, loading all -// shards as well as initializing periodic maintenance of them. -func (s *Store) Open(ctx context.Context) error { - s.mu.Lock() - defer s.mu.Unlock() - - if s.opened { - // Already open - return nil - } - - s.closing = make(chan struct{}) - s.shards = map[uint64]*Shard{} - - s.Logger.Info("Using data dir", zap.String("path", s.Path())) - - // Create directory. 
- if err := os.MkdirAll(s.path, 0777); err != nil { - return err - } - - if err := s.loadShards(ctx); err != nil { - return err - } - - s.opened = true - - if !s.EngineOptions.MonitorDisabled { - s.wg.Add(1) - go func() { - s.wg.Done() - s.monitorShards() - }() - } - - if !s.EngineOptions.MetricsDisabled { - s.wg.Add(1) - go func() { - s.wg.Done() - s.collectMetrics() - }() - } - - return nil -} - -func (s *Store) loadShards(ctx context.Context) error { - // res holds the result from opening each shard in a goroutine - type res struct { - s *Shard - err error - } - - // Limit the number of concurrent TSM files to be opened to the number of cores. - s.EngineOptions.OpenLimiter = limiter.NewFixed(runtime.GOMAXPROCS(0)) - - // Setup a shared limiter for compactions - lim := s.EngineOptions.Config.MaxConcurrentCompactions - if lim == 0 { - lim = runtime.GOMAXPROCS(0) / 2 // Default to 50% of cores for compactions - - if lim < 1 { - lim = 1 - } - } - - // Don't allow more compactions to run than cores. - if lim > runtime.GOMAXPROCS(0) { - lim = runtime.GOMAXPROCS(0) - } - - s.EngineOptions.CompactionLimiter = limiter.NewFixed(lim) - - compactionSettings := []zapcore.Field{zap.Int("max_concurrent_compactions", lim)} - throughput := int(s.EngineOptions.Config.CompactThroughput) - throughputBurst := int(s.EngineOptions.Config.CompactThroughputBurst) - if throughput > 0 { - if throughputBurst < throughput { - throughputBurst = throughput - } - - compactionSettings = append( - compactionSettings, - zap.Int("throughput_bytes_per_second", throughput), - zap.Int("throughput_bytes_per_second_burst", throughputBurst), - ) - s.EngineOptions.CompactionThroughputLimiter = limiter.NewRate(throughput, throughputBurst) - } else { - compactionSettings = append( - compactionSettings, - zap.String("throughput_bytes_per_second", "unlimited"), - zap.String("throughput_bytes_per_second_burst", "unlimited"), - ) - } - - s.Logger.Info("Compaction settings", compactionSettings...) - - log, logEnd := logger.NewOperation(context.TODO(), s.Logger, "Open store", "tsdb_open") - defer logEnd() - - t := limiter.NewFixed(runtime.GOMAXPROCS(0)) - resC := make(chan *res) - var n int - - // Determine how many shards we need to open by checking the store path. - dbDirs, err := os.ReadDir(s.path) - if err != nil { - return err - } - - for _, db := range dbDirs { - dbPath := filepath.Join(s.path, db.Name()) - if !db.IsDir() { - log.Info("Skipping database dir", zap.String("name", db.Name()), zap.String("reason", "not a directory")) - continue - } - - if s.EngineOptions.DatabaseFilter != nil && !s.EngineOptions.DatabaseFilter(db.Name()) { - log.Info("Skipping database dir", logger.Database(db.Name()), zap.String("reason", "failed database filter")) - continue - } - - // Load series file. - sfile, err := s.openSeriesFile(db.Name()) - if err != nil { - return err - } - - // Load each retention policy within the database directory. - rpDirs, err := os.ReadDir(dbPath) - if err != nil { - return err - } - - for _, rp := range rpDirs { - rpPath := filepath.Join(s.path, db.Name(), rp.Name()) - if !rp.IsDir() { - log.Info("Skipping retention policy dir", zap.String("name", rp.Name()), zap.String("reason", "not a directory")) - continue - } - - // The .series directory is not a retention policy. 
- if rp.Name() == SeriesFileDirectory { - continue - } - - if s.EngineOptions.RetentionPolicyFilter != nil && !s.EngineOptions.RetentionPolicyFilter(db.Name(), rp.Name()) { - log.Info("Skipping retention policy dir", logger.RetentionPolicy(rp.Name()), zap.String("reason", "failed retention policy filter")) - continue - } - - shardDirs, err := os.ReadDir(rpPath) - if err != nil { - return err - } - - for _, sh := range shardDirs { - // Series file should not be in a retention policy but skip just in case. - if sh.Name() == SeriesFileDirectory { - log.Warn("Skipping series file in retention policy dir", zap.String("path", filepath.Join(s.path, db.Name(), rp.Name()))) - continue - } - - n++ - go func(db, rp, sh string) { - path := filepath.Join(s.path, db, rp, sh) - walPath := filepath.Join(s.EngineOptions.Config.WALDir, db, rp, sh) - - if err := t.Take(ctx); err != nil { - log.Error("failed to open shard at path", zap.String("path", path), zap.Error(err)) - resC <- &res{err: fmt.Errorf("failed to open shard at path %q: %w", path, err)} - return - } - defer t.Release() - - start := time.Now() - - // Shard file names are numeric shardIDs - shardID, err := strconv.ParseUint(sh, 10, 64) - if err != nil { - log.Error("invalid shard ID found at path", zap.String("path", path)) - resC <- &res{err: fmt.Errorf("%s is not a valid ID. Skipping shard", sh)} - return - } - - if s.EngineOptions.ShardFilter != nil && !s.EngineOptions.ShardFilter(db, rp, shardID) { - log.Warn("skipping shard", zap.String("path", path), logger.Shard(shardID)) - resC <- &res{} - return - } - - // Copy options and assign shared index. - opt := s.EngineOptions - - // Provide an implementation of the ShardIDSets - opt.SeriesIDSets = shardSet{store: s, db: db} - - // Open engine. - shard := NewShard(shardID, path, walPath, sfile, opt) - - // Disable compactions, writes and queries until all shards are loaded - shard.EnableOnOpen = false - shard.CompactionDisabled = s.EngineOptions.CompactionDisabled - shard.WithLogger(s.baseLogger) - - err = s.OpenShard(ctx, shard, false) - if err != nil { - log.Error("Failed to open shard", logger.Shard(shardID), zap.Error(err)) - resC <- &res{err: fmt.Errorf("failed to open shard: %d: %s", shardID, err)} - return - } - - resC <- &res{s: shard} - log.Info("Opened shard", zap.String("index_version", shard.IndexType()), zap.String("path", path), zap.Duration("duration", time.Since(start))) - }(db.Name(), rp.Name(), sh.Name()) - } - } - } - - // Gather results of opening shards concurrently, keeping track of how - // many databases we are managing. - for i := 0; i < n; i++ { - res := <-resC - if res.s == nil || res.err != nil { - continue - } - s.shards[res.s.id] = res.s - s.epochs[res.s.id] = newEpochTracker() - if _, ok := s.databases[res.s.database]; !ok { - s.databases[res.s.database] = new(databaseState) - } - s.databases[res.s.database].addIndexType(res.s.IndexType()) - } - close(resC) - - // Check if any databases are running multiple index types. - for db, state := range s.databases { - if state.hasMultipleIndexTypes() { - var fields []zapcore.Field - for idx, cnt := range state.indexTypes { - fields = append(fields, zap.Int(fmt.Sprintf("%s_count", idx), cnt)) - } - s.Logger.Warn("Mixed shard index types", append(fields, logger.Database(db))...) 
- } - } - - // Enable all shards - for _, sh := range s.shards { - sh.SetEnabled(true) - if isIdle, _ := sh.IsIdle(); isIdle { - if err := sh.Free(); err != nil { - return err - } - } - } - - return nil -} - -// Close closes the store and all associated shards. After calling Close accessing -// shards through the Store will result in ErrStoreClosed being returned. -func (s *Store) Close() error { - s.mu.Lock() - if s.opened { - close(s.closing) - } - s.mu.Unlock() - - s.wg.Wait() - // No other goroutines accessing the store, so no need for a Lock. - - // Close all the shards in parallel. - if err := s.walkShards(s.shardsSlice(), func(sh *Shard) error { - return sh.Close() - }); err != nil { - return err - } - - s.mu.Lock() - for _, sfile := range s.sfiles { - // Close out the series files. - if err := sfile.Close(); err != nil { - s.mu.Unlock() - return err - } - } - - s.databases = make(map[string]*databaseState) - s.sfiles = map[string]*SeriesFile{} - s.pendingShardDeletes = make(map[uint64]struct{}) - s.shards = nil - s.opened = false // Store may now be opened again. - s.mu.Unlock() - return nil -} - -// epochsForShards returns a copy of the epoch trackers only including what is necessary -// for the provided shards. Must be called under the lock. -func (s *Store) epochsForShards(shards []*Shard) map[uint64]*epochTracker { - out := make(map[uint64]*epochTracker) - for _, sh := range shards { - out[sh.id] = s.epochs[sh.id] - } - return out -} - -// openSeriesFile either returns or creates a series file for the provided -// database. It must be called under a full lock. -func (s *Store) openSeriesFile(database string) (*SeriesFile, error) { - if sfile := s.sfiles[database]; sfile != nil { - return sfile, nil - } - - sfile := NewSeriesFile(filepath.Join(s.path, database, SeriesFileDirectory)) - sfile.WithMaxCompactionConcurrency(s.EngineOptions.Config.SeriesFileMaxConcurrentSnapshotCompactions) - sfile.Logger = s.baseLogger - if err := sfile.Open(); err != nil { - return nil, err - } - s.sfiles[database] = sfile - return sfile, nil -} - -func (s *Store) SeriesFile(database string) *SeriesFile { - return s.seriesFile(database) -} - -func (s *Store) seriesFile(database string) *SeriesFile { - s.mu.RLock() - defer s.mu.RUnlock() - return s.sfiles[database] -} - -// Shard returns a shard by id. -func (s *Store) Shard(id uint64) *Shard { - s.mu.RLock() - defer s.mu.RUnlock() - sh, ok := s.shards[id] - if !ok { - return nil - } - return sh -} - -type ErrPreviousShardFail struct { - error -} - -func (e ErrPreviousShardFail) Unwrap() error { - return e.error -} - -func (e ErrPreviousShardFail) Is(err error) bool { - _, sOk := err.(ErrPreviousShardFail) - _, pOk := err.(*ErrPreviousShardFail) - return sOk || pOk -} - -func (e ErrPreviousShardFail) Error() string { - return e.error.Error() -} - -func (s *Store) OpenShard(ctx context.Context, sh *Shard, force bool) error { - if sh == nil { - return errors.New("cannot open nil shard") - } - oldErr, bad := s.badShards.shardError(sh.ID()) - if force || !bad { - err := sh.Open(ctx) - s.badShards.setShardOpenError(sh.ID(), err) - return err - } else { - return oldErr - } -} - -func (s *Store) SetShardOpenErrorForTest(shardID uint64, err error) { - s.badShards.setShardOpenError(shardID, err) -} - -// Shards returns a list of shards by id. 
-func (s *Store) Shards(ids []uint64) []*Shard { - s.mu.RLock() - defer s.mu.RUnlock() - a := make([]*Shard, 0, len(ids)) - for _, id := range ids { - sh, ok := s.shards[id] - if !ok { - continue - } - a = append(a, sh) - } - return a -} - -// ShardGroup returns a ShardGroup with a list of shards by id. -func (s *Store) ShardGroup(ids []uint64) ShardGroup { - return Shards(s.Shards(ids)) -} - -// ShardN returns the number of shards in the store. -func (s *Store) ShardN() int { - s.mu.RLock() - defer s.mu.RUnlock() - return len(s.shards) -} - -// ShardDigest returns a digest of the shard with the specified ID. -func (s *Store) ShardDigest(id uint64) (io.ReadCloser, int64, error) { - sh := s.Shard(id) - if sh == nil { - return nil, 0, ErrShardNotFound - } - - readCloser, size, _, err := sh.Digest() - return readCloser, size, err -} - -// CreateShard creates a shard with the given id and retention policy on a database. -func (s *Store) CreateShard(ctx context.Context, database, retentionPolicy string, shardID uint64, enabled bool) error { - s.mu.Lock() - defer s.mu.Unlock() - - select { - case <-s.closing: - return ErrStoreClosed - default: - } - - // Shard already exists. - if _, ok := s.shards[shardID]; ok { - return nil - } - - // Shard may be undergoing a pending deletion. While the shard can be - // recreated, it must wait for the pending delete to finish. - if _, ok := s.pendingShardDeletes[shardID]; ok { - return ErrShardDeletion - } - - // Create the db and retention policy directories if they don't exist. - if err := os.MkdirAll(filepath.Join(s.path, database, retentionPolicy), 0700); err != nil { - return err - } - - // Create the WAL directory. - walPath := filepath.Join(s.EngineOptions.Config.WALDir, database, retentionPolicy, fmt.Sprintf("%d", shardID)) - if err := os.MkdirAll(walPath, 0700); err != nil { - return err - } - - // Retrieve database series file. - sfile, err := s.openSeriesFile(database) - if err != nil { - return err - } - - // Copy index options and pass in shared index. - opt := s.EngineOptions - opt.SeriesIDSets = shardSet{store: s, db: database} - - path := filepath.Join(s.path, database, retentionPolicy, strconv.FormatUint(shardID, 10)) - shard := NewShard(shardID, path, walPath, sfile, opt) - shard.WithLogger(s.baseLogger) - shard.EnableOnOpen = enabled - - if err := s.OpenShard(ctx, shard, false); err != nil { - return err - } - - s.shards[shardID] = shard - s.epochs[shardID] = newEpochTracker() - if _, ok := s.databases[database]; !ok { - s.databases[database] = new(databaseState) - } - s.databases[database].addIndexType(shard.IndexType()) - if state := s.databases[database]; state.hasMultipleIndexTypes() { - var fields []zapcore.Field - for idx, cnt := range state.indexTypes { - fields = append(fields, zap.Int(fmt.Sprintf("%s_count", idx), cnt)) - } - s.Logger.Warn("Mixed shard index types", append(fields, logger.Database(database))...) - } - - return nil -} - -// CreateShardSnapShot will create a hard link to the underlying shard and return a path. -// The caller is responsible for cleaning up (removing) the file path returned. -func (s *Store) CreateShardSnapshot(id uint64, skipCacheOk bool) (string, error) { - sh := s.Shard(id) - if sh == nil { - return "", ErrShardNotFound - } - - return sh.CreateSnapshot(skipCacheOk) -} - -// SetShardEnabled enables or disables a shard for read and writes. 
-func (s *Store) SetShardEnabled(shardID uint64, enabled bool) error { - sh := s.Shard(shardID) - if sh == nil { - return ErrShardNotFound - } - sh.SetEnabled(enabled) - return nil -} - -// DeleteShards removes all shards from disk. -func (s *Store) DeleteShards() error { - for _, id := range s.ShardIDs() { - if err := s.DeleteShard(id); err != nil { - return err - } - } - return nil -} - -// DeleteShard removes a shard from disk. -func (s *Store) DeleteShard(shardID uint64) error { - sh := s.Shard(shardID) - if sh == nil { - return nil - } - - // Remove the shard from Store so it's not returned to callers requesting - // shards. Also mark that this shard is currently being deleted in a separate - // map so that we do not have to retain the global store lock while deleting - // files. - s.mu.Lock() - if _, ok := s.pendingShardDeletes[shardID]; ok { - // We are already being deleted? This is possible if delete shard - // was called twice in sequence before the shard could be removed from - // the mapping. - // This is not an error because deleting a shard twice is not an error. - s.mu.Unlock() - return nil - } - delete(s.shards, shardID) - delete(s.epochs, shardID) - s.pendingShardDeletes[shardID] = struct{}{} - - db := sh.Database() - // Determine if the shard contained any series that are not present in any - // other shards in the database. - shards := s.filterShards(byDatabase(db)) - s.mu.Unlock() - - // Ensure the pending deletion flag is cleared on exit. - defer func() { - s.mu.Lock() - defer s.mu.Unlock() - delete(s.pendingShardDeletes, shardID) - s.databases[db].removeIndexType(sh.IndexType()) - }() - - // Get the shard's local bitset of series IDs. - index, err := sh.Index() - if err != nil { - return err - } - - ss := index.SeriesIDSet() - - err = s.walkShards(shards, func(sh *Shard) error { - index, err := sh.Index() - if err != nil { - return err - } - - ss.Diff(index.SeriesIDSet()) - return nil - }) - - if err != nil { - s.Logger.Error("error walking shards during DeleteShard operation", zap.Error(err)) - } - - // Remove any remaining series in the set from the series file, as they don't - // exist in any of the database's remaining shards. - if ss.Cardinality() > 0 { - sfile := s.seriesFile(db) - if sfile != nil { - ss.ForEach(func(id uint64) { - err = sfile.DeleteSeriesID(id) - if err != nil { - s.Logger.Error("error deleting series id during DeleteShard operation", zap.Uint64("id", id), zap.Error(err)) - } - }) - } - - } - - // Close the shard. - if err := sh.Close(); err != nil { - return err - } - - // Remove the on-disk shard data. - if err := os.RemoveAll(sh.path); err != nil { - return err - } - - return os.RemoveAll(sh.walPath) -} - -// DeleteDatabase will close all shards associated with a database and remove the directory and files from disk. -// -// Returns nil if no database exists -func (s *Store) DeleteDatabase(name string) error { - s.mu.RLock() - if _, ok := s.databases[name]; !ok { - s.mu.RUnlock() - // no files locally, so nothing to do - return nil - } - shards := s.filterShards(func(sh *Shard) bool { - return sh.database == name - }) - s.mu.RUnlock() - - if err := s.walkShards(shards, func(sh *Shard) error { - if sh.database != name { - return nil - } - - return sh.Close() - }); err != nil { - return err - } - - dbPath := filepath.Clean(filepath.Join(s.path, name)) - - s.mu.Lock() - defer s.mu.Unlock() - - sfile := s.sfiles[name] - delete(s.sfiles, name) - - // Close series file. 
- if sfile != nil { - if err := sfile.Close(); err != nil { - return err - } - } - - // extra sanity check to make sure that even if someone named their database "../.." - // that we don't delete everything because of it, they'll just have extra files forever - if filepath.Clean(s.path) != filepath.Dir(dbPath) { - return fmt.Errorf("invalid database directory location for database '%s': %s", name, dbPath) - } - - if err := os.RemoveAll(dbPath); err != nil { - return err - } - if err := os.RemoveAll(filepath.Join(s.EngineOptions.Config.WALDir, name)); err != nil { - return err - } - - for _, sh := range shards { - delete(s.shards, sh.id) - delete(s.epochs, sh.id) - } - - // Remove database from store list of databases - delete(s.databases, name) - - return nil -} - -// DeleteRetentionPolicy will close all shards associated with the -// provided retention policy, remove the retention policy directories on -// both the DB and WAL, and remove all shard files from disk. -func (s *Store) DeleteRetentionPolicy(database, name string) error { - s.mu.RLock() - if _, ok := s.databases[database]; !ok { - s.mu.RUnlock() - // unknown database, nothing to do - return nil - } - shards := s.filterShards(func(sh *Shard) bool { - return sh.database == database && sh.retentionPolicy == name - }) - s.mu.RUnlock() - - // Close and delete all shards under the retention policy on the - // database. - if err := s.walkShards(shards, func(sh *Shard) error { - if sh.database != database || sh.retentionPolicy != name { - return nil - } - - return sh.Close() - }); err != nil { - return err - } - - // Remove the retention policy folder. - rpPath := filepath.Clean(filepath.Join(s.path, database, name)) - - // ensure Store's path is the grandparent of the retention policy - if filepath.Clean(s.path) != filepath.Dir(filepath.Dir(rpPath)) { - return fmt.Errorf("invalid path for database '%s', retention policy '%s': %s", database, name, rpPath) - } - - // Remove the retention policy folder. - if err := os.RemoveAll(filepath.Join(s.path, database, name)); err != nil { - return err - } - - // Remove the retention policy folder from the WAL. - if err := os.RemoveAll(filepath.Join(s.EngineOptions.Config.WALDir, database, name)); err != nil { - return err - } - - s.mu.Lock() - state := s.databases[database] - for _, sh := range shards { - delete(s.shards, sh.id) - state.removeIndexType(sh.IndexType()) - } - s.mu.Unlock() - return nil -} - -// DeleteMeasurement removes a measurement and all associated series from a database. -func (s *Store) DeleteMeasurement(ctx context.Context, database, name string) error { - s.mu.RLock() - if s.databases[database].hasMultipleIndexTypes() { - s.mu.RUnlock() - return ErrMultipleIndexTypes - } - shards := s.filterShards(byDatabase(database)) - epochs := s.epochsForShards(shards) - s.mu.RUnlock() - - // Limit to 1 delete for each shard since expanding the measurement into the list - // of series keys can be very memory intensive if run concurrently. - limit := limiter.NewFixed(1) - return s.walkShards(shards, func(sh *Shard) error { - if err := limit.Take(ctx); err != nil { - return err - } - defer limit.Release() - - // install our guard and wait for any prior deletes to finish. the - // guard ensures future deletes that could conflict wait for us. 
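- // The guard spans the full time range but is scoped to just this measurement name.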
- guard := newGuard(influxql.MinTime, influxql.MaxTime, []string{name}, nil) - waiter := epochs[sh.id].WaitDelete(guard) - waiter.Wait() - defer waiter.Done() - - return sh.DeleteMeasurement(ctx, []byte(name)) - }) -} - -// filterShards returns a slice of shards where fn returns true -// for the shard. If the provided predicate is nil then all shards are returned. -// filterShards should be called under a lock. -func (s *Store) filterShards(fn func(sh *Shard) bool) []*Shard { - var shards []*Shard - if fn == nil { - shards = make([]*Shard, 0, len(s.shards)) - fn = func(*Shard) bool { return true } - } else { - shards = make([]*Shard, 0) - } - - for _, sh := range s.shards { - if fn(sh) { - shards = append(shards, sh) - } - } - return shards -} - -// byDatabase provides a predicate for filterShards that matches on the name of -// the database passed in. -func byDatabase(name string) func(sh *Shard) bool { - return func(sh *Shard) bool { - return sh.database == name - } -} - -// walkShards apply a function to each shard in parallel. fn must be safe for -// concurrent use. If any of the functions return an error, the first error is -// returned. -func (s *Store) walkShards(shards []*Shard, fn func(sh *Shard) error) error { - // struct to hold the result of opening each reader in a goroutine - type res struct { - err error - } - - resC := make(chan res) - var n int - - for _, sh := range shards { - n++ - - go func(sh *Shard) { - if err := fn(sh); err != nil { - resC <- res{err: fmt.Errorf("shard %d: %s", sh.id, err)} - return - } - - resC <- res{} - }(sh) - } - - var err error - for i := 0; i < n; i++ { - res := <-resC - if res.err != nil { - err = res.err - } - } - close(resC) - return err -} - -// ShardIDs returns a slice of all ShardIDs under management. -func (s *Store) ShardIDs() []uint64 { - s.mu.RLock() - defer s.mu.RUnlock() - return s.shardIDs() -} - -func (s *Store) shardIDs() []uint64 { - a := make([]uint64, 0, len(s.shards)) - for shardID := range s.shards { - a = append(a, shardID) - } - return a -} - -// shardsSlice returns an ordered list of shards. -func (s *Store) shardsSlice() []*Shard { - a := make([]*Shard, 0, len(s.shards)) - for _, sh := range s.shards { - a = append(a, sh) - } - sort.Sort(Shards(a)) - return a -} - -// Databases returns the names of all databases managed by the store. -func (s *Store) Databases() []string { - s.mu.RLock() - defer s.mu.RUnlock() - - databases := make([]string, 0, len(s.databases)) - for k := range s.databases { - databases = append(databases, k) - } - return databases -} - -// DiskSize returns the size of all the shard files in bytes. -// This size does not include the WAL size. -func (s *Store) DiskSize() (int64, error) { - var size int64 - - s.mu.RLock() - allShards := s.filterShards(nil) - s.mu.RUnlock() - - for _, sh := range allShards { - sz, err := sh.DiskSize() - if err != nil { - return 0, err - } - size += sz - } - return size, nil -} - -// sketchesForDatabase returns merged sketches for the provided database, by -// walking each shard in the database and merging the sketches found there. -func (s *Store) sketchesForDatabase(dbName string, getSketches func(*Shard) (estimator.Sketch, estimator.Sketch, error)) (estimator.Sketch, estimator.Sketch, error) { - var ( - ss estimator.Sketch // Sketch estimating number of items. - ts estimator.Sketch // Sketch estimating number of tombstoned items. - ) - - s.mu.RLock() - shards := s.filterShards(byDatabase(dbName)) - s.mu.RUnlock() - - // Never return nil sketches. 
In the case that db exists but no data written - // return empty sketches. - if len(shards) == 0 { - ss, ts = hll.NewDefaultPlus(), hll.NewDefaultPlus() - } - - // Iterate over all shards for the database and combine all of the sketches. - for _, shard := range shards { - s, t, err := getSketches(shard) - if err != nil { - return nil, nil, err - } - - if ss == nil { - ss, ts = s, t - } else if err = ss.Merge(s); err != nil { - return nil, nil, err - } else if err = ts.Merge(t); err != nil { - return nil, nil, err - } - } - return ss, ts, nil -} - -// SeriesCardinality returns the exact series cardinality for the provided -// database. -// -// Cardinality is calculated exactly by unioning all shards' bitsets of series -// IDs. The result of this method cannot be combined with any other results. -func (s *Store) SeriesCardinality(ctx context.Context, database string) (int64, error) { - s.mu.RLock() - shards := s.filterShards(byDatabase(database)) - s.mu.RUnlock() - - ss, err := s.SeriesCardinalityFromShards(ctx, shards) - if err != nil { - return 0, err - } - - return int64(ss.Cardinality()), nil -} - -func (s *Store) SeriesCardinalityFromShards(ctx context.Context, shards []*Shard) (*SeriesIDSet, error) { - var setMu sync.Mutex - others := make([]*SeriesIDSet, 0, len(shards)) - - err := s.walkShards(shards, func(sh *Shard) error { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - index, err := sh.Index() - if err != nil { - return err - } - - seriesIDs := index.SeriesIDSet() - setMu.Lock() - others = append(others, seriesIDs) - setMu.Unlock() - - return nil - }) - if err != nil { - return nil, err - } - - ss := NewSeriesIDSet() - ss.Merge(others...) - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - return ss, nil -} - -// SeriesSketches returns the sketches associated with the series data in all -// the shards in the provided database. -// -// The returned sketches can be combined with other sketches to provide an -// estimation across distributed databases. -func (s *Store) SeriesSketches(ctx context.Context, database string) (estimator.Sketch, estimator.Sketch, error) { - return s.sketchesForDatabase(database, func(sh *Shard) (estimator.Sketch, estimator.Sketch, error) { - select { - case <-ctx.Done(): - return nil, nil, ctx.Err() - default: - } - if sh == nil { - return nil, nil, errors.New("shard nil, can't get cardinality") - } - return sh.SeriesSketches() - }) -} - -// MeasurementsCardinality returns an estimation of the measurement cardinality -// for the provided database. -// -// Cardinality is calculated using a sketch-based estimation. The result of this -// method cannot be combined with any other results. -func (s *Store) MeasurementsCardinality(ctx context.Context, database string) (int64, error) { - ss, ts, err := s.MeasurementsSketches(ctx, database) - - if err != nil { - return 0, err - } - mc := int64(ss.Count() - ts.Count()) - if mc < 0 { - mc = 0 - } - return mc, nil -} - -// MeasurementsSketches returns the sketches associated with the measurement -// data in all the shards in the provided database. -// -// The returned sketches can be combined with other sketches to provide an -// estimation across distributed databases. -func (s *Store) MeasurementsSketches(ctx context.Context, database string) (estimator.Sketch, estimator.Sketch, error) { - return s.sketchesForDatabase(database, func(sh *Shard) (estimator.Sketch, estimator.Sketch, error) { - // every iteration, check for timeout. 
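The cardinality and sketch methods above poll ctx.Done() with a non-blocking select before each unit of work so long metadata scans respect query timeouts. A minimal sketch of that pattern in isolation, with a made-up scan over string items standing in for per-shard work:

package main

import (
	"context"
	"fmt"
	"time"
)

// scan walks items, checking for cancellation before each step.
func scan(ctx context.Context, items []string) error {
	for _, it := range items {
		select {
		case <-ctx.Done():
			return ctx.Err() // context.DeadlineExceeded or context.Canceled
		default:
		}
		_ = it // stand-in for the per-item work, e.g. reading one shard's sketches
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	fmt.Println(scan(ctx, []string{"cpu", "mem", "disk"}))
}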
- select { - case <-ctx.Done(): - return nil, nil, ctx.Err() - default: - } - if sh == nil { - return nil, nil, errors.New("shard nil, can't get cardinality") - } - return sh.MeasurementsSketches() - }) -} - -// BackupShard will get the shard and have the engine backup since the passed in -// time to the writer. -func (s *Store) BackupShard(id uint64, since time.Time, w io.Writer) error { - shard := s.Shard(id) - if shard == nil { - return &errors2.Error{ - Code: errors2.ENotFound, - Msg: fmt.Sprintf("shard %d not found", id), - } - } - - path, err := relativePath(s.path, shard.path) - if err != nil { - return err - } - - return shard.Backup(w, path, since) -} - -func (s *Store) ExportShard(id uint64, start time.Time, end time.Time, w io.Writer) error { - shard := s.Shard(id) - if shard == nil { - return &errors2.Error{ - Code: errors2.ENotFound, - Msg: fmt.Sprintf("shard %d not found", id), - } - } - - path, err := relativePath(s.path, shard.path) - if err != nil { - return err - } - - return shard.Export(w, path, start, end) -} - -// RestoreShard restores a backup from r to a given shard. -// This will only overwrite files included in the backup. -func (s *Store) RestoreShard(ctx context.Context, id uint64, r io.Reader) error { - shard := s.Shard(id) - if shard == nil { - return fmt.Errorf("shard %d doesn't exist on this server", id) - } - - path, err := relativePath(s.path, shard.path) - if err != nil { - return err - } - - return shard.Restore(ctx, r, path) -} - -// ImportShard imports the contents of r to a given shard. -// All files in the backup are added as new files which may -// cause duplicated data to occur requiring more expensive -// compactions. -func (s *Store) ImportShard(id uint64, r io.Reader) error { - shard := s.Shard(id) - if shard == nil { - return fmt.Errorf("shard %d doesn't exist on this server", id) - } - - path, err := relativePath(s.path, shard.path) - if err != nil { - return err - } - - return shard.Import(r, path) -} - -// ShardRelativePath will return the relative path to the shard, i.e., -// //. -func (s *Store) ShardRelativePath(id uint64) (string, error) { - shard := s.Shard(id) - if shard == nil { - return "", fmt.Errorf("shard %d doesn't exist on this server", id) - } - return relativePath(s.path, shard.path) -} - -// DeleteSeries loops through the local shards and deletes the series data for -// the passed in series keys. -func (s *Store) DeleteSeriesWithPredicate(ctx context.Context, database string, min, max int64, pred influxdb.Predicate, measurement influxql.Expr) error { - s.mu.RLock() - if s.databases[database].hasMultipleIndexTypes() { - s.mu.RUnlock() - return ErrMultipleIndexTypes - } - sfile := s.sfiles[database] - if sfile == nil { - s.mu.RUnlock() - // No series file means nothing has been written to this DB and thus nothing to delete. - return nil - } - shards := s.filterShards(byDatabase(database)) - epochs := s.epochsForShards(shards) - s.mu.RUnlock() - - // Limit to 1 delete for each shard since expanding the measurement into the list - // of series keys can be very memory intensive if run concurrently. - limit := limiter.NewFixed(1) - - return s.walkShards(shards, func(sh *Shard) (err error) { - if err := limit.Take(ctx); err != nil { - return err - } - defer limit.Release() - - // install our guard and wait for any prior deletes to finish. the - // guard ensures future deletes that could conflict wait for us. 
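The delete paths above hand their per-shard closure to walkShards, which fans the work out to one goroutine per shard and funnels errors back over a channel, while callers such as SeriesCardinalityFromShards collect per-shard results under a mutex. A reduced sketch of that fan-out/fan-in shape over plain integers (the shard and set types here are placeholders, not the real ones):

package main

import (
	"fmt"
	"sync"
)

// walk runs fn on each item in its own goroutine and reports one error, if any.
func walk(items []int, fn func(int) error) error {
	errC := make(chan error, len(items))
	for _, it := range items {
		go func(it int) { errC <- fn(it) }(it)
	}
	var firstErr error
	for range items {
		if err := <-errC; err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}

func main() {
	var (
		mu      sync.Mutex
		results []int
	)
	err := walk([]int{1, 2, 3, 4}, func(shardID int) error {
		mu.Lock()
		results = append(results, shardID*10) // stand-in for a per-shard series ID set
		mu.Unlock()
		return nil
	})
	fmt.Println(results, err)
}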
- waiter := epochs[sh.id].WaitDelete(newGuard(min, max, nil, nil)) - waiter.Wait() - defer waiter.Done() - - index, err := sh.Index() - if err != nil { - return err - } - - measurementName := make([]byte, 0) - - if measurement != nil { - if m, ok := measurement.(*influxql.BinaryExpr); ok { - rhs, ok := m.RHS.(*influxql.VarRef) - if ok { - measurementName = []byte(rhs.Val) - exists, err := sh.MeasurementExists(measurementName) - if err != nil { - return err - } - if !exists { - return nil - } - } - } - } - - // Find matching series keys for each measurement. - mitr, err := index.MeasurementIterator() - if err != nil { - return err - } - defer errors3.Capture(&err, mitr.Close)() - - deleteSeries := func(mm []byte) error { - sitr, err := index.MeasurementSeriesIDIterator(mm) - if err != nil { - return err - } else if sitr == nil { - return nil - } - defer errors3.Capture(&err, sitr.Close)() - - itr := NewSeriesIteratorAdapter(sfile, NewPredicateSeriesIDIterator(sitr, sfile, pred)) - return sh.DeleteSeriesRange(ctx, itr, min, max) - } - - for { - mm, err := mitr.Next() - if err != nil { - return err - } else if mm == nil { - break - } - - // If we are deleting within a measurement and have found a match, we can return after the delete. - if measurementName != nil && bytes.Equal(mm, measurementName) { - return deleteSeries(mm) - } else { - err := deleteSeries(mm) - if err != nil { - return err - } - } - } - - return nil - }) -} - -// DeleteSeries loops through the local shards and deletes the series data for -// the passed in series keys. -func (s *Store) DeleteSeries(ctx context.Context, database string, sources []influxql.Source, condition influxql.Expr) error { - // Expand regex expressions in the FROM clause. - a, err := s.ExpandSources(sources) - if err != nil { - return err - } else if len(sources) > 0 && len(a) == 0 { - return nil - } - sources = a - - // Determine deletion time range. - condition, timeRange, err := influxql.ConditionExpr(condition, nil) - if err != nil { - return err - } - - var min, max int64 - if !timeRange.Min.IsZero() { - min = timeRange.Min.UnixNano() - } else { - min = influxql.MinTime - } - if !timeRange.Max.IsZero() { - max = timeRange.Max.UnixNano() - } else { - max = influxql.MaxTime - } - - s.mu.RLock() - if s.databases[database].hasMultipleIndexTypes() { - s.mu.RUnlock() - return ErrMultipleIndexTypes - } - sfile := s.sfiles[database] - if sfile == nil { - s.mu.RUnlock() - // No series file means nothing has been written to this DB and thus nothing to delete. - return nil - } - shards := s.filterShards(byDatabase(database)) - epochs := s.epochsForShards(shards) - s.mu.RUnlock() - - // Limit to 1 delete for each shard since expanding the measurement into the list - // of series keys can be very memory intensive if run concurrently. - limit := limiter.NewFixed(1) - - return s.walkShards(shards, func(sh *Shard) error { - // Determine list of measurements from sources. - // Use all measurements if no FROM clause was provided. - var names []string - if len(sources) > 0 { - for _, source := range sources { - names = append(names, source.(*influxql.Measurement).Name) - } - } else { - if err := sh.ForEachMeasurementName(func(name []byte) error { - names = append(names, string(name)) - return nil - }); err != nil { - return err - } - } - sort.Strings(names) - - if err := limit.Take(ctx); err != nil { - return err - } - defer limit.Release() - - // install our guard and wait for any prior deletes to finish. 
the - // guard ensures future deletes that could conflict wait for us. - waiter := epochs[sh.id].WaitDelete(newGuard(min, max, names, condition)) - waiter.Wait() - defer waiter.Done() - - index, err := sh.Index() - if err != nil { - return err - } - - indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sfile} - // Find matching series keys for each measurement. - for _, name := range names { - itr, err := indexSet.MeasurementSeriesByExprIterator([]byte(name), condition) - if err != nil { - return err - } else if itr == nil { - continue - } - defer itr.Close() - if err := sh.DeleteSeriesRange(ctx, NewSeriesIteratorAdapter(sfile, itr), min, max); err != nil { - return err - } - - } - - return nil - }) -} - -// ExpandSources expands sources against all local shards. -func (s *Store) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { - shards := func() Shards { - s.mu.RLock() - defer s.mu.RUnlock() - return Shards(s.shardsSlice()) - }() - return shards.ExpandSources(sources) -} - -// WriteToShard writes a list of points to a shard identified by its ID. -func (s *Store) WriteToShard(ctx context.Context, shardID uint64, points []models.Point) error { - s.mu.RLock() - - select { - case <-s.closing: - s.mu.RUnlock() - return ErrStoreClosed - default: - } - - sh := s.shards[shardID] - if sh == nil { - s.mu.RUnlock() - return ErrShardNotFound - } - - epoch := s.epochs[shardID] - - s.mu.RUnlock() - - // enter the epoch tracker - guards, gen := epoch.StartWrite() - defer epoch.EndWrite(gen) - - // wait for any guards before writing the points. - for _, guard := range guards { - if guard.Matches(points) { - guard.Wait() - } - } - - // Ensure snapshot compactions are enabled since the shard might have been cold - // and disabled by the monitor. - if isIdle, _ := sh.IsIdle(); isIdle { - sh.SetCompactionsEnabled(true) - } - - return sh.WritePoints(ctx, points) -} - -// MeasurementNames returns a slice of all measurements. Measurements accepts an -// optional condition expression. If cond is nil, then all measurements for the -// database will be returned. -func (s *Store) MeasurementNames(ctx context.Context, auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) { - s.mu.RLock() - shards := s.filterShards(byDatabase(database)) - s.mu.RUnlock() - - sfile := s.seriesFile(database) - if sfile == nil { - return nil, nil - } - - // Build indexset. - is := IndexSet{Indexes: make([]Index, 0, len(shards)), SeriesFile: sfile} - for _, sh := range shards { - index, err := sh.Index() - if err != nil { - return nil, err - } - is.Indexes = append(is.Indexes, index) - } - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - return is.MeasurementNamesByExpr(auth, cond) -} - -type TagKeys struct { - Measurement string - Keys []string -} - -type TagKeysSlice []TagKeys - -func (a TagKeysSlice) Len() int { return len(a) } -func (a TagKeysSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a TagKeysSlice) Less(i, j int) bool { return a[i].Measurement < a[j].Measurement } - -// TagKeys returns the tag keys in the given database, matching the condition. 
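TagKeysSlice above implements sort.Interface so results can be ordered by measurement name. The same pattern on a self-contained toy type, purely for illustration (sort.Slice is the common shortcut when a named type is not needed):

package main

import (
	"fmt"
	"sort"
)

type result struct {
	Measurement string
	Keys        []string
}

type results []result

func (a results) Len() int           { return len(a) }
func (a results) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a results) Less(i, j int) bool { return a[i].Measurement < a[j].Measurement }

func main() {
	rs := results{
		{Measurement: "mem", Keys: []string{"host"}},
		{Measurement: "cpu", Keys: []string{"host", "region"}},
	}
	sort.Sort(rs)
	fmt.Println(rs) // [{cpu [host region]} {mem [host]}]
}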
-func (s *Store) TagKeys(ctx context.Context, auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]TagKeys, error) { - if len(shardIDs) == 0 { - return nil, nil - } - - // take out the _name = 'mymeasurement' clause from 'FROM' clause - measurementExpr, remainingExpr, err := influxql.PartitionExpr(influxql.CloneExpr(cond), func(e influxql.Expr) (bool, error) { - switch e := e.(type) { - case *influxql.BinaryExpr: - switch e.Op { - case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX: - tag, ok := e.LHS.(*influxql.VarRef) - if ok && tag.Val == "_name" { - return true, nil - } - } - } - return false, nil - }) - if err != nil { - return nil, err - } - - // take out the _tagKey = 'mykey' clause from 'WITH KEY' clause - tagKeyExpr, filterExpr, err := influxql.PartitionExpr(remainingExpr, isTagKeyClause) - if err != nil { - return nil, err - } - if err = isBadQuoteTagValueClause(filterExpr); err != nil { - return nil, err - } - - // Get all the shards we're interested in. - is := IndexSet{Indexes: make([]Index, 0, len(shardIDs))} - s.mu.RLock() - for _, sid := range shardIDs { - shard, ok := s.shards[sid] - if !ok { - continue - } - - if is.SeriesFile == nil { - sfile, err := shard.SeriesFile() - if err != nil { - s.mu.RUnlock() - return nil, err - } - is.SeriesFile = sfile - } - - index, err := shard.Index() - if err != nil { - s.mu.RUnlock() - return nil, err - } - is.Indexes = append(is.Indexes, index) - } - s.mu.RUnlock() - - // Determine list of measurements. - names, err := is.MeasurementNamesByExpr(nil, measurementExpr) - if err != nil { - return nil, err - } - - // Iterate over each measurement. - var results []TagKeys - for _, name := range names { - - // Check for timeouts. - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - // Build keyset over all indexes for measurement. - tagKeySet, err := is.MeasurementTagKeysByExpr(name, tagKeyExpr) - if err != nil { - return nil, err - } else if len(tagKeySet) == 0 { - continue - } - - keys := make([]string, 0, len(tagKeySet)) - // If no tag value filter is present then all the tag keys can be returned - // If they have authorized series associated with them. - if filterExpr == nil { - for tagKey := range tagKeySet { - // check for timeouts - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - ok, err := is.TagKeyHasAuthorizedSeries(auth, []byte(name), []byte(tagKey)) - if err != nil { - return nil, err - } else if ok { - keys = append(keys, tagKey) - } - } - sort.Strings(keys) - - // Add to resultset. - results = append(results, TagKeys{ - Measurement: string(name), - Keys: keys, - }) - - continue - } - - // Tag filter provided so filter keys first. - - // Sort the tag keys. - for k := range tagKeySet { - keys = append(keys, k) - } - sort.Strings(keys) - - // Filter against tag values, skip if no values exist. - values, err := is.MeasurementTagKeyValuesByExpr(auth, name, keys, filterExpr, true) - if err != nil { - return nil, err - } - - // Filter final tag keys using the matching values. If a key has one or - // more matching values then it will be included in the final set. - finalKeys := keys[:0] // Use same backing array as keys to save allocation. - for i, k := range keys { - if len(values[i]) > 0 { - // Tag key k has one or more matching tag values. - finalKeys = append(finalKeys, k) - } - } - - // Add to resultset. 
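The finalKeys step above filters the key list in place by reusing the slice's backing array (keys[:0]), avoiding an extra allocation while keeping only keys that matched at least one value. The idiom on its own, with invented data:

package main

import "fmt"

func main() {
	keys := []string{"host", "region", "dc", "rack"}
	hasValues := map[string]bool{"host": true, "rack": true} // stand-in for "key has matching tag values"

	// Keep only matching keys, writing them back over the same backing array.
	final := keys[:0]
	for _, k := range keys {
		if hasValues[k] {
			final = append(final, k)
		}
	}
	fmt.Println(final) // [host rack]
}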
- results = append(results, TagKeys{ - Measurement: string(name), - Keys: finalKeys, - }) - } - return results, nil -} - -type TagValues struct { - Measurement string - Values []KeyValue -} - -type TagValuesSlice []TagValues - -func (a TagValuesSlice) Len() int { return len(a) } -func (a TagValuesSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a TagValuesSlice) Less(i, j int) bool { return a[i].Measurement < a[j].Measurement } - -// tagValues is a temporary representation of a TagValues. Rather than allocating -// KeyValues as we build up a TagValues object, We hold off allocating KeyValues -// until we have merged multiple tagValues together. -type tagValues struct { - name []byte - keys []string - values [][]string -} - -// Is a slice of tagValues that can be sorted by measurement. -type tagValuesSlice []tagValues - -func (a tagValuesSlice) Len() int { return len(a) } -func (a tagValuesSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a tagValuesSlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[j].name) == -1 } - -func isTagKeyClause(e influxql.Expr) (bool, error) { - switch e := e.(type) { - case *influxql.BinaryExpr: - switch e.Op { - case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX: - tag, ok := e.LHS.(*influxql.VarRef) - if ok && tag.Val == "_tagKey" { - return true, nil - } - case influxql.OR, influxql.AND: - ok1, err := isTagKeyClause(e.LHS) - if err != nil { - return false, err - } - ok2, err := isTagKeyClause(e.RHS) - if err != nil { - return false, err - } - return ok1 && ok2, nil - } - case *influxql.ParenExpr: - return isTagKeyClause(e.Expr) - } - return false, nil -} - -func isBadQuoteTagValueClause(e influxql.Expr) error { - switch e := e.(type) { - case *influxql.BinaryExpr: - switch e.Op { - case influxql.EQ, influxql.NEQ: - _, lOk := e.LHS.(*influxql.VarRef) - _, rOk := e.RHS.(*influxql.VarRef) - if lOk && rOk { - return fmt.Errorf("bad WHERE clause for metaquery; one term must be a string literal tag value within single quotes: %s", e.String()) - } - case influxql.OR, influxql.AND: - if err := isBadQuoteTagValueClause(e.LHS); err != nil { - return err - } else if err = isBadQuoteTagValueClause(e.RHS); err != nil { - return err - } else { - return nil - } - } - case *influxql.ParenExpr: - return isBadQuoteTagValueClause(e.Expr) - } - return nil -} - -// TagValues returns the tag keys and values for the provided shards, where the -// tag values satisfy the provided condition. 
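isTagKeyClause above walks the condition recursively: a comparison qualifies only when its left side is the _tagKey variable, and AND/OR nodes qualify only when both children do. A toy sketch of that shape, deliberately not using the real influxql AST (the expr type below is invented for illustration):

package main

import "fmt"

// expr is a toy stand-in for an influxql expression node, just for illustration.
type expr struct {
	op   string // "EQ", "AND", "OR"
	tag  string // left-hand tag name for comparisons
	l, r *expr
}

// isTagKeyClause mirrors the shape of the real check: comparisons qualify only
// when they target _tagKey, and AND/OR require both sides to qualify.
func isTagKeyClause(e *expr) bool {
	switch e.op {
	case "EQ":
		return e.tag == "_tagKey"
	case "AND", "OR":
		return isTagKeyClause(e.l) && isTagKeyClause(e.r)
	}
	return false
}

func main() {
	cond := &expr{
		op: "OR",
		l:  &expr{op: "EQ", tag: "_tagKey"},
		r:  &expr{op: "EQ", tag: "host"},
	}
	fmt.Println(isTagKeyClause(cond)) // false: the right side compares a normal tag
}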
-func (s *Store) TagValues(ctx context.Context, auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]TagValues, error) { - if len(shardIDs) == 0 { - return nil, nil - } - - if cond == nil { - return nil, errors.New("a condition is required") - } - - // take out the _name = 'mymeasurement' clause from 'FROM' clause - measurementExpr, remainingExpr, err := influxql.PartitionExpr(influxql.CloneExpr(cond), func(e influxql.Expr) (bool, error) { - switch e := e.(type) { - case *influxql.BinaryExpr: - switch e.Op { - case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX: - tag, ok := e.LHS.(*influxql.VarRef) - if ok && tag.Val == "_name" { - return true, nil - } - } - } - return false, nil - }) - if err != nil { - return nil, err - } - - // take out the _tagKey = 'mykey' clause from 'WITH KEY' / 'WITH KEY IN' clause - tagKeyExpr, filterExpr, err := influxql.PartitionExpr(remainingExpr, isTagKeyClause) - if err != nil { - return nil, err - } - if err = isBadQuoteTagValueClause(filterExpr); err != nil { - return nil, err - } - // Build index set to work on. - is := IndexSet{Indexes: make([]Index, 0, len(shardIDs))} - s.mu.RLock() - for _, sid := range shardIDs { - shard, ok := s.shards[sid] - if !ok { - continue - } - - if is.SeriesFile == nil { - sfile, err := shard.SeriesFile() - if err != nil { - s.mu.RUnlock() - return nil, err - } - is.SeriesFile = sfile - } - - index, err := shard.Index() - if err != nil { - s.mu.RUnlock() - return nil, err - } - - is.Indexes = append(is.Indexes, index) - } - s.mu.RUnlock() - - var maxMeasurements int // Hint as to lower bound on number of measurements. - // names will be sorted by MeasurementNamesByExpr. - // Authorisation can be done later on, when series may have been filtered - // out by other conditions. - names, err := is.MeasurementNamesByExpr(nil, measurementExpr) - if err != nil { - return nil, err - } - - if len(names) > maxMeasurements { - maxMeasurements = len(names) - } - - // Stores each list of TagValues for each measurement. - allResults := make([]tagValues, 0, len(names)) - - // Iterate over each matching measurement in the shard. For each - // measurement we'll get the matching tag keys (e.g., when a WITH KEYS) - // statement is used, and we'll then use those to fetch all the relevant - // values from matching series. Series may be filtered using a WHERE - // filter. - for _, name := range names { - // check for timeouts - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - // Determine a list of keys from condition. - keySet, err := is.MeasurementTagKeysByExpr(name, tagKeyExpr) - if err != nil { - return nil, err - } - - if len(keySet) == 0 { - // No matching tag keys for this measurement - continue - } - - result := tagValues{ - name: name, - keys: make([]string, 0, len(keySet)), - } - - // Add the keys to the tagValues and sort them. - for k := range keySet { - result.keys = append(result.keys, k) - } - sort.Strings(result.keys) - - // get all the tag values for each key in the keyset. - // Each slice in the results contains the sorted values associated - // associated with each tag key for the measurement from the key set. 
- if result.values, err = is.MeasurementTagKeyValuesByExpr(auth, name, result.keys, filterExpr, true); err != nil { - return nil, err - } - - // remove any tag keys that didn't have any authorized values - j := 0 - for i := range result.keys { - if len(result.values[i]) == 0 { - continue - } - - result.keys[j] = result.keys[i] - result.values[j] = result.values[i] - j++ - } - result.keys = result.keys[:j] - result.values = result.values[:j] - - // only include result if there are keys with values - if len(result.keys) > 0 { - allResults = append(allResults, result) - } - } - - // Not sure this is necessary, should be pre-sorted - sort.Sort(tagValuesSlice(allResults)) - - result := make([]TagValues, 0, maxMeasurements) - for _, r := range allResults { - // check for timeouts - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - nextResult := makeTagValues(r) - if len(nextResult.Values) > 0 { - result = append(result, nextResult) - } - } - return result, nil -} - -func makeTagValues(tv tagValues) TagValues { - var result TagValues - result.Measurement = string(tv.name) - // TODO(edd): will be too small likely. Find a hint? - result.Values = make([]KeyValue, 0, len(tv.values)) - - for ki, key := range tv.keys { - for _, value := range tv.values[ki] { - result.Values = append(result.Values, KeyValue{Key: key, Value: value}) - } - } - return result -} - -func (s *Store) monitorShards() { - t := time.NewTicker(10 * time.Second) - defer t.Stop() - for { - select { - case <-s.closing: - return - case <-t.C: - s.mu.RLock() - for _, sh := range s.shards { - if isIdle, _ := sh.IsIdle(); isIdle { - if err := sh.Free(); err != nil { - s.Logger.Warn("Error while freeing cold shard resources", - zap.Error(err), - logger.Shard(sh.ID())) - } - } else { - sh.SetCompactionsEnabled(true) - } - } - s.mu.RUnlock() - } - } -} - -func (s *Store) collectMetrics() { - t := time.NewTicker(10 * time.Second) - defer t.Stop() - for { - select { - case <-s.closing: - return - case <-t.C: - s.CollectBucketMetrics() - } - } -} - -// KeyValue holds a string key and a string value. -type KeyValue struct { - Key, Value string -} - -// KeyValues is a sortable slice of KeyValue. -type KeyValues []KeyValue - -// Len implements sort.Interface. -func (a KeyValues) Len() int { return len(a) } - -// Swap implements sort.Interface. -func (a KeyValues) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// Less implements sort.Interface. Keys are compared before values. -func (a KeyValues) Less(i, j int) bool { - ki, kj := a[i].Key, a[j].Key - if ki == kj { - return a[i].Value < a[j].Value - } - return ki < kj -} - -// decodeStorePath extracts the database and retention policy names -// from a given shard or WAL path. -func decodeStorePath(shardOrWALPath string) (database, retentionPolicy string) { - // shardOrWALPath format: /maybe/absolute/base/then/:database/:retentionPolicy/:nameOfShardOrWAL - - // Discard the last part of the path (the shard name or the wal name). - path, _ := filepath.Split(filepath.Clean(shardOrWALPath)) - - // Extract the database and retention policy. 
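monitorShards and collectMetrics above share the same background-loop shape: a time.Ticker drives periodic work and a closing channel ends the goroutine when the store shuts down. Stripped to its essentials, with placeholder work:

package main

import (
	"fmt"
	"time"
)

// runEvery calls fn on every tick until closing is closed.
func runEvery(interval time.Duration, closing <-chan struct{}, fn func()) {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-closing:
			return
		case <-t.C:
			fn()
		}
	}
}

func main() {
	closing := make(chan struct{})
	go runEvery(20*time.Millisecond, closing, func() { fmt.Println("tick: collect shard metrics") })
	time.Sleep(70 * time.Millisecond)
	close(closing) // mirrors closing s.closing on shutdown
	time.Sleep(10 * time.Millisecond)
}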
- path, rp := filepath.Split(filepath.Clean(path)) - _, db := filepath.Split(filepath.Clean(path)) - return db, rp -} - -// relativePath will expand out the full paths passed in and return -// the relative shard path from the store -func relativePath(storePath, shardPath string) (string, error) { - path, err := filepath.Abs(storePath) - if err != nil { - return "", fmt.Errorf("store abs path: %s", err) - } - - fp, err := filepath.Abs(shardPath) - if err != nil { - return "", fmt.Errorf("file abs path: %s", err) - } - - name, err := filepath.Rel(path, fp) - if err != nil { - return "", fmt.Errorf("file rel path: %s", err) - } - - return name, nil -} - -type shardSet struct { - store *Store - db string -} - -func (s shardSet) ForEach(f func(ids *SeriesIDSet)) error { - s.store.mu.RLock() - shards := s.store.filterShards(byDatabase(s.db)) - s.store.mu.RUnlock() - - for _, sh := range shards { - idx, err := sh.Index() - if err != nil { - return err - } - - f(idx.SeriesIDSet()) - } - return nil -} diff --git a/tsdb/store_test.go b/tsdb/store_test.go deleted file mode 100644 index 89c8e0fc98b..00000000000 --- a/tsdb/store_test.go +++ /dev/null @@ -1,2515 +0,0 @@ -//lint:file-ignore SA2002 this is older code, and `go test` will panic if its really a problem. -package tsdb_test - -import ( - "bytes" - "context" - "errors" - "fmt" - "github.com/influxdata/influxdb/v2/predicate" - "math" - "math/rand" - "os" - "path/filepath" - "reflect" - "regexp" - "sort" - "strings" - "sync" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/internal" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/deep" - "github.com/influxdata/influxdb/v2/pkg/slices" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxql" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -// Ensure the store can delete a retention policy and all shards under -// it. -func TestStore_DeleteRetentionPolicy(t *testing.T) { - - test := func(t *testing.T, index string) { - s := MustOpenStore(t, index) - defer s.Close() - - // Create a new shard and verify that it exists. - if err := s.CreateShard(context.Background(), "db0", "rp0", 1, true); err != nil { - t.Fatal(err) - } else if sh := s.Shard(1); sh == nil { - t.Fatalf("expected shard") - } - - // Create a new shard under the same retention policy, and verify - // that it exists. - if err := s.CreateShard(context.Background(), "db0", "rp0", 2, true); err != nil { - t.Fatal(err) - } else if sh := s.Shard(2); sh == nil { - t.Fatalf("expected shard") - } - - // Create a new shard under a different retention policy, and - // verify that it exists. - if err := s.CreateShard(context.Background(), "db0", "rp1", 3, true); err != nil { - t.Fatal(err) - } else if sh := s.Shard(3); sh == nil { - t.Fatalf("expected shard") - } - - // Deleting the rp0 retention policy does not return an error. - if err := s.DeleteRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - // It deletes the shards under that retention policy. - if sh := s.Shard(1); sh != nil { - t.Errorf("shard 1 was not deleted") - } - - if sh := s.Shard(2); sh != nil { - t.Errorf("shard 2 was not deleted") - } - - // It deletes the retention policy directory. 
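relativePath, defined above, normalizes both paths with filepath.Abs and then asks filepath.Rel for the shard path relative to the store. The same steps in isolation; the paths below are made up for illustration:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Made-up paths, just to show the Abs + Rel steps.
	store := "/var/lib/influxdb/data"
	shard := "/var/lib/influxdb/data/db0/rp0/1"

	storeAbs, err := filepath.Abs(store)
	if err != nil {
		panic(err)
	}
	shardAbs, err := filepath.Abs(shard)
	if err != nil {
		panic(err)
	}
	rel, err := filepath.Rel(storeAbs, shardAbs)
	if err != nil {
		panic(err)
	}
	fmt.Println(rel) // db0/rp0/1 on Unix-like systems
}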
- if got, exp := dirExists(filepath.Join(s.Path(), "db0", "rp0")), false; got != exp { - t.Error("directory exists, but should have been removed") - } - - // It deletes the WAL retention policy directory. - if got, exp := dirExists(filepath.Join(s.EngineOptions.Config.WALDir, "db0", "rp0")), false; got != exp { - t.Error("directory exists, but should have been removed") - } - - // Reopen other shard and check it still exists. - if err := s.Reopen(t); err != nil { - t.Error(err) - } else if sh := s.Shard(3); sh == nil { - t.Errorf("shard 3 does not exist") - } - - // It does not delete other retention policy directories. - if got, exp := dirExists(filepath.Join(s.Path(), "db0", "rp1")), true; got != exp { - t.Error("directory does not exist, but should") - } - if got, exp := dirExists(filepath.Join(s.EngineOptions.Config.WALDir, "db0", "rp1")), true; got != exp { - t.Error("directory does not exist, but should") - } - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -// Ensure the store can create a new shard. -func TestStore_CreateShard(t *testing.T) { - - test := func(t *testing.T, index string) { - s := MustOpenStore(t, index) - defer s.Close() - - // Create a new shard and verify that it exists. - if err := s.CreateShard(context.Background(), "db0", "rp0", 1, true); err != nil { - t.Fatal(err) - } else if sh := s.Shard(1); sh == nil { - t.Fatalf("expected shard") - } - - // Create another shard and verify that it exists. - if err := s.CreateShard(context.Background(), "db0", "rp0", 2, true); err != nil { - t.Fatal(err) - } else if sh := s.Shard(2); sh == nil { - t.Fatalf("expected shard") - } - - // Reopen shard and recheck. - if err := s.Reopen(t); err != nil { - t.Fatal(err) - } else if sh := s.Shard(1); sh == nil { - t.Fatalf("expected shard(1)") - } else if sh = s.Shard(2); sh == nil { - t.Fatalf("expected shard(2)") - } - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -func TestStore_BadShard(t *testing.T) { - const errStr = "a shard open error" - indexes := tsdb.RegisteredIndexes() - for _, idx := range indexes { - func() { - s := MustOpenStore(t, idx) - defer require.NoErrorf(t, s.Close(), "closing store with index type: %s", idx) - - sh := tsdb.NewTempShard(t, idx) - err := s.OpenShard(context.Background(), sh.Shard, false) - require.NoError(t, err, "opening temp shard") - require.NoError(t, sh.Close(), "closing temporary shard") - - s.SetShardOpenErrorForTest(sh.ID(), errors.New(errStr)) - err2 := s.OpenShard(context.Background(), sh.Shard, false) - require.Error(t, err2, "no error opening bad shard") - require.True(t, errors.Is(err2, tsdb.ErrPreviousShardFail{}), "exp: ErrPreviousShardFail, got: %v", err2) - require.EqualError(t, err2, "opening shard previously failed with: "+errStr) - - // This should succeed with the force (and because opening an open shard automatically succeeds) - require.NoError(t, s.OpenShard(context.Background(), sh.Shard, true), "forced re-opening previously failing shard") - require.NoError(t, sh.Close()) - }() - } -} - -func TestStore_DropConcurrentWriteMultipleShards(t *testing.T) { - - test := func(t *testing.T, index string) { - s := MustOpenStore(t, index) - defer s.Close() - - if err := s.CreateShard(context.Background(), "db0", "rp0", 1, true); err != nil { - t.Fatal(err) - } - - s.MustWriteToShardString(1, "mem,server=a v=1 10") - - if err := s.CreateShard(context.Background(), "db0", "rp0", 2, true); err 
!= nil { - t.Fatal(err) - } - - s.MustWriteToShardString(2, "mem,server=b v=1 20") - - var wg sync.WaitGroup - wg.Add(2) - - go func() { - defer wg.Done() - for i := 0; i < 50; i++ { - s.MustWriteToShardString(1, "cpu,server=a v=1 10") - s.MustWriteToShardString(2, "cpu,server=b v=1 20") - } - }() - - go func() { - defer wg.Done() - for i := 0; i < 50; i++ { - err := s.DeleteMeasurement(context.Background(), "db0", "cpu") - if err != nil { - t.Error(err) - return - } - } - }() - - wg.Wait() - - err := s.DeleteMeasurement(context.Background(), "db0", "cpu") - if err != nil { - t.Fatal(err) - } - - measurements, err := s.MeasurementNames(context.Background(), query.OpenAuthorizer, "db0", nil) - if err != nil { - t.Fatal(err) - } - - exp := [][]byte{[]byte("mem")} - if got, exp := measurements, exp; !reflect.DeepEqual(got, exp) { - t.Fatal(fmt.Errorf("got measurements %v, expected %v", got, exp)) - } - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -func TestStore_WriteMixedShards(t *testing.T) { - - test := func(t *testing.T, index1 string, index2 string) { - s := MustOpenStore(t, index1) - defer s.Close() - - if err := s.CreateShard(context.Background(), "db0", "rp0", 1, true); err != nil { - t.Fatal(err) - } - - s.MustWriteToShardString(1, "mem,server=a v=1 10") - - s.EngineOptions.IndexVersion = index2 - s.index = index2 - if err := s.Reopen(t); err != nil { - t.Fatal(err) - } - - if err := s.CreateShard(context.Background(), "db0", "rp0", 2, true); err != nil { - t.Fatal(err) - } - - s.MustWriteToShardString(2, "mem,server=b v=1 20") - - var wg sync.WaitGroup - wg.Add(2) - - go func() { - defer wg.Done() - for i := 0; i < 50; i++ { - s.MustWriteToShardString(1, fmt.Sprintf("cpu,server=a,f%0.2d=a v=1", i*2)) - } - }() - - go func() { - defer wg.Done() - for i := 0; i < 50; i++ { - s.MustWriteToShardString(2, fmt.Sprintf("cpu,server=b,f%0.2d=b v=1 20", i*2+1)) - } - }() - - wg.Wait() - - keys, err := s.TagKeys(context.Background(), nil, []uint64{1, 2}, nil) - if err != nil { - t.Fatal(err) - } - - cpuKeys := make([]string, 101) - for i := 0; i < 100; i++ { - cpuKeys[i] = fmt.Sprintf("f%0.2d", i) - } - cpuKeys[100] = "server" - expKeys := []tsdb.TagKeys{ - {Measurement: "cpu", Keys: cpuKeys}, - {Measurement: "mem", Keys: []string{"server"}}, - } - if got, exp := keys, expKeys; !reflect.DeepEqual(got, exp) { - t.Fatalf("got keys %v, expected %v", got, exp) - } - } - - indexes := tsdb.RegisteredIndexes() - for i := range indexes { - j := (i + 1) % len(indexes) - index1 := indexes[i] - index2 := indexes[j] - t.Run(fmt.Sprintf("%s-%s", index1, index2), func(t *testing.T) { test(t, index1, index2) }) - } -} - -// Ensure the store does not return an error when delete from a non-existent db. -func TestStore_DeleteSeries_NonExistentDB(t *testing.T) { - - test := func(t *testing.T, index string) { - s := MustOpenStore(t, index) - defer s.Close() - - if err := s.DeleteSeries(context.Background(), "db0", nil, nil); err != nil { - t.Fatal(err.Error()) - } - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -// Ensure the store can delete an existing shard. -func TestStore_DeleteShard(t *testing.T) { - - test := func(t *testing.T, index string) error { - s := MustOpenStore(t, index) - defer s.Close() - - // Create a new shard and verify that it exists. 
- if err := s.CreateShard(context.Background(), "db0", "rp0", 1, true); err != nil { - return err - } else if sh := s.Shard(1); sh == nil { - return fmt.Errorf("expected shard") - } - - // Create another shard. - if err := s.CreateShard(context.Background(), "db0", "rp0", 2, true); err != nil { - return err - } else if sh := s.Shard(2); sh == nil { - return fmt.Errorf("expected shard") - } - - // and another, but in a different db. - if err := s.CreateShard(context.Background(), "db1", "rp0", 3, true); err != nil { - return err - } else if sh := s.Shard(3); sh == nil { - return fmt.Errorf("expected shard") - } - - // Write series data to the db0 shards. - s.MustWriteToShardString(1, "cpu,servera=a v=1", "cpu,serverb=b v=1", "mem,serverc=a v=1") - s.MustWriteToShardString(2, "cpu,servera=a v=1", "mem,serverc=a v=1") - - // Write similar data to db1 database - s.MustWriteToShardString(3, "cpu,serverb=b v=1") - - // Reopen the store and check all shards still exist - if err := s.Reopen(t); err != nil { - return err - } - for i := uint64(1); i <= 3; i++ { - if sh := s.Shard(i); sh == nil { - return fmt.Errorf("shard %d missing", i) - } - } - - // Remove the first shard from the store. - if err := s.DeleteShard(1); err != nil { - return err - } - - // cpu,serverb=b should be removed from the series file for db0 because - // shard 1 was the only owner of that series. - // Verify by getting all tag keys. - keys, err := s.TagKeys(context.Background(), nil, []uint64{2}, nil) - if err != nil { - return err - } - - expKeys := []tsdb.TagKeys{ - {Measurement: "cpu", Keys: []string{"servera"}}, - {Measurement: "mem", Keys: []string{"serverc"}}, - } - if got, exp := keys, expKeys; !reflect.DeepEqual(got, exp) { - return fmt.Errorf("got keys %v, expected %v", got, exp) - } - - // Verify that the same series was not removed from other databases' - // series files. - if keys, err = s.TagKeys(context.Background(), nil, []uint64{3}, nil); err != nil { - return err - } - - expKeys = []tsdb.TagKeys{{Measurement: "cpu", Keys: []string{"serverb"}}} - if got, exp := keys, expKeys; !reflect.DeepEqual(got, exp) { - return fmt.Errorf("got keys %v, expected %v", got, exp) - } - return nil - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - if err := test(t, index); err != nil { - t.Error(err) - } - }) - } -} - -// Ensure the store can create a snapshot to a shard. -func TestStore_CreateShardSnapShot(t *testing.T) { - - test := func(t *testing.T, index string) { - s := MustOpenStore(t, index) - defer s.Close() - - // Create a new shard and verify that it exists. 
- if err := s.CreateShard(context.Background(), "db0", "rp0", 1, true); err != nil { - t.Fatal(err) - } else if sh := s.Shard(1); sh == nil { - t.Fatalf("expected shard") - } - - dir, e := s.CreateShardSnapshot(1, false) - if e != nil { - t.Fatal(e) - } - if dir == "" { - t.Fatal("empty directory name") - } - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -func TestStore_Open(t *testing.T) { - - test := func(t *testing.T, index string) { - s := NewStore(t, index) - defer s.Close() - - if err := os.MkdirAll(filepath.Join(s.Path(), "db0", "rp0", "2"), 0777); err != nil { - t.Fatal(err) - } - - if err := os.MkdirAll(filepath.Join(s.Path(), "db0", "rp2", "4"), 0777); err != nil { - t.Fatal(err) - } - - if err := os.MkdirAll(filepath.Join(s.Path(), "db1", "rp0", "1"), 0777); err != nil { - t.Fatal(err) - } - - // Store should ignore shard since it does not have a numeric name. - if err := s.Open(context.Background()); err != nil { - t.Fatal(err) - } else if n := len(s.Databases()); n != 2 { - t.Fatalf("unexpected database index count: %d", n) - } else if n := s.ShardN(); n != 3 { - t.Fatalf("unexpected shard count: %d", n) - } - - expDatabases := []string{"db0", "db1"} - gotDatabases := s.Databases() - sort.Strings(gotDatabases) - - if got, exp := gotDatabases, expDatabases; !reflect.DeepEqual(got, exp) { - t.Fatalf("got %#v, expected %#v", got, exp) - } - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -// Ensure the store reports an error when it can't open a database directory. -func TestStore_Open_InvalidDatabaseFile(t *testing.T) { - - test := func(t *testing.T, index string) { - s := NewStore(t, index) - defer s.Close() - - // Create a file instead of a directory for a database. - f, err := os.Create(filepath.Join(s.Path(), "db0")) - if err != nil { - t.Fatal(err) - } - require.NoError(t, f.Close()) - - // Store should ignore database since it's a file. - if err := s.Open(context.Background()); err != nil { - t.Fatal(err) - } else if n := len(s.Databases()); n != 0 { - t.Fatalf("unexpected database index count: %d", n) - } - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -// Ensure the store reports an error when it can't open a retention policy. -func TestStore_Open_InvalidRetentionPolicy(t *testing.T) { - - test := func(t *testing.T, index string) { - s := NewStore(t, index) - defer s.Close() - - // Create an RP file instead of a directory. - if err := os.MkdirAll(filepath.Join(s.Path(), "db0"), 0777); err != nil { - t.Fatal(err) - } - - f, err := os.Create(filepath.Join(s.Path(), "db0", "rp0")) - if err != nil { - t.Fatal(err) - } - require.NoError(t, f.Close()) - - // Store should ignore retention policy since it's a file, and there should - // be no indices created. - if err := s.Open(context.Background()); err != nil { - t.Fatal(err) - } else if n := len(s.Databases()); n != 0 { - t.Log(s.Databases()) - t.Fatalf("unexpected database index count: %d", n) - } - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -// Ensure the store reports an error when it can't open a retention policy. -func TestStore_Open_InvalidShard(t *testing.T) { - - test := func(t *testing.T, index string) { - s := NewStore(t, index) - defer s.Close() - - // Create a non-numeric shard file. 
- if err := os.MkdirAll(filepath.Join(s.Path(), "db0", "rp0"), 0777); err != nil { - t.Fatal(err) - } - - f, err := os.Create(filepath.Join(s.Path(), "db0", "rp0", "bad_shard")) - if err != nil { - t.Fatal(err) - } - require.NoError(t, f.Close()) - - // Store should ignore shard since it does not have a numeric name. - if err := s.Open(context.Background()); err != nil { - t.Fatal(err) - } else if n := len(s.Databases()); n != 0 { - t.Fatalf("unexpected database index count: %d", n) - } else if n := s.ShardN(); n != 0 { - t.Fatalf("unexpected shard count: %d", n) - } - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -// Ensure shards can create iterators. -func TestShards_CreateIterator(t *testing.T) { - - test := func(t *testing.T, index string) { - s := MustOpenStore(t, index) - defer s.Close() - - // Create shard #0 with data. - s.MustCreateShardWithData("db0", "rp0", 0, - `cpu,host=serverA value=1 0`, - `cpu,host=serverA value=2 10`, - `cpu,host=serverB value=3 20`, - ) - - // Create shard #1 with data. - s.MustCreateShardWithData("db0", "rp0", 1, - `cpu,host=serverA value=1 30`, - `mem,host=serverA value=2 40`, // skip: wrong source - `cpu,host=serverC value=3 60`, - ) - - // Retrieve shard group. - shards := s.ShardGroup([]uint64{0, 1}) - - // Create iterator. - m := &influxql.Measurement{Name: "cpu"} - itr, err := shards.CreateIterator(context.Background(), m, query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Dimensions: []string{"host"}, - Ascending: true, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - }) - if err != nil { - t.Fatal(err) - } - defer itr.Close() - fitr := itr.(query.FloatIterator) - - // Read values from iterator. The host=serverA points should come first. - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(0): %s", err) - } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverA"), Time: time.Unix(0, 0).UnixNano(), Value: 1}) { - t.Fatalf("unexpected point(0): %s", spew.Sdump(p)) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(1): %s", err) - } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverA"), Time: time.Unix(10, 0).UnixNano(), Value: 2}) { - t.Fatalf("unexpected point(1): %s", spew.Sdump(p)) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(2): %s", err) - } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverA"), Time: time.Unix(30, 0).UnixNano(), Value: 1}) { - t.Fatalf("unexpected point(2): %s", spew.Sdump(p)) - } - - // Next the host=serverB point. - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(3): %s", err) - } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverB"), Time: time.Unix(20, 0).UnixNano(), Value: 3}) { - t.Fatalf("unexpected point(3): %s", spew.Sdump(p)) - } - - // And finally the host=serverC point. - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(4): %s", err) - } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverC"), Time: time.Unix(60, 0).UnixNano(), Value: 3}) { - t.Fatalf("unexpected point(4): %s", spew.Sdump(p)) - } - - // Then an EOF should occur. 
- if p, err := fitr.Next(); err != nil { - t.Fatalf("expected eof, got error: %s", err) - } else if p != nil { - t.Fatalf("expected eof, got: %s", spew.Sdump(p)) - } - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -// Ensure the store can backup a shard and another store can restore it. -func TestStore_BackupRestoreShard(t *testing.T) { - test := func(t *testing.T, index string) { - s0, s1 := MustOpenStore(t, index), MustOpenStore(t, index) - defer s0.Close() - defer s1.Close() - - // Create shard with data. - s0.MustCreateShardWithData("db0", "rp0", 100, - `cpu value=1 0`, - `cpu value=2 10`, - `cpu value=3 20`, - ) - - if err := s0.Reopen(t); err != nil { - t.Fatal(err) - } - - // Backup shard to a buffer. - var buf bytes.Buffer - if err := s0.BackupShard(100, time.Time{}, &buf); err != nil { - t.Fatal(err) - } - - // Create the shard on the other store and restore from buffer. - if err := s1.CreateShard(context.Background(), "db0", "rp0", 100, true); err != nil { - t.Fatal(err) - } - if err := s1.RestoreShard(context.Background(), 100, &buf); err != nil { - t.Fatal(err) - } - - // Read data from - m := &influxql.Measurement{Name: "cpu"} - itr, err := s0.Shard(100).CreateIterator(context.Background(), m, query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Ascending: true, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - }) - if err != nil { - t.Fatal(err) - } - defer itr.Close() - fitr := itr.(query.FloatIterator) - - // Read values from iterator. The host=serverA points should come first. - p, e := fitr.Next() - if e != nil { - t.Fatal(e) - } - if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Time: time.Unix(0, 0).UnixNano(), Value: 1}) { - t.Fatalf("unexpected point(0): %s", spew.Sdump(p)) - } - p, e = fitr.Next() - if e != nil { - t.Fatal(e) - } - if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Time: time.Unix(10, 0).UnixNano(), Value: 2}) { - t.Fatalf("unexpected point(1): %s", spew.Sdump(p)) - } - p, e = fitr.Next() - if e != nil { - t.Fatal(e) - } - if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Time: time.Unix(20, 0).UnixNano(), Value: 3}) { - t.Fatalf("unexpected point(2): %s", spew.Sdump(p)) - } - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - test(t, index) - }) - } -} -func TestStore_Shard_SeriesN(t *testing.T) { - - test := func(t *testing.T, index string) error { - s := MustOpenStore(t, index) - defer s.Close() - - // Create shard with data. - s.MustCreateShardWithData("db0", "rp0", 1, - `cpu value=1 0`, - `cpu,host=serverA value=2 10`, - ) - - // Create 2nd shard w/ same measurements. - s.MustCreateShardWithData("db0", "rp0", 2, - `cpu value=1 0`, - `cpu value=2 10`, - ) - - if got, exp := s.Shard(1).SeriesN(), int64(2); got != exp { - return fmt.Errorf("[shard %d] got series count of %d, but expected %d", 1, got, exp) - } else if got, exp := s.Shard(2).SeriesN(), int64(1); got != exp { - return fmt.Errorf("[shard %d] got series count of %d, but expected %d", 2, got, exp) - } - return nil - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - if err := test(t, index); err != nil { - t.Error(err) - } - }) - } -} - -func TestStore_MeasurementNames_Deduplicate(t *testing.T) { - - test := func(t *testing.T, index string) { - s := MustOpenStore(t, index) - defer s.Close() - - // Create shard with data. 
- s.MustCreateShardWithData("db0", "rp0", 1, - `cpu value=1 0`, - `cpu value=2 10`, - `cpu value=3 20`, - ) - - // Create 2nd shard w/ same measurements. - s.MustCreateShardWithData("db0", "rp0", 2, - `cpu value=1 0`, - `cpu value=2 10`, - `cpu value=3 20`, - ) - - meas, err := s.MeasurementNames(context.Background(), query.OpenAuthorizer, "db0", nil) - if err != nil { - t.Fatalf("unexpected error with MeasurementNames: %v", err) - } - - if exp, got := 1, len(meas); exp != got { - t.Fatalf("measurement len mismatch: exp %v, got %v", exp, got) - } - - if exp, got := "cpu", string(meas[0]); exp != got { - t.Fatalf("measurement name mismatch: exp %v, got %v", exp, got) - } - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -func testStoreCardinalityTombstoning(t *testing.T, store *Store) { - // Generate point data to write to the shards. - series := genTestSeries(10, 2, 4) // 160 series - - points := make([]models.Point, 0, len(series)) - for _, s := range series { - points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) - } - - // Create requested number of shards in the store & write points across - // shards such that we never write the same series to multiple shards. - for shardID := 0; shardID < 4; shardID++ { - if err := store.CreateShard(context.Background(), "db", "rp", uint64(shardID), true); err != nil { - t.Errorf("create shard: %s", err) - } - - if err := store.BatchWrite(shardID, points[shardID*40:(shardID+1)*40]); err != nil { - t.Errorf("batch write: %s", err) - } - } - - // Delete all the series for each measurement. - mnames, err := store.MeasurementNames(context.Background(), nil, "db", nil) - if err != nil { - t.Fatal(err) - } - - for _, name := range mnames { - if err := store.DeleteSeries(context.Background(), "db", []influxql.Source{&influxql.Measurement{Name: string(name)}}, nil); err != nil { - t.Fatal(err) - } - } - - // Estimate the series cardinality... - cardinality, err := store.Store.SeriesCardinality(context.Background(), "db") - if err != nil { - t.Fatal(err) - } - - // Estimated cardinality should be well within 10 of the actual cardinality. - if got, exp := int(cardinality), 10; got > exp { - t.Errorf("series cardinality was %v (expected within %v), expected was: %d", got, exp, 0) - } - - // Since all the series have been deleted, all the measurements should have - // been removed from the index too. - if cardinality, err = store.Store.MeasurementsCardinality(context.Background(), "db"); err != nil { - t.Fatal(err) - } - - // Estimated cardinality should be well within 2 of the actual cardinality. - // TODO(edd): this is totally arbitrary. How can I make it better? 
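The tests in this file are each written once and then fanned out over tsdb.RegisteredIndexes() with t.Run, so every index implementation gets its own subtest. The skeleton of that pattern, with an invented placeholder index list instead of the real registry:

package example_test

import "testing"

func TestPerIndex(t *testing.T) {
	test := func(t *testing.T, index string) {
		// Body exercising one index implementation goes here.
		if index == "" {
			t.Fatal("index name must not be empty")
		}
	}

	// Placeholder list standing in for tsdb.RegisteredIndexes().
	for _, index := range []string{"indexA", "indexB"} {
		index := index
		t.Run(index, func(t *testing.T) { test(t, index) })
	}
}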
- if got, exp := int(cardinality), 2; got > exp { - t.Errorf("measurement cardinality was %v (expected within %v), expected was: %d", got, exp, 0) - } -} - -func TestStore_Cardinality_Tombstoning(t *testing.T) { - - if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" || os.Getenv("CIRCLECI") != "" { - t.Skip("Skipping test in short, race, circleci and appveyor mode.") - } - - test := func(t *testing.T, index string) { - store := NewStore(t, index) - if err := store.Open(context.Background()); err != nil { - panic(err) - } - defer store.Close() - testStoreCardinalityTombstoning(t, store) - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -func testStoreCardinalityUnique(t *testing.T, store *Store) { - // Generate point data to write to the shards. - series := genTestSeries(64, 5, 5) // 200,000 series - expCardinality := len(series) - - points := make([]models.Point, 0, len(series)) - for _, s := range series { - points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) - } - - // Create requested number of shards in the store & write points across - // shards such that we never write the same series to multiple shards. - for shardID := 0; shardID < 10; shardID++ { - if err := store.CreateShard(context.Background(), "db", "rp", uint64(shardID), true); err != nil { - t.Fatalf("create shard: %s", err) - } - if err := store.BatchWrite(shardID, points[shardID*20000:(shardID+1)*20000]); err != nil { - t.Fatalf("batch write: %s", err) - } - } - - // Estimate the series cardinality... - cardinality, err := store.Store.SeriesCardinality(context.Background(), "db") - if err != nil { - t.Fatal(err) - } - - // Estimated cardinality should be well within 1.5% of the actual cardinality. - if got, exp := math.Abs(float64(cardinality)-float64(expCardinality))/float64(expCardinality), 0.015; got > exp { - t.Errorf("got epsilon of %v for series cardinality %v (expected %v), which is larger than expected %v", got, cardinality, expCardinality, exp) - } - - // Estimate the measurement cardinality... - if cardinality, err = store.Store.MeasurementsCardinality(context.Background(), "db"); err != nil { - t.Fatal(err) - } - - // Estimated cardinality should be well within 2 of the actual cardinality. (arbitrary...) - expCardinality = 64 - if got, exp := math.Abs(float64(cardinality)-float64(expCardinality)), 2.0; got > exp { - t.Errorf("got measurmement cardinality %v, expected upto %v; difference is larger than expected %v", cardinality, expCardinality, exp) - } -} - -func TestStore_Cardinality_Unique(t *testing.T) { - - if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" || os.Getenv("CIRCLECI") != "" { - t.Skip("Skipping test in short, race, circleci and appveyor mode.") - } - - test := func(t *testing.T, index string) { - store := NewStore(t, index) - if err := store.Open(context.Background()); err != nil { - panic(err) - } - defer store.Close() - testStoreCardinalityUnique(t, store) - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -// This test tests cardinality estimation when series data is duplicated across -// multiple shards. -func testStoreCardinalityDuplicates(t *testing.T, store *Store) { - // Generate point data to write to the shards. - series := genTestSeries(64, 5, 5) // 200,000 series. 
- expCardinality := len(series) - - points := make([]models.Point, 0, len(series)) - for _, s := range series { - points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) - } - - // Create requested number of shards in the store & write points. - for shardID := 0; shardID < 10; shardID++ { - if err := store.CreateShard(context.Background(), "db", "rp", uint64(shardID), true); err != nil { - t.Fatalf("create shard: %s", err) - } - - var from, to int - if shardID == 0 { - // if it's the first shard then write all of the points. - from, to = 0, len(points)-1 - } else { - // For other shards we write a random sub-section of all the points. - // which will duplicate the series and shouldn't increase the - // cardinality. - from, to = rand.Intn(len(points)), rand.Intn(len(points)) - if from > to { - from, to = to, from - } - } - - if err := store.BatchWrite(shardID, points[from:to]); err != nil { - t.Fatalf("batch write: %s", err) - } - } - - // Estimate the series cardinality... - cardinality, err := store.Store.SeriesCardinality(context.Background(), "db") - if err != nil { - t.Fatal(err) - } - - // Estimated cardinality should be well within 1.5% of the actual cardinality. - if got, exp := math.Abs(float64(cardinality)-float64(expCardinality))/float64(expCardinality), 0.015; got > exp { - t.Errorf("got epsilon of %v for series cardinality %d (expected %d), which is larger than expected %v", got, cardinality, expCardinality, exp) - } - - // Estimate the measurement cardinality... - if cardinality, err = store.Store.MeasurementsCardinality(context.Background(), "db"); err != nil { - t.Fatal(err) - } - - // Estimated cardinality should be well within 2 of the actual cardinality. (Arbitrary...) - expCardinality = 64 - if got, exp := math.Abs(float64(cardinality)-float64(expCardinality)), 2.0; got > exp { - t.Errorf("got measurement cardinality %v, expected upto %v; difference is larger than expected %v", cardinality, expCardinality, exp) - } -} - -func TestStore_Cardinality_Duplicates(t *testing.T) { - - if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" || os.Getenv("CIRCLECI") != "" { - t.Skip("Skipping test in short, race, circleci and appveyor mode.") - } - - test := func(t *testing.T, index string) { - store := NewStore(t, index) - if err := store.Open(context.Background()); err != nil { - panic(err) - } - defer store.Close() - testStoreCardinalityDuplicates(t, store) - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(t, index) }) - } -} - -func TestStore_MetaQuery_Timeout(t *testing.T) { - if testing.Short() || os.Getenv("APPVEYOR") != "" { - t.Skip("Skipping test in short and appveyor mode.") - } - - test := func(t *testing.T, index string) { - store := NewStore(t, index) - require.NoError(t, store.Open(context.Background())) - defer store.Close() - testStoreMetaQueryTimeout(t, store, index) - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - test(t, index) - }) - } -} - -func testStoreMetaQueryTimeout(t *testing.T, store *Store, index string) { - shards := testStoreMetaQuerySetup(t, store) - - testStoreMakeTimedFuncs(func(ctx context.Context) (string, error) { - const funcName = "SeriesCardinality" - _, err := store.Store.SeriesCardinality(ctx, "db") - return funcName, err - }, index)(t) - - testStoreMakeTimedFuncs(func(ctx context.Context) (string, error) { - const funcName = "MeasurementsCardinality" - _, err 
:= store.Store.MeasurementsCardinality(ctx, "db") - return funcName, err - }, index)(t) - - keyCondition, allCondition := testStoreMetaQueryCondition() - - testStoreMakeTimedFuncs(func(ctx context.Context) (string, error) { - const funcName = "TagValues" - _, err := store.Store.TagValues(ctx, nil, shards, allCondition) - return funcName, err - }, index)(t) - - testStoreMakeTimedFuncs(func(ctx context.Context) (string, error) { - const funcName = "TagKeys" - _, err := store.Store.TagKeys(ctx, nil, shards, keyCondition) - return funcName, err - }, index)(t) - - testStoreMakeTimedFuncs(func(ctx context.Context) (string, error) { - const funcName = "MeasurementNames" - _, err := store.Store.MeasurementNames(ctx, nil, "db", nil) - return funcName, err - }, index)(t) -} - -func testStoreMetaQueryCondition() (influxql.Expr, influxql.Expr) { - keyCondition := &influxql.ParenExpr{ - Expr: &influxql.BinaryExpr{ - Op: influxql.OR, - LHS: &influxql.BinaryExpr{ - Op: influxql.EQ, - LHS: &influxql.VarRef{Val: "_tagKey"}, - RHS: &influxql.StringLiteral{Val: "tagKey4"}, - }, - RHS: &influxql.BinaryExpr{ - Op: influxql.EQ, - LHS: &influxql.VarRef{Val: "_tagKey"}, - RHS: &influxql.StringLiteral{Val: "tagKey5"}, - }, - }, - } - - whereCondition := &influxql.ParenExpr{ - Expr: &influxql.BinaryExpr{ - Op: influxql.AND, - LHS: &influxql.ParenExpr{ - Expr: &influxql.BinaryExpr{ - Op: influxql.EQ, - LHS: &influxql.VarRef{Val: "tagKey1"}, - RHS: &influxql.StringLiteral{Val: "tagValue2"}, - }, - }, - RHS: keyCondition, - }, - } - - allCondition := &influxql.BinaryExpr{ - Op: influxql.AND, - LHS: &influxql.ParenExpr{ - Expr: &influxql.BinaryExpr{ - Op: influxql.EQREGEX, - LHS: &influxql.VarRef{Val: "tagKey3"}, - RHS: &influxql.RegexLiteral{Val: regexp.MustCompile(`tagValue\d`)}, - }, - }, - RHS: whereCondition, - } - return keyCondition, allCondition -} - -func testStoreMetaQuerySetup(t *testing.T, store *Store) []uint64 { - const measurementCnt = 64 - const tagCnt = 5 - const valueCnt = 5 - const pointsPerShard = 20000 - - // Generate point data to write to the shards. - series := genTestSeries(measurementCnt, tagCnt, valueCnt) - - points := make([]models.Point, 0, len(series)) - for _, s := range series { - points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) - } - // Create requested number of shards in the store & write points across - // shards such that we never write the same series to multiple shards. 
- shards := make([]uint64, len(points)/pointsPerShard) - for shardID := 0; shardID < len(points)/pointsPerShard; shardID++ { - if err := store.CreateShard(context.Background(), "db", "rp", uint64(shardID), true); err != nil { - t.Fatalf("create shard: %s", err) - } - if err := store.BatchWrite(shardID, points[shardID*pointsPerShard:(shardID+1)*pointsPerShard]); err != nil { - t.Fatalf("batch write: %s", err) - } - shards[shardID] = uint64(shardID) - } - return shards -} - -func testStoreMakeTimedFuncs(tested func(context.Context) (string, error), index string) func(*testing.T) { - cancelTested := func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(0)) - defer cancel() - - funcName, err := tested(ctx) - if err == nil { - t.Fatalf("%v: failed to time out with index type %v", funcName, index) - } else if !strings.Contains(err.Error(), context.DeadlineExceeded.Error()) { - t.Fatalf("%v: failed with %v instead of %v with index type %v", funcName, err, context.DeadlineExceeded, index) - } - } - return cancelTested -} - -// Creates a large number of series in multiple shards, which will force -// compactions to occur. -func testStoreCardinalityCompactions(store *Store) error { - - // Generate point data to write to the shards. - series := genTestSeries(300, 5, 5) // 937,500 series - expCardinality := len(series) - - points := make([]models.Point, 0, len(series)) - for _, s := range series { - points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) - } - - // Create requested number of shards in the store & write points across - // shards such that we never write the same series to multiple shards. - for shardID := 0; shardID < 2; shardID++ { - if err := store.CreateShard(context.Background(), "db", "rp", uint64(shardID), true); err != nil { - return fmt.Errorf("create shard: %s", err) - } - if err := store.BatchWrite(shardID, points[shardID*468750:(shardID+1)*468750]); err != nil { - return fmt.Errorf("batch write: %s", err) - } - } - - // Estimate the series cardinality... - cardinality, err := store.Store.SeriesCardinality(context.Background(), "db") - if err != nil { - return err - } - - // Estimated cardinality should be well within 1.5% of the actual cardinality. - if got, exp := math.Abs(float64(cardinality)-float64(expCardinality))/float64(expCardinality), 0.015; got > exp { - return fmt.Errorf("got epsilon of %v for series cardinality %v (expected %v), which is larger than expected %v", got, cardinality, expCardinality, exp) - } - - // Estimate the measurement cardinality... - if cardinality, err = store.Store.MeasurementsCardinality(context.Background(), "db"); err != nil { - return err - } - - // Estimated cardinality should be well within 2 of the actual cardinality. (Arbitrary...) 
- expCardinality = 300 - if got, exp := math.Abs(float64(cardinality)-float64(expCardinality)), 2.0; got > exp { - return fmt.Errorf("got measurement cardinality %v, expected upto %v; difference is larger than expected %v", cardinality, expCardinality, exp) - } - return nil -} - -func TestStore_Cardinality_Compactions(t *testing.T) { - if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" || os.Getenv("CIRCLECI") != "" { - t.Skip("Skipping test in short, race, circleci and appveyor mode.") - } - - test := func(t *testing.T, index string) error { - store := NewStore(t, index) - if err := store.Open(context.Background()); err != nil { - panic(err) - } - defer store.Close() - return testStoreCardinalityCompactions(store) - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - if err := test(t, index); err != nil { - t.Fatal(err) - } - }) - } -} - -func TestStore_Sketches(t *testing.T) { - - checkCardinalities := func(store *tsdb.Store, series, tseries, measurements, tmeasurements int) error { - // Get sketches and check cardinality... - sketch, tsketch, err := store.SeriesSketches(context.Background(), "db") - if err != nil { - return err - } - - // delta calculates a rough 10% delta. If i is small then a minimum value - // of 2 is used. - delta := func(i int) int { - v := i / 10 - if v == 0 { - v = 2 - } - return v - } - - // series cardinality should be well within 10%. - if got, exp := int(sketch.Count()), series; got-exp < -delta(series) || got-exp > delta(series) { - return fmt.Errorf("got series cardinality %d, expected ~%d", got, exp) - } - - // check series tombstones - if got, exp := int(tsketch.Count()), tseries; got-exp < -delta(tseries) || got-exp > delta(tseries) { - return fmt.Errorf("got series tombstone cardinality %d, expected ~%d", got, exp) - } - - // Check measurement cardinality. - if sketch, tsketch, err = store.MeasurementsSketches(context.Background(), "db"); err != nil { - return err - } - - if got, exp := int(sketch.Count()), measurements; got-exp < -delta(measurements) || got-exp > delta(measurements) { - return fmt.Errorf("got measurement cardinality %d, expected ~%d", got, exp) - } - - if got, exp := int(tsketch.Count()), tmeasurements; got-exp < -delta(tmeasurements) || got-exp > delta(tmeasurements) { - return fmt.Errorf("got measurement tombstone cardinality %d, expected ~%d", got, exp) - } - - if mc, err := store.MeasurementsCardinality(context.Background(), "db"); err != nil { - return fmt.Errorf("unexpected error from MeasurementsCardinality: %w", err) - } else { - if mc < 0 { - return fmt.Errorf("MeasurementsCardinality returned < 0 (%v)", mc) - } - expMc := int64(sketch.Count() - tsketch.Count()) - if expMc < 0 { - expMc = 0 - } - if got, exp := int(mc), int(expMc); got-exp < -delta(exp) || got-exp > delta(exp) { - return fmt.Errorf("got measurement cardinality %d, expected ~%d", mc, exp) - } - } - return nil - } - - test := func(t *testing.T, index string) error { - store := MustOpenStore(t, index) - defer store.Close() - - // Generate point data to write to the shards. - series := genTestSeries(10, 2, 4) // 160 series - - points := make([]models.Point, 0, len(series)) - for _, s := range series { - points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) - } - - // Create requested number of shards in the store & write points across - // shards such that we never write the same series to multiple shards. 
- for shardID := 0; shardID < 4; shardID++ { - if err := store.CreateShard(context.Background(), "db", "rp", uint64(shardID), true); err != nil { - return fmt.Errorf("create shard: %s", err) - } - - if err := store.BatchWrite(shardID, points[shardID*40:(shardID+1)*40]); err != nil { - return fmt.Errorf("batch write: %s", err) - } - } - - // Check cardinalities - if err := checkCardinalities(store.Store, 160, 0, 10, 0); err != nil { - return fmt.Errorf("[initial] %v", err) - } - - // Reopen the store. - if err := store.Reopen(t); err != nil { - return err - } - - // Check cardinalities - if err := checkCardinalities(store.Store, 160, 0, 10, 0); err != nil { - return fmt.Errorf("[initial|re-open] %v", err) - } - - // Delete half the measurements data - mnames, err := store.MeasurementNames(context.Background(), nil, "db", nil) - if err != nil { - return err - } - - for _, name := range mnames[:len(mnames)/2] { - if err := store.DeleteSeries(context.Background(), "db", []influxql.Source{&influxql.Measurement{Name: string(name)}}, nil); err != nil { - return err - } - } - - // Check cardinalities. - expS, expTS, expM, expTM := 160, 80, 10, 5 - - // Check cardinalities - tombstones should be in - if err := checkCardinalities(store.Store, expS, expTS, expM, expTM); err != nil { - return fmt.Errorf("[initial|re-open|delete] %v", err) - } - - // Reopen the store. - if err := store.Reopen(t); err != nil { - return err - } - - // Check cardinalities. - expS, expTS, expM, expTM = 80, 80, 5, 5 - - if err := checkCardinalities(store.Store, expS, expTS, expM, expTM); err != nil { - return fmt.Errorf("[initial|re-open|delete|re-open] %v", err) - } - - // Now delete the rest of the measurements. - // This will cause the measurement tombstones to exceed the measurement cardinality for TSI. - mnames, err = store.MeasurementNames(context.Background(), nil, "db", nil) - if err != nil { - return err - } - - for _, name := range mnames { - if err := store.DeleteSeries(context.Background(), "db", []influxql.Source{&influxql.Measurement{Name: string(name)}}, nil); err != nil { - return err - } - } - - // Check cardinalities. In this case, the indexes behave differently. 
- expS, expTS, expM, expTM = 80, 159, 5, 10 - /* - if index == inmem.IndexName { - expS, expTS, expM, expTM = 80, 80, 5, 5 - } - */ - - // Check cardinalities - tombstones should be in - if err := checkCardinalities(store.Store, expS, expTS, expM, expTM); err != nil { - return fmt.Errorf("[initial|re-open|delete] %v", err) - } - - return nil - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - if err := test(t, index); err != nil { - t.Fatal(err) - } - }) - } -} - -func TestStore_TagValues(t *testing.T) { - - // No WHERE - just get for keys host and shard - RHSAll := &influxql.ParenExpr{ - Expr: &influxql.BinaryExpr{ - Op: influxql.OR, - LHS: &influxql.BinaryExpr{ - Op: influxql.EQ, - LHS: &influxql.VarRef{Val: "_tagKey"}, - RHS: &influxql.StringLiteral{Val: "host"}, - }, - RHS: &influxql.BinaryExpr{ - Op: influxql.EQ, - LHS: &influxql.VarRef{Val: "_tagKey"}, - RHS: &influxql.StringLiteral{Val: "shard"}, - }, - }, - } - - // Get for host and shard, but also WHERE on foo = a - RHSWhere := &influxql.ParenExpr{ - Expr: &influxql.BinaryExpr{ - Op: influxql.AND, - LHS: &influxql.ParenExpr{ - Expr: &influxql.BinaryExpr{ - Op: influxql.EQ, - LHS: &influxql.VarRef{Val: "foo"}, - RHS: &influxql.StringLiteral{Val: "a"}, - }, - }, - RHS: RHSAll, - }, - } - - // SHOW TAG VALUES FROM /cpu\d/ WITH KEY IN ("host", "shard") - // - // Switching out RHS for RHSWhere would make the query: - // SHOW TAG VALUES FROM /cpu\d/ WITH KEY IN ("host", "shard") WHERE foo = 'a' - base := influxql.BinaryExpr{ - Op: influxql.AND, - LHS: &influxql.ParenExpr{ - Expr: &influxql.BinaryExpr{ - Op: influxql.EQREGEX, - LHS: &influxql.VarRef{Val: "_name"}, - RHS: &influxql.RegexLiteral{Val: regexp.MustCompile(`cpu\d`)}, - }, - }, - RHS: RHSAll, - } - - var baseWhere *influxql.BinaryExpr = influxql.CloneExpr(&base).(*influxql.BinaryExpr) - baseWhere.RHS = RHSWhere - - examples := []struct { - Name string - Expr influxql.Expr - Exp []tsdb.TagValues - }{ - { - Name: "No WHERE clause", - Expr: &base, - Exp: []tsdb.TagValues{ - createTagValues("cpu0", map[string][]string{"shard": {"s0"}}), - createTagValues("cpu1", map[string][]string{"shard": {"s1"}}), - createTagValues("cpu10", map[string][]string{"host": {"nofoo", "tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), - createTagValues("cpu11", map[string][]string{"host": {"nofoo", "tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), - createTagValues("cpu12", map[string][]string{"host": {"nofoo", "tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), - createTagValues("cpu2", map[string][]string{"shard": {"s2"}}), - }, - }, - { - Name: "With WHERE clause", - Expr: baseWhere, - Exp: []tsdb.TagValues{ - createTagValues("cpu0", map[string][]string{"shard": {"s0"}}), - createTagValues("cpu1", map[string][]string{"shard": {"s1"}}), - createTagValues("cpu10", map[string][]string{"host": {"tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), - createTagValues("cpu11", map[string][]string{"host": {"tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), - createTagValues("cpu12", map[string][]string{"host": {"tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), - createTagValues("cpu2", map[string][]string{"shard": {"s2"}}), - }, - }, - } - - setup := func(t *testing.T, index string) (*Store, []uint64) { // returns shard ids - s := MustOpenStore(t, index) - - fmtStr := `cpu1%[1]d,foo=a,ignoreme=nope,host=tv%[2]d,shard=s%[3]d value=1 %[4]d - cpu1%[1]d,host=nofoo value=1 %[4]d - mem,host=nothanks value=1 %[4]d - 
cpu%[3]d,shard=s%[3]d,foo=a value=2 %[4]d - ` - genPoints := func(sid int) []string { - var ts int - points := make([]string, 0, 3*4) - for m := 0; m < 3; m++ { - for tagvid := 0; tagvid < 4; tagvid++ { - points = append(points, fmt.Sprintf(fmtStr, m, tagvid, sid, ts)) - ts++ - } - } - return points - } - - // Create data across 3 shards. - var ids []uint64 - for i := 0; i < 3; i++ { - ids = append(ids, uint64(i)) - s.MustCreateShardWithData("db0", "rp0", i, genPoints(i)...) - } - return s, ids - } - - for _, example := range examples { - for _, index := range tsdb.RegisteredIndexes() { - t.Run(example.Name+"_"+index, func(t *testing.T) { - s, shardIDs := setup(t, index) - defer s.Close() - got, err := s.TagValues(context.Background(), nil, shardIDs, example.Expr) - if err != nil { - t.Fatal(err) - } - exp := example.Exp - - if !reflect.DeepEqual(got, exp) { - t.Fatalf("got:\n%#v\n\nexp:\n%#v", got, exp) - } - }) - } - } -} - -func TestStore_Measurements_Auth(t *testing.T) { - - test := func(t *testing.T, index string) error { - s := MustOpenStore(t, index) - defer s.Close() - - // Create shard #0 with data. - s.MustCreateShardWithData("db0", "rp0", 0, - `cpu,host=serverA value=1 0`, - `cpu,host=serverA value=2 10`, - `cpu,region=west value=3 20`, - `cpu,secret=foo value=5 30`, // cpu still readable because it has other series that can be read. - `mem,secret=foo value=1 30`, - `disk value=4 30`, - ) - - authorizer := &internal.AuthorizerMock{ - AuthorizeSeriesReadFn: func(database string, measurement []byte, tags models.Tags) bool { - if database == "" || tags.GetString("secret") != "" { - t.Logf("Rejecting series db=%s, m=%s, tags=%v", database, measurement, tags) - return false - } - return true - }, - } - - names, err := s.MeasurementNames(context.Background(), authorizer, "db0", nil) - if err != nil { - return err - } - - // names should not contain any measurements where none of the associated - // series are authorised for reads. - expNames := 2 - var gotNames int - for _, name := range names { - if string(name) == "mem" { - return fmt.Errorf("got measurement %q but it should be filtered.", name) - } - gotNames++ - } - - if gotNames != expNames { - return fmt.Errorf("got %d measurements, but expected %d", gotNames, expNames) - } - - // Now delete all of the cpu series. - cond, err := influxql.ParseExpr("host = 'serverA' OR region = 'west'") - if err != nil { - return err - } - - if err := s.DeleteSeries(context.Background(), "db0", nil, cond); err != nil { - return err - } - - if names, err = s.MeasurementNames(context.Background(), authorizer, "db0", nil); err != nil { - return err - } - - // names should not contain any measurements where none of the associated - // series are authorised for reads. - expNames = 1 - gotNames = 0 - for _, name := range names { - if string(name) == "mem" || string(name) == "cpu" { - return fmt.Errorf("after delete got measurement %q but it should be filtered.", name) - } - gotNames++ - } - - if gotNames != expNames { - return fmt.Errorf("after delete got %d measurements, but expected %d", gotNames, expNames) - } - - return nil - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - if err := test(t, index); err != nil { - t.Fatal(err) - } - }) - } - -} - -func TestStore_TagKeys_Auth(t *testing.T) { - - test := func(t *testing.T, index string) error { - s := MustOpenStore(t, index) - defer s.Close() - - // Create shard #0 with data. 
- s.MustCreateShardWithData("db0", "rp0", 0, - `cpu,host=serverA value=1 0`, - `cpu,host=serverA,debug=true value=2 10`, - `cpu,region=west value=3 20`, - `cpu,secret=foo,machine=a value=1 20`, - ) - - authorizer := &internal.AuthorizerMock{ - AuthorizeSeriesReadFn: func(database string, measurement []byte, tags models.Tags) bool { - if database == "" || !bytes.Equal(measurement, []byte("cpu")) || tags.GetString("secret") != "" { - t.Logf("Rejecting series db=%s, m=%s, tags=%v", database, measurement, tags) - return false - } - return true - }, - } - - keys, err := s.TagKeys(context.Background(), authorizer, []uint64{0}, nil) - if err != nil { - return err - } - - // keys should not contain any tag keys associated with a series containing - // a secret tag. - expKeys := 3 - var gotKeys int - for _, tk := range keys { - if got, exp := tk.Measurement, "cpu"; got != exp { - return fmt.Errorf("got measurement %q, expected %q", got, exp) - } - - for _, key := range tk.Keys { - if key == "secret" || key == "machine" { - return fmt.Errorf("got tag key %q but it should be filtered.", key) - } - gotKeys++ - } - } - - if gotKeys != expKeys { - return fmt.Errorf("got %d keys, but expected %d", gotKeys, expKeys) - } - - // Delete the series with region = west - cond, err := influxql.ParseExpr("region = 'west'") - if err != nil { - return err - } - if err := s.DeleteSeries(context.Background(), "db0", nil, cond); err != nil { - return err - } - - if keys, err = s.TagKeys(context.Background(), authorizer, []uint64{0}, nil); err != nil { - return err - } - - // keys should not contain any tag keys associated with a series containing - // a secret tag or the deleted series - expKeys = 2 - gotKeys = 0 - for _, tk := range keys { - if got, exp := tk.Measurement, "cpu"; got != exp { - return fmt.Errorf("got measurement %q, expected %q", got, exp) - } - - for _, key := range tk.Keys { - if key == "secret" || key == "machine" || key == "region" { - return fmt.Errorf("got tag key %q but it should be filtered.", key) - } - gotKeys++ - } - } - - if gotKeys != expKeys { - return fmt.Errorf("got %d keys, but expected %d", gotKeys, expKeys) - } - - return nil - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - if err := test(t, index); err != nil { - t.Fatal(err) - } - }) - } - -} - -func TestStore_TagValues_Auth(t *testing.T) { - - test := func(t *testing.T, index string) error { - s := MustOpenStore(t, index) - defer s.Close() - - // Create shard #0 with data. - s.MustCreateShardWithData("db0", "rp0", 0, - `cpu,host=serverA value=1 0`, - `cpu,host=serverA value=2 10`, - `cpu,host=serverB value=3 20`, - `cpu,secret=foo,host=serverD value=1 20`, - ) - - authorizer := &internal.AuthorizerMock{ - AuthorizeSeriesReadFn: func(database string, measurement []byte, tags models.Tags) bool { - if database == "" || !bytes.Equal(measurement, []byte("cpu")) || tags.GetString("secret") != "" { - t.Logf("Rejecting series db=%s, m=%s, tags=%v", database, measurement, tags) - return false - } - return true - }, - } - - values, err := s.TagValues(context.Background(), authorizer, []uint64{0}, &influxql.BinaryExpr{ - Op: influxql.EQ, - LHS: &influxql.VarRef{Val: "_tagKey"}, - RHS: &influxql.StringLiteral{Val: "host"}, - }) - - if err != nil { - return err - } - - // values should not contain any tag values associated with a series containing - // a secret tag. 
- expValues := 2 - var gotValues int - for _, tv := range values { - if got, exp := tv.Measurement, "cpu"; got != exp { - return fmt.Errorf("got measurement %q, expected %q", got, exp) - } - - for _, v := range tv.Values { - if got, exp := v.Value, "serverD"; got == exp { - return fmt.Errorf("got tag value %q but it should be filtered.", got) - } - gotValues++ - } - } - - if gotValues != expValues { - return fmt.Errorf("got %d tags, but expected %d", gotValues, expValues) - } - - // Delete the series with values serverA - cond, err := influxql.ParseExpr("host = 'serverA'") - if err != nil { - return err - } - if err := s.DeleteSeries(context.Background(), "db0", nil, cond); err != nil { - return err - } - - values, err = s.TagValues(context.Background(), authorizer, []uint64{0}, &influxql.BinaryExpr{ - Op: influxql.EQ, - LHS: &influxql.VarRef{Val: "_tagKey"}, - RHS: &influxql.StringLiteral{Val: "host"}, - }) - - if err != nil { - return err - } - - // values should not contain any tag values associated with a series containing - // a secret tag. - expValues = 1 - gotValues = 0 - for _, tv := range values { - if got, exp := tv.Measurement, "cpu"; got != exp { - return fmt.Errorf("got measurement %q, expected %q", got, exp) - } - - for _, v := range tv.Values { - if got, exp := v.Value, "serverD"; got == exp { - return fmt.Errorf("got tag value %q but it should be filtered.", got) - } else if got, exp := v.Value, "serverA"; got == exp { - return fmt.Errorf("got tag value %q but it should be filtered.", got) - } - gotValues++ - } - } - - if gotValues != expValues { - return fmt.Errorf("got %d values, but expected %d", gotValues, expValues) - } - return nil - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - if err := test(t, index); err != nil { - t.Fatal(err) - } - }) - } -} - -// Helper to create some tag values -func createTagValues(mname string, kvs map[string][]string) tsdb.TagValues { - var sz int - for _, v := range kvs { - sz += len(v) - } - - out := tsdb.TagValues{ - Measurement: mname, - Values: make([]tsdb.KeyValue, 0, sz), - } - - for tk, tvs := range kvs { - for _, tv := range tvs { - out.Values = append(out.Values, tsdb.KeyValue{Key: tk, Value: tv}) - } - // We have to sort the KeyValues since that's how they're provided from - // the tsdb.Store. - sort.Sort(tsdb.KeyValues(out.Values)) - } - - return out -} - -func TestStore_MeasurementNames_ConcurrentDropShard(t *testing.T) { - for _, index := range tsdb.RegisteredIndexes() { - s := MustOpenStore(t, index) - defer s.Close() - - shardN := 10 - for i := 0; i < shardN; i++ { - // Create new shards with some data - s.MustCreateShardWithData("db0", "rp0", i, - `cpu,host=serverA value=1 30`, - `mem,region=west value=2 40`, // skip: wrong source - `cpu,host=serverC value=3 60`, - ) - } - - done := make(chan struct{}) - errC := make(chan error, 2) - - // Randomly close and open the shards. - go func() { - for { - select { - case <-done: - errC <- nil - return - default: - i := uint64(rand.Intn(int(shardN))) - if sh := s.Shard(i); sh == nil { - errC <- errors.New("shard should not be nil") - return - } else { - if err := sh.Close(); err != nil { - errC <- err - return - } - time.Sleep(500 * time.Microsecond) - if err := s.OpenShard(context.Background(), sh, false); err != nil { - errC <- err - return - } - } - } - } - }() - - // Attempt to get tag keys from the shards. 
- go func() { - for { - select { - case <-done: - errC <- nil - return - default: - names, err := s.MeasurementNames(context.Background(), nil, "db0", nil) - if err == tsdb.ErrIndexClosing || err == tsdb.ErrEngineClosed { - continue // These errors are expected - } - - if err != nil { - errC <- err - return - } - - if got, exp := names, slices.StringsToBytes("cpu", "mem"); !reflect.DeepEqual(got, exp) { - errC <- fmt.Errorf("got keys %v, expected %v", got, exp) - return - } - } - } - }() - - // Run for 500ms - time.Sleep(500 * time.Millisecond) - close(done) - - // Check for errors. - if err := <-errC; err != nil { - t.Fatal(err) - } - if err := <-errC; err != nil { - t.Fatal(err) - } - } -} - -func TestStore_TagKeys_ConcurrentDropShard(t *testing.T) { - for _, index := range tsdb.RegisteredIndexes() { - s := MustOpenStore(t, index) - defer s.Close() - - shardN := 10 - for i := 0; i < shardN; i++ { - // Create new shards with some data - s.MustCreateShardWithData("db0", "rp0", i, - `cpu,host=serverA value=1 30`, - `mem,region=west value=2 40`, // skip: wrong source - `cpu,host=serverC value=3 60`, - ) - } - - done := make(chan struct{}) - errC := make(chan error, 2) - - // Randomly close and open the shards. - go func() { - for { - select { - case <-done: - errC <- nil - return - default: - i := uint64(rand.Intn(int(shardN))) - if sh := s.Shard(i); sh == nil { - errC <- errors.New("shard should not be nil") - return - } else { - if err := sh.Close(); err != nil { - errC <- err - return - } - time.Sleep(500 * time.Microsecond) - if err := s.OpenShard(context.Background(), sh, false); err != nil { - errC <- err - return - } - } - } - } - }() - - // Attempt to get tag keys from the shards. - go func() { - for { - select { - case <-done: - errC <- nil - return - default: - keys, err := s.TagKeys(context.Background(), nil, []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, nil) - if err == tsdb.ErrIndexClosing || err == tsdb.ErrEngineClosed { - continue // These errors are expected - } - - if err != nil { - errC <- err - return - } - - if got, exp := keys[0].Keys, []string{"host"}; !reflect.DeepEqual(got, exp) { - errC <- fmt.Errorf("got keys %v, expected %v", got, exp) - return - } - - if got, exp := keys[1].Keys, []string{"region"}; !reflect.DeepEqual(got, exp) { - errC <- fmt.Errorf("got keys %v, expected %v", got, exp) - return - } - } - } - }() - - // Run for 500ms - time.Sleep(500 * time.Millisecond) - - close(done) - - // Check for errors - if err := <-errC; err != nil { - t.Fatal(err) - } - if err := <-errC; err != nil { - t.Fatal(err) - } - } -} - -func TestStore_TagValues_ConcurrentDropShard(t *testing.T) { - for _, index := range tsdb.RegisteredIndexes() { - s := MustOpenStore(t, index) - defer s.Close() - - shardN := 10 - for i := 0; i < shardN; i++ { - // Create new shards with some data - s.MustCreateShardWithData("db0", "rp0", i, - `cpu,host=serverA value=1 30`, - `mem,region=west value=2 40`, // skip: wrong source - `cpu,host=serverC value=3 60`, - ) - } - - done := make(chan struct{}) - errC := make(chan error, 2) - - // Randomly close and open the shards. 
- go func() { - for { - select { - case <-done: - errC <- nil - return - default: - i := uint64(rand.Intn(int(shardN))) - if sh := s.Shard(i); sh == nil { - errC <- errors.New("shard should not be nil") - return - } else { - if err := sh.Close(); err != nil { - errC <- err - return - } - time.Sleep(500 * time.Microsecond) - if err := s.OpenShard(context.Background(), sh, false); err != nil { - errC <- err - return - } - } - } - } - }() - - // Attempt to get tag keys from the shards. - go func() { - for { - select { - case <-done: - errC <- nil - return - default: - stmt, err := influxql.ParseStatement(`SHOW TAG VALUES WITH KEY = "host"`) - if err != nil { - t.Error(err) - return - } - rewrite, err := query.RewriteStatement(stmt) - if err != nil { - t.Error(err) - return - } - - cond := rewrite.(*influxql.ShowTagValuesStatement).Condition - values, err := s.TagValues(context.Background(), nil, []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, cond) - if err == tsdb.ErrIndexClosing || err == tsdb.ErrEngineClosed { - continue // These errors are expected - } - - if err != nil { - errC <- err - return - } - - exp := tsdb.TagValues{ - Measurement: "cpu", - Values: []tsdb.KeyValue{ - tsdb.KeyValue{Key: "host", Value: "serverA"}, - tsdb.KeyValue{Key: "host", Value: "serverC"}, - }, - } - - if got := values[0]; !reflect.DeepEqual(got, exp) { - errC <- fmt.Errorf("got keys %v, expected %v", got, exp) - return - } - } - } - }() - - // Run for 500ms - time.Sleep(500 * time.Millisecond) - - close(done) - - // Check for errors - if err := <-errC; err != nil { - t.Fatal(err) - } - if err := <-errC; err != nil { - t.Fatal(err) - } - } -} - -func TestStore_DeleteByPredicate(t *testing.T) { - test := func(t *testing.T, index string) error { - s := MustOpenStore(t, index) - defer s.Close() - - s.MustCreateShardWithData("db0", "rp0", 0, - `cpu,host=serverA value=1 0`, - `cpu,region=west value=3 20`, - `cpu,secret=foo value=5 30`, - `mem,secret=foo value=1 30`, - `disk value=4 30`, - ) - - p, err := predicate.Parse(`_measurement="cpu"`) - if err != nil { - return err - } - - pred, err := predicate.New(p) - if err != nil { - return err - } - - expr, err := influxql.ParseExpr(`_measurement="cpu"`) - if err != nil { - return err - } - - err = s.DeleteSeriesWithPredicate(context.Background(), "db0", math.MinInt, math.MaxInt, pred, expr) - if err != nil { - return err - } - - names, err := s.MeasurementNames(context.Background(), query.OpenAuthorizer, "db0", nil) - if err != nil { - return err - } - - require.Equal(t, 2, len(names), "expected cpu to be deleted, leaving 2 measurements") - - return nil - } - - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { - if err := test(t, index); err != nil { - t.Fatal(err) - } - }) - } -} - -func BenchmarkStore_SeriesCardinality_100_Shards(b *testing.B) { - for _, index := range tsdb.RegisteredIndexes() { - store := NewStore(b, index) - if err := store.Open(context.Background()); err != nil { - panic(err) - } - - // Write a point to n shards. 
- for shardID := 0; shardID < 100; shardID++ { - if err := store.CreateShard(context.Background(), "db", "rp", uint64(shardID), true); err != nil { - b.Fatalf("create shard: %s", err) - } - - err := store.WriteToShard(context.Background(), uint64(shardID), []models.Point{models.MustNewPoint("cpu", nil, map[string]interface{}{"value": 1.0}, time.Now())}) - if err != nil { - b.Fatalf("write: %s", err) - } - } - - b.Run(store.EngineOptions.IndexVersion, func(b *testing.B) { - for i := 0; i < b.N; i++ { - _, _ = store.SeriesCardinality(context.Background(), "db") - } - }) - store.Close() - } -} - -func BenchmarkStoreOpen_200KSeries_100Shards(b *testing.B) { benchmarkStoreOpen(b, 64, 5, 5, 1, 100) } - -func benchmarkStoreOpen(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt, shardCnt int) { - var store *Store - setup := func(index string) error { - store := MustOpenStore(b, index) - - // Generate test series (measurements + unique tag sets). - series := genTestSeries(mCnt, tkCnt, tvCnt) - - // Generate point data to write to the shards. - points := []models.Point{} - for _, s := range series { - for val := 0.0; val < float64(pntCnt); val++ { - p := models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": val}, time.Now()) - points = append(points, p) - } - } - - // Create requested number of shards in the store & write points. - for shardID := 0; shardID < shardCnt; shardID++ { - if err := store.CreateShard(context.Background(), "mydb", "myrp", uint64(shardID), true); err != nil { - return fmt.Errorf("create shard: %s", err) - } - if err := store.BatchWrite(shardID, points); err != nil { - return fmt.Errorf("batch write: %s", err) - } - } - return nil - } - - for _, index := range tsdb.RegisteredIndexes() { - if err := setup(index); err != nil { - b.Fatal(err) - } - b.Run(store.EngineOptions.IndexVersion, func(b *testing.B) { - for n := 0; n < b.N; n++ { - store := tsdb.NewStore(store.Path()) - if err := store.Open(context.Background()); err != nil { - b.Fatalf("open store error: %s", err) - } - - b.StopTimer() - store.Close() - b.StartTimer() - } - }) - os.RemoveAll(store.Path()) - } -} - -// To store result of benchmark (ensure allocated on heap). 
-var tvResult []tsdb.TagValues - -func BenchmarkStore_TagValues(b *testing.B) { - benchmarks := []struct { - name string - shards int - measurements int - tagValues int - }{ - {name: "s=1_m=1_v=100", shards: 1, measurements: 1, tagValues: 100}, - {name: "s=1_m=1_v=1000", shards: 1, measurements: 1, tagValues: 1000}, - {name: "s=1_m=10_v=100", shards: 1, measurements: 10, tagValues: 100}, - {name: "s=1_m=10_v=1000", shards: 1, measurements: 10, tagValues: 1000}, - {name: "s=1_m=100_v=100", shards: 1, measurements: 100, tagValues: 100}, - {name: "s=1_m=100_v=1000", shards: 1, measurements: 100, tagValues: 1000}, - {name: "s=10_m=1_v=100", shards: 10, measurements: 1, tagValues: 100}, - {name: "s=10_m=1_v=1000", shards: 10, measurements: 1, tagValues: 1000}, - {name: "s=10_m=10_v=100", shards: 10, measurements: 10, tagValues: 100}, - {name: "s=10_m=10_v=1000", shards: 10, measurements: 10, tagValues: 1000}, - {name: "s=10_m=100_v=100", shards: 10, measurements: 100, tagValues: 100}, - {name: "s=10_m=100_v=1000", shards: 10, measurements: 100, tagValues: 1000}, - } - - setup := func(shards, measurements, tagValues int, index string, useRandom bool) (*Store, []uint64) { // returns shard ids - s := NewStore(b, index) - if err := s.Open(context.Background()); err != nil { - panic(err) - } - - fmtStr := `cpu%[1]d,host=tv%[2]d,shard=s%[3]d,z1=s%[1]d%[2]d,z2=%[4]s value=1 %[5]d` - // genPoints generates some point data. If ran is true then random tag - // key values will be generated, meaning more work sorting and merging. - // If ran is false, then the same set of points will be produced for the - // same set of parameters, meaning more de-duplication of points will be - // needed. - genPoints := func(sid int, ran bool) []string { - var v, ts int - var half string - points := make([]string, 0, measurements*tagValues) - for m := 0; m < measurements; m++ { - for tagvid := 0; tagvid < tagValues; tagvid++ { - v = tagvid - if ran { - v = rand.Intn(100000) - } - half = fmt.Sprint(rand.Intn(2) == 0) - points = append(points, fmt.Sprintf(fmtStr, m, v, sid, half, ts)) - ts++ - } - } - return points - } - - // Create data across chosen number of shards. - var shardIDs []uint64 - for i := 0; i < shards; i++ { - shardIDs = append(shardIDs, uint64(i)) - s.MustCreateShardWithData("db0", "rp0", i, genPoints(i, useRandom)...) 
- } - return s, shardIDs - } - - // SHOW TAG VALUES WITH KEY IN ("host", "shard") - cond1 := &influxql.ParenExpr{ - Expr: &influxql.BinaryExpr{ - Op: influxql.OR, - LHS: &influxql.BinaryExpr{ - Op: influxql.EQ, - LHS: &influxql.VarRef{Val: "_tagKey"}, - RHS: &influxql.StringLiteral{Val: "host"}, - }, - RHS: &influxql.BinaryExpr{ - Op: influxql.EQ, - LHS: &influxql.VarRef{Val: "_tagKey"}, - RHS: &influxql.StringLiteral{Val: "shard"}, - }, - }, - } - - cond2 := &influxql.ParenExpr{ - Expr: &influxql.BinaryExpr{ - Op: influxql.AND, - LHS: &influxql.ParenExpr{ - Expr: &influxql.BinaryExpr{ - Op: influxql.EQ, - LHS: &influxql.VarRef{Val: "z2"}, - RHS: &influxql.StringLiteral{Val: "true"}, - }, - }, - RHS: cond1, - }, - } - - var err error - for _, index := range tsdb.RegisteredIndexes() { - for useRand := 0; useRand < 2; useRand++ { - for c, condition := range []influxql.Expr{cond1, cond2} { - for _, bm := range benchmarks { - s, shardIDs := setup(bm.shards, bm.measurements, bm.tagValues, index, useRand == 1) - teardown := func() { - if err := s.Close(); err != nil { - b.Fatal(err) - } - } - cnd := "Unfiltered" - if c == 0 { - cnd = "Filtered" - } - b.Run("random_values="+fmt.Sprint(useRand == 1)+"_index="+index+"_"+cnd+"_"+bm.name, func(b *testing.B) { - for i := 0; i < b.N; i++ { - if tvResult, err = s.TagValues(context.Background(), nil, shardIDs, condition); err != nil { - b.Fatal(err) - } - } - }) - teardown() - } - } - } - } -} - -// Store is a test wrapper for tsdb.Store. -type Store struct { - *tsdb.Store - index string -} - -// NewStore returns a new instance of Store with a temporary path. -func NewStore(tb testing.TB, index string) *Store { - tb.Helper() - - path := tb.TempDir() - - s := &Store{Store: tsdb.NewStore(path), index: index} - s.EngineOptions.IndexVersion = index - s.EngineOptions.Config.WALDir = filepath.Join(path, "wal") - s.EngineOptions.Config.TraceLoggingEnabled = true - s.WithLogger(zaptest.NewLogger(tb)) - - return s -} - -// MustOpenStore returns a new, open Store using the specified index, -// at a temporary path. -func MustOpenStore(tb testing.TB, index string) *Store { - tb.Helper() - - s := NewStore(tb, index) - - if err := s.Open(context.Background()); err != nil { - panic(err) - } - return s -} - -// Reopen closes and reopens the store as a new store. -func (s *Store) Reopen(tb testing.TB) error { - tb.Helper() - - if err := s.Store.Close(); err != nil { - return err - } - - s.Store = tsdb.NewStore(s.Path()) - s.EngineOptions.IndexVersion = s.index - s.EngineOptions.Config.WALDir = filepath.Join(s.Path(), "wal") - s.EngineOptions.Config.TraceLoggingEnabled = true - s.WithLogger(zaptest.NewLogger(tb)) - - return s.Store.Open(context.Background()) -} - -// Close closes the store and removes the underlying data. -func (s *Store) Close() error { - return s.Store.Close() -} - -// MustCreateShardWithData creates a shard and writes line protocol data to it. -func (s *Store) MustCreateShardWithData(db, rp string, shardID int, data ...string) { - if err := s.CreateShard(context.Background(), db, rp, uint64(shardID), true); err != nil { - panic(err) - } - s.MustWriteToShardString(shardID, data...) -} - -// MustWriteToShardString parses the line protocol (with second precision) and -// inserts the resulting points into a shard. Panic on error. 
-func (s *Store) MustWriteToShardString(shardID int, data ...string) { - var points []models.Point - for i := range data { - a, err := models.ParsePointsWithPrecision([]byte(strings.TrimSpace(data[i])), time.Time{}, "s") - if err != nil { - panic(err) - } - points = append(points, a...) - } - - if err := s.WriteToShard(context.Background(), uint64(shardID), points); err != nil { - panic(err) - } -} - -// BatchWrite writes points to a shard in chunks. -func (s *Store) BatchWrite(shardID int, points []models.Point) error { - nPts := len(points) - chunkSz := 10000 - start := 0 - end := chunkSz - - for { - if end > nPts { - end = nPts - } - if end-start == 0 { - break - } - - if err := s.WriteToShard(context.Background(), uint64(shardID), points[start:end]); err != nil { - return err - } - start = end - end += chunkSz - } - return nil -} - -// ParseTags returns an instance of Tags for a comma-delimited list of key/values. -func ParseTags(s string) query.Tags { - m := make(map[string]string) - for _, kv := range strings.Split(s, ",") { - a := strings.Split(kv, "=") - m[a[0]] = a[1] - } - return query.NewTags(m) -} - -func dirExists(path string) bool { - var err error - if _, err = os.Stat(path); err == nil { - return true - } - return !os.IsNotExist(err) -} diff --git a/ui/README.md b/ui/README.md deleted file mode 100644 index 3b3d9a08470..00000000000 --- a/ui/README.md +++ /dev/null @@ -1,42 +0,0 @@ -## InfluxDB UI - -UI assets for InfluxDB are automatically downloaded and embedded in the `influxd` binary -when using the top-level `Makefile`. The UI assets are built and made available from -the [`influxdata/ui` repository](https://github.com/influxdata/ui). All of the UI source code -has been removed from this directory, and now lives in the [`influxdata/ui` repository](https://github.com/influxdata/ui). -Please submit all PRs and issues related to the InfluxDB UI to the [`influxdata/ui` repository](https://github.com/influxdata/ui). - -### Starting a Local Development Environment - -It is possible to run a frontend development server with hot reloading using the UI from -[`influxdata/ui`](https://github.com/influxdata/ui) in front of the InfluxDB backend: - -Start `influxd` listening on the default port (`8086`): - -`$ ./bin/darwin/influxd` - -Clone (if needed) & start the UI development server from the `ui` repository: - -``` -$ git clone https://github.com/influxdata/ui.git -$ cd ui -$ yarn start -``` - -The UI development server runs at [`http://localhost:8080`](http://localhost:8080/) - -### Running InfluxDB with Local UI Assets - -To run InfluxDB with local UI assets, first build the assets: - -``` -$ git clone https://github.com/influxdata/ui.git -$ cd ui -$ yarn build -``` - -Start `influxd` using the local UI assets via the `--assets-path` flag. For example, -if the `ui` folder containing built assets is at the same level as the `influxdb` folder -and the `influxd` binary is at `influxdb/bin/darwin/influxd`: - -`$ ./bin/darwin/influxd --assets-path=../ui/build` diff --git a/usage.go b/usage.go deleted file mode 100644 index 1bbe27dfa59..00000000000 --- a/usage.go +++ /dev/null @@ -1,54 +0,0 @@ -package influxdb - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// UsageMetric used to track classes of usage. -type UsageMetric string - -const ( - // UsageWriteRequestCount is the name of the metrics for tracking write request count. 
- UsageWriteRequestCount UsageMetric = "usage_write_request_count" - // UsageWriteRequestBytes is the name of the metrics for tracking the number of write bytes. - UsageWriteRequestBytes UsageMetric = "usage_write_request_bytes" - - // UsageValues is the name of the metrics for tracking the number of values. - UsageValues UsageMetric = "usage_values" - // UsageSeries is the name of the metrics for tracking the number of series written. - UsageSeries UsageMetric = "usage_series" - - // UsageQueryRequestCount is the name of the metrics for tracking query request count. - UsageQueryRequestCount UsageMetric = "usage_query_request_count" - // UsageQueryRequestBytes is the name of the metrics for tracking the number of query bytes. - UsageQueryRequestBytes UsageMetric = "usage_query_request_bytes" -) - -// Usage is a metric associated with the utilization of a particular resource. -type Usage struct { - OrganizationID *platform.ID `json:"organizationID,omitempty"` - BucketID *platform.ID `json:"bucketID,omitempty"` - Type UsageMetric `json:"type"` - Value float64 `json:"value"` -} - -// UsageService is a service for accessing usage statistics. -type UsageService interface { - GetUsage(ctx context.Context, filter UsageFilter) (map[UsageMetric]*Usage, error) -} - -// UsageFilter is used to filter usage. -type UsageFilter struct { - OrgID *platform.ID - BucketID *platform.ID - Range *Timespan -} - -// Timespan represents a range of time. -type Timespan struct { - Start time.Time `json:"start"` - Stop time.Time `json:"stop"` -} diff --git a/user.go b/user.go deleted file mode 100644 index ba7c4d3cf66..00000000000 --- a/user.go +++ /dev/null @@ -1,98 +0,0 @@ -package influxdb - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -// UserStatus indicates whether a user is active or inactive -type UserStatus string - -// Valid validates user status -func (u *UserStatus) Valid() error { - if *u != "active" && *u != "inactive" { - return &errors.Error{Code: errors.EInvalid, Msg: "Invalid user status"} - } - - return nil -} - -// User is a user. 🎉 -type User struct { - ID platform.ID `json:"id,omitempty"` - Name string `json:"name"` - OAuthID string `json:"oauthID,omitempty"` - Status Status `json:"status"` -} - -// Valid validates user -func (u *User) Valid() error { - return u.Status.Valid() -} - -// Ops for user errors and op log. -const ( - OpFindUserByID = "FindUserByID" - OpFindUser = "FindUser" - OpFindUsers = "FindUsers" - OpCreateUser = "CreateUser" - OpUpdateUser = "UpdateUser" - OpDeleteUser = "DeleteUser" -) - -// UserService represents a service for managing user data. -type UserService interface { - - // Returns a single user by ID. - FindUserByID(ctx context.Context, id platform.ID) (*User, error) - - // Returns the first user that matches filter. - FindUser(ctx context.Context, filter UserFilter) (*User, error) - - // Returns a list of users that match filter and the total count of matching users. - // Additional options provide pagination & sorting. - FindUsers(ctx context.Context, filter UserFilter, opt ...FindOptions) ([]*User, int, error) - - // Creates a new user and sets u.ID with the new identifier. - CreateUser(ctx context.Context, u *User) error - - // Updates a single user with changeset. - // Returns the new user state after update. - UpdateUser(ctx context.Context, id platform.ID, upd UserUpdate) (*User, error) - - // Removes a user by ID. 
- DeleteUser(ctx context.Context, id platform.ID) error - - // FindPermissionForUser - FindPermissionForUser(ctx context.Context, UserID platform.ID) (PermissionSet, error) -} - -// UserUpdate represents updates to a user. -// Only fields which are set are updated. -type UserUpdate struct { - Name *string `json:"name"` - Status *Status `json:"status"` -} - -// Valid validates UserUpdate -func (uu UserUpdate) Valid() error { - if uu.Status == nil { - return nil - } - - return uu.Status.Valid() -} - -// UserFilter represents a set of filter that restrict the returned results. -type UserFilter struct { - ID *platform.ID - Name *string -} - -// UserResponse is the response of user -type UserResponse struct { - Links map[string]string `json:"links"` - User -} diff --git a/user_resource_mapping.go b/user_resource_mapping.go deleted file mode 100644 index c4c4f39d606..00000000000 --- a/user_resource_mapping.go +++ /dev/null @@ -1,219 +0,0 @@ -package influxdb - -import ( - "context" - "encoding/json" - "errors" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -var ( - // ErrInvalidUserType notes that the provided UserType is invalid - ErrInvalidUserType = errors.New("unknown user type") - // ErrInvalidMappingType notes that the provided MappingType is invalid - ErrInvalidMappingType = errors.New("unknown mapping type") - // ErrUserIDRequired notes that the ID was not provided - ErrUserIDRequired = errors.New("user id is required") - // ErrResourceIDRequired notes that the provided ID was not provided - ErrResourceIDRequired = errors.New("resource id is required") -) - -// UserType can either be owner or member. -type UserType string - -const ( - // Owner can read and write to a resource - Owner UserType = "owner" // 1 - // Member can read from a resource. - Member UserType = "member" // 2 -) - -// Valid checks if the UserType is a member of the UserType enum -func (ut UserType) Valid() (err error) { - switch ut { - case Owner: // 1 - case Member: // 2 - default: - err = ErrInvalidUserType - } - - return err -} - -type MappingType uint8 - -const ( - UserMappingType = 0 - OrgMappingType = 1 -) - -func (mt MappingType) Valid() error { - switch mt { - case UserMappingType, OrgMappingType: - return nil - } - - return ErrInvalidMappingType -} - -func (mt MappingType) String() string { - switch mt { - case UserMappingType: - return "user" - case OrgMappingType: - return "org" - } - - return "unknown" -} - -func (mt MappingType) MarshalJSON() ([]byte, error) { - return json.Marshal(mt.String()) -} - -func (mt *MappingType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - - switch s { - case "user": - *mt = UserMappingType - return nil - case "org": - *mt = OrgMappingType - return nil - } - - return ErrInvalidMappingType -} - -// UserResourceMappingService maps the relationships between users and resources. -type UserResourceMappingService interface { - // FindUserResourceMappings returns a list of UserResourceMappings that match filter and the total count of matching mappings. - FindUserResourceMappings(ctx context.Context, filter UserResourceMappingFilter, opt ...FindOptions) ([]*UserResourceMapping, int, error) - - // CreateUserResourceMapping creates a user resource mapping. - CreateUserResourceMapping(ctx context.Context, m *UserResourceMapping) error - - // DeleteUserResourceMapping deletes a user resource mapping. 
- DeleteUserResourceMapping(ctx context.Context, resourceID, userID platform.ID) error -} - -// UserResourceMapping represents a mapping of a resource to its user. -type UserResourceMapping struct { - UserID platform.ID `json:"userID"` - UserType UserType `json:"userType"` - MappingType MappingType `json:"mappingType"` - ResourceType ResourceType `json:"resourceType"` - ResourceID platform.ID `json:"resourceID"` -} - -// Validate reports any validation errors for the mapping. -func (m UserResourceMapping) Validate() error { - if !m.ResourceID.Valid() { - return ErrResourceIDRequired - } - - if !m.UserID.Valid() { - return ErrUserIDRequired - } - - if err := m.UserType.Valid(); err != nil { - return err - } - - if err := m.MappingType.Valid(); err != nil { - return err - } - - if err := m.ResourceType.Valid(); err != nil { - return err - } - - return nil -} - -// UserResourceMappingFilter represents a set of filters that restrict the returned results. -type UserResourceMappingFilter struct { - ResourceID platform.ID - ResourceType ResourceType - UserID platform.ID - UserType UserType -} - -func (m *UserResourceMapping) ownerPerms() ([]Permission, error) { - if m.ResourceType == OrgsResourceType { - return OwnerPermissions(m.ResourceID), nil - } - - if m.ResourceType == InstanceResourceType { - return []Permission{ - {Action: ReadAction, Resource: Resource{Type: InstanceResourceType}}, - {Action: WriteAction, Resource: Resource{Type: InstanceResourceType}}, - }, nil - } - - ps := []Permission{ - // TODO: Uncomment these once the URM system is no longer being used for find lookups for: - // Telegraf - // DashBoard - // notification rule - // notification endpoint - // Permission{ - // Action: ReadAction, - // Resource: Resource{ - // Type: m.ResourceType, - // ID: &m.ResourceID, - // }, - // }, - // Permission{ - // Action: WriteAction, - // Resource: Resource{ - // Type: m.ResourceType, - // ID: &m.ResourceID, - // }, - // }, - } - return ps, nil -} - -func (m *UserResourceMapping) memberPerms() ([]Permission, error) { - if m.ResourceType == OrgsResourceType { - return MemberPermissions(m.ResourceID), nil - } - - if m.ResourceType == BucketsResourceType { - return []Permission{MemberBucketPermission(m.ResourceID)}, nil - } - - ps := []Permission{ - // TODO: Uncomment these once the URM system is no longer being used for find lookups for: - // Telegraf - // DashBoard - // notification rule - // notification endpoint - // Permission{ - // Action: ReadAction, - // Resource: Resource{ - // Type: m.ResourceType, - // ID: &m.ResourceID, - // }, - // }, - } - return ps, nil -} - -// ToPermissions converts a user resource mapping into a set of permissions. 
-func (m *UserResourceMapping) ToPermissions() ([]Permission, error) { - switch m.UserType { - case Owner: - return m.ownerPerms() - case Member: - return m.memberPerms() - default: - return nil, ErrInvalidUserType - } -} diff --git a/user_resource_mapping_test.go b/user_resource_mapping_test.go deleted file mode 100644 index 35448ba05e4..00000000000 --- a/user_resource_mapping_test.go +++ /dev/null @@ -1,175 +0,0 @@ -package influxdb_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/stretchr/testify/require" -) - -func TestOwnerMappingValidate(t *testing.T) { - type fields struct { - ResourceID platform.ID - ResourceType influxdb.ResourceType - UserID platform.ID - UserType influxdb.UserType - } - tests := []struct { - name string - fields fields - wantErr bool - }{ - { - name: "valid mapping", - fields: fields{ - UserID: influxdbtesting.MustIDBase16("debac1e0deadbeef"), - UserType: influxdb.Owner, - ResourceType: influxdb.DashboardsResourceType, - ResourceID: influxdbtesting.MustIDBase16("020f755c3c082000"), - }, - }, - { - name: "mapping requires a resourceid", - fields: fields{ - UserID: influxdbtesting.MustIDBase16("debac1e0deadbeef"), - UserType: influxdb.Owner, - ResourceType: influxdb.DashboardsResourceType, - }, - wantErr: true, - }, - { - name: "mapping requires a userid", - fields: fields{ - ResourceID: influxdbtesting.MustIDBase16("020f755c3c082000"), - UserType: influxdb.Owner, - ResourceType: influxdb.DashboardsResourceType, - }, - wantErr: true, - }, - { - name: "mapping requires a usertype", - fields: fields{ - ResourceID: influxdbtesting.MustIDBase16("020f755c3c082000"), - UserID: influxdbtesting.MustIDBase16("debac1e0deadbeef"), - ResourceType: influxdb.DashboardsResourceType, - }, - wantErr: true, - }, - { - name: "mapping requires a resourcetype", - fields: fields{ - ResourceID: influxdbtesting.MustIDBase16("020f755c3c082000"), - UserID: influxdbtesting.MustIDBase16("debac1e0deadbeef"), - UserType: influxdb.Owner, - }, - wantErr: true, - }, - { - name: "the usertype provided must be valid", - fields: fields{ - ResourceID: influxdbtesting.MustIDBase16("020f755c3c082000"), - UserID: influxdbtesting.MustIDBase16("debac1e0deadbeef"), - UserType: "foo", - ResourceType: influxdb.DashboardsResourceType, - }, - wantErr: true, - }, - { - name: "the resourcetype provided must be valid", - fields: fields{ - ResourceID: influxdbtesting.MustIDBase16("020f755c3c082000"), - UserID: influxdbtesting.MustIDBase16("debac1e0deadbeef"), - UserType: influxdb.Owner, - ResourceType: "foo", - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - m := influxdb.UserResourceMapping{ - ResourceID: tt.fields.ResourceID, - ResourceType: tt.fields.ResourceType, - UserID: tt.fields.UserID, - UserType: tt.fields.UserType, - } - if err := m.Validate(); (err != nil) != tt.wantErr { - t.Errorf("OwnerMapping.Validate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestOwnerMappingToPermissions(t *testing.T) { - type wants struct { - perms influxdb.Permission - err bool - } - - ResourceID, _ := platform.IDFromString("020f755c3c082000") - - tests := []struct { - name string - urm influxdb.UserResourceMapping - wants wants - }{ - { - name: "Org Member Has Permission To Read Org", - urm: influxdb.UserResourceMapping{ - UserID: 
influxdbtesting.MustIDBase16("debac1e0deadbeef"), - UserType: influxdb.Member, - ResourceType: influxdb.OrgsResourceType, - ResourceID: influxdbtesting.MustIDBase16("020f755c3c082000"), - }, - wants: wants{ - err: false, - perms: influxdb.Permission{Action: "read", Resource: influxdb.Resource{Type: "orgs", ID: ResourceID}}}, - }, - { - name: "Org Owner Has Permission To Write Org", - urm: influxdb.UserResourceMapping{ - UserID: influxdbtesting.MustIDBase16("debac1e0deadbeef"), - UserType: influxdb.Owner, - ResourceType: influxdb.OrgsResourceType, - ResourceID: influxdbtesting.MustIDBase16("020f755c3c082000"), - }, - wants: wants{ - err: false, - perms: influxdb.Permission{Action: "write", Resource: influxdb.Resource{Type: "orgs", ID: ResourceID}}}, - }, - { - name: "Org Owner Has Permission To Read Org", - urm: influxdb.UserResourceMapping{ - UserID: influxdbtesting.MustIDBase16("debac1e0deadbeef"), - UserType: influxdb.Owner, - ResourceType: influxdb.OrgsResourceType, - ResourceID: influxdbtesting.MustIDBase16("020f755c3c082000"), - }, - wants: wants{ - err: false, - perms: influxdb.Permission{Action: "read", Resource: influxdb.Resource{Type: "orgs", ID: ResourceID}}}, - }, - { - name: "Bucket Member User Has Permission To Read Bucket", - urm: influxdb.UserResourceMapping{ - UserID: influxdbtesting.MustIDBase16("debac1e0deadbeef"), - UserType: influxdb.Member, - ResourceType: influxdb.BucketsResourceType, - ResourceID: influxdbtesting.MustIDBase16("020f755c3c082000"), - }, - wants: wants{ - err: false, - perms: influxdb.Permission{Action: "read", Resource: influxdb.Resource{Type: "buckets", ID: ResourceID}}}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - perms, err := tt.urm.ToPermissions() - - require.Contains(t, perms, tt.wants.perms) - require.Equal(t, tt.wants.err, err != nil) - }) - } -} diff --git a/uuid/uuid.go b/uuid/uuid.go deleted file mode 100644 index 2967ee384f7..00000000000 --- a/uuid/uuid.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (c) 2012 The gocql Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -// The uuid package can be used to generate and parse universally unique -// identifiers, a standardized format in the form of a 128 bit number. -// -// http://tools.ietf.org/html/rfc4122 - -// Package uuid provides functions to create time-based UUIDs. -package uuid // import "github.com/influxdata/influxdb/uuid" - -import ( - "crypto/rand" - "net" - "sync/atomic" - "time" -) - -// UUID - unique identifier type representing a 128 bit number -type UUID [16]byte - -var timeBase = time.Date(1582, time.October, 15, 0, 0, 0, 0, time.UTC).Unix() -var hardwareAddr = hwAddrFunc() -var clockSeq uint32 - -func hwAddrFunc() [6]byte { - u := [6]byte{} - - ifaces, err := net.Interfaces() - if err != nil { - rand.Reader.Read(u[:]) - return u - } - for _, iface := range ifaces { - if len(iface.HardwareAddr) >= 6 { - copy(u[:], iface.HardwareAddr) - return u - } - } - rand.Reader.Read(u[:]) - return u -} - -// TimeUUID generates a new time based UUID (version 1) using the current -// time as the timestamp. -func TimeUUID() UUID { - return FromTime(time.Now()) -} - -// FromTime generates a new time based UUID (version 1) as described in -// RFC 4122. This UUID contains the MAC address of the node that generated -// the UUID, the given timestamp and a sequence number. -func FromTime(aTime time.Time) UUID { - var u UUID - - utcTime := aTime.In(time.UTC) - t := uint64(utcTime.Unix()-timeBase)*10000000 + uint64(utcTime.Nanosecond()/100) - u[0], u[1], u[2], u[3] = byte(t>>24), byte(t>>16), byte(t>>8), byte(t) - u[4], u[5] = byte(t>>40), byte(t>>32) - u[6], u[7] = byte(t>>56)&0x0F, byte(t>>48) - - clock := atomic.AddUint32(&clockSeq, 1) - u[8] = byte(clock >> 8) - u[9] = byte(clock) - - copy(u[10:], hardwareAddr[:]) - - u[6] |= 0x10 // set version to 1 (time based uuid) - u[8] &= 0x3F // clear variant - u[8] |= 0x80 // set to IETF variant - - return u -} - -// String returns the UUID in it's canonical form, a 32 digit hexadecimal -// number in the form of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. -func (u UUID) String() string { - var offsets = [...]int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} - const hexString = "0123456789abcdef" - r := make([]byte, 36) - for i, b := range u { - r[offsets[i]] = hexString[b>>4] - r[offsets[i]+1] = hexString[b&0xF] - } - r[8] = '-' - r[13] = '-' - r[18] = '-' - r[23] = '-' - return string(r) - -} diff --git a/v1/authorization/authorizer.go b/v1/authorization/authorizer.go deleted file mode 100644 index ec7e4364efa..00000000000 --- a/v1/authorization/authorizer.go +++ /dev/null @@ -1,126 +0,0 @@ -package authorization - -import ( - "context" - "errors" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - ErrUnsupportedScheme = &errors2.Error{ - Code: errors2.EInternal, - Msg: "unsupported authorization scheme", - } -) - -type UserFinder interface { - // Returns a single user by ID. 
- FindUserByID(ctx context.Context, id platform.ID) (*influxdb.User, error) -} - -type PasswordComparer interface { - ComparePassword(ctx context.Context, authID platform.ID, password string) error -} - -type AuthTokenFinder interface { - FindAuthorizationByToken(ctx context.Context, token string) (*influxdb.Authorization, error) -} - -// A type that is used to verify credentials. -type Authorizer struct { - AuthV1 AuthTokenFinder // A service to find V1 tokens - AuthV2 AuthTokenFinder // A service to find V2 tokens - Comparer PasswordComparer // A service to compare passwords for V1 tokens - User UserFinder // A service to find users -} - -// Authorize returns an influxdb.Authorization if c can be verified; otherwise, an error. -// influxdb.ErrCredentialsUnauthorized will be returned if the credentials are invalid. -func (v *Authorizer) Authorize(ctx context.Context, c influxdb.CredentialsV1) (auth *influxdb.Authorization, err error) { - defer func() { - auth, err = v.checkAuthError(ctx, auth, err) - }() - - switch c.Scheme { - case influxdb.SchemeV1Basic, influxdb.SchemeV1URL: - auth, err = v.tryV1Authorization(ctx, c) - if errors.Is(err, ErrAuthNotFound) { - return v.tryV2Authorization(ctx, c) - } - - if err != nil { - return nil, v.normalizeError(err) - } - return - - case influxdb.SchemeV1Token: - return v.tryV2Authorization(ctx, c) - - default: - // this represents a programmer error - return nil, ErrUnsupportedScheme - } -} - -func (v *Authorizer) checkAuthError(ctx context.Context, auth *influxdb.Authorization, err error) (*influxdb.Authorization, error) { - if err != nil { - return nil, err - } - - if auth == nil { - return nil, influxdb.ErrCredentialsUnauthorized - } - - if auth.Status != influxdb.Active { - return nil, influxdb.ErrCredentialsUnauthorized - } - - // check the user is still active - if user, userErr := v.User.FindUserByID(ctx, auth.UserID); userErr != nil { - return nil, v.normalizeError(userErr) - } else if user == nil || user.Status != influxdb.Active { - return nil, influxdb.ErrCredentialsUnauthorized - } - - return auth, nil -} - -func (v *Authorizer) tryV1Authorization(ctx context.Context, c influxdb.CredentialsV1) (auth *influxdb.Authorization, err error) { - auth, err = v.AuthV1.FindAuthorizationByToken(ctx, c.Username) - if err != nil { - return nil, err - } - - if err := v.Comparer.ComparePassword(ctx, auth.ID, c.Token); err != nil { - return nil, err - } - - return auth, nil -} - -func (v *Authorizer) tryV2Authorization(ctx context.Context, c influxdb.CredentialsV1) (auth *influxdb.Authorization, err error) { - auth, err = v.AuthV2.FindAuthorizationByToken(ctx, c.Token) - if err != nil { - return nil, v.normalizeError(err) - } - return auth, nil -} - -func (v *Authorizer) normalizeError(err error) error { - if err == nil { - return nil - } - - var erri *errors2.Error - if errors.As(err, &erri) { - switch erri.Code { - case errors2.ENotFound, errors2.EForbidden: - return influxdb.ErrCredentialsUnauthorized - } - } - - return err -} diff --git a/v1/authorization/authorizer_test.go b/v1/authorization/authorizer_test.go deleted file mode 100644 index 61f19eb83ba..00000000000 --- a/v1/authorization/authorizer_test.go +++ /dev/null @@ -1,339 +0,0 @@ -package authorization - -import ( - "context" - "testing" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/influxdata/influxdb/v2/v1/authorization/mocks" - 
"github.com/stretchr/testify/assert" -) - -func TestAuthorizer_Authorize(t *testing.T) { - var ( - username = "foo" - token = "bar" - authID = itesting.MustIDBase16("0000000000001234") - userID = itesting.MustIDBase16("000000000000fefe") - expAuthErr = influxdb.ErrCredentialsUnauthorized.Error() - - auth = &influxdb.Authorization{ - ID: authID, - UserID: userID, - Token: username, - Status: influxdb.Active, - } - - user = &influxdb.User{ - ID: userID, - Status: influxdb.Active, - } - ) - - t.Run("invalid scheme returns error", func(t *testing.T) { - ctrl := gomock.NewController(t) - t.Cleanup(ctrl.Finish) - - ctx := context.Background() - - authz := Authorizer{} - - cred := influxdb.CredentialsV1{ - Scheme: "foo", - } - - gotAuth, gotErr := authz.Authorize(ctx, cred) - assert.Nil(t, gotAuth) - assert.EqualError(t, gotErr, ErrUnsupportedScheme.Error()) - }) - - tests := func(t *testing.T, scheme influxdb.SchemeV1) { - t.Run("invalid v1 and v2 token returns expected error", func(t *testing.T) { - ctrl := gomock.NewController(t) - t.Cleanup(ctrl.Finish) - - ctx := context.Background() - - v1 := mocks.NewMockAuthTokenFinder(ctrl) - v1.EXPECT(). - FindAuthorizationByToken(ctx, username). - Return(nil, ErrAuthNotFound) - - v2 := mocks.NewMockAuthTokenFinder(ctrl) - v2.EXPECT(). - FindAuthorizationByToken(ctx, token). - Return(nil, ErrAuthNotFound) - - authz := Authorizer{ - AuthV1: v1, - AuthV2: v2, - } - - cred := influxdb.CredentialsV1{ - Scheme: scheme, - Username: username, - Token: token, - } - - gotAuth, gotErr := authz.Authorize(ctx, cred) - assert.Nil(t, gotAuth) - assert.EqualError(t, gotErr, expAuthErr) - }) - - t.Run("valid v1 token and invalid password returns expected error", func(t *testing.T) { - ctrl := gomock.NewController(t) - t.Cleanup(ctrl.Finish) - - ctx := context.Background() - - v1 := mocks.NewMockAuthTokenFinder(ctrl) - v1.EXPECT(). - FindAuthorizationByToken(ctx, username). - Return(auth, nil) - - pw := mocks.NewMockPasswordComparer(ctrl) - pw.EXPECT(). - ComparePassword(ctx, authID, token). - Return(EIncorrectPassword) - - authz := Authorizer{ - AuthV1: v1, - Comparer: pw, - } - - cred := influxdb.CredentialsV1{ - Scheme: scheme, - Username: username, - Token: token, - } - - gotAuth, gotErr := authz.Authorize(ctx, cred) - assert.Nil(t, gotAuth) - assert.EqualError(t, gotErr, expAuthErr) - }) - - t.Run("valid v1 token and password returns authorization", func(t *testing.T) { - ctrl := gomock.NewController(t) - t.Cleanup(ctrl.Finish) - - ctx := context.Background() - - v1 := mocks.NewMockAuthTokenFinder(ctrl) - v1.EXPECT(). - FindAuthorizationByToken(ctx, username). - Return(auth, nil) - - pw := mocks.NewMockPasswordComparer(ctrl) - pw.EXPECT(). - ComparePassword(ctx, authID, token). - Return(nil) - - uf := mocks.NewMockUserFinder(ctrl) - uf.EXPECT(). - FindUserByID(ctx, userID). - Return(user, nil) - - authz := Authorizer{ - AuthV1: v1, - Comparer: pw, - User: uf, - } - - cred := influxdb.CredentialsV1{ - Scheme: scheme, - Username: username, - Token: token, - } - - gotAuth, gotErr := authz.Authorize(ctx, cred) - assert.NoError(t, gotErr) - assert.Equal(t, auth, gotAuth) - }) - - t.Run("invalid v1 token and valid v2 token returns authorization", func(t *testing.T) { - ctrl := gomock.NewController(t) - t.Cleanup(ctrl.Finish) - - ctx := context.Background() - - v1 := mocks.NewMockAuthTokenFinder(ctrl) - v1.EXPECT(). - FindAuthorizationByToken(ctx, username). - Return(nil, ErrAuthNotFound) - - v2 := mocks.NewMockAuthTokenFinder(ctrl) - v2.EXPECT(). 
- FindAuthorizationByToken(ctx, token). - Return(auth, nil) - - uf := mocks.NewMockUserFinder(ctrl) - uf.EXPECT(). - FindUserByID(ctx, userID). - Return(user, nil) - - authz := Authorizer{ - AuthV1: v1, - AuthV2: v2, - User: uf, - } - - cred := influxdb.CredentialsV1{ - Scheme: scheme, - Username: username, - Token: token, - } - - gotAuth, gotErr := authz.Authorize(ctx, cred) - assert.NoError(t, gotErr) - assert.Equal(t, auth, gotAuth) - }) - } - - t.Run("using Basic scheme", func(t *testing.T) { - tests(t, influxdb.SchemeV1Basic) - }) - - t.Run("using URL scheme", func(t *testing.T) { - tests(t, influxdb.SchemeV1URL) - }) - - t.Run("using Token scheme", func(t *testing.T) { - t.Run("invalid v2 token returns expected error", func(t *testing.T) { - ctrl := gomock.NewController(t) - t.Cleanup(ctrl.Finish) - - ctx := context.Background() - - v2 := mocks.NewMockAuthTokenFinder(ctrl) - v2.EXPECT(). - FindAuthorizationByToken(ctx, token). - Return(nil, ErrAuthNotFound) - - authz := Authorizer{ - AuthV2: v2, - } - - cred := influxdb.CredentialsV1{ - Scheme: influxdb.SchemeV1Token, - Username: username, - Token: token, - } - - gotAuth, gotErr := authz.Authorize(ctx, cred) - assert.Nil(t, gotAuth) - assert.EqualError(t, gotErr, expAuthErr) - }) - - t.Run("valid v2 token returns authorization", func(t *testing.T) { - ctrl := gomock.NewController(t) - t.Cleanup(ctrl.Finish) - - ctx := context.Background() - - v2 := mocks.NewMockAuthTokenFinder(ctrl) - v2.EXPECT(). - FindAuthorizationByToken(ctx, token). - Return(auth, nil) - - uf := mocks.NewMockUserFinder(ctrl) - uf.EXPECT(). - FindUserByID(ctx, userID). - Return(user, nil) - - authz := Authorizer{ - AuthV2: v2, - User: uf, - } - - cred := influxdb.CredentialsV1{ - Scheme: influxdb.SchemeV1Token, - Username: username, - Token: token, - } - - gotAuth, gotErr := authz.Authorize(ctx, cred) - assert.NoError(t, gotErr) - assert.Equal(t, auth, gotAuth) - }) - }) - - // test inactive user and inactive token - - t.Run("inactive user returns error", func(t *testing.T) { - ctrl := gomock.NewController(t) - t.Cleanup(ctrl.Finish) - - ctx := context.Background() - - v1 := mocks.NewMockAuthTokenFinder(ctrl) - v1.EXPECT(). - FindAuthorizationByToken(ctx, username). - Return(auth, nil) - - pw := mocks.NewMockPasswordComparer(ctrl) - pw.EXPECT(). - ComparePassword(ctx, authID, token). - Return(nil) - - user := *user - user.Status = influxdb.Inactive - - uf := mocks.NewMockUserFinder(ctrl) - uf.EXPECT(). - FindUserByID(ctx, userID). - Return(&user, nil) - - authz := Authorizer{ - AuthV1: v1, - Comparer: pw, - User: uf, - } - - cred := influxdb.CredentialsV1{ - Scheme: influxdb.SchemeV1Basic, - Username: username, - Token: token, - } - - gotAuth, gotErr := authz.Authorize(ctx, cred) - assert.Nil(t, gotAuth) - assert.EqualError(t, gotErr, expAuthErr) - }) - - t.Run("inactive token returns error", func(t *testing.T) { - ctrl := gomock.NewController(t) - t.Cleanup(ctrl.Finish) - - ctx := context.Background() - - auth := *auth - auth.Status = influxdb.Inactive - - v1 := mocks.NewMockAuthTokenFinder(ctrl) - v1.EXPECT(). - FindAuthorizationByToken(ctx, username). - Return(&auth, nil) - - pw := mocks.NewMockPasswordComparer(ctrl) - pw.EXPECT(). - ComparePassword(ctx, authID, token). 
- Return(nil) - - authz := Authorizer{ - AuthV1: v1, - Comparer: pw, - } - - cred := influxdb.CredentialsV1{ - Scheme: influxdb.SchemeV1Basic, - Username: username, - Token: token, - } - - gotAuth, gotErr := authz.Authorize(ctx, cred) - assert.Nil(t, gotAuth) - assert.EqualError(t, gotErr, expAuthErr) - }) -} diff --git a/v1/authorization/caching_password_service.go b/v1/authorization/caching_password_service.go deleted file mode 100644 index e164339ca4e..00000000000 --- a/v1/authorization/caching_password_service.go +++ /dev/null @@ -1,111 +0,0 @@ -package authorization - -import ( - "bytes" - "context" - crand "crypto/rand" - "crypto/sha256" - "io" - "sync" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// An implementation of influxdb.PasswordsService that will perform -// ComparePassword requests at a reduced cost under certain -// conditions. See ComparePassword for further information. -// -// The cache is only valid for the duration of the process. -type CachingPasswordsService struct { - inner influxdb.PasswordsService - - mu sync.RWMutex // protects concurrent access to authCache - authCache map[platform.ID]authUser -} - -func NewCachingPasswordsService(inner influxdb.PasswordsService) *CachingPasswordsService { - return &CachingPasswordsService{inner: inner, authCache: make(map[platform.ID]authUser)} -} - -var _ influxdb.PasswordsService = (*CachingPasswordsService)(nil) - -func (c *CachingPasswordsService) SetPassword(ctx context.Context, id platform.ID, password string) error { - err := c.inner.SetPassword(ctx, id, password) - if err == nil { - c.mu.Lock() - delete(c.authCache, id) - c.mu.Unlock() - } - return err -} - -// ComparePassword will attempt to perform the comparison using a lower cost hashing function -// if influxdb.ContextHasPasswordCacheOption returns true for ctx. -func (c *CachingPasswordsService) ComparePassword(ctx context.Context, id platform.ID, password string) error { - c.mu.RLock() - au, ok := c.authCache[id] - c.mu.RUnlock() - if ok { - // verify the password using the cached salt and hash - if bytes.Equal(c.hashWithSalt(au.salt, password), au.hash) { - return nil - } - - // fall through to requiring a full bcrypt hash for invalid passwords - } - - err := c.inner.ComparePassword(ctx, id, password) - if err != nil { - return err - } - - if salt, hashed, err := c.saltedHash(password); err == nil { - c.mu.Lock() - c.authCache[id] = authUser{salt: salt, hash: hashed} - c.mu.Unlock() - } - - return nil -} - -func (c *CachingPasswordsService) CompareAndSetPassword(ctx context.Context, id platform.ID, old, new string) error { - err := c.inner.CompareAndSetPassword(ctx, id, old, new) - if err == nil { - c.mu.Lock() - delete(c.authCache, id) - c.mu.Unlock() - } - return err -} - -// NOTE(sgc): This caching implementation was lifted from the 1.x source -// https://github.com/influxdata/influxdb/blob/c1e11e732e145fc1a356535ddf3dcb9fb732a22b/services/meta/client.go#L390-L406 - -const ( - // SaltBytes is the number of bytes used for salts. - SaltBytes = 32 -) - -type authUser struct { - salt []byte - hash []byte -} - -// hashWithSalt returns a salted hash of password using salt. -func (c *CachingPasswordsService) hashWithSalt(salt []byte, password string) []byte { - hasher := sha256.New() - hasher.Write(salt) - hasher.Write([]byte(password)) - return hasher.Sum(nil) -} - -// saltedHash returns a salt and salted hash of password. 
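// A short, self-contained sketch of the salted-hash check the cache performs on
// repeat comparisons: after one successful (expensive) bcrypt comparison by the
// inner service, later calls only need a SHA-256 over salt+password. SaltBytes and
// hashWithSalt mirror the service above; main, the password literal, and the
// printed check are illustration only.
//
//	package main
//
//	import (
//		"bytes"
//		"crypto/rand"
//		"crypto/sha256"
//		"fmt"
//		"io"
//	)
//
//	func hashWithSalt(salt []byte, password string) []byte {
//		h := sha256.New()
//		h.Write(salt)
//		h.Write([]byte(password))
//		return h.Sum(nil)
//	}
//
//	func main() {
//		// Generate a salt the same size as SaltBytes in the service above.
//		salt := make([]byte, 32)
//		if _, err := io.ReadFull(rand.Reader, salt); err != nil {
//			panic(err)
//		}
//		// Cached after the first successful full comparison.
//		cached := hashWithSalt(salt, "correct horse battery staple")
//		// A later ComparePassword with the same password can be answered from the cache.
//		fmt.Println(bytes.Equal(cached, hashWithSalt(salt, "correct horse battery staple")))
//	}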
-func (c *CachingPasswordsService) saltedHash(password string) (salt, hash []byte, err error) { - salt = make([]byte, SaltBytes) - if _, err := io.ReadFull(crand.Reader, salt); err != nil { - return nil, nil, err - } - - return salt, c.hashWithSalt(salt, password), nil -} diff --git a/v1/authorization/caching_password_service_test.go b/v1/authorization/caching_password_service_test.go deleted file mode 100644 index e9405c2cafe..00000000000 --- a/v1/authorization/caching_password_service_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package authorization - -import ( - "context" - "strings" - "testing" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/mock" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/stretchr/testify/assert" -) - -func TestCachingPasswordsService(t *testing.T) { - const ( - user1 = platform.ID(1) - user2 = platform.ID(2) - ) - - makeUser := func(salt, pass string) authUser { - if len(salt) != SaltBytes { - panic("invalid salt") - } - - var ps CachingPasswordsService - return authUser{salt: []byte(salt), hash: ps.hashWithSalt([]byte(salt), pass)} - } - - var ( - userE1 = makeUser(strings.Repeat("salt---1", 4), "foo") - userE2 = makeUser(strings.Repeat("salt---2", 4), "bar") - ) - - t.Run("SetPassword deletes cached user", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - inner := mock.NewMockPasswordsService(ctrl) - inner.EXPECT(). - SetPassword(gomock.Any(), user1, "foo"). - Return(nil) - - s := NewCachingPasswordsService(inner) - s.authCache[user1] = userE1 - s.authCache[user2] = userE2 - - ctx := context.Background() - - _, ok := s.authCache[user1] - assert.True(t, ok) - assert.NoError(t, s.SetPassword(ctx, user1, "foo")) - _, ok = s.authCache[user1] - assert.False(t, ok) - _, ok = s.authCache[user2] - assert.True(t, ok) - }) - - t.Run("ComparePassword adds cached user", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - inner := mock.NewMockPasswordsService(ctrl) - inner.EXPECT(). - ComparePassword(gomock.Any(), user1, "foo"). - Return(nil) - - s := NewCachingPasswordsService(inner) - s.authCache[user2] = userE2 - - ctx := context.Background() - - assert.NoError(t, s.ComparePassword(ctx, user1, "foo")) - _, ok := s.authCache[user1] - assert.True(t, ok) - _, ok = s.authCache[user2] - assert.True(t, ok) - }) - - t.Run("ComparePassword does not add cached user when inner errors", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - inner := mock.NewMockPasswordsService(ctrl) - inner.EXPECT(). - ComparePassword(gomock.Any(), user1, "foo"). 
- Return(tenant.EShortPassword) - - s := NewCachingPasswordsService(inner) - s.authCache[user2] = userE2 - - ctx := context.Background() - - assert.Error(t, s.ComparePassword(ctx, user1, "foo")) - _, ok := s.authCache[user1] - assert.False(t, ok) - _, ok = s.authCache[user2] - assert.True(t, ok) - }) - - t.Run("ComparePassword uses cached password when context option set", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - inner := mock.NewMockPasswordsService(ctrl) - s := NewCachingPasswordsService(inner) - s.authCache[user1] = userE1 - s.authCache[user2] = userE2 - - ctx := context.Background() - assert.NoError(t, s.ComparePassword(ctx, user1, "foo")) - }) - - t.Run("CompareAndSetPassword deletes cached user", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - inner := mock.NewMockPasswordsService(ctrl) - inner.EXPECT(). - CompareAndSetPassword(gomock.Any(), user1, "foo", "foo2"). - Return(nil) - - s := NewCachingPasswordsService(inner) - s.authCache[user1] = userE1 - s.authCache[user2] = userE2 - - ctx := context.Background() - - assert.NoError(t, s.CompareAndSetPassword(ctx, user1, "foo", "foo2")) - _, ok := s.authCache[user1] - assert.False(t, ok) - _, ok = s.authCache[user2] - assert.True(t, ok) - }) - - // The following tests ensure the service does not change state for invalid - // requests, which may permit a certain class of attacks. - - t.Run("SetPassword does not delete cached user when inner errors", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - inner := mock.NewMockPasswordsService(ctrl) - inner.EXPECT(). - SetPassword(gomock.Any(), user1, "foo"). - Return(tenant.EShortPassword) - - s := NewCachingPasswordsService(inner) - s.authCache[user1] = userE1 - s.authCache[user2] = userE2 - - ctx := context.Background() - - _, ok := s.authCache[user1] - assert.True(t, ok) - assert.EqualError(t, s.SetPassword(ctx, user1, "foo"), tenant.EShortPassword.Error()) - _, ok = s.authCache[user1] - assert.True(t, ok) - }) - - t.Run("CompareAndSetPassword does not delete cached user when inner errors", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - inner := mock.NewMockPasswordsService(ctrl) - inner.EXPECT(). - CompareAndSetPassword(gomock.Any(), user1, "foo", "foo2"). 
- Return(tenant.EShortPassword) - - s := NewCachingPasswordsService(inner) - s.authCache[user1] = userE1 - s.authCache[user2] = userE2 - - ctx := context.Background() - - assert.Error(t, s.CompareAndSetPassword(ctx, user1, "foo", "foo2")) - _, ok := s.authCache[user1] - assert.True(t, ok) - _, ok = s.authCache[user2] - assert.True(t, ok) - }) - -} diff --git a/v1/authorization/error.go b/v1/authorization/error.go deleted file mode 100644 index a9a658f2745..00000000000 --- a/v1/authorization/error.go +++ /dev/null @@ -1,73 +0,0 @@ -package authorization - -import ( - "fmt" - - "github.com/influxdata/influxdb/v2/kit/platform/errors" -) - -var ( - // ErrInvalidAuthID is used when the Authorization's ID cannot be encoded - ErrInvalidAuthID = &errors.Error{ - Code: errors.EInvalid, - Msg: "authorization ID is invalid", - } - - // ErrAuthNotFound is used when the specified auth cannot be found - ErrAuthNotFound = &errors.Error{ - Code: errors.ENotFound, - Msg: "authorization not found", - } - - // NotUniqueIDError occurs when attempting to create an Authorization with an ID that already belongs to another one - NotUniqueIDError = &errors.Error{ - Code: errors.EConflict, - Msg: "ID already exists", - } - - // ErrFailureGeneratingID occurs ony when the random number generator - // cannot generate an ID in MaxIDGenerationN times. - ErrFailureGeneratingID = &errors.Error{ - Code: errors.EInternal, - Msg: "unable to generate valid id", - } - - // ErrTokenAlreadyExistsError is used when attempting to create an authorization - // with a token that already exists - ErrTokenAlreadyExistsError = &errors.Error{ - Code: errors.EConflict, - Msg: "token already exists", - } - - // ErrBucketNotFound is used when attempting to create an authorization - // with a bucket id that does not exist - ErrBucketNotFound = &errors.Error{ - Code: errors.ENotFound, - Msg: "bucket not found when creating auth", - } -) - -// ErrInvalidAuthIDError is used when a service was provided an invalid ID. -func ErrInvalidAuthIDError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "auth id provided is invalid", - Err: err, - } -} - -// ErrInternalServiceError is used when the error comes from an internal system. -func ErrInternalServiceError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Err: err, - } -} - -// UnexpectedAuthIndexError is used when the error comes from an internal system. -func UnexpectedAuthIndexError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: fmt.Sprintf("unexpected error retrieving auth index; Err: %v", err), - } -} diff --git a/v1/authorization/http_client.go b/v1/authorization/http_client.go deleted file mode 100644 index 1c8c86afca3..00000000000 --- a/v1/authorization/http_client.go +++ /dev/null @@ -1,122 +0,0 @@ -package authorization - -import ( - "context" - "errors" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/pkg/httpc" -) - -var ( - _ influxdb.AuthorizationService = (*Client)(nil) - _ PasswordService = (*Client)(nil) -) - -// Client connects to Influx via HTTP using tokens to manage authorizations -type Client struct { - Client *httpc.Client -} - -// CreateAuthorization creates a new authorization and sets b.ID with the new identifier. 
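// A hedged sketch of how the error helpers defined in v1/authorization/error.go above
// are meant to be applied: malformed caller input is wrapped so the HTTP layer can map
// it to a consistent status code. platform.IDFromString and ErrInvalidAuthIDError come
// from the code above; parseAuthID itself is an assumed helper name for illustration.
//
//	func parseAuthID(raw string) (platform.ID, error) {
//		id, err := platform.IDFromString(raw)
//		if err != nil {
//			// Invalid IDs surface as an EInvalid platform error.
//			return 0, ErrInvalidAuthIDError(err)
//		}
//		return *id, nil
//	}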
-func (s *Client) CreateAuthorization(ctx context.Context, a *influxdb.Authorization) error { - newAuth, err := newPostAuthorizationRequest(a) - if err != nil { - return err - } - - return s.Client. - PostJSON(newAuth, prefixAuthorization). - DecodeJSON(a). - Do(ctx) -} - -// FindAuthorizations returns a list of authorizations that match filter and the total count of matching authorizations. -// Additional options provide pagination & sorting. -func (s *Client) FindAuthorizations(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - params := influxdb.FindOptionParams(opt...) - if filter.ID != nil { - params = append(params, [2]string{"id", filter.ID.String()}) - } - if filter.Token != nil { - params = append(params, [2]string{"token", *filter.Token}) - } - if filter.UserID != nil { - params = append(params, [2]string{"userID", filter.UserID.String()}) - } - if filter.User != nil { - params = append(params, [2]string{"user", *filter.User}) - } - if filter.OrgID != nil { - params = append(params, [2]string{"orgID", filter.OrgID.String()}) - } - if filter.Org != nil { - params = append(params, [2]string{"org", *filter.Org}) - } - - var as authsResponse - err := s.Client. - Get(prefixAuthorization). - QueryParams(params...). - DecodeJSON(&as). - Do(ctx) - if err != nil { - return nil, 0, err - } - - auths := make([]*influxdb.Authorization, 0, len(as.Auths)) - for _, a := range as.Auths { - auths = append(auths, a.toInfluxdb()) - } - - return auths, len(auths), nil -} - -// FindAuthorizationByToken is not supported by the HTTP authorization service. -func (s *Client) FindAuthorizationByToken(ctx context.Context, token string) (*influxdb.Authorization, error) { - return nil, errors.New("not supported in HTTP authorization service") -} - -// FindAuthorizationByID finds a single Authorization by its ID against a remote influx server. -func (s *Client) FindAuthorizationByID(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - var b influxdb.Authorization - err := s.Client. - Get(prefixAuthorization, id.String()). - DecodeJSON(&b). - Do(ctx) - if err != nil { - return nil, err - } - return &b, nil -} - -// UpdateAuthorization updates the status and description if available. -func (s *Client) UpdateAuthorization(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) { - var res authResponse - err := s.Client. - PatchJSON(upd, prefixAuthorization, id.String()). - DecodeJSON(&res). - Do(ctx) - if err != nil { - return nil, err - } - - return res.toInfluxdb(), nil -} - -// DeleteAuthorization removes a authorization by id. -func (s *Client) DeleteAuthorization(ctx context.Context, id platform.ID) error { - return s.Client. - Delete(prefixAuthorization, id.String()). - Do(ctx) -} - -// SetPassword sets the password for the authorization token id. -func (s *Client) SetPassword(ctx context.Context, id platform.ID, password string) error { - return s.Client. - PostJSON(passwordSetRequest{ - Password: password, - }, prefixAuthorization, id.String(), "password"). 
- Do(ctx) -} diff --git a/v1/authorization/http_server.go b/v1/authorization/http_server.go deleted file mode 100644 index 41996f3cfc5..00000000000 --- a/v1/authorization/http_server.go +++ /dev/null @@ -1,663 +0,0 @@ -package authorization - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - kithttp "github.com/influxdata/influxdb/v2/kit/transport/http" - "go.uber.org/zap" -) - -// TenantService is used to look up the Organization and User for an Authorization -type TenantService interface { - FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) - FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) - FindUserByID(ctx context.Context, id platform.ID) (*influxdb.User, error) - FindUser(ctx context.Context, filter influxdb.UserFilter) (*influxdb.User, error) - FindBucketByID(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) -} - -type PasswordService interface { - SetPassword(ctx context.Context, id platform.ID, password string) error -} - -type AuthHandler struct { - chi.Router - api *kithttp.API - log *zap.Logger - authSvc influxdb.AuthorizationService - passwordSvc PasswordService - tenantService TenantService -} - -// NewHTTPAuthHandler constructs a new http server. -func NewHTTPAuthHandler(log *zap.Logger, authService influxdb.AuthorizationService, passwordService PasswordService, tenantService TenantService) *AuthHandler { - h := &AuthHandler{ - api: kithttp.NewAPI(kithttp.WithLog(log)), - log: log, - authSvc: authService, - passwordSvc: passwordService, - tenantService: tenantService, - } - - r := chi.NewRouter() - r.Use( - middleware.Recoverer, - middleware.RequestID, - middleware.RealIP, - ) - - r.Route("/", func(r chi.Router) { - r.Post("/", h.handlePostAuthorization) - r.Get("/", h.handleGetAuthorizations) - - r.Route("/{id}", func(r chi.Router) { - r.Get("/", h.handleGetAuthorization) - r.Patch("/", h.handleUpdateAuthorization) - r.Delete("/", h.handleDeleteAuthorization) - r.Post("/password", h.handlePostUserPassword) - }) - }) - - h.Router = r - return h -} - -const prefixAuthorization = "/private/legacy/authorizations" - -func (h *AuthHandler) Prefix() string { - return prefixAuthorization -} - -// handlePostAuthorization is the HTTP handler for the POST prefixAuthorization route. 
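// A minimal wiring sketch for the handler above, mirroring the setup used in
// http_server_test.go: the handler is itself a chi.Router and is mounted under its
// Prefix(). The authSvc, passwordSvc and tenantSvc values are assumed to satisfy the
// influxdb.AuthorizationService, PasswordService and TenantService interfaces declared
// above, and the listen address is a placeholder.
//
//	handler := NewHTTPAuthHandler(zap.NewNop(), authSvc, passwordSvc, tenantSvc)
//	router := chi.NewRouter()
//	router.Mount(handler.Prefix(), handler) // serves /private/legacy/authorizations
//	_ = http.ListenAndServe(":8086", router)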
-func (h *AuthHandler) handlePostAuthorization(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - a, err := decodePostAuthorizationRequest(ctx, r) - if err != nil { - h.api.Err(w, r, err) - return - } - - user, err := getAuthorizedUser(r, h.tenantService) - if err != nil { - h.api.Err(w, r, influxdb.ErrUnableToCreateToken) - return - } - - userID := user.ID - if a.UserID != nil && a.UserID.Valid() { - userID = *a.UserID - } - - auth := a.toInfluxdb(userID) - - if err := h.authSvc.CreateAuthorization(ctx, auth); err != nil { - h.api.Err(w, r, err) - return - } - - perms, err := h.newPermissionsResponse(ctx, auth.Permissions) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.log.Debug("Auth created ", zap.String("auth", fmt.Sprint(auth))) - - resp, err := h.newAuthResponse(ctx, auth, perms) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusCreated, resp) -} - -func getAuthorizedUser(r *http.Request, ts TenantService) (*influxdb.User, error) { - ctx := r.Context() - - a, err := icontext.GetAuthorizer(ctx) - if err != nil { - return nil, err - } - - return ts.FindUserByID(ctx, a.GetUserID()) -} - -type postAuthorizationRequest struct { - Token string `json:"token"` - Status influxdb.Status `json:"status"` - OrgID platform.ID `json:"orgID"` - UserID *platform.ID `json:"userID,omitempty"` - Description string `json:"description"` - Permissions []influxdb.Permission `json:"permissions"` -} - -type authResponse struct { - ID platform.ID `json:"id"` - Token string `json:"token"` - Status influxdb.Status `json:"status"` - Description string `json:"description"` - OrgID platform.ID `json:"orgID"` - Org string `json:"org"` - UserID platform.ID `json:"userID"` - User string `json:"user"` - Permissions []permissionResponse `json:"permissions"` - Links map[string]string `json:"links"` - CreatedAt time.Time `json:"createdAt"` - UpdatedAt time.Time `json:"updatedAt"` -} - -// In the future, we would like only the service layer to look up the user and org to see if they are valid -// but for now we need to look up the User and Org here because the API expects the response -// to have the names of the Org and User -func (h *AuthHandler) newAuthResponse(ctx context.Context, a *influxdb.Authorization, ps []permissionResponse) (*authResponse, error) { - org, err := h.tenantService.FindOrganizationByID(ctx, a.OrgID) - if err != nil { - h.log.Info("Failed to get org", zap.String("handler", "getAuthorizations"), zap.String("orgID", a.OrgID.String()), zap.Error(err)) - return nil, err - } - user, err := h.tenantService.FindUserByID(ctx, a.UserID) - if err != nil { - h.log.Info("Failed to get user", zap.String("userID", a.UserID.String()), zap.Error(err)) - return nil, err - } - res := &authResponse{ - ID: a.ID, - Token: a.Token, - Status: a.Status, - Description: a.Description, - OrgID: a.OrgID, - UserID: a.UserID, - User: user.Name, - Org: org.Name, - Permissions: ps, - Links: map[string]string{ - "self": fmt.Sprintf(prefixAuthorization+"/%s", a.ID), - "user": fmt.Sprintf("/api/v2/users/%s", a.UserID), - }, - CreatedAt: a.CreatedAt, - UpdatedAt: a.UpdatedAt, - } - - return res, nil -} - -func (p *postAuthorizationRequest) toInfluxdb(userID platform.ID) *influxdb.Authorization { - t := &influxdb.Authorization{ - OrgID: p.OrgID, - Token: p.Token, - Status: p.Status, - Description: p.Description, - Permissions: p.Permissions, - UserID: userID, - } - - return t -} - -func (a *authResponse) toInfluxdb() *influxdb.Authorization { - res := 
&influxdb.Authorization{ - ID: a.ID, - Token: a.Token, - Status: a.Status, - Description: a.Description, - OrgID: a.OrgID, - UserID: a.UserID, - CRUDLog: influxdb.CRUDLog{ - CreatedAt: a.CreatedAt, - UpdatedAt: a.UpdatedAt, - }, - } - for _, p := range a.Permissions { - res.Permissions = append(res.Permissions, influxdb.Permission{Action: p.Action, Resource: p.Resource.Resource}) - } - return res -} - -type authsResponse struct { - Links map[string]string `json:"links"` - Auths []*authResponse `json:"authorizations"` -} - -func newAuthsResponse(as []*authResponse) *authsResponse { - return &authsResponse{ - // TODO(desa): update links to include paging and filter information - Links: map[string]string{ - "self": prefixAuthorization, - }, - Auths: as, - } -} - -func newPostAuthorizationRequest(a *influxdb.Authorization) (*postAuthorizationRequest, error) { - res := &postAuthorizationRequest{ - OrgID: a.OrgID, - Description: a.Description, - Permissions: a.Permissions, - Token: a.Token, - Status: a.Status, - } - - if a.UserID.Valid() { - res.UserID = &a.UserID - } - - res.SetDefaults() - - return res, res.Validate() -} - -func (p *postAuthorizationRequest) SetDefaults() { - if p.Status == "" { - p.Status = influxdb.Active - } -} - -func (p *postAuthorizationRequest) Validate() error { - if len(p.Permissions) == 0 { - return &errors.Error{ - Code: errors.EInvalid, - Msg: "authorization must include permissions", - } - } - - for _, perm := range p.Permissions { - if err := perm.Valid(); err != nil { - return &errors.Error{ - Err: err, - } - } - } - - if !p.OrgID.Valid() { - return &errors.Error{ - Err: platform.ErrInvalidID, - Code: errors.EInvalid, - Msg: "org id required", - } - } - - if p.Status == "" { - p.Status = influxdb.Active - } - - if err := p.Status.Valid(); err != nil { - return err - } - - if p.Token == "" { - return &errors.Error{ - Msg: "token required for v1 user authorization type", - Code: errors.EInvalid, - } - } - - return nil -} - -type permissionResponse struct { - Action influxdb.Action `json:"action"` - Resource resourceResponse `json:"resource"` -} - -type resourceResponse struct { - influxdb.Resource - Name string `json:"name,omitempty"` - Organization string `json:"org,omitempty"` -} - -func (h *AuthHandler) newPermissionsResponse(ctx context.Context, ps []influxdb.Permission) ([]permissionResponse, error) { - res := make([]permissionResponse, len(ps)) - for i, p := range ps { - res[i] = permissionResponse{ - Action: p.Action, - Resource: resourceResponse{ - Resource: p.Resource, - }, - } - - if p.Resource.ID != nil { - name, err := h.getNameForResource(ctx, p.Resource.Type, *p.Resource.ID) - if errors.ErrorCode(err) == errors.ENotFound { - continue - } - if err != nil { - return nil, err - } - res[i].Resource.Name = name - } - - if p.Resource.OrgID != nil { - name, err := h.getNameForResource(ctx, influxdb.OrgsResourceType, *p.Resource.OrgID) - if errors.ErrorCode(err) == errors.ENotFound { - continue - } - if err != nil { - return nil, err - } - res[i].Resource.Organization = name - } - } - return res, nil -} - -func (h *AuthHandler) getNameForResource(ctx context.Context, resource influxdb.ResourceType, id platform.ID) (string, error) { - if err := resource.Valid(); err != nil { - return "", err - } - - if ok := id.Valid(); !ok { - return "", platform.ErrInvalidID - } - - switch resource { - case influxdb.BucketsResourceType: - r, err := h.tenantService.FindBucketByID(ctx, id) - if err != nil { - return "", err - } - return r.Name, nil - case 
influxdb.OrgsResourceType: - r, err := h.tenantService.FindOrganizationByID(ctx, id) - if err != nil { - return "", err - } - return r.Name, nil - case influxdb.UsersResourceType: - r, err := h.tenantService.FindUserByID(ctx, id) - if err != nil { - return "", err - } - return r.Name, nil - } - - return "", nil -} - -func decodePostAuthorizationRequest(ctx context.Context, r *http.Request) (*postAuthorizationRequest, error) { - a := &postAuthorizationRequest{} - if err := json.NewDecoder(r.Body).Decode(a); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "invalid json structure", - Err: err, - } - } - - a.SetDefaults() - - return a, a.Validate() -} - -// handleGetAuthorizations is the HTTP handler for the GET prefixAuthorization route. -func (h *AuthHandler) handleGetAuthorizations(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeGetAuthorizationsRequest(ctx, r) - if err != nil { - h.log.Info("Failed to decode request", zap.String("handler", "getAuthorizations"), zap.Error(err)) - h.api.Err(w, r, err) - return - } - - opts := influxdb.FindOptions{} - as, _, err := h.authSvc.FindAuthorizations(ctx, req.filter, opts) - - if err != nil { - h.api.Err(w, r, err) - return - } - - f := req.filter - // If the user or org name was provided, look up the ID first - if f.User != nil { - u, err := h.tenantService.FindUser(ctx, influxdb.UserFilter{Name: f.User}) - if err != nil { - h.api.Err(w, r, err) - return - } - f.UserID = &u.ID - } - - if f.Org != nil { - o, err := h.tenantService.FindOrganization(ctx, influxdb.OrganizationFilter{Name: f.Org}) - if err != nil { - h.api.Err(w, r, err) - return - } - f.OrgID = &o.ID - } - - auths := make([]*authResponse, 0, len(as)) - for _, a := range as { - ps, err := h.newPermissionsResponse(ctx, a.Permissions) - if err != nil { - h.api.Err(w, r, err) - return - } - - resp, err := h.newAuthResponse(ctx, a, ps) - if err != nil { - h.log.Info("Failed to create auth response", zap.String("handler", "getAuthorizations")) - continue - } - auths = append(auths, resp) - } - - h.log.Debug("Auths retrieved ", zap.String("auths", fmt.Sprint(auths))) - - h.api.Respond(w, r, http.StatusOK, newAuthsResponse(auths)) -} - -type getAuthorizationsRequest struct { - filter influxdb.AuthorizationFilter -} - -func decodeGetAuthorizationsRequest(ctx context.Context, r *http.Request) (*getAuthorizationsRequest, error) { - qp := r.URL.Query() - - req := &getAuthorizationsRequest{} - - userID := qp.Get("userID") - if userID != "" { - id, err := platform.IDFromString(userID) - if err != nil { - return nil, err - } - req.filter.UserID = id - } - - user := qp.Get("user") - if user != "" { - req.filter.User = &user - } - - orgID := qp.Get("orgID") - if orgID != "" { - id, err := platform.IDFromString(orgID) - if err != nil { - return nil, err - } - req.filter.OrgID = id - } - - org := qp.Get("org") - if org != "" { - req.filter.Org = &org - } - - authID := qp.Get("id") - if authID != "" { - id, err := platform.IDFromString(authID) - if err != nil { - return nil, err - } - req.filter.ID = id - } - - token := qp.Get("token") - if token != "" { - req.filter.Token = &token - } - - return req, nil -} - -func (h *AuthHandler) handleGetAuthorization(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.log.Info("Failed to decode request", zap.String("handler", "getAuthorization"), zap.Error(err)) - h.api.Err(w, r, err) - return - } - - a, err := 
h.authSvc.FindAuthorizationByID(ctx, *id) - if err != nil { - // Don't log here, it should already be handled by the service - h.api.Err(w, r, err) - return - } - - ps, err := h.newPermissionsResponse(ctx, a.Permissions) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.log.Debug("Auth retrieved ", zap.String("auth", fmt.Sprint(a))) - - resp, err := h.newAuthResponse(ctx, a, ps) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, resp) -} - -// handleUpdateAuthorization is the HTTP handler for the PATCH /api/v2/authorizations/:id route that updates the authorization's status and desc. -func (h *AuthHandler) handleUpdateAuthorization(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - req, err := decodeUpdateAuthorizationRequest(ctx, r) - if err != nil { - h.log.Info("Failed to decode request", zap.String("handler", "updateAuthorization"), zap.Error(err)) - h.api.Err(w, r, err) - return - } - - a, err := h.authSvc.FindAuthorizationByID(ctx, req.ID) - if err != nil { - h.api.Err(w, r, err) - return - } - - a, err = h.authSvc.UpdateAuthorization(ctx, a.ID, req.AuthorizationUpdate) - if err != nil { - h.api.Err(w, r, err) - return - } - - ps, err := h.newPermissionsResponse(ctx, a.Permissions) - if err != nil { - h.api.Err(w, r, err) - return - } - h.log.Debug("Auth updated", zap.String("auth", fmt.Sprint(a))) - - resp, err := h.newAuthResponse(ctx, a, ps) - if err != nil { - h.api.Err(w, r, err) - return - } - - h.api.Respond(w, r, http.StatusOK, resp) -} - -type updateAuthorizationRequest struct { - ID platform.ID - *influxdb.AuthorizationUpdate -} - -func decodeUpdateAuthorizationRequest(ctx context.Context, r *http.Request) (*updateAuthorizationRequest, error) { - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - return nil, err - } - - upd := &influxdb.AuthorizationUpdate{} - if err := json.NewDecoder(r.Body).Decode(upd); err != nil { - return nil, err - } - - return &updateAuthorizationRequest{ - ID: *id, - AuthorizationUpdate: upd, - }, nil -} - -// handleDeleteAuthorization is the HTTP handler for the DELETE prefixAuthorization/:id route. 
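// A small sketch of exercising the query-parameter decoding used by the GET handler
// earlier in this file: decodeGetAuthorizationsRequest recognizes the id, token, user,
// userID, org and orgID parameters. The URL and values below are placeholders, and
// httptest is used the same way as in http_server_test.go.
//
//	r := httptest.NewRequest("GET",
//		"http://any.url/private/legacy/authorizations?org=my-org&user=my-user&token=my-token", nil)
//	req, err := decodeGetAuthorizationsRequest(r.Context(), r)
//	if err != nil {
//		panic(err)
//	}
//	// req.filter.Org, req.filter.User and req.filter.Token are now set; id, orgID and
//	// userID would additionally be parsed into platform.IDs.
//	_ = req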
-func (h *AuthHandler) handleDeleteAuthorization(w http.ResponseWriter, r *http.Request) { - id, err := platform.IDFromString(chi.URLParam(r, "id")) - if err != nil { - h.log.Info("Failed to decode request", zap.String("handler", "deleteAuthorization"), zap.Error(err)) - h.api.Err(w, r, err) - return - } - - if err := h.authSvc.DeleteAuthorization(r.Context(), *id); err != nil { - // Don't log here, it should already be handled by the service - h.api.Err(w, r, err) - return - } - - h.log.Debug("Auth deleted", zap.String("authID", fmt.Sprint(id))) - - w.WriteHeader(http.StatusNoContent) -} - -// password APIs - -type passwordSetRequest struct { - Password string `json:"password"` -} - -// handlePutPassword is the HTTP handler for the PUT /private/legacy/authorizations/:id/password -func (h *AuthHandler) handlePostUserPassword(w http.ResponseWriter, r *http.Request) { - var body passwordSetRequest - err := json.NewDecoder(r.Body).Decode(&body) - if err != nil { - h.api.Err(w, r, &errors.Error{ - Code: errors.EInvalid, - Err: err, - }) - return - } - - param := chi.URLParam(r, "id") - authID, err := platform.IDFromString(param) - if err != nil { - h.api.Err(w, r, &errors.Error{ - Msg: "invalid authorization ID provided in route", - }) - return - } - - err = h.passwordSvc.SetPassword(r.Context(), *authID, body.Password) - if err != nil { - h.api.Err(w, r, err) - return - } - - w.WriteHeader(http.StatusNoContent) -} diff --git a/v1/authorization/http_server_test.go b/v1/authorization/http_server_test.go deleted file mode 100644 index 5ea847ed82d..00000000000 --- a/v1/authorization/http_server_test.go +++ /dev/null @@ -1,947 +0,0 @@ -package authorization - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "sort" - "testing" - - "github.com/go-chi/chi" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/influxdata/httprouter" - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/mock" - itesting "github.com/influxdata/influxdb/v2/testing" - "go.uber.org/zap/zaptest" -) - -func TestService_handlePostAuthorization(t *testing.T) { - type fields struct { - AuthorizationService influxdb.AuthorizationService - TenantService TenantService - } - type args struct { - session *influxdb.Authorization - authorization *influxdb.Authorization - } - type wants struct { - statusCode int - contentType string - body string - bodyErr string - } - - var ( - defaultFields = fields{ - AuthorizationService: &mock.AuthorizationService{ - CreateAuthorizationFn: func(ctx context.Context, c *influxdb.Authorization) error { - c.ID = itesting.MustIDBase16("020f755c3c082000") - return nil - }, - }, - TenantService: &tenantService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ - ID: id, - Name: "u1", - }, nil - }, - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: id, - Name: "o1", - }, nil - }, - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: id, - Name: "b1", - }, nil - }, - }, - } - - defaultSession = &influxdb.Authorization{ - Token: "session-token", - 
ID: itesting.MustIDBase16("020f755c3c082000"), - UserID: itesting.MustIDBase16("aaaaaaaaaaaaaaaa"), - OrgID: itesting.MustIDBase16("020f755c3c083000"), - Description: "can write to authorization resource", - Permissions: []influxdb.Permission{ - { - Action: influxdb.WriteAction, - Resource: influxdb.Resource{ - Type: influxdb.AuthorizationsResourceType, - OrgID: itesting.IDPtr(itesting.MustIDBase16("020f755c3c083000")), - }, - }, - }, - } - ) - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "create an authorization", - fields: defaultFields, - args: args{ - session: defaultSession, - authorization: &influxdb.Authorization{ - ID: itesting.MustIDBase16("020f755c3c082000"), - OrgID: itesting.MustIDBase16("020f755c3c083000"), - Token: "first:second", - Description: "only read dashboards sucka", - Permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - OrgID: itesting.IDPtr(itesting.MustIDBase16("020f755c3c083000")), - }, - }, - }, - }, - }, - wants: wants{ - statusCode: http.StatusCreated, - contentType: "application/json; charset=utf-8", - body: ` -{ - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "description": "only read dashboards sucka", - "id": "020f755c3c082000", - "links": { - "self": "/api/v2/authorizations/020f755c3c082000", - "user": "/api/v2/users/aaaaaaaaaaaaaaaa" - }, - "org": "o1", - "orgID": "020f755c3c083000", - "permissions": [ - { - "action": "read", - "resource": { - "type": "dashboards", - "orgID": "020f755c3c083000", - "org": "o1" - } - } - ], - "status": "active", - "authorizationType": "v1_user", - "user": "u1", - "userID": "aaaaaaaaaaaaaaaa" -} -`, - }, - }, - { - name: "create an authorization with missing token", - fields: defaultFields, - args: args{ - session: defaultSession, - authorization: &influxdb.Authorization{ - ID: itesting.MustIDBase16("020f755c3c082000"), - OrgID: itesting.MustIDBase16("020f755c3c083000"), - Description: "only read dashboards sucka", - Permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.DashboardsResourceType, - OrgID: itesting.IDPtr(itesting.MustIDBase16("020f755c3c083000")), - }, - }, - }, - }, - }, - wants: wants{ - statusCode: http.StatusBadRequest, - contentType: "application/json; charset=utf-8", - bodyErr: ` -{ - "code": "invalid", - "message": "token required for v1 user authorization type" -} -`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Helper() - - s := itesting.NewTestInmemStore(t) - storage, err := NewStore(s) - if err != nil { - t.Fatal(err) - } - - svc := NewService(storage, tt.fields.TenantService) - - handler := NewHTTPAuthHandler(zaptest.NewLogger(t), svc, nil, tt.fields.TenantService) - router := chi.NewRouter() - router.Mount(handler.Prefix(), handler) - - newRequest := func(a *influxdb.Authorization) *postAuthorizationRequest { - return &postAuthorizationRequest{ - OrgID: a.OrgID, - Description: a.Description, - Permissions: a.Permissions, - Status: a.Status, - Token: a.Token, - } - } - - req := newRequest(tt.args.authorization) - b, err := json.Marshal(req) - if err != nil { - t.Fatalf("failed to unmarshal authorization: %v", err) - } - - r := httptest.NewRequest("GET", "http://any.url", bytes.NewReader(b)) - r = r.WithContext(context.WithValue( - context.Background(), - httprouter.ParamsKey, - httprouter.Params{ - { - Key: "userID", - 
Value: fmt.Sprintf("%d", tt.args.session.UserID), - }, - })) - - w := httptest.NewRecorder() - - ctx := icontext.SetAuthorizer(context.Background(), tt.args.session) - r = r.WithContext(ctx) - - handler.handlePostAuthorization(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Logf("headers: %v body: %s", res.Header, body) - t.Errorf("%q. handlePostAuthorization() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handlePostAuthorization() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if tt.wants.body != "" { - if diff, err := jsonDiff(string(body), tt.wants.body); diff != "" { - t.Errorf("%q. handlePostAuthorization() = ***%s***", tt.name, diff) - } else if err != nil { - t.Errorf("%q, handlePostAuthorization() error: %v", tt.name, err) - } - } - if tt.wants.bodyErr != "" { - if diff, err := jsonDiffErr(string(body), tt.wants.bodyErr); diff != "" { - t.Errorf("%q. handlePostAuthorization() = ***%s***", tt.name, diff) - } else if err != nil { - t.Errorf("%q, handlePostAuthorization() error: %v", tt.name, err) - } - } - - }) - } -} - -func TestService_handleGetAuthorization(t *testing.T) { - type fields struct { - AuthorizationService influxdb.AuthorizationService - TenantService TenantService - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get a authorization by id", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - FindAuthorizationByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - if id == itesting.MustIDBase16("020f755c3c082000") { - return &influxdb.Authorization{ - ID: itesting.MustIDBase16("020f755c3c082000"), - UserID: itesting.MustIDBase16("020f755c3c082000"), - OrgID: itesting.MustIDBase16("020f755c3c083000"), - Permissions: []influxdb.Permission{ - { - Action: influxdb.ReadAction, - Resource: influxdb.Resource{ - Type: influxdb.BucketsResourceType, - OrgID: itesting.IDPtr(itesting.MustIDBase16("020f755c3c083000")), - ID: func() *platform.ID { - id := itesting.MustIDBase16("020f755c3c084000") - return &id - }(), - }, - }, - }, - Token: "hello", - }, nil - } - - return nil, fmt.Errorf("not found") - }, - }, - TenantService: &tenantService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ - ID: id, - Name: "u1", - }, nil - }, - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: id, - Name: "o1", - }, nil - }, - FindBucketByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return &influxdb.Bucket{ - ID: id, - Name: "b1", - }, nil - }, - }, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z", - "description": "", - "id": "020f755c3c082000", - "links": { - "self": "/api/v2/authorizations/020f755c3c082000", - "user": "/api/v2/users/020f755c3c082000" - }, - "org": "o1", - "orgID": "020f755c3c083000", - "permissions": [ - { - "action": "read", - "resource": { - "type": "buckets", - "orgID": 
"020f755c3c083000", - "id": "020f755c3c084000", - "name": "b1", - "org": "o1" - } - } - ], - "status": "", - "authorizationType": "plain", - "token": "hello", - "user": "u1", - "userID": "020f755c3c082000" -} -`, - }, - }, - { - name: "not found", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - FindAuthorizationByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "authorization not found", - } - }, - }, - TenantService: &tenantService{}, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - body: `{"code":"not found","message":"authorization not found"}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Helper() - - handler := NewHTTPAuthHandler(zaptest.NewLogger(t), tt.fields.AuthorizationService, nil, tt.fields.TenantService) - router := chi.NewRouter() - router.Mount(handler.Prefix(), handler) - - w := httptest.NewRecorder() - - r := httptest.NewRequest("GET", "http://any.url", nil) - rctx := chi.NewRouteContext() - rctx.URLParams.Add("id", tt.args.id) - r = r.WithContext(context.WithValue(r.Context(), chi.RouteCtxKey, rctx)) - - handler.handleGetAuthorization(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Logf("headers: %v body: %s", res.Header, body) - t.Errorf("%q. handleGetAuthorization() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetAuthorization() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if diff, err := jsonDiff(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleGetAuthorization. error unmarshaling json %v", tt.name, err) - } else if tt.wants.body != "" && diff != "" { - t.Errorf("%q. 
handleGetAuthorization() = -got/+want %s**", tt.name, diff) - } - }) - } -} - -func TestService_handleGetAuthorizations(t *testing.T) { - type fields struct { - AuthorizationService influxdb.AuthorizationService - TenantService TenantService - } - - type args struct { - queryParams map[string][]string - } - - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "get all authorizations", - fields: fields{ - &mock.AuthorizationService{ - FindAuthorizationsFn: func(ctx context.Context, filter influxdb.AuthorizationFilter, opts ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - return []*influxdb.Authorization{ - { - ID: itesting.MustIDBase16("0d0a657820696e74"), - Token: "hello", - UserID: itesting.MustIDBase16("2070616e656d2076"), - OrgID: itesting.MustIDBase16("3070616e656d2076"), - Description: "t1", - Permissions: influxdb.OperPermissions(), - }, - { - ID: itesting.MustIDBase16("6669646573207375"), - Token: "example", - UserID: itesting.MustIDBase16("6c7574652c206f6e"), - OrgID: itesting.MustIDBase16("9d70616e656d2076"), - Description: "t2", - Permissions: influxdb.OperPermissions(), - }, - }, 2, nil - }, - }, - &tenantService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ - ID: id, - Name: id.String(), - }, nil - }, - - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: id, - Name: id.String(), - }, nil - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: fmt.Sprintf(` -{ - "links": { - "self": "/api/v2/authorizations" - }, - "authorizations": [ - { - "links": { - "user": "/api/v2/users/2070616e656d2076", - "self": "/api/v2/authorizations/0d0a657820696e74" - }, - "id": "0d0a657820696e74", - "userID": "2070616e656d2076", - "user": "2070616e656d2076", - "org": "3070616e656d2076", - "orgID": "3070616e656d2076", - "status": "", - "token": "hello", - "description": "t1", - "permissions": %s, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z" - }, - { - "links": { - "user": "/api/v2/users/6c7574652c206f6e", - "self": "/api/v2/authorizations/6669646573207375" - }, - "id": "6669646573207375", - "userID": "6c7574652c206f6e", - "user": "6c7574652c206f6e", - "org": "9d70616e656d2076", - "orgID": "9d70616e656d2076", - "status": "", - "token": "example", - "description": "t2", - "permissions": %s, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z" - } - ] -} -`, - MustMarshal(influxdb.OperPermissions()), - MustMarshal(influxdb.OperPermissions())), - }, - }, - { - name: "skip authorizations with no org", - fields: fields{ - &mock.AuthorizationService{ - FindAuthorizationsFn: func(ctx context.Context, filter influxdb.AuthorizationFilter, opts ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - return []*influxdb.Authorization{ - { - ID: itesting.MustIDBase16("0d0a657820696e74"), - Token: "hello", - UserID: itesting.MustIDBase16("2070616e656d2076"), - OrgID: itesting.MustIDBase16("3070616e656d2076"), - Description: "t1", - Permissions: influxdb.OperPermissions(), - }, - { - ID: itesting.MustIDBase16("6669646573207375"), - Token: "example", - UserID: itesting.MustIDBase16("6c7574652c206f6e"), - OrgID: itesting.MustIDBase16("9d70616e656d2076"), - Description: "t2", - 
Permissions: influxdb.OperPermissions(), - }, - }, 2, nil - }, - }, - &tenantService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - if id.String() == "2070616e656d2076" { - return &influxdb.User{ - ID: id, - Name: id.String(), - }, nil - } - return nil, &errors.Error{} - }, - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return &influxdb.Organization{ - ID: id, - Name: id.String(), - }, nil - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: fmt.Sprintf(` -{ - "links": { - "self": "/api/v2/authorizations" - }, - "authorizations": [ - { - "links": { - "user": "/api/v2/users/2070616e656d2076", - "self": "/api/v2/authorizations/0d0a657820696e74" - }, - "id": "0d0a657820696e74", - "userID": "2070616e656d2076", - "user": "2070616e656d2076", - "org": "3070616e656d2076", - "orgID": "3070616e656d2076", - "status": "", - "token": "hello", - "description": "t1", - "permissions": %s, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z" - } - ] -} -`, - MustMarshal(influxdb.OperPermissions())), - }, - }, - { - name: "skip authorizations with no user", - fields: fields{ - &mock.AuthorizationService{ - FindAuthorizationsFn: func(ctx context.Context, filter influxdb.AuthorizationFilter, opts ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - return []*influxdb.Authorization{ - { - ID: itesting.MustIDBase16("0d0a657820696e74"), - Token: "hello", - UserID: itesting.MustIDBase16("2070616e656d2076"), - OrgID: itesting.MustIDBase16("3070616e656d2076"), - Description: "t1", - Permissions: influxdb.OperPermissions(), - }, - { - ID: itesting.MustIDBase16("6669646573207375"), - Token: "example", - UserID: itesting.MustIDBase16("6c7574652c206f6e"), - OrgID: itesting.MustIDBase16("9d70616e656d2076"), - Description: "t2", - Permissions: influxdb.OperPermissions(), - }, - }, 2, nil - }, - }, - &tenantService{ - FindUserByIDFn: func(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return &influxdb.User{ - ID: id, - Name: id.String(), - }, nil - }, - FindOrganizationByIDF: func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - if id.String() == "3070616e656d2076" { - return &influxdb.Organization{ - ID: id, - Name: id.String(), - }, nil - } - return nil, &errors.Error{} - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: http.StatusOK, - contentType: "application/json; charset=utf-8", - body: fmt.Sprintf(` -{ - "links": { - "self": "/api/v2/authorizations" - }, - "authorizations": [ - { - "links": { - "user": "/api/v2/users/2070616e656d2076", - "self": "/api/v2/authorizations/0d0a657820696e74" - }, - "id": "0d0a657820696e74", - "userID": "2070616e656d2076", - "user": "2070616e656d2076", - "org": "3070616e656d2076", - "orgID": "3070616e656d2076", - "status": "", - "token": "hello", - "description": "t1", - "permissions": %s, - "createdAt": "0001-01-01T00:00:00Z", - "updatedAt": "0001-01-01T00:00:00Z" - } - ] -} -`, - MustMarshal(influxdb.OperPermissions())), - }, - }, - { - name: "get all authorizations when there are none", - fields: fields{ - AuthorizationService: &mock.AuthorizationService{ - FindAuthorizationsFn: func(ctx context.Context, filter influxdb.AuthorizationFilter, opts ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) { - return []*influxdb.Authorization{}, 0, nil - }, - }, - }, - args: args{}, - wants: wants{ - statusCode: 
http.StatusOK, - contentType: "application/json; charset=utf-8", - body: ` -{ - "links": { - "self": "/api/v2/authorizations" - }, - "authorizations": [] -}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Helper() - - s := itesting.NewTestInmemStore(t) - storage, err := NewStore(s) - if err != nil { - t.Fatal(err) - } - - svc := NewService(storage, tt.fields.TenantService) - - handler := NewHTTPAuthHandler(zaptest.NewLogger(t), svc, nil, tt.fields.TenantService) - router := chi.NewRouter() - router.Mount(handler.Prefix(), handler) - - r := httptest.NewRequest("GET", "http://any.url", nil) - - qp := r.URL.Query() - for k, vs := range tt.args.queryParams { - for _, v := range vs { - qp.Add(k, v) - } - } - r.URL.RawQuery = qp.Encode() - - w := httptest.NewRecorder() - - handler.handleGetAuthorizations(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. handleGetAuthorizations() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleGetAuthorizations() = %v, want %v", tt.name, content, tt.wants.contentType) - } - if diff, err := jsonDiff(string(body), tt.wants.body); diff != "" { - t.Errorf("%q. handleGetAuthorizations() = ***%s***", tt.name, diff) - } else if err != nil { - t.Errorf("%q, handleGetAuthorizations() error: %v", tt.name, err) - } - - }) - } -} - -func TestService_handleDeleteAuthorization(t *testing.T) { - type fields struct { - AuthorizationService influxdb.AuthorizationService - TenantService TenantService - } - type args struct { - id string - } - type wants struct { - statusCode int - contentType string - body string - } - - tests := []struct { - name string - fields fields - args args - wants wants - }{ - { - name: "remove a authorization by id", - fields: fields{ - &mock.AuthorizationService{ - DeleteAuthorizationFn: func(ctx context.Context, id platform.ID) error { - if id == itesting.MustIDBase16("020f755c3c082000") { - return nil - } - - return fmt.Errorf("wrong id") - }, - }, - &tenantService{}, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNoContent, - }, - }, - { - name: "authorization not found", - fields: fields{ - &mock.AuthorizationService{ - DeleteAuthorizationFn: func(ctx context.Context, id platform.ID) error { - return &errors.Error{ - Code: errors.ENotFound, - Msg: "authorization not found", - } - }, - }, - &tenantService{}, - }, - args: args{ - id: "020f755c3c082000", - }, - wants: wants{ - statusCode: http.StatusNotFound, - body: `{"code":"not found","message":"authorization not found"}`, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Helper() - - handler := NewHTTPAuthHandler(zaptest.NewLogger(t), tt.fields.AuthorizationService, nil, tt.fields.TenantService) - router := chi.NewRouter() - router.Mount(handler.Prefix(), handler) - - w := httptest.NewRecorder() - - r := httptest.NewRequest("GET", "http://any.url", nil) - rctx := chi.NewRouteContext() - rctx.URLParams.Add("id", tt.args.id) - r = r.WithContext(context.WithValue(r.Context(), chi.RouteCtxKey, rctx)) - - handler.handleDeleteAuthorization(w, r) - - res := w.Result() - content := res.Header.Get("Content-Type") - body, _ := io.ReadAll(res.Body) - - if res.StatusCode != tt.wants.statusCode { - t.Errorf("%q. 
handleDeleteAuthorization() = %v, want %v", tt.name, res.StatusCode, tt.wants.statusCode) - } - if tt.wants.contentType != "" && content != tt.wants.contentType { - t.Errorf("%q. handleDeleteAuthorization() = %v, want %v", tt.name, content, tt.wants.contentType) - } - - if tt.wants.body != "" { - if diff, err := jsonDiff(string(body), tt.wants.body); err != nil { - t.Errorf("%q, handleDeleteAuthorization(). error unmarshaling json %v", tt.name, err) - } else if diff != "" { - t.Errorf("%q. handleDeleteAuthorization() = ***%s***", tt.name, diff) - } - } - }) - } -} - -func jsonDiff(s1, s2 string) (diff string, err error) { - if s1 == s2 { - return "", nil - } - - if s1 == "" { - return s2, fmt.Errorf("s1 is empty") - } - - if s2 == "" { - return s1, fmt.Errorf("s2 is empty") - } - - var o1 influxdb.Authorization - if err = json.Unmarshal([]byte(s1), &o1); err != nil { - return - } - - var o2 influxdb.Authorization - if err = json.Unmarshal([]byte(s2), &o2); err != nil { - return - } - - return cmp.Diff(o1, o2, authorizationCmpOptions...), err -} - -func jsonDiffErr(s1, s2 string) (diff string, err error) { - if s1 == s2 { - return "", nil - } - - if s1 == "" { - return s2, fmt.Errorf("s1 is empty") - } - - if s2 == "" { - return s1, fmt.Errorf("s2 is empty") - } - - var o1 errors.Error - if err = json.Unmarshal([]byte(s1), &o1); err != nil { - return - } - - var o2 errors.Error - if err = json.Unmarshal([]byte(s2), &o2); err != nil { - return - } - - return cmp.Diff(o1, o2), err -} - -var authorizationCmpOptions = cmp.Options{ - cmpopts.EquateEmpty(), - cmpopts.IgnoreFields(influxdb.Authorization{}, "ID", "Token", "CreatedAt", "UpdatedAt"), - cmp.Comparer(func(x, y []byte) bool { - return bytes.Equal(x, y) - }), - cmp.Transformer("Sort", func(in []*influxdb.Authorization) []*influxdb.Authorization { - out := append([]*influxdb.Authorization(nil), in...) // Copy input to avoid mutating it - sort.Slice(out, func(i, j int) bool { - return out[i].ID.String() > out[j].ID.String() - }) - return out - }), -} - -func MustMarshal(o interface{}) []byte { - b, _ := json.Marshal(o) - return b -} diff --git a/v1/authorization/middleware_auth_password_service.go b/v1/authorization/middleware_auth_password_service.go deleted file mode 100644 index eb1e8385bb8..00000000000 --- a/v1/authorization/middleware_auth_password_service.go +++ /dev/null @@ -1,38 +0,0 @@ -package authorization - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -type AuthFinder interface { - FindAuthorizationByID(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) -} - -// AuthedPasswordService is middleware for authorizing requests to the inner PasswordService. -type AuthedPasswordService struct { - auth AuthFinder - inner PasswordService -} - -// NewAuthedPasswordService wraps an existing PasswordService with authorization middleware. -func NewAuthedPasswordService(auth AuthFinder, inner PasswordService) *AuthedPasswordService { - return &AuthedPasswordService{auth: auth, inner: inner} -} - -// SetPassword overrides the password of a known user. 
-func (s *AuthedPasswordService) SetPassword(ctx context.Context, authID platform.ID, password string) error { - auth, err := s.auth.FindAuthorizationByID(ctx, authID) - if err != nil { - return ErrAuthNotFound - } - - if _, _, err := authorizer.AuthorizeWriteResource(ctx, influxdb.UsersResourceType, auth.UserID); err != nil { - return err - } - - return s.inner.SetPassword(ctx, authID, password) -} diff --git a/v1/authorization/middleware_auth_password_service_test.go b/v1/authorization/middleware_auth_password_service_test.go deleted file mode 100644 index 6b47ae4dbf3..00000000000 --- a/v1/authorization/middleware_auth_password_service_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package authorization_test - -import ( - "context" - "testing" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - itest "github.com/influxdata/influxdb/v2/testing" - "github.com/influxdata/influxdb/v2/v1/authorization" - "github.com/influxdata/influxdb/v2/v1/authorization/mocks" - "github.com/stretchr/testify/assert" -) - -func TestAuthedPasswordService_SetPassword(t *testing.T) { - var ( - authID = itest.MustIDBase16("0000000000001000") - userID = itest.MustIDBase16("0000000000002000") - orgID = itest.MustIDBase16("0000000000003000") - ) - t.Run("error when auth not found", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - ctx := context.Background() - - af := mocks.NewMockAuthFinder(ctrl) - af.EXPECT(). - FindAuthorizationByID(ctx, authID). - Return(nil, &errors.Error{}) - - ps := authorization.NewAuthedPasswordService(af, nil) - err := ps.SetPassword(ctx, authID, "foo") - assert.EqualError(t, err, authorization.ErrAuthNotFound.Error()) - }) - - t.Run("error when no authorizer", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - ctx := context.Background() - auth := influxdb.Authorization{ - ID: authID, - OrgID: orgID, - UserID: userID, - } - - af := mocks.NewMockAuthFinder(ctrl) - af.EXPECT(). - FindAuthorizationByID(ctx, authID). - Return(&auth, nil) - - ps := authorization.NewAuthedPasswordService(af, nil) - err := ps.SetPassword(ctx, authID, "foo") - assert.EqualError(t, err, "authorizer not found on context") - }) - - t.Run("error with restricted permission", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - influxdb.OperPermissions() - auth := influxdb.Authorization{ - ID: authID, - OrgID: orgID, - UserID: userID, - Status: influxdb.Active, - } - ctx := context.Background() - ctx = icontext.SetAuthorizer(ctx, &auth) - - af := mocks.NewMockAuthFinder(ctrl) - af.EXPECT(). - FindAuthorizationByID(ctx, authID). - Return(&auth, nil) - - ps := authorization.NewAuthedPasswordService(af, nil) - err := ps.SetPassword(ctx, authID, "foo") - assert.Error(t, err) - }) - - t.Run("success", func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - influxdb.OperPermissions() - auth := influxdb.Authorization{ - ID: authID, - OrgID: orgID, - UserID: userID, - Status: influxdb.Active, - Permissions: influxdb.MePermissions(userID), - } - ctx := context.Background() - ctx = icontext.SetAuthorizer(ctx, &auth) - - af := mocks.NewMockAuthFinder(ctrl) - af.EXPECT(). - FindAuthorizationByID(ctx, authID). - Return(&auth, nil) - - inner := mocks.NewMockPasswordService(ctrl) - inner.EXPECT(). - SetPassword(ctx, authID, "foo"). 
- Return(nil) - - ps := authorization.NewAuthedPasswordService(af, inner) - err := ps.SetPassword(ctx, authID, "foo") - assert.NoError(t, err) - }) -} diff --git a/v1/authorization/mock_tenant.go b/v1/authorization/mock_tenant.go deleted file mode 100644 index 5a3cde54455..00000000000 --- a/v1/authorization/mock_tenant.go +++ /dev/null @@ -1,41 +0,0 @@ -package authorization - -import ( - "context" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// tenantService is a mock implementation of an authorization.tenantService -type tenantService struct { - FindUserByIDFn func(context.Context, platform.ID) (*influxdb.User, error) - FindUserFn func(context.Context, influxdb.UserFilter) (*influxdb.User, error) - FindOrganizationByIDF func(ctx context.Context, id platform.ID) (*influxdb.Organization, error) - FindOrganizationF func(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) - FindBucketByIDFn func(context.Context, platform.ID) (*influxdb.Bucket, error) -} - -// FindUserByID returns a single User by ID. -func (s *tenantService) FindUserByID(ctx context.Context, id platform.ID) (*influxdb.User, error) { - return s.FindUserByIDFn(ctx, id) -} - -// FindUser returns a single User that match filter. -func (s *tenantService) FindUser(ctx context.Context, filter influxdb.UserFilter) (*influxdb.User, error) { - return s.FindUserFn(ctx, filter) -} - -// FindOrganizationByID calls FindOrganizationByIDF. -func (s *tenantService) FindOrganizationByID(ctx context.Context, id platform.ID) (*influxdb.Organization, error) { - return s.FindOrganizationByIDF(ctx, id) -} - -// FindOrganization calls FindOrganizationF. -func (s *tenantService) FindOrganization(ctx context.Context, filter influxdb.OrganizationFilter) (*influxdb.Organization, error) { - return s.FindOrganizationF(ctx, filter) -} - -func (s *tenantService) FindBucketByID(ctx context.Context, id platform.ID) (*influxdb.Bucket, error) { - return s.FindBucketByIDFn(ctx, id) -} diff --git a/v1/authorization/mocks/auth_finder.go b/v1/authorization/mocks/auth_finder.go deleted file mode 100644 index c5f832882ec..00000000000 --- a/v1/authorization/mocks/auth_finder.go +++ /dev/null @@ -1,52 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/v1/authorization (interfaces: AuthFinder) - -// Package mocks is a generated GoMock package. 
-package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockAuthFinder is a mock of AuthFinder interface -type MockAuthFinder struct { - ctrl *gomock.Controller - recorder *MockAuthFinderMockRecorder -} - -// MockAuthFinderMockRecorder is the mock recorder for MockAuthFinder -type MockAuthFinderMockRecorder struct { - mock *MockAuthFinder -} - -// NewMockAuthFinder creates a new mock instance -func NewMockAuthFinder(ctrl *gomock.Controller) *MockAuthFinder { - mock := &MockAuthFinder{ctrl: ctrl} - mock.recorder = &MockAuthFinderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockAuthFinder) EXPECT() *MockAuthFinderMockRecorder { - return m.recorder -} - -// FindAuthorizationByID mocks base method -func (m *MockAuthFinder) FindAuthorizationByID(arg0 context.Context, arg1 platform.ID) (*influxdb.Authorization, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FindAuthorizationByID", arg0, arg1) - ret0, _ := ret[0].(*influxdb.Authorization) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FindAuthorizationByID indicates an expected call of FindAuthorizationByID -func (mr *MockAuthFinderMockRecorder) FindAuthorizationByID(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindAuthorizationByID", reflect.TypeOf((*MockAuthFinder)(nil).FindAuthorizationByID), arg0, arg1) -} diff --git a/v1/authorization/mocks/auth_token_finder.go b/v1/authorization/mocks/auth_token_finder.go deleted file mode 100644 index 4c734161a39..00000000000 --- a/v1/authorization/mocks/auth_token_finder.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/v1/authorization (interfaces: AuthTokenFinder) - -// Package mocks is a generated GoMock package. 
-package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" -) - -// MockAuthTokenFinder is a mock of AuthTokenFinder interface -type MockAuthTokenFinder struct { - ctrl *gomock.Controller - recorder *MockAuthTokenFinderMockRecorder -} - -// MockAuthTokenFinderMockRecorder is the mock recorder for MockAuthTokenFinder -type MockAuthTokenFinderMockRecorder struct { - mock *MockAuthTokenFinder -} - -// NewMockAuthTokenFinder creates a new mock instance -func NewMockAuthTokenFinder(ctrl *gomock.Controller) *MockAuthTokenFinder { - mock := &MockAuthTokenFinder{ctrl: ctrl} - mock.recorder = &MockAuthTokenFinderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockAuthTokenFinder) EXPECT() *MockAuthTokenFinderMockRecorder { - return m.recorder -} - -// FindAuthorizationByToken mocks base method -func (m *MockAuthTokenFinder) FindAuthorizationByToken(arg0 context.Context, arg1 string) (*influxdb.Authorization, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FindAuthorizationByToken", arg0, arg1) - ret0, _ := ret[0].(*influxdb.Authorization) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FindAuthorizationByToken indicates an expected call of FindAuthorizationByToken -func (mr *MockAuthTokenFinderMockRecorder) FindAuthorizationByToken(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindAuthorizationByToken", reflect.TypeOf((*MockAuthTokenFinder)(nil).FindAuthorizationByToken), arg0, arg1) -} diff --git a/v1/authorization/mocks/password_comparer.go b/v1/authorization/mocks/password_comparer.go deleted file mode 100644 index e7e5f22f0fd..00000000000 --- a/v1/authorization/mocks/password_comparer.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/v1/authorization (interfaces: PasswordComparer) - -// Package mocks is a generated GoMock package. 
-package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockPasswordComparer is a mock of PasswordComparer interface -type MockPasswordComparer struct { - ctrl *gomock.Controller - recorder *MockPasswordComparerMockRecorder -} - -// MockPasswordComparerMockRecorder is the mock recorder for MockPasswordComparer -type MockPasswordComparerMockRecorder struct { - mock *MockPasswordComparer -} - -// NewMockPasswordComparer creates a new mock instance -func NewMockPasswordComparer(ctrl *gomock.Controller) *MockPasswordComparer { - mock := &MockPasswordComparer{ctrl: ctrl} - mock.recorder = &MockPasswordComparerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockPasswordComparer) EXPECT() *MockPasswordComparerMockRecorder { - return m.recorder -} - -// ComparePassword mocks base method -func (m *MockPasswordComparer) ComparePassword(arg0 context.Context, arg1 platform.ID, arg2 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ComparePassword", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// ComparePassword indicates an expected call of ComparePassword -func (mr *MockPasswordComparerMockRecorder) ComparePassword(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ComparePassword", reflect.TypeOf((*MockPasswordComparer)(nil).ComparePassword), arg0, arg1, arg2) -} diff --git a/v1/authorization/mocks/password_service.go b/v1/authorization/mocks/password_service.go deleted file mode 100644 index 5f562e7cf69..00000000000 --- a/v1/authorization/mocks/password_service.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/v1/authorization (interfaces: PasswordService) - -// Package mocks is a generated GoMock package. 
-package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockPasswordService is a mock of PasswordService interface -type MockPasswordService struct { - ctrl *gomock.Controller - recorder *MockPasswordServiceMockRecorder -} - -// MockPasswordServiceMockRecorder is the mock recorder for MockPasswordService -type MockPasswordServiceMockRecorder struct { - mock *MockPasswordService -} - -// NewMockPasswordService creates a new mock instance -func NewMockPasswordService(ctrl *gomock.Controller) *MockPasswordService { - mock := &MockPasswordService{ctrl: ctrl} - mock.recorder = &MockPasswordServiceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockPasswordService) EXPECT() *MockPasswordServiceMockRecorder { - return m.recorder -} - -// SetPassword mocks base method -func (m *MockPasswordService) SetPassword(arg0 context.Context, arg1 platform.ID, arg2 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetPassword", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetPassword indicates an expected call of SetPassword -func (mr *MockPasswordServiceMockRecorder) SetPassword(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPassword", reflect.TypeOf((*MockPasswordService)(nil).SetPassword), arg0, arg1, arg2) -} diff --git a/v1/authorization/mocks/user_finder.go b/v1/authorization/mocks/user_finder.go deleted file mode 100644 index 462a0ffa04d..00000000000 --- a/v1/authorization/mocks/user_finder.go +++ /dev/null @@ -1,52 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/influxdata/influxdb/v2/v1/authorization (interfaces: UserFinder) - -// Package mocks is a generated GoMock package. 
-package mocks - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - influxdb "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// MockUserFinder is a mock of UserFinder interface -type MockUserFinder struct { - ctrl *gomock.Controller - recorder *MockUserFinderMockRecorder -} - -// MockUserFinderMockRecorder is the mock recorder for MockUserFinder -type MockUserFinderMockRecorder struct { - mock *MockUserFinder -} - -// NewMockUserFinder creates a new mock instance -func NewMockUserFinder(ctrl *gomock.Controller) *MockUserFinder { - mock := &MockUserFinder{ctrl: ctrl} - mock.recorder = &MockUserFinderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockUserFinder) EXPECT() *MockUserFinderMockRecorder { - return m.recorder -} - -// FindUserByID mocks base method -func (m *MockUserFinder) FindUserByID(arg0 context.Context, arg1 platform.ID) (*influxdb.User, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FindUserByID", arg0, arg1) - ret0, _ := ret[0].(*influxdb.User) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FindUserByID indicates an expected call of FindUserByID -func (mr *MockUserFinderMockRecorder) FindUserByID(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindUserByID", reflect.TypeOf((*MockUserFinder)(nil).FindUserByID), arg0, arg1) -} diff --git a/v1/authorization/service.go b/v1/authorization/service.go deleted file mode 100644 index da8b6ac1364..00000000000 --- a/v1/authorization/service.go +++ /dev/null @@ -1,212 +0,0 @@ -package authorization - -import ( - "context" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" -) - -var ( - _ influxdb.AuthorizationService = (*Service)(nil) - _ influxdb.PasswordsService = (*Service)(nil) -) - -type Service struct { - store *Store - tenantService TenantService -} - -func NewService(st *Store, ts TenantService) *Service { - return &Service{ - store: st, - tenantService: ts, - } -} - -func (s *Service) CreateAuthorization(ctx context.Context, a *influxdb.Authorization) error { - if err := a.Valid(); err != nil { - return &errors.Error{ - Err: err, - } - } - - if a.Token == "" { - return influxdb.ErrUnableToCreateToken - } - - if _, err := s.tenantService.FindUserByID(ctx, a.UserID); err != nil { - return influxdb.ErrUnableToCreateToken - } - - if _, err := s.tenantService.FindOrganizationByID(ctx, a.OrgID); err != nil { - return influxdb.ErrUnableToCreateToken - } - - err := s.store.View(ctx, func(tx kv.Tx) error { - if err := s.store.uniqueAuthToken(ctx, tx, a); err != nil { - return err - } - return nil - }) - if err != nil { - return ErrTokenAlreadyExistsError - } - - now := time.Now() - a.SetCreatedAt(now) - a.SetUpdatedAt(now) - - return s.store.Update(ctx, func(tx kv.Tx) error { - return s.store.CreateAuthorization(ctx, tx, a) - }) -} - -func (s *Service) FindAuthorizationByID(ctx context.Context, id platform.ID) (*influxdb.Authorization, error) { - var a *influxdb.Authorization - err := s.store.View(ctx, func(tx kv.Tx) error { - auth, err := s.store.GetAuthorizationByID(ctx, tx, id) - if err != nil { - return err - } - - a = auth - return nil - }) - - if err != nil { - return nil, err 
-	}
-
-	return a, nil
-}
-
-// FindAuthorizationByToken returns an authorization by token for a particular authorization.
-func (s *Service) FindAuthorizationByToken(ctx context.Context, n string) (*influxdb.Authorization, error) {
-	var a *influxdb.Authorization
-	err := s.store.View(ctx, func(tx kv.Tx) error {
-		auth, err := s.store.GetAuthorizationByToken(ctx, tx, n)
-		if err != nil {
-			return err
-		}
-
-		a = auth
-
-		return nil
-	})
-
-	if err != nil {
-		return nil, err
-	}
-
-	return a, nil
-}
-
-// FindAuthorizations retrieves all authorizations that match an arbitrary authorization filter.
-// Filters using ID or Token should be efficient.
-// Other filters will do a linear scan across all authorizations searching for a match.
-func (s *Service) FindAuthorizations(ctx context.Context, filter influxdb.AuthorizationFilter, opt ...influxdb.FindOptions) ([]*influxdb.Authorization, int, error) {
-	if filter.ID != nil {
-		var auth *influxdb.Authorization
-		err := s.store.View(ctx, func(tx kv.Tx) error {
-			a, e := s.store.GetAuthorizationByID(ctx, tx, *filter.ID)
-			if e != nil {
-				return e
-			}
-			auth = a
-			return nil
-		})
-		if err != nil {
-			return nil, 0, &errors.Error{
-				Err: err,
-			}
-		}
-
-		return []*influxdb.Authorization{auth}, 1, nil
-	}
-
-	if filter.Token != nil {
-		var auth *influxdb.Authorization
-		err := s.store.View(ctx, func(tx kv.Tx) error {
-			a, e := s.store.GetAuthorizationByToken(ctx, tx, *filter.Token)
-			if e != nil {
-				return e
-			}
-			auth = a
-			return nil
-		})
-		if err != nil {
-			return nil, 0, &errors.Error{
-				Err: err,
-			}
-		}
-
-		return []*influxdb.Authorization{auth}, 1, nil
-	}
-
-	as := []*influxdb.Authorization{}
-	err := s.store.View(ctx, func(tx kv.Tx) error {
-		auths, err := s.store.ListAuthorizations(ctx, tx, filter)
-		if err != nil {
-			return err
-		}
-		as = auths
-		return nil
-	})
-
-	if err != nil {
-		return nil, 0, &errors.Error{
-			Err: err,
-		}
-	}
-
-	return as, len(as), nil
-}
-
-// UpdateAuthorization updates the status and description if available.
-func (s *Service) UpdateAuthorization(ctx context.Context, id platform.ID, upd *influxdb.AuthorizationUpdate) (*influxdb.Authorization, error) {
-	var auth *influxdb.Authorization
-	err := s.store.View(ctx, func(tx kv.Tx) error {
-		a, e := s.store.GetAuthorizationByID(ctx, tx, id)
-		if e != nil {
-			return e
-		}
-		auth = a
-		return nil
-	})
-
-	if err != nil {
-		return nil, &errors.Error{
-			Code: errors.ENotFound,
-			Err: err,
-		}
-	}
-
-	if upd.Status != nil {
-		auth.Status = *upd.Status
-	}
-	if upd.Description != nil {
-		auth.Description = *upd.Description
-	}
-
-	auth.SetUpdatedAt(time.Now())
-
-	err = s.store.Update(ctx, func(tx kv.Tx) error {
-		a, e := s.store.UpdateAuthorization(ctx, tx, id, auth)
-		if e != nil {
-			return e
-		}
-		auth = a
-		return nil
-	})
-	return auth, err
-}
-
-func (s *Service) DeleteAuthorization(ctx context.Context, id platform.ID) error {
-	return s.store.Update(ctx, func(tx kv.Tx) (err error) {
-		return s.store.DeleteAuthorization(ctx, tx, id)
-	})
-}
diff --git a/v1/authorization/service_password.go b/v1/authorization/service_password.go
deleted file mode 100644
index 1ad8648924f..00000000000
--- a/v1/authorization/service_password.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package authorization
-
-import (
-	"context"
-
-	"github.com/influxdata/influxdb/v2/kit/platform"
-	"github.com/influxdata/influxdb/v2/kit/platform/errors"
-	"github.com/influxdata/influxdb/v2/kv"
-	"github.com/influxdata/influxdb/v2/tenant"
-	"golang.org/x/crypto/bcrypt"
-)
-
-var EIncorrectPassword = tenant.EIncorrectPassword
-
-// SetPasswordHash updates the password hash for id. If passHash is not a valid bcrypt hash,
-// SetPasswordHash returns an error.
-//
-// This API is intended for upgrading 1.x users.
-func (s *Service) SetPasswordHash(ctx context.Context, authID platform.ID, passHash string) error {
-	// verify passHash is a valid bcrypt hash
-	_, err := bcrypt.Cost([]byte(passHash))
-	if err != nil {
-		return &errors.Error{
-			Code: errors.EInvalid,
-			Msg: "invalid bcrypt hash",
-			Err: err,
-		}
-	}
-	// set password
-	return s.store.Update(ctx, func(tx kv.Tx) error {
-		_, err := s.store.GetAuthorizationByID(ctx, tx, authID)
-		if err != nil {
-			return ErrAuthNotFound
-		}
-		return s.store.SetPassword(ctx, tx, authID, passHash)
-	})
-}
-
-// SetPassword overrides the password of a known user.
-func (s *Service) SetPassword(ctx context.Context, authID platform.ID, password string) error {
-	if len(password) < tenant.MinPasswordLen {
-		return tenant.EShortPassword
-	}
-	passHash, err := encryptPassword(password)
-	if err != nil {
-		return err
-	}
-	// set password
-	return s.store.Update(ctx, func(tx kv.Tx) error {
-		_, err := s.store.GetAuthorizationByID(ctx, tx, authID)
-		if err != nil {
-			return ErrAuthNotFound
-		}
-		return s.store.SetPassword(ctx, tx, authID, passHash)
-	})
-}
-
-// ComparePassword checks if the password matches the password recorded.
-// Passwords that do not match return errors.
-func (s *Service) ComparePassword(ctx context.Context, authID platform.ID, password string) error { - // get password - var hash []byte - err := s.store.View(ctx, func(tx kv.Tx) error { - _, err := s.store.GetAuthorizationByID(ctx, tx, authID) - if err != nil { - return ErrAuthNotFound - } - h, err := s.store.GetPassword(ctx, tx, authID) - if err != nil { - if err == kv.ErrKeyNotFound { - return EIncorrectPassword - } - return err - } - hash = []byte(h) - return nil - }) - if err != nil { - return err - } - // compare password - if err := bcrypt.CompareHashAndPassword(hash, []byte(password)); err != nil { - return EIncorrectPassword - } - - return nil -} - -// CompareAndSetPassword checks the password and if they match -// updates to the new password. -func (s *Service) CompareAndSetPassword(ctx context.Context, authID platform.ID, old, new string) error { - err := s.ComparePassword(ctx, authID, old) - if err != nil { - return err - } - - return s.SetPassword(ctx, authID, new) -} - -func encryptPassword(password string) (string, error) { - passHash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) - if err != nil { - return "", err - } - return string(passHash), nil -} diff --git a/v1/authorization/storage.go b/v1/authorization/storage.go deleted file mode 100644 index 1f433c277f4..00000000000 --- a/v1/authorization/storage.go +++ /dev/null @@ -1,107 +0,0 @@ -package authorization - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kit/tracing" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/snowflake" -) - -const MaxIDGenerationN = 100 -const ReservedIDs = 1000 - -var ( - authBucket = []byte("legacy/authorizationsv1") - authIndex = []byte("legacy/authorizationindexv1") -) - -type Store struct { - kvStore kv.Store - IDGen platform.IDGenerator -} - -func NewStore(kvStore kv.Store) (*Store, error) { - st := &Store{ - kvStore: kvStore, - IDGen: snowflake.NewDefaultIDGenerator(), - } - return st, st.setup() -} - -// View opens up a transaction that will not write to any data. Implementing interfaces -// should take care to ensure that all view transactions do not mutate any data. -func (s *Store) View(ctx context.Context, fn func(kv.Tx) error) error { - return s.kvStore.View(ctx, fn) -} - -// Update opens up a transaction that will mutate data. -func (s *Store) Update(ctx context.Context, fn func(kv.Tx) error) error { - return s.kvStore.Update(ctx, fn) -} - -func (s *Store) setup() error { - return s.Update(context.Background(), func(tx kv.Tx) error { - if _, err := tx.Bucket(authBucket); err != nil { - return err - } - if _, err := authIndexBucket(tx); err != nil { - return err - } - - return nil - }) -} - -// generateSafeID attempts to create ids for buckets -// and orgs that are without backslash, commas, and spaces, BUT ALSO do not already exist. -func (s *Store) generateSafeID(ctx context.Context, tx kv.Tx, bucket []byte) (platform.ID, error) { - for i := 0; i < MaxIDGenerationN; i++ { - id := s.IDGen.ID() - - // TODO: this is probably unnecessary but for testing we need to keep it in. - // After KV is cleaned out we can update the tests and remove this. 
- if id < ReservedIDs { - continue - } - - err := s.uniqueID(ctx, tx, bucket, id) - if err == nil { - return id, nil - } - - if err == NotUniqueIDError { - continue - } - - return platform.InvalidID(), err - } - return platform.InvalidID(), ErrFailureGeneratingID -} - -func (s *Store) uniqueID(ctx context.Context, tx kv.Tx, bucket []byte, id platform.ID) error { - span, _ := tracing.StartSpanFromContext(ctx) - defer span.Finish() - - encodedID, err := id.Encode() - if err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - b, err := tx.Bucket(bucket) - if err != nil { - return err - } - - _, err = b.Get(encodedID) - if kv.IsNotFound(err) { - return nil - } - - return NotUniqueIDError -} diff --git a/v1/authorization/storage_authorization.go b/v1/authorization/storage_authorization.go deleted file mode 100644 index f0635f65e7b..00000000000 --- a/v1/authorization/storage_authorization.go +++ /dev/null @@ -1,467 +0,0 @@ -package authorization - -import ( - "context" - "encoding/json" - - "github.com/buger/jsonparser" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" - jsonp "github.com/influxdata/influxdb/v2/pkg/jsonparser" - "github.com/influxdata/influxdb/v2/tenant" -) - -func authIndexKey(n string) []byte { - return []byte(n) -} - -func authIndexBucket(tx kv.Tx) (kv.Bucket, error) { - b, err := tx.Bucket([]byte(authIndex)) - if err != nil { - return nil, UnexpectedAuthIndexError(err) - } - - return b, nil -} - -func encodeAuthorization(a *influxdb.Authorization) ([]byte, error) { - switch a.Status { - case influxdb.Active, influxdb.Inactive: - case "": - a.Status = influxdb.Active - default: - return nil, &errors.Error{ - Code: errors.EInvalid, - Msg: "unknown authorization status", - } - } - - return json.Marshal(a) -} - -func decodeAuthorization(b []byte, a *influxdb.Authorization) error { - if err := json.Unmarshal(b, a); err != nil { - return err - } - if a.Status == "" { - a.Status = influxdb.Active - } - return nil -} - -// CreateAuthorization takes an Authorization object and saves it in storage using its token -// using its token property as an index -func (s *Store) CreateAuthorization(ctx context.Context, tx kv.Tx, a *influxdb.Authorization) error { - // if the provided ID is invalid, or already maps to an existing Auth, then generate a new one - if !a.ID.Valid() { - id, err := s.generateSafeID(ctx, tx, authBucket) - if err != nil { - return nil - } - a.ID = id - } else if err := uniqueID(ctx, tx, a.ID); err != nil { - id, err := s.generateSafeID(ctx, tx, authBucket) - if err != nil { - return nil - } - a.ID = id - } - - ts := tenant.NewStore(s.kvStore) - for _, p := range a.Permissions { - if p.Resource.ID == nil || p.Resource.Type != influxdb.BucketsResourceType { - continue - } - _, err := ts.GetBucket(ctx, tx, *p.Resource.ID) - if err == tenant.ErrBucketNotFound { - return ErrBucketNotFound - } - } - - if err := s.uniqueAuthToken(ctx, tx, a); err != nil { - return ErrTokenAlreadyExistsError - } - - v, err := encodeAuthorization(a) - if err != nil { - return &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - encodedID, err := a.ID.Encode() - if err != nil { - return ErrInvalidAuthIDError(err) - } - - idx, err := authIndexBucket(tx) - if err != nil { - return err - } - - if err := idx.Put(authIndexKey(a.Token), encodedID); err != nil { - return 
&errors.Error{ - Code: errors.EInternal, - Err: err, - } - } - - b, err := tx.Bucket(authBucket) - if err != nil { - return err - } - - if err := b.Put(encodedID, v); err != nil { - return &errors.Error{ - Err: err, - } - } - - return nil -} - -// GetAuthorization gets an authorization by its ID from the auth bucket in kv -func (s *Store) GetAuthorizationByID(ctx context.Context, tx kv.Tx, id platform.ID) (*influxdb.Authorization, error) { - encodedID, err := id.Encode() - if err != nil { - return nil, ErrInvalidAuthID - } - - b, err := tx.Bucket(authBucket) - if err != nil { - return nil, ErrInternalServiceError(err) - } - - v, err := b.Get(encodedID) - if kv.IsNotFound(err) { - return nil, ErrAuthNotFound - } - - if err != nil { - return nil, ErrInternalServiceError(err) - } - - a := &influxdb.Authorization{} - if err := decodeAuthorization(v, a); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - return a, nil -} - -func (s *Store) GetAuthorizationByToken(ctx context.Context, tx kv.Tx, token string) (*influxdb.Authorization, error) { - idx, err := authIndexBucket(tx) - if err != nil { - return nil, err - } - - // use the token to look up the authorization's ID - idKey, err := idx.Get(authIndexKey(token)) - if kv.IsNotFound(err) { - return nil, &errors.Error{ - Code: errors.ENotFound, - Msg: "authorization not found", - } - } - - var id platform.ID - if err := id.Decode(idKey); err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - return s.GetAuthorizationByID(ctx, tx, id) -} - -// ListAuthorizations returns all the authorizations matching a set of FindOptions. This function is used for -// FindAuthorizationByID, FindAuthorizationByToken, and FindAuthorizations in the AuthorizationService implementation -func (s *Store) ListAuthorizations(ctx context.Context, tx kv.Tx, f influxdb.AuthorizationFilter) ([]*influxdb.Authorization, error) { - var as []*influxdb.Authorization - pred := authorizationsPredicateFn(f) - filterFn := filterAuthorizationsFn(f) - err := s.forEachAuthorization(ctx, tx, pred, func(a *influxdb.Authorization) bool { - if filterFn(a) { - as = append(as, a) - } - return true - }) - if err != nil { - return nil, err - } - - return as, nil -} - -// forEachAuthorization will iterate through all authorizations while fn returns true. 
-func (s *Store) forEachAuthorization(ctx context.Context, tx kv.Tx, pred kv.CursorPredicateFunc, fn func(*influxdb.Authorization) bool) error { - b, err := tx.Bucket(authBucket) - if err != nil { - return err - } - - var cur kv.Cursor - if pred != nil { - cur, err = b.Cursor(kv.WithCursorHintPredicate(pred)) - } else { - cur, err = b.Cursor() - } - if err != nil { - return err - } - - for k, v := cur.First(); k != nil; k, v = cur.Next() { - // preallocate Permissions to reduce multiple slice re-allocations - a := &influxdb.Authorization{ - Permissions: make([]influxdb.Permission, 64), - } - - if err := decodeAuthorization(v, a); err != nil { - return err - } - if !fn(a) { - break - } - } - - return nil -} - -// UpdateAuthorization updates the status and description only of an authorization -func (s *Store) UpdateAuthorization(ctx context.Context, tx kv.Tx, id platform.ID, a *influxdb.Authorization) (*influxdb.Authorization, error) { - v, err := encodeAuthorization(a) - if err != nil { - return nil, &errors.Error{ - Code: errors.EInvalid, - Err: err, - } - } - - encodedID, err := a.ID.Encode() - if err != nil { - return nil, &errors.Error{ - Code: errors.ENotFound, - Err: err, - } - } - - idx, err := authIndexBucket(tx) - if err != nil { - return nil, err - } - - if err := idx.Put(authIndexKey(a.Token), encodedID); err != nil { - return nil, &errors.Error{ - Code: errors.EInternal, - Err: err, - } - } - - b, err := tx.Bucket(authBucket) - if err != nil { - return nil, err - } - - if err := b.Put(encodedID, v); err != nil { - return nil, &errors.Error{ - Err: err, - } - } - - return a, nil - -} - -// DeleteAuthorization removes an authorization from storage -func (s *Store) DeleteAuthorization(ctx context.Context, tx kv.Tx, id platform.ID) error { - a, err := s.GetAuthorizationByID(ctx, tx, id) - if err != nil { - return err - } - - encodedID, err := id.Encode() - if err != nil { - return ErrInvalidAuthID - } - - idx, err := authIndexBucket(tx) - if err != nil { - return err - } - - b, err := tx.Bucket(authBucket) - if err != nil { - return err - } - - if err := idx.Delete([]byte(a.Token)); err != nil { - return ErrInternalServiceError(err) - } - - if err := b.Delete(encodedID); err != nil { - return ErrInternalServiceError(err) - } - - return nil -} - -func (s *Store) uniqueAuthToken(ctx context.Context, tx kv.Tx, a *influxdb.Authorization) error { - err := unique(ctx, tx, authIndex, authIndexKey(a.Token)) - if err == kv.NotUniqueError { - // by returning a generic error we are trying to hide when - // a token is non-unique. - return influxdb.ErrUnableToCreateToken - } - // otherwise, this is some sort of internal server error and we - // should provide some debugging information. - return err -} - -func unique(ctx context.Context, tx kv.Tx, indexBucket, indexKey []byte) error { - bucket, err := tx.Bucket(indexBucket) - if err != nil { - return kv.UnexpectedIndexError(err) - } - - _, err = bucket.Get(indexKey) - // if not found then this token is unique. 
- if kv.IsNotFound(err) { - return nil - } - - // no error means this is not unique - if err == nil { - return kv.NotUniqueError - } - - // any other error is some sort of internal server error - return kv.UnexpectedIndexError(err) -} - -// uniqueID returns nil if the ID provided is unique, returns an error otherwise -func uniqueID(ctx context.Context, tx kv.Tx, id platform.ID) error { - encodedID, err := id.Encode() - if err != nil { - return ErrInvalidAuthID - } - - b, err := tx.Bucket(authBucket) - if err != nil { - return ErrInternalServiceError(err) - } - - _, err = b.Get(encodedID) - // if not found then the ID is unique - if kv.IsNotFound(err) { - return nil - } - // no error means this is not unique - if err == nil { - return kv.NotUniqueError - } - - // any other error is some sort of internal server error - return kv.UnexpectedIndexError(err) -} - -func authorizationsPredicateFn(f influxdb.AuthorizationFilter) kv.CursorPredicateFunc { - // if any errors occur reading the JSON data, the predicate will always return true - // to ensure the value is included and handled higher up. - - if f.ID != nil { - exp := *f.ID - return func(_, value []byte) bool { - got, err := jsonp.GetID(value, "id") - if err != nil { - return true - } - return got == exp - } - } - - if f.Token != nil { - exp := *f.Token - return func(_, value []byte) bool { - // it is assumed that token never has escaped string data - got, _, _, err := jsonparser.Get(value, "token") - if err != nil { - return true - } - return string(got) == exp - } - } - - var pred kv.CursorPredicateFunc - if f.OrgID != nil { - exp := *f.OrgID - pred = func(_, value []byte) bool { - got, err := jsonp.GetID(value, "orgID") - if err != nil { - return true - } - - return got == exp - } - } - - if f.UserID != nil { - exp := *f.UserID - prevFn := pred - pred = func(key, value []byte) bool { - prev := prevFn == nil || prevFn(key, value) - got, exists, err := jsonp.GetOptionalID(value, "userID") - return prev && ((exp == got && exists) || err != nil) - } - } - - return pred -} - -type predicateFunc func(a *influxdb.Authorization) bool - -func filterAuthorizationsFn(filter influxdb.AuthorizationFilter) predicateFunc { - - if filter.ID != nil { - return func(a *influxdb.Authorization) bool { - return a.ID == *filter.ID - } - } - - if filter.Token != nil { - return func(a *influxdb.Authorization) bool { - return a.Token == *filter.Token - } - } - - var pred predicateFunc - if filter.OrgID != nil { - exp := *filter.OrgID - prevFn := pred - pred = func(a *influxdb.Authorization) bool { - prev := prevFn == nil || prevFn(a) - return prev && a.OrgID == exp - } - } - - if filter.UserID != nil { - exp := *filter.UserID - prevFn := pred - pred = func(a *influxdb.Authorization) bool { - prev := prevFn == nil || prevFn(a) - return prev && a.UserID == exp - } - } - - if pred == nil { - pred = func(a *influxdb.Authorization) bool { return true } - } - - return pred -} diff --git a/v1/authorization/storage_authorization_test.go b/v1/authorization/storage_authorization_test.go deleted file mode 100644 index 4c10beb47b0..00000000000 --- a/v1/authorization/storage_authorization_test.go +++ /dev/null @@ -1,430 +0,0 @@ -package authorization - -import ( - "context" - "fmt" - "reflect" - "testing" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/inmem" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/kv/migration/all" - 
"github.com/influxdata/influxdb/v2/pkg/pointer" - "github.com/influxdata/influxdb/v2/tenant" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zaptest" -) - -func TestAuth(t *testing.T) { - setup := func(t *testing.T, store *Store, tx kv.Tx) { - for i := 1; i <= 10; i++ { - err := store.CreateAuthorization(context.Background(), tx, &influxdb.Authorization{ - ID: platform.ID(i), - Token: fmt.Sprintf("randomtoken%d", i), - OrgID: platform.ID(i), - UserID: platform.ID(i), - Status: influxdb.Active, - }) - - if err != nil { - t.Fatal(err) - } - } - } - - tt := []struct { - name string - setup func(*testing.T, *Store, kv.Tx) - update func(*testing.T, *Store, kv.Tx) - results func(*testing.T, *Store, kv.Tx) - }{ - { - name: "create", - setup: setup, - results: func(t *testing.T, store *Store, tx kv.Tx) { - auths, err := store.ListAuthorizations(context.Background(), tx, influxdb.AuthorizationFilter{}) - if err != nil { - t.Fatal(err) - } - - if len(auths) != 10 { - t.Fatalf("expected 10 authorizations, got: %d", len(auths)) - } - - expected := []*influxdb.Authorization{} - for i := 1; i <= 10; i++ { - expected = append(expected, &influxdb.Authorization{ - ID: platform.ID(i), - Token: fmt.Sprintf("randomtoken%d", i), - OrgID: platform.ID(i), - UserID: platform.ID(i), - Status: "active", - }) - } - if !reflect.DeepEqual(auths, expected) { - t.Fatalf("expected identical authorizations: \n%+v\n%+v", auths, expected) - } - - // should not be able to create two authorizations with identical tokens - err = store.CreateAuthorization(context.Background(), tx, &influxdb.Authorization{ - ID: platform.ID(1), - Token: fmt.Sprintf("randomtoken%d", 1), - OrgID: platform.ID(1), - UserID: platform.ID(1), - }) - if err == nil { - t.Fatalf("expected to be unable to create authorizations with identical tokens") - } - }, - }, - { - name: "read", - setup: setup, - results: func(t *testing.T, store *Store, tx kv.Tx) { - for i := 1; i <= 10; i++ { - expectedAuth := &influxdb.Authorization{ - ID: platform.ID(i), - Token: fmt.Sprintf("randomtoken%d", i), - OrgID: platform.ID(i), - UserID: platform.ID(i), - Status: influxdb.Active, - } - - authByID, err := store.GetAuthorizationByID(context.Background(), tx, platform.ID(i)) - if err != nil { - t.Fatalf("Unexpectedly could not acquire Authorization by ID [Error]: %v", err) - } - - if !reflect.DeepEqual(authByID, expectedAuth) { - t.Fatalf("ID TEST: expected identical authorizations:\n[Expected]: %+#v\n[Got]: %+#v", expectedAuth, authByID) - } - - authByToken, err := store.GetAuthorizationByToken(context.Background(), tx, fmt.Sprintf("randomtoken%d", i)) - if err != nil { - t.Fatalf("cannot get authorization by Token [Error]: %v", err) - } - - if !reflect.DeepEqual(authByToken, expectedAuth) { - t.Fatalf("TOKEN TEST: expected identical authorizations:\n[Expected]: %+#v\n[Got]: %+#v", expectedAuth, authByToken) - } - } - - }, - }, - { - name: "update", - setup: setup, - update: func(t *testing.T, store *Store, tx kv.Tx) { - for i := 1; i <= 10; i++ { - auth, err := store.GetAuthorizationByID(context.Background(), tx, platform.ID(i)) - if err != nil { - t.Fatalf("Could not get authorization [Error]: %v", err) - } - - auth.Status = influxdb.Inactive - - _, err = store.UpdateAuthorization(context.Background(), tx, platform.ID(i), auth) - if err != nil { - t.Fatalf("Could not get updated authorization [Error]: %v", err) - } - } - }, - results: func(t *testing.T, store *Store, tx kv.Tx) { - - for i := 
1; i <= 10; i++ { - auth, err := store.GetAuthorizationByID(context.Background(), tx, platform.ID(i)) - if err != nil { - t.Fatalf("Could not get authorization [Error]: %v", err) - } - - expectedAuth := &influxdb.Authorization{ - ID: platform.ID(i), - Token: fmt.Sprintf("randomtoken%d", i), - OrgID: platform.ID(i), - UserID: platform.ID(i), - Status: influxdb.Inactive, - } - - if !reflect.DeepEqual(auth, expectedAuth) { - t.Fatalf("expected identical authorizations:\n[Expected] %+#v\n[Got] %+#v", expectedAuth, auth) - } - } - }, - }, - { - name: "delete", - setup: setup, - update: func(t *testing.T, store *Store, tx kv.Tx) { - for i := 1; i <= 10; i++ { - err := store.DeleteAuthorization(context.Background(), tx, platform.ID(i)) - if err != nil { - t.Fatalf("Could not delete authorization [Error]: %v", err) - } - } - }, - results: func(t *testing.T, store *Store, tx kv.Tx) { - for i := 1; i <= 10; i++ { - _, err := store.GetAuthorizationByID(context.Background(), tx, platform.ID(i)) - if err == nil { - t.Fatal("Authorization was not deleted correctly") - } - } - }, - }, - } - - for _, testScenario := range tt { - t.Run(testScenario.name, func(t *testing.T) { - store := inmem.NewKVStore() - if err := all.Up(context.Background(), zaptest.NewLogger(t), store); err != nil { - t.Fatal(err) - } - - ts, err := NewStore(store) - if err != nil { - t.Fatal(err) - } - - // setup - if testScenario.setup != nil { - err := ts.Update(context.Background(), func(tx kv.Tx) error { - testScenario.setup(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - - // update - if testScenario.update != nil { - err := ts.Update(context.Background(), func(tx kv.Tx) error { - testScenario.update(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - - // results - if testScenario.results != nil { - err := ts.View(context.Background(), func(tx kv.Tx) error { - testScenario.results(t, ts, tx) - return nil - }) - - if err != nil { - t.Fatal(err) - } - } - }) - } -} - -func TestAuthBucketNotExists(t *testing.T) { - store := inmem.NewKVStore() - if err := all.Up(context.Background(), zaptest.NewLogger(t), store); err != nil { - t.Fatal(err) - } - - ts, err := NewStore(store) - require.NoError(t, err) - - bucketID := platform.ID(1) - tenant := tenant.NewStore(store) - err = tenant.Update(context.Background(), func(tx kv.Tx) error { - err := tenant.CreateBucket(context.Background(), tx, &influxdb.Bucket{ - ID: bucketID, - OrgID: platform.ID(10), - Name: "testbucket", - }) - if err != nil { - return err - } - - b, err := tenant.GetBucketByName(context.Background(), tx, platform.ID(10), "testbucket") - if err != nil { - return err - } - - bucketID = b.ID - - return nil - }) - require.NoError(t, err) - - perm1, err := influxdb.NewPermissionAtID( - bucketID, - influxdb.ReadAction, - influxdb.BucketsResourceType, - platform.ID(10), - ) - require.NoError(t, err) - - perm2, err := influxdb.NewPermissionAtID( - platform.ID(2), - influxdb.ReadAction, - influxdb.BucketsResourceType, - platform.ID(10), - ) - require.NoError(t, err) - - err = ts.Update(context.Background(), func(tx kv.Tx) error { - err = ts.CreateAuthorization(context.Background(), tx, &influxdb.Authorization{ - ID: platform.ID(1), - Token: "buckettoken", - OrgID: platform.ID(10), - UserID: platform.ID(4), - Status: influxdb.Active, - Permissions: []influxdb.Permission{ - *perm1, - }, - }) - - return err - }) - - require.NoErrorf(t, err, "Authorization creating should have succeeded") - - err = ts.Update(context.Background(), 
func(tx kv.Tx) error { - err = ts.CreateAuthorization(context.Background(), tx, &influxdb.Authorization{ - ID: platform.ID(1), - Token: "buckettoken", - OrgID: platform.ID(10), - UserID: platform.ID(4), - Status: influxdb.Active, - Permissions: []influxdb.Permission{ - *perm2, - }, - }) - - return err - }) - - if err == nil || err != ErrBucketNotFound { - t.Fatalf("Authorization creating should have failed with ErrBucketNotFound [Error]: %v", err) - } -} - -func Test_filterAuthorizationsFn(t *testing.T) { - var ( - otherID = platform.ID(999) - ) - - auth := influxdb.Authorization{ - ID: 1000, - Token: "foo", - Status: influxdb.Active, - OrgID: 2000, - UserID: 3000, - } - tests := []struct { - name string - filt influxdb.AuthorizationFilter - auth influxdb.Authorization - exp bool - }{ - { - name: "default is true", - filt: influxdb.AuthorizationFilter{}, - auth: auth, - exp: true, - }, - { - name: "match id", - filt: influxdb.AuthorizationFilter{ - ID: &auth.ID, - }, - auth: auth, - exp: true, - }, - { - name: "no match id", - filt: influxdb.AuthorizationFilter{ - ID: &otherID, - }, - auth: auth, - exp: false, - }, - { - name: "match token", - filt: influxdb.AuthorizationFilter{ - Token: &auth.Token, - }, - auth: auth, - exp: true, - }, - { - name: "no match token", - filt: influxdb.AuthorizationFilter{ - Token: pointer.String("2"), - }, - auth: auth, - exp: false, - }, - { - name: "match org", - filt: influxdb.AuthorizationFilter{ - OrgID: &auth.OrgID, - }, - auth: auth, - exp: true, - }, - { - name: "no match org", - filt: influxdb.AuthorizationFilter{ - OrgID: &otherID, - }, - auth: auth, - exp: false, - }, - { - name: "match user", - filt: influxdb.AuthorizationFilter{ - UserID: &auth.UserID, - }, - auth: auth, - exp: true, - }, - { - name: "no match user", - filt: influxdb.AuthorizationFilter{ - UserID: &otherID, - }, - auth: auth, - exp: false, - }, - { - name: "match org and user", - filt: influxdb.AuthorizationFilter{ - OrgID: &auth.OrgID, - UserID: &auth.UserID, - }, - auth: auth, - exp: true, - }, - { - name: "no match org and user", - filt: influxdb.AuthorizationFilter{ - OrgID: &otherID, - UserID: &auth.UserID, - }, - auth: auth, - exp: false, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - pred := filterAuthorizationsFn(tc.filt) - got := pred(&tc.auth) - assert.Equal(t, tc.exp, got) - }) - } -} diff --git a/v1/authorization/storage_password.go b/v1/authorization/storage_password.go deleted file mode 100644 index 17d34d446bf..00000000000 --- a/v1/authorization/storage_password.go +++ /dev/null @@ -1,69 +0,0 @@ -package authorization - -import ( - "context" - - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/kv" -) - -var ( - passwordBucket = []byte("legacy/authorizationPasswordv1") -) - -// UnavailablePasswordServiceError is used if we aren't able to add the -// password to the store, it means the store is not available at the moment -// (e.g. network). 
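The `Test_filterAuthorizationsFn` table above exercises the filter as a pure predicate built from optional pointer fields: an empty filter matches everything, and each non-nil field must match. The sketch below is a hypothetical, self-contained illustration of that predicate style; the local `Auth` and `Filter` types stand in for the influxdb ones and this is not the deleted implementation.

```go
package main

import "fmt"

// Auth and Filter are stand-ins for influxdb.Authorization and
// influxdb.AuthorizationFilter; only the fields used here are modeled.
type Auth struct {
	ID, OrgID, UserID uint64
	Token             string
}

type Filter struct {
	ID, OrgID, UserID *uint64
	Token             *string
}

// filterFn returns a predicate that is true only when every set (non-nil)
// filter field matches the authorization; an empty filter matches everything.
func filterFn(f Filter) func(*Auth) bool {
	return func(a *Auth) bool {
		if f.ID != nil && *f.ID != a.ID {
			return false
		}
		if f.Token != nil && *f.Token != a.Token {
			return false
		}
		if f.OrgID != nil && *f.OrgID != a.OrgID {
			return false
		}
		if f.UserID != nil && *f.UserID != a.UserID {
			return false
		}
		return true
	}
}

func main() {
	a := &Auth{ID: 1000, OrgID: 2000, UserID: 3000, Token: "foo"}
	org := uint64(2000)
	fmt.Println(filterFn(Filter{})(a))            // true: empty filter matches
	fmt.Println(filterFn(Filter{OrgID: &org})(a)) // true: org matches
}
```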
-func UnavailablePasswordServiceError(err error) *errors.Error { - return &errors.Error{ - Code: errors.EInternal, - Msg: "unable to access password bucket", - Err: err, - } -} - -func (s *Store) GetPassword(ctx context.Context, tx kv.Tx, id platform.ID) (string, error) { - encodedID, err := id.Encode() - if err != nil { - return "", ErrInvalidAuthIDError(err) - } - - b, err := tx.Bucket(passwordBucket) - if err != nil { - return "", UnavailablePasswordServiceError(err) - } - - passwd, err := b.Get(encodedID) - - return string(passwd), err -} - -func (s *Store) SetPassword(ctx context.Context, tx kv.Tx, id platform.ID, password string) error { - encodedID, err := id.Encode() - if err != nil { - return ErrInvalidAuthIDError(err) - } - - b, err := tx.Bucket(passwordBucket) - if err != nil { - return UnavailablePasswordServiceError(err) - } - - return b.Put(encodedID, []byte(password)) -} - -func (s *Store) DeletePassword(ctx context.Context, tx kv.Tx, id platform.ID) error { - encodedID, err := id.Encode() - if err != nil { - return ErrInvalidAuthIDError(err) - } - - b, err := tx.Bucket(passwordBucket) - if err != nil { - return UnavailablePasswordServiceError(err) - } - - return b.Delete(encodedID) - -} diff --git a/v1/coordinator/config.go b/v1/coordinator/config.go deleted file mode 100644 index 6a83f898781..00000000000 --- a/v1/coordinator/config.go +++ /dev/null @@ -1,39 +0,0 @@ -// Package coordinator contains abstractions for writing points, executing statements, -// and accessing meta data. -package coordinator - -import ( - "github.com/influxdata/influxdb/v2/toml" -) - -const ( - // DefaultMaxConcurrentQueries is the maximum number of running queries. - // A value of zero will make the maximum query limit unlimited. - DefaultMaxConcurrentQueries = 0 - - // DefaultMaxSelectPointN is the maximum number of points a SELECT can process. - // A value of zero will make the maximum point count unlimited. - DefaultMaxSelectPointN = 0 - - // DefaultMaxSelectSeriesN is the maximum number of series a SELECT can run. - // A value of zero will make the maximum series count unlimited. - DefaultMaxSelectSeriesN = 0 -) - -// Config represents the configuration for the coordinator service. -type Config struct { - MaxConcurrentQueries int `toml:"max-concurrent-queries"` - LogQueriesAfter toml.Duration `toml:"log-queries-after"` - MaxSelectPointN int `toml:"max-select-point"` - MaxSelectSeriesN int `toml:"max-select-series"` - MaxSelectBucketsN int `toml:"max-select-buckets"` -} - -// NewConfig returns an instance of Config with defaults. -func NewConfig() Config { - return Config{ - MaxConcurrentQueries: DefaultMaxConcurrentQueries, - MaxSelectPointN: DefaultMaxSelectPointN, - MaxSelectSeriesN: DefaultMaxSelectSeriesN, - } -} diff --git a/v1/coordinator/meta_client.go b/v1/coordinator/meta_client.go deleted file mode 100644 index 0ee3acd2134..00000000000 --- a/v1/coordinator/meta_client.go +++ /dev/null @@ -1,36 +0,0 @@ -package coordinator - -import ( - "time" - - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/influxdata/influxql" -) - -// MetaClient is an interface for accessing meta data. 
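The deleted `storage_password.go` above keeps passwords in a single KV bucket keyed by the encoded authorization ID, with every operation reduced to a put, get, or delete on that bucket. Below is a minimal sketch of that keyed-bucket pattern only; a Go map stands in for the `kv.Bucket`, and the 8-byte big-endian encoding is an illustrative stand-in for `platform.ID.Encode`, not the actual encoding.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// bucket is a stand-in for a kv.Bucket: byte-slice keys to byte-slice values.
type bucket map[string][]byte

// encodeID mimics a fixed-width ID encoding used as the bucket key.
func encodeID(id uint64) []byte {
	k := make([]byte, 8)
	binary.BigEndian.PutUint64(k, id)
	return k
}

func setPassword(b bucket, id uint64, password string) {
	b[string(encodeID(id))] = []byte(password)
}

func getPassword(b bucket, id uint64) (string, bool) {
	v, ok := b[string(encodeID(id))]
	return string(v), ok
}

func deletePassword(b bucket, id uint64) {
	delete(b, string(encodeID(id)))
}

func main() {
	b := bucket{}
	setPassword(b, 4, "hunter2")
	if pw, ok := getPassword(b, 4); ok {
		fmt.Println("stored:", pw)
	}
	deletePassword(b, 4)
	_, ok := getPassword(b, 4)
	fmt.Println("still present after delete:", ok)
}
```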
-type MetaClient interface { - CreateContinuousQuery(database, name, query string) error - CreateDatabase(name string) (*meta.DatabaseInfo, error) - CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) - CreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) - CreateSubscription(database, rp, name, mode string, destinations []string) error - CreateUser(name, password string, admin bool) (meta.User, error) - Database(name string) *meta.DatabaseInfo - Databases() []meta.DatabaseInfo - DropShard(id uint64) error - DropContinuousQuery(database, name string) error - DropDatabase(name string) error - DropRetentionPolicy(database, name string) error - DropSubscription(database, rp, name string) error - DropUser(name string) error - RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) - SetAdminPrivilege(username string, admin bool) error - SetPrivilege(username, database string, p influxql.Privilege) error - ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) - TruncateShardGroups(t time.Time) error - UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error - UpdateUser(name, password string) error - UserPrivilege(username, database string) (*influxql.Privilege, error) - UserPrivileges(username string) (map[string]influxql.Privilege, error) - Users() []meta.UserInfo -} diff --git a/v1/coordinator/meta_client_test.go b/v1/coordinator/meta_client_test.go deleted file mode 100644 index dd780393db5..00000000000 --- a/v1/coordinator/meta_client_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package coordinator_test - -import ( - "time" - - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/influxdata/influxql" -) - -// MetaClient is a mockable implementation of cluster.MetaClient. 
-type MetaClient struct { - CreateContinuousQueryFn func(database, name, query string) error - CreateDatabaseFn func(name string) (*meta.DatabaseInfo, error) - CreateDatabaseWithRetentionPolicyFn func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) - CreateRetentionPolicyFn func(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) - CreateSubscriptionFn func(database, rp, name, mode string, destinations []string) error - CreateUserFn func(name, password string, admin bool) (meta.User, error) - DatabaseFn func(name string) *meta.DatabaseInfo - DatabasesFn func() []meta.DatabaseInfo - DataNodeFn func(id uint64) (*meta.NodeInfo, error) - DataNodesFn func() ([]meta.NodeInfo, error) - DeleteDataNodeFn func(id uint64) error - DeleteMetaNodeFn func(id uint64) error - DropContinuousQueryFn func(database, name string) error - DropDatabaseFn func(name string) error - DropRetentionPolicyFn func(database, name string) error - DropSubscriptionFn func(database, rp, name string) error - DropShardFn func(id uint64) error - DropUserFn func(name string) error - MetaNodesFn func() ([]meta.NodeInfo, error) - RetentionPolicyFn func(database, name string) (rpi *meta.RetentionPolicyInfo, err error) - SetAdminPrivilegeFn func(username string, admin bool) error - SetPrivilegeFn func(username, database string, p influxql.Privilege) error - ShardGroupsByTimeRangeFn func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) - TruncateShardGroupsFn func(t time.Time) error - UpdateRetentionPolicyFn func(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error - UpdateUserFn func(name, password string) error - UserPrivilegeFn func(username, database string) (*influxql.Privilege, error) - UserPrivilegesFn func(username string) (map[string]influxql.Privilege, error) - UsersFn func() []meta.UserInfo -} - -func (c *MetaClient) CreateContinuousQuery(database, name, query string) error { - return c.CreateContinuousQueryFn(database, name, query) -} - -func (c *MetaClient) CreateDatabase(name string) (*meta.DatabaseInfo, error) { - return c.CreateDatabaseFn(name) -} - -func (c *MetaClient) CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { - return c.CreateDatabaseWithRetentionPolicyFn(name, spec) -} - -func (c *MetaClient) CreateRetentionPolicy(database string, spec *meta.RetentionPolicySpec, makeDefault bool) (*meta.RetentionPolicyInfo, error) { - return c.CreateRetentionPolicyFn(database, spec, makeDefault) -} - -func (c *MetaClient) DropShard(id uint64) error { - return c.DropShardFn(id) -} - -func (c *MetaClient) CreateSubscription(database, rp, name, mode string, destinations []string) error { - return c.CreateSubscriptionFn(database, rp, name, mode, destinations) -} - -func (c *MetaClient) CreateUser(name, password string, admin bool) (meta.User, error) { - return c.CreateUserFn(name, password, admin) -} - -func (c *MetaClient) Database(name string) *meta.DatabaseInfo { - return c.DatabaseFn(name) -} - -func (c *MetaClient) Databases() []meta.DatabaseInfo { - return c.DatabasesFn() -} - -func (c *MetaClient) DataNode(id uint64) (*meta.NodeInfo, error) { - return c.DataNodeFn(id) -} - -func (c *MetaClient) DataNodes() ([]meta.NodeInfo, error) { - return c.DataNodesFn() -} - -func (c *MetaClient) DeleteDataNode(id uint64) error { - return c.DeleteDataNodeFn(id) -} - -func (c *MetaClient) DeleteMetaNode(id uint64) error { - return 
c.DeleteMetaNodeFn(id) -} - -func (c *MetaClient) DropContinuousQuery(database, name string) error { - return c.DropContinuousQueryFn(database, name) -} - -func (c *MetaClient) DropDatabase(name string) error { - return c.DropDatabaseFn(name) -} - -func (c *MetaClient) DropRetentionPolicy(database, name string) error { - return c.DropRetentionPolicyFn(database, name) -} - -func (c *MetaClient) DropSubscription(database, rp, name string) error { - return c.DropSubscriptionFn(database, rp, name) -} - -func (c *MetaClient) DropUser(name string) error { - return c.DropUserFn(name) -} - -func (c *MetaClient) MetaNodes() ([]meta.NodeInfo, error) { - return c.MetaNodesFn() -} - -func (c *MetaClient) RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error) { - return c.RetentionPolicyFn(database, name) -} - -func (c *MetaClient) SetAdminPrivilege(username string, admin bool) error { - return c.SetAdminPrivilegeFn(username, admin) -} - -func (c *MetaClient) SetPrivilege(username, database string, p influxql.Privilege) error { - return c.SetPrivilegeFn(username, database, p) -} - -func (c *MetaClient) ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { - return c.ShardGroupsByTimeRangeFn(database, policy, min, max) -} - -func (c *MetaClient) TruncateShardGroups(t time.Time) error { - return c.TruncateShardGroupsFn(t) -} - -func (c *MetaClient) UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error { - return c.UpdateRetentionPolicyFn(database, name, rpu, makeDefault) -} - -func (c *MetaClient) UpdateUser(name, password string) error { - return c.UpdateUserFn(name, password) -} - -func (c *MetaClient) UserPrivilege(username, database string) (*influxql.Privilege, error) { - return c.UserPrivilegeFn(username, database) -} - -func (c *MetaClient) UserPrivileges(username string) (map[string]influxql.Privilege, error) { - return c.UserPrivilegesFn(username) -} - -func (c *MetaClient) Users() []meta.UserInfo { - return c.UsersFn() -} - -// DefaultMetaClientDatabaseFn returns a single database (db0) with a retention policy. -func DefaultMetaClientDatabaseFn(name string) *meta.DatabaseInfo { - return &meta.DatabaseInfo{ - Name: DefaultDatabase, - - DefaultRetentionPolicy: DefaultRetentionPolicy, - } -} diff --git a/v1/coordinator/points_writer.go b/v1/coordinator/points_writer.go deleted file mode 100644 index 5914abed5c9..00000000000 --- a/v1/coordinator/points_writer.go +++ /dev/null @@ -1,454 +0,0 @@ -package coordinator - -import ( - "context" - "errors" - "fmt" - "sort" - "sync" - "time" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" - influxdb "github.com/influxdata/influxdb/v2/v1" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -var ( - // ErrTimeout is returned when a write times out. - ErrTimeout = errors.New("timeout") - - // ErrWriteFailed is returned when no writes succeeded. - ErrWriteFailed = errors.New("write failed") -) - -// PointsWriter handles writes across multiple local and remote data nodes. 
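The `meta_client_test.go` mock above uses the function-field pattern: every interface method delegates to a swappable `...Fn` field, so a test overrides only the behavior it needs. The sketch below shows the same pattern on a hypothetical one-method interface; it is illustrative and not part of the deleted code.

```go
package main

import "fmt"

// Databaser is a tiny stand-in for the coordinator MetaClient interface.
type Databaser interface {
	Database(name string) string
}

// MockDatabaser implements Databaser by delegating to a swappable field,
// the same style as the deleted MetaClient mock.
type MockDatabaser struct {
	DatabaseFn func(name string) string
}

func (m *MockDatabaser) Database(name string) string {
	return m.DatabaseFn(name)
}

// lookupDefault is example caller code that only depends on the interface.
func lookupDefault(c Databaser) string {
	return c.Database("db0")
}

func main() {
	m := &MockDatabaser{
		// A test overrides just the one method it cares about.
		DatabaseFn: func(name string) string { return name + "/autogen" },
	}
	fmt.Println(lookupDefault(m)) // db0/autogen
}
```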
-type PointsWriter struct { - mu sync.RWMutex - closing chan struct{} - WriteTimeout time.Duration - Logger *zap.Logger - - Node *influxdb.Node - - MetaClient interface { - Database(name string) (di *meta.DatabaseInfo) - RetentionPolicy(database, policy string) (*meta.RetentionPolicyInfo, error) - CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) - } - - TSDBStore interface { - CreateShard(ctx context.Context, database, retentionPolicy string, shardID uint64, enabled bool) error - WriteToShard(ctx context.Context, shardID uint64, points []models.Point) error - } - - stats *engineWriteMetrics -} - -// WritePointsRequest represents a request to write point data to the cluster. -type WritePointsRequest struct { - Database string - RetentionPolicy string - Points []models.Point -} - -// AddPoint adds a point to the WritePointRequest with field key 'value' -func (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) { - pt, err := models.NewPoint( - name, models.NewTags(tags), map[string]interface{}{"value": value}, timestamp, - ) - if err != nil { - return - } - w.Points = append(w.Points, pt) -} - -// NewPointsWriter returns a new instance of PointsWriter for a node. -func NewPointsWriter(writeTimeout time.Duration, path string) *PointsWriter { - return &PointsWriter{ - closing: make(chan struct{}), - WriteTimeout: writeTimeout, - Logger: zap.NewNop(), - stats: newEngineWriteMetrics(path), - } -} - -// ShardMapping contains a mapping of shards to points. -type ShardMapping struct { - n int - Points map[uint64][]models.Point // The points associated with a shard ID - Shards map[uint64]*meta.ShardInfo // The shards that have been mapped, keyed by shard ID - Dropped []models.Point // Points that were dropped -} - -// NewShardMapping creates an empty ShardMapping. -func NewShardMapping(n int) *ShardMapping { - return &ShardMapping{ - n: n, - Points: map[uint64][]models.Point{}, - Shards: map[uint64]*meta.ShardInfo{}, - } -} - -// MapPoint adds the point to the ShardMapping, associated with the given shardInfo. -func (s *ShardMapping) MapPoint(shardInfo *meta.ShardInfo, p models.Point) { - if cap(s.Points[shardInfo.ID]) < s.n { - s.Points[shardInfo.ID] = make([]models.Point, 0, s.n) - } - s.Points[shardInfo.ID] = append(s.Points[shardInfo.ID], p) - s.Shards[shardInfo.ID] = shardInfo -} - -// Open opens the communication channel with the point writer. -func (w *PointsWriter) Open() error { - w.mu.Lock() - defer w.mu.Unlock() - w.closing = make(chan struct{}) - return nil -} - -// Close closes the communication channel with the point writer. -func (w *PointsWriter) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - if w.closing != nil { - close(w.closing) - } - return nil -} - -// WithLogger sets the Logger on w. -func (w *PointsWriter) WithLogger(log *zap.Logger) { - w.Logger = log.With(zap.String("service", "write")) -} - -var globalPointsWriteMetrics *writeMetrics = newWriteMetrics() - -type writeMetrics struct { - // labels: type: requested,ok,dropped,err - pointsWriteRequested *prometheus.HistogramVec - pointsWriteOk *prometheus.HistogramVec - pointsWriteDropped *prometheus.HistogramVec - pointsWriteErr *prometheus.HistogramVec - timeout *prometheus.CounterVec -} - -// PrometheusCollectors returns all prometheus metrics for the tsm1 package. 
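The `writeMetrics` struct above holds labelled histogram vectors, and `newEngineWriteMetrics` (further below) binds them once to a single `path` label so hot-path writes only call `Observe`. A minimal sketch of that vec-then-bind pattern with the Prometheus client follows; the metric names mirror the ones declared below, but the snippet is illustrative rather than the deleted code.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// A labelled histogram vector, one time series per engine path.
var okPoints = prometheus.NewHistogramVec(prometheus.HistogramOpts{
	Namespace: "storage",
	Subsystem: "writer",
	Name:      "ok_points",
	Help:      "Histogram of points in successful shard write requests",
	Buckets:   []float64{10, 100, 1000, 10000, 100000},
}, []string{"path"})

// collectors is what a PrometheusCollectors-style function exposes for registration.
func collectors() []prometheus.Collector {
	return []prometheus.Collector{okPoints}
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors()...)

	// Bind the label once per engine, then observe cheaply per write.
	obs := okPoints.With(prometheus.Labels{"path": "/var/lib/influxdb/engine"})
	obs.Observe(42)

	mfs, _ := reg.Gather()
	fmt.Println("metric families:", len(mfs))
}
```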
-func PrometheusCollectors() []prometheus.Collector { - return []prometheus.Collector{ - globalPointsWriteMetrics.pointsWriteRequested, - globalPointsWriteMetrics.pointsWriteOk, - globalPointsWriteMetrics.pointsWriteDropped, - globalPointsWriteMetrics.pointsWriteErr, - globalPointsWriteMetrics.timeout, - } -} - -const namespace = "storage" -const writerSubsystem = "writer" - -func newWriteMetrics() *writeMetrics { - labels := []string{"path"} - writeBuckets := []float64{10, 100, 1000, 10000, 100000} - return &writeMetrics{ - pointsWriteRequested: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: writerSubsystem, - Name: "req_points", - Help: "Histogram of number of points requested to be written", - Buckets: writeBuckets, - }, labels), - pointsWriteOk: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: writerSubsystem, - Name: "ok_points", - Help: "Histogram of number of points in successful shard write requests", - Buckets: writeBuckets, - }, labels), - pointsWriteDropped: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: writerSubsystem, - Name: "dropped_points", - Help: "Histogram of number of points dropped due to partial writes", - Buckets: writeBuckets, - }, labels), - pointsWriteErr: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: writerSubsystem, - Name: "err_points", - Help: "Histogram of number of points in errored shard write requests", - Buckets: writeBuckets, - }, labels), - timeout: prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespace, - Subsystem: writerSubsystem, - Name: "timeouts", - Help: "Number of shard write request timeouts", - ConstLabels: nil, - }, labels), - } -} - -type engineWriteMetrics struct { - pointsWriteRequested prometheus.Observer - pointsWriteOk prometheus.Observer - pointsWriteDropped prometheus.Observer - pointsWriteErr prometheus.Observer - timeout prometheus.Counter -} - -func newEngineWriteMetrics(path string) *engineWriteMetrics { - return &engineWriteMetrics{ - pointsWriteRequested: globalPointsWriteMetrics.pointsWriteRequested.With(prometheus.Labels{"path": path}), - pointsWriteOk: globalPointsWriteMetrics.pointsWriteOk.With(prometheus.Labels{"path": path}), - pointsWriteDropped: globalPointsWriteMetrics.pointsWriteDropped.With(prometheus.Labels{"path": path}), - pointsWriteErr: globalPointsWriteMetrics.pointsWriteErr.With(prometheus.Labels{"path": path}), - timeout: globalPointsWriteMetrics.timeout.With(prometheus.Labels{"path": path}), - } -} - -// MapShards maps the points contained in wp to a ShardMapping. If a point -// maps to a shard group or shard that does not currently exist, it will be -// created before returning the mapping. -func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error) { - rp, err := w.MetaClient.RetentionPolicy(wp.Database, wp.RetentionPolicy) - if err != nil { - return nil, err - } else if rp == nil { - return nil, influxdb.ErrRetentionPolicyNotFound(wp.RetentionPolicy) - } - - // Holds all the shard groups and shards that are required for writes. - list := sgList{items: make(meta.ShardGroupInfos, 0, 8)} - min := time.Unix(0, models.MinNanoTime) - if rp.Duration > 0 { - min = time.Now().Add(-rp.Duration) - } - - for _, p := range wp.Points { - // Either the point is outside the scope of the RP, or we already have - // a suitable shard group for the point. 
- if p.Time().Before(min) || list.Covers(p.Time()) { - continue - } - - // No shard groups overlap with the point's time, so we will create - // a new shard group for this point. - sg, err := w.MetaClient.CreateShardGroup(wp.Database, wp.RetentionPolicy, p.Time()) - if err != nil { - return nil, err - } - - if sg == nil { - return nil, errors.New("nil shard group") - } - list.Add(*sg) - } - - mapping := NewShardMapping(len(wp.Points)) - for _, p := range wp.Points { - sg := list.ShardGroupAt(p.Time()) - if sg == nil { - // We didn't create a shard group because the point was outside the - // scope of the RP. - mapping.Dropped = append(mapping.Dropped, p) - continue - } - - sh := sg.ShardFor(p) - mapping.MapPoint(&sh, p) - } - - return mapping, nil -} - -// sgList is a wrapper around a meta.ShardGroupInfos where we can also check -// if a given time is covered by any of the shard groups in the list. -type sgList struct { - items meta.ShardGroupInfos - - // needsSort indicates if items has been modified without a sort operation. - needsSort bool - - // earliest is the last begin time of any item in items. - earliest time.Time - - // latest is the greatest end time of any item in items. - latest time.Time -} - -func (l sgList) Covers(t time.Time) bool { - if len(l.items) == 0 { - return false - } - return l.ShardGroupAt(t) != nil -} - -// ShardGroupAt attempts to find a shard group that could contain a point -// at the given time. -// -// Shard groups are sorted first according to end time, and then according -// to start time. Therefore, if there are multiple shard groups that match -// this point's time they will be preferred in this order: -// -// - a shard group with the earliest end time; -// - (assuming identical end times) the shard group with the earliest start time. -func (l sgList) ShardGroupAt(t time.Time) *meta.ShardGroupInfo { - if l.items.Len() == 0 { - return nil - } - - // find the earliest shardgroup that could contain this point using binary search. - if l.needsSort { - sort.Sort(l.items) - l.needsSort = false - } - idx := sort.Search(l.items.Len(), func(i int) bool { return l.items[i].EndTime.After(t) }) - - // Check if sort.Search actually found the proper shard. It feels like we should also - // be checking l.items[idx].EndTime, but sort.Search was looking at that field for us. - if idx == l.items.Len() || t.Before(l.items[idx].StartTime) { - // This could mean we are looking for a time not in the list, or we have - // overlaping shards. Overlapping shards do not work with binary searches - // on 1d arrays. You have to use an interval tree, but that's a lot of - // work for what is hopefully a rare event. Instead, we'll check if t - // should be in l, and perform a linear search if it is. This way we'll - // do the correct thing, it may just take a little longer. If we don't - // do this, then we may non-silently drop writes we should have accepted. - - if t.Before(l.earliest) || t.After(l.latest) { - // t is not in range, we can avoid going through the linear search. - return nil - } - - // Oh no, we've probably got overlapping shards. Perform a linear search. - for idx = 0; idx < l.items.Len(); idx++ { - if l.items[idx].Contains(t) { - // Found it! - break - } - } - if idx == l.items.Len() { - // We did not find a shard which contained t. This is very strange. - return nil - } - } - - return &l.items[idx] -} - -// Add appends a shard group to the list, updating the earliest/latest times of the list if needed. 
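`ShardGroupAt` above depends on the groups being sorted by end time: `sort.Search` finds the first group ending after `t`, and a linear scan is the fallback when overlapping groups defeat the binary search. The sketch below reproduces only that search shape over plain intervals, with illustrative types rather than `meta.ShardGroupInfos`.

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

type group struct {
	ID         uint64
	Start, End time.Time
}

func (g group) contains(t time.Time) bool {
	return !t.Before(g.Start) && t.Before(g.End)
}

// groupAt mirrors the search shape: binary search on end time, then a
// linear fallback in case overlapping groups made the binary search miss.
func groupAt(groups []group, t time.Time) *group {
	sort.Slice(groups, func(i, j int) bool { return groups[i].End.Before(groups[j].End) })
	idx := sort.Search(len(groups), func(i int) bool { return groups[i].End.After(t) })
	if idx == len(groups) || t.Before(groups[idx].Start) {
		for idx = 0; idx < len(groups); idx++ {
			if groups[idx].contains(t) {
				break
			}
		}
		if idx == len(groups) {
			return nil
		}
	}
	return &groups[idx]
}

func main() {
	base := time.Date(2016, 10, 19, 0, 0, 0, 0, time.UTC)
	gs := []group{
		{ID: 1, Start: base, End: base.Add(time.Hour)},
		{ID: 2, Start: base.Add(time.Hour), End: base.Add(2 * time.Hour)},
	}
	if g := groupAt(gs, base.Add(90*time.Minute)); g != nil {
		fmt.Println("matched group", g.ID) // matched group 2
	}
}
```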
-func (l *sgList) Add(sgi meta.ShardGroupInfo) { - l.items = append(l.items, sgi) - l.needsSort = true - - // Update our earliest and latest times for l.items - if l.earliest.IsZero() || l.earliest.After(sgi.StartTime) { - l.earliest = sgi.StartTime - } - if l.latest.IsZero() || l.latest.Before(sgi.EndTime) { - l.latest = sgi.EndTime - } -} - -// WritePoints writes the data to the underlying storage. consistencyLevel and user are only used for clustered scenarios -func (w *PointsWriter) WritePoints( - ctx context.Context, - database, retentionPolicy string, - consistencyLevel models.ConsistencyLevel, - user meta.User, - points []models.Point, -) error { - return w.WritePointsPrivileged(ctx, database, retentionPolicy, consistencyLevel, points) -} - -// WritePointsPrivileged writes the data to the underlying storage, consistencyLevel is only used for clustered scenarios -func (w *PointsWriter) WritePointsPrivileged( - ctx context.Context, - database, retentionPolicy string, - consistencyLevel models.ConsistencyLevel, - points []models.Point, -) error { - w.stats.pointsWriteRequested.Observe(float64(len(points))) - - if retentionPolicy == "" { - db := w.MetaClient.Database(database) - if db == nil { - return influxdb.ErrDatabaseNotFound(database) - } - retentionPolicy = db.DefaultRetentionPolicy - } - - shardMappings, err := w.MapShards(&WritePointsRequest{Database: database, RetentionPolicy: retentionPolicy, Points: points}) - if err != nil { - return err - } - - // Write each shard in it's own goroutine and return as soon as one fails. - ch := make(chan error, len(shardMappings.Points)) - for shardID, points := range shardMappings.Points { - go func(shard *meta.ShardInfo, database, retentionPolicy string, points []models.Point) { - err := w.writeToShard(ctx, shard, database, retentionPolicy, points) - if err == nil { - w.stats.pointsWriteOk.Observe(float64(len(points))) - } else { - w.stats.pointsWriteErr.Observe(float64(len(points))) - } - if err == tsdb.ErrShardDeletion { - err = tsdb.PartialWriteError{Reason: fmt.Sprintf("shard %d is pending deletion", shard.ID), Dropped: len(points)} - } - ch <- err - }(shardMappings.Shards[shardID], database, retentionPolicy, points) - } - - if len(shardMappings.Dropped) > 0 { - w.stats.pointsWriteDropped.Observe(float64(len(shardMappings.Dropped))) - err = tsdb.PartialWriteError{Reason: "points beyond retention policy", Dropped: len(shardMappings.Dropped)} - } - timeout := time.NewTimer(w.WriteTimeout) - defer timeout.Stop() - for range shardMappings.Points { - select { - case <-w.closing: - return ErrWriteFailed - case <-timeout.C: - w.stats.timeout.Inc() - // return timeout error to caller - return ErrTimeout - case err := <-ch: - if err != nil { - return err - } - } - } - return err -} - -// writeToShards writes points to a shard. 
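`WritePointsPrivileged` above fans out one goroutine per shard write, collects results on a buffered error channel, and races those results against a write timeout and the writer's closing channel. The following is a minimal, self-contained sketch of that fan-out-and-select shape, with generic write closures standing in for the shard writes; it is not the deleted implementation.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var (
	errTimeout     = errors.New("timeout")
	errWriteFailed = errors.New("write failed")
)

// writeAll mirrors the fan-out above: one goroutine per shard write, results
// on a buffered channel, and a select that races the results against a
// timeout and a closing channel, returning on the first failure.
func writeAll(writes []func() error, timeout time.Duration, closing <-chan struct{}) error {
	ch := make(chan error, len(writes))
	for _, w := range writes {
		go func(w func() error) { ch <- w() }(w)
	}

	t := time.NewTimer(timeout)
	defer t.Stop()
	for range writes {
		select {
		case <-closing:
			return errWriteFailed
		case <-t.C:
			return errTimeout
		case err := <-ch:
			if err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	writes := []func() error{
		func() error { return nil },
		func() error { time.Sleep(10 * time.Millisecond); return nil },
	}
	fmt.Println(writeAll(writes, time.Second, make(chan struct{}))) // <nil>
}
```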
-func (w *PointsWriter) writeToShard(ctx context.Context, shard *meta.ShardInfo, database, retentionPolicy string, points []models.Point) error { - err := w.TSDBStore.WriteToShard(ctx, shard.ID, points) - if err == nil { - return nil - } - - // Except tsdb.ErrShardNotFound no error can be handled here - if err != tsdb.ErrShardNotFound { - return err - } - - // If we've written to shard that should exist on the current node, but the store has - // not actually created this shard, tell it to create it and retry the write - if err = w.TSDBStore.CreateShard(ctx, database, retentionPolicy, shard.ID, true); err != nil { - w.Logger.Warn("Write failed creating shard", zap.Uint64("shard", shard.ID), zap.Error(err)) - return err - } - - if err = w.TSDBStore.WriteToShard(ctx, shard.ID, points); err != nil { - w.Logger.Info("Write failed", zap.Uint64("shard", shard.ID), zap.Error(err)) - return err - } - - return nil -} diff --git a/v1/coordinator/points_writer_internal_test.go b/v1/coordinator/points_writer_internal_test.go deleted file mode 100644 index e3243dfe211..00000000000 --- a/v1/coordinator/points_writer_internal_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package coordinator - -import ( - "testing" - "time" - - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/stretchr/testify/require" -) - -func TestSgList_ShardGroupAt(t *testing.T) { - base := time.Date(2016, 10, 19, 0, 0, 0, 0, time.UTC) - day := func(n int) time.Time { - return base.Add(time.Duration(24*n) * time.Hour) - } - - items := meta.ShardGroupInfos{ - {ID: 1, StartTime: day(0), EndTime: day(1)}, - {ID: 2, StartTime: day(1), EndTime: day(2)}, - {ID: 3, StartTime: day(2), EndTime: day(3)}, - // SG day 3 to day 4 missing... - {ID: 4, StartTime: day(4), EndTime: day(5)}, - {ID: 5, StartTime: day(5), EndTime: day(6)}, - } - var list sgList - for _, i := range items { - list.Add(i) - } - - examples := []struct { - T time.Time - ShardGroupID uint64 // 0 will indicate we don't expect a shard group - }{ - {T: base.Add(-time.Minute), ShardGroupID: 0}, // Before any SG - {T: day(0), ShardGroupID: 1}, - {T: day(0).Add(time.Minute), ShardGroupID: 1}, - {T: day(1), ShardGroupID: 2}, - {T: day(3).Add(time.Minute), ShardGroupID: 0}, // No matching SG - {T: day(5).Add(time.Hour), ShardGroupID: 5}, - } - - for i, example := range examples { - sg := list.ShardGroupAt(example.T) - var id uint64 - if sg != nil { - id = sg.ID - } - - if got, exp := id, example.ShardGroupID; got != exp { - t.Errorf("[Example %d] got %v, expected %v", i+1, got, exp) - } - } -} - -func TestSgList_ShardGroupAtOverlapping(t *testing.T) { - base := time.Date(2016, 10, 19, 0, 0, 0, 0, time.UTC) - hour := func(n int) time.Time { - return base.Add(time.Duration(n) * time.Hour) - } - day := func(n int) time.Time { - return base.Add(time.Duration(24*n) * time.Hour) - } - - items := meta.ShardGroupInfos{ - {ID: 1, StartTime: hour(5), EndTime: hour(6)}, - {ID: 2, StartTime: hour(6), EndTime: hour(7)}, - // Day-long shard overlaps with the two hour-long shards. 
- {ID: 3, StartTime: base, EndTime: day(1)}, - } - var list sgList - for _, i := range items { - list.Add(i) - } - - examples := []struct { - T time.Time - ShardGroupID uint64 // 0 will indicate we don't expect a shard group - }{ - {T: base.Add(-time.Minute), ShardGroupID: 0}, // Before any SG - {T: base, ShardGroupID: 3}, - {T: hour(5), ShardGroupID: 1}, - {T: hour(7).Add(-time.Minute), ShardGroupID: 2}, - {T: hour(8), ShardGroupID: 3}, - {T: day(2), ShardGroupID: 0}, // No matching SG - } - - for _, example := range examples { - t.Run(example.T.String(), func(t *testing.T) { - sg := list.ShardGroupAt(example.T) - var id uint64 - if sg != nil { - id = sg.ID - } - require.Equal(t, example.ShardGroupID, id) - }) - } -} diff --git a/v1/coordinator/points_writer_test.go b/v1/coordinator/points_writer_test.go deleted file mode 100644 index 3a2d2728526..00000000000 --- a/v1/coordinator/points_writer_test.go +++ /dev/null @@ -1,538 +0,0 @@ -package coordinator_test - -import ( - "context" - "fmt" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/tsdb" - influxdb "github.com/influxdata/influxdb/v2/v1" - "github.com/influxdata/influxdb/v2/v1/coordinator" - "github.com/influxdata/influxdb/v2/v1/services/meta" -) - -// TODO(benbjohnson): Rewrite tests to use cluster_test.MetaClient. - -// Ensures the points writer maps a single point to a single shard. -func TestPointsWriter_MapShards_One(t *testing.T) { - ms := PointsWriterMetaClient{} - rp := NewRetentionPolicy("myp", time.Hour, 3) - - ms.NodeIDFn = func() uint64 { return 1 } - ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { - return rp, nil - } - - ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - return &rp.ShardGroups[0], nil - } - - c := coordinator.PointsWriter{MetaClient: ms} - pr := &coordinator.WritePointsRequest{ - Database: "mydb", - RetentionPolicy: "myrp", - } - pr.AddPoint("cpu", 1.0, time.Now(), nil) - - var ( - shardMappings *coordinator.ShardMapping - err error - ) - if shardMappings, err = c.MapShards(pr); err != nil { - t.Fatalf("unexpected an error: %v", err) - } - - if exp := 1; len(shardMappings.Points) != exp { - t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) - } -} - -// Ensures the points writer maps to a new shard group when the shard duration -// is changed. 
-func TestPointsWriter_MapShards_AlterShardDuration(t *testing.T) { - ms := PointsWriterMetaClient{} - rp := NewRetentionPolicy("myp", time.Hour, 3) - - ms.NodeIDFn = func() uint64 { return 1 } - ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { - return rp, nil - } - - var ( - i int - now = time.Now() - ) - - ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - sg := []meta.ShardGroupInfo{ - meta.ShardGroupInfo{ - Shards: make([]meta.ShardInfo, 1), - StartTime: now, EndTime: now.Add(rp.Duration).Add(-1), - }, - meta.ShardGroupInfo{ - Shards: make([]meta.ShardInfo, 1), - StartTime: now.Add(time.Hour), EndTime: now.Add(3 * time.Hour).Add(rp.Duration).Add(-1), - }, - }[i] - i++ - return &sg, nil - } - - c := coordinator.NewPointsWriter(time.Second, "") - c.MetaClient = ms - - pr := &coordinator.WritePointsRequest{ - Database: "mydb", - RetentionPolicy: "myrp", - } - pr.AddPoint("cpu", 1.0, now, nil) - pr.AddPoint("cpu", 2.0, now.Add(2*time.Second), nil) - - var ( - shardMappings *coordinator.ShardMapping - err error - ) - if shardMappings, err = c.MapShards(pr); err != nil { - t.Fatalf("unexpected an error: %v", err) - } - - if got, exp := len(shardMappings.Points[0]), 2; got != exp { - t.Fatalf("got %d point(s), expected %d", got, exp) - } - - if got, exp := len(shardMappings.Shards), 1; got != exp { - t.Errorf("got %d shard(s), expected %d", got, exp) - } - - // Now we alter the retention policy duration. - rp.ShardGroupDuration = 3 * time.Hour - - pr = &coordinator.WritePointsRequest{ - Database: "mydb", - RetentionPolicy: "myrp", - } - pr.AddPoint("cpu", 1.0, now.Add(2*time.Hour), nil) - - // Point is beyond previous shard group so a new shard group should be - // created. - if _, err = c.MapShards(pr); err != nil { - t.Fatalf("unexpected an error: %v", err) - } - - // We can check value of i since it's only incremeneted when a shard group - // is created. - if got, exp := i, 2; got != exp { - t.Fatal("new shard group was not created, expected it to be") - } -} - -// Ensures the points writer maps a multiple points across shard group boundaries. 
-func TestPointsWriter_MapShards_Multiple(t *testing.T) { - ms := PointsWriterMetaClient{} - rp := NewRetentionPolicy("myp", time.Hour, 3) - rp.ShardGroupDuration = time.Hour - AttachShardGroupInfo(rp, []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }) - AttachShardGroupInfo(rp, []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }) - - ms.NodeIDFn = func() uint64 { return 1 } - ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { - return rp, nil - } - - ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - for i, sg := range rp.ShardGroups { - if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { - return &rp.ShardGroups[i], nil - } - } - panic("should not get here") - } - - c := coordinator.NewPointsWriter(time.Second, "") - c.MetaClient = ms - defer c.Close() - pr := &coordinator.WritePointsRequest{ - Database: "mydb", - RetentionPolicy: "myrp", - } - - // Three points that range over the shardGroup duration (1h) and should map to two - // distinct shards - pr.AddPoint("cpu", 1.0, time.Now(), nil) - pr.AddPoint("cpu", 2.0, time.Now().Add(time.Hour), nil) - pr.AddPoint("cpu", 3.0, time.Now().Add(time.Hour+time.Second), nil) - - var ( - shardMappings *coordinator.ShardMapping - err error - ) - if shardMappings, err = c.MapShards(pr); err != nil { - t.Fatalf("unexpected an error: %v", err) - } - - if exp := 2; len(shardMappings.Points) != exp { - t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp) - } - - for _, points := range shardMappings.Points { - // First shard should have 1 point w/ first point added - if len(points) == 1 && points[0].Time() != pr.Points[0].Time() { - t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[0].Time()) - } - - // Second shard should have the last two points added - if len(points) == 2 && points[0].Time() != pr.Points[1].Time() { - t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[1].Time()) - } - - if len(points) == 2 && points[1].Time() != pr.Points[2].Time() { - t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[1].Time(), pr.Points[2].Time()) - } - } -} - -// Ensures the points writer does not map points beyond the retention policy. -func TestPointsWriter_MapShards_Invalid(t *testing.T) { - ms := PointsWriterMetaClient{} - rp := NewRetentionPolicy("myp", time.Hour, 3) - - ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { - return rp, nil - } - - ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - return &rp.ShardGroups[0], nil - } - - c := coordinator.NewPointsWriter(time.Second, "") - c.MetaClient = ms - defer c.Close() - pr := &coordinator.WritePointsRequest{ - Database: "mydb", - RetentionPolicy: "myrp", - } - - // Add a point that goes beyond the current retention policy. - pr.AddPoint("cpu", 1.0, time.Now().Add(-2*time.Hour), nil) - - var ( - shardMappings *coordinator.ShardMapping - err error - ) - if shardMappings, err = c.MapShards(pr); err != nil { - t.Fatalf("unexpected an error: %v", err) - } - - if got, exp := len(shardMappings.Points), 0; got != exp { - t.Errorf("MapShards() len mismatch. 
got %v, exp %v", got, exp) - } - - if got, exp := len(shardMappings.Dropped), 1; got != exp { - t.Fatalf("MapShard() dropped mismatch: got %v, exp %v", got, exp) - } -} - -func TestPointsWriter_WritePoints(t *testing.T) { - tests := []struct { - name string - database string - retentionPolicy string - - // the responses returned by each shard write call. node ID 1 = pos 0 - err []error - expErr error - }{ - { - name: "write one success", - database: "mydb", - retentionPolicy: "myrp", - expErr: nil, - }, - - // Write to non-existent database - { - name: "write to non-existent database", - database: "doesnt_exist", - retentionPolicy: "", - expErr: fmt.Errorf("database not found: doesnt_exist"), - }, - } - - for _, test := range tests { - - pr := &coordinator.WritePointsRequest{ - Database: test.database, - RetentionPolicy: test.retentionPolicy, - } - - // Ensure that the test shard groups are created before the points - // are created. - ms := NewPointsWriterMetaClient() - - // Three points that range over the shardGroup duration (1h) and should map to two - // distinct shards - pr.AddPoint("cpu", 1.0, time.Now(), nil) - pr.AddPoint("cpu", 2.0, time.Now().Add(time.Hour), nil) - pr.AddPoint("cpu", 3.0, time.Now().Add(time.Hour+time.Second), nil) - - // copy to prevent data race - sm := coordinator.NewShardMapping(16) - sm.MapPoint( - &meta.ShardInfo{ID: uint64(1), Owners: []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }}, - pr.Points[0]) - sm.MapPoint( - &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }}, - pr.Points[1]) - sm.MapPoint( - &meta.ShardInfo{ID: uint64(2), Owners: []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }}, - pr.Points[2]) - - // Local coordinator.Node ShardWriter - // lock on the write increment since these functions get called in parallel - var mu sync.Mutex - - store := &fakeStore{ - WriteFn: func(_ context.Context, shardID uint64, points []models.Point) error { - mu.Lock() - defer mu.Unlock() - return nil - }, - } - - ms.DatabaseFn = func(database string) *meta.DatabaseInfo { - return nil - } - ms.NodeIDFn = func() uint64 { return 1 } - - c := coordinator.NewPointsWriter(time.Second, "") - c.MetaClient = ms - c.TSDBStore = store - c.Node = &influxdb.Node{ID: 1} - - c.Open() - defer c.Close() - - err := c.WritePointsPrivileged(context.Background(), pr.Database, pr.RetentionPolicy, models.ConsistencyLevelOne, pr.Points) - if err == nil && test.expErr != nil { - t.Errorf("PointsWriter.WritePointsPrivileged(): '%s' error: got %v, exp %v", test.name, err, test.expErr) - } - - if err != nil && test.expErr == nil { - t.Errorf("PointsWriter.WritePointsPrivileged(): '%s' error: got %v, exp %v", test.name, err, test.expErr) - } - if err != nil && test.expErr != nil && err.Error() != test.expErr.Error() { - t.Errorf("PointsWriter.WritePointsPrivileged(): '%s' error: got %v, exp %v", test.name, err, test.expErr) - } - } -} - -func TestPointsWriter_WritePoints_Dropped(t *testing.T) { - pr := &coordinator.WritePointsRequest{ - Database: "mydb", - RetentionPolicy: "myrp", - } - - // Ensure that the test shard groups are created before the points - // are created. 
- ms := NewPointsWriterMetaClient() - - // Three points that range over the shardGroup duration (1h) and should map to two - // distinct shards - pr.AddPoint("cpu", 1.0, time.Now().Add(-24*time.Hour), nil) - - // copy to prevent data race - sm := coordinator.NewShardMapping(16) - - // ShardMapper dropped this point - sm.Dropped = append(sm.Dropped, pr.Points[0]) - - // Local coordinator.Node ShardWriter - // lock on the write increment since these functions get called in parallel - var mu sync.Mutex - - store := &fakeStore{ - WriteFn: func(_ context.Context, shardID uint64, points []models.Point) error { - mu.Lock() - defer mu.Unlock() - return nil - }, - } - - ms.DatabaseFn = func(database string) *meta.DatabaseInfo { - return nil - } - ms.NodeIDFn = func() uint64 { return 1 } - - c := coordinator.NewPointsWriter(time.Second, "") - c.MetaClient = ms - c.TSDBStore = store - c.Node = &influxdb.Node{ID: 1} - - c.Open() - defer c.Close() - - err := c.WritePointsPrivileged(context.Background(), pr.Database, pr.RetentionPolicy, models.ConsistencyLevelOne, pr.Points) - if _, ok := err.(tsdb.PartialWriteError); !ok { - t.Errorf("PointsWriter.WritePoints(): got %v, exp %v", err, tsdb.PartialWriteError{}) - } -} - -var shardID uint64 - -type fakeStore struct { - WriteFn func(ctx context.Context, shardID uint64, points []models.Point) error - CreateShardfn func(ctx context.Context, database, retentionPolicy string, shardID uint64, enabled bool) error -} - -func (f *fakeStore) WriteToShard(ctx context.Context, shardID uint64, points []models.Point) error { - return f.WriteFn(ctx, shardID, points) -} - -func (f *fakeStore) CreateShard(ctx context.Context, database, retentionPolicy string, shardID uint64, enabled bool) error { - return f.CreateShardfn(ctx, database, retentionPolicy, shardID, enabled) -} - -func NewPointsWriterMetaClient() *PointsWriterMetaClient { - ms := &PointsWriterMetaClient{} - rp := NewRetentionPolicy("myp", time.Hour, 3) - AttachShardGroupInfo(rp, []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }) - AttachShardGroupInfo(rp, []meta.ShardOwner{ - {NodeID: 1}, - {NodeID: 2}, - {NodeID: 3}, - }) - - ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { - return rp, nil - } - - ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - for i, sg := range rp.ShardGroups { - if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { - return &rp.ShardGroups[i], nil - } - } - panic("should not get here") - } - return ms -} - -type PointsWriterMetaClient struct { - NodeIDFn func() uint64 - RetentionPolicyFn func(database, name string) (*meta.RetentionPolicyInfo, error) - CreateShardGroupIfNotExistsFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) - DatabaseFn func(database string) *meta.DatabaseInfo - ShardOwnerFn func(shardID uint64) (string, string, *meta.ShardGroupInfo) -} - -func (m PointsWriterMetaClient) NodeID() uint64 { return m.NodeIDFn() } - -func (m PointsWriterMetaClient) RetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error) { - return m.RetentionPolicyFn(database, name) -} - -func (m PointsWriterMetaClient) CreateShardGroup(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - return m.CreateShardGroupIfNotExistsFn(database, policy, timestamp) -} - -func (m PointsWriterMetaClient) Database(database string) *meta.DatabaseInfo { - return 
m.DatabaseFn(database) -} - -func (m PointsWriterMetaClient) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) { - return m.ShardOwnerFn(shardID) -} - -type Subscriber struct { - PointsFn func() chan<- *coordinator.WritePointsRequest -} - -func (s Subscriber) Points() chan<- *coordinator.WritePointsRequest { - return s.PointsFn() -} - -func NewRetentionPolicy(name string, duration time.Duration, nodeCount int) *meta.RetentionPolicyInfo { - shards := []meta.ShardInfo{} - owners := []meta.ShardOwner{} - for i := 1; i <= nodeCount; i++ { - owners = append(owners, meta.ShardOwner{NodeID: uint64(i)}) - } - - // each node is fully replicated with each other - shards = append(shards, meta.ShardInfo{ - ID: nextShardID(), - Owners: owners, - }) - - start := time.Now() - rp := &meta.RetentionPolicyInfo{ - Name: "myrp", - ReplicaN: nodeCount, - Duration: duration, - ShardGroupDuration: duration, - ShardGroups: []meta.ShardGroupInfo{ - meta.ShardGroupInfo{ - ID: nextShardID(), - StartTime: start, - EndTime: start.Add(duration).Add(-1), - Shards: shards, - }, - }, - } - return rp -} - -func AttachShardGroupInfo(rp *meta.RetentionPolicyInfo, owners []meta.ShardOwner) { - var startTime, endTime time.Time - if len(rp.ShardGroups) == 0 { - startTime = time.Now() - } else { - startTime = rp.ShardGroups[len(rp.ShardGroups)-1].StartTime.Add(rp.ShardGroupDuration) - } - endTime = startTime.Add(rp.ShardGroupDuration).Add(-1) - - sh := meta.ShardGroupInfo{ - ID: uint64(len(rp.ShardGroups) + 1), - StartTime: startTime, - EndTime: endTime, - Shards: []meta.ShardInfo{ - meta.ShardInfo{ - ID: nextShardID(), - Owners: owners, - }, - }, - } - rp.ShardGroups = append(rp.ShardGroups, sh) -} - -func nextShardID() uint64 { - return atomic.AddUint64(&shardID, 1) -} diff --git a/v1/coordinator/shard_mapper.go b/v1/coordinator/shard_mapper.go deleted file mode 100644 index 97fac2b584d..00000000000 --- a/v1/coordinator/shard_mapper.go +++ /dev/null @@ -1,268 +0,0 @@ -package coordinator - -import ( - "context" - "fmt" - "io" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/influxdata/influxql" -) - -// IteratorCreator is an interface that combines mapping fields and creating iterators. -type IteratorCreator interface { - query.IteratorCreator - influxql.FieldMapper - io.Closer -} - -// LocalShardMapper implements a ShardMapper for local shards. -type LocalShardMapper struct { - MetaClient interface { - ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) - } - - TSDBStore interface { - ShardGroup(ids []uint64) tsdb.ShardGroup - } - - DBRP influxdb.DBRPMappingService -} - -// MapShards maps the sources to the appropriate shards into an IteratorCreator. 
-func (e *LocalShardMapper) MapShards(ctx context.Context, sources influxql.Sources, t influxql.TimeRange, opt query.SelectOptions) (query.ShardGroup, error) { - a := &LocalShardMapping{ - ShardMap: make(map[Source]tsdb.ShardGroup), - } - - tmin := time.Unix(0, t.MinTimeNano()) - tmax := time.Unix(0, t.MaxTimeNano()) - if err := e.mapShards(ctx, a, sources, tmin, tmax, opt.OrgID); err != nil { - return nil, err - } - a.MinTime, a.MaxTime = tmin, tmax - return a, nil -} - -func (e *LocalShardMapper) mapShards(ctx context.Context, a *LocalShardMapping, sources influxql.Sources, tmin, tmax time.Time, orgID platform.ID) error { - for _, s := range sources { - switch s := s.(type) { - case *influxql.Measurement: - source := Source{ - Database: s.Database, - RetentionPolicy: s.RetentionPolicy, - } - // Retrieve the list of shards for this database. This list of - // shards is always the same regardless of which measurement we are - // using. - if _, ok := a.ShardMap[source]; !ok { - // lookup bucket and create info - mappings, _, err := e.DBRP.FindMany(ctx, influxdb.DBRPMappingFilter{ - OrgID: &orgID, - Database: &s.Database, - RetentionPolicy: &s.RetentionPolicy, - Virtual: nil, - }) - if err != nil { - return fmt.Errorf("finding DBRP mappings: %v", err) - } else if len(mappings) == 0 { - return fmt.Errorf("retention policy not found: %s", s.RetentionPolicy) - } else if len(mappings) != 1 { - return fmt.Errorf("finding DBRP mappings: expected 1, found %d", len(mappings)) - } - - mapping := mappings[0] - groups, err := e.MetaClient.ShardGroupsByTimeRange(mapping.BucketID.String(), meta.DefaultRetentionPolicyName, tmin, tmax) - if err != nil { - return err - } - - if len(groups) == 0 { - a.ShardMap[source] = nil - continue - } - - shardIDs := make([]uint64, 0, len(groups[0].Shards)*len(groups)) - for _, g := range groups { - for _, si := range g.Shards { - shardIDs = append(shardIDs, si.ID) - } - } - a.ShardMap[source] = e.TSDBStore.ShardGroup(shardIDs) - } - case *influxql.SubQuery: - if err := e.mapShards(ctx, a, s.Statement.Sources, tmin, tmax, orgID); err != nil { - return err - } - } - } - return nil -} - -// ShardMapper maps data sources to a list of shard information. -type LocalShardMapping struct { - ShardMap map[Source]tsdb.ShardGroup - - // MinTime is the minimum time that this shard mapper will allow. - // Any attempt to use a time before this one will automatically result in using - // this time instead. - MinTime time.Time - - // MaxTime is the maximum time that this shard mapper will allow. - // Any attempt to use a time after this one will automatically result in using - // this time instead. 
- MaxTime time.Time -} - -func (a *LocalShardMapping) FieldDimensions(ctx context.Context, m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { - source := Source{ - Database: m.Database, - RetentionPolicy: m.RetentionPolicy, - } - - sg := a.ShardMap[source] - if sg == nil { - return - } - - var measurements []string - if m.Regex != nil { - measurements = sg.MeasurementsByRegex(m.Regex.Val) - } else { - measurements = []string{m.Name} - } - - f, d, err := sg.FieldDimensions(measurements) - if err != nil { - return nil, nil, err - } - - return f, d, nil -} - -func (a *LocalShardMapping) MapType(ctx context.Context, m *influxql.Measurement, field string) influxql.DataType { - source := Source{ - Database: m.Database, - RetentionPolicy: m.RetentionPolicy, - } - - sg := a.ShardMap[source] - if sg == nil { - return influxql.Unknown - } - - var names []string - if m.Regex != nil { - names = sg.MeasurementsByRegex(m.Regex.Val) - } else { - names = []string{m.Name} - } - - var typ influxql.DataType - for _, name := range names { - if m.SystemIterator != "" { - name = m.SystemIterator - } - t := sg.MapType(name, field) - if typ.LessThan(t) { - typ = t - } - } - return typ -} - -func (a *LocalShardMapping) CreateIterator(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - source := Source{ - Database: m.Database, - RetentionPolicy: m.RetentionPolicy, - } - - sg := a.ShardMap[source] - if sg == nil { - return nil, nil - } - - // Override the time constraints if they don't match each other. - if !a.MinTime.IsZero() && opt.StartTime < a.MinTime.UnixNano() { - opt.StartTime = a.MinTime.UnixNano() - } - if !a.MaxTime.IsZero() && opt.EndTime > a.MaxTime.UnixNano() { - opt.EndTime = a.MaxTime.UnixNano() - } - - if m.Regex != nil { - measurements := sg.MeasurementsByRegex(m.Regex.Val) - inputs := make([]query.Iterator, 0, len(measurements)) - if err := func() error { - // Create a Measurement for each returned matching measurement value - // from the regex. - for _, measurement := range measurements { - mm := m.Clone() - mm.Name = measurement // Set the name to this matching regex value. - input, err := sg.CreateIterator(ctx, mm, opt) - if err != nil { - return err - } - inputs = append(inputs, input) - } - return nil - }(); err != nil { - query.Iterators(inputs).Close() - return nil, err - } - - return query.Iterators(inputs).Merge(opt) - } - return sg.CreateIterator(ctx, m, opt) -} - -func (a *LocalShardMapping) IteratorCost(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.IteratorCost, error) { - source := Source{ - Database: m.Database, - RetentionPolicy: m.RetentionPolicy, - } - - sg := a.ShardMap[source] - if sg == nil { - return query.IteratorCost{}, nil - } - - // Override the time constraints if they don't match each other. - if !a.MinTime.IsZero() && opt.StartTime < a.MinTime.UnixNano() { - opt.StartTime = a.MinTime.UnixNano() - } - if !a.MaxTime.IsZero() && opt.EndTime > a.MaxTime.UnixNano() { - opt.EndTime = a.MaxTime.UnixNano() - } - - if m.Regex != nil { - var costs query.IteratorCost - measurements := sg.MeasurementsByRegex(m.Regex.Val) - for _, measurement := range measurements { - cost, err := sg.IteratorCost(ctx, measurement, opt) - if err != nil { - return query.IteratorCost{}, err - } - costs = costs.Combine(cost) - } - return costs, nil - } - return sg.IteratorCost(ctx, m.Name, opt) -} - -// Close clears out the list of mapped shards. 
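Both `CreateIterator` and `IteratorCost` above clamp the iterator options to the mapping's `MinTime`/`MaxTime` window before delegating to the shard group. A minimal sketch of that clamp, assuming nothing beyond the `UnixNano` comparisons shown above:

```go
package main

import (
	"fmt"
	"time"
)

// clampWindow narrows a [start, end] nanosecond range to the mapper's
// window, mirroring the checks in CreateIterator and IteratorCost above.
func clampWindow(start, end int64, minT, maxT time.Time) (int64, int64) {
	if !minT.IsZero() && start < minT.UnixNano() {
		start = minT.UnixNano()
	}
	if !maxT.IsZero() && end > maxT.UnixNano() {
		end = maxT.UnixNano()
	}
	return start, end
}

func main() {
	min := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)
	max := min.Add(24 * time.Hour)
	s, e := clampWindow(0, max.Add(time.Hour).UnixNano(), min, max)
	fmt.Println(s == min.UnixNano(), e == max.UnixNano()) // true true
}
```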
-func (a *LocalShardMapping) Close() error { - a.ShardMap = nil - return nil -} - -// Source contains the database and retention policy source for data. -type Source struct { - Database string - RetentionPolicy string -} diff --git a/v1/coordinator/shard_mapper_test.go b/v1/coordinator/shard_mapper_test.go deleted file mode 100644 index 29c5c0a3c9a..00000000000 --- a/v1/coordinator/shard_mapper_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package coordinator_test - -import ( - "context" - "github.com/influxdata/influx-cli/v2/api" - "reflect" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/dbrp/mocks" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/internal" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/v1/coordinator" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/influxdata/influxql" -) - -func TestLocalShardMapper(t *testing.T) { - orgID := platform.ID(0xff00) - bucketID := platform.ID(0xffee) - - tests := []struct { - name string - db string - rp string - filt influxdb.DBRPMappingFilter - mapping []*influxdb.DBRPMapping - }{ - { - name: "Physical DBRP Mapping", - db: "db0", - rp: "rp0", - filt: influxdb.DBRPMappingFilter{OrgID: &orgID, Database: api.PtrString("db0"), RetentionPolicy: api.PtrString("rp0"), Virtual: nil}, - mapping: []*influxdb.DBRPMapping{{Database: "db0", RetentionPolicy: "rp0", OrganizationID: orgID, BucketID: bucketID}}, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - dbrp := mocks.NewMockDBRPMappingService(ctrl) - dbrp.EXPECT(). - FindMany(gomock.Any(), tc.filt). - Times(2). - Return(tc.mapping, len(tc.mapping), nil) - - var metaClient MetaClient - metaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) ([]meta.ShardGroupInfo, error) { - if database != bucketID.String() { - t.Errorf("unexpected database: %s", database) - } - if policy != meta.DefaultRetentionPolicyName { - t.Errorf("unexpected retention policy: %s", policy) - } - return []meta.ShardGroupInfo{ - {ID: 1, Shards: []meta.ShardInfo{ - {ID: 1, Owners: []meta.ShardOwner{{NodeID: 0}}}, - {ID: 2, Owners: []meta.ShardOwner{{NodeID: 0}}}, - }}, - {ID: 2, Shards: []meta.ShardInfo{ - {ID: 3, Owners: []meta.ShardOwner{{NodeID: 0}}}, - {ID: 4, Owners: []meta.ShardOwner{{NodeID: 0}}}, - }}, - }, nil - } - - tsdbStore := &internal.TSDBStoreMock{} - tsdbStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup { - if !reflect.DeepEqual(ids, []uint64{1, 2, 3, 4}) { - t.Errorf("unexpected shard ids: %#v", ids) - } - - var sh MockShard - sh.CreateIteratorFn = func(ctx context.Context, measurement *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - if measurement.Name != "cpu" { - t.Errorf("unexpected measurement: %s", measurement.Name) - } - return &FloatIterator{}, nil - } - return &sh - } - - // Initialize the shard mapper. - shardMapper := &coordinator.LocalShardMapper{ - MetaClient: &metaClient, - TSDBStore: tsdbStore, - DBRP: dbrp, - } - - // Normal measurement. 
- measurement := &influxql.Measurement{ - Database: tc.db, - RetentionPolicy: tc.rp, - Name: "cpu", - } - ic, err := shardMapper.MapShards(context.Background(), []influxql.Source{measurement}, influxql.TimeRange{}, query.SelectOptions{OrgID: orgID}) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // This should be a LocalShardMapping. - m, ok := ic.(*coordinator.LocalShardMapping) - if !ok { - t.Fatalf("unexpected mapping type: %T", ic) - } else if len(m.ShardMap) != 1 { - t.Fatalf("unexpected number of shard mappings: %d", len(m.ShardMap)) - } - - if _, err := ic.CreateIterator(context.Background(), measurement, query.IteratorOptions{OrgID: orgID}); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // Subquery. - subquery := &influxql.SubQuery{ - Statement: &influxql.SelectStatement{ - Sources: []influxql.Source{measurement}, - }, - } - ic, err = shardMapper.MapShards(context.Background(), []influxql.Source{subquery}, influxql.TimeRange{}, query.SelectOptions{OrgID: orgID}) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - // This should be a LocalShardMapping. - m, ok = ic.(*coordinator.LocalShardMapping) - if !ok { - t.Fatalf("unexpected mapping type: %T", ic) - } else if len(m.ShardMap) != 1 { - t.Fatalf("unexpected number of shard mappings: %d", len(m.ShardMap)) - } - - if _, err := ic.CreateIterator(context.Background(), measurement, query.IteratorOptions{OrgID: orgID}); err != nil { - t.Fatalf("unexpected error: %s", err) - } - }) - } -} diff --git a/v1/coordinator/statement_executor.go b/v1/coordinator/statement_executor.go deleted file mode 100644 index 01f43c5b471..00000000000 --- a/v1/coordinator/statement_executor.go +++ /dev/null @@ -1,862 +0,0 @@ -package coordinator - -import ( - "context" - "errors" - "fmt" - "sort" - "strings" - "time" - - "github.com/influxdata/influxdb/v2" - "github.com/influxdata/influxdb/v2/authorizer" - iql "github.com/influxdata/influxdb/v2/influxql" - "github.com/influxdata/influxdb/v2/influxql/query" - errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/tracing" - "github.com/influxdata/influxdb/v2/pkg/tracing/fields" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/influxdata/influxql" -) - -// ErrDatabaseNameRequired is returned when executing statements that require a database, -// when a database has not been provided. -var ErrDatabaseNameRequired = errors.New("database name required") - -// StatementExecutor executes a statement in the query. -type StatementExecutor struct { - MetaClient MetaClient - - // TSDB storage for local node. - TSDBStore TSDBStore - - // ShardMapper for mapping shards when executing a SELECT statement. - ShardMapper query.ShardMapper - - DBRP influxdb.DBRPMappingService - - // Select statement limits - MaxSelectPointN int - MaxSelectSeriesN int - MaxSelectBucketsN int -} - -// ExecuteStatement executes the given statement with the given execution context. -func (e *StatementExecutor) ExecuteStatement(ctx context.Context, stmt influxql.Statement, ectx *query.ExecutionContext) error { - // Select statements are handled separately so that they can be streamed. 
- if stmt, ok := stmt.(*influxql.SelectStatement); ok { - return e.executeSelectStatement(ctx, stmt, ectx) - } - - var rows models.Rows - var messages []*query.Message - var err error - switch stmt := stmt.(type) { - case *influxql.AlterRetentionPolicyStatement: - err = iql.ErrNotImplemented("ALTER RETENTION POLICY") - case *influxql.CreateContinuousQueryStatement: - err = iql.ErrNotImplemented("CREATE CONTINUOUS QUERY") - case *influxql.CreateDatabaseStatement: - err = iql.ErrNotImplemented("CREATE DATABASE") - case *influxql.CreateRetentionPolicyStatement: - err = iql.ErrNotImplemented("CREATE RETENTION POLICY") - case *influxql.CreateSubscriptionStatement: - err = iql.ErrNotImplemented("CREATE SUBSCRIPTION") - case *influxql.CreateUserStatement: - err = iql.ErrNotImplemented("CREATE USER") - case *influxql.DeleteSeriesStatement: - return e.executeDeleteSeriesStatement(ctx, stmt, ectx.Database, ectx) - case *influxql.DropContinuousQueryStatement: - err = iql.ErrNotImplemented("DROP CONTINUOUS QUERY") - case *influxql.DropDatabaseStatement: - err = iql.ErrNotImplemented("DROP DATABASE") - case *influxql.DropMeasurementStatement: - return e.executeDropMeasurementStatement(ctx, stmt, ectx.Database, ectx) - case *influxql.DropSeriesStatement: - err = iql.ErrNotImplemented("DROP SERIES") - case *influxql.DropRetentionPolicyStatement: - err = iql.ErrNotImplemented("DROP RETENTION POLICY") - case *influxql.DropShardStatement: - err = iql.ErrNotImplemented("DROP SHARD") - case *influxql.DropSubscriptionStatement: - err = iql.ErrNotImplemented("DROP SUBSCRIPTION") - case *influxql.DropUserStatement: - err = iql.ErrNotImplemented("DROP USER") - case *influxql.ExplainStatement: - if stmt.Analyze { - rows, err = e.executeExplainAnalyzeStatement(ctx, stmt, ectx) - } else { - rows, err = e.executeExplainStatement(ctx, stmt, ectx) - } - case *influxql.GrantStatement: - err = iql.ErrNotImplemented("GRANT") - case *influxql.GrantAdminStatement: - err = iql.ErrNotImplemented("GRANT ALL") - case *influxql.RevokeStatement: - err = iql.ErrNotImplemented("REVOKE") - case *influxql.RevokeAdminStatement: - err = iql.ErrNotImplemented("REVOKE ALL") - case *influxql.ShowContinuousQueriesStatement: - rows, err = nil, iql.ErrNotImplemented("SHOW CONTINUOUS QUERIES") - case *influxql.ShowDatabasesStatement: - rows, err = e.executeShowDatabasesStatement(ctx, stmt, ectx) - case *influxql.ShowDiagnosticsStatement: - rows, err = nil, iql.ErrNotImplemented("SHOW DIAGNOSTICS") - case *influxql.ShowGrantsForUserStatement: - rows, err = nil, iql.ErrNotImplemented("SHOW GRANTS") - case *influxql.ShowMeasurementsStatement: - return e.executeShowMeasurementsStatement(ctx, stmt, ectx) - case *influxql.ShowMeasurementCardinalityStatement: - rows, err = nil, iql.ErrNotImplemented("SHOW MEASUREMENT CARDINALITY") - case *influxql.ShowRetentionPoliciesStatement: - rows, err = e.executeShowRetentionPoliciesStatement(ctx, stmt, ectx) - case *influxql.ShowSeriesCardinalityStatement: - rows, err = nil, iql.ErrNotImplemented("SHOW SERIES CARDINALITY") - case *influxql.ShowShardsStatement: - rows, err = nil, iql.ErrNotImplemented("SHOW SHARDS") - case *influxql.ShowShardGroupsStatement: - rows, err = nil, iql.ErrNotImplemented("SHOW SHARD GROUPS") - case *influxql.ShowStatsStatement: - rows, err = nil, iql.ErrNotImplemented("SHOW STATS") - case *influxql.ShowSubscriptionsStatement: - rows, err = nil, iql.ErrNotImplemented("SHOW SUBSCRIPTIONS") - case *influxql.ShowTagKeysStatement: - return e.executeShowTagKeys(ctx, stmt, ectx) - case 
*influxql.ShowTagValuesStatement: - return e.executeShowTagValues(ctx, stmt, ectx) - case *influxql.ShowUsersStatement: - rows, err = nil, iql.ErrNotImplemented("SHOW USERS") - case *influxql.SetPasswordUserStatement: - err = iql.ErrNotImplemented("SET PASSWORD") - case *influxql.ShowQueriesStatement, *influxql.KillQueryStatement: - err = iql.ErrNotImplemented("SHOW QUERIES") - default: - return query.ErrInvalidQuery - } - - if err != nil { - return err - } - - return ectx.Send(ctx, &query.Result{ - Series: rows, - Messages: messages, - }) -} - -func (e *StatementExecutor) executeExplainStatement(ctx context.Context, q *influxql.ExplainStatement, ectx *query.ExecutionContext) (models.Rows, error) { - opt := query.SelectOptions{ - OrgID: ectx.OrgID, - NodeID: ectx.ExecutionOptions.NodeID, - MaxSeriesN: e.MaxSelectSeriesN, - MaxBucketsN: e.MaxSelectBucketsN, - } - - // Prepare the query for execution, but do not actually execute it. - // This should perform any needed substitutions. - p, err := query.Prepare(ctx, q.Statement, e.ShardMapper, opt) - if err != nil { - return nil, err - } - defer p.Close() - - plan, err := p.Explain(ctx) - if err != nil { - return nil, err - } - plan = strings.TrimSpace(plan) - - row := &models.Row{ - Columns: []string{"QUERY PLAN"}, - } - for _, s := range strings.Split(plan, "\n") { - row.Values = append(row.Values, []interface{}{s}) - } - return models.Rows{row}, nil -} - -func (e *StatementExecutor) executeExplainAnalyzeStatement(ctx context.Context, q *influxql.ExplainStatement, ectx *query.ExecutionContext) (models.Rows, error) { - stmt := q.Statement - t, span := tracing.NewTrace("select") - ctx = tracing.NewContextWithTrace(ctx, t) - ctx = tracing.NewContextWithSpan(ctx, span) - var aux query.Iterators - ctx = query.NewContextWithIterators(ctx, &aux) - start := time.Now() - - cur, err := e.createIterators(ctx, stmt, ectx.ExecutionOptions, ectx.StatisticsGatherer) - if err != nil { - return nil, err - } - - iterTime := time.Since(start) - - // Generate a row emitter from the iterator set. - em := query.NewEmitter(cur, ectx.ChunkSize) - - // Emit rows to the results channel. - var writeN int64 - for { - var row *models.Row - row, _, err = em.Emit() - if err != nil { - goto CLEANUP - } else if row == nil { - // Check if the query was interrupted while emitting. - if err = ctx.Err(); err != nil { - goto CLEANUP - } - break - } - - writeN += int64(len(row.Values)) - } - -CLEANUP: - em.Close() - if err != nil { - return nil, err - } - - // close auxiliary iterators deterministically to finalize any captured measurements - aux.Close() - - totalTime := time.Since(start) - span.MergeFields( - fields.Duration("total_time", totalTime), - fields.Duration("planning_time", iterTime), - fields.Duration("execution_time", totalTime-iterTime), - ) - span.Finish() - - row := &models.Row{ - Columns: []string{"EXPLAIN ANALYZE"}, - } - for _, s := range strings.Split(t.Tree().String(), "\n") { - row.Values = append(row.Values, []interface{}{s}) - } - - return models.Rows{row}, nil -} - -func (e *StatementExecutor) executeSelectStatement(ctx context.Context, stmt *influxql.SelectStatement, ectx *query.ExecutionContext) error { - cur, err := e.createIterators(ctx, stmt, ectx.ExecutionOptions, ectx.StatisticsGatherer) - if err != nil { - return err - } - - // Generate a row emitter from the iterator set. - em := query.NewEmitter(cur, ectx.ChunkSize) - defer em.Close() - - // Emit rows to the results channel. 
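// Editorial sketch (illustrative only, plain slices instead of the models package):
// executeExplainStatement above shapes its output as a single "QUERY PLAN" column, with
// each line of the prepared plan's text becoming one value row. The plan text below is made up.
package main

import (
	"fmt"
	"strings"
)

func planToRows(plan string) (columns []string, values [][]interface{}) {
	columns = []string{"QUERY PLAN"}
	for _, line := range strings.Split(strings.TrimSpace(plan), "\n") {
		values = append(values, []interface{}{line})
	}
	return columns, values
}

func main() {
	cols, vals := planToRows("EXPRESSION: <nil>\nNUMBER OF SHARDS: 2")
	fmt.Println(cols, len(vals)) // [QUERY PLAN] 2
}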
- var emitted bool - - if stmt.Target != nil { - // SELECT INTO is unsupported - return iql.ErrNotImplemented("SELECT INTO") - } - - for { - row, partial, err := em.Emit() - if err != nil { - return err - } else if row == nil { - // Check if the query was interrupted while emitting. - if err := ctx.Err(); err != nil { - return err - } - break - } - - result := &query.Result{ - Series: []*models.Row{row}, - Partial: partial, - } - - // Send results or exit if closing. - if err := ectx.Send(ctx, result); err != nil { - return err - } - - emitted = true - } - - // Always emit at least one result. - if !emitted { - return ectx.Send(ctx, &query.Result{ - Series: make([]*models.Row, 0), - }) - } - - return nil -} - -func (e *StatementExecutor) createIterators(ctx context.Context, stmt *influxql.SelectStatement, opt query.ExecutionOptions, gatherer *iql.StatisticsGatherer) (query.Cursor, error) { - defer func(start time.Time) { - dur := time.Since(start) - gatherer.Append(iql.NewImmutableCollector(iql.Statistics{PlanDuration: dur})) - }(time.Now()) - - sopt := query.SelectOptions{ - OrgID: opt.OrgID, - NodeID: opt.NodeID, - MaxSeriesN: e.MaxSelectSeriesN, - MaxPointN: e.MaxSelectPointN, - MaxBucketsN: e.MaxSelectBucketsN, - StatisticsGatherer: gatherer, - } - - // Create a set of iterators from a selection. - cur, err := query.Select(ctx, stmt, e.ShardMapper, sopt) - if err != nil { - return nil, err - } - return cur, nil -} - -func (e *StatementExecutor) executeShowDatabasesStatement(ctx context.Context, q *influxql.ShowDatabasesStatement, ectx *query.ExecutionContext) (models.Rows, error) { - row := &models.Row{Name: "databases", Columns: []string{"name"}} - dbrps, _, err := e.DBRP.FindMany(ctx, influxdb.DBRPMappingFilter{ - OrgID: &ectx.OrgID, - }) - if err != nil { - return nil, err - } - - seenDbs := make(map[string]struct{}, len(dbrps)) - for _, dbrp := range dbrps { - if _, ok := seenDbs[dbrp.Database]; ok { - continue - } - - perm, err := influxdb.NewPermissionAtID(dbrp.BucketID, influxdb.ReadAction, influxdb.BucketsResourceType, dbrp.OrganizationID) - if err != nil { - return nil, err - } - err = authorizer.IsAllowed(ctx, *perm) - if err != nil { - if errors2.ErrorCode(err) == errors2.EUnauthorized { - continue - } - return nil, err - } - seenDbs[dbrp.Database] = struct{}{} - row.Values = append(row.Values, []interface{}{dbrp.Database}) - } - return []*models.Row{row}, nil -} - -func (e *StatementExecutor) getDefaultRP(ctx context.Context, database string, ectx *query.ExecutionContext) (*influxdb.DBRPMapping, error) { - defaultRP := true - mappings, n, err := e.DBRP.FindMany(ctx, influxdb.DBRPMappingFilter{ - OrgID: &ectx.OrgID, - Database: &database, - Default: &defaultRP, - }) - if err != nil { - return nil, fmt.Errorf("finding DBRP mappings: %v", err) - } else if n == 0 { - return nil, fmt.Errorf("default retention policy not set for: %s", database) - } else if n != 1 { - return nil, fmt.Errorf("finding DBRP mappings: expected 1, found %d", n) - } - return mappings[0], nil -} - -func (e *StatementExecutor) executeDeleteSeriesStatement(ctx context.Context, q *influxql.DeleteSeriesStatement, database string, ectx *query.ExecutionContext) error { - mapping, err := e.getDefaultRP(ctx, database, ectx) - if err != nil { - return err - } - - // Require write for DELETE queries - _, _, err = authorizer.AuthorizeWrite(ctx, influxdb.BucketsResourceType, mapping.BucketID, ectx.OrgID) - if err != nil { - return ectx.Send(ctx, &query.Result{ - Err: fmt.Errorf("insufficient permissions"), - }) - } 
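// Editorial sketch of the lookup rule getDefaultRP above enforces: exactly one default DBRP
// mapping must exist for the database, otherwise the statement cannot be routed to a bucket.
// The types and names here are illustrative stand-ins, not the influxdb.DBRPMappingService API.
package main

import "fmt"

type mapping struct {
	Database string
	Default  bool
	BucketID uint64
}

func defaultMapping(db string, all []mapping) (mapping, error) {
	var found []mapping
	for _, m := range all {
		if m.Database == db && m.Default {
			found = append(found, m)
		}
	}
	switch len(found) {
	case 0:
		return mapping{}, fmt.Errorf("default retention policy not set for: %s", db)
	case 1:
		return found[0], nil
	default:
		return mapping{}, fmt.Errorf("finding DBRP mappings: expected 1, found %d", len(found))
	}
}

func main() {
	ms := []mapping{{Database: "db0", Default: true, BucketID: 0xffee}}
	m, err := defaultMapping("db0", ms)
	fmt.Println(m.BucketID, err) // 65518 <nil>
}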
- - // Convert "now()" to current time. - q.Condition = influxql.Reduce(q.Condition, &influxql.NowValuer{Now: time.Now().UTC()}) - - return e.TSDBStore.DeleteSeries(ctx, mapping.BucketID.String(), q.Sources, q.Condition) -} - -func (e *StatementExecutor) executeDropMeasurementStatement(ctx context.Context, q *influxql.DropMeasurementStatement, database string, ectx *query.ExecutionContext) error { - mapping, err := e.getDefaultRP(ctx, database, ectx) - if err != nil { - return err - } - - // Require write for DROP MEASUREMENT queries - _, _, err = authorizer.AuthorizeWrite(ctx, influxdb.BucketsResourceType, mapping.BucketID, ectx.OrgID) - if err != nil { - return ectx.Send(ctx, &query.Result{ - Err: fmt.Errorf("insufficient permissions"), - }) - } - - return e.TSDBStore.DeleteMeasurement(ctx, mapping.BucketID.String(), q.Name) -} - -type measurementRow struct { - name []byte - db, rp string -} - -func (e *StatementExecutor) executeShowMeasurementsStatement(ctx context.Context, q *influxql.ShowMeasurementsStatement, ectx *query.ExecutionContext) error { - if q.Database == "" && !q.WildcardDatabase { - return ErrDatabaseNameRequired - } - - if q.WildcardDatabase { - // We could support this but it doesn't seem very useful. - if q.RetentionPolicy != "" { - return ectx.Send(ctx, &query.Result{ - Err: fmt.Errorf("query 'SHOW MEASUREMENTS ON *.rp' not supported. use 'ON *.*' or specify a database"), - }) - } - // It is not clear how '*' should interact with the default retention policy, so reject it - if !q.WildcardRetentionPolicy { - return ectx.Send(ctx, &query.Result{ - Err: fmt.Errorf("query 'SHOW MEASUREMENTS ON *' not supported. use 'ON *.*' or specify a database"), - }) - } - } - - onlyPrintMeasurements := !(q.WildcardDatabase || q.WildcardRetentionPolicy || q.RetentionPolicy != "") - - mappingsFilter := influxdb.DBRPMappingFilter{ - OrgID: &ectx.OrgID, - } - - if !q.WildcardDatabase { - mappingsFilter.Database = &q.Database - } - if !q.WildcardRetentionPolicy { - if q.RetentionPolicy == "" { - defaultRP := true - mappingsFilter.Default = &defaultRP - } else { - mappingsFilter.RetentionPolicy = &q.RetentionPolicy - } - } - mappings, _, err := e.DBRP.FindMany(ctx, mappingsFilter) - if err != nil { - return fmt.Errorf("finding DBRP mappings: %v", err) - } - - rows := make([]measurementRow, 0) - - // Sort the sources for consistent output - sort.Slice(mappings, func(i, j int) bool { - if mappings[i].Database != mappings[j].Database { - return mappings[i].Database < mappings[j].Database - } - return mappings[i].RetentionPolicy < mappings[j].RetentionPolicy - }) - - for _, mapping := range mappings { - names, err := e.TSDBStore.MeasurementNames(ctx, ectx.Authorizer, mapping.BucketID.String(), q.Condition) - if err != nil { - return ectx.Send(ctx, &query.Result{ - Err: err, - }) - } - for _, name := range names { - rows = append(rows, measurementRow{ - name: name, - db: mapping.Database, - rp: mapping.RetentionPolicy, - }) - } - } - - if q.Offset > 0 { - if q.Offset >= len(rows) { - rows = nil - } else { - rows = rows[q.Offset:] - } - } - - if q.Limit > 0 { - if q.Limit < len(rows) { - rows = rows[:q.Limit] - } - } - - if len(rows) == 0 { - return ectx.Send(ctx, &query.Result{}) - } - - if onlyPrintMeasurements { - values := make([][]interface{}, len(rows)) - for i, r := range rows { - values[i] = []interface{}{string(r.name)} - } - - return ectx.Send(ctx, &query.Result{ - Series: []*models.Row{{ - Name: "measurements", - Columns: []string{"name"}, - Values: values, - }}, - }) - } - - values := 
make([][]interface{}, len(rows)) - for i, r := range rows { - values[i] = []interface{}{string(r.name), r.db, r.rp} - } - - return ectx.Send(ctx, &query.Result{ - Series: []*models.Row{{ - Name: "measurements", - Columns: []string{"name", "database", "retention policy"}, - Values: values, - }}, - }) - -} - -func (e *StatementExecutor) executeShowRetentionPoliciesStatement(ctx context.Context, q *influxql.ShowRetentionPoliciesStatement, ectx *query.ExecutionContext) (models.Rows, error) { - if q.Database == "" { - return nil, ErrDatabaseNameRequired - } - - dbrps, _, err := e.DBRP.FindMany(ctx, influxdb.DBRPMappingFilter{ - OrgID: &ectx.OrgID, - Database: &q.Database, - }) - - if err != nil { - return nil, err - } - - row := &models.Row{Columns: []string{"name", "duration", "shardGroupDuration", "replicaN", "default"}} - for _, dbrp := range dbrps { - perm, err := influxdb.NewPermissionAtID(dbrp.BucketID, influxdb.ReadAction, influxdb.BucketsResourceType, dbrp.OrganizationID) - if err != nil { - return nil, err - } - err = authorizer.IsAllowed(ctx, *perm) - if err != nil { - if errors2.ErrorCode(err) == errors2.EUnauthorized { - continue - } - return nil, err - } - row.Values = append(row.Values, []interface{}{dbrp.RetentionPolicy, "0s", "168h0m0s", 1, dbrp.Default}) - } - - return []*models.Row{row}, nil -} - -func (e *StatementExecutor) executeShowTagKeys(ctx context.Context, q *influxql.ShowTagKeysStatement, ectx *query.ExecutionContext) error { - if q.Database == "" { - return ErrDatabaseNameRequired - } - - mapping, err := e.getDefaultRP(ctx, q.Database, ectx) - if err != nil { - return err - } - - // Determine shard set based on database and time range. - // SHOW TAG KEYS returns all tag keys for the default retention policy. - di := e.MetaClient.Database(mapping.BucketID.String()) - if di == nil { - return fmt.Errorf("database not found: %s", q.Database) - } - - // Determine appropriate time range. If one or fewer time boundaries provided - // then min/max possible time should be used instead. - valuer := &influxql.NowValuer{Now: time.Now()} - cond, timeRange, err := influxql.ConditionExpr(q.Condition, valuer) - if err != nil { - return err - } - - // Get all shards for all retention policies. - var allGroups []meta.ShardGroupInfo - for _, rpi := range di.RetentionPolicies { - sgis, err := e.MetaClient.ShardGroupsByTimeRange(mapping.BucketID.String(), rpi.Name, timeRange.MinTime(), timeRange.MaxTime()) - if err != nil { - return err - } - allGroups = append(allGroups, sgis...) - } - - var shardIDs []uint64 - for _, sgi := range allGroups { - for _, si := range sgi.Shards { - shardIDs = append(shardIDs, si.ID) - } - } - - tagKeys, err := e.TSDBStore.TagKeys(ctx, ectx.Authorizer, shardIDs, cond) - if err != nil { - return ectx.Send(ctx, &query.Result{ - Err: err, - }) - } - - emitted := false - for _, m := range tagKeys { - keys := m.Keys - - if q.Offset > 0 { - if q.Offset >= len(keys) { - keys = nil - } else { - keys = keys[q.Offset:] - } - } - if q.Limit > 0 && q.Limit < len(keys) { - keys = keys[:q.Limit] - } - - if len(keys) == 0 { - continue - } - - row := &models.Row{ - Name: m.Measurement, - Columns: []string{"tagKey"}, - Values: make([][]interface{}, len(keys)), - } - for i, key := range keys { - row.Values[i] = []interface{}{key} - } - - if err := ectx.Send(ctx, &query.Result{ - Series: []*models.Row{row}, - }); err != nil { - return err - } - emitted = true - } - - // Ensure at least one result is emitted. 
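// Editorial sketch: the SHOW MEASUREMENTS, SHOW TAG KEYS and SHOW TAG VALUES handlers above
// all apply OFFSET and LIMIT with the same slice arithmetic. A small helper capturing that
// rule, written for illustration only (offset or limit <= 0 means "not set"):
package main

import "fmt"

func applyOffsetLimit(items []string, offset, limit int) []string {
	if offset > 0 {
		if offset >= len(items) {
			items = nil
		} else {
			items = items[offset:]
		}
	}
	if limit > 0 && limit < len(items) {
		items = items[:limit]
	}
	return items
}

func main() {
	keys := []string{"host", "region", "dc", "rack"}
	fmt.Println(applyOffsetLimit(keys, 1, 2)) // [region dc]
}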
- if !emitted { - return ectx.Send(ctx, &query.Result{}) - } - return nil -} - -func (e *StatementExecutor) executeShowTagValues(ctx context.Context, q *influxql.ShowTagValuesStatement, ectx *query.ExecutionContext) error { - if q.Database == "" { - return ErrDatabaseNameRequired - } - - mapping, err := e.getDefaultRP(ctx, q.Database, ectx) - if err != nil { - return err - } - - // Determine shard set based on database and time range. - // SHOW TAG VALUES returns all tag values for the default retention policy. - di := e.MetaClient.Database(mapping.BucketID.String()) - if di == nil { - return fmt.Errorf("database not found: %s", q.Database) - } - - // Determine appropriate time range. If one or fewer time boundaries provided - // then min/max possible time should be used instead. - valuer := &influxql.NowValuer{Now: time.Now()} - cond, timeRange, err := influxql.ConditionExpr(q.Condition, valuer) - if err != nil { - return err - } - - // Get all shards for all retention policies. - var allGroups []meta.ShardGroupInfo - for _, rpi := range di.RetentionPolicies { - sgis, err := e.MetaClient.ShardGroupsByTimeRange(mapping.BucketID.String(), rpi.Name, timeRange.MinTime(), timeRange.MaxTime()) - if err != nil { - return err - } - allGroups = append(allGroups, sgis...) - } - - var shardIDs []uint64 - for _, sgi := range allGroups { - for _, si := range sgi.Shards { - shardIDs = append(shardIDs, si.ID) - } - } - - tagValues, err := e.TSDBStore.TagValues(ctx, ectx.Authorizer, shardIDs, cond) - if err != nil { - return ectx.Send(ctx, &query.Result{Err: err}) - } - - emitted := false - for _, m := range tagValues { - values := m.Values - - if q.Offset > 0 { - if q.Offset >= len(values) { - values = nil - } else { - values = values[q.Offset:] - } - } - - if q.Limit > 0 { - if q.Limit < len(values) { - values = values[:q.Limit] - } - } - - if len(values) == 0 { - continue - } - - row := &models.Row{ - Name: m.Measurement, - Columns: []string{"key", "value"}, - Values: make([][]interface{}, len(values)), - } - for i, v := range values { - row.Values[i] = []interface{}{v.Key, v.Value} - } - - if err := ectx.Send(ctx, &query.Result{ - Series: []*models.Row{row}, - }); err != nil { - return err - } - emitted = true - } - - // Ensure at least one result is emitted. - if !emitted { - return ectx.Send(ctx, &query.Result{}) - } - return nil -} - -// NormalizeStatement adds a default database and policy to the measurements in statement. -// Parameter defaultRetentionPolicy can be "". 
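// Editorial sketch of the defaulting rules that NormalizeStatement and normalizeMeasurement
// implement just below: an empty database falls back to the session default, and an empty
// retention policy falls back first to the session default RP and then to the database's
// default DBRP mapping. Illustrative only; the real code walks the parsed influxql AST.
package main

import (
	"errors"
	"fmt"
)

type measurement struct{ Database, RetentionPolicy, Name string }

func normalize(m *measurement, defaultDB, defaultRP string, defaultRPForDB func(string) string) error {
	if m.Database == "" {
		m.Database = defaultDB
	}
	if m.Database == "" {
		return errors.New("database name required")
	}
	if m.RetentionPolicy == "" {
		if defaultRP != "" {
			m.RetentionPolicy = defaultRP
		} else if rp := defaultRPForDB(m.Database); rp != "" {
			m.RetentionPolicy = rp
		} else {
			return fmt.Errorf("default retention policy not set for: %s", m.Database)
		}
	}
	return nil
}

func main() {
	m := &measurement{Name: "cpu"}
	_ = normalize(m, "db0", "", func(string) string { return "rp0" })
	fmt.Println(m.Database, m.RetentionPolicy) // db0 rp0
}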
-func (e *StatementExecutor) NormalizeStatement(ctx context.Context, stmt influxql.Statement, defaultDatabase, defaultRetentionPolicy string, ectx *query.ExecutionContext) (err error) { - influxql.WalkFunc(stmt, func(node influxql.Node) { - if err != nil { - return - } - switch node := node.(type) { - case *influxql.ShowRetentionPoliciesStatement: - if node.Database == "" { - node.Database = defaultDatabase - } - case *influxql.ShowMeasurementsStatement: - if node.Database == "" { - node.Database = defaultDatabase - } - case *influxql.ShowTagKeysStatement: - if node.Database == "" { - node.Database = defaultDatabase - } - case *influxql.ShowTagValuesStatement: - if node.Database == "" { - node.Database = defaultDatabase - } - case *influxql.ShowMeasurementCardinalityStatement: - if node.Database == "" { - node.Database = defaultDatabase - } - case *influxql.ShowSeriesCardinalityStatement: - if node.Database == "" { - node.Database = defaultDatabase - } - case *influxql.Measurement: - switch stmt.(type) { - case *influxql.DropSeriesStatement, *influxql.DeleteSeriesStatement: - // DB and RP not supported by these statements so don't rewrite into invalid - // statements - default: - err = e.normalizeMeasurement(ctx, node, defaultDatabase, defaultRetentionPolicy, ectx) - } - } - }) - return -} - -func (e *StatementExecutor) normalizeMeasurement(ctx context.Context, m *influxql.Measurement, defaultDatabase, defaultRetentionPolicy string, ectx *query.ExecutionContext) error { - // Targets (measurements in an INTO clause) can have blank names, which means it will be - // the same as the measurement name it came from in the FROM clause. - if !m.IsTarget && m.Name == "" && m.SystemIterator == "" && m.Regex == nil { - return errors.New("invalid measurement") - } - - // Measurement does not have an explicit database? Insert default. - if m.Database == "" { - m.Database = defaultDatabase - } - - // The database must now be specified by this point. - if m.Database == "" { - return ErrDatabaseNameRequired - } - - // TODO(sgc): Validate database; fetch default RP - filter := influxdb.DBRPMappingFilter{ - OrgID: &ectx.OrgID, - Database: &m.Database, - } - - res, _, err := e.DBRP.FindMany(ctx, filter) - if err != nil { - return err - } - - if len(res) == 0 { - return query.ErrDatabaseNotFound(m.Database) - } - - // If no retention policy was specified, use the default. - if m.RetentionPolicy == "" { - if defaultRetentionPolicy != "" { - m.RetentionPolicy = defaultRetentionPolicy - } else if rp := mappings(res).DefaultRetentionPolicy(m.Database); rp != "" { - m.RetentionPolicy = rp - } else { - return fmt.Errorf("default retention policy not set for: %s", m.Database) - } - } - - return nil -} - -type mappings []*influxdb.DBRPMapping - -func (m mappings) DefaultRetentionPolicy(db string) string { - for _, v := range m { - if v.Database == db && v.Default { - return v.RetentionPolicy - } - } - return "" -} - -// TSDBStore is an interface for accessing the time series data store. 
-type TSDBStore interface { - DeleteMeasurement(ctx context.Context, database, name string) error - DeleteSeries(ctx context.Context, database string, sources []influxql.Source, condition influxql.Expr) error - MeasurementNames(ctx context.Context, auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) - TagKeys(ctx context.Context, auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) - TagValues(ctx context.Context, auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) -} - -var _ TSDBStore = LocalTSDBStore{} - -// LocalTSDBStore embeds a tsdb.Store and implements IteratorCreator -// to satisfy the TSDBStore interface. -type LocalTSDBStore struct { - *tsdb.Store -} diff --git a/v1/coordinator/statement_executor_test.go b/v1/coordinator/statement_executor_test.go deleted file mode 100644 index a50b69ae366..00000000000 --- a/v1/coordinator/statement_executor_test.go +++ /dev/null @@ -1,710 +0,0 @@ -package coordinator_test - -import ( - "bytes" - "context" - "errors" - "fmt" - "reflect" - "regexp" - "testing" - "time" - - "github.com/davecgh/go-spew/spew" - "github.com/golang/mock/gomock" - "github.com/influxdata/influxdb/v2" - icontext "github.com/influxdata/influxdb/v2/context" - "github.com/influxdata/influxdb/v2/dbrp/mocks" - influxql2 "github.com/influxdata/influxdb/v2/influxql" - "github.com/influxdata/influxdb/v2/influxql/control" - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/internal" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/models" - itesting "github.com/influxdata/influxdb/v2/testing" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/v1/coordinator" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/influxdata/influxql" - "go.uber.org/zap/zaptest" -) - -const ( - // DefaultDatabase is the default database name used in tests. - DefaultDatabase = "db0" - - // DefaultRetentionPolicy is the default retention policy name used in tests. - DefaultRetentionPolicy = "rp0" -) - -// Ensure query executor can execute a simple SELECT statement. -func TestQueryExecutor_ExecuteQuery_SelectStatement(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - dbrp := mocks.NewMockDBRPMappingService(ctrl) - orgID := platform.ID(0xff00) - empty := "" - filt := influxdb.DBRPMappingFilter{OrgID: &orgID, Database: &empty, RetentionPolicy: &empty, Virtual: nil} - res := []*influxdb.DBRPMapping{{}} - dbrp.EXPECT(). - FindMany(gomock.Any(), filt). - Return(res, 1, nil) - - e := DefaultQueryExecutor(t, WithDBRP(dbrp)) - - // The meta client should return a single shard owned by the local node. - e.MetaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { - return []meta.ShardGroupInfo{ - {ID: 1, Shards: []meta.ShardInfo{ - {ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}}, - }}, - }, nil - } - - // The TSDB store should return an IteratorCreator for shard. - // This IteratorCreator returns a single iterator with "value" in the aux fields. 
- e.TSDBStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup { - if !reflect.DeepEqual(ids, []uint64{100}) { - t.Fatalf("unexpected shard ids: %v", ids) - } - - var sh MockShard - sh.CreateIteratorFn = func(_ context.Context, _ *influxql.Measurement, _ query.IteratorOptions) (query.Iterator, error) { - return &FloatIterator{Points: []query.FloatPoint{ - {Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}, - {Name: "cpu", Time: int64(1 * time.Second), Aux: []interface{}{float64(200)}}, - }}, nil - } - sh.FieldDimensionsFn = func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { - if !reflect.DeepEqual(measurements, []string{"cpu"}) { - t.Fatalf("unexpected source: %#v", measurements) - } - return map[string]influxql.DataType{"value": influxql.Float}, nil, nil - } - return &sh - } - - // Verify all results from the query. - if a := ReadAllResults(e.ExecuteQuery(context.Background(), `SELECT * FROM cpu`, "db0", 0, orgID)); !reflect.DeepEqual(a, []*query.Result{ - { - StatementID: 0, - Series: []*models.Row{{ - Name: "cpu", - Columns: []string{"time", "value"}, - Values: [][]interface{}{ - {time.Unix(0, 0).UTC(), float64(100)}, - {time.Unix(1, 0).UTC(), float64(200)}, - }, - }}, - }, - }) { - t.Fatalf("unexpected results: %s", spew.Sdump(a)) - } -} - -// Ensure query executor can enforce a maximum bucket selection count. -func TestQueryExecutor_ExecuteQuery_MaxSelectBucketsN(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - dbrp := mocks.NewMockDBRPMappingService(ctrl) - orgID := platform.ID(0xff00) - empty := "" - filt := influxdb.DBRPMappingFilter{OrgID: &orgID, Database: &empty, RetentionPolicy: &empty, Virtual: nil} - res := []*influxdb.DBRPMapping{{}} - dbrp.EXPECT(). - FindMany(gomock.Any(), filt). - Return(res, 1, nil) - - e := DefaultQueryExecutor(t, WithDBRP(dbrp)) - - e.StatementExecutor.MaxSelectBucketsN = 3 - - // The meta client should return a single shards on the local node. - e.MetaClient.ShardGroupsByTimeRangeFn = func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) { - return []meta.ShardGroupInfo{ - {ID: 1, Shards: []meta.ShardInfo{ - {ID: 100, Owners: []meta.ShardOwner{{NodeID: 0}}}, - }}, - }, nil - } - - e.TSDBStore.ShardGroupFn = func(ids []uint64) tsdb.ShardGroup { - if !reflect.DeepEqual(ids, []uint64{100}) { - t.Fatalf("unexpected shard ids: %v", ids) - } - - var sh MockShard - sh.CreateIteratorFn = func(_ context.Context, _ *influxql.Measurement, _ query.IteratorOptions) (query.Iterator, error) { - return &FloatIterator{ - Points: []query.FloatPoint{{Name: "cpu", Time: int64(0 * time.Second), Aux: []interface{}{float64(100)}}}, - }, nil - } - sh.FieldDimensionsFn = func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { - if !reflect.DeepEqual(measurements, []string{"cpu"}) { - t.Fatalf("unexpected source: %#v", measurements) - } - return map[string]influxql.DataType{"value": influxql.Float}, nil, nil - } - return &sh - } - - // Verify all results from the query. 
- if a := ReadAllResults(e.ExecuteQuery(context.Background(), `SELECT count(value) FROM cpu WHERE time >= '2000-01-01T00:00:05Z' AND time < '2000-01-01T00:00:35Z' GROUP BY time(10s)`, "db0", 0, orgID)); !reflect.DeepEqual(a, []*query.Result{ - { - StatementID: 0, - Err: errors.New("max-select-buckets limit exceeded: (4/3)"), - }, - }) { - t.Fatalf("unexpected results: %s", spew.Sdump(a)) - } -} - -func TestStatementExecutor_NormalizeStatement(t *testing.T) { - - testCases := []struct { - name string - query string - defaultDB string - defaultRP string - expectedDB string - expectedRP string - }{ - { - name: "defaults", - query: "SELECT f FROM m", - defaultDB: DefaultDatabase, - defaultRP: "", - expectedDB: DefaultDatabase, - expectedRP: DefaultRetentionPolicy, - }, - { - name: "alternate database via param", - query: "SELECT f FROM m", - defaultDB: "dbalt", - defaultRP: "", - expectedDB: "dbalt", - expectedRP: DefaultRetentionPolicy, - }, - { - name: "alternate database via query", - query: fmt.Sprintf("SELECT f FROM dbalt.%s.m", DefaultRetentionPolicy), - defaultDB: DefaultDatabase, - defaultRP: "", - expectedDB: "dbalt", - expectedRP: DefaultRetentionPolicy, - }, - { - name: "alternate RP via param", - query: "SELECT f FROM m", - defaultDB: DefaultDatabase, - defaultRP: "rpalt", - expectedDB: DefaultDatabase, - expectedRP: "rpalt", - }, - { - name: "alternate RP via query", - query: fmt.Sprintf("SELECT f FROM %s.rpalt.m", DefaultDatabase), - defaultDB: DefaultDatabase, - defaultRP: "", - expectedDB: DefaultDatabase, - expectedRP: "rpalt", - }, - { - name: "alternate RP query disagrees with param and query wins", - query: fmt.Sprintf("SELECT f FROM %s.rpquery.m", DefaultDatabase), - defaultDB: DefaultDatabase, - defaultRP: "rpparam", - expectedDB: DefaultDatabase, - expectedRP: "rpquery", - }, - } - - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - dbrp := mocks.NewMockDBRPMappingService(ctrl) - orgID := platform.ID(0xff00) - bucketID := platform.ID(0xffee) - filt := influxdb.DBRPMappingFilter{OrgID: &orgID, Database: &testCase.expectedDB} - res := []*influxdb.DBRPMapping{{Database: testCase.expectedDB, RetentionPolicy: testCase.expectedRP, OrganizationID: orgID, BucketID: bucketID, Default: true}} - dbrp.EXPECT(). - FindMany(gomock.Any(), filt). 
- Return(res, 1, nil) - - e := DefaultQueryExecutor(t, WithDBRP(dbrp)) - - q, err := influxql.ParseQuery(testCase.query) - if err != nil { - t.Fatalf("unexpected error parsing query: %v", err) - } - - stmt := q.Statements[0].(*influxql.SelectStatement) - - err = e.StatementExecutor.NormalizeStatement(context.Background(), stmt, testCase.defaultDB, testCase.defaultRP, &query.ExecutionContext{ExecutionOptions: query.ExecutionOptions{OrgID: orgID}}) - if err != nil { - t.Fatalf("unexpected error normalizing statement: %v", err) - } - - m := stmt.Sources[0].(*influxql.Measurement) - if m.Database != testCase.expectedDB { - t.Errorf("database got %v, want %v", m.Database, testCase.expectedDB) - } - if m.RetentionPolicy != testCase.expectedRP { - t.Errorf("retention policy got %v, want %v", m.RetentionPolicy, testCase.expectedRP) - } - }) - } -} - -func TestStatementExecutor_NormalizeDropSeries(t *testing.T) { - q, err := influxql.ParseQuery("DROP SERIES FROM cpu") - if err != nil { - t.Fatalf("unexpected error parsing query: %v", err) - } - - stmt := q.Statements[0].(*influxql.DropSeriesStatement) - - s := &coordinator.StatementExecutor{ - MetaClient: &internal.MetaClientMock{ - DatabaseFn: func(name string) *meta.DatabaseInfo { - t.Fatal("meta client should not be called") - return nil - }, - }, - } - if err := s.NormalizeStatement(context.Background(), stmt, "foo", "bar", &query.ExecutionContext{}); err != nil { - t.Fatalf("unexpected error normalizing statement: %v", err) - } - - m := stmt.Sources[0].(*influxql.Measurement) - if m.Database != "" { - t.Fatalf("database rewritten when not supposed to: %v", m.Database) - } - if m.RetentionPolicy != "" { - t.Fatalf("retention policy rewritten when not supposed to: %v", m.RetentionPolicy) - } - - if exp, got := "DROP SERIES FROM cpu", q.String(); exp != got { - t.Fatalf("generated query does match parsed: exp %v, got %v", exp, got) - } -} - -func TestStatementExecutor_NormalizeDeleteSeries(t *testing.T) { - q, err := influxql.ParseQuery("DELETE FROM cpu") - if err != nil { - t.Fatalf("unexpected error parsing query: %v", err) - } - - stmt := q.Statements[0].(*influxql.DeleteSeriesStatement) - - s := &coordinator.StatementExecutor{ - MetaClient: &internal.MetaClientMock{ - DatabaseFn: func(name string) *meta.DatabaseInfo { - t.Fatal("meta client should not be called") - return nil - }, - }, - } - if err := s.NormalizeStatement(context.Background(), stmt, "foo", "bar", &query.ExecutionContext{}); err != nil { - t.Fatalf("unexpected error normalizing statement: %v", err) - } - - m := stmt.Sources[0].(*influxql.Measurement) - if m.Database != "" { - t.Fatalf("database rewritten when not supposed to: %v", m.Database) - } - if m.RetentionPolicy != "" { - t.Fatalf("retention policy rewritten when not supposed to: %v", m.RetentionPolicy) - } - - if exp, got := "DELETE FROM cpu", q.String(); exp != got { - t.Fatalf("generated query does match parsed: exp %v, got %v", exp, got) - } -} - -func TestQueryExecutor_ExecuteQuery_ShowDatabases(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - dbrp := mocks.NewMockDBRPMappingService(ctrl) - orgID := platform.ID(0xff00) - filt := influxdb.DBRPMappingFilter{OrgID: &orgID} - res := []*influxdb.DBRPMapping{ - {Database: "db1", OrganizationID: orgID, BucketID: 0xffe0}, - {Database: "db2", OrganizationID: orgID, BucketID: 0xffe1}, - {Database: "db3", OrganizationID: orgID, BucketID: 0xffe2}, - {Database: "db4", OrganizationID: orgID, BucketID: 0xffe3}, - } - dbrp.EXPECT(). 
- FindMany(gomock.Any(), filt). - Return(res, 4, nil) - - qe := query.NewExecutor(zaptest.NewLogger(t), control.NewControllerMetrics([]string{})) - qe.StatementExecutor = &coordinator.StatementExecutor{ - DBRP: dbrp, - } - - opt := query.ExecutionOptions{ - OrgID: orgID, - } - - q, err := influxql.ParseQuery("SHOW DATABASES") - if err != nil { - t.Fatal(err) - } - - ctx := context.Background() - ctx = icontext.SetAuthorizer(ctx, &influxdb.Authorization{ - ID: orgID, - OrgID: orgID, - Status: influxdb.Active, - Permissions: []influxdb.Permission{ - *itesting.MustNewPermissionAtID(0xffe1, influxdb.ReadAction, influxdb.BucketsResourceType, orgID), - *itesting.MustNewPermissionAtID(0xffe3, influxdb.ReadAction, influxdb.BucketsResourceType, orgID), - }, - }) - - results := ReadAllResults(qe.ExecuteQuery(ctx, q, opt)) - exp := []*query.Result{ - { - StatementID: 0, - Series: []*models.Row{{ - Name: "databases", - Columns: []string{"name"}, - Values: [][]interface{}{ - {"db2"}, {"db4"}, - }, - }}, - }, - } - if !reflect.DeepEqual(results, exp) { - t.Fatalf("unexpected results: exp %s, got %s", spew.Sdump(exp), spew.Sdump(results)) - } -} - -func testExecDeleteSeriesOrDropMeasurement(t *testing.T, qType string) { - orgID := platform.ID(0xff00) - otherOrgID := platform.ID(0xff01) - bucketID := platform.ID(0xffee) - otherBucketID := platform.ID(0xffef) - - qStr := qType - if qStr == "DELETE" { - qStr = "DELETE FROM" - } - qErr := errors.New("insufficient permissions") - - testCases := []struct { - name string - query string - permissions []influxdb.Permission - expectedErr error - }{ - // expected FAIL - { - name: fmt.Sprintf("read-only bucket (%s)", qType), - query: qStr, - permissions: []influxdb.Permission{ - *itesting.MustNewPermissionAtID(bucketID, influxdb.ReadAction, influxdb.BucketsResourceType, orgID), - }, - expectedErr: qErr, - }, - { - name: fmt.Sprintf("read-only all buckets (%s)", qType), - query: qStr, - permissions: []influxdb.Permission{ - *itesting.MustNewPermission(influxdb.ReadAction, influxdb.BucketsResourceType, orgID), - }, - expectedErr: qErr, - }, - { - name: fmt.Sprintf("write-only other bucket (%s)", qType), - query: qStr, - permissions: []influxdb.Permission{ - *itesting.MustNewPermissionAtID(otherBucketID, influxdb.WriteAction, influxdb.BucketsResourceType, orgID), - }, - expectedErr: qErr, - }, - { - name: fmt.Sprintf("write-only other org (%s)", qType), - query: qStr, - permissions: []influxdb.Permission{ - *itesting.MustNewPermission(influxdb.WriteAction, influxdb.BucketsResourceType, otherOrgID), - }, - expectedErr: qErr, - }, - { - name: fmt.Sprintf("read-write other org (%s)", qType), - query: qStr, - permissions: []influxdb.Permission{ - *itesting.MustNewPermission(influxdb.ReadAction, influxdb.BucketsResourceType, otherOrgID), - *itesting.MustNewPermission(influxdb.WriteAction, influxdb.BucketsResourceType, otherOrgID), - }, - expectedErr: qErr, - }, - // expected PASS - { - name: fmt.Sprintf("write-only bucket (%s)", qType), - query: qStr, - permissions: []influxdb.Permission{ - *itesting.MustNewPermissionAtID(bucketID, influxdb.WriteAction, influxdb.BucketsResourceType, orgID), - }, - expectedErr: nil, - }, - { - name: fmt.Sprintf("write-only all buckets (%s)", qType), - query: qStr, - permissions: []influxdb.Permission{ - *itesting.MustNewPermission(influxdb.WriteAction, influxdb.BucketsResourceType, orgID), - }, - expectedErr: nil, - }, - { - name: fmt.Sprintf("read-write bucket (%s)", qType), - query: qStr, - permissions: []influxdb.Permission{ - 
*itesting.MustNewPermissionAtID(bucketID, influxdb.ReadAction, influxdb.BucketsResourceType, orgID), - *itesting.MustNewPermissionAtID(bucketID, influxdb.WriteAction, influxdb.BucketsResourceType, orgID), - }, - expectedErr: nil, - }, - { - name: fmt.Sprintf("read-write all buckets (%s)", qType), - query: qStr, - permissions: []influxdb.Permission{ - *itesting.MustNewPermission(influxdb.ReadAction, influxdb.BucketsResourceType, orgID), - *itesting.MustNewPermission(influxdb.WriteAction, influxdb.BucketsResourceType, orgID), - }, - expectedErr: nil, - }, - } - - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // setup a DBRP that we can use - dbrp := mocks.NewMockDBRPMappingService(ctrl) - db := "db0" - - empty := "" - isDefault := true - filt := influxdb.DBRPMappingFilter{OrgID: &orgID, Database: &db, RetentionPolicy: nil, Default: &isDefault} - res := []*influxdb.DBRPMapping{{Database: db, RetentionPolicy: empty, OrganizationID: orgID, BucketID: bucketID, Default: isDefault}} - dbrp.EXPECT(). - FindMany(gomock.Any(), filt). - Return(res, 1, nil) - - qe := DefaultQueryExecutor(t, WithDBRP(dbrp)) - - // assume storage succeeds if we get that far - qe.TSDBStore.DeleteSeriesFn = func(context.Context, string, []influxql.Source, influxql.Expr) error { - return nil - } - qe.TSDBStore.DeleteMeasurementFn = func(context.Context, string, string) error { - return nil - } - - ctx := context.Background() - ctx = icontext.SetAuthorizer(ctx, &influxdb.Authorization{ - ID: orgID, - OrgID: orgID, - Status: influxdb.Active, - Permissions: testCase.permissions, - }) - - results := ReadAllResults(qe.ExecuteQuery(ctx, fmt.Sprintf("%s cpu", testCase.query), "db0", 0, orgID)) - - var exp []*query.Result - if testCase.expectedErr != nil { - exp = []*query.Result{ - { - StatementID: 0, - Err: testCase.expectedErr, - }, - } - } - if !reflect.DeepEqual(results, exp) { - t.Fatalf("unexpected results: exp %s, got %s", spew.Sdump(exp), spew.Sdump(results)) - } - }) - } -} - -func TestQueryExecutor_ExecuteQuery_DeleteSeries(t *testing.T) { - testExecDeleteSeriesOrDropMeasurement(t, "DELETE") -} - -func TestQueryExecutor_ExecuteQuery_DropMeasurement(t *testing.T) { - testExecDeleteSeriesOrDropMeasurement(t, "DROP MEASUREMENT") -} - -// QueryExecutor is a test wrapper for coordinator.QueryExecutor. -type QueryExecutor struct { - *query.Executor - - MetaClient MetaClient - TSDBStore *internal.TSDBStoreMock - DBRP *mocks.MockDBRPMappingService - StatementExecutor *coordinator.StatementExecutor - LogOutput bytes.Buffer -} - -// NewQueryExecutor returns a new instance of Executor. -// This query executor always has a node id of 0. 
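// Editorial sketch: the test harness here wires its optional dependencies through functional
// options (optFn / WithDBRP, defined just below). A minimal standalone illustration of that
// pattern with made-up names, for readers unfamiliar with it:
package main

import "fmt"

type server struct{ addr string }

type option func(*server)

func withAddr(addr string) option {
	return func(s *server) { s.addr = addr }
}

// newServer applies each option over a default-configured value.
func newServer(opts ...option) *server {
	s := &server{addr: ":8086"}
	for _, opt := range opts {
		opt(s)
	}
	return s
}

func main() {
	fmt.Println(newServer().addr)                  // :8086
	fmt.Println(newServer(withAddr(":9999")).addr) // :9999
}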
-func NewQueryExecutor(t *testing.T, opts ...optFn) *QueryExecutor { - e := &QueryExecutor{ - Executor: query.NewExecutor(zaptest.NewLogger(t), control.NewControllerMetrics([]string{})), - TSDBStore: &internal.TSDBStoreMock{}, - } - - for _, opt := range opts { - opt(e) - } - - e.TSDBStore.CreateShardFn = func(database, policy string, shardID uint64, enabled bool) error { - return nil - } - - e.TSDBStore.MeasurementNamesFn = func(_ context.Context, auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) { - return nil, nil - } - - e.TSDBStore.TagValuesFn = func(_ context.Context, _ query.Authorizer, _ []uint64, _ influxql.Expr) ([]tsdb.TagValues, error) { - return nil, nil - } - - e.StatementExecutor = &coordinator.StatementExecutor{ - MetaClient: &e.MetaClient, - TSDBStore: e.TSDBStore, - DBRP: e.DBRP, - ShardMapper: &coordinator.LocalShardMapper{ - MetaClient: &e.MetaClient, - TSDBStore: e.TSDBStore, - DBRP: e.DBRP, - }, - } - e.Executor.StatementExecutor = e.StatementExecutor - - return e -} - -type optFn func(qe *QueryExecutor) - -func WithDBRP(dbrp *mocks.MockDBRPMappingService) optFn { - return func(qe *QueryExecutor) { - qe.DBRP = dbrp - } -} - -// DefaultQueryExecutor returns a Executor with a database (db0) and retention policy (rp0). -func DefaultQueryExecutor(t *testing.T, opts ...optFn) *QueryExecutor { - e := NewQueryExecutor(t, opts...) - e.MetaClient.DatabaseFn = DefaultMetaClientDatabaseFn - return e -} - -// ExecuteQuery parses query and executes against the database. -func (e *QueryExecutor) ExecuteQuery(ctx context.Context, q, database string, chunkSize int, orgID platform.ID) (<-chan *query.Result, *influxql2.Statistics) { - return e.Executor.ExecuteQuery(ctx, MustParseQuery(q), query.ExecutionOptions{ - OrgID: orgID, - Database: database, - ChunkSize: chunkSize, - }) -} - -type MockShard struct { - Measurements []string - FieldDimensionsFn func(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) - FieldKeysByMeasurementFn func(name []byte) []string - CreateIteratorFn func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) - IteratorCostFn func(ctx context.Context, m string, opt query.IteratorOptions) (query.IteratorCost, error) - ExpandSourcesFn func(sources influxql.Sources) (influxql.Sources, error) -} - -func (sh *MockShard) MeasurementsByRegex(re *regexp.Regexp) []string { - names := make([]string, 0, len(sh.Measurements)) - for _, name := range sh.Measurements { - if re.MatchString(name) { - names = append(names, name) - } - } - return names -} - -func (sh *MockShard) FieldKeysByMeasurement(name []byte) []string { - return sh.FieldKeysByMeasurementFn(name) -} - -func (sh *MockShard) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { - return sh.FieldDimensionsFn(measurements) -} - -func (sh *MockShard) MapType(measurement, field string) influxql.DataType { - f, d, err := sh.FieldDimensions([]string{measurement}) - if err != nil { - return influxql.Unknown - } - - if typ, ok := f[field]; ok { - return typ - } else if _, ok := d[field]; ok { - return influxql.Tag - } - return influxql.Unknown -} - -func (sh *MockShard) CreateIterator(ctx context.Context, measurement *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - return sh.CreateIteratorFn(ctx, measurement, opt) -} - -func (sh *MockShard) IteratorCost(ctx context.Context, measurement string, opt 
query.IteratorOptions) (query.IteratorCost, error) { - return sh.IteratorCostFn(ctx, measurement, opt) -} - -func (sh *MockShard) ExpandSources(sources influxql.Sources) (influxql.Sources, error) { - return sh.ExpandSourcesFn(sources) -} - -// MustParseQuery parses s into a query. Panics on error. -func MustParseQuery(s string) *influxql.Query { - q, err := influxql.ParseQuery(s) - if err != nil { - panic(err) - } - return q -} - -// ReadAllResults reads all results from c and returns them as a slice. -func ReadAllResults(c <-chan *query.Result, _ *influxql2.Statistics) []*query.Result { - var a []*query.Result - for result := range c { - a = append(a, result) - } - return a -} - -// FloatIterator represents an iterator that reads from a slice. -type FloatIterator struct { - Points []query.FloatPoint - stats query.IteratorStats -} - -func (itr *FloatIterator) Stats() query.IteratorStats { return itr.stats } -func (itr *FloatIterator) Close() error { return nil } - -// Next returns the next value and shifts it off the beginning of the points slice. -func (itr *FloatIterator) Next() (*query.FloatPoint, error) { - if len(itr.Points) == 0 { - return nil, nil - } - - v := &itr.Points[0] - itr.Points = itr.Points[1:] - return v, nil -} diff --git a/v1/errors.go b/v1/errors.go deleted file mode 100644 index 9bc6b998815..00000000000 --- a/v1/errors.go +++ /dev/null @@ -1,42 +0,0 @@ -package influxdb - -import ( - "errors" - "fmt" - "strings" -) - -// ErrFieldTypeConflict is returned when a new field already exists with a -// different type. -var ErrFieldTypeConflict = errors.New("field type conflict") - -// ErrDatabaseNotFound indicates that a database operation failed on the -// specified database because the specified database does not exist. -func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) } - -// ErrRetentionPolicyNotFound indicates that the named retention policy could -// not be found in the database. -func ErrRetentionPolicyNotFound(name string) error { - return fmt.Errorf("retention policy not found: %s", name) -} - -// IsAuthorizationError indicates whether an error is due to an authorization failure -func IsAuthorizationError(err error) bool { - e, ok := err.(interface { - AuthorizationFailed() bool - }) - return ok && e.AuthorizationFailed() -} - -// IsClientError indicates whether an error is a known client error.
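// Editorial sketch: IsAuthorizationError above detects authorization failures by behaviour
// rather than by concrete type: any error exposing AuthorizationFailed() bool matches. The
// same check is mirrored here against a hypothetical error type (authError is made up):
package main

import (
	"errors"
	"fmt"
)

type authError struct{ user string }

func (e authError) Error() string             { return fmt.Sprintf("%s is not authorized", e.user) }
func (e authError) AuthorizationFailed() bool { return true }

func isAuthorizationError(err error) bool {
	e, ok := err.(interface{ AuthorizationFailed() bool })
	return ok && e.AuthorizationFailed()
}

func main() {
	fmt.Println(isAuthorizationError(authError{user: "alice"})) // true
	fmt.Println(isAuthorizationError(errors.New("boom")))       // false
}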
-func IsClientError(err error) bool { - if err == nil { - return false - } - - if strings.HasPrefix(err.Error(), ErrFieldTypeConflict.Error()) { - return true - } - - return false -} diff --git a/v1/node.go b/v1/node.go deleted file mode 100644 index b7c2e0daa93..00000000000 --- a/v1/node.go +++ /dev/null @@ -1,120 +0,0 @@ -package influxdb - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "strconv" -) - -const ( - nodeFile = "node.json" - oldNodeFile = "id" - peersFilename = "peers.json" -) - -type Node struct { - path string - ID uint64 -} - -// LoadNode will load the node information from disk if present -func LoadNode(path string) (*Node, error) { - // Always check to see if we are upgrading first - if err := upgradeNodeFile(path); err != nil { - return nil, err - } - - n := &Node{ - path: path, - } - - f, err := os.Open(filepath.Join(path, nodeFile)) - if err != nil { - return nil, err - } - defer f.Close() - - if err := json.NewDecoder(f).Decode(n); err != nil { - return nil, err - } - - return n, nil -} - -// NewNode will return a new node -func NewNode(path string) *Node { - return &Node{ - path: path, - } -} - -// Save will save the node file to disk and replace the existing one if present -func (n *Node) Save() error { - file := filepath.Join(n.path, nodeFile) - tmpFile := file + "tmp" - - f, err := os.Create(tmpFile) - if err != nil { - return err - } - - if err = json.NewEncoder(f).Encode(n); err != nil { - f.Close() - return err - } - - if err = f.Close(); nil != err { - return err - } - - return os.Rename(tmpFile, file) -} - -func upgradeNodeFile(path string) error { - oldFile := filepath.Join(path, oldNodeFile) - b, err := os.ReadFile(oldFile) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - // We shouldn't have an empty ID file, but if we do, ignore it - if len(b) == 0 { - return nil - } - - peers := []string{} - pb, err := os.ReadFile(filepath.Join(path, peersFilename)) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - err = json.Unmarshal(pb, &peers) - if err != nil { - return err - } - - if len(peers) > 1 { - return fmt.Errorf("to upgrade a cluster, please contact support at influxdata") - } - - n := &Node{ - path: path, - } - if n.ID, err = strconv.ParseUint(string(b), 10, 64); err != nil { - return err - } - if err := n.Save(); err != nil { - return err - } - if err := os.Remove(oldFile); err != nil { - return err - } - return nil -} diff --git a/v1/services/meta/client.go b/v1/services/meta/client.go deleted file mode 100644 index 8af05ae90df..00000000000 --- a/v1/services/meta/client.go +++ /dev/null @@ -1,1045 +0,0 @@ -// Package meta provides control over meta data for InfluxDB, -// such as controlling databases, retention policies, users, etc. -package meta - -import ( - "bytes" - "context" - crand "crypto/rand" - "crypto/sha256" - "errors" - "io" - "math/rand" - "net/http" - "sort" - "sync" - "time" - - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/logger" - influxdb "github.com/influxdata/influxdb/v2/v1" - "github.com/influxdata/influxql" - "go.uber.org/zap" - "golang.org/x/crypto/bcrypt" -) - -const ( - // SaltBytes is the number of bytes used for salts. - SaltBytes = 32 - - // Filename specifies the default name of the metadata file. - Filename = "meta.db" - - // ShardGroupDeletedExpiration is the amount of time before a shard group info will be removed from cached - // data after it has been marked deleted (2 weeks). 
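// Editorial sketch: Node.Save above uses the classic "write to a temporary file, then rename"
// pattern so a crash mid-write never leaves a truncated node.json behind. A generic sketch of
// the same pattern; file names and the helper are illustrative, not code from this repository.
package main

import (
	"encoding/json"
	"os"
	"path/filepath"
)

func saveJSONAtomically(dir, name string, v interface{}) error {
	final := filepath.Join(dir, name)
	tmp := final + ".tmp"

	f, err := os.Create(tmp)
	if err != nil {
		return err
	}
	if err := json.NewEncoder(f).Encode(v); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	// Rename within one filesystem is atomic on POSIX systems, so readers see either the
	// old file or the complete new one, never a partial write.
	return os.Rename(tmp, final)
}

func main() {
	_ = saveJSONAtomically(os.TempDir(), "node.json", map[string]uint64{"ID": 1})
}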
- ShardGroupDeletedExpiration = -2 * 7 * 24 * time.Hour -) - -// Name of the bucket to store TSM metadata -var ( - BucketName = []byte("v1_tsm1_metadata") - metadataKey = []byte(Filename) -) - -var ( - // ErrServiceUnavailable is returned when the meta service is unavailable. - ErrServiceUnavailable = errors.New("meta service unavailable") - - // ErrService is returned when the meta service returns an error. - ErrService = errors.New("meta service error") -) - -// Client is used to execute commands on and read data from -// a meta service cluster. -type Client struct { - logger *zap.Logger - - mu sync.RWMutex - closing chan struct{} - changed chan struct{} - cacheData *Data - - // Authentication cache. - authCache map[string]authUser - - store kv.Store - - retentionAutoCreate bool -} - -type authUser struct { - bhash string - salt []byte - hash []byte -} - -// NewClient returns a new *Client. -func NewClient(config *Config, store kv.Store) *Client { - return &Client{ - cacheData: &Data{ - ClusterID: uint64(rand.Int63()), - Index: 1, - }, - closing: make(chan struct{}), - changed: make(chan struct{}), - logger: zap.NewNop(), - authCache: make(map[string]authUser), - store: store, - retentionAutoCreate: config.RetentionAutoCreate, - } -} - -// Open a connection to a meta service cluster. -func (c *Client) Open() error { - c.mu.Lock() - defer c.mu.Unlock() - - // Try to load from disk - if err := c.Load(); err != nil { - return err - } - - // If this is a brand new instance, persist to disk immediately. - if c.cacheData.Index == 1 { - if err := snapshot(c.store, c.cacheData); err != nil { - return err - } - } - - return nil -} - -// Close the meta service cluster connection. -func (c *Client) Close() error { - c.mu.Lock() - defer c.mu.Unlock() - - if t, ok := http.DefaultTransport.(*http.Transport); ok { - t.CloseIdleConnections() - } - - select { - case <-c.closing: - return nil - default: - close(c.closing) - } - - return nil -} - -// Database returns info for the requested database. -func (c *Client) Database(name string) *DatabaseInfo { - c.mu.RLock() - defer c.mu.RUnlock() - - for _, d := range c.cacheData.Databases { - if d.Name == name { - return &d - } - } - - return nil -} - -// Databases returns a list of all database infos. -func (c *Client) Databases() []DatabaseInfo { - c.mu.RLock() - defer c.mu.RUnlock() - - dbs := c.cacheData.Databases - if dbs == nil { - return []DatabaseInfo{} - } - return dbs -} - -// CreateDatabase creates a database or returns it if it already exists. -func (c *Client) CreateDatabase(name string) (*DatabaseInfo, error) { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - if db := data.Database(name); db != nil { - return db, nil - } - - if err := data.CreateDatabase(name); err != nil { - return nil, err - } - - // create default retention policy - if c.retentionAutoCreate { - rpi := DefaultRetentionPolicyInfo() - if err := data.CreateRetentionPolicy(name, rpi, true); err != nil { - return nil, err - } - } - - db := data.Database(name) - - if err := c.commit(data); err != nil { - return nil, err - } - - return db, nil -} - -// CreateDatabaseWithRetentionPolicy creates a database with the specified -// retention policy. -// -// When creating a database with a retention policy, the retention policy will -// always be set to default. Therefore if the caller provides a retention policy -// that already exists on the database, but that retention policy is not the -// default one, an error will be returned. 
-// -// This call is only idempotent when the caller provides the exact same -// retention policy, and that retention policy is already the default for the -// database. -func (c *Client) CreateDatabaseWithRetentionPolicy(name string, spec *RetentionPolicySpec) (*DatabaseInfo, error) { - if spec == nil { - return nil, errors.New("CreateDatabaseWithRetentionPolicy called with nil spec") - } - - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - if spec.Duration != nil && *spec.Duration < MinRetentionPolicyDuration && *spec.Duration != 0 { - return nil, ErrRetentionPolicyDurationTooLow - } - - db := data.Database(name) - if db == nil { - if err := data.CreateDatabase(name); err != nil { - return nil, err - } - db = data.Database(name) - } - - // No existing retention policies, so we can create the provided policy as - // the new default policy. - rpi := spec.NewRetentionPolicyInfo() - if len(db.RetentionPolicies) == 0 { - if err := data.CreateRetentionPolicy(name, rpi, true); err != nil { - return nil, err - } - } else if !spec.Matches(db.RetentionPolicy(rpi.Name)) { - // In this case we already have a retention policy on the database and - // the provided retention policy does not match it. Therefore, this call - // is not idempotent and we need to return an error. - return nil, ErrRetentionPolicyConflict - } - - // If a non-default retention policy was passed in that already exists then - // it's an error regardless of if the exact same retention policy is - // provided. CREATE DATABASE WITH RETENTION POLICY should only be used to - // create DEFAULT retention policies. - if db.DefaultRetentionPolicy != rpi.Name { - return nil, ErrRetentionPolicyConflict - } - - // Commit the changes. - if err := c.commit(data); err != nil { - return nil, err - } - - // Refresh the database info. - db = data.Database(name) - - return db, nil -} - -// DropDatabase deletes a database. -// -// Returns nil if no database exists -func (c *Client) DropDatabase(name string) error { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - if err := data.DropDatabase(name); err != nil { - return err - } - - if err := c.commit(data); err != nil { - return err - } - - return nil -} - -// CreateRetentionPolicy creates a retention policy on the specified database. -func (c *Client) CreateRetentionPolicy(database string, spec *RetentionPolicySpec, makeDefault bool) (*RetentionPolicyInfo, error) { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - if spec.Duration != nil && *spec.Duration < MinRetentionPolicyDuration && *spec.Duration != 0 { - return nil, ErrRetentionPolicyDurationTooLow - } - - rp := spec.NewRetentionPolicyInfo() - if err := data.CreateRetentionPolicy(database, rp, makeDefault); err != nil { - return nil, err - } - - if err := c.commit(data); err != nil { - return nil, err - } - - return rp, nil -} - -// RetentionPolicy returns the requested retention policy info. -func (c *Client) RetentionPolicy(database, name string) (rpi *RetentionPolicyInfo, err error) { - c.mu.RLock() - defer c.mu.RUnlock() - - db := c.cacheData.Database(database) - if db == nil { - return nil, influxdb.ErrDatabaseNotFound(database) - } - - return db.RetentionPolicy(name), nil -} - -// DropRetentionPolicy drops a retention policy from a database. 
-func (c *Client) DropRetentionPolicy(database, name string) error { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - if err := data.DropRetentionPolicy(database, name); err != nil { - return err - } - - if err := c.commit(data); err != nil { - return err - } - - return nil -} - -// UpdateRetentionPolicy updates a retention policy. -func (c *Client) UpdateRetentionPolicy(database, name string, rpu *RetentionPolicyUpdate, makeDefault bool) error { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - if err := data.UpdateRetentionPolicy(database, name, rpu, makeDefault); err != nil { - return err - } - - if err := c.commit(data); err != nil { - return err - } - - return nil -} - -// Users returns a slice of UserInfo representing the currently known users. -func (c *Client) Users() []UserInfo { - c.mu.RLock() - defer c.mu.RUnlock() - - users := c.cacheData.Users - - if users == nil { - return []UserInfo{} - } - return users -} - -// User returns the user with the given name, or ErrUserNotFound. -func (c *Client) User(name string) (User, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - for _, u := range c.cacheData.Users { - if u.Name == name { - return &u, nil - } - } - - return nil, ErrUserNotFound -} - -// bcryptCost is the cost associated with generating password with bcrypt. -// This setting is lowered during testing to improve test suite performance. -var bcryptCost = bcrypt.DefaultCost - -// hashWithSalt returns a salted hash of password using salt. -func (c *Client) hashWithSalt(salt []byte, password string) []byte { - hasher := sha256.New() - hasher.Write(salt) - hasher.Write([]byte(password)) - return hasher.Sum(nil) -} - -// saltedHash returns a salt and salted hash of password. -func (c *Client) saltedHash(password string) (salt, hash []byte, err error) { - salt = make([]byte, SaltBytes) - if _, err := io.ReadFull(crand.Reader, salt); err != nil { - return nil, nil, err - } - - return salt, c.hashWithSalt(salt, password), nil -} - -// CreateUser adds a user with the given name and password and admin status. -func (c *Client) CreateUser(name, password string, admin bool) (User, error) { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - // See if the user already exists. - if u := data.user(name); u != nil { - if err := bcrypt.CompareHashAndPassword([]byte(u.Hash), []byte(password)); err != nil || u.Admin != admin { - return nil, ErrUserExists - } - return u, nil - } - - // Hash the password before serializing it. - hash, err := bcrypt.GenerateFromPassword([]byte(password), bcryptCost) - if err != nil { - return nil, err - } - - if err := data.CreateUser(name, string(hash), admin); err != nil { - return nil, err - } - - u := data.user(name) - - if err := c.commit(data); err != nil { - return nil, err - } - - return u, nil -} - -// UpdateUser updates the password of an existing user. -func (c *Client) UpdateUser(name, password string) error { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - // Hash the password before serializing it. - hash, err := bcrypt.GenerateFromPassword([]byte(password), bcryptCost) - if err != nil { - return err - } - - if err := data.UpdateUser(name, string(hash)); err != nil { - return err - } - - delete(c.authCache, name) - - return c.commit(data) -} - -// DropUser removes the user with the given name. 
-func (c *Client) DropUser(name string) error { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - if err := data.DropUser(name); err != nil { - return err - } - - if err := c.commit(data); err != nil { - return err - } - - return nil -} - -// SetPrivilege sets a privilege for the given user on the given database. -func (c *Client) SetPrivilege(username, database string, p influxql.Privilege) error { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - if err := data.SetPrivilege(username, database, p); err != nil { - return err - } - - if err := c.commit(data); err != nil { - return err - } - - return nil -} - -// SetAdminPrivilege sets or unsets admin privilege to the given username. -func (c *Client) SetAdminPrivilege(username string, admin bool) error { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - if err := data.SetAdminPrivilege(username, admin); err != nil { - return err - } - - if err := c.commit(data); err != nil { - return err - } - - return nil -} - -// UserPrivileges returns the privileges for a user mapped by database name. -func (c *Client) UserPrivileges(username string) (map[string]influxql.Privilege, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - p, err := c.cacheData.UserPrivileges(username) - if err != nil { - return nil, err - } - return p, nil -} - -// UserPrivilege returns the privilege for the given user on the given database. -func (c *Client) UserPrivilege(username, database string) (*influxql.Privilege, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - p, err := c.cacheData.UserPrivilege(username, database) - if err != nil { - return nil, err - } - return p, nil -} - -// AdminUserExists returns true if any user has admin privilege. -func (c *Client) AdminUserExists() bool { - c.mu.RLock() - defer c.mu.RUnlock() - return c.cacheData.AdminUserExists() -} - -// Authenticate returns a UserInfo if the username and password match an existing entry. -func (c *Client) Authenticate(username, password string) (User, error) { - // Find user. - c.mu.RLock() - userInfo := c.cacheData.user(username) - c.mu.RUnlock() - if userInfo == nil { - return nil, ErrUserNotFound - } - - // Check the local auth cache first. - c.mu.RLock() - au, ok := c.authCache[username] - c.mu.RUnlock() - if ok { - // verify the password using the cached salt and hash - if bytes.Equal(c.hashWithSalt(au.salt, password), au.hash) { - return userInfo, nil - } - - // fall through to requiring a full bcrypt hash for invalid passwords - } - - // Compare password with user hash. - if err := bcrypt.CompareHashAndPassword([]byte(userInfo.Hash), []byte(password)); err != nil { - return nil, ErrAuthenticate - } - - // generate a salt and hash of the password for the cache - salt, hashed, err := c.saltedHash(password) - if err != nil { - return nil, err - } - c.mu.Lock() - c.authCache[username] = authUser{salt: salt, hash: hashed, bhash: userInfo.Hash} - c.mu.Unlock() - return userInfo, nil -} - -// UserCount returns the number of users stored. -func (c *Client) UserCount() int { - c.mu.RLock() - defer c.mu.RUnlock() - - return len(c.cacheData.Users) -} - -// ShardIDs returns a list of all shard ids. 
-func (c *Client) ShardIDs() []uint64 { - c.mu.RLock() - - var a []uint64 - for _, dbi := range c.cacheData.Databases { - for _, rpi := range dbi.RetentionPolicies { - for _, sgi := range rpi.ShardGroups { - for _, si := range sgi.Shards { - a = append(a, si.ID) - } - } - } - } - c.mu.RUnlock() - sort.Sort(uint64Slice(a)) - return a -} - -// ShardGroupsByTimeRange returns a list of all shard groups on a database and policy that may contain data -// for the specified time range. Shard groups are sorted by start time. -func (c *Client) ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []ShardGroupInfo, err error) { - c.mu.RLock() - defer c.mu.RUnlock() - - // Find retention policy. - rpi, err := c.cacheData.RetentionPolicy(database, policy) - if err != nil { - return nil, err - } else if rpi == nil { - return nil, influxdb.ErrRetentionPolicyNotFound(policy) - } - groups := make([]ShardGroupInfo, 0, len(rpi.ShardGroups)) - for _, g := range rpi.ShardGroups { - if g.Deleted() || !g.Overlaps(min, max) { - continue - } - groups = append(groups, g) - } - return groups, nil -} - -// ShardsByTimeRange returns a slice of shards that may contain data in the time range. -func (c *Client) ShardsByTimeRange(sources influxql.Sources, tmin, tmax time.Time) (a []ShardInfo, err error) { - m := make(map[*ShardInfo]struct{}) - for _, mm := range sources.Measurements() { - groups, err := c.ShardGroupsByTimeRange(mm.Database, mm.RetentionPolicy, tmin, tmax) - if err != nil { - return nil, err - } - for _, g := range groups { - for i := range g.Shards { - m[&g.Shards[i]] = struct{}{} - } - } - } - - a = make([]ShardInfo, 0, len(m)) - for sh := range m { - a = append(a, *sh) - } - - return a, nil -} - -// DropShard deletes a shard by ID. -func (c *Client) DropShard(id uint64) error { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - data.DropShard(id) - return c.commit(data) -} - -// TruncateShardGroups truncates any shard group that could contain timestamps beyond t. -func (c *Client) TruncateShardGroups(t time.Time) error { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - data.TruncateShardGroups(t) - return c.commit(data) -} - -// PruneShardGroups remove deleted shard groups from the data store. 
-func (c *Client) PruneShardGroups() error { - var changed bool - expiration := time.Now().Add(ShardGroupDeletedExpiration) - c.mu.Lock() - defer c.mu.Unlock() - data := c.cacheData.Clone() - for i, d := range data.Databases { - for j, rp := range d.RetentionPolicies { - var remainingShardGroups []ShardGroupInfo - for _, sgi := range rp.ShardGroups { - if sgi.DeletedAt.IsZero() || !expiration.After(sgi.DeletedAt) { - remainingShardGroups = append(remainingShardGroups, sgi) - continue - } - changed = true - } - data.Databases[i].RetentionPolicies[j].ShardGroups = remainingShardGroups - } - } - if changed { - return c.commit(data) - } - return nil -} - -// CreateShardGroupWithShards creates a shard group on a database and policy for a given timestamp and assign shards to the shard group -func (c *Client) CreateShardGroupWithShards(database, policy string, timestamp time.Time, shards []ShardInfo) (*ShardGroupInfo, error) { - // Check under a read-lock - c.mu.RLock() - if sg, _ := c.cacheData.ShardGroupByTimestamp(database, policy, timestamp); sg != nil { - c.mu.RUnlock() - return sg, nil - } - c.mu.RUnlock() - - c.mu.Lock() - defer c.mu.Unlock() - - // Check again under the write lock - data := c.cacheData.Clone() - if sg, _ := data.ShardGroupByTimestamp(database, policy, timestamp); sg != nil { - return sg, nil - } - - sgi, err := createShardGroup(data, database, policy, timestamp, shards...) - if err != nil { - return nil, err - } - - if err := c.commit(data); err != nil { - return nil, err - } - - return sgi, nil -} - -func (c *Client) CreateShardGroup(database, policy string, timestamp time.Time) (*ShardGroupInfo, error) { - return c.CreateShardGroupWithShards(database, policy, timestamp, nil) -} - -func createShardGroup(data *Data, database, policy string, timestamp time.Time, shards ...ShardInfo) (*ShardGroupInfo, error) { - // It is the responsibility of the caller to check if it exists before calling this method. - if sg, _ := data.ShardGroupByTimestamp(database, policy, timestamp); sg != nil { - return nil, ErrShardGroupExists - } - - if err := data.CreateShardGroup(database, policy, timestamp, shards...); err != nil { - return nil, err - } - - rpi, err := data.RetentionPolicy(database, policy) - if err != nil { - return nil, err - } else if rpi == nil { - return nil, errors.New("retention policy deleted after shard group created") - } - - sgi := rpi.ShardGroupByTimestamp(timestamp) - return sgi, nil -} - -// DeleteShardGroup removes a shard group from a database and retention policy by id. -func (c *Client) DeleteShardGroup(database, policy string, id uint64) error { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - if err := data.DeleteShardGroup(database, policy, id); err != nil { - return err - } - - if err := c.commit(data); err != nil { - return err - } - - return nil -} - -// PrecreateShardGroups creates shard groups whose endtime is before the 'to' time passed in, but -// is yet to expire before 'from'. This is to avoid the need for these shards to be created when data -// for the corresponding time range arrives. Shard creation involves Raft consensus, and precreation -// avoids taking the hit at write-time. -func (c *Client) PrecreateShardGroups(from, to time.Time) error { - c.mu.Lock() - defer c.mu.Unlock() - data := c.cacheData.Clone() - var changed bool - - for _, di := range data.Databases { - for _, rp := range di.RetentionPolicies { - if len(rp.ShardGroups) == 0 { - // No data was ever written to this group, or all groups have been deleted. 
- continue - } - g := rp.ShardGroups[len(rp.ShardGroups)-1] // Get the last group in time. - if !g.Deleted() && g.EndTime.Before(to) && g.EndTime.After(from) { - // Group is not deleted, will end before the future time, but is still yet to expire. - // This last check is important, so the system doesn't create shards groups wholly - // in the past. - - // Create successive shard group. - nextShardGroupTime := g.EndTime.Add(1 * time.Nanosecond) - // if it already exists, continue - if sg, _ := data.ShardGroupByTimestamp(di.Name, rp.Name, nextShardGroupTime); sg != nil { - c.logger.Info("Shard group already exists", - logger.ShardGroup(sg.ID), - logger.Database(di.Name), - logger.RetentionPolicy(rp.Name)) - continue - } - newGroup, err := createShardGroup(data, di.Name, rp.Name, nextShardGroupTime) - if err != nil { - c.logger.Info("Failed to precreate successive shard group", - zap.Uint64("group_id", g.ID), zap.Error(err)) - continue - } - changed = true - c.logger.Info("New shard group successfully precreated", - logger.ShardGroup(newGroup.ID), - logger.Database(di.Name), - logger.RetentionPolicy(rp.Name)) - } - } - } - - if changed { - if err := c.commit(data); err != nil { - return err - } - } - - return nil -} - -// ShardOwner returns the owning shard group info for a specific shard. -func (c *Client) ShardOwner(shardID uint64) (database, policy string, sgi *ShardGroupInfo) { - c.mu.RLock() - defer c.mu.RUnlock() - - for _, dbi := range c.cacheData.Databases { - for _, rpi := range dbi.RetentionPolicies { - for _, g := range rpi.ShardGroups { - if g.Deleted() { - continue - } - - for _, sh := range g.Shards { - if sh.ID == shardID { - database = dbi.Name - policy = rpi.Name - sgi = &g - return - } - } - } - } - } - return -} - -// CreateContinuousQuery saves a continuous query with the given name for the given database. -func (c *Client) CreateContinuousQuery(database, name, query string) error { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - if err := data.CreateContinuousQuery(database, name, query); err != nil { - return err - } - - if err := c.commit(data); err != nil { - return err - } - - return nil -} - -// DropContinuousQuery removes the continuous query with the given name on the given database. -func (c *Client) DropContinuousQuery(database, name string) error { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - if err := data.DropContinuousQuery(database, name); err != nil { - return err - } - - if err := c.commit(data); err != nil { - return err - } - - return nil -} - -// CreateSubscription creates a subscription against the given database and retention policy. -func (c *Client) CreateSubscription(database, rp, name, mode string, destinations []string) error { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - if err := data.CreateSubscription(database, rp, name, mode, destinations); err != nil { - return err - } - - if err := c.commit(data); err != nil { - return err - } - - return nil -} - -// DropSubscription removes the named subscription from the given database and retention policy. -func (c *Client) DropSubscription(database, rp, name string) error { - c.mu.Lock() - defer c.mu.Unlock() - - data := c.cacheData.Clone() - - if err := data.DropSubscription(database, rp, name); err != nil { - return err - } - - if err := c.commit(data); err != nil { - return err - } - - return nil -} - -// SetData overwrites the underlying data in the meta store. 
-func (c *Client) SetData(data *Data) error { - c.mu.Lock() - - d := data.Clone() - - if err := c.commit(d); err != nil { - return err - } - - c.mu.Unlock() - - return nil -} - -// Data returns a clone of the underlying data in the meta store. -func (c *Client) Data() Data { - c.mu.RLock() - defer c.mu.RUnlock() - d := c.cacheData.Clone() - return *d -} - -// WaitForDataChanged returns a channel that will get closed when -// the metastore data has changed. -func (c *Client) WaitForDataChanged() chan struct{} { - c.mu.RLock() - defer c.mu.RUnlock() - return c.changed -} - -// commit writes data to the underlying store. -// This method assumes c's mutex is already locked. -func (c *Client) commit(data *Data) error { - data.Index++ - - // try to write to disk before updating in memory - if err := snapshot(c.store, data); err != nil { - return err - } - - // update in memory - c.cacheData = data - - // close channels to signal changes - close(c.changed) - c.changed = make(chan struct{}) - - return nil -} - -// MarshalBinary returns a binary representation of the underlying data. -func (c *Client) MarshalBinary() ([]byte, error) { - c.mu.RLock() - defer c.mu.RUnlock() - return c.cacheData.MarshalBinary() -} - -// WithLogger sets the logger for the client. -func (c *Client) WithLogger(log *zap.Logger) { - c.mu.Lock() - defer c.mu.Unlock() - c.logger = log.With(zap.String("service", "metaclient")) -} - -// snapshot saves the current meta data to disk. -func snapshot(store kv.Store, data *Data) (err error) { - var d []byte - if d, err = data.MarshalBinary(); err != nil { - return err - } - - return store.Update(context.TODO(), func(tx kv.Tx) error { - b, err := tx.Bucket(BucketName) - if err != nil { - return err - } - return b.Put(metadataKey, d) - }) -} - -// Load loads the current meta data from disk. -func (c *Client) Load() error { - return c.store.View(context.TODO(), func(tx kv.Tx) error { - b, err := tx.Bucket(BucketName) - if err != nil { - return err - } - - if data, err := b.Get(metadataKey); errors.Is(err, kv.ErrKeyNotFound) { - return nil - } else if err != nil { - return err - } else { - return c.cacheData.UnmarshalBinary(data) - } - }) -} - -func (c *Client) RLock() { - c.store.RLock() -} - -func (c *Client) RUnlock() { - c.store.RUnlock() -} - -func (c *Client) Backup(ctx context.Context, w io.Writer) error { - return c.store.Backup(ctx, w) -} - -func (c *Client) Restore(ctx context.Context, r io.Reader) error { - if err := c.store.Restore(ctx, r); err != nil { - return err - } - return c.Load() -} - -type uint64Slice []uint64 - -func (a uint64Slice) Len() int { return len(a) } -func (a uint64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a uint64Slice) Less(i, j int) bool { return a[i] < a[j] } diff --git a/v1/services/meta/client_test.go b/v1/services/meta/client_test.go deleted file mode 100644 index 0a949356c19..00000000000 --- a/v1/services/meta/client_test.go +++ /dev/null @@ -1,1180 +0,0 @@ -package meta_test - -import ( - "context" - "reflect" - "strings" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/inmem" - influxdb "github.com/influxdata/influxdb/v2/v1" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/influxdata/influxql" -) - -func TestMetaClient_CreateDatabaseOnly(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - if db, err := c.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } else if db.Name != "db0" { - t.Fatalf("database name mismatch. 
exp: db0, got %s", db.Name) - } - - db := c.Database("db0") - if db == nil { - t.Fatal("database not found") - } else if db.Name != "db0" { - t.Fatalf("db name wrong: %s", db.Name) - } - - // Make sure a default retention policy was created. - rp, err := c.RetentionPolicy("db0", "autogen") - if err != nil { - t.Fatal(err) - } else if rp == nil { - t.Fatal("failed to create rp") - } else if exp, got := "autogen", rp.Name; exp != got { - t.Fatalf("rp name wrong:\n\texp: %s\n\tgot: %s", exp, got) - } -} - -func TestMetaClient_CreateDatabaseIfNotExists(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - if _, err := c.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } - - db := c.Database("db0") - if db == nil { - t.Fatal("database not found") - } else if db.Name != "db0" { - t.Fatalf("db name wrong: %s", db.Name) - } - - if _, err := c.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } -} - -func TestMetaClient_CreateDatabaseWithRetentionPolicy(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - // Calling CreateDatabaseWithRetentionPolicy with a nil spec should return - // an error - if _, err := c.CreateDatabaseWithRetentionPolicy("db0", nil); err == nil { - t.Fatal("expected error") - } - - duration := 1 * time.Hour - replicaN := 1 - spec := meta.RetentionPolicySpec{ - Name: "rp0", - Duration: &duration, - ReplicaN: &replicaN, - ShardGroupDuration: 60 * time.Minute, - } - if _, err := c.CreateDatabaseWithRetentionPolicy("db0", &spec); err != nil { - t.Fatal(err) - } - - db := c.Database("db0") - if db == nil { - t.Fatal("database not found") - } else if db.Name != "db0" { - t.Fatalf("db name wrong: %s", db.Name) - } - - rp := db.RetentionPolicy("rp0") - if rp.Name != "rp0" { - t.Fatalf("rp name wrong: %s", rp.Name) - } else if rp.Duration != time.Hour { - t.Fatalf("rp duration wrong: %v", rp.Duration) - } else if rp.ReplicaN != 1 { - t.Fatalf("rp replication wrong: %d", rp.ReplicaN) - } else if rp.ShardGroupDuration != 60*time.Minute { - t.Fatalf("rp shard duration wrong: %v", rp.ShardGroupDuration) - } - - // Recreating the exact same database with retention policy is not - // an error. - if _, err := c.CreateDatabaseWithRetentionPolicy("db0", &spec); err != nil { - t.Fatal(err) - } - - // If create database is used by itself, no error should be returned and - // the default retention policy should not be changed. - if dbi, err := c.CreateDatabase("db0"); err != nil { - t.Fatalf("got %v, but expected %v", err, nil) - } else if dbi.DefaultRetentionPolicy != "rp0" { - t.Fatalf("got %v, but expected %v", dbi.DefaultRetentionPolicy, "rp0") - } else if got, exp := len(dbi.RetentionPolicies), 1; got != exp { - // Ensure no additional retention policies were created. - t.Fatalf("got %v, but expected %v", got, exp) - } -} - -func TestMetaClient_CreateDatabaseWithRetentionPolicy_Conflict_Fields(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - duration := 1 * time.Hour - replicaN := 1 - spec := meta.RetentionPolicySpec{ - Name: "rp0", - Duration: &duration, - ReplicaN: &replicaN, - ShardGroupDuration: 60 * time.Minute, - } - if _, err := c.CreateDatabaseWithRetentionPolicy("db0", &spec); err != nil { - t.Fatal(err) - } - - // If the rp's name is different, and error should be returned. 
- spec2 := spec - spec2.Name = spec.Name + "1" - if _, err := c.CreateDatabaseWithRetentionPolicy("db0", &spec2); err != meta.ErrRetentionPolicyConflict { - t.Fatalf("got %v, but expected %v", err, meta.ErrRetentionPolicyConflict) - } - - // If the rp's duration is different, an error should be returned. - spec2 = spec - duration2 := *spec.Duration + time.Minute - spec2.Duration = &duration2 - if _, err := c.CreateDatabaseWithRetentionPolicy("db0", &spec2); err != meta.ErrRetentionPolicyConflict { - t.Fatalf("got %v, but expected %v", err, meta.ErrRetentionPolicyConflict) - } - - // If the rp's replica is different, an error should be returned. - spec2 = spec - replica2 := *spec.ReplicaN + 1 - spec2.ReplicaN = &replica2 - if _, err := c.CreateDatabaseWithRetentionPolicy("db0", &spec2); err != meta.ErrRetentionPolicyConflict { - t.Fatalf("got %v, but expected %v", err, meta.ErrRetentionPolicyConflict) - } - - // If the rp's shard group duration is different, an error should be returned. - spec2 = spec - spec2.ShardGroupDuration = spec.ShardGroupDuration + time.Minute - if _, err := c.CreateDatabaseWithRetentionPolicy("db0", &spec2); err != meta.ErrRetentionPolicyConflict { - t.Fatalf("got %v, but expected %v", err, meta.ErrRetentionPolicyConflict) - } -} - -func TestMetaClient_CreateDatabaseWithRetentionPolicy_Conflict_NonDefault(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - duration := 1 * time.Hour - replicaN := 1 - spec := meta.RetentionPolicySpec{ - Name: "rp0", - Duration: &duration, - ReplicaN: &replicaN, - ShardGroupDuration: 60 * time.Minute, - } - - // Create a default retention policy. - if _, err := c.CreateDatabaseWithRetentionPolicy("db0", &spec); err != nil { - t.Fatal(err) - } - - // Let's create a non-default retention policy. - spec2 := spec - spec2.Name = "rp1" - if _, err := c.CreateRetentionPolicy("db0", &spec2, false); err != nil { - t.Fatal(err) - } - - // If we try to create a database with the non-default retention policy then - // it's an error. - if _, err := c.CreateDatabaseWithRetentionPolicy("db0", &spec2); err != meta.ErrRetentionPolicyConflict { - t.Fatalf("got %v, but expected %v", err, meta.ErrRetentionPolicyConflict) - } -} - -func TestMetaClient_Databases(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - // Create two databases. 
- db, err := c.CreateDatabase("db0") - if err != nil { - t.Fatal(err) - } else if db == nil { - t.Fatal("database not found") - } else if db.Name != "db0" { - t.Fatalf("db name wrong: %s", db.Name) - } - - db, err = c.CreateDatabase("db1") - if err != nil { - t.Fatal(err) - } else if db.Name != "db1" { - t.Fatalf("db name wrong: %s", db.Name) - } - - dbs := c.Databases() - if err != nil { - t.Fatal(err) - } - if len(dbs) != 2 { - t.Fatalf("expected 2 databases but got %d", len(dbs)) - } else if dbs[0].Name != "db0" { - t.Fatalf("db name wrong: %s", dbs[0].Name) - } else if dbs[1].Name != "db1" { - t.Fatalf("db name wrong: %s", dbs[1].Name) - } -} - -func TestMetaClient_DropDatabase(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - if _, err := c.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } - - db := c.Database("db0") - if db == nil { - t.Fatalf("database not found") - } else if db.Name != "db0" { - t.Fatalf("db name wrong: %s", db.Name) - } - - if err := c.DropDatabase("db0"); err != nil { - t.Fatal(err) - } - - if db = c.Database("db0"); db != nil { - t.Fatalf("expected database to not return: %v", db) - } - - // Dropping a database that does not exist is not an error. - if err := c.DropDatabase("db foo"); err != nil { - t.Fatalf("got %v error, but expected no error", err) - } -} - -func TestMetaClient_CreateRetentionPolicy(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - if _, err := c.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } - - db := c.Database("db0") - if db == nil { - t.Fatal("database not found") - } else if db.Name != "db0" { - t.Fatalf("db name wrong: %s", db.Name) - } - - rp0 := meta.RetentionPolicyInfo{ - Name: "rp0", - ReplicaN: 1, - Duration: 2 * time.Hour, - ShardGroupDuration: 2 * time.Hour, - } - - if _, err := c.CreateRetentionPolicy("db0", &meta.RetentionPolicySpec{ - Name: rp0.Name, - ReplicaN: &rp0.ReplicaN, - Duration: &rp0.Duration, - ShardGroupDuration: rp0.ShardGroupDuration, - }, true); err != nil { - t.Fatal(err) - } - - actual, err := c.RetentionPolicy("db0", "rp0") - if err != nil { - t.Fatal(err) - } else if got, exp := actual, &rp0; !reflect.DeepEqual(got, exp) { - t.Fatalf("got %#v, expected %#v", got, exp) - } - - // Create the same policy. Should not error. - if _, err := c.CreateRetentionPolicy("db0", &meta.RetentionPolicySpec{ - Name: rp0.Name, - ReplicaN: &rp0.ReplicaN, - Duration: &rp0.Duration, - ShardGroupDuration: rp0.ShardGroupDuration, - }, true); err != nil { - t.Fatal(err) - } else if actual, err = c.RetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } else if got, exp := actual, &rp0; !reflect.DeepEqual(got, exp) { - t.Fatalf("got %#v, expected %#v", got, exp) - } - - // Creating the same policy, but with a different duration should - // result in an error. - rp1 := rp0 - rp1.Duration = 2 * rp0.Duration - - _, got := c.CreateRetentionPolicy("db0", &meta.RetentionPolicySpec{ - Name: rp1.Name, - ReplicaN: &rp1.ReplicaN, - Duration: &rp1.Duration, - ShardGroupDuration: rp1.ShardGroupDuration, - }, true) - if exp := meta.ErrRetentionPolicyExists; got != exp { - t.Fatalf("got error %v, expected error %v", got, exp) - } - - // Creating the same policy, but with a different replica factor - // should also result in an error. 
- rp1 = rp0 - rp1.ReplicaN = rp0.ReplicaN + 1 - - _, got = c.CreateRetentionPolicy("db0", &meta.RetentionPolicySpec{ - Name: rp1.Name, - ReplicaN: &rp1.ReplicaN, - Duration: &rp1.Duration, - ShardGroupDuration: rp1.ShardGroupDuration, - }, true) - if exp := meta.ErrRetentionPolicyExists; got != exp { - t.Fatalf("got error %v, expected error %v", got, exp) - } - - // Creating the same policy, but with a different shard group - // duration should also result in an error. - rp1 = rp0 - rp1.ShardGroupDuration = rp0.ShardGroupDuration / 2 - - _, got = c.CreateRetentionPolicy("db0", &meta.RetentionPolicySpec{ - Name: rp1.Name, - ReplicaN: &rp1.ReplicaN, - Duration: &rp1.Duration, - ShardGroupDuration: rp1.ShardGroupDuration, - }, true) - if exp := meta.ErrRetentionPolicyExists; got != exp { - t.Fatalf("got error %v, expected error %v", got, exp) - } - - // Creating a policy with the shard duration being greater than the - // duration should also be an error. - rp1 = rp0 - rp1.Duration = 1 * time.Hour - rp1.ShardGroupDuration = 2 * time.Hour - - _, got = c.CreateRetentionPolicy("db0", &meta.RetentionPolicySpec{ - Name: rp1.Name, - ReplicaN: &rp1.ReplicaN, - Duration: &rp1.Duration, - ShardGroupDuration: rp1.ShardGroupDuration, - }, true) - if exp := meta.ErrIncompatibleDurations; got != exp { - t.Fatalf("got error %v, expected error %v", got, exp) - } -} - -func TestMetaClient_DefaultRetentionPolicy(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - duration := 1 * time.Hour - replicaN := 1 - if _, err := c.CreateDatabaseWithRetentionPolicy("db0", &meta.RetentionPolicySpec{ - Name: "rp0", - Duration: &duration, - ReplicaN: &replicaN, - }); err != nil { - t.Fatal(err) - } - - db := c.Database("db0") - if db == nil { - t.Fatal("database not found") - } else if db.Name != "db0" { - t.Fatalf("db name wrong: %s", db.Name) - } - - rp, err := c.RetentionPolicy("db0", "rp0") - if err != nil { - t.Fatal(err) - } else if rp.Name != "rp0" { - t.Fatalf("rp name wrong: %s", rp.Name) - } else if rp.Duration != time.Hour { - t.Fatalf("rp duration wrong: %s", rp.Duration.String()) - } else if rp.ReplicaN != 1 { - t.Fatalf("rp replication wrong: %d", rp.ReplicaN) - } - - // Make sure default retention policy is now rp0 - if exp, got := "rp0", db.DefaultRetentionPolicy; exp != got { - t.Fatalf("rp name wrong: \n\texp: %s\n\tgot: %s", exp, db.DefaultRetentionPolicy) - } -} - -func TestMetaClient_UpdateRetentionPolicy(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - if _, err := c.CreateDatabaseWithRetentionPolicy("db0", &meta.RetentionPolicySpec{ - Name: "rp0", - ShardGroupDuration: 4 * time.Hour, - }); err != nil { - t.Fatal(err) - } - - rpi, err := c.RetentionPolicy("db0", "rp0") - if err != nil { - t.Fatal(err) - } - - // Set the duration to another value and ensure that the shard group duration - // doesn't change. - duration := 2 * rpi.ShardGroupDuration - replicaN := 1 - if err := c.UpdateRetentionPolicy("db0", "rp0", &meta.RetentionPolicyUpdate{ - Duration: &duration, - ReplicaN: &replicaN, - }, true); err != nil { - t.Fatal(err) - } - - rpi, err = c.RetentionPolicy("db0", "rp0") - if err != nil { - t.Fatal(err) - } - if exp, got := 4*time.Hour, rpi.ShardGroupDuration; exp != got { - t.Fatalf("shard group duration wrong: \n\texp: %s\n\tgot: %s", exp, got) - } - - // Set the duration to below the shard group duration. This should return an error. 
- duration = rpi.ShardGroupDuration / 2 - if err := c.UpdateRetentionPolicy("db0", "rp0", &meta.RetentionPolicyUpdate{ - Duration: &duration, - }, true); err == nil { - t.Fatal("expected error") - } else if err != meta.ErrIncompatibleDurations { - t.Fatalf("expected error '%s', got '%s'", meta.ErrIncompatibleDurations, err) - } - - // Set the shard duration longer than the overall duration. This should also return an error. - sgDuration := rpi.Duration * 2 - if err := c.UpdateRetentionPolicy("db0", "rp0", &meta.RetentionPolicyUpdate{ - ShardGroupDuration: &sgDuration, - }, true); err == nil { - t.Fatal("expected error") - } else if err != meta.ErrIncompatibleDurations { - t.Fatalf("expected error '%s', got '%s'", meta.ErrIncompatibleDurations, err) - } - - // Set both values to incompatible values and ensure an error is returned. - duration = rpi.ShardGroupDuration - sgDuration = rpi.Duration - if err := c.UpdateRetentionPolicy("db0", "rp0", &meta.RetentionPolicyUpdate{ - Duration: &duration, - ShardGroupDuration: &sgDuration, - }, true); err == nil { - t.Fatal("expected error") - } else if err != meta.ErrIncompatibleDurations { - t.Fatalf("expected error '%s', got '%s'", meta.ErrIncompatibleDurations, err) - } - - // Allow any shard duration if the duration is set to zero. - duration = time.Duration(0) - sgDuration = 168 * time.Hour - if err := c.UpdateRetentionPolicy("db0", "rp0", &meta.RetentionPolicyUpdate{ - Duration: &duration, - ShardGroupDuration: &sgDuration, - }, true); err != nil { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestMetaClient_DropRetentionPolicy(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - if _, err := c.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } - - db := c.Database("db0") - if db == nil { - t.Fatal("database not found") - } else if db.Name != "db0" { - t.Fatalf("db name wrong: %s", db.Name) - } - - duration := 1 * time.Hour - replicaN := 1 - if _, err := c.CreateRetentionPolicy("db0", &meta.RetentionPolicySpec{ - Name: "rp0", - Duration: &duration, - ReplicaN: &replicaN, - }, true); err != nil { - t.Fatal(err) - } - - rp, err := c.RetentionPolicy("db0", "rp0") - if err != nil { - t.Fatal(err) - } else if rp.Name != "rp0" { - t.Fatalf("rp name wrong: %s", rp.Name) - } else if rp.Duration != time.Hour { - t.Fatalf("rp duration wrong: %s", rp.Duration.String()) - } else if rp.ReplicaN != 1 { - t.Fatalf("rp replication wrong: %d", rp.ReplicaN) - } - - if err := c.DropRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - rp, err = c.RetentionPolicy("db0", "rp0") - if err != nil { - t.Fatal(err) - } else if rp != nil { - t.Fatalf("rp should have been dropped") - } -} - -func TestMetaClient_CreateUser(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - // Create an admin user - if _, err := c.CreateUser("fred", "supersecure", true); err != nil { - t.Fatal(err) - } - - // Create a non-admin user - if _, err := c.CreateUser("wilma", "password", false); err != nil { - t.Fatal(err) - } - - u, err := c.User("fred") - if err != nil { - t.Fatal(err) - } - if exp, got := "fred", u.ID(); exp != got { - t.Fatalf("unexpected user name: exp: %s got: %s", exp, got) - } - if !isAdmin(u) { - t.Fatalf("expected user to be admin") - } - - u, err = c.Authenticate("fred", "supersecure") - if u == nil || err != nil || u.ID() != "fred" { - t.Fatalf("failed to authenticate") - } - - // Auth for bad password should fail - u, err = c.Authenticate("fred", "badpassword") - if u != 
nil || err != meta.ErrAuthenticate { - t.Fatalf("authentication should fail with %s", meta.ErrAuthenticate) - } - - // Auth for no password should fail - u, err = c.Authenticate("fred", "") - if u != nil || err != meta.ErrAuthenticate { - t.Fatalf("authentication should fail with %s", meta.ErrAuthenticate) - } - - // Change password should succeed. - if err := c.UpdateUser("fred", "moresupersecure"); err != nil { - t.Fatal(err) - } - - // Auth for old password should fail - u, err = c.Authenticate("fred", "supersecure") - if u != nil || err != meta.ErrAuthenticate { - t.Fatalf("authentication should fail with %s", meta.ErrAuthenticate) - } - - // Auth for new password should succeed. - u, err = c.Authenticate("fred", "moresupersecure") - if u == nil || err != nil || u.ID() != "fred" { - t.Fatalf("failed to authenticate") - } - - // Auth for unknown user should fail - u, err = c.Authenticate("foo", "") - if u != nil || err != meta.ErrUserNotFound { - t.Fatalf("authentication should fail with %s", meta.ErrUserNotFound) - } - - u, err = c.User("wilma") - if err != nil { - t.Fatal(err) - } - if exp, got := "wilma", u.ID(); exp != got { - t.Fatalf("unexpected user name: exp: %s got: %s", exp, got) - } - if isAdmin(u) { - t.Fatalf("expected user not to be an admin") - } - - if exp, got := 2, c.UserCount(); exp != got { - t.Fatalf("unexpected user count. got: %d exp: %d", got, exp) - } - - // Grant privilidges to a non-admin user - if err := c.SetAdminPrivilege("wilma", true); err != nil { - t.Fatal(err) - } - - u, err = c.User("wilma") - if err != nil { - t.Fatal(err) - } - if exp, got := "wilma", u.ID(); exp != got { - t.Fatalf("unexpected user name: exp: %s got: %s", exp, got) - } - if !isAdmin(u) { - t.Fatalf("expected user to be an admin") - } - - // Revoke privilidges from user - if err := c.SetAdminPrivilege("wilma", false); err != nil { - t.Fatal(err) - } - - u, err = c.User("wilma") - if err != nil { - t.Fatal(err) - } - if exp, got := "wilma", u.ID(); exp != got { - t.Fatalf("unexpected user name: exp: %s got: %s", exp, got) - } - if isAdmin(u) { - t.Fatalf("expected user not to be an admin") - } - - // Create a database to use for assiging privileges to. - if _, err := c.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } - - db := c.Database("db0") - if db.Name != "db0" { - t.Fatalf("db name wrong: %s", db.Name) - } - - // Assign a single privilege at the database level - if err := c.SetPrivilege("wilma", "db0", influxql.ReadPrivilege); err != nil { - t.Fatal(err) - } - - p, err := c.UserPrivilege("wilma", "db0") - if err != nil { - t.Fatal(err) - } - if p == nil { - t.Fatal("expected privilege but was nil") - } - if exp, got := influxql.ReadPrivilege, *p; exp != got { - t.Fatalf("unexpected privilege. exp: %d, got: %d", exp, got) - } - - // Remove a single privilege at the database level - if err := c.SetPrivilege("wilma", "db0", influxql.NoPrivileges); err != nil { - t.Fatal(err) - } - p, err = c.UserPrivilege("wilma", "db0") - if err != nil { - t.Fatal(err) - } - if p == nil { - t.Fatal("expected privilege but was nil") - } - if exp, got := influxql.NoPrivileges, *p; exp != got { - t.Fatalf("unexpected privilege. exp: %d, got: %d", exp, got) - } - - // Drop a user - if err := c.DropUser("wilma"); err != nil { - t.Fatal(err) - } - - if _, err = c.User("wilma"); err != meta.ErrUserNotFound { - t.Fatalf("user lookup should fail with %s", meta.ErrUserNotFound) - } - - if exp, got := 1, c.UserCount(); exp != got { - t.Fatalf("unexpected user count. 
got: %d exp: %d", got, exp) - } -} - -func TestMetaClient_UpdateUser(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - // UpdateUser that doesn't exist should return an error. - if err := c.UpdateUser("foo", "bar"); err == nil { - t.Fatalf("expected error, got nil") - } -} - -func TestMetaClient_ContinuousQueries(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - // Create a database to use - if _, err := c.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } - db := c.Database("db0") - if db == nil { - t.Fatalf("database not found") - } else if db.Name != "db0" { - t.Fatalf("db name wrong: %s", db.Name) - } - - // Create a CQ - if err := c.CreateContinuousQuery("db0", "cq0", `SELECT count(value) INTO foo_count FROM foo GROUP BY time(10m)`); err != nil { - t.Fatal(err) - } - - // Recreating an existing CQ with the exact same query should not - // return an error. - if err := c.CreateContinuousQuery("db0", "cq0", `SELECT count(value) INTO foo_count FROM foo GROUP BY time(10m)`); err != nil { - t.Fatalf("got error %q, but didn't expect one", err) - } - - // Recreating an existing CQ with a different query should return - // an error. - if err := c.CreateContinuousQuery("db0", "cq0", `SELECT min(value) INTO foo_max FROM foo GROUP BY time(20m)`); err == nil { - t.Fatal("didn't get and error, but expected one") - } else if got, exp := err, meta.ErrContinuousQueryExists; got.Error() != exp.Error() { - t.Fatalf("got %v, expected %v", got, exp) - } - - // Create a few more CQ's - if err := c.CreateContinuousQuery("db0", "cq1", `SELECT max(value) INTO foo_max FROM foo GROUP BY time(10m)`); err != nil { - t.Fatal(err) - } - if err := c.CreateContinuousQuery("db0", "cq2", `SELECT min(value) INTO foo_min FROM foo GROUP BY time(10m)`); err != nil { - t.Fatal(err) - } - - // Drop a single CQ - if err := c.DropContinuousQuery("db0", "cq1"); err != nil { - t.Fatal(err) - } - - // Dropping a nonexistent CQ should not return an error. - if err := c.DropContinuousQuery("db0", "not-a-cq"); err != nil { - t.Fatal(err) - } -} - -func TestMetaClient_Subscriptions_Create(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - // Create a database to use - if _, err := c.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } - db := c.Database("db0") - if db == nil { - t.Fatal("database not found") - } else if db.Name != "db0" { - t.Fatalf("db name wrong: %s", db.Name) - } - - // Create a subscription - if err := c.CreateSubscription("db0", "autogen", "sub0", "ALL", []string{"udp://example.com:9090"}); err != nil { - t.Fatal(err) - } - - // Re-create a subscription - err := c.CreateSubscription("db0", "autogen", "sub0", "ALL", []string{"udp://example.com:9090"}) - if err == nil || err.Error() != `subscription already exists` { - t.Fatalf("unexpected error: %s", err) - } - - // Create another subscription. 
- if err := c.CreateSubscription("db0", "autogen", "sub1", "ALL", []string{"udp://example.com:6060"}); err != nil { - t.Fatal(err) - } - - // Create a subscription with invalid scheme - err = c.CreateSubscription("db0", "autogen", "sub2", "ALL", []string{"bad://example.com:9191"}) - if err == nil || !strings.HasPrefix(err.Error(), "invalid subscription URL") { - t.Fatalf("unexpected error: %s", err) - } - - // Create a subscription without port number - err = c.CreateSubscription("db0", "autogen", "sub2", "ALL", []string{"udp://example.com"}) - if err == nil || !strings.HasPrefix(err.Error(), "invalid subscription URL") { - t.Fatalf("unexpected error: %s", err) - } - - // Create an HTTP subscription. - if err := c.CreateSubscription("db0", "autogen", "sub3", "ALL", []string{"http://example.com:9092"}); err != nil { - t.Fatal(err) - } - - // Create an HTTPS subscription. - if err := c.CreateSubscription("db0", "autogen", "sub4", "ALL", []string{"https://example.com:9092"}); err != nil { - t.Fatal(err) - } -} - -func TestMetaClient_Subscriptions_Drop(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - // Create a database to use - if _, err := c.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } - - // DROP SUBSCRIPTION returns ErrSubscriptionNotFound when the - // subscription is unknown. - err := c.DropSubscription("db0", "autogen", "foo") - if got, exp := err, meta.ErrSubscriptionNotFound; got == nil || got.Error() != exp.Error() { - t.Fatalf("got: %s, exp: %s", got, exp) - } - - // Create a subscription. - if err := c.CreateSubscription("db0", "autogen", "sub0", "ALL", []string{"udp://example.com:9090"}); err != nil { - t.Fatal(err) - } - - // DROP SUBSCRIPTION returns an influxdb.ErrDatabaseNotFound when - // the database is unknown. - err = c.DropSubscription("foo", "autogen", "sub0") - if got, exp := err, influxdb.ErrDatabaseNotFound("foo"); got.Error() != exp.Error() { - t.Fatalf("got: %s, exp: %s", got, exp) - } - - // DROP SUBSCRIPTION returns an influxdb.ErrRetentionPolicyNotFound - // when the retention policy is unknown. - err = c.DropSubscription("db0", "foo_policy", "sub0") - if got, exp := err, influxdb.ErrRetentionPolicyNotFound("foo_policy"); got.Error() != exp.Error() { - t.Fatalf("got: %s, exp: %s", got, exp) - } - - // DROP SUBSCRIPTION drops the subscription if it can find it. - err = c.DropSubscription("db0", "autogen", "sub0") - if got := err; got != nil { - t.Fatalf("got: %s, exp: %v", got, nil) - } -} - -func TestMetaClient_Shards(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - if _, err := c.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } - - // Test creating a shard group. - tmin := time.Now() - sg, err := c.CreateShardGroup("db0", "autogen", tmin) - if err != nil { - t.Fatal(err) - } else if sg == nil { - t.Fatalf("expected ShardGroup") - } - - // Test pre-creating shard groups. - dur := sg.EndTime.Sub(sg.StartTime) + time.Nanosecond - tmax := tmin.Add(dur) - if err := c.PrecreateShardGroups(tmin, tmax); err != nil { - t.Fatal(err) - } - - // Test finding shard groups by time range. - groups, err := c.ShardGroupsByTimeRange("db0", "autogen", tmin, tmax) - if err != nil { - t.Fatal(err) - } else if len(groups) != 2 { - t.Fatalf("wrong number of shard groups: %d", len(groups)) - } - - // Test finding shard owner. 
- db, rp, owner := c.ShardOwner(groups[0].Shards[0].ID) - if db != "db0" { - t.Fatalf("wrong db name: %s", db) - } else if rp != "autogen" { - t.Fatalf("wrong rp name: %s", rp) - } else if owner.ID != groups[0].ID { - t.Fatalf("wrong owner: exp %d got %d", groups[0].ID, owner.ID) - } - - // Test deleting a shard group. - if err := c.DeleteShardGroup("db0", "autogen", groups[0].ID); err != nil { - t.Fatal(err) - } else if groups, err = c.ShardGroupsByTimeRange("db0", "autogen", tmin, tmax); err != nil { - t.Fatal(err) - } else if len(groups) != 1 { - t.Fatalf("wrong number of shard groups after delete: %d", len(groups)) - } -} - -// Tests that calling CreateShardGroup for the same time range doesn't increment the data.Index -func TestMetaClient_CreateShardGroupIdempotent(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - if _, err := c.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } - - // create a shard group. - tmin := time.Now() - sg, err := c.CreateShardGroup("db0", "autogen", tmin) - if err != nil { - t.Fatal(err) - } else if sg == nil { - t.Fatalf("expected ShardGroup") - } - - i := c.Data().Index - t.Log("index: ", i) - - // create the same shard group. - sg, err = c.CreateShardGroup("db0", "autogen", tmin) - if err != nil { - t.Fatal(err) - } else if sg == nil { - t.Fatalf("expected ShardGroup") - } - - t.Log("index: ", i) - if got, exp := c.Data().Index, i; got != exp { - t.Fatalf("PrecreateShardGroups failed: invalid index, got %d, exp %d", got, exp) - } - - // make sure pre-creating is also idempotent - // Test pre-creating shard groups. - dur := sg.EndTime.Sub(sg.StartTime) + time.Nanosecond - tmax := tmin.Add(dur) - if err := c.PrecreateShardGroups(tmin, tmax); err != nil { - t.Fatal(err) - } - i = c.Data().Index - t.Log("index: ", i) - if err := c.PrecreateShardGroups(tmin, tmax); err != nil { - t.Fatal(err) - } - t.Log("index: ", i) - if got, exp := c.Data().Index, i; got != exp { - t.Fatalf("PrecreateShardGroups failed: invalid index, got %d, exp %d", got, exp) - } -} - -func TestMetaClient_PruneShardGroups(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - if _, err := c.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } - - if _, err := c.CreateDatabase("db1"); err != nil { - t.Fatal(err) - } - - duration := 1 * time.Hour - replicaN := 1 - - if _, err := c.CreateRetentionPolicy("db1", &meta.RetentionPolicySpec{ - Name: "rp0", - Duration: &duration, - ReplicaN: &replicaN, - }, true); err != nil { - t.Fatal(err) - } - - sg, err := c.CreateShardGroup("db1", "autogen", time.Now()) - if err != nil { - t.Fatal(err) - } else if sg == nil { - t.Fatalf("expected ShardGroup") - } - - sg, err = c.CreateShardGroup("db1", "autogen", time.Now().Add(15*24*time.Hour)) - if err != nil { - t.Fatal(err) - } else if sg == nil { - t.Fatalf("expected ShardGroup") - } - - sg, err = c.CreateShardGroup("db1", "rp0", time.Now()) - if err != nil { - t.Fatal(err) - } else if sg == nil { - t.Fatalf("expected ShardGroup") - } - - expiration := time.Now().Add(-2 * 7 * 24 * time.Hour).Add(-1 * time.Hour) - - data := c.Data() - data.Databases[1].RetentionPolicies[0].ShardGroups[0].DeletedAt = expiration - data.Databases[1].RetentionPolicies[0].ShardGroups[1].DeletedAt = expiration - - if err := c.SetData(&data); err != nil { - t.Fatal(err) - } - - if err := c.PruneShardGroups(); err != nil { - t.Fatal(err) - } - - data = c.Data() - rp, err := data.RetentionPolicy("db1", "autogen") - if err != nil { - t.Fatal(err) - } - 
if got, exp := len(rp.ShardGroups), 0; got != exp { - t.Fatalf("failed to prune shard group. got: %d, exp: %d", got, exp) - } - - rp, err = data.RetentionPolicy("db1", "rp0") - if err != nil { - t.Fatal(err) - } - if got, exp := len(rp.ShardGroups), 1; got != exp { - t.Fatalf("failed to prune shard group. got: %d, exp: %d", got, exp) - } -} - -// Tests that calling CreateShardGroup for the same time range doesn't increment the data.Index -func TestMetaClient_CreateShardGroupWithShards(t *testing.T) { - t.Parallel() - - d, c := newClient() - defer d() - defer c.Close() - - if _, err := c.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } - - shards := []meta.ShardInfo{ - {1, []meta.ShardOwner{{1}}}, - {3, nil}, - } - // create a shard group. - tmin := time.Now() - sg, err := c.CreateShardGroupWithShards("db0", "autogen", tmin, shards) - if err != nil { - t.Fatal(err) - } else if sg == nil { - t.Fatalf("expected ShardGroup") - } - - if c.Data().MaxShardID != 3 { - t.Log("MaxShardID is not 3: ", c.Data().MaxShardID) - t.Fail() - } - // Test pre-creating shard groups. - dur := sg.EndTime.Sub(sg.StartTime) + time.Nanosecond - tmax := tmin.Add(dur) - groups, err := c.ShardGroupsByTimeRange("db0", "autogen", tmin, tmax) - if err != nil { - t.Fatal(err) - } else if len(groups) != 1 { - t.Fatalf("wrong number of shard groups: %d", len(groups)) - } else if len(groups[0].Shards) != 2 { - t.Fatalf("wrong number of shards: %d", len(groups[0].Shards)) - } else if groups[0].Shards[0].ID != 1 { - t.Fatalf("wrong id of shard 0: %d", groups[0].Shards[0].ID) - } else if len(groups[0].Shards[0].Owners) != 1 { - t.Fatalf("wrong number of shard 0 owners: %d", len(groups[0].Shards[0].Owners)) - } else if groups[0].Shards[0].Owners[0].NodeID != 1 { - t.Fatalf("wrong number of shard 0 owner 0 nodeID: %d", groups[0].Shards[0].Owners[0].NodeID) - } else if groups[0].Shards[1].ID != 3 { - t.Fatalf("wrong id of shard 1: %d", groups[0].Shards[1].ID) - } else if groups[0].Shards[1].Owners != nil { - t.Fatalf("wrong content of shard 1 owners: %v", groups[0].Shards[1].Owners) - } -} - -func newClient() (func(), *meta.Client) { - cfg := newConfig() - store := newStore() - c := meta.NewClient(cfg, store) - if err := c.Open(); err != nil { - panic(err) - } - return func() {}, c -} - -func newStore() *inmem.KVStore { - store := inmem.NewKVStore() - _ = store.CreateBucket(context.Background(), meta.BucketName) - return store -} - -func newConfig() *meta.Config { - return meta.NewConfig() -} - -func isAdmin(u meta.User) bool { - ui := u.(*meta.UserInfo) - return ui.Admin -} diff --git a/v1/services/meta/config.go b/v1/services/meta/config.go deleted file mode 100644 index 65a3a006125..00000000000 --- a/v1/services/meta/config.go +++ /dev/null @@ -1,38 +0,0 @@ -package meta - -import ( - "errors" - "time" -) - -const ( - // DefaultLeaseDuration is the default duration for leases. - DefaultLeaseDuration = 60 * time.Second - - // DefaultLoggingEnabled determines if log messages are printed for the meta service. - DefaultLoggingEnabled = true -) - -// Config represents the meta configuration. -type Config struct { - Dir string `toml:"dir"` - - RetentionAutoCreate bool `toml:"retention-autocreate"` - LoggingEnabled bool `toml:"logging-enabled"` -} - -// NewConfig builds a new configuration with default values. -func NewConfig() *Config { - return &Config{ - RetentionAutoCreate: true, - LoggingEnabled: DefaultLoggingEnabled, - } -} - -// Validate returns an error if the config is invalid. 
-func (c *Config) Validate() error { - if c.Dir == "" { - return errors.New("Meta.Dir must be specified") - } - return nil -} diff --git a/v1/services/meta/config_test.go b/v1/services/meta/config_test.go deleted file mode 100644 index f609a870259..00000000000 --- a/v1/services/meta/config_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package meta_test - -import ( - "testing" - - "github.com/BurntSushi/toml" - "github.com/influxdata/influxdb/v2/v1/services/meta" -) - -func TestConfig_Parse(t *testing.T) { - // Parse configuration. - var c meta.Config - if _, err := toml.Decode(` -dir = "/tmp/foo" -logging-enabled = false -`, &c); err != nil { - t.Fatal(err) - } - - // Validate configuration. - if c.Dir != "/tmp/foo" { - t.Fatalf("unexpected dir: %s", c.Dir) - } else if c.LoggingEnabled { - t.Fatalf("unexpected logging enabled: %v", c.LoggingEnabled) - } -} diff --git a/v1/services/meta/context.go b/v1/services/meta/context.go deleted file mode 100644 index 33fc67c9663..00000000000 --- a/v1/services/meta/context.go +++ /dev/null @@ -1,22 +0,0 @@ -package meta - -import ( - "context" -) - -type key int - -const ( - userKey key = iota -) - -// NewContextWithUser returns a new context with user added. -func NewContextWithUser(ctx context.Context, user User) context.Context { - return context.WithValue(ctx, userKey, user) -} - -// UserFromContext returns the User associated with ctx or nil if no user has been assigned. -func UserFromContext(ctx context.Context) User { - l, _ := ctx.Value(userKey).(User) - return l -} diff --git a/v1/services/meta/data.go b/v1/services/meta/data.go deleted file mode 100644 index 17988a4430b..00000000000 --- a/v1/services/meta/data.go +++ /dev/null @@ -1,1816 +0,0 @@ -package meta - -import ( - "errors" - "fmt" - "net" - "net/url" - "sort" - "strings" - "sync" - "time" - "unicode" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/models" - influxdb "github.com/influxdata/influxdb/v2/v1" - internal "github.com/influxdata/influxdb/v2/v1/services/meta/internal" - "github.com/influxdata/influxql" - "google.golang.org/protobuf/proto" -) - -//go:generate protoc --go_out=internal/ internal/meta.proto - -const ( - // DefaultRetentionPolicyReplicaN is the default value of RetentionPolicyInfo.ReplicaN. - DefaultRetentionPolicyReplicaN = 1 - - // DefaultRetentionPolicyDuration is the default value of RetentionPolicyInfo.Duration. - DefaultRetentionPolicyDuration = time.Duration(0) - - // DefaultRetentionPolicyName is the default name for auto generated retention policies. - DefaultRetentionPolicyName = "autogen" - - // MinRetentionPolicyDuration represents the minimum duration for a policy. - MinRetentionPolicyDuration = time.Hour - - // MaxNameLen is the maximum length of a database or retention policy name. - // InfluxDB uses the name for the directory name on disk. - MaxNameLen = 255 -) - -// Data represents the top level collection of all metadata. -type Data struct { - Term uint64 // associated raft term - Index uint64 // associated raft index - ClusterID uint64 - Databases []DatabaseInfo - Users []UserInfo - - // adminUserExists provides a constant time mechanism for determining - // if there is at least one admin user. - adminUserExists bool - - MaxShardGroupID uint64 - MaxShardID uint64 -} - -// Database returns a DatabaseInfo by the database name. 
-func (data *Data) Database(name string) *DatabaseInfo { - for i := range data.Databases { - if data.Databases[i].Name == name { - return &data.Databases[i] - } - } - return nil -} - -// CloneDatabases returns a copy of the DatabaseInfo. -func (data *Data) CloneDatabases() []DatabaseInfo { - if data.Databases == nil { - return nil - } - dbs := make([]DatabaseInfo, len(data.Databases)) - for i := range data.Databases { - dbs[i] = data.Databases[i].clone() - } - return dbs -} - -// CreateDatabase creates a new database. -// It returns an error if name is blank or if a database with the same name already exists. -func (data *Data) CreateDatabase(name string) error { - if name == "" { - return ErrDatabaseNameRequired - } else if len(name) > MaxNameLen { - return ErrNameTooLong - } else if data.Database(name) != nil { - return nil - } - - // Append new node. - data.Databases = append(data.Databases, DatabaseInfo{Name: name}) - - return nil -} - -// DropDatabase removes a database by name. It does not return an error -// if the database cannot be found. -func (data *Data) DropDatabase(name string) error { - for i := range data.Databases { - if data.Databases[i].Name == name { - data.Databases = append(data.Databases[:i], data.Databases[i+1:]...) - - // Remove all user privileges associated with this database. - for i := range data.Users { - delete(data.Users[i].Privileges, name) - } - break - } - } - return nil -} - -// RetentionPolicy returns a retention policy for a database by name. -func (data *Data) RetentionPolicy(database, name string) (*RetentionPolicyInfo, error) { - di := data.Database(database) - if di == nil { - return nil, influxdb.ErrDatabaseNotFound(database) - } - - for i := range di.RetentionPolicies { - if di.RetentionPolicies[i].Name == name { - return &di.RetentionPolicies[i], nil - } - } - return nil, nil -} - -// CreateRetentionPolicy creates a new retention policy on a database. -// It returns an error if name is blank or if the database does not exist. -func (data *Data) CreateRetentionPolicy(database string, rpi *RetentionPolicyInfo, makeDefault bool) error { - // Validate retention policy. - if rpi == nil { - return ErrRetentionPolicyRequired - } else if rpi.Name == "" { - return ErrRetentionPolicyNameRequired - } else if len(rpi.Name) > MaxNameLen { - return ErrNameTooLong - } else if rpi.ReplicaN < 1 { - return ErrReplicationFactorTooLow - } - - // Normalise ShardDuration before comparing to any existing - // retention policies. The client is supposed to do this, but - // do it again to verify input. - rpi.ShardGroupDuration = NormalisedShardDuration(rpi.ShardGroupDuration, rpi.Duration) - - if rpi.Duration > 0 && rpi.Duration < rpi.ShardGroupDuration { - return ErrIncompatibleDurations - } - - // Find database. - di := data.Database(database) - if di == nil { - return influxdb.ErrDatabaseNotFound(database) - } else if rp := di.RetentionPolicy(rpi.Name); rp != nil { - // RP with that name already exists. Make sure they're the same. - if rp.ReplicaN != rpi.ReplicaN || rp.Duration != rpi.Duration || rp.ShardGroupDuration != rpi.ShardGroupDuration { - return ErrRetentionPolicyExists - } - // if they want to make it default, and it's not the default, it's not an identical command so it's an error - if makeDefault && di.DefaultRetentionPolicy != rpi.Name { - return ErrRetentionPolicyConflict - } - return nil - } - - // Append copy of new policy. 
- di.RetentionPolicies = append(di.RetentionPolicies, *rpi) - - // Set the default if needed - if makeDefault { - di.DefaultRetentionPolicy = rpi.Name - } - - return nil -} - -// DropRetentionPolicy removes a retention policy from a database by name. -func (data *Data) DropRetentionPolicy(database, name string) error { - // Find database. - di := data.Database(database) - if di == nil { - // no database? no problem - return nil - } - - // Remove from list. - for i := range di.RetentionPolicies { - if di.RetentionPolicies[i].Name == name { - di.RetentionPolicies = append(di.RetentionPolicies[:i], di.RetentionPolicies[i+1:]...) - break - } - } - - return nil -} - -// RetentionPolicyUpdate represents retention policy fields to be updated. -type RetentionPolicyUpdate struct { - Name *string - Duration *time.Duration - ReplicaN *int - ShardGroupDuration *time.Duration -} - -// SetName sets the RetentionPolicyUpdate.Name. -func (rpu *RetentionPolicyUpdate) SetName(v string) { rpu.Name = &v } - -// SetDuration sets the RetentionPolicyUpdate.Duration. -func (rpu *RetentionPolicyUpdate) SetDuration(v time.Duration) { rpu.Duration = &v } - -// SetReplicaN sets the RetentionPolicyUpdate.ReplicaN. -func (rpu *RetentionPolicyUpdate) SetReplicaN(v int) { rpu.ReplicaN = &v } - -// SetShardGroupDuration sets the RetentionPolicyUpdate.ShardGroupDuration. -func (rpu *RetentionPolicyUpdate) SetShardGroupDuration(v time.Duration) { rpu.ShardGroupDuration = &v } - -// UpdateRetentionPolicy updates an existing retention policy. -func (data *Data) UpdateRetentionPolicy(database, name string, rpu *RetentionPolicyUpdate, makeDefault bool) error { - // Find database. - di := data.Database(database) - if di == nil { - return influxdb.ErrDatabaseNotFound(database) - } - - // Find policy. - rpi := di.RetentionPolicy(name) - if rpi == nil { - return influxdb.ErrRetentionPolicyNotFound(name) - } - - // Ensure new policy doesn't match an existing policy. - if rpu.Name != nil && *rpu.Name != name && di.RetentionPolicy(*rpu.Name) != nil { - return ErrRetentionPolicyNameExists - } - - // Enforce duration of at least MinRetentionPolicyDuration - if rpu.Duration != nil && *rpu.Duration < MinRetentionPolicyDuration && *rpu.Duration != 0 { - return ErrRetentionPolicyDurationTooLow - } - - // Enforce duration is at least the shard duration - if (rpu.Duration != nil && *rpu.Duration > 0 && - ((rpu.ShardGroupDuration != nil && *rpu.Duration < *rpu.ShardGroupDuration) || - (rpu.ShardGroupDuration == nil && *rpu.Duration < rpi.ShardGroupDuration))) || - (rpu.Duration == nil && rpi.Duration > 0 && - rpu.ShardGroupDuration != nil && rpi.Duration < *rpu.ShardGroupDuration) { - return ErrIncompatibleDurations - } - - // Update fields. - if rpu.Name != nil { - rpi.Name = *rpu.Name - } - if rpu.Duration != nil { - rpi.Duration = *rpu.Duration - } - if rpu.ReplicaN != nil { - rpi.ReplicaN = *rpu.ReplicaN - } - if rpu.ShardGroupDuration != nil { - rpi.ShardGroupDuration = NormalisedShardDuration(*rpu.ShardGroupDuration, rpi.Duration) - } - - if di.DefaultRetentionPolicy != rpi.Name && makeDefault { - di.DefaultRetentionPolicy = rpi.Name - } - - return nil -} - -// DropShard removes a shard by ID. -// -// DropShard won't return an error if the shard can't be found, which -// allows the command to be re-run in the case that the meta store -// succeeds but a data node fails. 
-func (data *Data) DropShard(id uint64) { - found := -1 - for dbidx, dbi := range data.Databases { - for rpidx, rpi := range dbi.RetentionPolicies { - for sgidx, sg := range rpi.ShardGroups { - for sidx, s := range sg.Shards { - if s.ID == id { - found = sidx - break - } - } - - if found > -1 { - shards := sg.Shards - data.Databases[dbidx].RetentionPolicies[rpidx].ShardGroups[sgidx].Shards = append(shards[:found], shards[found+1:]...) - - if len(shards) == 1 { - // We just deleted the last shard in the shard group. - data.Databases[dbidx].RetentionPolicies[rpidx].ShardGroups[sgidx].DeletedAt = time.Now() - } - return - } - } - } - } -} - -// ShardGroups returns a list of all shard groups on a database and retention policy. -func (data *Data) ShardGroups(database, policy string) ([]ShardGroupInfo, error) { - // Find retention policy. - rpi, err := data.RetentionPolicy(database, policy) - if err != nil { - return nil, err - } else if rpi == nil { - return nil, influxdb.ErrRetentionPolicyNotFound(policy) - } - groups := make([]ShardGroupInfo, 0, len(rpi.ShardGroups)) - for _, g := range rpi.ShardGroups { - if g.Deleted() { - continue - } - groups = append(groups, g) - } - return groups, nil -} - -// ShardGroupsByTimeRange returns a list of all shard groups on a database and policy that may contain data -// for the specified time range. Shard groups are sorted by start time. -func (data *Data) ShardGroupsByTimeRange(database, policy string, tmin, tmax time.Time) ([]ShardGroupInfo, error) { - // Find retention policy. - rpi, err := data.RetentionPolicy(database, policy) - if err != nil { - return nil, err - } else if rpi == nil { - return nil, influxdb.ErrRetentionPolicyNotFound(policy) - } - groups := make([]ShardGroupInfo, 0, len(rpi.ShardGroups)) - for _, g := range rpi.ShardGroups { - if g.Deleted() || !g.Overlaps(tmin, tmax) { - continue - } - groups = append(groups, g) - } - return groups, nil -} - -// ShardGroupByTimestamp returns the shard group on a database and policy for a given timestamp. -func (data *Data) ShardGroupByTimestamp(database, policy string, timestamp time.Time) (*ShardGroupInfo, error) { - // Find retention policy. - rpi, err := data.RetentionPolicy(database, policy) - if err != nil { - return nil, err - } else if rpi == nil { - return nil, influxdb.ErrRetentionPolicyNotFound(policy) - } - - return rpi.ShardGroupByTimestamp(timestamp), nil -} - -// CreateShardGroup creates a shard group on a database and policy for a given timestamp. -func (data *Data) CreateShardGroup(database, policy string, timestamp time.Time, shards ...ShardInfo) error { - // Find retention policy. - rpi, err := data.RetentionPolicy(database, policy) - if err != nil { - return err - } else if rpi == nil { - return influxdb.ErrRetentionPolicyNotFound(policy) - } - - // Verify that shard group doesn't already exist for this timestamp. - if rpi.ShardGroupByTimestamp(timestamp) != nil { - return nil - } - - startTime := timestamp.Truncate(rpi.ShardGroupDuration).UTC() - endTime := startTime.Add(rpi.ShardGroupDuration).UTC() - if endTime.After(time.Unix(0, models.MaxNanoTime)) { - // Shard group range is [start, end) so add one to the max time. 
- endTime = time.Unix(0, models.MaxNanoTime+1) - } - - for i := range rpi.ShardGroups { - if rpi.ShardGroups[i].Deleted() { - continue - } - startI := rpi.ShardGroups[i].StartTime - endI := rpi.ShardGroups[i].EndTime - if rpi.ShardGroups[i].Truncated() { - endI = rpi.ShardGroups[i].TruncatedAt - } - - // shard_i covers range [start_i, end_i) - // We want the largest range [startTime, endTime) such that all of the following hold: - // startTime <= timestamp < endTime - // for all i, not { start_i < endTime && startTime < end_i } - // Assume the above conditions are true for shards index < i, we want to modify startTime,endTime so they are true - // also for shard_i - - // It must be the case that either endI <= timestamp || timestamp < startI, because otherwise: - // startI <= timestamp < endI means timestamp is contained in shard I - if !timestamp.Before(endI) && endI.After(startTime) { - // startTime < endI <= timestamp - startTime = endI - } - if startI.After(timestamp) && startI.Before(endTime) { - // timestamp < startI < endTime - endTime = startI - } - } - - // Create the shard group. - data.MaxShardGroupID++ - sgi := ShardGroupInfo{} - sgi.ID = data.MaxShardGroupID - sgi.StartTime = startTime - sgi.EndTime = endTime - - if len(shards) > 0 { - sgi.Shards = make([]ShardInfo, len(shards)) - for i, si := range shards { - sgi.Shards[i] = si - if si.ID > data.MaxShardID { - data.MaxShardID = si.ID - } - } - } else { - data.MaxShardID++ - sgi.Shards = []ShardInfo{ - {ID: data.MaxShardID}, - } - } - - // Retention policy has a new shard group, so update the policy. Shard - // Groups must be stored in sorted order, as other parts of the system - // assume this to be the case. - rpi.ShardGroups = append(rpi.ShardGroups, sgi) - sort.Sort(ShardGroupInfos(rpi.ShardGroups)) - - return nil -} - -// DeleteShardGroup removes a shard group from a database and retention policy by id. -func (data *Data) DeleteShardGroup(database, policy string, id uint64) error { - // Find retention policy. - rpi, err := data.RetentionPolicy(database, policy) - if err != nil { - return err - } else if rpi == nil { - return influxdb.ErrRetentionPolicyNotFound(policy) - } - - // Find shard group by ID and set its deletion timestamp. - for i := range rpi.ShardGroups { - if rpi.ShardGroups[i].ID == id { - rpi.ShardGroups[i].DeletedAt = time.Now().UTC() - return nil - } - } - - return ErrShardGroupNotFound -} - -// CreateContinuousQuery adds a named continuous query to a database. -func (data *Data) CreateContinuousQuery(database, name, query string) error { - di := data.Database(database) - if di == nil { - return influxdb.ErrDatabaseNotFound(database) - } - - // Ensure the name doesn't already exist. - for _, cq := range di.ContinuousQueries { - if cq.Name == name { - // If the query string is the same, we'll silently return, - // otherwise we'll assume the user might be trying to - // overwrite an existing CQ with a different query. - //lint:ignore SA6005 this is old code so we should revisit the use of strings.EqualFold - if strings.ToLower(cq.Query) == strings.ToLower(query) { - return nil - } - return ErrContinuousQueryExists - } - } - - // Append new query. - di.ContinuousQueries = append(di.ContinuousQueries, ContinuousQueryInfo{ - Name: name, - Query: query, - }) - - return nil -} - -// DropContinuousQuery removes a continuous query. 
-func (data *Data) DropContinuousQuery(database, name string) error { - di := data.Database(database) - if di == nil { - return nil - } - - for i := range di.ContinuousQueries { - if di.ContinuousQueries[i].Name == name { - di.ContinuousQueries = append(di.ContinuousQueries[:i], di.ContinuousQueries[i+1:]...) - return nil - } - } - return nil -} - -// validateURL returns an error if the URL does not have a port or uses a scheme other than UDP or HTTP. -func validateURL(input string) error { - u, err := url.Parse(input) - if err != nil { - return ErrInvalidSubscriptionURL(input) - } - - if u.Scheme != "udp" && u.Scheme != "http" && u.Scheme != "https" { - return ErrInvalidSubscriptionURL(input) - } - - _, port, err := net.SplitHostPort(u.Host) - if err != nil || port == "" { - return ErrInvalidSubscriptionURL(input) - } - - return nil -} - -// CreateSubscription adds a named subscription to a database and retention policy. -func (data *Data) CreateSubscription(database, rp, name, mode string, destinations []string) error { - for _, d := range destinations { - if err := validateURL(d); err != nil { - return err - } - } - - rpi, err := data.RetentionPolicy(database, rp) - if err != nil { - return err - } else if rpi == nil { - return influxdb.ErrRetentionPolicyNotFound(rp) - } - - // Ensure the name doesn't already exist. - for i := range rpi.Subscriptions { - if rpi.Subscriptions[i].Name == name { - return ErrSubscriptionExists - } - } - - // Append new query. - rpi.Subscriptions = append(rpi.Subscriptions, SubscriptionInfo{ - Name: name, - Mode: mode, - Destinations: destinations, - }) - - return nil -} - -// DropSubscription removes a subscription. -func (data *Data) DropSubscription(database, rp, name string) error { - rpi, err := data.RetentionPolicy(database, rp) - if err != nil { - return err - } else if rpi == nil { - return influxdb.ErrRetentionPolicyNotFound(rp) - } - - for i := range rpi.Subscriptions { - if rpi.Subscriptions[i].Name == name { - rpi.Subscriptions = append(rpi.Subscriptions[:i], rpi.Subscriptions[i+1:]...) - return nil - } - } - return ErrSubscriptionNotFound -} - -func (data *Data) user(username string) *UserInfo { - for i := range data.Users { - if data.Users[i].Name == username { - return &data.Users[i] - } - } - return nil -} - -// User returns a user by username. -func (data *Data) User(username string) User { - u := data.user(username) - if u == nil { - // prevent non-nil interface with nil pointer - return nil - } - return u -} - -// CreateUser creates a new user. -func (data *Data) CreateUser(name, hash string, admin bool) error { - // Ensure the user doesn't already exist. - if name == "" { - return ErrUsernameRequired - } else if data.User(name) != nil { - return ErrUserExists - } - - // Append new user. - data.Users = append(data.Users, UserInfo{ - Name: name, - Hash: hash, - Admin: admin, - }) - - // We know there is now at least one admin user. - if admin { - data.adminUserExists = true - } - - return nil -} - -// DropUser removes an existing user by name. -func (data *Data) DropUser(name string) error { - for i := range data.Users { - if data.Users[i].Name == name { - wasAdmin := data.Users[i].Admin - data.Users = append(data.Users[:i], data.Users[i+1:]...) - - // Maybe we dropped the only admin user? - if wasAdmin { - data.adminUserExists = data.hasAdminUser() - } - return nil - } - } - - return ErrUserNotFound -} - -// UpdateUser updates the password hash of an existing user. 
-func (data *Data) UpdateUser(name, hash string) error { - for i := range data.Users { - if data.Users[i].Name == name { - data.Users[i].Hash = hash - return nil - } - } - return ErrUserNotFound -} - -// CloneUsers returns a copy of the user infos. -func (data *Data) CloneUsers() []UserInfo { - if len(data.Users) == 0 { - return []UserInfo{} - } - users := make([]UserInfo, len(data.Users)) - for i := range data.Users { - users[i] = data.Users[i].clone() - } - - return users -} - -// SetPrivilege sets a privilege for a user on a database. -func (data *Data) SetPrivilege(name, database string, p influxql.Privilege) error { - ui := data.user(name) - if ui == nil { - return ErrUserNotFound - } - - if data.Database(database) == nil { - return influxdb.ErrDatabaseNotFound(database) - } - - if ui.Privileges == nil { - ui.Privileges = make(map[string]influxql.Privilege) - } - ui.Privileges[database] = p - - return nil -} - -// SetAdminPrivilege sets the admin privilege for a user. -func (data *Data) SetAdminPrivilege(name string, admin bool) error { - ui := data.user(name) - if ui == nil { - return ErrUserNotFound - } - - ui.Admin = admin - - // We could have promoted or revoked the only admin. Check if an admin - // user exists. - data.adminUserExists = data.hasAdminUser() - return nil -} - -// AdminUserExists returns true if an admin user exists. -func (data Data) AdminUserExists() bool { - return data.adminUserExists -} - -// UserPrivileges gets the privileges for a user. -func (data *Data) UserPrivileges(name string) (map[string]influxql.Privilege, error) { - ui := data.user(name) - if ui == nil { - return nil, ErrUserNotFound - } - - return ui.Privileges, nil -} - -// UserPrivilege gets the privilege for a user on a database. -func (data *Data) UserPrivilege(name, database string) (*influxql.Privilege, error) { - ui := data.user(name) - if ui == nil { - return nil, ErrUserNotFound - } - - for db, p := range ui.Privileges { - if db == database { - return &p, nil - } - } - - return influxql.NewPrivilege(influxql.NoPrivileges), nil -} - -// Clone returns a copy of data with a new version. -func (data *Data) Clone() *Data { - other := *data - - other.Databases = data.CloneDatabases() - other.Users = data.CloneUsers() - - return &other -} - -// marshal serializes data to a protobuf representation. -func (data *Data) marshal() *internal.Data { - pb := &internal.Data{ - Term: proto.Uint64(data.Term), - Index: proto.Uint64(data.Index), - ClusterID: proto.Uint64(data.ClusterID), - - MaxShardGroupID: proto.Uint64(data.MaxShardGroupID), - MaxShardID: proto.Uint64(data.MaxShardID), - - // Need this for reverse compatibility - MaxNodeID: proto.Uint64(0), - } - - pb.Databases = make([]*internal.DatabaseInfo, len(data.Databases)) - for i := range data.Databases { - pb.Databases[i] = data.Databases[i].marshal() - } - - pb.Users = make([]*internal.UserInfo, len(data.Users)) - for i := range data.Users { - pb.Users[i] = data.Users[i].marshal() - } - - return pb -} - -// unmarshal deserializes from a protobuf representation. 
-func (data *Data) unmarshal(pb *internal.Data) { - data.Term = pb.GetTerm() - data.Index = pb.GetIndex() - data.ClusterID = pb.GetClusterID() - - data.MaxShardGroupID = pb.GetMaxShardGroupID() - data.MaxShardID = pb.GetMaxShardID() - - data.Databases = make([]DatabaseInfo, len(pb.GetDatabases())) - for i, x := range pb.GetDatabases() { - data.Databases[i].unmarshal(x) - } - - data.Users = make([]UserInfo, len(pb.GetUsers())) - for i, x := range pb.GetUsers() { - data.Users[i].unmarshal(x) - } - - // Exhaustively determine if there is an admin user. The marshalled cache - // value may not be correct. - data.adminUserExists = data.hasAdminUser() -} - -// MarshalBinary encodes the metadata to a binary format. -func (data *Data) MarshalBinary() ([]byte, error) { - return proto.Marshal(data.marshal()) -} - -// UnmarshalBinary decodes the object from a binary format. -func (data *Data) UnmarshalBinary(buf []byte) error { - var pb internal.Data - if err := proto.Unmarshal(buf, &pb); err != nil { - return err - } - data.unmarshal(&pb) - return nil -} - -// TruncateShardGroups truncates any shard group that could contain timestamps beyond t. -func (data *Data) TruncateShardGroups(t time.Time) { - for i := range data.Databases { - dbi := &data.Databases[i] - - for j := range dbi.RetentionPolicies { - rpi := &dbi.RetentionPolicies[j] - - for k := range rpi.ShardGroups { - sgi := &rpi.ShardGroups[k] - - if !t.Before(sgi.EndTime) || sgi.Deleted() || (sgi.Truncated() && sgi.TruncatedAt.Before(t)) { - continue - } - - if !t.After(sgi.StartTime) { - // future shardgroup - sgi.TruncatedAt = sgi.StartTime - } else { - sgi.TruncatedAt = t - } - } - } - } -} - -// hasAdminUser exhaustively checks for the presence of at least one admin -// user. -func (data *Data) hasAdminUser() bool { - for _, u := range data.Users { - if u.Admin { - return true - } - } - return false -} - -// ImportData imports selected data into the current metadata. -// if non-empty, backupDBName, restoreDBName, backupRPName, restoreRPName can be used to select DB metadata from other, -// and to assign a new name to the imported data. Returns a map of shard ID's in the old metadata to new shard ID's -// in the new metadata, along with a list of new databases created, both of which can assist in the import of existing -// shard data during a database restore. -func (data *Data) ImportData(other Data, backupDBName, restoreDBName, backupRPName, restoreRPName string) (map[uint64]uint64, []string, error) { - shardIDMap := make(map[uint64]uint64) - if backupDBName != "" { - dbName, err := data.importOneDB(other, backupDBName, restoreDBName, backupRPName, restoreRPName, shardIDMap) - if err != nil { - return nil, nil, err - } - - return shardIDMap, []string{dbName}, nil - } - - // if no backupDBName then we'll try to import all the DB's. If one of them fails, we'll mark the whole - // operation a failure and return an error. - var newDBs []string - for _, dbi := range other.Databases { - if dbi.Name == "_internal" { - continue - } - dbName, err := data.importOneDB(other, dbi.Name, "", "", "", shardIDMap) - if err != nil { - return nil, nil, err - } - newDBs = append(newDBs, dbName) - } - return shardIDMap, newDBs, nil -} - -// importOneDB imports a single database/rp from an external metadata object, renaming them if new names are provided. 
-func (data *Data) importOneDB(other Data, backupDBName, restoreDBName, backupRPName, restoreRPName string, shardIDMap map[uint64]uint64) (string, error) { - - dbPtr := other.Database(backupDBName) - if dbPtr == nil { - return "", fmt.Errorf("imported metadata does not have datbase named %s", backupDBName) - } - - if restoreDBName == "" { - restoreDBName = backupDBName - } - - if data.Database(restoreDBName) != nil { - return "", errors.New("database already exists") - } - - // change the names if we want/need to - err := data.CreateDatabase(restoreDBName) - if err != nil { - return "", err - } - dbImport := data.Database(restoreDBName) - - if backupRPName != "" { - rpPtr := dbPtr.RetentionPolicy(backupRPName) - - if rpPtr != nil { - rpImport := rpPtr.clone() - if restoreRPName == "" { - restoreRPName = backupRPName - } - rpImport.Name = restoreRPName - dbImport.RetentionPolicies = []RetentionPolicyInfo{rpImport} - dbImport.DefaultRetentionPolicy = restoreRPName - } else { - return "", fmt.Errorf("retention Policy not found in meta backup: %s.%s", backupDBName, backupRPName) - } - - } else { // import all RP's without renaming - dbImport.DefaultRetentionPolicy = dbPtr.DefaultRetentionPolicy - if dbPtr.RetentionPolicies != nil { - dbImport.RetentionPolicies = make([]RetentionPolicyInfo, len(dbPtr.RetentionPolicies)) - for i := range dbPtr.RetentionPolicies { - dbImport.RetentionPolicies[i] = dbPtr.RetentionPolicies[i].clone() - } - } - - } - - // renumber the shard groups and shards for the new retention policy(ies) - for _, rpImport := range dbImport.RetentionPolicies { - for j, sgImport := range rpImport.ShardGroups { - data.MaxShardGroupID++ - rpImport.ShardGroups[j].ID = data.MaxShardGroupID - for k := range sgImport.Shards { - data.MaxShardID++ - shardIDMap[sgImport.Shards[k].ID] = data.MaxShardID - sgImport.Shards[k].ID = data.MaxShardID - // OSS doesn't use Owners but if we are importing this from Enterprise, we'll want to clear it out - // to avoid any issues if they ever export this DB again to bring back to Enterprise. - sgImport.Shards[k].Owners = []ShardOwner{} - } - } - } - - return restoreDBName, nil -} - -// NodeInfo represents information about a single node in the cluster. -type NodeInfo struct { - ID uint64 - Host string - TCPHost string -} - -// NodeInfos is a slice of NodeInfo used for sorting -type NodeInfos []NodeInfo - -// Len implements sort.Interface. -func (n NodeInfos) Len() int { return len(n) } - -// Swap implements sort.Interface. -func (n NodeInfos) Swap(i, j int) { n[i], n[j] = n[j], n[i] } - -// Less implements sort.Interface. -func (n NodeInfos) Less(i, j int) bool { return n[i].ID < n[j].ID } - -// DatabaseInfo represents information about a database in the system. -type DatabaseInfo struct { - Name string - DefaultRetentionPolicy string - RetentionPolicies []RetentionPolicyInfo - ContinuousQueries []ContinuousQueryInfo -} - -// RetentionPolicy returns a retention policy by name. -func (di DatabaseInfo) RetentionPolicy(name string) *RetentionPolicyInfo { - if name == "" { - if di.DefaultRetentionPolicy == "" { - return nil - } - name = di.DefaultRetentionPolicy - } - - for i := range di.RetentionPolicies { - if di.RetentionPolicies[i].Name == name { - return &di.RetentionPolicies[i] - } - } - return nil -} - -// ShardInfos returns a list of all shards' info for the database. 
-func (di DatabaseInfo) ShardInfos() []ShardInfo { - shards := map[uint64]*ShardInfo{} - for i := range di.RetentionPolicies { - for j := range di.RetentionPolicies[i].ShardGroups { - sg := di.RetentionPolicies[i].ShardGroups[j] - // Skip deleted shard groups - if sg.Deleted() { - continue - } - for k := range sg.Shards { - si := &di.RetentionPolicies[i].ShardGroups[j].Shards[k] - shards[si.ID] = si - } - } - } - - infos := make([]ShardInfo, 0, len(shards)) - for _, info := range shards { - infos = append(infos, *info) - } - - return infos -} - -// clone returns a deep copy of di. -func (di DatabaseInfo) clone() DatabaseInfo { - other := di - - if di.RetentionPolicies != nil { - other.RetentionPolicies = make([]RetentionPolicyInfo, len(di.RetentionPolicies)) - for i := range di.RetentionPolicies { - other.RetentionPolicies[i] = di.RetentionPolicies[i].clone() - } - } - - // Copy continuous queries. - if di.ContinuousQueries != nil { - other.ContinuousQueries = make([]ContinuousQueryInfo, len(di.ContinuousQueries)) - for i := range di.ContinuousQueries { - other.ContinuousQueries[i] = di.ContinuousQueries[i].clone() - } - } - - return other -} - -// MarshalBinary encodes dbi to a binary format. -func (dbi *DatabaseInfo) MarshalBinary() ([]byte, error) { - return proto.Marshal(dbi.marshal()) -} - -// UnmarshalBinary decodes dbi from a binary format. -func (dbi *DatabaseInfo) UnmarshalBinary(data []byte) error { - var pb internal.DatabaseInfo - if err := proto.Unmarshal(data, &pb); err != nil { - return err - } - dbi.unmarshal(&pb) - return nil -} - -// marshal serializes to a protobuf representation. -func (di DatabaseInfo) marshal() *internal.DatabaseInfo { - pb := &internal.DatabaseInfo{} - pb.Name = proto.String(di.Name) - pb.DefaultRetentionPolicy = proto.String(di.DefaultRetentionPolicy) - - pb.RetentionPolicies = make([]*internal.RetentionPolicyInfo, len(di.RetentionPolicies)) - for i := range di.RetentionPolicies { - pb.RetentionPolicies[i] = di.RetentionPolicies[i].marshal() - } - - pb.ContinuousQueries = make([]*internal.ContinuousQueryInfo, len(di.ContinuousQueries)) - for i := range di.ContinuousQueries { - pb.ContinuousQueries[i] = di.ContinuousQueries[i].marshal() - } - return pb -} - -// unmarshal deserializes from a protobuf representation. -func (di *DatabaseInfo) unmarshal(pb *internal.DatabaseInfo) { - di.Name = pb.GetName() - di.DefaultRetentionPolicy = pb.GetDefaultRetentionPolicy() - - if len(pb.GetRetentionPolicies()) > 0 { - di.RetentionPolicies = make([]RetentionPolicyInfo, len(pb.GetRetentionPolicies())) - for i, x := range pb.GetRetentionPolicies() { - di.RetentionPolicies[i].unmarshal(x) - } - } - - if len(pb.GetContinuousQueries()) > 0 { - di.ContinuousQueries = make([]ContinuousQueryInfo, len(pb.GetContinuousQueries())) - for i, x := range pb.GetContinuousQueries() { - di.ContinuousQueries[i].unmarshal(x) - } - } -} - -// RetentionPolicySpec represents the specification for a new retention policy. -type RetentionPolicySpec struct { - Name string - ReplicaN *int - Duration *time.Duration - ShardGroupDuration time.Duration -} - -// NewRetentionPolicyInfo creates a new retention policy info from the specification. -func (s *RetentionPolicySpec) NewRetentionPolicyInfo() *RetentionPolicyInfo { - return DefaultRetentionPolicyInfo().Apply(s) -} - -// Matches checks if this retention policy specification matches -// an existing retention policy. 
-func (s *RetentionPolicySpec) Matches(rpi *RetentionPolicyInfo) bool { - if rpi == nil { - return false - } else if s.Name != "" && s.Name != rpi.Name { - return false - } else if s.Duration != nil && *s.Duration != rpi.Duration { - return false - } else if s.ReplicaN != nil && *s.ReplicaN != rpi.ReplicaN { - return false - } - - // Normalise ShardDuration before comparing to any existing retention policies. - // Normalize with the retention policy info's duration instead of the spec - // since they should be the same and we're performing a comparison. - sgDuration := NormalisedShardDuration(s.ShardGroupDuration, rpi.Duration) - return sgDuration == rpi.ShardGroupDuration -} - -// marshal serializes to a protobuf representation. -func (s *RetentionPolicySpec) marshal() *internal.RetentionPolicySpec { - pb := &internal.RetentionPolicySpec{} - if s.Name != "" { - pb.Name = proto.String(s.Name) - } - if s.Duration != nil { - pb.Duration = proto.Int64(int64(*s.Duration)) - } - if s.ShardGroupDuration > 0 { - pb.ShardGroupDuration = proto.Int64(int64(s.ShardGroupDuration)) - } - if s.ReplicaN != nil { - pb.ReplicaN = proto.Uint32(uint32(*s.ReplicaN)) - } - return pb -} - -// unmarshal deserializes from a protobuf representation. -func (s *RetentionPolicySpec) unmarshal(pb *internal.RetentionPolicySpec) { - if pb.Name != nil { - s.Name = pb.GetName() - } - if pb.Duration != nil { - duration := time.Duration(pb.GetDuration()) - s.Duration = &duration - } - if pb.ShardGroupDuration != nil { - s.ShardGroupDuration = time.Duration(pb.GetShardGroupDuration()) - } - if pb.ReplicaN != nil { - replicaN := int(pb.GetReplicaN()) - s.ReplicaN = &replicaN - } -} - -// MarshalBinary encodes RetentionPolicySpec to a binary format. -func (s *RetentionPolicySpec) MarshalBinary() ([]byte, error) { - return proto.Marshal(s.marshal()) -} - -// UnmarshalBinary decodes RetentionPolicySpec from a binary format. -func (s *RetentionPolicySpec) UnmarshalBinary(data []byte) error { - var pb internal.RetentionPolicySpec - if err := proto.Unmarshal(data, &pb); err != nil { - return err - } - s.unmarshal(&pb) - return nil -} - -// RetentionPolicyInfo represents metadata about a retention policy. -type RetentionPolicyInfo struct { - Name string - ReplicaN int - Duration time.Duration - ShardGroupDuration time.Duration - ShardGroups []ShardGroupInfo - Subscriptions []SubscriptionInfo -} - -// NewRetentionPolicyInfo returns a new instance of RetentionPolicyInfo -// with default replication and duration. -func NewRetentionPolicyInfo(name string) *RetentionPolicyInfo { - return &RetentionPolicyInfo{ - Name: name, - ReplicaN: DefaultRetentionPolicyReplicaN, - Duration: DefaultRetentionPolicyDuration, - } -} - -// DefaultRetentionPolicyInfo returns a new instance of RetentionPolicyInfo -// with default name, replication, and duration. -func DefaultRetentionPolicyInfo() *RetentionPolicyInfo { - return NewRetentionPolicyInfo(DefaultRetentionPolicyName) -} - -// ToSpec returns RetentionPolicySpec instance with the same data as in RetentionPolicyInfo -func (rpi *RetentionPolicyInfo) ToSpec() *RetentionPolicySpec { - return &RetentionPolicySpec{ - Name: rpi.Name, - ReplicaN: &rpi.ReplicaN, - Duration: &rpi.Duration, - ShardGroupDuration: rpi.ShardGroupDuration, - } -} - -// Apply applies a specification to the retention policy info. 
-func (rpi *RetentionPolicyInfo) Apply(spec *RetentionPolicySpec) *RetentionPolicyInfo { - rp := &RetentionPolicyInfo{ - Name: rpi.Name, - ReplicaN: rpi.ReplicaN, - Duration: rpi.Duration, - ShardGroupDuration: rpi.ShardGroupDuration, - } - if spec.Name != "" { - rp.Name = spec.Name - } - if spec.ReplicaN != nil { - rp.ReplicaN = *spec.ReplicaN - } - if spec.Duration != nil { - rp.Duration = *spec.Duration - } - rp.ShardGroupDuration = NormalisedShardDuration(spec.ShardGroupDuration, rp.Duration) - return rp -} - -// ShardGroupByTimestamp returns the shard group in the policy that contains the timestamp, -// or nil if no shard group matches. -func (rpi *RetentionPolicyInfo) ShardGroupByTimestamp(timestamp time.Time) *ShardGroupInfo { - for i := range rpi.ShardGroups { - sgi := &rpi.ShardGroups[i] - if sgi.Contains(timestamp) && !sgi.Deleted() && (!sgi.Truncated() || timestamp.Before(sgi.TruncatedAt)) { - return &rpi.ShardGroups[i] - } - } - - return nil -} - -// ExpiredShardGroups returns the Shard Groups which are considered expired, for the given time. -func (rpi *RetentionPolicyInfo) ExpiredShardGroups(t time.Time) []*ShardGroupInfo { - var groups = make([]*ShardGroupInfo, 0) - for i := range rpi.ShardGroups { - if rpi.ShardGroups[i].Deleted() { - continue - } - if rpi.Duration != 0 && rpi.ShardGroups[i].EndTime.Add(rpi.Duration).Before(t) { - groups = append(groups, &rpi.ShardGroups[i]) - } - } - return groups -} - -// DeletedShardGroups returns the Shard Groups which are marked as deleted. -func (rpi *RetentionPolicyInfo) DeletedShardGroups() []*ShardGroupInfo { - var groups = make([]*ShardGroupInfo, 0) - for i := range rpi.ShardGroups { - if rpi.ShardGroups[i].Deleted() { - groups = append(groups, &rpi.ShardGroups[i]) - } - } - return groups -} - -// marshal serializes to a protobuf representation. -func (rpi *RetentionPolicyInfo) marshal() *internal.RetentionPolicyInfo { - pb := &internal.RetentionPolicyInfo{ - Name: proto.String(rpi.Name), - ReplicaN: proto.Uint32(uint32(rpi.ReplicaN)), - Duration: proto.Int64(int64(rpi.Duration)), - ShardGroupDuration: proto.Int64(int64(rpi.ShardGroupDuration)), - } - - pb.ShardGroups = make([]*internal.ShardGroupInfo, len(rpi.ShardGroups)) - for i, sgi := range rpi.ShardGroups { - pb.ShardGroups[i] = sgi.marshal() - } - - pb.Subscriptions = make([]*internal.SubscriptionInfo, len(rpi.Subscriptions)) - for i, sub := range rpi.Subscriptions { - pb.Subscriptions[i] = sub.marshal() - } - - return pb -} - -// unmarshal deserializes from a protobuf representation. -func (rpi *RetentionPolicyInfo) unmarshal(pb *internal.RetentionPolicyInfo) { - rpi.Name = pb.GetName() - rpi.ReplicaN = int(pb.GetReplicaN()) - rpi.Duration = time.Duration(pb.GetDuration()) - rpi.ShardGroupDuration = time.Duration(pb.GetShardGroupDuration()) - - if len(pb.GetShardGroups()) > 0 { - rpi.ShardGroups = make([]ShardGroupInfo, len(pb.GetShardGroups())) - for i, x := range pb.GetShardGroups() { - rpi.ShardGroups[i].unmarshal(x) - } - } - if len(pb.GetSubscriptions()) > 0 { - rpi.Subscriptions = make([]SubscriptionInfo, len(pb.GetSubscriptions())) - for i, x := range pb.GetSubscriptions() { - rpi.Subscriptions[i].unmarshal(x) - } - } -} - -// clone returns a deep copy of rpi. 
-func (rpi RetentionPolicyInfo) clone() RetentionPolicyInfo { - other := rpi - - if rpi.ShardGroups != nil { - other.ShardGroups = make([]ShardGroupInfo, len(rpi.ShardGroups)) - for i := range rpi.ShardGroups { - other.ShardGroups[i] = rpi.ShardGroups[i].clone() - } - } - - return other -} - -// MarshalBinary encodes rpi to a binary format. -func (rpi *RetentionPolicyInfo) MarshalBinary() ([]byte, error) { - return proto.Marshal(rpi.marshal()) -} - -// UnmarshalBinary decodes rpi from a binary format. -func (rpi *RetentionPolicyInfo) UnmarshalBinary(data []byte) error { - var pb internal.RetentionPolicyInfo - if err := proto.Unmarshal(data, &pb); err != nil { - return err - } - rpi.unmarshal(&pb) - return nil -} - -// shardGroupDuration returns the default duration for a shard group based on a policy duration. -func shardGroupDuration(d time.Duration) time.Duration { - if d >= 180*24*time.Hour || d == 0 { // 6 months or 0 - return 7 * 24 * time.Hour - } else if d >= 2*24*time.Hour { // 2 days - return 1 * 24 * time.Hour - } - return 1 * time.Hour -} - -// NormalisedShardDuration returns normalised shard duration based on a policy duration. -func NormalisedShardDuration(sgd, d time.Duration) time.Duration { - // If it is zero, it likely wasn't specified, so we default to the shard group duration - if sgd == 0 { - return shardGroupDuration(d) - } - // If it was specified, but it's less than the MinRetentionPolicyDuration, then normalize - // to the MinRetentionPolicyDuration - if sgd < MinRetentionPolicyDuration { - return shardGroupDuration(MinRetentionPolicyDuration) - } - return sgd -} - -// ShardGroupInfo represents metadata about a shard group. The DeletedAt field is important -// because it makes it clear that a ShardGroup has been marked as deleted, and allow the system -// to be sure that a ShardGroup is not simply missing. If the DeletedAt is set, the system can -// safely delete any associated shards. -type ShardGroupInfo struct { - ID uint64 - StartTime time.Time - EndTime time.Time - DeletedAt time.Time - Shards []ShardInfo - TruncatedAt time.Time -} - -// ShardGroupInfos implements sort.Interface on []ShardGroupInfo, based -// on the StartTime field. -type ShardGroupInfos []ShardGroupInfo - -// Len implements sort.Interface. -func (a ShardGroupInfos) Len() int { return len(a) } - -// Swap implements sort.Interface. -func (a ShardGroupInfos) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -// Less implements sort.Interface. -func (a ShardGroupInfos) Less(i, j int) bool { - iEnd := a[i].EndTime - if a[i].Truncated() { - iEnd = a[i].TruncatedAt - } - - jEnd := a[j].EndTime - if a[j].Truncated() { - jEnd = a[j].TruncatedAt - } - - if iEnd.Equal(jEnd) { - return a[i].StartTime.Before(a[j].StartTime) - } - - return iEnd.Before(jEnd) -} - -// Contains returns true iif StartTime ≤ t < EndTime. -func (sgi *ShardGroupInfo) Contains(t time.Time) bool { - return !t.Before(sgi.StartTime) && t.Before(sgi.EndTime) -} - -// Overlaps returns whether the shard group contains data for the time range between min and max -func (sgi *ShardGroupInfo) Overlaps(min, max time.Time) bool { - return !sgi.StartTime.After(max) && sgi.EndTime.After(min) -} - -// Deleted returns whether this ShardGroup has been deleted. -func (sgi *ShardGroupInfo) Deleted() bool { - return !sgi.DeletedAt.IsZero() -} - -// Truncated returns true if this ShardGroup has been truncated (no new writes). -func (sgi *ShardGroupInfo) Truncated() bool { - return !sgi.TruncatedAt.IsZero() -} - -// clone returns a deep copy of sgi. 
-func (sgi ShardGroupInfo) clone() ShardGroupInfo { - other := sgi - - if sgi.Shards != nil { - other.Shards = make([]ShardInfo, len(sgi.Shards)) - for i := range sgi.Shards { - other.Shards[i] = sgi.Shards[i].clone() - } - } - - return other -} - -// ShardFor returns the ShardInfo for a Point hash. -func (sgi *ShardGroupInfo) ShardFor(p models.Point) ShardInfo { - if len(sgi.Shards) == 1 { - return sgi.Shards[0] - } - - return sgi.Shards[p.HashID()%uint64(len(sgi.Shards))] -} - -// marshal serializes to a protobuf representation. -func (sgi *ShardGroupInfo) marshal() *internal.ShardGroupInfo { - pb := &internal.ShardGroupInfo{ - ID: proto.Uint64(sgi.ID), - StartTime: proto.Int64(MarshalTime(sgi.StartTime)), - EndTime: proto.Int64(MarshalTime(sgi.EndTime)), - DeletedAt: proto.Int64(MarshalTime(sgi.DeletedAt)), - } - - if !sgi.TruncatedAt.IsZero() { - pb.TruncatedAt = proto.Int64(MarshalTime(sgi.TruncatedAt)) - } - - pb.Shards = make([]*internal.ShardInfo, len(sgi.Shards)) - for i := range sgi.Shards { - pb.Shards[i] = sgi.Shards[i].marshal() - } - - return pb -} - -// unmarshal deserializes from a protobuf representation. -func (sgi *ShardGroupInfo) unmarshal(pb *internal.ShardGroupInfo) { - sgi.ID = pb.GetID() - if i := pb.GetStartTime(); i == 0 { - sgi.StartTime = time.Unix(0, 0).UTC() - } else { - sgi.StartTime = UnmarshalTime(i) - } - if i := pb.GetEndTime(); i == 0 { - sgi.EndTime = time.Unix(0, 0).UTC() - } else { - sgi.EndTime = UnmarshalTime(i) - } - sgi.DeletedAt = UnmarshalTime(pb.GetDeletedAt()) - - if pb != nil && pb.TruncatedAt != nil { - sgi.TruncatedAt = UnmarshalTime(pb.GetTruncatedAt()) - } - - if len(pb.GetShards()) > 0 { - sgi.Shards = make([]ShardInfo, len(pb.GetShards())) - for i, x := range pb.GetShards() { - sgi.Shards[i].unmarshal(x) - } - } -} - -// ShardInfo represents metadata about a shard. -type ShardInfo struct { - ID uint64 - Owners []ShardOwner -} - -// OwnedBy determines whether the shard's owner IDs includes nodeID. -func (si ShardInfo) OwnedBy(nodeID uint64) bool { - for _, so := range si.Owners { - if so.NodeID == nodeID { - return true - } - } - return false -} - -// clone returns a deep copy of si. -func (si ShardInfo) clone() ShardInfo { - other := si - - if si.Owners != nil { - other.Owners = make([]ShardOwner, len(si.Owners)) - for i := range si.Owners { - other.Owners[i] = si.Owners[i].clone() - } - } - - return other -} - -// marshal serializes to a protobuf representation. -func (si ShardInfo) marshal() *internal.ShardInfo { - pb := &internal.ShardInfo{ - ID: proto.Uint64(si.ID), - } - - pb.Owners = make([]*internal.ShardOwner, len(si.Owners)) - for i := range si.Owners { - pb.Owners[i] = si.Owners[i].marshal() - } - - return pb -} - -// UnmarshalBinary decodes the object from a binary format. -func (si *ShardInfo) UnmarshalBinary(buf []byte) error { - var pb internal.ShardInfo - if err := proto.Unmarshal(buf, &pb); err != nil { - return err - } - si.unmarshal(&pb) - return nil -} - -// unmarshal deserializes from a protobuf representation. -func (si *ShardInfo) unmarshal(pb *internal.ShardInfo) { - si.ID = pb.GetID() - - // If deprecated "OwnerIDs" exists then convert it to "Owners" format. 
- //lint:ignore SA1019 we need to check for the presence of the deprecated field so we can convert it - oldStyleOwnerIds := pb.GetOwnerIDs() - if len(oldStyleOwnerIds) > 0 { - si.Owners = make([]ShardOwner, len(oldStyleOwnerIds)) - for i, x := range oldStyleOwnerIds { - si.Owners[i].unmarshal(&internal.ShardOwner{ - NodeID: proto.Uint64(x), - }) - } - } else if len(pb.GetOwners()) > 0 { - si.Owners = make([]ShardOwner, len(pb.GetOwners())) - for i, x := range pb.GetOwners() { - si.Owners[i].unmarshal(x) - } - } -} - -// SubscriptionInfo holds the subscription information. -type SubscriptionInfo struct { - Name string - Mode string - Destinations []string -} - -// marshal serializes to a protobuf representation. -func (si SubscriptionInfo) marshal() *internal.SubscriptionInfo { - pb := &internal.SubscriptionInfo{ - Name: proto.String(si.Name), - Mode: proto.String(si.Mode), - } - - pb.Destinations = make([]string, len(si.Destinations)) - copy(pb.Destinations, si.Destinations) - return pb -} - -// unmarshal deserializes from a protobuf representation. -func (si *SubscriptionInfo) unmarshal(pb *internal.SubscriptionInfo) { - si.Name = pb.GetName() - si.Mode = pb.GetMode() - - if len(pb.GetDestinations()) > 0 { - si.Destinations = make([]string, len(pb.GetDestinations())) - copy(si.Destinations, pb.GetDestinations()) - } -} - -// ShardOwner represents a node that owns a shard. -type ShardOwner struct { - NodeID uint64 -} - -// clone returns a deep copy of so. -func (so ShardOwner) clone() ShardOwner { - return so -} - -// marshal serializes to a protobuf representation. -func (so ShardOwner) marshal() *internal.ShardOwner { - return &internal.ShardOwner{ - NodeID: proto.Uint64(so.NodeID), - } -} - -// unmarshal deserializes from a protobuf representation. -func (so *ShardOwner) unmarshal(pb *internal.ShardOwner) { - so.NodeID = pb.GetNodeID() -} - -// ContinuousQueryInfo represents metadata about a continuous query. -type ContinuousQueryInfo struct { - Name string - Query string -} - -// clone returns a deep copy of cqi. -func (cqi ContinuousQueryInfo) clone() ContinuousQueryInfo { return cqi } - -// marshal serializes to a protobuf representation. -func (cqi ContinuousQueryInfo) marshal() *internal.ContinuousQueryInfo { - return &internal.ContinuousQueryInfo{ - Name: proto.String(cqi.Name), - Query: proto.String(cqi.Query), - } -} - -// unmarshal deserializes from a protobuf representation. -func (cqi *ContinuousQueryInfo) unmarshal(pb *internal.ContinuousQueryInfo) { - cqi.Name = pb.GetName() - cqi.Query = pb.GetQuery() -} - -var _ query.Authorizer = (*UserInfo)(nil) - -// UserInfo represents metadata about a user in the system. -type UserInfo struct { - // User's name. - Name string - - // Hashed password. - Hash string - - // Whether the user is an admin, i.e. allowed to do everything. - Admin bool - - // Map of database name to granted privilege. - Privileges map[string]influxql.Privilege -} - -type User interface { - query.Authorizer - ID() string - AuthorizeUnrestricted() bool -} - -func (u *UserInfo) ID() string { - return u.Name -} - -// AuthorizeDatabase returns true if the user is authorized for the given privilege on the given database. 
-func (ui *UserInfo) AuthorizeDatabase(privilege influxql.Privilege, database string) bool { - if ui.Admin || privilege == influxql.NoPrivileges { - return true - } - p, ok := ui.Privileges[database] - return ok && (p == privilege || p == influxql.AllPrivileges) -} - -// AuthorizeSeriesRead is used to limit access per-series (enterprise only) -func (u *UserInfo) AuthorizeSeriesRead(database string, measurement []byte, tags models.Tags) bool { - return true -} - -// AuthorizeSeriesWrite is used to limit access per-series (enterprise only) -func (u *UserInfo) AuthorizeSeriesWrite(database string, measurement []byte, tags models.Tags) bool { - return true -} - -// AuthorizeUnrestricted allows admins to shortcut access checks. -func (u *UserInfo) AuthorizeUnrestricted() bool { - return u.Admin -} - -// clone returns a deep copy of si. -func (ui UserInfo) clone() UserInfo { - other := ui - - if ui.Privileges != nil { - other.Privileges = make(map[string]influxql.Privilege) - for k, v := range ui.Privileges { - other.Privileges[k] = v - } - } - - return other -} - -// marshal serializes to a protobuf representation. -func (ui UserInfo) marshal() *internal.UserInfo { - pb := &internal.UserInfo{ - Name: proto.String(ui.Name), - Hash: proto.String(ui.Hash), - Admin: proto.Bool(ui.Admin), - } - - for database, privilege := range ui.Privileges { - pb.Privileges = append(pb.Privileges, &internal.UserPrivilege{ - Database: proto.String(database), - Privilege: proto.Int32(int32(privilege)), - }) - } - - return pb -} - -// unmarshal deserializes from a protobuf representation. -func (ui *UserInfo) unmarshal(pb *internal.UserInfo) { - ui.Name = pb.GetName() - ui.Hash = pb.GetHash() - ui.Admin = pb.GetAdmin() - - ui.Privileges = make(map[string]influxql.Privilege) - for _, p := range pb.GetPrivileges() { - ui.Privileges[p.GetDatabase()] = influxql.Privilege(p.GetPrivilege()) - } -} - -// Lease represents a lease held on a resource. -type Lease struct { - Name string `json:"name"` - Expiration time.Time `json:"expiration"` - Owner uint64 `json:"owner"` -} - -// Leases is a concurrency-safe collection of leases keyed by name. -type Leases struct { - mu sync.Mutex - m map[string]*Lease - d time.Duration -} - -// NewLeases returns a new instance of Leases. -func NewLeases(d time.Duration) *Leases { - return &Leases{ - m: make(map[string]*Lease), - d: d, - } -} - -// Acquire acquires a lease with the given name for the given nodeID. -// If the lease doesn't exist or exists but is expired, a valid lease is returned. -// If nodeID already owns the named and unexpired lease, the lease expiration is extended. -// If a different node owns the lease, an error is returned. -func (leases *Leases) Acquire(name string, nodeID uint64) (*Lease, error) { - leases.mu.Lock() - defer leases.mu.Unlock() - - l := leases.m[name] - if l != nil { - if time.Now().After(l.Expiration) || l.Owner == nodeID { - l.Expiration = time.Now().Add(leases.d) - l.Owner = nodeID - return l, nil - } - return l, errors.New("another node has the lease") - } - - l = &Lease{ - Name: name, - Expiration: time.Now().Add(leases.d), - Owner: nodeID, - } - - leases.m[name] = l - - return l, nil -} - -// MarshalTime converts t to nanoseconds since epoch. A zero time returns 0. -func MarshalTime(t time.Time) int64 { - if t.IsZero() { - return 0 - } - return t.UnixNano() -} - -// UnmarshalTime converts nanoseconds since epoch to time. -// A zero value returns a zero time. 
-func UnmarshalTime(v int64) time.Time { - if v == 0 { - return time.Time{} - } - return time.Unix(0, v).UTC() -} - -// ValidName checks to see if the given name can would be valid for DB/RP name -func ValidName(name string) bool { - for _, r := range name { - if !unicode.IsPrint(r) { - return false - } - } - - return name != "" && - name != "." && - name != ".." && - !strings.ContainsAny(name, `/\`) -} diff --git a/v1/services/meta/data_internal_test.go b/v1/services/meta/data_internal_test.go deleted file mode 100644 index 0375863debe..00000000000 --- a/v1/services/meta/data_internal_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package meta - -import ( - "sort" - "testing" - "time" -) - -func TestShardGroupSort(t *testing.T) { - sg1 := ShardGroupInfo{ - ID: 1, - StartTime: time.Unix(1000, 0), - EndTime: time.Unix(1100, 0), - TruncatedAt: time.Unix(1050, 0), - } - - sg2 := ShardGroupInfo{ - ID: 2, - StartTime: time.Unix(1000, 0), - EndTime: time.Unix(1100, 0), - } - - sgs := ShardGroupInfos{sg2, sg1} - - sort.Sort(sgs) - - if sgs[len(sgs)-1].ID != 2 { - t.Fatal("unstable sort for ShardGroupInfos") - } -} - -func Test_Data_RetentionPolicy_MarshalBinary(t *testing.T) { - zeroTime := time.Time{} - epoch := time.Unix(0, 0).UTC() - - startTime := zeroTime - sgi := &ShardGroupInfo{ - StartTime: startTime, - } - isgi := sgi.marshal() - sgi.unmarshal(isgi) - if got, exp := sgi.StartTime.UTC(), epoch.UTC(); got != exp { - t.Errorf("unexpected start time. got: %s, exp: %s", got, exp) - } - - startTime = time.Unix(0, 0) - endTime := startTime.Add(time.Hour * 24) - sgi = &ShardGroupInfo{ - StartTime: startTime, - EndTime: endTime, - } - isgi = sgi.marshal() - sgi.unmarshal(isgi) - if got, exp := sgi.StartTime.UTC(), startTime.UTC(); got != exp { - t.Errorf("unexpected start time. got: %s, exp: %s", got, exp) - } - if got, exp := sgi.EndTime.UTC(), endTime.UTC(); got != exp { - t.Errorf("unexpected end time. got: %s, exp: %s", got, exp) - } - if got, exp := sgi.DeletedAt.UTC(), zeroTime.UTC(); got != exp { - t.Errorf("unexpected DeletedAt time. got: %s, exp: %s", got, exp) - } -} diff --git a/v1/services/meta/data_test.go b/v1/services/meta/data_test.go deleted file mode 100644 index d5872646099..00000000000 --- a/v1/services/meta/data_test.go +++ /dev/null @@ -1,477 +0,0 @@ -package meta_test - -import ( - "fmt" - "math/rand" - "reflect" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/pkg/testing/assert" - influxdb "github.com/influxdata/influxdb/v2/v1" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/influxdata/influxql" -) - -var seededRand *rand.Rand - -func init() { - seededRand = rand.New(rand.NewSource(time.Now().UnixNano())) -} - -func Test_Data_DropDatabase(t *testing.T) { - data := &meta.Data{ - Databases: []meta.DatabaseInfo{ - {Name: "db0"}, - {Name: "db1"}, - {Name: "db2"}, - {Name: "db4"}, - {Name: "db5"}, - }, - Users: []meta.UserInfo{ - {Name: "user1", Privileges: map[string]influxql.Privilege{"db1": influxql.ReadPrivilege, "db2": influxql.ReadPrivilege}}, - {Name: "user2", Privileges: map[string]influxql.Privilege{"db2": influxql.ReadPrivilege}}, - }, - } - - // Dropping the first database removes it from the Data object. 
- expDbs := make([]meta.DatabaseInfo, 4) - copy(expDbs, data.Databases[1:]) - if err := data.DropDatabase("db0"); err != nil { - t.Fatal(err) - } else if got, exp := data.Databases, expDbs; !reflect.DeepEqual(got, exp) { - t.Fatalf("got %v, expected %v", got, exp) - } - - // Dropping a middle database removes it from the data object. - expDbs = []meta.DatabaseInfo{{Name: "db1"}, {Name: "db2"}, {Name: "db5"}} - if err := data.DropDatabase("db4"); err != nil { - t.Fatal(err) - } else if got, exp := data.Databases, expDbs; !reflect.DeepEqual(got, exp) { - t.Fatalf("got %v, expected %v", got, exp) - } - - // Dropping the last database removes it from the data object. - expDbs = []meta.DatabaseInfo{{Name: "db1"}, {Name: "db2"}} - if err := data.DropDatabase("db5"); err != nil { - t.Fatal(err) - } else if got, exp := data.Databases, expDbs; !reflect.DeepEqual(got, exp) { - t.Fatalf("got %v, expected %v", got, exp) - } - - // Dropping a database also drops all the user privileges associated with - // it. - expUsers := []meta.UserInfo{ - {Name: "user1", Privileges: map[string]influxql.Privilege{"db1": influxql.ReadPrivilege}}, - {Name: "user2", Privileges: map[string]influxql.Privilege{}}, - } - if err := data.DropDatabase("db2"); err != nil { - t.Fatal(err) - } else if got, exp := data.Users, expUsers; !reflect.DeepEqual(got, exp) { - t.Fatalf("got %v, expected %v", got, exp) - } -} - -func Test_Data_CreateDatabase(t *testing.T) { - data := meta.Data{} - - // Test creating a database succeeds. - if err := data.CreateDatabase("foo"); err != nil { - t.Fatal(err) - } - - // Test creating a database with a name that is too long fails. - name := randString(meta.MaxNameLen + 1) - if err := data.CreateDatabase(name); err != meta.ErrNameTooLong { - t.Fatalf("exp: %v, got: %v", meta.ErrNameTooLong, err) - } -} - -func Test_Data_CreateRetentionPolicy(t *testing.T) { - data := meta.Data{} - - err := data.CreateDatabase("foo") - if err != nil { - t.Fatal(err) - } - - err = data.CreateRetentionPolicy("foo", &meta.RetentionPolicyInfo{ - Name: "bar", - ReplicaN: 1, - Duration: 24 * time.Hour, - }, false) - if err != nil { - t.Fatal(err) - } - - rp, err := data.RetentionPolicy("foo", "bar") - if err != nil { - t.Fatal(err) - } - - if rp == nil { - t.Fatal("creation of retention policy failed") - } - - // Try to recreate the same RP with default set to true, should fail - err = data.CreateRetentionPolicy("foo", &meta.RetentionPolicyInfo{ - Name: "bar", - ReplicaN: 1, - Duration: 24 * time.Hour, - }, true) - if err == nil || err != meta.ErrRetentionPolicyConflict { - t.Fatalf("unexpected error. got: %v, exp: %s", err, meta.ErrRetentionPolicyConflict) - } - - // Creating the same RP with the same specifications should succeed - err = data.CreateRetentionPolicy("foo", &meta.RetentionPolicyInfo{ - Name: "bar", - ReplicaN: 1, - Duration: 24 * time.Hour, - }, false) - if err != nil { - t.Fatal(err) - } - - // Try creating a retention policy with a name that is too long. Should fail. - err = data.CreateRetentionPolicy("foo", &meta.RetentionPolicyInfo{ - Name: randString(meta.MaxNameLen + 1), - ReplicaN: 1, - Duration: 24 * time.Hour, - }, true) - if err != meta.ErrNameTooLong { - t.Fatalf("exp: %v, got %v", meta.ErrNameTooLong, err) - } -} - -func TestData_AdminUserExists(t *testing.T) { - data := meta.Data{} - - // No users means no admin. - if data.AdminUserExists() { - t.Fatal("no admin user should exist") - } - - // Add a non-admin user. 
- if err := data.CreateUser("user1", "a", false); err != nil { - t.Fatal(err) - } - if got, exp := data.AdminUserExists(), false; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } - - // Add an admin user. - if err := data.CreateUser("admin1", "a", true); err != nil { - t.Fatal(err) - } - if got, exp := data.AdminUserExists(), true; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } - - // Remove the original user - if err := data.DropUser("user1"); err != nil { - t.Fatal(err) - } - if got, exp := data.AdminUserExists(), true; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } - - // Add another admin - if err := data.CreateUser("admin2", "a", true); err != nil { - t.Fatal(err) - } - if got, exp := data.AdminUserExists(), true; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } - - // Revoke privileges of the first admin - if err := data.SetAdminPrivilege("admin1", false); err != nil { - t.Fatal(err) - } - if got, exp := data.AdminUserExists(), true; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } - - // Add user1 back. - if err := data.CreateUser("user1", "a", false); err != nil { - t.Fatal(err) - } - // Revoke remaining admin. - if err := data.SetAdminPrivilege("admin2", false); err != nil { - t.Fatal(err) - } - // No longer any admins - if got, exp := data.AdminUserExists(), false; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } - - // Make user1 an admin - if err := data.SetAdminPrivilege("user1", true); err != nil { - t.Fatal(err) - } - if got, exp := data.AdminUserExists(), true; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } - - // Drop user1... - if err := data.DropUser("user1"); err != nil { - t.Fatal(err) - } - if got, exp := data.AdminUserExists(), false; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } -} - -func TestData_SetPrivilege(t *testing.T) { - data := meta.Data{} - if err := data.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } - - if err := data.CreateUser("user1", "", false); err != nil { - t.Fatal(err) - } - - // When the user does not exist, SetPrivilege returns an error. - if got, exp := data.SetPrivilege("not a user", "db0", influxql.AllPrivileges), meta.ErrUserNotFound; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } - - // When the database does not exist, SetPrivilege returns an error. - if got, exp := data.SetPrivilege("user1", "db1", influxql.AllPrivileges), influxdb.ErrDatabaseNotFound("db1"); got == nil || got.Error() != exp.Error() { - t.Fatalf("got %v, expected %v", got, exp) - } - - // Otherwise, SetPrivilege sets the expected privileges. 
- if got := data.SetPrivilege("user1", "db0", influxql.AllPrivileges); got != nil { - t.Fatalf("got %v, expected %v", got, nil) - } -} - -func TestData_TruncateShardGroups(t *testing.T) { - data := &meta.Data{} - - must := func(err error) { - if err != nil { - t.Fatal(err) - } - } - - must(data.CreateDatabase("db")) - rp := meta.NewRetentionPolicyInfo("rp") - rp.ShardGroupDuration = 24 * time.Hour - must(data.CreateRetentionPolicy("db", rp, true)) - - must(data.CreateShardGroup("db", "rp", time.Unix(0, 0))) - - sg0, err := data.ShardGroupByTimestamp("db", "rp", time.Unix(0, 0)) - if err != nil { - t.Fatal("Failed to find shard group:", err) - } - - if sg0.Truncated() { - t.Fatal("shard group already truncated") - } - - sgEnd, err := data.ShardGroupByTimestamp("db", "rp", sg0.StartTime.Add(rp.ShardGroupDuration-1)) - if err != nil { - t.Fatal("Failed to find shard group for end range:", err) - } - - if sgEnd == nil || sgEnd.ID != sg0.ID { - t.Fatalf("Retention policy mis-match: Expected %v, Got %v", sg0, sgEnd) - } - - must(data.CreateShardGroup("db", "rp", sg0.StartTime.Add(rp.ShardGroupDuration))) - - sg1, err := data.ShardGroupByTimestamp("db", "rp", sg0.StartTime.Add(rp.ShardGroupDuration+time.Minute)) - if err != nil { - t.Fatal("Failed to find second shard group:", err) - } - - if sg1.Truncated() { - t.Fatal("second shard group already truncated") - } - - // shouldn't do anything - must(data.CreateShardGroup("db", "rp", sg0.EndTime.Add(-time.Minute))) - - sgs, err := data.ShardGroupsByTimeRange("db", "rp", time.Unix(0, 0), sg1.EndTime.Add(time.Minute)) - if err != nil { - t.Fatal("Failed to find shard groups:", err) - } - - if len(sgs) != 2 { - t.Fatalf("Expected %d shard groups, found %d", 2, len(sgs)) - } - - truncateTime := sg0.EndTime.Add(-time.Minute) - data.TruncateShardGroups(truncateTime) - - // at this point, we should get nil shard groups for times after truncateTime - for _, tc := range []struct { - t time.Time - exists bool - }{ - {sg0.StartTime, true}, - {sg0.EndTime.Add(-1), false}, - {truncateTime.Add(-1), true}, - {truncateTime, false}, - {sg1.StartTime, false}, - } { - sg, err := data.ShardGroupByTimestamp("db", "rp", tc.t) - if err != nil { - t.Fatalf("Failed to find shardgroup for %v: %v", tc.t, err) - } - if tc.exists && sg == nil { - t.Fatalf("Shard group for timestamp '%v' should exist, got nil", tc.t) - } - } - - for _, x := range data.Databases[0].RetentionPolicies[0].ShardGroups { - switch x.ID { - case sg0.ID: - *sg0 = x - case sg1.ID: - *sg1 = x - } - } - - if sg0.TruncatedAt != truncateTime { - t.Fatalf("Incorrect truncation of current shard group. Expected %v, got %v", truncateTime, sg0.TruncatedAt) - } - - if sg1.TruncatedAt != sg1.StartTime { - t.Fatalf("Incorrect truncation of future shard group. 
Expected %v, got %v", sg1.StartTime, sg1.TruncatedAt) - } - - groups := data.Databases[0].RetentionPolicies[0].ShardGroups - assert.Equal(t, 2, len(groups)) - assert.Equal(t, "1970-01-01 00:00:00 +0000 UTC", groups[0].StartTime.String()) - assert.Equal(t, "1970-01-02 00:00:00 +0000 UTC", groups[0].EndTime.String()) - assert.Equal(t, "1970-01-01 23:59:00 +0000 UTC", groups[0].TruncatedAt.String()) - - assert.Equal(t, "1970-01-02 00:00:00 +0000 UTC", groups[1].StartTime.String()) - assert.Equal(t, "1970-01-03 00:00:00 +0000 UTC", groups[1].EndTime.String()) - assert.Equal(t, "1970-01-02 00:00:00 +0000 UTC", groups[1].TruncatedAt.String()) - - // Create some more shard groups and validate there is no overlap - // Add a shard starting at sg0's truncation time, until 01/02 - must(data.CreateShardGroup("db", "rp", sg0.EndTime.Add(-time.Second))) - // Add a shard 01/02 - 01/03 (since sg1 is fully truncated) - must(data.CreateShardGroup("db", "rp", sg1.EndTime.Add(-time.Second))) - // Add a shard 01/06 - 01/07 - must(data.CreateShardGroup("db", "rp", sg1.EndTime.Add(3*rp.ShardGroupDuration))) - newDuration := 10 * rp.ShardGroupDuration - data.UpdateRetentionPolicy("db", "rp", &meta.RetentionPolicyUpdate{ - Name: nil, - Duration: nil, - ReplicaN: nil, - ShardGroupDuration: &newDuration, - }, true) - // Add a shard 01/03 - 01/06 - must(data.CreateShardGroup("db", "rp", sg1.EndTime.Add(1*rp.ShardGroupDuration))) - // Add a shard 01/07 - 01/09 - must(data.CreateShardGroup("db", "rp", sg1.EndTime.Add(4*rp.ShardGroupDuration))) - // Add a shard 01/09 - 01/19 - must(data.CreateShardGroup("db", "rp", sg1.EndTime.Add(10*rp.ShardGroupDuration))) - // No additional shard added - must(data.CreateShardGroup("db", "rp", sg1.EndTime.Add(11*rp.ShardGroupDuration))) - - groups = data.Databases[0].RetentionPolicies[0].ShardGroups - assert.Equal(t, 8, len(groups)) - - expectTimes := []struct { - start, end, truncated string - }{ - {"1970-01-01 00:00:00 +0000 UTC", "1970-01-02 00:00:00 +0000 UTC", "1970-01-01 23:59:00 +0000 UTC"}, - {"1970-01-01 23:59:00 +0000 UTC", "1970-01-02 00:00:00 +0000 UTC", "0001-01-01 00:00:00 +0000 UTC"}, - {"1970-01-02 00:00:00 +0000 UTC", "1970-01-03 00:00:00 +0000 UTC", "1970-01-02 00:00:00 +0000 UTC"}, - {"1970-01-02 00:00:00 +0000 UTC", "1970-01-03 00:00:00 +0000 UTC", "0001-01-01 00:00:00 +0000 UTC"}, - {"1970-01-03 00:00:00 +0000 UTC", "1970-01-06 00:00:00 +0000 UTC", "0001-01-01 00:00:00 +0000 UTC"}, - {"1970-01-06 00:00:00 +0000 UTC", "1970-01-07 00:00:00 +0000 UTC", "0001-01-01 00:00:00 +0000 UTC"}, - {"1970-01-07 00:00:00 +0000 UTC", "1970-01-09 00:00:00 +0000 UTC", "0001-01-01 00:00:00 +0000 UTC"}, - {"1970-01-09 00:00:00 +0000 UTC", "1970-01-19 00:00:00 +0000 UTC", "0001-01-01 00:00:00 +0000 UTC"}, - } - - for i := range expectTimes { - assert.Equal(t, expectTimes[i].start, groups[i].StartTime.String(), "start time %d", i) - assert.Equal(t, expectTimes[i].end, groups[i].EndTime.String(), "end time %d", i) - assert.Equal(t, expectTimes[i].truncated, groups[i].TruncatedAt.String(), "truncate time %d", i) - } -} - -func TestUserInfo_AuthorizeDatabase(t *testing.T) { - emptyUser := &meta.UserInfo{} - if !emptyUser.AuthorizeDatabase(influxql.NoPrivileges, "anydb") { - t.Fatal("expected NoPrivileges to be authorized but it wasn't") - } - if emptyUser.AuthorizeDatabase(influxql.ReadPrivilege, "anydb") { - t.Fatal("expected ReadPrivilege to prevent authorization, but it was authorized") - } - - adminUser := &meta.UserInfo{Admin: true} - if 
!adminUser.AuthorizeDatabase(influxql.AllPrivileges, "anydb") { - t.Fatalf("expected admin to be authorized but it wasn't") - } -} - -func TestShardGroupInfo_Contains(t *testing.T) { - sgi := &meta.ShardGroupInfo{StartTime: time.Unix(10, 0), EndTime: time.Unix(20, 0)} - - tests := []struct { - ts time.Time - exp bool - }{ - {time.Unix(0, 0), false}, - {time.Unix(9, 0), false}, - {time.Unix(10, 0), true}, - {time.Unix(11, 0), true}, - {time.Unix(15, 0), true}, - {time.Unix(19, 0), true}, - {time.Unix(20, 0), false}, - {time.Unix(21, 0), false}, - } - for _, test := range tests { - t.Run(fmt.Sprintf("ts=%d", test.ts.Unix()), func(t *testing.T) { - got := sgi.Contains(test.ts) - assert.Equal(t, got, test.exp) - }) - } -} - -func TestRetentionPolicyInfo_ToSpec(t *testing.T) { - rp := &meta.RetentionPolicyInfo{ - Name: "bar", - ReplicaN: 1, - Duration: 24 * time.Hour, - ShardGroupDuration: time.Hour, - } - spec := rp.ToSpec() - - if spec == nil { - t.Fatal("invalid spec") - } else if spec.Name != rp.Name { - t.Fatalf("invalid name: %s", spec.Name) - } else if spec.ReplicaN == nil { - t.Fatalf("invalid ReplicaN") - } else if *spec.ReplicaN != rp.ReplicaN { - t.Fatalf("invalid ReplicaN: %d", *spec.ReplicaN) - } else if spec.Duration == nil { - t.Fatalf("invalid Duration") - } else if *spec.Duration != rp.Duration { - t.Fatalf("invalid Duration: %s", spec.Duration.String()) - } else if spec.ShardGroupDuration != rp.ShardGroupDuration { - t.Fatalf("invalid ShardGroupDuration: %s", spec.ShardGroupDuration.String()) - } - -} - -func randString(n int) string { - var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - b := make([]rune, n) - for i := range b { - b[i] = letters[seededRand.Intn(len(letters))] - } - return string(b) -} diff --git a/v1/services/meta/errors.go b/v1/services/meta/errors.go deleted file mode 100644 index e6b5b61d0ba..00000000000 --- a/v1/services/meta/errors.go +++ /dev/null @@ -1,119 +0,0 @@ -package meta - -import ( - "errors" - "fmt" -) - -var ( - // ErrStoreOpen is returned when opening an already open store. - ErrStoreOpen = errors.New("store already open") - - // ErrStoreClosed is returned when closing an already closed store. - ErrStoreClosed = errors.New("raft store already closed") -) - -var ( - // ErrDatabaseExists is returned when creating an already existing database. - ErrDatabaseExists = errors.New("database already exists") - - // ErrDatabaseNotExists is returned when operating on a not existing database. - ErrDatabaseNotExists = errors.New("database does not exist") - - // ErrDatabaseNameRequired is returned when creating a database without a name. - ErrDatabaseNameRequired = errors.New("database name required") - - // ErrNameTooLong is returned when attempting to create a database or - // retention policy with a name that is too long. - ErrNameTooLong = errors.New("name too long") - - // ErrInvalidName is returned when attempting to create a database or retention policy with an invalid name - ErrInvalidName = errors.New("invalid name") -) - -var ( - // ErrRetentionPolicyExists is returned when creating an already existing policy. - ErrRetentionPolicyExists = errors.New("retention policy already exists") - - // ErrRetentionPolicyNotFound is returned when an expected policy wasn't found. - ErrRetentionPolicyNotFound = errors.New("retention policy not found") - - // ErrRetentionPolicyDefault is returned when attempting a prohibited operation - // on a default retention policy. 
- ErrRetentionPolicyDefault = errors.New("retention policy is default") - - // ErrRetentionPolicyRequired is returned when a retention policy is required - // by an operation, but a nil policy was passed. - ErrRetentionPolicyRequired = errors.New("retention policy required") - - // ErrRetentionPolicyNameRequired is returned when creating a policy without a name. - ErrRetentionPolicyNameRequired = errors.New("retention policy name required") - - // ErrRetentionPolicyNameExists is returned when renaming a policy to - // the same name as another existing policy. - ErrRetentionPolicyNameExists = errors.New("retention policy name already exists") - - // ErrRetentionPolicyDurationTooLow is returned when updating a retention - // policy that has a duration lower than the allowed minimum. - ErrRetentionPolicyDurationTooLow = fmt.Errorf("retention policy duration must be at least %s", MinRetentionPolicyDuration) - - // ErrRetentionPolicyConflict is returned when creating a retention policy conflicts - // with an existing policy. - ErrRetentionPolicyConflict = errors.New("retention policy conflicts with an existing policy") - - // ErrIncompatibleDurations is returned when creating or updating a - // retention policy that has a duration lower than the current shard - // duration. - ErrIncompatibleDurations = errors.New("retention policy duration must be greater than the shard duration") - - // ErrReplicationFactorTooLow is returned when the replication factor is not in an - // acceptable range. - ErrReplicationFactorTooLow = errors.New("replication factor must be greater than 0") -) - -var ( - // ErrShardGroupExists is returned when creating an already existing shard group. - ErrShardGroupExists = errors.New("shard group already exists") - - // ErrShardGroupNotFound is returned when mutating a shard group that doesn't exist. - ErrShardGroupNotFound = errors.New("shard group not found") - - // ErrShardNotReplicated is returned if the node requested to be dropped has - // the last copy of a shard present and the force keyword was not used - ErrShardNotReplicated = errors.New("shard not replicated") -) - -var ( - // ErrContinuousQueryExists is returned when creating an already existing continuous query. - ErrContinuousQueryExists = errors.New("continuous query already exists") - - // ErrContinuousQueryNotFound is returned when removing a continuous query that doesn't exist. - ErrContinuousQueryNotFound = errors.New("continuous query not found") -) - -var ( - // ErrSubscriptionExists is returned when creating an already existing subscription. - ErrSubscriptionExists = errors.New("subscription already exists") - - // ErrSubscriptionNotFound is returned when removing a subscription that doesn't exist. - ErrSubscriptionNotFound = errors.New("subscription not found") -) - -// ErrInvalidSubscriptionURL is returned when the subscription's destination URL is invalid. -func ErrInvalidSubscriptionURL(url string) error { - return fmt.Errorf("invalid subscription URL: %s", url) -} - -var ( - // ErrUserExists is returned when creating an already existing user. - ErrUserExists = errors.New("user already exists") - - // ErrUserNotFound is returned when mutating a user that doesn't exist. - ErrUserNotFound = errors.New("user not found") - - // ErrUsernameRequired is returned when creating a user without a username. - ErrUsernameRequired = errors.New("username required") - - // ErrAuthenticate is returned when authentication fails. 
- ErrAuthenticate = errors.New("authentication failed") -) diff --git a/v1/services/meta/filestore/README.md b/v1/services/meta/filestore/README.md deleted file mode 100644 index 250fdeee1d3..00000000000 --- a/v1/services/meta/filestore/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# filestore - -This package provides the ability to read legacy meta.db -files by meta.Client \ No newline at end of file diff --git a/v1/services/meta/filestore/kv.go b/v1/services/meta/filestore/kv.go deleted file mode 100644 index 60ad86611eb..00000000000 --- a/v1/services/meta/filestore/kv.go +++ /dev/null @@ -1,168 +0,0 @@ -package filestore - -import ( - "context" - "io" - "os" - "path/filepath" - "sync" - - "github.com/influxdata/influxdb/v2/kv" - "github.com/influxdata/influxdb/v2/pkg/file" -) - -type KVStore struct { - mu sync.RWMutex - path string // root directory where file will be stored - bucketName string // the name of the bucket - keyName string // the name of the file - full string -} - -func New(path, bucketName, keyName string) *KVStore { - return &KVStore{path: path, bucketName: bucketName, keyName: keyName, full: filepath.Join(path, keyName)} -} - -func (s *KVStore) View(ctx context.Context, f func(kv.Tx) error) error { - return f(&Tx{kv: s, ctx: ctx}) -} - -func (s *KVStore) Update(ctx context.Context, f func(kv.Tx) error) error { - return f(&Tx{kv: s, ctx: ctx, writable: true}) -} - -func (s *KVStore) RLock() { - s.mu.RLock() -} - -func (s *KVStore) RUnlock() { - s.mu.RUnlock() -} - -func (s *KVStore) Backup(ctx context.Context, w io.Writer) error { - panic("not implemented") -} - -func (s *KVStore) Restore(ctx context.Context, r io.Reader) error { - panic("not implemented") -} - -// Tx is an in memory transaction. -// TODO: make transactions actually transactional -type Tx struct { - kv *KVStore - ctx context.Context - writable bool -} - -func (t *Tx) Bucket(b []byte) (kv.Bucket, error) { - if string(b) != t.kv.bucketName { - return nil, kv.ErrBucketNotFound - } - - return t.kv, nil -} - -func (t *Tx) Context() context.Context { - return t.ctx -} - -func (t *Tx) WithContext(ctx context.Context) { - t.ctx = ctx -} - -// region: kv.Bucket implementation - -func (s *KVStore) checkKey(key []byte) bool { - return string(key) == s.keyName -} - -func (s *KVStore) Get(key []byte) ([]byte, error) { - if !s.checkKey(key) { - return nil, kv.ErrKeyNotFound - } - - s.mu.RLock() - defer s.mu.RUnlock() - - return s.get() -} - -func (s *KVStore) GetBatch(keys ...[]byte) (values [][]byte, err error) { - s.mu.RLock() - defer s.mu.RUnlock() - - values = make([][]byte, len(keys)) - for i := range keys { - if string(keys[i]) == s.keyName { - if values[i], err = s.get(); err != nil { - return nil, err - } - } - } - - return values, nil -} - -func (s *KVStore) get() ([]byte, error) { - if d, err := os.ReadFile(s.full); os.IsNotExist(err) { - return nil, kv.ErrKeyNotFound - } else if err != nil { - return nil, err - } else { - return d, nil - } -} - -func (s *KVStore) Cursor(hints ...kv.CursorHint) (kv.Cursor, error) { - panic("not implemented") -} - -func (s *KVStore) Put(key, value []byte) error { - if !s.checkKey(key) { - return kv.ErrKeyNotFound - } - - s.mu.Lock() - defer s.mu.Unlock() - - tmpFile := s.full + "tmp" - - f, err := os.Create(tmpFile) - if err != nil { - return err - } - defer func() { _ = f.Close() }() - - if _, err := f.Write(value); err != nil { - return err - } - - if err = f.Sync(); err != nil { - return err - } - - // close file handle before renaming to support Windows - if 
err = f.Close(); err != nil { - return err - } - - return file.RenameFile(tmpFile, s.full) -} - -func (s *KVStore) Delete(key []byte) error { - if !s.checkKey(key) { - return kv.ErrKeyNotFound - } - - s.mu.Lock() - defer s.mu.Unlock() - - return os.Remove(s.full) -} - -func (s *KVStore) ForwardCursor(seek []byte, opts ...kv.CursorOption) (kv.ForwardCursor, error) { - panic("not implemented") -} - -// endregion diff --git a/v1/services/meta/internal/meta.pb.go b/v1/services/meta/internal/meta.pb.go deleted file mode 100644 index 0dc03ad46d7..00000000000 --- a/v1/services/meta/internal/meta.pb.go +++ /dev/null @@ -1,4250 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.27.1 -// protoc v3.17.3 -// source: internal/meta.proto - -package meta - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Command_Type int32 - -const ( - Command_CreateNodeCommand Command_Type = 1 - Command_DeleteNodeCommand Command_Type = 2 - Command_CreateDatabaseCommand Command_Type = 3 - Command_DropDatabaseCommand Command_Type = 4 - Command_CreateRetentionPolicyCommand Command_Type = 5 - Command_DropRetentionPolicyCommand Command_Type = 6 - Command_SetDefaultRetentionPolicyCommand Command_Type = 7 - Command_UpdateRetentionPolicyCommand Command_Type = 8 - Command_CreateShardGroupCommand Command_Type = 9 - Command_DeleteShardGroupCommand Command_Type = 10 - Command_CreateContinuousQueryCommand Command_Type = 11 - Command_DropContinuousQueryCommand Command_Type = 12 - Command_CreateUserCommand Command_Type = 13 - Command_DropUserCommand Command_Type = 14 - Command_UpdateUserCommand Command_Type = 15 - Command_SetPrivilegeCommand Command_Type = 16 - Command_SetDataCommand Command_Type = 17 - Command_SetAdminPrivilegeCommand Command_Type = 18 - Command_UpdateNodeCommand Command_Type = 19 - Command_CreateSubscriptionCommand Command_Type = 21 - Command_DropSubscriptionCommand Command_Type = 22 - Command_RemovePeerCommand Command_Type = 23 - Command_CreateMetaNodeCommand Command_Type = 24 - Command_CreateDataNodeCommand Command_Type = 25 - Command_UpdateDataNodeCommand Command_Type = 26 - Command_DeleteMetaNodeCommand Command_Type = 27 - Command_DeleteDataNodeCommand Command_Type = 28 - Command_SetMetaNodeCommand Command_Type = 29 - Command_DropShardCommand Command_Type = 30 -) - -// Enum value maps for Command_Type. 
-var ( - Command_Type_name = map[int32]string{ - 1: "CreateNodeCommand", - 2: "DeleteNodeCommand", - 3: "CreateDatabaseCommand", - 4: "DropDatabaseCommand", - 5: "CreateRetentionPolicyCommand", - 6: "DropRetentionPolicyCommand", - 7: "SetDefaultRetentionPolicyCommand", - 8: "UpdateRetentionPolicyCommand", - 9: "CreateShardGroupCommand", - 10: "DeleteShardGroupCommand", - 11: "CreateContinuousQueryCommand", - 12: "DropContinuousQueryCommand", - 13: "CreateUserCommand", - 14: "DropUserCommand", - 15: "UpdateUserCommand", - 16: "SetPrivilegeCommand", - 17: "SetDataCommand", - 18: "SetAdminPrivilegeCommand", - 19: "UpdateNodeCommand", - 21: "CreateSubscriptionCommand", - 22: "DropSubscriptionCommand", - 23: "RemovePeerCommand", - 24: "CreateMetaNodeCommand", - 25: "CreateDataNodeCommand", - 26: "UpdateDataNodeCommand", - 27: "DeleteMetaNodeCommand", - 28: "DeleteDataNodeCommand", - 29: "SetMetaNodeCommand", - 30: "DropShardCommand", - } - Command_Type_value = map[string]int32{ - "CreateNodeCommand": 1, - "DeleteNodeCommand": 2, - "CreateDatabaseCommand": 3, - "DropDatabaseCommand": 4, - "CreateRetentionPolicyCommand": 5, - "DropRetentionPolicyCommand": 6, - "SetDefaultRetentionPolicyCommand": 7, - "UpdateRetentionPolicyCommand": 8, - "CreateShardGroupCommand": 9, - "DeleteShardGroupCommand": 10, - "CreateContinuousQueryCommand": 11, - "DropContinuousQueryCommand": 12, - "CreateUserCommand": 13, - "DropUserCommand": 14, - "UpdateUserCommand": 15, - "SetPrivilegeCommand": 16, - "SetDataCommand": 17, - "SetAdminPrivilegeCommand": 18, - "UpdateNodeCommand": 19, - "CreateSubscriptionCommand": 21, - "DropSubscriptionCommand": 22, - "RemovePeerCommand": 23, - "CreateMetaNodeCommand": 24, - "CreateDataNodeCommand": 25, - "UpdateDataNodeCommand": 26, - "DeleteMetaNodeCommand": 27, - "DeleteDataNodeCommand": 28, - "SetMetaNodeCommand": 29, - "DropShardCommand": 30, - } -) - -func (x Command_Type) Enum() *Command_Type { - p := new(Command_Type) - *p = x - return p -} - -func (x Command_Type) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Command_Type) Descriptor() protoreflect.EnumDescriptor { - return file_internal_meta_proto_enumTypes[0].Descriptor() -} - -func (Command_Type) Type() protoreflect.EnumType { - return &file_internal_meta_proto_enumTypes[0] -} - -func (x Command_Type) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *Command_Type) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = Command_Type(num) - return nil -} - -// Deprecated: Use Command_Type.Descriptor instead. 
-func (Command_Type) EnumDescriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{12, 0} -} - -type Data struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Term *uint64 `protobuf:"varint,1,req,name=Term" json:"Term,omitempty"` - Index *uint64 `protobuf:"varint,2,req,name=Index" json:"Index,omitempty"` - ClusterID *uint64 `protobuf:"varint,3,req,name=ClusterID" json:"ClusterID,omitempty"` - Nodes []*NodeInfo `protobuf:"bytes,4,rep,name=Nodes" json:"Nodes,omitempty"` - Databases []*DatabaseInfo `protobuf:"bytes,5,rep,name=Databases" json:"Databases,omitempty"` - Users []*UserInfo `protobuf:"bytes,6,rep,name=Users" json:"Users,omitempty"` - MaxNodeID *uint64 `protobuf:"varint,7,req,name=MaxNodeID" json:"MaxNodeID,omitempty"` - MaxShardGroupID *uint64 `protobuf:"varint,8,req,name=MaxShardGroupID" json:"MaxShardGroupID,omitempty"` - MaxShardID *uint64 `protobuf:"varint,9,req,name=MaxShardID" json:"MaxShardID,omitempty"` - // added for 0.10.0 - DataNodes []*NodeInfo `protobuf:"bytes,10,rep,name=DataNodes" json:"DataNodes,omitempty"` - MetaNodes []*NodeInfo `protobuf:"bytes,11,rep,name=MetaNodes" json:"MetaNodes,omitempty"` -} - -func (x *Data) Reset() { - *x = Data{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Data) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Data) ProtoMessage() {} - -func (x *Data) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Data.ProtoReflect.Descriptor instead. 
-func (*Data) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{0} -} - -func (x *Data) GetTerm() uint64 { - if x != nil && x.Term != nil { - return *x.Term - } - return 0 -} - -func (x *Data) GetIndex() uint64 { - if x != nil && x.Index != nil { - return *x.Index - } - return 0 -} - -func (x *Data) GetClusterID() uint64 { - if x != nil && x.ClusterID != nil { - return *x.ClusterID - } - return 0 -} - -func (x *Data) GetNodes() []*NodeInfo { - if x != nil { - return x.Nodes - } - return nil -} - -func (x *Data) GetDatabases() []*DatabaseInfo { - if x != nil { - return x.Databases - } - return nil -} - -func (x *Data) GetUsers() []*UserInfo { - if x != nil { - return x.Users - } - return nil -} - -func (x *Data) GetMaxNodeID() uint64 { - if x != nil && x.MaxNodeID != nil { - return *x.MaxNodeID - } - return 0 -} - -func (x *Data) GetMaxShardGroupID() uint64 { - if x != nil && x.MaxShardGroupID != nil { - return *x.MaxShardGroupID - } - return 0 -} - -func (x *Data) GetMaxShardID() uint64 { - if x != nil && x.MaxShardID != nil { - return *x.MaxShardID - } - return 0 -} - -func (x *Data) GetDataNodes() []*NodeInfo { - if x != nil { - return x.DataNodes - } - return nil -} - -func (x *Data) GetMetaNodes() []*NodeInfo { - if x != nil { - return x.MetaNodes - } - return nil -} - -type NodeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"` - Host *string `protobuf:"bytes,2,req,name=Host" json:"Host,omitempty"` - TCPHost *string `protobuf:"bytes,3,opt,name=TCPHost" json:"TCPHost,omitempty"` -} - -func (x *NodeInfo) Reset() { - *x = NodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NodeInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NodeInfo) ProtoMessage() {} - -func (x *NodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NodeInfo.ProtoReflect.Descriptor instead. 
-func (*NodeInfo) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{1} -} - -func (x *NodeInfo) GetID() uint64 { - if x != nil && x.ID != nil { - return *x.ID - } - return 0 -} - -func (x *NodeInfo) GetHost() string { - if x != nil && x.Host != nil { - return *x.Host - } - return "" -} - -func (x *NodeInfo) GetTCPHost() string { - if x != nil && x.TCPHost != nil { - return *x.TCPHost - } - return "" -} - -type DatabaseInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` - DefaultRetentionPolicy *string `protobuf:"bytes,2,req,name=DefaultRetentionPolicy" json:"DefaultRetentionPolicy,omitempty"` - RetentionPolicies []*RetentionPolicyInfo `protobuf:"bytes,3,rep,name=RetentionPolicies" json:"RetentionPolicies,omitempty"` - ContinuousQueries []*ContinuousQueryInfo `protobuf:"bytes,4,rep,name=ContinuousQueries" json:"ContinuousQueries,omitempty"` -} - -func (x *DatabaseInfo) Reset() { - *x = DatabaseInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DatabaseInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DatabaseInfo) ProtoMessage() {} - -func (x *DatabaseInfo) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DatabaseInfo.ProtoReflect.Descriptor instead. -func (*DatabaseInfo) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{2} -} - -func (x *DatabaseInfo) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *DatabaseInfo) GetDefaultRetentionPolicy() string { - if x != nil && x.DefaultRetentionPolicy != nil { - return *x.DefaultRetentionPolicy - } - return "" -} - -func (x *DatabaseInfo) GetRetentionPolicies() []*RetentionPolicyInfo { - if x != nil { - return x.RetentionPolicies - } - return nil -} - -func (x *DatabaseInfo) GetContinuousQueries() []*ContinuousQueryInfo { - if x != nil { - return x.ContinuousQueries - } - return nil -} - -type RetentionPolicySpec struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=Name" json:"Name,omitempty"` - Duration *int64 `protobuf:"varint,2,opt,name=Duration" json:"Duration,omitempty"` - ShardGroupDuration *int64 `protobuf:"varint,3,opt,name=ShardGroupDuration" json:"ShardGroupDuration,omitempty"` - ReplicaN *uint32 `protobuf:"varint,4,opt,name=ReplicaN" json:"ReplicaN,omitempty"` -} - -func (x *RetentionPolicySpec) Reset() { - *x = RetentionPolicySpec{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RetentionPolicySpec) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RetentionPolicySpec) ProtoMessage() {} - -func (x *RetentionPolicySpec) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if 
ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RetentionPolicySpec.ProtoReflect.Descriptor instead. -func (*RetentionPolicySpec) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{3} -} - -func (x *RetentionPolicySpec) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *RetentionPolicySpec) GetDuration() int64 { - if x != nil && x.Duration != nil { - return *x.Duration - } - return 0 -} - -func (x *RetentionPolicySpec) GetShardGroupDuration() int64 { - if x != nil && x.ShardGroupDuration != nil { - return *x.ShardGroupDuration - } - return 0 -} - -func (x *RetentionPolicySpec) GetReplicaN() uint32 { - if x != nil && x.ReplicaN != nil { - return *x.ReplicaN - } - return 0 -} - -type RetentionPolicyInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` - Duration *int64 `protobuf:"varint,2,req,name=Duration" json:"Duration,omitempty"` - ShardGroupDuration *int64 `protobuf:"varint,3,req,name=ShardGroupDuration" json:"ShardGroupDuration,omitempty"` - ReplicaN *uint32 `protobuf:"varint,4,req,name=ReplicaN" json:"ReplicaN,omitempty"` - ShardGroups []*ShardGroupInfo `protobuf:"bytes,5,rep,name=ShardGroups" json:"ShardGroups,omitempty"` - Subscriptions []*SubscriptionInfo `protobuf:"bytes,6,rep,name=Subscriptions" json:"Subscriptions,omitempty"` -} - -func (x *RetentionPolicyInfo) Reset() { - *x = RetentionPolicyInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RetentionPolicyInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RetentionPolicyInfo) ProtoMessage() {} - -func (x *RetentionPolicyInfo) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RetentionPolicyInfo.ProtoReflect.Descriptor instead. 
-func (*RetentionPolicyInfo) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{4} -} - -func (x *RetentionPolicyInfo) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *RetentionPolicyInfo) GetDuration() int64 { - if x != nil && x.Duration != nil { - return *x.Duration - } - return 0 -} - -func (x *RetentionPolicyInfo) GetShardGroupDuration() int64 { - if x != nil && x.ShardGroupDuration != nil { - return *x.ShardGroupDuration - } - return 0 -} - -func (x *RetentionPolicyInfo) GetReplicaN() uint32 { - if x != nil && x.ReplicaN != nil { - return *x.ReplicaN - } - return 0 -} - -func (x *RetentionPolicyInfo) GetShardGroups() []*ShardGroupInfo { - if x != nil { - return x.ShardGroups - } - return nil -} - -func (x *RetentionPolicyInfo) GetSubscriptions() []*SubscriptionInfo { - if x != nil { - return x.Subscriptions - } - return nil -} - -type ShardGroupInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"` - StartTime *int64 `protobuf:"varint,2,req,name=StartTime" json:"StartTime,omitempty"` - EndTime *int64 `protobuf:"varint,3,req,name=EndTime" json:"EndTime,omitempty"` - DeletedAt *int64 `protobuf:"varint,4,req,name=DeletedAt" json:"DeletedAt,omitempty"` - Shards []*ShardInfo `protobuf:"bytes,5,rep,name=Shards" json:"Shards,omitempty"` - TruncatedAt *int64 `protobuf:"varint,6,opt,name=TruncatedAt" json:"TruncatedAt,omitempty"` -} - -func (x *ShardGroupInfo) Reset() { - *x = ShardGroupInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ShardGroupInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ShardGroupInfo) ProtoMessage() {} - -func (x *ShardGroupInfo) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ShardGroupInfo.ProtoReflect.Descriptor instead. -func (*ShardGroupInfo) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{5} -} - -func (x *ShardGroupInfo) GetID() uint64 { - if x != nil && x.ID != nil { - return *x.ID - } - return 0 -} - -func (x *ShardGroupInfo) GetStartTime() int64 { - if x != nil && x.StartTime != nil { - return *x.StartTime - } - return 0 -} - -func (x *ShardGroupInfo) GetEndTime() int64 { - if x != nil && x.EndTime != nil { - return *x.EndTime - } - return 0 -} - -func (x *ShardGroupInfo) GetDeletedAt() int64 { - if x != nil && x.DeletedAt != nil { - return *x.DeletedAt - } - return 0 -} - -func (x *ShardGroupInfo) GetShards() []*ShardInfo { - if x != nil { - return x.Shards - } - return nil -} - -func (x *ShardGroupInfo) GetTruncatedAt() int64 { - if x != nil && x.TruncatedAt != nil { - return *x.TruncatedAt - } - return 0 -} - -type ShardInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"` - // Deprecated: Do not use. 
- OwnerIDs []uint64 `protobuf:"varint,2,rep,name=OwnerIDs" json:"OwnerIDs,omitempty"` - Owners []*ShardOwner `protobuf:"bytes,3,rep,name=Owners" json:"Owners,omitempty"` -} - -func (x *ShardInfo) Reset() { - *x = ShardInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ShardInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ShardInfo) ProtoMessage() {} - -func (x *ShardInfo) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ShardInfo.ProtoReflect.Descriptor instead. -func (*ShardInfo) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{6} -} - -func (x *ShardInfo) GetID() uint64 { - if x != nil && x.ID != nil { - return *x.ID - } - return 0 -} - -// Deprecated: Do not use. -func (x *ShardInfo) GetOwnerIDs() []uint64 { - if x != nil { - return x.OwnerIDs - } - return nil -} - -func (x *ShardInfo) GetOwners() []*ShardOwner { - if x != nil { - return x.Owners - } - return nil -} - -type SubscriptionInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` - Mode *string `protobuf:"bytes,2,req,name=Mode" json:"Mode,omitempty"` - Destinations []string `protobuf:"bytes,3,rep,name=Destinations" json:"Destinations,omitempty"` -} - -func (x *SubscriptionInfo) Reset() { - *x = SubscriptionInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SubscriptionInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubscriptionInfo) ProtoMessage() {} - -func (x *SubscriptionInfo) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubscriptionInfo.ProtoReflect.Descriptor instead. 
-func (*SubscriptionInfo) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{7} -} - -func (x *SubscriptionInfo) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *SubscriptionInfo) GetMode() string { - if x != nil && x.Mode != nil { - return *x.Mode - } - return "" -} - -func (x *SubscriptionInfo) GetDestinations() []string { - if x != nil { - return x.Destinations - } - return nil -} - -type ShardOwner struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NodeID *uint64 `protobuf:"varint,1,req,name=NodeID" json:"NodeID,omitempty"` -} - -func (x *ShardOwner) Reset() { - *x = ShardOwner{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ShardOwner) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ShardOwner) ProtoMessage() {} - -func (x *ShardOwner) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ShardOwner.ProtoReflect.Descriptor instead. -func (*ShardOwner) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{8} -} - -func (x *ShardOwner) GetNodeID() uint64 { - if x != nil && x.NodeID != nil { - return *x.NodeID - } - return 0 -} - -type ContinuousQueryInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` - Query *string `protobuf:"bytes,2,req,name=Query" json:"Query,omitempty"` -} - -func (x *ContinuousQueryInfo) Reset() { - *x = ContinuousQueryInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ContinuousQueryInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ContinuousQueryInfo) ProtoMessage() {} - -func (x *ContinuousQueryInfo) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ContinuousQueryInfo.ProtoReflect.Descriptor instead. 
-func (*ContinuousQueryInfo) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{9} -} - -func (x *ContinuousQueryInfo) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *ContinuousQueryInfo) GetQuery() string { - if x != nil && x.Query != nil { - return *x.Query - } - return "" -} - -type UserInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` - Hash *string `protobuf:"bytes,2,req,name=Hash" json:"Hash,omitempty"` - Admin *bool `protobuf:"varint,3,req,name=Admin" json:"Admin,omitempty"` - Privileges []*UserPrivilege `protobuf:"bytes,4,rep,name=Privileges" json:"Privileges,omitempty"` -} - -func (x *UserInfo) Reset() { - *x = UserInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UserInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UserInfo) ProtoMessage() {} - -func (x *UserInfo) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UserInfo.ProtoReflect.Descriptor instead. -func (*UserInfo) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{10} -} - -func (x *UserInfo) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *UserInfo) GetHash() string { - if x != nil && x.Hash != nil { - return *x.Hash - } - return "" -} - -func (x *UserInfo) GetAdmin() bool { - if x != nil && x.Admin != nil { - return *x.Admin - } - return false -} - -func (x *UserInfo) GetPrivileges() []*UserPrivilege { - if x != nil { - return x.Privileges - } - return nil -} - -type UserPrivilege struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"` - Privilege *int32 `protobuf:"varint,2,req,name=Privilege" json:"Privilege,omitempty"` -} - -func (x *UserPrivilege) Reset() { - *x = UserPrivilege{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UserPrivilege) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UserPrivilege) ProtoMessage() {} - -func (x *UserPrivilege) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UserPrivilege.ProtoReflect.Descriptor instead. 
-func (*UserPrivilege) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{11} -} - -func (x *UserPrivilege) GetDatabase() string { - if x != nil && x.Database != nil { - return *x.Database - } - return "" -} - -func (x *UserPrivilege) GetPrivilege() int32 { - if x != nil && x.Privilege != nil { - return *x.Privilege - } - return 0 -} - -type Command struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - - Type *Command_Type `protobuf:"varint,1,req,name=type,enum=meta.Command_Type" json:"type,omitempty"` -} - -func (x *Command) Reset() { - *x = Command{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Command) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Command) ProtoMessage() {} - -func (x *Command) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Command.ProtoReflect.Descriptor instead. -func (*Command) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{12} -} - -func (x *Command) GetType() Command_Type { - if x != nil && x.Type != nil { - return *x.Type - } - return Command_CreateNodeCommand -} - -// This isn't used in >= 0.10.0. Kept around for upgrade purposes. Instead -// look at CreateDataNodeCommand and CreateMetaNodeCommand -type CreateNodeCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Host *string `protobuf:"bytes,1,req,name=Host" json:"Host,omitempty"` - Rand *uint64 `protobuf:"varint,2,req,name=Rand" json:"Rand,omitempty"` -} - -func (x *CreateNodeCommand) Reset() { - *x = CreateNodeCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateNodeCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateNodeCommand) ProtoMessage() {} - -func (x *CreateNodeCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateNodeCommand.ProtoReflect.Descriptor instead. 
-func (*CreateNodeCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{13} -} - -func (x *CreateNodeCommand) GetHost() string { - if x != nil && x.Host != nil { - return *x.Host - } - return "" -} - -func (x *CreateNodeCommand) GetRand() uint64 { - if x != nil && x.Rand != nil { - return *x.Rand - } - return 0 -} - -type DeleteNodeCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"` - Force *bool `protobuf:"varint,2,req,name=Force" json:"Force,omitempty"` -} - -func (x *DeleteNodeCommand) Reset() { - *x = DeleteNodeCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteNodeCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteNodeCommand) ProtoMessage() {} - -func (x *DeleteNodeCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteNodeCommand.ProtoReflect.Descriptor instead. -func (*DeleteNodeCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{14} -} - -func (x *DeleteNodeCommand) GetID() uint64 { - if x != nil && x.ID != nil { - return *x.ID - } - return 0 -} - -func (x *DeleteNodeCommand) GetForce() bool { - if x != nil && x.Force != nil { - return *x.Force - } - return false -} - -type CreateDatabaseCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` - RetentionPolicy *RetentionPolicyInfo `protobuf:"bytes,2,opt,name=RetentionPolicy" json:"RetentionPolicy,omitempty"` -} - -func (x *CreateDatabaseCommand) Reset() { - *x = CreateDatabaseCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateDatabaseCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateDatabaseCommand) ProtoMessage() {} - -func (x *CreateDatabaseCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateDatabaseCommand.ProtoReflect.Descriptor instead. 
-func (*CreateDatabaseCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{15} -} - -func (x *CreateDatabaseCommand) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *CreateDatabaseCommand) GetRetentionPolicy() *RetentionPolicyInfo { - if x != nil { - return x.RetentionPolicy - } - return nil -} - -type DropDatabaseCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` -} - -func (x *DropDatabaseCommand) Reset() { - *x = DropDatabaseCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DropDatabaseCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DropDatabaseCommand) ProtoMessage() {} - -func (x *DropDatabaseCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DropDatabaseCommand.ProtoReflect.Descriptor instead. -func (*DropDatabaseCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{16} -} - -func (x *DropDatabaseCommand) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -type CreateRetentionPolicyCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"` - RetentionPolicy *RetentionPolicyInfo `protobuf:"bytes,2,req,name=RetentionPolicy" json:"RetentionPolicy,omitempty"` -} - -func (x *CreateRetentionPolicyCommand) Reset() { - *x = CreateRetentionPolicyCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateRetentionPolicyCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateRetentionPolicyCommand) ProtoMessage() {} - -func (x *CreateRetentionPolicyCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateRetentionPolicyCommand.ProtoReflect.Descriptor instead. 
-func (*CreateRetentionPolicyCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{17} -} - -func (x *CreateRetentionPolicyCommand) GetDatabase() string { - if x != nil && x.Database != nil { - return *x.Database - } - return "" -} - -func (x *CreateRetentionPolicyCommand) GetRetentionPolicy() *RetentionPolicyInfo { - if x != nil { - return x.RetentionPolicy - } - return nil -} - -type DropRetentionPolicyCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"` - Name *string `protobuf:"bytes,2,req,name=Name" json:"Name,omitempty"` -} - -func (x *DropRetentionPolicyCommand) Reset() { - *x = DropRetentionPolicyCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DropRetentionPolicyCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DropRetentionPolicyCommand) ProtoMessage() {} - -func (x *DropRetentionPolicyCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DropRetentionPolicyCommand.ProtoReflect.Descriptor instead. -func (*DropRetentionPolicyCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{18} -} - -func (x *DropRetentionPolicyCommand) GetDatabase() string { - if x != nil && x.Database != nil { - return *x.Database - } - return "" -} - -func (x *DropRetentionPolicyCommand) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -type SetDefaultRetentionPolicyCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"` - Name *string `protobuf:"bytes,2,req,name=Name" json:"Name,omitempty"` -} - -func (x *SetDefaultRetentionPolicyCommand) Reset() { - *x = SetDefaultRetentionPolicyCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetDefaultRetentionPolicyCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetDefaultRetentionPolicyCommand) ProtoMessage() {} - -func (x *SetDefaultRetentionPolicyCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetDefaultRetentionPolicyCommand.ProtoReflect.Descriptor instead. 
-func (*SetDefaultRetentionPolicyCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{19} -} - -func (x *SetDefaultRetentionPolicyCommand) GetDatabase() string { - if x != nil && x.Database != nil { - return *x.Database - } - return "" -} - -func (x *SetDefaultRetentionPolicyCommand) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -type UpdateRetentionPolicyCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"` - Name *string `protobuf:"bytes,2,req,name=Name" json:"Name,omitempty"` - NewName *string `protobuf:"bytes,3,opt,name=NewName" json:"NewName,omitempty"` - Duration *int64 `protobuf:"varint,4,opt,name=Duration" json:"Duration,omitempty"` - ReplicaN *uint32 `protobuf:"varint,5,opt,name=ReplicaN" json:"ReplicaN,omitempty"` -} - -func (x *UpdateRetentionPolicyCommand) Reset() { - *x = UpdateRetentionPolicyCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateRetentionPolicyCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateRetentionPolicyCommand) ProtoMessage() {} - -func (x *UpdateRetentionPolicyCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateRetentionPolicyCommand.ProtoReflect.Descriptor instead. 
-func (*UpdateRetentionPolicyCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{20} -} - -func (x *UpdateRetentionPolicyCommand) GetDatabase() string { - if x != nil && x.Database != nil { - return *x.Database - } - return "" -} - -func (x *UpdateRetentionPolicyCommand) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *UpdateRetentionPolicyCommand) GetNewName() string { - if x != nil && x.NewName != nil { - return *x.NewName - } - return "" -} - -func (x *UpdateRetentionPolicyCommand) GetDuration() int64 { - if x != nil && x.Duration != nil { - return *x.Duration - } - return 0 -} - -func (x *UpdateRetentionPolicyCommand) GetReplicaN() uint32 { - if x != nil && x.ReplicaN != nil { - return *x.ReplicaN - } - return 0 -} - -type CreateShardGroupCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"` - Policy *string `protobuf:"bytes,2,req,name=Policy" json:"Policy,omitempty"` - Timestamp *int64 `protobuf:"varint,3,req,name=Timestamp" json:"Timestamp,omitempty"` -} - -func (x *CreateShardGroupCommand) Reset() { - *x = CreateShardGroupCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateShardGroupCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateShardGroupCommand) ProtoMessage() {} - -func (x *CreateShardGroupCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateShardGroupCommand.ProtoReflect.Descriptor instead. 
-func (*CreateShardGroupCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{21} -} - -func (x *CreateShardGroupCommand) GetDatabase() string { - if x != nil && x.Database != nil { - return *x.Database - } - return "" -} - -func (x *CreateShardGroupCommand) GetPolicy() string { - if x != nil && x.Policy != nil { - return *x.Policy - } - return "" -} - -func (x *CreateShardGroupCommand) GetTimestamp() int64 { - if x != nil && x.Timestamp != nil { - return *x.Timestamp - } - return 0 -} - -type DeleteShardGroupCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"` - Policy *string `protobuf:"bytes,2,req,name=Policy" json:"Policy,omitempty"` - ShardGroupID *uint64 `protobuf:"varint,3,req,name=ShardGroupID" json:"ShardGroupID,omitempty"` -} - -func (x *DeleteShardGroupCommand) Reset() { - *x = DeleteShardGroupCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteShardGroupCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteShardGroupCommand) ProtoMessage() {} - -func (x *DeleteShardGroupCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteShardGroupCommand.ProtoReflect.Descriptor instead. -func (*DeleteShardGroupCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{22} -} - -func (x *DeleteShardGroupCommand) GetDatabase() string { - if x != nil && x.Database != nil { - return *x.Database - } - return "" -} - -func (x *DeleteShardGroupCommand) GetPolicy() string { - if x != nil && x.Policy != nil { - return *x.Policy - } - return "" -} - -func (x *DeleteShardGroupCommand) GetShardGroupID() uint64 { - if x != nil && x.ShardGroupID != nil { - return *x.ShardGroupID - } - return 0 -} - -type CreateContinuousQueryCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"` - Name *string `protobuf:"bytes,2,req,name=Name" json:"Name,omitempty"` - Query *string `protobuf:"bytes,3,req,name=Query" json:"Query,omitempty"` -} - -func (x *CreateContinuousQueryCommand) Reset() { - *x = CreateContinuousQueryCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateContinuousQueryCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateContinuousQueryCommand) ProtoMessage() {} - -func (x *CreateContinuousQueryCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateContinuousQueryCommand.ProtoReflect.Descriptor instead. 
-func (*CreateContinuousQueryCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{23} -} - -func (x *CreateContinuousQueryCommand) GetDatabase() string { - if x != nil && x.Database != nil { - return *x.Database - } - return "" -} - -func (x *CreateContinuousQueryCommand) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *CreateContinuousQueryCommand) GetQuery() string { - if x != nil && x.Query != nil { - return *x.Query - } - return "" -} - -type DropContinuousQueryCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Database *string `protobuf:"bytes,1,req,name=Database" json:"Database,omitempty"` - Name *string `protobuf:"bytes,2,req,name=Name" json:"Name,omitempty"` -} - -func (x *DropContinuousQueryCommand) Reset() { - *x = DropContinuousQueryCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DropContinuousQueryCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DropContinuousQueryCommand) ProtoMessage() {} - -func (x *DropContinuousQueryCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DropContinuousQueryCommand.ProtoReflect.Descriptor instead. -func (*DropContinuousQueryCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{24} -} - -func (x *DropContinuousQueryCommand) GetDatabase() string { - if x != nil && x.Database != nil { - return *x.Database - } - return "" -} - -func (x *DropContinuousQueryCommand) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -type CreateUserCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` - Hash *string `protobuf:"bytes,2,req,name=Hash" json:"Hash,omitempty"` - Admin *bool `protobuf:"varint,3,req,name=Admin" json:"Admin,omitempty"` -} - -func (x *CreateUserCommand) Reset() { - *x = CreateUserCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateUserCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateUserCommand) ProtoMessage() {} - -func (x *CreateUserCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateUserCommand.ProtoReflect.Descriptor instead. 
-func (*CreateUserCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{25} -} - -func (x *CreateUserCommand) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *CreateUserCommand) GetHash() string { - if x != nil && x.Hash != nil { - return *x.Hash - } - return "" -} - -func (x *CreateUserCommand) GetAdmin() bool { - if x != nil && x.Admin != nil { - return *x.Admin - } - return false -} - -type DropUserCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` -} - -func (x *DropUserCommand) Reset() { - *x = DropUserCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DropUserCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DropUserCommand) ProtoMessage() {} - -func (x *DropUserCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DropUserCommand.ProtoReflect.Descriptor instead. -func (*DropUserCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{26} -} - -func (x *DropUserCommand) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -type UpdateUserCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` - Hash *string `protobuf:"bytes,2,req,name=Hash" json:"Hash,omitempty"` -} - -func (x *UpdateUserCommand) Reset() { - *x = UpdateUserCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateUserCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateUserCommand) ProtoMessage() {} - -func (x *UpdateUserCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateUserCommand.ProtoReflect.Descriptor instead. 
-func (*UpdateUserCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{27} -} - -func (x *UpdateUserCommand) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *UpdateUserCommand) GetHash() string { - if x != nil && x.Hash != nil { - return *x.Hash - } - return "" -} - -type SetPrivilegeCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Username *string `protobuf:"bytes,1,req,name=Username" json:"Username,omitempty"` - Database *string `protobuf:"bytes,2,req,name=Database" json:"Database,omitempty"` - Privilege *int32 `protobuf:"varint,3,req,name=Privilege" json:"Privilege,omitempty"` -} - -func (x *SetPrivilegeCommand) Reset() { - *x = SetPrivilegeCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetPrivilegeCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetPrivilegeCommand) ProtoMessage() {} - -func (x *SetPrivilegeCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetPrivilegeCommand.ProtoReflect.Descriptor instead. -func (*SetPrivilegeCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{28} -} - -func (x *SetPrivilegeCommand) GetUsername() string { - if x != nil && x.Username != nil { - return *x.Username - } - return "" -} - -func (x *SetPrivilegeCommand) GetDatabase() string { - if x != nil && x.Database != nil { - return *x.Database - } - return "" -} - -func (x *SetPrivilegeCommand) GetPrivilege() int32 { - if x != nil && x.Privilege != nil { - return *x.Privilege - } - return 0 -} - -type SetDataCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Data *Data `protobuf:"bytes,1,req,name=Data" json:"Data,omitempty"` -} - -func (x *SetDataCommand) Reset() { - *x = SetDataCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetDataCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetDataCommand) ProtoMessage() {} - -func (x *SetDataCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetDataCommand.ProtoReflect.Descriptor instead. 
-func (*SetDataCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{29} -} - -func (x *SetDataCommand) GetData() *Data { - if x != nil { - return x.Data - } - return nil -} - -type SetAdminPrivilegeCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Username *string `protobuf:"bytes,1,req,name=Username" json:"Username,omitempty"` - Admin *bool `protobuf:"varint,2,req,name=Admin" json:"Admin,omitempty"` -} - -func (x *SetAdminPrivilegeCommand) Reset() { - *x = SetAdminPrivilegeCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetAdminPrivilegeCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetAdminPrivilegeCommand) ProtoMessage() {} - -func (x *SetAdminPrivilegeCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetAdminPrivilegeCommand.ProtoReflect.Descriptor instead. -func (*SetAdminPrivilegeCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{30} -} - -func (x *SetAdminPrivilegeCommand) GetUsername() string { - if x != nil && x.Username != nil { - return *x.Username - } - return "" -} - -func (x *SetAdminPrivilegeCommand) GetAdmin() bool { - if x != nil && x.Admin != nil { - return *x.Admin - } - return false -} - -type UpdateNodeCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"` - Host *string `protobuf:"bytes,2,req,name=Host" json:"Host,omitempty"` -} - -func (x *UpdateNodeCommand) Reset() { - *x = UpdateNodeCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateNodeCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateNodeCommand) ProtoMessage() {} - -func (x *UpdateNodeCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateNodeCommand.ProtoReflect.Descriptor instead. 
-func (*UpdateNodeCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{31} -} - -func (x *UpdateNodeCommand) GetID() uint64 { - if x != nil && x.ID != nil { - return *x.ID - } - return 0 -} - -func (x *UpdateNodeCommand) GetHost() string { - if x != nil && x.Host != nil { - return *x.Host - } - return "" -} - -type CreateSubscriptionCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` - Database *string `protobuf:"bytes,2,req,name=Database" json:"Database,omitempty"` - RetentionPolicy *string `protobuf:"bytes,3,req,name=RetentionPolicy" json:"RetentionPolicy,omitempty"` - Mode *string `protobuf:"bytes,4,req,name=Mode" json:"Mode,omitempty"` - Destinations []string `protobuf:"bytes,5,rep,name=Destinations" json:"Destinations,omitempty"` -} - -func (x *CreateSubscriptionCommand) Reset() { - *x = CreateSubscriptionCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateSubscriptionCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateSubscriptionCommand) ProtoMessage() {} - -func (x *CreateSubscriptionCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateSubscriptionCommand.ProtoReflect.Descriptor instead. -func (*CreateSubscriptionCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{32} -} - -func (x *CreateSubscriptionCommand) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *CreateSubscriptionCommand) GetDatabase() string { - if x != nil && x.Database != nil { - return *x.Database - } - return "" -} - -func (x *CreateSubscriptionCommand) GetRetentionPolicy() string { - if x != nil && x.RetentionPolicy != nil { - return *x.RetentionPolicy - } - return "" -} - -func (x *CreateSubscriptionCommand) GetMode() string { - if x != nil && x.Mode != nil { - return *x.Mode - } - return "" -} - -func (x *CreateSubscriptionCommand) GetDestinations() []string { - if x != nil { - return x.Destinations - } - return nil -} - -type DropSubscriptionCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,req,name=Name" json:"Name,omitempty"` - Database *string `protobuf:"bytes,2,req,name=Database" json:"Database,omitempty"` - RetentionPolicy *string `protobuf:"bytes,3,req,name=RetentionPolicy" json:"RetentionPolicy,omitempty"` -} - -func (x *DropSubscriptionCommand) Reset() { - *x = DropSubscriptionCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DropSubscriptionCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DropSubscriptionCommand) ProtoMessage() {} - -func (x *DropSubscriptionCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DropSubscriptionCommand.ProtoReflect.Descriptor instead. -func (*DropSubscriptionCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{33} -} - -func (x *DropSubscriptionCommand) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *DropSubscriptionCommand) GetDatabase() string { - if x != nil && x.Database != nil { - return *x.Database - } - return "" -} - -func (x *DropSubscriptionCommand) GetRetentionPolicy() string { - if x != nil && x.RetentionPolicy != nil { - return *x.RetentionPolicy - } - return "" -} - -type RemovePeerCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID *uint64 `protobuf:"varint,1,opt,name=ID" json:"ID,omitempty"` - Addr *string `protobuf:"bytes,2,req,name=Addr" json:"Addr,omitempty"` -} - -func (x *RemovePeerCommand) Reset() { - *x = RemovePeerCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemovePeerCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemovePeerCommand) ProtoMessage() {} - -func (x *RemovePeerCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemovePeerCommand.ProtoReflect.Descriptor instead. -func (*RemovePeerCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{34} -} - -func (x *RemovePeerCommand) GetID() uint64 { - if x != nil && x.ID != nil { - return *x.ID - } - return 0 -} - -func (x *RemovePeerCommand) GetAddr() string { - if x != nil && x.Addr != nil { - return *x.Addr - } - return "" -} - -type CreateMetaNodeCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - HTTPAddr *string `protobuf:"bytes,1,req,name=HTTPAddr" json:"HTTPAddr,omitempty"` - TCPAddr *string `protobuf:"bytes,2,req,name=TCPAddr" json:"TCPAddr,omitempty"` - Rand *uint64 `protobuf:"varint,3,req,name=Rand" json:"Rand,omitempty"` -} - -func (x *CreateMetaNodeCommand) Reset() { - *x = CreateMetaNodeCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateMetaNodeCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateMetaNodeCommand) ProtoMessage() {} - -func (x *CreateMetaNodeCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateMetaNodeCommand.ProtoReflect.Descriptor instead. 
-func (*CreateMetaNodeCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{35} -} - -func (x *CreateMetaNodeCommand) GetHTTPAddr() string { - if x != nil && x.HTTPAddr != nil { - return *x.HTTPAddr - } - return "" -} - -func (x *CreateMetaNodeCommand) GetTCPAddr() string { - if x != nil && x.TCPAddr != nil { - return *x.TCPAddr - } - return "" -} - -func (x *CreateMetaNodeCommand) GetRand() uint64 { - if x != nil && x.Rand != nil { - return *x.Rand - } - return 0 -} - -type CreateDataNodeCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - HTTPAddr *string `protobuf:"bytes,1,req,name=HTTPAddr" json:"HTTPAddr,omitempty"` - TCPAddr *string `protobuf:"bytes,2,req,name=TCPAddr" json:"TCPAddr,omitempty"` -} - -func (x *CreateDataNodeCommand) Reset() { - *x = CreateDataNodeCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateDataNodeCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateDataNodeCommand) ProtoMessage() {} - -func (x *CreateDataNodeCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateDataNodeCommand.ProtoReflect.Descriptor instead. -func (*CreateDataNodeCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{36} -} - -func (x *CreateDataNodeCommand) GetHTTPAddr() string { - if x != nil && x.HTTPAddr != nil { - return *x.HTTPAddr - } - return "" -} - -func (x *CreateDataNodeCommand) GetTCPAddr() string { - if x != nil && x.TCPAddr != nil { - return *x.TCPAddr - } - return "" -} - -type UpdateDataNodeCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"` - Host *string `protobuf:"bytes,2,req,name=Host" json:"Host,omitempty"` - TCPHost *string `protobuf:"bytes,3,req,name=TCPHost" json:"TCPHost,omitempty"` -} - -func (x *UpdateDataNodeCommand) Reset() { - *x = UpdateDataNodeCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateDataNodeCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateDataNodeCommand) ProtoMessage() {} - -func (x *UpdateDataNodeCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateDataNodeCommand.ProtoReflect.Descriptor instead. 
-func (*UpdateDataNodeCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{37} -} - -func (x *UpdateDataNodeCommand) GetID() uint64 { - if x != nil && x.ID != nil { - return *x.ID - } - return 0 -} - -func (x *UpdateDataNodeCommand) GetHost() string { - if x != nil && x.Host != nil { - return *x.Host - } - return "" -} - -func (x *UpdateDataNodeCommand) GetTCPHost() string { - if x != nil && x.TCPHost != nil { - return *x.TCPHost - } - return "" -} - -type DeleteMetaNodeCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"` -} - -func (x *DeleteMetaNodeCommand) Reset() { - *x = DeleteMetaNodeCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteMetaNodeCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteMetaNodeCommand) ProtoMessage() {} - -func (x *DeleteMetaNodeCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteMetaNodeCommand.ProtoReflect.Descriptor instead. -func (*DeleteMetaNodeCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{38} -} - -func (x *DeleteMetaNodeCommand) GetID() uint64 { - if x != nil && x.ID != nil { - return *x.ID - } - return 0 -} - -type DeleteDataNodeCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"` -} - -func (x *DeleteDataNodeCommand) Reset() { - *x = DeleteDataNodeCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteDataNodeCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteDataNodeCommand) ProtoMessage() {} - -func (x *DeleteDataNodeCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteDataNodeCommand.ProtoReflect.Descriptor instead. 
-func (*DeleteDataNodeCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{39} -} - -func (x *DeleteDataNodeCommand) GetID() uint64 { - if x != nil && x.ID != nil { - return *x.ID - } - return 0 -} - -type Response struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - OK *bool `protobuf:"varint,1,req,name=OK" json:"OK,omitempty"` - Error *string `protobuf:"bytes,2,opt,name=Error" json:"Error,omitempty"` - Index *uint64 `protobuf:"varint,3,opt,name=Index" json:"Index,omitempty"` -} - -func (x *Response) Reset() { - *x = Response{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Response) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Response) ProtoMessage() {} - -func (x *Response) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Response.ProtoReflect.Descriptor instead. -func (*Response) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{40} -} - -func (x *Response) GetOK() bool { - if x != nil && x.OK != nil { - return *x.OK - } - return false -} - -func (x *Response) GetError() string { - if x != nil && x.Error != nil { - return *x.Error - } - return "" -} - -func (x *Response) GetIndex() uint64 { - if x != nil && x.Index != nil { - return *x.Index - } - return 0 -} - -// SetMetaNodeCommand is for the initial metanode in a cluster or -// if the single host restarts and its hostname changes, this will update it -type SetMetaNodeCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - HTTPAddr *string `protobuf:"bytes,1,req,name=HTTPAddr" json:"HTTPAddr,omitempty"` - TCPAddr *string `protobuf:"bytes,2,req,name=TCPAddr" json:"TCPAddr,omitempty"` - Rand *uint64 `protobuf:"varint,3,req,name=Rand" json:"Rand,omitempty"` -} - -func (x *SetMetaNodeCommand) Reset() { - *x = SetMetaNodeCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetMetaNodeCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetMetaNodeCommand) ProtoMessage() {} - -func (x *SetMetaNodeCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetMetaNodeCommand.ProtoReflect.Descriptor instead. 
-func (*SetMetaNodeCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{41} -} - -func (x *SetMetaNodeCommand) GetHTTPAddr() string { - if x != nil && x.HTTPAddr != nil { - return *x.HTTPAddr - } - return "" -} - -func (x *SetMetaNodeCommand) GetTCPAddr() string { - if x != nil && x.TCPAddr != nil { - return *x.TCPAddr - } - return "" -} - -func (x *SetMetaNodeCommand) GetRand() uint64 { - if x != nil && x.Rand != nil { - return *x.Rand - } - return 0 -} - -type DropShardCommand struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID *uint64 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"` -} - -func (x *DropShardCommand) Reset() { - *x = DropShardCommand{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_meta_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DropShardCommand) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DropShardCommand) ProtoMessage() {} - -func (x *DropShardCommand) ProtoReflect() protoreflect.Message { - mi := &file_internal_meta_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DropShardCommand.ProtoReflect.Descriptor instead. -func (*DropShardCommand) Descriptor() ([]byte, []int) { - return file_internal_meta_proto_rawDescGZIP(), []int{42} -} - -func (x *DropShardCommand) GetID() uint64 { - if x != nil && x.ID != nil { - return *x.ID - } - return 0 -} - -var file_internal_meta_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*Command)(nil), - ExtensionType: (*CreateNodeCommand)(nil), - Field: 101, - Name: "meta.CreateNodeCommand.command", - Tag: "bytes,101,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*DeleteNodeCommand)(nil), - Field: 102, - Name: "meta.DeleteNodeCommand.command", - Tag: "bytes,102,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*CreateDatabaseCommand)(nil), - Field: 103, - Name: "meta.CreateDatabaseCommand.command", - Tag: "bytes,103,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*DropDatabaseCommand)(nil), - Field: 104, - Name: "meta.DropDatabaseCommand.command", - Tag: "bytes,104,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*CreateRetentionPolicyCommand)(nil), - Field: 105, - Name: "meta.CreateRetentionPolicyCommand.command", - Tag: "bytes,105,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*DropRetentionPolicyCommand)(nil), - Field: 106, - Name: "meta.DropRetentionPolicyCommand.command", - Tag: "bytes,106,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*SetDefaultRetentionPolicyCommand)(nil), - Field: 107, - Name: "meta.SetDefaultRetentionPolicyCommand.command", - Tag: "bytes,107,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*UpdateRetentionPolicyCommand)(nil), - Field: 108, - Name: "meta.UpdateRetentionPolicyCommand.command", - Tag: "bytes,108,opt,name=command", - 
Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*CreateShardGroupCommand)(nil), - Field: 109, - Name: "meta.CreateShardGroupCommand.command", - Tag: "bytes,109,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*DeleteShardGroupCommand)(nil), - Field: 110, - Name: "meta.DeleteShardGroupCommand.command", - Tag: "bytes,110,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*CreateContinuousQueryCommand)(nil), - Field: 111, - Name: "meta.CreateContinuousQueryCommand.command", - Tag: "bytes,111,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*DropContinuousQueryCommand)(nil), - Field: 112, - Name: "meta.DropContinuousQueryCommand.command", - Tag: "bytes,112,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*CreateUserCommand)(nil), - Field: 113, - Name: "meta.CreateUserCommand.command", - Tag: "bytes,113,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*DropUserCommand)(nil), - Field: 114, - Name: "meta.DropUserCommand.command", - Tag: "bytes,114,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*UpdateUserCommand)(nil), - Field: 115, - Name: "meta.UpdateUserCommand.command", - Tag: "bytes,115,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*SetPrivilegeCommand)(nil), - Field: 116, - Name: "meta.SetPrivilegeCommand.command", - Tag: "bytes,116,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*SetDataCommand)(nil), - Field: 117, - Name: "meta.SetDataCommand.command", - Tag: "bytes,117,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*SetAdminPrivilegeCommand)(nil), - Field: 118, - Name: "meta.SetAdminPrivilegeCommand.command", - Tag: "bytes,118,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*UpdateNodeCommand)(nil), - Field: 119, - Name: "meta.UpdateNodeCommand.command", - Tag: "bytes,119,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*CreateSubscriptionCommand)(nil), - Field: 121, - Name: "meta.CreateSubscriptionCommand.command", - Tag: "bytes,121,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*DropSubscriptionCommand)(nil), - Field: 122, - Name: "meta.DropSubscriptionCommand.command", - Tag: "bytes,122,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*RemovePeerCommand)(nil), - Field: 123, - Name: "meta.RemovePeerCommand.command", - Tag: "bytes,123,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*CreateMetaNodeCommand)(nil), - Field: 124, - Name: "meta.CreateMetaNodeCommand.command", - Tag: "bytes,124,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*CreateDataNodeCommand)(nil), - Field: 125, - Name: "meta.CreateDataNodeCommand.command", - Tag: "bytes,125,opt,name=command", - Filename: 
"internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*UpdateDataNodeCommand)(nil), - Field: 126, - Name: "meta.UpdateDataNodeCommand.command", - Tag: "bytes,126,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*DeleteMetaNodeCommand)(nil), - Field: 127, - Name: "meta.DeleteMetaNodeCommand.command", - Tag: "bytes,127,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*DeleteDataNodeCommand)(nil), - Field: 128, - Name: "meta.DeleteDataNodeCommand.command", - Tag: "bytes,128,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*SetMetaNodeCommand)(nil), - Field: 129, - Name: "meta.SetMetaNodeCommand.command", - Tag: "bytes,129,opt,name=command", - Filename: "internal/meta.proto", - }, - { - ExtendedType: (*Command)(nil), - ExtensionType: (*DropShardCommand)(nil), - Field: 130, - Name: "meta.DropShardCommand.command", - Tag: "bytes,130,opt,name=command", - Filename: "internal/meta.proto", - }, -} - -// Extension fields to Command. -var ( - // optional meta.CreateNodeCommand command = 101; - E_CreateNodeCommand_Command = &file_internal_meta_proto_extTypes[0] - // optional meta.DeleteNodeCommand command = 102; - E_DeleteNodeCommand_Command = &file_internal_meta_proto_extTypes[1] - // optional meta.CreateDatabaseCommand command = 103; - E_CreateDatabaseCommand_Command = &file_internal_meta_proto_extTypes[2] - // optional meta.DropDatabaseCommand command = 104; - E_DropDatabaseCommand_Command = &file_internal_meta_proto_extTypes[3] - // optional meta.CreateRetentionPolicyCommand command = 105; - E_CreateRetentionPolicyCommand_Command = &file_internal_meta_proto_extTypes[4] - // optional meta.DropRetentionPolicyCommand command = 106; - E_DropRetentionPolicyCommand_Command = &file_internal_meta_proto_extTypes[5] - // optional meta.SetDefaultRetentionPolicyCommand command = 107; - E_SetDefaultRetentionPolicyCommand_Command = &file_internal_meta_proto_extTypes[6] - // optional meta.UpdateRetentionPolicyCommand command = 108; - E_UpdateRetentionPolicyCommand_Command = &file_internal_meta_proto_extTypes[7] - // optional meta.CreateShardGroupCommand command = 109; - E_CreateShardGroupCommand_Command = &file_internal_meta_proto_extTypes[8] - // optional meta.DeleteShardGroupCommand command = 110; - E_DeleteShardGroupCommand_Command = &file_internal_meta_proto_extTypes[9] - // optional meta.CreateContinuousQueryCommand command = 111; - E_CreateContinuousQueryCommand_Command = &file_internal_meta_proto_extTypes[10] - // optional meta.DropContinuousQueryCommand command = 112; - E_DropContinuousQueryCommand_Command = &file_internal_meta_proto_extTypes[11] - // optional meta.CreateUserCommand command = 113; - E_CreateUserCommand_Command = &file_internal_meta_proto_extTypes[12] - // optional meta.DropUserCommand command = 114; - E_DropUserCommand_Command = &file_internal_meta_proto_extTypes[13] - // optional meta.UpdateUserCommand command = 115; - E_UpdateUserCommand_Command = &file_internal_meta_proto_extTypes[14] - // optional meta.SetPrivilegeCommand command = 116; - E_SetPrivilegeCommand_Command = &file_internal_meta_proto_extTypes[15] - // optional meta.SetDataCommand command = 117; - E_SetDataCommand_Command = &file_internal_meta_proto_extTypes[16] - // optional meta.SetAdminPrivilegeCommand command = 118; - E_SetAdminPrivilegeCommand_Command = &file_internal_meta_proto_extTypes[17] - // 
optional meta.UpdateNodeCommand command = 119; - E_UpdateNodeCommand_Command = &file_internal_meta_proto_extTypes[18] - // optional meta.CreateSubscriptionCommand command = 121; - E_CreateSubscriptionCommand_Command = &file_internal_meta_proto_extTypes[19] - // optional meta.DropSubscriptionCommand command = 122; - E_DropSubscriptionCommand_Command = &file_internal_meta_proto_extTypes[20] - // optional meta.RemovePeerCommand command = 123; - E_RemovePeerCommand_Command = &file_internal_meta_proto_extTypes[21] - // optional meta.CreateMetaNodeCommand command = 124; - E_CreateMetaNodeCommand_Command = &file_internal_meta_proto_extTypes[22] - // optional meta.CreateDataNodeCommand command = 125; - E_CreateDataNodeCommand_Command = &file_internal_meta_proto_extTypes[23] - // optional meta.UpdateDataNodeCommand command = 126; - E_UpdateDataNodeCommand_Command = &file_internal_meta_proto_extTypes[24] - // optional meta.DeleteMetaNodeCommand command = 127; - E_DeleteMetaNodeCommand_Command = &file_internal_meta_proto_extTypes[25] - // optional meta.DeleteDataNodeCommand command = 128; - E_DeleteDataNodeCommand_Command = &file_internal_meta_proto_extTypes[26] - // optional meta.SetMetaNodeCommand command = 129; - E_SetMetaNodeCommand_Command = &file_internal_meta_proto_extTypes[27] - // optional meta.DropShardCommand command = 130; - E_DropShardCommand_Command = &file_internal_meta_proto_extTypes[28] -) - -var File_internal_meta_proto protoreflect.FileDescriptor - -var file_internal_meta_proto_rawDesc = []byte{ - 0x0a, 0x13, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x22, 0x90, 0x03, 0x0a, 0x04, - 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x65, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x02, - 0x28, 0x04, 0x52, 0x04, 0x54, 0x65, 0x72, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x18, 0x02, 0x20, 0x02, 0x28, 0x04, 0x52, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1c, - 0x0a, 0x09, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x44, 0x18, 0x03, 0x20, 0x02, 0x28, - 0x04, 0x52, 0x09, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x44, 0x12, 0x24, 0x0a, 0x05, - 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6d, 0x65, - 0x74, 0x61, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x4e, 0x6f, 0x64, - 0x65, 0x73, 0x12, 0x30, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, - 0x61, 0x62, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x44, 0x61, 0x74, 0x61, 0x62, - 0x61, 0x73, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x05, 0x55, 0x73, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x4d, 0x61, - 0x78, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x07, 0x20, 0x02, 0x28, 0x04, 0x52, 0x09, 0x4d, - 0x61, 0x78, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x12, 0x28, 0x0a, 0x0f, 0x4d, 0x61, 0x78, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x18, 0x08, 0x20, 0x02, 0x28, - 0x04, 0x52, 0x0f, 0x4d, 0x61, 0x78, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, - 0x49, 0x44, 0x12, 0x1e, 0x0a, 0x0a, 0x4d, 0x61, 0x78, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x44, - 0x18, 0x09, 0x20, 0x02, 0x28, 0x04, 0x52, 0x0a, 0x4d, 0x61, 
0x78, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x49, 0x44, 0x12, 0x2c, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x18, - 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x4e, 0x6f, 0x64, - 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x73, - 0x12, 0x2c, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x0b, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x48, - 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, - 0x18, 0x01, 0x20, 0x02, 0x28, 0x04, 0x52, 0x02, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x6f, - 0x73, 0x74, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x18, - 0x0a, 0x07, 0x54, 0x43, 0x50, 0x48, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x54, 0x43, 0x50, 0x48, 0x6f, 0x73, 0x74, 0x22, 0xec, 0x01, 0x0a, 0x0c, 0x44, 0x61, 0x74, - 0x61, 0x62, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, - 0x16, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x16, 0x44, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x47, 0x0a, 0x11, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x52, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x47, - 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x6f, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, - 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x65, 0x74, 0x61, - 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x6f, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x6f, 0x75, 0x73, - 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x70, 0x65, 0x63, 0x12, - 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x2e, 0x0a, 0x12, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x1a, 0x0a, 0x08, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x08, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x22, 0x87, 0x02, 0x0a, 0x13, - 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 
0x79, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, - 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x03, 0x52, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x12, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x72, 0x6f, 0x75, - 0x70, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x02, 0x28, 0x03, 0x52, - 0x12, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x18, - 0x04, 0x20, 0x02, 0x28, 0x0d, 0x52, 0x08, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x12, - 0x36, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x3c, 0x0a, 0x0d, 0x53, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xc1, 0x01, 0x0a, 0x0e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, - 0x20, 0x02, 0x28, 0x04, 0x52, 0x02, 0x49, 0x44, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x02, 0x28, 0x03, 0x52, 0x09, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x45, 0x6e, 0x64, 0x54, 0x69, 0x6d, - 0x65, 0x18, 0x03, 0x20, 0x02, 0x28, 0x03, 0x52, 0x07, 0x45, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x1c, 0x0a, 0x09, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x04, 0x20, - 0x02, 0x28, 0x03, 0x52, 0x09, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x27, - 0x0a, 0x06, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, - 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x06, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x54, 0x72, 0x75, 0x6e, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x54, 0x72, - 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x65, 0x0a, 0x09, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x02, - 0x28, 0x04, 0x52, 0x02, 0x49, 0x44, 0x12, 0x1e, 0x0a, 0x08, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x49, - 0x44, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x04, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x4f, 0x77, - 0x6e, 0x65, 0x72, 0x49, 0x44, 0x73, 0x12, 0x28, 0x0a, 0x06, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x52, 0x06, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x73, - 0x22, 0x5e, 0x0a, 0x10, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x02, - 0x28, 0x09, 
0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, - 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, 0x0c, - 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0c, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0x24, 0x0a, 0x0a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, - 0x0a, 0x06, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x02, 0x28, 0x04, 0x52, 0x06, - 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x44, 0x22, 0x3f, 0x0a, 0x13, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, - 0x75, 0x6f, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, - 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, - 0x52, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x22, 0x7d, 0x0a, 0x08, 0x55, 0x73, 0x65, 0x72, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, - 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x61, 0x73, 0x68, 0x18, - 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x48, 0x61, 0x73, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x41, - 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x02, 0x28, 0x08, 0x52, 0x05, 0x41, 0x64, 0x6d, 0x69, - 0x6e, 0x12, 0x33, 0x0a, 0x0a, 0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x55, 0x73, 0x65, - 0x72, 0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x52, 0x0a, 0x50, 0x72, 0x69, 0x76, - 0x69, 0x6c, 0x65, 0x67, 0x65, 0x73, 0x22, 0x49, 0x0a, 0x0d, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, - 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, - 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, - 0x61, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x02, 0x28, 0x05, 0x52, 0x09, 0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, - 0x65, 0x22, 0xd9, 0x06, 0x0a, 0x07, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x26, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x6d, 0x65, - 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x9b, 0x06, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, - 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, - 0x61, 0x6e, 0x64, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, - 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x43, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x72, 0x6f, 0x70, 0x44, - 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x04, - 0x12, 0x20, 0x0a, 0x1c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, - 0x10, 0x05, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 
0x69, 0x63, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, - 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, - 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x07, 0x12, 0x20, 0x0a, 0x1c, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x08, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x09, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x10, 0x0a, 0x12, 0x20, 0x0a, 0x1c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, - 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x6f, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6d, - 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x0b, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x72, 0x6f, 0x70, 0x43, 0x6f, - 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x6f, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6d, - 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x0c, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x55, 0x73, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x0d, 0x12, 0x13, 0x0a, - 0x0f, 0x44, 0x72, 0x6f, 0x70, 0x55, 0x73, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, - 0x10, 0x0e, 0x12, 0x15, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, - 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x0f, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x65, 0x74, - 0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, - 0x10, 0x10, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, - 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x11, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x65, 0x74, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x10, 0x12, 0x12, 0x15, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, - 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x13, 0x12, 0x1d, 0x0a, 0x19, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x15, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x72, - 0x6f, 0x70, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x16, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x65, 0x6d, 0x6f, 0x76, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x17, 0x12, 0x19, - 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, - 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x18, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x10, 0x19, 0x12, 0x19, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, - 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x1a, 0x12, - 0x19, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x4e, 0x6f, 0x64, - 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x1b, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 
0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, - 0x61, 0x6e, 0x64, 0x10, 0x1c, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, - 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x10, 0x1d, 0x12, 0x14, 0x0a, - 0x10, 0x44, 0x72, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, - 0x64, 0x10, 0x1e, 0x2a, 0x08, 0x08, 0x64, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x7d, 0x0a, - 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, - 0x52, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x52, 0x61, 0x6e, 0x64, 0x18, 0x02, - 0x20, 0x02, 0x28, 0x04, 0x52, 0x04, 0x52, 0x61, 0x6e, 0x64, 0x32, 0x40, 0x0a, 0x07, 0x63, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, - 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, - 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0x7b, 0x0a, 0x11, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, - 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x02, 0x28, 0x04, 0x52, 0x02, 0x49, - 0x44, 0x12, 0x14, 0x0a, 0x05, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, - 0x52, 0x05, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x32, 0x40, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, - 0x64, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, - 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0xb6, 0x01, 0x0a, 0x15, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x43, 0x6f, 0x6d, 0x6d, - 0x61, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, - 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x52, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x32, 0x44, 0x0a, 0x07, - 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, - 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, - 0x65, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, - 0x73, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x22, 0x6d, 0x0a, 0x13, 0x44, 0x72, 0x6f, 0x70, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, - 0x73, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x32, 0x42, 0x0a, - 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x68, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x19, 0x2e, - 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, - 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, - 0x64, 0x22, 0xcc, 0x01, 0x0a, 0x1c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, - 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x43, - 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x02, 0x20, 0x02, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x32, 0x4b, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, - 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x69, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, - 0x22, 0x97, 0x01, 0x0a, 0x1a, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, - 0x1a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, - 0x09, 0x52, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x32, - 0x49, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, - 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x74, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, - 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0xa3, 0x01, 0x0a, 0x20, 0x53, - 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, - 0x1a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, - 0x09, 0x52, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x32, - 0x4f, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, - 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, - 0x22, 0xed, 0x01, 0x0a, 0x1c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x74, 0x65, 0x6e, - 0x74, 0x69, 
0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, - 0x02, 0x28, 0x09, 0x52, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x4e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x4e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x4e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x4e, 0x32, 0x4b, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, - 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x6c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, - 0x22, 0xb3, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x1a, 0x0a, 0x08, - 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, - 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, - 0x02, 0x28, 0x03, 0x52, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x32, 0x46, - 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, - 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x6d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, - 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, - 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0xb9, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, - 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x06, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x18, 0x03, 0x20, 0x02, 0x28, 0x04, 0x52, 0x0c, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x32, 0x46, 0x0a, 0x07, 0x63, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x6e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x65, 0x74, - 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x72, 0x6f, - 0x75, 0x70, 0x43, 0x6f, 0x6d, 0x6d, 
0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x22, 0xb1, 0x01, 0x0a, 0x1c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, - 0x74, 0x69, 0x6e, 0x75, 0x6f, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6d, 0x6d, - 0x61, 0x6e, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, - 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x02, - 0x28, 0x09, 0x52, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x32, 0x4b, 0x0a, 0x07, 0x63, 0x6f, 0x6d, - 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, - 0x61, 0x6e, 0x64, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6d, 0x65, 0x74, 0x61, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x6f, 0x75, - 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, - 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0x97, 0x01, 0x0a, 0x1a, 0x44, 0x72, 0x6f, 0x70, 0x43, - 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x6f, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, - 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, - 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x32, 0x49, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, - 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, - 0x70, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x44, 0x72, 0x6f, - 0x70, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x6f, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, - 0x22, 0x93, 0x01, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x43, - 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x61, - 0x73, 0x68, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x48, 0x61, 0x73, 0x68, 0x12, 0x14, - 0x0a, 0x05, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x02, 0x28, 0x08, 0x52, 0x05, 0x41, - 0x64, 0x6d, 0x69, 0x6e, 0x32, 0x40, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, - 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x71, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, - 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0x65, 0x0a, 0x0f, 0x44, 0x72, 0x6f, 0x70, 0x55, 0x73, - 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x32, 0x3e, 0x0a, - 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x72, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x55, 
0x73, 0x65, 0x72, 0x43, 0x6f, 0x6d, - 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0x7d, 0x0a, - 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, - 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x61, 0x73, 0x68, 0x18, 0x02, - 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x48, 0x61, 0x73, 0x68, 0x32, 0x40, 0x0a, 0x07, 0x63, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x73, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, - 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x6d, - 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0xaf, 0x01, 0x0a, - 0x13, 0x53, 0x65, 0x74, 0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x43, 0x6f, 0x6d, - 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x02, - 0x28, 0x09, 0x52, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, - 0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x18, 0x03, 0x20, 0x02, 0x28, 0x05, 0x52, - 0x09, 0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x32, 0x42, 0x0a, 0x07, 0x63, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x74, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x65, 0x74, - 0x61, 0x2e, 0x53, 0x65, 0x74, 0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x43, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0x6f, - 0x0a, 0x0e, 0x53, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, - 0x12, 0x1e, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x02, 0x28, 0x0b, 0x32, 0x0a, - 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, - 0x32, 0x3d, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, - 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x75, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x43, - 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, - 0x95, 0x01, 0x0a, 0x18, 0x53, 0x65, 0x74, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x69, 0x76, - 0x69, 0x6c, 0x65, 0x67, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x1a, 0x0a, 0x08, - 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, - 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x41, 0x64, 0x6d, 0x69, - 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x05, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x32, 0x47, - 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, - 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x76, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, - 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, - 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 
0x52, 0x07, - 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0x79, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0e, 0x0a, 0x02, - 0x49, 0x44, 0x18, 0x01, 0x20, 0x02, 0x28, 0x04, 0x52, 0x02, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, - 0x48, 0x6f, 0x73, 0x74, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x48, 0x6f, 0x73, 0x74, - 0x32, 0x40, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, - 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x77, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, - 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x22, 0xf7, 0x01, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, - 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, - 0x12, 0x28, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x02, 0x28, 0x09, 0x52, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x4d, 0x6f, - 0x64, 0x65, 0x18, 0x04, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x22, - 0x0a, 0x0c, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x32, 0x48, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, - 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x79, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, - 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0xbb, 0x01, 0x0a, - 0x17, 0x44, 0x72, 0x6f, 0x70, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, - 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x02, 0x28, - 0x09, 0x52, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x32, 0x46, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, - 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x7a, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x53, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, - 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0x79, 0x0a, 0x11, 0x52, 0x65, - 0x6d, 0x6f, 
0x76, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, - 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x49, 0x44, 0x12, - 0x12, 0x0a, 0x04, 0x41, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x41, - 0x64, 0x64, 0x72, 0x32, 0x40, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, - 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x7b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0xa7, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x4d, 0x65, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, - 0x1a, 0x0a, 0x08, 0x48, 0x54, 0x54, 0x50, 0x41, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x02, 0x28, - 0x09, 0x52, 0x08, 0x48, 0x54, 0x54, 0x50, 0x41, 0x64, 0x64, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x54, - 0x43, 0x50, 0x41, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x07, 0x54, 0x43, - 0x50, 0x41, 0x64, 0x64, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x52, 0x61, 0x6e, 0x64, 0x18, 0x03, 0x20, - 0x02, 0x28, 0x04, 0x52, 0x04, 0x52, 0x61, 0x6e, 0x64, 0x32, 0x44, 0x0a, 0x07, 0x63, 0x6f, 0x6d, - 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, - 0x61, 0x6e, 0x64, 0x18, 0x7c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x65, 0x74, 0x61, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x43, - 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, - 0x93, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, - 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x48, 0x54, 0x54, - 0x50, 0x41, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x48, 0x54, 0x54, - 0x50, 0x41, 0x64, 0x64, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x54, 0x43, 0x50, 0x41, 0x64, 0x64, 0x72, - 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x07, 0x54, 0x43, 0x50, 0x41, 0x64, 0x64, 0x72, 0x32, - 0x44, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, - 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x7d, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1b, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, - 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0x9b, 0x01, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, - 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x02, 0x28, 0x04, 0x52, 0x02, 0x49, 0x44, 0x12, - 0x12, 0x0a, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x48, - 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x54, 0x43, 0x50, 0x48, 0x6f, 0x73, 0x74, 0x18, 0x03, - 0x20, 0x02, 0x28, 0x09, 0x52, 0x07, 0x54, 0x43, 0x50, 0x48, 0x6f, 0x73, 0x74, 0x32, 0x44, 0x0a, - 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x7e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, - 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 
0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, - 0x61, 0x6e, 0x64, 0x22, 0x6d, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0e, 0x0a, 0x02, - 0x49, 0x44, 0x18, 0x01, 0x20, 0x02, 0x28, 0x04, 0x52, 0x02, 0x49, 0x44, 0x32, 0x44, 0x0a, 0x07, - 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, - 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x7f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, - 0x65, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x4e, 0x6f, - 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x22, 0x6e, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, - 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x49, - 0x44, 0x18, 0x01, 0x20, 0x02, 0x28, 0x04, 0x52, 0x02, 0x49, 0x44, 0x32, 0x45, 0x0a, 0x07, 0x63, - 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x80, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, - 0x65, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x4e, 0x6f, - 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x22, 0x46, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, - 0x0a, 0x02, 0x4f, 0x4b, 0x18, 0x01, 0x20, 0x02, 0x28, 0x08, 0x52, 0x02, 0x4f, 0x4b, 0x12, 0x14, - 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xa2, 0x01, 0x0a, 0x12, 0x53, - 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x48, 0x54, 0x54, 0x50, 0x41, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, - 0x02, 0x28, 0x09, 0x52, 0x08, 0x48, 0x54, 0x54, 0x50, 0x41, 0x64, 0x64, 0x72, 0x12, 0x18, 0x0a, - 0x07, 0x54, 0x43, 0x50, 0x41, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x07, - 0x54, 0x43, 0x50, 0x41, 0x64, 0x64, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x52, 0x61, 0x6e, 0x64, 0x18, - 0x03, 0x20, 0x02, 0x28, 0x04, 0x52, 0x04, 0x52, 0x61, 0x6e, 0x64, 0x32, 0x42, 0x0a, 0x07, 0x63, - 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x81, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, - 0x65, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x4e, 0x6f, 0x64, 0x65, 0x43, - 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, - 0x64, 0x0a, 0x10, 0x44, 0x72, 0x6f, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6d, 0x6d, - 0x61, 0x6e, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x02, 0x28, 0x04, 0x52, - 0x02, 0x49, 0x44, 0x32, 0x40, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x0d, - 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x82, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x44, 0x72, 0x6f, 0x70, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x07, 0x63, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x42, 0x08, 0x5a, 0x06, 0x2e, 
0x3b, 0x6d, 0x65, 0x74, 0x61, -} - -var ( - file_internal_meta_proto_rawDescOnce sync.Once - file_internal_meta_proto_rawDescData = file_internal_meta_proto_rawDesc -) - -func file_internal_meta_proto_rawDescGZIP() []byte { - file_internal_meta_proto_rawDescOnce.Do(func() { - file_internal_meta_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_meta_proto_rawDescData) - }) - return file_internal_meta_proto_rawDescData -} - -var file_internal_meta_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_internal_meta_proto_msgTypes = make([]protoimpl.MessageInfo, 43) -var file_internal_meta_proto_goTypes = []interface{}{ - (Command_Type)(0), // 0: meta.Command.Type - (*Data)(nil), // 1: meta.Data - (*NodeInfo)(nil), // 2: meta.NodeInfo - (*DatabaseInfo)(nil), // 3: meta.DatabaseInfo - (*RetentionPolicySpec)(nil), // 4: meta.RetentionPolicySpec - (*RetentionPolicyInfo)(nil), // 5: meta.RetentionPolicyInfo - (*ShardGroupInfo)(nil), // 6: meta.ShardGroupInfo - (*ShardInfo)(nil), // 7: meta.ShardInfo - (*SubscriptionInfo)(nil), // 8: meta.SubscriptionInfo - (*ShardOwner)(nil), // 9: meta.ShardOwner - (*ContinuousQueryInfo)(nil), // 10: meta.ContinuousQueryInfo - (*UserInfo)(nil), // 11: meta.UserInfo - (*UserPrivilege)(nil), // 12: meta.UserPrivilege - (*Command)(nil), // 13: meta.Command - (*CreateNodeCommand)(nil), // 14: meta.CreateNodeCommand - (*DeleteNodeCommand)(nil), // 15: meta.DeleteNodeCommand - (*CreateDatabaseCommand)(nil), // 16: meta.CreateDatabaseCommand - (*DropDatabaseCommand)(nil), // 17: meta.DropDatabaseCommand - (*CreateRetentionPolicyCommand)(nil), // 18: meta.CreateRetentionPolicyCommand - (*DropRetentionPolicyCommand)(nil), // 19: meta.DropRetentionPolicyCommand - (*SetDefaultRetentionPolicyCommand)(nil), // 20: meta.SetDefaultRetentionPolicyCommand - (*UpdateRetentionPolicyCommand)(nil), // 21: meta.UpdateRetentionPolicyCommand - (*CreateShardGroupCommand)(nil), // 22: meta.CreateShardGroupCommand - (*DeleteShardGroupCommand)(nil), // 23: meta.DeleteShardGroupCommand - (*CreateContinuousQueryCommand)(nil), // 24: meta.CreateContinuousQueryCommand - (*DropContinuousQueryCommand)(nil), // 25: meta.DropContinuousQueryCommand - (*CreateUserCommand)(nil), // 26: meta.CreateUserCommand - (*DropUserCommand)(nil), // 27: meta.DropUserCommand - (*UpdateUserCommand)(nil), // 28: meta.UpdateUserCommand - (*SetPrivilegeCommand)(nil), // 29: meta.SetPrivilegeCommand - (*SetDataCommand)(nil), // 30: meta.SetDataCommand - (*SetAdminPrivilegeCommand)(nil), // 31: meta.SetAdminPrivilegeCommand - (*UpdateNodeCommand)(nil), // 32: meta.UpdateNodeCommand - (*CreateSubscriptionCommand)(nil), // 33: meta.CreateSubscriptionCommand - (*DropSubscriptionCommand)(nil), // 34: meta.DropSubscriptionCommand - (*RemovePeerCommand)(nil), // 35: meta.RemovePeerCommand - (*CreateMetaNodeCommand)(nil), // 36: meta.CreateMetaNodeCommand - (*CreateDataNodeCommand)(nil), // 37: meta.CreateDataNodeCommand - (*UpdateDataNodeCommand)(nil), // 38: meta.UpdateDataNodeCommand - (*DeleteMetaNodeCommand)(nil), // 39: meta.DeleteMetaNodeCommand - (*DeleteDataNodeCommand)(nil), // 40: meta.DeleteDataNodeCommand - (*Response)(nil), // 41: meta.Response - (*SetMetaNodeCommand)(nil), // 42: meta.SetMetaNodeCommand - (*DropShardCommand)(nil), // 43: meta.DropShardCommand -} -var file_internal_meta_proto_depIdxs = []int32{ - 2, // 0: meta.Data.Nodes:type_name -> meta.NodeInfo - 3, // 1: meta.Data.Databases:type_name -> meta.DatabaseInfo - 11, // 2: meta.Data.Users:type_name -> meta.UserInfo - 2, // 3: 
meta.Data.DataNodes:type_name -> meta.NodeInfo - 2, // 4: meta.Data.MetaNodes:type_name -> meta.NodeInfo - 5, // 5: meta.DatabaseInfo.RetentionPolicies:type_name -> meta.RetentionPolicyInfo - 10, // 6: meta.DatabaseInfo.ContinuousQueries:type_name -> meta.ContinuousQueryInfo - 6, // 7: meta.RetentionPolicyInfo.ShardGroups:type_name -> meta.ShardGroupInfo - 8, // 8: meta.RetentionPolicyInfo.Subscriptions:type_name -> meta.SubscriptionInfo - 7, // 9: meta.ShardGroupInfo.Shards:type_name -> meta.ShardInfo - 9, // 10: meta.ShardInfo.Owners:type_name -> meta.ShardOwner - 12, // 11: meta.UserInfo.Privileges:type_name -> meta.UserPrivilege - 0, // 12: meta.Command.type:type_name -> meta.Command.Type - 5, // 13: meta.CreateDatabaseCommand.RetentionPolicy:type_name -> meta.RetentionPolicyInfo - 5, // 14: meta.CreateRetentionPolicyCommand.RetentionPolicy:type_name -> meta.RetentionPolicyInfo - 1, // 15: meta.SetDataCommand.Data:type_name -> meta.Data - 13, // 16: meta.CreateNodeCommand.command:extendee -> meta.Command - 13, // 17: meta.DeleteNodeCommand.command:extendee -> meta.Command - 13, // 18: meta.CreateDatabaseCommand.command:extendee -> meta.Command - 13, // 19: meta.DropDatabaseCommand.command:extendee -> meta.Command - 13, // 20: meta.CreateRetentionPolicyCommand.command:extendee -> meta.Command - 13, // 21: meta.DropRetentionPolicyCommand.command:extendee -> meta.Command - 13, // 22: meta.SetDefaultRetentionPolicyCommand.command:extendee -> meta.Command - 13, // 23: meta.UpdateRetentionPolicyCommand.command:extendee -> meta.Command - 13, // 24: meta.CreateShardGroupCommand.command:extendee -> meta.Command - 13, // 25: meta.DeleteShardGroupCommand.command:extendee -> meta.Command - 13, // 26: meta.CreateContinuousQueryCommand.command:extendee -> meta.Command - 13, // 27: meta.DropContinuousQueryCommand.command:extendee -> meta.Command - 13, // 28: meta.CreateUserCommand.command:extendee -> meta.Command - 13, // 29: meta.DropUserCommand.command:extendee -> meta.Command - 13, // 30: meta.UpdateUserCommand.command:extendee -> meta.Command - 13, // 31: meta.SetPrivilegeCommand.command:extendee -> meta.Command - 13, // 32: meta.SetDataCommand.command:extendee -> meta.Command - 13, // 33: meta.SetAdminPrivilegeCommand.command:extendee -> meta.Command - 13, // 34: meta.UpdateNodeCommand.command:extendee -> meta.Command - 13, // 35: meta.CreateSubscriptionCommand.command:extendee -> meta.Command - 13, // 36: meta.DropSubscriptionCommand.command:extendee -> meta.Command - 13, // 37: meta.RemovePeerCommand.command:extendee -> meta.Command - 13, // 38: meta.CreateMetaNodeCommand.command:extendee -> meta.Command - 13, // 39: meta.CreateDataNodeCommand.command:extendee -> meta.Command - 13, // 40: meta.UpdateDataNodeCommand.command:extendee -> meta.Command - 13, // 41: meta.DeleteMetaNodeCommand.command:extendee -> meta.Command - 13, // 42: meta.DeleteDataNodeCommand.command:extendee -> meta.Command - 13, // 43: meta.SetMetaNodeCommand.command:extendee -> meta.Command - 13, // 44: meta.DropShardCommand.command:extendee -> meta.Command - 14, // 45: meta.CreateNodeCommand.command:type_name -> meta.CreateNodeCommand - 15, // 46: meta.DeleteNodeCommand.command:type_name -> meta.DeleteNodeCommand - 16, // 47: meta.CreateDatabaseCommand.command:type_name -> meta.CreateDatabaseCommand - 17, // 48: meta.DropDatabaseCommand.command:type_name -> meta.DropDatabaseCommand - 18, // 49: meta.CreateRetentionPolicyCommand.command:type_name -> meta.CreateRetentionPolicyCommand - 19, // 50: 
meta.DropRetentionPolicyCommand.command:type_name -> meta.DropRetentionPolicyCommand - 20, // 51: meta.SetDefaultRetentionPolicyCommand.command:type_name -> meta.SetDefaultRetentionPolicyCommand - 21, // 52: meta.UpdateRetentionPolicyCommand.command:type_name -> meta.UpdateRetentionPolicyCommand - 22, // 53: meta.CreateShardGroupCommand.command:type_name -> meta.CreateShardGroupCommand - 23, // 54: meta.DeleteShardGroupCommand.command:type_name -> meta.DeleteShardGroupCommand - 24, // 55: meta.CreateContinuousQueryCommand.command:type_name -> meta.CreateContinuousQueryCommand - 25, // 56: meta.DropContinuousQueryCommand.command:type_name -> meta.DropContinuousQueryCommand - 26, // 57: meta.CreateUserCommand.command:type_name -> meta.CreateUserCommand - 27, // 58: meta.DropUserCommand.command:type_name -> meta.DropUserCommand - 28, // 59: meta.UpdateUserCommand.command:type_name -> meta.UpdateUserCommand - 29, // 60: meta.SetPrivilegeCommand.command:type_name -> meta.SetPrivilegeCommand - 30, // 61: meta.SetDataCommand.command:type_name -> meta.SetDataCommand - 31, // 62: meta.SetAdminPrivilegeCommand.command:type_name -> meta.SetAdminPrivilegeCommand - 32, // 63: meta.UpdateNodeCommand.command:type_name -> meta.UpdateNodeCommand - 33, // 64: meta.CreateSubscriptionCommand.command:type_name -> meta.CreateSubscriptionCommand - 34, // 65: meta.DropSubscriptionCommand.command:type_name -> meta.DropSubscriptionCommand - 35, // 66: meta.RemovePeerCommand.command:type_name -> meta.RemovePeerCommand - 36, // 67: meta.CreateMetaNodeCommand.command:type_name -> meta.CreateMetaNodeCommand - 37, // 68: meta.CreateDataNodeCommand.command:type_name -> meta.CreateDataNodeCommand - 38, // 69: meta.UpdateDataNodeCommand.command:type_name -> meta.UpdateDataNodeCommand - 39, // 70: meta.DeleteMetaNodeCommand.command:type_name -> meta.DeleteMetaNodeCommand - 40, // 71: meta.DeleteDataNodeCommand.command:type_name -> meta.DeleteDataNodeCommand - 42, // 72: meta.SetMetaNodeCommand.command:type_name -> meta.SetMetaNodeCommand - 43, // 73: meta.DropShardCommand.command:type_name -> meta.DropShardCommand - 74, // [74:74] is the sub-list for method output_type - 74, // [74:74] is the sub-list for method input_type - 45, // [45:74] is the sub-list for extension type_name - 16, // [16:45] is the sub-list for extension extendee - 0, // [0:16] is the sub-list for field type_name -} - -func init() { file_internal_meta_proto_init() } -func file_internal_meta_proto_init() { - if File_internal_meta_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_meta_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Data); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DatabaseInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RetentionPolicySpec); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RetentionPolicyInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardGroupInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SubscriptionInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardOwner); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ContinuousQueryInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UserInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UserPrivilege); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Command); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateNodeCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteNodeCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateDatabaseCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DropDatabaseCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - 
switch v := v.(*CreateRetentionPolicyCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DropRetentionPolicyCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetDefaultRetentionPolicyCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateRetentionPolicyCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateShardGroupCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteShardGroupCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateContinuousQueryCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DropContinuousQueryCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateUserCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DropUserCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateUserCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetPrivilegeCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetDataCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetAdminPrivilegeCommand); i { - case 0: - return 
&v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateNodeCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateSubscriptionCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DropSubscriptionCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemovePeerCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateMetaNodeCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateDataNodeCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateDataNodeCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteMetaNodeCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteDataNodeCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Response); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetMetaNodeCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_meta_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DropShardCommand); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_meta_proto_rawDesc, - NumEnums: 1, - NumMessages: 43, - NumExtensions: 29, - NumServices: 0, - }, - GoTypes: 
file_internal_meta_proto_goTypes, - DependencyIndexes: file_internal_meta_proto_depIdxs, - EnumInfos: file_internal_meta_proto_enumTypes, - MessageInfos: file_internal_meta_proto_msgTypes, - ExtensionInfos: file_internal_meta_proto_extTypes, - }.Build() - File_internal_meta_proto = out.File - file_internal_meta_proto_rawDesc = nil - file_internal_meta_proto_goTypes = nil - file_internal_meta_proto_depIdxs = nil -} diff --git a/v1/services/meta/internal/meta.proto b/v1/services/meta/internal/meta.proto deleted file mode 100644 index f3a398c989b..00000000000 --- a/v1/services/meta/internal/meta.proto +++ /dev/null @@ -1,396 +0,0 @@ -syntax = "proto2"; - -package meta; -option go_package = ".;meta"; - -//======================================================================== -// -// Metadata -// -//======================================================================== - -message Data { - required uint64 Term = 1; - required uint64 Index = 2; - required uint64 ClusterID = 3; - - repeated NodeInfo Nodes = 4; - repeated DatabaseInfo Databases = 5; - repeated UserInfo Users = 6; - - required uint64 MaxNodeID = 7; - required uint64 MaxShardGroupID = 8; - required uint64 MaxShardID = 9; - - // added for 0.10.0 - repeated NodeInfo DataNodes = 10; - repeated NodeInfo MetaNodes = 11; -} - -message NodeInfo { - required uint64 ID = 1; - required string Host = 2; - optional string TCPHost = 3; -} - -message DatabaseInfo { - required string Name = 1; - required string DefaultRetentionPolicy = 2; - repeated RetentionPolicyInfo RetentionPolicies = 3; - repeated ContinuousQueryInfo ContinuousQueries = 4; -} - -message RetentionPolicySpec { - optional string Name = 1; - optional int64 Duration = 2; - optional int64 ShardGroupDuration = 3; - optional uint32 ReplicaN = 4; -} - -message RetentionPolicyInfo { - required string Name = 1; - required int64 Duration = 2; - required int64 ShardGroupDuration = 3; - required uint32 ReplicaN = 4; - repeated ShardGroupInfo ShardGroups = 5; - repeated SubscriptionInfo Subscriptions = 6; -} - -message ShardGroupInfo { - required uint64 ID = 1; - required int64 StartTime = 2; - required int64 EndTime = 3; - required int64 DeletedAt = 4; - repeated ShardInfo Shards = 5; - optional int64 TruncatedAt = 6; -} - -message ShardInfo { - required uint64 ID = 1; - repeated uint64 OwnerIDs = 2 [deprecated=true]; - repeated ShardOwner Owners = 3; -} - -message SubscriptionInfo{ - required string Name = 1; - required string Mode = 2; - repeated string Destinations = 3; -} - -message ShardOwner { - required uint64 NodeID = 1; -} - -message ContinuousQueryInfo { - required string Name = 1; - required string Query = 2; -} - -message UserInfo { - required string Name = 1; - required string Hash = 2; - required bool Admin = 3; - repeated UserPrivilege Privileges = 4; -} - -message UserPrivilege { - required string Database = 1; - required int32 Privilege = 2; -} - - -//======================================================================== -// -// COMMANDS -// -//======================================================================== - -message Command { - extensions 100 to max; - - enum Type { - CreateNodeCommand = 1; - DeleteNodeCommand = 2; - CreateDatabaseCommand = 3; - DropDatabaseCommand = 4; - CreateRetentionPolicyCommand = 5; - DropRetentionPolicyCommand = 6; - SetDefaultRetentionPolicyCommand = 7; - UpdateRetentionPolicyCommand = 8; - CreateShardGroupCommand = 9; - DeleteShardGroupCommand = 10; - CreateContinuousQueryCommand = 11; - DropContinuousQueryCommand = 12; - 
CreateUserCommand = 13; - DropUserCommand = 14; - UpdateUserCommand = 15; - SetPrivilegeCommand = 16; - SetDataCommand = 17; - SetAdminPrivilegeCommand = 18; - UpdateNodeCommand = 19; - CreateSubscriptionCommand = 21; - DropSubscriptionCommand = 22; - RemovePeerCommand = 23; - CreateMetaNodeCommand = 24; - CreateDataNodeCommand = 25; - UpdateDataNodeCommand = 26; - DeleteMetaNodeCommand = 27; - DeleteDataNodeCommand = 28; - SetMetaNodeCommand = 29; - DropShardCommand = 30; - } - - required Type type = 1; -} - -// This isn't used in >= 0.10.0. Kept around for upgrade purposes. Instead -// look at CreateDataNodeCommand and CreateMetaNodeCommand -message CreateNodeCommand { - extend Command { - optional CreateNodeCommand command = 101; - } - required string Host = 1; - required uint64 Rand = 2; -} - -message DeleteNodeCommand { - extend Command { - optional DeleteNodeCommand command = 102; - } - required uint64 ID = 1; - required bool Force = 2; -} - -message CreateDatabaseCommand { - extend Command { - optional CreateDatabaseCommand command = 103; - } - required string Name = 1; - optional RetentionPolicyInfo RetentionPolicy = 2; -} - -message DropDatabaseCommand { - extend Command { - optional DropDatabaseCommand command = 104; - } - required string Name = 1; -} - -message CreateRetentionPolicyCommand { - extend Command { - optional CreateRetentionPolicyCommand command = 105; - } - required string Database = 1; - required RetentionPolicyInfo RetentionPolicy = 2; -} - -message DropRetentionPolicyCommand { - extend Command { - optional DropRetentionPolicyCommand command = 106; - } - required string Database = 1; - required string Name = 2; -} - -message SetDefaultRetentionPolicyCommand { - extend Command { - optional SetDefaultRetentionPolicyCommand command = 107; - } - required string Database = 1; - required string Name = 2; -} - -message UpdateRetentionPolicyCommand { - extend Command { - optional UpdateRetentionPolicyCommand command = 108; - } - required string Database = 1; - required string Name = 2; - optional string NewName = 3; - optional int64 Duration = 4; - optional uint32 ReplicaN = 5; -} - -message CreateShardGroupCommand { - extend Command { - optional CreateShardGroupCommand command = 109; - } - required string Database = 1; - required string Policy = 2; - required int64 Timestamp = 3; -} - -message DeleteShardGroupCommand { - extend Command { - optional DeleteShardGroupCommand command = 110; - } - required string Database = 1; - required string Policy = 2; - required uint64 ShardGroupID = 3; -} - -message CreateContinuousQueryCommand { - extend Command { - optional CreateContinuousQueryCommand command = 111; - } - required string Database = 1; - required string Name = 2; - required string Query = 3; -} - -message DropContinuousQueryCommand { - extend Command { - optional DropContinuousQueryCommand command = 112; - } - required string Database = 1; - required string Name = 2; -} - -message CreateUserCommand { - extend Command { - optional CreateUserCommand command = 113; - } - required string Name = 1; - required string Hash = 2; - required bool Admin = 3; -} - -message DropUserCommand { - extend Command { - optional DropUserCommand command = 114; - } - required string Name = 1; -} - -message UpdateUserCommand { - extend Command { - optional UpdateUserCommand command = 115; - } - required string Name = 1; - required string Hash = 2; -} - -message SetPrivilegeCommand { - extend Command { - optional SetPrivilegeCommand command = 116; - } - required string Username = 1; - 
required string Database = 2; - required int32 Privilege = 3; -} - -message SetDataCommand { - extend Command { - optional SetDataCommand command = 117; - } - required Data Data = 1; -} - -message SetAdminPrivilegeCommand { - extend Command { - optional SetAdminPrivilegeCommand command = 118; - } - required string Username = 1; - required bool Admin = 2; -} - -message UpdateNodeCommand { - extend Command { - optional UpdateNodeCommand command = 119; - } - required uint64 ID = 1; - required string Host = 2; -} - -message CreateSubscriptionCommand { - extend Command { - optional CreateSubscriptionCommand command = 121; - } - required string Name = 1; - required string Database = 2; - required string RetentionPolicy = 3; - required string Mode = 4; - repeated string Destinations = 5; - -} - -message DropSubscriptionCommand { - extend Command { - optional DropSubscriptionCommand command = 122; - } - required string Name = 1; - required string Database = 2; - required string RetentionPolicy = 3; -} - -message RemovePeerCommand { - extend Command { - optional RemovePeerCommand command = 123; - } - optional uint64 ID = 1; - required string Addr = 2; -} - -message CreateMetaNodeCommand { - extend Command { - optional CreateMetaNodeCommand command = 124; - } - required string HTTPAddr = 1; - required string TCPAddr = 2; - required uint64 Rand = 3; -} - -message CreateDataNodeCommand { - extend Command { - optional CreateDataNodeCommand command = 125; - } - required string HTTPAddr = 1; - required string TCPAddr = 2; -} - -message UpdateDataNodeCommand { - extend Command { - optional UpdateDataNodeCommand command = 126; - } - required uint64 ID = 1; - required string Host = 2; - required string TCPHost = 3; -} - -message DeleteMetaNodeCommand { - extend Command { - optional DeleteMetaNodeCommand command = 127; - } - required uint64 ID = 1; -} - -message DeleteDataNodeCommand { - extend Command { - optional DeleteDataNodeCommand command = 128; - } - required uint64 ID = 1; -} - -message Response { - required bool OK = 1; - optional string Error = 2; - optional uint64 Index = 3; -} - -// SetMetaNodeCommand is for the initial metanode in a cluster or -// if the single host restarts and its hostname changes, this will update it -message SetMetaNodeCommand { - extend Command { - optional SetMetaNodeCommand command = 129; - } - required string HTTPAddr = 1; - required string TCPAddr = 2; - required uint64 Rand = 3; -} - -message DropShardCommand { - extend Command { - optional DropShardCommand command = 130; - } - required uint64 ID = 1; -} diff --git a/v1/services/meta/meta_test.go b/v1/services/meta/meta_test.go deleted file mode 100644 index 993b3b28048..00000000000 --- a/v1/services/meta/meta_test.go +++ /dev/null @@ -1,7 +0,0 @@ -package meta - -import "golang.org/x/crypto/bcrypt" - -func init() { - bcryptCost = bcrypt.MinCost -} diff --git a/v1/services/meta/query_authorizer.go b/v1/services/meta/query_authorizer.go deleted file mode 100644 index acf92a86631..00000000000 --- a/v1/services/meta/query_authorizer.go +++ /dev/null @@ -1,135 +0,0 @@ -package meta - -import ( - "fmt" - - "github.com/influxdata/influxql" -) - -// QueryAuthorizer determines whether a user is authorized to execute a given query. -type QueryAuthorizer struct { - Client *Client -} - -// NewQueryAuthorizer returns a new instance of QueryAuthorizer. -func NewQueryAuthorizer(c *Client) *QueryAuthorizer { - return &QueryAuthorizer{ - Client: c, - } -} - -// AuthorizeQuery authorizes u to execute q on database. 
-// Database can be "" for queries that do not require a database. -// If no user is provided it will return an error unless the query's first statement is to create -// a root user. -func (a *QueryAuthorizer) AuthorizeQuery(u User, query *influxql.Query, database string) error { - // Special case if no users exist. - if n := a.Client.UserCount(); n == 0 { - // Ensure there is at least one statement. - if len(query.Statements) > 0 { - // First statement in the query must create a user with admin privilege. - cu, ok := query.Statements[0].(*influxql.CreateUserStatement) - if ok && cu.Admin { - return nil - } - } - return &ErrAuthorize{ - Query: query, - Database: database, - Message: "create admin user first or disable authentication", - } - } - - if u == nil { - return &ErrAuthorize{ - Query: query, - Database: database, - Message: "no user provided", - } - } - - return u.AuthorizeQuery(database, query) -} - -func (a *QueryAuthorizer) AuthorizeDatabase(u User, priv influxql.Privilege, database string) error { - if u == nil { - return &ErrAuthorize{ - Database: database, - Message: "no user provided", - } - } - - if !u.AuthorizeDatabase(priv, database) { - return &ErrAuthorize{ - Database: database, - Message: fmt.Sprintf("user %q, requires %s for database %q", u.ID(), priv.String(), database), - } - } - - return nil -} - -func (u *UserInfo) AuthorizeQuery(database string, query *influxql.Query) error { - - // Admin privilege allows the user to execute all statements. - if u.Admin { - return nil - } - - // Check each statement in the query. - for _, stmt := range query.Statements { - // Get the privileges required to execute the statement. - privs, err := stmt.RequiredPrivileges() - if err != nil { - return err - } - - // Make sure the user has the privileges required to execute - // each statement. - for _, p := range privs { - if p.Admin { - // Admin privilege already checked so statement requiring admin - // privilege cannot be run. - return &ErrAuthorize{ - Query: query, - User: u.Name, - Database: database, - Message: fmt.Sprintf("statement '%s', requires admin privilege", stmt), - } - } - - // Use the db name specified by the statement or the db - // name passed by the caller if one wasn't specified by - // the statement. - db := p.Name - if db == "" { - db = database - } - if !u.AuthorizeDatabase(p.Privilege, db) { - return &ErrAuthorize{ - Query: query, - User: u.Name, - Database: database, - Message: fmt.Sprintf("statement '%s', requires %s on %s", stmt, p.Privilege.String(), db), - } - } - } - } - return nil -} - -// ErrAuthorize represents an authorization error. -type ErrAuthorize struct { - Query *influxql.Query - User string - Database string - Message string -} - -// Error returns the text of the error. -func (e ErrAuthorize) Error() string { - if e.User == "" { - return fmt.Sprint(e.Message) - } - return fmt.Sprintf("%s not authorized to execute %s", e.User, e.Message) -} diff --git a/v1/services/meta/write_authorizer.go b/v1/services/meta/write_authorizer.go deleted file mode 100644 index 51f3ebd038f..00000000000 --- a/v1/services/meta/write_authorizer.go +++ /dev/null @@ -1,29 +0,0 @@ -package meta - -import ( - "fmt" - - "github.com/influxdata/influxql" -) - -// WriteAuthorizer determines whether a user is authorized to write to a given database. -type WriteAuthorizer struct { - Client *Client -} - -// NewWriteAuthorizer returns a new instance of WriteAuthorizer. 
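For orientation, here is a minimal sketch of how the QueryAuthorizer above is typically driven. The `checkQuery` helper and the assumption that a `*meta.Client` is already constructed are illustrative only, not part of the deleted file; the write-side WriteAuthorizer continues below.

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/v2/v1/services/meta"
	"github.com/influxdata/influxql"
)

// checkQuery is an illustrative helper: it parses an InfluxQL statement and asks
// the QueryAuthorizer whether the named user may run it against the database.
func checkQuery(client *meta.Client, username, database, q string) error {
	query, err := influxql.ParseQuery(q)
	if err != nil {
		return err
	}

	// Assumes the user already exists in the meta store.
	user, err := client.User(username)
	if err != nil {
		return err
	}

	auth := meta.NewQueryAuthorizer(client)
	if err := auth.AuthorizeQuery(user, query, database); err != nil {
		// Failures are reported as *ErrAuthorize values naming the missing privilege.
		return fmt.Errorf("query rejected: %w", err)
	}
	return nil
}
```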
-func NewWriteAuthorizer(c *Client) *WriteAuthorizer { - return &WriteAuthorizer{Client: c} -} - -// AuthorizeWrite returns nil if the user has permission to write to the database. -func (a WriteAuthorizer) AuthorizeWrite(username, database string) error { - u, err := a.Client.User(username) - if err != nil || u == nil || !u.AuthorizeDatabase(influxql.WritePrivilege, database) { - return &ErrAuthorize{ - Database: database, - Message: fmt.Sprintf("%s not authorized to write to %s", username, database), - } - } - return nil -} diff --git a/v1/services/precreator/README.md b/v1/services/precreator/README.md deleted file mode 100644 index 8830b7310c4..00000000000 --- a/v1/services/precreator/README.md +++ /dev/null @@ -1,13 +0,0 @@ -Shard Precreation -============ - -During normal operation when InfluxDB receives time-series data, it writes the data to files known as _shards_. Each shard only contains data for a specific range of time. Therefore, before data can be accepted by the system, the shards must exist and InfluxDB always checks that the required shards exist for every incoming data point. If the required shards do not exist, InfluxDB will create those shards. Because this requires a cluster to reach consensus, the process is not instantaneous and can temporarily impact write-throughput. - -Since almost all time-series data is written sequentially in time, the system has an excellent idea of the timestamps of future data. Shard precreation takes advantage of this fact by creating required shards ahead of time, thereby ensuring the required shards exist by the time new time-series data actually arrives. Write-throughput is therefore not affected when data is first received for a range of time that would normally trigger shard creation. - -Note that the shard-existence check must remain in place in the code, even with shard precreation. This is because while most data is written sequentially in time, this is not always the case. Data may be written with timestamps in the past, or farther in the future than shard precreation handles. - -## Configuration -Shard precreation can be disabled if necessary, though this is not recommended. If it is disabled, then shards will only be created when explicitly needed. - -The interval between runs of the shard precreation service, as well as the time-in-advance the shards are created, are also configurable. The defaults should work for most deployments. diff --git a/v1/services/precreator/config.go b/v1/services/precreator/config.go deleted file mode 100644 index 0ef7aae0225..00000000000 --- a/v1/services/precreator/config.go +++ /dev/null @@ -1,49 +0,0 @@ -package precreator - -import ( - "errors" - "time" - - "github.com/influxdata/influxdb/v2/toml" -) - -const ( - // DefaultCheckInterval is the shard precreation check time if none is specified. - DefaultCheckInterval = 10 * time.Minute - - // DefaultAdvancePeriod is the default period ahead of the end time of a shard group - // that its successor group is created. - DefaultAdvancePeriod = 30 * time.Minute -) - -// Config represents the configuration for shard precreation. - type Config struct { - Enabled bool `toml:"enabled"` - CheckInterval toml.Duration `toml:"check-interval"` - AdvancePeriod toml.Duration `toml:"advance-period"` -} - -// NewConfig returns a new Config with defaults.
-func NewConfig() Config { - return Config{ - Enabled: true, - CheckInterval: toml.Duration(DefaultCheckInterval), - AdvancePeriod: toml.Duration(DefaultAdvancePeriod), - } -} - -// Validate returns an error if the Config is invalid. -func (c Config) Validate() error { - if !c.Enabled { - return nil - } - - if c.CheckInterval <= 0 { - return errors.New("check-interval must be positive") - } - if c.AdvancePeriod <= 0 { - return errors.New("advance-period must be positive") - } - - return nil -} diff --git a/v1/services/precreator/config_test.go b/v1/services/precreator/config_test.go deleted file mode 100644 index 26860015887..00000000000 --- a/v1/services/precreator/config_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package precreator_test - -import ( - "testing" - "time" - - "github.com/BurntSushi/toml" - "github.com/influxdata/influxdb/v2/v1/services/precreator" -) - -func TestConfig_Parse(t *testing.T) { - // Parse configuration. - var c precreator.Config - if _, err := toml.Decode(` -enabled = true -check-interval = "2m" -advance-period = "10m" -`, &c); err != nil { - - t.Fatal(err) - } - - // Validate configuration. - if !c.Enabled { - t.Fatalf("unexpected enabled state: %v", c.Enabled) - } else if time.Duration(c.CheckInterval) != 2*time.Minute { - t.Fatalf("unexpected check interval: %s", c.CheckInterval) - } else if time.Duration(c.AdvancePeriod) != 10*time.Minute { - t.Fatalf("unexpected advance period: %s", c.AdvancePeriod) - } -} - -func TestConfig_Validate(t *testing.T) { - c := precreator.NewConfig() - if err := c.Validate(); err != nil { - t.Fatalf("unexpected validation fail from NewConfig: %s", err) - } - - c = precreator.NewConfig() - c.CheckInterval = 0 - if err := c.Validate(); err == nil { - t.Fatal("expected error for check-interval = 0, got nil") - } - - c = precreator.NewConfig() - c.CheckInterval *= -1 - if err := c.Validate(); err == nil { - t.Fatal("expected error for negative check-interval, got nil") - } - - c = precreator.NewConfig() - c.AdvancePeriod = 0 - if err := c.Validate(); err == nil { - t.Fatal("expected error for advance-period = 0, got nil") - } - - c = precreator.NewConfig() - c.AdvancePeriod *= -1 - if err := c.Validate(); err == nil { - t.Fatal("expected error for negative advance-period, got nil") - } - - c.Enabled = false - if err := c.Validate(); err != nil { - t.Fatalf("unexpected validation fail from disabled config: %s", err) - } -} diff --git a/v1/services/precreator/service.go b/v1/services/precreator/service.go deleted file mode 100644 index 28e8f165d37..00000000000 --- a/v1/services/precreator/service.go +++ /dev/null @@ -1,93 +0,0 @@ -// Package precreator provides the shard precreation service. -package precreator // import "github.com/influxdata/influxdb/v2/v1/services/precreator" - -import ( - "context" - "sync" - "time" - - "github.com/influxdata/influxdb/v2/logger" - "go.uber.org/zap" -) - -// Service manages the shard precreation service. -type Service struct { - checkInterval time.Duration - advancePeriod time.Duration - - Logger *zap.Logger - - cancel context.CancelFunc - wg sync.WaitGroup - - MetaClient interface { - PrecreateShardGroups(now, cutoff time.Time) error - } -} - -// NewService returns an instance of the precreation service. -func NewService(c Config) *Service { - return &Service{ - checkInterval: time.Duration(c.CheckInterval), - advancePeriod: time.Duration(c.AdvancePeriod), - Logger: zap.NewNop(), - } -} - -// WithLogger sets the logger for the service. 
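A usage sketch for the precreation service described above, assuming a stand-in meta client (`fakeMetaClient` is illustrative, not part of the deleted files); the service's WithLogger, Open, and Close methods continue below.

```go
package main

import (
	"context"
	"time"

	"github.com/influxdata/influxdb/v2/toml"
	"github.com/influxdata/influxdb/v2/v1/services/precreator"
	"go.uber.org/zap"
)

// fakeMetaClient satisfies the one method the precreator needs; the real meta
// client creates any shard groups whose start time falls before now+advance-period.
type fakeMetaClient struct{}

func (fakeMetaClient) PrecreateShardGroups(now, cutoff time.Time) error { return nil }

func main() {
	cfg := precreator.NewConfig() // enabled, 10m check interval, 30m advance period
	cfg.CheckInterval = toml.Duration(time.Minute)
	if err := cfg.Validate(); err != nil {
		panic(err)
	}

	svc := precreator.NewService(cfg)
	svc.MetaClient = fakeMetaClient{}
	svc.WithLogger(zap.NewExample())

	if err := svc.Open(context.Background()); err != nil {
		panic(err)
	}
	defer svc.Close() // stops the background precreation loop
}
```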
-func (s *Service) WithLogger(log *zap.Logger) { - s.Logger = log.With(zap.String("service", "shard-precreation")) -} - -// Open starts the precreation service. -func (s *Service) Open(ctx context.Context) error { - if s.cancel != nil { - return nil - } - - s.Logger.Info("Starting precreation service", - logger.DurationLiteral("check_interval", s.checkInterval), - logger.DurationLiteral("advance_period", s.advancePeriod)) - - ctx, s.cancel = context.WithCancel(ctx) - - s.wg.Add(1) - go s.runPrecreation(ctx) - return nil -} - -// Close stops the precreation service. -func (s *Service) Close() error { - if s.cancel == nil { - return nil - } - - s.cancel() - s.wg.Wait() - s.cancel = nil - - return nil -} - -// runPrecreation continually checks if resources need precreation. -func (s *Service) runPrecreation(ctx context.Context) { - defer s.wg.Done() - - for { - select { - case <-time.After(s.checkInterval): - if err := s.precreate(time.Now().UTC()); err != nil { - s.Logger.Info("Failed to precreate shards", zap.Error(err)) - } - case <-ctx.Done(): - s.Logger.Info("Terminating precreation service") - return - } - } -} - -// precreate performs actual resource precreation. -func (s *Service) precreate(now time.Time) error { - cutoff := now.Add(s.advancePeriod).UTC() - return s.MetaClient.PrecreateShardGroups(now, cutoff) -} diff --git a/v1/services/precreator/service_test.go b/v1/services/precreator/service_test.go deleted file mode 100644 index 071d01b5734..00000000000 --- a/v1/services/precreator/service_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package precreator_test - -import ( - "context" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/internal" - "github.com/influxdata/influxdb/v2/toml" - "github.com/influxdata/influxdb/v2/v1/services/precreator" - "go.uber.org/zap/zaptest" -) - -func TestShardPrecreation(t *testing.T) { - done := make(chan struct{}) - precreate := false - - var mc internal.MetaClientMock - mc.PrecreateShardGroupsFn = func(now, cutoff time.Time) error { - if !precreate { - close(done) - precreate = true - } - return nil - } - - s := NewTestService(t) - s.MetaClient = &mc - - if err := s.Open(context.Background()); err != nil { - t.Fatalf("unexpected open error: %s", err) - } - defer s.Close() // double close should not cause a panic - - timer := time.NewTimer(100 * time.Millisecond) - select { - case <-done: - timer.Stop() - case <-timer.C: - t.Errorf("timeout exceeded while waiting for precreate") - } - - if err := s.Close(); err != nil { - t.Fatalf("unexpected close error: %s", err) - } -} - -func NewTestService(tb testing.TB) *precreator.Service { - tb.Helper() - - config := precreator.NewConfig() - config.CheckInterval = toml.Duration(10 * time.Millisecond) - - s := precreator.NewService(config) - s.WithLogger(zaptest.NewLogger(tb)) - return s -} diff --git a/v1/services/retention/config.go b/v1/services/retention/config.go deleted file mode 100644 index 63e0edf22c8..00000000000 --- a/v1/services/retention/config.go +++ /dev/null @@ -1,32 +0,0 @@ -package retention - -import ( - "errors" - "time" - - "github.com/influxdata/influxdb/v2/toml" -) - -// Config represents the configuration for the retention service. -type Config struct { - Enabled bool `toml:"enabled"` - CheckInterval toml.Duration `toml:"check-interval"` -} - -// NewConfig returns an instance of Config with defaults. 
-func NewConfig() Config { - return Config{Enabled: true, CheckInterval: toml.Duration(30 * time.Minute)} -} - -// Validate returns an error if the Config is invalid. -func (c Config) Validate() error { - if !c.Enabled { - return nil - } - - if c.CheckInterval <= 0 { - return errors.New("check-interval must be positive") - } - - return nil -} diff --git a/v1/services/retention/config_test.go b/v1/services/retention/config_test.go deleted file mode 100644 index a31c5622f09..00000000000 --- a/v1/services/retention/config_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package retention_test - -import ( - "testing" - "time" - - "github.com/BurntSushi/toml" - "github.com/influxdata/influxdb/v2/v1/services/retention" -) - -func TestConfig_Parse(t *testing.T) { - // Parse configuration. - var c retention.Config - if _, err := toml.Decode(` -enabled = true -check-interval = "1s" -`, &c); err != nil { - t.Fatal(err) - } - - // Validate configuration. - if !c.Enabled { - t.Fatalf("unexpected enabled state: %v", c.Enabled) - } else if time.Duration(c.CheckInterval) != time.Second { - t.Fatalf("unexpected check interval: %v", c.CheckInterval) - } -} - -func TestConfig_Validate(t *testing.T) { - c := retention.NewConfig() - if err := c.Validate(); err != nil { - t.Fatalf("unexpected validation fail from NewConfig: %s", err) - } - - c = retention.NewConfig() - c.CheckInterval = 0 - if err := c.Validate(); err == nil { - t.Fatal("expected error for check-interval = 0, got nil") - } - - c = retention.NewConfig() - c.CheckInterval *= -1 - if err := c.Validate(); err == nil { - t.Fatal("expected error for negative check-interval, got nil") - } - - c.Enabled = false - if err := c.Validate(); err != nil { - t.Fatalf("unexpected validation fail from disabled config: %s", err) - } -} diff --git a/v1/services/retention/service.go b/v1/services/retention/service.go deleted file mode 100644 index b263e3b7a90..00000000000 --- a/v1/services/retention/service.go +++ /dev/null @@ -1,199 +0,0 @@ -// Package retention provides the retention policy enforcement service. -package retention // import "github.com/influxdata/influxdb/services/retention" - -import ( - "context" - "sync" - "time" - - "github.com/influxdata/influxdb/v2/logger" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/zap" -) - -// Service represents the retention policy enforcement service. -type Service struct { - MetaClient interface { - Databases() []meta.DatabaseInfo - DeleteShardGroup(database, policy string, id uint64) error - PruneShardGroups() error - } - TSDBStore interface { - ShardIDs() []uint64 - DeleteShard(shardID uint64) error - } - - config Config - wg sync.WaitGroup - cancel context.CancelFunc - - logger *zap.Logger -} - -// NewService returns a configured retention policy enforcement service. -func NewService(c Config) *Service { - return &Service{ - config: c, - logger: zap.NewNop(), - } -} - -// Open starts retention policy enforcement. -func (s *Service) Open(ctx context.Context) error { - if !s.config.Enabled || s.cancel != nil { - return nil - } - - s.logger.Info("Starting retention policy enforcement service", - logger.DurationLiteral("check_interval", time.Duration(s.config.CheckInterval))) - - ctx, s.cancel = context.WithCancel(ctx) - - s.wg.Add(1) - go func() { - defer s.wg.Done() - s.run(ctx) - }() - return nil -} - -// Close stops retention policy enforcement. 
-func (s *Service) Close() error { - if !s.config.Enabled || s.cancel == nil { - return nil - } - - s.logger.Info("Closing retention policy enforcement service") - s.cancel() - - s.wg.Wait() - - s.cancel = nil - - return nil -} - -// WithLogger sets the logger on the service. -func (s *Service) WithLogger(log *zap.Logger) { - s.logger = log.With(zap.String("service", "retention")) -} - -var globalRetentionMetrics = newRetentionMetrics() - -const storageNamespace = "storage" -const retentionSubsystem = "retention" - -type retentionMetrics struct { - checkDuration prometheus.Histogram -} - -func newRetentionMetrics() *retentionMetrics { - return &retentionMetrics{ - checkDuration: prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: storageNamespace, - Subsystem: retentionSubsystem, - Name: "check_duration", - Help: "Histogram of duration of retention check (in seconds)", - }), - } -} - -func PrometheusCollectors() []prometheus.Collector { - return []prometheus.Collector{ - globalRetentionMetrics.checkDuration, - } -} - -func (s *Service) run(ctx context.Context) { - ticker := time.NewTicker(time.Duration(s.config.CheckInterval)) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - - case <-ticker.C: - startTime := time.Now() - log, logEnd := logger.NewOperation(context.Background(), s.logger, "Retention policy deletion check", "retention_delete_check") - - type deletionInfo struct { - db string - rp string - } - deletedShardIDs := make(map[uint64]deletionInfo) - - // Mark down if an error occurred during this function so we can inform the - // user that we will try again on the next interval. - // Without the message, they may see the error message and assume they - // have to do it manually. - var retryNeeded bool - dbs := s.MetaClient.Databases() - for _, d := range dbs { - for _, r := range d.RetentionPolicies { - // Build list of already deleted shards. - for _, g := range r.DeletedShardGroups() { - for _, sh := range g.Shards { - deletedShardIDs[sh.ID] = deletionInfo{db: d.Name, rp: r.Name} - } - } - - // Determine all shards that have expired and need to be deleted. - for _, g := range r.ExpiredShardGroups(time.Now().UTC()) { - if err := s.MetaClient.DeleteShardGroup(d.Name, r.Name, g.ID); err != nil { - log.Info("Failed to delete shard group", - logger.Database(d.Name), - logger.ShardGroup(g.ID), - logger.RetentionPolicy(r.Name), - zap.Error(err)) - retryNeeded = true - continue - } - - log.Info("Deleted shard group", - logger.Database(d.Name), - logger.ShardGroup(g.ID), - logger.RetentionPolicy(r.Name)) - - // Store all the shard IDs that may possibly need to be removed locally. 
- for _, sh := range g.Shards { - deletedShardIDs[sh.ID] = deletionInfo{db: d.Name, rp: r.Name} - } - } - } - } - - // Remove shards if we store them locally - for _, id := range s.TSDBStore.ShardIDs() { - if info, ok := deletedShardIDs[id]; ok { - if err := s.TSDBStore.DeleteShard(id); err != nil { - log.Info("Failed to delete shard", - logger.Database(info.db), - logger.Shard(id), - logger.RetentionPolicy(info.rp), - zap.Error(err)) - retryNeeded = true - continue - } - log.Info("Deleted shard", - logger.Database(info.db), - logger.Shard(id), - logger.RetentionPolicy(info.rp)) - } - } - - if err := s.MetaClient.PruneShardGroups(); err != nil { - log.Info("Problem pruning shard groups", zap.Error(err)) - retryNeeded = true - } - - if retryNeeded { - log.Info("One or more errors occurred during shard deletion and will be retried on the next check", logger.DurationLiteral("check_interval", time.Duration(s.config.CheckInterval))) - } - - logEnd() - elapsed := time.Since(startTime) - globalRetentionMetrics.checkDuration.Observe(elapsed.Seconds()) - } - } -} diff --git a/v1/services/retention/service_test.go b/v1/services/retention/service_test.go deleted file mode 100644 index 3cbb7c6103e..00000000000 --- a/v1/services/retention/service_test.go +++ /dev/null @@ -1,405 +0,0 @@ -package retention_test - -import ( - "context" - "fmt" - "reflect" - "sync" - "testing" - "time" - - "github.com/influxdata/influxdb/v2/internal" - "github.com/influxdata/influxdb/v2/toml" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/influxdata/influxdb/v2/v1/services/retention" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "go.uber.org/zap/zaptest/observer" -) - -func TestService_OpenDisabled(t *testing.T) { - // Opening a disabled service should be a no-op. - c := retention.NewConfig() - c.Enabled = false - s := NewService(t, c) - - if err := s.Open(context.Background()); err != nil { - t.Fatal(err) - } - - if s.LogBuf.Len() > 0 { - t.Fatalf("service logged %q, didn't expect any logging", s.LogBuf.All()) - } -} - -func TestService_OpenClose(t *testing.T) { - // Opening a disabled service should be a no-op. - s := NewService(t, retention.NewConfig()) - - ctx := context.Background() - if err := s.Open(ctx); err != nil { - t.Fatal(err) - } - - if s.LogBuf.Len() == 0 { - t.Fatal("service didn't log anything on open") - } - - // Reopening is a no-op - if err := s.Open(ctx); err != nil { - t.Fatal(err) - } - - if err := s.Close(); err != nil { - t.Fatal(err) - } - - // Re-closing is a no-op - if err := s.Close(); err != nil { - t.Fatal(err) - } -} - -func TestService_CheckShards(t *testing.T) { - now := time.Now() - // Account for any time difference that could cause some of the logic in - // this test to fail due to a race condition. If we are at the very end of - // the hour, we can choose a time interval based on one "now" time and then - // run the retention service in the next hour. If we're in one of those - // situations, wait 100 milliseconds until we're in the next hour. 
- if got, want := now.Add(100*time.Millisecond).Truncate(time.Hour), now.Truncate(time.Hour); !got.Equal(want) { - time.Sleep(100 * time.Millisecond) - } - - data := []meta.DatabaseInfo{ - { - Name: "db0", - - DefaultRetentionPolicy: "rp0", - RetentionPolicies: []meta.RetentionPolicyInfo{ - { - Name: "rp0", - ReplicaN: 1, - Duration: time.Hour, - ShardGroupDuration: time.Hour, - ShardGroups: []meta.ShardGroupInfo{ - { - ID: 1, - StartTime: now.Truncate(time.Hour).Add(-2 * time.Hour), - EndTime: now.Truncate(time.Hour).Add(-1 * time.Hour), - Shards: []meta.ShardInfo{ - {ID: 2}, - {ID: 3}, - }, - }, - { - ID: 4, - StartTime: now.Truncate(time.Hour).Add(-1 * time.Hour), - EndTime: now.Truncate(time.Hour), - Shards: []meta.ShardInfo{ - {ID: 5}, - {ID: 6}, - }, - }, - { - ID: 7, - StartTime: now.Truncate(time.Hour), - EndTime: now.Truncate(time.Hour).Add(time.Hour), - Shards: []meta.ShardInfo{ - {ID: 8}, - {ID: 9}, - }, - }, - }, - }, - }, - }, - } - - config := retention.NewConfig() - config.CheckInterval = toml.Duration(10 * time.Millisecond) - s := NewService(t, config) - s.MetaClient.DatabasesFn = func() []meta.DatabaseInfo { - return data - } - - done := make(chan struct{}) - deletedShardGroups := make(map[string]struct{}) - s.MetaClient.DeleteShardGroupFn = func(database, policy string, id uint64) error { - for _, dbi := range data { - if dbi.Name == database { - for _, rpi := range dbi.RetentionPolicies { - if rpi.Name == policy { - for i, sg := range rpi.ShardGroups { - if sg.ID == id { - rpi.ShardGroups[i].DeletedAt = time.Now().UTC() - } - } - } - } - } - } - - deletedShardGroups[fmt.Sprintf("%s.%s.%d", database, policy, id)] = struct{}{} - if got, want := deletedShardGroups, map[string]struct{}{ - "db0.rp0.1": struct{}{}, - }; reflect.DeepEqual(got, want) { - close(done) - } else if len(got) > 1 { - t.Errorf("deleted too many shard groups") - } - return nil - } - - pruned := false - closing := make(chan struct{}) - s.MetaClient.PruneShardGroupsFn = func() error { - select { - case <-done: - if !pruned { - close(closing) - pruned = true - } - default: - } - return nil - } - - deletedShards := make(map[uint64]struct{}) - s.TSDBStore.ShardIDsFn = func() []uint64 { - return []uint64{2, 3, 5, 6} - } - s.TSDBStore.DeleteShardFn = func(shardID uint64) error { - deletedShards[shardID] = struct{}{} - return nil - } - - if err := s.Open(context.Background()); err != nil { - t.Fatalf("unexpected open error: %s", err) - } - defer func() { - if err := s.Close(); err != nil { - t.Fatalf("unexpected close error: %s", err) - } - }() - - timer := time.NewTimer(100 * time.Millisecond) - select { - case <-done: - timer.Stop() - case <-timer.C: - t.Errorf("timeout waiting for shard groups to be deleted") - return - } - - timer = time.NewTimer(100 * time.Millisecond) - select { - case <-closing: - timer.Stop() - case <-timer.C: - t.Errorf("timeout waiting for shards to be deleted") - return - } - - if got, want := deletedShards, map[uint64]struct{}{ - 2: struct{}{}, - 3: struct{}{}, - }; !reflect.DeepEqual(got, want) { - t.Errorf("unexpected deleted shards: got=%#v want=%#v", got, want) - } -} - -// This reproduces https://github.com/influxdata/influxdb/issues/8819 -func TestService_8819_repro(t *testing.T) { - for i := 0; i < 1000; i++ { - s, errC, done := testService_8819_repro(t) - - if err := s.Open(context.Background()); err != nil { - t.Fatal(err) - } - - // Wait for service to run one sweep of all dbs/rps/shards. 
- if err := <-errC; err != nil { - t.Fatalf("%dth iteration: %v", i, err) - } - // Mark that we do not expect more errors in case it runs one more time. - close(done) - - if err := s.Close(); err != nil { - t.Fatal(err) - } - } -} - -func testService_8819_repro(t *testing.T) (*Service, chan error, chan struct{}) { - c := retention.NewConfig() - c.CheckInterval = toml.Duration(time.Millisecond) - s := NewService(t, c) - errC := make(chan error, 1) // Buffer Important to prevent deadlock. - done := make(chan struct{}) - - // A database and a bunch of shards - var mu sync.Mutex - shards := []uint64{3, 5, 8, 9, 11, 12} - localShards := []uint64{3, 5, 8, 9, 11, 12} - databases := []meta.DatabaseInfo{ - { - Name: "db0", - RetentionPolicies: []meta.RetentionPolicyInfo{ - { - Name: "autogen", - Duration: 24 * time.Hour, - ShardGroupDuration: 24 * time.Hour, - ShardGroups: []meta.ShardGroupInfo{ - { - ID: 1, - StartTime: time.Date(1980, 1, 1, 0, 0, 0, 0, time.UTC), - EndTime: time.Date(1981, 1, 1, 0, 0, 0, 0, time.UTC), - Shards: []meta.ShardInfo{ - {ID: 3}, {ID: 9}, - }, - }, - { - ID: 2, - StartTime: time.Now().Add(-1 * time.Hour), - EndTime: time.Now(), - DeletedAt: time.Now(), - Shards: []meta.ShardInfo{ - {ID: 11}, {ID: 12}, - }, - }, - }, - }, - }, - }, - } - - sendError := func(err error) { - select { - case errC <- err: - case <-done: - } - } - - s.MetaClient.DatabasesFn = func() []meta.DatabaseInfo { - mu.Lock() - defer mu.Unlock() - return databases - } - - s.MetaClient.DeleteShardGroupFn = func(database string, policy string, id uint64) error { - if database != "db0" { - sendError(fmt.Errorf("wrong db name: %s", database)) - return nil - } else if policy != "autogen" { - sendError(fmt.Errorf("wrong rp name: %s", policy)) - return nil - } else if id != 1 { - sendError(fmt.Errorf("wrong shard group id: %d", id)) - return nil - } - - // remove the associated shards (3 and 9) from the shards slice... - mu.Lock() - newShards := make([]uint64, 0, len(shards)) - for _, sid := range shards { - if sid != 3 && sid != 9 { - newShards = append(newShards, sid) - } - } - shards = newShards - databases[0].RetentionPolicies[0].ShardGroups[0].DeletedAt = time.Now().UTC() - mu.Unlock() - return nil - } - - s.MetaClient.PruneShardGroupsFn = func() error { - // When this is called all shards that have been deleted from the meta - // store (expired) should also have been deleted from disk. - // If they haven't then that indicates that shards can be removed from - // the meta store and there can be a race where they haven't yet been - // removed from the local disk and indexes. This has an impact on, for - // example, the max series per database limit. - - mu.Lock() - defer mu.Unlock() - for _, lid := range localShards { - var found bool - for _, mid := range shards { - if lid == mid { - found = true - break - } - } - - if !found { - sendError(fmt.Errorf("local shard %d present, yet it's missing from meta store. 
%v -- %v ", lid, shards, localShards)) - return nil - } - } - - // We should have removed shards 3 and 9 - if !reflect.DeepEqual(localShards, []uint64{5, 8}) { - sendError(fmt.Errorf("removed shards still present locally: %v", localShards)) - return nil - } - sendError(nil) - return nil - } - - s.TSDBStore.ShardIDsFn = func() []uint64 { - mu.Lock() - defer mu.Unlock() - return localShards - } - - s.TSDBStore.DeleteShardFn = func(id uint64) error { - var found bool - mu.Lock() - newShards := make([]uint64, 0, len(localShards)) - for _, sid := range localShards { - if sid != id { - newShards = append(newShards, sid) - } else { - found = true - } - } - localShards = newShards - mu.Unlock() - - if !found { - return fmt.Errorf("shard %d not found locally", id) - } - return nil - } - - return s, errC, done -} - -type Service struct { - MetaClient *internal.MetaClientMock - TSDBStore *internal.TSDBStoreMock - - LogBuf *observer.ObservedLogs - *retention.Service -} - -func NewService(tb testing.TB, c retention.Config) *Service { - tb.Helper() - - s := &Service{ - MetaClient: &internal.MetaClientMock{}, - TSDBStore: &internal.TSDBStoreMock{}, - Service: retention.NewService(c), - } - - logcore, logbuf := observer.New(zapcore.InfoLevel) - log := zap.New(logcore) - - s.LogBuf = logbuf - s.WithLogger(log) - - s.Service.MetaClient = s.MetaClient - s.Service.TSDBStore = s.TSDBStore - return s -} diff --git a/v1/services/storage/context.go b/v1/services/storage/context.go deleted file mode 100644 index 24d2b2b25e2..00000000000 --- a/v1/services/storage/context.go +++ /dev/null @@ -1,29 +0,0 @@ -package storage - -import ( - "context" -) - -type key int - -const ( - readOptionsKey key = iota -) - -// ReadOptions are additional options that may be passed with context.Context -// to configure the behavior of a storage read request. -type ReadOptions struct { - NodeID uint64 -} - -// NewContextWithRequestOptions returns a new Context with nodeID added. -func NewContextWithReadOptions(ctx context.Context, opts *ReadOptions) context.Context { - return context.WithValue(ctx, readOptionsKey, opts) -} - -// ReadOptionsFromContext returns the ReadOptions associated with the context -// or nil if no additional options have been specified. -func ReadOptionsFromContext(ctx context.Context) *ReadOptions { - opts, _ := ctx.Value(readOptionsKey).(*ReadOptions) - return opts -} diff --git a/v1/services/storage/gen.go b/v1/services/storage/gen.go deleted file mode 100644 index 6fb61f4c5d9..00000000000 --- a/v1/services/storage/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -package storage - -//go:generate protoc --go_out=. 
source.proto diff --git a/v1/services/storage/predicate_influxql.go b/v1/services/storage/predicate_influxql.go deleted file mode 100644 index 66b72df3dc5..00000000000 --- a/v1/services/storage/predicate_influxql.go +++ /dev/null @@ -1,91 +0,0 @@ -package storage - -import ( - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxql" -) - -var measurementRemap = map[string]string{ - "_measurement": "_name", - models.MeasurementTagKey: "_name", - models.FieldKeyTagKey: "_field", -} - -func RewriteExprRemoveFieldKeyAndValue(expr influxql.Expr) influxql.Expr { - return influxql.RewriteExpr(expr, func(expr influxql.Expr) influxql.Expr { - if be, ok := expr.(*influxql.BinaryExpr); ok { - if ref, ok := be.LHS.(*influxql.VarRef); ok { - if ref.Val == "_field" || ref.Val == "$" { - return &influxql.BooleanLiteral{Val: true} - } - } - } - - return expr - }) -} - -type hasRefs struct { - refs []string - found []bool -} - -func (v *hasRefs) allFound() bool { - for _, val := range v.found { - if !val { - return false - } - } - return true -} - -func (v *hasRefs) Visit(node influxql.Node) influxql.Visitor { - if v.allFound() { - return nil - } - - if n, ok := node.(*influxql.VarRef); ok { - for i, r := range v.refs { - if !v.found[i] && r == n.Val { - v.found[i] = true - if v.allFound() { - return nil - } - } - } - } - return v -} - -func HasFieldKeyOrValue(expr influxql.Expr) (bool, bool) { - refs := hasRefs{refs: []string{fieldKey, "$"}, found: make([]bool, 2)} - influxql.Walk(&refs, expr) - return refs.found[0], refs.found[1] -} - -type hasAnyTagKeys struct { - found bool -} - -func (v *hasAnyTagKeys) Visit(node influxql.Node) influxql.Visitor { - if v.found { - return nil - } - - if n, ok := node.(*influxql.VarRef); ok { - // The influxql expression will have had references to "_measurement" - // remapped to "_name" at this point by reads.NodeToExpr, so be sure to - // check for the appropriate value here using the measurementRemap map. 
- if n.Val != fieldKey && n.Val != measurementRemap[measurementKey] && n.Val != "$" { - v.found = true - return nil - } - } - return v -} - -func hasTagKey(expr influxql.Expr) bool { - v := &hasAnyTagKeys{} - influxql.Walk(v, expr) - return v.found -} diff --git a/v1/services/storage/predicate_test.go b/v1/services/storage/predicate_test.go deleted file mode 100644 index c51cd093919..00000000000 --- a/v1/services/storage/predicate_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package storage_test - -import ( - "testing" - - "github.com/influxdata/influxdb/v2/pkg/testing/assert" - "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/v1/services/storage" - "github.com/influxdata/influxql" -) - -func TestRewriteExprRemoveFieldKeyAndValue(t *testing.T) { - node := &datatypes.Node{ - NodeType: datatypes.Node_TypeLogicalExpression, - Value: &datatypes.Node_Logical_{Logical: datatypes.Node_LogicalAnd}, - Children: []*datatypes.Node{ - { - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonEqual}, - Children: []*datatypes.Node{ - {NodeType: datatypes.Node_TypeTagRef, Value: &datatypes.Node_TagRefValue{TagRefValue: "host"}}, - {NodeType: datatypes.Node_TypeLiteral, Value: &datatypes.Node_StringValue{StringValue: "host1"}}, - }, - }, - { - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonRegex}, - Children: []*datatypes.Node{ - {NodeType: datatypes.Node_TypeTagRef, Value: &datatypes.Node_TagRefValue{TagRefValue: "_field"}}, - {NodeType: datatypes.Node_TypeLiteral, Value: &datatypes.Node_RegexValue{RegexValue: "^us-west"}}, - }, - }, - { - NodeType: datatypes.Node_TypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: datatypes.Node_ComparisonEqual}, - Children: []*datatypes.Node{ - {NodeType: datatypes.Node_TypeFieldRef, Value: &datatypes.Node_FieldRefValue{FieldRefValue: "$"}}, - {NodeType: datatypes.Node_TypeLiteral, Value: &datatypes.Node_FloatValue{FloatValue: 0.5}}, - }, - }, - }, - } - - expr, err := reads.NodeToExpr(node, nil) - assert.NoError(t, err, "NodeToExpr failed") - assert.Equal(t, expr.String(), `host::tag = 'host1' AND _field::tag =~ /^us-west/ AND "$" = 0.500`) - - expr = storage.RewriteExprRemoveFieldKeyAndValue(expr) - assert.Equal(t, expr.String(), `host::tag = 'host1' AND true AND true`) - - expr = influxql.Reduce(expr, mapValuer{"host": "host1"}) - assert.Equal(t, expr.String(), `true`) -} - -type mapValuer map[string]string - -var _ influxql.Valuer = mapValuer(nil) - -func (vs mapValuer) Value(key string) (interface{}, bool) { - v, ok := vs[key] - return v, ok -} diff --git a/v1/services/storage/series_cursor.go b/v1/services/storage/series_cursor.go deleted file mode 100644 index 522f3942db4..00000000000 --- a/v1/services/storage/series_cursor.go +++ /dev/null @@ -1,217 +0,0 @@ -package storage - -import ( - "context" - "sort" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/slices" - "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxql" - opentracing "github.com/opentracing/opentracing-go" -) 
- -const ( - measurementKey = "_measurement" - fieldKey = "_field" -) - -var ( - measurementKeyBytes = []byte(measurementKey) - fieldKeyBytes = []byte(fieldKey) -) - -type indexSeriesCursor struct { - sqry tsdb.SeriesCursor - fields measurementFields - nf []field - field field - err error - tags models.Tags - cond influxql.Expr - measurementCond influxql.Expr - row reads.SeriesRow - eof bool - hasFieldExpr bool - hasValueExpr bool -} - -func newIndexSeriesCursor(ctx context.Context, predicate *datatypes.Predicate, shards []*tsdb.Shard) (*indexSeriesCursor, error) { - var expr influxql.Expr - if root := predicate.GetRoot(); root != nil { - var err error - if expr, err = reads.NodeToExpr(root, measurementRemap); err != nil { - return nil, err - } - } - - return newIndexSeriesCursorInfluxQLPred(ctx, expr, shards) -} - -func newIndexSeriesCursorInfluxQLPred(ctx context.Context, predicate influxql.Expr, shards []*tsdb.Shard) (*indexSeriesCursor, error) { - queries, err := tsdb.CreateCursorIterators(ctx, shards) - if err != nil { - return nil, err - } - - if queries == nil { - return nil, nil - } - - span := opentracing.SpanFromContext(ctx) - if span != nil { - span = opentracing.StartSpan("index_cursor.create", opentracing.ChildOf(span.Context())) - defer span.Finish() - } - - opt := query.IteratorOptions{ - Aux: []influxql.VarRef{{Val: "key"}}, - Authorizer: query.OpenAuthorizer, - Ascending: true, - Ordered: true, - } - p := &indexSeriesCursor{row: reads.SeriesRow{Query: queries}} - - if predicate != nil { - p.cond = predicate - - p.hasFieldExpr, p.hasValueExpr = HasFieldKeyOrValue(p.cond) - if !(p.hasFieldExpr || p.hasValueExpr) { - p.measurementCond = p.cond - opt.Condition = p.cond - } else { - p.measurementCond = influxql.Reduce(reads.RewriteExprRemoveFieldValue(influxql.CloneExpr(p.cond)), nil) - if reads.IsTrueBooleanLiteral(p.measurementCond) { - p.measurementCond = nil - } - - opt.Condition = influxql.Reduce(RewriteExprRemoveFieldKeyAndValue(influxql.CloneExpr(p.cond)), nil) - if reads.IsTrueBooleanLiteral(opt.Condition) { - opt.Condition = nil - } - } - } - - sg := tsdb.Shards(shards) - if mfkeys, err := sg.FieldKeysByPredicate(opt.Condition); err == nil { - p.fields = make(map[string][]field, len(mfkeys)) - measurementNamesForFields := []string{} - for name, fkeys := range mfkeys { - fields := make([]field, 0, len(fkeys)) - for _, key := range fkeys { - fields = append(fields, field{n: key, nb: []byte(key)}) - } - p.fields[name] = fields - measurementNamesForFields = append(measurementNamesForFields, name) - } - - sort.Strings(measurementNamesForFields) - mitr := tsdb.NewMeasurementSliceIterator(slices.StringsToBytes(measurementNamesForFields...)) - p.sqry, err = sg.CreateSeriesCursor(ctx, tsdb.SeriesCursorRequest{Measurements: mitr}, opt.Condition) - if p.sqry != nil && err == nil { - return p, nil - } - } - - p.Close() - return nil, err -} - -func (c *indexSeriesCursor) Close() { - if !c.eof { - c.eof = true - if c.sqry != nil { - c.sqry.Close() - c.sqry = nil - } - } -} - -func copyTags(dst, src models.Tags) models.Tags { - if cap(dst) < src.Len() { - dst = make(models.Tags, src.Len()) - } else { - dst = dst[:src.Len()] - } - copy(dst, src) - return dst -} - -func (c *indexSeriesCursor) Next() *reads.SeriesRow { - if c.eof { - return nil - } - - for { - if len(c.nf) == 0 { - // next series key - sr, err := c.sqry.Next() - if err != nil { - c.err = err - c.Close() - return nil - } else if sr == nil { - c.Close() - return nil - } - - c.row.Name = sr.Name - c.row.SeriesTags = 
sr.Tags - c.tags = copyTags(c.tags, sr.Tags) - c.tags.Set(measurementKeyBytes, sr.Name) - - c.nf = c.fields[string(sr.Name)] - // c.nf may be nil if there are no fields - } else { - c.field, c.nf = c.nf[0], c.nf[1:] - - if c.measurementCond == nil || reads.EvalExprBool(c.measurementCond, c) { - break - } - } - } - - c.tags.Set(fieldKeyBytes, c.field.nb) - c.row.Field = c.field.n - - if c.cond != nil && c.hasValueExpr { - // TODO(sgc): lazily evaluate valueCond - c.row.ValueCond = influxql.Reduce(c.cond, c) - if reads.IsTrueBooleanLiteral(c.row.ValueCond) { - // we've reduced the expression to "true" - c.row.ValueCond = nil - } - } - - c.row.Tags = copyTags(c.row.Tags, c.tags) - - return &c.row -} - -func (c *indexSeriesCursor) Value(key string) (interface{}, bool) { - switch key { - case "_name": - return string(c.row.Name), true - case fieldKey: - return c.field.n, true - case "$": - return nil, false - default: - res := c.row.SeriesTags.GetString(key) - return res, true - } -} - -func (c *indexSeriesCursor) Err() error { - return c.err -} - -type measurementFields map[string][]field - -type field struct { - n string - nb []byte -} diff --git a/v1/services/storage/series_cursor_test.go b/v1/services/storage/series_cursor_test.go deleted file mode 100644 index bacf47210f7..00000000000 --- a/v1/services/storage/series_cursor_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package storage - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxql" -) - -func exprEqual(x, y influxql.Expr) bool { - if x == nil { - return y == nil - } - - if y == nil { - return false - } - - return x.String() == y.String() -} - -func TestSeriesCursorValuer(t *testing.T) { - tests := []struct { - n string - m string - expr string - exp string - }{ - { - n: "equals name", - m: "cpu,_field=foo", - expr: `"_name"::tag = 'cpu' AND "$"::tag = 3`, - exp: `"$"::tag = 3`, - }, - { - n: "not equals name", - m: "cpu,_field=foo", - expr: `"_name"::tag = 'mem' AND "$"::tag = 3`, - exp: `false`, - }, - { - n: "equals tag", - m: "cpu,_field=foo,tag0=val0", - expr: `"tag0"::tag = 'val0' AND "$"::tag = 3`, - exp: `"$"::tag = 3`, - }, - { - n: "not equals tag", - m: "cpu,_field=foo,tag0=val0", - expr: `"tag0"::tag = 'val1' AND "$"::tag = 3`, - exp: `false`, - }, - { - n: "missing tag", - m: "cpu,_field=foo,tag0=val0", - expr: `"tag1"::tag = 'val1' AND "$"::tag = 3`, - exp: `false`, - }, - { - n: "equals field", - m: "cpu,_field=foo,tag0=val0", - expr: `"tag0"::tag = 'val1' AND "$"::tag = 3`, - exp: `false`, - }, - { - n: "not equals field", - m: "cpu,_field=foo,tag0=val0", - expr: `"_field"::tag = 'bar' AND "$"::tag = 3`, - exp: `false`, - }, - } - - for _, tc := range tests { - t.Run(tc.n, func(t *testing.T) { - var sc indexSeriesCursor - sc.row.Name, sc.row.SeriesTags = models.ParseKeyBytes([]byte(tc.m)) - sc.field.n = sc.row.SeriesTags.GetString(fieldKey) - sc.row.SeriesTags.Delete(fieldKeyBytes) - - expr, err := influxql.ParseExpr(tc.expr) - if err != nil { - t.Fatalf("unable to parse input expression %q, %v", tc.expr, err) - } - exp, err := influxql.ParseExpr(tc.exp) - if err != nil { - t.Fatalf("unable to parse expected expression %q, %v", tc.exp, err) - } - - if got := influxql.Reduce(expr, &sc); !cmp.Equal(got, exp, cmp.Comparer(exprEqual)) { - t.Errorf("unexpected result from Reduce, -got/+exp\n%s", cmp.Diff(got, exp)) - } - }) - } -} diff --git a/v1/services/storage/source.go b/v1/services/storage/source.go deleted file mode 
100644 index e4158cf5f75..00000000000 --- a/v1/services/storage/source.go +++ /dev/null @@ -1,18 +0,0 @@ -package storage - -import ( - "errors" - - "google.golang.org/protobuf/types/known/anypb" -) - -func GetReadSource(any *anypb.Any) (*ReadSource, error) { - if any == nil { - return nil, errors.New("reque") - } - var source ReadSource - if err := any.UnmarshalTo(&source); err != nil { - return nil, err - } - return &source, nil -} diff --git a/v1/services/storage/source.pb.go b/v1/services/storage/source.pb.go deleted file mode 100644 index d459a7812f5..00000000000 --- a/v1/services/storage/source.pb.go +++ /dev/null @@ -1,156 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.27.1 -// protoc v3.17.3 -// source: source.proto - -package storage - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type ReadSource struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // OrgID specifies the organization identifier for this request. - OrgID uint64 `protobuf:"varint,1,opt,name=OrgID,proto3" json:"OrgID,omitempty"` - // BucketID specifies the bucket in the organization. - BucketID uint64 `protobuf:"varint,2,opt,name=BucketID,proto3" json:"BucketID,omitempty"` -} - -func (x *ReadSource) Reset() { - *x = ReadSource{} - if protoimpl.UnsafeEnabled { - mi := &file_source_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReadSource) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReadSource) ProtoMessage() {} - -func (x *ReadSource) ProtoReflect() protoreflect.Message { - mi := &file_source_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReadSource.ProtoReflect.Descriptor instead. 
-func (*ReadSource) Descriptor() ([]byte, []int) { - return file_source_proto_rawDescGZIP(), []int{0} -} - -func (x *ReadSource) GetOrgID() uint64 { - if x != nil { - return x.OrgID - } - return 0 -} - -func (x *ReadSource) GetBucketID() uint64 { - if x != nil { - return x.BucketID - } - return 0 -} - -var File_source_proto protoreflect.FileDescriptor - -var file_source_proto_rawDesc = []byte{ - 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x2f, - 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, - 0x78, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x62, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, - 0x3e, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x4f, 0x72, 0x67, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x4f, 0x72, - 0x67, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x44, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x44, 0x42, - 0x0b, 0x5a, 0x09, 0x2e, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_source_proto_rawDescOnce sync.Once - file_source_proto_rawDescData = file_source_proto_rawDesc -) - -func file_source_proto_rawDescGZIP() []byte { - file_source_proto_rawDescOnce.Do(func() { - file_source_proto_rawDescData = protoimpl.X.CompressGZIP(file_source_proto_rawDescData) - }) - return file_source_proto_rawDescData -} - -var file_source_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_source_proto_goTypes = []interface{}{ - (*ReadSource)(nil), // 0: com.github.influxdata.influxdb.services.storage.ReadSource -} -var file_source_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_source_proto_init() } -func file_source_proto_init() { - if File_source_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_source_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadSource); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_source_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_source_proto_goTypes, - DependencyIndexes: file_source_proto_depIdxs, - MessageInfos: file_source_proto_msgTypes, - }.Build() - File_source_proto = out.File - file_source_proto_rawDesc = nil - file_source_proto_goTypes = nil - file_source_proto_depIdxs = nil -} diff --git a/v1/services/storage/source.proto b/v1/services/storage/source.proto deleted file mode 100644 index abc1eb677b5..00000000000 --- a/v1/services/storage/source.proto +++ /dev/null @@ -1,11 +0,0 @@ -syntax = "proto3"; -package com.github.influxdata.influxdb.services.storage; -option go_package = ".;storage"; - -message ReadSource { - // OrgID specifies the organization identifier for this request. 
- uint64 OrgID = 1; - - // BucketID specifies the bucket in the organization. - uint64 BucketID = 2; -} diff --git a/v1/services/storage/store.go b/v1/services/storage/store.go deleted file mode 100644 index af4ad096500..00000000000 --- a/v1/services/storage/store.go +++ /dev/null @@ -1,819 +0,0 @@ -package storage - -import ( - "context" - "errors" - "fmt" - "sort" - "time" - - "github.com/influxdata/influxdb/v2/influxql/query" - "github.com/influxdata/influxdb/v2/kit/platform" - "github.com/influxdata/influxdb/v2/models" - "github.com/influxdata/influxdb/v2/pkg/slices" - "github.com/influxdata/influxdb/v2/storage/reads" - "github.com/influxdata/influxdb/v2/storage/reads/datatypes" - "github.com/influxdata/influxdb/v2/tsdb" - "github.com/influxdata/influxdb/v2/tsdb/cursors" - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/influxdata/influxql" - "go.uber.org/zap" - "google.golang.org/protobuf/proto" -) - -var ( - ErrMissingReadSource = errors.New("missing ReadSource") -) - -type TSDBStore interface { - MeasurementNames(ctx context.Context, auth query.Authorizer, database string, cond influxql.Expr) ([][]byte, error) - ShardGroup(ids []uint64) tsdb.ShardGroup - Shards(ids []uint64) []*tsdb.Shard - TagKeys(ctx context.Context, auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) - TagValues(ctx context.Context, auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) - SeriesCardinality(ctx context.Context, database string) (int64, error) - SeriesCardinalityFromShards(ctx context.Context, shards []*tsdb.Shard) (*tsdb.SeriesIDSet, error) - SeriesFile(database string) *tsdb.SeriesFile -} - -type MetaClient interface { - Database(name string) *meta.DatabaseInfo - ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) -} - -type Store struct { - TSDBStore TSDBStore - MetaClient MetaClient - Logger *zap.Logger -} - -func (s *Store) WindowAggregate(ctx context.Context, req *datatypes.ReadWindowAggregateRequest) (reads.ResultSet, error) { - if req.ReadSource == nil { - return nil, ErrMissingReadSource - } - - source, err := GetReadSource(req.ReadSource) - if err != nil { - return nil, err - } - - database, rp, start, end, err := s.validateArgs(source.GetOrgID(), source.GetBucketID(), req.Range.GetStart(), req.Range.GetEnd()) - if err != nil { - return nil, err - } - - // Due to some optimizations around how flux's `last()` function is implemented with the - // storage engine, we need to detect if the read request requires a descending - // cursor or not. - descending := reads.IsLastDescendingAggregateOptimization(req) - shardIDs, err := s.findShardIDs(database, rp, descending, start, end) - if err != nil { - return nil, err - } - if len(shardIDs) == 0 { // TODO(jeff): this was a typed nil - return nil, nil - } - - var cur reads.SeriesCursor - if ic, err := newIndexSeriesCursor(ctx, req.Predicate, s.TSDBStore.Shards(shardIDs)); err != nil { - return nil, err - } else if ic == nil { // TODO(jeff): this was a typed nil - return nil, nil - } else { - cur = ic - } - - return reads.NewWindowAggregateResultSet(ctx, req, cur) -} - -func NewStore(store TSDBStore, metaClient MetaClient) *Store { - return &Store{ - TSDBStore: store, - MetaClient: metaClient, - Logger: zap.NewNop(), - } -} - -// WithLogger sets the logger for the service. 
-func (s *Store) WithLogger(log *zap.Logger) { - s.Logger = log.With(zap.String("service", "store")) -} - -func (s *Store) findShardIDs(database, rp string, desc bool, start, end int64) ([]uint64, error) { - groups, err := s.MetaClient.ShardGroupsByTimeRange(database, rp, time.Unix(0, start), time.Unix(0, end)) - if err != nil { - return nil, err - } - - if len(groups) == 0 { - return nil, nil - } - - if desc { - sort.Sort(sort.Reverse(meta.ShardGroupInfos(groups))) - } else { - sort.Sort(meta.ShardGroupInfos(groups)) - } - - shardIDs := make([]uint64, 0, len(groups[0].Shards)*len(groups)) - for _, g := range groups { - for _, si := range g.Shards { - shardIDs = append(shardIDs, si.ID) - } - } - return shardIDs, nil -} - -func (s *Store) validateArgs(orgID, bucketID uint64, start, end int64) (string, string, int64, int64, error) { - database := platform.ID(bucketID).String() - rp := meta.DefaultRetentionPolicyName - - di := s.MetaClient.Database(database) - if di == nil { - return "", "", 0, 0, errors.New("no database") - } - - rpi := di.RetentionPolicy(rp) - if rpi == nil { - return "", "", 0, 0, errors.New("invalid retention policy") - } - - if start <= 0 { - start = models.MinNanoTime - } - if end <= 0 { - end = models.MaxNanoTime - } - return database, rp, start, end, nil -} - -func (s *Store) ReadFilter(ctx context.Context, req *datatypes.ReadFilterRequest) (reads.ResultSet, error) { - if req.ReadSource == nil { - return nil, ErrMissingReadSource - } - - source, err := GetReadSource(req.ReadSource) - if err != nil { - return nil, err - } - - database, rp, start, end, err := s.validateArgs(source.OrgID, source.BucketID, req.Range.GetStart(), req.Range.GetEnd()) - if err != nil { - return nil, err - } - - shardIDs, err := s.findShardIDs(database, rp, false, start, end) - if err != nil { - return nil, err - } - if len(shardIDs) == 0 { // TODO(jeff): this was a typed nil - return nil, nil - } - - var cur reads.SeriesCursor - if ic, err := newIndexSeriesCursor(ctx, req.Predicate, s.TSDBStore.Shards(shardIDs)); err != nil { - return nil, err - } else if ic == nil { // TODO(jeff): this was a typed nil - return nil, nil - } else { - cur = ic - } - - req.Range = &datatypes.TimestampRange{ - Start: start, - End: end, - } - - return reads.NewFilteredResultSet(ctx, req.Range.GetStart(), req.Range.GetEnd(), cur), nil -} - -func (s *Store) ReadGroup(ctx context.Context, req *datatypes.ReadGroupRequest) (reads.GroupResultSet, error) { - if req.ReadSource == nil { - return nil, ErrMissingReadSource - } - - source, err := GetReadSource(req.ReadSource) - if err != nil { - return nil, err - } - - database, rp, start, end, err := s.validateArgs(source.OrgID, source.BucketID, req.Range.GetStart(), req.Range.GetEnd()) - if err != nil { - return nil, err - } - - // Due to some optimizations around how flux's `last()` function is implemented with the - // storage engine, we need to detect if the read request requires a descending - // cursor or not. 
- descending := reads.IsLastDescendingGroupOptimization(req) - shardIDs, err := s.findShardIDs(database, rp, descending, start, end) - if err != nil { - return nil, err - } - if len(shardIDs) == 0 { - return nil, nil - } - - shards := s.TSDBStore.Shards(shardIDs) - - req.Range = &datatypes.TimestampRange{ - Start: start, - End: end, - } - - newCursor := func() (reads.SeriesCursor, error) { - cur, err := newIndexSeriesCursor(ctx, req.Predicate, shards) - if cur == nil || err != nil { - return nil, err - } - return cur, nil - } - - rs := reads.NewGroupResultSet(ctx, req, newCursor) - if rs == nil { - return nil, nil - } - - return rs, nil -} - -type metaqueryAttributes struct { - orgID platform.ID - db, rp string - start, end int64 - pred influxql.Expr -} - -func (s *Store) tagKeysWithFieldPredicate(ctx context.Context, mqAttrs *metaqueryAttributes, shardIDs []uint64) (cursors.StringIterator, error) { - var cur reads.SeriesCursor - if ic, err := newIndexSeriesCursorInfluxQLPred(ctx, mqAttrs.pred, s.TSDBStore.Shards(shardIDs)); err != nil { - return nil, err - } else if ic == nil { - return cursors.EmptyStringIterator, nil - } else { - cur = ic - } - m := make(map[string]struct{}) - rs := reads.NewFilteredResultSet(ctx, mqAttrs.start, mqAttrs.end, cur) - for rs.Next() { - func() { - c := rs.Cursor() - if c == nil { - // no data for series key + field combination - return - } - defer c.Close() - if cursorHasData(c) { - tags := rs.Tags() - for i := range tags { - m[string(tags[i].Key)] = struct{}{} - } - } - }() - } - - arr := make([]string, 0, len(m)) - for tag := range m { - arr = append(arr, tag) - } - sort.Strings(arr) - return cursors.NewStringSliceIterator(arr), nil -} - -func (s *Store) TagKeys(ctx context.Context, req *datatypes.TagKeysRequest) (cursors.StringIterator, error) { - if req.TagsSource == nil { - return nil, ErrMissingReadSource - } - source, err := GetReadSource(req.TagsSource) - if err != nil { - return nil, err - } - db, rp, start, end, err := s.validateArgs(source.OrgID, source.BucketID, req.Range.GetStart(), req.Range.GetEnd()) - if err != nil { - return nil, err - } - - shardIDs, err := s.findShardIDs(db, rp, false, start, end) - if err != nil { - return nil, err - } - if len(shardIDs) == 0 { - return cursors.EmptyStringIterator, nil - } - - var expr influxql.Expr - if root := req.Predicate.GetRoot(); root != nil { - var err error - expr, err = reads.NodeToExpr(root, measurementRemap) - if err != nil { - return nil, err - } - - if found := reads.HasFieldValueKey(expr); found { - return nil, errors.New("field values unsupported") - } - if found := reads.ExprHasKey(expr, fieldKey); found { - mqAttrs := &metaqueryAttributes{ - orgID: platform.ID(source.GetOrgID()), - db: db, - rp: rp, - start: start, - end: end, - pred: expr, - } - return s.tagKeysWithFieldPredicate(ctx, mqAttrs, shardIDs) - } - expr = influxql.Reduce(influxql.CloneExpr(expr), nil) - if reads.IsTrueBooleanLiteral(expr) { - expr = nil - } - } - - // TODO(jsternberg): Use a real authorizer. 
- auth := query.OpenAuthorizer - keys, err := s.TSDBStore.TagKeys(ctx, auth, shardIDs, expr) - if err != nil { - return cursors.EmptyStringIterator, err - } - - m := map[string]bool{ - measurementKey: true, - fieldKey: true, - } - for _, ks := range keys { - for _, k := range ks.Keys { - m[k] = true - } - } - - names := make([]string, 0, len(m)) - for name := range m { - names = append(names, name) - } - sort.Strings(names) - return cursors.NewStringSliceIterator(names), nil -} - -func (s *Store) TagValues(ctx context.Context, req *datatypes.TagValuesRequest) (cursors.StringIterator, error) { - if req.TagsSource == nil { - return nil, ErrMissingReadSource - } - - source, err := GetReadSource(req.TagsSource) - if err != nil { - return nil, err - } - - db, rp, start, end, err := s.validateArgs(source.OrgID, source.BucketID, req.Range.GetStart(), req.Range.GetEnd()) - if err != nil { - return nil, err - } - - var influxqlPred influxql.Expr - if root := req.Predicate.GetRoot(); root != nil { - var err error - influxqlPred, err = reads.NodeToExpr(root, measurementRemap) - if err != nil { - return nil, err - } - - if found := reads.HasFieldValueKey(influxqlPred); found { - return nil, errors.New("field values unsupported") - } - - influxqlPred = influxql.Reduce(influxql.CloneExpr(influxqlPred), nil) - if reads.IsTrueBooleanLiteral(influxqlPred) { - influxqlPred = nil - } - } - - mqAttrs := &metaqueryAttributes{ - orgID: platform.ID(source.GetOrgID()), - db: db, - rp: rp, - start: start, - end: end, - pred: influxqlPred, - } - - tagKey, ok := measurementRemap[req.TagKey] - if !ok { - tagKey = req.TagKey - } - - // Getting values of _measurement or _field are handled specially - switch tagKey { - case "_name": - return s.MeasurementNames(ctx, mqAttrs) - - case "_field": - return s.measurementFields(ctx, mqAttrs) - } - - return s.tagValues(ctx, mqAttrs, tagKey) -} - -func (s *Store) tagValues(ctx context.Context, mqAttrs *metaqueryAttributes, tagKey string) (cursors.StringIterator, error) { - // If there are any references to _field, we need to use the slow path - // since we cannot rely on the index alone. - if mqAttrs.pred != nil { - if hasFieldKey := reads.ExprHasKey(mqAttrs.pred, fieldKey); hasFieldKey { - return s.tagValuesSlow(ctx, mqAttrs, tagKey) - } - } - - shardIDs, err := s.findShardIDs(mqAttrs.db, mqAttrs.rp, false, mqAttrs.start, mqAttrs.end) - if err != nil { - return nil, err - } - if len(shardIDs) == 0 { - return cursors.EmptyStringIterator, nil - } - - tagKeyExpr := &influxql.BinaryExpr{ - Op: influxql.EQ, - LHS: &influxql.VarRef{ - Val: "_tagKey", - }, - RHS: &influxql.StringLiteral{ - Val: tagKey, - }, - } - - if mqAttrs.pred != nil { - mqAttrs.pred = &influxql.BinaryExpr{ - Op: influxql.AND, - LHS: tagKeyExpr, - RHS: &influxql.ParenExpr{ - Expr: mqAttrs.pred, - }, - } - } else { - mqAttrs.pred = tagKeyExpr - } - - // TODO(jsternberg): Use a real authorizer. 
- auth := query.OpenAuthorizer - values, err := s.TSDBStore.TagValues(ctx, auth, shardIDs, mqAttrs.pred) - if err != nil { - return nil, err - } - - m := make(map[string]struct{}) - for _, kvs := range values { - for _, kv := range kvs.Values { - m[kv.Value] = struct{}{} - } - } - - names := make([]string, 0, len(m)) - for name := range m { - names = append(names, name) - } - sort.Strings(names) - return cursors.NewStringSliceIterator(names), nil -} - -func (s *Store) MeasurementNames(ctx context.Context, mqAttrs *metaqueryAttributes) (cursors.StringIterator, error) { - if mqAttrs.pred != nil { - if hasFieldKey := reads.ExprHasKey(mqAttrs.pred, fieldKey); hasFieldKey { - // If there is a predicate on _field, we cannot use the index - // to filter out unwanted measurement names. Use a slower - // block scan instead. - return s.tagValuesSlow(ctx, mqAttrs, measurementKey) - } - } - - // TODO(jsternberg): Use a real authorizer. - auth := query.OpenAuthorizer - values, err := s.TSDBStore.MeasurementNames(ctx, auth, mqAttrs.db, mqAttrs.pred) - if err != nil { - return nil, err - } - - m := make(map[string]struct{}) - for _, name := range values { - m[string(name)] = struct{}{} - } - - names := make([]string, 0, len(m)) - for name := range m { - names = append(names, name) - } - sort.Strings(names) - return cursors.NewStringSliceIterator(names), nil -} - -func (s *Store) GetSource(orgID, bucketID uint64) proto.Message { - return &ReadSource{ - BucketID: bucketID, - OrgID: orgID, - } -} - -func (s *Store) measurementFields(ctx context.Context, mqAttrs *metaqueryAttributes) (cursors.StringIterator, error) { - if mqAttrs.pred != nil { - if hasFieldKey := reads.ExprHasKey(mqAttrs.pred, fieldKey); hasFieldKey { - return s.tagValuesSlow(ctx, mqAttrs, fieldKey) - } - - // If there predicates on anything besides _measurement, we can't - // use the index and need to use the slow path. 
- if hasTagKey(mqAttrs.pred) { - return s.tagValuesSlow(ctx, mqAttrs, fieldKey) - } - } - - shardIDs, err := s.findShardIDs(mqAttrs.db, mqAttrs.rp, false, mqAttrs.start, mqAttrs.end) - if err != nil { - return nil, err - } - if len(shardIDs) == 0 { - return cursors.EmptyStringIterator, nil - } - - sg := s.TSDBStore.ShardGroup(shardIDs) - ms := &influxql.Measurement{ - Database: mqAttrs.db, - RetentionPolicy: mqAttrs.rp, - SystemIterator: "_fieldKeys", - } - opts := query.IteratorOptions{ - OrgID: mqAttrs.orgID, - Condition: mqAttrs.pred, - Authorizer: query.OpenAuthorizer, - } - iter, err := sg.CreateIterator(ctx, ms, opts) - if err != nil { - return nil, err - } - defer func() { - if iter != nil { - _ = iter.Close() - } - }() - - var fieldNames []string - fitr, ok := iter.(query.FloatIterator) - if !ok { - return cursors.NewStringSliceIterator(fieldNames), nil - } - - for p, _ := fitr.Next(); p != nil; p, _ = fitr.Next() { - if len(p.Aux) >= 1 { - fieldNames = append(fieldNames, p.Aux[0].(string)) - } - } - - sort.Strings(fieldNames) - fieldNames = slices.MergeSortedStrings(fieldNames) - - return cursors.NewStringSliceIterator(fieldNames), nil -} - -func cursorHasData(c cursors.Cursor) bool { - var l int - switch typedCur := c.(type) { - case cursors.IntegerArrayCursor: - ia := typedCur.Next() - l = ia.Len() - case cursors.FloatArrayCursor: - ia := typedCur.Next() - l = ia.Len() - case cursors.UnsignedArrayCursor: - ia := typedCur.Next() - l = ia.Len() - case cursors.BooleanArrayCursor: - ia := typedCur.Next() - l = ia.Len() - case cursors.StringArrayCursor: - ia := typedCur.Next() - l = ia.Len() - default: - panic(fmt.Sprintf("unreachable: %T", typedCur)) - } - return l != 0 -} - -// tagValuesSlow will determine the tag values for the given tagKey. -// It's generally faster to use tagValues, measurementFields or -// MeasurementNames, but those methods will only use the index and metadata -// stored in the shard. Because fields are not themselves indexed, we have no way -// of correlating fields to tag values, so we sometimes need to consult tsm to -// provide an accurate answer. -func (s *Store) tagValuesSlow(ctx context.Context, mqAttrs *metaqueryAttributes, tagKey string) (cursors.StringIterator, error) { - shardIDs, err := s.findShardIDs(mqAttrs.db, mqAttrs.rp, false, mqAttrs.start, mqAttrs.end) - if err != nil { - return nil, err - } - if len(shardIDs) == 0 { - return cursors.EmptyStringIterator, nil - } - - var cur reads.SeriesCursor - if ic, err := newIndexSeriesCursorInfluxQLPred(ctx, mqAttrs.pred, s.TSDBStore.Shards(shardIDs)); err != nil { - return nil, err - } else if ic == nil { - return cursors.EmptyStringIterator, nil - } else { - cur = ic - } - m := make(map[string]struct{}) - - rs := reads.NewFilteredResultSet(ctx, mqAttrs.start, mqAttrs.end, cur) - for rs.Next() { - func() { - c := rs.Cursor() - if c == nil { - // no data for series key + field combination? - // It seems that even when there is no data for this series key + field - // combo that the cursor may be not nil. We need to - // request invoke an array cursor to be sure. - // This is the reason for the call to cursorHasData below. 
- return - } - defer c.Close() - - if cursorHasData(c) { - f := rs.Tags().Get([]byte(tagKey)) - m[string(f)] = struct{}{} - } - }() - } - - names := make([]string, 0, len(m)) - for name := range m { - names = append(names, name) - } - sort.Strings(names) - return cursors.NewStringSliceIterator(names), nil -} - -func (s *Store) ReadSeriesCardinality(ctx context.Context, req *datatypes.ReadSeriesCardinalityRequest) (cursors.Int64Iterator, error) { - if req.ReadSource == nil { - return nil, ErrMissingReadSource - } - - source, err := GetReadSource(req.ReadSource) - if err != nil { - return nil, err - } - - db, rp, start, end, err := s.validateArgs(source.OrgID, source.BucketID, req.Range.GetStart(), req.Range.GetEnd()) - if err != nil { - return nil, err - } - - sgs, err := s.MetaClient.ShardGroupsByTimeRange(db, rp, time.Unix(0, start), time.Unix(0, end)) - if err != nil { - return nil, err - } - - if len(sgs) == 0 { - return cursors.NewInt64SliceIterator([]int64{0}), nil - } - - var expr influxql.Expr - if root := req.Predicate.GetRoot(); root != nil { - expr, err = reads.NodeToExpr(root, measurementRemap) - if err != nil { - return nil, err - } - - if found := reads.HasFieldValueKey(expr); found { - return nil, errors.New("filtering on field values is not supported in cardinality predicates") - } - expr = influxql.Reduce(influxql.CloneExpr(expr), nil) - - // Single boolean literals are not handled well by the cursor that will be - // generated to solve the query, so specifically check and handle those - // cases here. A true boolean is equivalent to not having any predicate, and - // a false boolean will return no results. - if reads.IsTrueBooleanLiteral(expr) { - expr = nil - } - if reads.IsFalseBooleanLiteral(expr) { - return cursors.NewInt64SliceIterator([]int64{0}), nil - } - - } - - shardsEntirelyInTimeRange, shardsPartiallyInTimeRange := groupShardsByTime(sgs, start, end) - sfile := s.TSDBStore.SeriesFile(db) - - // Get the cardinality for the set of shards that are completely within the - // provided time range. This can be done much faster than verifying that the - // series have data in the time range, so it is done separately. 
- c1, err := s.seriesCardinalityWithPredicate(ctx, s.TSDBStore.Shards(shardsEntirelyInTimeRange), expr, sfile) - if err != nil { - return nil, err - } - - // Others use a slower way - c2, err := s.seriesCardinalityWithPredicateAndTime(ctx, s.TSDBStore.Shards(shardsPartiallyInTimeRange), expr, sfile, start, end) - if err != nil { - return nil, err - } - - ss := tsdb.NewSeriesIDSet() - ss.Merge(c1, c2) - - return cursors.NewInt64SliceIterator([]int64{int64(ss.Cardinality())}), nil -} - -func (s *Store) seriesCardinalityWithPredicate(ctx context.Context, shards []*tsdb.Shard, expr influxql.Expr, sfile *tsdb.SeriesFile) (*tsdb.SeriesIDSet, error) { - if expr == nil { - return s.TSDBStore.SeriesCardinalityFromShards(ctx, shards) - } - - ss := tsdb.NewSeriesIDSet() - if len(shards) == 0 { - return ss, nil - } - - cur, err := newIndexSeriesCursorInfluxQLPred(ctx, expr, shards) - if err != nil { - return nil, err - } - - buf := make([]byte, 1024) - for { - r := cur.Next() - if r == nil { - break - } - skey := sfile.SeriesID(r.Name, r.SeriesTags, buf) - ss.Add(skey) - } - - return ss, nil -} - -func (s *Store) seriesCardinalityWithPredicateAndTime(ctx context.Context, shards []*tsdb.Shard, expr influxql.Expr, sfile *tsdb.SeriesFile, start, end int64) (*tsdb.SeriesIDSet, error) { - ss := tsdb.NewSeriesIDSet() - if len(shards) == 0 { - return ss, nil - } - - cur, err := newIndexSeriesCursorInfluxQLPred(ctx, expr, shards) - if err != nil { - return nil, err - } - - buf := make([]byte, 1024) - rs := reads.NewFilteredResultSet(ctx, start, end, cur) - for rs.Next() { - func() { - c := rs.Cursor() - if c == nil { - // no data for series key + field combination - return - } - defer c.Close() - - if cursorHasData(c) { - r := cur.row - skey := sfile.SeriesID(r.Name, r.SeriesTags, buf) - ss.Add(skey) - } - }() - } - - return ss, nil -} - -func (s *Store) SupportReadSeriesCardinality(ctx context.Context) bool { - return true -} - -// Returns two slices of shard IDs - the first is shards that are entirely in -// the provided time range; the second is shards that are not entirely within -// the provided time range. -func groupShardsByTime(sgs []meta.ShardGroupInfo, start, end int64) ([]uint64, []uint64) { - entirelyInRange := []uint64{} - partiallyInRange := []uint64{} - - for _, sg := range sgs { - shards := make([]uint64, 0, len(sg.Shards)) - for _, si := range sg.Shards { - shards = append(shards, si.ID) - } - - if timesWithinRangeInclusive(sg.StartTime, sg.EndTime, time.Unix(0, start), time.Unix(0, end)) { - entirelyInRange = append(entirelyInRange, shards...) - continue - } - - partiallyInRange = append(partiallyInRange, shards...) - } - - return entirelyInRange, partiallyInRange -} - -// timesWithinRangeInclusive checks to see if the provided start and end time -// are within the start end and times of the range, with the check being -// inclusive. 
-func timesWithinRangeInclusive(start, end, rangeStart, rangeEnd time.Time) bool { - return (start.After(rangeStart) || start.Equal(rangeStart)) && - (end.Before(rangeEnd) || end.Equal(rangeEnd)) -} diff --git a/v1/services/storage/store_test.go b/v1/services/storage/store_test.go deleted file mode 100644 index cecf8a3f45c..00000000000 --- a/v1/services/storage/store_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package storage - -import ( - "testing" - "time" - - "github.com/influxdata/influxdb/v2/v1/services/meta" - "github.com/stretchr/testify/require" -) - -func TestGroupShardsByTime(t *testing.T) { - tests := []struct { - name string - shardGroups []meta.ShardGroupInfo - start, end int64 - wantInRange, wantNotInRange []uint64 - }{ - { - name: "all are within the time range", - shardGroups: []meta.ShardGroupInfo{ - { - ID: 1, - StartTime: time.Unix(0, 10), - EndTime: time.Unix(0, 12), - Shards: []meta.ShardInfo{ - {ID: 1}, - }, - }, - { - ID: 2, - StartTime: time.Unix(0, 11), - EndTime: time.Unix(0, 13), - Shards: []meta.ShardInfo{ - {ID: 2}, - }, - }, - }, - start: 0, - end: 15, - wantInRange: []uint64{1, 2}, - wantNotInRange: []uint64{}, - }, - { - name: "none are within the time range", - shardGroups: []meta.ShardGroupInfo{ - { - ID: 1, - StartTime: time.Unix(0, 10), - EndTime: time.Unix(0, 12), - Shards: []meta.ShardInfo{ - {ID: 1}, - }, - }, - { - ID: 2, - StartTime: time.Unix(0, 11), - EndTime: time.Unix(0, 13), - Shards: []meta.ShardInfo{ - {ID: 2}, - }, - }, - }, - start: 20, - end: 25, - wantInRange: []uint64{}, - wantNotInRange: []uint64{1, 2}, - }, - { - name: "some are in the time range; some are not", - shardGroups: []meta.ShardGroupInfo{ - { - ID: 1, - StartTime: time.Unix(0, 10), - EndTime: time.Unix(0, 12), - Shards: []meta.ShardInfo{ - {ID: 1}, - }, - }, - { - ID: 2, - StartTime: time.Unix(0, 12), - EndTime: time.Unix(0, 14), - Shards: []meta.ShardInfo{ - {ID: 2}, - }, - }, - }, - start: 11, - end: 15, - wantInRange: []uint64{2}, - wantNotInRange: []uint64{1}, - }, - { - name: "time ranges are inclusive", - shardGroups: []meta.ShardGroupInfo{ - { - ID: 1, - StartTime: time.Unix(0, 10), - EndTime: time.Unix(0, 12), - Shards: []meta.ShardInfo{ - {ID: 1}, - }, - }, - { - ID: 2, - StartTime: time.Unix(0, 12), - EndTime: time.Unix(0, 14), - Shards: []meta.ShardInfo{ - {ID: 2}, - }, - }, - }, - start: 10, - end: 14, - wantInRange: []uint64{1, 2}, - wantNotInRange: []uint64{}, - }, - } - - for _, tt := range tests { - gotInRange, gotNotInRange := groupShardsByTime(tt.shardGroups, tt.start, tt.end) - require.Equal(t, tt.wantInRange, gotInRange) - require.Equal(t, tt.wantNotInRange, gotNotInRange) - } -} diff --git a/variable.go b/variable.go deleted file mode 100644 index 16d796781e6..00000000000 --- a/variable.go +++ /dev/null @@ -1,253 +0,0 @@ -package influxdb - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - "regexp" - - "github.com/influxdata/influxdb/v2/kit/platform" -) - -// ErrVariableNotFound is the error msg for a missing variable. -const ErrVariableNotFound = "variable not found" - -// ops for variable error. 
-const ( - OpFindVariableByID = "FindVariableByID" - OpFindVariables = "FindVariables" - OpCreateVariable = "CreateVariable" - OpUpdateVariable = "UpdateVariable" - OpReplaceVariable = "ReplaceVariable" - OpDeleteVariable = "DeleteVariable" -) - -// VariableService describes a service for managing Variables -type VariableService interface { - // FindVariableByID finds a single variable from the store by its ID - FindVariableByID(ctx context.Context, id platform.ID) (*Variable, error) - - // FindVariables returns all variables in the store - FindVariables(ctx context.Context, filter VariableFilter, opt ...FindOptions) ([]*Variable, error) - - // CreateVariable creates a new variable and assigns it an ID - CreateVariable(ctx context.Context, m *Variable) error - - // UpdateVariable updates a single variable with a changeset - UpdateVariable(ctx context.Context, id platform.ID, update *VariableUpdate) (*Variable, error) - - // ReplaceVariable replaces a single variable - ReplaceVariable(ctx context.Context, variable *Variable) error - - // DeleteVariable removes a variable from the store - DeleteVariable(ctx context.Context, id platform.ID) error -} - -// A Variable describes a keyword that can be expanded into several possible -// values when used in an InfluxQL or Flux query -type Variable struct { - ID platform.ID `json:"id,omitempty"` - OrganizationID platform.ID `json:"orgID,omitempty"` - Name string `json:"name"` - Description string `json:"description"` - Selected []string `json:"selected"` - Arguments *VariableArguments `json:"arguments"` - CRUDLog -} - -// DefaultVariableFindOptions are the default find options for variables. -var DefaultVariableFindOptions = FindOptions{} - -// VariableFilter represents a set of filter that restrict the returned results. -type VariableFilter struct { - ID *platform.ID - OrganizationID *platform.ID - Organization *string -} - -// QueryParams implements PagingFilter. -// -// It converts VariableFilter fields to url query params. -func (f VariableFilter) QueryParams() map[string][]string { - qp := url.Values{} - if f.ID != nil { - qp.Add("id", f.ID.String()) - } - - if f.OrganizationID != nil { - qp.Add("orgID", f.OrganizationID.String()) - } - - if f.Organization != nil { - qp.Add("org", *f.Organization) - } - - return qp -} - -// A VariableUpdate describes a set of changes that can be applied to a Variable -type VariableUpdate struct { - Name string `json:"name"` - Selected []string `json:"selected"` - Description string `json:"description"` - Arguments *VariableArguments `json:"arguments"` -} - -// A VariableArguments contains arguments used when expanding a Variable -type VariableArguments struct { - Type string `json:"type"` // "constant", "map", or "query" - Values interface{} `json:"values"` // either VariableQueryValues, VariableConstantValues, VariableMapValues -} - -// VariableQueryValues contains a query used when expanding a query-based Variable -type VariableQueryValues struct { - Query string `json:"query"` - Language string `json:"language"` // "influxql" or "flux" -} - -// VariableConstantValues are the data for expanding a constants-based Variable -type VariableConstantValues []string - -// VariableMapValues are the data for expanding a map-based Variable -type VariableMapValues map[string]string - -// Valid returns an error if a Variable contains invalid data -func (m *Variable) Valid() error { - // todo(leodido) > check it org ID validity? 
- - if m.Name == "" { - return fmt.Errorf("missing variable name") - } - - // variable name must start with a letter to be a valid identifier in Flux - if !regexp.MustCompile(`^[a-zA-Z_].*`).MatchString(m.Name) { - return fmt.Errorf("variable name must start with a letter") - } - - validTypes := map[string]bool{ - "constant": true, - "map": true, - "query": true, - } - - if m.Arguments == nil || !validTypes[m.Arguments.Type] { - return fmt.Errorf("invalid arguments type") - } - - inValidNames := [11]string{"and", "import", "not", "return", "option", "test", "empty", "in", "or", "package", "builtin"} - - for x := range inValidNames { - - if m.Name == inValidNames[x] { - return fmt.Errorf("%q is a protected variable name", inValidNames[x]) - } - } - - return nil -} - -// Valid returns an error if a Variable changeset is not valid -func (u *VariableUpdate) Valid() error { - if u.Name == "" && u.Description == "" && u.Selected == nil && u.Arguments == nil { - return fmt.Errorf("no fields supplied in update") - } - - return nil -} - -// Apply applies non-zero fields from a VariableUpdate to a Variable -func (u *VariableUpdate) Apply(m *Variable) { - if u.Name != "" { - m.Name = u.Name - } - - if u.Selected != nil { - m.Selected = u.Selected - } - - if u.Arguments != nil { - m.Arguments = u.Arguments - } - - if u.Description != "" { - m.Description = u.Description - } -} - -// UnmarshalJSON unmarshals json into a VariableArguments struct, using the `Type` -// field to assign the approriate struct to the `Values` field -func (a *VariableArguments) UnmarshalJSON(data []byte) error { - type Alias VariableArguments - aux := struct{ *Alias }{Alias: (*Alias)(a)} - - err := json.Unmarshal(data, &aux) - if err != nil { - return err - } - - // Decode the polymorphic VariableArguments.Values field into the appropriate struct - switch aux.Type { - case "constant": - values, ok := aux.Values.([]interface{}) - if !ok { - return fmt.Errorf("error parsing %v as VariableConstantArguments", aux.Values) - } - - variableValues := make(VariableConstantValues, len(values)) - for i, v := range values { - if _, ok := v.(string); !ok { - return fmt.Errorf("expected variable constant value to be string but received %T", v) - } - variableValues[i] = v.(string) - } - - a.Values = variableValues - case "map": - values, ok := aux.Values.(map[string]interface{}) - if !ok { - return fmt.Errorf("error parsing %v as VariableMapArguments", aux.Values) - } - - variableValues := VariableMapValues{} - for k, v := range values { - if _, ok := v.(string); !ok { - return fmt.Errorf("expected variable map value to be string but received %T", v) - } - variableValues[k] = v.(string) - } - - a.Values = variableValues - case "query": - values, ok := aux.Values.(map[string]interface{}) - if !ok { - return fmt.Errorf("error parsing %v as VariableQueryArguments", aux.Values) - } - - variableValues := VariableQueryValues{} - - query, prs := values["query"] - if !prs { - return fmt.Errorf("\"query\" key not present in VariableQueryArguments") - } - if _, ok := query.(string); !ok { - return fmt.Errorf("expected \"query\" to be string but received %T", query) - } - - language, prs := values["language"] - if !prs { - return fmt.Errorf("\"language\" key not present in VariableQueryArguments") - } - if _, ok := language.(string); !ok { - return fmt.Errorf("expected \"language\" to be string but received %T", language) - } - - variableValues.Query = query.(string) - variableValues.Language = language.(string) - a.Values = variableValues - default: - 
return fmt.Errorf("unknown VariableArguments type %s", aux.Type) - } - - return nil -} diff --git a/variable_test.go b/variable_test.go deleted file mode 100644 index bc07e80c5f9..00000000000 --- a/variable_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package influxdb_test - -import ( - "encoding/json" - "reflect" - "testing" - - platform "github.com/influxdata/influxdb/v2" - platformtesting "github.com/influxdata/influxdb/v2/testing" -) - -var ( - variableTestID = "debac1e0deadbeef" - variableTestOrgID = "deadbeefdeadbeef" -) - -func TestVariable_UnmarshalJSON(t *testing.T) { - tests := []struct { - name string - json string - want platform.Variable - }{ - { - name: "with organization", - json: ` -{ - "id": "debac1e0deadbeef", - "orgID": "deadbeefdeadbeef", - "name": "howdy", - "selected": [], - "arguments": { - "type": "constant", - "values": ["a", "b", "c", "d"] - } -} -`, - want: platform.Variable{ - ID: platformtesting.MustIDBase16(variableTestID), - OrganizationID: platformtesting.MustIDBase16(variableTestOrgID), - Name: "howdy", - Selected: make([]string, 0), - Arguments: &platform.VariableArguments{ - Type: "constant", - Values: platform.VariableConstantValues{"a", "b", "c", "d"}, - }, - }, - }, - { - name: "with constant arguments", - json: ` -{ - "id": "debac1e0deadbeef", - "name": "howdy", - "selected": [], - "arguments": { - "type": "constant", - "values": ["a", "b", "c"] - } -} -`, - want: platform.Variable{ - ID: platformtesting.MustIDBase16(variableTestID), - Name: "howdy", - Selected: make([]string, 0), - Arguments: &platform.VariableArguments{ - Type: "constant", - Values: platform.VariableConstantValues{"a", "b", "c"}, - }, - }, - }, - { - name: "with map arguments", - json: ` -{ - "id": "debac1e0deadbeef", - "name": "howdy", - "selected": [], - "arguments": { - "type": "map", - "values": { - "a": "A", - "b": "B" - } - } -} -`, - want: platform.Variable{ - ID: platformtesting.MustIDBase16(variableTestID), - Name: "howdy", - Selected: make([]string, 0), - Arguments: &platform.VariableArguments{ - Type: "map", - Values: platform.VariableMapValues{"a": "A", "b": "B"}, - }, - }, - }, - { - name: "with query arguments", - json: ` -{ - "id": "debac1e0deadbeef", - "name": "howdy", - "selected": [], - "arguments": { - "type": "query", - "values": { - "query": "howdy", - "language": "flux" - } - } -} -`, - want: platform.Variable{ - ID: platformtesting.MustIDBase16(variableTestID), - Name: "howdy", - Selected: make([]string, 0), - Arguments: &platform.VariableArguments{ - Type: "query", - Values: platform.VariableQueryValues{ - Query: "howdy", - Language: "flux", - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var m platform.Variable - - err := json.Unmarshal([]byte(tt.json), &m) - if err != nil { - t.Fatalf("error unmarshalling json: %v", err) - } - - if !reflect.DeepEqual(m, tt.want) { - t.Errorf("%q. got = %+v, want %+v", tt.name, m, tt.want) - } - }) - } -} diff --git a/vault/README.md b/vault/README.md deleted file mode 100644 index 9063805031b..00000000000 --- a/vault/README.md +++ /dev/null @@ -1,85 +0,0 @@ -# Vault Secret Service -This package implements `platform.SecretService` using [vault](https://github.com/hashicorp/vault). - -## Key layout -All secrets are stored in vault as key value pairs that can be found under -the key `/secret/data/:orgID`. 
- -For example - -```txt -/secret/data/031c8cbefe101000 -> - github_api_key: foo - some_other_key: bar - a_secret: key -``` - -## Configuration - -When a new secret service is instatiated with `vault.NewSecretService()` we read the -environment for the [standard vault environment variables](https://www.vaultproject.io/docs/commands/index.html#environment-variables). - -It is expected that the vault provided is unsealed and that the `VAULT_TOKEN` has sufficient privileges to access the key space described above. - -## Test/Dev - -The vault secret service may be used by starting a vault server - -```sh -vault server -dev -``` - -```sh -VAULT_ADDR='' VAULT_TOKEN='' influxd --secret-store vault -``` - -Once the vault and influxdb servers have been started and initialized, you may test the service by executing the following: - -```sh -curl --request GET \ - --url http://localhost:8086/api/v2/orgs//secrets \ - --header 'authorization: Token - -# should return -# -# { -# "links": { -# "org": "/api/v2/orgs/031c8cbefe101000", -# "secrets": "/api/v2/orgs/031c8cbefe101000/secrets" -# }, -# "secrets": [] -# } -``` - -```sh -curl --request PATCH \ - --url http://localhost:8086/api/v2/orgs//secrets \ - --header 'authorization: Token \ - --header 'content-type: application/json' \ - --data '{ - "foo": "bar", - "hello": "world" -}' - -# should return 204 no content -``` - -```sh -curl --request GET \ - --url http://localhost:8086/api/v2/orgs//secrets \ - --header 'authorization: Token - -# should return -# -# { -# "links": { -# "org": "/api/v2/orgs/031c8cbefe101000", -# "secrets": "/api/v2/orgs/031c8cbefe101000/secrets" -# }, -# "secrets": [ -# "foo", -# "hello" -# ] -# } -``` - diff --git a/vault/secret.go b/vault/secret.go deleted file mode 100644 index 7c95a4258b5..00000000000 --- a/vault/secret.go +++ /dev/null @@ -1,274 +0,0 @@ -package vault - -import ( - "context" - "encoding/json" - "fmt" - "strconv" - "time" - - "github.com/hashicorp/vault/api" - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" -) - -var _ platform.SecretService = (*SecretService)(nil) - -// SecretService is service for storing user secrets -type SecretService struct { - Client *api.Client -} - -// Config may setup the vault client configuration. If any field is a zero -// value, it will be ignored and the default used. -type Config struct { - Address string - AgentAddress string - ClientTimeout time.Duration - MaxRetries int - Token string - TLSConfig -} - -// TLSConfig is the configuration for TLS. -type TLSConfig struct { - CACert string - CAPath string - ClientCert string - ClientKey string - InsecureSkipVerify bool - TLSServerName string -} - -func (c Config) assign(apiCFG *api.Config) error { - if c.Address != "" { - apiCFG.Address = c.Address - } - - if c.AgentAddress != "" { - apiCFG.AgentAddress = c.AgentAddress - } - - if c.ClientTimeout > 0 { - apiCFG.Timeout = c.ClientTimeout - } - - if c.MaxRetries > 0 { - apiCFG.MaxRetries = c.MaxRetries - } - - if c.TLSServerName != "" { - err := apiCFG.ConfigureTLS(&api.TLSConfig{ - CACert: c.CACert, - CAPath: c.CAPath, - ClientCert: c.ClientCert, - ClientKey: c.ClientKey, - TLSServerName: c.TLSServerName, - Insecure: c.InsecureSkipVerify, - }) - if err != nil { - return err - } - } - - return nil -} - -// ConfigOptFn is a functional input option to configure a vault service. -type ConfigOptFn func(Config) Config - -// WithConfig provides a configuration to the service constructor. 
-func WithConfig(config Config) ConfigOptFn { - return func(Config) Config { - return config - } -} - -// WithTLSConfig allows one to set the TLS config only. -func WithTLSConfig(tlsCFG TLSConfig) ConfigOptFn { - return func(cfg Config) Config { - cfg.TLSConfig = tlsCFG - return cfg - } -} - -// NewSecretService creates an instance of a SecretService. -// The service is configured using the standard vault environment variables. -// https://www.vaultproject.io/docs/commands/index.html#environment-variables -func NewSecretService(cfgOpts ...ConfigOptFn) (*SecretService, error) { - explicitConfig := Config{} - for _, o := range cfgOpts { - explicitConfig = o(explicitConfig) - } - - cfg := api.DefaultConfig() - if cfg.Error != nil { - return nil, cfg.Error - } - - err := explicitConfig.assign(cfg) - if err != nil { - return nil, err - } - - c, err := api.NewClient(cfg) - if err != nil { - return nil, err - } - - if explicitConfig.Token != "" { - c.SetToken(explicitConfig.Token) - } - - return &SecretService{ - Client: c, - }, nil -} - -// LoadSecret retrieves the secret value v found at key k for organization orgID. -func (s *SecretService) LoadSecret(ctx context.Context, orgID platform2.ID, k string) (string, error) { - data, _, err := s.loadSecrets(ctx, orgID) - if err != nil { - return "", err - } - - if v, ok := data[k]; ok { - return v, nil - } - - return "", fmt.Errorf("secret not found") -} - -// loadSecrets retrieves a map of secrets for an organization and the version of the secrets retrieved. -// The version is used to ensure that concurrent updates will not overwrite one another. -func (s *SecretService) loadSecrets(ctx context.Context, orgID platform2.ID) (map[string]string, int, error) { - // TODO(desa): update url construction - sec, err := s.Client.Logical().Read(fmt.Sprintf("/secret/data/%s", orgID)) - if err != nil { - return nil, -1, err - } - - m := map[string]string{} - if sec == nil { - return m, 0, nil - } - - data, ok := sec.Data["data"].(map[string]interface{}) - if !ok { - return nil, -1, fmt.Errorf("value found in secret data is not map[string]interface{}") - } - - for k, v := range data { - val, ok := v.(string) - if !ok { - continue - } - m[k] = val - } - - metadata, ok := sec.Data["metadata"].(map[string]interface{}) - if !ok { - return nil, -1, fmt.Errorf("value found in secret metadata is not map[string]interface{}") - } - - var version int - switch v := metadata["version"].(type) { - case json.Number: - ver, err := v.Int64() - if err != nil { - return nil, -1, err - } - version = int(ver) - case string: - ver, err := strconv.Atoi(v) - if err != nil { - return nil, -1, fmt.Errorf("version provided is not a valid integer: %v", err) - } - version = ver - case int: - version = v - default: - return nil, -1, fmt.Errorf("version provided is %T not a string or int", v) - } - - return m, version, nil -} - -// GetSecretKeys retrieves all secret keys that are stored for the organization orgID. -func (s *SecretService) GetSecretKeys(ctx context.Context, orgID platform2.ID) ([]string, error) { - data, _, err := s.loadSecrets(ctx, orgID) - if err != nil { - return nil, err - } - - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - - return keys, nil -} - -// PutSecret stores the secret pair (k,v) for the organization orgID. 
-func (s *SecretService) PutSecret(ctx context.Context, orgID platform2.ID, k string, v string) error { - data, ver, err := s.loadSecrets(ctx, orgID) - if err != nil { - return err - } - - data[k] = v - - return s.putSecrets(ctx, orgID, data, ver) -} - -// putSecrets will set all provided data values for the organization orgID. -// If version is negative, the write will overwrite all specified values. -// If version is 0, the write will only be allowed if the keys do not exists. -// If version is non-zero, the write will only be allowed if the keys current -// version in vault matches the version specified. -func (s *SecretService) putSecrets(ctx context.Context, orgID platform2.ID, data map[string]string, version int) error { - m := map[string]interface{}{"data": data} - - if version >= 0 { - m["options"] = map[string]interface{}{"cas": version} - } - - if _, err := s.Client.Logical().Write(fmt.Sprintf("/secret/data/%s", orgID), m); err != nil { - return err - } - - return nil -} - -// PutSecrets puts all provided secrets and overwrites any previous values. -func (s *SecretService) PutSecrets(ctx context.Context, orgID platform2.ID, m map[string]string) error { - return s.putSecrets(ctx, orgID, m, -1) -} - -// PatchSecrets patches all provided secrets and updates any previous values. -func (s *SecretService) PatchSecrets(ctx context.Context, orgID platform2.ID, m map[string]string) error { - data, ver, err := s.loadSecrets(ctx, orgID) - if err != nil { - return err - } - - for k, v := range m { - data[k] = v - } - - return s.putSecrets(ctx, orgID, data, ver) -} - -// DeleteSecret removes a single secret from the secret store. -func (s *SecretService) DeleteSecret(ctx context.Context, orgID platform2.ID, ks ...string) error { - data, ver, err := s.loadSecrets(ctx, orgID) - if err != nil { - return err - } - - for _, k := range ks { - delete(data, k) - } - - return s.putSecrets(ctx, orgID, data, ver) -} diff --git a/vault/secret_test.go b/vault/secret_test.go deleted file mode 100644 index d698671cacf..00000000000 --- a/vault/secret_test.go +++ /dev/null @@ -1,64 +0,0 @@ -//go:build integration - -package vault_test - -import ( - "context" - "fmt" - "testing" - - "github.com/influxdata/influxdb/v2" - influxdbtesting "github.com/influxdata/influxdb/v2/testing" - "github.com/influxdata/influxdb/v2/vault" - testcontainers "github.com/testcontainers/testcontainers-go" -) - -func initSecretService(f influxdbtesting.SecretServiceFields, t *testing.T) (influxdb.SecretService, func()) { - token := "test" - ctx := context.Background() - vaultC, err := GenericContainer(ctx, testcontainers.GenericContainerRequest{ - ContainerRequest: testcontainers.ContainerRequest{ - Image: "docker.io/vault:latest", - ExposedPorts: []string{ - "8200/tcp", - }, - Cmd: fmt.Sprintf(`vault server -dev -dev-listen-address 0.0.0.0:8200 -dev-root-token-id=%s`, token), - }, - Started: true, - }) - if err != nil { - t.Fatalf("failed to initialize vault container: %v", err) - } - - host, err := vaultC.Host(ctx) - if err != nil { - t.Fatalf("failed to get host from vault container: %v", err) - } - - port, err := vaultC.MappedPort(ctx, "8200/tcp") - if err != nil { - t.Fatalf("failed to get exposed 8200 port from vault container: %v", err) - } - - s, err := vault.NewSecretService() - if err != nil { - t.Fatal(err) - } - s.Client.SetToken(token) - s.Client.SetAddress(fmt.Sprintf("http://%v:%v", host, port.Int())) - - for _, sec := range f.Secrets { - for k, v := range sec.Env { - if err := 
s.PutSecret(ctx, sec.OrganizationID, k, v); err != nil { - t.Fatalf("failed to populate secrets: %v", err) - } - } - } - return s, func() { - defer vaultC.Terminate(ctx, t) - } -} - -func TestSecretService(t *testing.T) { - influxdbtesting.SecretService(initSecretService, t) -} diff --git a/write.go b/write.go deleted file mode 100644 index 0ab0f4c76e3..00000000000 --- a/write.go +++ /dev/null @@ -1,11 +0,0 @@ -package influxdb - -import ( - "context" - "io" -) - -// WriteService writes data read from the reader. -type WriteService interface { - WriteTo(ctx context.Context, filter BucketFilter, r io.Reader) error -} diff --git a/zap/auth_service.go b/zap/auth_service.go deleted file mode 100644 index 6fedaed3288..00000000000 --- a/zap/auth_service.go +++ /dev/null @@ -1,83 +0,0 @@ -package zap - -import ( - "context" - - platform "github.com/influxdata/influxdb/v2" - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - "go.uber.org/zap" -) - -var _ platform.AuthorizationService = (*AuthorizationService)(nil) - -// AuthorizationService manages authorizations. -type AuthorizationService struct { - log *zap.Logger - AuthorizationService platform.AuthorizationService -} - -// FindAuthorizationByID returns an authorization given an id, and logs any errors. -func (s *AuthorizationService) FindAuthorizationByID(ctx context.Context, id platform2.ID) (a *platform.Authorization, err error) { - defer func() { - if err != nil { - s.log.Info("Error finding authorization by id", zap.Error(err)) - } - }() - - return s.AuthorizationService.FindAuthorizationByID(ctx, id) -} - -// FindAuthorizationByToken returns an authorization given a token, and logs any errors. -func (s *AuthorizationService) FindAuthorizationByToken(ctx context.Context, t string) (a *platform.Authorization, err error) { - defer func() { - if err != nil { - s.log.Info("Error finding authorization by token", zap.Error(err)) - } - }() - - return s.AuthorizationService.FindAuthorizationByToken(ctx, t) -} - -// FindAuthorizations returns authorizations given a filter, and logs any errors. -func (s *AuthorizationService) FindAuthorizations(ctx context.Context, filter platform.AuthorizationFilter, opt ...platform.FindOptions) (as []*platform.Authorization, i int, err error) { - defer func() { - if err != nil { - s.log.Info("Error finding authorizations", zap.Error(err)) - } - }() - - return s.AuthorizationService.FindAuthorizations(ctx, filter, opt...) -} - -// CreateAuthorization creates an authorization, and logs any errors. -func (s *AuthorizationService) CreateAuthorization(ctx context.Context, a *platform.Authorization) (err error) { - defer func() { - if err != nil { - s.log.Info("Error creating authorization", zap.Error(err)) - } - }() - - return s.AuthorizationService.CreateAuthorization(ctx, a) -} - -// DeleteAuthorization deletes an authorization, and logs any errors. -func (s *AuthorizationService) DeleteAuthorization(ctx context.Context, id platform2.ID) (err error) { - defer func() { - if err != nil { - s.log.Info("Error deleting authorization", zap.Error(err)) - } - }() - - return s.AuthorizationService.DeleteAuthorization(ctx, id) -} - -// UpdateAuthorization updates an authorization's status, description and logs any errors. 
-func (s *AuthorizationService) UpdateAuthorization(ctx context.Context, id platform2.ID, upd *platform.AuthorizationUpdate) (a *platform.Authorization, err error) { - defer func() { - if err != nil { - s.log.Info("Error updating authorization", zap.Error(err)) - } - }() - - return s.AuthorizationService.UpdateAuthorization(ctx, id, upd) -} diff --git a/zap/proxy_query_service.go b/zap/proxy_query_service.go deleted file mode 100644 index 32bc01c7b86..00000000000 --- a/zap/proxy_query_service.go +++ /dev/null @@ -1,31 +0,0 @@ -package zap - -import ( - "context" - "io" - - "github.com/influxdata/influxdb/v2/query" - "go.uber.org/zap" -) - -// ProxyQueryService logs the request but does not write to the writer. -type ProxyQueryService struct { - log *zap.Logger -} - -// NewProxyQueryService creates a new proxy query service with a log. -// If the logger is nil, then it will use a noop logger. -func NewProxyQueryService(log *zap.Logger) *ProxyQueryService { - return &ProxyQueryService{ - log: log, - } -} - -// Query logs the query request. -func (s *ProxyQueryService) Query(ctx context.Context, w io.Writer, req *query.ProxyRequest) (int64, error) { - if req != nil { - s.log.Info("Query", zap.Any("request", req)) - } - n, err := w.Write([]byte{}) - return int64(n), err -} diff --git a/zap/tracer.go b/zap/tracer.go deleted file mode 100644 index 89e23c907b5..00000000000 --- a/zap/tracer.go +++ /dev/null @@ -1,319 +0,0 @@ -package zap - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "time" - - platform2 "github.com/influxdata/influxdb/v2/kit/platform" - opentracing "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/log" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -const ( - traceHTTPHeader = "Zap-Trace-Span" - - logTraceIDKey = "ot_trace_id" - logSpanIDKey = "ot_span_id" - logStartKey = "ot_start" - logStopKey = "ot_stop" - logDurationKey = "ot_duration" - logChildOfKey = "ot_child_of" - logFollowsFromKey = "ot_follows_from" -) - -// Tracer implements opentracing.Tracer and logs each span as its own log. 
-type Tracer struct { - log *zap.Logger - idGenerator platform2.IDGenerator -} - -func NewTracer(log *zap.Logger, idGenerator platform2.IDGenerator) *Tracer { - return &Tracer{ - log: log, - idGenerator: idGenerator, - } -} - -func (t *Tracer) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span { - startOpts := &opentracing.StartSpanOptions{ - StartTime: time.Now(), - } - for _, opt := range opts { - opt.Apply(startOpts) - } - ctx := newSpanContext() - ctx.spanID = t.idGenerator.ID() - for _, ref := range startOpts.References { - refCtx, ok := ref.ReferencedContext.(SpanContext) - if ok { - ctx.traceID = refCtx.traceID - break - } - } - if !ctx.traceID.Valid() { - ctx.traceID = t.idGenerator.ID() - } - return &Span{ - tracer: t, - opts: *startOpts, - opName: operationName, - tags: make(map[string]interface{}), - ctx: ctx, - } -} - -func (t *Tracer) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error { - ctx, ok := sm.(SpanContext) - if !ok { - return fmt.Errorf("unsupported span context %T", sm) - } - switch format { - case opentracing.Binary: - w, ok := carrier.(io.Writer) - if !ok { - return fmt.Errorf("carrier must be an io.Writer for binary format, got %T", carrier) - } - return json.NewEncoder(w).Encode(sm) - case opentracing.TextMap: - w, ok := carrier.(opentracing.TextMapWriter) - if !ok { - return fmt.Errorf("carrier must be an opentracing.TextMapWriter for text map format, got %T", carrier) - } - return injectTextMapWriter(ctx, w) - case opentracing.HTTPHeaders: - w, ok := carrier.(opentracing.TextMapWriter) - if !ok { - return fmt.Errorf("carrier must be an opentracing.TextMapWriter for http header format, got %T", carrier) - } - return injectTextMapWriter(ctx, w) - default: - return fmt.Errorf("unsupported format %v", format) - } -} - -func (t *Tracer) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { - var err error - ctx := newSpanContext() - switch format { - case opentracing.Binary: - r, ok := carrier.(io.Reader) - if !ok { - return nil, fmt.Errorf("carrier must be an io.Reader for binary format, got %T", carrier) - } - err = json.NewDecoder(r).Decode(&ctx) - case opentracing.TextMap: - r, ok := carrier.(opentracing.TextMapReader) - if !ok { - return nil, fmt.Errorf("carrier must be an opentracing.TextMapReader for text map format, got %T", carrier) - } - err = extractTextMapReader(&ctx, r) - case opentracing.HTTPHeaders: - r, ok := carrier.(opentracing.TextMapReader) - if !ok { - return nil, fmt.Errorf("carrier must be an opentracing.TextMapReader for http header format, got %T", carrier) - } - err = extractTextMapReader(&ctx, r) - default: - return nil, fmt.Errorf("unsupported format %v", format) - } - if !ctx.traceID.Valid() { - ctx.traceID = t.idGenerator.ID() - } - if !ctx.spanID.Valid() { - return nil, errors.New("no span ID found in carrier") - } - return ctx, err -} - -func injectTextMapWriter(ctx SpanContext, w opentracing.TextMapWriter) error { - data, err := json.Marshal(ctx) - if err != nil { - return err - } - w.Set(traceHTTPHeader, string(data)) - return nil -} - -func extractTextMapReader(ctx *SpanContext, r opentracing.TextMapReader) error { - var data []byte - r.ForeachKey(func(k, v string) error { - if http.CanonicalHeaderKey(k) == traceHTTPHeader { - data = []byte(v) - } - return nil - }) - return json.Unmarshal(data, ctx) -} - -// Span implements opentracing.Span, all Spans must be created using the Tracer. 
-type Span struct { - tracer *Tracer - opts opentracing.StartSpanOptions - opName string - tags map[string]interface{} - fields []zapcore.Field - ctx SpanContext -} - -func (s *Span) Finish() { - s.FinishWithOptions(opentracing.FinishOptions{}) -} - -func (s *Span) FinishWithOptions(opts opentracing.FinishOptions) { - if opts.FinishTime.IsZero() { - opts.FinishTime = time.Now() - } - duration := opts.FinishTime.Sub(s.opts.StartTime) - fields := append(s.fields, - zap.String(logTraceIDKey, s.ctx.traceID.String()), - zap.String(logSpanIDKey, s.ctx.spanID.String()), - zap.Time(logStartKey, s.opts.StartTime), - zap.Time(logStopKey, opts.FinishTime), - zap.Duration(logDurationKey, duration), - ) - for _, ref := range s.opts.References { - ctx, ok := ref.ReferencedContext.(SpanContext) - if !ok { - continue - } - switch ref.Type { - case opentracing.ChildOfRef: - fields = append(fields, zap.String(logChildOfKey, ctx.spanID.String())) - case opentracing.FollowsFromRef: - fields = append(fields, zap.String(logFollowsFromKey, ctx.spanID.String())) - } - } - for k, v := range s.tags { - fields = append(fields, zap.Any(k, v)) - } - for k, v := range s.ctx.baggage { - fields = append(fields, zap.String(k, v)) - } - s.tracer.log.Info(s.opName, fields...) -} - -func (s *Span) Context() opentracing.SpanContext { - return s.ctx -} - -func (s *Span) SetOperationName(operationName string) opentracing.Span { - s.opName = operationName - return s -} - -func (s *Span) SetTag(key string, value interface{}) opentracing.Span { - s.tags[key] = value - return s -} - -func (s *Span) LogFields(fields ...log.Field) { - for _, field := range fields { - s.fields = append(s.fields, convertField(field)) - } -} - -func convertField(field log.Field) zapcore.Field { - return zap.Any(field.Key(), field.Value()) -} - -func (s *Span) LogKV(keyValues ...interface{}) { - if len(keyValues)%2 != 0 { - s.LogFields(log.Error(fmt.Errorf("non-even keyValues len: %v", len(keyValues)))) - return - } - fields, err := log.InterleavedKVToFields(keyValues...) - if err != nil { - s.LogFields(log.Error(err), log.String("function", "LogKV")) - return - } - s.LogFields(fields...) -} - -func (s *Span) SetBaggageItem(restrictedKey string, value string) opentracing.Span { - s.ctx.baggage[restrictedKey] = value - return s -} - -func (s *Span) BaggageItem(restrictedKey string) string { - return s.ctx.baggage[restrictedKey] -} - -func (s *Span) Tracer() opentracing.Tracer { - return s.tracer -} - -// LogEvent is deprecated, as such it is not implemented. -func (s *Span) LogEvent(event string) { - panic("use of deprecated LogEvent: not implemented") -} - -// LogEventWithPayload is deprecated, as such it is not implemented. -func (s *Span) LogEventWithPayload(event string, payload interface{}) { - panic("use of deprecated LogEventWithPayload: not implemented") -} - -// Log is deprecated, as such it is not implemented. -func (s *Span) Log(data opentracing.LogData) { - panic("use of deprecated Log: not implemented") -} - -// SpanContext implements opentracing.SpanContext, all span contexts must be created using the Tracer. 
-type SpanContext struct { - traceID platform2.ID - spanID platform2.ID - baggage map[string]string -} - -func newSpanContext() SpanContext { - return SpanContext{ - baggage: make(map[string]string), - } -} - -func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) { - for k, v := range c.baggage { - if !handler(k, v) { - return - } - } -} - -func (c SpanContext) MarshalJSON() ([]byte, error) { - raw := struct { - TraceID platform2.ID `json:"trace_id"` - SpanID platform2.ID `json:"span_id"` - Baggage map[string]string `json:"baggage"` - }{ - TraceID: c.traceID, - SpanID: c.spanID, - Baggage: c.baggage, - } - return json.Marshal(raw) -} - -func (c *SpanContext) UnmarshalJSON(data []byte) error { - raw := struct { - TraceID platform2.ID `json:"trace_id"` - SpanID platform2.ID `json:"span_id"` - Baggage map[string]string `json:"baggage"` - }{ - TraceID: c.traceID, - SpanID: c.spanID, - Baggage: c.baggage, - } - if err := json.Unmarshal(data, &raw); err != nil { - return err - } - - c.traceID = raw.TraceID - c.spanID = raw.SpanID - c.baggage = raw.Baggage - - return nil -}
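
The span context above round-trips through a single `Zap-Trace-Span` HTTP header, and finishing a span emits one structured log line instead of sending anything to a tracing backend. Below is a minimal usage sketch of the deleted `zap` tracer; the `toyIDGenerator` and the `influxzap` import alias are illustrative assumptions, and any `platform.IDGenerator` (for example the snowflake-based generator used elsewhere in the repository) would work in its place.

```go
package main

import (
	"net/http"

	platform "github.com/influxdata/influxdb/v2/kit/platform"
	influxzap "github.com/influxdata/influxdb/v2/zap"
	opentracing "github.com/opentracing/opentracing-go"
	"go.uber.org/zap"
)

// toyIDGenerator is a stand-in for platform.IDGenerator, used here only for
// illustration; it hands out sequential non-zero IDs.
type toyIDGenerator struct{ n uint64 }

func (g *toyIDGenerator) ID() platform.ID { g.n++; return platform.ID(g.n) }

func main() {
	logger := zap.NewExample()
	tracer := influxzap.NewTracer(logger, &toyIDGenerator{})

	// Start a span and propagate its context over HTTP headers; Inject writes
	// the JSON-encoded SpanContext into the "Zap-Trace-Span" header.
	span := tracer.StartSpan("query")
	header := make(http.Header)
	if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(header)); err != nil {
		logger.Fatal("inject failed", zap.Error(err))
	}

	// On the receiving side, extract the context and start a child span that
	// shares the same trace ID.
	parent, err := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(header))
	if err != nil {
		logger.Fatal("extract failed", zap.Error(err))
	}
	child := tracer.StartSpan("query/execute", opentracing.ChildOf(parent))

	// Finishing a span logs it; no data leaves the process.
	child.Finish()
	span.Finish()
}
```

With `zap.NewExample()` this should print two JSON log lines, each carrying the `ot_trace_id`, `ot_span_id`, and timing fields, with the child span's line referencing its parent through `ot_child_of`.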